Diffstat (limited to 'src')
-rw-r--r--  src/backend/access/common/heaptuple.c | 10
-rw-r--r--  src/backend/access/common/indextuple.c | 6
-rw-r--r--  src/backend/access/common/printtup.c | 2
-rw-r--r--  src/backend/access/common/reloptions.c | 6
-rw-r--r--  src/backend/access/common/tupconvert.c | 2
-rw-r--r--  src/backend/access/common/tupdesc.c | 2
-rw-r--r--  src/backend/access/gin/ginarrayproc.c | 2
-rw-r--r--  src/backend/access/gin/ginbulk.c | 2
-rw-r--r--  src/backend/access/gin/ginentrypage.c | 2
-rw-r--r--  src/backend/access/gin/ginfast.c | 8
-rw-r--r--  src/backend/access/gin/ginget.c | 16
-rw-r--r--  src/backend/access/gin/gininsert.c | 2
-rw-r--r--  src/backend/access/gin/ginscan.c | 2
-rw-r--r--  src/backend/access/gin/ginutil.c | 2
-rw-r--r--  src/backend/access/gin/ginxlog.c | 2
-rw-r--r--  src/backend/access/gist/gist.c | 4
-rw-r--r--  src/backend/access/gist/gistget.c | 6
-rw-r--r--  src/backend/access/gist/gistscan.c | 2
-rw-r--r--  src/backend/access/gist/gistsplit.c | 10
-rw-r--r--  src/backend/access/gist/gistutil.c | 6
-rw-r--r--  src/backend/access/gist/gistvacuum.c | 2
-rw-r--r--  src/backend/access/hash/hash.c | 8
-rw-r--r--  src/backend/access/hash/hashfunc.c | 10
-rw-r--r--  src/backend/access/hash/hashovfl.c | 16
-rw-r--r--  src/backend/access/hash/hashpage.c | 20
-rw-r--r--  src/backend/access/hash/hashsearch.c | 2
-rw-r--r--  src/backend/access/hash/hashsort.c | 4
-rw-r--r--  src/backend/access/hash/hashutil.c | 4
-rw-r--r--  src/backend/access/heap/heapam.c | 72
-rw-r--r--  src/backend/access/heap/hio.c | 14
-rw-r--r--  src/backend/access/heap/pruneheap.c | 12
-rw-r--r--  src/backend/access/heap/rewriteheap.c | 16
-rw-r--r--  src/backend/access/heap/syncscan.c | 6
-rw-r--r--  src/backend/access/heap/tuptoaster.c | 8
-rw-r--r--  src/backend/access/index/genam.c | 6
-rw-r--r--  src/backend/access/index/indexam.c | 16
-rw-r--r--  src/backend/access/nbtree/nbtcompare.c | 2
-rw-r--r--  src/backend/access/nbtree/nbtinsert.c | 28
-rw-r--r--  src/backend/access/nbtree/nbtpage.c | 32
-rw-r--r--  src/backend/access/nbtree/nbtree.c | 10
-rw-r--r--  src/backend/access/nbtree/nbtsearch.c | 28
-rw-r--r--  src/backend/access/nbtree/nbtsort.c | 24
-rw-r--r--  src/backend/access/nbtree/nbtutils.c | 42
-rw-r--r--  src/backend/access/nbtree/nbtxlog.c | 6
-rw-r--r--  src/backend/access/transam/clog.c | 8
-rw-r--r--  src/backend/access/transam/multixact.c | 50
-rw-r--r--  src/backend/access/transam/slru.c | 12
-rw-r--r--  src/backend/access/transam/subtrans.c | 4
-rw-r--r--  src/backend/access/transam/transam.c | 4
-rw-r--r--  src/backend/access/transam/twophase.c | 12
-rw-r--r--  src/backend/access/transam/varsup.c | 12
-rw-r--r--  src/backend/access/transam/xact.c | 62
-rw-r--r--  src/backend/access/transam/xlog.c | 86
-rw-r--r--  src/backend/bootstrap/bootstrap.c | 14
-rw-r--r--  src/backend/catalog/aclchk.c | 18
-rw-r--r--  src/backend/catalog/catalog.c | 6
-rw-r--r--  src/backend/catalog/dependency.c | 30
-rw-r--r--  src/backend/catalog/heap.c | 36
-rw-r--r--  src/backend/catalog/index.c | 82
-rw-r--r--  src/backend/catalog/indexing.c | 2
-rw-r--r--  src/backend/catalog/namespace.c | 52
-rw-r--r--  src/backend/catalog/objectaddress.c | 2
-rw-r--r--  src/backend/catalog/pg_collation.c | 2
-rw-r--r--  src/backend/catalog/pg_constraint.c | 8
-rw-r--r--  src/backend/catalog/pg_db_role_setting.c | 2
-rw-r--r--  src/backend/catalog/pg_depend.c | 8
-rw-r--r--  src/backend/catalog/pg_enum.c | 2
-rw-r--r--  src/backend/catalog/pg_largeobject.c | 2
-rw-r--r--  src/backend/catalog/pg_operator.c | 4
-rw-r--r--  src/backend/catalog/pg_proc.c | 10
-rw-r--r--  src/backend/catalog/pg_shdepend.c | 18
-rw-r--r--  src/backend/catalog/pg_type.c | 2
-rw-r--r--  src/backend/catalog/storage.c | 4
-rw-r--r--  src/backend/catalog/toasting.c | 2
-rw-r--r--  src/backend/commands/aggregatecmds.c | 2
-rw-r--r--  src/backend/commands/alter.c | 2
-rw-r--r--  src/backend/commands/analyze.c | 46
-rw-r--r--  src/backend/commands/async.c | 30
-rw-r--r--  src/backend/commands/cluster.c | 42
-rw-r--r--  src/backend/commands/constraint.c | 4
-rw-r--r--  src/backend/commands/copy.c | 32
-rw-r--r--  src/backend/commands/dbcommands.c | 14
-rw-r--r--  src/backend/commands/define.c | 2
-rw-r--r--  src/backend/commands/explain.c | 8
-rw-r--r--  src/backend/commands/extension.c | 26
-rw-r--r--  src/backend/commands/foreigncmds.c | 6
-rw-r--r--  src/backend/commands/functioncmds.c | 12
-rw-r--r--  src/backend/commands/indexcmds.c | 36
-rw-r--r--  src/backend/commands/opclasscmds.c | 8
-rw-r--r--  src/backend/commands/operatorcmds.c | 2
-rw-r--r--  src/backend/commands/portalcmds.c | 8
-rw-r--r--  src/backend/commands/prepare.c | 4
-rw-r--r--  src/backend/commands/proclang.c | 2
-rw-r--r--  src/backend/commands/schemacmds.c | 4
-rw-r--r--  src/backend/commands/sequence.c | 14
-rw-r--r--  src/backend/commands/tablecmds.c | 74
-rw-r--r--  src/backend/commands/tablespace.c | 14
-rw-r--r--  src/backend/commands/trigger.c | 58
-rw-r--r--  src/backend/commands/typecmds.c | 22
-rw-r--r--  src/backend/commands/user.c | 8
-rw-r--r--  src/backend/commands/vacuum.c | 24
-rw-r--r--  src/backend/commands/vacuumlazy.c | 8
-rw-r--r--  src/backend/commands/variable.c | 20
-rw-r--r--  src/backend/commands/view.c | 6
-rw-r--r--  src/backend/executor/execAmi.c | 4
-rw-r--r--  src/backend/executor/execCurrent.c | 2
-rw-r--r--  src/backend/executor/execJunk.c | 2
-rw-r--r--  src/backend/executor/execMain.c | 24
-rw-r--r--  src/backend/executor/execProcnode.c | 4
-rw-r--r--  src/backend/executor/execQual.c | 48
-rw-r--r--  src/backend/executor/execScan.c | 4
-rw-r--r--  src/backend/executor/execTuples.c | 12
-rw-r--r--  src/backend/executor/execUtils.c | 20
-rw-r--r--  src/backend/executor/functions.c | 28
-rw-r--r--  src/backend/executor/nodeAgg.c | 46
-rw-r--r--  src/backend/executor/nodeAppend.c | 2
-rw-r--r--  src/backend/executor/nodeBitmapHeapscan.c | 4
-rw-r--r--  src/backend/executor/nodeFunctionscan.c | 2
-rw-r--r--  src/backend/executor/nodeHash.c | 16
-rw-r--r--  src/backend/executor/nodeHashjoin.c | 6
-rw-r--r--  src/backend/executor/nodeIndexscan.c | 4
-rw-r--r--  src/backend/executor/nodeLimit.c | 2
-rw-r--r--  src/backend/executor/nodeLockRows.c | 8
-rw-r--r--  src/backend/executor/nodeMaterial.c | 2
-rw-r--r--  src/backend/executor/nodeMergeAppend.c | 2
-rw-r--r--  src/backend/executor/nodeMergejoin.c | 14
-rw-r--r--  src/backend/executor/nodeModifyTable.c | 22
-rw-r--r--  src/backend/executor/nodeRecursiveunion.c | 2
-rw-r--r--  src/backend/executor/nodeSetOp.c | 6
-rw-r--r--  src/backend/executor/nodeSubplan.c | 10
-rw-r--r--  src/backend/executor/nodeSubqueryscan.c | 4
-rw-r--r--  src/backend/executor/nodeUnique.c | 2
-rw-r--r--  src/backend/executor/nodeValuesscan.c | 2
-rw-r--r--  src/backend/executor/nodeWindowAgg.c | 24
-rw-r--r--  src/backend/executor/nodeWorktablescan.c | 2
-rw-r--r--  src/backend/executor/spi.c | 10
-rw-r--r--  src/backend/executor/tstoreReceiver.c | 2
-rw-r--r--  src/backend/lib/stringinfo.c | 4
-rw-r--r--  src/backend/libpq/auth.c | 6
-rw-r--r--  src/backend/libpq/be-secure.c | 4
-rw-r--r--  src/backend/libpq/hba.c | 6
-rw-r--r--  src/backend/libpq/md5.c | 2
-rw-r--r--  src/backend/libpq/pqcomm.c | 4
-rw-r--r--  src/backend/libpq/pqformat.c | 2
-rw-r--r--  src/backend/libpq/pqsignal.c | 4
-rw-r--r--  src/backend/main/main.c | 10
-rw-r--r--  src/backend/nodes/bitmapset.c | 4
-rw-r--r--  src/backend/nodes/copyfuncs.c | 6
-rw-r--r--  src/backend/nodes/equalfuncs.c | 12
-rw-r--r--  src/backend/nodes/list.c | 4
-rw-r--r--  src/backend/nodes/nodeFuncs.c | 12
-rw-r--r--  src/backend/nodes/outfuncs.c | 6
-rw-r--r--  src/backend/nodes/params.c | 2
-rw-r--r--  src/backend/nodes/read.c | 10
-rw-r--r--  src/backend/nodes/readfuncs.c | 14
-rw-r--r--  src/backend/nodes/tidbitmap.c | 16
-rw-r--r--  src/backend/optimizer/geqo/geqo_eval.c | 10
-rw-r--r--  src/backend/optimizer/path/allpaths.c | 16
-rw-r--r--  src/backend/optimizer/path/clausesel.c | 20
-rw-r--r--  src/backend/optimizer/path/costsize.c | 72
-rw-r--r--  src/backend/optimizer/path/equivclass.c | 42
-rw-r--r--  src/backend/optimizer/path/indxpath.c | 56
-rw-r--r--  src/backend/optimizer/path/joinpath.c | 14
-rw-r--r--  src/backend/optimizer/path/joinrels.c | 14
-rw-r--r--  src/backend/optimizer/path/orindxpath.c | 10
-rw-r--r--  src/backend/optimizer/path/pathkeys.c | 26
-rw-r--r--  src/backend/optimizer/path/tidpath.c | 4
-rw-r--r--  src/backend/optimizer/plan/analyzejoins.c | 8
-rw-r--r--  src/backend/optimizer/plan/createplan.c | 32
-rw-r--r--  src/backend/optimizer/plan/initsplan.c | 38
-rw-r--r--  src/backend/optimizer/plan/planagg.c | 8
-rw-r--r--  src/backend/optimizer/plan/planmain.c | 16
-rw-r--r--  src/backend/optimizer/plan/planner.c | 48
-rw-r--r--  src/backend/optimizer/plan/setrefs.c | 16
-rw-r--r--  src/backend/optimizer/plan/subselect.c | 38
-rw-r--r--  src/backend/optimizer/prep/prepjointree.c | 40
-rw-r--r--  src/backend/optimizer/prep/prepqual.c | 10
-rw-r--r--  src/backend/optimizer/prep/preptlist.c | 12
-rw-r--r--  src/backend/optimizer/prep/prepunion.c | 16
-rw-r--r--  src/backend/optimizer/util/clauses.c | 62
-rw-r--r--  src/backend/optimizer/util/joininfo.c | 2
-rw-r--r--  src/backend/optimizer/util/pathnode.c | 10
-rw-r--r--  src/backend/optimizer/util/plancat.c | 6
-rw-r--r--  src/backend/optimizer/util/predtest.c | 22
-rw-r--r--  src/backend/optimizer/util/relnode.c | 12
-rw-r--r--  src/backend/optimizer/util/restrictinfo.c | 8
-rw-r--r--  src/backend/optimizer/util/tlist.c | 2
-rw-r--r--  src/backend/optimizer/util/var.c | 16
-rw-r--r--  src/backend/parser/analyze.c | 22
-rw-r--r--  src/backend/parser/kwlookup.c | 2
-rw-r--r--  src/backend/parser/parse_agg.c | 14
-rw-r--r--  src/backend/parser/parse_clause.c | 34
-rw-r--r--  src/backend/parser/parse_coerce.c | 42
-rw-r--r--  src/backend/parser/parse_collate.c | 14
-rw-r--r--  src/backend/parser/parse_cte.c | 8
-rw-r--r--  src/backend/parser/parse_expr.c | 16
-rw-r--r--  src/backend/parser/parse_func.c | 24
-rw-r--r--  src/backend/parser/parse_node.c | 10
-rw-r--r--  src/backend/parser/parse_oper.c | 4
-rw-r--r--  src/backend/parser/parse_param.c | 2
-rw-r--r--  src/backend/parser/parse_relation.c | 10
-rw-r--r--  src/backend/parser/parse_target.c | 22
-rw-r--r--  src/backend/parser/parse_type.c | 10
-rw-r--r--  src/backend/parser/parse_utilcmd.c | 16
-rw-r--r--  src/backend/parser/parser.c | 2
-rw-r--r--  src/backend/port/darwin/system.c | 2
-rw-r--r--  src/backend/port/dynloader/darwin.c | 2
-rw-r--r--  src/backend/port/dynloader/freebsd.c | 2
-rw-r--r--  src/backend/port/dynloader/netbsd.c | 2
-rw-r--r--  src/backend/port/dynloader/openbsd.c | 2
-rw-r--r--  src/backend/port/posix_sema.c | 2
-rw-r--r--  src/backend/port/sysv_sema.c | 14
-rw-r--r--  src/backend/port/sysv_shmem.c | 10
-rw-r--r--  src/backend/port/win32_shmem.c | 2
-rw-r--r--  src/backend/postmaster/autovacuum.c | 46
-rw-r--r--  src/backend/postmaster/bgwriter.c | 28
-rw-r--r--  src/backend/postmaster/fork_process.c | 2
-rw-r--r--  src/backend/postmaster/pgarch.c | 4
-rw-r--r--  src/backend/postmaster/pgstat.c | 32
-rw-r--r--  src/backend/postmaster/postmaster.c | 70
-rw-r--r--  src/backend/postmaster/syslogger.c | 12
-rw-r--r--  src/backend/postmaster/walwriter.c | 6
-rw-r--r--  src/backend/regex/regc_color.c | 2
-rw-r--r--  src/backend/regex/regc_cvec.c | 2
-rw-r--r--  src/backend/regex/regc_lex.c | 2
-rw-r--r--  src/backend/regex/regc_locale.c | 6
-rw-r--r--  src/backend/regex/regc_nfa.c | 6
-rw-r--r--  src/backend/regex/regc_pg_locale.c | 8
-rw-r--r--  src/backend/regex/regcomp.c | 8
-rw-r--r--  src/backend/regex/rege_dfa.c | 2
-rw-r--r--  src/backend/regex/regerror.c | 2
-rw-r--r--  src/backend/regex/regexec.c | 2
-rw-r--r--  src/backend/regex/regfree.c | 2
-rw-r--r--  src/backend/replication/syncrep.c | 2
-rw-r--r--  src/backend/replication/walreceiver.c | 4
-rw-r--r--  src/backend/replication/walreceiverfuncs.c | 2
-rw-r--r--  src/backend/replication/walsender.c | 2
-rw-r--r--  src/backend/rewrite/rewriteDefine.c | 4
-rw-r--r--  src/backend/rewrite/rewriteHandler.c | 28
-rw-r--r--  src/backend/rewrite/rewriteManip.c | 16
-rw-r--r--  src/backend/rewrite/rewriteSupport.c | 2
-rw-r--r--  src/backend/storage/buffer/buf_init.c | 4
-rw-r--r--  src/backend/storage/buffer/buf_table.c | 4
-rw-r--r--  src/backend/storage/buffer/bufmgr.c | 40
-rw-r--r--  src/backend/storage/buffer/freelist.c | 10
-rw-r--r--  src/backend/storage/buffer/localbuf.c | 6
-rw-r--r--  src/backend/storage/file/buffile.c | 6
-rw-r--r--  src/backend/storage/file/fd.c | 22
-rw-r--r--  src/backend/storage/freespace/freespace.c | 4
-rw-r--r--  src/backend/storage/freespace/fsmpage.c | 4
-rw-r--r--  src/backend/storage/ipc/ipc.c | 10
-rw-r--r--  src/backend/storage/ipc/ipci.c | 4
-rw-r--r--  src/backend/storage/ipc/pmsignal.c | 8
-rw-r--r--  src/backend/storage/ipc/procarray.c | 34
-rw-r--r--  src/backend/storage/ipc/shmem.c | 18
-rw-r--r--  src/backend/storage/ipc/shmqueue.c | 2
-rw-r--r--  src/backend/storage/ipc/sinval.c | 12
-rw-r--r--  src/backend/storage/ipc/sinvaladt.c | 18
-rw-r--r--  src/backend/storage/ipc/standby.c | 2
-rw-r--r--  src/backend/storage/large_object/inv_api.c | 2
-rw-r--r--  src/backend/storage/lmgr/deadlock.c | 18
-rw-r--r--  src/backend/storage/lmgr/lmgr.c | 8
-rw-r--r--  src/backend/storage/lmgr/lock.c | 18
-rw-r--r--  src/backend/storage/lmgr/lwlock.c | 8
-rw-r--r--  src/backend/storage/lmgr/predicate.c | 48
-rw-r--r--  src/backend/storage/lmgr/proc.c | 28
-rw-r--r--  src/backend/storage/lmgr/s_lock.c | 6
-rw-r--r--  src/backend/storage/lmgr/spin.c | 2
-rw-r--r--  src/backend/storage/page/bufpage.c | 6
-rw-r--r--  src/backend/storage/smgr/md.c | 24
-rw-r--r--  src/backend/storage/smgr/smgr.c | 2
-rw-r--r--  src/backend/tcop/fastpath.c | 10
-rw-r--r--  src/backend/tcop/postgres.c | 32
-rw-r--r--  src/backend/tcop/pquery.c | 14
-rw-r--r--  src/backend/tcop/utility.c | 2
-rw-r--r--  src/backend/tsearch/ts_locale.c | 4
-rw-r--r--  src/backend/tsearch/ts_typanalyze.c | 14
-rw-r--r--  src/backend/tsearch/ts_utils.c | 8
-rw-r--r--  src/backend/tsearch/wparser_def.c | 2
-rw-r--r--  src/backend/utils/adt/acl.c | 16
-rw-r--r--  src/backend/utils/adt/array_userfuncs.c | 4
-rw-r--r--  src/backend/utils/adt/arrayfuncs.c | 22
-rw-r--r--  src/backend/utils/adt/arrayutils.c | 2
-rw-r--r--  src/backend/utils/adt/char.c | 2
-rw-r--r--  src/backend/utils/adt/date.c | 6
-rw-r--r--  src/backend/utils/adt/datetime.c | 12
-rw-r--r--  src/backend/utils/adt/datum.c | 2
-rw-r--r--  src/backend/utils/adt/dbsize.c | 2
-rw-r--r--  src/backend/utils/adt/domains.c | 8
-rw-r--r--  src/backend/utils/adt/float.c | 6
-rw-r--r--  src/backend/utils/adt/format_type.c | 6
-rw-r--r--  src/backend/utils/adt/formatting.c | 4
-rw-r--r--  src/backend/utils/adt/geo_selfuncs.c | 4
-rw-r--r--  src/backend/utils/adt/inet_cidr_ntop.c | 2
-rw-r--r--  src/backend/utils/adt/int.c | 30
-rw-r--r--  src/backend/utils/adt/int8.c | 44
-rw-r--r--  src/backend/utils/adt/like.c | 4
-rw-r--r--  src/backend/utils/adt/misc.c | 2
-rw-r--r--  src/backend/utils/adt/nabstime.c | 4
-rw-r--r--  src/backend/utils/adt/network.c | 14
-rw-r--r--  src/backend/utils/adt/numeric.c | 46
-rw-r--r--  src/backend/utils/adt/oid.c | 2
-rw-r--r--  src/backend/utils/adt/pg_locale.c | 14
-rw-r--r--  src/backend/utils/adt/pg_lzcompress.c | 6
-rw-r--r--  src/backend/utils/adt/pseudotypes.c | 6
-rw-r--r--  src/backend/utils/adt/regexp.c | 6
-rw-r--r--  src/backend/utils/adt/regproc.c | 12
-rw-r--r--  src/backend/utils/adt/ri_triggers.c | 8
-rw-r--r--  src/backend/utils/adt/rowtypes.c | 8
-rw-r--r--  src/backend/utils/adt/ruleutils.c | 42
-rw-r--r--  src/backend/utils/adt/selfuncs.c | 96
-rw-r--r--  src/backend/utils/adt/timestamp.c | 18
-rw-r--r--  src/backend/utils/adt/tsginidx.c | 2
-rw-r--r--  src/backend/utils/adt/varchar.c | 4
-rw-r--r--  src/backend/utils/adt/varlena.c | 30
-rw-r--r--  src/backend/utils/adt/xml.c | 32
-rw-r--r--  src/backend/utils/cache/attoptcache.c | 2
-rw-r--r--  src/backend/utils/cache/catcache.c | 16
-rw-r--r--  src/backend/utils/cache/inval.c | 30
-rw-r--r--  src/backend/utils/cache/lsyscache.c | 10
-rw-r--r--  src/backend/utils/cache/plancache.c | 20
-rw-r--r--  src/backend/utils/cache/relcache.c | 58
-rw-r--r--  src/backend/utils/cache/relmapper.c | 22
-rw-r--r--  src/backend/utils/cache/spccache.c | 6
-rw-r--r--  src/backend/utils/cache/syscache.c | 4
-rw-r--r--  src/backend/utils/cache/typcache.c | 8
-rw-r--r--  src/backend/utils/error/elog.c | 42
-rw-r--r--  src/backend/utils/fmgr/dfmgr.c | 6
-rw-r--r--  src/backend/utils/fmgr/fmgr.c | 18
-rw-r--r--  src/backend/utils/fmgr/funcapi.c | 10
-rw-r--r--  src/backend/utils/hash/dynahash.c | 24
-rw-r--r--  src/backend/utils/init/miscinit.c | 28
-rw-r--r--  src/backend/utils/init/postinit.c | 16
-rw-r--r--  src/backend/utils/mb/conversion_procs/euc_tw_and_big5/big5.c | 2
-rw-r--r--  src/backend/utils/mb/mbutils.c | 14
-rw-r--r--  src/backend/utils/mb/wstrcmp.c | 2
-rw-r--r--  src/backend/utils/mb/wstrncmp.c | 2
-rw-r--r--  src/backend/utils/misc/guc.c | 20
-rw-r--r--  src/backend/utils/misc/ps_status.c | 6
-rw-r--r--  src/backend/utils/misc/rbtree.c | 12
-rw-r--r--  src/backend/utils/misc/tzparser.c | 4
-rw-r--r--  src/backend/utils/mmgr/aset.c | 16
-rw-r--r--  src/backend/utils/mmgr/mcxt.c | 16
-rw-r--r--  src/backend/utils/mmgr/portalmem.c | 16
-rw-r--r--  src/backend/utils/resowner/resowner.c | 8
-rw-r--r--  src/backend/utils/sort/logtape.c | 30
-rw-r--r--  src/backend/utils/sort/tuplesort.c | 68
-rw-r--r--  src/backend/utils/sort/tuplestore.c | 20
-rw-r--r--  src/backend/utils/time/combocid.c | 2
-rw-r--r--  src/backend/utils/time/snapmgr.c | 10
-rw-r--r--  src/backend/utils/time/tqual.c | 14
-rw-r--r--  src/bin/initdb/initdb.c | 10
-rw-r--r--  src/bin/pg_controldata/pg_controldata.c | 2
-rw-r--r--  src/bin/pg_ctl/pg_ctl.c | 8
-rw-r--r--  src/bin/pg_dump/common.c | 4
-rw-r--r--  src/bin/pg_dump/dumputils.c | 16
-rw-r--r--  src/bin/pg_dump/pg_backup_archiver.c | 22
-rw-r--r--  src/bin/pg_dump/pg_backup_db.c | 4
-rw-r--r--  src/bin/pg_dump/pg_dump.c | 52
-rw-r--r--  src/bin/pg_dump/pg_dump.h | 8
-rw-r--r--  src/bin/pg_dump/pg_dump_sort.c | 16
-rw-r--r--  src/bin/pg_dump/pg_dumpall.c | 6
-rw-r--r--  src/bin/pg_resetxlog/pg_resetxlog.c | 4
-rw-r--r--  src/bin/psql/common.c | 2
-rw-r--r--  src/bin/psql/describe.c | 2
-rw-r--r--  src/bin/psql/input.c | 6
-rw-r--r--  src/bin/psql/large_obj.c | 2
-rw-r--r--  src/bin/psql/mainloop.c | 4
-rw-r--r--  src/bin/psql/mbprint.c | 2
-rw-r--r--  src/bin/psql/print.c | 6
-rw-r--r--  src/bin/psql/settings.h | 2
-rw-r--r--  src/bin/psql/stringutils.c | 10
-rw-r--r--  src/bin/psql/tab-complete.c | 6
-rw-r--r--  src/bin/scripts/common.c | 4
-rw-r--r--  src/include/access/attnum.h | 2
-rw-r--r--  src/include/access/genam.h | 4
-rw-r--r--  src/include/access/gin_private.h | 2
-rw-r--r--  src/include/access/hash.h | 2
-rw-r--r--  src/include/access/htup.h | 22
-rw-r--r--  src/include/access/itup.h | 2
-rw-r--r--  src/include/access/nbtree.h | 24
-rw-r--r--  src/include/access/reloptions.h | 2
-rw-r--r--  src/include/access/skey.h | 4
-rw-r--r--  src/include/access/slru.h | 2
-rw-r--r--  src/include/access/transam.h | 2
-rw-r--r--  src/include/access/tupdesc.h | 2
-rw-r--r--  src/include/access/tupmacs.h | 4
-rw-r--r--  src/include/access/tuptoaster.h | 4
-rw-r--r--  src/include/access/xlog.h | 6
-rw-r--r--  src/include/access/xlog_internal.h | 4
-rw-r--r--  src/include/access/xlogdefs.h | 8
-rw-r--r--  src/include/c.h | 18
-rw-r--r--  src/include/catalog/catversion.h | 2
-rw-r--r--  src/include/catalog/dependency.h | 2
-rw-r--r--  src/include/catalog/namespace.h | 2
-rw-r--r--  src/include/catalog/objectaccess.h | 2
-rw-r--r--  src/include/catalog/pg_attrdef.h | 2
-rw-r--r--  src/include/catalog/pg_attribute.h | 2
-rw-r--r--  src/include/catalog/pg_authid.h | 2
-rw-r--r--  src/include/catalog/pg_constraint.h | 6
-rw-r--r--  src/include/catalog/pg_control.h | 4
-rw-r--r--  src/include/catalog/pg_db_role_setting.h | 2
-rw-r--r--  src/include/catalog/pg_default_acl.h | 4
-rw-r--r--  src/include/catalog/pg_description.h | 6
-rw-r--r--  src/include/catalog/pg_largeobject.h | 2
-rw-r--r--  src/include/catalog/pg_opclass.h | 6
-rw-r--r--  src/include/catalog/pg_proc.h | 4
-rw-r--r--  src/include/catalog/pg_rewrite.h | 2
-rw-r--r--  src/include/catalog/pg_shdepend.h | 2
-rw-r--r--  src/include/catalog/pg_shdescription.h | 4
-rw-r--r--  src/include/catalog/pg_statistic.h | 24
-rw-r--r--  src/include/catalog/pg_trigger.h | 2
-rw-r--r--  src/include/catalog/pg_ts_dict.h | 2
-rw-r--r--  src/include/catalog/pg_ts_template.h | 2
-rw-r--r--  src/include/catalog/pg_type.h | 12
-rw-r--r--  src/include/commands/comment.h | 2
-rw-r--r--  src/include/commands/vacuum.h | 6
-rw-r--r--  src/include/executor/executor.h | 2
-rw-r--r--  src/include/executor/hashjoin.h | 4
-rw-r--r--  src/include/executor/spi_priv.h | 2
-rw-r--r--  src/include/executor/tuptable.h | 12
-rw-r--r--  src/include/fmgr.h | 18
-rw-r--r--  src/include/funcapi.h | 2
-rw-r--r--  src/include/lib/stringinfo.h | 4
-rw-r--r--  src/include/libpq/libpq-be.h | 8
-rw-r--r--  src/include/libpq/pqcomm.h | 2
-rw-r--r--  src/include/mb/pg_wchar.h | 4
-rw-r--r--  src/include/miscadmin.h | 16
-rw-r--r--  src/include/nodes/execnodes.h | 20
-rw-r--r--  src/include/nodes/nodes.h | 4
-rw-r--r--  src/include/nodes/params.h | 8
-rw-r--r--  src/include/nodes/parsenodes.h | 44
-rw-r--r--  src/include/nodes/plannodes.h | 22
-rw-r--r--  src/include/nodes/primnodes.h | 56
-rw-r--r--  src/include/nodes/relation.h | 58
-rw-r--r--  src/include/nodes/tidbitmap.h | 2
-rw-r--r--  src/include/nodes/value.h | 2
-rw-r--r--  src/include/parser/gramparse.h | 2
-rw-r--r--  src/include/parser/parse_node.h | 4
-rw-r--r--  src/include/parser/scanner.h | 4
-rw-r--r--  src/include/pg_config_manual.h | 14
-rw-r--r--  src/include/pgstat.h | 2
-rw-r--r--  src/include/port.h | 2
-rw-r--r--  src/include/port/linux.h | 2
-rw-r--r--  src/include/port/win32.h | 4
-rw-r--r--  src/include/portability/instr_time.h | 4
-rw-r--r--  src/include/postgres.h | 6
-rw-r--r--  src/include/postgres_ext.h | 2
-rw-r--r--  src/include/postmaster/syslogger.h | 2
-rw-r--r--  src/include/regex/regcustom.h | 2
-rw-r--r--  src/include/regex/regex.h | 2
-rw-r--r--  src/include/regex/regguts.h | 10
-rw-r--r--  src/include/replication/walprotocol.h | 2
-rw-r--r--  src/include/replication/walreceiver.h | 2
-rw-r--r--  src/include/snowball/header.h | 2
-rw-r--r--  src/include/storage/block.h | 2
-rw-r--r--  src/include/storage/buf_internals.h | 6
-rw-r--r--  src/include/storage/bufpage.h | 8
-rw-r--r--  src/include/storage/ipc.h | 2
-rw-r--r--  src/include/storage/itemid.h | 2
-rw-r--r--  src/include/storage/itemptr.h | 2
-rw-r--r--  src/include/storage/lock.h | 16
-rw-r--r--  src/include/storage/pg_sema.h | 2
-rw-r--r--  src/include/storage/pg_shmem.h | 2
-rw-r--r--  src/include/storage/pos.h | 2
-rw-r--r--  src/include/storage/predicate_internals.h | 8
-rw-r--r--  src/include/storage/proc.h | 4
-rw-r--r--  src/include/storage/relfilenode.h | 2
-rw-r--r--  src/include/storage/s_lock.h | 2
-rw-r--r--  src/include/storage/sinvaladt.h | 2
-rw-r--r--  src/include/storage/smgr.h | 6
-rw-r--r--  src/include/tcop/dest.h | 12
-rw-r--r--  src/include/tcop/tcopdebug.h | 2
-rw-r--r--  src/include/utils/acl.h | 4
-rw-r--r--  src/include/utils/catcache.h | 8
-rw-r--r--  src/include/utils/datetime.h | 2
-rw-r--r--  src/include/utils/elog.h | 4
-rw-r--r--  src/include/utils/guc.h | 4
-rw-r--r--  src/include/utils/hsearch.h | 2
-rw-r--r--  src/include/utils/inet.h | 2
-rw-r--r--  src/include/utils/memutils.h | 6
-rw-r--r--  src/include/utils/palloc.h | 8
-rw-r--r--  src/include/utils/pg_crc.h | 2
-rw-r--r--  src/include/utils/plancache.h | 2
-rw-r--r--  src/include/utils/portal.h | 6
-rw-r--r--  src/include/utils/rel.h | 4
-rw-r--r--  src/include/utils/relcache.h | 2
-rw-r--r--  src/include/utils/resowner.h | 2
-rw-r--r--  src/include/utils/selfuncs.h | 2
-rw-r--r--  src/include/utils/timestamp.h | 2
-rw-r--r--  src/include/utils/tqual.h | 2
-rw-r--r--  src/include/utils/tuplesort.h | 8
-rw-r--r--  src/include/utils/tuplestore.h | 2
-rw-r--r--  src/include/utils/typcache.h | 4
-rw-r--r--  src/interfaces/ecpg/include/sqlca.h | 2
-rw-r--r--  src/interfaces/ecpg/pgtypeslib/dt.h | 2
-rw-r--r--  src/interfaces/ecpg/pgtypeslib/interval.c | 2
-rw-r--r--  src/interfaces/ecpg/pgtypeslib/numeric.c | 2
-rw-r--r--  src/interfaces/ecpg/preproc/c_keywords.c | 2
-rw-r--r--  src/interfaces/ecpg/preproc/parser.c | 2
-rw-r--r--  src/interfaces/ecpg/test/expected/compat_informix-test_informix2.c | 2
-rw-r--r--  src/interfaces/ecpg/test/expected/preproc-init.c | 2
-rw-r--r--  src/interfaces/ecpg/test/expected/sql-array.c | 2
-rw-r--r--  src/interfaces/ecpg/test/expected/sql-code100.c | 2
-rw-r--r--  src/interfaces/ecpg/test/expected/sql-copystdout.c | 2
-rw-r--r--  src/interfaces/ecpg/test/expected/sql-define.c | 2
-rw-r--r--  src/interfaces/ecpg/test/expected/sql-dynalloc.c | 2
-rw-r--r--  src/interfaces/ecpg/test/expected/sql-dynalloc2.c | 2
-rw-r--r--  src/interfaces/ecpg/test/expected/sql-dyntest.c | 2
-rw-r--r--  src/interfaces/ecpg/test/expected/sql-indicators.c | 2
-rw-r--r--  src/interfaces/ecpg/test/expected/thread-alloc.c | 2
-rw-r--r--  src/interfaces/ecpg/test/expected/thread-descriptor.c | 2
-rw-r--r--  src/interfaces/ecpg/test/expected/thread-prep.c | 2
-rw-r--r--  src/interfaces/libpq/fe-auth.c | 2
-rw-r--r--  src/interfaces/libpq/fe-connect.c | 32
-rw-r--r--  src/interfaces/libpq/fe-exec.c | 28
-rw-r--r--  src/interfaces/libpq/fe-lobj.c | 2
-rw-r--r--  src/interfaces/libpq/fe-misc.c | 4
-rw-r--r--  src/interfaces/libpq/fe-protocol2.c | 8
-rw-r--r--  src/interfaces/libpq/fe-protocol3.c | 26
-rw-r--r--  src/interfaces/libpq/fe-secure.c | 10
-rw-r--r--  src/interfaces/libpq/libpq-fe.h | 4
-rw-r--r--  src/interfaces/libpq/pqexpbuffer.c | 8
-rw-r--r--  src/interfaces/libpq/pqexpbuffer.h | 8
-rw-r--r--  src/pl/plperl/plperl.c | 6
-rw-r--r--  src/pl/plpgsql/src/pl_comp.c | 18
-rw-r--r--  src/pl/plpgsql/src/pl_exec.c | 24
-rw-r--r--  src/pl/plpgsql/src/pl_funcs.c | 4
-rw-r--r--  src/pl/plpgsql/src/pl_scanner.c | 8
-rw-r--r--  src/pl/plpgsql/src/plpgsql.h | 4
-rw-r--r--  src/pl/plpython/plpython.c | 16
-rw-r--r--  src/pl/tcl/pltcl.c | 2
-rw-r--r--  src/port/chklocale.c | 2
-rw-r--r--  src/port/crypt.c | 26
-rw-r--r--  src/port/getaddrinfo.c | 2
-rw-r--r--  src/port/getopt.c | 2
-rw-r--r--  src/port/getopt_long.c | 2
-rw-r--r--  src/port/inet_aton.c | 4
-rw-r--r--  src/port/memcmp.c | 2
-rw-r--r--  src/port/path.c | 4
-rw-r--r--  src/port/pgmkdirp.c | 2
-rw-r--r--  src/port/qsort.c | 2
-rw-r--r--  src/port/qsort_arg.c | 2
-rw-r--r--  src/port/snprintf.c | 6
-rw-r--r--  src/port/strlcat.c | 2
-rw-r--r--  src/port/strlcpy.c | 4
-rw-r--r--  src/port/strtol.c | 4
-rw-r--r--  src/port/strtoul.c | 2
-rw-r--r--  src/port/thread.c | 4
-rw-r--r--  src/port/unsetenv.c | 2
-rw-r--r--  src/test/regress/pg_regress.c | 14
-rw-r--r--  src/timezone/localtime.c | 12
-rw-r--r--  src/timezone/pgtz.c | 12
-rw-r--r--  src/tutorial/complex.c | 2
554 files changed, 3110 insertions(+), 3110 deletions(-)
diff --git a/src/backend/access/common/heaptuple.c b/src/backend/access/common/heaptuple.c
index 75d599aab9a..ca060fabbd6 100644
--- a/src/backend/access/common/heaptuple.c
+++ b/src/backend/access/common/heaptuple.c
@@ -21,7 +21,7 @@
* tuptoaster.c.
*
* This change will break any code that assumes it needn't detoast values
- * that have been put into a tuple but never sent to disk. Hopefully there
+ * that have been put into a tuple but never sent to disk. Hopefully there
* are few such places.
*
* Varlenas still have alignment 'i' (or 'd') in pg_type/pg_attribute, since
@@ -388,7 +388,7 @@ nocachegetattr(HeapTuple tuple,
/*
* Otherwise, check for non-fixed-length attrs up to and including
- * target. If there aren't any, it's safe to cheaply initialize the
+ * target. If there aren't any, it's safe to cheaply initialize the
* cached offsets for these attrs.
*/
if (HeapTupleHasVarWidth(tuple))
@@ -455,7 +455,7 @@ nocachegetattr(HeapTuple tuple,
*
* Note - This loop is a little tricky. For each non-null attribute,
* we have to first account for alignment padding before the attr,
- * then advance over the attr based on its length. Nulls have no
+ * then advance over the attr based on its length. Nulls have no
* storage and no alignment padding either. We can use/set
* attcacheoff until we reach either a null or a var-width attribute.
*/
@@ -550,7 +550,7 @@ heap_getsysattr(HeapTuple tup, int attnum, TupleDesc tupleDesc, bool *isnull)
/*
* cmin and cmax are now both aliases for the same field, which
- * can in fact also be a combo command id. XXX perhaps we should
+ * can in fact also be a combo command id. XXX perhaps we should
* return the "real" cmin or cmax if possible, that is if we are
* inside the originating transaction?
*/
@@ -710,7 +710,7 @@ heap_form_tuple(TupleDesc tupleDescriptor,
len += data_len;
/*
- * Allocate and zero the space needed. Note that the tuple body and
+ * Allocate and zero the space needed. Note that the tuple body and
* HeapTupleData management structure are allocated in one chunk.
*/
tuple = (HeapTuple) palloc0(HEAPTUPLESIZE + len);
diff --git a/src/backend/access/common/indextuple.c b/src/backend/access/common/indextuple.c
index 2b518c53464..65c77cf41ec 100644
--- a/src/backend/access/common/indextuple.c
+++ b/src/backend/access/common/indextuple.c
@@ -71,7 +71,7 @@ index_form_tuple(TupleDesc tupleDescriptor,
/*
* If value is stored EXTERNAL, must fetch it so we are not depending
- * on outside storage. This should be improved someday.
+ * on outside storage. This should be improved someday.
*/
if (VARATT_IS_EXTERNAL(DatumGetPointer(values[i])))
{
@@ -281,7 +281,7 @@ nocache_index_getattr(IndexTuple tup,
/*
* Otherwise, check for non-fixed-length attrs up to and including
- * target. If there aren't any, it's safe to cheaply initialize the
+ * target. If there aren't any, it's safe to cheaply initialize the
* cached offsets for these attrs.
*/
if (IndexTupleHasVarwidths(tup))
@@ -348,7 +348,7 @@ nocache_index_getattr(IndexTuple tup,
*
* Note - This loop is a little tricky. For each non-null attribute,
* we have to first account for alignment padding before the attr,
- * then advance over the attr based on its length. Nulls have no
+ * then advance over the attr based on its length. Nulls have no
* storage and no alignment padding either. We can use/set
* attcacheoff until we reach either a null or a var-width attribute.
*/
diff --git a/src/backend/access/common/printtup.c b/src/backend/access/common/printtup.c
index afce6d5edfa..b959d460303 100644
--- a/src/backend/access/common/printtup.c
+++ b/src/backend/access/common/printtup.c
@@ -166,7 +166,7 @@ printtup_startup(DestReceiver *self, int operation, TupleDesc typeinfo)
* or some similar function; it does not contain a full set of fields.
* The targetlist will be NIL when executing a utility function that does
* not have a plan. If the targetlist isn't NIL then it is a Query node's
- * targetlist; it is up to us to ignore resjunk columns in it. The formats[]
+ * targetlist; it is up to us to ignore resjunk columns in it. The formats[]
* array pointer might be NULL (if we are doing Describe on a prepared stmt);
* send zeroes for the format codes in that case.
*/
diff --git a/src/backend/access/common/reloptions.c b/src/backend/access/common/reloptions.c
index 465742556f5..e4d0380e1cd 100644
--- a/src/backend/access/common/reloptions.c
+++ b/src/backend/access/common/reloptions.c
@@ -466,7 +466,7 @@ add_real_reloption(bits32 kinds, char *name, char *desc, double default_val,
* Add a new string reloption
*
* "validator" is an optional function pointer that can be used to test the
- * validity of the values. It must elog(ERROR) when the argument string is
+ * validity of the values. It must elog(ERROR) when the argument string is
* not acceptable for the variable. Note that the default value must pass
* the validation.
*/
@@ -533,7 +533,7 @@ add_string_reloption(bits32 kinds, char *name, char *desc, char *default_val,
* Note that this is not responsible for determining whether the options
* are valid, but it does check that namespaces for all the options given are
* listed in validnsps. The NULL namespace is always valid and needs not be
- * explicitely listed. Passing a NULL pointer means that only the NULL
+ * explicitely listed. Passing a NULL pointer means that only the NULL
* namespace is valid.
*
* Both oldOptions and the result are text arrays (or NULL for "default"),
@@ -809,7 +809,7 @@ extractRelOptions(HeapTuple tuple, TupleDesc tupdesc, Oid amoptions)
* is returned.
*
* Note: values of type int, bool and real are allocated as part of the
- * returned array. Values of type string are allocated separately and must
+ * returned array. Values of type string are allocated separately and must
* be freed by the caller.
*/
relopt_value *
diff --git a/src/backend/access/common/tupconvert.c b/src/backend/access/common/tupconvert.c
index 34e5f114404..94a7f87b377 100644
--- a/src/backend/access/common/tupconvert.c
+++ b/src/backend/access/common/tupconvert.c
@@ -5,7 +5,7 @@
*
* These functions provide conversion between rowtypes that are logically
* equivalent but might have columns in a different order or different sets
- * of dropped columns. There is some overlap of functionality with the
+ * of dropped columns. There is some overlap of functionality with the
* executor's "junkfilter" routines, but these functions work on bare
* HeapTuples rather than TupleTableSlots.
*
diff --git a/src/backend/access/common/tupdesc.c b/src/backend/access/common/tupdesc.c
index 014fc3babfd..5ed8e7c00ad 100644
--- a/src/backend/access/common/tupdesc.c
+++ b/src/backend/access/common/tupdesc.c
@@ -533,7 +533,7 @@ TupleDescInitEntryCollation(TupleDesc desc,
* Given a relation schema (list of ColumnDef nodes), build a TupleDesc.
*
* Note: the default assumption is no OIDs; caller may modify the returned
- * TupleDesc if it wants OIDs. Also, tdtypeid will need to be filled in
+ * TupleDesc if it wants OIDs. Also, tdtypeid will need to be filled in
* later on.
*/
TupleDesc
diff --git a/src/backend/access/gin/ginarrayproc.c b/src/backend/access/gin/ginarrayproc.c
index 2de58604eee..ef3ec45fd71 100644
--- a/src/backend/access/gin/ginarrayproc.c
+++ b/src/backend/access/gin/ginarrayproc.c
@@ -197,7 +197,7 @@ ginarrayconsistent(PG_FUNCTION_ARGS)
/*
* Must have all elements in check[] true; no discrimination
- * against nulls here. This is because array_contain_compare and
+ * against nulls here. This is because array_contain_compare and
* array_eq handle nulls differently ...
*/
res = true;
diff --git a/src/backend/access/gin/ginbulk.c b/src/backend/access/gin/ginbulk.c
index 9e5bab194de..7dab03f8fc2 100644
--- a/src/backend/access/gin/ginbulk.c
+++ b/src/backend/access/gin/ginbulk.c
@@ -187,7 +187,7 @@ ginInsertBAEntry(BuildAccumulator *accum,
* Since the entries are being inserted into a balanced binary tree, you
* might think that the order of insertion wouldn't be critical, but it turns
* out that inserting the entries in sorted order results in a lot of
- * rebalancing operations and is slow. To prevent this, we attempt to insert
+ * rebalancing operations and is slow. To prevent this, we attempt to insert
* the nodes in an order that will produce a nearly-balanced tree if the input
* is in fact sorted.
*
diff --git a/src/backend/access/gin/ginentrypage.c b/src/backend/access/gin/ginentrypage.c
index 97cd755e3ef..ba7408f4b9a 100644
--- a/src/backend/access/gin/ginentrypage.c
+++ b/src/backend/access/gin/ginentrypage.c
@@ -164,7 +164,7 @@ GinShortenTuple(IndexTuple itup, uint32 nipd)
* Form a non-leaf entry tuple by copying the key data from the given tuple,
* which can be either a leaf or non-leaf entry tuple.
*
- * Any posting list in the source tuple is not copied. The specified child
+ * Any posting list in the source tuple is not copied. The specified child
* block number is inserted into t_tid.
*/
static IndexTuple
diff --git a/src/backend/access/gin/ginfast.c b/src/backend/access/gin/ginfast.c
index cc01deac3ec..8742bc50823 100644
--- a/src/backend/access/gin/ginfast.c
+++ b/src/backend/access/gin/ginfast.c
@@ -444,7 +444,7 @@ ginHeapTupleFastInsert(GinState *ginstate, GinTupleCollector *collector)
* Create temporary index tuples for a single indexable item (one index column
* for the heap tuple specified by ht_ctid), and append them to the array
* in *collector. They will subsequently be written out using
- * ginHeapTupleFastInsert. Note that to guarantee consistent state, all
+ * ginHeapTupleFastInsert. Note that to guarantee consistent state, all
* temp tuples for a given heap tuple must be written in one call to
* ginHeapTupleFastInsert.
*/
@@ -713,7 +713,7 @@ processPendingPage(BuildAccumulator *accum, KeyArray *ka,
*
* This can be called concurrently by multiple backends, so it must cope.
* On first glance it looks completely not concurrent-safe and not crash-safe
- * either. The reason it's okay is that multiple insertion of the same entry
+ * either. The reason it's okay is that multiple insertion of the same entry
* is detected and treated as a no-op by gininsert.c. If we crash after
* posting entries to the main index and before removing them from the
* pending list, it's okay because when we redo the posting later on, nothing
@@ -767,7 +767,7 @@ ginInsertCleanup(GinState *ginstate,
LockBuffer(metabuffer, GIN_UNLOCK);
/*
- * Initialize. All temporary space will be in opCtx
+ * Initialize. All temporary space will be in opCtx
*/
opCtx = AllocSetContextCreate(CurrentMemoryContext,
"GIN insert cleanup temporary context",
@@ -861,7 +861,7 @@ ginInsertCleanup(GinState *ginstate,
/*
* While we left the page unlocked, more stuff might have gotten
- * added to it. If so, process those entries immediately. There
+ * added to it. If so, process those entries immediately. There
* shouldn't be very many, so we don't worry about the fact that
* we're doing this with exclusive lock. Insertion algorithm
* gurantees that inserted row(s) will not continue on next page.
diff --git a/src/backend/access/gin/ginget.c b/src/backend/access/gin/ginget.c
index 4fe87eb46af..5d414c8a575 100644
--- a/src/backend/access/gin/ginget.c
+++ b/src/backend/access/gin/ginget.c
@@ -166,7 +166,7 @@ scanPostingTree(Relation index, GinScanEntry scanEntry,
/*
* Collects TIDs into scanEntry->matchBitmap for all heap tuples that
- * match the search entry. This supports three different match modes:
+ * match the search entry. This supports three different match modes:
*
* 1. Partial-match support: scan from current point until the
* comparePartialFn says we're done.
@@ -262,7 +262,7 @@ collectMatchBitmap(GinBtreeData *btree, GinBtreeStack *stack,
/*
* In ALL mode, we are not interested in null items, so we can
* stop if we get to a null-item placeholder (which will be the
- * last entry for a given attnum). We do want to include NULL_KEY
+ * last entry for a given attnum). We do want to include NULL_KEY
* and EMPTY_ITEM entries, though.
*/
if (icategory == GIN_CAT_NULL_ITEM)
@@ -960,14 +960,14 @@ scanGetItem(IndexScanDesc scan, ItemPointer advancePast,
* that exact TID, or a lossy reference to the same page.
*
* This logic works only if a keyGetItem stream can never contain both
- * exact and lossy pointers for the same page. Else we could have a
+ * exact and lossy pointers for the same page. Else we could have a
* case like
*
* stream 1 stream 2
- * ... ...
+ * ... ...
* 42/6 42/7
* 50/1 42/0xffff
- * ... ...
+ * ... ...
*
* We would conclude that 42/6 is not a match and advance stream 1,
* thus never detecting the match to the lossy pointer in stream 2.
@@ -996,7 +996,7 @@ scanGetItem(IndexScanDesc scan, ItemPointer advancePast,
break;
/*
- * No hit. Update myAdvancePast to this TID, so that on the next pass
+ * No hit. Update myAdvancePast to this TID, so that on the next pass
* we'll move to the next possible entry.
*/
myAdvancePast = *item;
@@ -1512,10 +1512,10 @@ gingetbitmap(PG_FUNCTION_ARGS)
/*
* First, scan the pending list and collect any matching entries into the
- * bitmap. After we scan a pending item, some other backend could post it
+ * bitmap. After we scan a pending item, some other backend could post it
* into the main index, and so we might visit it a second time during the
* main scan. This is okay because we'll just re-set the same bit in the
- * bitmap. (The possibility of duplicate visits is a major reason why GIN
+ * bitmap. (The possibility of duplicate visits is a major reason why GIN
* can't support the amgettuple API, however.) Note that it would not do
* to scan the main index before the pending list, since concurrent
* cleanup could then make us miss entries entirely.
diff --git a/src/backend/access/gin/gininsert.c b/src/backend/access/gin/gininsert.c
index 3e32af94a96..2976414f9fc 100644
--- a/src/backend/access/gin/gininsert.c
+++ b/src/backend/access/gin/gininsert.c
@@ -97,7 +97,7 @@ createPostingTree(Relation index, ItemPointerData *items, uint32 nitems)
* Adds array of item pointers to tuple's posting list, or
* creates posting tree and tuple pointing to tree in case
* of not enough space. Max size of tuple is defined in
- * GinFormTuple(). Returns a new, modified index tuple.
+ * GinFormTuple(). Returns a new, modified index tuple.
* items[] must be in sorted order with no duplicates.
*/
static IndexTuple
diff --git a/src/backend/access/gin/ginscan.c b/src/backend/access/gin/ginscan.c
index f8d54b1b462..b2944cfbb31 100644
--- a/src/backend/access/gin/ginscan.c
+++ b/src/backend/access/gin/ginscan.c
@@ -388,7 +388,7 @@ ginNewScanKey(IndexScanDesc scan)
/*
* If the index is version 0, it may be missing null and placeholder
* entries, which would render searches for nulls and full-index scans
- * unreliable. Throw an error if so.
+ * unreliable. Throw an error if so.
*/
if (hasNullQuery && !so->isVoidRes)
{
diff --git a/src/backend/access/gin/ginutil.c b/src/backend/access/gin/ginutil.c
index ba142bc874d..493deebb91a 100644
--- a/src/backend/access/gin/ginutil.c
+++ b/src/backend/access/gin/ginutil.c
@@ -438,7 +438,7 @@ ginExtractEntries(GinState *ginstate, OffsetNumber attnum,
* If there's more than one key, sort and unique-ify.
*
* XXX Using qsort here is notationally painful, and the overhead is
- * pretty bad too. For small numbers of keys it'd likely be better to use
+ * pretty bad too. For small numbers of keys it'd likely be better to use
* a simple insertion sort.
*/
if (*nentries > 1)
diff --git a/src/backend/access/gin/ginxlog.c b/src/backend/access/gin/ginxlog.c
index 6b781c5d356..0d58d4830e6 100644
--- a/src/backend/access/gin/ginxlog.c
+++ b/src/backend/access/gin/ginxlog.c
@@ -692,7 +692,7 @@ ginRedoDeleteListPages(XLogRecPtr lsn, XLogRecord *record)
/*
* In normal operation, shiftList() takes exclusive lock on all the
- * pages-to-be-deleted simultaneously. During replay, however, it should
+ * pages-to-be-deleted simultaneously. During replay, however, it should
* be all right to lock them one at a time. This is dependent on the fact
* that we are deleting pages from the head of the list, and that readers
* share-lock the next page before releasing the one they are on. So we
diff --git a/src/backend/access/gist/gist.c b/src/backend/access/gist/gist.c
index 7172f210286..2b34ce3d09a 100644
--- a/src/backend/access/gist/gist.c
+++ b/src/backend/access/gist/gist.c
@@ -216,7 +216,7 @@ gistbuildCallback(Relation index,
/*
* Since we already have the index relation locked, we call gistdoinsert
* directly. Normal access method calls dispatch through gistinsert,
- * which locks the relation for write. This is the right thing to do if
+ * which locks the relation for write. This is the right thing to do if
* you're inserting single tups, but not when you're initializing the
* whole index at once.
*
@@ -1495,7 +1495,7 @@ initGISTstate(GISTSTATE *giststate, Relation index)
/*
* If the index column has a specified collation, we should honor that
* while doing comparisons. However, we may have a collatable storage
- * type for a noncollatable indexed data type. If there's no index
+ * type for a noncollatable indexed data type. If there's no index
* collation then specify default collation in case the support
* functions need collation. This is harmless if the support
* functions don't care about collation, so we just do it
diff --git a/src/backend/access/gist/gistget.c b/src/backend/access/gist/gistget.c
index 1aba6868447..6cf6a1f9d56 100644
--- a/src/backend/access/gist/gistget.c
+++ b/src/backend/access/gist/gistget.c
@@ -32,7 +32,7 @@
*
* On success return for a heap tuple, *recheck_p is set to indicate
* whether recheck is needed. We recheck if any of the consistent() functions
- * request it. recheck is not interesting when examining a non-leaf entry,
+ * request it. recheck is not interesting when examining a non-leaf entry,
* since we must visit the lower index page if there's any doubt.
*
* If we are doing an ordered scan, so->distances[] is filled with distance
@@ -63,7 +63,7 @@ gistindex_keytest(IndexScanDesc scan,
/*
* If it's a leftover invalid tuple from pre-9.1, treat it as a match with
- * minimum possible distances. This means we'll always follow it to the
+ * minimum possible distances. This means we'll always follow it to the
* referenced page.
*/
if (GistTupleIsInvalid(tuple))
@@ -225,7 +225,7 @@ gistindex_keytest(IndexScanDesc scan,
* ntids: if not NULL, gistgetbitmap's output tuple counter
*
* If tbm/ntids aren't NULL, we are doing an amgetbitmap scan, and heap
- * tuples should be reported directly into the bitmap. If they are NULL,
+ * tuples should be reported directly into the bitmap. If they are NULL,
* we're doing a plain or ordered indexscan. For a plain indexscan, heap
* tuple TIDs are returned into so->pageData[]. For an ordered indexscan,
* heap tuple TIDs are pushed into individual search queue items.
diff --git a/src/backend/access/gist/gistscan.c b/src/backend/access/gist/gistscan.c
index 92405283d02..870450b1445 100644
--- a/src/backend/access/gist/gistscan.c
+++ b/src/backend/access/gist/gistscan.c
@@ -58,7 +58,7 @@ GISTSearchTreeItemCombiner(RBNode *existing, const RBNode *newrb, void *arg)
/*
* If new item is heap tuple, it goes to front of chain; otherwise insert
* it before the first index-page item, so that index pages are visited in
- * LIFO order, ensuring depth-first search of index pages. See comments
+ * LIFO order, ensuring depth-first search of index pages. See comments
* in gist_private.h.
*/
if (GISTSearchItemIsHeap(*newitem))
diff --git a/src/backend/access/gist/gistsplit.c b/src/backend/access/gist/gistsplit.c
index 67741b6ca7a..ed061afdd47 100644
--- a/src/backend/access/gist/gistsplit.c
+++ b/src/backend/access/gist/gistsplit.c
@@ -71,7 +71,7 @@ gistunionsubkeyvec(GISTSTATE *giststate, IndexTuple *itvec,
* Recompute unions of left- and right-side subkeys after a page split,
* ignoring any tuples that are marked in spl->spl_dontcare[].
*
- * Note: we always recompute union keys for all index columns. In some cases
+ * Note: we always recompute union keys for all index columns. In some cases
* this might represent duplicate work for the leftmost column(s), but it's
* not safe to assume that "zero penalty to move a tuple" means "the union
* key doesn't change at all". Penalty functions aren't 100% accurate.
@@ -160,7 +160,7 @@ findDontCares(Relation r, GISTSTATE *giststate, GISTENTRY *valvec,
/*
* Remove tuples that are marked don't-cares from the tuple index array a[]
- * of length *len. This is applied separately to the spl_left and spl_right
+ * of length *len. This is applied separately to the spl_left and spl_right
* arrays.
*/
static void
@@ -193,7 +193,7 @@ removeDontCares(OffsetNumber *a, int *len, const bool *dontcare)
/*
* Place a single don't-care tuple into either the left or right side of the
* split, according to which has least penalty for merging the tuple into
- * the previously-computed union keys. We need consider only columns starting
+ * the previously-computed union keys. We need consider only columns starting
* at attno.
*/
static void
@@ -291,7 +291,7 @@ supportSecondarySplit(Relation r, GISTSTATE *giststate, int attno,
/*
* There is only one previously defined union, so we just choose swap
- * or not by lowest penalty for that side. We can only get here if a
+ * or not by lowest penalty for that side. We can only get here if a
* secondary split happened to have all NULLs in its column in the
* tuples that the outer recursion level had assigned to one side.
* (Note that the null checks in gistSplitByKey don't prevent the
@@ -427,7 +427,7 @@ gistUserPicksplit(Relation r, GistEntryVector *entryvec, int attno, GistSplitVec
sv->spl_rdatum = v->spl_rattr[attno];
/*
- * Let the opclass-specific PickSplit method do its thing. Note that at
+ * Let the opclass-specific PickSplit method do its thing. Note that at
* this point we know there are no null keys in the entryvec.
*/
FunctionCall2Coll(&giststate->picksplitFn[attno],
diff --git a/src/backend/access/gist/gistutil.c b/src/backend/access/gist/gistutil.c
index 806f229800f..92c8c6c583a 100644
--- a/src/backend/access/gist/gistutil.c
+++ b/src/backend/access/gist/gistutil.c
@@ -432,7 +432,7 @@ gistchoose(Relation r, Page p, IndexTuple it, /* it has compressed entry */
{
/*
* New best penalty for column. Tentatively select this tuple
- * as the target, and record the best penalty. Then reset the
+ * as the target, and record the best penalty. Then reset the
* next column's penalty to "unknown" (and indirectly, the
* same for all the ones to its right). This will force us to
* adopt this tuple's penalty values as the best for all the
@@ -448,7 +448,7 @@ gistchoose(Relation r, Page p, IndexTuple it, /* it has compressed entry */
{
/*
* The current tuple is exactly as good for this column as the
- * best tuple seen so far. The next iteration of this loop
+ * best tuple seen so far. The next iteration of this loop
* will compare the next column.
*/
}
@@ -623,7 +623,7 @@ gistcheckpage(Relation rel, Buffer buf)
/*
* ReadBuffer verifies that every newly-read page passes
* PageHeaderIsValid, which means it either contains a reasonably sane
- * page header or is all-zero. We have to defend against the all-zero
+ * page header or is all-zero. We have to defend against the all-zero
* case, however.
*/
if (PageIsNew(page))
diff --git a/src/backend/access/gist/gistvacuum.c b/src/backend/access/gist/gistvacuum.c
index 50c6270b005..deaadc594bb 100644
--- a/src/backend/access/gist/gistvacuum.c
+++ b/src/backend/access/gist/gistvacuum.c
@@ -53,7 +53,7 @@ gistvacuumcleanup(PG_FUNCTION_ARGS)
stats->estimated_count = info->estimated_count;
/*
- * XXX the above is wrong if index is partial. Would it be OK to just
+ * XXX the above is wrong if index is partial. Would it be OK to just
* return NULL, or is there work we must do below?
*/
}
diff --git a/src/backend/access/hash/hash.c b/src/backend/access/hash/hash.c
index 4cb29b2bb45..720ecc548a2 100644
--- a/src/backend/access/hash/hash.c
+++ b/src/backend/access/hash/hash.c
@@ -76,7 +76,7 @@ hashbuild(PG_FUNCTION_ARGS)
* (assuming their hash codes are pretty random) there will be no locality
* of access to the index, and if the index is bigger than available RAM
* then we'll thrash horribly. To prevent that scenario, we can sort the
- * tuples by (expected) bucket number. However, such a sort is useless
+ * tuples by (expected) bucket number. However, such a sort is useless
* overhead when the index does fit in RAM. We choose to sort if the
* initial index size exceeds NBuffers.
*
@@ -246,7 +246,7 @@ hashgettuple(PG_FUNCTION_ARGS)
/*
* An insertion into the current index page could have happened while
* we didn't have read lock on it. Re-find our position by looking
- * for the TID we previously returned. (Because we hold share lock on
+ * for the TID we previously returned. (Because we hold share lock on
* the bucket, no deletions or splits could have occurred; therefore
* we can expect that the TID still exists in the current index page,
* at an offset >= where we were.)
@@ -524,7 +524,7 @@ hashbulkdelete(PG_FUNCTION_ARGS)
/*
* Read the metapage to fetch original bucket and tuple counts. Also, we
* keep a copy of the last-seen metapage so that we can use its
- * hashm_spares[] values to compute bucket page addresses. This is a bit
+ * hashm_spares[] values to compute bucket page addresses. This is a bit
* hokey but perfectly safe, since the interesting entries in the spares
* array cannot change under us; and it beats rereading the metapage for
* each bucket.
@@ -655,7 +655,7 @@ loop_top:
{
/*
* Otherwise, our count is untrustworthy since we may have
- * double-scanned tuples in split buckets. Proceed by dead-reckoning.
+ * double-scanned tuples in split buckets. Proceed by dead-reckoning.
* (Note: we still return estimated_count = false, because using this
* count is better than not updating reltuples at all.)
*/
diff --git a/src/backend/access/hash/hashfunc.c b/src/backend/access/hash/hashfunc.c
index 897bf9c1ac9..1b876a77f31 100644
--- a/src/backend/access/hash/hashfunc.c
+++ b/src/backend/access/hash/hashfunc.c
@@ -11,7 +11,7 @@
* src/backend/access/hash/hashfunc.c
*
* NOTES
- * These functions are stored in pg_amproc. For each operator class
+ * These functions are stored in pg_amproc. For each operator class
* defined for hash indexes, they compute the hash value of the argument.
*
* Additional hash functions appear in /utils/adt/ files for various
@@ -158,7 +158,7 @@ hashtext(PG_FUNCTION_ARGS)
/*
* Note: this is currently identical in behavior to hashvarlena, but keep
* it as a separate function in case we someday want to do something
- * different in non-C locales. (See also hashbpchar, if so.)
+ * different in non-C locales. (See also hashbpchar, if so.)
*/
result = hash_any((unsigned char *) VARDATA_ANY(key),
VARSIZE_ANY_EXHDR(key));
@@ -236,7 +236,7 @@ hashvarlena(PG_FUNCTION_ARGS)
*
* This allows some parallelism. Read-after-writes are good at doubling
* the number of bits affected, so the goal of mixing pulls in the opposite
- * direction from the goal of parallelism. I did what I could. Rotates
+ * direction from the goal of parallelism. I did what I could. Rotates
* seem to cost as much as shifts on every machine I could lay my hands on,
* and rotates are much kinder to the top and bottom bits, so I used rotates.
*----------
@@ -270,7 +270,7 @@ hashvarlena(PG_FUNCTION_ARGS)
* substantial performance increase since final() does not need to
* do well in reverse, but is does need to affect all output bits.
* mix(), on the other hand, does not need to affect all output
- * bits (affecting 32 bits is enough). The original hash function had
+ * bits (affecting 32 bits is enough). The original hash function had
* a single mixing operation that had to satisfy both sets of requirements
* and was slower as a result.
*----------
@@ -291,7 +291,7 @@ hashvarlena(PG_FUNCTION_ARGS)
* k : the key (the unaligned variable-length array of bytes)
* len : the length of the key, counting by bytes
*
- * Returns a uint32 value. Every bit of the key affects every bit of
+ * Returns a uint32 value. Every bit of the key affects every bit of
* the return value. Every 1-bit and 2-bit delta achieves avalanche.
* About 6*len+35 instructions. The best hash table sizes are powers
* of 2. There is no need to do mod a prime (mod is sooo slow!).
diff --git a/src/backend/access/hash/hashovfl.c b/src/backend/access/hash/hashovfl.c
index ae8b2b1cfd4..f9bc621c4b4 100644
--- a/src/backend/access/hash/hashovfl.c
+++ b/src/backend/access/hash/hashovfl.c
@@ -81,7 +81,7 @@ blkno_to_bitno(HashMetaPage metap, BlockNumber ovflblkno)
*
* Add an overflow page to the bucket whose last page is pointed to by 'buf'.
*
- * On entry, the caller must hold a pin but no lock on 'buf'. The pin is
+ * On entry, the caller must hold a pin but no lock on 'buf'. The pin is
* dropped before exiting (we assume the caller is not interested in 'buf'
* anymore). The returned overflow page will be pinned and write-locked;
* it is guaranteed to be empty.
@@ -90,12 +90,12 @@ blkno_to_bitno(HashMetaPage metap, BlockNumber ovflblkno)
* That buffer is returned in the same state.
*
* The caller must hold at least share lock on the bucket, to ensure that
- * no one else tries to compact the bucket meanwhile. This guarantees that
+ * no one else tries to compact the bucket meanwhile. This guarantees that
* 'buf' won't stop being part of the bucket while it's unlocked.
*
* NB: since this could be executed concurrently by multiple processes,
* one should not assume that the returned overflow page will be the
- * immediate successor of the originally passed 'buf'. Additional overflow
+ * immediate successor of the originally passed 'buf'. Additional overflow
* pages might have been added to the bucket chain in between.
*/
Buffer
@@ -158,7 +158,7 @@ _hash_addovflpage(Relation rel, Buffer metabuf, Buffer buf)
/*
* _hash_getovflpage()
*
- * Find an available overflow page and return it. The returned buffer
+ * Find an available overflow page and return it. The returned buffer
* is pinned and write-locked, and has had _hash_pageinit() applied,
* but it is caller's responsibility to fill the special space.
*
@@ -254,7 +254,7 @@ _hash_getovflpage(Relation rel, Buffer metabuf)
* We create the new bitmap page with all pages marked "in use".
* Actually two pages in the new bitmap's range will exist
* immediately: the bitmap page itself, and the following page which
- * is the one we return to the caller. Both of these are correctly
+ * is the one we return to the caller. Both of these are correctly
* marked "in use". Subsequent pages do not exist yet, but it is
* convenient to pre-mark them as "in use" too.
*/
@@ -285,7 +285,7 @@ _hash_getovflpage(Relation rel, Buffer metabuf)
metap->hashm_spares[splitnum]++;
/*
- * Adjust hashm_firstfree to avoid redundant searches. But don't risk
+ * Adjust hashm_firstfree to avoid redundant searches. But don't risk
* changing it if someone moved it while we were searching bitmap pages.
*/
if (metap->hashm_firstfree == orig_firstfree)
@@ -314,7 +314,7 @@ found:
blkno = bitno_to_blkno(metap, bit);
/*
- * Adjust hashm_firstfree to avoid redundant searches. But don't risk
+ * Adjust hashm_firstfree to avoid redundant searches. But don't risk
* changing it if someone moved it while we were searching bitmap pages.
*/
if (metap->hashm_firstfree == orig_firstfree)
@@ -495,7 +495,7 @@ _hash_freeovflpage(Relation rel, Buffer ovflbuf,
/*
* _hash_initbitmap()
*
- * Initialize a new bitmap page. The metapage has a write-lock upon
+ * Initialize a new bitmap page. The metapage has a write-lock upon
* entering the function, and must be written by caller after return.
*
* 'blkno' is the block number of the new bitmap page.
diff --git a/src/backend/access/hash/hashpage.c b/src/backend/access/hash/hashpage.c
index fe991cf27bb..62e7755634b 100644
--- a/src/backend/access/hash/hashpage.c
+++ b/src/backend/access/hash/hashpage.c
@@ -52,7 +52,7 @@ static void _hash_splitbucket(Relation rel, Buffer metabuf,
* of the locking rules). However, we can skip taking lmgr locks when the
* index is local to the current backend (ie, either temp or new in the
* current transaction). No one else can see it, so there's no reason to
- * take locks. We still take buffer-level locks, but not lmgr locks.
+ * take locks. We still take buffer-level locks, but not lmgr locks.
*/
#define USELOCKING(rel) (!RELATION_IS_LOCAL(rel))
@@ -139,7 +139,7 @@ _hash_getbuf(Relation rel, BlockNumber blkno, int access, int flags)
*
* This must be used only to fetch pages that are known to be before
* the index's filesystem EOF, but are to be filled from scratch.
- * _hash_pageinit() is applied automatically. Otherwise it has
+ * _hash_pageinit() is applied automatically. Otherwise it has
* effects similar to _hash_getbuf() with access = HASH_WRITE.
*
* When this routine returns, a write lock is set on the
@@ -347,7 +347,7 @@ _hash_metapinit(Relation rel, double num_tuples, ForkNumber forkNum)
/*
* Determine the target fill factor (in tuples per bucket) for this index.
* The idea is to make the fill factor correspond to pages about as full
- * as the user-settable fillfactor parameter says. We can compute it
+ * as the user-settable fillfactor parameter says. We can compute it
* exactly since the index datatype (i.e. uint32 hash key) is fixed-width.
*/
data_width = sizeof(uint32);
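
The assignment above begins the computation the comment describes. A self-contained sketch of the whole arithmetic, with the page size, alignment macro, and clamp reduced to illustrative stand-ins:

    #include <stddef.h>
    #include <stdint.h>

    #define BLCKSZ      8192                        /* stand-in page size */
    #define MAXALIGN(x) (((x) + 7) & ~((size_t) 7))

    /* Tuples per bucket: target page usage divided by the footprint of
     * one fixed-width (uint32 hash key) index entry. */
    static int
    target_fill_factor(int fillfactor_percent,
                       size_t tuple_header, size_t line_pointer)
    {
        size_t data_width = sizeof(uint32_t);
        size_t item_width = MAXALIGN(tuple_header) +
                            MAXALIGN(data_width) + line_pointer;
        size_t usable = (size_t) BLCKSZ * fillfactor_percent / 100;
        int    ffactor = (int) (usable / item_width);

        return (ffactor < 10) ? 10 : ffactor;       /* keep a sane floor */
    }
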
@@ -380,7 +380,7 @@ _hash_metapinit(Relation rel, double num_tuples, ForkNumber forkNum)
/*
* We initialize the metapage, the first N bucket pages, and the first
* bitmap page in sequence, using _hash_getnewbuf to cause smgrextend()
- * calls to occur. This ensures that the smgr level has the right idea of
+ * calls to occur. This ensures that the smgr level has the right idea of
* the physical index length.
*/
metabuf = _hash_getnewbuf(rel, HASH_METAPAGE, forkNum);
@@ -516,9 +516,9 @@ _hash_expandtable(Relation rel, Buffer metabuf)
* Note: deadlock should be impossible here. Our own backend could only be
* holding bucket sharelocks due to stopped indexscans; those will not
* block other holders of the page-zero lock, who are only interested in
- * acquiring bucket sharelocks themselves. Exclusive bucket locks are
+ * acquiring bucket sharelocks themselves. Exclusive bucket locks are
* only taken here and in hashbulkdelete, and neither of these operations
- * needs any additional locks to complete. (If, due to some flaw in this
+ * needs any additional locks to complete. (If, due to some flaw in this
* reasoning, we manage to deadlock anyway, it's okay to error out; the
* index will be left in a consistent state.)
*/
@@ -560,7 +560,7 @@ _hash_expandtable(Relation rel, Buffer metabuf)
/*
* Determine which bucket is to be split, and attempt to lock the old
- * bucket. If we can't get the lock, give up.
+ * bucket. If we can't get the lock, give up.
*
* The lock protects us against other backends, but not against our own
* backend. Must check for active scans separately.
@@ -618,7 +618,7 @@ _hash_expandtable(Relation rel, Buffer metabuf)
}
/*
- * Okay to proceed with split. Update the metapage bucket mapping info.
+ * Okay to proceed with split. Update the metapage bucket mapping info.
*
* Since we are scribbling on the metapage data right in the shared
* buffer, any failure in this next little bit leaves us with a big
@@ -656,7 +656,7 @@ _hash_expandtable(Relation rel, Buffer metabuf)
* Copy bucket mapping info now; this saves re-accessing the meta page
* inside _hash_splitbucket's inner loop. Note that once we drop the
* split lock, other splits could begin, so these values might be out of
- * date before _hash_splitbucket finishes. That's okay, since all it
+ * date before _hash_splitbucket finishes. That's okay, since all it
* needs is to tell which of these two buckets to map hashkeys into.
*/
maxbucket = metap->hashm_maxbucket;
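
The fields copied above are exactly what the mapping needs. A sketch mirroring the rule (this follows _hash_hashkey2bucket's logic, though the names here are simplified):

    #include <stdint.h>

    typedef uint32_t Bucket;

    /* Mask with the larger mask first; if that lands on a bucket that
     * hasn't been created by a split yet, fall back to the smaller mask. */
    static Bucket
    hashkey_to_bucket(uint32_t hashkey, Bucket maxbucket,
                      uint32_t highmask, uint32_t lowmask)
    {
        Bucket bucket = hashkey & highmask;

        if (bucket > maxbucket)
            bucket = bucket & lowmask;
        return bucket;
    }
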
@@ -897,7 +897,7 @@ _hash_splitbucket(Relation rel,
/*
* We're at the end of the old bucket chain, so we're done partitioning
- * the tuples. Before quitting, call _hash_squeezebucket to ensure the
+ * the tuples. Before quitting, call _hash_squeezebucket to ensure the
* tuples remaining in the old bucket (including the overflow pages) are
* packed as tightly as possible. The new bucket is already tight.
*/
diff --git a/src/backend/access/hash/hashsearch.c b/src/backend/access/hash/hashsearch.c
index bf42be103f1..a3e9ac10212 100644
--- a/src/backend/access/hash/hashsearch.c
+++ b/src/backend/access/hash/hashsearch.c
@@ -251,7 +251,7 @@ _hash_first(IndexScanDesc scan, ScanDirection dir)
* _hash_step() -- step to the next valid item in a scan in the bucket.
*
* If no valid record exists in the requested direction, return
- * false. Else, return true and set the hashso_curpos for the
+ * false. Else, return true and set the hashso_curpos for the
* scan to the right thing.
*
* 'bufP' points to the current buffer, which is pinned and read-locked.
diff --git a/src/backend/access/hash/hashsort.c b/src/backend/access/hash/hashsort.c
index dbb9c3f39b4..bd60574f32a 100644
--- a/src/backend/access/hash/hashsort.c
+++ b/src/backend/access/hash/hashsort.c
@@ -8,7 +8,7 @@
* thrashing. We use tuplesort.c to sort the given index tuples into order.
*
* Note: if the number of rows in the table has been underestimated,
- * bucket splits may occur during the index build. In that case we'd
+ * bucket splits may occur during the index build. In that case we'd
* be inserting into two or more buckets for each possible masked-off
* hash code value. That's no big problem though, since we'll still have
* plenty of locality of access.
@@ -52,7 +52,7 @@ _h_spoolinit(Relation index, uint32 num_buckets)
hspool->index = index;
/*
- * Determine the bitmask for hash code values. Since there are currently
+ * Determine the bitmask for hash code values. Since there are currently
* num_buckets buckets in the index, the appropriate mask can be computed
* as follows.
*
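
The hunk ends just before the formula it promises. A sketch of the kind of computation the comment introduces, assuming the mask must cover num_buckets rounded up to a power of two (helper name hypothetical):

    #include <stdint.h>

    /* Keep just enough low-order hash bits to address every bucket that
     * currently exists. */
    static uint32_t
    bucket_mask(uint32_t num_buckets)
    {
        uint32_t nbits = 0;

        while (((uint32_t) 1 << nbits) < num_buckets)
            nbits++;                    /* nbits = ceil(log2(num_buckets)) */
        return (((uint32_t) 1) << nbits) - 1;
    }
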
diff --git a/src/backend/access/hash/hashutil.c b/src/backend/access/hash/hashutil.c
index 6283f4a82b5..50a8aa47c57 100644
--- a/src/backend/access/hash/hashutil.c
+++ b/src/backend/access/hash/hashutil.c
@@ -161,7 +161,7 @@ _hash_checkpage(Relation rel, Buffer buf, int flags)
/*
* ReadBuffer verifies that every newly-read page passes
* PageHeaderIsValid, which means it either contains a reasonably sane
- * page header or is all-zero. We have to defend against the all-zero
+ * page header or is all-zero. We have to defend against the all-zero
* case, however.
*/
if (PageIsNew(page))
@@ -281,7 +281,7 @@ _hash_form_tuple(Relation index, Datum *values, bool *isnull)
*
* Returns the offset of the first index entry having hashkey >= hash_value,
* or the page's max offset plus one if hash_value is greater than all
- * existing hash keys in the page. This is the appropriate place to start
+ * existing hash keys in the page. This is the appropriate place to start
* a search, or to insert a new item.
*/
OffsetNumber
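
The contract above is the classic lower-bound search. A self-contained sketch over a plain sorted array of hash codes (the real routine works on page items, of course):

    #include <stdint.h>

    /* Return the index of the first key >= hash_value, or n if every key
     * is smaller: the right place to start a search or insert a new item. */
    static int
    binsearch_hashkey(const uint32_t *keys, int n, uint32_t hash_value)
    {
        int lo = 0;
        int hi = n;                 /* invariant: answer is in [lo, hi] */

        while (lo < hi)
        {
            int mid = lo + (hi - lo) / 2;

            if (keys[mid] < hash_value)
                lo = mid + 1;
            else
                hi = mid;
        }
        return lo;
    }
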
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index 479853d99a1..0105825e14a 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -108,7 +108,7 @@ initscan(HeapScanDesc scan, ScanKey key, bool is_rescan)
* while the scan is in progress will be invisible to my snapshot anyway.
* (That is not true when using a non-MVCC snapshot. However, we couldn't
* guarantee to return tuples added after scan start anyway, since they
- * might go into pages we already scanned. To guarantee consistent
+ * might go into pages we already scanned. To guarantee consistent
* results for a non-MVCC snapshot, the caller must hold some higher-level
* lock that ensures the interesting tuple(s) won't change.)
*/
@@ -116,7 +116,7 @@ initscan(HeapScanDesc scan, ScanKey key, bool is_rescan)
/*
* If the table is large relative to NBuffers, use a bulk-read access
- * strategy and enable synchronized scanning (see syncscan.c). Although
+ * strategy and enable synchronized scanning (see syncscan.c). Although
* the thresholds for these features could be different, we make them the
* same so that there are only two behaviors to tune rather than four.
* (However, some callers need to be able to disable one or both of these
@@ -245,7 +245,7 @@ heapgetpage(HeapScanDesc scan, BlockNumber page)
/*
* We must hold share lock on the buffer content while examining tuple
- * visibility. Afterwards, however, the tuples we have found to be
+ * visibility. Afterwards, however, the tuples we have found to be
* visible are guaranteed good as long as we hold the buffer pin.
*/
LockBuffer(buffer, BUFFER_LOCK_SHARE);
@@ -1675,7 +1675,7 @@ heap_hot_search(ItemPointer tid, Relation relation, Snapshot snapshot,
* possibly uncommitted version.
*
* *tid is both an input and an output parameter: it is updated to
- * show the latest version of the row. Note that it will not be changed
+ * show the latest version of the row. Note that it will not be changed
* if no version of the row passes the snapshot test.
*/
void
@@ -1794,7 +1794,7 @@ heap_get_latest_tid(Relation relation,
*
* This is called after we have waited for the XMAX transaction to terminate.
* If the transaction aborted, we guarantee the XMAX_INVALID hint bit will
- * be set on exit. If the transaction committed, we set the XMAX_COMMITTED
+ * be set on exit. If the transaction committed, we set the XMAX_COMMITTED
* hint bit if possible --- but beware that that may not yet be possible,
* if the transaction committed asynchronously. Hence callers should look
* only at XMAX_INVALID.
@@ -1867,7 +1867,7 @@ FreeBulkInsertState(BulkInsertState bistate)
* The return value is the OID assigned to the tuple (either here or by the
* caller), or InvalidOid if no OID. The header fields of *tup are updated
* to match the stored tuple; in particular tup->t_self receives the actual
- * TID where the tuple was stored. But note that any toasting of fields
+ * TID where the tuple was stored. But note that any toasting of fields
* within the tuple data is NOT reflected into *tup.
*/
Oid
@@ -1888,7 +1888,7 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid,
/*
* If the object id of this tuple has already been assigned, trust the
- * caller. There are a couple of ways this can happen. At initial db
+ * caller. There are a couple of ways this can happen. At initial db
* creation, the backend program sets oids for tuples. When we define
* an index, we set the oid. Finally, in the future, we may allow
* users to set their own object ids in order to support a persistent
@@ -2186,10 +2186,10 @@ l1:
/*
* You might think the multixact is necessarily done here, but not
* so: it could have surviving members, namely our own xact or
- * other subxacts of this backend. It is legal for us to delete
+ * other subxacts of this backend. It is legal for us to delete
* the tuple in either case, however (the latter case is
* essentially a situation of upgrading our former shared lock to
- * exclusive). We don't bother changing the on-disk hint bits
+ * exclusive). We don't bother changing the on-disk hint bits
* since we are about to overwrite the xmax altogether.
*/
}
@@ -2259,7 +2259,7 @@ l1:
/*
* If this transaction commits, the tuple will become DEAD sooner or
* later. Set flag that this page is a candidate for pruning once our xid
- * falls below the OldestXmin horizon. If the transaction finally aborts,
+ * falls below the OldestXmin horizon. If the transaction finally aborts,
* the subsequent page pruning will be a no-op and the hint will be
* cleared.
*/
@@ -2360,7 +2360,7 @@ l1:
*
* This routine may be used to delete a tuple when concurrent updates of
* the target tuple are not expected (for example, because we have a lock
- * on the relation associated with the tuple). Any failure is reported
+ * on the relation associated with the tuple). Any failure is reported
* via ereport().
*/
void
@@ -2456,7 +2456,7 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
/*
* Fetch the list of attributes to be checked for HOT update. This is
* wasted effort if we fail to update or have to put the new tuple on a
- * different page. But we must compute the list before obtaining buffer
+ * different page. But we must compute the list before obtaining buffer
* lock --- in the worst case, if we are doing an update on one of the
* relevant system catalogs, we could deadlock if we try to fetch the list
* later. In any case, the relcache caches the data so this is usually
@@ -2544,10 +2544,10 @@ l2:
/*
* You might think the multixact is necessarily done here, but not
* so: it could have surviving members, namely our own xact or
- * other subxacts of this backend. It is legal for us to update
+ * other subxacts of this backend. It is legal for us to update
* the tuple in either case, however (the latter case is
* essentially a situation of upgrading our former shared lock to
- * exclusive). We don't bother changing the on-disk hint bits
+ * exclusive). We don't bother changing the on-disk hint bits
* since we are about to overwrite the xmax altogether.
*/
}
@@ -2643,7 +2643,7 @@ l2:
* If the toaster needs to be activated, OR if the new tuple will not fit
* on the same page as the old, then we need to release the content lock
* (but not the pin!) on the old tuple's buffer while we are off doing
- * TOAST and/or table-file-extension work. We must mark the old tuple to
+ * TOAST and/or table-file-extension work. We must mark the old tuple to
* show that it's already being updated, else other processes may try to
* update it themselves.
*
@@ -2708,7 +2708,7 @@ l2:
* there's more free now than before.
*
* What's more, if we need to get a new page, we will need to acquire
- * buffer locks on both old and new pages. To avoid deadlock against
+ * buffer locks on both old and new pages. To avoid deadlock against
* some other backend trying to get the same two locks in the other
* order, we must be consistent about the order we get the locks in.
* We use the rule "lock the lower-numbered page of the relation
@@ -2766,7 +2766,7 @@ l2:
/*
* At this point newbuf and buffer are both pinned and locked, and newbuf
- * has enough space for the new tuple. If they are the same buffer, only
+ * has enough space for the new tuple. If they are the same buffer, only
* one pin is held.
*/
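
Two hunks up, heap_update spells out the rule for taking the two page locks. A minimal sketch of it, with a pthread mutex standing in for the buffer content lock and honoring the same-buffer case noted just above:

    #include <pthread.h>

    typedef struct PageBuf
    {
        unsigned        blkno;  /* block number within the relation */
        pthread_mutex_t lock;   /* stand-in for the buffer content lock */
    } PageBuf;

    /* Always lock the lower-numbered page first, so no two backends can
     * ever wait on each other in opposite orders. */
    static void
    lock_two_pages(PageBuf *a, PageBuf *b)
    {
        PageBuf *first = (a->blkno <= b->blkno) ? a : b;
        PageBuf *second = (first == a) ? b : a;

        pthread_mutex_lock(&first->lock);
        if (second != first)    /* same buffer: lock it only once */
            pthread_mutex_lock(&second->lock);
    }
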
@@ -2774,7 +2774,7 @@ l2:
{
/*
* Since the new tuple is going into the same page, we might be able
- * to do a HOT update. Check if any of the index columns have been
+ * to do a HOT update. Check if any of the index columns have been
* changed. If not, then HOT update is possible.
*/
if (HeapSatisfiesHOTUpdate(relation, hot_attrs, &oldtup, heaptup))
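
A sketch of the test being gated here: HOT is possible only if no indexed column changed. Real code must also handle NULLs and variable-length datums; this reduces a row to fixed-width values to show the shape of the loop:

    #include <stdbool.h>
    #include <stdint.h>

    static bool
    satisfies_hot_update(const uint32_t *old_row, const uint32_t *new_row,
                         const int *indexed_attrs, int nattrs)
    {
        for (int i = 0; i < nattrs; i++)
        {
            int attr = indexed_attrs[i];

            if (old_row[attr] != new_row[attr])
                return false;   /* an indexed column changed: no HOT */
        }
        return true;            /* existing index entries still apply */
    }
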
@@ -2792,13 +2792,13 @@ l2:
/*
* If this transaction commits, the old tuple will become DEAD sooner or
* later. Set flag that this page is a candidate for pruning once our xid
- * falls below the OldestXmin horizon. If the transaction finally aborts,
+ * falls below the OldestXmin horizon. If the transaction finally aborts,
* the subsequent page pruning will be a no-op and the hint will be
* cleared.
*
* XXX Should we set hint on newbuf as well? If the transaction aborts,
* there would be a prunable tuple in the newbuf; but for now we choose
- * not to optimize for aborts. Note that heap_xlog_update must be kept in
+ * not to optimize for aborts. Note that heap_xlog_update must be kept in
* sync if this decision changes.
*/
PageSetPrunable(page, xid);
@@ -2962,7 +2962,7 @@ heap_tuple_attr_equals(TupleDesc tupdesc, int attrnum,
/*
* Extract the corresponding values. XXX this is pretty inefficient if
- * there are many indexed columns. Should HeapSatisfiesHOTUpdate do a
+ * there are many indexed columns. Should HeapSatisfiesHOTUpdate do a
* single heap_deform_tuple call on each tuple, instead? But that doesn't
* work for system columns ...
*/
@@ -2985,7 +2985,7 @@ heap_tuple_attr_equals(TupleDesc tupdesc, int attrnum,
/*
* We do simple binary comparison of the two datums. This may be overly
* strict because there can be multiple binary representations for the
- * same logical value. But we should be OK as long as there are no false
+ * same logical value. But we should be OK as long as there are no false
* positives. Using a type-specific equality operator is messy because
* there could be multiple notions of equality in different operator
* classes; furthermore, we cannot safely invoke user-defined functions
@@ -3041,7 +3041,7 @@ HeapSatisfiesHOTUpdate(Relation relation, Bitmapset *hot_attrs,
*
* This routine may be used to update a tuple when concurrent updates of
* the target tuple are not expected (for example, because we have a lock
- * on the relation associated with the tuple). Any failure is reported
+ * on the relation associated with the tuple). Any failure is reported
* via ereport().
*/
void
@@ -3123,7 +3123,7 @@ simple_heap_update(Relation relation, ItemPointer otid, HeapTuple tup)
* waiter gets the tuple, potentially leading to indefinite starvation of
* some waiters. The possibility of share-locking makes the problem much
* worse --- a steady stream of share-lockers can easily block an exclusive
- * locker forever. To provide more reliable semantics about who gets a
+ * locker forever. To provide more reliable semantics about who gets a
* tuple-level lock first, we use the standard lock manager. The protocol
* for waiting for a tuple-level lock is really
* LockTuple()
@@ -3131,7 +3131,7 @@ simple_heap_update(Relation relation, ItemPointer otid, HeapTuple tup)
* mark tuple as locked by me
* UnlockTuple()
* When there are multiple waiters, arbitration of who is to get the lock next
- * is provided by LockTuple(). However, at most one tuple-level lock will
+ * is provided by LockTuple(). However, at most one tuple-level lock will
* be held or awaited per backend at any time, so we don't risk overflow
* of the lock table. Note that incoming share-lockers are required to
* do LockTuple as well, if there is any conflict, to ensure that they don't
@@ -3273,7 +3273,7 @@ l3:
/*
* You might think the multixact is necessarily done here, but not
* so: it could have surviving members, namely our own xact or
- * other subxacts of this backend. It is legal for us to lock the
+ * other subxacts of this backend. It is legal for us to lock the
* tuple in either case, however. We don't bother changing the
* on-disk hint bits since we are about to overwrite the xmax
* altogether.
@@ -3431,7 +3431,7 @@ l3:
/*
* Can get here iff HeapTupleSatisfiesUpdate saw the old xmax
* as running, but it finished before
- * TransactionIdIsInProgress() got to run. Treat it like
+ * TransactionIdIsInProgress() got to run. Treat it like
* there's no locker in the tuple.
*/
}
@@ -3467,8 +3467,8 @@ l3:
MarkBufferDirty(*buffer);
/*
- * XLOG stuff. You might think that we don't need an XLOG record because
- * there is no state change worth restoring after a crash. You would be
+ * XLOG stuff. You might think that we don't need an XLOG record because
+ * there is no state change worth restoring after a crash. You would be
* wrong however: we have just written either a TransactionId or a
* MultiXactId that may never have been seen on disk before, and we need
* to make sure that there are XLOG entries covering those ID numbers.
@@ -3530,7 +3530,7 @@ l3:
* heap_inplace_update - update a tuple "in place" (ie, overwrite it)
*
* Overwriting violates both MVCC and transactional safety, so the uses
- * of this function in Postgres are extremely limited. Nonetheless we
+ * of this function in Postgres are extremely limited. Nonetheless we
* find some places to use it.
*
* The tuple cannot change size, and therefore it's reasonable to assume
@@ -3684,7 +3684,7 @@ heap_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid,
/*
* When we release shared lock, it's possible for someone else to change
* xmax before we get the lock back, so repeat the check after acquiring
- * exclusive lock. (We don't need this pushup for xmin, because only
+ * exclusive lock. (We don't need this pushup for xmin, because only
* VACUUM could be interested in changing an existing tuple's xmin, and
* there's only one VACUUM allowed on a table at a time.)
*/
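
The comment above is the usual check-release-recheck discipline. A self-contained sketch with a pthread rwlock standing in for the buffer lock and a plain counter standing in for xmax:

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_rwlock_t page_lock = PTHREAD_RWLOCK_INITIALIZER;
    static unsigned long tuple_xmax;    /* 0 means "no deleter" */

    static bool
    freeze_xmax_if_old(unsigned long cutoff)
    {
        bool need_freeze;

        pthread_rwlock_rdlock(&page_lock);
        need_freeze = (tuple_xmax != 0 && tuple_xmax < cutoff);
        pthread_rwlock_unlock(&page_lock);

        if (!need_freeze)
            return false;

        /* The lock was released, so xmax may have changed: repeat the
         * test after acquiring the exclusive lock. */
        pthread_rwlock_wrlock(&page_lock);
        if (tuple_xmax != 0 && tuple_xmax < cutoff)
            tuple_xmax = 0;
        pthread_rwlock_unlock(&page_lock);
        return true;
    }
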
@@ -3829,7 +3829,7 @@ heap_restrpos(HeapScanDesc scan)
else
{
/*
- * If we reached end of scan, rs_inited will now be false. We must
+ * If we reached end of scan, rs_inited will now be false. We must
* reset it to true to keep heapgettup from doing the wrong thing.
*/
scan->rs_inited = true;
@@ -4013,7 +4013,7 @@ log_heap_clean(Relation reln, Buffer buffer,
}
/*
- * Perform XLogInsert for a heap-freeze operation. Caller must already
+ * Perform XLogInsert for a heap-freeze operation. Caller must already
* have modified the buffer and marked it dirty.
*/
XLogRecPtr
@@ -4056,7 +4056,7 @@ log_heap_freeze(Relation reln, Buffer buffer,
}
/*
- * Perform XLogInsert for a heap-update operation. Caller must already
+ * Perform XLogInsert for a heap-update operation. Caller must already
* have modified the buffer(s) and marked them dirty.
*/
static XLogRecPtr
@@ -4135,7 +4135,7 @@ log_heap_update(Relation reln, Buffer oldbuf, ItemPointerData from,
* for writing the page to disk after calling this routine.
*
* Note: all current callers build pages in private memory and write them
- * directly to smgr, rather than using bufmgr. Therefore there is no need
+ * directly to smgr, rather than using bufmgr. Therefore there is no need
* to pass a buffer ID to XLogInsert, nor to perform MarkBufferDirty within
* the critical section.
*
@@ -4617,7 +4617,7 @@ heap_xlog_update(XLogRecPtr lsn, XLogRecord *record, bool hot_update)
/*
* In normal operation, it is important to lock the two pages in
* page-number order, to avoid possible deadlocks against other update
- * operations going the other way. However, during WAL replay there can
+ * operations going the other way. However, during WAL replay there can
* be no other update happening, so we don't need to worry about that. But
* we *do* need to worry that we don't expose an inconsistent state to Hot
* Standby queries --- so the original page can't be unlocked before we've
diff --git a/src/backend/access/heap/hio.c b/src/backend/access/heap/hio.c
index 72a69e52b02..173e82a3d90 100644
--- a/src/backend/access/heap/hio.c
+++ b/src/backend/access/heap/hio.c
@@ -117,7 +117,7 @@ ReadBufferBI(Relation relation, BlockNumber targetBlock,
* NOTE: it is unlikely, but not quite impossible, for otherBuffer to be the
* same buffer we select for insertion of the new tuple (this could only
* happen if space is freed in that page after heap_update finds there's not
- * enough there). In that case, the page will be pinned and locked only once.
+ * enough there). In that case, the page will be pinned and locked only once.
*
* We normally use FSM to help us find free space. However,
* if HEAP_INSERT_SKIP_FSM is specified, we just append a new empty page to
@@ -134,7 +134,7 @@ ReadBufferBI(Relation relation, BlockNumber targetBlock,
* for additional constraints needed for safe usage of this behavior.)
*
* The caller can also provide a BulkInsertState object to optimize many
- * insertions into the same relation. This keeps a pin on the current
+ * insertions into the same relation. This keeps a pin on the current
* insertion target page (to save pin/unpin cycles) and also passes a
* BULKWRITE buffer selection strategy object to the buffer manager.
* Passing NULL for bistate selects the default behavior.
@@ -187,7 +187,7 @@ RelationGetBufferForTuple(Relation relation, Size len,
/*
* We first try to put the tuple on the same page we last inserted a tuple
- * on, as cached in the BulkInsertState or relcache entry. If that
+ * on, as cached in the BulkInsertState or relcache entry. If that
* doesn't work, we ask the Free Space Map to locate a suitable page.
* Since the FSM's info might be out of date, we have to be prepared to
 * loop around and retry multiple times. (To ensure this isn't an infinite

@@ -219,7 +219,7 @@ RelationGetBufferForTuple(Relation relation, Size len,
/*
* If the FSM knows nothing of the rel, try the last page before we
- * give up and extend. This avoids one-tuple-per-page syndrome during
+ * give up and extend. This avoids one-tuple-per-page syndrome during
* bootstrapping or in a recently-started system.
*/
if (targetBlock == InvalidBlockNumber)
@@ -280,7 +280,7 @@ RelationGetBufferForTuple(Relation relation, Size len,
/*
* Not enough space, so we must give up our page locks and pin (if
- * any) and prepare to look elsewhere. We don't care which order we
+ * any) and prepare to look elsewhere. We don't care which order we
* unlock the two buffers in, so this can be slightly simpler than the
* code above.
*/
@@ -322,7 +322,7 @@ RelationGetBufferForTuple(Relation relation, Size len,
/*
* XXX This does an lseek - rather expensive - but at the moment it is the
- * only way to accurately determine how many blocks are in a relation. Is
+ * only way to accurately determine how many blocks are in a relation. Is
* it worth keeping an accurate file length in shared memory someplace,
* rather than relying on the kernel to do it for us?
*/
@@ -342,7 +342,7 @@ RelationGetBufferForTuple(Relation relation, Size len,
/*
* Release the file-extension lock; it's now OK for someone else to extend
- * the relation some more. Note that we cannot release this lock before
+ * the relation some more. Note that we cannot release this lock before
* we have buffer lock on the new page, or we risk a race condition
* against vacuumlazy.c --- see comments therein.
*/
diff --git a/src/backend/access/heap/pruneheap.c b/src/backend/access/heap/pruneheap.c
index 1eec8454717..b5c719b922d 100644
--- a/src/backend/access/heap/pruneheap.c
+++ b/src/backend/access/heap/pruneheap.c
@@ -100,7 +100,7 @@ heap_page_prune_opt(Relation relation, Buffer buffer, TransactionId OldestXmin)
* Checking free space here is questionable since we aren't holding any
* lock on the buffer; in the worst case we could get a bogus answer. It's
* unlikely to be *seriously* wrong, though, since reading either pd_lower
- * or pd_upper is probably atomic. Avoiding taking a lock seems more
+ * or pd_upper is probably atomic. Avoiding taking a lock seems more
* important than sometimes getting a wrong answer in what is after all
* just a heuristic estimate.
*/
@@ -319,8 +319,8 @@ heap_page_prune(Relation relation, Buffer buffer, TransactionId OldestXmin,
* OldestXmin is the cutoff XID used to identify dead tuples.
*
* We don't actually change the page here, except perhaps for hint-bit updates
- * caused by HeapTupleSatisfiesVacuum. We just add entries to the arrays in
- * prstate showing the changes to be made. Items to be redirected are added
+ * caused by HeapTupleSatisfiesVacuum. We just add entries to the arrays in
+ * prstate showing the changes to be made. Items to be redirected are added
* to the redirected[] array (two entries per redirection); items to be set to
* LP_DEAD state are added to nowdead[]; and items to be set to LP_UNUSED
* state are added to nowunused[].
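
A sketch of the bookkeeping just described: pruning only records intended changes, which are applied (and WAL-logged) later in one critical section. The capacity constant is a stand-in for the real per-page limit:

    #include <stdint.h>

    #define MAX_PAGE_ITEMS 291          /* stand-in capacity */

    typedef uint16_t OffsetNumber;

    typedef struct PruneState
    {
        int          nredirected;
        int          ndead;
        int          nunused;
        OffsetNumber redirected[MAX_PAGE_ITEMS * 2];    /* (from, to) pairs */
        OffsetNumber nowdead[MAX_PAGE_ITEMS];
        OffsetNumber nowunused[MAX_PAGE_ITEMS];
    } PruneState;

    static void
    record_redirect(PruneState *p, OffsetNumber from, OffsetNumber to)
    {
        p->redirected[p->nredirected * 2] = from;
        p->redirected[p->nredirected * 2 + 1] = to;
        p->nredirected++;
    }

    static void
    record_dead(PruneState *p, OffsetNumber off)
    {
        p->nowdead[p->ndead++] = off;
    }
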
@@ -362,7 +362,7 @@ heap_prune_chain(Relation relation, Buffer buffer, OffsetNumber rootoffnum,
* We need this primarily to handle aborted HOT updates, that is,
* XMIN_INVALID heap-only tuples. Those might not be linked to by
* any chain, since the parent tuple might be re-updated before
- * any pruning occurs. So we have to be able to reap them
+ * any pruning occurs. So we have to be able to reap them
* separately from chain-pruning. (Note that
* HeapTupleHeaderIsHotUpdated will never return true for an
* XMIN_INVALID tuple, so this code will work even when there were
@@ -549,7 +549,7 @@ heap_prune_chain(Relation relation, Buffer buffer, OffsetNumber rootoffnum,
/*
* If the root entry had been a normal tuple, we are deleting it, so
- * count it in the result. But changing a redirect (even to DEAD
+ * count it in the result. But changing a redirect (even to DEAD
* state) doesn't count.
*/
if (ItemIdIsNormal(rootlp))
@@ -638,7 +638,7 @@ heap_prune_record_unused(PruneState *prstate, OffsetNumber offnum)
* buffer, and is inside a critical section.
*
* This is split out because it is also used by heap_xlog_clean()
- * to replay the WAL record when needed after a crash. Note that the
+ * to replay the WAL record when needed after a crash. Note that the
* arguments are identical to those of log_heap_clean().
*/
void
diff --git a/src/backend/access/heap/rewriteheap.c b/src/backend/access/heap/rewriteheap.c
index e56140950af..37fe306a80b 100644
--- a/src/backend/access/heap/rewriteheap.c
+++ b/src/backend/access/heap/rewriteheap.c
@@ -10,7 +10,7 @@
*
* The caller is responsible for creating the new heap, all catalog
* changes, supplying the tuples to be written to the new heap, and
- * rebuilding indexes. The caller must hold AccessExclusiveLock on the
+ * rebuilding indexes. The caller must hold AccessExclusiveLock on the
* target table, because we assume no one else is writing into it.
*
* To use the facility:
@@ -43,7 +43,7 @@
* to substitute the correct ctid instead.
*
* For each ctid reference from A -> B, we might encounter either A first
- * or B first. (Note that a tuple in the middle of a chain is both A and B
+ * or B first. (Note that a tuple in the middle of a chain is both A and B
* of different pairs.)
*
* If we encounter A first, we'll store the tuple in the unresolved_tups
@@ -58,11 +58,11 @@
* and can write A immediately with the correct ctid.
*
* Entries in the hash tables can be removed as soon as the later tuple
- * is encountered. That helps to keep the memory usage down. At the end,
+ * is encountered. That helps to keep the memory usage down. At the end,
* both tables are usually empty; we should have encountered both A and B
* of each pair. However, it's possible for A to be RECENTLY_DEAD and B
* entirely DEAD according to HeapTupleSatisfiesVacuum, because the test
- * for deadness using OldestXmin is not exact. In such a case we might
+ * for deadness using OldestXmin is not exact. In such a case we might
* encounter B first, and skip it, and find A later. Then A would be added
* to unresolved_tups, and stay there until end of the rewrite. Since
* this case is very unusual, we don't worry about the memory usage.
@@ -78,7 +78,7 @@
* of CLUSTERing on an unchanging key column, we'll see all the versions
* of a given tuple together anyway, and so the peak memory usage is only
* proportional to the number of RECENTLY_DEAD versions of a single row, not
- * in the whole table. Note that if we do fail halfway through a CLUSTER,
+ * in the whole table. Note that if we do fail halfway through a CLUSTER,
* the old table is still valid, so failure is not catastrophic.
*
* We can't use the normal heap_insert function to insert into the new
@@ -277,7 +277,7 @@ end_heap_rewrite(RewriteState state)
}
/*
- * If the rel is WAL-logged, must fsync before commit. We use heap_sync
+ * If the rel is WAL-logged, must fsync before commit. We use heap_sync
* to ensure that the toast table gets fsync'd too.
*
* It's obvious that we must do this when not WAL-logging. It's less
@@ -337,7 +337,7 @@ rewrite_heap_tuple(RewriteState state,
* very-old xmin or xmax, so that future VACUUM effort can be saved.
*
* Note we abuse heap_freeze_tuple() a bit here, since it's expecting to
- * be given a pointer to a tuple in a disk buffer. It happens though that
+ * be given a pointer to a tuple in a disk buffer. It happens though that
* we can get the right things to happen by passing InvalidBuffer for the
* buffer.
*/
@@ -548,7 +548,7 @@ rewrite_heap_dead_tuple(RewriteState state, HeapTuple old_tuple)
}
/*
- * Insert a tuple to the new relation. This has to track heap_insert
+ * Insert a tuple to the new relation. This has to track heap_insert
* and its subsidiary functions!
*
* t_self of the tuple is set to the new TID of the tuple. If t_ctid of the
diff --git a/src/backend/access/heap/syncscan.c b/src/backend/access/heap/syncscan.c
index 957d1a12f30..30efe89dfc6 100644
--- a/src/backend/access/heap/syncscan.c
+++ b/src/backend/access/heap/syncscan.c
@@ -4,7 +4,7 @@
* heap scan synchronization support
*
* When multiple backends run a sequential scan on the same table, we try
- * to keep them synchronized to reduce the overall I/O needed. The goal is
+ * to keep them synchronized to reduce the overall I/O needed. The goal is
* to read each page into shared buffer cache only once, and let all backends
* that take part in the shared scan process the page before it falls out of
* the cache.
@@ -26,7 +26,7 @@
* don't want such queries to slow down others.
*
* There can realistically only be a few large sequential scans on different
- * tables in progress at any time. Therefore we just keep the scan positions
+ * tables in progress at any time. Therefore we just keep the scan positions
* in a small LRU list which we scan every time we need to look up or update a
* scan position. The whole mechanism is only applied for tables exceeding
* a threshold size (but that is not the concern of this module).
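
A sketch of such an LRU: a handful of (relation, block) entries scanned linearly, with the most recently touched entry kept at the front. The fixed size and evict-the-last-slot policy are the illustrative assumptions here:

    #include <string.h>

    #define SYNC_SCAN_NELEM 8   /* "a few" concurrent large scans */

    typedef struct ScanLoc
    {
        unsigned relid;         /* 0 = unused slot */
        unsigned blockno;
    } ScanLoc;

    static ScanLoc lru[SYNC_SCAN_NELEM];

    /* Look up (and optionally update) the scan position for a relation,
     * moving its entry to the front; on a miss, evict the last slot. */
    static unsigned
    ss_lookup_or_set(unsigned relid, unsigned blockno, int set)
    {
        ScanLoc entry = {relid, blockno};
        int i;

        for (i = 0; i < SYNC_SCAN_NELEM; i++)
        {
            if (lru[i].relid == relid)
            {
                if (!set)
                    entry.blockno = lru[i].blockno; /* report stored pos */
                break;
            }
        }
        if (i == SYNC_SCAN_NELEM)
            i = SYNC_SCAN_NELEM - 1;

        memmove(&lru[1], &lru[0], i * sizeof(ScanLoc));
        lru[0] = entry;
        return entry.blockno;
    }
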
@@ -245,7 +245,7 @@ ss_search(RelFileNode relfilenode, BlockNumber location, bool set)
* relation, or 0 if no valid location is found.
*
* We expect the caller has just done RelationGetNumberOfBlocks(), and
- * so that number is passed in rather than computing it again. The result
+ * so that number is passed in rather than computing it again. The result
* is guaranteed less than relnblocks (assuming that's > 0).
*/
BlockNumber
diff --git a/src/backend/access/heap/tuptoaster.c b/src/backend/access/heap/tuptoaster.c
index a63cf404599..0ce52d9e474 100644
--- a/src/backend/access/heap/tuptoaster.c
+++ b/src/backend/access/heap/tuptoaster.c
@@ -550,7 +550,7 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup,
* We took care of UPDATE above, so any external value we find
* still in the tuple must be someone else's we cannot reuse.
* Fetch it back (without decompression, unless we are forcing
- * PLAIN storage). If necessary, we'll push it out as a new
+ * PLAIN storage). If necessary, we'll push it out as a new
* external value below.
*/
if (VARATT_IS_EXTERNAL(new_value))
@@ -693,7 +693,7 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup,
/*
* Second we look for attributes of attstorage 'x' or 'e' that are still
- * inline. But skip this if there's no toast table to push them to.
+ * inline. But skip this if there's no toast table to push them to.
*/
while (heap_compute_data_size(tupleDesc,
toast_values, toast_isnull) > maxDataLen &&
@@ -803,7 +803,7 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup,
}
/*
- * Finally we store attributes of type 'm' externally. At this point we
+ * Finally we store attributes of type 'm' externally. At this point we
* increase the target tuple size, so that 'm' attributes aren't stored
* externally unless really necessary.
*/
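
Taken together, the hunks above fix comments describing the pass ordering of the main toasting loop. A schematic sketch of that ordering, with sizes and thresholds as toy numbers (the 18-byte "pointer" is only illustrative):

    #include <stddef.h>
    #include <string.h>

    typedef struct Attr
    {
        char   storage;         /* 'p', 'x', 'e' or 'm' */
        size_t size;            /* current inline size */
    } Attr;

    static size_t
    row_size(const Attr *a, int n)
    {
        size_t total = 0;

        for (int i = 0; i < n; i++)
            total += a[i].size;
        return total;
    }

    /* Repeatedly move out the largest attribute whose storage kind is in
     * 'kinds', until the row fits or nothing qualifies. */
    static void
    shrink_pass(Attr *a, int n, size_t max_len, const char *kinds)
    {
        while (row_size(a, n) > max_len)
        {
            int biggest = -1;

            for (int i = 0; i < n; i++)
                if (strchr(kinds, a[i].storage) && a[i].size > 32 &&
                    (biggest < 0 || a[i].size > a[biggest].size))
                    biggest = i;
            if (biggest < 0)
                return;
            a[biggest].size = 18;   /* now just a pointer to external data */
        }
    }

Usage mirroring the comments: shrink_pass(attrs, n, max_len, "xe") first, then, only if the row still doesn't fit, shrink_pass(attrs, n, raised_max_len, "m") with the raised target the last hunk mentions.
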
@@ -1415,7 +1415,7 @@ toast_save_datum(Relation rel, Datum value,
heap_insert(toastrel, toasttup, mycid, options, NULL);
/*
- * Create the index entry. We cheat a little here by not using
+ * Create the index entry. We cheat a little here by not using
* FormIndexDatum: this relies on the knowledge that the index columns
* are the same as the initial columns of the table.
*
diff --git a/src/backend/access/index/genam.c b/src/backend/access/index/genam.c
index f03e88afdcc..915dad78d43 100644
--- a/src/backend/access/index/genam.c
+++ b/src/backend/access/index/genam.c
@@ -44,7 +44,7 @@
*
* At the end of a scan, the AM's endscan routine undoes the locking,
* but does *not* call IndexScanEnd --- the higher-level index_endscan
- * routine does that. (We can't do it in the AM because index_endscan
+ * routine does that. (We can't do it in the AM because index_endscan
* still needs to touch the IndexScanDesc after calling the AM.)
*
* Because of this, the AM does not have a choice whether to call
@@ -184,7 +184,7 @@ BuildIndexValueDescription(Relation indexRelation,
* at rd_opcintype not the index tupdesc.
*
* Note: this is a bit shaky for opclasses that have pseudotype
- * input types such as ANYARRAY or RECORD. Currently, the
+ * input types such as ANYARRAY or RECORD. Currently, the
* typoutput functions associated with the pseudotypes will work
* okay, but we might have to try harder in future.
*/
@@ -410,7 +410,7 @@ systable_endscan(SysScanDesc sysscan)
* index order. Also, for largely historical reasons, the index to use
* is opened and locked by the caller, not here.
*
- * Currently we do not support non-index-based scans here. (In principle
+ * Currently we do not support non-index-based scans here. (In principle
* we could do a heapscan and sort, but the uses are in places that
* probably don't need to still work with corrupted catalog indexes.)
* For the moment, therefore, these functions are merely the thinnest of
diff --git a/src/backend/access/index/indexam.c b/src/backend/access/index/indexam.c
index 31d705c0561..4b7e4346c42 100644
--- a/src/backend/access/index/indexam.c
+++ b/src/backend/access/index/indexam.c
@@ -80,7 +80,7 @@
*
* Note: the ReindexIsProcessingIndex() check in RELATION_CHECKS is there
* to check that we don't try to scan or do retail insertions into an index
- * that is currently being rebuilt or pending rebuild. This helps to catch
+ * that is currently being rebuilt or pending rebuild. This helps to catch
* things that don't work when reindexing system catalogs. The assertion
* doesn't prevent the actual rebuild because we don't use RELATION_CHECKS
* when calling the index AM's ambuild routine, and there is no reason for
@@ -138,7 +138,7 @@ static IndexScanDesc index_beginscan_internal(Relation indexRelation,
* index_open - open an index relation by relation OID
*
* If lockmode is not "NoLock", the specified kind of lock is
- * obtained on the index. (Generally, NoLock should only be
+ * obtained on the index. (Generally, NoLock should only be
* used if the caller knows it has some appropriate lock on the
* index already.)
*
@@ -403,7 +403,7 @@ index_markpos(IndexScanDesc scan)
* returnable tuple in each HOT chain, and so restoring the prior state at the
* granularity of the index AM is sufficient. Since the only current user
* of mark/restore functionality is nodeMergejoin.c, this effectively means
- * that merge-join plans only work for MVCC snapshots. This could be fixed
+ * that merge-join plans only work for MVCC snapshots. This could be fixed
* if necessary, but for now it seems unimportant.
* ----------------
*/
@@ -428,7 +428,7 @@ index_restrpos(IndexScanDesc scan)
* index_getnext - get the next heap tuple from a scan
*
* The result is the next heap tuple satisfying the scan keys and the
- * snapshot, or NULL if no more matching tuples exist. On success,
+ * snapshot, or NULL if no more matching tuples exist. On success,
* the buffer containing the heap tuple is pinned (the pin will be dropped
* at the next index_getnext or index_endscan).
*
@@ -466,7 +466,7 @@ index_getnext(IndexScanDesc scan, ScanDirection direction)
{
/*
* We are resuming scan of a HOT chain after having returned an
- * earlier member. Must still hold pin on current heap page.
+ * earlier member. Must still hold pin on current heap page.
*/
Assert(BufferIsValid(scan->xs_cbuf));
Assert(ItemPointerGetBlockNumber(tid) ==
@@ -588,7 +588,7 @@ index_getnext(IndexScanDesc scan, ScanDirection direction)
/*
* The xmin should match the previous xmax value, else chain is
- * broken. (Note: this test is not optional because it protects
+ * broken. (Note: this test is not optional because it protects
* us against the case where the prior chain member's xmax aborted
* since we looked at it.)
*/
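
A sketch of the continuity test the comment insists on: each chain member's xmin must equal the xmax of the member that linked to it, or the updater aborted and the link is stale. Types here are simplified stand-ins:

    #include <stdbool.h>

    typedef struct TupleVersion
    {
        unsigned long xmin;     /* inserting transaction */
        unsigned long xmax;     /* updating transaction, or 0 */
        int next;               /* index of next chain member, or -1 */
    } TupleVersion;

    static bool
    chain_is_intact(const TupleVersion *tuples, int root)
    {
        int cur = root;

        while (tuples[cur].next >= 0)
        {
            int next = tuples[cur].next;

            if (tuples[next].xmin != tuples[cur].xmax)
                return false;   /* prior updater aborted: stop here */
            cur = next;
        }
        return true;
    }
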
@@ -798,7 +798,7 @@ index_vacuum_cleanup(IndexVacuumInfo *info,
* particular indexed attribute are those with both types equal to
* the index opclass' opcintype (note that this is subtly different
* from the indexed attribute's own type: it may be a binary-compatible
- * type instead). Only the default functions are stored in relcache
+ * type instead). Only the default functions are stored in relcache
* entries --- access methods can use the syscache to look up non-default
* functions.
*
@@ -832,7 +832,7 @@ index_getprocid(Relation irel,
* index_getprocinfo
*
* This routine allows index AMs to keep fmgr lookup info for
- * support procs in the relcache. As above, only the "default"
+ * support procs in the relcache. As above, only the "default"
* functions for any particular indexed attribute are cached.
*
* Note: the return value points into cached data that will be lost during
diff --git a/src/backend/access/nbtree/nbtcompare.c b/src/backend/access/nbtree/nbtcompare.c
index 23f2b61fe90..c3f8caab274 100644
--- a/src/backend/access/nbtree/nbtcompare.c
+++ b/src/backend/access/nbtree/nbtcompare.c
@@ -25,7 +25,7 @@
* Although any negative int32 (except INT_MIN) is acceptable for reporting
* "<", and any positive int32 is acceptable for reporting ">", routines
* that work on 32-bit or wider datatypes can't just return "a - b".
- * That could overflow and give the wrong answer. Also, one must not
+ * That could overflow and give the wrong answer. Also, one must not
* return INT_MIN to report "<", since some callers will negate the result.
*
* NOTE: it is critical that the comparison function impose a total order
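
A sketch of a comparator that obeys both rules above, branching instead of subtracting so the result can neither overflow nor be INT_MIN:

    #include <stdint.h>

    static int
    int4cmp_sketch(int32_t a, int32_t b)
    {
        if (a > b)
            return 1;
        if (a < b)
            return -1;
        return 0;
    }
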
diff --git a/src/backend/access/nbtree/nbtinsert.c b/src/backend/access/nbtree/nbtinsert.c
index 735291f342e..c25da51cf8c 100644
--- a/src/backend/access/nbtree/nbtinsert.c
+++ b/src/backend/access/nbtree/nbtinsert.c
@@ -89,7 +89,7 @@ static void _bt_vacuum_one_page(Relation rel, Buffer buffer, Relation heapRel);
* and btinsert. By here, itup is filled in, including the TID.
*
* If checkUnique is UNIQUE_CHECK_NO or UNIQUE_CHECK_PARTIAL, this
- * will allow duplicates. Otherwise (UNIQUE_CHECK_YES or
+ * will allow duplicates. Otherwise (UNIQUE_CHECK_YES or
* UNIQUE_CHECK_EXISTING) it will throw error for a duplicate.
* For UNIQUE_CHECK_EXISTING we merely run the duplicate check, and
* don't actually insert.
@@ -128,7 +128,7 @@ top:
* If the page was split between the time that we surrendered our read
* lock and acquired our write lock, then this page may no longer be the
* right place for the key we want to insert. In this case, we need to
- * move right in the tree. See Lehman and Yao for an excruciatingly
+ * move right in the tree. See Lehman and Yao for an excruciatingly
* precise description.
*/
buf = _bt_moveright(rel, buf, natts, itup_scankey, false, BT_WRITE);
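
A sketch of the move-right idea the call above implements, with a toy page struct in place of real buffers and all locking elided: step right while a concurrent split has carried our key past this page's high key.

    #include <stddef.h>

    typedef struct BtPage
    {
        unsigned high_key;      /* upper bound for keys on this page */
        struct BtPage *right;   /* right sibling, or NULL if rightmost */
    } BtPage;

    static BtPage *
    move_right(BtPage *page, unsigned scankey)
    {
        while (page->right != NULL && scankey > page->high_key)
            page = page->right; /* real code re-locks at each step */
        return page;
    }
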
@@ -208,7 +208,7 @@ top:
* is the first tuple on the next page.
*
* Returns InvalidTransactionId if there is no conflict, else an xact ID
- * we must wait for to see if it commits a conflicting tuple. If an actual
+ * we must wait for to see if it commits a conflicting tuple. If an actual
* conflict is detected, no return --- just ereport().
*
* However, if checkUnique == UNIQUE_CHECK_PARTIAL, we always return
@@ -290,7 +290,7 @@ _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel,
/*
* If we are doing a recheck, we expect to find the tuple we
- * are rechecking. It's not a duplicate, but we have to keep
+ * are rechecking. It's not a duplicate, but we have to keep
* scanning.
*/
if (checkUnique == UNIQUE_CHECK_EXISTING &&
@@ -471,7 +471,7 @@ _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel,
* If the new key is equal to one or more existing keys, we can
* legitimately place it anywhere in the series of equal keys --- in fact,
* if the new key is equal to the page's "high key" we can place it on
- * the next page. If it is equal to the high key, and there's not room
+ * the next page. If it is equal to the high key, and there's not room
* to insert the new tuple on the current page without splitting, then
* we can move right hoping to find more free space and avoid a split.
* (We should not move right indefinitely, however, since that leads to
@@ -483,7 +483,7 @@ _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel,
* removing any LP_DEAD tuples.
*
* On entry, *buf and *offsetptr point to the first legal position
- * where the new tuple could be inserted. The caller should hold an
+ * where the new tuple could be inserted. The caller should hold an
* exclusive lock on *buf. *offsetptr can also be set to
* InvalidOffsetNumber, in which case the function will search for the
* right location within the page if needed. On exit, they point to the
@@ -549,7 +549,7 @@ _bt_findinsertloc(Relation rel,
* on every insert. We implement "get tired" as a random choice,
* since stopping after scanning a fixed number of pages wouldn't work
* well (we'd never reach the right-hand side of previously split
- * pages). Currently the probability of moving right is set at 0.99,
+ * pages). Currently the probability of moving right is set at 0.99,
* which may seem too high to change the behavior much, but it does an
* excellent job of preventing O(N^2) behavior with many equal keys.
*----------
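
A sketch of the probabilistic step described above (0.99 is the probability the comment names; rand() stands in for the backend's own generator):

    #include <stdlib.h>
    #include <stdbool.h>

    /* Returns true 99 times out of 100: keep moving right.  The rare
     * false means "get tired" and split the current page instead. */
    static bool
    keep_moving_right(void)
    {
        return rand() % 100 != 0;
    }
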
@@ -650,7 +650,7 @@ _bt_findinsertloc(Relation rel,
* + updates the metapage if a true root or fast root is split.
*
* On entry, we must have the right buffer in which to do the
- * insertion, and the buffer must be pinned and write-locked. On return,
+ * insertion, and the buffer must be pinned and write-locked. On return,
* we will have dropped both the pin and the lock on the buffer.
*
* The locking interactions in this code are critical. You should
@@ -916,7 +916,7 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright,
* origpage is the original page to be split. leftpage is a temporary
* buffer that receives the left-sibling data, which will be copied back
* into origpage on success. rightpage is the new page that receives the
- * right-sibling data. If we fail before reaching the critical section,
+ * right-sibling data. If we fail before reaching the critical section,
* origpage hasn't been modified and leftpage is only workspace. In
* principle we shouldn't need to worry about rightpage either, because it
* hasn't been linked into the btree page structure; but to avoid leaving
@@ -1132,7 +1132,7 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright,
* page. If you're confused, imagine that page A splits to A B and
* then again, yielding A C B, while vacuum is in progress. Tuples
* originally in A could now be in either B or C, hence vacuum must
- * examine both pages. But if D, our right sibling, has a different
+ * examine both pages. But if D, our right sibling, has a different
* cycleid then it could not contain any tuples that were in A when
* the vacuum started.
*/
@@ -1354,7 +1354,7 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright,
*
* We return the index of the first existing tuple that should go on the
* righthand page, plus a boolean indicating whether the new tuple goes on
- * the left or right page. The bool is necessary to disambiguate the case
+ * the left or right page. The bool is necessary to disambiguate the case
* where firstright == newitemoff.
*/
static OffsetNumber
@@ -1590,7 +1590,7 @@ _bt_checksplitloc(FindSplitData *state,
*
* On entry, buf and rbuf are the left and right split pages, which we
* still hold write locks on per the L&Y algorithm. We release the
- * write locks once we have write lock on the parent page. (Any sooner,
+ * write locks once we have write lock on the parent page. (Any sooner,
* and it'd be possible for some other process to try to split or delete
* one of these pages, and get confused because it cannot find the downlink.)
*
@@ -1613,7 +1613,7 @@ _bt_insert_parent(Relation rel,
* Here we have to do something Lehman and Yao don't talk about: deal with
* a root split and construction of a new root. If our stack is empty
* then we have just split a node on what had been the root level when we
- * descended the tree. If it was still the root then we perform a
+ * descended the tree. If it was still the root then we perform a
* new-root construction. If it *wasn't* the root anymore, search to find
* the next higher level that someone constructed meanwhile, and find the
* right place to insert as for the normal case.
@@ -1763,7 +1763,7 @@ _bt_getstackbuf(Relation rel, BTStack stack, int access)
/*
* These loops will check every item on the page --- but in an
* order that's attuned to the probability of where it actually
- * is. Scan to the right first, then to the left.
+ * is. Scan to the right first, then to the left.
*/
for (offnum = start;
offnum <= maxoff;
diff --git a/src/backend/access/nbtree/nbtpage.c b/src/backend/access/nbtree/nbtpage.c
index d782d55aa47..f92ef73c728 100644
--- a/src/backend/access/nbtree/nbtpage.c
+++ b/src/backend/access/nbtree/nbtpage.c
@@ -12,7 +12,7 @@
* src/backend/access/nbtree/nbtpage.c
*
* NOTES
- * Postgres btree pages look like ordinary relation pages. The opaque
+ * Postgres btree pages look like ordinary relation pages. The opaque
* data at high addresses includes pointers to left and right siblings
* and flag data describing page state. The first page in a btree, page
* zero, is special -- it stores meta-information describing the tree.
@@ -57,7 +57,7 @@ _bt_initmetapage(Page page, BlockNumber rootbknum, uint32 level)
metaopaque->btpo_flags = BTP_META;
/*
- * Set pd_lower just past the end of the metadata. This is not essential
+ * Set pd_lower just past the end of the metadata. This is not essential
* but it makes the page look compressible to xlog.c.
*/
((PageHeader) page)->pd_lower =
@@ -75,7 +75,7 @@ _bt_initmetapage(Page page, BlockNumber rootbknum, uint32 level)
*
* The access type parameter (BT_READ or BT_WRITE) controls whether
* a new root page will be created or not. If access = BT_READ,
- * and no root page exists, we just return InvalidBuffer. For
+ * and no root page exists, we just return InvalidBuffer. For
* BT_WRITE, we try to create the root page if it doesn't exist.
* NOTE that the returned root page will have only a read lock set
* on it even if access = BT_WRITE!
@@ -192,7 +192,7 @@ _bt_getroot(Relation rel, int access)
/*
* Metadata initialized by someone else. In order to guarantee no
* deadlocks, we have to release the metadata page and start all
- * over again. (Is that really true? But it's hardly worth trying
+ * over again. (Is that really true? But it's hardly worth trying
* to optimize this case.)
*/
_bt_relbuf(rel, metabuf);
@@ -257,7 +257,7 @@ _bt_getroot(Relation rel, int access)
CacheInvalidateRelcache(rel);
/*
- * swap root write lock for read lock. There is no danger of anyone
+ * swap root write lock for read lock. There is no danger of anyone
* else accessing the new root page while it's unlocked, since no one
* else knows where it is yet.
*/
@@ -325,7 +325,7 @@ _bt_getroot(Relation rel, int access)
* By the time we acquire lock on the root page, it might have been split and
* not be the true root anymore. This is okay for the present uses of this
* routine; we only really need to be able to move up at least one tree level
- * from whatever non-root page we were at. If we ever do need to lock the
+ * from whatever non-root page we were at. If we ever do need to lock the
* one true root page, we could loop here, re-reading the metapage on each
* failure. (Note that it wouldn't do to hold the lock on the metapage while
* moving to the root --- that'd deadlock against any concurrent root split.)
@@ -424,7 +424,7 @@ _bt_checkpage(Relation rel, Buffer buf)
/*
* ReadBuffer verifies that every newly-read page passes
* PageHeaderIsValid, which means it either contains a reasonably sane
- * page header or is all-zero. We have to defend against the all-zero
+ * page header or is all-zero. We have to defend against the all-zero
* case, however.
*/
if (PageIsNew(page))
@@ -491,7 +491,7 @@ _bt_log_reuse_page(Relation rel, BlockNumber blkno, TransactionId latestRemovedX
/*
* _bt_getbuf() -- Get a buffer by block number for read or write.
*
- * blkno == P_NEW means to get an unallocated index page. The page
+ * blkno == P_NEW means to get an unallocated index page. The page
* will be initialized before returning it.
*
* When this routine returns, the appropriate lock is set on the
@@ -522,7 +522,7 @@ _bt_getbuf(Relation rel, BlockNumber blkno, int access)
* First see if the FSM knows of any free pages.
*
* We can't trust the FSM's report unreservedly; we have to check that
- * the page is still free. (For example, an already-free page could
+ * the page is still free. (For example, an already-free page could
* have been re-used between the time the last VACUUM scanned it and
* the time the VACUUM made its FSM updates.)
*
@@ -701,7 +701,7 @@ _bt_page_recyclable(Page page)
/*
* Delete item(s) from a btree page.
*
- * This must only be used for deleting leaf items. Deleting an item on a
+ * This must only be used for deleting leaf items. Deleting an item on a
* non-leaf page has to be done as part of an atomic action that includes
* deleting the page it points to.
*
@@ -769,7 +769,7 @@ _bt_delitems_vacuum(Relation rel, Buffer buf,
/*
* The target-offsets array is not in the buffer, but pretend that it
- * is. When XLogInsert stores the whole buffer, the offsets array
+ * is. When XLogInsert stores the whole buffer, the offsets array
* need not be stored too.
*/
if (nitems > 0)
@@ -1008,7 +1008,7 @@ _bt_pagedel(Relation rel, Buffer buf, BTStack stack)
BTPageOpaque opaque;
/*
- * We can never delete rightmost pages nor root pages. While at it, check
+ * We can never delete rightmost pages nor root pages. While at it, check
* that page is not already deleted and is empty.
*/
page = BufferGetPage(buf);
@@ -1080,7 +1080,7 @@ _bt_pagedel(Relation rel, Buffer buf, BTStack stack)
/*
* During WAL recovery, we can't use _bt_search (for one reason,
* it might invoke user-defined comparison functions that expect
- * facilities not available in recovery mode). Instead, just set
+ * facilities not available in recovery mode). Instead, just set
* up a dummy stack pointing to the left end of the parent tree
* level, from which _bt_getstackbuf will walk right to the parent
* page. Painful, but we don't care too much about performance in
@@ -1115,7 +1115,7 @@ _bt_pagedel(Relation rel, Buffer buf, BTStack stack)
* target page. The sibling that was current a moment ago could have
* split, so we may have to move right. This search could fail if either
* the sibling or the target page was deleted by someone else meanwhile;
- * if so, give up. (Right now, that should never happen, since page
+ * if so, give up. (Right now, that should never happen, since page
* deletion is only done in VACUUM and there shouldn't be multiple VACUUMs
* concurrently on the same table.)
*/
@@ -1144,7 +1144,7 @@ _bt_pagedel(Relation rel, Buffer buf, BTStack stack)
lbuf = InvalidBuffer;
/*
- * Next write-lock the target page itself. It should be okay to take just
+ * Next write-lock the target page itself. It should be okay to take just
* a write lock not a superexclusive lock, since no scans would stop on an
* empty page.
*/
@@ -1266,7 +1266,7 @@ _bt_pagedel(Relation rel, Buffer buf, BTStack stack)
/*
* Check that the parent-page index items we're about to delete/overwrite
- * contain what we expect. This can fail if the index has become corrupt
+ * contain what we expect. This can fail if the index has become corrupt
* for some reason. We want to throw any error before entering the
* critical section --- otherwise it'd be a PANIC.
*
diff --git a/src/backend/access/nbtree/nbtree.c b/src/backend/access/nbtree/nbtree.c
index 0926c807c1c..4398c34222c 100644
--- a/src/backend/access/nbtree/nbtree.c
+++ b/src/backend/access/nbtree/nbtree.c
@@ -154,7 +154,7 @@ btbuild(PG_FUNCTION_ARGS)
/*
* If we are reindexing a pre-existing index, it is critical to send out a
* relcache invalidation SI message to ensure all backends re-read the
- * index metapage. We expect that the caller will ensure that happens
+ * index metapage. We expect that the caller will ensure that happens
* (typically as a side effect of updating index stats, but it must happen
* even if the stats don't change!)
*/
@@ -219,7 +219,7 @@ btbuildempty(PG_FUNCTION_ARGS)
metapage = (Page) palloc(BLCKSZ);
_bt_initmetapage(metapage, P_NONE, 0);
- /* Write the page. If archiving/streaming, XLOG it. */
+ /* Write the page. If archiving/streaming, XLOG it. */
smgrwrite(index->rd_smgr, INIT_FORKNUM, BTREE_METAPAGE,
(char *) metapage, true);
if (XLogIsNeeded())
@@ -292,11 +292,11 @@ btgettuple(PG_FUNCTION_ARGS)
if (scan->kill_prior_tuple)
{
/*
- * Yes, remember it for later. (We'll deal with all such tuples
+ * Yes, remember it for later. (We'll deal with all such tuples
* at once right before leaving the index page.) The test for
* numKilled overrun is not just paranoia: if the caller reverses
* direction in the indexscan then the same item might get entered
- * multiple times. It's not worth trying to optimize that, so we
+ * multiple times. It's not worth trying to optimize that, so we
* don't detect it, but instead just forget any excess entries.
*/
if (so->killedItems == NULL)
@@ -862,7 +862,7 @@ restart:
vstate->lastBlockLocked = blkno;
/*
- * Check whether we need to recurse back to earlier pages. What we
+ * Check whether we need to recurse back to earlier pages. What we
* are concerned about is a page split that happened since we started
* the vacuum scan. If the split moved some tuples to a lower page
* then we might have missed 'em. If so, set up for tail recursion.
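
Editorial note (not part of the commit): the btgettuple hunk above describes remembering "killed" prior tuples with a deliberate cap, silently forgetting any excess caused by a scan reversing direction. A toy sketch of that bookkeeping, with hypothetical names (the real code stores page item offsets and marks them LP_DEAD, which is only a hint, so dropping entries is harmless):

    #include <stdio.h>
    #include <stdlib.h>

    /* Remember up to max_items killed tuples, allocating the array
     * lazily; overrun entries from a direction reversal are forgotten. */
    typedef struct KillList
    {
        int *items;
        int  nkilled;
        int  max_items;
    } KillList;

    static void remember_killed(KillList *kl, int item)
    {
        if (kl->items == NULL)
            kl->items = malloc(kl->max_items * sizeof(int));
        if (kl->nkilled < kl->max_items)
            kl->items[kl->nkilled++] = item;
        /* else: excess entry; just forget it, as the comment says */
    }

    int main(void)
    {
        KillList kl = {NULL, 0, 4};

        for (int i = 0; i < 10; i++)
            remember_killed(&kl, i);
        printf("remembered %d of 10 killed items\n", kl.nkilled);
        free(kl.items);
        return 0;
    }
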
diff --git a/src/backend/access/nbtree/nbtsearch.c b/src/backend/access/nbtree/nbtsearch.c
index 5c00fac533b..dfb8eeb6ff8 100644
--- a/src/backend/access/nbtree/nbtsearch.c
+++ b/src/backend/access/nbtree/nbtsearch.c
@@ -50,7 +50,7 @@ static bool _bt_endpoint(IndexScanDesc scan, ScanDirection dir);
*
* NOTE that the returned buffer is read-locked regardless of the access
* parameter. However, access = BT_WRITE will allow an empty root page
- * to be created and returned. When access = BT_READ, an empty index
+ * to be created and returned. When access = BT_READ, an empty index
* will result in *bufP being set to InvalidBuffer.
*/
BTStack
@@ -227,7 +227,7 @@ _bt_moveright(Relation rel,
* (or leaf keys > given scankey when nextkey is true).
*
* This procedure is not responsible for walking right, it just examines
- * the given page. _bt_binsrch() has no lock or refcount side effects
+ * the given page. _bt_binsrch() has no lock or refcount side effects
* on the buffer.
*/
OffsetNumber
@@ -359,7 +359,7 @@ _bt_compare(Relation rel,
/*
* The scan key is set up with the attribute number associated with each
* term in the key. It is important that, if the index is multi-key, the
- * scan contain the first k key attributes, and that they be in order. If
+ * scan contain the first k key attributes, and that they be in order. If
* you think about how multi-key ordering works, you'll understand why
* this is.
*
@@ -398,7 +398,7 @@ _bt_compare(Relation rel,
/*
* The sk_func needs to be passed the index value as left arg and
* the sk_argument as right arg (they might be of different
- * types). Since it is convenient for callers to think of
+ * types). Since it is convenient for callers to think of
* _bt_compare as comparing the scankey to the index item, we have
* to flip the sign of the comparison result. (Unless it's a DESC
* column, in which case we *don't* flip the sign.)
@@ -427,7 +427,7 @@ _bt_compare(Relation rel,
* _bt_first() -- Find the first item in a scan.
*
* We need to be clever about the direction of scan, the search
- * conditions, and the tree ordering. We find the first item (or,
+ * conditions, and the tree ordering. We find the first item (or,
* if backwards scan, the last item) in the tree that satisfies the
* qualifications in the scan key. On success exit, the page containing
* the current index tuple is pinned but not locked, and data about
@@ -480,7 +480,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
* We want to identify the keys that can be used as starting boundaries;
* these are =, >, or >= keys for a forward scan or =, <, <= keys for
* a backwards scan. We can use keys for multiple attributes so long as
- * the prior attributes had only =, >= (resp. =, <=) keys. Once we accept
+ * the prior attributes had only =, >= (resp. =, <=) keys. Once we accept
* a > or < boundary or find an attribute with no boundary (which can be
* thought of as the same as "> -infinity"), we can't use keys for any
* attributes to its right, because it would break our simplistic notion
@@ -643,7 +643,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
* even if the row comparison is of ">" or "<" type, because the
* condition applied to all but the last row member is effectively
* ">=" or "<=", and so the extra keys don't break the positioning
- * scheme. But, by the same token, if we aren't able to use all
+ * scheme. But, by the same token, if we aren't able to use all
* the row members, then the part of the row comparison that we
* did use has to be treated as just a ">=" or "<=" condition, and
* so we'd better adjust strat_total accordingly.
@@ -762,7 +762,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
/*
* Find first item >= scankey, then back up one to arrive at last
- * item < scankey. (Note: this positioning strategy is only used
+ * item < scankey. (Note: this positioning strategy is only used
* for a backward scan, so that is always the correct starting
* position.)
*/
@@ -811,7 +811,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
case BTGreaterEqualStrategyNumber:
/*
- * Find first item >= scankey. (This is only used for forward
+ * Find first item >= scankey. (This is only used for forward
* scans.)
*/
nextkey = false;
@@ -889,7 +889,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
*
* The actually desired starting point is either this item or the prior
* one, or in the end-of-page case it's the first item on the next page or
- * the last item on this page. Adjust the starting offset if needed. (If
+ * the last item on this page. Adjust the starting offset if needed. (If
* this results in an offset before the first item or after the last one,
* _bt_readpage will report no items found, and then we'll step to the
* next page as needed.)
@@ -1173,7 +1173,7 @@ _bt_steppage(IndexScanDesc scan, ScanDirection dir)
* than the walk-right case because of the possibility that the page
* to our left splits while we are in flight to it, plus the
* possibility that the page we were on gets deleted after we leave
- * it. See nbtree/README for details.
+ * it. See nbtree/README for details.
*/
for (;;)
{
@@ -1268,7 +1268,7 @@ _bt_walk_left(Relation rel, Buffer buf)
* anymore, not that its left sibling got split more than four times.
*
* Note that it is correct to test P_ISDELETED not P_IGNORE here,
- * because half-dead pages are still in the sibling chain. Caller
+ * because half-dead pages are still in the sibling chain. Caller
* must reject half-dead pages if wanted.
*/
tries = 0;
@@ -1294,7 +1294,7 @@ _bt_walk_left(Relation rel, Buffer buf)
if (P_ISDELETED(opaque))
{
/*
- * It was deleted. Move right to first nondeleted page (there
+ * It was deleted. Move right to first nondeleted page (there
* must be one); that is the page that has acquired the deleted
* one's keyspace, so stepping left from it will take us where we
* want to be.
@@ -1338,7 +1338,7 @@ _bt_walk_left(Relation rel, Buffer buf)
* _bt_get_endpoint() -- Find the first or last page on a given tree level
*
* If the index is empty, we will return InvalidBuffer; any other failure
- * condition causes ereport(). We will not return a dead page.
+ * condition causes ereport(). We will not return a dead page.
*
* The returned buffer is pinned and read-locked.
*/
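
Editorial note (not part of the commit): several nbtsearch.c hunks above describe the positioning rule "find first item >= scankey" (or "> scankey" when nextkey is set), from which a backward scan then backs up one item. A self-contained sketch of that binary search over a sorted int array; a real page search compares scankeys against index tuples, not ints:

    #include <stdio.h>
    #include <stdbool.h>

    /* Return the index of the first element >= probe (nextkey false)
     * or the first element > probe (nextkey true). */
    static int bt_binsrch(const int *keys, int nkeys, int probe, bool nextkey)
    {
        int lo = 0;
        int hi = nkeys;         /* invariant: answer lies in [lo, hi] */

        while (lo < hi)
        {
            int mid = lo + (hi - lo) / 2;

            /* move right while keys[mid] < probe (or <= probe if nextkey) */
            if (keys[mid] < probe || (nextkey && keys[mid] == probe))
                lo = mid + 1;
            else
                hi = mid;
        }
        return lo;              /* may be nkeys: step to the next page */
    }

    int main(void)
    {
        int keys[] = {10, 20, 20, 30};

        printf("%d\n", bt_binsrch(keys, 4, 20, false));  /* 1: first >= 20 */
        printf("%d\n", bt_binsrch(keys, 4, 20, true));   /* 3: first >  20 */
        return 0;
    }

For a backward scan the caller uses the result minus one, which is exactly the "find first item >= scankey, then back up one" strategy quoted in the _bt_first hunk.
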
diff --git a/src/backend/access/nbtree/nbtsort.c b/src/backend/access/nbtree/nbtsort.c
index 93a928c66b2..0a61e09a08b 100644
--- a/src/backend/access/nbtree/nbtsort.c
+++ b/src/backend/access/nbtree/nbtsort.c
@@ -7,7 +7,7 @@
*
* We use tuplesort.c to sort the given index tuples into order.
* Then we scan the index tuples in order and build the btree pages
- * for each level. We load source tuples into leaf-level pages.
+ * for each level. We load source tuples into leaf-level pages.
* Whenever we fill a page at one level, we add a link to it to its
* parent level (starting a new parent level if necessary). When
* done, we write out each final page on each level, adding it to
@@ -42,11 +42,11 @@
*
* Since the index will never be used unless it is completely built,
* from a crash-recovery point of view there is no need to WAL-log the
- * steps of the build. After completing the index build, we can just sync
+ * steps of the build. After completing the index build, we can just sync
* the whole file to disk using smgrimmedsync() before exiting this module.
* This can be seen to be sufficient for crash recovery by considering that
* it's effectively equivalent to what would happen if a CHECKPOINT occurred
- * just after the index build. However, it is clearly not sufficient if the
+ * just after the index build. However, it is clearly not sufficient if the
* DBA is using the WAL log for PITR or replication purposes, since another
* machine would not be able to reconstruct the index from WAL. Therefore,
* we log the completed index pages to WAL if and only if WAL archiving is
@@ -88,7 +88,7 @@ struct BTSpool
};
/*
- * Status record for a btree page being built. We have one of these
+ * Status record for a btree page being built. We have one of these
* for each active tree level.
*
* The reason we need to store a copy of the minimum key is that we'll
@@ -157,7 +157,7 @@ _bt_spoolinit(Relation index, bool isunique, bool isdead)
* We size the sort area as maintenance_work_mem rather than work_mem to
* speed index creation. This should be OK since a single backend can't
* run multiple index creations in parallel. Note that creation of a
- * unique index actually requires two BTSpool objects. We expect that the
+ * unique index actually requires two BTSpool objects. We expect that the
* second one (for dead tuples) won't get very full, so we give it only
* work_mem.
*/
@@ -296,7 +296,7 @@ _bt_blwritepage(BTWriteState *wstate, Page page, BlockNumber blkno)
}
/*
- * Now write the page. There's no need for smgr to schedule an fsync for
+ * Now write the page. There's no need for smgr to schedule an fsync for
* this write; we'll do it ourselves before ending the build.
*/
if (blkno == wstate->btws_pages_written)
@@ -421,14 +421,14 @@ _bt_sortaddtup(Page page,
* A leaf page being built looks like:
*
* +----------------+---------------------------------+
- * | PageHeaderData | linp0 linp1 linp2 ... |
+ * | PageHeaderData | linp0 linp1 linp2 ... |
* +-----------+----+---------------------------------+
* | ... linpN | |
* +-----------+--------------------------------------+
* | ^ last |
* | |
* +-------------+------------------------------------+
- * | | itemN ... |
+ * | | itemN ... |
* +-------------+------------------+-----------------+
* | ... item3 item2 item1 | "special space" |
* +--------------------------------+-----------------+
@@ -489,9 +489,9 @@ _bt_buildadd(BTWriteState *wstate, BTPageState *state, IndexTuple itup)
"or use full text indexing.")));
/*
- * Check to see if page is "full". It's definitely full if the item won't
+ * Check to see if page is "full". It's definitely full if the item won't
* fit. Otherwise, compare to the target freespace derived from the
- * fillfactor. However, we must put at least two items on each page, so
+ * fillfactor. However, we must put at least two items on each page, so
* disregard fillfactor if we don't have that many.
*/
if (pgspc < itupsz || (pgspc < state->btps_full && last_off > P_FIRSTKEY))
@@ -564,7 +564,7 @@ _bt_buildadd(BTWriteState *wstate, BTPageState *state, IndexTuple itup)
}
/*
- * Write out the old page. We never need to touch it again, so we can
+ * Write out the old page. We never need to touch it again, so we can
* free the opage workspace too.
*/
_bt_blwritepage(wstate, opage, oblkno);
@@ -801,7 +801,7 @@ _bt_load(BTWriteState *wstate, BTSpool *btspool, BTSpool *btspool2)
/*
* If the index is WAL-logged, we must fsync it down to disk before it's
- * safe to commit the transaction. (For a non-WAL-logged index we don't
+ * safe to commit the transaction. (For a non-WAL-logged index we don't
* care since the index will be uninteresting after a crash anyway.)
*
* It's obvious that we must do this when not WAL-logging the build. It's
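
Editorial note (not part of the commit): the _bt_buildadd hunk above spells out when a page being built counts as "full": definitely if the item won't fit, otherwise when free space drops below the fillfactor target, except that fillfactor is ignored until at least two items are on the page. A small sketch of that decision, with simplified stand-in parameters:

    #include <stddef.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Page-full test as described in the comment above. */
    static bool leaf_page_is_full(size_t freespace, size_t item_size,
                                  size_t target_free, int items_on_page)
    {
        if (freespace < item_size)
            return true;                    /* physically full */
        /* honor fillfactor only once two items are already placed */
        return freespace < target_free && items_on_page >= 2;
    }

    int main(void)
    {
        printf("%d\n", leaf_page_is_full(100, 200, 500, 5)); /* 1: no room */
        printf("%d\n", leaf_page_is_full(400, 200, 500, 1)); /* 0: 1 item only */
        printf("%d\n", leaf_page_is_full(400, 200, 500, 5)); /* 1: below target */
        return 0;
    }
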
diff --git a/src/backend/access/nbtree/nbtutils.c b/src/backend/access/nbtree/nbtutils.c
index f87eadcdec2..fe3ca62193c 100644
--- a/src/backend/access/nbtree/nbtutils.c
+++ b/src/backend/access/nbtree/nbtutils.c
@@ -95,7 +95,7 @@ _bt_mkscankey(Relation rel, IndexTuple itup)
* comparison data ultimately used must match the key datatypes.
*
* The result cannot be used with _bt_compare(), unless comparison
- * data is first stored into the key entries. Currently this
+ * data is first stored into the key entries. Currently this
* routine is only called by nbtsort.c and tuplesort.c, which have
* their own comparison routines.
*/
@@ -166,7 +166,7 @@ _bt_freestack(BTStack stack)
* _bt_preprocess_keys() -- Preprocess scan keys
*
* The caller-supplied search-type keys (in scan->keyData[]) are copied to
- * so->keyData[] with possible transformation. scan->numberOfKeys is
+ * so->keyData[] with possible transformation. scan->numberOfKeys is
* the number of input keys, so->numberOfKeys gets the number of output
* keys (possibly less, never greater).
*
@@ -177,7 +177,7 @@ _bt_freestack(BTStack stack)
* so that the index sorts in the desired direction.
*
* One key purpose of this routine is to discover how many scan keys
- * must be satisfied to continue the scan. It also attempts to eliminate
+ * must be satisfied to continue the scan. It also attempts to eliminate
* redundant keys and detect contradictory keys. (If the index opfamily
* provides incomplete sets of cross-type operators, we may fail to detect
* redundant or contradictory keys, but we can survive that.)
@@ -209,7 +209,7 @@ _bt_freestack(BTStack stack)
* that's the only one returned. (So, we return either a single = key,
* or one or two boundary-condition keys for each attr.) However, if we
* cannot compare two keys for lack of a suitable cross-type operator,
- * we cannot eliminate either. If there are two such keys of the same
+ * we cannot eliminate either. If there are two such keys of the same
* operator strategy, the second one is just pushed into the output array
* without further processing here. We may also emit both >/>= or both
* </<= keys if we can't compare them. The logic about required keys still
@@ -406,7 +406,7 @@ _bt_preprocess_keys(IndexScanDesc scan)
/*
* Emit the cleaned-up keys into the outkeys[] array, and then
- * mark them if they are required. They are required (possibly
+ * mark them if they are required. They are required (possibly
* only in one direction) if all attrs before this one had "=".
*/
for (j = BTMaxStrategyNumber; --j >= 0;)
@@ -504,7 +504,7 @@ _bt_preprocess_keys(IndexScanDesc scan)
* and amoplefttype/amoprighttype equal to the two argument datatypes.
*
* If the opfamily doesn't supply a complete set of cross-type operators we
- * may not be able to make the comparison. If we can make the comparison
+ * may not be able to make the comparison. If we can make the comparison
* we store the operator result in *result and return TRUE. We return FALSE
* if the comparison could not be made.
*
@@ -530,7 +530,7 @@ _bt_compare_scankey_args(IndexScanDesc scan, ScanKey op,
StrategyNumber strat;
/*
- * First, deal with cases where one or both args are NULL. This should
+ * First, deal with cases where one or both args are NULL. This should
* only happen when the scankeys represent IS NULL/NOT NULL conditions.
*/
if ((leftarg->sk_flags | rightarg->sk_flags) & SK_ISNULL)
@@ -670,7 +670,7 @@ _bt_compare_scankey_args(IndexScanDesc scan, ScanKey op,
*
* Lastly, for ordinary scankeys (not IS NULL/NOT NULL), we check for a
* NULL comparison value. Since all btree operators are assumed strict,
- * a NULL means that the qual cannot be satisfied. We return TRUE if the
+ * a NULL means that the qual cannot be satisfied. We return TRUE if the
* comparison value isn't NULL, or FALSE if the scan should be abandoned.
*
* This function is applied to the *input* scankey structure; therefore
@@ -699,7 +699,7 @@ _bt_fix_scankey_strategy(ScanKey skey, int16 *indoption)
* --- we can treat IS NULL as an equality operator for purposes of search
* strategy.
*
- * Likewise, "x IS NOT NULL" is supported. We treat that as either "less
+ * Likewise, "x IS NOT NULL" is supported. We treat that as either "less
* than NULL" in a NULLS LAST index, or "greater than NULL" in a NULLS
* FIRST index.
*
@@ -771,7 +771,7 @@ _bt_fix_scankey_strategy(ScanKey skey, int16 *indoption)
* Mark a scankey as "required to continue the scan".
*
* Depending on the operator type, the key may be required for both scan
- * directions or just one. Also, if the key is a row comparison header,
+ * directions or just one. Also, if the key is a row comparison header,
* we have to mark the appropriate subsidiary ScanKeys as required. In
* such cases, the first subsidiary key is required, but subsequent ones
* are required only as long as they correspond to successive index columns
@@ -783,7 +783,7 @@ _bt_fix_scankey_strategy(ScanKey skey, int16 *indoption)
* scribbling on a data structure belonging to the index AM's caller, not on
* our private copy. This should be OK because the marking will not change
* from scan to scan within a query, and so we'd just re-mark the same way
- * anyway on a rescan. Something to keep an eye on though.
+ * anyway on a rescan. Something to keep an eye on though.
*/
static void
_bt_mark_scankey_required(ScanKey skey)
@@ -967,7 +967,7 @@ _bt_checkkeys(IndexScanDesc scan,
/*
* Since NULLs are sorted before non-NULLs, we know we have
* reached the lower limit of the range of values for this
- * index attr. On a backward scan, we can stop if this qual
+ * index attr. On a backward scan, we can stop if this qual
* is one of the "must match" subset. On a forward scan,
* however, we should keep going.
*/
@@ -980,8 +980,8 @@ _bt_checkkeys(IndexScanDesc scan,
/*
* Since NULLs are sorted after non-NULLs, we know we have
* reached the upper limit of the range of values for this
- * index attr. On a forward scan, we can stop if this qual is
- * one of the "must match" subset. On a backward scan,
+ * index attr. On a forward scan, we can stop if this qual is
+ * one of the "must match" subset. On a backward scan,
* however, we should keep going.
*/
if ((key->sk_flags & SK_BT_REQFWD) &&
@@ -1072,7 +1072,7 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, TupleDesc tupdesc,
* Since NULLs are sorted before non-NULLs, we know we have
* reached the lower limit of the range of values for this
* index attr. On a backward scan, we can stop if this qual is
- * one of the "must match" subset. On a forward scan,
+ * one of the "must match" subset. On a forward scan,
* however, we should keep going.
*/
if ((subkey->sk_flags & SK_BT_REQBKWD) &&
@@ -1085,7 +1085,7 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, TupleDesc tupdesc,
* Since NULLs are sorted after non-NULLs, we know we have
* reached the upper limit of the range of values for this
* index attr. On a forward scan, we can stop if this qual is
- * one of the "must match" subset. On a backward scan,
+ * one of the "must match" subset. On a backward scan,
* however, we should keep going.
*/
if ((subkey->sk_flags & SK_BT_REQFWD) &&
@@ -1103,7 +1103,7 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, TupleDesc tupdesc,
{
/*
* Unlike the simple-scankey case, this isn't a disallowed case.
- * But it can never match. If all the earlier row comparison
+ * But it can never match. If all the earlier row comparison
* columns are required for the scan direction, we can stop the
* scan, because there can't be another tuple that will succeed.
*/
@@ -1168,7 +1168,7 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, TupleDesc tupdesc,
/*
* Tuple fails this qual. If it's a required qual for the current
* scan direction, then we can conclude no further tuples will pass,
- * either. Note we have to look at the deciding column, not
+ * either. Note we have to look at the deciding column, not
* necessarily the first or last column of the row condition.
*/
if ((subkey->sk_flags & SK_BT_REQFWD) &&
@@ -1194,7 +1194,7 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, TupleDesc tupdesc,
* is sufficient for setting LP_DEAD status (which is only a hint).
*
* We match items by heap TID before assuming they are the right ones to
- * delete. We cope with cases where items have moved right due to insertions.
+ * delete. We cope with cases where items have moved right due to insertions.
* If an item has moved off the current page due to a split, we'll fail to
* find it and do nothing (this is not an error case --- we assume the item
* will eventually get marked in a future indexscan). Note that because we
@@ -1280,8 +1280,8 @@ _bt_killitems(IndexScanDesc scan, bool haveLock)
/*
* The following routines manage a shared-memory area in which we track
* assignment of "vacuum cycle IDs" to currently-active btree vacuuming
- * operations. There is a single counter which increments each time we
- * start a vacuum to assign it a cycle ID. Since multiple vacuums could
+ * operations. There is a single counter which increments each time we
+ * start a vacuum to assign it a cycle ID. Since multiple vacuums could
* be active concurrently, we have to track the cycle ID for each active
* vacuum; this requires at most MaxBackends entries (usually far fewer).
* We assume at most one vacuum can be active for a given index.
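
Editorial note (not part of the commit): the final nbtutils.c hunk describes a single shared counter that hands out a "vacuum cycle ID" to each starting vacuum. A toy model of such a counter, assuming (illustratively, not as the exact PostgreSQL definition) that zero is reserved to mean "no vacuum active" and must be skipped on wraparound; a real implementation would protect the counter with a lock and record the ID per active vacuum:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint16_t CycleId;

    static CycleId cycle_ctr = 0;

    static CycleId assign_cycle_id(void)
    {
        cycle_ctr++;
        if (cycle_ctr == 0)     /* skip the reserved value on wraparound */
            cycle_ctr = 1;
        return cycle_ctr;
    }

    int main(void)
    {
        printf("first id: %u\n", (unsigned) assign_cycle_id());
        cycle_ctr = UINT16_MAX;         /* force wraparound */
        printf("after wrap: %u\n", (unsigned) assign_cycle_id());
        return 0;
    }
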
diff --git a/src/backend/access/nbtree/nbtxlog.c b/src/backend/access/nbtree/nbtxlog.c
index f0fe8a3892e..7c72d4ce3d0 100644
--- a/src/backend/access/nbtree/nbtxlog.c
+++ b/src/backend/access/nbtree/nbtxlog.c
@@ -130,7 +130,7 @@ forget_matching_deletion(RelFileNode node, BlockNumber delblk)
* in correct itemno sequence, but physically the opposite order from the
* original, because we insert them in the opposite of itemno order. This
* does not matter in any current btree code, but it's something to keep an
- * eye on. Is it worth changing just on general principles? See also the
+ * eye on. Is it worth changing just on general principles? See also the
* notes in btree_xlog_split().
*/
static void
@@ -181,7 +181,7 @@ _bt_restore_meta(RelFileNode rnode, XLogRecPtr lsn,
pageop->btpo_flags = BTP_META;
/*
- * Set pd_lower just past the end of the metadata. This is not essential
+ * Set pd_lower just past the end of the metadata. This is not essential
* but it makes the page look compressible to xlog.c.
*/
((PageHeader) metapg)->pd_lower =
@@ -392,7 +392,7 @@ btree_xlog_split(bool onleft, bool isroot,
/*
* Remove the items from the left page that were copied to the
- * right page. Also remove the old high key, if any. (We must
+ * right page. Also remove the old high key, if any. (We must
* remove everything before trying to insert any items, else
* we risk not having enough space.)
*/
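
Editorial note (not part of the commit): the _bt_restore_meta hunk above mentions setting pd_lower just past the metadata so the rest of the page reads as an unused, compressible hole. A self-contained sketch of that idea; the struct layouts below are simplified stand-ins, not the real PostgreSQL page header:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define BLCKSZ 8192

    typedef struct PageHeaderData
    {
        uint16_t pd_lower;      /* offset to start of free space */
        uint16_t pd_upper;      /* offset to end of free space */
    } PageHeaderData;

    typedef struct MetaPageData
    {
        uint32_t magic;
        uint32_t root_blkno;
    } MetaPageData;

    int main(void)
    {
        _Alignas(8) char page[BLCKSZ];
        PageHeaderData *hdr = (PageHeaderData *) page;
        MetaPageData *meta = (MetaPageData *) (page + sizeof(PageHeaderData));

        memset(page, 0, sizeof(page));
        meta->magic = 0xBEEF;

        /* everything between pd_lower and pd_upper is a zero-filled
         * hole that generic page code may treat as compressible */
        hdr->pd_lower = (uint16_t) (sizeof(PageHeaderData)
                                    + sizeof(MetaPageData));
        hdr->pd_upper = BLCKSZ;

        printf("hole is %d bytes\n", hdr->pd_upper - hdr->pd_lower);
        return 0;
    }
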
diff --git a/src/backend/access/transam/clog.c b/src/backend/access/transam/clog.c
index b8703aac91d..562ade85835 100644
--- a/src/backend/access/transam/clog.c
+++ b/src/backend/access/transam/clog.c
@@ -11,15 +11,15 @@
* log can be broken into relatively small, independent segments.
*
* XLOG interactions: this module generates an XLOG record whenever a new
- * CLOG page is initialized to zeroes. Other writes of CLOG come from
+ * CLOG page is initialized to zeroes. Other writes of CLOG come from
* recording of transaction commit or abort in xact.c, which generates its
* own XLOG records for these events and will re-perform the status update
- * on redo; so we need make no additional XLOG entry here. For synchronous
+ * on redo; so we need make no additional XLOG entry here. For synchronous
* transaction commits, the XLOG is guaranteed flushed through the XLOG commit
* record before we are called to log a commit, so the WAL rule "write xlog
* before data" is satisfied automatically. However, for async commits we
* must track the latest LSN affecting each CLOG page, so that we can flush
- * XLOG that far and satisfy the WAL rule. We don't have to worry about this
+ * XLOG that far and satisfy the WAL rule. We don't have to worry about this
* for aborts (whether sync or async), since the post-crash assumption would
* be that such transactions failed anyway.
*
@@ -104,7 +104,7 @@ static void set_status_by_pages(int nsubxids, TransactionId *subxids,
* in the tree of xid. In various cases nsubxids may be zero.
*
* lsn must be the WAL location of the commit record when recording an async
- * commit. For a synchronous commit it can be InvalidXLogRecPtr, since the
+ * commit. For a synchronous commit it can be InvalidXLogRecPtr, since the
* caller guarantees the commit record is already flushed in that case. It
* should be InvalidXLogRecPtr for abort cases, too.
*
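
Editorial note (not part of the commit): the clog.c header comment above lays out the async-commit rule: track the latest commit-record LSN affecting each CLOG page, and flush WAL at least that far before the page itself is written, so "write xlog before data" holds. A toy model of that rule, where LSNs are plain integers and xlog_flush() is a stub, both illustrative stand-ins:

    #include <stdio.h>

    #define NPAGES 4

    static unsigned long group_lsn[NPAGES]; /* latest async-commit LSN per page */
    static unsigned long wal_flushed;       /* WAL flushed through this LSN */

    static void xlog_flush(unsigned long upto)
    {
        if (wal_flushed < upto)
            wal_flushed = upto;             /* stand-in for a real WAL flush */
    }

    /* async commit: remember the commit record's LSN for this page */
    static void set_status_async(int page, unsigned long commit_lsn)
    {
        if (group_lsn[page] < commit_lsn)
            group_lsn[page] = commit_lsn;
    }

    /* page writeback: enforce "write xlog before data" */
    static void write_clog_page(int page)
    {
        xlog_flush(group_lsn[page]);
        printf("page %d written; WAL flushed through %lu\n",
               page, wal_flushed);
    }

    int main(void)
    {
        set_status_async(1, 500);
        set_status_async(1, 420);           /* older LSN: no effect */
        write_clog_page(1);                 /* flushes WAL through 500 first */
        return 0;
    }

Synchronous commits need no entry here, since their commit record is already flushed before the status update, exactly as the quoted comment says.
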
diff --git a/src/backend/access/transam/multixact.c b/src/backend/access/transam/multixact.c
index 9e4ecdc3bba..c940e1d2b1e 100644
--- a/src/backend/access/transam/multixact.c
+++ b/src/backend/access/transam/multixact.c
@@ -4,15 +4,15 @@
* PostgreSQL multi-transaction-log manager
*
* The pg_multixact manager is a pg_clog-like manager that stores an array
- * of TransactionIds for each MultiXactId. It is a fundamental part of the
- * shared-row-lock implementation. A share-locked tuple stores a
+ * of TransactionIds for each MultiXactId. It is a fundamental part of the
+ * shared-row-lock implementation. A share-locked tuple stores a
* MultiXactId in its Xmax, and a transaction that needs to wait for the
* tuple to be unlocked can sleep on the potentially-several TransactionIds
* that compose the MultiXactId.
*
* We use two SLRU areas, one for storing the offsets at which the data
* starts for each MultiXactId in the other one. This trick allows us to
- * store variable length arrays of TransactionIds. (We could alternatively
+ * store variable length arrays of TransactionIds. (We could alternatively
* use one area containing counts and TransactionIds, with valid MultiXactId
* values pointing at slots containing counts; but that way seems less robust
* since it would get completely confused if someone inquired about a bogus
@@ -32,7 +32,7 @@
*
* Like clog.c, and unlike subtrans.c, we have to preserve state across
* crashes and ensure that MXID and offset numbering increases monotonically
- * across a crash. We do this in the same way as it's done for transaction
+ * across a crash. We do this in the same way as it's done for transaction
* IDs: the WAL record is guaranteed to contain evidence of every MXID we
* could need to worry about, and we just make sure that at the end of
* replay, the next-MXID and next-offset counters are at least as large as
@@ -64,13 +64,13 @@
/*
- * Defines for MultiXactOffset page sizes. A page is the same BLCKSZ as is
+ * Defines for MultiXactOffset page sizes. A page is the same BLCKSZ as is
* used everywhere else in Postgres.
*
* Note: because both MultiXactOffsets and TransactionIds are 32 bits and
* wrap around at 0xFFFFFFFF, MultiXact page numbering also wraps around at
* 0xFFFFFFFF/MULTIXACT_*_PER_PAGE, and segment numbering at
- * 0xFFFFFFFF/MULTIXACT_*_PER_PAGE/SLRU_SEGMENTS_PER_PAGE. We need take no
+ * 0xFFFFFFFF/MULTIXACT_*_PER_PAGE/SLRU_SEGMENTS_PER_PAGE. We need take no
* explicit notice of that fact in this module, except when comparing segment
* and page numbers in TruncateMultiXact
* (see MultiXact{Offset,Member}PagePrecedes).
@@ -101,7 +101,7 @@ static SlruCtlData MultiXactMemberCtlData;
#define MultiXactMemberCtl (&MultiXactMemberCtlData)
/*
- * MultiXact state shared across all backends. All this state is protected
+ * MultiXact state shared across all backends. All this state is protected
* by MultiXactGenLock. (We also use MultiXactOffsetControlLock and
* MultiXactMemberControlLock to guard accesses to the two sets of SLRU
* buffers. For concurrency's sake, we avoid holding more than one of these
@@ -343,7 +343,7 @@ MultiXactIdExpand(MultiXactId multi, TransactionId xid)
/*
* Determine which of the members of the MultiXactId are still running,
* and use them to create a new one. (Removing dead members is just an
- * optimization, but a useful one. Note we have the same race condition
+ * optimization, but a useful one. Note we have the same race condition
* here as above: j could be 0 at the end of the loop.)
*/
newMembers = (TransactionId *)
@@ -408,7 +408,7 @@ MultiXactIdIsRunning(MultiXactId multi)
/*
* This could be made faster by having another entry point in procarray.c,
- * walking the PGPROC array only once for all the members. But in most
+ * walking the PGPROC array only once for all the members. But in most
* cases nmembers should be small enough that it doesn't much matter.
*/
for (i = 0; i < nmembers; i++)
@@ -527,7 +527,7 @@ MultiXactIdSetOldestMember(void)
* The value to set is the oldest of nextMXact and all the valid per-backend
* OldestMemberMXactId[] entries. Because of the locking we do, we can be
* certain that no subsequent call to MultiXactIdSetOldestMember can set
- * an OldestMemberMXactId[] entry older than what we compute here. Therefore
+ * an OldestMemberMXactId[] entry older than what we compute here. Therefore
* there is no live transaction, now or later, that can be a member of any
* MultiXactId older than the OldestVisibleMXactId we compute here.
*/
@@ -698,7 +698,7 @@ CreateMultiXactId(int nxids, TransactionId *xids)
* heap_lock_tuple() to have put it there, and heap_lock_tuple() generates
* an XLOG record that must follow ours. The normal LSN interlock between
* the data page and that XLOG record will ensure that our XLOG record
- * reaches disk first. If the SLRU members/offsets data reaches disk
+ * reaches disk first. If the SLRU members/offsets data reaches disk
* sooner than the XLOG record, we do not care because we'll overwrite it
* with zeroes unless the XLOG record is there too; see notes at top of
* this file.
@@ -805,7 +805,7 @@ RecordNewMultiXact(MultiXactId multi, MultiXactOffset offset,
* GetNewMultiXactId
* Get the next MultiXactId.
*
- * Also, reserve the needed amount of space in the "members" area. The
+ * Also, reserve the needed amount of space in the "members" area. The
* starting offset of the reserved space is returned in *offset.
*
* This may generate XLOG records for expansion of the offsets and/or members
@@ -874,7 +874,7 @@ GetNewMultiXactId(int nxids, MultiXactOffset *offset)
* until after file extension has succeeded!
*
* We don't care about MultiXactId wraparound here; it will be handled by
- * the next iteration. But note that nextMXact may be InvalidMultiXactId
+ * the next iteration. But note that nextMXact may be InvalidMultiXactId
* after this routine exits, so anyone else looking at the variable must
* be prepared to deal with that. Similarly, nextOffset may be zero, but
* we won't use that as the actual start offset of the next multixact.
@@ -942,7 +942,7 @@ GetMultiXactIdMembers(MultiXactId multi, TransactionId **xids)
* SLRU data if we did try to examine it.
*
* Conversely, an ID >= nextMXact shouldn't ever be seen here; if it is
- * seen, it implies undetected ID wraparound has occurred. We just
+ * seen, it implies undetected ID wraparound has occurred. We just
* silently assume that such an ID is no longer running.
*
* Shared lock is enough here since we aren't modifying any global state.
@@ -958,7 +958,7 @@ GetMultiXactIdMembers(MultiXactId multi, TransactionId **xids)
/*
* Acquire the shared lock just long enough to grab the current counter
- * values. We may need both nextMXact and nextOffset; see below.
+ * values. We may need both nextMXact and nextOffset; see below.
*/
LWLockAcquire(MultiXactGenLock, LW_SHARED);
@@ -976,12 +976,12 @@ GetMultiXactIdMembers(MultiXactId multi, TransactionId **xids)
/*
* Find out the offset at which we need to start reading MultiXactMembers
- * and the number of members in the multixact. We determine the latter as
+ * and the number of members in the multixact. We determine the latter as
* the difference between this multixact's starting offset and the next
* one's. However, there are some corner cases to worry about:
*
* 1. This multixact may be the latest one created, in which case there is
- * no next one to look at. In this case the nextOffset value we just
+ * no next one to look at. In this case the nextOffset value we just
* saved is the correct endpoint.
*
* 2. The next multixact may still be in process of being filled in: that
@@ -992,11 +992,11 @@ GetMultiXactIdMembers(MultiXactId multi, TransactionId **xids)
* (because we are careful to pre-zero offset pages). Because
* GetNewMultiXactId will never return zero as the starting offset for a
* multixact, when we read zero as the next multixact's offset, we know we
- * have this case. We sleep for a bit and try again.
+ * have this case. We sleep for a bit and try again.
*
* 3. Because GetNewMultiXactId increments offset zero to offset one to
* handle case #2, there is an ambiguity near the point of offset
- * wraparound. If we see next multixact's offset is one, is that our
+ * wraparound. If we see next multixact's offset is one, is that our
* multixact's actual endpoint, or did it end at zero with a subsequent
* increment? We handle this using the knowledge that if the zero'th
* member slot wasn't filled, it'll contain zero, and zero isn't a valid
@@ -1388,7 +1388,7 @@ multixact_twophase_postabort(TransactionId xid, uint16 info,
/*
* Initialization of shared memory for MultiXact. We use two SLRU areas,
- * thus double memory. Also, reserve space for the shared MultiXactState
+ * thus double memory. Also, reserve space for the shared MultiXactState
* struct and the per-backend MultiXactId arrays (two of those, too).
*/
Size
@@ -1448,7 +1448,7 @@ MultiXactShmemInit(void)
/*
* This func must be called ONCE on system install. It creates the initial
- * MultiXact segments. (The MultiXacts directories are assumed to have been
+ * MultiXact segments. (The MultiXacts directories are assumed to have been
* created by initdb, and MultiXactShmemInit must have been called already.)
*/
void
@@ -1568,7 +1568,7 @@ TrimMultiXact(void)
MultiXactOffsetCtl->shared->latest_page_number = pageno;
/*
- * Zero out the remainder of the current offsets page. See notes in
+ * Zero out the remainder of the current offsets page. See notes in
* StartupCLOG() for motivation.
*/
entryno = MultiXactIdToOffsetEntry(multi);
@@ -1598,7 +1598,7 @@ TrimMultiXact(void)
MultiXactMemberCtl->shared->latest_page_number = pageno;
/*
- * Zero out the remainder of the current members page. See notes in
+ * Zero out the remainder of the current members page. See notes in
* TrimCLOG() for motivation.
*/
entryno = MXOffsetToMemberEntry(offset);
@@ -1799,7 +1799,7 @@ ExtendMultiXactMember(MultiXactOffset offset, int nmembers)
* Remove all MultiXactOffset and MultiXactMember segments before the oldest
* ones still of interest.
*
- * This is called only during checkpoints. We assume no more than one
+ * This is called only during checkpoints. We assume no more than one
* backend does this at a time.
*
* XXX do we have any issues with needing to checkpoint here?
@@ -1860,7 +1860,7 @@ TruncateMultiXact(void)
return;
/*
- * We need to determine where to truncate MultiXactMember. If we found a
+ * We need to determine where to truncate MultiXactMember. If we found a
* valid oldest MultiXactId, read its starting offset; otherwise we use
* the nextOffset value we saved above.
*/
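
Editorial note (not part of the commit): the GetMultiXactIdMembers hunks above explain how a multixact's member count is derived: it is the difference between its starting offset and the next multixact's, with corner cases for the latest multixact (bounded by the saved nextOffset) and for a next entry whose offset is still zero because its creator hasn't filled it in (a real reader sleeps and retries). A toy sketch of that arithmetic with stand-in values:

    #include <stdio.h>

    static unsigned offsets[] = {1, 4, 9, 0}; /* starting offset per multixact */
    static unsigned next_mxid = 3;            /* first unassigned multixact */
    static unsigned next_offset = 12;         /* first unassigned member slot */

    /* returns member count, or -1 if the next entry isn't filled in yet */
    static int multixact_nmembers(unsigned mxid)
    {
        if (mxid + 1 >= next_mxid)
            return (int) (next_offset - offsets[mxid]); /* latest multixact */
        if (offsets[mxid + 1] == 0)
            return -1;                                  /* retry later */
        return (int) (offsets[mxid + 1] - offsets[mxid]);
    }

    int main(void)
    {
        printf("mxid 0 has %d members\n", multixact_nmembers(0)); /* 3 */
        printf("mxid 2 has %d members\n", multixact_nmembers(2)); /* 3 = 12-9 */

        next_mxid = 4;          /* pretend mxid 3 was assigned but not filled */
        printf("mxid 2 now: %d\n", multixact_nmembers(2));        /* -1 */
        return 0;
    }
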
diff --git a/src/backend/access/transam/slru.c b/src/backend/access/transam/slru.c
index 69e5245beba..3e92903f938 100644
--- a/src/backend/access/transam/slru.c
+++ b/src/backend/access/transam/slru.c
@@ -15,7 +15,7 @@
*
* We use a control LWLock to protect the shared data structures, plus
* per-buffer LWLocks that synchronize I/O for each buffer. The control lock
- * must be held to examine or modify any shared state. A process that is
+ * must be held to examine or modify any shared state. A process that is
* reading in or writing out a page buffer does not hold the control lock,
* only the per-buffer lock for the buffer it is working on.
*
@@ -34,7 +34,7 @@
* could have happened while we didn't have the lock).
*
* As with the regular buffer manager, it is possible for another process
- * to re-dirty a page that is currently being written out. This is handled
+ * to re-dirty a page that is currently being written out. This is handled
* by re-setting the page's page_dirty flag.
*
*
@@ -96,7 +96,7 @@ typedef struct SlruFlushData *SlruFlush;
* page_lru_count entries to be "reset" to lower values than they should have,
* in case a process is delayed while it executes this macro. With care in
* SlruSelectLRUPage(), this does little harm, and in any case the absolute
- * worst possible consequence is a nonoptimal choice of page to evict. The
+ * worst possible consequence is a nonoptimal choice of page to evict. The
* gain from allowing concurrent reads of SLRU pages seems worth it.
*/
#define SlruRecentlyUsed(shared, slotno) \
@@ -483,7 +483,7 @@ SimpleLruReadPage_ReadOnly(SlruCtl ctl, int pageno, TransactionId xid)
*
* NOTE: only one write attempt is made here. Hence, it is possible that
* the page is still dirty at exit (if someone else re-dirtied it during
- * the write). However, we *do* attempt a fresh write even if the page
+ * the write). However, we *do* attempt a fresh write even if the page
* is already being written; this is for checkpoints.
*
* Control lock must be held at entry, and will be held at exit.
@@ -592,7 +592,7 @@ SlruPhysicalReadPage(SlruCtl ctl, int pageno, int slotno)
* In a crash-and-restart situation, it's possible for us to receive
* commands to set the commit status of transactions whose bits are in
* already-truncated segments of the commit log (see notes in
- * SlruPhysicalWritePage). Hence, if we are InRecovery, allow the case
+ * SlruPhysicalWritePage). Hence, if we are InRecovery, allow the case
* where the file doesn't exist, and return zeroes instead.
*/
fd = BasicOpenFile(path, O_RDWR | PG_BINARY, S_IRUSR | S_IWUSR);
@@ -1123,7 +1123,7 @@ restart:;
/*
* Hmm, we have (or may have) I/O operations acting on the page, so
* we've got to wait for them to finish and then start again. This is
- * the same logic as in SlruSelectLRUPage. (XXX if page is dirty,
+ * the same logic as in SlruSelectLRUPage. (XXX if page is dirty,
* wouldn't it be OK to just discard it without writing it? For now,
* keep the logic the same as it was.)
*/
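
Editorial note (not part of the commit): the slru.c hunks above describe LRU bookkeeping done without the control lock: a page's count is bumped only when stale, races can at worst under-count a page, and the penalty is merely a nonoptimal eviction. A toy model of that counter scheme with illustrative sizes and names:

    #include <stdio.h>

    #define NSLOTS 4

    /* start at 1 so never-touched pages (count 0) always look oldest;
     * this initialization is a simplification for the demo */
    static unsigned cur_lru_count = 1;
    static unsigned page_lru_count[NSLOTS];

    /* bump the shared counter only when this page's count is stale */
    static void slru_recently_used(int slot)
    {
        if (page_lru_count[slot] != cur_lru_count)
            page_lru_count[slot] = ++cur_lru_count;
    }

    /* evict the page with the smallest (oldest) count */
    static int slru_select_victim(void)
    {
        int best = 0;

        for (int slot = 1; slot < NSLOTS; slot++)
            if (page_lru_count[slot] < page_lru_count[best])
                best = slot;
        return best;
    }

    int main(void)
    {
        slru_recently_used(0);
        slru_recently_used(2);
        slru_recently_used(0);  /* slot 0 is stale again: bumps the counter */
        printf("evict slot %d\n", slru_select_victim()); /* 1: never touched */
        return 0;
    }
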
diff --git a/src/backend/access/transam/subtrans.c b/src/backend/access/transam/subtrans.c
index deef99aeb22..dff85c6b04d 100644
--- a/src/backend/access/transam/subtrans.c
+++ b/src/backend/access/transam/subtrans.c
@@ -5,7 +5,7 @@
*
* The pg_subtrans manager is a pg_clog-like manager that stores the parent
* transaction Id for each transaction. It is a fundamental part of the
- * nested transactions implementation. A main transaction has a parent
+ * nested transactions implementation. A main transaction has a parent
* of InvalidTransactionId, and each subtransaction has its immediate parent.
* The tree can easily be walked from child to parent, but not in the
* opposite direction.
@@ -191,7 +191,7 @@ SUBTRANSShmemInit(void)
* must have been called already.)
*
* Note: it's not really necessary to create the initial segment now,
- * since slru.c would create it on first write anyway. But we may as well
+ * since slru.c would create it on first write anyway. But we may as well
* do it to be sure the directory is set up correctly.
*/
void
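
Editorial note (not part of the commit): the subtrans.c header comment above describes a map from each subtransaction to its immediate parent, walkable only from child to parent, with top-level transactions recording an invalid parent. A toy sketch of that walk, using a fixed array as a stand-in for the SLRU-backed map:

    #include <stdio.h>

    typedef unsigned TransactionId;
    #define InvalidTransactionId ((TransactionId) 0)
    #define MAX_XID 16

    static TransactionId parent_of[MAX_XID];  /* 0 = no parent (top level) */

    static TransactionId topmost_xid(TransactionId xid)
    {
        while (parent_of[xid] != InvalidTransactionId)
            xid = parent_of[xid];             /* hop to the immediate parent */
        return xid;
    }

    int main(void)
    {
        parent_of[7] = 5;                     /* 7 is a subxact of 5 */
        parent_of[5] = 3;                     /* 5 is a subxact of 3 */
        printf("topmost of 7 is %u\n", topmost_xid(7));   /* 3 */
        return 0;
    }
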
diff --git a/src/backend/access/transam/transam.c b/src/backend/access/transam/transam.c
index cc99cb93b31..31d6e5a3e2d 100644
--- a/src/backend/access/transam/transam.c
+++ b/src/backend/access/transam/transam.c
@@ -149,7 +149,7 @@ TransactionIdDidCommit(TransactionId transactionId)
* be a window just after database startup where we do not have complete
* knowledge in pg_subtrans of the transactions after TransactionXmin.
* StartupSUBTRANS() has ensured that any missing information will be
- * zeroed. Since this case should not happen under normal conditions, it
+ * zeroed. Since this case should not happen under normal conditions, it
* seems reasonable to emit a WARNING for it.
*/
if (xidstatus == TRANSACTION_STATUS_SUB_COMMITTED)
@@ -305,7 +305,7 @@ TransactionIdPrecedes(TransactionId id1, TransactionId id2)
{
/*
* If either ID is a permanent XID then we can just do unsigned
- * comparison. If both are normal, do a modulo-2^32 comparison.
+ * comparison. If both are normal, do a modulo-2^32 comparison.
*/
int32 diff;
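
Editorial note (not part of the commit): the TransactionIdPrecedes hunk above distinguishes permanent XIDs (plain unsigned comparison) from normal XIDs (modulo-2^32 comparison). The modulo comparison works by taking the sign of the 32-bit difference, so each XID sees half the number circle behind it and half ahead. A self-contained sketch:

    #include <stdint.h>
    #include <stdbool.h>
    #include <stdio.h>

    typedef uint32_t TransactionId;
    #define FirstNormalTransactionId ((TransactionId) 3)

    static bool xid_precedes(TransactionId id1, TransactionId id2)
    {
        /* permanent (special) XIDs compare by unsigned value */
        if (id1 < FirstNormalTransactionId || id2 < FirstNormalTransactionId)
            return id1 < id2;

        /* normal XIDs: sign of the wrapping 32-bit difference */
        int32_t diff = (int32_t) (id1 - id2);
        return diff < 0;
    }

    int main(void)
    {
        /* 0xFFFFFFF0 precedes 10: the counter wrapped between them */
        printf("%d\n", xid_precedes(0xFFFFFFF0u, 10));   /* 1 */
        printf("%d\n", xid_precedes(10, 0xFFFFFFF0u));   /* 0 */
        return 0;
    }
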
diff --git a/src/backend/access/transam/twophase.c b/src/backend/access/transam/twophase.c
index 6d496b58044..ccc8ec5ea46 100644
--- a/src/backend/access/transam/twophase.c
+++ b/src/backend/access/transam/twophase.c
@@ -432,7 +432,7 @@ LockGXact(const char *gid, Oid user)
/*
* Note: it probably would be possible to allow committing from
* another database; but at the moment NOTIFY is known not to work and
- * there may be some other issues as well. Hence disallow until
+ * there may be some other issues as well. Hence disallow until
* someone gets motivated to make it work.
*/
if (MyDatabaseId != gxact->proc.databaseId)
@@ -1005,7 +1005,7 @@ EndPrepare(GlobalTransaction gxact)
* out the correct state file CRC, we have an inconsistency: the xact is
* prepared according to WAL but not according to our on-disk state. We
* use a critical section to force a PANIC if we are unable to complete
- * the write --- then, WAL replay should repair the inconsistency. The
+ * the write --- then, WAL replay should repair the inconsistency. The
* odds of a PANIC actually occurring should be very tiny given that we
* were able to write the bogus CRC above.
*
@@ -1050,7 +1050,7 @@ EndPrepare(GlobalTransaction gxact)
errmsg("could not close two-phase state file: %m")));
/*
- * Mark the prepared transaction as valid. As soon as xact.c marks MyProc
+ * Mark the prepared transaction as valid. As soon as xact.c marks MyProc
* as not running our XID (which it will do immediately after this
* function returns), others can commit/rollback the xact.
*
@@ -1313,7 +1313,7 @@ FinishPreparedTransaction(const char *gid, bool isCommit)
/*
* In case we fail while running the callbacks, mark the gxact invalid so
* no one else will try to commit/rollback, and so it can be recycled
- * properly later. It is still locked by our XID so it won't go away yet.
+ * properly later. It is still locked by our XID so it won't go away yet.
*
* (We assume it's safe to do this without taking TwoPhaseStateLock.)
*/
@@ -1521,7 +1521,7 @@ CheckPointTwoPhase(XLogRecPtr redo_horizon)
*
* This approach creates a race condition: someone else could delete a
* GXACT between the time we release TwoPhaseStateLock and the time we try
- * to open its state file. We handle this by special-casing ENOENT
+ * to open its state file. We handle this by special-casing ENOENT
* failures: if we see that, we verify that the GXACT is no longer valid,
* and if so ignore the failure.
*/
@@ -1601,7 +1601,7 @@ CheckPointTwoPhase(XLogRecPtr redo_horizon)
*
* We throw away any prepared xacts with main XID beyond nextXid --- if any
* are present, it suggests that the DBA has done a PITR recovery to an
- * earlier point in time without cleaning out pg_twophase. We dare not
+ * earlier point in time without cleaning out pg_twophase. We dare not
* try to recover such prepared xacts since they likely depend on database
* state that doesn't exist now.
*
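
Editorial note (not part of the commit): the CheckPointTwoPhase hunk above describes a deliberate race: a gxact can be finished and its state file deleted between releasing TwoPhaseStateLock and opening the file, so ENOENT is benign if the gxact is no longer valid. A sketch of that error-handling shape; gxact_still_valid() is a hypothetical stand-in for rechecking shared state:

    #include <errno.h>
    #include <fcntl.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <unistd.h>

    static bool gxact_still_valid(unsigned xid)
    {
        (void) xid;
        return false;       /* pretend the gxact was concurrently finished */
    }

    static bool fsync_state_file(const char *path, unsigned xid)
    {
        int fd = open(path, O_RDONLY);

        if (fd < 0)
        {
            /* special-case ENOENT: benign if the gxact went away */
            if (errno == ENOENT && !gxact_still_valid(xid))
                return true;
            perror("open");
            return false;
        }
        /* ... fsync(fd) and its error handling would go here ... */
        close(fd);
        return true;
    }

    int main(void)
    {
        return fsync_state_file("/nonexistent/2phase/0001", 1) ? 0 : 1;
    }
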
diff --git a/src/backend/access/transam/varsup.c b/src/backend/access/transam/varsup.c
index 563d4caa9bb..b29d7458ffb 100644
--- a/src/backend/access/transam/varsup.c
+++ b/src/backend/access/transam/varsup.c
@@ -40,7 +40,7 @@ VariableCache ShmemVariableCache = NULL;
*
* Note: when this is called, we are actually already inside a valid
* transaction, since XIDs are now not allocated until the transaction
- * does something. So it is safe to do a database lookup if we want to
+ * does something. So it is safe to do a database lookup if we want to
* issue a warning about XID wrap.
*/
TransactionId
@@ -164,20 +164,20 @@ GetNewTransactionId(bool isSubXact)
/*
* Now advance the nextXid counter. This must not happen until after we
* have successfully completed ExtendCLOG() --- if that routine fails, we
- * want the next incoming transaction to try it again. We cannot assign
+ * want the next incoming transaction to try it again. We cannot assign
* more XIDs until there is CLOG space for them.
*/
TransactionIdAdvance(ShmemVariableCache->nextXid);
/*
* We must store the new XID into the shared ProcArray before releasing
- * XidGenLock. This ensures that every active XID older than
+ * XidGenLock. This ensures that every active XID older than
* latestCompletedXid is present in the ProcArray, which is essential for
* correct OldestXmin tracking; see src/backend/access/transam/README.
*
* XXX by storing xid into MyProc without acquiring ProcArrayLock, we are
* relying on fetch/store of an xid to be atomic, else other backends
- * might see a partially-set xid here. But holding both locks at once
+ * might see a partially-set xid here. But holding both locks at once
* would be a nasty concurrency hit. So for now, assume atomicity.
*
* Note that readers of PGPROC xid fields should be careful to fetch the
@@ -287,7 +287,7 @@ SetTransactionIdLimit(TransactionId oldest_datfrozenxid, Oid oldest_datoid)
/*
* We'll start complaining loudly when we get within 10M transactions of
- * the stop point. This is kind of arbitrary, but if you let your gas
+ * the stop point. This is kind of arbitrary, but if you let your gas
* gauge get down to 1% of full, would you be looking for the next gas
* station? We need to be fairly liberal about this number because there
* are lots of scenarios where most transactions are done by automatic
@@ -387,7 +387,7 @@ SetTransactionIdLimit(TransactionId oldest_datfrozenxid, Oid oldest_datoid)
* We primarily check whether oldestXidDB is valid. The cases we have in
* mind are that that database was dropped, or the field was reset to zero
* by pg_resetxlog. In either case we should force recalculation of the
- * wrap limit. Also do it if oldestXid is old enough to be forcing
+ * wrap limit. Also do it if oldestXid is old enough to be forcing
* autovacuums or other actions; this ensures we update our state as soon
* as possible once extra overhead is being incurred.
*/
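
Editorial note (not part of the commit): the SetTransactionIdLimit hunks above describe the "gas gauge": start complaining when within 10 million XIDs of the stop point. Because XIDs wrap modulo 2^32, the remaining distance is computed as a signed 32-bit difference, the same trick as TransactionIdPrecedes. A sketch with the 10M constant from the comment; the function itself is an illustrative stand-in:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t TransactionId;

    static void maybe_warn(TransactionId next_xid, TransactionId stop_limit)
    {
        int32_t left = (int32_t) (stop_limit - next_xid);  /* modulo 2^32 */

        if (left < 10 * 1000 * 1000)
            printf("WARNING: only %d transactions until wraparound stop\n",
                   (int) left);
    }

    int main(void)
    {
        maybe_warn(4000000000u, 4000005000u);   /* warns: 5000 left */
        maybe_warn(1000u, 2000001000u);         /* quiet: ~2 billion left */
        return 0;
    }
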
diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c
index cc5b71ecb08..9b9cf54ab0b 100644
--- a/src/backend/access/transam/xact.c
+++ b/src/backend/access/transam/xact.c
@@ -552,7 +552,7 @@ GetCurrentSubTransactionId(void)
*
* "used" must be TRUE if the caller intends to use the command ID to mark
* inserted/updated/deleted tuples. FALSE means the ID is being fetched
- * for read-only purposes (ie, as a snapshot validity cutoff). See
+ * for read-only purposes (ie, as a snapshot validity cutoff). See
* CommandCounterIncrement() for discussion.
*/
CommandId
@@ -639,7 +639,7 @@ TransactionIdIsCurrentTransactionId(TransactionId xid)
/*
* We always say that BootstrapTransactionId is "not my transaction ID"
- * even when it is (ie, during bootstrap). Along with the fact that
+ * even when it is (ie, during bootstrap). Along with the fact that
* transam.c always treats BootstrapTransactionId as already committed,
* this causes the tqual.c routines to see all tuples as committed, which
* is what we need during bootstrap. (Bootstrap mode only inserts tuples,
@@ -781,7 +781,7 @@ AtStart_Memory(void)
/*
* If this is the first time through, create a private context for
* AbortTransaction to work in. By reserving some space now, we can
- * insulate AbortTransaction from out-of-memory scenarios. Like
+ * insulate AbortTransaction from out-of-memory scenarios. Like
* ErrorContext, we set it up with slow growth rate and a nonzero minimum
* size, so that space will be reserved immediately.
*/
@@ -884,7 +884,7 @@ AtSubStart_ResourceOwner(void)
Assert(s->parent != NULL);
/*
- * Create a resource owner for the subtransaction. We make it a child of
+ * Create a resource owner for the subtransaction. We make it a child of
* the immediate parent's resource owner.
*/
s->curTransactionOwner =
@@ -904,7 +904,7 @@ AtSubStart_ResourceOwner(void)
* RecordTransactionCommit
*
* Returns latest XID among xact and its children, or InvalidTransactionId
- * if the xact has no XID. (We compute that here just because it's easier.)
+ * if the xact has no XID. (We compute that here just because it's easier.)
*/
static TransactionId
RecordTransactionCommit(void)
@@ -949,7 +949,7 @@ RecordTransactionCommit(void)
/*
* If we didn't create XLOG entries, we're done here; otherwise we
- * should flush those entries the same as a commit record. (An
+ * should flush those entries the same as a commit record. (An
* example of a possible record that wouldn't cause an XID to be
* assigned is a sequence advance record due to nextval() --- we want
* to flush that to disk before reporting commit.)
@@ -982,7 +982,7 @@ RecordTransactionCommit(void)
xlrec.tsId = MyDatabaseTableSpace;
/*
- * Mark ourselves as within our "commit critical section". This
+ * Mark ourselves as within our "commit critical section". This
* forces any concurrent checkpoint to wait until we've updated
* pg_clog. Without this, it is possible for the checkpoint to set
* REDO after the XLOG record but fail to flush the pg_clog update to
@@ -990,7 +990,7 @@ RecordTransactionCommit(void)
* crashes a little later.
*
* Note: we could, but don't bother to, set this flag in
- * RecordTransactionAbort. That's because loss of a transaction abort
+ * RecordTransactionAbort. That's because loss of a transaction abort
* is noncritical; the presumption would be that it aborted, anyway.
*
* It's safe to change the inCommit flag of our own backend without
@@ -1044,15 +1044,15 @@ RecordTransactionCommit(void)
/*
* Check if we want to commit asynchronously. We can allow the XLOG flush
* to happen asynchronously if synchronous_commit=off, or if the current
- * transaction has not performed any WAL-logged operation. The latter
+ * transaction has not performed any WAL-logged operation. The latter
* case can arise if the current transaction wrote only to temporary
- * and/or unlogged tables. In case of a crash, the loss of such a
+ * and/or unlogged tables. In case of a crash, the loss of such a
* transaction will be irrelevant since temp tables will be lost anyway,
* and unlogged tables will be truncated. (Given the foregoing, you might
* think that it would be unnecessary to emit the XLOG record at all in
* this case, but we don't currently try to do that. It would certainly
* cause problems at least in Hot Standby mode, where the
- * KnownAssignedXids machinery requires tracking every XID assignment. It
+ * KnownAssignedXids machinery requires tracking every XID assignment. It
* might be OK to skip it only when wal_level < hot_standby, but for now
* we don't.)
*
@@ -1322,7 +1322,7 @@ AtSubCommit_childXids(void)
* RecordTransactionAbort
*
* Returns latest XID among xact and its children, or InvalidTransactionId
- * if the xact has no XID. (We compute that here just because it's easier.)
+ * if the xact has no XID. (We compute that here just because it's easier.)
*/
static TransactionId
RecordTransactionAbort(bool isSubXact)
@@ -1339,7 +1339,7 @@ RecordTransactionAbort(bool isSubXact)
/*
* If we haven't been assigned an XID, nobody will care whether we aborted
- * or not. Hence, we're done in that case. It does not matter if we have
+ * or not. Hence, we're done in that case. It does not matter if we have
* rels to delete (note that this routine is not responsible for actually
* deleting 'em). We cannot have any child XIDs, either.
*/
@@ -1355,7 +1355,7 @@ RecordTransactionAbort(bool isSubXact)
* We have a valid XID, so we should write an ABORT record for it.
*
* We do not flush XLOG to disk here, since the default assumption after a
- * crash would be that we aborted, anyway. For the same reason, we don't
+ * crash would be that we aborted, anyway. For the same reason, we don't
* need to worry about interlocking against checkpoint start.
*/
@@ -1523,7 +1523,7 @@ AtSubAbort_childXids(void)
/*
* We keep the child-XID arrays in TopTransactionContext (see
- * AtSubCommit_childXids). This means we'd better free the array
+ * AtSubCommit_childXids). This means we'd better free the array
* explicitly at abort to avoid leakage.
*/
if (s->childXids != NULL)
@@ -1700,7 +1700,7 @@ StartTransaction(void)
VirtualXactLockTableInsert(vxid);
/*
- * Advertise it in the proc array. We assume assignment of
+ * Advertise it in the proc array. We assume assignment of
* LocalTransactionID is atomic, and the backendId should be set already.
*/
Assert(MyProc->backendId == vxid.backendId);
@@ -1795,7 +1795,7 @@ CommitTransaction(void)
/*
* The remaining actions cannot call any user-defined code, so it's safe
- * to start shutting down within-transaction services. But note that most
+ * to start shutting down within-transaction services. But note that most
* of this stuff could still throw an error, which would switch us into
* the transaction-abort path.
*/
@@ -1998,7 +1998,7 @@ PrepareTransaction(void)
/*
* The remaining actions cannot call any user-defined code, so it's safe
- * to start shutting down within-transaction services. But note that most
+ * to start shutting down within-transaction services. But note that most
* of this stuff could still throw an error, which would switch us into
* the transaction-abort path.
*/
@@ -2108,7 +2108,7 @@ PrepareTransaction(void)
XactLastRecEnd.xrecoff = 0;
/*
- * Let others know about no transaction in progress by me. This has to be
+ * Let others know about no transaction in progress by me. This has to be
* done *after* the prepared transaction has been marked valid, else
* someone may think it is unlocked and recyclable.
*/
@@ -2117,7 +2117,7 @@ PrepareTransaction(void)
/*
* This is all post-transaction cleanup. Note that if an error is raised
* here, it's too late to abort the transaction. This should be just
- * noncritical resource releasing. See notes in CommitTransaction.
+ * noncritical resource releasing. See notes in CommitTransaction.
*/
CallXactCallbacks(XACT_EVENT_PREPARE);
@@ -2282,7 +2282,7 @@ AbortTransaction(void)
ProcArrayEndTransaction(MyProc, latestXid);
/*
- * Post-abort cleanup. See notes in CommitTransaction() concerning
+ * Post-abort cleanup. See notes in CommitTransaction() concerning
* ordering. We can skip all of it if the transaction failed before
* creating a resource owner.
*/
@@ -2516,7 +2516,7 @@ CommitTransactionCommand(void)
/*
* Here we were in a perfectly good transaction block but the user
- * told us to ROLLBACK anyway. We have to abort the transaction
+ * told us to ROLLBACK anyway. We have to abort the transaction
* and then clean up.
*/
case TBLOCK_ABORT_PENDING:
@@ -2536,7 +2536,7 @@ CommitTransactionCommand(void)
/*
* We were just issued a SAVEPOINT inside a transaction block.
- * Start a subtransaction. (DefineSavepoint already did
+ * Start a subtransaction. (DefineSavepoint already did
* PushTransaction, so as to have someplace to put the SUBBEGIN
* state.)
*/
@@ -2722,7 +2722,7 @@ AbortCurrentTransaction(void)
break;
/*
- * Here, we failed while trying to COMMIT. Clean up the
+ * Here, we failed while trying to COMMIT. Clean up the
* transaction and return to idle state (we do not want to stay in
* the transaction).
*/
@@ -2784,7 +2784,7 @@ AbortCurrentTransaction(void)
/*
* If we failed while trying to create a subtransaction, clean up
- * the broken subtransaction and abort the parent. The same
+ * the broken subtransaction and abort the parent. The same
* applies if we get a failure while ending a subtransaction.
*/
case TBLOCK_SUBBEGIN:
@@ -3313,7 +3313,7 @@ UserAbortTransactionBlock(void)
break;
/*
- * We are inside a subtransaction. Mark everything up to top
+ * We are inside a subtransaction. Mark everything up to top
* level as exitable.
*/
case TBLOCK_SUBINPROGRESS:
@@ -3445,7 +3445,7 @@ ReleaseSavepoint(List *options)
break;
/*
- * We are in a non-aborted subtransaction. This is the only valid
+ * We are in a non-aborted subtransaction. This is the only valid
* case.
*/
case TBLOCK_SUBINPROGRESS:
@@ -3501,7 +3501,7 @@ ReleaseSavepoint(List *options)
/*
* Mark "commit pending" all subtransactions up to the target
- * subtransaction. The actual commits will happen when control gets to
+ * subtransaction. The actual commits will happen when control gets to
* CommitTransactionCommand.
*/
xact = CurrentTransactionState;
@@ -3599,7 +3599,7 @@ RollbackToSavepoint(List *options)
/*
* Mark "abort pending" all subtransactions up to the target
- * subtransaction. The actual aborts will happen when control gets to
+ * subtransaction. The actual aborts will happen when control gets to
* CommitTransactionCommand.
*/
xact = CurrentTransactionState;
@@ -3998,7 +3998,7 @@ CommitSubTransaction(void)
CommandCounterIncrement();
/*
- * Prior to 8.4 we marked subcommit in clog at this point. We now only
+ * Prior to 8.4 we marked subcommit in clog at this point. We now only
* perform that step, if required, as part of the atomic update of the
* whole transaction tree at top level commit or abort.
*/
@@ -4433,7 +4433,7 @@ TransStateAsString(TransState state)
/*
* xactGetCommittedChildren
*
- * Gets the list of committed children of the current transaction. The return
+ * Gets the list of committed children of the current transaction. The return
* value is the number of child transactions. *ptr is set to point to an
* array of TransactionIds. The array is allocated in TopTransactionContext;
* the caller should *not* pfree() it (this is a change from pre-8.4 code!).
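
Editorial note (not part of the commit): the xactGetCommittedChildren hunk above documents a calling convention: return the child count, set *ptr to an array the caller must not free, because it lives in memory reclaimed at transaction end. A toy sketch of that shape, with a static array standing in for TopTransactionContext storage:

    #include <stdio.h>

    typedef unsigned TransactionId;

    static TransactionId child_xids[] = {101, 103, 107};

    /* returns the number of committed children; *ptr is set to storage
     * owned by the callee and must NOT be freed by the caller */
    static int xact_get_committed_children(TransactionId **ptr)
    {
        int nchildren = (int) (sizeof(child_xids) / sizeof(child_xids[0]));

        *ptr = (nchildren > 0) ? child_xids : NULL;
        return nchildren;
    }

    int main(void)
    {
        TransactionId *xids;
        int n = xact_get_committed_children(&xids);

        for (int i = 0; i < n; i++)
            printf("child %u committed\n", xids[i]);
        return 0;
    }
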
diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c
index d0672a54d0d..da5ba51a1d0 100644
--- a/src/backend/access/transam/xlog.c
+++ b/src/backend/access/transam/xlog.c
@@ -90,7 +90,7 @@ bool XLOG_DEBUG = false;
* future XLOG segment as long as there aren't already XLOGfileslop future
* segments; else we'll delete it.  This could be made a separate GUC
* variable, but at present I think it's sufficient to hardwire it as
- * 2*CheckPointSegments+1. Under normal conditions, a checkpoint will free
+ * 2*CheckPointSegments+1.  Under normal conditions, a checkpoint will free
* no more than 2*CheckPointSegments log segments, and we want to recycle all
* of them; the +1 allows boundary cases to happen without wasting a
* delete/create-segment cycle.
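A worked instance of the arithmetic above: with CheckPointSegments = 3, a checkpoint frees at most 2*3 = 6 log segments, so up to 2*3+1 = 7 ready-made future segments may be kept for recycling before any surplus segment is deleted.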
@@ -173,7 +173,7 @@ static bool LocalHotStandbyActive = false;
* 0: unconditionally not allowed to insert XLOG
* -1: must check RecoveryInProgress(); disallow until it is false
* Most processes start with -1 and transition to 1 after seeing that recovery
- * is not in progress. But we can also force the value for special cases.
+ * is not in progress.  But we can also force the value for special cases.
* The coding in XLogInsertAllowed() depends on the first two of these states
* being numerically the same as bool true and false.
*/
@@ -221,7 +221,7 @@ static bool recoveryStopAfter;
*
* expectedTLIs: an integer list of recoveryTargetTLI and the TLIs of
* its known parents, newest first (so recoveryTargetTLI is always the
- * first list member). Only these TLIs are expected to be seen in the WAL
+ * first list member).  Only these TLIs are expected to be seen in the WAL
* segments we read, and indeed only these TLIs will be considered as
* candidate WAL files to open at all.
*
@@ -250,9 +250,9 @@ XLogRecPtr XactLastRecEnd = {0, 0};
/*
* RedoRecPtr is this backend's local copy of the REDO record pointer
* (which is almost but not quite the same as a pointer to the most recent
- * CHECKPOINT record). We update this from the shared-memory copy,
+ * CHECKPOINT record).  We update this from the shared-memory copy,
* XLogCtl->Insert.RedoRecPtr, whenever we can safely do so (ie, when we
- * hold the Insert lock). See XLogInsert for details. We are also allowed
+ * hold the Insert lock).  See XLogInsert for details.  We are also allowed
* to update from XLogCtl->Insert.RedoRecPtr if we hold the info_lck;
* see GetRedoRecPtr.  A freshly spawned backend obtains the value during
* InitXLOGAccess.
@@ -296,7 +296,7 @@ static XLogRecPtr RedoStartLSN = {0, 0};
* without needing to grab info_lck as well.
*
* XLogCtl->Insert.LogwrtResult may lag behind the reality of the other two,
- * but is updated when convenient. Again, it exists for the convenience of
+ * but is updated when convenient.  Again, it exists for the convenience of
* code that is already holding WALInsertLock but not the other locks.
*
* The unshared LogwrtResult may lag behind any or all of these, and again
@@ -1895,7 +1895,7 @@ XLogWrite(XLogwrtRqst WriteRqst, bool flexible, bool xlog_switch)
{
/*
* Could get here without iterating above loop, in which case we might
- * have no open file or the wrong one. However, we do not need to
+ * have no open file or the wrong one.  However, we do not need to
* fsync more than one file.
*/
if (sync_method != SYNC_METHOD_OPEN &&
@@ -2159,9 +2159,9 @@ XLogFlush(XLogRecPtr record)
* We normally flush only completed blocks; but if there is nothing to do on
* that basis, we check for unflushed async commits in the current incomplete
* block, and flush through the latest one of those.  Thus, if async commits
- * are not being used, we will flush complete blocks only. We can guarantee
+ * are not being used, we will flush complete blocks only.  We can guarantee
* that async commits reach disk after at most three cycles; normally only
- * one or two. (We allow XLogWrite to write "flexibly", meaning it can stop
+ * one or two.  (We allow XLogWrite to write "flexibly", meaning it can stop
* at the end of the buffer ring; this makes a difference only with very high
* load or long wal_writer_delay, but imposes one extra cycle for the worst
* case for async commits.)
@@ -2328,7 +2328,7 @@ XLogNeedsFlush(XLogRecPtr record)
* log, seg: identify segment to be created/opened.
*
* *use_existent: if TRUE, OK to use a pre-existing file (else, any
- * pre-existing file will be deleted). On return, TRUE if a pre-existing
+ * pre-existing file will be deleted).  On return, TRUE if a pre-existing
* file was used.
*
* use_lock: if TRUE, acquire ControlFileLock while moving file into
@@ -2398,11 +2398,11 @@ XLogFileInit(uint32 log, uint32 seg,
errmsg("could not create file \"%s\": %m", tmppath)));
/*
- * Zero-fill the file. We have to do this the hard way to ensure that all
+ * Zero-fill the file.  We have to do this the hard way to ensure that all
* the file space has really been allocated --- on platforms that allow
* "holes" in files, just seeking to the end doesn't allocate intermediate
* space.  This way, we know that we have all the space and (after the
- * fsync below) that all the indirect blocks are down on disk. Therefore,
+ * fsync below) that all the indirect blocks are down on disk.  Therefore,
* fdatasync(2) or O_DSYNC will be sufficient to sync future writes to the
* log file.
*
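A minimal sketch of the zero-fill technique the comment above describes, assuming POSIX I/O and an illustrative 16MB segment size; error handling is simplified, and this is not the actual XLogFileInit() code:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

#define SEG_SIZE (16 * 1024 * 1024)     /* assumed segment size */
#define BLK_SIZE 8192                   /* assumed write unit */

static int
zero_fill_segment(const char *tmppath)
{
    char        zbuffer[BLK_SIZE];
    long        nbytes;
    int         fd;

    fd = open(tmppath, O_RDWR | O_CREAT | O_EXCL, 0600);
    if (fd < 0)
        return -1;

    /* Write zeros block by block instead of seeking to the end, so that
     * every block is really allocated (no "holes" in the file). */
    memset(zbuffer, 0, sizeof(zbuffer));
    for (nbytes = 0; nbytes < SEG_SIZE; nbytes += sizeof(zbuffer))
    {
        if (write(fd, zbuffer, sizeof(zbuffer)) != (ssize_t) sizeof(zbuffer))
        {
            close(fd);
            unlink(tmppath);
            return -1;
        }
    }

    /* One fsync puts the data and indirect blocks on disk; after this,
     * syncing only the data suffices for future writes into the file. */
    if (fsync(fd) != 0)
    {
        close(fd);
        return -1;
    }
    return close(fd);
}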
@@ -2490,7 +2490,7 @@ XLogFileInit(uint32 log, uint32 seg,
* a different timeline)
*
* Currently this is only used during recovery, and so there are no locking
- * considerations. But we should be just as tense as XLogFileInit to avoid
+ * considerations.  But we should be just as tense as XLogFileInit to avoid
* emplacing a bogus file.
*/
static void
@@ -2795,7 +2795,7 @@ XLogFileReadAnyTLI(uint32 log, uint32 seg, int emode, int sources)
* the timelines listed in expectedTLIs.
*
* We expect curFileTLI on entry to be the TLI of the preceding file in
- * sequence, or 0 if there was no predecessor. We do not allow curFileTLI
+ * sequence, or 0 if there was no predecessor.  We do not allow curFileTLI
* to go backwards; this prevents us from picking up the wrong file when a
* parent timeline extends to higher segment numbers than the child we
* want to read.
@@ -2845,7 +2845,7 @@ XLogFileClose(void)
/*
* WAL segment files will not be re-read in normal operation, so we advise
- * the OS to release any cached pages. But do not do so if WAL archiving
+ * the OS to release any cached pages.  But do not do so if WAL archiving
* or streaming is active, because archiver and walsender process could
* use the cache to read the WAL segment.
*/
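A small sketch of the advise-on-close idea above, assuming POSIX_FADV_DONTNEED is available; the archiving/streaming condition is reduced to a single flag here, and this is not the actual XLogFileClose() code:

#include <fcntl.h>
#include <unistd.h>

static void
close_wal_file(int fd, int archiving_or_streaming_active)
{
#if defined(POSIX_FADV_DONTNEED)
    /* Hint that we won't re-read these pages, unless another process
     * (archiver, walsender) is about to read them from cache. */
    if (!archiving_or_streaming_active)
        (void) posix_fadvise(fd, 0, 0, POSIX_FADV_DONTNEED);
#endif
    close(fd);
}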
@@ -3368,7 +3368,7 @@ RemoveOldXlogFiles(uint32 log, uint32 seg, XLogRecPtr endptr)
{
/*
* We ignore the timeline part of the XLOG segment identifiers in
- * deciding whether a segment is still needed. This ensures that we
+ * deciding whether a segment is still needed.  This ensures that we
* won't prematurely remove a segment from a parent timeline.  We could
* probably be a little more proactive about removing segments of
* non-parent timelines, but that would be a whole lot more
@@ -4068,7 +4068,7 @@ next_record_is_invalid:
* Check whether the xlog header of a page just read in looks valid.
*
* This is just a convenience subroutine to avoid duplicated code in
- * ReadRecord. It's not intended for use from anywhere else.
+ * ReadRecord.  It's not intended for use from anywhere else.
*/
static bool
ValidXLOGHeader(XLogPageHeader hdr, int emode, bool segmentonly)
@@ -4198,7 +4198,7 @@ ValidXLOGHeader(XLogPageHeader hdr, int emode, bool segmentonly)
* Try to read a timeline's history file.
*
* If successful, return the list of component TLIs (the given TLI followed by
- * its ancestor TLIs). If we can't find the history file, assume that the
+ * its ancestor TLIs).  If we can't find the history file, assume that the
* timeline has no parents, and return a list of just the specified timeline
* ID.
*/
@@ -4429,7 +4429,7 @@ findNewestTimeLine(TimeLineID startTLI)
* endTLI et al: ID of the last used WAL file, for annotation purposes
*
* Currently this is only used during recovery, and so there are no locking
- * considerations. But we should be just as tense as XLogFileInit to avoid
+ * considerations.  But we should be just as tense as XLogFileInit to avoid
* emplacing a bogus file.
*/
static void
@@ -4623,7 +4623,7 @@ writeTimeLineHistory(TimeLineID newTLI, TimeLineID parentTLI,
* I/O routines for pg_control
*
* *ControlFile is a buffer in shared memory that holds an image of the
- * contents of pg_control. WriteControlFile() initializes pg_control
+ * contents of pg_control.  WriteControlFile() initializes pg_control
* given a preloaded buffer, ReadControlFile() loads the buffer from
* the pg_control file (during postmaster or standalone-backend startup),
* and UpdateControlFile() rewrites pg_control after we modify xlog state.
@@ -4989,7 +4989,7 @@ check_wal_buffers(int *newval, void **extra, GucSource source)
{
/*
* If we haven't yet changed the boot_val default of -1, just let it
- * be. We'll fix it when XLOGShmemSize is called.
+ * be.  We'll fix it when XLOGShmemSize is called.
*/
if (XLOGbuffers == -1)
return true;
@@ -5486,7 +5486,7 @@ readRecoveryCommandFile(void)
/*
* If user specified recovery_target_timeline, validate it or compute the
- * "latest" value. We can't do this until after we've gotten the restore
+ * "latest" value.  We can't do this until after we've gotten the restore
* command and set InArchiveRecovery, because we need to fetch timeline
* history files from the archive.
*/
@@ -5549,7 +5549,7 @@ exitArchiveRecovery(TimeLineID endTLI, uint32 endLogId, uint32 endLogSeg)
* the existing xlog segment (if any) with the archival version.  This is
* because whatever is in XLOGDIR is very possibly older than what we have
* from the archives, since it could have come from restoring a PGDATA
- * backup. In any case, the archival version certainly is more
+ * backup.  In any case, the archival version certainly is more
* descriptive of what our current database state is, because that is what
* we replayed from.
*
@@ -6121,7 +6121,7 @@ StartupXLOG(void)
ValidateXLOGDirectoryStructure();
/*
- * Clear out any old relcache cache files. This is *necessary* if we do
+ * Clear out any old relcache cache files.  This is *necessary* if we do
* any WAL replay, since that would probably result in the cache files
* being out of sync with database reality.  In theory we could leave them
* in place if the database had been cleanly shut down, but it seems
@@ -6783,8 +6783,8 @@ StartupXLOG(void)
/*
* Consider whether we need to assign a new timeline ID.
*
- * If we are doing an archive recovery, we always assign a new ID. This
- * handles a couple of issues. If we stopped short of the end of WAL
+ * If we are doing an archive recovery, we always assign a new ID.  This
+ * handles a couple of issues.  If we stopped short of the end of WAL
* during recovery, then we are clearly generating a new timeline and must
* assign it a unique new ID.  Even if we ran to the end, modifying the
* current last segment is problematic because it may result in trying to
@@ -6832,7 +6832,7 @@ StartupXLOG(void)
/*
* Tricky point here: readBuf contains the *last* block that the LastRec
- * record spans, not the one it starts in. The last block is indeed the
+ * record spans, not the one it starts in.  The last block is indeed the
* one we want to use.
*/
Assert(readOff == (XLogCtl->xlblocks[0].xrecoff - XLOG_BLCKSZ) % XLogSegSize);
@@ -6863,7 +6863,7 @@ StartupXLOG(void)
* Write.curridx must point to the *next* page (see XLogWrite()).
*
* Note: it might seem we should do AdvanceXLInsertBuffer() here, but
- * this is sufficient. The first actual attempt to insert a log
+ * this is sufficient.  The first actual attempt to insert a log
* record will advance the insert state.
*/
XLogCtl->Write.curridx = NextBufIdx(0);
@@ -7009,7 +7009,7 @@ StartupXLOG(void)
XLogReportParameters();
/*
- * All done. Allow backends to write WAL. (Although the bool flag is
+ * All done.  Allow backends to write WAL.  (Although the bool flag is
* probably atomic in itself, we use the info_lck here to ensure that
* there are no race conditions concerning visibility of other recent
* updates to shared memory.)
@@ -7108,7 +7108,7 @@ RecoveryInProgress(void)
/*
* Initialize TimeLineID and RedoRecPtr when we discover that recovery
* is finished.  InitPostgres() relies upon this behaviour to ensure
- * that InitXLOGAccess() is called at backend startup. (If you change
+ * that InitXLOGAccess() is called at backend startup.  (If you change
* this, see also LocalSetXLogInsertAllowed.)
*/
if (!LocalRecoveryInProgress)
@@ -7716,7 +7716,7 @@ CreateCheckPoint(int flags)
/*
* If this isn't a shutdown or forced checkpoint, and we have not inserted
* any XLOG records since the start of the last checkpoint, skip the
- * checkpoint. The idea here is to avoid inserting duplicate checkpoints
+ * checkpoint.  The idea here is to avoid inserting duplicate checkpoints
* when the system is idle.  That wastes log space, and more importantly it
* exposes us to possible loss of both current and previous checkpoint
* records if the machine crashes just as we're writing the update.
@@ -8035,9 +8035,9 @@ CreateCheckPoint(int flags)
/*
* Truncate pg_subtrans if possible.  We can throw away all data before
- * the oldest XMIN of any running transaction. No future transaction will
+ * the oldest XMIN of any running transaction.  No future transaction will
* attempt to reference any pg_subtrans entry older than that (see Asserts
- * in subtrans.c). During recovery, though, we mustn't do this because
+ * in subtrans.c).  During recovery, though, we mustn't do this because
* StartupSUBTRANS hasn't been called yet.
*/
if (!RecoveryInProgress())
@@ -8303,9 +8303,9 @@ CreateRestartPoint(int flags)
/*
* Truncate pg_subtrans if possible.  We can throw away all data before
- * the oldest XMIN of any running transaction. No future transaction will
+ * the oldest XMIN of any running transaction.  No future transaction will
* attempt to reference any pg_subtrans entry older than that (see Asserts
- * in subtrans.c). When hot standby is disabled, though, we mustn't do
+ * in subtrans.c).  When hot standby is disabled, though, we mustn't do
* this because StartupSUBTRANS hasn't been called yet.
*/
if (EnableHotStandby)
@@ -8353,7 +8353,7 @@ XLogPutNextOid(Oid nextOid)
* We need not flush the NEXTOID record immediately, because any of the
* just-allocated OIDs could only reach disk as part of a tuple insert or
* update that would have its own XLOG record that must follow the NEXTOID
- * record. Therefore, the standard buffer LSN interlock applied to those
+ * record.  Therefore, the standard buffer LSN interlock applied to those
* records will ensure no such OID reaches disk before the NEXTOID record
* does.
*
@@ -8830,7 +8830,7 @@ get_sync_bit(int method)
/*
* Optimize writes by bypassing kernel cache with O_DIRECT when using
- * O_SYNC/O_FSYNC and O_DSYNC. But only if archiving and streaming are
+ * O_SYNC/O_FSYNC and O_DSYNC.  But only if archiving and streaming are
* disabled, otherwise the archive command or walsender process will read
* the WAL soon after writing it, which is guaranteed to cause a physical
* read if we bypassed the kernel cache.  We also skip the
@@ -9054,7 +9054,7 @@ do_pg_start_backup(const char *backupidstr, bool fast, char **labelfile)
* during an on-line backup even if not doing so at other times, because
* it's quite possible for the backup dump to obtain a "torn" (partially
* written) copy of a database page if it reads the page concurrently with
- * our write to the same page. This can be fixed as long as the first
+ * our write to the same page.  This can be fixed as long as the first
* write to the page in the WAL sequence is a full-page write.  Hence, we
* turn on forcePageWrites and then force a CHECKPOINT, to ensure there
* are no dirty pages in shared memory that might get dumped while the
@@ -9092,7 +9092,7 @@ do_pg_start_backup(const char *backupidstr, bool fast, char **labelfile)
do
{
/*
- * Force a CHECKPOINT. Aside from being necessary to prevent torn
+ * Force a CHECKPOINT.  Aside from being necessary to prevent torn
* page problems, this guarantees that two successive backup runs
* will have different checkpoint positions and hence different
* history file names, even if nothing happened in between.
@@ -9879,7 +9879,7 @@ pg_xlogfile_name(PG_FUNCTION_ARGS)
*
* If we see a backup_label during recovery, we assume that we are recovering
* from a backup dump file, and we therefore roll forward from the checkpoint
- * identified by the label file, NOT what pg_control says. This avoids the
+ * identified by the label file, NOT what pg_control says.  This avoids the
* problem that pg_control might have been archived one or more checkpoints
* later than the start of the dump, and so if we rely on it as the start
* point, we will fail to restore a consistent database state.
@@ -10047,7 +10047,7 @@ startupproc_quickdie(SIGNAL_ARGS)
on_exit_reset();
/*
- * Note we do exit(2) not exit(0). This is to force the postmaster into a
+ * Note we do exit(2) not exit(0).  This is to force the postmaster into a
* system reset cycle if some idiot DBA sends a manual SIGQUIT to a random
* backend.  This is necessary precisely because we don't clean up our
* shared memory state.  (The "dead man switch" mechanism in pmsignal.c
@@ -10594,9 +10594,9 @@ triggered:
* in the current WAL page, previously read by XLogPageRead().
*
* 'emode' is the error mode that would be used to report a file-not-found
- * or legitimate end-of-WAL situation. Generally, we use it as-is, but if
+ * or legitimate end-of-WAL situation.  Generally, we use it as-is, but if
* we're retrying the exact same record that we've tried previously, only
- * complain the first time to keep the noise down. However, we only do when
+ * complain the first time to keep the noise down.  However, we only do when
* reading from pg_xlog, because we don't expect any invalid records in archive
* or in records streamed from master.  Files in the archive should be complete,
* and we should never hit the end of WAL because we stop and wait for more WAL
diff --git a/src/backend/bootstrap/bootstrap.c b/src/backend/bootstrap/bootstrap.c
index 40ed7ec5112..187af2a53ef 100644
--- a/src/backend/bootstrap/bootstrap.c
+++ b/src/backend/bootstrap/bootstrap.c
@@ -77,7 +77,7 @@ int numattr; /* number of attributes for cur. rel */
* in the core "bootstrapped" catalogs.
*
* XXX several of these input/output functions do catalog scans
- * (e.g., F_REGPROCIN scans pg_proc). this obviously creates some
+ * (e.g., F_REGPROCIN scans pg_proc).  this obviously creates some
* order dependencies in the catalog creation process.
*/
struct typinfo
@@ -367,7 +367,7 @@ AuxiliaryProcessMain(int argc, char *argv[])
#endif
/*
- * Assign the ProcSignalSlot for an auxiliary process. Since it
+ * Assign the ProcSignalSlot for an auxiliary process.  Since it
* doesn't have a BackendId, the slot is statically allocated based on
* the auxiliary process type (auxType).  Backends use slots indexed
* in the range from 1 to MaxBackends (inclusive), so we use
@@ -549,7 +549,7 @@ bootstrap_signals(void)
}
/*
- * Begin shutdown of an auxiliary process. This is approximately the equivalent
+ * Begin shutdown of an auxiliary process.  This is approximately the equivalent
* of ShutdownPostgres() in postinit.c.  We can't run transactions in an
* auxiliary process, so most of the work of AbortTransaction() is not needed,
* but we do need to make sure we've released any LWLocks we are holding.
@@ -860,7 +860,7 @@ cleanup(void)
* and not an OID at all, until the first reference to a type not known in
* TypInfo[].  At that point it will read and cache pg_type in the Typ array,
* and subsequently return a real OID (and set the global pointer Ap to
- * point at the found row in Typ). So caller must check whether Typ is
+ * point at the found row in Typ).  So caller must check whether Typ is
* still NULL to determine what the return value is!
* ----------------
*/
@@ -1057,9 +1057,9 @@ MapArrayTypeName(char *s)
*
* At bootstrap time, we define a bunch of indexes on system catalogs.
* We postpone actually building the indexes until just before we're
- * finished with initialization, however. This is because the indexes
+ * finished with initialization, however.  This is because the indexes
* themselves have catalog entries, and those have to be included in the
- * indexes on those catalogs. Doing it in two phases is the simplest
+ * indexes on those catalogs.  Doing it in two phases is the simplest
* way of making sure the indexes have the right contents at the end.
*/
void
@@ -1072,7 +1072,7 @@ index_register(Oid heap,
/*
* XXX mao 10/31/92 -- don't gc index reldescs, associated info at
- * bootstrap time. we'll declare the indexes now, but want to create them
+ * bootstrap time.  we'll declare the indexes now, but want to create them
* later.
*/
diff --git a/src/backend/catalog/aclchk.c b/src/backend/catalog/aclchk.c
index cf5e5f03a6b..acfb8fa4c93 100644
--- a/src/backend/catalog/aclchk.c
+++ b/src/backend/catalog/aclchk.c
@@ -300,7 +300,7 @@ restrict_and_check_grant(bool is_grant, AclMode avail_goptions, bool all_privs,
/*
* Restrict the operation to what we can actually grant or revoke, and
- * issue a warning if appropriate. (For REVOKE this isn't quite what the
+ * issue a warning if appropriate.  (For REVOKE this isn't quite what the
* spec says to do: the spec seems to want a warning only if no privilege
* bits actually change in the ACL.  In practice that behavior seems much
* too noisy, as well as inconsistent with the GRANT case.)
@@ -1046,7 +1046,7 @@ SetDefaultACL(InternalDefaultACL *iacls)
/*
* The default for a global entry is the hard-wired default ACL for the
- * particular object type. The default for non-global entries is an empty
+ * particular object type.  The default for non-global entries is an empty
* ACL.  This must be so because global entries replace the hard-wired
* defaults, while others are added on.
*/
@@ -1597,7 +1597,7 @@ ExecGrant_Attribute(InternalGrant *istmt, Oid relOid, const char *relname,
* If the updated ACL is empty, we can set attacl to null, and maybe even
* avoid an update of the pg_attribute row.  This is worth testing because
* we'll come through here multiple times for any relation-level REVOKE,
- * even if there were never any column GRANTs. Note we are assuming that
+ * even if there were never any column GRANTs.  Note we are assuming that
* the "default" ACL state for columns is empty.
*/
if (ACL_NUM(new_acl) > 0)
@@ -1722,7 +1722,7 @@ ExecGrant_Relation(InternalGrant *istmt)
{
/*
* Mention the object name because the user needs to know
- * which operations succeeded. This is required because
+ * which operations succeeded.  This is required because
* WARNING allows the command to continue.
*/
ereport(WARNING,
@@ -1751,7 +1751,7 @@ ExecGrant_Relation(InternalGrant *istmt)
/*
* Set up array in which we'll accumulate any column privilege bits
- * that need modification. The array is indexed such that entry [0]
+ * that need modification.  The array is indexed such that entry [0]
* corresponds to FirstLowInvalidHeapAttributeNumber.
*/
num_col_privileges = pg_class_tuple->relnatts - FirstLowInvalidHeapAttributeNumber + 1;
@@ -3282,7 +3282,7 @@ pg_aclmask(AclObjectKind objkind, Oid table_oid, AttrNumber attnum, Oid roleid,
*
* Note: this considers only privileges granted specifically on the column.
* It is caller's responsibility to take relation-level privileges into account
- * as appropriate. (For the same reason, we have no special case for
+ * as appropriate.  (For the same reason, we have no special case for
* superuser-ness here.)
*/
AclMode
@@ -3395,12 +3395,12 @@ pg_class_aclmask(Oid table_oid, Oid roleid,
/*
* Deny anyone permission to update a system catalog unless
- * pg_authid.rolcatupdate is set. (This is to let superusers protect
+ * pg_authid.rolcatupdate is set.  (This is to let superusers protect
* themselves from themselves.)  Also allow it if allowSystemTableMods.
*
* As of 7.4 we have some updatable system views; those shouldn't be
* protected in this way.  Assume the view rules can take care of
- * themselves. ACL_USAGE is if we ever have system sequences.
+ * themselves.  ACL_USAGE is if we ever have system sequences.
*/
if ((mask & (ACL_INSERT | ACL_UPDATE | ACL_DELETE | ACL_TRUNCATE | ACL_USAGE)) &&
IsSystemClass(classForm) &&
@@ -4029,7 +4029,7 @@ pg_attribute_aclcheck_all(Oid table_oid, Oid roleid, AclMode mode,
ReleaseSysCache(classTuple);
/*
- * Initialize result in case there are no non-dropped columns. We want to
+ * Initialize result in case there are no non-dropped columns.  We want to
* report failure in such cases for either value of 'how'.
*/
result = ACLCHECK_NO_PRIV;
diff --git a/src/backend/catalog/catalog.c b/src/backend/catalog/catalog.c
index cbce0072de1..03799b5ba6a 100644
--- a/src/backend/catalog/catalog.c
+++ b/src/backend/catalog/catalog.c
@@ -82,7 +82,7 @@ forkname_to_number(char *forkName)
* forkname_chars
* We use this to figure out whether a filename could be a relation
* fork (as opposed to an oddly named stray file that somehow ended
- * up in the database directory). If the passed string begins with
+ * up in the database directory).  If the passed string begins with
* a fork name (other than the main fork name), we return its length,
* and set *fork (if not NULL) to the fork number.  If not, we return 0.
*
@@ -357,7 +357,7 @@ IsReservedName(const char *name)
*
* Hard-wiring this list is pretty grotty, but we really need it so that
* we can compute the locktag for a relation (and then lock it) without
- * having already read its pg_class entry. If we try to retrieve relisshared
+ * having already read its pg_class entry.  If we try to retrieve relisshared
* from pg_class with no pre-existing lock, there is a race condition against
* anyone who is concurrently committing a change to the pg_class entry:
* since we read system catalog entries under SnapshotNow, it's possible
@@ -426,7 +426,7 @@ IsSharedRelation(Oid relationId)
* Since the OID is not immediately inserted into the table, there is a
* race condition here; but a problem could occur only if someone else
* managed to cycle through 2^32 OIDs and generate the same OID before we
- * finish inserting our row. This seems unlikely to be a problem. Note
+ * finish inserting our row.  This seems unlikely to be a problem.  Note
* that if we had to *commit* the row to end the race condition, the risk
* would be rather higher; therefore we use SnapshotDirty in the test,
* so that we will see uncommitted rows.
diff --git a/src/backend/catalog/dependency.c b/src/backend/catalog/dependency.c
index 3187c9ca5e7..8a85ba0713a 100644
--- a/src/backend/catalog/dependency.c
+++ b/src/backend/catalog/dependency.c
@@ -225,7 +225,7 @@ performDeletion(const ObjectAddress *object,
depRel = heap_open(DependRelationId, RowExclusiveLock);
/*
- * Acquire deletion lock on the target object. (Ideally the caller has
+ * Acquire deletion lock on the target object.  (Ideally the caller has
* done this already, but many places are sloppy about it.)
*/
AcquireDeletionLock(object);
@@ -351,7 +351,7 @@ performMultipleDeletions(const ObjectAddresses *objects,
/*
* deleteWhatDependsOn: attempt to drop everything that depends on the
- * specified object, though not the object itself. Behavior is always
+ * specified object, though not the object itself.  Behavior is always
* CASCADE.
*
* This is currently used only to clean out the contents of a schema
@@ -373,7 +373,7 @@ deleteWhatDependsOn(const ObjectAddress *object,
depRel = heap_open(DependRelationId, RowExclusiveLock);
/*
- * Acquire deletion lock on the target object. (Ideally the caller has
+ * Acquire deletion lock on the target object.  (Ideally the caller has
* done this already, but many places are sloppy about it.)
*/
AcquireDeletionLock(object);
@@ -425,7 +425,7 @@ deleteWhatDependsOn(const ObjectAddress *object,
*
* For every object that depends on the starting object, acquire a deletion
* lock on the object, add it to targetObjects (if not already there),
- * and recursively find objects that depend on it. An object's dependencies
+ * and recursively find objects that depend on it.  An object's dependencies
* will be placed into targetObjects before the object itself; this means
* that the finished list's order represents a safe deletion order.
*
@@ -477,7 +477,7 @@ findDependentObjects(const ObjectAddress *object,
* will not break a loop at an internal dependency: if we enter the loop
* at an "owned" object we will switch and start at the "owning" object
* instead.  We could probably hack something up to avoid breaking at an
- * auto dependency, too, if we had to. However there are no known cases
+ * auto dependency, too, if we had to.  However there are no known cases
* where that would be necessary.
*/
if (stack_address_present_add_flags(object, flags, stack))
@@ -776,7 +776,7 @@ findDependentObjects(const ObjectAddress *object,
systable_endscan(scan);
/*
- * Finally, we can add the target object to targetObjects. Be careful to
+ * Finally, we can add the target object to targetObjects.  Be careful to
* include any flags that were passed back down to us from inner recursion
* levels.
*/
@@ -831,7 +831,7 @@ reportDependentObjects(const ObjectAddresses *targetObjects,
/*
* We limit the number of dependencies reported to the client to
* MAX_REPORTED_DEPS, since client software may not deal well with
- * enormous error strings. The server log always gets a full report.
+ * enormous error strings.  The server log always gets a full report.
*/
#define MAX_REPORTED_DEPS 100
@@ -864,7 +864,7 @@ reportDependentObjects(const ObjectAddresses *targetObjects,
DEPFLAG_EXTENSION))
{
/*
- * auto-cascades are reported at DEBUG2, not msglevel. We don't
+ * auto-cascades are reported at DEBUG2, not msglevel.  We don't
* try to combine them with the regular message because the
* results are too confusing when client_min_messages and
* log_min_messages are different.
@@ -982,7 +982,7 @@ deleteOneObject(const ObjectAddress *object, Relation depRel)
/*
* First remove any pg_depend records that link from this object to
- * others. (Any records linking to this object should be gone already.)
+ * others.  (Any records linking to this object should be gone already.)
*
* When dropping a whole object (subId = 0), remove all pg_depend records
* for its sub-objects too.
@@ -1017,7 +1017,7 @@ deleteOneObject(const ObjectAddress *object, Relation depRel)
systable_endscan(scan);
/*
- * Delete shared dependency references related to this object. Again, if
+ * Delete shared dependency references related to this object.  Again, if
* subId = 0, remove records for sub-objects too.
*/
deleteSharedDependencyRecordsFor(object->classId, object->objectId,
@@ -1266,13 +1266,13 @@ recordDependencyOnExpr(const ObjectAddress *depender,
* recordDependencyOnSingleRelExpr - find expression dependencies
*
* As above, but only one relation is expected to be referenced (with
- * varno = 1 and varlevelsup = 0). Pass the relation OID instead of a
+ * varno = 1 and varlevelsup = 0).  Pass the relation OID instead of a
* range table.  An additional frammish is that dependencies on that
* relation (or its component columns) will be marked with 'self_behavior',
* whereas 'behavior' is used for everything else.
*
* NOTE: the caller should ensure that a whole-table dependency on the
- * specified relation is created separately, if one is needed. In particular,
+ * specified relation is created separately, if one is needed.  In particular,
* a whole-row Var "relation.*" will not cause this routine to emit any
* dependency item.  This is appropriate behavior for subexpressions of an
* ordinary query, so other cases need to cope as necessary.
@@ -1392,7 +1392,7 @@ find_expr_references_walker(Node *node,
/*
* A whole-row Var references no specific columns, so adds no new
- * dependency. (We assume that there is a whole-table dependency
+ * dependency.  (We assume that there is a whole-table dependency
* arising from each underlying rangetable entry.  While we could
* record such a dependency when finding a whole-row Var that
* references a relation directly, it's quite unclear how to extend
@@ -1451,7 +1451,7 @@ find_expr_references_walker(Node *node,
/*
* If it's a regclass or similar literal referring to an existing
- * object, add a reference to that object. (Currently, only the
+ * object, add a reference to that object.  (Currently, only the
* regclass and regconfig cases have any likely use, but we may as
* well handle all the OID-alias datatypes consistently.)
*/
@@ -2013,7 +2013,7 @@ object_address_present_add_flags(const ObjectAddress *object,
{
/*
* We get here if we find a need to delete a column after
- * having already decided to drop its whole table. Obviously
+ * having already decided to drop its whole table.  Obviously
* we no longer need to drop the column.  But don't plaster
* its flags on the table.
*/
diff --git a/src/backend/catalog/heap.c b/src/backend/catalog/heap.c
index 8cb313ed856..0bb34343fe7 100644
--- a/src/backend/catalog/heap.c
+++ b/src/backend/catalog/heap.c
@@ -21,7 +21,7 @@
* the old heap_create_with_catalog, amcreate, and amdestroy.
* those routines will soon call these routines using the function
* manager,
- * just like the poorly named "NewXXX" routines do. The
+ * just like the poorly named "NewXXX" routines do.  The
* "New" routines are all going to die soon, once and for all!
* -cim 1/13/91
*
@@ -198,7 +198,7 @@ SystemAttributeDefinition(AttrNumber attno, bool relhasoids)
/*
* If the given name is a system attribute name, return a Form_pg_attribute
- * pointer for a prototype definition. If not, return NULL.
+ * pointer for a prototype definition.  If not, return NULL.
*/
Form_pg_attribute
SystemAttributeByName(const char *attname, bool relhasoids)
@@ -505,7 +505,7 @@ CheckAttributeType(const char *attname,
int i;
/*
- * Check for self-containment. Eventually we might be able to allow
+ * Check for self-containment.  Eventually we might be able to allow
* this (just return without complaint, if so) but it's not clear how
* many other places would require anti-recursion defenses before it
* would be safe to allow tables to contain their own rowtype.
@@ -568,7 +568,7 @@ CheckAttributeType(const char *attname,
* attribute to insert (but we ignore attacl and attoptions, which are always
* initialized to NULL).
*
- * indstate is the index state for CatalogIndexInsert. It can be passed as
+ * indstate is the index state for CatalogIndexInsert.  It can be passed as
* NULL, in which case we'll fetch the necessary info.  (Don't do this when
* inserting multiple attributes, because it's a tad more expensive.)
*/
@@ -734,7 +734,7 @@ AddNewAttributeTuples(Oid new_rel_oid,
* Tuple data is taken from new_rel_desc->rd_rel, except for the
* variable-width fields which are not present in a cached reldesc.
* relacl and reloptions are passed in Datum form (to avoid having
- * to reference the data types in heap.h). Pass (Datum) 0 to set them
+ * to reference the data types in heap.h).  Pass (Datum) 0 to set them
* to NULL.
* --------------------------------
*/
@@ -790,7 +790,7 @@ InsertPgClassTuple(Relation pg_class_desc,
tup = heap_form_tuple(RelationGetDescr(pg_class_desc), values, nulls);
/*
- * The new tuple must have the oid already chosen for the rel. Sure would
+ * The new tuple must have the oid already chosen for the rel.  Sure would
* be embarrassing to do this sort of thing in polite company.
*/
HeapTupleSetOid(tup, new_rel_oid);
@@ -1117,7 +1117,7 @@ heap_create_with_catalog(const char *relname,
/*
* Decide whether to create an array type over the relation's rowtype.  We
* do not create any array types for system catalogs (ie, those made
- * during initdb). We create array types for regular relations, views,
+ * during initdb).  We create array types for regular relations, views,
* composite types and foreign tables ... but not, eg, for toast tables or
* sequences.
*/
@@ -1327,8 +1327,8 @@ heap_create_init_fork(Relation rel)
* RelationRemoveInheritance
*
* Formerly, this routine checked for child relations and aborted the
- * deletion if any were found. Now we rely on the dependency mechanism
- * to check for or delete child relations. By the time we get here,
+ * deletion if any were found.  Now we rely on the dependency mechanism
+ * to check for or delete child relations.  By the time we get here,
* there are no children and we need only remove any pg_inherits rows
* linking this relation to its parent(s).
*/
@@ -1571,7 +1571,7 @@ RemoveAttrDefault(Oid relid, AttrNumber attnum,
/*
* RemoveAttrDefaultById
*
- * Remove a pg_attrdef entry specified by OID. This is the guts of
+ * Remove a pg_attrdef entry specified by OID.  This is the guts of
* attribute-default removal.  Note it should be called via performDeletion,
* not directly.
*/
@@ -1963,7 +1963,7 @@ StoreConstraints(Relation rel, List *cooked_constraints)
/*
* Deparsing of constraint expressions will fail unless the just-created
- * pg_attribute tuples for this relation are made visible. So, bump the
+ * pg_attribute tuples for this relation are made visible.  So, bump the
* command counter.  CAUTION: this will cause a relcache entry rebuild.
*/
CommandCounterIncrement();
@@ -2013,7 +2013,7 @@ StoreConstraints(Relation rel, List *cooked_constraints)
* the default and constraint expressions added to the relation.
*
* NB: caller should have opened rel with AccessExclusiveLock, and should
- * hold that lock till end of transaction. Also, we assume the caller has
+ * hold that lock till end of transaction.  Also, we assume the caller has
* done a CommandCounterIncrement if necessary to make the relation's catalog
* tuples visible.
*/
@@ -2155,7 +2155,7 @@ AddRelationNewConstraints(Relation rel,
checknames = lappend(checknames, ccname);
/*
- * Check against pre-existing constraints. If we are allowed to
+ * Check against pre-existing constraints.  If we are allowed to
* merge with an existing constraint, there's no more to do here.
* (We omit the duplicate constraint from the result, which is
* what ATAddCheckConstraint wants.)
@@ -2171,7 +2171,7 @@ AddRelationNewConstraints(Relation rel,
* column constraint and "tab_check" for a table constraint.  We
* no longer have any info about the syntactic positioning of the
* constraint phrase, so we approximate this by seeing whether the
- * expression references more than one column. (If the user
+ * expression references more than one column.  (If the user
* played by the rules, the result is the same...)
*
* Note: pull_var_clause() doesn't descend into sublinks, but we
@@ -2563,7 +2563,7 @@ RemoveStatistics(Oid relid, AttrNumber attnum)
* with the heap relation to zero tuples.
*
* The routine will truncate and then reconstruct the indexes on
- * the specified relation. Caller must hold exclusive lock on rel.
+ * the specified relation.  Caller must hold exclusive lock on rel.
*/
static void
RelationTruncateIndexes(Relation heapRelation)
@@ -2603,7 +2603,7 @@ RelationTruncateIndexes(Relation heapRelation)
* This routine deletes all data within all the specified relations.
*
* This is not transaction-safe!  There is another, transaction-safe
- * implementation in commands/tablecmds.c. We now use this only for
+ * implementation in commands/tablecmds.c.  We now use this only for
* ON COMMIT truncation of temporary tables, where it doesn't matter.
*/
void
@@ -2712,7 +2712,7 @@ heap_truncate_check_FKs(List *relations, bool tempTables)
return;
/*
- * Otherwise, must scan pg_constraint. We make one pass with all the
+ * Otherwise, must scan pg_constraint.  We make one pass with all the
* relations considered; if this finds nothing, then all is well.
*/
dependents = heap_truncate_find_FKs(oids);
@@ -2773,7 +2773,7 @@ heap_truncate_check_FKs(List *relations, bool tempTables)
* behavior to change depending on chance locations of rows in pg_constraint.)
*
* Note: caller should already have appropriate lock on all rels mentioned
- * in relationIds. Since adding or dropping an FK requires exclusive lock
+ * in relationIds.  Since adding or dropping an FK requires exclusive lock
* on both rels, this ensures that the answer will be stable.
*/
List *
diff --git a/src/backend/catalog/index.c b/src/backend/catalog/index.c
index 4e6222b3ba9..2eb2aa272e2 100644
--- a/src/backend/catalog/index.c
+++ b/src/backend/catalog/index.c
@@ -412,7 +412,7 @@ ConstructTupleDescriptor(Relation heapRelation,
/*
* We do not yet have the correct relation OID for the index, so just
- * set it invalid for now. InitializeAttributeOids() will fix it
+ * set it invalid for now.  InitializeAttributeOids() will fix it
* later.
*/
to->attrelid = InvalidOid;
@@ -650,7 +650,7 @@ UpdateIndexRelation(Oid indexoid,
* heapRelation: table to build index on (suitably locked by caller)
* indexRelationName: what it say
* indexRelationId: normally, pass InvalidOid to let this routine
- * generate an OID for the index. During bootstrap this may be
+ * generate an OID for the index.  During bootstrap this may be
* nonzero to specify a preselected OID.
* indexInfo: same info executor uses to insert into the index
* indexColNames: column names to use for index (List of char *)
@@ -667,7 +667,7 @@ UpdateIndexRelation(Oid indexoid,
* allow_system_table_mods: allow table to be a system catalog
* skip_build: true to skip the index_build() step for the moment; caller
* must do it later (typically via reindex_index())
- * concurrent: if true, do not lock the table against writers. The index
+ * concurrent: if true, do not lock the table against writers.  The index
* will be marked "invalid" and the caller must take additional steps
* to fix it up.
*
@@ -953,7 +953,7 @@ index_create(Relation heapRelation,
/*
* If there are no simply-referenced columns, give the index an
- * auto dependency on the whole table. In most cases, this will
+ * auto dependency on the whole table.  In most cases, this will
* be redundant, but it might not be if the index expressions and
* predicate contain no Vars or only whole-row Vars.
*/
@@ -1075,7 +1075,7 @@ index_create(Relation heapRelation,
/*
* Close the index; but we keep the lock that we acquired above until end
- * of transaction. Closing the heap is caller's responsibility.
+ * of transaction.  Closing the heap is caller's responsibility.
*/
index_close(indexRelation, NoLock);
@@ -1229,7 +1229,7 @@ index_constraint_create(Relation heapRelation,
* have been so marked already, so no need to clear the flag in the other
* case.
*
- * Note: this might better be done by callers. We do it here to avoid
+ * Note: this might better be done by callers.  We do it here to avoid
* exposing index_update_stats() globally, but that wouldn't be necessary
* if relhaspkey went away.
*/
@@ -1404,7 +1404,7 @@ index_drop(Oid indexId)
*
* IndexInfo stores the information about the index that's needed by
* FormIndexDatum, which is used for both index_build() and later insertion
- * of individual index tuples. Normally we build an IndexInfo for an index
+ * of individual index tuples.  Normally we build an IndexInfo for an index
* just once per command, and then use it for (potentially) many tuples.
* ----------------
*/
@@ -1474,7 +1474,7 @@ BuildIndexInfo(Relation index)
* context must point to the heap tuple passed in.
*
* Notice we don't actually call index_form_tuple() here; we just prepare
- * its input arrays values[] and isnull[]. This is because the index AM
+ * its input arrays values[] and isnull[].  This is because the index AM
* may wish to alter the data before storage.
* ----------------
*/
@@ -1540,7 +1540,7 @@ FormIndexDatum(IndexInfo *indexInfo,
* index_update_stats --- update pg_class entry after CREATE INDEX or REINDEX
*
* This routine updates the pg_class row of either an index or its parent
- * relation after CREATE INDEX or REINDEX. Its rather bizarre API is designed
+ * relation after CREATE INDEX or REINDEX.  Its rather bizarre API is designed
* to ensure we can do all the necessary work in just one update.
*
* hasindex: set relhasindex to this value
@@ -1553,7 +1553,7 @@ FormIndexDatum(IndexInfo *indexInfo,
*
* NOTE: an important side-effect of this operation is that an SI invalidation
* message is sent out to all backends --- including me --- causing relcache
- * entries to be flushed or updated with the new data. This must happen even
+ * entries to be flushed or updated with the new data.  This must happen even
* if we find that no change is needed in the pg_class row.  When updating
* a heap entry, this ensures that other backends find out about the new
* index.  When updating an index, it's important because some index AMs
@@ -1592,13 +1592,13 @@ index_update_stats(Relation rel,
* 4. Even with just a single CREATE INDEX, there's a risk factor because
* someone else might be trying to open the rel while we commit, and this
* creates a race condition as to whether he will see both or neither of
- * the pg_class row versions as valid. Again, a non-transactional update
+ * the pg_class row versions as valid.  Again, a non-transactional update
* avoids the risk.  It is indeterminate which state of the row the other
* process will see, but it doesn't matter (if he's only taking
* AccessShareLock, then it's not critical that he see relhasindex true).
*
* It is safe to use a non-transactional update even though our
- * transaction could still fail before committing. Setting relhasindex
+ * transaction could still fail before committing.  Setting relhasindex
* true is safe even if there are no indexes (VACUUM will eventually fix
* it), likewise for relhaspkey.  And of course the relpages and reltuples
* counts are correct (or at least more so than the old values)
@@ -1608,7 +1608,7 @@ index_update_stats(Relation rel,
pg_class = heap_open(RelationRelationId, RowExclusiveLock);
/*
- * Make a copy of the tuple to update. Normally we use the syscache, but
+ * Make a copy of the tuple to update.  Normally we use the syscache, but
* we can't rely on that during bootstrap or while reindexing pg_class
* itself.
*/
@@ -1699,7 +1699,7 @@ index_update_stats(Relation rel,
* index_build - invoke access-method-specific index build procedure
*
* On entry, the index's catalog entries are valid, and its physical disk
- * file has been created but is empty. We call the AM-specific build
+ * file has been created but is empty.  We call the AM-specific build
* procedure to fill in the index contents.  We then update the pg_class
* entries of the index and heap relation as needed, using statistics
* returned by ambuild as well as data passed by the caller.
@@ -1797,7 +1797,7 @@ index_build(Relation heapRelation,
* Therefore, this code path can only be taken during non-concurrent
* CREATE INDEX.  Thus the fact that heap_update will set the pg_index
* tuple's xmin doesn't matter, because that tuple was created in the
- * current transaction anyway. That also means we don't need to worry
+ * current transaction anyway.  That also means we don't need to worry
* about any concurrent readers of the tuple; no other transaction can see
* it yet.
*/
@@ -1849,7 +1849,7 @@ index_build(Relation heapRelation,
/*
* If it's for an exclusion constraint, make a second pass over the heap
- * to verify that the constraint is satisfied. We must not do this until
+ * to verify that the constraint is satisfied.  We must not do this until
* the index is fully valid.  (Broken HOT chains shouldn't matter, though;
* see comments for IndexCheckExclusion.)
*/
@@ -1874,8 +1874,8 @@ index_build(Relation heapRelation,
* things to add it to the new index.  After we return, the AM's index
* build procedure does whatever cleanup it needs.
*
- * The total count of heap tuples is returned. This is for updating pg_class
- * statistics. (It's annoying not to be able to do that here, but we want
+ * The total count of heap tuples is returned.  This is for updating pg_class
+ * statistics.  (It's annoying not to be able to do that here, but we want
* to merge that update with others; see index_update_stats.)  Note that the
* index AM itself must keep track of the number of index tuples; we don't do
* so here because the AM might reject some of the tuples for its own reasons,
@@ -1925,7 +1925,7 @@ IndexBuildHeapScan(Relation heapRelation,
/*
* Need an EState for evaluation of index expressions and partial-index
- * predicates. Also a slot to hold the current tuple.
+ * predicates.  Also a slot to hold the current tuple.
*/
estate = CreateExecutorState();
econtext = GetPerTupleExprContext(estate);
@@ -1944,7 +1944,7 @@ IndexBuildHeapScan(Relation heapRelation,
* SnapshotAny because we must retrieve all tuples and do our own time
* qual checks (because we have to index RECENTLY_DEAD tuples).  In a
* concurrent build, we take a regular MVCC snapshot and index whatever's
- * live according to that. During bootstrap we just use SnapshotNow.
+ * live according to that.  During bootstrap we just use SnapshotNow.
*/
if (IsBootstrapProcessingMode())
{
@@ -2055,7 +2055,7 @@ IndexBuildHeapScan(Relation heapRelation,
* building it, and may need to see such tuples.)
*
* However, if it was HOT-updated then we must only index
- * the live tuple at the end of the HOT-chain. Since this
+ * the live tuple at the end of the HOT-chain.  Since this
* breaks semantics for pre-existing snapshots, mark the
* index as unusable for them.
*/
@@ -2075,7 +2075,7 @@ IndexBuildHeapScan(Relation heapRelation,
/*
* Since caller should hold ShareLock or better, normally
* the only way to see this is if it was inserted earlier
- * in our own transaction. However, it can happen in
+ * in our own transaction.  However, it can happen in
* system catalogs, since we tend to release write lock
* before commit there.  Give a warning if neither case
* applies.
@@ -2227,7 +2227,7 @@ IndexBuildHeapScan(Relation heapRelation,
/*
* You'd think we should go ahead and build the index tuple here, but
- * some index AMs want to do further processing on the data first. So
+ * some index AMs want to do further processing on the data first.  So
* pass the values[] and isnull[] arrays, instead.
*/
@@ -2314,7 +2314,7 @@ IndexCheckExclusion(Relation heapRelation,
/*
* Need an EState for evaluation of index expressions and partial-index
- * predicates. Also a slot to hold the current tuple.
+ * predicates.  Also a slot to hold the current tuple.
*/
estate = CreateExecutorState();
econtext = GetPerTupleExprContext(estate);
@@ -2392,11 +2392,11 @@ IndexCheckExclusion(Relation heapRelation,
* We do a concurrent index build by first inserting the catalog entry for the
* index via index_create(), marking it not indisready and not indisvalid.
* Then we commit our transaction and start a new one, then we wait for all
- * transactions that could have been modifying the table to terminate. Now
+ * transactions that could have been modifying the table to terminate.  Now
* we know that any subsequently-started transactions will see the index and
* honor its constraints on HOT updates; so while existing HOT-chains might
* be broken with respect to the index, no currently live tuple will have an
- * incompatible HOT update done to it. We now build the index normally via
+ * incompatible HOT update done to it.  We now build the index normally via
* index_build(), while holding a weak lock that allows concurrent
* insert/update/delete.  Also, we index only tuples that are valid
* as of the start of the scan (see IndexBuildHeapScan), whereas a normal
@@ -2410,13 +2410,13 @@ IndexCheckExclusion(Relation heapRelation,
*
* Next, we mark the index "indisready" (but still not "indisvalid") and
* commit the second transaction and start a third.  Again we wait for all
- * transactions that could have been modifying the table to terminate. Now
+ * transactions that could have been modifying the table to terminate.  Now
* we know that any subsequently-started transactions will see the index and
* insert their new tuples into it.  We then take a new reference snapshot
* which is passed to validate_index().  Any tuples that are valid according
* to this snap, but are not in the index, must be added to the index.
* (Any tuples committed live after the snap will be inserted into the
- * index by their originating transaction. Any tuples committed dead before
+ * index by their originating transaction.  Any tuples committed dead before
* the snap need not be indexed, because we will wait out all transactions
* that might care about them before we mark the index valid.)
*
@@ -2425,7 +2425,7 @@ IndexCheckExclusion(Relation heapRelation,
* ever say "delete it".  (This should be faster than a plain indexscan;
* also, not all index AMs support full-index indexscan.)  Then we sort the
* TIDs, and finally scan the table doing a "merge join" against the TID list
- * to see which tuples are missing from the index. Thus we will ensure that
+ * to see which tuples are missing from the index.  Thus we will ensure that
* all tuples valid according to the reference snapshot are in the index.
*
* Building a unique index this way is tricky: we might try to insert a
@@ -2441,7 +2441,7 @@ IndexCheckExclusion(Relation heapRelation,
* were alive at the time of the reference snapshot are gone; this is
* necessary to be sure there are none left with a transaction snapshot
* older than the reference (and hence possibly able to see tuples we did
- * not index). Then we mark the index "indisvalid" and commit. Subsequent
+ * not index).  Then we mark the index "indisvalid" and commit.  Subsequent
* transactions will be able to use it for queries.
*
* Doing two full table scans is a brute-force strategy.  We could try to be
@@ -2467,7 +2467,7 @@ validate_index(Oid heapId, Oid indexId, Snapshot snapshot)
indexRelation = index_open(indexId, RowExclusiveLock);
/*
- * Fetch info needed for index_insert. (You might think this should be
+ * Fetch info needed for index_insert.  (You might think this should be
* passed in from DefineIndex, but its copy is long gone due to having
* been built in a previous transaction.)
*/
@@ -2584,7 +2584,7 @@ validate_index_heapscan(Relation heapRelation,
/*
* Need an EState for evaluation of index expressions and partial-index
- * predicates. Also a slot to hold the current tuple.
+ * predicates.  Also a slot to hold the current tuple.
*/
estate = CreateExecutorState();
econtext = GetPerTupleExprContext(estate);
@@ -2633,7 +2633,7 @@ validate_index_heapscan(Relation heapRelation,
* visit the live tuples in order by their offsets, but the root
* offsets that we need to compare against the index contents might be
* ordered differently.  So we might have to "look back" within the
- * tuplesort output, but only within the current page. We handle that
+ * tuplesort output, but only within the current page.  We handle that
* by keeping a bool array in_index[] showing all the
* already-passed-over tuplesort output TIDs of the current page.  We
* clear that array here, when advancing onto a new heap page.
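A self-contained sketch of the per-page "look back" bookkeeping described above: a bool array records which sorted-output offsets were already passed on the current heap page, and is reset at each page break. MAX_OFFSET and the helper names are hypothetical, not the actual validate_index_heapscan() code:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define MAX_OFFSET 291          /* assumed max line pointers per page */

static bool in_index[MAX_OFFSET + 1];

static void
start_new_heap_page(void)
{
    /* forget look-back state from the previous heap page */
    memset(in_index, 0, sizeof(in_index));
}

static void
mark_tuplesort_output(int offnum)
{
    /* remember that the sorted index-TID stream already covered offnum */
    in_index[offnum] = true;
}

int
main(void)
{
    start_new_heap_page();
    mark_tuplesort_output(3);   /* sorted stream gave us offset 3 ... */
    mark_tuplesort_output(7);   /* ... then offset 7 */

    /* a root offset of 3 now compares as already present ("look back") */
    printf("offset 3 %s in index\n", in_index[3] ? "is" : "is not");
    printf("offset 5 %s in index\n", in_index[5] ? "is" : "is not");
    return 0;
}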
@@ -2714,7 +2714,7 @@ validate_index_heapscan(Relation heapRelation,
/*
* For the current heap tuple, extract all the attributes we use
- * in this index, and note which are null. This also performs
+ * in this index, and note which are null.  This also performs
* evaluation of any expressions needed.
*/
FormIndexDatum(indexInfo,
@@ -2736,7 +2736,7 @@ validate_index_heapscan(Relation heapRelation,
* for a uniqueness check on the whole HOT-chain.  That is, the
* tuple we have here could be dead because it was already
* HOT-updated, and if so the updating transaction will not have
- * thought it should insert index entries. The index AM will
+ * thought it should insert index entries.  The index AM will
* check the whole HOT-chain and correctly detect a conflict if
* there is one.
*/
@@ -2826,7 +2826,7 @@ index_set_state_flags(Oid indexId, IndexStateFlagsAction action)
/*
* IndexGetRelation: given an index's relation OID, get the OID of the
- * relation it is an index on. Uses the system cache.
+ * relation it is an index on.  Uses the system cache.
*/
Oid
IndexGetRelation(Oid indexId)
@@ -2859,7 +2859,7 @@ reindex_index(Oid indexId, bool skip_constraint_checks)
volatile bool skipped_constraint = false;
/*
- * Open and lock the parent heap relation. ShareLock is sufficient since
+ * Open and lock the parent heap relation.  ShareLock is sufficient since
* we only need to be sure no schema or data changes are going on.
*/
heapId = IndexGetRelation(indexId);
@@ -3034,7 +3034,7 @@ reindex_index(Oid indexId, bool skip_constraint_checks)
* the data in a manner that risks a change in constraint validity.
*
* Returns true if any indexes were rebuilt (including toast table's index
- * when relevant). Note that a CommandCounterIncrement will occur after each
+ * when relevant).  Note that a CommandCounterIncrement will occur after each
* index rebuild.
*/
bool
@@ -3047,7 +3047,7 @@ reindex_relation(Oid relid, int flags)
bool result;
/*
- * Open and lock the relation. ShareLock is sufficient since we only need
+ * Open and lock the relation.  ShareLock is sufficient since we only need
* to prevent schema and data changes in it.
*/
rel = heap_open(relid, ShareLock);
@@ -3065,7 +3065,7 @@ reindex_relation(Oid relid, int flags)
* reindex_index will attempt to update the pg_class rows for the relation
* and index. If we are processing pg_class itself, we want to make sure
* that the updates do not try to insert index entries into indexes we
- * have not processed yet. (When we are trying to recover from corrupted
+ * have not processed yet. (When we are trying to recover from corrupted
* indexes, that could easily cause a crash.) We can accomplish this
* because CatalogUpdateIndexes will use the relcache's index list to know
* which indexes to update. We just force the index list to be only the
@@ -3074,7 +3074,7 @@ reindex_relation(Oid relid, int flags)
* It is okay to not insert entries into the indexes we have not processed
* yet because all of this is transaction-safe. If we fail partway
* through, the updated rows are dead and it doesn't matter whether they
- * have index entries. Also, a new pg_class index will be created with a
+ * have index entries. Also, a new pg_class index will be created with a
* correct entry for its own pg_class row because we do
* RelationSetNewRelfilenode() before we do index_build().
*
diff --git a/src/backend/catalog/indexing.c b/src/backend/catalog/indexing.c
index 21f5cce69c8..824af0e7a24 100644
--- a/src/backend/catalog/indexing.c
+++ b/src/backend/catalog/indexing.c
@@ -148,7 +148,7 @@ CatalogIndexInsert(CatalogIndexState indstate, HeapTuple heapTuple)
* CatalogUpdateIndexes - do all the indexing work for a new catalog tuple
*
* This is a convenience routine for the common case where we only need
- * to insert or update a single tuple in a system catalog. Avoid using it for
+ * to insert or update a single tuple in a system catalog. Avoid using it for
* multiple tuples, since opening the indexes and building the index info
* structures is moderately expensive.
*/
diff --git a/src/backend/catalog/namespace.c b/src/backend/catalog/namespace.c
index 15622f8423e..c660dca45cb 100644
--- a/src/backend/catalog/namespace.c
+++ b/src/backend/catalog/namespace.c
@@ -64,10 +64,10 @@
* when we are obeying an override search path spec that says not to use the
* temp namespace, or the temp namespace is included in the explicit list.)
*
- * 2. The system catalog namespace is always searched. If the system
+ * 2. The system catalog namespace is always searched. If the system
* namespace is present in the explicit path then it will be searched in
* the specified order; otherwise it will be searched after TEMP tables and
- * *before* the explicit list. (It might seem that the system namespace
+ * *before* the explicit list. (It might seem that the system namespace
* should be implicitly last, but this behavior appears to be required by
* SQL99. Also, this provides a way to search the system namespace first
* without thereby making it the default creation target namespace.)
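The ordering rules in this hunk compress to a short assembly step. A sketch against the backend's List API; the flags and variable names are illustrative, not the actual recomputeNamespacePath code:

    List *path = NIL;

    if (useTemp && !tempExplicitlyListed)
        path = lappend_oid(path, myTempNamespace);        /* temp tables first */

    if (!catalogExplicitlyListed)
        path = lappend_oid(path, PG_CATALOG_NAMESPACE);   /* implicit pg_catalog next */

    path = list_concat(path, explicitList);               /* then the explicit entries */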
@@ -85,7 +85,7 @@
* to refer to the current backend's temp namespace. This is usually also
* ignorable if the temp namespace hasn't been set up, but there's a special
* case: if "pg_temp" appears first then it should be the default creation
- * target. We kluge this case a little bit so that the temp namespace isn't
+ * target. We kluge this case a little bit so that the temp namespace isn't
* set up until the first attempt to create something in it. (The reason for
* klugery is that we can't create the temp namespace outside a transaction,
* but initial GUC processing of search_path happens outside a transaction.)
@@ -96,7 +96,7 @@
* In bootstrap mode, the search path is set equal to "pg_catalog", so that
* the system namespace is the only one searched or inserted into.
* initdb is also careful to set search_path to "pg_catalog" for its
- * post-bootstrap standalone backend runs. Otherwise the default search
+ * post-bootstrap standalone backend runs. Otherwise the default search
* path is determined by GUC. The factory default path contains the PUBLIC
* namespace (if it exists), preceded by the user's personal namespace
* (if one exists).
@@ -160,13 +160,13 @@ static List *overrideStack = NIL;
/*
* myTempNamespace is InvalidOid until and unless a TEMP namespace is set up
* in a particular backend session (this happens when a CREATE TEMP TABLE
- * command is first executed). Thereafter it's the OID of the temp namespace.
+ * command is first executed). Thereafter it's the OID of the temp namespace.
*
* myTempToastNamespace is the OID of the namespace for my temp tables' toast
- * tables. It is set when myTempNamespace is, and is InvalidOid before that.
+ * tables. It is set when myTempNamespace is, and is InvalidOid before that.
*
* myTempNamespaceSubID shows whether we've created the TEMP namespace in the
- * current subtransaction. The flag propagates up the subtransaction tree,
+ * current subtransaction. The flag propagates up the subtransaction tree,
* so the main transaction will correctly recognize the flag if all
* intermediate subtransactions commit. When it is InvalidSubTransactionId,
* we either haven't made the TEMP namespace yet, or have successfully
@@ -642,7 +642,7 @@ TypeIsVisible(Oid typid)
* and the returned nvargs will always be zero.
*
* If expand_defaults is true, functions that could match after insertion of
- * default argument values will also be retrieved. In this case the returned
+ * default argument values will also be retrieved. In this case the returned
* structs could have nargs > passed-in nargs, and ndargs is set to the number
* of additional args (which can be retrieved from the function's
* proargdefaults entry).
@@ -798,7 +798,7 @@ FuncnameGetCandidates(List *names, int nargs, List *argnames,
* Call uses positional notation
*
* Check if function is variadic, and get variadic element type if
- * so. If expand_variadic is false, we should just ignore
+ * so. If expand_variadic is false, we should just ignore
* variadic-ness.
*/
if (pronargs <= nargs && expand_variadic)
@@ -928,7 +928,7 @@ FuncnameGetCandidates(List *names, int nargs, List *argnames,
if (prevResult)
{
/*
- * We have a match with a previous result. Decide which one
+ * We have a match with a previous result. Decide which one
* to keep, or mark it ambiguous if we can't decide. The
* logic here is preference > 0 means prefer the old result,
* preference < 0 means prefer the new, preference = 0 means
@@ -1313,7 +1313,7 @@ OpernameGetOprid(List *names, Oid oprleft, Oid oprright)
* identical entries in later namespaces.
*
* The returned items always have two args[] entries --- one or the other
- * will be InvalidOid for a prefix or postfix oprkind. nargs is 2, too.
+ * will be InvalidOid for a prefix or postfix oprkind. nargs is 2, too.
*/
FuncCandidateList
OpernameGetCandidates(List *names, char oprkind)
@@ -2282,7 +2282,7 @@ get_ts_config_oid(List *names, bool missing_ok)
/*
* TSConfigIsVisible
* Determine whether a text search configuration (identified by OID)
- * is visible in the current search path. Visible means "would be found
+ * is visible in the current search path. Visible means "would be found
* by searching for the unqualified text search configuration name".
*/
bool
@@ -2595,7 +2595,7 @@ QualifiedNameGetCreationNamespace(List *names, char **objname_p)
/*
* get_namespace_oid - given a namespace name, look up the OID
*
- * If missing_ok is false, throw an error if namespace name not found. If
+ * If missing_ok is false, throw an error if namespace name not found. If
* true, just return InvalidOid.
*/
Oid
@@ -2810,7 +2810,7 @@ GetTempNamespaceBackendId(Oid namespaceId)
/*
* GetTempToastNamespace - get the OID of my temporary-toast-table namespace,
- * which must already be assigned. (This is only used when creating a toast
+ * which must already be assigned. (This is only used when creating a toast
* table for a temp table, so we must have already done InitTempTableNamespace)
*/
Oid
@@ -2868,8 +2868,8 @@ GetOverrideSearchPath(MemoryContext context)
*
* It's possible that newpath->useTemp is set but there is no longer any
* active temp namespace, if the path was saved during a transaction that
- * created a temp namespace and was later rolled back. In that case we just
- * ignore useTemp. A plausible alternative would be to create a new temp
+ * created a temp namespace and was later rolled back. In that case we just
+ * ignore useTemp. A plausible alternative would be to create a new temp
* namespace, but for existing callers that's not necessary because an empty
* temp namespace wouldn't affect their results anyway.
*
@@ -2902,7 +2902,7 @@ PushOverrideSearchPath(OverrideSearchPath *newpath)
firstNS = linitial_oid(oidlist);
/*
- * Add any implicitly-searched namespaces to the list. Note these go on
+ * Add any implicitly-searched namespaces to the list. Note these go on
* the front, not the back; also notice that we do not check USAGE
* permissions for these.
*/
@@ -3217,7 +3217,7 @@ recomputeNamespacePath(void)
}
/*
- * Remember the first member of the explicit list. (Note: this is
+ * Remember the first member of the explicit list. (Note: this is
* nominally wrong if temp_missing, but we need it anyway to distinguish
* explicit from implicit mention of pg_catalog.)
*/
@@ -3227,7 +3227,7 @@ recomputeNamespacePath(void)
firstNS = linitial_oid(oidlist);
/*
- * Add any implicitly-searched namespaces to the list. Note these go on
+ * Add any implicitly-searched namespaces to the list. Note these go on
* the front, not the back; also notice that we do not check USAGE
* permissions for these.
*/
@@ -3282,7 +3282,7 @@ InitTempTableNamespace(void)
/*
* First, do permission check to see if we are authorized to make temp
- * tables. We use a nonstandard error message here since "databasename:
+ * tables. We use a nonstandard error message here since "databasename:
* permission denied" might be a tad cryptic.
*
* Note that ACL_CREATE_TEMP rights are rechecked in pg_namespace_aclmask;
@@ -3301,9 +3301,9 @@ InitTempTableNamespace(void)
* Do not allow a Hot Standby slave session to make temp tables. Aside
* from problems with modifying the system catalogs, there is a naming
* conflict: pg_temp_N belongs to the session with BackendId N on the
- * master, not to a slave session with the same BackendId. We should not
+ * master, not to a slave session with the same BackendId. We should not
* be able to get here anyway due to XactReadOnly checks, but let's just
- * make real sure. Note that this also backstops various operations that
+ * make real sure. Note that this also backstops various operations that
* allow XactReadOnly transactions to modify temp tables; they'd need
* RecoveryInProgress checks if not for this.
*/
@@ -3564,7 +3564,7 @@ check_search_path(char **newval, void **extra, GucSource source)
/*
* If we aren't inside a transaction, we cannot do database access so
- * cannot verify the individual names. Must accept the list on faith.
+ * cannot verify the individual names. Must accept the list on faith.
* Also, if the value is coming from a noninteractive source, accept it
* anyway.
*/
@@ -3577,7 +3577,7 @@ check_search_path(char **newval, void **extra, GucSource source)
* for USAGE rights, either; should we?
*
* When source == PGC_S_TEST, we are checking the argument of an ALTER
- * DATABASE SET or ALTER USER SET command. It could be that the
+ * DATABASE SET or ALTER USER SET command. It could be that the
* intended use of the search path is for some other database, so we
* should not error out if it mentions schemas not present in the
* current database. We issue a NOTICE instead.
@@ -3698,7 +3698,7 @@ fetch_search_path(bool includeImplicit)
/*
* If the temp namespace should be first, force it to exist. This is so
* that callers can trust the result to reflect the actual default
- * creation namespace. It's a bit bogus to do this here, since
+ * creation namespace. It's a bit bogus to do this here, since
* current_schema() is supposedly a stable function without side-effects,
* but the alternatives seem worse.
*/
@@ -3720,7 +3720,7 @@ fetch_search_path(bool includeImplicit)
/*
* Fetch the active search path into a caller-allocated array of OIDs.
- * Returns the number of path entries. (If this is more than sarray_len,
+ * Returns the number of path entries. (If this is more than sarray_len,
* then the data didn't fit and is not all stored.)
*
* The returned list always includes the implicitly-prepended namespaces,
diff --git a/src/backend/catalog/objectaddress.c b/src/backend/catalog/objectaddress.c
index 403bd9e283c..988189d5bf6 100644
--- a/src/backend/catalog/objectaddress.c
+++ b/src/backend/catalog/objectaddress.c
@@ -242,7 +242,7 @@ get_object_address(ObjectType objtype, List *objname, List *objargs,
/*
* If we're dealing with a relation or attribute, then the relation is
- * already locked. If we're dealing with any other type of object, we
+ * already locked. If we're dealing with any other type of object, we
* need to lock it and then verify that it still exists.
*/
if (address.classId != RelationRelationId)
diff --git a/src/backend/catalog/pg_collation.c b/src/backend/catalog/pg_collation.c
index 383f36ef1d5..5dec4fe152e 100644
--- a/src/backend/catalog/pg_collation.c
+++ b/src/backend/catalog/pg_collation.c
@@ -76,7 +76,7 @@ CollationCreate(const char *collname, Oid collnamespace,
collname, pg_encoding_to_char(collencoding))));
/*
- * Also forbid matching an any-encoding entry. This test of course is not
+ * Also forbid matching an any-encoding entry. This test of course is not
* backed up by the unique index, but it's not a problem since we don't
* support adding any-encoding entries after initdb.
*/
diff --git a/src/backend/catalog/pg_constraint.c b/src/backend/catalog/pg_constraint.c
index 79c5cfa345d..51f50d21947 100644
--- a/src/backend/catalog/pg_constraint.c
+++ b/src/backend/catalog/pg_constraint.c
@@ -37,7 +37,7 @@
* Create a constraint table entry.
*
* Subsidiary records (such as triggers or indexes to implement the
- * constraint) are *not* created here. But we do make dependency links
+ * constraint) are *not* created here. But we do make dependency links
* from the constraint to the things it depends on.
*/
Oid
@@ -301,7 +301,7 @@ CreateConstraintEntry(const char *constraintName,
{
/*
* Register normal dependency on the unique index that supports a
- * foreign-key constraint. (Note: for indexes associated with unique
+ * foreign-key constraint. (Note: for indexes associated with unique
* or primary-key constraints, the dependency runs the other way, and
* is not made here.)
*/
@@ -828,10 +828,10 @@ get_constraint_oid(Oid relid, const char *conname, bool missing_ok)
* the rel of interest are Vars with the indicated varno/varlevelsup.
*
* Currently we only check to see if the rel has a primary key that is a
- * subset of the grouping_columns. We could also use plain unique constraints
+ * subset of the grouping_columns. We could also use plain unique constraints
* if all their columns are known not null, but there's a problem: we need
* to be able to represent the not-null-ness as part of the constraints added
- * to *constraintDeps. FIXME whenever not-null constraints get represented
+ * to *constraintDeps. FIXME whenever not-null constraints get represented
* in pg_constraint.
*/
bool
diff --git a/src/backend/catalog/pg_db_role_setting.c b/src/backend/catalog/pg_db_role_setting.c
index 494704f5d09..d9b1345e461 100644
--- a/src/backend/catalog/pg_db_role_setting.c
+++ b/src/backend/catalog/pg_db_role_setting.c
@@ -169,7 +169,7 @@ AlterSetting(Oid databaseid, Oid roleid, VariableSetStmt *setstmt)
/*
* Drop some settings from the catalog. These can be for a particular
- * database, or for a particular role. (It is of course possible to do both
+ * database, or for a particular role. (It is of course possible to do both
* too, but it doesn't make sense for current uses.)
*/
void
diff --git a/src/backend/catalog/pg_depend.c b/src/backend/catalog/pg_depend.c
index c1a4340190a..7e0d9bf7b1a 100644
--- a/src/backend/catalog/pg_depend.c
+++ b/src/backend/catalog/pg_depend.c
@@ -49,7 +49,7 @@ recordDependencyOn(const ObjectAddress *depender,
/*
* Record multiple dependencies (of the same kind) for a single dependent
- * object. This has a little less overhead than recording each separately.
+ * object. This has a little less overhead than recording each separately.
*/
void
recordMultipleDependencies(const ObjectAddress *depender,
@@ -126,7 +126,7 @@ recordMultipleDependencies(const ObjectAddress *depender,
/*
* If we are executing a CREATE EXTENSION operation, mark the given object
- * as being a member of the extension. Otherwise, do nothing.
+ * as being a member of the extension. Otherwise, do nothing.
*
* This must be called during creation of any user-definable object type
* that could be a member of an extension.
@@ -185,7 +185,7 @@ recordDependencyOnCurrentExtension(const ObjectAddress *object,
* (possibly with some differences from before).
*
* If skipExtensionDeps is true, we do not delete any dependencies that
- * show that the given object is a member of an extension. This avoids
+ * show that the given object is a member of an extension. This avoids
* needing a lot of extra logic to fetch and recreate that dependency.
*/
long
@@ -491,7 +491,7 @@ getExtensionOfObject(Oid classId, Oid objectId)
* Detect whether a sequence is marked as "owned" by a column
*
* An ownership marker is an AUTO dependency from the sequence to the
- * column. If we find one, store the identity of the owning column
+ * column. If we find one, store the identity of the owning column
* into *tableId and *colId and return TRUE; else return FALSE.
*
* Note: if there's more than one such pg_depend entry then you get
diff --git a/src/backend/catalog/pg_enum.c b/src/backend/catalog/pg_enum.c
index 61a9322d901..5f0578eac6b 100644
--- a/src/backend/catalog/pg_enum.c
+++ b/src/backend/catalog/pg_enum.c
@@ -436,7 +436,7 @@ restart:
* We avoid doing this unless absolutely necessary; in most installations
* it will never happen. The reason is that updating existing pg_enum
* entries creates hazards for other backends that are concurrently reading
- * pg_enum with SnapshotNow semantics. A concurrent SnapshotNow scan could
+ * pg_enum with SnapshotNow semantics. A concurrent SnapshotNow scan could
* see both old and new versions of an updated row as valid, or neither of
* them, if the commit happens between scanning the two versions. It's
* also quite likely for a concurrent scan to see an inconsistent set of
diff --git a/src/backend/catalog/pg_largeobject.c b/src/backend/catalog/pg_largeobject.c
index 6d6c9e9f662..171cbcd18aa 100644
--- a/src/backend/catalog/pg_largeobject.c
+++ b/src/backend/catalog/pg_largeobject.c
@@ -79,7 +79,7 @@ LargeObjectCreate(Oid loid)
}
/*
- * Drop a large object having the given LO identifier. Both the data pages
+ * Drop a large object having the given LO identifier. Both the data pages
* and metadata must be dropped.
*/
void
diff --git a/src/backend/catalog/pg_operator.c b/src/backend/catalog/pg_operator.c
index 88410e5090b..80a666fa11a 100644
--- a/src/backend/catalog/pg_operator.c
+++ b/src/backend/catalog/pg_operator.c
@@ -316,7 +316,7 @@ OperatorShellMake(const char *operatorName,
* specify operators that do not exist. For example, if operator
* "op" is being defined, the negator operator "negop" and the
* commutator "commop" can also be defined without specifying
- * any information other than their names. Since in order to
+ * any information other than their names. Since in order to
* add "op" to the PG_OPERATOR catalog, all the Oid's for these
* operators must be placed in the fields of "op", a forward
* declaration is done on the commutator and negator operators.
@@ -434,7 +434,7 @@ OperatorCreate(const char *operatorName,
operatorName);
/*
- * Set up the other operators. If they do not currently exist, create
+ * Set up the other operators. If they do not currently exist, create
* shells in order to get ObjectId's.
*/
diff --git a/src/backend/catalog/pg_proc.c b/src/backend/catalog/pg_proc.c
index 5de7b338079..667a77ccea6 100644
--- a/src/backend/catalog/pg_proc.c
+++ b/src/backend/catalog/pg_proc.c
@@ -154,7 +154,7 @@ ProcedureCreate(const char *procedureName,
/*
* Do not allow polymorphic return type unless at least one input argument
- * is polymorphic. Also, do not allow return type INTERNAL unless at
+ * is polymorphic. Also, do not allow return type INTERNAL unless at
* least one input argument is INTERNAL.
*/
for (i = 0; i < parameterCount; i++)
@@ -180,7 +180,7 @@ ProcedureCreate(const char *procedureName,
/*
* We don't bother to distinguish input and output params here, so
* if there is, say, just an input INTERNAL param then we will
- * still set internalOutParam. This is OK since we don't really
+ * still set internalOutParam. This is OK since we don't really
* care.
*/
switch (allParams[i])
@@ -647,7 +647,7 @@ ProcedureCreate(const char *procedureName,
/*
* Set per-function configuration parameters so that the validation is
- * done with the environment the function expects. However, if
+ * done with the environment the function expects. However, if
* check_function_bodies is off, we don't do this, because that would
* create dump ordering hazards that pg_dump doesn't know how to deal
* with. (For example, a SET clause might refer to a not-yet-created
@@ -919,7 +919,7 @@ sql_function_parse_error_callback(void *arg)
/*
* Adjust a syntax error occurring inside the function body of a CREATE
- * FUNCTION or DO command. This can be used by any function validator or
+ * FUNCTION or DO command. This can be used by any function validator or
* anonymous-block handler, not only for SQL-language functions.
* It is assumed that the syntax error position is initially relative to the
* function body string (as passed in). If possible, we adjust the position
@@ -1052,7 +1052,7 @@ match_prosrc_to_literal(const char *prosrc, const char *literal,
/*
* This implementation handles backslashes and doubled quotes in the
- * string literal. It does not handle the SQL syntax for literals
+ * string literal. It does not handle the SQL syntax for literals
* continued across line boundaries.
*
* We do the comparison a character at a time, not a byte at a time, so
diff --git a/src/backend/catalog/pg_shdepend.c b/src/backend/catalog/pg_shdepend.c
index 91e26f4e159..77e038d267e 100644
--- a/src/backend/catalog/pg_shdepend.c
+++ b/src/backend/catalog/pg_shdepend.c
@@ -162,7 +162,7 @@ recordDependencyOnOwner(Oid classId, Oid objectId, Oid owner)
* shdepChangeDep
*
* Update shared dependency records to account for an updated referenced
- * object. This is an internal workhorse for operations such as changing
+ * object. This is an internal workhorse for operations such as changing
* an object's owner.
*
* There must be no more than one existing entry for the given dependent
@@ -311,7 +311,7 @@ changeDependencyOnOwner(Oid classId, Oid objectId, Oid newOwnerId)
* was previously granted some rights to the object.
*
* This step is analogous to aclnewowner's removal of duplicate entries
- * in the ACL. We have to do it to handle this scenario:
+ * in the ACL. We have to do it to handle this scenario:
* A grants some rights on an object to B
* ALTER OWNER changes the object's owner to B
* ALTER OWNER changes the object's owner to C
@@ -397,9 +397,9 @@ getOidListDiff(Oid *list1, int *nlist1, Oid *list2, int *nlist2)
 * and then insert or delete from pg_shdepend as appropriate.
*
* Note that we can't just insert all referenced roles blindly during GRANT,
- * because we would end up with duplicate registered dependencies. We could
+ * because we would end up with duplicate registered dependencies. We could
* check for existence of the tuples before inserting, but that seems to be
- * more expensive than what we are doing here. Likewise we can't just delete
+ * more expensive than what we are doing here. Likewise we can't just delete
* blindly during REVOKE, because the user may still have other privileges.
* It is also possible that REVOKE actually adds dependencies, due to
* instantiation of a formerly implicit default ACL (although at present,
@@ -530,7 +530,7 @@ checkSharedDependencies(Oid classId, Oid objectId,
/*
* We limit the number of dependencies reported to the client to
* MAX_REPORTED_DEPS, since client software may not deal well with
- * enormous error strings. The server log always gets a full report.
+ * enormous error strings. The server log always gets a full report.
*/
#define MAX_REPORTED_DEPS 100
@@ -611,7 +611,7 @@ checkSharedDependencies(Oid classId, Oid objectId,
bool stored = false;
/*
- * XXX this info is kept on a simple List. Maybe it's not good
+ * XXX this info is kept on a simple List. Maybe it's not good
* for performance, but using a hash table seems needlessly
* complex. The expected number of databases is not high anyway,
* I suppose.
@@ -848,7 +848,7 @@ shdepAddDependency(Relation sdepRel,
/*
* Make sure the object doesn't go away while we record the dependency on
- * it. DROP routines should lock the object exclusively before they check
+ * it. DROP routines should lock the object exclusively before they check
* shared dependencies.
*/
shdepLockAndCheckObject(refclassId, refobjId);
@@ -999,7 +999,7 @@ shdepLockAndCheckObject(Oid classId, Oid objectId)
/*
* Currently, this routine need not support any other shared
- * object types besides roles. If we wanted to record explicit
+ * object types besides roles. If we wanted to record explicit
* dependencies on databases or tablespaces, we'd need code along
* these lines:
*/
@@ -1145,7 +1145,7 @@ isSharedObjectPinned(Oid classId, Oid objectId, Relation sdepRel)
/*
* shdepDropOwned
*
- * Drop the objects owned by any one of the given RoleIds. If a role has
+ * Drop the objects owned by any one of the given RoleIds. If a role has
* access to an object, the grant will be removed as well (but the object
* will not, of course).
*
diff --git a/src/backend/catalog/pg_type.c b/src/backend/catalog/pg_type.c
index 95b17d764ed..4f2a2ec113b 100644
--- a/src/backend/catalog/pg_type.c
+++ b/src/backend/catalog/pg_type.c
@@ -387,7 +387,7 @@ TypeCreate(Oid newTypeOid,
if (HeapTupleIsValid(tup))
{
/*
- * check that the type is not already defined. It may exist as a
+ * check that the type is not already defined. It may exist as a
* shell type, however.
*/
if (((Form_pg_type) GETSTRUCT(tup))->typisdefined)
diff --git a/src/backend/catalog/storage.c b/src/backend/catalog/storage.c
index 1877395b65b..c1f17a5a0f6 100644
--- a/src/backend/catalog/storage.c
+++ b/src/backend/catalog/storage.c
@@ -34,7 +34,7 @@
* that have been created or deleted in the current transaction. When
* a relation is created, we create the physical file immediately, but
* remember it so that we can delete the file again if the current
- * transaction is aborted. Conversely, a deletion request is NOT
+ * transaction is aborted. Conversely, a deletion request is NOT
* executed immediately, but is just entered in the list. When and if
* the transaction commits, we can delete the physical file.
*
@@ -378,7 +378,7 @@ smgrDoPendingDeletes(bool isCommit)
* *ptr is set to point to a freshly-palloc'd array of RelFileNodes.
* If there are no relations to be deleted, *ptr is set to NULL.
*
- * Only non-temporary relations are included in the returned list. This is OK
+ * Only non-temporary relations are included in the returned list. This is OK
* because the list is used only in contexts where temporary relations don't
* matter: we're either writing to the two-phase state file (and transactions
* that have touched temp tables can't be prepared) or we're writing to xlog
diff --git a/src/backend/catalog/toasting.c b/src/backend/catalog/toasting.c
index 6ef33f94927..6f694cebc35 100644
--- a/src/backend/catalog/toasting.c
+++ b/src/backend/catalog/toasting.c
@@ -341,7 +341,7 @@ create_toast_table(Relation rel, Oid toastOid, Oid toastIndexOid, Datum reloptio
}
/*
- * Check to see whether the table needs a TOAST table. It does only if
+ * Check to see whether the table needs a TOAST table. It does only if
* (1) there are any toastable attributes, and (2) the maximum length
* of a tuple could exceed TOAST_TUPLE_THRESHOLD. (We don't want to
* create a toast table for something like "f1 varchar(20)".)
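Reduced to its two conditions, the test this comment describes reads roughly like the sketch below. The struct, the threshold value, and the length estimate are assumptions; the real code walks the relation's tuple descriptor:

    #include <stdbool.h>
    #include <stddef.h>

    #define TOAST_TUPLE_THRESHOLD 2032      /* assumed: roughly BLCKSZ/4 with 8K pages */

    struct attr { int attlen; char attstorage; bool attisdropped; };

    static bool
    needs_toast_table(const struct attr *atts, int natts, size_t max_tuple_len)
    {
        bool has_toastable = false;

        for (int i = 0; i < natts; i++)
            if (!atts[i].attisdropped &&
                atts[i].attlen == -1 && atts[i].attstorage != 'p')
                has_toastable = true;       /* (1) a toastable column exists */

        return has_toastable &&
               max_tuple_len > TOAST_TUPLE_THRESHOLD;   /* (2) tuple could outgrow page */
    }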
diff --git a/src/backend/commands/aggregatecmds.c b/src/backend/commands/aggregatecmds.c
index a2122c1d8b6..f4f4c6ba0df 100644
--- a/src/backend/commands/aggregatecmds.c
+++ b/src/backend/commands/aggregatecmds.c
@@ -173,7 +173,7 @@ DefineAggregate(List *name, List *args, bool oldstyle, List *parameters)
*
* transtype can't be a pseudo-type, since we need to be able to store
* values of the transtype. However, we can allow polymorphic transtype
- * in some cases (AggregateCreate will check). Also, we allow "internal"
+ * in some cases (AggregateCreate will check). Also, we allow "internal"
* for functions that want to pass pointers to private data structures;
* but allow that only to superusers, since you could crash the system (or
* worse) by connecting up incompatible internal-using functions in an
diff --git a/src/backend/commands/alter.c b/src/backend/commands/alter.c
index 91df1a0aca7..3cbb1e290f7 100644
--- a/src/backend/commands/alter.c
+++ b/src/backend/commands/alter.c
@@ -42,7 +42,7 @@
/*
- * Executes an ALTER OBJECT / RENAME TO statement. Based on the object
+ * Executes an ALTER OBJECT / RENAME TO statement. Based on the object
* type, the function appropriate to that type is executed.
*/
void
diff --git a/src/backend/commands/analyze.c b/src/backend/commands/analyze.c
index b1521be70e6..b5d1c617dd5 100644
--- a/src/backend/commands/analyze.c
+++ b/src/backend/commands/analyze.c
@@ -365,7 +365,7 @@ do_analyze_rel(Relation onerel, VacuumStmt *vacstmt, bool inh)
/*
* Open all indexes of the relation, and see if there are any analyzable
- * columns in the indexes. We do not analyze index columns if there was
+ * columns in the indexes. We do not analyze index columns if there was
* an explicit column list in the ANALYZE command, however. If we are
* doing a recursive scan, we don't want to touch the parent's indexes at
* all.
@@ -422,7 +422,7 @@ do_analyze_rel(Relation onerel, VacuumStmt *vacstmt, bool inh)
/*
* Determine how many rows we need to sample, using the worst case from
- * all analyzable columns. We use a lower bound of 100 rows to avoid
+ * all analyzable columns. We use a lower bound of 100 rows to avoid
* possible overflow in Vitter's algorithm. (Note: that will also be
* the target in the corner case where there are no analyzable columns.)
*/
@@ -455,7 +455,7 @@ do_analyze_rel(Relation onerel, VacuumStmt *vacstmt, bool inh)
&totalrows, &totaldeadrows);
/*
- * Compute the statistics. Temporary results during the calculations for
+ * Compute the statistics. Temporary results during the calculations for
* each column are stored in a child context. The calc routines are
* responsible to make sure that whatever they store into the VacAttrStats
* structure is allocated in anl_context.
@@ -512,7 +512,7 @@ do_analyze_rel(Relation onerel, VacuumStmt *vacstmt, bool inh)
/*
* Emit the completed stats rows into pg_statistic, replacing any
- * previous statistics for the target columns. (If there are stats in
+ * previous statistics for the target columns. (If there are stats in
* pg_statistic for columns we didn't process, we leave them alone.)
*/
update_attstats(RelationGetRelid(onerel), inh,
@@ -556,7 +556,7 @@ do_analyze_rel(Relation onerel, VacuumStmt *vacstmt, bool inh)
}
/*
- * Report ANALYZE to the stats collector, too. However, if doing
+ * Report ANALYZE to the stats collector, too. However, if doing
* inherited stats we shouldn't report, because the stats collector only
* tracks per-table stats.
*/
@@ -818,7 +818,7 @@ examine_attribute(Relation onerel, int attnum, Node *index_expr)
return NULL;
/*
- * Create the VacAttrStats struct. Note that we only have a copy of the
+ * Create the VacAttrStats struct. Note that we only have a copy of the
* fixed fields of the pg_attribute tuple.
*/
stats = (VacAttrStats *) palloc0(sizeof(VacAttrStats));
@@ -828,7 +828,7 @@ examine_attribute(Relation onerel, int attnum, Node *index_expr)
/*
* When analyzing an expression index, believe the expression tree's type
* not the column datatype --- the latter might be the opckeytype storage
- * type of the opclass, which is not interesting for our purposes. (Note:
+ * type of the opclass, which is not interesting for our purposes. (Note:
* if we did anything with non-expression index columns, we'd need to
* figure out where to get the correct type info from, but for now that's
* not a problem.) It's not clear whether anyone will care about the
@@ -867,7 +867,7 @@ examine_attribute(Relation onerel, int attnum, Node *index_expr)
}
/*
- * Call the type-specific typanalyze function. If none is specified, use
+ * Call the type-specific typanalyze function. If none is specified, use
* std_typanalyze().
*/
if (OidIsValid(stats->attrtype->typanalyze))
@@ -943,7 +943,7 @@ BlockSampler_Next(BlockSampler bs)
* If we are to skip, we should advance t (hence decrease K), and
* repeat the same probabilistic test for the next block. The naive
* implementation thus requires a random_fract() call for each block
- * number. But we can reduce this to one random_fract() call per
+ * number. But we can reduce this to one random_fract() call per
* selected block, by noting that each time the while-test succeeds,
* we can reinterpret V as a uniform random number in the range 0 to p.
* Therefore, instead of choosing a new V, we just adjust p to be
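The hunk cuts the comment off, but the V-reuse trick it is describing reduces the random-number cost from one draw per block to one per selected block. A hedged sketch (variable roles follow Knuth's Algorithm S: K candidate blocks remain after block t, k still to be chosen; random_fract is a stand-in):

    #include <stdlib.h>

    /* uniform in (0,1); illustrative stand-in for the backend's random_fract() */
    static double
    random_fract(void)
    {
        return ((double) rand() + 1.0) / ((double) RAND_MAX + 2.0);
    }

    /* Return the next sampled block, skipping ahead with one uniform draw. */
    static unsigned
    next_sampled_block(unsigned t, unsigned K, unsigned k)
    {
        double V = random_fract();                  /* one draw per selection */
        double p = 1.0 - (double) k / (double) K;   /* P(skip the next block) */

        while (V < p)
        {
            t++;                                    /* skip block t */
            K--;
            /* reinterpret V as uniform on (0, p): fold the following block's
             * skip probability into p instead of drawing a fresh V */
            p *= 1.0 - (double) k / (double) K;
        }
        return t;                                   /* block t survives and is selected */
    }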
@@ -1072,7 +1072,7 @@ acquire_sample_rows(Relation onerel, HeapTuple *rows, int targrows,
/*
* We ignore unused and redirect line pointers. DEAD line
* pointers should be counted as dead, because we need vacuum to
- * run to get rid of them. Note that this rule agrees with the
+ * run to get rid of them. Note that this rule agrees with the
* way that heap_page_prune() counts things.
*/
if (!ItemIdIsNormal(itemid))
@@ -1117,7 +1117,7 @@ acquire_sample_rows(Relation onerel, HeapTuple *rows, int targrows,
* is the safer option.
*
* A special case is that the inserting transaction might
- * be our own. In this case we should count and sample
+ * be our own. In this case we should count and sample
* the row, to accommodate users who load a table and
* analyze it in one transaction. (pgstat_report_analyze
* has to adjust the numbers we send to the stats
@@ -1159,7 +1159,7 @@ acquire_sample_rows(Relation onerel, HeapTuple *rows, int targrows,
/*
* The first targrows sample rows are simply copied into the
* reservoir. Then we start replacing tuples in the sample
- * until we reach the end of the relation. This algorithm is
+ * until we reach the end of the relation. This algorithm is
* from Jeff Vitter's paper (see full citation below). It
* works by repeatedly computing the number of tuples to skip
* before selecting a tuple, which replaces a randomly chosen
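For reference, the plain one-pass form of reservoir sampling that this comment builds on; Vitter's refinement (used by acquire_sample_rows) is to compute how many tuples to skip between replacements, but the basic replace-with-decreasing-probability idea is just:

    #include <stdlib.h>

    /* Fill sample[0..targrows-1] from a stream of nitems items (Algorithm R). */
    static void
    reservoir_sample(const int *items, int nitems, int *sample, int targrows)
    {
        for (int i = 0; i < nitems; i++)
        {
            if (i < targrows)
                sample[i] = items[i];       /* first targrows fill the reservoir */
            else
            {
                int j = rand() % (i + 1);   /* uniform in [0, i] */
                if (j < targrows)
                    sample[j] = items[i];   /* replace a randomly chosen victim */
            }
        }
    }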
@@ -1217,7 +1217,7 @@ acquire_sample_rows(Relation onerel, HeapTuple *rows, int targrows,
qsort((void *) rows, numrows, sizeof(HeapTuple), compare_rows);
/*
- * Estimate total numbers of rows in relation. For live rows, use
+ * Estimate total numbers of rows in relation. For live rows, use
* vac_estimate_reltuples; for dead rows, we have no source of old
* information, so we have to assume the density is the same in unseen
* pages as in the pages we scanned.
@@ -1537,7 +1537,7 @@ acquire_inherited_sample_rows(Relation onerel, HeapTuple *rows, int targrows,
* Statistics are stored in several places: the pg_class row for the
* relation has stats about the whole relation, and there is a
* pg_statistic row for each (non-system) attribute that has ever
- * been analyzed. The pg_class values are updated by VACUUM, not here.
+ * been analyzed. The pg_class values are updated by VACUUM, not here.
*
* pg_statistic rows are just added or updated normally. This means
* that pg_statistic will probably contain some deleted rows at the
@@ -1939,7 +1939,7 @@ compute_minimal_stats(VacAttrStatsP stats,
/*
* If the value is toasted, we want to detoast it just once to
* avoid repeated detoastings and resultant excess memory usage
- * during the comparisons. Also, check to see if the value is
+ * during the comparisons. Also, check to see if the value is
* excessively wide, and if so don't detoast at all --- just
* ignore the value.
*/
@@ -2059,7 +2059,7 @@ compute_minimal_stats(VacAttrStatsP stats,
* We assume (not very reliably!) that all the multiply-occurring
* values are reflected in the final track[] list, and the other
* nonnull values all appeared but once. (XXX this usually
- * results in a drastic overestimate of ndistinct. Can we do
+ * results in a drastic overestimate of ndistinct. Can we do
* any better?)
*----------
*/
@@ -2096,7 +2096,7 @@ compute_minimal_stats(VacAttrStatsP stats,
* Decide how many values are worth storing as most-common values. If
* we are able to generate a complete MCV list (all the values in the
* sample will fit, and we think these are all the ones in the table),
- * then do so. Otherwise, store only those values that are
+ * then do so. Otherwise, store only those values that are
* significantly more common than the (estimated) average. We set the
* threshold rather arbitrarily at 25% more than average, with at
* least 2 instances in the sample.
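In numbers, the cutoff this comment sketches looks roughly as follows (a condensed sketch; the variable names are assumptions, and track[] is taken to be sorted by descending count):

    /* estimated average occurrences per distinct nonnull value in the sample */
    double avgcount = (double) nonnull_cnt / (double) ndistinct;

    /* "significantly more common": 25% above average, and at least 2 instances */
    double mincount = avgcount * 1.25;
    if (mincount < 2.0)
        mincount = 2.0;

    for (int i = 0; i < num_mcv; i++)
    {
        if ((double) track[i].count < mincount)
        {
            num_mcv = i;        /* everything past here is too common-place to keep */
            break;
        }
    }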
@@ -2261,7 +2261,7 @@ compute_scalar_stats(VacAttrStatsP stats,
/*
* If the value is toasted, we want to detoast it just once to
* avoid repeated detoastings and resultant excess memory usage
- * during the comparisons. Also, check to see if the value is
+ * during the comparisons. Also, check to see if the value is
* excessively wide, and if so don't detoast at all --- just
* ignore the value.
*/
@@ -2307,7 +2307,7 @@ compute_scalar_stats(VacAttrStatsP stats,
* accumulate ordering-correlation statistics.
*
* To determine which are most common, we first have to count the
- * number of duplicates of each value. The duplicates are adjacent in
+ * number of duplicates of each value. The duplicates are adjacent in
* the sorted list, so a brute-force approach is to compare successive
* datum values until we find two that are not equal. However, that
* requires N-1 invocations of the datum comparison routine, which are
@@ -2316,7 +2316,7 @@ compute_scalar_stats(VacAttrStatsP stats,
* that are adjacent in the sorted order; otherwise it could not know
* that it's ordered the pair correctly.) We exploit this by having
* compare_scalars remember the highest tupno index that each
- * ScalarItem has been found equal to. At the end of the sort, a
+ * ScalarItem has been found equal to. At the end of the sort, a
* ScalarItem's tupnoLink will still point to itself if and only if it
* is the last item of its group of duplicates (since the group will
* be ordered by tupno).
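A sketch of the comparator side of this trick; only the tupnoLink maintenance on equal datums is shown, with a, b, and datum_cmp assumed from the surrounding description:

    /* On every equal pair the sort shows us, point the lower tupno's link at
     * the higher tupno; after sorting, tupnoLink[i] == i iff item i is the
     * last member of its duplicate group. */
    if (datum_cmp == 0)
    {
        int ta = a->tupno;
        int tb = b->tupno;

        if (tupnoLink[ta] < tb)
            tupnoLink[ta] = tb;
        if (tupnoLink[tb] < ta)
            tupnoLink[tb] = ta;
    }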
@@ -2436,7 +2436,7 @@ compute_scalar_stats(VacAttrStatsP stats,
* Decide how many values are worth storing as most-common values. If
* we are able to generate a complete MCV list (all the values in the
* sample will fit, and we think these are all the ones in the table),
- * then do so. Otherwise, store only those values that are
+ * then do so. Otherwise, store only those values that are
* significantly more common than the (estimated) average. We set the
* threshold rather arbitrarily at 25% more than average, with at
* least 2 instances in the sample. Also, we won't suppress values
@@ -2591,7 +2591,7 @@ compute_scalar_stats(VacAttrStatsP stats,
/*
* The object of this loop is to copy the first and last values[]
- * entries along with evenly-spaced values in between. So the
+ * entries along with evenly-spaced values in between. So the
* i'th value is values[(i * (nvals - 1)) / (num_hist - 1)]. But
* computing that subscript directly risks integer overflow when
* the stats target is more than a couple thousand. Instead we
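The context truncates the comment, but the incremental computation it is introducing keeps pos = (i * (nvals - 1)) / (num_hist - 1) as an integer part plus a running remainder, so i is never multiplied out. A sketch:

    int delta     = (nvals - 1) / (num_hist - 1);   /* integer step */
    int deltafrac = (nvals - 1) % (num_hist - 1);   /* fractional step, scaled up */
    int pos = 0, posfrac = 0;

    for (int i = 0; i < num_hist; i++)
    {
        hist_values[i] = values[pos];
        pos += delta;
        posfrac += deltafrac;
        if (posfrac >= num_hist - 1)
        {
            pos++;                      /* carry the accumulated fraction into pos */
            posfrac -= num_hist - 1;
        }
    }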
@@ -2702,7 +2702,7 @@ compute_scalar_stats(VacAttrStatsP stats,
* qsort_arg comparator for sorting ScalarItems
*
* Aside from sorting the items, we update the tupnoLink[] array
- * whenever two ScalarItems are found to contain equal datums. The array
+ * whenever two ScalarItems are found to contain equal datums. The array
* is indexed by tupno; for each ScalarItem, it contains the highest
* tupno that that item's datum has been found to be equal to. This allows
* us to avoid additional comparisons in compute_scalar_stats().
diff --git a/src/backend/commands/async.c b/src/backend/commands/async.c
index 80dda9bf316..bc85ed73f5c 100644
--- a/src/backend/commands/async.c
+++ b/src/backend/commands/async.c
@@ -150,7 +150,7 @@
*
* This struct declaration has the maximal length, but in a real queue entry
* the data area is only big enough for the actual channel and payload strings
- * (each null-terminated). AsyncQueueEntryEmptySize is the minimum possible
+ * (each null-terminated). AsyncQueueEntryEmptySize is the minimum possible
* entry size, if both channel and payload strings are empty (but note it
* doesn't include alignment padding).
*
@@ -266,7 +266,7 @@ static SlruCtlData AsyncCtlData;
*
* The most data we can have in the queue at a time is QUEUE_MAX_PAGE/2
* pages, because more than that would confuse slru.c into thinking there
- * was a wraparound condition. With the default BLCKSZ this means there
+ * was a wraparound condition. With the default BLCKSZ this means there
* can be up to 8GB of queued-and-not-read data.
*
* Note: it's possible to redefine QUEUE_MAX_PAGE with a smaller multiple of
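The 8GB figure is just QUEUE_MAX_PAGE/2 pages times the page size; a quick arithmetic check, where the QUEUE_MAX_PAGE expansion is an assumption based on the SLRU segment layout:

    #include <stdio.h>

    #define BLCKSZ 8192                           /* default page size */
    #define QUEUE_MAX_PAGE (32 * 0x10000 - 1)     /* assumed SLRU-derived bound */

    int main(void)
    {
        long long bytes = (long long) (QUEUE_MAX_PAGE / 2) * BLCKSZ;

        printf("%.2f GB of queued-and-not-read data\n",
               bytes / (1024.0 * 1024.0 * 1024.0));   /* prints 8.00 */
        return 0;
    }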
@@ -411,7 +411,7 @@ asyncQueuePagePrecedesLogically(int p, int q)
int diff;
/*
- * We have to compare modulo (QUEUE_MAX_PAGE+1)/2. Both inputs should be
+ * We have to compare modulo (QUEUE_MAX_PAGE+1)/2. Both inputs should be
* in the range 0..QUEUE_MAX_PAGE.
*/
Assert(p >= 0 && p <= QUEUE_MAX_PAGE);
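The rest of the function is trimmed from the hunk, but the comparison it performs is the usual circular-counter test; a sketch under the same constants (QUEUE_MAX_PAGE assumed as in the check above):

    /* true iff page p logically precedes page q on the wrapping queue */
    static int
    page_precedes_logically(int p, int q)
    {
        int diff = p - q;

        /* fold diff into (-(MAX+1)/2, +(MAX+1)/2] so its sign is meaningful */
        if (diff >= (QUEUE_MAX_PAGE + 1) / 2)
            diff -= QUEUE_MAX_PAGE + 1;
        else if (diff < -((QUEUE_MAX_PAGE + 1) / 2))
            diff += QUEUE_MAX_PAGE + 1;

        return diff < 0;
    }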
@@ -849,7 +849,7 @@ PreCommit_Notify(void)
while (nextNotify != NULL)
{
/*
- * Add the pending notifications to the queue. We acquire and
+ * Add the pending notifications to the queue. We acquire and
* release AsyncQueueLock once per page, which might be overkill
* but it does allow readers to get in while we're doing this.
*
@@ -1065,12 +1065,12 @@ Exec_UnlistenAllCommit(void)
* The reason that this is not done in AtCommit_Notify is that there is
* a nonzero chance of errors here (for example, encoding conversion errors
* while trying to format messages to our frontend). An error during
- * AtCommit_Notify would be a PANIC condition. The timing is also arranged
+ * AtCommit_Notify would be a PANIC condition. The timing is also arranged
* to ensure that a transaction's self-notifies are delivered to the frontend
* before it gets the terminating ReadyForQuery message.
*
* Note that we send signals and process the queue even if the transaction
- * eventually aborted. This is because we need to clean out whatever got
+ * eventually aborted. This is because we need to clean out whatever got
* added to the queue.
*
* NOTE: we are outside of any transaction here.
@@ -1160,7 +1160,7 @@ IsListeningOn(const char *channel)
/*
* Remove our entry from the listeners array when we are no longer listening
- * on any channel. NB: must not fail if we're already not listening.
+ * on any channel. NB: must not fail if we're already not listening.
*/
static void
asyncQueueUnregister(void)
@@ -1202,7 +1202,7 @@ asyncQueueIsFull(void)
/*
* The queue is full if creating a new head page would create a page that
* logically precedes the current global tail pointer, ie, the head
- * pointer would wrap around compared to the tail. We cannot create such
+ * pointer would wrap around compared to the tail. We cannot create such
* a head page for fear of confusing slru.c. For safety we round the tail
* pointer back to a segment boundary (compare the truncation logic in
* asyncQueueAdvanceTail).
@@ -1221,7 +1221,7 @@ asyncQueueIsFull(void)
/*
* Advance the QueuePosition to the next entry, assuming that the current
- * entry is of length entryLength. If we jump to a new page the function
+ * entry is of length entryLength. If we jump to a new page the function
* returns true, else false.
*/
static bool
@@ -1290,7 +1290,7 @@ asyncQueueNotificationToEntry(Notification *n, AsyncQueueEntry *qe)
* the last byte which simplifies reading the page later.
*
* We are passed the list cell containing the next notification to write
- * and return the first still-unwritten cell back. Eventually we will return
+ * and return the first still-unwritten cell back. Eventually we will return
* NULL indicating all is done.
*
* We are holding AsyncQueueLock already from the caller and grab AsyncCtlLock
@@ -1367,7 +1367,7 @@ asyncQueueAddEntries(ListCell *nextNotify)
* Page is full, so we're done here, but first fill the next page
* with zeroes. The reason to do this is to ensure that slru.c's
* idea of the head page is always the same as ours, which avoids
- * boundary problems in SimpleLruTruncate. The test in
+ * boundary problems in SimpleLruTruncate. The test in
* asyncQueueIsFull() ensured that there is room to create this
* page without overrunning the queue.
*/
@@ -1794,7 +1794,7 @@ EnableNotifyInterrupt(void)
* is disabled until the next EnableNotifyInterrupt call.
*
* The PROCSIG_CATCHUP_INTERRUPT signal handler also needs to call this,
- * so as to prevent conflicts if one signal interrupts the other. So we
+ * so as to prevent conflicts if one signal interrupts the other. So we
* must return the previous state of the flag.
*/
bool
@@ -1889,7 +1889,7 @@ asyncQueueReadAllNotifications(void)
/*
* We copy the data from SLRU into a local buffer, so as to avoid
* holding the AsyncCtlLock while we are examining the entries and
- * possibly transmitting them to our frontend. Copy only the part
+ * possibly transmitting them to our frontend. Copy only the part
* of the page we will actually inspect.
*/
slotno = SimpleLruReadPage_ReadOnly(AsyncCtl, curpage,
@@ -1963,7 +1963,7 @@ asyncQueueReadAllNotifications(void)
* and deliver relevant ones to my frontend.
*
* The current page must have been fetched into page_buffer from shared
- * memory. (We could access the page right in shared memory, but that
+ * memory. (We could access the page right in shared memory, but that
* would imply holding the AsyncCtlLock throughout this routine.)
*
* We stop if we reach the "stop" position, or reach a notification from an
@@ -2169,7 +2169,7 @@ NotifyMyFrontEnd(const char *channel, const char *payload, int32 srcPid)
pq_endmessage(&buf);
/*
- * NOTE: we do not do pq_flush() here. For a self-notify, it will
+ * NOTE: we do not do pq_flush() here. For a self-notify, it will
* happen at the end of the transaction, and for incoming notifies
* ProcessIncomingNotify will do it after finding all the notifies.
*/
diff --git a/src/backend/commands/cluster.c b/src/backend/commands/cluster.c
index 81e72825269..140a6e81cab 100644
--- a/src/backend/commands/cluster.c
+++ b/src/backend/commands/cluster.c
@@ -1,7 +1,7 @@
/*-------------------------------------------------------------------------
*
* cluster.c
- * CLUSTER a table on an index. This is now also used for VACUUM FULL.
+ * CLUSTER a table on an index. This is now also used for VACUUM FULL.
*
* There is hardly anything left of Paul Brown's original implementation...
*
@@ -99,7 +99,7 @@ static void reform_and_rewrite_tuple(HeapTuple tuple,
*
* The single-relation case does not have any such overhead.
*
- * We also allow a relation to be specified without index. In that case,
+ * We also allow a relation to be specified without index. In that case,
* the indisclustered bit will be looked up, and an ERROR will be thrown
* if there is no index with the bit set.
*---------------------------------------------------------------------------
@@ -214,7 +214,7 @@ cluster(ClusterStmt *stmt, bool isTopLevel)
ALLOCSET_DEFAULT_MAXSIZE);
/*
- * Build the list of relations to cluster. Note that this lives in
+ * Build the list of relations to cluster. Note that this lives in
* cluster_context.
*/
rvs = get_tables_to_cluster(cluster_context);
@@ -251,7 +251,7 @@ cluster(ClusterStmt *stmt, bool isTopLevel)
*
* This clusters the table by creating a new, clustered table and
* swapping the relfilenodes of the new table and the old table, so
- * the OID of the original table is preserved. Thus we do not lose
+ * the OID of the original table is preserved. Thus we do not lose
* GRANT, inheritance nor references to this table (this was a bug
* in releases thru 7.3).
*
@@ -260,7 +260,7 @@ cluster(ClusterStmt *stmt, bool isTopLevel)
* them incrementally while we load the table.
*
* If indexOid is InvalidOid, the table will be rewritten in physical order
- * instead of index order. This is the new implementation of VACUUM FULL,
+ * instead of index order. This is the new implementation of VACUUM FULL,
* and error messages should refer to the operation as VACUUM not CLUSTER.
*/
void
@@ -274,7 +274,7 @@ cluster_rel(Oid tableOid, Oid indexOid, bool recheck, bool verbose,
/*
* We grab exclusive access to the target rel and index for the duration
- * of the transaction. (This is redundant for the single-transaction
+ * of the transaction. (This is redundant for the single-transaction
* case, since cluster() already did it.) The index lock is taken inside
* check_index_is_clusterable.
*/
@@ -309,7 +309,7 @@ cluster_rel(Oid tableOid, Oid indexOid, bool recheck, bool verbose,
* check in the "recheck" case is appropriate (which currently means
* somebody is executing a database-wide CLUSTER), because there is
* another check in cluster() which will stop any attempt to cluster
- * remote temp tables by name. There is another check in cluster_rel
+ * remote temp tables by name. There is another check in cluster_rel
* which is redundant, but we leave it for extra safety.
*/
if (RELATION_IS_OTHER_TEMP(OldHeap))
@@ -388,7 +388,7 @@ cluster_rel(Oid tableOid, Oid indexOid, bool recheck, bool verbose,
/*
* All predicate locks on the tuples or pages are about to be made
- * invalid, because we move tuples around. Promote them to relation
+ * invalid, because we move tuples around. Promote them to relation
* locks. Predicate locks on indexes will be promoted when they are
* reindexed.
*/
@@ -436,7 +436,7 @@ check_index_is_clusterable(Relation OldHeap, Oid indexOid, bool recheck, LOCKMOD
/*
* Disallow clustering on incomplete indexes (those that might not index
- * every row of the relation). We could relax this by making a separate
+ * every row of the relation). We could relax this by making a separate
* seqscan pass over the table to copy the missing rows, but that seems
* expensive and tedious.
*/
@@ -634,14 +634,14 @@ make_new_heap(Oid OIDOldHeap, Oid NewTableSpace)
/*
* Create the new heap, using a temporary name in the same namespace as
- * the existing table. NOTE: there is some risk of collision with user
+ * the existing table. NOTE: there is some risk of collision with user
* relnames. Working around this seems more trouble than it's worth; in
* particular, we can't create the new heap in a different namespace from
* the old, or we will have problems with the TEMP status of temp tables.
*
* Note: the new heap is not a shared relation, even if we are rebuilding
* a shared rel. However, we do make the new heap mapped if the source is
- * mapped. This simplifies swap_relation_files, and is absolutely
+ * mapped. This simplifies swap_relation_files, and is absolutely
* necessary for rebuilding pg_class, for reasons explained there.
*/
snprintf(NewHeapName, sizeof(NewHeapName), "pg_temp_%u", OIDOldHeap);
@@ -770,12 +770,12 @@ copy_heap_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex,
/*
* If the OldHeap has a toast table, get lock on the toast table to keep
- * it from being vacuumed. This is needed because autovacuum processes
+ * it from being vacuumed. This is needed because autovacuum processes
* toast tables independently of their main tables, with no lock on the
- * latter. If an autovacuum were to start on the toast table after we
+ * latter. If an autovacuum were to start on the toast table after we
* compute our OldestXmin below, it would use a later OldestXmin, and then
* possibly remove as DEAD toast tuples belonging to main tuples we think
- * are only RECENTLY_DEAD. Then we'd fail while trying to copy those
+ * are only RECENTLY_DEAD. Then we'd fail while trying to copy those
* tuples.
*
* We don't need to open the toast relation here, just lock it. The lock
@@ -796,7 +796,7 @@ copy_heap_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex,
/*
* If both tables have TOAST tables, perform toast swap by content. It is
* possible that the old table has a toast table but the new one doesn't,
- * if toastable columns have been dropped. In that case we have to do
+ * if toastable columns have been dropped. In that case we have to do
* swap by links. This is okay because swap by content is only essential
* for system catalogs, and we don't support schema changes for them.
*/
@@ -815,7 +815,7 @@ copy_heap_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex,
*
* Note that we must hold NewHeap open until we are done writing data,
* since the relcache will not guarantee to remember this setting once
- * the relation is closed. Also, this technique depends on the fact
+ * the relation is closed. Also, this technique depends on the fact
* that no one will try to read from the NewHeap until after we've
* finished writing it and swapping the rels --- otherwise they could
* follow the toast pointers to the wrong place. (It would actually
@@ -909,7 +909,7 @@ copy_heap_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex,
/*
* Scan through the OldHeap, either in OldIndex order or sequentially;
* copy each tuple into the NewHeap, or transiently to the tuplesort
- * module. Note that we don't bother sorting dead tuples (they won't get
+ * module. Note that we don't bother sorting dead tuples (they won't get
* to the new table anyway).
*/
for (;;)
@@ -1198,7 +1198,7 @@ swap_relation_files(Oid r1, Oid r2, bool target_is_pg_class,
NameStr(relform2->relname), r2);
/*
- * Send replacement mappings to relmapper. Note these won't actually
+ * Send replacement mappings to relmapper. Note these won't actually
* take effect until CommandCounterIncrement.
*/
RelationMapUpdateMap(r1, relfilenode2, relform1->relisshared, false);
@@ -1388,7 +1388,7 @@ swap_relation_files(Oid r1, Oid r2, bool target_is_pg_class,
* non-transient relation.)
*
* Caution: the placement of this step interacts with the decision to
- * handle toast rels by recursion. When we are trying to rebuild pg_class
+ * handle toast rels by recursion. When we are trying to rebuild pg_class
* itself, the smgr close on pg_class must happen after all accesses in
* this function.
*/
@@ -1432,9 +1432,9 @@ finish_heap_swap(Oid OIDOldHeap, Oid OIDNewHeap,
/*
* Rebuild each index on the relation (but not the toast table, which is
- * all-new at this point). It is important to do this before the DROP
+ * all-new at this point). It is important to do this before the DROP
* step because if we are processing a system catalog that will be used
- * during DROP, we want to have its indexes available. There is no
+ * during DROP, we want to have its indexes available. There is no
* advantage to the other order anyway because this is all transactional,
* so no chance to reclaim disk space before commit. We do not need a
* final CommandCounterIncrement() because reindex_relation does it.
diff --git a/src/backend/commands/constraint.c b/src/backend/commands/constraint.c
index e8a19a9f03b..86299868669 100644
--- a/src/backend/commands/constraint.c
+++ b/src/backend/commands/constraint.c
@@ -49,7 +49,7 @@ unique_key_recheck(PG_FUNCTION_ARGS)
bool isnull[INDEX_MAX_KEYS];
/*
- * Make sure this is being called as an AFTER ROW trigger. Note:
+ * Make sure this is being called as an AFTER ROW trigger. Note:
* translatable error strings are shared with ri_triggers.c, so resist the
* temptation to fold the function name into them.
*/
@@ -86,7 +86,7 @@ unique_key_recheck(PG_FUNCTION_ARGS)
* If the new_row is now dead (ie, inserted and then deleted within our
* transaction), we can skip the check. However, we have to be careful,
* because this trigger gets queued only in response to index insertions;
- * which means it does not get queued for HOT updates. The row we are
+ * which means it does not get queued for HOT updates. The row we are
* called for might now be dead, but have a live HOT child, in which case
* we still need to make the check. Therefore we have to use
* heap_hot_search, not just HeapTupleSatisfiesVisibility as is done in
diff --git a/src/backend/commands/copy.c b/src/backend/commands/copy.c
index c6df89c09ea..e91cc7e02c5 100644
--- a/src/backend/commands/copy.c
+++ b/src/backend/commands/copy.c
@@ -176,7 +176,7 @@ typedef struct CopyStateData
/*
* Finally, raw_buf holds raw data read from the data source (file or
- * client connection). CopyReadLine parses this data sufficiently to
+ * client connection). CopyReadLine parses this data sufficiently to
* locate line boundaries, then transfers the data to line_buf and
* converts it. Note: we guarantee that there is a \0 at
* raw_buf[raw_buf_len].
@@ -202,7 +202,7 @@ typedef struct
* function call overhead in tight COPY loops.
*
* We must use "if (1)" because the usual "do {...} while(0)" wrapper would
- * prevent the continue/break processing from working. We end the "if (1)"
+ * prevent the continue/break processing from working. We end the "if (1)"
* with "else ((void) 0)" to ensure the "if" does not unintentionally match
* any "else" in the calling code, and to avoid any compiler warnings about
* empty statements. See http://www.cit.gu.edu.au/~anthony/info/C/C.macros.
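A minimal standalone sketch of the trick described above (not from copy.c; the macro name and data are invented): inside a "do {...} while (0)" wrapper the "continue" would bind to the macro's own loop and silently do nothing, whereas the "if (1) ... else ((void) 0)" form lets it reach the enclosing loop.

    #include <stdio.h>

    /* Hypothetical macro in the style described above: restart the
     * enclosing loop when a sentinel value is seen. */
    #define SKIP_SENTINEL(x) \
        if (1) \
        { \
            if ((x) == -1) \
                continue; \
        } else ((void) 0)

    int
    main(void)
    {
        int     data[] = {1, -1, 2};

        for (int i = 0; i < 3; i++)
        {
            SKIP_SENTINEL(data[i]);     /* "continue" targets this loop */
            printf("%d\n", data[i]);    /* prints 1 and 2 only */
        }
        return 0;
    }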
@@ -502,7 +502,7 @@ CopySendEndOfRow(CopyState cstate)
* CopyGetData reads data from the source (file or frontend)
*
* We attempt to read at least minread, and at most maxread, bytes from
- * the source. The actual number of bytes read is returned; if this is
+ * the source. The actual number of bytes read is returned; if this is
* less than minread, EOF was detected.
*
* Note: when copying from the frontend, we expect a proper EOF mark per
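That contract can be sketched as a generic file reader (an analogue only, with an invented name; the real CopyGetData also handles the frontend protocol):

    #include <stdio.h>

    /* Read at least minread and at most maxread bytes into buf; a
     * result smaller than minread means EOF (or error) was reached. */
    static size_t
    read_at_least(FILE *fp, char *buf, size_t minread, size_t maxread)
    {
        size_t  got = 0;

        while (got < minread)
        {
            size_t  n = fread(buf + got, 1, maxread - got, fp);

            if (n == 0)
                break;          /* EOF: caller sees got < minread */
            got += n;
        }
        return got;
    }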
@@ -719,7 +719,7 @@ CopyLoadRawBuf(CopyState cstate)
* we also support copying the output of an arbitrary SELECT query.
*
* If <pipe> is false, transfer is between the table and the file named
- * <filename>. Otherwise, transfer is between the table and our regular
+ * <filename>. Otherwise, transfer is between the table and our regular
* input/output stream. The latter could be either stdin/stdout or a
* socket, depending on whether we're running under Postmaster control.
*
@@ -1183,7 +1183,7 @@ BeginCopy(bool is_from,
errmsg("COPY (SELECT) WITH OIDS is not supported")));
/*
- * Run parse analysis and rewrite. Note this also acquires sufficient
+ * Run parse analysis and rewrite. Note this also acquires sufficient
* locks on the source table(s).
*
* Because the parser and planner tend to scribble on their input, we
@@ -1520,7 +1520,7 @@ CopyTo(CopyState cstate)
* Create a temporary memory context that we can reset once per row to
* recover palloc'd memory. This avoids any problems with leaks inside
* datatype output routines, and should be faster than retail pfree's
- * anyway. (We don't need a whole econtext as CopyFrom does.)
+ * anyway. (We don't need a whole econtext as CopyFrom does.)
*/
cstate->rowcontext = AllocSetContextCreate(CurrentMemoryContext,
"COPY TO",
@@ -2462,7 +2462,7 @@ NextCopyFrom(CopyState cstate, ExprContext *econtext,
* if client chooses to send that now.
*
* Note that we MUST NOT try to read more data in an old-protocol
- * copy, since there is no protocol-level EOF marker then. We
+ * copy, since there is no protocol-level EOF marker then. We
* could go either way for copy from file, but choose to throw
* error if there's data after the EOF marker, for consistency
* with the new-protocol case.
@@ -2524,7 +2524,7 @@ NextCopyFrom(CopyState cstate, ExprContext *econtext,
/*
* Now compute and insert any defaults available for the columns not
- * provided by the input data. Anything not processed here or above will
+ * provided by the input data. Anything not processed here or above will
* remain NULL.
*/
for (i = 0; i < num_defaults; i++)
@@ -2559,7 +2559,7 @@ EndCopyFrom(CopyState cstate)
* server encoding.
*
* Result is true if read was terminated by EOF, false if terminated
- * by newline. The terminating newline or EOF marker is not included
+ * by newline. The terminating newline or EOF marker is not included
* in the final value of line_buf.
*/
static bool
@@ -2714,7 +2714,7 @@ CopyReadLineText(CopyState cstate)
* of read-ahead and avoid the many calls to
* IF_NEED_REFILL_AND_NOT_EOF_CONTINUE(), but the COPY_OLD_FE protocol
* does not allow us to read too far ahead or we might read into the
- * next data, so we read-ahead only as far we know we can. One
+ * next data, so we read ahead only as far as we know we can. One
* optimization would be to read ahead four bytes here if
* cstate->copy_dest != COPY_OLD_FE, but it hardly seems worth it,
* considering the size of the buffer.
@@ -2724,7 +2724,7 @@ CopyReadLineText(CopyState cstate)
REFILL_LINEBUF;
/*
- * Try to read some more data. This will certainly reset
+ * Try to read some more data. This will certainly reset
* raw_buf_index to zero, and raw_buf_ptr must go with it.
*/
if (!CopyLoadRawBuf(cstate))
@@ -2782,7 +2782,7 @@ CopyReadLineText(CopyState cstate)
/*
* Updating the line count for embedded CR and/or LF chars is
* necessarily a little fragile - this test is probably about the
- * best we can do. (XXX it's arguable whether we should do this
+ * best we can do. (XXX it's arguable whether we should do this
* at all --- is cur_lineno a physical or logical count?)
*/
if (in_quote && c == (cstate->eol_type == EOL_NL ? '\n' : '\r'))
@@ -2961,7 +2961,7 @@ CopyReadLineText(CopyState cstate)
* after a backslash is special, so we skip over that second
* character too. If we didn't do that, \\. would be
* considered an end-of-copy marker, while in non-CSV mode it is a
- * literal backslash followed by a period. In CSV mode,
+ * literal backslash followed by a period. In CSV mode,
* backslashes are not special, so we want to process the
* character after the backslash just like a normal character,
* so we don't increment in those cases.
@@ -3064,7 +3064,7 @@ CopyReadAttributesText(CopyState cstate)
/*
* The de-escaped attributes will certainly not be longer than the input
* data line, so we can just force attribute_buf to be large enough and
- * then transfer data without any checks for enough space. We need to do
+ * then transfer data without any checks for enough space. We need to do
* it this way because enlarging attribute_buf mid-stream would invalidate
* pointers already stored into cstate->raw_fields[].
*/
@@ -3294,7 +3294,7 @@ CopyReadAttributesCSV(CopyState cstate)
/*
* The de-escaped attributes will certainly not be longer than the input
* data line, so we can just force attribute_buf to be large enough and
- * then transfer data without any checks for enough space. We need to do
+ * then transfer data without any checks for enough space. We need to do
* it this way because enlarging attribute_buf mid-stream would invalidate
* pointers already stored into cstate->raw_fields[].
*/
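The pitfall the two comments above guard against is the standard one: growing a buffer can move it. A tiny illustration in plain C (invented example, not backend code):

    #include <stdlib.h>
    #include <string.h>

    int
    main(void)
    {
        char   *buf = malloc(8);
        char   *field;

        strcpy(buf, "abc");
        field = buf;            /* pointer into buf, like raw_fields[] */
        buf = realloc(buf, 1024 * 1024);    /* the block may move... */
        /* ...and if it does, "field" now dangles; hence the strategy of
         * sizing the buffer once, up front, before storing pointers */
        (void) field;
        free(buf);
        return 0;
    }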
@@ -3509,7 +3509,7 @@ CopyAttributeOutText(CopyState cstate, char *string)
/*
* We have to grovel through the string searching for control characters
* and instances of the delimiter character. In most cases, though, these
- * are infrequent. To avoid overhead from calling CopySendData once per
+ * are infrequent. To avoid overhead from calling CopySendData once per
* character, we dump out all characters between escaped characters in a
* single call. The loop invariant is that the data from "start" to "ptr"
* can be sent literally, but hasn't yet been.
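A generic sketch of that loop invariant (invented function; the real code escapes more characters and sends through CopySendData):

    #include <stdio.h>

    static void
    send_escaped(FILE *out, const char *string, char delim)
    {
        const char *start = string;     /* start of the literal run */
        const char *ptr = string;

        while (*ptr)
        {
            char    c = *ptr;

            if (c == delim || c == '\\' || c == '\n')
            {
                /* flush the pending run in one call, then the escape */
                fwrite(start, 1, ptr - start, out);
                fputc('\\', out);
                fputc(c == '\n' ? 'n' : c, out);
                start = ++ptr;
            }
            else
                ptr++;
        }
        fwrite(start, 1, ptr - start, out);     /* trailing run */
    }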
diff --git a/src/backend/commands/dbcommands.c b/src/backend/commands/dbcommands.c
index 37bd0a483ea..a850582b872 100644
--- a/src/backend/commands/dbcommands.c
+++ b/src/backend/commands/dbcommands.c
@@ -264,7 +264,7 @@ createdb(const CreatedbStmt *stmt)
* To create a database, must have createdb privilege and must be able to
* become the target role (this does not imply that the target role itself
* must have createdb privilege). The latter provision guards against
- * "giveaway" attacks. Note that a superuser will always have both of
+ * "giveaway" attacks. Note that a superuser will always have both of
* these privileges a fortiori.
*/
if (!have_createdb_privilege())
@@ -394,7 +394,7 @@ createdb(const CreatedbStmt *stmt)
/*
* If we are trying to change the default tablespace of the template,
* we require that the template not have any files in the new default
- * tablespace. This is necessary because otherwise the copied
+ * tablespace. This is necessary because otherwise the copied
* database would contain pg_class rows that refer to its default
* tablespace both explicitly (by OID) and implicitly (as zero), which
* would cause problems. For example another CREATE DATABASE using
@@ -430,7 +430,7 @@ createdb(const CreatedbStmt *stmt)
}
/*
- * Check for db name conflict. This is just to give a more friendly error
+ * Check for db name conflict. This is just to give a more friendly error
* message than "unique index violation". There's a race condition but
* we're willing to accept the less friendly message in that case.
*/
@@ -494,7 +494,7 @@ createdb(const CreatedbStmt *stmt)
/*
* We deliberately set datacl to default (NULL), rather than copying it
- * from the template database. Copying it would be a bad idea when the
+ * from the template database. Copying it would be a bad idea when the
* owner is not the same as the template's owner.
*/
new_record_nulls[Anum_pg_database_datacl - 1] = true;
@@ -546,7 +546,7 @@ createdb(const CreatedbStmt *stmt)
*
* Inconsistency of this sort is inherent to all SnapshotNow scans, unless
* some lock is held to prevent concurrent updates of the rows being
- * sought. There should be a generic fix for that, but in the meantime
+ * sought. There should be a generic fix for that, but in the meantime
* it's worth fixing this case in particular because we are doing very
* heavyweight operations within the scan, so that the elapsed time for
* the scan is vastly longer than for most other catalog scans. That
@@ -1158,7 +1158,7 @@ movedb(const char *dbname, const char *tblspcname)
/*
* Use an ENSURE block to make sure we remove the debris if the copy fails
- * (eg, due to out-of-disk-space). This is not a 100% solution, because
+ * (eg, due to out-of-disk-space). This is not a 100% solution, because
* of the possibility of failure during transaction commit, but it should
* handle most scenarios.
*/
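The ENSURE block in question is the PG_ENSURE_ERROR_CLEANUP macro pair, which registers a callback that also runs on error exit. Roughly (a sketch of the pattern; the argument details in the real movedb code may differ):

    PG_ENSURE_ERROR_CLEANUP(movedb_failure_callback,
                            PointerGetDatum(&fparms));
    {
        /* ... copy the database directory into the new tablespace ... */
    }
    PG_END_ENSURE_ERROR_CLEANUP(movedb_failure_callback,
                                PointerGetDatum(&fparms));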
@@ -1625,7 +1625,7 @@ get_db_info(const char *name, LOCKMODE lockmode,
LockSharedObject(DatabaseRelationId, dbOid, 0, lockmode);
/*
- * And now, re-fetch the tuple by OID. If it's still there and still
+ * And now, re-fetch the tuple by OID. If it's still there and still
* the same name, we win; else, drop the lock and loop back to try
* again.
*/
diff --git a/src/backend/commands/define.c b/src/backend/commands/define.c
index eda3b7a9cee..22d4cd92601 100644
--- a/src/backend/commands/define.c
+++ b/src/backend/commands/define.c
@@ -196,7 +196,7 @@ defGetInt64(DefElem *def)
/*
* Values too large for int4 will be represented as Float
- * constants by the lexer. Accept these if they are valid int8
+ * constants by the lexer. Accept these if they are valid int8
* strings.
*/
return DatumGetInt64(DirectFunctionCall1(int8in,
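Outside the backend, the same accept-it-if-it-parses-as-int8 rule can be sketched with strtoll (a generic analogue; the real path goes through int8in as shown above):

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    int
    main(void)
    {
        const char *str = "5000000000"; /* too big for int4, valid int8 */
        char       *end;
        long long   val;

        errno = 0;
        val = strtoll(str, &end, 10);
        if (errno != 0 || end == str || *end != '\0')
            fprintf(stderr, "invalid int8 string: \"%s\"\n", str);
        else
            printf("parsed %lld\n", val);
        return 0;
    }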
diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c
index a770daf88d9..341cad0c927 100644
--- a/src/backend/commands/explain.c
+++ b/src/backend/commands/explain.c
@@ -172,7 +172,7 @@ ExplainQuery(ExplainStmt *stmt, const char *queryString,
* plancache.c.
*
* Because the rewriter and planner tend to scribble on the input, we make
- * a preliminary copy of the source querytree. This prevents problems in
+ * a preliminary copy of the source querytree. This prevents problems in
* the case that the EXPLAIN is in a portal or plpgsql function and is
* executed repeatedly. (See also the same hack in DECLARE CURSOR and
* PREPARE.) XXX FIXME someday.
@@ -465,7 +465,7 @@ ExplainOnePlan(PlannedStmt *plannedstmt, ExplainState *es,
* convert a QueryDesc's plan tree to text and append it to es->str
*
* The caller should have set up the options fields of *es, as well as
- * initializing the output buffer es->str. Other fields in *es are
+ * initializing the output buffer es->str. Other fields in *es are
* initialized here.
*
* NB: will not work on utility statements
@@ -2157,7 +2157,7 @@ ExplainXMLTag(const char *tagname, int flags, ExplainState *es)
/*
* Emit a JSON line ending.
*
- * JSON requires a comma after each property but the last. To facilitate this,
+ * JSON requires a comma after each property but the last. To facilitate this,
* in JSON format, the text emitted for each property begins just prior to the
* preceding line-break (and comma, if applicable).
*/
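A standalone sketch of that strategy (invented helper, not the ExplainState code): the separator is emitted in front of each property after the first, so no property needs to know whether another follows.

    #include <stdbool.h>
    #include <stdio.h>

    static void
    emit_property(FILE *out, bool *first, const char *name, const char *val)
    {
        if (*first)
            *first = false;
        else
            fputc(',', out);    /* the comma belongs to the newcomer */
        fprintf(out, "\n  \"%s\": \"%s\"", name, val);
    }

    int
    main(void)
    {
        bool    first = true;

        fputs("{", stdout);
        emit_property(stdout, &first, "Node Type", "Seq Scan");
        emit_property(stdout, &first, "Relation Name", "foo");
        fputs("\n}\n", stdout); /* valid JSON, commas in place */
        return 0;
    }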
@@ -2178,7 +2178,7 @@ ExplainJSONLineEnding(ExplainState *es)
* YAML lines are ordinarily indented by two spaces per indentation level.
* The text emitted for each property begins just prior to the preceding
* line-break, except for the first property in an unlabelled group, for which
- * it begins immediately after the "- " that introduces the group. The first
+ * it begins immediately after the "- " that introduces the group. The first
* property of the group appears on the same line as the opening "- ".
*/
static void
diff --git a/src/backend/commands/extension.c b/src/backend/commands/extension.c
index a3bfec85636..8391c2251ed 100644
--- a/src/backend/commands/extension.c
+++ b/src/backend/commands/extension.c
@@ -109,7 +109,7 @@ static void ApplyExtensionUpdates(Oid extensionOid,
/*
* get_extension_oid - given an extension name, look up the OID
*
- * If missing_ok is false, throw an error if extension name not found. If
+ * If missing_ok is false, throw an error if extension name not found. If
* true, just return InvalidOid.
*/
Oid
@@ -258,9 +258,9 @@ check_valid_extension_name(const char *extensionname)
errdetail("Extension names must not contain \"--\".")));
/*
- * No leading or trailing dash either. (We could probably allow this, but
+ * No leading or trailing dash either. (We could probably allow this, but
* it would require much care in filename parsing and would make filenames
- * visually if not formally ambiguous. Since there's no real-world use
+ * visually if not formally ambiguous. Since there's no real-world use
* case, let's just forbid it.)
*/
if (extensionname[0] == '-' || extensionname[namelen - 1] == '-')
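The reason the dashes must stay reserved is the script-filename convention, in which "--" separates the extension name from the version(s). A toy split (invented example; real script files carry an .sql suffix):

    #include <stdio.h>
    #include <string.h>

    int
    main(void)
    {
        /* update-script convention: extname--oldversion--newversion */
        char    fname[] = "hstore--1.0--1.1";
        char   *sep1 = strstr(fname, "--");
        char   *sep2 = sep1 ? strstr(sep1 + 2, "--") : NULL;

        if (sep1 && sep2)
        {
            *sep1 = '\0';
            *sep2 = '\0';
            /* a "--" inside the name itself would make this split
             * ambiguous, hence the restriction above */
            printf("extension %s: update %s -> %s\n",
                   fname, sep1 + 2, sep2 + 2);
        }
        return 0;
    }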
@@ -436,7 +436,7 @@ get_extension_script_filename(ExtensionControlFile *control,
/*
* Parse contents of primary or auxiliary control file, and fill in
- * fields of *control. We parse primary file if version == NULL,
+ * fields of *control. We parse primary file if version == NULL,
* else the optional auxiliary file for that version.
*
* Control files are supposed to be very short, half a dozen lines,
@@ -677,7 +677,7 @@ read_extension_script_file(const ExtensionControlFile *control,
* filename is used only to report errors.
*
* Note: it's tempting to just use SPI to execute the string, but that does
- * not work very well. The really serious problem is that SPI will parse,
+ * not work very well. The really serious problem is that SPI will parse,
* analyze, and plan the whole string before executing any of it; of course
* this fails if there are any plannable statements referring to objects
* created earlier in the script. A lesser annoyance is that SPI insists
@@ -852,7 +852,7 @@ execute_extension_script(Oid extensionOid, ExtensionControlFile *control,
/*
* Set creating_extension and related variables so that
* recordDependencyOnCurrentExtension and other functions do the right
- * things. On failure, ensure we reset these variables.
+ * things. On failure, ensure we reset these variables.
*/
creating_extension = true;
CurrentExtensionObject = extensionOid;
@@ -1096,7 +1096,7 @@ identify_update_path(ExtensionControlFile *control,
* is still good.
*
* Result is a List of names of versions to transition through (the initial
- * version is *not* included). Returns NIL if no such path.
+ * version is *not* included). Returns NIL if no such path.
*/
static List *
find_update_path(List *evi_list,
@@ -1197,7 +1197,7 @@ CreateExtension(CreateExtensionStmt *stmt)
check_valid_extension_name(stmt->extname);
/*
- * Check for duplicate extension name. The unique index on
+ * Check for duplicate extension name. The unique index on
* pg_extension.extname would catch this anyway, and serves as a backstop
* in case of race conditions; but this is a friendlier error message, and
* besides we need a check to support IF NOT EXISTS.
@@ -1364,7 +1364,7 @@ CreateExtension(CreateExtensionStmt *stmt)
{
/*
* The extension is not relocatable and the author gave us a schema
- * for it. We create the schema here if it does not already exist.
+ * for it. We create the schema here if it does not already exist.
*/
schemaName = control->schema;
schemaOid = get_namespace_oid(schemaName, true);
@@ -1685,7 +1685,7 @@ RemoveExtensionById(Oid extId)
/*
* This function lists the available extensions (one row per primary control
- * file in the control directory). We parse each control file and report the
+ * file in the control directory). We parse each control file and report the
* interesting fields.
*
* The system view pg_available_extensions provides a user interface to this
@@ -1794,7 +1794,7 @@ pg_available_extensions(PG_FUNCTION_ARGS)
/*
* This function lists the available extension versions (one row per
- * extension installation script). For each version, we parse the related
+ * extension installation script). For each version, we parse the related
* control file(s) and report the interesting fields.
*
* The system view pg_available_extension_versions provides a user interface
@@ -2582,7 +2582,7 @@ AlterExtensionNamespace(List *names, const char *newschema)
Oid dep_oldNspOid;
/*
- * Ignore non-membership dependencies. (Currently, the only other
+ * Ignore non-membership dependencies. (Currently, the only other
* case we could see here is a normal dependency from another
* extension.)
*/
@@ -2986,7 +2986,7 @@ ExecAlterExtensionContentsStmt(AlterExtensionContentsStmt *stmt)
/*
* Prevent a schema from being added to an extension if the schema
- * contains the extension. That would create a dependency loop.
+ * contains the extension. That would create a dependency loop.
*/
if (object.classId == NamespaceRelationId &&
object.objectId == get_extension_schema(extension.objectId))
diff --git a/src/backend/commands/foreigncmds.c b/src/backend/commands/foreigncmds.c
index 8213ec5f0f4..67572fa2f12 100644
--- a/src/backend/commands/foreigncmds.c
+++ b/src/backend/commands/foreigncmds.c
@@ -79,7 +79,7 @@ optionListToArray(List *options)
/*
- * Transform a list of DefElem into text array format. This is substantially
+ * Transform a list of DefElem into text array format. This is substantially
* the same thing as optionListToArray(), except we recognize SET/ADD/DROP
* actions for modifying an existing list of options, which is passed in
* Datum form as oldOptions. Also, if fdwvalidator isn't InvalidOid
@@ -122,7 +122,7 @@ transformGenericOptions(Oid catalogId,
/*
* It is possible to perform multiple SET/DROP actions on the same
- * option. The standard permits this, as long as the options to be
+ * option. The standard permits this, as long as the options to be
* added are unique. Note that an unspecified action is taken to be
* ADD.
*/
@@ -666,7 +666,7 @@ AlterForeignDataWrapper(AlterFdwStmt *stmt)
/*
* It could be that the options for the FDW, SERVER and USER MAPPING
- * are no longer valid with the new validator. Warn about this.
+ * are no longer valid with the new validator. Warn about this.
*/
if (OidIsValid(fdwvalidator))
ereport(WARNING,
diff --git a/src/backend/commands/functioncmds.c b/src/backend/commands/functioncmds.c
index 54b2054188e..89c3688515a 100644
--- a/src/backend/commands/functioncmds.c
+++ b/src/backend/commands/functioncmds.c
@@ -77,7 +77,7 @@ static void AlterFunctionOwner_internal(Relation rel, HeapTuple tup,
* allow a shell type to be used, or even created if the specified return type
* doesn't exist yet. (Without this, there's no way to define the I/O procs
* for a new type.) But SQL function creation won't cope, so error out if
- * the target language is SQL. (We do this here, not in the SQL-function
+ * the target language is SQL. (We do this here, not in the SQL-function
* validator, so as not to produce a NOTICE and then an ERROR for the same
* condition.)
*/
@@ -425,7 +425,7 @@ examine_parameter_list(List *parameters, Oid languageOid,
* FUNCTION and ALTER FUNCTION and return it via one of the out
* parameters. Returns true if the passed option was recognized. If
* the out parameter we were going to assign to points to non-NULL,
- * raise a duplicate-clause error. (We don't try to detect duplicate
+ * raise a duplicate-clause error. (We don't try to detect duplicate
* SET parameters though --- if you're redundant, the last one wins.)
*/
static bool
@@ -721,7 +721,7 @@ interpret_AS_clause(Oid languageOid, const char *languageName,
{
/*
* For "C" language, store the file name in probin and, when given,
- * the link symbol name in prosrc. If link symbol is omitted,
+ * the link symbol name in prosrc. If link symbol is omitted,
* substitute procedure name. We also allow link symbol to be
* specified as "-", since that was the habit in PG versions before
* 8.4, and there might be dump files out there that don't translate
@@ -1572,7 +1572,7 @@ CreateCast(CreateCastStmt *stmt)
/*
* Restricting the volatility of a cast function may or may not be a
* good idea in the abstract, but it definitely breaks many old
- * user-defined types. Disable this check --- tgl 2/1/03
+ * user-defined types. Disable this check --- tgl 2/1/03
*/
#ifdef NOT_USED
if (procstruct->provolatile == PROVOLATILE_VOLATILE)
@@ -1636,7 +1636,7 @@ CreateCast(CreateCastStmt *stmt)
/*
* We know that composite, enum and array types are never binary-
- * compatible with each other. They all have OIDs embedded in them.
+ * compatible with each other. They all have OIDs embedded in them.
*
* Theoretically you could build a user-defined base type that is
* binary-compatible with a composite, enum, or array type. But we
@@ -1665,7 +1665,7 @@ CreateCast(CreateCastStmt *stmt)
* We also disallow creating binary-compatibility casts involving
* domains. Casting from a domain to its base type is already
* allowed, and casting the other way ought to go through domain
- * coercion to permit constraint checking. Again, if you're intent on
+ * coercion to permit constraint checking. Again, if you're intent on
* having your own semantics for that, create a no-op cast function.
*
* NOTE: if we were to relax this, the above checks for composites
diff --git a/src/backend/commands/indexcmds.c b/src/backend/commands/indexcmds.c
index 1a0ee84963d..52e8aacf984 100644
--- a/src/backend/commands/indexcmds.c
+++ b/src/backend/commands/indexcmds.c
@@ -256,7 +256,7 @@ DefineIndex(Oid relationId,
}
/*
- * Force shared indexes into the pg_global tablespace. This is a bit of a
+ * Force shared indexes into the pg_global tablespace. This is a bit of a
* hack but seems simpler than marking them in the BKI commands. On the
* other hand, if it's not shared, don't allow it to be placed there.
*/
@@ -435,7 +435,7 @@ DefineIndex(Oid relationId,
/*
* For a concurrent build, it's important to make the catalog entries
* visible to other transactions before we start to build the index. That
- * will prevent them from making incompatible HOT updates. The new index
+ * will prevent them from making incompatible HOT updates. The new index
* will be marked not indisready and not indisvalid, so that no one else
* tries to either insert into it or use it for queries.
*
@@ -466,8 +466,8 @@ DefineIndex(Oid relationId,
* Now we must wait until no running transaction could have the table open
* with the old list of indexes. To do this, inquire which xacts
* currently would conflict with ShareLock on the table -- ie, which ones
- * have a lock that permits writing the table. Then wait for each of
- * these xacts to commit or abort. Note we do not need to worry about
+ * have a lock that permits writing the table. Then wait for each of
+ * these xacts to commit or abort. Note we do not need to worry about
* xacts that open the table for writing after this point; they will see
* the new index when they open it.
*
@@ -478,7 +478,7 @@ DefineIndex(Oid relationId,
* error out properly.
*
* Note: GetLockConflicts() never reports our own xid, hence we need not
- * check for that. Also, prepared xacts are not reported, which is fine
+ * check for that. Also, prepared xacts are not reported, which is fine
* since they certainly aren't going to do anything more.
*/
old_lockholders = GetLockConflicts(&heaplocktag, ShareLock);
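The returned array is then consumed by a wait loop of roughly this shape (a paraphrase of the code that follows in the real source; the exact wait primitive has varied across versions):

    /* The array is terminated by an invalid VirtualTransactionId; wait
     * for each conflicting transaction to commit or abort. */
    while (VirtualTransactionIdIsValid(*old_lockholders))
    {
        VirtualXactLockTableWait(*old_lockholders);
        old_lockholders++;
    }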
@@ -495,7 +495,7 @@ DefineIndex(Oid relationId,
* indexes. We have waited out all the existing transactions and any new
* transaction will have the new index in its list, but the index is still
* marked as "not-ready-for-inserts". The index is consulted while
- * deciding HOT-safety though. This arrangement ensures that no new HOT
+ * deciding HOT-safety though. This arrangement ensures that no new HOT
* chains can be created where the new tuple and the old tuple in the
* chain have different index keys.
*
@@ -561,7 +561,7 @@ DefineIndex(Oid relationId,
/*
* Now take the "reference snapshot" that will be used by validate_index()
- * to filter candidate tuples. Beware! There might still be snapshots in
+ * to filter candidate tuples. Beware! There might still be snapshots in
* use that treat some transaction as in-progress that our reference
* snapshot treats as committed. If such a recently-committed transaction
* deleted tuples in the table, we will not include them in the index; yet
@@ -596,7 +596,7 @@ DefineIndex(Oid relationId,
/*
* The index is now valid in the sense that it contains all currently
- * interesting tuples. But since it might not contain tuples deleted just
+ * interesting tuples. But since it might not contain tuples deleted just
* before the reference snap was taken, we have to wait out any
* transactions that might have older snapshots. Obtain a list of VXIDs
* of such transactions, and wait for them individually.
@@ -611,7 +611,7 @@ DefineIndex(Oid relationId,
*
* We can also exclude autovacuum processes and processes running manual
* lazy VACUUMs, because they won't be fazed by missing index entries
- * either. (Manual ANALYZEs, however, can't be excluded because they
+ * either. (Manual ANALYZEs, however, can't be excluded because they
* might be within transactions that are going to do arbitrary operations
* later.)
*
@@ -698,7 +698,7 @@ CheckMutability(Expr *expr)
{
/*
* First run the expression through the planner. This has a couple of
- * important consequences. First, function default arguments will get
+ * important consequences. First, function default arguments will get
* inserted, which may affect volatility (consider "default now()").
* Second, inline-able functions will get inlined, which may allow us to
* conclude that the function is really less volatile than it's marked. As
@@ -721,7 +721,7 @@ CheckMutability(Expr *expr)
* Checks that the given partial-index predicate is valid.
*
* This used to also constrain the form of the predicate to forms that
- * indxpath.c could do something with. However, that seems overly
+ * indxpath.c could do something with. However, that seems overly
* restrictive. One useful application of partial indexes is to apply
* a UNIQUE constraint across a subset of a table, and in that scenario
* any evaluatable predicate will work. So accept any predicate here
@@ -839,7 +839,7 @@ ComputeIndexAttrs(IndexInfo *indexInfo,
attcollation = exprCollation(expr);
/*
- * Strip any top-level COLLATE clause. This ensures that we treat
+ * Strip any top-level COLLATE clause. This ensures that we treat
* "x COLLATE y" and "(x COLLATE y)" alike.
*/
while (IsA(expr, CollateExpr))
@@ -1051,7 +1051,7 @@ GetIndexOpClass(List *opclass, Oid attrType,
* 2000/07/30
*
* Release 7.2 renames timestamp_ops to timestamptz_ops, so suppress that
- * too for awhile. I'm starting to think we need a better approach. tgl
+ * too for awhile. I'm starting to think we need a better approach. tgl
* 2000/10/01
*
* Release 8.0 removes bigbox_ops (which was dead code for a long while
@@ -1120,7 +1120,7 @@ GetIndexOpClass(List *opclass, Oid attrType,
NameListToString(opclass), accessMethodName)));
/*
- * Verify that the index operator class accepts this datatype. Note we
+ * Verify that the index operator class accepts this datatype. Note we
* will accept binary compatibility.
*/
opClassId = HeapTupleGetOid(tuple);
@@ -1141,7 +1141,7 @@ GetIndexOpClass(List *opclass, Oid attrType,
* GetDefaultOpClass
*
* Given the OIDs of a datatype and an access method, find the default
- * operator class, if any. Returns InvalidOid if there is none.
+ * operator class, if any. Returns InvalidOid if there is none.
*/
Oid
GetDefaultOpClass(Oid type_id, Oid am_id)
@@ -1236,7 +1236,7 @@ GetDefaultOpClass(Oid type_id, Oid am_id)
* Create a name for an implicitly created index, sequence, constraint, etc.
*
* The parameters are typically: the original table name, the original field
- * name, and a "type" string (such as "seq" or "pkey"). The field name
+ * name, and a "type" string (such as "seq" or "pkey"). The field name
* and/or type can be NULL if not relevant.
*
* The result is a palloc'd string.
@@ -1244,7 +1244,7 @@ GetDefaultOpClass(Oid type_id, Oid am_id)
* The basic result we want is "name1_name2_label", omitting "_name2" or
* "_label" when those parameters are NULL. However, we must generate
* a name with less than NAMEDATALEN characters! So, we truncate one or
- * both names if necessary to make a short-enough string. The label part
+ * both names if necessary to make a short-enough string. The label part
* is never truncated (so it had better be reasonably short).
*
* The caller is responsible for checking uniqueness of the generated
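A simplified sketch of that truncation rule (invented helper; the real makeObjectName() also handles NULL parts and multibyte character boundaries):

    #include <stdio.h>
    #include <string.h>

    #define NAMEDATALEN 64      /* as in PostgreSQL */

    /* Shave the longer of the two names until "name1_name2_label" fits
     * in NAMEDATALEN-1 bytes; the label is never truncated, so it is
     * assumed to be short. */
    static void
    make_object_name(char *dst, const char *name1, const char *name2,
                     const char *label)
    {
        int     len1 = strlen(name1);
        int     len2 = strlen(name2);
        int     budget = NAMEDATALEN - 1 - (int) strlen(label) - 2;

        while (len1 + len2 > budget)
        {
            if (len1 > len2)
                len1--;
            else
                len2--;
        }
        snprintf(dst, NAMEDATALEN, "%.*s_%.*s_%s",
                 len1, name1, len2, name2, label);
    }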
@@ -1439,7 +1439,7 @@ ChooseIndexNameAddition(List *colnames)
/*
* Select the actual names to be used for the columns of an index, given the
- * list of IndexElems for the columns. This is mostly about ensuring the
+ * list of IndexElems for the columns. This is mostly about ensuring the
* names are unique so we don't get a conflicting-attribute-names error.
*
* Returns a List of plain strings (char *, not String nodes).
diff --git a/src/backend/commands/opclasscmds.c b/src/backend/commands/opclasscmds.c
index 8bad626c834..ee004a77272 100644
--- a/src/backend/commands/opclasscmds.c
+++ b/src/backend/commands/opclasscmds.c
@@ -390,7 +390,7 @@ DefineOpClass(CreateOpClassStmt *stmt)
* A minimum expectation therefore is that the caller have execute
* privilege with grant option. Since we don't have a way to make the
* opclass go away if the grant option is revoked, we choose instead to
- * require ownership of the functions. It's also not entirely clear what
+ * require ownership of the functions. It's also not entirely clear what
* permissions should be required on the datatype, but ownership seems
* like a safe choice.
*
@@ -674,7 +674,7 @@ DefineOpClass(CreateOpClassStmt *stmt)
opclassoid, procedures, false);
/*
- * Create dependencies for the opclass proper. Note: we do not create a
+ * Create dependencies for the opclass proper. Note: we do not create a
* dependency link to the AM, because we don't currently support DROP
* ACCESS METHOD.
*/
@@ -1088,7 +1088,7 @@ assignOperTypes(OpFamilyMember *member, Oid amoid, Oid typeoid)
if (OidIsValid(member->sortfamily))
{
/*
- * Ordering op, check index supports that. (We could perhaps also
+ * Ordering op, check index supports that. (We could perhaps also
* check that the operator returns a type supported by the sortfamily,
* but that seems more trouble than it's worth here. If it does not,
* the operator will never be matchable to any ORDER BY clause, but no
@@ -2127,7 +2127,7 @@ AlterOpFamilyOwner_oid(Oid opfamilyOid, Oid newOwnerId)
}
/*
- * The first parameter is pg_opfamily, opened and suitably locked. The second
+ * The first parameter is pg_opfamily, opened and suitably locked. The second
* parameter is a copy of the tuple from pg_opfamily we want to modify.
*/
static void
diff --git a/src/backend/commands/operatorcmds.c b/src/backend/commands/operatorcmds.c
index c99de4b240f..f1e31366e87 100644
--- a/src/backend/commands/operatorcmds.c
+++ b/src/backend/commands/operatorcmds.c
@@ -200,7 +200,7 @@ DefineOperator(List *names, List *parameters)
functionOid = LookupFuncName(functionName, nargs, typeId, false);
/*
- * We require EXECUTE rights for the function. This isn't strictly
+ * We require EXECUTE rights for the function. This isn't strictly
* necessary, since EXECUTE will be checked at any attempted use of the
* operator, but it seems like a good idea anyway.
*/
diff --git a/src/backend/commands/portalcmds.c b/src/backend/commands/portalcmds.c
index 314f91501df..1621465838c 100644
--- a/src/backend/commands/portalcmds.c
+++ b/src/backend/commands/portalcmds.c
@@ -4,7 +4,7 @@
* Utility commands affecting portals (that is, SQL cursor commands)
*
* Note: see also tcop/pquery.c, which implements portal operations for
- * the FE/BE protocol. This module uses pquery.c for some operations.
+ * the FE/BE protocol. This module uses pquery.c for some operations.
* And both modules depend on utils/mmgr/portalmem.c, which controls
* storage management for portals (but doesn't run any queries in them).
*
@@ -89,7 +89,7 @@ PerformCursorOpen(PlannedStmt *stmt, ParamListInfo params,
/*----------
* Also copy the outer portal's parameter list into the inner portal's
- * memory context. We want to pass down the parameter values in case we
+ * memory context. We want to pass down the parameter values in case we
* had a command like
* DECLARE c CURSOR FOR SELECT ... WHERE foo = $1
* This will have been parsed using the outer parameter set and the
@@ -106,7 +106,7 @@ PerformCursorOpen(PlannedStmt *stmt, ParamListInfo params,
*
* If the user didn't specify a SCROLL type, allow or disallow scrolling
* based on whether it would require any additional runtime overhead to do
- * so. Also, we disallow scrolling for FOR UPDATE cursors.
+ * so. Also, we disallow scrolling for FOR UPDATE cursors.
*/
portal->cursorOptions = cstmt->options;
if (!(portal->cursorOptions & (CURSOR_OPT_SCROLL | CURSOR_OPT_NO_SCROLL)))
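The branch that follows this test in the real source fills in the default, approximately (a paraphrase; details may differ between versions):

    {
        /* Allow SCROLL by default only when the plan can already run
         * backwards for free, and the query is not FOR UPDATE/SHARE. */
        if (stmt->rowMarks == NIL &&
            ExecSupportsBackwardScan(stmt->planTree))
            portal->cursorOptions |= CURSOR_OPT_SCROLL;
        else
            portal->cursorOptions |= CURSOR_OPT_NO_SCROLL;
    }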
@@ -365,7 +365,7 @@ PersistHoldablePortal(Portal portal)
ExecutorRewind(queryDesc);
/*
- * Change the destination to output to the tuplestore. Note we tell
+ * Change the destination to output to the tuplestore. Note we tell
* the tuplestore receiver to detoast all data passed through it.
*/
queryDesc->dest = CreateDestReceiver(DestTuplestore);
diff --git a/src/backend/commands/prepare.c b/src/backend/commands/prepare.c
index dfa2ab00262..e072f690243 100644
--- a/src/backend/commands/prepare.c
+++ b/src/backend/commands/prepare.c
@@ -436,7 +436,7 @@ InitQueryHashTable(void)
* copy.
*
* Exception: commandTag is presumed to be a pointer to a constant string,
- * or possibly NULL, so it need not be copied. Note that commandTag should
+ * or possibly NULL, so it need not be copied. Note that commandTag should
* be NULL only if the original query (before rewriting) was empty.
*/
void
@@ -550,7 +550,7 @@ FetchPreparedStatementResultDesc(PreparedStatement *stmt)
/*
* Given a prepared statement that returns tuples, extract the query
- * targetlist. Returns NIL if the statement doesn't have a determinable
+ * targetlist. Returns NIL if the statement doesn't have a determinable
* targetlist.
*
* Note: this is pretty ugly, but since it's only used in corner cases like
diff --git a/src/backend/commands/proclang.c b/src/backend/commands/proclang.c
index 98770c5a61a..623b0f642a9 100644
--- a/src/backend/commands/proclang.c
+++ b/src/backend/commands/proclang.c
@@ -261,7 +261,7 @@ CreateProceduralLanguage(CreatePLangStmt *stmt)
if (funcrettype != LANGUAGE_HANDLEROID)
{
/*
- * We allow OPAQUE just so we can load old dump files. When we
+ * We allow OPAQUE just so we can load old dump files. When we
* see a handler function declared OPAQUE, change it to
* LANGUAGE_HANDLER. (This is probably obsolete and removable?)
*/
diff --git a/src/backend/commands/schemacmds.c b/src/backend/commands/schemacmds.c
index 671b17dba1f..1041e960d8c 100644
--- a/src/backend/commands/schemacmds.c
+++ b/src/backend/commands/schemacmds.c
@@ -65,7 +65,7 @@ CreateSchemaCommand(CreateSchemaStmt *stmt, const char *queryString)
* To create a schema, must have schema-create privilege on the current
* database and must be able to become the target role (this does not
* imply that the target role itself must have create-schema privilege).
- * The latter provision guards against "giveaway" attacks. Note that a
+ * The latter provision guards against "giveaway" attacks. Note that a
* superuser will always have both of these privileges a fortiori.
*/
aclresult = pg_database_aclcheck(MyDatabaseId, saved_uid, ACL_CREATE);
@@ -113,7 +113,7 @@ CreateSchemaCommand(CreateSchemaStmt *stmt, const char *queryString)
/*
* Examine the list of commands embedded in the CREATE SCHEMA command, and
* reorganize them into a sequentially executable order with no forward
- * references. Note that the result is still a list of raw parsetrees ---
+ * references. Note that the result is still a list of raw parsetrees ---
* we cannot, in general, run parse analysis on one statement until we
* have actually executed the prior ones.
*/
diff --git a/src/backend/commands/sequence.c b/src/backend/commands/sequence.c
index 47d76a3c866..c4337dd50ea 100644
--- a/src/backend/commands/sequence.c
+++ b/src/backend/commands/sequence.c
@@ -61,7 +61,7 @@ typedef struct sequence_magic
* rely on the relcache, since it's only, well, a cache, and may decide to
* discard entries.)
*
- * XXX We use linear search to find pre-existing SeqTable entries. This is
+ * XXX We use linear search to find pre-existing SeqTable entries. This is
* good when only a small number of sequences are touched in a session, but
* would suck with many different sequences. Perhaps use a hashtable someday.
*/
@@ -280,7 +280,7 @@ ResetSequence(Oid seq_relid)
seq->log_cnt = 0;
/*
- * Create a new storage file for the sequence. We want to keep the
+ * Create a new storage file for the sequence. We want to keep the
* sequence's relfrozenxid at 0, since it won't contain any unfrozen XIDs.
*/
RelationSetNewRelfilenode(seq_rel, InvalidTransactionId);
@@ -330,8 +330,8 @@ fill_seq_with_data(Relation rel, HeapTuple tuple)
* Two special hacks here:
*
* 1. Since VACUUM does not process sequences, we have to force the tuple
- * to have xmin = FrozenTransactionId now. Otherwise it would become
- * invisible to SELECTs after 2G transactions. It is okay to do this
+ * to have xmin = FrozenTransactionId now. Otherwise it would become
+ * invisible to SELECTs after 2G transactions. It is okay to do this
* because if the current transaction aborts, no other xact will ever
* examine the sequence tuple anyway.
*
@@ -569,7 +569,7 @@ nextval_internal(Oid relid)
}
/*
- * Decide whether we should emit a WAL log record. If so, force up the
+ * Decide whether we should emit a WAL log record. If so, force up the
* fetch count to grab SEQ_LOG_VALS more values than we actually need to
* cache. (These will then be usable without logging.)
*
@@ -940,7 +940,7 @@ setval3_oid(PG_FUNCTION_ARGS)
* Open the sequence and acquire AccessShareLock if needed
*
* If we haven't touched the sequence already in this transaction,
- * we need to acquire AccessShareLock. We arrange for the lock to
+ * we need to acquire AccessShareLock. We arrange for the lock to
* be owned by the top transaction, so that we don't need to do it
* more than once per xact.
*/
@@ -1034,7 +1034,7 @@ init_sequence(Oid relid, SeqTable *p_elm, Relation *p_rel)
/*
* If the sequence has been transactionally replaced since we last saw it,
- * discard any cached-but-unissued values. We do not touch the currval()
+ * discard any cached-but-unissued values. We do not touch the currval()
* state, however.
*/
if (seqrel->rd_rel->relfilenode != elm->filenode)
diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c
index ae62c698499..02736eef7e9 100644
--- a/src/backend/commands/tablecmds.c
+++ b/src/backend/commands/tablecmds.c
@@ -511,7 +511,7 @@ DefineRelation(CreateStmt *stmt, char relkind, Oid ownerId)
&inheritOids, &old_constraints, &parentOidCount);
/*
- * Create a tuple descriptor from the relation schema. Note that this
+ * Create a tuple descriptor from the relation schema. Note that this
* deals with column names, types, and NOT NULL constraints, but not
* default values or CHECK constraints; we handle those below.
*/
@@ -611,7 +611,7 @@ DefineRelation(CreateStmt *stmt, char relkind, Oid ownerId)
CommandCounterIncrement();
/*
- * Open the new relation and acquire exclusive lock on it. This isn't
+ * Open the new relation and acquire exclusive lock on it. This isn't
* really necessary for locking out other backends (since they can't see
* the new rel anyway until we commit), but it keeps the lock manager from
* complaining about deadlock risks.
@@ -905,10 +905,10 @@ ExecuteTruncate(TruncateStmt *stmt)
}
/*
- * In CASCADE mode, suck in all referencing relations as well. This
+ * In CASCADE mode, suck in all referencing relations as well. This
* requires multiple iterations to find indirectly-dependent relations. At
* each phase, we need to exclusive-lock new rels before looking for their
- * dependencies, else we might miss something. Also, we check each rel as
+ * dependencies, else we might miss something. Also, we check each rel as
* soon as we open it, to avoid a faux pas such as holding lock for a long
* time on a rel we have no permissions for.
*/
@@ -1126,7 +1126,7 @@ ExecuteTruncate(TruncateStmt *stmt)
}
/*
- * Check that a given rel is safe to truncate. Subroutine for ExecuteTruncate
+ * Check that a given rel is safe to truncate. Subroutine for ExecuteTruncate
*/
static void
truncate_check_rel(Relation rel)
@@ -1550,7 +1550,7 @@ MergeAttributes(List *schema, List *supers, char relpersistence,
/*
* Now copy the CHECK constraints of this parent, adjusting attnos
- * using the completed newattno[] map. Identically named constraints
+ * using the completed newattno[] map. Identically named constraints
* are merged if possible, else we throw error.
*/
if (constr && constr->num_check > 0)
@@ -1605,7 +1605,7 @@ MergeAttributes(List *schema, List *supers, char relpersistence,
/*
* Close the parent rel, but keep our AccessShareLock on it until xact
- * commit. That will prevent someone else from deleting or ALTERing
+ * commit. That will prevent someone else from deleting or ALTERing
* the parent before the child is committed.
*/
heap_close(relation, NoLock);
@@ -2088,7 +2088,7 @@ renameatt_internal(Oid myrelid,
oldattname)));
/*
- * if the attribute is inherited, forbid the renaming. if this is a
+ * If the attribute is inherited, forbid the renaming. If this is a
* top-level call to renameatt(), then expected_parents will be 0, so the
* effect of this code will be to prohibit the renaming if the attribute
* is inherited at all. If this is a recursive call to renameatt(),
@@ -2250,7 +2250,7 @@ RenameRelationInternal(Oid myrelid, const char *newrelname, Oid namespaceId)
newrelname)));
/*
- * Update pg_class tuple with new relname. (Scribbling on reltup is OK
+ * Update pg_class tuple with new relname. (Scribbling on reltup is OK
* because it's a copy...)
*/
namestrcpy(&(relform->relname), newrelname);
@@ -2303,7 +2303,7 @@ RenameRelationInternal(Oid myrelid, const char *newrelname, Oid namespaceId)
* We also reject these commands if there are any pending AFTER trigger events
* for the rel. This is certainly necessary for the rewriting variants of
* ALTER TABLE, because they don't preserve tuple TIDs and so the pending
- * events would try to fetch the wrong tuples. It might be overly cautious
+ * events would try to fetch the wrong tuples. It might be overly cautious
* in other cases, but again it seems better to err on the side of paranoia.
*
* REINDEX calls this with "rel" referencing the index to be rebuilt; here
@@ -2346,23 +2346,23 @@ CheckTableNotInUse(Relation rel, const char *stmt)
* 3. Scan table(s) to check new constraints, and optionally recopy
* the data into new table(s).
* Phase 3 is not performed unless one or more of the subcommands requires
- * it. The intention of this design is to allow multiple independent
+ * it. The intention of this design is to allow multiple independent
* updates of the table schema to be performed with only one pass over the
* data.
*
- * ATPrepCmd performs phase 1. A "work queue" entry is created for
+ * ATPrepCmd performs phase 1. A "work queue" entry is created for
* each table to be affected (there may be multiple affected tables if the
* commands traverse a table inheritance hierarchy). Also we do preliminary
* validation of the subcommands, including parse transformation of those
* expressions that need to be evaluated with respect to the old table
* schema.
*
- * ATRewriteCatalogs performs phase 2 for each affected table. (Note that
+ * ATRewriteCatalogs performs phase 2 for each affected table. (Note that
* phases 2 and 3 normally do no explicit recursion, since phase 1 already
* did it --- although some subcommands have to recurse in phase 2 instead.)
* Certain subcommands need to be performed before others to avoid
* unnecessary conflicts; for example, DROP COLUMN should come before
- * ADD COLUMN. Therefore phase 1 divides the subcommands into multiple
+ * ADD COLUMN. Therefore phase 1 divides the subcommands into multiple
* lists, one for each logical "pass" of phase 2.
*
* ATRewriteTables performs phase 3 for those tables that need it.
@@ -2931,7 +2931,7 @@ ATPrepCmd(List **wqueue, Relation rel, AlterTableCmd *cmd,
/*
* ATRewriteCatalogs
*
- * Traffic cop for ALTER TABLE Phase 2 operations. Subcommands are
+ * Traffic cop for ALTER TABLE Phase 2 operations. Subcommands are
* dispatched in a "safe" execution order (designed to avoid unnecessary
* conflicts).
*/
@@ -3504,7 +3504,7 @@ ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap, LOCKMODE lockmode)
{
/*
* All predicate locks on the tuples or pages are about to be made
- * invalid, because we move tuples around. Promote them to
+ * invalid, because we move tuples around. Promote them to
* relation locks.
*/
TransferPredicateLocksToHeapRelation(oldrel);
@@ -4029,7 +4029,7 @@ find_typed_table_dependencies(Oid typeOid, const char *typeName, DropBehavior be
*
* Check whether a type is suitable for CREATE TABLE OF/ALTER TABLE OF. If it
* isn't suitable, throw an error. Currently, we require that the type
- * originated with CREATE TYPE AS. We could support any row type, but doing so
+ * originated with CREATE TYPE AS. We could support any row type, but doing so
* would require handling a number of extra corner cases in the DDL commands.
*/
void
@@ -4048,7 +4048,7 @@ check_of_type(HeapTuple typetuple)
/*
* Close the parent rel, but keep our AccessShareLock on it until xact
- * commit. That will prevent someone else from deleting or ALTERing
+ * commit. That will prevent someone else from deleting or ALTERing
* the type before the typed table creation/conversion commits.
*/
relation_close(typeRelation, NoLock);
@@ -4475,7 +4475,7 @@ add_column_collation_dependency(Oid relid, int32 attnum, Oid collid)
/*
* ALTER TABLE SET WITH OIDS
*
- * Basically this is an ADD COLUMN for the special OID column. We have
+ * Basically this is an ADD COLUMN for the special OID column. We have
* to cons up a ColumnDef node because the ADD COLUMN code needs one.
*/
static void
@@ -4918,7 +4918,7 @@ ATExecSetStorage(Relation rel, const char *colName, Node *newValue, LOCKMODE loc
*
* DROP COLUMN cannot use the normal ALTER TABLE recursion mechanism,
* because we have to decide at runtime whether to recurse or not depending
- * on whether attinhcount goes to zero or not. (We can't check this in a
+ * on whether attinhcount goes to zero or not. (We can't check this in a
* static pre-pass because it won't handle multiple inheritance situations
* correctly.)
*/
@@ -5204,7 +5204,7 @@ ATExecAddIndexConstraint(AlteredTableInfo *tab, Relation rel,
elog(ERROR, "index \"%s\" is not unique", indexName);
/*
- * Determine name to assign to constraint. We require a constraint to
+ * Determine name to assign to constraint. We require a constraint to
* have the same name as the underlying index; therefore, use the index's
* existing name as the default constraint name, and if the user
* explicitly gives some other name for the constraint, rename the index
@@ -5436,7 +5436,7 @@ ATAddCheckConstraint(List **wqueue, AlteredTableInfo *tab, Relation rel,
/*
* Add a foreign-key constraint to a single table
*
- * Subroutine for ATExecAddConstraint. Must already hold exclusive
+ * Subroutine for ATExecAddConstraint. Must already hold exclusive
* lock on the rel, and have done appropriate validity checks for it.
* We do permissions checks here, however.
*/
@@ -5573,7 +5573,7 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel,
*
* Note that we have to be careful about the difference between the actual
* PK column type and the opclass' declared input type, which might be
- * only binary-compatible with it. The declared opcintype is the right
+ * only binary-compatible with it. The declared opcintype is the right
* thing to probe pg_amop with.
*/
if (numfks != numpks)
@@ -5868,10 +5868,10 @@ transformColumnNameList(Oid relId, List *colList,
* transformFkeyGetPrimaryKey -
*
* Look up the names, attnums, and types of the primary key attributes
- * for the pkrel. Also return the index OID and index opclasses of the
+ * for the pkrel. Also return the index OID and index opclasses of the
* index supporting the primary key.
*
- * All parameters except pkrel are output parameters. Also, the function
+ * All parameters except pkrel are output parameters. Also, the function
* return value is the number of attributes in the primary key.
*
* Used when the column list in the REFERENCES specification is omitted.
@@ -5911,7 +5911,7 @@ transformFkeyGetPrimaryKey(Relation pkrel, Oid *indexOid,
if (indexStruct->indisprimary && IndexIsValid(indexStruct))
{
/*
- * Refuse to use a deferrable primary key. This is per SQL spec,
+ * Refuse to use a deferrable primary key. This is per SQL spec,
* and there would be a lot of interesting semantic problems if we
* tried to allow it.
*/
@@ -6740,7 +6740,7 @@ ATPrepAlterColumnType(List **wqueue,
tab->relkind == RELKIND_FOREIGN_TABLE)
{
/*
- * For composite types, do this check now. Tables will check it later
+ * For composite types, do this check now. Tables will check it later
* when the table is being rewritten.
*/
find_composite_type_dependencies(rel->rd_rel->reltype, rel, NULL);
@@ -6749,7 +6749,7 @@ ATPrepAlterColumnType(List **wqueue,
ReleaseSysCache(tuple);
/*
- * The recursion case is handled by ATSimpleRecursion. However, if we are
+ * The recursion case is handled by ATSimpleRecursion. However, if we are
* told not to recurse, there had better not be any child tables; else the
* alter would put them out of step.
*/
@@ -6858,7 +6858,7 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel,
*
* We remove any implicit coercion steps at the top level of the old
* default expression; this has been agreed to satisfy the principle of
- * least surprise. (The conversion to the new column type should act like
+ * least surprise. (The conversion to the new column type should act like
* it started from what the user sees as the stored expression, and the
* implicit coercions aren't going to be shown.)
*/
@@ -6887,7 +6887,7 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel,
* and record enough information to let us recreate the objects.
*
* The actual recreation does not happen here, but only after we have
- * performed all the individual ALTER TYPE operations. We have to save
+ * performed all the individual ALTER TYPE operations. We have to save
* the info before executing ALTER TYPE, though, else the deparser will
* get confused.
*
@@ -7016,7 +7016,7 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel,
* used in the trigger's WHEN condition. The first case would
* not require any extra work, but the second case would
* require updating the WHEN expression, which will take a
- * significant amount of new code. Since we can't easily tell
+ * significant amount of new code. Since we can't easily tell
* which case applies, we punt for both. FIXME someday.
*/
ereport(ERROR,
@@ -7189,7 +7189,7 @@ ATPostAlterTypeCleanup(List **wqueue, AlteredTableInfo *tab, LOCKMODE lockmode)
/*
* Re-parse the index and constraint definitions, and attach them to the
- * appropriate work queue entries. We do this before dropping because in
+ * appropriate work queue entries. We do this before dropping because in
* the case of a FOREIGN KEY constraint, we might not yet have exclusive
* lock on the table the constraint is attached to, and we need to get
* that before dropping. It's safe because the parser won't actually look
@@ -8105,7 +8105,7 @@ copy_relation_data(SMgrRelation src, SMgrRelation dst,
log_newpage(&dst->smgr_rnode.node, forkNum, blkno, page);
/*
- * Now write the page. We say isTemp = true even if it's not a temp
+ * Now write the page. We say isTemp = true even if it's not a temp
* rel, because there's no need for smgr to schedule an fsync for this
* write; we'll do it ourselves below.
*/
@@ -8289,7 +8289,7 @@ ATExecAddInherit(Relation child_rel, RangeVar *parent, LOCKMODE lockmode)
MergeConstraintsIntoExisting(child_rel, parent_rel);
/*
- * OK, it looks valid. Make the catalog entries that show inheritance.
+ * OK, it looks valid. Make the catalog entries that show inheritance.
*/
StoreCatalogInheritance1(RelationGetRelid(child_rel),
RelationGetRelid(parent_rel),
@@ -8744,7 +8744,7 @@ ATExecDropInherit(Relation rel, RangeVar *parent, LOCKMODE lockmode)
* Drop the dependency created by StoreCatalogInheritance1 (CREATE TABLE
* INHERITS/ALTER TABLE INHERIT -- refclassid will be RelationRelationId) or
* heap_create_with_catalog (CREATE TABLE OF/ALTER TABLE OF -- refclassid will
- * be TypeRelationId). There's no convenient way to do this, so go trawling
+ * be TypeRelationId). There's no convenient way to do this, so go trawling
* through pg_depend.
*/
static void
@@ -8927,7 +8927,7 @@ ATExecAddOf(Relation rel, const TypeName *ofTypename, LOCKMODE lockmode)
/*
* ALTER TABLE NOT OF
*
- * Detach a typed table from its originating type. Just clear reloftype and
+ * Detach a typed table from its originating type. Just clear reloftype and
* remove the dependency.
*/
static void
@@ -9527,7 +9527,7 @@ AtEOXact_on_commit_actions(bool isCommit)
* Post-subcommit or post-subabort cleanup for ON COMMIT management.
*
* During subabort, we can immediately remove entries created during this
- * subtransaction. During subcommit, just relabel entries marked during
+ * subtransaction. During subcommit, just relabel entries marked during
* this subtransaction as being the parent's responsibility.
*/
void
diff --git a/src/backend/commands/tablespace.c b/src/backend/commands/tablespace.c
index 82d4fbbcf53..2003003e2c5 100644
--- a/src/backend/commands/tablespace.c
+++ b/src/backend/commands/tablespace.c
@@ -31,7 +31,7 @@
* To allow CREATE DATABASE to give a new database a default tablespace
* that's different from the template database's default, we make the
* provision that a zero in pg_class.reltablespace means the database's
- * default tablespace. Without this, CREATE DATABASE would have to go in
+ * default tablespace. Without this, CREATE DATABASE would have to go in
* and munge the system catalogs of the new database.
*
*
@@ -273,7 +273,7 @@ CreateTableSpace(CreateTableSpaceStmt *stmt)
/*
* Check that location isn't too long. Remember that we're going to append
- * 'PG_XXX/<dboid>/<relid>.<nnn>'. FYI, we never actually reference the
+ * 'PG_XXX/<dboid>/<relid>.<nnn>'. FYI, we never actually reference the
* whole path, but mkdir() uses the first two parts.
*/
if (strlen(location) + 1 + strlen(TABLESPACE_VERSION_DIRECTORY) + 1 +
@@ -472,7 +472,7 @@ DropTableSpace(DropTableSpaceStmt *stmt)
* Not all files deleted? However, there can be lingering empty files
* in the directories, left behind by for example DROP TABLE, that
* have been scheduled for deletion at next checkpoint (see comments
- * in mdunlink() for details). We could just delete them immediately,
+ * in mdunlink() for details). We could just delete them immediately,
* but we can't tell them apart from important data files that we
* mustn't delete. So instead, we force a checkpoint which will clean
* out any lingering files, and try again.
@@ -550,7 +550,7 @@ create_tablespace_directories(const char *location, const Oid tablespaceoid)
TABLESPACE_VERSION_DIRECTORY);
/*
- * Attempt to coerce target directory to safe permissions. If this fails,
+ * Attempt to coerce target directory to safe permissions. If this fails,
* it doesn't exist or has the wrong owner.
*/
if (chmod(location, S_IRWXU) != 0)
@@ -688,7 +688,7 @@ destroy_tablespace_directories(Oid tablespaceoid, bool redo)
*
* If redo is true then ENOENT is a likely outcome here, and we allow it
* to pass without comment. In normal operation we still allow it, but
- * with a warning. This is because even though ProcessUtility disallows
+ * with a warning. This is because even though ProcessUtility disallows
* DROP TABLESPACE in a transaction block, it's possible that a previous
* DROP failed and rolled back after removing the tablespace directories
* and/or symlink. We want to allow a new DROP attempt to succeed at
@@ -1074,7 +1074,7 @@ check_default_tablespace(char **newval, void **extra, GucSource source)
{
/*
* If we aren't inside a transaction, we cannot do database access so
- * cannot verify the name. Must accept the value on faith.
+ * cannot verify the name. Must accept the value on faith.
*/
if (IsTransactionState())
{
@@ -1189,7 +1189,7 @@ check_temp_tablespaces(char **newval, void **extra, GucSource source)
/*
* If we aren't inside a transaction, we cannot do database access so
- * cannot verify the individual names. Must accept the list on faith.
+ * cannot verify the individual names. Must accept the list on faith.
* Fortunately, there's then also no need to pass the data to fd.c.
*/
if (IsTransactionState())
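Both tablespace GUC hooks above share an accept-on-faith pattern: outside a transaction they cannot consult the catalogs, so they accept the value unverified. A minimal sketch against the 9.1-era GUC check-hook API follows; the GUC and function name ("my_tablespace") are hypothetical, the error handling is simplified, and this compiles only inside the backend tree.

#include "postgres.h"
#include "access/xact.h"
#include "commands/tablespace.h"
#include "utils/guc.h"

bool
check_my_tablespace(char **newval, void **extra, GucSource source)
{
    /* Outside a transaction we cannot read pg_tablespace: take it on faith. */
    if (!IsTransactionState())
        return true;

    /* Inside one, verify the name; missing_ok = true so we report it ourselves. */
    if (**newval != '\0' &&
        get_tablespace_oid(*newval, true) == InvalidOid)
    {
        GUC_check_errdetail("Tablespace \"%s\" does not exist.", *newval);
        return false;
    }
    return true;
}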
diff --git a/src/backend/commands/trigger.c b/src/backend/commands/trigger.c
index 3c2447671c7..bd5dee4286a 100644
--- a/src/backend/commands/trigger.c
+++ b/src/backend/commands/trigger.c
@@ -101,7 +101,7 @@ static void AfterTriggerSaveEvent(EState *estate, ResultRelInfo *relinfo,
*
* constraintOid, if nonzero, says that this trigger is being created
* internally to implement that constraint. A suitable pg_depend entry will
- * be made to link the trigger to that constraint. constraintOid is zero when
+ * be made to link the trigger to that constraint. constraintOid is zero when
* executing a user-entered CREATE TRIGGER command. (For CREATE CONSTRAINT
* TRIGGER, we build a pg_constraint entry internally.)
*
@@ -392,7 +392,7 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
if (funcrettype != TRIGGEROID)
{
/*
- * We allow OPAQUE just so we can load old dump files. When we see a
+ * We allow OPAQUE just so we can load old dump files. When we see a
* trigger function declared OPAQUE, change it to TRIGGER.
*/
if (funcrettype == OPAQUEOID)
@@ -414,7 +414,7 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
* references one of the built-in RI_FKey trigger functions, assume it is
* from a dump of a pre-7.3 foreign key constraint, and take steps to
* convert this legacy representation into a regular foreign key
- * constraint. Ugly, but necessary for loading old dump files.
+ * constraint. Ugly, but necessary for loading old dump files.
*/
if (stmt->isconstraint && !isInternal &&
list_length(stmt->args) >= 6 &&
@@ -475,7 +475,7 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
/*
* If trigger is internally generated, modify the provided trigger name to
- * ensure uniqueness by appending the trigger OID. (Callers will usually
+ * ensure uniqueness by appending the trigger OID. (Callers will usually
* supply a simple constant trigger name in these cases.)
*/
if (isInternal)
@@ -599,7 +599,7 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
int2 attnum;
int j;
- /* Lookup column name. System columns are not allowed */
+ /* Lookup column name. System columns are not allowed */
attnum = attnameAttNum(rel, name, false);
if (attnum == InvalidAttrNumber)
ereport(ERROR,
@@ -704,7 +704,7 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
else
{
/*
- * User CREATE TRIGGER, so place dependencies. We make trigger be
+ * User CREATE TRIGGER, so place dependencies. We make trigger be
* auto-dropped if its relation is dropped or if the FK relation is
* dropped. (Auto drop is compatible with our pre-7.3 behavior.)
*/
@@ -773,7 +773,7 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
* full-fledged foreign key constraints.
*
* The conversion is complex because a pre-7.3 foreign key involved three
- * separate triggers, which were reported separately in dumps. While the
+ * separate triggers, which were reported separately in dumps. While the
* single trigger on the referencing table adds no new information, we need
* to know the trigger functions of both of the triggers on the referenced
* table to build the constraint declaration. Also, due to lack of proper
@@ -1977,7 +1977,7 @@ ExecBRInsertTriggers(EState *estate, ResultRelInfo *relinfo,
if (newtuple != slottuple)
{
/*
- * Return the modified tuple using the es_trig_tuple_slot. We assume
+ * Return the modified tuple using the es_trig_tuple_slot. We assume
* the tuple was allocated in per-tuple memory context, and therefore
* will go away by itself. The tuple table slot should not try to
* clear it.
@@ -2052,7 +2052,7 @@ ExecIRInsertTriggers(EState *estate, ResultRelInfo *relinfo,
if (newtuple != slottuple)
{
/*
- * Return the modified tuple using the es_trig_tuple_slot. We assume
+ * Return the modified tuple using the es_trig_tuple_slot. We assume
* the tuple was allocated in per-tuple memory context, and therefore
* will go away by itself. The tuple table slot should not try to
* clear it.
@@ -2395,7 +2395,7 @@ ExecBRUpdateTriggers(EState *estate, EPQState *epqstate,
if (newtuple != slottuple)
{
/*
- * Return the modified tuple using the es_trig_tuple_slot. We assume
+ * Return the modified tuple using the es_trig_tuple_slot. We assume
* the tuple was allocated in per-tuple memory context, and therefore
* will go away by itself. The tuple table slot should not try to
* clear it.
@@ -2478,7 +2478,7 @@ ExecIRUpdateTriggers(EState *estate, ResultRelInfo *relinfo,
if (newtuple != slottuple)
{
/*
- * Return the modified tuple using the es_trig_tuple_slot. We assume
+ * Return the modified tuple using the es_trig_tuple_slot. We assume
* the tuple was allocated in per-tuple memory context, and therefore
* will go away by itself. The tuple table slot should not try to
* clear it.
@@ -2871,7 +2871,7 @@ typedef SetConstraintStateData *SetConstraintState;
* Per-trigger-event data
*
* The actual per-event data, AfterTriggerEventData, includes DONE/IN_PROGRESS
- * status bits and one or two tuple CTIDs. Each event record also has an
+ * status bits and one or two tuple CTIDs. Each event record also has an
* associated AfterTriggerSharedData that is shared across all instances
* of similar events within a "chunk".
*
@@ -2885,7 +2885,7 @@ typedef SetConstraintStateData *SetConstraintState;
* Although this is mutable state, we can keep it in AfterTriggerSharedData
* because all instances of the same type of event in a given event list will
* be fired at the same time, if they were queued between the same firing
- * cycles. So we need only ensure that ats_firing_id is zero when attaching
+ * cycles. So we need only ensure that ats_firing_id is zero when attaching
* a new event to an existing AfterTriggerSharedData record.
*/
typedef uint32 TriggerFlags;
@@ -2932,7 +2932,7 @@ typedef struct AfterTriggerEventDataOneCtid
/*
* To avoid palloc overhead, we keep trigger events in arrays in successively-
* larger chunks (a slightly more sophisticated version of an expansible
- * array). The space between CHUNK_DATA_START and freeptr is occupied by
+ * array). The space between CHUNK_DATA_START and freeptr is occupied by
* AfterTriggerEventData records; the space between endfree and endptr is
* occupied by AfterTriggerSharedData records.
*/
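The comment above describes a dual-ended chunk: per-event records are carved from the front (freeptr) while shared records are carved from the back (endfree), and the chunk is full when the two pointers meet. A standalone sketch of that layout follows; the field and function names are illustrative, not the backend's structs.

#include <stddef.h>
#include <stdlib.h>

typedef struct Chunk
{
    char   *freeptr;            /* next byte free for an event record */
    char   *endfree;            /* last byte+1 free before shared records */
    char    data[8192];
} Chunk;

static Chunk *
chunk_create(void)
{
    Chunk  *c = malloc(sizeof(Chunk));

    c->freeptr = c->data;
    c->endfree = c->data + sizeof(c->data);
    return c;
}

/* Returns NULL when the two regions would collide, i.e. the chunk is full. */
static void *
chunk_alloc_event(Chunk *c, size_t sz)
{
    if (c->freeptr + sz > c->endfree)
        return NULL;
    c->freeptr += sz;
    return c->freeptr - sz;
}

static void *
chunk_alloc_shared(Chunk *c, size_t sz)
{
    if (c->endfree - sz < c->freeptr)
        return NULL;
    c->endfree -= sz;
    return c->endfree;
}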
@@ -2974,7 +2974,7 @@ typedef struct AfterTriggerEventList
*
* firing_counter is incremented for each call of afterTriggerInvokeEvents.
* We mark firable events with the current firing cycle's ID so that we can
- * tell which ones to work on. This ensures sane behavior if a trigger
+ * tell which ones to work on. This ensures sane behavior if a trigger
* function chooses to do SET CONSTRAINTS: the inner SET CONSTRAINTS will
* only fire those events that weren't already scheduled for firing.
*
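A standalone sketch of the firing-cycle marking just described: each invocation takes a fresh firing ID and claims the unclaimed events, so a nested invocation (e.g. a trigger doing SET CONSTRAINTS) skips events an outer cycle already owns. Hypothetical names throughout; the real code stores the ID in shared per-event data.

#include <stdbool.h>

typedef struct Event
{
    unsigned firing_id;         /* 0 = not yet claimed by any cycle */
    bool     done;
} Event;

static unsigned firing_counter = 0;

static void
invoke_events(Event *events, int n)
{
    unsigned my_firing_id = ++firing_counter;

    /* Claim every unclaimed, unfinished event for this cycle. */
    for (int i = 0; i < n; i++)
        if (!events[i].done && events[i].firing_id == 0)
            events[i].firing_id = my_firing_id;

    /* Fire only what this cycle claimed; inner cycles leave these alone. */
    for (int i = 0; i < n; i++)
        if (events[i].firing_id == my_firing_id)
            events[i].done = true;      /* stand-in for calling the trigger */
}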
@@ -2982,7 +2982,7 @@ typedef struct AfterTriggerEventList
* This is saved and restored across failed subtransactions.
*
* events is the current list of deferred events. This is global across
- * all subtransactions of the current transaction. In a subtransaction
+ * all subtransactions of the current transaction. In a subtransaction
* abort, we know that the events added by the subtransaction are at the
* end of the list, so it is relatively easy to discard them. The event
* list chunks themselves are stored in event_cxt.
@@ -3010,12 +3010,12 @@ typedef struct AfterTriggerEventList
* which we similarly use to clean up at subtransaction abort.
*
* firing_stack is a stack of copies of subtransaction-start-time
- * firing_counter. We use this to recognize which deferred triggers were
+ * firing_counter. We use this to recognize which deferred triggers were
* fired (or marked for firing) within an aborted subtransaction.
*
* We use GetCurrentTransactionNestLevel() to determine the correct array
* index in these stacks. maxtransdepth is the number of allocated entries in
- * each stack. (By not keeping our own stack pointer, we can avoid trouble
+ * each stack. (By not keeping our own stack pointer, we can avoid trouble
* in cases where errors during subxact abort cause multiple invocations
* of AfterTriggerEndSubXact() at the same nesting depth.)
*/
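A standalone sketch of the firing_stack idea spelled out above: indexing by nesting level rather than keeping a private stack pointer makes the "end subxact" routine idempotent at a given depth. Names are illustrative only.

#define MAXDEPTH 64

static unsigned firing_stack[MAXDEPTH];

static void
begin_subxact(int nest_level, unsigned firing_counter)
{
    firing_stack[nest_level] = firing_counter;  /* save cycle ID at entry */
}

static unsigned
end_subxact_abort(int nest_level)
{
    /* Re-reading the same slot returns the same saved value, so repeated
     * invocations at one depth cause no harm. */
    return firing_stack[nest_level];
}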
@@ -3283,7 +3283,7 @@ afterTriggerRestoreEventList(AfterTriggerEventList *events,
* single trigger function.
*
* Frequently, this will be fired many times in a row for triggers of
- * a single relation. Therefore, we cache the open relation and provide
+ * a single relation. Therefore, we cache the open relation and provide
* fmgr lookup cache space at the caller level. (For triggers fired at
* the end of a query, we can even piggyback on the executor's state.)
*
@@ -3800,7 +3800,7 @@ AfterTriggerFireDeferred(void)
}
/*
- * Run all the remaining triggers. Loop until they are all gone, in case
+ * Run all the remaining triggers. Loop until they are all gone, in case
* some trigger queues more for us to do.
*/
while (afterTriggerMarkEvents(events, NULL, false))
@@ -3863,7 +3863,7 @@ AfterTriggerBeginSubXact(void)
int my_level = GetCurrentTransactionNestLevel();
/*
- * Ignore call if the transaction is in aborted state. (Probably
+ * Ignore call if the transaction is in aborted state. (Probably
* shouldn't happen?)
*/
if (afterTriggers == NULL)
@@ -3942,7 +3942,7 @@ AfterTriggerEndSubXact(bool isCommit)
CommandId subxact_firing_id;
/*
- * Ignore call if the transaction is in aborted state. (Probably
+ * Ignore call if the transaction is in aborted state. (Probably
* unneeded)
*/
if (afterTriggers == NULL)
@@ -4074,7 +4074,7 @@ SetConstraintStateCopy(SetConstraintState origstate)
}
/*
- * Add a per-trigger item to a SetConstraintState. Returns possibly-changed
+ * Add a per-trigger item to a SetConstraintState. Returns possibly-changed
* pointer to the state object (it will change if we have to repalloc).
*/
static SetConstraintState
@@ -4159,7 +4159,7 @@ AfterTriggerSetState(ConstraintsSetStmt *stmt)
* First, identify all the named constraints and make a list of their
* OIDs. Since, unlike the SQL spec, we allow multiple constraints of
* the same name within a schema, the specifications are not
- * necessarily unique. Our strategy is to target all matching
+ * necessarily unique. Our strategy is to target all matching
* constraints within the first search-path schema that has any
* matches, but disregard matches in schemas beyond the first match.
* (This is a bit odd but it's the historical behavior.)
@@ -4185,7 +4185,7 @@ AfterTriggerSetState(ConstraintsSetStmt *stmt)
/*
* If we're given the schema name with the constraint, look only
- * in that schema. If given a bare constraint name, use the
+ * in that schema. If given a bare constraint name, use the
* search path to find the first matching constraint.
*/
if (constraint->schemaname)
@@ -4288,7 +4288,7 @@ AfterTriggerSetState(ConstraintsSetStmt *stmt)
/*
* Silently skip triggers that are marked as non-deferrable in
- * pg_trigger. This is not an error condition, since a
+ * pg_trigger. This is not an error condition, since a
* deferrable RI constraint may have some non-deferrable
* actions.
*/
@@ -4359,7 +4359,7 @@ AfterTriggerSetState(ConstraintsSetStmt *stmt)
/*
* Make sure a snapshot has been established in case trigger
- * functions need one. Note that we avoid setting a snapshot if
+ * functions need one. Note that we avoid setting a snapshot if
* we don't find at least one trigger that has to be fired now.
* This is so that BEGIN; SET CONSTRAINTS ...; SET TRANSACTION
* ISOLATION LEVEL SERIALIZABLE; ... works properly. (If we are
@@ -4419,7 +4419,7 @@ AfterTriggerPendingOnRel(Oid relid)
AfterTriggerShared evtshared = GetTriggerSharedData(event);
/*
- * We can ignore completed events. (Even if a DONE flag is rolled
+ * We can ignore completed events. (Even if a DONE flag is rolled
* back by subxact abort, it's OK because the effects of the TRUNCATE
* or whatever must get rolled back too.)
*/
@@ -4460,7 +4460,7 @@ AfterTriggerPendingOnRel(Oid relid)
* be fired for an event.
*
* NOTE: this is called whenever there are any triggers associated with
- * the event (even if they are disabled). This function decides which
+ * the event (even if they are disabled). This function decides which
* triggers actually need to be queued.
* ----------
*/
diff --git a/src/backend/commands/typecmds.c b/src/backend/commands/typecmds.c
index 4943411de0b..9a717d01211 100644
--- a/src/backend/commands/typecmds.c
+++ b/src/backend/commands/typecmds.c
@@ -499,8 +499,8 @@ DefineType(List *names, List *parameters)
analyzeOid = findTypeAnalyzeFunction(analyzeName, typoid);
/*
- * Check permissions on functions. We choose to require the creator/owner
- * of a type to also own the underlying functions. Since creating a type
+ * Check permissions on functions. We choose to require the creator/owner
+ * of a type to also own the underlying functions. Since creating a type
* is tantamount to granting public execute access on the functions, the
* minimum sane check would be for execute-with-grant-option. But we
* don't have a way to make the type go away if the grant option is
@@ -537,7 +537,7 @@ DefineType(List *names, List *parameters)
* now have TypeCreate do all the real work.
*
* Note: the pg_type.oid is stored in user tables as array elements (base
- * types) in ArrayType and in composite types in DatumTupleFields. This
+ * types) in ArrayType and in composite types in DatumTupleFields. This
* oid must be preserved by binary upgrades.
*/
typoid =
@@ -792,7 +792,7 @@ DefineDomain(CreateDomainStmt *stmt)
get_namespace_name(domainNamespace));
/*
- * Check for collision with an existing type name. If there is one and
+ * Check for collision with an existing type name. If there is one and
* it's an autogenerated array, we can rename it out of the way.
*/
old_type_oid = GetSysCacheOid2(TYPENAMENSP,
@@ -1131,7 +1131,7 @@ DefineEnum(CreateEnumStmt *stmt)
get_namespace_name(enumNamespace));
/*
- * Check for collision with an existing type name. If there is one and
+ * Check for collision with an existing type name. If there is one and
* it's an autogenerated array, we can rename it out of the way.
*/
old_type_oid = GetSysCacheOid2(TYPENAMENSP,
@@ -1846,7 +1846,7 @@ AlterDomainNotNull(List *names, bool notNull)
}
/*
- * Okay to update pg_type row. We can scribble on typTup because it's a
+ * Okay to update pg_type row. We can scribble on typTup because it's a
* copy.
*/
typTup->typnotnull = notNull;
@@ -2019,7 +2019,7 @@ AlterDomainAddConstraint(List *names, Node *newConstraint)
/*
* Since all other constraint types throw errors, this must be a check
- * constraint. First, process the constraint expression and add an entry
+ * constraint. First, process the constraint expression and add an entry
* to pg_constraint.
*/
@@ -2247,7 +2247,7 @@ get_rels_with_domain(Oid domainOid, LOCKMODE lockmode)
continue;
/*
- * Okay, add column to result. We store the columns in column-number
+ * Okay, add column to result. We store the columns in column-number
* order; this is just a hack to improve predictability of regression
* test output ...
*/
@@ -2335,7 +2335,7 @@ domainAddConstraint(Oid domainOid, Oid domainNamespace, Oid baseTypeOid,
/*
* Set up a CoerceToDomainValue to represent the occurrence of VALUE in
- * the expression. Note that it will appear to have the type of the base
+ * the expression. Note that it will appear to have the type of the base
* type, not the domain. This seems correct since within the check
* expression, we should not assume the input value can be considered a
* member of the domain.
@@ -2719,7 +2719,7 @@ AlterTypeOwner(List *names, Oid newOwnerId)
/*
* If it's a composite type, invoke ATExecChangeOwner so that we fix
- * up the pg_class entry properly. That will call back to
+ * up the pg_class entry properly. That will call back to
* AlterTypeOwnerInternal to take care of the pg_type entry(s).
*/
if (typTup->typtype == TYPTYPE_COMPOSITE)
@@ -2850,7 +2850,7 @@ AlterTypeNamespace_oid(Oid typeOid, Oid nspOid, ObjectAddresses *objsMoved)
* Caller must have already checked privileges.
*
* The function automatically recurses to process the type's array type,
- * if any. isImplicitArray should be TRUE only when doing this internal
+ * if any. isImplicitArray should be TRUE only when doing this internal
* recursion (outside callers must never try to move an array type directly).
*
* If errorOnTableType is TRUE, the function errors out if the type is
diff --git a/src/backend/commands/user.c b/src/backend/commands/user.c
index c9c7430b8ad..5f8b2d38da3 100644
--- a/src/backend/commands/user.c
+++ b/src/backend/commands/user.c
@@ -964,7 +964,7 @@ DropRole(DropRoleStmt *stmt)
ReleaseSysCache(tuple);
/*
- * Remove role from the pg_auth_members table. We have to remove all
+ * Remove role from the pg_auth_members table. We have to remove all
* tuples that show it as either a role or a member.
*
* XXX what about grantor entries? Maybe we should do one heap scan.
@@ -1059,7 +1059,7 @@ RenameRole(const char *oldname, const char *newname)
* XXX Client applications probably store the session user somewhere, so
* renaming it could cause confusion. On the other hand, there may not be
* an actual problem besides a little confusion, so think about this and
- * decide. Same for SET ROLE ... we don't restrict renaming the current
+ * decide. Same for SET ROLE ... we don't restrict renaming the current
* effective userid, though.
*/
@@ -1311,7 +1311,7 @@ AddRoleMems(const char *rolename, Oid roleid,
/*
* Check permissions: must have createrole or admin option on the role to
- * be changed. To mess with a superuser role, you gotta be superuser.
+ * be changed. To mess with a superuser role, you gotta be superuser.
*/
if (superuser_arg(roleid))
{
@@ -1457,7 +1457,7 @@ DelRoleMems(const char *rolename, Oid roleid,
/*
* Check permissions: must have createrole or admin option on the role to
- * be changed. To mess with a superuser role, you gotta be superuser.
+ * be changed. To mess with a superuser role, you gotta be superuser.
*/
if (superuser_arg(roleid))
{
diff --git a/src/backend/commands/vacuum.c b/src/backend/commands/vacuum.c
index 4d1d97284e3..7273c2c0fed 100644
--- a/src/backend/commands/vacuum.c
+++ b/src/backend/commands/vacuum.c
@@ -373,9 +373,9 @@ vacuum_set_xid_limits(int freeze_min_age,
TransactionId safeLimit;
/*
- * We can always ignore processes running lazy vacuum. This is because we
+ * We can always ignore processes running lazy vacuum. This is because we
* use these values only for deciding which tuples we must keep in the
- * tables. Since lazy vacuum doesn't write its XID anywhere, it's safe to
+ * tables. Since lazy vacuum doesn't write its XID anywhere, it's safe to
* ignore it. In theory it could be problematic to ignore lazy vacuums in
* a full vacuum, but keep in mind that only one vacuum process can be
* working on a particular table at any time, and that each vacuum is
@@ -459,7 +459,7 @@ vacuum_set_xid_limits(int freeze_min_age,
* If we scanned the whole relation then we should just use the count of
* live tuples seen; but if we did not, we should not trust the count
* unreservedly, especially not in VACUUM, which may have scanned a quite
- * nonrandom subset of the table. When we have only partial information,
+ * nonrandom subset of the table. When we have only partial information,
* we take the old value of pg_class.reltuples as a measurement of the
* tuple density in the unscanned pages.
*
@@ -598,7 +598,7 @@ vac_update_relstats(Relation relation,
/*
* If we have discovered that there are no indexes, then there's no
- * primary key either. This could be done more thoroughly...
+ * primary key either. This could be done more thoroughly...
*/
if (pgcform->relhaspkey && !hasindex)
{
@@ -645,7 +645,7 @@ vac_update_relstats(Relation relation,
* advance pg_database.datfrozenxid, also try to truncate pg_clog.
*
* We violate transaction semantics here by overwriting the database's
- * existing pg_database tuple with the new value. This is reasonably
+ * existing pg_database tuple with the new value. This is reasonably
* safe since the new value is correct whether or not this transaction
* commits. As with vac_update_relstats, this avoids leaving dead tuples
* behind after a VACUUM.
@@ -745,7 +745,7 @@ vac_update_datfrozenxid(void)
* Also update the XID wrap limit info maintained by varsup.c.
*
* The passed XID is simply the one I just wrote into my pg_database
- * entry. It's used to initialize the "min" calculation.
+ * entry. It's used to initialize the "min" calculation.
*
* This routine is only invoked when we've managed to change our
* DB's datfrozenxid entry, or we found that the shared XID-wrap-limit
@@ -828,7 +828,7 @@ vac_truncate_clog(TransactionId frozenXID)
* vacuum_rel() -- vacuum one heap relation
*
* Doing one heap at a time incurs extra overhead, since we need to
- * check that the heap exists again just before we vacuum it. The
+ * check that the heap exists again just before we vacuum it. The
* reason that we do this is so that vacuuming can be spread across
* many small transactions. Otherwise, two-phase locking would require
* us to lock the entire database during one pass of the vacuum cleaner.
@@ -885,7 +885,7 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, bool do_toast, bool for_wraparound)
}
/*
- * Check for user-requested abort. Note we want this to be inside a
+ * Check for user-requested abort. Note we want this to be inside a
* transaction, so xact.c doesn't issue useless WARNING.
*/
CHECK_FOR_INTERRUPTS();
@@ -932,7 +932,7 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, bool do_toast, bool for_wraparound)
*
* We allow the user to vacuum a table if he is superuser, the table
* owner, or the database owner (but in the latter case, only if it's not
- * a shared relation). pg_class_ownercheck includes the superuser case.
+ * a shared relation). pg_class_ownercheck includes the superuser case.
*
* Note we choose to treat permissions failure as a WARNING and keep
* trying to vacuum the rest of the DB --- is this appropriate?
@@ -1060,7 +1060,7 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, bool do_toast, bool for_wraparound)
/*
* If the relation has a secondary toast rel, vacuum that too while we
* still hold the session lock on the master table. Note however that
- * "analyze" will not get done on the toast table. This is good, because
+ * "analyze" will not get done on the toast table. This is good, because
* the toaster always uses hardcoded index access and statistics are
* totally unimportant for toast relations.
*/
@@ -1079,7 +1079,7 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, bool do_toast, bool for_wraparound)
/*
* Open all the vacuumable indexes of the given relation, obtaining the
- * specified kind of lock on each. Return an array of Relation pointers for
+ * specified kind of lock on each. Return an array of Relation pointers for
* the indexes into *Irel, and the number of indexes into *nindexes.
*
* We consider an index vacuumable if it is marked insertable (IndexIsReady).
@@ -1129,7 +1129,7 @@ vac_open_indexes(Relation relation, LOCKMODE lockmode,
}
/*
- * Release the resources acquired by vac_open_indexes. Optionally release
+ * Release the resources acquired by vac_open_indexes. Optionally release
* the locks (say NoLock to keep 'em).
*/
void
diff --git a/src/backend/commands/vacuumlazy.c b/src/backend/commands/vacuumlazy.c
index 1d2472e9918..57daca50188 100644
--- a/src/backend/commands/vacuumlazy.c
+++ b/src/backend/commands/vacuumlazy.c
@@ -13,7 +13,7 @@
* We are willing to use at most maintenance_work_mem memory space to keep
* track of dead tuples. We initially allocate an array of TIDs of that size,
* with an upper limit that depends on table size (this limit ensures we don't
- * allocate a huge area uselessly for vacuuming small tables). If the array
+ * allocate a huge area uselessly for vacuuming small tables). If the array
* threatens to overflow, we suspend the heap scan phase and perform a pass of
* index cleanup and page compaction, then resume the heap scan with an empty
* TID array.
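A standalone sketch of the sizing rule this header describes: the dead-TID array is bounded by maintenance_work_mem but also by the most TIDs the table could possibly hold, so small tables never reserve a huge area. MaxHeapTuplesPerPage is a real backend constant; the figure 291 below is only its typical value for 8 kB pages, and the function name is hypothetical.

#include <stddef.h>

#define MAX_HEAP_TUPLES_PER_PAGE 291    /* typical value for 8 kB pages */

typedef struct ItemPointerData { unsigned short d[3]; } ItemPointerData;

static size_t
max_dead_tuples(size_t maintenance_work_mem_kb, size_t rel_nblocks)
{
    size_t  bymem = (maintenance_work_mem_kb * 1024) / sizeof(ItemPointerData);
    size_t  bysize = rel_nblocks * MAX_HEAP_TUPLES_PER_PAGE;

    return bymem < bysize ? bymem : bysize;
}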
@@ -396,7 +396,7 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
* Before entering the main loop, establish the invariant that
* next_not_all_visible_block is the next block number >= blkno that's not
* all-visible according to the visibility map, or nblocks if there's no
- * such block. Also, we set up the skipping_all_visible_blocks flag,
+ * such block. Also, we set up the skipping_all_visible_blocks flag,
* which is needed because we need hysteresis in the decision: once we've
* started skipping blocks, we may as well skip everything up to the next
* not-all-visible block.
@@ -843,8 +843,8 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
/*
* If we remembered any tuples for deletion, then the page will be
* visited again by lazy_vacuum_heap, which will compute and record
- * its post-compaction free space. If not, then we're done with this
- * page, so remember its free space as-is. (This path will always be
+ * its post-compaction free space. If not, then we're done with this
+ * page, so remember its free space as-is. (This path will always be
* taken if there are no indexes.)
*/
if (vacrelstats->num_dead_tuples == prev_dead_count)
diff --git a/src/backend/commands/variable.c b/src/backend/commands/variable.c
index 7e4072d5212..a3448e4c785 100644
--- a/src/backend/commands/variable.c
+++ b/src/backend/commands/variable.c
@@ -174,7 +174,7 @@ check_datestyle(char **newval, void **extra, GucSource source)
}
/*
- * Prepare the canonical string to return. GUC wants it malloc'd.
+ * Prepare the canonical string to return. GUC wants it malloc'd.
*/
result = (char *) malloc(32);
if (!result)
@@ -262,9 +262,9 @@ check_timezone(char **newval, void **extra, GucSource source)
{
/*
* The boot_val given for TimeZone in guc.c is NULL. When we see this
- * we just do nothing. If this isn't overridden from the config file
+ * we just do nothing. If this isn't overridden from the config file
* then pg_timezone_initialize() will eventually select a default
- * value from the environment. This hack has two purposes: to avoid
+ * value from the environment. This hack has two purposes: to avoid
* wasting cycles loading values that might soon be overridden from
* the config file, and to avoid trying to read the timezone files
* during InitializeGUCOptions(). The latter doesn't work in an
@@ -289,7 +289,7 @@ check_timezone(char **newval, void **extra, GucSource source)
if (pg_strncasecmp(*newval, "interval", 8) == 0)
{
/*
- * Support INTERVAL 'foo'. This is for SQL spec compliance, not
+ * Support INTERVAL 'foo'. This is for SQL spec compliance, not
* because it has any actual real-world usefulness.
*/
const char *valueptr = *newval;
@@ -313,7 +313,7 @@ check_timezone(char **newval, void **extra, GucSource source)
/*
* Try to parse it. XXX an invalid interval format will result in
- * ereport(ERROR), which is not desirable for GUC. We did what we
+ * ereport(ERROR), which is not desirable for GUC. We did what we
* could to guard against this in flatten_set_variable_args, but a
* string coming in from postgresql.conf might contain anything.
*/
@@ -389,11 +389,11 @@ check_timezone(char **newval, void **extra, GucSource source)
}
/*
- * Prepare the canonical string to return. GUC wants it malloc'd.
+ * Prepare the canonical string to return. GUC wants it malloc'd.
*
* Note: the result string should be something that we'd accept as input.
* We use the numeric format for interval cases, because it's simpler to
- * reload. In the named-timezone case, *newval is already OK and need not
+ * reload. In the named-timezone case, *newval is already OK and need not
* be changed; it might not have the canonical casing, but that's taken
* care of by show_timezone.
*/
@@ -569,7 +569,7 @@ show_log_timezone(void)
* We allow idempotent changes (r/w -> r/w and r/o -> r/o) at any time, and
* we also always allow changes from read-write to read-only. However,
* read-only may be changed to read-write only when in a top-level transaction
- * that has not yet taken an initial snapshot. Can't do it in a hot standby
+ * that has not yet taken an initial snapshot. Can't do it in a hot standby
* slave, either.
*
* If we are not in a transaction at all, just allow the change; it means
@@ -730,7 +730,7 @@ check_transaction_deferrable(bool *newval, void **extra, GucSource source)
*
* We can't roll back the random sequence on error, and we don't want
* config file reloads to affect it, so we only want interactive SET SEED
- * commands to set it. We use the "extra" storage to ensure that rollbacks
+ * commands to set it. We use the "extra" storage to ensure that rollbacks
* don't try to do the operation again.
*/
@@ -1006,7 +1006,7 @@ const char *
show_role(void)
{
/*
- * Check whether SET ROLE is active; if not return "none". This is a
+ * Check whether SET ROLE is active; if not return "none". This is a
* kluge to deal with the fact that SET SESSION AUTHORIZATION logically
* resets SET ROLE to NONE, but we cannot set the GUC role variable from
* assign_session_authorization (because we haven't got enough info to
diff --git a/src/backend/commands/view.c b/src/backend/commands/view.c
index b2381996586..4447306aea6 100644
--- a/src/backend/commands/view.c
+++ b/src/backend/commands/view.c
@@ -367,11 +367,11 @@ UpdateRangeTableOfViewParse(Oid viewOid, Query *viewParse)
*rt_entry2;
/*
- * Make a copy of the given parsetree. It's not so much that we don't
+ * Make a copy of the given parsetree. It's not so much that we don't
* want to scribble on our input, it's that the parser has a bad habit of
* outputting multiple links to the same subtree for constructs like
* BETWEEN, and we mustn't have OffsetVarNodes increment the varno of a
- * Var node twice. copyObject will expand any multiply-referenced subtree
+ * Var node twice. copyObject will expand any multiply-referenced subtree
* into multiple copies.
*/
viewParse = (Query *) copyObject(viewParse);
@@ -488,7 +488,7 @@ DefineView(ViewStmt *stmt, const char *queryString)
/*
* If the user didn't explicitly ask for a temporary view, check whether
- * we need one implicitly. We allow TEMP to be inserted automatically as
+ * we need one implicitly. We allow TEMP to be inserted automatically as
* long as the CREATE command is consistent with that --- no explicit
* schema name.
*/
diff --git a/src/backend/executor/execAmi.c b/src/backend/executor/execAmi.c
index 01775ce4494..9a2bff53e9d 100644
--- a/src/backend/executor/execAmi.c
+++ b/src/backend/executor/execAmi.c
@@ -307,7 +307,7 @@ ExecMarkPos(PlanState *node)
*
* NOTE: the semantics of this are that the first ExecProcNode following
* the restore operation will yield the same tuple as the first one following
- * the mark operation. It is unspecified what happens to the plan node's
+ * the mark operation. It is unspecified what happens to the plan node's
* result TupleTableSlot. (In most cases the result slot is unchanged by
* a restore, but the node may choose to clear it or to load it with the
* restored-to tuple.) Hence the caller should discard any previously
@@ -382,7 +382,7 @@ ExecSupportsMarkRestore(NodeTag plantype)
/*
* T_Result only supports mark/restore if it has a child plan that
* does, so we do not have enough information to give a really
- * correct answer. However, for current uses it's enough to
+ * correct answer. However, for current uses it's enough to
* always say "false", because this routine is not asked about
* gating Result plans, only base-case Results.
*/
diff --git a/src/backend/executor/execCurrent.c b/src/backend/executor/execCurrent.c
index 5312d9aeec0..84a5ecec19c 100644
--- a/src/backend/executor/execCurrent.c
+++ b/src/backend/executor/execCurrent.c
@@ -141,7 +141,7 @@ execCurrentOf(CurrentOfExpr *cexpr,
/*
* This table didn't produce the cursor's current row; some other
- * inheritance child of the same parent must have. Signal caller to
+ * inheritance child of the same parent must have. Signal caller to
* do nothing on this table.
*/
return false;
diff --git a/src/backend/executor/execJunk.c b/src/backend/executor/execJunk.c
index 3e995c8f07b..3975353a25d 100644
--- a/src/backend/executor/execJunk.c
+++ b/src/backend/executor/execJunk.c
@@ -52,7 +52,7 @@
*
* Initialize the Junk filter.
*
- * The source targetlist is passed in. The output tuple descriptor is
+ * The source targetlist is passed in. The output tuple descriptor is
* built from the non-junk tlist entries, plus the passed specification
* of whether to include room for an OID or not.
* An optional resultSlot can be passed as well.
diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c
index 5139c3b8e50..5d2f20afc49 100644
--- a/src/backend/executor/execMain.c
+++ b/src/backend/executor/execMain.c
@@ -19,7 +19,7 @@
* ExecutorRun accepts direction and count arguments that specify whether
* the plan is to be executed forwards, backwards, and for how many tuples.
* In some cases ExecutorRun may be called multiple times to process all
- * the tuples for a plan. It is also acceptable to stop short of executing
+ * the tuples for a plan. It is also acceptable to stop short of executing
* the whole plan (but only if it is a SELECT).
*
* ExecutorFinish must be called after the final ExecutorRun call and
@@ -337,12 +337,12 @@ standard_ExecutorRun(QueryDesc *queryDesc,
* ExecutorFinish
*
* This routine must be called after the last ExecutorRun call.
- * It performs cleanup such as firing AFTER triggers. It is
+ * It performs cleanup such as firing AFTER triggers. It is
* separate from ExecutorEnd because EXPLAIN ANALYZE needs to
* include these actions in the total runtime.
*
* We provide a function hook variable that lets loadable plugins
- * get control when ExecutorFinish is called. Such a plugin would
+ * get control when ExecutorFinish is called. Such a plugin would
* normally call standard_ExecutorFinish().
*
* ----------------------------------------------------------------
@@ -579,7 +579,7 @@ ExecCheckRTEPerms(RangeTblEntry *rte)
* userid to check as: current user unless we have a setuid indication.
*
* Note: GetUserId() is presently fast enough that there's no harm in
- * calling it separately for each RTE. If that stops being true, we could
+ * calling it separately for each RTE. If that stops being true, we could
* call it once in ExecCheckRTPerms and pass the userid down from there.
* But for now, no need for the extra clutter.
*/
@@ -1157,7 +1157,7 @@ InitResultRelInfo(ResultRelInfo *resultRelInfo,
* if so it doesn't matter which one we pick.) However, it is sometimes
* necessary to fire triggers on other relations; this happens mainly when an
* RI update trigger queues additional triggers on other relations, which will
- * be processed in the context of the outer query. For efficiency's sake,
+ * be processed in the context of the outer query. For efficiency's sake,
* we want to have a ResultRelInfo for those triggers too; that can avoid
* repeated re-opening of the relation. (It also provides a way for EXPLAIN
* ANALYZE to report the runtimes of such triggers.) So we make additional
@@ -1194,7 +1194,7 @@ ExecGetTriggerResultRel(EState *estate, Oid relid)
/*
* Open the target relation's relcache entry. We assume that an
* appropriate lock is still held by the backend from whenever the trigger
- * event got queued, so we need take no new lock here. Also, we need not
+ * event got queued, so we need take no new lock here. Also, we need not
* recheck the relkind, so no need for CheckValidResultRel.
*/
rel = heap_open(relid, NoLock);
@@ -1295,7 +1295,7 @@ ExecPostprocessPlan(EState *estate)
/*
* Run any secondary ModifyTable nodes to completion, in case the main
- * query did not fetch all rows from them. (We do this to ensure that
+ * query did not fetch all rows from them. (We do this to ensure that
* such nodes have predictable results.)
*/
foreach(lc, estate->es_auxmodifytables)
@@ -1708,7 +1708,7 @@ EvalPlanQual(EState *estate, EPQState *epqstate,
*tid = copyTuple->t_self;
/*
- * Need to run a recheck subquery. Initialize or reinitialize EPQ state.
+ * Need to run a recheck subquery. Initialize or reinitialize EPQ state.
*/
EvalPlanQualBegin(epqstate, estate);
@@ -1792,7 +1792,7 @@ EvalPlanQualFetch(EState *estate, Relation relation, int lockmode,
/*
* If xmin isn't what we're expecting, the slot must have been
- * recycled and reused for an unrelated tuple. This implies that
+ * recycled and reused for an unrelated tuple. This implies that
* the latest version of the row was deleted, so we need do
* nothing. (Should be safe to examine xmin without getting
* buffer's content lock, since xmin never changes in an existing
@@ -2018,7 +2018,7 @@ EvalPlanQualGetTuple(EPQState *epqstate, Index rti)
/*
* Fetch the current row values for any non-locked relations that need
- * to be scanned by an EvalPlanQual operation. origslot must have been set
+ * to be scanned by an EvalPlanQual operation. origslot must have been set
* to contain the current result row (top-level row) that we need to recheck.
*/
void
@@ -2249,7 +2249,7 @@ EvalPlanQualStart(EPQState *epqstate, EState *parentestate, Plan *planTree)
/*
* Each EState must have its own es_epqScanDone state, but if we have
- * nested EPQ checks they should share es_epqTuple arrays. This allows
+ * nested EPQ checks they should share es_epqTuple arrays. This allows
* sub-rechecks to inherit the values being examined by an outer recheck.
*/
estate->es_epqScanDone = (bool *) palloc0(rtsize * sizeof(bool));
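The hunk above notes that nested EPQ checks keep private "scan done" flags while sharing one array of test tuples. A standalone sketch of that sharing pattern follows; the types and function are hypothetical stand-ins for the EState machinery.

#include <stdbool.h>
#include <stdlib.h>

typedef struct EpqState
{
    bool   *scan_done;          /* private per nesting level */
    void  **test_tuples;        /* shared across nested levels */
} EpqState;

static EpqState *
epq_start(const EpqState *parent, int rtsize)
{
    EpqState *st = malloc(sizeof(EpqState));

    st->scan_done = calloc(rtsize, sizeof(bool));   /* own copy */
    if (parent)
        st->test_tuples = parent->test_tuples;      /* inherit, don't copy */
    else
        st->test_tuples = calloc(rtsize, sizeof(void *));
    return st;
}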
@@ -2306,7 +2306,7 @@ EvalPlanQualStart(EPQState *epqstate, EState *parentestate, Plan *planTree)
*
* This is a cut-down version of ExecutorEnd(); basically we want to do most
* of the normal cleanup, but *not* close result relations (which we are
- * just sharing from the outer query). We do, however, have to close any
+ * just sharing from the outer query). We do, however, have to close any
* trigger target relations that got opened, since those are not shared.
* (There probably shouldn't be any of the latter, but just in case...)
*/
diff --git a/src/backend/executor/execProcnode.c b/src/backend/executor/execProcnode.c
index 17788761d7f..241f4cf6590 100644
--- a/src/backend/executor/execProcnode.c
+++ b/src/backend/executor/execProcnode.c
@@ -51,7 +51,7 @@
* * ExecInitNode() notices that it is looking at a nest loop and
* as the code below demonstrates, it calls ExecInitNestLoop().
* Eventually this calls ExecInitNode() on the right and left subplans
- * and so forth until the entire plan is initialized. The result
+ * and so forth until the entire plan is initialized. The result
* of ExecInitNode() is a plan state tree built with the same structure
* as the underlying plan tree.
*
@@ -565,7 +565,7 @@ MultiExecProcNode(PlanState *node)
* at 'node'.
*
* After this operation, the query plan will not be able to be
- * processed any further. This should be called only after
+ * processed any further. This should be called only after
* the query plan has been fully executed.
* ----------------------------------------------------------------
*/
diff --git a/src/backend/executor/execQual.c b/src/backend/executor/execQual.c
index a9c5da80dfe..bf86e7ae33e 100644
--- a/src/backend/executor/execQual.c
+++ b/src/backend/executor/execQual.c
@@ -26,7 +26,7 @@
* ExecProject() is used to make tuple projections. Rather than
* trying to speed it up, the execution plan should be pre-processed
* to facilitate attribute sharing between nodes wherever possible,
- * instead of doing needless copying. -cim 5/31/91
+ * instead of doing needless copying. -cim 5/31/91
*
* During expression evaluation, we check_stack_depth only in
* ExecMakeFunctionResult (and substitute routines) rather than at every
@@ -199,7 +199,7 @@ static Datum ExecEvalCurrentOfExpr(ExprState *exprstate, ExprContext *econtext,
*
* Note: for notational simplicity we declare these functions as taking the
* specific type of ExprState that they work on. This requires casting when
- * assigning the function pointer in ExecInitExpr. Be careful that the
+ * assigning the function pointer in ExecInitExpr. Be careful that the
* function signature is declared correctly, because the cast suppresses
* automatic checking!
*
@@ -234,7 +234,7 @@ static Datum ExecEvalCurrentOfExpr(ExprState *exprstate, ExprContext *econtext,
* The caller should already have switched into the temporary memory
* context econtext->ecxt_per_tuple_memory. The convenience entry point
* ExecEvalExprSwitchContext() is provided for callers who prefer not to
- * do the switch in an outer loop. We do not do the switch in these routines
+ * do the switch in an outer loop. We do not do the switch in these routines
* because it'd be a waste of cycles during nested expression evaluation.
* ----------------------------------------------------------------
*/
@@ -364,7 +364,7 @@ ExecEvalArrayRef(ArrayRefExprState *astate,
* We might have a nested-assignment situation, in which the
* refassgnexpr is itself a FieldStore or ArrayRef that needs to
* obtain and modify the previous value of the array element or slice
- * being replaced. If so, we have to extract that value from the
+ * being replaced. If so, we have to extract that value from the
* array and pass it down via the econtext's caseValue. It's safe to
* reuse the CASE mechanism because there cannot be a CASE between
* here and where the value would be needed, and an array assignment
@@ -437,7 +437,7 @@ ExecEvalArrayRef(ArrayRefExprState *astate,
/*
* For assignment to varlena arrays, we handle a NULL original array
* by substituting an empty (zero-dimensional) array; insertion of the
- * new element will result in a singleton array value. It does not
+ * new element will result in a singleton array value. It does not
* matter whether the new element is NULL.
*/
if (*isNull)
@@ -821,11 +821,11 @@ ExecEvalWholeRowVar(WholeRowVarExprState *wrvstate, ExprContext *econtext,
* We really only care about numbers of attributes and data types.
* Also, we can ignore type mismatch on columns that are dropped in
* the destination type, so long as (1) the physical storage matches
- * or (2) the actual column value is NULL. Case (1) is helpful in
+ * or (2) the actual column value is NULL. Case (1) is helpful in
* some cases involving out-of-date cached plans, while case (2) is
* expected behavior in situations such as an INSERT into a table with
* dropped columns (the planner typically generates an INT4 NULL
- * regardless of the dropped column type). If we find a dropped
+ * regardless of the dropped column type). If we find a dropped
* column and cannot verify that case (1) holds, we have to use
* ExecEvalWholeRowSlow to check (2) for each row.
*/
@@ -1478,7 +1478,7 @@ ExecEvalFuncArgs(FunctionCallInfo fcinfo,
* ExecPrepareTuplestoreResult
*
* Subroutine for ExecMakeFunctionResult: prepare to extract rows from a
- * tuplestore function result. We must set up a funcResultSlot (unless
+ * tuplestore function result. We must set up a funcResultSlot (unless
* already done in a previous call cycle) and verify that the function
* returned the expected tuple descriptor.
*/
@@ -1523,7 +1523,7 @@ ExecPrepareTuplestoreResult(FuncExprState *fcache,
}
/*
- * If function provided a tupdesc, cross-check it. We only really need to
+ * If function provided a tupdesc, cross-check it. We only really need to
* do this for functions returning RECORD, but might as well do it always.
*/
if (resultDesc)
@@ -1706,7 +1706,7 @@ restart:
if (fcache->func.fn_retset || hasSetArg)
{
/*
- * We need to return a set result. Complain if caller not ready to
+ * We need to return a set result. Complain if caller not ready to
* accept one.
*/
if (isDone == NULL)
@@ -2025,7 +2025,7 @@ ExecMakeTableFunctionResult(ExprState *funcexpr,
/*
* Normally the passed expression tree will be a FuncExprState, since the
* grammar only allows a function call at the top level of a table
- * function reference. However, if the function doesn't return set then
+ * function reference. However, if the function doesn't return set then
* the planner might have replaced the function call via constant-folding
* or inlining. So if we see any other kind of expression node, execute
* it via the general ExecEvalExpr() code; the only difference is that we
@@ -2064,7 +2064,7 @@ ExecMakeTableFunctionResult(ExprState *funcexpr,
*
* Note: ideally, we'd do this in the per-tuple context, but then the
* argument values would disappear when we reset the context in the
- * inner loop. So do it in caller context. Perhaps we should make a
+ * inner loop. So do it in caller context. Perhaps we should make a
* separate context just to hold the evaluated arguments?
*/
argDone = ExecEvalFuncArgs(&fcinfo, fcache->args, econtext);
@@ -2150,7 +2150,7 @@ ExecMakeTableFunctionResult(ExprState *funcexpr,
* Can't do anything very useful with NULL rowtype values. For a
* function returning set, we consider this a protocol violation
* (but another alternative would be to just ignore the result and
- * "continue" to get another row). For a function not returning
+ * "continue" to get another row). For a function not returning
* set, we fall out of the loop; we'll cons up an all-nulls result
* row below.
*/
@@ -2284,7 +2284,7 @@ no_function_result:
}
/*
- * If function provided a tupdesc, cross-check it. We only really need to
+ * If function provided a tupdesc, cross-check it. We only really need to
* do this for functions returning RECORD, but might as well do it always.
*/
if (rsinfo.setDesc)
@@ -2462,7 +2462,7 @@ ExecEvalDistinct(FuncExprState *fcache,
*
* Evaluate "scalar op ANY/ALL (array)". The operator always yields boolean,
* and we combine the results across all array elements using OR and AND
- * (for ANY and ALL respectively). Of course we short-circuit as soon as
+ * (for ANY and ALL respectively). Of course we short-circuit as soon as
* the result is known.
*/
static Datum
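A standalone sketch of the OR/AND combination with short-circuit that this header describes; SQL null semantics are deliberately omitted to keep it small, and the names are illustrative.

#include <stdbool.h>

typedef bool (*CmpOp) (int scalar, int elem);

static bool
scalar_array_op(int scalar, const int *arr, int n, CmpOp op, bool useOr)
{
    bool    result = useOr ? false : true;  /* identity of OR resp. AND */

    for (int i = 0; i < n; i++)
    {
        bool    thisresult = op(scalar, arr[i]);

        if (useOr && thisresult)
            return true;        /* ANY: one hit decides the result */
        if (!useOr && !thisresult)
            return false;       /* ALL: one miss decides the result */
    }
    return result;
}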
@@ -2649,7 +2649,7 @@ ExecEvalScalarArrayOp(ScalarArrayOpExprState *sstate,
* qualification to conjunctive normal form. If we ever get
* an AND to evaluate, we can be sure that it's not a top-level
* clause in the qualification, but appears lower (as a function
- * argument, for example), or in the target list. Not that you
+ * argument, for example), or in the target list. Not that you
* need to know this, mind you...
* ----------------------------------------------------------------
*/
@@ -2780,7 +2780,7 @@ ExecEvalAnd(BoolExprState *andExpr, ExprContext *econtext,
/* ----------------------------------------------------------------
* ExecEvalConvertRowtype
*
- * Evaluate a rowtype coercion operation. This may require
+ * Evaluate a rowtype coercion operation. This may require
* rearranging field positions.
* ----------------------------------------------------------------
*/
@@ -2909,7 +2909,7 @@ ExecEvalCase(CaseExprState *caseExpr, ExprContext *econtext,
/*
* if we have a true test, then we return the result, since the case
- * statement is satisfied. A NULL result from the test is not
+ * statement is satisfied. A NULL result from the test is not
* considered true.
*/
if (DatumGetBool(clause_value) && !*isNull)
@@ -3123,7 +3123,7 @@ ExecEvalArray(ArrayExprState *astate, ExprContext *econtext,
* If all items were null or empty arrays, return an empty array;
* otherwise, if some were and some weren't, raise error. (Note: we
* must special-case this somehow to avoid trying to generate a 1-D
- * array formed from empty arrays. It's not ideal...)
+ * array formed from empty arrays. It's not ideal...)
*/
if (haveempty)
{
@@ -4286,7 +4286,7 @@ ExecEvalExprSwitchContext(ExprState *expression,
* ExecInitExpr: prepare an expression tree for execution
*
* This function builds and returns an ExprState tree paralleling the given
- * Expr node tree. The ExprState tree can then be handed to ExecEvalExpr
+ * Expr node tree. The ExprState tree can then be handed to ExecEvalExpr
* for execution. Because the Expr tree itself is read-only as far as
* ExecInitExpr and ExecEvalExpr are concerned, several different executions
* of the same plan tree can occur concurrently.
@@ -4297,9 +4297,9 @@ ExecEvalExprSwitchContext(ExprState *expression,
*
* Any Aggref, WindowFunc, or SubPlan nodes found in the tree are added to the
* lists of such nodes held by the parent PlanState. Otherwise, we do very
- * little initialization here other than building the state-node tree. Any
+ * little initialization here other than building the state-node tree. Any
* nontrivial work associated with initializing runtime info for a node should
- * happen during the first actual evaluation of that node. (This policy lets
+ * happen during the first actual evaluation of that node. (This policy lets
* us avoid work if the node is never actually evaluated.)
*
* Note: there is no ExecEndExpr function; we assume that any resource
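A standalone sketch of the read-only-plan / per-execution-state split this comment describes: the state tree mirrors the expression tree node for node, and all mutable fields live on the state side so concurrent executions can share one plan. Hypothetical types, not the backend's node system.

#include <stdlib.h>

typedef struct Expr
{
    int          op;            /* read-only, shared by all executions */
    struct Expr *left, *right;
} Expr;

typedef struct ExprState
{
    const Expr       *expr;     /* back-link to the immutable node */
    int               ncalls;   /* example of per-execution scratch state */
    struct ExprState *left, *right;
} ExprState;

static ExprState *
exec_init_expr(const Expr *node)
{
    ExprState  *st;

    if (node == NULL)
        return NULL;
    st = calloc(1, sizeof(ExprState));
    st->expr = node;
    st->left = exec_init_expr(node->left);      /* recurse exactly like the */
    st->right = exec_init_expr(node->right);    /* plan-tree walk described */
    return st;
}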
@@ -5097,7 +5097,7 @@ ExecQual(List *qual, ExprContext *econtext, bool resultForNull)
oldContext = MemoryContextSwitchTo(econtext->ecxt_per_tuple_memory);
/*
- * Evaluate the qual conditions one at a time. If we find a FALSE result,
+ * Evaluate the qual conditions one at a time. If we find a FALSE result,
* we can stop evaluating and return FALSE --- the AND result must be
* FALSE. Also, if we find a NULL result when resultForNull is FALSE, we
* can stop and return FALSE --- the AND result must be FALSE or NULL in
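A standalone sketch of the qual-loop semantics spelled out above: FALSE short-circuits, and NULL counts as failure unless resultForNull says otherwise. Condition values here use -1 for NULL, 0 for false, 1 for true; names are illustrative.

#include <stdbool.h>

static bool
exec_qual(const int *quals, int n, bool resultForNull)
{
    for (int i = 0; i < n; i++)
    {
        if (quals[i] == 0)
            return false;               /* AND with FALSE is FALSE */
        if (quals[i] == -1 && !resultForNull)
            return false;               /* NULL treated as failure */
    }
    return true;
}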
@@ -5256,7 +5256,7 @@ ExecTargetList(List *targetlist,
else
{
/*
- * We have some done and some undone sets. Restart the done ones
+ * We have some done and some undone sets. Restart the done ones
* so that we can deliver a tuple (if possible).
*/
foreach(tl, targetlist)
diff --git a/src/backend/executor/execScan.c b/src/backend/executor/execScan.c
index e90058847d9..3ec6223e655 100644
--- a/src/backend/executor/execScan.c
+++ b/src/backend/executor/execScan.c
@@ -30,7 +30,7 @@ static bool tlist_matches_tupdesc(PlanState *ps, List *tlist, Index varno, Tuple
* ExecScanFetch -- fetch next potential tuple
*
* This routine is concerned with substituting a test tuple if we are
- * inside an EvalPlanQual recheck. If we aren't, just execute
+ * inside an EvalPlanQual recheck. If we aren't, just execute
* the access method's next-tuple routine.
*/
static inline TupleTableSlot *
@@ -155,7 +155,7 @@ ExecScan(ScanState *node,
ResetExprContext(econtext);
/*
- * get a tuple from the access method. Loop until we obtain a tuple that
+ * get a tuple from the access method. Loop until we obtain a tuple that
* passes the qualification.
*/
for (;;)
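A standalone sketch of the fetch-until-qualified loop that this hunk's comment introduces: pull tuples from an access-method callback until one passes the qual or the scan is exhausted. All names are illustrative stand-ins for the executor's slot machinery.

#include <stdbool.h>
#include <stddef.h>

typedef struct Tuple Tuple;
typedef Tuple *(*FetchFunc) (void *scan_state);
typedef bool (*QualFunc) (const Tuple *tup);

static Tuple *
exec_scan(void *scan_state, FetchFunc fetch, QualFunc qual)
{
    for (;;)
    {
        Tuple  *tup = fetch(scan_state);

        if (tup == NULL)
            return NULL;        /* end of scan: return empty result */
        if (qual == NULL || qual(tup))
            return tup;         /* passes the qualification */
        /* otherwise discard and loop for the next tuple */
    }
}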
diff --git a/src/backend/executor/execTuples.c b/src/backend/executor/execTuples.c
index 41f89c00b69..337e8f84f30 100644
--- a/src/backend/executor/execTuples.c
+++ b/src/backend/executor/execTuples.c
@@ -4,7 +4,7 @@
* Routines dealing with TupleTableSlots. These are used for resource
* management associated with tuples (eg, releasing buffer pins for
* tuples in disk buffers, or freeing the memory occupied by transient
- * tuples). Slots also provide access abstraction that lets us implement
+ * tuples). Slots also provide access abstraction that lets us implement
* "virtual" tuples to reduce data-copying overhead.
*
* Routines dealing with the type information for tuples. Currently,
@@ -260,7 +260,7 @@ ExecSetSlotDescriptor(TupleTableSlot *slot, /* slot to change */
ExecClearTuple(slot);
/*
- * Release any old descriptor. Also release old Datum/isnull arrays if
+ * Release any old descriptor. Also release old Datum/isnull arrays if
* present (we don't bother to check if they could be re-used).
*/
if (slot->tts_tupleDescriptor)
@@ -310,7 +310,7 @@ ExecSetSlotDescriptor(TupleTableSlot *slot, /* slot to change */
* Another case where it is 'false' is when the referenced tuple is held
* in a tuple table slot belonging to a lower-level executor Proc node.
* In this case the lower-level slot retains ownership and responsibility
- * for eventually releasing the tuple. When this method is used, we must
+ * for eventually releasing the tuple. When this method is used, we must
* be certain that the upper-level Proc node will lose interest in the tuple
* sooner than the lower-level one does! If you're not certain, copy the
* lower-level tuple with heap_copytuple and let the upper-level table
@@ -649,7 +649,7 @@ ExecFetchSlotTuple(TupleTableSlot *slot)
* Fetch the slot's minimal physical tuple.
*
* If the slot contains a virtual tuple, we convert it to minimal
- * physical form. The slot retains ownership of the minimal tuple.
+ * physical form. The slot retains ownership of the minimal tuple.
* If it contains a regular tuple we convert to minimal form and store
* that in addition to the regular tuple (not instead of, because
* callers may hold pointers to Datums within the regular tuple).
@@ -828,7 +828,7 @@ ExecCopySlot(TupleTableSlot *dstslot, TupleTableSlot *srcslot)
* ExecInit{Result,Scan,Extra}TupleSlot
*
* These are convenience routines to initialize the specified slot
- * in nodes inheriting the appropriate state. ExecInitExtraTupleSlot
+ * in nodes inheriting the appropriate state. ExecInitExtraTupleSlot
* is used for initializing special-purpose slots.
* --------------------------------
*/
@@ -1145,7 +1145,7 @@ BuildTupleFromCStrings(AttInMetadata *attinmeta, char **values)
* code would have no way to obtain a tupledesc for the tuple.
*
* Note that if we do build a new tuple, it's palloc'd in the current
- * memory context. Beware of code that changes context between the initial
+ * memory context. Beware of code that changes context between the initial
* heap_form_tuple/etc call and calling HeapTuple(Header)GetDatum.
*
* For performance-critical callers, it could be worthwhile to take extra
diff --git a/src/backend/executor/execUtils.c b/src/backend/executor/execUtils.c
index bfacc6c30ac..23668ef4984 100644
--- a/src/backend/executor/execUtils.c
+++ b/src/backend/executor/execUtils.c
@@ -347,7 +347,7 @@ CreateStandaloneExprContext(void)
* any previously computed pass-by-reference expression result will go away!
*
* If isCommit is false, we are being called in error cleanup, and should
- * not call callbacks but only release memory. (It might be better to call
+ * not call callbacks but only release memory. (It might be better to call
* the callbacks and pass the isCommit flag to them, but that would require
* more invasive code changes than currently seems justified.)
*
@@ -376,7 +376,7 @@ FreeExprContext(ExprContext *econtext, bool isCommit)
* ReScanExprContext
*
* Reset an expression context in preparation for a rescan of its
- * plan node. This requires calling any registered shutdown callbacks,
+ * plan node. This requires calling any registered shutdown callbacks,
* since any partially complete set-returning-functions must be canceled.
*
* Note we make no assumption about the caller's memory context.
@@ -417,7 +417,7 @@ MakePerTupleExprContext(EState *estate)
/* ----------------
* ExecAssignExprContext
*
- * This initializes the ps_ExprContext field. It is only necessary
+ * This initializes the ps_ExprContext field. It is only necessary
* to do this for nodes which use ExecQual or ExecProject
* because those routines require an econtext. Other nodes that
* don't have to evaluate expressions don't need to do this.
@@ -463,7 +463,7 @@ ExecAssignResultTypeFromTL(PlanState *planstate)
/*
* ExecTypeFromTL needs the parse-time representation of the tlist, not a
- * list of ExprStates. This is good because some plan nodes don't bother
+ * list of ExprStates. This is good because some plan nodes don't bother
* to set up planstate->targetlist ...
*/
tupDesc = ExecTypeFromTL(planstate->plan->targetlist, hasoid);
@@ -491,7 +491,7 @@ ExecGetResultType(PlanState *planstate)
* the given tlist should be a list of ExprState nodes, not Expr nodes.
*
* inputDesc can be NULL, but if it is not, we check to see whether simple
- * Vars in the tlist match the descriptor. It is important to provide
+ * Vars in the tlist match the descriptor. It is important to provide
* inputDesc for relation-scan plan nodes, as a cross check that the relation
* hasn't been changed since the plan was made. At higher levels of a plan,
* there is no need to recheck.
@@ -693,7 +693,7 @@ ExecAssignProjectionInfo(PlanState *planstate,
*
* However ... there is no particular need to do it during ExecEndNode,
* because FreeExecutorState will free any remaining ExprContexts within
- * the EState. Letting FreeExecutorState do it allows the ExprContexts to
+ * the EState. Letting FreeExecutorState do it allows the ExprContexts to
* be freed in reverse order of creation, rather than order of creation as
* will happen if we delete them here, which saves O(N^2) work in the list
* cleanup inside FreeExprContext.
@@ -713,7 +713,7 @@ ExecFreeExprContext(PlanState *planstate)
* the following scan type support functions are for
* those nodes which are stubborn and return tuples in
* their Scan tuple slot instead of their Result tuple
- * slot.. luck fur us, these nodes do not do projections
+ * slot.. luck fur us, these nodes do not do projections
* so we don't have to worry about getting the ProjectionInfo
* right for them... -cim 6/3/91
* ----------------------------------------------------------------
@@ -1096,7 +1096,7 @@ ExecInsertIndexTuples(TupleTableSlot *slot,
/*
* If the index has an associated exclusion constraint, check that.
* This is simpler than the process for uniqueness checks since we
- * always insert first and then check. If the constraint is deferred,
+ * always insert first and then check. If the constraint is deferred,
* we check now anyway, but don't throw error on violation; instead
* we'll queue a recheck event.
*
@@ -1280,7 +1280,7 @@ retry:
/*
* If an in-progress transaction is affecting the visibility of this
- * tuple, we need to wait for it to complete and then recheck. For
+ * tuple, we need to wait for it to complete and then recheck. For
* simplicity we do rechecking by just restarting the whole scan ---
* this case probably doesn't happen often enough to be worth trying
* harder, and anyway we don't want to hold any index internal locks
@@ -1337,7 +1337,7 @@ retry:
/*
* Check existing tuple's index values to see if it really matches the
- * exclusion condition against the new_values. Returns true if conflict.
+ * exclusion condition against the new_values. Returns true if conflict.
*/
static bool
index_recheck_constraint(Relation index, Oid *constr_procs,
diff --git a/src/backend/executor/functions.c b/src/backend/executor/functions.c
index 54f47b5b2a7..a5a1679c808 100644
--- a/src/backend/executor/functions.c
+++ b/src/backend/executor/functions.c
@@ -44,7 +44,7 @@ typedef struct
} DR_sqlfunction;
/*
- * We have an execution_state record for each query in a function. Each
+ * We have an execution_state record for each query in a function. Each
* record contains a plantree for its query. If the query is currently in
* F_EXEC_RUN state then there's a QueryDesc too.
*
@@ -255,7 +255,7 @@ sql_fn_param_ref(ParseState *pstate, ParamRef *pref)
* Set up the per-query execution_state records for a SQL function.
*
* The input is a List of Lists of parsed and rewritten, but not planned,
- * querytrees. The sublist structure denotes the original query boundaries.
+ * querytrees. The sublist structure denotes the original query boundaries.
*/
static List *
init_execution_state(List *queryTree_list,
@@ -442,7 +442,7 @@ init_sql_fcache(FmgrInfo *finfo, Oid collation, bool lazyEvalOK)
fcache->src = TextDatumGetCString(tmp);
/*
- * Parse and rewrite the queries in the function text. Use sublists to
+ * Parse and rewrite the queries in the function text. Use sublists to
* keep track of the original query boundaries. But we also build a
* "flat" list of the rewritten queries to pass to check_sql_fn_retval.
* This is because the last canSetTag query determines the result type
@@ -477,7 +477,7 @@ init_sql_fcache(FmgrInfo *finfo, Oid collation, bool lazyEvalOK)
* any polymorphic arguments.
*
* Note: we set fcache->returnsTuple according to whether we are returning
- * the whole tuple result or just a single column. In the latter case we
+ * the whole tuple result or just a single column. In the latter case we
* clear returnsTuple because we need not act different from the scalar
* result case, even if it's a rowtype column. (However, we have to force
* lazy eval mode in that case; otherwise we'd need extra code to expand
@@ -703,7 +703,7 @@ postquel_get_single_result(TupleTableSlot *slot,
/*
* Set up to return the function value. For pass-by-reference datatypes,
* be sure to allocate the result in resultcontext, not the current memory
- * context (which has query lifespan). We can't leave the data in the
+ * context (which has query lifespan). We can't leave the data in the
* TupleTableSlot because we intend to clear the slot before returning.
*/
oldcontext = MemoryContextSwitchTo(resultcontext);
@@ -849,7 +849,7 @@ fmgr_sql(PG_FUNCTION_ARGS)
* suspend execution before completion is if we are returning a row from a
* lazily-evaluated SELECT. So, when first entering this loop, we'll
* either start a new query (and push a fresh snapshot) or re-establish
- * the active snapshot from the existing query descriptor. If we need to
+ * the active snapshot from the existing query descriptor. If we need to
* start a new query in a subsequent execution of the loop, either we need
* a fresh snapshot (and pushed_snapshot is false) or the existing
* snapshot is on the active stack and we can just bump its command ID.
@@ -905,7 +905,7 @@ fmgr_sql(PG_FUNCTION_ARGS)
* Break from loop if we didn't shut down (implying we got a
* lazily-evaluated row). Otherwise we'll press on till the whole
* function is done, relying on the tuplestore to keep hold of the
- * data to eventually be returned. This is necessary since an
+ * data to eventually be returned. This is necessary since an
* INSERT/UPDATE/DELETE RETURNING that sets the result might be
* followed by additional rule-inserted commands, and we want to
* finish doing all those commands before we return anything.
@@ -927,7 +927,7 @@ fmgr_sql(PG_FUNCTION_ARGS)
/*
* Flush the current snapshot so that we will take a new one for
- * the new query list. This ensures that new snaps are taken at
+ * the new query list. This ensures that new snaps are taken at
* original-query boundaries, matching the behavior of interactive
* execution.
*/
@@ -985,7 +985,7 @@ fmgr_sql(PG_FUNCTION_ARGS)
else if (fcache->lazyEval)
{
/*
- * We are done with a lazy evaluation. Clean up.
+ * We are done with a lazy evaluation. Clean up.
*/
tuplestore_clear(fcache->tstore);
@@ -1009,8 +1009,8 @@ fmgr_sql(PG_FUNCTION_ARGS)
else
{
/*
- * We are done with a non-lazy evaluation. Return whatever is in
- * the tuplestore. (It is now caller's responsibility to free the
+ * We are done with a non-lazy evaluation. Return whatever is in
+ * the tuplestore. (It is now caller's responsibility to free the
* tuplestore when done.)
*/
rsi->returnMode = SFRM_Materialize;
@@ -1122,7 +1122,7 @@ sql_exec_error_callback(void *arg)
/*
* Try to determine where in the function we failed. If there is a query
- * with non-null QueryDesc, finger it. (We check this rather than looking
+ * with non-null QueryDesc, finger it. (We check this rather than looking
* for F_EXEC_RUN state, so that errors during ExecutorStart or
* ExecutorEnd are blamed on the appropriate query; see postquel_start and
* postquel_end.)
@@ -1414,7 +1414,7 @@ check_sql_fn_retval(Oid func_id, Oid rettype, List *queryTreeList,
* the function that's calling it.
*
* XXX Note that if rettype is RECORD, the IsBinaryCoercible check
- * will succeed for any composite restype. For the moment we rely on
+ * will succeed for any composite restype. For the moment we rely on
* runtime type checking to catch any discrepancy, but it'd be nice to
* do better at parse time.
*/
@@ -1460,7 +1460,7 @@ check_sql_fn_retval(Oid func_id, Oid rettype, List *queryTreeList,
/*
* Verify that the targetlist matches the return tuple type. We scan
* the non-deleted attributes to ensure that they match the datatypes
- * of the non-resjunk columns. For deleted attributes, insert NULL
+ * of the non-resjunk columns. For deleted attributes, insert NULL
* result columns if the caller asked for that.
*/
tupnatts = tupdesc->natts;
diff --git a/src/backend/executor/nodeAgg.c b/src/backend/executor/nodeAgg.c
index 13d77234801..34afb24b380 100644
--- a/src/backend/executor/nodeAgg.c
+++ b/src/backend/executor/nodeAgg.c
@@ -23,7 +23,7 @@
* The agg's first input type and transtype must be the same in this case!
*
* If transfunc is marked "strict" then NULL input_values are skipped,
- * keeping the previous transvalue. If transfunc is not strict then it
+ * keeping the previous transvalue. If transfunc is not strict then it
* is called for every input tuple and must deal with NULL initcond
* or NULL input_values for itself.
*
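
How a strict transition step behaves can be shown in a few lines of plain C. A minimal sketch with made-up types, not the executor's Datum machinery: NULL inputs are skipped (keeping the previous transvalue), and a NULL initial state is seeded by the first non-NULL input.

    #include <stdbool.h>

    typedef long (*transfn_t) (long state, long input);

    static long
    advance_strict(transfn_t transfn,
                   long state, bool *state_is_null,
                   long input, bool input_is_null)
    {
        if (input_is_null)
            return state;           /* skip input, keep previous transvalue */

        if (*state_is_null)
        {
            *state_is_null = false; /* first non-NULL input becomes state */
            return input;
        }

        return transfn(state, input);
    }
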
@@ -55,7 +55,7 @@
* it is completely forbidden for functions to modify pass-by-ref inputs,
* but in the aggregate case we know the left input is either the initial
* transition value or a previous function result, and in either case its
- * value need not be preserved. See int8inc() for an example. Notice that
+ * value need not be preserved. See int8inc() for an example. Notice that
* advance_transition_function() is coded to avoid a data copy step when
* the previous transition value pointer is returned. Also, some
* transition functions want to store working state in addition to the
@@ -174,7 +174,7 @@ typedef struct AggStatePerAggData
transtypeByVal;
/*
- * Stuff for evaluation of inputs. We used to just use ExecEvalExpr, but
+ * Stuff for evaluation of inputs. We used to just use ExecEvalExpr, but
* with the addition of ORDER BY we now need at least a slot for passing
* data to the sort object, which requires a tupledesc, so we might as
* well go whole hog and use ExecProject too.
@@ -194,7 +194,7 @@ typedef struct AggStatePerAggData
* input tuple group and updated for each input tuple.
*
* For a simple (non DISTINCT/ORDER BY) aggregate, we just feed the input
- * values straight to the transition function. If it's DISTINCT or
+ * values straight to the transition function. If it's DISTINCT or
* requires ORDER BY, we pass the input values into a Tuplesort object;
* then at completion of the input tuple group, we scan the sorted values,
* eliminate duplicates if needed, and run the transition function on the
@@ -229,7 +229,7 @@ typedef struct AggStatePerGroupData
/*
* Note: noTransValue initially has the same value as transValueIsNull,
- * and if true both are cleared to false at the same time. They are not
+ * and if true both are cleared to false at the same time. They are not
* the same though: if transfn later returns a NULL, we want to keep that
* NULL and not auto-replace it with a later input value. Only the first
* non-NULL input will be auto-substituted.
@@ -239,7 +239,7 @@ typedef struct AggStatePerGroupData
/*
* To implement hashed aggregation, we need a hashtable that stores a
* representative tuple and an array of AggStatePerGroup structs for each
- * distinct set of GROUP BY column values. We compute the hash key from
+ * distinct set of GROUP BY column values. We compute the hash key from
* the GROUP BY columns.
*/
typedef struct AggHashEntryData *AggHashEntry;
@@ -442,7 +442,7 @@ advance_transition_function(AggState *aggstate,
/*
* If pass-by-ref datatype, must copy the new value into aggcontext and
- * pfree the prior transValue. But if transfn returned a pointer to its
+ * pfree the prior transValue. But if transfn returned a pointer to its
* first input, we don't need to do anything.
*/
if (!peraggstate->transtypeByVal &&
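
The copy-then-free dance described in that comment, sketched in plain C with malloc/free standing in for aggcontext allocation and pfree; replace_transvalue() is hypothetical.

    #include <stdlib.h>
    #include <string.h>

    static void *
    replace_transvalue(void *oldval, void *newval, size_t len)
    {
        void       *copy;

        if (newval == oldval)
            return oldval;          /* transfn returned its first input */

        if ((copy = malloc(len)) == NULL)
            return NULL;            /* stands in for aggcontext allocation */
        memcpy(copy, newval, len);
        free(oldval);               /* the pfree of the prior transValue */
        return copy;
    }
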
@@ -466,7 +466,7 @@ advance_transition_function(AggState *aggstate,
}
/*
- * Advance all the aggregates for one input tuple. The input tuple
+ * Advance all the aggregates for one input tuple. The input tuple
* has been stored in tmpcontext->ecxt_outertuple, so that it is accessible
* to ExecEvalExpr. pergroup is the array of per-group structs to use
* (this might be in a hashtable entry).
@@ -544,7 +544,7 @@ advance_aggregates(AggState *aggstate, AggStatePerGroup pergroup)
/*
* Run the transition function for a DISTINCT or ORDER BY aggregate
* with only one input. This is called after we have completed
- * entering all the input values into the sort object. We complete the
+ * entering all the input values into the sort object. We complete the
* sort, read out the values in sorted order, and run the transition
* function on each value (applying DISTINCT if appropriate).
*
@@ -641,7 +641,7 @@ process_ordered_aggregate_single(AggState *aggstate,
/*
* Run the transition function for a DISTINCT or ORDER BY aggregate
* with more than one input. This is called after we have completed
- * entering all the input values into the sort object. We complete the
+ * entering all the input values into the sort object. We complete the
* sort, read out the values in sorted order, and run the transition
* function on each value (applying DISTINCT if appropriate).
*
@@ -966,9 +966,9 @@ lookup_hash_entry(AggState *aggstate, TupleTableSlot *inputslot)
* the appropriate attribute for each aggregate function use (Aggref
* node) appearing in the targetlist or qual of the node. The number
* of tuples to aggregate over depends on whether grouped or plain
- * aggregation is selected. In grouped aggregation, we produce a result
+ * aggregation is selected. In grouped aggregation, we produce a result
* row for each group; in plain aggregation there's a single result row
- * for the whole query. In either case, the value of each aggregate is
+ * for the whole query. In either case, the value of each aggregate is
* stored in the expression context to be used when ExecProject evaluates
* the result tuple.
*/
@@ -993,7 +993,7 @@ ExecAgg(AggState *node)
}
/*
- * Exit if nothing left to do. (We must do the ps_TupFromTlist check
+ * Exit if nothing left to do. (We must do the ps_TupFromTlist check
* first, because in some cases agg_done gets set before we emit the final
* aggregate tuple, and we have to finish running SRFs for it.)
*/
@@ -1077,7 +1077,7 @@ agg_retrieve_direct(AggState *aggstate)
/*
* Clear the per-output-tuple context for each group, as well as
* aggcontext (which contains any pass-by-ref transvalues of the old
- * group). We also clear any child contexts of the aggcontext; some
+ * group). We also clear any child contexts of the aggcontext; some
* aggregate functions store working state in such contexts.
*/
ResetExprContext(econtext);
@@ -1175,7 +1175,7 @@ agg_retrieve_direct(AggState *aggstate)
/*
* Use the representative input tuple for any references to
- * non-aggregated input columns in the qual and tlist. (If we are not
+ * non-aggregated input columns in the qual and tlist. (If we are not
* grouping, and there are no input rows at all, we will come here
* with an empty firstSlot ... but if not grouping, there can't be any
* references to non-aggregated input columns, so no problem.)
@@ -1399,8 +1399,8 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
aggstate->hashtable = NULL;
/*
- * Create expression contexts. We need two, one for per-input-tuple
- * processing and one for per-output-tuple processing. We cheat a little
+ * Create expression contexts. We need two, one for per-input-tuple
+ * processing and one for per-output-tuple processing. We cheat a little
* by using ExecAssignExprContext() to build both.
*/
ExecAssignExprContext(estate, &aggstate->ss.ps);
@@ -1433,7 +1433,7 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
* initialize child expressions
*
* Note: ExecInitExpr finds Aggrefs for us, and also checks that no aggs
- * contain other agg calls in their arguments. This would make no sense
+ * contain other agg calls in their arguments. This would make no sense
* under SQL semantics anyway (and it's forbidden by the spec). Because
* that is true, we don't need to worry about evaluating the aggs in any
* particular order.
@@ -1480,7 +1480,7 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
* This is not an error condition: we might be using the Agg node just
* to do hash-based grouping. Even in the regular case,
* constant-expression simplification could optimize away all of the
- * Aggrefs in the targetlist and qual. So keep going, but force local
+ * Aggrefs in the targetlist and qual. So keep going, but force local
* copy of numaggs positive so that palloc()s below don't choke.
*/
numaggs = 1;
@@ -1593,7 +1593,7 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
peraggstate->sortstate = NULL;
/*
- * Get actual datatypes of the inputs. These could be different from
+ * Get actual datatypes of the inputs. These could be different from
* the agg's declared input types, when the agg accepts ANY or a
* polymorphic type.
*/
@@ -1718,7 +1718,7 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
* If the transfn is strict and the initval is NULL, make sure input
* type and transtype are the same (or at least binary-compatible), so
* that it's OK to use the first input value as the initial
- * transValue. This should have been checked at agg definition time,
+ * transValue. This should have been checked at agg definition time,
* but just in case...
*/
if (peraggstate->transfn.fn_strict && peraggstate->initValueIsNull)
@@ -1992,8 +1992,8 @@ ExecReScanAgg(AggState *node)
*
* The transition and/or final functions of an aggregate may want to verify
* that they are being called as aggregates, rather than as plain SQL
- * functions. They should use this function to do so. The return value
- * is nonzero if being called as an aggregate, or zero if not. (Specific
+ * functions. They should use this function to do so. The return value
+ * is nonzero if being called as an aggregate, or zero if not. (Specific
* nonzero values are AGG_CONTEXT_AGGREGATE or AGG_CONTEXT_WINDOW, but more
* values could conceivably appear in future.)
*
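
The calling pattern that comment prescribes, as a hypothetical transition function. AggCheckCallContext() and the fmgr macros are the real API; my_trans() itself is made up.

    #include "postgres.h"
    #include "fmgr.h"

    PG_FUNCTION_INFO_V1(my_trans);

    Datum
    my_trans(PG_FUNCTION_ARGS)
    {
        MemoryContext aggcontext;

        /* Nonzero result means we really are being called as an aggregate. */
        if (!AggCheckCallContext(fcinfo, &aggcontext))
            elog(ERROR, "my_trans called in non-aggregate context");

        /* Long-lived transition state would be allocated in aggcontext. */
        PG_RETURN_INT64(PG_GETARG_INT64(0) + PG_GETARG_INT64(1));
    }
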
diff --git a/src/backend/executor/nodeAppend.c b/src/backend/executor/nodeAppend.c
index 340c4431b5f..8f490881107 100644
--- a/src/backend/executor/nodeAppend.c
+++ b/src/backend/executor/nodeAppend.c
@@ -33,7 +33,7 @@
*          /
*      Append -------+------+------+--- nil
*      /   \         |      |      |
- *    nil   nil      ...    ...    ...
+ *    nil   nil      ...    ...    ...
*                        subplans

*
* Append nodes are currently used for unions, and to support
diff --git a/src/backend/executor/nodeBitmapHeapscan.c b/src/backend/executor/nodeBitmapHeapscan.c
index c376c55f1e1..152f7d4c2af 100644
--- a/src/backend/executor/nodeBitmapHeapscan.c
+++ b/src/backend/executor/nodeBitmapHeapscan.c
@@ -5,7 +5,7 @@
*
* NOTE: it is critical that this plan type only be used with MVCC-compliant
* snapshots (ie, regular snapshots, not SnapshotNow or one of the other
- * special snapshots). The reason is that since index and heap scans are
+ * special snapshots). The reason is that since index and heap scans are
* decoupled, there can be no assurance that the index tuple prompting a
* visit to a particular heap TID still exists when the visit is made.
* Therefore the tuple might not exist anymore either (which is OK because
@@ -329,7 +329,7 @@ bitgetpage(HeapScanDesc scan, TBMIterateResult *tbmres)
/*
* We must hold share lock on the buffer content while examining tuple
- * visibility. Afterwards, however, the tuples we have found to be
+ * visibility. Afterwards, however, the tuples we have found to be
* visible are guaranteed good as long as we hold the buffer pin.
*/
LockBuffer(buffer, BUFFER_LOCK_SHARE);
diff --git a/src/backend/executor/nodeFunctionscan.c b/src/backend/executor/nodeFunctionscan.c
index dedd2550102..42d699d0159 100644
--- a/src/backend/executor/nodeFunctionscan.c
+++ b/src/backend/executor/nodeFunctionscan.c
@@ -281,7 +281,7 @@ ExecReScanFunctionScan(FunctionScanState *node)
/*
* Here we have a choice whether to drop the tuplestore (and recompute the
* function outputs) or just rescan it. We must recompute if the
- * expression contains parameters, else we rescan. XXX maybe we should
+ * expression contains parameters, else we rescan. XXX maybe we should
* recompute if the function is volatile?
*/
if (node->ss.ps.chgParam != NULL)
diff --git a/src/backend/executor/nodeHash.c b/src/backend/executor/nodeHash.c
index dff66a60075..17de5e95d2d 100644
--- a/src/backend/executor/nodeHash.c
+++ b/src/backend/executor/nodeHash.c
@@ -366,7 +366,7 @@ ExecHashTableCreate(Hash *node, List *hashOperators, bool keepNulls)
/*
* Set up for skew optimization, if possible and there's a need for more
- * than one batch. (In a one-batch join, there's no point in it.)
+ * than one batch. (In a one-batch join, there's no point in it.)
*/
if (nbatch > 1)
ExecHashBuildSkewHash(hashtable, node, num_skew_mcvs);
@@ -408,7 +408,7 @@ ExecChooseHashTableSize(double ntuples, int tupwidth, bool useskew,
/*
* Estimate tupsize based on footprint of tuple in hashtable... note this
- * does not allow for any palloc overhead. The manipulations of spaceUsed
+ * does not allow for any palloc overhead. The manipulations of spaceUsed
* don't count palloc overhead either.
*/
tupsize = HJTUPLE_OVERHEAD +
@@ -460,7 +460,7 @@ ExecChooseHashTableSize(double ntuples, int tupwidth, bool useskew,
/*
* Set nbuckets to achieve an average bucket load of NTUP_PER_BUCKET when
* memory is filled. Set nbatch to the smallest power of 2 that appears
- * sufficient. The Min() steps limit the results so that the pointer
+ * sufficient. The Min() steps limit the results so that the pointer
* arrays we'll try to allocate do not exceed work_mem.
*/
max_pointers = (work_mem * 1024L) / sizeof(void *);
@@ -499,8 +499,8 @@ ExecChooseHashTableSize(double ntuples, int tupwidth, bool useskew,
/*
* Both nbuckets and nbatch must be powers of 2 to make
- * ExecHashGetBucketAndBatch fast. We already fixed nbatch; now inflate
- * nbuckets to the next larger power of 2. We also force nbuckets to not
+ * ExecHashGetBucketAndBatch fast. We already fixed nbatch; now inflate
+ * nbuckets to the next larger power of 2. We also force nbuckets to not
* be real small, by starting the search at 2^10. (Note: above we made
* sure that nbuckets is not more than INT_MAX / 2, so this loop cannot
* overflow, nor can the final shift to recalculate nbuckets.)
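
What the power-of-2 requirement buys is mask-and-shift arithmetic instead of division, and the inflation step is a simple doubling loop. A sketch under the constraints stated above (the caller has already capped the desired count at INT_MAX / 2):

    #include <stdint.h>

    /* Split a hash value into bucket and batch numbers with no division;
     * this only works because nbuckets and nbatch are powers of 2. */
    static void
    get_bucket_and_batch(uint32_t hashvalue,
                         int nbuckets, int log2_nbuckets, int nbatch,
                         int *bucketno, int *batchno)
    {
        *bucketno = hashvalue & (nbuckets - 1);
        *batchno = (hashvalue >> log2_nbuckets) & (nbatch - 1);
    }

    /* Inflate a desired bucket count to the next larger power of 2,
     * starting the search at 2^10 so nbuckets can't get real small. */
    static int
    round_up_buckets(int dbuckets)
    {
        int         nbuckets = 1 << 10;

        while (nbuckets < dbuckets)
            nbuckets <<= 1;
        return nbuckets;
    }
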
@@ -818,7 +818,7 @@ ExecHashGetHashValue(HashJoinTable hashtable,
* the hash support function as strict even if the operator is not.
*
* Note: currently, all hashjoinable operators must be strict since
- * the hash index AM assumes that. However, it takes so little extra
+ * the hash index AM assumes that. However, it takes so little extra
* code here to allow non-strict that we may as well do it.
*/
if (isNull)
@@ -1238,7 +1238,7 @@ ExecHashBuildSkewHash(HashJoinTable hashtable, Hash *node, int mcvsToUse)
/*
* While we have not hit a hole in the hashtable and have not hit
* the desired bucket, we have collided with some previous hash
- * value, so try the next bucket location. NB: this code must
+ * value, so try the next bucket location. NB: this code must
* match ExecHashGetSkewBucket.
*/
bucket = hashvalue & (nbuckets - 1);
@@ -1436,7 +1436,7 @@ ExecHashRemoveNextSkewBucket(HashJoinTable hashtable)
* NOTE: this is not nearly as simple as it looks on the surface, because
* of the possibility of collisions in the hashtable. Suppose that hash
* values A and B collide at a particular hashtable entry, and that A was
- * entered first so B gets shifted to a different table entry. If we were
+ * entered first so B gets shifted to a different table entry. If we were
* to remove A first then ExecHashGetSkewBucket would mistakenly start
* reporting that B is not in the hashtable, because it would hit the NULL
* before finding B. However, we always remove entries in the reverse
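
A minimal sketch of the shared probe sequence those comments insist on, in plain C with a made-up slot type (the real code walks an array of HashSkewBucket pointers). Insertion and lookup must probe in exactly this order, and removal must unwind in reverse, or a lookup would stop at a hole left in the middle of someone else's probe chain.

    #include <stdint.h>

    typedef struct SkewSlot
    {
        uint32_t    hashvalue;
        int         used;
    } SkewSlot;

    /* Walk forward from the home position past collisions until we hit
     * a hole or a slot carrying the same hash value. */
    static int
    skew_probe(const SkewSlot *slots, int nslots, uint32_t hashvalue)
    {
        int         bucket = hashvalue & (nslots - 1);  /* power-of-2 nslots */

        while (slots[bucket].used && slots[bucket].hashvalue != hashvalue)
            bucket = (bucket + 1) & (nslots - 1);
        return bucket;
    }
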
diff --git a/src/backend/executor/nodeHashjoin.c b/src/backend/executor/nodeHashjoin.c
index 3a6698105f2..2e8722d4dad 100644
--- a/src/backend/executor/nodeHashjoin.c
+++ b/src/backend/executor/nodeHashjoin.c
@@ -125,7 +125,7 @@ ExecHashJoin(HashJoinState *node)
* check this when the outer relation's startup cost is less
* than the projected cost of building the hash table.
* Otherwise it's best to build the hash table first and see
- * if the inner relation is empty. (When it's a left join, we
+ * if the inner relation is empty. (When it's a left join, we
* should always make this check, since we aren't going to be
* able to skip the join on the strength of an empty inner
* relation anyway.)
@@ -521,7 +521,7 @@ ExecInitHashJoin(HashJoin *node, EState *estate, int eflags)
* tuple slot of the Hash node (which is our inner plan). we can do this
* because Hash nodes don't return tuples via ExecProcNode() -- instead
* the hash join node uses ExecScanHashBucket() to get at the contents of
- * the hash table. -cim 6/9/91
+ * the hash table. -cim 6/9/91
*/
{
HashState *hashstate = (HashState *) innerPlanState(hjstate);
@@ -887,7 +887,7 @@ ExecHashJoinSaveTuple(MinimalTuple tuple, uint32 hashvalue,
/*
* ExecHashJoinGetSavedTuple
- * read the next tuple from a batch file. Return NULL if no more.
+ * read the next tuple from a batch file. Return NULL if no more.
*
* On success, *hashvalue is set to the tuple's hash value, and the tuple
* itself is stored in the given slot.
diff --git a/src/backend/executor/nodeIndexscan.c b/src/backend/executor/nodeIndexscan.c
index dbc1467d5be..08379fd85ba 100644
--- a/src/backend/executor/nodeIndexscan.c
+++ b/src/backend/executor/nodeIndexscan.c
@@ -212,7 +212,7 @@ ExecIndexEvalRuntimeKeys(ExprContext *econtext,
/*
* For each run-time key, extract the run-time expression and evaluate
- * it with respect to the current context. We then stick the result
+ * it with respect to the current context. We then stick the result
* into the proper scan key.
*
* Note: the result of the eval could be a pass-by-ref value that's
@@ -345,7 +345,7 @@ ExecIndexAdvanceArrayKeys(IndexArrayKeyInfo *arrayKeys, int numArrayKeys)
/*
* Note we advance the rightmost array key most quickly, since it will
* correspond to the lowest-order index column among the available
- * qualifications. This is hypothesized to result in better locality of
+ * qualifications. This is hypothesized to result in better locality of
* access in the index.
*/
for (j = numArrayKeys - 1; j >= 0; j--)
diff --git a/src/backend/executor/nodeLimit.c b/src/backend/executor/nodeLimit.c
index f2d356d1f18..a3e3c7ee261 100644
--- a/src/backend/executor/nodeLimit.c
+++ b/src/backend/executor/nodeLimit.c
@@ -113,7 +113,7 @@ ExecLimit(LimitState *node)
/*
* The subplan is known to return no tuples (or not more than
- * OFFSET tuples, in general). So we return no tuples.
+ * OFFSET tuples, in general). So we return no tuples.
*/
return NULL;
diff --git a/src/backend/executor/nodeLockRows.c b/src/backend/executor/nodeLockRows.c
index d71278ebd72..6f5c2d2290d 100644
--- a/src/backend/executor/nodeLockRows.c
+++ b/src/backend/executor/nodeLockRows.c
@@ -155,7 +155,7 @@ lnext:
tuple.t_self = copyTuple->t_self;
/*
- * Need to run a recheck subquery. Initialize EPQ state if we
+ * Need to run a recheck subquery. Initialize EPQ state if we
* didn't do so already.
*/
if (!epq_started)
@@ -186,7 +186,7 @@ lnext:
{
/*
* First, fetch a copy of any rows that were successfully locked
- * without any update having occurred. (We do this in a separate pass
+ * without any update having occurred. (We do this in a separate pass
* so as to avoid overhead in the common case where there are no
* concurrent updates.)
*/
@@ -291,7 +291,7 @@ ExecInitLockRows(LockRows *node, EState *estate, int eflags)
/*
* Locate the ExecRowMark(s) that this node is responsible for, and
- * construct ExecAuxRowMarks for them. (InitPlan should already have
+ * construct ExecAuxRowMarks for them. (InitPlan should already have
* built the global list of ExecRowMarks.)
*/
lrstate->lr_arowMarks = NIL;
@@ -313,7 +313,7 @@ ExecInitLockRows(LockRows *node, EState *estate, int eflags)
aerm = ExecBuildAuxRowMark(erm, outerPlan->targetlist);
/*
- * Only locking rowmarks go into our own list. Non-locking marks are
+ * Only locking rowmarks go into our own list. Non-locking marks are
* passed off to the EvalPlanQual machinery. This is because we don't
* want to bother fetching non-locked rows unless we actually have to
* do an EPQ recheck.
diff --git a/src/backend/executor/nodeMaterial.c b/src/backend/executor/nodeMaterial.c
index 4d99dac1d65..aa984544e0f 100644
--- a/src/backend/executor/nodeMaterial.c
+++ b/src/backend/executor/nodeMaterial.c
@@ -185,7 +185,7 @@ ExecInitMaterial(Material *node, EState *estate, int eflags)
/*
* Tuplestore's interpretation of the flag bits is subtly different from
* the general executor meaning: it doesn't think BACKWARD necessarily
- * means "backwards all the way to start". If told to support BACKWARD we
+ * means "backwards all the way to start". If told to support BACKWARD we
* must include REWIND in the tuplestore eflags, else tuplestore_trim
* might throw away too much.
*/
diff --git a/src/backend/executor/nodeMergeAppend.c b/src/backend/executor/nodeMergeAppend.c
index 43059664b93..db0ead5edf1 100644
--- a/src/backend/executor/nodeMergeAppend.c
+++ b/src/backend/executor/nodeMergeAppend.c
@@ -32,7 +32,7 @@
*          /
*      MergeAppend---+------+------+--- nil
*      /   \         |      |      |
- *    nil   nil      ...    ...    ...
+ *    nil   nil      ...    ...    ...
*                        subplans
*/
diff --git a/src/backend/executor/nodeMergejoin.c b/src/backend/executor/nodeMergejoin.c
index 7d27123cf05..16da6e03a87 100644
--- a/src/backend/executor/nodeMergejoin.c
+++ b/src/backend/executor/nodeMergejoin.c
@@ -41,7 +41,7 @@
*
* Therefore, rather than directly executing the merge join clauses,
* we evaluate the left and right key expressions separately and then
- * compare the columns one at a time (see MJCompare). The planner
+ * compare the columns one at a time (see MJCompare). The planner
* passes us enough information about the sort ordering of the inputs
* to allow us to determine how to make the comparison. We may use the
* appropriate btree comparison function, since Postgres' only notion
@@ -272,7 +272,7 @@ MJExamineQuals(List *mergeclauses,
* input, since we assume mergejoin operators are strict. If the NULL
* is in the first join column, and that column sorts nulls last, then
* we can further conclude that no following tuple can match anything
- * either, since they must all have nulls in the first column. However,
+ * either, since they must all have nulls in the first column. However,
* that case is only interesting if we're not in FillOuter mode, else
* we have to visit all the tuples anyway.
*
@@ -327,7 +327,7 @@ MJEvalOuterValues(MergeJoinState *mergestate)
/*
* MJEvalInnerValues
*
- * Same as above, but for the inner tuple. Here, we have to be prepared
+ * Same as above, but for the inner tuple. Here, we have to be prepared
* to load data from either the true current inner, or the marked inner,
* so caller must tell us which slot to load from.
*/
@@ -455,7 +455,7 @@ MJCompare(MergeJoinState *mergestate)
/*
* If we had any null comparison results or NULL-vs-NULL inputs, we do not
* want to report that the tuples are equal. Instead, if result is still
- * 0, change it to +1. This will result in advancing the inner side of
+ * 0, change it to +1. This will result in advancing the inner side of
* the join.
*
* Likewise, if there was a constant-false joinqual, do not report
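
Condensed into plain C over integer keys (MJCompare itself works with Datums and per-column comparison functions), the rule reads roughly like this; compare_keys() is hypothetical:

    #include <stdbool.h>

    static int
    compare_keys(const long *outer, const bool *outer_null,
                 const long *inner, const bool *inner_null, int nkeys)
    {
        int         result = 0;
        bool        saw_null = false;

        for (int i = 0; i < nkeys; i++)
        {
            if (outer_null[i] || inner_null[i])
            {
                saw_null = true;    /* a null comparison result */
                continue;
            }
            if (outer[i] != inner[i])
            {
                result = (outer[i] < inner[i]) ? -1 : 1;
                break;
            }
        }

        /* Never report equality on nulls; +1 advances the inner side. */
        if (result == 0 && saw_null)
            result = 1;

        return result;
    }
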
@@ -768,7 +768,7 @@ ExecMergeJoin(MergeJoinState *node)
case MJEVAL_MATCHABLE:
/*
- * OK, we have the initial tuples. Begin by skipping
+ * OK, we have the initial tuples. Begin by skipping
* non-matching tuples.
*/
node->mj_JoinState = EXEC_MJ_SKIP_TEST;
@@ -1159,7 +1159,7 @@ ExecMergeJoin(MergeJoinState *node)
* which means that all subsequent outer tuples will be
* larger than our marked inner tuples. So we need not
* revisit any of the marked tuples but can proceed to
- * look for a match to the current inner. If there's
+ * look for a match to the current inner. If there's
* no more inners, no more matches are possible.
* ----------------
*/
@@ -1550,7 +1550,7 @@ ExecInitMergeJoin(MergeJoin *node, EState *estate, int eflags)
* For certain types of inner child nodes, it is advantageous to issue
* MARK every time we advance past an inner tuple we will never return to.
* For other types, MARK on a tuple we cannot return to is a waste of
- * cycles. Detect which case applies and set mj_ExtraMarks if we want to
+ * cycles. Detect which case applies and set mj_ExtraMarks if we want to
* issue "unnecessary" MARK calls.
*
* Currently, only Material wants the extra MARKs, and it will be helpful
diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c
index d7c1e4a208b..c2bcf9c330d 100644
--- a/src/backend/executor/nodeModifyTable.c
+++ b/src/backend/executor/nodeModifyTable.c
@@ -30,7 +30,7 @@
*
* If the query specifies RETURNING, then the ModifyTable returns a
* RETURNING tuple after completing each row insert, update, or delete.
- * It must be called again to continue the operation. Without RETURNING,
+ * It must be called again to continue the operation. Without RETURNING,
* we just loop within the node until all the work is done, then
* return NULL. This avoids useless call/return overhead.
*/
@@ -409,7 +409,7 @@ ldelete:;
{
/*
* We have to put the target tuple into a slot, which means first we
- * gotta fetch it. We can use the trigger tuple slot.
+ * gotta fetch it. We can use the trigger tuple slot.
*/
TupleTableSlot *slot = estate->es_trig_tuple_slot;
TupleTableSlot *rslot;
@@ -461,7 +461,7 @@ ldelete:;
* note: we can't run UPDATE queries with transactions
* off because UPDATEs are actually INSERTs and our
* scan will mistakenly loop forever, updating the tuple
- * it just inserted.. This should be fixed but until it
+ * it just inserted.. This should be fixed but until it
* is, we don't want to get stuck in an infinite loop
* which corrupts your database..
*
@@ -550,7 +550,7 @@ ExecUpdate(ItemPointer tupleid,
*
* If we generate a new candidate tuple after EvalPlanQual testing, we
* must loop back here and recheck constraints. (We don't need to
- * redo triggers, however. If there are any BEFORE triggers then
+ * redo triggers, however. If there are any BEFORE triggers then
* trigger.c will have done heap_lock_tuple to lock the correct tuple,
* so there's no need to do them again.)
*/
@@ -757,7 +757,7 @@ ExecModifyTable(ModifyTableState *node)
/*
* es_result_relation_info must point to the currently active result
- * relation while we are within this ModifyTable node. Even though
+ * relation while we are within this ModifyTable node. Even though
* ModifyTable nodes can't be nested statically, they can be nested
* dynamically (since our subplan could include a reference to a modifying
* CTE). So we have to save and restore the caller's value.
@@ -773,7 +773,7 @@ ExecModifyTable(ModifyTableState *node)
for (;;)
{
/*
- * Reset the per-output-tuple exprcontext. This is needed because
+ * Reset the per-output-tuple exprcontext. This is needed because
* triggers expect to use that context as workspace. It's a bit ugly
* to do this below the top level of the plan, however. We might need
* to rethink this later.
@@ -934,7 +934,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
* call ExecInitNode on each of the plans to be executed and save the
* results into the array "mt_plans". This is also a convenient place to
* verify that the proposed target relations are valid and open their
- * indexes for insertion of new index entries. Note we *must* set
+ * indexes for insertion of new index entries. Note we *must* set
* estate->es_result_relation_info correctly while we initialize each
* sub-plan; ExecContextForcesOids depends on that!
*/
@@ -954,7 +954,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
/*
* If there are indices on the result relation, open them and save
* descriptors in the result relation info, so that we can add new
- * index entries for the tuples we add/update. We need not do this
+ * index entries for the tuples we add/update. We need not do this
* for a DELETE, however, since deletion doesn't affect indexes.
* Also, inside an EvalPlanQual operation, the indexes might be open
* already, since we share the resultrel state with the original
@@ -985,7 +985,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
/*
* Initialize result tuple slot and assign its rowtype using the first
- * RETURNING list. We assume the rest will look the same.
+ * RETURNING list. We assume the rest will look the same.
*/
tupDesc = ExecTypeFromTL((List *) linitial(node->returningLists),
false);
@@ -1031,7 +1031,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
/*
* If we have any secondary relations in an UPDATE or DELETE, they need to
* be treated like non-locked relations in SELECT FOR UPDATE, ie, the
- * EvalPlanQual mechanism needs to be told about them. Locate the
+ * EvalPlanQual mechanism needs to be told about them. Locate the
* relevant ExecRowMarks.
*/
foreach(l, node->rowMarks)
@@ -1072,7 +1072,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
* attribute present --- no need to look first.
*
* If there are multiple result relations, each one needs its own junk
- * filter. Note multiple rels are only possible for UPDATE/DELETE, so we
+ * filter. Note multiple rels are only possible for UPDATE/DELETE, so we
* can't be fooled by some needing a filter and some not.
*
* This section of code is also a convenient place to verify that the
diff --git a/src/backend/executor/nodeRecursiveunion.c b/src/backend/executor/nodeRecursiveunion.c
index dace6eeae5b..7a9dce5cb7c 100644
--- a/src/backend/executor/nodeRecursiveunion.c
+++ b/src/backend/executor/nodeRecursiveunion.c
@@ -316,7 +316,7 @@ ExecReScanRecursiveUnion(RecursiveUnionState *node)
/*
* if chgParam of subnode is not null then plan will be re-scanned by
- * first ExecProcNode. Because of above, we only have to do this to the
+ * first ExecProcNode. Because of above, we only have to do this to the
* non-recursive term.
*/
if (outerPlan->chgParam == NULL)
diff --git a/src/backend/executor/nodeSetOp.c b/src/backend/executor/nodeSetOp.c
index 9106f148730..9c7ab46b10c 100644
--- a/src/backend/executor/nodeSetOp.c
+++ b/src/backend/executor/nodeSetOp.c
@@ -5,7 +5,7 @@
*
* The input of a SetOp node consists of tuples from two relations,
* which have been combined into one dataset, with a junk attribute added
- * that shows which relation each tuple came from. In SETOP_SORTED mode,
+ * that shows which relation each tuple came from. In SETOP_SORTED mode,
* the input has furthermore been sorted according to all the grouping
* columns (ie, all the non-junk attributes). The SetOp node scans each
* group of identical tuples to determine how many came from each input
@@ -18,7 +18,7 @@
* relation is the left-hand one for EXCEPT, and tries to make the smaller
* input relation come first for INTERSECT. We build a hash table in memory
* with one entry for each group of identical tuples, and count the number of
- * tuples in the group from each relation. After seeing all the input, we
+ * tuples in the group from each relation. After seeing all the input, we
* scan the hashtable and generate the correct output using those counts.
* We can avoid making hashtable entries for any tuples appearing only in the
* second input relation, since they cannot result in any output.
@@ -267,7 +267,7 @@ setop_retrieve_direct(SetOpState *setopstate)
/*
* Store the copied first input tuple in the tuple table slot reserved
- * for it. The tuple will be deleted when it is cleared from the
+ * for it. The tuple will be deleted when it is cleared from the
* slot.
*/
ExecStoreTuple(setopstate->grp_firstTuple,
diff --git a/src/backend/executor/nodeSubplan.c b/src/backend/executor/nodeSubplan.c
index b7a644ed304..42cb9e978fa 100644
--- a/src/backend/executor/nodeSubplan.c
+++ b/src/backend/executor/nodeSubplan.c
@@ -259,12 +259,12 @@ ExecScanSubPlan(SubPlanState *node,
* semantics for ANY_SUBLINK or AND semantics for ALL_SUBLINK.
* (ROWCOMPARE_SUBLINK doesn't allow multiple tuples from the subplan.)
* NULL results from the combining operators are handled according to the
- * usual SQL semantics for OR and AND. The result for no input tuples is
+ * usual SQL semantics for OR and AND. The result for no input tuples is
* FALSE for ANY_SUBLINK, TRUE for ALL_SUBLINK, NULL for
* ROWCOMPARE_SUBLINK.
*
* For EXPR_SUBLINK we require the subplan to produce no more than one
- * tuple, else an error is raised. If zero tuples are produced, we return
+ * tuple, else an error is raised. If zero tuples are produced, we return
* NULL. Assuming we get a tuple, we just use its first column (there can
* be only one non-junk column in this case).
*
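
The combining rule for ANY_SUBLINK in miniature, using a made-up three-valued type (the executor tracks Datum/isNull pairs instead). ALL_SUBLINK is the dual: AND semantics, with TRUE for no input tuples.

    typedef enum
    {
        TV_FALSE, TV_TRUE, TV_NULL
    } TriValue;

    /* OR semantics with SQL null handling: any TRUE wins outright, an
     * unmatched NULL makes the answer NULL, and zero rows yield FALSE. */
    static TriValue
    combine_any(const TriValue *rowres, int nrows)
    {
        TriValue    result = TV_FALSE;

        for (int i = 0; i < nrows; i++)
        {
            if (rowres[i] == TV_TRUE)
                return TV_TRUE;
            if (rowres[i] == TV_NULL)
                result = TV_NULL;
        }
        return result;
    }
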
@@ -407,7 +407,7 @@ ExecScanSubPlan(SubPlanState *node,
else if (!found)
{
/*
- * deal with empty subplan result. result/isNull were previously
+ * deal with empty subplan result. result/isNull were previously
* initialized correctly for all sublink types except EXPR and
* ROWCOMPARE; for those, return NULL.
*/
@@ -892,7 +892,7 @@ ExecInitSubPlan(SubPlan *subplan, PlanState *parent)
*
* This is called from ExecEvalParamExec() when the value of a PARAM_EXEC
* parameter is requested and the param's execPlan field is set (indicating
- * that the param has not yet been evaluated). This allows lazy evaluation
+ * that the param has not yet been evaluated). This allows lazy evaluation
* of initplans: we don't run the subplan until/unless we need its output.
* Note that this routine MUST clear the execPlan fields of the plan's
* output parameters after evaluating them!
@@ -1120,7 +1120,7 @@ ExecInitAlternativeSubPlan(AlternativeSubPlan *asplan, PlanState *parent)
/*
* Select the one to be used. For this, we need an estimate of the number
* of executions of the subplan. We use the number of output rows
- * expected from the parent plan node. This is a good estimate if we are
+ * expected from the parent plan node. This is a good estimate if we are
* in the parent's targetlist, and an underestimate (but probably not by
* more than a factor of 2) if we are in the qual.
*/
diff --git a/src/backend/executor/nodeSubqueryscan.c b/src/backend/executor/nodeSubqueryscan.c
index 04cdab9fa15..0a48ce1240b 100644
--- a/src/backend/executor/nodeSubqueryscan.c
+++ b/src/backend/executor/nodeSubqueryscan.c
@@ -100,7 +100,7 @@ ExecInitSubqueryScan(SubqueryScan *node, EState *estate, int eflags)
Assert(!(eflags & EXEC_FLAG_MARK));
/*
- * SubqueryScan should not have any "normal" children. Also, if planner
+ * SubqueryScan should not have any "normal" children. Also, if planner
* left anything in subrtable/subrowmark, it's fishy.
*/
Assert(outerPlan(node) == NULL);
@@ -199,7 +199,7 @@ ExecReScanSubqueryScan(SubqueryScanState *node)
/*
* ExecReScan doesn't know about my subplan, so I have to do
- * changed-parameter signaling myself. This is just as well, because the
+ * changed-parameter signaling myself. This is just as well, because the
* subplan has its own memory context in which its chgParam state lives.
*/
if (node->ss.ps.chgParam != NULL)
diff --git a/src/backend/executor/nodeUnique.c b/src/backend/executor/nodeUnique.c
index ebc3a61e607..da938192321 100644
--- a/src/backend/executor/nodeUnique.c
+++ b/src/backend/executor/nodeUnique.c
@@ -4,7 +4,7 @@
* Routines to handle unique'ing of queries where appropriate
*
* Unique is a very simple node type that just filters out duplicate
- * tuples from a stream of sorted tuples from its subplan. It's essentially
+ * tuples from a stream of sorted tuples from its subplan. It's essentially
* a dumbed-down form of Group: the duplicate-removal functionality is
* identical. However, Unique doesn't do projection nor qual checking,
* so it's marginally more efficient for cases where neither is needed.
diff --git a/src/backend/executor/nodeValuesscan.c b/src/backend/executor/nodeValuesscan.c
index 766407388f5..b60a6fdd935 100644
--- a/src/backend/executor/nodeValuesscan.c
+++ b/src/backend/executor/nodeValuesscan.c
@@ -213,7 +213,7 @@ ExecInitValuesScan(ValuesScan *node, EState *estate, int eflags)
planstate = &scanstate->ss.ps;
/*
- * Create expression contexts. We need two, one for per-sublist
+ * Create expression contexts. We need two, one for per-sublist
* processing and one for execScan.c to use for quals and projections. We
* cheat a little by using ExecAssignExprContext() to build both.
*/
diff --git a/src/backend/executor/nodeWindowAgg.c b/src/backend/executor/nodeWindowAgg.c
index c90d40506f9..7db0fabc018 100644
--- a/src/backend/executor/nodeWindowAgg.c
+++ b/src/backend/executor/nodeWindowAgg.c
@@ -4,7 +4,7 @@
* routines to handle WindowAgg nodes.
*
* A WindowAgg node evaluates "window functions" across suitable partitions
- * of the input tuple set. Any one WindowAgg works for just a single window
+ * of the input tuple set. Any one WindowAgg works for just a single window
* specification, though it can evaluate multiple window functions sharing
* identical window specifications. The input tuples are required to be
* delivered in sorted order, with the PARTITION BY columns (if any) as
@@ -14,7 +14,7 @@
*
* Since window functions can require access to any or all of the rows in
* the current partition, we accumulate rows of the partition into a
- * tuplestore. The window functions are called using the WindowObject API
+ * tuplestore. The window functions are called using the WindowObject API
* so that they can access those rows as needed.
*
* We also support using plain aggregate functions as window functions.
@@ -299,7 +299,7 @@ advance_windowaggregate(WindowAggState *winstate,
/*
* If pass-by-ref datatype, must copy the new value into aggcontext and
- * pfree the prior transValue. But if transfn returned a pointer to its
+ * pfree the prior transValue. But if transfn returned a pointer to its
* first input, we don't need to do anything.
*/
if (!peraggstate->transtypeByVal &&
@@ -441,7 +441,7 @@ eval_windowaggregates(WindowAggState *winstate)
* TODO: Rerunning aggregates from the frame start can be pretty slow. For
* some aggregates like SUM and COUNT we could avoid that by implementing
* a "negative transition function" that would be called for each row as
- * it exits the frame. We'd have to think about avoiding recalculation of
+ * it exits the frame. We'd have to think about avoiding recalculation of
* volatile arguments of aggregate functions, too.
*/
@@ -512,7 +512,7 @@ eval_windowaggregates(WindowAggState *winstate)
* Advance until we reach a row not in frame (or end of partition).
*
* Note the loop invariant: agg_row_slot is either empty or holds the row
- * at position aggregatedupto. We advance aggregatedupto after processing
+ * at position aggregatedupto. We advance aggregatedupto after processing
* a row.
*/
for (;;)
@@ -776,7 +776,7 @@ spool_tuples(WindowAggState *winstate, int64 pos)
/*
* If the tuplestore has spilled to disk, alternate reading and writing
- * becomes quite expensive due to frequent buffer flushes. It's cheaper
+ * becomes quite expensive due to frequent buffer flushes. It's cheaper
* to force the entire partition to get spooled in one go.
*
* XXX this is a horrid kluge --- it'd be better to fix the performance
@@ -868,7 +868,7 @@ release_partition(WindowAggState *winstate)
* to our window framing rule
*
* The caller must have already determined that the row is in the partition
- * and fetched it into a slot. This function just encapsulates the framing
+ * and fetched it into a slot. This function just encapsulates the framing
* rules.
*/
static bool
@@ -970,7 +970,7 @@ row_is_in_frame(WindowAggState *winstate, int64 pos, TupleTableSlot *slot)
*
* Uses the winobj's read pointer for any required fetches; hence, if the
* frame mode is one that requires row comparisons, the winobj's mark must
- * not be past the currently known frame head. Also uses the specified slot
+ * not be past the currently known frame head. Also uses the specified slot
* for any required fetches.
*/
static void
@@ -1075,7 +1075,7 @@ update_frameheadpos(WindowObject winobj, TupleTableSlot *slot)
*
* Uses the winobj's read pointer for any required fetches; hence, if the
* frame mode is one that requires row comparisons, the winobj's mark must
- * not be past the currently known frame tail. Also uses the specified slot
+ * not be past the currently known frame tail. Also uses the specified slot
* for any required fetches.
*/
static void
@@ -1418,8 +1418,8 @@ ExecInitWindowAgg(WindowAgg *node, EState *estate, int eflags)
winstate->ss.ps.state = estate;
/*
- * Create expression contexts. We need two, one for per-input-tuple
- * processing and one for per-output-tuple processing. We cheat a little
+ * Create expression contexts. We need two, one for per-input-tuple
+ * processing and one for per-output-tuple processing. We cheat a little
* by using ExecAssignExprContext() to build both.
*/
ExecAssignExprContext(estate, &winstate->ss.ps);
@@ -1970,7 +1970,7 @@ window_gettupleslot(WindowObject winobj, int64 pos, TupleTableSlot *slot)
* requested amount of space. Subsequent calls just return the same chunk.
*
* Memory obtained this way is normally used to hold state that should be
- * automatically reset for each new partition. If a window function wants
+ * automatically reset for each new partition. If a window function wants
* to hold state across the whole query, fcinfo->fn_extra can be used in the
* usual way for that.
*/
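
A hypothetical window function keeping a per-partition counter in that chunk. WinGetPartitionLocalMemory() and PG_WINDOW_OBJECT() are the real API; the chunk comes back zero-filled, so the counter restarts in each partition.

    #include "postgres.h"
    #include "fmgr.h"
    #include "windowapi.h"

    PG_FUNCTION_INFO_V1(my_partition_counter);

    Datum
    my_partition_counter(PG_FUNCTION_ARGS)
    {
        WindowObject winobj = PG_WINDOW_OBJECT();
        int64      *count;

        count = (int64 *) WinGetPartitionLocalMemory(winobj, sizeof(int64));
        *count += 1;                /* starts from zero in each partition */
        PG_RETURN_INT64(*count);
    }
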
diff --git a/src/backend/executor/nodeWorktablescan.c b/src/backend/executor/nodeWorktablescan.c
index bdebb6d55c8..9c1494c2e4c 100644
--- a/src/backend/executor/nodeWorktablescan.c
+++ b/src/backend/executor/nodeWorktablescan.c
@@ -84,7 +84,7 @@ ExecWorkTableScan(WorkTableScanState *node)
{
/*
* On the first call, find the ancestor RecursiveUnion's state via the
- * Param slot reserved for it. (We can't do this during node init because
+ * Param slot reserved for it. (We can't do this during node init because
* there are corner cases where we'll get the init call before the
* RecursiveUnion does.)
*/
diff --git a/src/backend/executor/spi.c b/src/backend/executor/spi.c
index c8d2b1cf34a..7f4624cba59 100644
--- a/src/backend/executor/spi.c
+++ b/src/backend/executor/spi.c
@@ -253,7 +253,7 @@ AtEOSubXact_SPI(bool isCommit, SubTransactionId mySubid)
}
/*
- * Pop the stack entry and reset global variables. Unlike
+ * Pop the stack entry and reset global variables. Unlike
* SPI_finish(), we don't risk switching to memory contexts that might
* be already gone.
*/
@@ -1030,7 +1030,7 @@ SPI_cursor_open(const char *name, SPIPlanPtr plan,
/*
* SPI_cursor_open_with_args()
*
- * Parse and plan a query and open it as a portal. Like SPI_execute_with_args,
+ * Parse and plan a query and open it as a portal. Like SPI_execute_with_args,
* we can tell the planner to rely on the parameter values as constants,
* because the plan will only be used once.
*/
@@ -1214,7 +1214,7 @@ SPI_cursor_open_internal(const char *name, SPIPlanPtr plan,
}
/*
- * Disallow SCROLL with SELECT FOR UPDATE. This is not redundant with the
+ * Disallow SCROLL with SELECT FOR UPDATE. This is not redundant with the
* check in transformDeclareCursorStmt because the cursor options might
* not have come through there.
*/
@@ -1484,7 +1484,7 @@ SPI_plan_is_valid(SPIPlanPtr plan)
/*
* SPI_result_code_string --- convert any SPI return code to a string
*
- * This is often useful in error messages. Most callers will probably
+ * This is often useful in error messages. Most callers will probably
* only pass negative (error-case) codes, but for generality we recognize
* the success codes too.
*/
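
Typical use, as a hypothetical helper; SPI_execute() and SPI_result_code_string() are the real entry points, run_or_die() is made up:

    #include "postgres.h"
    #include "executor/spi.h"

    static void
    run_or_die(const char *sql)
    {
        int         rc = SPI_execute(sql, true, 0);    /* read-only, no limit */

        if (rc < 0)
            elog(ERROR, "SPI_execute failed: %s", SPI_result_code_string(rc));
    }
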
@@ -1935,7 +1935,7 @@ _SPI_execute_plan(SPIPlanPtr plan, ParamListInfo paramLI,
/*
* The last canSetTag query sets the status values returned to the
- * caller. Be careful to free any tuptables not returned, to
+ * caller. Be careful to free any tuptables not returned, to
* avoid intratransaction memory leak.
*/
if (canSetTag)
diff --git a/src/backend/executor/tstoreReceiver.c b/src/backend/executor/tstoreReceiver.c
index ba4636bc5e0..e20b70abd1d 100644
--- a/src/backend/executor/tstoreReceiver.c
+++ b/src/backend/executor/tstoreReceiver.c
@@ -5,7 +5,7 @@
* a Tuplestore.
*
* Optionally, we can force detoasting (but not decompression) of out-of-line
- * toasted values. This is to support cursors WITH HOLD, which must retain
+ * toasted values. This is to support cursors WITH HOLD, which must retain
* data even if the underlying table is dropped.
*
*
diff --git a/src/backend/lib/stringinfo.c b/src/backend/lib/stringinfo.c
index 2d343096e50..1b741f39912 100644
--- a/src/backend/lib/stringinfo.c
+++ b/src/backend/lib/stringinfo.c
@@ -99,7 +99,7 @@ appendStringInfo(StringInfo str, const char *fmt,...)
* appendStringInfoVA
*
* Attempt to format text data under the control of fmt (an sprintf-style
- * format string) and append it to whatever is already in str. If successful
+ * format string) and append it to whatever is already in str. If successful
* return true; if not (because there's not enough space), return false
* without modifying str. Typically the caller would enlarge str and retry
* on false return --- see appendStringInfo for standard usage pattern.
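
That standard pattern, written out as a varargs wrapper; this is essentially what appendStringInfo() itself does, though the wrapper name here is made up.

    #include "postgres.h"
    #include "lib/stringinfo.h"

    static void
    append_fmt(StringInfo str, const char *fmt,...)
    {
        for (;;)
        {
            va_list     args;
            bool        success;

            /* Try to format the data. */
            va_start(args, fmt);
            success = appendStringInfoVA(str, fmt, args);
            va_end(args);

            if (success)
                break;

            /* Double the buffer size and try again. */
            enlargeStringInfo(str, str->maxlen);
        }
    }
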
@@ -255,7 +255,7 @@ enlargeStringInfo(StringInfo str, int needed)
int newlen;
/*
- * Guard against out-of-range "needed" values. Without this, we can get
+ * Guard against out-of-range "needed" values. Without this, we can get
* an overflow or infinite loop in the following.
*/
if (needed < 0) /* should not happen */
diff --git a/src/backend/libpq/auth.c b/src/backend/libpq/auth.c
index d2f8e8d4582..4b4dc3efd09 100644
--- a/src/backend/libpq/auth.c
+++ b/src/backend/libpq/auth.c
@@ -49,7 +49,7 @@ static int recv_and_check_password_packet(Port *port);
/* Max size of username ident server can return */
#define IDENT_USERNAME_MAX 512
-/* Standard TCP port number for Ident service. Assigned by IANA */
+/* Standard TCP port number for Ident service. Assigned by IANA */
#define IDENT_PORT 113
static int ident_inet(hbaPort *port);
@@ -705,7 +705,7 @@ recv_password_packet(Port *port)
(errmsg("received password packet")));
/*
- * Return the received string. Note we do not attempt to do any
+ * Return the received string. Note we do not attempt to do any
* character-set conversion on it; since we don't yet know the client's
* encoding, there wouldn't be much point.
*/
@@ -1601,7 +1601,7 @@ interpret_ident_response(const char *ident_response,
/*
* Talk to the ident server on host "remote_ip_addr" and find out who
* owns the tcp connection from his port "remote_port" to port
- * "local_port_addr" on host "local_ip_addr". Return the user name the
+ * "local_port_addr" on host "local_ip_addr". Return the user name the
* ident server gives as "*ident_user".
*
* IP addresses and port numbers are in network byte order.
diff --git a/src/backend/libpq/be-secure.c b/src/backend/libpq/be-secure.c
index 222add09db6..6e094965057 100644
--- a/src/backend/libpq/be-secure.c
+++ b/src/backend/libpq/be-secure.c
@@ -30,13 +30,13 @@
* impersonations.
*
* Another benefit of EDH is that it allows the backend and
- * clients to use DSA keys. DSA keys can only provide digital
+ * clients to use DSA keys. DSA keys can only provide digital
* signatures, not encryption, and are often acceptable in
* jurisdictions where RSA keys are unacceptable.
*
* The downside to EDH is that it makes it impossible to
* use ssldump(1) if there's a problem establishing an SSL
- * session. In this case you'll need to temporarily disable
+ * session. In this case you'll need to temporarily disable
* EDH by commenting out the callback.
*
* ...
diff --git a/src/backend/libpq/hba.c b/src/backend/libpq/hba.c
index 472a960728a..69ae6e96fa0 100644
--- a/src/backend/libpq/hba.c
+++ b/src/backend/libpq/hba.c
@@ -58,8 +58,8 @@ static List *parsed_hba_lines = NIL;
/*
* These variables hold the pre-parsed contents of the ident usermap
- * configuration file. ident_lines is a list of sublists, one sublist for
- * each (non-empty, non-comment) line of the file. The sublist items are
+ * configuration file. ident_lines is a list of sublists, one sublist for
+ * each (non-empty, non-comment) line of the file. The sublist items are
* palloc'd strings, one string per token on the line. Note there will always
* be at least one token, since blank lines are not entered in the data
* structure. ident_line_nums is an integer list containing the actual line
@@ -2031,7 +2031,7 @@ load_ident(void)
/*
* Determine what authentication method should be used when accessing database
- * "database" from frontend "raddr", user "user". Return the method and
+ * "database" from frontend "raddr", user "user". Return the method and
* an optional argument (stored in fields of *port), and STATUS_OK.
*
* Note that STATUS_ERROR indicates a problem with the hba config file.
diff --git a/src/backend/libpq/md5.c b/src/backend/libpq/md5.c
index d40c37b6375..e37a05f9fa5 100644
--- a/src/backend/libpq/md5.c
+++ b/src/backend/libpq/md5.c
@@ -2,7 +2,7 @@
* md5.c
*
* Implements the MD5 Message-Digest Algorithm as specified in
- * RFC 1321. This implementation is a simple one, in that it
+ * RFC 1321.  This implementation is a simple one, in that it
* needs every input byte to be buffered before doing any
* calculations. I do not expect this file to be used for
* general purpose MD5'ing of large amounts of data, only for
diff --git a/src/backend/libpq/pqcomm.c b/src/backend/libpq/pqcomm.c
index 2a1fc145ee6..b36f9263728 100644
--- a/src/backend/libpq/pqcomm.c
+++ b/src/backend/libpq/pqcomm.c
@@ -418,7 +418,7 @@ StreamServerPort(int family, char *hostName, unsigned short portNumber,
/*
* Note: This might fail on some OS's, like Linux older than
* 2.4.21-pre3, that don't have the IPV6_V6ONLY socket option, and map
- * ipv4 addresses to ipv6. It will show ::ffff:ipv4 for all ipv4
+ * ipv4 addresses to ipv6.  It will show ::ffff:ipv4 for all ipv4
* connections.
*/
err = bind(fd, addr->ai_addr, addr->ai_addrlen);
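[Note: a sketch of the socket option this comment is about; the real code guards it with #ifdef IPV6_V6ONLY and checks the return value:]

    int         one = 1;

    /* Ask for a v6-only listener, so IPv4 connections arrive on a
     * separate IPv4 socket rather than as ::ffff: mapped addresses. */
    setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, (char *) &one, sizeof(one));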
@@ -1097,7 +1097,7 @@ pq_getmessage(StringInfo s, int maxlen)
if (len > 0)
{
/*
- * Allocate space for message. If we run out of room (ridiculously
+ * Allocate space for message.  If we run out of room (ridiculously
* large message), we will elog(ERROR), but we want to discard the
* message body so as not to lose communication sync.
*/
diff --git a/src/backend/libpq/pqformat.c b/src/backend/libpq/pqformat.c
index 52922b54d33..f4f12a8c374 100644
--- a/src/backend/libpq/pqformat.c
+++ b/src/backend/libpq/pqformat.c
@@ -120,7 +120,7 @@ pq_sendbytes(StringInfo buf, const char *data, int datalen)
* pq_sendcountedtext - append a counted text string (with character set conversion)
*
* The data sent to the frontend by this routine is a 4-byte count field
- * followed by the string. The count includes itself or not, as per the
+ * followed by the string.  The count includes itself or not, as per the
* countincludesself flag (pre-3.0 protocol requires it to include itself).
* The passed text string need not be null-terminated, and the data sent
* to the frontend isn't either.
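[Worked example: with countincludesself = true and no encoding conversion, sending the 3-byte string "abc" emits the int32 count 7 (4 bytes of count plus 3 of payload) followed by the three data bytes; with the flag false, the count field would read 3.]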
diff --git a/src/backend/libpq/pqsignal.c b/src/backend/libpq/pqsignal.c
index 0fee280ae33..a3b81279b4a 100644
--- a/src/backend/libpq/pqsignal.c
+++ b/src/backend/libpq/pqsignal.c
@@ -27,14 +27,14 @@
* Ultrix and SunOS provide BSD signal(2) semantics by default.
*
* SVID2 and POSIX signal(2) semantics differ from BSD signal(2)
- * semantics. We can use the POSIX sigaction(2) on systems that
+ * semantics.  We can use the POSIX sigaction(2) on systems that
* allow us to request restartable signals (SA_RESTART).
*
* Some systems don't allow restartable signals at all unless we
* link to a special BSD library.
*
* We devoutly hope that there aren't any systems that provide
- * neither POSIX signals nor BSD signals. The alternative
+ * neither POSIX signals nor BSD signals.  The alternative
* is to do signal-handler reinstallation, which doesn't work well
* at all.
* ------------------------------------------------------------------------*/
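[Note: the sigaction-based path described above amounts to the classic pqsignal() wrapper; a lightly trimmed sketch of it:]

    pqsigfunc
    pqsignal(int signo, pqsigfunc func)
    {
        struct sigaction act,
                    oact;

        act.sa_handler = func;
        sigemptyset(&act.sa_mask);
        act.sa_flags = 0;
        if (signo != SIGALRM)
            act.sa_flags |= SA_RESTART;     /* request restartable signals */
        if (sigaction(signo, &act, &oact) < 0)
            return SIG_ERR;
        return oact.sa_handler;
    }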
diff --git a/src/backend/main/main.c b/src/backend/main/main.c
index 420c395a61c..138963b55f4 100644
--- a/src/backend/main/main.c
+++ b/src/backend/main/main.c
@@ -71,7 +71,7 @@ main(int argc, char *argv[])
/*
* Remember the physical location of the initially given argv[] array for
- * possible use by ps display. On some platforms, the argv[] storage must
+ * possible use by ps display.  On some platforms, the argv[] storage must
* be overwritten in order to set the process title for ps. In such cases
* save_ps_display_args makes and returns a new copy of the argv[] array.
*
@@ -100,10 +100,10 @@ main(int argc, char *argv[])
MemoryContextInit();
/*
- * Set up locale information from environment. Note that LC_CTYPE and
+ * Set up locale information from environment.  Note that LC_CTYPE and
* LC_COLLATE will be overridden later from pg_control if we are in an
* already-initialized database. We set them here so that they will be
- * available to fill pg_control during initdb. LC_MESSAGES will get set
+ * available to fill pg_control during initdb.  LC_MESSAGES will get set
* later during GUC option processing, but we set it here to allow startup
* error messages to be localized.
*/
@@ -212,9 +212,9 @@ main(int argc, char *argv[])
/*
- * Place platform-specific startup hacks here. This is the right
+ * Place platform-specific startup hacks here.  This is the right
* place to put code that must be executed early in the launch of any new
- * server process. Note that this code will NOT be executed when a backend
+ * server process.  Note that this code will NOT be executed when a backend
* or sub-bootstrap process is forked, unless we are in a fork/exec
* environment (ie EXEC_BACKEND is defined).
*
diff --git a/src/backend/nodes/bitmapset.c b/src/backend/nodes/bitmapset.c
index 5546034694d..9715e42cfb7 100644
--- a/src/backend/nodes/bitmapset.c
+++ b/src/backend/nodes/bitmapset.c
@@ -39,7 +39,7 @@
* where x's are unspecified bits. The two's complement negative is formed
* by inverting all the bits and adding one. Inversion gives
* yyyyyy01111
- * where each y is the inverse of the corresponding x. Incrementing gives
+ * where each y is the inverse of the corresponding x.  Incrementing gives
* yyyyyy10000
* and then ANDing with the original value gives
* 00000010000
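[Note: this is the arithmetic behind the RIGHTMOST_ONE() macro defined just below in bitmapset.c, essentially "x & -x" on a signed word.  A concrete instance:]

    int         w = 0x1B0;          /* binary 110110000 */
    int         rightmost = w & -w; /* == 0x010: the rightmost one-bit */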
@@ -721,7 +721,7 @@ bms_join(Bitmapset *a, Bitmapset *b)
/*----------
* bms_first_member - find and remove first member of a set
*
- * Returns -1 if set is empty. NB: set is destructively modified!
+ * Returns -1 if set is empty.  NB: set is destructively modified!
*
* This is intended as support for iterating through the members of a set.
* The typical pattern is
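[In the source the comment goes on to show the canonical loop, roughly:

        while ((x = bms_first_member(inputset)) >= 0)
            process member x;

where inputset is usually a throwaway copy, since the iteration empties it.]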
diff --git a/src/backend/nodes/copyfuncs.c b/src/backend/nodes/copyfuncs.c
index 09ba0732b43..eb613aec7af 100644
--- a/src/backend/nodes/copyfuncs.c
+++ b/src/backend/nodes/copyfuncs.c
@@ -4,7 +4,7 @@
* Copy functions for Postgres tree nodes.
*
* NOTE: we currently support copying all node types found in parse and
- * plan trees. We do not support copying executor state trees; there
+ * plan trees.  We do not support copying executor state trees; there
* is no need for that, and no point in maintaining all the code that
* would be needed. We also do not support copying Path trees, mainly
* because the circular linkages between RelOptInfo and Path nodes can't
@@ -31,7 +31,7 @@
/*
* Macros to simplify copying of different kinds of fields. Use these
- * wherever possible to reduce the chance for silly typos. Note that these
+ * wherever possible to reduce the chance for silly typos.  Note that these
* hard-wire the convention that the local variables in a Copy routine are
* named 'newnode' and 'from'.
*/
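[For reference, two representative macros in that style, as in copyfuncs.c:]

    /* Copy a simple scalar field (int, float, bool, enum, etc) */
    #define COPY_SCALAR_FIELD(fldname) \
        (newnode->fldname = from->fldname)

    /* Copy a field that is a pointer to some kind of Node or Node tree */
    #define COPY_NODE_FIELD(fldname) \
        (newnode->fldname = copyObject(from->fldname))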
@@ -1027,7 +1027,7 @@ _copyIntoClause(IntoClause *from)
/*
* We don't need a _copyExpr because Expr is an abstract supertype which
- * should never actually get instantiated. Also, since it has no common
+ * should never actually get instantiated.  Also, since it has no common
* fields except NodeTag, there's no need for a helper routine to factor
* out copying the common fields...
*/
diff --git a/src/backend/nodes/equalfuncs.c b/src/backend/nodes/equalfuncs.c
index 0de60d5c6c1..76055c64fdc 100644
--- a/src/backend/nodes/equalfuncs.c
+++ b/src/backend/nodes/equalfuncs.c
@@ -11,7 +11,7 @@
* be handled easily in a simple depth-first traversal.
*
* Currently, in fact, equal() doesn't know how to compare Plan trees
- * either. This might need to be fixed someday.
+ * either.  This might need to be fixed someday.
*
* NOTE: it is intentional that parse location fields (in nodes that have
* one) are not compared. This is because we want, for example, a variable
@@ -34,8 +34,8 @@
/*
- * Macros to simplify comparison of different kinds of fields. Use these
- * wherever possible to reduce the chance for silly typos. Note that these
+ * Macros to simplify comparison of different kinds of fields.  Use these
+ * wherever possible to reduce the chance for silly typos.  Note that these
* hard-wire the convention that the local variables in an Equal routine are
* named 'a' and 'b'.
*/
@@ -125,7 +125,7 @@ _equalIntoClause(IntoClause *a, IntoClause *b)
/*
* We don't need an _equalExpr because Expr is an abstract supertype which
- * should never actually get instantiated. Also, since it has no common
+ * should never actually get instantiated.  Also, since it has no common
* fields except NodeTag, there's no need for a helper routine to factor
* out comparing the common fields...
*/
@@ -828,9 +828,9 @@ static bool
_equalPlaceHolderVar(PlaceHolderVar *a, PlaceHolderVar *b)
{
/*
- * We intentionally do not compare phexpr. Two PlaceHolderVars with the
+ * We intentionally do not compare phexpr.  Two PlaceHolderVars with the
* same ID and levelsup should be considered equal even if the contained
- * expressions have managed to mutate to different states. One way in
+ * expressions have managed to mutate to different states.  One way in
* which that can happen is that initplan sublinks would get replaced by
* differently-numbered Params when sublink folding is done. (The end
* result of such a situation would be some unreferenced initplans, which
diff --git a/src/backend/nodes/list.c b/src/backend/nodes/list.c
index d4684d3cc82..283829f175b 100644
--- a/src/backend/nodes/list.c
+++ b/src/backend/nodes/list.c
@@ -793,7 +793,7 @@ list_union_oid(List *list1, List *list2)
* "intersection" if list1 is known unique beforehand.
*
* This variant works on lists of pointers, and determines list
- * membership via equal(). Note that the list1 member will be pointed
+ * membership via equal().  Note that the list1 member will be pointed
* to in the result.
*/
List *
@@ -985,7 +985,7 @@ list_append_unique_oid(List *list, Oid datum)
* via equal().
*
* This is almost the same functionality as list_union(), but list1 is
- * modified in-place rather than being copied. Note also that list2's cells
+ * modified in-place rather than being copied.  Note also that list2's cells
* are not inserted in list1, so the analogy to list_concat() isn't perfect.
*/
List *
diff --git a/src/backend/nodes/nodeFuncs.c b/src/backend/nodes/nodeFuncs.c
index 0e57f6c6d7d..f24fb7f1364 100644
--- a/src/backend/nodes/nodeFuncs.c
+++ b/src/backend/nodes/nodeFuncs.c
@@ -238,7 +238,7 @@ exprType(Node *expr)
/*
* exprTypmod -
* returns the type-specific modifier of the expression's result type,
- * if it can be determined. In many cases, it can't and we return -1.
+ * if it can be determined.  In many cases, it can't and we return -1.
*/
int32
exprTypmod(Node *expr)
@@ -1453,8 +1453,8 @@ leftmostLoc(int loc1, int loc2)
*
* The walker routine should return "false" to continue the tree walk, or
* "true" to abort the walk and immediately return "true" to the top-level
- * caller. This can be used to short-circuit the traversal if the walker
- * has found what it came for. "false" is returned to the top-level caller
+ * caller.  This can be used to short-circuit the traversal if the walker
+ * has found what it came for.  "false" is returned to the top-level caller
* iff no invocation of the walker returned "true".
*
* The node types handled by expression_tree_walker include all those
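[A minimal walker in the shape this contract describes; the context type and names are hypothetical:]

    typedef struct
    {
        int         nvars;          /* running count of Vars seen */
    } count_vars_context;

    static bool
    count_vars_walker(Node *node, count_vars_context *context)
    {
        if (node == NULL)
            return false;           /* keep walking */
        if (IsA(node, Var))
            context->nvars++;
        /* Recurse; true propagates up only if some invocation aborted. */
        return expression_tree_walker(node, count_vars_walker,
                                      (void *) context);
    }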
@@ -1492,7 +1492,7 @@ leftmostLoc(int loc1, int loc2)
*
* expression_tree_walker will handle SubPlan nodes by recursing normally
* into the "testexpr" and the "args" list (which are expressions belonging to
- * the outer plan). It will not touch the completed subplan, however. Since
+ * the outer plan).  It will not touch the completed subplan, however.  Since
* there is no link to the original Query, it is not possible to recurse into
* subselects of an already-planned expression tree. This is OK for current
* uses, but may need to be revisited in future.
@@ -2529,7 +2529,7 @@ expression_tree_mutator(Node *node,
* This routine exists just to reduce the number of places that need to know
* where all the expression subtrees of a Query are. Note it can be used
* for starting a walk at top level of a Query regardless of whether the
- * mutator intends to descend into subqueries. It is also useful for
+ * mutator intends to descend into subqueries.  It is also useful for
* descending into subqueries within a mutator.
*
* Some callers want to suppress mutating of certain items in the Query,
@@ -2539,7 +2539,7 @@ expression_tree_mutator(Node *node,
* indicated items. (More flag bits may be added as needed.)
*
* Normally the Query node itself is copied, but some callers want it to be
- * modified in-place; they must pass QTW_DONT_COPY_QUERY in flags. All
+ * modified in-place; they must pass QTW_DONT_COPY_QUERY in flags.  All
* modified substructure is safely copied in any case.
*/
Query *
diff --git a/src/backend/nodes/outfuncs.c b/src/backend/nodes/outfuncs.c
index bb342ca6d39..cd6c275c639 100644
--- a/src/backend/nodes/outfuncs.c
+++ b/src/backend/nodes/outfuncs.c
@@ -13,7 +13,7 @@
* NOTES
* Every node type that can appear in stored rules' parsetrees *must*
* have an output function defined here (as well as an input function
- * in readfuncs.c). For use in debugging, we also provide output
+ * in readfuncs.c).  For use in debugging, we also provide output
* functions for nodes that appear in raw parsetrees, path, and plan trees.
* These nodes however need not have input functions.
*
@@ -31,8 +31,8 @@
/*
- * Macros to simplify output of different kinds of fields. Use these
- * wherever possible to reduce the chance for silly typos. Note that these
+ * Macros to simplify output of different kinds of fields.  Use these
+ * wherever possible to reduce the chance for silly typos.  Note that these
* hard-wire conventions about the names of the local variables in an Out
* routine.
*/
diff --git a/src/backend/nodes/params.c b/src/backend/nodes/params.c
index 62d766a2827..98e53bf2d21 100644
--- a/src/backend/nodes/params.c
+++ b/src/backend/nodes/params.c
@@ -28,7 +28,7 @@
*
* Note: the intent of this function is to make a static, self-contained
* set of parameter values. If dynamic parameter hooks are present, we
- * intentionally do not copy them into the result. Rather, we forcibly
+ * intentionally do not copy them into the result.  Rather, we forcibly
* instantiate all available parameter values and copy the datum values.
*/
ParamListInfo
diff --git a/src/backend/nodes/read.c b/src/backend/nodes/read.c
index 78775e8bbd3..d3d686e7fc1 100644
--- a/src/backend/nodes/read.c
+++ b/src/backend/nodes/read.c
@@ -85,21 +85,21 @@ stringToNode(char *str)
* Backslashes themselves must also be backslashed for consistency.
* Any other character can be, but need not be, backslashed as well.
* * If the resulting token is '<>' (with no backslash), it is returned
- * as a non-NULL pointer to the token but with length == 0. Note that
+ * as a non-NULL pointer to the token but with length == 0.  Note that
* there is no other way to get a zero-length token.
*
* Returns a pointer to the start of the next token, and the length of the
- * token (including any embedded backslashes!) in *length. If there are
+ * token (including any embedded backslashes!) in *length.  If there are
* no more tokens, NULL and 0 are returned.
*
* NOTE: this routine doesn't remove backslashes; the caller must do so
* if necessary (see "debackslash").
*
* NOTE: prior to release 7.0, this routine also had a special case to treat
- * a token starting with '"' as extending to the next '"'. This code was
+ * a token starting with '"' as extending to the next '"'.  This code was
* broken, however, since it would fail to cope with a string containing an
* embedded '"'. I have therefore removed this special case, and instead
- * introduced rules for using backslashes to quote characters. Higher-level
+ * introduced rules for using backslashes to quote characters.  Higher-level
* code should add backslashes to a string constant to ensure it is treated
* as a single token.
*/
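[Illustration, with hypothetical input: given the text

        {FOO :str a\ b <>}

pg_strtok() would yield the tokens "{", "FOO", ":str", "a\ b" (length 4, which the caller can debackslash to "a b"), the zero-length token signalled by "<>", and "}".]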
@@ -258,7 +258,7 @@ nodeTokenType(char *token, int length)
* Slightly higher-level reader.
*
* This routine applies some semantic knowledge on top of the purely
- * lexical tokenizer pg_strtok(). It can read
+ * lexical tokenizer pg_strtok().  It can read
* * Value token nodes (integers, floats, or strings);
* * General nodes (via parseNodeString() from readfuncs.c);
* * Lists of the above;
diff --git a/src/backend/nodes/readfuncs.c b/src/backend/nodes/readfuncs.c
index 22885147cfb..a7a7ec4abe3 100644
--- a/src/backend/nodes/readfuncs.c
+++ b/src/backend/nodes/readfuncs.c
@@ -12,7 +12,7 @@
*
* NOTES
* Path and Plan nodes do not have any readfuncs support, because we
- * never have occasion to read them in. (There was once code here that
+ * never have occasion to read them in.  (There was once code here that
* claimed to read them, but it was broken as well as unused.) We
* never read executor state trees, either.
*
@@ -34,7 +34,7 @@
/*
* Macros to simplify reading of different kinds of fields. Use these
- * wherever possible to reduce the chance for silly typos. Note that these
+ * wherever possible to reduce the chance for silly typos.  Note that these
* hard-wire conventions about the names of the local variables in a Read
* routine.
*/
@@ -127,7 +127,7 @@
/*
* NOTE: use atoi() to read values written with %d, or atoui() to read
* values written with %u in outfuncs.c. An exception is OID values,
- * for which use atooid(). (As of 7.1, outfuncs.c writes OIDs as %u,
+ * for which use atooid().  (As of 7.1, outfuncs.c writes OIDs as %u,
* but this will probably change in the future.)
*/
#define atoui(x) ((unsigned int) strtoul((x), NULL, 10))
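[The companion OID macro reads, in the same style:

        #define atooid(x)  ((Oid) strtoul((x), NULL, 10))

matching outfuncs.c's habit of writing OIDs with %u.]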
@@ -574,7 +574,7 @@ _readOpExpr(void)
/*
* The opfuncid is stored in the textual format primarily for debugging
* and documentation reasons. We want to always read it as zero to force
- * it to be re-looked-up in the pg_operator entry. This ensures that
+ * it to be re-looked-up in the pg_operator entry.  This ensures that
* stored rules don't have hidden dependencies on operators' functions.
* (We don't currently support an ALTER OPERATOR command, but might
* someday.)
@@ -605,7 +605,7 @@ _readDistinctExpr(void)
/*
* The opfuncid is stored in the textual format primarily for debugging
* and documentation reasons. We want to always read it as zero to force
- * it to be re-looked-up in the pg_operator entry. This ensures that
+ * it to be re-looked-up in the pg_operator entry.  This ensures that
* stored rules don't have hidden dependencies on operators' functions.
* (We don't currently support an ALTER OPERATOR command, but might
* someday.)
@@ -636,7 +636,7 @@ _readNullIfExpr(void)
/*
* The opfuncid is stored in the textual format primarily for debugging
* and documentation reasons. We want to always read it as zero to force
- * it to be re-looked-up in the pg_operator entry. This ensures that
+ * it to be re-looked-up in the pg_operator entry.  This ensures that
* stored rules don't have hidden dependencies on operators' functions.
* (We don't currently support an ALTER OPERATOR command, but might
* someday.)
@@ -667,7 +667,7 @@ _readScalarArrayOpExpr(void)
/*
* The opfuncid is stored in the textual format primarily for debugging
* and documentation reasons. We want to always read it as zero to force
- * it to be re-looked-up in the pg_operator entry. This ensures that
+ * it to be re-looked-up in the pg_operator entry.  This ensures that
* stored rules don't have hidden dependencies on operators' functions.
* (We don't currently support an ALTER OPERATOR command, but might
* someday.)
diff --git a/src/backend/nodes/tidbitmap.c b/src/backend/nodes/tidbitmap.c
index a1db72f6515..adde0fcd78b 100644
--- a/src/backend/nodes/tidbitmap.c
+++ b/src/backend/nodes/tidbitmap.c
@@ -19,7 +19,7 @@
* of lossiness. In theory we could fall back to page ranges at some
* point, but for now that seems useless complexity.
*
- * We also support the notion of candidate matches, or rechecking. This
+ * We also support the notion of candidate matches, or rechecking.  This
* means we know that a search need visit only some tuples on a page,
* but we are not certain that all of those tuples are real matches.
* So the eventual heap scan must recheck the quals for these tuples only,
@@ -49,7 +49,7 @@
/*
* The maximum number of tuples per page is not large (typically 256 with
* 8K pages, or 1024 with 32K pages). So there's not much point in making
- * the per-page bitmaps variable size. We just legislate that the size
+ * the per-page bitmaps variable size.  We just legislate that the size
* is this:
*/
#define MAX_TUPLES_PER_PAGE MaxHeapTuplesPerPage
@@ -62,10 +62,10 @@
* for that page in the page table.
*
* We actually store both exact pages and lossy chunks in the same hash
- * table, using identical data structures. (This is because dynahash.c's
+ * table, using identical data structures.  (This is because dynahash.c's
* memory management doesn't allow space to be transferred easily from one
* hashtable to another.) Therefore it's best if PAGES_PER_CHUNK is the
- * same as MAX_TUPLES_PER_PAGE, or at least not too different. But we
+ * same as MAX_TUPLES_PER_PAGE, or at least not too different.  But we
* also want PAGES_PER_CHUNK to be a power of 2 to avoid expensive integer
* remainder operations. So, define it like this:
*/
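[The definition that follows in the source is "#define PAGES_PER_CHUNK (BLCKSZ / 32)", i.e. 256 with the default 8K block size: a power of 2, and in the same ballpark as MAX_TUPLES_PER_PAGE's typical 256.]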
@@ -143,7 +143,7 @@ struct TIDBitmap
/*
* When iterating over a bitmap in sorted order, a TBMIterator is used to
- * track our progress. There can be several iterators scanning the same
+ * track our progress.  There can be several iterators scanning the same
* bitmap concurrently. Note that the bitmap becomes read-only as soon as
* any iterator is created.
*/
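[A sketch of the iteration protocol, using the tidbitmap.c entry points; error handling omitted:]

    TBMIterator *iterator = tbm_begin_iterate(tbm);
    TBMIterateResult *tbmres;

    while ((tbmres = tbm_iterate(iterator)) != NULL)
    {
        /* tbmres->blockno is the heap page; tbmres->ntuples is -1 for
         * a lossy page, else the number of entries in tbmres->offsets[]. */
    }
    tbm_end_iterate(iterator);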
@@ -791,7 +791,7 @@ tbm_find_pageentry(const TIDBitmap *tbm, BlockNumber pageno)
*
* If new, the entry is marked as an exact (non-chunk) entry.
*
- * This may cause the table to exceed the desired memory size. It is
+ * This may cause the table to exceed the desired memory size.  It is
* up to the caller to call tbm_lossify() at the next safe point if so.
*/
static PagetableEntry *
@@ -871,7 +871,7 @@ tbm_page_is_lossy(const TIDBitmap *tbm, BlockNumber pageno)
/*
* tbm_mark_page_lossy - mark the page number as lossily stored
*
- * This may cause the table to exceed the desired memory size. It is
+ * This may cause the table to exceed the desired memory size.  It is
* up to the caller to call tbm_lossify() at the next safe point if so.
*/
static void
@@ -892,7 +892,7 @@ tbm_mark_page_lossy(TIDBitmap *tbm, BlockNumber pageno)
chunk_pageno = pageno - bitno;
/*
- * Remove any extant non-lossy entry for the page. If the page is its own
+ * Remove any extant non-lossy entry for the page.  If the page is its own
* chunk header, however, we skip this and handle the case below.
*/
if (bitno != 0)
diff --git a/src/backend/optimizer/geqo/geqo_eval.c b/src/backend/optimizer/geqo/geqo_eval.c
index 61caec4f85c..8584f3b9c12 100644
--- a/src/backend/optimizer/geqo/geqo_eval.c
+++ b/src/backend/optimizer/geqo/geqo_eval.c
@@ -81,11 +81,11 @@ geqo_eval(PlannerInfo *root, Gene *tour, int num_gene)
* not already contain some entries. The newly added entries will be
* recycled by the MemoryContextDelete below, so we must ensure that the
* list is restored to its former state before exiting. We can do this by
- * truncating the list to its original length. NOTE this assumes that any
+ * truncating the list to its original length.  NOTE this assumes that any
* added entries are appended at the end!
*
* We also must take care not to mess up the outer join_rel_hash, if there
- * is one. We can do this by just temporarily setting the link to NULL.
+ * is one.  We can do this by just temporarily setting the link to NULL.
* (If we are dealing with enough join rels, which we very likely are, a
* new hash table will get built and used locally.)
*
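[A sketch of that save-and-restore dance, close to what geqo_eval() actually does:]

    int         savelength = list_length(root->join_rel_list);
    struct HTAB *savehash = root->join_rel_hash;

    root->join_rel_hash = NULL;     /* force any new hash table to be local */

    /* ... build and cost the join tree for this tour ... */

    root->join_rel_list = list_truncate(root->join_rel_list, savelength);
    root->join_rel_hash = savehash;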
@@ -214,7 +214,7 @@ gimme_tree(PlannerInfo *root, Gene *tour, int num_gene)
* Merge a "clump" into the list of existing clumps for gimme_tree.
*
* We try to merge the clump into some existing clump, and repeat if
- * successful. When no more merging is possible, insert the clump
+ * successful.  When no more merging is possible, insert the clump
* into the list, preserving the list ordering rule (namely, that
* clumps of larger size appear earlier).
*
@@ -265,7 +265,7 @@ merge_clump(PlannerInfo *root, List *clumps, Clump *new_clump, bool force)
/*
* Recursively try to merge the enlarged old_clump with
- * others. When no further merge is possible, we'll reinsert
+ * others.  When no further merge is possible, we'll reinsert
* it into the list.
*/
return merge_clump(root, clumps, old_clump, force);
@@ -276,7 +276,7 @@ merge_clump(PlannerInfo *root, List *clumps, Clump *new_clump, bool force)
/*
* No merging is possible, so add new_clump as an independent clump, in
- * proper order according to size. We can be fast for the common case
+ * proper order according to size.  We can be fast for the common case
* where it has size 1 --- it should always go at the end.
*/
if (clumps == NIL || new_clump->size == 1)
diff --git a/src/backend/optimizer/path/allpaths.c b/src/backend/optimizer/path/allpaths.c
index 08acb1d01be..715f44675f8 100644
--- a/src/backend/optimizer/path/allpaths.c
+++ b/src/backend/optimizer/path/allpaths.c
@@ -288,7 +288,7 @@ set_plain_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
* set_append_rel_pathlist
* Build access paths for an "append relation"
*
- * The passed-in rel and RTE represent the entire append relation. The
+ * The passed-in rel and RTE represent the entire append relation.  The
* relation's contents are computed by appending together the output of
* the individual member relations. Note that in the inheritance case,
* the first member relation is actually the same table as is mentioned in
@@ -360,7 +360,7 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
/*
* We have to copy the parent's targetlist and quals to the child,
- * with appropriate substitution of variables. However, only the
+ * with appropriate substitution of variables.  However, only the
* baserestrictinfo quals are needed before we can check for
* constraint exclusion; so do that first and then check to see if we
* can disregard this child.
@@ -422,7 +422,7 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
/*
* We have to make child entries in the EquivalenceClass data
- * structures as well. This is needed either if the parent
+ * structures as well.  This is needed either if the parent
* participates in some eclass joins (because we will want to consider
* inner-indexscan joins on the individual children) or if the parent
* has useful pathkeys (because we should try to build MergeAppend
@@ -1064,7 +1064,7 @@ make_rel_from_joinlist(PlannerInfo *root, List *joinlist)
* independent jointree items in the query. This is > 1.
*
* 'initial_rels' is a list of RelOptInfo nodes for each independent
- * jointree item. These are the components to be joined together.
+ * jointree item.  These are the components to be joined together.
* Note that levels_needed == list_length(initial_rels).
*
* Returns the final level of join relations, i.e., the relation that is
@@ -1080,7 +1080,7 @@ make_rel_from_joinlist(PlannerInfo *root, List *joinlist)
* needed for these paths need have been instantiated.
*
* Note to plugin authors: the functions invoked during standard_join_search()
- * modify root->join_rel_list and root->join_rel_hash. If you want to do more
+ * modify root->join_rel_list and root->join_rel_hash.  If you want to do more
* than one join-order search, you'll probably need to save and restore the
* original states of those data structures. See geqo_eval() for an example.
*/
@@ -1179,7 +1179,7 @@ standard_join_search(PlannerInfo *root, int levels_needed, List *initial_rels)
* column k is found to be unsafe to reference, we set unsafeColumns[k] to
* TRUE, but we don't reject the subquery overall since column k might
* not be referenced by some/all quals. The unsafeColumns[] array will be
- * consulted later by qual_is_pushdown_safe(). It's better to do it this
+ * consulted later by qual_is_pushdown_safe().  It's better to do it this
* way than to make the checks directly in qual_is_pushdown_safe(), because
* when the subquery involves set operations we have to check the output
* expressions in each arm of the set op.
@@ -1272,7 +1272,7 @@ recurse_pushdown_safe(Node *setOp, Query *topquery,
* check_output_expressions - check subquery's output expressions for safety
*
* There are several cases in which it's unsafe to push down an upper-level
- * qual if it references a particular output column of a subquery. We check
+ * qual if it references a particular output column of a subquery.  We check
* each output column of the subquery and set unsafeColumns[k] to TRUE if
* that column is unsafe for a pushed-down qual to reference. The conditions
* checked here are:
@@ -1290,7 +1290,7 @@ recurse_pushdown_safe(Node *setOp, Query *topquery,
* of rows returned. (This condition is vacuous for DISTINCT, because then
* there are no non-DISTINCT output columns, so we needn't check. But note
* we are assuming that the qual can't distinguish values that the DISTINCT
- * operator sees as equal. This is a bit shaky but we have no way to test
+ * operator sees as equal.  This is a bit shaky but we have no way to test
* for the case, and it's unlikely enough that we shouldn't refuse the
* optimization just because it could theoretically happen.)
*/
diff --git a/src/backend/optimizer/path/clausesel.c b/src/backend/optimizer/path/clausesel.c
index b9e97d414d8..1ba171723e0 100644
--- a/src/backend/optimizer/path/clausesel.c
+++ b/src/backend/optimizer/path/clausesel.c
@@ -59,7 +59,7 @@ static void addRangeClause(RangeQueryClause **rqlist, Node *clause,
* See clause_selectivity() for the meaning of the additional parameters.
*
* Our basic approach is to take the product of the selectivities of the
- * subclauses. However, that's only right if the subclauses have independent
+ * subclauses.  However, that's only right if the subclauses have independent
* probabilities, and in reality they are often NOT independent. So,
* we want to be smarter where we can.
@@ -76,12 +76,12 @@ static void addRangeClause(RangeQueryClause **rqlist, Node *clause,
* see that hisel is the fraction of the range below the high bound, while
* losel is the fraction above the low bound; so hisel can be interpreted
* directly as a 0..1 value but we need to convert losel to 1-losel before
- * interpreting it as a value. Then the available range is 1-losel to hisel.
+ * interpreting it as a value.  Then the available range is 1-losel to hisel.
* However, this calculation double-excludes nulls, so really we need
* hisel + losel + null_frac - 1.)
*
* If either selectivity is exactly DEFAULT_INEQ_SEL, we forget this equation
- * and instead use DEFAULT_RANGE_INEQ_SEL. The same applies if the equation
+ * and instead use DEFAULT_RANGE_INEQ_SEL.  The same applies if the equation
* yields an impossible (negative) result.
*
* A free side-effect is that we can recognize redundant inequalities such
@@ -175,7 +175,7 @@ clauselist_selectivity(PlannerInfo *root,
{
/*
* If it's not a "<" or ">" operator, just merge the
- * selectivity in generically. But if it's the right oprrest,
+ * selectivity in generically.  But if it's the right oprrest,
* add the clause to rqlist for later processing.
*/
switch (get_oprrest(expr->opno))
@@ -460,14 +460,14 @@ treat_as_join_clause(Node *clause, RestrictInfo *rinfo,
* nestloop join's inner relation --- varRelid should then be the ID of the
* inner relation.
*
- * When varRelid is 0, all variables are treated as variables. This
+ * When varRelid is 0, all variables are treated as variables.  This
* is appropriate for ordinary join clauses and restriction clauses.
*
* jointype is the join type, if the clause is a join clause. Pass JOIN_INNER
* if the clause isn't a join clause.
*
* sjinfo is NULL for a non-join clause, otherwise it provides additional
- * context information about the join being performed. There are some
+ * context information about the join being performed.  There are some
* special cases:
* 1. For a special (not INNER) join, sjinfo is always a member of
* root->join_info_list.
@@ -502,7 +502,7 @@ clause_selectivity(PlannerInfo *root,
/*
* If the clause is marked pseudoconstant, then it will be used as a
* gating qual and should not affect selectivity estimates; hence
- * return 1.0. The only exception is that a constant FALSE may be
+ * return 1.0.  The only exception is that a constant FALSE may be
* taken as having selectivity 0.0, since it will surely mean no rows
* out of the plan. This case is simple enough that we need not
* bother caching the result.
@@ -521,11 +521,11 @@ clause_selectivity(PlannerInfo *root,
/*
* If possible, cache the result of the selectivity calculation for
- * the clause. We can cache if varRelid is zero or the clause
+ * the clause.  We can cache if varRelid is zero or the clause
* contains only vars of that relid --- otherwise varRelid will affect
* the result, so mustn't cache. Outer join quals might be examined
* with either their join's actual jointype or JOIN_INNER, so we need
- * two cache variables to remember both cases. Note: we assume the
+ * two cache variables to remember both cases.  Note: we assume the
* result won't change if we are switching the input relations or
* considering a unique-ified case, so we only need one cache variable
* for all non-JOIN_INNER cases.
@@ -686,7 +686,7 @@ clause_selectivity(PlannerInfo *root,
/*
* This is not an operator, so we guess at the selectivity. THIS IS A
* HACK TO GET V4 OUT THE DOOR. FUNCS SHOULD BE ABLE TO HAVE
- * SELECTIVITIES THEMSELVES. -- JMH 7/9/92
+ * SELECTIVITIES THEMSELVES.  -- JMH 7/9/92
*/
s1 = (Selectivity) 0.3333333;
}
diff --git a/src/backend/optimizer/path/costsize.c b/src/backend/optimizer/path/costsize.c
index b1ce54392ff..dcdfb8142a7 100644
--- a/src/backend/optimizer/path/costsize.c
+++ b/src/backend/optimizer/path/costsize.c
@@ -24,7 +24,7 @@
*
* Obviously, taking constants for these values is an oversimplification,
* but it's tough enough to get any useful estimates even at this level of
- * detail. Note that all of these parameters are user-settable, in case
+ * detail.  Note that all of these parameters are user-settable, in case
* the default values are drastically off for a particular platform.
*
* seq_page_cost and random_page_cost can also be overridden for an individual
@@ -455,7 +455,7 @@ cost_index(IndexPath *path, PlannerInfo *root,
* computed for us by query_planner.
*
* Caller is expected to have ensured that tuples_fetched is greater than zero
- * and rounded to integer (see clamp_row_est). The result will likewise be
+ * and rounded to integer (see clamp_row_est).  The result will likewise be
* greater than zero and integral.
*/
double
@@ -651,7 +651,7 @@ cost_bitmap_heap_scan(Path *path, PlannerInfo *root, RelOptInfo *baserel,
/*
* For small numbers of pages we should charge spc_random_page_cost
* apiece, while if nearly all the table's pages are being read, it's more
- * appropriate to charge spc_seq_page_cost apiece. The effect is
+ * appropriate to charge spc_seq_page_cost apiece.  The effect is
* nonlinear, too. For lack of a better idea, interpolate like this to
* determine the cost per page.
*/
@@ -723,7 +723,7 @@ cost_bitmap_tree_node(Path *path, Cost *cost, Selectivity *selec)
* Estimate the cost of a BitmapAnd node
*
* Note that this considers only the costs of index scanning and bitmap
- * creation, not the eventual heap access. In that sense the object isn't
+ * creation, not the eventual heap access.  In that sense the object isn't
* truly a Path, but it has enough path-like properties (costs in particular)
* to warrant treating it as one.
*/
@@ -780,7 +780,7 @@ cost_bitmap_or_node(BitmapOrPath *path, PlannerInfo *root)
/*
* We estimate OR selectivity on the assumption that the inputs are
* non-overlapping, since that's often the case in "x IN (list)" type
+ * situations.  Of course, we clamp to 1.0 at the end.
+ * situations. Of course, we clamp to 1.0 at the end.
*
* The runtime cost of the BitmapOr itself is estimated at 100x
* cpu_operator_cost for each tbm_union needed. Probably too small,
@@ -857,7 +857,7 @@ cost_tidscan(Path *path, PlannerInfo *root,
/*
* We must force TID scan for WHERE CURRENT OF, because only nodeTidscan.c
- * understands how to do it correctly. Therefore, honor enable_tidscan
+ * understands how to do it correctly.  Therefore, honor enable_tidscan
* only when CURRENT OF isn't present. Also note that cost_qual_eval
* counts a CurrentOfExpr as having startup cost disable_cost, which we
* subtract off here; that's to prevent other plan types such as seqscan
@@ -950,7 +950,7 @@ cost_functionscan(Path *path, PlannerInfo *root, RelOptInfo *baserel)
*
* Currently, nodeFunctionscan.c always executes the function to
* completion before returning any rows, and caches the results in a
- * tuplestore. So the function eval cost is all startup cost, and per-row
+ * tuplestore.  So the function eval cost is all startup cost, and per-row
* costs are minimal.
*
* XXX in principle we ought to charge tuplestore spill costs if the
@@ -1007,7 +1007,7 @@ cost_valuesscan(Path *path, PlannerInfo *root, RelOptInfo *baserel)
*
* Note: this is used for both self-reference and regular CTEs; the
* possible cost differences are below the threshold of what we could
- * estimate accurately anyway. Note that the costs of evaluating the
+ * estimate accurately anyway.  Note that the costs of evaluating the
* referenced CTE query are added into the final plan as initplan costs,
* and should NOT be counted here.
*/
@@ -1091,7 +1091,7 @@ cost_recursive_union(Plan *runion, Plan *nrterm, Plan *rterm)
* If the total volume exceeds sort_mem, we switch to a tape-style merge
* algorithm. There will still be about t*log2(t) tuple comparisons in
* total, but we will also need to write and read each tuple once per
- * merge pass. We expect about ceil(logM(r)) merge passes where r is the
+ * merge pass.  We expect about ceil(logM(r)) merge passes where r is the
* number of initial runs formed and M is the merge order used by tuplesort.c.
* Since the average initial run should be about twice sort_mem, we have
* disk traffic = 2 * relsize * ceil(logM(p / (2*sort_mem)))
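[Worked example: with relsize = 1 GB, sort_mem = 64 MB, and merge order M = 6, there are about 1 GB / (2 * 64 MB) = 8 initial runs, hence ceil(log6(8)) = 2 merge passes, for roughly 2 * 1 GB * 2 = 4 GB of disk traffic.]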
@@ -1105,7 +1105,7 @@ cost_recursive_union(Plan *runion, Plan *nrterm, Plan *rterm)
* accesses (XXX can't we refine that guess?)
*
* By default, we charge two operator evals per tuple comparison, which should
- * be in the right ballpark in most cases. The caller can tweak this by
+ * be in the right ballpark in most cases.  The caller can tweak this by
* specifying nonzero comparison_cost; typically that's used for any extra
* work that has to be done to prepare the inputs to the comparison operators.
*
@@ -1227,7 +1227,7 @@ cost_sort(Path *path, PlannerInfo *root,
* Determines and returns the cost of a MergeAppend node.
*
* MergeAppend merges several pre-sorted input streams, using a heap that
- * at any given instant holds the next tuple from each stream. If there
+ * at any given instant holds the next tuple from each stream.  If there
* are N streams, we need about N*log2(N) tuple comparisons to construct
* the heap at startup, and then for each output tuple, about log2(N)
* comparisons to delete the top heap entry and another log2(N) comparisons
@@ -1383,7 +1383,7 @@ cost_agg(Path *path, PlannerInfo *root,
* group otherwise. We charge cpu_tuple_cost for each output tuple.
*
* Note: in this cost model, AGG_SORTED and AGG_HASHED have exactly the
- * same total CPU cost, but AGG_SORTED has lower startup cost. If the
+ * same total CPU cost, but AGG_SORTED has lower startup cost.  If the
* input path is already sorted appropriately, AGG_SORTED should be
* preferred (since it has no risk of memory overflow). This will happen
* as long as the computed total costs are indeed exactly equal --- but if
@@ -1709,10 +1709,10 @@ cost_nestloop(NestPath *path, PlannerInfo *root, SpecialJoinInfo *sjinfo)
* Unlike other costsize functions, this routine makes one actual decision:
* whether we should materialize the inner path. We do that either because
* the inner path can't support mark/restore, or because it's cheaper to
- * use an interposed Material node to handle mark/restore. When the decision
+ * use an interposed Material node to handle mark/restore.  When the decision
* is cost-based it would be logically cleaner to build and cost two separate
* paths with and without that flag set; but that would require repeating most
- * of the calculations here, which are not all that cheap. Since the choice
+ * of the calculations here, which are not all that cheap.  Since the choice
* will not affect output pathkeys or startup cost, only total cost, there is
* no possibility of wanting to keep both paths. So it seems best to make
* the decision here and record it in the path's materialize_inner field.
@@ -1775,7 +1775,7 @@ cost_mergejoin(MergePath *path, PlannerInfo *root, SpecialJoinInfo *sjinfo)
qp_qual_cost.per_tuple -= merge_qual_cost.per_tuple;
/*
- * Get approx # tuples passing the mergequals. We use approx_tuple_count
+ * Get approx # tuples passing the mergequals.  We use approx_tuple_count
* here because we need an estimate done with JOIN_INNER semantics.
*/
mergejointuples = approx_tuple_count(root, &path->jpath, mergeclauses);
@@ -1789,7 +1789,7 @@ cost_mergejoin(MergePath *path, PlannerInfo *root, SpecialJoinInfo *sjinfo)
* estimated approximately as size of merge join output minus size of
* inner relation. Assume that the distinct key values are 1, 2, ..., and
* denote the number of values of each key in the outer relation as m1,
- * m2, ...; in the inner relation, n1, n2, ... Then we have
+ * m2, ...; in the inner relation, n1, n2, ...  Then we have
*
* size of join = m1 * n1 + m2 * n2 + ...
*
@@ -1800,7 +1800,7 @@ cost_mergejoin(MergePath *path, PlannerInfo *root, SpecialJoinInfo *sjinfo)
* This equation works correctly for outer tuples having no inner match
* (nk = 0), but not for inner tuples having no outer match (mk = 0); we
* are effectively subtracting those from the number of rescanned tuples,
- * when we should not. Can we do better without expensive selectivity
+ * when we should not.  Can we do better without expensive selectivity
* computations?
*
* The whole issue is moot if we are working from a unique-ified outer
@@ -1972,7 +1972,7 @@ cost_mergejoin(MergePath *path, PlannerInfo *root, SpecialJoinInfo *sjinfo)
/*
* Decide whether we want to materialize the inner input to shield it from
- * mark/restore and performing re-fetches. Our cost model for regular
+ * mark/restore and performing re-fetches.  Our cost model for regular
* re-fetches is that a re-fetch costs the same as an original fetch,
* which is probably an overestimate; but on the other hand we ignore the
* bookkeeping costs of mark/restore. Not clear if it's worth developing
@@ -2065,7 +2065,7 @@ cost_mergejoin(MergePath *path, PlannerInfo *root, SpecialJoinInfo *sjinfo)
/*
* For each tuple that gets through the mergejoin proper, we charge
* cpu_tuple_cost plus the cost of evaluating additional restriction
- * clauses that are to be applied at the join. (This is pessimistic since
+ * clauses that are to be applied at the join.  (This is pessimistic since
* not all of the quals may get evaluated at each tuple.)
*
* Note: we could adjust for SEMI/ANTI joins skipping some qual
@@ -2292,7 +2292,7 @@ cost_hashjoin(HashPath *path, PlannerInfo *root, SpecialJoinInfo *sjinfo)
* If inner relation is too big then we will need to "batch" the join,
* which implies writing and reading most of the tuples to disk an extra
* time. Charge seq_page_cost per page, since the I/O should be nice and
- * sequential. Writing the inner rel counts as startup cost, all the rest
+ * sequential.  Writing the inner rel counts as startup cost, all the rest
* as run cost.
*/
if (numbatches > 1)
@@ -2384,7 +2384,7 @@ cost_hashjoin(HashPath *path, PlannerInfo *root, SpecialJoinInfo *sjinfo)
/*
* For each tuple that gets through the hashjoin proper, we charge
* cpu_tuple_cost plus the cost of evaluating additional restriction
- * clauses that are to be applied at the join. (This is pessimistic since
+ * clauses that are to be applied at the join.  (This is pessimistic since
* not all of the quals may get evaluated at each tuple.)
*/
startup_cost += qp_qual_cost.startup;
@@ -2437,7 +2437,7 @@ cost_subplan(PlannerInfo *root, SubPlan *subplan, Plan *plan)
{
/*
* Otherwise we will be rescanning the subplan output on each
- * evaluation. We need to estimate how much of the output we will
+ * evaluation.  We need to estimate how much of the output we will
* actually need to scan. NOTE: this logic should agree with the
* tuple_fraction estimates used by make_subplan() in
* plan/subselect.c.
@@ -2485,10 +2485,10 @@ cost_subplan(PlannerInfo *root, SubPlan *subplan, Plan *plan)
/*
* cost_rescan
* Given a finished Path, estimate the costs of rescanning it after
- * having done so the first time. For some Path types a rescan is
+ * having done so the first time.  For some Path types a rescan is
* cheaper than an original scan (if no parameters change), and this
* function embodies knowledge about that. The default is to return
- * the same costs stored in the Path. (Note that the cost estimates
+ * the same costs stored in the Path.  (Note that the cost estimates
* actually stored in Paths are always for first scans.)
*
* This function is not currently intended to model effects such as rescans
@@ -2529,7 +2529,7 @@ cost_rescan(PlannerInfo *root, Path *path,
{
/*
* These plan types materialize their final result in a
- * tuplestore or tuplesort object. So the rescan cost is only
+ * tuplestore or tuplesort object.  So the rescan cost is only
* cpu_tuple_cost per tuple, unless the result is large enough
* to spill to disk.
*/
@@ -2554,8 +2554,8 @@ cost_rescan(PlannerInfo *root, Path *path,
{
/*
* These plan types not only materialize their results, but do
- * not implement qual filtering or projection. So they are
- * even cheaper to rescan than the ones above. We charge only
+ * not implement qual filtering or projection.  So they are
+ * even cheaper to rescan than the ones above.  We charge only
* cpu_operator_cost per tuple. (Note: keep that in sync with
* the run_cost charge in cost_sort, and also see comments in
* cost_material before you change it.)
@@ -2696,7 +2696,7 @@ cost_qual_eval_walker(Node *node, cost_qual_eval_context *context)
* evaluation of AND/OR? Probably *not*, because that would make the
* results depend on the clause ordering, and we are not in any position
* to expect that the current ordering of the clauses is the one that's
- * going to end up being used. The above per-RestrictInfo caching would
+ * going to end up being used.  The above per-RestrictInfo caching would
* not mix well with trying to re-order clauses anyway.
*/
if (IsA(node, FuncExpr))
@@ -2811,7 +2811,7 @@ cost_qual_eval_walker(Node *node, cost_qual_eval_context *context)
else if (IsA(node, AlternativeSubPlan))
{
/*
- * Arbitrarily use the first alternative plan for costing. (We should
+ * Arbitrarily use the first alternative plan for costing.  (We should
* certainly only include one alternative, and we don't yet have
* enough information to know which one the executor is most likely to
* use.)
@@ -2937,7 +2937,7 @@ adjust_semi_join(PlannerInfo *root, JoinPath *path, SpecialJoinInfo *sjinfo,
/*
* jselec can be interpreted as the fraction of outer-rel rows that have
* any matches (this is true for both SEMI and ANTI cases). And nselec is
- * the fraction of the Cartesian product that matches. So, the average
+ * the fraction of the Cartesian product that matches.  So, the average
* number of matches for each outer-rel row that has at least one match is
* nselec * inner_rows / jselec.
*
@@ -2960,7 +2960,7 @@ adjust_semi_join(PlannerInfo *root, JoinPath *path, SpecialJoinInfo *sjinfo,
/*
* If requested, check whether the inner path uses all the joinquals as
- * indexquals. (If that's true, we can assume that an unmatched outer
+ * indexquals.  (If that's true, we can assume that an unmatched outer
* tuple is cheap to process, whereas otherwise it's probably expensive.)
*/
if (indexed_join_quals)
@@ -3117,7 +3117,7 @@ set_joinrel_size_estimates(PlannerInfo *root, RelOptInfo *rel,
double nrows;
/*
- * Compute joinclause selectivity. Note that we are only considering
+ * Compute joinclause selectivity.  Note that we are only considering
* clauses that become restriction clauses at this join level; we are not
* double-counting them because they were not considered in estimating the
* sizes of the component rels.
@@ -3175,7 +3175,7 @@ set_joinrel_size_estimates(PlannerInfo *root, RelOptInfo *rel,
*
* If we are doing an outer join, take that into account: the joinqual
* selectivity has to be clamped using the knowledge that the output must
- * be at least as large as the non-nullable input. However, any
+ * be at least as large as the non-nullable input.  However, any
* pushed-down quals are applied after the outer join, so their
* selectivity applies fully.
*
@@ -3246,7 +3246,7 @@ set_subquery_size_estimates(PlannerInfo *root, RelOptInfo *rel,
/*
* Compute per-output-column width estimates by examining the subquery's
- * targetlist. For any output that is a plain Var, get the width estimate
+ * targetlist.  For any output that is a plain Var, get the width estimate
* that was made while planning the subquery. Otherwise, we leave it to
* set_rel_width to fill in a datatype-based default estimate.
*/
@@ -3402,7 +3402,7 @@ set_cte_size_estimates(PlannerInfo *root, RelOptInfo *rel, Plan *cteplan)
* of estimating baserestrictcost, so we set that, and we also set up width
* using what will be purely datatype-driven estimates from the targetlist.
* There is no way to do anything sane with the rows value, so we just put
- * a default estimate and hope that the wrapper can improve on it. The
+ * a default estimate and hope that the wrapper can improve on it.  The
* wrapper's PlanForeignScan function will be called momentarily.
*
* The rel's targetlist and restrictinfo list must have been constructed
@@ -3517,7 +3517,7 @@ set_rel_width(PlannerInfo *root, RelOptInfo *rel)
{
/*
* We could be looking at an expression pulled up from a subquery,
- * or a ROW() representing a whole-row child Var, etc. Do what we
+ * or a ROW() representing a whole-row child Var, etc.  Do what we
* can using the expression type information.
*/
int32 item_width;
diff --git a/src/backend/optimizer/path/equivclass.c b/src/backend/optimizer/path/equivclass.c
index 57214a33a24..2a3826cfa5e 100644
--- a/src/backend/optimizer/path/equivclass.c
+++ b/src/backend/optimizer/path/equivclass.c
@@ -73,7 +73,7 @@ static bool reconsider_full_join_clause(PlannerInfo *root,
*
* If below_outer_join is true, then the clause was found below the nullable
* side of an outer join, so its sides might validly be both NULL rather than
- * strictly equal. We can still deduce equalities in such cases, but we take
+ * strictly equal.  We can still deduce equalities in such cases, but we take
* care to mark an EquivalenceClass if it came from any such clauses. Also,
* we have to check that both sides are either pseudo-constants or strict
* functions of Vars, else they might not both go to NULL above the outer
@@ -140,9 +140,9 @@ process_equivalence(PlannerInfo *root, RestrictInfo *restrictinfo,
collation);
/*
- * Reject clauses of the form X=X. These are not as redundant as they
+ * Reject clauses of the form X=X.  These are not as redundant as they
* might seem at first glance: assuming the operator is strict, this is
- * really an expensive way to write X IS NOT NULL. So we must not risk
+ * really an expensive way to write X IS NOT NULL.  So we must not risk
* just losing the clause, which would be possible if there is already a
* single-element EquivalenceClass containing X. The case is not common
* enough to be worth contorting the EC machinery for, so just reject the
@@ -186,14 +186,14 @@ process_equivalence(PlannerInfo *root, RestrictInfo *restrictinfo,
* Sweep through the existing EquivalenceClasses looking for matches to
* item1 and item2. These are the possible outcomes:
*
- * 1. We find both in the same EC. The equivalence is already known, so
+ * 1. We find both in the same EC.  The equivalence is already known, so
* there's nothing to do.
*
* 2. We find both in different ECs. Merge the two ECs together.
*
* 3. We find just one. Add the other to its EC.
*
- * 4. We find neither. Make a new, two-entry EC.
+ * 4. We find neither.  Make a new, two-entry EC.
*
* Note: since all ECs are built through this process or the similar
* search in get_eclass_for_sort_expr(), it's impossible that we'd match
@@ -397,7 +397,7 @@ process_equivalence(PlannerInfo *root, RestrictInfo *restrictinfo,
* Also, the expression's exposed collation must match the EC's collation.
* This is important because in comparisons like "foo < bar COLLATE baz",
* only one of the expressions has the correct exposed collation as we receive
- * it from the parser. Forcing both of them to have it ensures that all
+ * it from the parser.  Forcing both of them to have it ensures that all
* variant spellings of such a construct behave the same. Again, we can
* stick on a RelabelType to force the right exposed collation. (It might
* work to not label the collation at all in EC members, but this is risky
@@ -502,7 +502,7 @@ add_eq_member(EquivalenceClass *ec, Expr *expr, Relids relids,
* single-member EquivalenceClass for it.
*
* sortref is the SortGroupRef of the originating SortGroupClause, if any,
- * or zero if not. (It should never be zero if the expression is volatile!)
+ * or zero if not.  (It should never be zero if the expression is volatile!)
*
* If rel is not NULL, it identifies a specific relation we're considering
* a path for, and indicates that child EC members for that relation can be
@@ -656,7 +656,7 @@ get_eclass_for_sort_expr(PlannerInfo *root,
*
* When an EC contains pseudoconstants, our strategy is to generate
* "member = const1" clauses where const1 is the first constant member, for
- * every other member (including other constants). If we are able to do this
+ * every other member (including other constants).  If we are able to do this
* then we don't need any "var = var" comparisons because we've successfully
* constrained all the vars at their points of creation. If we fail to
* generate any of these clauses due to lack of cross-type operators, we fall
@@ -681,7 +681,7 @@ get_eclass_for_sort_expr(PlannerInfo *root,
* "WHERE a.x = b.y AND b.y = a.z", the scheme breaks down if we cannot
* generate "a.x = a.z" as a restriction clause for A.) In this case we mark
* the EC "ec_broken" and fall back to regurgitating its original source
- * RestrictInfos at appropriate times. We do not try to retract any derived
+ * RestrictInfos at appropriate times.  We do not try to retract any derived
* clauses already generated from the broken EC, so the resulting plan could
* be poor due to bad selectivity estimates caused by redundant clauses. But
* the correct solution to that is to fix the opfamilies ...
@@ -941,7 +941,7 @@ generate_base_implied_equalities_broken(PlannerInfo *root,
* we consider different join paths, we avoid generating multiple copies:
* whenever we select a particular pair of EquivalenceMembers to join,
* we check to see if the pair matches any original clause (in ec_sources)
- * or previously-built clause (in ec_derives). This saves memory and allows
+ * or previously-built clause (in ec_derives).  This saves memory and allows
* re-use of information cached in RestrictInfos.
*/
List *
@@ -1011,7 +1011,7 @@ generate_join_implied_equalities_normal(PlannerInfo *root,
* First, scan the EC to identify member values that are computable at the
* outer rel, at the inner rel, or at this relation but not in either
* input rel. The outer-rel members should already be enforced equal,
- * likewise for the inner-rel members. We'll need to create clauses to
+ * likewise for the inner-rel members.  We'll need to create clauses to
* enforce that any newly computable members are all equal to each other
* as well as to at least one input member, plus enforce at least one
* outer-rel member equal to at least one inner-rel member.
@@ -1034,7 +1034,7 @@ generate_join_implied_equalities_normal(PlannerInfo *root,
}
/*
- * First, select the joinclause if needed. We can equate any one outer
+ * First, select the joinclause if needed.  We can equate any one outer
* member to any one inner member, but we have to find a datatype
* combination for which an opfamily member operator exists. If we have
* choices, we prefer simple Var members (possibly with RelabelType) since
@@ -1236,8 +1236,8 @@ create_join_clause(PlannerInfo *root,
/*
* Search to see if we already built a RestrictInfo for this pair of
- * EquivalenceMembers. We can use either original source clauses or
- * previously-derived clauses. The check on opno is probably redundant,
+ * EquivalenceMembers. We can use either original source clauses or
+ * previously-derived clauses. The check on opno is probably redundant,
* but be safe ...
*/
foreach(lc, ec->ec_sources)
@@ -1368,7 +1368,7 @@ create_join_clause(PlannerInfo *root,
*
* Outer join clauses that are marked outerjoin_delayed are special: this
* condition means that one or both VARs might go to null due to a lower
- * outer join. We can still push a constant through the clause, but only
+ * outer join. We can still push a constant through the clause, but only
* if its operator is strict; and we *have to* throw the clause back into
* regular joinclause processing. By keeping the strict join clause,
* we ensure that any null-extended rows that are mistakenly generated due
@@ -1562,7 +1562,7 @@ reconsider_outer_join_clause(PlannerInfo *root, RestrictInfo *rinfo,
/*
* Yes it does! Try to generate a clause INNERVAR = CONSTANT for each
- * CONSTANT in the EC. Note that we must succeed with at least one
+ * CONSTANT in the EC. Note that we must succeed with at least one
* constant before we can decide to throw away the outer-join clause.
*/
match = false;
@@ -2051,7 +2051,7 @@ find_eclass_clauses_for_index_join(PlannerInfo *root, RelOptInfo *rel,
* a joinclause between the two given relations.
*
* This is essentially a very cut-down version of
- * generate_join_implied_equalities(). Note it's OK to occasionally say "yes"
+ * generate_join_implied_equalities(). Note it's OK to occasionally say "yes"
* incorrectly. Hence we don't bother with details like whether the lack of a
* cross-type operator might prevent the clause from actually being generated.
*/
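
As a rough illustration of such an optimistic test, with relid sets modeled as plain bitmasks (the real code uses Bitmapsets and per-member checks; ec_might_join is an invented name):

    /* Optimistic joinclause test sketch: relid sets as bitmasks. */
    #include <stdio.h>
    #include <stdbool.h>

    typedef unsigned Relids;            /* bit i set => relation i mentioned */

    static bool
    ec_might_join(Relids ec_relids, Relids rel1, Relids rel2)
    {
        /* Saying "yes" when no usable clause exists only wastes planner
         * cycles, so overlap with both sides is close enough. */
        return (ec_relids & rel1) != 0 && (ec_relids & rel2) != 0;
    }

    int main(void)
    {
        Relids ec = 0x3;                               /* EC mentions rels 1, 2 */
        printf("%d\n", ec_might_join(ec, 0x1, 0x2));   /* 1: worth trying */
        printf("%d\n", ec_might_join(ec, 0x1, 0x4));   /* 0: rel 3 absent */
        return 0;
    }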
@@ -2081,7 +2081,7 @@ have_relevant_eclass_joinclause(PlannerInfo *root,
* as a possibly-overoptimistic heuristic.
*
* We don't test ec_has_const either, even though a const eclass won't
- * generate real join clauses. This is because if we had "WHERE a.x =
+ * generate real join clauses. This is because if we had "WHERE a.x =
* b.y and a.x = 42", it is worth considering a join between a and b,
* since the join result is likely to be small even though it'll end
* up being an unqualified nestloop.
@@ -2155,7 +2155,7 @@ has_relevant_eclass_joinclause(PlannerInfo *root, RelOptInfo *rel1)
* as a possibly-overoptimistic heuristic.
*
* We don't test ec_has_const either, even though a const eclass won't
- * generate real join clauses. This is because if we had "WHERE a.x =
+ * generate real join clauses. This is because if we had "WHERE a.x =
* b.y and a.x = 42", it is worth considering a join between a and b,
* since the join result is likely to be small even though it'll end
* up being an unqualified nestloop.
@@ -2202,7 +2202,7 @@ has_relevant_eclass_joinclause(PlannerInfo *root, RelOptInfo *rel1)
* against the specified relation.
*
* This is just a heuristic test and doesn't have to be exact; it's better
- * to say "yes" incorrectly than "no". Hence we don't bother with details
+ * to say "yes" incorrectly than "no". Hence we don't bother with details
* like whether the lack of a cross-type operator might prevent the clause
* from actually being generated.
*/
@@ -2223,7 +2223,7 @@ eclass_useful_for_merging(EquivalenceClass *eclass,
/*
* Note we don't test ec_broken; if we did, we'd need a separate code path
- * to look through ec_sources. Checking the members anyway is OK as a
+ * to look through ec_sources. Checking the members anyway is OK as a
* possibly-overoptimistic heuristic.
*/
diff --git a/src/backend/optimizer/path/indxpath.c b/src/backend/optimizer/path/indxpath.c
index 5a30ba6e2eb..63281c17cf9 100644
--- a/src/backend/optimizer/path/indxpath.c
+++ b/src/backend/optimizer/path/indxpath.c
@@ -157,7 +157,7 @@ static Const *string_to_const(const char *str, Oid datatype);
* scan this routine deems potentially interesting for the current query.
*
* We also determine the set of other relids that participate in join
- * clauses that could be used with each index. The actually best innerjoin
+ * clauses that could be used with each index. The actually best innerjoin
* path will be generated for each outer relation later on, but knowing the
* set of potential otherrels allows us to identify equivalent outer relations
* and avoid repeated computation.
@@ -334,16 +334,16 @@ find_usable_indexes(PlannerInfo *root, RelOptInfo *rel,
}
/*
- * Ignore partial indexes that do not match the query. If a partial
+ * Ignore partial indexes that do not match the query. If a partial
* index is marked predOK then we know it's OK; otherwise, if we are
* at top level we know it's not OK (since predOK is exactly whether
* its predicate could be proven from the toplevel clauses).
* Otherwise, we have to test whether the added clauses are sufficient
- * to imply the predicate. If so, we could use the index in the
+ * to imply the predicate. If so, we could use the index in the
* current context.
*
* We set useful_predicate to true iff the predicate was proven using
- * the current set of clauses. This is needed to prevent matching a
+ * the current set of clauses. This is needed to prevent matching a
* predOK index to an arm of an OR, which would be a legal but
* pointlessly inefficient plan. (A better plan will be generated by
* just scanning the predOK index alone, no OR.)
@@ -636,7 +636,7 @@ generate_bitmap_or_paths(PlannerInfo *root, RelOptInfo *rel,
* Given a nonempty list of bitmap paths, AND them into one path.
*
* This is a nontrivial decision since we can legally use any subset of the
- * given path set. We want to choose a good tradeoff between selectivity
+ * given path set. We want to choose a good tradeoff between selectivity
* and cost of computing the bitmap.
*
* The result is either a single one of the inputs, or a BitmapAndPath
@@ -664,12 +664,12 @@ choose_bitmap_and(PlannerInfo *root, RelOptInfo *rel,
* In theory we should consider every nonempty subset of the given paths.
* In practice that seems like overkill, given the crude nature of the
* estimates, not to mention the possible effects of higher-level AND and
- * OR clauses. Moreover, it's completely impractical if there are a large
+ * OR clauses. Moreover, it's completely impractical if there are a large
* number of paths, since the work would grow as O(2^N).
*
* As a heuristic, we first check for paths using exactly the same sets of
* WHERE clauses + index predicate conditions, and reject all but the
- * cheapest-to-scan in any such group. This primarily gets rid of indexes
+ * cheapest-to-scan in any such group. This primarily gets rid of indexes
* that include the interesting columns but also irrelevant columns. (In
* situations where the DBA has gone overboard on creating variant
* indexes, this can make for a very large reduction in the number of
@@ -689,14 +689,14 @@ choose_bitmap_and(PlannerInfo *root, RelOptInfo *rel,
* costsize.c and clausesel.c aren't very smart about redundant clauses.
* They will usually double-count the redundant clauses, producing a
* too-small selectivity that makes a redundant AND step look like it
- * reduces the total cost. Perhaps someday that code will be smarter and
+ * reduces the total cost. Perhaps someday that code will be smarter and
* we can remove this limitation. (But note that this also defends
* against flat-out duplicate input paths, which can happen because
* best_inner_indexscan will find the same OR join clauses that
* create_or_index_quals has pulled OR restriction clauses out of.)
*
* For the same reason, we reject AND combinations in which an index
- * predicate clause duplicates another clause. Here we find it necessary
+ * predicate clause duplicates another clause. Here we find it necessary
* to be even stricter: we'll reject a partial index if any of its
* predicate clauses are implied by the set of WHERE clauses and predicate
* clauses used so far. This covers cases such as a condition "x = 42"
@@ -759,7 +759,7 @@ choose_bitmap_and(PlannerInfo *root, RelOptInfo *rel,
/*
* For each surviving index, consider it as an "AND group leader", and see
* whether adding on any of the later indexes results in an AND path with
- * cheaper total cost than before. Then take the cheapest AND group.
+ * cheaper total cost than before. Then take the cheapest AND group.
*/
for (i = 0; i < npaths; i++)
{
@@ -1015,7 +1015,7 @@ find_indexpath_quals(Path *bitmapqual, List **quals, List **preds)
/*
* find_list_position
* Return the given node's position (counting from 0) in the given
- * list of nodes. If it's not equal() to any existing list member,
+ * list of nodes. If it's not equal() to any existing list member,
* add it at the end, and return that position.
*/
static int
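
The contract is simple enough to model in a few lines (fixed-size toy list; strcmp stands in for equal()):

    /* Toy model of the find-or-append contract described above. */
    #include <stdio.h>
    #include <string.h>

    static int
    find_position(const char *node, const char *list[], int *n)
    {
        for (int i = 0; i < *n; i++)
            if (strcmp(list[i], node) == 0)   /* stand-in for equal() */
                return i;
        list[*n] = node;                      /* not found: append at end */
        return (*n)++;
    }

    int main(void)
    {
        const char *list[8];
        int n = 0;
        printf("%d ", find_position("a.x", list, &n));  /* 0 */
        printf("%d ", find_position("b.y", list, &n));  /* 1 */
        printf("%d\n", find_position("a.x", list, &n)); /* 0 again */
        return 0;
    }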
@@ -1056,7 +1056,7 @@ find_list_position(Node *node, List **nodelist)
*
* We can use clauses from either the current clauses or outer_clauses lists,
* but *found_clause is set TRUE only if we used at least one clause from
- * the "current clauses" list. See find_usable_indexes() for motivation.
+ * the "current clauses" list. See find_usable_indexes() for motivation.
*
* outer_relids determines what Vars will be allowed on the other side
* of a possible index qual; see match_clause_to_indexcol().
@@ -1169,7 +1169,7 @@ group_clauses_by_indexkey(IndexOptInfo *index,
* to the caller-specified outer_relids relations (which had better not
* include the relation whose index is being tested). outer_relids should
* be NULL when checking simple restriction clauses, and the outer side
- * of the join when building a join inner scan. Other than that, the
+ * of the join when building a join inner scan. Other than that, the
* only thing we don't like is volatile functions.
*
* Note: in most cases we already know that the clause as a whole uses
@@ -1194,7 +1194,7 @@ group_clauses_by_indexkey(IndexOptInfo *index,
* It is also possible to match RowCompareExpr clauses to indexes (but
* currently, only btree indexes handle this). In this routine we will
* report a match if the first column of the row comparison matches the
- * target index column. This is sufficient to guarantee that some index
+ * target index column. This is sufficient to guarantee that some index
* condition can be constructed from the RowCompareExpr --- whether the
* remaining columns match the index too is considered in
* expand_indexqual_rowcompare().
@@ -1237,7 +1237,7 @@ match_clause_to_indexcol(IndexOptInfo *index,
bool plain_op;
/*
- * Never match pseudoconstants to indexes. (Normally this could not
+ * Never match pseudoconstants to indexes. (Normally this could not
* happen anyway, since a pseudoconstant clause couldn't contain a Var,
* but what if someone builds an expression index on a constant? It's not
* totally unreasonable to do so with a partial index, either.)
@@ -1552,7 +1552,7 @@ match_index_to_pathkeys(IndexOptInfo *index, List *pathkeys)
* Note that we currently do not consider the collation of the ordering
* operator's result. In practical cases the result type will be numeric
* and thus have no collation, and it's not very clear what to match to
- * if it did have a collation. The index's collation should match the
+ * if it did have a collation. The index's collation should match the
* ordering operator's input collation, not its result.
*
* If successful, return 'clause' as-is if the indexkey is on the left,
@@ -1683,7 +1683,7 @@ check_partial_indexes(PlannerInfo *root, RelOptInfo *rel)
/*
* indexable_outerrelids
* Finds all other relids that participate in any indexable join clause
- * for the specified table. Returns a set of relids.
+ * for the specified table. Returns a set of relids.
*/
static Relids
indexable_outerrelids(PlannerInfo *root, RelOptInfo *rel)
@@ -1870,7 +1870,7 @@ eclass_matches_any_index(EquivalenceClass *ec, EquivalenceMember *em,
* compatible with the EC, since no clause generated from the EC
* could be used with the index. For non-btree indexes, we can't
* easily tell whether clauses generated from the EC could be used
- * with the index, so only check for expression match. This might
+ * with the index, so only check for expression match. This might
* mean we return "true" for a useless index, but that will just
* cause some wasted planner cycles; it's better than ignoring
* useful indexes.
@@ -1970,7 +1970,7 @@ best_inner_indexscan(PlannerInfo *root, RelOptInfo *rel,
/*
* Look to see if we already computed the result for this set of relevant
* outerrels. (We include the isouterjoin status in the cache lookup key
- * for safety. In practice I suspect this is not necessary because it
+ * for safety. In practice I suspect this is not necessary because it
* should always be the same for a given combination of rels.)
*
* NOTE: because we cache on outer_relids rather than outer_rel->relids,
@@ -1999,7 +1999,7 @@ best_inner_indexscan(PlannerInfo *root, RelOptInfo *rel,
*
* Note: because we include restriction clauses, we will find indexscans
* that could be plain indexscans, ie, they don't require the join context
- * at all. This may seem redundant, but we need to include those scans in
+ * at all. This may seem redundant, but we need to include those scans in
* the input given to choose_bitmap_and() to be sure we find optimal AND
* combinations of join and non-join scans. Also, even if the "best inner
* indexscan" is just a plain indexscan, it will have a different cost
@@ -2137,7 +2137,7 @@ find_clauses_for_join(PlannerInfo *root, RelOptInfo *rel,
/*
* Also check to see if any EquivalenceClasses can produce a relevant
- * joinclause. Since all such clauses are effectively pushed-down, this
+ * joinclause. Since all such clauses are effectively pushed-down, this
* doesn't apply to outer joins.
*/
if (!isouterjoin && rel->has_eclass_joins)
@@ -2293,7 +2293,7 @@ match_index_to_operand(Node *operand,
int indkey;
/*
- * Ignore any RelabelType node above the operand. This is needed to be
+ * Ignore any RelabelType node above the operand. This is needed to be
* able to apply indexscanning in binary-compatible-operator cases. Note:
* we can assume there is at most one RelabelType node;
* eval_const_expressions() will have simplified if more than one.
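
A minimal sketch of that stripping step, with toy node types rather than the real Node/RelabelType definitions:

    /* Toy: look through a single binary-compatibility relabel node. */
    #include <stdio.h>

    typedef enum { T_Var, T_RelabelType } NodeTag;
    typedef struct Node { NodeTag tag; struct Node *arg; } Node;

    static Node *
    strip_relabel(Node *node)
    {
        /* At most one RelabelType can appear here, per the comment above. */
        if (node->tag == T_RelabelType)
            node = node->arg;
        return node;
    }

    int main(void)
    {
        Node var = {T_Var, NULL};
        Node relabel = {T_RelabelType, &var};
        printf("%s\n", strip_relabel(&relabel) == &var ? "match" : "no match");
        return 0;
    }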
@@ -2360,10 +2360,10 @@ match_index_to_operand(Node *operand,
* indexscan machinery. The key idea is that these operators allow us
* to derive approximate indexscan qual clauses, such that any tuples
* that pass the operator clause itself must also satisfy the simpler
- * indexscan condition(s). Then we can use the indexscan machinery
+ * indexscan condition(s). Then we can use the indexscan machinery
* to avoid scanning as much of the table as we'd otherwise have to,
* while applying the original operator as a qpqual condition to ensure
- * we deliver only the tuples we want. (In essence, we're using a regular
+ * we deliver only the tuples we want. (In essence, we're using a regular
* index as if it were a lossy index.)
*
* An example of what we're doing is
@@ -2377,7 +2377,7 @@ match_index_to_operand(Node *operand,
*
* Another thing that we do with this machinery is to provide special
* smarts for "boolean" indexes (that is, indexes on boolean columns
- * that support boolean equality). We can transform a plain reference
+ * that support boolean equality). We can transform a plain reference
* to the indexkey into "indexkey = true", or "NOT indexkey" into
* "indexkey = false", so as to make the expression indexable using the
* regular index operators. (As of Postgres 8.1, we must do this here
@@ -2798,7 +2798,7 @@ expand_indexqual_opclause(RestrictInfo *rinfo, Oid opfamily, Oid idxcollation)
/*
* LIKE and regex operators are not members of any btree index opfamily,
* but they can be members of opfamilies for more exotic index types such
- * as GIN. Therefore, we should only do expansion if the operator is
+ * as GIN. Therefore, we should only do expansion if the operator is
* actually not in the opfamily. But checking that requires a syscache
* lookup, so it's best to first see if the operator is one we are
* interested in.
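
The classic instance of this machinery is turning a left-anchored LIKE into a btree-usable range. A byte-wise sketch, assuming an ASCII, nonempty prefix (the real make_greater_string() must cope with encodings and collations):

    /* Naive prefix-to-range expansion for "col LIKE 'foo%'". */
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        const char *pattern = "foo%";
        char lo[16], hi[16];
        size_t n = strcspn(pattern, "%_");   /* length of the fixed prefix */

        memcpy(lo, pattern, n);
        lo[n] = '\0';
        strcpy(hi, lo);
        hi[n - 1]++;                         /* "fop": first string > prefix */

        /* Approximate quals; the original LIKE stays as a recheck. */
        printf("col >= '%s' AND col < '%s'\n", lo, hi);
        return 0;
    }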
@@ -2881,7 +2881,7 @@ expand_indexqual_opclause(RestrictInfo *rinfo, Oid opfamily, Oid idxcollation)
* column matches) or a simple OpExpr (if the first-column match is all
* there is). In these cases the modified clause is always "<=" or ">="
* even when the original was "<" or ">" --- this is necessary to match all
- * the rows that could match the original. (We are essentially building a
+ * the rows that could match the original. (We are essentially building a
* lossy version of the row comparison when we do this.)
*/
static RestrictInfo *
@@ -2964,7 +2964,7 @@ expand_indexqual_rowcompare(RestrictInfo *rinfo,
break; /* no good, volatile comparison value */
/*
- * The Var side can match any column of the index. If the user does
+ * The Var side can match any column of the index. If the user does
* something weird like having multiple identical index columns, we
* insist the match be on the first such column, to avoid confusing
* the executor.
diff --git a/src/backend/optimizer/path/joinpath.c b/src/backend/optimizer/path/joinpath.c
index 7d3cf425da5..7c654ddf4eb 100644
--- a/src/backend/optimizer/path/joinpath.c
+++ b/src/backend/optimizer/path/joinpath.c
@@ -97,7 +97,7 @@ add_paths_to_joinrel(PlannerInfo *root,
/*
* 1. Consider mergejoin paths where both relations must be explicitly
- * sorted. Skip this if we can't mergejoin.
+ * sorted. Skip this if we can't mergejoin.
*/
if (mergejoin_allowed)
sort_inner_and_outer(root, joinrel, outerrel, innerrel,
@@ -118,7 +118,7 @@ add_paths_to_joinrel(PlannerInfo *root,
/*
* 3. Consider paths where the inner relation need not be explicitly
- * sorted. This includes mergejoins only (nestloops were already built in
+ * sorted. This includes mergejoins only (nestloops were already built in
* match_unsorted_outer).
*
* Diked out as redundant 2/13/2000 -- tgl. There isn't any really
@@ -149,7 +149,7 @@ add_paths_to_joinrel(PlannerInfo *root,
* We already know that the clause is a binary opclause referencing only the
* rels in the current join. The point here is to check whether it has the
* form "outerrel_expr op innerrel_expr" or "innerrel_expr op outerrel_expr",
- * rather than mixing outer and inner vars on either side. If it matches,
+ * rather than mixing outer and inner vars on either side. If it matches,
* we set the transient flag outer_is_left to identify which side is which.
*/
static inline bool
@@ -238,7 +238,7 @@ sort_inner_and_outer(PlannerInfo *root,
*
* Actually, it's not quite true that every mergeclause ordering will
* generate a different path order, because some of the clauses may be
- * partially redundant (refer to the same EquivalenceClasses). Therefore,
+ * partially redundant (refer to the same EquivalenceClasses). Therefore,
* what we do is convert the mergeclause list to a list of canonical
* pathkeys, and then consider different orderings of the pathkeys.
*
@@ -331,7 +331,7 @@ sort_inner_and_outer(PlannerInfo *root,
* cheapest-total inner-indexscan path (if any), and one on the
* cheapest-startup inner-indexscan path (if different).
*
- * We also consider mergejoins if mergejoin clauses are available. We have
+ * We also consider mergejoins if mergejoin clauses are available. We have
* two ways to generate the inner path for a mergejoin: sort the cheapest
* inner path, or use an inner path that is already suitably ordered for the
* merge. If we have several mergeclauses, it could be that there is no inner
@@ -648,7 +648,7 @@ match_unsorted_outer(PlannerInfo *root,
/*
* Look for an inner path ordered well enough for the first
- * 'sortkeycnt' innersortkeys. NB: trialsortkeys list is modified
+ * 'sortkeycnt' innersortkeys. NB: trialsortkeys list is modified
* destructively, which is why we made a copy...
*/
trialsortkeys = list_truncate(trialsortkeys, sortkeycnt);
@@ -857,7 +857,7 @@ hash_inner_and_outer(PlannerInfo *root,
* best_appendrel_indexscan
* Finds the best available set of inner indexscans for a nestloop join
* with the given append relation on the inside and the given outer_rel
- * outside. Returns an AppendPath comprising the best inner scans, or
+ * outside. Returns an AppendPath comprising the best inner scans, or
* NULL if there are no possible inner indexscans.
*
* Note that we currently consider only cheapest-total-cost. It's not
diff --git a/src/backend/optimizer/path/joinrels.c b/src/backend/optimizer/path/joinrels.c
index 24e4e59ea6a..357ee5399c7 100644
--- a/src/backend/optimizer/path/joinrels.c
+++ b/src/backend/optimizer/path/joinrels.c
@@ -182,7 +182,7 @@ join_search_one_level(PlannerInfo *root, int level)
* SELECT * FROM a,b,c WHERE (a.f1 + b.f2 + c.f3) = 0;
*
* The join clause will be usable at level 3, but at level 2 we have no
- * choice but to make cartesian joins. We consider only left-sided and
+ * choice but to make cartesian joins. We consider only left-sided and
* right-sided cartesian joins in this case (no bushy).
*/
if (joinrels[level] == NIL)
@@ -210,7 +210,7 @@ join_search_one_level(PlannerInfo *root, int level)
/*----------
* When special joins are involved, there may be no legal way
- * to make an N-way join for some values of N. For example consider
+ * to make an N-way join for some values of N. For example consider
*
* SELECT ... FROM t1 WHERE
* x IN (SELECT ... FROM t2,t3 WHERE ...) AND
@@ -329,7 +329,7 @@ join_is_legal(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2,
ListCell *l;
/*
- * Ensure output params are set on failure return. This is just to
+ * Ensure output params are set on failure return. This is just to
* suppress uninitialized-variable warnings from overly anal compilers.
*/
*sjinfo_p = NULL;
@@ -337,7 +337,7 @@ join_is_legal(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2,
/*
* If we have any special joins, the proposed join might be illegal; and
- * in any case we have to determine its join type. Scan the join info
+ * in any case we have to determine its join type. Scan the join info
* list for conflicts.
*/
match_sjinfo = NULL;
@@ -560,7 +560,7 @@ make_join_rel(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2)
/*
* If it's a plain inner join, then we won't have found anything in
- * join_info_list. Make up a SpecialJoinInfo so that selectivity
+ * join_info_list. Make up a SpecialJoinInfo so that selectivity
* estimation functions will know what's being joined.
*/
if (sjinfo == NULL)
@@ -848,7 +848,7 @@ have_join_order_restriction(PlannerInfo *root,
*
* Essentially, this tests whether have_join_order_restriction() could
* succeed with this rel and some other one. It's OK if we sometimes
- * say "true" incorrectly. (Therefore, we don't bother with the relatively
+ * say "true" incorrectly. (Therefore, we don't bother with the relatively
* expensive has_legal_joinclause test.)
*/
static bool
@@ -953,7 +953,7 @@ is_dummy_rel(RelOptInfo *rel)
* dummy.
*
* Also, when called during GEQO join planning, we are in a short-lived
- * memory context. We must make sure that the dummy path attached to a
+ * memory context. We must make sure that the dummy path attached to a
* baserel survives the GEQO cycle, else the baserel is trashed for future
* GEQO cycles. On the other hand, when we are marking a joinrel during GEQO,
* we don't want the dummy path to clutter the main planning context. Upshot
diff --git a/src/backend/optimizer/path/orindxpath.c b/src/backend/optimizer/path/orindxpath.c
index 732ab063638..0114708095c 100644
--- a/src/backend/optimizer/path/orindxpath.c
+++ b/src/backend/optimizer/path/orindxpath.c
@@ -41,7 +41,7 @@
*
* The added quals are partially redundant with the original OR, and therefore
* will cause the size of the joinrel to be underestimated when it is finally
- * formed. (This would be true of a full transformation to CNF as well; the
+ * formed. (This would be true of a full transformation to CNF as well; the
* fault is not really in the transformation, but in clauselist_selectivity's
* inability to recognize redundant conditions.) To minimize the collateral
* damage, we want to minimize the number of quals added. Therefore we do
@@ -56,7 +56,7 @@
* it is finally formed. This is a MAJOR HACK: it depends on the fact
* that clause selectivities are cached and on the fact that the same
* RestrictInfo node will appear in every joininfo list that might be used
- * when the joinrel is formed. And it probably isn't right in cases where
+ * when the joinrel is formed. And it probably isn't right in cases where
* the size estimation is nonlinear (i.e., outer and IN joins). But it
* beats not doing anything.
*
@@ -96,10 +96,10 @@ create_or_index_quals(PlannerInfo *root, RelOptInfo *rel)
* enforced at the relation scan level.
*
* We must also ignore clauses that are marked !is_pushed_down (ie they
- * are themselves outer-join clauses). It would be safe to extract an
+ * are themselves outer-join clauses). It would be safe to extract an
* index condition from such a clause if we are within the nullable rather
* than the non-nullable side of its join, but we haven't got enough
- * context here to tell which applies. OR clauses in outer-join quals
+ * context here to tell which applies. OR clauses in outer-join quals
* aren't exactly common, so we'll let that case go unoptimized for now.
*/
foreach(i, rel->joininfo)
@@ -114,7 +114,7 @@ create_or_index_quals(PlannerInfo *root, RelOptInfo *rel)
* Use the generate_bitmap_or_paths() machinery to estimate the
* value of each OR clause. We can use regular restriction
* clauses along with the OR clause contents to generate
- * indexquals. We pass outer_rel = NULL so that sub-clauses that
+ * indexquals. We pass outer_rel = NULL so that sub-clauses that
* are actually joins will be ignored.
*/
List *orpaths;
diff --git a/src/backend/optimizer/path/pathkeys.c b/src/backend/optimizer/path/pathkeys.c
index 991e0965124..6cd2b4dd262 100644
--- a/src/backend/optimizer/path/pathkeys.c
+++ b/src/backend/optimizer/path/pathkeys.c
@@ -259,7 +259,7 @@ make_pathkey_from_sortinfo(PlannerInfo *root,
/*
* EquivalenceClasses need to contain opfamily lists based on the family
* membership of mergejoinable equality operators, which could belong to
- * more than one opfamily. So we have to look up the opfamily's equality
+ * more than one opfamily. So we have to look up the opfamily's equality
* operator and get its membership.
*/
equality_op = get_opfamily_member(opfamily,
@@ -432,7 +432,7 @@ get_cheapest_path_for_pathkeys(List *paths, List *pathkeys,
/*
* Since cost comparison is a lot cheaper than pathkey comparison, do
- * that first. (XXX is that still true?)
+ * that first. (XXX is that still true?)
*/
if (matched_path != NULL &&
compare_path_costs(matched_path, path, cost_criterion) <= 0)
@@ -619,7 +619,7 @@ find_indexkey_var(PlannerInfo *root, RelOptInfo *rel, AttrNumber varattno)
/*
* convert_subquery_pathkeys
* Build a pathkeys list that describes the ordering of a subquery's
- * result, in the terms of the outer query. This is essentially a
+ * result, in the terms of the outer query. This is essentially a
* task of conversion.
*
* 'rel': outer query's RelOptInfo for the subquery relation.
@@ -672,7 +672,7 @@ convert_subquery_pathkeys(PlannerInfo *root, RelOptInfo *rel,
/*
* Note: it might look funny to be setting sortref = 0 for a
- * reference to a volatile sub_eclass. However, the
+ * reference to a volatile sub_eclass. However, the
* expression is *not* volatile in the outer query: it's just
* a Var referencing whatever the subquery emitted. (IOW, the
* outer query isn't going to re-execute the volatile
@@ -706,7 +706,7 @@ convert_subquery_pathkeys(PlannerInfo *root, RelOptInfo *rel,
/*
* Otherwise, the sub_pathkey's EquivalenceClass could contain
* multiple elements (representing knowledge that multiple items
- * are effectively equal). Each element might match none, one, or
+ * are effectively equal). Each element might match none, one, or
* more of the output columns that are visible to the outer query.
* This means we may have multiple possible representations of the
* sub_pathkey in the context of the outer query. Ideally we
@@ -936,7 +936,7 @@ make_pathkeys_for_sortclauses(PlannerInfo *root,
* right sides.
*
* Note this is called before EC merging is complete, so the links won't
- * necessarily point to canonical ECs. Before they are actually used for
+ * necessarily point to canonical ECs. Before they are actually used for
* anything, update_mergeclause_eclasses must be called to ensure that
* they've been updated to point to canonical ECs.
*/
@@ -1068,7 +1068,7 @@ find_mergeclauses_for_pathkeys(PlannerInfo *root,
* It's possible that multiple matching clauses might have different
* ECs on the other side, in which case the order we put them into our
* result makes a difference in the pathkeys required for the other
- * input path. However this routine hasn't got any info about which
+ * input path. However this routine hasn't got any info about which
* order would be best, so we don't worry about that.
*
* It's also possible that the selected mergejoin clauses produce
@@ -1099,7 +1099,7 @@ find_mergeclauses_for_pathkeys(PlannerInfo *root,
/*
* If we didn't find a mergeclause, we're done --- any additional
- * sort-key positions in the pathkeys are useless. (But we can still
+ * sort-key positions in the pathkeys are useless. (But we can still
* mergejoin if we found at least one mergeclause.)
*/
if (matched_restrictinfos == NIL)
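
In miniature, this keeps mergeclauses only for a leading prefix of the pathkeys; a sketch with invented keys and a string match standing in for EC membership:

    /* Toy: select mergeclauses matching a prefix of the sort keys. */
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        const char *pathkeys[] = {"a.x", "a.y", "a.z"};
        const char *clause_keys[] = {"a.x", "a.z"};   /* no clause for a.y */
        int npath = 3, nclause = 2, used = 0;

        for (int i = 0; i < npath; i++)
        {
            int found = 0;
            for (int j = 0; j < nclause; j++)
                if (strcmp(pathkeys[i], clause_keys[j]) == 0)
                    found = 1;
            if (!found)
                break;          /* later sort-key positions are useless */
            used++;
        }
        printf("usable mergeclauses: %d\n", used);    /* 1: only a.x */
        return 0;
    }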
@@ -1131,7 +1131,7 @@ find_mergeclauses_for_pathkeys(PlannerInfo *root,
* Returns a pathkeys list that can be applied to the outer relation.
*
* Since we assume here that a sort is required, there is no particular use
- * in matching any available ordering of the outerrel. (joinpath.c has an
+ * in matching any available ordering of the outerrel. (joinpath.c has an
* entirely separate code path for considering sort-free mergejoins.) Rather,
* it's interesting to try to match the requested query_pathkeys so that a
* second output sort may be avoided; and failing that, we try to list "more
@@ -1462,7 +1462,7 @@ pathkeys_useful_for_merging(PlannerInfo *root, RelOptInfo *rel, List *pathkeys)
/*
* If we didn't find a mergeclause, we're done --- any additional
- * sort-key positions in the pathkeys are useless. (But we can still
+ * sort-key positions in the pathkeys are useless. (But we can still
* mergejoin if we found at least one mergeclause.)
*/
if (matched)
@@ -1492,7 +1492,7 @@ right_merge_direction(PlannerInfo *root, PathKey *pathkey)
pathkey->pk_opfamily == query_pathkey->pk_opfamily)
{
/*
- * Found a matching query sort column. Prefer this pathkey's
+ * Found a matching query sort column. Prefer this pathkey's
* direction iff it matches. Note that we ignore pk_nulls_first,
* which means that a sort might be needed anyway ... but we still
* want to prefer only one of the two possible directions, and we
@@ -1568,13 +1568,13 @@ truncate_useless_pathkeys(PlannerInfo *root,
* useful according to truncate_useless_pathkeys().
*
* This is a cheap test that lets us skip building pathkeys at all in very
- * simple queries. It's OK to err in the direction of returning "true" when
+ * simple queries. It's OK to err in the direction of returning "true" when
* there really aren't any usable pathkeys, but erring in the other direction
* is bad --- so keep this in sync with the routines above!
*
* We could make the test more complex, for example checking to see if any of
* the joinclauses are really mergejoinable, but that likely wouldn't win
- * often enough to repay the extra cycles. Queries with neither a join nor
+ * often enough to repay the extra cycles. Queries with neither a join nor
* a sort are reasonably common, though, so this much work seems worthwhile.
*/
bool
diff --git a/src/backend/optimizer/path/tidpath.c b/src/backend/optimizer/path/tidpath.c
index 05c18b5a871..e54561e175d 100644
--- a/src/backend/optimizer/path/tidpath.c
+++ b/src/backend/optimizer/path/tidpath.c
@@ -19,7 +19,7 @@
* representation all the way through to execution.
*
* There is currently no special support for joins involving CTID; in
- * particular nothing corresponding to best_inner_indexscan(). Since it's
+ * particular nothing corresponding to best_inner_indexscan(). Since it's
* not very useful to store TIDs of one table in another table, there
* doesn't seem to be enough use-case to justify adding a lot of code
* for that.
@@ -57,7 +57,7 @@ static List *TidQualFromRestrictinfo(List *restrictinfo, int varno);
* or
* pseudoconstant = CTID
*
- * We check that the CTID Var belongs to relation "varno". That is probably
+ * We check that the CTID Var belongs to relation "varno". That is probably
* redundant considering this is only applied to restriction clauses, but
* let's be safe.
*/
diff --git a/src/backend/optimizer/plan/analyzejoins.c b/src/backend/optimizer/plan/analyzejoins.c
index 1784ac2fc5b..111df3c5320 100644
--- a/src/backend/optimizer/plan/analyzejoins.c
+++ b/src/backend/optimizer/plan/analyzejoins.c
@@ -40,7 +40,7 @@ static List *remove_rel_from_joinlist(List *joinlist, int relid, int *nremoved);
* Check for relations that don't actually need to be joined at all,
* and remove them from the query.
*
- * We are passed the current joinlist and return the updated list. Other
+ * We are passed the current joinlist and return the updated list. Other
* data structures that have to be updated are accessible via "root".
*/
List *
@@ -90,7 +90,7 @@ restart:
* Restart the scan. This is necessary to ensure we find all
* removable joins independently of ordering of the join_info_list
* (note that removal of attr_needed bits may make a join appear
- * removable that did not before). Also, since we just deleted the
+ * removable that did not before). Also, since we just deleted the
* current list cell, we'd have to have some kluge to continue the
* list scan anyway.
*/
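
The restart idiom behind that label, reduced to a toy where removing one element can make another removable (the removability test is invented):

    /* Toy restart-scan: removal can enable further removals. */
    #include <stdio.h>

    int main(void)
    {
        int items[] = {5, 2, 4, 1};
        int n = 4;

    restart:
        for (int i = 0; i < n; i++)
        {
            if (items[i] >= n)           /* toy removability test */
            {
                items[i] = items[--n];   /* delete current element */
                goto restart;            /* rescan from the top */
            }
        }
        printf("%d items remain\n", n);  /* 0: each removal enabled another */
        return 0;
    }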
@@ -107,7 +107,7 @@ restart:
* We already know that the clause is a binary opclause referencing only the
* rels in the current join. The point here is to check whether it has the
* form "outerrel_expr op innerrel_expr" or "innerrel_expr op outerrel_expr",
- * rather than mixing outer and inner vars on either side. If it matches,
+ * rather than mixing outer and inner vars on either side. If it matches,
* we set the transient flag outer_is_left to identify which side is which.
*/
static inline bool
@@ -154,7 +154,7 @@ join_is_removable(PlannerInfo *root, SpecialJoinInfo *sjinfo)
/*
* Currently, we only know how to remove left joins to a baserel with
- * unique indexes. We can check most of these criteria pretty trivially
+ * unique indexes. We can check most of these criteria pretty trivially
* to avoid doing useless extra work. But checking whether any of the
* indexes are unique would require iterating over the indexlist, so for
* now we just make sure there are indexes of some sort or other. If none
diff --git a/src/backend/optimizer/plan/createplan.c b/src/backend/optimizer/plan/createplan.c
index e791f20dc12..c246468c929 100644
--- a/src/backend/optimizer/plan/createplan.c
+++ b/src/backend/optimizer/plan/createplan.c
@@ -168,7 +168,7 @@ static Material *make_material(Plan *lefttree);
/*
* create_plan
* Creates the access plan for a query by recursively processing the
- * desired tree of pathnodes, starting at the node 'best_path'. For
+ * desired tree of pathnodes, starting at the node 'best_path'. For
* every pathnode found, we create a corresponding plan node containing
* appropriate id, target list, and qualification information.
*
@@ -282,7 +282,7 @@ create_scan_plan(PlannerInfo *root, Path *best_path)
/*
* For table scans, rather than using the relation targetlist (which is
* only those Vars actually needed by the query), we prefer to generate a
- * tlist containing all Vars in order. This will allow the executor to
+ * tlist containing all Vars in order. This will allow the executor to
* optimize away projection of the table tuples, if possible. (Note that
* planner.c may replace the tlist we generate here, forcing projection to
* occur.)
@@ -480,7 +480,7 @@ use_physical_tlist(PlannerInfo *root, RelOptInfo *rel)
*
* If the plan node immediately above a scan would prefer to get only
* needed Vars and not a physical tlist, it must call this routine to
- * undo the decision made by use_physical_tlist(). Currently, Hash, Sort,
+ * undo the decision made by use_physical_tlist(). Currently, Hash, Sort,
* and Material nodes want this, so they don't have to store useless columns.
*/
static void
@@ -610,7 +610,7 @@ create_join_plan(PlannerInfo *root, JoinPath *best_path)
/*
* * Expensive function pullups may have pulled local predicates * into
- * this path node. Put them in the qpqual of the plan node. * JMH,
+ * this path node. Put them in the qpqual of the plan node. * JMH,
* 6/15/92
*/
if (get_loc_restrictinfo(best_path) != NIL)
@@ -639,7 +639,7 @@ create_append_plan(PlannerInfo *root, AppendPath *best_path)
/*
* It is possible for the subplans list to contain only one entry, or even
- * no entries. Handle these cases specially.
+ * no entries. Handle these cases specially.
*
* XXX ideally, if there's just one entry, we'd not bother to generate an
* Append node but just return the single child. At the moment this does
@@ -1298,7 +1298,7 @@ create_bitmap_scan_plan(PlannerInfo *root,
/*
* When dealing with special operators, we will at this point have
- * duplicate clauses in qpqual and bitmapqualorig. We may as well drop
+ * duplicate clauses in qpqual and bitmapqualorig. We may as well drop
* 'em from bitmapqualorig, since there's no point in making the tests
* twice.
*/
@@ -1388,7 +1388,7 @@ create_bitmap_subplan(PlannerInfo *root, Path *bitmapqual,
/*
* Here, we only detect qual-free subplans. A qual-free subplan would
* cause us to generate "... OR true ..." which we may as well reduce
- * to just "true". We do not try to eliminate redundant subclauses
+ * to just "true". We do not try to eliminate redundant subclauses
* because (a) it's not as likely as in the AND case, and (b) we might
* well be working with hundreds or even thousands of OR conditions,
* perhaps from a long IN list. The performance of list_append_unique
@@ -1478,7 +1478,7 @@ create_bitmap_subplan(PlannerInfo *root, Path *bitmapqual,
/*
* We know that the index predicate must have been implied by the
* query condition as a whole, but it may or may not be implied by
- * the conditions that got pushed into the bitmapqual. Avoid
+ * the conditions that got pushed into the bitmapqual. Avoid
* generating redundant conditions.
*/
if (!predicate_implied_by(list_make1(pred), ipath->indexclauses))
@@ -2572,7 +2572,7 @@ fix_indexqual_references(PlannerInfo *root, IndexPath *index_path,
*
* This is a simplified version of fix_indexqual_references. The input does
* not have RestrictInfo nodes, and we assume that indxqual.c already
- * commuted the clauses to put the index keys on the left. Also, we don't
+ * commuted the clauses to put the index keys on the left. Also, we don't
* bother to support any cases except simple OpExprs, since nothing else
* is allowed for ordering operators.
*/
@@ -2809,7 +2809,7 @@ order_qual_clauses(PlannerInfo *root, List *clauses)
/*
* Sort. We don't use qsort() because it's not guaranteed stable for
- * equal keys. The expected number of entries is small enough that a
+ * equal keys. The expected number of entries is small enough that a
* simple insertion sort should be good enough.
*/
for (i = 1; i < nitems; i++)
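
The stability requirement is easy to see with duplicate costs; a self-contained version of that insertion sort (struct and values invented):

    /* Stable insertion sort by cost: equal-cost clauses keep their order. */
    #include <stdio.h>

    typedef struct { const char *clause; double cost; } QualItem;

    int main(void)
    {
        QualItem items[] = {
            {"cheap_a", 1.0}, {"pricey", 9.0}, {"cheap_b", 1.0}, {"mid", 4.0},
        };
        int nitems = 4;

        for (int i = 1; i < nitems; i++)
        {
            QualItem cur = items[i];
            int j = i;
            while (j > 0 && items[j - 1].cost > cur.cost)
            {
                items[j] = items[j - 1];
                j--;
            }
            items[j] = cur;
        }
        /* cheap_a still precedes cheap_b, which qsort() need not preserve. */
        for (int i = 0; i < nitems; i++)
            printf("%s (%.0f)\n", items[i].clause, items[i].cost);
        return 0;
    }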
@@ -3434,7 +3434,7 @@ make_sort(PlannerInfo *root, Plan *lefttree, int numCols,
* prepare_sort_from_pathkeys
* Prepare to sort according to given pathkeys
*
- * This is used to set up for both Sort and MergeAppend nodes. It calculates
+ * This is used to set up for both Sort and MergeAppend nodes. It calculates
* the executor's representation of the sort key information, and adjusts the
* plan targetlist if needed to add resjunk sort columns.
*
@@ -3447,7 +3447,7 @@ make_sort(PlannerInfo *root, Plan *lefttree, int numCols,
*
* We must convert the pathkey information into arrays of sort key column
* numbers, sort operator OIDs, collation OIDs, and nulls-first flags,
- * which is the representation the executor wants. These are returned into
+ * which is the representation the executor wants. These are returned into
* the output parameters *p_numsortkeys etc.
*
* When looking for matches to an EquivalenceClass's members, we will only
@@ -3890,7 +3890,7 @@ make_material(Plan *lefttree)
* materialize_finished_plan: stick a Material node atop a completed plan
*
* There are a couple of places where we want to attach a Material node
- * after completion of subquery_planner(). This currently requires hackery.
+ * after completion of subquery_planner(). This currently requires hackery.
* Since subquery_planner has already run SS_finalize_plan on the subplan
* tree, we have to kluge up parameter lists for the Material node.
* Possibly this could be fixed by postponing SS_finalize_plan processing
@@ -4106,7 +4106,7 @@ make_group(PlannerInfo *root,
/*
* distinctList is a list of SortGroupClauses, identifying the targetlist items
- * that should be considered by the Unique filter. The input path must
+ * that should be considered by the Unique filter. The input path must
* already be sorted accordingly.
*/
Unique *
@@ -4124,7 +4124,7 @@ make_unique(Plan *lefttree, List *distinctList)
/*
* Charge one cpu_operator_cost per comparison per input tuple. We assume
- * all columns get compared at most of the tuples. (XXX probably this is
+ * all columns get compared at most of the tuples. (XXX probably this is
* an overestimate.)
*/
plan->total_cost += cpu_operator_cost * plan->plan_rows * numCols;
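
Plugging in the default cpu_operator_cost of 0.0025, the charge is small but real (row count and column count invented for the example):

    /* Worked example of the Unique cost charge above. */
    #include <stdio.h>

    int main(void)
    {
        double cpu_operator_cost = 0.0025;   /* PostgreSQL default GUC value */
        double plan_rows = 100000.0;
        int numCols = 3;
        printf("added cost = %.1f\n",
               cpu_operator_cost * plan_rows * numCols);   /* 750.0 */
        return 0;
    }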
@@ -4380,7 +4380,7 @@ make_result(PlannerInfo *root,
* Build a ModifyTable plan node
*
* Currently, we don't charge anything extra for the actual table modification
- * work, nor for the RETURNING expressions if any. It would only be window
+ * work, nor for the RETURNING expressions if any. It would only be window
* dressing, since these are always top-level nodes and there is no way for
* the costs to change any higher-level planning choices. But we might want
* to make it look better sometime.
diff --git a/src/backend/optimizer/plan/initsplan.c b/src/backend/optimizer/plan/initsplan.c
index 24185a6cecb..a779bc3fa0e 100644
--- a/src/backend/optimizer/plan/initsplan.c
+++ b/src/backend/optimizer/plan/initsplan.c
@@ -77,12 +77,12 @@ static void check_hashjoinable(RestrictInfo *restrictinfo);
* appearing in the jointree.
*
* The initial invocation must pass root->parse->jointree as the value of
- * jtnode. Internally, the function recurses through the jointree.
+ * jtnode. Internally, the function recurses through the jointree.
*
* At the end of this process, there should be one baserel RelOptInfo for
* every non-join RTE that is used in the query. Therefore, this routine
* is the only place that should call build_simple_rel with reloptkind
- * RELOPT_BASEREL. (Note: build_simple_rel recurses internally to build
+ * RELOPT_BASEREL. (Note: build_simple_rel recurses internally to build
* "other rel" RelOptInfos for the members of any appendrels we find here.)
*/
void
@@ -224,7 +224,7 @@ add_vars_to_targetlist(PlannerInfo *root, List *vars,
* deconstruct_jointree
* Recursively scan the query's join tree for WHERE and JOIN/ON qual
* clauses, and add these to the appropriate restrictinfo and joininfo
- * lists belonging to base RelOptInfos. Also, add SpecialJoinInfo nodes
+ * lists belonging to base RelOptInfos. Also, add SpecialJoinInfo nodes
* to root->join_info_list for any outer joins appearing in the query tree.
* Return a "joinlist" data structure showing the join order decisions
* that need to be made by make_one_rel().
@@ -241,9 +241,9 @@ add_vars_to_targetlist(PlannerInfo *root, List *vars,
* be evaluated at the lowest level where all the variables it mentions are
* available. However, we cannot push a qual down into the nullable side(s)
* of an outer join since the qual might eliminate matching rows and cause a
- * NULL row to be incorrectly emitted by the join. Therefore, we artificially
+ * NULL row to be incorrectly emitted by the join. Therefore, we artificially
* OR the minimum-relids of such an outer join into the required_relids of
- * clauses appearing above it. This forces those clauses to be delayed until
+ * clauses appearing above it. This forces those clauses to be delayed until
* application of the outer join (or maybe even higher in the join tree).
*/
List *
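
That OR-ing reads naturally as bit arithmetic; a toy with relid sets as plain masks (the real code uses Bitmapsets):

    /* Toy: delay a qual above an outer join by widening its required relids. */
    #include <stdio.h>

    typedef unsigned Relids;

    int main(void)
    {
        Relids qual_relids = 0x1;        /* qual mentions only rel 1 */
        Relids oj_min_relids = 0x6;      /* outer join needs rels 2 and 3 */
        Relids required = qual_relids | oj_min_relids;

        /* Evaluable only once rels 1, 2, and 3 have all been joined. */
        printf("required relids mask: 0x%x\n", required);   /* 0x7 */
        return 0;
    }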
@@ -379,7 +379,7 @@ deconstruct_recurse(PlannerInfo *root, Node *jtnode, bool below_outer_join,
* regard for whether this level is an outer join, which is correct.
* Then we place our own join quals, which are restricted by lower
* outer joins in any case, and are forced to this level if this is an
- * outer join and they mention the outer side. Finally, if this is an
+ * outer join and they mention the outer side. Finally, if this is an
* outer join, we create a join_info_list entry for the join. This
* will prevent quals above us in the join tree that use those rels
* from being pushed down below this level. (It's okay for upper
@@ -580,7 +580,7 @@ make_outerjoininfo(PlannerInfo *root,
* any nullable rel is FOR UPDATE/SHARE.
*
* You might be wondering why this test isn't made far upstream in the
- * parser. It's because the parser hasn't got enough info --- consider
+ * parser. It's because the parser hasn't got enough info --- consider
* FOR UPDATE applied to a view. Only after rewriting and flattening do
* we know whether the view contains an outer join.
*
@@ -635,7 +635,7 @@ make_outerjoininfo(PlannerInfo *root,
min_lefthand = bms_intersect(clause_relids, left_rels);
/*
- * Similarly for required RHS. But here, we must also include any lower
+ * Similarly for required RHS. But here, we must also include any lower
* inner joins, to ensure we don't try to commute with any of them.
*/
min_righthand = bms_int_members(bms_union(clause_relids, inner_join_rels),
@@ -687,7 +687,7 @@ make_outerjoininfo(PlannerInfo *root,
* Here, we have to consider that "our join condition" includes any
* clauses that syntactically appeared above the lower OJ and below
* ours; those are equivalent to degenerate clauses in our OJ and must
- * be treated as such. Such clauses obviously can't reference our
+ * be treated as such. Such clauses obviously can't reference our
* LHS, and they must be non-strict for the lower OJ's RHS (else
* reduce_outer_joins would have reduced the lower OJ to a plain
* join). Hence the other ways in which we handle clauses within our
@@ -771,7 +771,7 @@ make_outerjoininfo(PlannerInfo *root,
* distribute_qual_to_rels
* Add clause information to either the baserestrictinfo or joininfo list
* (depending on whether the clause is a join) of each base relation
- * mentioned in the clause. A RestrictInfo node is created and added to
+ * mentioned in the clause. A RestrictInfo node is created and added to
* the appropriate list for each rel. Alternatively, if the clause uses a
* mergejoinable operator and is not delayed by outer-join rules, enter
* the left- and right-side expressions into the query's list of
@@ -853,10 +853,10 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause,
* gating Result plan node. We put such a clause into the regular
* RestrictInfo lists for the moment, but eventually createplan.c will
* pull it out and make a gating Result node immediately above whatever
- * plan node the pseudoconstant clause is assigned to. It's usually best
+ * plan node the pseudoconstant clause is assigned to. It's usually best
* to put a gating node as high in the plan tree as possible. If we are
* not below an outer join, we can actually push the pseudoconstant qual
- * all the way to the top of the tree. If we are below an outer join, we
+ * all the way to the top of the tree. If we are below an outer join, we
* leave the qual at its original syntactic level (we could push it up to
* just below the outer join, but that seems more complex than it's
* worth).
@@ -910,7 +910,7 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause,
* Note: it is not immediately obvious that a simple boolean is enough
* for this: if for some reason we were to attach a degenerate qual to
* its original join level, it would need to be treated as an outer join
- * qual there. However, this cannot happen, because all the rels the
+ * qual there. However, this cannot happen, because all the rels the
* clause mentions must be in the outer join's min_righthand, therefore
* the join it needs must be formed before the outer join; and we always
* attach quals to the lowest level where they can be evaluated. But
@@ -944,7 +944,7 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause,
* We can't use such a clause to deduce equivalence (the left and
* right sides might be unequal above the join because one of them has
* gone to NULL) ... but we might be able to use it for more limited
- * deductions, if it is mergejoinable. So consider adding it to the
+ * deductions, if it is mergejoinable. So consider adding it to the
* lists of set-aside outer-join clauses.
*/
is_pushed_down = false;
@@ -974,7 +974,7 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause,
else
{
/*
- * Normal qual clause or degenerate outer-join clause. Either way, we
+ * Normal qual clause or degenerate outer-join clause. Either way, we
* can mark it as pushed-down.
*/
is_pushed_down = true;
@@ -1092,7 +1092,7 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause,
*
* In all cases, it's important to initialize the left_ec and right_ec
* fields of a mergejoinable clause, so that all possibly mergejoinable
- * expressions have representations in EquivalenceClasses. If
+ * expressions have representations in EquivalenceClasses. If
* process_equivalence is successful, it will take care of that;
* otherwise, we have to call initialize_mergeclause_eclasses to do it.
*/
@@ -1168,7 +1168,7 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause,
* For an is_pushed_down qual, we can evaluate the qual as soon as (1) we have
* all the rels it mentions, and (2) we are at or above any outer joins that
* can null any of these rels and are below the syntactic location of the
- * given qual. We must enforce (2) because pushing down such a clause below
+ * given qual. We must enforce (2) because pushing down such a clause below
* the OJ might cause the OJ to emit null-extended rows that should not have
* been formed, or that should have been rejected by the clause. (This is
* only an issue for non-strict quals, since if we can prove a qual mentioning
@@ -1194,7 +1194,7 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause,
* required relids overlap the LHS too) causes that OJ's delay_upper_joins
* flag to be set TRUE. This will prevent any higher-level OJs from
* being interchanged with that OJ, which would result in not having any
- * correct place to evaluate the qual. (The case we care about here is a
+ * correct place to evaluate the qual. (The case we care about here is a
* sub-select WHERE clause within the RHS of some outer join. The WHERE
* clause must effectively be treated as a degenerate clause of that outer
* join's condition. Rather than trying to match such clauses with joins
@@ -1590,7 +1590,7 @@ check_mergejoinable(RestrictInfo *restrictinfo)
* info fields in the restrictinfo.
*
* Currently, we support hashjoin for binary opclauses where
- * the operator is a hashjoinable operator. The arguments can be
+ * the operator is a hashjoinable operator. The arguments can be
* anything --- as long as there are no volatile functions in them.
*/
static void
diff --git a/src/backend/optimizer/plan/planagg.c b/src/backend/optimizer/plan/planagg.c
index 88a6c99ea50..a7f950a4f64 100644
--- a/src/backend/optimizer/plan/planagg.c
+++ b/src/backend/optimizer/plan/planagg.c
@@ -10,9 +10,9 @@
* ORDER BY col ASC/DESC
* LIMIT 1)
* Given a suitable index on tab.col, this can be much faster than the
- * generic scan-all-the-rows aggregation plan. We can handle multiple
+ * generic scan-all-the-rows aggregation plan. We can handle multiple
* MIN/MAX aggregates by generating multiple subqueries, and their
- * orderings can be different. However, if the query contains any
+ * orderings can be different. However, if the query contains any
* non-optimizable aggregates, there's no point since we'll have to
* scan all the rows anyway.
*
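
The payoff shows even in a toy: the generic plan must touch every value, while an ordered source answers MIN from its first row (arrays invented; the sorted array stands in for a btree index scan):

    /* Toy contrast between scan-all MIN and "first row in index order". */
    #include <stdio.h>

    int main(void)
    {
        int col[] = {7, 3, 9, 1, 4};       /* heap (unordered) values */
        int n = 5, min = col[0];

        for (int i = 1; i < n; i++)        /* generic aggregate: all rows */
            if (col[i] < min)
                min = col[i];

        int ordered[] = {1, 3, 4, 7, 9};   /* what an index scan would yield */
        printf("scan-all: %d, ORDER BY ... LIMIT 1: %d\n", min, ordered[0]);
        return 0;
    }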
@@ -125,7 +125,7 @@ preprocess_minmax_aggregates(PlannerInfo *root, List *tlist)
/*
* Scan the tlist and HAVING qual to find all the aggregates and verify
- * all are MIN/MAX aggregates. Stop as soon as we find one that isn't.
+ * all are MIN/MAX aggregates. Stop as soon as we find one that isn't.
*/
aggs_list = NIL;
if (find_minmax_aggs_walker((Node *) tlist, &aggs_list))
@@ -160,7 +160,7 @@ preprocess_minmax_aggregates(PlannerInfo *root, List *tlist)
* We can use either an ordering that gives NULLS FIRST or one that
* gives NULLS LAST; furthermore there's unlikely to be much
* performance difference between them, so it doesn't seem worth
- * costing out both ways if we get a hit on the first one. NULLS
+ * costing out both ways if we get a hit on the first one. NULLS
* FIRST is more likely to be available if the operator is a
* reverse-sort operator, so try that first if reverse.
*/
diff --git a/src/backend/optimizer/plan/planmain.c b/src/backend/optimizer/plan/planmain.c
index ff39d5754dc..009841d9dd3 100644
--- a/src/backend/optimizer/plan/planmain.c
+++ b/src/backend/optimizer/plan/planmain.c
@@ -40,7 +40,7 @@ static void canonicalize_all_pathkeys(PlannerInfo *root);
* which may involve joins but not any fancier features.
*
* Since query_planner does not handle the toplevel processing (grouping,
- * sorting, etc) it cannot select the best path by itself. It selects
+ * sorting, etc) it cannot select the best path by itself. It selects
* two paths: the cheapest path that produces all the required tuples,
* independent of any ordering considerations, and the cheapest path that
* produces the expected fraction of the required tuples in the required
@@ -64,7 +64,7 @@ static void canonicalize_all_pathkeys(PlannerInfo *root);
* does not use grouping
*
* Note: the PlannerInfo node also includes a query_pathkeys field, which is
- * both an input and an output of query_planner(). The input value signals
+ * both an input and an output of query_planner(). The input value signals
* query_planner that the indicated sort order is wanted in the final output
* plan. But this value has not yet been "canonicalized", since the needed
* info does not get computed until we scan the qual clauses. We canonicalize
@@ -109,7 +109,7 @@ query_planner(PlannerInfo *root, List *tlist,
/*
* If the query has an empty join tree, then it's something easy like
- * "SELECT 2+2;" or "INSERT ... VALUES()". Fall through quickly.
+ * "SELECT 2+2;" or "INSERT ... VALUES()". Fall through quickly.
*/
if (parse->jointree->fromlist == NIL)
{
@@ -179,7 +179,7 @@ query_planner(PlannerInfo *root, List *tlist,
/*
* Examine the targetlist and join tree, adding entries to baserel
* targetlists for all referenced Vars, and generating PlaceHolderInfo
- * entries for all referenced PlaceHolderVars. Restrict and join clauses
+ * entries for all referenced PlaceHolderVars. Restrict and join clauses
* are added to appropriate lists belonging to the mentioned relations. We
* also build EquivalenceClasses for provably equivalent expressions. The
* SpecialJoinInfo list is also built to hold information about join order
@@ -201,7 +201,7 @@ query_planner(PlannerInfo *root, List *tlist,
/*
* If we formed any equivalence classes, generate additional restriction
- * clauses as appropriate. (Implied join clauses are formed on-the-fly
+ * clauses as appropriate. (Implied join clauses are formed on-the-fly
* later.)
*/
generate_base_implied_equalities(root);
@@ -216,14 +216,14 @@ query_planner(PlannerInfo *root, List *tlist,
/*
* Examine any "placeholder" expressions generated during subquery pullup.
* Make sure that the Vars they need are marked as needed at the relevant
- * join level. This must be done before join removal because it might
+ * join level. This must be done before join removal because it might
* cause Vars or placeholders to be needed above a join when they weren't
* so marked before.
*/
fix_placeholder_input_needed_levels(root);
/*
- * Remove any useless outer joins. Ideally this would be done during
+ * Remove any useless outer joins. Ideally this would be done during
* jointree preprocessing, but the necessary information isn't available
* until we've built baserel data structures and classified qual clauses.
*/
@@ -308,7 +308,7 @@ query_planner(PlannerInfo *root, List *tlist,
/*
* If both GROUP BY and ORDER BY are specified, we will need two
* levels of sort --- and, therefore, certainly need to read all the
- * tuples --- unless ORDER BY is a subset of GROUP BY. Likewise if we
+ * tuples --- unless ORDER BY is a subset of GROUP BY. Likewise if we
* have both DISTINCT and GROUP BY, or if we have a window
* specification not compatible with the GROUP BY.
*/
diff --git a/src/backend/optimizer/plan/planner.c b/src/backend/optimizer/plan/planner.c
index 2d8b008607a..3a812877d62 100644
--- a/src/backend/optimizer/plan/planner.c
+++ b/src/backend/optimizer/plan/planner.c
@@ -184,7 +184,7 @@ standard_planner(Query *parse, int cursorOptions, ParamListInfo boundParams)
/*
* We document cursor_tuple_fraction as simply being a fraction, which
- * means the edge cases 0 and 1 have to be treated specially here. We
+ * means the edge cases 0 and 1 have to be treated specially here. We
* convert 1 to 0 ("all the tuples") and 0 to a very small fraction.
*/
if (tuple_fraction >= 1.0)
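
The edge-case mapping this comment describes can be sketched as a hypothetical standalone function (a sketch only; the exact epsilon used for "a very small fraction" is an assumption, not taken from this diff):

    #include <stdio.h>

    /*
     * Standalone sketch of the edge-case handling described above.
     * In the planner's convention a tuple_fraction of 0.0 means
     * "fetch all tuples", so a user-supplied 1.0 maps to 0.0, and a
     * literal 0.0 is nudged to a tiny positive fraction instead
     * (the epsilon value here is an assumption).
     */
    static double
    clamp_cursor_fraction(double fraction)
    {
        if (fraction >= 1.0)
            return 0.0;         /* planner spelling of "all the tuples" */
        if (fraction <= 0.0)
            return 1e-10;       /* "a very small fraction", still > 0 */
        return fraction;
    }

    int
    main(void)
    {
        printf("%g %g %g\n",
               clamp_cursor_fraction(1.0),   /* 0     */
               clamp_cursor_fraction(0.0),   /* 1e-10 */
               clamp_cursor_fraction(0.25)); /* 0.25  */
        return 0;
    }
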
@@ -386,7 +386,7 @@ subquery_planner(PlannerGlobal *glob, Query *parse,
}
/*
- * Preprocess RowMark information. We need to do this after subquery
+ * Preprocess RowMark information. We need to do this after subquery
* pullup (so that all non-inherited RTEs are present) and before
* inheritance expansion (so that the info is available for
* expand_inherited_tables to examine and modify).
@@ -474,7 +474,7 @@ subquery_planner(PlannerGlobal *glob, Query *parse,
* to execute that we're better off doing it only once per group, despite
* the loss of selectivity. This is hard to estimate short of doing the
* entire planning process twice, so we use a heuristic: clauses
- * containing subplans are left in HAVING. Otherwise, we move or copy the
+ * containing subplans are left in HAVING. Otherwise, we move or copy the
* HAVING clause into WHERE, in hopes of eliminating tuples before
* aggregation instead of after.
*
@@ -892,7 +892,7 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
/*
* If there's a top-level ORDER BY, assume we have to fetch all the
- * tuples. This might be too simplistic given all the hackery below
+ * tuples. This might be too simplistic given all the hackery below
* to possibly avoid the sort; but the odds of accurate estimates here
* are pretty low anyway.
*/
@@ -920,7 +920,7 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
/*
* We should not need to call preprocess_targetlist, since we must be
- * in a SELECT query node. Instead, use the targetlist returned by
+ * in a SELECT query node. Instead, use the targetlist returned by
* plan_set_operations (since this tells whether it returned any
* resjunk columns!), and transfer any sort key information from the
* original tlist.
@@ -1034,7 +1034,7 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
/*
* Calculate pathkeys that represent grouping/ordering requirements.
* Stash them in PlannerInfo so that query_planner can canonicalize
- * them after EquivalenceClasses have been formed. The sortClause is
+ * them after EquivalenceClasses have been formed. The sortClause is
* certainly sort-able, but GROUP BY and DISTINCT might not be, in
* which case we just leave their pathkeys empty.
*/
@@ -1091,7 +1091,7 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
* Note: if we have both ORDER BY and GROUP BY, and ORDER BY is a
* superset of GROUP BY, it would be tempting to request sort by ORDER
* BY --- but that might just leave us failing to exploit an available
- * sort order at all. Needs more thought. The choice for DISTINCT
+ * sort order at all. Needs more thought. The choice for DISTINCT
* versus ORDER BY is much easier, since we know that the parser
* ensured that one is a superset of the other.
*/
@@ -1237,7 +1237,7 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
/*
* create_plan() returns a plan with just a "flat" tlist of
* required Vars. Usually we need to insert the sub_tlist as the
- * tlist of the top plan node. However, we can skip that if we
+ * tlist of the top plan node. However, we can skip that if we
* determined that whatever query_planner chose to return will be
* good enough.
*/
@@ -1400,7 +1400,7 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
* Furthermore, there cannot be any variables in either HAVING
* or the targetlist, so we actually do not need the FROM
* table at all! We can just throw away the plan-so-far and
- * generate a Result node. This is a sufficiently unusual
+ * generate a Result node. This is a sufficiently unusual
* corner case that it's not worth contorting the structure of
* this routine to avoid having to generate the plan in the
* first place.
@@ -1449,7 +1449,7 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
* make_sort_from_pathkeys won't add those on its own, and anyway
* we want them evaluated only once at the bottom of the stack. As
* we climb up the stack, we add outputs for the WindowFuncs
- * computed at each level. Also, each input tlist has to present
+ * computed at each level. Also, each input tlist has to present
* all the columns needed to sort the data for the next WindowAgg
* step. That's handled internally by make_sort_from_pathkeys,
* but we need the copyObject steps here to ensure that each plan
@@ -1854,7 +1854,7 @@ preprocess_rowmarks(PlannerInfo *root)
/*
* Currently, it is syntactically impossible to have FOR UPDATE
- * applied to an update/delete target rel. If that ever becomes
+ * applied to an update/delete target rel. If that ever becomes
* possible, we should drop the target from the PlanRowMark list.
*/
Assert(rc->rti != parse->resultRelation);
@@ -1918,7 +1918,7 @@ preprocess_rowmarks(PlannerInfo *root)
* preprocess_limit - do pre-estimation for LIMIT and/or OFFSET clauses
*
* We try to estimate the values of the LIMIT/OFFSET clauses, and pass the
- * results back in *count_est and *offset_est. These variables are set to
+ * results back in *count_est and *offset_est. These variables are set to
* 0 if the corresponding clause is not present, and -1 if it's present
* but we couldn't estimate the value for it. (The "0" convention is OK
* for OFFSET but a little bit bogus for LIMIT: effectively we estimate
@@ -1927,7 +1927,7 @@ preprocess_rowmarks(PlannerInfo *root)
* be passed to make_limit, which see if you change this code.
*
* The return value is the suitably adjusted tuple_fraction to use for
- * planning the query. This adjustment is not overridable, since it reflects
+ * planning the query. This adjustment is not overridable, since it reflects
* plan actions that grouping_planner() will certainly take, not assumptions
* about context.
*/
@@ -2051,7 +2051,7 @@ preprocess_limit(PlannerInfo *root, double tuple_fraction,
else if (*offset_est != 0 && tuple_fraction > 0.0)
{
/*
- * We have an OFFSET but no LIMIT. This acts entirely differently
+ * We have an OFFSET but no LIMIT. This acts entirely differently
* from the LIMIT case: here, we need to increase rather than decrease
* the caller's tuple_fraction, because the OFFSET acts to cause more
* tuples to be fetched instead of fewer. This only matters if we got
@@ -2066,7 +2066,7 @@ preprocess_limit(PlannerInfo *root, double tuple_fraction,
/*
* If we have absolute counts from both caller and OFFSET, add them
- * together; likewise if they are both fractional. If one is
+ * together; likewise if they are both fractional. If one is
* fractional and the other absolute, we want to take the larger, and
* we heuristically assume that's the fractional one.
*/
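
The combination rule spelled out in this comment can be modeled directly. A minimal sketch with hypothetical names, assuming the planner's convention that values >= 1.0 are absolute row counts and values below 1.0 are fractions (zero, meaning "fetch all", is assumed to have been handled earlier):

    #include <stdio.h>

    /*
     * Combine the caller's demand with the OFFSET estimate, per the
     * rule above: add when both are absolute or both fractional;
     * otherwise keep the fractional one, heuristically assumed larger.
     */
    static double
    combine_tuple_fractions(double caller, double offset)
    {
        int caller_abs = (caller >= 1.0);
        int offset_abs = (offset >= 1.0);

        if (caller_abs == offset_abs)
            return caller + offset;             /* same kind: add them */
        return caller_abs ? offset : caller;    /* keep the fraction */
    }

    int
    main(void)
    {
        printf("%g\n", combine_tuple_fractions(10.0, 20.0)); /* 30   */
        printf("%g\n", combine_tuple_fractions(0.1, 0.2));   /* 0.3  */
        printf("%g\n", combine_tuple_fractions(10.0, 0.25)); /* 0.25 */
        return 0;
    }
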
@@ -2214,7 +2214,7 @@ choose_hashed_grouping(PlannerInfo *root,
/*
* Executor doesn't support hashed aggregation with DISTINCT or ORDER BY
- * aggregates. (Doing so would imply storing *all* the input values in
+ * aggregates. (Doing so would imply storing *all* the input values in
* the hash table, and/or running many sorts in parallel, either of which
* seems like a certain loser.)
*/
@@ -2356,7 +2356,7 @@ choose_hashed_grouping(PlannerInfo *root,
* pass in the costs as individual variables.)
*
* But note that making the two choices independently is a bit bogus in
- * itself. If the two could be combined into a single choice operation
+ * itself. If the two could be combined into a single choice operation
* it'd probably be better, but that seems far too unwieldy to be practical,
* especially considering that the combination of GROUP BY and DISTINCT
* isn't very common in real queries. By separating them, we are giving
@@ -2449,7 +2449,7 @@ choose_hashed_distinct(PlannerInfo *root,
0.0, work_mem, limit_tuples);
/*
- * Now for the GROUP case. See comments in grouping_planner about the
+ * Now for the GROUP case. See comments in grouping_planner about the
* sorting choices here --- this code should match that code.
*/
sorted_p.startup_cost = sorted_startup_cost;
@@ -2517,7 +2517,7 @@ choose_hashed_distinct(PlannerInfo *root,
* we want to pass this targetlist to the subplan:
* a,b,c,d,a+b
* where the a+b target will be used by the Sort/Group steps, and the
- * other targets will be used for computing the final results. (In the
+ * other targets will be used for computing the final results. (In the
* above example we could theoretically suppress the a and b targets and
* pass down only c,d,a+b, but it's not really worth the trouble to
* eliminate simple var references from the subplan. We will avoid doing
@@ -2633,7 +2633,7 @@ make_subplanTargetList(PlannerInfo *root,
* Locate grouping columns in the tlist chosen by query_planner.
*
* This is only needed if we don't use the sub_tlist chosen by
- * make_subplanTargetList. We have to forget the column indexes found
+ * make_subplanTargetList. We have to forget the column indexes found
* by that routine and re-locate the grouping exprs in the real sub_tlist.
* We assume the grouping exprs are just Vars (see make_subplanTargetList).
*/
@@ -2664,11 +2664,11 @@ locate_grouping_columns(PlannerInfo *root,
/*
* The grouping column returned by create_plan might not have the same
- * typmod as the original Var. (This can happen in cases where a
+ * typmod as the original Var. (This can happen in cases where a
* set-returning function has been inlined, so that we now have more
* knowledge about what it returns than we did when the original Var
* was created.) So we can't use tlist_member() to search the tlist;
- * instead use tlist_member_match_var. For safety, still check that
+ * instead use tlist_member_match_var. For safety, still check that
* the vartype matches.
*/
if (!(groupexpr && IsA(groupexpr, Var)))
@@ -2897,7 +2897,7 @@ make_pathkeys_for_window(PlannerInfo *root, WindowClause *wc,
* This depends on the behavior of make_pathkeys_for_window()!
*
* We are given the target WindowClause and an array of the input column
- * numbers associated with the resulting pathkeys. In the easy case, there
+ * numbers associated with the resulting pathkeys. In the easy case, there
* are the same number of pathkey columns as partitioning + ordering columns
* and we just have to copy some data around. However, it's possible that
* some of the original partitioning + ordering columns were eliminated as
@@ -2909,7 +2909,7 @@ make_pathkeys_for_window(PlannerInfo *root, WindowClause *wc,
* determine which keys are significant.
*
* The method used here is a bit brute-force: add the sort columns to a list
- * one at a time and note when the resulting pathkey list gets longer. But
+ * one at a time and note when the resulting pathkey list gets longer. But
* it's a sufficiently uncommon case that a faster way doesn't seem worth
* the amount of code refactoring that'd be needed.
*----------
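
A toy model of that brute-force loop, with pathkey redundancy reduced to "this column number already appeared" (a loose sketch with hypothetical names; the real test involves equivalence classes, not simple duplicates):

    #include <stdbool.h>
    #include <stdio.h>

    /*
     * Add sort columns one at a time and record which additions
     * actually lengthen the "pathkey" list, as described above.
     * Assumes at most 64 columns for the fixed-size array.
     */
    static int
    find_significant_cols(const int *sortcols, int ncols, bool *significant)
    {
        int keys[64];       /* the growing pathkey list */
        int nkeys = 0;

        for (int i = 0; i < ncols; i++)
        {
            bool redundant = false;

            for (int j = 0; j < nkeys; j++)
                if (keys[j] == sortcols[i])
                    redundant = true;
            significant[i] = !redundant;    /* did the list get longer? */
            if (!redundant)
                keys[nkeys++] = sortcols[i];
        }
        return nkeys;
    }

    int
    main(void)
    {
        int  cols[] = {3, 1, 3, 2};     /* third key repeats the first */
        bool sig[4];
        int  n = find_significant_cols(cols, 4, sig);

        printf("%d pathkeys; third sort column significant? %s\n",
               n, sig[2] ? "yes" : "no");   /* 3 pathkeys; ... no */
        return 0;
    }
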
diff --git a/src/backend/optimizer/plan/setrefs.c b/src/backend/optimizer/plan/setrefs.c
index da5a3fadf45..def28c7f134 100644
--- a/src/backend/optimizer/plan/setrefs.c
+++ b/src/backend/optimizer/plan/setrefs.c
@@ -135,7 +135,7 @@ static bool extract_query_dependencies_walker(Node *node,
/*
* set_plan_references
*
- * This is the final processing pass of the planner/optimizer. The plan
+ * This is the final processing pass of the planner/optimizer. The plan
* tree is complete; we just have to adjust some representational details
* for the convenience of the executor:
*
@@ -185,7 +185,7 @@ static bool extract_query_dependencies_walker(Node *node,
* and glob->invalItems (for everything else).
*
* Notice that we modify Plan nodes in-place, but use expression_tree_mutator
- * to process targetlist and qual expressions. We can assume that the Plan
+ * to process targetlist and qual expressions. We can assume that the Plan
* nodes were just built by the planner and are not multiply referenced, but
* it's not so safe to assume that for expression tree nodes.
*/
@@ -234,7 +234,7 @@ set_plan_references(PlannerGlobal *glob, Plan *plan,
* We do this even though the RTE might be unreferenced in the plan
* tree; this would correspond to cases such as views that were
* expanded, child tables that were eliminated by constraint
- * exclusion, etc. Schema invalidation on such a rel must still force
+ * exclusion, etc. Schema invalidation on such a rel must still force
* rebuilding of the plan.
*
* Note we don't bother to avoid duplicate list entries. We could,
@@ -436,7 +436,7 @@ set_plan_refs(PlannerGlobal *glob, Plan *plan, int rtoffset)
/*
* These plan types don't actually bother to evaluate their
* targetlists, because they just return their unmodified input
- * tuples. Even though the targetlist won't be used by the
+ * tuples. Even though the targetlist won't be used by the
* executor, we fix it up for possible use by EXPLAIN (not to
* mention ease of debugging --- wrong varnos are very confusing).
*/
@@ -454,7 +454,7 @@ set_plan_refs(PlannerGlobal *glob, Plan *plan, int rtoffset)
/*
* Like the plan types above, LockRows doesn't evaluate its
- * tlist or quals. But we have to fix up the RT indexes in
+ * tlist or quals. But we have to fix up the RT indexes in
* its rowmarks.
*/
set_dummy_tlist_references(plan, rtoffset);
@@ -755,7 +755,7 @@ set_subqueryscan_references(PlannerGlobal *glob,
else
{
/*
- * Keep the SubqueryScan node. We have to do the processing that
+ * Keep the SubqueryScan node. We have to do the processing that
* set_plan_references would otherwise have done on it. Notice we do
* not do set_upper_references() here, because a SubqueryScan will
* always have been created with correct references to its subplan's
@@ -1220,7 +1220,7 @@ set_dummy_tlist_references(Plan *plan, int rtoffset)
*
* In most cases, subplan tlists will be "flat" tlists with only Vars,
* so we try to optimize that case by extracting information about Vars
- * in advance. Matching a parent tlist to a child is still an O(N^2)
+ * in advance. Matching a parent tlist to a child is still an O(N^2)
* operation, but at least with a much smaller constant factor than plain
* tlist_member() searches.
*
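
The "extract information about Vars in advance" trick reads roughly like this as a standalone sketch (hypothetical types and names; the real code builds an array of varno/varattno entries from the child tlist):

    #include <stdio.h>

    typedef struct { int varno; int varattno; } VarInfo;

    /*
     * Linear search over precomputed (varno, varattno) pairs: still
     * O(N) per parent entry, hence O(N^2) overall, but each probe is
     * two int compares instead of a generic expression comparison.
     */
    static int
    find_var(const VarInfo *vars, int n, int varno, int varattno)
    {
        for (int i = 0; i < n; i++)
            if (vars[i].varno == varno && vars[i].varattno == varattno)
                return i + 1;       /* 1-based position, like a resno */
        return 0;                   /* not found */
    }

    int
    main(void)
    {
        VarInfo child[] = {{1, 1}, {1, 2}, {2, 1}};

        printf("%d\n", find_var(child, 3, 1, 2));   /* prints 2 */
        return 0;
    }
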
@@ -1661,7 +1661,7 @@ fix_upper_expr_mutator(Node *node, fix_upper_expr_context *context)
* adjust any Vars that refer to other tables to reference junk tlist
* entries in the top subplan's targetlist. Vars referencing the result
* table should be left alone, however (the executor will evaluate them
- * using the actual heap tuple, after firing triggers if any). In the
+ * using the actual heap tuple, after firing triggers if any). In the
* adjusted RETURNING list, result-table Vars will have their original
* varno (plus rtoffset), but Vars for other rels will have varno OUTER.
*
diff --git a/src/backend/optimizer/plan/subselect.c b/src/backend/optimizer/plan/subselect.c
index 05ce048c447..c50709ccc1b 100644
--- a/src/backend/optimizer/plan/subselect.c
+++ b/src/backend/optimizer/plan/subselect.c
@@ -435,7 +435,7 @@ make_subplan(PlannerInfo *root, Query *orig_subquery, SubLinkType subLinkType,
Node *result;
/*
- * Copy the source Query node. This is a quick and dirty kluge to resolve
+ * Copy the source Query node. This is a quick and dirty kluge to resolve
* the fact that the parser can generate trees with multiple links to the
* same sub-Query node, but the planner wants to scribble on the Query.
* Try to clean this up when we do querytree redesign...
@@ -460,7 +460,7 @@ make_subplan(PlannerInfo *root, Query *orig_subquery, SubLinkType subLinkType,
* path/costsize.c.
*
* XXX If an ANY subplan is uncorrelated, build_subplan may decide to hash
- * its output. In that case it would've been better to specify full
+ * its output. In that case it would've been better to specify full
* retrieval. At present, however, we can only check hashability after
* we've made the subplan :-(. (Determining whether it'll fit in work_mem
* is the really hard part.) Therefore, we don't want to be too
@@ -499,7 +499,7 @@ make_subplan(PlannerInfo *root, Query *orig_subquery, SubLinkType subLinkType,
/*
* If it's a correlated EXISTS with an unimportant targetlist, we might be
* able to transform it to the equivalent of an IN and then implement it
- * by hashing. We don't have enough information yet to tell which way is
+ * by hashing. We don't have enough information yet to tell which way is
* likely to be better (it depends on the expected number of executions of
* the EXISTS qual, and we are much too early in planning the outer query
* to be able to guess that). So we generate both plans, if possible, and
@@ -729,7 +729,7 @@ build_subplan(PlannerInfo *root, Plan *plan, List *rtable, List *rowmarks,
* Otherwise, we have the option to tack a Material node onto the top
* of the subplan, to reduce the cost of reading it repeatedly. This
* is pointless for a direct-correlated subplan, since we'd have to
- * recompute its results each time anyway. For uncorrelated/undirect
+ * recompute its results each time anyway. For uncorrelated/undirect
* correlated subplans, we add Material unless the subplan's top plan
* node would materialize its output anyway. Also, if enable_material
* is false, then the user does not want us to materialize anything
@@ -756,10 +756,10 @@ build_subplan(PlannerInfo *root, Plan *plan, List *rtable, List *rowmarks,
/*
* A parameterless subplan (not initplan) should be prepared to handle
- * REWIND efficiently. If it has direct parameters then there's no point
+ * REWIND efficiently. If it has direct parameters then there's no point
* since it'll be reset on each scan anyway; and if it's an initplan then
* there's no point since it won't get re-run without parameter changes
- * anyway. The input of a hashed subplan doesn't need REWIND either.
+ * anyway. The input of a hashed subplan doesn't need REWIND either.
*/
if (splan->parParam == NIL && !isInitPlan && !splan->useHashTable)
root->glob->rewindPlanIDs = bms_add_member(root->glob->rewindPlanIDs,
@@ -859,7 +859,7 @@ generate_subquery_vars(PlannerInfo *root, List *tlist, Index varno)
/*
* convert_testexpr: convert the testexpr given by the parser into
* actually executable form. This entails replacing PARAM_SUBLINK Params
- * with Params or Vars representing the results of the sub-select. The
+ * with Params or Vars representing the results of the sub-select. The
* nodes to be substituted are passed in as the List result from
* generate_subquery_params or generate_subquery_vars.
*/
@@ -961,7 +961,7 @@ testexpr_is_hashable(Node *testexpr)
*
* The combining operators must be hashable and strict. The need for
* hashability is obvious, since we want to use hashing. Without
- * strictness, behavior in the presence of nulls is too unpredictable. We
+ * strictness, behavior in the presence of nulls is too unpredictable. We
* actually must assume even more than plain strictness: they can't yield
* NULL for non-null inputs, either (see nodeSubplan.c). However, hash
* indexes and hash joins assume that too.
@@ -1069,7 +1069,7 @@ SS_process_ctes(PlannerInfo *root)
}
/*
- * Copy the source Query node. Probably not necessary, but let's keep
+ * Copy the source Query node. Probably not necessary, but let's keep
* this similar to make_subplan.
*/
subquery = (Query *) copyObject(cte->ctequery);
@@ -1095,7 +1095,7 @@ SS_process_ctes(PlannerInfo *root)
elog(ERROR, "unexpected outer reference in CTE query");
/*
- * Make a SubPlan node for it. This is just enough unlike
+ * Make a SubPlan node for it. This is just enough unlike
* build_subplan that we can't share code.
*
* Note plan_id, plan_name, and cost fields are set further down.
@@ -1321,7 +1321,7 @@ convert_EXISTS_sublink_to_join(PlannerInfo *root, SubLink *sublink,
/*
* See if the subquery can be simplified based on the knowledge that it's
- * being used in EXISTS(). If we aren't able to get rid of its
+ * being used in EXISTS(). If we aren't able to get rid of its
* targetlist, we have to fail, because the pullup operation leaves us
* with noplace to evaluate the targetlist.
*/
@@ -1370,9 +1370,9 @@ convert_EXISTS_sublink_to_join(PlannerInfo *root, SubLink *sublink,
* what pull_up_subqueries has to go through.
*
* In fact, it's even easier than what convert_ANY_sublink_to_join has to
- * do. The machinations of simplify_EXISTS_query ensured that there is
+ * do. The machinations of simplify_EXISTS_query ensured that there is
* nothing interesting in the subquery except an rtable and jointree, and
- * even the jointree FromExpr no longer has quals. So we can just append
+ * even the jointree FromExpr no longer has quals. So we can just append
* the rtable to our own and use the FromExpr in our jointree. But first,
* adjust all level-zero varnos in the subquery to account for the rtable
* merger.
@@ -1504,7 +1504,7 @@ simplify_EXISTS_query(Query *query)
*
* On success, the modified subselect is returned, and we store a suitable
* upper-level test expression at *testexpr, plus a list of the subselect's
- * output Params at *paramIds. (The test expression is already Param-ified
+ * output Params at *paramIds. (The test expression is already Param-ified
* and hence need not go through convert_testexpr, which is why we have to
* deal with the Param IDs specially.)
*
@@ -1667,7 +1667,7 @@ convert_EXISTS_to_ANY(PlannerInfo *root, Query *subselect,
return NULL;
/*
- * Also reject sublinks in the stuff we intend to pull up. (It might be
+ * Also reject sublinks in the stuff we intend to pull up. (It might be
* possible to support this, but doesn't seem worth the complication.)
*/
if (contain_subplans((Node *) leftargs))
@@ -1869,7 +1869,7 @@ process_sublinks_mutator(Node *node, process_sublinks_context *context)
* is needed for a bare List.)
*
* Anywhere within the top-level AND/OR clause structure, we can tell
- * make_subplan() that NULL and FALSE are interchangeable. So isTopQual
+ * make_subplan() that NULL and FALSE are interchangeable. So isTopQual
* propagates down in both cases. (Note that this is unlike the meaning
* of "top level qual" used in most other places in Postgres.)
*/
@@ -1975,7 +1975,7 @@ SS_finalize_plan(PlannerInfo *root, Plan *plan, bool attach_initplans)
* Now determine the set of params that are validly referenceable in this
* query level; to wit, those available from outer query levels plus the
* output parameters of any local initPlans. (We do not include output
- * parameters of regular subplans. Those should only appear within the
+ * parameters of regular subplans. Those should only appear within the
* testexpr of SubPlan nodes, and are taken care of locally within
* finalize_primnode. Likewise, special parameters that are generated by
* nodes such as ModifyTable are handled within finalize_plan.)
@@ -2139,7 +2139,7 @@ finalize_plan(PlannerInfo *root, Plan *plan, Bitmapset *valid_params,
/*
* In a SubqueryScan, SS_finalize_plan has already been run on the
* subplan by the inner invocation of subquery_planner, so there's
- * no need to do it again. Instead, just pull out the subplan's
+ * no need to do it again. Instead, just pull out the subplan's
* extParams list, which represents the params it needs from my
* level and higher levels.
*/
@@ -2471,7 +2471,7 @@ finalize_primnode(Node *node, finalize_primnode_context *context)
/*
* Remove any param IDs of output parameters of the subplan that were
- * referenced in the testexpr. These are not interesting for
+ * referenced in the testexpr. These are not interesting for
* parameter change signaling since we always re-evaluate the subplan.
* Note that this wouldn't work too well if there might be uses of the
* same param IDs elsewhere in the plan, but that can't happen because
diff --git a/src/backend/optimizer/prep/prepjointree.c b/src/backend/optimizer/prep/prepjointree.c
index a7a7574af64..3b8b4eda883 100644
--- a/src/backend/optimizer/prep/prepjointree.c
+++ b/src/backend/optimizer/prep/prepjointree.c
@@ -104,7 +104,7 @@ static Node *find_jointree_node_for_rel(Node *jtnode, int relid);
*
* A clause "foo op ANY (sub-SELECT)" can be processed by pulling the
* sub-SELECT up to become a rangetable entry and treating the implied
- * comparisons as quals of a semijoin. However, this optimization *only*
+ * comparisons as quals of a semijoin. However, this optimization *only*
* works at the top level of WHERE or a JOIN/ON clause, because we cannot
* distinguish whether the ANY ought to return FALSE or NULL in cases
* involving NULL inputs. Also, in an outer join's ON clause we can only
@@ -121,7 +121,7 @@ static Node *find_jointree_node_for_rel(Node *jtnode, int relid);
* transformations if any are found.
*
* This routine has to run before preprocess_expression(), so the quals
- * clauses are not yet reduced to implicit-AND format. That means we need
+ * clauses are not yet reduced to implicit-AND format. That means we need
* to recursively search through explicit AND clauses, which are
* probably only binary ANDs. We stop as soon as we hit a non-AND item.
*/
@@ -239,7 +239,7 @@ pull_up_sublinks_jointree_recurse(PlannerInfo *root, Node *jtnode,
* pull up quals for which that's okay.
*
* XXX for the moment, we refrain from pulling up IN/EXISTS clauses
- * appearing in LEFT or RIGHT join conditions. Although it is
+ * appearing in LEFT or RIGHT join conditions. Although it is
* semantically valid to do so under the above conditions, we end up
* with a query in which the semijoin or antijoin must be evaluated
* below the outer join, which could perform far worse than leaving it
@@ -285,7 +285,7 @@ pull_up_sublinks_jointree_recurse(PlannerInfo *root, Node *jtnode,
/*
* Although we could include the pulled-up subqueries in the returned
* relids, there's no need since upper quals couldn't refer to their
- * outputs anyway. But we *do* need to include the join's own rtindex
+ * outputs anyway. But we *do* need to include the join's own rtindex
* because we haven't yet collapsed join alias variables, so upper
* levels would mistakenly think they couldn't use references to this
* join.
@@ -626,7 +626,7 @@ pull_up_subqueries(PlannerInfo *root, Node *jtnode,
* Attempt to pull up a single simple subquery.
*
* jtnode is a RangeTblRef that has been tentatively identified as a simple
- * subquery by pull_up_subqueries. We return the replacement jointree node,
+ * subquery by pull_up_subqueries. We return the replacement jointree node,
* or jtnode itself if we determine that the subquery can't be pulled up after
* all.
*
@@ -658,7 +658,7 @@ pull_up_simple_subquery(PlannerInfo *root, Node *jtnode, RangeTblEntry *rte,
* Create a PlannerInfo data structure for this subquery.
*
* NOTE: the next few steps should match the first processing in
- * subquery_planner(). Can we refactor to avoid code duplication, or
+ * subquery_planner(). Can we refactor to avoid code duplication, or
* would that just make things uglier?
*/
subroot = makeNode(PlannerInfo);
@@ -707,7 +707,7 @@ pull_up_simple_subquery(PlannerInfo *root, Node *jtnode, RangeTblEntry *rte,
/*
* Now we must recheck whether the subquery is still simple enough to pull
- * up. If not, abandon processing it.
+ * up. If not, abandon processing it.
*
* We don't really need to recheck all the conditions involved, but it's
* easier just to keep this "if" looking the same as the one in
@@ -724,7 +724,7 @@ pull_up_simple_subquery(PlannerInfo *root, Node *jtnode, RangeTblEntry *rte,
* Give up, return unmodified RangeTblRef.
*
* Note: The work we just did will be redone when the subquery gets
- * planned on its own. Perhaps we could avoid that by storing the
+ * planned on its own. Perhaps we could avoid that by storing the
* modified subquery back into the rangetable, but I'm not gonna risk
* it now.
*/
@@ -764,7 +764,7 @@ pull_up_simple_subquery(PlannerInfo *root, Node *jtnode, RangeTblEntry *rte,
* insert into the top query, but if we are under an outer join then
* non-nullable items may have to be turned into PlaceHolderVars. If we
* are dealing with an appendrel member then anything that's not a simple
- * Var has to be turned into a PlaceHolderVar. Set up appropriate context
+ * Var has to be turned into a PlaceHolderVar. Set up appropriate context
* data for pullup_replace_vars.
*/
rvcontext.root = root;
@@ -785,7 +785,7 @@ pull_up_simple_subquery(PlannerInfo *root, Node *jtnode, RangeTblEntry *rte,
* replace any of the jointree structure. (This'd be a lot cleaner if we
* could use query_tree_mutator.) We have to use PHVs in the targetList,
* returningList, and havingQual, since those are certainly above any
- * outer join. replace_vars_in_jointree tracks its location in the
+ * outer join. replace_vars_in_jointree tracks its location in the
* jointree and uses PHVs or not appropriately.
*/
parse->targetList = (List *)
@@ -822,7 +822,7 @@ pull_up_simple_subquery(PlannerInfo *root, Node *jtnode, RangeTblEntry *rte,
* You might think that we could avoid using PHVs for alias vars of joins
* below lowest_outer_join, but that doesn't work because the alias vars
* could be referenced above that join; we need the PHVs to be present in
- * such references after the alias vars get flattened. (It might be worth
+ * such references after the alias vars get flattened. (It might be worth
* trying to be smarter here, someday.)
*/
foreach(lc, parse->rtable)
@@ -912,7 +912,7 @@ pull_up_simple_subquery(PlannerInfo *root, Node *jtnode, RangeTblEntry *rte,
* Pull up a single simple UNION ALL subquery.
*
* jtnode is a RangeTblRef that has been identified as a simple UNION ALL
- * subquery by pull_up_subqueries. We pull up the leaf subqueries and
+ * subquery by pull_up_subqueries. We pull up the leaf subqueries and
* build an "append relation" for the union set. The result value is just
* jtnode, since we don't actually need to change the query jointree.
*/
@@ -1104,7 +1104,7 @@ is_simple_subquery(Query *subquery)
/*
* Don't pull up a subquery that has any set-returning functions in its
- * targetlist. Otherwise we might well wind up inserting set-returning
+ * targetlist. Otherwise we might well wind up inserting set-returning
* functions into places where they mustn't go, such as quals of higher
* queries.
*/
@@ -1113,7 +1113,7 @@ is_simple_subquery(Query *subquery)
/*
* Don't pull up a subquery that has any volatile functions in its
- * targetlist. Otherwise we might introduce multiple evaluations of these
+ * targetlist. Otherwise we might introduce multiple evaluations of these
* functions, if they get copied to multiple places in the upper query,
* leading to surprising results. (Note: the PlaceHolderVar mechanism
* doesn't quite guarantee single evaluation; else we could pull up anyway
@@ -1386,7 +1386,7 @@ pullup_replace_vars_callback(Var *var,
/*
* Insert PlaceHolderVar if needed. Notice that we are wrapping one
* PlaceHolderVar around the whole RowExpr, rather than putting one
- * around each element of the row. This is because we need the
+ * around each element of the row. This is because we need the
* expression to yield NULL, not ROW(NULL,NULL,...) when it is forced
* to null by an outer join.
*/
@@ -1465,7 +1465,7 @@ pullup_replace_vars_callback(Var *var,
/*
* Cache it if possible (ie, if the attno is in range, which it
- * probably always should be). We can cache the value even if we
+ * probably always should be). We can cache the value even if we
* decided we didn't need a PHV, since this result will be
* suitable for any request that has need_phvs.
*/
@@ -1489,7 +1489,7 @@ pullup_replace_vars_callback(Var *var,
*
* If a query's setOperations tree consists entirely of simple UNION ALL
* operations, flatten it into an append relation, which we can process more
- * intelligently than the general setops case. Otherwise, do nothing.
+ * intelligently than the general setops case. Otherwise, do nothing.
*
* In most cases, this can succeed only for a top-level query, because for a
* subquery in FROM, the parent query's invocation of pull_up_subqueries would
@@ -1601,7 +1601,7 @@ flatten_simple_union_all(PlannerInfo *root)
* SELECT ... FROM a LEFT JOIN b ON (a.x = b.y) WHERE b.y IS NULL;
* If the join clause is strict for b.y, then only null-extended rows could
* pass the upper WHERE, and we can conclude that what the query is really
- * specifying is an anti-semijoin. We change the join type from JOIN_LEFT
+ * specifying is an anti-semijoin. We change the join type from JOIN_LEFT
* to JOIN_ANTI. The IS NULL clause then becomes redundant, and must be
* removed to prevent bogus selectivity calculations, but we leave it to
* distribute_qual_to_rels to get rid of such clauses.
@@ -1841,7 +1841,7 @@ reduce_outer_joins_pass2(Node *jtnode,
/*
* See if we can reduce JOIN_LEFT to JOIN_ANTI. This is the case if
* the join's own quals are strict for any var that was forced null by
- * higher qual levels. NOTE: there are other ways that we could
+ * higher qual levels. NOTE: there are other ways that we could
* detect an anti-join, in particular if we were to check whether Vars
* coming from the RHS must be non-null because of table constraints.
* That seems complicated and expensive though (in particular, one
@@ -1999,7 +1999,7 @@ reduce_outer_joins_pass2(Node *jtnode,
* pulled-up relid, and change them to reference the replacement relid(s).
*
* NOTE: although this has the form of a walker, we cheat and modify the
- * nodes in-place. This should be OK since the tree was copied by
+ * nodes in-place. This should be OK since the tree was copied by
* pullup_replace_vars earlier. Avoid scribbling on the original values of
* the bitmapsets, though, because expression_tree_mutator doesn't copy those.
*/
diff --git a/src/backend/optimizer/prep/prepqual.c b/src/backend/optimizer/prep/prepqual.c
index f6f00c4ee91..743d386daab 100644
--- a/src/backend/optimizer/prep/prepqual.c
+++ b/src/backend/optimizer/prep/prepqual.c
@@ -54,12 +54,12 @@ static Expr *process_duplicate_ors(List *orlist);
* Although this can be invoked on its own, it's mainly intended as a helper
* for eval_const_expressions(), and that context drives several design
* decisions. In particular, if the input is already AND/OR flat, we must
- * preserve that property. We also don't bother to recurse in situations
+ * preserve that property. We also don't bother to recurse in situations
* where we can assume that lower-level executions of eval_const_expressions
* would already have simplified sub-clauses of the input.
*
* The difference between this and a simple make_notclause() is that this
- * tries to get rid of the NOT node by logical simplification. It's clearly
+ * tries to get rid of the NOT node by logical simplification. It's clearly
* always a win if the NOT node can be eliminated altogether. However, our
* use of DeMorgan's laws could result in having more NOT nodes rather than
* fewer. We do that unconditionally anyway, because in WHERE clauses it's
@@ -152,7 +152,7 @@ negate_clause(Node *node)
* those properties. For example, if no direct child of
* the given AND clause is an AND or a NOT-above-OR, then
* the recursive calls of negate_clause() can't return any
- * OR clauses. So we needn't call pull_ors() before
+ * OR clauses. So we needn't call pull_ors() before
* building a new OR clause. Similarly for the OR case.
*--------------------
*/
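
The De Morgan rewriting described here is easy to see in miniature. A self-contained sketch over a toy expression tree (binary nodes only; the real code must also preserve AND/OR flatness, which this ignores):

    #include <stdio.h>
    #include <stdlib.h>

    /*
     * NOT(A AND B) => (NOT A) OR (NOT B); NOT(A OR B) => (NOT A) AND
     * (NOT B); NOT(NOT A) => A.  Hypothetical standalone model, not
     * PostgreSQL's negate_clause.
     */
    typedef enum { E_VAR, E_NOT, E_AND, E_OR } ExprKind;

    typedef struct Expr
    {
        ExprKind    kind;
        char        name;       /* for E_VAR */
        struct Expr *l, *r;     /* children for NOT/AND/OR */
    } Expr;

    static Expr *
    mk(ExprKind kind, char name, Expr *l, Expr *r)
    {
        Expr *e = malloc(sizeof(Expr));

        e->kind = kind; e->name = name; e->l = l; e->r = r;
        return e;
    }

    /* Build the negation of e, pushing NOTs inward where possible. */
    static Expr *
    negate(Expr *e)
    {
        switch (e->kind)
        {
            case E_NOT:
                return e->l;                    /* double negation */
            case E_AND:
                return mk(E_OR, 0, negate(e->l), negate(e->r));
            case E_OR:
                return mk(E_AND, 0, negate(e->l), negate(e->r));
            default:
                return mk(E_NOT, 0, e, NULL);   /* leave a NOT node */
        }
    }

    static void
    show(Expr *e)
    {
        switch (e->kind)
        {
            case E_VAR: printf("%c", e->name); break;
            case E_NOT: printf("NOT "); show(e->l); break;
            case E_AND: printf("("); show(e->l); printf(" AND ");
                        show(e->r); printf(")"); break;
            case E_OR:  printf("("); show(e->l); printf(" OR ");
                        show(e->r); printf(")"); break;
        }
    }

    int
    main(void)
    {
        Expr *a = mk(E_VAR, 'A', NULL, NULL);
        Expr *b = mk(E_VAR, 'B', NULL, NULL);

        show(negate(mk(E_AND, 0, a, b)));   /* (NOT A OR NOT B) */
        printf("\n");
        return 0;
    }
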
@@ -374,7 +374,7 @@ pull_ors(List *orlist)
*
* This may seem like a fairly useless activity, but it turns out to be
* applicable to many machine-generated queries, and there are also queries
- * in some of the TPC benchmarks that need it. This was in fact almost the
+ * in some of the TPC benchmarks that need it. This was in fact almost the
* sole useful side-effect of the old prepqual code that tried to force
* the query into canonical AND-of-ORs form: the canonical equivalent of
* ((A AND B) OR (A AND C))
@@ -393,7 +393,7 @@ pull_ors(List *orlist)
* OR clauses to which the inverse OR distributive law might apply.
* Only the top-level AND/OR structure is searched.
*
- * Returns the modified qualification. AND/OR flatness is preserved.
+ * Returns the modified qualification. AND/OR flatness is preserved.
*/
static Expr *
find_duplicate_ors(Expr *qual)
diff --git a/src/backend/optimizer/prep/preptlist.c b/src/backend/optimizer/prep/preptlist.c
index 475dba635d2..b2c59429942 100644
--- a/src/backend/optimizer/prep/preptlist.c
+++ b/src/backend/optimizer/prep/preptlist.c
@@ -4,7 +4,7 @@
* Routines to preprocess the parse tree target list
*
* For INSERT and UPDATE queries, the targetlist must contain an entry for
- * each attribute of the target relation in the correct order. For all query
+ * each attribute of the target relation in the correct order. For all query
* types, we may need to add junk tlist entries for Vars used in the RETURNING
* list and row ID information needed for EvalPlanQual checking.
*
@@ -80,7 +80,7 @@ preprocess_targetlist(PlannerInfo *root, List *tlist)
/*
* Add necessary junk columns for rowmarked rels. These values are needed
* for locking of rels selected FOR UPDATE/SHARE, and to do EvalPlanQual
- * rechecking. See comments for PlanRowMark in plannodes.h.
+ * rechecking. See comments for PlanRowMark in plannodes.h.
*/
foreach(lc, root->rowMarks)
{
@@ -145,7 +145,7 @@ preprocess_targetlist(PlannerInfo *root, List *tlist)
/*
* If the query has a RETURNING list, add resjunk entries for any Vars
* used in RETURNING that belong to other relations. We need to do this
- * to make these Vars available for the RETURNING calculation. Vars that
+ * to make these Vars available for the RETURNING calculation. Vars that
* belong to the result rel don't need to be added, because they will be
* made to refer to the actual heap tuple.
*/
@@ -253,9 +253,9 @@ expand_targetlist(List *tlist, int command_type,
* When generating a NULL constant for a dropped column, we label
* it INT4 (any other guaranteed-to-exist datatype would do as
* well). We can't label it with the dropped column's datatype
- * since that might not exist anymore. It does not really matter
+ * since that might not exist anymore. It does not really matter
* what we claim the type is, since NULL is NULL --- its
- * representation is datatype-independent. This could perhaps
+ * representation is datatype-independent. This could perhaps
* confuse code comparing the finished plan to the target
* relation, however.
*/
@@ -337,7 +337,7 @@ expand_targetlist(List *tlist, int command_type,
/*
* The remaining tlist entries should be resjunk; append them all to the
* end of the new tlist, making sure they have resnos higher than the last
- * real attribute. (Note: although the rewriter already did such
+ * real attribute. (Note: although the rewriter already did such
* renumbering, we have to do it again here in case we are doing an UPDATE
* in a table with dropped columns, or an inheritance child table with
* extra columns.)
diff --git a/src/backend/optimizer/prep/prepunion.c b/src/backend/optimizer/prep/prepunion.c
index 3d9c9768cc3..76fd1034d9a 100644
--- a/src/backend/optimizer/prep/prepunion.c
+++ b/src/backend/optimizer/prep/prepunion.c
@@ -6,14 +6,14 @@
*
* There are two code paths in the planner for set-operation queries.
* If a subquery consists entirely of simple UNION ALL operations, it
- * is converted into an "append relation". Otherwise, it is handled
+ * is converted into an "append relation". Otherwise, it is handled
* by the general code in this module (plan_set_operations and its
* subroutines). There is some support code here for the append-relation
* case, but most of the heavy lifting for that is done elsewhere,
* notably in prepjointree.c and allpaths.c.
*
* There is also some code here to support planning of queries that use
- * inheritance (SELECT FROM foo*). Inheritance trees are converted into
+ * inheritance (SELECT FROM foo*). Inheritance trees are converted into
* append relations, and thenceforth share code with the UNION ALL case.
*
*
@@ -553,7 +553,7 @@ generate_nonunion_plan(SetOperationStmt *op, PlannerInfo *root,
*
* The tlist for an Append plan isn't important as far as the Append is
* concerned, but we must make it look real anyway for the benefit of the
- * next plan level up. In fact, it has to be real enough that the flag
+ * next plan level up. In fact, it has to be real enough that the flag
* column is shown as a variable not a constant, else setrefs.c will get
* confused.
*/
@@ -946,7 +946,7 @@ generate_setop_tlist(List *colTypes, List *colCollations,
* Ensure the tlist entry's exposed collation matches the set-op. This
* is necessary because plan_set_operations() reports the result
* ordering as a list of SortGroupClauses, which don't carry collation
- * themselves but just refer to tlist entries. If we don't show the
+ * themselves but just refer to tlist entries. If we don't show the
* right collation then planner.c might do the wrong thing in
* higher-level queries.
*
@@ -1160,7 +1160,7 @@ generate_setop_grouplist(SetOperationStmt *op, List *targetlist)
/*
* expand_inherited_tables
* Expand each rangetable entry that represents an inheritance set
- * into an "append relation". At the conclusion of this process,
+ * into an "append relation". At the conclusion of this process,
* the "inh" flag is set in all and only those RTEs that are append
* relation parents.
*/
@@ -1192,7 +1192,7 @@ expand_inherited_tables(PlannerInfo *root)
* Check whether a rangetable entry represents an inheritance set.
* If so, add entries for all the child tables to the query's
* rangetable, and build AppendRelInfo nodes for all the child tables
- * and add them to root->append_rel_list. If not, clear the entry's
+ * and add them to root->append_rel_list. If not, clear the entry's
* "inh" flag to prevent later code from looking for AppendRelInfos.
*
* Note that the original RTE is considered to represent the whole
@@ -1503,7 +1503,7 @@ make_inh_translation_list(Relation oldrelation, Relation newrelation,
* parent rel's attribute numbering to the child's.
*
* The only surprise here is that we don't translate a parent whole-row
- * reference into a child whole-row reference. That would mean requiring
+ * reference into a child whole-row reference. That would mean requiring
* permissions on all child columns, which is overly strict, since the
* query is really only going to reference the inherited columns. Instead
* we set the per-column bits for all inherited columns.
@@ -1810,7 +1810,7 @@ adjust_relid_set(Relids relids, Index oldrelid, Index newrelid)
*
* The expressions have already been fixed, but we have to make sure that
* the target resnos match the child table (they may not, in the case of
- * a column that was added after-the-fact by ALTER TABLE). In some cases
+ * a column that was added after-the-fact by ALTER TABLE). In some cases
* this can force us to re-order the tlist to preserve resno ordering.
* (We do all this work in special cases so that preptlist.c is fast for
* the typical case.)
diff --git a/src/backend/optimizer/util/clauses.c b/src/backend/optimizer/util/clauses.c
index 4b484338240..e7bfc38d566 100644
--- a/src/backend/optimizer/util/clauses.c
+++ b/src/backend/optimizer/util/clauses.c
@@ -526,7 +526,7 @@ count_agg_clauses_walker(Node *node, count_agg_clauses_context *context)
/*
* If the transition type is pass-by-value then it doesn't add
- * anything to the required size of the hashtable. If it is
+ * anything to the required size of the hashtable. If it is
* pass-by-reference then we have to add the estimated size of the
* value itself, plus palloc overhead.
*/
@@ -666,7 +666,7 @@ find_window_functions_walker(Node *node, WindowFuncLists *lists)
* Estimate the number of rows in a set result.
*
* We use the product of the rowcount estimates of all the functions in
- * the given tree. The result is 1 if there are no set-returning functions.
+ * the given tree. The result is 1 if there are no set-returning functions.
*
* Note: keep this in sync with expression_returns_set() in nodes/nodeFuncs.c.
*/
@@ -782,7 +782,7 @@ contain_subplans_walker(Node *node, void *context)
* Recursively search for mutable functions within a clause.
*
* Returns true if any mutable function (or operator implemented by a
- * mutable function) is found. This test is needed so that we don't
+ * mutable function) is found. This test is needed so that we don't
* mistakenly think that something like "WHERE random() < 0.5" can be treated
* as a constant qualification.
*
@@ -909,7 +909,7 @@ contain_mutable_functions_walker(Node *node, void *context)
* invalid conversions of volatile expressions into indexscan quals.
*
* We will recursively look into Query nodes (i.e., SubLink sub-selects)
- * but not into SubPlans. This is a bit odd, but intentional. If we are
+ * but not into SubPlans. This is a bit odd, but intentional. If we are
* looking at a SubLink, we are probably deciding whether a query tree
* transformation is safe, and a contained sub-select should affect that;
* for example, duplicating a sub-select containing a volatile function
@@ -1040,7 +1040,7 @@ contain_volatile_functions_walker(Node *node, void *context)
* The idea here is that the caller has verified that the expression contains
* one or more Var or Param nodes (as appropriate for the caller's need), and
* now wishes to prove that the expression result will be NULL if any of these
- * inputs is NULL. If we return false, then the proof succeeded.
+ * inputs is NULL. If we return false, then the proof succeeded.
*/
bool
contain_nonstrict_functions(Node *clause)
@@ -1157,7 +1157,7 @@ contain_nonstrict_functions_walker(Node *node, void *context)
*
* Returns the set of all Relids that are referenced in the clause in such
* a way that the clause cannot possibly return TRUE if any of these Relids
- * is an all-NULL row. (It is OK to err on the side of conservatism; hence
+ * is an all-NULL row. (It is OK to err on the side of conservatism; hence
* the analysis here is simplistic.)
*
* The semantics here are subtly different from contain_nonstrict_functions:
@@ -1263,7 +1263,7 @@ find_nonnullable_rels_walker(Node *node, bool top_level)
* could be FALSE (hence not NULL). However, if *all* the
* arms produce NULL then the result is NULL, so we can take
* the intersection of the sets of nonnullable rels, just as
- * for OR. Fall through to share code.
+ * for OR. Fall through to share code.
*/
/* FALL THRU */
case OR_EXPR:
@@ -1471,7 +1471,7 @@ find_nonnullable_vars_walker(Node *node, bool top_level)
* could be FALSE (hence not NULL). However, if *all* the
* arms produce NULL then the result is NULL, so we can take
* the intersection of the sets of nonnullable vars, just as
- * for OR. Fall through to share code.
+ * for OR. Fall through to share code.
*/
/* FALL THRU */
case OR_EXPR:
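
Both of these hunks describe the same intersection rule: a rel is provably nonnullable across an OR (or an all-arms CASE) only if every arm proves it. A sketch with relid sets modeled as bit masks (hypothetical; the real code uses Bitmapset):

    #include <stdio.h>

    /*
     * Intersect the per-arm nonnullable-rel sets; with no arms there
     * is nothing provable, so return the empty set.
     */
    static unsigned
    intersect_arms(const unsigned *arm_sets, int narms)
    {
        unsigned result;

        if (narms == 0)
            return 0;
        result = arm_sets[0];
        for (int i = 1; i < narms; i++)
            result &= arm_sets[i];
        return result;
    }

    int
    main(void)
    {
        unsigned arms[] = {0x3, 0x5};   /* rels {1,2} and {1,3} */

        printf("0x%x\n", intersect_arms(arms, 2));  /* 0x1: rel 1 */
        return 0;
    }
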
@@ -1741,7 +1741,7 @@ is_strict_saop(ScalarArrayOpExpr *expr, bool falseOK)
* variables of the current query level and no uses of volatile functions.
* Such an expr is not necessarily a true constant: it can still contain
* Params and outer-level Vars, not to mention functions whose results
- * may vary from one statement to the next. However, the expr's value
+ * may vary from one statement to the next. However, the expr's value
* will be constant over any one scan of the current query, so it can be
* used as, eg, an indexscan key.
*
@@ -2044,7 +2044,7 @@ rowtype_field_matches(Oid rowtypeid, int fieldnum,
* expression tree, for example "2 + 2" => "4". More interestingly,
* we can reduce certain boolean expressions even when they contain
* non-constant subexpressions: "x OR true" => "true" no matter what
- * the subexpression x is. (XXX We assume that no such subexpression
+ * the subexpression x is. (XXX We assume that no such subexpression
* will have important side-effects, which is not necessarily a good
* assumption in the presence of user-defined functions; do we need a
* pg_proc flag that prevents discarding the execution of a function?)
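
Both reductions mentioned here fit in a few lines over a toy tree. A standalone sketch (not eval_const_expressions itself), carrying the same side-effect caveat the comment raises:

    #include <stdio.h>
    #include <stdlib.h>

    /* "2 + 2" folds to "4"; "x OR true" folds to "true" even though
     * x is not constant.  Booleans are modeled as ints 0/1. */
    typedef enum { C_INT, C_VAR, C_ADD, C_OR } NodeKind;

    typedef struct Node
    {
        NodeKind    kind;
        int         val;        /* for C_INT */
        struct Node *l, *r;
    } Node;

    static Node *
    mknode(NodeKind kind, int val, Node *l, Node *r)
    {
        Node *n = malloc(sizeof(Node));

        n->kind = kind; n->val = val; n->l = l; n->r = r;
        return n;
    }

    static Node *
    fold(Node *n)
    {
        if (n->kind == C_INT || n->kind == C_VAR)
            return n;
        n->l = fold(n->l);
        n->r = fold(n->r);
        if (n->kind == C_ADD &&
            n->l->kind == C_INT && n->r->kind == C_INT)
            return mknode(C_INT, n->l->val + n->r->val, NULL, NULL);
        if (n->kind == C_OR)
        {
            /* x OR true => true regardless of x; assumes x has no
             * important side-effects, per the caveat above */
            if ((n->l->kind == C_INT && n->l->val) ||
                (n->r->kind == C_INT && n->r->val))
                return mknode(C_INT, 1, NULL, NULL);
        }
        return n;
    }

    int
    main(void)
    {
        Node *sum = fold(mknode(C_ADD, 0,
                                mknode(C_INT, 2, NULL, NULL),
                                mknode(C_INT, 2, NULL, NULL)));
        Node *orx = fold(mknode(C_OR, 0,
                                mknode(C_VAR, 0, NULL, NULL),
                                mknode(C_INT, 1, NULL, NULL)));

        printf("2+2 -> %d;  x OR true -> %d\n", sum->val, orx->val);
        return 0;
    }
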
@@ -2057,7 +2057,7 @@ rowtype_field_matches(Oid rowtypeid, int fieldnum,
*
* Whenever a function is eliminated from the expression by means of
* constant-expression evaluation or inlining, we add the function to
- * root->glob->invalItems. This ensures the plan is known to depend on
+ * root->glob->invalItems. This ensures the plan is known to depend on
* such functions, even though they aren't referenced anymore.
*
* We assume that the tree has already been type-checked and contains
@@ -2252,7 +2252,7 @@ eval_const_expressions_mutator(Node *node,
(void *) context);
/*
- * Need to get OID of underlying function. Okay to scribble on input
+ * Need to get OID of underlying function. Okay to scribble on input
* to this extent.
*/
set_opfuncid(expr);
@@ -2349,7 +2349,7 @@ eval_const_expressions_mutator(Node *node,
/* (NOT okay to try to inline it, though!) */
/*
- * Need to get OID of underlying function. Okay to scribble on
+ * Need to get OID of underlying function. Okay to scribble on
* input to this extent.
*/
set_opfuncid((OpExpr *) expr); /* rely on struct equivalence */
@@ -2633,7 +2633,7 @@ eval_const_expressions_mutator(Node *node,
/*
* If we can simplify the input to a constant, then we don't need the
* CollateExpr node at all: just change the constcollid field of the
- * Const node. Otherwise, replace the CollateExpr with a RelabelType.
+ * Const node. Otherwise, replace the CollateExpr with a RelabelType.
* (We do that so as to improve uniformity of expression
* representation and thus simplify comparison of expressions.)
*/
@@ -2690,7 +2690,7 @@ eval_const_expressions_mutator(Node *node,
* placeholder nodes, so that we have the opportunity to reduce
* constant test conditions. For example this allows
* CASE 0 WHEN 0 THEN 1 ELSE 1/0 END
- * to reduce to 1 rather than drawing a divide-by-0 error. Note
+ * to reduce to 1 rather than drawing a divide-by-0 error. Note
* that when the test expression is constant, we don't have to
* include it in the resulting CASE; for example
* CASE 0 WHEN x THEN y ELSE z END
@@ -2777,7 +2777,7 @@ eval_const_expressions_mutator(Node *node,
/*
* Found a TRUE condition, so none of the remaining alternatives
- * can be reached. We treat the result as the default result.
+ * can be reached. We treat the result as the default result.
*/
defresult = caseresult;
break;
@@ -3153,7 +3153,7 @@ simplify_or_arguments(List *args,
/*
* Since the parser considers OR to be a binary operator, long OR lists
* become deeply nested expressions. We must flatten these into long
- * argument lists of a single OR operator. To avoid blowing out the stack
+ * argument lists of a single OR operator. To avoid blowing out the stack
* with recursion of eval_const_expressions, we resort to some tenseness
* here: we keep a list of not-yet-processed inputs, and handle flattening
* of nested ORs by prepending to the to-do list instead of recursing.
@@ -3201,7 +3201,7 @@ simplify_or_arguments(List *args,
}
/*
- * OK, we have a const-simplified non-OR argument. Process it per
+ * OK, we have a const-simplified non-OR argument. Process it per
* comments above.
*/
if (IsA(arg, Const))
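
The non-recursive flattening strategy this hunk describes looks like the following in miniature (a sketch with a fixed-size worklist and hypothetical names; the real code prepends to a List):

    #include <stdio.h>
    #include <stdlib.h>

    typedef struct Node
    {
        int         is_or;      /* 1 = OR node, 0 = leaf argument */
        int         leaf_id;    /* valid when is_or == 0 */
        struct Node *l, *r;
    } Node;

    /*
     * Collect the leaf arguments of a nested binary OR tree using an
     * explicit to-do stack, so deeply nested ORs cannot blow the C
     * stack with recursion.  Assumes at most 1024 pending nodes.
     */
    static void
    flatten_or(Node *root, int *out, int *nout)
    {
        Node   *todo[1024];
        int     ntodo = 0;

        todo[ntodo++] = root;
        while (ntodo > 0)
        {
            Node   *n = todo[--ntodo];

            if (n->is_or)
            {
                /* push children instead of recursing; right first so
                 * the left argument pops first, preserving order */
                todo[ntodo++] = n->r;
                todo[ntodo++] = n->l;
            }
            else
                out[(*nout)++] = n->leaf_id;
        }
    }

    static Node *
    leaf(int id)
    {
        Node *n = calloc(1, sizeof(Node));

        n->leaf_id = id;
        return n;
    }

    static Node *
    orn(Node *l, Node *r)
    {
        Node *n = calloc(1, sizeof(Node));

        n->is_or = 1; n->l = l; n->r = r;
        return n;
    }

    int
    main(void)
    {
        int out[8], nout = 0;

        flatten_or(orn(orn(leaf(1), leaf(2)), leaf(3)), out, &nout);
        for (int i = 0; i < nout; i++)
            printf("%d ", out[i]);
        printf("\n");               /* prints: 1 2 3 */
        return 0;
    }
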
@@ -3650,7 +3650,7 @@ fetch_function_defaults(HeapTuple func_tuple)
*
* It is possible for some of the defaulted arguments to be polymorphic;
* therefore we can't assume that the default expressions have the correct
- * data types already. We have to re-resolve polymorphics and do coercion
+ * data types already. We have to re-resolve polymorphics and do coercion
* just like the parser did.
*
* This should be a no-op if there are no polymorphic arguments,
@@ -3810,7 +3810,7 @@ evaluate_function(Oid funcid, Oid result_type, int32 result_typmod,
* do not re-expand them. Also, if a parameter is used more than once
* in the SQL-function body, we require it not to contain any volatile
* functions (volatiles might deliver inconsistent answers) nor to be
- * unreasonably expensive to evaluate. The expensiveness check not only
+ * unreasonably expensive to evaluate. The expensiveness check not only
* prevents us from doing multiple evaluations of an expensive parameter
* at runtime, but is a safety value to limit growth of an expression due
* to repeated inlining.
@@ -3852,7 +3852,7 @@ inline_function(Oid funcid, Oid result_type, Oid result_collid,
/*
* Forget it if the function is not SQL-language or has other showstopper
- * properties. (The nargs check is just paranoia.)
+ * properties. (The nargs check is just paranoia.)
*/
if (funcform->prolang != SQLlanguageId ||
funcform->prosecdef ||
@@ -3929,7 +3929,7 @@ inline_function(Oid funcid, Oid result_type, Oid result_collid,
/*
* We just do parsing and parse analysis, not rewriting, because rewriting
* will not affect table-free-SELECT-only queries, which is all that we
- * care about. Also, we can punt as soon as we detect more than one
+ * care about. Also, we can punt as soon as we detect more than one
* command in the function body.
*/
raw_parsetree_list = pg_parse_query(src);
@@ -3972,7 +3972,7 @@ inline_function(Oid funcid, Oid result_type, Oid result_collid,
/*
* Make sure the function (still) returns what it's declared to. This
* will raise an error if wrong, but that's okay since the function would
- * fail at runtime anyway. Note that check_sql_fn_retval will also insert
+ * fail at runtime anyway. Note that check_sql_fn_retval will also insert
* a RelabelType if needed to make the tlist expression match the declared
* type of the function.
*
@@ -4017,7 +4017,7 @@ inline_function(Oid funcid, Oid result_type, Oid result_collid,
/*
* We may be able to do it; there are still checks on parameter usage to
* make, but those are most easily done in combination with the actual
- * substitution of the inputs. So start building expression with inputs
+ * substitution of the inputs. So start building expression with inputs
* substituted.
*/
usecounts = (int *) palloc0(funcform->pronargs * sizeof(int));
@@ -4217,7 +4217,7 @@ evaluate_expr(Expr *expr, Oid result_type, int32 result_typmod,
fix_opfuncids((Node *) expr);
/*
- * Prepare expr for execution. (Note: we can't use ExecPrepareExpr
+ * Prepare expr for execution. (Note: we can't use ExecPrepareExpr
* because it'd result in recursively invoking eval_const_expressions.)
*/
exprstate = ExecInitExpr(expr, NULL);
@@ -4329,7 +4329,7 @@ inline_set_returning_function(PlannerInfo *root, RangeTblEntry *rte)
* Refuse to inline if the arguments contain any volatile functions or
* sub-selects. Volatile functions are rejected because inlining may
* result in the arguments being evaluated multiple times, risking a
- * change in behavior. Sub-selects are rejected partly for implementation
+ * change in behavior. Sub-selects are rejected partly for implementation
* reasons (pushing them down another level might change their behavior)
* and partly because they're likely to be expensive and so multiple
* evaluation would be bad.
@@ -4356,7 +4356,7 @@ inline_set_returning_function(PlannerInfo *root, RangeTblEntry *rte)
/*
* Forget it if the function is not SQL-language or has other showstopper
- * properties. In particular it mustn't be declared STRICT, since we
+ * properties. In particular it mustn't be declared STRICT, since we
* couldn't enforce that. It also mustn't be VOLATILE, because that is
* supposed to cause it to be executed with its own snapshot, rather than
* sharing the snapshot of the calling query. (Rechecking proretset is
@@ -4386,9 +4386,9 @@ inline_set_returning_function(PlannerInfo *root, RangeTblEntry *rte)
/*
* When we call eval_const_expressions below, it might try to add items to
- * root->glob->invalItems. Since it is running in the temp context, those
+ * root->glob->invalItems. Since it is running in the temp context, those
* items will be in that context, and will need to be copied out if we're
- * successful. Temporarily reset the list so that we can keep those items
+ * successful. Temporarily reset the list so that we can keep those items
* separate from the pre-existing list contents.
*/
saveInvalItems = root->glob->invalItems;
@@ -4418,7 +4418,7 @@ inline_set_returning_function(PlannerInfo *root, RangeTblEntry *rte)
/*
* Run eval_const_expressions on the function call. This is necessary to
* ensure that named-argument notation is converted to positional notation
- * and any default arguments are inserted. It's a bit of overkill for the
+ * and any default arguments are inserted. It's a bit of overkill for the
* arguments, since they'll get processed again later, but no harm will be
* done.
*/
@@ -4471,7 +4471,7 @@ inline_set_returning_function(PlannerInfo *root, RangeTblEntry *rte)
/*
* Make sure the function (still) returns what it's declared to. This
* will raise an error if wrong, but that's okay since the function would
- * fail at runtime anyway. Note that check_sql_fn_retval will also insert
+ * fail at runtime anyway. Note that check_sql_fn_retval will also insert
* RelabelType(s) and/or NULL columns if needed to make the tlist
* expression(s) match the declared type of the function.
*
diff --git a/src/backend/optimizer/util/joininfo.c b/src/backend/optimizer/util/joininfo.c
index b6187777622..771dff41008 100644
--- a/src/backend/optimizer/util/joininfo.c
+++ b/src/backend/optimizer/util/joininfo.c
@@ -73,7 +73,7 @@ have_relevant_joinclause(PlannerInfo *root,
* Add 'restrictinfo' to the joininfo list of each relation it requires.
*
* Note that the same copy of the restrictinfo node is linked to by all the
- * lists it is in. This allows us to exploit caching of information about
+ * lists it is in. This allows us to exploit caching of information about
* the restriction clause (but we must be careful that the information does
* not depend on context).
*
diff --git a/src/backend/optimizer/util/pathnode.c b/src/backend/optimizer/util/pathnode.c
index 161d5ab122e..df4e9247d79 100644
--- a/src/backend/optimizer/util/pathnode.c
+++ b/src/backend/optimizer/util/pathnode.c
@@ -291,7 +291,7 @@ add_path(RelOptInfo *parent_rel, Path *new_path)
/*
* If the two paths compare differently for startup and total cost,
* then we want to keep both, and we can skip the (much slower)
- * comparison of pathkeys. If they compare the same, proceed with the
+ * comparison of pathkeys. If they compare the same, proceed with the
* pathkeys comparison. Note: this test relies on the fact that
* compare_fuzzy_path_costs will only return 0 if both costs are
* effectively equal (and, therefore, there's no need to call it twice
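
Aside: the comment above describes why add_path keeps both paths when they win on different cost metrics. A minimal standalone C sketch of that decision, assuming a fuzzy comparator in the spirit of compare_fuzzy_path_costs (the 1% fuzz factor and all names here are illustrative, not taken from this patch):

    #include <stdio.h>

    #define FUZZ 1.01   /* treat costs within ~1% as equal (illustrative) */

    /* Return -1, 0, or +1 comparing two costs with a fuzz factor. */
    static int
    fuzzy_cmp(double a, double b)
    {
        if (a > b * FUZZ)
            return 1;
        if (b > a * FUZZ)
            return -1;
        return 0;
    }

    int
    main(void)
    {
        /* (startup, total) cost pairs for two hypothetical paths */
        double s1 = 10.0, t1 = 100.0;   /* cheap startup, expensive total */
        double s2 = 50.0, t2 = 80.0;    /* expensive startup, cheap total */

        int sc = fuzzy_cmp(s1, s2);
        int tc = fuzzy_cmp(t1, t2);

        if (sc != 0 && tc != 0 && sc != tc)
            printf("paths win on different metrics: keep both\n");
        else
            printf("one path may dominate: compare pathkeys next\n");
        return 0;
    }

When the two comparisons disagree, the slower pathkeys comparison is skipped entirely, which is the shortcut the comment is pointing at.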
@@ -1187,7 +1187,7 @@ translate_sub_tlist(List *tlist, int relid)
*
* colnos is an integer list of output column numbers (resno's). We are
* interested in whether rows consisting of just these columns are certain
- * to be distinct. "Distinctness" is defined according to whether the
+ * to be distinct. "Distinctness" is defined according to whether the
* corresponding upper-level equality operators listed in opids would think
* the values are distinct. (Note: the opids entries could be cross-type
* operators, and thus not exactly the equality operators that the subquery
@@ -1308,7 +1308,7 @@ query_is_distinct_for(Query *query, List *colnos, List *opids)
* distinct_col_search - subroutine for query_is_distinct_for
*
* If colno is in colnos, return the corresponding element of opids,
- * else return InvalidOid. (We expect colnos does not contain duplicates,
+ * else return InvalidOid. (We expect colnos does not contain duplicates,
* so the result is well-defined.)
*/
static Oid
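
Aside: the contract stated in the comment above is small enough to sketch standalone. Here parallel arrays stand in for the colnos/opids List pair; everything else follows the stated rule (duplicates in colnos assumed absent):

    #include <stdio.h>

    typedef unsigned int Oid;
    #define InvalidOid ((Oid) 0)

    /*
     * If colno appears in colnos[], return the opid at the same position,
     * else InvalidOid -- the same contract as the comment describes.
     */
    static Oid
    distinct_col_search(int colno, const int *colnos, const Oid *opids, int n)
    {
        int i;
        for (i = 0; i < n; i++)
            if (colnos[i] == colno)
                return opids[i];
        return InvalidOid;
    }

    int
    main(void)
    {
        int colnos[] = {1, 3, 4};
        Oid opids[]  = {96, 410, 532};   /* arbitrary operator OIDs */

        printf("col 3 -> opid %u\n", distinct_col_search(3, colnos, opids, 3));
        printf("col 2 -> opid %u\n", distinct_col_search(2, colnos, opids, 3));
        return 0;
    }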
@@ -1590,10 +1590,10 @@ create_hashjoin_path(PlannerInfo *root,
/*
* A hashjoin never has pathkeys, since its output ordering is
- * unpredictable due to possible batching. XXX If the inner relation is
+ * unpredictable due to possible batching. XXX If the inner relation is
* small enough, we could instruct the executor that it must not batch,
* and then we could assume that the output inherits the outer relation's
- * ordering, which might save a sort step. However there is considerable
+ * ordering, which might save a sort step. However there is considerable
* downside if our estimate of the inner relation size is badly off. For
* the moment we don't risk it. (Note also that if we wanted to take this
* seriously, joinpath.c would have to consider many more paths for the
diff --git a/src/backend/optimizer/util/plancat.c b/src/backend/optimizer/util/plancat.c
index d0a3f72351d..fee6b8d7c14 100644
--- a/src/backend/optimizer/util/plancat.c
+++ b/src/backend/optimizer/util/plancat.c
@@ -404,7 +404,7 @@ estimate_rel_size(Relation rel, int32 *attr_widths,
* such as temporary tables.)
*
* We approximate "never vacuumed" by "has relpages = 0", which
- * means this will also fire on genuinely empty relations. Not
+ * means this will also fire on genuinely empty relations. Not
* great, but fortunately that's a seldom-seen case in the real
* world, and it shouldn't degrade the quality of the plan too
* much anyway to err in this direction.
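
Aside: the "relpages = 0 means never vacuumed" approximation feeds a row-count extrapolation. A simplified standalone sketch of that idea, assuming density is guessed from tuple width when no stats exist (the 75% fill factor and other constants are illustrative, not PostgreSQL's exact numbers):

    #include <stdio.h>

    #define BLCKSZ 8192

    /*
     * Extrapolate a row count from the current physical page count.  If
     * relpages == 0 (never vacuumed/analyzed, or genuinely empty), guess a
     * density from tuple width; otherwise reuse the observed density.
     */
    static double
    estimate_rows(double curpages, double relpages, double reltuples,
                  int tuple_width)
    {
        double density;

        if (relpages > 0)
            density = reltuples / relpages;          /* observed density */
        else
            density = (BLCKSZ * 0.75) / tuple_width; /* crude guess */

        return density * curpages;
    }

    int
    main(void)
    {
        /* never-analyzed table: 100 pages on disk, ~50-byte rows */
        printf("guess: %.0f rows\n", estimate_rows(100, 0, 0, 50));
        /* analyzed table: stats say 120 tuples/page, now 200 pages */
        printf("extrapolated: %.0f rows\n", estimate_rows(200, 100, 12000, 50));
        return 0;
    }

As the comment says, a genuinely empty table also trips the relpages == 0 branch and gets a nonzero guess; that errs on the safe side for planning.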
@@ -730,7 +730,7 @@ relation_excluded_by_constraints(PlannerInfo *root,
return false;
/*
- * OK to fetch the constraint expressions. Include "col IS NOT NULL"
+ * OK to fetch the constraint expressions. Include "col IS NOT NULL"
* expressions for attnotnull columns, in case we can refute those.
*/
constraint_pred = get_relation_constraints(root, rte->relid, rel, true);
@@ -778,7 +778,7 @@ relation_excluded_by_constraints(PlannerInfo *root,
* Exception: if there are any dropped columns, we punt and return NIL.
* Ideally we would like to handle the dropped-column case too. However this
* creates problems for ExecTypeFromTL, which may be asked to build a tupdesc
- * for a tlist that includes vars of no-longer-existent types. In theory we
+ * for a tlist that includes vars of no-longer-existent types. In theory we
* could dig out the required info from the pg_attribute entries of the
* relation, but that data is not readily available to ExecTypeFromTL.
* For now, we don't apply the physical-tlist optimization when there are
diff --git a/src/backend/optimizer/util/predtest.c b/src/backend/optimizer/util/predtest.c
index bbfbc7947b1..0312f4fb6a4 100644
--- a/src/backend/optimizer/util/predtest.c
+++ b/src/backend/optimizer/util/predtest.c
@@ -135,7 +135,7 @@ predicate_implied_by(List *predicate_list, List *restrictinfo_list)
/*
* If either input is a single-element list, replace it with its lone
- * member; this avoids one useless level of AND-recursion. We only need
+ * member; this avoids one useless level of AND-recursion. We only need
* to worry about this at top level, since eval_const_expressions should
* have gotten rid of any trivial ANDs or ORs below that.
*/
@@ -193,7 +193,7 @@ predicate_refuted_by(List *predicate_list, List *restrictinfo_list)
/*
* If either input is a single-element list, replace it with its lone
- * member; this avoids one useless level of AND-recursion. We only need
+ * member; this avoids one useless level of AND-recursion. We only need
* to worry about this at top level, since eval_const_expressions should
* have gotten rid of any trivial ANDs or ORs below that.
*/
@@ -227,7 +227,7 @@ predicate_refuted_by(List *predicate_list, List *restrictinfo_list)
* OR-expr A => AND-expr B iff: A => each of B's components
* OR-expr A => OR-expr B iff: each of A's components => any of B's
*
- * An "atom" is anything other than an AND or OR node. Notice that we don't
+ * An "atom" is anything other than an AND or OR node. Notice that we don't
* have any special logic to handle NOT nodes; these should have been pushed
* down or eliminated where feasible by prepqual.c.
*
@@ -660,7 +660,7 @@ predicate_refuted_by_recurse(Node *clause, Node *predicate)
* We cannot make the stronger conclusion that B is refuted if B
* implies A's arg; that would only prove that B is not-TRUE, not
* that it's not NULL either. Hence use equal() rather than
- * predicate_implied_by_recurse(). We could do the latter if we
+ * predicate_implied_by_recurse(). We could do the latter if we
* ever had a need for the weak form of refutation.
*/
not_arg = extract_strong_not_arg(clause);
@@ -822,7 +822,7 @@ predicate_classify(Node *clause, PredIterInfo info)
}
/*
- * PredIterInfo routines for iterating over regular Lists. The iteration
+ * PredIterInfo routines for iterating over regular Lists. The iteration
* state variable is the next ListCell to visit.
*/
static void
@@ -1016,13 +1016,13 @@ arrayexpr_cleanup_fn(PredIterInfo info)
* implies another:
*
* A simple and general way is to see if they are equal(); this works for any
- * kind of expression. (Actually, there is an implied assumption that the
+ * kind of expression. (Actually, there is an implied assumption that the
* functions in the expression are immutable, ie dependent only on their input
* arguments --- but this was checked for the predicate by the caller.)
*
* When the predicate is of the form "foo IS NOT NULL", we can conclude that
* the predicate is implied if the clause is a strict operator or function
- * that has "foo" as an input. In this case the clause must yield NULL when
+ * that has "foo" as an input. In this case the clause must yield NULL when
* "foo" is NULL, which we can take as equivalent to FALSE because we know
* we are within an AND/OR subtree of a WHERE clause. (Again, "foo" is
* already known immutable, so the clause will certainly always fail.)
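
Aside: the "strict operator implies foo IS NOT NULL" rule above rests on three-valued logic. A minimal standalone C sketch, with a struct standing in for a nullable Datum (all names here are illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    /* A nullable integer, mimicking SQL three-valued logic. */
    typedef struct { bool isnull; int value; } NInt;
    typedef struct { bool isnull; bool value; } NBool;

    /*
     * A strict operator: returns NULL whenever any input is NULL.  If the
     * qual is "foo < 10" and foo is NULL, the qual yields NULL, which an
     * AND/OR qual tree treats as not-true -- so the qual passing implies
     * "foo IS NOT NULL".
     */
    static NBool
    strict_lt(NInt a, NInt b)
    {
        NBool r;
        if (a.isnull || b.isnull)
        {
            r.isnull = true;
            r.value = false;
            return r;
        }
        r.isnull = false;
        r.value = (a.value < b.value);
        return r;
    }

    int
    main(void)
    {
        NInt null_foo = {true, 0};
        NInt ten = {false, 10};
        NBool r = strict_lt(null_foo, ten);

        printf("NULL < 10 => %s\n", r.isnull ? "NULL (treated as false)"
                                             : (r.value ? "true" : "false"));
        return 0;
    }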
@@ -1246,7 +1246,7 @@ list_member_strip(List *list, Expr *datum)
*
* The strategy numbers defined by btree indexes (see access/skey.h) are:
* (1) < (2) <= (3) = (4) >= (5) >
- * and in addition we use (6) to represent <>. <> is not a btree-indexable
+ * and in addition we use (6) to represent <>. <> is not a btree-indexable
* operator, but we assume here that if an equality operator of a btree
* opfamily has a negator operator, the negator behaves as <> for the opfamily.
* (This convention is also known to get_op_btree_interpretation().)
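
Aside: the strategy-number convention above drives the planner's implication tables. The following standalone C sketch shows only the drastically simplified same-constant case; it is not the planner's actual BT_implic_table, which also handles differing constants by consulting a btree test operator:

    #include <stdbool.h>
    #include <stdio.h>

    /* Strategy numbers per the comment: 1 < , 2 <= , 3 = , 4 >= , 5 > , 6 <> */
    enum { LT = 0, LE, EQ, GE, GT, NE };
    static const char *opname[] = {"<", "<=", "=", ">=", ">", "<>"};

    /*
     * Does "x CLAUSE_OP c" imply "x PRED_OP c" for the SAME constant c?
     * Rows = clause strategy, columns = predicate strategy.
     */
    static const bool implies_same_const[6][6] = {
        /*           <      <=     =      >=     >      <>   */
        /* <  */ {true,  true,  false, false, false, true },
        /* <= */ {false, true,  false, false, false, false},
        /* =  */ {false, true,  true,  true,  false, false},
        /* >= */ {false, false, false, true,  false, false},
        /* >  */ {false, false, false, true,  true,  true },
        /* <> */ {false, false, false, false, false, true },
    };

    int
    main(void)
    {
        int c, p;
        for (c = 0; c < 6; c++)
            for (p = 0; p < 6; p++)
                if (implies_same_const[c][p])
                    printf("x %s C  implies  x %s C\n", opname[c], opname[p]);
        return 0;
    }

For example, "x < C" implies both "x <= C" and "x <> C", which is exactly why <> earns its honorary slot 6 despite not being btree-indexable.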
@@ -1330,7 +1330,7 @@ static const StrategyNumber BT_refute_table[6][6] = {
* if not able to prove it.
*
* What we look for here is binary boolean opclauses of the form
- * "foo op constant", where "foo" is the same in both clauses. The operators
+ * "foo op constant", where "foo" is the same in both clauses. The operators
* and constants can be different but the operators must be in the same btree
* operator family. We use the above operator implication tables to
* derive implications between nonidentical clauses. (Note: "foo" is known
@@ -1420,7 +1420,7 @@ btree_predicate_proof(Expr *predicate, Node *clause, bool refute_it)
/*
* Check for matching subexpressions on the non-Const sides. We used to
* only allow a simple Var, but it's about as easy to allow any
- * expression. Remember we already know that the pred expression does not
+ * expression. Remember we already know that the pred expression does not
* contain any non-immutable functions, so identical expressions should
* yield identical results.
*/
@@ -1692,7 +1692,7 @@ get_btree_test_op(Oid pred_op, Oid clause_op, bool refute_it)
* Last check: test_op must be immutable.
*
* Note that we require only the test_op to be immutable, not the
- * original clause_op. (pred_op is assumed to have been checked
+ * original clause_op. (pred_op is assumed to have been checked
* immutable by the caller.) Essentially we are assuming that the
* opfamily is consistent even if it contains operators that are
* merely stable.
diff --git a/src/backend/optimizer/util/relnode.c b/src/backend/optimizer/util/relnode.c
index b7a5845bb15..59f019bd0d4 100644
--- a/src/backend/optimizer/util/relnode.c
+++ b/src/backend/optimizer/util/relnode.c
@@ -226,7 +226,7 @@ RelOptInfo *
find_join_rel(PlannerInfo *root, Relids relids)
{
/*
- * Switch to using hash lookup when list grows "too long". The threshold
+ * Switch to using hash lookup when list grows "too long". The threshold
* is arbitrary and is known only here.
*/
if (!root->join_rel_hash && list_length(root->join_rel_list) > 32)
@@ -404,7 +404,7 @@ build_join_rel(PlannerInfo *root,
/*
* Also, if dynamic-programming join search is active, add the new joinrel
- * to the appropriate sublist. Note: you might think the Assert on number
+ * to the appropriate sublist. Note: you might think the Assert on number
* of members should be for equality, but some of the level 1 rels might
* have been joinrels already, so we can only assert <=.
*/
@@ -491,7 +491,7 @@ build_joinrel_tlist(PlannerInfo *root, RelOptInfo *joinrel,
* the join list need only be computed once for any join RelOptInfo.
* The join list is fully determined by the set of rels making up the
* joinrel, so we should get the same results (up to ordering) from any
- * candidate pair of sub-relations. But the restriction list is whatever
+ * candidate pair of sub-relations. But the restriction list is whatever
* is not handled in the sub-relations, so it depends on which
* sub-relations are considered.
*
@@ -500,7 +500,7 @@ build_joinrel_tlist(PlannerInfo *root, RelOptInfo *joinrel,
* we put it into the joininfo list for the joinrel. Otherwise,
* the clause is now a restrict clause for the joined relation, and we
* return it to the caller of build_joinrel_restrictlist() to be stored in
- * join paths made from this pair of sub-relations. (It will not need to
+ * join paths made from this pair of sub-relations. (It will not need to
* be considered further up the join tree.)
*
* In many case we will find the same RestrictInfos in both input
 * In many cases we will find the same RestrictInfos in both input
@@ -519,7 +519,7 @@ build_joinrel_tlist(PlannerInfo *root, RelOptInfo *joinrel,
*
* NB: Formerly, we made deep(!) copies of each input RestrictInfo to pass
* up to the join relation. I believe this is no longer necessary, because
- * RestrictInfo nodes are no longer context-dependent. Instead, just include
+ * RestrictInfo nodes are no longer context-dependent. Instead, just include
* the original nodes in the lists made for the join relation.
*/
static List *
@@ -539,7 +539,7 @@ build_joinrel_restrictlist(PlannerInfo *root,
result = subbuild_joinrel_restrictlist(joinrel, inner_rel->joininfo, result);
/*
- * Add on any clauses derived from EquivalenceClasses. These cannot be
+ * Add on any clauses derived from EquivalenceClasses. These cannot be
* redundant with the clauses in the joininfo lists, so don't bother
* checking.
*/
diff --git a/src/backend/optimizer/util/restrictinfo.c b/src/backend/optimizer/util/restrictinfo.c
index 93f9aa846a6..65f54ab2566 100644
--- a/src/backend/optimizer/util/restrictinfo.c
+++ b/src/backend/optimizer/util/restrictinfo.c
@@ -152,7 +152,7 @@ make_restrictinfo_from_bitmapqual(Path *bitmapqual,
/*
* Here, we only detect qual-free subplans. A qual-free subplan would
* cause us to generate "... OR true ..." which we may as well reduce
- * to just "true". We do not try to eliminate redundant subclauses
+ * to just "true". We do not try to eliminate redundant subclauses
* because (a) it's not as likely as in the AND case, and (b) we might
* well be working with hundreds or even thousands of OR conditions,
* perhaps from a long IN list. The performance of list_append_unique
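
Aside: the "... OR true ..." reduction mentioned above is ordinary constant folding over an OR list. A minimal standalone sketch, with booleans standing in for subplan quals (a qual-free subplan maps to constant true here; everything is illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    /*
     * Reduce an OR list containing a constant TRUE arm to just TRUE.
     * Returns true if folded to a constant, storing it in *result.
     */
    static bool
    fold_or(const bool *args, int n, bool *result)
    {
        int i;
        for (i = 0; i < n; i++)
            if (args[i])            /* one TRUE arm makes the whole OR true */
            {
                *result = true;
                return true;
            }
        return false;               /* no constant-true arm; keep the OR */
    }

    int
    main(void)
    {
        bool arms[] = {false, true, false};   /* middle arm is qual-free */
        bool result;

        if (fold_or(arms, 3, &result))
            printf("folded to constant %s\n", result ? "true" : "false");
        return 0;
    }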
@@ -249,7 +249,7 @@ make_restrictinfo_from_bitmapqual(Path *bitmapqual,
* We know that the index predicate must have been implied by
* the query condition as a whole, but it may or may not be
* implied by the conditions that got pushed into the
- * bitmapqual. Avoid generating redundant conditions.
+ * bitmapqual. Avoid generating redundant conditions.
*/
if (!predicate_implied_by(list_make1(pred), result))
result = lappend(result,
@@ -391,7 +391,7 @@ make_restrictinfo_internal(Expr *clause,
/*
* Fill in all the cacheable fields with "not yet set" markers. None of
- * these will be computed until/unless needed. Note in particular that we
+ * these will be computed until/unless needed. Note in particular that we
* don't mark a binary opclause as mergejoinable or hashjoinable here;
* that happens only if it appears in the right context (top level of a
* joinclause list).
@@ -669,7 +669,7 @@ select_nonredundant_join_clauses(PlannerInfo *root,
* OK because we're only trying to prove we can dispense with some
* join quals; failing to prove that doesn't result in an incorrect
* plan. It's quite unlikely that a join qual could be proven
- * redundant by an index predicate anyway. (Also, if we did manage to
+ * redundant by an index predicate anyway. (Also, if we did manage to
* prove it, we'd have to have a special case for update targets; see
* notes about EvalPlanQual testing in create_indexscan_plan().)
*/
diff --git a/src/backend/optimizer/util/tlist.c b/src/backend/optimizer/util/tlist.c
index c98c767f310..52c62a7f3a7 100644
--- a/src/backend/optimizer/util/tlist.c
+++ b/src/backend/optimizer/util/tlist.c
@@ -27,7 +27,7 @@
/*
* tlist_member
* Finds the (first) member of the given tlist whose expression is
- * equal() to the given expression. Result is NULL if no such member.
+ * equal() to the given expression. Result is NULL if no such member.
*/
TargetEntry *
tlist_member(Node *node, List *targetlist)
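
Aside: tlist_member's contract as stated above is a first-match linear search. A standalone C sketch, with strcmp() standing in for the node-tree equal() test (the list layout is illustrative):

    #include <stdio.h>
    #include <string.h>

    /* Stand-in for a targetlist entry: an expression named by a string. */
    typedef struct TargetEntry {
        const char *expr;
        struct TargetEntry *next;
    } TargetEntry;

    /*
     * Return the first member whose expression compares equal to the key,
     * or NULL if there is none -- the same contract the comment states.
     */
    static TargetEntry *
    tlist_member(const char *key, TargetEntry *tlist)
    {
        TargetEntry *te;
        for (te = tlist; te != NULL; te = te->next)
            if (strcmp(te->expr, key) == 0)
                return te;
        return NULL;
    }

    int
    main(void)
    {
        TargetEntry c = {"a + b", NULL};
        TargetEntry b = {"x", &c};
        TargetEntry a = {"a", &b};

        printf("found: %s\n", tlist_member("a + b", &a) ? "yes" : "no");
        printf("found: %s\n", tlist_member("a - b", &a) ? "yes" : "no");
        return 0;
    }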
diff --git a/src/backend/optimizer/util/var.c b/src/backend/optimizer/util/var.c
index e5fa48d3644..a2111c30b06 100644
--- a/src/backend/optimizer/util/var.c
+++ b/src/backend/optimizer/util/var.c
@@ -261,7 +261,7 @@ contain_var_clause_walker(Node *node, void *context)
*
* Returns true if any such Var found.
*
- * Will recurse into sublinks. Also, may be invoked directly on a Query.
+ * Will recurse into sublinks. Also, may be invoked directly on a Query.
*/
bool
contain_vars_of_level(Node *node, int levelsup)
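
Aside: the walkers in this file share one shape: descend everywhere, report true as soon as a qualifying Var is found. A toy standalone sketch of that shape (the real walker also adjusts the level when crossing into a sublink's subquery, which is omitted here; the node layout is illustrative):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Toy expression tree: a Var carries a level; other nodes have children. */
    typedef struct Node {
        bool is_var;
        int varlevelsup;            /* meaningful only for Vars */
        struct Node *left, *right;  /* children for non-Vars    */
    } Node;

    /* Report true as soon as any Var of the requested level is found. */
    static bool
    contains_var_of_level(const Node *node, int levelsup)
    {
        if (node == NULL)
            return false;
        if (node->is_var)
            return node->varlevelsup == levelsup;
        return contains_var_of_level(node->left, levelsup) ||
               contains_var_of_level(node->right, levelsup);
    }

    int
    main(void)
    {
        Node v0 = {true, 0, NULL, NULL};
        Node v1 = {true, 1, NULL, NULL};
        Node op = {false, 0, &v0, &v1};

        printf("level 0: %d\n", contains_var_of_level(&op, 0));
        printf("level 2: %d\n", contains_var_of_level(&op, 2));
        return 0;
    }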
@@ -321,10 +321,10 @@ contain_vars_of_level_walker(Node *node, int *sublevels_up)
* Find the parse location of any Var of the specified query level.
*
* Returns -1 if no such Var is in the querytree, or if they all have
- * unknown parse location. (The former case is probably caller error,
+ * unknown parse location. (The former case is probably caller error,
* but we don't bother to distinguish it from the latter case.)
*
- * Will recurse into sublinks. Also, may be invoked directly on a Query.
+ * Will recurse into sublinks. Also, may be invoked directly on a Query.
*
* Note: it might seem appropriate to merge this functionality into
* contain_vars_of_level, but that would complicate that function's API.
@@ -397,7 +397,7 @@ locate_var_of_level_walker(Node *node,
* Returns -1 if no such Var is in the querytree, or if they all have
* unknown parse location.
*
- * Will recurse into sublinks. Also, may be invoked directly on a Query.
+ * Will recurse into sublinks. Also, may be invoked directly on a Query.
*/
int
locate_var_of_relation(Node *node, int relid, int levelsup)
@@ -471,7 +471,7 @@ locate_var_of_relation_walker(Node *node,
*
* -1 is returned if the clause has no variables at all.
*
- * Will recurse into sublinks. Also, may be invoked directly on a Query.
+ * Will recurse into sublinks. Also, may be invoked directly on a Query.
*/
int
find_minimum_var_level(Node *node)
@@ -634,7 +634,7 @@ find_minimum_var_level_walker(Node *node,
* Upper-level vars (with varlevelsup > 0) should not be seen here,
* likewise for upper-level Aggrefs and PlaceHolderVars.
*
- * Returns list of nodes found. Note the nodes themselves are not
+ * Returns list of nodes found. Note the nodes themselves are not
* copied, only referenced.
*
* Does not examine subqueries, therefore must only be used after reduction
@@ -711,7 +711,7 @@ pull_var_clause_walker(Node *node, pull_var_clause_context *context)
* flatten_join_alias_vars
* Replace Vars that reference JOIN outputs with references to the original
* relation variables instead. This allows quals involving such vars to be
- * pushed down. Whole-row Vars that reference JOIN relations are expanded
+ * pushed down. Whole-row Vars that reference JOIN relations are expanded
* into RowExpr constructs that name the individual output Vars. This
* is necessary since we will not scan the JOIN as a base relation, which
* is the only way that the executor can directly handle whole-row Vars.
@@ -723,7 +723,7 @@ pull_var_clause_walker(Node *node, pull_var_clause_context *context)
* entries might now be arbitrary expressions, not just Vars. This affects
* this function in one important way: we might find ourselves inserting
* SubLink expressions into subqueries, and we must make sure that their
- * Query.hasSubLinks fields get set to TRUE if so. If there are any
+ * Query.hasSubLinks fields get set to TRUE if so. If there are any
* SubLinks in the join alias lists, the outer Query should already have
* hasSubLinks = TRUE, so this is only relevant to un-flattened subqueries.
*
diff --git a/src/backend/parser/analyze.c b/src/backend/parser/analyze.c
index 973639b48fa..58ce60936ed 100644
--- a/src/backend/parser/analyze.c
+++ b/src/backend/parser/analyze.c
@@ -75,7 +75,7 @@ static void transformLockingClause(ParseState *pstate, Query *qry,
* Optionally, information about $n parameter types can be supplied.
* References to $n indexes not defined by paramTypes[] are disallowed.
*
- * The result is a Query node. Optimizable statements require considerable
+ * The result is a Query node. Optimizable statements require considerable
* transformation, while utility-type statements are simply hung off
* a dummy CMD_UTILITY Query node.
*/
@@ -385,7 +385,7 @@ transformInsertStmt(ParseState *pstate, InsertStmt *stmt)
/*
* If a non-nil rangetable/namespace was passed in, and we are doing
* INSERT/SELECT, arrange to pass the rangetable/namespace down to the
- * SELECT. This can only happen if we are inside a CREATE RULE, and in
+ * SELECT. This can only happen if we are inside a CREATE RULE, and in
* that case we want the rule's OLD and NEW rtable entries to appear as
* part of the SELECT's rtable, not as outer references for it. (Kluge!)
* The SELECT's joinlist is not affected however. We must do this before
@@ -576,7 +576,7 @@ transformInsertStmt(ParseState *pstate, InsertStmt *stmt)
* We must assign collations now because assign_query_collations
* doesn't process rangetable entries. We just assign all the
* collations independently in each row, and don't worry about
- * whether they are consistent vertically. The outer INSERT query
+ * whether they are consistent vertically. The outer INSERT query
* isn't going to care about the collations of the VALUES columns,
* so it's not worth the effort to identify a common collation for
* each one here. (But note this does have one user-visible
@@ -612,7 +612,7 @@ transformInsertStmt(ParseState *pstate, InsertStmt *stmt)
* Another thing we can't currently support is NEW/OLD references in
* rules --- seems we'd need something like SQL99's LATERAL construct
* to ensure that the values would be available while evaluating the
- * VALUES RTE. This is a shame. FIXME
+ * VALUES RTE. This is a shame. FIXME
*/
if (list_length(pstate->p_rtable) != 1 &&
contain_vars_of_level((Node *) exprsLists, 0))
@@ -649,7 +649,7 @@ transformInsertStmt(ParseState *pstate, InsertStmt *stmt)
* INSERT INTO foo VALUES(bar.*)
*
* The sublist is just computed directly as the Query's targetlist,
- * with no VALUES RTE. So it works just like SELECT without FROM.
+ * with no VALUES RTE. So it works just like SELECT without FROM.
*----------
*/
List *valuesLists = selectStmt->valuesLists;
@@ -757,7 +757,7 @@ transformInsertRow(ParseState *pstate, List *exprlist,
* Check length of expr list. It must not have more expressions than
* there are target columns. We allow fewer, but only if no explicit
* columns list was given (the remaining columns are implicitly
- * defaulted). Note we must check this *after* transformation because
+ * defaulted). Note we must check this *after* transformation because
* that could expand '*' into multiple items.
*/
if (list_length(exprlist) > list_length(icolumns))
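
Aside: the length rule the comment spells out reduces to two comparisons. A standalone C sketch of just that validation (message wording and names are illustrative):

    #include <stdio.h>

    /*
     * An INSERT's expression list may not be longer than the target-column
     * list, and may be shorter only when no explicit column list was given
     * (the trailing columns are then implicitly defaulted).
     */
    static const char *
    check_insert_row(int nexprs, int ncolumns, int explicit_cols)
    {
        if (nexprs > ncolumns)
            return "ERROR: INSERT has more expressions than target columns";
        if (explicit_cols && nexprs < ncolumns)
            return "ERROR: INSERT has more target columns than expressions";
        return "ok";
    }

    int
    main(void)
    {
        printf("%s\n", check_insert_row(3, 2, 0));   /* too many exprs   */
        printf("%s\n", check_insert_row(2, 3, 1));   /* short, explicit  */
        printf("%s\n", check_insert_row(2, 3, 0));   /* short, defaulted */
        return 0;
    }

Note the check runs after transformation, as the comment says, because '*' may have expanded into multiple items by then.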
@@ -826,7 +826,7 @@ transformInsertRow(ParseState *pstate, List *exprlist,
* return -1 if expression isn't a RowExpr or a Var referencing one.
*
* This is currently used only for hint purposes, so we aren't terribly
- * tense about recognizing all possible cases. The Var case is interesting
+ * tense about recognizing all possible cases. The Var case is interesting
* because that's what we'll get in the INSERT ... SELECT (...) case.
*/
static int
@@ -1410,7 +1410,7 @@ transformSetOperationStmt(ParseState *pstate, SelectStmt *stmt)
/*
* As a first step towards supporting sort clauses that are expressions
* using the output columns, generate a varnamespace entry that makes the
- * output columns visible. A Join RTE node is handy for this, since we
+ * output columns visible. A Join RTE node is handy for this, since we
* can easily control the Vars generated upon matches.
*
* Note: we don't yet do anything useful with such cases, but at least
@@ -1504,7 +1504,7 @@ transformSetOperationStmt(ParseState *pstate, SelectStmt *stmt)
* Recursively transform leaves and internal nodes of a set-op tree
*
* In addition to returning the transformed node, if targetlist isn't NULL
- * then we return a list of its non-resjunk TargetEntry nodes. For a leaf
+ * then we return a list of its non-resjunk TargetEntry nodes. For a leaf
* set-op node these are the actual targetlist entries; otherwise they are
* dummy entries created to carry the type, typmod, collation, and location
* (for error messages) of each output column of the set-op node. This info
@@ -1718,7 +1718,7 @@ transformSetOperationTree(ParseState *pstate, SelectStmt *stmt,
rescoltypmod = -1;
/*
- * Verify the coercions are actually possible. If not, we'd fail
+ * Verify the coercions are actually possible. If not, we'd fail
* later anyway, but we want to fail now while we have sufficient
* context to produce an error cursor position.
*
@@ -1727,7 +1727,7 @@ transformSetOperationTree(ParseState *pstate, SelectStmt *stmt,
* child query's semantics.
*
* If a child expression is an UNKNOWN-type Const or Param, we
- * want to replace it with the coerced expression. This can only
+ * want to replace it with the coerced expression. This can only
* happen when the child is a leaf set-op node. It's safe to
* replace the expression because if the child query's semantics
* depended on the type of this output column, it'd have already
diff --git a/src/backend/parser/kwlookup.c b/src/backend/parser/kwlookup.c
index 1d8b7283c1f..b9796d1a8ca 100644
--- a/src/backend/parser/kwlookup.c
+++ b/src/backend/parser/kwlookup.c
@@ -52,7 +52,7 @@ ScanKeywordLookup(const char *text,
return NULL;
/*
- * Apply an ASCII-only downcasing. We must not use tolower() since it may
+ * Apply an ASCII-only downcasing. We must not use tolower() since it may
* produce the wrong translation in some locales (eg, Turkish).
*/
for (i = 0; i < len; i++)
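
Aside: the locale pitfall named above (Turkish dotless-I, where tolower('I') is not 'i') is why the downcasing is hand-rolled over the ASCII range only. A standalone C sketch of that downcase plus the binary search that follows it; the tiny keyword table is illustrative, not the real grammar's list:

    #include <stdio.h>
    #include <string.h>

    #define MAX_KW_LEN 16

    /* Must be kept in sorted order for the binary search. */
    static const char *keywords[] = {"insert", "into", "select", "values"};

    static const char *
    keyword_lookup(const char *text)
    {
        char word[MAX_KW_LEN + 1];
        size_t len = strlen(text);
        size_t i;
        int lo = 0, hi = (int) (sizeof(keywords) / sizeof(keywords[0])) - 1;

        if (len > MAX_KW_LEN)
            return NULL;                /* too long to be any keyword */
        for (i = 0; i < len; i++)
        {
            char ch = text[i];
            if (ch >= 'A' && ch <= 'Z')
                ch += 'a' - 'A';        /* ASCII-only; never tolower() */
            word[i] = ch;
        }
        word[len] = '\0';

        while (lo <= hi)
        {
            int mid = (lo + hi) / 2;
            int cmp = strcmp(word, keywords[mid]);
            if (cmp == 0)
                return keywords[mid];
            if (cmp < 0)
                hi = mid - 1;
            else
                lo = mid + 1;
        }
        return NULL;
    }

    int
    main(void)
    {
        printf("%s\n", keyword_lookup("SELECT") ? "keyword" : "identifier");
        printf("%s\n", keyword_lookup("Foo") ? "keyword" : "identifier");
        return 0;
    }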
diff --git a/src/backend/parser/parse_agg.c b/src/backend/parser/parse_agg.c
index 83561337966..495cefb6aa8 100644
--- a/src/backend/parser/parse_agg.c
+++ b/src/backend/parser/parse_agg.c
@@ -56,7 +56,7 @@ static bool check_ungrouped_columns_walker(Node *node,
*
* Here we convert the args list into a targetlist by inserting TargetEntry
* nodes, and then transform the aggorder and agg_distinct specifications to
- * produce lists of SortGroupClause nodes. (That might also result in adding
+ * produce lists of SortGroupClause nodes. (That might also result in adding
* resjunk expressions to the targetlist.)
*
* We must also determine which query level the aggregate actually belongs to,
@@ -152,7 +152,7 @@ transformAggregateCall(ParseState *pstate, Aggref *agg,
/*
* An aggregate can't directly contain another aggregate call of the same
- * level (though outer aggs are okay). We can skip this check if we
+ * level (though outer aggs are okay). We can skip this check if we
* didn't find any local vars or aggs.
*/
if (min_varlevel == 0)
@@ -369,7 +369,7 @@ parseCheckAggregates(ParseState *pstate, Query *qry)
/*
* If there are join alias vars involved, we have to flatten them to the
* underlying vars, so that aliased and unaliased vars will be correctly
- * taken as equal. We can skip the expense of doing this if no rangetable
+ * taken as equal. We can skip the expense of doing this if no rangetable
* entries are RTE_JOIN kind. We use the planner's flatten_join_alias_vars
* routine to do the flattening; it wants a PlannerInfo root node, which
* fortunately can be mostly dummy.
@@ -407,7 +407,7 @@ parseCheckAggregates(ParseState *pstate, Query *qry)
*
* Note: because we check resjunk tlist elements as well as regular ones,
* this will also find ungrouped variables that came from ORDER BY and
- * WINDOW clauses. For that matter, it's also going to examine the
+ * WINDOW clauses. For that matter, it's also going to examine the
* grouping expressions themselves --- but they'll all pass the test ...
*/
clause = (Node *) qry->targetList;
@@ -440,7 +440,7 @@ parseCheckAggregates(ParseState *pstate, Query *qry)
* Check for window functions where they shouldn't be.
*
* We have to forbid window functions in WHERE, JOIN/ON, HAVING, GROUP BY,
- * and window specifications. (Other clauses, such as RETURNING and LIMIT,
+ * and window specifications. (Other clauses, such as RETURNING and LIMIT,
* have already been checked.) Transformation of all these clauses must
* be completed already.
*/
@@ -600,7 +600,7 @@ check_ungrouped_columns_walker(Node *node,
/*
* If we have an ungrouped Var of the original query level, we have a
* failure. Vars below the original query level are not a problem, and
- * neither are Vars from above it. (If such Vars are ungrouped as far as
+ * neither are Vars from above it. (If such Vars are ungrouped as far as
* their own query level is concerned, that's someone else's problem...)
*/
if (IsA(node, Var))
@@ -631,7 +631,7 @@ check_ungrouped_columns_walker(Node *node,
/*
* Check whether the Var is known functionally dependent on the GROUP
- * BY columns. If so, we can allow the Var to be used, because the
+ * BY columns. If so, we can allow the Var to be used, because the
* grouping is really a no-op for this table. However, this deduction
* depends on one or more constraints of the table, so we have to add
* those constraints to the query's constraintDeps list, because it's
diff --git a/src/backend/parser/parse_clause.c b/src/backend/parser/parse_clause.c
index 8f697f85ec5..692207fe1cf 100644
--- a/src/backend/parser/parse_clause.c
+++ b/src/backend/parser/parse_clause.c
@@ -153,7 +153,7 @@ transformFromClause(ParseState *pstate, List *frmList)
*
* If alsoSource is true, add the target to the query's joinlist and
* namespace. For INSERT, we don't want the target to be joined to;
- * it's a destination of tuples, not a source. For UPDATE/DELETE,
+ * it's a destination of tuples, not a source. For UPDATE/DELETE,
* we do need to scan or join the target. (NOTE: we do not bother
* to check for namespace conflict; we assume that the namespace was
* initially empty in these cases.)
@@ -219,7 +219,7 @@ setTargetTable(ParseState *pstate, RangeVar *relation,
* Simplify InhOption (yes/no/default) into boolean yes/no.
*
* The reason we do things this way is that we don't want to examine the
- * SQL_inheritance option flag until parse_analyze() is run. Otherwise,
+ * SQL_inheritance option flag until parse_analyze() is run. Otherwise,
* we'd do the wrong thing with query strings that intermix SET commands
* with queries.
*/
@@ -410,7 +410,7 @@ transformJoinOnClause(ParseState *pstate, JoinExpr *j,
* rels outside the input subtrees of the JOIN. It could do that despite
* our hack on the namespace if it uses fully-qualified names. So, grovel
* through the transformed clause and make sure there are no bogus
- * references. (Outer references are OK, and are ignored here.)
+ * references. (Outer references are OK, and are ignored here.)
*/
clause_varnos = pull_varnos(result);
clause_varnos = bms_del_members(clause_varnos, containedRels);
@@ -489,7 +489,7 @@ transformRangeSubselect(ParseState *pstate, RangeSubselect *r)
isLockedRefname(pstate, r->alias->aliasname));
/*
- * Check that we got something reasonable. Many of these conditions are
+ * Check that we got something reasonable. Many of these conditions are
* impossible given restrictions of the grammar, but check 'em anyway.
*/
if (!IsA(query, Query) ||
@@ -547,7 +547,7 @@ transformRangeFunction(ParseState *pstate, RangeFunction *r)
/*
* Get function name for possible use as alias. We use the same
- * transformation rules as for a SELECT output expression. For a FuncCall
+ * transformation rules as for a SELECT output expression. For a FuncCall
* node, the result will be the function name, but it is possible for the
* grammar to hand back other node types.
*/
@@ -565,7 +565,7 @@ transformRangeFunction(ParseState *pstate, RangeFunction *r)
/*
* The function parameters cannot make use of any variables from other
- * FROM items. (Compare to transformRangeSubselect(); the coding is
+ * FROM items. (Compare to transformRangeSubselect(); the coding is
* different though because we didn't parse as a sub-select with its own
* level of namespace.)
*
@@ -583,7 +583,7 @@ transformRangeFunction(ParseState *pstate, RangeFunction *r)
}
/*
- * Disallow aggregate functions in the expression. (No reason to postpone
+ * Disallow aggregate functions in the expression. (No reason to postpone
* this check until parseCheckAggregates.)
*/
if (pstate->p_hasAggs &&
@@ -642,7 +642,7 @@ transformRangeFunction(ParseState *pstate, RangeFunction *r)
* (We could extract this from the function return node, but it saves cycles
* to pass it back separately.)
*
- * *top_rti: receives the rangetable index of top_rte. (Ditto.)
+ * *top_rti: receives the rangetable index of top_rte. (Ditto.)
*
* *relnamespace: receives a List of the RTEs exposed as relation names
* by this item.
@@ -1263,9 +1263,9 @@ checkExprIsVarFree(ParseState *pstate, Node *n, const char *constructName)
*
* This function supports the old SQL92 ORDER BY interpretation, where the
* expression is an output column name or number. If we fail to find a
- * match of that sort, we fall through to the SQL99 rules. For historical
+ * match of that sort, we fall through to the SQL99 rules. For historical
* reasons, Postgres also allows this interpretation for GROUP BY, though
- * the standard never did. However, for GROUP BY we prefer a SQL99 match.
+ * the standard never did. However, for GROUP BY we prefer a SQL99 match.
* This function is *not* used for WINDOW definitions.
*
* node the ORDER BY, GROUP BY, or DISTINCT ON expression to be matched
@@ -1283,7 +1283,7 @@ findTargetlistEntrySQL92(ParseState *pstate, Node *node, List **tlist,
*
* 1. Bare ColumnName (no qualifier or subscripts)
* For a bare identifier, we search for a matching column name
- * in the existing target list. Multiple matches are an error
+ * in the existing target list. Multiple matches are an error
* unless they refer to identical values; for example,
* we allow SELECT a, a FROM table ORDER BY a
* but not SELECT a AS b, b FROM table ORDER BY b
@@ -1292,7 +1292,7 @@ findTargetlistEntrySQL92(ParseState *pstate, Node *node, List **tlist,
* For GROUP BY, it is incorrect to match the grouping item against
* targetlist entries: according to SQL92, an identifier in GROUP BY
* is a reference to a column name exposed by FROM, not to a target
- * list column. However, many implementations (including pre-7.0
+ * list column. However, many implementations (including pre-7.0
* PostgreSQL) accept this anyway. So for GROUP BY, we look first
* to see if the identifier matches any FROM column name, and only
* try for a targetlist name if it doesn't. This ensures that we
@@ -1439,7 +1439,7 @@ findTargetlistEntrySQL99(ParseState *pstate, Node *node, List **tlist)
/*
* Convert the untransformed node to a transformed expression, and search
* for a match in the tlist. NOTE: it doesn't really matter whether there
- * is more than one match. Also, we are willing to match an existing
+ * is more than one match. Also, we are willing to match an existing
* resjunk target here, though the SQL92 cases above must ignore resjunk
* targets.
*/
@@ -1467,7 +1467,7 @@ findTargetlistEntrySQL99(ParseState *pstate, Node *node, List **tlist)
/*
* If no matches, construct a new target entry which is appended to the
- * end of the target list. This target is given resjunk = TRUE so that it
+ * end of the target list. This target is given resjunk = TRUE so that it
* will not be projected into the final tuple.
*/
target_result = transformTargetEntry(pstate, node, expr, NULL, true);
@@ -1672,7 +1672,7 @@ transformWindowDefinitions(ParseState *pstate,
* <window clause> syntax rule 10 and general rule 1. The frame
* clause rule is especially bizarre because it makes "OVER foo"
* different from "OVER (foo)", and requires the latter to throw an
- * error if foo has a nondefault frame clause. Well, ours not to
+ * error if foo has a nondefault frame clause. Well, ours not to
* reason why, but we do go out of our way to throw a useful error
* message for such cases.
*/
@@ -1775,7 +1775,7 @@ transformDistinctClause(ParseState *pstate,
/*
* The distinctClause should consist of all ORDER BY items followed by all
- * other non-resjunk targetlist items. There must not be any resjunk
+ * other non-resjunk targetlist items. There must not be any resjunk
* ORDER BY items --- that would imply that we are sorting by a value that
* isn't necessarily unique within a DISTINCT group, so the results
* wouldn't be well-defined. This construction ensures we follow the rule
@@ -1898,7 +1898,7 @@ transformDistinctOnClause(ParseState *pstate, List *distinctlist,
/*
* Now add any remaining DISTINCT ON items, using default sort/group
- * semantics for their data types. (Note: this is pretty questionable; if
+ * semantics for their data types. (Note: this is pretty questionable; if
* the ORDER BY list doesn't include all the DISTINCT ON items and more
* besides, you certainly aren't using DISTINCT ON in the intended way,
* and you probably aren't going to get consistent results. It might be
diff --git a/src/backend/parser/parse_coerce.c b/src/backend/parser/parse_coerce.c
index bfd379323a5..c1bff328aa7 100644
--- a/src/backend/parser/parse_coerce.c
+++ b/src/backend/parser/parse_coerce.c
@@ -57,12 +57,12 @@ static bool typeIsOfTypedTable(Oid reltypeId, Oid reloftypeId);
* Convert an expression to a target type and typmod.
*
* This is the general-purpose entry point for arbitrary type coercion
- * operations. Direct use of the component operations can_coerce_type,
+ * operations. Direct use of the component operations can_coerce_type,
* coerce_type, and coerce_type_typmod should be restricted to special
* cases (eg, when the conversion is expected to succeed).
*
* Returns the possibly-transformed expression tree, or NULL if the type
- * conversion is not possible. (We do this, rather than ereport'ing directly,
+ * conversion is not possible. (We do this, rather than ereport'ing directly,
* so that callers can generate custom error messages indicating context.)
*
* pstate - parse state (can be NULL, see coerce_type)
@@ -146,7 +146,7 @@ coerce_to_target_type(ParseState *pstate, Node *expr, Oid exprtype,
* already be properly coerced to the specified typmod.
*
* pstate is only used in the case that we are able to resolve the type of
- * a previously UNKNOWN Param. It is okay to pass pstate = NULL if the
+ * a previously UNKNOWN Param. It is okay to pass pstate = NULL if the
* caller does not want type information updated for Params.
*
* Note: this function must not modify the given expression tree, only add
@@ -176,7 +176,7 @@ coerce_type(ParseState *pstate, Node *node,
*
* Note: by returning the unmodified node here, we are saying that
* it's OK to treat an UNKNOWN constant as a valid input for a
- * function accepting ANY, ANYELEMENT, or ANYNONARRAY. This should be
+ * function accepting ANY, ANYELEMENT, or ANYNONARRAY. This should be
* all right, since an UNKNOWN value is still a perfectly valid Datum.
*
* NB: we do NOT want a RelabelType here: the exposed type of the
@@ -193,7 +193,7 @@ coerce_type(ParseState *pstate, Node *node,
*
* These cases are unlike the ones above because the exposed type of
* the argument must be an actual array or enum type. In particular
- * the argument must *not* be an UNKNOWN constant. If it is, we just
+ * the argument must *not* be an UNKNOWN constant. If it is, we just
* fall through; below, we'll call anyarray_in or anyenum_in, which
* will produce an error. Also, if what we have is a domain over
* array or enum, we have to relabel it to its base type.
@@ -249,7 +249,7 @@ coerce_type(ParseState *pstate, Node *node,
/*
* If the target type is a domain, we want to call its base type's
- * input routine, not domain_in(). This is to avoid premature failure
+ * input routine, not domain_in(). This is to avoid premature failure
* when the domain applies a typmod: existing input routines follow
* implicit-coercion semantics for length checks, which is not always
* what we want here. The needed check will be applied properly
@@ -262,7 +262,7 @@ coerce_type(ParseState *pstate, Node *node,
* For most types we pass typmod -1 to the input routine, because
* existing input routines follow implicit-coercion semantics for
* length checks, which is not always what we want here. Any length
- * constraint will be applied later by our caller. An exception
+ * constraint will be applied later by our caller. An exception
* however is the INTERVAL type, for which we *must* pass the typmod
* or it won't be able to obey the bizarre SQL-spec input rules. (Ugly
* as sin, but so is this part of the spec...)
@@ -342,7 +342,7 @@ coerce_type(ParseState *pstate, Node *node,
{
/*
* If we have a COLLATE clause, we have to push the coercion
- * underneath the COLLATE. This is really ugly, but there is little
+ * underneath the COLLATE. This is really ugly, but there is little
* choice because the above hacks on Consts and Params wouldn't happen
* otherwise. This kluge has consequences in coerce_to_target_type.
*/
@@ -365,7 +365,7 @@ coerce_type(ParseState *pstate, Node *node,
{
/*
* Generate an expression tree representing run-time application
- * of the conversion function. If we are dealing with a domain
+ * of the conversion function. If we are dealing with a domain
* target type, the conversion function will yield the base type,
* and we need to extract the correct typmod to use from the
* domain's typtypmod.
@@ -401,7 +401,7 @@ coerce_type(ParseState *pstate, Node *node,
* to have the intended type when inspected by higher-level code.
*
* Also, domains may have value restrictions beyond the base type
- * that must be accounted for. If the destination is a domain
+ * that must be accounted for. If the destination is a domain
* then we won't need a RelabelType node.
*/
result = coerce_to_domain(node, InvalidOid, -1, targetTypeId,
@@ -648,7 +648,7 @@ coerce_to_domain(Node *arg, Oid baseTypeId, int32 baseTypeMod, Oid typeId,
}
/*
- * Now build the domain coercion node. This represents run-time checking
+ * Now build the domain coercion node. This represents run-time checking
* of any constraints currently attached to the domain. This also ensures
* that the expression is properly labeled as to result type.
*/
@@ -721,7 +721,7 @@ coerce_type_typmod(Node *node, Oid targetTypeId, int32 targetTypMod,
* Mark a coercion node as IMPLICIT so it will never be displayed by
* ruleutils.c. We use this when we generate a nest of coercion nodes
* to implement what is logically one conversion; the inner nodes are
- * forced to IMPLICIT_CAST format. This does not change their semantics,
+ * forced to IMPLICIT_CAST format. This does not change their semantics,
* only display behavior.
*
* It is caller error to call this on something that doesn't have a
@@ -1180,7 +1180,7 @@ select_common_type(ParseState *pstate, List *exprs, const char *context,
}
/*
- * Nope, so set up for the full algorithm. Note that at this point, lc
+ * Nope, so set up for the full algorithm. Note that at this point, lc
* points to the first list item with type different from pexpr's; we need
* not re-examine any items the previous loop advanced over.
*/
@@ -1470,19 +1470,19 @@ check_generic_type_consistency(Oid *actual_arg_types,
* is an extra restriction if not.)
*
* Domains over arrays match ANYARRAY arguments, and are immediately flattened
- * to their base type. (In particular, if the return type is also ANYARRAY,
+ * to their base type. (In particular, if the return type is also ANYARRAY,
* we'll set it to the base type not the domain type.)
*
* When allow_poly is false, we are not expecting any of the actual_arg_types
* to be polymorphic, and we should not return a polymorphic result type
- * either. When allow_poly is true, it is okay to have polymorphic "actual"
+ * either. When allow_poly is true, it is okay to have polymorphic "actual"
* arg types, and we can return ANYARRAY or ANYELEMENT as the result. (This
* case is currently used only to check compatibility of an aggregate's
* declaration with the underlying transfn.)
*
* A special case is that we could see ANYARRAY as an actual_arg_type even
* when allow_poly is false (this is possible only because pg_statistic has
- * columns shown as anyarray in the catalogs). We allow this to match a
+ * columns shown as anyarray in the catalogs). We allow this to match a
* declared ANYARRAY argument, but only if there is no ANYELEMENT argument
* or result (since we can't determine a specific element type to match to
* ANYELEMENT). Note this means that functions taking ANYARRAY had better
@@ -1566,7 +1566,7 @@ enforce_generic_type_consistency(Oid *actual_arg_types,
/*
* Fast Track: if none of the arguments are polymorphic, return the
- * unmodified rettype. We assume it can't be polymorphic either.
+ * unmodified rettype. We assume it can't be polymorphic either.
*/
if (!have_generics)
return rettype;
@@ -1831,8 +1831,8 @@ IsPreferredType(TYPCATEGORY category, Oid type)
* Check if srctype is binary-coercible to targettype.
*
* This notion allows us to cheat and directly exchange values without
- * going through the trouble of calling a conversion function. Note that
- * in general, this should only be an implementation shortcut. Before 7.4,
+ * going through the trouble of calling a conversion function. Note that
+ * in general, this should only be an implementation shortcut. Before 7.4,
* this was also used as a heuristic for resolving overloaded functions and
* operators, but that's basically a bad idea.
*
@@ -1845,7 +1845,7 @@ IsPreferredType(TYPCATEGORY category, Oid type)
* types.
*
* This function replaces IsBinaryCompatible(), which was an inherently
- * symmetric test. Since the pg_cast entries aren't necessarily symmetric,
+ * symmetric test. Since the pg_cast entries aren't necessarily symmetric,
* the order of the operands is now significant.
*/
bool
@@ -2022,7 +2022,7 @@ find_coercion_pathway(Oid targetTypeId, Oid sourceTypeId,
* Hack: disallow coercions to oidvector and int2vector, which
* otherwise tend to capture coercions that should go to "real" array
* types. We want those types to be considered "real" arrays for many
- * purposes, but not this one. (Also, ArrayCoerceExpr isn't
+ * purposes, but not this one. (Also, ArrayCoerceExpr isn't
* guaranteed to produce an output that meets the restrictions of
* these datatypes, such as being 1-dimensional.)
*/
diff --git a/src/backend/parser/parse_collate.c b/src/backend/parser/parse_collate.c
index ec606ebcac7..b8c835b91f7 100644
--- a/src/backend/parser/parse_collate.c
+++ b/src/backend/parser/parse_collate.c
@@ -14,19 +14,19 @@
* 1. The output collation of each expression node, or InvalidOid if it
* returns a noncollatable data type. This can also be InvalidOid if the
* result type is collatable but the collation is indeterminate.
- * 2. The collation to be used in executing each function. InvalidOid means
+ * 2. The collation to be used in executing each function. InvalidOid means
* that there are no collatable inputs or their collation is indeterminate.
* This value is only stored in node types that might call collation-using
* functions.
*
* You might think we could get away with storing only one collation per
- * node, but the two concepts really need to be kept distinct. Otherwise
+ * node, but the two concepts really need to be kept distinct. Otherwise
* it's too confusing when a function produces a collatable output type but
* has no collatable inputs or produces noncollatable output from collatable
* inputs.
*
* Cases with indeterminate collation might result in an error being thrown
- * at runtime. If we knew exactly which functions require collation
+ * at runtime. If we knew exactly which functions require collation
* information, we could throw those errors at parse time instead.
*
* Portions Copyright (c) 1996-2011, PostgreSQL Global Development Group
@@ -231,7 +231,7 @@ select_common_collation(ParseState *pstate, List *exprs, bool none_ok)
* Recursive guts of collation processing.
*
* Nodes with no children (eg, Vars, Consts, Params) must have been marked
- * when built. All upper-level nodes are marked here.
+ * when built. All upper-level nodes are marked here.
*
* Note: if this is invoked directly on a List, it will attempt to infer a
* common collation for all the list members. In particular, it will throw
@@ -511,7 +511,7 @@ assign_collations_walker(Node *node, assign_collations_context *context)
/*
* TargetEntry can have only one child, and should bubble that
- * state up to its parent. We can't use the general-case code
+ * state up to its parent. We can't use the general-case code
* below because exprType and friends don't work on TargetEntry.
*/
collation = loccontext.collation;
@@ -526,7 +526,7 @@ assign_collations_walker(Node *node, assign_collations_context *context)
* There are some cases where there might not be a failure, for
* example if the planner chooses to use hash aggregation instead
* of sorting for grouping; but it seems better to predictably
- * throw an error. (Compare transformSetOperationTree, which will
+ * throw an error. (Compare transformSetOperationTree, which will
* throw error for indeterminate collation of set-op columns, even
* though the planner might be able to implement the set-op
* without sorting.)
@@ -564,7 +564,7 @@ assign_collations_walker(Node *node, assign_collations_context *context)
* SubLink. Act as though the Query returns its first output
* column, which indeed is what it does for EXPR_SUBLINK and
* ARRAY_SUBLINK cases. In the cases where the SubLink
- * returns boolean, this info will be ignored. Special case:
+ * returns boolean, this info will be ignored. Special case:
* in EXISTS, the Query might return no columns, in which case
* we need do nothing.
*
diff --git a/src/backend/parser/parse_cte.c b/src/backend/parser/parse_cte.c
index ef96df603c1..5dcea16582c 100644
--- a/src/backend/parser/parse_cte.c
+++ b/src/backend/parser/parse_cte.c
@@ -181,7 +181,7 @@ transformWithClause(ParseState *pstate, WithClause *withClause)
checkWellFormedRecursion(&cstate);
/*
- * Set up the ctenamespace for parse analysis. Per spec, all the WITH
+ * Set up the ctenamespace for parse analysis. Per spec, all the WITH
* items are visible to all others, so stuff them all in before parse
* analysis. We build the list in safe processing order so that the
* planner can process the queries in sequence.
@@ -207,7 +207,7 @@ transformWithClause(ParseState *pstate, WithClause *withClause)
{
/*
* For non-recursive WITH, just analyze each CTE in sequence and then
- * add it to the ctenamespace. This corresponds to the spec's
+ * add it to the ctenamespace. This corresponds to the spec's
* definition of the scope of each WITH name. However, to allow error
* reports to be aware of the possibility of an erroneous reference,
* we maintain a list in p_future_ctes of the not-yet-visible CTEs.
@@ -245,7 +245,7 @@ analyzeCTE(ParseState *pstate, CommonTableExpr *cte)
cte->ctequery = (Node *) query;
/*
- * Check that we got something reasonable. These first two cases should
+ * Check that we got something reasonable. These first two cases should
* be prevented by the grammar.
*/
if (!IsA(query, Query))
@@ -400,7 +400,7 @@ analyzeCTETargetList(ParseState *pstate, CommonTableExpr *cte, List *tlist)
/*
* If the CTE is recursive, force the exposed column type of any
- * "unknown" column to "text". This corresponds to the fact that
+ * "unknown" column to "text". This corresponds to the fact that
* SELECT 'foo' UNION SELECT 'bar' will ultimately produce text. We
* might see "unknown" as a result of an untyped literal in the
* non-recursive term's select list, and if we don't convert to text
diff --git a/src/backend/parser/parse_expr.c b/src/backend/parser/parse_expr.c
index 8bac8824b09..abbe3ef9eeb 100644
--- a/src/backend/parser/parse_expr.c
+++ b/src/backend/parser/parse_expr.c
@@ -483,7 +483,7 @@ transformColumnRef(ParseState *pstate, ColumnRef *cref)
} crerr = CRERR_NO_COLUMN;
/*
- * Give the PreParseColumnRefHook, if any, first shot. If it returns
+ * Give the PreParseColumnRefHook, if any, first shot. If it returns
* non-null then that's all, folks.
*/
if (pstate->p_pre_columnref_hook != NULL)
@@ -554,7 +554,7 @@ transformColumnRef(ParseState *pstate, ColumnRef *cref)
}
/*
- * Try to find the name as a relation. Note that only
+ * Try to find the name as a relation. Note that only
* relations already entered into the rangetable will be
* recognized.
*
@@ -797,7 +797,7 @@ transformParamRef(ParseState *pstate, ParamRef *pref)
Node *result;
/*
- * The core parser knows nothing about Params. If a hook is supplied,
+ * The core parser knows nothing about Params. If a hook is supplied,
* call it. If not, or if the hook returns NULL, throw a generic error.
*/
if (pstate->p_paramref_hook != NULL)
@@ -1097,7 +1097,7 @@ transformAExprIn(ParseState *pstate, A_Expr *a)
* We try to generate a ScalarArrayOpExpr from IN/NOT IN, but this is only
* possible if there is a suitable array type available. If not, we fall
* back to a boolean condition tree with multiple copies of the lefthand
- * expression. Also, any IN-list items that contain Vars are handled as
+ * expression. Also, any IN-list items that contain Vars are handled as
* separate boolean conditions, because that gives the planner more scope
* for optimization on such clauses.
*
@@ -1128,7 +1128,7 @@ transformAExprIn(ParseState *pstate, A_Expr *a)
Oid array_type;
/*
- * Try to select a common type for the array elements. Note that
+ * Try to select a common type for the array elements. Note that
* since the LHS' type is first in the list, it will be preferred when
* there is doubt (eg, when all the RHS items are unknown literals).
*
@@ -1403,7 +1403,7 @@ transformSubLink(ParseState *pstate, SubLink *sublink)
qtree = parse_sub_analyze(sublink->subselect, pstate, NULL, false);
/*
- * Check that we got something reasonable. Many of these conditions are
+ * Check that we got something reasonable. Many of these conditions are
* impossible given restrictions of the grammar, but check 'em anyway.
*/
if (!IsA(qtree, Query) ||
@@ -1812,7 +1812,7 @@ transformXmlExpr(ParseState *pstate, XmlExpr *x)
newx->location = x->location;
/*
- * gram.y built the named args as a list of ResTarget. Transform each,
+ * gram.y built the named args as a list of ResTarget. Transform each,
* and break the names out as a separate list.
*/
newx->named_args = NIL;
@@ -2261,7 +2261,7 @@ make_row_comparison_op(ParseState *pstate, List *opname,
/*
* Now we must determine which row comparison semantics (= <> < <= > >=)
- * apply to this set of operators. We look for btree opfamilies
+ * apply to this set of operators. We look for btree opfamilies
* containing the operators, and see which interpretations (strategy
* numbers) exist for each operator.
*/
diff --git a/src/backend/parser/parse_func.c b/src/backend/parser/parse_func.c
index 75f1e20475d..07e9014765f 100644
--- a/src/backend/parser/parse_func.c
+++ b/src/backend/parser/parse_func.c
@@ -81,7 +81,7 @@ ParseFuncOrColumn(ParseState *pstate, List *funcname, List *fargs,
/*
* Most of the rest of the parser just assumes that functions do not have
- * more than FUNC_MAX_ARGS parameters. We have to test here to protect
+ * more than FUNC_MAX_ARGS parameters. We have to test here to protect
* against array overruns, etc. Of course, this may not be a function,
* but the test doesn't hurt.
*/
@@ -98,7 +98,7 @@ ParseFuncOrColumn(ParseState *pstate, List *funcname, List *fargs,
* Extract arg type info in preparation for function lookup.
*
* If any arguments are Param markers of type VOID, we discard them from
- * the parameter list. This is a hack to allow the JDBC driver to not
+ * the parameter list. This is a hack to allow the JDBC driver to not
* have to distinguish "input" and "output" parameter symbols while
* parsing function-call constructs. We can't use foreach() because we
* may modify the list ...
@@ -308,7 +308,7 @@ ParseFuncOrColumn(ParseState *pstate, List *funcname, List *fargs,
* If there are default arguments, we have to include their types in
* actual_arg_types for the purpose of checking generic type consistency.
* However, we do NOT put them into the generated parse node, because
- * their actual values might change before the query gets run. The
+ * their actual values might change before the query gets run. The
* planner has to insert the up-to-date values at plan time.
*/
nargsplusdefs = nargs;
@@ -404,7 +404,7 @@ ParseFuncOrColumn(ParseState *pstate, List *funcname, List *fargs,
/*
* Reject attempt to call a parameterless aggregate without (*)
- * syntax. This is mere pedantry but some folks insisted ...
+ * syntax. This is mere pedantry but some folks insisted ...
*/
if (fargs == NIL && !agg_star)
ereport(ERROR,
@@ -470,7 +470,7 @@ ParseFuncOrColumn(ParseState *pstate, List *funcname, List *fargs,
/*
* Reject attempt to call a parameterless aggregate without (*)
- * syntax. This is mere pedantry but some folks insisted ...
+ * syntax. This is mere pedantry but some folks insisted ...
*/
if (wfunc->winagg && fargs == NIL && !agg_star)
ereport(ERROR,
@@ -648,7 +648,7 @@ func_select_candidate(int nargs,
* matches" in the exact-match heuristic; it also makes it possible to do
* something useful with the type-category heuristics. Note that this
* makes it difficult, but not impossible, to use functions declared to
- * take a domain as an input datatype. Such a function will be selected
+ * take a domain as an input datatype. Such a function will be selected
* over the base-type function only if it is an exact match at all
* argument positions, and so was already chosen by our caller.
*/
@@ -757,7 +757,7 @@ func_select_candidate(int nargs,
* essentially a special case of the general algorithm we try next.
*
* We do this by examining each unknown argument position to see if we can
- * determine a "type category" for it. If any candidate has an input
+ * determine a "type category" for it. If any candidate has an input
* datatype of STRING category, use STRING category (this bias towards
* STRING is appropriate since unknown-type literals look like strings).
* Otherwise, if all the candidates agree on the type category of this
@@ -768,7 +768,7 @@ func_select_candidate(int nargs,
* the candidates takes a preferred datatype within the category.
*
* Having completed this examination, remove candidates that accept the
- * wrong category at any unknown position. Also, if at least one
+ * wrong category at any unknown position. Also, if at least one
* candidate accepted a preferred type at a position, remove candidates
* that accept non-preferred types.
*
@@ -999,7 +999,7 @@ func_get_detail(List *funcname,
*
* NB: it's important that this code does not exceed what coerce_type
* can do, because the caller will try to apply coerce_type if we
- * return FUNCDETAIL_COERCION. If we return that result for something
+ * return FUNCDETAIL_COERCION. If we return that result for something
* coerce_type can't handle, we'll cause infinite recursion between
* this module and coerce_type!
*/
@@ -1175,7 +1175,7 @@ func_get_detail(List *funcname,
{
/*
* This is a bit tricky in named notation, since the supplied
- * arguments could replace any subset of the defaults. We
+ * arguments could replace any subset of the defaults. We
* work by making a bitmapset of the argnumbers of defaulted
* arguments, then scanning the defaults list and selecting
* the needed items. (This assumes that defaulted arguments
@@ -1325,7 +1325,7 @@ FuncNameAsType(List *funcname)
* ParseComplexProjection -
* handles function calls with a single argument that is of complex type.
* If the function call is actually a column projection, return a suitably
- * transformed expression tree. If not, return NULL.
+ * transformed expression tree. If not, return NULL.
*/
static Node *
ParseComplexProjection(ParseState *pstate, char *funcname, Node *first_arg,
@@ -1399,7 +1399,7 @@ ParseComplexProjection(ParseState *pstate, char *funcname, Node *first_arg,
* The result is something like "foo(integer)".
*
* If argnames isn't NIL, it is a list of C strings representing the actual
- * arg names for the last N arguments. This must be considered part of the
+ * arg names for the last N arguments. This must be considered part of the
* function signature too, when dealing with named-notation function calls.
*
* This is typically used in the construction of function-not-found error
diff --git a/src/backend/parser/parse_node.c b/src/backend/parser/parse_node.c
index b552ddfda96..740e1bdef84 100644
--- a/src/backend/parser/parse_node.c
+++ b/src/backend/parser/parse_node.c
@@ -98,8 +98,8 @@ free_parsestate(ParseState *pstate)
* is a dummy (always 0, in fact).
*
* The locations stored in raw parsetrees are byte offsets into the source
- * string. We have to convert them to 1-based character indexes for reporting
- * to clients. (We do things this way to avoid unnecessary overhead in the
+ * string. We have to convert them to 1-based character indexes for reporting
+ * to clients. (We do things this way to avoid unnecessary overhead in the
* normal non-error case: computing character indexes would be much more
* expensive than storing token offsets.)
*/
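
Spelled out, the conversion this comment defers to error time looks roughly like the sketch below (pg_mblen is the backend's multibyte-length primitive; the function name here is invented):

	/* Sketch: map a byte offset to the 1-based character index clients see. */
	static int
	offset_to_char_index(const char *str, int byteoff)
	{
		int		charindex = 1;		/* client-facing indexes are 1-based */
		int		i = 0;

		while (i < byteoff && str[i] != '\0')
		{
			i += pg_mblen(str + i);	/* byte length of the next character */
			charindex++;
		}
		return charindex;
	}
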
@@ -128,7 +128,7 @@ parser_errposition(ParseState *pstate, int location)
* Sometimes the parser calls functions that aren't part of the parser
* subsystem and can't reasonably be passed a ParseState; yet we would
* like any errors thrown in those functions to be tagged with a parse
- * error location. Use this function to set up an error context stack
+ * error location. Use this function to set up an error context stack
* entry that will accomplish that. Usage pattern:
*
* declare a local variable "ParseCallbackState pcbstate"
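
In code, the full pattern reads roughly as follows (the callee is hypothetical; the setup/cancel functions are the ones this file provides):

	ParseCallbackState pcbstate;

	setup_parser_errposition_callback(&pcbstate, pstate, location);
	result = function_that_might_ereport(args);	/* hypothetical callee */
	cancel_parser_errposition_callback(&pcbstate);

Any error thrown inside the callee is then reported with the parse location attached.
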
@@ -220,7 +220,7 @@ transformArrayType(Oid *arrayType, int32 *arrayTypmod)
* If the input is a domain, smash to base type, and extract the actual
* typmod to be applied to the base type. Subscripting a domain is an
* operation that necessarily works on the base array type, not the domain
- * itself. (Note that we provide no method whereby the creator of a
+ * itself. (Note that we provide no method whereby the creator of a
* domain over an array type could hide its ability to be subscripted.)
*/
*arrayType = getBaseTypeAndTypmod(*arrayType, arrayTypmod);
@@ -268,7 +268,7 @@ transformArrayType(Oid *arrayType, int32 *arrayTypmod)
*
* In an array assignment, we are given a destination array value plus a
* source value that is to be assigned to a single element or a slice of
- * that array. We produce an expression that represents the new array value
+ * that array. We produce an expression that represents the new array value
* with the source data inserted into the right part of the array.
*
* For both cases, if the source array is of a domain-over-array type,
diff --git a/src/backend/parser/parse_oper.c b/src/backend/parser/parse_oper.c
index 4a2f77771b8..5ff5fd05f5a 100644
--- a/src/backend/parser/parse_oper.c
+++ b/src/backend/parser/parse_oper.c
@@ -447,7 +447,7 @@ oper(ParseState *pstate, List *opname, Oid ltypeId, Oid rtypeId,
*
* This is tighter than oper() because it will not return an operator that
* requires coercion of the input datatypes (but binary-compatible operators
- * are accepted). Otherwise, the semantics are the same.
+ * are accepted). Otherwise, the semantics are the same.
*/
Operator
compatible_oper(ParseState *pstate, List *op, Oid arg1, Oid arg2,
@@ -980,7 +980,7 @@ make_scalar_array_op(ParseState *pstate, List *opname,
* mapping is pretty expensive to compute, especially for ambiguous operators;
* this is mainly because there are a *lot* of instances of popular operator
* names such as "=", and we have to check each one to see which is the
- * best match. So once we have identified the correct mapping, we save it
+ * best match. So once we have identified the correct mapping, we save it
* in a cache that need only be flushed on pg_operator or pg_cast change.
* (pg_cast must be considered because changes in the set of implicit casts
* affect the set of applicable operators for any given input datatype.)
diff --git a/src/backend/parser/parse_param.c b/src/backend/parser/parse_param.c
index 3b8a67619e8..7a87df76b88 100644
--- a/src/backend/parser/parse_param.c
+++ b/src/backend/parser/parse_param.c
@@ -255,7 +255,7 @@ variable_coerce_param_hook(ParseState *pstate, Param *param,
* of parsing with parse_variable_parameters.
*
* Note: this code intentionally does not check that all parameter positions
- * were used, nor that all got non-UNKNOWN types assigned. Caller of parser
+ * were used, nor that all got non-UNKNOWN types assigned. Caller of parser
* should enforce that if it's important.
*/
void
diff --git a/src/backend/parser/parse_relation.c b/src/backend/parser/parse_relation.c
index 8293f07d867..1ef23f93628 100644
--- a/src/backend/parser/parse_relation.c
+++ b/src/backend/parser/parse_relation.c
@@ -67,7 +67,7 @@ static int specialAttNum(const char *attname);
*
* A qualified refname (schemaname != NULL) can only match a relation RTE
* that (a) has no alias and (b) is for the same relation identified by
- * schemaname.refname. In this case we convert schemaname.refname to a
+ * schemaname.refname. In this case we convert schemaname.refname to a
* relation OID and search by relid, rather than by alias name. This is
* peculiar, but it's what SQL92 says to do.
*/
@@ -156,7 +156,7 @@ scanNameSpaceForRefname(ParseState *pstate, const char *refname, int location)
/*
* Search the query's table namespace for a relation RTE matching the
- * given relation OID. Return the RTE if a unique match, or NULL
+ * given relation OID. Return the RTE if a unique match, or NULL
* if no match. Raise error if multiple matches (which shouldn't
* happen if the namespace was checked correctly when it was created).
*
@@ -349,7 +349,7 @@ checkNameSpaceConflicts(ParseState *pstate, List *namespace1,
/*
* given an RTE, return RT index (starting with 1) of the entry,
- * and optionally get its nesting depth (0 = current). If sublevels_up
+ * and optionally get its nesting depth (0 = current). If sublevels_up
* is NULL, only consider rels at the current nesting level.
* Raises error if RTE not found.
*/
@@ -896,7 +896,7 @@ addRangeTableEntry(ParseState *pstate,
/*
* Get the rel's OID. This access also ensures that we have an up-to-date
- * relcache entry for the rel. Since this is typically the first access
+ * relcache entry for the rel. Since this is typically the first access
* to a rel in a statement, be careful to get the right access level
* depending on whether we're doing SELECT FOR UPDATE/SHARE.
*/
@@ -2461,7 +2461,7 @@ errorMissingRTE(ParseState *pstate, RangeVar *relation)
/*
* Check to see if there are any potential matches in the query's
- * rangetable. (Note: cases involving a bad schema name in the RangeVar
+ * rangetable. (Note: cases involving a bad schema name in the RangeVar
* will throw error immediately here. That seems OK.)
*/
rte = searchRangeTable(pstate, relation);
diff --git a/src/backend/parser/parse_target.c b/src/backend/parser/parse_target.c
index 98c28193692..4827a96448b 100644
--- a/src/backend/parser/parse_target.c
+++ b/src/backend/parser/parse_target.c
@@ -176,7 +176,7 @@ transformTargetList(ParseState *pstate, List *targetlist)
* This is the identical transformation to transformTargetList, except that
* the input list elements are bare expressions without ResTarget decoration,
* and the output elements are likewise just expressions without TargetEntry
- * decoration. We use this for ROW() and VALUES() constructs.
+ * decoration. We use this for ROW() and VALUES() constructs.
*/
List *
transformExpressionList(ParseState *pstate, List *exprlist)
@@ -341,7 +341,7 @@ markTargetListOrigin(ParseState *pstate, TargetEntry *tle,
/*
* transformAssignedExpr()
- * This is used in INSERT and UPDATE statements only. It prepares an
+ * This is used in INSERT and UPDATE statements only. It prepares an
* expression for assignment to a column of the target table.
* This includes coercing the given value to the target column's type
* (if necessary), and dealing with any subfield names or subscripts
@@ -359,7 +359,7 @@ markTargetListOrigin(ParseState *pstate, TargetEntry *tle,
*
* Note: location points at the target column name (SET target or INSERT
* column name list entry), and must therefore be -1 in an INSERT that
- * omits the column name list. So we should usually prefer to use
+ * omits the column name list. So we should usually prefer to use
* exprLocation(expr) for errors that can happen in a default INSERT.
*/
Expr *
@@ -423,7 +423,7 @@ transformAssignedExpr(ParseState *pstate,
/*
* If there is indirection on the target column, prepare an array or
- * subfield assignment expression. This will generate a new column value
+ * subfield assignment expression. This will generate a new column value
* that the source value has been inserted into, which can then be placed
* in the new tuple constructed by INSERT or UPDATE.
*/
@@ -528,7 +528,7 @@ updateTargetListEntry(ParseState *pstate,
/*
* Set the resno to identify the target column --- the rewriter and
- * planner depend on this. We also set the resname to identify the target
+ * planner depend on this. We also set the resname to identify the target
* column, but this is only for debugging purposes; it should not be
* relied on. (In particular, it might be out of date in a stored rule.)
*/
@@ -976,7 +976,7 @@ ExpandColumnRefStar(ParseState *pstate, ColumnRef *cref,
*
* Note: this code is a lot like transformColumnRef; it's tempting to
* call that instead and then replace the resulting whole-row Var with
- * a list of Vars. However, that would leave us with the RTE's
+ * a list of Vars. However, that would leave us with the RTE's
* selectedCols bitmap showing the whole row as needing select
* permission, as well as the individual columns. That would be
* incorrect (since columns added later shouldn't need select
@@ -995,7 +995,7 @@ ExpandColumnRefStar(ParseState *pstate, ColumnRef *cref,
} crserr = CRSERR_NO_RTE;
/*
- * Give the PreParseColumnRefHook, if any, first shot. If it returns
+ * Give the PreParseColumnRefHook, if any, first shot. If it returns
* non-null then we should use that expression.
*/
if (pstate->p_pre_columnref_hook != NULL)
@@ -1238,7 +1238,7 @@ ExpandRowReference(ParseState *pstate, Node *expr,
/*
* If the rowtype expression is a whole-row Var, we can expand the fields
- * as simple Vars. Note: if the RTE is a relation, this case leaves us
+ * as simple Vars. Note: if the RTE is a relation, this case leaves us
* with the RTE's selectedCols bitmap showing the whole row as needing
* select permission, as well as the individual columns. However, we can
* only get here for weird notations like (table.*).*, so it's not worth
@@ -1320,7 +1320,7 @@ ExpandRowReference(ParseState *pstate, Node *expr,
* Get the tuple descriptor for a Var of type RECORD, if possible.
*
* Since no actual table or view column is allowed to have type RECORD, such
- * a Var must refer to a JOIN or FUNCTION RTE or to a subquery output. We
+ * a Var must refer to a JOIN or FUNCTION RTE or to a subquery output. We
* drill down to find the ultimate defining expression and attempt to infer
* the tupdesc from it. We ereport if we can't determine the tupdesc.
*
@@ -1403,7 +1403,7 @@ expandRecordVariable(ParseState *pstate, Var *var, int levelsup)
{
/*
* Recurse into the sub-select to see what its Var refers
- * to. We have to build an additional level of ParseState
+ * to. We have to build an additional level of ParseState
* to keep in step with varlevelsup in the subselect.
*/
ParseState mypstate;
@@ -1477,7 +1477,7 @@ expandRecordVariable(ParseState *pstate, Var *var, int levelsup)
/*
* We now have an expression we can't expand any more, so see if
- * get_expr_result_type() can do anything with it. If not, pass to
+ * get_expr_result_type() can do anything with it. If not, pass to
* lookup_rowtype_tupdesc() which will probably fail, but will give an
* appropriate error message while failing.
*/
diff --git a/src/backend/parser/parse_type.c b/src/backend/parser/parse_type.c
index ac62cbc8c33..6be40018f42 100644
--- a/src/backend/parser/parse_type.c
+++ b/src/backend/parser/parse_type.c
@@ -34,7 +34,7 @@ static int32 typenameTypeMod(ParseState *pstate, const TypeName *typeName,
/*
* LookupTypeName
* Given a TypeName object, lookup the pg_type syscache entry of the type.
- * Returns NULL if no such type can be found. If the type is found,
+ * Returns NULL if no such type can be found. If the type is found,
* the typmod value represented in the TypeName struct is computed and
* stored into *typmod_p.
*
@@ -47,7 +47,7 @@ static int32 typenameTypeMod(ParseState *pstate, const TypeName *typeName,
*
* typmod_p can be passed as NULL if the caller does not care to know the
* typmod value, but the typmod decoration (if any) will be validated anyway,
- * except in the case where the type is not found. Note that if the type is
+ * except in the case where the type is not found. Note that if the type is
* found but is a shell, and there is typmod decoration, an error will be
* thrown --- this is intentional.
*
@@ -571,7 +571,7 @@ typeTypeCollation(Type typ)
/*
* Given a type structure and a string, returns the internal representation
- * of that string. The "string" can be NULL to perform conversion of a NULL
+ * of that string. The "string" can be NULL to perform conversion of a NULL
* (which might result in failure, if the input function rejects NULLs).
*/
Datum
@@ -595,7 +595,7 @@ stringTypeDatum(Type tp, char *string, int32 atttypmod)
* instability in the input function is that comparison of Const nodes
* relies on bytewise comparison of the datums, so if the input function
* leaves garbage then subexpressions that should be identical may not get
- * recognized as such. See pgsql-hackers discussion of 2008-04-04.
+ * recognized as such. See pgsql-hackers discussion of 2008-04-04.
*/
if (string && !typform->typbyval)
{
@@ -642,7 +642,7 @@ pts_error_callback(void *arg)
/*
* Currently we just suppress any syntax error position report, rather
- * than transforming to an "internal query" error. It's unlikely that a
+ * than transforming to an "internal query" error. It's unlikely that a
* type name is complex enough to need positioning.
*/
errposition(0);
diff --git a/src/backend/parser/parse_utilcmd.c b/src/backend/parser/parse_utilcmd.c
index 387bcb8ab91..eb4e4212fc9 100644
--- a/src/backend/parser/parse_utilcmd.c
+++ b/src/backend/parser/parse_utilcmd.c
@@ -158,7 +158,7 @@ transformCreateStmt(CreateStmt *stmt, const char *queryString)
stmt = (CreateStmt *) copyObject(stmt);
/*
- * Look up the creation namespace. This also checks permissions on the
+ * Look up the creation namespace. This also checks permissions on the
* target namespace, so that we throw any permissions error as early as
* possible.
*/
@@ -189,7 +189,7 @@ transformCreateStmt(CreateStmt *stmt, const char *queryString)
* If the target relation name isn't schema-qualified, make it so. This
* prevents some corner cases in which added-on rewritten commands might
* think they should apply to other relations that have the same name and
- * are earlier in the search path. But a local temp table is effectively
+ * are earlier in the search path. But a local temp table is effectively
* specified to be in pg_temp, so no need for anything extra in that case.
*/
if (stmt->relation->schemaname == NULL
@@ -867,7 +867,7 @@ transformInhRelation(CreateStmtContext *cxt, InhRelation *inhRelation)
/*
* Close the parent rel, but keep our AccessShareLock on it until xact
- * commit. That will prevent someone else from deleting or ALTERing the
+ * commit. That will prevent someone else from deleting or ALTERing the
* parent before the child is committed.
*/
heap_close(relation, NoLock);
@@ -1565,7 +1565,7 @@ transformIndexConstraint(Constraint *constraint, CreateStmtContext *cxt)
parser_errposition(cxt->pstate, constraint->location)));
/*
- * Insist on it being a btree. That's the only kind that supports
+ * Insist on it being a btree. That's the only kind that supports
* uniqueness at the moment anyway; but we must have an index that
* exactly matches what you'd get from plain ADD CONSTRAINT syntax,
* else dump and reload will produce a different index (breaking
@@ -1592,7 +1592,7 @@ transformIndexConstraint(Constraint *constraint, CreateStmtContext *cxt)
/*
* We shouldn't see attnum == 0 here, since we already rejected
- * expression indexes. If we do, SystemAttributeDefinition will
+ * expression indexes. If we do, SystemAttributeDefinition will
* throw an error.
*/
if (attnum > 0)
@@ -1606,7 +1606,7 @@ transformIndexConstraint(Constraint *constraint, CreateStmtContext *cxt)
attname = pstrdup(NameStr(attform->attname));
/*
- * Insist on default opclass and sort options. While the index
+ * Insist on default opclass and sort options. While the index
* would still work as a constraint with non-default settings, it
* might not provide exactly the same uniqueness semantics as
* you'd get from a normally-created constraint; and there's also
@@ -1857,7 +1857,7 @@ transformFKConstraints(CreateStmtContext *cxt,
* transformIndexStmt - parse analysis for CREATE INDEX and ALTER TABLE
*
* Note: this is a no-op for an index not using either index expressions or
- * a predicate expression. There are several code paths that create indexes
+ * a predicate expression. There are several code paths that create indexes
* without bothering to call this, because they know they don't have any
* such expressions to deal with.
*
@@ -1973,7 +1973,7 @@ transformRuleStmt(RuleStmt *stmt, const char *queryString,
/*
* To avoid deadlock, make sure the first thing we do is grab
- * AccessExclusiveLock on the target relation. This will be needed by
+ * AccessExclusiveLock on the target relation. This will be needed by
* DefineQueryRewrite(), and we don't want to grab a lesser lock
* beforehand.
*/
diff --git a/src/backend/parser/parser.c b/src/backend/parser/parser.c
index e389208d469..a5b01f1cac4 100644
--- a/src/backend/parser/parser.c
+++ b/src/backend/parser/parser.c
@@ -65,7 +65,7 @@ raw_parser(const char *str)
* Intermediate filter between parser and core lexer (core_yylex in scan.l).
*
* The filter is needed because in some cases the standard SQL grammar
- * requires more than one token lookahead. We reduce these cases to one-token
+ * requires more than one token lookahead. We reduce these cases to one-token
* lookahead by combining tokens here, in order to keep the grammar LALR(1).
*
* Using a filter is simpler than trying to recognize multiword tokens
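
A schematic of such a filter, with an invented one-token pushback buffer (the real filter must also carry yylval/yylloc along with the saved token; that plumbing is omitted here):

	static int	lookahead_token = -1;	/* one-token pushback buffer */

	static int
	filtered_yylex(void)
	{
		int		cur;

		if (lookahead_token >= 0)
		{
			cur = lookahead_token;
			lookahead_token = -1;
			return cur;
		}
		cur = core_yylex();
		if (cur == NULLS_P)
		{
			int		next = core_yylex();

			if (next == FIRST_P)
				return NULLS_FIRST;		/* combine the pair into one token */
			lookahead_token = next;		/* not ours; push it back */
		}
		return cur;
	}
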
diff --git a/src/backend/port/darwin/system.c b/src/backend/port/darwin/system.c
index 1385e43152a..50919799506 100644
--- a/src/backend/port/darwin/system.c
+++ b/src/backend/port/darwin/system.c
@@ -24,7 +24,7 @@
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/src/backend/port/dynloader/darwin.c b/src/backend/port/dynloader/darwin.c
index 484eb43b5c3..ccd92c39d4b 100644
--- a/src/backend/port/dynloader/darwin.c
+++ b/src/backend/port/dynloader/darwin.c
@@ -47,7 +47,7 @@ pg_dlerror(void)
/*
* These routines were taken from the Apache source, but were made
- * available with a PostgreSQL-compatible license. Kudos Wilfredo
+ * available with a PostgreSQL-compatible license. Kudos Wilfredo
* Sánchez <wsanchez@apple.com>.
*/
diff --git a/src/backend/port/dynloader/freebsd.c b/src/backend/port/dynloader/freebsd.c
index bc400791788..66272914e6d 100644
--- a/src/backend/port/dynloader/freebsd.c
+++ b/src/backend/port/dynloader/freebsd.c
@@ -20,7 +20,7 @@
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/src/backend/port/dynloader/netbsd.c b/src/backend/port/dynloader/netbsd.c
index 1bac83784af..f2259b2400c 100644
--- a/src/backend/port/dynloader/netbsd.c
+++ b/src/backend/port/dynloader/netbsd.c
@@ -20,7 +20,7 @@
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/src/backend/port/dynloader/openbsd.c b/src/backend/port/dynloader/openbsd.c
index 4556a2dfebb..28eaf772096 100644
--- a/src/backend/port/dynloader/openbsd.c
+++ b/src/backend/port/dynloader/openbsd.c
@@ -20,7 +20,7 @@
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/src/backend/port/posix_sema.c b/src/backend/port/posix_sema.c
index acd0094360e..a4a25ade872 100644
--- a/src/backend/port/posix_sema.c
+++ b/src/backend/port/posix_sema.c
@@ -138,7 +138,7 @@ PosixSemaphoreKill(sem_t * sem)
*
* This is called during postmaster start or shared memory reinitialization.
* It should do whatever is needed to be able to support up to maxSemas
- * subsequent PGSemaphoreCreate calls. Also, if any system resources
+ * subsequent PGSemaphoreCreate calls. Also, if any system resources
* are acquired here or in PGSemaphoreCreate, register an on_shmem_exit
* callback to release them.
*
diff --git a/src/backend/port/sysv_sema.c b/src/backend/port/sysv_sema.c
index 7b671869267..ce5446526bd 100644
--- a/src/backend/port/sysv_sema.c
+++ b/src/backend/port/sysv_sema.c
@@ -256,7 +256,7 @@ IpcSemaphoreCreate(int numSems)
/*
* Can only get here if some other process managed to create the same
- * sema key before we did. Let him have that one, loop around to try
+ * sema key before we did. Let him have that one, loop around to try
* next key.
*/
}
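
The retry idea, in isolation (a standalone sketch using <sys/sem.h> and <errno.h>; the starting key is illustrative):

	key_t	semKey = 1000;			/* illustrative starting key */
	int		semId;

	for (;;)
	{
		semId = semget(semKey, numSems, IPC_CREAT | IPC_EXCL | 0600);
		if (semId >= 0)
			break;					/* created a brand-new set */
		if (errno != EEXIST)
			break;					/* real failure, not a key collision */
		semKey++;					/* someone else owns this key; try the next */
	}
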
@@ -281,12 +281,12 @@ IpcSemaphoreCreate(int numSems)
*
* This is called during postmaster start or shared memory reinitialization.
* It should do whatever is needed to be able to support up to maxSemas
- * subsequent PGSemaphoreCreate calls. Also, if any system resources
+ * subsequent PGSemaphoreCreate calls. Also, if any system resources
* are acquired here or in PGSemaphoreCreate, register an on_shmem_exit
* callback to release them.
*
* The port number is passed for possible use as a key (for SysV, we use
- * it to generate the starting semaphore key). In a standalone backend,
+ * it to generate the starting semaphore key). In a standalone backend,
* zero will be passed.
*
* In the SysV implementation, we acquire semaphore sets on-demand; the
@@ -381,7 +381,7 @@ PGSemaphoreLock(PGSemaphore sema, bool interruptOK)
* from the operation prematurely because we were sent a signal. So we
* try and lock the semaphore again.
*
- * Each time around the loop, we check for a cancel/die interrupt. On
+ * Each time around the loop, we check for a cancel/die interrupt. On
* some platforms, if such an interrupt comes in while we are waiting, it
* will cause the semop() call to exit with errno == EINTR, allowing us to
* service the interrupt (if not in a critical section already) during the
@@ -399,7 +399,7 @@ PGSemaphoreLock(PGSemaphore sema, bool interruptOK)
* do CHECK_FOR_INTERRUPTS; then, a die() interrupt in this interval will
* execute directly. However, there is a huge pitfall: there is another
* window of a few instructions after the semop() before we are able to
- * reset ImmediateInterruptOK. If an interrupt occurs then, we'll lose
+ * reset ImmediateInterruptOK. If an interrupt occurs then, we'll lose
* control, which means that the lock has been acquired but our caller did
* not get a chance to record the fact. Therefore, we only set
* ImmediateInterruptOK if the caller tells us it's OK to do so, ie, the
@@ -412,9 +412,9 @@ PGSemaphoreLock(PGSemaphore sema, bool interruptOK)
* On some platforms, signals marked SA_RESTART (which is most, for us)
* will not interrupt the semop(); it will just keep waiting. Therefore
* it's necessary for cancel/die interrupts to be serviced directly by the
- * signal handler. On these platforms the behavior is really the same
+ * signal handler. On these platforms the behavior is really the same
* whether the signal arrives just before the semop() begins, or while it
- * is waiting. The loop on EINTR is thus important only for other types
+ * is waiting. The loop on EINTR is thus important only for other types
* of interrupts.
*/
do
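
Reduced to its skeleton, this is the classic retry-on-EINTR idiom (a standalone sketch using <sys/sem.h> and <errno.h>; semId is assumed to identify the semaphore set):

	struct sembuf sops;
	int		errStatus;

	sops.sem_num = 0;				/* which semaphore within the set */
	sops.sem_op = -1;				/* acquire: decrement by one */
	sops.sem_flg = 0;

	do
	{
		/* the real code services cancel/die interrupts around here */
		errStatus = semop(semId, &sops, 1);
	} while (errStatus < 0 && errno == EINTR);
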
diff --git a/src/backend/port/sysv_shmem.c b/src/backend/port/sysv_shmem.c
index 3840ad7b1b2..c91c86219b4 100644
--- a/src/backend/port/sysv_shmem.c
+++ b/src/backend/port/sysv_shmem.c
@@ -242,7 +242,7 @@ IpcMemoryDelete(int status, Datum shmId)
* Is a previously-existing shmem segment still existing and in use?
*
* The point of this exercise is to detect the case where a prior postmaster
- * crashed, but it left child backends that are still running. Therefore
+ * crashed, but it left child backends that are still running. Therefore
* we only care about shmem segments that are associated with the intended
* DataDir. This is an important consideration since accidental matches of
* shmem segment IDs are reasonably common.
@@ -341,14 +341,14 @@ PGSharedMemoryIsInUse(unsigned long id1, unsigned long id2)
* the storage.
*
* Dead Postgres segments are recycled if found, but we do not fail upon
- * collision with non-Postgres shmem segments. The idea here is to detect and
+ * collision with non-Postgres shmem segments. The idea here is to detect and
* re-use keys that may have been assigned by a crashed postmaster or backend.
*
* makePrivate means to always create a new segment, rather than attach to
* or recycle any existing segment.
*
* The port number is passed for possible use as a key (for SysV, we use
- * it to generate the starting shmem key). In a standalone backend,
+ * it to generate the starting shmem key). In a standalone backend,
* zero will be passed.
*/
PGShmemHeader *
@@ -458,7 +458,7 @@ PGSharedMemoryCreate(Size size, bool makePrivate, int port)
/*
* PGSharedMemoryReAttach
*
- * Re-attach to an already existing shared memory segment. In the non
+ * Re-attach to an already existing shared memory segment. In the non
* EXEC_BACKEND case this is not used, because postmaster children inherit
* the shared memory segment attachment via fork().
*
@@ -500,7 +500,7 @@ PGSharedMemoryReAttach(void)
*
* Detach from the shared memory segment, if still attached. This is not
* intended for use by the process that originally created the segment
- * (it will have an on_shmem_exit callback registered to do that). Rather,
+ * (it will have an on_shmem_exit callback registered to do that). Rather,
* this is for subprocesses that have inherited an attachment and want to
* get rid of it.
*/
diff --git a/src/backend/port/win32_shmem.c b/src/backend/port/win32_shmem.c
index 1a2ee12dcb0..d6978566398 100644
--- a/src/backend/port/win32_shmem.c
+++ b/src/backend/port/win32_shmem.c
@@ -78,7 +78,7 @@ GetSharedMemName(void)
* Is a previously-existing shmem segment still existing and in use?
*
* The point of this exercise is to detect the case where a prior postmaster
- * crashed, but it left child backends that are still running. Therefore
+ * crashed, but it left child backends that are still running. Therefore
* we only care about shmem segments that are associated with the intended
* DataDir. This is an important consideration since accidental matches of
* shmem segment IDs are reasonably common.
diff --git a/src/backend/postmaster/autovacuum.c b/src/backend/postmaster/autovacuum.c
index 3d57c453772..4ff5d877115 100644
--- a/src/backend/postmaster/autovacuum.c
+++ b/src/backend/postmaster/autovacuum.c
@@ -21,21 +21,21 @@
* There is an autovacuum shared memory area, where the launcher stores
* information about the database it wants vacuumed. When it wants a new
* worker to start, it sets a flag in shared memory and sends a signal to the
- * postmaster. The postmaster then knows nothing more than that it must start
- * a worker; so it forks a new child, which turns into a worker. This new process
+ * postmaster. The postmaster then knows nothing more than that it must start
+ * a worker; so it forks a new child, which turns into a worker. This new process
* connects to shared memory, and there it can inspect the information that the
* launcher has set up.
*
* If the fork() call fails in the postmaster, it sets a flag in the shared
* memory area, and sends a signal to the launcher. The launcher, upon
* noticing the flag, can try starting the worker again by resending the
- * signal. Note that the failure can only be transient (fork failure due to
+ * signal. Note that the failure can only be transient (fork failure due to
* high load, memory pressure, too many processes, etc); more permanent
* problems, like failure to connect to a database, are detected later in the
* worker and dealt with just by having the worker exit normally. The launcher
* will launch a new worker again later, per schedule.
*
- * When the worker is done vacuuming it sends SIGUSR2 to the launcher. The
+ * When the worker is done vacuuming it sends SIGUSR2 to the launcher. The
* launcher then wakes up and is able to launch another worker, if the schedule
* is so tight that a new worker is needed immediately. At this time the
* launcher can also balance the settings for the various remaining workers'
@@ -228,7 +228,7 @@ typedef enum
/*-------------
* The main autovacuum shmem struct. On shared memory we store this main
- * struct and the array of WorkerInfo structs. This struct keeps:
+ * struct and the array of WorkerInfo structs. This struct keeps:
*
* av_signal set by other processes to indicate various conditions
* av_launcherpid the PID of the autovacuum launcher
@@ -410,7 +410,7 @@ AutoVacLauncherMain(int argc, char *argv[])
/*
* If possible, make this process a group leader, so that the postmaster
- * can signal any child processes too. (autovacuum probably never has any
+ * can signal any child processes too. (autovacuum probably never has any
* child processes, but for consistency we make all postmaster child
* processes do this.)
*/
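
The "group leader" mechanism referred to is a setsid() call; the code guarded by the #endif below is essentially:

	#ifdef HAVE_SETSID
		if (setsid() < 0)
			elog(FATAL, "setsid() failed: %m");
	#endif
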
@@ -420,7 +420,7 @@ AutoVacLauncherMain(int argc, char *argv[])
#endif
/*
- * Set up signal handlers. We operate on databases much like a regular
+ * Set up signal handlers. We operate on databases much like a regular
* backend, so we use the same signal handling. See equivalent code in
* tcop/postgres.c.
*/
@@ -528,7 +528,7 @@ AutoVacLauncherMain(int argc, char *argv[])
/*
* Force zero_damaged_pages OFF in the autovac process, even if it is set
- * in postgresql.conf. We don't really want such a dangerous option being
+ * in postgresql.conf. We don't really want such a dangerous option being
* applied non-interactively.
*/
SetConfigOption("zero_damaged_pages", "false", PGC_SUSET, PGC_S_OVERRIDE);
@@ -862,7 +862,7 @@ launcher_determine_sleep(bool canlaunch, bool recursing, struct timeval * nap)
* this the "new" database, because when the database was already present on
* the list, we expect that this function is not called at all). The
* preexisting list, if any, will be used to preserve the order of the
- * databases in the autovacuum_naptime period. The new database is put at the
+ * databases in the autovacuum_naptime period. The new database is put at the
* end of the interval. The actual values are not saved, which should not be
* much of a problem.
*/
@@ -1075,7 +1075,7 @@ db_comparator(const void *a, const void *b)
*
* Bare-bones procedure for starting an autovacuum worker from the launcher.
* It determines what database to work on, sets up shared memory stuff and
- * signals postmaster to start the worker. It fails gracefully if invoked when
+ * signals postmaster to start the worker. It fails gracefully if invoked when
* autovacuum_workers are already active.
*
* Return value is the OID of the database that the worker is going to process,
@@ -1328,7 +1328,7 @@ launch_worker(TimestampTz now)
/*
* Called from postmaster to signal a failure to fork a process to become a
- * worker. The postmaster should kill(SIGUSR2) the launcher shortly
+ * worker. The postmaster should kill(SIGUSR2) the launcher shortly
* after calling this function.
*/
void
@@ -1462,7 +1462,7 @@ AutoVacWorkerMain(int argc, char *argv[])
/*
* If possible, make this process a group leader, so that the postmaster
- * can signal any child processes too. (autovacuum probably never has any
+ * can signal any child processes too. (autovacuum probably never has any
* child processes, but for consistency we make all postmaster child
* processes do this.)
*/
@@ -1472,7 +1472,7 @@ AutoVacWorkerMain(int argc, char *argv[])
#endif
/*
- * Set up signal handlers. We operate on databases much like a regular
+ * Set up signal handlers. We operate on databases much like a regular
* backend, so we use the same signal handling. See equivalent code in
* tcop/postgres.c.
*
@@ -1523,7 +1523,7 @@ AutoVacWorkerMain(int argc, char *argv[])
EmitErrorReport();
/*
- * We can now go away. Note that because we called InitProcess, a
+ * We can now go away. Note that because we called InitProcess, a
* callback was registered to do ProcKill, which will clean up
* necessary state.
*/
@@ -1537,7 +1537,7 @@ AutoVacWorkerMain(int argc, char *argv[])
/*
* Force zero_damaged_pages OFF in the autovac process, even if it is set
- * in postgresql.conf. We don't really want such a dangerous option being
+ * in postgresql.conf. We don't really want such a dangerous option being
* applied non-interactively.
*/
SetConfigOption("zero_damaged_pages", "false", PGC_SUSET, PGC_S_OVERRIDE);
@@ -1663,7 +1663,7 @@ FreeWorkerInfo(int code, Datum arg)
/*
* Wake the launcher up so that he can launch a new worker immediately
* if required. We only save the launcher's PID in local memory here;
- * the actual signal will be sent when the PGPROC is recycled. Note
+ * the actual signal will be sent when the PGPROC is recycled. Note
* that we always do this, so that the launcher can rebalance the cost
* limit setting of the remaining workers.
*
@@ -1777,7 +1777,7 @@ autovac_balance_cost(void)
/*
* We put a lower bound of 1 on the cost_limit, to avoid division-
- * by-zero in the vacuum code. Also, in case of roundoff trouble
+ * by-zero in the vacuum code. Also, in case of roundoff trouble
* in these calculations, let's be sure we don't ever set
* cost_limit to more than the base value.
*/
@@ -1824,7 +1824,7 @@ get_database_list(void)
/*
* Start a transaction so we can access pg_database, and get a snapshot.
* We don't have a use for the snapshot itself, but we're interested in
- * the secondary effect that it sets RecentGlobalXmin. (This is critical
+ * the secondary effect that it sets RecentGlobalXmin. (This is critical
* for anything that reads heap pages, because HOT may decide to prune
* them even if the process doesn't attempt to modify any tuples.)
*/
@@ -2241,14 +2241,14 @@ do_autovacuum(void)
}
/*
- * Ok, good to go. Store the table in shared memory before releasing
+ * Ok, good to go. Store the table in shared memory before releasing
* the lock so that other workers don't vacuum it concurrently.
*/
MyWorkerInfo->wi_tableoid = relid;
LWLockRelease(AutovacuumScheduleLock);
/*
- * Remember the prevailing values of the vacuum cost GUCs. We have to
+ * Remember the prevailing values of the vacuum cost GUCs. We have to
* restore these at the bottom of the loop, else we'll compute wrong
* values in the next iteration of autovac_balance_cost().
*/
@@ -2277,7 +2277,7 @@ do_autovacuum(void)
/*
* Save the relation name for a possible error message, to avoid a
- * catalog lookup in case of an error. If any of these return NULL,
+ * catalog lookup in case of an error. If any of these return NULL,
* then the relation has been dropped since last we checked; skip it.
* Note: they must live in a long-lived memory context because we call
* vacuum and analyze in different transactions.
@@ -2689,7 +2689,7 @@ relation_needs_vacanalyze(Oid relid,
{
/*
* Skip a table not found in stat hash, unless we have to force vacuum
- * for anti-wrap purposes. If it's not acted upon, there's no need to
+ * for anti-wrap purposes. If it's not acted upon, there's no need to
* vacuum it.
*/
*dovacuum = force_vacuum;
@@ -2891,7 +2891,7 @@ AutoVacuumShmemInit(void)
* Refresh pgstats data for an autovacuum process
*
* Cause the next pgstats read operation to obtain fresh data, but throttle
- * such refreshing in the autovacuum launcher. This is mostly to avoid
+ * such refreshing in the autovacuum launcher. This is mostly to avoid
* rereading the pgstats files too many times in quick succession when there
* are many databases.
*
diff --git a/src/backend/postmaster/bgwriter.c b/src/backend/postmaster/bgwriter.c
index 83c13e252c4..238c1b3d607 100644
--- a/src/backend/postmaster/bgwriter.c
+++ b/src/backend/postmaster/bgwriter.c
@@ -2,15 +2,15 @@
*
* bgwriter.c
*
- * The background writer (bgwriter) is new as of Postgres 8.0. It attempts
+ * The background writer (bgwriter) is new as of Postgres 8.0. It attempts
* to keep regular backends from having to write out dirty shared buffers
* (which they would only do when needing to free a shared buffer to read in
* another page). In the best scenario all writes from shared buffers will
- * be issued by the background writer process. However, regular backends are
+ * be issued by the background writer process. However, regular backends are
* still empowered to issue writes if the bgwriter fails to maintain enough
* clean shared buffers.
*
- * The bgwriter is also charged with handling all checkpoints. It will
+ * The bgwriter is also charged with handling all checkpoints. It will
* automatically dispatch a checkpoint after a certain amount of time has
* elapsed since the last one, and it can be signaled to perform requested
* checkpoints as well. (The GUC parameter that mandates a checkpoint every
@@ -22,7 +22,7 @@
* finishes, or as soon as recovery begins if we are doing archive recovery.
* It remains alive until the postmaster commands it to terminate.
* Normal termination is by SIGUSR2, which instructs the bgwriter to execute
- * a shutdown checkpoint and then exit(0). (All backends must be stopped
+ * a shutdown checkpoint and then exit(0). (All backends must be stopped
* before SIGUSR2 is issued!) Emergency termination is by SIGQUIT; like any
* backend, the bgwriter will simply abort and exit on SIGQUIT.
*
@@ -210,7 +210,7 @@ BackgroundWriterMain(void)
/*
* If possible, make this process a group leader, so that the postmaster
- * can signal any child processes too. (bgwriter probably never has any
+ * can signal any child processes too. (bgwriter probably never has any
* child processes, but for consistency we make all postmaster child
* processes do this.)
*/
@@ -223,7 +223,7 @@ BackgroundWriterMain(void)
* Properly accept or ignore signals the postmaster might send us
*
* Note: we deliberately ignore SIGTERM, because during a standard Unix
- * system shutdown cycle, init will SIGTERM all processes at once. We
+ * system shutdown cycle, init will SIGTERM all processes at once. We
* want to wait for the backends to exit, whereupon the postmaster will
* tell us it's okay to shut down (via SIGUSR2).
*
@@ -293,7 +293,7 @@ BackgroundWriterMain(void)
/*
* These operations are really just a minimal subset of
- * AbortTransaction(). We don't have very many resources to worry
+ * AbortTransaction(). We don't have very many resources to worry
* about in bgwriter, but we do have LWLocks, buffers, and temp files.
*/
LWLockReleaseAll();
@@ -507,7 +507,7 @@ BackgroundWriterMain(void)
ckpt_performed = CreateRestartPoint(flags);
/*
- * After any checkpoint, close all smgr files. This is so we
+ * After any checkpoint, close all smgr files. This is so we
* won't hang onto smgr references to deleted files indefinitely.
*/
smgrcloseall();
@@ -654,7 +654,7 @@ BgWriterNap(void)
}
/*
- * Returns true if an immediate checkpoint request is pending. (Note that
+ * Returns true if an immediate checkpoint request is pending. (Note that
* this does not check the *current* checkpoint's IMMEDIATE flag, but whether
* there is one pending behind it.)
*/
@@ -834,7 +834,7 @@ bg_quickdie(SIGNAL_ARGS)
on_exit_reset();
/*
- * Note we do exit(2) not exit(0). This is to force the postmaster into a
+ * Note we do exit(2) not exit(0). This is to force the postmaster into a
* system reset cycle if some idiot DBA sends a manual SIGQUIT to a random
* backend. This is necessary precisely because we don't clean up our
* shared memory state. (The "dead man switch" mechanism in pmsignal.c
@@ -956,7 +956,7 @@ RequestCheckpoint(int flags)
CreateCheckPoint(flags | CHECKPOINT_IMMEDIATE);
/*
- * After any checkpoint, close all smgr files. This is so we won't
+ * After any checkpoint, close all smgr files. This is so we won't
* hang onto smgr references to deleted files indefinitely.
*/
smgrcloseall();
@@ -1087,7 +1087,7 @@ RequestCheckpoint(int flags)
* to the requests[] queue without checking for duplicates. The bgwriter
* will have to eliminate dups internally anyway. However, if we discover
* that the queue is full, we make a pass over the entire queue to compact
- * it. This is somewhat expensive, but the alternative is for the backend
+ * it. This is somewhat expensive, but the alternative is for the backend
* to perform its own fsync, which is far more expensive in practice. It
* is theoretically possible a backend fsync might still be necessary, if
* the queue is full and contains no duplicate entries. In that case, we
@@ -1111,7 +1111,7 @@ ForwardFsyncRequest(RelFileNode rnode, ForkNumber forknum, BlockNumber segno)
/*
* If the background writer isn't running or the request queue is full,
- * the backend will have to perform its own fsync request. But before
+ * the backend will have to perform its own fsync request. But before
* forcing that to happen, we can try to compact the background writer
* request queue.
*/
@@ -1143,7 +1143,7 @@ ForwardFsyncRequest(RelFileNode rnode, ForkNumber forknum, BlockNumber segno)
* Although a full fsync request queue is not common, it can lead to severe
* performance problems when it does happen. So far, this situation has
* only been observed to occur when the system is under heavy write load,
- * and especially during the "sync" phase of a checkpoint. Without this
+ * and especially during the "sync" phase of a checkpoint. Without this
* logic, each backend begins doing an fsync for every block written, which
* gets very expensive and can slow down the whole system.
*
diff --git a/src/backend/postmaster/fork_process.c b/src/backend/postmaster/fork_process.c
index d2d37cd261f..1e927644e18 100644
--- a/src/backend/postmaster/fork_process.c
+++ b/src/backend/postmaster/fork_process.c
@@ -72,7 +72,7 @@ fork_process(void)
* stupid, but the kernel hackers seem uninterested in improving it.)
* Therefore it's often a good idea to protect the postmaster by
* setting its oom_adj value negative (which has to be done in a
- * root-owned startup script). If you just do that much, all child
+ * root-owned startup script). If you just do that much, all child
* processes will also be protected against OOM kill, which might not
* be desirable. You can then choose to build with LINUX_OOM_ADJ
* #defined to 0, or some other value that you want child processes to
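
The LINUX_OOM_ADJ hook mentioned here boils down to the child rewriting its own OOM score after fork(); approximately (a sketch of the idea — errors are deliberately ignored, and the real code may use open()/write() rather than stdio):

	#ifdef LINUX_OOM_ADJ
		{
			FILE   *fh = fopen("/proc/self/oom_adj", "w");

			if (fh != NULL)			/* errors are ignored on purpose */
			{
				fprintf(fh, "%d\n", LINUX_OOM_ADJ);
				fclose(fh);
			}
		}
	#endif
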
diff --git a/src/backend/postmaster/pgarch.c b/src/backend/postmaster/pgarch.c
index b40375aaaa5..212432ec06b 100644
--- a/src/backend/postmaster/pgarch.c
+++ b/src/backend/postmaster/pgarch.c
@@ -552,9 +552,9 @@ pgarch_archiveXlog(char *xlog)
{
/*
* If either the shell itself, or a called command, died on a signal,
- * abort the archiver. We do this because system() ignores SIGINT and
+ * abort the archiver. We do this because system() ignores SIGINT and
* SIGQUIT while waiting; so a signal is very likely something that
- * should have interrupted us too. If we overreact it's no big deal,
+ * should have interrupted us too. If we overreact it's no big deal,
* the postmaster will just start the archiver again.
*
* Per the Single Unix Spec, shells report exit status > 128 when a
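
In terms of the <sys/wait.h> status macros, the test described works out roughly to the following (xlogarchcmd stands in for the built archive command string; a sketch):

	rc = system(xlogarchcmd);
	if (rc != 0 &&
		(WIFSIGNALED(rc) || (WIFEXITED(rc) && WEXITSTATUS(rc) > 128)))
		proc_exit(1);			/* died on a signal: abort the archiver */
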
diff --git a/src/backend/postmaster/pgstat.c b/src/backend/postmaster/pgstat.c
index 67a7122a1ff..e27adfd2663 100644
--- a/src/backend/postmaster/pgstat.c
+++ b/src/backend/postmaster/pgstat.c
@@ -336,7 +336,7 @@ pgstat_init(void)
* On some platforms, pg_getaddrinfo_all() may return multiple addresses
* only one of which will actually work (eg, both IPv6 and IPv4 addresses
* when kernel will reject IPv6). Worse, the failure may occur at the
- * bind() or perhaps even connect() stage. So we must loop through the
+ * bind() or perhaps even connect() stage. So we must loop through the
* results till we find a working combination. We will generate LOG
* messages, but no error, for bogus combinations.
*/
@@ -600,7 +600,7 @@ pgstat_start(void)
/*
* Do nothing if too soon since last collector start. This is a safety
* valve to protect against continuous respawn attempts if the collector
- * is dying immediately at launch. Note that since we will be re-called
+ * is dying immediately at launch. Note that since we will be re-called
* from the postmaster main loop, we will get another chance later.
*/
curtime = time(NULL);
@@ -1038,7 +1038,7 @@ pgstat_vacuum_stat(void)
*
* Collect the OIDs of all objects listed in the specified system catalog
* into a temporary hash table. Caller should hash_destroy the result
- * when done with it. (However, we make the table in CurrentMemoryContext
+ * when done with it. (However, we make the table in CurrentMemoryContext
* so that it will be freed properly in event of an error.)
* ----------
*/
@@ -1283,7 +1283,7 @@ pgstat_report_analyze(Relation rel,
* have counted such rows as live or dead respectively. Because we will
* report our counts of such rows at transaction end, we should subtract
* off these counts from what we send to the collector now, else they'll
- * be double-counted after commit. (This approach also ensures that the
+ * be double-counted after commit. (This approach also ensures that the
* collector ends up with the right numbers if we abort instead of
* committing.)
*/
@@ -1958,7 +1958,7 @@ AtPrepare_PgStat(void)
* Clean up after successful PREPARE.
*
* All we need do here is unlink the transaction stats state from the
- * nontransactional state. The nontransactional action counts will be
+ * nontransactional state. The nontransactional action counts will be
* reported to the stats collector immediately, while the effects on live
* and dead tuple counts are preserved in the 2PC state file.
*
@@ -2678,12 +2678,12 @@ pgstat_read_current_status(void)
* pgstat_get_backend_current_activity() -
*
* Return a string representing the current activity of the backend with
- * the specified PID. This looks directly at the BackendStatusArray,
+ * the specified PID. This looks directly at the BackendStatusArray,
* and so will provide current information regardless of the age of our
* transaction's snapshot of the status array.
*
* It is the caller's responsibility to invoke this only for backends whose
- * state is expected to remain stable while the result is in use. The
+ * state is expected to remain stable while the result is in use. The
* only current use is in deadlock reporting, where we can expect that
* the target backend is blocked on a lock. (There are corner cases
* where the target's wait could get aborted while we are looking at it,
@@ -2832,7 +2832,7 @@ pgstat_send_bgwriter(void)
/* ----------
* PgstatCollectorMain() -
*
- * Start up the statistics collector process. This is the body of the
+ * Start up the statistics collector process. This is the body of the
* postmaster child process.
*
* The argc/argv parameters are valid only in EXEC_BACKEND case.
@@ -2861,7 +2861,7 @@ PgstatCollectorMain(int argc, char *argv[])
/*
* If possible, make this process a group leader, so that the postmaster
- * can signal any child processes too. (pgstat probably never has any
+ * can signal any child processes too. (pgstat probably never has any
* child processes, but for consistency we make all postmaster child
* processes do this.)
*/
@@ -2908,7 +2908,7 @@ PgstatCollectorMain(int argc, char *argv[])
pgStatDBHash = pgstat_read_statsfile(InvalidOid, true);
/*
- * Setup the descriptor set for select(2). Since only one bit in the set
+ * Setup the descriptor set for select(2). Since only one bit in the set
* ever changes, we need not repeat FD_ZERO each time.
*/
#if !defined(HAVE_POLL) && !defined(WIN32)
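
The point is that the template descriptor set can be built once and copied each iteration, since select() modifies its argument; schematically (pgStatSock is the collector's input socket; the timeout value is illustrative):

	fd_set	rfds;
	struct timeval timeout;

	FD_ZERO(&rfds);						/* done once, outside the loop */
	FD_SET(pgStatSock, &rfds);

	for (;;)
	{
		fd_set	ready = rfds;			/* copy; select() scribbles on it */

		timeout.tv_sec = 2;
		timeout.tv_usec = 0;
		if (select(pgStatSock + 1, &ready, NULL, NULL, &timeout) > 0 &&
			FD_ISSET(pgStatSock, &ready))
		{
			/* read and process pending stats messages here */
		}
	}
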
@@ -2921,7 +2921,7 @@ PgstatCollectorMain(int argc, char *argv[])
*
* For performance reasons, we don't want to do a PostmasterIsAlive() test
* after every message; instead, do it only when select()/poll() is
- * interrupted by timeout. In essence, we'll stay alive as long as
+ * interrupted by timeout. In essence, we'll stay alive as long as
* backends keep sending us stuff often, even if the postmaster is gone.
*/
for (;;)
@@ -3392,7 +3392,7 @@ pgstat_write_statsfile(bool permanent)
/*
* If there is clock skew between backends and the collector, we could
* receive a stats request time that's in the future. If so, complain
- * and reset last_statrequest. Resetting ensures that no inquiry
+ * and reset last_statrequest. Resetting ensures that no inquiry
* message can cause more than one stats file write to occur.
*/
if (last_statrequest > last_statwrite)
@@ -3765,14 +3765,14 @@ backend_read_statsfile(void)
/*
* We set the minimum acceptable timestamp to PGSTAT_STAT_INTERVAL msec
- * before now. This indirectly ensures that the collector needn't write
+ * before now. This indirectly ensures that the collector needn't write
* the file more often than PGSTAT_STAT_INTERVAL. In an autovacuum
* worker, however, we want a lower delay to avoid using stale data, so we
* use PGSTAT_RETRY_DELAY (since the number of workers is low, this
* shouldn't be a problem).
*
* Note that we don't recompute min_ts after sleeping; so we might end up
- * accepting a file a bit older than PGSTAT_STAT_INTERVAL. In practice
+ * accepting a file a bit older than PGSTAT_STAT_INTERVAL. In practice
* that shouldn't happen, though, as long as the sleep time is less than
* PGSTAT_STAT_INTERVAL; and we don't want to lie to the collector about
* what our cutoff time really is.
@@ -3836,7 +3836,7 @@ pgstat_setup_memcxt(void)
/* ----------
* pgstat_clear_snapshot() -
*
- * Discard any data collected in the current transaction. Any subsequent
+ * Discard any data collected in the current transaction. Any subsequent
* request will cause new snapshots to be read.
*
* This is also invoked during transaction commit or abort to discard
@@ -4073,7 +4073,7 @@ pgstat_recv_resetcounter(PgStat_MsgResetcounter *msg, int len)
dbentry->functions = NULL;
/*
- * Reset database-level stats too. This should match the initialization
+ * Reset database-level stats too. This should match the initialization
* code in pgstat_get_db_entry().
*/
dbentry->n_xact_commit = 0;
diff --git a/src/backend/postmaster/postmaster.c b/src/backend/postmaster/postmaster.c
index 1fabbce517f..41c4b41a945 100644
--- a/src/backend/postmaster/postmaster.c
+++ b/src/backend/postmaster/postmaster.c
@@ -2,7 +2,7 @@
*
* postmaster.c
* This program acts as a clearing house for requests to the
- * POSTGRES system. Frontend programs send a startup message
+ * POSTGRES system. Frontend programs send a startup message
* to the Postmaster and the postmaster uses the info in the
* message to setup a backend process.
*
@@ -15,7 +15,7 @@
* The postmaster process creates the shared memory and semaphore
* pools during startup, but as a rule does not touch them itself.
* In particular, it is not a member of the PGPROC array of backends
- * and so it cannot participate in lock-manager operations. Keeping
+ * and so it cannot participate in lock-manager operations. Keeping
* the postmaster away from shared memory operations makes it simpler
* and more reliable. The postmaster is almost always able to recover
* from crashes of individual backends by resetting shared memory;
@@ -58,7 +58,7 @@
* Error Reporting:
* Use write_stderr() only for reporting "interactive" errors
* (essentially, bogus arguments on the command line). Once the
- * postmaster is launched, use ereport(). In particular, don't use
+ * postmaster is launched, use ereport(). In particular, don't use
* write_stderr() for anything that occurs after pmdaemonize.
*
*-------------------------------------------------------------------------
@@ -132,10 +132,10 @@
* children we have and send them appropriate signals when necessary.
*
* "Special" children such as the startup, bgwriter and autovacuum launcher
- * tasks are not in this list. Autovacuum worker and walsender processes are
+ * tasks are not in this list. Autovacuum worker and walsender processes are
* in it. Also, "dead_end" children are in it: these are children launched just
* for the purpose of sending a friendly rejection message to a would-be
- * client. We must track them because they are attached to shared memory,
+ * client. We must track them because they are attached to shared memory,
* but we know they will never become live backends. dead_end children are
* not assigned a PMChildSlot.
*/
@@ -182,10 +182,10 @@ static char ExtraOptions[MAXPGPATH];
/*
* These globals control the behavior of the postmaster in case some
- * backend dumps core. Normally, it kills all peers of the dead backend
+ * backend dumps core. Normally, it kills all peers of the dead backend
* and reinitializes shared memory. By specifying -s or -n, we can have
* the postmaster stop (rather than kill) peers and not reinitialize
- * shared data structures. (Reinit is currently dead code, though.)
+ * shared data structures. (Reinit is currently dead code, though.)
*/
static bool Reinit = true;
static int SendStop = false;
@@ -233,13 +233,13 @@ static bool RecoveryError = false; /* T if WAL recovery failed */
* state and the startup process is launched. The startup process begins by
* reading the control file and other preliminary initialization steps.
* In a normal startup, or after crash recovery, the startup process exits
- * with exit code 0 and we switch to PM_RUN state. However, archive recovery
+ * with exit code 0 and we switch to PM_RUN state. However, archive recovery
* is handled specially since it takes much longer and we would like to support
* hot standby during archive recovery.
*
* When the startup process is ready to start archive recovery, it signals the
* postmaster, and we switch to PM_RECOVERY state. The background writer is
- * launched, while the startup process continues applying WAL. If Hot Standby
+ * launched, while the startup process continues applying WAL. If Hot Standby
* is enabled, then, after reaching a consistent point in WAL redo, startup
* process signals us again, and we switch to PM_HOT_STANDBY state and
* begin accepting connections to perform read-only queries. When archive
@@ -551,7 +551,7 @@ PostmasterMain(int argc, char *argv[])
opterr = 1;
/*
- * Parse command-line options. CAUTION: keep this in sync with
+ * Parse command-line options. CAUTION: keep this in sync with
* tcop/postgres.c (the option sets should not conflict) and with the
* common help() function in main/main.c.
*/
@@ -829,7 +829,7 @@ PostmasterMain(int argc, char *argv[])
CreateDataDirLockFile(true);
/*
- * If timezone is not set, determine what the OS uses. (In theory this
+ * If timezone is not set, determine what the OS uses. (In theory this
* should be done during GUC initialization, but because it can take as
* much as several seconds, we delay it until after we've created the
* postmaster.pid file. This prevents problems with boot scripts that
@@ -1100,7 +1100,7 @@ PostmasterMain(int argc, char *argv[])
load_ident();
/*
- * Remove old temporary files. At this point there can be no other
+ * Remove old temporary files. At this point there can be no other
* Postgres processes running in this directory, so this should be safe.
*/
RemovePgTempFiles();
@@ -1347,7 +1347,7 @@ pmdaemonize(void)
/*
* Reassociate stdin/stdout/stderr. fork_process() cleared any pending
- * output, so this should be safe. The only plausible error is EINTR,
+ * output, so this should be safe. The only plausible error is EINTR,
* which just means we should retry.
*/
do
@@ -1590,7 +1590,7 @@ ProcessStartupPacket(Port *port, bool SSLdone)
{
/*
* EOF after SSLdone probably means the client didn't like our
- * response to NEGOTIATE_SSL_CODE. That's not an error condition, so
+ * response to NEGOTIATE_SSL_CODE. That's not an error condition, so
* don't clutter the log with a complaint.
*/
if (!SSLdone)
@@ -1715,7 +1715,7 @@ retry1:
int32 offset = sizeof(ProtocolVersion);
/*
- * Scan packet body for name/option pairs. We can assume any string
+ * Scan packet body for name/option pairs. We can assume any string
* beginning within the packet body is null-terminated, thanks to
* zeroing extra byte above.
*/
@@ -2121,7 +2121,7 @@ reset_shared(int port)
*
* Note: in each "cycle of life" we will normally assign the same IPC keys
* (if using SysV shmem and/or semas), since the port number is used to
- * determine IPC keys. This helps ensure that we will clean up dead IPC
+ * determine IPC keys. This helps ensure that we will clean up dead IPC
* objects if the postmaster crashes and is restarted.
*/
CreateSharedMemoryAndSemaphores(false, port);
@@ -2460,7 +2460,7 @@ reaper(SIGNAL_ARGS)
/*
* OK, we saw normal exit of the bgwriter after it's been told
* to shut down. We expect that it wrote a shutdown
- * checkpoint. (If for some reason it didn't, recovery will
+ * checkpoint. (If for some reason it didn't, recovery will
* occur on next postmaster start.)
*
* At this point we should have no normal backend children
@@ -2536,7 +2536,7 @@ reaper(SIGNAL_ARGS)
/*
* Was it the autovacuum launcher? Normal exit can be ignored; we'll
* start a new one at the next iteration of the postmaster's main
- * loop, if necessary. Any other exit condition is treated as a
+ * loop, if necessary. Any other exit condition is treated as a
* crash.
*/
if (pid == AutoVacPID)
@@ -2665,7 +2665,7 @@ CleanupBackend(int pid,
if (!ReleasePostmasterChildSlot(bp->child_slot))
{
/*
- * Uh-oh, the child failed to clean itself up. Treat as a
+ * Uh-oh, the child failed to clean itself up. Treat as a
* crash after all.
*/
HandleChildCrash(pid, exitstatus, _("server process"));
@@ -2958,7 +2958,7 @@ PostmasterStateMachine(void)
* PM_WAIT_BACKENDS state ends when we have no regular backends
* (including autovac workers) and no walwriter or autovac launcher.
* If we are doing crash recovery then we expect the bgwriter to exit
- * too, otherwise not. The archiver, stats, and syslogger processes
+ * too, otherwise not. The archiver, stats, and syslogger processes
* are disregarded since they are not connected to shared memory; we
* also disregard dead_end children here. Walsenders are also
* disregarded, they will be terminated later after writing the
@@ -2974,7 +2974,7 @@ PostmasterStateMachine(void)
if (FatalError)
{
/*
- * Start waiting for dead_end children to die. This state
+ * Start waiting for dead_end children to die. This state
* change causes ServerLoop to stop creating new ones.
*/
pmState = PM_WAIT_DEAD_END;
@@ -3072,7 +3072,7 @@ PostmasterStateMachine(void)
/*
* If we've been told to shut down, we exit as soon as there are no
- * remaining children. If there was a crash, cleanup will occur at the
+ * remaining children. If there was a crash, cleanup will occur at the
* next startup. (Before PostgreSQL 8.3, we tried to recover from the
* crash before exiting, but that seems unwise if we are quitting because
* we got SIGTERM from init --- there may well not be time for recovery
@@ -3148,7 +3148,7 @@ PostmasterStateMachine(void)
* system().
*
* There is a race condition for recently-forked children: they might not
- * have executed setsid() yet. So we signal the child directly as well as
+ * have executed setsid() yet. So we signal the child directly as well as
* the group. We assume such a child will handle the signal before trying
* to spawn any grandchild processes. We also assume that signaling the
* child twice will not cause any problems.
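That race is why the signalling idiom fires twice. A minimal standalone version; it mirrors the idea of postmaster.c's signal_child() without copying it:

    #include <signal.h>
    #include <sys/types.h>

    static void
    signal_child_sketch(pid_t pid, int sig)
    {
        kill(pid, sig);         /* the child itself: it may not have called
                                 * setsid() yet, so the group kill below
                                 * could miss it */
        kill(-pid, sig);        /* its process group, catching any
                                 * grandchildren it has already spawned */
    }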
@@ -3336,7 +3336,7 @@ BackendStartup(Port *port)
/*
* Try to report backend fork() failure to client before we close the
- * connection. Since we do not care to risk blocking the postmaster on
+ * connection. Since we do not care to risk blocking the postmaster on
* this connection, we set the connection to non-blocking and try only once.
*
* This is grungy special-purpose code; we cannot use backend libpq since
@@ -3390,7 +3390,7 @@ BackendInitialize(Port *port)
/*
* PreAuthDelay is a debugging aid for investigating problems in the
* authentication cycle: it can be set in postgresql.conf to allow time to
- * attach to the newly-forked backend with a debugger. (See also
+ * attach to the newly-forked backend with a debugger. (See also
* PostAuthDelay, which we allow clients to pass through PGOPTIONS, but it
* is not honored until after authentication.)
*/
@@ -3417,7 +3417,7 @@ BackendInitialize(Port *port)
/*
* If possible, make this process a group leader, so that the postmaster
- * can signal any child processes too. (We do this now on the off chance
+ * can signal any child processes too. (We do this now on the off chance
* that something might spawn a child process during authentication.)
*/
#ifdef HAVE_SETSID
@@ -3427,7 +3427,7 @@ BackendInitialize(Port *port)
/*
* We arrange for a simple exit(1) if we receive SIGTERM or SIGQUIT or
- * timeout while trying to collect the startup packet. Otherwise the
+ * timeout while trying to collect the startup packet. Otherwise the
* postmaster cannot shut down the database FAST or IMMED cleanly if a
* buggy client fails to send the packet promptly.
*/
@@ -3505,7 +3505,7 @@ BackendInitialize(Port *port)
status = ProcessStartupPacket(port, false);
/*
- * Stop here if it was bad or a cancel packet. ProcessStartupPacket
+ * Stop here if it was bad or a cancel packet. ProcessStartupPacket
* already did any appropriate error reporting.
*/
if (status != STATUS_OK)
@@ -4053,7 +4053,7 @@ SubPostmasterMain(int argc, char *argv[])
read_nondefault_variables();
/*
- * Reload any libraries that were preloaded by the postmaster. Since we
+ * Reload any libraries that were preloaded by the postmaster. Since we
* exec'd this process, those libraries didn't come along with us; but we
* should load them into all child processes to be consistent with the
* non-EXEC_BACKEND behavior.
@@ -4106,7 +4106,7 @@ SubPostmasterMain(int argc, char *argv[])
*
* This prevents a randomized stack base address that causes child
* shared memory to be at a different address than the parent, making
- * it impossible to attach to shared memory.  Return the value to
+ * it impossible to attach to shared memory. Return the value to
* '1' when finished.
*/
CreateSharedMemoryAndSemaphores(false, 0);
@@ -4212,7 +4212,7 @@ ExitPostmaster(int status)
/* should cleanup shared memory and kill all backends */
/*
- * Not sure of the semantics here. When the Postmaster dies, should the
+ * Not sure of the semantics here. When the Postmaster dies, should the
* backends all be killed? Probably not.
*
* MUST -- vadim 05-10-1999
@@ -4244,7 +4244,7 @@ sigusr1_handler(SIGNAL_ARGS)
FatalError = false;
/*
- * Crank up the background writer. It doesn't matter if this fails,
+ * Crank up the background writer. It doesn't matter if this fails,
* we'll just try again later.
*/
Assert(BgWriterPID == 0);
@@ -4467,7 +4467,7 @@ CountChildren(int target)
/*
* StartChildProcess -- start an auxiliary process for the postmaster
*
- * xlop determines what kind of child will be started. All child types
+ * xlop determines what kind of child will be started. All child types
* initially go to AuxiliaryProcessMain, which will handle common setup.
*
* Return value of StartChildProcess is subprocess' PID, or 0 if failed
@@ -4686,9 +4686,9 @@ CreateOptsFile(int argc, char *argv[], char *fullprogname)
* This reports the number of entries needed in per-child-process arrays
* (the PMChildFlags array, and if EXEC_BACKEND the ShmemBackendArray).
* These arrays include regular backends, autovac workers and walsenders,
- * but not special children nor dead_end children. This allows the arrays
+ * but not special children nor dead_end children. This allows the arrays
* to have a fixed maximum size, to wit the same too-many-children limit
- * enforced by canAcceptConnections(). The exact value isn't too critical
+ * enforced by canAcceptConnections(). The exact value isn't too critical
* as long as it's more than MaxBackends.
*/
int
diff --git a/src/backend/postmaster/syslogger.c b/src/backend/postmaster/syslogger.c
index 264cab0690b..341c99d5a2e 100644
--- a/src/backend/postmaster/syslogger.c
+++ b/src/backend/postmaster/syslogger.c
@@ -65,7 +65,7 @@
/*
- * GUC parameters. Logging_collector cannot be changed after postmaster
+ * GUC parameters. Logging_collector cannot be changed after postmaster
* start, but the rest can change at SIGHUP.
*/
bool Logging_collector = false;
@@ -187,7 +187,7 @@ SysLoggerMain(int argc, char *argv[])
/*
* If we restarted, our stderr is already redirected into our own input
* pipe. This is of course pretty useless, not to mention that it
- * interferes with detecting pipe EOF. Point stderr to /dev/null. This
+ * interferes with detecting pipe EOF. Point stderr to /dev/null. This
* assumes that all interesting messages generated in the syslogger will
* come through elog.c and will be sent to write_syslogger_file.
*/
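Re-pointing stderr is the usual open/dup2/close dance; a sketch:

    #include <fcntl.h>
    #include <unistd.h>

    static void
    stderr_to_devnull(void)
    {
        int         fd = open("/dev/null", O_WRONLY);

        if (fd >= 0)
        {
            dup2(fd, STDERR_FILENO);    /* stderr no longer feeds our pipe */
            if (fd != STDERR_FILENO)
                close(fd);              /* don't leak the spare descriptor */
        }
    }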
@@ -197,7 +197,7 @@ SysLoggerMain(int argc, char *argv[])
/*
* The closes might look redundant, but they are not: we want to be
- * darn sure the pipe gets closed even if the open failed. We can
+ * darn sure the pipe gets closed even if the open failed. We can
* survive running with stderr pointing nowhere, but we can't afford
* to have extra pipe input descriptors hanging around.
*/
@@ -238,7 +238,7 @@ SysLoggerMain(int argc, char *argv[])
/*
* If possible, make this process a group leader, so that the postmaster
- * can signal any child processes too. (syslogger probably never has any
+ * can signal any child processes too. (syslogger probably never has any
* child processes, but for consistency we make all postmaster child
* processes do this.)
*/
@@ -477,7 +477,7 @@ SysLoggerMain(int argc, char *argv[])
(errmsg("logger shutting down")));
/*
- * Normal exit from the syslogger is here. Note that we
+ * Normal exit from the syslogger is here. Note that we
* deliberately do not close syslogFile before exiting; this is to
* allow for the possibility of elog messages being generated
* inside proc_exit. Regular exit() will take care of flushing
@@ -1280,7 +1280,7 @@ set_next_rotation_time(void)
/*
* The requirements here are to choose the next time > now that is a
* "multiple" of the log rotation interval. "Multiple" can be interpreted
- * fairly loosely. In this version we align to log_timezone rather than
+ * fairly loosely. In this version we align to log_timezone rather than
* GMT.
*/
rotinterval = Log_RotationAge * SECS_PER_MINUTE; /* convert to seconds */
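Choosing the next time > now that is a multiple of the interval is one line of integer arithmetic; this sketch aligns to the Unix epoch rather than log_timezone:

    #include <time.h>

    static time_t
    next_rotation(time_t now, time_t rotinterval)
    {
        /*
         * Round down to the current interval boundary, then step one
         * interval forward; strictly greater than now even when now
         * falls exactly on a boundary.
         */
        return (now / rotinterval + 1) * rotinterval;
    }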
diff --git a/src/backend/postmaster/walwriter.c b/src/backend/postmaster/walwriter.c
index 80991f6f567..12f9c0d2074 100644
--- a/src/backend/postmaster/walwriter.c
+++ b/src/backend/postmaster/walwriter.c
@@ -92,7 +92,7 @@ WalWriterMain(void)
/*
* If possible, make this process a group leader, so that the postmaster
- * can signal any child processes too. (walwriter probably never has any
+ * can signal any child processes too. (walwriter probably never has any
* child processes, but for consistency we make all postmaster child
* processes do this.)
*/
@@ -165,7 +165,7 @@ WalWriterMain(void)
/*
* These operations are really just a minimal subset of
- * AbortTransaction(). We don't have very many resources to worry
+ * AbortTransaction(). We don't have very many resources to worry
* about in walwriter, but we do have LWLocks, and perhaps buffers?
*/
LWLockReleaseAll();
@@ -295,7 +295,7 @@ wal_quickdie(SIGNAL_ARGS)
on_exit_reset();
/*
- * Note we do exit(2) not exit(0). This is to force the postmaster into a
+ * Note we do exit(2) not exit(0). This is to force the postmaster into a
* system reset cycle if some idiot DBA sends a manual SIGQUIT to a random
* backend. This is necessary precisely because we don't clean up our
* shared memory state. (The "dead man switch" mechanism in pmsignal.c
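In handler form the exit(2) rule looks like the sketch below. It uses _exit(), since exit() is not async-signal-safe; the real handlers instead call on_exit_reset() first so that exit() skips the atexit cleanups:

    #include <signal.h>
    #include <unistd.h>

    static void
    quickdie_sketch(int signo)
    {
        (void) signo;

        /*
         * A nonzero status tells the parent our shared state may be
         * corrupt, forcing a full reset cycle instead of a clean restart.
         */
        _exit(2);
    }

    static void
    install_quickdie(void)
    {
        signal(SIGQUIT, quickdie_sketch);
    }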
diff --git a/src/backend/regex/regc_color.c b/src/backend/regex/regc_color.c
index e6aa899518f..c495cee3003 100644
--- a/src/backend/regex/regc_color.c
+++ b/src/backend/regex/regc_color.c
@@ -2,7 +2,7 @@
* colorings of characters
* This file is #included by regcomp.c.
*
- * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
+ * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
*
* Development of this software was funded, in part, by Cray Research Inc.,
* UUNET Communications Services Inc., Sun Microsystems Inc., and Scriptics
diff --git a/src/backend/regex/regc_cvec.c b/src/backend/regex/regc_cvec.c
index fb6f06b5243..f8aaedfad8b 100644
--- a/src/backend/regex/regc_cvec.c
+++ b/src/backend/regex/regc_cvec.c
@@ -2,7 +2,7 @@
* Utility functions for handling cvecs
* This file is #included by regcomp.c.
*
- * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
+ * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
*
* Development of this software was funded, in part, by Cray Research Inc.,
* UUNET Communications Services Inc., Sun Microsystems Inc., and Scriptics
diff --git a/src/backend/regex/regc_lex.c b/src/backend/regex/regc_lex.c
index 3360cfb0e9f..0e87ad2deba 100644
--- a/src/backend/regex/regc_lex.c
+++ b/src/backend/regex/regc_lex.c
@@ -2,7 +2,7 @@
* lexical analyzer
* This file is #included by regcomp.c.
*
- * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
+ * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
*
* Development of this software was funded, in part, by Cray Research Inc.,
* UUNET Communications Services Inc., Sun Microsystems Inc., and Scriptics
diff --git a/src/backend/regex/regc_locale.c b/src/backend/regex/regc_locale.c
index 0f70931b13e..e6f5fc45da0 100644
--- a/src/backend/regex/regc_locale.c
+++ b/src/backend/regex/regc_locale.c
@@ -30,7 +30,7 @@
*
* THE AUTHORS AND DISTRIBUTORS SPECIFICALLY DISCLAIM ANY WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. THIS SOFTWARE
+ * FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. THIS SOFTWARE
* IS PROVIDED ON AN "AS IS" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE
* NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR
* MODIFICATIONS.
@@ -38,7 +38,7 @@
* GOVERNMENT USE: If you are acquiring this software on behalf of the
* U.S. government, the Government shall have only "Restricted Rights"
* in the software and related documentation as defined in the Federal
- * Acquisition Regulations (FARs) in Clause 52.227.19 (c) (2). If you
+ * Acquisition Regulations (FARs) in Clause 52.227.19 (c) (2). If you
* are acquiring the software on behalf of the Department of Defense, the
* software shall be classified as "Commercial Computer Software" and the
* Government shall have only "Restricted Rights" as defined in Clause
@@ -707,7 +707,7 @@ allcases(struct vars * v, /* context */
/*
* cmp - chr-substring compare
*
- * Backrefs need this. It should preferably be efficient.
+ * Backrefs need this. It should preferably be efficient.
* Note that it does not need to report anything except equal/unequal.
* Note also that the length is exact, and the comparison should not
* stop at embedded NULs!
diff --git a/src/backend/regex/regc_nfa.c b/src/backend/regex/regc_nfa.c
index 170da5954c7..776bb59b867 100644
--- a/src/backend/regex/regc_nfa.c
+++ b/src/backend/regex/regc_nfa.c
@@ -2,7 +2,7 @@
* NFA utilities.
* This file is #included by regcomp.c.
*
- * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
+ * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
*
* Development of this software was funded, in part, by Cray Research Inc.,
* UUNET Communications Services Inc., Sun Microsystems Inc., and Scriptics
@@ -1304,7 +1304,7 @@ fixempties(struct nfa * nfa,
}
/*
- * And remove any states that have become useless. (This cleanup is not
+ * And remove any states that have become useless. (This cleanup is not
* very thorough, and would be even less so if we tried to combine it with
* the previous step; but cleanup() will take care of anything we miss.)
*/
@@ -1372,7 +1372,7 @@ replaceempty(struct nfa * nfa,
* non-EMPTY out-arcs), we must keep it so, so always push forward in that
* case.
*
- * The fan-out/fan-in comparison should count only non-EMPTY arcs. If
+ * The fan-out/fan-in comparison should count only non-EMPTY arcs. If
* "from" is doomed, we can skip counting "to"'s arcs, since we want to
* force taking the copyins path in that case.
*/
diff --git a/src/backend/regex/regc_pg_locale.c b/src/backend/regex/regc_pg_locale.c
index bc5ad3157a9..570719d13a7 100644
--- a/src/backend/regex/regc_pg_locale.c
+++ b/src/backend/regex/regc_pg_locale.c
@@ -23,7 +23,7 @@
* several implementation strategies depending on the situation:
*
* 1. In C/POSIX collations, we use hard-wired code. We can't depend on
- * the <ctype.h> functions since those will obey LC_CTYPE. Note that these
+ * the <ctype.h> functions since those will obey LC_CTYPE. Note that these
* collations don't give a fig about multibyte characters.
*
* 2. In the "default" collation (which is supposed to obey LC_CTYPE):
@@ -35,10 +35,10 @@
*
* 2b. In all other encodings, or on machines that lack <wctype.h>, we use
* the <ctype.h> functions for pg_wchar values up to 255, and punt for values
- * above that. This is only 100% correct in single-byte encodings such as
- * LATINn. However, non-Unicode multibyte encodings are mostly Far Eastern
+ * above that. This is only 100% correct in single-byte encodings such as
+ * LATINn. However, non-Unicode multibyte encodings are mostly Far Eastern
* character sets for which the properties being tested here aren't very
- * relevant for higher code values anyway. The difficulty with using the
+ * relevant for higher code values anyway. The difficulty with using the
* <wctype.h> functions with non-Unicode multibyte encodings is that we can
* have no certainty that the platform's wchar_t representation matches
* what we do in pg_wchar conversions.
diff --git a/src/backend/regex/regcomp.c b/src/backend/regex/regcomp.c
index 2ddcdff09a4..01dbade2d86 100644
--- a/src/backend/regex/regcomp.c
+++ b/src/backend/regex/regcomp.c
@@ -2,7 +2,7 @@
* re_*comp and friends - compile REs
* This file #includes several others (see the bottom).
*
- * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
+ * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
*
* Development of this software was funded, in part, by Cray Research Inc.,
* UUNET Communications Services Inc., Sun Microsystems Inc., and Scriptics
@@ -563,7 +563,7 @@ makesearch(struct vars * v,
* constraints, often knowing when you were in the pre state tells you
* little; it's the next state(s) that are informative. But some of them
* may have other inarcs, i.e. it may be possible to make actual progress
- * and then return to one of them. We must de-optimize such cases,
+ * and then return to one of them. We must de-optimize such cases,
* splitting each such state into progress and no-progress states.
*/
@@ -609,7 +609,7 @@ makesearch(struct vars * v,
* parse - parse an RE
*
* This is actually just the top level, which parses a bunch of branches
- * tied together with '|'. They appear in the tree as the left children
+ * tied together with '|'. They appear in the tree as the left children
* of a chain of '|' subres.
*/
static struct subre *
@@ -1335,7 +1335,7 @@ bracket(struct vars * v,
/*
* cbracket - handle complemented bracket expression
* We do it by calling bracket() with dummy endpoints, and then complementing
- * the result. The alternative would be to invoke rainbow(), and then delete
+ * the result. The alternative would be to invoke rainbow(), and then delete
* arcs as the b.e. is seen... but that gets messy.
*/
static void
diff --git a/src/backend/regex/rege_dfa.c b/src/backend/regex/rege_dfa.c
index 5efb761d6d7..f51a2472761 100644
--- a/src/backend/regex/rege_dfa.c
+++ b/src/backend/regex/rege_dfa.c
@@ -2,7 +2,7 @@
* DFA routines
* This file is #included by regexec.c.
*
- * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
+ * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
*
* Development of this software was funded, in part, by Cray Research Inc.,
* UUNET Communications Services Inc., Sun Microsystems Inc., and Scriptics
diff --git a/src/backend/regex/regerror.c b/src/backend/regex/regerror.c
index 21d2c60b623..6b8d9cfb859 100644
--- a/src/backend/regex/regerror.c
+++ b/src/backend/regex/regerror.c
@@ -1,7 +1,7 @@
/*
* regerror - error-code expansion
*
- * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
+ * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
*
* Development of this software was funded, in part, by Cray Research Inc.,
* UUNET Communications Services Inc., Sun Microsystems Inc., and Scriptics
diff --git a/src/backend/regex/regexec.c b/src/backend/regex/regexec.c
index 7cd6679eded..ee9984d6c07 100644
--- a/src/backend/regex/regexec.c
+++ b/src/backend/regex/regexec.c
@@ -1,7 +1,7 @@
/*
* re_*exec and friends - match REs
*
- * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
+ * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
*
* Development of this software was funded, in part, by Cray Research Inc.,
* UUNET Communications Services Inc., Sun Microsystems Inc., and Scriptics
diff --git a/src/backend/regex/regfree.c b/src/backend/regex/regfree.c
index b291749bd1a..ae17ae70eb6 100644
--- a/src/backend/regex/regfree.c
+++ b/src/backend/regex/regfree.c
@@ -1,7 +1,7 @@
/*
* regfree - free an RE
*
- * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
+ * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
*
* Development of this software was funded, in part, by Cray Research Inc.,
* UUNET Communications Services Inc., Sun Microsystems Inc., and Scriptics
diff --git a/src/backend/replication/syncrep.c b/src/backend/replication/syncrep.c
index 7fe9b09adfa..c77f455381f 100644
--- a/src/backend/replication/syncrep.c
+++ b/src/backend/replication/syncrep.c
@@ -510,7 +510,7 @@ SyncRepGetStandbyPriority(void)
/*
* Walk queue from head. Set the state of any backends that need to be woken,
- * remove them from the queue, and then wake them. Pass all = true to wake
+ * remove them from the queue, and then wake them. Pass all = true to wake
* the whole queue; otherwise, just wake up to the walsender's LSN.
*
* Must hold SyncRepLock.
diff --git a/src/backend/replication/walreceiver.c b/src/backend/replication/walreceiver.c
index 7058b888ca3..d2ac547602b 100644
--- a/src/backend/replication/walreceiver.c
+++ b/src/backend/replication/walreceiver.c
@@ -226,7 +226,7 @@ WalReceiverMain(void)
/*
* If possible, make this process a group leader, so that the postmaster
- * can signal any child processes too. (walreceiver probably never has
+ * can signal any child processes too. (walreceiver probably never has
* any child processes, but for consistency we make all postmaster child
* processes do this.)
*/
@@ -411,7 +411,7 @@ WalRcvQuickDieHandler(SIGNAL_ARGS)
on_exit_reset();
/*
- * Note we do exit(2) not exit(0). This is to force the postmaster into a
+ * Note we do exit(2) not exit(0). This is to force the postmaster into a
* system reset cycle if some idiot DBA sends a manual SIGQUIT to a random
* backend. This is necessary precisely because we don't clean up our
* shared memory state. (The "dead man switch" mechanism in pmsignal.c
diff --git a/src/backend/replication/walreceiverfuncs.c b/src/backend/replication/walreceiverfuncs.c
index 587949bfdb9..f33e9429e31 100644
--- a/src/backend/replication/walreceiverfuncs.c
+++ b/src/backend/replication/walreceiverfuncs.c
@@ -220,7 +220,7 @@ RequestXLogStreaming(XLogRecPtr recptr, const char *conninfo)
* Returns the last+1 byte position that walreceiver has written.
*
* Optionally, returns the previous chunk start, that is the first byte
- * written in the most recent walreceiver flush cycle. Callers not
+ * written in the most recent walreceiver flush cycle. Callers not
* interested in that value may pass NULL for latestChunkStart.
*/
XLogRecPtr
diff --git a/src/backend/replication/walsender.c b/src/backend/replication/walsender.c
index b6bf6333378..a5683302d56 100644
--- a/src/backend/replication/walsender.c
+++ b/src/backend/replication/walsender.c
@@ -1260,7 +1260,7 @@ WalSndQuickDieHandler(SIGNAL_ARGS)
on_exit_reset();
/*
- * Note we do exit(2) not exit(0). This is to force the postmaster into a
+ * Note we do exit(2) not exit(0). This is to force the postmaster into a
* system reset cycle if some idiot DBA sends a manual SIGQUIT to a random
* backend. This is necessary precisely because we don't clean up our
* shared memory state. (The "dead man switch" mechanism in pmsignal.c
diff --git a/src/backend/rewrite/rewriteDefine.c b/src/backend/rewrite/rewriteDefine.c
index 5a4c5695946..07b52338946 100644
--- a/src/backend/rewrite/rewriteDefine.c
+++ b/src/backend/rewrite/rewriteDefine.c
@@ -400,7 +400,7 @@ DefineQueryRewrite(char *rulename,
*
* If so, check that the relation is empty because the storage for the
* relation is going to be deleted. Also insist that the rel not have
- * any triggers, indexes, or child tables. (Note: these tests are too
+ * any triggers, indexes, or child tables. (Note: these tests are too
* strict, because they will reject relations that once had such but
* don't anymore. But we don't really care, because this whole
* business of converting relations to views is just a kluge to allow
@@ -616,7 +616,7 @@ checkRuleResultList(List *targetList, TupleDesc resultDesc, bool isSelect)
* Note: for a view (ON SELECT rule), the checkAsUser field of the OLD
* RTE entry will be overridden when the view rule is expanded, and the
* checkAsUser field of the NEW entry is irrelevant because that entry's
- * requiredPerms bits will always be zero. However, for other types of rules
+ * requiredPerms bits will always be zero. However, for other types of rules
* it's important to set these fields to match the rule owner. So we just set
* them always.
*/
diff --git a/src/backend/rewrite/rewriteHandler.c b/src/backend/rewrite/rewriteHandler.c
index 771bc162f21..c4fa3ed0fd2 100644
--- a/src/backend/rewrite/rewriteHandler.c
+++ b/src/backend/rewrite/rewriteHandler.c
@@ -207,7 +207,7 @@ AcquireRewriteLocks(Query *parsetree,
/*
* The elements of an alias list have to refer to
* earlier RTEs of the same rtable, because that's the
- * order the planner builds things in. So we already
+ * order the planner builds things in. So we already
* processed the referenced RTE, and so it's safe to
* use get_rte_attribute_is_dropped on it. (This might
* not hold after rewriting or planning, but it's OK
@@ -369,7 +369,7 @@ rewriteRuleAction(Query *parsetree,
/*
* Generate expanded rtable consisting of main parsetree's rtable plus
* rule action's rtable; this becomes the complete rtable for the rule
- * action. Some of the entries may be unused after we finish rewriting,
+ * action. Some of the entries may be unused after we finish rewriting,
* but we leave them all in place for two reasons:
*
* We'd have a much harder job to adjust the query's varnos if we
@@ -435,7 +435,7 @@ rewriteRuleAction(Query *parsetree,
* that if the rule action refers to OLD, its jointree will add a
* reference to rt_index. If the rule action doesn't refer to OLD, but
* either the rule_qual or the user query quals do, then we need to keep
- * the original rtindex in the jointree to provide data for the quals. We
+ * the original rtindex in the jointree to provide data for the quals. We
* don't want the original rtindex to be joined twice, however, so avoid
* keeping it if the rule action mentions it.
*
@@ -457,7 +457,7 @@ rewriteRuleAction(Query *parsetree,
{
/*
* If sub_action is a setop, manipulating its jointree will do no
- * good at all, because the jointree is dummy. (Perhaps someday
+ * good at all, because the jointree is dummy. (Perhaps someday
* we could push the joining and quals down to the member
* statements of the setop?)
*/
@@ -658,7 +658,7 @@ adjustJoinTreeList(Query *parsetree, bool removert, int rt_index)
* then junk fields (these in no particular order).
*
* We must do items 1,2,3 before firing rewrite rules, else rewritten
- * references to NEW.foo will produce wrong or incomplete results. Item 4
+ * references to NEW.foo will produce wrong or incomplete results. Item 4
* is not needed for rewriting, but will be needed by the planner, and we
* can do it essentially for free while handling the other items.
*
@@ -865,7 +865,7 @@ process_matched_tle(TargetEntry *src_tle,
}
/*----------
- * Multiple assignments to same attribute. Allow only if all are
+ * Multiple assignments to same attribute. Allow only if all are
* FieldStore or ArrayRef assignment operations. This is a bit
* tricky because what we may actually be looking at is a nest of
* such nodes; consider
@@ -883,7 +883,7 @@ process_matched_tle(TargetEntry *src_tle,
* assignments appear to occur left-to-right.
*
* For FieldStore, instead of nesting we can generate a single
- * FieldStore with multiple target fields. We must nest when
+ * FieldStore with multiple target fields. We must nest when
* ArrayRefs are involved though.
*----------
*/
@@ -1175,7 +1175,7 @@ rewriteValuesRTE(RangeTblEntry *rte, Relation target_relation, List *attrnos)
* rewriteTargetListUD - rewrite UPDATE/DELETE targetlist as needed
*
* This function adds a "junk" TLE that is needed to allow the executor to
- * find the original row for the update or delete. When the target relation
+ * find the original row for the update or delete. When the target relation
* is a regular table, the junk TLE emits the ctid attribute of the original
* row. When the target relation is a view, there is no ctid, so we instead
* emit a whole-row Var that will contain the "old" values of the view row.
@@ -1331,9 +1331,9 @@ ApplyRetrieveRule(Query *parsetree,
* fine as the result relation.
*
* For UPDATE/DELETE, we need to expand the view so as to have source
- * data for the operation. But we also need an unmodified RTE to
+ * data for the operation. But we also need an unmodified RTE to
* serve as the target. So, copy the RTE and add the copy to the
- * rangetable. Note that the copy does not get added to the jointree.
+ * rangetable. Note that the copy does not get added to the jointree.
* Also note that there's a hack in fireRIRrules to avoid calling this
* function again when it arrives at the copied RTE.
*/
@@ -1508,7 +1508,7 @@ markQueryForLocking(Query *qry, Node *jtnode,
* in the given tree.
*
* NOTE: although this has the form of a walker, we cheat and modify the
- * SubLink nodes in-place. It is caller's responsibility to ensure that
+ * SubLink nodes in-place. It is caller's responsibility to ensure that
* no unwanted side-effects occur!
*
* This is unlike most of the other routines that recurse into subselects,
@@ -1700,7 +1700,7 @@ fireRIRrules(Query *parsetree, List *activeRIRs, bool forUpdatePushedDown)
* not just "NOT x" which the planner is much smarter about, else we will
* do the wrong thing when the qual evaluates to NULL.)
*
- * The rule_qual may contain references to OLD or NEW. OLD references are
+ * The rule_qual may contain references to OLD or NEW. OLD references are
* replaced by references to the specified rt_index (the relation that the
* rule applies to). NEW references are only possible for INSERT and UPDATE
* queries on the relation itself, and so they should be replaced by copies
@@ -1770,7 +1770,7 @@ CopyAndAddInvertedQual(Query *parsetree,
* rows that the qualified action doesn't act on. (If there are multiple
* qualified INSTEAD rules, we AND all the negated quals onto a single
* modified original query.) We won't execute the original, unmodified
- * query if we find either qualified or unqualified INSTEAD rules. If
+ * query if we find either qualified or unqualified INSTEAD rules. If
* we find both, the modified original query is discarded too.
*/
static List *
@@ -2226,7 +2226,7 @@ QueryRewrite(Query *parsetree)
*
* If the original query is still in the list, it sets the command tag.
* Otherwise, the last INSTEAD query of the same kind as the original is
- * allowed to set the tag. (Note these rules can leave us with no query
+ * allowed to set the tag. (Note these rules can leave us with no query
* setting the tag. The tcop code has to cope with this by setting up a
* default tag based on the original un-rewritten query.)
*
diff --git a/src/backend/rewrite/rewriteManip.c b/src/backend/rewrite/rewriteManip.c
index 3f20d741f69..c28b4eeae40 100644
--- a/src/backend/rewrite/rewriteManip.c
+++ b/src/backend/rewrite/rewriteManip.c
@@ -68,7 +68,7 @@ checkExprHasAggs(Node *node)
* specified query level.
*
* The objective of this routine is to detect whether there are aggregates
- * belonging to the given query level. Aggregates belonging to subqueries
+ * belonging to the given query level. Aggregates belonging to subqueries
* or outer queries do NOT cause a true result. We must recurse into
* subqueries to detect outer-reference aggregates that logically belong to
* the specified query level.
@@ -123,7 +123,7 @@ contain_aggs_of_level_walker(Node *node,
* Find the parse location of any aggregate of the specified query level.
*
* Returns -1 if no such agg is in the querytree, or if they all have
- * unknown parse location. (The former case is probably caller error,
+ * unknown parse location. (The former case is probably caller error,
* but we don't bother to distinguish it from the latter case.)
*
* Note: it might seem appropriate to merge this functionality into
@@ -218,7 +218,7 @@ contain_windowfuncs_walker(Node *node, void *context)
* Find the parse location of any windowfunc of the current query level.
*
* Returns -1 if no such windowfunc is in the querytree, or if they all have
- * unknown parse location. (The former case is probably caller error,
+ * unknown parse location. (The former case is probably caller error,
* but we don't bother to distinguish it from the latter case.)
*
* Note: it might seem appropriate to merge this functionality into
@@ -297,11 +297,11 @@ checkExprHasSubLink_walker(Node *node, void *context)
*
* Find all Var nodes in the given tree with varlevelsup == sublevels_up,
* and increment their varno fields (rangetable indexes) by 'offset'.
- * The varnoold fields are adjusted similarly. Also, adjust other nodes
+ * The varnoold fields are adjusted similarly. Also, adjust other nodes
* that contain rangetable indexes, such as RangeTblRef and JoinExpr.
*
* NOTE: although this has the form of a walker, we cheat and modify the
- * nodes in-place. The given expression tree should have been copied
+ * nodes in-place. The given expression tree should have been copied
* earlier to ensure that no unwanted side-effects occur!
*/
@@ -457,11 +457,11 @@ offset_relid_set(Relids relids, int offset)
*
* Find all Var nodes in the given tree belonging to a specific relation
* (identified by sublevels_up and rt_index), and change their varno fields
- * to 'new_index'. The varnoold fields are changed too. Also, adjust other
+ * to 'new_index'. The varnoold fields are changed too. Also, adjust other
* nodes that contain rangetable indexes, such as RangeTblRef and JoinExpr.
*
* NOTE: although this has the form of a walker, we cheat and modify the
- * nodes in-place. The given expression tree should have been copied
+ * nodes in-place. The given expression tree should have been copied
* earlier to ensure that no unwanted side-effects occur!
*/
@@ -640,7 +640,7 @@ adjust_relid_set(Relids relids, int oldrelid, int newrelid)
* Likewise for other nodes containing levelsup fields, such as Aggref.
*
* NOTE: although this has the form of a walker, we cheat and modify the
- * Var nodes in-place. The given expression tree should have been copied
+ * Var nodes in-place. The given expression tree should have been copied
* earlier to ensure that no unwanted side-effects occur!
*/
diff --git a/src/backend/rewrite/rewriteSupport.c b/src/backend/rewrite/rewriteSupport.c
index 7770d032b1e..9feed7345eb 100644
--- a/src/backend/rewrite/rewriteSupport.c
+++ b/src/backend/rewrite/rewriteSupport.c
@@ -127,7 +127,7 @@ get_rewrite_oid(Oid relid, const char *rulename, bool missing_ok)
* Find rule oid, given only a rule name but no rel OID.
*
* If there's more than one, it's an error. If there aren't any, that's an
- * error, too. In general, this should be avoided - it is provided to support
+ * error, too. In general, this should be avoided - it is provided to support
* syntax that is compatible with pre-7.3 versions of PG, where rule names
* were unique across the entire database.
*/
diff --git a/src/backend/storage/buffer/buf_init.c b/src/backend/storage/buffer/buf_init.c
index dadb49dae9e..0f1b77713c3 100644
--- a/src/backend/storage/buffer/buf_init.c
+++ b/src/backend/storage/buffer/buf_init.c
@@ -44,7 +44,7 @@ int32 *PrivateRefCount;
*
* IO_IN_PROGRESS -- this is a flag in the buffer descriptor.
* It must be set when an IO is initiated and cleared at
- * the end of the IO. It is there to make sure that one
+ * the end of the IO. It is there to make sure that one
* process doesn't start to use a buffer while another is
* faulting it in. see WaitIO and related routines.
*
@@ -54,7 +54,7 @@ int32 *PrivateRefCount;
*
* PrivateRefCount -- Each buffer also has a private refcount that keeps
* track of the number of times the buffer is pinned in the current
- * process. This is used for two purposes: first, if we pin a
+ * process. This is used for two purposes: first, if we pin a
* a buffer more than once, we only need to change the shared refcount
* once, thus only lock the shared state once; second, when a transaction
* aborts, it should only unpin the buffers exactly the number of times it
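Both purposes fall out of a pin/unpin pair that touches shared state only on the 0-to-1 and 1-to-0 transitions; a sketch with a plain counter standing in for the locked shared refcount:

    #define NBUFFERS_SKETCH 16

    static int  private_refcount[NBUFFERS_SKETCH];
    static int  shared_refcount[NBUFFERS_SKETCH];  /* really in shared memory */

    static void
    pin(int buf)
    {
        if (private_refcount[buf]++ == 0)
            shared_refcount[buf]++;     /* only the first local pin pays the
                                         * cost of locking shared state */
    }

    static void
    unpin(int buf)
    {
        if (--private_refcount[buf] == 0)
            shared_refcount[buf]--;     /* at abort, walking private_refcount
                                         * releases exactly the right number */
    }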
diff --git a/src/backend/storage/buffer/buf_table.c b/src/backend/storage/buffer/buf_table.c
index 33ecd1214e5..899fa593b29 100644
--- a/src/backend/storage/buffer/buf_table.c
+++ b/src/backend/storage/buffer/buf_table.c
@@ -3,7 +3,7 @@
* buf_table.c
* routines for mapping BufferTags to buffer indexes.
*
- * Note: the routines in this file do no locking of their own. The caller
+ * Note: the routines in this file do no locking of their own. The caller
* must hold a suitable lock on the appropriate BufMappingLock, as specified
* in the comments. We can't do the locking inside these functions because
* in most cases the caller needs to adjust the buffer header contents
@@ -112,7 +112,7 @@ BufTableLookup(BufferTag *tagPtr, uint32 hashcode)
* Insert a hashtable entry for given tag and buffer ID,
* unless an entry already exists for that tag
*
- * Returns -1 on successful insertion. If a conflicting entry exists
+ * Returns -1 on successful insertion. If a conflicting entry exists
* already, returns the buffer ID in that entry.
*
* Caller must hold exclusive lock on BufMappingLock for tag's partition
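The -1-on-success convention makes one probe serve as both insert and lookup. A toy linear-probe table with the same contract (it assumes the table never fills; zero keys mark empty slots):

    #define TABLE_SIZE 64

    typedef struct
    {
        int         key;        /* 0 means the slot is empty */
        int         buf_id;
    } EntrySketch;

    static EntrySketch table[TABLE_SIZE];

    /*
     * Returns -1 on successful insertion.  If a conflicting entry already
     * exists, returns its buffer ID and inserts nothing.
     */
    static int
    table_insert(int key, int buf_id)
    {
        int         i = (unsigned int) key % TABLE_SIZE;

        while (table[i].key != 0)
        {
            if (table[i].key == key)
                return table[i].buf_id;
            i = (i + 1) % TABLE_SIZE;   /* linear probe */
        }
        table[i].key = key;
        table[i].buf_id = buf_id;
        return -1;
    }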
diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c
index b6539514f89..58b64f6570d 100644
--- a/src/backend/storage/buffer/bufmgr.c
+++ b/src/backend/storage/buffer/bufmgr.c
@@ -111,7 +111,7 @@ static void AtProcExit_Buffers(int code, Datum arg);
* PrefetchBuffer -- initiate asynchronous read of a block of a relation
*
* This is named by analogy to ReadBuffer but doesn't actually allocate a
- * buffer. Instead it tries to ensure that a future ReadBuffer for the given
+ * buffer. Instead it tries to ensure that a future ReadBuffer for the given
* block will not be delayed by the I/O. Prefetching is optional.
* No-op if prefetching isn't compiled in.
*/
@@ -201,7 +201,7 @@ ReadBuffer(Relation reln, BlockNumber blockNum)
* Assume when this function is called, that reln has been opened already.
*
* In RBM_NORMAL mode, the page is read from disk, and the page header is
- * validated. An error is thrown if the page header is not valid. (But
+ * validated. An error is thrown if the page header is not valid. (But
* note that an all-zero page is considered "valid"; see PageIsVerified().)
*
* RBM_ZERO_ON_ERROR is like the normal mode, but if the page header is not
@@ -209,7 +209,7 @@ ReadBuffer(Relation reln, BlockNumber blockNum)
* for non-critical data, where the caller is prepared to repair errors.
*
* In RBM_ZERO mode, if the page isn't in buffer cache already, it's filled
- * with zeros instead of reading it from disk. Useful when the caller is
+ * with zeros instead of reading it from disk. Useful when the caller is
* going to fill the page from scratch, since this saves I/O and avoids
* unnecessary failure if the page-on-disk has corrupt page headers.
* Caution: do not use this mode to read a page that is beyond the relation's
@@ -365,7 +365,7 @@ ReadBuffer_common(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
* This can happen because mdread doesn't complain about reads beyond
* EOF (when zero_damaged_pages is ON) and so a previous attempt to
* read a block beyond EOF could have left a "valid" zero-filled
- * buffer. Unfortunately, we have also seen this case occurring
+ * buffer. Unfortunately, we have also seen this case occurring
* because of buggy Linux kernels that sometimes return an
* lseek(SEEK_END) result that doesn't account for a recent write. In
* that situation, the pre-existing buffer would contain valid data
@@ -575,7 +575,7 @@ BufferAlloc(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
/*
* Didn't find it in the buffer pool. We'll have to initialize a new
- * buffer. Remember to unlock the mapping lock while doing the work.
+ * buffer. Remember to unlock the mapping lock while doing the work.
*/
LWLockRelease(newPartitionLock);
@@ -585,7 +585,7 @@ BufferAlloc(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
bool lock_held;
/*
- * Select a victim buffer. The buffer is returned with its header
+ * Select a victim buffer. The buffer is returned with its header
* spinlock still held! Also (in most cases) the BufFreelistLock is
* still held, since it would be bad to hold the spinlock while
* possibly waking up other processes.
@@ -634,7 +634,7 @@ BufferAlloc(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
* If using a nondefault strategy, and writing the buffer
* would require a WAL flush, let the strategy decide whether
* to go ahead and write/reuse the buffer or to choose another
- * victim. We need lock to inspect the page LSN, so this
+ * victim. We need lock to inspect the page LSN, so this
* can't be done inside StrategyGetBuffer.
*/
if (strategy != NULL &&
@@ -755,7 +755,7 @@ BufferAlloc(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
{
/*
* We can only get here if (a) someone else is still reading
- * in the page, or (b) a previous read attempt failed. We
+ * in the page, or (b) a previous read attempt failed. We
* have to wait for any active read attempt to finish, and
* then set up our own read attempt if the page is still not
* BM_VALID. StartBufferIO does it all.
@@ -848,7 +848,7 @@ BufferAlloc(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
* This is used only in contexts such as dropping a relation. We assume
* that no other backend could possibly be interested in using the page,
* so the only reason the buffer might be pinned is if someone else is
- * trying to write it out. We have to let them finish before we can
+ * trying to write it out. We have to let them finish before we can
* reclaim the buffer.
*
* The buffer could get reclaimed by someone else while we are waiting
@@ -947,7 +947,7 @@ retry:
*
* Marks buffer contents as dirty (actual write happens later).
*
- * Buffer must be pinned and exclusive-locked. (If caller does not hold
+ * Buffer must be pinned and exclusive-locked. (If caller does not hold
* exclusive lock, then somebody could be in process of writing the buffer,
* leading to risk of bad data written to disk.)
*/
@@ -991,7 +991,7 @@ MarkBufferDirty(Buffer buffer)
*
* Formerly, this saved one cycle of acquiring/releasing the BufMgrLock
* compared to calling the two routines separately. Now it's mainly just
- * a convenience function. However, if the passed buffer is valid and
+ * a convenience function. However, if the passed buffer is valid and
* already contains the desired block, we just return it as-is; and that
* does save considerable work compared to a full release and reacquire.
*
@@ -1043,7 +1043,7 @@ ReleaseAndReadBuffer(Buffer buffer,
* when we first pin it; for other strategies we just make sure the usage_count
* isn't zero. (The idea of the latter is that we don't want synchronized
* heap scans to inflate the count, but we need it to not be zero to discourage
- * other backends from stealing buffers from our ring. As long as we cycle
+ * other backends from stealing buffers from our ring. As long as we cycle
* through the ring faster than the global clock-sweep cycles, buffers in
* our ring won't be chosen as victims for replacement by other backends.)
*
@@ -1051,7 +1051,7 @@ ReleaseAndReadBuffer(Buffer buffer,
*
* Note that ResourceOwnerEnlargeBuffers must have been done already.
*
- * Returns TRUE if buffer is BM_VALID, else FALSE. This provision allows
+ * Returns TRUE if buffer is BM_VALID, else FALSE. This provision allows
* some callers to avoid an extra spinlock cycle.
*/
static bool
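The usage_count policy above reduces to two branches; a sketch, with the cap standing in for BM_MAX_USAGE_COUNT:

    #define MAX_USAGE_COUNT 5   /* stand-in for BM_MAX_USAGE_COUNT */

    static void
    bump_usage_count(int *usage_count, int using_strategy_ring)
    {
        if (!using_strategy_ring)
        {
            if (*usage_count < MAX_USAGE_COUNT)
                (*usage_count)++;       /* default: reward reuse, capped */
        }
        else if (*usage_count == 0)
            *usage_count = 1;           /* ring: keep it off zero so others
                                         * don't steal it, without letting a
                                         * seqscan inflate the count */
    }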
@@ -1204,7 +1204,7 @@ BufferSync(int flags)
* have the flag set.
*
* Note that if we fail to write some buffer, we may leave buffers with
- * BM_CHECKPOINT_NEEDED still set. This is OK since any such buffer would
+ * BM_CHECKPOINT_NEEDED still set. This is OK since any such buffer would
* certainly need to be written for the next checkpoint attempt, too.
*/
num_to_write = 0;
@@ -1945,7 +1945,7 @@ RelationGetNumberOfBlocksInFork(Relation relation, ForkNumber forkNum)
* specified relation that have block numbers >= firstDelBlock.
* (In particular, with firstDelBlock = 0, all pages are removed.)
* Dirty pages are simply dropped, without bothering to write them
- * out first. Therefore, this is NOT rollback-able, and so should be
+ * out first. Therefore, this is NOT rollback-able, and so should be
* used only with extreme caution!
*
* Currently, this is called only from smgr.c when the underlying file
@@ -1954,7 +1954,7 @@ RelationGetNumberOfBlocksInFork(Relation relation, ForkNumber forkNum)
* be deleted momentarily anyway, and there is no point in writing it.
* It is the responsibility of higher-level code to ensure that the
* deletion or truncation does not lose any data that could be needed
- * later. It is also the responsibility of higher-level code to ensure
+ * later. It is also the responsibility of higher-level code to ensure
* that no other process could be trying to load more pages of the
* relation into buffers.
*
@@ -1997,9 +1997,9 @@ DropRelFileNodeBuffers(RelFileNodeBackend rnode, ForkNumber forkNum,
*
* This function removes all the buffers in the buffer cache for a
* particular database. Dirty pages are simply dropped, without
- * bothering to write them out first. This is used when we destroy a
+ * bothering to write them out first. This is used when we destroy a
* database, to avoid trying to flush data to disk when the directory
- * tree no longer exists. Implementation is pretty similar to
+ * tree no longer exists. Implementation is pretty similar to
* DropRelFileNodeBuffers() which is for destroying just one relation.
* --------------------------------------------------------------------
*/
@@ -2298,9 +2298,9 @@ SetBufferCommitInfoNeedsSave(Buffer buffer)
/*
* This routine might get called many times on the same page, if we are
* making the first scan after commit of an xact that added/deleted many
- * tuples. So, be as quick as we can if the buffer is already dirty. We
+ * tuples. So, be as quick as we can if the buffer is already dirty. We
* do this by not acquiring spinlock if it looks like the status bits are
- * already OK. (Note it is okay if someone else clears BM_JUST_DIRTIED
+ * already OK. (Note it is okay if someone else clears BM_JUST_DIRTIED
* immediately after we look, because the buffer content update is already
* done and will be reflected in the I/O.)
*/
diff --git a/src/backend/storage/buffer/freelist.c b/src/backend/storage/buffer/freelist.c
index bf9903b9f32..73d42961e8a 100644
--- a/src/backend/storage/buffer/freelist.c
+++ b/src/backend/storage/buffer/freelist.c
@@ -36,7 +36,7 @@ typedef struct
*/
/*
- * Statistics. These counters should be wide enough that they can't
+ * Statistics. These counters should be wide enough that they can't
* overflow during a single bgwriter cycle.
*/
uint32 completePasses; /* Complete cycles of the clock sweep */
@@ -129,7 +129,7 @@ StrategyGetBuffer(BufferAccessStrategy strategy, bool *lock_held)
/*
* We count buffer allocation requests so that the bgwriter can estimate
- * the rate of buffer consumption. Note that buffers recycled by a
+ * the rate of buffer consumption. Note that buffers recycled by a
* strategy object are intentionally not counted here.
*/
StrategyControl->numBufferAllocs++;
@@ -248,7 +248,7 @@ StrategyFreeBuffer(volatile BufferDesc *buf)
*
* In addition, we return the completed-pass count (which is effectively
* the higher-order bits of nextVictimBuffer) and the count of recent buffer
- * allocs if non-NULL pointers are passed. The alloc count is reset after
+ * allocs if non-NULL pointers are passed. The alloc count is reset after
* being read.
*/
int
@@ -442,7 +442,7 @@ GetBufferFromRing(BufferAccessStrategy strategy)
/*
* If the slot hasn't been filled yet, tell the caller to allocate a new
- * buffer with the normal allocation strategy. He will then fill this
+ * buffer with the normal allocation strategy. He will then fill this
* slot by calling AddBufferToRing with the new buffer.
*/
bufnum = strategy->buffers[strategy->current];
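The ring is just a cursor over a fixed array; a sketch, with -1 standing in for an unfilled slot:

    typedef struct
    {
        int         ring_size;
        int         current;
        int         buffers[8];     /* -1 marks a slot not filled yet */
    } RingSketch;

    /*
     * Returns the buffer in the next ring slot, or -1 if that slot is
     * still empty; the caller then allocates normally and stores the new
     * buffer back into the current slot.
     */
    static int
    ring_next(RingSketch *ring)
    {
        ring->current = (ring->current + 1) % ring->ring_size;
        return ring->buffers[ring->current];
    }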
@@ -495,7 +495,7 @@ AddBufferToRing(BufferAccessStrategy strategy, volatile BufferDesc *buf)
*
* When a nondefault strategy is used, the buffer manager calls this function
* when it turns out that the buffer selected by StrategyGetBuffer needs to
- * be written out and doing so would require flushing WAL too. This gives us
+ * be written out and doing so would require flushing WAL too. This gives us
* a chance to choose a different victim.
*
* Returns true if buffer manager should ask for a new victim, and false
diff --git a/src/backend/storage/buffer/localbuf.c b/src/backend/storage/buffer/localbuf.c
index 8816a5dfab4..f908584589c 100644
--- a/src/backend/storage/buffer/localbuf.c
+++ b/src/backend/storage/buffer/localbuf.c
@@ -95,7 +95,7 @@ LocalPrefetchBuffer(SMgrRelation smgr, ForkNumber forkNum,
* Find or create a local buffer for the given page of the given relation.
*
* API is similar to bufmgr.c's BufferAlloc, except that we do not need
- * to do any locking since this is all local. Also, IO_IN_PROGRESS
+ * to do any locking since this is all local. Also, IO_IN_PROGRESS
* does not get set. Lastly, we support only default access strategy
* (hence, usage_count is always advanced).
*/
@@ -286,7 +286,7 @@ MarkLocalBufferDirty(Buffer buffer)
* specified relation that have block numbers >= firstDelBlock.
* (In particular, with firstDelBlock = 0, all pages are removed.)
* Dirty pages are simply dropped, without bothering to write them
- * out first. Therefore, this is NOT rollback-able, and so should be
+ * out first. Therefore, this is NOT rollback-able, and so should be
* used only with extreme caution!
*
* See DropRelFileNodeBuffers in bufmgr.c for more notes.
@@ -413,7 +413,7 @@ GetLocalBufferStorage(void)
/*
* We allocate local buffers in a context of their own, so that the
* space eaten for them is easily recognizable in MemoryContextStats
- * output. Create the context on first use.
+ * output. Create the context on first use.
*/
if (LocalBufferContext == NULL)
LocalBufferContext =
diff --git a/src/backend/storage/file/buffile.c b/src/backend/storage/file/buffile.c
index c593f72b16a..44ea151a8d8 100644
--- a/src/backend/storage/file/buffile.c
+++ b/src/backend/storage/file/buffile.c
@@ -29,7 +29,7 @@
* that was current at that time.
*
* BufFile also supports temporary files that exceed the OS file size limit
- * (by opening multiple fd.c temporary files). This is an essential feature
+ * (by opening multiple fd.c temporary files). This is an essential feature
* for sorts and hashjoins on large amounts of data.
*-------------------------------------------------------------------------
*/
@@ -72,7 +72,7 @@ struct BufFile
bool dirty; /* does buffer need to be written? */
/*
- * resowner is the ResourceOwner to use for underlying temp files. (We
+ * resowner is the ResourceOwner to use for underlying temp files. (We
* don't need to remember the memory context we're using explicitly,
* because after creation we only repalloc our arrays larger.)
*/
@@ -519,7 +519,7 @@ BufFileSeek(BufFile *file, int fileno, off_t offset, int whence)
{
/*
* Seek is to a point within existing buffer; we can just adjust
- * pos-within-buffer, without flushing buffer. Note this is OK
+ * pos-within-buffer, without flushing buffer. Note this is OK
* whether reading or writing, but buffer remains dirty if we were
* writing.
*/
diff --git a/src/backend/storage/file/fd.c b/src/backend/storage/file/fd.c
index a3211b1f041..ed76272fbcf 100644
--- a/src/backend/storage/file/fd.c
+++ b/src/backend/storage/file/fd.c
@@ -64,7 +64,7 @@
* and other code that tries to open files without consulting fd.c. This
* is the number left free. (While we can be pretty sure we won't get
* EMFILE, there's never any guarantee that we won't get ENFILE due to
- * other processes chewing up FDs. So it's a bad idea to try to open files
+ * other processes chewing up FDs. So it's a bad idea to try to open files
* without consulting fd.c. Nonetheless we cannot control all code.)
*
* Because this is just a fixed setting, we are effectively assuming that
@@ -154,8 +154,8 @@ typedef struct vfd
} Vfd;
/*
- * Virtual File Descriptor array pointer and size. This grows as
- * needed. 'File' values are indexes into this array.
+ * Virtual File Descriptor array pointer and size. This grows as
+ * needed. 'File' values are indexes into this array.
* Note that VfdCache[0] is not a usable VFD, just a list header.
*/
static Vfd *VfdCache;
@@ -221,7 +221,7 @@ static int nextTempTableSpace = 0;
*
* The Least Recently Used ring is a doubly linked list that begins and
* ends on element zero. Element zero is special -- it doesn't represent
- * a file and its "fd" field always == VFD_CLOSED. Element zero is just an
+ * a file and its "fd" field always == VFD_CLOSED. Element zero is just an
* anchor that shows us the beginning/end of the ring.
* Only VFD elements that are currently really open (have an FD assigned) are
* in the Lru ring. Elements that are "virtually" open can be recognized
@@ -379,7 +379,7 @@ InitFileAccess(void)
* We stop counting if usable_fds reaches max_to_probe. Note: a small
* value of max_to_probe might result in an underestimate of already_open;
* we must fill in any "gaps" in the set of used FDs before the calculation
- * of already_open will give the right answer. In practice, max_to_probe
+ * of already_open will give the right answer. In practice, max_to_probe
* of a couple of dozen should be enough to ensure good results.
*
* We assume stdin (FD 0) is available for dup'ing
@@ -456,7 +456,7 @@ count_usable_fds(int max_to_probe, int *usable_fds, int *already_open)
pfree(fd);
/*
- * Return results. usable_fds is just the number of successful dups. We
+ * Return results. usable_fds is just the number of successful dups. We
* assume that the system limit is highestfd+1 (remember 0 is a legal FD
* number) and so already_open is highestfd+1 - usable_fds.
*/
@@ -950,7 +950,7 @@ OpenTemporaryFile(bool interXact)
/*
* If not, or if tablespace is bad, create in database's default
- * tablespace. MyDatabaseTableSpace should normally be set before we get
+ * tablespace. MyDatabaseTableSpace should normally be set before we get
* here, but just in case it isn't, fall back to pg_default tablespace.
*/
if (file <= 0)
@@ -1475,7 +1475,7 @@ reserveAllocatedDesc(void)
/*
* Routines that want to use stdio (ie, FILE*) should use AllocateFile
* rather than plain fopen(). This lets fd.c deal with freeing FDs if
- * necessary to open the file. When done, call FreeFile rather than fclose.
+ * necessary to open the file. When done, call FreeFile rather than fclose.
*
* Note that files that will be open for any significant length of time
* should NOT be handled this way, since they cannot share kernel file
@@ -1654,7 +1654,7 @@ TryAgain:
* Read a directory opened with AllocateDir, ereport'ing any error.
*
* This is easier to use than raw readdir() since it takes care of some
- * otherwise rather tedious and error-prone manipulation of errno. Also,
+ * otherwise rather tedious and error-prone manipulation of errno. Also,
* if you are happy with a generic error message for AllocateDir failure,
* you can just do
*
@@ -1770,7 +1770,7 @@ SetTempTablespaces(Oid *tableSpaces, int numSpaces)
numTempTableSpaces = numSpaces;
/*
- * Select a random starting point in the list. This is to minimize
+ * Select a random starting point in the list. This is to minimize
* conflicts between backends that are most likely sharing the same list
* of temp tablespaces. Note that if we create multiple temp files in the
* same transaction, we'll advance circularly through the list --- this
@@ -1799,7 +1799,7 @@ TempTablespacesAreSet(void)
/*
* GetNextTempTableSpace
*
- * Select the next temp tablespace to use. A result of InvalidOid means
+ * Select the next temp tablespace to use. A result of InvalidOid means
* to use the current database's default tablespace.
*/
Oid
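
The element-zero anchor trick for fd.c's LRU ring can be shown in isolation. The following standalone sketch keeps only the linkage logic; the field names mimic fd.c's Vfd, but the array size and the fake kernel FDs are invented for the example.

#include <stdio.h>

#define NVFD 4
#define VFD_CLOSED (-1)

typedef struct
{
    int fd;                  /* VFD_CLOSED in the anchor, element 0 */
    int lruMoreRecently;     /* ring neighbor toward the MRU end */
    int lruLessRecently;     /* ring neighbor toward the LRU end */
} Vfd;

static Vfd cache[NVFD];

/* Insert element i at the most-recently-used end, next to the anchor. */
static void
lru_insert(int i)
{
    cache[i].lruMoreRecently = 0;
    cache[i].lruLessRecently = cache[0].lruLessRecently;
    cache[cache[0].lruLessRecently].lruMoreRecently = i;
    cache[0].lruLessRecently = i;
}

int
main(void)
{
    int i;

    cache[0].fd = VFD_CLOSED;        /* element 0 is just an anchor */
    cache[0].lruMoreRecently = 0;    /* a ring of one: points at itself */
    cache[0].lruLessRecently = 0;

    for (i = 1; i < NVFD; i++)
    {
        cache[i].fd = 100 + i;       /* pretend kernel FDs */
        lru_insert(i);
    }

    /* Walk from most to least recently used; stop back at the anchor. */
    for (i = cache[0].lruLessRecently; i != 0; i = cache[i].lruLessRecently)
        printf("vfd %d holds fd %d\n", i, cache[i].fd);
    return 0;
}
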
diff --git a/src/backend/storage/freespace/freespace.c b/src/backend/storage/freespace/freespace.c
index 1a5a874c86a..c314f6cc173 100644
--- a/src/backend/storage/freespace/freespace.c
+++ b/src/backend/storage/freespace/freespace.c
@@ -52,7 +52,7 @@
* Range Category
* 0 - 31 0
* 32 - 63 1
- * ... ... ...
+ * ... ... ...
* 8096 - 8127 253
* 8128 - 8163 254
* 8164 - 8192 255
@@ -127,7 +127,7 @@ static uint8 fsm_vacuum_page(Relation rel, FSMAddress addr, bool *eof);
* will turn out to have too little space available by the time the caller
* gets a lock on it. In that case, the caller should report the actual
* amount of free space available on that page and then try again (see
- * RecordAndGetPageWithFreeSpace). If InvalidBlockNumber is returned,
+ * RecordAndGetPageWithFreeSpace). If InvalidBlockNumber is returned,
* extend the relation.
*/
BlockNumber
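
The Range/Category table above amounts to dividing the available bytes by a 32-byte step and storing the result in a single byte per heap page. A standalone sketch of that mapping follows; the real fsm_space_avail_to_cat special-cases the topmost ranges (which is why 8128-8163 maps to 254), while this simplified version just caps at 255.

#include <stdio.h>
#include <stdint.h>

#define BLCKSZ         8192
#define FSM_CATEGORIES 256
#define FSM_CAT_STEP   (BLCKSZ / FSM_CATEGORIES)   /* 32 bytes */

static uint8_t
space_to_cat(unsigned avail)
{
    unsigned cat = avail / FSM_CAT_STEP;

    return (uint8_t) (cat > 255 ? 255 : cat);
}

/* Smallest free space a category is guaranteed to represent. */
static unsigned
cat_to_space(uint8_t cat)
{
    return (unsigned) cat * FSM_CAT_STEP;
}

int
main(void)
{
    printf("%u bytes -> cat %u\n", 40u, space_to_cat(40));     /* cat 1 */
    printf("cat %u -> >= %u bytes\n", 253, cat_to_space(253)); /* 8096 */
    return 0;
}
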
diff --git a/src/backend/storage/freespace/fsmpage.c b/src/backend/storage/freespace/fsmpage.c
index 8e8096cece7..35af331f708 100644
--- a/src/backend/storage/freespace/fsmpage.c
+++ b/src/backend/storage/freespace/fsmpage.c
@@ -185,13 +185,13 @@ restart:
/*----------
* Start the search from the target slot. At every step, move one
- * node to the right, then climb up to the parent. Stop when we reach
+ * node to the right, then climb up to the parent. Stop when we reach
* a node with enough free space (as we must, since the root has enough
* space).
*
* The idea is to gradually expand our "search triangle", that is, all
* nodes covered by the current node, and to be sure we search to the
- * right from the start point. At the first step, only the target slot
+ * right from the start point. At the first step, only the target slot
* is examined. When we move up from a left child to its parent, we are
* adding the right-hand subtree of that parent to the search triangle.
* When we move right then up from a right child, we are dropping the
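
The "search triangle" walk can be demonstrated on a toy tree. In the standalone sketch below, the page is a complete binary tree stored in an array with the root at index 0; right_neighbor mirrors the wrap-around step fsmpage.c uses, while the tree size and leaf values are made up.

#include <stdio.h>

#define NODES 15                 /* 7 inner nodes + 8 leaf slots */

static int tree[NODES];          /* each inner node = max of its children */

static int
parent(int i)
{
    return (i - 1) / 2;
}

static int
right_neighbor(int i)
{
    /* next node on the same level, wrapping at the level's right edge */
    i++;
    if (((i + 1) & i) == 0)      /* i+1 is a power of two: we fell off */
        i = (i + 1) / 2 - 1;     /* wrap to the start of the level */
    return i;
}

/* Walk from a starting leaf: step right, climb, until enough space. */
static int
search_from(int node, int needed)
{
    while (node > 0 && tree[node] < needed)
        node = parent(right_neighbor(node));
    return (tree[node] >= needed) ? node : -1;
}

int
main(void)
{
    int leaves[8] = {0, 10, 0, 0, 50, 0, 0, 20};
    int i;

    for (i = 0; i < 8; i++)
        tree[7 + i] = leaves[i];
    for (i = 6; i >= 0; i--)
        tree[i] = tree[2 * i + 1] > tree[2 * i + 2] ?
            tree[2 * i + 1] : tree[2 * i + 2];

    /* Starting at leaf 2 (node 9) and asking for 30, the triangle grows
     * until node 2, whose subtree holds the 50-byte leaf; the real code
     * would then descend that subtree to a concrete slot. */
    printf("found node %d\n", search_from(9, 30));
    return 0;
}
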
diff --git a/src/backend/storage/ipc/ipc.c b/src/backend/storage/ipc/ipc.c
index 540209ce795..57aa9bf0e7f 100644
--- a/src/backend/storage/ipc/ipc.c
+++ b/src/backend/storage/ipc/ipc.c
@@ -4,7 +4,7 @@
* POSTGRES inter-process communication definitions.
*
* This file is misnamed, as it no longer has much of anything directly
- * to do with IPC. The functionality here is concerned with managing
+ * to do with IPC. The functionality here is concerned with managing
* exit-time cleanup for either a postmaster or a backend.
*
*
@@ -84,7 +84,7 @@ static int on_proc_exit_index,
* -cim 2/6/90
*
* Unfortunately, we can't really guarantee that add-on code
- * obeys the rule of not calling exit() directly. So, while
+ * obeys the rule of not calling exit() directly. So, while
* this is the preferred way out of the system, we also register
* an atexit callback that will make sure cleanup happens.
* ----------------------------------------------------------------
@@ -103,7 +103,7 @@ proc_exit(int code)
* fixed file name, each backend will overwrite earlier profiles. To
* fix that, we create a separate subdirectory for each backend
* (./gprof/pid) and 'cd' to that subdirectory before we exit() - that
- * forces mcleanup() to write each profile into its own directory. We
+ * forces mcleanup() to write each profile into its own directory. We
* end up with something like: $PGDATA/gprof/8829/gmon.out
* $PGDATA/gprof/8845/gmon.out ...
*
@@ -257,7 +257,7 @@ atexit_callback(int exitstatus, void *arg)
* on_proc_exit
*
* this function adds a callback function to the list of
- * functions invoked by proc_exit(). -cim 2/6/90
+ * functions invoked by proc_exit(). -cim 2/6/90
* ----------------------------------------------------------------
*/
void
@@ -288,7 +288,7 @@ on_proc_exit(pg_on_exit_callback function, Datum arg)
* on_shmem_exit
*
* this function adds a callback function to the list of
- * functions invoked by shmem_exit(). -cim 2/6/90
+ * functions invoked by shmem_exit(). -cim 2/6/90
* ----------------------------------------------------------------
*/
void
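
The callback registry that proc_exit and on_proc_exit implement reduces to a small LIFO list. Here is a toy standalone version, with invented names and a fixed-size array standing in for the real bookkeeping.

#include <stdio.h>
#include <stdlib.h>

typedef void (*exit_cb) (int code, long arg);

static struct { exit_cb func; long arg; } on_exit_list[16];
static int on_exit_index = 0;

static void
my_on_proc_exit(exit_cb func, long arg)
{
    on_exit_list[on_exit_index].func = func;
    on_exit_list[on_exit_index].arg = arg;
    on_exit_index++;
}

static void
my_proc_exit(int code)
{
    /* call callbacks LIFO, so later registrations clean up first */
    while (--on_exit_index >= 0)
        on_exit_list[on_exit_index].func(code,
                                         on_exit_list[on_exit_index].arg);
    exit(code);
}

static void
say_goodbye(int code, long arg)
{
    printf("cleanup %ld (exit code %d)\n", arg, code);
}

int
main(void)
{
    my_on_proc_exit(say_goodbye, 1);
    my_on_proc_exit(say_goodbye, 2);
    my_proc_exit(0);             /* prints cleanup 2, then cleanup 1 */
}
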
diff --git a/src/backend/storage/ipc/ipci.c b/src/backend/storage/ipc/ipci.c
index 56c0bd8d498..0991f72764a 100644
--- a/src/backend/storage/ipc/ipci.c
+++ b/src/backend/storage/ipc/ipci.c
@@ -51,7 +51,7 @@ static bool addin_request_allowed = true;
* a loadable module.
*
* This is only useful if called from the _PG_init hook of a library that
- * is loaded into the postmaster via shared_preload_libraries. Once
+ * is loaded into the postmaster via shared_preload_libraries. Once
* shared memory has been allocated, calls will be ignored. (We could
* raise an error, but it seems better to make it a no-op, so that
* libraries containing such calls can be reloaded if needed.)
@@ -81,7 +81,7 @@ RequestAddinShmemSpace(Size size)
* This is a bit code-wasteful and could be cleaned up.)
*
* If "makePrivate" is true then we only need private memory, not shared
- * memory. This is true for a standalone backend, false for a postmaster.
+ * memory. This is true for a standalone backend, false for a postmaster.
*/
void
CreateSharedMemoryAndSemaphores(bool makePrivate, int port)
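
Per the comment on RequestAddinShmemSpace, the call only has effect when made from _PG_init of a module listed in shared_preload_libraries; later calls are silently ignored. A sketch of that usage pattern follows, assuming a hypothetical helper my_shmem_size; it compiles only as part of an extension build, not standalone.

#include "postgres.h"
#include "fmgr.h"
#include "storage/ipc.h"

PG_MODULE_MAGIC;

static Size
my_shmem_size(void)
{
    return 1024 * 1024;          /* whatever the add-in needs */
}

void
_PG_init(void)
{
    /* only effective while the postmaster is sizing shared memory */
    RequestAddinShmemSpace(my_shmem_size());
}
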
diff --git a/src/backend/storage/ipc/pmsignal.c b/src/backend/storage/ipc/pmsignal.c
index 306e3f9a216..8b596036a72 100644
--- a/src/backend/storage/ipc/pmsignal.c
+++ b/src/backend/storage/ipc/pmsignal.c
@@ -26,9 +26,9 @@
/*
* The postmaster is signaled by its children by sending SIGUSR1. The
- * specific reason is communicated via flags in shared memory. We keep
+ * specific reason is communicated via flags in shared memory. We keep
* a boolean flag for each possible "reason", so that different reasons
- * can be signaled by different backends at the same time. (However,
+ * can be signaled by different backends at the same time. (However,
* if the same reason is signaled more than once simultaneously, the
* postmaster will observe it only once.)
*
@@ -42,7 +42,7 @@
* have three possible states: UNUSED, ASSIGNED, ACTIVE. An UNUSED slot is
* available for assignment. An ASSIGNED slot is associated with a postmaster
* child process, but either the process has not touched shared memory yet,
- * or it has successfully cleaned up after itself. An ACTIVE slot means the
+ * or it has successfully cleaned up after itself.  An ACTIVE slot means the
* process is actively using shared memory. The slots are assigned to
* child processes at random, and postmaster.c is responsible for tracking
* which one goes with which PID.
@@ -297,7 +297,7 @@ PostmasterIsAlive(bool amDirectChild)
}
/*
- * Use kill() to see if the postmaster is still alive. This can sometimes
+ * Use kill() to see if the postmaster is still alive. This can sometimes
* give a false positive result, since the postmaster's PID may get
* recycled, but it is good enough for existing uses by indirect children
* and in debugging environments.
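
The kill()-based liveness probe mentioned in PostmasterIsAlive can be tried in isolation: signal 0 performs only the existence and permission checks. A standalone sketch (EPERM still means the PID exists, just under another user; PID recycling can still produce the false positives the comment warns about):

#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

static int
pid_is_alive(pid_t pid)
{
    if (kill(pid, 0) == 0)
        return 1;                /* process exists */
    return errno == EPERM;       /* exists, but owned by someone else */
}

int
main(void)
{
    printf("self alive? %d\n", pid_is_alive(getpid()));   /* prints 1 */
    return 0;
}
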
diff --git a/src/backend/storage/ipc/procarray.c b/src/backend/storage/ipc/procarray.c
index 4743526ced9..a3b6037a099 100644
--- a/src/backend/storage/ipc/procarray.c
+++ b/src/backend/storage/ipc/procarray.c
@@ -19,11 +19,11 @@
*
* During hot standby, we also keep a list of XIDs representing transactions
* that are known to be running in the master (or more precisely, were running
- * as of the current point in the WAL stream). This list is kept in the
+ * as of the current point in the WAL stream). This list is kept in the
* KnownAssignedXids array, and is updated by watching the sequence of
* arriving XIDs. This is necessary because if we leave those XIDs out of
* snapshots taken for standby queries, then they will appear to be already
- * complete, leading to MVCC failures. Note that in hot standby, the PGPROC
+ * complete, leading to MVCC failures. Note that in hot standby, the PGPROC
* array represents standby processes, which by definition are not running
* transactions that have XIDs.
*
@@ -261,7 +261,7 @@ ProcArrayAdd(PGPROC *proc)
if (arrayP->numProcs >= arrayP->maxProcs)
{
/*
- * Ooops, no room. (This really shouldn't happen, since there is a
+ * Ooops, no room. (This really shouldn't happen, since there is a
* fixed supply of PGPROC structs too, and so we should have failed
* earlier.)
*/
@@ -1054,9 +1054,9 @@ TransactionIdIsActive(TransactionId xid)
* ignored.
*
* This is used by VACUUM to decide which deleted tuples must be preserved
- * in a table. allDbs = TRUE is needed for shared relations, but allDbs =
+ * in a table. allDbs = TRUE is needed for shared relations, but allDbs =
* FALSE is sufficient for non-shared relations, since only backends in my
- * own database could ever see the tuples in them. Also, we can ignore
+ * own database could ever see the tuples in them. Also, we can ignore
* concurrently running lazy VACUUMs because (a) they must be working on other
* tables, and (b) they don't need to do snapshot-based lookups.
*
@@ -1321,7 +1321,7 @@ GetSnapshotData(Snapshot snapshot)
/*
* If the transaction has been assigned an xid < xmax we add it to
- * the snapshot, and update xmin if necessary. There's no need to
+ * the snapshot, and update xmin if necessary. There's no need to
* store XIDs >= xmax, since we'll treat them as running anyway.
* We don't bother to examine their subxids either.
*
@@ -1346,7 +1346,7 @@ GetSnapshotData(Snapshot snapshot)
* do that much work while holding the ProcArrayLock.
*
* The other backend can add more subxids concurrently, but cannot
- * remove any. Hence it's important to fetch nxids just once.
+ * remove any. Hence it's important to fetch nxids just once.
* Should be safe to use memcpy, though. (We needn't worry about
* missing any xids added concurrently, because they must postdate
* xmax.)
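
The xid < xmax rule in this comment is the heart of snapshot construction. A much-simplified standalone model follows: no locking, no subtransactions, plain unsigned values for TransactionIds, and invented names; it only shows why xids >= xmax never need to be stored.

#include <stdio.h>
#include <stdbool.h>

#define MAX_XIP 8

typedef struct
{
    unsigned xmin;               /* all xids < xmin are finished */
    unsigned xmax;               /* all xids >= xmax count as running */
    unsigned xip[MAX_XIP];       /* running xids in [xmin, xmax) */
    int      nxip;
} Snap;

static void
snap_build(Snap *s, unsigned xmax, const unsigned *running, int n)
{
    int i;

    s->xmax = xmax;
    s->xmin = xmax;
    s->nxip = 0;
    for (i = 0; i < n; i++)
    {
        if (running[i] >= xmax)
            continue;            /* no need to store: >= xmax is "running" */
        s->xip[s->nxip++] = running[i];
        if (running[i] < s->xmin)
            s->xmin = running[i];
    }
}

static bool
xid_in_progress(const Snap *s, unsigned xid)
{
    int i;

    if (xid >= s->xmax)
        return true;             /* treated as running without a lookup */
    for (i = 0; i < s->nxip; i++)
        if (s->xip[i] == xid)
            return true;
    return false;
}

int
main(void)
{
    unsigned running[] = {100, 103, 107};
    Snap     s;

    snap_build(&s, 105, running, 3);     /* 107 is dropped: >= xmax */
    printf("%d %d %d\n",
           xid_in_progress(&s, 100),     /* 1: in xip */
           xid_in_progress(&s, 101),     /* 0: not running */
           xid_in_progress(&s, 200));    /* 1: >= xmax */
    return 0;
}
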
@@ -1782,7 +1782,7 @@ BackendPidGetProc(int pid)
* Only main transaction Ids are considered. This function is mainly
* useful for determining what backend owns a lock.
*
- * Beware that not every xact has an XID assigned. However, as long as you
+ * Beware that not every xact has an XID assigned. However, as long as you
* only call this using an XID found on disk, you're safe.
*/
int
@@ -1842,7 +1842,7 @@ IsBackendPid(int pid)
* some snapshot we have. Since we examine the procarray with only shared
* lock, there are race conditions: a backend could set its xmin just after
* we look. Indeed, on multiprocessors with weak memory ordering, the
- * other backend could have set its xmin *before* we look. We know however
+ * other backend could have set its xmin *before* we look. We know however
* that such a backend must have held shared ProcArrayLock overlapping our
* own hold of ProcArrayLock, else we would see its xmin update. Therefore,
* any snapshot the other backend is taking concurrently with our scan cannot
@@ -2295,7 +2295,7 @@ CountOtherDBBackends(Oid databaseId, int *nbackends, int *nprepared)
* XidCacheRemoveRunningXids
*
* Remove a bunch of TransactionIds from the list of known-running
- * subtransactions for my backend. Both the specified xid and those in
+ * subtransactions for my backend. Both the specified xid and those in
* the xids[] array (of length nxids) are removed from the subxids cache.
* latestXid must be the latest XID among the group.
*/
@@ -2401,7 +2401,7 @@ DisplayXidCache(void)
* treated as running by standby transactions, even though they are not in
* the standby server's PGPROC array.
*
- * We record all XIDs that we know have been assigned. That includes all the
+ * We record all XIDs that we know have been assigned. That includes all the
* XIDs seen in WAL records, plus all unobserved XIDs that we can deduce have
* been assigned. We can deduce the existence of unobserved XIDs because we
* know XIDs are assigned in sequence, with no gaps. The KnownAssignedXids
@@ -2410,7 +2410,7 @@ DisplayXidCache(void)
*
* During hot standby we do not fret too much about the distinction between
* top-level XIDs and subtransaction XIDs. We store both together in the
- * KnownAssignedXids list. In backends, this is copied into snapshots in
+ * KnownAssignedXids list. In backends, this is copied into snapshots in
* GetSnapshotData(), taking advantage of the fact that XidInMVCCSnapshot()
* doesn't care about the distinction either. Subtransaction XIDs are
* effectively treated as top-level XIDs and in the typical case pg_subtrans
@@ -2623,14 +2623,14 @@ ExpireOldKnownAssignedTransactionIds(TransactionId xid)
* must hold shared ProcArrayLock to examine the array. To remove XIDs from
* the array, the startup process must hold ProcArrayLock exclusively, for
* the usual transactional reasons (compare commit/abort of a transaction
- * during normal running). Compressing unused entries out of the array
+ * during normal running). Compressing unused entries out of the array
* likewise requires exclusive lock. To add XIDs to the array, we just insert
* them into slots to the right of the head pointer and then advance the head
* pointer. This wouldn't require any lock at all, except that on machines
* with weak memory ordering we need to be careful that other processors
* see the array element changes before they see the head pointer change.
* We handle this by using a spinlock to protect reads and writes of the
- * head/tail pointers. (We could dispense with the spinlock if we were to
+ * head/tail pointers. (We could dispense with the spinlock if we were to
* create suitable memory access barrier primitives and use those instead.)
* The spinlock must be taken to read or write the head/tail pointers unless
* the caller holds ProcArrayLock exclusively.
@@ -2727,7 +2727,7 @@ KnownAssignedXidsCompress(bool force)
* If exclusive_lock is true then caller already holds ProcArrayLock in
* exclusive mode, so we need no extra locking here. Else caller holds no
* lock, so we need to be sure we maintain sufficient interlocks against
- * concurrent readers. (Only the startup process ever calls this, so no need
+ * concurrent readers. (Only the startup process ever calls this, so no need
* to worry about concurrent writers.)
*/
static void
@@ -2773,7 +2773,7 @@ KnownAssignedXidsAdd(TransactionId from_xid, TransactionId to_xid,
Assert(tail >= 0 && tail < pArray->maxKnownAssignedXids);
/*
- * Verify that insertions occur in TransactionId sequence. Note that even
+ * Verify that insertions occur in TransactionId sequence. Note that even
* if the last existing element is marked invalid, it must still have a
* correctly sequenced XID value.
*/
@@ -2876,7 +2876,7 @@ KnownAssignedXidsSearch(TransactionId xid, bool remove)
}
/*
- * Standard binary search. Note we can ignore the KnownAssignedXidsValid
+ * Standard binary search. Note we can ignore the KnownAssignedXidsValid
* array here, since even invalid entries will contain sorted XIDs.
*/
first = tail;
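
That binary search is ordinary except for the observation that invalid entries still carry correctly sorted XIDs. A standalone model, with unsigned values standing in for TransactionIds and a valid[] array standing in for KnownAssignedXidsValid:

#include <stdio.h>
#include <stdbool.h>

#define MAXSLOTS 16

static unsigned xids[MAXSLOTS];  /* sorted between tail and head */
static bool     valid[MAXSLOTS];

/* Binary-search [tail, head) for xid; -1 if absent or marked invalid.
 * Invalid entries keep their sorted values, so no special casing here. */
static int
known_xid_search(int tail, int head, unsigned xid)
{
    int first = tail;
    int last = head - 1;

    while (first <= last)
    {
        int mid = first + (last - first) / 2;

        if (xids[mid] == xid)
            return valid[mid] ? mid : -1;
        if (xids[mid] < xid)
            first = mid + 1;
        else
            last = mid - 1;
    }
    return -1;
}

int
main(void)
{
    xids[0] = 100; valid[0] = true;
    xids[1] = 101; valid[1] = false;   /* removed, but still sorted */
    xids[2] = 105; valid[2] = true;

    printf("%d %d %d\n",
           known_xid_search(0, 3, 100),    /* 0 */
           known_xid_search(0, 3, 101),    /* -1: invalid */
           known_xid_search(0, 3, 104));   /* -1: absent */
    return 0;
}
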
diff --git a/src/backend/storage/ipc/shmem.c b/src/backend/storage/ipc/shmem.c
index 0811da7b6fe..06096990d15 100644
--- a/src/backend/storage/ipc/shmem.c
+++ b/src/backend/storage/ipc/shmem.c
@@ -26,7 +26,7 @@
* for a module and should never be allocated after the shared memory
* initialization phase. Hash tables have a fixed maximum size, but
* their actual size can vary dynamically. When entries are added
- * to the table, more space is allocated. Queues link data structures
+ * to the table, more space is allocated. Queues link data structures
* that have been allocated either within fixed-size structures or as hash
* buckets. Each shared data structure has a string name to identify
* it (assigned in the module that declares it).
@@ -40,7 +40,7 @@
* The shmem index has two purposes: first, it gives us
* a simple model of how the world looks when a backend process
* initializes. If something is present in the shmem index,
- * it is initialized. If it is not, it is uninitialized. Second,
+ * it is initialized. If it is not, it is uninitialized. Second,
* the shmem index allows us to allocate shared memory on demand
* instead of trying to preallocate structures and hard-wire the
* sizes and locations in header files. If you are using a lot
@@ -55,8 +55,8 @@
* pointers using the method described in (b) above.
*
* (d) memory allocation model: shared memory can never be
- * freed, once allocated. Each hash table has its own free list,
- * so hash buckets can be reused when an item is deleted. However,
+ * freed, once allocated. Each hash table has its own free list,
+ * so hash buckets can be reused when an item is deleted. However,
* if one hash table grows very large and then shrinks, its space
* cannot be redistributed to other tables. We could build a simple
* hash bucket garbage collector if need be. Right now, it seems
@@ -116,7 +116,7 @@ InitShmemAllocation(void)
Assert(shmhdr != NULL);
/*
- * Initialize the spinlock used by ShmemAlloc. We have to do the space
+ * Initialize the spinlock used by ShmemAlloc. We have to do the space
* allocation the hard way, since obviously ShmemAlloc can't be called
* yet.
*/
@@ -217,7 +217,7 @@ InitShmemIndex(void)
*
* Since ShmemInitHash calls ShmemInitStruct, which expects the ShmemIndex
* hashtable to exist already, we have a bit of a circularity problem in
- * initializing the ShmemIndex itself. The special "ShmemIndex" hash
+ * initializing the ShmemIndex itself. The special "ShmemIndex" hash
* table name will tell ShmemInitStruct to fake it.
*/
info.keysize = SHMEM_INDEX_KEYSIZE;
@@ -294,7 +294,7 @@ ShmemInitHash(const char *name, /* table string name for shmem index */
* ShmemInitStruct -- Create/attach to a structure in shared memory.
*
* This is called during initialization to find or allocate
- * a data structure in shared memory. If no other process
+ * a data structure in shared memory. If no other process
* has created the structure, this routine allocates space
* for it. If it exists already, a pointer to the existing
* structure is returned.
@@ -303,7 +303,7 @@ ShmemInitHash(const char *name, /* table string name for shmem index */
* already in the shmem index (hence, already initialized).
*
* Note: before Postgres 9.0, this function returned NULL for some failure
- * cases. Now, it always throws error instead, so callers need not check
+ * cases. Now, it always throws error instead, so callers need not check
* for NULL.
*/
void *
@@ -335,7 +335,7 @@ ShmemInitStruct(const char *name, Size size, bool *foundPtr)
* be trying to init the shmem index itself.
*
* Notice that the ShmemIndexLock is released before the shmem
- * index has been initialized. This should be OK because no other
+ * index has been initialized. This should be OK because no other
* process can be accessing shared memory yet.
*/
Assert(shmemseghdr->index == NULL);
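
A typical ShmemInitStruct call site looks like the sketch below: find or create the named structure, and initialize it only if this process was the one that created it. The structure and helper names are hypothetical; serializing on AddinShmemInitLock is the conventional pattern for add-on code, and the fragment compiles only inside a PostgreSQL extension build.

#include "postgres.h"
#include "storage/lwlock.h"
#include "storage/shmem.h"

typedef struct
{
    int nrequests;
} MyCounters;                    /* hypothetical shared structure */

static MyCounters *my_counters = NULL;

static void
my_counters_init(void)
{
    bool found;

    LWLockAcquire(AddinShmemInitLock, LW_EXCLUSIVE);
    my_counters = (MyCounters *)
        ShmemInitStruct("My Counters", sizeof(MyCounters), &found);
    if (!found)
        my_counters->nrequests = 0;   /* we created it; set initial state */
    LWLockRelease(AddinShmemInitLock);
}
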
diff --git a/src/backend/storage/ipc/shmqueue.c b/src/backend/storage/ipc/shmqueue.c
index d7ec3013a6e..dbfb063a2c1 100644
--- a/src/backend/storage/ipc/shmqueue.c
+++ b/src/backend/storage/ipc/shmqueue.c
@@ -14,7 +14,7 @@
*
* Package for managing doubly-linked lists in shared memory.
* The only tricky thing is that SHM_QUEUE will usually be a field
- * in a larger record. SHMQueueNext has to return a pointer
+ * in a larger record. SHMQueueNext has to return a pointer
* to the record itself instead of a pointer to the SHMQueue field
* of the record. It takes an extra parameter and does some extra
* pointer arithmetic to do this correctly.
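
The "extra parameter and some extra pointer arithmetic" is the classic recover-the-container idiom. A standalone sketch, with the record type invented for the example:

#include <stdio.h>
#include <stddef.h>

typedef struct SHM_QUEUE
{
    struct SHM_QUEUE *prev;
    struct SHM_QUEUE *next;
} SHM_QUEUE;

typedef struct
{
    int       payload;
    SHM_QUEUE links;             /* the queue runs through this field */
} Record;

/* Given the link field's address and its offset inside the record,
 * recover the enclosing record -- the arithmetic SHMQueueNext hides. */
static void *
queue_element(SHM_QUEUE *link, size_t linkOffset)
{
    return (char *) link - linkOffset;
}

int
main(void)
{
    Record  r = {42, {NULL, NULL}};
    Record *back = queue_element(&r.links, offsetof(Record, links));

    printf("payload = %d\n", back->payload);   /* 42 */
    return 0;
}
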
diff --git a/src/backend/storage/ipc/sinval.c b/src/backend/storage/ipc/sinval.c
index b0cfc8be696..e568d4579af 100644
--- a/src/backend/storage/ipc/sinval.c
+++ b/src/backend/storage/ipc/sinval.c
@@ -26,7 +26,7 @@
* Because backends sitting idle will not be reading sinval events, we
* need a way to give an idle backend a swift kick in the rear and make
* it catch up before the sinval queue overflows and forces it to go
- * through a cache reset exercise. This is done by sending
+ * through a cache reset exercise. This is done by sending
* PROCSIG_CATCHUP_INTERRUPT to any backend that gets too far behind.
*
* State for catchup events consists of two flags: one saying whether
@@ -65,7 +65,7 @@ SendSharedInvalidMessages(const SharedInvalidationMessage *msgs, int n)
* NOTE: it is entirely possible for this routine to be invoked recursively
* as a consequence of processing inside the invalFunction or resetFunction.
* Furthermore, such a recursive call must guarantee that all outstanding
- * inval messages have been processed before it exits. This is the reason
+ * inval messages have been processed before it exits. This is the reason
* for the strange-looking choice to use a statically allocated buffer array
* and counters; it's so that a recursive call can process messages already
* sucked out of sinvaladt.c.
@@ -131,7 +131,7 @@ ReceiveSharedInvalidMessages(
* We are now caught up. If we received a catchup signal, reset that
* flag, and call SICleanupQueue(). This is not so much because we need
* to flush dead messages right now, as that we want to pass on the
- * catchup signal to the next slowest backend. "Daisy chaining" the
+ * catchup signal to the next slowest backend. "Daisy chaining" the
* catchup signal this way avoids creating spikes in system load for what
* should be just a background maintenance activity.
*/
@@ -151,7 +151,7 @@ ReceiveSharedInvalidMessages(
*
* If we are idle (catchupInterruptEnabled is set), we can safely
* invoke ProcessCatchupEvent directly. Otherwise, just set a flag
- * to do it later. (Note that it's quite possible for normal processing
+ * to do it later. (Note that it's quite possible for normal processing
* of the current transaction to cause ReceiveSharedInvalidMessages()
* to be run later on; in that case the flag will get cleared again,
* since there's no longer any reason to do anything.)
@@ -227,7 +227,7 @@ HandleCatchupInterrupt(void)
* EnableCatchupInterrupt
*
* This is called by the PostgresMain main loop just before waiting
- * for a frontend command. We process any pending catchup events,
+ * for a frontend command. We process any pending catchup events,
* and enable the signal handler to process future events directly.
*
* NOTE: the signal handler starts out disabled, and stays so until
@@ -272,7 +272,7 @@ EnableCatchupInterrupt(void)
* DisableCatchupInterrupt
*
* This is called by the PostgresMain main loop just after receiving
- * a frontend command. Signal handler execution of catchup events
+ * a frontend command. Signal handler execution of catchup events
* is disabled until the next EnableCatchupInterrupt call.
*
* The PROCSIG_NOTIFY_INTERRUPT signal handler also needs to call this,
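
The enable/disable dance around catchup interrupts boils down to two flags: service the event at once when idle, otherwise record it for the main loop. A toy single-process model of that control flow (real signal-handler code must of course be async-signal-safe; this sketch shows only the logic):

#include <stdio.h>
#include <stdbool.h>

static volatile bool catchupInterruptEnabled = false;
static volatile bool catchupPending = false;

static void
handle_catchup_signal(void)
{
    if (catchupInterruptEnabled)
        printf("idle: processing catchup event now\n");
    else
        catchupPending = true;   /* busy: defer to the main loop */
}

static void
enable_catchup(void)
{
    catchupInterruptEnabled = true;
    if (catchupPending)          /* service anything that arrived while busy */
    {
        catchupPending = false;
        printf("processing deferred catchup event\n");
    }
}

int
main(void)
{
    handle_catchup_signal();     /* arrives while busy: deferred */
    enable_catchup();            /* main loop goes idle: processed */
    return 0;
}
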
diff --git a/src/backend/storage/ipc/sinvaladt.c b/src/backend/storage/ipc/sinvaladt.c
index 4f446aab7a4..ec479c4dd02 100644
--- a/src/backend/storage/ipc/sinvaladt.c
+++ b/src/backend/storage/ipc/sinvaladt.c
@@ -45,7 +45,7 @@
* In reality, the messages are stored in a circular buffer of MAXNUMMESSAGES
* entries. We translate MsgNum values into circular-buffer indexes by
* computing MsgNum % MAXNUMMESSAGES (this should be fast as long as
- * MAXNUMMESSAGES is a constant and a power of 2). As long as maxMsgNum
+ * MAXNUMMESSAGES is a constant and a power of 2). As long as maxMsgNum
* doesn't exceed minMsgNum by more than MAXNUMMESSAGES, we have enough space
* in the buffer. If the buffer does overflow, we recover by setting the
* "reset" flag for each backend that has fallen too far behind. A backend
@@ -58,7 +58,7 @@
* normal behavior is that at most one such interrupt is in flight at a time;
* when a backend completes processing a catchup interrupt, it executes
* SICleanupQueue, which will signal the next-furthest-behind backend if
- * needed. This avoids undue contention from multiple backends all trying
+ * needed. This avoids undue contention from multiple backends all trying
* to catch up at once. However, the furthest-back backend might be stuck
* in a state where it can't catch up. Eventually it will get reset, so it
* won't cause any more problems for anyone but itself. But we don't want
@@ -89,7 +89,7 @@
* the writer wants to change maxMsgNum while readers need to read it.
* We deal with that by having a spinlock that readers must take for just
* long enough to read maxMsgNum, while writers take it for just long enough
- * to write maxMsgNum. (The exact rule is that you need the spinlock to
+ * to write maxMsgNum. (The exact rule is that you need the spinlock to
* read maxMsgNum if you are not holding SInvalWriteLock, and you need the
* spinlock to write maxMsgNum unless you are holding both locks.)
*
@@ -404,7 +404,7 @@ SIInsertDataEntries(const SharedInvalidationMessage *data, int n)
SISeg *segP = shmInvalBuffer;
/*
- * N can be arbitrarily large. We divide the work into groups of no more
+ * N can be arbitrarily large. We divide the work into groups of no more
* than WRITE_QUANTUM messages, to be sure that we don't hold the lock for
* an unreasonably long time. (This is not so much because we care about
* letting in other writers, as that some just-caught-up backend might be
@@ -426,7 +426,7 @@ SIInsertDataEntries(const SharedInvalidationMessage *data, int n)
* If the buffer is full, we *must* acquire some space. Clean the
* queue and reset anyone who is preventing space from being freed.
* Otherwise, clean the queue only when it's exceeded the next
- * fullness threshold. We have to loop and recheck the buffer state
+ * fullness threshold. We have to loop and recheck the buffer state
* after any call of SICleanupQueue.
*/
for (;;)
@@ -480,11 +480,11 @@ SIInsertDataEntries(const SharedInvalidationMessage *data, int n)
* executing on behalf of other backends, since each instance will modify only
* fields of its own backend's ProcState, and no instance will look at fields
* of other backends' ProcStates. We express this by grabbing SInvalReadLock
- * in shared mode. Note that this is not exactly the normal (read-only)
+ * in shared mode. Note that this is not exactly the normal (read-only)
* interpretation of a shared lock! Look closely at the interactions before
* allowing SInvalReadLock to be grabbed in shared mode for any other reason!
*
- * NB: this can also run in parallel with SIInsertDataEntries. It is not
+ * NB: this can also run in parallel with SIInsertDataEntries. It is not
* guaranteed that we will return any messages added after the routine is
* entered.
*
@@ -567,7 +567,7 @@ SIGetDataEntries(SharedInvalidationMessage *data, int datasize)
*
* Caution: because we transiently release write lock when we have to signal
* some other backend, it is NOT guaranteed that there are still minFree
- * free message slots at exit. Caller must recheck and perhaps retry.
+ * free message slots at exit. Caller must recheck and perhaps retry.
*/
void
SICleanupQueue(bool callerHasWriteLock, int minFree)
@@ -588,7 +588,7 @@ SICleanupQueue(bool callerHasWriteLock, int minFree)
/*
* Recompute minMsgNum = minimum of all backends' nextMsgNum, identify the
* furthest-back backend that needs signaling (if any), and reset any
- * backends that are too far back. Note that because we ignore sendOnly
+ * backends that are too far back. Note that because we ignore sendOnly
* backends here it is possible for them to keep sending messages without
* a problem even when they are the only active backend.
*/
diff --git a/src/backend/storage/ipc/standby.c b/src/backend/storage/ipc/standby.c
index 322f5bb2580..bfda389c54f 100644
--- a/src/backend/storage/ipc/standby.c
+++ b/src/backend/storage/ipc/standby.c
@@ -131,7 +131,7 @@ GetStandbyLimitTime(void)
/*
* The cutoff time is the last WAL data receipt time plus the appropriate
- * delay variable. Delay of -1 means wait forever.
+ * delay variable. Delay of -1 means wait forever.
*/
GetXLogReceiptTime(&rtime, &fromStream);
if (fromStream)
diff --git a/src/backend/storage/large_object/inv_api.c b/src/backend/storage/large_object/inv_api.c
index 1e7c501bc88..34f9e8f543d 100644
--- a/src/backend/storage/large_object/inv_api.c
+++ b/src/backend/storage/large_object/inv_api.c
@@ -801,7 +801,7 @@ inv_truncate(LargeObjectDesc *obj_desc, int len)
/*
* If we found the page of the truncation point we need to truncate the
- * data in it. Otherwise if we're in a hole, we need to create a page to
+ * data in it. Otherwise if we're in a hole, we need to create a page to
* mark the end of data.
*/
if (olddata != NULL && olddata->pageno == pageno)
diff --git a/src/backend/storage/lmgr/deadlock.c b/src/backend/storage/lmgr/deadlock.c
index aa6d315226e..3064ec746c1 100644
--- a/src/backend/storage/lmgr/deadlock.c
+++ b/src/backend/storage/lmgr/deadlock.c
@@ -51,7 +51,7 @@ typedef struct
} WAIT_ORDER;
/*
- * Information saved about each edge in a detected deadlock cycle. This
+ * Information saved about each edge in a detected deadlock cycle. This
* is used to print a diagnostic message upon failure.
*
* Note: because we want to examine this info after releasing the lock
@@ -119,7 +119,7 @@ static PGPROC *blocking_autovacuum_proc = NULL;
* InitDeadLockChecking -- initialize deadlock checker during backend startup
*
* This does per-backend initialization of the deadlock checker; primarily,
- * allocation of working memory for DeadLockCheck. We do this per-backend
+ * allocation of working memory for DeadLockCheck. We do this per-backend
* since there's no percentage in making the kernel do copy-on-write
* inheritance of workspace from the postmaster. We want to allocate the
* space at startup because (a) the deadlock checker might be invoked when
@@ -291,10 +291,10 @@ GetBlockingAutoVacuumPgproc(void)
* DeadLockCheckRecurse -- recursively search for valid orderings
*
* curConstraints[] holds the current set of constraints being considered
- * by an outer level of recursion. Add to this each possible solution
+ * by an outer level of recursion. Add to this each possible solution
* constraint for any cycle detected at this level.
*
- * Returns TRUE if no solution exists. Returns FALSE if a deadlock-free
+ * Returns TRUE if no solution exists. Returns FALSE if a deadlock-free
* state is attainable, in which case waitOrders[] shows the required
* rearrangements of lock wait queues (if any).
*/
@@ -429,7 +429,7 @@ TestConfiguration(PGPROC *startProc)
*
* Since we need to be able to check hypothetical configurations that would
* exist after wait queue rearrangement, the routine pays attention to the
- * table of hypothetical queue orders in waitOrders[]. These orders will
+ * table of hypothetical queue orders in waitOrders[]. These orders will
* be believed in preference to the actual ordering seen in the locktable.
*/
static bool
@@ -505,7 +505,7 @@ FindLockCycleRecurse(PGPROC *checkProc,
conflictMask = lockMethodTable->conflictTab[checkProc->waitLockMode];
/*
- * Scan for procs that already hold conflicting locks. These are "hard"
+ * Scan for procs that already hold conflicting locks. These are "hard"
* edges in the waits-for graph.
*/
procLocks = &(lock->procLocks);
@@ -703,7 +703,7 @@ ExpandConstraints(EDGE *constraints,
nWaitOrders = 0;
/*
- * Scan constraint list backwards. This is because the last-added
+ * Scan constraint list backwards. This is because the last-added
* constraint is the only one that could fail, and so we want to test it
* for inconsistency first.
*/
@@ -757,7 +757,7 @@ ExpandConstraints(EDGE *constraints,
* The initial queue ordering is taken directly from the lock's wait queue.
* The output is an array of PGPROC pointers, of length equal to the lock's
* wait queue length (the caller is responsible for providing this space).
- * The partial order is specified by an array of EDGE structs. Each EDGE
+ * The partial order is specified by an array of EDGE structs. Each EDGE
* is one that we need to reverse, therefore the "waiter" must appear before
* the "blocker" in the output array. The EDGE array may well contain
* edges associated with other locks; these should be ignored.
@@ -827,7 +827,7 @@ TopoSort(LOCK *lock,
afterConstraints[k] = i + 1;
}
/*--------------------
- * Now scan the topoProcs array backwards. At each step, output the
+ * Now scan the topoProcs array backwards. At each step, output the
* last proc that has no remaining before-constraints, and decrease
* the beforeConstraints count of each of the procs it was constrained
* against.
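
TopoSort's backwards scan with constraint counts generalizes to any small constraint set. A standalone sketch with invented edges; it fills the output array from the right, emitting an element only once nothing unplaced must follow it (the input is acyclic, so a candidate always exists):

#include <stdio.h>
#include <stdbool.h>

#define N 4

int
main(void)
{
    /* each edge {i, j} says element i must appear before element j */
    int  edges[][2] = {{2, 0}, {3, 1}};
    int  nedges = 2;
    int  nafter[N] = {0};        /* unplaced elements that must follow i */
    bool placed[N] = {false};
    int  out[N];
    int  i, j, k;

    for (k = 0; k < nedges; k++)
        nafter[edges[k][0]]++;

    /* Fill the output from the rightmost slot, as TopoSort does. */
    for (i = N - 1; i >= 0; i--)
    {
        /* pick the highest-numbered unplaced element with nothing
         * left that must come after it */
        for (j = N - 1; j >= 0; j--)
            if (!placed[j] && nafter[j] == 0)
                break;
        placed[j] = true;
        out[i] = j;
        for (k = 0; k < nedges; k++)
            if (edges[k][1] == j)
                nafter[edges[k][0]]--;
    }

    for (i = 0; i < N; i++)
        printf("%d ", out[i]);   /* e.g. "2 0 3 1": both edges hold */
    printf("\n");
    return 0;
}
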
diff --git a/src/backend/storage/lmgr/lmgr.c b/src/backend/storage/lmgr/lmgr.c
index c91f6ac10e7..ed731c80c61 100644
--- a/src/backend/storage/lmgr/lmgr.c
+++ b/src/backend/storage/lmgr/lmgr.c
@@ -65,7 +65,7 @@ SetLocktagRelationOid(LOCKTAG *tag, Oid relid)
/*
* LockRelationOid
*
- * Lock a relation given only its OID. This should generally be used
+ * Lock a relation given only its OID. This should generally be used
* before attempting to open the relation's relcache entry.
*/
void
@@ -252,7 +252,7 @@ LockHasWaitersRelation(Relation relation, LOCKMODE lockmode)
/*
* LockRelationIdForSession
*
- * This routine grabs a session-level lock on the target relation. The
+ * This routine grabs a session-level lock on the target relation. The
* session lock persists across transaction boundaries. It will be removed
* when UnlockRelationIdForSession() is called, or if an ereport(ERROR) occurs,
* or if the backend exits.
@@ -455,7 +455,7 @@ XactLockTableInsert(TransactionId xid)
*
* Delete the lock showing that the given transaction ID is running.
* (This is never used for main transaction IDs; those locks are only
- * released implicitly at transaction end. But we do use it for subtrans IDs.)
+ * released implicitly at transaction end. But we do use it for subtrans IDs.)
*/
void
XactLockTableDelete(TransactionId xid)
@@ -476,7 +476,7 @@ XactLockTableDelete(TransactionId xid)
* subtransaction, we will exit as soon as it aborts or its top parent commits.
* It takes some extra work to ensure this, because to save on shared memory
* the XID lock of a subtransaction is released when it ends, whether
- * successfully or unsuccessfully. So we have to check if it's "still running"
+ * successfully or unsuccessfully. So we have to check if it's "still running"
* and if so wait for its parent.
*/
void
diff --git a/src/backend/storage/lmgr/lock.c b/src/backend/storage/lmgr/lock.c
index 22530a6cff6..3fc91740b89 100644
--- a/src/backend/storage/lmgr/lock.c
+++ b/src/backend/storage/lmgr/lock.c
@@ -873,7 +873,7 @@ LockAcquireExtended(const LOCKTAG *locktag,
/*
* If lock requested conflicts with locks requested by waiters, must join
- * wait queue. Otherwise, check for conflict with already-held locks.
+ * wait queue. Otherwise, check for conflict with already-held locks.
* (That's last because most complex check.)
*/
if (lockMethodTable->conflictTab[lockmode] & lock->waitMask)
@@ -950,7 +950,7 @@ LockAcquireExtended(const LOCKTAG *locktag,
/*
* NOTE: do not do any material change of state between here and
- * return. All required changes in locktable state must have been
+ * return. All required changes in locktable state must have been
* done when the lock was granted to us --- see notes in WaitOnLock.
*/
@@ -980,7 +980,7 @@ LockAcquireExtended(const LOCKTAG *locktag,
{
/*
* Decode the locktag back to the original values, to avoid sending
- * lots of empty bytes with every message. See lock.h to check how a
+ * lots of empty bytes with every message. See lock.h to check how a
* locktag is defined for LOCKTAG_RELATION
*/
LogAccessExclusiveLock(locktag->locktag_field1,
@@ -1045,7 +1045,7 @@ LockCheckConflicts(LockMethod lockMethodTable,
}
/*
- * Rats. Something conflicts. But it could still be my own lock. We have
+ * Rats. Something conflicts. But it could still be my own lock. We have
* to construct a conflict mask that does not reflect our own locks, but
* only lock types held by other processes.
*/
@@ -1137,7 +1137,7 @@ UnGrantLock(LOCK *lock, LOCKMODE lockmode,
/*
* We need only run ProcLockWakeup if the released lock conflicts with at
- * least one of the lock types requested by waiter(s). Otherwise whatever
+ * least one of the lock types requested by waiter(s). Otherwise whatever
* conflict made them wait must still exist. NOTE: before MVCC, we could
* skip wakeup if lock->granted[lockmode] was still positive. But that's
* not true anymore, because the remaining granted locks might belong to
@@ -1157,7 +1157,7 @@ UnGrantLock(LOCK *lock, LOCKMODE lockmode,
}
/*
- * CleanUpLock -- clean up after releasing a lock. We garbage-collect the
+ * CleanUpLock -- clean up after releasing a lock. We garbage-collect the
* proclock and lock objects if possible, and call ProcLockWakeup if there
* are remaining requests and the caller says it's OK. (Normally, this
* should be called after UnGrantLock, and wakeupNeeded is the result from
@@ -1516,7 +1516,7 @@ LockRelease(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
}
/*
- * Decrease the total local count. If we're still holding the lock, we're
+ * Decrease the total local count. If we're still holding the lock, we're
* done.
*/
locallock->nLocks--;
@@ -2272,7 +2272,7 @@ PostPrepare_Locks(TransactionId xid)
/*
* We cannot simply modify proclock->tag.myProc to reassign
* ownership of the lock, because that's part of the hash key and
- * the proclock would then be in the wrong hash chain. So, unlink
+ * the proclock would then be in the wrong hash chain. So, unlink
* and delete the old proclock; create a new one with the right
* contents; and link it into place. We do it in this order to be
* certain we won't run out of shared memory (the way dynahash.c
@@ -2394,7 +2394,7 @@ GetLockStatusData(void)
* view of the state.
*
* Since this is a read-only operation, we take shared instead of
- * exclusive lock. There's not a whole lot of point to this, because all
+ * exclusive lock. There's not a whole lot of point to this, because all
* the normal operations require exclusive lock, but it doesn't hurt
* anything either. It will at least allow two backends to do
* GetLockStatusData in parallel.
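
The "conflict mask that does not reflect our own locks" computation is plain bit arithmetic. A standalone sketch, with a two-mode lock table invented for the example:

#include <stdio.h>

#define SHARED    1
#define EXCLUSIVE 2

/* conflictTab[mode]: bitmask of modes that conflict with `mode` */
static const int conflictTab[] = {
    [SHARED] = (1 << EXCLUSIVE),
    [EXCLUSIVE] = (1 << SHARED) | (1 << EXCLUSIVE),
};

/* granted[] counts grants per mode; myHolding[] is our own share.
 * Conflicts are checked only against what *others* hold. */
static int
conflicts_with_others(int mode, const int granted[], const int myHolding[])
{
    int otherLocks = 0;
    int m;

    for (m = SHARED; m <= EXCLUSIVE; m++)
        if (granted[m] - myHolding[m] > 0)
            otherLocks |= (1 << m);
    return (conflictTab[mode] & otherLocks) != 0;
}

int
main(void)
{
    int granted[3] = {0, 2, 0};  /* two SHARED grants ... */
    int mine[3] = {0, 1, 0};     /* ... one of them ours */

    printf("%d\n", conflicts_with_others(SHARED, granted, mine));    /* 0 */
    printf("%d\n", conflicts_with_others(EXCLUSIVE, granted, mine)); /* 1 */
    return 0;
}
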
diff --git a/src/backend/storage/lmgr/lwlock.c b/src/backend/storage/lmgr/lwlock.c
index 0fe7ce45cd6..f482f34bad3 100644
--- a/src/backend/storage/lmgr/lwlock.c
+++ b/src/backend/storage/lmgr/lwlock.c
@@ -6,7 +6,7 @@
* Lightweight locks are intended primarily to provide mutual exclusion of
* access to shared-memory data structures. Therefore, they offer both
* exclusive and shared lock modes (to support read/write and read-only
- * access to a shared object). There are few other frammishes. User-level
+ * access to a shared object). There are few other frammishes. User-level
* locking should be done with the full lock manager --- which depends on
* LWLocks to protect its shared state.
*
@@ -53,7 +53,7 @@ typedef struct LWLock
* (LWLockIds are indexes into the array.) We force the array stride to
* be a power of 2, which saves a few cycles in indexing, but more
* importantly also ensures that individual LWLocks don't cross cache line
- * boundaries. This reduces cache contention problems, especially on AMD
+ * boundaries. This reduces cache contention problems, especially on AMD
* Opterons. (Of course, we have to also ensure that the array start
* address is suitably aligned.)
*
@@ -200,7 +200,7 @@ NumLWLocks(void)
* a loadable module.
*
* This is only useful if called from the _PG_init hook of a library that
- * is loaded into the postmaster via shared_preload_libraries. Once
+ * is loaded into the postmaster via shared_preload_libraries. Once
* shared memory has been allocated, calls will be ignored. (We could
* raise an error, but it seems better to make it a no-op, so that
* libraries containing such calls can be reloaded if needed.)
@@ -378,7 +378,7 @@ LWLockAcquire(LWLockId lockid, LWLockMode mode)
* in the presence of contention. The efficiency of being able to do that
* outweighs the inefficiency of sometimes wasting a process dispatch
* cycle because the lock is not free when a released waiter finally gets
- * to run. See pgsql-hackers archives for 29-Dec-01.
+ * to run. See pgsql-hackers archives for 29-Dec-01.
*/
for (;;)
{
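
Keeping each LWLock within one cache line is done by padding the array stride to a power of two. A standalone sketch of that layout; 64 bytes is an assumed line size, and the field names are stand-ins for the real lock state:

#include <stdio.h>

#define LWLOCK_PADDED_SIZE 64    /* assumed cache line size, power of two */

typedef struct
{
    int mutex;                   /* stand-ins for the real lock fields */
    int exclusive;
    int shared;
} LWLock;

typedef union
{
    LWLock lock;
    char   pad[LWLOCK_PADDED_SIZE];
} LWLockPadded;

static LWLockPadded locks[16];

int
main(void)
{
    printf("stride = %lu bytes\n", (unsigned long) sizeof(LWLockPadded));
    printf("lock 3 starts at offset %ld\n",
           (long) ((char *) &locks[3] - (char *) locks));   /* 192 */
    return 0;
}
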
diff --git a/src/backend/storage/lmgr/predicate.c b/src/backend/storage/lmgr/predicate.c
index b7364787037..b9c6d8edee2 100644
--- a/src/backend/storage/lmgr/predicate.c
+++ b/src/backend/storage/lmgr/predicate.c
@@ -32,11 +32,11 @@
* examining the MVCC data.)
*
* (1) Besides tuples actually read, they must cover ranges of tuples
- * which would have been read based on the predicate. This will
+ * which would have been read based on the predicate. This will
* require modelling the predicates through locks against database
* objects such as pages, index ranges, or entire tables.
*
- * (2) They must be kept in RAM for quick access. Because of this, it
+ * (2) They must be kept in RAM for quick access. Because of this, it
* isn't possible to always maintain tuple-level granularity -- when
* the space allocated to store these approaches exhaustion, a
* request for a lock may need to scan for situations where a single
@@ -49,7 +49,7 @@
*
* (4) While they are associated with a transaction, they must survive
* a successful COMMIT of that transaction, and remain until all
- * overlapping transactions complete. This even means that they
+ * overlapping transactions complete. This even means that they
* must survive termination of the transaction's process. If a
* top level transaction is rolled back, however, it is immediately
* flagged so that it can be ignored, and its SIREAD locks can be
@@ -90,7 +90,7 @@
* may yet matter because they overlap still-active transactions.
*
* SerializablePredicateLockListLock
- * - Protects the linked list of locks held by a transaction. Note
+ * - Protects the linked list of locks held by a transaction. Note
* that the locks themselves are also covered by the partition
* locks of their respective lock targets; this lock only affects
* the linked list connecting the locks related to a transaction.
@@ -101,11 +101,11 @@
* - It is relatively infrequent that another process needs to
* modify the list for a transaction, but it does happen for such
* things as index page splits for pages with predicate locks and
- * freeing of predicate locked pages by a vacuum process. When
+ * freeing of predicate locked pages by a vacuum process. When
* removing a lock in such cases, the lock itself contains the
* pointers needed to remove it from the list. When adding a
* lock in such cases, the lock can be added using the anchor in
- * the transaction structure. Neither requires walking the list.
+ * the transaction structure. Neither requires walking the list.
* - Cleaning up the list for a terminated transaction is sometimes
* not done on a retail basis, in which case no lock is required.
* - Due to the above, a process accessing its active transaction's
@@ -348,7 +348,7 @@ int max_predicate_locks_per_xact; /* set by guc.c */
/*
* This provides a list of objects in order to track transactions
- * participating in predicate locking. Entries in the list are fixed size,
+ * participating in predicate locking. Entries in the list are fixed size,
* and reside in shared memory. The memory address of an entry must remain
* fixed during its lifetime. The list will be protected from concurrent
* update externally; no provision is made in this code to manage that. The
@@ -538,7 +538,7 @@ SerializationNeededForWrite(Relation relation)
/*
* These functions are a simple implementation of a list for this specific
- * type of struct. If there is ever a generalized shared memory list, we
+ * type of struct. If there is ever a generalized shared memory list, we
* should probably switch to that.
*/
static SERIALIZABLEXACT *
@@ -758,7 +758,7 @@ OldSerXidPagePrecedesLogically(int p, int q)
int diff;
/*
- * We have to compare modulo (OLDSERXID_MAX_PAGE+1)/2. Both inputs should
+ * We have to compare modulo (OLDSERXID_MAX_PAGE+1)/2. Both inputs should
* be in the range 0..OLDSERXID_MAX_PAGE.
*/
Assert(p >= 0 && p <= OLDSERXID_MAX_PAGE);
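
The modulo comparison in OldSerXidPagePrecedesLogically can be exercised with a small cycle. A standalone sketch using an 8-page cycle in place of OLDSERXID_MAX_PAGE+1: the signed distance from q to p is reduced into the range -cycle/2 .. cycle/2-1, and "precedes" means that distance is negative.

#include <stdio.h>
#include <stdbool.h>

#define MAX_PAGE 7               /* pages 0..7, so the cycle length is 8 */

static bool
page_precedes_logically(int p, int q)
{
    int diff = p - q;

    /* reduce modulo the cycle into -4..3 */
    if (diff >= (MAX_PAGE + 1) / 2)
        diff -= MAX_PAGE + 1;
    else if (diff < -((MAX_PAGE + 1) / 2))
        diff += MAX_PAGE + 1;
    return diff < 0;
}

int
main(void)
{
    /* page 6 precedes page 1: going forward 6 -> 7 -> 0 -> 1 is only
     * 3 steps, so 1 is "ahead" of 6 on the cycle */
    printf("%d\n", page_precedes_logically(6, 1));   /* 1 */
    printf("%d\n", page_precedes_logically(1, 6));   /* 0 */
    return 0;
}
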
@@ -920,7 +920,7 @@ OldSerXidAdd(TransactionId xid, SerCommitSeqNo minConflictCommitSeqNo)
}
/*
- * Get the minimum commitSeqNo for any conflict out for the given xid. For
+ * Get the minimum commitSeqNo for any conflict out for the given xid. For
* a transaction which exists but has no conflict out, InvalidSerCommitSeqNo
* will be returned.
*/
@@ -973,7 +973,7 @@ OldSerXidSetActiveSerXmin(TransactionId xid)
/*
* When no sxacts are active, nothing overlaps, set the xid values to
* invalid to show that there are no valid entries. Don't clear headPage,
- * though. A new xmin might still land on that page, and we don't want to
+ * though. A new xmin might still land on that page, and we don't want to
* repeatedly zero out the same page.
*/
if (!TransactionIdIsValid(xid))
@@ -1458,7 +1458,7 @@ SummarizeOldestCommittedSxact(void)
/*
* Grab the first sxact off the finished list -- this will be the earliest
- * commit. Remove it from the list.
+ * commit. Remove it from the list.
*/
sxact = (SERIALIZABLEXACT *)
SHMQueueNext(FinishedSerializableTransactions,
@@ -1974,7 +1974,7 @@ RemoveTargetIfNoLongerUsed(PREDICATELOCKTARGET *target, uint32 targettaghash)
/*
* Delete child target locks owned by this process.
* This implementation is assuming that the usage of each target tag field
- * is uniform. No need to make this hard if we don't have to.
+ * is uniform. No need to make this hard if we don't have to.
*
* We aren't acquiring lightweight locks for the predicate lock or lock
* target structures associated with this transaction unless we're going
@@ -2420,7 +2420,7 @@ PredicateLockTuple(Relation relation, HeapTuple tuple, Snapshot snapshot)
}
/*
- * Do quick-but-not-definitive test for a relation lock first. This will
+ * Do quick-but-not-definitive test for a relation lock first. This will
* never cause a return when the relation is *not* locked, but will
* occasionally let the check continue when there really *is* a relation
* level lock.
@@ -2732,7 +2732,7 @@ exit:
* transaction which is not serializable.
*
* NOTE: This is currently only called with transfer set to true, but that may
- * change. If we decide to clean up the locks from a table on commit of a
+ * change. If we decide to clean up the locks from a table on commit of a
* transaction which executed DROP TABLE, the false condition will be useful.
*/
static void
@@ -2813,7 +2813,7 @@ DropAllPredicateLocksFromTable(Relation relation, bool transfer)
continue; /* already the right lock */
/*
- * If we made it here, we have work to do. We make sure the heap
+ * If we made it here, we have work to do. We make sure the heap
* relation lock exists, then we walk the list of predicate locks for
* the old target we found, moving all locks to the heap relation lock
* -- unless they already hold that.
@@ -3158,7 +3158,7 @@ ReleasePredicateLocks(bool isCommit)
* If this value is changing, we don't care that much whether we get the
* old or new value -- it is just used to determine how far
 * GlobalSerializableXmin must advance before this transaction can be fully
- * cleaned up. The worst that could happen is we wait for one more
+ * cleaned up. The worst that could happen is we wait for one more
* transaction to complete before freeing some RAM; correctness of visible
* behavior is not affected.
*/
@@ -3260,7 +3260,7 @@ ReleasePredicateLocks(bool isCommit)
}
/*
- * Release all outConflicts to committed transactions. If we're rolling
+ * Release all outConflicts to committed transactions. If we're rolling
* back clear them all. Set SXACT_FLAG_CONFLICT_OUT if any point to
* previously committed transactions.
*/
@@ -3579,7 +3579,7 @@ ClearOldPredicateLocks(void)
* matter -- but keep the transaction entry itself and any outConflicts.
*
* When the summarize flag is set, we've run short of room for sxact data
- * and must summarize to the SLRU. Predicate locks are transferred to a
+ * and must summarize to the SLRU. Predicate locks are transferred to a
* dummy "old" transaction, with duplicate locks on a single target
* collapsing to a single lock with the "latest" commitSeqNo from among
 * the conflicting locks.
@@ -3772,7 +3772,7 @@ XidIsConcurrent(TransactionId xid)
/*
* CheckForSerializableConflictOut
* We are reading a tuple which has been modified. If it is visible to
- * us but has been deleted, that indicates a rw-conflict out. If it's
+ * us but has been deleted, that indicates a rw-conflict out. If it's
* not visible and was created by a concurrent (overlapping)
* serializable transaction, that is also a rw-conflict out,
*
@@ -3859,7 +3859,7 @@ CheckForSerializableConflictOut(bool visible, Relation relation,
Assert(TransactionIdFollowsOrEquals(xid, TransactionXmin));
/*
- * Find top level xid. Bail out if xid is too early to be a conflict, or
+ * Find top level xid. Bail out if xid is too early to be a conflict, or
* if it's our own xid.
*/
if (TransactionIdEquals(xid, GetTopTransactionIdIfAny()))
@@ -3924,7 +3924,7 @@ CheckForSerializableConflictOut(bool visible, Relation relation,
/*
* We have a conflict out to a transaction which has a conflict out to a
- * summarized transaction. That summarized transaction must have
+ * summarized transaction. That summarized transaction must have
* committed first, and we can't tell when it committed in relation to our
* snapshot acquisition, so something needs to be canceled.
*/
@@ -3958,7 +3958,7 @@ CheckForSerializableConflictOut(bool visible, Relation relation,
&& (!SxactHasConflictOut(sxact)
|| MySerializableXact->SeqNo.lastCommitBeforeSnapshot < sxact->SeqNo.earliestOutConflictCommit))
{
- /* Read-only transaction will appear to run first. No conflict. */
+ /* Read-only transaction will appear to run first. No conflict. */
LWLockRelease(SerializableXactHashLock);
return;
}
@@ -4549,7 +4549,7 @@ OnConflict_CheckForSerializationFailure(const SERIALIZABLEXACT *reader,
*
* If a dangerous structure is found, the pivot (the near conflict) is
* marked for death, because rolling back another transaction might mean
- * that we flail without ever making progress. This transaction is
+ * that we flail without ever making progress. This transaction is
* committing writes, so letting it commit ensures progress. If we
* canceled the far conflict, it might immediately fail again on retry.
*/
diff --git a/src/backend/storage/lmgr/proc.c b/src/backend/storage/lmgr/proc.c
index 507ab09194d..f97962ca1b3 100644
--- a/src/backend/storage/lmgr/proc.c
+++ b/src/backend/storage/lmgr/proc.c
@@ -355,7 +355,7 @@ InitProcess(void)
/*
* We might be reusing a semaphore that belonged to a failed process. So
- * be careful and reinitialize its value here. (This is not strictly
+ * be careful and reinitialize its value here. (This is not strictly
* necessary anymore, but seems like a good idea for cleanliness.)
*/
PGSemaphoreReset(&MyProc->sem);
@@ -405,7 +405,7 @@ InitProcessPhase2(void)
*
* Auxiliary processes are presently not expected to wait for real (lockmgr)
* locks, so we need not set up the deadlock checker. They are never added
- * to the ProcArray or the sinval messaging mechanism, either. They also
+ * to the ProcArray or the sinval messaging mechanism, either. They also
* don't get a VXID assigned, since this is only useful when we actually
* hold lockmgr locks.
*
@@ -502,7 +502,7 @@ InitAuxiliaryProcess(void)
/*
* We might be reusing a semaphore that belonged to a failed process. So
- * be careful and reinitialize its value here. (This is not strictly
+ * be careful and reinitialize its value here. (This is not strictly
* necessary anymore, but seems like a good idea for cleanliness.)
*/
PGSemaphoreReset(&MyProc->sem);
@@ -642,7 +642,7 @@ LockWaitCancel(void)
/*
* We used to do PGSemaphoreReset() here to ensure that our proc's wait
- * semaphore is reset to zero. This prevented a leftover wakeup signal
+ * semaphore is reset to zero. This prevented a leftover wakeup signal
* from remaining in the semaphore if someone else had granted us the lock
* we wanted before we were able to remove ourselves from the wait-list.
* However, now that ProcSleep loops until waitStatus changes, a leftover
@@ -758,7 +758,7 @@ ProcKill(int code, Datum arg)
/*
* AuxiliaryProcKill() -- Cut-down version of ProcKill for auxiliary
- * processes (bgwriter, etc). The PGPROC and sema are not released, only
+ * processes (bgwriter, etc). The PGPROC and sema are not released, only
* marked as not-in-use.
*/
static void
@@ -884,7 +884,7 @@ ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable)
*
* Special case: if I find I should go in front of some waiter, check to
* see if I conflict with already-held locks or the requests before that
- * waiter. If not, then just grant myself the requested lock immediately.
+ * waiter. If not, then just grant myself the requested lock immediately.
* This is the same as the test for immediate grant in LockAcquire, except
* we are only considering the part of the wait queue before my insertion
* point.
@@ -903,7 +903,7 @@ ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable)
if (lockMethodTable->conflictTab[lockmode] & proc->heldLocks)
{
/*
- * Yes, so we have a deadlock. Easiest way to clean up
+ * Yes, so we have a deadlock. Easiest way to clean up
* correctly is to call RemoveFromWaitQueue(), but we
* can't do that until we are *on* the wait queue. So, set
* a flag to check below, and break out of loop. Also,
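/*
 * A minimal sketch of the conflict-table test described above; the names
 * follow lock.h/proc.c conventions but are assumptions here, not lines
 * taken from the patched file.
 */
LOCKMASK	conflictMask = lockMethodTable->conflictTab[lockmode];

if (conflictMask & proc->heldLocks)
{
	/* an earlier waiter already holds a conflicting lock: deadlock */
	early_deadlock = true;
	break;
}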
@@ -1010,8 +1010,8 @@ ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable)
/*
* If someone wakes us between LWLockRelease and PGSemaphoreLock,
- * PGSemaphoreLock will not block. The wakeup is "saved" by the semaphore
- * implementation. While this is normally good, there are cases where a
+ * PGSemaphoreLock will not block. The wakeup is "saved" by the semaphore
+ * implementation. While this is normally good, there are cases where a
* saved wakeup might be leftover from a previous operation (for example,
* we aborted ProcWaitForSignal just before someone did ProcSendSignal).
* So, loop to wait again if the waitStatus shows we haven't been granted
@@ -1031,7 +1031,7 @@ ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable)
/*
* waitStatus could change from STATUS_WAITING to something else
- * asynchronously. Read it just once per loop to prevent surprising
+ * asynchronously. Read it just once per loop to prevent surprising
* behavior (such as missing log messages).
*/
myWaitStatus = MyProc->waitStatus;
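/*
 * A minimal sketch of the re-wait loop described above, assuming the
 * PGSemaphoreLock(sema, interruptOK) signature of this era: a leftover
 * wakeup merely costs one extra trip around the loop.
 */
do
{
	PGSemaphoreLock(&MyProc->sem, true);
	/* read waitStatus just once per iteration, per the comment above */
	myWaitStatus = MyProc->waitStatus;
} while (myWaitStatus == STATUS_WAITING);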
@@ -1425,10 +1425,10 @@ check_done:
* This can share the semaphore normally used for waiting for locks,
* since a backend could never be waiting for a lock and a signal at
* the same time. As with locks, it's OK if the signal arrives just
- * before we actually reach the waiting state. Also as with locks,
+ * before we actually reach the waiting state. Also as with locks,
* it's necessary that the caller be robust against bogus wakeups:
* always check that the desired state has occurred, and wait again
- * if not. This copes with possible "leftover" wakeups.
+ * if not. This copes with possible "leftover" wakeups.
*/
void
ProcWaitForSignal(void)
@@ -1482,7 +1482,7 @@ ProcSendSignal(int pid)
/*
* Enable the SIGALRM interrupt to fire after the specified delay
*
- * Delay is given in milliseconds. Caller should be sure a SIGALRM
+ * Delay is given in milliseconds. Caller should be sure a SIGALRM
* signal handler is installed before this is called.
*
* This code properly handles nesting of deadlock timeout alarms within
@@ -1533,7 +1533,7 @@ enable_sig_alarm(int delayms, bool is_statement_timeout)
* NOTE: in this case it is possible that this routine will be
* interrupted by the previously-set timer alarm. This is okay
* because the signal handler will do only what it should do according
- * to the state variables. The deadlock checker may get run earlier
+ * to the state variables. The deadlock checker may get run earlier
* than normal, but that does no harm.
*/
timeout_start_time = GetCurrentTimestamp();
diff --git a/src/backend/storage/lmgr/s_lock.c b/src/backend/storage/lmgr/s_lock.c
index 1b678075a80..bc293a9091d 100644
--- a/src/backend/storage/lmgr/s_lock.c
+++ b/src/backend/storage/lmgr/s_lock.c
@@ -77,7 +77,7 @@ s_lock(volatile slock_t *lock, const char *file, int line)
*
* We time out and declare error after NUM_DELAYS delays (thus, exactly
* that many tries). With the given settings, this will usually take 2 or
- * so minutes. It seems better to fix the total number of tries (and thus
+ * so minutes. It seems better to fix the total number of tries (and thus
* the probability of unintended failure) than to fix the total time
* spent.
*
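/*
 * A rough sketch of the bounded spin-and-delay loop described above.
 * NUM_DELAYS, MIN_DELAY_MSEC, MAX_DELAY_MSEC, and s_lock_stuck() are
 * assumed from s_lock.c conventions; the backoff factor is illustrative.
 */
int			spins = 0;
int			delays = 0;
int			cur_delay = 0;

while (TAS(lock))
{
	if (++spins >= spins_per_delay)
	{
		if (++delays > NUM_DELAYS)
			s_lock_stuck(lock, file, line);		/* report stuck spinlock */
		if (cur_delay == 0)
			cur_delay = MIN_DELAY_MSEC;
		pg_usleep(cur_delay * 1000L);
		/* back off: grow the delay, wrapping to the minimum at the cap */
		cur_delay += cur_delay / 2;
		if (cur_delay > MAX_DELAY_MSEC)
			cur_delay = MIN_DELAY_MSEC;
		spins = 0;
	}
}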
@@ -140,7 +140,7 @@ s_lock(volatile slock_t *lock, const char *file, int line)
* Note: spins_per_delay is local within our current process. We want to
* average these observations across multiple backends, since it's
* relatively rare for this function to even get entered, and so a single
- * backend might not live long enough to converge on a good value. That
+ * backend might not live long enough to converge on a good value. That
* is handled by the two routines below.
*/
if (cur_delay == 0)
@@ -179,7 +179,7 @@ update_spins_per_delay(int shared_spins_per_delay)
/*
* We use an exponential moving average with a relatively slow adaption
* rate, so that noise in any one backend's result won't affect the shared
- * value too much. As long as both inputs are within the allowed range,
+ * value too much. As long as both inputs are within the allowed range,
* the result must be too, so we need not worry about clamping the result.
*
* We deliberately truncate rather than rounding; this is so that single
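/*
 * A one-line sketch of the slow-adaption moving average described above;
 * the 15/16 weighting is an assumption consistent with the text, not a
 * quotation of the function body.
 */
spins_per_delay = (spins_per_delay * 15 + shared_spins_per_delay) / 16;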
diff --git a/src/backend/storage/lmgr/spin.c b/src/backend/storage/lmgr/spin.c
index 9f1ce1e8c8a..44a6f6f2d19 100644
--- a/src/backend/storage/lmgr/spin.c
+++ b/src/backend/storage/lmgr/spin.c
@@ -5,7 +5,7 @@
*
*
* For machines that have test-and-set (TAS) instructions, s_lock.h/.c
- * define the spinlock implementation. This file contains only a stub
+ * define the spinlock implementation. This file contains only a stub
* implementation for spinlocks using PGSemaphores. Unless semaphores
* are implemented in a way that doesn't involve a kernel call, this
* is too slow to be very useful :-(
diff --git a/src/backend/storage/page/bufpage.c b/src/backend/storage/page/bufpage.c
index 4f580fa9b5f..4f32333f42d 100644
--- a/src/backend/storage/page/bufpage.c
+++ b/src/backend/storage/page/bufpage.c
@@ -54,7 +54,7 @@ PageInit(Page page, Size pageSize, Size specialSize)
* PageHeaderIsValid
* Check that the header fields of a page appear valid.
*
- * This is called when a page has just been read in from disk. The idea is
+ * This is called when a page has just been read in from disk. The idea is
* to cheaply detect trashed pages before we go nuts following bogus item
* pointers, testing invalid transaction identifiers, etc.
*
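/*
 * A minimal sketch of the cheap sanity checks described above; the field
 * names come from bufpage.h, but the exact set of tests is an assumption.
 */
static bool
page_header_looks_valid(PageHeader page)
{
	return (page->pd_lower >= SizeOfPageHeaderData &&
			page->pd_lower <= page->pd_upper &&
			page->pd_upper <= page->pd_special &&
			page->pd_special <= BLCKSZ &&
			page->pd_special == MAXALIGN(page->pd_special));
}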
@@ -99,7 +99,7 @@ PageHeaderIsValid(PageHeader page)
/*
* PageAddItem
*
- * Add an item to a page. Return value is offset at which it was
+ * Add an item to a page. Return value is offset at which it was
* inserted, or InvalidOffsetNumber if there's not room to insert.
*
* If overwrite is true, we just store the item at the specified
@@ -699,7 +699,7 @@ PageIndexTupleDelete(Page page, OffsetNumber offnum)
* PageIndexMultiDelete
*
* This routine handles the case of deleting multiple tuples from an
- * index page at once. It is considerably faster than a loop around
+ * index page at once. It is considerably faster than a loop around
* PageIndexTupleDelete ... however, the caller *must* supply the array
* of item numbers to be deleted in item number order!
*/
diff --git a/src/backend/storage/smgr/md.c b/src/backend/storage/smgr/md.c
index ead48ccddd3..f9658b37447 100644
--- a/src/backend/storage/smgr/md.c
+++ b/src/backend/storage/smgr/md.c
@@ -77,7 +77,7 @@
* not needed because of an mdtruncate() operation. The reason for leaving
* them present at size zero, rather than unlinking them, is that other
* backends and/or the bgwriter might be holding open file references to
- * such segments. If the relation expands again after mdtruncate(), such
+ * such segments. If the relation expands again after mdtruncate(), such
* that a deactivated segment becomes active again, it is important that
* such file references still be valid --- else data might get written
* out to an unlinked old copy of a segment file that will eventually
@@ -114,7 +114,7 @@ static MemoryContext MdCxt; /* context for all md.c allocations */
* we keep track of pending fsync operations: we need to remember all relation
* segments that have been written since the last checkpoint, so that we can
* fsync them down to disk before completing the next checkpoint. This hash
- * table remembers the pending operations. We use a hash table mostly as
+ * table remembers the pending operations. We use a hash table mostly as
* a convenient way of eliminating duplicate requests.
*
* We use a similar mechanism to remember no-longer-needed files that can
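/*
 * An illustrative sketch of the pending-fsync bookkeeping described above.
 * The struct and function names are hypothetical, but hash_search() with
 * HASH_ENTER is the standard dynahash way to make duplicates collapse.
 */
typedef struct
{
	RelFileNode rnode;			/* relation */
	ForkNumber	forknum;		/* which fork */
	BlockNumber segno;			/* segment needing fsync */
} PendingFsyncTag;

static HTAB *pendingOpsTable;

static void
remember_fsync(const PendingFsyncTag *tag)
{
	bool		found;

	/* entering an existing key is a no-op, so duplicate requests merge */
	(void) hash_search(pendingOpsTable, tag, HASH_ENTER, &found);
}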
@@ -276,7 +276,7 @@ mdcreate(SMgrRelation reln, ForkNumber forkNum, bool isRedo)
* During bootstrap, there are cases where a system relation will be
* accessed (by internal backend processes) before the bootstrap
* script nominally creates it. Therefore, allow the file to exist
- * already, even if isRedo is not set. (See also mdopen)
+ * already, even if isRedo is not set. (See also mdopen)
*/
if (isRedo || IsBootstrapProcessingMode())
fd = PathNameOpenFile(path, O_RDWR | PG_BINARY, 0600);
@@ -318,7 +318,7 @@ mdcreate(SMgrRelation reln, ForkNumber forkNum, bool isRedo)
* if the contents of the file were repopulated by subsequent WAL entries.
* But if we didn't WAL-log insertions, but instead relied on fsyncing the
* file after populating it (as for instance CLUSTER and CREATE INDEX do),
- * the contents of the file would be lost forever. By leaving the empty file
+ * the contents of the file would be lost forever. By leaving the empty file
* until after the next checkpoint, we prevent reassignment of the relfilenode
* number until it's safe, because relfilenode assignment skips over any
* existing file.
@@ -470,7 +470,7 @@ mdextend(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum,
/*
* Note: because caller usually obtained blocknum by calling mdnblocks,
* which did a seek(SEEK_END), this seek is often redundant and will be
- * optimized away by fd.c. It's not redundant, however, if there is a
+ * optimized away by fd.c. It's not redundant, however, if there is a
* partial page at the end of the file. In that case we want to try to
* overwrite the partial page with a full page. It's also not redundant
* if bufmgr.c had to dump another buffer of the same file to make room
@@ -770,9 +770,9 @@ mdnblocks(SMgrRelation reln, ForkNumber forknum)
* exactly RELSEG_SIZE long, and it's useless to recheck that each time.
*
* NOTE: this assumption could only be wrong if another backend has
- * truncated the relation. We rely on higher code levels to handle that
+ * truncated the relation. We rely on higher code levels to handle that
* scenario by closing and re-opening the md fd, which is handled via
- * relcache flush. (Since the bgwriter doesn't participate in relcache
+ * relcache flush. (Since the bgwriter doesn't participate in relcache
* flush, it could have segment chain entries for inactive segments;
* that's OK because the bgwriter never needs to compute relation size.)
*/
@@ -965,7 +965,7 @@ mdsync(void)
/*
* If we are in the bgwriter, the sync had better include all fsync
- * requests that were queued by backends up to this point. The tightest
+ * requests that were queued by backends up to this point. The tightest
* race condition that could occur is that a buffer that must be written
* and fsync'd for the checkpoint could have been dumped by a backend just
* before it was visited by BufferSync(). We know the backend will have
@@ -1057,7 +1057,7 @@ mdsync(void)
* have been deleted (unlinked) by the time we get to them. Rather
* than just hoping an ENOENT (or EACCES on Windows) error can be
* ignored, what we do on error is absorb pending requests and
- * then retry. Since mdunlink() queues a "revoke" message before
+ * then retry. Since mdunlink() queues a "revoke" message before
* actually unlinking, the fsync request is guaranteed to be
* marked canceled after the absorb if it really was this case.
* DROP DATABASE likewise has to tell us to forget fsync requests
@@ -1450,7 +1450,7 @@ RememberFsyncRequest(RelFileNode rnode, ForkNumber forknum, BlockNumber segno)
/*
* NB: it's intentional that we don't change cycle_ctr if the entry
- * already exists. The fsync request must be treated as old, even
+ * already exists. The fsync request must be treated as old, even
* though the new request will be satisfied too by any subsequent
* fsync.
*
@@ -1458,7 +1458,7 @@ RememberFsyncRequest(RelFileNode rnode, ForkNumber forknum, BlockNumber segno)
* act just as though it wasn't there. The only case where this could
* happen would be if a file had been deleted, we received but did not
* yet act on the cancel request, and the same relfilenode was then
- * assigned to a new file. We mustn't lose the new request, but it
+ * assigned to a new file. We mustn't lose the new request, but it
* should be considered new not old.
*/
}
@@ -1621,7 +1621,7 @@ _mdfd_getseg(SMgrRelation reln, ForkNumber forknum, BlockNumber blkno,
{
/*
* Normally we will create new segments only if authorized by the
- * caller (i.e., we are doing mdextend()). But when doing WAL
+ * caller (i.e., we are doing mdextend()). But when doing WAL
* recovery, create segments anyway; this allows cases such as
* replaying WAL data that has a write into a high-numbered
* segment of a relation that was later deleted. We want to go
diff --git a/src/backend/storage/smgr/smgr.c b/src/backend/storage/smgr/smgr.c
index 51d966300d4..1402cd7bc10 100644
--- a/src/backend/storage/smgr/smgr.c
+++ b/src/backend/storage/smgr/smgr.c
@@ -544,7 +544,7 @@ smgrtruncate(SMgrRelation reln, ForkNumber forknum, BlockNumber nblocks)
* Send a shared-inval message to force other backends to close any smgr
* references they may have for this rel. This is useful because they
* might have open file pointers to segments that got removed, and/or
- * smgr_targblock variables pointing past the new rel end. (The inval
+ * smgr_targblock variables pointing past the new rel end. (The inval
* message will come back to our backend, too, causing a
* probably-unnecessary local smgr flush. But we don't expect that this
* is a performance-critical path.) As in the unlink code, we want to be
diff --git a/src/backend/tcop/fastpath.c b/src/backend/tcop/fastpath.c
index 30863bb7b96..869e0e1391f 100644
--- a/src/backend/tcop/fastpath.c
+++ b/src/backend/tcop/fastpath.c
@@ -42,8 +42,8 @@
* each fastpath call as a separate transaction command, and so the
* cached data could never actually have been reused. If it had worked
* as intended, it would have had problems anyway with dangling references
- * in the FmgrInfo struct. So, forget about caching and just repeat the
- * syscache fetches on each usage. They're not *that* expensive.
+ * in the FmgrInfo struct. So, forget about caching and just repeat the
+ * syscache fetches on each usage. They're not *that* expensive.
*/
struct fp_info
{
@@ -203,7 +203,7 @@ fetch_fp_info(Oid func_id, struct fp_info * fip)
/*
* Since the validity of this structure is determined by whether the
- * funcid is OK, we clear the funcid here. It must not be set to the
+ * funcid is OK, we clear the funcid here. It must not be set to the
* correct value until we are about to return with a good struct fp_info,
* since we can be interrupted (i.e., with an ereport(ERROR, ...)) at any
* time. [No longer really an issue since we don't save the struct
@@ -255,7 +255,7 @@ fetch_fp_info(Oid func_id, struct fp_info * fip)
* RETURNS:
* 0 if successful completion, EOF if frontend connection lost.
*
- * Note: All ordinary errors result in ereport(ERROR,...). However,
+ * Note: All ordinary errors result in ereport(ERROR,...). However,
* if we lose the frontend connection there is no one to ereport to,
* and no use in proceeding...
*
@@ -509,7 +509,7 @@ parse_fcall_arguments(StringInfo msgBuf, struct fp_info * fip,
/*
* Since stringinfo.c keeps a trailing null in place even for
- * binary data, the contents of abuf are a valid C string. We
+ * binary data, the contents of abuf are a valid C string. We
* have to do encoding conversion before calling the typinput
* routine, though.
*/
diff --git a/src/backend/tcop/postgres.c b/src/backend/tcop/postgres.c
index a37fa26a21b..3cdc0715ece 100644
--- a/src/backend/tcop/postgres.c
+++ b/src/backend/tcop/postgres.c
@@ -427,7 +427,7 @@ SocketBackend(StringInfo inBuf)
default:
/*
- * Otherwise we got garbage from the frontend. We treat this as
+ * Otherwise we got garbage from the frontend. We treat this as
* fatal because we have probably lost message boundary sync, and
* there's no good way to recover.
*/
@@ -826,7 +826,7 @@ exec_simple_query(const char *query_string)
ResetUsage();
/*
- * Start up a transaction command. All queries generated by the
+ * Start up a transaction command. All queries generated by the
* query_string will be in this same command block, *unless* we find a
* BEGIN/COMMIT/ABORT statement; we have to force a new xact command after
* one of those, else bad things will happen in xact.c. (Note that this
@@ -835,7 +835,7 @@ exec_simple_query(const char *query_string)
start_xact_command();
/*
- * Zap any pre-existing unnamed statement. (While not strictly necessary,
+ * Zap any pre-existing unnamed statement. (While not strictly necessary,
* it seems best to define simple-Query mode as if it used the unnamed
* statement and portal; this ensures we recover any storage used by prior
* unnamed operations.)
@@ -894,7 +894,7 @@ exec_simple_query(const char *query_string)
/*
* Get the command name for use in status display (it also becomes the
- * default completion tag, down inside PortalRun). Set ps_status and
+ * default completion tag, down inside PortalRun). Set ps_status and
* do any special start-of-SQL-command processing needed by the
* destination.
*/
@@ -982,7 +982,7 @@ exec_simple_query(const char *query_string)
/*
* Select the appropriate output format: text unless we are doing a
- * FETCH from a binary cursor. (Pretty grotty to have to do this here
+ * FETCH from a binary cursor. (Pretty grotty to have to do this here
* --- but it avoids grottiness in other places. Ah, the joys of
* backward compatibility...)
*/
@@ -1296,7 +1296,7 @@ exec_parse_message(const char *query_string, /* string to execute */
}
else
{
- /* Empty input string. This is legal. */
+ /* Empty input string. This is legal. */
raw_parse_tree = NULL;
commandTag = NULL;
stmt_list = NIL;
@@ -1355,7 +1355,7 @@ exec_parse_message(const char *query_string, /* string to execute */
/*
* We do NOT close the open transaction command here; that only happens
- * when the client sends Sync. Instead, do CommandCounterIncrement just
+ * when the client sends Sync. Instead, do CommandCounterIncrement just
* in case something happened during parse/plan.
*/
CommandCounterIncrement();
@@ -1498,7 +1498,7 @@ exec_bind_message(StringInfo input_message)
* If we are in aborted transaction state, the only portals we can
* actually run are those containing COMMIT or ROLLBACK commands. We
* disallow binding anything else to avoid problems with infrastructure
- * that expects to run inside a valid transaction. We also disallow
+ * that expects to run inside a valid transaction. We also disallow
* binding any parameters, since we can't risk calling user-defined I/O
* functions.
*/
@@ -1585,7 +1585,7 @@ exec_bind_message(StringInfo input_message)
/*
* Rather than copying data around, we just set up a phony
* StringInfo pointing to the correct portion of the message
- * buffer. We assume we can scribble on the message buffer so
+ * buffer. We assume we can scribble on the message buffer so
* as to maintain the convention that StringInfos have a
* trailing null. This is grotty but is a big win when
* dealing with very large parameter strings.
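/*
 * A minimal sketch of the phony-StringInfo trick described above.
 * pvalue/plength are hypothetical names for the parameter bytes inside
 * the message buffer; the field layout is stringinfo.h's.
 */
StringInfoData pbuf;
char		csave;

pbuf.data = (char *) pvalue;	/* points straight into the message */
pbuf.maxlen = plength + 1;
pbuf.len = plength;
pbuf.cursor = 0;

csave = pbuf.data[plength];		/* scribble a trailing null ... */
pbuf.data[plength] = '\0';		/* ... and restore csave when done */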
@@ -1976,7 +1976,7 @@ exec_execute_message(const char *portal_name, long max_rows)
if (is_xact_command)
{
/*
- * If this was a transaction control statement, commit it. We
+ * If this was a transaction control statement, commit it. We
* will start a new xact command for the next command (if any).
*/
finish_xact_command();
@@ -2389,7 +2389,7 @@ exec_describe_portal_message(const char *portal_name)
/*
* If we are in aborted transaction state, we can't run
* SendRowDescriptionMessage(), because that needs catalog accesses.
- * Hence, refuse to Describe portals that return data. (We shouldn't just
+ * Hence, refuse to Describe portals that return data. (We shouldn't just
* refuse all Describes, since that might break the ability of some
* clients to issue COMMIT or ROLLBACK commands, if they use code that
* blindly Describes whatever it does.)
@@ -2611,7 +2611,7 @@ quickdie(SIGNAL_ARGS)
on_exit_reset();
/*
- * Note we do exit(2) not exit(0). This is to force the postmaster into a
+ * Note we do exit(2) not exit(0). This is to force the postmaster into a
* system reset cycle if some idiot DBA sends a manual SIGQUIT to a random
* backend. This is necessary precisely because we don't clean up our
* shared memory state. (The "dead man switch" mechanism in pmsignal.c
@@ -3293,7 +3293,7 @@ process_postgres_switches(int argc, char *argv[], GucContext ctx,
}
/*
- * Parse command-line options. CAUTION: keep this in sync with
+ * Parse command-line options. CAUTION: keep this in sync with
* postmaster/postmaster.c (the option sets should not conflict) and with
* the common help() function in main/main.c.
*/
@@ -3590,7 +3590,7 @@ PostgresMain(int argc, char *argv[],
* we have set up the handler.
*
* Also note: it's best not to use any signals that are SIG_IGNored in the
- * postmaster. If such a signal arrives before we are able to change the
+ * postmaster. If such a signal arrives before we are able to change the
* handler to non-SIG_IGN, it'll get dropped. Instead, make a dummy
* handler in the postmaster to reserve the signal. (Of course, this isn't
* an issue for signals that are locally generated, such as SIGALRM and
@@ -3786,7 +3786,7 @@ PostgresMain(int argc, char *argv[],
/*
* NOTE: if you are tempted to add more code in this if-block,
* consider the high probability that it should be in
- * AbortTransaction() instead. The only stuff done directly here
+ * AbortTransaction() instead. The only stuff done directly here
* should be stuff that is guaranteed to apply *only* for outer-level
* error recovery, such as adjusting the FE/BE protocol status.
*/
@@ -3890,7 +3890,7 @@ PostgresMain(int argc, char *argv[],
* collector, and to update the PS stats display. We avoid doing
* those every time through the message loop because it'd slow down
* processing of batched messages, and because we don't want to report
- * uncommitted updates (that confuses autovacuum). The notification
+ * uncommitted updates (that confuses autovacuum). The notification
* processor wants a call too, if we are not in a transaction block.
*/
if (send_ready_for_query)
diff --git a/src/backend/tcop/pquery.c b/src/backend/tcop/pquery.c
index e8f557c1a1e..ef6520c5783 100644
--- a/src/backend/tcop/pquery.c
+++ b/src/backend/tcop/pquery.c
@@ -557,7 +557,7 @@ PortalStart(Portal portal, ParamListInfo params, Snapshot snapshot)
/*
* We don't start the executor until we are told to run the
- * portal. We do need to set up the result tupdesc.
+ * portal. We do need to set up the result tupdesc.
*/
{
PlannedStmt *pstmt;
@@ -907,7 +907,7 @@ PortalRunSelect(Portal portal,
Assert(queryDesc || portal->holdStore);
/*
- * Force the queryDesc destination to the right thing. This supports
+ * Force the queryDesc destination to the right thing. This supports
* MOVE, for example, which will pass in dest = DestNone. This is okay to
* change as long as we do it on every fetch. (The Executor must not
* assume that dest never changes.)
@@ -1155,12 +1155,12 @@ PortalRunUtility(Portal portal, Node *utilityStmt, bool isTopLevel,
elog(DEBUG3, "ProcessUtility");
/*
- * Set snapshot if utility stmt needs one. Most reliable way to do this
+ * Set snapshot if utility stmt needs one. Most reliable way to do this
* seems to be to enumerate those that do not need one; this is a short
* list. Transaction control, LOCK, and SET must *not* set a snapshot
* since they need to be executable at the start of a transaction-snapshot
* mode transaction without freezing a snapshot. By extension we allow
- * SHOW not to set a snapshot. The other stmts listed are just efficiency
+ * SHOW not to set a snapshot. The other stmts listed are just efficiency
* hacks. Beware of listing anything that can modify the database --- if,
* say, it has to update an index with expressions that invoke
* user-defined functions, then it had better have a snapshot.
@@ -1195,7 +1195,7 @@ PortalRunUtility(Portal portal, Node *utilityStmt, bool isTopLevel,
/*
* Some utility commands may pop the ActiveSnapshot stack from under us,
- * so we only pop the stack if we actually see a snapshot set. Note that
+ * so we only pop the stack if we actually see a snapshot set. Note that
* the set of utility commands that do this must be the same set
* disallowed to run inside a transaction; otherwise, we could be popping
* a snapshot that belongs to some other operation.
@@ -1517,7 +1517,7 @@ DoPortalRunFetch(Portal portal,
* Definition: Rewind to start, advance count-1 rows, return
* next row (if any). In practice, if the goal is less than
* halfway back to the start, it's better to scan from where
- * we are. In any case, we arrange to fetch the target row
+ * we are. In any case, we arrange to fetch the target row
* going forwards.
*/
if (portal->posOverflow || portal->portalPos == LONG_MAX ||
@@ -1624,7 +1624,7 @@ DoPortalRunFetch(Portal portal,
* If we are sitting on a row, back up one so we can re-fetch it.
* If we are not sitting on a row, we still have to start up and
* shut down the executor so that the destination is initialized
- * and shut down correctly; so keep going. To PortalRunSelect,
+ * and shut down correctly; so keep going. To PortalRunSelect,
* count == 0 means we will retrieve no row.
*/
if (on_row)
diff --git a/src/backend/tcop/utility.c b/src/backend/tcop/utility.c
index beaa08358a2..c096b52bfd6 100644
--- a/src/backend/tcop/utility.c
+++ b/src/backend/tcop/utility.c
@@ -258,7 +258,7 @@ PreventCommandIfReadOnly(const char *cmdname)
* PreventCommandDuringRecovery: throw error if RecoveryInProgress
*
* The majority of operations that are unsafe in a Hot Standby slave
- * will be rejected by XactReadOnly tests. However there are a few
+ * will be rejected by XactReadOnly tests. However there are a few
* commands that are allowed in "read-only" xacts but cannot be allowed
* in Hot Standby mode. Those commands should call this function.
*/
diff --git a/src/backend/tsearch/ts_locale.c b/src/backend/tsearch/ts_locale.c
index 93b550a1a50..75da5068a12 100644
--- a/src/backend/tsearch/ts_locale.c
+++ b/src/backend/tsearch/ts_locale.c
@@ -90,7 +90,7 @@ t_isprint(const char *ptr)
/*
- * Set up to read a file using tsearch_readline(). This facility is
+ * Set up to read a file using tsearch_readline(). This facility is
* better than just reading the file directly because it provides error
* context pointing to the specific line where a problem is detected.
*
@@ -168,7 +168,7 @@ tsearch_readline_callback(void *arg)
/*
* We can't include the text of the config line for errors that occur
- * during t_readline() itself. This is only partly a consequence of our
+ * during t_readline() itself. This is only partly a consequence of our
* arms-length use of that routine: the major cause of such errors is
* encoding violations, and we daren't try to print error messages
* containing badly-encoded data.
diff --git a/src/backend/tsearch/ts_typanalyze.c b/src/backend/tsearch/ts_typanalyze.c
index 2654d644579..308ae0893c8 100644
--- a/src/backend/tsearch/ts_typanalyze.c
+++ b/src/backend/tsearch/ts_typanalyze.c
@@ -115,13 +115,13 @@ ts_typanalyze(PG_FUNCTION_ARGS)
* language's frequency table, where K is the target number of entries in
* the MCELEM array plus an arbitrary constant, meant to reflect the fact
* that the most common words in any language would usually be stopwords
- * so we will not actually see them in the input. We assume that the
+ * so we will not actually see them in the input. We assume that the
* distribution of word frequencies (including the stopwords) follows Zipf's
* law with an exponent of 1.
*
* Assuming Zipfian distribution, the frequency of the K'th word is equal
* to 1/(K * H(W)) where H(n) is 1/2 + 1/3 + ... + 1/n and W is the number of
- * words in the language. Putting W as one million, we get roughly 0.07/K.
+ * words in the language. Putting W as one million, we get roughly 0.07/K.
* Assuming top 10 words are stopwords gives s = 0.07/(K + 10). We set
* epsilon = s/10, which gives bucket width w = (K + 10)/0.007 and
* maximum expected hashtable size of about 1000 * (K + 10).
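/*
 * The arithmetic above, restated as a sketch; K stands for the target
 * MCELEM count and the variable names are illustrative only.
 *
 *	H(W) ~= ln(W) + 0.5772 - 1 ~= 13.4 for W = 1e6, so 1/H(W) ~= 0.07
 */
double		s = 0.07 / (K + 10);	/* top 10 words assumed stopwords */
double		epsilon = s / 10;
double		bucket_width = 1.0 / epsilon;	/* = (K + 10) / 0.007 */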
@@ -162,7 +162,7 @@ compute_tsvector_stats(VacAttrStats *stats,
TrackItem *item;
/*
- * We want statistics_target * 10 lexemes in the MCELEM array. This
+ * We want statistics_target * 10 lexemes in the MCELEM array. This
* multiplier is pretty arbitrary, but is meant to reflect the fact that
* the number of individual lexeme values tracked in pg_statistic ought to
* be more than the number of values for a simple scalar column.
@@ -233,7 +233,7 @@ compute_tsvector_stats(VacAttrStats *stats,
/*
* We loop through the lexemes in the tsvector and add them to our
- * tracking hashtable. Note: the hashtable entries will point into
+ * tracking hashtable. Note: the hashtable entries will point into
* the (detoasted) tsvector value, therefore we cannot free that
* storage until we're done.
*/
@@ -300,7 +300,7 @@ compute_tsvector_stats(VacAttrStats *stats,
/*
* Construct an array of the interesting hashtable items, that is,
- * those meeting the cutoff frequency (s - epsilon)*N. Also identify
+ * those meeting the cutoff frequency (s - epsilon)*N. Also identify
* the minimum and maximum frequencies among these items.
*
* Since epsilon = s/10 and bucket_width = 1/epsilon, the cutoff
@@ -333,7 +333,7 @@ compute_tsvector_stats(VacAttrStats *stats,
/*
* If we obtained more lexemes than we really want, get rid of those
- * with least frequencies. The easiest way is to qsort the array into
+ * with least frequencies. The easiest way is to qsort the array into
* descending frequency order and truncate the array.
*/
if (num_mcelem < track_len)
@@ -364,7 +364,7 @@ compute_tsvector_stats(VacAttrStats *stats,
* they get sorted on frequencies. The rationale is that we
* usually search through most common elements looking for a
* specific value, so we can grab its frequency. When values are
- * presorted we can employ binary search for that. See
+ * presorted we can employ binary search for that. See
* ts_selfuncs.c for a real usage scenario.
*/
qsort(sort_table, num_mcelem, sizeof(TrackItem *),
diff --git a/src/backend/tsearch/ts_utils.c b/src/backend/tsearch/ts_utils.c
index abf53c00197..29cd383b488 100644
--- a/src/backend/tsearch/ts_utils.c
+++ b/src/backend/tsearch/ts_utils.c
@@ -25,8 +25,8 @@
/*
* Given the base name and extension of a tsearch config file, return
- * its full path name. The base name is assumed to be user-supplied,
- * and is checked to prevent pathname attacks. The extension is assumed
+ * its full path name. The base name is assumed to be user-supplied,
+ * and is checked to prevent pathname attacks. The extension is assumed
* to be safe.
*
* The result is a palloc'd string.
@@ -39,7 +39,7 @@ get_tsearch_config_filename(const char *basename,
char *result;
/*
- * We limit the basename to contain a-z, 0-9, and underscores. This may
+ * We limit the basename to contain a-z, 0-9, and underscores. This may
* be overly restrictive, but we don't want to allow access to anything
* outside the tsearch_data directory, so for instance '/' *must* be
* rejected, and on some platforms '\' and ':' are risky as well. Allowing
@@ -69,7 +69,7 @@ comparestr(const void *a, const void *b)
/*
* Reads a stop-word file. Each word is run through 'wordop'
- * function, if given. wordop may either modify the input in-place,
+ * function, if given. wordop may either modify the input in-place,
* or palloc a new version.
*/
void
diff --git a/src/backend/tsearch/wparser_def.c b/src/backend/tsearch/wparser_def.c
index f66851426b4..b2da46c5872 100644
--- a/src/backend/tsearch/wparser_def.c
+++ b/src/backend/tsearch/wparser_def.c
@@ -330,7 +330,7 @@ TParserInit(char *str, int len)
/*
* Use of %.*s here is a bit risky since it can misbehave if the data is
- * not in what libc thinks is the prevailing encoding. However, since
+ * not in what libc thinks is the prevailing encoding. However, since
* this is just a debugging aid, we choose to live with that.
*/
fprintf(stderr, "parsing \"%.*s\"\n", len, str);
diff --git a/src/backend/utils/adt/acl.c b/src/backend/utils/adt/acl.c
index 7c4eaaca8a3..e32e081482e 100644
--- a/src/backend/utils/adt/acl.c
+++ b/src/backend/utils/adt/acl.c
@@ -119,7 +119,7 @@ static Oid get_role_oid_or_public(const char *rolname);
/*
* getid
* Consumes the first alphanumeric string (identifier) found in string
- * 's', ignoring any leading white space. If it finds a double quote
+ * 's', ignoring any leading white space. If it finds a double quote
* it returns the word inside the quotes.
*
* RETURNS:
@@ -225,7 +225,7 @@ putid(char *p, const char *s)
*
* RETURNS:
* the string position in 's' immediately following the ACL
- * specification. Also:
+ * specification. Also:
* - loads the structure pointed to by 'aip' with the appropriate
* UID/GID, id type identifier and mode type values.
*/
@@ -939,7 +939,7 @@ aclupdate(const Acl *old_acl, const AclItem *mod_aip,
}
/*
- * Remove abandoned privileges (cascading revoke). Currently we can only
+ * Remove abandoned privileges (cascading revoke). Currently we can only
* handle this when the grantee is not PUBLIC.
*/
if ((old_goptions & ~new_goptions) != 0)
@@ -1005,7 +1005,7 @@ aclnewowner(const Acl *old_acl, Oid oldOwnerId, Oid newOwnerId)
/*
* If the old ACL contained any references to the new owner, then we may
- * now have generated an ACL containing duplicate entries. Find them and
+ * now have generated an ACL containing duplicate entries. Find them and
* merge them so that there are not duplicates. (This is relatively
* expensive since we use a stupid O(N^2) algorithm, but it's unlikely to
* be the normal case.)
@@ -1016,7 +1016,7 @@ aclnewowner(const Acl *old_acl, Oid oldOwnerId, Oid newOwnerId)
* remove privilege-free entries, should there be any in the input.) dst
* is the next output slot, targ is the currently considered input slot
* (always >= dst), and src scans entries to the right of targ looking for
- * duplicates. Once an entry has been emitted to dst it is known
+ * duplicates. Once an entry has been emitted to dst it is known
* duplicate-free and need not be considered anymore.
*/
if (newpresent)
@@ -2400,7 +2400,7 @@ column_privilege_check(Oid tableoid, AttrNumber attnum,
* existence of the pg_class row before risking calling pg_class_aclcheck.
* Note: it might seem there's a race condition against concurrent DROP,
* but really it's safe because there will be no syscache flush between
- * here and there. So if we see the row in the syscache, so will
+ * here and there. So if we see the row in the syscache, so will
* pg_class_aclcheck.
*/
if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(tableoid)))
@@ -4747,14 +4747,14 @@ count_one_bits(AclMode mask)
* The grantor must always be either the object owner or some role that has
* been explicitly granted grant options. This ensures that all granted
* privileges appear to flow from the object owner, and there are never
- * multiple "original sources" of a privilege. Therefore, if the would-be
+ * multiple "original sources" of a privilege. Therefore, if the would-be
* grantor is a member of a role that has the needed grant options, we have
* to do the grant as that role instead.
*
* It is possible that the would-be grantor is a member of several roles
* that have different subsets of the desired grant options, but no one
* role has 'em all. In this case we pick a role with the largest number
- * of desired options. Ties are broken in favor of closer ancestors.
+ * of desired options. Ties are broken in favor of closer ancestors.
*
* roleId: the role attempting to do the GRANT/REVOKE
* privileges: the privileges to be granted/revoked
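/*
 * The tie-breaking above depends on counting grant-option bits; a simple
 * sketch of count_one_bits (the function named in the hunk header) might
 * look like this:
 */
static int
count_one_bits(AclMode mask)
{
	int			nbits = 0;

	while (mask != 0)
	{
		nbits += (int) (mask & 1);
		mask >>= 1;
	}
	return nbits;
}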
diff --git a/src/backend/utils/adt/array_userfuncs.c b/src/backend/utils/adt/array_userfuncs.c
index 274e867fdd1..bd186cb6cfc 100644
--- a/src/backend/utils/adt/array_userfuncs.c
+++ b/src/backend/utils/adt/array_userfuncs.c
@@ -502,7 +502,7 @@ array_agg_transfn(PG_FUNCTION_ARGS)
/*
* The transition type for array_agg() is declared to be "internal", which
- * is a pass-by-value type the same size as a pointer. So we can safely
+ * is a pass-by-value type the same size as a pointer. So we can safely
* pass the ArrayBuildState pointer through nodeAgg.c's machinations.
*/
PG_RETURN_POINTER(state);
@@ -517,7 +517,7 @@ array_agg_finalfn(PG_FUNCTION_ARGS)
int lbs[1];
/*
- * Test for null before Asserting we are in right context. This is to
+ * Test for null before Asserting we are in right context. This is to
* avoid possible Assert failure in 8.4beta installations, where it is
* possible for users to create NULL constants of type internal.
*/
diff --git a/src/backend/utils/adt/arrayfuncs.c b/src/backend/utils/adt/arrayfuncs.c
index 3e43e951e10..e8621ff8751 100644
--- a/src/backend/utils/adt/arrayfuncs.c
+++ b/src/backend/utils/adt/arrayfuncs.c
@@ -689,7 +689,7 @@ ReadArrayStr(char *arrayStr,
/*
* We have to remove " and \ characters to create a clean item value to
- * pass to the datatype input routine. We overwrite each item value
+ * pass to the datatype input routine. We overwrite each item value
* in-place within arrayStr to do this. srcptr is the current scan point,
* and dstptr is where we are copying to.
*
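/*
 * A simplified sketch of the in-place de-quoting scan described above;
 * itemstart is a hypothetical name, and the real ReadArrayStr loop also
 * tracks quoting state, delimiters, and nesting.
 */
char	   *srcptr = itemstart;
char	   *dstptr = itemstart;

while (*srcptr != '\0')
{
	if (*srcptr == '"')
		srcptr++;				/* quotes only delimit; drop them */
	else if (*srcptr == '\\' && srcptr[1] != '\0')
	{
		srcptr++;				/* drop the backslash ... */
		*dstptr++ = *srcptr++;	/* ... keep the escaped character */
	}
	else
		*dstptr++ = *srcptr++;
}
*dstptr = '\0';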
@@ -889,7 +889,7 @@ ReadArrayStr(char *arrayStr,
* referenced by Datums after copying them.
*
* If the input data is of varlena type, the caller must have ensured that
- * the values are not toasted. (Doing it here doesn't work since the
+ * the values are not toasted. (Doing it here doesn't work since the
* caller has already allocated space for the array...)
*/
static void
@@ -1985,7 +1985,7 @@ array_get_slice(ArrayType *array,
memcpy(ARR_DIMS(newarray), span, ndim * sizeof(int));
/*
- * Lower bounds of the new array are set to 1. Formerly (before 7.3) we
+ * Lower bounds of the new array are set to 1. Formerly (before 7.3) we
* copied the given lowerIndx values ... but that seems confusing.
*/
newlb = ARR_LBOUND(newarray);
@@ -2617,7 +2617,7 @@ array_set_slice(ArrayType *array,
/*
* array_map()
*
- * Map an array through an arbitrary function. Return a new array with
+ * Map an array through an arbitrary function. Return a new array with
* same dimensions and each source element transformed by fn(). Each
* source element is passed as the first argument to fn(); additional
* arguments to be passed to fn() can be specified by the caller.
@@ -2632,9 +2632,9 @@ array_set_slice(ArrayType *array,
* first argument position initially holds the input array value.
* * inpType: OID of element type of input array. This must be the same as,
* or binary-compatible with, the first argument type of fn().
- * * retType: OID of element type of output array. This must be the same as,
+ * * retType: OID of element type of output array. This must be the same as,
* or binary-compatible with, the result type of fn().
- * * amstate: workspace for array_map. Must be zeroed by caller before
+ * * amstate: workspace for array_map. Must be zeroed by caller before
* first call, and not touched after that.
*
* It is legitimate to pass a freshly-zeroed ArrayMapState on each call,
@@ -3488,7 +3488,7 @@ array_cmp(FunctionCallInfo fcinfo)
/*
* If arrays contain same data (up to end of shorter one), apply
- * additional rules to sort by dimensionality. The relative significance
+ * additional rules to sort by dimensionality. The relative significance
* of the different bits of information is historical; mainly we just care
* that we don't say "equal" for arrays of different dimensionality.
*/
@@ -3750,7 +3750,7 @@ array_contain_compare(ArrayType *array1, ArrayType *array2, Oid collation,
/*
* We assume that the comparison operator is strict, so a NULL can't
- * match anything. XXX this diverges from the "NULL=NULL" behavior of
+ * match anything. XXX this diverges from the "NULL=NULL" behavior of
* array_eq, should we act like that?
*/
if (isnull1)
@@ -4241,7 +4241,7 @@ array_copy(char *destptr, int nitems,
*
* Note: this could certainly be optimized using standard bitblt methods.
* However, it's not clear that the typical Postgres array has enough elements
- * to make it worth worrying too much. For the moment, KISS.
+ * to make it worth worrying too much. For the moment, KISS.
*/
void
array_bitmap_copy(bits8 *destbitmap, int destoffset,
@@ -4438,7 +4438,7 @@ array_extract_slice(ArrayType *newarray,
* Insert a slice into an array.
*
* ndim/dim[]/lb[] are dimensions of the original array. A new array with
- * those same dimensions is to be constructed. destArray must already
+ * those same dimensions is to be constructed. destArray must already
* have been allocated and its header initialized.
*
* st[]/endp[] identify the slice to be replaced. Elements within the slice
@@ -5106,7 +5106,7 @@ array_unnest(PG_FUNCTION_ARGS)
* Get the array value and detoast if needed. We can't do this
* earlier because if we have to detoast, we want the detoasted copy
* to be in multi_call_memory_ctx, so it will go away when we're done
- * and not before. (If no detoast happens, we assume the originally
+ * and not before. (If no detoast happens, we assume the originally
* passed array will stick around till then.)
*/
arr = PG_GETARG_ARRAYTYPE_P(0);
diff --git a/src/backend/utils/adt/arrayutils.c b/src/backend/utils/adt/arrayutils.c
index af7359ca4e3..cf248916000 100644
--- a/src/backend/utils/adt/arrayutils.c
+++ b/src/backend/utils/adt/arrayutils.c
@@ -193,7 +193,7 @@ mda_next_tuple(int n, int *curr, const int *span)
/*
* ArrayGetIntegerTypmods: verify that argument is a 1-D cstring array,
- * and get the contents converted to integers. Returns a palloc'd array
+ * and get the contents converted to integers. Returns a palloc'd array
* and places the length at *n.
*/
int32 *
diff --git a/src/backend/utils/adt/char.c b/src/backend/utils/adt/char.c
index 974cb223589..5a5f83a9aaf 100644
--- a/src/backend/utils/adt/char.c
+++ b/src/backend/utils/adt/char.c
@@ -59,7 +59,7 @@ charout(PG_FUNCTION_ARGS)
* charrecv - converts external binary format to char
*
* The external representation is one byte, with no character set
- * conversion. This is somewhat dubious, perhaps, but in many
+ * conversion. This is somewhat dubious, perhaps, but in many
* cases people use char for a 1-byte binary type.
*/
Datum
diff --git a/src/backend/utils/adt/date.c b/src/backend/utils/adt/date.c
index 51c6b6bacbf..714250b9d0f 100644
--- a/src/backend/utils/adt/date.c
+++ b/src/backend/utils/adt/date.c
@@ -1257,7 +1257,7 @@ AdjustTimeForTypmod(TimeADT *time, int32 typmod)
* Note: this round-to-nearest code is not completely consistent about
* rounding values that are exactly halfway between integral values.
* On most platforms, rint() will implement round-to-nearest-even, but
- * the integer code always rounds up (away from zero). Is it worth
+ * the integer code always rounds up (away from zero). Is it worth
* trying to be consistent?
*/
#ifdef HAVE_INT64_TIMESTAMP
@@ -1606,7 +1606,7 @@ time_interval(PG_FUNCTION_ARGS)
* Convert interval to time data type.
*
* This is defined as producing the fractional-day portion of the interval.
- * Therefore, we can just ignore the months field. It is not real clear
+ * Therefore, we can just ignore the months field. It is not real clear
* what to do with negative intervals, but we choose to subtract the floor,
* so that, say, '-2 hours' becomes '22:00:00'.
*/
@@ -2596,7 +2596,7 @@ timetz_zone(PG_FUNCTION_ARGS)
pg_tz *tzp;
/*
- * Look up the requested timezone. First we look in the date token table
+ * Look up the requested timezone. First we look in the date token table
* (to handle cases like "EST"), and if that fails, we look in the
* timezone database (to handle cases like "America/New_York"). (This
* matches the order in which timestamp input checks the cases; it's
diff --git a/src/backend/utils/adt/datetime.c b/src/backend/utils/adt/datetime.c
index 05e1730c881..59e06bb569d 100644
--- a/src/backend/utils/adt/datetime.c
+++ b/src/backend/utils/adt/datetime.c
@@ -352,7 +352,7 @@ j2date(int jd, int *year, int *month, int *day)
* j2day - convert Julian date to day-of-week (0..6 == Sun..Sat)
*
* Note: various places use the locution j2day(date - 1) to produce a
- * result according to the convention 0..6 = Mon..Sun. This is a bit of
+ * result according to the convention 0..6 = Mon..Sun. This is a bit of
* a crock, but will work as long as the computation here is just a modulo.
*/
int
@@ -2472,7 +2472,7 @@ DecodeNumber(int flen, char *str, bool haveTextMonth, int fmask,
/*
* Nothing so far; make a decision about what we think the input
- * is. There used to be lots of heuristics here, but the
+ * is. There used to be lots of heuristics here, but the
* consensus now is to be paranoid. It *must* be either
* YYYY-MM-DD (with a more-than-two-digit year field), or the
* field order defined by DateOrder.
@@ -2505,9 +2505,9 @@ DecodeNumber(int flen, char *str, bool haveTextMonth, int fmask,
{
/*
* We are at the first numeric field of a date that included a
- * textual month name. We want to support the variants
+ * textual month name. We want to support the variants
* MON-DD-YYYY, DD-MON-YYYY, and YYYY-MON-DD as unambiguous
- * inputs. We will also accept MON-DD-YY or DD-MON-YY in
+ * inputs. We will also accept MON-DD-YY or DD-MON-YY in
* either DMY or MDY modes, as well as YY-MON-DD in YMD mode.
*/
if (flen >= 3 || DateOrder == DATEORDER_YMD)
@@ -3315,7 +3315,7 @@ DecodeISO8601Interval(char *str,
return dterr;
/*
- * Note: we could step off the end of the string here. Code below
+ * Note: we could step off the end of the string here. Code below
* *must* exit the loop if unit == '\0'.
*/
unit = *str++;
@@ -4109,7 +4109,7 @@ EncodeInterval(struct pg_tm * tm, fsec_t fsec, int style, char *str)
/*
* We've been burnt by stupid errors in the ordering of the datetkn tables
- * once too often. Arrange to check them during postmaster start.
+ * once too often. Arrange to check them during postmaster start.
*/
static bool
CheckDateTokenTable(const char *tablename, const datetkn *base, int nel)
diff --git a/src/backend/utils/adt/datum.c b/src/backend/utils/adt/datum.c
index 1fd7ff777ae..f29778a7f12 100644
--- a/src/backend/utils/adt/datum.c
+++ b/src/backend/utils/adt/datum.c
@@ -181,7 +181,7 @@ datumIsEqual(Datum value1, Datum value2, bool typByVal, int typLen)
/*
* just compare the two datums. NOTE: just comparing "len" bytes will
* not do the work, because we do not know how these bytes are aligned
- * inside the "Datum". We assume instead that any given datatype is
+ * inside the "Datum". We assume instead that any given datatype is
* consistent about how it fills extraneous bits in the Datum.
*/
res = (value1 == value2);
diff --git a/src/backend/utils/adt/dbsize.c b/src/backend/utils/adt/dbsize.c
index 7381f217251..f73e4cd5c6e 100644
--- a/src/backend/utils/adt/dbsize.c
+++ b/src/backend/utils/adt/dbsize.c
@@ -510,7 +510,7 @@ pg_size_pretty(PG_FUNCTION_ARGS)
* This is expected to be used in queries like
* SELECT pg_relation_filenode(oid) FROM pg_class;
* That leads to a couple of choices. We work from the pg_class row alone
- * rather than actually opening each relation, for efficiency. We don't
+ * rather than actually opening each relation, for efficiency. We don't
* fail if we can't find the relation --- some rows might be visible in
* the query's MVCC snapshot but already dead according to SnapshotNow.
* (Note: we could avoid using the catcache, but there's little point
diff --git a/src/backend/utils/adt/domains.c b/src/backend/utils/adt/domains.c
index c178fd0bddb..acf8c0cc5bb 100644
--- a/src/backend/utils/adt/domains.c
+++ b/src/backend/utils/adt/domains.c
@@ -12,11 +12,11 @@
* The overhead required for constraint checking can be high, since examining
* the catalogs to discover the constraints for a given domain is not cheap.
* We have three mechanisms for minimizing this cost:
- * 1. In a nest of domains, we flatten the checking of all the levels
+ * 1. In a nest of domains, we flatten the checking of all the levels
* into just one operation.
- * 2. We cache the list of constraint items in the FmgrInfo struct
+ * 2. We cache the list of constraint items in the FmgrInfo struct
* passed by the caller.
- * 3. If there are CHECK constraints, we cache a standalone ExprContext
+ * 3. If there are CHECK constraints, we cache a standalone ExprContext
* to evaluate them in.
*
*
@@ -305,7 +305,7 @@ domain_recv(PG_FUNCTION_ARGS)
/*
* domain_check - check that a datum satisfies the constraints of a
- * domain. extra and mcxt can be passed if they are available from,
+ * domain. extra and mcxt can be passed if they are available from,
* say, a FmgrInfo structure, or they can be NULL, in which case the
* setup is repeated for each call.
*/
diff --git a/src/backend/utils/adt/float.c b/src/backend/utils/adt/float.c
index b929abeba78..61efe0d8a5f 100644
--- a/src/backend/utils/adt/float.c
+++ b/src/backend/utils/adt/float.c
@@ -287,7 +287,7 @@ float4in(PG_FUNCTION_ARGS)
/*
* In some IRIX versions, strtod() recognizes only "inf", so if the input
- * is "infinity" we have to skip over "inity". Also, it may return
+ * is "infinity" we have to skip over "inity". Also, it may return
* positive infinity for "-inf".
*/
if (isinf(val))
@@ -508,7 +508,7 @@ float8in(PG_FUNCTION_ARGS)
/*
* In some IRIX versions, strtod() recognizes only "inf", so if the input
- * is "infinity" we have to skip over "inity". Also, it may return
+ * is "infinity" we have to skip over "inity". Also, it may return
* positive infinity for "-inf".
*/
if (isinf(val))
@@ -2050,7 +2050,7 @@ float8_stddev_samp(PG_FUNCTION_ARGS)
* in that order. Note that Y is the first argument to the aggregates!
*
* It might seem attractive to optimize this by having multiple accumulator
- * functions that only calculate the sums actually needed. But on most
+ * functions that only calculate the sums actually needed. But on most
* modern machines, a couple of extra floating-point multiplies will be
* insignificant compared to the other per-tuple overhead, so I've chosen
* to minimize code space instead.
diff --git a/src/backend/utils/adt/format_type.c b/src/backend/utils/adt/format_type.c
index 7c51ee7ecac..5037f090ca1 100644
--- a/src/backend/utils/adt/format_type.c
+++ b/src/backend/utils/adt/format_type.c
@@ -46,14 +46,14 @@ __attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 3)));
* double quoted if it contains funny characters or matches a keyword.
*
* If typemod is NULL then we are formatting a type name in a context where
- * no typemod is available, eg a function argument or result type. This
+ * no typemod is available, eg a function argument or result type. This
* yields a slightly different result from specifying typemod = -1 in some
* cases. Given typemod = -1 we feel compelled to produce an output that
* the parser will interpret as having typemod -1, so that pg_dump will
- * produce CREATE TABLE commands that recreate the original state. But
+ * produce CREATE TABLE commands that recreate the original state. But
* given NULL typemod, we assume that the parser's interpretation of
* typemod doesn't matter, and so we are willing to output a slightly
- * "prettier" representation of the same type. For example, type = bpchar
+ * "prettier" representation of the same type. For example, type = bpchar
* and typemod = NULL gets you "character", whereas typemod = -1 gets you
* "bpchar" --- the former will be interpreted as character(1) by the
* parser, which does not yield typemod -1.
diff --git a/src/backend/utils/adt/formatting.c b/src/backend/utils/adt/formatting.c
index 6f267db0256..a54b215afb9 100644
--- a/src/backend/utils/adt/formatting.c
+++ b/src/backend/utils/adt/formatting.c
@@ -1816,7 +1816,7 @@ str_initcap(const char *buff, size_t nbytes, Oid collid)
/*
* Note: we assume that toupper_l()/tolower_l() will not be so broken
- * as to need guard tests. When using the default collation, we apply
+ * as to need guard tests. When using the default collation, we apply
* the traditional Postgres behavior that forces ASCII-style treatment
* of I/i, but in non-default collations you get exactly what the
* collation says.
@@ -3579,7 +3579,7 @@ do_to_timestamp(text *date_txt, text *fmt,
{
/*
* The month and day field have not been set, so we use the
- * day-of-year field to populate them. Depending on the date mode,
+ * day-of-year field to populate them. Depending on the date mode,
* this field may be interpreted as a Gregorian day-of-year, or an ISO
* week date day-of-year.
*/
diff --git a/src/backend/utils/adt/geo_selfuncs.c b/src/backend/utils/adt/geo_selfuncs.c
index c2e6bc1794e..c576b7d6ff9 100644
--- a/src/backend/utils/adt/geo_selfuncs.c
+++ b/src/backend/utils/adt/geo_selfuncs.c
@@ -22,7 +22,7 @@
/*
- * Selectivity functions for geometric operators. These are bogus -- unless
+ * Selectivity functions for geometric operators. These are bogus -- unless
* we know the actual key distribution in the index, we can't make a good
* prediction of the selectivity of these operators.
*
@@ -34,7 +34,7 @@
* In general, GiST needs to search multiple subtrees in order to guarantee
* that all occurrences of the same key have been found. Because of this,
* the estimated cost for scanning the index ought to be higher than the
- * output selectivity would indicate. gistcostestimate(), over in selfuncs.c,
+ * output selectivity would indicate. gistcostestimate(), over in selfuncs.c,
* ought to be adjusted accordingly --- but until we can generate somewhat
* realistic numbers here, it hardly matters...
*/
diff --git a/src/backend/utils/adt/inet_cidr_ntop.c b/src/backend/utils/adt/inet_cidr_ntop.c
index 5f2a3d361d9..d33534ec173 100644
--- a/src/backend/utils/adt/inet_cidr_ntop.c
+++ b/src/backend/utils/adt/inet_cidr_ntop.c
@@ -196,7 +196,7 @@ inet_cidr_ntop_ipv6(const u_char *src, int bits, char *dst, size_t size)
}
else
{
- /* Copy src to private buffer. Zero host part. */
+ /* Copy src to private buffer. Zero host part. */
p = (bits + 7) / 8;
memcpy(inbuf, src, p);
memset(inbuf + p, 0, 16 - p);
diff --git a/src/backend/utils/adt/int.c b/src/backend/utils/adt/int.c
index 07493e3c3f8..3ca243fb0ad 100644
--- a/src/backend/utils/adt/int.c
+++ b/src/backend/utils/adt/int.c
@@ -642,7 +642,7 @@ int4pl(PG_FUNCTION_ARGS)
result = arg1 + arg2;
/*
- * Overflow check. If the inputs are of different signs then their sum
+ * Overflow check. If the inputs are of different signs then their sum
* cannot overflow. If the inputs are of the same sign, their sum had
* better be that sign too.
*/
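/*
 * A compact sketch of the sign test described above; SAMESIGN is assumed
 * to be int.c's usual macro, and the ereport call is abbreviated.
 */
#define SAMESIGN(a,b)	(((a) < 0) == ((b) < 0))

if (SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
	ereport(ERROR,
			(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
			 errmsg("integer out of range")));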
@@ -663,8 +663,8 @@ int4mi(PG_FUNCTION_ARGS)
result = arg1 - arg2;
/*
- * Overflow check. If the inputs are of the same sign then their
- * difference cannot overflow. If they are of different signs then the
+ * Overflow check. If the inputs are of the same sign then their
+ * difference cannot overflow. If they are of different signs then the
* result should be of the same sign as the first input.
*/
if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
@@ -684,7 +684,7 @@ int4mul(PG_FUNCTION_ARGS)
result = arg1 * arg2;
/*
- * Overflow check. We basically check to see if result / arg2 gives arg1
+ * Overflow check. We basically check to see if result / arg2 gives arg1
* again. There are two cases where this fails: arg2 = 0 (which cannot
* overflow) and arg1 = INT_MIN, arg2 = -1 (where the division itself will
* overflow and thus incorrectly match).
@@ -794,7 +794,7 @@ int2pl(PG_FUNCTION_ARGS)
result = arg1 + arg2;
/*
- * Overflow check. If the inputs are of different signs then their sum
+ * Overflow check. If the inputs are of different signs then their sum
* cannot overflow. If the inputs are of the same sign, their sum had
* better be that sign too.
*/
@@ -815,8 +815,8 @@ int2mi(PG_FUNCTION_ARGS)
result = arg1 - arg2;
/*
- * Overflow check. If the inputs are of the same sign then their
- * difference cannot overflow. If they are of different signs then the
+ * Overflow check. If the inputs are of the same sign then their
+ * difference cannot overflow. If they are of different signs then the
* result should be of the same sign as the first input.
*/
if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
@@ -897,7 +897,7 @@ int24pl(PG_FUNCTION_ARGS)
result = arg1 + arg2;
/*
- * Overflow check. If the inputs are of different signs then their sum
+ * Overflow check. If the inputs are of different signs then their sum
* cannot overflow. If the inputs are of the same sign, their sum had
* better be that sign too.
*/
@@ -918,8 +918,8 @@ int24mi(PG_FUNCTION_ARGS)
result = arg1 - arg2;
/*
- * Overflow check. If the inputs are of the same sign then their
- * difference cannot overflow. If they are of different signs then the
+ * Overflow check. If the inputs are of the same sign then their
+ * difference cannot overflow. If they are of different signs then the
* result should be of the same sign as the first input.
*/
if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
@@ -939,7 +939,7 @@ int24mul(PG_FUNCTION_ARGS)
result = arg1 * arg2;
/*
- * Overflow check. We basically check to see if result / arg2 gives arg1
+ * Overflow check. We basically check to see if result / arg2 gives arg1
* again. There is one case where this fails: arg2 = 0 (which cannot
* overflow).
*
@@ -985,7 +985,7 @@ int42pl(PG_FUNCTION_ARGS)
result = arg1 + arg2;
/*
- * Overflow check. If the inputs are of different signs then their sum
+ * Overflow check. If the inputs are of different signs then their sum
* cannot overflow. If the inputs are of the same sign, their sum had
* better be that sign too.
*/
@@ -1006,8 +1006,8 @@ int42mi(PG_FUNCTION_ARGS)
result = arg1 - arg2;
/*
- * Overflow check. If the inputs are of the same sign then their
- * difference cannot overflow. If they are of different signs then the
+ * Overflow check. If the inputs are of the same sign then their
+ * difference cannot overflow. If they are of different signs then the
* result should be of the same sign as the first input.
*/
if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
@@ -1027,7 +1027,7 @@ int42mul(PG_FUNCTION_ARGS)
result = arg1 * arg2;
/*
- * Overflow check. We basically check to see if result / arg1 gives arg2
+ * Overflow check. We basically check to see if result / arg1 gives arg2
* again. There is one case where this fails: arg1 = 0 (which cannot
* overflow).
*
diff --git a/src/backend/utils/adt/int8.c b/src/backend/utils/adt/int8.c
index b28b6f03e7e..a5723af3043 100644
--- a/src/backend/utils/adt/int8.c
+++ b/src/backend/utils/adt/int8.c
@@ -73,7 +73,7 @@ scanint8(const char *str, bool errorOK, int64 *result)
ptr++;
/*
- * Do an explicit check for INT64_MIN. Ugly though this is, it's
+ * Do an explicit check for INT64_MIN. Ugly though this is, it's
* cleaner than trying to get the loop below to handle it portably.
*/
if (strncmp(ptr, "9223372036854775808", 19) == 0)
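A sketch of why the textual INT64_MIN test is needed: the value's magnitude cannot first be accumulated as a positive int64 and then negated. Names here are illustrative, and scanint8 itself additionally handles whitespace and error reporting.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static bool
parse_int64(const char *str, int64_t *result)
{
    const char *ptr = str;
    bool        neg = false;
    int64_t     val = 0;

    if (*ptr == '-')
    {
        neg = true;
        ptr++;
        /* INT64_MIN's absolute value doesn't fit, so match it textually */
        if (strcmp(ptr, "9223372036854775808") == 0)
        {
            *result = INT64_MIN;
            return true;
        }
    }
    else if (*ptr == '+')
        ptr++;

    if (*ptr == '\0')
        return false;           /* no digits at all */
    for (; *ptr >= '0' && *ptr <= '9'; ptr++)
    {
        int digit = *ptr - '0';

        if (val > (INT64_MAX - digit) / 10)
            return false;       /* next step would overflow */
        val = val * 10 + digit;
    }
    if (*ptr != '\0')
        return false;           /* trailing junk */
    *result = neg ? -val : val;
    return true;
}

int
main(void)
{
    int64_t v;

    printf("%d\n", parse_int64("-9223372036854775808", &v)); /* 1: accepted */
    printf("%d\n", parse_int64("9223372036854775808", &v));  /* 0: too big */
    return 0;
}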
@@ -519,7 +519,7 @@ int8pl(PG_FUNCTION_ARGS)
result = arg1 + arg2;
/*
- * Overflow check. If the inputs are of different signs then their sum
+ * Overflow check. If the inputs are of different signs then their sum
* cannot overflow. If the inputs are of the same sign, their sum had
* better be that sign too.
*/
@@ -540,8 +540,8 @@ int8mi(PG_FUNCTION_ARGS)
result = arg1 - arg2;
/*
- * Overflow check. If the inputs are of the same sign then their
- * difference cannot overflow. If they are of different signs then the
+ * Overflow check. If the inputs are of the same sign then their
+ * difference cannot overflow. If they are of different signs then the
* result should be of the same sign as the first input.
*/
if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
@@ -561,7 +561,7 @@ int8mul(PG_FUNCTION_ARGS)
result = arg1 * arg2;
/*
- * Overflow check. We basically check to see if result / arg2 gives arg1
+ * Overflow check. We basically check to see if result / arg2 gives arg1
* again. There are two cases where this fails: arg2 = 0 (which cannot
* overflow) and arg1 = INT64_MIN, arg2 = -1 (where the division itself
* will overflow and thus incorrectly match).
@@ -719,7 +719,7 @@ int8inc(PG_FUNCTION_ARGS)
/*
* These functions are exactly like int8inc but are used for aggregates that
- * count only non-null values. Since the functions are declared strict,
+ * count only non-null values. Since the functions are declared strict,
* the null checks happen before we ever get here, and all we need do is
* increment the state value. We could actually make these pg_proc entries
* point right at int8inc, but then the opr_sanity regression test would
@@ -773,7 +773,7 @@ int84pl(PG_FUNCTION_ARGS)
result = arg1 + arg2;
/*
- * Overflow check. If the inputs are of different signs then their sum
+ * Overflow check. If the inputs are of different signs then their sum
* cannot overflow. If the inputs are of the same sign, their sum had
* better be that sign too.
*/
@@ -794,8 +794,8 @@ int84mi(PG_FUNCTION_ARGS)
result = arg1 - arg2;
/*
- * Overflow check. If the inputs are of the same sign then their
- * difference cannot overflow. If they are of different signs then the
+ * Overflow check. If the inputs are of the same sign then their
+ * difference cannot overflow. If they are of different signs then the
* result should be of the same sign as the first input.
*/
if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
@@ -815,7 +815,7 @@ int84mul(PG_FUNCTION_ARGS)
result = arg1 * arg2;
/*
- * Overflow check. We basically check to see if result / arg1 gives arg2
+ * Overflow check. We basically check to see if result / arg1 gives arg2
* again. There is one case where this fails: arg1 = 0 (which cannot
* overflow).
*
@@ -882,7 +882,7 @@ int48pl(PG_FUNCTION_ARGS)
result = arg1 + arg2;
/*
- * Overflow check. If the inputs are of different signs then their sum
+ * Overflow check. If the inputs are of different signs then their sum
* cannot overflow. If the inputs are of the same sign, their sum had
* better be that sign too.
*/
@@ -903,8 +903,8 @@ int48mi(PG_FUNCTION_ARGS)
result = arg1 - arg2;
/*
- * Overflow check. If the inputs are of the same sign then their
- * difference cannot overflow. If they are of different signs then the
+ * Overflow check. If the inputs are of the same sign then their
+ * difference cannot overflow. If they are of different signs then the
* result should be of the same sign as the first input.
*/
if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
@@ -924,7 +924,7 @@ int48mul(PG_FUNCTION_ARGS)
result = arg1 * arg2;
/*
- * Overflow check. We basically check to see if result / arg2 gives arg1
+ * Overflow check. We basically check to see if result / arg2 gives arg1
* again. There is one case where this fails: arg2 = 0 (which cannot
* overflow).
*
@@ -970,7 +970,7 @@ int82pl(PG_FUNCTION_ARGS)
result = arg1 + arg2;
/*
- * Overflow check. If the inputs are of different signs then their sum
+ * Overflow check. If the inputs are of different signs then their sum
* cannot overflow. If the inputs are of the same sign, their sum had
* better be that sign too.
*/
@@ -991,8 +991,8 @@ int82mi(PG_FUNCTION_ARGS)
result = arg1 - arg2;
/*
- * Overflow check. If the inputs are of the same sign then their
- * difference cannot overflow. If they are of different signs then the
+ * Overflow check. If the inputs are of the same sign then their
+ * difference cannot overflow. If they are of different signs then the
* result should be of the same sign as the first input.
*/
if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
@@ -1012,7 +1012,7 @@ int82mul(PG_FUNCTION_ARGS)
result = arg1 * arg2;
/*
- * Overflow check. We basically check to see if result / arg1 gives arg2
+ * Overflow check. We basically check to see if result / arg1 gives arg2
* again. There is one case where this fails: arg1 = 0 (which cannot
* overflow).
*
@@ -1079,7 +1079,7 @@ int28pl(PG_FUNCTION_ARGS)
result = arg1 + arg2;
/*
- * Overflow check. If the inputs are of different signs then their sum
+ * Overflow check. If the inputs are of different signs then their sum
* cannot overflow. If the inputs are of the same sign, their sum had
* better be that sign too.
*/
@@ -1100,8 +1100,8 @@ int28mi(PG_FUNCTION_ARGS)
result = arg1 - arg2;
/*
- * Overflow check. If the inputs are of the same sign then their
- * difference cannot overflow. If they are of different signs then the
+ * Overflow check. If the inputs are of the same sign then their
+ * difference cannot overflow. If they are of different signs then the
* result should be of the same sign as the first input.
*/
if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
@@ -1121,7 +1121,7 @@ int28mul(PG_FUNCTION_ARGS)
result = arg1 * arg2;
/*
- * Overflow check. We basically check to see if result / arg2 gives arg1
+ * Overflow check. We basically check to see if result / arg2 gives arg1
* again. There is one case where this fails: arg2 = 0 (which cannot
* overflow).
*
diff --git a/src/backend/utils/adt/like.c b/src/backend/utils/adt/like.c
index 09e8698af2c..3137711c384 100644
--- a/src/backend/utils/adt/like.c
+++ b/src/backend/utils/adt/like.c
@@ -76,12 +76,12 @@ wchareq(char *p1, char *p2)
/*
* Formerly we had a routine iwchareq() here that tried to do case-insensitive
- * comparison of multibyte characters. It did not work at all, however,
+ * comparison of multibyte characters. It did not work at all, however,
* because it relied on tolower() which has a single-byte API ... and
* towlower() wouldn't be much better since we have no suitably cheap way
* of getting a single character transformed to the system's wchar_t format.
* So now, we just downcase the strings using lower() and apply regular LIKE
- * comparison. This should be revisited when we install better locale support.
+ * comparison. This should be revisited when we install better locale support.
*/
/*
diff --git a/src/backend/utils/adt/misc.c b/src/backend/utils/adt/misc.c
index 5bda4af50fd..3b6300dc40f 100644
--- a/src/backend/utils/adt/misc.c
+++ b/src/backend/utils/adt/misc.c
@@ -278,7 +278,7 @@ pg_sleep(PG_FUNCTION_ARGS)
* pg_usleep's upper bound on allowed delays.
*
* By computing the intended stop time initially, we avoid accumulation of
- * extra delay across multiple sleeps. This also ensures we won't delay
+ * extra delay across multiple sleeps. This also ensures we won't delay
* less than the specified time if pg_usleep is interrupted by other
* signals such as SIGHUP.
*/
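A minimal sketch of the compute-the-stop-time-once pattern, assuming POSIX clock_gettime/nanosleep in place of pg_usleep; the 1-second chunk stands in for pg_usleep's upper bound.

#include <stdio.h>
#include <time.h>

static void
sleep_seconds(double secs)
{
    struct timespec now;
    double          stop;

    clock_gettime(CLOCK_MONOTONIC, &now);
    /* fix the intended wakeup time once, up front, so repeated short
     * sleeps and signal interruptions cannot accumulate extra delay */
    stop = now.tv_sec + now.tv_nsec / 1e9 + secs;

    for (;;)
    {
        double remain;

        clock_gettime(CLOCK_MONOTONIC, &now);
        remain = stop - (now.tv_sec + now.tv_nsec / 1e9);
        if (remain <= 0)
            break;              /* reached the stop time */
        if (remain > 1.0)
            remain = 1.0;       /* sleep in bounded chunks */

        struct timespec req;

        req.tv_sec = (time_t) remain;
        req.tv_nsec = (long) ((remain - req.tv_sec) * 1e9);
        nanosleep(&req, NULL);  /* may return early on a signal; loop retries */
    }
}

int
main(void)
{
    sleep_seconds(0.3);
    puts("done");
    return 0;
}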
diff --git a/src/backend/utils/adt/nabstime.c b/src/backend/utils/adt/nabstime.c
index 6771e78af8e..bdd98d98d54 100644
--- a/src/backend/utils/adt/nabstime.c
+++ b/src/backend/utils/adt/nabstime.c
@@ -198,7 +198,7 @@ tm2abstime(struct pg_tm * tm, int tz)
sec = tm->tm_sec + tz + (tm->tm_min + (day * HOURS_PER_DAY + tm->tm_hour) * MINS_PER_HOUR) * SECS_PER_MINUTE;
/*
- * check for overflow. We need a little slop here because the H/M/S plus
+ * check for overflow. We need a little slop here because the H/M/S plus
* TZ offset could add up to more than 1 day.
*/
if ((day >= MAX_DAYNUM - 10 && sec < 0) ||
@@ -1163,7 +1163,7 @@ tintervalsame(PG_FUNCTION_ARGS)
* 1. The interval length computations overflow at 2^31 seconds, causing
* intervals longer than that to sort oddly compared to those shorter.
* 2. infinity and minus infinity (NOEND_ABSTIME and NOSTART_ABSTIME) are
- * just ordinary integers. Since this code doesn't handle them specially,
+ * just ordinary integers. Since this code doesn't handle them specially,
* it's possible for [a b] to be considered longer than [c infinity] for
* finite abstimes a, b, c. In combination with the previous point, the
* interval [-infinity infinity] is treated as being shorter than many finite
diff --git a/src/backend/utils/adt/network.c b/src/backend/utils/adt/network.c
index a968d707994..7ebae765400 100644
--- a/src/backend/utils/adt/network.c
+++ b/src/backend/utils/adt/network.c
@@ -29,7 +29,7 @@ static int ip_addrsize(inet *inetptr);
static inet *internal_inetpl(inet *ip, int64 addend);
/*
- * Access macros. We use VARDATA_ANY so that we can process short-header
+ * Access macros. We use VARDATA_ANY so that we can process short-header
* varlena values without detoasting them. This requires a trick:
* VARDATA_ANY assumes the varlena header is already filled in, which is
* not the case when constructing a new value (until SET_INET_VARSIZE is
@@ -88,7 +88,7 @@ network_in(char *src, bool is_cidr)
dst = (inet *) palloc0(sizeof(inet));
/*
- * First, check to see if this is an IPv6 or IPv4 address. IPv6 addresses
+ * First, check to see if this is an IPv6 or IPv4 address. IPv6 addresses
* will have a : somewhere in them (several, in fact) so if there is one
* present, assume it's V6, otherwise assume it's V4.
*/
@@ -193,7 +193,7 @@ cidr_out(PG_FUNCTION_ARGS)
* family, bits, is_cidr, address length, address in network byte order.
*
* Presence of is_cidr is largely for historical reasons, though it might
- * allow some code-sharing on the client side. We send it correctly on
+ * allow some code-sharing on the client side. We send it correctly on
* output, but ignore the value on input.
*/
static inet *
@@ -1392,7 +1392,7 @@ inetmi(PG_FUNCTION_ARGS)
/*
* We form the difference using the traditional complement, increment,
* and add rule, with the increment part being handled by starting the
- * carry off at 1. If you don't think integer arithmetic is done in
+ * carry off at 1. If you don't think integer arithmetic is done in
* two's complement, too bad.
*/
int nb = ip_addrsize(ip);
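A standalone sketch of the complement, increment, and add rule on raw address bytes, with the increment folded into the initial carry exactly as the comment describes; bytes_subtract is an illustrative name, not the backend's.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

static void
bytes_subtract(const uint8_t *a, const uint8_t *b, uint8_t *res, size_t nb)
{
    int     carry = 1;          /* the "increment" part of two's complement */
    size_t  i;

    /* work from the least significant (rightmost) byte upward */
    for (i = nb; i-- > 0;)
    {
        int sum = a[i] + (uint8_t) ~b[i] + carry;

        res[i] = (uint8_t) sum;
        carry = sum >> 8;
    }
}

int
main(void)
{
    uint8_t a[4] = {192, 168, 1, 10};
    uint8_t b[4] = {192, 168, 1, 3};
    uint8_t r[4];

    bytes_subtract(a, b, r, 4);
    printf("%u\n", r[3]);       /* 7: 192.168.1.10 minus 192.168.1.3 */
    return 0;
}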
@@ -1414,7 +1414,7 @@ inetmi(PG_FUNCTION_ARGS)
else
{
/*
- * Input wider than int64: check for overflow. All bytes to
+ * Input wider than int64: check for overflow. All bytes to
* the left of what will fit should be 0 or 0xFF, depending on
* sign of the now-complete result.
*/
@@ -1445,9 +1445,9 @@ inetmi(PG_FUNCTION_ARGS)
* XXX This should go away someday!
*
* This is a kluge needed because we don't yet support zones in stored inet
- * values. Since the result of getnameinfo() might include a zone spec,
+ * values. Since the result of getnameinfo() might include a zone spec,
* call this to remove it anywhere we want to feed getnameinfo's output to
- * network_in. Beats failing entirely.
+ * network_in. Beats failing entirely.
*
* An alternative approach would be to let network_in ignore %-parts for
* itself, but that would mean we'd silently drop zone specs in user input,
diff --git a/src/backend/utils/adt/numeric.c b/src/backend/utils/adt/numeric.c
index 6b60a5c1c78..5091306b0e9 100644
--- a/src/backend/utils/adt/numeric.c
+++ b/src/backend/utils/adt/numeric.c
@@ -49,7 +49,7 @@
* Numeric values are represented in a base-NBASE floating point format.
* Each "digit" ranges from 0 to NBASE-1. The type NumericDigit is signed
* and wide enough to store a digit. We assume that NBASE*NBASE can fit in
- * an int. Although the purely calculational routines could handle any even
+ * an int. Although the purely calculational routines could handle any even
* NBASE that's less than sqrt(INT_MAX), in practice we are only interested
* in NBASE a power of ten, so that I/O conversions and decimal rounding
* are easy. Also, it's actually more efficient if NBASE is rather less than
@@ -94,11 +94,11 @@ typedef int16 NumericDigit;
* If the high bits of the first word of a NumericChoice (n_header, or
* n_short.n_header, or n_long.n_sign_dscale) are NUMERIC_SHORT, then the
* numeric follows the NumericShort format; if they are NUMERIC_POS or
- * NUMERIC_NEG, it follows the NumericLong format. If they are NUMERIC_NAN,
+ * NUMERIC_NEG, it follows the NumericLong format. If they are NUMERIC_NAN,
* it is a NaN. We currently always store a NaN using just two bytes (i.e.
* only n_header), but previous releases used only the NumericLong format,
* so we might find 4-byte NaNs on disk if a database has been migrated using
- * pg_upgrade. In either case, when the high bits indicate a NaN, the
+ * pg_upgrade. In either case, when the high bits indicate a NaN, the
* remaining bits are never examined. Currently, we always initialize these
* to zero, but it might be possible to use them for some other purpose in
* the future.
@@ -206,19 +206,19 @@ struct NumericData
: ((n)->choice.n_long.n_weight))
/* ----------
- * NumericVar is the format we use for arithmetic. The digit-array part
+ * NumericVar is the format we use for arithmetic. The digit-array part
* is the same as the NumericData storage format, but the header is more
* complex.
*
* The value represented by a NumericVar is determined by the sign, weight,
* ndigits, and digits[] array.
* Note: the first digit of a NumericVar's value is assumed to be multiplied
- * by NBASE ** weight. Another way to say it is that there are weight+1
+ * by NBASE ** weight. Another way to say it is that there are weight+1
* digits before the decimal point. It is possible to have weight < 0.
*
* buf points at the physical start of the palloc'd digit buffer for the
- * NumericVar. digits points at the first digit in actual use (the one
- * with the specified weight). We normally leave an unused digit or two
+ * NumericVar. digits points at the first digit in actual use (the one
+ * with the specified weight). We normally leave an unused digit or two
* (preset to zeroes) between buf and digits, so that there is room to store
* a carry out of the top digit without reallocating space. We just need to
* decrement digits (and increment weight) to make room for the carry digit.
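A worked sketch of the digits/weight encoding with NBASE = 10000. The variable names merely stand in for the NumericVar fields, and a double approximates the exact value for display.

#include <stdio.h>
#include <math.h>

#define NBASE 10000

int
main(void)
{
    /* illustrative stand-ins for the weight/ndigits/digits[] fields */
    short   digits[] = {1234, 5678, 9062, 5000};
    int     weight = 1;         /* first digit is scaled by NBASE^1 */
    int     ndigits = 4;
    double  value = 0.0;
    int     i;

    /* value = sum of digits[i] * NBASE^(weight - i) */
    for (i = 0; i < ndigits; i++)
        value += digits[i] * pow(NBASE, weight - i);

    /* weight+1 = 2 NBASE digits sit before the decimal point: 1234, 5678 */
    printf("%.5f\n", value);    /* 12345678.90625 */
    return 0;
}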
@@ -592,7 +592,7 @@ numeric_maximum_size(int32 typmod)
* In most cases, the size of a numeric will be smaller than the value
* computed below, because the varlena header will typically get toasted
* down to a single byte before being stored on disk, and it may also be
- * possible to use a short numeric header. But our job here is to compute
+ * possible to use a short numeric header. But our job here is to compute
* the worst case.
*/
return NUMERIC_HDRSZ + (numeric_digits * sizeof(NumericDigit));
@@ -913,7 +913,7 @@ numeric_uminus(PG_FUNCTION_ARGS)
/*
* The packed format is known to be totally zero digit trimmed always. So
- * we can identify a ZERO by the fact that there are no digits at all. Do
+ * we can identify a ZERO by the fact that there are no digits at all. Do
* nothing to a zero.
*/
if (NUMERIC_NDIGITS(num) != 0)
@@ -1926,7 +1926,7 @@ numeric_sqrt(PG_FUNCTION_ARGS)
PG_RETURN_NUMERIC(make_result(&const_nan));
/*
- * Unpack the argument and determine the result scale. We choose a scale
+ * Unpack the argument and determine the result scale. We choose a scale
* to give at least NUMERIC_MIN_SIG_DIGITS significant digits; but in any
* case not less than the input's dscale.
*/
@@ -1979,7 +1979,7 @@ numeric_exp(PG_FUNCTION_ARGS)
PG_RETURN_NUMERIC(make_result(&const_nan));
/*
- * Unpack the argument and determine the result scale. We choose a scale
+ * Unpack the argument and determine the result scale. We choose a scale
* to give at least NUMERIC_MIN_SIG_DIGITS significant digits; but in any
* case not less than the input's dscale.
*/
@@ -2585,9 +2585,9 @@ numeric_avg_accum(PG_FUNCTION_ARGS)
/*
* Integer data types all use Numeric accumulators to share code and
- * avoid risk of overflow. For int2 and int4 inputs, Numeric accumulation
+ * avoid risk of overflow. For int2 and int4 inputs, Numeric accumulation
* is overkill for the N and sum(X) values, but definitely not overkill
- * for the sum(X*X) value. Hence, we use int2_accum and int4_accum only
+ * for the sum(X*X) value. Hence, we use int2_accum and int4_accum only
* for stddev/variance --- there are faster special-purpose accumulator
* routines for SUM and AVG of these datatypes.
*/
@@ -2850,7 +2850,7 @@ numeric_stddev_pop(PG_FUNCTION_ARGS)
* the initial condition of the transition data value needs to be NULL. This
* means we can't rely on ExecAgg to automatically insert the first non-null
* data value into the transition data: it doesn't know how to do the type
- * conversion. The upshot is that these routines have to be marked non-strict
+ * conversion. The upshot is that these routines have to be marked non-strict
* and handle substitution of the first non-null input themselves.
*/
@@ -3248,7 +3248,7 @@ set_var_from_str(const char *str, const char *cp, NumericVar *dest)
/*
* We first parse the string to extract decimal digits and determine the
- * correct decimal weight. Then convert to NBASE representation.
+ * correct decimal weight. Then convert to NBASE representation.
*/
switch (*cp)
{
@@ -3838,7 +3838,7 @@ apply_typmod(NumericVar *var, int32 typmod)
/*
* Convert numeric to int8, rounding if needed.
*
- * If overflow, return FALSE (no error is raised). Return TRUE if okay.
+ * If overflow, return FALSE (no error is raised). Return TRUE if okay.
*
* CAUTION: var's contents may be modified by rounding!
*/
@@ -4302,7 +4302,7 @@ sub_var(NumericVar *var1, NumericVar *var2, NumericVar *result)
* mul_var() -
*
* Multiplication on variable level. Product of var1 * var2 is stored
- * in result. Result is rounded to no more than rscale fractional digits.
+ * in result. Result is rounded to no more than rscale fractional digits.
*/
static void
mul_var(NumericVar *var1, NumericVar *var2, NumericVar *result,
@@ -4346,7 +4346,7 @@ mul_var(NumericVar *var1, NumericVar *var2, NumericVar *result,
/*
* Determine number of result digits to compute. If the exact result
* would have more than rscale fractional digits, truncate the computation
- * with MUL_GUARD_DIGITS guard digits. We do that by pretending that one
+ * with MUL_GUARD_DIGITS guard digits. We do that by pretending that one
* or both inputs have fewer digits than they really do.
*/
res_ndigits = var1ndigits + var2ndigits + 1;
@@ -4589,7 +4589,7 @@ div_var(NumericVar *var1, NumericVar *var2, NumericVar *result,
*
* We need the first divisor digit to be >= NBASE/2. If it isn't,
* make it so by scaling up both the divisor and dividend by the
- * factor "d". (The reason for allocating dividend[0] above is to
+ * factor "d". (The reason for allocating dividend[0] above is to
* leave room for possible carry here.)
*/
if (divisor[1] < HALF_NBASE)
@@ -4633,7 +4633,7 @@ div_var(NumericVar *var1, NumericVar *var2, NumericVar *result,
/*
* If next2digits are 0, then quotient digit must be 0 and there's
- * no need to adjust the working dividend. It's worth testing
+ * no need to adjust the working dividend. It's worth testing
* here to fall out ASAP when processing trailing zeroes in a
* dividend.
*/
@@ -4651,7 +4651,7 @@ div_var(NumericVar *var1, NumericVar *var2, NumericVar *result,
/*
* Adjust quotient digit if it's too large. Knuth proves that
* after this step, the quotient digit will be either correct or
- * just one too large. (Note: it's OK to use dividend[j+2] here
+ * just one too large. (Note: it's OK to use dividend[j+2] here
* because we know the divisor length is at least 2.)
*/
while (divisor2 * qhat >
@@ -4826,7 +4826,7 @@ div_var_fast(NumericVar *var1, NumericVar *var2, NumericVar *result,
* dividend's digits (plus appended zeroes to reach the desired precision
* including guard digits). Each step of the main loop computes an
* (approximate) quotient digit and stores it into div[], removing one
- * position of dividend space. A final pass of carry propagation takes
+ * position of dividend space. A final pass of carry propagation takes
* care of any mistaken quotient digits.
*/
div = (int *) palloc0((div_ndigits + 1) * sizeof(int));
@@ -5683,7 +5683,7 @@ power_var_int(NumericVar *base, int exp, NumericVar *result, int rscale)
/*
* The general case repeatedly multiplies base according to the bit
- * pattern of exp. We do the multiplications with some extra precision.
+ * pattern of exp. We do the multiplications with some extra precision.
*/
neg = (exp < 0);
exp = Abs(exp);
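A minimal sketch of multiplying according to the bit pattern of exp (square-and-multiply), using plain int64 arithmetic in place of NumericVar and ignoring overflow, negative exponents, and the extra precision the real code carries:

#include <stdint.h>
#include <stdio.h>

static int64_t
ipow(int64_t base, unsigned int exp)
{
    int64_t result = 1;

    while (exp > 0)
    {
        if (exp & 1)
            result *= base;     /* this bit of exp contributes a factor */
        base *= base;           /* square the base for the next bit */
        exp >>= 1;
    }
    return result;
}

int
main(void)
{
    printf("%lld\n", (long long) ipow(3, 5));   /* 243 */
    return 0;
}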
diff --git a/src/backend/utils/adt/oid.c b/src/backend/utils/adt/oid.c
index 495b6261e62..4510acd17a1 100644
--- a/src/backend/utils/adt/oid.c
+++ b/src/backend/utils/adt/oid.c
@@ -318,7 +318,7 @@ oidparse(Node *node)
/*
* Values too large for int4 will be represented as Float
- * constants by the lexer. Accept these if they are valid OID
+ * constants by the lexer. Accept these if they are valid OID
* strings.
*/
return oidin_subr(strVal(node), NULL);
diff --git a/src/backend/utils/adt/pg_locale.c b/src/backend/utils/adt/pg_locale.c
index c3181be0d2f..82c77ed768d 100644
--- a/src/backend/utils/adt/pg_locale.c
+++ b/src/backend/utils/adt/pg_locale.c
@@ -20,12 +20,12 @@
*
* The other categories, LC_MONETARY, LC_NUMERIC, and LC_TIME are also
* settable at run-time. However, we don't actually set those locale
- * categories permanently. This would have bizarre effects like no
+ * categories permanently. This would have bizarre effects like no
* longer accepting standard floating-point literals in some locales.
* Instead, we only set the locales briefly when needed, cache the
* required information obtained from localeconv(), and set them back.
* The cached information is only used by the formatting functions
- * (to_char, etc.) and the money type. For the user, this should all be
+ * (to_char, etc.) and the money type. For the user, this should all be
* transparent.
*
* !!! NOW HEAR THIS !!!
@@ -39,7 +39,7 @@
* fail = true;
* setlocale(category, save);
* DOES NOT WORK RELIABLY: on some platforms the second setlocale() call
- * will change the memory save is pointing at. To do this sort of thing
+ * will change the memory save is pointing at. To do this sort of thing
* safely, you *must* pstrdup what setlocale returns the first time.
*
* FYI, The Open Group locale standard is defined here:
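A standalone illustration of the save/restore pitfall, using plain strdup in place of pstrdup:

#include <locale.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int
main(void)
{
    /*
     * WRONG: char *save = setlocale(LC_CTYPE, NULL); -- on some platforms
     * the next setlocale() call clobbers the storage save points at.
     */
    char *save = strdup(setlocale(LC_CTYPE, NULL));     /* private copy */

    if (setlocale(LC_CTYPE, "C") == NULL)
        fprintf(stderr, "setlocale failed\n");

    /* ... locale-dependent work here ... */

    setlocale(LC_CTYPE, save);  /* safe: restores from our own copy */
    free(save);
    return 0;
}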
@@ -253,7 +253,7 @@ check_locale(int category, const char *value)
*
* For most locale categories, the assign hook doesn't actually set the locale
* permanently, just reset flags so that the next use will cache the
- * appropriate values. (See explanation at the top of this file.)
+ * appropriate values. (See explanation at the top of this file.)
*
* Note: we accept value = "" as selecting the postmaster's environment
* value, whatever it was (so long as the environment setting is legal).
@@ -766,7 +766,7 @@ IsoLocaleName(const char *winlocname)
* could fail if the locale is C, so str_tolower() shouldn't call it
* in that case.
*
- * Note that we currently lack any way to flush the cache. Since we don't
+ * Note that we currently lack any way to flush the cache. Since we don't
* support ALTER COLLATION, this is OK. The worst case is that someone
* drops a collation, and a useless cache entry hangs around in existing
* backends.
@@ -960,7 +960,7 @@ report_newlocale_failure(const char *localename)
/*
- * Create a locale_t from a collation OID. Results are cached for the
+ * Create a locale_t from a collation OID. Results are cached for the
* lifetime of the backend. Thus, do not free the result with freelocale().
*
* As a special optimization, the default/database collation returns 0.
@@ -1143,7 +1143,7 @@ wchar2char(char *to, const wchar_t *from, size_t tolen, pg_locale_t locale)
* This has almost the API of mbstowcs_l(), except that *from need not be
* null-terminated; instead, the number of input bytes is specified as
* fromlen. Also, we ereport() rather than returning -1 for invalid
- * input encoding. tolen is the maximum number of wchar_t's to store at *to.
+ * input encoding. tolen is the maximum number of wchar_t's to store at *to.
* The output will be zero-terminated iff there is room.
*/
size_t
diff --git a/src/backend/utils/adt/pg_lzcompress.c b/src/backend/utils/adt/pg_lzcompress.c
index 6b0fd364e44..f5c8321a44e 100644
--- a/src/backend/utils/adt/pg_lzcompress.c
+++ b/src/backend/utils/adt/pg_lzcompress.c
@@ -578,7 +578,7 @@ pglz_compress(const char *source, int32 slen, PGLZ_Header *dest,
/*
* If we've emitted more than first_success_by bytes without finding
- * anything compressible at all, fail. This lets us fall out
+ * anything compressible at all, fail. This lets us fall out
* reasonably quickly when looking at incompressible input (such as
* pre-compressed data).
*/
@@ -602,7 +602,7 @@ pglz_compress(const char *source, int32 slen, PGLZ_Header *dest,
hist_next, hist_recycle,
dp, dend);
dp++; /* Do not do this ++ in the line above! */
- /* The macro would do it four times - Jan. */
+ /* The macro would do it four times - Jan. */
}
found_match = true;
}
@@ -616,7 +616,7 @@ pglz_compress(const char *source, int32 slen, PGLZ_Header *dest,
hist_next, hist_recycle,
dp, dend);
dp++; /* Do not do this ++ in the line above! */
- /* The macro would do it four times - Jan. */
+ /* The macro would do it four times - Jan. */
}
}
diff --git a/src/backend/utils/adt/pseudotypes.c b/src/backend/utils/adt/pseudotypes.c
index ddb1bd2b71c..638618db89d 100644
--- a/src/backend/utils/adt/pseudotypes.c
+++ b/src/backend/utils/adt/pseudotypes.c
@@ -6,7 +6,7 @@
* A pseudo-type isn't really a type and never has any operations, but
* we do need to supply input and output functions to satisfy the links
* in the pseudo-type's entry in pg_type. In most cases the functions
- * just throw an error if invoked. (XXX the error messages here cover
+ * just throw an error if invoked. (XXX the error messages here cover
* the most common case, but might be confusing in some contexts. Can
* we do better?)
*
@@ -138,7 +138,7 @@ anyarray_out(PG_FUNCTION_ARGS)
* anyarray_recv - binary input routine for pseudo-type ANYARRAY.
*
* XXX this could actually be made to work, since the incoming array
- * data will contain the element type OID. Need to think through
+ * data will contain the element type OID. Need to think through
* type-safety issues before allowing it, however.
*/
Datum
@@ -192,7 +192,7 @@ anyenum_out(PG_FUNCTION_ARGS)
* void_in - input routine for pseudo-type VOID.
*
* We allow this so that PL functions can return VOID without any special
- * hack in the PL handler. Whatever value the PL thinks it's returning
+ * hack in the PL handler. Whatever value the PL thinks it's returning
* will just be ignored.
*/
Datum
diff --git a/src/backend/utils/adt/regexp.c b/src/backend/utils/adt/regexp.c
index 7e05a53810e..3f8a812a492 100644
--- a/src/backend/utils/adt/regexp.c
+++ b/src/backend/utils/adt/regexp.c
@@ -142,7 +142,7 @@ RE_compile_and_cache(text *text_re, int cflags, Oid collation)
char errMsg[100];
/*
- * Look for a match among previously compiled REs. Since the data
+ * Look for a match among previously compiled REs. Since the data
* structure is self-organizing with most-used entries at the front, our
* search strategy can just be to scan from the front.
*/
@@ -192,7 +192,7 @@ RE_compile_and_cache(text *text_re, int cflags, Oid collation)
/*
* Here and in other places in this file, do CHECK_FOR_INTERRUPTS
- * before reporting a regex error. This is so that if the regex
+ * before reporting a regex error. This is so that if the regex
* library aborts and returns REG_CANCEL, we don't print an error
* message that implies the regex was invalid.
*/
@@ -298,7 +298,7 @@ RE_wchar_execute(regex_t *re, pg_wchar *data, int data_len,
* dat_len --- the length of the data string
* nmatch, pmatch --- optional return area for match details
*
- * Data is given in the database encoding. We internally
+ * Data is given in the database encoding. We internally
* convert to array of pg_wchar which is what Spencer's regex package wants.
*/
static bool
diff --git a/src/backend/utils/adt/regproc.c b/src/backend/utils/adt/regproc.c
index 6716c0204c1..6d06d415cc9 100644
--- a/src/backend/utils/adt/regproc.c
+++ b/src/backend/utils/adt/regproc.c
@@ -81,7 +81,7 @@ regprocin(PG_FUNCTION_ARGS)
/*
* In bootstrap mode we assume the given name is not schema-qualified, and
- * just search pg_proc for a unique match. This is needed for
+ * just search pg_proc for a unique match. This is needed for
* initializing other system catalogs (pg_namespace may not exist yet, and
* certainly there are no schemas other than pg_catalog).
*/
@@ -266,7 +266,7 @@ regprocedurein(PG_FUNCTION_ARGS)
/*
* Else it's a name and arguments. Parse the name and arguments, look up
* potential matches in the current namespace search list, and scan to see
- * which one exactly matches the given argument types. (There will not be
+ * which one exactly matches the given argument types. (There will not be
* more than one match.)
*
* XXX at present, this code will not work in bootstrap mode, hence this
@@ -427,7 +427,7 @@ regoperin(PG_FUNCTION_ARGS)
/*
* In bootstrap mode we assume the given name is not schema-qualified, and
- * just search pg_operator for a unique match. This is needed for
+ * just search pg_operator for a unique match. This is needed for
* initializing other system catalogs (pg_namespace may not exist yet, and
* certainly there are no schemas other than pg_catalog).
*/
@@ -616,7 +616,7 @@ regoperatorin(PG_FUNCTION_ARGS)
/*
* Else it's a name and arguments. Parse the name and arguments, look up
* potential matches in the current namespace search list, and scan to see
- * which one exactly matches the given argument types. (There will not be
+ * which one exactly matches the given argument types. (There will not be
* more than one match.)
*
* XXX at present, this code will not work in bootstrap mode, hence this
@@ -853,7 +853,7 @@ regclassout(PG_FUNCTION_ARGS)
/*
* In bootstrap mode, skip the fancy namespace stuff and just return
- * the class name. (This path is only needed for debugging output
+ * the class name. (This path is only needed for debugging output
* anyway.)
*/
if (IsBootstrapProcessingMode())
@@ -1343,7 +1343,7 @@ stringToQualifiedNameList(const char *string)
/*
* Given a C string, parse it into a qualified function or operator name
- * followed by a parenthesized list of type names. Reduce the
+ * followed by a parenthesized list of type names. Reduce the
* type names to an array of OIDs (returned into *nargs and *argtypes;
* the argtypes array should be of size FUNC_MAX_ARGS). The function or
* operator name is returned to *names as a List of Strings.
diff --git a/src/backend/utils/adt/ri_triggers.c b/src/backend/utils/adt/ri_triggers.c
index f08af12a2bd..32621e0fc26 100644
--- a/src/backend/utils/adt/ri_triggers.c
+++ b/src/backend/utils/adt/ri_triggers.c
@@ -2006,11 +2006,11 @@ RI_FKey_setnull_upd(PG_FUNCTION_ARGS)
/*
* "MATCH <unspecified>" only changes columns corresponding to the
- * referenced columns that have changed in pk_rel. This means the
+ * referenced columns that have changed in pk_rel. This means the
* "SET attrn=NULL [, attrn=NULL]" string will be change as well.
* In this case, we need to build a temporary plan rather than use
* our cached plan, unless the update happens to change all
- * columns in the key. Fortunately, for the most common case of a
+ * columns in the key. Fortunately, for the most common case of a
* single-column foreign key, this will be true.
*
* In case you're wondering, the inequality check works because we
@@ -2768,7 +2768,7 @@ RI_Initial_Check(Trigger *trigger, Relation fk_rel, Relation pk_rel)
* Temporarily increase work_mem so that the check query can be executed
* more efficiently. It seems okay to do this because the query is simple
* enough to not use a multiple of work_mem, and one typically would not
- * have many large foreign-key validations happening concurrently. So
+ * have many large foreign-key validations happening concurrently. So
* this seems to meet the criteria for being considered a "maintenance"
* operation, and accordingly we use maintenance_work_mem.
*
@@ -3506,7 +3506,7 @@ ri_ReportViolation(RI_QueryKey *qkey, const char *constrname,
errhint("This is most likely due to a rule having rewritten the query.")));
/*
- * Determine which relation to complain about. If tupdesc wasn't passed
+ * Determine which relation to complain about. If tupdesc wasn't passed
* by caller, assume the violator tuple came from there.
*/
onfk = (qkey->constr_queryno == RI_PLAN_CHECK_LOOKUPPK);
diff --git a/src/backend/utils/adt/rowtypes.c b/src/backend/utils/adt/rowtypes.c
index c55c3d3902b..8372c7be772 100644
--- a/src/backend/utils/adt/rowtypes.c
+++ b/src/backend/utils/adt/rowtypes.c
@@ -277,7 +277,7 @@ record_in(PG_FUNCTION_ARGS)
/*
* We cannot return tuple->t_data because heap_form_tuple allocates it as
* part of a larger chunk, and our caller may expect to be able to pfree
- * our result. So must copy the info into a new palloc chunk.
+ * our result. So must copy the info into a new palloc chunk.
*/
result = (HeapTupleHeader) palloc(tuple->t_len);
memcpy(result, tuple->t_data, tuple->t_len);
@@ -635,7 +635,7 @@ record_recv(PG_FUNCTION_ARGS)
/*
* We cannot return tuple->t_data because heap_form_tuple allocates it as
* part of a larger chunk, and our caller may expect to be able to pfree
- * our result. So must copy the info into a new palloc chunk.
+ * our result. So must copy the info into a new palloc chunk.
*/
result = (HeapTupleHeader) palloc(tuple->t_len);
memcpy(result, tuple->t_data, tuple->t_len);
@@ -889,7 +889,7 @@ record_cmp(FunctionCallInfo fcinfo)
/*
* Scan corresponding columns, allowing for dropped columns in different
- * places in the two rows. i1 and i2 are physical column indexes, j is
+ * places in the two rows. i1 and i2 are physical column indexes, j is
* the logical column index.
*/
i1 = i2 = j = 0;
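A standalone sketch of that parallel scan, where the two physical indexes advance independently past dropped columns; plain ints stand in for the Datum comparison the real code performs.

#include <stdbool.h>
#include <stdio.h>

static int
row_cmp(const int *vals1, const bool *dropped1, int n1,
        const int *vals2, const bool *dropped2, int n2)
{
    int i1 = 0,
        i2 = 0;                 /* physical column indexes into each row */

    while (i1 < n1 && i2 < n2)
    {
        while (i1 < n1 && dropped1[i1])
            i1++;               /* skip dropped columns in row 1 */
        while (i2 < n2 && dropped2[i2])
            i2++;               /* skip dropped columns in row 2 */
        if (i1 >= n1 || i2 >= n2)
            break;              /* ran out of live columns */
        if (vals1[i1] != vals2[i2])
            return vals1[i1] < vals2[i2] ? -1 : 1;
        i1++;
        i2++;                   /* both now point at the next logical column */
    }
    return 0;
}

int
main(void)
{
    int  a[] = {7, 0, 9};
    bool da[] = {false, true, false};   /* middle column was dropped */
    int  b[] = {7, 9};
    bool db[] = {false, false};

    printf("%d\n", row_cmp(a, da, 3, b, db, 2));    /* 0: logically equal */
    return 0;
}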
@@ -1124,7 +1124,7 @@ record_eq(PG_FUNCTION_ARGS)
/*
* Scan corresponding columns, allowing for dropped columns in different
- * places in the two rows. i1 and i2 are physical column indexes, j is
+ * places in the two rows. i1 and i2 are physical column indexes, j is
* the logical column index.
*/
i1 = i2 = j = 0;
diff --git a/src/backend/utils/adt/ruleutils.c b/src/backend/utils/adt/ruleutils.c
index 8bf5a791375..059eeb3dcc0 100644
--- a/src/backend/utils/adt/ruleutils.c
+++ b/src/backend/utils/adt/ruleutils.c
@@ -877,7 +877,7 @@ pg_get_indexdef_worker(Oid indexrelid, int colno,
context = deparse_context_for(get_relation_name(indrelid), indrelid);
/*
- * Start the index definition. Note that the index's name should never be
+ * Start the index definition. Note that the index's name should never be
* schema-qualified, but the indexed rel's name may be.
*/
initStringInfo(&buf);
@@ -1305,7 +1305,7 @@ pg_get_constraintdef_worker(Oid constraintId, bool fullCommand,
prettyFlags, 0);
/*
- * Now emit the constraint definition. There are cases where
+ * Now emit the constraint definition. There are cases where
* the constraint expression will be fully parenthesized and
* we don't need the outer parens ... but there are other
* cases where we do need 'em. Be conservative for now.
@@ -2126,7 +2126,7 @@ deparse_expression_pretty(Node *expr, List *dpcontext,
*
* Given the reference name (alias) and OID of a relation, build deparsing
* context for an expression referencing only that relation (as varno 1,
- * varlevelsup 0). This is sufficient for many uses of deparse_expression.
+ * varlevelsup 0). This is sufficient for many uses of deparse_expression.
* ----------
*/
List *
@@ -2211,7 +2211,7 @@ set_deparse_planstate(deparse_namespace *dpns, PlanState *ps)
* We special-case Append and MergeAppend to pretend that the first child
* plan is the OUTER referent; we have to interpret OUTER Vars in their
* tlists according to one of the children, and the first one is the most
- * natural choice. Likewise special-case ModifyTable to pretend that the
+ * natural choice. Likewise special-case ModifyTable to pretend that the
* first child plan is the OUTER referent; this is to support RETURNING
* lists containing references to non-target relations.
*/
@@ -2251,7 +2251,7 @@ set_deparse_planstate(deparse_namespace *dpns, PlanState *ps)
* push_child_plan: temporarily transfer deparsing attention to a child plan
*
* When expanding an OUTER or INNER reference, we must adjust the deparse
- * context in case the referenced expression itself uses OUTER/INNER. We
+ * context in case the referenced expression itself uses OUTER/INNER. We
* modify the top stack entry in-place to avoid affecting levelsup issues
* (although in a Plan tree there really shouldn't be any).
*
@@ -2615,8 +2615,8 @@ get_query_def(Query *query, StringInfo buf, List *parentnamespace,
/*
* Before we begin to examine the query, acquire locks on referenced
- * relations, and fix up deleted columns in JOIN RTEs. This ensures
- * consistent results. Note we assume it's OK to scribble on the passed
+ * relations, and fix up deleted columns in JOIN RTEs. This ensures
+ * consistent results. Note we assume it's OK to scribble on the passed
* querytree!
*
* We are only deparsing the query (we are not about to execute it), so we
@@ -3036,7 +3036,7 @@ get_target_list(List *targetList, deparse_context *context,
}
/*
- * Figure out what the result column should be called. In the context
+ * Figure out what the result column should be called. In the context
* of a view, use the view's tuple descriptor (so as to pick up the
* effects of any column RENAME that's been done on the view).
* Otherwise, just use what we can find in the TLE.
@@ -3176,7 +3176,7 @@ get_rule_sortgroupclause(SortGroupClause *srt, List *tlist, bool force_colno,
* expression is a constant, force it to be dumped with an explicit cast
* as decoration --- this is because a simple integer constant is
* ambiguous (and will be misinterpreted by findTargetlistEntry()) if we
- * dump it without any decoration. Otherwise, just dump the expression
+ * dump it without any decoration. Otherwise, just dump the expression
* normally.
*/
if (force_colno)
@@ -4292,7 +4292,7 @@ get_name_for_var_field(Var *var, int fieldno,
/*
* We now have an expression we can't expand any more, so see if
- * get_expr_result_type() can do anything with it. If not, pass to
+ * get_expr_result_type() can do anything with it. If not, pass to
* lookup_rowtype_tupdesc() which will probably fail, but will give an
* appropriate error message while failing.
*/
@@ -4920,10 +4920,10 @@ get_rule_expr(Node *node, deparse_context *context,
/*
* If there's a refassgnexpr, we want to print the node in the
- * format "array[subscripts] := refassgnexpr". This is not
+ * format "array[subscripts] := refassgnexpr". This is not
* legal SQL, so decompilation of INSERT or UPDATE statements
* should always use processIndirection as part of the
- * statement-level syntax. We should only see this when
+ * statement-level syntax. We should only see this when
* EXPLAIN tries to print the targetlist of a plan resulting
* from such a statement.
*/
@@ -5082,7 +5082,7 @@ get_rule_expr(Node *node, deparse_context *context,
/*
* We cannot see an already-planned subplan in rule deparsing,
- * only while EXPLAINing a query plan. We don't try to
+ * only while EXPLAINing a query plan. We don't try to
* reconstruct the original SQL, just reference the subplan
* that appears elsewhere in EXPLAIN's result.
*/
@@ -5155,14 +5155,14 @@ get_rule_expr(Node *node, deparse_context *context,
* There is no good way to represent a FieldStore as real SQL,
* so decompilation of INSERT or UPDATE statements should
* always use processIndirection as part of the
- * statement-level syntax. We should only get here when
+ * statement-level syntax. We should only get here when
* EXPLAIN tries to print the targetlist of a plan resulting
* from such a statement. The plan case is even harder than
* ordinary rules would be, because the planner tries to
* collapse multiple assignments to the same field or subfield
* into one FieldStore; so we can see a list of target fields
* not just one, and the arguments could be FieldStores
- * themselves. We don't bother to try to print the target
+ * themselves. We don't bother to try to print the target
* field names; we just print the source arguments, with a
* ROW() around them if there's more than one. This isn't
* terribly complete, but it's probably good enough for
@@ -6058,7 +6058,7 @@ get_coercion_expr(Node *arg, deparse_context *context,
* Since parse_coerce.c doesn't immediately collapse application of
* length-coercion functions to constants, what we'll typically see in
* such cases is a Const with typmod -1 and a length-coercion function
- * right above it. Avoid generating redundant output. However, beware of
+ * right above it. Avoid generating redundant output. However, beware of
* suppressing casts when the user actually wrote something like
* 'foo'::text::char(3).
*/
@@ -6140,7 +6140,7 @@ get_const_expr(Const *constval, deparse_context *context, int showtype)
/*
* These types are printed without quotes unless they contain
* values that aren't accepted by the scanner unquoted (e.g.,
- * 'NaN'). Note that strtod() and friends might accept NaN,
+ * 'NaN'). Note that strtod() and friends might accept NaN,
* so we can't use that to test.
*
* In reality we only need to defend against infinity and NaN,
@@ -6795,7 +6795,7 @@ get_opclass_name(Oid opclass, Oid actual_datatype,
if (!OidIsValid(actual_datatype) ||
GetDefaultOpClass(actual_datatype, opcrec->opcmethod) != opclass)
{
- /* Okay, we need the opclass name. Do we need to qualify it? */
+ /* Okay, we need the opclass name. Do we need to qualify it? */
opcname = NameStr(opcrec->opcname);
if (OpclassIsVisible(opclass))
appendStringInfo(buf, " %s", quote_identifier(opcname));
@@ -7090,9 +7090,9 @@ generate_relation_name(Oid relid, List *namespaces)
* generate_function_name
* Compute the name to display for a function specified by OID,
* given that it is being called with the specified actual arg names and
- * types. (Those matter because of ambiguous-function resolution rules.)
+ * types. (Those matter because of ambiguous-function resolution rules.)
*
- * The result includes all necessary quoting and schema-prefixing. We can
+ * The result includes all necessary quoting and schema-prefixing. We can
* also pass back an indication of whether the function is variadic.
*/
static char *
@@ -7120,7 +7120,7 @@ generate_function_name(Oid funcid, int nargs, List *argnames,
/*
* The idea here is to schema-qualify only if the parser would fail to
* resolve the correct function given the unqualified func name with the
- * specified argtypes. If the function is variadic, we should presume
+ * specified argtypes. If the function is variadic, we should presume
* that VARIADIC will be included in the call.
*/
p_result = func_get_detail(list_make1(makeString(proname)),
diff --git a/src/backend/utils/adt/selfuncs.c b/src/backend/utils/adt/selfuncs.c
index c71e952f8e5..b90d2d7412c 100644
--- a/src/backend/utils/adt/selfuncs.c
+++ b/src/backend/utils/adt/selfuncs.c
@@ -72,7 +72,7 @@
* float8 oprjoin (internal, oid, internal, int2, internal);
*
* (Before Postgres 8.4, join estimators had only the first four of these
- * parameters. That signature is still allowed, but deprecated.) The
+ * parameters. That signature is still allowed, but deprecated.) The
* relationship between jointype and sjinfo is explained in the comments for
* clause_selectivity() --- the short version is that jointype is usually
* best ignored in favor of examining sjinfo.
@@ -201,7 +201,7 @@ static Const *string_to_bytea_const(const char *str, size_t str_len);
*
* Note: this routine is also used to estimate selectivity for some
* operators that are not "=" but have comparable selectivity behavior,
- * such as "~=" (geometric approximate-match). Even for "=", we must
+ * such as "~=" (geometric approximate-match). Even for "=", we must
* keep in mind that the left and right datatypes may differ.
*/
Datum
@@ -286,7 +286,7 @@ var_eq_const(VariableStatData *vardata, Oid operator,
/*
* Is the constant "=" to any of the column's most common values?
* (Although the given operator may not really be "=", we will assume
- * that seeing whether it returns TRUE is an appropriate test. If you
+ * that seeing whether it returns TRUE is an appropriate test. If you
* don't like this, maybe you shouldn't be using eqsel for your
* operator...)
*/
@@ -420,7 +420,7 @@ var_eq_non_const(VariableStatData *vardata, Oid operator,
* result averaged over all possible values whether common or
* uncommon. (Essentially, we are assuming that the not-yet-known
* comparison value is equally likely to be any of the possible
- * values, regardless of their frequency in the table. Is that a good
+ * values, regardless of their frequency in the table. Is that a good
* idea?)
*/
selec = 1.0 - stats->stanullfrac;
@@ -643,7 +643,7 @@ mcv_selectivity(VariableStatData *vardata, FmgrInfo *opproc,
* essentially using the histogram just as a representative sample. However,
* small histograms are unlikely to be all that representative, so the caller
* should be prepared to fall back on some other estimation approach when the
- * histogram is missing or very small. It may also be prudent to combine this
+ * histogram is missing or very small. It may also be prudent to combine this
* approach with another one when the histogram is small.
*
* If the actual histogram size is not at least min_hist_size, we won't bother
@@ -661,7 +661,7 @@ mcv_selectivity(VariableStatData *vardata, FmgrInfo *opproc,
*
* Note that the result disregards both the most-common-values (if any) and
* null entries. The caller is expected to combine this result with
- * statistics for those portions of the column population. It may also be
+ * statistics for those portions of the column population. It may also be
* prudent to clamp the result range, ie, disbelieve exact 0 or 1 outputs.
*/
double
@@ -774,7 +774,7 @@ ineq_histogram_selectivity(PlannerInfo *root,
*
* If the binary search accesses the first or last histogram
* entry, we try to replace that endpoint with the true column min
- * or max as found by get_actual_variable_range(). This
+ * or max as found by get_actual_variable_range(). This
* ameliorates misestimates when the min or max is moving as a
* result of changes since the last ANALYZE. Note that this could
* result in effectively including MCVs into the histogram that
@@ -878,7 +878,7 @@ ineq_histogram_selectivity(PlannerInfo *root,
/*
* Watch out for the possibility that we got a NaN or
- * Infinity from the division. This can happen
+ * Infinity from the division. This can happen
* despite the previous checks, if for example "low"
* is -Infinity.
*/
@@ -893,7 +893,7 @@ ineq_histogram_selectivity(PlannerInfo *root,
* Ideally we'd produce an error here, on the grounds that
* the given operator shouldn't have scalarXXsel
* registered as its selectivity func unless we can deal
- * with its operand types. But currently, all manner of
+ * with its operand types. But currently, all manner of
* stuff is invoking scalarXXsel, so give a default
* estimate until that can be fixed.
*/
@@ -919,7 +919,7 @@ ineq_histogram_selectivity(PlannerInfo *root,
/*
* The histogram boundaries are only approximate to begin with,
- * and may well be out of date anyway. Therefore, don't believe
+ * and may well be out of date anyway. Therefore, don't believe
* extremely small or large selectivity estimates --- unless we
* got actual current endpoint values from the table.
*/
@@ -1116,7 +1116,7 @@ patternsel(PG_FUNCTION_ARGS, Pattern_Type ptype, bool negate)
/*
* If this is for a NOT LIKE or similar operator, get the corresponding
- * positive-match operator and work with that. Set result to the correct
+ * positive-match operator and work with that. Set result to the correct
* default estimate, too.
*/
if (negate)
@@ -1320,7 +1320,7 @@ patternsel(PG_FUNCTION_ARGS, Pattern_Type ptype, bool negate)
/*
* If we have most-common-values info, add up the fractions of the MCV
* entries that satisfy MCV OP PATTERN. These fractions contribute
- * directly to the result selectivity. Also add up the total fraction
+ * directly to the result selectivity. Also add up the total fraction
* represented by MCV entries.
*/
mcv_selec = mcv_selectivity(&vardata, &opproc, constval, true,
@@ -2135,9 +2135,9 @@ eqjoinsel_inner(Oid operator,
if (have_mcvs1 && have_mcvs2)
{
/*
- * We have most-common-value lists for both relations. Run through
+ * We have most-common-value lists for both relations. Run through
* the lists to see which MCVs actually join to each other with the
- * given operator. This allows us to determine the exact join
+ * given operator. This allows us to determine the exact join
* selectivity for the portion of the relations represented by the MCV
* lists. We still have to estimate for the remaining population, but
* in a skewed distribution this gives us a big leg up in accuracy.
@@ -2169,7 +2169,7 @@ eqjoinsel_inner(Oid operator,
/*
* Note we assume that each MCV will match at most one member of the
- * other MCV list. If the operator isn't really equality, there could
+ * other MCV list. If the operator isn't really equality, there could
* be multiple matches --- but we don't look for them, both for speed
* and because the math wouldn't add up...
*/
@@ -2377,9 +2377,9 @@ eqjoinsel_semi(Oid operator,
if (have_mcvs1 && have_mcvs2 && OidIsValid(operator))
{
/*
- * We have most-common-value lists for both relations. Run through
+ * We have most-common-value lists for both relations. Run through
* the lists to see which MCVs actually join to each other with the
- * given operator. This allows us to determine the exact join
+ * given operator. This allows us to determine the exact join
* selectivity for the portion of the relations represented by the MCV
* lists. We still have to estimate for the remaining population, but
* in a skewed distribution this gives us a big leg up in accuracy.
@@ -2410,7 +2410,7 @@ eqjoinsel_semi(Oid operator,
/*
* Note we assume that each MCV will match at most one member of the
- * other MCV list. If the operator isn't really equality, there could
+ * other MCV list. If the operator isn't really equality, there could
* be multiple matches --- but we don't look for them, both for speed
* and because the math wouldn't add up...
*/
@@ -2447,7 +2447,7 @@ eqjoinsel_semi(Oid operator,
/*
* Now we need to estimate the fraction of relation 1 that has at
- * least one join partner. We know for certain that the matched MCVs
+ * least one join partner. We know for certain that the matched MCVs
* do, so that gives us a lower bound, but we're really in the dark
* about everything else. Our crude approach is: if nd1 <= nd2 then
* assume all non-null rel1 rows have join partners, else assume for
@@ -3044,11 +3044,11 @@ add_unique_group_var(PlannerInfo *root, List *varinfos,
* case (all possible cross-product terms actually appear as groups) since
* very often the grouped-by Vars are highly correlated. Our current approach
* is as follows:
- * 1. Expressions yielding boolean are assumed to contribute two groups,
+ * 1. Expressions yielding boolean are assumed to contribute two groups,
* independently of their content, and are ignored in the subsequent
- * steps. This is mainly because tests like "col IS NULL" break the
+ * steps. This is mainly because tests like "col IS NULL" break the
* heuristic used in step 2 especially badly.
- * 2. Reduce the given expressions to a list of unique Vars used. For
+ * 2. Reduce the given expressions to a list of unique Vars used. For
* example, GROUP BY a, a + b is treated the same as GROUP BY a, b.
* It is clearly correct not to count the same Var more than once.
* It is also reasonable to treat f(x) the same as x: f() cannot
@@ -3058,14 +3058,14 @@ add_unique_group_var(PlannerInfo *root, List *varinfos,
* As a special case, if a GROUP BY expression can be matched to an
* expressional index for which we have statistics, then we treat the
* whole expression as though it were just a Var.
- * 3. If the list contains Vars of different relations that are known equal
+ * 3. If the list contains Vars of different relations that are known equal
* due to equivalence classes, then drop all but one of the Vars from each
* known-equal set, keeping the one with smallest estimated # of values
* (since the extra values of the others can't appear in joined rows).
* Note the reason we only consider Vars of different relations is that
* if we considered ones of the same rel, we'd be double-counting the
* restriction selectivity of the equality in the next step.
- * 4. For Vars within a single source rel, we multiply together the numbers
+ * 4. For Vars within a single source rel, we multiply together the numbers
* of values, clamp to the number of rows in the rel (divided by 10 if
* more than one Var), and then multiply by the selectivity of the
* restriction clauses for that rel. When there's more than one Var,
@@ -3076,7 +3076,7 @@ add_unique_group_var(PlannerInfo *root, List *varinfos,
* by the restriction selectivity is effectively assuming that the
* restriction clauses are independent of the grouping, which is a crummy
* assumption, but it's hard to do better.
- * 5. If there are Vars from multiple rels, we repeat step 4 for each such
+ * 5. If there are Vars from multiple rels, we repeat step 4 for each such
* rel, and multiply the results together.
* Note that rels not containing grouped Vars are ignored completely, as are
* join clauses. Such rels cannot increase the number of groups, and we
@@ -3107,7 +3107,7 @@ estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows)
return 1.0;
/*
- * Count groups derived from boolean grouping expressions. For other
+ * Count groups derived from boolean grouping expressions. For other
* expressions, find the unique Vars used, treating an expression as a Var
* if we can find stats for it. For each one, record the statistical
* estimate of number of distinct values (total in its table, without
@@ -3196,7 +3196,7 @@ estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows)
* Group Vars by relation and estimate total numdistinct.
*
* For each iteration of the outer loop, we process the frontmost Var in
- * varinfos, plus all other Vars in the same relation. We remove these
+ * varinfos, plus all other Vars in the same relation. We remove these
* Vars from the newvarinfos list for the next iteration. This is the
* easiest way to group Vars of same rel together.
*/
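
Step 4 of the scheme laid out above is simple arithmetic, shown here as a hedged sketch with invented inputs (the function and its parameters are illustrative, not the estimate_num_groups() internals): multiply the per-Var distinct counts, clamp to the rel's row count (divided by 10 when more than one Var contributes), and scale by the rel's restriction selectivity.

    #include <stdio.h>

    static double
    groups_for_rel(const double *ndistinct, int nvars,
                   double reltuples, double restrict_sel)
    {
        double      clamp = reltuples;
        double      est = 1.0;
        int         i;

        if (nvars > 1)
            clamp /= 10.0;          /* the cross product is rarely realized */

        for (i = 0; i < nvars; i++)
            est *= ndistinct[i];

        if (est > clamp)
            est = clamp;

        return est * restrict_sel;
    }

    int
    main(void)
    {
        double      nd[] = {100.0, 50.0};   /* two grouped Vars of one rel */

        /* 100 * 50 = 5000, clamped to 10000/10 = 1000, times selectivity 0.5 */
        printf("estimated groups: %g\n", groups_for_rel(nd, 2, 10000.0, 0.5));
        return 0;
    }
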
@@ -3297,11 +3297,11 @@ estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows)
* distribution, so this will have to do for now.
*
* We are passed the number of buckets the executor will use for the given
- * input relation. If the data were perfectly distributed, with the same
+ * input relation. If the data were perfectly distributed, with the same
* number of tuples going into each available bucket, then the bucketsize
* fraction would be 1/nbuckets. But this happy state of affairs will occur
* only if (a) there are at least nbuckets distinct data values, and (b)
- * we have a not-too-skewed data distribution. Otherwise the buckets will
+ * we have a not-too-skewed data distribution. Otherwise the buckets will
* be nonuniformly occupied. If the other relation in the join has a key
* distribution similar to this one's, then the most-loaded buckets are
* exactly those that will be probed most often. Therefore, the "average"
@@ -3477,7 +3477,7 @@ convert_to_scalar(Datum value, Oid valuetypid, double *scaledvalue,
* operators to estimate selectivity for the other's. This is outright
* wrong in some cases --- in particular signed versus unsigned
* interpretation could trip us up. But it's useful enough in the
- * majority of cases that we do it anyway. Should think about more
+ * majority of cases that we do it anyway. Should think about more
* rigorous ways to do it.
*/
switch (valuetypid)
@@ -4061,7 +4061,7 @@ get_restriction_variable(PlannerInfo *root, List *args, int varRelid,
right = (Node *) lsecond(args);
/*
- * Examine both sides. Note that when varRelid is nonzero, Vars of other
+ * Examine both sides. Note that when varRelid is nonzero, Vars of other
* relations will be treated as pseudoconstants.
*/
examine_variable(root, left, varRelid, vardata);
@@ -4224,7 +4224,7 @@ examine_variable(PlannerInfo *root, Node *node, int varRelid,
* XXX This means the Var comes from a JOIN or sub-SELECT. Later
* add code to dig down into the join etc and see if we can trace
* the variable to something with stats. (But beware of
- * sub-SELECTs with DISTINCT/GROUP BY/etc. Perhaps there are no
+ * sub-SELECTs with DISTINCT/GROUP BY/etc. Perhaps there are no
* cases where this would really be useful, because we'd have
* flattened the subselect if it is??)
*/
@@ -4235,7 +4235,7 @@ examine_variable(PlannerInfo *root, Node *node, int varRelid,
/*
* Okay, it's a more complicated expression. Determine variable
- * membership. Note that when varRelid isn't zero, only vars of that
+ * membership. Note that when varRelid isn't zero, only vars of that
* relation are considered "real" vars.
*/
varnos = pull_varnos(basenode);
@@ -4284,13 +4284,13 @@ examine_variable(PlannerInfo *root, Node *node, int varRelid,
if (onerel)
{
/*
- * We have an expression in vars of a single relation. Try to match
+ * We have an expression in vars of a single relation. Try to match
* it to expressional index columns, in hopes of finding some
* statistics.
*
* XXX it's conceivable that there are multiple matches with different
* index opfamilies; if so, we need to pick one that matches the
- * operator we are estimating for. FIXME later.
+ * operator we are estimating for. FIXME later.
*/
ListCell *ilist;
@@ -4386,7 +4386,7 @@ get_variable_numdistinct(VariableStatData *vardata)
double ntuples;
/*
- * Determine the stadistinct value to use. There are cases where we can
+ * Determine the stadistinct value to use. There are cases where we can
* get an estimate even without a pg_statistic entry, or can get a better
* value than is in pg_statistic.
*/
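
The stadistinct convention the hunk above relies on (documented in pg_statistic.h) can be sketched in a few lines: a positive value is an absolute distinct count, a negative value is the negated fraction of rows that are distinct (so the estimate scales as the table grows), and zero means unknown. The fallback constant below mirrors PostgreSQL's DEFAULT_NUM_DISTINCT; the function itself is an illustration, not the real get_variable_numdistinct().

    #include <stdio.h>

    static double
    interpret_stadistinct(double stadistinct, double ntuples)
    {
        if (stadistinct > 0.0)
            return stadistinct;     /* absolute number of distinct values */
        if (stadistinct < 0.0)
            return -stadistinct * ntuples;  /* negated fraction of row count */
        return 200.0;               /* unknown: DEFAULT_NUM_DISTINCT fallback */
    }

    int
    main(void)
    {
        printf("%g\n", interpret_stadistinct(42.0, 1e6));   /* 42 */
        printf("%g\n", interpret_stadistinct(-0.5, 1e6));   /* 500000 */
        printf("%g\n", interpret_stadistinct(0.0, 1e6));    /* 200 */
        return 0;
    }
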
@@ -4502,7 +4502,7 @@ get_variable_range(PlannerInfo *root, VariableStatData *vardata, Oid sortop,
/*
* XXX It's very tempting to try to use the actual column min and max, if
- * we can get them relatively-cheaply with an index probe. However, since
+ * we can get them relatively-cheaply with an index probe. However, since
* this function is called many times during join planning, that could
* have unpleasant effects on planning speed. Need more investigation
* before enabling this.
@@ -4753,7 +4753,7 @@ get_actual_variable_range(PlannerInfo *root, VariableStatData *vardata,
* and it can be very expensive if a lot of uncommitted rows
* exist at the end of the index (because we'll laboriously
* fetch each one and reject it). What seems like a good
- * compromise is to use SnapshotDirty. That will accept
+ * compromise is to use SnapshotDirty. That will accept
* uncommitted rows, and thus avoid fetching multiple heap
* tuples in this scenario. On the other hand, it will reject
* known-dead rows, and thus not give a bogus answer when the
@@ -4892,7 +4892,7 @@ find_join_input_rel(PlannerInfo *root, Relids relids)
* Check whether char is a letter (and, hence, subject to case-folding)
*
* In multibyte character sets, we can't use isalpha, and it does not seem
- * worth trying to convert to wchar_t to use iswalpha. Instead, just assume
+ * worth trying to convert to wchar_t to use iswalpha. Instead, just assume
* any multibyte char is potentially case-varying.
*/
static int
@@ -5144,7 +5144,7 @@ pattern_fixed_prefix(Const *patt, Pattern_Type ptype, Oid collation,
* together with info about MCVs and NULLs.
*
* We use the >= and < operators from the specified btree opfamily to do the
- * estimation. The given variable and Const must be of the associated
+ * estimation. The given variable and Const must be of the associated
* datatype.
*
* XXX Note: we make use of the upper bound to estimate operator selectivity
@@ -5203,7 +5203,7 @@ prefix_selectivity(PlannerInfo *root, VariableStatData *vardata,
/*
* Merge the two selectivities in the same way as for a range query
- * (see clauselist_selectivity()). Note that we don't need to worry
+ * (see clauselist_selectivity()). Note that we don't need to worry
* about double-exclusion of nulls, since ineq_histogram_selectivity
* doesn't count those anyway.
*/
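
The two hunks above describe estimating a prefix pattern as a range: var >= 'foo' AND var < 'fop', with the two inequality selectivities merged the way clauselist_selectivity() merges a range qual, sel = sel_ge + sel_lt - 1. A toy sketch follows; make_greater_bound() is a single-byte stand-in for the real make_greater_string() (which must handle byte overflow, multibyte encodings, and collation), and the sample selectivities are invented.

    #include <stdio.h>
    #include <string.h>

    static void
    make_greater_bound(char *buf)   /* "foo" -> "fop" */
    {
        size_t      len = strlen(buf);

        if (len > 0)
            buf[len - 1]++;         /* real code handles overflow, multibyte */
    }

    static double
    prefix_range_sel(double sel_ge_lo, double sel_lt_hi)
    {
        double      sel = sel_ge_lo + sel_lt_hi - 1.0;

        return (sel < 0.0) ? 0.0 : sel;     /* clamp roundoff */
    }

    int
    main(void)
    {
        char        bound[8] = "foo";

        make_greater_bound(bound);
        /* e.g. P(var >= 'foo') = 0.40 and P(var < 'fop') = 0.63 */
        printf("upper bound: %s, selectivity: %.2f\n",
               bound, prefix_range_sel(0.40, 0.63));
        return 0;
    }
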
@@ -5427,7 +5427,7 @@ regex_selectivity(const char *patt, int pattlen, bool case_insensitive,
* that is not a bulletproof guarantee that an extension of the string might
* not sort after it; an example is that "foo " is less than "foo!", but it
* is not clear that a "dictionary" sort ordering will consider "foo!" less
- * than "foo bar". CAUTION: Therefore, this function should be used only for
+ * than "foo bar". CAUTION: Therefore, this function should be used only for
* estimation purposes when working in a non-C collation.
*
* To try to catch most cases where an extended string might otherwise sort
@@ -5813,9 +5813,9 @@ genericcostestimate(PlannerInfo *root,
* The above calculations are all per-index-scan. However, if we are in a
* nestloop inner scan, we can expect the scan to be repeated (with
* different search keys) for each row of the outer relation. Likewise,
- * ScalarArrayOpExpr quals result in multiple index scans. This creates
+ * ScalarArrayOpExpr quals result in multiple index scans. This creates
* the potential for cache effects to reduce the number of disk page
- * fetches needed. We want to estimate the average per-scan I/O cost in
+ * fetches needed. We want to estimate the average per-scan I/O cost in
* the presence of caching.
*
* We use the Mackert-Lohman formula (see costsize.c for details) to
@@ -5888,7 +5888,7 @@ genericcostestimate(PlannerInfo *root,
* evaluated once at the start of the scan to reduce them to runtime keys
* to pass to the index AM (see nodeIndexscan.c). We model the per-tuple
* CPU costs as cpu_index_tuple_cost plus one cpu_operator_cost per
- * indexqual operator. Because we have numIndexTuples as a per-scan
+ * indexqual operator. Because we have numIndexTuples as a per-scan
* number, we have to multiply by num_sa_scans to get the correct result
* for ScalarArrayOpExpr cases. Similarly add in costs for any index
* ORDER BY expressions.
@@ -5965,7 +5965,7 @@ btcostestimate(PG_FUNCTION_ARGS)
* the index scan). Additional quals can suppress visits to the heap, so
* it's OK to count them in indexSelectivity, but they should not count
* for estimating numIndexTuples. So we must examine the given indexQuals
- * to find out which ones count as boundary quals. We rely on the
+ * to find out which ones count as boundary quals. We rely on the
* knowledge that they are given in index column order.
*
* For a RowCompareExpr, we consider only the first column, just as
@@ -6631,7 +6631,7 @@ gincostestimate(PG_FUNCTION_ARGS)
/*
* nPendingPages can be trusted, but the other fields are as of the last
- * VACUUM. Scale them by the ratio numPages / nTotalPages to account for
+ * VACUUM. Scale them by the ratio numPages / nTotalPages to account for
* growth since then. If the fields are zero (implying no VACUUM at all,
* and an index created pre-9.1), assume all pages are entry pages.
*/
@@ -6776,7 +6776,7 @@ gincostestimate(PG_FUNCTION_ARGS)
/*
* Add an estimate of entry pages read by partial match algorithm. It's a
- * scan over leaf pages in entry tree. We haven't any useful stats here,
+ * scan over leaf pages in entry tree. We haven't any useful stats here,
* so estimate it as proportion.
*/
entryPagesFetched += ceil(numEntryPages * counts.partialEntries / numEntries);
diff --git a/src/backend/utils/adt/timestamp.c b/src/backend/utils/adt/timestamp.c
index d11962802a7..b6b3478fc9a 100644
--- a/src/backend/utils/adt/timestamp.c
+++ b/src/backend/utils/adt/timestamp.c
@@ -374,7 +374,7 @@ AdjustTimestampForTypmod(Timestamp *time, int32 typmod)
* Note: this round-to-nearest code is not completely consistent about
* rounding values that are exactly halfway between integral values.
* On most platforms, rint() will implement round-to-nearest-even, but
- * the integer code always rounds up (away from zero). Is it worth
+ * the integer code always rounds up (away from zero). Is it worth
* trying to be consistent?
*/
#ifdef HAVE_INT64_TIMESTAMP
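
The inconsistency that comment points out is easy to reproduce with made-up numbers (this demo is an illustration, not the timestamp.c code): round 25 units to a multiple of 10, i.e. a value exactly halfway between candidates. With the default floating-point rounding mode, rint() rounds halfway cases to the nearest even multiple, while the integer path always rounds them away from zero.

    #include <math.h>
    #include <stdio.h>

    int
    main(void)
    {
        double      dval = 25.0;    /* exactly halfway between 20 and 30 */
        long        ival = 25;
        long        scale = 10;

        double      drounded = rint(dval / scale) * scale;
        long        irounded = ((ival + scale / 2) / scale) * scale;

        printf("rint:    %.0f\n", drounded);    /* 20: round-to-nearest-even */
        printf("integer: %ld\n", irounded);     /* 30: round away from zero */
        return 0;
    }
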
@@ -973,7 +973,7 @@ AdjustIntervalForTypmod(Interval *interval, int32 typmod)
* that fields to the right of the last one specified are zeroed out,
* but those to the left of it remain valid. Thus for example there
* is no operational difference between INTERVAL YEAR TO MONTH and
- * INTERVAL MONTH. In some cases we could meaningfully enforce that
+ * INTERVAL MONTH. In some cases we could meaningfully enforce that
* higher-order fields are zero; for example INTERVAL DAY could reject
* nonzero "month" field. However that seems a bit pointless when we
* can't do it consistently. (We cannot enforce a range limit on the
@@ -982,9 +982,9 @@ AdjustIntervalForTypmod(Interval *interval, int32 typmod)
*
* Note: before PG 8.4 we interpreted a limited set of fields as
* actually causing a "modulo" operation on a given value, potentially
- * losing high-order as well as low-order information. But there is
+ * losing high-order as well as low-order information. But there is
* no support for such behavior in the standard, and it seems fairly
- * undesirable on data consistency grounds anyway. Now we only
+ * undesirable on data consistency grounds anyway. Now we only
* perform truncation or rounding of low-order fields.
*/
if (range == INTERVAL_FULL_RANGE)
@@ -1104,7 +1104,7 @@ AdjustIntervalForTypmod(Interval *interval, int32 typmod)
/*
* Note: this round-to-nearest code is not completely consistent
* about rounding values that are exactly halfway between integral
- * values. On most platforms, rint() will implement
+ * values. On most platforms, rint() will implement
* round-to-nearest-even, but the integer code always rounds up
* (away from zero). Is it worth trying to be consistent?
*/
@@ -1314,7 +1314,7 @@ timestamptz_to_time_t(TimestampTz t)
* Produce a C-string representation of a TimestampTz.
*
* This is mostly for use in emitting messages. The primary difference
- * from timestamptz_out is that we force the output format to ISO. Note
+ * from timestamptz_out is that we force the output format to ISO. Note
* also that the result is in a static buffer, not pstrdup'd.
*/
const char *
@@ -1484,7 +1484,7 @@ recalc_t:
*
* First, convert to an integral timestamp, avoiding possibly
* platform-specific roundoff-in-wrong-direction errors, and adjust to
- * Unix epoch. Then see if we can convert to pg_time_t without loss. This
+ * Unix epoch. Then see if we can convert to pg_time_t without loss. This
* coding avoids hardwiring any assumptions about the width of pg_time_t,
* so it should behave sanely on machines without int64.
*/
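
A simplified sketch of that conversion, assuming integer timestamps (microseconds since 2000-01-01): shift by the 946,684,800-second gap between the PostgreSQL and Unix epochs, then verify the result survives the cast. This is an illustration only; the real code also guards against roundoff direction for negative values and works in terms of pg_time_t.

    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    #define USECS_PER_SEC   1000000
    #define EPOCH_OFFSET    946684800   /* 2000-01-01 minus 1970-01-01, in seconds */

    static int
    timestamp_to_time_t(int64_t ts, time_t *result)
    {
        int64_t     secs = ts / USECS_PER_SEC + EPOCH_OFFSET;

        *result = (time_t) secs;
        return (int64_t) *result == secs;   /* detect loss on a narrow time_t */
    }

    int
    main(void)
    {
        time_t      t;

        if (timestamp_to_time_t((int64_t) 0, &t))   /* PG epoch itself */
            printf("%s", asctime(gmtime(&t)));      /* 2000-01-01 00:00:00 UTC */
        return 0;
    }
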
@@ -4407,7 +4407,7 @@ timestamp_zone(PG_FUNCTION_ARGS)
PG_RETURN_TIMESTAMPTZ(timestamp);
/*
- * Look up the requested timezone. First we look in the date token table
+ * Look up the requested timezone. First we look in the date token table
* (to handle cases like "EST"), and if that fails, we look in the
* timezone database (to handle cases like "America/New_York"). (This
* matches the order in which timestamp input checks the cases; it's
@@ -4581,7 +4581,7 @@ timestamptz_zone(PG_FUNCTION_ARGS)
PG_RETURN_TIMESTAMP(timestamp);
/*
- * Look up the requested timezone. First we look in the date token table
+ * Look up the requested timezone. First we look in the date token table
* (to handle cases like "EST"), and if that fails, we look in the
* timezone database (to handle cases like "America/New_York"). (This
* matches the order in which timestamp input checks the cases; it's
diff --git a/src/backend/utils/adt/tsginidx.c b/src/backend/utils/adt/tsginidx.c
index 4cb961146b8..3ae552cb93d 100644
--- a/src/backend/utils/adt/tsginidx.c
+++ b/src/backend/utils/adt/tsginidx.c
@@ -237,7 +237,7 @@ gin_tsquery_consistent(PG_FUNCTION_ARGS)
* Formerly, gin_extract_tsvector had only two arguments. Now it has three,
* but we still need a pg_proc entry with two args to support reloading
* pre-9.1 contrib/tsearch2 opclass declarations. This compatibility
- * function should go away eventually. (Note: you might say "hey, but the
+ * function should go away eventually. (Note: you might say "hey, but the
* code above is only *using* two args, so let's just declare it that way".
* If you try that you'll find the opr_sanity regression test complains.)
*/
diff --git a/src/backend/utils/adt/varchar.c b/src/backend/utils/adt/varchar.c
index 7b84637f8cf..c9f0f9f7cfe 100644
--- a/src/backend/utils/adt/varchar.c
+++ b/src/backend/utils/adt/varchar.c
@@ -256,7 +256,7 @@ bpcharsend(PG_FUNCTION_ARGS)
*
* Truncation rules: for an explicit cast, silently truncate to the given
* length; for an implicit cast, raise error unless extra characters are
- * all spaces. (This is sort-of per SQL: the spec would actually have us
+ * all spaces. (This is sort-of per SQL: the spec would actually have us
* raise a "completion condition" for the explicit cast case, but Postgres
* hasn't got such a concept.)
*/
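
The truncation rule stated in that comment reduces to a small predicate, sketched here for single-byte data (the real code counts characters, not bytes, and reports the failure through ereport; this helper and its name are invented for illustration): an explicit cast silently truncates, an implicit cast fails unless every character being dropped is a space.

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    static bool
    truncate_to_typmod(char *s, size_t maxlen, bool is_explicit)
    {
        size_t      len = strlen(s);
        size_t      i;

        if (len <= maxlen)
            return true;            /* nothing to do */

        if (!is_explicit)
        {
            for (i = maxlen; i < len; i++)
            {
                if (s[i] != ' ')
                    return false;   /* implicit cast would lose data */
            }
        }
        s[maxlen] = '\0';           /* silently drop the excess */
        return true;
    }

    int
    main(void)
    {
        char        a[] = "abc   ";
        char        b[] = "abcdef";

        printf("%d\n", truncate_to_typmod(a, 3, false));    /* 1: only spaces cut */
        printf("%d\n", truncate_to_typmod(b, 3, false));    /* 0: would lose data */
        return 0;
    }
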
@@ -550,7 +550,7 @@ varcharsend(PG_FUNCTION_ARGS)
*
* Truncation rules: for an explicit cast, silently truncate to the given
* length; for an implicit cast, raise error unless extra characters are
- * all spaces. (This is sort-of per SQL: the spec would actually have us
+ * all spaces. (This is sort-of per SQL: the spec would actually have us
* raise a "completion condition" for the explicit cast case, but Postgres
* hasn't got such a concept.)
*/
diff --git a/src/backend/utils/adt/varlena.c b/src/backend/utils/adt/varlena.c
index e9ea6288048..dd115900d4b 100644
--- a/src/backend/utils/adt/varlena.c
+++ b/src/backend/utils/adt/varlena.c
@@ -528,7 +528,7 @@ textlen(PG_FUNCTION_ARGS)
* Does the real work for textlen()
*
* This is broken out so it can be called directly by other string processing
- * functions. Note that the argument is passed as a Datum, to indicate that
+ * functions. Note that the argument is passed as a Datum, to indicate that
* it may still be in compressed form. We can avoid decompressing it at all
* in some cases.
*/
@@ -700,7 +700,7 @@ text_substr_no_len(PG_FUNCTION_ARGS)
* Does the real work for text_substr() and text_substr_no_len()
*
* This is broken out so it can be called directly by other string processing
- * functions. Note that the argument is passed as a Datum, to indicate that
+ * functions. Note that the argument is passed as a Datum, to indicate that
* it may still be in compressed/toasted form. We can avoid detoasting all
* of it in some cases.
*
@@ -1050,7 +1050,7 @@ text_position_setup(text *t1, text *t2, TextPositionState *state)
* searched (t1) and the "needle" is the pattern being sought (t2).
*
* If the needle is empty or bigger than the haystack then there is no
- * point in wasting cycles initializing the table. We also choose not to
+ * point in wasting cycles initializing the table. We also choose not to
* use B-M-H for needles of length 1, since the skip table can't possibly
* save anything in that case.
*/
@@ -1066,7 +1066,7 @@ text_position_setup(text *t1, text *t2, TextPositionState *state)
* declaration of TextPositionState allows up to 256 elements, but for
* short search problems we don't really want to have to initialize so
* many elements --- it would take too long in comparison to the
- * actual search time. So we choose a useful skip table size based on
+ * actual search time. So we choose a useful skip table size based on
* the haystack length minus the needle length. The closer the needle
* length is to the haystack length the less useful skipping becomes.
*
@@ -1098,7 +1098,7 @@ text_position_setup(text *t1, text *t2, TextPositionState *state)
state->skiptable[i] = len2;
/*
- * Now examine the needle. For each character except the last one,
+ * Now examine the needle. For each character except the last one,
* set the corresponding table element to the appropriate skip
* distance. Note that when two characters share the same skip table
* entry, the one later in the needle must determine the skip
@@ -1186,11 +1186,11 @@ text_position_next(int start_pos, TextPositionState *state)
/*
* No match, so use the haystack char at hptr to decide how
- * far to advance. If the needle had any occurrence of that
+ * far to advance. If the needle had any occurrence of that
* character (or more precisely, one sharing the same
* skiptable entry) before its last character, then we advance
* far enough to align the last such needle character with
- * that haystack position. Otherwise we can advance by the
+ * that haystack position. Otherwise we can advance by the
* whole needle length.
*/
hptr += state->skiptable[(unsigned char) *hptr & skiptablemask];
@@ -1242,11 +1242,11 @@ text_position_next(int start_pos, TextPositionState *state)
/*
* No match, so use the haystack char at hptr to decide how
- * far to advance. If the needle had any occurrence of that
+ * far to advance. If the needle had any occurrence of that
* character (or more precisely, one sharing the same
* skiptable entry) before its last character, then we advance
* far enough to align the last such needle character with
- * that haystack position. Otherwise we can advance by the
+ * that haystack position. Otherwise we can advance by the
* whole needle length.
*/
hptr += state->skiptable[*hptr & skiptablemask];
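
The skip-table machinery spelled out across the text_position hunks above is classic Boyer-Moore-Horspool. Here is a self-contained sketch of the same idea, including a reduced skip table addressed through a mask; unlike the real code, the table size is fixed at 16 entries rather than chosen from the haystack and needle lengths, and all names are illustrative. Note how the build loop skips the needle's last character, and how later needle characters overwrite earlier ones that share a masked slot, exactly as the comments above require.

    #include <stdio.h>
    #include <string.h>

    #define SKIPTABLE_SIZE 16       /* must be a power of 2 */

    static int
    bmh_search(const char *haystack, int hlen, const char *needle, int nlen)
    {
        int         skiptable[SKIPTABLE_SIZE];
        int         mask = SKIPTABLE_SIZE - 1;
        const char *hptr;
        int         i;

        if (nlen <= 0 || nlen > hlen)
            return -1;

        /* characters absent from the needle allow a whole-needle skip */
        for (i = 0; i < SKIPTABLE_SIZE; i++)
            skiptable[i] = nlen;

        /* every needle char but the last records its distance to the end */
        for (i = 0; i < nlen - 1; i++)
            skiptable[(unsigned char) needle[i] & mask] = nlen - 1 - i;

        hptr = haystack + nlen - 1; /* haystack char aligned with needle end */
        while (hptr < haystack + hlen)
        {
            const char *p = hptr;
            const char *n = needle + nlen - 1;

            while (*p == *n)
            {
                if (n == needle)
                    return (int) (p - haystack);    /* whole needle matched */
                p--, n--;
            }
            /* mismatch: advance by the skip for the char under the needle end */
            hptr += skiptable[(unsigned char) *hptr & mask];
        }
        return -1;
    }

    int
    main(void)
    {
        const char *hay = "the quick brown fox";

        printf("%d\n", bmh_search(hay, (int) strlen(hay), "brown", 5)); /* 10 */
        printf("%d\n", bmh_search(hay, (int) strlen(hay), "zebra", 5)); /* -1 */
        return 0;
    }
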
@@ -1281,7 +1281,7 @@ varstr_cmp(char *arg1, int len1, char *arg2, int len2, Oid collid)
/*
* Unfortunately, there is no strncoll(), so in the non-C locale case we
- * have to do some memory copying. This turns out to be significantly
+ * have to do some memory copying. This turns out to be significantly
* slower, so we optimize the case where LC_COLLATE is C. We also try to
* optimize relatively-short strings by avoiding palloc/pfree overhead.
*/
@@ -2266,7 +2266,7 @@ textToQualifiedNameList(text *textval)
* SplitIdentifierString --- parse a string containing identifiers
*
* This is the guts of textToQualifiedNameList, and is exported for use in
- * other situations such as parsing GUC variables. In the GUC case, it's
+ * other situations such as parsing GUC variables. In the GUC case, it's
* important to avoid memory leaks, so the API is designed to minimize the
* amount of stuff that needs to be allocated and freed.
*
@@ -2274,7 +2274,7 @@ textToQualifiedNameList(text *textval)
* rawstring: the input string; must be overwritable! On return, it's
* been modified to contain the separated identifiers.
* separator: the separator punctuation expected between identifiers
- * (typically '.' or ','). Whitespace may also appear around
+ * (typically '.' or ','). Whitespace may also appear around
* identifiers.
* Outputs:
* namelist: filled with a palloc'd list of pointers to identifiers within
@@ -2343,7 +2343,7 @@ SplitIdentifierString(char *rawstring, char separator,
*
* XXX because we want to overwrite the input in-place, we cannot
* support a downcasing transformation that increases the string
- * length. This is not a problem given the current implementation
+ * length. This is not a problem given the current implementation
* of downcase_truncate_identifier, but we'll probably have to do
* something about this someday.
*/
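
The "must be overwritable" contract described above becomes clear in a much-simplified sketch (no quoted identifiers, no downcasing, fixed-size output array; the function below is an invented stand-in, not SplitIdentifierString itself): separators and surrounding whitespace are overwritten with NULs, so the output list is just pointers into the caller's buffer and nothing but the pointer array needs allocating.

    #include <ctype.h>
    #include <stdio.h>

    static int
    split_identifier_string(char *rawstring, char separator,
                            char **namelist, int maxnames)
    {
        int         count = 0;
        char       *p = rawstring;

        while (*p)
        {
            while (isspace((unsigned char) *p))
                p++;                /* skip leading whitespace */
            if (*p == '\0' || count == maxnames)
                break;
            namelist[count++] = p;  /* identifier starts here */
            while (*p && *p != separator && !isspace((unsigned char) *p))
                p++;
            while (isspace((unsigned char) *p))
                *p++ = '\0';        /* trim trailing whitespace in place */
            if (*p == separator)
                *p++ = '\0';        /* terminate identifier, consume separator */
        }
        return count;
    }

    int
    main(void)
    {
        char        buf[] = " pg_catalog , public ,myschema";
        char       *names[8];
        int         n = split_identifier_string(buf, ',', names, 8);
        int         i;

        for (i = 0; i < n; i++)
            printf("[%s]\n", names[i]); /* [pg_catalog] [public] [myschema] */
        return 0;
    }
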
@@ -2694,7 +2694,7 @@ check_replace_text_has_escape_char(const text *replace_text)
* appendStringInfoRegexpSubstr
*
* Append replace_text to str, substituting regexp back references for
- * \n escapes. start_ptr is the start of the match in the source string,
+ * \n escapes. start_ptr is the start of the match in the source string,
* at logical character position data_pos.
*/
static void
@@ -2777,7 +2777,7 @@ appendStringInfoRegexpSubstr(StringInfo str, text *replace_text,
if (so != -1 && eo != -1)
{
/*
- * Copy the text that is back reference of regexp. Note so and eo
+ * Copy the text that is back reference of regexp. Note so and eo
* are counted in characters not bytes.
*/
char *chunk_start;
diff --git a/src/backend/utils/adt/xml.c b/src/backend/utils/adt/xml.c
index 3261eac1ce8..bfaf0a0be70 100644
--- a/src/backend/utils/adt/xml.c
+++ b/src/backend/utils/adt/xml.c
@@ -19,7 +19,7 @@
* fail. For one thing, this avoids having to manage variant catalog
* installations. But it also has nice effects such as that you can
* dump a database containing XML type data even if the server is not
- * linked with libxml. Thus, make sure xml_out() works even if nothing
+ * linked with libxml. Thus, make sure xml_out() works even if nothing
* else does.
*/
@@ -254,7 +254,7 @@ xml_out(PG_FUNCTION_ARGS)
xmltype *x = PG_GETARG_XML_P(0);
/*
- * xml_out removes the encoding property in all cases. This is because we
+ * xml_out removes the encoding property in all cases. This is because we
* cannot control from here whether the datum will be converted to a
* different client encoding, so we'd do more harm than good by including
* it.
@@ -425,7 +425,7 @@ xmlcomment(PG_FUNCTION_ARGS)
/*
* TODO: xmlconcat needs to merge the notations and unparsed entities
- * of the argument values. Not very important in practice, though.
+ * of the argument values. Not very important in practice, though.
*/
xmltype *
xmlconcat(List *args)
@@ -559,7 +559,7 @@ xmlelement(XmlExprState *xmlExpr, ExprContext *econtext)
/*
* We first evaluate all the arguments, then start up libxml and create
- * the result. This avoids issues if one of the arguments involves a call
+ * the result. This avoids issues if one of the arguments involves a call
* to some other function or subsystem that wants to use libxml on its own
* terms.
*/
@@ -849,7 +849,7 @@ xml_is_document(xmltype *arg)
* pg_xml_init --- set up for use of libxml
*
* This should be called by each function that is about to use libxml
- * facilities. It has two responsibilities: verify compatibility with the
+ * facilities. It has two responsibilities: verify compatibility with the
* loaded libxml version (done on first call in a session) and establish
* or re-establish our libxml error handler. The latter needs to be done
* anytime we might have passed control to add-on modules (eg libperl) which
@@ -909,7 +909,7 @@ pg_xml_init(void)
resetStringInfo(xml_err_buf);
/*
- * We re-establish the error callback function every time. This makes
+ * We re-establish the error callback function every time. This makes
* it safe for other subsystems (PL/Perl, say) to also use libxml with
* their own callbacks ... so long as they likewise set up the
* callbacks on every use. It's cheap enough to not be worth worrying
@@ -1116,7 +1116,7 @@ finished:
/*
* Write an XML declaration. On output, we adjust the XML declaration
- * as follows. (These rules are the moral equivalent of the clause
+ * as follows. (These rules are the moral equivalent of the clause
* "Serialization of an XML value" in the SQL standard.)
*
* We try to avoid generating an XML declaration if possible. This is
@@ -1638,8 +1638,8 @@ map_xml_name_to_sql_identifier(char *name)
*
* When xml_escape_strings is true, then certain characters in string
* values are replaced by entity references (&lt; etc.), as specified
- * in SQL/XML:2008 section 9.8 GR 9) a) iii). This is normally what is
- * wanted. The false case is mainly useful when the resulting value
+ * in SQL/XML:2008 section 9.8 GR 9) a) iii). This is normally what is
+ * wanted. The false case is mainly useful when the resulting value
* is used with xmlTextWriterWriteAttribute() to write out an
* attribute, because that function does the escaping itself.
*/
@@ -1909,13 +1909,13 @@ _SPI_strdup(const char *s)
*
* There are two kinds of mappings: Mapping SQL data (table contents)
* to XML documents, and mapping SQL structure (the "schema") to XML
- * Schema. And there are functions that do both at the same time.
+ * Schema. And there are functions that do both at the same time.
*
* Then you can map a database, a schema, or a table, each in both
* ways. This breaks down recursively: Mapping a database invokes
* mapping schemas, which invokes mapping tables, which invokes
* mapping rows, which invokes mapping columns, although you can't
- * call the last two from the outside. Because of this, there are a
+ * call the last two from the outside. Because of this, there are a
* number of xyz_internal() functions which are to be called both from
* the function manager wrapper and from some upper layer in a
* recursive call.
@@ -1924,7 +1924,7 @@ _SPI_strdup(const char *s)
* nulls, tableforest, and targetns mean.
*
* Some style guidelines for XML output: Use double quotes for quoting
- * XML attributes. Indent XML elements by two spaces, but remember
+ * XML attributes. Indent XML elements by two spaces, but remember
* that a lot of code is called recursively at different levels, so
* it's better not to indent rather than create output that indents
* and outdents weirdly. Add newlines to make the output look nice.
@@ -2088,12 +2088,12 @@ cursor_to_xml(PG_FUNCTION_ARGS)
* Write the start tag of the root element of a data mapping.
*
* top_level means that this is the very top level of the eventual
- * output. For example, when the user calls table_to_xml, then a call
+ * output. For example, when the user calls table_to_xml, then a call
* with a table name to this function is the top level. When the user
* calls database_to_xml, then a call with a schema name to this
* function is not the top level. If top_level is false, then the XML
* namespace declarations are omitted, because they supposedly already
- * appeared earlier in the output. Repeating them is not wrong, but
+ * appeared earlier in the output. Repeating them is not wrong, but
* it looks ugly.
*/
static void
@@ -3031,7 +3031,7 @@ map_sql_typecoll_to_xmlschema_types(List *tupdesc_list)
* SQL/XML:2008 sections 9.5 and 9.6.
*
* (The distinction between 9.5 and 9.6 is basically that 9.6 adds
- * a name attribute, which this function does. The name-less version
+ * a name attribute, which this function does. The name-less version
* 9.5 doesn't appear to be required anywhere.)
*/
static const char *
@@ -3209,7 +3209,7 @@ map_sql_type_to_xmlschema_type(Oid typeoid, int typmod)
/*
* Map an SQL row to an XML element, taking the row from the active
- * SPI cursor. See also SQL/XML:2008 section 9.10.
+ * SPI cursor. See also SQL/XML:2008 section 9.10.
*/
static void
SPI_sql_row_to_xmlelement(int rownum, StringInfo result, char *tablename,
diff --git a/src/backend/utils/cache/attoptcache.c b/src/backend/utils/cache/attoptcache.c
index 7018ccfe62a..b6f00478e9c 100644
--- a/src/backend/utils/cache/attoptcache.c
+++ b/src/backend/utils/cache/attoptcache.c
@@ -48,7 +48,7 @@ typedef struct
* Flush all cache entries when pg_attribute is updated.
*
* When pg_attribute is updated, we must flush the cache entry at least
- * for that attribute. Currently, we just flush them all. Since attribute
+ * for that attribute. Currently, we just flush them all. Since attribute
* options are not currently used in performance-critical paths (such as
* query execution), this seems OK.
*/
diff --git a/src/backend/utils/cache/catcache.c b/src/backend/utils/cache/catcache.c
index 00f17d9edc6..81dcf3361f8 100644
--- a/src/backend/utils/cache/catcache.c
+++ b/src/backend/utils/cache/catcache.c
@@ -823,7 +823,7 @@ InitCatCache(int id,
* CatalogCacheInitializeCache
*
* This function does final initialization of a catcache: obtain the tuple
- * descriptor and set up the hash and equality function links. We assume
+ * descriptor and set up the hash and equality function links. We assume
* that the relcache entry can be opened at this point!
*/
#ifdef CACHEDEBUG
@@ -1048,7 +1048,7 @@ IndexScanOK(CatCache *cache, ScanKey cur_skey)
* if necessary (on the first access to a particular cache).
*
* The result is NULL if not found, or a pointer to a HeapTuple in
- * the cache. The caller must not modify the tuple, and must call
+ * the cache. The caller must not modify the tuple, and must call
* ReleaseCatCache() when done with it.
*
* The search key values should be expressed as Datums of the key columns'
@@ -1175,8 +1175,8 @@ SearchCatCache(CatCache *cache,
* the relation --- for example, due to shared-cache-inval messages being
* processed during heap_open(). This is OK. It's even possible for one
* of those lookups to find and enter the very same tuple we are trying to
- * fetch here. If that happens, we will enter a second copy of the tuple
- * into the cache. The first copy will never be referenced again, and
+ * fetch here. If that happens, we will enter a second copy of the tuple
+ * into the cache. The first copy will never be referenced again, and
* will eventually age out of the cache, so there's no functional problem.
* This case is rare enough that it's not worth expending extra cycles to
* detect.
@@ -1215,7 +1215,7 @@ SearchCatCache(CatCache *cache,
*
* In bootstrap mode, we don't build negative entries, because the cache
* invalidation mechanism isn't alive and can't clear them if the tuple
- * gets created later. (Bootstrap doesn't do UPDATEs, so it doesn't need
+ * gets created later. (Bootstrap doesn't do UPDATEs, so it doesn't need
* cache inval for that.)
*/
if (ct == NULL)
@@ -1502,7 +1502,7 @@ SearchCatCacheList(CatCache *cache,
/*
* We are now past the last thing that could trigger an elog before we
* have finished building the CatCList and remembering it in the
- * resource owner. So it's OK to fall out of the PG_TRY, and indeed
+ * resource owner. So it's OK to fall out of the PG_TRY, and indeed
* we'd better do so before we start marking the members as belonging
* to the list.
*/
@@ -1592,7 +1592,7 @@ ReleaseCatCacheList(CatCList *list)
/*
* CatalogCacheCreateEntry
* Create a new CatCTup entry, copying the given HeapTuple and other
- * supplied data into it. The new entry initially has refcount 0.
+ * supplied data into it. The new entry initially has refcount 0.
*/
static CatCTup *
CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp,
@@ -1727,7 +1727,7 @@ build_dummy_tuple(CatCache *cache, int nkeys, ScanKey skeys)
* the specified relation, find all catcaches it could be in, compute the
* correct hash value for each such catcache, and call the specified function
* to record the cache id, hash value, and tuple ItemPointer in inval.c's
- * lists. CatalogCacheIdInvalidate will be called later, if appropriate,
+ * lists. CatalogCacheIdInvalidate will be called later, if appropriate,
* using the recorded information.
*
* Note that it is irrelevant whether the given tuple is actually loaded
diff --git a/src/backend/utils/cache/inval.c b/src/backend/utils/cache/inval.c
index 4249bd33765..396cc0bea69 100644
--- a/src/backend/utils/cache/inval.c
+++ b/src/backend/utils/cache/inval.c
@@ -29,23 +29,23 @@
*
* If we successfully complete the transaction, we have to broadcast all
* these invalidation events to other backends (via the SI message queue)
- * so that they can flush obsolete entries from their caches. Note we have
+ * so that they can flush obsolete entries from their caches. Note we have
* to record the transaction commit before sending SI messages, otherwise
* the other backends won't see our updated tuples as good.
*
* When a subtransaction aborts, we can process and discard any events
- * it has queued. When a subtransaction commits, we just add its events
+ * it has queued. When a subtransaction commits, we just add its events
* to the pending lists of the parent transaction.
*
* In short, we need to remember until xact end every insert or delete
- * of a tuple that might be in the system caches. Updates are treated as
+ * of a tuple that might be in the system caches. Updates are treated as
* two events, delete + insert, for simplicity. (There are cases where
* it'd be possible to record just one event, but we don't currently try.)
*
* We do not need to register EVERY tuple operation in this way, just those
- * on tuples in relations that have associated catcaches. We do, however,
+ * on tuples in relations that have associated catcaches. We do, however,
* have to register every operation on every tuple that *could* be in a
- * catcache, whether or not it currently is in our cache. Also, if the
+ * catcache, whether or not it currently is in our cache. Also, if the
* tuple is in a relation that has multiple catcaches, we need to register
* an invalidation message for each such catcache. catcache.c's
* PrepareToInvalidateCacheTuple() routine provides the knowledge of which
@@ -103,7 +103,7 @@
/*
* To minimize palloc traffic, we keep pending requests in successively-
* larger chunks (a slightly more sophisticated version of an expansible
- * array). All request types can be stored as SharedInvalidationMessage
+ * array). All request types can be stored as SharedInvalidationMessage
* records. The ordering of requests within a list is never significant.
*/
typedef struct InvalidationChunk
@@ -630,7 +630,7 @@ PrepareForTupleInvalidation(Relation relation, HeapTuple tuple)
* This essentially means that only backends in this same database
* will react to the relcache flush request. This is in fact
* appropriate, since only those backends could see our pg_attribute
- * change anyway. It looks a bit ugly though. (In practice, shared
+ * change anyway. It looks a bit ugly though. (In practice, shared
* relations can't have schema changes after bootstrap, so we should
* never come here for a shared rel anyway.)
*/
@@ -642,7 +642,7 @@ PrepareForTupleInvalidation(Relation relation, HeapTuple tuple)
/*
* When a pg_index row is updated, we should send out a relcache inval
- * for the index relation. As above, we don't know the shared status
+ * for the index relation. As above, we don't know the shared status
* of the index, but in practice it doesn't matter since indexes of
* shared catalogs can't have such updates.
*/
@@ -688,7 +688,7 @@ AcceptInvalidationMessages(void)
*
* If you're a glutton for punishment, try CLOBBER_CACHE_RECURSIVELY. This
* slows things by at least a factor of 10000, so I wouldn't suggest
- * trying to run the entire regression tests that way. It's useful to try
+ * trying to run the entire regression tests that way. It's useful to try
* a few simple tests, to make sure that cache reload isn't subject to
* internal cache-flush hazards, but after you've done a few thousand
* recursive reloads it's unlikely you'll learn more.
@@ -901,12 +901,12 @@ ProcessCommittedInvalidationMessages(SharedInvalidationMessage *msgs,
* If isCommit, we must send out the messages in our PriorCmdInvalidMsgs list
* to the shared invalidation message queue. Note that these will be read
* not only by other backends, but also by our own backend at the next
- * transaction start (via AcceptInvalidationMessages). This means that
+ * transaction start (via AcceptInvalidationMessages). This means that
* we can skip immediate local processing of anything that's still in
* CurrentCmdInvalidMsgs, and just send that list out too.
*
* If not isCommit, we are aborting, and must locally process the messages
- * in PriorCmdInvalidMsgs. No messages need be sent to other backends,
+ * in PriorCmdInvalidMsgs. No messages need be sent to other backends,
* since they'll not have seen our changed tuples anyway. We can forget
* about CurrentCmdInvalidMsgs too, since those changes haven't touched
* the caches yet.
@@ -965,11 +965,11 @@ AtEOXact_Inval(bool isCommit)
* parent's PriorCmdInvalidMsgs list.
*
* If not isCommit, we are aborting, and must locally process the messages
- * in PriorCmdInvalidMsgs. No messages need be sent to other backends.
+ * in PriorCmdInvalidMsgs. No messages need be sent to other backends.
* We can forget about CurrentCmdInvalidMsgs too, since those changes haven't
* touched the caches yet.
*
- * In any case, pop the transaction stack. We need not physically free memory
+ * In any case, pop the transaction stack. We need not physically free memory
* here, since CurTransactionContext is about to be emptied anyway
* (if aborting). Beware of the possibility of aborting the same nesting
* level twice, though.
@@ -1025,7 +1025,7 @@ AtEOSubXact_Inval(bool isCommit)
* in a transaction.
*
* Here, we send no messages to the shared queue, since we don't know yet if
- * we will commit. We do need to locally process the CurrentCmdInvalidMsgs
+ * we will commit. We do need to locally process the CurrentCmdInvalidMsgs
* list, so as to flush our caches of any entries we have outdated in the
* current command. We then move the current-cmd list over to become part
* of the prior-cmds list.
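
A toy model of the bookkeeping these inval.c hunks describe, with invented message and list types (the real lists are chunked and dynamically sized): messages accumulate in a current-command list, are replayed locally and moved to the prior-commands list at command end, and at transaction end the prior list is either broadcast (commit, along with anything still in the current list) or replayed locally (abort, since no other backend ever saw the aborted tuples).

    #include <stdio.h>

    #define MAXMSGS 16

    typedef struct { int catalog_id; } InvalMsg;

    static InvalMsg currentCmdMsgs[MAXMSGS];
    static int  nCurrent = 0;
    static InvalMsg priorCmdMsgs[MAXMSGS];
    static int  nPrior = 0;

    static void local_execute(InvalMsg m) { printf("flush cache %d\n", m.catalog_id); }
    static void broadcast(InvalMsg m)     { printf("send SI msg %d\n", m.catalog_id); }

    static void
    command_end(void)
    {
        int         i;

        /* flush our own caches now, then keep the msgs for commit time */
        for (i = 0; i < nCurrent; i++)
        {
            local_execute(currentCmdMsgs[i]);
            if (nPrior < MAXMSGS)
                priorCmdMsgs[nPrior++] = currentCmdMsgs[i];
        }
        nCurrent = 0;
    }

    static void
    xact_end(int is_commit)
    {
        int         i;

        if (is_commit)
        {
            /* send everything; our backend re-reads it at next xact start */
            for (i = 0; i < nPrior; i++)
                broadcast(priorCmdMsgs[i]);
            for (i = 0; i < nCurrent; i++)
                broadcast(currentCmdMsgs[i]);
        }
        else
        {
            /* abort: nobody saw our tuples; just fix our own caches */
            for (i = 0; i < nPrior; i++)
                local_execute(priorCmdMsgs[i]);
        }
        nCurrent = nPrior = 0;
    }

    int
    main(void)
    {
        currentCmdMsgs[nCurrent++] = (InvalMsg) {1259};  /* e.g. a pg_class row */
        command_end();
        currentCmdMsgs[nCurrent++] = (InvalMsg) {1249};  /* e.g. pg_attribute */
        xact_end(1);
        return 0;
    }
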
@@ -1155,7 +1155,7 @@ CacheInvalidateRelcacheByRelid(Oid relid)
*
* Sending this type of invalidation msg forces other backends to close open
* smgr entries for the rel. This should be done to flush dangling open-file
- * references when the physical rel is being dropped or truncated. Because
+ * references when the physical rel is being dropped or truncated. Because
* these are nontransactional (i.e., not-rollback-able) operations, we just
* send the inval message immediately without any queuing.
*
diff --git a/src/backend/utils/cache/lsyscache.c b/src/backend/utils/cache/lsyscache.c
index 6435541bb63..c90909c95cd 100644
--- a/src/backend/utils/cache/lsyscache.c
+++ b/src/backend/utils/cache/lsyscache.c
@@ -182,13 +182,13 @@ get_opfamily_member(Oid opfamily, Oid lefttype, Oid righttype,
* (This indicates that the operator is not a valid ordering operator.)
*
* Note: the operator could be registered in multiple families, for example
- * if someone were to build a "reverse sort" opfamily. This would result in
+ * if someone were to build a "reverse sort" opfamily. This would result in
* uncertainty as to whether "ORDER BY USING op" would default to NULLS FIRST
* or NULLS LAST, as well as inefficient planning due to failure to match up
* pathkeys that should be the same. So we want a determinate result here.
* Because of the way the syscache search works, we'll use the interpretation
* associated with the opfamily with smallest OID, which is probably
- * determinate enough. Since there is no longer any particularly good reason
+ * determinate enough. Since there is no longer any particularly good reason
* to build reverse-sort opfamilies, it doesn't seem worth expending any
* additional effort on ensuring consistency.
*/
@@ -386,7 +386,7 @@ get_ordering_op_for_equality_op(Oid opno, bool use_lhs_type)
*
* The planner currently uses simple equal() tests to compare the lists
* returned by this function, which makes the list order relevant, though
- * strictly speaking it should not be. Because of the way syscache list
+ * strictly speaking it should not be. Because of the way syscache list
* searches are handled, in normal operation the result will be sorted by OID
* so everything works fine. If running with system index usage disabled,
* the result ordering is unspecified and hence the planner might fail to
@@ -1195,7 +1195,7 @@ op_mergejoinable(Oid opno, Oid inputtype)
*
* In some cases (currently only array_eq), hashjoinability depends on the
* specific input data type the operator is invoked for, so that must be
- * passed as well. We currently assume that only one input's type is needed
+ * passed as well. We currently assume that only one input's type is needed
* to check this --- by convention, pass the left input's data type.
*/
bool
@@ -1825,7 +1825,7 @@ get_typbyval(Oid typid)
* A two-fer: given the type OID, return both typlen and typbyval.
*
* Since both pieces of info are needed to know how to copy a Datum,
- * many places need both. Might as well get them with one cache lookup
+ * many places need both. Might as well get them with one cache lookup
* instead of two. Also, this routine raises an error instead of
* returning a bogus value when given a bad type OID.
*/
diff --git a/src/backend/utils/cache/plancache.c b/src/backend/utils/cache/plancache.c
index 5dcea3db33a..8894ff8076f 100644
--- a/src/backend/utils/cache/plancache.c
+++ b/src/backend/utils/cache/plancache.c
@@ -12,16 +12,16 @@
*
* The plan cache manager itself is principally responsible for tracking
* whether cached plans should be invalidated because of schema changes in
- * the objects they depend on. When (and if) the next demand for a cached
+ * the objects they depend on. When (and if) the next demand for a cached
* plan occurs, the query will be replanned. Note that this could result
* in an error, for example if a column referenced by the query is no
- * longer present. The creator of a cached plan can specify whether it
+ * longer present. The creator of a cached plan can specify whether it
* is allowable for the query to change output tupdesc on replan (this
* could happen with "SELECT *" for example) --- if so, it's up to the
* caller to notice changes and cope with them.
*
* Currently, we track exactly the dependencies of plans on relations and
- * user-defined functions. On relcache invalidation events or pg_proc
+ * user-defined functions. On relcache invalidation events or pg_proc
* syscache invalidation events, we invalidate just those plans that depend
* on the particular object being modified. (Note: this scheme assumes
* that any table modification that requires replanning will generate a
@@ -209,7 +209,7 @@ CreateCachedPlan(Node *raw_parse_tree,
* avoids extra copy steps during plan construction. If the query ever does
* need replanning, we'll generate a separate new CachedPlan at that time, but
* the CachedPlanSource and the initial CachedPlan share the caller-provided
- * context and go away together when neither is needed any longer. (Because
+ * context and go away together when neither is needed any longer. (Because
* the parser and planner generate extra cruft in addition to their real
* output, this approach means that the context probably contains a bunch of
* useless junk as well as the useful trees. Hence, this method is a
@@ -300,7 +300,7 @@ FastCreateCachedPlan(Node *raw_parse_tree,
* CachedPlanSetParserHook: set up to use parser callback hooks
*
* Use this when a caller wants to manage parameter information via parser
- * callbacks rather than a fixed parameter-types list. Beware that the
+ * callbacks rather than a fixed parameter-types list. Beware that the
* information pointed to by parserSetupArg must be valid for as long as
* the cached plan might be replanned!
*/
@@ -377,7 +377,7 @@ StoreCachedPlan(CachedPlanSource *plansource,
{
/*
* Planner already extracted dependencies, we don't have to ... except
- * in the case of EXPLAIN. We assume here that EXPLAIN can't appear
+ * in the case of EXPLAIN. We assume here that EXPLAIN can't appear
* in a list with other commands.
*/
plan->relationOids = plan->invalItems = NIL;
@@ -410,7 +410,7 @@ StoreCachedPlan(CachedPlanSource *plansource,
* DropCachedPlan: destroy a cached plan.
*
* Actually this only destroys the CachedPlanSource: the referenced CachedPlan
- * is released, but not destroyed until its refcount goes to zero. That
+ * is released, but not destroyed until its refcount goes to zero. That
* handles the situation where DropCachedPlan is called while the plan is
* still in use.
*/
@@ -586,7 +586,7 @@ RevalidateCachedPlan(CachedPlanSource *plansource, bool useResOwner)
}
/*
- * Check or update the result tupdesc. XXX should we use a weaker
+ * Check or update the result tupdesc. XXX should we use a weaker
* condition than equalTupleDescs() here?
*/
resultDesc = PlanCacheComputeResultDesc(slist);
@@ -651,7 +651,7 @@ RevalidateCachedPlan(CachedPlanSource *plansource, bool useResOwner)
*
* Note: useResOwner = false is used for releasing references that are in
* persistent data structures, such as the parent CachedPlanSource or a
- * Portal. Transient references should be protected by a resource owner.
+ * Portal. Transient references should be protected by a resource owner.
*/
void
ReleaseCachedPlan(CachedPlan *plan, bool useResOwner)
@@ -925,7 +925,7 @@ plan_list_is_transient(List *stmt_list)
/*
* PlanCacheComputeResultDesc: given a list of either fully-planned statements
- * or Queries, determine the result tupledesc it will produce. Returns NULL
+ * or Queries, determine the result tupledesc it will produce. Returns NULL
* if the execution will not return tuples.
*
* Note: the result is created or copied into current memory context.
diff --git a/src/backend/utils/cache/relcache.c b/src/backend/utils/cache/relcache.c
index 29ae3878ef3..6d5ac6c8564 100644
--- a/src/backend/utils/cache/relcache.c
+++ b/src/backend/utils/cache/relcache.c
@@ -124,7 +124,7 @@ bool criticalSharedRelcachesBuilt = false;
/*
* This counter counts relcache inval events received since backend startup
- * (but only for rels that are actually in cache). Presently, we use it only
+ * (but only for rels that are actually in cache). Presently, we use it only
* to detect whether data about to be written by write_relcache_init_file()
* might already be obsolete.
*/
@@ -449,7 +449,7 @@ RelationBuildTupleDesc(Relation relation)
Int16GetDatum(0));
/*
- * Open pg_attribute and begin a scan. Force heap scan if we haven't yet
+ * Open pg_attribute and begin a scan. Force heap scan if we haven't yet
* built the critical relcache entries (this includes initdb and startup
* without a pg_internal.init file).
*/
@@ -512,7 +512,7 @@ RelationBuildTupleDesc(Relation relation)
/*
* The attcacheoff values we read from pg_attribute should all be -1
- * ("unknown"). Verify this if assert checking is on. They will be
+ * ("unknown"). Verify this if assert checking is on. They will be
* computed when and if needed during tuple access.
*/
#ifdef USE_ASSERT_CHECKING
@@ -526,7 +526,7 @@ RelationBuildTupleDesc(Relation relation)
/*
* However, we can easily set the attcacheoff value for the first
- * attribute: it must be zero. This eliminates the need for special cases
+ * attribute: it must be zero. This eliminates the need for special cases
* for attnum=1 that used to exist in fastgetattr() and index_getattr().
*/
if (relation->rd_rel->relnatts > 0)
@@ -582,7 +582,7 @@ RelationBuildTupleDesc(Relation relation)
* each relcache entry that has associated rules. The context is used
* just for rule info, not for any other subsidiary data of the relcache
* entry, because that keeps the update logic in RelationClearRelation()
- * manageable. The other subsidiary data structures are simple enough
+ * manageable. The other subsidiary data structures are simple enough
* to be easy to free explicitly, anyway.
*/
static void
@@ -691,9 +691,9 @@ RelationBuildRuleLock(Relation relation)
/*
* We want the rule's table references to be checked as though by the
- * table owner, not the user referencing the rule. Therefore, scan
+ * table owner, not the user referencing the rule. Therefore, scan
* through the rule's actions and set the checkAsUser field on all
- * rtable entries. We have to look at the qual as well, in case it
+ * rtable entries. We have to look at the qual as well, in case it
* contains sublinks.
*
* The reason for doing this when the rule is loaded, rather than when
@@ -1036,7 +1036,7 @@ RelationInitIndexAccessInfo(Relation relation)
amsupport = aform->amsupport;
/*
- * Make the private context to hold index access info. The reason we need
+ * Make the private context to hold index access info. The reason we need
* a context, and not just a couple of pallocs, is so that we won't leak
* any subsidiary info attached to fmgr lookup records.
*
@@ -1084,7 +1084,7 @@ RelationInitIndexAccessInfo(Relation relation)
/*
* indcollation cannot be referenced directly through the C struct,
- * because it comes after the variable-width indkey field. Must extract
+ * because it comes after the variable-width indkey field. Must extract
* the datum the hard way...
*/
indcollDatum = fastgetattr(relation->rd_indextuple,
@@ -1109,7 +1109,7 @@ RelationInitIndexAccessInfo(Relation relation)
/*
* Fill the support procedure OID array, as well as the info about
- * opfamilies and opclass input types. (aminfo and supportinfo are left
+ * opfamilies and opclass input types. (aminfo and supportinfo are left
* as zeroes, and are filled on-the-fly when used)
*/
IndexSupportInitialize(indclass, relation->rd_support,
@@ -1197,7 +1197,7 @@ IndexSupportInitialize(oidvector *indclass,
* Note there is no provision for flushing the cache. This is OK at the
* moment because there is no way to ALTER any interesting properties of an
* existing opclass --- all you can do is drop it, which will result in
- * a useless but harmless dead entry in the cache. To support altering
+ * a useless but harmless dead entry in the cache. To support altering
* opclass membership (not the same as opfamily membership!), we'd need to
* be able to flush this cache as well as the contents of relcache entries
* for indexes.
@@ -1306,7 +1306,7 @@ LookupOpclassInfo(Oid operatorClassOid,
heap_close(rel, AccessShareLock);
/*
- * Scan pg_amproc to obtain support procs for the opclass. We only fetch
+ * Scan pg_amproc to obtain support procs for the opclass. We only fetch
* the default ones (those with lefttype = righttype = opcintype).
*/
if (numSupport > 0)
@@ -1825,7 +1825,7 @@ RelationDestroyRelation(Relation relation)
*
* NB: when rebuilding, we'd better hold some lock on the relation,
* else the catalog data we need to read could be changing under us.
- * Also, a rel to be rebuilt had better have refcnt > 0. This is because
+ * Also, a rel to be rebuilt had better have refcnt > 0. This is because
* an sinval reset could happen while we're accessing the catalogs, and
* the rel would get blown away underneath us by RelationCacheInvalidate
* if it has zero refcnt.
@@ -1848,7 +1848,7 @@ RelationClearRelation(Relation relation, bool rebuild)
/*
* Make sure smgr and lower levels close the relation's files, if they
* weren't closed already. If the relation is not getting deleted, the
- * next smgr access should reopen the files automatically. This ensures
+ * next smgr access should reopen the files automatically. This ensures
* that the low-level file access state is updated after, say, a vacuum
* truncation.
*/
@@ -1860,7 +1860,7 @@ RelationClearRelation(Relation relation, bool rebuild)
* in case it is a mapped relation whose mapping changed.
*
* If it's a nailed index, then we need to re-read the pg_class row to see
- * if its relfilenode changed. We can't necessarily do that here, because
+ * if its relfilenode changed. We can't necessarily do that here, because
* we might be in a failed transaction. We assume it's okay to do it if
* there are open references to the relcache entry (cf notes for
* AtEOXact_RelationCache). Otherwise just mark the entry as possibly
@@ -1921,7 +1921,7 @@ RelationClearRelation(Relation relation, bool rebuild)
* over from the old entry). This is to avoid trouble in case an
* error causes us to lose control partway through. The old entry
* will still be marked !rd_isvalid, so we'll try to rebuild it again
- * on next access. Meanwhile it's not any less valid than it was
+ * on next access. Meanwhile it's not any less valid than it was
* before, so any code that might expect to continue accessing it
* isn't hurt by the rebuild failure. (Consider for example a
* subtransaction that ALTERs a table and then gets canceled partway
@@ -2110,7 +2110,7 @@ RelationCacheInvalidateEntry(Oid relationId)
/*
* RelationCacheInvalidate
* Blow away cached relation descriptors that have zero reference counts,
- * and rebuild those with positive reference counts. Also reset the smgr
+ * and rebuild those with positive reference counts. Also reset the smgr
* relation cache and re-read relation mapping data.
*
* This is currently used only to recover from SI message buffer overflow,
@@ -2123,7 +2123,7 @@ RelationCacheInvalidateEntry(Oid relationId)
* We do this in two phases: the first pass deletes deletable items, and
* the second one rebuilds the rebuildable items. This is essential for
* safety, because hash_seq_search only copes with concurrent deletion of
- * the element it is currently visiting. If a second SI overflow were to
+ * the element it is currently visiting. If a second SI overflow were to
* occur while we are walking the table, resulting in recursive entry to
* this routine, we could crash because the inner invocation blows away
* the entry next to be visited by the outer scan. But this way is OK,
@@ -2274,7 +2274,7 @@ AtEOXact_RelationCache(bool isCommit)
* unless there is actually something for this routine to do. Other than
* the debug-only Assert checks, most transactions don't create any work
* for us to do here, so we keep a static flag that gets set if there is
- * anything to do. (Currently, this means either a relation is created in
+ * anything to do. (Currently, this means either a relation is created in
* the current xact, or one is given a new relfilenode, or an index list
* is forced.) For simplicity, the flag remains set till end of top-level
* transaction, even though we could clear it at subtransaction end in
@@ -2570,7 +2570,7 @@ RelationBuildLocalRelation(const char *relname,
/*
* Insert relation physical and logical identifiers (OIDs) into the right
- * places. Note that the physical ID (relfilenode) is initially the same
+ * places. Note that the physical ID (relfilenode) is initially the same
* as the logical ID (OID); except that for a mapped relation, we set
* relfilenode to zero and rely on RelationInitPhysicalAddr to consult the
* map.
@@ -2804,7 +2804,7 @@ RelationCacheInitializePhase2(void)
oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
/*
- * Try to load the shared relcache cache file.  If unsuccessful, bootstrap
+ * Try to load the shared relcache cache file. If unsuccessful, bootstrap
* the cache with pre-made descriptors for the critical shared catalogs.
*/
if (!load_relcache_init_file(true))
@@ -2884,9 +2884,9 @@ RelationCacheInitializePhase3(void)
/*
* If we didn't get the critical system indexes loaded into relcache, do
- * so now.  These are critical because the catcache and/or opclass cache
+ * so now. These are critical because the catcache and/or opclass cache
* depend on them for fetches done during relcache load. Thus, we have an
- * infinite-recursion problem.  We can break the recursion by doing
+ * infinite-recursion problem. We can break the recursion by doing
* heapscans instead of indexscans at certain key spots. To avoid hobbling
* performance, we only want to do that until we have the critical indexes
* loaded into relcache. Thus, the flag criticalRelcachesBuilt is used to
@@ -2903,7 +2903,7 @@ RelationCacheInitializePhase3(void)
* RewriteRelRulenameIndexId and TriggerRelidNameIndexId are not critical
* in the same way as the others, because the critical catalogs don't
* (currently) have any rules or triggers, and so these indexes can be
- * rebuilt without inducing recursion.  However they are used during
+ * rebuilt without inducing recursion. However they are used during
* relcache load when a rel does have rules or triggers, so we choose to
* nail them for performance reasons.
*/
@@ -2934,7 +2934,7 @@ RelationCacheInitializePhase3(void)
*
* DatabaseNameIndexId isn't critical for relcache loading, but rather for
* initial lookup of MyDatabaseId, without which we'll never find any
- * non-shared catalogs at all.  Autovacuum calls InitPostgres with a
+ * non-shared catalogs at all. Autovacuum calls InitPostgres with a
* database OID, so it instead depends on DatabaseOidIndexId. We also
* need to nail up some indexes on pg_authid and pg_auth_members for use
* during client authentication.
@@ -3360,7 +3360,7 @@ RelationGetIndexList(Relation relation)
/*
* We build the list we intend to return (in the caller's context) while
- * doing the scan.  After successfully completing the scan, we copy that
+ * doing the scan. After successfully completing the scan, we copy that
* list into the relcache entry. This avoids cache-context memory leakage
* if we get some sort of error partway through.
*/
@@ -4260,7 +4260,7 @@ load_relcache_init_file(bool shared)
return true;
/*
- * init file is broken, so do it the hard way.  We don't bother trying to
+ * init file is broken, so do it the hard way. We don't bother trying to
* free the clutter we just allocated; it's not in the relcache so it
* won't hurt.
*/
@@ -4325,7 +4325,7 @@ write_relcache_init_file(bool shared)
}
/*
- * Write a magic number to serve as a file version identifier.  We can
+ * Write a magic number to serve as a file version identifier. We can
* change the magic number whenever the relcache layout changes.
*/
magic = RELCACHE_INIT_FILEMAGIC;
@@ -4550,7 +4550,7 @@ RelationCacheInitFilePostInvalidate(void)
*
* We used to keep the init files across restarts, but that is unsafe in PITR
* scenarios, and even in simple crash-recovery cases there are windows for
- * the init files to become out-of-sync with the database.  So now we just
+ * the init files to become out-of-sync with the database. So now we just
* remove them during startup and expect the first backend launch to rebuild
* them. Of course, this has to happen in each database of the cluster.
*/
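The two-phase delete-then-rebuild pattern described in the RelationCacheInvalidate comment above looks roughly like this (a simplified sketch using dynahash's sequential-scan API; the real function also special-cases nailed relations and rebuild ordering):

    HASH_SEQ_STATUS status;
    RelIdCacheEnt *idhentry;
    List	   *rebuildList = NIL;
    ListCell   *l;

    /* Phase 1: delete what can be deleted; remember the rest. */
    hash_seq_init(&status, RelationIdCache);
    while ((idhentry = (RelIdCacheEnt *) hash_seq_search(&status)) != NULL)
    {
        Relation	relation = idhentry->reldesc;

        if (RelationHasReferenceCountZero(relation))
            RelationClearRelation(relation, false);	/* deleting the current
                                                     * element is safe */
        else
            rebuildList = lappend(rebuildList, relation);
    }

    /* Phase 2: rebuild the survivors, outside the hash_seq_search scan. */
    foreach(l, rebuildList)
        RelationClearRelation((Relation) lfirst(l), true);
    list_free(rebuildList);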
diff --git a/src/backend/utils/cache/relmapper.c b/src/backend/utils/cache/relmapper.c
index a19ee28b53b..6d2a0198588 100644
--- a/src/backend/utils/cache/relmapper.c
+++ b/src/backend/utils/cache/relmapper.c
@@ -23,7 +23,7 @@
* mapped catalogs can only be relocated by operations such as VACUUM FULL
* and CLUSTER, which make no transactionally-significant changes: it must be
* safe for the new file to replace the old, even if the transaction itself
- * aborts.  An important factor here is that the indexes and toast table of
+ * aborts. An important factor here is that the indexes and toast table of
* a mapped catalog must also be mapped, so that the rewrites/relocations of
* all these files commit in a single map file update rather than being tied
* to transaction commit.
@@ -58,13 +58,13 @@
/*
* The map file is critical data: we have no automatic method for recovering
* from loss or corruption of it. We use a CRC so that we can detect
- * corruption.  To minimize the risk of failed updates, the map file should
+ * corruption. To minimize the risk of failed updates, the map file should
* be kept to no more than one standard-size disk sector (ie 512 bytes),
* and we use overwrite-in-place rather than playing renaming games.
* The struct layout below is designed to occupy exactly 512 bytes, which
* might make filesystem updates a bit more efficient.
*
- * Entries in the mappings[] array are in no particular order.  We could
+ * Entries in the mappings[] array are in no particular order. We could
* speed searching by insisting on OID order, but it really shouldn't be
* worth the trouble given the intended size of the mapping sets.
*/
@@ -91,7 +91,7 @@ typedef struct RelMapFile
/*
* The currently known contents of the shared map file and our database's
- * local map file are stored here.  These can be reloaded from disk
+ * local map file are stored here. These can be reloaded from disk
* immediately whenever we receive an update sinval message.
*/
static RelMapFile shared_map;
@@ -294,7 +294,7 @@ merge_map_updates(RelMapFile *map, const RelMapFile *updates, bool add_okay)
* RelationMapRemoveMapping
*
* Remove a relation's entry in the map. This is only allowed for "active"
- * (but not committed) local mappings.  We need it so we can back out the
+ * (but not committed) local mappings. We need it so we can back out the
* entry for the transient target file when doing VACUUM FULL/CLUSTER on
* a mapped relation.
*/
@@ -322,7 +322,7 @@ RelationMapRemoveMapping(Oid relationId)
* RelationMapInvalidate
*
* This routine is invoked for SI cache flush messages. We must re-read
- * the indicated map file.  However, we might receive a SI message in a
+ * the indicated map file. However, we might receive a SI message in a
* process that hasn't yet, and might never, load the mapping files;
* for example the autovacuum launcher, which *must not* try to read
* a local map since it is attached to no particular database.
@@ -390,7 +390,7 @@ AtCCI_RelationMap(void)
*
* During commit, this must be called as late as possible before the actual
* transaction commit, so as to minimize the window where the transaction
- * could still roll back after committing map changes.  Although nothing
+ * could still roll back after committing map changes. Although nothing
* critically bad happens in such a case, we still would prefer that it
* not happen, since we'd possibly be losing useful updates to the relations'
* pg_class row(s).
@@ -457,7 +457,7 @@ AtPrepare_RelationMap(void)
/*
* CheckPointRelationMap
*
- * This is called during a checkpoint.  It must ensure that any relation map
+ * This is called during a checkpoint. It must ensure that any relation map
* updates that were WAL-logged before the start of the checkpoint are
* securely flushed to disk and will not need to be replayed later. This
* seems unlikely to be a performance-critical issue, so we use a simple
@@ -647,7 +647,7 @@ load_relmap_file(bool shared)
*
* Because this may be called during WAL replay when MyDatabaseId,
* DatabasePath, etc aren't valid, we require the caller to pass in suitable
- * values.  The caller is also responsible for being sure no concurrent
+ * values. The caller is also responsible for being sure no concurrent
* map update could be happening.
*/
static void
@@ -675,7 +675,7 @@ write_relmap_file(bool shared, RelMapFile *newmap,
* critical section, so that an open() failure need not force PANIC.
*
* Note: since we use BasicOpenFile, we are nominally responsible for
- * ensuring the fd is closed on error.  In practice, this isn't important
+ * ensuring the fd is closed on error. In practice, this isn't important
* because either an error happens inside the critical section, or we are
* in bootstrap or WAL replay; so an error past this point is always fatal
* anyway.
@@ -773,7 +773,7 @@ write_relmap_file(bool shared, RelMapFile *newmap,
/*
* Make sure that the files listed in the map are not deleted if the outer
- * transaction aborts.  This had better be within the critical section
+ * transaction aborts. This had better be within the critical section
* too: it's not likely to fail, but if it did, we'd arrive at transaction
* abort with the files still vulnerable. PANICing will leave things in a
* good state on-disk.
diff --git a/src/backend/utils/cache/spccache.c b/src/backend/utils/cache/spccache.c
index 57e5d0342a6..a306c481db6 100644
--- a/src/backend/utils/cache/spccache.c
+++ b/src/backend/utils/cache/spccache.c
@@ -4,7 +4,7 @@
* Tablespace cache management.
*
* We cache the parsed version of spcoptions for each tablespace to avoid
- * needing to reparse on every lookup.  Right now, there doesn't appear to
+ * needing to reparse on every lookup. Right now, there doesn't appear to
* be a measurable performance gain from doing this, but that might change
* in the future as we add more options.
*
@@ -128,7 +128,7 @@ get_tablespace(Oid spcid)
return spc;
/*
- * Not found in TableSpace cache.  Check catcache.  If we don't find a
+ * Not found in TableSpace cache. Check catcache. If we don't find a
* valid HeapTuple, it must mean someone has managed to request tablespace
* details for a non-existent tablespace. We'll just treat that case as
* if no options were specified.
@@ -158,7 +158,7 @@ get_tablespace(Oid spcid)
}
/*
- * Now create the cache entry.  It's important to do this only after
+ * Now create the cache entry. It's important to do this only after
* reading the pg_tablespace entry, since doing so could cause a cache
* flush.
*/
diff --git a/src/backend/utils/cache/syscache.c b/src/backend/utils/cache/syscache.c
index 99e5f1d9fe6..55d852e95e4 100644
--- a/src/backend/utils/cache/syscache.c
+++ b/src/backend/utils/cache/syscache.c
@@ -764,7 +764,7 @@ static bool CacheInitialized = false;
* InitCatalogCache - initialize the caches
*
* Note that no database access is done here; we only allocate memory
- * and initialize the cache structure.  Interrogation of the database
+ * and initialize the cache structure. Interrogation of the database
* to complete initialization of a cache happens upon first use
* of that cache.
*/
@@ -1001,7 +1001,7 @@ SearchSysCacheExistsAttName(Oid relid, const char *attname)
* extract a specific attribute.
*
* This is equivalent to using heap_getattr() on a tuple fetched
- * from a non-cached relation.  Usually, this is only used for attributes
+ * from a non-cached relation. Usually, this is only used for attributes
* that could be NULL or variable length; the fixed-size attributes in
* a system table are accessed just by mapping the tuple onto the C struct
* declarations from include/catalog/.
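The split the comment above draws, fixed-size fields via the C struct versus nullable or variable-length fields via SysCacheGetAttr, shows up in callers roughly like this (a sketch; error handling trimmed):

    HeapTuple	tup;
    Form_pg_proc procform;
    Datum		datum;
    bool		isnull;

    tup = SearchSysCache1(PROCOID, ObjectIdGetDatum(funcOid));
    if (!HeapTupleIsValid(tup))
        elog(ERROR, "cache lookup failed for function %u", funcOid);

    /* fixed-size attribute: map the tuple onto the catalog struct */
    procform = (Form_pg_proc) GETSTRUCT(tup);

    /* nullable/varlena attribute: must go through SysCacheGetAttr */
    datum = SysCacheGetAttr(PROCOID, tup, Anum_pg_proc_prosrc, &isnull);

    ReleaseSysCache(tup);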
diff --git a/src/backend/utils/cache/typcache.c b/src/backend/utils/cache/typcache.c
index 5fd02958cf1..06972e4da9d 100644
--- a/src/backend/utils/cache/typcache.c
+++ b/src/backend/utils/cache/typcache.c
@@ -11,7 +11,7 @@
*
* Several seemingly-odd choices have been made to support use of the type
* cache by generic array and record handling routines, such as array_eq(),
- * record_cmp(), and hash_array().  Because those routines are used as index
+ * record_cmp(), and hash_array(). Because those routines are used as index
* support operations, they cannot leak memory. To allow them to execute
* efficiently, all information that they would like to re-use across calls
* is kept in the type cache.
@@ -99,7 +99,7 @@ typedef struct TypeCacheEnumData
*
* Stored record types are remembered in a linear array of TupleDescs,
* which can be indexed quickly with the assigned typmod. There is also
- * a hash table to speed searches for matching TupleDescs.  The hash key
+ * a hash table to speed searches for matching TupleDescs. The hash key
* uses just the first N columns' type OIDs, and so we may have multiple
* entries with the same hash key.
*/
@@ -468,7 +468,7 @@ load_typcache_tupdesc(TypeCacheEntry *typentry)
/*
* Link to the tupdesc and increment its refcount (we assert it's a
- * refcounted descriptor).  We don't use IncrTupleDescRefCount() for this,
+ * refcounted descriptor). We don't use IncrTupleDescRefCount() for this,
* because the reference mustn't be entered in the current resource owner;
* it can outlive the current query.
*/
@@ -1004,7 +1004,7 @@ load_enum_cache_data(TypeCacheEntry *tcache)
/*
* Read all the information for members of the enum type. We collect the
* info in working memory in the caller's context, and then transfer it to
- * permanent memory in CacheMemoryContext.  This minimizes the risk of
+ * permanent memory in CacheMemoryContext. This minimizes the risk of
* leaking memory from CacheMemoryContext in the event of an error partway
* through.
*/
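A typical index-support-style consumer of this cache asks once for the information it needs and then reuses the cached FmgrInfo on every call, along these lines (a sketch modeled on the array routines named above):

    TypeCacheEntry *typentry;
    int32		cmpresult;

    typentry = lookup_type_cache(element_type, TYPECACHE_CMP_PROC_FINFO);
    if (!OidIsValid(typentry->cmp_proc_finfo.fn_oid))
        ereport(ERROR,
                (errcode(ERRCODE_UNDEFINED_FUNCTION),
                 errmsg("could not identify a comparison function for type %s",
                        format_type_be(element_type))));

    /* no per-call catalog lookups, and nothing leaks across calls */
    cmpresult = DatumGetInt32(FunctionCall2Coll(&typentry->cmp_proc_finfo,
                                                collation, datum1, datum2));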
diff --git a/src/backend/utils/error/elog.c b/src/backend/utils/error/elog.c
index 0f19bb45e40..fef2dd14776 100644
--- a/src/backend/utils/error/elog.c
+++ b/src/backend/utils/error/elog.c
@@ -5,7 +5,7 @@
*
* Because of the extremely high rate at which log messages can be generated,
* we need to be mindful of the performance cost of obtaining any information
- * that may be logged.  Also, it's important to keep in mind that this code may
+ * that may be logged. Also, it's important to keep in mind that this code may
* get called from within an aborted transaction, in which case operations
* such as syscache lookups are unsafe.
*
@@ -15,23 +15,23 @@
* if we run out of memory, it's important to be able to report that fact.
* There are a number of considerations that go into this.
*
- * First, distinguish between re-entrant use and actual recursion.  It
+ * First, distinguish between re-entrant use and actual recursion. It
* is possible for an error or warning message to be emitted while the
- * parameters for an error message are being computed.  In this case
+ * parameters for an error message are being computed. In this case
* errstart has been called for the outer message, and some field values
- * may have already been saved, but we are not actually recursing.  We handle
- * this by providing a (small) stack of ErrorData records.  The inner message
+ * may have already been saved, but we are not actually recursing. We handle
+ * this by providing a (small) stack of ErrorData records. The inner message
* can be computed and sent without disturbing the state of the outer message.
* (If the inner message is actually an error, this isn't very interesting
* because control won't come back to the outer message generator ... but
* if the inner message is only debug or log data, this is critical.)
*
* Second, actual recursion will occur if an error is reported by one of
- * the elog.c routines or something they call.  By far the most probable
+ * the elog.c routines or something they call. By far the most probable
* scenario of this sort is "out of memory"; and it's also the nastiest
* to handle because we'd likely also run out of memory while trying to
* report this error! Our escape hatch for this case is to reset the
- * ErrorContext to empty before trying to process the inner error.  Since
+ * ErrorContext to empty before trying to process the inner error. Since
* ErrorContext is guaranteed to have at least 8K of space in it (see mcxt.c),
* we should be able to process an "out of memory" message successfully.
* Since we lose the prior error state due to the reset, we won't be able
@@ -231,7 +231,7 @@ errstart(int elevel, const char *filename, int lineno,
{
/*
* If we are inside a critical section, all errors become PANIC
- * errors.  See miscadmin.h.
+ * errors. See miscadmin.h.
*/
if (CritSectionCount > 0)
elevel = PANIC;
@@ -244,7 +244,7 @@ errstart(int elevel, const char *filename, int lineno,
*
* 2. ExitOnAnyError mode switch is set (initdb uses this).
*
- * 3. the error occurred after proc_exit has begun to run.  (It's
+ * 3. the error occurred after proc_exit has begun to run. (It's
* proc_exit's responsibility to see that this doesn't turn into
* infinite recursion!)
*/
@@ -341,7 +341,7 @@ errstart(int elevel, const char *filename, int lineno,
if (++errordata_stack_depth >= ERRORDATA_STACK_SIZE)
{
/*
- * Wups, stack not big enough.  We treat this as a PANIC condition
+ * Wups, stack not big enough. We treat this as a PANIC condition
* because it suggests an infinite loop of errors during error
* recovery.
*/
@@ -443,7 +443,7 @@ errfinish(int dummy,...)
*
* Reset InterruptHoldoffCount in case we ereport'd from inside an
* interrupt holdoff section. (We assume here that no handler will
- * itself be inside a holdoff section.  If necessary, such a handler
+ * itself be inside a holdoff section. If necessary, such a handler
* could save and restore InterruptHoldoffCount for itself, but this
* should make life easier for most.)
*
@@ -469,7 +469,7 @@ errfinish(int dummy,...)
* progress, so that we can report the message before dying. (Without
* this, pq_putmessage will refuse to send the message at all, which is
* what we want for NOTICE messages, but not for fatal exits.) This hack
- * is necessary because of poor design of old-style copy protocol.  Note
+ * is necessary because of poor design of old-style copy protocol. Note
* we must do this even if client is fool enough to have set
* client_min_messages above FATAL, so don't look at output_to_client.
*/
@@ -581,7 +581,7 @@ errcode(int sqlerrcode)
/*
* errcode_for_file_access --- add SQLSTATE error code to the current error
*
- * The SQLSTATE code is chosen based on the saved errno value.  We assume
+ * The SQLSTATE code is chosen based on the saved errno value. We assume
* that the failing operation was some type of disk file access.
*
* NOTE: the primary error message string should generally include %m
@@ -652,7 +652,7 @@ errcode_for_file_access(void)
/*
* errcode_for_socket_access --- add SQLSTATE error code to the current error
*
- * The SQLSTATE code is chosen based on the saved errno value.  We assume
+ * The SQLSTATE code is chosen based on the saved errno value. We assume
* that the failing operation was some type of socket access.
*
* NOTE: the primary error message string should generally include %m
@@ -690,7 +690,7 @@ errcode_for_socket_access(void)
* This macro handles expansion of a format string and associated parameters;
* it's common code for errmsg(), errdetail(), etc. Must be called inside
* a routine that is declared like "const char *fmt, ..." and has an edata
- * pointer set up.  The message is assigned to edata->targetfield, or
+ * pointer set up. The message is assigned to edata->targetfield, or
* appended to it if appendval is true. The message is subject to translation
* if translateit is true.
*
@@ -1173,7 +1173,7 @@ elog_start(const char *filename, int lineno, const char *funcname)
if (++errordata_stack_depth >= ERRORDATA_STACK_SIZE)
{
/*
- * Wups, stack not big enough.  We treat this as a PANIC condition
+ * Wups, stack not big enough. We treat this as a PANIC condition
* because it suggests an infinite loop of errors during error
* recovery. Note that the message is intentionally not localized,
* else failure to convert it to client encoding could cause further
@@ -1324,7 +1324,7 @@ EmitErrorReport(void)
/*
* CopyErrorData --- obtain a copy of the topmost error stack entry
*
- * This is only for use in error handler code.  The data is copied into the
+ * This is only for use in error handler code. The data is copied into the
* current memory context, so callers should always switch away from
* ErrorContext first; otherwise it will be lost when FlushErrorState is done.
*/
@@ -1415,7 +1415,7 @@ FlushErrorState(void)
*
* A handler can do CopyErrorData/FlushErrorState to get out of the error
* subsystem, then do some processing, and finally ReThrowError to re-throw
- * the original error.  This is slower than just PG_RE_THROW() but should
+ * the original error. This is slower than just PG_RE_THROW() but should
* be used if the "some processing" is likely to incur another error.
*/
void
@@ -1432,7 +1432,7 @@ ReThrowError(ErrorData *edata)
if (++errordata_stack_depth >= ERRORDATA_STACK_SIZE)
{
/*
- * Wups, stack not big enough.  We treat this as a PANIC condition
+ * Wups, stack not big enough. We treat this as a PANIC condition
* because it suggests an infinite loop of errors during error
* recovery.
*/
@@ -1585,7 +1585,7 @@ set_syslog_parameters(const char *ident, int facility)
{
/*
* guc.c is likely to call us repeatedly with same parameters, so don't
- * thrash the syslog connection unnecessarily.  Also, we do not re-open
+ * thrash the syslog connection unnecessarily. Also, we do not re-open
* the connection until needed, since this routine will get called whether
* or not Log_destination actually mentions syslog.
*
@@ -2794,7 +2794,7 @@ useful_strerror(int errnum)
str = strerror(errnum);
/*
- * Some strerror()s return an empty string for out-of-range errno.  This
+ * Some strerror()s return an empty string for out-of-range errno. This
* is ANSI C spec compliant, but not exactly useful. Also, we may get
* back strings of question marks if libc cannot transcode the message to
* the codeset specified by LC_CTYPE. If we get nothing useful, first try
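The errcode_for_file_access/errcode_for_socket_access convention above pairs with %m, which elog.c expands from the saved errno, in the standard idiom:

    fd = open(path, O_RDONLY, 0);
    if (fd < 0)
        ereport(ERROR,
                (errcode_for_file_access(),
                 errmsg("could not open file \"%s\": %m", path)));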
diff --git a/src/backend/utils/fmgr/dfmgr.c b/src/backend/utils/fmgr/dfmgr.c
index 734bd056891..d7634a98d90 100644
--- a/src/backend/utils/fmgr/dfmgr.c
+++ b/src/backend/utils/fmgr/dfmgr.c
@@ -131,7 +131,7 @@ load_external_function(char *filename, char *funcname,
/*
* This function loads a shlib file without looking up any particular
- * function in it.  If the same shlib has previously been loaded,
+ * function in it. If the same shlib has previously been loaded,
* unload and reload it.
*
* When 'restricted' is true, only libraries in the presumed-secure
@@ -171,7 +171,7 @@ lookup_external_function(void *filehandle, char *funcname)
/*
* Load the specified dynamic-link library file, unless it already is
- * loaded.  Return the pg_dl* handle for the file.
+ * loaded. Return the pg_dl* handle for the file.
*
* Note: libname is expected to be an exact name for the library file.
*/
@@ -473,7 +473,7 @@ file_exists(const char *name)
* If name contains a slash, check if the file exists, if so return
* the name. Else (no slash) try to expand using search path (see
* find_in_dynamic_libpath below); if that works, return the fully
- * expanded file name.  If the previous failed, append DLSUFFIX and
+ * expanded file name. If the previous failed, append DLSUFFIX and
* try again. If all fails, just return the original name.
*
* The result will always be freshly palloc'd.
diff --git a/src/backend/utils/fmgr/fmgr.c b/src/backend/utils/fmgr/fmgr.c
index 0a2d4ef9565..12dcd7980a7 100644
--- a/src/backend/utils/fmgr/fmgr.c
+++ b/src/backend/utils/fmgr/fmgr.c
@@ -96,7 +96,7 @@ static Datum fmgr_security_definer(PG_FUNCTION_ARGS);
/*
- * Lookup routines for builtin-function table.  We can search by either Oid
+ * Lookup routines for builtin-function table. We can search by either Oid
* or name, but search by Oid is much faster.
*/
@@ -581,7 +581,7 @@ clear_external_function_hash(void *filehandle)
* Copy an FmgrInfo struct
*
* This is inherently somewhat bogus since we can't reliably duplicate
- * language-dependent subsidiary info.  We cheat by zeroing fn_extra,
+ * language-dependent subsidiary info. We cheat by zeroing fn_extra,
* instead, meaning that subsidiary info will have to be recomputed.
*/
void
@@ -861,7 +861,7 @@ fmgr_oldstyle(PG_FUNCTION_ARGS)
/*
- * Support for security-definer and proconfig-using functions.  We support
+ * Support for security-definer and proconfig-using functions. We support
* both of these features using the same call handler, because they are
* often used together and it would be inefficient (as well as notationally
* messy) to have two levels of call handler involved.
@@ -881,7 +881,7 @@ struct fmgr_security_definer_cache
* (All this info is cached for the duration of the current query.)
* To execute a call, we temporarily replace the flinfo with the cached
* and looked-up one, while keeping the outer fcinfo (which contains all
- * the actual arguments, etc.) intact.  This is not re-entrant, but then
+ * the actual arguments, etc.) intact. This is not re-entrant, but then
* the fcinfo itself can't be used re-entrantly anyway.
*/
static Datum
@@ -961,7 +961,7 @@ fmgr_security_definer(PG_FUNCTION_ARGS)
/*
* We don't need to restore GUC or userid settings on error, because the
- * ensuing xact or subxact abort will do that.  The PG_TRY block is only
+ * ensuing xact or subxact abort will do that. The PG_TRY block is only
* needed to clean up the flinfo link.
*/
save_flinfo = fcinfo->flinfo;
@@ -1014,7 +1014,7 @@ fmgr_security_definer(PG_FUNCTION_ARGS)
/*
* These are for invocation of a specifically named function with a
* directly-computed parameter list. Note that neither arguments nor result
- * are allowed to be NULL.  Also, the function cannot be one that needs to
+ * are allowed to be NULL. Also, the function cannot be one that needs to
* look at FmgrInfo, since there won't be any.
*/
Datum
@@ -1559,8 +1559,8 @@ FunctionCall9Coll(FmgrInfo *flinfo, Oid collation, Datum arg1, Datum arg2,
/*
* These are for invocation of a function identified by OID with a
* directly-computed parameter list. Note that neither arguments nor result
- * are allowed to be NULL.  These are essentially fmgr_info() followed
- * by FunctionCallN().  If the same function is to be invoked repeatedly,
+ * are allowed to be NULL. These are essentially fmgr_info() followed
+ * by FunctionCallN(). If the same function is to be invoked repeatedly,
* do the fmgr_info() once and then use FunctionCallN().
*/
Datum
@@ -1889,7 +1889,7 @@ OidFunctionCall9Coll(Oid functionId, Oid collation, Datum arg1, Datum arg2,
*
* One important difference from the bare function call is that we will
* push any active SPI context, allowing SPI-using I/O functions to be
- * called from other SPI functions without extra notation.  This is a hack,
+ * called from other SPI functions without extra notation. This is a hack,
* but the alternative of expecting all SPI functions to do SPI_push/SPI_pop
* around I/O calls seems worse.
*/
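The guidance in the OidFunctionCallN comment above amounts to this (a sketch):

    FmgrInfo	flinfo;
    Datum		result;
    int			i;

    /* one-shot call: OidFunctionCallN is fmgr_info() plus FunctionCallN() */
    result = OidFunctionCall2(procOid, arg1, arg2);

    /* repeated calls: do the lookup once, then reuse the cached FmgrInfo */
    fmgr_info(procOid, &flinfo);
    for (i = 0; i < nvals; i++)
        results[i] = FunctionCall1(&flinfo, vals[i]);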
diff --git a/src/backend/utils/fmgr/funcapi.c b/src/backend/utils/fmgr/funcapi.c
index aa249fabfe8..85a17b010e9 100644
--- a/src/backend/utils/fmgr/funcapi.c
+++ b/src/backend/utils/fmgr/funcapi.c
@@ -135,7 +135,7 @@ per_MultiFuncCall(PG_FUNCTION_ARGS)
* FuncCallContext is pointing to it), but in most usage patterns the
* tuples stored in it will be in the function's per-tuple context. So at
* the beginning of each call, the Slot will hold a dangling pointer to an
- * already-recycled tuple.  We clear it out here.
+ * already-recycled tuple. We clear it out here.
*
* Note: use of retval->slot is obsolete as of 8.0, and we expect that it
* will always be NULL. This is just here for backwards compatibility in
@@ -191,13 +191,13 @@ shutdown_MultiFuncCall(Datum arg)
* Given a function's call info record, determine the kind of datatype
* it is supposed to return. If resultTypeId isn't NULL, *resultTypeId
* receives the actual datatype OID (this is mainly useful for scalar
- * result types).  If resultTupleDesc isn't NULL, *resultTupleDesc
+ * result types). If resultTupleDesc isn't NULL, *resultTupleDesc
* receives a pointer to a TupleDesc when the result is of a composite
* type, or NULL when it's a scalar result.
*
* One hard case that this handles is resolution of actual rowtypes for
* functions returning RECORD (from either the function's OUT parameter
- * list, or a ReturnSetInfo context node).  TYPEFUNC_RECORD is returned
+ * list, or a ReturnSetInfo context node). TYPEFUNC_RECORD is returned
* only when we couldn't resolve the actual rowtype for lack of information.
*
* The other hard case that this handles is resolution of polymorphism.
@@ -280,7 +280,7 @@ get_func_result_type(Oid functionId,
/*
* internal_get_result_type -- workhorse code implementing all the above
*
- * funcid must always be supplied.  call_expr and rsinfo can be NULL if not
+ * funcid must always be supplied. call_expr and rsinfo can be NULL if not
* available. We will return TYPEFUNC_RECORD, and store NULL into
* *resultTupleDesc, if we cannot deduce the complete result rowtype from
* the available information.
@@ -441,7 +441,7 @@ resolve_polymorphic_tupdesc(TupleDesc tupdesc, oidvector *declared_args,
return true;
/*
- * Otherwise, extract actual datatype(s) from input arguments.  (We assume
+ * Otherwise, extract actual datatype(s) from input arguments. (We assume
* the parser already validated consistency of the arguments.)
*/
if (!call_expr)
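Callers of get_func_result_type typically dispatch on the returned TypeFuncClass, per the contract described above (a sketch):

    Oid			resultTypeId;
    TupleDesc	tupdesc;

    switch (get_func_result_type(funcid, &resultTypeId, &tupdesc))
    {
        case TYPEFUNC_SCALAR:
            /* resultTypeId is the scalar result type */
            break;
        case TYPEFUNC_COMPOSITE:
            /* tupdesc describes the composite result rowtype */
            break;
        case TYPEFUNC_RECORD:
            /* RECORD result whose actual rowtype could not be resolved */
            break;
        default:
            break;
    }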
diff --git a/src/backend/utils/hash/dynahash.c b/src/backend/utils/hash/dynahash.c
index 77d615cbda1..dcc4c91aa42 100644
--- a/src/backend/utils/hash/dynahash.c
+++ b/src/backend/utils/hash/dynahash.c
@@ -5,19 +5,19 @@
*
* dynahash.c supports both local-to-a-backend hash tables and hash tables in
* shared memory. For shared hash tables, it is the caller's responsibility
- * to provide appropriate access interlocking.  The simplest convention is
- * that a single LWLock protects the whole hash table.  Searches (HASH_FIND or
+ * to provide appropriate access interlocking. The simplest convention is
+ * that a single LWLock protects the whole hash table. Searches (HASH_FIND or
* hash_seq_search) need only shared lock, but any update requires exclusive
* lock. For heavily-used shared tables, the single-lock approach creates a
* concurrency bottleneck, so we also support "partitioned" locking wherein
* there are multiple LWLocks guarding distinct subsets of the table. To use
* a hash table in partitioned mode, the HASH_PARTITION flag must be given
- * to hash_create.  This prevents any attempt to split buckets on-the-fly.
+ * to hash_create. This prevents any attempt to split buckets on-the-fly.
* Therefore, each hash bucket chain operates independently, and no fields
* of the hash header change after init except nentries and freeList.
* A partitioned table uses a spinlock to guard changes of those two fields.
* This lets any subset of the hash buckets be treated as a separately
- * lockable partition.  We expect callers to use the low-order bits of a
+ * lockable partition. We expect callers to use the low-order bits of a
* lookup key's hash value as a partition number --- this will work because
* of the way calc_bucket() maps hash values to bucket numbers.
*
@@ -76,7 +76,7 @@
* Constants
*
* A hash table has a top-level "directory", each of whose entries points
- * to a "segment" of ssize bucket headers.  The maximum number of hash
+ * to a "segment" of ssize bucket headers. The maximum number of hash
* buckets is thus dsize * ssize (but dsize may be expansible). Of course,
* the number of records in the table can be larger, but we don't want a
* whole lot of records per bucket or performance goes down.
@@ -84,7 +84,7 @@
* In a hash table allocated in shared memory, the directory cannot be
* expanded because it must stay at a fixed address. The directory size
* should be selected using hash_select_dirsize (and you'd better have
- * a good idea of the maximum number of entries!).  For non-shared hash
+ * a good idea of the maximum number of entries!). For non-shared hash
* tables, the initial directory size can be left at the default.
*/
#define DEF_SEGSIZE 256
@@ -330,7 +330,7 @@ hash_create(const char *tabname, long nelem, HASHCTL *info, int flags)
{
/*
* ctl structure and directory are preallocated for shared memory
- * tables.  Note that HASH_DIRSIZE and HASH_ALLOC had better be set as
+ * tables. Note that HASH_DIRSIZE and HASH_ALLOC had better be set as
* well.
*/
hashp->hctl = info->hctl;
@@ -779,7 +779,7 @@ calc_bucket(HASHHDR *hctl, uint32 hash_val)
* the result is a dangling pointer that shouldn't be dereferenced!)
*
* HASH_ENTER will normally ereport a generic "out of memory" error if
- * it is unable to create a new entry.  The HASH_ENTER_NULL operation is
+ * it is unable to create a new entry. The HASH_ENTER_NULL operation is
* the same except it will return NULL if out of memory. Note that
* HASH_ENTER_NULL cannot be used with the default palloc-based allocator,
* since palloc internally ereports on out-of-memory.
@@ -1245,7 +1245,7 @@ expand_table(HTAB *hashp)
}
/*
- * Relocate records to the new bucket.  NOTE: because of the way the hash
+ * Relocate records to the new bucket. NOTE: because of the way the hash
* masking is done in calc_bucket, only one old bucket can need to be
* split at this point. With a different way of reducing the hash value,
* that might not be true!
@@ -1394,7 +1394,7 @@ hash_corrupted(HTAB *hashp)
{
/*
* If the corruption is in a shared hashtable, we'd better force a
- * systemwide restart.  Otherwise, just shut down this one backend.
+ * systemwide restart. Otherwise, just shut down this one backend.
*/
if (hashp->isshared)
elog(PANIC, "hash table \"%s\" corrupted", hashp->tabname);
@@ -1439,7 +1439,7 @@ next_pow2_int(long num)
/************************* SEQ SCAN TRACKING ************************/
/*
- * We track active hash_seq_search scans here.  The need for this mechanism
+ * We track active hash_seq_search scans here. The need for this mechanism
* comes from the fact that a scan will get confused if a bucket split occurs
* while it's in progress: it might visit entries twice, or even miss some
* entirely (if it's partway through the same bucket that splits). Hence
@@ -1459,7 +1459,7 @@ next_pow2_int(long num)
*
* This arrangement is reasonably robust if a transient hashtable is deleted
* without notifying us. The absolute worst case is we might inhibit splits
- * in another table created later at exactly the same address.  We will give
+ * in another table created later at exactly the same address. We will give
* a warning at transaction end for reference leaks, so any bugs leading to
* lack of notification should be easy to catch.
*/
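A minimal non-shared use of the dynahash API described above, including the low-order-bits partitioning convention, might look like this (a sketch; MyEntry and NUM_PARTITIONS are illustrative, and NUM_PARTITIONS is assumed to be a power of 2):

    typedef struct MyEntry
    {
        Oid			key;			/* hash key must be first in the entry */
        int			payload;
    } MyEntry;

    HASHCTL		info;
    HTAB	   *htab;
    uint32		hashcode;
    int			partition;

    MemSet(&info, 0, sizeof(info));
    info.keysize = sizeof(Oid);
    info.entrysize = sizeof(MyEntry);
    info.hash = oid_hash;
    htab = hash_create("my lookup table", 128, &info,
                       HASH_ELEM | HASH_FUNCTION);

    /* partitioned-locking convention: low-order bits pick the partition */
    hashcode = get_hash_value(htab, &key);
    partition = hashcode % NUM_PARTITIONS;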
diff --git a/src/backend/utils/init/miscinit.c b/src/backend/utils/init/miscinit.c
index 25cbf60e657..fc37bf6d467 100644
--- a/src/backend/utils/init/miscinit.c
+++ b/src/backend/utils/init/miscinit.c
@@ -58,7 +58,7 @@ static char socketLockFile[MAXPGPATH];
*
* NOTE: "ignoring system indexes" means we do not use the system indexes
* for lookups (either in hardwired catalog accesses or in planner-generated
- * plans).  We do, however, still update the indexes when a catalog
+ * plans). We do, however, still update the indexes when a catalog
* modification is made.
* ----------------------------------------------------------------
*/
@@ -300,7 +300,7 @@ SetSessionUserId(Oid userid, bool is_superuser)
* Currently there are two valid bits in SecurityRestrictionContext:
*
* SECURITY_LOCAL_USERID_CHANGE indicates that we are inside an operation
- * that is temporarily changing CurrentUserId via these functions.  This is
+ * that is temporarily changing CurrentUserId via these functions. This is
* needed to indicate that the actual value of CurrentUserId is not in sync
* with guc.c's internal state, so SET ROLE has to be disallowed.
*
@@ -321,7 +321,7 @@ SetSessionUserId(Oid userid, bool is_superuser)
* ever throw any kind of error. This is because they are used by
* StartTransaction and AbortTransaction to save/restore the settings,
* and during the first transaction within a backend, the value to be saved
- * and perhaps restored is indeed invalid.  We have to be able to get
+ * and perhaps restored is indeed invalid. We have to be able to get
* through AbortTransaction without asserting in case InitPostgres fails.
*/
void
@@ -361,7 +361,7 @@ InSecurityRestrictedOperation(void)
/*
* These are obsolete versions of Get/SetUserIdAndSecContext that are
* only provided for bug-compatibility with some rather dubious code in
- * pljava.  We allow the userid to be set, but only when not inside a
+ * pljava. We allow the userid to be set, but only when not inside a
* security restriction context.
*/
void
@@ -464,7 +464,7 @@ InitializeSessionUserId(const char *rolename)
* Check connection limit for this role.
*
* There is a race condition here --- we create our PGPROC before
- * checking for other PGPROCs.  If two backends did this at about the
+ * checking for other PGPROCs. If two backends did this at about the
* same time, they might both think they were over the limit, while
* ideally one should succeed and one fail. Getting that to work
* exactly seems more trouble than it is worth, however; instead we
@@ -563,7 +563,7 @@ GetCurrentRoleId(void)
* Change Role ID while running (SET ROLE)
*
* If roleid is InvalidOid, we are doing SET ROLE NONE: revert to the
- * session user authorization.  In this case the is_superuser argument
+ * session user authorization. In this case the is_superuser argument
* is ignored.
*
* When roleid is not InvalidOid, the caller must have checked whether
@@ -725,7 +725,7 @@ CreateLockFile(const char *filename, bool amPostmaster,
my_gp_pid = 0;
/*
- * We need a loop here because of race conditions.  But don't loop forever
+ * We need a loop here because of race conditions. But don't loop forever
* (for example, a non-writable $PGDATA directory might cause a failure
* that won't go away). 100 tries seems like plenty.
*/
@@ -734,7 +734,7 @@ CreateLockFile(const char *filename, bool amPostmaster,
/*
* Try to create the lock file --- O_EXCL makes this atomic.
*
- * Think not to make the file protection weaker than 0600.  See
+ * Think not to make the file protection weaker than 0600. See
* comments below.
*/
fd = open(filename, O_RDWR | O_CREAT | O_EXCL, 0600);
@@ -794,7 +794,7 @@ CreateLockFile(const char *filename, bool amPostmaster,
* implies that the existing process has a different userid than we
* do, which means it cannot be a competing postmaster. A postmaster
* cannot successfully attach to a data directory owned by a userid
- * other than its own.  (This is now checked directly in
+ * other than its own. (This is now checked directly in
* checkDataDir(), but has been true for a long time because of the
* restriction that the data directory isn't group- or
* world-accessible.) Also, since we create the lockfiles mode 600,
@@ -832,9 +832,9 @@ CreateLockFile(const char *filename, bool amPostmaster,
}
/*
- * No, the creating process did not exist.  However, it could be that
+ * No, the creating process did not exist. However, it could be that
* the postmaster crashed (or more likely was kill -9'd by a clueless
- * admin) but has left orphan backends behind.  Check for this by
+ * admin) but has left orphan backends behind. Check for this by
* looking to see if there is an associated shmem segment that is
* still in use.
*
@@ -875,7 +875,7 @@ CreateLockFile(const char *filename, bool amPostmaster,
/*
* Looks like nobody's home. Unlink the file and try again to create
- * it.  Need a loop because of possible race condition against other
+ * it. Need a loop because of possible race condition against other
* would-be creators.
*/
if (unlink(filename) < 0)
@@ -889,7 +889,7 @@ CreateLockFile(const char *filename, bool amPostmaster,
}
/*
- * Successfully created the file, now fill it.  See comment in miscadmin.h
+ * Successfully created the file, now fill it. See comment in miscadmin.h
* about the contents. Note that we write the same first five lines into
* both datadir and socket lockfiles; although more stuff may get added to
* the datadir lockfile later.
@@ -1232,7 +1232,7 @@ load_libraries(const char *libraries, const char *gucname, bool restricted)
/*
* Choose notice level: avoid repeat messages when re-loading a library
- * that was preloaded into the postmaster.  (Only possible in EXEC_BACKEND
+ * that was preloaded into the postmaster. (Only possible in EXEC_BACKEND
* configurations)
*/
#ifdef EXEC_BACKEND
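The creation-retry loop the CreateLockFile comments above walk through has this basic shape (a heavily simplified sketch of the logic, not the real function):

    int			fd;
    int			ntries;

    for (ntries = 0;; ntries++)
    {
        if (ntries > 100)
            ereport(FATAL,
                    (errcode_for_file_access(),
                     errmsg("could not create lock file \"%s\": %m", filename)));

        /* O_EXCL makes creation atomic; 0600 keeps the file private */
        fd = open(filename, O_RDWR | O_CREAT | O_EXCL, 0600);
        if (fd >= 0)
            break;				/* success: we own the lock file */

        if (errno != EEXIST)
            ereport(FATAL,
                    (errcode_for_file_access(),
                     errmsg("could not create lock file \"%s\": %m", filename)));

        /*
         * The file already exists: read the PID it contains; if that process
         * is gone and no orphaned shared memory remains, unlink() the stale
         * file and loop around to try creating it again.
         */
    }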
diff --git a/src/backend/utils/init/postinit.c b/src/backend/utils/init/postinit.c
index f8d8626a92c..cfefe4b0a4f 100644
--- a/src/backend/utils/init/postinit.c
+++ b/src/backend/utils/init/postinit.c
@@ -76,7 +76,7 @@ static void process_settings(Oid databaseid, Oid roleid);
* GetDatabaseTuple -- fetch the pg_database row for a database
*
* This is used during backend startup when we don't yet have any access to
- * system catalogs in general.  In the worst case, we can seqscan pg_database
+ * system catalogs in general. In the worst case, we can seqscan pg_database
* using nothing but the hard-wired descriptor that relcache.c creates for
* pg_database. In more typical cases, relcache.c was able to load
* descriptors for both pg_database and its indexes from the shared relcache
@@ -100,7 +100,7 @@ GetDatabaseTuple(const char *dbname)
CStringGetDatum(dbname));
/*
- * Open pg_database and fetch a tuple.  Force heap scan if we haven't yet
+ * Open pg_database and fetch a tuple. Force heap scan if we haven't yet
* built the critical shared relcache entries (i.e., we're starting up
* without a shared relcache cache file).
*/
@@ -143,7 +143,7 @@ GetDatabaseTupleByOid(Oid dboid)
ObjectIdGetDatum(dboid));
/*
- * Open pg_database and fetch a tuple.  Force heap scan if we haven't yet
+ * Open pg_database and fetch a tuple. Force heap scan if we haven't yet
* built the critical shared relcache entries (i.e., we're starting up
* without a shared relcache cache file).
*/
@@ -184,7 +184,7 @@ PerformAuthentication(Port *port)
* are loading them into the startup transaction's memory context, not
* PostmasterContext, but that shouldn't matter.
*
- * FIXME: [fork/exec] Ugh.  Is there a way around this overhead?
+ * FIXME: [fork/exec] Ugh. Is there a way around this overhead?
*/
#ifdef EXEC_BACKEND
if (!load_hba())
@@ -283,7 +283,7 @@ CheckMyDatabase(const char *name, bool am_superuser)
name)));
/*
- * Check privilege to connect to the database.  (The am_superuser test
+ * Check privilege to connect to the database. (The am_superuser test
* is redundant, but since we have the flag, might as well check it
* and save a few cycles.)
*/
@@ -299,7 +299,7 @@ CheckMyDatabase(const char *name, bool am_superuser)
* Check connection limit for this database.
*
* There is a race condition here --- we create our PGPROC before
- * checking for other PGPROCs.  If two backends did this at about the
+ * checking for other PGPROCs. If two backends did this at about the
* same time, they might both think they were over the limit, while
* ideally one should succeed and one fail. Getting that to work
* exactly seems more trouble than it is worth, however; instead we
@@ -444,7 +444,7 @@ BaseInit(void)
* Initialize POSTGRES.
*
* The database can be specified by name, using the in_dbname parameter, or by
- * OID, using the dboid parameter.  In the latter case, the actual database
+ * OID, using the dboid parameter. In the latter case, the actual database
* name can be returned to the caller in out_dbname. If out_dbname isn't
* NULL, it must point to a buffer of size NAMEDATALEN.
*
@@ -838,7 +838,7 @@ InitPostgres(const char *in_dbname, Oid dboid, const char *username,
/*
* Now process any command-line switches and any additional GUC variable
- * settings passed in the startup packet.  We couldn't do this before
+ * settings passed in the startup packet. We couldn't do this before
* because we didn't know if client is a superuser.
*/
if (MyProcPort != NULL)
diff --git a/src/backend/utils/mb/conversion_procs/euc_tw_and_big5/big5.c b/src/backend/utils/mb/conversion_procs/euc_tw_and_big5/big5.c
index ca965390557..68615726552 100644
--- a/src/backend/utils/mb/conversion_procs/euc_tw_and_big5/big5.c
+++ b/src/backend/utils/mb/conversion_procs/euc_tw_and_big5/big5.c
@@ -231,7 +231,7 @@ static unsigned short BinarySearchRange
/*
* NOTE: big5 high_byte: 0xa1-0xfe, low_byte: 0x40-0x7e,
* 0xa1-0xfe (radicals: 0x00-0x3e, 0x3f-0x9c) big5 radix is
- * 0x9d.  [region_low, region_high] We
+ * 0x9d. [region_low, region_high] We
* should remember big5 has two different regions (above).
* There is a bias for the distance between these regions.
* 0xa1 - 0x7e + bias = 1 (Distance between 0xa1 and 0x7e is
diff --git a/src/backend/utils/mb/mbutils.c b/src/backend/utils/mb/mbutils.c
index 287ff808fc1..49826bad037 100644
--- a/src/backend/utils/mb/mbutils.c
+++ b/src/backend/utils/mb/mbutils.c
@@ -29,7 +29,7 @@
/*
* We maintain a simple linked list caching the fmgr lookup info for the
* currently selected conversion functions, as well as any that have been
- * selected previously in the current session.  (We remember previous
+ * selected previously in the current session. (We remember previous
* settings because we must be able to restore a previous setting during
* transaction rollback, without doing any fresh catalog accesses.)
*
@@ -76,7 +76,7 @@ static int cliplen(const char *str, int len, int limit);
/*
- * Prepare for a future call to SetClientEncoding.  Success should mean
+ * Prepare for a future call to SetClientEncoding. Success should mean
* that SetClientEncoding is guaranteed to succeed for this encoding request.
*
* (But note that success before backend_startup_complete does not guarantee
@@ -148,7 +148,7 @@ PrepareClientEncoding(int encoding)
/*
* We cannot yet remove any older entry for the same encoding pair,
- * since it could still be in use.  SetClientEncoding will clean up.
+ * since it could still be in use. SetClientEncoding will clean up.
*/
return 0; /* success */
@@ -157,8 +157,8 @@ PrepareClientEncoding(int encoding)
{
/*
* If we're not in a live transaction, the only thing we can do is
- * restore a previous setting using the cache.  This covers all
- * transaction-rollback cases.  The only case it might not work for is
+ * restore a previous setting using the cache. This covers all
+ * transaction-rollback cases. The only case it might not work for is
* trying to change client_encoding on the fly by editing
* postgresql.conf and SIGHUP'ing. Which would probably be a stupid
* thing to do anyway.
@@ -316,7 +316,7 @@ pg_get_client_encoding_name(void)
*
* CAUTION: although the presence of a length argument means that callers
* can pass non-null-terminated strings, care is required because the same
- * string will be passed back if no conversion occurs.  Such callers *must*
+ * string will be passed back if no conversion occurs. Such callers *must*
* check whether result == src and handle that case differently.
*
* Note: we try to avoid raising error, since that could get us into
@@ -572,7 +572,7 @@ pg_any_to_server(const char *s, int len, int encoding)
* the selected client_encoding. If the client encoding is ASCII-safe
* then we just do a straight validation under that encoding. For an
* ASCII-unsafe encoding we have a problem: we dare not pass such data
- * to the parser but we have no way to convert it.  We compromise by
+ * to the parser but we have no way to convert it. We compromise by
* rejecting the data if it contains any non-ASCII characters.
*/
if (PG_VALID_BE_ENCODING(encoding))
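The result == src caution above means length-argument conversion callers need both branches, roughly like so (a sketch; do_something is a hypothetical consumer):

    char	   *dst;
    int			dstlen;

    dst = pg_server_to_client(src, len);
    if (dst != src)
        dstlen = strlen(dst);	/* converted copy is NUL-terminated */
    else
        dstlen = len;			/* same string handed back; may lack a NUL */

    do_something(dst, dstlen);	/* hypothetical consumer */

    if (dst != src)
        pfree(dst);				/* free only if a new string was palloc'd */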
diff --git a/src/backend/utils/mb/wstrcmp.c b/src/backend/utils/mb/wstrcmp.c
index 64a9cf848e2..dad3ae023a3 100644
--- a/src/backend/utils/mb/wstrcmp.c
+++ b/src/backend/utils/mb/wstrcmp.c
@@ -23,7 +23,7 @@
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/src/backend/utils/mb/wstrncmp.c b/src/backend/utils/mb/wstrncmp.c
index 87c1f5afdaa..ea4823fc6f8 100644
--- a/src/backend/utils/mb/wstrncmp.c
+++ b/src/backend/utils/mb/wstrncmp.c
@@ -22,7 +22,7 @@
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c
index f32cf90ac4a..a66a7d9198c 100644
--- a/src/backend/utils/misc/guc.c
+++ b/src/backend/utils/misc/guc.c
@@ -3499,7 +3499,7 @@ get_guc_variables(void)
/*
- * Build the sorted array.  This is split out so that it could be
+ * Build the sorted array. This is split out so that it could be
* re-executed after startup (eg, we could allow loadable modules to
* add vars, and then we'd need to re-sort).
*/
@@ -3657,7 +3657,7 @@ add_placeholder_variable(const char *name, int elevel)
/*
* The char* is allocated at the end of the struct since we have no
- * 'static' place to point to.  Note that the current value, as well as
+ * 'static' place to point to. Note that the current value, as well as
* the boot and reset values, start out NULL.
*/
var->variable = (char **) (var + 1);
@@ -3735,7 +3735,7 @@ find_option(const char *name, bool create_placeholders, int elevel)
return *res;
/*
- * See if the name is an obsolete name for a variable.  We assume that the
+ * See if the name is an obsolete name for a variable. We assume that the
* set of supported old names is short enough that a brute-force search is
* the best way.
*/
@@ -5188,7 +5188,7 @@ set_config_option(const char *name, const char *value,
* If a PGC_BACKEND parameter is changed in the config file,
* we want to accept the new value in the postmaster (whence
* it will propagate to subsequently-started backends), but
- * ignore it in existing backends.  This is a tad klugy, but
+ * ignore it in existing backends. This is a tad klugy, but
* necessary because we don't re-read the config file during
* backend start.
*
@@ -5245,7 +5245,7 @@ set_config_option(const char *name, const char *value,
* An exception might be made if the reset value is assumed to be "safe".
*
* Note: this flag is currently used for "session_authorization" and
- * "role".  We need to prohibit changing these inside a local userid
+ * "role". We need to prohibit changing these inside a local userid
* context because when we exit it, GUC won't be notified, leaving things
* out of sync. (This could be fixed by forcing a new GUC nesting level,
* but that would change behavior in possibly-undesirable ways.) Also, we
@@ -5837,7 +5837,7 @@ set_config_sourcefile(const char *name, char *sourcefile, int sourceline)
/*
* Set a config option to the given value. See also set_config_option,
- * this is just the wrapper to be called from outside GUC.  NB: this
+ * this is just the wrapper to be called from outside GUC. NB: this
* is used only for non-transactional operations.
*
* Note: there is no support here for setting source file/line, as it
@@ -6073,7 +6073,7 @@ flatten_set_variable_args(const char *name, List *args)
else
{
/*
- * Plain string literal or identifier.  For quote mode,
+ * Plain string literal or identifier. For quote mode,
* quote it if it's not a vanilla identifier.
*/
if (flags & GUC_LIST_QUOTE)
@@ -7625,7 +7625,7 @@ ParseLongOption(const char *string, char **name, char **value)
/*
* Handle options fetched from pg_db_role_setting.setconfig,
- * pg_proc.proconfig, etc.  Caller must specify proper context/source/action.
+ * pg_proc.proconfig, etc. Caller must specify proper context/source/action.
*
* The array parameter must be an array of TEXT (it must not be NULL).
*/
@@ -7905,7 +7905,7 @@ GUCArrayReset(ArrayType *array)
* Validate a proposed option setting for GUCArrayAdd/Delete/Reset.
*
* name is the option name. value is the proposed value for the Add case,
- * or NULL for the Delete/Reset cases.  If skipIfNoPermissions is true, it's
+ * or NULL for the Delete/Reset cases. If skipIfNoPermissions is true, it's
* not an error to have no permissions to set the option.
*
* Returns TRUE if OK, FALSE if skipIfNoPermissions is true and user does not
@@ -7990,7 +7990,7 @@ validate_option_array_item(const char *name, const char *value,
* ERRCODE_INVALID_PARAMETER_VALUE SQLSTATE for check hook failures.
*
* Note that GUC_check_errmsg() etc are just macros that result in a direct
- * assignment to the associated variables.  That is ugly, but forced by the
+ * assignment to the associated variables. That is ugly, but forced by the
* limitations of C's macro mechanisms.
*/
void
diff --git a/src/backend/utils/misc/ps_status.c b/src/backend/utils/misc/ps_status.c
index 66d03adaf90..190ec15ca13 100644
--- a/src/backend/utils/misc/ps_status.c
+++ b/src/backend/utils/misc/ps_status.c
@@ -109,7 +109,7 @@ static char **save_argv;
* from being clobbered by subsequent ps_display actions.
*
* (The original argv[] will not be overwritten by this routine, but may be
- * overwritten during init_ps_display.  Also, the physical location of the
+ * overwritten during init_ps_display. Also, the physical location of the
* environment strings may be moved, so this should be called before any code
* that might try to hang onto a getenv() result.)
*/
@@ -210,7 +210,7 @@ save_ps_display_args(int argc, char **argv)
/*
* Call this once during subprocess startup to set the identification
- * values.  At this point, the original argv[] array may be overwritten.
+ * values. At this point, the original argv[] array may be overwritten.
*/
void
init_ps_display(const char *username, const char *dbname,
@@ -360,7 +360,7 @@ set_ps_display(const char *activity, bool force)
/*
* Returns what's currently in the ps display, in case someone needs
- * it.  Note that only the activity part is returned.  On some platforms
+ * it. Note that only the activity part is returned. On some platforms
* the string will not be null-terminated, so return the effective
* length into *displen.
*/
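Putting the three ps_status entry points together (a sketch of typical usage):

    /* in main(), before anything hangs onto getenv() results */
    argv = save_ps_display_args(argc, argv);

    /* once, at subprocess startup */
    init_ps_display(username, dbname, host_info, "");

    /* whenever the current activity changes */
    set_ps_display("SELECT", false);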
diff --git a/src/backend/utils/misc/rbtree.c b/src/backend/utils/misc/rbtree.c
index f8143724d0a..c52e5047bfc 100644
--- a/src/backend/utils/misc/rbtree.c
+++ b/src/backend/utils/misc/rbtree.c
@@ -13,7 +13,7 @@
*
* Red-black trees are a type of balanced binary tree wherein (1) any child of
* a red node is always black, and (2) every path from root to leaf traverses
- * an equal number of black nodes.  From these properties, it follows that the
+ * an equal number of black nodes. From these properties, it follows that the
* longest path from root to leaf is only about twice as long as the shortest,
* so lookups are guaranteed to run in O(lg n) time.
*
@@ -102,7 +102,7 @@ static RBNode sentinel = {InitialState, RBBLACK, RBNIL, RBNIL, NULL};
* valid data! freefunc can be NULL if caller doesn't require retail
* space reclamation.
*
- * The RBTree node is palloc'd in the caller's memory context.  Note that
+ * The RBTree node is palloc'd in the caller's memory context. Note that
* all contents of the tree are actually allocated by the caller, not here.
*
* Since tree contents are managed by the caller, there is currently not
@@ -282,10 +282,10 @@ rb_rotate_right(RBTree *rb, RBNode *x)
/*
* Maintain Red-Black tree balance after inserting node x.
*
- * The newly inserted node is always initially marked red.  That may lead to
+ * The newly inserted node is always initially marked red. That may lead to
* a situation where a red node has a red child, which is prohibited. We can
* always fix the problem by a series of color changes and/or "rotations",
- * which move the problem progressively higher up in the tree.  If one of the
+ * which move the problem progressively higher up in the tree. If one of the
* two red nodes is the root, we can always fix the problem by changing the
* root from red to black.
*
@@ -296,7 +296,7 @@ static void
rb_insert_fixup(RBTree *rb, RBNode *x)
{
/*
- * x is always a red node.  Initially, it is the newly inserted node.  Each
+ * x is always a red node. Initially, it is the newly inserted node. Each
* iteration of this loop moves it higher up in the tree.
*/
while (x != rb->root && x->parent->color == RBRED)
@@ -481,7 +481,7 @@ rb_delete_fixup(RBTree *rb, RBNode *x)
while (x != rb->root && x->color == RBBLACK)
{
/*
- * Left and right cases are symmetric.  Any nodes that are children of
+ * Left and right cases are symmetric. Any nodes that are children of
* x have a black-height one less than the remainder of the nodes in
* the tree. We rotate and recolor nodes to move the problem up the
* tree: at some stage we'll either fix the problem, or reach the root
diff --git a/src/backend/utils/misc/tzparser.c b/src/backend/utils/misc/tzparser.c
index b52942db721..72ee2afea71 100644
--- a/src/backend/utils/misc/tzparser.c
+++ b/src/backend/utils/misc/tzparser.c
@@ -4,7 +4,7 @@
* Functions for parsing timezone offset files
*
* Note: this code is invoked from the check_hook for the GUC variable
- * timezone_abbreviations. Therefore, it should report problems using
+ * timezone_abbreviations.  Therefore, it should report problems using
* GUC_check_errmsg() and related functions, and try to avoid throwing
* elog(ERROR). This is not completely bulletproof at present --- in
* particular out-of-memory will throw an error. Could probably fix with
@@ -179,7 +179,7 @@ addToArray(tzEntry **base, int *arraysize, int n,
/*
* Search the array for a duplicate; as a useful side effect, the array is
- * maintained in sorted order. We use strcmp() to ensure we match the
+ * maintained in sorted order.  We use strcmp() to ensure we match the
* sort order datetime.c expects.
*/
arrayptr = *base;
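As an illustration of that search-and-insert loop, a stripped-down version over a bare array of strings might look like this (hypothetical helper; the real addToArray() works on tzEntry structs and also handles conflicting duplicates):

#include <string.h>

/* Insert newabbrev into abbrevs[0..n-1], kept in strcmp() order.
 * Returns the new element count; exact duplicates are skipped.
 * Caller must ensure the array has room for one more element. */
static int
insert_sorted(char **abbrevs, int n, char *newabbrev)
{
	int			i;

	for (i = 0; i < n; i++)
	{
		int			cmp = strcmp(newabbrev, abbrevs[i]);

		if (cmp == 0)
			return n;			/* duplicate: nothing to do */
		if (cmp < 0)
			break;				/* found the insertion point */
	}
	memmove(&abbrevs[i + 1], &abbrevs[i], (n - i) * sizeof(char *));
	abbrevs[i] = newabbrev;
	return n + 1;
}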
diff --git a/src/backend/utils/mmgr/aset.c b/src/backend/utils/mmgr/aset.c
index e202acac934..23b345bb595 100644
--- a/src/backend/utils/mmgr/aset.c
+++ b/src/backend/utils/mmgr/aset.c
@@ -38,7 +38,7 @@
* request, even if it was much larger than necessary. This led to more
* and more wasted space in allocated chunks over time. To fix, get rid
* of the midrange behavior: we now handle only "small" power-of-2-size
- * chunks as chunks. Anything "large" is passed off to malloc(). Change
+ * chunks as chunks.  Anything "large" is passed off to malloc().  Change
* the number of freelists to change the small/large boundary.
*
*
@@ -54,7 +54,7 @@
* Thus, if someone makes the common error of writing past what they've
* requested, the problem is likely to go unnoticed ... until the day when
* there *isn't* any wasted space, perhaps because of different memory
- * alignment on a new platform, or some other effect. To catch this sort
+ * alignment on a new platform, or some other effect.  To catch this sort
* of problem, the MEMORY_CONTEXT_CHECKING option stores 0x7E just beyond
* the requested space whenever the request is less than the actual chunk
* size, and verifies that the byte is undamaged when the chunk is freed.
@@ -153,7 +153,7 @@ typedef AllocSetContext *AllocSet;
/*
* AllocBlock
* An AllocBlock is the unit of memory that is obtained by aset.c
- * from malloc(). It contains one or more AllocChunks, which are
+ * from malloc().  It contains one or more AllocChunks, which are
* the units requested by palloc() and freed by pfree(). AllocChunks
* cannot be returned to malloc() individually, instead they are put
* on freelists by pfree() and re-used by the next palloc() that has
@@ -290,7 +290,7 @@ AllocSetFreeIndex(Size size)
/*
* At this point we need to obtain log2(tsize)+1, ie, the number of
- * not-all-zero bits at the right. We used to do this with a
+ * not-all-zero bits at the right.  We used to do this with a
* shift-and-count loop, but this function is enough of a hotspot to
* justify micro-optimization effort. The best approach seems to be
* to use a lookup table. Note that this code assumes that
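To make the lookup-table trick concrete, here is a self-contained sketch (hypothetical names; aset.c's actual table is a static initializer rather than being built at runtime):

static unsigned char log_table[256];	/* log_table[b] = log2(b) + 1 */

static void
init_log_table(void)
{
	int			i,
				bits;

	for (i = 1, bits = 1; i < 256; i++)
	{
		if (i == (1 << bits))
			bits++;				/* crossed a power of two */
		log_table[i] = (unsigned char) bits;
	}
}

/* number of not-all-zero bits at the right of tsize (16-bit range) */
static int
significant_bits(unsigned int tsize)
{
	return (tsize & 0xFF00) ?
		log_table[(tsize >> 8) & 0xFF] + 8 :
		log_table[tsize & 0xFF];
}

One table probe replaces up to a dozen iterations of a shift-and-count loop on the hot path.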
@@ -457,7 +457,7 @@ AllocSetInit(MemoryContext context)
* Actually, this routine has some discretion about what to do.
* It should mark all allocated chunks freed, but it need not necessarily
* give back all the resources the set owns. Our actual implementation is
- * that we hang onto any "keeper" block specified for the set. In this way,
+ * that we hang onto any "keeper" block specified for the set.  In this way,
* we don't thrash malloc() when a context is repeatedly reset after small
* allocations, which is typical behavior for per-tuple contexts.
*/
@@ -690,7 +690,7 @@ AllocSetAlloc(MemoryContext context, Size size)
/*
* In most cases, we'll get back the index of the next larger
- * freelist than the one we need to put this chunk on. The
+ * freelist than the one we need to put this chunk on.  The
* exception is when availchunk is exactly a power of 2.
*/
if (availchunk != ((Size) 1 << (a_fidx + ALLOC_MINBITS)))
@@ -836,7 +836,7 @@ AllocSetFree(MemoryContext context, void *pointer)
{
/*
* Big chunks are certain to have been allocated as single-chunk
- * blocks. Find the containing block and return it to malloc().
+ * blocks.  Find the containing block and return it to malloc().
*/
AllocBlock block = set->blocks;
AllocBlock prevblock = NULL;
@@ -932,7 +932,7 @@ AllocSetRealloc(MemoryContext context, void *pointer, Size size)
if (oldsize > set->allocChunkLimit)
{
/*
- * The chunk must have been allocated as a single-chunk block. Find
+ * The chunk must have been allocated as a single-chunk block.  Find
* the containing block and use realloc() to make it bigger with
* minimum space wastage.
*/
diff --git a/src/backend/utils/mmgr/mcxt.c b/src/backend/utils/mmgr/mcxt.c
index 0e2d151a3ee..42c5ffba44b 100644
--- a/src/backend/utils/mmgr/mcxt.c
+++ b/src/backend/utils/mmgr/mcxt.c
@@ -163,7 +163,7 @@ MemoryContextResetChildren(MemoryContext context)
*
* The type-specific delete routine removes all subsidiary storage
* for the context, but we have to delete the context node itself,
- * as well as recurse to get the children. We must also delink the
+ * as well as recurse to get the children.  We must also delink the
* node from its parent, if it has one.
*/
void
@@ -418,22 +418,22 @@ MemoryContextContains(MemoryContext context, void *pointer)
* we want to be sure that we don't leave the context tree invalid
* in case of failure (such as insufficient memory to allocate the
* context node itself). The procedure goes like this:
- * 1. Context-type-specific routine first calls MemoryContextCreate(),
+ * 1.  Context-type-specific routine first calls MemoryContextCreate(),
* passing the appropriate tag/size/methods values (the methods
* pointer will ordinarily point to statically allocated data).
* The parent and name parameters usually come from the caller.
- * 2. MemoryContextCreate() attempts to allocate the context node,
+ * 2.  MemoryContextCreate() attempts to allocate the context node,
* plus space for the name. If this fails we can ereport() with no
* damage done.
- * 3. We fill in all of the type-independent MemoryContext fields.
- * 4. We call the type-specific init routine (using the methods pointer).
+ * 3.  We fill in all of the type-independent MemoryContext fields.
+ * 4.  We call the type-specific init routine (using the methods pointer).
* The init routine is required to make the node minimally valid
* with zero chance of failure --- it can't allocate more memory,
* for example.
- * 5. Now we have a minimally valid node that can behave correctly
+ * 5.  Now we have a minimally valid node that can behave correctly
* when told to reset or delete itself. We link the node to its
* parent (if any), making the node part of the context tree.
- * 6. We return to the context-type-specific routine, which finishes
+ * 6.  We return to the context-type-specific routine, which finishes
* up type-specific initialization. This routine can now do things
* that might fail (like allocate more memory), so long as it's
* sure the node is left in a state that delete will handle.
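A context-type-specific creation routine following these six steps has roughly the shape below. This is an illustrative skeleton only: the function and method-table names are placeholders, and the MemoryContextCreate() argument list is abbreviated from the real one.

MemoryContext
MyContextCreate(MemoryContext parent, const char *name)
{
	MemoryContext node;

	/* Steps 1-5 happen inside MemoryContextCreate(): it allocates the
	 * node plus name, fills the type-independent fields, runs the
	 * no-fail init method, and links the node under 'parent'. */
	node = MemoryContextCreate(T_AllocSetContext,
							   sizeof(AllocSetContext),
							   &MyContextMethods,	/* static methods struct */
							   parent,
							   name);

	/* Step 6: type-specific setup that may fail (e.g. allocating an
	 * initial block); by now an error still leaves a deletable node. */

	return node;
}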
@@ -445,7 +445,7 @@ MemoryContextContains(MemoryContext context, void *pointer)
*
* Normally, the context node and the name are allocated from
* TopMemoryContext (NOT from the parent context, since the node must
- * survive resets of its parent context!). However, this routine is itself
+ * survive resets of its parent context!).  However, this routine is itself
* used to create TopMemoryContext! If we see that TopMemoryContext is NULL,
* we assume we are creating TopMemoryContext and use malloc() to allocate
* the node.
diff --git a/src/backend/utils/mmgr/portalmem.c b/src/backend/utils/mmgr/portalmem.c
index 9a257e7351b..3dda7cff94c 100644
--- a/src/backend/utils/mmgr/portalmem.c
+++ b/src/backend/utils/mmgr/portalmem.c
@@ -143,14 +143,14 @@ GetPortalByName(const char *name)
* Get the "primary" stmt within a portal, ie, the one marked canSetTag.
*
* Returns NULL if no such stmt. If multiple PlannedStmt structs within the
- * portal are marked canSetTag, returns the first one. Neither of these
+ * portal are marked canSetTag, returns the first one.  Neither of these
* cases should occur in present usages of this function.
*
* Copes if given a list of Querys --- can't happen in a portal, but this
* code also supports plancache.c, which needs both cases.
*
* Note: the reason this is just handed a List is so that plancache.c
- * can share the code. For use with a portal, use PortalGetPrimaryStmt
+ * can share the code.  For use with a portal, use PortalGetPrimaryStmt
* rather than calling this directly.
*/
Node *
@@ -276,7 +276,7 @@ CreateNewPortal(void)
* you can pass a constant string, perhaps "(query not available)".)
*
* commandTag shall be NULL if and only if the original query string
- * (before rewriting) was an empty string. Also, the passed commandTag must
+ * (before rewriting) was an empty string.  Also, the passed commandTag must
* be a pointer to a constant string, since it is not copied.
*
* If cplan is provided, then it is a cached plan containing the stmts,
@@ -479,7 +479,7 @@ PortalDrop(Portal portal, bool isTopCommit)
/*
* Allow portalcmds.c to clean up the state it knows about, in particular
- * shutting down the executor if still active. This step potentially runs
+ * shutting down the executor if still active.  This step potentially runs
* user-defined code so failure has to be expected. It's the cleanup
* hook's responsibility to not try to do that more than once, in the case
* that failure occurs and then we come back to drop the portal again
@@ -506,12 +506,12 @@ PortalDrop(Portal portal, bool isTopCommit)
PortalReleaseCachedPlan(portal);
/*
- * Release any resources still attached to the portal. There are several
+ * Release any resources still attached to the portal.  There are several
* cases being covered here:
*
* Top transaction commit (indicated by isTopCommit): normally we should
* do nothing here and let the regular end-of-transaction resource
- * releasing mechanism handle these resources too. However, if we have a
+ * releasing mechanism handle these resources too.  However, if we have a
* FAILED portal (eg, a cursor that got an error), we'd better clean up
* its resources to avoid resource-leakage warning messages.
*
@@ -523,7 +523,7 @@ PortalDrop(Portal portal, bool isTopCommit)
* cleaned up in transaction abort.
*
* Ordinary portal drop: must release resources. However, if the portal
- * is not FAILED then we do not release its locks. The locks become the
+ * is not FAILED then we do not release its locks.  The locks become the
* responsibility of the transaction's ResourceOwner (since it is the
* parent of the portal's owner) and will be released when the transaction
* eventually ends.
@@ -610,7 +610,7 @@ PortalHashTableDeleteAll(void)
* Holdable cursors created in this transaction need to be converted to
* materialized form, since we are going to close down the executor and
* release locks. Non-holdable portals created in this transaction are
- * simply removed. Portals remaining from prior transactions should be
+ * simply removed.  Portals remaining from prior transactions should be
* left untouched.
*
* Returns TRUE if any portals changed state (possibly causing user-defined
diff --git a/src/backend/utils/resowner/resowner.c b/src/backend/utils/resowner/resowner.c
index e5461e660ef..f8507d8f856 100644
--- a/src/backend/utils/resowner/resowner.c
+++ b/src/backend/utils/resowner/resowner.c
@@ -151,7 +151,7 @@ ResourceOwnerCreate(ResourceOwner parent, const char *name)
* but don't delete the owner objects themselves.
*
* Note that this executes just one phase of release, and so typically
- * must be called three times. We do it this way because (a) we want to
+ * must be called three times.  We do it this way because (a) we want to
* do all the recursion separately for each phase, thereby preserving
* the needed order of operations; and (b) xact.c may have other operations
* to do between the phases.
@@ -225,7 +225,7 @@ ResourceOwnerReleaseInternal(ResourceOwner owner,
*
* During a commit, there shouldn't be any remaining pins --- that
* would indicate failure to clean up the executor correctly --- so
- * issue warnings. In the abort case, just clean up quietly.
+ * issue warnings.  In the abort case, just clean up quietly.
*
* We are careful to do the releasing back-to-front, so as to avoid
* O(N^2) behavior in ResourceOwnerForgetBuffer().
@@ -377,7 +377,7 @@ ResourceOwnerDelete(ResourceOwner owner)
/*
* We delink the owner from its parent before deleting it, so that if
* there's an error we won't have deleted/busted owners still attached to
- * the owner tree. Better a leak than a crash.
+ * the owner tree.  Better a leak than a crash.
*/
ResourceOwnerNewParent(owner, NULL);
@@ -569,7 +569,7 @@ ResourceOwnerForgetBuffer(ResourceOwner owner, Buffer buffer)
/*
* Scan back-to-front because it's more likely we are releasing a
- * recently pinned buffer. This isn't always the case of course, but
+ * recently pinned buffer.  This isn't always the case of course, but
* it's the way to bet.
*/
for (i = nb1; i >= 0; i--)
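The same newest-first pattern works for any array kept in acquisition order; here is a hypothetical miniature over plain ints (the real routine compacts the array to keep its order, rather than swapping in the last entry as done here):

static bool
forget_item(int *items, int *nitems, int value)
{
	int			i;

	/* scan back-to-front: recent acquisitions are released first */
	for (i = *nitems - 1; i >= 0; i--)
	{
		if (items[i] == value)
		{
			items[i] = items[*nitems - 1];	/* fill hole with last entry */
			(*nitems)--;
			return true;
		}
	}
	return false;				/* not tracked by this owner */
}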
diff --git a/src/backend/utils/sort/logtape.c b/src/backend/utils/sort/logtape.c
index 1f21b1dd542..1a5c5de7f8d 100644
--- a/src/backend/utils/sort/logtape.c
+++ b/src/backend/utils/sort/logtape.c
@@ -7,14 +7,14 @@
* tuplesort.c). Merging is an ideal algorithm for tape devices, but if
* we implement it on disk by creating a separate file for each "tape",
* there is an annoying problem: the peak space usage is at least twice
- * the volume of actual data to be sorted. (This must be so because each
+ * the volume of actual data to be sorted.  (This must be so because each
* datum will appear in both the input and output tapes of the final
- * merge pass. For seven-tape polyphase merge, which is otherwise a
+ * merge pass.  For seven-tape polyphase merge, which is otherwise a
* pretty good algorithm, peak usage is more like 4x actual data volume.)
*
* We can work around this problem by recognizing that any one tape
* dataset (with the possible exception of the final output) is written
- * and read exactly once in a perfectly sequential manner. Therefore,
+ * and read exactly once in a perfectly sequential manner.  Therefore,
* a datum once read will not be required again, and we can recycle its
* space for use by the new tape dataset(s) being generated. In this way,
* the total space usage is essentially just the actual data volume, plus
@@ -55,7 +55,7 @@
* To support the above policy of writing to the lowest free block,
* ltsGetFreeBlock sorts the list of free block numbers into decreasing
* order each time it is asked for a block and the list isn't currently
- * sorted. This is an efficient way to handle it because we expect cycles
+ * sorted.  This is an efficient way to handle it because we expect cycles
* of releasing many blocks followed by re-using many blocks, due to
* tuplesort.c's "preread" behavior.
*
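A minimal sketch of that lazy re-sort policy, using hypothetical helpers that mirror the freeBlocks[]/blocksSorted fields shown in the next hunk:

#include <stdlib.h>

static int
cmp_desc(const void *a, const void *b)
{
	long		aa = *(const long *) a;
	long		bb = *(const long *) b;

	return (aa < bb) - (aa > bb);	/* sort into decreasing order */
}

static long
get_free_block(long *freeBlocks, int *nFreeBlocks, bool *blocksSorted)
{
	if (*nFreeBlocks == 0)
		return -1L;				/* no free block: caller extends the file */
	if (!*blocksSorted)
	{
		qsort(freeBlocks, *nFreeBlocks, sizeof(long), cmp_desc);
		*blocksSorted = true;
	}
	/* lowest-numbered block is now last, so popping it is O(1) */
	return freeBlocks[--(*nFreeBlocks)];
}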
@@ -117,7 +117,7 @@ typedef struct LogicalTape
/*
* The total data volume in the logical tape is numFullBlocks * BLCKSZ +
- * lastBlockBytes. BUT: we do not update lastBlockBytes during writing,
+ * lastBlockBytes.  BUT: we do not update lastBlockBytes during writing,
* only at completion of a write phase.
*/
long numFullBlocks; /* number of complete blocks in log tape */
@@ -157,7 +157,7 @@ struct LogicalTapeSet
*
* If blocksSorted is true then the block numbers in freeBlocks are in
* *decreasing* order, so that removing the last entry gives us the lowest
- * free block. We re-sort the blocks whenever a block is demanded; this
+ * free block.  We re-sort the blocks whenever a block is demanded; this
* should be reasonably efficient given the expected usage pattern.
*/
bool forgetFreeSpace; /* are we remembering free blocks? */
@@ -218,7 +218,7 @@ ltsWriteBlock(LogicalTapeSet *lts, long blocknum, void *buffer)
/*
* Read a block-sized buffer from the specified block of the underlying file.
*
- * No need for an error return convention; we ereport() on any error. This
+ * No need for an error return convention; we ereport() on any error.  This
* module should never attempt to read a block it doesn't know is there.
*/
static void
@@ -353,7 +353,7 @@ ltsRecordBlockNum(LogicalTapeSet *lts, IndirectBlock *indirect,
/*
* Reset a logical tape's indirect-block hierarchy after a write pass
- * to prepare for reading. We dump out partly-filled blocks except
+ * to prepare for reading.  We dump out partly-filled blocks except
* at the top of the hierarchy, and we rewind each level to the start.
* This call returns the first data block number, or -1L if the tape
* is empty.
@@ -540,7 +540,7 @@ LogicalTapeSetCreate(int ntapes)
/*
* Initialize per-tape structs. Note we allocate the I/O buffer and
* first-level indirect block for a tape only when it is first actually
- * written to. This avoids wasting memory space when tuplesort.c
+ * written to.  This avoids wasting memory space when tuplesort.c
* overestimates the number of tapes needed.
*/
for (i = 0; i < ntapes; i++)
@@ -591,7 +591,7 @@ LogicalTapeSetClose(LogicalTapeSet *lts)
* Mark a logical tape set as not needing management of free space anymore.
*
* This should be called if the caller does not intend to write any more data
- * into the tape set, but is reading from un-frozen tapes. Since no more
+ * into the tape set, but is reading from un-frozen tapes.  Since no more
* writes are planned, remembering free blocks is no longer useful. Setting
* this flag lets us avoid wasting time and space in ltsReleaseBlock(), which
* is not designed to handle large numbers of free blocks.
@@ -732,7 +732,7 @@ LogicalTapeRewind(LogicalTapeSet *lts, int tapenum, bool forWrite)
else
{
/*
- * Completion of a read phase. Rewind and prepare for write.
+ * Completion of a read phase.  Rewind and prepare for write.
*
* NOTE: we assume the caller has read the tape to the end; otherwise
* untouched data and indirect blocks will not have been freed. We
@@ -826,7 +826,7 @@ LogicalTapeRead(LogicalTapeSet *lts, int tapenum,
*
* This *must* be called just at the end of a write pass, before the
* tape is rewound (after rewind is too late!). It performs a rewind
- * and switch to read mode "for free". An immediately following rewind-
+ * and switch to read mode "for free".  An immediately following rewind-
* for-read call is OK but not necessary.
*/
void
@@ -862,7 +862,7 @@ LogicalTapeFreeze(LogicalTapeSet *lts, int tapenum)
}
/*
- * Backspace the tape a given number of bytes. (We also support a more
+ * Backspace the tape a given number of bytes.  (We also support a more
* general seek interface, see below.)
*
* *Only* a frozen-for-read tape can be backed up; we don't support
@@ -966,7 +966,7 @@ LogicalTapeSeek(LogicalTapeSet *lts, int tapenum,
return false;
/*
- * OK, advance or back up to the target block. This implementation would
+ * OK, advance or back up to the target block.  This implementation would
* be pretty inefficient for long seeks, but we really aren't expecting
* that (a seek over one tuple is typical).
*/
@@ -999,7 +999,7 @@ LogicalTapeSeek(LogicalTapeSet *lts, int tapenum,
* Obtain current position in a form suitable for a later LogicalTapeSeek.
*
* NOTE: it'd be OK to do this during write phase with intention of using
- * the position for a seek after freezing. Not clear if anyone needs that.
+ * the position for a seek after freezing.  Not clear if anyone needs that.
*/
void
LogicalTapeTell(LogicalTapeSet *lts, int tapenum,
diff --git a/src/backend/utils/sort/tuplesort.c b/src/backend/utils/sort/tuplesort.c
index a7f760312a3..90ef0abca62 100644
--- a/src/backend/utils/sort/tuplesort.c
+++ b/src/backend/utils/sort/tuplesort.c
@@ -6,7 +6,7 @@
* This module handles sorting of heap tuples, index tuples, or single
* Datums (and could easily support other kinds of sortable objects,
* if necessary). It works efficiently for both small and large amounts
- * of data. Small amounts are sorted in-memory using qsort(). Large
+ * of data.  Small amounts are sorted in-memory using qsort().  Large
* amounts are sorted using temporary files and a standard external sort
* algorithm.
*
@@ -40,7 +40,7 @@
* into sorted runs in temporary tapes, emitting just enough tuples at each
* step to get back within the workMem limit. Whenever the run number at
* the top of the heap changes, we begin a new run with a new output tape
- * (selected per Algorithm D). After the end of the input is reached,
+ * (selected per Algorithm D).  After the end of the input is reached,
* we dump out remaining tuples in memory into a final run (or two),
* then merge the runs using Algorithm D.
*
@@ -57,17 +57,17 @@
* access at all, defeating the read-ahead methods used by most Unix kernels.
* Worse, the output tape gets written into a very random sequence of blocks
* of the temp file, ensuring that things will be even worse when it comes
- * time to read that tape. A straightforward merge pass thus ends up doing a
+ * time to read that tape.  A straightforward merge pass thus ends up doing a
* lot of waiting for disk seeks. We can improve matters by prereading from
* each source tape sequentially, loading about workMem/M bytes from each tape
* in turn. Then we run the merge algorithm, writing but not reading until
- * one of the preloaded tuple series runs out. Then we switch back to preread
+ * one of the preloaded tuple series runs out.  Then we switch back to preread
* mode, fill memory again, and repeat. This approach helps to localize both
* read and write accesses.
*
* When the caller requests random access to the sort result, we form
* the final sorted run on a logical tape which is then "frozen", so
- * that we can access it randomly. When the caller does not need random
+ * that we can access it randomly.  When the caller does not need random
* access, we return from tuplesort_performsort() as soon as we are down
* to one run per logical tape. The final merge is then performed
* on-the-fly as the caller repeatedly calls tuplesort_getXXX; this
@@ -77,7 +77,7 @@
* grounds that 7 is the "sweet spot" on the tapes-to-passes curve according
* to Knuth's figure 70 (section 5.4.2). However, Knuth is assuming that
* tape drives are expensive beasts, and in particular that there will always
- * be many more runs than tape drives. In our implementation a "tape drive"
+ * be many more runs than tape drives.  In our implementation a "tape drive"
* doesn't cost much more than a few Kb of memory buffers, so we can afford
* to have lots of them. In particular, if we can have as many tape drives
* as sorted runs, we can eliminate any repeated I/O at all. In the current
@@ -136,28 +136,28 @@ bool optimize_bounded_sort = true;
/*
- * The objects we actually sort are SortTuple structs. These contain
+ * The objects we actually sort are SortTuple structs.  These contain
* a pointer to the tuple proper (might be a MinimalTuple or IndexTuple),
* which is a separate palloc chunk --- we assume it is just one chunk and
* can be freed by a simple pfree(). SortTuples also contain the tuple's
* first key column in Datum/nullflag format, and an index integer.
*
* Storing the first key column lets us save heap_getattr or index_getattr
- * calls during tuple comparisons. We could extract and save all the key
+ * calls during tuple comparisons.  We could extract and save all the key
* columns not just the first, but this would increase code complexity and
* overhead, and wouldn't actually save any comparison cycles in the common
* case where the first key determines the comparison result. Note that
* for a pass-by-reference datatype, datum1 points into the "tuple" storage.
*
* When sorting single Datums, the data value is represented directly by
- * datum1/isnull1. If the datatype is pass-by-reference and isnull1 is false,
+ * datum1/isnull1.  If the datatype is pass-by-reference and isnull1 is false,
* then datum1 points to a separately palloc'd data value that is also pointed
* to by the "tuple" pointer; otherwise "tuple" is NULL.
*
* While building initial runs, tupindex holds the tuple's run number. During
* merge passes, we re-use it to hold the input tape number that each tuple in
* the heap was read from, or to hold the index of the next tuple pre-read
- * from the same tape in the case of pre-read entries. tupindex goes unused
+ * from the same tape in the case of pre-read entries.  tupindex goes unused
* if the sort occurs entirely in memory.
*/
typedef struct
@@ -238,7 +238,7 @@ struct Tuplesortstate
void (*copytup) (Tuplesortstate *state, SortTuple *stup, void *tup);
/*
- * Function to write a stored tuple onto tape. The representation of the
+ * Function to write a stored tuple onto tape.  The representation of the
* tuple on tape need not be the same as it is in memory; requirements on
* the tape representation are given below. After writing the tuple,
* pfree() the out-of-line data (not the SortTuple struct!), and increase
@@ -264,7 +264,7 @@ struct Tuplesortstate
void (*reversedirection) (Tuplesortstate *state);
/*
- * This array holds the tuples now in sort memory. If we are in state
+ * This array holds the tuples now in sort memory.  If we are in state
* INITIAL, the tuples are in no particular order; if we are in state
* SORTEDINMEM, the tuples are in final sorted order; in states BUILDRUNS
* and FINALMERGE, the tuples are organized in "heap" order per Algorithm
@@ -407,7 +407,7 @@ struct Tuplesortstate
* If state->randomAccess is true, then the stored representation of the
* tuple must be followed by another "unsigned int" that is a copy of the
* length --- so the total tape space used is actually sizeof(unsigned int)
- * more than the stored length value. This allows read-backwards. When
+ * more than the stored length value.  This allows read-backwards.  When
* randomAccess is not true, the write/read routines may omit the extra
* length word.
*
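Concretely, a writer honoring this layout could be sketched as follows (a hypothetical wrapper, assuming logtape.c's LogicalTapeWrite() interface):

static void
write_tuple_raw(LogicalTapeSet *lts, int tapenum,
				void *tuple, unsigned int tuplen, bool randomAccess)
{
	/* leading length word, then the tuple body */
	LogicalTapeWrite(lts, tapenum, &tuplen, sizeof(tuplen));
	LogicalTapeWrite(lts, tapenum, tuple, tuplen);
	/* trailing copy of the length makes backwards reads possible */
	if (randomAccess)
		LogicalTapeWrite(lts, tapenum, &tuplen, sizeof(tuplen));
}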
@@ -417,7 +417,7 @@ struct Tuplesortstate
* the back length word (if present).
*
* The write/read routines can make use of the tuple description data
- * stored in the Tuplesortstate record, if needed. They are also expected
+ * stored in the Tuplesortstate record, if needed.  They are also expected
* to adjust state->availMem by the amount of memory space (not tape space!)
* released or consumed. There is no error return from either writetup
* or readtup; they should ereport() on failure.
@@ -505,7 +505,7 @@ static void free_sort_tuple(Tuplesortstate *state, SortTuple *stup);
*
* After calling tuplesort_begin, the caller should call tuplesort_putXXX
* zero or more times, then call tuplesort_performsort when all the tuples
- * have been supplied. After performsort, retrieve the tuples in sorted
+ * have been supplied.  After performsort, retrieve the tuples in sorted
* order by calling tuplesort_getXXX until it returns false/NULL. (If random
* access was requested, rescan, markpos, and restorepos can also be called.)
* Call tuplesort_end to terminate the operation and release memory/disk space.
@@ -861,7 +861,7 @@ tuplesort_begin_datum(Oid datumType, Oid sortOperator, Oid sortCollation,
*
* Advise tuplesort that at most the first N result tuples are required.
*
- * Must be called before inserting any tuples. (Actually, we could allow it
+ * Must be called before inserting any tuples.  (Actually, we could allow it
* as long as the sort hasn't spilled to disk, but there seems no need for
* delayed calls at the moment.)
*
@@ -978,7 +978,7 @@ grow_memtuples(Tuplesortstate *state)
* We need to be sure that we do not cause LACKMEM to become true, else
* the space management algorithm will go nuts. We assume here that the
* memory chunk overhead associated with the memtuples array is constant
- * and so there will be no unexpected addition to what we ask for. (The
+ * and so there will be no unexpected addition to what we ask for.  (The
* minimum array size established in tuplesort_begin_common is large
* enough to force palloc to treat it as a separate chunk, so this
* assumption should be good. But let's check it.)
@@ -1115,7 +1115,7 @@ puttuple_common(Tuplesortstate *state, SortTuple *tuple)
case TSS_INITIAL:
/*
- * Save the tuple into the unsorted array. First, grow the array
+ * Save the tuple into the unsorted array.  First, grow the array
* as needed. Note that we try to grow the array when there is
* still one free slot remaining --- if we fail, there'll still be
* room to store the incoming tuple, and then we'll switch to
@@ -1136,7 +1136,7 @@ puttuple_common(Tuplesortstate *state, SortTuple *tuple)
* enough tuples to meet the bound.
*
* Note that once we enter TSS_BOUNDED state we will always try to
- * complete the sort that way. In the worst case, if later input
+ * complete the sort that way.  In the worst case, if later input
* tuples are larger than earlier ones, this might cause us to
* exceed workMem significantly.
*/
@@ -1267,7 +1267,7 @@ tuplesort_performsort(Tuplesortstate *state)
/*
* We were able to accumulate all the tuples required for output
- * in memory, using a heap to eliminate excess tuples. Now we
+ * in memory, using a heap to eliminate excess tuples.  Now we
* have to transform the heap to a properly-sorted array.
*/
sort_bounded_heap(state);
@@ -1281,7 +1281,7 @@ tuplesort_performsort(Tuplesortstate *state)
case TSS_BUILDRUNS:
/*
- * Finish tape-based sort. First, flush all tuples remaining in
+ * Finish tape-based sort.  First, flush all tuples remaining in
* memory out to tape; then merge until we have a single remaining
* run (or, if !randomAccess, one run per tape). Note that
* mergeruns sets the correct state->status.
@@ -1342,7 +1342,7 @@ tuplesort_gettuple_common(Tuplesortstate *state, bool forward,
/*
* Complain if caller tries to retrieve more tuples than
- * originally asked for in a bounded sort. This is because
+ * originally asked for in a bounded sort.  This is because
* returning EOF here might be the wrong thing.
*/
if (state->bounded && state->current >= state->bound)
@@ -1548,7 +1548,7 @@ tuplesort_gettupleslot(Tuplesortstate *state, bool forward,
/*
* Fetch the next tuple in either forward or back direction.
- * Returns NULL if no more tuples. If *should_free is set, the
+ * Returns NULL if no more tuples.  If *should_free is set, the
* caller must pfree the returned tuple when done with it.
*/
HeapTuple
@@ -1567,7 +1567,7 @@ tuplesort_getheaptuple(Tuplesortstate *state, bool forward, bool *should_free)
/*
* Fetch the next index tuple in either forward or back direction.
- * Returns NULL if no more tuples. If *should_free is set, the
+ * Returns NULL if no more tuples.  If *should_free is set, the
* caller must pfree the returned tuple when done with it.
*/
IndexTuple
@@ -1638,7 +1638,7 @@ tuplesort_merge_order(long allowedMem)
/*
* We need one tape for each merge input, plus another one for the output,
- * and each of these tapes needs buffer space. In addition we want
+ * and each of these tapes needs buffer space.  In addition we want
* MERGE_BUFFER_SIZE workspace per input tape (but the output tape doesn't
* count).
*
@@ -1692,7 +1692,7 @@ inittapes(Tuplesortstate *state)
* don't decrease it to the point that we have no room for tuples. (That
* case is only likely to occur if sorting pass-by-value Datums; in all
* other scenarios the memtuples[] array is unlikely to occupy more than
- * half of allowedMem. In the pass-by-value case it's not important to
+ * half of allowedMem.  In the pass-by-value case it's not important to
* account for tuple space, so we don't care if LACKMEM becomes
* inaccurate.)
*/
@@ -1816,7 +1816,7 @@ mergeruns(Tuplesortstate *state)
/*
* If we produced only one initial run (quite likely if the total data
* volume is between 1X and 2X workMem), we can just use that tape as the
- * finished output, rather than doing a useless merge. (This obvious
+ * finished output, rather than doing a useless merge.  (This obvious
* optimization is not in Knuth's algorithm.)
*/
if (state->currentRun == 1)
@@ -1922,7 +1922,7 @@ mergeruns(Tuplesortstate *state)
* the loop without performing the last iteration of step D6, we have not
* rearranged the tape unit assignment, and therefore the result is on
* TAPE[T]. We need to do it this way so that we can freeze the final
- * output tape while rewinding it. The last iteration of step D6 would be
+ * output tape while rewinding it.  The last iteration of step D6 would be
* a waste of cycles anyway...
*/
state->result_tape = state->tp_tapenum[state->tapeRange];
@@ -2006,7 +2006,7 @@ mergeonerun(Tuplesortstate *state)
* beginmerge - initialize for a merge pass
*
* We decrease the counts of real and dummy runs for each tape, and mark
- * which tapes contain active input runs in mergeactive[]. Then, load
+ * which tapes contain active input runs in mergeactive[].  Then, load
* as many tuples as we can from each active input tape, and finally
* fill the merge heap with the first tuple from each active tape.
*/
@@ -2099,7 +2099,7 @@ beginmerge(Tuplesortstate *state)
* This routine exists to improve sequentiality of reads during a merge pass,
* as explained in the header comments of this file. Load tuples from each
* active source tape until the tape's run is exhausted or it has used up
- * its fair share of available memory. In any case, we guarantee that there
+ * its fair share of available memory.  In any case, we guarantee that there
* is at least one preread tuple available from each unexhausted input tape.
*
* We invoke this routine at the start of a merge pass for initial load,
@@ -2362,7 +2362,7 @@ tuplesort_get_stats(Tuplesortstate *state,
* accurately once we have begun to return tuples to the caller (since we
* don't account for pfree's the caller is expected to do), so we cannot
* rely on availMem in a disk sort. This does not seem worth the overhead
- * to fix. Is it worth creating an API for the memory context code to
+ * to fix.  Is it worth creating an API for the memory context code to
* tell us how much is actually used in sortcontext?
*/
if (state->tapeset)
@@ -2400,7 +2400,7 @@ tuplesort_get_stats(Tuplesortstate *state,
/*
* Heap manipulation routines, per Knuth's Algorithm 5.2.3H.
*
- * Compare two SortTuples. If checkIndex is true, use the tuple index
+ * Compare two SortTuples.  If checkIndex is true, use the tuple index
* as the front of the sort key; otherwise, no.
*/
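In outline the rule is as below (a hypothetical helper; the real code inlines this logic as macros for speed):

static int
compare_sorttuple(const SortTuple *a, const SortTuple *b, bool checkIndex,
				  int (*keycmp) (const SortTuple *, const SortTuple *))
{
	/* with checkIndex, the run/tape index is the primary sort key, so
	 * tuples from an earlier run always win the heap comparison */
	if (checkIndex && a->tupindex != b->tupindex)
		return (a->tupindex < b->tupindex) ? -1 : 1;
	return keycmp(a, b);
}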
@@ -2504,7 +2504,7 @@ sort_bounded_heap(Tuplesortstate *state)
/*
* Insert a new tuple into an empty or existing heap, maintaining the
- * heap invariant. Caller is responsible for ensuring there's room.
+ * heap invariant.  Caller is responsible for ensuring there's room.
*
* Note: we assume *tuple is a temporary variable that can be scribbled on.
* For some callers, tuple actually points to a memtuples[] entry above the
@@ -2609,7 +2609,7 @@ markrunend(Tuplesortstate *state, int tapenum)
/*
- * Set up for an external caller of ApplySortFunction. This function
+ * Set up for an external caller of ApplySortFunction.  This function
* basically just exists to localize knowledge of the encoding of sk_flags
* used in this module.
*/
diff --git a/src/backend/utils/sort/tuplestore.c b/src/backend/utils/sort/tuplestore.c
index 4d6e3aa0e4b..9739c6bbed6 100644
--- a/src/backend/utils/sort/tuplestore.c
+++ b/src/backend/utils/sort/tuplestore.c
@@ -8,7 +8,7 @@
* a dumbed-down version of tuplesort.c; it does no sorting of tuples
* but can only store and regurgitate a sequence of tuples. However,
* because no sort is required, it is allowed to start reading the sequence
- * before it has all been written. This is particularly useful for cursors,
+ * before it has all been written.  This is particularly useful for cursors,
* because it allows random access within the already-scanned portion of
* a query without having to process the underlying scan to completion.
* Also, it is possible to support multiple independent read pointers.
@@ -17,7 +17,7 @@
* space limit specified by the caller.
*
* The (approximate) amount of memory allowed to the tuplestore is specified
- * in kilobytes by the caller. We absorb tuples and simply store them in an
+ * in kilobytes by the caller.  We absorb tuples and simply store them in an
* in-memory array as long as we haven't exceeded maxKBytes. If we do exceed
* maxKBytes, we dump all the tuples into a temp file and then read from that
* when needed.
@@ -29,7 +29,7 @@
* When the caller requests backward-scan capability, we write the temp file
* in a format that allows either forward or backward scan. Otherwise, only
* forward scan is allowed. A request for backward scan must be made before
- * putting any tuples into the tuplestore. Rewind is normally allowed but
+ * putting any tuples into the tuplestore.  Rewind is normally allowed but
* can be turned off via tuplestore_set_eflags; turning off rewind for all
* read pointers enables truncation of the tuplestore at the oldest read point
* for minimal memory usage. (The caller must explicitly call tuplestore_trim
@@ -63,7 +63,7 @@
/*
- * Possible states of a Tuplestore object. These denote the states that
+ * Possible states of a Tuplestore object.  These denote the states that
* persist between calls of Tuplestore routines.
*/
typedef enum
@@ -82,7 +82,7 @@ typedef enum
*
* Special case: if eof_reached is true, then the pointer's read position is
* implicitly equal to the write position, and current/file/offset aren't
- * maintained. This way we need not update all the read pointers each time
+ * maintained.  This way we need not update all the read pointers each time
* we write.
*/
typedef struct
@@ -126,7 +126,7 @@ struct Tuplestorestate
void *(*copytup) (Tuplestorestate *state, void *tup);
/*
- * Function to write a stored tuple onto tape. The representation of the
+ * Function to write a stored tuple onto tape.  The representation of the
* tuple on tape need not be the same as it is in memory; requirements on
* the tape representation are given below. After writing the tuple,
* pfree() it, and increase state->availMem by the amount of memory space
@@ -194,7 +194,7 @@ struct Tuplestorestate
* If state->backward is true, then the stored representation of
* the tuple must be followed by another "unsigned int" that is a copy of the
* length --- so the total tape space used is actually sizeof(unsigned int)
- * more than the stored length value. This allows read-backwards. When
+ * more than the stored length value.  This allows read-backwards.  When
* state->backward is not set, the write/read routines may omit the extra
* length word.
*
@@ -290,7 +290,7 @@ tuplestore_begin_common(int eflags, bool interXact, int maxKBytes)
* tuple store are allowed.
*
* interXact: if true, the files used for on-disk storage persist beyond the
- * end of the current transaction. NOTE: It's the caller's responsibility to
+ * end of the current transaction.  NOTE: It's the caller's responsibility to
* create such a tuplestore in a memory context and resource owner that will
* also survive transaction boundaries, and to ensure the tuplestore is closed
* when it's no longer wanted.
@@ -329,7 +329,7 @@ tuplestore_begin_heap(bool randomAccess, bool interXact, int maxKBytes)
* any data into the tuplestore.
*
* eflags is a bitmask following the meanings used for executor node
- * startup flags (see executor.h). tuplestore pays attention to these bits:
+ * startup flags (see executor.h).  tuplestore pays attention to these bits:
* EXEC_FLAG_REWIND need rewind to start
* EXEC_FLAG_BACKWARD need backward fetch
* If tuplestore_set_eflags is not called, REWIND is allowed, and BACKWARD
@@ -739,7 +739,7 @@ tuplestore_puttuple_common(Tuplestorestate *state, void *tuple)
/*
* Fetch the next tuple in either forward or back direction.
- * Returns NULL if no more tuples. If should_free is set, the
+ * Returns NULL if no more tuples.  If should_free is set, the
* caller must pfree the returned tuple when done with it.
*
* Backward scan is only allowed if randomAccess was set true or
diff --git a/src/backend/utils/time/combocid.c b/src/backend/utils/time/combocid.c
index d9b37b2ba3f..772ae17ba43 100644
--- a/src/backend/utils/time/combocid.c
+++ b/src/backend/utils/time/combocid.c
@@ -15,7 +15,7 @@
* this module.
*
* To allow reusing existing combo cids, we also keep a hash table that
- * maps cmin,cmax pairs to combo cids. This keeps the data structure size
+ * maps cmin,cmax pairs to combo cids.  This keeps the data structure size
* reasonable in most cases, since the number of unique pairs used by any
* one transaction is likely to be small.
*
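The mapping can be pictured as a hash entry keyed on the pair (a hypothetical miniature; combocid.c's real definitions are equivalent in spirit):

typedef struct
{
	CommandId	cmin;			/* hash key, part 1 */
	CommandId	cmax;			/* hash key, part 2 */
} ComboCidKey;

typedef struct
{
	ComboCidKey key;			/* must be first, per dynahash convention */
	CommandId	combocid;		/* value: index into the combo cid array */
} ComboCidEntry;

Looking up an existing (cmin, cmax) pair then returns the previously issued combo cid instead of consuming a new slot.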
diff --git a/src/backend/utils/time/snapmgr.c b/src/backend/utils/time/snapmgr.c
index 5917b014fdc..2ce477f3991 100644
--- a/src/backend/utils/time/snapmgr.c
+++ b/src/backend/utils/time/snapmgr.c
@@ -8,9 +8,9 @@
* (tracked by separate refcounts on each snapshot), its memory can be freed.
*
* These arrangements let us reset MyProc->xmin when there are no snapshots
- * referenced by this transaction. (One possible improvement would be to be
+ * referenced by this transaction.  (One possible improvement would be to be
* able to advance Xmin when the snapshot with the earliest Xmin is no longer
- * referenced. That's a bit harder though, it requires more locking, and
+ * referenced.  That's a bit harder though, it requires more locking, and
* anyway it should be rather uncommon to keep snapshots referenced for too
* long.)
*
@@ -41,7 +41,7 @@
* CurrentSnapshot points to the only snapshot taken in transaction-snapshot
* mode, and to the latest one taken in a read-committed transaction.
* SecondarySnapshot is a snapshot that's always up-to-date as of the current
- * instant, even in transaction-snapshot mode. It should only be used for
+ * instant, even in transaction-snapshot mode.  It should only be used for
* special-purpose code (say, RI checking.)
*
* These SnapshotData structs are static to simplify memory allocation
@@ -60,7 +60,7 @@ static Snapshot SecondarySnapshot = NULL;
* mode, we don't want it to say that BootstrapTransactionId is in progress.
*
* RecentGlobalXmin is initialized to InvalidTransactionId, to ensure that no
- * one tries to use a stale value. Readers should ensure that it has been set
+ * one tries to use a stale value.  Readers should ensure that it has been set
* to something else before using it.
*/
TransactionId TransactionXmin = FirstNormalTransactionId;
@@ -270,7 +270,7 @@ FreeSnapshot(Snapshot snapshot)
*
* If the passed snapshot is a statically-allocated one, or it is possibly
* subject to a future command counter update, create a new long-lived copy
- * with active refcount=1. Otherwise, only increment the refcount.
+ * with active refcount=1.  Otherwise, only increment the refcount.
*/
void
PushActiveSnapshot(Snapshot snap)
diff --git a/src/backend/utils/time/tqual.c b/src/backend/utils/time/tqual.c
index b396e7ecf78..d7cb7a4a9c5 100644
--- a/src/backend/utils/time/tqual.c
+++ b/src/backend/utils/time/tqual.c
@@ -20,7 +20,7 @@
* TransactionIdDidCommit will both return true. If we check only
* TransactionIdDidCommit, we could consider a tuple committed when a
* later GetSnapshotData call will still think the originating transaction
- * is in progress, which leads to application-level inconsistency. The
+ * is in progress, which leads to application-level inconsistency.  The
* upshot is that we gotta check TransactionIdIsInProgress first in all
* code paths, except for a few cases where we are looking at
* subtransactions of our own main transaction and so there can't be any
@@ -87,13 +87,13 @@ static bool XidInMVCCSnapshot(TransactionId xid, Snapshot snapshot);
* buffer, so we can't use the LSN to interlock this; we have to just refrain
* from setting the hint bit until some future re-examination of the tuple.
*
- * We can always set hint bits when marking a transaction aborted. (Some
+ * We can always set hint bits when marking a transaction aborted.  (Some
* code in heapam.c relies on that!)
*
* Also, if we are cleaning up HEAP_MOVED_IN or HEAP_MOVED_OFF entries, then
* we can always set the hint bits, since pre-9.0 VACUUM FULL always used
* synchronous commits and didn't move tuples that weren't previously
- * hinted. (This is not known by this subroutine, but is applied by its
+ * hinted.  (This is not known by this subroutine, but is applied by its
* callers.) Note: old-style VACUUM FULL is gone, but we have to keep this
* module's support for MOVED_OFF/MOVED_IN flag bits for as long as we
* support in-place update from pre-9.0 databases.
@@ -490,7 +490,7 @@ HeapTupleSatisfiesAny(HeapTupleHeader tuple, Snapshot snapshot, Buffer buffer)
* This is a simplified version that only checks for VACUUM moving conditions.
* It's appropriate for TOAST usage because TOAST really doesn't want to do
* its own time qual checks; if you can see the main table row that contains
- * a TOAST reference, you should be able to see the TOASTed value. However,
+ * a TOAST reference, you should be able to see the TOASTed value.  However,
* vacuuming a TOAST table is independent of the main table, and in case such
* a vacuum fails partway through, we'd better do this much checking.
*
@@ -1060,7 +1060,7 @@ HeapTupleSatisfiesMVCC(HeapTupleHeader tuple, Snapshot snapshot,
* we mainly want to know is if a tuple is potentially visible to *any*
* running transaction. If so, it can't be removed yet by VACUUM.
*
- * OldestXmin is a cutoff XID (obtained from GetOldestXmin()). Tuples
+ * OldestXmin is a cutoff XID (obtained from GetOldestXmin()).  Tuples
* deleted by XIDs >= OldestXmin are deemed "recently dead"; they might
* still be visible to some open transaction, so we can't remove them,
* even if we see that the deleting transaction has committed.
@@ -1146,7 +1146,7 @@ HeapTupleSatisfiesVacuum(HeapTupleHeader tuple, TransactionId OldestXmin,
}
/*
- * Okay, the inserter committed, so it was good at some point. Now what
+ * Okay, the inserter committed, so it was good at some point.  Now what
* about the deleting transaction?
*/
if (tuple->t_infomask & HEAP_XMAX_INVALID)
@@ -1245,7 +1245,7 @@ XidInMVCCSnapshot(TransactionId xid, Snapshot snapshot)
/*
* Make a quick range check to eliminate most XIDs without looking at the
- * xip arrays. Note that this is OK even if we convert a subxact XID to
+ * xip arrays.  Note that this is OK even if we convert a subxact XID to
* its parent below, because a subxact with XID < xmin has surely also got
* a parent with XID < xmin, while one with XID >= xmax must belong to a
* parent that was not yet committed at the time of this snapshot.
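Stripped of subtransaction handling and snapshot plumbing, the shape of that test is (illustrative only):

static bool
xid_in_snapshot_simple(TransactionId xid,
					   TransactionId xmin, TransactionId xmax,
					   const TransactionId *xip, int xcnt)
{
	int			i;

	if (TransactionIdPrecedes(xid, xmin))
		return false;			/* older than xmin: seen as completed */
	if (!TransactionIdPrecedes(xid, xmax))
		return true;			/* xmax or newer: still running for us */

	/* only XIDs inside [xmin, xmax) require scanning the xip array */
	for (i = 0; i < xcnt; i++)
	{
		if (TransactionIdEquals(xid, xip[i]))
			return true;
	}
	return false;
}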
diff --git a/src/bin/initdb/initdb.c b/src/bin/initdb/initdb.c
index 65fb4db0046..091b13f3e4d 100644
--- a/src/bin/initdb/initdb.c
+++ b/src/bin/initdb/initdb.c
@@ -18,7 +18,7 @@
* to produce a new database.
*
* For largely-historical reasons, the template1 database is the one built
- * by the basic bootstrap process. After it is complete, template0 and
+ * by the basic bootstrap process.  After it is complete, template0 and
* the default database, postgres, are made just by copying template1.
*
* To create template1, we run the postgres (backend) program in bootstrap
@@ -677,7 +677,7 @@ find_matching_ts_config(const char *lc_type)
/*
* Convert lc_ctype to a language name by stripping everything after an
- * underscore. Just for paranoia, we also stop at '.' or '@'.
+ * underscore.  Just for paranoia, we also stop at '.' or '@'.
*/
if (lc_type == NULL)
langname = xstrdup("");
@@ -1673,7 +1673,7 @@ setup_collation(void)
* When copying collations to the final location, eliminate aliases that
* conflict with an existing locale name for the same encoding. For
* example, "br_FR.iso88591" is normalized to "br_FR", both for encoding
- * LATIN1. But the unnormalized locale "br_FR" already exists for LATIN1.
+ * LATIN1.  But the unnormalized locale "br_FR" already exists for LATIN1.
* Prefer the alias that matches the OS locale name, else the first locale
* name by sort order (arbitrary choice to be deterministic).
*
@@ -1780,7 +1780,7 @@ setup_dictionary(void)
/*
* Set up privileges
*
- * We mark most system catalogs as world-readable. We don't currently have
+ * We mark most system catalogs as world-readable.  We don't currently have
* to touch functions, languages, or databases, because their default
* permissions are OK.
*
@@ -2112,7 +2112,7 @@ check_ok(void)
*
* Note: this is used to process both postgresql.conf entries and SQL
* string literals. Since postgresql.conf strings are defined to treat
- * backslashes as escapes, we have to double backslashes here. Hence,
+ * backslashes as escapes, we have to double backslashes here.  Hence,
* when using this for a SQL string literal, use E'' syntax.
*
* We do not need to worry about encoding considerations because all
diff --git a/src/bin/pg_controldata/pg_controldata.c b/src/bin/pg_controldata/pg_controldata.c
index 2d52f784ae4..b0937cd9e0f 100644
--- a/src/bin/pg_controldata/pg_controldata.c
+++ b/src/bin/pg_controldata/pg_controldata.c
@@ -12,7 +12,7 @@
/*
* We have to use postgres.h not postgres_fe.h here, because there's so much
* backend-only stuff in the XLOG include files we need. But we need a
- * frontend-ish environment otherwise. Hence this ugly hack.
+ * frontend-ish environment otherwise.  Hence this ugly hack.
*/
#define FRONTEND 1
diff --git a/src/bin/pg_ctl/pg_ctl.c b/src/bin/pg_ctl/pg_ctl.c
index 7e2a126474e..9b9fbe8d245 100644
--- a/src/bin/pg_ctl/pg_ctl.c
+++ b/src/bin/pg_ctl/pg_ctl.c
@@ -483,7 +483,7 @@ test_postmaster_connection(bool do_checkpoint)
* 6 9.1+ server, shared memory not created
* 7 9.1+ server, shared memory created
*
- * This code does not support pre-9.1 servers. On Unix machines
+ * This code does not support pre-9.1 servers.  On Unix machines
* we could consider extracting the port number from the shmem
* key, but that (a) is not robust, and (b) doesn't help with
* finding out the socket directory. And it wouldn't work anyway
@@ -516,7 +516,7 @@ test_postmaster_connection(bool do_checkpoint)
time_t pmstart;
/*
- * Make sanity checks. If it's for a standalone backend
+ * Make sanity checks.  If it's for a standalone backend
* (negative PID), or the recorded start time is before
* pg_ctl started, then either we are looking at the wrong
* data directory, or this is a pre-existing pidfile that
@@ -630,7 +630,7 @@ test_postmaster_connection(bool do_checkpoint)
/*
* If we've been able to identify the child postmaster's PID, check
- * the process is still alive. This covers cases where the postmaster
+ * the process is still alive.  This covers cases where the postmaster
* successfully created the pidfile but then crashed without removing
* it.
*/
@@ -1171,7 +1171,7 @@ postmaster_is_alive(pid_t pid)
* postmaster we are after.
*
* Don't believe that our own PID or parent shell's PID is the postmaster,
- * either. (Windows hasn't got getppid(), though.)
+ * either.  (Windows hasn't got getppid(), though.)
*/
if (pid == getpid())
return false;
diff --git a/src/bin/pg_dump/common.c b/src/bin/pg_dump/common.c
index 87207f7d83a..72100e0024c 100644
--- a/src/bin/pg_dump/common.c
+++ b/src/bin/pg_dump/common.c
@@ -40,7 +40,7 @@ static int numCatalogIds = 0;
/*
* These variables are static to avoid the notational cruft of having to pass
- * them into findTableByOid() and friends. For each of these arrays, we
+ * them into findTableByOid() and friends.  For each of these arrays, we
* build a sorted-by-OID index array immediately after it's built, and then
* we use binary search in findTableByOid() and friends. (qsort'ing the base
* arrays themselves would be simpler, but it doesn't work because pg_dump.c
@@ -483,7 +483,7 @@ findObjectByDumpId(DumpId dumpId)
*
* We use binary search in a sorted list that is built on first call.
* If AssignDumpId() and findObjectByCatalogId() calls were freely intermixed,
- * the code would work, but possibly be very slow. In the current usage
+ * the code would work, but possibly be very slow.  In the current usage
* pattern that does not happen, indeed we build the list at most twice.
*/
DumpableObject *
diff --git a/src/bin/pg_dump/dumputils.c b/src/bin/pg_dump/dumputils.c
index 6e5e625e6d0..734066ccfa3 100644
--- a/src/bin/pg_dump/dumputils.c
+++ b/src/bin/pg_dump/dumputils.c
@@ -175,7 +175,7 @@ fmtId(const char *rawid)
* standard_conforming_strings settings.
*
* This is essentially equivalent to libpq's PQescapeStringInternal,
- * except for the output buffer structure. We need it in situations
+ * except for the output buffer structure.  We need it in situations
* where we do not have a PGconn available. Where we do,
* appendStringLiteralConn is a better choice.
*/
@@ -350,7 +350,7 @@ appendByteaLiteral(PQExpBuffer buf, const unsigned char *str, size_t length,
/*
* This implementation is hard-wired to produce hex-format output. We do
* not know the server version the output will be loaded into, so making
- * an intelligent format choice is impossible. It might be better to
+ * an intelligent format choice is impossible.  It might be better to
* always use the old escaped format.
*/
if (!enlargePQExpBuffer(buf, 2 * length + 5))
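For reference, hex format is just a backslash-x prefix followed by two hex digits per byte; a minimal encoder (hypothetical helper writing into a pre-sized buffer of at least 2 * length + 3 bytes) looks like:

static void
append_bytea_hex(char *dst, const unsigned char *src, size_t length)
{
	static const char hextbl[] = "0123456789abcdef";
	size_t		i;

	*dst++ = '\\';				/* emits the literal prefix \x */
	*dst++ = 'x';
	for (i = 0; i < length; i++)
	{
		*dst++ = hextbl[src[i] >> 4];
		*dst++ = hextbl[src[i] & 0xF];
	}
	*dst = '\0';
}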
@@ -407,7 +407,7 @@ parse_version(const char *versionString)
* into individual items.
*
* On success, returns true and sets *itemarray and *nitems to describe
- * an array of individual strings. On parse failure, returns false;
+ * an array of individual strings.  On parse failure, returns false;
* *itemarray may exist or be NULL.
*
* NOTE: free'ing itemarray is sufficient to deallocate the working storage.
@@ -547,7 +547,7 @@ buildACLCommands(const char *name, const char *subname,
/*
* At the end, these two will be pasted together to form the result. But
* the owner privileges need to go before the other ones to keep the
- * dependencies valid. In recent versions this is normally the case, but
+ * dependencies valid.  In recent versions this is normally the case, but
* in old versions they come after the PUBLIC privileges and that results
* in problems if we need to run REVOKE on the owner privileges.
*/
@@ -717,7 +717,7 @@ buildDefaultACLCommands(const char *type, const char *nspname,
/*
* We incorporate the target role directly into the command, rather than
- * playing around with SET ROLE or anything like that. This is so that a
+ * playing around with SET ROLE or anything like that.  This is so that a
* permissions error leads to nothing happening, rather than changing
* default privileges for the wrong user.
*/
@@ -745,7 +745,7 @@ buildDefaultACLCommands(const char *type, const char *nspname,
*
* The returned grantee string will be the dequoted username or groupname
* (preceded with "group " in the latter case). The returned grantor is
- * the dequoted grantor name or empty. Privilege characters are decoded
+ * the dequoted grantor name or empty.  Privilege characters are decoded
* and split between privileges with grant option (privswgo) and without
* (privs).
*
@@ -975,7 +975,7 @@ AddAcl(PQExpBuffer aclbuf, const char *keyword, const char *subname)
* namevar: name of query variable to match against an object-name pattern.
* altnamevar: NULL, or name of an alternative variable to match against name.
* visibilityrule: clause to use if we want to restrict to visible objects
- * (for example, "pg_catalog.pg_table_is_visible(p.oid)"). Can be NULL.
+ * (for example, "pg_catalog.pg_table_is_visible(p.oid)").  Can be NULL.
*
* Formatting note: the text already present in buf should end with a newline.
* The appended text, if any, will end with one too.
@@ -1022,7 +1022,7 @@ processSQLNamePattern(PGconn *conn, PQExpBuffer buf, const char *pattern,
* last alternatives which is not what we want.
*
* Note: the result of this pass is the actual regexp pattern(s) we want
- * to execute. Quoting/escaping into SQL literal format will be done
+ * to execute.  Quoting/escaping into SQL literal format will be done
* below using appendStringLiteralConn().
*/
appendPQExpBufferStr(&namebuf, "^(");
diff --git a/src/bin/pg_dump/pg_backup_archiver.c b/src/bin/pg_dump/pg_backup_archiver.c
index 63e6e859e88..806146acfe9 100644
--- a/src/bin/pg_dump/pg_backup_archiver.c
+++ b/src/bin/pg_dump/pg_backup_archiver.c
@@ -592,8 +592,8 @@ restore_toc_entry(ArchiveHandle *AH, TocEntry *te,
/*
* In parallel restore, if we created the table earlier in
* the run then we wrap the COPY in a transaction and
- * precede it with a TRUNCATE. If archiving is not on
- * this prevents WAL-logging the COPY. This obtains a
+ * precede it with a TRUNCATE.  If archiving is not on
+ * this prevents WAL-logging the COPY.  This obtains a
* speedup similar to that from using single_txn mode in
* non-parallel restores.
*/
@@ -2543,7 +2543,7 @@ _doSetSessionAuth(ArchiveHandle *AH, const char *user)
appendPQExpBuffer(cmd, "SET SESSION AUTHORIZATION ");
/*
- * SQL requires a string literal here. Might as well be correct.
+ * SQL requires a string literal here. Might as well be correct.
*/
if (user && *user)
appendStringLiteralAHX(cmd, user, AH);
@@ -2674,7 +2674,7 @@ _becomeUser(ArchiveHandle *AH, const char *user)
}
/*
- * Become the owner of the given TOC entry object. If
+ * Become the owner of the given TOC entry object. If
* changes in ownership are not allowed, this doesn't do anything.
*/
static void
@@ -3165,7 +3165,7 @@ ReadHead(ArchiveHandle *AH)
/*
* If we haven't already read the header, do so.
*
- * NB: this code must agree with _discoverArchiveFormat(). Maybe find a
+ * NB: this code must agree with _discoverArchiveFormat(). Maybe find a
* way to unify the cases?
*/
if (!AH->readHeader)
@@ -3276,7 +3276,7 @@ checkSeek(FILE *fp)
return false;
/*
- * Check that fseeko(SEEK_SET) works, too. NB: we used to try to test
+ * Check that fseeko(SEEK_SET) works, too. NB: we used to try to test
* this with fseeko(fp, 0, SEEK_CUR). But some platforms treat that as a
* successful no-op even on files that are otherwise unseekable.
*/
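A self-contained sketch of the probe described (the real checkSeek() also remembers the position for reuse; the function name here is illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    static bool
    seek_probe(FILE *fp)
    {
        off_t   tpos = ftello(fp);      /* current position */

        if (tpos < 0)
            return false;               /* ftello itself not supported */
        /* explicit fseeko(SEEK_SET) test, per the note above */
        return fseeko(fp, tpos, SEEK_SET) == 0;
    }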
@@ -3317,7 +3317,7 @@ dumpTimestamp(ArchiveHandle *AH, const char *msg, time_t tim)
*
* Work is done in three phases.
* First we process all SECTION_PRE_DATA tocEntries, in a single connection,
- * just as for a standard restore. Second we process the remaining non-ACL
+ * just as for a standard restore. Second we process the remaining non-ACL
* steps in parallel worker children (threads on Windows, processes on Unix),
* each of which connects separately to the database. Finally we process all
* the ACL entries in a single connection (that happens back in
@@ -3349,7 +3349,7 @@ restore_toc_entries_parallel(ArchiveHandle *AH)
* Do all the early stuff in a single connection in the parent. There's no
* great point in running it in parallel, in fact it will actually run
* faster in a single connection because we avoid all the connection and
- * setup overhead. Also, pg_dump is not currently very good about showing
+ * setup overhead. Also, pg_dump is not currently very good about showing
* all the dependencies of SECTION_PRE_DATA items, so we do not risk
* trying to process them out-of-order.
*/
@@ -3390,7 +3390,7 @@ restore_toc_entries_parallel(ArchiveHandle *AH)
}
/*
- * Now close parent connection in prep for parallel steps. We do this
+ * Now close parent connection in prep for parallel steps. We do this
* mainly to ensure that we don't exceed the specified number of parallel
* connections.
*/
@@ -3413,7 +3413,7 @@ restore_toc_entries_parallel(ArchiveHandle *AH)
* Initialize the lists of pending and ready items. After this setup, the
* pending list is everything that needs to be done but is blocked by one
* or more dependencies, while the ready list contains items that have no
- * remaining dependencies. Note: we don't yet filter out entries that
+ * remaining dependencies. Note: we don't yet filter out entries that
* aren't going to be restored. They might participate in dependency
* chains connecting entries that should be restored, so we treat them as
* live until we actually process them.
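A sketch of that setup step, following the archiver's naming (depCount is taken to count unsatisfied dependencies; the exact bookkeeping is illustrative):

    for (te = AH->toc->next; te != AH->toc; te = te->next)
    {
        if (te->depCount > 0)
            par_list_append(&pending_list, te);  /* still blocked */
        else
            par_list_append(&ready_list, te);    /* restorable now */
    }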
@@ -3980,7 +3980,7 @@ fix_dependencies(ArchiveHandle *AH)
* Note: currently, a TABLE DATA should always have exactly one
* dependency, on its TABLE item. So we don't bother to search, but look
 * just at the first dependency. We take the trouble to make sure that it's a
- * TABLE, if possible. However, if the dependency isn't in the archive
+ * TABLE, if possible. However, if the dependency isn't in the archive
* then just assume it was a TABLE; this is to cover cases where the table
* was suppressed but we have the data and some dependent post-data items.
*
diff --git a/src/bin/pg_dump/pg_backup_db.c b/src/bin/pg_dump/pg_backup_db.c
index 3851398b1df..b8f99babc22 100644
--- a/src/bin/pg_dump/pg_backup_db.c
+++ b/src/bin/pg_dump/pg_backup_db.c
@@ -73,7 +73,7 @@ _check_database_version(ArchiveHandle *AH)
/*
* Reconnect to the server. If dbname is not NULL, use that database,
* else the one associated with the archive handle. If username is
- * not NULL, use that user name, else the one from the handle. If
+ * not NULL, use that user name, else the one from the handle. If
* both the database and the user match the existing connection already,
* nothing will be done.
*
@@ -114,7 +114,7 @@ ReconnectToServer(ArchiveHandle *AH, const char *dbname, const char *username)
*
* Note: it's not really all that sensible to use a single-entry password
* cache if the username keeps changing. In current usage, however, the
- * username never does change, so one savedPassword is sufficient. We do
+ * username never does change, so one savedPassword is sufficient. We do
* update the cache on the off chance that the password has changed since the
* start of the run.
*/
diff --git a/src/bin/pg_dump/pg_dump.c b/src/bin/pg_dump/pg_dump.c
index aa4fb79ed8c..a4a0a99a5b1 100644
--- a/src/bin/pg_dump/pg_dump.c
+++ b/src/bin/pg_dump/pg_dump.c
@@ -1139,7 +1139,7 @@ selectDumpableTable(TableInfo *tbinfo)
* Mark a type as to be dumped or not
*
* If it's a table's rowtype or an autogenerated array type, we also apply a
- * special type code to facilitate sorting into the desired order. (We don't
+ * special type code to facilitate sorting into the desired order. (We don't
* want to consider those to be ordinary types because that would bring tables
* up into the datatype part of the dump order.) We still set the object's
* dump flag; that's not going to cause the dummy type to be dumped, but we
@@ -1502,7 +1502,7 @@ dumpTableData_insert(Archive *fout, void *dcontext)
/*
* These types are printed without quotes unless
* they contain values that aren't accepted by the
- * scanner unquoted (e.g., 'NaN'). Note that
+ * scanner unquoted (e.g., 'NaN'). Note that
* strtod() and friends might accept NaN, so we
* can't use that to test.
*
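The test this implies can be a simple character scan rather than a strtod() call; a hypothetical helper along these lines:

    #include <ctype.h>
    #include <stdbool.h>

    /*
     * True if a numeric output value must be quoted: anything besides
     * digits, sign, decimal point, or exponent marker (this is what
     * catches 'NaN' and 'Infinity').
     */
    static bool
    value_needs_quotes(const char *s)
    {
        for (; *s; s++)
        {
            if (!isdigit((unsigned char) *s) &&
                *s != '+' && *s != '-' && *s != '.' &&
                *s != 'e' && *s != 'E')
                return true;
        }
        return false;
    }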
@@ -1720,7 +1720,7 @@ getTableDataFKConstraints(void)
/*
* guessConstraintInheritance:
* In pre-8.4 databases, we can't tell for certain which constraints
- * are inherited. We assume a CHECK constraint is inherited if its name
+ * are inherited. We assume a CHECK constraint is inherited if its name
* matches the name of any constraint in the parent. Originally this code
* tried to compare the expression texts, but that can fail for various
* reasons --- for example, if the parent and child tables are in different
@@ -2738,7 +2738,7 @@ getNamespaces(int *numNamespaces)
* getNamespaces
*
* NB: for pre-7.3 source database, we use object OID to guess whether it's
- * a system object or not. In 7.3 and later there is no guessing, and we
+ * a system object or not. In 7.3 and later there is no guessing, and we
* don't use objoid at all.
*/
static NamespaceInfo *
@@ -2900,7 +2900,7 @@ getTypes(int *numTypes)
* auto-generated array type by checking the element type's typarray.
* (Before that the test is capable of generating false positives.) We
* still check for name beginning with '_', though, so as to avoid the
- * cost of the subselect probe for all standard types. This would have to
+ * cost of the subselect probe for all standard types. This would have to
* be revisited if the backend ever allows renaming of array types.
*/
@@ -3027,7 +3027,7 @@ getTypes(int *numTypes)
/*
* If it's a base type, make a DumpableObject representing a shell
- * definition of the type. We will need to dump that ahead of the I/O
+ * definition of the type. We will need to dump that ahead of the I/O
* functions for the type.
*
* Note: the shell type doesn't have a catId. You might think it
@@ -3740,7 +3740,7 @@ getFuncs(int *numFuncs)
* Find all user-defined functions. Normally we can exclude functions in
* pg_catalog, which is worth doing since there are several thousand of
* 'em. However, there are some extensions that create functions in
- * pg_catalog. In normal dumps we can still ignore those --- but in
+ * pg_catalog. In normal dumps we can still ignore those --- but in
* binary-upgrade mode, we must dump the member objects of the extension,
* so be sure to fetch any such functions.
*/
@@ -4718,7 +4718,7 @@ getIndexes(TableInfo tblinfo[], int numTables)
/*
* In pre-7.4 releases, indkeys may contain more entries than
* indnkeys says (since indnkeys will be 1 for a functional
- * index). We don't actually care about this case since we don't
+ * index). We don't actually care about this case since we don't
* examine indkeys except for indexes associated with PRIMARY and
* UNIQUE constraints, which are never functional indexes. But we
* have to allocate enough space to keep parseOidArray from
@@ -5674,7 +5674,7 @@ getTableAttrs(TableInfo *tblinfo, int numTables)
if (g_fout->remoteVersion >= 90100)
{
/*
- * attcollation is new in 9.1. Since we only want to dump COLLATE
+ * attcollation is new in 9.1. Since we only want to dump COLLATE
* clauses for attributes whose collation is different from their
* type's default, we use a CASE here to suppress uninteresting
* attcollations cheaply.
@@ -5927,7 +5927,7 @@ getTableAttrs(TableInfo *tblinfo, int numTables)
/*
* Defaults on a VIEW must always be dumped as separate ALTER
- * TABLE commands. Defaults on regular tables are dumped as
+ * TABLE commands. Defaults on regular tables are dumped as
* part of the CREATE TABLE if possible, which it won't be
* if the column is not going to be emitted explicitly.
*/
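The separate-command form looks roughly like this, modeled on dumpAttrDef() (field names are the dumper's; adnum and the surrounding buffer handling are elided context):

    appendPQExpBuffer(q, "ALTER TABLE %s ",
                      fmtId(tbinfo->dobj.name));
    appendPQExpBuffer(q, "ALTER COLUMN %s SET DEFAULT %s;\n",
                      fmtId(tbinfo->attnames[adnum - 1]),
                      adinfo->adef_expr);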
@@ -6096,7 +6096,7 @@ getTableAttrs(TableInfo *tblinfo, int numTables)
/*
* If the constraint is inherited, this will be detected later
- * (in pre-8.4 databases). We also detect later if the
+ * (in pre-8.4 databases). We also detect later if the
* constraint must be split out from the table definition.
*/
}
@@ -6923,7 +6923,7 @@ findComments(Archive *fout, Oid classoid, Oid objoid,
/*
* Pre-7.2, pg_description does not contain classoid, so collectComments
- * just stores a zero. If there's a collision on object OID, well, you
+ * just stores a zero. If there's a collision on object OID, well, you
* get duplicate comments.
*/
if (fout->remoteVersion < 70200)
@@ -7304,7 +7304,7 @@ dumpExtension(Archive *fout, ExtensionInfo *extinfo)
/*
* Note that we're pushing extconfig (an OID array) back into
- * pg_extension exactly as-is. This is OK because pg_class OIDs are
+ * pg_extension exactly as-is. This is OK because pg_class OIDs are
* preserved in binary upgrade.
*/
if (strlen(extinfo->extconfig) > 2)
@@ -8096,7 +8096,7 @@ dumpCompositeType(Archive *fout, TypeInfo *tyinfo)
if (fout->remoteVersion >= 90100)
{
/*
- * attcollation is new in 9.1. Since we only want to dump COLLATE
+ * attcollation is new in 9.1. Since we only want to dump COLLATE
* clauses for attributes whose collation is different from their
* type's default, we use a CASE here to suppress uninteresting
* attcollations cheaply. atttypid will be 0 for dropped columns;
@@ -8406,7 +8406,7 @@ dumpShellType(Archive *fout, ShellTypeInfo *stinfo)
/*
* Note the lack of a DROP command for the shell type; any required DROP
- * is driven off the base type entry, instead. This interacts with
+ * is driven off the base type entry, instead. This interacts with
* _printTocEntry()'s use of the presence of a DROP command to decide
* whether an entry needs an ALTER OWNER command. We don't want to alter
* the shell type's owner immediately on creation; that should happen only
@@ -8443,7 +8443,7 @@ dumpShellType(Archive *fout, ShellTypeInfo *stinfo)
*
* For some backwards compatibility with the older behavior, we forcibly
* dump a PL if its handler function (and validator if any) are in a
- * dumpable namespace. That case is not checked here.
+ * dumpable namespace. That case is not checked here.
*
* Also, if the PL belongs to an extension, we do not use this heuristic.
* That case isn't checked here either.
@@ -9112,7 +9112,7 @@ dumpFunc(Archive *fout, FuncInfo *finfo)
/*
* COST and ROWS are emitted only if present and not default, so as not to
- * break backwards-compatibility of the dump without need. Keep this code
+ * break backwards-compatibility of the dump without need. Keep this code
* in sync with the defaults in functioncmds.c.
*/
if (strcmp(procost, "0") != 0)
@@ -9915,7 +9915,7 @@ dumpOpclass(Archive *fout, OpclassInfo *opcinfo)
* XXX RECHECK is gone as of 8.4, but we'll still print it if dumping an
* older server's opclass in which it is used. This is to avoid
* hard-to-detect breakage if a newer pg_dump is used to dump from an
- * older server and then reload into that old version. This can go away
+ * older server and then reload into that old version. This can go away
* once 8.3 is so old as to not be of interest to anyone.
*/
resetPQExpBuffer(query);
@@ -10177,7 +10177,7 @@ dumpOpfamily(Archive *fout, OpfamilyInfo *opfinfo)
* XXX RECHECK is gone as of 8.4, but we'll still print it if dumping an
* older server's opclass in which it is used. This is to avoid
* hard-to-detect breakage if a newer pg_dump is used to dump from an
- * older server and then reload into that old version. This can go away
+ * older server and then reload into that old version. This can go away
* once 8.3 is so old as to not be of interest to anyone.
*/
if (g_fout->remoteVersion >= 90100)
@@ -11523,7 +11523,7 @@ dumpUserMappings(Archive *fout,
* to fail if run by a non-superuser. Note that the view will show
* umoptions as null if the user hasn't got privileges for the associated
* server; this means that pg_dump will dump such a mapping, but with no
- * OPTIONS clause. A possible alternative is to skip such mappings
+ * OPTIONS clause. A possible alternative is to skip such mappings
* altogether, but it's not clear that that's an improvement.
*/
selectSourceSchema("pg_catalog");
@@ -11663,7 +11663,7 @@ dumpDefaultACL(Archive *fout, DefaultACLInfo *daclinfo)
* 'type' must be one of
* TABLE, SEQUENCE, FUNCTION, LANGUAGE, SCHEMA, DATABASE, TABLESPACE,
* FOREIGN DATA WRAPPER, SERVER, or LARGE OBJECT.
- * 'name' is the formatted name of the object. Must be quoted etc. already.
+ * 'name' is the formatted name of the object. Must be quoted etc. already.
* 'subname' is the formatted name of the sub-object, if any. Must be quoted.
* 'tag' is the tag for the archive entry (typ. unquoted name of object).
* 'nspname' is the namespace the object is in (NULL if none).
@@ -12037,7 +12037,7 @@ dumpTable(Archive *fout, TableInfo *tbinfo)
tbinfo->relacl);
/*
- * Handle column ACLs, if any. Note: we pull these with a separate
+ * Handle column ACLs, if any. Note: we pull these with a separate
* query rather than trying to fetch them during getTableAttrs, so
* that we won't miss ACLs on system columns.
*/
@@ -12432,7 +12432,7 @@ dumpTableSchema(Archive *fout, TableInfo *tbinfo)
* physical column order, including dropped columns, as in the
* original. Therefore, we create dropped columns above and drop them
* here, also updating their attlen/attalign values so that the
- * dropped column can be skipped properly. (We do not bother with
+ * dropped column can be skipped properly. (We do not bother with
* restoring the original attbyval setting.) Also, inheritance
* relationships are set up by doing ALTER INHERIT rather than using
* an INHERITS clause --- the latter would possibly mess up the column
@@ -12790,7 +12790,7 @@ dumpIndex(Archive *fout, IndxInfo *indxinfo)
/*
* If there's an associated constraint, don't dump the index per se, but
- * do dump any comment for it. (This is safe because dependency ordering
+ * do dump any comment for it. (This is safe because dependency ordering
* will have ensured the constraint is emitted first.) Note that the
* emitted comment has to be shown as depending on the constraint, not
* the index, in such cases.
@@ -13145,7 +13145,7 @@ findLastBuiltinOid_V71(const char *dbname)
* find the last built in oid
*
* For 7.0, we do this by assuming that the last thing that initdb does is to
- * create the pg_indexes view. This sucks in general, but seeing that 7.0.x
+ * create the pg_indexes view. This sucks in general, but seeing that 7.0.x
* initdb won't be changing anymore, it'll do.
*/
static Oid
@@ -14063,7 +14063,7 @@ getDependencies(void)
/*
* Ordinarily, table rowtypes have implicit dependencies on their
- * tables. However, for a composite type the implicit dependency goes
+ * tables. However, for a composite type the implicit dependency goes
* the other way in pg_depend; which is the right thing for DROP but
* it doesn't produce the dependency ordering we need. So in that one
* case, we reverse the direction of the dependency.
diff --git a/src/bin/pg_dump/pg_dump.h b/src/bin/pg_dump/pg_dump.h
index a7ec992d3c3..828b6cf3225 100644
--- a/src/bin/pg_dump/pg_dump.h
+++ b/src/bin/pg_dump/pg_dump.h
@@ -76,10 +76,10 @@ typedef struct SimpleStringList
*
* NOTE: the structures described here live for the entire pg_dump run;
* and in most cases we make a struct for every object we can find in the
- * catalogs, not only those we are actually going to dump. Hence, it's
+ * catalogs, not only those we are actually going to dump. Hence, it's
* best to store a minimal amount of per-object info in these structs,
* and retrieve additional per-object info when and if we dump a specific
- * object. In particular, try to avoid retrieving expensive-to-compute
+ * object. In particular, try to avoid retrieving expensive-to-compute
* information until it's known to be needed. We do, however, have to
* store enough info to determine whether an object should be dumped and
* what order to dump in.
@@ -351,12 +351,12 @@ typedef struct _triggerInfo
} TriggerInfo;
/*
- * struct ConstraintInfo is used for all constraint types. However we
+ * struct ConstraintInfo is used for all constraint types. However we
* use a different objType for foreign key constraints, to make it easier
* to sort them the way we want.
*
* Note: condeferrable and condeferred are currently only valid for
- * unique/primary-key constraints. Otherwise that info is in condef.
+ * unique/primary-key constraints. Otherwise that info is in condef.
*/
typedef struct _constraintInfo
{
diff --git a/src/bin/pg_dump/pg_dump_sort.c b/src/bin/pg_dump/pg_dump_sort.c
index a517523571f..23253b6ee23 100644
--- a/src/bin/pg_dump/pg_dump_sort.c
+++ b/src/bin/pg_dump/pg_dump_sort.c
@@ -21,7 +21,7 @@ static const char *modulename = gettext_noop("sorter");
/*
* Sort priority for object types when dumping a pre-7.3 database.
* Objects are sorted by priority levels, and within an equal priority level
- * by OID. (This is a relatively crude hack to provide semi-reasonable
+ * by OID. (This is a relatively crude hack to provide semi-reasonable
* behavior for old databases without full dependency info.) Note: collations,
* extensions, text search, foreign-data, and default ACL objects can't really
* happen here, so the rather bogus priorities for them don't matter.
@@ -280,11 +280,11 @@ sortDumpableObjects(DumpableObject **objs, int numObjs,
* TopoSort -- topological sort of a dump list
*
* Generate a re-ordering of the dump list that satisfies all the dependency
- * constraints shown in the dump list. (Each such constraint is a fact of a
+ * constraints shown in the dump list. (Each such constraint is a fact of a
* partial ordering.) Minimize rearrangement of the list not needed to
* achieve the partial ordering.
*
- * The input is the list of numObjs objects in objs[]. This list is not
+ * The input is the list of numObjs objects in objs[]. This list is not
* modified.
*
* Returns TRUE if able to build an ordering that satisfies all the
@@ -327,7 +327,7 @@ TopoSort(DumpableObject **objs,
* linked list of items-ready-to-output as Knuth does, we maintain a heap
* of their item numbers, which we can use as a priority queue. This
* turns the algorithm from O(N) to O(N log N) because each insertion or
- * removal of a heap item takes O(log N) time. However, that's still
+ * removal of a heap item takes O(log N) time. However, that's still
* plenty fast enough for this application.
*/
@@ -391,9 +391,9 @@ TopoSort(DumpableObject **objs,
}
/*--------------------
- * Now emit objects, working backwards in the output list. At each step,
+ * Now emit objects, working backwards in the output list. At each step,
* we use the priority heap to select the last item that has no remaining
- * before-constraints. We remove that item from the heap, output it to
+ * before-constraints. We remove that item from the heap, output it to
* ordering[], and decrease the beforeConstraints count of each of the
* items it was constrained against. Whenever an item's beforeConstraints
* count is thereby decreased to zero, we insert it into the priority heap
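For reference, the sift-up insertion that yields the O(log N) cost can be written as below; addHeapElement() in this file plays this role, though its exact signature may differ:

    /* insert val into a binary max-heap of ints; returns the new length */
    static int
    heap_insert(int *heap, int heapLength, int val)
    {
        int     j = heapLength++;

        while (j > 0)
        {
            int     i = (j - 1) / 2;    /* parent slot */

            if (val <= heap[i])
                break;
            heap[j] = heap[i];          /* pull the parent down */
            j = i;
        }
        heap[j] = val;
        return heapLength;
    }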
@@ -521,7 +521,7 @@ removeHeapElement(int *heap, int heapLength)
* before trying TopoSort again. We can safely repair loops that are
* disjoint (have no members in common); if we find overlapping loops
* then we repair only the first one found, because the action taken to
- * repair the first might have repaired the other as well. (If not,
+ * repair the first might have repaired the other as well. (If not,
* we'll fix it on the next go-round.)
*
* objs[] lists the objects TopoSort couldn't sort
@@ -1014,7 +1014,7 @@ repairDependencyLoop(DumpableObject **loop,
/*
* If all the objects are TABLE_DATA items, what we must have is a
* circular set of foreign key constraints (or a single self-referential
- * table). Print an appropriate complaint and break the loop arbitrarily.
+ * table). Print an appropriate complaint and break the loop arbitrarily.
*/
for (i = 0; i < nLoop; i++)
{
diff --git a/src/bin/pg_dump/pg_dumpall.c b/src/bin/pg_dump/pg_dumpall.c
index d0f53e8cc62..25bea51d99d 100644
--- a/src/bin/pg_dump/pg_dumpall.c
+++ b/src/bin/pg_dump/pg_dumpall.c
@@ -455,9 +455,9 @@ main(int argc, char *argv[])
if (!data_only)
{
/*
- * If asked to --clean, do that first. We can avoid detailed
+ * If asked to --clean, do that first. We can avoid detailed
* dependency analysis because databases never depend on each other,
- * and tablespaces never depend on each other. Roles could have
+ * and tablespaces never depend on each other. Roles could have
* grants to each other, but DROP ROLE will clean those up silently.
*/
if (output_clean)
@@ -1141,7 +1141,7 @@ dumpCreateDB(PGconn *conn)
* commands for just those databases with values different from defaults.
*
* We consider template0's encoding and locale (or, pre-7.1, template1's)
- * to define the installation default. Pre-8.4 installations do not have
+ * to define the installation default. Pre-8.4 installations do not have
* per-database locale settings; for them, every database must necessarily
* be using the installation default, so there's no need to do anything
* (which is good, since in very old versions there is no good way to find
diff --git a/src/bin/pg_resetxlog/pg_resetxlog.c b/src/bin/pg_resetxlog/pg_resetxlog.c
index 5ea51a02ef5..d1b12dc3fc1 100644
--- a/src/bin/pg_resetxlog/pg_resetxlog.c
+++ b/src/bin/pg_resetxlog/pg_resetxlog.c
@@ -31,7 +31,7 @@
/*
* We have to use postgres.h not postgres_fe.h here, because there's so much
* backend-only stuff in the XLOG include files we need. But we need a
- * frontend-ish environment otherwise. Hence this ugly hack.
+ * frontend-ish environment otherwise. Hence this ugly hack.
*/
#define FRONTEND 1
@@ -723,7 +723,7 @@ FindEndOfXLOG(void)
/*
* Initialize the max() computation using the last checkpoint address from
- * old pg_control. Note that for the moment we are working with segment
+ * old pg_control. Note that for the moment we are working with segment
* numbering according to the old xlog seg size.
*/
newXlogId = ControlFile.checkPointCopy.redo.xlogid;
diff --git a/src/bin/psql/common.c b/src/bin/psql/common.c
index 101544800d2..abb108265dd 100644
--- a/src/bin/psql/common.c
+++ b/src/bin/psql/common.c
@@ -326,7 +326,7 @@ ConnectionUp(void)
* see if it can be restored.
*
* Returns true if either the connection was still there, or it could be
- * restored successfully; false otherwise. If, however, there was no
+ * restored successfully; false otherwise. If, however, there was no
* connection and the session is non-interactive, this will exit the program
* with a code of EXIT_BADCONN.
*/
diff --git a/src/bin/psql/describe.c b/src/bin/psql/describe.c
index 6df42fb957e..a38113ac104 100644
--- a/src/bin/psql/describe.c
+++ b/src/bin/psql/describe.c
@@ -3988,7 +3988,7 @@ listOneExtensionContents(const char *extname, const char *oid)
* printACLColumn
*
* Helper function for consistently formatting ACL (privilege) columns.
- * The proper targetlist entry is appended to buf. Note lack of any
+ * The proper targetlist entry is appended to buf. Note lack of any
* whitespace or comma decoration.
*/
static void
diff --git a/src/bin/psql/input.c b/src/bin/psql/input.c
index ec0e6e1d49b..e8961d62663 100644
--- a/src/bin/psql/input.c
+++ b/src/bin/psql/input.c
@@ -38,7 +38,7 @@ static int history_lines_added;
* Preserve newlines in saved queries by mapping '\n' to NL_IN_HISTORY
*
* It is assumed NL_IN_HISTORY will never be entered by the user
- * nor appear inside a multi-byte string. 0x00 is not properly
+ * nor appear inside a multi-byte string. 0x00 is not properly
 * handled by the readline routines, so it cannot be used
* for this purpose.
*/
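A sketch of the encoding half, assuming the sentinel is a control byte such as 0x01 (which satisfies both conditions above); decoding on re-read is the mirror image:

    #define NL_IN_HISTORY   0x01        /* assumed sentinel value */

    /* map newlines before the line is handed to the history library */
    static void
    encode_history_entry(char *line)
    {
        char   *p;

        for (p = line; *p; p++)
        {
            if (*p == '\n')
                *p = NL_IN_HISTORY;
        }
    }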
@@ -162,7 +162,7 @@ pg_send_history(PQExpBuffer history_buf)
*
* Caller *must* have set up sigint_interrupt_jmp before calling.
*
- * Note: we re-use a static PQExpBuffer for each call. This is to avoid
+ * Note: we re-use a static PQExpBuffer for each call. This is to avoid
* leaking memory if interrupted by SIGINT.
*/
char *
@@ -396,7 +396,7 @@ saveHistory(char *fname, int max_lines, bool appendFlag, bool encodeFlag)
/* truncate what we have ... */
if (max_lines >= 0)
stifle_history(max_lines);
- /* ... and overwrite file. Tough luck for concurrent sessions. */
+ /* ... and overwrite file. Tough luck for concurrent sessions. */
errno = 0;
(void) write_history(fname);
if (errno == 0)
diff --git a/src/bin/psql/large_obj.c b/src/bin/psql/large_obj.c
index d8e703af4db..bcdd7d03a95 100644
--- a/src/bin/psql/large_obj.c
+++ b/src/bin/psql/large_obj.c
@@ -47,7 +47,7 @@ print_lo_result(const char *fmt,...)
/*
- * Prepare to do a large-object operation. We *must* be inside a transaction
+ * Prepare to do a large-object operation. We *must* be inside a transaction
* block for all these operations, so start one if needed.
*
* Returns TRUE if okay, FALSE if failed. *own_transaction is set to indicate
diff --git a/src/bin/psql/mainloop.c b/src/bin/psql/mainloop.c
index 6c4a079b0ae..5b573f9102d 100644
--- a/src/bin/psql/mainloop.c
+++ b/src/bin/psql/mainloop.c
@@ -277,7 +277,7 @@ MainLoop(FILE *source)
* If we added a newline to query_buf, and nothing else has
* been inserted in query_buf by the lexer, then strip off the
* newline again. This avoids any change to query_buf when a
- * line contains only a backslash command. Also, in this
+ * line contains only a backslash command. Also, in this
* situation we force out any previous lines as a separate
* history entry; we don't want SQL and backslash commands
* intermixed in history if at all possible.
@@ -419,7 +419,7 @@ MainLoop(FILE *source)
* psqlscan.c is #include'd here instead of being compiled on its own.
* This is because we need postgres_fe.h to be read before any system
* include files, else things tend to break on platforms that have
- * multiple infrastructures for stdio.h and so on. flex is absolutely
+ * multiple infrastructures for stdio.h and so on. flex is absolutely
* uncooperative about that, so we can't compile psqlscan.c on its own.
*/
#include "psqlscan.c"
diff --git a/src/bin/psql/mbprint.c b/src/bin/psql/mbprint.c
index 248a4db0428..f54bb6e848c 100644
--- a/src/bin/psql/mbprint.c
+++ b/src/bin/psql/mbprint.c
@@ -21,7 +21,7 @@
* To avoid version-skew problems, this file must not use declarations
* from pg_wchar.h: the encoding IDs we are dealing with are determined
* by the libpq.so we are linked with, and that might not match the
- * numbers we see at compile time. (If this file were inside libpq,
+ * numbers we see at compile time. (If this file were inside libpq,
* the problem would go away...)
*
* Hence, we have our own definition of pg_wchar, and we get the values
diff --git a/src/bin/psql/print.c b/src/bin/psql/print.c
index 08a853add94..b71e5fd4f7f 100644
--- a/src/bin/psql/print.c
+++ b/src/bin/psql/print.c
@@ -668,7 +668,7 @@ print_aligned_text(const printTableContent *cont, FILE *fout)
* Optional optimized word wrap. Shrink columns with a high max/avg
 * ratio. Slightly bias against wider columns. (Increases chance a
* narrow column will fit in its cell.) If available columns is
- * positive... and greater than the width of the unshrinkable column
+ * positive... and greater than the width of the unshrinkable column
* headers
*/
if (output_columns > 0 && output_columns >= total_header_width)
@@ -2167,7 +2167,7 @@ printTableAddCell(printTableContent *const content, const char *cell,
* strdup'd, so there is no need to keep the original footer string around.
*
* Footers are never translated by the function. If you want the footer
- * translated you must do so yourself, before calling printTableAddFooter. The
+ * translated you must do so yourself, before calling printTableAddFooter. The
* reason this works differently to headers and cells is that footers tend to
 * be made up of individually translated components, rather than being
* translated as a whole.
@@ -2506,7 +2506,7 @@ get_line_style(const printTableOpt *opt)
/*
* Compute the byte distance to the end of the string or *target_width
- * display character positions, whichever comes first. Update *target_width
+ * display character positions, whichever comes first. Update *target_width
* to be the number of display character positions actually filled.
*/
static int
diff --git a/src/bin/psql/settings.h b/src/bin/psql/settings.h
index 0d5c1fbf3d9..dbe4a4cb0f9 100644
--- a/src/bin/psql/settings.h
+++ b/src/bin/psql/settings.h
@@ -96,7 +96,7 @@ typedef struct _psqlSettings
/*
* The remaining fields are set by assign hooks associated with entries in
- * "vars". They should not be set directly except by those hook
+ * "vars". They should not be set directly except by those hook
* functions.
*/
bool autocommit;
diff --git a/src/bin/psql/stringutils.c b/src/bin/psql/stringutils.c
index 24bc2e3e039..7c1090819ed 100644
--- a/src/bin/psql/stringutils.c
+++ b/src/bin/psql/stringutils.c
@@ -77,7 +77,7 @@ strtokx(const char *s,
/*
* We may need extra space to insert delimiter nulls for adjacent
- * tokens. 2X the space is a gross overestimate, but it's unlikely
+ * tokens. 2X the space is a gross overestimate, but it's unlikely
* that this code will be used on huge strings anyway.
*/
storage = pg_malloc(2 * strlen(s) + 1);
@@ -107,7 +107,7 @@ strtokx(const char *s,
{
/*
* If not at end of string, we need to insert a null to terminate the
- * returned token. We can just overwrite the next character if it
+ * returned token. We can just overwrite the next character if it
* happens to be in the whitespace set ... otherwise move over the
* rest of the string to make room. (This is why we allocated extra
* space above).
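Both cases in one small sketch (illustrative; the real code also advances its scan position appropriately):

    #include <string.h>

    /*
     * Terminate the token ending just before *p: overwrite *p if it is
     * expendable whitespace, else shift the tail right one byte --
     * the reason the buffer was allocated at twice the input size.
     */
    static void
    insert_token_null(char *p, const char *whitespace)
    {
        if (*p == '\0')
            return;                         /* already terminated */
        if (strchr(whitespace, *p))
            *p = '\0';
        else
        {
            memmove(p + 1, p, strlen(p) + 1);   /* tail plus its \0 */
            *p = '\0';
        }
    }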
@@ -161,7 +161,7 @@ strtokx(const char *s,
/*
* If not at end of string, we need to insert a null to terminate the
- * returned token. See notes above.
+ * returned token. See notes above.
*/
if (*p != '\0')
{
@@ -184,7 +184,7 @@ strtokx(const char *s,
}
/*
- * Otherwise no quoting character. Scan till next whitespace, delimiter
+ * Otherwise no quoting character. Scan till next whitespace, delimiter
* or quote. NB: at this point, *start is known not to be '\0',
* whitespace, delim, or quote, so we will consume at least one character.
*/
@@ -210,7 +210,7 @@ strtokx(const char *s,
/*
* If not at end of string, we need to insert a null to terminate the
- * returned token. See notes above.
+ * returned token. See notes above.
*/
if (*p != '\0')
{
diff --git a/src/bin/psql/tab-complete.c b/src/bin/psql/tab-complete.c
index a5a035fcc59..c1c77f778e3 100644
--- a/src/bin/psql/tab-complete.c
+++ b/src/bin/psql/tab-complete.c
@@ -86,7 +86,7 @@ typedef struct SchemaQuery
/*
* Selection condition --- only rows meeting this condition are candidates
- * to display. If catname mentions multiple tables, include the necessary
+ * to display. If catname mentions multiple tables, include the necessary
* join condition here. For example, "c.relkind = 'r'". Write NULL (not
* an empty string) if not needed.
*/
@@ -420,7 +420,7 @@ static const SchemaQuery Query_for_list_of_views = {
* restricted to names matching a partially entered name. In these queries,
* the first %s will be replaced by the text entered so far (suitably escaped
* to become a SQL literal string). %d will be replaced by the length of the
- * string (in unescaped form). A second and third %s, if present, will be
+ * string (in unescaped form). A second and third %s, if present, will be
* replaced by a suitably-escaped version of the string provided in
* completion_info_charp. A fourth and fifth %s are similarly replaced by
* completion_info_charp2.
@@ -3105,7 +3105,7 @@ _complete_from_query(int is_schema_query, const char *text, int state)
/*
* When fetching relation names, suppress system catalogs unless
- * the input-so-far begins with "pg_". This is a compromise
+ * the input-so-far begins with "pg_". This is a compromise
* between not offering system catalogs for completion at all, and
* having them swamp the result when the input is just "p".
*/
diff --git a/src/bin/scripts/common.c b/src/bin/scripts/common.c
index 9a7cc2c264c..ef1de148bf9 100644
--- a/src/bin/scripts/common.c
+++ b/src/bin/scripts/common.c
@@ -254,7 +254,7 @@ executeMaintenanceCommand(PGconn *conn, const char *query, bool echo)
}
/*
- * "Safe" wrapper around strdup(). Pulled from psql/common.c
+ * "Safe" wrapper around strdup(). Pulled from psql/common.c
*/
char *
pg_strdup(const char *string)
@@ -276,7 +276,7 @@ pg_strdup(const char *string)
}
/*
- * Check yes/no answer in a localized way. 1=yes, 0=no, -1=neither.
+ * Check yes/no answer in a localized way. 1=yes, 0=no, -1=neither.
*/
/* translator: abbreviation for "yes" */
diff --git a/src/include/access/attnum.h b/src/include/access/attnum.h
index 4fec9b5f8c8..7c8018bbd3c 100644
--- a/src/include/access/attnum.h
+++ b/src/include/access/attnum.h
@@ -16,7 +16,7 @@
/*
- * user defined attribute numbers start at 1. -ay 2/95
+ * user-defined attribute numbers start at 1. -ay 2/95
*/
typedef int16 AttrNumber;
diff --git a/src/include/access/genam.h b/src/include/access/genam.h
index a95b3d745bc..657eabcd6ba 100644
--- a/src/include/access/genam.h
+++ b/src/include/access/genam.h
@@ -92,7 +92,7 @@ typedef struct SysScanDescData *SysScanDesc;
* blocking to see if a conflicting transaction commits.
*
* For deferrable unique constraints, UNIQUE_CHECK_PARTIAL is specified at
- * insertion time. The index AM should test if the tuple is unique, but
+ * insertion time. The index AM should test if the tuple is unique, but
* should not throw error, block, or prevent the insertion if the tuple
* appears not to be unique. We'll recheck later when it is time for the
* constraint to be enforced. The AM must return true if the tuple is
@@ -101,7 +101,7 @@ typedef struct SysScanDescData *SysScanDesc;
*
* When it is time to recheck the deferred constraint, a pseudo-insertion
* call is made with UNIQUE_CHECK_EXISTING. The tuple is already in the
- * index in this case, so it should not be inserted again. Rather, just
+ * index in this case, so it should not be inserted again. Rather, just
* check for conflicting live tuples (possibly blocking).
*/
typedef enum IndexUniqueCheck
diff --git a/src/include/access/gin_private.h b/src/include/access/gin_private.h
index ce7069e0d16..665ca886f8e 100644
--- a/src/include/access/gin_private.h
+++ b/src/include/access/gin_private.h
@@ -564,7 +564,7 @@ extern void ginPrepareDataScan(GinBtree btree, Relation index);
*
* In each GinScanKeyData, nentries is the true number of entries, while
* nuserentries is the number that extractQueryFn returned (which is what
- * we report to consistentFn). The "user" entries must come first.
+ * we report to consistentFn). The "user" entries must come first.
*/
typedef struct GinScanKeyData *GinScanKey;
diff --git a/src/include/access/hash.h b/src/include/access/hash.h
index 13ff37ab0b0..b281b0b315b 100644
--- a/src/include/access/hash.h
+++ b/src/include/access/hash.h
@@ -184,7 +184,7 @@ typedef HashMetaPageData *HashMetaPage;
#define ALL_SET ((uint32) ~0)
/*
- * Bitmap pages do not contain tuples. They do contain the standard
+ * Bitmap pages do not contain tuples. They do contain the standard
* page headers and trailers; however, everything in between is a
* giant bit array. The number of bits that fit on a page obviously
* depends on the page size and the header/trailer overhead. We require
diff --git a/src/include/access/htup.h b/src/include/access/htup.h
index 46f41172054..f53971abcc9 100644
--- a/src/include/access/htup.h
+++ b/src/include/access/htup.h
@@ -69,7 +69,7 @@
*
* We store five "virtual" fields Xmin, Cmin, Xmax, Cmax, and Xvac in three
* physical fields. Xmin and Xmax are always really stored, but Cmin, Cmax
- * and Xvac share a field. This works because we know that Cmin and Cmax
+ * and Xvac share a field. This works because we know that Cmin and Cmax
* are only interesting for the lifetime of the inserting and deleting
* transaction respectively. If a tuple is inserted and deleted in the same
* transaction, we store a "combo" command id that can be mapped to the real
@@ -81,7 +81,7 @@
* ie, an insert-in-progress or delete-in-progress tuple.)
*
* A word about t_ctid: whenever a new tuple is stored on disk, its t_ctid
- * is initialized with its own TID (location). If the tuple is ever updated,
+ * is initialized with its own TID (location). If the tuple is ever updated,
* its t_ctid is changed to point to the replacement version of the tuple.
* Thus, a tuple is the latest version of its row iff XMAX is invalid or
* t_ctid points to itself (in which case, if XMAX is valid, the tuple is
@@ -96,10 +96,10 @@
* check fails, one may assume that there is no live descendant version.
*
* Following the fixed header fields, the nulls bitmap is stored (beginning
- * at t_bits). The bitmap is *not* stored if t_infomask shows that there
+ * at t_bits). The bitmap is *not* stored if t_infomask shows that there
* are no nulls in the tuple. If an OID field is present (as indicated by
* t_infomask), then it is stored just before the user data, which begins at
- * the offset shown by t_hoff. Note that t_hoff must be a multiple of
+ * the offset shown by t_hoff. Note that t_hoff must be a multiple of
* MAXALIGN.
*/
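Put into code, the layout rule above is essentially the t_hoff computation (a sketch in terms of the htup.h macros; heap_form_tuple() does the real thing):

    static Size
    tuple_data_offset(int natts, bool hasnull, bool hasoid)
    {
        Size        len = offsetof(HeapTupleHeaderData, t_bits);

        if (hasnull)
            len += BITMAPLEN(natts);    /* nulls bitmap: 1 bit/attribute */
        if (hasoid)
            len += sizeof(Oid);         /* OID just before user data */

        return MAXALIGN(len);           /* t_hoff must be MAXALIGN'd */
    }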
@@ -206,7 +206,7 @@ typedef HeapTupleHeaderData *HeapTupleHeader;
/*
* HeapTupleHeader accessor macros
*
- * Note: beware of multiple evaluations of "tup" argument. But the Set
+ * Note: beware of multiple evaluations of "tup" argument. But the Set
* macros evaluate their other argument only once.
*/
@@ -426,7 +426,7 @@ do { \
* MinimalTuple is an alternative representation that is used for transient
* tuples inside the executor, in places where transaction status information
* is not required, the tuple rowtype is known, and shaving off a few bytes
- * is worthwhile because we need to store many tuples. The representation
+ * is worthwhile because we need to store many tuples. The representation
* is chosen so that tuple access routines can work with either full or
* minimal tuples via a HeapTupleData pointer structure. The access routines
* see no difference, except that they must not access the transaction status
@@ -450,7 +450,7 @@ do { \
* the MINIMAL_TUPLE_OFFSET distance. t_len does not include that, however.
*
* MINIMAL_TUPLE_DATA_OFFSET is the offset to the first useful (non-pad) data
- * other than the length word. tuplesort.c and tuplestore.c use this to avoid
+ * other than the length word. tuplesort.c and tuplestore.c use this to avoid
* writing the padding to disk.
*/
#define MINIMAL_TUPLE_OFFSET \
@@ -502,12 +502,12 @@ typedef MinimalTupleData *MinimalTuple;
* This is the output format of heap_form_tuple and related routines.
*
* * Separately allocated tuple: t_data points to a palloc'd chunk that
- * is not adjacent to the HeapTupleData. (This case is deprecated since
+ * is not adjacent to the HeapTupleData. (This case is deprecated since
* it's difficult to tell apart from case #1. It should be used only in
* limited contexts where the code knows that case #1 will never apply.)
*
* * Separately allocated minimal tuple: t_data points MINIMAL_TUPLE_OFFSET
- * bytes before the start of a MinimalTuple. As with the previous case,
+ * bytes before the start of a MinimalTuple. As with the previous case,
* this can't be told apart from case #1 by inspection; code setting up
* or destroying this representation has to know what it's doing.
*
@@ -600,7 +600,7 @@ typedef HeapTupleData *HeapTuple;
*/
#define XLOG_HEAP_INIT_PAGE 0x80
/*
- * We ran out of opcodes, so heapam.c now has a second RmgrId. These opcodes
+ * We ran out of opcodes, so heapam.c now has a second RmgrId. These opcodes
* are associated with RM_HEAP2_ID, but are not logically different from
* the ones above associated with RM_HEAP_ID. We apply XLOG_HEAP_OPMASK,
* although currently XLOG_HEAP_INIT_PAGE is not used for any of these.
@@ -826,7 +826,7 @@ extern Datum fastgetattr(HeapTuple tup, int attnum, TupleDesc tupleDesc,
* and set *isnull == true. Otherwise, we set *isnull == false.
*
* <tup> is the pointer to the heap tuple. <attnum> is the attribute
- * number of the column (field) caller wants. <tupleDesc> is a
+ * number of the column (field) caller wants. <tupleDesc> is a
* pointer to the structure describing the row and all its fields.
* ----------------
*/
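An illustrative call, assuming attribute 1 is an int4 column (the wrapper function is hypothetical):

    static void
    show_first_attr(HeapTuple tup, TupleDesc tupleDesc)
    {
        bool        isnull;
        Datum       value = fastgetattr(tup, 1, tupleDesc, &isnull);

        if (isnull)
            return;         /* value is undefined when *isnull is set */
        elog(DEBUG1, "attr 1 = %d", DatumGetInt32(value));
    }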
diff --git a/src/include/access/itup.h b/src/include/access/itup.h
index 29247068fd2..290c78a4124 100644
--- a/src/include/access/itup.h
+++ b/src/include/access/itup.h
@@ -22,7 +22,7 @@
/*
* Index tuple header structure
*
- * All index tuples start with IndexTupleData. If the HasNulls bit is set,
+ * All index tuples start with IndexTupleData. If the HasNulls bit is set,
* this is followed by an IndexAttributeBitMapData. The index attribute
* values follow, beginning at a MAXALIGN boundary.
*
diff --git a/src/include/access/nbtree.h b/src/include/access/nbtree.h
index b62e42cfde1..e79f639b1fe 100644
--- a/src/include/access/nbtree.h
+++ b/src/include/access/nbtree.h
@@ -36,9 +36,9 @@ typedef uint16 BTCycleId;
* and status. If the page is deleted, we replace the level with the
* next-transaction-ID value indicating when it is safe to reclaim the page.
*
- * We also store a "vacuum cycle ID". When a page is split while VACUUM is
+ * We also store a "vacuum cycle ID". When a page is split while VACUUM is
* processing the index, a nonzero value associated with the VACUUM run is
- * stored into both halves of the split page. (If VACUUM is not running,
+ * stored into both halves of the split page. (If VACUUM is not running,
* both pages receive zero cycleids.) This allows VACUUM to detect whether
* a page was split since it started, with a small probability of false match
* if the page was last split some exact multiple of MAX_BT_CYCLE_ID VACUUMs
@@ -75,7 +75,7 @@ typedef BTPageOpaqueData *BTPageOpaque;
#define BTP_HAS_GARBAGE (1 << 6) /* page has LP_DEAD tuples */
/*
- * The max allowed value of a cycle ID is a bit less than 64K. This is
+ * The max allowed value of a cycle ID is a bit less than 64K. This is
* for convenience of pg_filedump and similar utilities: we want to use
* the last 2 bytes of special space as an index type indicator, and
* restricting cycle ID lets btree use that space for vacuum cycle IDs
@@ -272,9 +272,9 @@ typedef struct xl_btree_insert
* Note: the four XLOG_BTREE_SPLIT xl_info codes all use this data record.
* The _L and _R variants indicate whether the inserted tuple went into the
* left or right split page (and thus, whether newitemoff and the new item
- * are stored or not). The _ROOT variants indicate that we are splitting
+ * are stored or not). The _ROOT variants indicate that we are splitting
* the root page, and thus that a newroot record rather than an insert or
- * split record should follow. Note that a split record never carries a
+ * split record should follow. Note that a split record never carries a
* metapage update --- we'll do that in the parent-level update.
*/
typedef struct xl_btree_split
@@ -287,13 +287,13 @@ typedef struct xl_btree_split
OffsetNumber firstright; /* first item moved to right page */
/*
- * If level > 0, BlockIdData downlink follows. (We use BlockIdData rather
+ * If level > 0, BlockIdData downlink follows. (We use BlockIdData rather
* than BlockNumber for alignment reasons: SizeOfBtreeSplit is only 16-bit
* aligned.)
*
* If level > 0, an IndexTuple representing the HIKEY of the left page
* follows. We don't need this on leaf pages, because it's the same as
- * the leftmost key in the new right page. Also, it's suppressed if
+ * the leftmost key in the new right page. Also, it's suppressed if
* XLogInsert chooses to store the left page's whole page image.
*
* In the _L variants, next are OffsetNumber newitemoff and the new item.
@@ -372,7 +372,7 @@ typedef struct xl_btree_vacuum
/*
* This is what we need to know about deletion of a btree page. The target
* identifies the tuple removed from the parent page (note that we remove
- * this tuple's downlink and the *following* tuple's key). Note we do not
+ * this tuple's downlink and the *following* tuple's key). Note we do not
* store any content for the deleted page --- it is just rewritten as empty
* during recovery, apart from resetting the btpo.xact.
*/
@@ -457,7 +457,7 @@ typedef BTStackData *BTStack;
* BTScanOpaqueData is the btree-private state needed for an indexscan.
* This consists of preprocessed scan keys (see _bt_preprocess_keys() for
* details of the preprocessing), information about the current location
- * of the scan, and information about the marked location, if any. (We use
+ * of the scan, and information about the marked location, if any. (We use
* BTScanPosData to represent the data needed for each of current and marked
* locations.) In addition we can remember some known-killed index entries
* that must be marked before we can move off the current page.
@@ -465,9 +465,9 @@ typedef BTStackData *BTStack;
* Index scans work a page at a time: we pin and read-lock the page, identify
* all the matching items on the page and save them in BTScanPosData, then
* release the read-lock while returning the items to the caller for
- * processing. This approach minimizes lock/unlock traffic. Note that we
+ * processing. This approach minimizes lock/unlock traffic. Note that we
* keep the pin on the index page until the caller is done with all the items
- * (this is needed for VACUUM synchronization, see nbtree/README). When we
+ * (this is needed for VACUUM synchronization, see nbtree/README). When we
* are ready to step to the next page, if the caller has told us any of the
* items were killed, we re-lock the page to mark them killed, then unlock.
* Finally we drop the pin and step to the next page in the appropriate
@@ -542,7 +542,7 @@ typedef BTScanOpaqueData *BTScanOpaque;
/*
* We use some private sk_flags bits in preprocessed scan keys. We're allowed
- * to use bits 16-31 (see skey.h). The uppermost bits are copied from the
+ * to use bits 16-31 (see skey.h). The uppermost bits are copied from the
* index's indoption[] array entry for the index attribute.
*/
#define SK_BT_REQFWD 0x00010000 /* required to continue forward scan */
diff --git a/src/include/access/reloptions.h b/src/include/access/reloptions.h
index c7709cc0589..b8c979385ac 100644
--- a/src/include/access/reloptions.h
+++ b/src/include/access/reloptions.h
@@ -194,7 +194,7 @@ typedef struct
* "base" is a pointer to the reloptions structure, and "offset" is an integer
* variable that must be initialized to sizeof(reloptions structure). This
* struct must have been allocated with enough space to hold any string option
- * present, including terminating \0 for every option. SET_VARSIZE() must be
+ * present, including terminating \0 for every option. SET_VARSIZE() must be
* called on the struct with this offset as the second argument, after all the
* string options have been processed.
*/
diff --git a/src/include/access/skey.h b/src/include/access/skey.h
index a82e46ee0e1..45b8cc86e7b 100644
--- a/src/include/access/skey.h
+++ b/src/include/access/skey.h
@@ -42,7 +42,7 @@ typedef uint16 StrategyNumber;
/*
* A ScanKey represents the application of a comparison operator between
- * a table or index column and a constant. When it's part of an array of
+ * a table or index column and a constant. When it's part of an array of
* ScanKeys, the comparison conditions are implicitly ANDed. The index
* column is the left argument of the operator, if it's a binary operator.
* (The data structure can support unary indexable operators too; in that
@@ -106,7 +106,7 @@ typedef ScanKeyData *ScanKey;
* must be sorted according to the leading column number.
*
* The subsidiary ScanKey array appears in logical column order of the row
- * comparison, which may be different from index column order. The array
+ * comparison, which may be different from index column order. The array
* elements are like a normal ScanKey array except that:
* sk_flags must include SK_ROW_MEMBER, plus SK_ROW_END in the last
* element (needed since row header does not include a count)
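A single-key example for orientation: a btree equality condition on an int4 index column (standard identifiers throughout):

    ScanKeyData skey;

    ScanKeyInit(&skey,
                1,                          /* first index column */
                BTEqualStrategyNumber,      /* "=" */
                F_INT4EQ,                   /* comparison procedure */
                Int32GetDatum(42));         /* constant argument */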
diff --git a/src/include/access/slru.h b/src/include/access/slru.h
index c491b7d5f9f..4fc2d9b13e6 100644
--- a/src/include/access/slru.h
+++ b/src/include/access/slru.h
@@ -31,7 +31,7 @@
* segment and page numbers in SimpleLruTruncate (see PagePrecedes()).
*
* Note: slru.c currently assumes that segment file names will be four hex
- * digits. This sets a lower bound on the segment size (64K transactions
+ * digits. This sets a lower bound on the segment size (64K transactions
* for 32-bit TransactionIds).
*/
#define SLRU_PAGES_PER_SEGMENT 32
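The bound is simple arithmetic; as a back-of-the-envelope check (macro names here are illustrative, not part of slru.h):

    #define MAX_SLRU_SEGMENTS   0x10000     /* four hex digits: 0000..FFFF */
    /* covering all 2^32 TransactionIds therefore requires each segment
     * to span at least 2^32 / 2^16 = 65536 transactions */
    #define MIN_XACTS_PER_SEGMENT \
        (((uint64) 1 << 32) / MAX_SLRU_SEGMENTS)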
diff --git a/src/include/access/transam.h b/src/include/access/transam.h
index c5e6ab0ca49..344963f13f4 100644
--- a/src/include/access/transam.h
+++ b/src/include/access/transam.h
@@ -69,7 +69,7 @@
* using the OID generator. (We start the generator at 10000.)
*
* OIDs beginning at 16384 are assigned from the OID generator
- * during normal multiuser operation. (We force the generator up to
+ * during normal multiuser operation. (We force the generator up to
* 16384 as soon as we are in normal operation.)
*
* The choices of 10000 and 16384 are completely arbitrary, and can be moved
diff --git a/src/include/access/tupdesc.h b/src/include/access/tupdesc.h
index 99448efe12f..e7ef96a47ba 100644
--- a/src/include/access/tupdesc.h
+++ b/src/include/access/tupdesc.h
@@ -53,7 +53,7 @@ typedef struct tupleConstr
* TupleDesc; with the exception that tdhasoid indicates if OID is present.
*
* If the tupdesc is known to correspond to a named rowtype (such as a table's
- * rowtype) then tdtypeid identifies that type and tdtypmod is -1. Otherwise
+ * rowtype) then tdtypeid identifies that type and tdtypmod is -1. Otherwise
* tdtypeid is RECORDOID, and tdtypmod can be either -1 for a fully anonymous
* row type, or a value >= 0 to allow the rowtype to be looked up in the
* typcache.c type cache.
diff --git a/src/include/access/tupmacs.h b/src/include/access/tupmacs.h
index 38e65a807b1..4da2cffbdf5 100644
--- a/src/include/access/tupmacs.h
+++ b/src/include/access/tupmacs.h
@@ -92,7 +92,7 @@
/*
* att_align_datum aligns the given offset as needed for a datum of alignment
- * requirement attalign and typlen attlen. attdatum is the Datum variable
+ * requirement attalign and typlen attlen. attdatum is the Datum variable
* we intend to pack into a tuple (it's only accessed if we are dealing with
* a varlena type). Note that this assumes the Datum will be stored as-is;
* callers that are intending to convert non-short varlena datums to short
@@ -111,7 +111,7 @@
* pointer; when accessing a varlena field we have to "peek" to see if we
* are looking at a pad byte or the first byte of a 1-byte-header datum.
* (A zero byte must be either a pad byte, or the first byte of a correctly
- * aligned 4-byte length word; in either case we can align safely. A non-zero
+ * aligned 4-byte length word; in either case we can align safely. A non-zero
* byte must be either a 1-byte length word, or the first byte of a correctly
* aligned 4-byte length word; in either case we need not align.)
*
diff --git a/src/include/access/tuptoaster.h b/src/include/access/tuptoaster.h
index 177415f26e4..31d93bb0949 100644
--- a/src/include/access/tuptoaster.h
+++ b/src/include/access/tuptoaster.h
@@ -60,7 +60,7 @@
* The code will also consider moving MAIN data out-of-line, but only as a
* last resort if the previous steps haven't reached the target tuple size.
* In this phase we use a different target size, currently equal to the
- * largest tuple that will fit on a heap page. This is reasonable since
+ * largest tuple that will fit on a heap page. This is reasonable since
* the user has told us to keep the data in-line if at all possible.
*/
#define TOAST_TUPLES_PER_PAGE_MAIN 1
@@ -76,7 +76,7 @@
/*
* When we store an oversize datum externally, we divide it into chunks
- * containing at most TOAST_MAX_CHUNK_SIZE data bytes. This number *must*
+ * containing at most TOAST_MAX_CHUNK_SIZE data bytes. This number *must*
* be small enough that the completed toast-table tuple (including the
* ID and sequence fields and all overhead) will fit on a page.
* The coding here sets the size on the theory that we want to fit
diff --git a/src/include/access/xlog.h b/src/include/access/xlog.h
index 482b8913e03..41b9da891bc 100644
--- a/src/include/access/xlog.h
+++ b/src/include/access/xlog.h
@@ -32,11 +32,11 @@
* where there can be zero to three backup blocks (as signaled by xl_info flag
* bits). XLogRecord structs always start on MAXALIGN boundaries in the WAL
* files, and we round up SizeOfXLogRecord so that the rmgr data is also
- * guaranteed to begin on a MAXALIGN boundary. However, no padding is added
+ * guaranteed to begin on a MAXALIGN boundary. However, no padding is added
* to align BkpBlock structs or backup block data.
*
* NOTE: xl_len counts only the rmgr data, not the XLogRecord header,
- * and also not any backup blocks. xl_tot_len counts everything. Neither
+ * and also not any backup blocks. xl_tot_len counts everything. Neither
* length field is rounded up to an alignment boundary.
*/
typedef struct XLogRecord
@@ -115,7 +115,7 @@ extern int sync_method;
* value (ignoring InvalidBuffer) appearing in the rdata chain.
*
* When buffer is valid, caller must set buffer_std to indicate whether the
- * page uses standard pd_lower/pd_upper header fields. If this is true, then
+ * page uses standard pd_lower/pd_upper header fields. If this is true, then
* XLOG is allowed to omit the free space between pd_lower and pd_upper from
* the backed-up page image. Note that even when buffer_std is false, the
* page MUST have an LSN field as its first eight bytes!
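A sketch of the hole computation this enables, modeled on what XLogInsert does before taking a backup-block image (the range checks guard against a corrupted page header; the function itself is illustrative):

    static void
    compute_hole(Page page, uint16 *hole_offset, uint16 *hole_length)
    {
        PageHeader  ph = (PageHeader) page;

        *hole_offset = *hole_length = 0;
        if (ph->pd_lower >= SizeOfPageHeaderData &&
            ph->pd_upper <= BLCKSZ &&
            ph->pd_lower <= ph->pd_upper)
        {
            *hole_offset = ph->pd_lower;    /* free space starts here */
            *hole_length = ph->pd_upper - ph->pd_lower; /* omittable bytes */
        }
    }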
diff --git a/src/include/access/xlog_internal.h b/src/include/access/xlog_internal.h
index 7e39630c1bf..0d101456bbc 100644
--- a/src/include/access/xlog_internal.h
+++ b/src/include/access/xlog_internal.h
@@ -50,7 +50,7 @@ typedef struct BkpBlock
/*
* When there is not enough space on current page for whole record, we
- * continue on the next page with continuation record. (However, the
+ * continue on the next page with continuation record. (However, the
* XLogRecord header will never be split across pages; if there's less than
* SizeOfXLogRecord space left at the end of a page, we just waste it.)
*
@@ -168,7 +168,7 @@ typedef XLogLongPageHeaderData *XLogLongPageHeader;
* Compute ID and segment from an XLogRecPtr.
*
* For XLByteToSeg, do the computation at face value. For XLByteToPrevSeg,
- * a boundary byte is taken to be in the previous segment. This is suitable
+ * a boundary byte is taken to be in the previous segment. This is suitable
* for deciding which segment to write given a pointer to a record end,
* for example. (We can assume xrecoff is not zero, since no valid recptr
* can have that.)
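Spelled out, the two mappings differ only in how a boundary byte is charged (a sketch matching the documented behavior):

    #define XLByteToSeg(xlrp, logId, logSeg) \
        ( logId = (xlrp).xlogid, \
          logSeg = (xlrp).xrecoff / XLogSegSize )

    /* a byte exactly on a boundary belongs to the previous segment */
    #define XLByteToPrevSeg(xlrp, logId, logSeg) \
        ( logId = (xlrp).xlogid, \
          logSeg = ((xlrp).xrecoff - 1) / XLogSegSize )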
diff --git a/src/include/access/xlogdefs.h b/src/include/access/xlogdefs.h
index 6530df06aca..fc90072bd26 100644
--- a/src/include/access/xlogdefs.h
+++ b/src/include/access/xlogdefs.h
@@ -25,8 +25,8 @@
* NOTE: the "log file number" is somewhat misnamed, since the actual files
* making up the XLOG are much smaller than 4Gb. Each actual file is an
* XLogSegSize-byte "segment" of a logical log file having the indicated
- * xlogid. The log file number and segment number together identify a
- * physical XLOG file. Segment number and offset within the physical file
+ * xlogid. The log file number and segment number together identify a
+ * physical XLOG file. Segment number and offset within the physical file
* are computed from xrecoff div and mod XLogSegSize.
*/
typedef struct XLogRecPtr
@@ -88,7 +88,7 @@ typedef uint32 TimeLineID;
* read those buffers except during crash recovery or if wal_level != minimal,
* it is a win to use it in all cases where we sync on each write(). We could
* allow O_DIRECT with fsync(), but it is unclear if fsync() could process
- * writes not buffered in the kernel. Also, O_DIRECT is never enough to force
+ * writes not buffered in the kernel. Also, O_DIRECT is never enough to force
* data to the drives, it merely tries to bypass the kernel cache, so we still
* need O_SYNC/O_DSYNC.
*/
@@ -101,7 +101,7 @@ typedef uint32 TimeLineID;
/*
* This chunk of hackery attempts to determine which file sync methods
* are available on the current platform, and to choose an appropriate
- * default method. We assume that fsync() is always available, and that
+ * default method. We assume that fsync() is always available, and that
* configure determined whether fdatasync() is.
*/
#if defined(O_SYNC)
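
A hedged sketch of the shape of that "chunk of hackery": a preprocessor chain that prefers the strongest method the platform offers and falls back to fsync(), which is assumed always available. The macro names here are illustrative placeholders, not the actual xlogdefs.h symbols.

#include <stdio.h>

/* placeholder feature-test macros; configure and <fcntl.h> provide the
 * real ones (O_SYNC, O_DSYNC, HAVE_FDATASYNC, ...) */
#if defined(HAVE_O_DSYNC_SKETCH)
#define DEFAULT_SYNC_METHOD_SKETCH	"open_datasync"
#elif defined(HAVE_FDATASYNC_SKETCH)
#define DEFAULT_SYNC_METHOD_SKETCH	"fdatasync"
#else
#define DEFAULT_SYNC_METHOD_SKETCH	"fsync"		/* assumed always available */
#endif

int
main(void)
{
	printf("default wal_sync_method: %s\n", DEFAULT_SYNC_METHOD_SKETCH);
	return 0;
}
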
diff --git a/src/include/c.h b/src/include/c.h
index 03918608464..be52585ab7d 100644
--- a/src/include/c.h
+++ b/src/include/c.h
@@ -36,7 +36,7 @@
* 8) system-specific hacks
*
* NOTE: since this file is included by both frontend and backend modules, it's
- * almost certainly wrong to put an "extern" declaration here. typedefs and
+ * almost certainly wrong to put an "extern" declaration here. typedefs and
* macros are the kind of thing that might go here.
*
*----------------------------------------------------------------
@@ -106,7 +106,7 @@
/*
* Use this to mark string constants as needing translation at some later
- * time, rather than immediately. This is useful for cases where you need
+ * time, rather than immediately. This is useful for cases where you need
* access to the original string and translated string, and for cases where
* immediate translation is not possible, like when initializing global
* variables.
@@ -382,7 +382,7 @@ typedef struct
* Variable-length datatypes all share the 'struct varlena' header.
*
* NOTE: for TOASTable types, this is an oversimplification, since the value
- * may be compressed or moved out-of-line. However datatype-specific routines
+ * may be compressed or moved out-of-line. However datatype-specific routines
* are mostly content to deal with de-TOASTed values only, and of course
* client-side routines should never see a TOASTed value. But even in a
* de-TOASTed value, beware of touching vl_len_ directly, as its representation
@@ -412,7 +412,7 @@ typedef struct varlena VarChar; /* var-length char, ie SQL varchar(n) */
/*
* Specialized array types. These are physically laid out just the same
* as regular arrays (so that the regular array subscripting code works
- * with them). They exist as distinct types mostly for historical reasons:
+ * with them). They exist as distinct types mostly for historical reasons:
* they have nonstandard I/O behavior which we don't want to change for fear
* of breaking applications that look at the system catalogs. There is also
* an implementation issue for oidvector: it's part of the primary key for
@@ -455,7 +455,7 @@ typedef NameData *Name;
/*
* Support macros for escaping strings. escape_backslash should be TRUE
- * if generating a non-standard-conforming string. Prefixing a string
+ * if generating a non-standard-conforming string. Prefixing a string
* with ESCAPE_STRING_SYNTAX guarantees it is non-standard-conforming.
* Beware of multiple evaluation of the "ch" argument!
*/
@@ -584,7 +584,7 @@ typedef NameData *Name;
* datum) and add a null, do not do it with StrNCpy(..., len+1). That
* might seem to work, but it fetches one byte more than there is in the
* text object. One fine day you'll have a SIGSEGV because there isn't
- * another byte before the end of memory. Don't laugh, we've had real
+ * another byte before the end of memory. Don't laugh, we've had real
* live bug reports from real live users over exactly this mistake.
* Do it honestly with "memcpy(dst,src,len); dst[len] = '\0';", instead.
*/
@@ -610,7 +610,7 @@ typedef NameData *Name;
* Exactly the same as standard library function memset(), but considerably
* faster for zeroing small word-aligned structures (such as parsetree nodes).
* This has to be a macro because the main point is to avoid function-call
- * overhead. However, we have also found that the loop is faster than
+ * overhead. However, we have also found that the loop is faster than
* native libc memset() on some platforms, even those with assembler
* memset() functions. More research needs to be done, perhaps with
* MEMSET_LOOP_LIMIT tests in configure.
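
A simplified, hedged sketch of a MemSet-style zeroing path: a word-at-a-time loop for small aligned regions, with a fallback to memset() for everything else. The threshold constant is a stand-in for MEMSET_LOOP_LIMIT, and the real macro does this inline precisely to avoid the function call shown here.

#include <stdint.h>
#include <string.h>

#define MEMSET_LOOP_LIMIT_SKETCH 1024	/* placeholder threshold */

static void
memset_zero_sketch(void *start, size_t len)
{
	if (((uintptr_t) start % sizeof(long)) == 0 &&
		(len % sizeof(long)) == 0 &&
		len <= MEMSET_LOOP_LIMIT_SKETCH)
	{
		long	   *p = (long *) start;
		long	   *stop = (long *) ((char *) start + len);

		while (p < stop)
			*p++ = 0;				/* the loop the comment is talking about */
	}
	else
		memset(start, 0, len);		/* general case */
}

int
main(void)
{
	long		buf[8];

	memset_zero_sketch(buf, sizeof(buf));
	return (int) buf[0];
}
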
@@ -740,7 +740,7 @@ typedef NameData *Name;
* Section 8: system-specific hacks
*
* This should be limited to things that absolutely have to be
- * included in every source file. The port-specific header file
+ * included in every source file. The port-specific header file
* is usually a better place for this sort of thing.
* ----------------------------------------------------------------
*/
@@ -749,7 +749,7 @@ typedef NameData *Name;
* NOTE: this is also used for opening text files.
* WIN32 treats Control-Z as EOF in files opened in text mode.
* Therefore, we open files in binary mode on Win32 so we can read
- * literal control-Z. The other effect is that we see CRLF, but
+ * literal control-Z. The other effect is that we see CRLF, but
* that is OK because we can already handle those cleanly.
*/
#if defined(WIN32) || defined(__CYGWIN__)
diff --git a/src/include/catalog/catversion.h b/src/include/catalog/catversion.h
index 0200a81dbf6..b621cbc30dd 100644
--- a/src/include/catalog/catversion.h
+++ b/src/include/catalog/catversion.h
@@ -4,7 +4,7 @@
* "Catalog version number" for PostgreSQL.
*
* The catalog version number is used to flag incompatible changes in
- * the PostgreSQL system catalogs. Whenever anyone changes the format of
+ * the PostgreSQL system catalogs. Whenever anyone changes the format of
* a system catalog relation, or adds, deletes, or modifies standard
* catalog entries in such a way that an updated backend wouldn't work
* with an old database (or vice versa), the catalog version number
diff --git a/src/include/catalog/dependency.h b/src/include/catalog/dependency.h
index 5dfb25fee74..ac374a5487d 100644
--- a/src/include/catalog/dependency.h
+++ b/src/include/catalog/dependency.h
@@ -59,7 +59,7 @@
* DEPENDENCY_PIN ('p'): there is no dependent object; this type of entry
* is a signal that the system itself depends on the referenced object,
* and so that object must never be deleted. Entries of this type are
- * created only during initdb. The fields for the dependent object
+ * created only during initdb. The fields for the dependent object
* contain zeroes.
*
* Other dependency flavors may be needed in future.
diff --git a/src/include/catalog/namespace.h b/src/include/catalog/namespace.h
index 7e1e194794c..8eec215c365 100644
--- a/src/include/catalog/namespace.h
+++ b/src/include/catalog/namespace.h
@@ -19,7 +19,7 @@
/*
* This structure holds a list of possible functions or operators
- * found by namespace lookup. Each function/operator is identified
+ * found by namespace lookup. Each function/operator is identified
* by OID and by argument types; the list must be pruned by type
* resolution rules that are embodied in the parser, not here.
* See FuncnameGetCandidates's comments for more info.
diff --git a/src/include/catalog/objectaccess.h b/src/include/catalog/objectaccess.h
index 29254758e75..8f520223015 100644
--- a/src/include/catalog/objectaccess.h
+++ b/src/include/catalog/objectaccess.h
@@ -12,7 +12,7 @@
/*
* Object access hooks are intended to be called just before or just after
- * performing certain actions on a SQL object. This is intended as
+ * performing certain actions on a SQL object. This is intended as
* infrastructure for security or logging plugins.
*
* OAT_POST_CREATE should be invoked just after the object is created.
diff --git a/src/include/catalog/pg_attrdef.h b/src/include/catalog/pg_attrdef.h
index f6513b77b3a..2ec2a7e14ff 100644
--- a/src/include/catalog/pg_attrdef.h
+++ b/src/include/catalog/pg_attrdef.h
@@ -22,7 +22,7 @@
#include "catalog/genbki.h"
/* ----------------
- * pg_attrdef definition. cpp turns this into
+ * pg_attrdef definition. cpp turns this into
* typedef struct FormData_pg_attrdef
* ----------------
*/
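
For readers unfamiliar with the genbki convention behind the "cpp turns this into" comments, here is a hedged sketch of what ordinary compilation sees once the CATALOG macro melts away. Field types are schematic stand-ins, and the variable-length columns are deliberately absent from the struct, since they cannot be used as ordinary members.

typedef unsigned int Oid;		/* stand-in for the real Oid typedef */

typedef struct FormData_pg_attrdef
{
	Oid			adrelid;	/* OID of the table this default belongs to */
	short		adnum;		/* attribute number of the defaulted column */
	/* variable-length fields (adbin, adsrc) follow on disk and are
	 * not usable as ordinary struct members */
} FormData_pg_attrdef;

typedef FormData_pg_attrdef *Form_pg_attrdef;

int
main(void)
{
	(void) sizeof(FormData_pg_attrdef);		/* compile-only sketch */
	return 0;
}
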
diff --git a/src/include/catalog/pg_attribute.h b/src/include/catalog/pg_attribute.h
index 409d6ea3e7e..3ce3200b64a 100644
--- a/src/include/catalog/pg_attribute.h
+++ b/src/include/catalog/pg_attribute.h
@@ -160,7 +160,7 @@ CATALOG(pg_attribute,1249) BKI_BOOTSTRAP BKI_WITHOUT_OIDS BKI_ROWTYPE_OID(75) BK
/*
* ATTRIBUTE_FIXED_PART_SIZE is the size of the fixed-layout,
- * guaranteed-not-null part of a pg_attribute row. This is in fact as much
+ * guaranteed-not-null part of a pg_attribute row. This is in fact as much
* of the row as gets copied into tuple descriptors, so don't expect you
* can access fields beyond attcollation except in a real tuple!
*/
diff --git a/src/include/catalog/pg_authid.h b/src/include/catalog/pg_authid.h
index c48d96bcacd..d780b211b76 100644
--- a/src/include/catalog/pg_authid.h
+++ b/src/include/catalog/pg_authid.h
@@ -26,7 +26,7 @@
/*
* The CATALOG definition has to refer to the type of rolvaliduntil as
* "timestamptz" (lower case) so that bootstrap mode recognizes it. But
- * the C header files define this type as TimestampTz. Since the field is
+ * the C header files define this type as TimestampTz. Since the field is
* potentially-null and therefore can't be accessed directly from C code,
* there is no particular need for the C struct definition to show the
* field type as TimestampTz --- instead we just make it int.
diff --git a/src/include/catalog/pg_constraint.h b/src/include/catalog/pg_constraint.h
index a12e901fc64..b8070cb129a 100644
--- a/src/include/catalog/pg_constraint.h
+++ b/src/include/catalog/pg_constraint.h
@@ -38,7 +38,7 @@ CATALOG(pg_constraint,2606)
* relations. This is partly for backwards compatibility with past
* Postgres practice, and partly because we don't want to have to obtain a
* global lock to generate a globally unique name for a nameless
- * constraint. We associate a namespace with constraint names only for
+ * constraint. We associate a namespace with constraint names only for
* SQL-spec compatibility.
*/
NameData conname; /* name of this constraint */
@@ -57,7 +57,7 @@ CATALOG(pg_constraint,2606)
/*
* contypid links to the pg_type row for a domain if this is a domain
- * constraint. Otherwise it's 0.
+ * constraint. Otherwise it's 0.
*
* For SQL-style global ASSERTIONs, both conrelid and contypid would be
* zero. This is not presently supported, however.
@@ -76,7 +76,7 @@ CATALOG(pg_constraint,2606)
/*
* These fields, plus confkey, are only meaningful for a foreign-key
- * constraint. Otherwise confrelid is 0 and the char fields are spaces.
+ * constraint. Otherwise confrelid is 0 and the char fields are spaces.
*/
Oid confrelid; /* relation referenced by foreign key */
char confupdtype; /* foreign key's ON UPDATE action */
diff --git a/src/include/catalog/pg_control.h b/src/include/catalog/pg_control.h
index d543ef6e244..b10eefd50d3 100644
--- a/src/include/catalog/pg_control.h
+++ b/src/include/catalog/pg_control.h
@@ -94,9 +94,9 @@ typedef struct ControlFileData
uint64 system_identifier;
/*
- * Version identifier information. Keep these fields at the same offset,
+ * Version identifier information. Keep these fields at the same offset,
* especially pg_control_version; they won't be real useful if they move
- * around. (For historical reasons they must be 8 bytes into the file
+ * around. (For historical reasons they must be 8 bytes into the file
* rather than immediately at the front.)
*
* pg_control_version identifies the format of pg_control itself.
diff --git a/src/include/catalog/pg_db_role_setting.h b/src/include/catalog/pg_db_role_setting.h
index cda9b72827d..ef680da362c 100644
--- a/src/include/catalog/pg_db_role_setting.h
+++ b/src/include/catalog/pg_db_role_setting.h
@@ -27,7 +27,7 @@
#include "utils/relcache.h"
/* ----------------
- * pg_db_role_setting definition. cpp turns this into
+ * pg_db_role_setting definition. cpp turns this into
* typedef struct FormData_pg_db_role_setting
* ----------------
*/
diff --git a/src/include/catalog/pg_default_acl.h b/src/include/catalog/pg_default_acl.h
index 96e9f0c4c6d..9bc40ebbf2e 100644
--- a/src/include/catalog/pg_default_acl.h
+++ b/src/include/catalog/pg_default_acl.h
@@ -21,7 +21,7 @@
#include "catalog/genbki.h"
/* ----------------
- * pg_default_acl definition. cpp turns this into
+ * pg_default_acl definition. cpp turns this into
* typedef struct FormData_pg_default_acl
* ----------------
*/
@@ -65,7 +65,7 @@ typedef FormData_pg_default_acl *Form_pg_default_acl;
/*
* Types of objects for which the user is allowed to specify default
- * permissions through pg_default_acl. These codes are used in the
+ * permissions through pg_default_acl. These codes are used in the
* defaclobjtype column.
*/
#define DEFACLOBJ_RELATION 'r' /* table, view */
diff --git a/src/include/catalog/pg_description.h b/src/include/catalog/pg_description.h
index bb7234e2ec5..bd9a4a95a1c 100644
--- a/src/include/catalog/pg_description.h
+++ b/src/include/catalog/pg_description.h
@@ -6,12 +6,12 @@
* NOTE: an object is identified by the OID of the row that primarily
* defines the object, plus the OID of the table that that row appears in.
* For example, a function is identified by the OID of its pg_proc row
- * plus the pg_class OID of table pg_proc. This allows unique identification
+ * plus the pg_class OID of table pg_proc. This allows unique identification
* of objects without assuming that OIDs are unique across tables.
*
* Since attributes don't have OIDs of their own, we identify an attribute
* comment by the objoid+classoid of its parent table, plus an "objsubid"
- * giving the attribute column number. "objsubid" must be zero in a comment
+ * giving the attribute column number. "objsubid" must be zero in a comment
* for a table itself, so that it is distinct from any column comment.
* Currently, objsubid is unused and zero for all other kinds of objects,
* but perhaps it might be useful someday to associate comments with
@@ -39,7 +39,7 @@
#include "catalog/genbki.h"
/* ----------------
- * pg_description definition. cpp turns this into
+ * pg_description definition. cpp turns this into
* typedef struct FormData_pg_description
* ----------------
*/
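
The (classoid, objoid, objsubid) key described above has the same shape as the ObjectAddress struct used elsewhere in the backend; a schematic sketch:

typedef unsigned int Oid;

typedef struct ObjectAddressSketch
{
	Oid			classId;		/* OID of the catalog the defining row lives in */
	Oid			objectId;		/* OID of the row that defines the object */
	int			objectSubId;	/* column number, or 0 for the object itself */
} ObjectAddressSketch;

int
main(void)
{
	/* e.g. a comment on column 2 of some table with OID 41234 */
	ObjectAddressSketch addr = {1259, 41234, 2};	/* 1259 = pg_class's OID */

	return addr.objectSubId - 2;
}

A comment on a table column thus carries the pg_class OID of the catalog, the table's own OID, and the column number as objsubid; the table's own comment is the same triple with objsubid zero.
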
diff --git a/src/include/catalog/pg_largeobject.h b/src/include/catalog/pg_largeobject.h
index 7672de79474..0801b07a013 100644
--- a/src/include/catalog/pg_largeobject.h
+++ b/src/include/catalog/pg_largeobject.h
@@ -22,7 +22,7 @@
#include "catalog/genbki.h"
/* ----------------
- * pg_largeobject definition. cpp turns this into
+ * pg_largeobject definition. cpp turns this into
* typedef struct FormData_pg_largeobject
* ----------------
*/
diff --git a/src/include/catalog/pg_opclass.h b/src/include/catalog/pg_opclass.h
index d723b2561ae..1adc85b1017 100644
--- a/src/include/catalog/pg_opclass.h
+++ b/src/include/catalog/pg_opclass.h
@@ -17,10 +17,10 @@
* don't support partial indexes on system catalogs.)
*
* Normally opckeytype = InvalidOid (zero), indicating that the data stored
- * in the index is the same as the data in the indexed column. If opckeytype
+ * in the index is the same as the data in the indexed column. If opckeytype
* is nonzero then it indicates that a conversion step is needed to produce
* the stored index data, which will be of type opckeytype (which might be
- * the same or different from the input datatype). Performing such a
+ * the same or different from the input datatype). Performing such a
* conversion is the responsibility of the index access method --- not all
* AMs support this.
*
@@ -42,7 +42,7 @@
#include "catalog/genbki.h"
/* ----------------
- * pg_opclass definition. cpp turns this into
+ * pg_opclass definition. cpp turns this into
* typedef struct FormData_pg_opclass
* ----------------
*/
diff --git a/src/include/catalog/pg_proc.h b/src/include/catalog/pg_proc.h
index fdbc7808106..e20e41b112c 100644
--- a/src/include/catalog/pg_proc.h
+++ b/src/include/catalog/pg_proc.h
@@ -1401,7 +1401,7 @@ DESCR("natural exponential (e^x)");
/*
* This form of obj_description is now deprecated, since it will fail if
- * OIDs are not unique across system catalogs. Use the other form instead.
+ * OIDs are not unique across system catalogs. Use the other form instead.
*/
DATA(insert OID = 1348 ( obj_description PGNSP PGUID 14 100 0 0 f f f t f s 1 0 25 "26" _null_ _null_ _null_ _null_ "select description from pg_catalog.pg_description where objoid = $1 and objsubid = 0" _null_ _null_ _null_ ));
DESCR("deprecated, use two-argument form instead");
@@ -4339,7 +4339,7 @@ DESCR("fetch the Nth row value");
#define PROVOLATILE_VOLATILE 'v' /* can change even within a scan */
/*
- * Symbolic values for proargmodes column. Note that these must agree with
+ * Symbolic values for proargmodes column. Note that these must agree with
* the FunctionParameterMode enum in parsenodes.h; we declare them here to
* be accessible from either header.
*/
diff --git a/src/include/catalog/pg_rewrite.h b/src/include/catalog/pg_rewrite.h
index d370829ea46..e211effaf5b 100644
--- a/src/include/catalog/pg_rewrite.h
+++ b/src/include/catalog/pg_rewrite.h
@@ -25,7 +25,7 @@
#include "catalog/genbki.h"
/* ----------------
- * pg_rewrite definition. cpp turns this into
+ * pg_rewrite definition. cpp turns this into
* typedef struct FormData_pg_rewrite
* ----------------
*/
diff --git a/src/include/catalog/pg_shdepend.h b/src/include/catalog/pg_shdepend.h
index 20465ccd49f..689a67868b9 100644
--- a/src/include/catalog/pg_shdepend.h
+++ b/src/include/catalog/pg_shdepend.h
@@ -33,7 +33,7 @@ CATALOG(pg_shdepend,1214) BKI_SHARED_RELATION BKI_WITHOUT_OIDS
/*
* Identification of the dependent (referencing) object.
*
- * These fields are all zeroes for a DEPENDENCY_PIN entry. Also, dbid can
+ * These fields are all zeroes for a DEPENDENCY_PIN entry. Also, dbid can
* be zero to denote a shared object.
*/
Oid dbid; /* OID of database containing object */
diff --git a/src/include/catalog/pg_shdescription.h b/src/include/catalog/pg_shdescription.h
index c7757feb580..2b2ed8cc598 100644
--- a/src/include/catalog/pg_shdescription.h
+++ b/src/include/catalog/pg_shdescription.h
@@ -7,7 +7,7 @@
* NOTE: an object is identified by the OID of the row that primarily
* defines the object, plus the OID of the table that that row appears in.
* For example, a database is identified by the OID of its pg_database row
- * plus the pg_class OID of table pg_database. This allows unique
+ * plus the pg_class OID of table pg_database. This allows unique
* identification of objects without assuming that OIDs are unique
* across tables.
*
@@ -32,7 +32,7 @@
#include "catalog/genbki.h"
/* ----------------
- * pg_shdescription definition. cpp turns this into
+ * pg_shdescription definition. cpp turns this into
* typedef struct FormData_pg_shdescription
* ----------------
*/
diff --git a/src/include/catalog/pg_statistic.h b/src/include/catalog/pg_statistic.h
index 927cd0b0471..044eb446c08 100644
--- a/src/include/catalog/pg_statistic.h
+++ b/src/include/catalog/pg_statistic.h
@@ -49,7 +49,7 @@ CATALOG(pg_statistic,2619) BKI_WITHOUT_OIDS
float4 stanullfrac;
/*
- * stawidth is the average width in bytes of non-null entries. For
+ * stawidth is the average width in bytes of non-null entries. For
* fixed-width datatypes this is of course the same as the typlen, but for
* var-width types it is more useful. Note that this is the average width
* of the data as actually stored, post-TOASTing (eg, for a
@@ -69,7 +69,7 @@ CATALOG(pg_statistic,2619) BKI_WITHOUT_OIDS
* The special negative case allows us to cope with columns that are
* unique (stadistinct = -1) or nearly so (for example, a column in
* which values appear about twice on the average could be represented
- * by stadistinct = -0.5). Because the number-of-rows statistic in
+ * by stadistinct = -0.5). Because the number-of-rows statistic in
* pg_class may be updated more frequently than pg_statistic is, it's
* important to be able to describe such situations as a multiple of
* the number of rows, rather than a fixed number of distinct values.
@@ -81,8 +81,8 @@ CATALOG(pg_statistic,2619) BKI_WITHOUT_OIDS
/* ----------------
* To allow keeping statistics on different kinds of datatypes,
* we do not hard-wire any particular meaning for the remaining
- * statistical fields. Instead, we provide several "slots" in which
- * statistical data can be placed. Each slot includes:
+ * statistical fields. Instead, we provide several "slots" in which
+ * statistical data can be placed. Each slot includes:
* kind integer code identifying kind of data
* op OID of associated operator, if needed
* numbers float4 array (for statistical values)
@@ -169,15 +169,15 @@ typedef FormData_pg_statistic *Form_pg_statistic;
/*
* Currently, three statistical slot "kinds" are defined: most common values,
- * histogram, and correlation. Additional "kinds" will probably appear in
+ * histogram, and correlation. Additional "kinds" will probably appear in
* future to help cope with non-scalar datatypes. Also, custom data types
* can define their own "kind" codes by mutual agreement between a custom
* typanalyze routine and the selectivity estimation functions of the type's
* operators.
*
* Code reading the pg_statistic relation should not assume that a particular
- * data "kind" will appear in any particular slot. Instead, search the
- * stakind fields to see if the desired data is available. (The standard
+ * data "kind" will appear in any particular slot. Instead, search the
+ * stakind fields to see if the desired data is available. (The standard
* function get_attstatsslot() may be used for this.)
*/
@@ -204,7 +204,7 @@ typedef FormData_pg_statistic *Form_pg_statistic;
* the K most common non-null values appearing in the column, and stanumbers
* contains their frequencies (fractions of total row count). The values
* shall be ordered in decreasing frequency. Note that since the arrays are
- * variable-size, K may be chosen by the statistics collector. Values should
+ * variable-size, K may be chosen by the statistics collector. Values should
* not appear in MCV unless they have been observed to occur more than once;
* a unique column will have no MCV slot.
*/
@@ -216,13 +216,13 @@ typedef FormData_pg_statistic *Form_pg_statistic;
* more than one histogram could appear, if a datatype has more than one
* useful sort operator.) stavalues contains M (>=2) non-null values that
* divide the non-null column data values into M-1 bins of approximately equal
- * population. The first stavalues item is the MIN and the last is the MAX.
+ * population. The first stavalues item is the MIN and the last is the MAX.
* stanumbers is not used and should be NULL. IMPORTANT POINT: if an MCV
* slot is also provided, then the histogram describes the data distribution
* *after removing the values listed in MCV* (thus, it's a "compressed
* histogram" in the technical parlance). This allows a more accurate
* representation of the distribution of a column with some very-common
- * values. In a column with only a few distinct values, it's possible that
+ * values. In a column with only a few distinct values, it's possible that
* the MCV list describes the entire data population; in this case the
* histogram reduces to empty and should be omitted.
*/
@@ -233,7 +233,7 @@ typedef FormData_pg_statistic *Form_pg_statistic;
* of table tuples and the ordering of data values of this column, as seen
* by the "<" operator identified by staop. (As with the histogram, more
* than one entry could theoretically appear.) stavalues is not used and
- * should be NULL. stanumbers contains a single entry, the correlation
+ * should be NULL. stanumbers contains a single entry, the correlation
* coefficient between the sequence of data values and the sequence of
* their actual tuple positions. The coefficient ranges from +1 to -1.
*/
@@ -242,7 +242,7 @@ typedef FormData_pg_statistic *Form_pg_statistic;
/*
* A "most common elements" slot is similar to a "most common values" slot,
* except that it stores the most common non-null *elements* of the column
- * values. This is useful when the column datatype is an array or some other
+ * values. This is useful when the column datatype is an array or some other
* type with identifiable elements (for instance, tsvector). staop contains
* the equality operator appropriate to the element type. stavalues contains
* the most common element values, and stanumbers their frequencies. Unlike
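
The slot-search rule spelled out in these comments translates directly into a loop. A minimal sketch with a schematic field layout (an array instead of the stakind1..stakindN columns); the kind codes in the demo assume the numbering in use at the time, 1/2/3 for MCV, histogram, and correlation.

#include <stdio.h>

#define NUM_SLOTS_SKETCH 5

typedef struct StatRowSketch
{
	int			stakind[NUM_SLOTS_SKETCH];	/* 0 means "slot unused" */
} StatRowSketch;

/* return the slot index holding 'kind', or -1 if it is absent */
static int
find_stat_slot(const StatRowSketch *row, int kind)
{
	int			i;

	for (i = 0; i < NUM_SLOTS_SKETCH; i++)
		if (row->stakind[i] == kind)
			return i;
	return -1;					/* caller must cope with "not available" */
}

int
main(void)
{
	StatRowSketch row = {{1, 2, 3, 0, 0}};	/* MCV, histogram, correlation */

	printf("histogram lives in slot %d\n", find_stat_slot(&row, 2));
	return 0;
}
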
diff --git a/src/include/catalog/pg_trigger.h b/src/include/catalog/pg_trigger.h
index 4f26f8e0c5d..2f05311c952 100644
--- a/src/include/catalog/pg_trigger.h
+++ b/src/include/catalog/pg_trigger.h
@@ -22,7 +22,7 @@
#include "catalog/genbki.h"
/* ----------------
- * pg_trigger definition. cpp turns this into
+ * pg_trigger definition. cpp turns this into
* typedef struct FormData_pg_trigger
*
* Note: when tgconstraint is nonzero, tgconstrrelid, tgconstrindid,
diff --git a/src/include/catalog/pg_ts_dict.h b/src/include/catalog/pg_ts_dict.h
index a4fdb7a16fb..39b0658480d 100644
--- a/src/include/catalog/pg_ts_dict.h
+++ b/src/include/catalog/pg_ts_dict.h
@@ -24,7 +24,7 @@
#include "catalog/genbki.h"
/* ----------------
- * pg_ts_dict definition. cpp turns this into
+ * pg_ts_dict definition. cpp turns this into
* typedef struct FormData_pg_ts_dict
* ----------------
*/
diff --git a/src/include/catalog/pg_ts_template.h b/src/include/catalog/pg_ts_template.h
index 1071c4414aa..d711bf8d7eb 100644
--- a/src/include/catalog/pg_ts_template.h
+++ b/src/include/catalog/pg_ts_template.h
@@ -24,7 +24,7 @@
#include "catalog/genbki.h"
/* ----------------
- * pg_ts_template definition. cpp turns this into
+ * pg_ts_template definition. cpp turns this into
* typedef struct FormData_pg_ts_template
* ----------------
*/
diff --git a/src/include/catalog/pg_type.h b/src/include/catalog/pg_type.h
index 93b335189a3..2f80113b09d 100644
--- a/src/include/catalog/pg_type.h
+++ b/src/include/catalog/pg_type.h
@@ -42,7 +42,7 @@ CATALOG(pg_type,1247) BKI_BOOTSTRAP BKI_ROWTYPE_OID(71) BKI_SCHEMA_MACRO
/*
* For a fixed-size type, typlen is the number of bytes we use to
- * represent a value of this type, e.g. 4 for an int4. But for a
+ * represent a value of this type, e.g. 4 for an int4. But for a
* variable-length type, typlen is negative. We use -1 to indicate a
* "varlena" type (one that has a length word), -2 to indicate a
* null-terminated C string.
@@ -51,7 +51,7 @@ CATALOG(pg_type,1247) BKI_BOOTSTRAP BKI_ROWTYPE_OID(71) BKI_SCHEMA_MACRO
/*
* typbyval determines whether internal Postgres routines pass a value of
- * this type by value or by reference. typbyval had better be FALSE if
+ * this type by value or by reference. typbyval had better be FALSE if
* the length is not 1, 2, or 4 (or 8 on 8-byte-Datum machines).
* Variable-length types are always passed by reference. Note that
* typbyval can be false even if the length would allow pass-by-value;
@@ -71,7 +71,7 @@ CATALOG(pg_type,1247) BKI_BOOTSTRAP BKI_ROWTYPE_OID(71) BKI_SCHEMA_MACRO
/*
* typcategory and typispreferred help the parser distinguish preferred
* and non-preferred coercions. The category can be any single ASCII
- * character (but not \0). The categories used for built-in types are
+ * character (but not \0). The categories used for built-in types are
* identified by the TYPCATEGORY macros below.
*/
char typcategory; /* arbitrary type classification */
@@ -80,7 +80,7 @@ CATALOG(pg_type,1247) BKI_BOOTSTRAP BKI_ROWTYPE_OID(71) BKI_SCHEMA_MACRO
/*
* If typisdefined is false, the entry is only a placeholder (forward
- * reference). We know the type name, but not yet anything else about it.
+ * reference). We know the type name, but not yet anything else about it.
*/
bool typisdefined;
@@ -141,7 +141,7 @@ CATALOG(pg_type,1247) BKI_BOOTSTRAP BKI_ROWTYPE_OID(71) BKI_SCHEMA_MACRO
* 'd' = DOUBLE alignment (8 bytes on many machines, but by no means all).
*
* See include/access/tupmacs.h for the macros that compute these
- * alignment requirements. Note also that we allow the nominal alignment
+ * alignment requirements. Note also that we allow the nominal alignment
* to be violated when storing "packed" varlenas; the TOAST mechanism
* takes care of hiding that from most code.
*
@@ -176,7 +176,7 @@ CATALOG(pg_type,1247) BKI_BOOTSTRAP BKI_ROWTYPE_OID(71) BKI_SCHEMA_MACRO
/*
* Domains use typbasetype to show the base (or domain) type that the
- * domain is based on. Zero if the type is not a domain.
+ * domain is based on. Zero if the type is not a domain.
*/
Oid typbasetype;
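
A hedged sketch of the typlen convention above as a size computation: nonnegative means fixed width, -1 means varlena, -2 means C string. The varlena branch reads the length word as a plain uint32, which glosses over packed headers and TOAST; real code would go through VARSIZE_ANY().

#include <stdint.h>
#include <string.h>

static size_t
datum_stored_size(int typlen, const void *value)
{
	if (typlen >= 0)
		return (size_t) typlen;		/* fixed width, e.g. 4 for int4 */

	if (typlen == -1)
	{
		uint32_t	hdr;

		/* varlena: length word at the front, counting itself */
		memcpy(&hdr, value, sizeof(hdr));
		return (size_t) hdr;		/* simplified; see VARSIZE_ANY() */
	}

	/* typlen == -2: null-terminated C string */
	return strlen((const char *) value) + 1;
}

int
main(void)
{
	return datum_stored_size(-2, "abc") == 4 ? 0 : 1;
}
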
diff --git a/src/include/commands/comment.h b/src/include/commands/comment.h
index 1b12e81bdd3..a59bfe78dc1 100644
--- a/src/include/commands/comment.h
+++ b/src/include/commands/comment.h
@@ -24,7 +24,7 @@
* related routines. CommentObject() implements the SQL "COMMENT ON"
* command. DeleteComments() deletes all comments for an object.
* CreateComments creates (or deletes, if comment is NULL) a comment
- * for a specific key. There are versions of these two methods for
+ * for a specific key. There are versions of these two methods for
* both normal and shared objects.
*------------------------------------------------------------------
*/
diff --git a/src/include/commands/vacuum.h b/src/include/commands/vacuum.h
index cfbe0c43924..9ec9799268c 100644
--- a/src/include/commands/vacuum.h
+++ b/src/include/commands/vacuum.h
@@ -25,12 +25,12 @@
/*----------
* ANALYZE builds one of these structs for each attribute (column) that is
- * to be analyzed. The struct and subsidiary data are in anl_context,
+ * to be analyzed. The struct and subsidiary data are in anl_context,
* so they live until the end of the ANALYZE operation.
*
* The type-specific typanalyze function is passed a pointer to this struct
* and must return TRUE to continue analysis, FALSE to skip analysis of this
- * column. In the TRUE case it must set the compute_stats and minrows fields,
+ * column. In the TRUE case it must set the compute_stats and minrows fields,
* and can optionally set extra_data to pass additional info to compute_stats.
* minrows is its request for the minimum number of sample rows to be gathered
* (but note this request might not be honored, eg if there are fewer rows
@@ -68,7 +68,7 @@ typedef struct VacAttrStats
* type-specific typanalyze function.
*
* Note: do not assume that the data being analyzed has the same datatype
- * shown in attr, ie do not trust attr->atttypid, attlen, etc. This is
+ * shown in attr, ie do not trust attr->atttypid, attlen, etc. This is
* because some index opclasses store a different type than the underlying
* column/expression. Instead use attrtypid, attrtypmod, and attrtype for
* information about the datatype being fed to the typanalyze function.
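
A hedged skeleton of the contract described above, with schematic stand-ins for the vacuum.h types: a typanalyze-style function opts in by returning true after setting compute_stats and minrows, and may stash extra info for compute_stats in extra_data.

#include <stdbool.h>

typedef struct VacAttrStatsSketch VacAttrStatsSketch;
typedef void (*ComputeStatsFn) (VacAttrStatsSketch *stats, int numrows);

struct VacAttrStatsSketch
{
	ComputeStatsFn compute_stats;	/* filled in when opting in */
	int			minrows;			/* requested minimum sample size */
	void	   *extra_data;			/* optional info for compute_stats */
};

static void
my_compute_stats(VacAttrStatsSketch *stats, int numrows)
{
	(void) stats;
	(void) numrows;					/* real code computes stanullfrac etc. */
}

static bool
my_typanalyze(VacAttrStatsSketch *stats)
{
	stats->compute_stats = my_compute_stats;
	stats->minrows = 300;			/* a request, not a guarantee */
	stats->extra_data = 0;
	return true;					/* true = analyze this column */
}

int
main(void)
{
	VacAttrStatsSketch st = {0, 0, 0};

	return my_typanalyze(&st) ? 0 : 1;
}
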
diff --git a/src/include/executor/executor.h b/src/include/executor/executor.h
index bdd499bea66..b834282287d 100644
--- a/src/include/executor/executor.h
+++ b/src/include/executor/executor.h
@@ -36,7 +36,7 @@
* REWIND indicates that the plan node should try to efficiently support
* rescans without parameter changes. (Nodes must support ExecReScan calls
* in any case, but if this flag was not given, they are at liberty to do it
- * through complete recalculation. Note that a parameter change forces a
+ * through complete recalculation. Note that a parameter change forces a
* full recalculation in any case.)
*
* BACKWARD indicates that the plan node must respect the es_direction flag.
diff --git a/src/include/executor/hashjoin.h b/src/include/executor/hashjoin.h
index 0c6e06f8ff5..24aa8ad096a 100644
--- a/src/include/executor/hashjoin.h
+++ b/src/include/executor/hashjoin.h
@@ -42,7 +42,7 @@
* If nbatch > 1 then tuples that don't belong in first batch get saved
* into inner-batch temp files. The same statements apply for the
* first scan of the outer relation, except we write tuples to outer-batch
- * temp files. After finishing the first scan, we do the following for
+ * temp files. After finishing the first scan, we do the following for
* each remaining batch:
* 1. Read tuples from inner batch file, load into hash buckets.
* 2. Read tuples from outer batch file, match to hash buckets and output.
@@ -133,7 +133,7 @@ typedef struct HashJoinTableData
/*
* These arrays are allocated for the life of the hash join, but only if
- * nbatch > 1. A file is opened only when we first write a tuple into it
+ * nbatch > 1. A file is opened only when we first write a tuple into it
* (otherwise its pointer remains NULL). Note that the zero'th array
* elements never get used, since we will process rather than dump out any
* tuples of batch zero.
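
The batching mechanics above rest on splitting one hash value into a bucket number and a batch number. A sketch modeled on the backend's ExecHashGetBucketAndBatch, assuming both counts are powers of two:

#include <stdint.h>
#include <stdio.h>

static void
bucket_and_batch(uint32_t hashvalue, int nbuckets, int log2_nbuckets,
				 int nbatch, int *bucketno, int *batchno)
{
	/* low bits pick the bucket within a batch's hash table */
	*bucketno = (int) (hashvalue & (uint32_t) (nbuckets - 1));
	if (nbatch > 1)
		*batchno = (int) ((hashvalue >> log2_nbuckets) &
						  (uint32_t) (nbatch - 1));
	else
		*batchno = 0;			/* batch zero is processed in place */
}

int
main(void)
{
	int			b, n;

	bucket_and_batch(0xDEADBEEFu, 1024, 10, 4, &b, &n);
	printf("bucket %d, batch %d\n", b, n);
	return 0;
}

Using disjoint bit ranges for the two numbers keeps every tuple in a batch's temp file mapping to the same batch when it is reloaded, which is what lets steps 1 and 2 above work per batch.
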
diff --git a/src/include/executor/spi_priv.h b/src/include/executor/spi_priv.h
index 5865f532802..1c66263432d 100644
--- a/src/include/executor/spi_priv.h
+++ b/src/include/executor/spi_priv.h
@@ -43,7 +43,7 @@ typedef struct
* For a saved plan, the _SPI_plan struct and the argument type array are in
* the plancxt (which can be really small). All the other subsidiary state
* is in plancache entries identified by plancache_list (note: the list cells
- * themselves are in plancxt). We rely on plancache.c to keep the cache
+ * themselves are in plancxt). We rely on plancache.c to keep the cache
* entries up-to-date as needed. The plancxt is a child of CacheMemoryContext
* since it should persist until explicitly destroyed.
*
diff --git a/src/include/executor/tuptable.h b/src/include/executor/tuptable.h
index f774f2d0ab6..8b7d336a15a 100644
--- a/src/include/executor/tuptable.h
+++ b/src/include/executor/tuptable.h
@@ -34,7 +34,7 @@
*
* A "minimal" tuple is handled similarly to a palloc'd regular tuple.
* At present, minimal tuples never are stored in buffers, so there is no
- * parallel to case 1. Note that a minimal tuple has no "system columns".
+ * parallel to case 1. Note that a minimal tuple has no "system columns".
* (Actually, it could have an OID, but we have no need to access the OID.)
*
* A "virtual" tuple is an optimization used to minimize physical data
@@ -44,7 +44,7 @@
* a lower plan node's output TupleTableSlot, or to a function result
* constructed in a plan node's per-tuple econtext. It is the responsibility
* of the generating plan node to be sure these resources are not released
- * for as long as the virtual tuple needs to be valid. We only use virtual
+ * for as long as the virtual tuple needs to be valid. We only use virtual
* tuples in the result slots of plan nodes --- tuples to be copied anywhere
* else need to be "materialized" into physical tuples. Note also that a
* virtual tuple does not have any "system columns".
@@ -58,11 +58,11 @@
* payloads when this is the case.
*
* The Datum/isnull arrays of a TupleTableSlot serve double duty. When the
- * slot contains a virtual tuple, they are the authoritative data. When the
+ * slot contains a virtual tuple, they are the authoritative data. When the
* slot contains a physical tuple, the arrays contain data extracted from
* the tuple. (In this state, any pass-by-reference Datums point into
* the physical tuple.) The extracted information is built "lazily",
- * ie, only as needed. This serves to avoid repeated extraction of data
+ * ie, only as needed. This serves to avoid repeated extraction of data
* from the physical tuple.
*
* A TupleTableSlot can also be "empty", holding no valid data. This is
@@ -89,7 +89,7 @@
* buffer page.)
*
* tts_nvalid indicates the number of valid columns in the tts_values/isnull
- * arrays. When the slot is holding a "virtual" tuple this must be equal
+ * arrays. When the slot is holding a "virtual" tuple this must be equal
* to the descriptor's natts. When the slot is holding a physical tuple
* this is equal to the number of columns we have extracted (we always
* extract columns from left to right, so there are no holes).
@@ -103,7 +103,7 @@
* has only a minimal and not also a regular physical tuple, then tts_tuple
* points at tts_minhdr and the fields of that struct are set correctly
* for access to the minimal tuple; in particular, tts_minhdr.t_data points
- * MINIMAL_TUPLE_OFFSET bytes before tts_mintuple. This allows column
+ * MINIMAL_TUPLE_OFFSET bytes before tts_mintuple. This allows column
* extraction to treat the case identically to regular physical tuples.
*
* tts_slow/tts_off are saved state for slot_deform_tuple, and should not
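
A schematic sketch of the lazy, left-to-right extraction rule from the comments above: columns up to tts_nvalid are served from the cached arrays, and a request beyond that deforms further before answering. Structures and names here are simplified stand-ins for the real slot code.

typedef struct SlotSketch
{
	int			tts_nvalid;		/* leading columns already extracted */
	long		tts_values[32];	/* stand-in for the Datum array */
	char		tts_isnull[32];
} SlotSketch;

/* stand-in for slot_deform_tuple(): walk the physical tuple further */
static void
deform_up_to(SlotSketch *slot, int attnum)
{
	int			i;

	for (i = slot->tts_nvalid; i < attnum; i++)
	{
		slot->tts_values[i] = 0;	/* would be read from the tuple */
		slot->tts_isnull[i] = 0;
	}
	slot->tts_nvalid = attnum;		/* no holes: always left to right */
}

static long
slot_getattr_sketch(SlotSketch *slot, int attnum, char *isnull)
{
	if (attnum > slot->tts_nvalid)
		deform_up_to(slot, attnum);	/* extract lazily */
	*isnull = slot->tts_isnull[attnum - 1];
	return slot->tts_values[attnum - 1];
}

int
main(void)
{
	SlotSketch	slot;
	char		isnull;

	slot.tts_nvalid = 0;			/* nothing extracted yet */
	return (int) slot_getattr_sketch(&slot, 3, &isnull);
}
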
diff --git a/src/include/fmgr.h b/src/include/fmgr.h
index e0bde4daf9a..2b9562af6f8 100644
--- a/src/include/fmgr.h
+++ b/src/include/fmgr.h
@@ -101,7 +101,7 @@ extern void fmgr_info_copy(FmgrInfo *dstinfo, FmgrInfo *srcinfo,
/*
* This macro initializes all the fields of a FunctionCallInfoData except
- * for the arg[] and argnull[] arrays. Performance testing has shown that
+ * for the arg[] and argnull[] arrays. Performance testing has shown that
* the fastest way to set up argnull[] for small numbers of arguments is to
* explicitly set each required element to false, so we don't try to zero
* out the argnull[] array in the macro.
@@ -118,7 +118,7 @@ extern void fmgr_info_copy(FmgrInfo *dstinfo, FmgrInfo *srcinfo,
/*
* This macro invokes a function given a filled-in FunctionCallInfoData
- * struct. The macro result is the returned Datum --- but note that
+ * struct. The macro result is the returned Datum --- but note that
* caller must still check fcinfo->isnull! Also, if function is strict,
* it is caller's responsibility to verify that no null arguments are present
* before calling.
@@ -167,11 +167,11 @@ extern void fmgr_info_copy(FmgrInfo *dstinfo, FmgrInfo *srcinfo,
* which are varlena types). pg_detoast_datum() gives you either the input
* datum (if not toasted) or a detoasted copy allocated with palloc().
* pg_detoast_datum_copy() always gives you a palloc'd copy --- use it
- * if you need a modifiable copy of the input. Caller is expected to have
+ * if you need a modifiable copy of the input. Caller is expected to have
* checked for null inputs first, if necessary.
*
* pg_detoast_datum_packed() will return packed (1-byte header) datums
- * unmodified. It will still expand an externally toasted or compressed datum.
+ * unmodified. It will still expand an externally toasted or compressed datum.
* The resulting datum can be accessed using VARSIZE_ANY() and VARDATA_ANY()
* (beware of multiple evaluations in those macros!)
*
@@ -202,7 +202,7 @@ extern struct varlena *pg_detoast_datum_packed(struct varlena * datum);
pg_detoast_datum_packed((struct varlena *) DatumGetPointer(datum))
/*
- * Support for cleaning up detoasted copies of inputs. This must only
+ * Support for cleaning up detoasted copies of inputs. This must only
* be used for pass-by-ref datatypes, and normally would only be used
* for toastable types. If the given pointer is different from the
* original argument, assume it's a palloc'd detoasted copy, and pfree it.
@@ -319,7 +319,7 @@ extern struct varlena *pg_detoast_datum_packed(struct varlena * datum);
* Dynamically loaded functions may use either the version-1 ("new style")
* or version-0 ("old style") calling convention. Version 1 is the call
* convention defined in this header file; version 0 is the old "plain C"
- * convention. A version-1 function must be accompanied by the macro call
+ * convention. A version-1 function must be accompanied by the macro call
*
* PG_FUNCTION_INFO_V1(function_name);
*
@@ -500,8 +500,8 @@ extern Datum FunctionCall9Coll(FmgrInfo *flinfo, Oid collation,
/* These are for invocation of a function identified by OID with a
* directly-computed parameter list. Note that neither arguments nor result
- * are allowed to be NULL. These are essentially FunctionLookup() followed
- * by FunctionCallN(). If the same function is to be invoked repeatedly,
+ * are allowed to be NULL. These are essentially FunctionLookup() followed
+ * by FunctionCallN(). If the same function is to be invoked repeatedly,
* do the FunctionLookup() once and then use FunctionCallN().
*/
extern Datum OidFunctionCall0Coll(Oid functionId, Oid collation);
@@ -655,7 +655,7 @@ extern int AggCheckCallContext(FunctionCallInfo fcinfo,
* We allow plugin modules to hook function entry/exit. This is intended
* as support for loadable security policy modules, which may want to
* perform additional privilege checks on function entry or exit, or to do
- * other internal bookkeeping. To make this possible, such modules must be
+ * other internal bookkeeping. To make this possible, such modules must be
* able not only to support normal function entry and exit, but also to trap
* the case where we bail out due to an error; and they must also be able to
* prevent inlining.
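
The lookup-once advice above turns into a small backend-side pattern. This sketch compiles only inside the backend and uses the real fmgr_info() and FunctionCall1() entry points; the caveats from the comments still apply, since FunctionCall1 disallows NULL arguments and results, and strictness checks are the caller's job.

#include "postgres.h"
#include "fmgr.h"

/* look the function up once, then call it many times through the cache */
static Datum
apply_to_all(Oid funcoid, Datum *values, int nvalues)
{
	FmgrInfo	flinfo;
	Datum		result = (Datum) 0;
	int			i;

	fmgr_info(funcoid, &flinfo);	/* the expensive part, done once */
	for (i = 0; i < nvalues; i++)
		result = FunctionCall1(&flinfo, values[i]);	/* cheap per call */
	return result;
}
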
diff --git a/src/include/funcapi.h b/src/include/funcapi.h
index 6c606ebbc81..932355ac7df 100644
--- a/src/include/funcapi.h
+++ b/src/include/funcapi.h
@@ -129,7 +129,7 @@ typedef struct FuncCallContext
* Given a function's call info record, determine the kind of datatype
* it is supposed to return. If resultTypeId isn't NULL, *resultTypeId
* receives the actual datatype OID (this is mainly useful for scalar
- * result types). If resultTupleDesc isn't NULL, *resultTupleDesc
+ * result types). If resultTupleDesc isn't NULL, *resultTupleDesc
* receives a pointer to a TupleDesc when the result is of a composite
* type, or NULL when it's a scalar result or the rowtype could not be
* determined. NB: the tupledesc should be copied if it is to be
diff --git a/src/include/lib/stringinfo.h b/src/include/lib/stringinfo.h
index 3657a685395..6e9e942ae3d 100644
--- a/src/include/lib/stringinfo.h
+++ b/src/include/lib/stringinfo.h
@@ -60,7 +60,7 @@ typedef StringInfoData *StringInfo;
*
* NOTE: some routines build up a string using StringInfo, and then
* release the StringInfoData but return the data string itself to their
- * caller. At that point the data string looks like a plain palloc'd
+ * caller. At that point the data string looks like a plain palloc'd
* string.
*-------------------------
*/
@@ -100,7 +100,7 @@ __attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 3)));
/*------------------------
* appendStringInfoVA
* Attempt to format text data under the control of fmt (an sprintf-style
- * format string) and append it to whatever is already in str. If successful
+ * format string) and append it to whatever is already in str. If successful
* return true; if not (because there's not enough space), return false
* without modifying str. Typically the caller would enlarge str and retry
* on false return --- see appendStringInfo for standard usage pattern.
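
The retry idiom the comment describes, as a hedged backend-side sketch modeled on appendStringInfo() itself: attempt the format, and on a false return enlarge the buffer (doubling via the current maxlen) and loop.

#include "postgres.h"
#include "lib/stringinfo.h"

static void
append_fmt(StringInfo str, const char *fmt,...)
{
	for (;;)
	{
		va_list		args;
		bool		success;

		/* try to print into the currently allocated space */
		va_start(args, fmt);
		success = appendStringInfoVA(str, fmt, args);
		va_end(args);

		if (success)
			break;

		/* out of room: double the buffer and try again */
		enlargeStringInfo(str, str->maxlen);
	}
}
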
diff --git a/src/include/libpq/libpq-be.h b/src/include/libpq/libpq-be.h
index a09a29601a1..3278e039ec3 100644
--- a/src/include/libpq/libpq-be.h
+++ b/src/include/libpq/libpq-be.h
@@ -93,10 +93,10 @@ typedef struct
#endif
/*
- * This is used by the postmaster in its communication with frontends. It
+ * This is used by the postmaster in its communication with frontends. It
* contains all state information needed during this communication before the
- * backend is run. The Port structure is kept in malloc'd memory and is
- * still available when a backend is running (see MyProcPort). The data
+ * backend is run. The Port structure is kept in malloc'd memory and is
+ * still available when a backend is running (see MyProcPort). The data
* it points to must also be malloc'd, or else palloc'd in TopMemoryContext,
* so that it survives into PostgresMain execution!
*
@@ -131,7 +131,7 @@ typedef struct Port
/*
* Information that needs to be saved from the startup packet and passed
- * into backend execution. "char *" fields are NULL if not set.
+ * into backend execution. "char *" fields are NULL if not set.
* guc_options points to a List of alternating option names and values.
*/
char *database_name;
diff --git a/src/include/libpq/pqcomm.h b/src/include/libpq/pqcomm.h
index c28ada8b187..d8634813a38 100644
--- a/src/include/libpq/pqcomm.h
+++ b/src/include/libpq/pqcomm.h
@@ -153,7 +153,7 @@ extern bool Db_user_namespace;
/*
* In protocol 3.0 and later, the startup packet length is not fixed, but
- * we set an arbitrary limit on it anyway. This is just to prevent simple
+ * we set an arbitrary limit on it anyway. This is just to prevent simple
* denial-of-service attacks via sending enough data to run the server
* out of memory.
*/
diff --git a/src/include/mb/pg_wchar.h b/src/include/mb/pg_wchar.h
index 88960c433b3..0a55436e62a 100644
--- a/src/include/mb/pg_wchar.h
+++ b/src/include/mb/pg_wchar.h
@@ -10,7 +10,7 @@
*
* NOTES
* This is used both by the backend and by libpq, but should not be
- * included by libpq client programs. In particular, a libpq client
+ * included by libpq client programs. In particular, a libpq client
* should not assume that the encoding IDs used by the version of libpq
* it's linked to match up with the IDs declared here.
*
@@ -38,7 +38,7 @@ typedef unsigned int pg_wchar;
/*
* Leading byte types or leading prefix byte for MULE internal code.
- * See http://www.xemacs.org for more details. (there is a doc titled
+ * See http://www.xemacs.org for more details. (there is a doc titled
* "XEmacs Internals Manual", "MULE Character Sets and Encodings"
* section.)
*/
diff --git a/src/include/miscadmin.h b/src/include/miscadmin.h
index 82af9ab89fa..238fed3ab38 100644
--- a/src/include/miscadmin.h
+++ b/src/include/miscadmin.h
@@ -37,7 +37,7 @@
* In both cases, we need to be able to clean up the current transaction
* gracefully, so we can't respond to the interrupt instantaneously ---
* there's no guarantee that internal data structures would be self-consistent
- * if the code is interrupted at an arbitrary instant. Instead, the signal
+ * if the code is interrupted at an arbitrary instant. Instead, the signal
* handlers set flags that are checked periodically during execution.
*
* The CHECK_FOR_INTERRUPTS() macro is called at strategically located spots
@@ -46,19 +46,19 @@
* might sometimes be called in contexts that do *not* want to allow a cancel
* or die interrupt. The HOLD_INTERRUPTS() and RESUME_INTERRUPTS() macros
* allow code to ensure that no cancel or die interrupt will be accepted,
- * even if CHECK_FOR_INTERRUPTS() gets called in a subroutine. The interrupt
+ * even if CHECK_FOR_INTERRUPTS() gets called in a subroutine. The interrupt
* will be held off until CHECK_FOR_INTERRUPTS() is done outside any
* HOLD_INTERRUPTS() ... RESUME_INTERRUPTS() section.
*
* Special mechanisms are used to let an interrupt be accepted when we are
* waiting for a lock or when we are waiting for command input (but, of
- * course, only if the interrupt holdoff counter is zero). See the
+ * course, only if the interrupt holdoff counter is zero). See the
* related code for details.
*
* A related, but conceptually distinct, mechanism is the "critical section"
* mechanism. A critical section not only holds off cancel/die interrupts,
* but causes any ereport(ERROR) or ereport(FATAL) to become ereport(PANIC)
- * --- that is, a system-wide reset is forced. Needless to say, only really
+ * --- that is, a system-wide reset is forced. Needless to say, only really
* *critical* code should be marked as a critical section! Currently, this
* mechanism is only used for XLOG-related code.
*
@@ -260,7 +260,7 @@ extern int trace_recovery(int trace_level);
/*****************************************************************************
* pdir.h -- *
- * POSTGRES directory path definitions. *
+ * POSTGRES directory path definitions. *
*****************************************************************************/
/* flags to be OR'd to form sec_context */
@@ -299,7 +299,7 @@ extern bool superuser_arg(Oid roleid); /* given user is superuser */
/*****************************************************************************
* pmod.h -- *
- * POSTGRES processing mode definitions. *
+ * POSTGRES processing mode definitions. *
*****************************************************************************/
/*
@@ -314,7 +314,7 @@ extern bool superuser_arg(Oid roleid); /* given user is superuser */
* is used during the initial generation of template databases.
*
* Initialization mode: used while starting a backend, until all normal
- * initialization is complete. Some code behaves differently when executed
+ * initialization is complete. Some code behaves differently when executed
* in this mode to enable system bootstrapping.
*
* If a POSTGRES binary is in normal mode, then all code may be executed
@@ -347,7 +347,7 @@ extern ProcessingMode Mode;
/*****************************************************************************
* pinit.h -- *
- * POSTGRES initialization and cleanup definitions. *
+ * POSTGRES initialization and cleanup definitions. *
*****************************************************************************/
/* in utils/init/postinit.c */
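
A simplified, self-contained sketch of the flag-and-check scheme described above: handlers only set a flag, CHECK_FOR_INTERRUPTS() services it at safe spots, and a holdoff counter keeps sections atomic. Every name carries a _SKETCH suffix to mark it as a stand-in for the miscadmin.h original, which additionally handles critical sections and ereport().

#include <signal.h>

static volatile sig_atomic_t InterruptPendingSketch = 0;
static volatile int InterruptHoldoffCountSketch = 0;

#define HOLD_INTERRUPTS_SKETCH()	(InterruptHoldoffCountSketch++)
#define RESUME_INTERRUPTS_SKETCH()	(InterruptHoldoffCountSketch--)

/* the handler only sets a flag; no data structure is touched here */
static void
handle_sigint_sketch(int sig)
{
	(void) sig;
	InterruptPendingSketch = 1;
}

static void
process_interrupts_sketch(void)
{
	if (InterruptHoldoffCountSketch == 0 && InterruptPendingSketch)
	{
		InterruptPendingSketch = 0;
		/* a real backend would ereport(ERROR) or proc_exit() here */
	}
}

#define CHECK_FOR_INTERRUPTS_SKETCH() \
	do { \
		if (InterruptPendingSketch) \
			process_interrupts_sketch(); \
	} while (0)

int
main(void)
{
	signal(SIGINT, handle_sigint_sketch);
	HOLD_INTERRUPTS_SKETCH();
	CHECK_FOR_INTERRUPTS_SKETCH();	/* held off inside this section */
	RESUME_INTERRUPTS_SKETCH();
	CHECK_FOR_INTERRUPTS_SKETCH();	/* serviced here, if one arrived */
	return 0;
}
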
diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h
index 828f7007a31..c16f6404710 100644
--- a/src/include/nodes/execnodes.h
+++ b/src/include/nodes/execnodes.h
@@ -90,14 +90,14 @@ typedef struct ExprContext_CB
*
* This class holds the "current context" information
* needed to evaluate expressions for doing tuple qualifications
- * and tuple projections. For example, if an expression refers
+ * and tuple projections. For example, if an expression refers
* to an attribute in the current inner tuple then we need to know
* what the current inner tuple is and so we look at the expression
* context.
*
* There are two memory contexts associated with an ExprContext:
* * ecxt_per_query_memory is a query-lifespan context, typically the same
- * context the ExprContext node itself is allocated in. This context
+ * context the ExprContext node itself is allocated in. This context
* can be used for purposes such as storing function call cache info.
* * ecxt_per_tuple_memory is a short-term context for expression results.
* As the name suggests, it will typically be reset once per tuple,
@@ -200,9 +200,9 @@ typedef struct ReturnSetInfo
* Nodes which need to do projections create one of these.
*
* ExecProject() evaluates the tlist, forms a tuple, and stores it
- * in the given slot. Note that the result will be a "virtual" tuple
+ * in the given slot. Note that the result will be a "virtual" tuple
* unless ExecMaterializeSlot() is then called to force it to be
- * converted to a physical tuple. The slot must have a tupledesc
+ * converted to a physical tuple. The slot must have a tupledesc
* that matches the output of the tlist!
*
* The planner very often produces tlists that consist entirely of
@@ -257,7 +257,7 @@ typedef struct ProjectionInfo
* in emitted tuples. For example, when we do an UPDATE query,
* the planner adds a "junk" entry to the targetlist so that the tuples
* returned to ExecutePlan() contain an extra attribute: the ctid of
- * the tuple to be updated. This is needed to do the update, but we
+ * the tuple to be updated. This is needed to do the update, but we
* don't want the ctid to be part of the stored new tuple! So, we
* apply a "junk filter" to remove the junk attributes and form the
* real output tuple. The junkfilter code also provides routines to
@@ -391,7 +391,7 @@ typedef struct EState
/*
* These fields are for re-evaluating plan quals when an updated tuple is
- * substituted in READ COMMITTED mode. es_epqTuple[] contains tuples that
+ * substituted in READ COMMITTED mode. es_epqTuple[] contains tuples that
* scan plan nodes should return instead of whatever they'd normally
* return, or NULL if nothing to return; es_epqTupleSet[] is true if a
* particular array entry is valid; and es_epqScanDone[] is state to
@@ -653,7 +653,7 @@ typedef struct FuncExprState
/*
* In some cases we need to compute a tuple descriptor for the function's
- * output. If so, it's stored here.
+ * output. If so, it's stored here.
*/
TupleDesc funcResultDesc;
bool funcReturnsTuple; /* valid when funcResultDesc isn't
@@ -677,7 +677,7 @@ typedef struct FuncExprState
/*
* Flag to remember whether we have registered a shutdown callback for
- * this FuncExprState. We do so only if funcResultStore or setArgsValid
+ * this FuncExprState. We do so only if funcResultStore or setArgsValid
* has been set at least once (since all the callback is for is to release
* the tuplestore or clear setArgsValid).
*/
@@ -1419,7 +1419,7 @@ typedef struct CteScanState
* WorkTableScanState information
*
* WorkTableScan nodes are used to scan the work table created by
- * a RecursiveUnion node. We locate the RecursiveUnion node
+ * a RecursiveUnion node. We locate the RecursiveUnion node
* during executor startup.
* ----------------
*/
@@ -1731,7 +1731,7 @@ typedef struct WindowAggState
* UniqueState information
*
* Unique nodes are used "on top of" sort nodes to discard
- * duplicate tuples returned from the sort phase. Basically
+ * duplicate tuples returned from the sort phase. Basically
* all it does is compare the current tuple from the subplan
* with the previously fetched tuple (stored in its result slot).
* If the two are identical in all interesting fields, then
diff --git a/src/include/nodes/nodes.h b/src/include/nodes/nodes.h
index 79095639b53..0637a3c2ddd 100644
--- a/src/include/nodes/nodes.h
+++ b/src/include/nodes/nodes.h
@@ -567,7 +567,7 @@ typedef enum JoinType
/*
* Semijoins and anti-semijoins (as defined in relational theory) do not
* appear in the SQL JOIN syntax, but there are standard idioms for
- * representing them (e.g., using EXISTS). The planner recognizes these
+ * representing them (e.g., using EXISTS). The planner recognizes these
* cases and converts them to joins. So the planner and executor must
* support these codes. NOTE: in JOIN_SEMI output, it is unspecified
* which matching RHS row is joined to. In JOIN_ANTI output, the row is
@@ -591,7 +591,7 @@ typedef enum JoinType
/*
* OUTER joins are those for which pushed-down quals must behave differently
* from the join's own quals. This is in fact everything except INNER and
- * SEMI joins. However, this macro must also exclude the JOIN_UNIQUE symbols
+ * SEMI joins. However, this macro must also exclude the JOIN_UNIQUE symbols
* since those are temporary proxies for what will eventually be an INNER
* join.
*
diff --git a/src/include/nodes/params.h b/src/include/nodes/params.h
index cf9a788019f..50c544eb550 100644
--- a/src/include/nodes/params.h
+++ b/src/include/nodes/params.h
@@ -22,20 +22,20 @@ struct ParseState;
* ParamListInfo
*
* ParamListInfo arrays are used to pass parameters into the executor
- * for parameterized plans. Each entry in the array defines the value
+ * for parameterized plans. Each entry in the array defines the value
* to be substituted for a PARAM_EXTERN parameter. The "paramid"
* of a PARAM_EXTERN Param can range from 1 to numParams.
*
* Although parameter numbers are normally consecutive, we allow
* ptype == InvalidOid to signal an unused array entry.
*
- * pflags is a flags field. Currently the only used bit is:
+ * pflags is a flags field. Currently the only used bit is:
* PARAM_FLAG_CONST signals the planner that it may treat this parameter
* as a constant (i.e., generate a plan that works only for this value
* of the parameter).
*
* There are two hook functions that can be associated with a ParamListInfo
- * array to support dynamic parameter handling. First, if paramFetch
+ * array to support dynamic parameter handling. First, if paramFetch
* isn't null and the executor requires a value for an invalid parameter
* (one with ptype == InvalidOid), the paramFetch hook is called to give
* it a chance to fill in the parameter value. Second, a parserSetup
@@ -85,7 +85,7 @@ typedef struct ParamListInfoData
* es_param_exec_vals or ecxt_param_exec_vals.
*
* If execPlan is not NULL, it points to a SubPlanState node that needs
- * to be executed to produce the value. (This is done so that we can have
+ * to be executed to produce the value. (This is done so that we can have
* lazy evaluation of InitPlans: they aren't executed until/unless a
* result value is needed.) Otherwise the value is assumed to be valid
* when needed.
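
A schematic sketch of the paramFetch hook protocol described above: an entry still marked invalid (ptype == InvalidOid) gives the hook one chance to fill the value in before the executor reads it. The types, and the int4 type OID in the demo, are illustrative assumptions.

typedef unsigned int Oid;
#define InvalidOid ((Oid) 0)

typedef struct ParamSketch
{
	Oid			ptype;			/* InvalidOid means "not filled in yet" */
	long		value;			/* stand-in for Datum */
} ParamSketch;

typedef struct ParamListSketch ParamListSketch;
typedef void (*ParamFetchHook) (ParamListSketch *params, int paramid);

struct ParamListSketch
{
	ParamFetchHook paramFetch;	/* may be NULL */
	int			numParams;
	ParamSketch params[8];		/* paramid 1..numParams */
};

static ParamSketch *
fetch_param(ParamListSketch *params, int paramid)
{
	ParamSketch *prm = &params->params[paramid - 1];

	/* give the hook a chance to supply the value lazily */
	if (prm->ptype == InvalidOid && params->paramFetch != 0)
		params->paramFetch(params, paramid);
	return prm;
}

/* demo hook: pretend the parameter turns out to be int4 42 */
static void
demo_fetch(ParamListSketch *params, int paramid)
{
	params->params[paramid - 1].ptype = 23;		/* assumed int4 type OID */
	params->params[paramid - 1].value = 42;
}

int
main(void)
{
	ParamListSketch pl = {demo_fetch, 1, {{InvalidOid, 0}}};

	return (int) (fetch_param(&pl, 1)->value - 42);
}
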
diff --git a/src/include/nodes/parsenodes.h b/src/include/nodes/parsenodes.h
index 1301463c626..758c0873411 100644
--- a/src/include/nodes/parsenodes.h
+++ b/src/include/nodes/parsenodes.h
@@ -157,7 +157,7 @@ typedef struct Query
* Supporting data structures for Parse Trees
*
* Most of these node types appear in raw parsetrees output by the grammar,
- * and get transformed to something else by the analyzer. A few of them
+ * and get transformed to something else by the analyzer. A few of them
* are used as-is in transformed querytrees.
****************************************************************************/
@@ -171,7 +171,7 @@ typedef struct Query
* be prespecified in typemod, otherwise typemod is unused.
*
* If pct_type is TRUE, then names is actually a field name and we look up
- * the type of that field. Otherwise (the normal case), names is a type
+ * the type of that field. Otherwise (the normal case), names is a type
* name possibly qualified with schema and database name.
*/
typedef struct TypeName
@@ -190,7 +190,7 @@ typedef struct TypeName
/*
* ColumnRef - specifies a reference to a column, or possibly a whole tuple
*
- * The "fields" list must be nonempty. It can contain string Value nodes
+ * The "fields" list must be nonempty. It can contain string Value nodes
* (representing names) and A_Star nodes (representing occurrence of a '*').
* Currently, A_Star must appear only as the last list element --- the grammar
* is responsible for enforcing this!
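
For instance, the parser builds the ColumnRef for "t.*" roughly like this (a sketch; the surrounding grammar action is elided):

    ColumnRef  *cref = makeNode(ColumnRef);

    /* a string Value for the name, with A_Star last as required above */
    cref->fields = list_make2(makeString("t"), makeNode(A_Star));
    cref->location = -1;
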
@@ -474,7 +474,7 @@ typedef struct RangeFunction
* in either "raw" form (an untransformed parse tree) or "cooked" form
* (a post-parse-analysis, executable expression tree), depending on
* how this ColumnDef node was created (by parsing, or by inheritance
- * from an existing relation). We should never have both in the same node!
+ * from an existing relation). We should never have both in the same node!
*
* Similarly, we may have a COLLATE specification in either raw form
* (represented as a CollateClause with arg==NULL) or cooked form
@@ -544,7 +544,7 @@ typedef struct IndexElem
/*
* DefElem - a generic "name = value" option definition
*
- * In some contexts the name can be qualified. Also, certain SQL commands
+ * In some contexts the name can be qualified. Also, certain SQL commands
* allow a SET/ADD/DROP action to be attached to option settings, so it's
* convenient to carry a field for that too. (Note: currently, it is our
* practice that the grammar allows namespace and action only in statements
@@ -571,7 +571,7 @@ typedef struct DefElem
/*
* LockingClause - raw representation of FOR UPDATE/SHARE options
*
- * Note: lockedRels == NIL means "all relations in query". Otherwise it
+ * Note: lockedRels == NIL means "all relations in query". Otherwise it
* is a list of RangeVar nodes. (We use RangeVar mainly because it carries
* a location field --- currently, parse analysis insists on unqualified
* names in LockingClause.)
@@ -626,8 +626,8 @@ typedef struct XmlSerialize
*
* In RELATION RTEs, the colnames in both alias and eref are indexed by
* physical attribute number; this means there must be colname entries for
- * dropped columns. When building an RTE we insert empty strings ("") for
- * dropped columns. Note however that a stored rule may have nonempty
+ * dropped columns. When building an RTE we insert empty strings ("") for
+ * dropped columns. Note however that a stored rule may have nonempty
* colnames for columns dropped since the rule was created (and for that
* matter the colnames might be out of date due to column renamings).
* The same comments apply to FUNCTION RTEs when the function's return type
@@ -635,9 +635,9 @@ typedef struct XmlSerialize
*
* In JOIN RTEs, the colnames in both alias and eref are one-to-one with
* joinaliasvars entries. A JOIN RTE will omit columns of its inputs when
- * those columns are known to be dropped at parse time. Again, however,
+ * those columns are known to be dropped at parse time. Again, however,
* a stored rule might contain entries for columns dropped since the rule
- * was created. (This is only possible for columns not actually referenced
+ * was created. (This is only possible for columns not actually referenced
* in the rule.) When loading a stored rule, we replace the joinaliasvars
* items for any such columns with null pointers. (We can't simply delete
* them from the joinaliasvars list, because that would affect the attnums
@@ -656,7 +656,7 @@ typedef struct XmlSerialize
* decompiled queries.
*
* requiredPerms and checkAsUser specify run-time access permissions
- * checks to be performed at query startup. The user must have *all*
+ * checks to be performed at query startup. The user must have *all*
* of the permissions that are OR'd together in requiredPerms (zero
* indicates no permissions checking). If checkAsUser is not zero,
* then do the permissions checks using the access rights of that user,
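
Executor startup enforces this roughly as sketched below (cf. ExecCheckRTEPerms; error reporting simplified):

    AclMode     required = rte->requiredPerms;
    Oid         userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();

    /* every bit OR'd into requiredPerms must be granted */
    if (required != 0 &&
        pg_class_aclmask(rte->relid, userid, required, ACLMASK_ALL) != required)
        aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS,
                       get_rel_name(rte->relid));
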
@@ -710,7 +710,7 @@ typedef struct RangeTblEntry
* Fields valid for a join RTE (else NULL/zero):
*
* joinaliasvars is a list of (usually) Vars corresponding to the columns
- * of the join result. An alias Var referencing column K of the join
+ * of the join result. An alias Var referencing column K of the join
* result can be replaced by the K'th element of joinaliasvars --- but to
* simplify the task of reverse-listing aliases correctly, we do not do
* that until planning time. In detail: an element of joinaliasvars can
@@ -731,7 +731,7 @@ typedef struct RangeTblEntry
*
* If the function returns RECORD, funccoltypes lists the column types
* declared in the RTE's column type specification, funccoltypmods lists
- * their declared typmods, funccolcollations their collations. Otherwise,
+ * their declared typmods, funccolcollations their collations. Otherwise,
* those fields are NIL.
*/
Node *funcexpr; /* expression tree for func call */
@@ -776,7 +776,7 @@ typedef struct RangeTblEntry
* You might think that ORDER BY is only interested in defining ordering,
* and GROUP/DISTINCT are only interested in defining equality. However,
* one way to implement grouping is to sort and then apply a "uniq"-like
- * filter. So it's also interesting to keep track of possible sort operators
+ * filter. So it's also interesting to keep track of possible sort operators
* for GROUP/DISTINCT, and in particular to try to sort for the grouping
* in a way that will also yield a requested ORDER BY ordering. So we need
* to be able to compare ORDER BY and GROUP/DISTINCT lists, which motivates
@@ -796,15 +796,15 @@ typedef struct RangeTblEntry
* here, but it's cheap to get it along with the sortop, and requiring it
* to be valid eases comparisons to grouping items.) Note that this isn't
* actually enough information to determine an ordering: if the sortop is
- * collation-sensitive, a collation OID is needed too. We don't store the
+ * collation-sensitive, a collation OID is needed too. We don't store the
* collation in SortGroupClause because it's not available at the time the
* parser builds the SortGroupClause; instead, consult the exposed collation
* of the referenced targetlist expression to find out what it is.
*
- * In a grouping item, eqop must be valid. If the eqop is a btree equality
+ * In a grouping item, eqop must be valid. If the eqop is a btree equality
* operator, then sortop should be set to a compatible ordering operator.
* We prefer to set eqop/sortop/nulls_first to match any ORDER BY item that
- * the query presents for the same tlist item. If there is none, we just
+ * the query presents for the same tlist item. If there is none, we just
* use the default ordering op for the datatype.
*
* If the tlist item's type has a hash opclass but no btree opclass, then
@@ -1060,7 +1060,7 @@ typedef struct SelectStmt
* range table. Its setOperations field shows the tree of set operations,
* with leaf SelectStmt nodes replaced by RangeTblRef nodes, and internal
* nodes replaced by SetOperationStmt nodes. Information about the output
- * column types is added, too. (Note that the child nodes do not necessarily
+ * column types is added, too. (Note that the child nodes do not necessarily
* produce these types directly, but we've checked that their output types
* can be coerced to the output column type.) Also, if it's not UNION ALL,
* information about the types' sort/group semantics is provided in the form
@@ -1348,7 +1348,7 @@ typedef struct AccessPriv
*
* Note: because of the parsing ambiguity with the GRANT <privileges>
* statement, granted_roles is a list of AccessPriv; the execution code
- * should complain if any column lists appear. grantee_roles is a list
+ * should complain if any column lists appear. grantee_roles is a list
* of role names, as Value strings.
* ----------------------
*/
@@ -1378,7 +1378,7 @@ typedef struct AlterDefaultPrivilegesStmt
* Copy Statement
*
* We support "COPY relation FROM file", "COPY relation TO file", and
- * "COPY (query) TO file". In any given CopyStmt, exactly one of "relation"
+ * "COPY (query) TO file". In any given CopyStmt, exactly one of "relation"
* and "query" must be non-NULL.
* ----------------------
*/
@@ -1992,7 +1992,7 @@ typedef struct SecLabelStmt
* Declare Cursor Statement
*
* Note: the "query" field of DeclareCursorStmt is only used in the raw grammar
- * output. After parse analysis it's set to null, and the Query points to the
+ * output. After parse analysis it's set to null, and the Query points to the
* DeclareCursorStmt, not vice versa.
* ----------------------
*/
@@ -2052,7 +2052,7 @@ typedef struct FetchStmt
*
* This represents creation of an index and/or an associated constraint.
* If indexOid isn't InvalidOid, we are not creating an index, just a
- * UNIQUE/PKEY constraint using an existing index. isconstraint must always
+ * UNIQUE/PKEY constraint using an existing index. isconstraint must always
* be true in this case, and the fields describing the index properties are
* empty.
* ----------------------
diff --git a/src/include/nodes/plannodes.h b/src/include/nodes/plannodes.h
index 7ab0b34885c..b7b037a5d85 100644
--- a/src/include/nodes/plannodes.h
+++ b/src/include/nodes/plannodes.h
@@ -230,7 +230,7 @@ typedef struct RecursiveUnion
* BitmapAnd node -
* Generate the intersection of the results of sub-plans.
*
- * The subplans must be of types that yield tuple bitmaps. The targetlist
+ * The subplans must be of types that yield tuple bitmaps. The targetlist
* and qual fields of the plan are unused and are always NIL.
* ----------------
*/
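
Since the targetlist and qual are always NIL, the node carries little beyond the shared Plan header and its input list; roughly, per plannodes.h:

    typedef struct BitmapAnd
    {
        Plan        plan;           /* targetlist and qual stay NIL */
        List       *bitmapplans;    /* sub-plans yielding tuple bitmaps */
    } BitmapAnd;
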
@@ -244,7 +244,7 @@ typedef struct BitmapAnd
* BitmapOr node -
* Generate the union of the results of sub-plans.
*
- * The subplans must be of types that yield tuple bitmaps. The targetlist
+ * The subplans must be of types that yield tuple bitmaps. The targetlist
* and qual fields of the plan are unused and are always NIL.
* ----------------
*/
@@ -278,7 +278,7 @@ typedef Scan SeqScan;
* in the same form it appeared in the query WHERE condition. Each should
* be of the form (indexkey OP comparisonval) or (comparisonval OP indexkey).
* The indexkey is a Var or expression referencing column(s) of the index's
- * base table. The comparisonval might be any expression, but it won't use
+ * base table. The comparisonval might be any expression, but it won't use
* any columns of the base table. The expressions are ordered by index
* column position (but items referencing the same index column can appear
* in any order). indexqualorig is used at runtime only if we have to recheck
@@ -288,15 +288,15 @@ typedef Scan SeqScan;
* necessary to put the indexkeys on the left, and the indexkeys are replaced
* by Var nodes identifying the index columns (varattno is the index column
* position, not the base table's column, even though varno is for the base
- * table). This is a bit hokey ... would be cleaner to use a special-purpose
- * node type that could not be mistaken for a regular Var. But it will do
+ * table). This is a bit hokey ... would be cleaner to use a special-purpose
+ * node type that could not be mistaken for a regular Var. But it will do
* for now.
*
* indexorderbyorig is similarly the original form of any ORDER BY expressions
* that are being implemented by the index, while indexorderby is modified to
* have index column Vars on the left-hand side. Here, multiple expressions
* must appear in exactly the ORDER BY order, and this is not necessarily the
- * index column order. Only the expressions are provided, not the auxiliary
+ * index column order. Only the expressions are provided, not the auxiliary
* sort-order information from the ORDER BY SortGroupClauses; it's assumed
* that the sort ordering is fully determinable from the top-level operators.
* indexorderbyorig is unused at run time, but is needed for EXPLAIN.
@@ -318,7 +318,7 @@ typedef struct IndexScan
* bitmap index scan node
*
* BitmapIndexScan delivers a bitmap of potential tuple locations;
- * it does not access the heap itself. The bitmap is used by an
+ * it does not access the heap itself. The bitmap is used by an
* ancestor BitmapHeapScan node, possibly after passing through
* intermediate BitmapAnd and/or BitmapOr nodes to combine it with
* the results of other BitmapIndexScans.
@@ -378,13 +378,13 @@ typedef struct TidScan
* purposes.
*
* Note: we store the sub-plan in the type-specific subplan field, not in
- * the generic lefttree field as you might expect. This is because we do
+ * the generic lefttree field as you might expect. This is because we do
* not want plan-tree-traversal routines to recurse into the subplan without
* knowing that they are changing Query contexts.
*
* Note: subrtable is used just to carry the subquery rangetable from
* createplan.c to setrefs.c; it should always be NIL by the time the
- * executor sees the plan. Similarly for subrowmark.
+ * executor sees the plan. Similarly for subrowmark.
* ----------------
*/
typedef struct SubqueryScan
@@ -747,7 +747,7 @@ typedef enum RowMarkType
* plan-time representation of FOR UPDATE/SHARE clauses
*
* When doing UPDATE, DELETE, or SELECT FOR UPDATE/SHARE, we create a separate
- * PlanRowMark node for each non-target relation in the query. Relations that
+ * PlanRowMark node for each non-target relation in the query. Relations that
* are not specified as FOR UPDATE/SHARE are marked ROW_MARK_REFERENCE (if
* real tables) or ROW_MARK_COPY (if not).
*
@@ -793,7 +793,7 @@ typedef struct PlanRowMark
*
* We track the objects on which a PlannedStmt depends in two ways:
* relations are recorded as a simple list of OIDs, and everything else
- * is represented as a list of PlanInvalItems. A PlanInvalItem is designed
+ * is represented as a list of PlanInvalItems. A PlanInvalItem is designed
* to be used with the syscache invalidation mechanism, so it identifies a
* system catalog entry by cache ID and tuple TID.
*/
diff --git a/src/include/nodes/primnodes.h b/src/include/nodes/primnodes.h
index 5815139447a..0768418c7a0 100644
--- a/src/include/nodes/primnodes.h
+++ b/src/include/nodes/primnodes.h
@@ -33,7 +33,7 @@
*
* Note: colnames is a list of Value nodes (always strings). In Alias structs
* associated with RTEs, there may be entries corresponding to dropped
- * columns; these are normally empty strings (""). See parsenodes.h for info.
+ * columns; these are normally empty strings (""). See parsenodes.h for info.
*/
typedef struct Alias
{
@@ -215,14 +215,14 @@ typedef struct Param
* Aggref
*
* The aggregate's args list is a targetlist, ie, a list of TargetEntry nodes
- * (before Postgres 9.0 it was just bare expressions). The non-resjunk TLEs
+ * (before Postgres 9.0 it was just bare expressions). The non-resjunk TLEs
* represent the aggregate's regular arguments (if any) and resjunk TLEs can
* be added at the end to represent ORDER BY expressions that are not also
* arguments. As in a top-level Query, the TLEs can be marked with
* ressortgroupref indexes to let them be referenced by SortGroupClause
* entries in the aggorder and/or aggdistinct lists. This represents ORDER BY
* and DISTINCT operations to be applied to the aggregate input rows before
- * they are passed to the transition function. The grammar only allows a
+ * they are passed to the transition function. The grammar only allows a
* simple "DISTINCT" specifier for the arguments, but we use the full
* query-level representation to allow more code sharing.
*/
@@ -269,7 +269,7 @@ typedef struct WindowFunc
* entire new modified array value.
*
* If reflowerindexpr = NIL, then we are fetching or storing a single array
- * element at the subscripts given by refupperindexpr. Otherwise we are
+ * element at the subscripts given by refupperindexpr. Otherwise we are
* fetching or storing an array slice, that is a rectangular subarray
* with lower and upper bounds given by the index expressions.
* reflowerindexpr must be the same length as refupperindexpr when it
@@ -428,7 +428,7 @@ typedef struct ScalarArrayOpExpr
*
* Notice the arguments are given as a List. For NOT, of course the list
* must always have exactly one element. For AND and OR, the executor can
- * handle any number of arguments. The parser generally treats AND and OR
+ * handle any number of arguments. The parser generally treats AND and OR
* as binary and so it typically only produces two-element lists, but the
* optimizer will flatten trees of AND and OR nodes to produce longer lists
* when possible. There are also a few special cases where more arguments
@@ -451,7 +451,7 @@ typedef struct BoolExpr
* SubLink
*
* A SubLink represents a subselect appearing in an expression, and in some
- * cases also the combining operator(s) just above it. The subLinkType
+ * cases also the combining operator(s) just above it. The subLinkType
* indicates the form of the expression represented:
* EXISTS_SUBLINK EXISTS(SELECT ...)
* ALL_SUBLINK (lefthand) op ALL (SELECT ...)
@@ -478,7 +478,7 @@ typedef struct BoolExpr
*
* NOTE: in the raw output of gram.y, testexpr contains just the raw form
* of the lefthand expression (if any), and operName is the String name of
- * the combining operator. Also, subselect is a raw parsetree. During parse
+ * the combining operator. Also, subselect is a raw parsetree. During parse
* analysis, the parser transforms testexpr into a complete boolean expression
* that compares the lefthand value(s) to PARAM_SUBLINK nodes representing the
* output columns of the subselect. And subselect is transformed to a Query.
@@ -536,7 +536,7 @@ typedef struct SubLink
* list). In this case testexpr is NULL to avoid duplication.
*
* The planner also derives lists of the values that need to be passed into
- * and out of the subplan. Input values are represented as a list "args" of
+ * and out of the subplan. Input values are represented as a list "args" of
* expressions to be evaluated in the outer-query context (currently these
* args are always just Vars, but in principle they could be any expression).
* The values are assigned to the global PARAM_EXEC params indexed by parParam
@@ -627,7 +627,7 @@ typedef struct FieldSelect
* portion of a column.
*
* A single FieldStore can actually represent updates of several different
- * fields. The parser only generates FieldStores with single-element lists,
+ * fields. The parser only generates FieldStores with single-element lists,
* but the planner will collapse multiple updates of the same base column
* into one FieldStore.
* ----------------
@@ -759,7 +759,7 @@ typedef struct CollateExpr
* and the testexpr in the second case.
*
* In the raw grammar output for the second form, the condition expressions
- * of the WHEN clauses are just the comparison values. Parse analysis
+ * of the WHEN clauses are just the comparison values. Parse analysis
* converts these to valid boolean expressions of the form
* CaseTestExpr '=' compexpr
* where the CaseTestExpr node is a placeholder that emits the correct
@@ -833,10 +833,10 @@ typedef struct ArrayExpr
*
* Note: the list of fields must have a one-for-one correspondence with
* physical fields of the associated rowtype, although it is okay for it
- * to be shorter than the rowtype. That is, the N'th list element must
+ * to be shorter than the rowtype. That is, the N'th list element must
* match up with the N'th physical field. When the N'th physical field
* is a dropped column (attisdropped) then the N'th list element can just
- * be a NULL constant. (This case can only occur for named composite types,
+ * be a NULL constant. (This case can only occur for named composite types,
* not RECORD types, since those are built from the RowExpr itself rather
* than vice versa.) It is important not to assume that length(args) is
* the same as the number of columns logically present in the rowtype.
@@ -857,7 +857,7 @@ typedef struct RowExpr
* Note: we deliberately do NOT store a typmod. Although a typmod will be
* associated with specific RECORD types at runtime, it will differ for
* different backends, and so cannot safely be stored in stored
- * parsetrees. We must assume typmod -1 for a RowExpr node.
+ * parsetrees. We must assume typmod -1 for a RowExpr node.
*
* We don't need to store a collation either. The result type is
* necessarily composite, and composite types never have a collation.
@@ -943,7 +943,7 @@ typedef struct MinMaxExpr
* 'args' carries all other arguments.
*
* Note: result type/typmod/collation are not stored, but can be deduced
- * from the XmlExprOp. The type/typmod fields are just used for display
+ * from the XmlExprOp. The type/typmod fields are just used for display
* purposes, and are NOT necessarily the true result type of the node.
* (We also use type == InvalidOid to mark a not-yet-parse-analyzed XmlExpr.)
*/
@@ -1029,8 +1029,8 @@ typedef struct BooleanTest
*
* CoerceToDomain represents the operation of coercing a value to a domain
* type. At runtime (and not before) the precise set of constraints to be
- * checked will be determined. If the value passes, it is returned as the
- * result; if not, an error is raised. Note that this is equivalent to
+ * checked will be determined. If the value passes, it is returned as the
+ * result; if not, an error is raised. Note that this is equivalent to
* RelabelType in the scenario where no constraints are applied.
*/
typedef struct CoerceToDomain
@@ -1046,7 +1046,7 @@ typedef struct CoerceToDomain
/*
* Placeholder node for the value to be processed by a domain's check
- * constraint. This is effectively like a Param, but can be implemented more
+ * constraint. This is effectively like a Param, but can be implemented more
* simply since we need only one replacement value at a time.
*
* Note: the typeId/typeMod/collation will be set from the domain's base type,
@@ -1066,7 +1066,7 @@ typedef struct CoerceToDomainValue
* Placeholder node for a DEFAULT marker in an INSERT or UPDATE command.
*
* This is not an executable expression: it must be replaced by the actual
- * column default expression during rewriting. But it is convenient to
+ * column default expression during rewriting. But it is convenient to
* treat it as an expression node during parsing and rewriting.
*/
typedef struct SetToDefault
@@ -1108,14 +1108,14 @@ typedef struct CurrentOfExpr
* single expression tree.
*
* In a SELECT's targetlist, resno should always be equal to the item's
- * ordinal position (counting from 1). However, in an INSERT or UPDATE
+ * ordinal position (counting from 1). However, in an INSERT or UPDATE
* targetlist, resno represents the attribute number of the destination
* column for the item; so there may be missing or out-of-order resnos.
* It is even legal to have duplicated resnos; consider
* UPDATE table SET arraycol[1] = ..., arraycol[2] = ..., ...
* The two meanings come together in the executor, because the planner
* transforms INSERT/UPDATE tlists into a normalized form with exactly
- * one entry for each column of the destination table. Before that's
+ * one entry for each column of the destination table. Before that's
* happened, however, it is risky to assume that resno == position.
* Generally get_tle_by_resno() should be used rather than list_nth()
* to fetch tlist entries by resno, and only in SELECT should you assume
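
A sketch of the recommended lookup, assuming an AttrNumber attno in hand:

    TargetEntry *tle = get_tle_by_resno(tlist, attno);

    if (tle == NULL)            /* resnos may be missing, so check */
        elog(ERROR, "attribute %d not found in targetlist", attno);
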
@@ -1124,25 +1124,25 @@ typedef struct CurrentOfExpr
* resname is required to represent the correct column name in non-resjunk
* entries of top-level SELECT targetlists, since it will be used as the
* column title sent to the frontend. In most other contexts it is only
- * a debugging aid, and may be wrong or even NULL. (In particular, it may
+ * a debugging aid, and may be wrong or even NULL. (In particular, it may
* be wrong in a tlist from a stored rule, if the referenced column has been
- * renamed by ALTER TABLE since the rule was made. Also, the planner tends
+ * renamed by ALTER TABLE since the rule was made. Also, the planner tends
* to store NULL rather than look up a valid name for tlist entries in
* non-toplevel plan nodes.) In resjunk entries, resname should be either
* a specific system-generated name (such as "ctid") or NULL; anything else
* risks confusing ExecGetJunkAttribute!
*
* ressortgroupref is used in the representation of ORDER BY, GROUP BY, and
- * DISTINCT items. Targetlist entries with ressortgroupref=0 are not
+ * DISTINCT items. Targetlist entries with ressortgroupref=0 are not
* sort/group items. If ressortgroupref>0, then this item is an ORDER BY,
- * GROUP BY, and/or DISTINCT target value. No two entries in a targetlist
+ * GROUP BY, and/or DISTINCT target value. No two entries in a targetlist
* may have the same nonzero ressortgroupref --- but there is no particular
* meaning to the nonzero values, except as tags. (For example, one must
* not assume that lower ressortgroupref means a more significant sort key.)
* The order of the associated SortGroupClause lists determine the semantics.
*
* resorigtbl/resorigcol identify the source of the column, if it is a
- * simple reference to a column of a base table (or view). If it is not
+ * simple reference to a column of a base table (or view). If it is not
* a simple reference, these fields are zeroes.
*
* If resjunk is true then the column is a working column (such as a sort key)
@@ -1182,7 +1182,7 @@ typedef struct TargetEntry
*
* NOTE: the qualification expressions present in JoinExpr nodes are
* *in addition to* the query's main WHERE clause, which appears as the
- * qual of the top-level FromExpr. The reason for associating quals with
+ * qual of the top-level FromExpr. The reason for associating quals with
* specific nodes in the jointree is that the position of a qual is critical
* when outer joins are present. (If we enforce a qual too soon or too late,
* that may cause the outer join to produce the wrong set of NULL-extended
@@ -1218,7 +1218,7 @@ typedef struct RangeTblRef
* If he writes NATURAL then parse analysis generates the equivalent USING()
* list, and from that fills in "quals" with the right equality comparisons.
* If he writes USING() then "quals" is filled with equality comparisons.
- * If he writes ON() then only "quals" is set. Note that NATURAL/USING
+ * If he writes ON() then only "quals" is set. Note that NATURAL/USING
* are not equivalent to ON() since they also affect the output column list.
*
* alias is an Alias node representing the AS alias-clause attached to the
@@ -1227,7 +1227,7 @@ typedef struct RangeTblRef
* restricts visibility of the tables/columns inside it.
*
* During parse analysis, an RTE is created for the Join, and its index
- * is filled into rtindex. This RTE is present mainly so that Vars can
+ * is filled into rtindex. This RTE is present mainly so that Vars can
* be created that refer to the outputs of the join. The planner sometimes
* generates JoinExprs internally; these can have rtindex = 0 if there are
* no join alias variables referencing such joins.
diff --git a/src/include/nodes/relation.h b/src/include/nodes/relation.h
index 14fcb5c0e97..681a2c8a6ad 100644
--- a/src/include/nodes/relation.h
+++ b/src/include/nodes/relation.h
@@ -117,7 +117,7 @@ typedef struct PlannerGlobal
*
* This struct is conventionally called "root" in all the planner routines.
* It holds links to all of the planner's working state, in addition to the
- * original Query. Note that at present the planner extensively modifies
+ * original Query. Note that at present the planner extensively modifies
* the passed-in Query data structure; someday that should stop.
*----------
*/
@@ -135,7 +135,7 @@ typedef struct PlannerInfo
/*
* simple_rel_array holds pointers to "base rels" and "other rels" (see
- * comments for RelOptInfo for more info). It is indexed by rangetable
+ * comments for RelOptInfo for more info). It is indexed by rangetable
* index (so entry 0 is always wasted). Entries can be NULL when an RTE
* does not correspond to a base relation, such as a join RTE or an
* unreferenced view RTE; or if the RelOptInfo hasn't been made yet.
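
So a safe lookup checks both the bounds and NULL-ness; a minimal sketch:

    RelOptInfo *rel = NULL;

    /* entry 0 is wasted; entries can be NULL for join/unreferenced RTEs */
    if (rti > 0 && rti < root->simple_rel_array_size)
        rel = root->simple_rel_array[rti];
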
@@ -156,7 +156,7 @@ typedef struct PlannerInfo
* considered in this planning run. For small problems we just scan the
* list to do lookups, but when there are many join relations we build a
* hash table for faster lookups. The hash table is present and valid
- * when join_rel_hash is not NULL. Note that we still maintain the list
+ * when join_rel_hash is not NULL. Note that we still maintain the list
* even when using the hash table for lookups; this simplifies life for
* GEQO.
*/
@@ -278,7 +278,7 @@ typedef struct PlannerInfo
* Currently the only kind of otherrels are those made for member relations
* of an "append relation", that is an inheritance set or UNION ALL subquery.
* An append relation has a parent RTE that is a base rel, which represents
- * the entire append relation. The member RTEs are otherrels. The parent
+ * the entire append relation. The member RTEs are otherrels. The parent
* is present in the query join tree but the members are not. The member
* RTEs and otherrels are used to plan the scans of the individual tables or
* subqueries of the append set; then the parent baserel is given Append
@@ -290,7 +290,7 @@ typedef struct PlannerInfo
* alias Vars are expanded to non-aliased form during preprocess_expression.
*
* Parts of this data structure are specific to various scan and join
- * mechanisms. It didn't seem worth creating new node types for them.
+ * mechanisms. It didn't seem worth creating new node types for them.
*
* relids - Set of base-relation identifiers; it is a base relation
* if there is just one, a join relation if more than one
@@ -515,7 +515,7 @@ typedef struct IndexOptInfo
* equal to each other, where "equal" is according to the rules of the btree
* operator family(s) shown in ec_opfamilies, as well as the collation shown
* by ec_collation. (We restrict an EC to contain only equalities whose
- * operators belong to the same set of opfamilies. This could probably be
+ * operators belong to the same set of opfamilies. This could probably be
* relaxed, but for now it's not worth the trouble, since nearly all equality
* operators belong to only one btree opclass anyway. Similarly, we suppose
* that all or none of the input datatypes are collatable, so that a single
@@ -525,7 +525,7 @@ typedef struct IndexOptInfo
* us represent knowledge about different sort orderings being equivalent.
* Since every PathKey must reference an EquivalenceClass, we will end up
* with single-member EquivalenceClasses whenever a sort key expression has
- * not been equivalenced to anything else. It is also possible that such an
+ * not been equivalenced to anything else. It is also possible that such an
* EquivalenceClass will contain a volatile expression ("ORDER BY random()"),
* which is a case that can't arise otherwise since clauses containing
* volatile functions are never considered mergejoinable. We mark such
@@ -538,7 +538,7 @@ typedef struct IndexOptInfo
* We allow equality clauses appearing below the nullable side of an outer join
* to form EquivalenceClasses, but these have a slightly different meaning:
* the included values might be all NULL rather than all the same non-null
- * values. See src/backend/optimizer/README for more on that point.
+ * values. See src/backend/optimizer/README for more on that point.
*
* NB: if ec_merged isn't NULL, this class has been merged into another, and
* should be ignored in favor of using the pointed-to class.
@@ -587,7 +587,7 @@ typedef struct EquivalenceClass
*
* em_datatype is usually the same as exprType(em_expr), but can be
* different when dealing with a binary-compatible opfamily; in particular
- * anyarray_ops would never work without this. Use em_datatype when
+ * anyarray_ops would never work without this. Use em_datatype when
* looking up a specific btree operator to work with this expression.
*/
typedef struct EquivalenceMember
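
For example, an equality-operator lookup for a member should go through em_datatype, sketched here with the standard lsyscache helper:

    Oid     eq_op = get_opfamily_member(opfamily,
                                        em->em_datatype,
                                        em->em_datatype,
                                        BTEqualStrategyNumber);

    if (!OidIsValid(eq_op))
        elog(ERROR, "could not find equality operator for EC member");
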
@@ -616,7 +616,7 @@ typedef struct EquivalenceMember
* information.)
*
* Note: pk_strategy is either BTLessStrategyNumber (for ASC) or
- * BTGreaterStrategyNumber (for DESC). We assume that all ordering-capable
+ * BTGreaterStrategyNumber (for DESC). We assume that all ordering-capable
* index types will use btree-compatible strategy numbers.
*/
@@ -728,11 +728,11 @@ typedef struct IndexPath
*
* The individual indexscans are represented by IndexPath nodes, and any
* logic on top of them is represented by a tree of BitmapAndPath and
- * BitmapOrPath nodes. Notice that we can use the same IndexPath node both
+ * BitmapOrPath nodes. Notice that we can use the same IndexPath node both
* to represent a regular IndexScan plan, and as the child of a BitmapHeapPath
* that represents scanning the same index using a BitmapIndexScan. The
* startup_cost and total_cost figures of an IndexPath always represent the
- * costs to use it as a regular IndexScan. The costs of a BitmapIndexScan
+ * costs to use it as a regular IndexScan. The costs of a BitmapIndexScan
* can be computed using the IndexPath's indextotalcost and indexselectivity.
*
* BitmapHeapPaths can be nestloop inner indexscans. The isjoininner and
@@ -855,7 +855,7 @@ typedef struct MaterialPath
*
* This is unlike the other Path nodes in that it can actually generate
* different plans: either hash-based or sort-based implementation, or a
- * no-op if the input path can be proven distinct already. The decision
+ * no-op if the input path can be proven distinct already. The decision
* is sufficiently localized that it's not worth having separate Path node
* types. (Note: in the no-op case, we could eliminate the UniquePath node
* entirely and just return the subpath; but it's convenient to have a
@@ -981,7 +981,7 @@ typedef struct HashPath
* When we construct a join rel that includes all the base rels referenced
* in a multi-relation restriction clause, we place that clause into the
* joinrestrictinfo lists of paths for the join rel, if neither left nor
- * right sub-path includes all base rels referenced in the clause. The clause
+ * right sub-path includes all base rels referenced in the clause. The clause
* will be applied at that join level, and will not propagate any further up
* the join tree. (Note: the "predicate migration" code was once intended to
* push restriction clauses up and down the plan tree based on evaluation
@@ -1021,13 +1021,13 @@ typedef struct HashPath
* that appeared elsewhere in the tree and were pushed down to the join rel
* because they used no other rels. That's what the is_pushed_down flag is
* for; it tells us that a qual is not an OUTER JOIN qual for the set of base
- * rels listed in required_relids. A clause that originally came from WHERE
+ * rels listed in required_relids. A clause that originally came from WHERE
* or an INNER JOIN condition will *always* have its is_pushed_down flag set.
* It's possible for an OUTER JOIN clause to be marked is_pushed_down too,
* if we decide that it can be pushed down into the nullable side of the join.
* In that case it acts as a plain filter qual for wherever it gets evaluated.
* (In short, is_pushed_down is only false for non-degenerate outer join
- * conditions. Possibly we should rename it to reflect that meaning?)
+ * conditions. Possibly we should rename it to reflect that meaning?)
*
* RestrictInfo nodes also contain an outerjoin_delayed flag, which is true
* if the clause's applicability must be delayed due to any outer joins
@@ -1037,10 +1037,10 @@ typedef struct HashPath
* forced null by some outer join below the clause. outerjoin_delayed = true
* is subtly different from nullable_relids != NULL: a clause might reference
* some nullable rels and yet not be outerjoin_delayed because it also
- * references all the other rels of the outer join(s). A clause that is not
+ * references all the other rels of the outer join(s). A clause that is not
* outerjoin_delayed can be enforced anywhere it is computable.
*
- * In general, the referenced clause might be arbitrarily complex. The
+ * In general, the referenced clause might be arbitrarily complex. The
* kinds of clauses we can handle as indexscan quals, mergejoin clauses,
* or hashjoin clauses are limited (e.g., no volatile functions). The code
* for each kind of path is responsible for identifying the restrict clauses
@@ -1065,7 +1065,7 @@ typedef struct HashPath
*
* The pseudoconstant flag is set true if the clause contains no Vars of
* the current query level and no volatile functions. Such a clause can be
- * pulled out and used as a one-time qual in a gating Result node. We keep
+ * pulled out and used as a one-time qual in a gating Result node. We keep
* pseudoconstant clauses in the same lists as other RestrictInfos so that
* the regular clause-pushing machinery can assign them to the correct join
* level, but they need to be treated specially for cost and selectivity
@@ -1075,7 +1075,7 @@ typedef struct HashPath
*
* When join clauses are generated from EquivalenceClasses, there may be
* several equally valid ways to enforce join equivalence, of which we need
- * apply only one. We mark clauses of this kind by setting parent_ec to
+ * apply only one. We mark clauses of this kind by setting parent_ec to
* point to the generating EquivalenceClass. Multiple clauses with the same
* parent_ec in the same join are redundant.
*/
@@ -1201,8 +1201,8 @@ typedef struct InnerIndexscanInfo
/*
* Placeholder node for an expression to be evaluated below the top level
- * of a plan tree. This is used during planning to represent the contained
- * expression. At the end of the planning process it is replaced by either
+ * of a plan tree. This is used during planning to represent the contained
+ * expression. At the end of the planning process it is replaced by either
* the contained expression or a Var referring to a lower-level evaluation of
* the contained expression. Typically the evaluation occurs below an outer
* join, and Var references above the outer join might thereby yield NULL
@@ -1226,9 +1226,9 @@ typedef struct PlaceHolderVar
* "Special join" info.
*
* One-sided outer joins constrain the order of joining partially but not
- * completely. We flatten such joins into the planner's top-level list of
+ * completely. We flatten such joins into the planner's top-level list of
* relations to join, but record information about each outer join in a
- * SpecialJoinInfo struct. These structs are kept in the PlannerInfo node's
+ * SpecialJoinInfo struct. These structs are kept in the PlannerInfo node's
* join_info_list.
*
* Similarly, semijoins and antijoins created by flattening IN (subselect)
@@ -1256,7 +1256,7 @@ typedef struct PlaceHolderVar
* to be evaluated after this join is formed (because it references the RHS).
* Any outer joins that have such a clause and this join in their RHS cannot
* commute with this join, because that would leave noplace to check the
- * pushed-down clause. (We don't track this for FULL JOINs, either.)
+ * pushed-down clause. (We don't track this for FULL JOINs, either.)
*
* join_quals is an implicit-AND list of the quals syntactically associated
* with the join (they may or may not end up being applied at the join level).
@@ -1341,7 +1341,7 @@ typedef struct AppendRelInfo
/*
* For an inheritance appendrel, the parent and child are both regular
* relations, and we store their rowtype OIDs here for use in translating
- * whole-row Vars. For a UNION-ALL appendrel, the parent and child are
+ * whole-row Vars. For a UNION-ALL appendrel, the parent and child are
* both subqueries with no named rowtype, and we store InvalidOid here.
*/
Oid parent_reltype; /* OID of parent's composite type */
@@ -1353,14 +1353,14 @@ typedef struct AppendRelInfo
* used to translate Vars referencing the parent rel into references to
* the child. A list element is NULL if it corresponds to a dropped
* column of the parent (this is only possible for inheritance cases, not
- * UNION ALL). The list elements are always simple Vars for inheritance
+ * UNION ALL). The list elements are always simple Vars for inheritance
* cases, but can be arbitrary expressions in UNION ALL cases.
*
* Notice we only store entries for user columns (attno > 0). Whole-row
* Vars are special-cased, and system columns (attno < 0) need no special
* translation since their attnos are the same for all tables.
*
- * Caution: the Vars have varlevelsup = 0. Be careful to adjust as needed
+ * Caution: the Vars have varlevelsup = 0. Be careful to adjust as needed
* when copying into a subquery.
*/
List *translated_vars; /* Expressions in the child's Vars */
@@ -1377,7 +1377,7 @@ typedef struct AppendRelInfo
* For each distinct placeholder expression generated during planning, we
* store a PlaceHolderInfo node in the PlannerInfo node's placeholder_list.
* This stores info that is needed centrally rather than in each copy of the
- * PlaceHolderVar. The phid fields identify which PlaceHolderInfo goes with
+ * PlaceHolderVar. The phid fields identify which PlaceHolderInfo goes with
* each PlaceHolderVar. Note that phid is unique throughout a planner run,
* not just within a query level --- this is so that we need not reassign ID's
* when pulling a subquery into its parent.
diff --git a/src/include/nodes/tidbitmap.h b/src/include/nodes/tidbitmap.h
index 1c32b777d8b..c9cdc1defc7 100644
--- a/src/include/nodes/tidbitmap.h
+++ b/src/include/nodes/tidbitmap.h
@@ -26,7 +26,7 @@
/*
- * Actual bitmap representation is private to tidbitmap.c. Callers can
+ * Actual bitmap representation is private to tidbitmap.c. Callers can
* do IsA(x, TIDBitmap) on it, but nothing else.
*/
typedef struct TIDBitmap TIDBitmap;
diff --git a/src/include/nodes/value.h b/src/include/nodes/value.h
index d18f36eddc1..25f6952b928 100644
--- a/src/include/nodes/value.h
+++ b/src/include/nodes/value.h
@@ -29,7 +29,7 @@
*
* (Before Postgres 7.0, we used a double to represent T_Float,
* but that creates loss-of-precision problems when the value is
- * ultimately destined to be converted to NUMERIC. Since Value nodes
+ * ultimately destined to be converted to NUMERIC. Since Value nodes
* are only used in the parsing process, not for runtime data, it's
* better to use the more general representation.)
*
diff --git a/src/include/parser/gramparse.h b/src/include/parser/gramparse.h
index 6657e07ca76..4f3dc6e08fc 100644
--- a/src/include/parser/gramparse.h
+++ b/src/include/parser/gramparse.h
@@ -29,7 +29,7 @@
#include "parser/gram.h"
/*
- * The YY_EXTRA data that a flex scanner allows us to pass around. Private
+ * The YY_EXTRA data that a flex scanner allows us to pass around. Private
* state needed for raw parsing/lexing goes here.
*/
typedef struct base_yy_extra_type
diff --git a/src/include/parser/parse_node.h b/src/include/parser/parse_node.h
index 0ca79145585..10e38e9799c 100644
--- a/src/include/parser/parse_node.h
+++ b/src/include/parser/parse_node.h
@@ -38,7 +38,7 @@ typedef Node *(*CoerceParamHook) (ParseState *pstate, Param *param,
* links to current parse state of outer query.
*
* p_sourcetext: source string that generated the raw parsetree being
- * analyzed, or NULL if not available. (The string is used only to
+ * analyzed, or NULL if not available. (The string is used only to
* generate cursor positions in error messages: we need it to convert
* byte-wise locations in parse structures to character-wise cursor
* positions.)
@@ -72,7 +72,7 @@ typedef Node *(*CoerceParamHook) (ParseState *pstate, Param *param,
* to make an RTE before you can access a CTE.
*
* p_future_ctes: list of CommonTableExprs (WITH items) that are not yet
- * visible due to scope rules. This is used to help improve error messages.
+ * visible due to scope rules. This is used to help improve error messages.
*
* p_parent_cte: CommonTableExpr that immediately contains the current query,
* if any.
diff --git a/src/include/parser/scanner.h b/src/include/parser/scanner.h
index 2a88e970a0e..38c1873e12c 100644
--- a/src/include/parser/scanner.h
+++ b/src/include/parser/scanner.h
@@ -4,7 +4,7 @@
* API for the core scanner (flex machine)
*
* The core scanner is also used by PL/pgsql, so we provide a public API
- * for it. However, the rest of the backend is only expected to use the
+ * for it. However, the rest of the backend is only expected to use the
* higher-level API provided by parser.h.
*
*
@@ -58,7 +58,7 @@ typedef union core_YYSTYPE
/*
* The YY_EXTRA data that a flex scanner allows us to pass around.
- * Private state needed by the core scanner goes here. Note that the actual
+ * Private state needed by the core scanner goes here. Note that the actual
* yy_extra struct may be larger and have this as its first component, thus
* allowing the calling parser to keep some fields of its own in YY_EXTRA.
*/
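
That is, a calling parser embeds the core state as its first member, so a pointer to its own struct can double as a pointer to the core one; a sketch modeled on gramparse.h's base_yy_extra_type (my_yy_extra_type is illustrative):

    typedef struct my_yy_extra_type
    {
        core_yy_extra_type core_yy_extra;   /* MUST be first */
        bool        have_lookahead;         /* caller-private fields follow */
    } my_yy_extra_type;
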
diff --git a/src/include/pg_config_manual.h b/src/include/pg_config_manual.h
index 6c8e31269c5..fe6c8510550 100644
--- a/src/include/pg_config_manual.h
+++ b/src/include/pg_config_manual.h
@@ -3,7 +3,7 @@
*
* This file contains various configuration symbols and limits. In
* all cases, changing them is only useful in very rare situations or
- * for developers. If you edit any of these, be sure to do a *full*
+ * for developers. If you edit any of these, be sure to do a *full*
* rebuild (and an initdb if noted).
*
* src/include/pg_config_manual.h
@@ -55,8 +55,8 @@
/*
* Define this if you want to allow the lo_import and lo_export SQL
- * functions to be executed by ordinary users. By default these
- * functions are only available to the Postgres superuser. CAUTION:
+ * functions to be executed by ordinary users. By default these
+ * functions are only available to the Postgres superuser. CAUTION:
* These functions are SECURITY HOLES since they can read and write
* any file that the PostgreSQL server has permission to access. If
* you turn this on, don't say we didn't warn you.
@@ -135,7 +135,7 @@
/*
* This is the default directory in which AF_UNIX socket files are
- * placed. Caution: changing this risks breaking your existing client
+ * placed. Caution: changing this risks breaking your existing client
* applications, which are likely to continue to look in the old
* directory. But if you just hate the idea of sockets in /tmp,
* here's where to twiddle it. You can also override this at runtime
@@ -148,7 +148,7 @@
* MAX_RANDOM_VALUE. Currently, all known implementations yield
* 0..2^31-1, so we just hardwire this constant. We could do a
* configure test if it proves to be necessary. CAUTION: Think not to
- * replace this with RAND_MAX. RAND_MAX defines the maximum value of
+ * replace this with RAND_MAX. RAND_MAX defines the maximum value of
* the older rand() function, which is often different from --- and
* considerably inferior to --- random().
*/
@@ -188,7 +188,7 @@
/*
* Define this to check memory allocation errors (scribbling on more
- * bytes than were allocated). Right now, this gets defined
+ * bytes than were allocated). Right now, this gets defined
* automatically if --enable-cassert.
*/
#ifdef USE_ASSERT_CHECKING
@@ -198,7 +198,7 @@
/*
* Define this to cause palloc()'d memory to be filled with random data, to
* facilitate catching code that depends on the contents of uninitialized
- * memory. Caution: this is horrendously expensive.
+ * memory. Caution: this is horrendously expensive.
*/
/* #define RANDOMIZE_ALLOCATED_MEMORY */
diff --git a/src/include/pgstat.h b/src/include/pgstat.h
index 5446fa04409..56479469d0a 100644
--- a/src/include/pgstat.h
+++ b/src/include/pgstat.h
@@ -120,7 +120,7 @@ typedef enum PgStat_Single_Reset_Type
*
* Many of the event counters are nontransactional, ie, we count events
* in committed and aborted transactions alike. For these, we just count
- * directly in the PgStat_TableStatus. However, delta_live_tuples,
+ * directly in the PgStat_TableStatus. However, delta_live_tuples,
* delta_dead_tuples, and changed_tuples must be derived from event counts
* with awareness of whether the transaction or subtransaction committed or
* aborted. Hence, we also keep a stack of per-(sub)transaction status
diff --git a/src/include/port.h b/src/include/port.h
index 39d37c9a8c5..e7f37559dde 100644
--- a/src/include/port.h
+++ b/src/include/port.h
@@ -159,7 +159,7 @@ extern unsigned char pg_ascii_tolower(unsigned char ch);
/*
* Versions of libintl >= 0.13 try to replace printf() and friends with
- * macros to their own versions that understand the %$ format. We do the
+ * macros to their own versions that understand the %$ format. We do the
* same, so disable their macros, if they exist.
*/
#ifdef vsnprintf
diff --git a/src/include/port/linux.h b/src/include/port/linux.h
index bcaa42dc4ed..7a6e46cdbb7 100644
--- a/src/include/port/linux.h
+++ b/src/include/port/linux.h
@@ -4,7 +4,7 @@
* As of July 2007, all known versions of the Linux kernel will sometimes
* return EIDRM for a shmctl() operation when EINVAL is correct (it happens
* when the low-order 15 bits of the supplied shm ID match the slot number
- * assigned to a newer shmem segment). We deal with this by assuming that
+ * assigned to a newer shmem segment). We deal with this by assuming that
* EIDRM means EINVAL in PGSharedMemoryIsInUse(). This is reasonably safe
* since in fact Linux has no excuse for ever returning EIDRM; it doesn't
* track removed segments in a way that would allow distinguishing them from
diff --git a/src/include/port/win32.h b/src/include/port/win32.h
index 6a40a322268..d0a2cf72eb0 100644
--- a/src/include/port/win32.h
+++ b/src/include/port/win32.h
@@ -116,7 +116,7 @@
* Signal stuff
*
* For WIN32, there is no wait() call so there are no wait() macros
- * to interpret the return value of system(). Instead, system()
+ * to interpret the return value of system(). Instead, system()
* return values < 0x100 are used for exit() termination, and higher
 * values are used to indicate non-exit() termination, which is
* similar to a unix-style signal exit (think SIGSEGV ==
@@ -154,7 +154,7 @@
* NTSTATUS.H from the Windows NT DDK.
*
* Some day we might want to print descriptions for the most common
- * exceptions, rather than printing an include file name. We could use
+ * exceptions, rather than printing an include file name. We could use
* RtlNtStatusToDosError() and pass to FormatMessage(), which can print
* the text of error values, but MinGW does not support
* RtlNtStatusToDosError().
diff --git a/src/include/portability/instr_time.h b/src/include/portability/instr_time.h
index af400b1dca3..5e2e7331390 100644
--- a/src/include/portability/instr_time.h
+++ b/src/include/portability/instr_time.h
@@ -10,8 +10,8 @@
* high-precision-timing APIs on yet other platforms.
*
* The basic data type is instr_time, which all callers should treat as an
- * opaque typedef. instr_time can store either an absolute time (of
- * unspecified reference time) or an interval. The operations provided
+ * opaque typedef. instr_time can store either an absolute time (of
+ * unspecified reference time) or an interval. The operations provided
* for it are:
*
* INSTR_TIME_IS_ZERO(t) is t equal to zero?
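
Typical usage pairs these operations as follows (a sketch: do_some_work is a stand-in, and the SUBTRACT/GET_MILLISEC macros are this header's companions to the ones listed):

    instr_time  start_t, end_t;

    INSTR_TIME_SET_CURRENT(start_t);
    do_some_work();
    INSTR_TIME_SET_CURRENT(end_t);
    INSTR_TIME_SUBTRACT(end_t, start_t);    /* end_t becomes the interval */
    elog(LOG, "elapsed: %0.3f ms", INSTR_TIME_GET_MILLISEC(end_t));
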
diff --git a/src/include/postgres.h b/src/include/postgres.h
index 60a2bdb8475..d14440951e4 100644
--- a/src/include/postgres.h
+++ b/src/include/postgres.h
@@ -33,7 +33,7 @@
* in the backend environment, but are of no interest outside the backend.
*
* Simple type definitions live in c.h, where they are shared with
- * postgres_fe.h. We do that since those type definitions are needed by
+ * postgres_fe.h. We do that since those type definitions are needed by
* frontend modules that want to deal with binary data transmission to or
* from the backend. Type definitions in this file should be for
* representations that never escape the backend, such as Datum or
@@ -55,7 +55,7 @@
/*
* struct varatt_external is a "TOAST pointer", that is, the information
- * needed to fetch a stored-out-of-line Datum. The data is compressed
+ * needed to fetch a stored-out-of-line Datum. The data is compressed
* if and only if va_extsize < va_rawsize - VARHDRSZ. This struct must not
* contain any padding, because we sometimes compare pointers using memcmp.
*
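
The compression rule stated above can be tested directly from the struct's fields (a sketch; the real source wraps this in a macro):

    /* true if the out-of-line data is stored compressed */
    #define MY_VARATT_IS_COMPRESSED(toast_pointer) \
        ((toast_pointer).va_extsize < (toast_pointer).va_rawsize - VARHDRSZ)
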
@@ -127,7 +127,7 @@ typedef struct
* The "xxx" bits are the length field (which includes itself in all cases).
* In the big-endian case we mask to extract the length, in the little-endian
* case we shift. Note that in both cases the flag bits are in the physically
- * first byte. Also, it is not possible for a 1-byte length word to be zero;
+ * first byte. Also, it is not possible for a 1-byte length word to be zero;
* this lets us disambiguate alignment padding bytes from the start of an
* unaligned datum. (We now *require* pad bytes to be filled with zero!)
*/
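
Sketched concretely for both byte orders (simplified from the real VARSIZE_1B macros; the extracted length counts the header byte itself):

    /* big-endian: flag bits are high, so mask off the low 7 bits */
    #define MY_VARSIZE_1B_BE(hdr)   ((hdr) & 0x7F)
    /* little-endian: the low-order bit is the flag, so shift it out */
    #define MY_VARSIZE_1B_LE(hdr)   (((hdr) >> 1) & 0x7F)
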
diff --git a/src/include/postgres_ext.h b/src/include/postgres_ext.h
index b6ebb7aac3f..bb37541a98c 100644
--- a/src/include/postgres_ext.h
+++ b/src/include/postgres_ext.h
@@ -7,7 +7,7 @@
* For example, the Oid type is part of the API of libpq and other libraries.
*
* Declarations which are specific to a particular interface should
- * go in the header file for that interface (such as libpq-fe.h). This
+ * go in the header file for that interface (such as libpq-fe.h). This
* file is only for fundamental Postgres declarations.
*
* User-written C functions don't count as "external to Postgres."
diff --git a/src/include/postmaster/syslogger.h b/src/include/postmaster/syslogger.h
index 97ab04257c8..3f05d7bc8f8 100644
--- a/src/include/postmaster/syslogger.h
+++ b/src/include/postmaster/syslogger.h
@@ -20,7 +20,7 @@
* here is to divide long messages into chunks that are not more than
* PIPE_BUF bytes long, which according to POSIX spec must be written into
* the pipe atomically. The pipe reader then uses the protocol headers to
- * reassemble the parts of a message into a single string. The reader can
+ * reassemble the parts of a message into a single string. The reader can
* also cope with non-protocol data coming down the pipe, though we cannot
* guarantee long strings won't get split apart.
*
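
The writing side of that protocol reduces to a loop like the following sketch (error handling elided; the real code also stamps each chunk with a protocol header):

    #include <limits.h>     /* PIPE_BUF */
    #include <unistd.h>     /* write() */

    static void
    write_chunked(int fd, const char *data, size_t len)
    {
        while (len > 0)
        {
            /* each write of <= PIPE_BUF bytes is atomic per POSIX */
            size_t  chunk = (len > PIPE_BUF) ? PIPE_BUF : len;

            if (write(fd, data, chunk) < 0)
                break;          /* real code would retry or report */
            data += chunk;
            len -= chunk;
        }
    }
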
diff --git a/src/include/regex/regcustom.h b/src/include/regex/regcustom.h
index 04849f291f3..dbb461a0ce7 100644
--- a/src/include/regex/regcustom.h
+++ b/src/include/regex/regcustom.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
+ * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
*
* Development of this software was funded, in part, by Cray Research Inc.,
* UUNET Communications Services Inc., Sun Microsystems Inc., and Scriptics
diff --git a/src/include/regex/regex.h b/src/include/regex/regex.h
index 2c7fa4df46f..3020b0ff0f7 100644
--- a/src/include/regex/regex.h
+++ b/src/include/regex/regex.h
@@ -3,7 +3,7 @@
/*
* regular expressions
*
- * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
+ * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
*
* Development of this software was funded, in part, by Cray Research Inc.,
* UUNET Communications Services Inc., Sun Microsystems Inc., and Scriptics
diff --git a/src/include/regex/regguts.h b/src/include/regex/regguts.h
index 55e1696fe8c..a9b9112ee49 100644
--- a/src/include/regex/regguts.h
+++ b/src/include/regex/regguts.h
@@ -1,7 +1,7 @@
/*
* Internal interface definitions, etc., for the reg package
*
- * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
+ * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
*
* Development of this software was funded, in part, by Cray Research Inc.,
* UUNET Communications Services Inc., Sun Microsystems Inc., and Scriptics
@@ -126,8 +126,8 @@
/*
- * We dissect a chr into byts for colormap table indexing. Here we define
- * a byt, which will be the same as a byte on most machines... The exact
+ * We dissect a chr into byts for colormap table indexing. Here we define
+ * a byt, which will be the same as a byte on most machines... The exact
* size of a byt is not critical, but about 8 bits is good, and extraction
* of 8-bit chunks is sometimes especially fast.
*/
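
In code form, the dissection is just shift-and-mask macros, essentially:

    #define BYTBITS 8                   /* width of a byt */
    #define BYTTAB  (1 << BYTBITS)      /* size of a byt-indexed table */
    #define BYTMASK (BYTTAB - 1)

    #define B0(c)   ((c) & BYTMASK)                 /* lowest byt of a chr */
    #define B1(c)   (((c) >> BYTBITS) & BYTMASK)    /* next byt up */
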
@@ -156,9 +156,9 @@ typedef int pcolor; /* what color promotes to */
/*
* A colormap is a tree -- more precisely, a DAG -- indexed at each level
- * by a byt of the chr, to map the chr to a color efficiently. Because
+ * by a byt of the chr, to map the chr to a color efficiently. Because
* lower sections of the tree can be shared, it can exploit the usual
- * sparseness of such a mapping table. The tree is always NBYTS levels
+ * sparseness of such a mapping table. The tree is always NBYTS levels
* deep (in the past it was shallower during construction but was "filled"
* to full depth at the end of that); areas that are unaltered as yet point
* to "fill blocks" which are entirely WHITE in color.
diff --git a/src/include/replication/walprotocol.h b/src/include/replication/walprotocol.h
index 94146679fa6..ecc0127de08 100644
--- a/src/include/replication/walprotocol.h
+++ b/src/include/replication/walprotocol.h
@@ -81,7 +81,7 @@ typedef struct
} StandbyHSFeedbackMessage;
/*
- * Maximum data payload in a WAL data message. Must be >= XLOG_BLCKSZ.
+ * Maximum data payload in a WAL data message. Must be >= XLOG_BLCKSZ.
*
* We don't have a good idea of what a good value would be; there's some
* overhead per message in both walsender and walreceiver, but on the other
diff --git a/src/include/replication/walreceiver.h b/src/include/replication/walreceiver.h
index 816fa5ba72a..239e5c7991b 100644
--- a/src/include/replication/walreceiver.h
+++ b/src/include/replication/walreceiver.h
@@ -69,7 +69,7 @@ typedef struct
/*
* latestChunkStart is the starting byte position of the current "batch"
* of received WAL. It's actually the same as the previous value of
- * receivedUpto before the last flush to disk. Startup process can use
+ * receivedUpto before the last flush to disk. Startup process can use
* this to detect whether it's keeping up or not.
*/
XLogRecPtr latestChunkStart;
diff --git a/src/include/snowball/header.h b/src/include/snowball/header.h
index 3ede1a01148..bd87177cd70 100644
--- a/src/include/snowball/header.h
+++ b/src/include/snowball/header.h
@@ -4,7 +4,7 @@
* Replacement header file for Snowball stemmer modules
*
* The Snowball stemmer modules do #include "header.h", and think they
- * are including snowball/libstemmer/header.h. We adjust the CPPFLAGS
+ * are including snowball/libstemmer/header.h. We adjust the CPPFLAGS
* so that this file is found instead, and thereby we can modify the
* headers they see. The main point here is to ensure that pg_config.h
* is included before any system headers such as <stdio.h>; without that,
diff --git a/src/include/storage/block.h b/src/include/storage/block.h
index 6f33ed8c83f..935f1dd3ed9 100644
--- a/src/include/storage/block.h
+++ b/src/include/storage/block.h
@@ -37,7 +37,7 @@ typedef uint32 BlockNumber;
/*
* BlockId:
*
- * this is a storage type for BlockNumber. in other words, this type
+ * this is a storage type for BlockNumber. in other words, this type
* is used for on-disk structures (e.g., in HeapTupleData) whereas
* BlockNumber is the type on which calculations are performed (e.g.,
* in access method code).
diff --git a/src/include/storage/buf_internals.h b/src/include/storage/buf_internals.h
index b7d4ea53a4d..dcd1e87322b 100644
--- a/src/include/storage/buf_internals.h
+++ b/src/include/storage/buf_internals.h
@@ -110,9 +110,9 @@ typedef struct buftag
*
* Note: buf_hdr_lock must be held to examine or change the tag, flags,
* usage_count, refcount, or wait_backend_pid fields. buf_id field never
- * changes after initialization, so does not need locking. freeNext is
+ * changes after initialization, so does not need locking. freeNext is
* protected by the BufFreelistLock not buf_hdr_lock. The LWLocks can take
- * care of themselves. The buf_hdr_lock is *not* used to control access to
+ * care of themselves. The buf_hdr_lock is *not* used to control access to
* the data in the buffer!
*
* An exception is that if we have the buffer pinned, its tag can't change
@@ -123,7 +123,7 @@ typedef struct buftag
*
* We can't physically remove items from a disk page if another backend has
* the buffer pinned. Hence, a backend may need to wait for all other pins
- * to go away. This is signaled by storing its own PID into
+ * to go away. This is signaled by storing its own PID into
* wait_backend_pid and setting flag bit BM_PIN_COUNT_WAITER. At present,
* there can be only one such waiter per buffer.
*
diff --git a/src/include/storage/bufpage.h b/src/include/storage/bufpage.h
index 42d6b10ddac..fd0f998a458 100644
--- a/src/include/storage/bufpage.h
+++ b/src/include/storage/bufpage.h
@@ -26,7 +26,7 @@
* disk page is always a slotted page of the form:
*
* +----------------+---------------------------------+
- * | PageHeaderData | linp1 linp2 linp3 ... |
+ * | PageHeaderData | linp1 linp2 linp3 ... |
* +-----------+----+---------------------------------+
* | ... linpN | |
* +-----------+--------------------------------------+
@@ -34,7 +34,7 @@
* | |
* | v pd_upper |
* +-------------+------------------------------------+
- * | | tupleN ... |
+ * | | tupleN ... |
* +-------------+------------------+-----------------+
* | ... tuple3 tuple2 tuple1 | "special space" |
* +--------------------------------+-----------------+
@@ -65,7 +65,7 @@
*
* AM-specific per-page data (if any) is kept in the area marked "special
* space"; each AM has an "opaque" structure defined somewhere that is
- * stored as the page trailer. an access method should always
+ * stored as the page trailer. an access method should always
* initialize its pages with PageInit and then set its own opaque
* fields.
*/
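The arithmetic implied by the page diagram above is just the gap between the two offsets; a sketch, ignoring the sanity checks and the new-line-pointer reservation that the real free-space routines apply:

    #include "postgres.h"
    #include "storage/bufpage.h"

    /* Sketch: pd_lower grows upward past the line pointers, pd_upper
     * grows downward past the tuples; the gap between them is free.
     * Assumes a sane (uncorrupted) page. */
    static Size
    demo_free_space(Page page)
    {
        PageHeader  ph = (PageHeader) page;

        return (Size) (ph->pd_upper - ph->pd_lower);
    }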
@@ -104,7 +104,7 @@ typedef uint16 LocationIndex;
* like a good idea).
*
* pd_prune_xid is a hint field that helps determine whether pruning will be
- * useful. It is currently unused in index pages.
+ * useful. It is currently unused in index pages.
*
* The page version number and page size are packed together into a single
* uint16 field. This is for historical reasons: before PostgreSQL 7.3,
diff --git a/src/include/storage/ipc.h b/src/include/storage/ipc.h
index 26b5ef61838..c5c8a58c79d 100644
--- a/src/include/storage/ipc.h
+++ b/src/include/storage/ipc.h
@@ -4,7 +4,7 @@
* POSTGRES inter-process communication definitions.
*
* This file is misnamed, as it no longer has much of anything directly
- * to do with IPC. The functionality here is concerned with managing
+ * to do with IPC. The functionality here is concerned with managing
* exit-time cleanup for either a postmaster or a backend.
*
*
diff --git a/src/include/storage/itemid.h b/src/include/storage/itemid.h
index 961d2c2f9f9..53f25de8673 100644
--- a/src/include/storage/itemid.h
+++ b/src/include/storage/itemid.h
@@ -31,7 +31,7 @@ typedef struct ItemIdData
typedef ItemIdData *ItemId;
/*
- * lp_flags has these possible states. An UNUSED line pointer is available
+ * lp_flags has these possible states. An UNUSED line pointer is available
* for immediate re-use, the other states are not.
*/
#define LP_UNUSED 0 /* unused (should always have lp_len=0) */
diff --git a/src/include/storage/itemptr.h b/src/include/storage/itemptr.h
index 20221d641e3..c644c368ca3 100644
--- a/src/include/storage/itemptr.h
+++ b/src/include/storage/itemptr.h
@@ -29,7 +29,7 @@
* tuple header on disk, it's very important not to waste space with
* structure padding bytes. The struct is designed to be six bytes long
* (it contains three int16 fields) but a few compilers will pad it to
- * eight bytes unless coerced. We apply appropriate persuasion where
+ * eight bytes unless coerced. We apply appropriate persuasion where
* possible, and to cope with unpersuadable compilers, we try to use
* "SizeOfIptrData" rather than "sizeof(ItemPointerData)" when computing
* on-disk sizes.
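The offsetof-based trick the comment alludes to, in miniature (DemoItemPointer is a stand-in, not the real struct): counting up to the last field and adding its size yields 6 even on a compiler where sizeof() reports a padded 8.

    #include <stddef.h>
    #include <stdint.h>

    typedef struct DemoItemPointer
    {
        uint16_t    bi_hi;      /* block number, high half */
        uint16_t    bi_lo;      /* block number, low half */
        uint16_t    ip_posid;   /* offset number */
    } DemoItemPointer;

    /* 6 on every compiler, even when sizeof(DemoItemPointer) is 8: */
    #define DEMO_SIZE_OF_IPTR \
        (offsetof(DemoItemPointer, ip_posid) + sizeof(uint16_t))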
diff --git a/src/include/storage/lock.h b/src/include/storage/lock.h
index 458b788de2f..7be4b03fdbc 100644
--- a/src/include/storage/lock.h
+++ b/src/include/storage/lock.h
@@ -43,7 +43,7 @@ extern bool Debug_deadlocks;
/*
* Top-level transactions are identified by VirtualTransactionIDs comprising
* the BackendId of the backend running the xact, plus a locally-assigned
- * LocalTransactionId. These are guaranteed unique over the short term,
+ * LocalTransactionId. These are guaranteed unique over the short term,
* but will be reused after a database restart; hence they should never
* be stored on disk.
*
@@ -161,7 +161,7 @@ typedef uint16 LOCKMETHODID;
/*
* LOCKTAG is the key information needed to look up a LOCK item in the
- * lock hashtable. A LOCKTAG value uniquely identifies a lockable object.
+ * lock hashtable. A LOCKTAG value uniquely identifies a lockable object.
*
* The LockTagType enum defines the different kinds of objects we can lock.
* We can handle up to 256 different LockTagTypes.
@@ -214,7 +214,7 @@ typedef struct LOCKTAG
/*
* These macros define how we map logical IDs of lockable objects into
- * the physical fields of LOCKTAG. Use these to set up LOCKTAG values,
+ * the physical fields of LOCKTAG. Use these to set up LOCKTAG values,
* rather than accessing the fields directly. Note multiple eval of target!
*/
#define SET_LOCKTAG_RELATION(locktag,dboid,reloid) \
@@ -326,14 +326,14 @@ typedef struct LOCK
* a PROCLOCK struct.
*
* PROCLOCKTAG is the key information needed to look up a PROCLOCK item in the
- * proclock hashtable. A PROCLOCKTAG value uniquely identifies the combination
+ * proclock hashtable. A PROCLOCKTAG value uniquely identifies the combination
* of a lockable object and a holder/waiter for that object. (We can use
* pointers here because the PROCLOCKTAG need only be unique for the lifespan
* of the PROCLOCK, and it will never outlive the lock or the proc.)
*
* Internally to a backend, it is possible for the same lock to be held
* for different purposes: the backend tracks transaction locks separately
- * from session locks. However, this is not reflected in the shared-memory
+ * from session locks. However, this is not reflected in the shared-memory
* state: we only track which backend(s) hold the lock. This is OK since a
* backend can never block itself.
*
@@ -344,7 +344,7 @@ typedef struct LOCK
* as soon as convenient.
*
* releaseMask is workspace for LockReleaseAll(): it shows the locks due
- * to be released during the current call. This must only be examined or
+ * to be released during the current call. This must only be examined or
* set by the backend owning the PROCLOCK.
*
* Each PROCLOCK object is linked into lists for both the associated LOCK
@@ -377,7 +377,7 @@ typedef struct PROCLOCK
/*
* Each backend also maintains a local hash table with information about each
- * lock it is currently interested in. In particular the local table counts
+ * lock it is currently interested in. In particular the local table counts
* the number of times that lock has been acquired. This allows multiple
* requests for the same lock to be executed without additional accesses to
* shared memory. We also track the number of lock acquisitions per
@@ -422,7 +422,7 @@ typedef struct LOCALLOCK
/*
* This struct holds information passed from lmgr internals to the lock
- * listing user-level functions (in lockfuncs.c). For each PROCLOCK in
+ * listing user-level functions (in lockfuncs.c). For each PROCLOCK in
* the system, copies of the PROCLOCK object and associated PGPROC and
* LOCK objects are stored. Note there will often be multiple copies
* of the same PGPROC or LOCK --- to detect whether two are the same,
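The "multiple eval of target" warning attached to the SET_LOCKTAG_* macros above is the usual function-like-macro hazard. A hypothetical miniature of the pattern (field names invented):

    /* Each field assignment expands "tag" again: */
    #define SET_DEMO_TAG(tag, db, rel) \
        ((tag).demo_db = (db), (tag).demo_rel = (rel))

    SET_DEMO_TAG(tags[i], dboid, reloid);    /* fine */
    SET_DEMO_TAG(tags[i++], dboid, reloid);  /* wrong: i++ runs twice */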
diff --git a/src/include/storage/pg_sema.h b/src/include/storage/pg_sema.h
index e07ddf6f198..0850927d6f5 100644
--- a/src/include/storage/pg_sema.h
+++ b/src/include/storage/pg_sema.h
@@ -6,7 +6,7 @@
* PostgreSQL requires counting semaphores (the kind that keep track of
* multiple unlock operations, and will allow an equal number of subsequent
* lock operations before blocking). The underlying implementation is
- * not the same on every platform. This file defines the API that must
+ * not the same on every platform. This file defines the API that must
* be provided by each port.
*
*
diff --git a/src/include/storage/pg_shmem.h b/src/include/storage/pg_shmem.h
index 3bbaf41c048..5af11166450 100644
--- a/src/include/storage/pg_shmem.h
+++ b/src/include/storage/pg_shmem.h
@@ -10,7 +10,7 @@
*
* To simplify life for the SysV implementation, the ID is assumed to
* consist of two unsigned long values (these are key and ID in SysV
- * terms). Other platforms may ignore the second value if they need
+ * terms). Other platforms may ignore the second value if they need
* only one ID number.
*
*
diff --git a/src/include/storage/pos.h b/src/include/storage/pos.h
index 689be8c39c4..99c091c6e34 100644
--- a/src/include/storage/pos.h
+++ b/src/include/storage/pos.h
@@ -20,7 +20,7 @@
* been changed to just <offset> as the notion of having multiple pages
* within a block has been removed.
*
- * the 'offset' abstraction is somewhat confusing. it is NOT a byte
+ * the 'offset' abstraction is somewhat confusing. it is NOT a byte
* offset within the page; instead, it is an offset into the line
 * pointer array contained on every page that stores (heap or index)
* tuples.
diff --git a/src/include/storage/predicate_internals.h b/src/include/storage/predicate_internals.h
index 3f30d681df4..32e8d4b8674 100644
--- a/src/include/storage/predicate_internals.h
+++ b/src/include/storage/predicate_internals.h
@@ -128,7 +128,7 @@ typedef struct SERIALIZABLEXACT
* The following types are used to provide an ad hoc list for holding
* SERIALIZABLEXACT objects. An HTAB is overkill, since there is no need to
* access these by key -- there are direct pointers to these objects where
- * needed. If a shared memory list is created, these types can probably be
+ * needed. If a shared memory list is created, these types can probably be
* eliminated in favor of using the general solution.
*/
typedef struct PredXactListElementData
@@ -311,9 +311,9 @@ typedef struct PREDICATELOCKTAG
* The PREDICATELOCK struct represents an individual lock.
*
* An entry can be created here when the related database object is read, or
- * by promotion of multiple finer-grained targets. All entries related to a
+ * by promotion of multiple finer-grained targets. All entries related to a
* serializable transaction are removed when that serializable transaction is
- * cleaned up. Entries can also be removed when they are combined into a
+ * cleaned up. Entries can also be removed when they are combined into a
* single coarser-grained lock entry.
*/
typedef struct PREDICATELOCK
@@ -384,7 +384,7 @@ typedef struct PredicateLockData
/*
* These macros define how we map logical IDs of lockable objects into the
- * physical fields of PREDICATELOCKTARGETTAG. Use these to set up values,
+ * physical fields of PREDICATELOCKTARGETTAG. Use these to set up values,
* rather than accessing the fields directly. Note multiple eval of target!
*/
#define SET_PREDICATELOCKTARGETTAG_RELATION(locktag,dboid,reloid) \
diff --git a/src/include/storage/proc.h b/src/include/storage/proc.h
index 7b181b22bc8..4fa5e7a1d9d 100644
--- a/src/include/storage/proc.h
+++ b/src/include/storage/proc.h
@@ -22,7 +22,7 @@
/*
* Each backend advertises up to PGPROC_MAX_CACHED_SUBXIDS TransactionIds
- * for non-aborted subtransactions of its current top transaction. These
+ * for non-aborted subtransactions of its current top transaction. These
* have to be treated as running XIDs by other backends.
*
* We also keep track of whether the cache overflowed (ie, the transaction has
@@ -53,7 +53,7 @@ struct XidCache
* Each backend has a PGPROC struct in shared memory. There is also a list of
* currently-unused PGPROC structs that will be reallocated to new backends.
*
- * links: list link for any list the PGPROC is in. When waiting for a lock,
+ * links: list link for any list the PGPROC is in. When waiting for a lock,
* the PGPROC is linked into that lock's waitProcs queue. A recycled PGPROC
* is linked into ProcGlobal's freeProcs list.
*
diff --git a/src/include/storage/relfilenode.h b/src/include/storage/relfilenode.h
index 5f07e57c9d1..72941ae8713 100644
--- a/src/include/storage/relfilenode.h
+++ b/src/include/storage/relfilenode.h
@@ -48,7 +48,7 @@ typedef enum ForkNumber
* spcNode identifies the tablespace of the relation. It corresponds to
* pg_tablespace.oid.
*
- * dbNode identifies the database of the relation. It is zero for
+ * dbNode identifies the database of the relation. It is zero for
* "shared" relations (those common to all databases of a cluster).
* Nonzero dbNode values correspond to pg_database.oid.
*
diff --git a/src/include/storage/s_lock.h b/src/include/storage/s_lock.h
index 987fb9c58d2..37435284277 100644
--- a/src/include/storage/s_lock.h
+++ b/src/include/storage/s_lock.h
@@ -36,7 +36,7 @@
*
* int TAS(slock_t *lock)
* Atomic test-and-set instruction. Attempt to acquire the lock,
- * but do *not* wait. Returns 0 if successful, nonzero if unable
+ * but do *not* wait. Returns 0 if successful, nonzero if unable
* to acquire the lock.
*
* TAS() is NOT part of the API, and should never be called directly.
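A sketch of the contract using a GCC builtin as a stand-in; each port supplies its own real TAS, and callers are expected to go through the s_lock layer rather than TAS() itself, as noted above.

    typedef volatile int demo_slock_t;

    static int
    demo_tas(demo_slock_t *lock)
    {
        /* returns the previous value: 0 means we acquired the lock */
        return __sync_lock_test_and_set(lock, 1);
    }

    static void
    demo_spin_acquire(demo_slock_t *lock)
    {
        while (demo_tas(lock) != 0)
            ;                       /* a real spinloop would also delay */
    }

    static void
    demo_spin_release(demo_slock_t *lock)
    {
        __sync_lock_release(lock);  /* store 0 with release semantics */
    }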
diff --git a/src/include/storage/sinvaladt.h b/src/include/storage/sinvaladt.h
index c70355847af..12285d2132a 100644
--- a/src/include/storage/sinvaladt.h
+++ b/src/include/storage/sinvaladt.h
@@ -4,7 +4,7 @@
* POSTGRES shared cache invalidation data manager.
*
* The shared cache invalidation manager is responsible for transmitting
- * invalidation messages between backends. Any message sent by any backend
+ * invalidation messages between backends. Any message sent by any backend
* must be delivered to all already-running backends before it can be
* forgotten. (If we run out of space, we instead deliver a "RESET"
* message to backends that have fallen too far behind.)
diff --git a/src/include/storage/smgr.h b/src/include/storage/smgr.h
index d2db5f6826d..2886ee4551d 100644
--- a/src/include/storage/smgr.h
+++ b/src/include/storage/smgr.h
@@ -31,7 +31,7 @@
*
* An SMgrRelation may have an "owner", which is just a pointer to it from
* somewhere else; smgr.c will clear this pointer if the SMgrRelation is
- * closed. We use this to avoid dangling pointers from relcache to smgr
+ * closed. We use this to avoid dangling pointers from relcache to smgr
* without having to make the smgr explicitly aware of relcache. There
* can't be more than one "owner" pointer per SMgrRelation, but that's
* all we need.
@@ -50,7 +50,7 @@ typedef struct SMgrRelationData
/*
* These next three fields are not actually used or manipulated by smgr,
* except that they are reset to InvalidBlockNumber upon a cache flush
- * event (in particular, upon truncation of the relation). Higher levels
+ * event (in particular, upon truncation of the relation). Higher levels
* store cached state here so that it will be reset when truncation
* happens. In all three cases, InvalidBlockNumber means "unknown".
*/
@@ -62,7 +62,7 @@ typedef struct SMgrRelationData
/*
* Fields below here are intended to be private to smgr.c and its
- * submodules. Do not touch them from elsewhere.
+ * submodules. Do not touch them from elsewhere.
*/
int smgr_which; /* storage manager selector */
diff --git a/src/include/tcop/dest.h b/src/include/tcop/dest.h
index e0890cc8b24..a0b0bad1c22 100644
--- a/src/include/tcop/dest.h
+++ b/src/include/tcop/dest.h
@@ -29,14 +29,14 @@
*
* CreateDestReceiver returns a receiver object appropriate to the specified
* destination. The executor, as well as utility statements that can return
- * tuples, are passed the resulting DestReceiver* pointer. Each executor run
+ * tuples, are passed the resulting DestReceiver* pointer. Each executor run
* or utility execution calls the receiver's rStartup method, then the
* receiveSlot method (zero or more times), then the rShutdown method.
* The same receiver object may be re-used multiple times; eventually it is
* destroyed by calling its rDestroy method.
*
* In some cases, receiver objects require additional parameters that must
- * be passed to them after calling CreateDestReceiver. Since the set of
+ * be passed to them after calling CreateDestReceiver. Since the set of
* parameters varies for different receiver types, this is not handled by
* this module, but by direct calls from the calling code to receiver type
* specific functions.
@@ -45,10 +45,10 @@
* allocated object (for destination types that require no local state),
* in which case rDestroy is a no-op. Alternatively it can be a palloc'd
* object that has DestReceiver as its first field and contains additional
- * fields (see printtup.c for an example). These additional fields are then
+ * fields (see printtup.c for an example). These additional fields are then
* accessible to the DestReceiver functions by casting the DestReceiver*
- * pointer passed to them. The palloc'd object is pfree'd by the rDestroy
- * method. Note that the caller of CreateDestReceiver should take care to
+ * pointer passed to them. The palloc'd object is pfree'd by the rDestroy
+ * method. Note that the caller of CreateDestReceiver should take care to
* do so in a memory context that is long-lived enough for the receiver
* object not to disappear while still needed.
*
@@ -79,7 +79,7 @@
* destination. Someday this will probably need to be improved.
*
* Note: only the values DestNone, DestDebug, DestRemote are legal for the
- * global variable whereToSendOutput. The other values may be used
+ * global variable whereToSendOutput. The other values may be used
* as the destination for individual commands.
* ----------------
*/
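In outline, the "DestReceiver as first field" pattern described above looks like this (CountingReceiver and its private field are hypothetical):

    #include "postgres.h"
    #include "tcop/dest.h"

    /* Hypothetical receiver carrying private state. */
    typedef struct CountingReceiver
    {
        DestReceiver pub;       /* must be first, so the casts work */
        uint64       ntuples;   /* additional private field */
    } CountingReceiver;

    static void
    counting_receiveSlot(TupleTableSlot *slot, DestReceiver *self)
    {
        CountingReceiver *my = (CountingReceiver *) self;

        my->ntuples++;          /* rStartup/rShutdown/rDestroy elided */
    }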
diff --git a/src/include/tcop/tcopdebug.h b/src/include/tcop/tcopdebug.h
index 234b9cc16dd..23969044e7b 100644
--- a/src/include/tcop/tcopdebug.h
+++ b/src/include/tcop/tcopdebug.h
@@ -24,7 +24,7 @@
/* ----------------
* TCOP_SHOWSTATS controls whether or not buffer and
- * access method statistics are shown for each query. -cim 2/9/89
+ * access method statistics are shown for each query. -cim 2/9/89
* ----------------
*/
#undef TCOP_SHOWSTATS
diff --git a/src/include/utils/acl.h b/src/include/utils/acl.h
index 6b80039f972..f09ed311dac 100644
--- a/src/include/utils/acl.h
+++ b/src/include/utils/acl.h
@@ -81,11 +81,11 @@ typedef struct AclItem
/*
* Definitions for convenient access to Acl (array of AclItem).
* These are standard PostgreSQL arrays, but are restricted to have one
- * dimension and no nulls. We also ignore the lower bound when reading,
+ * dimension and no nulls. We also ignore the lower bound when reading,
* and set it to one when writing.
*
* CAUTION: as of PostgreSQL 7.1, these arrays are toastable (just like all
- * other array types). Therefore, be careful to detoast them with the
+ * other array types). Therefore, be careful to detoast them with the
* macros provided, unless you know for certain that a particular array
* can't have been toasted.
*/
diff --git a/src/include/utils/catcache.h b/src/include/utils/catcache.h
index 7a990528e75..92e1124505d 100644
--- a/src/include/utils/catcache.h
+++ b/src/include/utils/catcache.h
@@ -78,13 +78,13 @@ typedef struct catctup
/*
* Each tuple in a cache is a member of a Dllist that stores the elements
- * of its hash bucket. We keep each Dllist in LRU order to speed repeated
+ * of its hash bucket. We keep each Dllist in LRU order to speed repeated
* lookups.
*/
Dlelem cache_elem; /* list member of per-bucket list */
/*
- * The tuple may also be a member of at most one CatCList. (If a single
+ * The tuple may also be a member of at most one CatCList. (If a single
* catcache is list-searched with varying numbers of keys, we may have to
* make multiple entries for the same tuple because of this restriction.
* Currently, that's not expected to be common, so we accept the potential
@@ -101,7 +101,7 @@ typedef struct catctup
*
* A negative cache entry is an assertion that there is no tuple matching
* a particular key. This is just as useful as a normal entry so far as
- * avoiding catalog searches is concerned. Management of positive and
+ * avoiding catalog searches is concerned. Management of positive and
* negative entries is identical.
*/
int refcount; /* number of active references */
@@ -120,7 +120,7 @@ typedef struct catclist
/*
* A CatCList describes the result of a partial search, ie, a search using
- * only the first K key columns of an N-key cache. We form the keys used
+ * only the first K key columns of an N-key cache. We form the keys used
* into a tuple (with other attributes NULL) to represent the stored key
* set. The CatCList object contains links to cache entries for all the
* table rows satisfying the partial key. (Note: none of these will be
diff --git a/src/include/utils/datetime.h b/src/include/utils/datetime.h
index 7b04dd6e138..d918125a642 100644
--- a/src/include/utils/datetime.h
+++ b/src/include/utils/datetime.h
@@ -285,7 +285,7 @@ extern const int day_tab[2][13];
/*
* Datetime input parsing routines (ParseDateTime, DecodeDateTime, etc)
- * return zero or a positive value on success. On failure, they return
+ * return zero or a positive value on success. On failure, they return
* one of these negative code values. DateTimeParseError may be used to
* produce a correct ereport.
*/
diff --git a/src/include/utils/elog.h b/src/include/utils/elog.h
index 93b141d6839..997e1fec835 100644
--- a/src/include/utils/elog.h
+++ b/src/include/utils/elog.h
@@ -89,13 +89,13 @@
* ... other errxxx() fields as needed ...));
*
* The error level is required, and so is a primary error message (errmsg
- * or errmsg_internal). All else is optional. errcode() defaults to
+ * or errmsg_internal). All else is optional. errcode() defaults to
* ERRCODE_INTERNAL_ERROR if elevel is ERROR or more, ERRCODE_WARNING
* if elevel is WARNING, or ERRCODE_SUCCESSFUL_COMPLETION if elevel is
* NOTICE or below.
*
* ereport_domain() allows a message domain to be specified, for modules that
- * wish to use a different message catalog from the backend's. To avoid having
+ * wish to use a different message catalog from the backend's. To avoid having
* one copy of the default text domain per .o file, we define it as NULL here
* and have errstart insert the default text domain. Modules can either use
* ereport_domain() directly, or preferably they can override the TEXTDOMAIN
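As a concrete instance of the pattern sketched above (val is a stand-in variable; note the extra parentheses required around the auxiliary calls):

    ereport(ERROR,
            (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
             errmsg("value %d is out of range", val)));

    /* errcode() omitted: with elevel ERROR this defaults to
     * ERRCODE_INTERNAL_ERROR, per the rules above. */
    ereport(ERROR, (errmsg("unexpected state reached")));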
diff --git a/src/include/utils/guc.h b/src/include/utils/guc.h
index 8975561b20e..1cdff96308f 100644
--- a/src/include/utils/guc.h
+++ b/src/include/utils/guc.h
@@ -40,7 +40,7 @@
* configuration file, or by client request in the connection startup
* packet (e.g., from libpq's PGOPTIONS variable). Furthermore, an
* already-started backend will ignore changes to such an option in the
- * configuration file. The idea is that these options are fixed for a
+ * configuration file. The idea is that these options are fixed for a
* given backend once it's started, but they can vary across backends.
*
* SUSET options can be set at postmaster startup, with the SIGHUP
@@ -74,7 +74,7 @@ typedef enum
*
* PGC_S_TEST is used when testing values to be stored as per-database or
* per-user defaults ("doit" will always be false, so this never gets stored
- * as the actual source of any value). This is an interactive case, but
+ * as the actual source of any value). This is an interactive case, but
* it needs its own source value because some assign hooks need to make
* different validity checks in this case.
*
diff --git a/src/include/utils/hsearch.h b/src/include/utils/hsearch.h
index c53d93e3969..e0c3d0b077c 100644
--- a/src/include/utils/hsearch.h
+++ b/src/include/utils/hsearch.h
@@ -30,7 +30,7 @@ typedef int (*HashCompareFunc) (const void *key1, const void *key2,
Size keysize);
/*
- * Key copying functions must have this signature. The return value is not
+ * Key copying functions must have this signature. The return value is not
* used. (The definition is set up to allow memcpy() and strncpy() to be
* used directly.)
*/
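That is, the typedef is shaped so memcpy() itself qualifies; a sketch, assuming the usual HASHCTL.keycopy wiring (paired with the HASH_KEYCOPY flag to hash_create):

    #include <string.h>
    #include "utils/hsearch.h"

    static void
    demo_keycopy_setup(HASHCTL *ctl)
    {
        /* memcpy already has the (dest, src, size) shape the typedef
         * asks for; its return value is simply ignored. */
        ctl->keycopy = (HashCopyFunc) memcpy;
    }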
diff --git a/src/include/utils/inet.h b/src/include/utils/inet.h
index bf982f67ee7..bf31ea3345a 100644
--- a/src/include/utils/inet.h
+++ b/src/include/utils/inet.h
@@ -40,7 +40,7 @@ typedef struct
/*
* Both INET and CIDR addresses are represented within Postgres as varlena
* objects, ie, there is a varlena header in front of the struct type
- * depicted above. This struct depicts what we actually have in memory
+ * depicted above. This struct depicts what we actually have in memory
* in "uncompressed" cases. Note that since the maximum data size is only
* 18 bytes, INET/CIDR will invariably be stored into tuples using the
* 1-byte-header varlena format. However, we have to be prepared to cope
diff --git a/src/include/utils/memutils.h b/src/include/utils/memutils.h
index 7c1202478e5..f5427ac275f 100644
--- a/src/include/utils/memutils.h
+++ b/src/include/utils/memutils.h
@@ -30,7 +30,7 @@
* be summarily denied.
*
* XXX This is deliberately chosen to correspond to the limiting size
- * of varlena objects under TOAST. See VARSIZE_4B() and related macros
+ * of varlena objects under TOAST. See VARSIZE_4B() and related macros
* in postgres.h. Many datatypes assume that any allocatable size can
* be represented in a varlena header.
*
@@ -45,8 +45,8 @@
* All chunks allocated by any memory context manager are required to be
* preceded by a StandardChunkHeader at a spacing of STANDARDCHUNKHEADERSIZE.
* A currently-allocated chunk must contain a backpointer to its owning
- * context as well as the allocated size of the chunk. The backpointer is
- * used by pfree() and repalloc() to find the context to call. The allocated
+ * context as well as the allocated size of the chunk. The backpointer is
+ * used by pfree() and repalloc() to find the context to call. The allocated
* size is not absolutely essential, but it's expected to be needed by any
* reasonable implementation.
*/
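What the backpointer buys, in a simplified sketch: pfree() and repalloc() can step back from any chunk pointer to its StandardChunkHeader and find the owning context, without being told which one it is.

    #include "postgres.h"
    #include "utils/memutils.h"

    /* Sketch of the lookup (the real code lives in mcxt.c): */
    static MemoryContext
    demo_owning_context(void *pointer)
    {
        StandardChunkHeader *header = (StandardChunkHeader *)
            ((char *) pointer - STANDARDCHUNKHEADERSIZE);

        return header->context;     /* header->size holds the chunk size */
    }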
diff --git a/src/include/utils/palloc.h b/src/include/utils/palloc.h
index d0330a57275..3183c32daa6 100644
--- a/src/include/utils/palloc.h
+++ b/src/include/utils/palloc.h
@@ -6,9 +6,9 @@
* This file contains the basic memory allocation interface that is
* needed by almost every backend module. It is included directly by
* postgres.h, so the definitions here are automatically available
- * everywhere. Keep it lean!
+ * everywhere. Keep it lean!
*
- * Memory allocation occurs within "contexts". Every chunk obtained from
+ * Memory allocation occurs within "contexts". Every chunk obtained from
* palloc()/MemoryContextAlloc() is allocated within a specific context.
* The entire contents of a context can be freed easily and quickly by
* resetting or deleting the context --- this is both faster and less
@@ -29,7 +29,7 @@
#define PALLOC_H
/*
- * Type MemoryContextData is declared in nodes/memnodes.h. Most users
+ * Type MemoryContextData is declared in nodes/memnodes.h. Most users
* of memory allocation should just treat it as an abstract type, so we
* do not provide the struct contents here.
*/
@@ -37,7 +37,7 @@ typedef struct MemoryContextData *MemoryContext;
/*
* CurrentMemoryContext is the default allocation context for palloc().
- * We declare it here so that palloc() can be a macro. Avoid accessing it
+ * We declare it here so that palloc() can be a macro. Avoid accessing it
* directly! Instead, use MemoryContextSwitchTo() to change the setting.
*/
extern PGDLLIMPORT MemoryContext CurrentMemoryContext;
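The switch-and-restore idiom the comment prescribes, as a short sketch:

    #include "postgres.h"
    #include "utils/memutils.h"

    /* Allocate something that must outlive the current context, then
     * restore the previous setting: */
    static void *
    demo_longlived_alloc(Size size)
    {
        MemoryContext oldcxt = MemoryContextSwitchTo(TopMemoryContext);
        void         *ptr = palloc(size);

        MemoryContextSwitchTo(oldcxt);
        return ptr;
    }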
diff --git a/src/include/utils/pg_crc.h b/src/include/utils/pg_crc.h
index cc68acd6f8f..e75aebc63e0 100644
--- a/src/include/utils/pg_crc.h
+++ b/src/include/utils/pg_crc.h
@@ -72,7 +72,7 @@ extern CRCDLLIMPORT const uint32 pg_crc32_table[];
/*
* crc0 represents the LSBs of the 64-bit value, crc1 the MSBs. Note that
* with crc0 placed first, the output of 32-bit and 64-bit implementations
- * will be bit-compatible only on little-endian architectures. If it were
+ * will be bit-compatible only on little-endian architectures. If it were
* important to make the two possible implementations bit-compatible on
* all machines, we could do a configure test to decide how to order the
* two fields, but it seems not worth the trouble.
diff --git a/src/include/utils/plancache.h b/src/include/utils/plancache.h
index b8639a59a0e..25f3962c683 100644
--- a/src/include/utils/plancache.h
+++ b/src/include/utils/plancache.h
@@ -40,7 +40,7 @@
* losing any flexibility if a replan turns out to be necessary.
*
* Note: the string referenced by commandTag is not subsidiary storage;
- * it is assumed to be a compile-time-constant string. As with portals,
+ * it is assumed to be a compile-time-constant string. As with portals,
* commandTag shall be NULL if and only if the original query string (before
* rewriting) was an empty string.
*/
diff --git a/src/include/utils/portal.h b/src/include/utils/portal.h
index cf50655e125..e7ee6f9574d 100644
--- a/src/include/utils/portal.h
+++ b/src/include/utils/portal.h
@@ -57,8 +57,8 @@
* single result from the user's viewpoint. However, the rule rewriter
* may expand the single source query to zero or many actual queries.)
*
- * PORTAL_ONE_SELECT: the portal contains one single SELECT query. We run
- * the Executor incrementally as results are demanded. This strategy also
+ * PORTAL_ONE_SELECT: the portal contains one single SELECT query. We run
+ * the Executor incrementally as results are demanded. This strategy also
* supports holdable cursors (the Executor results can be dumped into a
* tuplestore for access after transaction completion).
*
@@ -72,7 +72,7 @@
* all the auxiliary queries.)
*
* PORTAL_ONE_MOD_WITH: the portal contains one single SELECT query, but
- * it has data-modifying CTEs. This is currently treated the same as the
+ * it has data-modifying CTEs. This is currently treated the same as the
* PORTAL_ONE_RETURNING case because of the possibility of needing to fire
* triggers. It may act more like PORTAL_ONE_SELECT in future.
*
diff --git a/src/include/utils/rel.h b/src/include/utils/rel.h
index 5eb9eaa8a8e..3bddf2211d1 100644
--- a/src/include/utils/rel.h
+++ b/src/include/utils/rel.h
@@ -187,7 +187,7 @@ typedef struct RelationData
* Note: rd_amcache is available for index AMs to cache private data about
* an index. This must be just a cache since it may get reset at any time
* (in particular, it will get reset by a relcache inval message for the
- * index). If used, it must point to a single memory chunk palloc'd in
+ * index). If used, it must point to a single memory chunk palloc'd in
* rd_indexcxt. A relcache reset will include freeing that chunk and
* setting rd_amcache = NULL.
*/
@@ -376,7 +376,7 @@ typedef struct StdRdOptions
* RelationGetTargetBlock
* Fetch relation's current insertion target block.
*
- * Returns InvalidBlockNumber if there is no current target block. Note
+ * Returns InvalidBlockNumber if there is no current target block. Note
* that the target block status is discarded on any smgr-level invalidation.
*/
#define RelationGetTargetBlock(relation) \
diff --git a/src/include/utils/relcache.h b/src/include/utils/relcache.h
index 5de9f359ef2..9e2ea8ce060 100644
--- a/src/include/utils/relcache.h
+++ b/src/include/utils/relcache.h
@@ -24,7 +24,7 @@ typedef struct RelationData *Relation;
/* ----------------
* RelationPtr is used in the executor to support index scans
* where we have to keep track of several index relations in an
- * array. -cim 9/10/89
+ * array. -cim 9/10/89
* ----------------
*/
typedef Relation *RelationPtr;
diff --git a/src/include/utils/resowner.h b/src/include/utils/resowner.h
index 2d08312b34f..bfde96e75d9 100644
--- a/src/include/utils/resowner.h
+++ b/src/include/utils/resowner.h
@@ -42,7 +42,7 @@ extern PGDLLIMPORT ResourceOwner TopTransactionResourceOwner;
/*
* Resource releasing is done in three phases: pre-locks, locks, and
- * post-locks. The pre-lock phase must release any resources that are
+ * post-locks. The pre-lock phase must release any resources that are
* visible to other backends (such as pinned buffers); this ensures that
* when we release a lock that another backend may be waiting on, it will
* see us as being fully out of our transaction. The post-lock phase
diff --git a/src/include/utils/selfuncs.h b/src/include/utils/selfuncs.h
index 7f98878426d..2b33e296a41 100644
--- a/src/include/utils/selfuncs.h
+++ b/src/include/utils/selfuncs.h
@@ -23,7 +23,7 @@
/*
* Note: the default selectivity estimates are not chosen entirely at random.
* We want them to be small enough to ensure that indexscans will be used if
- * available, for typical table densities of ~100 tuples/page. Thus, for
+ * available, for typical table densities of ~100 tuples/page. Thus, for
* example, 0.01 is not quite small enough, since that makes it appear that
* nearly all pages will be hit anyway. Also, since we sometimes estimate
* eqsel as 1/num_distinct, we probably want DEFAULT_NUM_DISTINCT to equal
diff --git a/src/include/utils/timestamp.h b/src/include/utils/timestamp.h
index e311a8fb44e..a7301c0e488 100644
--- a/src/include/utils/timestamp.h
+++ b/src/include/utils/timestamp.h
@@ -76,7 +76,7 @@ typedef struct
* DAYS_PER_MONTH is very imprecise. The more accurate value is
* 365.2425/12 = 30.436875, or '30 days 10:29:06'. Right now we only
* return an integral number of days, but someday perhaps we should
- * also return a 'time' value to be used as well. ISO 8601 suggests
+ * also return a 'time' value to be used as well. ISO 8601 suggests
* 30 days.
*/
#define DAYS_PER_MONTH 30 /* assumes exactly 30 days per month */
diff --git a/src/include/utils/tqual.h b/src/include/utils/tqual.h
index 689f4825d50..1da8e0b2ea9 100644
--- a/src/include/utils/tqual.h
+++ b/src/include/utils/tqual.h
@@ -3,7 +3,7 @@
* tqual.h
* POSTGRES "time qualification" definitions, ie, tuple visibility rules.
*
- * Should be moved/renamed... - vadim 07/28/98
+ * Should be moved/renamed... - vadim 07/28/98
*
* Portions Copyright (c) 1996-2011, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
diff --git a/src/include/utils/tuplesort.h b/src/include/utils/tuplesort.h
index 1ebcbfe1724..17917b40ad3 100644
--- a/src/include/utils/tuplesort.h
+++ b/src/include/utils/tuplesort.h
@@ -6,7 +6,7 @@
* This module handles sorting of heap tuples, index tuples, or single
* Datums (and could easily support other kinds of sortable objects,
* if necessary). It works efficiently for both small and large amounts
- * of data. Small amounts are sorted in-memory using qsort(). Large
+ * of data. Small amounts are sorted in-memory using qsort(). Large
* amounts are sorted using temporary files and a standard external sort
* algorithm.
*
@@ -41,9 +41,9 @@ typedef struct Tuplesortstate Tuplesortstate;
* The "heap" API actually stores/sorts MinimalTuples, which means it doesn't
* preserve the system columns (tuple identity and transaction visibility
* info). The sort keys are specified by column numbers within the tuples
- * and sort operator OIDs. We save some cycles by passing and returning the
+ * and sort operator OIDs. We save some cycles by passing and returning the
* tuples in TupleTableSlots, rather than forming actual HeapTuples (which'd
- * have to be converted to MinimalTuples). This API works well for sorts
+ * have to be converted to MinimalTuples). This API works well for sorts
* executed as parts of plan trees.
*
* The "cluster" API stores/sorts full HeapTuples including all visibility
@@ -52,7 +52,7 @@ typedef struct Tuplesortstate Tuplesortstate;
* go with this API, not the "begin_heap" one!
*
* The "index_btree" API stores/sorts IndexTuples (preserving all their
- * header fields). The sort keys are specified by a btree index definition.
+ * header fields). The sort keys are specified by a btree index definition.
*
* The "index_hash" API is similar to index_btree, but the tuples are
* actually sorted by their hash codes not the raw data.
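In outline, the heap API's call sequence looks like this. A sketch only: tuplesort_begin_heap() is omitted because its argument list (sort columns, operator OIDs, workMem, etc.) has varied across releases; consult this header for the exact signature.

    #include "postgres.h"
    #include "utils/tuplesort.h"

    static void
    demo_heap_sort(Tuplesortstate *state, TupleTableSlot *slot)
    {
        /* feed phase: tuples go in as slots (stored as MinimalTuples,
         * so identity/visibility columns are not preserved) */
        tuplesort_puttupleslot(state, slot);

        /* sort phase */
        tuplesort_performsort(state);

        /* drain phase: forward scan, returns false when exhausted */
        while (tuplesort_gettupleslot(state, true, slot))
            ;

        tuplesort_end(state);
    }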
diff --git a/src/include/utils/tuplestore.h b/src/include/utils/tuplestore.h
index 0c3e2eaf9f0..df38ab944ca 100644
--- a/src/include/utils/tuplestore.h
+++ b/src/include/utils/tuplestore.h
@@ -8,7 +8,7 @@
* a dumbed-down version of tuplesort.c; it does no sorting of tuples
* but can only store and regurgitate a sequence of tuples. However,
* because no sort is required, it is allowed to start reading the sequence
- * before it has all been written. This is particularly useful for cursors,
+ * before it has all been written. This is particularly useful for cursors,
* because it allows random access within the already-scanned portion of
* a query without having to process the underlying scan to completion.
* Also, it is possible to support multiple independent read pointers.
diff --git a/src/include/utils/typcache.h b/src/include/utils/typcache.h
index eb93c1d3b54..6c832d55669 100644
--- a/src/include/utils/typcache.h
+++ b/src/include/utils/typcache.h
@@ -55,7 +55,7 @@ typedef struct TypeCacheEntry
/*
* Pre-set-up fmgr call info for the equality operator, the btree
- * comparison function, and the hash calculation function. These are kept
+ * comparison function, and the hash calculation function. These are kept
* in the type cache to avoid problems with memory leaks in repeated calls
* to functions such as array_eq, array_cmp, hash_array. There is not
* currently a need to maintain call info for the lt_opr or gt_opr.
@@ -75,7 +75,7 @@ typedef struct TypeCacheEntry
int flags; /* flags about what we've computed */
/*
- * Private information about an enum type. NULL if not enum or
+ * Private information about an enum type. NULL if not enum or
* information hasn't been requested.
*/
struct TypeCacheEnumData *enumData;
diff --git a/src/interfaces/ecpg/include/sqlca.h b/src/interfaces/ecpg/include/sqlca.h
index 52fcbf830f5..41e5b550af4 100644
--- a/src/interfaces/ecpg/include/sqlca.h
+++ b/src/interfaces/ecpg/include/sqlca.h
@@ -40,7 +40,7 @@ struct sqlca_t
/* Element 0: set to 'W' if at least one other is 'W' */
/* 1: if 'W' at least one character string */
/* value was truncated when it was */
- /* stored into a host variable. */
+ /* stored into a host variable. */
/*
* 2: if 'W' a (hopefully) non-fatal notice occurred
diff --git a/src/interfaces/ecpg/pgtypeslib/dt.h b/src/interfaces/ecpg/pgtypeslib/dt.h
index 48c9deda1cb..ce972f19868 100644
--- a/src/interfaces/ecpg/pgtypeslib/dt.h
+++ b/src/interfaces/ecpg/pgtypeslib/dt.h
@@ -255,7 +255,7 @@ do { \
* DAYS_PER_MONTH is very imprecise. The more accurate value is
* 365.2425/12 = 30.436875, or '30 days 10:29:06'. Right now we only
* return an integral number of days, but someday perhaps we should
- * also return a 'time' value to be used as well. ISO 8601 suggests
+ * also return a 'time' value to be used as well. ISO 8601 suggests
* 30 days.
*/
#define DAYS_PER_MONTH 30 /* assumes exactly 30 days per month */
diff --git a/src/interfaces/ecpg/pgtypeslib/interval.c b/src/interfaces/ecpg/pgtypeslib/interval.c
index fdd8f49ac83..ed972be41e6 100644
--- a/src/interfaces/ecpg/pgtypeslib/interval.c
+++ b/src/interfaces/ecpg/pgtypeslib/interval.c
@@ -160,7 +160,7 @@ DecodeISO8601Interval(char *str,
return dterr;
/*
- * Note: we could step off the end of the string here. Code below
+ * Note: we could step off the end of the string here. Code below
* *must* exit the loop if unit == '\0'.
*/
unit = *str++;
diff --git a/src/interfaces/ecpg/pgtypeslib/numeric.c b/src/interfaces/ecpg/pgtypeslib/numeric.c
index 55c5b45616e..aefabea976f 100644
--- a/src/interfaces/ecpg/pgtypeslib/numeric.c
+++ b/src/interfaces/ecpg/pgtypeslib/numeric.c
@@ -974,7 +974,7 @@ PGTYPESnumeric_sub(numeric *var1, numeric *var2, numeric *result)
* mul_var() -
*
* Multiplication on variable level. Product of var1 * var2 is stored
- * in result. Accuracy of result is determined by global_rscale.
+ * in result. Accuracy of result is determined by global_rscale.
* ----------
*/
int
diff --git a/src/interfaces/ecpg/preproc/c_keywords.c b/src/interfaces/ecpg/preproc/c_keywords.c
index 41d20d26d6f..06bf0390994 100644
--- a/src/interfaces/ecpg/preproc/c_keywords.c
+++ b/src/interfaces/ecpg/preproc/c_keywords.c
@@ -57,7 +57,7 @@ static const ScanKeyword ScanCKeywords[] = {
/*
- * Do a binary search using plain strcmp() comparison. This is much like
+ * Do a binary search using plain strcmp() comparison. This is much like
* ScanKeywordLookup(), except we want case-sensitive matching.
*/
const ScanKeyword *
diff --git a/src/interfaces/ecpg/preproc/parser.c b/src/interfaces/ecpg/preproc/parser.c
index cd5323f6c19..c2f80d9226e 100644
--- a/src/interfaces/ecpg/preproc/parser.c
+++ b/src/interfaces/ecpg/preproc/parser.c
@@ -35,7 +35,7 @@ static YYLTYPE lookahead_yylloc; /* yylloc for lookahead token */
* Intermediate filter between parser and base lexer (base_yylex in scan.l).
*
* The filter is needed because in some cases the standard SQL grammar
- * requires more than one token lookahead. We reduce these cases to one-token
+ * requires more than one token lookahead. We reduce these cases to one-token
* lookahead by combining tokens here, in order to keep the grammar LALR(1).
*
* Using a filter is simpler than trying to recognize multiword tokens
diff --git a/src/interfaces/ecpg/test/expected/compat_informix-test_informix2.c b/src/interfaces/ecpg/test/expected/compat_informix-test_informix2.c
index c1a48911914..390601d84ee 100644
--- a/src/interfaces/ecpg/test/expected/compat_informix-test_informix2.c
+++ b/src/interfaces/ecpg/test/expected/compat_informix-test_informix2.c
@@ -57,7 +57,7 @@ struct sqlca_t
/* Element 0: set to 'W' if at least one other is 'W' */
/* 1: if 'W' at least one character string */
/* value was truncated when it was */
- /* stored into a host variable. */
+ /* stored into a host variable. */
/*
* 2: if 'W' a (hopefully) non-fatal notice occurred
diff --git a/src/interfaces/ecpg/test/expected/preproc-init.c b/src/interfaces/ecpg/test/expected/preproc-init.c
index 49f2d5d57a6..9e410ff1558 100644
--- a/src/interfaces/ecpg/test/expected/preproc-init.c
+++ b/src/interfaces/ecpg/test/expected/preproc-init.c
@@ -51,7 +51,7 @@ struct sqlca_t
/* Element 0: set to 'W' if at least one other is 'W' */
/* 1: if 'W' at least one character string */
/* value was truncated when it was */
- /* stored into a host variable. */
+ /* stored into a host variable. */
/*
* 2: if 'W' a (hopefully) non-fatal notice occurred
diff --git a/src/interfaces/ecpg/test/expected/sql-array.c b/src/interfaces/ecpg/test/expected/sql-array.c
index 3c879561b37..13b940217cd 100644
--- a/src/interfaces/ecpg/test/expected/sql-array.c
+++ b/src/interfaces/ecpg/test/expected/sql-array.c
@@ -59,7 +59,7 @@ struct sqlca_t
/* Element 0: set to 'W' if at least one other is 'W' */
/* 1: if 'W' at least one character string */
/* value was truncated when it was */
- /* stored into a host variable. */
+ /* stored into a host variable. */
/*
* 2: if 'W' a (hopefully) non-fatal notice occurred
diff --git a/src/interfaces/ecpg/test/expected/sql-code100.c b/src/interfaces/ecpg/test/expected/sql-code100.c
index 051fc386229..702c6e146fd 100644
--- a/src/interfaces/ecpg/test/expected/sql-code100.c
+++ b/src/interfaces/ecpg/test/expected/sql-code100.c
@@ -51,7 +51,7 @@ struct sqlca_t
/* Element 0: set to 'W' if at least one other is 'W' */
/* 1: if 'W' at least one character string */
/* value was truncated when it was */
- /* stored into a host variable. */
+ /* stored into a host variable. */
/*
* 2: if 'W' a (hopefully) non-fatal notice occurred
diff --git a/src/interfaces/ecpg/test/expected/sql-copystdout.c b/src/interfaces/ecpg/test/expected/sql-copystdout.c
index 563732b05d5..33ea2133d60 100644
--- a/src/interfaces/ecpg/test/expected/sql-copystdout.c
+++ b/src/interfaces/ecpg/test/expected/sql-copystdout.c
@@ -53,7 +53,7 @@ struct sqlca_t
/* Element 0: set to 'W' if at least one other is 'W' */
/* 1: if 'W' at least one character string */
/* value was truncated when it was */
- /* stored into a host variable. */
+ /* stored into a host variable. */
/*
* 2: if 'W' a (hopefully) non-fatal notice occurred
diff --git a/src/interfaces/ecpg/test/expected/sql-define.c b/src/interfaces/ecpg/test/expected/sql-define.c
index b9571ec53df..4a1d7ee6f03 100644
--- a/src/interfaces/ecpg/test/expected/sql-define.c
+++ b/src/interfaces/ecpg/test/expected/sql-define.c
@@ -51,7 +51,7 @@ struct sqlca_t
/* Element 0: set to 'W' if at least one other is 'W' */
/* 1: if 'W' at least one character string */
/* value was truncated when it was */
- /* stored into a host variable. */
+ /* stored into a host variable. */
/*
* 2: if 'W' a (hopefully) non-fatal notice occurred
diff --git a/src/interfaces/ecpg/test/expected/sql-dynalloc.c b/src/interfaces/ecpg/test/expected/sql-dynalloc.c
index ccc337168e4..cd5d5c0ab02 100644
--- a/src/interfaces/ecpg/test/expected/sql-dynalloc.c
+++ b/src/interfaces/ecpg/test/expected/sql-dynalloc.c
@@ -52,7 +52,7 @@ struct sqlca_t
/* Element 0: set to 'W' if at least one other is 'W' */
/* 1: if 'W' at least one character string */
/* value was truncated when it was */
- /* stored into a host variable. */
+ /* stored into a host variable. */
/*
* 2: if 'W' a (hopefully) non-fatal notice occurred
diff --git a/src/interfaces/ecpg/test/expected/sql-dynalloc2.c b/src/interfaces/ecpg/test/expected/sql-dynalloc2.c
index e85189214bb..e5d2f757827 100644
--- a/src/interfaces/ecpg/test/expected/sql-dynalloc2.c
+++ b/src/interfaces/ecpg/test/expected/sql-dynalloc2.c
@@ -52,7 +52,7 @@ struct sqlca_t
/* Element 0: set to 'W' if at least one other is 'W' */
/* 1: if 'W' at least one character string */
/* value was truncated when it was */
- /* stored into a host variable. */
+ /* stored into a host variable. */
/*
* 2: if 'W' a (hopefully) non-fatal notice occurred
diff --git a/src/interfaces/ecpg/test/expected/sql-dyntest.c b/src/interfaces/ecpg/test/expected/sql-dyntest.c
index 537d9ff63a8..91b4421b193 100644
--- a/src/interfaces/ecpg/test/expected/sql-dyntest.c
+++ b/src/interfaces/ecpg/test/expected/sql-dyntest.c
@@ -105,7 +105,7 @@ struct sqlca_t
/* Element 0: set to 'W' if at least one other is 'W' */
/* 1: if 'W' at least one character string */
/* value was truncated when it was */
- /* stored into a host variable. */
+ /* stored into a host variable. */
/*
* 2: if 'W' a (hopefully) non-fatal notice occurred
diff --git a/src/interfaces/ecpg/test/expected/sql-indicators.c b/src/interfaces/ecpg/test/expected/sql-indicators.c
index e805e4ee003..5e167b1944b 100644
--- a/src/interfaces/ecpg/test/expected/sql-indicators.c
+++ b/src/interfaces/ecpg/test/expected/sql-indicators.c
@@ -53,7 +53,7 @@ struct sqlca_t
/* Element 0: set to 'W' if at least one other is 'W' */
/* 1: if 'W' at least one character string */
/* value was truncated when it was */
- /* stored into a host variable. */
+ /* stored into a host variable. */
/*
* 2: if 'W' a (hopefully) non-fatal notice occurred
diff --git a/src/interfaces/ecpg/test/expected/thread-alloc.c b/src/interfaces/ecpg/test/expected/thread-alloc.c
index 199a77eb151..f80dd6cffba 100644
--- a/src/interfaces/ecpg/test/expected/thread-alloc.c
+++ b/src/interfaces/ecpg/test/expected/thread-alloc.c
@@ -74,7 +74,7 @@ struct sqlca_t
/* Element 0: set to 'W' if at least one other is 'W' */
/* 1: if 'W' at least one character string */
/* value was truncated when it was */
- /* stored into a host variable. */
+ /* stored into a host variable. */
/*
* 2: if 'W' a (hopefully) non-fatal notice occurred
diff --git a/src/interfaces/ecpg/test/expected/thread-descriptor.c b/src/interfaces/ecpg/test/expected/thread-descriptor.c
index 2584626f4fd..e2be89dec04 100644
--- a/src/interfaces/ecpg/test/expected/thread-descriptor.c
+++ b/src/interfaces/ecpg/test/expected/thread-descriptor.c
@@ -65,7 +65,7 @@ struct sqlca_t
/* Element 0: set to 'W' if at least one other is 'W' */
/* 1: if 'W' at least one character string */
/* value was truncated when it was */
- /* stored into a host variable. */
+ /* stored into a host variable. */
/*
* 2: if 'W' a (hopefully) non-fatal notice occurred
diff --git a/src/interfaces/ecpg/test/expected/thread-prep.c b/src/interfaces/ecpg/test/expected/thread-prep.c
index cd28c850991..b7f32721a59 100644
--- a/src/interfaces/ecpg/test/expected/thread-prep.c
+++ b/src/interfaces/ecpg/test/expected/thread-prep.c
@@ -74,7 +74,7 @@ struct sqlca_t
/* Element 0: set to 'W' if at least one other is 'W' */
/* 1: if 'W' at least one character string */
/* value was truncated when it was */
- /* stored into a host variable. */
+ /* stored into a host variable. */
/*
* 2: if 'W' a (hopefully) non-fatal notice occurred
diff --git a/src/interfaces/libpq/fe-auth.c b/src/interfaces/libpq/fe-auth.c
index e72826cddc3..e3612e54b7b 100644
--- a/src/interfaces/libpq/fe-auth.c
+++ b/src/interfaces/libpq/fe-auth.c
@@ -1021,7 +1021,7 @@ pg_fe_getauthname(PQExpBuffer errorMessage)
*
* This is intended to be used by client applications that wish to send
* commands like ALTER USER joe PASSWORD 'pwd'. The password need not
- * be sent in cleartext if it is encrypted on the client side. This is
+ * be sent in cleartext if it is encrypted on the client side. This is
* good because it ensures the cleartext password won't end up in logs,
* pg_stat displays, etc. We export the function so that clients won't
 * be dependent on low-level details like whether the encryption is MD5
diff --git a/src/interfaces/libpq/fe-connect.c b/src/interfaces/libpq/fe-connect.c
index 261cb3066ff..472381f96a9 100644
--- a/src/interfaces/libpq/fe-connect.c
+++ b/src/interfaces/libpq/fe-connect.c
@@ -134,7 +134,7 @@ static int ldapServiceLookup(const char *purl, PQconninfoOption *options,
*
* PQconninfoOptions[] is a constant static array that we use to initialize
* a dynamically allocated working copy. All the "val" fields in
- * PQconninfoOptions[] *must* be NULL. In a working copy, non-null "val"
+ * PQconninfoOptions[] *must* be NULL. In a working copy, non-null "val"
* fields point to malloc'd strings that should be freed when the working
* array is freed (see PQconninfoFree).
* ----------
@@ -453,7 +453,7 @@ PQping(const char *conninfo)
* See comment for PQconnectdbParams for the definition of the string format.
*
* Returns a PGconn*. If NULL is returned, a malloc error has occurred, and
- * you should not attempt to proceed with this connection. If the status
+ * you should not attempt to proceed with this connection. If the status
* field of the connection returned is CONNECTION_BAD, an error has
* occurred. In this case you should call PQfinish on the result, (perhaps
* inspecting the error message first). Other fields of the structure may not
@@ -528,7 +528,7 @@ PQconnectStartParams(const char **keywords,
* See comment for PQconnectdb for the definition of the string format.
*
* Returns a PGconn*. If NULL is returned, a malloc error has occurred, and
- * you should not attempt to proceed with this connection. If the status
+ * you should not attempt to proceed with this connection. If the status
* field of the connection returned is CONNECTION_BAD, an error has
* occurred. In this case you should call PQfinish on the result, (perhaps
* inspecting the error message first). Other fields of the structure may not
@@ -810,14 +810,14 @@ connectOptions2(PGconn *conn)
*
* Parse an empty string like PQconnectdb() would do and return the
* resulting connection options array, ie, all the default values that are
- * available from the environment etc. On error (eg out of memory),
+ * available from the environment etc. On error (eg out of memory),
* NULL is returned.
*
* Using this function, an application may determine all possible options
* and their current default values.
*
* NOTE: as of PostgreSQL 7.0, the returned array is dynamically allocated
- * and should be freed when no longer needed via PQconninfoFree(). (In prior
+ * and should be freed when no longer needed via PQconninfoFree(). (In prior
* versions, the returned array was static, but that's not thread-safe.)
* Pre-7.0 applications that use this function will see a small memory leak
* until they are updated to call PQconninfoFree.
@@ -1433,7 +1433,7 @@ connectDBComplete(PGconn *conn)
for (;;)
{
/*
- * Wait, if necessary. Note that the initial state (just after
+ * Wait, if necessary. Note that the initial state (just after
* PQconnectStart) is to wait for the socket to select for writing.
*/
switch (flag)
@@ -1495,7 +1495,7 @@ connectDBComplete(PGconn *conn)
* will not block.
* o If you do not supply an IP address for the remote host (i.e. you
* supply a host name instead) then PQconnectStart will block on
- * gethostbyname. You will be fine if using Unix sockets (i.e. by
+ * gethostbyname. You will be fine if using Unix sockets (i.e. by
* supplying neither a host name nor a host address).
* o If your backend wants to use Kerberos authentication then you must
* supply both a host name and a host address, otherwise this function
@@ -1782,7 +1782,7 @@ keep_going: /* We will come back to here until there is
/*
* This connection failed --- set up error report, then
* close socket (do it this way in case close() affects
- * the value of errno...). We will ignore the connect()
+ * the value of errno...). We will ignore the connect()
* failure and keep going if there are more addresses.
*/
connectFailureMessage(conn, SOCK_ERRNO);
@@ -2477,7 +2477,7 @@ keep_going: /* We will come back to here until there is
/*
* If we tried to send application_name, check to see
* if the error is about that --- pre-9.0 servers will
- * reject it at this stage of the process. If so,
+ * reject it at this stage of the process. If so,
* close the connection and retry without sending
* application_name. We could possibly get a false
* SQLSTATE match here and retry uselessly, but there
@@ -2628,9 +2628,9 @@ internal_ping(PGconn *conn)
/*
* If we failed to get any ERROR response from the postmaster, report
- * PQPING_NO_RESPONSE. This result could be somewhat misleading for a
+ * PQPING_NO_RESPONSE. This result could be somewhat misleading for a
* pre-7.4 server, since it won't send back a SQLSTATE, but those are long
- * out of support. Another corner case where the server could return a
+ * out of support. Another corner case where the server could return a
* failure without a SQLSTATE is fork failure, but NO_RESPONSE isn't
* totally unreasonable for that anyway. We expect that every other
* failure case in a modern server will produce a report with a SQLSTATE.
@@ -2849,7 +2849,7 @@ freePGconn(PGconn *conn)
* - properly close a connection to the backend
*
* This should reset or release all transient state, but NOT the connection
- * parameters. On exit, the PGconn should be in condition to start a fresh
+ * parameters. On exit, the PGconn should be in condition to start a fresh
* connection with the same parameters (see PQreset()).
*/
static void
@@ -2985,7 +2985,7 @@ PQreset(PGconn *conn)
if (connectDBStart(conn) && connectDBComplete(conn))
{
/*
- * Notify event procs of successful reset. We treat an event proc
+ * Notify event procs of successful reset. We treat an event proc
* failure as disabling the connection ... good idea?
*/
int i;
@@ -3045,7 +3045,7 @@ PQresetPoll(PGconn *conn)
if (status == PGRES_POLLING_OK)
{
/*
- * Notify event procs of successful reset. We treat an event proc
+ * Notify event procs of successful reset. We treat an event proc
* failure as disabling the connection ... good idea?
*/
int i;
@@ -3234,7 +3234,7 @@ cancel_errReturn:
* Returns TRUE if able to send the cancel request, FALSE if not.
*
* On failure, an error message is stored in *errbuf, which must be of size
- * errbufsize (recommended size is 256 bytes). *errbuf is not changed on
+ * errbufsize (recommended size is 256 bytes). *errbuf is not changed on
* success return.
*/
int
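A sketch of the call as documented (PQgetCancel/PQfreeCancel manage the PGcancel object; the 256-byte buffer follows the recommendation above):

    #include <stdio.h>
    #include "libpq-fe.h"

    static void
    cancel_running_query(PGconn *conn)
    {
        char      errbuf[256];  /* recommended size */
        PGcancel *cancel = PQgetCancel(conn);

        if (cancel == NULL)
            return;
        if (!PQcancel(cancel, errbuf, sizeof(errbuf)))
            fprintf(stderr, "cancel request failed: %s\n", errbuf);
        PQfreeCancel(cancel);
    }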
@@ -4011,7 +4011,7 @@ parseServiceFile(const char *serviceFile,
* PQconninfoParse
*
* Parse a string like PQconnectdb() would do and return the
- * resulting connection options array. NULL is returned on failure.
+ * resulting connection options array. NULL is returned on failure.
* The result contains only options specified directly in the string,
* not any possible default values.
*
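A minimal usage sketch (the conninfo string is illustrative; on out-of-memory the errmsg pointer may be left NULL):

    #include <stdio.h>
    #include "libpq-fe.h"

    static void
    dump_conninfo(void)
    {
        char       *errmsg = NULL;
        PQconninfoOption *opts =
            PQconninfoParse("host=example.com port=5433", &errmsg);
        PQconninfoOption *opt;

        if (opts == NULL)
        {
            fprintf(stderr, "%s\n", errmsg ? errmsg : "out of memory");
            PQfreemem(errmsg);
            return;
        }
        for (opt = opts; opt->keyword != NULL; opt++)
            if (opt->val != NULL)   /* only options present in the string */
                printf("%s = %s\n", opt->keyword, opt->val);
        PQconninfoFree(opts);
    }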
diff --git a/src/interfaces/libpq/fe-exec.c b/src/interfaces/libpq/fe-exec.c
index fdeee90dd32..2ae436f0888 100644
--- a/src/interfaces/libpq/fe-exec.c
+++ b/src/interfaces/libpq/fe-exec.c
@@ -92,7 +92,7 @@ static int check_field_number(const PGresult *res, int field_num);
* doesn't tell us up front how many tuples will be returned.)
* All other subsidiary storage for a PGresult is kept in PGresult_data blocks
* of size PGRESULT_DATA_BLOCKSIZE. The overhead at the start of each block
- * is just a link to the next one, if any. Free-space management info is
+ * is just a link to the next one, if any. Free-space management info is
* kept in the owning PGresult.
* A query returning a small amount of data will thus require three malloc
* calls: one for the PGresult, one for the tuples pointer array, and one
@@ -111,7 +111,7 @@ static int check_field_number(const PGresult *res, int field_num);
* blocks, instead of being crammed into a regular allocation block.
* Requirements for correct function are:
* PGRESULT_ALIGN_BOUNDARY must be a multiple of the alignment requirements
- * of all machine data types. (Currently this is set from configure
+ * of all machine data types. (Currently this is set from configure
* tests, so it should be OK automatically.)
* PGRESULT_SEP_ALLOC_THRESHOLD + PGRESULT_BLOCK_OVERHEAD <=
* PGRESULT_DATA_BLOCKSIZE
@@ -265,10 +265,10 @@ PQsetResultAttrs(PGresult *res, int numAttributes, PGresAttDesc *attDescs)
* Returns a deep copy of the provided 'src' PGresult, which cannot be NULL.
* The 'flags' argument controls which portions of the result will or will
* NOT be copied. The created result is always put into the
- * PGRES_TUPLES_OK status. The source result error message is not copied,
+ * PGRES_TUPLES_OK status. The source result error message is not copied,
* although cmdStatus is.
*
- * To set custom attributes, use PQsetResultAttrs. That function requires
+ * To set custom attributes, use PQsetResultAttrs. That function requires
* that there are no attrs contained in the result, so to use that
* function you cannot use the PG_COPYRES_ATTRS or PG_COPYRES_TUPLES
* options with this function.
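For instance, a deep copy that keeps attributes and tuples (PQcopyResult and the PG_COPYRES_* flags are declared in libpq-events.h):

    #include "libpq-events.h"

    static PGresult *
    clone_result(const PGresult *src)
    {
        /* attrs and tuples are copied here, so PQsetResultAttrs cannot
         * be applied to the copy afterwards, per the note above */
        return PQcopyResult(src, PG_COPYRES_ATTRS | PG_COPYRES_TUPLES);
    }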
@@ -296,7 +296,7 @@ PQcopyResult(const PGresult *src, int flags)
if (!dest)
return NULL;
- /* Always copy these over. Is cmdStatus really useful here? */
+ /* Always copy these over. Is cmdStatus really useful here? */
dest->client_encoding = src->client_encoding;
strcpy(dest->cmdStatus, src->cmdStatus);
@@ -751,7 +751,7 @@ pqPrepareAsyncResult(PGconn *conn)
PGresult *res;
/*
- * conn->result is the PGresult to return. If it is NULL (which probably
+ * conn->result is the PGresult to return. If it is NULL (which probably
* shouldn't happen) we assume there is an appropriate error message in
* conn->errorMessage.
*/
@@ -1423,7 +1423,7 @@ pqHandleSendFailure(PGconn *conn)
/* loop until no more data readable */ ;
/*
- * Parse any available input messages. Since we are in PGASYNC_IDLE
+ * Parse any available input messages. Since we are in PGASYNC_IDLE
* state, only NOTICE and NOTIFY messages will be eaten.
*/
parseInput(conn);
@@ -1618,7 +1618,7 @@ getCopyResult(PGconn *conn, ExecStatusType copytype)
* If the server connection has been lost, don't pretend everything is
* hunky-dory; instead return a PGRES_FATAL_ERROR result, and reset the
* asyncStatus to idle (corresponding to what we'd do if we'd detected I/O
- * error in the earlier steps in PQgetResult). The text returned in the
+ * error in the earlier steps in PQgetResult). The text returned in the
* result is whatever is in conn->errorMessage; we hope that was filled
* with something relevant when the lost connection was detected.
*/
@@ -1860,7 +1860,7 @@ PQexecFinish(PGconn *conn)
* If the query was not even sent, return NULL; conn->errorMessage is set to
* a relevant message.
* If the query was sent, a new PGresult is returned (which could indicate
- * either success or failure). On success, the PGresult contains status
+ * either success or failure). On success, the PGresult contains status
* PGRES_COMMAND_OK, and its parameter and column-heading fields describe
* the statement's inputs and outputs respectively.
* The user is responsible for freeing the PGresult via PQclear()
@@ -2199,7 +2199,7 @@ PQgetCopyData(PGconn *conn, char **buffer, int async)
* PQgetline - gets a newline-terminated string from the backend.
*
* Chiefly here so that applications can use "COPY <rel> to stdout"
- * and read the output string. Returns a null-terminated string in s.
+ * and read the output string. Returns a null-terminated string in s.
*
* XXX this routine is now deprecated, because it can't handle binary data.
* If called during a COPY BINARY we return EOF.
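The non-deprecated route is PQgetCopyData, which handles binary data; a sketch:

    #include <stdio.h>
    #include "libpq-fe.h"

    static void
    drain_copy_out(PGconn *conn)
    {
        char   *buf;
        int     len;

        while ((len = PQgetCopyData(conn, &buf, 0)) > 0)
        {
            fwrite(buf, 1, len, stdout);
            PQfreemem(buf);
        }
        /* len == -1: COPY done, call PQgetResult; len == -2: error */
    }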
@@ -2313,7 +2313,7 @@ PQputnbytes(PGconn *conn, const char *buffer, int nbytes)
* the application must call this routine to finish the command protocol.
*
* When using protocol 3.0 this is deprecated; it's cleaner to use PQgetResult
- * to get the transfer status. Note however that when using 2.0 protocol,
+ * to get the transfer status. Note however that when using 2.0 protocol,
* recovering from a copy failure often requires a PQreset. PQendcopy will
* take care of that, PQgetResult won't.
*
@@ -2541,7 +2541,7 @@ PQfname(const PGresult *res, int field_num)
* downcasing in the frontend might follow different locale rules than
* downcasing in the backend...
*
- * Returns -1 if no match. In the present backend it is also possible
+ * Returns -1 if no match. In the present backend it is also possible
* to have multiple matches, in which case the first one is found.
*/
int
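An illustration of the downcasing rule, assuming a result with columns named bingo and BINGO:

    #include "libpq-fe.h"

    static void
    fnumber_demo(const PGresult *res)
    {
        int i1 = PQfnumber(res, "BINGO");     /* matches column "bingo" */
        int i2 = PQfnumber(res, "\"BINGO\""); /* matches column "BINGO" */
        int i3 = PQfnumber(res, "nosuchcol"); /* -1 means no match */

        (void) i1; (void) i2; (void) i3;
    }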
@@ -2952,7 +2952,7 @@ PQfreemem(void *ptr)
*
* This function is here only for binary backward compatibility.
* New code should use PQfreemem(). A macro will automatically map
- * calls to PQfreemem. It should be removed in the future. bjm 2003-03-24
+ * calls to PQfreemem. It should be removed in the future. bjm 2003-03-24
*/
#undef PQfreeNotify
@@ -3147,7 +3147,7 @@ PQescapeInternal(PGconn *conn, const char *str, size_t len, bool as_ident)
/*
* If we are escaping a literal that contains backslashes, we use the
* escape string syntax so that the result is correct under either value
- * of standard_conforming_strings. We also emit a leading space in this
+ * of standard_conforming_strings. We also emit a leading space in this
* case, to guard against the possibility that the result might be
* interpolated immediately following an identifier.
*/
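The public wrappers over this logic are PQescapeLiteral and PQescapeIdentifier; a backslash-containing value comes back in E'...' form with the leading space described above:

    #include <stdio.h>
    #include <string.h>
    #include "libpq-fe.h"

    static void
    escape_demo(PGconn *conn)
    {
        const char *raw = "C:\\dir";
        char       *lit = PQescapeLiteral(conn, raw, strlen(raw));

        if (lit != NULL)
        {
            printf("[%s]\n", lit);  /* [ E'C:\\dir'], note leading space */
            PQfreemem(lit);
        }
    }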
diff --git a/src/interfaces/libpq/fe-lobj.c b/src/interfaces/libpq/fe-lobj.c
index b7e79942051..bd4162a7f6a 100644
--- a/src/interfaces/libpq/fe-lobj.c
+++ b/src/interfaces/libpq/fe-lobj.c
@@ -714,7 +714,7 @@ lo_initialize(PGconn *conn)
MemSet((char *) lobjfuncs, 0, sizeof(PGlobjfuncs));
/*
- * Execute the query to get all the functions at once. In 7.3 and later
+ * Execute the query to get all the functions at once. In 7.3 and later
* we need to be schema-safe. lo_create only exists in 8.1 and up.
* lo_truncate only exists in 8.3 and up.
*/
diff --git a/src/interfaces/libpq/fe-misc.c b/src/interfaces/libpq/fe-misc.c
index 3a3a1cee2cb..a93da700b52 100644
--- a/src/interfaces/libpq/fe-misc.c
+++ b/src/interfaces/libpq/fe-misc.c
@@ -656,13 +656,13 @@ retry3:
/*
* Hack to deal with the fact that some kernels will only give us back
* 1 packet per recv() call, even if we asked for more and there is
- * more available. If it looks like we are reading a long message,
+ * more available. If it looks like we are reading a long message,
* loop back to recv() again immediately, until we run out of data or
* buffer space. Without this, the block-and-restart behavior of
* libpq's higher levels leads to O(N^2) performance on long messages.
*
* Since we left-justified the data above, conn->inEnd gives the
- * amount of data already read in the current message. We consider
+ * amount of data already read in the current message. We consider
* the message "long" once we have acquired 32k ...
*/
if (conn->inEnd > 32768 &&
diff --git a/src/interfaces/libpq/fe-protocol2.c b/src/interfaces/libpq/fe-protocol2.c
index 6f8ac69733c..2a6af5e41c6 100644
--- a/src/interfaces/libpq/fe-protocol2.c
+++ b/src/interfaces/libpq/fe-protocol2.c
@@ -231,7 +231,7 @@ pqSetenvPoll(PGconn *conn)
case SETENV_STATE_QUERY1_SEND:
{
/*
- * Issue query to get information we need. Here we must
+ * Issue query to get information we need. Here we must
* use begin/commit in case autocommit is off by default
* in a 7.3 server.
*
@@ -865,7 +865,7 @@ pqGetErrorNotice2(PGconn *conn, bool isError)
/*
* Since the message might be pretty long, we create a temporary
- * PQExpBuffer rather than using conn->workBuffer. workBuffer is intended
+ * PQExpBuffer rather than using conn->workBuffer. workBuffer is intended
* for stuff that is expected to be short.
*/
initPQExpBuffer(&workBuf);
@@ -959,10 +959,10 @@ failure:
/*
* checkXactStatus - attempt to track transaction-block status of server
*
- * This is called each time we receive a command-complete message. By
+ * This is called each time we receive a command-complete message. By
* watching for messages from BEGIN/COMMIT/ROLLBACK commands, we can do
* a passable job of tracking the server's xact status. BUT: this does
- * not work at all on 7.3 servers with AUTOCOMMIT OFF. (Man, was that
+ * not work at all on 7.3 servers with AUTOCOMMIT OFF. (Man, was that
* feature ever a mistake.) Caveat user.
*
* The tags known here are all those used as far back as 7.0; is it worth
diff --git a/src/interfaces/libpq/fe-protocol3.c b/src/interfaces/libpq/fe-protocol3.c
index 18b7483db79..5d537d4b416 100644
--- a/src/interfaces/libpq/fe-protocol3.c
+++ b/src/interfaces/libpq/fe-protocol3.c
@@ -166,7 +166,7 @@ pqParseInput3(PGconn *conn)
* ERROR messages are displayed using the notice processor;
* ParameterStatus is handled normally; anything else is just
* dropped on the floor after displaying a suitable warning
- * notice. (An ERROR is very possibly the backend telling us why
+ * notice. (An ERROR is very possibly the backend telling us why
* it is about to close the connection, so we don't want to just
* discard it...)
*/
@@ -367,7 +367,7 @@ pqParseInput3(PGconn *conn)
case 'd': /* Copy Data */
/*
- * If we see Copy Data, just silently drop it. This would
+ * If we see Copy Data, just silently drop it. This would
* only occur if the application exits COPY OUT mode too
* early.
*/
@@ -376,7 +376,7 @@ pqParseInput3(PGconn *conn)
case 'c': /* Copy Done */
/*
- * If we see Copy Done, just silently drop it. This is
+ * If we see Copy Done, just silently drop it. This is
* the normal case during PQendcopy. We will keep
* swallowing data, expecting to see command-complete for
* the COPY command.
@@ -732,14 +732,14 @@ pqGetErrorNotice3(PGconn *conn, bool isError)
/*
* Since the fields might be pretty long, we create a temporary
- * PQExpBuffer rather than using conn->workBuffer. workBuffer is intended
- * for stuff that is expected to be short. We shouldn't use
+ * PQExpBuffer rather than using conn->workBuffer. workBuffer is intended
+ * for stuff that is expected to be short. We shouldn't use
* conn->errorMessage either, since this might be only a notice.
*/
initPQExpBuffer(&workBuf);
/*
- * Make a PGresult to hold the accumulated fields. We temporarily lie
+ * Make a PGresult to hold the accumulated fields. We temporarily lie
* about the result status, so that PQmakeEmptyPGresult doesn't uselessly
* copy conn->errorMessage.
*/
@@ -928,7 +928,7 @@ reportErrorPosition(PQExpBuffer msg, const char *query, int loc, int encoding)
/*
* Each character might occupy multiple physical bytes in the string, and
* in some Far Eastern character sets it might take more than one screen
- * column as well. We compute the starting byte offset and starting
+ * column as well. We compute the starting byte offset and starting
* screen column of each logical character, and store these in qidx[] and
* scridx[] respectively.
*/
@@ -956,8 +956,8 @@ reportErrorPosition(PQExpBuffer msg, const char *query, int loc, int encoding)
/*
* Within the scanning loop, cno is the current character's logical
* number, qoffset is its offset in wquery, and scroffset is its starting
- * logical screen column (all indexed from 0). "loc" is the logical
- * character number of the error location. We scan to determine loc_line
+ * logical screen column (all indexed from 0). "loc" is the logical
+ * character number of the error location. We scan to determine loc_line
* (the 1-based line number containing loc) and ibeg/iend (first character
* number and last+1 character number of the line containing loc). Note
* that qidx[] and scridx[] are filled only as far as iend.
@@ -1390,7 +1390,7 @@ pqGetCopyData3(PGconn *conn, char **buffer, int async)
for (;;)
{
/*
- * Collect the next input message. To make life simpler for async
+ * Collect the next input message. To make life simpler for async
* callers, we keep returning 0 until the next message is fully
* available, even if it is not Copy Data.
*/
@@ -1399,8 +1399,8 @@ pqGetCopyData3(PGconn *conn, char **buffer, int async)
{
/*
* On end-of-copy, exit COPY_OUT or COPY_BOTH mode and let caller
- * read status with PQgetResult(). The normal case is that it's
- * Copy Done, but we let parseInput read that. If error, we
+ * read status with PQgetResult(). The normal case is that it's
+ * Copy Done, but we let parseInput read that. If error, we
* expect the state was already changed.
*/
if (msgLength == -1)
@@ -1603,7 +1603,7 @@ pqEndcopy3(PGconn *conn)
/*
* Non-blocking connections may have to abort at this point. If everyone
* played the game there should be no problem, but in error scenarios the
- * expected messages may not have arrived yet. (We are assuming that the
+ * expected messages may not have arrived yet. (We are assuming that the
* backend's packetizing will ensure that CommandComplete arrives along
* with the CopyDone; are there corner cases where that doesn't happen?)
*/
diff --git a/src/interfaces/libpq/fe-secure.c b/src/interfaces/libpq/fe-secure.c
index 68a88e30b32..81f1375601d 100644
--- a/src/interfaces/libpq/fe-secure.c
+++ b/src/interfaces/libpq/fe-secure.c
@@ -1039,7 +1039,7 @@ destroy_ssl_system(void)
* Initialize (potentially) per-connection SSL data, namely the
* client certificate, private key, and trusted CA certs.
*
- * conn->ssl must already be created. It receives the connection's client
+ * conn->ssl must already be created. It receives the connection's client
* certificate and private key. Note however that certificates also get
* loaded into the SSL_context object, and are therefore accessible to all
* connections in this process. This should be OK as long as there aren't
@@ -1404,7 +1404,7 @@ initialize_SSL(PGconn *conn)
{
/*
* stat() failed; assume root file doesn't exist. If sslmode is
- * verify-ca or verify-full, this is an error. Otherwise, continue
+ * verify-ca or verify-full, this is an error. Otherwise, continue
* without performing any server cert verification.
*/
if (conn->sslmode[0] == 'v') /* "verify-ca" or "verify-full" */
@@ -1642,7 +1642,7 @@ PQgetssl(PGconn *conn)
#if defined(ENABLE_THREAD_SAFETY) && !defined(WIN32)
/*
- * Block SIGPIPE for this thread. This prevents send()/write() from exiting
+ * Block SIGPIPE for this thread. This prevents send()/write() from exiting
* the application.
*/
int
@@ -1681,7 +1681,7 @@ pq_block_sigpipe(sigset_t *osigset, bool *sigpipe_pending)
* Discard any pending SIGPIPE and reset the signal mask.
*
* Note: we are effectively assuming here that the C library doesn't queue
- * up multiple SIGPIPE events. If it did, then we'd accidentally leave
+ * up multiple SIGPIPE events. If it did, then we'd accidentally leave
* ours in the queue when an event was already pending and we got another.
* As long as it doesn't queue multiple events, we're OK because the caller
* can't tell the difference.
@@ -1692,7 +1692,7 @@ pq_block_sigpipe(sigset_t *osigset, bool *sigpipe_pending)
* gotten one, pass got_epipe = TRUE.
*
* We do not want this to change errno, since if it did that could lose
- * the error code from a preceding send(). We essentially assume that if
+ * the error code from a preceding send(). We essentially assume that if
* we were able to do pq_block_sigpipe(), this can't fail.
*/
void
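These two routines are libpq-internal; the following is a sketch of the same pattern in plain POSIX terms, simplified in that the real code also checks for a SIGPIPE that was already pending before blocking, so it never consumes one it didn't cause:

    #include <errno.h>
    #include <pthread.h>
    #include <signal.h>
    #include <sys/socket.h>
    #include <time.h>

    static ssize_t
    send_no_sigpipe(int sock, const void *buf, size_t len)
    {
        sigset_t        sigpipe_mask, osigset;
        struct timespec zerotime = {0, 0};
        ssize_t         rc;
        int             save_errno;

        sigemptyset(&sigpipe_mask);
        sigaddset(&sigpipe_mask, SIGPIPE);
        pthread_sigmask(SIG_BLOCK, &sigpipe_mask, &osigset);

        rc = send(sock, buf, len, 0);
        save_errno = errno;
        if (rc < 0 && save_errno == EPIPE)
            (void) sigtimedwait(&sigpipe_mask, NULL, &zerotime); /* eat it */
        pthread_sigmask(SIG_SETMASK, &osigset, NULL);
        errno = save_errno;     /* preserve send()'s errno, per the comment */
        return rc;
    }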
diff --git a/src/interfaces/libpq/libpq-fe.h b/src/interfaces/libpq/libpq-fe.h
index d7802753ef4..ef4660af551 100644
--- a/src/interfaces/libpq/libpq-fe.h
+++ b/src/interfaces/libpq/libpq-fe.h
@@ -54,9 +54,9 @@ typedef enum
* be used for user feedback or similar purposes.
*/
CONNECTION_STARTED, /* Waiting for connection to be made. */
- CONNECTION_MADE, /* Connection OK; waiting to send. */
+ CONNECTION_MADE, /* Connection OK; waiting to send. */
CONNECTION_AWAITING_RESPONSE, /* Waiting for a response from the
- * postmaster. */
+ * postmaster. */
CONNECTION_AUTH_OK, /* Received authentication; waiting for
* backend startup. */
CONNECTION_SETENV, /* Negotiating environment. */
diff --git a/src/interfaces/libpq/pqexpbuffer.c b/src/interfaces/libpq/pqexpbuffer.c
index 1abfed79894..32c07ef3e69 100644
--- a/src/interfaces/libpq/pqexpbuffer.c
+++ b/src/interfaces/libpq/pqexpbuffer.c
@@ -161,7 +161,7 @@ resetPQExpBuffer(PQExpBuffer str)
* Make sure there is enough space for 'needed' more bytes in the buffer
* ('needed' does not include the terminating null).
*
- * Returns 1 if OK, 0 if failed to enlarge buffer. (In the latter case
+ * Returns 1 if OK, 0 if failed to enlarge buffer. (In the latter case
* the buffer is left in "broken" state.)
*/
int
@@ -175,7 +175,7 @@ enlargePQExpBuffer(PQExpBuffer str, size_t needed)
/*
* Guard against ridiculous "needed" values, which can occur if we're fed
- * bogus data. Without this, we can get an overflow or infinite loop in
+ * bogus data. Without this, we can get an overflow or infinite loop in
* the following.
*/
if (needed >= ((size_t) INT_MAX - str->len))
@@ -202,7 +202,7 @@ enlargePQExpBuffer(PQExpBuffer str, size_t needed)
/*
* Clamp to INT_MAX in case we went past it. Note we are assuming here
- * that INT_MAX <= UINT_MAX/2, else the above loop could overflow. We
+ * that INT_MAX <= UINT_MAX/2, else the above loop could overflow. We
* will still have newlen >= needed.
*/
if (newlen > (size_t) INT_MAX)
@@ -223,7 +223,7 @@ enlargePQExpBuffer(PQExpBuffer str, size_t needed)
/*
* printfPQExpBuffer
* Format text data under the control of fmt (an sprintf-like format string)
- * and insert it into str. More space is allocated to str if necessary.
+ * and insert it into str. More space is allocated to str if necessary.
* This is a convenience routine that does the same thing as
* resetPQExpBuffer() followed by appendPQExpBuffer().
*/
diff --git a/src/interfaces/libpq/pqexpbuffer.h b/src/interfaces/libpq/pqexpbuffer.h
index d60fedc762e..1a2d1235e7f 100644
--- a/src/interfaces/libpq/pqexpbuffer.h
+++ b/src/interfaces/libpq/pqexpbuffer.h
@@ -37,7 +37,7 @@
* more space. We must always have maxlen > len.
*
* An exception occurs if we failed to allocate enough memory for the string
- * buffer. In that case data points to a statically allocated empty string,
+ * buffer. In that case data points to a statically allocated empty string,
* and len = maxlen = 0.
*-------------------------
*/
@@ -107,7 +107,7 @@ extern void initPQExpBuffer(PQExpBuffer str);
*
* NOTE: some routines build up a string using PQExpBuffer, and then
* release the PQExpBufferData but return the data string itself to their
- * caller. At that point the data string looks like a plain malloc'd
+ * caller. At that point the data string looks like a plain malloc'd
* string.
*/
extern void destroyPQExpBuffer(PQExpBuffer str);
@@ -126,7 +126,7 @@ extern void resetPQExpBuffer(PQExpBuffer str);
* Make sure there is enough space for 'needed' more bytes in the buffer
* ('needed' does not include the terminating null).
*
- * Returns 1 if OK, 0 if failed to enlarge buffer. (In the latter case
+ * Returns 1 if OK, 0 if failed to enlarge buffer. (In the latter case
* the buffer is left in "broken" state.)
*/
extern int enlargePQExpBuffer(PQExpBuffer str, size_t needed);
@@ -134,7 +134,7 @@ extern int enlargePQExpBuffer(PQExpBuffer str, size_t needed);
/*------------------------
* printfPQExpBuffer
* Format text data under the control of fmt (an sprintf-like format string)
- * and insert it into str. More space is allocated to str if necessary.
+ * and insert it into str. More space is allocated to str if necessary.
* This is a convenience routine that does the same thing as
* resetPQExpBuffer() followed by appendPQExpBuffer().
*/
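A typical life cycle under these conventions, including the broken-buffer check:

    #include <stdio.h>
    #include "pqexpbuffer.h"

    static void
    expbuffer_demo(void)
    {
        PQExpBufferData buf;

        initPQExpBuffer(&buf);
        printfPQExpBuffer(&buf, "SELECT %d", 42);   /* reset + format */
        appendPQExpBufferStr(&buf, " -- tail");
        if (PQExpBufferBroken(&buf))
            fprintf(stderr, "out of memory\n");     /* data is "" here */
        else
            printf("%s\n", buf.data);
        termPQExpBuffer(&buf);  /* frees buf.data but not buf itself */
    }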
diff --git a/src/pl/plperl/plperl.c b/src/pl/plperl/plperl.c
index 6b0c1cad5cb..f1c9d46a53f 100644
--- a/src/pl/plperl/plperl.c
+++ b/src/pl/plperl/plperl.c
@@ -60,8 +60,8 @@ PG_MODULE_MAGIC;
/**********************************************************************
- * Information associated with a Perl interpreter. We have one interpreter
- * that is used for all plperlu (untrusted) functions. For plperl (trusted)
+ * Information associated with a Perl interpreter. We have one interpreter
+ * that is used for all plperlu (untrusted) functions. For plperl (trusted)
* functions, there is a separate interpreter for each effective SQL userid.
* (This is needed to ensure that an unprivileged user can't inject Perl code
* that'll be executed with the privileges of some other SQL user.)
@@ -2926,7 +2926,7 @@ plperl_spi_execute_fetch_result(SPITupleTable *tuptable, int processed,
/*
* Note: plperl_return_next is called both in Postgres and Perl contexts.
- * We report any errors in Postgres fashion (via ereport). If called in
+ * We report any errors in Postgres fashion (via ereport). If called in
* Perl context, it is SPI.xs's responsibility to catch the error and
* convert to a Perl error. We assume (perhaps without adequate justification)
* that we need not abort the current transaction if the Perl code traps the
diff --git a/src/pl/plpgsql/src/pl_comp.c b/src/pl/plpgsql/src/pl_comp.c
index 79ff6f57669..26d7a4880f0 100644
--- a/src/pl/plpgsql/src/pl_comp.c
+++ b/src/pl/plpgsql/src/pl_comp.c
@@ -289,7 +289,7 @@ do_compile(FunctionCallInfo fcinfo,
MemoryContext func_cxt;
/*
- * Set up the scanner input and error info. We assume that this function
+ * Set up the scanner input and error info. We assume that this function
* cannot be invoked recursively, so there's no need to save and restore
* the static variables used here.
*/
@@ -379,7 +379,7 @@ do_compile(FunctionCallInfo fcinfo,
* needed permanently, so make them in tmp cxt.
*
* We also need to resolve any polymorphic input or output
- * argument types. In validation mode we won't be able to, so we
+ * argument types. In validation mode we won't be able to, so we
* arbitrarily assume we are dealing with integers.
*/
MemoryContextSwitchTo(compile_tmp_cxt);
@@ -462,7 +462,7 @@ do_compile(FunctionCallInfo fcinfo,
/*
* If there's just one OUT parameter, out_param_varno points
- * directly to it. If there's more than one, build a row that
+ * directly to it. If there's more than one, build a row that
* holds all of them.
*/
if (num_out_args == 1)
@@ -768,7 +768,7 @@ plpgsql_compile_inline(char *proc_source)
int i;
/*
- * Set up the scanner input and error info. We assume that this function
+ * Set up the scanner input and error info. We assume that this function
* cannot be invoked recursively, so there's no need to save and restore
* the static variables used here.
*/
@@ -1024,7 +1024,7 @@ plpgsql_post_column_ref(ParseState *pstate, ColumnRef *cref, Node *var)
/*
* If we find a record/row variable but can't match a field name, throw
- * error if there was no core resolution for the ColumnRef either. In
+ * error if there was no core resolution for the ColumnRef either. In
* that situation, the reference is inevitably going to fail, and
* complaining about the record/row variable is likely to be more on-point
* than the core parser's error message. (It's too bad we don't have
@@ -1218,7 +1218,7 @@ resolve_column_ref(ParseState *pstate, PLpgSQL_expr *expr,
/*
* We should not get here, because a RECFIELD datum should
* have been built at parse time for every possible qualified
- * reference to fields of this record. But if we do, handle
+ * reference to fields of this record. But if we do, handle
* it like field-not-found: throw error or return NULL.
*/
if (error_if_no_field)
@@ -1827,7 +1827,7 @@ plpgsql_parse_cwordrowtype(List *idents)
*
* The returned struct may be a PLpgSQL_var, PLpgSQL_row, or
* PLpgSQL_rec depending on the given datatype, and is allocated via
- * palloc. The struct is automatically added to the current datum
+ * palloc. The struct is automatically added to the current datum
* array, and optionally to the current namespace.
*/
PLpgSQL_variable *
@@ -2276,7 +2276,7 @@ plpgsql_adddatum(PLpgSQL_datum *new)
* last call.
*
* This is used around a DECLARE section to create a list of the VARs
- * that have to be initialized at block entry. Note that VARs can also
+ * that have to be initialized at block entry. Note that VARs can also
* be created elsewhere than DECLARE, eg by a FOR-loop, but it is then
* the responsibility of special-purpose code to initialize them.
* ----------
@@ -2430,7 +2430,7 @@ plpgsql_resolve_polymorphic_argtypes(int numargs,
* delete_function - clean up as much as possible of a stale function cache
*
* We can't release the PLpgSQL_function struct itself, because of the
- * possibility that there are fn_extra pointers to it. We can release
+ * possibility that there are fn_extra pointers to it. We can release
* the subsidiary storage, but only if there are no active evaluations
* in progress. Otherwise we'll just leak that storage. Since the
* case would only occur if a pg_proc update is detected during a nested
diff --git a/src/pl/plpgsql/src/pl_exec.c b/src/pl/plpgsql/src/pl_exec.c
index 9e03f2b5154..0b64dfaa9a0 100644
--- a/src/pl/plpgsql/src/pl_exec.c
+++ b/src/pl/plpgsql/src/pl_exec.c
@@ -54,7 +54,7 @@ typedef struct
* creates its own "eval_econtext" ExprContext within this estate for
* per-evaluation workspace. eval_econtext is freed at normal function exit,
* and the EState is freed at transaction end (in case of error, we assume
- * that the abort mechanisms clean it all up). Furthermore, any exception
+ * that the abort mechanisms clean it all up). Furthermore, any exception
* block within a function has to have its own eval_econtext separate from
* the containing function's, so that we can clean up ExprContext callbacks
* properly at subtransaction exit. We maintain a stack that tracks the
@@ -62,7 +62,7 @@ typedef struct
*
* This arrangement is a bit tedious to maintain, but it's worth the trouble
* so that we don't have to re-prepare simple expressions on each trip through
- * a function. (We assume the case to optimize is many repetitions of a
+ * a function. (We assume the case to optimize is many repetitions of a
* function within a transaction.)
*/
typedef struct SimpleEcontextStackEntry
@@ -523,7 +523,7 @@ plpgsql_exec_trigger(PLpgSQL_function *func,
*
* We make the tupdescs available in both records even though only one may
* have a value. This allows parsing of record references to succeed in
- * functions that are used for multiple trigger types. For example, we
+ * functions that are used for multiple trigger types. For example, we
* might have a test like "if (TG_OP = 'INSERT' and NEW.foo = 'xyz')",
* which should parse regardless of the current trigger type.
*/
@@ -3042,7 +3042,7 @@ exec_stmt_execsql(PLpgSQL_execstate *estate,
/*
* Check for error, and set FOUND if appropriate (for historical reasons
- * we set FOUND only for certain query types). Also Assert that we
+ * we set FOUND only for certain query types). Also Assert that we
* identified the statement type the same as SPI did.
*/
switch (rc)
@@ -3727,7 +3727,7 @@ exec_assign_value(PLpgSQL_execstate *estate,
var->datatype->typlen);
/*
- * Now free the old value. (We can't do this any earlier
+ * Now free the old value. (We can't do this any earlier
* because of the possibility that we are assigning the var's
* old value to it, eg "foo := foo". We could optimize out
* the assignment altogether in such cases, but it's too
@@ -4100,7 +4100,7 @@ exec_assign_value(PLpgSQL_execstate *estate,
* At present this doesn't handle PLpgSQL_expr or PLpgSQL_arrayelem datums.
*
* NOTE: caller must not modify the returned value, since it points right
- * at the stored value in the case of pass-by-reference datatypes. In some
+ * at the stored value in the case of pass-by-reference datatypes. In some
* cases we have to palloc a return value, and in such cases we put it into
* the estate's short-term memory context.
*/
@@ -4613,7 +4613,7 @@ exec_for_query(PLpgSQL_execstate *estate, PLpgSQL_stmt_forq *stmt,
PinPortal(portal);
/*
- * Fetch the initial tuple(s). If prefetching is allowed then we grab a
+ * Fetch the initial tuple(s). If prefetching is allowed then we grab a
* few more rows to avoid multiple trips through executor startup
* overhead.
*/
@@ -4751,7 +4751,7 @@ loop_exit:
* Because we only store one execution tree for a simple expression, we
* can't handle recursion cases. So, if we see the tree is already busy
* with an evaluation in the current xact, we just return FALSE and let the
- * caller run the expression the hard way. (Other alternatives such as
+ * caller run the expression the hard way. (Other alternatives such as
* creating a new tree for a recursive call either introduce memory leaks,
* or add enough bookkeeping to be doubtful wins anyway.) Another case that
* is covered by the expr_simple_in_use test is where a previous execution
@@ -4911,7 +4911,7 @@ exec_eval_simple_expr(PLpgSQL_execstate *estate,
*
* The ParamListInfo array is initially all zeroes, in particular the
* ptype values are all InvalidOid. This causes the executor to call the
- * paramFetch hook each time it wants a value. We thus evaluate only the
+ * paramFetch hook each time it wants a value. We thus evaluate only the
* parameters actually demanded.
*
* The result is a locally palloc'd array that should be pfree'd after use;
@@ -4947,7 +4947,7 @@ setup_param_list(PLpgSQL_execstate *estate, PLpgSQL_expr *expr)
estate->cur_expr = expr;
/*
- * Also make sure this is set before parser hooks need it. There is
+ * Also make sure this is set before parser hooks need it. There is
* no need to save and restore, since the value is always correct once
* set.
*/
@@ -4982,7 +4982,7 @@ plpgsql_param_fetch(ParamListInfo params, int paramid)
/*
* Do nothing if asked for a value that's not supposed to be used by this
- * SQL expression. This avoids unwanted evaluations when functions such
+ * SQL expression. This avoids unwanted evaluations when functions such
* as copyParamList try to materialize all the values.
*/
if (!bms_is_member(dno, expr->paramnos))
@@ -5679,7 +5679,7 @@ plpgsql_create_econtext(PLpgSQL_execstate *estate)
/*
* Create an EState for evaluation of simple expressions, if there's not
- * one already in the current transaction. The EState is made a child of
+ * one already in the current transaction. The EState is made a child of
* TopTransactionContext so it will have the right lifespan.
*/
if (simple_eval_estate == NULL)
diff --git a/src/pl/plpgsql/src/pl_funcs.c b/src/pl/plpgsql/src/pl_funcs.c
index 1f83114d9b1..b79a72fddc4 100644
--- a/src/pl/plpgsql/src/pl_funcs.c
+++ b/src/pl/plpgsql/src/pl_funcs.c
@@ -25,7 +25,7 @@
* list or "chain" (from the youngest item to the root) is accessible from
* any one plpgsql statement. During initial parsing of a function, ns_top
* points to the youngest item accessible from the block currently being
- * parsed. We store the entire tree, however, since at runtime we will need
+ * parsed. We store the entire tree, however, since at runtime we will need
* to access the chain that's relevant to any one statement.
*
* Block boundaries in the namespace chain are marked by PLPGSQL_NSTYPE_LABEL
@@ -113,7 +113,7 @@ plpgsql_ns_additem(int itemtype, int itemno, const char *name)
*
* If localmode is TRUE, only the topmost block level is searched.
*
- * name1 must be non-NULL. Pass NULL for name2 and/or name3 if parsing a name
+ * name1 must be non-NULL. Pass NULL for name2 and/or name3 if parsing a name
* with fewer than three components.
*
* If names_used isn't NULL, *names_used receives the number of names
diff --git a/src/pl/plpgsql/src/pl_scanner.c b/src/pl/plpgsql/src/pl_scanner.c
index e1c0b625954..d44f5284e8e 100644
--- a/src/pl/plpgsql/src/pl_scanner.c
+++ b/src/pl/plpgsql/src/pl_scanner.c
@@ -44,7 +44,7 @@ IdentifierLookup plpgsql_IdentifierLookup = IDENTIFIER_LOOKUP_NORMAL;
*
* For the most part, the reserved keywords are those that start a PL/pgSQL
* statement (and so would conflict with an assignment to a variable of the
- * same name). We also don't sweat much about reserving keywords that
+ * same name). We also don't sweat much about reserving keywords that
* are reserved in the core grammar. Try to avoid reserving other words.
*/
@@ -158,7 +158,7 @@ typedef struct
/*
* Scanner working state. At some point we might wish to fold all this
- * into a YY_EXTRA struct. For the moment, there is no need for plpgsql's
+ * into a YY_EXTRA struct. For the moment, there is no need for plpgsql's
* lexer to be re-entrant, and the notational burden of passing a yyscanner
* pointer around is great enough to not want to do it without need.
*/
@@ -326,7 +326,7 @@ plpgsql_yylex(void)
/*
* Internal yylex function. This wraps the core lexer and adds one feature:
- * a token pushback stack. We also make a couple of trivial single-token
+ * a token pushback stack. We also make a couple of trivial single-token
* translations from what the core lexer does to what we want, in particular
* interfacing from the core_YYSTYPE to YYSTYPE union.
*/
@@ -472,7 +472,7 @@ plpgsql_yyerror(const char *message)
/*
* If we have done any lookahead then flex will have restored the
* character after the end-of-token. Zap it again so that we report
- * only the single token here. This modifies scanbuf but we no longer
+ * only the single token here. This modifies scanbuf but we no longer
* care about that.
*/
yytext[plpgsql_yyleng] = '\0';
diff --git a/src/pl/plpgsql/src/plpgsql.h b/src/pl/plpgsql/src/plpgsql.h
index 89103aea8c5..ab31e954ca6 100644
--- a/src/pl/plpgsql/src/plpgsql.h
+++ b/src/pl/plpgsql/src/plpgsql.h
@@ -636,7 +636,7 @@ typedef struct PLpgSQL_func_hashkey
/*
* For a trigger function, the OID of the relation triggered on is part of
* the hash key --- we want to compile the trigger separately for each
- * relation it is used with, in case the rowtype is different. Zero if
+ * relation it is used with, in case the rowtype is different. Zero if
* not called as a trigger.
*/
Oid trigrelOid;
@@ -775,7 +775,7 @@ typedef struct PLpgSQL_execstate
*
* Also, immediately before any call to func_setup, PL/pgSQL fills in the
* error_callback and assign_expr fields with pointers to its own
- * plpgsql_exec_error_callback and exec_assign_expr functions. This is
+ * plpgsql_exec_error_callback and exec_assign_expr functions. This is
* a somewhat ad-hoc expedient to simplify life for debugger plugins.
*/
diff --git a/src/pl/plpython/plpython.c b/src/pl/plpython/plpython.c
index 642417b595b..8cf2d716f14 100644
--- a/src/pl/plpython/plpython.c
+++ b/src/pl/plpython/plpython.c
@@ -322,7 +322,7 @@ PG_FUNCTION_INFO_V1(plpython_inline_handler);
/* most of the remaining declarations, all static */
/*
- * These should only be called once from _PG_init. Initialize the
+ * These should only be called once from _PG_init. Initialize the
* Python interpreter and global data.
*/
static void PLy_init_interp(void);
@@ -678,7 +678,7 @@ PLy_trigger_handler(FunctionCallInfo fcinfo, PLyProcedure *proc)
Assert(CALLED_AS_TRIGGER(fcinfo));
/*
- * Input/output conversion for trigger tuples. Use the result TypeInfo
+ * Input/output conversion for trigger tuples. Use the result TypeInfo
* variable to store the tuple conversion info. We do this over again on
* each call to cover the possibility that the relation's tupdesc changed
* since the trigger was last called. PLy_input_tuple_funcs and
@@ -2113,7 +2113,7 @@ PLy_output_datum_func2(PLyObToDatum *arg, HeapTuple typeTup)
/*
* Select a conversion function to convert Python objects to PostgreSQL
- * datums. Most data types can go through the generic function.
+ * datums. Most data types can go through the generic function.
*/
switch (getBaseType(element_type ? element_type : arg->typoid))
{
@@ -2458,7 +2458,7 @@ PLyObject_ToCompositeDatum(PLyTypeInfo *info, TupleDesc desc, PyObject *plrv)
}
/*
- * Convert a Python object to a PostgreSQL bool datum. This can't go
+ * Convert a Python object to a PostgreSQL bool datum. This can't go
* through the generic conversion function, because Python attaches a
* Boolean value to everything, more things than the PostgreSQL bool
* type can parse.
@@ -4470,7 +4470,7 @@ failure:
/* Emit a PG error or notice, together with any available info about
* the current Python error, previously set by PLy_exception_set().
- * This should be used to propagate Python errors into PG. If fmt is
+ * This should be used to propagate Python errors into PG. If fmt is
* NULL, the Python error becomes the primary error message, otherwise
* it becomes the detail. If there is a Python traceback, it is put
* in the context.
@@ -4880,7 +4880,7 @@ PLy_free(void *ptr)
/*
* Convert a Python unicode object to a Python string/bytes object in
- * PostgreSQL server encoding. Reference ownership is passed to the
+ * PostgreSQL server encoding. Reference ownership is passed to the
* caller.
*/
static PyObject *
@@ -4945,7 +4945,7 @@ PLyUnicode_Bytes(PyObject *unicode)
* function. The result is palloc'ed.
*
* Note that this function is disguised as PyString_AsString() when
- * using Python 3. That function returns a pointer into the internal
+ * using Python 3. That function returns a pointer into the internal
* memory of the argument, which isn't exactly the interface of this
* function. But in either case you get a rather short-lived
* reference that you had better leave alone.
@@ -4963,7 +4963,7 @@ PLyUnicode_AsString(PyObject *unicode)
#if PY_MAJOR_VERSION >= 3
/*
* Convert a C string in the PostgreSQL server encoding to a Python
- * unicode object. Reference ownership is passed to the caller.
+ * unicode object. Reference ownership is passed to the caller.
*/
static PyObject *
PLyUnicode_FromString(const char *s)
diff --git a/src/pl/tcl/pltcl.c b/src/pl/tcl/pltcl.c
index 7b952b27e1a..37f33e15b41 100644
--- a/src/pl/tcl/pltcl.c
+++ b/src/pl/tcl/pltcl.c
@@ -1532,7 +1532,7 @@ pltcl_elog(ClientData cdata, Tcl_Interp *interp,
if (level == ERROR)
{
/*
- * We just pass the error back to Tcl. If it's not caught, it'll
+ * We just pass the error back to Tcl. If it's not caught, it'll
* eventually get converted to a PG error when we reach the call
* handler.
*/
diff --git a/src/port/chklocale.c b/src/port/chklocale.c
index 004c91bd04a..20b64a3383a 100644
--- a/src/port/chklocale.c
+++ b/src/port/chklocale.c
@@ -321,7 +321,7 @@ pg_get_encoding_from_locale(const char *ctype, bool write_message)
/*
* We print a warning if we got a CODESET string but couldn't recognize
- * it. This means we need another entry in the table.
+ * it. This means we need another entry in the table.
*/
if (write_message)
{
diff --git a/src/port/crypt.c b/src/port/crypt.c
index 9347d3b47c9..ef8bf46338d 100644
--- a/src/port/crypt.c
+++ b/src/port/crypt.c
@@ -23,7 +23,7 @@
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
@@ -118,7 +118,7 @@ typedef int int32_t;
* representation is to store one bit per byte in an array of bytes. Bit N of
* the NBS spec is stored as the LSB of the Nth byte (index N-1) in the array.
* Another representation stores the 64 bits in 8 bytes, with bits 1..8 in the
- * first byte, 9..16 in the second, and so on. The DES spec apparently has
+ * first byte, 9..16 in the second, and so on. The DES spec apparently has
* bit 1 in the MSB of the first byte, but that is particularly noxious so we
* bit-reverse each byte so that bit 1 is the LSB of the first byte, bit 8 is
* the MSB of the first byte. Specifically, the 64-bit input data and key are
@@ -126,21 +126,21 @@ typedef int int32_t;
* MSB format.
*
* DES operates internally on groups of 32 bits which are expanded to 48 bits
- * by permutation E and shrunk back to 32 bits by the S boxes. To speed up
+ * by permutation E and shrunk back to 32 bits by the S boxes. To speed up
* the computation, the expansion is applied only once, the expanded
* representation is maintained during the encryption, and a compression
- * permutation is applied only at the end. To speed up the S-box lookups,
+ * permutation is applied only at the end. To speed up the S-box lookups,
* the 48 bits are maintained as eight 6 bit groups, one per byte, which
* directly feed the eight S-boxes. Within each byte, the 6 bits are the
- * most significant ones. The low two bits of each byte are zero. (Thus,
+ * most significant ones. The low two bits of each byte are zero. (Thus,
* bit 1 of the 48 bit E expansion is stored as the "4"-valued bit of the
* first byte in the eight byte representation, bit 2 of the 48 bit value is
* the "8"-valued bit, and so on.) In fact, a combined "SPE"-box lookup is
* used, in which the output is the 64 bit result of an S-box lookup which
* has been permuted by P and expanded by E, and is ready for use in the next
* iteration. Two 32-bit wide tables, SPE[0] and SPE[1], are used for this
- * lookup. Since each byte in the 48 bit path is a multiple of four, indexed
- * lookup of SPE[0] and SPE[1] is simple and fast. The key schedule and
+ * lookup. Since each byte in the 48 bit path is a multiple of four, indexed
+ * lookup of SPE[0] and SPE[1] is simple and fast. The key schedule and
* "salt" are also converted to this 8*(6+2) format. The SPE table size is
* 8*64*8 = 4K bytes.
*
@@ -154,7 +154,7 @@ typedef int int32_t;
* The byte-order problem is unfortunate, since on the one hand it is good
* to have a machine-independent C_block representation (bits 1..8 in the
* first byte, etc.), and on the other hand it is good for the LSB of the
- * first byte to be the LSB of i0. We cannot have both these things, so we
+ * first byte to be the LSB of i0. We cannot have both these things, so we
* currently use the "little-endian" representation and avoid any multi-byte
* operations that depend on byte order. This largely precludes use of the
* 64-bit datatype since the relative order of i0 and i1 is unknown. It
@@ -181,13 +181,13 @@ typedef int int32_t;
* IE3264: MSB->LSB conversion, initial permutation, and expansion.
* This is done by collecting the 32 even-numbered bits and applying
* a 32->64 bit transformation, and then collecting the 32 odd-numbered
- * bits and applying the same transformation. Since there are only
+ * bits and applying the same transformation. Since there are only
* 32 input bits, the IE3264 transformation table is half the size of
* the usual table.
* CF6464: Compression, final permutation, and LSB->MSB conversion.
* This is done by two trivial 48->32 bit compressions to obtain
* a 64-bit block (the bit numbering is given in the "CIFP" table)
- * followed by a 64->64 bit "cleanup" transformation. (It would
+ * followed by a 64->64 bit "cleanup" transformation. (It would
* be possible to group the bits in the 64-bit block so that 2
* identical 32->32 bit transformations could be used instead,
* saving a factor of 4 in space and possibly 2 in time, but
@@ -206,7 +206,7 @@ typedef int int32_t;
* transforms 56 bits into 48 bits, dropping 8 bits, so PC2 is not
* invertible. We get around that problem by using a modified PC2
* which retains the 8 otherwise-lost bits in the unused low-order
- * bits of each byte. The low-order bits are cleared when the
+ * bits of each byte. The low-order bits are cleared when the
* codes are stored into the key schedule.
* PC2ROT[1]: Same as PC2ROT[0], but with two rotations.
* This is faster than applying PC2ROT[0] twice,
@@ -215,7 +215,7 @@ typedef int int32_t;
*
* The salting is a simple permutation applied to the 48-bit result of E.
* Specifically, if bit i (1 <= i <= 24) of the salt is set then bits i and
- * i+24 of the result are swapped. The salt is thus a 24 bit number, with
+ * i+24 of the result are swapped. The salt is thus a 24 bit number, with
* 16777216 possible values. (The original salt was 12 bits and could not
* swap bits 13..24 with 36..48.)
*
@@ -467,7 +467,7 @@ static C_block PC2ROT[2][64 / CHUNKBITS][1 << CHUNKBITS];
/* Initial permutation/expansion table */
static C_block IE3264[32 / CHUNKBITS][1 << CHUNKBITS];
-/* Table that combines the S, P, and E operations. */
+/* Table that combines the S, P, and E operations. */
static int32_t SPE[2][8][64];
/* compressed/interleaved => final permutation table */
diff --git a/src/port/getaddrinfo.c b/src/port/getaddrinfo.c
index 6817d87f9dd..0528637e83d 100644
--- a/src/port/getaddrinfo.c
+++ b/src/port/getaddrinfo.c
@@ -4,7 +4,7 @@
* Support getaddrinfo() on platforms that don't have it.
*
* We also supply getnameinfo() here, assuming that the platform will have
- * it if and only if it has getaddrinfo(). If this proves false on some
+ * it if and only if it has getaddrinfo(). If this proves false on some
* platform, we'll need to split this file and provide a separate configure
* test for getnameinfo().
*
diff --git a/src/port/getopt.c b/src/port/getopt.c
index eb50cba1ca4..31141d7574d 100644
--- a/src/port/getopt.c
+++ b/src/port/getopt.c
@@ -21,7 +21,7 @@
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/src/port/getopt_long.c b/src/port/getopt_long.c
index d624216ad03..b099091a76d 100644
--- a/src/port/getopt_long.c
+++ b/src/port/getopt_long.c
@@ -22,7 +22,7 @@
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/src/port/inet_aton.c b/src/port/inet_aton.c
index 473f51f88d0..27e8aaa4ec4 100644
--- a/src/port/inet_aton.c
+++ b/src/port/inet_aton.c
@@ -6,7 +6,7 @@
*
* The function was extracted whole from the file inet_aton.c in
* Release 5.3.12 of the Linux C library, which is derived from the
- * GNU C library, by Bryan Henderson in October 1996. The copyright
+ * GNU C library, by Bryan Henderson in October 1996. The copyright
* notice from that file is below.
*/
@@ -29,7 +29,7 @@
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/src/port/memcmp.c b/src/port/memcmp.c
index 185a9c59a85..269f58fa9b5 100644
--- a/src/port/memcmp.c
+++ b/src/port/memcmp.c
@@ -36,7 +36,7 @@
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/src/port/path.c b/src/port/path.c
index 89878c94b05..f1b1103cd2b 100644
--- a/src/port/path.c
+++ b/src/port/path.c
@@ -49,7 +49,7 @@ static void trim_trailing_separator(char *path);
/*
* skip_drive
*
- * On Windows, a path may begin with "C:" or "//network/". Advance over
+ * On Windows, a path may begin with "C:" or "//network/". Advance over
* this and point to the effective start of the path.
*/
#ifdef WIN32
@@ -262,7 +262,7 @@ canonicalize_path(char *path)
* Remove any trailing uses of "." and process ".." ourselves
*
* Note that "/../.." should reduce to just "/", while "../.." has to be
- * kept as-is. In the latter case we put back mistakenly trimmed ".."
+ * kept as-is. In the latter case we put back mistakenly trimmed ".."
* components below. Also note that we want a Windows drive spec to be
* visible to trim_directory(), but it's not part of the logic that's
* looking at the name components; hence distinction between path and
diff --git a/src/port/pgmkdirp.c b/src/port/pgmkdirp.c
index 84367f7c9b3..d9c95b522c9 100644
--- a/src/port/pgmkdirp.c
+++ b/src/port/pgmkdirp.c
@@ -20,7 +20,7 @@
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/src/port/qsort.c b/src/port/qsort.c
index 8e2c6d92c2d..b91013c9bfc 100644
--- a/src/port/qsort.c
+++ b/src/port/qsort.c
@@ -33,7 +33,7 @@
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/src/port/qsort_arg.c b/src/port/qsort_arg.c
index 28d1894992b..3c0c89f1880 100644
--- a/src/port/qsort_arg.c
+++ b/src/port/qsort_arg.c
@@ -33,7 +33,7 @@
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/src/port/snprintf.c b/src/port/snprintf.c
index 9f719505f49..a6323d692f1 100644
--- a/src/port/snprintf.c
+++ b/src/port/snprintf.c
@@ -18,7 +18,7 @@
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
@@ -66,7 +66,7 @@
* platforms. This implementation is compatible with the Single Unix Spec:
*
* 1. -1 is returned only if processing is abandoned due to an invalid
- * parameter, such as incorrect format string. (Although not required by
+ * parameter, such as incorrect format string. (Although not required by
* the spec, this happens only when no characters have yet been transmitted
* to the destination.)
*
@@ -87,7 +87,7 @@
* Original:
* Patrick Powell Tue Apr 11 09:48:21 PDT 1995
* A bombproof version of doprnt (dopr) included.
- * Sigh. This sort of thing is always nasty to deal with. Note that
+ * Sigh. This sort of thing is always nasty to deal with. Note that
* the version here does not include floating point. (now it does ... tgl)
**************************************************************/
diff --git a/src/port/strlcat.c b/src/port/strlcat.c
index 33ee22520db..190e57338e0 100644
--- a/src/port/strlcat.c
+++ b/src/port/strlcat.c
@@ -25,7 +25,7 @@
/*
* Appends src to string dst of size siz (unlike strncat, siz is the
* full size of dst, not space left). At most siz-1 characters
- * will be copied. Always NUL terminates (unless siz <= strlen(dst)).
+ * will be copied. Always NUL terminates (unless siz <= strlen(dst)).
* Returns strlen(src) + MIN(siz, strlen(initial dst)).
* If retval >= siz, truncation occurred.
*/
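(A minimal usage sketch of the contract above, assuming only the
declaration from this file; the truncation test follows directly from
the documented return value.)

#include <stdio.h>
#include <string.h>

extern size_t strlcat(char *dst, const char *src, size_t siz);

int
main(void)
{
	char		path[16] = "/usr";

	/* siz is the whole buffer, not the space left; the result is always
	 * NUL-terminated here since strlen(path) < sizeof(path). */
	size_t		len = strlcat(path, "/local/pgsql/bin", sizeof(path));

	if (len >= sizeof(path))
		printf("truncated: wanted %zu bytes, kept \"%s\"\n", len + 1, path);
	return 0;
}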
diff --git a/src/port/strlcpy.c b/src/port/strlcpy.c
index b20af029cc0..6dd41b8193c 100644
--- a/src/port/strlcpy.c
+++ b/src/port/strlcpy.c
@@ -36,8 +36,8 @@
/*
- * Copy src to string dst of size siz. At most siz-1 characters
- * will be copied. Always NUL terminates (unless siz == 0).
+ * Copy src to string dst of size siz. At most siz-1 characters
+ * will be copied. Always NUL terminates (unless siz == 0).
* Returns strlen(src); if retval >= siz, truncation occurred.
* Function creation history: http://www.gratisoft.us/todd/papers/strlcpy.html
*/
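(Likewise for strlcpy — a hedged usage sketch under the same
assumptions: the return value is strlen(src), so retval >= siz means
the source did not fit.)

#include <stdio.h>
#include <string.h>

extern size_t strlcpy(char *dst, const char *src, size_t siz);

int
main(void)
{
	char		ident[8];
	size_t		len = strlcpy(ident, "pg_regress", sizeof(ident));

	/* At most sizeof(ident) - 1 bytes were copied, plus a NUL. */
	if (len >= sizeof(ident))
		printf("source was %zu chars; kept \"%s\"\n", len, ident);
	return 0;
}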
diff --git a/src/port/strtol.c b/src/port/strtol.c
index 2689efdd468..0f5a5358f08 100644
--- a/src/port/strtol.c
+++ b/src/port/strtol.c
@@ -21,7 +21,7 @@
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
@@ -104,7 +104,7 @@ int base;
* that is greater than this value, if followed by a legal input
* character, is too big. One that is equal to this value may be valid or
* not; the limit between valid and invalid numbers is then based on the
- * last digit. For instance, if the range for longs is
+ * last digit. For instance, if the range for longs is
* [-2147483648..2147483647] and the input base is 10, cutoff will be set
* to 214748364 and cutlim to either 7 (neg==0) or 8 (neg==1), meaning
* that if we have accumulated a value > 214748364, or equal but the next
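(To make the cutoff/cutlim arithmetic above concrete — a standalone
sketch using the comment's own 32-bit example values rather than this
platform's limits.h constants:)

#include <stdio.h>

int
main(void)
{
	const long	cutoff = 2147483647L / 10;	/* 214748364 */
	const int	cutlim = 2147483647L % 10;	/* 7, for a non-negative result */
	long		acc = 214748364;	/* digits accumulated so far */
	int			digit = 8;			/* next input digit */

	/* Overflow iff already past cutoff, or at cutoff with a digit
	 * beyond cutlim: here 214748364 * 10 + 8 > 2147483647. */
	if (acc > cutoff || (acc == cutoff && digit > cutlim))
		printf("digit %d would overflow\n", digit);
	else
		printf("still in range: %ld\n", acc * 10 + digit);
	return 0;
}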
diff --git a/src/port/strtoul.c b/src/port/strtoul.c
index c9ec156ad15..82c72a04612 100644
--- a/src/port/strtoul.c
+++ b/src/port/strtoul.c
@@ -20,7 +20,7 @@
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/src/port/thread.c b/src/port/thread.c
index bca5815a09c..a9af127aba2 100644
--- a/src/port/thread.c
+++ b/src/port/thread.c
@@ -28,12 +28,12 @@
* Additional confusion exists because many operating systems that
* use pthread_setspecific/pthread_getspecific() also have *_r versions
* of standard library functions for compatibility with operating systems
- * that require them. However, internally, these *_r functions merely
+ * that require them. However, internally, these *_r functions merely
* call the thread-safe standard library functions.
*
* For example, BSD/OS 4.3 uses Bind 8.2.3 for getpwuid(). Internally,
* getpwuid() calls pthread_setspecific/pthread_getspecific() to return
- * static data to the caller in a thread-safe manner. However, BSD/OS
+ * static data to the caller in a thread-safe manner. However, BSD/OS
* also has getpwuid_r(), which merely calls getpwuid() and shifts
* around the arguments to match the getpwuid_r() function declaration.
* Therefore, while BSD/OS has getpwuid_r(), it isn't required. It also
diff --git a/src/port/unsetenv.c b/src/port/unsetenv.c
index 339e1725937..d037b86df11 100644
--- a/src/port/unsetenv.c
+++ b/src/port/unsetenv.c
@@ -30,7 +30,7 @@ unsetenv(const char *name)
* entry. When we clobber the entry in the second step we are ensuring
* that we zap the actual environ member. However, there are some libc
* implementations (notably recent BSDs) that do not obey SUS but copy the
- * presented string. This method fails on such platforms. Hopefully all
+ * presented string. This method fails on such platforms. Hopefully all
* such platforms have unsetenv() and thus won't be using this hack. See:
* http://www.greenend.org.uk/rjk/2008/putenv.html
*
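(A sketch of the putenv-based clobber this comment describes; the real
src/port/unsetenv.c may differ in detail, and as noted it only works
where putenv() keeps a pointer to the caller's string:)

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void
unsetenv_sketch(const char *name)
{
	char	   *envstr;

	if (getenv(name) == NULL)
		return;					/* nothing to remove */

	envstr = (char *) malloc(strlen(name) + 2);
	if (!envstr)
		return;					/* out of memory: give up silently */

	/* Step 1: install "NAME=" so our own string becomes the live
	 * environ entry for this variable. */
	sprintf(envstr, "%s=", name);
	putenv(envstr);

	/* Step 2: clobber that same string in place, zapping the entry.
	 * This is the part that fails on libcs that copy the string. */
	strcpy(envstr, "=");
	putenv(envstr);
}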
diff --git a/src/test/regress/pg_regress.c b/src/test/regress/pg_regress.c
index 37253713d6e..19fc3f209dd 100644
--- a/src/test/regress/pg_regress.c
+++ b/src/test/regress/pg_regress.c
@@ -67,7 +67,7 @@ static char *shellprog = SHELLPROG;
/*
* On Windows we use -w in diff switches to avoid problems with inconsistent
- * newline representation. The actual result files will generally have
+ * newline representation. The actual result files will generally have
* Windows-style newlines, but the comparison files might or might not.
*/
#ifndef WIN32
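/* A plausible completion of this conditional (flag values assumed for
 * illustration, not quoted from the file): plain diff options on Unix,
 * whitespace-insensitive ones on Windows, per the comment above. */
const char *basic_diff_opts = "";
const char *pretty_diff_opts = "-C3";
#else
const char *basic_diff_opts = "-w";
const char *pretty_diff_opts = "-w -C3";
#endif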
@@ -547,7 +547,7 @@ convert_sourcefiles(void)
* namely, it is a standard regular expression with an implicit ^ at the start.
* (We currently support only a very limited subset of regular expressions,
* see string_matches_pattern() above.) What hostplatformpattern will be
- * matched against is the config.guess output. (In the shell-script version,
+ * matched against is the config.guess output. (In the shell-script version,
* we also provided an indication of whether gcc or another compiler was in
* use, but that facility isn't used anymore.)
*/
@@ -793,7 +793,7 @@ initialize_environment(void)
/*
* GNU make stores some flags in the MAKEFLAGS environment variable to
- * pass arguments to its own children. If we are invoked by make,
+ * pass arguments to its own children. If we are invoked by make,
 * that causes the make invoked by us to think it's part of the make
* task invoking us, and so it tries to communicate with the toplevel
* make. Which fails.
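(The usual remedy for the conflict described above is simply to scrub
make's control variables before spawning anything — a minimal sketch;
the exact set of variables pg_regress clears is assumed here:)

#include <stdlib.h>

static void
scrub_make_environment(void)
{
	/* Keep any make we invoke from believing it is part of the make
	 * job that invoked us. */
	unsetenv("MAKEFLAGS");
	unsetenv("MAKELEVEL");
}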
@@ -826,7 +826,7 @@ initialize_environment(void)
* Set up shared library paths to include the temp install.
*
* LD_LIBRARY_PATH covers many platforms. DYLD_LIBRARY_PATH works on
- * Darwin, and maybe other Mach-based systems. LIBPATH is for AIX.
+ * Darwin, and maybe other Mach-based systems. LIBPATH is for AIX.
* Windows needs shared libraries in PATH (only those linked into
* executables, not dlopen'ed ones). Feel free to account for others
* as well.
@@ -946,7 +946,7 @@ spawn_process(const char *cmdline)
pid_t pid;
/*
- * Must flush I/O buffers before fork. Ideally we'd use fflush(NULL) here
+ * Must flush I/O buffers before fork. Ideally we'd use fflush(NULL) here
* ... does anyone still care about systems where that doesn't work?
*/
fflush(stdout);
@@ -967,7 +967,7 @@ spawn_process(const char *cmdline)
* In child
*
* Instead of using system(), exec the shell directly, and tell it to
- * "exec" the command too. This saves two useless processes per
+ * "exec" the command too. This saves two useless processes per
* parallel test case.
*/
char *cmdline2 = malloc(strlen(cmdline) + 6);
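/* A hedged continuation of this snippet: the +6 above covers the five
 * bytes of "exec " plus the trailing NUL. shellprog is the shell
 * declared near the top of this file. */
sprintf(cmdline2, "exec %s", cmdline);

/* Having the shell exec the command avoids leaving an intermediate
 * shell process alive for each test. */
execl(shellprog, shellprog, "-c", cmdline2, (char *) NULL);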
@@ -1812,7 +1812,7 @@ create_database(const char *dbname)
dbname, dbname, dbname, dbname, dbname);
/*
- * Install any requested procedural languages. We use CREATE OR REPLACE
+ * Install any requested procedural languages. We use CREATE OR REPLACE
* so that this will work whether or not the language is preinstalled.
*/
for (sl = loadlanguage; sl != NULL; sl = sl->next)
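{
	/* A plausible loop body (psql_command() is assumed here as the
	 * file's helper for running SQL against dbname): CREATE OR REPLACE
	 * succeeds whether or not the language already exists. */
	psql_command(dbname, "CREATE OR REPLACE LANGUAGE \"%s\"", sl->str);
}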
diff --git a/src/timezone/localtime.c b/src/timezone/localtime.c
index 3b80f3861f9..304be13259d 100644
--- a/src/timezone/localtime.c
+++ b/src/timezone/localtime.c
@@ -24,15 +24,15 @@
#ifndef WILDABBR
/*----------
* Someone might make incorrect use of a time zone abbreviation:
- * 1. They might reference tzname[0] before calling tzset (explicitly
+ * 1. They might reference tzname[0] before calling tzset (explicitly
* or implicitly).
- * 2. They might reference tzname[1] before calling tzset (explicitly
+ * 2. They might reference tzname[1] before calling tzset (explicitly
* or implicitly).
- * 3. They might reference tzname[1] after setting to a time zone
+ * 3. They might reference tzname[1] after setting to a time zone
* in which Daylight Saving Time is never observed.
- * 4. They might reference tzname[0] after setting to a time zone
+ * 4. They might reference tzname[0] after setting to a time zone
* in which Standard Time is never observed.
- * 5. They might reference tm.TM_ZONE after calling offtime.
+ * 5. They might reference tm.TM_ZONE after calling offtime.
* What's best to do in the above cases is open to debate;
* for now, we just set things up so that in any of the five cases
* WILDABBR is used. Another possibility: initialize tzname[0] to the
@@ -1451,7 +1451,7 @@ pg_get_timezone_offset(const pg_tz *tz, long int *gmtoff)
{
/*
* The zone could have more than one ttinfo, if it's historically used
- * more than one abbreviation. We return TRUE as long as they all have
+ * more than one abbreviation. We return TRUE as long as they all have
* the same gmtoff.
*/
const struct state *sp;
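/* A sketch of the check described above; member names are assumed
 * from the IANA tz code this file derives from. */
int			i;

sp = &tz->state;
for (i = 1; i < sp->typecnt; i++)
{
	if (sp->ttis[i].tt_gmtoff != sp->ttis[0].tt_gmtoff)
		return false;			/* offsets differ: no single answer */
}
*gmtoff = sp->ttis[0].tt_gmtoff;	/* all ttinfos agree */
return true;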
diff --git a/src/timezone/pgtz.c b/src/timezone/pgtz.c
index 324f2369051..008acb395b9 100644
--- a/src/timezone/pgtz.c
+++ b/src/timezone/pgtz.c
@@ -130,7 +130,7 @@ pg_open_tzfile(const char *name, char *canonname)
/*
* Scan specified directory for a case-insensitive match to fname
- * (of length fnamelen --- fname may not be null terminated!). If found,
+ * (of length fnamelen --- fname may not be null terminated!). If found,
* copy the actual filename into canonname and return true.
*/
static bool
@@ -153,7 +153,7 @@ scan_directory_ci(const char *dirname, const char *fname, int fnamelen,
while ((direntry = ReadDir(dirdesc, dirname)) != NULL)
{
/*
- * Ignore . and .., plus any other "hidden" files. This is a security
+ * Ignore . and .., plus any other "hidden" files. This is a security
* measure to prevent access to files outside the timezone directory.
*/
if (direntry->d_name[0] == '.')
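	continue;					/* skip dotfiles and anything hidden */

/* A hedged sketch of the match itself: compare exactly fnamelen bytes
 * case-insensitively, since fname may lack a NUL; the found flag and
 * MAXPGPATH sizing are assumed from context. */
if (strlen(direntry->d_name) == (size_t) fnamelen &&
	pg_strncasecmp(direntry->d_name, fname, fnamelen) == 0)
{
	strlcpy(canonname, direntry->d_name, MAXPGPATH);
	found = true;
	break;
}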
@@ -526,7 +526,7 @@ identify_system_timezone(void)
return resultbuf;
/*
- * Did not find the timezone. Fallback to use a GMT zone. Note that the
+ * Did not find the timezone. Fall back to a GMT zone. Note that the
* zic timezone database names the GMT-offset zones in POSIX style: plus
* is west of Greenwich. It's unfortunate that this is opposite of SQL
* conventions. Should we therefore change the names? Probably not...
@@ -546,7 +546,7 @@ identify_system_timezone(void)
* Recursively scan the timezone database looking for the best match to
* the system timezone behavior.
*
- * tzdir points to a buffer of size MAXPGPATH. On entry, it holds the
+ * tzdir points to a buffer of size MAXPGPATH. On entry, it holds the
* pathname of a directory containing TZ files. We internally modify it
* to hold pathnames of sub-directories and files, but must restore it
* to its original contents before exit.
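(A minimal sketch of the buffer discipline described above; the helper
name is hypothetical and the real scan_available_timezones does more
bookkeeping:)

#include <stdio.h>
#include <string.h>

#define MAXPGPATH 1024			/* matches the buffer size named above */

static void
visit_tzdir_entry(char *tzdir, const char *entryname)
{
	size_t		baselen = strlen(tzdir);

	/* Temporarily extend tzdir to name the sub-directory or file... */
	snprintf(tzdir + baselen, MAXPGPATH - baselen, "/%s", entryname);

	/* ... examine it or recurse here ... */

	/* ... then restore the original contents before returning. */
	tzdir[baselen] = '\0';
}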
@@ -1397,7 +1397,7 @@ tz_acceptable(pg_tz *tz)
/*
- * Get a pg_tz struct for the given timezone name. Returns NULL if name
+ * Get a pg_tz struct for the given timezone name. Returns NULL if name
* is invalid or not an "acceptable" zone.
*/
static pg_tz *
@@ -1455,7 +1455,7 @@ select_default_timezone(void)
*
* This is called before GUC variable initialization begins. Its purpose
* is to ensure that elog.c has a pgtz variable available to format timestamps
- * with, in case log_line_prefix is set to a value requiring that. We cannot
+ * with, in case log_line_prefix is set to a value requiring that. We cannot
* set log_timezone yet.
*/
void
diff --git a/src/tutorial/complex.c b/src/tutorial/complex.c
index edae0065c7d..52cfcc77218 100644
--- a/src/tutorial/complex.c
+++ b/src/tutorial/complex.c
@@ -140,7 +140,7 @@ complex_add(PG_FUNCTION_ARGS)
* It's essential that the comparison operators and support function for a
* B-tree index opclass always agree on the relative ordering of any two
* data values. Experience has shown that it's depressingly easy to write
- * unintentionally inconsistent functions. One way to reduce the odds of
+ * unintentionally inconsistent functions. One way to reduce the odds of
* making a mistake is to make all the functions simple wrappers around
* an internal three-way-comparison function, as we do here.
*****************************************************************************/
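(The pattern the comment recommends, sketched in the tutorial's style;
Mag() is assumed to be the module's magnitude helper, and only two of
the wrappers are shown:)

static int
complex_abs_cmp_internal(Complex *a, Complex *b)
{
	double		amag = Mag(a);
	double		bmag = Mag(b);

	if (amag < bmag)
		return -1;
	if (amag > bmag)
		return 1;
	return 0;
}

Datum
complex_abs_lt(PG_FUNCTION_ARGS)
{
	Complex    *a = (Complex *) PG_GETARG_POINTER(0);
	Complex    *b = (Complex *) PG_GETARG_POINTER(1);

	/* Each operator delegates, so it cannot disagree with the rest. */
	PG_RETURN_BOOL(complex_abs_cmp_internal(a, b) < 0);
}

Datum
complex_abs_cmp(PG_FUNCTION_ARGS)
{
	Complex    *a = (Complex *) PG_GETARG_POINTER(0);
	Complex    *b = (Complex *) PG_GETARG_POINTER(1);

	/* The btree support function returns the raw three-way result. */
	PG_RETURN_INT32(complex_abs_cmp_internal(a, b));
}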