Diffstat (limited to 'src/backend')
-rw-r--r-- src/backend/access/common/heaptuple.c | 10
-rw-r--r-- src/backend/access/common/indextuple.c | 6
-rw-r--r-- src/backend/access/common/printtup.c | 2
-rw-r--r-- src/backend/access/common/reloptions.c | 4
-rw-r--r-- src/backend/access/common/tupconvert.c | 2
-rw-r--r-- src/backend/access/common/tupdesc.c | 2
-rw-r--r-- src/backend/access/gin/ginarrayproc.c | 2
-rw-r--r-- src/backend/access/gin/ginbulk.c | 2
-rw-r--r-- src/backend/access/gin/ginentrypage.c | 2
-rw-r--r-- src/backend/access/gin/ginfast.c | 8
-rw-r--r-- src/backend/access/gin/ginget.c | 16
-rw-r--r-- src/backend/access/gin/gininsert.c | 2
-rw-r--r-- src/backend/access/gin/ginscan.c | 2
-rw-r--r-- src/backend/access/gin/ginutil.c | 2
-rw-r--r-- src/backend/access/gin/ginxlog.c | 2
-rw-r--r-- src/backend/access/gist/gist.c | 2
-rw-r--r-- src/backend/access/gist/gistget.c | 6
-rw-r--r-- src/backend/access/gist/gistscan.c | 2
-rw-r--r-- src/backend/access/gist/gistsplit.c | 10
-rw-r--r-- src/backend/access/gist/gistutil.c | 8
-rw-r--r-- src/backend/access/gist/gistvacuum.c | 2
-rw-r--r-- src/backend/access/gist/gistxlog.c | 4
-rw-r--r-- src/backend/access/hash/hash.c | 8
-rw-r--r-- src/backend/access/hash/hashfunc.c | 10
-rw-r--r-- src/backend/access/hash/hashinsert.c | 2
-rw-r--r-- src/backend/access/hash/hashovfl.c | 16
-rw-r--r-- src/backend/access/hash/hashpage.c | 16
-rw-r--r-- src/backend/access/hash/hashsearch.c | 4
-rw-r--r-- src/backend/access/hash/hashsort.c | 4
-rw-r--r-- src/backend/access/hash/hashutil.c | 4
-rw-r--r-- src/backend/access/heap/heapam.c | 120
-rw-r--r-- src/backend/access/heap/hio.c | 20
-rw-r--r-- src/backend/access/heap/pruneheap.c | 12
-rw-r--r-- src/backend/access/heap/rewriteheap.c | 14
-rw-r--r-- src/backend/access/heap/syncscan.c | 6
-rw-r--r-- src/backend/access/heap/tuptoaster.c | 10
-rw-r--r-- src/backend/access/heap/visibilitymap.c | 20
-rw-r--r-- src/backend/access/index/genam.c | 6
-rw-r--r-- src/backend/access/index/indexam.c | 14
-rw-r--r-- src/backend/access/nbtree/nbtcompare.c | 2
-rw-r--r-- src/backend/access/nbtree/nbtinsert.c | 28
-rw-r--r-- src/backend/access/nbtree/nbtpage.c | 34
-rw-r--r-- src/backend/access/nbtree/nbtree.c | 10
-rw-r--r-- src/backend/access/nbtree/nbtsearch.c | 28
-rw-r--r-- src/backend/access/nbtree/nbtsort.c | 24
-rw-r--r-- src/backend/access/nbtree/nbtutils.c | 52
-rw-r--r-- src/backend/access/nbtree/nbtxlog.c | 14
-rw-r--r-- src/backend/access/spgist/spgdoinsert.c | 14
-rw-r--r-- src/backend/access/spgist/spginsert.c | 4
-rw-r--r-- src/backend/access/spgist/spgscan.c | 4
-rw-r--r-- src/backend/access/spgist/spgtextproc.c | 8
-rw-r--r-- src/backend/access/spgist/spgutils.c | 6
-rw-r--r-- src/backend/access/spgist/spgvacuum.c | 4
-rw-r--r-- src/backend/access/spgist/spgxlog.c | 6
-rw-r--r-- src/backend/access/transam/clog.c | 10
-rw-r--r-- src/backend/access/transam/multixact.c | 56
-rw-r--r-- src/backend/access/transam/slru.c | 18
-rw-r--r-- src/backend/access/transam/subtrans.c | 4
-rw-r--r-- src/backend/access/transam/timeline.c | 6
-rw-r--r-- src/backend/access/transam/transam.c | 4
-rw-r--r-- src/backend/access/transam/twophase.c | 14
-rw-r--r-- src/backend/access/transam/varsup.c | 12
-rw-r--r-- src/backend/access/transam/xact.c | 62
-rw-r--r-- src/backend/access/transam/xlog.c | 92
-rw-r--r-- src/backend/access/transam/xlogreader.c | 8
-rw-r--r-- src/backend/bootstrap/bootstrap.c | 16
-rw-r--r-- src/backend/catalog/aclchk.c | 18
-rw-r--r-- src/backend/catalog/catalog.c | 6
-rw-r--r-- src/backend/catalog/dependency.c | 36
-rw-r--r-- src/backend/catalog/heap.c | 34
-rw-r--r-- src/backend/catalog/index.c | 98
-rw-r--r-- src/backend/catalog/indexing.c | 2
-rw-r--r-- src/backend/catalog/namespace.c | 54
-rw-r--r-- src/backend/catalog/objectaddress.c | 2
-rw-r--r-- src/backend/catalog/pg_collation.c | 2
-rw-r--r-- src/backend/catalog/pg_constraint.c | 8
-rw-r--r-- src/backend/catalog/pg_db_role_setting.c | 2
-rw-r--r-- src/backend/catalog/pg_depend.c | 8
-rw-r--r-- src/backend/catalog/pg_enum.c | 2
-rw-r--r-- src/backend/catalog/pg_largeobject.c | 2
-rw-r--r-- src/backend/catalog/pg_operator.c | 4
-rw-r--r-- src/backend/catalog/pg_proc.c | 8
-rw-r--r-- src/backend/catalog/pg_shdepend.c | 18
-rw-r--r-- src/backend/catalog/pg_type.c | 2
-rw-r--r-- src/backend/catalog/storage.c | 4
-rw-r--r-- src/backend/catalog/toasting.c | 2
-rw-r--r-- src/backend/commands/aggregatecmds.c | 2
-rw-r--r-- src/backend/commands/alter.c | 2
-rw-r--r-- src/backend/commands/analyze.c | 46
-rw-r--r-- src/backend/commands/async.c | 32
-rw-r--r-- src/backend/commands/cluster.c | 42
-rw-r--r-- src/backend/commands/constraint.c | 4
-rw-r--r-- src/backend/commands/copy.c | 32
-rw-r--r-- src/backend/commands/createas.c | 8
-rw-r--r-- src/backend/commands/dbcommands.c | 14
-rw-r--r-- src/backend/commands/define.c | 2
-rw-r--r-- src/backend/commands/event_trigger.c | 14
-rw-r--r-- src/backend/commands/explain.c | 10
-rw-r--r-- src/backend/commands/extension.c | 28
-rw-r--r-- src/backend/commands/foreigncmds.c | 4
-rw-r--r-- src/backend/commands/functioncmds.c | 12
-rw-r--r-- src/backend/commands/indexcmds.c | 42
-rw-r--r-- src/backend/commands/matview.c | 2
-rw-r--r-- src/backend/commands/opclasscmds.c | 8
-rw-r--r-- src/backend/commands/operatorcmds.c | 2
-rw-r--r-- src/backend/commands/portalcmds.c | 8
-rw-r--r-- src/backend/commands/prepare.c | 10
-rw-r--r-- src/backend/commands/proclang.c | 2
-rw-r--r-- src/backend/commands/schemacmds.c | 4
-rw-r--r-- src/backend/commands/sequence.c | 18
-rw-r--r-- src/backend/commands/tablecmds.c | 92
-rw-r--r-- src/backend/commands/tablespace.c | 22
-rw-r--r-- src/backend/commands/trigger.c | 58
-rw-r--r-- src/backend/commands/typecmds.c | 30
-rw-r--r-- src/backend/commands/user.c | 8
-rw-r--r-- src/backend/commands/vacuum.c | 26
-rw-r--r-- src/backend/commands/vacuumlazy.c | 10
-rw-r--r-- src/backend/commands/variable.c | 16
-rw-r--r-- src/backend/commands/view.c | 6
-rw-r--r-- src/backend/executor/execAmi.c | 4
-rw-r--r-- src/backend/executor/execCurrent.c | 2
-rw-r--r-- src/backend/executor/execJunk.c | 2
-rw-r--r-- src/backend/executor/execMain.c | 26
-rw-r--r-- src/backend/executor/execProcnode.c | 4
-rw-r--r-- src/backend/executor/execQual.c | 48
-rw-r--r-- src/backend/executor/execScan.c | 4
-rw-r--r-- src/backend/executor/execTuples.c | 12
-rw-r--r-- src/backend/executor/execUtils.c | 20
-rw-r--r-- src/backend/executor/functions.c | 34
-rw-r--r-- src/backend/executor/nodeAgg.c | 46
-rw-r--r-- src/backend/executor/nodeAppend.c | 2
-rw-r--r-- src/backend/executor/nodeBitmapHeapscan.c | 4
-rw-r--r-- src/backend/executor/nodeForeignscan.c | 2
-rw-r--r-- src/backend/executor/nodeFunctionscan.c | 2
-rw-r--r-- src/backend/executor/nodeHash.c | 16
-rw-r--r-- src/backend/executor/nodeHashjoin.c | 6
-rw-r--r-- src/backend/executor/nodeIndexonlyscan.c | 6
-rw-r--r-- src/backend/executor/nodeIndexscan.c | 4
-rw-r--r-- src/backend/executor/nodeLimit.c | 2
-rw-r--r-- src/backend/executor/nodeLockRows.c | 8
-rw-r--r-- src/backend/executor/nodeMaterial.c | 2
-rw-r--r-- src/backend/executor/nodeMergeAppend.c | 2
-rw-r--r-- src/backend/executor/nodeMergejoin.c | 12
-rw-r--r-- src/backend/executor/nodeModifyTable.c | 24
-rw-r--r-- src/backend/executor/nodeRecursiveunion.c | 2
-rw-r--r-- src/backend/executor/nodeSetOp.c | 6
-rw-r--r-- src/backend/executor/nodeSubplan.c | 10
-rw-r--r-- src/backend/executor/nodeSubqueryscan.c | 2
-rw-r--r-- src/backend/executor/nodeUnique.c | 2
-rw-r--r-- src/backend/executor/nodeValuesscan.c | 2
-rw-r--r-- src/backend/executor/nodeWindowAgg.c | 24
-rw-r--r-- src/backend/executor/nodeWorktablescan.c | 2
-rw-r--r-- src/backend/executor/spi.c | 12
-rw-r--r-- src/backend/executor/tstoreReceiver.c | 2
-rw-r--r-- src/backend/lib/stringinfo.c | 4
-rw-r--r-- src/backend/libpq/auth.c | 6
-rw-r--r-- src/backend/libpq/be-secure.c | 4
-rw-r--r-- src/backend/libpq/hba.c | 4
-rw-r--r-- src/backend/libpq/md5.c | 2
-rw-r--r-- src/backend/libpq/pqcomm.c | 4
-rw-r--r-- src/backend/libpq/pqformat.c | 2
-rw-r--r-- src/backend/main/main.c | 10
-rw-r--r-- src/backend/nodes/bitmapset.c | 4
-rw-r--r-- src/backend/nodes/copyfuncs.c | 6
-rw-r--r-- src/backend/nodes/equalfuncs.c | 12
-rw-r--r-- src/backend/nodes/list.c | 4
-rw-r--r-- src/backend/nodes/nodeFuncs.c | 12
-rw-r--r-- src/backend/nodes/outfuncs.c | 6
-rw-r--r-- src/backend/nodes/params.c | 2
-rw-r--r-- src/backend/nodes/read.c | 10
-rw-r--r-- src/backend/nodes/readfuncs.c | 14
-rw-r--r-- src/backend/nodes/tidbitmap.c | 18
-rw-r--r-- src/backend/optimizer/geqo/geqo_eval.c | 10
-rw-r--r-- src/backend/optimizer/path/allpaths.c | 30
-rw-r--r-- src/backend/optimizer/path/clausesel.c | 20
-rw-r--r-- src/backend/optimizer/path/costsize.c | 74
-rw-r--r-- src/backend/optimizer/path/equivclass.c | 58
-rw-r--r-- src/backend/optimizer/path/indxpath.c | 78
-rw-r--r-- src/backend/optimizer/path/joinpath.c | 26
-rw-r--r-- src/backend/optimizer/path/joinrels.c | 12
-rw-r--r-- src/backend/optimizer/path/orindxpath.c | 6
-rw-r--r-- src/backend/optimizer/path/pathkeys.c | 34
-rw-r--r-- src/backend/optimizer/path/tidpath.c | 4
-rw-r--r-- src/backend/optimizer/plan/analyzejoins.c | 10
-rw-r--r-- src/backend/optimizer/plan/createplan.c | 48
-rw-r--r-- src/backend/optimizer/plan/initsplan.c | 54
-rw-r--r-- src/backend/optimizer/plan/planagg.c | 8
-rw-r--r-- src/backend/optimizer/plan/planmain.c | 14
-rw-r--r-- src/backend/optimizer/plan/planner.c | 54
-rw-r--r-- src/backend/optimizer/plan/setrefs.c | 20
-rw-r--r-- src/backend/optimizer/plan/subselect.c | 38
-rw-r--r-- src/backend/optimizer/prep/prepjointree.c | 42
-rw-r--r-- src/backend/optimizer/prep/prepqual.c | 12
-rw-r--r-- src/backend/optimizer/prep/preptlist.c | 12
-rw-r--r-- src/backend/optimizer/prep/prepunion.c | 16
-rw-r--r-- src/backend/optimizer/util/clauses.c | 68
-rw-r--r-- src/backend/optimizer/util/joininfo.c | 2
-rw-r--r-- src/backend/optimizer/util/pathnode.c | 26
-rw-r--r-- src/backend/optimizer/util/placeholder.c | 4
-rw-r--r-- src/backend/optimizer/util/plancat.c | 10
-rw-r--r-- src/backend/optimizer/util/predtest.c | 22
-rw-r--r-- src/backend/optimizer/util/relnode.c | 18
-rw-r--r-- src/backend/optimizer/util/restrictinfo.c | 6
-rw-r--r-- src/backend/optimizer/util/tlist.c | 2
-rw-r--r-- src/backend/optimizer/util/var.c | 14
-rw-r--r-- src/backend/parser/analyze.c | 26
-rw-r--r-- src/backend/parser/kwlookup.c | 2
-rw-r--r-- src/backend/parser/parse_agg.c | 10
-rw-r--r-- src/backend/parser/parse_clause.c | 36
-rw-r--r-- src/backend/parser/parse_coerce.c | 46
-rw-r--r-- src/backend/parser/parse_collate.c | 14
-rw-r--r-- src/backend/parser/parse_cte.c | 8
-rw-r--r-- src/backend/parser/parse_expr.c | 20
-rw-r--r-- src/backend/parser/parse_func.c | 26
-rw-r--r-- src/backend/parser/parse_node.c | 10
-rw-r--r-- src/backend/parser/parse_oper.c | 4
-rw-r--r-- src/backend/parser/parse_param.c | 2
-rw-r--r-- src/backend/parser/parse_relation.c | 20
-rw-r--r-- src/backend/parser/parse_target.c | 24
-rw-r--r-- src/backend/parser/parse_type.c | 12
-rw-r--r-- src/backend/parser/parse_utilcmd.c | 18
-rw-r--r-- src/backend/parser/parser.c | 2
-rw-r--r-- src/backend/port/darwin/system.c | 2
-rw-r--r-- src/backend/port/dynloader/darwin.c | 2
-rw-r--r-- src/backend/port/dynloader/freebsd.c | 2
-rw-r--r-- src/backend/port/dynloader/netbsd.c | 2
-rw-r--r-- src/backend/port/dynloader/openbsd.c | 2
-rw-r--r-- src/backend/port/posix_sema.c | 2
-rw-r--r-- src/backend/port/sysv_sema.c | 14
-rw-r--r-- src/backend/port/sysv_shmem.c | 10
-rw-r--r-- src/backend/port/unix_latch.c | 6
-rw-r--r-- src/backend/port/win32/socket.c | 4
-rw-r--r-- src/backend/port/win32_latch.c | 2
-rw-r--r-- src/backend/port/win32_shmem.c | 2
-rw-r--r-- src/backend/postmaster/autovacuum.c | 46
-rw-r--r-- src/backend/postmaster/bgwriter.c | 14
-rw-r--r-- src/backend/postmaster/checkpointer.c | 24
-rw-r--r-- src/backend/postmaster/fork_process.c | 2
-rw-r--r-- src/backend/postmaster/pgarch.c | 4
-rw-r--r-- src/backend/postmaster/pgstat.c | 36
-rw-r--r-- src/backend/postmaster/postmaster.c | 74
-rw-r--r-- src/backend/postmaster/startup.c | 2
-rw-r--r-- src/backend/postmaster/syslogger.c | 14
-rw-r--r-- src/backend/postmaster/walwriter.c | 8
-rw-r--r-- src/backend/regex/regc_color.c | 2
-rw-r--r-- src/backend/regex/regc_cvec.c | 2
-rw-r--r-- src/backend/regex/regc_lex.c | 2
-rw-r--r-- src/backend/regex/regc_locale.c | 6
-rw-r--r-- src/backend/regex/regc_nfa.c | 6
-rw-r--r-- src/backend/regex/regc_pg_locale.c | 12
-rw-r--r-- src/backend/regex/regcomp.c | 8
-rw-r--r-- src/backend/regex/rege_dfa.c | 2
-rw-r--r-- src/backend/regex/regerror.c | 2
-rw-r--r-- src/backend/regex/regexec.c | 18
-rw-r--r-- src/backend/regex/regfree.c | 2
-rw-r--r-- src/backend/regex/regprefix.c | 8
-rw-r--r-- src/backend/replication/basebackup.c | 2
-rw-r--r-- src/backend/replication/syncrep.c | 2
-rw-r--r-- src/backend/replication/walreceiver.c | 4
-rw-r--r-- src/backend/replication/walreceiverfuncs.c | 2
-rw-r--r-- src/backend/replication/walsender.c | 6
-rw-r--r-- src/backend/rewrite/rewriteDefine.c | 6
-rw-r--r-- src/backend/rewrite/rewriteHandler.c | 42
-rw-r--r-- src/backend/rewrite/rewriteManip.c | 18
-rw-r--r-- src/backend/rewrite/rewriteSupport.c | 2
-rw-r--r-- src/backend/storage/buffer/buf_init.c | 4
-rw-r--r-- src/backend/storage/buffer/buf_table.c | 4
-rw-r--r-- src/backend/storage/buffer/bufmgr.c | 38
-rw-r--r-- src/backend/storage/buffer/freelist.c | 12
-rw-r--r-- src/backend/storage/buffer/localbuf.c | 6
-rw-r--r-- src/backend/storage/file/buffile.c | 6
-rw-r--r-- src/backend/storage/file/fd.c | 26
-rw-r--r-- src/backend/storage/freespace/freespace.c | 4
-rw-r--r-- src/backend/storage/freespace/fsmpage.c | 4
-rw-r--r-- src/backend/storage/ipc/ipc.c | 10
-rw-r--r-- src/backend/storage/ipc/ipci.c | 4
-rw-r--r-- src/backend/storage/ipc/pmsignal.c | 6
-rw-r--r-- src/backend/storage/ipc/procarray.c | 34
-rw-r--r-- src/backend/storage/ipc/shmem.c | 18
-rw-r--r-- src/backend/storage/ipc/shmqueue.c | 2
-rw-r--r-- src/backend/storage/ipc/sinval.c | 12
-rw-r--r-- src/backend/storage/ipc/sinvaladt.c | 22
-rw-r--r-- src/backend/storage/ipc/standby.c | 4
-rw-r--r-- src/backend/storage/large_object/inv_api.c | 2
-rw-r--r-- src/backend/storage/lmgr/deadlock.c | 18
-rw-r--r-- src/backend/storage/lmgr/lmgr.c | 8
-rw-r--r-- src/backend/storage/lmgr/lock.c | 40
-rw-r--r-- src/backend/storage/lmgr/lwlock.c | 10
-rw-r--r-- src/backend/storage/lmgr/predicate.c | 52
-rw-r--r-- src/backend/storage/lmgr/proc.c | 32
-rw-r--r-- src/backend/storage/lmgr/s_lock.c | 6
-rw-r--r-- src/backend/storage/lmgr/spin.c | 2
-rw-r--r-- src/backend/storage/page/bufpage.c | 8
-rw-r--r-- src/backend/storage/smgr/md.c | 32
-rw-r--r-- src/backend/storage/smgr/smgr.c | 4
-rw-r--r-- src/backend/tcop/fastpath.c | 10
-rw-r--r-- src/backend/tcop/postgres.c | 32
-rw-r--r-- src/backend/tcop/pquery.c | 14
-rw-r--r-- src/backend/tcop/utility.c | 4
-rw-r--r-- src/backend/tsearch/ts_locale.c | 4
-rw-r--r-- src/backend/tsearch/ts_selfuncs.c | 2
-rw-r--r-- src/backend/tsearch/ts_typanalyze.c | 14
-rw-r--r-- src/backend/tsearch/ts_utils.c | 8
-rw-r--r-- src/backend/tsearch/wparser_def.c | 2
-rw-r--r-- src/backend/utils/adt/acl.c | 18
-rw-r--r-- src/backend/utils/adt/array_selfuncs.c | 20
-rw-r--r-- src/backend/utils/adt/array_typanalyze.c | 14
-rw-r--r-- src/backend/utils/adt/array_userfuncs.c | 4
-rw-r--r-- src/backend/utils/adt/arrayfuncs.c | 26
-rw-r--r-- src/backend/utils/adt/arrayutils.c | 2
-rw-r--r-- src/backend/utils/adt/char.c | 2
-rw-r--r-- src/backend/utils/adt/date.c | 6
-rw-r--r-- src/backend/utils/adt/datetime.c | 14
-rw-r--r-- src/backend/utils/adt/datum.c | 2
-rw-r--r-- src/backend/utils/adt/dbsize.c | 2
-rw-r--r-- src/backend/utils/adt/domains.c | 8
-rw-r--r-- src/backend/utils/adt/float.c | 10
-rw-r--r-- src/backend/utils/adt/format_type.c | 6
-rw-r--r-- src/backend/utils/adt/formatting.c | 4
-rw-r--r-- src/backend/utils/adt/geo_selfuncs.c | 4
-rw-r--r-- src/backend/utils/adt/inet_cidr_ntop.c | 2
-rw-r--r-- src/backend/utils/adt/int.c | 30
-rw-r--r-- src/backend/utils/adt/int8.c | 44
-rw-r--r-- src/backend/utils/adt/json.c | 12
-rw-r--r-- src/backend/utils/adt/like.c | 4
-rw-r--r-- src/backend/utils/adt/misc.c | 12
-rw-r--r-- src/backend/utils/adt/nabstime.c | 4
-rw-r--r-- src/backend/utils/adt/network.c | 14
-rw-r--r-- src/backend/utils/adt/numeric.c | 50
-rw-r--r-- src/backend/utils/adt/oid.c | 2
-rw-r--r-- src/backend/utils/adt/pg_locale.c | 26
-rw-r--r-- src/backend/utils/adt/pg_lzcompress.c | 6
-rw-r--r-- src/backend/utils/adt/pseudotypes.c | 6
-rw-r--r-- src/backend/utils/adt/rangetypes.c | 8
-rw-r--r-- src/backend/utils/adt/rangetypes_gist.c | 8
-rw-r--r-- src/backend/utils/adt/regexp.c | 6
-rw-r--r-- src/backend/utils/adt/regproc.c | 14
-rw-r--r-- src/backend/utils/adt/ri_triggers.c | 14
-rw-r--r-- src/backend/utils/adt/rowtypes.c | 8
-rw-r--r-- src/backend/utils/adt/ruleutils.c | 72
-rw-r--r-- src/backend/utils/adt/selfuncs.c | 128
-rw-r--r-- src/backend/utils/adt/timestamp.c | 22
-rw-r--r-- src/backend/utils/adt/tsginidx.c | 2
-rw-r--r-- src/backend/utils/adt/varchar.c | 4
-rw-r--r-- src/backend/utils/adt/varlena.c | 40
-rw-r--r-- src/backend/utils/adt/xml.c | 40
-rw-r--r-- src/backend/utils/cache/attoptcache.c | 2
-rw-r--r-- src/backend/utils/cache/catcache.c | 14
-rw-r--r-- src/backend/utils/cache/evtcache.c | 2
-rw-r--r-- src/backend/utils/cache/inval.c | 30
-rw-r--r-- src/backend/utils/cache/lsyscache.c | 10
-rw-r--r-- src/backend/utils/cache/plancache.c | 42
-rw-r--r-- src/backend/utils/cache/relcache.c | 64
-rw-r--r-- src/backend/utils/cache/relmapper.c | 20
-rw-r--r-- src/backend/utils/cache/spccache.c | 6
-rw-r--r-- src/backend/utils/cache/syscache.c | 4
-rw-r--r-- src/backend/utils/cache/ts_cache.c | 2
-rw-r--r-- src/backend/utils/cache/typcache.c | 8
-rw-r--r-- src/backend/utils/error/elog.c | 46
-rw-r--r-- src/backend/utils/fmgr/dfmgr.c | 6
-rw-r--r-- src/backend/utils/fmgr/fmgr.c | 18
-rw-r--r-- src/backend/utils/fmgr/funcapi.c | 10
-rw-r--r-- src/backend/utils/hash/dynahash.c | 28
-rw-r--r-- src/backend/utils/init/miscinit.c | 32
-rw-r--r-- src/backend/utils/init/postinit.c | 16
-rw-r--r-- src/backend/utils/mb/conversion_procs/euc_tw_and_big5/big5.c | 2
-rw-r--r-- src/backend/utils/mb/mbutils.c | 14
-rw-r--r-- src/backend/utils/mb/wchar.c | 4
-rw-r--r-- src/backend/utils/mb/wstrcmp.c | 2
-rw-r--r-- src/backend/utils/mb/wstrncmp.c | 2
-rw-r--r-- src/backend/utils/misc/guc.c | 26
-rw-r--r-- src/backend/utils/misc/ps_status.c | 6
-rw-r--r-- src/backend/utils/misc/rbtree.c | 12
-rw-r--r-- src/backend/utils/misc/timeout.c | 22
-rw-r--r-- src/backend/utils/misc/tzparser.c | 4
-rw-r--r-- src/backend/utils/mmgr/aset.c | 16
-rw-r--r-- src/backend/utils/mmgr/mcxt.c | 16
-rw-r--r-- src/backend/utils/mmgr/portalmem.c | 18
-rw-r--r-- src/backend/utils/resowner/resowner.c | 8
-rw-r--r-- src/backend/utils/sort/logtape.c | 30
-rw-r--r-- src/backend/utils/sort/tuplesort.c | 70
-rw-r--r-- src/backend/utils/sort/tuplestore.c | 26
-rw-r--r-- src/backend/utils/time/combocid.c | 2
-rw-r--r-- src/backend/utils/time/snapmgr.c | 20
-rw-r--r-- src/backend/utils/time/tqual.c | 18
385 files changed, 2906 insertions, 2906 deletions
diff --git a/src/backend/access/common/heaptuple.c b/src/backend/access/common/heaptuple.c
index 8c45fd6b95e..770cc778b18 100644
--- a/src/backend/access/common/heaptuple.c
+++ b/src/backend/access/common/heaptuple.c
@@ -21,7 +21,7 @@
* tuptoaster.c.
*
* This change will break any code that assumes it needn't detoast values
- * that have been put into a tuple but never sent to disk. Hopefully there
+ * that have been put into a tuple but never sent to disk. Hopefully there
* are few such places.
*
* Varlenas still have alignment 'i' (or 'd') in pg_type/pg_attribute, since
@@ -387,7 +387,7 @@ nocachegetattr(HeapTuple tuple,
/*
* Otherwise, check for non-fixed-length attrs up to and including
- * target. If there aren't any, it's safe to cheaply initialize the
+ * target. If there aren't any, it's safe to cheaply initialize the
* cached offsets for these attrs.
*/
if (HeapTupleHasVarWidth(tuple))
@@ -454,7 +454,7 @@ nocachegetattr(HeapTuple tuple,
*
* Note - This loop is a little tricky. For each non-null attribute,
* we have to first account for alignment padding before the attr,
- * then advance over the attr based on its length. Nulls have no
+ * then advance over the attr based on its length. Nulls have no
* storage and no alignment padding either. We can use/set
* attcacheoff until we reach either a null or a var-width attribute.
*/
@@ -549,7 +549,7 @@ heap_getsysattr(HeapTuple tup, int attnum, TupleDesc tupleDesc, bool *isnull)
/*
* cmin and cmax are now both aliases for the same field, which
- * can in fact also be a combo command id. XXX perhaps we should
+ * can in fact also be a combo command id. XXX perhaps we should
* return the "real" cmin or cmax if possible, that is if we are
* inside the originating transaction?
*/
@@ -709,7 +709,7 @@ heap_form_tuple(TupleDesc tupleDescriptor,
len += data_len;
/*
- * Allocate and zero the space needed. Note that the tuple body and
+ * Allocate and zero the space needed. Note that the tuple body and
* HeapTupleData management structure are allocated in one chunk.
*/
tuple = (HeapTuple) palloc0(HEAPTUPLESIZE + len);
diff --git a/src/backend/access/common/indextuple.c b/src/backend/access/common/indextuple.c
index 2f96a302011..febfe531940 100644
--- a/src/backend/access/common/indextuple.c
+++ b/src/backend/access/common/indextuple.c
@@ -71,7 +71,7 @@ index_form_tuple(TupleDesc tupleDescriptor,
/*
* If value is stored EXTERNAL, must fetch it so we are not depending
- * on outside storage. This should be improved someday.
+ * on outside storage. This should be improved someday.
*/
if (VARATT_IS_EXTERNAL(DatumGetPointer(values[i])))
{
@@ -281,7 +281,7 @@ nocache_index_getattr(IndexTuple tup,
/*
* Otherwise, check for non-fixed-length attrs up to and including
- * target. If there aren't any, it's safe to cheaply initialize the
+ * target. If there aren't any, it's safe to cheaply initialize the
* cached offsets for these attrs.
*/
if (IndexTupleHasVarwidths(tup))
@@ -348,7 +348,7 @@ nocache_index_getattr(IndexTuple tup,
*
* Note - This loop is a little tricky. For each non-null attribute,
* we have to first account for alignment padding before the attr,
- * then advance over the attr based on its length. Nulls have no
+ * then advance over the attr based on its length. Nulls have no
* storage and no alignment padding either. We can use/set
* attcacheoff until we reach either a null or a var-width attribute.
*/
diff --git a/src/backend/access/common/printtup.c b/src/backend/access/common/printtup.c
index 5286be5391e..fc99c8cc961 100644
--- a/src/backend/access/common/printtup.c
+++ b/src/backend/access/common/printtup.c
@@ -181,7 +181,7 @@ printtup_startup(DestReceiver *self, int operation, TupleDesc typeinfo)
* or some similar function; it does not contain a full set of fields.
* The targetlist will be NIL when executing a utility function that does
* not have a plan. If the targetlist isn't NIL then it is a Query node's
- * targetlist; it is up to us to ignore resjunk columns in it. The formats[]
+ * targetlist; it is up to us to ignore resjunk columns in it. The formats[]
* array pointer might be NULL (if we are doing Describe on a prepared stmt);
* send zeroes for the format codes in that case.
*/
diff --git a/src/backend/access/common/reloptions.c b/src/backend/access/common/reloptions.c
index 17bbcb5dbab..84530dcfa6f 100644
--- a/src/backend/access/common/reloptions.c
+++ b/src/backend/access/common/reloptions.c
@@ -519,7 +519,7 @@ add_real_reloption(bits32 kinds, char *name, char *desc, double default_val,
* Add a new string reloption
*
* "validator" is an optional function pointer that can be used to test the
- * validity of the values. It must elog(ERROR) when the argument string is
+ * validity of the values. It must elog(ERROR) when the argument string is
* not acceptable for the variable. Note that the default value must pass
* the validation.
*/
@@ -847,7 +847,7 @@ extractRelOptions(HeapTuple tuple, TupleDesc tupdesc, Oid amoptions)
* is returned.
*
* Note: values of type int, bool and real are allocated as part of the
- * returned array. Values of type string are allocated separately and must
+ * returned array. Values of type string are allocated separately and must
* be freed by the caller.
*/
relopt_value *
diff --git a/src/backend/access/common/tupconvert.c b/src/backend/access/common/tupconvert.c
index 075add5bc0e..e83344e3c1a 100644
--- a/src/backend/access/common/tupconvert.c
+++ b/src/backend/access/common/tupconvert.c
@@ -5,7 +5,7 @@
*
* These functions provide conversion between rowtypes that are logically
* equivalent but might have columns in a different order or different sets
- * of dropped columns. There is some overlap of functionality with the
+ * of dropped columns. There is some overlap of functionality with the
* executor's "junkfilter" routines, but these functions work on bare
* HeapTuples rather than TupleTableSlots.
*
diff --git a/src/backend/access/common/tupdesc.c b/src/backend/access/common/tupdesc.c
index cc2e1ed0d23..4a5e255f0aa 100644
--- a/src/backend/access/common/tupdesc.c
+++ b/src/backend/access/common/tupdesc.c
@@ -540,7 +540,7 @@ TupleDescInitEntryCollation(TupleDesc desc,
* Given a relation schema (list of ColumnDef nodes), build a TupleDesc.
*
* Note: the default assumption is no OIDs; caller may modify the returned
- * TupleDesc if it wants OIDs. Also, tdtypeid will need to be filled in
+ * TupleDesc if it wants OIDs. Also, tdtypeid will need to be filled in
* later on.
*/
TupleDesc
diff --git a/src/backend/access/gin/ginarrayproc.c b/src/backend/access/gin/ginarrayproc.c
index 55562c6f380..bd31ee2b53e 100644
--- a/src/backend/access/gin/ginarrayproc.c
+++ b/src/backend/access/gin/ginarrayproc.c
@@ -197,7 +197,7 @@ ginarrayconsistent(PG_FUNCTION_ARGS)
/*
* Must have all elements in check[] true; no discrimination
- * against nulls here. This is because array_contain_compare and
+ * against nulls here. This is because array_contain_compare and
* array_eq handle nulls differently ...
*/
res = true;
diff --git a/src/backend/access/gin/ginbulk.c b/src/backend/access/gin/ginbulk.c
index 37ab49ccb47..d6d02ec3921 100644
--- a/src/backend/access/gin/ginbulk.c
+++ b/src/backend/access/gin/ginbulk.c
@@ -187,7 +187,7 @@ ginInsertBAEntry(BuildAccumulator *accum,
* Since the entries are being inserted into a balanced binary tree, you
* might think that the order of insertion wouldn't be critical, but it turns
* out that inserting the entries in sorted order results in a lot of
- * rebalancing operations and is slow. To prevent this, we attempt to insert
+ * rebalancing operations and is slow. To prevent this, we attempt to insert
* the nodes in an order that will produce a nearly-balanced tree if the input
* is in fact sorted.
*
diff --git a/src/backend/access/gin/ginentrypage.c b/src/backend/access/gin/ginentrypage.c
index 8cb5148c810..bd9908e3482 100644
--- a/src/backend/access/gin/ginentrypage.c
+++ b/src/backend/access/gin/ginentrypage.c
@@ -163,7 +163,7 @@ GinShortenTuple(IndexTuple itup, uint32 nipd)
* Form a non-leaf entry tuple by copying the key data from the given tuple,
* which can be either a leaf or non-leaf entry tuple.
*
- * Any posting list in the source tuple is not copied. The specified child
+ * Any posting list in the source tuple is not copied. The specified child
* block number is inserted into t_tid.
*/
static IndexTuple
diff --git a/src/backend/access/gin/ginfast.c b/src/backend/access/gin/ginfast.c
index 177a2246bc5..a0d6ae2a005 100644
--- a/src/backend/access/gin/ginfast.c
+++ b/src/backend/access/gin/ginfast.c
@@ -440,7 +440,7 @@ ginHeapTupleFastInsert(GinState *ginstate, GinTupleCollector *collector)
* Create temporary index tuples for a single indexable item (one index column
* for the heap tuple specified by ht_ctid), and append them to the array
* in *collector. They will subsequently be written out using
- * ginHeapTupleFastInsert. Note that to guarantee consistent state, all
+ * ginHeapTupleFastInsert. Note that to guarantee consistent state, all
* temp tuples for a given heap tuple must be written in one call to
* ginHeapTupleFastInsert.
*/
@@ -707,7 +707,7 @@ processPendingPage(BuildAccumulator *accum, KeyArray *ka,
*
* This can be called concurrently by multiple backends, so it must cope.
* On first glance it looks completely not concurrent-safe and not crash-safe
- * either. The reason it's okay is that multiple insertion of the same entry
+ * either. The reason it's okay is that multiple insertion of the same entry
* is detected and treated as a no-op by gininsert.c. If we crash after
* posting entries to the main index and before removing them from the
* pending list, it's okay because when we redo the posting later on, nothing
@@ -761,7 +761,7 @@ ginInsertCleanup(GinState *ginstate,
LockBuffer(metabuffer, GIN_UNLOCK);
/*
- * Initialize. All temporary space will be in opCtx
+ * Initialize. All temporary space will be in opCtx
*/
opCtx = AllocSetContextCreate(CurrentMemoryContext,
"GIN insert cleanup temporary context",
@@ -855,7 +855,7 @@ ginInsertCleanup(GinState *ginstate,
/*
* While we left the page unlocked, more stuff might have gotten
- * added to it. If so, process those entries immediately. There
+ * added to it. If so, process those entries immediately. There
* shouldn't be very many, so we don't worry about the fact that
* we're doing this with exclusive lock. Insertion algorithm
* guarantees that inserted row(s) will not continue on next page.
diff --git a/src/backend/access/gin/ginget.c b/src/backend/access/gin/ginget.c
index 9dcf638dc23..d1462d091ac 100644
--- a/src/backend/access/gin/ginget.c
+++ b/src/backend/access/gin/ginget.c
@@ -164,7 +164,7 @@ scanPostingTree(Relation index, GinScanEntry scanEntry,
/*
* Collects TIDs into scanEntry->matchBitmap for all heap tuples that
- * match the search entry. This supports three different match modes:
+ * match the search entry. This supports three different match modes:
*
* 1. Partial-match support: scan from current point until the
* comparePartialFn says we're done.
@@ -260,7 +260,7 @@ collectMatchBitmap(GinBtreeData *btree, GinBtreeStack *stack,
/*
* In ALL mode, we are not interested in null items, so we can
* stop if we get to a null-item placeholder (which will be the
- * last entry for a given attnum). We do want to include NULL_KEY
+ * last entry for a given attnum). We do want to include NULL_KEY
* and EMPTY_ITEM entries, though.
*/
if (icategory == GIN_CAT_NULL_ITEM)
@@ -956,14 +956,14 @@ scanGetItem(IndexScanDesc scan, ItemPointer advancePast,
* that exact TID, or a lossy reference to the same page.
*
* This logic works only if a keyGetItem stream can never contain both
- * exact and lossy pointers for the same page. Else we could have a
+ * exact and lossy pointers for the same page. Else we could have a
* case like
*
* stream 1 stream 2
- * ... ...
+ * ... ...
* 42/6 42/7
* 50/1 42/0xffff
- * ... ...
+ * ... ...
*
* We would conclude that 42/6 is not a match and advance stream 1,
* thus never detecting the match to the lossy pointer in stream 2.
@@ -992,7 +992,7 @@ scanGetItem(IndexScanDesc scan, ItemPointer advancePast,
break;
/*
- * No hit. Update myAdvancePast to this TID, so that on the next pass
+ * No hit. Update myAdvancePast to this TID, so that on the next pass
* we'll move to the next possible entry.
*/
myAdvancePast = *item;
@@ -1508,10 +1508,10 @@ gingetbitmap(PG_FUNCTION_ARGS)
/*
* First, scan the pending list and collect any matching entries into the
- * bitmap. After we scan a pending item, some other backend could post it
+ * bitmap. After we scan a pending item, some other backend could post it
* into the main index, and so we might visit it a second time during the
* main scan. This is okay because we'll just re-set the same bit in the
- * bitmap. (The possibility of duplicate visits is a major reason why GIN
+ * bitmap. (The possibility of duplicate visits is a major reason why GIN
* can't support the amgettuple API, however.) Note that it would not do
* to scan the main index before the pending list, since concurrent
* cleanup could then make us miss entries entirely.
diff --git a/src/backend/access/gin/gininsert.c b/src/backend/access/gin/gininsert.c
index beaa65317f3..af9ef3fab32 100644
--- a/src/backend/access/gin/gininsert.c
+++ b/src/backend/access/gin/gininsert.c
@@ -98,7 +98,7 @@ createPostingTree(Relation index, ItemPointerData *items, uint32 nitems)
* Adds array of item pointers to tuple's posting list, or
* creates posting tree and tuple pointing to tree in case
* of not enough space. Max size of tuple is defined in
- * GinFormTuple(). Returns a new, modified index tuple.
+ * GinFormTuple(). Returns a new, modified index tuple.
* items[] must be in sorted order with no duplicates.
*/
static IndexTuple
diff --git a/src/backend/access/gin/ginscan.c b/src/backend/access/gin/ginscan.c
index afee2dbd924..2dfa241e88a 100644
--- a/src/backend/access/gin/ginscan.c
+++ b/src/backend/access/gin/ginscan.c
@@ -387,7 +387,7 @@ ginNewScanKey(IndexScanDesc scan)
/*
* If the index is version 0, it may be missing null and placeholder
* entries, which would render searches for nulls and full-index scans
- * unreliable. Throw an error if so.
+ * unreliable. Throw an error if so.
*/
if (hasNullQuery && !so->isVoidRes)
{
diff --git a/src/backend/access/gin/ginutil.c b/src/backend/access/gin/ginutil.c
index 8a716810517..f4a181d02b7 100644
--- a/src/backend/access/gin/ginutil.c
+++ b/src/backend/access/gin/ginutil.c
@@ -436,7 +436,7 @@ ginExtractEntries(GinState *ginstate, OffsetNumber attnum,
* If there's more than one key, sort and unique-ify.
*
* XXX Using qsort here is notationally painful, and the overhead is
- * pretty bad too. For small numbers of keys it'd likely be better to use
+ * pretty bad too. For small numbers of keys it'd likely be better to use
* a simple insertion sort.
*/
if (*nentries > 1)
diff --git a/src/backend/access/gin/ginxlog.c b/src/backend/access/gin/ginxlog.c
index f487ba5c1ad..f43ac50115b 100644
--- a/src/backend/access/gin/ginxlog.c
+++ b/src/backend/access/gin/ginxlog.c
@@ -675,7 +675,7 @@ ginRedoDeleteListPages(XLogRecPtr lsn, XLogRecord *record)
/*
* In normal operation, shiftList() takes exclusive lock on all the
- * pages-to-be-deleted simultaneously. During replay, however, it should
+ * pages-to-be-deleted simultaneously. During replay, however, it should
* be all right to lock them one at a time. This is dependent on the fact
* that we are deleting pages from the head of the list, and that readers
* share-lock the next page before releasing the one they are on. So we
diff --git a/src/backend/access/gist/gist.c b/src/backend/access/gist/gist.c
index 3c2284566b0..65c15fd7b40 100644
--- a/src/backend/access/gist/gist.c
+++ b/src/backend/access/gist/gist.c
@@ -1382,7 +1382,7 @@ initGISTstate(Relation index)
/*
* If the index column has a specified collation, we should honor that
* while doing comparisons. However, we may have a collatable storage
- * type for a noncollatable indexed data type. If there's no index
+ * type for a noncollatable indexed data type. If there's no index
* collation then specify default collation in case the support
* functions need collation. This is harmless if the support
* functions don't care about collation, so we just do it
diff --git a/src/backend/access/gist/gistget.c b/src/backend/access/gist/gistget.c
index e97ab8f3fd5..8c76fe15c01 100644
--- a/src/backend/access/gist/gistget.c
+++ b/src/backend/access/gist/gistget.c
@@ -31,7 +31,7 @@
*
* On success return for a heap tuple, *recheck_p is set to indicate
* whether recheck is needed. We recheck if any of the consistent() functions
- * request it. recheck is not interesting when examining a non-leaf entry,
+ * request it. recheck is not interesting when examining a non-leaf entry,
* since we must visit the lower index page if there's any doubt.
*
* If we are doing an ordered scan, so->distances[] is filled with distance
@@ -62,7 +62,7 @@ gistindex_keytest(IndexScanDesc scan,
/*
* If it's a leftover invalid tuple from pre-9.1, treat it as a match with
- * minimum possible distances. This means we'll always follow it to the
+ * minimum possible distances. This means we'll always follow it to the
* referenced page.
*/
if (GistTupleIsInvalid(tuple))
@@ -224,7 +224,7 @@ gistindex_keytest(IndexScanDesc scan,
* ntids: if not NULL, gistgetbitmap's output tuple counter
*
* If tbm/ntids aren't NULL, we are doing an amgetbitmap scan, and heap
- * tuples should be reported directly into the bitmap. If they are NULL,
+ * tuples should be reported directly into the bitmap. If they are NULL,
* we're doing a plain or ordered indexscan. For a plain indexscan, heap
* tuple TIDs are returned into so->pageData[]. For an ordered indexscan,
* heap tuple TIDs are pushed into individual search queue items.
diff --git a/src/backend/access/gist/gistscan.c b/src/backend/access/gist/gistscan.c
index b5553ffe69b..858e1a67cfc 100644
--- a/src/backend/access/gist/gistscan.c
+++ b/src/backend/access/gist/gistscan.c
@@ -56,7 +56,7 @@ GISTSearchTreeItemCombiner(RBNode *existing, const RBNode *newrb, void *arg)
/*
* If new item is heap tuple, it goes to front of chain; otherwise insert
* it before the first index-page item, so that index pages are visited in
- * LIFO order, ensuring depth-first search of index pages. See comments
+ * LIFO order, ensuring depth-first search of index pages. See comments
* in gist_private.h.
*/
if (GISTSearchItemIsHeap(*newitem))
diff --git a/src/backend/access/gist/gistsplit.c b/src/backend/access/gist/gistsplit.c
index c97f6da156e..5435cc70ccc 100644
--- a/src/backend/access/gist/gistsplit.c
+++ b/src/backend/access/gist/gistsplit.c
@@ -71,7 +71,7 @@ gistunionsubkeyvec(GISTSTATE *giststate, IndexTuple *itvec,
* Recompute unions of left- and right-side subkeys after a page split,
* ignoring any tuples that are marked in spl->spl_dontcare[].
*
- * Note: we always recompute union keys for all index columns. In some cases
+ * Note: we always recompute union keys for all index columns. In some cases
* this might represent duplicate work for the leftmost column(s), but it's
* not safe to assume that "zero penalty to move a tuple" means "the union
* key doesn't change at all". Penalty functions aren't 100% accurate.
@@ -160,7 +160,7 @@ findDontCares(Relation r, GISTSTATE *giststate, GISTENTRY *valvec,
/*
* Remove tuples that are marked don't-cares from the tuple index array a[]
- * of length *len. This is applied separately to the spl_left and spl_right
+ * of length *len. This is applied separately to the spl_left and spl_right
* arrays.
*/
static void
@@ -193,7 +193,7 @@ removeDontCares(OffsetNumber *a, int *len, const bool *dontcare)
/*
* Place a single don't-care tuple into either the left or right side of the
* split, according to which has least penalty for merging the tuple into
- * the previously-computed union keys. We need consider only columns starting
+ * the previously-computed union keys. We need consider only columns starting
* at attno.
*/
static void
@@ -291,7 +291,7 @@ supportSecondarySplit(Relation r, GISTSTATE *giststate, int attno,
/*
* There is only one previously defined union, so we just choose swap
- * or not by lowest penalty for that side. We can only get here if a
+ * or not by lowest penalty for that side. We can only get here if a
* secondary split happened to have all NULLs in its column in the
* tuples that the outer recursion level had assigned to one side.
* (Note that the null checks in gistSplitByKey don't prevent the
@@ -427,7 +427,7 @@ gistUserPicksplit(Relation r, GistEntryVector *entryvec, int attno, GistSplitVec
sv->spl_rdatum = v->spl_rattr[attno];
/*
- * Let the opclass-specific PickSplit method do its thing. Note that at
+ * Let the opclass-specific PickSplit method do its thing. Note that at
* this point we know there are no null keys in the entryvec.
*/
FunctionCall2Coll(&giststate->picksplitFn[attno],
diff --git a/src/backend/access/gist/gistutil.c b/src/backend/access/gist/gistutil.c
index b9c1967ebc0..1aedaec3fae 100644
--- a/src/backend/access/gist/gistutil.c
+++ b/src/backend/access/gist/gistutil.c
@@ -414,7 +414,7 @@ gistchoose(Relation r, Page p, IndexTuple it, /* it has compressed entry */
* some inserts to go to other equally-good subtrees.
*
* keep_current_best is -1 if we haven't yet had to make a random choice
- * whether to keep the current best tuple. If we have done so, and
+ * whether to keep the current best tuple. If we have done so, and
* decided to keep it, keep_current_best is 1; if we've decided to
* replace, keep_current_best is 0. (This state will be reset to -1 as
* soon as we've made the replacement, but sometimes we make the choice in
@@ -456,7 +456,7 @@ gistchoose(Relation r, Page p, IndexTuple it, /* it has compressed entry */
{
/*
* New best penalty for column. Tentatively select this tuple
- * as the target, and record the best penalty. Then reset the
+ * as the target, and record the best penalty. Then reset the
* next column's penalty to "unknown" (and indirectly, the
* same for all the ones to its right). This will force us to
* adopt this tuple's penalty values as the best for all the
@@ -475,7 +475,7 @@ gistchoose(Relation r, Page p, IndexTuple it, /* it has compressed entry */
{
/*
* The current tuple is exactly as good for this column as the
- * best tuple seen so far. The next iteration of this loop
+ * best tuple seen so far. The next iteration of this loop
* will compare the next column.
*/
}
@@ -681,7 +681,7 @@ gistcheckpage(Relation rel, Buffer buf)
/*
* ReadBuffer verifies that every newly-read page passes
* PageHeaderIsValid, which means it either contains a reasonably sane
- * page header or is all-zero. We have to defend against the all-zero
+ * page header or is all-zero. We have to defend against the all-zero
* case, however.
*/
if (PageIsNew(page))
diff --git a/src/backend/access/gist/gistvacuum.c b/src/backend/access/gist/gistvacuum.c
index 59c3105b083..dc32c9d5292 100644
--- a/src/backend/access/gist/gistvacuum.c
+++ b/src/backend/access/gist/gistvacuum.c
@@ -49,7 +49,7 @@ gistvacuumcleanup(PG_FUNCTION_ARGS)
stats->estimated_count = info->estimated_count;
/*
- * XXX the above is wrong if index is partial. Would it be OK to just
+ * XXX the above is wrong if index is partial. Would it be OK to just
* return NULL, or is there work we must do below?
*/
}
diff --git a/src/backend/access/gist/gistxlog.c b/src/backend/access/gist/gistxlog.c
index 15b7eb024ae..56019c808fe 100644
--- a/src/backend/access/gist/gistxlog.c
+++ b/src/backend/access/gist/gistxlog.c
@@ -38,7 +38,7 @@ static MemoryContext opCtx; /* working memory for operations */
* follow-right flag, because that change is not included in the full-page
* image. To be sure that the intermediate state with the wrong flag value is
* not visible to concurrent Hot Standby queries, this function handles
- * restoring the full-page image as well as updating the flag. (Note that
+ * restoring the full-page image as well as updating the flag. (Note that
* we never need to do anything else to the child page in the current WAL
* action.)
*/
@@ -89,7 +89,7 @@ gistRedoPageUpdateRecord(XLogRecPtr lsn, XLogRecord *record)
/*
* We need to acquire and hold lock on target page while updating the left
- * child page. If we have a full-page image of target page, getting the
+ * child page. If we have a full-page image of target page, getting the
* lock is a side-effect of restoring that image. Note that even if the
* target page no longer exists, we'll still attempt to replay the change
* on the child page.
diff --git a/src/backend/access/hash/hash.c b/src/backend/access/hash/hash.c
index 8895f585034..b3ebfae2ff9 100644
--- a/src/backend/access/hash/hash.c
+++ b/src/backend/access/hash/hash.c
@@ -78,7 +78,7 @@ hashbuild(PG_FUNCTION_ARGS)
* (assuming their hash codes are pretty random) there will be no locality
* of access to the index, and if the index is bigger than available RAM
* then we'll thrash horribly. To prevent that scenario, we can sort the
- * tuples by (expected) bucket number. However, such a sort is useless
+ * tuples by (expected) bucket number. However, such a sort is useless
* overhead when the index does fit in RAM. We choose to sort if the
* initial index size exceeds NBuffers.
*
@@ -248,7 +248,7 @@ hashgettuple(PG_FUNCTION_ARGS)
/*
* An insertion into the current index page could have happened while
* we didn't have read lock on it. Re-find our position by looking
- * for the TID we previously returned. (Because we hold share lock on
+ * for the TID we previously returned. (Because we hold share lock on
* the bucket, no deletions or splits could have occurred; therefore
* we can expect that the TID still exists in the current index page,
* at an offset >= where we were.)
@@ -524,7 +524,7 @@ hashbulkdelete(PG_FUNCTION_ARGS)
/*
* Read the metapage to fetch original bucket and tuple counts. Also, we
* keep a copy of the last-seen metapage so that we can use its
- * hashm_spares[] values to compute bucket page addresses. This is a bit
+ * hashm_spares[] values to compute bucket page addresses. This is a bit
* hokey but perfectly safe, since the interesting entries in the spares
* array cannot change under us; and it beats rereading the metapage for
* each bucket.
@@ -655,7 +655,7 @@ loop_top:
{
/*
* Otherwise, our count is untrustworthy since we may have
- * double-scanned tuples in split buckets. Proceed by dead-reckoning.
+ * double-scanned tuples in split buckets. Proceed by dead-reckoning.
* (Note: we still return estimated_count = false, because using this
* count is better than not updating reltuples at all.)
*/
diff --git a/src/backend/access/hash/hashfunc.c b/src/backend/access/hash/hashfunc.c
index d5992821933..9a88aa5a432 100644
--- a/src/backend/access/hash/hashfunc.c
+++ b/src/backend/access/hash/hashfunc.c
@@ -11,7 +11,7 @@
* src/backend/access/hash/hashfunc.c
*
* NOTES
- * These functions are stored in pg_amproc. For each operator class
+ * These functions are stored in pg_amproc. For each operator class
* defined for hash indexes, they compute the hash value of the argument.
*
* Additional hash functions appear in /utils/adt/ files for various
@@ -158,7 +158,7 @@ hashtext(PG_FUNCTION_ARGS)
/*
* Note: this is currently identical in behavior to hashvarlena, but keep
* it as a separate function in case we someday want to do something
- * different in non-C locales. (See also hashbpchar, if so.)
+ * different in non-C locales. (See also hashbpchar, if so.)
*/
result = hash_any((unsigned char *) VARDATA_ANY(key),
VARSIZE_ANY_EXHDR(key));
@@ -236,7 +236,7 @@ hashvarlena(PG_FUNCTION_ARGS)
*
* This allows some parallelism. Read-after-writes are good at doubling
* the number of bits affected, so the goal of mixing pulls in the opposite
- * direction from the goal of parallelism. I did what I could. Rotates
+ * direction from the goal of parallelism. I did what I could. Rotates
* seem to cost as much as shifts on every machine I could lay my hands on,
* and rotates are much kinder to the top and bottom bits, so I used rotates.
*----------
@@ -270,7 +270,7 @@ hashvarlena(PG_FUNCTION_ARGS)
* substantial performance increase since final() does not need to
* do well in reverse, but is does need to affect all output bits.
* mix(), on the other hand, does not need to affect all output
- * bits (affecting 32 bits is enough). The original hash function had
+ * bits (affecting 32 bits is enough). The original hash function had
* a single mixing operation that had to satisfy both sets of requirements
* and was slower as a result.
*----------
@@ -291,7 +291,7 @@ hashvarlena(PG_FUNCTION_ARGS)
* k : the key (the unaligned variable-length array of bytes)
* len : the length of the key, counting by bytes
*
- * Returns a uint32 value. Every bit of the key affects every bit of
+ * Returns a uint32 value. Every bit of the key affects every bit of
* the return value. Every 1-bit and 2-bit delta achieves avalanche.
* About 6*len+35 instructions. The best hash table sizes are powers
* of 2. There is no need to do mod a prime (mod is sooo slow!).
diff --git a/src/backend/access/hash/hashinsert.c b/src/backend/access/hash/hashinsert.c
index 4508a36bd05..63be2f37872 100644
--- a/src/backend/access/hash/hashinsert.c
+++ b/src/backend/access/hash/hashinsert.c
@@ -90,7 +90,7 @@ _hash_doinsert(Relation rel, IndexTuple itup)
/*
* If the previous iteration of this loop locked what is still the
- * correct target bucket, we are done. Otherwise, drop any old lock
+ * correct target bucket, we are done. Otherwise, drop any old lock
* and lock what now appears to be the correct bucket.
*/
if (retry)
diff --git a/src/backend/access/hash/hashovfl.c b/src/backend/access/hash/hashovfl.c
index 03199218f73..0d13359e6d2 100644
--- a/src/backend/access/hash/hashovfl.c
+++ b/src/backend/access/hash/hashovfl.c
@@ -80,7 +80,7 @@ blkno_to_bitno(HashMetaPage metap, BlockNumber ovflblkno)
*
* Add an overflow page to the bucket whose last page is pointed to by 'buf'.
*
- * On entry, the caller must hold a pin but no lock on 'buf'. The pin is
+ * On entry, the caller must hold a pin but no lock on 'buf'. The pin is
* dropped before exiting (we assume the caller is not interested in 'buf'
* anymore). The returned overflow page will be pinned and write-locked;
* it is guaranteed to be empty.
@@ -89,12 +89,12 @@ blkno_to_bitno(HashMetaPage metap, BlockNumber ovflblkno)
* That buffer is returned in the same state.
*
* The caller must hold at least share lock on the bucket, to ensure that
- * no one else tries to compact the bucket meanwhile. This guarantees that
+ * no one else tries to compact the bucket meanwhile. This guarantees that
* 'buf' won't stop being part of the bucket while it's unlocked.
*
* NB: since this could be executed concurrently by multiple processes,
* one should not assume that the returned overflow page will be the
- * immediate successor of the originally passed 'buf'. Additional overflow
+ * immediate successor of the originally passed 'buf'. Additional overflow
* pages might have been added to the bucket chain in between.
*/
Buffer
@@ -157,7 +157,7 @@ _hash_addovflpage(Relation rel, Buffer metabuf, Buffer buf)
/*
* _hash_getovflpage()
*
- * Find an available overflow page and return it. The returned buffer
+ * Find an available overflow page and return it. The returned buffer
* is pinned and write-locked, and has had _hash_pageinit() applied,
* but it is caller's responsibility to fill the special space.
*
@@ -253,7 +253,7 @@ _hash_getovflpage(Relation rel, Buffer metabuf)
* We create the new bitmap page with all pages marked "in use".
* Actually two pages in the new bitmap's range will exist
* immediately: the bitmap page itself, and the following page which
- * is the one we return to the caller. Both of these are correctly
+ * is the one we return to the caller. Both of these are correctly
* marked "in use". Subsequent pages do not exist yet, but it is
* convenient to pre-mark them as "in use" too.
*/
@@ -284,7 +284,7 @@ _hash_getovflpage(Relation rel, Buffer metabuf)
metap->hashm_spares[splitnum]++;
/*
- * Adjust hashm_firstfree to avoid redundant searches. But don't risk
+ * Adjust hashm_firstfree to avoid redundant searches. But don't risk
* changing it if someone moved it while we were searching bitmap pages.
*/
if (metap->hashm_firstfree == orig_firstfree)
@@ -313,7 +313,7 @@ found:
blkno = bitno_to_blkno(metap, bit);
/*
- * Adjust hashm_firstfree to avoid redundant searches. But don't risk
+ * Adjust hashm_firstfree to avoid redundant searches. But don't risk
* changing it if someone moved it while we were searching bitmap pages.
*/
if (metap->hashm_firstfree == orig_firstfree)
@@ -494,7 +494,7 @@ _hash_freeovflpage(Relation rel, Buffer ovflbuf,
/*
* _hash_initbitmap()
*
- * Initialize a new bitmap page. The metapage has a write-lock upon
+ * Initialize a new bitmap page. The metapage has a write-lock upon
* entering the function, and must be written by caller after return.
*
* 'blkno' is the block number of the new bitmap page.
diff --git a/src/backend/access/hash/hashpage.c b/src/backend/access/hash/hashpage.c
index e91419b12e3..62b0c062c31 100644
--- a/src/backend/access/hash/hashpage.c
+++ b/src/backend/access/hash/hashpage.c
@@ -49,7 +49,7 @@ static void _hash_splitbucket(Relation rel, Buffer metabuf,
* of the locking rules). However, we can skip taking lmgr locks when the
* index is local to the current backend (ie, either temp or new in the
* current transaction). No one else can see it, so there's no reason to
- * take locks. We still take buffer-level locks, but not lmgr locks.
+ * take locks. We still take buffer-level locks, but not lmgr locks.
*/
#define USELOCKING(rel) (!RELATION_IS_LOCAL(rel))
@@ -136,7 +136,7 @@ _hash_getbuf(Relation rel, BlockNumber blkno, int access, int flags)
*
* This must be used only to fetch pages that are known to be before
* the index's filesystem EOF, but are to be filled from scratch.
- * _hash_pageinit() is applied automatically. Otherwise it has
+ * _hash_pageinit() is applied automatically. Otherwise it has
* effects similar to _hash_getbuf() with access = HASH_WRITE.
*
* When this routine returns, a write lock is set on the
@@ -344,7 +344,7 @@ _hash_metapinit(Relation rel, double num_tuples, ForkNumber forkNum)
/*
* Determine the target fill factor (in tuples per bucket) for this index.
* The idea is to make the fill factor correspond to pages about as full
- * as the user-settable fillfactor parameter says. We can compute it
+ * as the user-settable fillfactor parameter says. We can compute it
* exactly since the index datatype (i.e. uint32 hash key) is fixed-width.
*/
data_width = sizeof(uint32);
@@ -377,7 +377,7 @@ _hash_metapinit(Relation rel, double num_tuples, ForkNumber forkNum)
/*
* We initialize the metapage, the first N bucket pages, and the first
* bitmap page in sequence, using _hash_getnewbuf to cause smgrextend()
- * calls to occur. This ensures that the smgr level has the right idea of
+ * calls to occur. This ensures that the smgr level has the right idea of
* the physical index length.
*/
metabuf = _hash_getnewbuf(rel, HASH_METAPAGE, forkNum);
@@ -545,7 +545,7 @@ _hash_expandtable(Relation rel, Buffer metabuf)
/*
* Determine which bucket is to be split, and attempt to lock the old
- * bucket. If we can't get the lock, give up.
+ * bucket. If we can't get the lock, give up.
*
* The lock protects us against other backends, but not against our own
* backend. Must check for active scans separately.
@@ -603,7 +603,7 @@ _hash_expandtable(Relation rel, Buffer metabuf)
}
/*
- * Okay to proceed with split. Update the metapage bucket mapping info.
+ * Okay to proceed with split. Update the metapage bucket mapping info.
*
* Since we are scribbling on the metapage data right in the shared
* buffer, any failure in this next little bit leaves us with a big
@@ -641,7 +641,7 @@ _hash_expandtable(Relation rel, Buffer metabuf)
* Copy bucket mapping info now; this saves re-accessing the meta page
* inside _hash_splitbucket's inner loop. Note that once we drop the
* split lock, other splits could begin, so these values might be out of
- * date before _hash_splitbucket finishes. That's okay, since all it
+ * date before _hash_splitbucket finishes. That's okay, since all it
* needs is to tell which of these two buckets to map hashkeys into.
*/
maxbucket = metap->hashm_maxbucket;
@@ -876,7 +876,7 @@ _hash_splitbucket(Relation rel,
/*
* We're at the end of the old bucket chain, so we're done partitioning
- * the tuples. Before quitting, call _hash_squeezebucket to ensure the
+ * the tuples. Before quitting, call _hash_squeezebucket to ensure the
* tuples remaining in the old bucket (including the overflow pages) are
* packed as tightly as possible. The new bucket is already tight.
*/
diff --git a/src/backend/access/hash/hashsearch.c b/src/backend/access/hash/hashsearch.c
index 91661ba0e03..052809218dc 100644
--- a/src/backend/access/hash/hashsearch.c
+++ b/src/backend/access/hash/hashsearch.c
@@ -210,7 +210,7 @@ _hash_first(IndexScanDesc scan, ScanDirection dir)
/*
* If the previous iteration of this loop locked what is still the
- * correct target bucket, we are done. Otherwise, drop any old lock
+ * correct target bucket, we are done. Otherwise, drop any old lock
* and lock what now appears to be the correct bucket.
*/
if (retry)
@@ -269,7 +269,7 @@ _hash_first(IndexScanDesc scan, ScanDirection dir)
* _hash_step() -- step to the next valid item in a scan in the bucket.
*
* If no valid record exists in the requested direction, return
- * false. Else, return true and set the hashso_curpos for the
+ * false. Else, return true and set the hashso_curpos for the
* scan to the right thing.
*
* 'bufP' points to the current buffer, which is pinned and read-locked.
diff --git a/src/backend/access/hash/hashsort.c b/src/backend/access/hash/hashsort.c
index 1dfa7109ca6..26a3f1c437a 100644
--- a/src/backend/access/hash/hashsort.c
+++ b/src/backend/access/hash/hashsort.c
@@ -8,7 +8,7 @@
* thrashing. We use tuplesort.c to sort the given index tuples into order.
*
* Note: if the number of rows in the table has been underestimated,
- * bucket splits may occur during the index build. In that case we'd
+ * bucket splits may occur during the index build. In that case we'd
* be inserting into two or more buckets for each possible masked-off
* hash code value. That's no big problem though, since we'll still have
* plenty of locality of access.
@@ -52,7 +52,7 @@ _h_spoolinit(Relation heap, Relation index, uint32 num_buckets)
hspool->index = index;
/*
- * Determine the bitmask for hash code values. Since there are currently
+ * Determine the bitmask for hash code values. Since there are currently
* num_buckets buckets in the index, the appropriate mask can be computed
* as follows.
*
diff --git a/src/backend/access/hash/hashutil.c b/src/backend/access/hash/hashutil.c
index aa071d9185a..9773ac790a2 100644
--- a/src/backend/access/hash/hashutil.c
+++ b/src/backend/access/hash/hashutil.c
@@ -160,7 +160,7 @@ _hash_checkpage(Relation rel, Buffer buf, int flags)
/*
* ReadBuffer verifies that every newly-read page passes
* PageHeaderIsValid, which means it either contains a reasonably sane
- * page header or is all-zero. We have to defend against the all-zero
+ * page header or is all-zero. We have to defend against the all-zero
* case, however.
*/
if (PageIsNew(page))
@@ -280,7 +280,7 @@ _hash_form_tuple(Relation index, Datum *values, bool *isnull)
*
* Returns the offset of the first index entry having hashkey >= hash_value,
* or the page's max offset plus one if hash_value is greater than all
- * existing hash keys in the page. This is the appropriate place to start
+ * existing hash keys in the page. This is the appropriate place to start
* a search, or to insert a new item.
*/
OffsetNumber
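
The contract stated above is a textbook lower-bound binary search. A hedged standalone sketch over a plain array (the real routine works in 1-based page offsets rather than array indexes):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Index of the first element >= key, or n if key exceeds them all. */
    static size_t
    lower_bound(const uint32_t *keys, size_t n, uint32_t key)
    {
        size_t lo = 0, hi = n;

        while (lo < hi)
        {
            size_t mid = lo + (hi - lo) / 2;

            if (keys[mid] >= key)
                hi = mid;           /* answer is at mid or to its left */
            else
                lo = mid + 1;       /* answer is strictly to the right */
        }
        return lo;
    }

    int
    main(void)
    {
        uint32_t hashes[] = {2, 4, 4, 9, 17};

        printf("first >= 4 at %zu\n", lower_bound(hashes, 5, 4));   /* 1 */
        printf("first >= 20 at %zu\n", lower_bound(hashes, 5, 20)); /* 5 == n */
        return 0;
    }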
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index a8653e4a96c..ccdcb5be2a2 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -207,7 +207,7 @@ initscan(HeapScanDesc scan, ScanKey key, bool is_rescan)
* while the scan is in progress will be invisible to my snapshot anyway.
* (That is not true when using a non-MVCC snapshot. However, we couldn't
* guarantee to return tuples added after scan start anyway, since they
- * might go into pages we already scanned. To guarantee consistent
+ * might go into pages we already scanned. To guarantee consistent
* results for a non-MVCC snapshot, the caller must hold some higher-level
* lock that ensures the interesting tuple(s) won't change.)
*/
@@ -215,7 +215,7 @@ initscan(HeapScanDesc scan, ScanKey key, bool is_rescan)
/*
* If the table is large relative to NBuffers, use a bulk-read access
- * strategy and enable synchronized scanning (see syncscan.c). Although
+ * strategy and enable synchronized scanning (see syncscan.c). Although
* the thresholds for these features could be different, we make them the
* same so that there are only two behaviors to tune rather than four.
* (However, some callers need to be able to disable one or both of these
@@ -319,7 +319,7 @@ heapgetpage(HeapScanDesc scan, BlockNumber page)
}
/*
- * Be sure to check for interrupts at least once per page. Checks at
+ * Be sure to check for interrupts at least once per page. Checks at
* higher code levels won't be able to stop a seqscan that encounters many
* pages' worth of consecutive dead tuples.
*/
@@ -344,7 +344,7 @@ heapgetpage(HeapScanDesc scan, BlockNumber page)
/*
* We must hold share lock on the buffer content while examining tuple
- * visibility. Afterwards, however, the tuples we have found to be
+ * visibility. Afterwards, however, the tuples we have found to be
* visible are guaranteed good as long as we hold the buffer pin.
*/
LockBuffer(buffer, BUFFER_LOCK_SHARE);
@@ -1120,7 +1120,7 @@ relation_openrv(const RangeVar *relation, LOCKMODE lockmode)
*
* Same as relation_openrv, but with an additional missing_ok argument
* allowing a NULL return rather than an error if the relation is not
- * found. (Note that some other causes, such as permissions problems,
+ * found. (Note that some other causes, such as permissions problems,
* will still result in an ereport.)
* ----------------
*/
@@ -1720,7 +1720,7 @@ heap_hot_search_buffer(ItemPointer tid, Relation relation, Buffer buffer,
/*
* When first_call is true (and thus, skip is initially false) we'll
- * return the first tuple we find. But on later passes, heapTuple
+ * return the first tuple we find. But on later passes, heapTuple
* will initially be pointing to the tuple we returned last time.
* Returning it again would be incorrect (and would loop forever), so
* we skip it and return the next match we find.
@@ -1802,7 +1802,7 @@ heap_hot_search(ItemPointer tid, Relation relation, Snapshot snapshot,
* possibly uncommitted version.
*
* *tid is both an input and an output parameter: it is updated to
- * show the latest version of the row. Note that it will not be changed
+ * show the latest version of the row. Note that it will not be changed
* if no version of the row passes the snapshot test.
*/
void
@@ -1922,7 +1922,7 @@ heap_get_latest_tid(Relation relation,
*
* This is called after we have waited for the XMAX transaction to terminate.
* If the transaction aborted, we guarantee the XMAX_INVALID hint bit will
- * be set on exit. If the transaction committed, we set the XMAX_COMMITTED
+ * be set on exit. If the transaction committed, we set the XMAX_COMMITTED
* hint bit if possible --- but beware that that may not yet be possible,
* if the transaction committed asynchronously.
*
@@ -2009,7 +2009,7 @@ FreeBulkInsertState(BulkInsertState bistate)
* The return value is the OID assigned to the tuple (either here or by the
* caller), or InvalidOid if no OID. The header fields of *tup are updated
* to match the stored tuple; in particular tup->t_self receives the actual
- * TID where the tuple was stored. But note that any toasting of fields
+ * TID where the tuple was stored. But note that any toasting of fields
* within the tuple data is NOT reflected into *tup.
*/
Oid
@@ -2038,7 +2038,7 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid,
* For a heap insert, we only need to check for table-level SSI locks. Our
* new tuple can't possibly conflict with existing tuple locks, and heap
* page locks are only consolidated versions of tuple locks; they do not
- * lock "gaps" as index page locks do. So we don't need to identify a
+ * lock "gaps" as index page locks do. So we don't need to identify a
* buffer before making the call.
*/
CheckForSerializableConflictIn(relation, NULL, InvalidBuffer);
@@ -2184,7 +2184,7 @@ heap_prepare_insert(Relation relation, HeapTuple tup, TransactionId xid,
/*
* If the object id of this tuple has already been assigned, trust the
- * caller. There are a couple of ways this can happen. At initial db
+ * caller. There are a couple of ways this can happen. At initial db
* creation, the backend program sets oids for tuples. When we define
* an index, we set the oid. Finally, in the future, we may allow
* users to set their own object ids in order to support a persistent
@@ -2279,7 +2279,7 @@ heap_multi_insert(Relation relation, HeapTuple *tuples, int ntuples,
* For a heap insert, we only need to check for table-level SSI locks. Our
* new tuple can't possibly conflict with existing tuple locks, and heap
* page locks are only consolidated versions of tuple locks; they do not
- * lock "gaps" as index page locks do. So we don't need to identify a
+ * lock "gaps" as index page locks do. So we don't need to identify a
* buffer before making the call.
*/
CheckForSerializableConflictIn(relation, NULL, InvalidBuffer);
@@ -2293,7 +2293,7 @@ heap_multi_insert(Relation relation, HeapTuple *tuples, int ntuples,
int nthispage;
/*
- * Find buffer where at least the next tuple will fit. If the page is
+ * Find buffer where at least the next tuple will fit. If the page is
* all-visible, this will also pin the requisite visibility map page.
*/
buffer = RelationGetBufferForTuple(relation, heaptuples[ndone]->t_len,
@@ -2660,10 +2660,10 @@ l1:
/*
* You might think the multixact is necessarily done here, but not
* so: it could have surviving members, namely our own xact or
- * other subxacts of this backend. It is legal for us to delete
+ * other subxacts of this backend. It is legal for us to delete
* the tuple in either case, however (the latter case is
* essentially a situation of upgrading our former shared lock to
- * exclusive). We don't bother changing the on-disk hint bits
+ * exclusive). We don't bother changing the on-disk hint bits
* since we are about to overwrite the xmax altogether.
*/
}
@@ -2739,7 +2739,7 @@ l1:
* If this is the first possibly-multixact-able operation in the current
* transaction, set my per-backend OldestMemberMXactId setting. We can be
* certain that the transaction will never become a member of any older
- * MultiXactIds than that. (We have to do this even if we end up just
+ * MultiXactIds than that. (We have to do this even if we end up just
* using our own TransactionId below, since some other backend could
* incorporate our XID into a MultiXact immediately afterwards.)
*/
@@ -2755,7 +2755,7 @@ l1:
/*
* If this transaction commits, the tuple will become DEAD sooner or
* later. Set flag that this page is a candidate for pruning once our xid
- * falls below the OldestXmin horizon. If the transaction finally aborts,
+ * falls below the OldestXmin horizon. If the transaction finally aborts,
* the subsequent page pruning will be a no-op and the hint will be
* cleared.
*/
@@ -2859,7 +2859,7 @@ l1:
*
* This routine may be used to delete a tuple when concurrent updates of
* the target tuple are not expected (for example, because we have a lock
- * on the relation associated with the tuple). Any failure is reported
+ * on the relation associated with the tuple). Any failure is reported
* via ereport().
*/
void
@@ -2971,7 +2971,7 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
/*
* Fetch the list of attributes to be checked for HOT update. This is
* wasted effort if we fail to update or have to put the new tuple on a
- * different page. But we must compute the list before obtaining buffer
+ * different page. But we must compute the list before obtaining buffer
* lock --- in the worst case, if we are doing an update on one of the
* relevant system catalogs, we could deadlock if we try to fetch the list
* later. In any case, the relcache caches the data so this is usually
@@ -3052,7 +3052,7 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
* If this is the first possibly-multixact-able operation in the
* current transaction, set my per-backend OldestMemberMXactId
* setting. We can be certain that the transaction will never become a
- * member of any older MultiXactIds than that. (We have to do this
+ * member of any older MultiXactIds than that. (We have to do this
* even if we end up just using our own TransactionId below, since
* some other backend could incorporate our XID into a MultiXact
* immediately afterwards.)
@@ -3097,7 +3097,7 @@ l2:
/*
* XXX note that we don't consider the "no wait" case here. This
* isn't a problem currently because no caller uses that case, but it
- * should be fixed if such a caller is introduced. It wasn't a
+ * should be fixed if such a caller is introduced. It wasn't a
* problem previously because this code would always wait, but now
* that some tuple locks do not conflict with one of the lock modes we
* use, it is possible that this case is interesting to handle
@@ -3135,7 +3135,7 @@ l2:
* it as locker, unless it is gone completely.
*
* If it's not a multi, we need to check for sleeping conditions
- * before actually going to sleep. If the update doesn't conflict
+ * before actually going to sleep. If the update doesn't conflict
* with the locks, we just continue without sleeping (but making sure
* it is preserved).
*/
@@ -3160,10 +3160,10 @@ l2:
goto l2;
/*
- * Note that the multixact may not be done by now. It could have
+ * Note that the multixact may not be done by now. It could have
* surviving members; our own xact or other subxacts of this
* backend, and also any other concurrent transaction that locked
- * the tuple with KeyShare if we only got TupleLockUpdate. If
+ * the tuple with KeyShare if we only got TupleLockUpdate. If
* this is the case, we have to be careful to mark the updated
* tuple with the surviving members in Xmax.
*
@@ -3369,7 +3369,7 @@ l2:
* If the toaster needs to be activated, OR if the new tuple will not fit
* on the same page as the old, then we need to release the content lock
* (but not the pin!) on the old tuple's buffer while we are off doing
- * TOAST and/or table-file-extension work. We must mark the old tuple to
+ * TOAST and/or table-file-extension work. We must mark the old tuple to
* show that it's already being updated, else other processes may try to
* update it themselves.
*
@@ -3435,7 +3435,7 @@ l2:
* there's more free now than before.
*
* What's more, if we need to get a new page, we will need to acquire
- * buffer locks on both old and new pages. To avoid deadlock against
+ * buffer locks on both old and new pages. To avoid deadlock against
* some other backend trying to get the same two locks in the other
* order, we must be consistent about the order we get the locks in.
* We use the rule "lock the lower-numbered page of the relation
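
The lock-ordering rule this comment states can be illustrated compactly. A sketch, with lock_block as a hypothetical stand-in for taking an exclusive buffer content lock:

    #include <stdio.h>
    #include <stdint.h>

    typedef uint32_t BlockNumber;

    /* Stub standing in for an exclusive buffer content lock. */
    static void
    lock_block(BlockNumber blk)
    {
        printf("lock page %u\n", (unsigned) blk);
    }

    /* Acquire locks on two pages in canonical (lower page number first)
     * order, so two backends needing the same pair can never deadlock
     * by grabbing them in opposite orders. */
    static void
    lock_page_pair(BlockNumber a, BlockNumber b)
    {
        if (a == b)
        {
            lock_block(a);          /* same page: only one lock */
            return;
        }
        lock_block(a < b ? a : b);
        lock_block(a < b ? b : a);
    }

    int
    main(void)
    {
        lock_page_pair(7, 3);       /* locks 3 then 7, regardless of order */
        return 0;
    }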
@@ -3495,7 +3495,7 @@ l2:
/*
* At this point newbuf and buffer are both pinned and locked, and newbuf
- * has enough space for the new tuple. If they are the same buffer, only
+ * has enough space for the new tuple. If they are the same buffer, only
* one pin is held.
*/
@@ -3503,7 +3503,7 @@ l2:
{
/*
* Since the new tuple is going into the same page, we might be able
- * to do a HOT update. Check if any of the index columns have been
+ * to do a HOT update. Check if any of the index columns have been
* changed. If not, then HOT update is possible.
*/
if (satisfies_hot)
@@ -3521,13 +3521,13 @@ l2:
/*
* If this transaction commits, the old tuple will become DEAD sooner or
* later. Set flag that this page is a candidate for pruning once our xid
- * falls below the OldestXmin horizon. If the transaction finally aborts,
+ * falls below the OldestXmin horizon. If the transaction finally aborts,
* the subsequent page pruning will be a no-op and the hint will be
* cleared.
*
* XXX Should we set hint on newbuf as well? If the transaction aborts,
* there would be a prunable tuple in the newbuf; but for now we choose
- * not to optimize for aborts. Note that heap_xlog_update must be kept in
+ * not to optimize for aborts. Note that heap_xlog_update must be kept in
* sync if this decision changes.
*/
PageSetPrunable(page, xid);
@@ -3612,7 +3612,7 @@ l2:
* Mark old tuple for invalidation from system caches at next command
* boundary, and mark the new tuple for invalidation in case we abort. We
* have to do this before releasing the buffer because oldtup is in the
- * buffer. (heaptup is all in local memory, but it's necessary to process
+ * buffer. (heaptup is all in local memory, but it's necessary to process
* both tuple versions in one call to inval.c so we can avoid redundant
* sinval messages.)
*/
@@ -3687,7 +3687,7 @@ heap_tuple_attr_equals(TupleDesc tupdesc, int attrnum,
/*
* Extract the corresponding values. XXX this is pretty inefficient if
- * there are many indexed columns. Should HeapSatisfiesHOTandKeyUpdate do
+ * there are many indexed columns. Should HeapSatisfiesHOTandKeyUpdate do
* a single heap_deform_tuple call on each tuple, instead? But that
* doesn't work for system columns ...
*/
@@ -3710,7 +3710,7 @@ heap_tuple_attr_equals(TupleDesc tupdesc, int attrnum,
/*
* We do simple binary comparison of the two datums. This may be overly
* strict because there can be multiple binary representations for the
- * same logical value. But we should be OK as long as there are no false
+ * same logical value. But we should be OK as long as there are no false
* positives. Using a type-specific equality operator is messy because
* there could be multiple notions of equality in different operator
* classes; furthermore, we cannot safely invoke user-defined functions
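
A tiny illustration of the "no false positives" property of binary comparison described above. binary_equal is an illustrative helper, not the backend's actual datum-comparison routine:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    /* Byte-wise equality: false negatives are possible (two encodings of
     * the same logical value), but never false positives, which is all
     * the HOT-update check needs to stay correct. */
    static bool
    binary_equal(const void *a, size_t alen, const void *b, size_t blen)
    {
        if (alen != blen)
            return false;
        return memcmp(a, b, alen) == 0;
    }

    int
    main(void)
    {
        const char *x = "42", *y = "42", *z = "042";

        printf("%d\n", binary_equal(x, 2, y, 2));   /* 1 */
        printf("%d\n", binary_equal(x, 2, z, 3));   /* 0: same number,
                                                     * different bytes */
        return 0;
    }

A false negative merely forfeits a HOT update; a false positive would corrupt index contents, which is why the conservative comparison is the right trade-off.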
@@ -3832,7 +3832,7 @@ HeapSatisfiesHOTandKeyUpdate(Relation relation,
*
* This routine may be used to update a tuple when concurrent updates of
* the target tuple are not expected (for example, because we have a lock
- * on the relation associated with the tuple). Any failure is reported
+ * on the relation associated with the tuple). Any failure is reported
* via ereport().
*/
void
@@ -4056,15 +4056,15 @@ l3:
* However, if there are updates, we need to walk the update chain
* to mark future versions of the row as locked, too. That way,
* if somebody deletes that future version, we're protected
- * against the key going away. This locking of future versions
+ * against the key going away. This locking of future versions
* could block momentarily, if a concurrent transaction is
* deleting a key; or it could return a value to the effect that
- * the transaction deleting the key has already committed. So we
+ * the transaction deleting the key has already committed. So we
* do this before re-locking the buffer; otherwise this would be
* prone to deadlocks.
*
* Note that the TID we're locking was grabbed before we unlocked
- * the buffer. For it to change while we're not looking, the
+ * the buffer. For it to change while we're not looking, the
* other properties we're testing for below after re-locking the
* buffer would also change, in which case we would restart this
* loop above.
@@ -4285,7 +4285,7 @@ l3:
* Of course, the multixact might not be done here: if we're
* requesting a light lock mode, other transactions with light
* locks could still be alive, as well as locks owned by our
- * own xact or other subxacts of this backend. We need to
+ * own xact or other subxacts of this backend. We need to
* preserve the surviving MultiXact members. Note that it
* isn't absolutely necessary in the latter case, but doing so
* is simpler.
@@ -4328,7 +4328,7 @@ l3:
/*
* xwait is done, but if xwait had just locked the tuple then
* some other xact could update this tuple before we get to
- * this point. Check for xmax change, and start over if so.
+ * this point. Check for xmax change, and start over if so.
*/
if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
!TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data), xwait))
@@ -4337,7 +4337,7 @@ l3:
goto l3;
/*
- * Otherwise check if it committed or aborted. Note we cannot
+ * Otherwise check if it committed or aborted. Note we cannot
* be here if the tuple was only locked by somebody who didn't
* conflict with us; that should have been handled above. So
* that transaction must necessarily be gone by now.
@@ -4417,7 +4417,7 @@ failed:
* If this is the first possibly-multixact-able operation in the current
* transaction, set my per-backend OldestMemberMXactId setting. We can be
* certain that the transaction will never become a member of any older
- * MultiXactIds than that. (We have to do this even if we end up just
+ * MultiXactIds than that. (We have to do this even if we end up just
* using our own TransactionId below, since some other backend could
* incorporate our XID into a MultiXact immediately afterwards.)
*/
@@ -4453,7 +4453,7 @@ failed:
HeapTupleHeaderSetXmax(tuple->t_data, xid);
/*
- * Make sure there is no forward chain link in t_ctid. Note that in the
+ * Make sure there is no forward chain link in t_ctid. Note that in the
* cases where the tuple has been updated, we must not overwrite t_ctid,
* because it was set by the updater. Moreover, if the tuple has been
* updated, we need to follow the update chain to lock the new versions of
@@ -4465,8 +4465,8 @@ failed:
MarkBufferDirty(*buffer);
/*
- * XLOG stuff. You might think that we don't need an XLOG record because
- * there is no state change worth restoring after a crash. You would be
+ * XLOG stuff. You might think that we don't need an XLOG record because
+ * there is no state change worth restoring after a crash. You would be
* wrong however: we have just written either a TransactionId or a
* MultiXactId that may never have been seen on disk before, and we need
* to make sure that there are XLOG entries covering those ID numbers.
@@ -4630,7 +4630,7 @@ l5:
* If the XMAX is already a MultiXactId, then we need to expand it to
* include add_to_xmax; but if all the members were lockers and are
* all gone, we can do away with the IS_MULTI bit and just set
- * add_to_xmax as the only locker/updater. If all lockers are gone
+ * add_to_xmax as the only locker/updater. If all lockers are gone
* and we have an updater that aborted, we can also do without a
* multi.
*
@@ -4712,8 +4712,8 @@ l5:
{
/*
* LOCK_ONLY can be present alone only when a page has been
- * upgraded by pg_upgrade. But in that case,
- * TransactionIdIsInProgress() should have returned false. We
+ * upgraded by pg_upgrade. But in that case,
+ * TransactionIdIsInProgress() should have returned false. We
* assume it's no longer locked in this case.
*/
elog(WARNING, "LOCK_ONLY found for Xid in progress %u", xmax);
@@ -4871,7 +4871,7 @@ test_lockmode_for_conflict(MultiXactStatus status, TransactionId xid,
* The other transaction committed. If it was only a locker, then the
* lock is completely gone now and we can return success; but if it
* was an update, then what we do depends on whether the two lock
- * modes conflict. If they conflict, then we must report error to
+ * modes conflict. If they conflict, then we must report error to
* caller. But if they don't, we can fall through to allow the current
* transaction to lock the tuple.
*
@@ -5132,7 +5132,7 @@ l4:
* The initial tuple is assumed to be already locked.
*
* This function doesn't check visibility; it just unconditionally marks the
- * tuple(s) as locked. If any tuple in the updated chain is being deleted
+ * tuple(s) as locked. If any tuple in the updated chain is being deleted
* concurrently (or updated with the key being modified), sleep until the
* transaction doing it is finished.
*
@@ -5156,7 +5156,7 @@ heap_lock_updated_tuple(Relation rel, HeapTuple tuple, ItemPointer ctid,
* If this is the first possibly-multixact-able operation in the
* current transaction, set my per-backend OldestMemberMXactId
* setting. We can be certain that the transaction will never become a
- * member of any older MultiXactIds than that. (We have to do this
+ * member of any older MultiXactIds than that. (We have to do this
* even if we end up just using our own TransactionId below, since
* some other backend could incorporate our XID into a MultiXact
* immediately afterwards.)
@@ -5175,7 +5175,7 @@ heap_lock_updated_tuple(Relation rel, HeapTuple tuple, ItemPointer ctid,
* heap_inplace_update - update a tuple "in place" (ie, overwrite it)
*
* Overwriting violates both MVCC and transactional safety, so the uses
- * of this function in Postgres are extremely limited. Nonetheless we
+ * of this function in Postgres are extremely limited. Nonetheless we
* find some places to use it.
*
* The tuple cannot change size, and therefore it's reasonable to assume
@@ -5537,7 +5537,7 @@ FreezeMultiXactId(MultiXactId multi, uint16 t_infomask,
* heap_prepare_freeze_tuple
*
* Check to see whether any of the XID fields of a tuple (xmin, xmax, xvac)
- * are older than the specified cutoff XID and cutoff MultiXactId. If so,
+ * are older than the specified cutoff XID and cutoff MultiXactId. If so,
* setup enough state (in the *frz output argument) to later execute and
* WAL-log what we would need to do, and return TRUE. Return FALSE if nothing
* is to be changed.
@@ -5666,7 +5666,7 @@ heap_prepare_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid,
/*
* The tuple might be marked either XMAX_INVALID or XMAX_COMMITTED +
- * LOCKED. Normalize to INVALID just to be sure no one gets confused.
+ * LOCKED. Normalize to INVALID just to be sure no one gets confused.
* Also get rid of the HEAP_KEYS_UPDATED bit.
*/
frz->t_infomask &= ~HEAP_XMAX_BITS;
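
The "older than the cutoff" tests above rely on circular XID arithmetic. A minimal sketch of that comparison in the style of TransactionIdPrecedes (normal, non-permanent XIDs assumed):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t TransactionId;

    /* Modulo-2^32 ("circular") comparison: a precedes b if the signed
     * distance between them is negative.  This is what "older than the
     * specified cutoff XID" means in the comment above. */
    static bool
    xid_precedes(TransactionId a, TransactionId b)
    {
        return (int32_t) (a - b) < 0;
    }

    int
    main(void)
    {
        TransactionId cutoff = 1000;

        printf("%d\n", xid_precedes(900, cutoff));      /* 1: freeze it */
        printf("%d\n", xid_precedes(1100, cutoff));     /* 0: still young */
        printf("%d\n", xid_precedes(4294967000u, 200)); /* 1: wrapped around */
        return 0;
    }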
@@ -6040,7 +6040,7 @@ ConditionalMultiXactIdWait(MultiXactId multi, MultiXactStatus status,
* heap_tuple_needs_freeze
*
* Check to see whether any of the XID fields of a tuple (xmin, xmax, xvac)
- * are older than the specified cutoff XID or MultiXactId. If so, return TRUE.
+ * are older than the specified cutoff XID or MultiXactId. If so, return TRUE.
*
* It doesn't matter whether the tuple is alive or dead, we are checking
* to see if a tuple needs to be removed or frozen to avoid wraparound.
@@ -6164,7 +6164,7 @@ heap_restrpos(HeapScanDesc scan)
else
{
/*
- * If we reached end of scan, rs_inited will now be false. We must
+ * If we reached end of scan, rs_inited will now be false. We must
* reset it to true to keep heapgettup from doing the wrong thing.
*/
scan->rs_inited = true;
@@ -6348,7 +6348,7 @@ log_heap_clean(Relation reln, Buffer buffer,
}
/*
- * Perform XLogInsert for a heap-freeze operation. Caller must have already
+ * Perform XLogInsert for a heap-freeze operation. Caller must have already
* modified the buffer and marked it dirty.
*/
XLogRecPtr
@@ -6393,7 +6393,7 @@ log_heap_freeze(Relation reln, Buffer buffer, TransactionId cutoff_xid,
/*
* Perform XLogInsert for a heap-visible operation. 'block' is the block
* being marked all-visible, and vm_buffer is the buffer containing the
- * corresponding visibility map block. Both should have already been modified
+ * corresponding visibility map block. Both should have already been modified
* and dirtied.
*
* If checksums are enabled, we also add the heap_buffer to the chain to
@@ -6442,7 +6442,7 @@ log_heap_visible(RelFileNode rnode, Buffer heap_buffer, Buffer vm_buffer,
}
/*
- * Perform XLogInsert for a heap-update operation. Caller must already
+ * Perform XLogInsert for a heap-update operation. Caller must already
* have modified the buffer(s) and marked them dirty.
*/
static XLogRecPtr
@@ -6772,7 +6772,7 @@ heap_xlog_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid,
/*
* The tuple might be marked either XMAX_INVALID or XMAX_COMMITTED +
- * LOCKED. Normalize to INVALID just to be sure no one gets confused.
+ * LOCKED. Normalize to INVALID just to be sure no one gets confused.
* Also get rid of the HEAP_KEYS_UPDATED bit.
*/
tuple->t_infomask &= ~HEAP_XMAX_BITS;
@@ -7492,7 +7492,7 @@ heap_xlog_update(XLogRecPtr lsn, XLogRecord *record, bool hot_update)
/*
* In normal operation, it is important to lock the two pages in
* page-number order, to avoid possible deadlocks against other update
- * operations going the other way. However, during WAL replay there can
+ * operations going the other way. However, during WAL replay there can
* be no other update happening, so we don't need to worry about that. But
* we *do* need to worry that we don't expose an inconsistent state to Hot
* Standby queries --- so the original page can't be unlocked before we've
diff --git a/src/backend/access/heap/hio.c b/src/backend/access/heap/hio.c
index 8da26908daf..c6fe2cb0e37 100644
--- a/src/backend/access/heap/hio.c
+++ b/src/backend/access/heap/hio.c
@@ -146,7 +146,7 @@ GetVisibilityMapPins(Relation relation, Buffer buffer1, Buffer buffer2,
/*
* If there are two buffers involved and we pinned just one of them,
* it's possible that the second one became all-visible while we were
- * busy pinning the first one. If it looks like that's a possible
+ * busy pinning the first one. If it looks like that's a possible
* scenario, we'll need to make a second pass through this loop.
*/
if (buffer2 == InvalidBuffer || buffer1 == buffer2
@@ -177,7 +177,7 @@ GetVisibilityMapPins(Relation relation, Buffer buffer1, Buffer buffer2,
* NOTE: it is unlikely, but not quite impossible, for otherBuffer to be the
* same buffer we select for insertion of the new tuple (this could only
* happen if space is freed in that page after heap_update finds there's not
- * enough there). In that case, the page will be pinned and locked only once.
+ * enough there). In that case, the page will be pinned and locked only once.
*
* For the vmbuffer and vmbuffer_other arguments, we avoid deadlock by
* locking them only after locking the corresponding heap page, and taking
@@ -198,7 +198,7 @@ GetVisibilityMapPins(Relation relation, Buffer buffer1, Buffer buffer2,
* for additional constraints needed for safe usage of this behavior.)
*
* The caller can also provide a BulkInsertState object to optimize many
- * insertions into the same relation. This keeps a pin on the current
+ * insertions into the same relation. This keeps a pin on the current
* insertion target page (to save pin/unpin cycles) and also passes a
* BULKWRITE buffer selection strategy object to the buffer manager.
* Passing NULL for bistate selects the default behavior.
@@ -252,7 +252,7 @@ RelationGetBufferForTuple(Relation relation, Size len,
/*
* We first try to put the tuple on the same page we last inserted a tuple
- * on, as cached in the BulkInsertState or relcache entry. If that
+ * on, as cached in the BulkInsertState or relcache entry. If that
* doesn't work, we ask the Free Space Map to locate a suitable page.
* Since the FSM's info might be out of date, we have to be prepared to
* loop around and retry multiple times. (To ensure this isn't an infinite
@@ -284,7 +284,7 @@ RelationGetBufferForTuple(Relation relation, Size len,
/*
* If the FSM knows nothing of the rel, try the last page before we
- * give up and extend. This avoids one-tuple-per-page syndrome during
+ * give up and extend. This avoids one-tuple-per-page syndrome during
* bootstrapping or in a recently-started system.
*/
if (targetBlock == InvalidBlockNumber)
@@ -306,7 +306,7 @@ RelationGetBufferForTuple(Relation relation, Size len,
* If the page-level all-visible flag is set, caller will need to
* clear both that and the corresponding visibility map bit. However,
* by the time we return, we'll have x-locked the buffer, and we don't
- * want to do any I/O while in that state. So we check the bit here
+ * want to do any I/O while in that state. So we check the bit here
* before taking the lock, and pin the page if it appears necessary.
* Checking without the lock creates a risk of getting the wrong
* answer, so we'll have to recheck after acquiring the lock.
@@ -348,7 +348,7 @@ RelationGetBufferForTuple(Relation relation, Size len,
/*
* We now have the target page (and the other buffer, if any) pinned
- * and locked. However, since our initial PageIsAllVisible checks
+ * and locked. However, since our initial PageIsAllVisible checks
* were performed before acquiring the lock, the results might now be
* out of date, either for the selected victim buffer, or for the
* other buffer passed by the caller. In that case, we'll need to
@@ -391,7 +391,7 @@ RelationGetBufferForTuple(Relation relation, Size len,
/*
* Not enough space, so we must give up our page locks and pin (if
- * any) and prepare to look elsewhere. We don't care which order we
+ * any) and prepare to look elsewhere. We don't care which order we
* unlock the two buffers in, so this can be slightly simpler than the
* code above.
*/
@@ -433,7 +433,7 @@ RelationGetBufferForTuple(Relation relation, Size len,
/*
* XXX This does an lseek - rather expensive - but at the moment it is the
- * only way to accurately determine how many blocks are in a relation. Is
+ * only way to accurately determine how many blocks are in a relation. Is
* it worth keeping an accurate file length in shared memory someplace,
* rather than relying on the kernel to do it for us?
*/
@@ -453,7 +453,7 @@ RelationGetBufferForTuple(Relation relation, Size len,
/*
* Release the file-extension lock; it's now OK for someone else to extend
- * the relation some more. Note that we cannot release this lock before
+ * the relation some more. Note that we cannot release this lock before
* we have buffer lock on the new page, or we risk a race condition
* against vacuumlazy.c --- see comments therein.
*/
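
The hunks above together describe hio.c's target-page selection. A heavily simplified sketch of that retry loop; fsm_lookup, fsm_record, page_free_space, and extend_relation are hypothetical stand-ins for the FSM and storage calls, not real backend APIs:

    #include <stdio.h>
    #include <stdint.h>

    typedef uint32_t BlockNumber;
    #define InvalidBlockNumber ((BlockNumber) 0xFFFFFFFF)

    static BlockNumber cached_target = InvalidBlockNumber;

    static BlockNumber fsm_lookup(size_t len) { (void) len; return InvalidBlockNumber; }
    static void   fsm_record(BlockNumber blk, size_t avail) { (void) blk; (void) avail; }
    static size_t page_free_space(BlockNumber blk) { (void) blk; return 0; }
    static BlockNumber extend_relation(void) { return 42; }

    static BlockNumber
    get_buffer_for_tuple(size_t len)
    {
        BlockNumber target = cached_target;

        if (target == InvalidBlockNumber)
            target = fsm_lookup(len);       /* FSM info may be stale */

        while (target != InvalidBlockNumber)
        {
            if (page_free_space(target) >= len)
            {
                cached_target = target;     /* remember for next insert */
                return target;
            }
            /* Page was fuller than the FSM thought: record the truth so
             * the FSM stops suggesting this page, then ask again. */
            fsm_record(target, page_free_space(target));
            target = fsm_lookup(len);
        }
        return extend_relation();           /* nothing fits: add a page */
    }

    int
    main(void)
    {
        printf("tuple goes on block %u\n",
               (unsigned) get_buffer_for_tuple(128));
        return 0;
    }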
diff --git a/src/backend/access/heap/pruneheap.c b/src/backend/access/heap/pruneheap.c
index 8f0c02d9c2c..9a8db74cb95 100644
--- a/src/backend/access/heap/pruneheap.c
+++ b/src/backend/access/heap/pruneheap.c
@@ -100,7 +100,7 @@ heap_page_prune_opt(Relation relation, Buffer buffer, TransactionId OldestXmin)
* Checking free space here is questionable since we aren't holding any
* lock on the buffer; in the worst case we could get a bogus answer. It's
* unlikely to be *seriously* wrong, though, since reading either pd_lower
- * or pd_upper is probably atomic. Avoiding taking a lock seems more
+ * or pd_upper is probably atomic. Avoiding taking a lock seems more
* important than sometimes getting a wrong answer in what is after all
* just a heuristic estimate.
*/
@@ -315,8 +315,8 @@ heap_page_prune(Relation relation, Buffer buffer, TransactionId OldestXmin,
* OldestXmin is the cutoff XID used to identify dead tuples.
*
* We don't actually change the page here, except perhaps for hint-bit updates
- * caused by HeapTupleSatisfiesVacuum. We just add entries to the arrays in
- * prstate showing the changes to be made. Items to be redirected are added
+ * caused by HeapTupleSatisfiesVacuum. We just add entries to the arrays in
+ * prstate showing the changes to be made. Items to be redirected are added
* to the redirected[] array (two entries per redirection); items to be set to
* LP_DEAD state are added to nowdead[]; and items to be set to LP_UNUSED
* state are added to nowunused[].
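
A sketch of the deferred-edit bookkeeping this comment describes; PruneSketch and its item bound are illustrative, not the real PruneState:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint16_t OffsetNumber;
    #define MAX_ITEMS_PER_PAGE 291      /* illustrative bound */

    /* Nothing touches the page while scanning; we only queue up edits,
     * then apply them all inside one critical section (and WAL record). */
    typedef struct
    {
        int          nredirected;       /* pairs in redirected[] */
        int          ndead;
        OffsetNumber redirected[MAX_ITEMS_PER_PAGE * 2];    /* from,to */
        OffsetNumber nowdead[MAX_ITEMS_PER_PAGE];
    } PruneSketch;

    static void
    record_redirect(PruneSketch *ps, OffsetNumber from, OffsetNumber to)
    {
        ps->redirected[ps->nredirected * 2] = from;
        ps->redirected[ps->nredirected * 2 + 1] = to;
        ps->nredirected++;
    }

    static void
    record_dead(PruneSketch *ps, OffsetNumber off)
    {
        ps->nowdead[ps->ndead++] = off;
    }

    int
    main(void)
    {
        PruneSketch ps = {0};

        record_redirect(&ps, 1, 4);     /* root pointer 1 -> live tuple 4 */
        record_dead(&ps, 2);            /* dead chain member */
        printf("%d redirect(s), %d dead\n", ps.nredirected, ps.ndead);
        return 0;
    }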
@@ -358,7 +358,7 @@ heap_prune_chain(Relation relation, Buffer buffer, OffsetNumber rootoffnum,
* We need this primarily to handle aborted HOT updates, that is,
* XMIN_INVALID heap-only tuples. Those might not be linked to by
* any chain, since the parent tuple might be re-updated before
- * any pruning occurs. So we have to be able to reap them
+ * any pruning occurs. So we have to be able to reap them
* separately from chain-pruning. (Note that
* HeapTupleHeaderIsHotUpdated will never return true for an
* XMIN_INVALID tuple, so this code will work even when there were
@@ -544,7 +544,7 @@ heap_prune_chain(Relation relation, Buffer buffer, OffsetNumber rootoffnum,
/*
* If the root entry had been a normal tuple, we are deleting it, so
- * count it in the result. But changing a redirect (even to DEAD
+ * count it in the result. But changing a redirect (even to DEAD
* state) doesn't count.
*/
if (ItemIdIsNormal(rootlp))
@@ -633,7 +633,7 @@ heap_prune_record_unused(PruneState *prstate, OffsetNumber offnum)
* buffer, and is inside a critical section.
*
* This is split out because it is also used by heap_xlog_clean()
- * to replay the WAL record when needed after a crash. Note that the
+ * to replay the WAL record when needed after a crash. Note that the
* arguments are identical to those of log_heap_clean().
*/
void
diff --git a/src/backend/access/heap/rewriteheap.c b/src/backend/access/heap/rewriteheap.c
index 951894ce5ac..76f972e79f9 100644
--- a/src/backend/access/heap/rewriteheap.c
+++ b/src/backend/access/heap/rewriteheap.c
@@ -10,7 +10,7 @@
*
* The caller is responsible for creating the new heap, all catalog
* changes, supplying the tuples to be written to the new heap, and
- * rebuilding indexes. The caller must hold AccessExclusiveLock on the
+ * rebuilding indexes. The caller must hold AccessExclusiveLock on the
* target table, because we assume no one else is writing into it.
*
* To use the facility:
@@ -43,7 +43,7 @@
* to substitute the correct ctid instead.
*
* For each ctid reference from A -> B, we might encounter either A first
- * or B first. (Note that a tuple in the middle of a chain is both A and B
+ * or B first. (Note that a tuple in the middle of a chain is both A and B
* of different pairs.)
*
* If we encounter A first, we'll store the tuple in the unresolved_tups
@@ -58,11 +58,11 @@
* and can write A immediately with the correct ctid.
*
* Entries in the hash tables can be removed as soon as the later tuple
- * is encountered. That helps to keep the memory usage down. At the end,
+ * is encountered. That helps to keep the memory usage down. At the end,
* both tables are usually empty; we should have encountered both A and B
* of each pair. However, it's possible for A to be RECENTLY_DEAD and B
* entirely DEAD according to HeapTupleSatisfiesVacuum, because the test
- * for deadness using OldestXmin is not exact. In such a case we might
+ * for deadness using OldestXmin is not exact. In such a case we might
* encounter B first, and skip it, and find A later. Then A would be added
* to unresolved_tups, and stay there until end of the rewrite. Since
* this case is very unusual, we don't worry about the memory usage.
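
The A-first / B-first bookkeeping described above can be sketched with two toy tables. Everything here is illustrative (TIDs as ints, linear arrays instead of hash tables); in the real code, resolving an entry also writes the held-back tuple and removes the entry:

    #include <stdio.h>

    typedef int Tid;
    #define MAXPAIRS 16

    static Tid unresolved_old[MAXPAIRS];    /* A seen first: A's ctid link */
    static Tid unresolved_new[MAXPAIRS];    /* ...and A's new location */
    static int nunresolved;

    static Tid map_old[MAXPAIRS];           /* B seen first: B's old TID */
    static Tid map_new[MAXPAIRS];           /* ...and B's new TID */
    static int nmapped;

    /* A RECENTLY_DEAD tuple A, linking to old_next, lands at new_self. */
    static void
    saw_a(Tid old_next, Tid new_self)
    {
        for (int i = 0; i < nmapped; i++)
            if (map_old[i] == old_next)
            {
                printf("A at %d links to %d (resolved at once)\n",
                       new_self, map_new[i]);
                return;
            }
        unresolved_old[nunresolved] = old_next;     /* hold A back */
        unresolved_new[nunresolved++] = new_self;
    }

    /* Tuple B, previously at old_self, lands at new_self. */
    static void
    saw_b(Tid old_self, Tid new_self)
    {
        for (int i = 0; i < nunresolved; i++)
            if (unresolved_old[i] == old_self)
            {
                printf("A at %d links to %d (resolved late)\n",
                       unresolved_new[i], new_self);
                return;
            }
        map_old[nmapped] = old_self;        /* remember for a later A */
        map_new[nmapped++] = new_self;
    }

    int
    main(void)
    {
        saw_a(7, 100);      /* A first: parked in the unresolved table */
        saw_b(7, 205);      /* B arrives: A's ctid can now point at 205 */
        return 0;
    }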
@@ -78,7 +78,7 @@
* of CLUSTERing on an unchanging key column, we'll see all the versions
* of a given tuple together anyway, and so the peak memory usage is only
* proportional to the number of RECENTLY_DEAD versions of a single row, not
- * in the whole table. Note that if we do fail halfway through a CLUSTER,
+ * in the whole table. Note that if we do fail halfway through a CLUSTER,
* the old table is still valid, so failure is not catastrophic.
*
* We can't use the normal heap_insert function to insert into the new
@@ -287,7 +287,7 @@ end_heap_rewrite(RewriteState state)
}
/*
- * If the rel is WAL-logged, must fsync before commit. We use heap_sync
+ * If the rel is WAL-logged, must fsync before commit. We use heap_sync
* to ensure that the toast table gets fsync'd too.
*
* It's obvious that we must do this when not WAL-logging. It's less
@@ -554,7 +554,7 @@ rewrite_heap_dead_tuple(RewriteState state, HeapTuple old_tuple)
}
/*
- * Insert a tuple to the new relation. This has to track heap_insert
+ * Insert a tuple to the new relation. This has to track heap_insert
* and its subsidiary functions!
*
* t_self of the tuple is set to the new TID of the tuple. If t_ctid of the
diff --git a/src/backend/access/heap/syncscan.c b/src/backend/access/heap/syncscan.c
index 09384d8850b..884485ccccd 100644
--- a/src/backend/access/heap/syncscan.c
+++ b/src/backend/access/heap/syncscan.c
@@ -4,7 +4,7 @@
* heap scan synchronization support
*
* When multiple backends run a sequential scan on the same table, we try
- * to keep them synchronized to reduce the overall I/O needed. The goal is
+ * to keep them synchronized to reduce the overall I/O needed. The goal is
* to read each page into shared buffer cache only once, and let all backends
* that take part in the shared scan process the page before it falls out of
* the cache.
@@ -26,7 +26,7 @@
* don't want such queries to slow down others.
*
* There can realistically only be a few large sequential scans on different
- * tables in progress at any time. Therefore we just keep the scan positions
+ * tables in progress at any time. Therefore we just keep the scan positions
* in a small LRU list which we scan every time we need to look up or update a
* scan position. The whole mechanism is only applied for tables exceeding
* a threshold size (but that is not the concern of this module).
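
A compact sketch of such an LRU of scan positions; the types and the four-entry bound are illustrative:

    #include <stdio.h>
    #include <stdint.h>

    #define SYNC_SCAN_NELEM 4           /* a few entries suffice, per above */

    typedef struct
    {
        uint32_t rel;                   /* stand-in for a relation id */
        uint32_t location;              /* current block of the scan */
    } ScanLoc;

    static ScanLoc lru[SYNC_SCAN_NELEM];    /* lru[0] = most recently used */
    static int     nused;

    /* Look up (or install) the position slot for rel, moving it to the
     * front; with only a handful of concurrent large scans, a linear
     * pass over a tiny array beats a real hash table. */
    static uint32_t *
    ss_slot(uint32_t rel)
    {
        int i;

        for (i = 0; i < nused; i++)
            if (lru[i].rel == rel)
                break;
        if (i == nused)                 /* not found: take/evict a slot */
        {
            i = (nused < SYNC_SCAN_NELEM) ? nused++ : SYNC_SCAN_NELEM - 1;
            lru[i].rel = rel;
            lru[i].location = 0;
        }
        for (; i > 0; i--)              /* bubble the entry to the front */
        {
            ScanLoc tmp = lru[i];
            lru[i] = lru[i - 1];
            lru[i - 1] = tmp;
        }
        return &lru[0].location;
    }

    int
    main(void)
    {
        *ss_slot(16384) = 1234;         /* a scan reports its position */
        printf("next scan starts near block %u\n",
               (unsigned) *ss_slot(16384));
        return 0;
    }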
@@ -243,7 +243,7 @@ ss_search(RelFileNode relfilenode, BlockNumber location, bool set)
* relation, or 0 if no valid location is found.
*
* We expect the caller has just done RelationGetNumberOfBlocks(), and
- * so that number is passed in rather than computing it again. The result
+ * so that number is passed in rather than computing it again. The result
* is guaranteed less than relnblocks (assuming that's > 0).
*/
BlockNumber
diff --git a/src/backend/access/heap/tuptoaster.c b/src/backend/access/heap/tuptoaster.c
index 219e6a635ae..55afdc8ca44 100644
--- a/src/backend/access/heap/tuptoaster.c
+++ b/src/backend/access/heap/tuptoaster.c
@@ -552,7 +552,7 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup,
* We took care of UPDATE above, so any external value we find
* still in the tuple must be someone else's we cannot reuse.
* Fetch it back (without decompression, unless we are forcing
- * PLAIN storage). If necessary, we'll push it out as a new
+ * PLAIN storage). If necessary, we'll push it out as a new
* external value below.
*/
if (VARATT_IS_EXTERNAL(new_value))
@@ -695,7 +695,7 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup,
/*
* Second we look for attributes of attstorage 'x' or 'e' that are still
- * inline. But skip this if there's no toast table to push them to.
+ * inline. But skip this if there's no toast table to push them to.
*/
while (heap_compute_data_size(tupleDesc,
toast_values, toast_isnull) > maxDataLen &&
@@ -805,7 +805,7 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup,
}
/*
- * Finally we store attributes of type 'm' externally. At this point we
+ * Finally we store attributes of type 'm' externally. At this point we
* increase the target tuple size, so that 'm' attributes aren't stored
* externally unless really necessary.
*/
@@ -1351,7 +1351,7 @@ toast_save_datum(Relation rel, Datum value,
* those versions could easily reference the same toast value.
* When we copy the second or later version of such a row,
* reusing the OID will mean we select an OID that's already
- * in the new toast table. Check for that, and if so, just
+ * in the new toast table. Check for that, and if so, just
* fall through without writing the data again.
*
* While annoying and ugly-looking, this is a good thing
@@ -1417,7 +1417,7 @@ toast_save_datum(Relation rel, Datum value,
heap_insert(toastrel, toasttup, mycid, options, NULL);
/*
- * Create the index entry. We cheat a little here by not using
+ * Create the index entry. We cheat a little here by not using
* FormIndexDatum: this relies on the knowledge that the index columns
* are the same as the initial columns of the table.
*
diff --git a/src/backend/access/heap/visibilitymap.c b/src/backend/access/heap/visibilitymap.c
index 7f40d89b9f1..f0436859957 100644
--- a/src/backend/access/heap/visibilitymap.c
+++ b/src/backend/access/heap/visibilitymap.c
@@ -27,7 +27,7 @@
* the sense that we make sure that whenever a bit is set, we know the
* condition is true, but if a bit is not set, it might or might not be true.
*
- * Clearing a visibility map bit is not separately WAL-logged. The callers
+ * Clearing a visibility map bit is not separately WAL-logged. The callers
* must make sure that whenever a bit is cleared, the bit is cleared on WAL
* replay of the updating operation as well.
*
@@ -36,9 +36,9 @@
* it may still be the case that every tuple on the page is visible to all
* transactions; we just don't know that for certain. The difficulty is that
* there are two bits which are typically set together: the PD_ALL_VISIBLE bit
- * on the page itself, and the visibility map bit. If a crash occurs after the
+ * on the page itself, and the visibility map bit. If a crash occurs after the
* visibility map page makes it to disk and before the updated heap page makes
- * it to disk, redo must set the bit on the heap page. Otherwise, the next
+ * it to disk, redo must set the bit on the heap page. Otherwise, the next
* insert, update, or delete on the heap page will fail to realize that the
* visibility map bit must be cleared, possibly causing index-only scans to
* return wrong answers.
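
The map addressing implied above (one bit per heap page) reduces to simple arithmetic. A sketch in the spirit of the HEAPBLK_TO_* macros in visibilitymap.c; the 24-byte header size is an assumption for illustration:

    #include <stdint.h>
    #include <stdio.h>

    #define BLCKSZ        8192
    #define MAPSIZE       (BLCKSZ - 24)     /* usable bytes per map page */
    #define BITS_PER_PAGE (MAPSIZE * 8)     /* one bit per heap page */

    /* Locate the visibility bit for a heap block: which map page, which
     * byte within it, and which bit within that byte. */
    static void
    vm_locate(uint32_t heapBlk, uint32_t *mapBlock, uint32_t *mapByte,
              unsigned *mapBit)
    {
        *mapBlock = heapBlk / BITS_PER_PAGE;
        *mapByte  = (heapBlk % BITS_PER_PAGE) / 8;
        *mapBit   = heapBlk % 8;
    }

    int
    main(void)
    {
        uint32_t mblk, mbyte;
        unsigned bit;

        vm_locate(123456, &mblk, &mbyte, &bit);
        printf("heap block 123456 -> map page %u, byte %u, bit %u\n",
               (unsigned) mblk, (unsigned) mbyte, bit);
        return 0;
    }

This density (tens of thousands of heap pages per map page) is why the map stays small enough to cache well, and why a set bit must always be trustworthy while a clear bit may be stale.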
@@ -59,10 +59,10 @@
* the buffer lock over any I/O that may be required to read in the visibility
* map page. To avoid this, we examine the heap page before locking it;
* if the page-level PD_ALL_VISIBLE bit is set, we pin the visibility map
- * bit. Then, we lock the buffer. But this creates a race condition: there
+ * bit. Then, we lock the buffer. But this creates a race condition: there
* is a possibility that in the time it takes to lock the buffer, the
* PD_ALL_VISIBLE bit gets set. If that happens, we have to unlock the
- * buffer, pin the visibility map page, and relock the buffer. This shouldn't
+ * buffer, pin the visibility map page, and relock the buffer. This shouldn't
* happen often, because only VACUUM currently sets visibility map bits,
* and the race will only occur if VACUUM processes a given page at almost
* exactly the same time that someone tries to further modify it.
@@ -227,9 +227,9 @@ visibilitymap_pin_ok(BlockNumber heapBlk, Buffer buf)
* visibilitymap_set - set a bit on a previously pinned page
*
* recptr is the LSN of the XLOG record we're replaying, if we're in recovery,
- * or InvalidXLogRecPtr in normal running. The page LSN is advanced to the
+ * or InvalidXLogRecPtr in normal running. The page LSN is advanced to the
* one provided; in normal running, we generate a new XLOG record and set the
- * page LSN to that value. cutoff_xid is the largest xmin on the page being
+ * page LSN to that value. cutoff_xid is the largest xmin on the page being
* marked all-visible; it is needed for Hot Standby, and can be
* InvalidTransactionId if the page contains no tuples.
*
@@ -320,10 +320,10 @@ visibilitymap_set(Relation rel, BlockNumber heapBlk, Buffer heapBuf,
* releasing *buf after it's done testing and setting bits.
*
* NOTE: This function is typically called without a lock on the heap page,
- * so somebody else could change the bit just after we look at it. In fact,
+ * so somebody else could change the bit just after we look at it. In fact,
* since we don't lock the visibility map page either, it's even possible that
* someone else could have changed the bit just before we look at it, but yet
- * we might see the old value. It is the caller's responsibility to deal with
+ * we might see the old value. It is the caller's responsibility to deal with
* all concurrency issues!
*/
bool
@@ -526,7 +526,7 @@ vm_readbuf(Relation rel, BlockNumber blkno, bool extend)
/*
* We might not have opened the relation at the smgr level yet, or we
- * might have been forced to close it by a sinval message. The code below
+ * might have been forced to close it by a sinval message. The code below
* won't necessarily notice relation extension immediately when extend =
* false, so we rely on sinval messages to ensure that our ideas about the
* size of the map aren't too far out of date.
diff --git a/src/backend/access/index/genam.c b/src/backend/access/index/genam.c
index 31a419b841e..bc9e807401d 100644
--- a/src/backend/access/index/genam.c
+++ b/src/backend/access/index/genam.c
@@ -44,7 +44,7 @@
*
* At the end of a scan, the AM's endscan routine undoes the locking,
* but does *not* call IndexScanEnd --- the higher-level index_endscan
- * routine does that. (We can't do it in the AM because index_endscan
+ * routine does that. (We can't do it in the AM because index_endscan
* still needs to touch the IndexScanDesc after calling the AM.)
*
* Because of this, the AM does not have a choice whether to call
@@ -187,7 +187,7 @@ BuildIndexValueDescription(Relation indexRelation,
* at rd_opcintype not the index tupdesc.
*
* Note: this is a bit shaky for opclasses that have pseudotype
- * input types such as ANYARRAY or RECORD. Currently, the
+ * input types such as ANYARRAY or RECORD. Currently, the
* typoutput functions associated with the pseudotypes will work
* okay, but we might have to try harder in future.
*/
@@ -413,7 +413,7 @@ systable_endscan(SysScanDesc sysscan)
* index order. Also, for largely historical reasons, the index to use
* is opened and locked by the caller, not here.
*
- * Currently we do not support non-index-based scans here. (In principle
+ * Currently we do not support non-index-based scans here. (In principle
* we could do a heapscan and sort, but the uses are in places that
* probably don't need to still work with corrupted catalog indexes.)
* For the moment, therefore, these functions are merely the thinnest of
diff --git a/src/backend/access/index/indexam.c b/src/backend/access/index/indexam.c
index b87815544d9..2067347a615 100644
--- a/src/backend/access/index/indexam.c
+++ b/src/backend/access/index/indexam.c
@@ -81,7 +81,7 @@
*
* Note: the ReindexIsProcessingIndex() check in RELATION_CHECKS is there
* to check that we don't try to scan or do retail insertions into an index
- * that is currently being rebuilt or pending rebuild. This helps to catch
+ * that is currently being rebuilt or pending rebuild. This helps to catch
* things that don't work when reindexing system catalogs. The assertion
* doesn't prevent the actual rebuild because we don't use RELATION_CHECKS
* when calling the index AM's ambuild routine, and there is no reason for
@@ -146,7 +146,7 @@ static IndexScanDesc index_beginscan_internal(Relation indexRelation,
* index_open - open an index relation by relation OID
*
* If lockmode is not "NoLock", the specified kind of lock is
- * obtained on the index. (Generally, NoLock should only be
+ * obtained on the index. (Generally, NoLock should only be
* used if the caller knows it has some appropriate lock on the
* index already.)
*
@@ -411,7 +411,7 @@ index_markpos(IndexScanDesc scan)
* returnable tuple in each HOT chain, and so restoring the prior state at the
* granularity of the index AM is sufficient. Since the only current user
* of mark/restore functionality is nodeMergejoin.c, this effectively means
- * that merge-join plans only work for MVCC snapshots. This could be fixed
+ * that merge-join plans only work for MVCC snapshots. This could be fixed
* if necessary, but for now it seems unimportant.
* ----------------
*/
@@ -551,7 +551,7 @@ index_fetch_heap(IndexScanDesc scan)
/*
* If we scanned a whole HOT chain and found only dead tuples, tell index
* AM to kill its entry for that TID (this will take effect in the next
- * amgettuple call, in index_getnext_tid). We do not do this when in
+ * amgettuple call, in index_getnext_tid). We do not do this when in
* recovery because it may violate MVCC to do so. See comments in
* RelationGetIndexScan().
*/
@@ -588,7 +588,7 @@ index_getnext(IndexScanDesc scan, ScanDirection direction)
{
/*
* We are resuming scan of a HOT chain after having returned an
- * earlier member. Must still hold pin on current heap page.
+ * earlier member. Must still hold pin on current heap page.
*/
Assert(BufferIsValid(scan->xs_cbuf));
Assert(ItemPointerGetBlockNumber(&scan->xs_ctup.t_self) ==
@@ -758,7 +758,7 @@ index_can_return(Relation indexRelation)
* particular indexed attribute are those with both types equal to
* the index opclass' opcintype (note that this is subtly different
* from the indexed attribute's own type: it may be a binary-compatible
- * type instead). Only the default functions are stored in relcache
+ * type instead). Only the default functions are stored in relcache
* entries --- access methods can use the syscache to look up non-default
* functions.
*
@@ -792,7 +792,7 @@ index_getprocid(Relation irel,
* index_getprocinfo
*
* This routine allows index AMs to keep fmgr lookup info for
- * support procs in the relcache. As above, only the "default"
+ * support procs in the relcache. As above, only the "default"
* functions for any particular indexed attribute are cached.
*
* Note: the return value points into cached data that will be lost during
diff --git a/src/backend/access/nbtree/nbtcompare.c b/src/backend/access/nbtree/nbtcompare.c
index 3b1bde12efd..11209b91037 100644
--- a/src/backend/access/nbtree/nbtcompare.c
+++ b/src/backend/access/nbtree/nbtcompare.c
@@ -25,7 +25,7 @@
* Although any negative int32 (except INT_MIN) is acceptable for reporting
* "<", and any positive int32 is acceptable for reporting ">", routines
* that work on 32-bit or wider datatypes can't just return "a - b".
- * That could overflow and give the wrong answer. Also, one must not
+ * That could overflow and give the wrong answer. Also, one must not
* return INT_MIN to report "<", since some callers will negate the result.
*
* NOTE: it is critical that the comparison function impose a total order
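
A minimal example of the point about "a - b": the safe pattern is to branch rather than subtract. btint4cmp_sketch is an illustrative name for what a conforming 32-bit comparator looks like:

    #include <stdint.h>
    #include <stdio.h>

    /* Wrong: returning a - b on 32-bit operands can overflow (undefined
     * behavior) and report the wrong sign, e.g. for INT32_MIN vs 1.
     * Right: branch, and never return INT_MIN, so callers may safely
     * negate the result for the reversed-order comparison. */
    static int
    btint4cmp_sketch(int32_t a, int32_t b)
    {
        if (a > b)
            return 1;
        if (a == b)
            return 0;
        return -1;
    }

    int
    main(void)
    {
        /* a - b here would overflow and could come out positive */
        printf("%d\n", btint4cmp_sketch(INT32_MIN, 1));     /* -1 */
        return 0;
    }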
diff --git a/src/backend/access/nbtree/nbtinsert.c b/src/backend/access/nbtree/nbtinsert.c
index 8bf4344cded..fa15e8f59a1 100644
--- a/src/backend/access/nbtree/nbtinsert.c
+++ b/src/backend/access/nbtree/nbtinsert.c
@@ -88,7 +88,7 @@ static void _bt_vacuum_one_page(Relation rel, Buffer buffer, Relation heapRel);
* and btinsert. By here, itup is filled in, including the TID.
*
* If checkUnique is UNIQUE_CHECK_NO or UNIQUE_CHECK_PARTIAL, this
- * will allow duplicates. Otherwise (UNIQUE_CHECK_YES or
+ * will allow duplicates. Otherwise (UNIQUE_CHECK_YES or
* UNIQUE_CHECK_EXISTING) it will throw error for a duplicate.
* For UNIQUE_CHECK_EXISTING we merely run the duplicate check, and
* don't actually insert.
@@ -127,7 +127,7 @@ top:
* If the page was split between the time that we surrendered our read
* lock and acquired our write lock, then this page may no longer be the
* right place for the key we want to insert. In this case, we need to
- * move right in the tree. See Lehman and Yao for an excruciatingly
+ * move right in the tree. See Lehman and Yao for an excruciatingly
* precise description.
*/
buf = _bt_moveright(rel, buf, natts, itup_scankey, false, BT_WRITE);
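
The move-right step referenced here is small enough to sketch. The Page struct below is a toy, not the backend's page layout; only the "step right while the key exceeds the high key" loop is the point:

    #include <stdio.h>

    typedef struct Page
    {
        int          high_key;      /* all keys on this page are <= this */
        struct Page *right;         /* right sibling, NULL if rightmost */
    } Page;

    /* Between releasing a read lock and taking a write lock, the page may
     * have split: our key can now exceed the (new) high key, meaning its
     * home has moved to a right sibling.  Step right until the high key
     * again bounds the search key.  (Real code also swaps locks as it
     * steps, per Lehman and Yao.) */
    static Page *
    move_right(Page *page, int key)
    {
        while (page->right != NULL && key > page->high_key)
        {
            printf("split detected: moving past high key %d\n",
                   page->high_key);
            page = page->right;
        }
        return page;
    }

    int
    main(void)
    {
        Page b = {100, NULL};
        Page a = {10, &b};          /* a split left this page holding <= 10 */

        printf("key 42 lands on page with high key %d\n",
               move_right(&a, 42)->high_key);
        return 0;
    }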
@@ -207,7 +207,7 @@ top:
* is the first tuple on the next page.
*
* Returns InvalidTransactionId if there is no conflict, else an xact ID
- * we must wait for to see if it commits a conflicting tuple. If an actual
+ * we must wait for to see if it commits a conflicting tuple. If an actual
* conflict is detected, no return --- just ereport().
*
* However, if checkUnique == UNIQUE_CHECK_PARTIAL, we always return
@@ -289,7 +289,7 @@ _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel,
/*
* If we are doing a recheck, we expect to find the tuple we
- * are rechecking. It's not a duplicate, but we have to keep
+ * are rechecking. It's not a duplicate, but we have to keep
* scanning.
*/
if (checkUnique == UNIQUE_CHECK_EXISTING &&
@@ -478,7 +478,7 @@ _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel,
* If the new key is equal to one or more existing keys, we can
* legitimately place it anywhere in the series of equal keys --- in fact,
* if the new key is equal to the page's "high key" we can place it on
- * the next page. If it is equal to the high key, and there's not room
+ * the next page. If it is equal to the high key, and there's not room
* to insert the new tuple on the current page without splitting, then
* we can move right hoping to find more free space and avoid a split.
* (We should not move right indefinitely, however, since that leads to
@@ -490,7 +490,7 @@ _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel,
* removing any LP_DEAD tuples.
*
* On entry, *buf and *offsetptr point to the first legal position
- * where the new tuple could be inserted. The caller should hold an
+ * where the new tuple could be inserted. The caller should hold an
* exclusive lock on *buf. *offsetptr can also be set to
* InvalidOffsetNumber, in which case the function will search for the
* right location within the page if needed. On exit, they point to the
@@ -560,7 +560,7 @@ _bt_findinsertloc(Relation rel,
* on every insert. We implement "get tired" as a random choice,
* since stopping after scanning a fixed number of pages wouldn't work
* well (we'd never reach the right-hand side of previously split
- * pages). Currently the probability of moving right is set at 0.99,
+ * pages). Currently the probability of moving right is set at 0.99,
* which may seem too high to change the behavior much, but it does an
* excellent job of preventing O(N^2) behavior with many equal keys.
*----------
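
The "get tired" choice is literally a biased coin flip per page. A standalone sketch using the C library's rand() (the backend uses its own random() and MAX_RANDOM_VALUE):

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* One step of the "move or get tired" decision on a page full of
     * equal keys: keep stepping right with probability 0.99, give up
     * (and split the current page) with probability 0.01. */
    static bool
    keep_moving_right(void)
    {
        return rand() > RAND_MAX / 100;     /* true ~99% of the time */
    }

    int
    main(void)
    {
        int steps = 0;

        srand(42);
        while (keep_moving_right())
            steps++;                        /* visit next equal-key page */
        printf("got tired after %d page(s)\n", steps);
        return 0;
    }

The expected walk length is about 100 pages, long enough to reach the right end of previously split runs of duplicates, yet bounded enough to avoid O(N^2) insertion behavior.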
@@ -661,7 +661,7 @@ _bt_findinsertloc(Relation rel,
* + updates the metapage if a true root or fast root is split.
*
* On entry, we must have the right buffer in which to do the
- * insertion, and the buffer must be pinned and write-locked. On return,
+ * insertion, and the buffer must be pinned and write-locked. On return,
* we will have dropped both the pin and the lock on the buffer.
*
* The locking interactions in this code are critical. You should
@@ -925,7 +925,7 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright,
* origpage is the original page to be split. leftpage is a temporary
* buffer that receives the left-sibling data, which will be copied back
* into origpage on success. rightpage is the new page that receives the
- * right-sibling data. If we fail before reaching the critical section,
+ * right-sibling data. If we fail before reaching the critical section,
* origpage hasn't been modified and leftpage is only workspace. In
* principle we shouldn't need to worry about rightpage either, because it
* hasn't been linked into the btree page structure; but to avoid leaving
@@ -1140,7 +1140,7 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright,
* page. If you're confused, imagine that page A splits to A B and
* then again, yielding A C B, while vacuum is in progress. Tuples
* originally in A could now be in either B or C, hence vacuum must
- * examine both pages. But if D, our right sibling, has a different
+ * examine both pages. But if D, our right sibling, has a different
* cycleid then it could not contain any tuples that were in A when
* the vacuum started.
*/
@@ -1359,7 +1359,7 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright,
*
* We return the index of the first existing tuple that should go on the
* righthand page, plus a boolean indicating whether the new tuple goes on
- * the left or right page. The bool is necessary to disambiguate the case
+ * the left or right page. The bool is necessary to disambiguate the case
* where firstright == newitemoff.
*/
static OffsetNumber
@@ -1595,7 +1595,7 @@ _bt_checksplitloc(FindSplitData *state,
*
* On entry, buf and rbuf are the left and right split pages, which we
* still hold write locks on per the L&Y algorithm. We release the
- * write locks once we have write lock on the parent page. (Any sooner,
+ * write locks once we have write lock on the parent page. (Any sooner,
* and it'd be possible for some other process to try to split or delete
* one of these pages, and get confused because it cannot find the downlink.)
*
@@ -1618,7 +1618,7 @@ _bt_insert_parent(Relation rel,
* Here we have to do something Lehman and Yao don't talk about: deal with
* a root split and construction of a new root. If our stack is empty
* then we have just split a node on what had been the root level when we
- * descended the tree. If it was still the root then we perform a
+ * descended the tree. If it was still the root then we perform a
* new-root construction. If it *wasn't* the root anymore, search to find
* the next higher level that someone constructed meanwhile, and find the
* right place to insert as for the normal case.
@@ -1768,7 +1768,7 @@ _bt_getstackbuf(Relation rel, BTStack stack, int access)
/*
* These loops will check every item on the page --- but in an
* order that's attuned to the probability of where it actually
- * is. Scan to the right first, then to the left.
+ * is. Scan to the right first, then to the left.
*/
for (offnum = start;
offnum <= maxoff;
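The hunk breaks off inside the first loop; here is a hedged sketch of the two-pass scan the comment describes, using the stock OffsetNumber macros but a hypothetical item_matches() in place of the real downlink comparison:

    /* Pass 1: scan right from the remembered position (the likeliest spot). */
    for (offnum = start;
         offnum <= maxoff;
         offnum = OffsetNumberNext(offnum))
    {
        if (item_matches(page, offnum))
            return offnum;
    }

    /* Pass 2: scan left of the remembered position. */
    for (offnum = OffsetNumberPrev(start);
         offnum >= minoff;
         offnum = OffsetNumberPrev(offnum))
    {
        if (item_matches(page, offnum))
            return offnum;
    }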
diff --git a/src/backend/access/nbtree/nbtpage.c b/src/backend/access/nbtree/nbtpage.c
index f4077533bf5..135a3eeaba1 100644
--- a/src/backend/access/nbtree/nbtpage.c
+++ b/src/backend/access/nbtree/nbtpage.c
@@ -12,7 +12,7 @@
* src/backend/access/nbtree/nbtpage.c
*
* NOTES
- * Postgres btree pages look like ordinary relation pages. The opaque
+ * Postgres btree pages look like ordinary relation pages. The opaque
* data at high addresses includes pointers to left and right siblings
* and flag data describing page state. The first page in a btree, page
* zero, is special -- it stores meta-information describing the tree.
@@ -55,7 +55,7 @@ _bt_initmetapage(Page page, BlockNumber rootbknum, uint32 level)
metaopaque->btpo_flags = BTP_META;
/*
- * Set pd_lower just past the end of the metadata. This is not essential
+ * Set pd_lower just past the end of the metadata. This is not essential
* but it makes the page look compressible to xlog.c.
*/
((PageHeader) page)->pd_lower =
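The assignment is cut off at the hunk boundary; presumably it completes by pointing pd_lower at the first byte past the metadata, along these lines (metad being the BTMetaPageData pointer set up earlier in the function):

    ((PageHeader) page)->pd_lower =
        ((char *) metad + sizeof(BTMetaPageData)) - (char *) page;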
@@ -73,7 +73,7 @@ _bt_initmetapage(Page page, BlockNumber rootbknum, uint32 level)
*
* The access type parameter (BT_READ or BT_WRITE) controls whether
* a new root page will be created or not. If access = BT_READ,
- * and no root page exists, we just return InvalidBuffer. For
+ * and no root page exists, we just return InvalidBuffer. For
* BT_WRITE, we try to create the root page if it doesn't exist.
* NOTE that the returned root page will have only a read lock set
* on it even if access = BT_WRITE!
@@ -190,7 +190,7 @@ _bt_getroot(Relation rel, int access)
/*
* Metadata initialized by someone else. In order to guarantee no
* deadlocks, we have to release the metadata page and start all
- * over again. (Is that really true? But it's hardly worth trying
+ * over again. (Is that really true? But it's hardly worth trying
* to optimize this case.)
*/
_bt_relbuf(rel, metabuf);
@@ -253,7 +253,7 @@ _bt_getroot(Relation rel, int access)
CacheInvalidateRelcache(rel);
/*
- * swap root write lock for read lock. There is no danger of anyone
+ * swap root write lock for read lock. There is no danger of anyone
* else accessing the new root page while it's unlocked, since no one
* else knows where it is yet.
*/
@@ -321,7 +321,7 @@ _bt_getroot(Relation rel, int access)
* By the time we acquire lock on the root page, it might have been split and
* not be the true root anymore. This is okay for the present uses of this
* routine; we only really need to be able to move up at least one tree level
- * from whatever non-root page we were at. If we ever do need to lock the
+ * from whatever non-root page we were at. If we ever do need to lock the
* one true root page, we could loop here, re-reading the metapage on each
* failure. (Note that it wouldn't do to hold the lock on the metapage while
* moving to the root --- that'd deadlock against any concurrent root split.)
@@ -496,7 +496,7 @@ _bt_checkpage(Relation rel, Buffer buf)
/*
* ReadBuffer verifies that every newly-read page passes
* PageHeaderIsValid, which means it either contains a reasonably sane
- * page header or is all-zero. We have to defend against the all-zero
+ * page header or is all-zero. We have to defend against the all-zero
* case, however.
*/
if (PageIsNew(page))
@@ -563,7 +563,7 @@ _bt_log_reuse_page(Relation rel, BlockNumber blkno, TransactionId latestRemovedX
/*
* _bt_getbuf() -- Get a buffer by block number for read or write.
*
- * blkno == P_NEW means to get an unallocated index page. The page
+ * blkno == P_NEW means to get an unallocated index page. The page
* will be initialized before returning it.
*
* When this routine returns, the appropriate lock is set on the
@@ -594,7 +594,7 @@ _bt_getbuf(Relation rel, BlockNumber blkno, int access)
* First see if the FSM knows of any free pages.
*
* We can't trust the FSM's report unreservedly; we have to check that
- * the page is still free. (For example, an already-free page could
+ * the page is still free. (For example, an already-free page could
* have been re-used between the time the last VACUUM scanned it and
* the time the VACUUM made its FSM updates.)
*
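A hedged sketch of the verify-before-reuse loop this implies, omitting the WAL logging of page reuse that the real _bt_getbuf also performs:

    for (;;)
    {
        blkno = GetFreeIndexPage(rel);
        if (blkno == InvalidBlockNumber)
            break;                  /* FSM is empty; must extend the index */
        buf = ReadBuffer(rel, blkno);
        if (ConditionalLockBuffer(buf))
        {
            page = BufferGetPage(buf);
            if (_bt_page_recyclable(page))
            {
                /* Verified still free: re-initialize and hand it out. */
                _bt_pageinit(page, BufferGetPageSize(buf));
                return buf;
            }
            LockBuffer(buf, BUFFER_LOCK_UNLOCK);
        }
        ReleaseBuffer(buf);         /* stale FSM entry; try the next one */
    }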
@@ -773,7 +773,7 @@ _bt_page_recyclable(Page page)
/*
* Delete item(s) from a btree page during VACUUM.
*
- * This must only be used for deleting leaf items. Deleting an item on a
+ * This must only be used for deleting leaf items. Deleting an item on a
* non-leaf page has to be done as part of an atomic action that includes
* deleting the page it points to.
*
@@ -841,7 +841,7 @@ _bt_delitems_vacuum(Relation rel, Buffer buf,
/*
* The target-offsets array is not in the buffer, but pretend that it
- * is. When XLogInsert stores the whole buffer, the offsets array
+ * is. When XLogInsert stores the whole buffer, the offsets array
* need not be stored too.
*/
if (nitems > 0)
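A hedged sketch of how that trick looks with the 9.x-era XLogRecData interface: the offsets array is chained into the record but attributed to the buffer, so a full-page image makes it redundant:

    if (nitems > 0)
    {
        rdata[1].data = (char *) itemnos;   /* lives outside the page... */
        rdata[1].len = nitems * sizeof(OffsetNumber);
    }
    else
    {
        rdata[1].data = NULL;
        rdata[1].len = 0;
    }
    rdata[1].buffer = buf;      /* ...but is claimed for buf, so it is
                                 * omitted whenever buf's full-page image
                                 * gets written instead */
    rdata[1].buffer_std = true;
    rdata[1].next = NULL;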
@@ -1092,7 +1092,7 @@ _bt_pagedel(Relation rel, Buffer buf, BTStack stack)
BTPageOpaque opaque;
/*
- * We can never delete rightmost pages nor root pages. While at it, check
+ * We can never delete rightmost pages nor root pages. While at it, check
* that page is not already deleted and is empty.
*/
page = BufferGetPage(buf);
@@ -1164,7 +1164,7 @@ _bt_pagedel(Relation rel, Buffer buf, BTStack stack)
/*
* During WAL recovery, we can't use _bt_search (for one reason,
* it might invoke user-defined comparison functions that expect
- * facilities not available in recovery mode). Instead, just set
+ * facilities not available in recovery mode). Instead, just set
* up a dummy stack pointing to the left end of the parent tree
* level, from which _bt_getstackbuf will walk right to the parent
* page. Painful, but we don't care too much about performance in
@@ -1199,7 +1199,7 @@ _bt_pagedel(Relation rel, Buffer buf, BTStack stack)
* target page. The sibling that was current a moment ago could have
* split, so we may have to move right. This search could fail if either
* the sibling or the target page was deleted by someone else meanwhile;
- * if so, give up. (Right now, that should never happen, since page
+ * if so, give up. (Right now, that should never happen, since page
* deletion is only done in VACUUM and there shouldn't be multiple VACUUMs
* concurrently on the same table.)
*/
@@ -1228,7 +1228,7 @@ _bt_pagedel(Relation rel, Buffer buf, BTStack stack)
lbuf = InvalidBuffer;
/*
- * Next write-lock the target page itself. It should be okay to take just
+ * Next write-lock the target page itself. It should be okay to take just
* a write lock not a superexclusive lock, since no scans would stop on an
* empty page.
*/
@@ -1350,7 +1350,7 @@ _bt_pagedel(Relation rel, Buffer buf, BTStack stack)
/*
* Check that the parent-page index items we're about to delete/overwrite
- * contain what we expect. This can fail if the index has become corrupt
+ * contain what we expect. This can fail if the index has become corrupt
* for some reason. We want to throw any error before entering the
* critical section --- otherwise it'd be a PANIC.
*
@@ -1434,7 +1434,7 @@ _bt_pagedel(Relation rel, Buffer buf, BTStack stack)
* we're in VACUUM and would not otherwise have an XID. Having already
* updated links to the target, ReadNewTransactionId() suffices as an
* upper bound. Any scan having retained a now-stale link is advertising
- * in its PGXACT an xmin less than or equal to the value we read here. It
+ * in its PGXACT an xmin less than or equal to the value we read here. It
* will continue to do so, holding back RecentGlobalXmin, for the duration
* of that scan.
*/
diff --git a/src/backend/access/nbtree/nbtree.c b/src/backend/access/nbtree/nbtree.c
index eb396b4b0e7..229d276d9c1 100644
--- a/src/backend/access/nbtree/nbtree.c
+++ b/src/backend/access/nbtree/nbtree.c
@@ -151,7 +151,7 @@ btbuild(PG_FUNCTION_ARGS)
/*
* If we are reindexing a pre-existing index, it is critical to send out a
* relcache invalidation SI message to ensure all backends re-read the
- * index metapage. We expect that the caller will ensure that happens
+ * index metapage. We expect that the caller will ensure that happens
* (typically as a side effect of updating index stats, but it must happen
* even if the stats don't change!)
*/
@@ -216,7 +216,7 @@ btbuildempty(PG_FUNCTION_ARGS)
metapage = (Page) palloc(BLCKSZ);
_bt_initmetapage(metapage, P_NONE, 0);
- /* Write the page. If archiving/streaming, XLOG it. */
+ /* Write the page. If archiving/streaming, XLOG it. */
PageSetChecksumInplace(metapage, BTREE_METAPAGE);
smgrwrite(index->rd_smgr, INIT_FORKNUM, BTREE_METAPAGE,
(char *) metapage, true);
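Presumably the routine then XLOGs the page when archiving or streaming requires it, and forces the write to disk itself, since it bypassed shared buffers:

    if (XLogIsNeeded())
        log_newpage(&index->rd_smgr->smgr_rnode.node, INIT_FORKNUM,
                    BTREE_METAPAGE, metapage, true);    /* page_std flag
                                                         * assumed here */

    /* Not written through shared_buffers, so sync it ourselves. */
    smgrimmedsync(index->rd_smgr, INIT_FORKNUM);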
@@ -435,7 +435,7 @@ btbeginscan(PG_FUNCTION_ARGS)
/*
* We don't know yet whether the scan will be index-only, so we do not
- * allocate the tuple workspace arrays until btrescan. However, we set up
+ * allocate the tuple workspace arrays until btrescan. However, we set up
* scan->xs_itupdesc whether we'll need it or not, since that's so cheap.
*/
so->currTuples = so->markTuples = NULL;
@@ -480,7 +480,7 @@ btrescan(PG_FUNCTION_ARGS)
/*
* Allocate tuple workspace arrays, if needed for an index-only scan and
- * not already done in a previous rescan call. To save on palloc
+ * not already done in a previous rescan call. To save on palloc
* overhead, both workspaces are allocated as one palloc block; only this
* function and btendscan know that.
*
@@ -960,7 +960,7 @@ restart:
vstate->lastBlockLocked = blkno;
/*
- * Check whether we need to recurse back to earlier pages. What we
+ * Check whether we need to recurse back to earlier pages. What we
* are concerned about is a page split that happened since we started
* the vacuum scan. If the split moved some tuples to a lower page
* then we might have missed 'em. If so, set up for tail recursion.
diff --git a/src/backend/access/nbtree/nbtsearch.c b/src/backend/access/nbtree/nbtsearch.c
index ac98589477b..cfd13038367 100644
--- a/src/backend/access/nbtree/nbtsearch.c
+++ b/src/backend/access/nbtree/nbtsearch.c
@@ -50,7 +50,7 @@ static bool _bt_endpoint(IndexScanDesc scan, ScanDirection dir);
*
* NOTE that the returned buffer is read-locked regardless of the access
* parameter. However, access = BT_WRITE will allow an empty root page
- * to be created and returned. When access = BT_READ, an empty index
+ * to be created and returned. When access = BT_READ, an empty index
* will result in *bufP being set to InvalidBuffer.
*/
BTStack
@@ -227,7 +227,7 @@ _bt_moveright(Relation rel,
* (or leaf keys > given scankey when nextkey is true).
*
* This procedure is not responsible for walking right, it just examines
- * the given page. _bt_binsrch() has no lock or refcount side effects
+ * the given page. _bt_binsrch() has no lock or refcount side effects
* on the buffer.
*/
OffsetNumber
@@ -359,7 +359,7 @@ _bt_compare(Relation rel,
/*
* The scan key is set up with the attribute number associated with each
* term in the key. It is important that, if the index is multi-key, the
- * scan contain the first k key attributes, and that they be in order. If
+ * scan contain the first k key attributes, and that they be in order. If
* you think about how multi-key ordering works, you'll understand why
* this is.
*
@@ -398,7 +398,7 @@ _bt_compare(Relation rel,
/*
* The sk_func needs to be passed the index value as left arg and
* the sk_argument as right arg (they might be of different
- * types). Since it is convenient for callers to think of
+ * types). Since it is convenient for callers to think of
* _bt_compare as comparing the scankey to the index item, we have
* to flip the sign of the comparison result. (Unless it's a DESC
* column, in which case we *don't* flip the sign.)
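In code, the flip described above comes down to a sign change on the support function's result (a sketch matching the shape of _bt_compare):

    result = DatumGetInt32(FunctionCall2Coll(&scankey->sk_func,
                                             scankey->sk_collation,
                                             datum,
                                             scankey->sk_argument));
    if (!(scankey->sk_flags & SK_BT_DESC))
        result = -result;       /* sk_func compared index value vs. argument;
                                 * negate so the result reads scankey vs. item */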
@@ -427,7 +427,7 @@ _bt_compare(Relation rel,
* _bt_first() -- Find the first item in a scan.
*
* We need to be clever about the direction of scan, the search
- * conditions, and the tree ordering. We find the first item (or,
+ * conditions, and the tree ordering. We find the first item (or,
* if backwards scan, the last item) in the tree that satisfies the
* qualifications in the scan key. On success exit, the page containing
* the current index tuple is pinned but not locked, and data about
@@ -483,7 +483,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
* We want to identify the keys that can be used as starting boundaries;
* these are =, >, or >= keys for a forward scan or =, <, <= keys for
* a backwards scan. We can use keys for multiple attributes so long as
- * the prior attributes had only =, >= (resp. =, <=) keys. Once we accept
+ * the prior attributes had only =, >= (resp. =, <=) keys. Once we accept
* a > or < boundary or find an attribute with no boundary (which can be
* thought of as the same as "> -infinity"), we can't use keys for any
* attributes to its right, because it would break our simplistic notion
@@ -698,7 +698,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
* even if the row comparison is of ">" or "<" type, because the
* condition applied to all but the last row member is effectively
* ">=" or "<=", and so the extra keys don't break the positioning
- * scheme. But, by the same token, if we aren't able to use all
+ * scheme. But, by the same token, if we aren't able to use all
* the row members, then the part of the row comparison that we
* did use has to be treated as just a ">=" or "<=" condition, and
* so we'd better adjust strat_total accordingly.
@@ -817,7 +817,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
/*
* Find first item >= scankey, then back up one to arrive at last
- * item < scankey. (Note: this positioning strategy is only used
+ * item < scankey. (Note: this positioning strategy is only used
* for a backward scan, so that is always the correct starting
* position.)
*/
@@ -866,7 +866,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
case BTGreaterEqualStrategyNumber:
/*
- * Find first item >= scankey. (This is only used for forward
+ * Find first item >= scankey. (This is only used for forward
* scans.)
*/
nextkey = false;
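Taken together, the two cases shown above reduce to a pair of positioning flags, roughly:

    case BTLessStrategyNumber:
        nextkey = false;        /* position at first item >= scankey... */
        goback = true;          /* ...then back up one: last item < scankey */
        break;

    case BTGreaterEqualStrategyNumber:
        nextkey = false;        /* first item >= scankey is the answer */
        goback = false;
        break;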
@@ -944,7 +944,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
*
* The actually desired starting point is either this item or the prior
* one, or in the end-of-page case it's the first item on the next page or
- * the last item on this page. Adjust the starting offset if needed. (If
+ * the last item on this page. Adjust the starting offset if needed. (If
* this results in an offset before the first item or after the last one,
* _bt_readpage will report no items found, and then we'll step to the
* next page as needed.)
@@ -1260,7 +1260,7 @@ _bt_steppage(IndexScanDesc scan, ScanDirection dir)
* than the walk-right case because of the possibility that the page
* to our left splits while we are in flight to it, plus the
* possibility that the page we were on gets deleted after we leave
- * it. See nbtree/README for details.
+ * it. See nbtree/README for details.
*/
for (;;)
{
@@ -1355,7 +1355,7 @@ _bt_walk_left(Relation rel, Buffer buf)
* anymore, not that its left sibling got split more than four times.
*
* Note that it is correct to test P_ISDELETED not P_IGNORE here,
- * because half-dead pages are still in the sibling chain. Caller
+ * because half-dead pages are still in the sibling chain. Caller
* must reject half-dead pages if wanted.
*/
tries = 0;
@@ -1381,7 +1381,7 @@ _bt_walk_left(Relation rel, Buffer buf)
if (P_ISDELETED(opaque))
{
/*
- * It was deleted. Move right to first nondeleted page (there
+ * It was deleted. Move right to first nondeleted page (there
* must be one); that is the page that has acquired the deleted
* one's keyspace, so stepping left from it will take us where we
* want to be.
@@ -1425,7 +1425,7 @@ _bt_walk_left(Relation rel, Buffer buf)
* _bt_get_endpoint() -- Find the first or last page on a given tree level
*
* If the index is empty, we will return InvalidBuffer; any other failure
- * condition causes ereport(). We will not return a dead page.
+ * condition causes ereport(). We will not return a dead page.
*
* The returned buffer is pinned and read-locked.
*/
diff --git a/src/backend/access/nbtree/nbtsort.c b/src/backend/access/nbtree/nbtsort.c
index 52c5a2676ef..ec5bb4f7bfb 100644
--- a/src/backend/access/nbtree/nbtsort.c
+++ b/src/backend/access/nbtree/nbtsort.c
@@ -7,7 +7,7 @@
*
* We use tuplesort.c to sort the given index tuples into order.
* Then we scan the index tuples in order and build the btree pages
- * for each level. We load source tuples into leaf-level pages.
+ * for each level. We load source tuples into leaf-level pages.
* Whenever we fill a page at one level, we add a link to it to its
* parent level (starting a new parent level if necessary). When
* done, we write out each final page on each level, adding it to
@@ -42,11 +42,11 @@
*
* Since the index will never be used unless it is completely built,
* from a crash-recovery point of view there is no need to WAL-log the
- * steps of the build. After completing the index build, we can just sync
+ * steps of the build. After completing the index build, we can just sync
* the whole file to disk using smgrimmedsync() before exiting this module.
* This can be seen to be sufficient for crash recovery by considering that
* it's effectively equivalent to what would happen if a CHECKPOINT occurred
- * just after the index build. However, it is clearly not sufficient if the
+ * just after the index build. However, it is clearly not sufficient if the
* DBA is using the WAL log for PITR or replication purposes, since another
* machine would not be able to reconstruct the index from WAL. Therefore,
* we log the completed index pages to WAL if and only if WAL archiving is
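The sentence is cut off at the hunk boundary; the decision it describes presumably boils down to one flag computed at the start of the build, roughly as _bt_leafbuild does:

    /* Log completed index pages only when WAL archiving/streaming needs them. */
    wstate.btws_use_wal = XLogIsNeeded() && RelationNeedsWAL(wstate.index);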
@@ -89,7 +89,7 @@ struct BTSpool
};
/*
- * Status record for a btree page being built. We have one of these
+ * Status record for a btree page being built. We have one of these
* for each active tree level.
*
* The reason we need to store a copy of the minimum key is that we'll
@@ -160,7 +160,7 @@ _bt_spoolinit(Relation heap, Relation index, bool isunique, bool isdead)
* We size the sort area as maintenance_work_mem rather than work_mem to
* speed index creation. This should be OK since a single backend can't
* run multiple index creations in parallel. Note that creation of a
- * unique index actually requires two BTSpool objects. We expect that the
+ * unique index actually requires two BTSpool objects. We expect that the
* second one (for dead tuples) won't get very full, so we give it only
* work_mem.
*/
@@ -298,7 +298,7 @@ _bt_blwritepage(BTWriteState *wstate, Page page, BlockNumber blkno)
PageSetChecksumInplace(page, blkno);
/*
- * Now write the page. There's no need for smgr to schedule an fsync for
+ * Now write the page. There's no need for smgr to schedule an fsync for
* this write; we'll do it ourselves before ending the build.
*/
if (blkno == wstate->btws_pages_written)
@@ -423,14 +423,14 @@ _bt_sortaddtup(Page page,
* A leaf page being built looks like:
*
* +----------------+---------------------------------+
- * | PageHeaderData | linp0 linp1 linp2 ... |
+ * | PageHeaderData | linp0 linp1 linp2 ... |
* +-----------+----+---------------------------------+
* | ... linpN | |
* +-----------+--------------------------------------+
* | ^ last |
* | |
* +-------------+------------------------------------+
- * | | itemN ... |
+ * | | itemN ... |
* +-------------+------------------+-----------------+
* | ... item3 item2 item1 | "special space" |
* +--------------------------------+-----------------+
@@ -493,9 +493,9 @@ _bt_buildadd(BTWriteState *wstate, BTPageState *state, IndexTuple itup)
RelationGetRelationName(wstate->index))));
/*
- * Check to see if page is "full". It's definitely full if the item won't
+ * Check to see if page is "full". It's definitely full if the item won't
* fit. Otherwise, compare to the target freespace derived from the
- * fillfactor. However, we must put at least two items on each page, so
+ * fillfactor. However, we must put at least two items on each page, so
* disregard fillfactor if we don't have that many.
*/
if (pgspc < itupsz || (pgspc < state->btps_full && last_off > P_FIRSTKEY))
@@ -568,7 +568,7 @@ _bt_buildadd(BTWriteState *wstate, BTPageState *state, IndexTuple itup)
}
/*
- * Write out the old page. We never need to touch it again, so we can
+ * Write out the old page. We never need to touch it again, so we can
* free the opage workspace too.
*/
_bt_blwritepage(wstate, opage, oblkno);
@@ -805,7 +805,7 @@ _bt_load(BTWriteState *wstate, BTSpool *btspool, BTSpool *btspool2)
/*
* If the index is WAL-logged, we must fsync it down to disk before it's
- * safe to commit the transaction. (For a non-WAL-logged index we don't
+ * safe to commit the transaction. (For a non-WAL-logged index we don't
* care since the index will be uninteresting after a crash anyway.)
*
* It's obvious that we must do this when not WAL-logging the build. It's
diff --git a/src/backend/access/nbtree/nbtutils.c b/src/backend/access/nbtree/nbtutils.c
index 352c77cbea2..99f9f290f4f 100644
--- a/src/backend/access/nbtree/nbtutils.c
+++ b/src/backend/access/nbtree/nbtutils.c
@@ -107,7 +107,7 @@ _bt_mkscankey(Relation rel, IndexTuple itup)
* comparison data ultimately used must match the key datatypes.
*
* The result cannot be used with _bt_compare(), unless comparison
- * data is first stored into the key entries. Currently this
+ * data is first stored into the key entries. Currently this
* routine is only called by nbtsort.c and tuplesort.c, which have
* their own comparison routines.
*/
@@ -269,7 +269,7 @@ _bt_preprocess_array_keys(IndexScanDesc scan)
continue;
/*
- * First, deconstruct the array into elements. Anything allocated
+ * First, deconstruct the array into elements. Anything allocated
* here (including a possibly detoasted array value) is in the
* workspace context.
*/
@@ -283,7 +283,7 @@ _bt_preprocess_array_keys(IndexScanDesc scan)
&elem_values, &elem_nulls, &num_elems);
/*
- * Compress out any null elements. We can ignore them since we assume
+ * Compress out any null elements. We can ignore them since we assume
* all btree operators are strict.
*/
num_nonnulls = 0;
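Presumably the squeeze that follows is the classic in-place filter, valid because a strict operator can never return true for a NULL element:

    for (j = 0; j < num_elems; j++)
    {
        if (!elem_nulls[j])
            elem_values[num_nonnulls++] = elem_values[j];
    }
    num_elems = num_nonnulls;   /* only the non-null elements remain */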
@@ -517,7 +517,7 @@ _bt_compare_array_elements(const void *a, const void *b, void *arg)
* _bt_start_array_keys() -- Initialize array keys at start of a scan
*
* Set up the cur_elem counters and fill in the first sk_argument value for
- * each array scankey. We can't do this until we know the scan direction.
+ * each array scankey. We can't do this until we know the scan direction.
*/
void
_bt_start_array_keys(IndexScanDesc scan, ScanDirection dir)
@@ -670,8 +670,8 @@ _bt_restore_array_keys(IndexScanDesc scan)
* so that the index sorts in the desired direction.
*
* One key purpose of this routine is to discover which scan keys must be
- * satisfied to continue the scan. It also attempts to eliminate redundant
- * keys and detect contradictory keys. (If the index opfamily provides
+ * satisfied to continue the scan. It also attempts to eliminate redundant
+ * keys and detect contradictory keys. (If the index opfamily provides
* incomplete sets of cross-type operators, we may fail to detect redundant
* or contradictory keys, but we can survive that.)
*
@@ -702,7 +702,7 @@ _bt_restore_array_keys(IndexScanDesc scan)
* that's the only one returned. (So, we return either a single = key,
* or one or two boundary-condition keys for each attr.) However, if we
* cannot compare two keys for lack of a suitable cross-type operator,
- * we cannot eliminate either. If there are two such keys of the same
+ * we cannot eliminate either. If there are two such keys of the same
* operator strategy, the second one is just pushed into the output array
* without further processing here. We may also emit both >/>= or both
* </<= keys if we can't compare them. The logic about required keys still
@@ -737,7 +737,7 @@ _bt_restore_array_keys(IndexScanDesc scan)
* Note: the reason we have to copy the preprocessed scan keys into private
* storage is that we are modifying the array based on comparisons of the
* key argument values, which could change on a rescan or after moving to
- * new elements of array keys. Therefore we can't overwrite the source data.
+ * new elements of array keys. Therefore we can't overwrite the source data.
*/
void
_bt_preprocess_keys(IndexScanDesc scan)
@@ -919,7 +919,7 @@ _bt_preprocess_keys(IndexScanDesc scan)
/*
* Emit the cleaned-up keys into the outkeys[] array, and then
- * mark them if they are required. They are required (possibly
+ * mark them if they are required. They are required (possibly
* only in one direction) if all attrs before this one had "=".
*/
for (j = BTMaxStrategyNumber; --j >= 0;)
@@ -1017,7 +1017,7 @@ _bt_preprocess_keys(IndexScanDesc scan)
* and amoplefttype/amoprighttype equal to the two argument datatypes.
*
* If the opfamily doesn't supply a complete set of cross-type operators we
- * may not be able to make the comparison. If we can make the comparison
+ * may not be able to make the comparison. If we can make the comparison
* we store the operator result in *result and return TRUE. We return FALSE
* if the comparison could not be made.
*
@@ -1043,7 +1043,7 @@ _bt_compare_scankey_args(IndexScanDesc scan, ScanKey op,
StrategyNumber strat;
/*
- * First, deal with cases where one or both args are NULL. This should
+ * First, deal with cases where one or both args are NULL. This should
* only happen when the scankeys represent IS NULL/NOT NULL conditions.
*/
if ((leftarg->sk_flags | rightarg->sk_flags) & SK_ISNULL)
@@ -1183,7 +1183,7 @@ _bt_compare_scankey_args(IndexScanDesc scan, ScanKey op,
*
* Lastly, for ordinary scankeys (not IS NULL/NOT NULL), we check for a
* NULL comparison value. Since all btree operators are assumed strict,
- * a NULL means that the qual cannot be satisfied. We return TRUE if the
+ * a NULL means that the qual cannot be satisfied. We return TRUE if the
* comparison value isn't NULL, or FALSE if the scan should be abandoned.
*
* This function is applied to the *input* scankey structure; therefore
@@ -1212,7 +1212,7 @@ _bt_fix_scankey_strategy(ScanKey skey, int16 *indoption)
* --- we can treat IS NULL as an equality operator for purposes of search
* strategy.
*
- * Likewise, "x IS NOT NULL" is supported. We treat that as either "less
+ * Likewise, "x IS NOT NULL" is supported. We treat that as either "less
* than NULL" in a NULLS LAST index, or "greater than NULL" in a NULLS
* FIRST index.
*
@@ -1284,7 +1284,7 @@ _bt_fix_scankey_strategy(ScanKey skey, int16 *indoption)
* Mark a scankey as "required to continue the scan".
*
* Depending on the operator type, the key may be required for both scan
- * directions or just one. Also, if the key is a row comparison header,
+ * directions or just one. Also, if the key is a row comparison header,
* we have to mark the appropriate subsidiary ScanKeys as required. In
* such cases, the first subsidiary key is required, but subsequent ones
* are required only as long as they correspond to successive index columns
@@ -1296,7 +1296,7 @@ _bt_fix_scankey_strategy(ScanKey skey, int16 *indoption)
* scribbling on a data structure belonging to the index AM's caller, not on
* our private copy. This should be OK because the marking will not change
* from scan to scan within a query, and so we'd just re-mark the same way
- * anyway on a rescan. Something to keep an eye on though.
+ * anyway on a rescan. Something to keep an eye on though.
*/
static void
_bt_mark_scankey_required(ScanKey skey)
@@ -1482,7 +1482,7 @@ _bt_checkkeys(IndexScanDesc scan,
/*
* Since NULLs are sorted before non-NULLs, we know we have
* reached the lower limit of the range of values for this
- * index attr. On a backward scan, we can stop if this qual
+ * index attr. On a backward scan, we can stop if this qual
* is one of the "must match" subset. We can stop regardless
* of whether the qual is > or <, so long as it's required,
* because it's not possible for any future tuples to pass. On
@@ -1498,8 +1498,8 @@ _bt_checkkeys(IndexScanDesc scan,
/*
* Since NULLs are sorted after non-NULLs, we know we have
* reached the upper limit of the range of values for this
- * index attr. On a forward scan, we can stop if this qual is
- * one of the "must match" subset. We can stop regardless of
+ * index attr. On a forward scan, we can stop if this qual is
+ * one of the "must match" subset. We can stop regardless of
* whether the qual is > or <, so long as it's required,
* because it's not possible for any future tuples to pass. On
* a backward scan, however, we must keep going, because we
@@ -1593,7 +1593,7 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, TupleDesc tupdesc,
/*
* Since NULLs are sorted before non-NULLs, we know we have
* reached the lower limit of the range of values for this
- * index attr. On a backward scan, we can stop if this qual
+ * index attr. On a backward scan, we can stop if this qual
* is one of the "must match" subset. We can stop regardless
* of whether the qual is > or <, so long as it's required,
* because it's not possible for any future tuples to pass. On
@@ -1609,8 +1609,8 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, TupleDesc tupdesc,
/*
* Since NULLs are sorted after non-NULLs, we know we have
* reached the upper limit of the range of values for this
- * index attr. On a forward scan, we can stop if this qual is
- * one of the "must match" subset. We can stop regardless of
+ * index attr. On a forward scan, we can stop if this qual is
+ * one of the "must match" subset. We can stop regardless of
* whether the qual is > or <, so long as it's required,
* because it's not possible for any future tuples to pass. On
* a backward scan, however, we must keep going, because we
@@ -1631,7 +1631,7 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, TupleDesc tupdesc,
{
/*
* Unlike the simple-scankey case, this isn't a disallowed case.
- * But it can never match. If all the earlier row comparison
+ * But it can never match. If all the earlier row comparison
* columns are required for the scan direction, we can stop the
* scan, because there can't be another tuple that will succeed.
*/
@@ -1696,7 +1696,7 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, TupleDesc tupdesc,
/*
* Tuple fails this qual. If it's a required qual for the current
* scan direction, then we can conclude no further tuples will pass,
- * either. Note we have to look at the deciding column, not
+ * either. Note we have to look at the deciding column, not
* necessarily the first or last column of the row condition.
*/
if ((subkey->sk_flags & SK_BT_REQFWD) &&
@@ -1722,7 +1722,7 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, TupleDesc tupdesc,
* is sufficient for setting LP_DEAD status (which is only a hint).
*
* We match items by heap TID before assuming they are the right ones to
- * delete. We cope with cases where items have moved right due to insertions.
+ * delete. We cope with cases where items have moved right due to insertions.
* If an item has moved off the current page due to a split, we'll fail to
* find it and do nothing (this is not an error case --- we assume the item
* will eventually get marked in a future indexscan). Note that because we
@@ -1806,8 +1806,8 @@ _bt_killitems(IndexScanDesc scan, bool haveLock)
/*
* The following routines manage a shared-memory area in which we track
* assignment of "vacuum cycle IDs" to currently-active btree vacuuming
- * operations. There is a single counter which increments each time we
- * start a vacuum to assign it a cycle ID. Since multiple vacuums could
+ * operations. There is a single counter which increments each time we
+ * start a vacuum to assign it a cycle ID. Since multiple vacuums could
* be active concurrently, we have to track the cycle ID for each active
* vacuum; this requires at most MaxBackends entries (usually far fewer).
* We assume at most one vacuum can be active for a given index.
diff --git a/src/backend/access/nbtree/nbtxlog.c b/src/backend/access/nbtree/nbtxlog.c
index 8e2b0b6459a..027c1cb5479 100644
--- a/src/backend/access/nbtree/nbtxlog.c
+++ b/src/backend/access/nbtree/nbtxlog.c
@@ -128,7 +128,7 @@ forget_matching_deletion(RelFileNode node, BlockNumber delblk)
* in correct itemno sequence, but physically the opposite order from the
* original, because we insert them in the opposite of itemno order. This
* does not matter in any current btree code, but it's something to keep an
- * eye on. Is it worth changing just on general principles? See also the
+ * eye on. Is it worth changing just on general principles? See also the
* notes in btree_xlog_split().
*/
static void
@@ -179,7 +179,7 @@ _bt_restore_meta(RelFileNode rnode, XLogRecPtr lsn,
pageop->btpo_flags = BTP_META;
/*
- * Set pd_lower just past the end of the metadata. This is not essential
+ * Set pd_lower just past the end of the metadata. This is not essential
* but it makes the page look compressible to xlog.c.
*/
((PageHeader) metapg)->pd_lower =
@@ -387,7 +387,7 @@ btree_xlog_split(bool onleft, bool isroot,
/*
* Remove the items from the left page that were copied to the
- * right page. Also remove the old high key, if any. (We must
+ * right page. Also remove the old high key, if any. (We must
* remove everything before trying to insert any items, else
* we risk not having enough space.)
*/
@@ -625,7 +625,7 @@ btree_xlog_delete_get_latestRemovedXid(xl_btree_delete *xlrec)
/*
* In what follows, we have to examine the previous state of the index
- * page, as well as the heap page(s) it points to. This is only valid if
+ * page, as well as the heap page(s) it points to. This is only valid if
 * WAL replay has reached a consistent database state, which means that
* the preceding check is not just an optimization, but is *necessary*. We
* won't have let in any user sessions before we reach consistency.
@@ -634,9 +634,9 @@ btree_xlog_delete_get_latestRemovedXid(xl_btree_delete *xlrec)
elog(PANIC, "btree_xlog_delete_get_latestRemovedXid: cannot operate with inconsistent data");
/*
- * Get index page. If the DB is consistent, this should not fail, nor
+ * Get index page. If the DB is consistent, this should not fail, nor
* should any of the heap page fetches below. If one does, we return
- * InvalidTransactionId to cancel all HS transactions. That's probably
+ * InvalidTransactionId to cancel all HS transactions. That's probably
* overkill, but it's safe, and certainly better than panicking here.
*/
ibuffer = XLogReadBuffer(xlrec->node, xlrec->block, false);
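Presumably the fetch is immediately followed by the defensive check the comment promises:

    if (!BufferIsValid(ibuffer))
        return InvalidTransactionId;    /* cancels all HS transactions:
                                         * heavy-handed, but safe */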
@@ -740,7 +740,7 @@ btree_xlog_delete(XLogRecPtr lsn, XLogRecord *record)
* If we have any conflict processing to do, it must happen before we
* update the page.
*
- * Btree delete records can conflict with standby queries. You might
+ * Btree delete records can conflict with standby queries. You might
* think that vacuum records would conflict as well, but we've handled
* that already. XLOG_HEAP2_CLEANUP_INFO records provide the highest xid
* cleaned by the vacuum of the heap and so we can resolve any conflicts
diff --git a/src/backend/access/spgist/spgdoinsert.c b/src/backend/access/spgist/spgdoinsert.c
index 62d3d3a30ec..57cbe4d2f8b 100644
--- a/src/backend/access/spgist/spgdoinsert.c
+++ b/src/backend/access/spgist/spgdoinsert.c
@@ -25,7 +25,7 @@
/*
* SPPageDesc tracks all info about a page we are inserting into. In some
* situations it actually identifies a tuple, or even a specific node within
- * an inner tuple. But any of the fields can be invalid. If the buffer
+ * an inner tuple. But any of the fields can be invalid. If the buffer
* field is valid, it implies we hold pin and exclusive lock on that buffer.
 * The page pointer should be valid exactly when the buffer is.
*/
@@ -249,7 +249,7 @@ addLeafTuple(Relation index, SpGistState *state, SpGistLeafTuple leafTuple,
else
{
/*
- * Tuple must be inserted into existing chain. We mustn't change the
+ * Tuple must be inserted into existing chain. We mustn't change the
* chain's head address, but we don't need to chase the entire chain
* to put the tuple at the end; we can insert it second.
*
@@ -814,7 +814,7 @@ doPickSplit(Relation index, SpGistState *state,
 * We may not actually insert the new tuple, because another picksplit may
 * be necessary due to an over-large value; but we will try to allocate enough
* space to include it; and in any case it has to be included in the input
- * for the picksplit function. So don't increment nToInsert yet.
+ * for the picksplit function. So don't increment nToInsert yet.
*/
in.datums[in.nTuples] = SGLTDATUM(newLeafTuple, state);
heapPtrs[in.nTuples] = newLeafTuple->heapPtr;
@@ -872,7 +872,7 @@ doPickSplit(Relation index, SpGistState *state,
/*
* Check to see if the picksplit function failed to separate the values,
* ie, it put them all into the same child node. If so, select allTheSame
- * mode and create a random split instead. See comments for
+ * mode and create a random split instead. See comments for
* checkAllTheSame as to why we need to know if the new leaf tuples could
* fit on one page.
*/
@@ -1037,7 +1037,7 @@ doPickSplit(Relation index, SpGistState *state,
&xlrec.initDest);
/*
- * Attempt to assign node groups to the two pages. We might fail to
+ * Attempt to assign node groups to the two pages. We might fail to
* do so, even if totalLeafSizes is less than the available space,
* because we can't split a group across pages.
*/
@@ -1917,7 +1917,7 @@ spgdoinsert(Relation index, SpGistState *state,
if (current.blkno == InvalidBlockNumber)
{
/*
- * Create a leaf page. If leafSize is too large to fit on a page,
+ * Create a leaf page. If leafSize is too large to fit on a page,
* we won't actually use the page yet, but it simplifies the API
* for doPickSplit to always have a leaf page at hand; so just
* quietly limit our request to a page size.
@@ -2120,7 +2120,7 @@ spgdoinsert(Relation index, SpGistState *state,
out.result.addNode.nodeLabel);
/*
- * Retry insertion into the enlarged node. We assume that
+ * Retry insertion into the enlarged node. We assume that
* we'll get a MatchNode result this time.
*/
goto process_inner_tuple;
diff --git a/src/backend/access/spgist/spginsert.c b/src/backend/access/spgist/spginsert.c
index 2a50d87c74b..a354cb8973f 100644
--- a/src/backend/access/spgist/spginsert.c
+++ b/src/backend/access/spgist/spginsert.c
@@ -163,7 +163,7 @@ spgbuildempty(PG_FUNCTION_ARGS)
page = (Page) palloc(BLCKSZ);
SpGistInitMetapage(page);
- /* Write the page. If archiving/streaming, XLOG it. */
+ /* Write the page. If archiving/streaming, XLOG it. */
PageSetChecksumInplace(page, SPGIST_METAPAGE_BLKNO);
smgrwrite(index->rd_smgr, INIT_FORKNUM, SPGIST_METAPAGE_BLKNO,
(char *) page, true);
@@ -232,7 +232,7 @@ spginsert(PG_FUNCTION_ARGS)
/*
* We might have to repeat spgdoinsert() multiple times, if conflicts
* occur with concurrent insertions. If so, reset the insertCtx each time
- * to avoid cumulative memory consumption. That means we also have to
+ * to avoid cumulative memory consumption. That means we also have to
* redo initSpGistState(), but it's cheap enough not to matter.
*/
while (!spgdoinsert(index, &spgstate, ht_ctid, *values, *isnull))
diff --git a/src/backend/access/spgist/spgscan.c b/src/backend/access/spgist/spgscan.c
index c9d3cb686c3..eb049ede4e7 100644
--- a/src/backend/access/spgist/spgscan.c
+++ b/src/backend/access/spgist/spgscan.c
@@ -103,7 +103,7 @@ resetSpGistScanOpaque(SpGistScanOpaque so)
* Sets searchNulls, searchNonNulls, numberOfKeys, keyData fields of *so.
*
* The point here is to eliminate null-related considerations from what the
- * opclass consistent functions need to deal with. We assume all SPGiST-
+ * opclass consistent functions need to deal with. We assume all SPGiST-
* indexable operators are strict, so any null RHS value makes the scan
* condition unsatisfiable. We also pull out any IS NULL/IS NOT NULL
* conditions; their effect is reflected into searchNulls/searchNonNulls.
@@ -600,7 +600,7 @@ storeGettuple(SpGistScanOpaque so, ItemPointer heapPtr,
if (so->want_itup)
{
/*
- * Reconstruct desired IndexTuple. We have to copy the datum out of
+ * Reconstruct desired IndexTuple. We have to copy the datum out of
* the temp context anyway, so we may as well create the tuple here.
*/
so->indexTups[so->nPtrs] = index_form_tuple(so->indexTupDesc,
diff --git a/src/backend/access/spgist/spgtextproc.c b/src/backend/access/spgist/spgtextproc.c
index e430d9c1ace..36fae98287c 100644
--- a/src/backend/access/spgist/spgtextproc.c
+++ b/src/backend/access/spgist/spgtextproc.c
@@ -26,11 +26,11 @@
 * In the worst case, an inner tuple in a text radix tree could have as many
* as 256 nodes (one for each possible byte value). Each node can take 16
* bytes on MAXALIGN=8 machines. The inner tuple must fit on an index page
- * of size BLCKSZ. Rather than assuming we know the exact amount of overhead
+ * of size BLCKSZ. Rather than assuming we know the exact amount of overhead
* imposed by page headers, tuple headers, etc, we leave 100 bytes for that
* (the actual overhead should be no more than 56 bytes at this writing, so
* there is slop in this number). So we can safely create prefixes up to
- * BLCKSZ - 256 * 16 - 100 bytes long. Unfortunately, because 256 * 16 is
+ * BLCKSZ - 256 * 16 - 100 bytes long. Unfortunately, because 256 * 16 is
* already 4K, there is no safe prefix length when BLCKSZ is less than 8K;
* it is always possible to get "SPGiST inner tuple size exceeds maximum"
* if there are too many distinct next-byte values at a given place in the
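The paragraph is truncated at the hunk boundary, but the arithmetic it lays out condenses to a single bound; spgtextproc.c defines it essentially as:

    /* 256 nodes * 16 bytes each, plus 100 bytes of assumed page overhead */
    #define SPGIST_MAX_PREFIX_LENGTH  Max((int) (BLCKSZ - 256 * 16 - 100), 2)

With the default 8kB BLCKSZ this allows prefixes up to 3996 bytes; the Max(..., 2) clamp merely keeps the macro usable when BLCKSZ is too small for any truly safe length.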
@@ -327,7 +327,7 @@ spg_text_picksplit(PG_FUNCTION_ARGS)
}
/*
- * Sort by label bytes so that we can group the values into nodes. This
+ * Sort by label bytes so that we can group the values into nodes. This
* also ensures that the nodes are ordered by label value, allowing the
* use of binary search in searchChar.
*/
@@ -377,7 +377,7 @@ spg_text_inner_consistent(PG_FUNCTION_ARGS)
/*
* Reconstruct values represented at this tuple, including parent data,
- * prefix of this tuple if any, and the node label if any. in->level
+ * prefix of this tuple if any, and the node label if any. in->level
* should be the length of the previously reconstructed value, and the
* number of bytes added here is prefixSize or prefixSize + 1.
*
diff --git a/src/backend/access/spgist/spgutils.c b/src/backend/access/spgist/spgutils.c
index 64fd82fbf30..aed1b8d839b 100644
--- a/src/backend/access/spgist/spgutils.c
+++ b/src/backend/access/spgist/spgutils.c
@@ -235,7 +235,7 @@ SpGistUpdateMetaPage(Relation index)
*
* When requesting an inner page, if we get one with the wrong parity,
* we just release the buffer and try again. We will get a different page
- * because GetFreeIndexPage will have marked the page used in FSM. The page
+ * because GetFreeIndexPage will have marked the page used in FSM. The page
* is entered in our local lastUsedPages cache, so there's some hope of
* making use of it later in this session, but otherwise we rely on VACUUM
* to eventually re-enter the page in FSM, making it available for recycling.
@@ -245,7 +245,7 @@ SpGistUpdateMetaPage(Relation index)
*
* When we return a buffer to the caller, the page is *not* entered into
* the lastUsedPages cache; we expect the caller will do so after it's taken
- * whatever space it will use. This is because after the caller has used up
+ * whatever space it will use. This is because after the caller has used up
* some space, the page might have less space than whatever was cached already
* so we'd rather not trash the old cache entry.
*/
@@ -317,7 +317,7 @@ SpGistGetBuffer(Relation index, int flags, int needSpace, bool *isNew)
/*
* If possible, increase the space request to include relation's
- * fillfactor. This ensures that when we add unrelated tuples to a page,
+ * fillfactor. This ensures that when we add unrelated tuples to a page,
* we try to keep 100-fillfactor% available for adding tuples that are
* related to the ones already on it. But fillfactor mustn't cause an
* error for requests that would otherwise be legal.
diff --git a/src/backend/access/spgist/spgvacuum.c b/src/backend/access/spgist/spgvacuum.c
index 1874bdd0953..c7f1b8ec473 100644
--- a/src/backend/access/spgist/spgvacuum.c
+++ b/src/backend/access/spgist/spgvacuum.c
@@ -211,7 +211,7 @@ vacuumLeafPage(spgBulkDeleteState *bds, Relation index, Buffer buffer,
* Figure out exactly what we have to do. We do this separately from
* actually modifying the page, mainly so that we have a representation
* that can be dumped into WAL and then the replay code can do exactly
- * the same thing. The output of this step consists of six arrays
+ * the same thing. The output of this step consists of six arrays
* describing four kinds of operations, to be performed in this order:
*
* toDead[]: tuple numbers to be replaced with DEAD tuples
@@ -287,7 +287,7 @@ vacuumLeafPage(spgBulkDeleteState *bds, Relation index, Buffer buffer,
else
{
/*
- * Second or later live tuple. Arrange to re-chain it to the
+ * Second or later live tuple. Arrange to re-chain it to the
* previous live one, if there was a gap.
*/
if (interveningDeletable)
diff --git a/src/backend/access/spgist/spgxlog.c b/src/backend/access/spgist/spgxlog.c
index 3f5556f65f7..4fe857d3ed2 100644
--- a/src/backend/access/spgist/spgxlog.c
+++ b/src/backend/access/spgist/spgxlog.c
@@ -41,7 +41,7 @@ fillFakeState(SpGistState *state, spgxlogState stateSrc)
}
/*
- * Add a leaf tuple, or replace an existing placeholder tuple. This is used
+ * Add a leaf tuple, or replace an existing placeholder tuple. This is used
* to replay SpGistPageAddNewItem() operations. If the offset points at an
* existing tuple, it had better be a placeholder tuple.
*/
@@ -462,7 +462,7 @@ spgRedoAddNode(XLogRecPtr lsn, XLogRecord *record)
}
/*
- * Update parent downlink. Since parent could be in either of the
+ * Update parent downlink. Since parent could be in either of the
* previous two buffers, it's a bit tricky to determine which BKP bit
* applies.
*/
@@ -799,7 +799,7 @@ spgRedoPickSplit(XLogRecPtr lsn, XLogRecord *record)
bbi++;
/*
- * Now we can release the leaf-page locks. It's okay to do this before
+ * Now we can release the leaf-page locks. It's okay to do this before
* updating the parent downlink.
*/
if (BufferIsValid(srcBuffer))
diff --git a/src/backend/access/transam/clog.c b/src/backend/access/transam/clog.c
index 6a963b6116d..0add733984d 100644
--- a/src/backend/access/transam/clog.c
+++ b/src/backend/access/transam/clog.c
@@ -11,15 +11,15 @@
* log can be broken into relatively small, independent segments.
*
* XLOG interactions: this module generates an XLOG record whenever a new
- * CLOG page is initialized to zeroes. Other writes of CLOG come from
+ * CLOG page is initialized to zeroes. Other writes of CLOG come from
* recording of transaction commit or abort in xact.c, which generates its
* own XLOG records for these events and will re-perform the status update
- * on redo; so we need make no additional XLOG entry here. For synchronous
+ * on redo; so we need make no additional XLOG entry here. For synchronous
* transaction commits, the XLOG is guaranteed flushed through the XLOG commit
* record before we are called to log a commit, so the WAL rule "write xlog
* before data" is satisfied automatically. However, for async commits we
* must track the latest LSN affecting each CLOG page, so that we can flush
- * XLOG that far and satisfy the WAL rule. We don't have to worry about this
+ * XLOG that far and satisfy the WAL rule. We don't have to worry about this
* for aborts (whether sync or async), since the post-crash assumption would
* be that such transactions failed anyway.
*
@@ -105,7 +105,7 @@ static void set_status_by_pages(int nsubxids, TransactionId *subxids,
* in the tree of xid. In various cases nsubxids may be zero.
*
* lsn must be the WAL location of the commit record when recording an async
- * commit. For a synchronous commit it can be InvalidXLogRecPtr, since the
+ * commit. For a synchronous commit it can be InvalidXLogRecPtr, since the
* caller guarantees the commit record is already flushed in that case. It
* should be InvalidXLogRecPtr for abort cases, too.
*
@@ -417,7 +417,7 @@ TransactionIdGetStatus(TransactionId xid, XLogRecPtr *lsn)
* Testing during the PostgreSQL 9.2 development cycle revealed that on a
* large multi-processor system, it was possible to have more CLOG page
 * requests in flight at one time than the number of CLOG buffers which existed
- * at that time, which was hardcoded to 8. Further testing revealed that
+ * at that time, which was hardcoded to 8. Further testing revealed that
* performance dropped off with more than 32 CLOG buffers, possibly because
* the linear buffer search algorithm doesn't scale well.
*
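The rule that came out of that testing is a small clamp, essentially:

    Size
    CLOGShmemBuffers(void)
    {
        /* Scale with shared_buffers, but stay within [4, 32] buffers. */
        return Min(32, Max(4, NBuffers / 512));
    }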
diff --git a/src/backend/access/transam/multixact.c b/src/backend/access/transam/multixact.c
index bacdbd64eff..8886ce4c672 100644
--- a/src/backend/access/transam/multixact.c
+++ b/src/backend/access/transam/multixact.c
@@ -5,7 +5,7 @@
*
* The pg_multixact manager is a pg_clog-like manager that stores an array of
* MultiXactMember for each MultiXactId. It is a fundamental part of the
- * shared-row-lock implementation. Each MultiXactMember is comprised of a
+ * shared-row-lock implementation. Each MultiXactMember is comprised of a
* TransactionId and a set of flag bits. The name is a bit historical:
* originally, a MultiXactId consisted of more than one TransactionId (except
* in rare corner cases), hence "multi". Nowadays, however, it's perfectly
@@ -18,7 +18,7 @@
*
* We use two SLRU areas, one for storing the offsets at which the data
* starts for each MultiXactId in the other one. This trick allows us to
- * store variable length arrays of TransactionIds. (We could alternatively
+ * store variable length arrays of TransactionIds. (We could alternatively
* use one area containing counts and TransactionIds, with valid MultiXactId
* values pointing at slots containing counts; but that way seems less robust
* since it would get completely confused if someone inquired about a bogus
@@ -38,7 +38,7 @@
*
* Like clog.c, and unlike subtrans.c, we have to preserve state across
* crashes and ensure that MXID and offset numbering increases monotonically
- * across a crash. We do this in the same way as it's done for transaction
+ * across a crash. We do this in the same way as it's done for transaction
* IDs: the WAL record is guaranteed to contain evidence of every MXID we
* could need to worry about, and we just make sure that at the end of
* replay, the next-MXID and next-offset counters are at least as large as
@@ -50,7 +50,7 @@
* The minimum value in each database is stored in pg_database, and the
* global minimum is part of pg_control. Any vacuum that is able to
* advance its database's minimum value also computes a new global minimum,
- * and uses this value to truncate older segments. When new multixactid
+ * and uses this value to truncate older segments. When new multixactid
* values are to be created, care is taken that the counter does not
* fall within the wraparound horizon considering the global minimum value.
*
@@ -85,13 +85,13 @@
/*
- * Defines for MultiXactOffset page sizes. A page is the same BLCKSZ as is
+ * Defines for MultiXactOffset page sizes. A page is the same BLCKSZ as is
* used everywhere else in Postgres.
*
* Note: because MultiXactOffsets are 32 bits and wrap around at 0xFFFFFFFF,
* MultiXact page numbering also wraps around at
* 0xFFFFFFFF/MULTIXACT_OFFSETS_PER_PAGE, and segment numbering at
- * 0xFFFFFFFF/MULTIXACT_OFFSETS_PER_PAGE/SLRU_SEGMENTS_PER_PAGE. We need
+ * 0xFFFFFFFF/MULTIXACT_OFFSETS_PER_PAGE/SLRU_SEGMENTS_PER_PAGE. We need
* take no explicit notice of that fact in this module, except when comparing
* segment and page numbers in TruncateMultiXact (see
* MultiXactOffsetPagePrecedes).
@@ -110,7 +110,7 @@
* additional flag bits for each TransactionId. To do this without getting
* into alignment issues, we store four bytes of flags, and then the
 * corresponding 4 Xids. Each such 5-word (20-byte) set we call a "group"; groups
- * are stored as a whole in pages. Thus, with 8kB BLCKSZ, we keep 409 groups
+ * are stored as a whole in pages. Thus, with 8kB BLCKSZ, we keep 409 groups
* per page. This wastes 12 bytes per page, but that's OK -- simplicity (and
* performance) trumps space efficiency here.
*
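The group arithmetic works out as below; the macro names are illustrative, not necessarily multixact.c's own:

    #define FLAGBYTES_PER_GROUP  4      /* four flag bytes... */
    #define XIDS_PER_GROUP       4      /* ...then four TransactionIds */
    #define GROUP_SIZE \
        (FLAGBYTES_PER_GROUP + XIDS_PER_GROUP * sizeof(TransactionId))
                                        /* 4 + 4*4 = 20 bytes */
    #define GROUPS_PER_PAGE  (BLCKSZ / GROUP_SIZE)
                                        /* 8192 / 20 = 409, leaving 12 bytes */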
@@ -161,7 +161,7 @@ static SlruCtlData MultiXactMemberCtlData;
#define MultiXactMemberCtl (&MultiXactMemberCtlData)
/*
- * MultiXact state shared across all backends. All this state is protected
+ * MultiXact state shared across all backends. All this state is protected
* by MultiXactGenLock. (We also use MultiXactOffsetControlLock and
* MultiXactMemberControlLock to guard accesses to the two sets of SLRU
* buffers. For concurrency's sake, we avoid holding more than one of these
@@ -179,7 +179,7 @@ typedef struct MultiXactStateData
MultiXactId lastTruncationPoint;
/*
- * oldest multixact that is still on disk. Anything older than this
+ * oldest multixact that is still on disk. Anything older than this
* should not be consulted.
*/
MultiXactId oldestMultiXactId;
@@ -528,7 +528,7 @@ MultiXactIdIsRunning(MultiXactId multi)
/*
* This could be made faster by having another entry point in procarray.c,
- * walking the PGPROC array only once for all the members. But in most
+ * walking the PGPROC array only once for all the members. But in most
* cases nmembers should be small enough that it doesn't much matter.
*/
for (i = 0; i < nmembers; i++)
@@ -615,7 +615,7 @@ MultiXactIdSetOldestMember(void)
* The value to set is the oldest of nextMXact and all the valid per-backend
* OldestMemberMXactId[] entries. Because of the locking we do, we can be
* certain that no subsequent call to MultiXactIdSetOldestMember can set
- * an OldestMemberMXactId[] entry older than what we compute here. Therefore
+ * an OldestMemberMXactId[] entry older than what we compute here. Therefore
* there is no live transaction, now or later, that can be a member of any
* MultiXactId older than the OldestVisibleMXactId we compute here.
*/
@@ -751,7 +751,7 @@ MultiXactIdCreateFromMembers(int nmembers, MultiXactMember *members)
* heap_lock_tuple() to have put it there, and heap_lock_tuple() generates
* an XLOG record that must follow ours. The normal LSN interlock between
* the data page and that XLOG record will ensure that our XLOG record
- * reaches disk first. If the SLRU members/offsets data reaches disk
+ * reaches disk first. If the SLRU members/offsets data reaches disk
* sooner than the XLOG record, we do not care because we'll overwrite it
* with zeroes unless the XLOG record is there too; see notes at top of
* this file.
@@ -882,7 +882,7 @@ RecordNewMultiXact(MultiXactId multi, MultiXactOffset offset,
* GetNewMultiXactId
* Get the next MultiXactId.
*
- * Also, reserve the needed amount of space in the "members" area. The
+ * Also, reserve the needed amount of space in the "members" area. The
* starting offset of the reserved space is returned in *offset.
*
* This may generate XLOG records for expansion of the offsets and/or members
@@ -916,7 +916,7 @@ GetNewMultiXactId(int nmembers, MultiXactOffset *offset)
/*----------
* Check to see if it's safe to assign another MultiXactId. This protects
- * against catastrophic data loss due to multixact wraparound. The basic
+ * against catastrophic data loss due to multixact wraparound. The basic
* rules are:
*
* If we're past multiVacLimit, start trying to force autovacuum cycles.
@@ -930,7 +930,7 @@ GetNewMultiXactId(int nmembers, MultiXactOffset *offset)
{
/*
* For safety's sake, we release MultiXactGenLock while sending
- * signals, warnings, etc. This is not so much because we care about
+ * signals, warnings, etc. This is not so much because we care about
* preserving concurrency in this situation, as to avoid any
* possibility of deadlock while doing get_database_name(). First,
* copy all the shared values we'll need in this path.
@@ -1036,7 +1036,7 @@ GetNewMultiXactId(int nmembers, MultiXactOffset *offset)
* until after file extension has succeeded!
*
* We don't care about MultiXactId wraparound here; it will be handled by
- * the next iteration. But note that nextMXact may be InvalidMultiXactId
+ * the next iteration. But note that nextMXact may be InvalidMultiXactId
* or the first value on a segment-beginning page after this routine
* exits, so anyone else looking at the variable must be prepared to deal
* with either case. Similarly, nextOffset may be zero, but we won't use
@@ -1114,16 +1114,16 @@ GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members,
* need to allow an empty set to be returned regardless, if the caller is
* willing to accept it; the caller is expected to check that it's an
* allowed condition (such as ensuring that the infomask bits set on the
- * tuple are consistent with the pg_upgrade scenario). If the caller is
+ * tuple are consistent with the pg_upgrade scenario). If the caller is
* expecting this to be called only on recently created multis, then we
* raise an error.
*
* Conversely, an ID >= nextMXact shouldn't ever be seen here; if it is
- * seen, it implies undetected ID wraparound has occurred. This raises a
+ * seen, it implies undetected ID wraparound has occurred. This raises a
* hard error.
*
* Shared lock is enough here since we aren't modifying any global state.
- * Acquire it just long enough to grab the current counter values. We may
+ * Acquire it just long enough to grab the current counter values. We may
* need both nextMXact and nextOffset; see below.
*/
LWLockAcquire(MultiXactGenLock, LW_SHARED);
@@ -1151,12 +1151,12 @@ GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members,
/*
* Find out the offset at which we need to start reading MultiXactMembers
- * and the number of members in the multixact. We determine the latter as
+ * and the number of members in the multixact. We determine the latter as
* the difference between this multixact's starting offset and the next
* one's. However, there are some corner cases to worry about:
*
* 1. This multixact may be the latest one created, in which case there is
- * no next one to look at. In this case the nextOffset value we just
+ * no next one to look at. In this case the nextOffset value we just
* saved is the correct endpoint.
*
* 2. The next multixact may still be in process of being filled in: that
@@ -1167,11 +1167,11 @@ GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members,
* (because we are careful to pre-zero offset pages). Because
* GetNewMultiXactId will never return zero as the starting offset for a
* multixact, when we read zero as the next multixact's offset, we know we
- * have this case. We sleep for a bit and try again.
+ * have this case. We sleep for a bit and try again.
*
* 3. Because GetNewMultiXactId increments offset zero to offset one to
* handle case #2, there is an ambiguity near the point of offset
- * wraparound. If we see next multixact's offset is one, is that our
+ * wraparound. If we see next multixact's offset is one, is that our
* multixact's actual endpoint, or did it end at zero with a subsequent
* increment? We handle this using the knowledge that if the zero'th
* member slot wasn't filled, it'll contain zero, and zero isn't a valid
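
Corner case #2 above is the subtle one: a zero in the next multixact's offset slot can only mean "not filled in yet", never a real starting offset, so the reader just retries. A minimal sketch of that retry loop; read_offset is a hypothetical callback standing in for the offsets-SLRU lookup, and cases #1 (latest multixact: use the saved nextOffset instead) and #3 (the offset-one ambiguity) still need the extra checks described in the comment.

#include <stdint.h>
#include <unistd.h>

typedef uint32_t MultiXactOffset;
typedef MultiXactOffset (*read_offset_fn_sketch)(uint32_t multi);

static int
multixact_member_count_sketch(uint32_t multi, MultiXactOffset startOffset,
                              read_offset_fn_sketch read_offset)
{
    for (;;)
    {
        MultiXactOffset nextOffset = read_offset(multi + 1);

        if (nextOffset != 0)
            return (int) (nextOffset - startOffset);

        /* Case #2 above: next entry still zero == not filled in yet. */
        usleep(1000);
    }
}
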
@@ -1694,7 +1694,7 @@ multixact_twophase_postabort(TransactionId xid, uint16 info,
/*
* Initialization of shared memory for MultiXact. We use two SLRU areas,
- * thus double memory. Also, reserve space for the shared MultiXactState
+ * thus double memory. Also, reserve space for the shared MultiXactState
* struct and the per-backend MultiXactId arrays (two of those, too).
*/
Size
@@ -1754,7 +1754,7 @@ MultiXactShmemInit(void)
/*
* This func must be called ONCE on system install. It creates the initial
- * MultiXact segments. (The MultiXacts directories are assumed to have been
+ * MultiXact segments. (The MultiXacts directories are assumed to have been
* created by initdb, and MultiXactShmemInit must have been called already.)
*/
void
@@ -1925,7 +1925,7 @@ TrimMultiXact(void)
MultiXactOffsetCtl->shared->latest_page_number = pageno;
/*
- * Zero out the remainder of the current offsets page. See notes in
+ * Zero out the remainder of the current offsets page. See notes in
* StartupCLOG() for motivation.
*/
entryno = MultiXactIdToOffsetEntry(multi);
@@ -1955,7 +1955,7 @@ TrimMultiXact(void)
MultiXactMemberCtl->shared->latest_page_number = pageno;
/*
- * Zero out the remainder of the current members page. See notes in
+ * Zero out the remainder of the current members page. See notes in
* TrimCLOG() for motivation.
*/
flagsoff = MXOffsetToFlagsOffset(offset);
@@ -2097,7 +2097,7 @@ SetMultiXactIdLimit(MultiXactId oldest_datminmxid, Oid oldest_datoid)
/*
* We'll start complaining loudly when we get within 10M multis of the
- * stop point. This is kind of arbitrary, but if you let your gas gauge
+ * stop point. This is kind of arbitrary, but if you let your gas gauge
* get down to 1% of full, would you be looking for the next gas station?
* We need to be fairly liberal about this number because there are lots
* of scenarios where most transactions are done by automatic clients that
diff --git a/src/backend/access/transam/slru.c b/src/backend/access/transam/slru.c
index af249022476..603d65fc96e 100644
--- a/src/backend/access/transam/slru.c
+++ b/src/backend/access/transam/slru.c
@@ -15,7 +15,7 @@
*
* We use a control LWLock to protect the shared data structures, plus
* per-buffer LWLocks that synchronize I/O for each buffer. The control lock
- * must be held to examine or modify any shared state. A process that is
+ * must be held to examine or modify any shared state. A process that is
* reading in or writing out a page buffer does not hold the control lock,
* only the per-buffer lock for the buffer it is working on.
*
@@ -34,7 +34,7 @@
* could have happened while we didn't have the lock).
*
* As with the regular buffer manager, it is possible for another process
- * to re-dirty a page that is currently being written out. This is handled
+ * to re-dirty a page that is currently being written out. This is handled
* by re-setting the page's page_dirty flag.
*
*
@@ -96,7 +96,7 @@ typedef struct SlruFlushData *SlruFlush;
* page_lru_count entries to be "reset" to lower values than they should have,
* in case a process is delayed while it executes this macro. With care in
* SlruSelectLRUPage(), this does little harm, and in any case the absolute
- * worst possible consequence is a nonoptimal choice of page to evict. The
+ * worst possible consequence is a nonoptimal choice of page to evict. The
* gain from allowing concurrent reads of SLRU pages seems worth it.
*/
#define SlruRecentlyUsed(shared, slotno) \
@@ -481,7 +481,7 @@ SimpleLruReadPage_ReadOnly(SlruCtl ctl, int pageno, TransactionId xid)
*
* NOTE: only one write attempt is made here. Hence, it is possible that
* the page is still dirty at exit (if someone else re-dirtied it during
- * the write). However, we *do* attempt a fresh write even if the page
+ * the write). However, we *do* attempt a fresh write even if the page
* is already being written; this is for checkpoints.
*
* Control lock must be held at entry, and will be held at exit.
@@ -634,7 +634,7 @@ SlruPhysicalReadPage(SlruCtl ctl, int pageno, int slotno)
* In a crash-and-restart situation, it's possible for us to receive
* commands to set the commit status of transactions whose bits are in
* already-truncated segments of the commit log (see notes in
- * SlruPhysicalWritePage). Hence, if we are InRecovery, allow the case
+ * SlruPhysicalWritePage). Hence, if we are InRecovery, allow the case
* where the file doesn't exist, and return zeroes instead.
*/
fd = OpenTransientFile(path, O_RDWR | PG_BINARY, S_IRUSR | S_IWUSR);
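
The escape hatch described in this hunk is easy to miss: during recovery, a missing SLRU segment is treated as a page of zeroes rather than as an error. A standalone sketch of that shape, assuming POSIX open/pread; in_recovery stands in for the backend's InRecovery flag and the block size is hardwired for illustration.

#include <errno.h>
#include <fcntl.h>
#include <stdbool.h>
#include <string.h>
#include <sys/types.h>
#include <unistd.h>

#define BLCKSZ_SKETCH 8192

static bool
read_slru_page_sketch(const char *path, off_t offset, char *page,
                      bool in_recovery)
{
    int fd = open(path, O_RDONLY);

    if (fd < 0)
    {
        if (errno == ENOENT && in_recovery)
        {
            /* already-truncated segment: pretend it's all zeroes */
            memset(page, 0, BLCKSZ_SKETCH);
            return true;
        }
        return false;
    }
    if (pread(fd, page, BLCKSZ_SKETCH, offset) != BLCKSZ_SKETCH)
    {
        close(fd);
        return false;
    }
    return close(fd) == 0;
}
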
@@ -964,9 +964,9 @@ SlruSelectLRUPage(SlruCtl ctl, int pageno)
/*
* If we find any EMPTY slot, just select that one. Else choose a
- * victim page to replace. We normally take the least recently used
+ * victim page to replace. We normally take the least recently used
* valid page, but we will never take the slot containing
- * latest_page_number, even if it appears least recently used. We
+ * latest_page_number, even if it appears least recently used. We
* will select a slot that is already I/O busy only if there is no
* other choice: a read-busy slot will not be least recently used once
* the read finishes, and waiting for an I/O on a write-busy slot is
@@ -1041,7 +1041,7 @@ SlruSelectLRUPage(SlruCtl ctl, int pageno)
/*
* If all pages (except possibly the latest one) are I/O busy, we'll
- * have to wait for an I/O to complete and then retry. In that
+ * have to wait for an I/O to complete and then retry. In that
* unhappy case, we choose to wait for the I/O on the least recently
* used slot, on the assumption that it was likely initiated first of
* all the I/Os in progress and may therefore finish first.
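
The victim-selection policy spread across these two hunks can be condensed: an EMPTY slot wins outright, the slot holding latest_page_number is never taken, I/O-busy slots are a last resort, and otherwise the coldest valid page is evicted. A simplified sketch, assuming a larger lru_count means a colder page; the real SlruSelectLRUPage derives age from cur_lru_count snapshots and also tracks the best I/O-busy candidate separately.

typedef enum
{
    SLOT_EMPTY_SKETCH,
    SLOT_VALID_SKETCH,
    SLOT_READ_BUSY_SKETCH,
    SLOT_WRITE_BUSY_SKETCH
} slot_status_sketch;

static int
select_victim_sketch(const slot_status_sketch *status,
                     const unsigned *lru_count,     /* higher == colder */
                     const int *page_number,
                     int nslots, int latest_page_number)
{
    int best = -1;
    unsigned best_count = 0;

    for (int slotno = 0; slotno < nslots; slotno++)
    {
        if (status[slotno] == SLOT_EMPTY_SKETCH)
            return slotno;                  /* free slot: done */
        if (page_number[slotno] == latest_page_number)
            continue;                       /* never evict the newest page */
        if (status[slotno] != SLOT_VALID_SKETCH)
            continue;                       /* skip I/O-busy slots for now */
        if (lru_count[slotno] >= best_count)
        {
            best = slotno;
            best_count = lru_count[slotno];
        }
    }
    return best;    /* -1: everything busy; caller waits for an I/O, retries */
}
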
@@ -1193,7 +1193,7 @@ restart:;
/*
* Hmm, we have (or may have) I/O operations acting on the page, so
* we've got to wait for them to finish and then start again. This is
- * the same logic as in SlruSelectLRUPage. (XXX if page is dirty,
+ * the same logic as in SlruSelectLRUPage. (XXX if page is dirty,
* wouldn't it be OK to just discard it without writing it? For now,
* keep the logic the same as it was.)
*/
diff --git a/src/backend/access/transam/subtrans.c b/src/backend/access/transam/subtrans.c
index 88c42c88695..1453418d5bd 100644
--- a/src/backend/access/transam/subtrans.c
+++ b/src/backend/access/transam/subtrans.c
@@ -5,7 +5,7 @@
*
* The pg_subtrans manager is a pg_clog-like manager that stores the parent
* transaction Id for each transaction. It is a fundamental part of the
- * nested transactions implementation. A main transaction has a parent
+ * nested transactions implementation. A main transaction has a parent
* of InvalidTransactionId, and each subtransaction has its immediate parent.
* The tree can easily be walked from child to parent, but not in the
* opposite direction.
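
Since the tree supports only child-to-parent traversal, finding a subtransaction's top-level ancestor is a simple upward walk, which is essentially what SubTransGetTopmostTransaction does. A sketch with get_parent as a hypothetical callback standing in for the pg_subtrans page lookup.

#include <stdint.h>

typedef uint32_t TransactionId;
#define InvalidTransactionId ((TransactionId) 0)

typedef TransactionId (*get_parent_fn_sketch)(TransactionId xid);

/* Walk child -> parent until a top-level xact is reached (its parent
 * is InvalidTransactionId). */
static TransactionId
topmost_xid_sketch(TransactionId xid, get_parent_fn_sketch get_parent)
{
    TransactionId parent;

    while ((parent = get_parent(xid)) != InvalidTransactionId)
        xid = parent;
    return xid;
}
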
@@ -191,7 +191,7 @@ SUBTRANSShmemInit(void)
* must have been called already.)
*
* Note: it's not really necessary to create the initial segment now,
- * since slru.c would create it on first write anyway. But we may as well
+ * since slru.c would create it on first write anyway. But we may as well
* do it to be sure the directory is set up correctly.
*/
void
diff --git a/src/backend/access/transam/timeline.c b/src/backend/access/transam/timeline.c
index e47134a65a0..0f34c17e1f6 100644
--- a/src/backend/access/transam/timeline.c
+++ b/src/backend/access/transam/timeline.c
@@ -66,7 +66,7 @@ restoreTimeLineHistoryFiles(TimeLineID begin, TimeLineID end)
* Try to read a timeline's history file.
*
* If successful, return the list of component TLIs (the given TLI followed by
- * its ancestor TLIs). If we can't find the history file, assume that the
+ * its ancestor TLIs). If we can't find the history file, assume that the
* timeline has no parents, and return a list of just the specified timeline
* ID.
*/
@@ -281,7 +281,7 @@ findNewestTimeLine(TimeLineID startTLI)
* reason: human-readable explanation of why the timeline was switched
*
* Currently this is only used at the end of recovery, and so there are no locking

- * considerations. But we should be just as tense as XLogFileInit to avoid
+ * considerations. But we should be just as tense as XLogFileInit to avoid
* emplacing a bogus file.
*/
void
@@ -418,7 +418,7 @@ writeTimeLineHistory(TimeLineID newTLI, TimeLineID parentTLI,
/*
* Prefer link() to rename() here just to be really sure that we don't
- * overwrite an existing file. However, there shouldn't be one, so
+ * overwrite an existing file. However, there shouldn't be one, so
* rename() is an acceptable substitute except for the truly paranoid.
*/
#if HAVE_WORKING_LINK
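
The link()-vs-rename() paranoia above reduces to one property: link() fails with EEXIST instead of clobbering, so link-then-unlink can never silently overwrite the target file. A sketch under POSIX assumptions; HAVE_WORKING_LINK_SKETCH stands in for the real configure symbol, and error handling is reduced to -1 returns.

#include <stdio.h>
#include <unistd.h>

static int
install_file_sketch(const char *tmppath, const char *path)
{
#if defined(HAVE_WORKING_LINK_SKETCH)
    if (link(tmppath, path) < 0)
        return -1;              /* EEXIST here means the target exists */
    (void) unlink(tmppath);
#else
    if (rename(tmppath, path) < 0)  /* may overwrite; "acceptable" fallback */
        return -1;
#endif
    return 0;
}
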
diff --git a/src/backend/access/transam/transam.c b/src/backend/access/transam/transam.c
index b92a27bff36..94165f5fec8 100644
--- a/src/backend/access/transam/transam.c
+++ b/src/backend/access/transam/transam.c
@@ -145,7 +145,7 @@ TransactionIdDidCommit(TransactionId transactionId)
* be a window just after database startup where we do not have complete
* knowledge in pg_subtrans of the transactions after TransactionXmin.
* StartupSUBTRANS() has ensured that any missing information will be
- * zeroed. Since this case should not happen under normal conditions, it
+ * zeroed. Since this case should not happen under normal conditions, it
* seems reasonable to emit a WARNING for it.
*/
if (xidstatus == TRANSACTION_STATUS_SUB_COMMITTED)
@@ -301,7 +301,7 @@ TransactionIdPrecedes(TransactionId id1, TransactionId id2)
{
/*
* If either ID is a permanent XID then we can just do unsigned
- * comparison. If both are normal, do a modulo-2^32 comparison.
+ * comparison. If both are normal, do a modulo-2^32 comparison.
*/
int32 diff;
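
The modulo-2^32 rule in this hunk is worth seeing as code, since it underpins all the wraparound limits touched elsewhere in this commit. A compile-alone sketch matching the described behavior (the _sketch names are mine, not backend symbols):

#include <stdbool.h>
#include <stdint.h>

typedef uint32_t TransactionId;
#define FirstNormalTransactionId ((TransactionId) 3)
#define TransactionIdIsNormal(xid) ((xid) >= FirstNormalTransactionId)

static bool
xid_precedes_sketch(TransactionId id1, TransactionId id2)
{
    int32_t diff;

    if (!TransactionIdIsNormal(id1) || !TransactionIdIsNormal(id2))
        return id1 < id2;       /* permanent XIDs: plain unsigned compare */

    diff = (int32_t) (id1 - id2);   /* modulo-2^32 circular compare */
    return diff < 0;
}

The signed cast makes each normal XID treat the 2^31 IDs behind it as the past and the 2^31 ahead as the future, which is exactly why the stop and warn limits in varsup.c sit roughly 2^31 XIDs past the oldest frozen XID.
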
diff --git a/src/backend/access/transam/twophase.c b/src/backend/access/transam/twophase.c
index e975f8d26d0..fbdd0abc5d0 100644
--- a/src/backend/access/transam/twophase.c
+++ b/src/backend/access/transam/twophase.c
@@ -442,7 +442,7 @@ LockGXact(const char *gid, Oid user)
/*
* Note: it probably would be possible to allow committing from
* another database; but at the moment NOTIFY is known not to work and
- * there may be some other issues as well. Hence disallow until
+ * there may be some other issues as well. Hence disallow until
* someone gets motivated to make it work.
*/
if (MyDatabaseId != proc->databaseId)
@@ -1030,7 +1030,7 @@ EndPrepare(GlobalTransaction gxact)
* out the correct state file CRC, we have an inconsistency: the xact is
* prepared according to WAL but not according to our on-disk state. We
* use a critical section to force a PANIC if we are unable to complete
- * the write --- then, WAL replay should repair the inconsistency. The
+ * the write --- then, WAL replay should repair the inconsistency. The
* odds of a PANIC actually occurring should be very tiny given that we
* were able to write the bogus CRC above.
*
@@ -1068,7 +1068,7 @@ EndPrepare(GlobalTransaction gxact)
errmsg("could not close two-phase state file: %m")));
/*
- * Mark the prepared transaction as valid. As soon as xact.c marks
+ * Mark the prepared transaction as valid. As soon as xact.c marks
* MyPgXact as not running our XID (which it will do immediately after
* this function returns), others can commit/rollback the xact.
*
@@ -1335,7 +1335,7 @@ FinishPreparedTransaction(const char *gid, bool isCommit)
/*
* In case we fail while running the callbacks, mark the gxact invalid so
* no one else will try to commit/rollback, and so it can be recycled
- * properly later. It is still locked by our XID so it won't go away yet.
+ * properly later. It is still locked by our XID so it won't go away yet.
*
* (We assume it's safe to do this without taking TwoPhaseStateLock.)
*/
@@ -1539,7 +1539,7 @@ CheckPointTwoPhase(XLogRecPtr redo_horizon)
*
* This approach creates a race condition: someone else could delete a
* GXACT between the time we release TwoPhaseStateLock and the time we try
- * to open its state file. We handle this by special-casing ENOENT
+ * to open its state file. We handle this by special-casing ENOENT
* failures: if we see that, we verify that the GXACT is no longer valid,
* and if so ignore the failure.
*/
@@ -1620,7 +1620,7 @@ CheckPointTwoPhase(XLogRecPtr redo_horizon)
*
* We throw away any prepared xacts with main XID beyond nextXid --- if any
* are present, it suggests that the DBA has done a PITR recovery to an
- * earlier point in time without cleaning out pg_twophase. We dare not
+ * earlier point in time without cleaning out pg_twophase. We dare not
* try to recover such prepared xacts since they likely depend on database
* state that doesn't exist now.
*
@@ -1712,7 +1712,7 @@ PrescanPreparedTransactions(TransactionId **xids_p, int *nxids_p)
* XID, and they may force us to advance nextXid.
*
* We don't expect anyone else to modify nextXid, hence we don't
- * need to hold a lock while examining it. We still acquire the
+ * need to hold a lock while examining it. We still acquire the
* lock to modify it, though.
*/
subxids = (TransactionId *)
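
The locking pattern stated here (peek without the lock, take the lock only to modify) is a classic double-check. A sketch with no-op stubs standing in for LWLockAcquire/LWLockRelease on XidGenLock; note the real code advances with TransactionIdAdvance, which skips the special XIDs, where this sketch just adds one.

#include <stdbool.h>
#include <stdint.h>

typedef uint32_t TransactionId;

static TransactionId shared_nextXid;    /* stands in for ShmemVariableCache */

static void xidgenlock_acquire_sketch(void) { /* LWLockAcquire stand-in */ }
static void xidgenlock_release_sketch(void) { /* LWLockRelease stand-in */ }

static bool
xid_follows_or_equals_sketch(TransactionId a, TransactionId b)
{
    return (int32_t) (a - b) >= 0;      /* modulo-2^32, as in transam.c */
}

static void
advance_nextxid_past_sketch(TransactionId xid)
{
    if (xid_follows_or_equals_sketch(xid, shared_nextXid))  /* unlocked peek */
    {
        xidgenlock_acquire_sketch();
        if (xid_follows_or_equals_sketch(xid, shared_nextXid))  /* recheck */
            shared_nextXid = xid + 1;   /* real code: TransactionIdAdvance */
        xidgenlock_release_sketch();
    }
}
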
diff --git a/src/backend/access/transam/varsup.c b/src/backend/access/transam/varsup.c
index 7252ee25c88..35f63a2ff85 100644
--- a/src/backend/access/transam/varsup.c
+++ b/src/backend/access/transam/varsup.c
@@ -39,7 +39,7 @@ VariableCache ShmemVariableCache = NULL;
*
* Note: when this is called, we are actually already inside a valid
* transaction, since XIDs are now not allocated until the transaction
- * does something. So it is safe to do a database lookup if we want to
+ * does something. So it is safe to do a database lookup if we want to
* issue a warning about XID wrap.
*/
TransactionId
@@ -165,20 +165,20 @@ GetNewTransactionId(bool isSubXact)
/*
* Now advance the nextXid counter. This must not happen until after we
* have successfully completed ExtendCLOG() --- if that routine fails, we
- * want the next incoming transaction to try it again. We cannot assign
+ * want the next incoming transaction to try it again. We cannot assign
* more XIDs until there is CLOG space for them.
*/
TransactionIdAdvance(ShmemVariableCache->nextXid);
/*
* We must store the new XID into the shared ProcArray before releasing
- * XidGenLock. This ensures that every active XID older than
+ * XidGenLock. This ensures that every active XID older than
* latestCompletedXid is present in the ProcArray, which is essential for
* correct OldestXmin tracking; see src/backend/access/transam/README.
*
* XXX by storing xid into MyPgXact without acquiring ProcArrayLock, we
* are relying on fetch/store of an xid to be atomic, else other backends
- * might see a partially-set xid here. But holding both locks at once
+ * might see a partially-set xid here. But holding both locks at once
* would be a nasty concurrency hit. So for now, assume atomicity.
*
* Note that readers of PGXACT xid fields should be careful to fetch the
@@ -289,7 +289,7 @@ SetTransactionIdLimit(TransactionId oldest_datfrozenxid, Oid oldest_datoid)
/*
* We'll start complaining loudly when we get within 10M transactions of
- * the stop point. This is kind of arbitrary, but if you let your gas
+ * the stop point. This is kind of arbitrary, but if you let your gas
* gauge get down to 1% of full, would you be looking for the next gas
* station? We need to be fairly liberal about this number because there
* are lots of scenarios where most transactions are done by automatic
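
For concreteness, the limits this "gas gauge" paragraph refers to are derived earlier in SetTransactionIdLimit, roughly as in the compile-alone sketch below. The constants and names are illustrative, and the real code also derives autovacuum trigger points from the same arithmetic.

#include <stdint.h>

typedef uint32_t TransactionId;
#define MaxTransactionId         ((TransactionId) 0xFFFFFFFF)
#define FirstNormalTransactionId ((TransactionId) 3)

static void
compute_xid_limits_sketch(TransactionId oldest_frozen,
                          TransactionId *warn_limit,
                          TransactionId *stop_limit)
{
    /* wraparound horizon: ~2^31 XIDs past the oldest frozen XID */
    TransactionId wrap_limit = oldest_frozen + (MaxTransactionId >> 1);

    *stop_limit = wrap_limit - 1000000;     /* hard stop 1M before the wall */
    *warn_limit = *stop_limit - 10000000;   /* loud warnings 10M before that */

    /* keep the limits out of the reserved "special XID" range */
    if (*stop_limit < FirstNormalTransactionId)
        *stop_limit -= FirstNormalTransactionId;
    if (*warn_limit < FirstNormalTransactionId)
        *warn_limit -= FirstNormalTransactionId;
}
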
@@ -390,7 +390,7 @@ SetTransactionIdLimit(TransactionId oldest_datfrozenxid, Oid oldest_datoid)
* We primarily check whether oldestXidDB is valid. The cases we have in
* mind are that that database was dropped, or the field was reset to zero
* by pg_resetxlog. In either case we should force recalculation of the
- * wrap limit. Also do it if oldestXid is old enough to be forcing
+ * wrap limit. Also do it if oldestXid is old enough to be forcing
* autovacuums or other actions; this ensures we update our state as soon
* as possible once extra overhead is being incurred.
*/
diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c
index 77bf9748352..914ff3a3bc0 100644
--- a/src/backend/access/transam/xact.c
+++ b/src/backend/access/transam/xact.c
@@ -599,7 +599,7 @@ SubTransactionIsActive(SubTransactionId subxid)
*
* "used" must be TRUE if the caller intends to use the command ID to mark
* inserted/updated/deleted tuples. FALSE means the ID is being fetched
- * for read-only purposes (ie, as a snapshot validity cutoff). See
+ * for read-only purposes (ie, as a snapshot validity cutoff). See
* CommandCounterIncrement() for discussion.
*/
CommandId
@@ -686,7 +686,7 @@ TransactionIdIsCurrentTransactionId(TransactionId xid)
/*
* We always say that BootstrapTransactionId is "not my transaction ID"
- * even when it is (ie, during bootstrap). Along with the fact that
+ * even when it is (ie, during bootstrap). Along with the fact that
* transam.c always treats BootstrapTransactionId as already committed,
* this causes the tqual.c routines to see all tuples as committed, which
* is what we need during bootstrap. (Bootstrap mode only inserts tuples,
@@ -828,7 +828,7 @@ AtStart_Memory(void)
/*
* If this is the first time through, create a private context for
* AbortTransaction to work in. By reserving some space now, we can
- * insulate AbortTransaction from out-of-memory scenarios. Like
+ * insulate AbortTransaction from out-of-memory scenarios. Like
* ErrorContext, we set it up with slow growth rate and a nonzero minimum
* size, so that space will be reserved immediately.
*/
@@ -931,7 +931,7 @@ AtSubStart_ResourceOwner(void)
Assert(s->parent != NULL);
/*
- * Create a resource owner for the subtransaction. We make it a child of
+ * Create a resource owner for the subtransaction. We make it a child of
* the immediate parent's resource owner.
*/
s->curTransactionOwner =
@@ -951,7 +951,7 @@ AtSubStart_ResourceOwner(void)
* RecordTransactionCommit
*
* Returns latest XID among xact and its children, or InvalidTransactionId
- * if the xact has no XID. (We compute that here just because it's easier.)
+ * if the xact has no XID. (We compute that here just because it's easier.)
*/
static TransactionId
RecordTransactionCommit(void)
@@ -996,7 +996,7 @@ RecordTransactionCommit(void)
/*
* If we didn't create XLOG entries, we're done here; otherwise we
- * should flush those entries the same as a commit record. (An
+ * should flush those entries the same as a commit record. (An
* example of a possible record that wouldn't cause an XID to be
* assigned is a sequence advance record due to nextval() --- we want
* to flush that to disk before reporting commit.)
@@ -1013,7 +1013,7 @@ RecordTransactionCommit(void)
BufmgrCommit();
/*
- * Mark ourselves as within our "commit critical section". This
+ * Mark ourselves as within our "commit critical section". This
* forces any concurrent checkpoint to wait until we've updated
* pg_clog. Without this, it is possible for the checkpoint to set
* REDO after the XLOG record but fail to flush the pg_clog update to
@@ -1021,7 +1021,7 @@ RecordTransactionCommit(void)
* crashes a little later.
*
* Note: we could, but don't bother to, set this flag in
- * RecordTransactionAbort. That's because loss of a transaction abort
+ * RecordTransactionAbort. That's because loss of a transaction abort
* is noncritical; the presumption would be that it aborted, anyway.
*
* It's safe to change the delayChkpt flag of our own backend without
@@ -1122,15 +1122,15 @@ RecordTransactionCommit(void)
/*
* Check if we want to commit asynchronously. We can allow the XLOG flush
* to happen asynchronously if synchronous_commit=off, or if the current
- * transaction has not performed any WAL-logged operation. The latter
+ * transaction has not performed any WAL-logged operation. The latter
* case can arise if the current transaction wrote only to temporary
- * and/or unlogged tables. In case of a crash, the loss of such a
+ * and/or unlogged tables. In case of a crash, the loss of such a
* transaction will be irrelevant since temp tables will be lost anyway,
* and unlogged tables will be truncated. (Given the foregoing, you might
* think that it would be unnecessary to emit the XLOG record at all in
* this case, but we don't currently try to do that. It would certainly
* cause problems at least in Hot Standby mode, where the
- * KnownAssignedXids machinery requires tracking every XID assignment. It
+ * KnownAssignedXids machinery requires tracking every XID assignment. It
* might be OK to skip it only when wal_level < hot_standby, but for now
* we don't.)
*
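
The flush decision this paragraph describes fits in a few lines: flush synchronously only when the user asked for it and the transaction actually wrote WAL that matters; a temp/unlogged-only transaction can always take the async path. A sketch with hypothetical stubs in place of XLogFlush and XLogSetAsyncXactLSN.

#include <stdbool.h>
#include <stdint.h>

typedef uint64_t XLogRecPtr;

typedef enum
{
    SYNC_COMMIT_OFF_SKETCH,
    SYNC_COMMIT_ON_SKETCH
} sync_commit_sketch;

static void flush_wal_through_sketch(XLogRecPtr lsn)     { (void) lsn; }
static void note_async_commit_lsn_sketch(XLogRecPtr lsn) { (void) lsn; }

static void
record_commit_sketch(sync_commit_sketch synchronous_commit,
                     bool wrote_wal, XLogRecPtr commit_lsn)
{
    if (synchronous_commit == SYNC_COMMIT_ON_SKETCH && wrote_wal)
        flush_wal_through_sketch(commit_lsn);      /* wait for the fsync */
    else
        note_async_commit_lsn_sketch(commit_lsn);  /* walwriter flushes later */
}
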
@@ -1377,7 +1377,7 @@ AtSubCommit_childXids(void)
* RecordTransactionAbort
*
* Returns latest XID among xact and its children, or InvalidTransactionId
- * if the xact has no XID. (We compute that here just because it's easier.)
+ * if the xact has no XID. (We compute that here just because it's easier.)
*/
static TransactionId
RecordTransactionAbort(bool isSubXact)
@@ -1394,7 +1394,7 @@ RecordTransactionAbort(bool isSubXact)
/*
* If we haven't been assigned an XID, nobody will care whether we aborted
- * or not. Hence, we're done in that case. It does not matter if we have
+ * or not. Hence, we're done in that case. It does not matter if we have
* rels to delete (note that this routine is not responsible for actually
* deleting 'em). We cannot have any child XIDs, either.
*/
@@ -1410,7 +1410,7 @@ RecordTransactionAbort(bool isSubXact)
* We have a valid XID, so we should write an ABORT record for it.
*
* We do not flush XLOG to disk here, since the default assumption after a
- * crash would be that we aborted, anyway. For the same reason, we don't
+ * crash would be that we aborted, anyway. For the same reason, we don't
* need to worry about interlocking against checkpoint start.
*/
@@ -1578,7 +1578,7 @@ AtSubAbort_childXids(void)
/*
* We keep the child-XID arrays in TopTransactionContext (see
- * AtSubCommit_childXids). This means we'd better free the array
+ * AtSubCommit_childXids). This means we'd better free the array
* explicitly at abort to avoid leakage.
*/
if (s->childXids != NULL)
@@ -1755,7 +1755,7 @@ StartTransaction(void)
VirtualXactLockTableInsert(vxid);
/*
- * Advertise it in the proc array. We assume assignment of
+ * Advertise it in the proc array. We assume assignment of
* LocalTransactionID is atomic, and the backendId should be set already.
*/
Assert(MyProc->backendId == vxid.backendId);
@@ -1852,7 +1852,7 @@ CommitTransaction(void)
/*
* The remaining actions cannot call any user-defined code, so it's safe
- * to start shutting down within-transaction services. But note that most
+ * to start shutting down within-transaction services. But note that most
* of this stuff could still throw an error, which would switch us into
* the transaction-abort path.
*/
@@ -2057,7 +2057,7 @@ PrepareTransaction(void)
/*
* The remaining actions cannot call any user-defined code, so it's safe
- * to start shutting down within-transaction services. But note that most
+ * to start shutting down within-transaction services. But note that most
* of this stuff could still throw an error, which would switch us into
* the transaction-abort path.
*/
@@ -2177,7 +2177,7 @@ PrepareTransaction(void)
XactLastRecEnd = 0;
/*
- * Let others know about no transaction in progress by me. This has to be
+ * Let others know about no transaction in progress by me. This has to be
* done *after* the prepared transaction has been marked valid, else
* someone may think it is unlocked and recyclable.
*/
@@ -2186,7 +2186,7 @@ PrepareTransaction(void)
/*
* This is all post-transaction cleanup. Note that if an error is raised
* here, it's too late to abort the transaction. This should be just
- * noncritical resource releasing. See notes in CommitTransaction.
+ * noncritical resource releasing. See notes in CommitTransaction.
*/
CallXactCallbacks(XACT_EVENT_PREPARE);
@@ -2364,7 +2364,7 @@ AbortTransaction(void)
ProcArrayEndTransaction(MyProc, latestXid);
/*
- * Post-abort cleanup. See notes in CommitTransaction() concerning
+ * Post-abort cleanup. See notes in CommitTransaction() concerning
* ordering. We can skip all of it if the transaction failed before
* creating a resource owner.
*/
@@ -2599,7 +2599,7 @@ CommitTransactionCommand(void)
/*
* Here we were in a perfectly good transaction block but the user
- * told us to ROLLBACK anyway. We have to abort the transaction
+ * told us to ROLLBACK anyway. We have to abort the transaction
* and then clean up.
*/
case TBLOCK_ABORT_PENDING:
@@ -2619,7 +2619,7 @@ CommitTransactionCommand(void)
/*
* We were just issued a SAVEPOINT inside a transaction block.
- * Start a subtransaction. (DefineSavepoint already did
+ * Start a subtransaction. (DefineSavepoint already did
* PushTransaction, so as to have someplace to put the SUBBEGIN
* state.)
*/
@@ -2823,7 +2823,7 @@ AbortCurrentTransaction(void)
break;
/*
- * Here, we failed while trying to COMMIT. Clean up the
+ * Here, we failed while trying to COMMIT. Clean up the
* transaction and return to idle state (we do not want to stay in
* the transaction).
*/
@@ -2885,7 +2885,7 @@ AbortCurrentTransaction(void)
/*
* If we failed while trying to create a subtransaction, clean up
- * the broken subtransaction and abort the parent. The same
+ * the broken subtransaction and abort the parent. The same
* applies if we get a failure while ending a subtransaction.
*/
case TBLOCK_SUBBEGIN:
@@ -3417,7 +3417,7 @@ UserAbortTransactionBlock(void)
break;
/*
- * We are inside a subtransaction. Mark everything up to top
+ * We are inside a subtransaction. Mark everything up to top
* level as exitable.
*/
case TBLOCK_SUBINPROGRESS:
@@ -3551,7 +3551,7 @@ ReleaseSavepoint(List *options)
break;
/*
- * We are in a non-aborted subtransaction. This is the only valid
+ * We are in a non-aborted subtransaction. This is the only valid
* case.
*/
case TBLOCK_SUBINPROGRESS:
@@ -3608,7 +3608,7 @@ ReleaseSavepoint(List *options)
/*
* Mark "commit pending" all subtransactions up to the target
- * subtransaction. The actual commits will happen when control gets to
+ * subtransaction. The actual commits will happen when control gets to
* CommitTransactionCommand.
*/
xact = CurrentTransactionState;
@@ -3707,7 +3707,7 @@ RollbackToSavepoint(List *options)
/*
* Mark "abort pending" all subtransactions up to the target
- * subtransaction. The actual aborts will happen when control gets to
+ * subtransaction. The actual aborts will happen when control gets to
* CommitTransactionCommand.
*/
xact = CurrentTransactionState;
@@ -4114,7 +4114,7 @@ CommitSubTransaction(void)
CommandCounterIncrement();
/*
- * Prior to 8.4 we marked subcommit in clog at this point. We now only
+ * Prior to 8.4 we marked subcommit in clog at this point. We now only
* perform that step, if required, as part of the atomic update of the
* whole transaction tree at top level commit or abort.
*/
@@ -4573,7 +4573,7 @@ TransStateAsString(TransState state)
/*
* xactGetCommittedChildren
*
- * Gets the list of committed children of the current transaction. The return
+ * Gets the list of committed children of the current transaction. The return
* value is the number of child transactions. *ptr is set to point to an
* array of TransactionIds. The array is allocated in TopTransactionContext;
* the caller should *not* pfree() it (this is a change from pre-8.4 code!).
diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c
index 9deadf9ed9e..a437d266a3b 100644
--- a/src/backend/access/transam/xlog.c
+++ b/src/backend/access/transam/xlog.c
@@ -94,7 +94,7 @@ bool XLOG_DEBUG = false;
* future XLOG segment as long as there aren't already XLOGfileslop future
* segments; else we'll delete it. This could be made a separate GUC
* variable, but at present I think it's sufficient to hardwire it as
- * 2*CheckPointSegments+1. Under normal conditions, a checkpoint will free
+ * 2*CheckPointSegments+1. Under normal conditions, a checkpoint will free
* no more than 2*CheckPointSegments log segments, and we want to recycle all
* of them; the +1 allows boundary cases to happen without wasting a
* delete/create-segment cycle.
@@ -183,7 +183,7 @@ static bool LocalHotStandbyActive = false;
* 0: unconditionally not allowed to insert XLOG
* -1: must check RecoveryInProgress(); disallow until it is false
* Most processes start with -1 and transition to 1 after seeing that recovery
- * is not in progress. But we can also force the value for special cases.
+ * is not in progress. But we can also force the value for special cases.
* The coding in XLogInsertAllowed() depends on the first two of these states
* being numerically the same as bool true and false.
*/
@@ -248,7 +248,7 @@ static bool recoveryStopAfter;
*
* expectedTLEs: a list of TimeLineHistoryEntries for recoveryTargetTLI and the timelines of
* its known parents, newest first (so recoveryTargetTLI is always the
- * first list member). Only these TLIs are expected to be seen in the WAL
+ * first list member). Only these TLIs are expected to be seen in the WAL
* segments we read, and indeed only these TLIs will be considered as
* candidate WAL files to open at all.
*
@@ -277,9 +277,9 @@ XLogRecPtr XactLastRecEnd = InvalidXLogRecPtr;
/*
* RedoRecPtr is this backend's local copy of the REDO record pointer
* (which is almost but not quite the same as a pointer to the most recent
- * CHECKPOINT record). We update this from the shared-memory copy,
+ * CHECKPOINT record). We update this from the shared-memory copy,
* XLogCtl->Insert.RedoRecPtr, whenever we can safely do so (ie, when we
- * hold the Insert lock). See XLogInsert for details. We are also allowed
+ * hold the Insert lock). See XLogInsert for details. We are also allowed
* to update from XLogCtl->Insert.RedoRecPtr if we hold the info_lck;
* see GetRedoRecPtr. A freshly spawned backend obtains the value during
* InitXLOGAccess.
@@ -1428,10 +1428,10 @@ AdvanceXLInsertBuffer(bool new_segment)
* WAL records beginning in this page have removable backup blocks. This
* allows the WAL archiver to know whether it is safe to compress archived
* WAL data by transforming full-block records into the non-full-block
- * format. It is sufficient to record this at the page level because we
+ * format. It is sufficient to record this at the page level because we
* force a page switch (in fact a segment switch) when starting a backup,
* so the flag will be off before any records can be written during the
- * backup. At the end of a backup, the last page will be marked as all
+ * backup. At the end of a backup, the last page will be marked as all
* unsafe when perhaps only part is unsafe, but at worst the archiver
* would miss the opportunity to compress a few records.
*/
@@ -1711,7 +1711,7 @@ XLogWrite(XLogwrtRqst WriteRqst, bool flexible, bool xlog_switch)
{
/*
* Could get here without iterating above loop, in which case we might
- * have no open file or the wrong one. However, we do not need to
+ * have no open file or the wrong one. However, we do not need to
* fsync more than one file.
*/
if (sync_method != SYNC_METHOD_OPEN &&
@@ -1780,7 +1780,7 @@ XLogSetAsyncXactLSN(XLogRecPtr asyncXactLSN)
/*
* If the WALWriter is sleeping, we should kick it to make it come out of
- * low-power mode. Otherwise, determine whether there's a full page of
+ * low-power mode. Otherwise, determine whether there's a full page of
* WAL available to write.
*/
if (!sleeping)
@@ -2058,9 +2058,9 @@ XLogFlush(XLogRecPtr record)
* We normally flush only completed blocks; but if there is nothing to do on
* that basis, we check for unflushed async commits in the current incomplete
* block, and flush through the latest one of those. Thus, if async commits
- * are not being used, we will flush complete blocks only. We can guarantee
+ * are not being used, we will flush complete blocks only. We can guarantee
* that async commits reach disk after at most three cycles; normally only
- * one or two. (When flushing complete blocks, we allow XLogWrite to write
+ * one or two. (When flushing complete blocks, we allow XLogWrite to write
* "flexibly", meaning it can stop at the end of the buffer ring; this makes a
* difference only with very high load or long wal_writer_delay, but imposes
* one extra cycle for the worst case for async commits.)
@@ -2228,7 +2228,7 @@ XLogNeedsFlush(XLogRecPtr record)
* log, seg: identify segment to be created/opened.
*
* *use_existent: if TRUE, OK to use a pre-existing file (else, any
- * pre-existing file will be deleted). On return, TRUE if a pre-existing
+ * pre-existing file will be deleted). On return, TRUE if a pre-existing
* file was used.
*
* use_lock: if TRUE, acquire ControlFileLock while moving file into
@@ -2295,11 +2295,11 @@ XLogFileInit(XLogSegNo logsegno, bool *use_existent, bool use_lock)
errmsg("could not create file \"%s\": %m", tmppath)));
/*
- * Zero-fill the file. We have to do this the hard way to ensure that all
+ * Zero-fill the file. We have to do this the hard way to ensure that all
* the file space has really been allocated --- on platforms that allow
* "holes" in files, just seeking to the end doesn't allocate intermediate
* space. This way, we know that we have all the space and (after the
- * fsync below) that all the indirect blocks are down on disk. Therefore,
+ * fsync below) that all the indirect blocks are down on disk. Therefore,
* fdatasync(2) or O_DSYNC will be sufficient to sync future writes to the
* log file.
*
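
The "hard way" zero-fill is a pattern worth spelling out: write real zero blocks end to end so every data and indirect block is allocated now, then fsync once; after that, fdatasync-style syncs of future writes suffice. A standalone POSIX sketch with a hardwired 16MB segment size and error handling reduced to -1 returns.

#include <fcntl.h>
#include <string.h>
#include <sys/types.h>
#include <unistd.h>

#define XLOG_BLCKSZ_SKETCH   8192
#define XLOG_SEG_SIZE_SKETCH (16 * 1024 * 1024)

static int
zero_fill_segment_sketch(const char *tmppath)
{
    char zbuf[XLOG_BLCKSZ_SKETCH];
    int fd = open(tmppath, O_WRONLY | O_CREAT | O_EXCL, 0600);

    if (fd < 0)
        return -1;
    memset(zbuf, 0, sizeof(zbuf));
    for (off_t off = 0; off < XLOG_SEG_SIZE_SKETCH; off += sizeof(zbuf))
    {
        if (write(fd, zbuf, sizeof(zbuf)) != (ssize_t) sizeof(zbuf))
        {
            close(fd);
            unlink(tmppath);    /* don't leave a half-filled file behind */
            return -1;
        }
    }
    if (fsync(fd) < 0 || close(fd) < 0)
        return -1;
    return 0;
}
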
@@ -2391,7 +2391,7 @@ XLogFileInit(XLogSegNo logsegno, bool *use_existent, bool use_lock)
* a different timeline)
*
* Currently this is only used during recovery, and so there are no locking
- * considerations. But we should be just as tense as XLogFileInit to avoid
+ * considerations. But we should be just as tense as XLogFileInit to avoid
* emplacing a bogus file.
*/
static void
@@ -2709,13 +2709,13 @@ XLogFileReadAnyTLI(XLogSegNo segno, int emode, int source)
* the timelines listed in expectedTLEs.
*
* We expect curFileTLI on entry to be the TLI of the preceding file in
- * sequence, or 0 if there was no predecessor. We do not allow curFileTLI
+ * sequence, or 0 if there was no predecessor. We do not allow curFileTLI
* to go backwards; this prevents us from picking up the wrong file when a
* parent timeline extends to higher segment numbers than the child we
* want to read.
*
* If we haven't read the timeline history file yet, read it now, so that
- * we know which TLIs to scan. We don't save the list in expectedTLEs,
+ * we know which TLIs to scan. We don't save the list in expectedTLEs,
* however, unless we actually find a valid segment. That way if there is
* neither a timeline history file nor a WAL segment in the archive, and
* streaming replication is set up, we'll read the timeline history file
@@ -2779,7 +2779,7 @@ XLogFileClose(void)
/*
* WAL segment files will not be re-read in normal operation, so we advise
- * the OS to release any cached pages. But do not do so if WAL archiving
+ * the OS to release any cached pages. But do not do so if WAL archiving
* or streaming is active, because archiver and walsender process could
* use the cache to read the WAL segment.
*/
@@ -2924,7 +2924,7 @@ RemoveOldXlogFiles(XLogSegNo segno, XLogRecPtr endptr)
{
/*
* We ignore the timeline part of the XLOG segment identifiers in
- * deciding whether a segment is still needed. This ensures that we
+ * deciding whether a segment is still needed. This ensures that we
* won't prematurely remove a segment from a parent timeline. We could
* probably be a little more proactive about removing segments of
* non-parent timelines, but that would be a whole lot more
@@ -3458,7 +3458,7 @@ rescanLatestTimeLine(void)
* I/O routines for pg_control
*
* *ControlFile is a buffer in shared memory that holds an image of the
- * contents of pg_control. WriteControlFile() initializes pg_control
+ * contents of pg_control. WriteControlFile() initializes pg_control
* given a preloaded buffer, ReadControlFile() loads the buffer from
* the pg_control file (during postmaster or standalone-backend startup),
* and UpdateControlFile() rewrites pg_control after we modify xlog state.
@@ -3863,7 +3863,7 @@ check_wal_buffers(int *newval, void **extra, GucSource source)
{
/*
* If we haven't yet changed the boot_val default of -1, just let it
- * be. We'll fix it when XLOGShmemSize is called.
+ * be. We'll fix it when XLOGShmemSize is called.
*/
if (XLOGbuffers == -1)
return true;
@@ -4367,7 +4367,7 @@ readRecoveryCommandFile(void)
/*
* If user specified recovery_target_timeline, validate it or compute the
- * "latest" value. We can't do this until after we've gotten the restore
+ * "latest" value. We can't do this until after we've gotten the restore
* command and set InArchiveRecovery, because we need to fetch timeline
* history files from the archive.
*/
@@ -4926,7 +4926,7 @@ StartupXLOG(void)
ValidateXLOGDirectoryStructure();
/*
- * Clear out any old relcache cache files. This is *necessary* if we do
+ * Clear out any old relcache cache files. This is *necessary* if we do
* any WAL replay, since that would probably result in the cache files
* being out of sync with database reality. In theory we could leave them
* in place if the database had been cleanly shut down, but it seems
@@ -5805,8 +5805,8 @@ StartupXLOG(void)
/*
* Consider whether we need to assign a new timeline ID.
*
- * If we are doing an archive recovery, we always assign a new ID. This
- * handles a couple of issues. If we stopped short of the end of WAL
+ * If we are doing an archive recovery, we always assign a new ID. This
+ * handles a couple of issues. If we stopped short of the end of WAL
* during recovery, then we are clearly generating a new timeline and must
* assign it a unique new ID. Even if we ran to the end, modifying the
* current last segment is problematic because it may result in trying to
@@ -5879,7 +5879,7 @@ StartupXLOG(void)
/*
* Tricky point here: readBuf contains the *last* block that the LastRec
- * record spans, not the one it starts in. The last block is indeed the
+ * record spans, not the one it starts in. The last block is indeed the
* one we want to use.
*/
if (EndOfLog % XLOG_BLCKSZ == 0)
@@ -5915,7 +5915,7 @@ StartupXLOG(void)
* Write.curridx must point to the *next* page (see XLogWrite()).
*
* Note: it might seem we should do AdvanceXLInsertBuffer() here, but
- * this is sufficient. The first actual attempt to insert a log
+ * this is sufficient. The first actual attempt to insert a log
* record will advance the insert state.
*/
XLogCtl->Write.curridx = NextBufIdx(0);
@@ -6094,7 +6094,7 @@ StartupXLOG(void)
XLogReportParameters();
/*
- * All done. Allow backends to write WAL. (Although the bool flag is
+ * All done. Allow backends to write WAL. (Although the bool flag is
* probably atomic in itself, we use the info_lck here to ensure that
* there are no race conditions concerning visibility of other recent
* updates to shared memory.)
@@ -6254,7 +6254,7 @@ RecoveryInProgress(void)
/*
* Initialize TimeLineID and RedoRecPtr when we discover that recovery
* is finished. InitPostgres() relies upon this behaviour to ensure
- * that InitXLOGAccess() is called at backend startup. (If you change
+ * that InitXLOGAccess() is called at backend startup. (If you change
* this, see also LocalSetXLogInsertAllowed.)
*/
if (!LocalRecoveryInProgress)
@@ -6886,7 +6886,7 @@ CreateCheckPoint(int flags)
/*
* If this isn't a shutdown or forced checkpoint, and we have not inserted
* any XLOG records since the start of the last checkpoint, skip the
- * checkpoint. The idea here is to avoid inserting duplicate checkpoints
+ * checkpoint. The idea here is to avoid inserting duplicate checkpoints
* when the system is idle. That wastes log space, and more importantly it
* exposes us to possible loss of both current and previous checkpoint
* records if the machine crashes just as we're writing the update.
@@ -6991,7 +6991,7 @@ CreateCheckPoint(int flags)
* performing those groups of actions.
*
* One example is end of transaction, so we must wait for any transactions
- * that are currently in commit critical sections. If an xact inserted
+ * that are currently in commit critical sections. If an xact inserted
* its commit record into XLOG just before the REDO point, then a crash
* restart from the REDO point would not replay that record, which means
* that our flushing had better include the xact's update of pg_clog. So
@@ -7186,9 +7186,9 @@ CreateCheckPoint(int flags)
/*
* Truncate pg_subtrans if possible. We can throw away all data before
- * the oldest XMIN of any running transaction. No future transaction will
+ * the oldest XMIN of any running transaction. No future transaction will
* attempt to reference any pg_subtrans entry older than that (see Asserts
- * in subtrans.c). During recovery, though, we mustn't do this because
+ * in subtrans.c). During recovery, though, we mustn't do this because
* StartupSUBTRANS hasn't been called yet.
*/
if (!RecoveryInProgress())
@@ -7558,9 +7558,9 @@ CreateRestartPoint(int flags)
/*
* Truncate pg_subtrans if possible. We can throw away all data before
- * the oldest XMIN of any running transaction. No future transaction will
+ * the oldest XMIN of any running transaction. No future transaction will
* attempt to reference any pg_subtrans entry older than that (see Asserts
- * in subtrans.c). When hot standby is disabled, though, we mustn't do
+ * in subtrans.c). When hot standby is disabled, though, we mustn't do
* this because StartupSUBTRANS hasn't been called yet.
*/
if (EnableHotStandby)
@@ -7633,7 +7633,7 @@ XLogPutNextOid(Oid nextOid)
* We need not flush the NEXTOID record immediately, because any of the
* just-allocated OIDs could only reach disk as part of a tuple insert or
* update that would have its own XLOG record that must follow the NEXTOID
- * record. Therefore, the standard buffer LSN interlock applied to those
+ * record. Therefore, the standard buffer LSN interlock applied to those
* records will ensure no such OID reaches disk before the NEXTOID record
* does.
*
@@ -7969,7 +7969,7 @@ xlog_redo(XLogRecPtr lsn, XLogRecord *record)
/*
* We used to try to take the maximum of ShmemVariableCache->nextOid
* and the recorded nextOid, but that fails if the OID counter wraps
- * around. Since no OID allocation should be happening during replay
+ * around. Since no OID allocation should be happening during replay
* anyway, better to just believe the record exactly. We still take
* OidGenLock while setting the variable, just in case.
*/
@@ -8310,7 +8310,7 @@ get_sync_bit(int method)
/*
* Optimize writes by bypassing kernel cache with O_DIRECT when using
- * O_SYNC/O_FSYNC and O_DSYNC. But only if archiving and streaming are
+ * O_SYNC/O_FSYNC and O_DSYNC. But only if archiving and streaming are
* disabled, otherwise the archive command or walsender process will read
* the WAL soon after writing it, which is guaranteed to cause a physical
* read if we bypassed the kernel cache. We also skip the
@@ -8514,7 +8514,7 @@ do_pg_start_backup(const char *backupidstr, bool fast, TimeLineID *starttli_p,
* during an on-line backup even if not doing so at other times, because
* it's quite possible for the backup dump to obtain a "torn" (partially
* written) copy of a database page if it reads the page concurrently with
- * our write to the same page. This can be fixed as long as the first
+ * our write to the same page. This can be fixed as long as the first
* write to the page in the WAL sequence is a full-page write. Hence, we
* turn on forcePageWrites and then force a CHECKPOINT, to ensure there
* are no dirty pages in shared memory that might get dumped while the
@@ -8558,7 +8558,7 @@ do_pg_start_backup(const char *backupidstr, bool fast, TimeLineID *starttli_p,
* old timeline IDs. That would otherwise happen if you called
* pg_start_backup() right after restoring from a PITR archive: the
* first WAL segment containing the startup checkpoint has pages in
- * the beginning with the old timeline ID. That can cause trouble at
+ * the beginning with the old timeline ID. That can cause trouble at
* recovery: we won't have a history file covering the old timeline if
* pg_xlog directory was not included in the base backup and the WAL
* archive was cleared too before starting the backup.
@@ -8581,7 +8581,7 @@ do_pg_start_backup(const char *backupidstr, bool fast, TimeLineID *starttli_p,
bool checkpointfpw;
/*
- * Force a CHECKPOINT. Aside from being necessary to prevent torn
+ * Force a CHECKPOINT. Aside from being necessary to prevent torn
* page problems, this guarantees that two successive backup runs
* will have different checkpoint positions and hence different
* history file names, even if nothing happened in between.
@@ -9234,7 +9234,7 @@ GetOldestRestartPoint(XLogRecPtr *oldrecptr, TimeLineID *oldtli)
*
* If we see a backup_label during recovery, we assume that we are recovering
* from a backup dump file, and we therefore roll forward from the checkpoint
- * identified by the label file, NOT what pg_control says. This avoids the
+ * identified by the label file, NOT what pg_control says. This avoids the
* problem that pg_control might have been archived one or more checkpoints
* later than the start of the dump, and so if we rely on it as the start
* point, we will fail to restore a consistent database state.
@@ -9839,11 +9839,11 @@ WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess,
if (havedata)
{
/*
- * Great, streamed far enough. Open the file if it's
+ * Great, streamed far enough. Open the file if it's
* not open already. Also read the timeline history
* file if we haven't initialized timeline history
* yet; it should be streamed over and present in
- * pg_xlog by now. Use XLOG_FROM_STREAM so that
+ * pg_xlog by now. Use XLOG_FROM_STREAM so that
* source info is set correctly and XLogReceiptTime
* isn't changed.
*/
@@ -9916,9 +9916,9 @@ WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess,
* in the current WAL page, previously read by XLogPageRead().
*
* 'emode' is the error mode that would be used to report a file-not-found
- * or legitimate end-of-WAL situation. Generally, we use it as-is, but if
+ * or legitimate end-of-WAL situation. Generally, we use it as-is, but if
* we're retrying the exact same record that we've tried previously, only
- * complain the first time to keep the noise down. However, we only do when
- * complain the first time to keep the noise down. However, we only do so when
+ * complain the first time to keep the noise down. However, we only do so when
* or in records streamed from master. Files in the archive should be complete,
* and we should never hit the end of WAL because we stop and wait for more WAL
diff --git a/src/backend/access/transam/xlogreader.c b/src/backend/access/transam/xlogreader.c
index f1b52bfe4fa..7c1a201b8ed 100644
--- a/src/backend/access/transam/xlogreader.c
+++ b/src/backend/access/transam/xlogreader.c
@@ -199,7 +199,7 @@ XLogReadRecord(XLogReaderState *state, XLogRecPtr RecPtr, char **errormsg)
randAccess = true;
/*
- * RecPtr is pointing to end+1 of the previous WAL record. If we're
+ * RecPtr is pointing to end+1 of the previous WAL record. If we're
* at a page boundary, no more records can fit on the current page. We
* must skip over the page header, but we can't do that until we've
* read in the page, since the header size is variable.
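
The boundary rule in this hunk, sketched: a RecPtr landing exactly on a page boundary must be bumped past the page header, and the first page of each segment carries the long header. The sizes below are illustrative stand-ins for SizeOfXLogShortPHD/SizeOfXLogLongPHD; note the real reader takes the size from the XLP_LONG_HEADER flag on the page it just read rather than computing it from the address alone.

#include <stdint.h>

typedef uint64_t XLogRecPtr;

#define XLOG_BLCKSZ_SKETCH   8192
#define XLOG_SEG_SIZE_SKETCH (16 * 1024 * 1024)
#define SHORT_PHD_SKETCH     24     /* stand-in header sizes */
#define LONG_PHD_SKETCH      40

static XLogRecPtr
skip_page_header_sketch(XLogRecPtr ptr)
{
    if (ptr % XLOG_BLCKSZ_SKETCH == 0)
        ptr += (ptr % XLOG_SEG_SIZE_SKETCH == 0)
            ? LONG_PHD_SKETCH       /* first page of a segment */
            : SHORT_PHD_SKETCH;
    return ptr;
}
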
@@ -277,7 +277,7 @@ XLogReadRecord(XLogReaderState *state, XLogRecPtr RecPtr, char **errormsg)
/*
* If the whole record header is on this page, validate it immediately.
* Otherwise do just a basic sanity check on xl_tot_len, and validate the
- * rest of the header after reading it from the next page. The xl_tot_len
+ * rest of the header after reading it from the next page. The xl_tot_len
* check is necessary here to ensure that we enter the "Need to reassemble
* record" code path below; otherwise we might fail to apply
* ValidXLogRecordHeader at all.
@@ -572,7 +572,7 @@ err:
* Validate an XLOG record header.
*
* This is just a convenience subroutine to avoid duplicated code in
- * XLogReadRecord. It's not intended for use from anywhere else.
+ * XLogReadRecord. It's not intended for use from anywhere else.
*/
static bool
ValidXLogRecordHeader(XLogReaderState *state, XLogRecPtr RecPtr,
@@ -661,7 +661,7 @@ ValidXLogRecordHeader(XLogReaderState *state, XLogRecPtr RecPtr,
* data to read in) until we've checked the CRCs.
*
* We assume all of the record (that is, xl_tot_len bytes) has been read
- * into memory at *record. Also, ValidXLogRecordHeader() has accepted the
+ * into memory at *record. Also, ValidXLogRecordHeader() has accepted the
* record's header, which means in particular that xl_tot_len is at least
* SizeOfXlogRecord, so it is safe to fetch xl_len.
*/
diff --git a/src/backend/bootstrap/bootstrap.c b/src/backend/bootstrap/bootstrap.c
index cb671b43468..e954762e549 100644
--- a/src/backend/bootstrap/bootstrap.c
+++ b/src/backend/bootstrap/bootstrap.c
@@ -81,7 +81,7 @@ int numattr; /* number of attributes for cur. rel */
* in the core "bootstrapped" catalogs.
*
* XXX several of these input/output functions do catalog scans
- * (e.g., F_REGPROCIN scans pg_proc). this obviously creates some
+ * (e.g., F_REGPROCIN scans pg_proc). this obviously creates some
* order dependencies in the catalog creation process.
*/
struct typinfo
@@ -379,9 +379,9 @@ AuxiliaryProcessMain(int argc, char *argv[])
#endif
/*
- * Assign the ProcSignalSlot for an auxiliary process. Since it
+ * Assign the ProcSignalSlot for an auxiliary process. Since it
* doesn't have a BackendId, the slot is statically allocated based on
- * the auxiliary process type (MyAuxProcType). Backends use slots
+ * the auxiliary process type (MyAuxProcType). Backends use slots
* indexed in the range from 1 to MaxBackends (inclusive), so we use
* MaxBackends + AuxProcType + 1 as the index of the slot for an
* auxiliary process.
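
The slot arithmetic described here is tiny but easy to get off by one, so as a sketch: backends occupy slots 1..MaxBackends, and auxiliary process type N lands at MaxBackends + N + 1. The enum values are illustrative, not the backend's AuxProcType ordering.

enum aux_type_sketch
{
    CHECKPOINTER_SKETCH = 0,
    BGWRITER_SKETCH,
    WALWRITER_SKETCH
};

/* Slot 0 is unused; 1..max_backends belong to regular backends. */
static int
aux_procsignal_slot_sketch(int max_backends, enum aux_type_sketch aux)
{
    return max_backends + (int) aux + 1;
}

With MaxBackends = 100, the first auxiliary type gets slot 101, the next 102, and so on, so the two ranges can never collide.
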
@@ -566,7 +566,7 @@ bootstrap_signals(void)
}
/*
- * Begin shutdown of an auxiliary process. This is approximately the equivalent
+ * Begin shutdown of an auxiliary process. This is approximately the equivalent
* of ShutdownPostgres() in postinit.c. We can't run transactions in an
* auxiliary process, so most of the work of AbortTransaction() is not needed,
* but we do need to make sure we've released any LWLocks we are holding.
@@ -881,7 +881,7 @@ cleanup(void)
* and not an OID at all, until the first reference to a type not known in
* TypInfo[]. At that point it will read and cache pg_type in the Typ array,
* and subsequently return a real OID (and set the global pointer Ap to
- * point at the found row in Typ). So caller must check whether Typ is
+ * point at the found row in Typ). So caller must check whether Typ is
* still NULL to determine what the return value is!
* ----------------
*/
@@ -1078,9 +1078,9 @@ MapArrayTypeName(char *s)
*
* At bootstrap time, we define a bunch of indexes on system catalogs.
* We postpone actually building the indexes until just before we're
- * finished with initialization, however. This is because the indexes
+ * finished with initialization, however. This is because the indexes
* themselves have catalog entries, and those have to be included in the
- * indexes on those catalogs. Doing it in two phases is the simplest
+ * indexes on those catalogs. Doing it in two phases is the simplest
* way of making sure the indexes have the right contents at the end.
*/
void
@@ -1093,7 +1093,7 @@ index_register(Oid heap,
/*
* XXX mao 10/31/92 -- don't gc index reldescs, associated info at
- * bootstrap time. we'll declare the indexes now, but want to create them
+ * bootstrap time. we'll declare the indexes now, but want to create them
* later.
*/
diff --git a/src/backend/catalog/aclchk.c b/src/backend/catalog/aclchk.c
index 3a69aaf0a82..39bace3e92a 100644
--- a/src/backend/catalog/aclchk.c
+++ b/src/backend/catalog/aclchk.c
@@ -313,7 +313,7 @@ restrict_and_check_grant(bool is_grant, AclMode avail_goptions, bool all_privs,
/*
* Restrict the operation to what we can actually grant or revoke, and
- * issue a warning if appropriate. (For REVOKE this isn't quite what the
+ * issue a warning if appropriate. (For REVOKE this isn't quite what the
* spec says to do: the spec seems to want a warning only if no privilege
* bits actually change in the ACL. In practice that behavior seems much
* too noisy, as well as inconsistent with the GRANT case.)
@@ -1092,7 +1092,7 @@ SetDefaultACL(InternalDefaultACL *iacls)
/*
* The default for a global entry is the hard-wired default ACL for the
- * particular object type. The default for non-global entries is an empty
+ * particular object type. The default for non-global entries is an empty
* ACL. This must be so because global entries replace the hard-wired
* defaults, while others are added on.
*/
@@ -1662,7 +1662,7 @@ ExecGrant_Attribute(InternalGrant *istmt, Oid relOid, const char *relname,
* If the updated ACL is empty, we can set attacl to null, and maybe even
* avoid an update of the pg_attribute row. This is worth testing because
* we'll come through here multiple times for any relation-level REVOKE,
- * even if there were never any column GRANTs. Note we are assuming that
+ * even if there were never any column GRANTs. Note we are assuming that
* the "default" ACL state for columns is empty.
*/
if (ACL_NUM(new_acl) > 0)
@@ -1787,7 +1787,7 @@ ExecGrant_Relation(InternalGrant *istmt)
{
/*
* Mention the object name because the user needs to know
- * which operations succeeded. This is required because
+ * which operations succeeded. This is required because
* WARNING allows the command to continue.
*/
ereport(WARNING,
@@ -1816,7 +1816,7 @@ ExecGrant_Relation(InternalGrant *istmt)
/*
* Set up array in which we'll accumulate any column privilege bits
- * that need modification. The array is indexed such that entry [0]
+ * that need modification. The array is indexed such that entry [0]
* corresponds to FirstLowInvalidHeapAttributeNumber.
*/
num_col_privileges = pg_class_tuple->relnatts - FirstLowInvalidHeapAttributeNumber + 1;
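
Since system attributes have negative attribute numbers, the array sized above is addressed with an offset, exactly as the comment says. A small standalone illustration, assuming FirstLowInvalidHeapAttributeNumber is -8 (the actual value is version-dependent):

#include <assert.h>

#define FirstLowInvalidHeapAttributeNumber (-8)  /* version-dependent */

/* Map an attribute number (possibly negative, for system columns)
 * onto a zero-based array index. */
static int
col_priv_index(int attnum)
{
    return attnum - FirstLowInvalidHeapAttributeNumber;
}

int
main(void)
{
    assert(col_priv_index(FirstLowInvalidHeapAttributeNumber) == 0);
    assert(col_priv_index(1) == 9);   /* first ordinary column */
    return 0;
}
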
@@ -3507,7 +3507,7 @@ pg_aclmask(AclObjectKind objkind, Oid table_oid, AttrNumber attnum, Oid roleid,
*
* Note: this considers only privileges granted specifically on the column.
* It is caller's responsibility to take relation-level privileges into account
- * as appropriate. (For the same reason, we have no special case for
+ * as appropriate. (For the same reason, we have no special case for
* superuser-ness here.)
*/
AclMode
@@ -3620,12 +3620,12 @@ pg_class_aclmask(Oid table_oid, Oid roleid,
/*
* Deny anyone permission to update a system catalog unless
- * pg_authid.rolcatupdate is set. (This is to let superusers protect
+ * pg_authid.rolcatupdate is set. (This is to let superusers protect
* themselves from themselves.) Also allow it if allowSystemTableMods.
*
* As of 7.4 we have some updatable system views; those shouldn't be
* protected in this way. Assume the view rules can take care of
- * themselves. ACL_USAGE is if we ever have system sequences.
+ * themselves. ACL_USAGE is if we ever have system sequences.
*/
if ((mask & (ACL_INSERT | ACL_UPDATE | ACL_DELETE | ACL_TRUNCATE | ACL_USAGE)) &&
IsSystemClass(classForm) &&
@@ -4328,7 +4328,7 @@ pg_attribute_aclcheck_all(Oid table_oid, Oid roleid, AclMode mode,
ReleaseSysCache(classTuple);
/*
- * Initialize result in case there are no non-dropped columns. We want to
+ * Initialize result in case there are no non-dropped columns. We want to
* report failure in such cases for either value of 'how'.
*/
result = ACLCHECK_NO_PRIV;
diff --git a/src/backend/catalog/catalog.c b/src/backend/catalog/catalog.c
index 41a5da0bd23..ea888448904 100644
--- a/src/backend/catalog/catalog.c
+++ b/src/backend/catalog/catalog.c
@@ -220,7 +220,7 @@ IsReservedName(const char *name)
*
* Hard-wiring this list is pretty grotty, but we really need it so that
* we can compute the locktag for a relation (and then lock it) without
- * having already read its pg_class entry. If we try to retrieve relisshared
+ * having already read its pg_class entry. If we try to retrieve relisshared
* from pg_class with no pre-existing lock, there is a race condition against
* anyone who is concurrently committing a change to the pg_class entry:
* since we read system catalog entries under SnapshotNow, it's possible
@@ -289,7 +289,7 @@ IsSharedRelation(Oid relationId)
* Since the OID is not immediately inserted into the table, there is a
* race condition here; but a problem could occur only if someone else
* managed to cycle through 2^32 OIDs and generate the same OID before we
- * finish inserting our row. This seems unlikely to be a problem. Note
+ * finish inserting our row. This seems unlikely to be a problem. Note
* that if we had to *commit* the row to end the race condition, the risk
* would be rather higher; therefore we use SnapshotDirty in the test,
* so that we will see uncommitted rows.
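
The generate-and-probe loop this comment describes has a simple shape. A standalone sketch under stated assumptions (oid_in_use is a hypothetical stand-in for the index probe the real code performs under SnapshotDirty):

#include <stdbool.h>
#include <stdio.h>

typedef unsigned int Oid;

static Oid next_oid = 16384;

/* Stand-in for the uniqueness probe; under SnapshotDirty the real
 * code also sees uncommitted rows, narrowing the race window. */
static bool
oid_in_use(Oid oid)
{
    return oid == 16384;   /* pretend this one is taken */
}

/* Generate candidate OIDs until one is not already present. */
static Oid
get_new_oid(void)
{
    Oid candidate;

    do
    {
        candidate = next_oid++;
    } while (oid_in_use(candidate));
    return candidate;
}

int
main(void)
{
    printf("%u\n", get_new_oid());   /* prints 16385 */
    return 0;
}
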
@@ -335,7 +335,7 @@ GetNewOid(Relation relation)
* This is exported separately because there are cases where we want to use
* an index that will not be recognized by RelationGetOidIndex: TOAST tables
* have indexes that are usable, but have multiple columns and are on
- * ordinary columns rather than a true OID column. This code will work
+ * ordinary columns rather than a true OID column. This code will work
* anyway, so long as the OID is the index's first column. The caller must
* pass in the actual heap attnum of the OID column, however.
*
diff --git a/src/backend/catalog/dependency.c b/src/backend/catalog/dependency.c
index 69171f8311c..01024f8c6c1 100644
--- a/src/backend/catalog/dependency.c
+++ b/src/backend/catalog/dependency.c
@@ -261,7 +261,7 @@ performDeletion(const ObjectAddress *object,
depRel = heap_open(DependRelationId, RowExclusiveLock);
/*
- * Acquire deletion lock on the target object. (Ideally the caller has
+ * Acquire deletion lock on the target object. (Ideally the caller has
* done this already, but many places are sloppy about it.)
*/
AcquireDeletionLock(object, 0);
@@ -373,7 +373,7 @@ performMultipleDeletions(const ObjectAddresses *objects,
/*
* deleteWhatDependsOn: attempt to drop everything that depends on the
- * specified object, though not the object itself. Behavior is always
+ * specified object, though not the object itself. Behavior is always
* CASCADE.
*
* This is currently used only to clean out the contents of a schema
@@ -399,7 +399,7 @@ deleteWhatDependsOn(const ObjectAddress *object,
depRel = heap_open(DependRelationId, RowExclusiveLock);
/*
- * Acquire deletion lock on the target object. (Ideally the caller has
+ * Acquire deletion lock on the target object. (Ideally the caller has
* done this already, but many places are sloppy about it.)
*/
AcquireDeletionLock(object, 0);
@@ -441,7 +441,7 @@ deleteWhatDependsOn(const ObjectAddress *object,
* Since this function is currently only used to clean out temporary
* schemas, we pass PERFORM_DELETION_INTERNAL here, indicating that
* the operation is an automatic system operation rather than a user
- * action. If, in the future, this function is used for other
+ * action. If, in the future, this function is used for other
* purposes, we might need to revisit this.
*/
deleteOneObject(thisobj, &depRel, PERFORM_DELETION_INTERNAL);
@@ -458,7 +458,7 @@ deleteWhatDependsOn(const ObjectAddress *object,
*
* For every object that depends on the starting object, acquire a deletion
* lock on the object, add it to targetObjects (if not already there),
- * and recursively find objects that depend on it. An object's dependencies
+ * and recursively find objects that depend on it. An object's dependencies
* will be placed into targetObjects before the object itself; this means
* that the finished list's order represents a safe deletion order.
*
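
The ordering property stated above (dependents precede the objects they depend on) is what a post-order traversal produces. A minimal standalone sketch with integer object IDs (hypothetical; the real code also tracks lock state and dependency flags):

#include <stdio.h>

#define MAXOBJ 8

/* dep[i][j] != 0 means object j depends on object i */
static int dep[MAXOBJ][MAXOBJ];
static int visited[MAXOBJ];
static int order[MAXOBJ];
static int norder = 0;

/* Emit everything that depends on obj before obj itself, so the
 * finished list is a safe deletion order. */
static void
visit(int obj)
{
    if (visited[obj])
        return;
    visited[obj] = 1;
    for (int j = 0; j < MAXOBJ; j++)
        if (dep[obj][j])
            visit(j);
    order[norder++] = obj;
}

int
main(void)
{
    dep[0][1] = 1;       /* object 1 depends on object 0 */
    dep[0][2] = 1;       /* object 2 depends on object 0 */
    visit(0);
    for (int i = 0; i < norder; i++)
        printf("drop %d\n", order[i]);   /* 1, 2, then 0 */
    return 0;
}
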
@@ -510,7 +510,7 @@ findDependentObjects(const ObjectAddress *object,
* will not break a loop at an internal dependency: if we enter the loop
* at an "owned" object we will switch and start at the "owning" object
* instead. We could probably hack something up to avoid breaking at an
- * auto dependency, too, if we had to. However there are no known cases
+ * auto dependency, too, if we had to. However there are no known cases
* where that would be necessary.
*/
if (stack_address_present_add_flags(object, flags, stack))
@@ -531,7 +531,7 @@ findDependentObjects(const ObjectAddress *object,
/*
* The target object might be internally dependent on some other object
* (its "owner"), and/or be a member of an extension (also considered its
- * owner). If so, and if we aren't recursing from the owning object, we
+ * owner). If so, and if we aren't recursing from the owning object, we
* have to transform this deletion request into a deletion request of the
* owning object. (We'll eventually recurse back to this object, but the
* owning object has to be visited first so it will be deleted after.) The
@@ -594,7 +594,7 @@ findDependentObjects(const ObjectAddress *object,
/*
* Exception 1a: if the owning object is listed in
* pendingObjects, just release the caller's lock and
- * return. We'll eventually complete the DROP when we
+ * return. We'll eventually complete the DROP when we
* reach that entry in the pending list.
*/
if (pendingObjects &&
@@ -647,7 +647,7 @@ findDependentObjects(const ObjectAddress *object,
* owning object.
*
* First, release caller's lock on this object and get
- * deletion lock on the owning object. (We must release
+ * deletion lock on the owning object. (We must release
* caller's lock to avoid deadlock against a concurrent
* deletion of the owning object.)
*/
@@ -809,7 +809,7 @@ findDependentObjects(const ObjectAddress *object,
systable_endscan(scan);
/*
- * Finally, we can add the target object to targetObjects. Be careful to
+ * Finally, we can add the target object to targetObjects. Be careful to
* include any flags that were passed back down to us from inner recursion
* levels.
*/
@@ -864,7 +864,7 @@ reportDependentObjects(const ObjectAddresses *targetObjects,
/*
* We limit the number of dependencies reported to the client to
* MAX_REPORTED_DEPS, since client software may not deal well with
- * enormous error strings. The server log always gets a full report.
+ * enormous error strings. The server log always gets a full report.
*/
#define MAX_REPORTED_DEPS 100
@@ -897,7 +897,7 @@ reportDependentObjects(const ObjectAddresses *targetObjects,
DEPFLAG_EXTENSION))
{
/*
- * auto-cascades are reported at DEBUG2, not msglevel. We don't
+ * auto-cascades are reported at DEBUG2, not msglevel. We don't
* try to combine them with the regular message because the
* results are too confusing when client_min_messages and
* log_min_messages are different.
@@ -1079,7 +1079,7 @@ deleteOneObject(const ObjectAddress *object, Relation *depRel, int flags)
systable_endscan(scan);
/*
- * Delete shared dependency references related to this object. Again, if
+ * Delete shared dependency references related to this object. Again, if
* subId = 0, remove records for sub-objects too.
*/
deleteSharedDependencyRecordsFor(object->classId, object->objectId,
@@ -1344,13 +1344,13 @@ recordDependencyOnExpr(const ObjectAddress *depender,
* recordDependencyOnSingleRelExpr - find expression dependencies
*
* As above, but only one relation is expected to be referenced (with
- * varno = 1 and varlevelsup = 0). Pass the relation OID instead of a
+ * varno = 1 and varlevelsup = 0). Pass the relation OID instead of a
* range table. An additional frammish is that dependencies on that
* relation (or its component columns) will be marked with 'self_behavior',
* whereas 'behavior' is used for everything else.
*
* NOTE: the caller should ensure that a whole-table dependency on the
- * specified relation is created separately, if one is needed. In particular,
+ * specified relation is created separately, if one is needed. In particular,
* a whole-row Var "relation.*" will not cause this routine to emit any
* dependency item. This is appropriate behavior for subexpressions of an
* ordinary query, so other cases need to cope as necessary.
@@ -1470,7 +1470,7 @@ find_expr_references_walker(Node *node,
/*
* A whole-row Var references no specific columns, so adds no new
- * dependency. (We assume that there is a whole-table dependency
+ * dependency. (We assume that there is a whole-table dependency
* arising from each underlying rangetable entry. While we could
* record such a dependency when finding a whole-row Var that
* references a relation directly, it's quite unclear how to extend
@@ -1529,7 +1529,7 @@ find_expr_references_walker(Node *node,
/*
* If it's a regclass or similar literal referring to an existing
- * object, add a reference to that object. (Currently, only the
+ * object, add a reference to that object. (Currently, only the
* regclass and regconfig cases have any likely use, but we may as
* well handle all the OID-alias datatypes consistently.)
*/
@@ -2122,7 +2122,7 @@ object_address_present_add_flags(const ObjectAddress *object,
{
/*
* We get here if we find a need to delete a column after
- * having already decided to drop its whole table. Obviously
+ * having already decided to drop its whole table. Obviously
* we no longer need to drop the column. But don't plaster
* its flags on the table.
*/
diff --git a/src/backend/catalog/heap.c b/src/backend/catalog/heap.c
index c06427711f2..2ec4e774cdb 100644
--- a/src/backend/catalog/heap.c
+++ b/src/backend/catalog/heap.c
@@ -21,7 +21,7 @@
* the old heap_create_with_catalog, amcreate, and amdestroy.
* those routines will soon call these routines using the function
* manager,
- * just like the poorly named "NewXXX" routines do. The
+ * just like the poorly named "NewXXX" routines do. The
* "New" routines are all going to die soon, once and for all!
* -cim 1/13/91
*
@@ -198,7 +198,7 @@ SystemAttributeDefinition(AttrNumber attno, bool relhasoids)
/*
* If the given name is a system attribute name, return a Form_pg_attribute
- * pointer for a prototype definition. If not, return NULL.
+ * pointer for a prototype definition. If not, return NULL.
*/
Form_pg_attribute
SystemAttributeByName(const char *attname, bool relhasoids)
@@ -519,7 +519,7 @@ CheckAttributeType(const char *attname,
int i;
/*
- * Check for self-containment. Eventually we might be able to allow
+ * Check for self-containment. Eventually we might be able to allow
* this (just return without complaint, if so) but it's not clear how
* many other places would require anti-recursion defenses before it
* would be safe to allow tables to contain their own rowtype.
@@ -582,7 +582,7 @@ CheckAttributeType(const char *attname,
* attribute to insert (but we ignore attacl and attoptions, which are always
* initialized to NULL).
*
- * indstate is the index state for CatalogIndexInsert. It can be passed as
+ * indstate is the index state for CatalogIndexInsert. It can be passed as
* NULL, in which case we'll fetch the necessary info. (Don't do this when
* inserting multiple attributes, because it's a tad more expensive.)
*/
@@ -749,7 +749,7 @@ AddNewAttributeTuples(Oid new_rel_oid,
* Tuple data is taken from new_rel_desc->rd_rel, except for the
* variable-width fields which are not present in a cached reldesc.
* relacl and reloptions are passed in Datum form (to avoid having
- * to reference the data types in heap.h). Pass (Datum) 0 to set them
+ * to reference the data types in heap.h). Pass (Datum) 0 to set them
* to NULL.
* --------------------------------
*/
@@ -808,7 +808,7 @@ InsertPgClassTuple(Relation pg_class_desc,
tup = heap_form_tuple(RelationGetDescr(pg_class_desc), values, nulls);
/*
- * The new tuple must have the oid already chosen for the rel. Sure would
+ * The new tuple must have the oid already chosen for the rel. Sure would
* be embarrassing to do this sort of thing in polite company.
*/
HeapTupleSetOid(tup, new_rel_oid);
@@ -1364,8 +1364,8 @@ heap_create_init_fork(Relation rel)
* RelationRemoveInheritance
*
* Formerly, this routine checked for child relations and aborted the
- * deletion if any were found. Now we rely on the dependency mechanism
- * to check for or delete child relations. By the time we get here,
+ * deletion if any were found. Now we rely on the dependency mechanism
+ * to check for or delete child relations. By the time we get here,
* there are no children and we need only remove any pg_inherits rows
* linking this relation to its parent(s).
*/
@@ -1650,7 +1650,7 @@ RemoveAttrDefault(Oid relid, AttrNumber attnum,
/*
* RemoveAttrDefaultById
*
- * Remove a pg_attrdef entry specified by OID. This is the guts of
+ * Remove a pg_attrdef entry specified by OID. This is the guts of
* attribute-default removal. Note it should be called via performDeletion,
* not directly.
*/
@@ -2057,7 +2057,7 @@ StoreConstraints(Relation rel, List *cooked_constraints, bool is_internal)
/*
* Deparsing of constraint expressions will fail unless the just-created
- * pg_attribute tuples for this relation are made visible. So, bump the
+ * pg_attribute tuples for this relation are made visible. So, bump the
* command counter. CAUTION: this will cause a relcache entry rebuild.
*/
CommandCounterIncrement();
@@ -2109,7 +2109,7 @@ StoreConstraints(Relation rel, List *cooked_constraints, bool is_internal)
* the default and constraint expressions added to the relation.
*
* NB: caller should have opened rel with AccessExclusiveLock, and should
- * hold that lock till end of transaction. Also, we assume the caller has
+ * hold that lock till end of transaction. Also, we assume the caller has
* done a CommandCounterIncrement if necessary to make the relation's catalog
* tuples visible.
*/
@@ -2254,7 +2254,7 @@ AddRelationNewConstraints(Relation rel,
checknames = lappend(checknames, ccname);
/*
- * Check against pre-existing constraints. If we are allowed to
+ * Check against pre-existing constraints. If we are allowed to
* merge with an existing constraint, there's no more to do here.
* (We omit the duplicate constraint from the result, which is
* what ATAddCheckConstraint wants.)
@@ -2271,7 +2271,7 @@ AddRelationNewConstraints(Relation rel,
* column constraint and "tab_check" for a table constraint. We
* no longer have any info about the syntactic positioning of the
* constraint phrase, so we approximate this by seeing whether the
- * expression references more than one column. (If the user
+ * expression references more than one column. (If the user
* played by the rules, the result is the same...)
*
* Note: pull_var_clause() doesn't descend into sublinks, but we
@@ -2656,7 +2656,7 @@ RemoveStatistics(Oid relid, AttrNumber attnum)
* with the heap relation to zero tuples.
*
* The routine will truncate and then reconstruct the indexes on
- * the specified relation. Caller must hold exclusive lock on rel.
+ * the specified relation. Caller must hold exclusive lock on rel.
*/
static void
RelationTruncateIndexes(Relation heapRelation)
@@ -2696,7 +2696,7 @@ RelationTruncateIndexes(Relation heapRelation)
* This routine deletes all data within all the specified relations.
*
* This is not transaction-safe! There is another, transaction-safe
- * implementation in commands/tablecmds.c. We now use this only for
+ * implementation in commands/tablecmds.c. We now use this only for
* ON COMMIT truncation of temporary tables, where it doesn't matter.
*/
void
@@ -2805,7 +2805,7 @@ heap_truncate_check_FKs(List *relations, bool tempTables)
return;
/*
- * Otherwise, must scan pg_constraint. We make one pass with all the
+ * Otherwise, must scan pg_constraint. We make one pass with all the
* relations considered; if this finds nothing, then all is well.
*/
dependents = heap_truncate_find_FKs(oids);
@@ -2866,7 +2866,7 @@ heap_truncate_check_FKs(List *relations, bool tempTables)
* behavior to change depending on chance locations of rows in pg_constraint.)
*
* Note: caller should already have appropriate lock on all rels mentioned
- * in relationIds. Since adding or dropping an FK requires exclusive lock
+ * in relationIds. Since adding or dropping an FK requires exclusive lock
* on both rels, this ensures that the answer will be stable.
*/
List *
diff --git a/src/backend/catalog/index.c b/src/backend/catalog/index.c
index 3dea690a1ae..62696b15e0d 100644
--- a/src/backend/catalog/index.c
+++ b/src/backend/catalog/index.c
@@ -410,7 +410,7 @@ ConstructTupleDescriptor(Relation heapRelation,
/*
* We do not yet have the correct relation OID for the index, so just
- * set it invalid for now. InitializeAttributeOids() will fix it
+ * set it invalid for now. InitializeAttributeOids() will fix it
* later.
*/
to->attrelid = InvalidOid;
@@ -649,7 +649,7 @@ UpdateIndexRelation(Oid indexoid,
* heapRelation: table to build index on (suitably locked by caller)
* indexRelationName: what it says

* indexRelationId: normally, pass InvalidOid to let this routine
- * generate an OID for the index. During bootstrap this may be
+ * generate an OID for the index. During bootstrap this may be
* nonzero to specify a preselected OID.
* relFileNode: normally, pass InvalidOid to get new storage. May be
* nonzero to attach an existing valid build.
@@ -668,7 +668,7 @@ UpdateIndexRelation(Oid indexoid,
* allow_system_table_mods: allow table to be a system catalog
* skip_build: true to skip the index_build() step for the moment; caller
* must do it later (typically via reindex_index())
- * concurrent: if true, do not lock the table against writers. The index
+ * concurrent: if true, do not lock the table against writers. The index
* will be marked "invalid" and the caller must take additional steps
* to fix it up.
* is_internal: if true, post creation hook for new index
@@ -958,7 +958,7 @@ index_create(Relation heapRelation,
/*
* If there are no simply-referenced columns, give the index an
- * auto dependency on the whole table. In most cases, this will
+ * auto dependency on the whole table. In most cases, this will
* be redundant, but it might not be if the index expressions and
* predicate contain no Vars or only whole-row Vars.
*/
@@ -1084,7 +1084,7 @@ index_create(Relation heapRelation,
/*
* Close the index; but we keep the lock that we acquired above until end
- * of transaction. Closing the heap is caller's responsibility.
+ * of transaction. Closing the heap is caller's responsibility.
*/
index_close(indexRelation, NoLock);
@@ -1242,7 +1242,7 @@ index_constraint_create(Relation heapRelation,
* have been so marked already, so no need to clear the flag in the other
* case.
*
- * Note: this might better be done by callers. We do it here to avoid
+ * Note: this might better be done by callers. We do it here to avoid
* exposing index_update_stats() globally, but that wouldn't be necessary
* if relhaspkey went away.
*/
@@ -1339,7 +1339,7 @@ index_drop(Oid indexId, bool concurrent)
* in multiple steps and waiting out any transactions that might be using
* the index, so we don't need exclusive lock on the parent table. Instead
* we take ShareUpdateExclusiveLock, to ensure that two sessions aren't
- * doing CREATE/DROP INDEX CONCURRENTLY on the same index. (We will get
+ * doing CREATE/DROP INDEX CONCURRENTLY on the same index. (We will get
* AccessExclusiveLock on the index below, once we're sure nobody else is
* using it.)
*/
@@ -1379,7 +1379,7 @@ index_drop(Oid indexId, bool concurrent)
* non-concurrent case we can just do that now. In the concurrent case
* it's a bit trickier. The predicate locks must be moved when there are
* no index scans in progress on the index and no more can subsequently
- * start, so that no new predicate locks can be made on the index. Also,
+ * start, so that no new predicate locks can be made on the index. Also,
* they must be moved before heap inserts stop maintaining the index, else
* the conflict with the predicate lock on the index gap could be missed
* before the lock on the heap relation is in place to detect a conflict
@@ -1389,11 +1389,11 @@ index_drop(Oid indexId, bool concurrent)
{
/*
* We must commit our transaction in order to make the first pg_index
- * state update visible to other sessions. If the DROP machinery has
+ * state update visible to other sessions. If the DROP machinery has
* already performed any other actions (removal of other objects,
* pg_depend entries, etc), the commit would make those actions
* permanent, which would leave us with inconsistent catalog state if
- * we fail partway through the following sequence. Since DROP INDEX
+ * we fail partway through the following sequence. Since DROP INDEX
* CONCURRENTLY is restricted to dropping just one index that has no
* dependencies, we should get here before anything's been done ---
* but let's check that to be sure. We can verify that the current
@@ -1429,7 +1429,7 @@ index_drop(Oid indexId, bool concurrent)
* We must commit our current transaction so that the indisvalid
* update becomes visible to other transactions; then start another.
* Note that any previously-built data structures are lost in the
- * commit. The only data we keep past here are the relation IDs.
+ * commit. The only data we keep past here are the relation IDs.
*
* Before committing, get a session-level lock on the table, to ensure
* that neither it nor the index can be dropped before we finish. This
@@ -1459,7 +1459,7 @@ index_drop(Oid indexId, bool concurrent)
* detect deadlock and error out properly.
*
* Note: GetLockConflicts() never reports our own xid, hence we need
- * not check for that. Also, prepared xacts are not reported, which
+ * not check for that. Also, prepared xacts are not reported, which
* is fine since they certainly aren't going to do anything more.
*/
old_lockholders = GetLockConflicts(&heaplocktag, AccessExclusiveLock);
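
The call above feeds a wait loop over the conflicting lock holders. A standalone sketch of that loop's shape, using hypothetical stand-ins for the backend's virtual-transaction machinery:

#include <stdio.h>

typedef int VirtualTransactionId;
#define InvalidVirtualTransactionId 0

/* Stand-in: the real GetLockConflicts returns an array, terminated by
 * an invalid id, of transactions holding conflicting locks. */
static VirtualTransactionId lockholders[] =
    {42, 43, InvalidVirtualTransactionId};

static void
virtual_xact_wait(VirtualTransactionId vxid)
{
    printf("waiting for transaction %d to finish\n", vxid);
}

int
main(void)
{
    /* Wait out each old lock holder in turn; transactions started
     * after our commit already see the updated pg_index flags. */
    for (VirtualTransactionId *v = lockholders;
         *v != InvalidVirtualTransactionId; v++)
        virtual_xact_wait(*v);
    return 0;
}
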
@@ -1482,7 +1482,7 @@ index_drop(Oid indexId, bool concurrent)
/*
* Now we are sure that nobody uses the index for queries; they just
- * might have it open for updating it. So now we can unset indisready
+ * might have it open for updating it. So now we can unset indisready
* and indislive, then wait till nobody could be using it at all
* anymore.
*/
@@ -1619,7 +1619,7 @@ index_drop(Oid indexId, bool concurrent)
*
* IndexInfo stores the information about the index that's needed by
* FormIndexDatum, which is used for both index_build() and later insertion
- * of individual index tuples. Normally we build an IndexInfo for an index
+ * of individual index tuples. Normally we build an IndexInfo for an index
* just once per command, and then use it for (potentially) many tuples.
* ----------------
*/
@@ -1689,7 +1689,7 @@ BuildIndexInfo(Relation index)
* context must point to the heap tuple passed in.
*
* Notice we don't actually call index_form_tuple() here; we just prepare
- * its input arrays values[] and isnull[]. This is because the index AM
+ * its input arrays values[] and isnull[]. This is because the index AM
* may wish to alter the data before storage.
* ----------------
*/
@@ -1755,7 +1755,7 @@ FormIndexDatum(IndexInfo *indexInfo,
* index_update_stats --- update pg_class entry after CREATE INDEX or REINDEX
*
* This routine updates the pg_class row of either an index or its parent
- * relation after CREATE INDEX or REINDEX. Its rather bizarre API is designed
+ * relation after CREATE INDEX or REINDEX. Its rather bizarre API is designed
* to ensure we can do all the necessary work in just one update.
*
* hasindex: set relhasindex to this value
@@ -1769,7 +1769,7 @@ FormIndexDatum(IndexInfo *indexInfo,
*
* NOTE: an important side-effect of this operation is that an SI invalidation
* message is sent out to all backends --- including me --- causing relcache
- * entries to be flushed or updated with the new data. This must happen even
+ * entries to be flushed or updated with the new data. This must happen even
* if we find that no change is needed in the pg_class row. When updating
* a heap entry, this ensures that other backends find out about the new
* index. When updating an index, it's important because some index AMs
@@ -1807,13 +1807,13 @@ index_update_stats(Relation rel,
* 4. Even with just a single CREATE INDEX, there's a risk factor because
* someone else might be trying to open the rel while we commit, and this
* creates a race condition as to whether he will see both or neither of
- * the pg_class row versions as valid. Again, a non-transactional update
+ * the pg_class row versions as valid. Again, a non-transactional update
* avoids the risk. It is indeterminate which state of the row the other
* process will see, but it doesn't matter (if he's only taking
* AccessShareLock, then it's not critical that he see relhasindex true).
*
* It is safe to use a non-transactional update even though our
- * transaction could still fail before committing. Setting relhasindex
+ * transaction could still fail before committing. Setting relhasindex
* true is safe even if there are no indexes (VACUUM will eventually fix
* it), likewise for relhaspkey. And of course the new relpages and
* reltuples counts are correct regardless. However, we don't want to
@@ -1825,7 +1825,7 @@ index_update_stats(Relation rel,
pg_class = heap_open(RelationRelationId, RowExclusiveLock);
/*
- * Make a copy of the tuple to update. Normally we use the syscache, but
+ * Make a copy of the tuple to update. Normally we use the syscache, but
* we can't rely on that during bootstrap or while reindexing pg_class
* itself.
*/
@@ -1933,7 +1933,7 @@ index_update_stats(Relation rel,
* index_build - invoke access-method-specific index build procedure
*
* On entry, the index's catalog entries are valid, and its physical disk
- * file has been created but is empty. We call the AM-specific build
+ * file has been created but is empty. We call the AM-specific build
* procedure to fill in the index contents. We then update the pg_class
* entries of the index and heap relation as needed, using statistics
* returned by ambuild as well as data passed by the caller.
@@ -2031,7 +2031,7 @@ index_build(Relation heapRelation,
* Therefore, this code path can only be taken during non-concurrent
* CREATE INDEX. Thus the fact that heap_update will set the pg_index
* tuple's xmin doesn't matter, because that tuple was created in the
- * current transaction anyway. That also means we don't need to worry
+ * current transaction anyway. That also means we don't need to worry
* about any concurrent readers of the tuple; no other transaction can see
* it yet.
*/
@@ -2083,7 +2083,7 @@ index_build(Relation heapRelation,
/*
* If it's for an exclusion constraint, make a second pass over the heap
- * to verify that the constraint is satisfied. We must not do this until
+ * to verify that the constraint is satisfied. We must not do this until
* the index is fully valid. (Broken HOT chains shouldn't matter, though;
* see comments for IndexCheckExclusion.)
*/
@@ -2108,8 +2108,8 @@ index_build(Relation heapRelation,
* things to add it to the new index. After we return, the AM's index
* build procedure does whatever cleanup it needs.
*
- * The total count of heap tuples is returned. This is for updating pg_class
- * statistics. (It's annoying not to be able to do that here, but we want
+ * The total count of heap tuples is returned. This is for updating pg_class
+ * statistics. (It's annoying not to be able to do that here, but we want
* to merge that update with others; see index_update_stats.) Note that the
* index AM itself must keep track of the number of index tuples; we don't do
* so here because the AM might reject some of the tuples for its own reasons,
@@ -2159,7 +2159,7 @@ IndexBuildHeapScan(Relation heapRelation,
/*
* Need an EState for evaluation of index expressions and partial-index
- * predicates. Also a slot to hold the current tuple.
+ * predicates. Also a slot to hold the current tuple.
*/
estate = CreateExecutorState();
econtext = GetPerTupleExprContext(estate);
@@ -2178,7 +2178,7 @@ IndexBuildHeapScan(Relation heapRelation,
* SnapshotAny because we must retrieve all tuples and do our own time
* qual checks (because we have to index RECENTLY_DEAD tuples). In a
* concurrent build, we take a regular MVCC snapshot and index whatever's
- * live according to that. During bootstrap we just use SnapshotNow.
+ * live according to that. During bootstrap we just use SnapshotNow.
*/
if (IsBootstrapProcessingMode())
{
@@ -2289,7 +2289,7 @@ IndexBuildHeapScan(Relation heapRelation,
* building it, and may need to see such tuples.)
*
* However, if it was HOT-updated then we must only index
- * the live tuple at the end of the HOT-chain. Since this
+ * the live tuple at the end of the HOT-chain. Since this
* breaks semantics for pre-existing snapshots, mark the
* index as unusable for them.
*/
@@ -2309,7 +2309,7 @@ IndexBuildHeapScan(Relation heapRelation,
/*
* Since caller should hold ShareLock or better, normally
* the only way to see this is if it was inserted earlier
- * in our own transaction. However, it can happen in
+ * in our own transaction. However, it can happen in
* system catalogs, since we tend to release write lock
* before commit there. Give a warning if neither case
* applies.
@@ -2460,7 +2460,7 @@ IndexBuildHeapScan(Relation heapRelation,
/*
* You'd think we should go ahead and build the index tuple here, but
- * some index AMs want to do further processing on the data first. So
+ * some index AMs want to do further processing on the data first. So
* pass the values[] and isnull[] arrays, instead.
*/
@@ -2550,7 +2550,7 @@ IndexCheckExclusion(Relation heapRelation,
/*
* Need an EState for evaluation of index expressions and partial-index
- * predicates. Also a slot to hold the current tuple.
+ * predicates. Also a slot to hold the current tuple.
*/
estate = CreateExecutorState();
econtext = GetPerTupleExprContext(estate);
@@ -2628,11 +2628,11 @@ IndexCheckExclusion(Relation heapRelation,
* We do a concurrent index build by first inserting the catalog entry for the
* index via index_create(), marking it not indisready and not indisvalid.
* Then we commit our transaction and start a new one, then we wait for all
- * transactions that could have been modifying the table to terminate. Now
+ * transactions that could have been modifying the table to terminate. Now
* we know that any subsequently-started transactions will see the index and
* honor its constraints on HOT updates; so while existing HOT-chains might
* be broken with respect to the index, no currently live tuple will have an
- * incompatible HOT update done to it. We now build the index normally via
+ * incompatible HOT update done to it. We now build the index normally via
* index_build(), while holding a weak lock that allows concurrent
* insert/update/delete. Also, we index only tuples that are valid
* as of the start of the scan (see IndexBuildHeapScan), whereas a normal
@@ -2646,13 +2646,13 @@ IndexCheckExclusion(Relation heapRelation,
*
* Next, we mark the index "indisready" (but still not "indisvalid") and
* commit the second transaction and start a third. Again we wait for all
- * transactions that could have been modifying the table to terminate. Now
+ * transactions that could have been modifying the table to terminate. Now
* we know that any subsequently-started transactions will see the index and
* insert their new tuples into it. We then take a new reference snapshot
* which is passed to validate_index(). Any tuples that are valid according
* to this snap, but are not in the index, must be added to the index.
* (Any tuples committed live after the snap will be inserted into the
- * index by their originating transaction. Any tuples committed dead before
+ * index by their originating transaction. Any tuples committed dead before
* the snap need not be indexed, because we will wait out all transactions
* that might care about them before we mark the index valid.)
*
@@ -2661,7 +2661,7 @@ IndexCheckExclusion(Relation heapRelation,
* ever say "delete it". (This should be faster than a plain indexscan;
* also, not all index AMs support full-index indexscan.) Then we sort the
* TIDs, and finally scan the table doing a "merge join" against the TID list
- * to see which tuples are missing from the index. Thus we will ensure that
+ * to see which tuples are missing from the index. Thus we will ensure that
* all tuples valid according to the reference snapshot are in the index.
*
* Building a unique index this way is tricky: we might try to insert a
@@ -2677,7 +2677,7 @@ IndexCheckExclusion(Relation heapRelation,
* were alive at the time of the reference snapshot are gone; this is
* necessary to be sure there are none left with a transaction snapshot
* older than the reference (and hence possibly able to see tuples we did
- * not index). Then we mark the index "indisvalid" and commit. Subsequent
+ * not index). Then we mark the index "indisvalid" and commit. Subsequent
* transactions will be able to use it for queries.
*
* Doing two full table scans is a brute-force strategy. We could try to be
@@ -2703,7 +2703,7 @@ validate_index(Oid heapId, Oid indexId, Snapshot snapshot)
indexRelation = index_open(indexId, RowExclusiveLock);
/*
- * Fetch info needed for index_insert. (You might think this should be
+ * Fetch info needed for index_insert. (You might think this should be
* passed in from DefineIndex, but its copy is long gone due to having
* been built in a previous transaction.)
*/
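
The second pass described above is effectively a merge join between the sorted TIDs already in the index and the heap's tuples in TID order. A simplified standalone sketch over integer TIDs (real TIDs are block/offset pairs, and the real code batches via a tuplesort):

#include <stdio.h>

/* Sorted TIDs found in the index by the first pass. */
static int index_tids[] = {1, 3, 4};
/* TIDs of tuples seen while scanning the heap, also in TID order. */
static int heap_tids[] = {1, 2, 3, 4, 5};

int
main(void)
{
    int i = 0, nindex = 3, nheap = 5;

    for (int h = 0; h < nheap; h++)
    {
        while (i < nindex && index_tids[i] < heap_tids[h])
            i++;
        if (i < nindex && index_tids[i] == heap_tids[h])
            i++;                     /* already indexed */
        else
            printf("insert TID %d into index\n", heap_tids[h]);
    }
    return 0;
}
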
@@ -2820,7 +2820,7 @@ validate_index_heapscan(Relation heapRelation,
/*
* Need an EState for evaluation of index expressions and partial-index
- * predicates. Also a slot to hold the current tuple.
+ * predicates. Also a slot to hold the current tuple.
*/
estate = CreateExecutorState();
econtext = GetPerTupleExprContext(estate);
@@ -2869,7 +2869,7 @@ validate_index_heapscan(Relation heapRelation,
* visit the live tuples in order by their offsets, but the root
* offsets that we need to compare against the index contents might be
* ordered differently. So we might have to "look back" within the
- * tuplesort output, but only within the current page. We handle that
+ * tuplesort output, but only within the current page. We handle that
* by keeping a bool array in_index[] showing all the
* already-passed-over tuplesort output TIDs of the current page. We
* clear that array here, when advancing onto a new heap page.
@@ -2954,7 +2954,7 @@ validate_index_heapscan(Relation heapRelation,
/*
* For the current heap tuple, extract all the attributes we use
- * in this index, and note which are null. This also performs
+ * in this index, and note which are null. This also performs
* evaluation of any expressions needed.
*/
FormIndexDatum(indexInfo,
@@ -2976,7 +2976,7 @@ validate_index_heapscan(Relation heapRelation,
* for a uniqueness check on the whole HOT-chain. That is, the
* tuple we have here could be dead because it was already
* HOT-updated, and if so the updating transaction will not have
- * thought it should insert index entries. The index AM will
+ * thought it should insert index entries. The index AM will
* check the whole HOT-chain and correctly detect a conflict if
* there is one.
*/
@@ -3098,7 +3098,7 @@ index_set_state_flags(Oid indexId, IndexStateFlagsAction action)
/*
* IndexGetRelation: given an index's relation OID, get the OID of the
- * relation it is an index on. Uses the system cache.
+ * relation it is an index on. Uses the system cache.
*/
Oid
IndexGetRelation(Oid indexId, bool missing_ok)
@@ -3135,7 +3135,7 @@ reindex_index(Oid indexId, bool skip_constraint_checks)
volatile bool skipped_constraint = false;
/*
- * Open and lock the parent heap relation. ShareLock is sufficient since
+ * Open and lock the parent heap relation. ShareLock is sufficient since
* we only need to be sure no schema or data changes are going on.
*/
heapId = IndexGetRelation(indexId, false);
@@ -3223,7 +3223,7 @@ reindex_index(Oid indexId, bool skip_constraint_checks)
* chains, we had better force indcheckxmin true, because the normal
* argument that the HOT chains couldn't conflict with the index is
* suspect for an invalid index. (A conflict is definitely possible if
- * the index was dead. It probably shouldn't happen otherwise, but let's
+ * the index was dead. It probably shouldn't happen otherwise, but let's
* be conservative.) In this case advancing the usability horizon is
* appropriate.
*
@@ -3315,7 +3315,7 @@ reindex_index(Oid indexId, bool skip_constraint_checks)
* the data in a manner that risks a change in constraint validity.
*
* Returns true if any indexes were rebuilt (including toast table's index
- * when relevant). Note that a CommandCounterIncrement will occur after each
+ * when relevant). Note that a CommandCounterIncrement will occur after each
* index rebuild.
*/
bool
@@ -3328,7 +3328,7 @@ reindex_relation(Oid relid, int flags)
bool result;
/*
- * Open and lock the relation. ShareLock is sufficient since we only need
+ * Open and lock the relation. ShareLock is sufficient since we only need
* to prevent schema and data changes in it. The lock level used here
* should match ReindexTable().
*/
@@ -3347,7 +3347,7 @@ reindex_relation(Oid relid, int flags)
* reindex_index will attempt to update the pg_class rows for the relation
* and index. If we are processing pg_class itself, we want to make sure
* that the updates do not try to insert index entries into indexes we
- * have not processed yet. (When we are trying to recover from corrupted
+ * have not processed yet. (When we are trying to recover from corrupted
* indexes, that could easily cause a crash.) We can accomplish this
* because CatalogUpdateIndexes will use the relcache's index list to know
* which indexes to update. We just force the index list to be only the
@@ -3356,7 +3356,7 @@ reindex_relation(Oid relid, int flags)
* It is okay to not insert entries into the indexes we have not processed
* yet because all of this is transaction-safe. If we fail partway
* through, the updated rows are dead and it doesn't matter whether they
- * have index entries. Also, a new pg_class index will be created with a
+ * have index entries. Also, a new pg_class index will be created with a
* correct entry for its own pg_class row because we do
* RelationSetNewRelfilenode() before we do index_build().
*
diff --git a/src/backend/catalog/indexing.c b/src/backend/catalog/indexing.c
index 880155e7de0..361726b8ba6 100644
--- a/src/backend/catalog/indexing.c
+++ b/src/backend/catalog/indexing.c
@@ -149,7 +149,7 @@ CatalogIndexInsert(CatalogIndexState indstate, HeapTuple heapTuple)
* CatalogUpdateIndexes - do all the indexing work for a new catalog tuple
*
* This is a convenience routine for the common case where we only need
- * to insert or update a single tuple in a system catalog. Avoid using it for
+ * to insert or update a single tuple in a system catalog. Avoid using it for
* multiple tuples, since opening the indexes and building the index info
* structures is moderately expensive.
*/
diff --git a/src/backend/catalog/namespace.c b/src/backend/catalog/namespace.c
index 23943ff9ce2..3c6aa520d53 100644
--- a/src/backend/catalog/namespace.c
+++ b/src/backend/catalog/namespace.c
@@ -66,10 +66,10 @@
* when we are obeying an override search path spec that says not to use the
* temp namespace, or the temp namespace is included in the explicit list.)
*
- * 2. The system catalog namespace is always searched. If the system
+ * 2. The system catalog namespace is always searched. If the system
* namespace is present in the explicit path then it will be searched in
* the specified order; otherwise it will be searched after TEMP tables and
- * *before* the explicit list. (It might seem that the system namespace
+ * *before* the explicit list. (It might seem that the system namespace
* should be implicitly last, but this behavior appears to be required by
* SQL99. Also, this provides a way to search the system namespace first
* without thereby making it the default creation target namespace.)
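
Rules 1 and 2 above compose an effective search order from the explicit list. A simplified standalone sketch of that composition (hypothetical helper; it ignores USAGE checks and the override-path cases):

#include <stdio.h>
#include <string.h>

/* Temp namespace goes first unless explicitly listed; pg_catalog goes
 * after temp and before the explicit entries unless explicitly listed. */
static void
effective_path(const char **explicit_list, int n)
{
    int has_temp = 0, has_catalog = 0;

    for (int i = 0; i < n; i++)
    {
        if (strcmp(explicit_list[i], "pg_temp") == 0)
            has_temp = 1;
        if (strcmp(explicit_list[i], "pg_catalog") == 0)
            has_catalog = 1;
    }
    if (!has_temp)
        printf("pg_temp\n");
    if (!has_catalog)
        printf("pg_catalog\n");
    for (int i = 0; i < n; i++)
        printf("%s\n", explicit_list[i]);
}

int
main(void)
{
    const char *path[] = {"myschema", "public"};
    effective_path(path, 2);
    return 0;
}
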
@@ -87,7 +87,7 @@
* to refer to the current backend's temp namespace. This is usually also
* ignorable if the temp namespace hasn't been set up, but there's a special
* case: if "pg_temp" appears first then it should be the default creation
- * target. We kluge this case a little bit so that the temp namespace isn't
+ * target. We kluge this case a little bit so that the temp namespace isn't
* set up until the first attempt to create something in it. (The reason for
* klugery is that we can't create the temp namespace outside a transaction,
* but initial GUC processing of search_path happens outside a transaction.)
@@ -98,7 +98,7 @@
* In bootstrap mode, the search path is set equal to "pg_catalog", so that
* the system namespace is the only one searched or inserted into.
* initdb is also careful to set search_path to "pg_catalog" for its
- * post-bootstrap standalone backend runs. Otherwise the default search
+ * post-bootstrap standalone backend runs. Otherwise the default search
* path is determined by GUC. The factory default path contains the PUBLIC
* namespace (if it exists), preceded by the user's personal namespace
* (if one exists).
@@ -162,13 +162,13 @@ static List *overrideStack = NIL;
/*
* myTempNamespace is InvalidOid until and unless a TEMP namespace is set up
* in a particular backend session (this happens when a CREATE TEMP TABLE
- * command is first executed). Thereafter it's the OID of the temp namespace.
+ * command is first executed). Thereafter it's the OID of the temp namespace.
*
* myTempToastNamespace is the OID of the namespace for my temp tables' toast
- * tables. It is set when myTempNamespace is, and is InvalidOid before that.
+ * tables. It is set when myTempNamespace is, and is InvalidOid before that.
*
* myTempNamespaceSubID shows whether we've created the TEMP namespace in the
- * current subtransaction. The flag propagates up the subtransaction tree,
+ * current subtransaction. The flag propagates up the subtransaction tree,
* so the main transaction will correctly recognize the flag if all
* intermediate subtransactions commit. When it is InvalidSubTransactionId,
* we either haven't made the TEMP namespace yet, or have successfully
@@ -250,7 +250,7 @@ RangeVarGetRelidExtended(const RangeVar *relation, LOCKMODE lockmode,
}
/*
- * DDL operations can change the results of a name lookup. Since all such
+ * DDL operations can change the results of a name lookup. Since all such
* operations will generate invalidation messages, we keep track of
* whether any such messages show up while we're performing the operation,
* and retry until either (1) no more invalidation messages show up or (2)
@@ -259,7 +259,7 @@ RangeVarGetRelidExtended(const RangeVar *relation, LOCKMODE lockmode,
* But if lockmode = NoLock, then we assume that either the caller is OK
* with the answer changing under them, or that they already hold some
* appropriate lock, and therefore return the first answer we get without
- * checking for invalidation messages. Also, if the requested lock is
+ * checking for invalidation messages. Also, if the requested lock is
* already held, LockRelationOid will not AcceptInvalidationMessages,
* so we may fail to notice a change. We could protect against that case
* by calling AcceptInvalidationMessages() before beginning this loop, but
@@ -396,7 +396,7 @@ RangeVarGetRelidExtended(const RangeVar *relation, LOCKMODE lockmode,
break;
/*
- * Something may have changed. Let's repeat the name lookup, to make
+ * Something may have changed. Let's repeat the name lookup, to make
* sure this name still references the same relation it did
* previously.
*/
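
The retry loop these comments describe keys off an invalidation counter. A standalone sketch of its shape (lookup and lock_relation are hypothetical mocks; in the backend the counter advances whenever an invalidation message is processed):

#include <stdio.h>

static unsigned long inval_count;   /* bumped when an invalidation
                                     * message is processed */

static int
lookup(const char *name)
{
    (void) name;
    return 1234;
}

static void
lock_relation(int relid)
{
    static int first = 1;

    (void) relid;
    if (first)
    {
        inval_count++;              /* simulate one message arriving
                                     * while the lock is acquired */
        first = 0;
    }
}

int
main(void)
{
    int relid;
    unsigned long seen;

    /* Re-run the name lookup until no invalidation arrives while we
     * are acquiring the lock. */
    for (;;)
    {
        seen = inval_count;
        relid = lookup("mytable");
        lock_relation(relid);       /* may process invalidations */
        if (inval_count == seen)
            break;                  /* lookup result is still good */
    }
    printf("locked %d\n", relid);
    return 0;
}
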
@@ -869,7 +869,7 @@ TypeIsVisible(Oid typid)
* and the returned nvargs will always be zero.
*
* If expand_defaults is true, functions that could match after insertion of
- * default argument values will also be retrieved. In this case the returned
+ * default argument values will also be retrieved. In this case the returned
* structs could have nargs > passed-in nargs, and ndargs is set to the number
* of additional args (which can be retrieved from the function's
* proargdefaults entry).
@@ -1025,7 +1025,7 @@ FuncnameGetCandidates(List *names, int nargs, List *argnames,
* Call uses positional notation
*
* Check if function is variadic, and get variadic element type if
- * so. If expand_variadic is false, we should just ignore
+ * so. If expand_variadic is false, we should just ignore
* variadic-ness.
*/
if (pronargs <= nargs && expand_variadic)
@@ -1155,7 +1155,7 @@ FuncnameGetCandidates(List *names, int nargs, List *argnames,
if (prevResult)
{
/*
- * We have a match with a previous result. Decide which one
+ * We have a match with a previous result. Decide which one
* to keep, or mark it ambiguous if we can't decide. The
* logic here is preference > 0 means prefer the old result,
* preference < 0 means prefer the new, preference = 0 means
@@ -1540,7 +1540,7 @@ OpernameGetOprid(List *names, Oid oprleft, Oid oprright)
* identical entries in later namespaces.
*
* The returned items always have two args[] entries --- one or the other
- * will be InvalidOid for a prefix or postfix oprkind. nargs is 2, too.
+ * will be InvalidOid for a prefix or postfix oprkind. nargs is 2, too.
*/
FuncCandidateList
OpernameGetCandidates(List *names, char oprkind)
@@ -2521,7 +2521,7 @@ get_ts_config_oid(List *names, bool missing_ok)
/*
* TSConfigIsVisible
* Determine whether a text search configuration (identified by OID)
- * is visible in the current search path. Visible means "would be found
+ * is visible in the current search path. Visible means "would be found
* by searching for the unqualified text search configuration name".
*/
bool
@@ -2840,7 +2840,7 @@ QualifiedNameGetCreationNamespace(List *names, char **objname_p)
/*
* get_namespace_oid - given a namespace name, look up the OID
*
- * If missing_ok is false, throw an error if namespace name not found. If
+ * If missing_ok is false, throw an error if namespace name not found. If
* true, just return InvalidOid.
*/
Oid
@@ -3055,7 +3055,7 @@ GetTempNamespaceBackendId(Oid namespaceId)
/*
* GetTempToastNamespace - get the OID of my temporary-toast-table namespace,
- * which must already be assigned. (This is only used when creating a toast
+ * which must already be assigned. (This is only used when creating a toast
* table for a temp table, so we must have already done InitTempTableNamespace)
*/
Oid
@@ -3153,8 +3153,8 @@ OverrideSearchPathMatchesCurrent(OverrideSearchPath *path)
*
* It's possible that newpath->useTemp is set but there is no longer any
* active temp namespace, if the path was saved during a transaction that
- * created a temp namespace and was later rolled back. In that case we just
- * ignore useTemp. A plausible alternative would be to create a new temp
+ * created a temp namespace and was later rolled back. In that case we just
+ * ignore useTemp. A plausible alternative would be to create a new temp
* namespace, but for existing callers that's not necessary because an empty
* temp namespace wouldn't affect their results anyway.
*
@@ -3187,7 +3187,7 @@ PushOverrideSearchPath(OverrideSearchPath *newpath)
firstNS = linitial_oid(oidlist);
/*
- * Add any implicitly-searched namespaces to the list. Note these go on
+ * Add any implicitly-searched namespaces to the list. Note these go on
* the front, not the back; also notice that we do not check USAGE
* permissions for these.
*/
@@ -3510,7 +3510,7 @@ recomputeNamespacePath(void)
}
/*
- * Remember the first member of the explicit list. (Note: this is
+ * Remember the first member of the explicit list. (Note: this is
* nominally wrong if temp_missing, but we need it anyway to distinguish
* explicit from implicit mention of pg_catalog.)
*/
@@ -3520,7 +3520,7 @@ recomputeNamespacePath(void)
firstNS = linitial_oid(oidlist);
/*
- * Add any implicitly-searched namespaces to the list. Note these go on
+ * Add any implicitly-searched namespaces to the list. Note these go on
* the front, not the back; also notice that we do not check USAGE
* permissions for these.
*/
@@ -3575,7 +3575,7 @@ InitTempTableNamespace(void)
/*
* First, do permission check to see if we are authorized to make temp
- * tables. We use a nonstandard error message here since "databasename:
+ * tables. We use a nonstandard error message here since "databasename:
* permission denied" might be a tad cryptic.
*
* Note that ACL_CREATE_TEMP rights are rechecked in pg_namespace_aclmask;
@@ -3594,9 +3594,9 @@ InitTempTableNamespace(void)
* Do not allow a Hot Standby slave session to make temp tables. Aside
* from problems with modifying the system catalogs, there is a naming
* conflict: pg_temp_N belongs to the session with BackendId N on the
- * master, not to a slave session with the same BackendId. We should not
+ * master, not to a slave session with the same BackendId. We should not
* be able to get here anyway due to XactReadOnly checks, but let's just
- * make real sure. Note that this also backstops various operations that
+ * make real sure. Note that this also backstops various operations that
* allow XactReadOnly transactions to modify temp tables; they'd need
* RecoveryInProgress checks if not for this.
*/
@@ -3952,7 +3952,7 @@ fetch_search_path(bool includeImplicit)
/*
* If the temp namespace should be first, force it to exist. This is so
* that callers can trust the result to reflect the actual default
- * creation namespace. It's a bit bogus to do this here, since
+ * creation namespace. It's a bit bogus to do this here, since
* current_schema() is supposedly a stable function without side-effects,
* but the alternatives seem worse.
*/
@@ -3974,7 +3974,7 @@ fetch_search_path(bool includeImplicit)
/*
* Fetch the active search path into a caller-allocated array of OIDs.
- * Returns the number of path entries. (If this is more than sarray_len,
+ * Returns the number of path entries. (If this is more than sarray_len,
* then the data didn't fit and is not all stored.)
*
* The returned list always includes the implicitly-prepended namespaces,
diff --git a/src/backend/catalog/objectaddress.c b/src/backend/catalog/objectaddress.c
index d40edee3c4e..f1fe67b6c50 100644
--- a/src/backend/catalog/objectaddress.c
+++ b/src/backend/catalog/objectaddress.c
@@ -652,7 +652,7 @@ get_object_address(ObjectType objtype, List *objname, List *objargs,
/*
* If we're dealing with a relation or attribute, then the relation is
- * already locked. Otherwise, we lock it now.
+ * already locked. Otherwise, we lock it now.
*/
if (address.classId != RelationRelationId)
{
diff --git a/src/backend/catalog/pg_collation.c b/src/backend/catalog/pg_collation.c
index dd0050267f1..413a127fcf0 100644
--- a/src/backend/catalog/pg_collation.c
+++ b/src/backend/catalog/pg_collation.c
@@ -78,7 +78,7 @@ CollationCreate(const char *collname, Oid collnamespace,
collname, pg_encoding_to_char(collencoding))));
/*
- * Also forbid matching an any-encoding entry. This test of course is not
+ * Also forbid matching an any-encoding entry. This test of course is not
* backed up by the unique index, but it's not a problem since we don't
* support adding any-encoding entries after initdb.
*/
diff --git a/src/backend/catalog/pg_constraint.c b/src/backend/catalog/pg_constraint.c
index 52cb9132ee4..dc032fda29e 100644
--- a/src/backend/catalog/pg_constraint.c
+++ b/src/backend/catalog/pg_constraint.c
@@ -38,7 +38,7 @@
* Create a constraint table entry.
*
* Subsidiary records (such as triggers or indexes to implement the
- * constraint) are *not* created here. But we do make dependency links
+ * constraint) are *not* created here. But we do make dependency links
* from the constraint to the things it depends on.
*/
Oid
@@ -305,7 +305,7 @@ CreateConstraintEntry(const char *constraintName,
{
/*
* Register normal dependency on the unique index that supports a
- * foreign-key constraint. (Note: for indexes associated with unique
+ * foreign-key constraint. (Note: for indexes associated with unique
* or primary-key constraints, the dependency runs the other way, and
* is not made here.)
*/
@@ -895,10 +895,10 @@ get_domain_constraint_oid(Oid typid, const char *conname, bool missing_ok)
* the rel of interest are Vars with the indicated varno/varlevelsup.
*
* Currently we only check to see if the rel has a primary key that is a
- * subset of the grouping_columns. We could also use plain unique constraints
+ * subset of the grouping_columns. We could also use plain unique constraints
* if all their columns are known not null, but there's a problem: we need
* to be able to represent the not-null-ness as part of the constraints added
- * to *constraintDeps. FIXME whenever not-null constraints get represented
+ * to *constraintDeps. FIXME whenever not-null constraints get represented
* in pg_constraint.
*/
bool
diff --git a/src/backend/catalog/pg_db_role_setting.c b/src/backend/catalog/pg_db_role_setting.c
index 45949129cfb..dddc390536f 100644
--- a/src/backend/catalog/pg_db_role_setting.c
+++ b/src/backend/catalog/pg_db_role_setting.c
@@ -172,7 +172,7 @@ AlterSetting(Oid databaseid, Oid roleid, VariableSetStmt *setstmt)
/*
* Drop some settings from the catalog. These can be for a particular
- * database, or for a particular role. (It is of course possible to do both
+ * database, or for a particular role. (It is of course possible to do both
* too, but it doesn't make sense for current uses.)
*/
void
diff --git a/src/backend/catalog/pg_depend.c b/src/backend/catalog/pg_depend.c
index 9535fba21e7..69f613f60ad 100644
--- a/src/backend/catalog/pg_depend.c
+++ b/src/backend/catalog/pg_depend.c
@@ -50,7 +50,7 @@ recordDependencyOn(const ObjectAddress *depender,
/*
* Record multiple dependencies (of the same kind) for a single dependent
- * object. This has a little less overhead than recording each separately.
+ * object. This has a little less overhead than recording each separately.
*/
void
recordMultipleDependencies(const ObjectAddress *depender,
@@ -127,7 +127,7 @@ recordMultipleDependencies(const ObjectAddress *depender,
/*
* If we are executing a CREATE EXTENSION operation, mark the given object
- * as being a member of the extension. Otherwise, do nothing.
+ * as being a member of the extension. Otherwise, do nothing.
*
* This must be called during creation of any user-definable object type
* that could be a member of an extension.
@@ -186,7 +186,7 @@ recordDependencyOnCurrentExtension(const ObjectAddress *object,
* (possibly with some differences from before).
*
* If skipExtensionDeps is true, we do not delete any dependencies that
- * show that the given object is a member of an extension. This avoids
+ * show that the given object is a member of an extension. This avoids
* needing a lot of extra logic to fetch and recreate that dependency.
*/
long
@@ -492,7 +492,7 @@ getExtensionOfObject(Oid classId, Oid objectId)
* Detect whether a sequence is marked as "owned" by a column
*
* An ownership marker is an AUTO dependency from the sequence to the
- * column. If we find one, store the identity of the owning column
+ * column. If we find one, store the identity of the owning column
* into *tableId and *colId and return TRUE; else return FALSE.
*
* Note: if there's more than one such pg_depend entry then you get
diff --git a/src/backend/catalog/pg_enum.c b/src/backend/catalog/pg_enum.c
index 7e746f96676..e4b29bb0d7f 100644
--- a/src/backend/catalog/pg_enum.c
+++ b/src/backend/catalog/pg_enum.c
@@ -464,7 +464,7 @@ restart:
* We avoid doing this unless absolutely necessary; in most installations
* it will never happen. The reason is that updating existing pg_enum
* entries creates hazards for other backends that are concurrently reading
- * pg_enum with SnapshotNow semantics. A concurrent SnapshotNow scan could
+ * pg_enum with SnapshotNow semantics. A concurrent SnapshotNow scan could
* see both old and new versions of an updated row as valid, or neither of
* them, if the commit happens between scanning the two versions. It's
* also quite likely for a concurrent scan to see an inconsistent set of
diff --git a/src/backend/catalog/pg_largeobject.c b/src/backend/catalog/pg_largeobject.c
index d01a5a7a72e..ce877e8e582 100644
--- a/src/backend/catalog/pg_largeobject.c
+++ b/src/backend/catalog/pg_largeobject.c
@@ -76,7 +76,7 @@ LargeObjectCreate(Oid loid)
}
/*
- * Drop a large object having the given LO identifier. Both the data pages
+ * Drop a large object having the given LO identifier. Both the data pages
* and metadata must be dropped.
*/
void
diff --git a/src/backend/catalog/pg_operator.c b/src/backend/catalog/pg_operator.c
index 3c4fedbd49c..d8a61dfe7bf 100644
--- a/src/backend/catalog/pg_operator.c
+++ b/src/backend/catalog/pg_operator.c
@@ -315,7 +315,7 @@ OperatorShellMake(const char *operatorName,
* specify operators that do not exist. For example, if operator
* "op" is being defined, the negator operator "negop" and the
* commutator "commop" can also be defined without specifying
- * any information other than their names. Since in order to
+ * any information other than their names. Since in order to
* add "op" to the PG_OPERATOR catalog, all the Oid's for these
* operators must be placed in the fields of "op", a forward
* declaration is done on the commutator and negator operators.
@@ -433,7 +433,7 @@ OperatorCreate(const char *operatorName,
operatorName);
/*
- * Set up the other operators. If they do not currently exist, create
+ * Set up the other operators. If they do not currently exist, create
* shells in order to get ObjectId's.
*/
diff --git a/src/backend/catalog/pg_proc.c b/src/backend/catalog/pg_proc.c
index e3673e566fe..1b4336ebe94 100644
--- a/src/backend/catalog/pg_proc.c
+++ b/src/backend/catalog/pg_proc.c
@@ -229,7 +229,7 @@ ProcedureCreate(const char *procedureName,
/*
* Do not allow polymorphic return type unless at least one input argument
- * is polymorphic. ANYRANGE return type is even stricter: must have an
+ * is polymorphic. ANYRANGE return type is even stricter: must have an
* ANYRANGE input (since we can't deduce the specific range type from
* ANYELEMENT). Also, do not allow return type INTERNAL unless at least
* one input argument is INTERNAL.
@@ -676,7 +676,7 @@ ProcedureCreate(const char *procedureName,
/*
* Set per-function configuration parameters so that the validation is
- * done with the environment the function expects. However, if
+ * done with the environment the function expects. However, if
* check_function_bodies is off, we don't do this, because that would
* create dump ordering hazards that pg_dump doesn't know how to deal
* with. (For example, a SET clause might refer to a not-yet-created
@@ -948,7 +948,7 @@ sql_function_parse_error_callback(void *arg)
/*
* Adjust a syntax error occurring inside the function body of a CREATE
- * FUNCTION or DO command. This can be used by any function validator or
+ * FUNCTION or DO command. This can be used by any function validator or
* anonymous-block handler, not only for SQL-language functions.
* It is assumed that the syntax error position is initially relative to the
* function body string (as passed in). If possible, we adjust the position
@@ -1081,7 +1081,7 @@ match_prosrc_to_literal(const char *prosrc, const char *literal,
/*
* This implementation handles backslashes and doubled quotes in the
- * string literal. It does not handle the SQL syntax for literals
+ * string literal. It does not handle the SQL syntax for literals
* continued across line boundaries.
*
* We do the comparison a character at a time, not a byte at a time, so
diff --git a/src/backend/catalog/pg_shdepend.c b/src/backend/catalog/pg_shdepend.c
index 7de4420fa3d..bc02e162702 100644
--- a/src/backend/catalog/pg_shdepend.c
+++ b/src/backend/catalog/pg_shdepend.c
@@ -167,7 +167,7 @@ recordDependencyOnOwner(Oid classId, Oid objectId, Oid owner)
* shdepChangeDep
*
* Update shared dependency records to account for an updated referenced
- * object. This is an internal workhorse for operations such as changing
+ * object. This is an internal workhorse for operations such as changing
* an object's owner.
*
* There must be no more than one existing entry for the given dependent
@@ -316,7 +316,7 @@ changeDependencyOnOwner(Oid classId, Oid objectId, Oid newOwnerId)
* was previously granted some rights to the object.
*
* This step is analogous to aclnewowner's removal of duplicate entries
- * in the ACL. We have to do it to handle this scenario:
+ * in the ACL. We have to do it to handle this scenario:
* A grants some rights on an object to B
* ALTER OWNER changes the object's owner to B
* ALTER OWNER changes the object's owner to C
@@ -402,9 +402,9 @@ getOidListDiff(Oid *list1, int *nlist1, Oid *list2, int *nlist2)
* and then insert or delete from pg_shdepend as appropriate.
*
* Note that we can't just insert all referenced roles blindly during GRANT,
- * because we would end up with duplicate registered dependencies. We could
+ * because we would end up with duplicate registered dependencies. We could
* check for existence of the tuples before inserting, but that seems to be
- * more expensive than what we are doing here. Likewise we can't just delete
+ * more expensive than what we are doing here. Likewise we can't just delete
* blindly during REVOKE, because the user may still have other privileges.
* It is also possible that REVOKE actually adds dependencies, due to
* instantiation of a formerly implicit default ACL (although at present,
@@ -535,7 +535,7 @@ checkSharedDependencies(Oid classId, Oid objectId,
/*
* We limit the number of dependencies reported to the client to
* MAX_REPORTED_DEPS, since client software may not deal well with
- * enormous error strings. The server log always gets a full report.
+ * enormous error strings. The server log always gets a full report.
*/
#define MAX_REPORTED_DEPS 100
@@ -616,7 +616,7 @@ checkSharedDependencies(Oid classId, Oid objectId,
bool stored = false;
/*
- * XXX this info is kept on a simple List. Maybe it's not good
+ * XXX this info is kept on a simple List. Maybe it's not good
* for performance, but using a hash table seems needlessly
* complex. The expected number of databases is not high anyway,
* I suppose.
@@ -853,7 +853,7 @@ shdepAddDependency(Relation sdepRel,
/*
* Make sure the object doesn't go away while we record the dependency on
- * it. DROP routines should lock the object exclusively before they check
+ * it. DROP routines should lock the object exclusively before they check
* shared dependencies.
*/
shdepLockAndCheckObject(refclassId, refobjId);
@@ -1004,7 +1004,7 @@ shdepLockAndCheckObject(Oid classId, Oid objectId)
/*
* Currently, this routine need not support any other shared
- * object types besides roles. If we wanted to record explicit
+ * object types besides roles. If we wanted to record explicit
* dependencies on databases or tablespaces, we'd need code along
* these lines:
*/
@@ -1150,7 +1150,7 @@ isSharedObjectPinned(Oid classId, Oid objectId, Relation sdepRel)
/*
* shdepDropOwned
*
- * Drop the objects owned by any one of the given RoleIds. If a role has
+ * Drop the objects owned by any one of the given RoleIds. If a role has
* access to an object, the grant will be removed as well (but the object
* will not, of course).
*
diff --git a/src/backend/catalog/pg_type.c b/src/backend/catalog/pg_type.c
index 23ac3dd3365..ea81a3e0a4d 100644
--- a/src/backend/catalog/pg_type.c
+++ b/src/backend/catalog/pg_type.c
@@ -393,7 +393,7 @@ TypeCreate(Oid newTypeOid,
if (HeapTupleIsValid(tup))
{
/*
- * check that the type is not already defined. It may exist as a
+ * check that the type is not already defined. It may exist as a
* shell type, however.
*/
if (((Form_pg_type) GETSTRUCT(tup))->typisdefined)
diff --git a/src/backend/catalog/storage.c b/src/backend/catalog/storage.c
index 892f1636a90..f322f17bb87 100644
--- a/src/backend/catalog/storage.c
+++ b/src/backend/catalog/storage.c
@@ -35,7 +35,7 @@
* that have been created or deleted in the current transaction. When
* a relation is created, we create the physical file immediately, but
* remember it so that we can delete the file again if the current
- * transaction is aborted. Conversely, a deletion request is NOT
+ * transaction is aborted. Conversely, a deletion request is NOT
* executed immediately, but is just entered in the list. When and if
* the transaction commits, we can delete the physical file.
*
@@ -378,7 +378,7 @@ smgrDoPendingDeletes(bool isCommit)
* *ptr is set to point to a freshly-palloc'd array of RelFileNodes.
* If there are no relations to be deleted, *ptr is set to NULL.
*
- * Only non-temporary relations are included in the returned list. This is OK
+ * Only non-temporary relations are included in the returned list. This is OK
* because the list is used only in contexts where temporary relations don't
* matter: we're either writing to the two-phase state file (and transactions
* that have touched temp tables can't be prepared) or we're writing to xlog
diff --git a/src/backend/catalog/toasting.c b/src/backend/catalog/toasting.c
index 385d64d4c07..b5d1c3f63b3 100644
--- a/src/backend/catalog/toasting.c
+++ b/src/backend/catalog/toasting.c
@@ -340,7 +340,7 @@ create_toast_table(Relation rel, Oid toastOid, Oid toastIndexOid, Datum reloptio
}
/*
- * Check to see whether the table needs a TOAST table. It does only if
+ * Check to see whether the table needs a TOAST table. It does only if
* (1) there are any toastable attributes, and (2) the maximum length
* of a tuple could exceed TOAST_TUPLE_THRESHOLD. (We don't want to
* create a toast table for something like "f1 varchar(20)".)
diff --git a/src/backend/commands/aggregatecmds.c b/src/backend/commands/aggregatecmds.c
index 4a03786210a..f672382cb9a 100644
--- a/src/backend/commands/aggregatecmds.c
+++ b/src/backend/commands/aggregatecmds.c
@@ -176,7 +176,7 @@ DefineAggregate(List *name, List *args, bool oldstyle, List *parameters)
*
* transtype can't be a pseudo-type, since we need to be able to store
* values of the transtype. However, we can allow polymorphic transtype
- * in some cases (AggregateCreate will check). Also, we allow "internal"
+ * in some cases (AggregateCreate will check). Also, we allow "internal"
* for functions that want to pass pointers to private data structures;
* but allow that only to superusers, since you could crash the system (or
* worse) by connecting up incompatible internal-using functions in an
diff --git a/src/backend/commands/alter.c b/src/backend/commands/alter.c
index b62ec70e20f..287ea5043b7 100644
--- a/src/backend/commands/alter.c
+++ b/src/backend/commands/alter.c
@@ -296,7 +296,7 @@ AlterObjectRename_internal(Relation rel, Oid objectId, const char *new_name)
}
/*
- * Executes an ALTER OBJECT / RENAME TO statement. Based on the object
+ * Executes an ALTER OBJECT / RENAME TO statement. Based on the object
* type, the function appropriate to that type is executed.
*/
Oid
diff --git a/src/backend/commands/analyze.c b/src/backend/commands/analyze.c
index 5f9674699e2..38560d11273 100644
--- a/src/backend/commands/analyze.c
+++ b/src/backend/commands/analyze.c
@@ -408,7 +408,7 @@ do_analyze_rel(Relation onerel, VacuumStmt *vacstmt,
/*
* Open all indexes of the relation, and see if there are any analyzable
- * columns in the indexes. We do not analyze index columns if there was
+ * columns in the indexes. We do not analyze index columns if there was
* an explicit column list in the ANALYZE command, however. If we are
* doing a recursive scan, we don't want to touch the parent's indexes at
* all.
@@ -465,7 +465,7 @@ do_analyze_rel(Relation onerel, VacuumStmt *vacstmt,
/*
* Determine how many rows we need to sample, using the worst case from
- * all analyzable columns. We use a lower bound of 100 rows to avoid
+ * all analyzable columns. We use a lower bound of 100 rows to avoid
* possible overflow in Vitter's algorithm. (Note: that will also be the
* target in the corner case where there are no analyzable columns.)
*/
@@ -500,7 +500,7 @@ do_analyze_rel(Relation onerel, VacuumStmt *vacstmt,
&totalrows, &totaldeadrows);
/*
- * Compute the statistics. Temporary results during the calculations for
+ * Compute the statistics. Temporary results during the calculations for
* each column are stored in a child context. The calc routines are
* responsible to make sure that whatever they store into the VacAttrStats
* structure is allocated in anl_context.
@@ -557,7 +557,7 @@ do_analyze_rel(Relation onerel, VacuumStmt *vacstmt,
/*
* Emit the completed stats rows into pg_statistic, replacing any
- * previous statistics for the target columns. (If there are stats in
+ * previous statistics for the target columns. (If there are stats in
* pg_statistic for columns we didn't process, we leave them alone.)
*/
update_attstats(RelationGetRelid(onerel), inh,
@@ -609,7 +609,7 @@ do_analyze_rel(Relation onerel, VacuumStmt *vacstmt,
}
/*
- * Report ANALYZE to the stats collector, too. However, if doing
+ * Report ANALYZE to the stats collector, too. However, if doing
* inherited stats we shouldn't report, because the stats collector only
* tracks per-table stats.
*/
@@ -871,7 +871,7 @@ examine_attribute(Relation onerel, int attnum, Node *index_expr)
return NULL;
/*
- * Create the VacAttrStats struct. Note that we only have a copy of the
+ * Create the VacAttrStats struct. Note that we only have a copy of the
* fixed fields of the pg_attribute tuple.
*/
stats = (VacAttrStats *) palloc0(sizeof(VacAttrStats));
@@ -881,7 +881,7 @@ examine_attribute(Relation onerel, int attnum, Node *index_expr)
/*
* When analyzing an expression index, believe the expression tree's type
* not the column datatype --- the latter might be the opckeytype storage
- * type of the opclass, which is not interesting for our purposes. (Note:
+ * type of the opclass, which is not interesting for our purposes. (Note:
* if we did anything with non-expression index columns, we'd need to
* figure out where to get the correct type info from, but for now that's
* not a problem.) It's not clear whether anyone will care about the
@@ -920,7 +920,7 @@ examine_attribute(Relation onerel, int attnum, Node *index_expr)
}
/*
- * Call the type-specific typanalyze function. If none is specified, use
+ * Call the type-specific typanalyze function. If none is specified, use
* std_typanalyze().
*/
if (OidIsValid(stats->attrtype->typanalyze))
@@ -996,7 +996,7 @@ BlockSampler_Next(BlockSampler bs)
* If we are to skip, we should advance t (hence decrease K), and
* repeat the same probabilistic test for the next block. The naive
* implementation thus requires an anl_random_fract() call for each block
- * number. But we can reduce this to one anl_random_fract() call per
+ * number. But we can reduce this to one anl_random_fract() call per
* selected block, by noting that each time the while-test succeeds,
* we can reinterpret V as a uniform random number in the range 0 to p.
* Therefore, instead of choosing a new V, we just adjust p to be
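Concretely, the one-draw skip loop reads roughly as below, using the comment's variables (t = blocks already processed, K = blocks remaining, k = samples still needed); a sketch of the idea, not necessarily the exact code:

    double V = anl_random_fract();           /* one uniform draw in [0,1) */
    double p = 1.0 - (double) k / (double) K;  /* P(skip the current block) */

    while (V < p)
    {
        t++;                                 /* skip this block */
        K--;                                 /* keep K == N - t */
        /* V < p, so V is uniform in [0,p): shrink the cutoff instead of
         * drawing a new random number */
        p *= 1.0 - (double) k / (double) K;
    }
    /* falling out of the loop selects block t */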
@@ -1126,7 +1126,7 @@ acquire_sample_rows(Relation onerel, int elevel,
/*
* We ignore unused and redirect line pointers. DEAD line
* pointers should be counted as dead, because we need vacuum to
- * run to get rid of them. Note that this rule agrees with the
+ * run to get rid of them. Note that this rule agrees with the
* way that heap_page_prune() counts things.
*/
if (!ItemIdIsNormal(itemid))
@@ -1171,7 +1171,7 @@ acquire_sample_rows(Relation onerel, int elevel,
* is the safer option.
*
* A special case is that the inserting transaction might
- * be our own. In this case we should count and sample
+ * be our own. In this case we should count and sample
* the row, to accommodate users who load a table and
* analyze it in one transaction. (pgstat_report_analyze
* has to adjust the numbers we send to the stats
@@ -1213,7 +1213,7 @@ acquire_sample_rows(Relation onerel, int elevel,
/*
* The first targrows sample rows are simply copied into the
* reservoir. Then we start replacing tuples in the sample
- * until we reach the end of the relation. This algorithm is
+ * until we reach the end of the relation. This algorithm is
* from Jeff Vitter's paper (see full citation below). It
* works by repeatedly computing the number of tuples to skip
* before selecting a tuple, which replaces a randomly chosen
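The replacement step looks approximately like this sketch (anl_get_next_S computes Vitter's skip count; names follow the surrounding code, but treat details as approximate):

    if (numrows < targrows)
        rows[numrows++] = heap_copytuple(&targtuple);   /* fill reservoir */
    else
    {
        if (rowstoskip < 0)
            rowstoskip = anl_get_next_S(samplerows, targrows, &rstate);

        if (rowstoskip <= 0)
        {
            /* overwrite a slot chosen uniformly from the reservoir */
            int k = (int) (targrows * anl_random_fract());

            heap_freetuple(rows[k]);
            rows[k] = heap_copytuple(&targtuple);
        }
        rowstoskip -= 1;
    }
    samplerows += 1;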
@@ -1272,7 +1272,7 @@ acquire_sample_rows(Relation onerel, int elevel,
qsort((void *) rows, numrows, sizeof(HeapTuple), compare_rows);
/*
- * Estimate total numbers of rows in relation. For live rows, use
+ * Estimate total numbers of rows in relation. For live rows, use
* vac_estimate_reltuples; for dead rows, we have no source of old
* information, so we have to assume the density is the same in unseen
* pages as in the pages we scanned.
@@ -1595,7 +1595,7 @@ acquire_inherited_sample_rows(Relation onerel, int elevel,
* Statistics are stored in several places: the pg_class row for the
* relation has stats about the whole relation, and there is a
* pg_statistic row for each (non-system) attribute that has ever
- * been analyzed. The pg_class values are updated by VACUUM, not here.
+ * been analyzed. The pg_class values are updated by VACUUM, not here.
*
* pg_statistic rows are just added or updated normally. This means
* that pg_statistic will probably contain some deleted rows at the
@@ -1999,7 +1999,7 @@ compute_minimal_stats(VacAttrStatsP stats,
/*
* If the value is toasted, we want to detoast it just once to
* avoid repeated detoastings and resultant excess memory usage
- * during the comparisons. Also, check to see if the value is
+ * during the comparisons. Also, check to see if the value is
* excessively wide, and if so don't detoast at all --- just
* ignore the value.
*/
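Inside the per-value loop this amounts to roughly the following sketch (WIDTH_THRESHOLD is the "excessively wide" cutoff; assumes a varlena value):

    if (is_varlena)
    {
        total_width += VARSIZE_ANY(DatumGetPointer(value));

        /* too wide: don't detoast at all, just skip the value */
        if (toast_raw_datum_size(value) > WIDTH_THRESHOLD)
        {
            toowide_cnt++;
            continue;
        }

        /* detoast exactly once; all later comparisons use this copy */
        value = PointerGetDatum(PG_DETOAST_DATUM(value));
    }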
@@ -2119,7 +2119,7 @@ compute_minimal_stats(VacAttrStatsP stats,
* We assume (not very reliably!) that all the multiply-occurring
* values are reflected in the final track[] list, and the other
* nonnull values all appeared but once. (XXX this usually
- * results in a drastic overestimate of ndistinct. Can we do
+ * results in a drastic overestimate of ndistinct. Can we do
* any better?)
*----------
*/
@@ -2156,7 +2156,7 @@ compute_minimal_stats(VacAttrStatsP stats,
* Decide how many values are worth storing as most-common values. If
* we are able to generate a complete MCV list (all the values in the
* sample will fit, and we think these are all the ones in the table),
- * then do so. Otherwise, store only those values that are
+ * then do so. Otherwise, store only those values that are
* significantly more common than the (estimated) average. We set the
* threshold rather arbitrarily at 25% more than average, with at
* least 2 instances in the sample.
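The cutoff arithmetic amounts to roughly the following (values_cnt = sampled non-null values, ndistinct = estimated distinct values; a sketch of the rule, not the exact code):

    double avgcount = (double) values_cnt / (double) ndistinct;
    double mincount = avgcount * 1.25;      /* 25% above average */

    if (mincount < 2)
        mincount = 2;                       /* require at least 2 instances */

    /* keep a candidate as an MCV only if its count reaches mincount */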
@@ -2324,7 +2324,7 @@ compute_scalar_stats(VacAttrStatsP stats,
/*
* If the value is toasted, we want to detoast it just once to
* avoid repeated detoastings and resultant excess memory usage
- * during the comparisons. Also, check to see if the value is
+ * during the comparisons. Also, check to see if the value is
* excessively wide, and if so don't detoast at all --- just
* ignore the value.
*/
@@ -2369,7 +2369,7 @@ compute_scalar_stats(VacAttrStatsP stats,
* accumulate ordering-correlation statistics.
*
* To determine which are most common, we first have to count the
- * number of duplicates of each value. The duplicates are adjacent in
+ * number of duplicates of each value. The duplicates are adjacent in
* the sorted list, so a brute-force approach is to compare successive
* datum values until we find two that are not equal. However, that
* requires N-1 invocations of the datum comparison routine, which are
@@ -2378,7 +2378,7 @@ compute_scalar_stats(VacAttrStatsP stats,
* that are adjacent in the sorted order; otherwise it could not know
* that it's ordered the pair correctly.) We exploit this by having
* compare_scalars remember the highest tupno index that each
- * ScalarItem has been found equal to. At the end of the sort, a
+ * ScalarItem has been found equal to. At the end of the sort, a
* ScalarItem's tupnoLink will still point to itself if and only if it
* is the last item of its group of duplicates (since the group will
* be ordered by tupno).
@@ -2498,7 +2498,7 @@ compute_scalar_stats(VacAttrStatsP stats,
* Decide how many values are worth storing as most-common values. If
* we are able to generate a complete MCV list (all the values in the
* sample will fit, and we think these are all the ones in the table),
- * then do so. Otherwise, store only those values that are
+ * then do so. Otherwise, store only those values that are
* significantly more common than the (estimated) average. We set the
* threshold rather arbitrarily at 25% more than average, with at
* least 2 instances in the sample. Also, we won't suppress values
@@ -2653,7 +2653,7 @@ compute_scalar_stats(VacAttrStatsP stats,
/*
* The object of this loop is to copy the first and last values[]
- * entries along with evenly-spaced values in between. So the
+ * entries along with evenly-spaced values in between. So the
* i'th value is values[(i * (nvals - 1)) / (num_hist - 1)]. But
* computing that subscript directly risks integer overflow when
* the stats target is more than a couple thousand. Instead we
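The hunk cuts the comment off, but the overflow-free technique it goes on to describe tracks the subscript's integral and fractional parts separately, roughly as in this sketch (emit_histogram_entry is a hypothetical stand-in for copying the entry into the histogram array):

    int delta = (nvals - 1) / (num_hist - 1);      /* integral step */
    int deltafrac = (nvals - 1) % (num_hist - 1);  /* fractional step */
    int pos = 0;
    int posfrac = 0;
    int i;

    for (i = 0; i < num_hist; i++)
    {
        /* here pos == (i * (nvals - 1)) / (num_hist - 1), with no
         * intermediate product that could overflow */
        emit_histogram_entry(values[pos]);         /* hypothetical helper */
        pos += delta;
        posfrac += deltafrac;
        if (posfrac >= num_hist - 1)
        {
            pos++;                                 /* carry the overflow */
            posfrac -= num_hist - 1;
        }
    }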
@@ -2764,7 +2764,7 @@ compute_scalar_stats(VacAttrStatsP stats,
* qsort_arg comparator for sorting ScalarItems
*
* Aside from sorting the items, we update the tupnoLink[] array
- * whenever two ScalarItems are found to contain equal datums. The array
+ * whenever two ScalarItems are found to contain equal datums. The array
* is indexed by tupno; for each ScalarItem, it contains the highest
* tupno that that item's datum has been found to be equal to. This allows
* us to avoid additional comparisons in compute_scalar_stats().
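A sketch of the comparator's equal-datum bookkeeping, where da/ta and db/tb are the two items' datum and tupno and cxt carries the sort-support state and the tupnoLink array (treat details as approximate):

    compare = ApplySortComparator(da, false, db, false, cxt->ssup);
    if (compare != 0)
        return compare;

    /* equal datums: remember the highest tupno each one was matched with */
    if (cxt->tupnoLink[ta] < tb)
        cxt->tupnoLink[ta] = tb;
    if (cxt->tupnoLink[tb] < ta)
        cxt->tupnoLink[tb] = ta;

    /* break ties by tupno so duplicate groups come out in tupno order */
    return ta - tb;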
diff --git a/src/backend/commands/async.c b/src/backend/commands/async.c
index 3fa96f7b52b..8a6884bbdb6 100644
--- a/src/backend/commands/async.c
+++ b/src/backend/commands/async.c
@@ -151,7 +151,7 @@
*
* This struct declaration has the maximal length, but in a real queue entry
* the data area is only big enough for the actual channel and payload strings
- * (each null-terminated). AsyncQueueEntryEmptySize is the minimum possible
+ * (each null-terminated). AsyncQueueEntryEmptySize is the minimum possible
* entry size, if both channel and payload strings are empty (but note it
* doesn't include alignment padding).
*
@@ -267,7 +267,7 @@ static SlruCtlData AsyncCtlData;
*
* The most data we can have in the queue at a time is QUEUE_MAX_PAGE/2
* pages, because more than that would confuse slru.c into thinking there
- * was a wraparound condition. With the default BLCKSZ this means there
+ * was a wraparound condition. With the default BLCKSZ this means there
* can be up to 8GB of queued-and-not-read data.
*
* Note: it's possible to redefine QUEUE_MAX_PAGE with a smaller multiple of
@@ -397,7 +397,7 @@ asyncQueuePagePrecedes(int p, int q)
int diff;
/*
- * We have to compare modulo (QUEUE_MAX_PAGE+1)/2. Both inputs should be
+ * We have to compare modulo (QUEUE_MAX_PAGE+1)/2. Both inputs should be
* in the range 0..QUEUE_MAX_PAGE.
*/
Assert(p >= 0 && p <= QUEUE_MAX_PAGE);
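The rest of the function (below this hunk) folds the signed page difference back into the half-range, along these lines:

    diff = p - q;
    if (diff >= ((QUEUE_MAX_PAGE + 1) / 2))
        diff -= QUEUE_MAX_PAGE + 1;
    else if (diff < -((QUEUE_MAX_PAGE + 1) / 2))
        diff += QUEUE_MAX_PAGE + 1;
    return diff < 0;        /* true if p logically precedes q */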
@@ -828,7 +828,7 @@ PreCommit_Notify(void)
while (nextNotify != NULL)
{
/*
- * Add the pending notifications to the queue. We acquire and
+ * Add the pending notifications to the queue. We acquire and
* release AsyncQueueLock once per page, which might be overkill
* but it does allow readers to get in while we're doing this.
*
@@ -1044,12 +1044,12 @@ Exec_UnlistenAllCommit(void)
* The reason that this is not done in AtCommit_Notify is that there is
* a nonzero chance of errors here (for example, encoding conversion errors
* while trying to format messages to our frontend). An error during
- * AtCommit_Notify would be a PANIC condition. The timing is also arranged
+ * AtCommit_Notify would be a PANIC condition. The timing is also arranged
* to ensure that a transaction's self-notifies are delivered to the frontend
* before it gets the terminating ReadyForQuery message.
*
* Note that we send signals and process the queue even if the transaction
- * eventually aborted. This is because we need to clean out whatever got
+ * eventually aborted. This is because we need to clean out whatever got
* added to the queue.
*
* NOTE: we are outside of any transaction here.
@@ -1139,7 +1139,7 @@ IsListeningOn(const char *channel)
/*
* Remove our entry from the listeners array when we are no longer listening
- * on any channel. NB: must not fail if we're already not listening.
+ * on any channel. NB: must not fail if we're already not listening.
*/
static void
asyncQueueUnregister(void)
@@ -1181,7 +1181,7 @@ asyncQueueIsFull(void)
/*
* The queue is full if creating a new head page would create a page that
* logically precedes the current global tail pointer, ie, the head
- * pointer would wrap around compared to the tail. We cannot create such
+ * pointer would wrap around compared to the tail. We cannot create such
* a head page for fear of confusing slru.c. For safety we round the tail
* pointer back to a segment boundary (compare the truncation logic in
* asyncQueueAdvanceTail).
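Combined with the page-precedes test above, the fullness check is roughly this sketch (QUEUE_POS_PAGE, QUEUE_HEAD, QUEUE_TAIL, and SLRU_PAGES_PER_SEGMENT as defined elsewhere in the file):

    int nexthead;
    int boundary;

    nexthead = QUEUE_POS_PAGE(QUEUE_HEAD) + 1;     /* page the next entry
                                                    * might have to go on */
    if (nexthead > QUEUE_MAX_PAGE)
        nexthead = 0;                              /* wrap around */

    boundary = QUEUE_POS_PAGE(QUEUE_TAIL);
    boundary -= boundary % SLRU_PAGES_PER_SEGMENT; /* round tail down to a
                                                    * segment boundary */
    return asyncQueuePagePrecedes(nexthead, boundary);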
@@ -1200,7 +1200,7 @@ asyncQueueIsFull(void)
/*
* Advance the QueuePosition to the next entry, assuming that the current
- * entry is of length entryLength. If we jump to a new page the function
+ * entry is of length entryLength. If we jump to a new page the function
* returns true, else false.
*/
static bool
@@ -1269,7 +1269,7 @@ asyncQueueNotificationToEntry(Notification *n, AsyncQueueEntry *qe)
* the last byte which simplifies reading the page later.
*
* We are passed the list cell containing the next notification to write
- * and return the first still-unwritten cell back. Eventually we will return
+ * and return the first still-unwritten cell. Eventually we will return
* NULL indicating all is done.
*
* We are holding AsyncQueueLock already from the caller and grab AsyncCtlLock
@@ -1346,7 +1346,7 @@ asyncQueueAddEntries(ListCell *nextNotify)
* Page is full, so we're done here, but first fill the next page
* with zeroes. The reason to do this is to ensure that slru.c's
* idea of the head page is always the same as ours, which avoids
- * boundary problems in SimpleLruTruncate. The test in
+ * boundary problems in SimpleLruTruncate. The test in
* asyncQueueIsFull() ensured that there is room to create this
* page without overrunning the queue.
*/
@@ -1520,7 +1520,7 @@ AtAbort_Notify(void)
/*
* If we LISTEN but then roll back the transaction after PreCommit_Notify,
* we have registered as a listener but have not made any entry in
- * listenChannels. In that case, deregister again.
+ * listenChannels. In that case, deregister again.
*/
if (amRegisteredListener && listenChannels == NIL)
asyncQueueUnregister();
@@ -1773,7 +1773,7 @@ EnableNotifyInterrupt(void)
* is disabled until the next EnableNotifyInterrupt call.
*
* The PROCSIG_CATCHUP_INTERRUPT signal handler also needs to call this,
- * so as to prevent conflicts if one signal interrupts the other. So we
+ * so as to prevent conflicts if one signal interrupts the other. So we
* must return the previous state of the flag.
*/
bool
@@ -1868,7 +1868,7 @@ asyncQueueReadAllNotifications(void)
/*
* We copy the data from SLRU into a local buffer, so as to avoid
* holding the AsyncCtlLock while we are examining the entries and
- * possibly transmitting them to our frontend. Copy only the part
+ * possibly transmitting them to our frontend. Copy only the part
* of the page we will actually inspect.
*/
slotno = SimpleLruReadPage_ReadOnly(AsyncCtl, curpage,
@@ -1942,7 +1942,7 @@ asyncQueueReadAllNotifications(void)
* and deliver relevant ones to my frontend.
*
* The current page must have been fetched into page_buffer from shared
- * memory. (We could access the page right in shared memory, but that
+ * memory. (We could access the page right in shared memory, but that
* would imply holding the AsyncCtlLock throughout this routine.)
*
* We stop if we reach the "stop" position, or reach a notification from an
@@ -2148,7 +2148,7 @@ NotifyMyFrontEnd(const char *channel, const char *payload, int32 srcPid)
pq_endmessage(&buf);
/*
- * NOTE: we do not do pq_flush() here. For a self-notify, it will
+ * NOTE: we do not do pq_flush() here. For a self-notify, it will
* happen at the end of the transaction, and for incoming notifies
* ProcessIncomingNotify will do it after finding all the notifies.
*/
diff --git a/src/backend/commands/cluster.c b/src/backend/commands/cluster.c
index c903f0eba7c..6f87170946c 100644
--- a/src/backend/commands/cluster.c
+++ b/src/backend/commands/cluster.c
@@ -1,7 +1,7 @@
/*-------------------------------------------------------------------------
*
* cluster.c
- * CLUSTER a table on an index. This is now also used for VACUUM FULL.
+ * CLUSTER a table on an index. This is now also used for VACUUM FULL.
*
* There is hardly anything left of Paul Brown's original implementation...
*
@@ -99,7 +99,7 @@ static void reform_and_rewrite_tuple(HeapTuple tuple,
*
* The single-relation case does not have any such overhead.
*
- * We also allow a relation to be specified without index. In that case,
+ * We also allow a relation to be specified without an index. In that case,
* the indisclustered bit will be looked up, and an ERROR will be thrown
* if there is no index with the bit set.
*---------------------------------------------------------------------------
@@ -214,7 +214,7 @@ cluster(ClusterStmt *stmt, bool isTopLevel)
ALLOCSET_DEFAULT_MAXSIZE);
/*
- * Build the list of relations to cluster. Note that this lives in
+ * Build the list of relations to cluster. Note that this lives in
* cluster_context.
*/
rvs = get_tables_to_cluster(cluster_context);
@@ -252,7 +252,7 @@ cluster(ClusterStmt *stmt, bool isTopLevel)
*
* This clusters the table by creating a new, clustered table and
* swapping the relfilenodes of the new table and the old table, so
- * the OID of the original table is preserved. Thus we do not lose
+ * the OID of the original table is preserved. Thus we do not lose
* GRANT, inheritance, or references to this table (this was a bug
* in releases through 7.3).
*
@@ -261,7 +261,7 @@ cluster(ClusterStmt *stmt, bool isTopLevel)
* them incrementally while we load the table.
*
* If indexOid is InvalidOid, the table will be rewritten in physical order
- * instead of index order. This is the new implementation of VACUUM FULL,
+ * instead of index order. This is the new implementation of VACUUM FULL,
* and error messages should refer to the operation as VACUUM not CLUSTER.
*/
void
@@ -276,7 +276,7 @@ cluster_rel(Oid tableOid, Oid indexOid, bool recheck, bool verbose,
/*
* We grab exclusive access to the target rel and index for the duration
- * of the transaction. (This is redundant for the single-transaction
+ * of the transaction. (This is redundant for the single-transaction
* case, since cluster() already did it.) The index lock is taken inside
* check_index_is_clusterable.
*/
@@ -311,7 +311,7 @@ cluster_rel(Oid tableOid, Oid indexOid, bool recheck, bool verbose,
* check in the "recheck" case is appropriate (which currently means
* somebody is executing a database-wide CLUSTER), because there is
* another check in cluster() which will stop any attempt to cluster
- * remote temp tables by name. There is another check in cluster_rel
+ * remote temp tables by name. There is another check in cluster_rel
* which is redundant, but we leave it for extra safety.
*/
if (RELATION_IS_OTHER_TEMP(OldHeap))
@@ -404,7 +404,7 @@ cluster_rel(Oid tableOid, Oid indexOid, bool recheck, bool verbose,
/*
* All predicate locks on the tuples or pages are about to be made
- * invalid, because we move tuples around. Promote them to relation
+ * invalid, because we move tuples around. Promote them to relation
* locks. Predicate locks on indexes will be promoted when they are
* reindexed.
*/
@@ -453,7 +453,7 @@ check_index_is_clusterable(Relation OldHeap, Oid indexOid, bool recheck, LOCKMOD
/*
* Disallow clustering on incomplete indexes (those that might not index
- * every row of the relation). We could relax this by making a separate
+ * every row of the relation). We could relax this by making a separate
* seqscan pass over the table to copy the missing rows, but that seems
* expensive and tedious.
*/
@@ -658,14 +658,14 @@ make_new_heap(Oid OIDOldHeap, Oid NewTableSpace)
/*
* Create the new heap, using a temporary name in the same namespace as
- * the existing table. NOTE: there is some risk of collision with user
+ * the existing table. NOTE: there is some risk of collision with user
* relnames. Working around this seems more trouble than it's worth; in
* particular, we can't create the new heap in a different namespace from
* the old, or we will have problems with the TEMP status of temp tables.
*
* Note: the new heap is not a shared relation, even if we are rebuilding
* a shared rel. However, we do make the new heap mapped if the source is
- * mapped. This simplifies swap_relation_files, and is absolutely
+ * mapped. This simplifies swap_relation_files, and is absolutely
* necessary for rebuilding pg_class, for reasons explained there.
*/
snprintf(NewHeapName, sizeof(NewHeapName), "pg_temp_%u", OIDOldHeap);
@@ -800,12 +800,12 @@ copy_heap_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex,
/*
* If the OldHeap has a toast table, get lock on the toast table to keep
- * it from being vacuumed. This is needed because autovacuum processes
+ * it from being vacuumed. This is needed because autovacuum processes
* toast tables independently of their main tables, with no lock on the
- * latter. If an autovacuum were to start on the toast table after we
+ * latter. If an autovacuum were to start on the toast table after we
* compute our OldestXmin below, it would use a later OldestXmin, and then
* possibly remove as DEAD toast tuples belonging to main tuples we think
- * are only RECENTLY_DEAD. Then we'd fail while trying to copy those
+ * are only RECENTLY_DEAD. Then we'd fail while trying to copy those
* tuples.
*
* We don't need to open the toast relation here, just lock it. The lock
@@ -826,7 +826,7 @@ copy_heap_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex,
/*
* If both tables have TOAST tables, perform toast swap by content. It is
* possible that the old table has a toast table but the new one doesn't,
- * if toastable columns have been dropped. In that case we have to do
+ * if toastable columns have been dropped. In that case we have to do
* swap by links. This is okay because swap by content is only essential
* for system catalogs, and we don't support schema changes for them.
*/
@@ -845,7 +845,7 @@ copy_heap_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex,
*
* Note that we must hold NewHeap open until we are done writing data,
* since the relcache will not guarantee to remember this setting once
- * the relation is closed. Also, this technique depends on the fact
+ * the relation is closed. Also, this technique depends on the fact
* that no one will try to read from the NewHeap until after we've
* finished writing it and swapping the rels --- otherwise they could
* follow the toast pointers to the wrong place. (It would actually
@@ -941,7 +941,7 @@ copy_heap_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex,
/*
* Scan through the OldHeap, either in OldIndex order or sequentially;
* copy each tuple into the NewHeap, or transiently to the tuplesort
- * module. Note that we don't bother sorting dead tuples (they won't get
+ * module. Note that we don't bother sorting dead tuples (they won't get
* to the new table anyway).
*/
for (;;)
@@ -1231,7 +1231,7 @@ swap_relation_files(Oid r1, Oid r2, bool target_is_pg_class,
NameStr(relform2->relname), r2);
/*
- * Send replacement mappings to relmapper. Note these won't actually
+ * Send replacement mappings to relmapper. Note these won't actually
* take effect until CommandCounterIncrement.
*/
RelationMapUpdateMap(r1, relfilenode2, relform1->relisshared, false);
@@ -1441,7 +1441,7 @@ swap_relation_files(Oid r1, Oid r2, bool target_is_pg_class,
* non-transient relation.)
*
* Caution: the placement of this step interacts with the decision to
- * handle toast rels by recursion. When we are trying to rebuild pg_class
+ * handle toast rels by recursion. When we are trying to rebuild pg_class
* itself, the smgr close on pg_class must happen after all accesses in
* this function.
*/
@@ -1488,9 +1488,9 @@ finish_heap_swap(Oid OIDOldHeap, Oid OIDNewHeap,
/*
* Rebuild each index on the relation (but not the toast table, which is
- * all-new at this point). It is important to do this before the DROP
+ * all-new at this point). It is important to do this before the DROP
* step because if we are processing a system catalog that will be used
- * during DROP, we want to have its indexes available. There is no
+ * during DROP, we want to have its indexes available. There is no
* advantage to the other order anyway because this is all transactional,
* so no chance to reclaim disk space before commit. We do not need a
* final CommandCounterIncrement() because reindex_relation does it.
diff --git a/src/backend/commands/constraint.c b/src/backend/commands/constraint.c
index f2cdc27a1d0..285b323fb5c 100644
--- a/src/backend/commands/constraint.c
+++ b/src/backend/commands/constraint.c
@@ -50,7 +50,7 @@ unique_key_recheck(PG_FUNCTION_ARGS)
bool isnull[INDEX_MAX_KEYS];
/*
- * Make sure this is being called as an AFTER ROW trigger. Note:
+ * Make sure this is being called as an AFTER ROW trigger. Note:
* translatable error strings are shared with ri_triggers.c, so resist the
* temptation to fold the function name into them.
*/
@@ -87,7 +87,7 @@ unique_key_recheck(PG_FUNCTION_ARGS)
* If the new_row is now dead (ie, inserted and then deleted within our
* transaction), we can skip the check. However, we have to be careful,
* because this trigger gets queued only in response to index insertions;
- * which means it does not get queued for HOT updates. The row we are
+ * which means it does not get queued for HOT updates. The row we are
* called for might now be dead, but have a live HOT child, in which case
* we still need to make the check. Therefore we have to use
* heap_hot_search, not just HeapTupleSatisfiesVisibility as is done in
diff --git a/src/backend/commands/copy.c b/src/backend/commands/copy.c
index 082f70fdc38..378b102a9b7 100644
--- a/src/backend/commands/copy.c
+++ b/src/backend/commands/copy.c
@@ -187,7 +187,7 @@ typedef struct CopyStateData
/*
* Finally, raw_buf holds raw data read from the data source (file or
- * client connection). CopyReadLine parses this data sufficiently to
+ * client connection). CopyReadLine parses this data sufficiently to
* locate line boundaries, then transfers the data to line_buf and
* converts it. Note: we guarantee that there is a \0 at
* raw_buf[raw_buf_len].
@@ -213,7 +213,7 @@ typedef struct
* function call overhead in tight COPY loops.
*
* We must use "if (1)" because the usual "do {...} while(0)" wrapper would
- * prevent the continue/break processing from working. We end the "if (1)"
+ * prevent the continue/break processing from working. We end the "if (1)"
* with "else ((void) 0)" to ensure the "if" does not unintentionally match
* any "else" in the calling code, and to avoid any compiler warnings about
* empty statements. See http://www.cit.gu.edu.au/~anthony/info/C/C.macros.
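A self-contained illustration of the pattern; the names here are hypothetical, not the actual COPY macros:

    #define SKIP_IF_EXHAUSTED() \
    if (1) \
    { \
        if (buf_index >= buf_len)       /* hypothetical buffer state */ \
        { \
            need_more_data = true; \
            continue;   /* restarts the *caller's* loop iteration */ \
        } \
    } else ((void) 0)

Inside a do {...} while (0) wrapper, the continue would merely jump to the while (0) test and fall out of the macro instead of restarting the enclosing loop. The trailing else ((void) 0) consumes the semicolon at the call site and keeps the if from pairing with an unrelated else in the surrounding code.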
@@ -547,7 +547,7 @@ CopySendEndOfRow(CopyState cstate)
* CopyGetData reads data from the source (file or frontend)
*
* We attempt to read at least minread, and at most maxread, bytes from
- * the source. The actual number of bytes read is returned; if this is
+ * the source. The actual number of bytes read is returned; if this is
* less than minread, EOF was detected.
*
* Note: when copying from the frontend, we expect a proper EOF mark per
@@ -764,7 +764,7 @@ CopyLoadRawBuf(CopyState cstate)
* we also support copying the output of an arbitrary SELECT query.
*
* If <pipe> is false, transfer is between the table and the file named
- * <filename>. Otherwise, transfer is between the table and our regular
+ * <filename>. Otherwise, transfer is between the table and our regular
* input/output stream. The latter could be either stdin/stdout or a
* socket, depending on whether we're running under Postmaster control.
*
@@ -1271,7 +1271,7 @@ BeginCopy(bool is_from,
errmsg("COPY (SELECT) WITH OIDS is not supported")));
/*
- * Run parse analysis and rewrite. Note this also acquires sufficient
+ * Run parse analysis and rewrite. Note this also acquires sufficient
* locks on the source table(s).
*
* Because the parser and planner tend to scribble on their input, we
@@ -1681,7 +1681,7 @@ CopyTo(CopyState cstate)
* Create a temporary memory context that we can reset once per row to
* recover palloc'd memory. This avoids any problems with leaks inside
* datatype output routines, and should be faster than retail pfree's
- * anyway. (We don't need a whole econtext as CopyFrom does.)
+ * anyway. (We don't need a whole econtext as CopyFrom does.)
*/
cstate->rowcontext = AllocSetContextCreate(CurrentMemoryContext,
"COPY TO",
@@ -2837,7 +2837,7 @@ NextCopyFrom(CopyState cstate, ExprContext *econtext,
* if client chooses to send that now.
*
* Note that we MUST NOT try to read more data in an old-protocol
- * copy, since there is no protocol-level EOF marker then. We
+ * copy, since there is no protocol-level EOF marker then. We
* could go either way for copy from file, but choose to throw
* error if there's data after the EOF marker, for consistency
* with the new-protocol case.
@@ -2899,7 +2899,7 @@ NextCopyFrom(CopyState cstate, ExprContext *econtext,
/*
* Now compute and insert any defaults available for the columns not
- * provided by the input data. Anything not processed here or above will
+ * provided by the input data. Anything not processed here or above will
* remain NULL.
*/
for (i = 0; i < num_defaults; i++)
@@ -2934,7 +2934,7 @@ EndCopyFrom(CopyState cstate)
* server encoding.
*
* Result is true if read was terminated by EOF, false if terminated
- * by newline. The terminating newline or EOF marker is not included
+ * by newline. The terminating newline or EOF marker is not included
* in the final value of line_buf.
*/
static bool
@@ -3090,7 +3090,7 @@ CopyReadLineText(CopyState cstate)
* of read-ahead and avoid the many calls to
* IF_NEED_REFILL_AND_NOT_EOF_CONTINUE(), but the COPY_OLD_FE protocol
* does not allow us to read too far ahead or we might read into the
- * next data, so we read-ahead only as far we know we can. One
+ * next data, so we read ahead only as far as we know we can. One
* optimization would be to read ahead four bytes here if
* cstate->copy_dest != COPY_OLD_FE, but it hardly seems worth it,
* considering the size of the buffer.
@@ -3100,7 +3100,7 @@ CopyReadLineText(CopyState cstate)
REFILL_LINEBUF;
/*
- * Try to read some more data. This will certainly reset
+ * Try to read some more data. This will certainly reset
* raw_buf_index to zero, and raw_buf_ptr must go with it.
*/
if (!CopyLoadRawBuf(cstate))
@@ -3158,7 +3158,7 @@ CopyReadLineText(CopyState cstate)
/*
* Updating the line count for embedded CR and/or LF chars is
* necessarily a little fragile - this test is probably about the
- * best we can do. (XXX it's arguable whether we should do this
+ * best we can do. (XXX it's arguable whether we should do this
* at all --- is cur_lineno a physical or logical count?)
*/
if (in_quote && c == (cstate->eol_type == EOL_NL ? '\n' : '\r'))
@@ -3337,7 +3337,7 @@ CopyReadLineText(CopyState cstate)
* after a backslash is special, so we skip over that second
* character too. If we didn't do that \\. would be
* considered an end-of-copy marker, while in non-CSV mode it is a
- * literal backslash followed by a period. In CSV mode,
+ * literal backslash followed by a period. In CSV mode,
* backslashes are not special, so we want to process the
* character after the backslash just like a normal character,
* so we don't increment in those cases.
@@ -3440,7 +3440,7 @@ CopyReadAttributesText(CopyState cstate)
/*
* The de-escaped attributes will certainly not be longer than the input
* data line, so we can just force attribute_buf to be large enough and
- * then transfer data without any checks for enough space. We need to do
+ * then transfer data without any checks for enough space. We need to do
* it this way because enlarging attribute_buf mid-stream would invalidate
* pointers already stored into cstate->raw_fields[].
*/
@@ -3670,7 +3670,7 @@ CopyReadAttributesCSV(CopyState cstate)
/*
* The de-escaped attributes will certainly not be longer than the input
* data line, so we can just force attribute_buf to be large enough and
- * then transfer data without any checks for enough space. We need to do
+ * then transfer data without any checks for enough space. We need to do
* it this way because enlarging attribute_buf mid-stream would invalidate
* pointers already stored into cstate->raw_fields[].
*/
@@ -3885,7 +3885,7 @@ CopyAttributeOutText(CopyState cstate, char *string)
/*
* We have to grovel through the string searching for control characters
* and instances of the delimiter character. In most cases, though, these
- * are infrequent. To avoid overhead from calling CopySendData once per
+ * are infrequent. To avoid overhead from calling CopySendData once per
* character, we dump out all characters between escaped characters in a
* single call. The loop invariant is that the data from "start" to "ptr"
* can be sent literally, but hasn't yet been.
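The invariant is typically maintained with a small flush helper plus per-character logic, roughly as sketched here for the delimiter/backslash case:

    /* flush the literal run accumulated so far (sketch of the helper) */
    #define DUMPSOFAR() \
        do { \
            if (ptr > start) \
                CopySendData(cstate, start, ptr - start); \
        } while (0)

    if (c == delimc || c == '\\')
    {
        DUMPSOFAR();                 /* send everything in [start, ptr) */
        CopySendChar(cstate, '\\');  /* escape it; the character itself
                                      * stays in the next literal run */
        start = ptr;
    }
    ptr++;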
diff --git a/src/backend/commands/createas.c b/src/backend/commands/createas.c
index a3509d8c2a3..65c63411600 100644
--- a/src/backend/commands/createas.c
+++ b/src/backend/commands/createas.c
@@ -104,7 +104,7 @@ ExecCreateTableAs(CreateTableAsStmt *stmt, const char *queryString,
/*
* For materialized views, lock down security-restricted operations and
- * arrange to make GUC variable changes local to this command. This is
+ * arrange to make GUC variable changes local to this command. This is
* not necessary for security, but this keeps the behavior similar to
* REFRESH MATERIALIZED VIEW. Otherwise, one could create a materialized
* view not possible to refresh.
@@ -124,9 +124,9 @@ ExecCreateTableAs(CreateTableAsStmt *stmt, const char *queryString,
* plancache.c.
*
* Because the rewriter and planner tend to scribble on the input, we make
- * a preliminary copy of the source querytree. This prevents problems in
+ * a preliminary copy of the source querytree. This prevents problems in
* the case that CTAS is in a portal or plpgsql function and is executed
- * repeatedly. (See also the same hack in EXPLAIN and PREPARE.)
+ * repeatedly. (See also the same hack in EXPLAIN and PREPARE.)
*/
rewritten = QueryRewrite((Query *) copyObject(query));
@@ -141,7 +141,7 @@ ExecCreateTableAs(CreateTableAsStmt *stmt, const char *queryString,
/*
* Use a snapshot with an updated command ID to ensure this query sees
- * results of any previously executed queries. (This could only matter if
+ * results of any previously executed queries. (This could only matter if
* the planner executed an allegedly-stable function that changed the
* database contents, but let's do it anyway to be parallel to the EXPLAIN
* code path.)
diff --git a/src/backend/commands/dbcommands.c b/src/backend/commands/dbcommands.c
index 003cb75e152..f4f275037d8 100644
--- a/src/backend/commands/dbcommands.c
+++ b/src/backend/commands/dbcommands.c
@@ -265,7 +265,7 @@ createdb(const CreatedbStmt *stmt)
* To create a database, must have createdb privilege and must be able to
* become the target role (this does not imply that the target role itself
* must have createdb privilege). The latter provision guards against
- * "giveaway" attacks. Note that a superuser will always have both of
+ * "giveaway" attacks. Note that a superuser will always have both of
* these privileges a fortiori.
*/
if (!have_createdb_privilege())
@@ -397,7 +397,7 @@ createdb(const CreatedbStmt *stmt)
/*
* If we are trying to change the default tablespace of the template,
* we require that the template not have any files in the new default
- * tablespace. This is necessary because otherwise the copied
+ * tablespace. This is necessary because otherwise the copied
* database would contain pg_class rows that refer to its default
* tablespace both explicitly (by OID) and implicitly (as zero), which
* would cause problems. For example another CREATE DATABASE using
@@ -433,7 +433,7 @@ createdb(const CreatedbStmt *stmt)
}
/*
- * Check for db name conflict. This is just to give a more friendly error
+ * Check for db name conflict. This is just to give a more friendly error
* message than "unique index violation". There's a race condition but
* we're willing to accept the less friendly message in that case.
*/
@@ -498,7 +498,7 @@ createdb(const CreatedbStmt *stmt)
/*
* We deliberately set datacl to default (NULL), rather than copying it
- * from the template database. Copying it would be a bad idea when the
+ * from the template database. Copying it would be a bad idea when the
* owner is not the same as the template's owner.
*/
new_record_nulls[Anum_pg_database_datacl - 1] = true;
@@ -550,7 +550,7 @@ createdb(const CreatedbStmt *stmt)
*
* Inconsistency of this sort is inherent to all SnapshotNow scans, unless
* some lock is held to prevent concurrent updates of the rows being
- * sought. There should be a generic fix for that, but in the meantime
+ * sought. There should be a generic fix for that, but in the meantime
* it's worth fixing this case in particular because we are doing very
* heavyweight operations within the scan, so that the elapsed time for
* the scan is vastly longer than for most other catalog scans. That
@@ -1172,7 +1172,7 @@ movedb(const char *dbname, const char *tblspcname)
/*
* Use an ENSURE block to make sure we remove the debris if the copy fails
- * (eg, due to out-of-disk-space). This is not a 100% solution, because
+ * (eg, due to out-of-disk-space). This is not a 100% solution, because
* of the possibility of failure during transaction commit, but it should
* handle most scenarios.
*/
@@ -1659,7 +1659,7 @@ get_db_info(const char *name, LOCKMODE lockmode,
LockSharedObject(DatabaseRelationId, dbOid, 0, lockmode);
/*
- * And now, re-fetch the tuple by OID. If it's still there and still
+ * And now, re-fetch the tuple by OID. If it's still there and still
* the same name, we win; else, drop the lock and loop back to try
* again.
*/
diff --git a/src/backend/commands/define.c b/src/backend/commands/define.c
index 9fa222f5fc0..bbbd17dfc4e 100644
--- a/src/backend/commands/define.c
+++ b/src/backend/commands/define.c
@@ -183,7 +183,7 @@ defGetInt64(DefElem *def)
/*
* Values too large for int4 will be represented as Float
- * constants by the lexer. Accept these if they are valid int8
+ * constants by the lexer. Accept these if they are valid int8
* strings.
*/
return DatumGetInt64(DirectFunctionCall1(int8in,
diff --git a/src/backend/commands/event_trigger.c b/src/backend/commands/event_trigger.c
index f0df7301d02..5c2fd0f8598 100644
--- a/src/backend/commands/event_trigger.c
+++ b/src/backend/commands/event_trigger.c
@@ -606,7 +606,7 @@ filter_event_trigger(const char **tag, EventTriggerCacheItem *item)
}
/*
- * Setup for running triggers for the given event. Return value is an OID list
+ * Setup for running triggers for the given event. Return value is an OID list
* of functions to run; if there are any, trigdata is filled with an
* appropriate EventTriggerData for them to receive.
*/
@@ -625,7 +625,7 @@ EventTriggerCommonSetup(Node *parsetree,
* invoked to match up exactly with the list that CREATE EVENT TRIGGER
* accepts. This debugging cross-check will throw an error if this
* function is invoked for a command tag that CREATE EVENT TRIGGER won't
- * accept. (Unfortunately, there doesn't seem to be any simple, automated
+ * accept. (Unfortunately, there doesn't seem to be any simple, automated
* way to verify that CREATE EVENT TRIGGER doesn't accept extra stuff that
* never reaches this control point.)
*
@@ -655,7 +655,7 @@ EventTriggerCommonSetup(Node *parsetree,
/*
* Filter list of event triggers by command tag, and copy them into our
- * memory context. Once we start running the command trigers, or indeed
+ * memory context. Once we start running the command triggers, or indeed
* once we do anything at all that touches the catalogs, an invalidation
* might leave cachelist pointing at garbage, so we must do this before we
* can do much else.
@@ -783,7 +783,7 @@ EventTriggerSQLDrop(Node *parsetree)
return;
/*
- * Use current state to determine whether this event fires at all. If
+ * Use current state to determine whether this event fires at all. If
* there are no triggers for the sql_drop event, then we don't have
* anything to do here. Note that dropped object collection is disabled
* if this is the case, so even if we were to try to run, the list would
@@ -798,7 +798,7 @@ EventTriggerSQLDrop(Node *parsetree)
&trigdata);
/*
- * Nothing to do if run list is empty. Note this shouldn't happen,
+ * Nothing to do if run list is empty. Note this shouldn't happen,
* because if there are no sql_drop events, then objects-to-drop wouldn't
* have been collected in the first place and we would have returned above.
*/
@@ -813,7 +813,7 @@ EventTriggerSQLDrop(Node *parsetree)
/*
* Make sure pg_event_trigger_dropped_objects only works when running
- * these triggers. Use PG_TRY to ensure in_sql_drop is reset even when
+ * these triggers. Use PG_TRY to ensure in_sql_drop is reset even when
* one trigger fails. (This is perhaps not necessary, as the currentState
* variable will be removed shortly by our caller, but it seems better to
* play safe.)
@@ -1053,7 +1053,7 @@ EventTriggerBeginCompleteQuery(void)
* returned false previously.
*
* Note: this might be called in the PG_CATCH block of a failing transaction,
- * so be wary of running anything unnecessary. (In particular, it's probably
+ * so be wary of running anything unnecessary. (In particular, it's probably
* unwise to try to allocate memory.)
*/
void
diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c
index 91bea517ec8..e51c08b85dd 100644
--- a/src/backend/commands/explain.c
+++ b/src/backend/commands/explain.c
@@ -191,7 +191,7 @@ ExplainQuery(ExplainStmt *stmt, const char *queryString,
* plancache.c.
*
* Because the rewriter and planner tend to scribble on the input, we make
- * a preliminary copy of the source querytree. This prevents problems in
+ * a preliminary copy of the source querytree. This prevents problems in
* the case that the EXPLAIN is in a portal or plpgsql function and is
* executed repeatedly. (See also the same hack in DECLARE CURSOR and
* PREPARE.) XXX FIXME someday.
@@ -540,7 +540,7 @@ ExplainOnePlan(PlannedStmt *plannedstmt, IntoClause *into, ExplainState *es,
* convert a QueryDesc's plan tree to text and append it to es->str
*
* The caller should have set up the options fields of *es, as well as
- * initializing the output buffer es->str. Other fields in *es are
+ * initializing the output buffer es->str. Other fields in *es are
* initialized here.
*
* NB: will not work on utility statements
@@ -2060,7 +2060,7 @@ show_modifytable_info(ModifyTableState *mtstate, ExplainState *es)
/*
* If the first target relation is a foreign table, call its FDW to
- * display whatever additional fields it wants to. For now, we ignore the
+ * display whatever additional fields it wants to. For now, we ignore the
* possibility of other targets being foreign tables, although the API for
* ExplainForeignModify is designed to allow them to be processed.
*/
@@ -2559,7 +2559,7 @@ ExplainXMLTag(const char *tagname, int flags, ExplainState *es)
/*
* Emit a JSON line ending.
*
- * JSON requires a comma after each property but the last. To facilitate this,
+ * JSON requires a comma after each property but the last. To facilitate this,
* in JSON format, the text emitted for each property begins just prior to the
* preceding line-break (and comma, if applicable).
*/
@@ -2580,7 +2580,7 @@ ExplainJSONLineEnding(ExplainState *es)
* YAML lines are ordinarily indented by two spaces per indentation level.
* The text emitted for each property begins just prior to the preceding
* line-break, except for the first property in an unlabelled group, for which
- * it begins immediately after the "- " that introduces the group. The first
+ * it begins immediately after the "- " that introduces the group. The first
* property of the group appears on the same line as the opening "- ".
*/
static void
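
The two hunks above describe how EXPLAIN's JSON and YAML emitters defer separators: each property writes the comma and line break that precede it, so a trailing comma can never be produced. A minimal standalone C sketch of the JSON variant (a hypothetical helper, not the backend's ExplainProperty code):

    #include <stdio.h>
    #include <string.h>

    /* Append one property, emitting ",\n" *before* it rather than after.
     * "first" tracks whether any property was already written, so the
     * final property is never followed by a dangling comma. */
    static void json_property(char *buf, int *first, const char *k, const char *v)
    {
        if (*first)
            *first = 0;
        else
            strcat(buf, ",\n");
        strcat(buf, "  \"");
        strcat(buf, k);
        strcat(buf, "\": \"");
        strcat(buf, v);
        strcat(buf, "\"");
    }

    int main(void)
    {
        char buf[256] = "{\n";
        int first = 1;

        json_property(buf, &first, "Node Type", "Seq Scan");
        json_property(buf, &first, "Relation Name", "foo");
        strcat(buf, "\n}\n");
        fputs(buf, stdout);
        return 0;
    }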
diff --git a/src/backend/commands/extension.c b/src/backend/commands/extension.c
index 08e8cade6b1..37f8df1bf73 100644
--- a/src/backend/commands/extension.c
+++ b/src/backend/commands/extension.c
@@ -108,7 +108,7 @@ static void ApplyExtensionUpdates(Oid extensionOid,
/*
* get_extension_oid - given an extension name, look up the OID
*
- * If missing_ok is false, throw an error if extension name not found. If
+ * If missing_ok is false, throw an error if extension name not found. If
* true, just return InvalidOid.
*/
Oid
@@ -257,9 +257,9 @@ check_valid_extension_name(const char *extensionname)
errdetail("Extension names must not contain \"--\".")));
/*
- * No leading or trailing dash either. (We could probably allow this, but
+ * No leading or trailing dash either. (We could probably allow this, but
* it would require much care in filename parsing and would make filenames
- * visually if not formally ambiguous. Since there's no real-world use
+ * visually if not formally ambiguous. Since there's no real-world use
* case, let's just forbid it.)
*/
if (extensionname[0] == '-' || extensionname[namelen - 1] == '-')
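
A standalone sketch of the naming rules this hunk enforces (no embedded "--", which separates versions in script file names, and no leading or trailing dash), assuming a simplified boolean interface rather than the backend's ereport calls:

    #include <stdio.h>
    #include <string.h>
    #include <stdbool.h>

    /* Reject names that would make script file names ambiguous. */
    static bool extension_name_ok(const char *name)
    {
        size_t len = strlen(name);

        if (len == 0)
            return false;
        if (strstr(name, "--") != NULL)        /* the version separator */
            return false;
        if (name[0] == '-' || name[len - 1] == '-')
            return false;
        return true;
    }

    int main(void)
    {
        const char *tests[] = {"hstore", "my--ext", "-bad", "bad-", "a-b"};
        for (int i = 0; i < 5; i++)
            printf("%-8s %s\n", tests[i],
                   extension_name_ok(tests[i]) ? "ok" : "rejected");
        return 0;
    }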
@@ -435,7 +435,7 @@ get_extension_script_filename(ExtensionControlFile *control,
/*
* Parse contents of primary or auxiliary control file, and fill in
- * fields of *control. We parse primary file if version == NULL,
+ * fields of *control. We parse primary file if version == NULL,
* else the optional auxiliary file for that version.
*
* Control files are supposed to be very short, half a dozen lines,
@@ -677,7 +677,7 @@ read_extension_script_file(const ExtensionControlFile *control,
* filename is used only to report errors.
*
* Note: it's tempting to just use SPI to execute the string, but that does
- * not work very well. The really serious problem is that SPI will parse,
+ * not work very well. The really serious problem is that SPI will parse,
* analyze, and plan the whole string before executing any of it; of course
* this fails if there are any plannable statements referring to objects
* created earlier in the script. A lesser annoyance is that SPI insists
@@ -852,7 +852,7 @@ execute_extension_script(Oid extensionOid, ExtensionControlFile *control,
/*
* Set creating_extension and related variables so that
* recordDependencyOnCurrentExtension and other functions do the right
- * things. On failure, ensure we reset these variables.
+ * things. On failure, ensure we reset these variables.
*/
creating_extension = true;
CurrentExtensionObject = extensionOid;
@@ -1096,7 +1096,7 @@ identify_update_path(ExtensionControlFile *control,
* is still good.
*
* Result is a List of names of versions to transition through (the initial
- * version is *not* included). Returns NIL if no such path.
+ * version is *not* included). Returns NIL if no such path.
*/
static List *
find_update_path(List *evi_list,
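
find_update_path is described as returning the chain of versions to pass through, excluding the starting version, or NIL when the target is unreachable. A toy standalone equivalent using breadth-first search over a hard-coded version graph (the real code searches the graph implied by the installed update scripts):

    #include <stdio.h>
    #include <string.h>

    #define NVER 4

    /* Toy version graph: edge[i][j] means an update script i--j exists. */
    static const char *ver[NVER] = {"1.0", "1.1", "1.2", "2.0"};
    static const int edge[NVER][NVER] = {
        {0, 1, 0, 0},   /* 1.0 -> 1.1 */
        {0, 0, 1, 0},   /* 1.1 -> 1.2 */
        {0, 0, 0, 1},   /* 1.2 -> 2.0 */
        {0, 0, 0, 0},
    };

    /* BFS from src to dst; print the path excluding src, or "no path". */
    static void toy_find_update_path(int src, int dst)
    {
        int prev[NVER], queue[NVER], head = 0, tail = 0;

        memset(prev, -1, sizeof(prev));
        prev[src] = src;
        queue[tail++] = src;
        while (head < tail)
        {
            int cur = queue[head++];
            for (int next = 0; next < NVER; next++)
                if (edge[cur][next] && prev[next] < 0)
                {
                    prev[next] = cur;
                    queue[tail++] = next;
                }
        }
        if (prev[dst] < 0)
        {
            printf("no path\n");
            return;
        }
        /* reconstruct by walking back, then print forward */
        int path[NVER], n = 0;
        for (int v = dst; v != src; v = prev[v])
            path[n++] = v;
        while (n > 0)
            printf("%s ", ver[path[--n]]);
        printf("\n");
    }

    int main(void)
    {
        toy_find_update_path(0, 3);   /* prints: 1.1 1.2 2.0 */
        toy_find_update_path(3, 0);   /* prints: no path */
        return 0;
    }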
@@ -1197,7 +1197,7 @@ CreateExtension(CreateExtensionStmt *stmt)
check_valid_extension_name(stmt->extname);
/*
- * Check for duplicate extension name. The unique index on
+ * Check for duplicate extension name. The unique index on
* pg_extension.extname would catch this anyway, and serves as a backstop
* in case of race conditions; but this is a friendlier error message, and
* besides we need a check to support IF NOT EXISTS.
@@ -1364,7 +1364,7 @@ CreateExtension(CreateExtensionStmt *stmt)
{
/*
* The extension is not relocatable and the author gave us a schema
- * for it. We create the schema here if it does not already exist.
+ * for it. We create the schema here if it does not already exist.
*/
schemaName = control->schema;
schemaOid = get_namespace_oid(schemaName, true);
@@ -1593,7 +1593,7 @@ RemoveExtensionById(Oid extId)
* might write "DROP EXTENSION foo" in foo's own script files, as because
* errors in dependency management in extension script files could give
* rise to cases where an extension is dropped as a result of recursing
- * from some contained object. Because of that, we must test for the case
+ * from some contained object. Because of that, we must test for the case
* here, not at some higher level of the DROP EXTENSION command.
*/
if (extId == CurrentExtensionObject)
@@ -1624,7 +1624,7 @@ RemoveExtensionById(Oid extId)
/*
* This function lists the available extensions (one row per primary control
- * file in the control directory). We parse each control file and report the
+ * file in the control directory). We parse each control file and report the
* interesting fields.
*
* The system view pg_available_extensions provides a user interface to this
@@ -1733,7 +1733,7 @@ pg_available_extensions(PG_FUNCTION_ARGS)
/*
* This function lists the available extension versions (one row per
- * extension installation script). For each version, we parse the related
+ * extension installation script). For each version, we parse the related
* control file(s) and report the interesting fields.
*
* The system view pg_available_extension_versions provides a user interface
@@ -2521,7 +2521,7 @@ AlterExtensionNamespace(List *names, const char *newschema)
Oid dep_oldNspOid;
/*
- * Ignore non-membership dependencies. (Currently, the only other
+ * Ignore non-membership dependencies. (Currently, the only other
* case we could see here is a normal dependency from another
* extension.)
*/
@@ -2933,7 +2933,7 @@ ExecAlterExtensionContentsStmt(AlterExtensionContentsStmt *stmt)
/*
* Prevent a schema from being added to an extension if the schema
- * contains the extension. That would create a dependency loop.
+ * contains the extension. That would create a dependency loop.
*/
if (object.classId == NamespaceRelationId &&
object.objectId == get_extension_schema(extension.objectId))
diff --git a/src/backend/commands/foreigncmds.c b/src/backend/commands/foreigncmds.c
index 056bcae8a10..596db1ad764 100644
--- a/src/backend/commands/foreigncmds.c
+++ b/src/backend/commands/foreigncmds.c
@@ -81,7 +81,7 @@ optionListToArray(List *options)
/*
- * Transform a list of DefElem into text array format. This is substantially
+ * Transform a list of DefElem into text array format. This is substantially
* the same thing as optionListToArray(), except we recognize SET/ADD/DROP
* actions for modifying an existing list of options, which is passed in
* Datum form as oldOptions. Also, if fdwvalidator isn't InvalidOid
@@ -125,7 +125,7 @@ transformGenericOptions(Oid catalogId,
/*
* It is possible to perform multiple SET/DROP actions on the same
- * option. The standard permits this, as long as the options to be
+ * option. The standard permits this, as long as the options to be
* added are unique. Note that an unspecified action is taken to be
* ADD.
*/
diff --git a/src/backend/commands/functioncmds.c b/src/backend/commands/functioncmds.c
index 30e163d4229..eef78498ba6 100644
--- a/src/backend/commands/functioncmds.c
+++ b/src/backend/commands/functioncmds.c
@@ -74,7 +74,7 @@
* allow a shell type to be used, or even created if the specified return type
* doesn't exist yet. (Without this, there's no way to define the I/O procs
* for a new type.) But SQL function creation won't cope, so error out if
- * the target language is SQL. (We do this here, not in the SQL-function
+ * the target language is SQL. (We do this here, not in the SQL-function
* validator, so as not to produce a NOTICE and then an ERROR for the same
* condition.)
*/
@@ -425,7 +425,7 @@ examine_parameter_list(List *parameters, Oid languageOid,
* FUNCTION and ALTER FUNCTION and return it via one of the out
* parameters. Returns true if the passed option was recognized. If
* the out parameter we were going to assign to points to non-NULL,
- * raise a duplicate-clause error. (We don't try to detect duplicate
+ * raise a duplicate-clause error. (We don't try to detect duplicate
* SET parameters though --- if you're redundant, the last one wins.)
*/
static bool
@@ -734,7 +734,7 @@ interpret_AS_clause(Oid languageOid, const char *languageName,
{
/*
* For "C" language, store the file name in probin and, when given,
- * the link symbol name in prosrc. If link symbol is omitted,
+ * the link symbol name in prosrc. If link symbol is omitted,
* substitute procedure name. We also allow link symbol to be
* specified as "-", since that was the habit in PG versions before
* 8.4, and there might be dump files out there that don't translate
@@ -1363,7 +1363,7 @@ CreateCast(CreateCastStmt *stmt)
/*
* Restricting the volatility of a cast function may or may not be a
* good idea in the abstract, but it definitely breaks many old
- * user-defined types. Disable this check --- tgl 2/1/03
+ * user-defined types. Disable this check --- tgl 2/1/03
*/
#ifdef NOT_USED
if (procstruct->provolatile == PROVOLATILE_VOLATILE)
@@ -1427,7 +1427,7 @@ CreateCast(CreateCastStmt *stmt)
/*
* We know that composite, enum and array types are never binary-
- * compatible with each other. They all have OIDs embedded in them.
+ * compatible with each other. They all have OIDs embedded in them.
*
* Theoretically you could build a user-defined base type that is
* binary-compatible with a composite, enum, or array type. But we
@@ -1456,7 +1456,7 @@ CreateCast(CreateCastStmt *stmt)
* We also disallow creating binary-compatibility casts involving
* domains. Casting from a domain to its base type is already
* allowed, and casting the other way ought to go through domain
- * coercion to permit constraint checking. Again, if you're intent on
+ * coercion to permit constraint checking. Again, if you're intent on
* having your own semantics for that, create a no-op cast function.
*
* NOTE: if we were to relax this, the above checks for composites
diff --git a/src/backend/commands/indexcmds.c b/src/backend/commands/indexcmds.c
index cf25a9e2ef8..b344c4f7c59 100644
--- a/src/backend/commands/indexcmds.c
+++ b/src/backend/commands/indexcmds.c
@@ -102,7 +102,7 @@ static void RangeVarCallbackForReindexIndex(const RangeVar *relation,
* concrete benefit for core types.
* When a comparison or exclusion operator has a polymorphic input type, the
- * actual input types must also match. This defends against the possibility
+ * actual input types must also match. This defends against the possibility
* that operators could vary behavior in response to get_fn_expr_argtype().
* At present, this hazard is theoretical: check_exclusion_constraint() and
* all core index access methods decline to set fn_expr for such calls.
@@ -434,7 +434,7 @@ DefineIndex(Oid relationId,
}
/*
- * Force shared indexes into the pg_global tablespace. This is a bit of a
+ * Force shared indexes into the pg_global tablespace. This is a bit of a
* hack but seems simpler than marking them in the BKI commands. On the
* other hand, if it's not shared, don't allow it to be placed there.
*/
@@ -629,7 +629,7 @@ DefineIndex(Oid relationId,
/*
* For a concurrent build, it's important to make the catalog entries
* visible to other transactions before we start to build the index. That
- * will prevent them from making incompatible HOT updates. The new index
+ * will prevent them from making incompatible HOT updates. The new index
* will be marked not indisready and not indisvalid, so that no one else
* tries to either insert into it or use it for queries.
*
@@ -660,8 +660,8 @@ DefineIndex(Oid relationId,
* Now we must wait until no running transaction could have the table open
* with the old list of indexes. To do this, inquire which xacts
* currently would conflict with ShareLock on the table -- ie, which ones
- * have a lock that permits writing the table. Then wait for each of
- * these xacts to commit or abort. Note we do not need to worry about
+ * have a lock that permits writing the table. Then wait for each of
+ * these xacts to commit or abort. Note we do not need to worry about
* xacts that open the table for writing after this point; they will see
* the new index when they open it.
*
@@ -672,7 +672,7 @@ DefineIndex(Oid relationId,
* error out properly.
*
* Note: GetLockConflicts() never reports our own xid, hence we need not
- * check for that. Also, prepared xacts are not reported, which is fine
+ * check for that. Also, prepared xacts are not reported, which is fine
* since they certainly aren't going to do anything more.
*/
old_lockholders = GetLockConflicts(&heaplocktag, ShareLock);
@@ -689,7 +689,7 @@ DefineIndex(Oid relationId,
* indexes. We have waited out all the existing transactions and any new
* transaction will have the new index in its list, but the index is still
* marked as "not-ready-for-inserts". The index is consulted while
- * deciding HOT-safety though. This arrangement ensures that no new HOT
+ * deciding HOT-safety though. This arrangement ensures that no new HOT
* chains can be created where the new tuple and the old tuple in the
* chain have different index keys.
*
@@ -755,7 +755,7 @@ DefineIndex(Oid relationId,
/*
* Now take the "reference snapshot" that will be used by validate_index()
- * to filter candidate tuples. Beware! There might still be snapshots in
+ * to filter candidate tuples. Beware! There might still be snapshots in
* use that treat some transaction as in-progress that our reference
* snapshot treats as committed. If such a recently-committed transaction
* deleted tuples in the table, we will not include them in the index; yet
@@ -780,7 +780,7 @@ DefineIndex(Oid relationId,
* Drop the reference snapshot. We must do this before waiting out other
* snapshot holders, else we will deadlock against other processes also
* doing CREATE INDEX CONCURRENTLY, which would see our snapshot as one
- * they must wait for. But first, save the snapshot's xmin to use as
+ * they must wait for. But first, save the snapshot's xmin to use as
* limitXmin for GetCurrentVirtualXIDs().
*/
limitXmin = snapshot->xmin;
@@ -790,7 +790,7 @@ DefineIndex(Oid relationId,
/*
* The index is now valid in the sense that it contains all currently
- * interesting tuples. But since it might not contain tuples deleted just
+ * interesting tuples. But since it might not contain tuples deleted just
* before the reference snap was taken, we have to wait out any
* transactions that might have older snapshots. Obtain a list of VXIDs
* of such transactions, and wait for them individually.
@@ -805,7 +805,7 @@ DefineIndex(Oid relationId,
*
* We can also exclude autovacuum processes and processes running manual
* lazy VACUUMs, because they won't be fazed by missing index entries
- * either. (Manual ANALYZEs, however, can't be excluded because they
+ * either. (Manual ANALYZEs, however, can't be excluded because they
* might be within transactions that are going to do arbitrary operations
* later.)
*
@@ -894,7 +894,7 @@ CheckMutability(Expr *expr)
{
/*
* First run the expression through the planner. This has a couple of
- * important consequences. First, function default arguments will get
+ * important consequences. First, function default arguments will get
* inserted, which may affect volatility (consider "default now()").
* Second, inline-able functions will get inlined, which may allow us to
* conclude that the function is really less volatile than it's marked. As
@@ -917,7 +917,7 @@ CheckMutability(Expr *expr)
* Checks that the given partial-index predicate is valid.
*
* This used to also constrain the form of the predicate to forms that
- * indxpath.c could do something with. However, that seems overly
+ * indxpath.c could do something with. However, that seems overly
* restrictive. One useful application of partial indexes is to apply
* a UNIQUE constraint across a subset of a table, and in that scenario
* any evaluatable predicate will work. So accept any predicate here
@@ -1028,7 +1028,7 @@ ComputeIndexAttrs(IndexInfo *indexInfo,
attcollation = exprCollation(expr);
/*
- * Strip any top-level COLLATE clause. This ensures that we treat
+ * Strip any top-level COLLATE clause. This ensures that we treat
* "x COLLATE y" and "(x COLLATE y)" alike.
*/
while (IsA(expr, CollateExpr))
@@ -1234,7 +1234,7 @@ GetIndexOpClass(List *opclass, Oid attrType,
* 2000/07/30
*
* Release 7.2 renames timestamp_ops to timestamptz_ops, so suppress that
- * too for a while. I'm starting to think we need a better approach. tgl
+ * too for a while. I'm starting to think we need a better approach. tgl
* 2000/10/01
*
* Release 8.0 removes bigbox_ops (which was dead code for a long while
@@ -1303,7 +1303,7 @@ GetIndexOpClass(List *opclass, Oid attrType,
NameListToString(opclass), accessMethodName)));
/*
- * Verify that the index operator class accepts this datatype. Note we
+ * Verify that the index operator class accepts this datatype. Note we
* will accept binary compatibility.
*/
opClassId = HeapTupleGetOid(tuple);
@@ -1324,7 +1324,7 @@ GetIndexOpClass(List *opclass, Oid attrType,
* GetDefaultOpClass
*
* Given the OIDs of a datatype and an access method, find the default
- * operator class, if any. Returns InvalidOid if there is none.
+ * operator class, if any. Returns InvalidOid if there is none.
*/
Oid
GetDefaultOpClass(Oid type_id, Oid am_id)
@@ -1419,7 +1419,7 @@ GetDefaultOpClass(Oid type_id, Oid am_id)
* Create a name for an implicitly created index, sequence, constraint, etc.
*
* The parameters are typically: the original table name, the original field
- * name, and a "type" string (such as "seq" or "pkey"). The field name
+ * name, and a "type" string (such as "seq" or "pkey"). The field name
* and/or type can be NULL if not relevant.
*
* The result is a palloc'd string.
@@ -1427,7 +1427,7 @@ GetDefaultOpClass(Oid type_id, Oid am_id)
* The basic result we want is "name1_name2_label", omitting "_name2" or
* "_label" when those parameters are NULL. However, we must generate
* a name with less than NAMEDATALEN characters! So, we truncate one or
- * both names if necessary to make a short-enough string. The label part
+ * both names if necessary to make a short-enough string. The label part
* is never truncated (so it had better be reasonably short).
*
* The caller is responsible for checking uniqueness of the generated
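
The truncation scheme described here can be sketched standalone: trim whichever of the two names is currently longer until "name1_name2_label" fits, never touching the label. A simplified version, ignoring the multibyte-character care the real code needs:

    #include <stdio.h>
    #include <string.h>

    #define NAMEDATALEN 64   /* PostgreSQL identifier limit (incl. terminator) */

    /* Build "name1_name2_label" within NAMEDATALEN-1 characters. */
    static void make_object_name(char *dst, const char *name1,
                                 const char *name2, const char *label)
    {
        int n1 = (int) strlen(name1);
        int n2 = name2 ? (int) strlen(name2) : 0;
        int overhead = (name2 ? 1 : 0) + (label ? 1 + (int) strlen(label) : 0);
        int avail = NAMEDATALEN - 1 - overhead;

        while (n1 + n2 > avail)       /* trim the longer name first */
        {
            if (n1 > n2)
                n1--;
            else
                n2--;
        }
        sprintf(dst, "%.*s%s%.*s%s%s",
                n1, name1,
                name2 ? "_" : "", n2, name2 ? name2 : "",
                label ? "_" : "", label ? label : "");
    }

    int main(void)
    {
        char buf[NAMEDATALEN];
        make_object_name(buf, "some_very_long_table_name_that_keeps_going_on",
                         "wide_column_name", "pkey");
        printf("%s (%zu chars)\n", buf, strlen(buf));  /* 63 chars */
        return 0;
    }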
@@ -1622,7 +1622,7 @@ ChooseIndexNameAddition(List *colnames)
/*
* Select the actual names to be used for the columns of an index, given the
- * list of IndexElems for the columns. This is mostly about ensuring the
+ * list of IndexElems for the columns. This is mostly about ensuring the
* names are unique so we don't get a conflicting-attribute-names error.
*
* Returns a List of plain strings (char *, not String nodes).
@@ -1733,7 +1733,7 @@ RangeVarCallbackForReindexIndex(const RangeVar *relation,
/*
* If the relation does exist, check whether it's an index. But note that
* the relation might have been dropped between the time we did the name
- * lookup and now. In that case, there's nothing to do.
+ * lookup and now. In that case, there's nothing to do.
*/
relkind = get_rel_relkind(relId);
if (!relkind)
diff --git a/src/backend/commands/matview.c b/src/backend/commands/matview.c
index 93648a7f4cc..f2edece89cd 100644
--- a/src/backend/commands/matview.c
+++ b/src/backend/commands/matview.c
@@ -262,7 +262,7 @@ refresh_matview_datafill(DestReceiver *dest, Query *query,
/*
* Use a snapshot with an updated command ID to ensure this query sees
- * results of any previously executed queries. (This could only matter if
+ * results of any previously executed queries. (This could only matter if
* the planner executed an allegedly-stable function that changed the
* database contents, but let's do it anyway to be safe.)
*/
diff --git a/src/backend/commands/opclasscmds.c b/src/backend/commands/opclasscmds.c
index f2d78ef6632..bec341893db 100644
--- a/src/backend/commands/opclasscmds.c
+++ b/src/backend/commands/opclasscmds.c
@@ -385,7 +385,7 @@ DefineOpClass(CreateOpClassStmt *stmt)
* A minimum expectation therefore is that the caller have execute
* privilege with grant option. Since we don't have a way to make the
* opclass go away if the grant option is revoked, we choose instead to
- * require ownership of the functions. It's also not entirely clear what
+ * require ownership of the functions. It's also not entirely clear what
* permissions should be required on the datatype, but ownership seems
* like a safe choice.
*
@@ -667,7 +667,7 @@ DefineOpClass(CreateOpClassStmt *stmt)
opclassoid, procedures, false);
/*
- * Create dependencies for the opclass proper. Note: we do not create a
+ * Create dependencies for the opclass proper. Note: we do not create a
* dependency link to the AM, because we don't currently support DROP
* ACCESS METHOD.
*/
@@ -1084,7 +1084,7 @@ assignOperTypes(OpFamilyMember *member, Oid amoid, Oid typeoid)
if (OidIsValid(member->sortfamily))
{
/*
- * Ordering op, check index supports that. (We could perhaps also
+ * Ordering op, check index supports that. (We could perhaps also
* check that the operator returns a type supported by the sortfamily,
* but that seems more trouble than it's worth here. If it does not,
* the operator will never be matchable to any ORDER BY clause, but no
@@ -1213,7 +1213,7 @@ assignProcTypes(OpFamilyMember *member, Oid amoid, Oid typeoid)
/*
* The default in CREATE OPERATOR CLASS is to use the class' opcintype as
- * lefttype and righttype. In CREATE or ALTER OPERATOR FAMILY, opcintype
+ * lefttype and righttype. In CREATE or ALTER OPERATOR FAMILY, opcintype
* isn't available, so make the user specify the types.
*/
if (!OidIsValid(member->lefttype))
diff --git a/src/backend/commands/operatorcmds.c b/src/backend/commands/operatorcmds.c
index 4692b087bef..40e1e593ea2 100644
--- a/src/backend/commands/operatorcmds.c
+++ b/src/backend/commands/operatorcmds.c
@@ -211,7 +211,7 @@ DefineOperator(List *names, List *parameters)
functionOid = LookupFuncName(functionName, nargs, typeId, false);
/*
- * We require EXECUTE rights for the function. This isn't strictly
+ * We require EXECUTE rights for the function. This isn't strictly
* necessary, since EXECUTE will be checked at any attempted use of the
* operator, but it seems like a good idea anyway.
*/
diff --git a/src/backend/commands/portalcmds.c b/src/backend/commands/portalcmds.c
index 5c3f42cb716..ba31d137c86 100644
--- a/src/backend/commands/portalcmds.c
+++ b/src/backend/commands/portalcmds.c
@@ -4,7 +4,7 @@
* Utility commands affecting portals (that is, SQL cursor commands)
*
* Note: see also tcop/pquery.c, which implements portal operations for
- * the FE/BE protocol. This module uses pquery.c for some operations.
+ * the FE/BE protocol. This module uses pquery.c for some operations.
* And both modules depend on utils/mmgr/portalmem.c, which controls
* storage management for portals (but doesn't run any queries in them).
*
@@ -89,7 +89,7 @@ PerformCursorOpen(PlannedStmt *stmt, ParamListInfo params,
/*----------
* Also copy the outer portal's parameter list into the inner portal's
- * memory context. We want to pass down the parameter values in case we
+ * memory context. We want to pass down the parameter values in case we
* had a command like
* DECLARE c CURSOR FOR SELECT ... WHERE foo = $1
* This will have been parsed using the outer parameter set and the
@@ -106,7 +106,7 @@ PerformCursorOpen(PlannedStmt *stmt, ParamListInfo params,
*
* If the user didn't specify a SCROLL type, allow or disallow scrolling
* based on whether it would require any additional runtime overhead to do
- * so. Also, we disallow scrolling for FOR UPDATE cursors.
+ * so. Also, we disallow scrolling for FOR UPDATE cursors.
*/
portal->cursorOptions = cstmt->options;
if (!(portal->cursorOptions & (CURSOR_OPT_SCROLL | CURSOR_OPT_NO_SCROLL)))
@@ -365,7 +365,7 @@ PersistHoldablePortal(Portal portal)
ExecutorRewind(queryDesc);
/*
- * Change the destination to output to the tuplestore. Note we tell
+ * Change the destination to output to the tuplestore. Note we tell
* the tuplestore receiver to detoast all data passed through it.
*/
queryDesc->dest = CreateDestReceiver(DestTuplestore);
diff --git a/src/backend/commands/prepare.c b/src/backend/commands/prepare.c
index 62208eb9950..6481576ad6f 100644
--- a/src/backend/commands/prepare.c
+++ b/src/backend/commands/prepare.c
@@ -174,7 +174,7 @@ PrepareQuery(PrepareStmt *stmt, const char *queryString)
* ExecuteQuery --- implement the 'EXECUTE' utility statement.
*
* This code also supports CREATE TABLE ... AS EXECUTE. That case is
- * indicated by passing a non-null intoClause. The DestReceiver is already
+ * indicated by passing a non-null intoClause. The DestReceiver is already
* set up correctly for CREATE TABLE AS, but we still have to make a few
* other adjustments here.
*
@@ -211,7 +211,7 @@ ExecuteQuery(ExecuteStmt *stmt, IntoClause *intoClause,
{
/*
* Need an EState to evaluate parameters; must not delete it till end
- * of query, in case parameters are pass-by-reference. Note that the
+ * of query, in case parameters are pass-by-reference. Note that the
* passed-in "params" could possibly be referenced in the parameter
* expressions.
*/
@@ -237,7 +237,7 @@ ExecuteQuery(ExecuteStmt *stmt, IntoClause *intoClause,
/*
* For CREATE TABLE ... AS EXECUTE, we must verify that the prepared
* statement is one that produces tuples. Currently we insist that it be
- * a plain old SELECT. In future we might consider supporting other
+ * a plain old SELECT. In future we might consider supporting other
* things such as INSERT ... RETURNING, but there are a couple of issues
* to be settled first, notably how WITH NO DATA should be handled in such
* a case (do we really want to suppress execution?) and how to pass down
@@ -529,7 +529,7 @@ FetchPreparedStatementResultDesc(PreparedStatement *stmt)
/*
* Given a prepared statement that returns tuples, extract the query
- * targetlist. Returns NIL if the statement doesn't have a determinable
+ * targetlist. Returns NIL if the statement doesn't have a determinable
* targetlist.
*
* Note: this is pretty ugly, but since it's only used in corner cases like
@@ -644,7 +644,7 @@ ExplainExecuteQuery(ExecuteStmt *execstmt, IntoClause *into, ExplainState *es,
{
/*
* Need an EState to evaluate parameters; must not delete it till end
- * of query, in case parameters are pass-by-reference. Note that the
+ * of query, in case parameters are pass-by-reference. Note that the
* passed-in "params" could possibly be referenced in the parameter
* expressions.
*/
diff --git a/src/backend/commands/proclang.c b/src/backend/commands/proclang.c
index 6e4c682072d..8dd451a9b13 100644
--- a/src/backend/commands/proclang.c
+++ b/src/backend/commands/proclang.c
@@ -260,7 +260,7 @@ CreateProceduralLanguage(CreatePLangStmt *stmt)
if (funcrettype != LANGUAGE_HANDLEROID)
{
/*
- * We allow OPAQUE just so we can load old dump files. When we
+ * We allow OPAQUE just so we can load old dump files. When we
* see a handler function declared OPAQUE, change it to
* LANGUAGE_HANDLER. (This is probably obsolete and removable?)
*/
diff --git a/src/backend/commands/schemacmds.c b/src/backend/commands/schemacmds.c
index 1d13ba0d304..de9c51f8a66 100644
--- a/src/backend/commands/schemacmds.c
+++ b/src/backend/commands/schemacmds.c
@@ -67,7 +67,7 @@ CreateSchemaCommand(CreateSchemaStmt *stmt, const char *queryString)
* To create a schema, must have schema-create privilege on the current
* database and must be able to become the target role (this does not
* imply that the target role itself must have create-schema privilege).
- * The latter provision guards against "giveaway" attacks. Note that a
+ * The latter provision guards against "giveaway" attacks. Note that a
* superuser will always have both of these privileges a fortiori.
*/
aclresult = pg_database_aclcheck(MyDatabaseId, saved_uid, ACL_CREATE);
@@ -132,7 +132,7 @@ CreateSchemaCommand(CreateSchemaStmt *stmt, const char *queryString)
/*
* Examine the list of commands embedded in the CREATE SCHEMA command, and
* reorganize them into a sequentially executable order with no forward
- * references. Note that the result is still a list of raw parsetrees ---
+ * references. Note that the result is still a list of raw parsetrees ---
* we cannot, in general, run parse analysis on one statement until we
* have actually executed the prior ones.
*/
diff --git a/src/backend/commands/sequence.c b/src/backend/commands/sequence.c
index ddfaf3bd293..a8248dee309 100644
--- a/src/backend/commands/sequence.c
+++ b/src/backend/commands/sequence.c
@@ -61,7 +61,7 @@ typedef struct sequence_magic
* rely on the relcache, since it's only, well, a cache, and may decide to
* discard entries.)
*
- * XXX We use linear search to find pre-existing SeqTable entries. This is
+ * XXX We use linear search to find pre-existing SeqTable entries. This is
* good when only a small number of sequences are touched in a session, but
* would suck with many different sequences. Perhaps use a hashtable someday.
*/
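
A minimal standalone sketch of the linear-search session cache the XXX comment describes, assuming invented field names; each lookup walks the list, so it is cheap for a few sequences and O(n) per call for many:

    #include <stdio.h>
    #include <stdlib.h>

    typedef unsigned int Oid;

    /* Session-local cache entry, one per sequence touched. */
    typedef struct SeqTableData
    {
        struct SeqTableData *next;   /* singly linked list */
        Oid         relid;
        long        cached;          /* last value dispensed from cache */
    } SeqTableData;

    static SeqTableData *seqtab = NULL;

    /* Linear search, creating the entry on first touch. */
    static SeqTableData *lookup_sequence(Oid relid)
    {
        SeqTableData *elm;

        for (elm = seqtab; elm != NULL; elm = elm->next)
            if (elm->relid == relid)
                return elm;

        elm = malloc(sizeof(SeqTableData));
        elm->relid = relid;
        elm->cached = 0;
        elm->next = seqtab;          /* push on front of list */
        seqtab = elm;
        return elm;
    }

    int main(void)
    {
        lookup_sequence(16384)->cached = 41;
        printf("%ld\n", lookup_sequence(16384)->cached + 1);  /* 42 */
        return 0;
    }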
@@ -282,7 +282,7 @@ ResetSequence(Oid seq_relid)
seq->log_cnt = 0;
/*
- * Create a new storage file for the sequence. We want to keep the
+ * Create a new storage file for the sequence. We want to keep the
* sequence's relfrozenxid at 0, since it won't contain any unfrozen XIDs.
* Same with relminmxid, since a sequence will never contain multixacts.
*/
@@ -334,8 +334,8 @@ fill_seq_with_data(Relation rel, HeapTuple tuple)
* Two special hacks here:
*
* 1. Since VACUUM does not process sequences, we have to force the tuple
- * to have xmin = FrozenTransactionId now. Otherwise it would become
- * invisible to SELECTs after 2G transactions. It is okay to do this
+ * to have xmin = FrozenTransactionId now. Otherwise it would become
+ * invisible to SELECTs after 2G transactions. It is okay to do this
* because if the current transaction aborts, no other xact will ever
* examine the sequence tuple anyway.
*
@@ -512,7 +512,7 @@ nextval(PG_FUNCTION_ARGS)
* XXX: This is not safe in the presence of concurrent DDL, but acquiring
* a lock here is more expensive than letting nextval_internal do it,
* since the latter maintains a cache that keeps us from hitting the lock
- * manager more than once per transaction. It's not clear whether the
+ * manager more than once per transaction. It's not clear whether the
* performance penalty is material in practice, but for now, we do it this
* way.
*/
@@ -592,7 +592,7 @@ nextval_internal(Oid relid)
}
/*
- * Decide whether we should emit a WAL log record. If so, force up the
+ * Decide whether we should emit a WAL log record. If so, force up the
* fetch count to grab SEQ_LOG_VALS more values than we actually need to
* cache. (These will then be usable without logging.)
*
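
The WAL-amortization idea above can be shown with a toy counter: log SEQ_LOG_VALS values ahead of what is handed out, so only every 32nd nextval call needs a new WAL record. This standalone sketch ignores caching and the crash-recovery consequence (after a crash the sequence jumps ahead to the pre-logged value):

    #include <stdio.h>

    #define SEQ_LOG_VALS 32   /* the real constant in sequence.c */

    static long last = 0;      /* last value handed out */
    static long log_cnt = 0;   /* values still covered by the last WAL record */
    static int  wal_records = 0;

    /* Hand out the next value, "logging" only when headroom runs out. */
    static long toy_nextval(void)
    {
        if (log_cnt == 0)
        {
            /* pre-log SEQ_LOG_VALS extra values; the next calls are free */
            log_cnt = SEQ_LOG_VALS;
            wal_records++;
        }
        log_cnt--;
        return ++last;
    }

    int main(void)
    {
        for (int i = 0; i < 100; i++)
            toy_nextval();
        printf("100 nextval calls -> %d WAL records\n", wal_records);  /* 4 */
        return 0;
    }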
@@ -699,7 +699,7 @@ nextval_internal(Oid relid)
* We must mark the buffer dirty before doing XLogInsert(); see notes in
* SyncOneBuffer(). However, we don't apply the desired changes just yet.
* This looks like a violation of the buffer update protocol, but it is in
- * fact safe because we hold exclusive lock on the buffer. Any other
+ * fact safe because we hold exclusive lock on the buffer. Any other
* process, including a checkpoint, that tries to examine the buffer
* contents will block until we release the lock, and then will see the
* final state that we install below.
@@ -961,7 +961,7 @@ setval3_oid(PG_FUNCTION_ARGS)
* Open the sequence and acquire AccessShareLock if needed
*
* If we haven't touched the sequence already in this transaction,
- * we need to acquire AccessShareLock. We arrange for the lock to
+ * we need to acquire AccessShareLock. We arrange for the lock to
* be owned by the top transaction, so that we don't need to do it
* more than once per xact.
*/
@@ -1055,7 +1055,7 @@ init_sequence(Oid relid, SeqTable *p_elm, Relation *p_rel)
/*
* If the sequence has been transactionally replaced since we last saw it,
- * discard any cached-but-unissued values. We do not touch the currval()
+ * discard any cached-but-unissued values. We do not touch the currval()
* state, however.
*/
if (seqrel->rd_rel->relfilenode != elm->filenode)
diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c
index c9d7a8761e9..020f8a5d494 100644
--- a/src/backend/commands/tablecmds.c
+++ b/src/backend/commands/tablecmds.c
@@ -554,7 +554,7 @@ DefineRelation(CreateStmt *stmt, char relkind, Oid ownerId)
&inheritOids, &old_constraints, &parentOidCount);
/*
- * Create a tuple descriptor from the relation schema. Note that this
+ * Create a tuple descriptor from the relation schema. Note that this
* deals with column names, types, and NOT NULL constraints, but not
* default values or CHECK constraints; we handle those below.
*/
@@ -654,7 +654,7 @@ DefineRelation(CreateStmt *stmt, char relkind, Oid ownerId)
CommandCounterIncrement();
/*
- * Open the new relation and acquire exclusive lock on it. This isn't
+ * Open the new relation and acquire exclusive lock on it. This isn't
* really necessary for locking out other backends (since they can't see
* the new rel anyway until we commit), but it keeps the lock manager from
* complaining about deadlock risks.
@@ -1001,10 +1001,10 @@ ExecuteTruncate(TruncateStmt *stmt)
}
/*
- * In CASCADE mode, suck in all referencing relations as well. This
+ * In CASCADE mode, suck in all referencing relations as well. This
* requires multiple iterations to find indirectly-dependent relations. At
* each phase, we need to exclusive-lock new rels before looking for their
- * dependencies, else we might miss something. Also, we check each rel as
+ * dependencies, else we might miss something. Also, we check each rel as
* soon as we open it, to avoid a faux pas such as holding lock for a long
* time on a rel we have no permissions for.
*/
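
The CASCADE collection described above is a fixed-point iteration: keep scanning for relations that reference anything already in the set, locking and adding them, until a pass adds nothing. A toy standalone version over a hard-coded foreign-key matrix:

    #include <stdio.h>

    #define NREL 4

    /* fk[i][j]: relation i has a foreign key referencing relation j. */
    static const int fk[NREL][NREL] = {
        {0, 0, 0, 0},
        {1, 0, 0, 0},   /* 1 references 0 */
        {0, 1, 0, 0},   /* 2 references 1 (indirect dependency on 0) */
        {0, 0, 0, 0},
    };

    int main(void)
    {
        int in_set[NREL] = {1, 0, 0, 0};    /* TRUNCATE rel 0 CASCADE */
        int added = 1;

        /* Iterate to a fixed point: each pass may reveal relations that
         * reference ones found in the previous pass. */
        while (added)
        {
            added = 0;
            for (int i = 0; i < NREL; i++)
            {
                if (in_set[i])
                    continue;
                for (int j = 0; j < NREL; j++)
                    if (in_set[j] && fk[i][j])
                    {
                        /* the real code locks relation i here, before its
                         * own dependents are sought on a later pass */
                        in_set[i] = 1;
                        added = 1;
                        break;
                    }
            }
        }
        for (int i = 0; i < NREL; i++)
            if (in_set[i])
                printf("truncating rel %d\n", i);   /* rels 0, 1, 2 */
        return 0;
    }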
@@ -1225,7 +1225,7 @@ ExecuteTruncate(TruncateStmt *stmt)
}
/*
- * Check that a given rel is safe to truncate. Subroutine for ExecuteTruncate
+ * Check that a given rel is safe to truncate. Subroutine for ExecuteTruncate
*/
static void
truncate_check_rel(Relation rel)
@@ -1652,7 +1652,7 @@ MergeAttributes(List *schema, List *supers, char relpersistence,
/*
* Now copy the CHECK constraints of this parent, adjusting attnos
- * using the completed newattno[] map. Identically named constraints
+ * using the completed newattno[] map. Identically named constraints
* are merged if possible, else we throw error.
*/
if (constr && constr->num_check > 0)
@@ -1713,7 +1713,7 @@ MergeAttributes(List *schema, List *supers, char relpersistence,
/*
* Close the parent rel, but keep our AccessShareLock on it until xact
- * commit. That will prevent someone else from deleting or ALTERing
+ * commit. That will prevent someone else from deleting or ALTERing
* the parent before the child is committed.
*/
heap_close(relation, NoLock);
@@ -2221,7 +2221,7 @@ renameatt_internal(Oid myrelid,
oldattname)));
/*
- * If the attribute is inherited, forbid the renaming. If this is a
+ * If the attribute is inherited, forbid the renaming. If this is a
* top-level call to renameatt(), then expected_parents will be 0, so the
* effect of this code will be to prohibit the renaming if the attribute
* is inherited at all. If this is a recursive call to renameatt(),
@@ -2522,7 +2522,7 @@ RenameRelationInternal(Oid myrelid, const char *newrelname, bool is_internal)
newrelname)));
/*
- * Update pg_class tuple with new relname. (Scribbling on reltup is OK
+ * Update pg_class tuple with new relname. (Scribbling on reltup is OK
* because it's a copy...)
*/
namestrcpy(&(relform->relname), newrelname);
@@ -2578,7 +2578,7 @@ RenameRelationInternal(Oid myrelid, const char *newrelname, bool is_internal)
* We also reject these commands if there are any pending AFTER trigger events
* for the rel. This is certainly necessary for the rewriting variants of
* ALTER TABLE, because they don't preserve tuple TIDs and so the pending
- * events would try to fetch the wrong tuples. It might be overly cautious
+ * events would try to fetch the wrong tuples. It might be overly cautious
* in other cases, but again it seems better to err on the side of paranoia.
*
* REINDEX calls this with "rel" referencing the index to be rebuilt; here
@@ -2634,23 +2634,23 @@ AlterTableLookupRelation(AlterTableStmt *stmt, LOCKMODE lockmode)
* 3. Scan table(s) to check new constraints, and optionally recopy
* the data into new table(s).
* Phase 3 is not performed unless one or more of the subcommands requires
- * it. The intention of this design is to allow multiple independent
+ * it. The intention of this design is to allow multiple independent
* updates of the table schema to be performed with only one pass over the
* data.
*
- * ATPrepCmd performs phase 1. A "work queue" entry is created for
+ * ATPrepCmd performs phase 1. A "work queue" entry is created for
* each table to be affected (there may be multiple affected tables if the
* commands traverse a table inheritance hierarchy). Also we do preliminary
* validation of the subcommands, including parse transformation of those
* expressions that need to be evaluated with respect to the old table
* schema.
*
- * ATRewriteCatalogs performs phase 2 for each affected table. (Note that
+ * ATRewriteCatalogs performs phase 2 for each affected table. (Note that
* phases 2 and 3 normally do no explicit recursion, since phase 1 already
* did it --- although some subcommands have to recurse in phase 2 instead.)
* Certain subcommands need to be performed before others to avoid
* unnecessary conflicts; for example, DROP COLUMN should come before
- * ADD COLUMN. Therefore phase 1 divides the subcommands into multiple
+ * ADD COLUMN. Therefore phase 1 divides the subcommands into multiple
* lists, one for each logical "pass" of phase 2.
*
* ATRewriteTables performs phase 3 for those tables that need it.
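
A skeletal standalone model of the pass-list dispatch just described: phase 1 files each subcommand into the list for the pass where it must execute, and phase 2 walks the passes in a fixed order so that, for example, DROP COLUMN runs before ADD COLUMN. The pass names below are loosely modeled on the real AT_PASS_* values, but the structure is simplified:

    #include <stdio.h>

    typedef enum { AT_PASS_DROP, AT_PASS_ADD_COL, AT_PASS_MISC,
                   AT_NUM_PASSES } AlterTablePass;

    #define MAXCMDS 8

    static const char *queue[AT_NUM_PASSES][MAXCMDS];
    static int ncmds[AT_NUM_PASSES];

    /* Phase 1: classify each subcommand into the pass where it must run. */
    static void at_prep_cmd(const char *cmd, AlterTablePass pass)
    {
        queue[pass][ncmds[pass]++] = cmd;
    }

    /* Phase 2: walk the passes in a fixed, conflict-avoiding order. */
    static void at_rewrite_catalogs(void)
    {
        for (int pass = 0; pass < AT_NUM_PASSES; pass++)
            for (int i = 0; i < ncmds[pass]; i++)
                printf("pass %d: %s\n", pass, queue[pass][i]);
    }

    int main(void)
    {
        /* subcommands arrive in user order... */
        at_prep_cmd("ADD COLUMN x int", AT_PASS_ADD_COL);
        at_prep_cmd("DROP COLUMN x", AT_PASS_DROP);
        /* ...but execute with the DROP first */
        at_rewrite_catalogs();
        return 0;
    }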
@@ -2662,7 +2662,7 @@ AlterTableLookupRelation(AlterTableStmt *stmt, LOCKMODE lockmode)
* for the subcommands requested. Any subcommand that needs to rewrite
* tuples in the table forces the whole command to be executed with
* AccessExclusiveLock (actually, that is currently required always, but
- * we hope to relax it at some point). We pass the lock level down
+ * we hope to relax it at some point). We pass the lock level down
* so that we can apply it recursively to inherited tables. Note that the
* lock level we want as we recurse might well be higher than required for
* that specific subcommand. So we pass down the overall lock requirement,
@@ -3173,7 +3173,7 @@ ATPrepCmd(List **wqueue, Relation rel, AlterTableCmd *cmd,
/*
* ATRewriteCatalogs
*
- * Traffic cop for ALTER TABLE Phase 2 operations. Subcommands are
+ * Traffic cop for ALTER TABLE Phase 2 operations. Subcommands are
* dispatched in a "safe" execution order (designed to avoid unnecessary
* conflicts).
*/
@@ -3757,7 +3757,7 @@ ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap, LOCKMODE lockmode)
{
/*
* All predicate locks on the tuples or pages are about to be made
- * invalid, because we move tuples around. Promote them to
+ * invalid, because we move tuples around. Promote them to
* relation locks.
*/
TransferPredicateLocksToHeapRelation(oldrel);
@@ -4297,7 +4297,7 @@ find_typed_table_dependencies(Oid typeOid, const char *typeName, DropBehavior be
*
* Check whether a type is suitable for CREATE TABLE OF/ALTER TABLE OF. If it
* isn't suitable, throw an error. Currently, we require that the type
- * originated with CREATE TYPE AS. We could support any row type, but doing so
+ * originated with CREATE TYPE AS. We could support any row type, but doing so
* would require handling a number of extra corner cases in the DDL commands.
*/
void
@@ -4316,7 +4316,7 @@ check_of_type(HeapTuple typetuple)
/*
* Close the parent rel, but keep our AccessShareLock on it until xact
- * commit. That will prevent someone else from deleting or ALTERing
+ * commit. That will prevent someone else from deleting or ALTERing
* the type before the typed table creation/conversion commits.
*/
relation_close(typeRelation, NoLock);
@@ -4775,7 +4775,7 @@ add_column_collation_dependency(Oid relid, int32 attnum, Oid collid)
/*
* ALTER TABLE SET WITH OIDS
*
- * Basically this is an ADD COLUMN for the special OID column. We have
+ * Basically this is an ADD COLUMN for the special OID column. We have
* to cons up a ColumnDef node because the ADD COLUMN code needs one.
*/
static void
@@ -5244,7 +5244,7 @@ ATExecSetStorage(Relation rel, const char *colName, Node *newValue, LOCKMODE loc
*
* DROP COLUMN cannot use the normal ALTER TABLE recursion mechanism,
* because we have to decide at runtime whether to recurse or not depending
- * on whether attinhcount goes to zero or not. (We can't check this in a
+ * on whether attinhcount goes to zero or not. (We can't check this in a
* static pre-pass because it won't handle multiple inheritance situations
* correctly.)
*/
@@ -5492,7 +5492,7 @@ ATExecAddIndex(AlteredTableInfo *tab, Relation rel,
/*
* If TryReuseIndex() stashed a relfilenode for us, we used it for the new
- * index instead of building from scratch. The DROP of the old edition of
+ * index instead of building from scratch. The DROP of the old edition of
* this index will have scheduled the storage for deletion at commit, so
* cancel that pending deletion.
*/
@@ -5534,7 +5534,7 @@ ATExecAddIndexConstraint(AlteredTableInfo *tab, Relation rel,
elog(ERROR, "index \"%s\" is not unique", indexName);
/*
- * Determine name to assign to constraint. We require a constraint to
+ * Determine name to assign to constraint. We require a constraint to
* have the same name as the underlying index; therefore, use the index's
* existing name as the default constraint name, and if the user
* explicitly gives some other name for the constraint, rename the index
@@ -5743,7 +5743,7 @@ ATAddCheckConstraint(List **wqueue, AlteredTableInfo *tab, Relation rel,
/*
* Check if ONLY was specified with ALTER TABLE. If so, allow the
- * constraint creation only if there are no children currently. Error out
+ * constraint creation only if there are no children currently. Error out
* otherwise.
*/
if (!recurse && children != NIL)
@@ -5775,7 +5775,7 @@ ATAddCheckConstraint(List **wqueue, AlteredTableInfo *tab, Relation rel,
/*
* Add a foreign-key constraint to a single table
*
- * Subroutine for ATExecAddConstraint. Must already hold exclusive
+ * Subroutine for ATExecAddConstraint. Must already hold exclusive
* lock on the rel, and have done appropriate validity checks for it.
* We do permissions checks here, however.
*/
@@ -5914,7 +5914,7 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel,
*
* Note that we have to be careful about the difference between the actual
* PK column type and the opclass' declared input type, which might be
- * only binary-compatible with it. The declared opcintype is the right
+ * only binary-compatible with it. The declared opcintype is the right
* thing to probe pg_amop with.
*/
if (numfks != numpks)
@@ -6071,7 +6071,7 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel,
/*
* Upon a change to the cast from the FK column to its pfeqop
- * operand, revalidate the constraint. For this evaluation, a
+ * operand, revalidate the constraint. For this evaluation, a
* binary coercion cast is equivalent to no cast at all. While
* type implementors should design implicit casts with an eye
* toward consistency of operations like equality, we cannot
@@ -6089,7 +6089,7 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel,
* Necessarily, the primary key column must then be of the domain
* type. Since the constraint was previously valid, all values on
* the foreign side necessarily exist on the primary side and in
- * turn conform to the domain. Consequently, we need not treat
+ * turn conform to the domain. Consequently, we need not treat
* domains specially here.
*
* Since we require that all collations share the same notion of
@@ -6099,7 +6099,7 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel,
* We need not directly consider the PK type. It's necessarily
* binary coercible to the opcintype of the unique index column,
* and ri_triggers.c will only deal with PK datums in terms of
- * that opcintype. Changing the opcintype also changes pfeqop.
+ * that opcintype. Changing the opcintype also changes pfeqop.
*/
old_check_ok = (new_pathtype == old_pathtype &&
new_castfunc == old_castfunc &&
@@ -6382,10 +6382,10 @@ transformColumnNameList(Oid relId, List *colList,
* transformFkeyGetPrimaryKey -
*
* Look up the names, attnums, and types of the primary key attributes
- * for the pkrel. Also return the index OID and index opclasses of the
+ * for the pkrel. Also return the index OID and index opclasses of the
* index supporting the primary key.
*
- * All parameters except pkrel are output parameters. Also, the function
+ * All parameters except pkrel are output parameters. Also, the function
* return value is the number of attributes in the primary key.
*
* Used when the column list in the REFERENCES specification is omitted.
@@ -6425,7 +6425,7 @@ transformFkeyGetPrimaryKey(Relation pkrel, Oid *indexOid,
if (indexStruct->indisprimary && IndexIsValid(indexStruct))
{
/*
- * Refuse to use a deferrable primary key. This is per SQL spec,
+ * Refuse to use a deferrable primary key. This is per SQL spec,
* and there would be a lot of interesting semantic problems if we
* tried to allow it.
*/
@@ -7349,7 +7349,7 @@ ATPrepAlterColumnType(List **wqueue,
tab->relkind == RELKIND_FOREIGN_TABLE)
{
/*
- * For composite types, do this check now. Tables will check it later
+ * For composite types, do this check now. Tables will check it later
* when the table is being rewritten.
*/
find_composite_type_dependencies(rel->rd_rel->reltype, rel, NULL);
@@ -7358,7 +7358,7 @@ ATPrepAlterColumnType(List **wqueue,
ReleaseSysCache(tuple);
/*
- * The recursion case is handled by ATSimpleRecursion. However, if we are
+ * The recursion case is handled by ATSimpleRecursion. However, if we are
* told not to recurse, there had better not be any child tables; else the
* alter would put them out of step.
*/
@@ -7467,7 +7467,7 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel,
*
* We remove any implicit coercion steps at the top level of the old
* default expression; this has been agreed to satisfy the principle of
- * least surprise. (The conversion to the new column type should act like
+ * least surprise. (The conversion to the new column type should act like
* it started from what the user sees as the stored expression, and the
* implicit coercions aren't going to be shown.)
*/
@@ -7496,7 +7496,7 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel,
* and record enough information to let us recreate the objects.
*
* The actual recreation does not happen here, but only after we have
- * performed all the individual ALTER TYPE operations. We have to save
+ * performed all the individual ALTER TYPE operations. We have to save
* the info before executing ALTER TYPE, though, else the deparser will
* get confused.
*
@@ -7625,7 +7625,7 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel,
* used in the trigger's WHEN condition. The first case would
* not require any extra work, but the second case would
* require updating the WHEN expression, which will take a
- * significant amount of new code. Since we can't easily tell
+ * significant amount of new code. Since we can't easily tell
* which case applies, we punt for both. FIXME someday.
*/
ereport(ERROR,
@@ -7901,7 +7901,7 @@ ATPostAlterTypeCleanup(List **wqueue, AlteredTableInfo *tab, LOCKMODE lockmode)
/*
* Re-parse the index and constraint definitions, and attach them to the
- * appropriate work queue entries. We do this before dropping because in
+ * appropriate work queue entries. We do this before dropping because in
* the case of a FOREIGN KEY constraint, we might not yet have exclusive
* lock on the table the constraint is attached to, and we need to get
* that before dropping. It's safe because the parser won't actually look
@@ -8956,7 +8956,7 @@ copy_relation_data(SMgrRelation src, SMgrRelation dst,
PageSetChecksumInplace(page, blkno);
/*
- * Now write the page. We say isTemp = true even if it's not a temp
+ * Now write the page. We say isTemp = true even if it's not a temp
* rel, because there's no need for smgr to schedule an fsync for this
* write; we'll do it ourselves below.
*/
@@ -8966,7 +8966,7 @@ copy_relation_data(SMgrRelation src, SMgrRelation dst,
pfree(buf);
/*
- * If the rel is WAL-logged, must fsync before commit. We use heap_sync
+ * If the rel is WAL-logged, must fsync before commit. We use heap_sync
* to ensure that the toast table gets fsync'd too. (For a temp or
* unlogged rel we don't care since the data will be gone after a crash
* anyway.)
@@ -9141,7 +9141,7 @@ ATExecAddInherit(Relation child_rel, RangeVar *parent, LOCKMODE lockmode)
MergeConstraintsIntoExisting(child_rel, parent_rel);
/*
- * OK, it looks valid. Make the catalog entries that show inheritance.
+ * OK, it looks valid. Make the catalog entries that show inheritance.
*/
StoreCatalogInheritance1(RelationGetRelid(child_rel),
RelationGetRelid(parent_rel),
@@ -9617,7 +9617,7 @@ ATExecDropInherit(Relation rel, RangeVar *parent, LOCKMODE lockmode)
* Drop the dependency created by StoreCatalogInheritance1 (CREATE TABLE
* INHERITS/ALTER TABLE INHERIT -- refclassid will be RelationRelationId) or
* heap_create_with_catalog (CREATE TABLE OF/ALTER TABLE OF -- refclassid will
- * be TypeRelationId). There's no convenient way to do this, so go trawling
+ * be TypeRelationId). There's no convenient way to do this, so go trawling
* through pg_depend.
*/
static void
@@ -9803,7 +9803,7 @@ ATExecAddOf(Relation rel, const TypeName *ofTypename, LOCKMODE lockmode)
/*
* ALTER TABLE NOT OF
*
- * Detach a typed table from its originating type. Just clear reloftype and
+ * Detach a typed table from its originating type. Just clear reloftype and
* remove the dependency.
*/
static void
@@ -10377,7 +10377,7 @@ AtEOXact_on_commit_actions(bool isCommit)
* Post-subcommit or post-subabort cleanup for ON COMMIT management.
*
* During subabort, we can immediately remove entries created during this
- * subtransaction. During subcommit, just relabel entries marked during
+ * subtransaction. During subcommit, just relabel entries marked during
* this subtransaction as being the parent's responsibility.
*/
void
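
A standalone sketch of the subtransaction bookkeeping just described, with invented types: on subcommit, entries created by the subtransaction are relabeled with the parent's id; on subabort, they are dropped:

    #include <stdio.h>

    typedef unsigned int SubTransactionId;

    #define NITEMS 3

    /* Each ON COMMIT entry remembers which subtransaction created it. */
    static struct { const char *rel; SubTransactionId creating_subid; }
    items[NITEMS] =
    {
        {"temp_a", 1},   /* created in the top transaction */
        {"temp_b", 2},   /* created in subtransaction 2 */
        {"temp_c", 2},
    };

    /* On subcommit, hand entries to the parent; on subabort, forget them. */
    static void at_eosubxact(int isCommit, SubTransactionId mySubid,
                             SubTransactionId parentSubid)
    {
        for (int i = 0; i < NITEMS; i++)
        {
            if (items[i].creating_subid != mySubid)
                continue;
            if (isCommit)
                items[i].creating_subid = parentSubid;
            else
                items[i].rel = NULL;     /* stand-in for removing the entry */
        }
    }

    int main(void)
    {
        at_eosubxact(1, 2, 1);           /* subcommit: reparent temp_b, temp_c */
        for (int i = 0; i < NITEMS; i++)
            if (items[i].rel)
                printf("%s owned by subid %u\n",
                       items[i].rel, items[i].creating_subid);
        return 0;
    }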
@@ -10421,7 +10421,7 @@ AtEOSubXact_on_commit_actions(bool isCommit, SubTransactionId mySubid,
* This is intended as a callback for RangeVarGetRelidExtended(). It allows
* the relation to be locked only if (1) it's a plain table, materialized
* view, or TOAST table and (2) the current user is the owner (or the
- * superuser). This meets the permission-checking needs of CLUSTER, REINDEX
+ * superuser). This meets the permission-checking needs of CLUSTER, REINDEX
* TABLE, and REFRESH MATERIALIZED VIEW; we expose it here so that it can be
* used by all.
*/
@@ -10438,7 +10438,7 @@ RangeVarCallbackOwnsTable(const RangeVar *relation,
/*
* If the relation does exist, check whether it's an index. But note that
* the relation might have been dropped between the time we did the name
- * lookup and now. In that case, there's nothing to do.
+ * lookup and now. In that case, there's nothing to do.
*/
relkind = get_rel_relkind(relId);
if (!relkind)
diff --git a/src/backend/commands/tablespace.c b/src/backend/commands/tablespace.c
index ef6df0039ca..01ba326a43b 100644
--- a/src/backend/commands/tablespace.c
+++ b/src/backend/commands/tablespace.c
@@ -31,7 +31,7 @@
* To allow CREATE DATABASE to give a new database a default tablespace
* that's different from the template database's default, we make the
* provision that a zero in pg_class.reltablespace means the database's
- * default tablespace. Without this, CREATE DATABASE would have to go in
+ * default tablespace. Without this, CREATE DATABASE would have to go in
* and munge the system catalogs of the new database.
*
*
@@ -271,7 +271,7 @@ CreateTableSpace(CreateTableSpaceStmt *stmt)
/*
* Check that location isn't too long. Remember that we're going to append
- * 'PG_XXX/<dboid>/<relid>.<nnn>'. FYI, we never actually reference the
+ * 'PG_XXX/<dboid>/<relid>.<nnn>'. FYI, we never actually reference the
* whole path, but mkdir() uses the first two parts.
*/
if (strlen(location) + 1 + strlen(TABLESPACE_VERSION_DIRECTORY) + 1 +
@@ -473,7 +473,7 @@ DropTableSpace(DropTableSpaceStmt *stmt)
* Not all files deleted? However, there can be lingering empty files
* in the directories, left behind by for example DROP TABLE, that
* have been scheduled for deletion at next checkpoint (see comments
- * in mdunlink() for details). We could just delete them immediately,
+ * in mdunlink() for details). We could just delete them immediately,
* but we can't tell them apart from important data files that we
* mustn't delete. So instead, we force a checkpoint which will clean
* out any lingering files, and try again.
@@ -551,7 +551,7 @@ create_tablespace_directories(const char *location, const Oid tablespaceoid)
TABLESPACE_VERSION_DIRECTORY);
/*
- * Attempt to coerce target directory to safe permissions. If this fails,
+ * Attempt to coerce target directory to safe permissions. If this fails,
* it doesn't exist or has the wrong owner.
*/
if (chmod(location, S_IRWXU) != 0)
@@ -652,7 +652,7 @@ create_tablespace_directories(const char *location, const Oid tablespaceoid)
* Attempt to remove filesystem infrastructure for the tablespace.
*
* 'redo' indicates we are redoing a drop from XLOG; in that case we should
- * not throw an ERROR for problems, just LOG them. The worst consequence of
+ * not throw an ERROR for problems, just LOG them. The worst consequence of
* not removing files here would be failure to release some disk space, which
* does not justify throwing an error that would require manual intervention
* to get the database running again.
@@ -689,10 +689,10 @@ destroy_tablespace_directories(Oid tablespaceoid, bool redo)
*
* If redo is true then ENOENT is a likely outcome here, and we allow it
* to pass without comment. In normal operation we still allow it, but
- * with a warning. This is because even though ProcessUtility disallows
+ * with a warning. This is because even though ProcessUtility disallows
* DROP TABLESPACE in a transaction block, it's possible that a previous
* DROP failed and rolled back after removing the tablespace directories
- * and/or symlink. We want to allow a new DROP attempt to succeed at
+ * and/or symlink. We want to allow a new DROP attempt to succeed at
* removing the catalog entries (and symlink if still present), so we
* should not give a hard error here.
*/
@@ -990,7 +990,7 @@ check_default_tablespace(char **newval, void **extra, GucSource source)
{
/*
* If we aren't inside a transaction, we cannot do database access so
- * cannot verify the name. Must accept the value on faith.
+ * cannot verify the name. Must accept the value on faith.
*/
if (IsTransactionState())
{
@@ -1105,7 +1105,7 @@ check_temp_tablespaces(char **newval, void **extra, GucSource source)
/*
* If we aren't inside a transaction, we cannot do database access so
- * cannot verify the individual names. Must accept the list on faith.
+ * cannot verify the individual names. Must accept the list on faith.
* Fortunately, there's then also no need to pass the data to fd.c.
*/
if (IsTransactionState())
@@ -1132,9 +1132,9 @@ check_temp_tablespaces(char **newval, void **extra, GucSource source)
}
/*
- * In an interactive SET command, we ereport for bad info. When
+ * In an interactive SET command, we ereport for bad info. When
* source == PGC_S_TEST, we are checking the argument of an ALTER
- * DATABASE SET or ALTER USER SET command. pg_dumpall dumps all
+ * DATABASE SET or ALTER USER SET command. pg_dumpall dumps all
* roles before tablespaces, so if we're restoring a pg_dumpall
* script the tablespace might not yet exist, but will be created
* later. Because of that, issue a NOTICE if source ==
diff --git a/src/backend/commands/trigger.c b/src/backend/commands/trigger.c
index 26a1869e73f..63fd0bfed4b 100644
--- a/src/backend/commands/trigger.c
+++ b/src/backend/commands/trigger.c
@@ -106,7 +106,7 @@ static void AfterTriggerSaveEvent(EState *estate, ResultRelInfo *relinfo,
*
* constraintOid, if nonzero, says that this trigger is being created
* internally to implement that constraint. A suitable pg_depend entry will
- * be made to link the trigger to that constraint. constraintOid is zero when
+ * be made to link the trigger to that constraint. constraintOid is zero when
* executing a user-entered CREATE TRIGGER command. (For CREATE CONSTRAINT
* TRIGGER, we build a pg_constraint entry internally.)
*
@@ -393,7 +393,7 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
if (funcrettype != TRIGGEROID)
{
/*
- * We allow OPAQUE just so we can load old dump files. When we see a
+ * We allow OPAQUE just so we can load old dump files.  When we see a
* trigger function declared OPAQUE, change it to TRIGGER.
*/
if (funcrettype == OPAQUEOID)
@@ -415,7 +415,7 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
* references one of the built-in RI_FKey trigger functions, assume it is
* from a dump of a pre-7.3 foreign key constraint, and take steps to
* convert this legacy representation into a regular foreign key
- * constraint. Ugly, but necessary for loading old dump files.
+ * constraint.  Ugly, but necessary for loading old dump files.
*/
if (stmt->isconstraint && !isInternal &&
list_length(stmt->args) >= 6 &&
@@ -478,7 +478,7 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
/*
* If trigger is internally generated, modify the provided trigger name to
- * ensure uniqueness by appending the trigger OID. (Callers will usually
+ * ensure uniqueness by appending the trigger OID.  (Callers will usually
* supply a simple constant trigger name in these cases.)
*/
if (isInternal)
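The renaming the comment describes amounts to something like this sketch ('internaltrigname' and 'trigoid' are assumed local names, not quoted from the file):

    if (isInternal)
    {
        snprintf(internaltrigname, sizeof(internaltrigname),
                 "%s_%u", stmt->trigname, trigoid);
        trigname = internaltrigname;
    }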
@@ -602,7 +602,7 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
int16 attnum;
int j;
- /* Lookup column name. System columns are not allowed */
+ /* Lookup column name.  System columns are not allowed */
attnum = attnameAttNum(rel, name, false);
if (attnum == InvalidAttrNumber)
ereport(ERROR,
@@ -707,7 +707,7 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
else
{
/*
- * User CREATE TRIGGER, so place dependencies. We make trigger be
+ * User CREATE TRIGGER, so place dependencies.  We make trigger be
* auto-dropped if its relation is dropped or if the FK relation is
* dropped. (Auto drop is compatible with our pre-7.3 behavior.)
*/
@@ -776,7 +776,7 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
* full-fledged foreign key constraints.
*
* The conversion is complex because a pre-7.3 foreign key involved three
- * separate triggers, which were reported separately in dumps. While the
+ * separate triggers, which were reported separately in dumps.  While the
* single trigger on the referencing table adds no new information, we need
* to know the trigger functions of both of the triggers on the referenced
* table to build the constraint declaration. Also, due to lack of proper
@@ -2010,7 +2010,7 @@ ExecBRInsertTriggers(EState *estate, ResultRelInfo *relinfo,
if (newtuple != slottuple)
{
/*
- * Return the modified tuple using the es_trig_tuple_slot. We assume
+ * Return the modified tuple using the es_trig_tuple_slot.  We assume
* the tuple was allocated in per-tuple memory context, and therefore
* will go away by itself. The tuple table slot should not try to
* clear it.
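A fragment sketching the slot handoff described above (9.x executor API); shouldFree = false keeps the slot from freeing a tuple that the per-tuple memory context already owns:

    TupleTableSlot *newslot = estate->es_trig_tuple_slot;

    if (newslot->tts_tupleDescriptor != tupdesc)
        ExecSetSlotDescriptor(newslot, tupdesc);
    ExecStoreTuple(newtuple, newslot, InvalidBuffer, false);
    return newslot;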
@@ -2085,7 +2085,7 @@ ExecIRInsertTriggers(EState *estate, ResultRelInfo *relinfo,
if (newtuple != slottuple)
{
/*
- * Return the modified tuple using the es_trig_tuple_slot. We assume
+ * Return the modified tuple using the es_trig_tuple_slot.  We assume
* the tuple was allocated in per-tuple memory context, and therefore
* will go away by itself. The tuple table slot should not try to
* clear it.
@@ -2445,7 +2445,7 @@ ExecBRUpdateTriggers(EState *estate, EPQState *epqstate,
if (newtuple != slottuple)
{
/*
- * Return the modified tuple using the es_trig_tuple_slot. We assume
+ * Return the modified tuple using the es_trig_tuple_slot.  We assume
* the tuple was allocated in per-tuple memory context, and therefore
* will go away by itself. The tuple table slot should not try to
* clear it.
@@ -2532,7 +2532,7 @@ ExecIRUpdateTriggers(EState *estate, ResultRelInfo *relinfo,
if (newtuple != slottuple)
{
/*
- * Return the modified tuple using the es_trig_tuple_slot. We assume
+ * Return the modified tuple using the es_trig_tuple_slot.  We assume
* the tuple was allocated in per-tuple memory context, and therefore
* will go away by itself. The tuple table slot should not try to
* clear it.
@@ -2941,7 +2941,7 @@ typedef SetConstraintStateData *SetConstraintState;
* Per-trigger-event data
*
* The actual per-event data, AfterTriggerEventData, includes DONE/IN_PROGRESS
- * status bits and one or two tuple CTIDs. Each event record also has an
+ * status bits and one or two tuple CTIDs.  Each event record also has an
* associated AfterTriggerSharedData that is shared across all instances
* of similar events within a "chunk".
*
@@ -2955,7 +2955,7 @@ typedef SetConstraintStateData *SetConstraintState;
* Although this is mutable state, we can keep it in AfterTriggerSharedData
* because all instances of the same type of event in a given event list will
* be fired at the same time, if they were queued between the same firing
- * cycles. So we need only ensure that ats_firing_id is zero when attaching
+ * cycles.  So we need only ensure that ats_firing_id is zero when attaching
* a new event to an existing AfterTriggerSharedData record.
*/
typedef uint32 TriggerFlags;
@@ -3002,7 +3002,7 @@ typedef struct AfterTriggerEventDataOneCtid
/*
* To avoid palloc overhead, we keep trigger events in arrays in successively-
* larger chunks (a slightly more sophisticated version of an expansible
- * array). The space between CHUNK_DATA_START and freeptr is occupied by
+ * array).  The space between CHUNK_DATA_START and freeptr is occupied by
* AfterTriggerEventData records; the space between endfree and endptr is
* occupied by AfterTriggerSharedData records.
*/
@@ -3044,7 +3044,7 @@ typedef struct AfterTriggerEventList
*
* firing_counter is incremented for each call of afterTriggerInvokeEvents.
* We mark firable events with the current firing cycle's ID so that we can
- * tell which ones to work on. This ensures sane behavior if a trigger
+ * tell which ones to work on.  This ensures sane behavior if a trigger
* function chooses to do SET CONSTRAINTS: the inner SET CONSTRAINTS will
* only fire those events that weren't already scheduled for firing.
*
@@ -3052,7 +3052,7 @@ typedef struct AfterTriggerEventList
* This is saved and restored across failed subtransactions.
*
* events is the current list of deferred events. This is global across
- * all subtransactions of the current transaction. In a subtransaction
+ * all subtransactions of the current transaction.  In a subtransaction
* abort, we know that the events added by the subtransaction are at the
* end of the list, so it is relatively easy to discard them. The event
* list chunks themselves are stored in event_cxt.
@@ -3080,12 +3080,12 @@ typedef struct AfterTriggerEventList
* which we similarly use to clean up at subtransaction abort.
*
* firing_stack is a stack of copies of subtransaction-start-time
- * firing_counter. We use this to recognize which deferred triggers were
+ * firing_counter.  We use this to recognize which deferred triggers were
* fired (or marked for firing) within an aborted subtransaction.
*
* We use GetCurrentTransactionNestLevel() to determine the correct array
* index in these stacks. maxtransdepth is the number of allocated entries in
- * each stack. (By not keeping our own stack pointer, we can avoid trouble
+ * each stack.  (By not keeping our own stack pointer, we can avoid trouble
* in cases where errors during subxact abort cause multiple invocations
* of AfterTriggerEndSubXact() at the same nesting depth.)
*/
@@ -3353,7 +3353,7 @@ afterTriggerRestoreEventList(AfterTriggerEventList *events,
* single trigger function.
*
* Frequently, this will be fired many times in a row for triggers of
- * a single relation. Therefore, we cache the open relation and provide
+ * a single relation.  Therefore, we cache the open relation and provide
* fmgr lookup cache space at the caller level. (For triggers fired at
* the end of a query, we can even piggyback on the executor's state.)
*
@@ -3870,7 +3870,7 @@ AfterTriggerFireDeferred(void)
}
/*
- * Run all the remaining triggers. Loop until they are all gone, in case
+ * Run all the remaining triggers.  Loop until they are all gone, in case
* some trigger queues more for us to do.
*/
while (afterTriggerMarkEvents(events, NULL, false))
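The loop body, sketched from the surrounding code, pairs each marking pass with a firing pass and stops once a pass finds everything already fired:

    while (afterTriggerMarkEvents(events, NULL, false))
    {
        CommandId   firing_id = afterTriggers->firing_counter++;

        /* OK to delete events after processing: outermost pass */
        if (afterTriggerInvokeEvents(events, firing_id, NULL, true))
            break;              /* all fired */
    }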
@@ -3933,7 +3933,7 @@ AfterTriggerBeginSubXact(void)
int my_level = GetCurrentTransactionNestLevel();
/*
- * Ignore call if the transaction is in aborted state. (Probably
+ * Ignore call if the transaction is in aborted state.  (Probably
* shouldn't happen?)
*/
if (afterTriggers == NULL)
@@ -4012,7 +4012,7 @@ AfterTriggerEndSubXact(bool isCommit)
CommandId subxact_firing_id;
/*
- * Ignore call if the transaction is in aborted state. (Probably
+ * Ignore call if the transaction is in aborted state.  (Probably
* unneeded)
*/
if (afterTriggers == NULL)
@@ -4144,7 +4144,7 @@ SetConstraintStateCopy(SetConstraintState origstate)
}
/*
- * Add a per-trigger item to a SetConstraintState. Returns possibly-changed
+ * Add a per-trigger item to a SetConstraintState.  Returns possibly-changed
* pointer to the state object (it will change if we have to repalloc).
*/
static SetConstraintState
@@ -4229,7 +4229,7 @@ AfterTriggerSetState(ConstraintsSetStmt *stmt)
* First, identify all the named constraints and make a list of their
* OIDs. Since, unlike the SQL spec, we allow multiple constraints of
* the same name within a schema, the specifications are not
- * necessarily unique. Our strategy is to target all matching
+ * necessarily unique.  Our strategy is to target all matching
* constraints within the first search-path schema that has any
* matches, but disregard matches in schemas beyond the first match.
* (This is a bit odd but it's the historical behavior.)
@@ -4255,7 +4255,7 @@ AfterTriggerSetState(ConstraintsSetStmt *stmt)
/*
* If we're given the schema name with the constraint, look only
- * in that schema. If given a bare constraint name, use the
+ * in that schema.  If given a bare constraint name, use the
* search path to find the first matching constraint.
*/
if (constraint->schemaname)
@@ -4359,7 +4359,7 @@ AfterTriggerSetState(ConstraintsSetStmt *stmt)
/*
* Silently skip triggers that are marked as non-deferrable in
- * pg_trigger. This is not an error condition, since a
+ * pg_trigger.  This is not an error condition, since a
* deferrable RI constraint may have some non-deferrable
* actions.
*/
@@ -4430,7 +4430,7 @@ AfterTriggerSetState(ConstraintsSetStmt *stmt)
/*
* Make sure a snapshot has been established in case trigger
- * functions need one. Note that we avoid setting a snapshot if
+ * functions need one.  Note that we avoid setting a snapshot if
* we don't find at least one trigger that has to be fired now.
* This is so that BEGIN; SET CONSTRAINTS ...; SET TRANSACTION
* ISOLATION LEVEL SERIALIZABLE; ... works properly. (If we are
@@ -4490,7 +4490,7 @@ AfterTriggerPendingOnRel(Oid relid)
AfterTriggerShared evtshared = GetTriggerSharedData(event);
/*
- * We can ignore completed events. (Even if a DONE flag is rolled
+ * We can ignore completed events.  (Even if a DONE flag is rolled
* back by subxact abort, it's OK because the effects of the TRUNCATE
* or whatever must get rolled back too.)
*/
@@ -4531,7 +4531,7 @@ AfterTriggerPendingOnRel(Oid relid)
* be fired for an event.
*
* NOTE: this is called whenever there are any triggers associated with
- * the event (even if they are disabled). This function decides which
+ * the event (even if they are disabled).  This function decides which
* triggers actually need to be queued.
* ----------
*/
diff --git a/src/backend/commands/typecmds.c b/src/backend/commands/typecmds.c
index e5ec7c17700..b08cc2b6b9a 100644
--- a/src/backend/commands/typecmds.c
+++ b/src/backend/commands/typecmds.c
@@ -512,8 +512,8 @@ DefineType(List *names, List *parameters)
analyzeOid = findTypeAnalyzeFunction(analyzeName, typoid);
/*
- * Check permissions on functions. We choose to require the creator/owner
- * of a type to also own the underlying functions. Since creating a type
+ * Check permissions on functions.  We choose to require the creator/owner
+ * of a type to also own the underlying functions.  Since creating a type
* is tantamount to granting public execute access on the functions, the
* minimum sane check would be for execute-with-grant-option. But we
* don't have a way to make the type go away if the grant option is
@@ -550,7 +550,7 @@ DefineType(List *names, List *parameters)
* now have TypeCreate do all the real work.
*
* Note: the pg_type.oid is stored in user tables as array elements (base
- * types) in ArrayType and in composite types in DatumTupleFields. This
+ * types) in ArrayType and in composite types in DatumTupleFields.  This
* oid must be preserved by binary upgrades.
*/
typoid =
@@ -723,7 +723,7 @@ DefineDomain(CreateDomainStmt *stmt)
get_namespace_name(domainNamespace));
/*
- * Check for collision with an existing type name. If there is one and
+ * Check for collision with an existing type name.  If there is one and
* it's an autogenerated array, we can rename it out of the way.
*/
old_type_oid = GetSysCacheOid2(TYPENAMENSP,
@@ -1074,7 +1074,7 @@ DefineEnum(CreateEnumStmt *stmt)
get_namespace_name(enumNamespace));
/*
- * Check for collision with an existing type name. If there is one and
+ * Check for collision with an existing type name.  If there is one and
* it's an autogenerated array, we can rename it out of the way.
*/
old_type_oid = GetSysCacheOid2(TYPENAMENSP,
@@ -1191,7 +1191,7 @@ AlterEnum(AlterEnumStmt *stmt, bool isTopLevel)
/*
* Ordinarily we disallow adding values within transaction blocks, because
* we can't cope with enum OID values getting into indexes and then having
- * their defining pg_enum entries go away. However, it's okay if the enum
+ * their defining pg_enum entries go away.  However, it's okay if the enum
* type was created in the current transaction, since then there can be no
* such indexes that wouldn't themselves go away on rollback. (We support
* this case because pg_dump --binary-upgrade needs it.) We test this by
@@ -1513,7 +1513,7 @@ DefineRange(CreateRangeStmt *stmt)
* impossible to define a polymorphic constructor; we have to generate new
* constructor functions explicitly for each range type.
*
- * We actually define 4 functions, with 0 through 3 arguments. This is just
+ * We actually define 4 functions, with 0 through 3 arguments.  This is just
* to offer more convenience for the user.
*/
static void
@@ -2273,7 +2273,7 @@ AlterDomainNotNull(List *names, bool notNull)
/*
* In principle the auxiliary information for this
* error should be errdatatype(), but errtablecol()
- * seems considerably more useful in practice. Since
+ * seems considerably more useful in practice.  Since
* this code only executes in an ALTER DOMAIN command,
* the client should already know which domain is in
* question.
@@ -2295,7 +2295,7 @@ AlterDomainNotNull(List *names, bool notNull)
}
/*
- * Okay to update pg_type row. We can scribble on typTup because it's a
+ * Okay to update pg_type row.  We can scribble on typTup because it's a
* copy.
*/
typTup->typnotnull = notNull;
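A sketch of the copy-then-scribble idiom used here: the syscache copy is palloc'd, so updating it in place before writing it back is safe (9.x-era catalog calls; the variable names are assumed):

    tup = SearchSysCacheCopy1(TYPEOID, ObjectIdGetDatum(domainoid));
    typTup = (Form_pg_type) GETSTRUCT(tup);
    typTup->typnotnull = notNull;
    simple_heap_update(typrel, &tup->t_self, tup);
    CatalogUpdateIndexes(typrel, tup);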
@@ -2483,7 +2483,7 @@ AlterDomainAddConstraint(List *names, Node *newConstraint)
/*
* Since all other constraint types throw errors, this must be a check
- * constraint. First, process the constraint expression and add an entry
+ * constraint.  First, process the constraint expression and add an entry
* to pg_constraint.
*/
@@ -2667,7 +2667,7 @@ validateDomainConstraint(Oid domainoid, char *ccbin)
/*
* In principle the auxiliary information for this error
* should be errdomainconstraint(), but errtablecol()
- * seems considerably more useful in practice. Since this
+ * seems considerably more useful in practice.  Since this
* code only executes in an ALTER DOMAIN command, the
* client should already know which domain is in question,
* and which constraint too.
@@ -2849,7 +2849,7 @@ get_rels_with_domain(Oid domainOid, LOCKMODE lockmode)
continue;
/*
- * Okay, add column to result. We store the columns in column-number
+ * Okay, add column to result.  We store the columns in column-number
* order; this is just a hack to improve predictability of regression
* test output ...
*/
@@ -2936,7 +2936,7 @@ domainAddConstraint(Oid domainOid, Oid domainNamespace, Oid baseTypeOid,
/*
* Set up a CoerceToDomainValue to represent the occurrence of VALUE in
- * the expression. Note that it will appear to have the type of the base
+ * the expression.  Note that it will appear to have the type of the base
* type, not the domain. This seems correct since within the check
* expression, we should not assume the input value can be considered a
* member of the domain.
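Building that placeholder looks roughly like this (field names per the era's primnodes.h; a sketch, not the full function):

    CoerceToDomainValue *domVal = makeNode(CoerceToDomainValue);

    domVal->typeId = baseTypeOid;       /* base type, not the domain */
    domVal->typeMod = typMod;
    domVal->collation = get_typcollation(baseTypeOid);
    domVal->location = -1;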
@@ -3309,7 +3309,7 @@ AlterTypeOwner(List *names, Oid newOwnerId, ObjectType objecttype)
/*
* If it's a composite type, invoke ATExecChangeOwner so that we fix
- * up the pg_class entry properly. That will call back to
+ * up the pg_class entry properly.  That will call back to
* AlterTypeOwnerInternal to take care of the pg_type entry(s).
*/
if (typTup->typtype == TYPTYPE_COMPOSITE)
@@ -3456,7 +3456,7 @@ AlterTypeNamespace_oid(Oid typeOid, Oid nspOid, ObjectAddresses *objsMoved)
* Caller must have already checked privileges.
*
* The function automatically recurses to process the type's array type,
- * if any. isImplicitArray should be TRUE only when doing this internal
+ * if any.  isImplicitArray should be TRUE only when doing this internal
* recursion (outside callers must never try to move an array type directly).
*
* If errorOnTableType is TRUE, the function errors out if the type is
diff --git a/src/backend/commands/user.c b/src/backend/commands/user.c
index 068fff4270f..5c1b3a4474a 100644
--- a/src/backend/commands/user.c
+++ b/src/backend/commands/user.c
@@ -995,7 +995,7 @@ DropRole(DropRoleStmt *stmt)
ReleaseSysCache(tuple);
/*
- * Remove role from the pg_auth_members table. We have to remove all
+ * Remove role from the pg_auth_members table.  We have to remove all
* tuples that show it as either a role or a member.
*
* XXX what about grantor entries? Maybe we should do one heap scan.
@@ -1091,7 +1091,7 @@ RenameRole(const char *oldname, const char *newname)
* XXX Client applications probably store the session user somewhere, so
* renaming it could cause confusion. On the other hand, there may not be
* an actual problem besides a little confusion, so think about this and
- * decide. Same for SET ROLE ... we don't restrict renaming the current
+ * decide.  Same for SET ROLE ... we don't restrict renaming the current
* effective userid, though.
*/
@@ -1347,7 +1347,7 @@ AddRoleMems(const char *rolename, Oid roleid,
/*
* Check permissions: must have createrole or admin option on the role to
- * be changed. To mess with a superuser role, you gotta be superuser.
+ * be changed.  To mess with a superuser role, you gotta be superuser.
*/
if (superuser_arg(roleid))
{
@@ -1493,7 +1493,7 @@ DelRoleMems(const char *rolename, Oid roleid,
/*
* Check permissions: must have createrole or admin option on the role to
- * be changed. To mess with a superuser role, you gotta be superuser.
+ * be changed.  To mess with a superuser role, you gotta be superuser.
*/
if (superuser_arg(roleid))
{
diff --git a/src/backend/commands/vacuum.c b/src/backend/commands/vacuum.c
index d5d915e280f..09aa1b4ae99 100644
--- a/src/backend/commands/vacuum.c
+++ b/src/backend/commands/vacuum.c
@@ -417,9 +417,9 @@ vacuum_set_xid_limits(int freeze_min_age,
MultiXactId safeMxactLimit;
/*
- * We can always ignore processes running lazy vacuum. This is because we
+ * We can always ignore processes running lazy vacuum.  This is because we
* use these values only for deciding which tuples we must keep in the
- * tables. Since lazy vacuum doesn't write its XID anywhere, it's safe to
+ * tables.  Since lazy vacuum doesn't write its XID anywhere, it's safe to
* ignore it. In theory it could be problematic to ignore lazy vacuums in
* a full vacuum, but keep in mind that only one vacuum process can be
* working on a particular table at any time, and that each vacuum is
@@ -566,7 +566,7 @@ vacuum_set_xid_limits(int freeze_min_age,
* If we scanned the whole relation then we should just use the count of
* live tuples seen; but if we did not, we should not trust the count
* unreservedly, especially not in VACUUM, which may have scanned a quite
- * nonrandom subset of the table. When we have only partial information,
+ * nonrandom subset of the table.  When we have only partial information,
* we take the old value of pg_class.reltuples as a measurement of the
* tuple density in the unscanned pages.
*
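Numerically, the partial-scan estimate amounts to this sketch (simplified from vac_estimate_reltuples; the variable names are assumed):

    old_density = old_rel_tuples / old_rel_pages;
    unscanned_pages = (double) total_pages - (double) scanned_pages;
    total_tuples = scanned_tuples + old_density * unscanned_pages;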
@@ -712,7 +712,7 @@ vac_update_relstats(Relation relation,
/*
* If we have discovered that there are no indexes, then there's no
- * primary key either. This could be done more thoroughly...
+ * primary key either.  This could be done more thoroughly...
*/
if (pgcform->relhaspkey && !hasindex)
{
@@ -772,7 +772,7 @@ vac_update_relstats(Relation relation,
* truncate pg_clog and pg_multixact.
*
* We violate transaction semantics here by overwriting the database's
- * existing pg_database tuple with the new value. This is reasonably
+ * existing pg_database tuple with the new value.  This is reasonably
* safe since the new value is correct whether or not this transaction
* commits. As with vac_update_relstats, this avoids leaving dead tuples
* behind after a VACUUM.
@@ -892,7 +892,7 @@ vac_update_datfrozenxid(void)
* Also update the XID wrap limit info maintained by varsup.c.
*
* The passed XID is simply the one I just wrote into my pg_database
- * entry. It's used to initialize the "min" calculation.
+ * entry.  It's used to initialize the "min" calculation.
*
* This routine is only invoked when we've managed to change our
* DB's datfrozenxid entry, or we found that the shared XID-wrap-limit
@@ -976,7 +976,7 @@ vac_truncate_clog(TransactionId frozenXID, MultiXactId minMulti)
/*
* Update the wrap limit for GetNewTransactionId and creation of new
* MultiXactIds. Note: these functions will also signal the postmaster
- * for an(other) autovac cycle if needed. XXX should we avoid possibly
+ * for an(other) autovac cycle if needed.  XXX should we avoid possibly
* signalling twice?
*/
SetTransactionIdLimit(frozenXID, oldestxid_datoid);
@@ -988,7 +988,7 @@ vac_truncate_clog(TransactionId frozenXID, MultiXactId minMulti)
* vacuum_rel() -- vacuum one heap relation
*
* Doing one heap at a time incurs extra overhead, since we need to
- * check that the heap exists again just before we vacuum it. The
+ * check that the heap exists again just before we vacuum it.  The
* reason that we do this is so that vacuuming can be spread across
* many small transactions. Otherwise, two-phase locking would require
* us to lock the entire database during one pass of the vacuum cleaner.
@@ -1045,7 +1045,7 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, bool do_toast, bool for_wraparound)
}
/*
- * Check for user-requested abort. Note we want this to be inside a
+ * Check for user-requested abort.  Note we want this to be inside a
* transaction, so xact.c doesn't issue useless WARNING.
*/
CHECK_FOR_INTERRUPTS();
@@ -1092,7 +1092,7 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, bool do_toast, bool for_wraparound)
*
* We allow the user to vacuum a table if he is superuser, the table
* owner, or the database owner (but in the latter case, only if it's not
- * a shared relation). pg_class_ownercheck includes the superuser case.
+ * a shared relation).  pg_class_ownercheck includes the superuser case.
*
* Note we choose to treat permissions failure as a WARNING and keep
* trying to vacuum the rest of the DB --- is this appropriate?
@@ -1223,7 +1223,7 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, bool do_toast, bool for_wraparound)
/*
* If the relation has a secondary toast rel, vacuum that too while we
* still hold the session lock on the master table. Note however that
- * "analyze" will not get done on the toast table. This is good, because
+ * "analyze" will not get done on the toast table. This is good, because
* the toaster always uses hardcoded index access and statistics are
* totally unimportant for toast relations.
*/
@@ -1242,7 +1242,7 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, bool do_toast, bool for_wraparound)
/*
* Open all the vacuumable indexes of the given relation, obtaining the
- * specified kind of lock on each. Return an array of Relation pointers for
+ * specified kind of lock on each.  Return an array of Relation pointers for
* the indexes into *Irel, and the number of indexes into *nindexes.
*
* We consider an index vacuumable if it is marked insertable (IndexIsReady).
@@ -1292,7 +1292,7 @@ vac_open_indexes(Relation relation, LOCKMODE lockmode,
}
/*
- * Release the resources acquired by vac_open_indexes. Optionally release
+ * Release the resources acquired by vac_open_indexes.  Optionally release
* the locks (say NoLock to keep 'em).
*/
void
diff --git a/src/backend/commands/vacuumlazy.c b/src/backend/commands/vacuumlazy.c
index 2eec4e74d97..d2141739c29 100644
--- a/src/backend/commands/vacuumlazy.c
+++ b/src/backend/commands/vacuumlazy.c
@@ -13,7 +13,7 @@
* We are willing to use at most maintenance_work_mem memory space to keep
* track of dead tuples. We initially allocate an array of TIDs of that size,
* with an upper limit that depends on table size (this limit ensures we don't
- * allocate a huge area uselessly for vacuuming small tables). If the array
+ * allocate a huge area uselessly for vacuuming small tables).  If the array
* threatens to overflow, we suspend the heap scan phase and perform a pass of
* index cleanup and page compaction, then resume the heap scan with an empty
* TID array.
@@ -464,7 +464,7 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
* Before entering the main loop, establish the invariant that
* next_not_all_visible_block is the next block number >= blkno that's not
* all-visible according to the visibility map, or nblocks if there's no
- * such block. Also, we set up the skipping_all_visible_blocks flag,
+ * such block.  Also, we set up the skipping_all_visible_blocks flag,
* which is needed because we need hysteresis in the decision: once we've
* started skipping blocks, we may as well skip everything up to the next
* not-all-visible block.
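Establishing that invariant looks roughly like this (sketched against the 9.x visibilitymap_test() API; SKIP_PAGES_THRESHOLD supplies the hysteresis the comment mentions):

    for (next_not_all_visible_block = 0;
         next_not_all_visible_block < nblocks;
         next_not_all_visible_block++)
    {
        if (!visibilitymap_test(onerel, next_not_all_visible_block,
                                &vmbuffer))
            break;
    }
    skipping_all_visible_blocks =
        (next_not_all_visible_block >= SKIP_PAGES_THRESHOLD);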
@@ -966,7 +966,7 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
/*
* It should never be the case that the visibility map page is set
* while the page-level bit is clear, but the reverse is allowed
- * (if checksums are not enabled). Regardless, set the both bits
+ * (if checksums are not enabled).  Regardless, set both bits
* so that we get back in sync.
*
* NB: If the heap page is all-visible but the VM bit is not set,
@@ -1028,8 +1028,8 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
/*
* If we remembered any tuples for deletion, then the page will be
* visited again by lazy_vacuum_heap, which will compute and record
- * its post-compaction free space. If not, then we're done with this
- * page, so remember its free space as-is. (This path will always be
+ * its post-compaction free space.  If not, then we're done with this
+ * page, so remember its free space as-is.  (This path will always be
* taken if there are no indexes.)
*/
if (vacrelstats->num_dead_tuples == prev_dead_count)
diff --git a/src/backend/commands/variable.c b/src/backend/commands/variable.c
index b6af6e7e253..d7820d1cbc4 100644
--- a/src/backend/commands/variable.c
+++ b/src/backend/commands/variable.c
@@ -176,7 +176,7 @@ check_datestyle(char **newval, void **extra, GucSource source)
}
/*
- * Prepare the canonical string to return. GUC wants it malloc'd.
+ * Prepare the canonical string to return.  GUC wants it malloc'd.
*/
result = (char *) malloc(32);
if (!result)
@@ -274,7 +274,7 @@ check_timezone(char **newval, void **extra, GucSource source)
if (pg_strncasecmp(*newval, "interval", 8) == 0)
{
/*
- * Support INTERVAL 'foo'. This is for SQL spec compliance, not
+ * Support INTERVAL 'foo'.  This is for SQL spec compliance, not
* because it has any actual real-world usefulness.
*/
const char *valueptr = *newval;
@@ -298,7 +298,7 @@ check_timezone(char **newval, void **extra, GucSource source)
/*
* Try to parse it. XXX an invalid interval format will result in
- * ereport(ERROR), which is not desirable for GUC. We did what we
+ * ereport(ERROR), which is not desirable for GUC.  We did what we
* could to guard against this in flatten_set_variable_args, but a
* string coming in from postgresql.conf might contain anything.
*/
@@ -374,11 +374,11 @@ check_timezone(char **newval, void **extra, GucSource source)
}
/*
- * Prepare the canonical string to return. GUC wants it malloc'd.
+ * Prepare the canonical string to return.  GUC wants it malloc'd.
*
* Note: the result string should be something that we'd accept as input.
* We use the numeric format for interval cases, because it's simpler to
- * reload. In the named-timezone case, *newval is already OK and need not
+ * reload.  In the named-timezone case, *newval is already OK and need not
* be changed; it might not have the canonical casing, but that's taken
* care of by show_timezone.
*/
@@ -534,7 +534,7 @@ show_log_timezone(void)
* We allow idempotent changes (r/w -> r/w and r/o -> r/o) at any time, and
* we also always allow changes from read-write to read-only. However,
* read-only may be changed to read-write only when in a top-level transaction
- * that has not yet taken an initial snapshot. Can't do it in a hot standby
+ * that has not yet taken an initial snapshot.  Can't do it in a hot standby
* slave, either.
*
* If we are not in a transaction at all, just allow the change; it means
@@ -695,7 +695,7 @@ check_transaction_deferrable(bool *newval, void **extra, GucSource source)
*
* We can't roll back the random sequence on error, and we don't want
* config file reloads to affect it, so we only want interactive SET SEED
- * commands to set it. We use the "extra" storage to ensure that rollbacks
+ * commands to set it.  We use the "extra" storage to ensure that rollbacks
* don't try to do the operation again.
*/
@@ -971,7 +971,7 @@ const char *
show_role(void)
{
/*
- * Check whether SET ROLE is active; if not return "none". This is a
+ * Check whether SET ROLE is active; if not return "none".  This is a
* kluge to deal with the fact that SET SESSION AUTHORIZATION logically
* resets SET ROLE to NONE, but we cannot set the GUC role variable from
* assign_session_authorization (because we haven't got enough info to
diff --git a/src/backend/commands/view.c b/src/backend/commands/view.c
index 6186a841556..90ec7ea2581 100644
--- a/src/backend/commands/view.c
+++ b/src/backend/commands/view.c
@@ -324,11 +324,11 @@ UpdateRangeTableOfViewParse(Oid viewOid, Query *viewParse)
*rt_entry2;
/*
- * Make a copy of the given parsetree. It's not so much that we don't
+ * Make a copy of the given parsetree.  It's not so much that we don't
* want to scribble on our input, it's that the parser has a bad habit of
* outputting multiple links to the same subtree for constructs like
* BETWEEN, and we mustn't have OffsetVarNodes increment the varno of a
- * Var node twice. copyObject will expand any multiply-referenced subtree
+ * Var node twice.  copyObject will expand any multiply-referenced subtree
* into multiple copies.
*/
viewParse = (Query *) copyObject(viewParse);
@@ -448,7 +448,7 @@ DefineView(ViewStmt *stmt, const char *queryString)
/*
* If the user didn't explicitly ask for a temporary view, check whether
- * we need one implicitly. We allow TEMP to be inserted automatically as
+ * we need one implicitly.  We allow TEMP to be inserted automatically as
* long as the CREATE command is consistent with that --- no explicit
* schema name.
*/
diff --git a/src/backend/executor/execAmi.c b/src/backend/executor/execAmi.c
index a078104eeb5..5d2f09ad82c 100644
--- a/src/backend/executor/execAmi.c
+++ b/src/backend/executor/execAmi.c
@@ -317,7 +317,7 @@ ExecMarkPos(PlanState *node)
*
* NOTE: the semantics of this are that the first ExecProcNode following
* the restore operation will yield the same tuple as the first one following
- * the mark operation. It is unspecified what happens to the plan node's
+ * the mark operation.  It is unspecified what happens to the plan node's
* result TupleTableSlot. (In most cases the result slot is unchanged by
* a restore, but the node may choose to clear it or to load it with the
* restored-to tuple.) Hence the caller should discard any previously
@@ -397,7 +397,7 @@ ExecSupportsMarkRestore(NodeTag plantype)
/*
* T_Result only supports mark/restore if it has a child plan that
* does, so we do not have enough information to give a really
- * correct answer. However, for current uses it's enough to
+ * correct answer.  However, for current uses it's enough to
* always say "false", because this routine is not asked about
* gating Result plans, only base-case Results.
*/
diff --git a/src/backend/executor/execCurrent.c b/src/backend/executor/execCurrent.c
index 49963babb18..2c9cf13059d 100644
--- a/src/backend/executor/execCurrent.c
+++ b/src/backend/executor/execCurrent.c
@@ -142,7 +142,7 @@ execCurrentOf(CurrentOfExpr *cexpr,
/*
* This table didn't produce the cursor's current row; some other
- * inheritance child of the same parent must have. Signal caller to
+ * inheritance child of the same parent must have.  Signal caller to
* do nothing on this table.
*/
return false;
diff --git a/src/backend/executor/execJunk.c b/src/backend/executor/execJunk.c
index 9d88966da8d..54037c4a492 100644
--- a/src/backend/executor/execJunk.c
+++ b/src/backend/executor/execJunk.c
@@ -52,7 +52,7 @@
*
* Initialize the Junk filter.
*
- * The source targetlist is passed in. The output tuple descriptor is
+ * The source targetlist is passed in.  The output tuple descriptor is
* built from the non-junk tlist entries, plus the passed specification
* of whether to include room for an OID or not.
* An optional resultSlot can be passed as well.
diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c
index a9c8140581b..85d1c6326bd 100644
--- a/src/backend/executor/execMain.c
+++ b/src/backend/executor/execMain.c
@@ -19,7 +19,7 @@
* ExecutorRun accepts direction and count arguments that specify whether
* the plan is to be executed forwards, backwards, and for how many tuples.
* In some cases ExecutorRun may be called multiple times to process all
- * the tuples for a plan. It is also acceptable to stop short of executing
+ * the tuples for a plan.  It is also acceptable to stop short of executing
* the whole plan (but only if it is a SELECT).
*
* ExecutorFinish must be called after the final ExecutorRun call and
@@ -328,12 +328,12 @@ standard_ExecutorRun(QueryDesc *queryDesc,
* ExecutorFinish
*
* This routine must be called after the last ExecutorRun call.
- * It performs cleanup such as firing AFTER triggers. It is
+ * It performs cleanup such as firing AFTER triggers.  It is
* separate from ExecutorEnd because EXPLAIN ANALYZE needs to
* include these actions in the total runtime.
*
* We provide a function hook variable that lets loadable plugins
- * get control when ExecutorFinish is called. Such a plugin would
+ * get control when ExecutorFinish is called.  Such a plugin would
* normally call standard_ExecutorFinish().
*
* ----------------------------------------------------------------
@@ -564,7 +564,7 @@ ExecCheckRTEPerms(RangeTblEntry *rte)
* userid to check as: current user unless we have a setuid indication.
*
* Note: GetUserId() is presently fast enough that there's no harm in
- * calling it separately for each RTE. If that stops being true, we could
+ * calling it separately for each RTE.  If that stops being true, we could
* call it once in ExecCheckRTPerms and pass the userid down from there.
* But for now, no need for the extra clutter.
*/
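The selection itself is a one-liner; checkAsUser is nonzero only when a view or similar "setuid" context applies:

    userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();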
@@ -1182,7 +1182,7 @@ InitResultRelInfo(ResultRelInfo *resultRelInfo,
* if so it doesn't matter which one we pick.) However, it is sometimes
* necessary to fire triggers on other relations; this happens mainly when an
* RI update trigger queues additional triggers on other relations, which will
- * be processed in the context of the outer query. For efficiency's sake,
+ * be processed in the context of the outer query.  For efficiency's sake,
* we want to have a ResultRelInfo for those triggers too; that can avoid
* repeated re-opening of the relation. (It also provides a way for EXPLAIN
* ANALYZE to report the runtimes of such triggers.) So we make additional
@@ -1219,7 +1219,7 @@ ExecGetTriggerResultRel(EState *estate, Oid relid)
/*
* Open the target relation's relcache entry. We assume that an
* appropriate lock is still held by the backend from whenever the trigger
- * event got queued, so we need take no new lock here. Also, we need not
+ * event got queued, so we need take no new lock here.  Also, we need not
* recheck the relkind, so no need for CheckValidResultRel.
*/
rel = heap_open(relid, NoLock);
@@ -1325,7 +1325,7 @@ ExecPostprocessPlan(EState *estate)
/*
* Run any secondary ModifyTable nodes to completion, in case the main
- * query did not fetch all rows from them. (We do this to ensure that
+ * query did not fetch all rows from them.  (We do this to ensure that
* such nodes have predictable results.)
*/
foreach(lc, estate->es_auxmodifytables)
@@ -1634,7 +1634,7 @@ ExecConstraints(ResultRelInfo *resultRelInfo,
*
* This is intentionally very similar to BuildIndexValueDescription, but
* unlike that function, we truncate long field values (to at most maxfieldlen
- * bytes). That seems necessary here since heap field values could be very
+ * bytes).  That seems necessary here since heap field values could be very
* long, whereas index entries typically aren't so wide.
*
* Also, unlike the case with index entries, we need to be prepared to ignore
@@ -1828,7 +1828,7 @@ EvalPlanQual(EState *estate, EPQState *epqstate,
*tid = copyTuple->t_self;
/*
- * Need to run a recheck subquery. Initialize or reinitialize EPQ state.
+ * Need to run a recheck subquery.  Initialize or reinitialize EPQ state.
*/
EvalPlanQualBegin(epqstate, estate);
@@ -1911,7 +1911,7 @@ EvalPlanQualFetch(EState *estate, Relation relation, int lockmode,
/*
* If xmin isn't what we're expecting, the slot must have been
- * recycled and reused for an unrelated tuple. This implies that
+ * recycled and reused for an unrelated tuple.  This implies that
* the latest version of the row was deleted, so we need do
* nothing. (Should be safe to examine xmin without getting
* buffer's content lock, since xmin never changes in an existing
@@ -2150,7 +2150,7 @@ EvalPlanQualGetTuple(EPQState *epqstate, Index rti)
/*
* Fetch the current row values for any non-locked relations that need
- * to be scanned by an EvalPlanQual operation. origslot must have been set
+ * to be scanned by an EvalPlanQual operation.  origslot must have been set
* to contain the current result row (top-level row) that we need to recheck.
*/
void
@@ -2379,7 +2379,7 @@ EvalPlanQualStart(EPQState *epqstate, EState *parentestate, Plan *planTree)
/*
* Each EState must have its own es_epqScanDone state, but if we have
- * nested EPQ checks they should share es_epqTuple arrays. This allows
+ * nested EPQ checks they should share es_epqTuple arrays.  This allows
* sub-rechecks to inherit the values being examined by an outer recheck.
*/
estate->es_epqScanDone = (bool *) palloc0(rtsize * sizeof(bool));
@@ -2436,7 +2436,7 @@ EvalPlanQualStart(EPQState *epqstate, EState *parentestate, Plan *planTree)
*
* This is a cut-down version of ExecutorEnd(); basically we want to do most
* of the normal cleanup, but *not* close result relations (which we are
- * just sharing from the outer query). We do, however, have to close any
+ * just sharing from the outer query).  We do, however, have to close any
* trigger target relations that got opened, since those are not shared.
* (There probably shouldn't be any of the latter, but just in case...)
*/
diff --git a/src/backend/executor/execProcnode.c b/src/backend/executor/execProcnode.c
index 76dd62f7112..baf4e451f03 100644
--- a/src/backend/executor/execProcnode.c
+++ b/src/backend/executor/execProcnode.c
@@ -52,7 +52,7 @@
* * ExecInitNode() notices that it is looking at a nest loop and
* as the code below demonstrates, it calls ExecInitNestLoop().
* Eventually this calls ExecInitNode() on the right and left subplans
- * and so forth until the entire plan is initialized. The result
+ * and so forth until the entire plan is initialized.  The result
* of ExecInitNode() is a plan state tree built with the same structure
* as the underlying plan tree.
*
@@ -575,7 +575,7 @@ MultiExecProcNode(PlanState *node)
* at 'node'.
*
* After this operation, the query plan will not be able to be
- * processed any further. This should be called only after
+ * processed any further.  This should be called only after
* the query plan has been fully executed.
* ----------------------------------------------------------------
*/
diff --git a/src/backend/executor/execQual.c b/src/backend/executor/execQual.c
index c34802b698a..ab05ec226ea 100644
--- a/src/backend/executor/execQual.c
+++ b/src/backend/executor/execQual.c
@@ -26,7 +26,7 @@
* ExecProject() is used to make tuple projections. Rather than
* trying to speed it up, the execution plan should be pre-processed
* to facilitate attribute sharing between nodes wherever possible,
- * instead of doing needless copying. -cim 5/31/91
+ * instead of doing needless copying.  -cim 5/31/91
*
* During expression evaluation, we check_stack_depth only in
* ExecMakeFunctionResult (and substitute routines) rather than at every
@@ -201,7 +201,7 @@ static Datum ExecEvalCurrentOfExpr(ExprState *exprstate, ExprContext *econtext,
*
* Note: for notational simplicity we declare these functions as taking the
* specific type of ExprState that they work on. This requires casting when
- * assigning the function pointer in ExecInitExpr. Be careful that the
+ * assigning the function pointer in ExecInitExpr.  Be careful that the
* function signature is declared correctly, because the cast suppresses
* automatic checking!
*
@@ -236,7 +236,7 @@ static Datum ExecEvalCurrentOfExpr(ExprState *exprstate, ExprContext *econtext,
* The caller should already have switched into the temporary memory
* context econtext->ecxt_per_tuple_memory. The convenience entry point
* ExecEvalExprSwitchContext() is provided for callers who don't prefer to
- * do the switch in an outer loop. We do not do the switch in these routines
+ * do the switch in an outer loop.  We do not do the switch in these routines
* because it'd be a waste of cycles during nested expression evaluation.
* ----------------------------------------------------------------
*/
@@ -366,7 +366,7 @@ ExecEvalArrayRef(ArrayRefExprState *astate,
* We might have a nested-assignment situation, in which the
* refassgnexpr is itself a FieldStore or ArrayRef that needs to
* obtain and modify the previous value of the array element or slice
- * being replaced. If so, we have to extract that value from the
+ * being replaced.  If so, we have to extract that value from the
* array and pass it down via the econtext's caseValue. It's safe to
* reuse the CASE mechanism because there cannot be a CASE between
* here and where the value would be needed, and an array assignment
@@ -439,7 +439,7 @@ ExecEvalArrayRef(ArrayRefExprState *astate,
/*
* For assignment to varlena arrays, we handle a NULL original array
* by substituting an empty (zero-dimensional) array; insertion of the
- * new element will result in a singleton array value. It does not
+ * new element will result in a singleton array value.  It does not
* matter whether the new element is NULL.
*/
if (*isNull)
@@ -829,11 +829,11 @@ ExecEvalWholeRowVar(WholeRowVarExprState *wrvstate, ExprContext *econtext,
* We really only care about numbers of attributes and data types.
* Also, we can ignore type mismatch on columns that are dropped in
* the destination type, so long as (1) the physical storage matches
- * or (2) the actual column value is NULL. Case (1) is helpful in
+ * or (2) the actual column value is NULL.  Case (1) is helpful in
* some cases involving out-of-date cached plans, while case (2) is
* expected behavior in situations such as an INSERT into a table with
* dropped columns (the planner typically generates an INT4 NULL
- * regardless of the dropped column type). If we find a dropped
+ * regardless of the dropped column type).  If we find a dropped
* column and cannot verify that case (1) holds, we have to use
* ExecEvalWholeRowSlow to check (2) for each row.
*/
@@ -1491,7 +1491,7 @@ ExecEvalFuncArgs(FunctionCallInfo fcinfo,
* ExecPrepareTuplestoreResult
*
* Subroutine for ExecMakeFunctionResult: prepare to extract rows from a
- * tuplestore function result. We must set up a funcResultSlot (unless
+ * tuplestore function result.  We must set up a funcResultSlot (unless
* already done in a previous call cycle) and verify that the function
* returned the expected tuple descriptor.
*/
@@ -1536,7 +1536,7 @@ ExecPrepareTuplestoreResult(FuncExprState *fcache,
}
/*
- * If function provided a tupdesc, cross-check it. We only really need to
+ * If function provided a tupdesc, cross-check it.  We only really need to
* do this for functions returning RECORD, but might as well do it always.
*/
if (resultDesc)
@@ -1719,7 +1719,7 @@ restart:
if (fcache->func.fn_retset || hasSetArg)
{
/*
- * We need to return a set result. Complain if caller not ready to
+ * We need to return a set result.  Complain if caller not ready to
* accept one.
*/
if (isDone == NULL)
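That complaint is the standard executor error for this protocol violation (fragment):

    if (isDone == NULL)
        ereport(ERROR,
                (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                 errmsg("set-valued function called in context that "
                        "cannot accept a set")));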
@@ -2046,7 +2046,7 @@ ExecMakeTableFunctionResult(ExprState *funcexpr,
/*
* Normally the passed expression tree will be a FuncExprState, since the
* grammar only allows a function call at the top level of a table
- * function reference. However, if the function doesn't return set then
+ * function reference.  However, if the function doesn't return set then
* the planner might have replaced the function call via constant-folding
* or inlining. So if we see any other kind of expression node, execute
* it via the general ExecEvalExpr() code; the only difference is that we
@@ -2085,7 +2085,7 @@ ExecMakeTableFunctionResult(ExprState *funcexpr,
*
* Note: ideally, we'd do this in the per-tuple context, but then the
* argument values would disappear when we reset the context in the
- * inner loop. So do it in caller context. Perhaps we should make a
+ * inner loop.  So do it in caller context.  Perhaps we should make a
* separate context just to hold the evaluated arguments?
*/
argDone = ExecEvalFuncArgs(&fcinfo, fcache->args, econtext);
@@ -2171,7 +2171,7 @@ ExecMakeTableFunctionResult(ExprState *funcexpr,
* Can't do anything very useful with NULL rowtype values. For a
* function returning set, we consider this a protocol violation
* (but another alternative would be to just ignore the result and
- * "continue" to get another row). For a function not returning
+ * "continue" to get another row). For a function not returning
* set, we fall out of the loop; we'll cons up an all-nulls result
* row below.
*/
@@ -2305,7 +2305,7 @@ no_function_result:
}
/*
- * If function provided a tupdesc, cross-check it. We only really need to
+ * If function provided a tupdesc, cross-check it.  We only really need to
* do this for functions returning RECORD, but might as well do it always.
*/
if (rsinfo.setDesc)
@@ -2483,7 +2483,7 @@ ExecEvalDistinct(FuncExprState *fcache,
*
* Evaluate "scalar op ANY/ALL (array)". The operator always yields boolean,
* and we combine the results across all array elements using OR and AND
- * (for ANY and ALL respectively). Of course we short-circuit as soon as
+ * (for ANY and ALL respectively).  Of course we short-circuit as soon as
* the result is known.
*/
static Datum
@@ -2670,7 +2670,7 @@ ExecEvalScalarArrayOp(ScalarArrayOpExprState *sstate,
* qualification to conjunctive normal form. If we ever get
* an AND to evaluate, we can be sure that it's not a top-level
* clause in the qualification, but appears lower (as a function
- * argument, for example), or in the target list. Not that you
+ * argument, for example), or in the target list.  Not that you
* need to know this, mind you...
* ----------------------------------------------------------------
*/
@@ -2801,7 +2801,7 @@ ExecEvalAnd(BoolExprState *andExpr, ExprContext *econtext,
/* ----------------------------------------------------------------
* ExecEvalConvertRowtype
*
- * Evaluate a rowtype coercion operation. This may require
+ * Evaluate a rowtype coercion operation.  This may require
* rearranging field positions.
* ----------------------------------------------------------------
*/
@@ -2930,7 +2930,7 @@ ExecEvalCase(CaseExprState *caseExpr, ExprContext *econtext,
/*
* if we have a true test, then we return the result, since the case
- * statement is satisfied. A NULL result from the test is not
+ * statement is satisfied.  A NULL result from the test is not
* considered true.
*/
if (DatumGetBool(clause_value) && !*isNull)
@@ -3144,7 +3144,7 @@ ExecEvalArray(ArrayExprState *astate, ExprContext *econtext,
* If all items were null or empty arrays, return an empty array;
* otherwise, if some were and some weren't, raise error. (Note: we
* must special-case this somehow to avoid trying to generate a 1-D
- * array formed from empty arrays. It's not ideal...)
+ * array formed from empty arrays.  It's not ideal...)
*/
if (haveempty)
{
@@ -4315,7 +4315,7 @@ ExecEvalExprSwitchContext(ExprState *expression,
* ExecInitExpr: prepare an expression tree for execution
*
* This function builds and returns an ExprState tree paralleling the given
- * Expr node tree. The ExprState tree can then be handed to ExecEvalExpr
+ * Expr node tree.  The ExprState tree can then be handed to ExecEvalExpr
* for execution. Because the Expr tree itself is read-only as far as
* ExecInitExpr and ExecEvalExpr are concerned, several different executions
* of the same plan tree can occur concurrently.
@@ -4326,9 +4326,9 @@ ExecEvalExprSwitchContext(ExprState *expression,
*
* Any Aggref, WindowFunc, or SubPlan nodes found in the tree are added to the
* lists of such nodes held by the parent PlanState. Otherwise, we do very
- * little initialization here other than building the state-node tree. Any
+ * little initialization here other than building the state-node tree.  Any
* nontrivial work associated with initializing runtime info for a node should
- * happen during the first actual evaluation of that node. (This policy lets
+ * happen during the first actual evaluation of that node.  (This policy lets
* us avoid work if the node is never actually evaluated.)
*
* Note: there is no ExecEndExpr function; we assume that any resource
@@ -5127,7 +5127,7 @@ ExecQual(List *qual, ExprContext *econtext, bool resultForNull)
oldContext = MemoryContextSwitchTo(econtext->ecxt_per_tuple_memory);
/*
- * Evaluate the qual conditions one at a time. If we find a FALSE result,
+ * Evaluate the qual conditions one at a time.  If we find a FALSE result,
* we can stop evaluating and return FALSE --- the AND result must be
* FALSE. Also, if we find a NULL result when resultForNull is FALSE, we
* can stop and return FALSE --- the AND result must be FALSE or NULL in
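The short-circuit AND evaluation the comment describes, sketched against the 9.x ExecEvalExpr() signature:

    result = true;
    foreach(l, qual)
    {
        ExprState  *clause = (ExprState *) lfirst(l);
        bool        isNull;
        Datum       expr_value;

        expr_value = ExecEvalExpr(clause, econtext, &isNull, NULL);
        if (isNull)
        {
            if (!resultForNull)
            {
                result = false;     /* treat NULL as FALSE */
                break;
            }
        }
        else if (!DatumGetBool(expr_value))
        {
            result = false;
            break;
        }
    }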
@@ -5286,7 +5286,7 @@ ExecTargetList(List *targetlist,
else
{
/*
- * We have some done and some undone sets. Restart the done ones
+ * We have some done and some undone sets.  Restart the done ones
* so that we can deliver a tuple (if possible).
*/
foreach(tl, targetlist)
diff --git a/src/backend/executor/execScan.c b/src/backend/executor/execScan.c
index 3ea6460cc75..0b37c630d34 100644
--- a/src/backend/executor/execScan.c
+++ b/src/backend/executor/execScan.c
@@ -30,7 +30,7 @@ static bool tlist_matches_tupdesc(PlanState *ps, List *tlist, Index varno, Tuple
* ExecScanFetch -- fetch next potential tuple
*
* This routine is concerned with substituting a test tuple if we are
- * inside an EvalPlanQual recheck. If we aren't, just execute
+ * inside an EvalPlanQual recheck.  If we aren't, just execute
* the access method's next-tuple routine.
*/
static inline TupleTableSlot *
@@ -155,7 +155,7 @@ ExecScan(ScanState *node,
ResetExprContext(econtext);
/*
- * get a tuple from the access method. Loop until we obtain a tuple that
+ * get a tuple from the access method.  Loop until we obtain a tuple that
* passes the qualification.
*/
for (;;)
diff --git a/src/backend/executor/execTuples.c b/src/backend/executor/execTuples.c
index 984845d7edc..43d047747f1 100644
--- a/src/backend/executor/execTuples.c
+++ b/src/backend/executor/execTuples.c
@@ -4,7 +4,7 @@
* Routines dealing with TupleTableSlots. These are used for resource
* management associated with tuples (eg, releasing buffer pins for
* tuples in disk buffers, or freeing the memory occupied by transient
- * tuples). Slots also provide access abstraction that lets us implement
+ * tuples).  Slots also provide access abstraction that lets us implement
* "virtual" tuples to reduce data-copying overhead.
*
* Routines dealing with the type information for tuples. Currently,
@@ -261,7 +261,7 @@ ExecSetSlotDescriptor(TupleTableSlot *slot, /* slot to change */
ExecClearTuple(slot);
/*
- * Release any old descriptor. Also release old Datum/isnull arrays if
+ * Release any old descriptor.  Also release old Datum/isnull arrays if
* present (we don't bother to check if they could be re-used).
*/
if (slot->tts_tupleDescriptor)
@@ -311,7 +311,7 @@ ExecSetSlotDescriptor(TupleTableSlot *slot, /* slot to change */
* Another case where it is 'false' is when the referenced tuple is held
* in a tuple table slot belonging to a lower-level executor Proc node.
* In this case the lower-level slot retains ownership and responsibility
- * for eventually releasing the tuple. When this method is used, we must
+ * for eventually releasing the tuple.  When this method is used, we must
* be certain that the upper-level Proc node will lose interest in the tuple
* sooner than the lower-level one does! If you're not certain, copy the
* lower-level tuple with heap_copytuple and let the upper-level table
@@ -650,7 +650,7 @@ ExecFetchSlotTuple(TupleTableSlot *slot)
* Fetch the slot's minimal physical tuple.
*
* If the slot contains a virtual tuple, we convert it to minimal
- * physical form. The slot retains ownership of the minimal tuple.
+ * physical form.  The slot retains ownership of the minimal tuple.
* If it contains a regular tuple we convert to minimal form and store
* that in addition to the regular tuple (not instead of, because
* callers may hold pointers to Datums within the regular tuple).
@@ -829,7 +829,7 @@ ExecCopySlot(TupleTableSlot *dstslot, TupleTableSlot *srcslot)
* ExecInit{Result,Scan,Extra}TupleSlot
*
* These are convenience routines to initialize the specified slot
- * in nodes inheriting the appropriate state. ExecInitExtraTupleSlot
+ * in nodes inheriting the appropriate state.  ExecInitExtraTupleSlot
* is used for initializing special-purpose slots.
* --------------------------------
*/
@@ -1147,7 +1147,7 @@ BuildTupleFromCStrings(AttInMetadata *attinmeta, char **values)
* code would have no way to obtain a tupledesc for the tuple.
*
* Note that if we do build a new tuple, it's palloc'd in the current
- * memory context. Beware of code that changes context between the initial
+ * memory context.  Beware of code that changes context between the initial
* heap_form_tuple/etc call and calling HeapTuple(Header)GetDatum.
*
* For performance-critical callers, it could be worthwhile to take extra
diff --git a/src/backend/executor/execUtils.c b/src/backend/executor/execUtils.c
index cf7fb72ffcf..9f2abea644f 100644
--- a/src/backend/executor/execUtils.c
+++ b/src/backend/executor/execUtils.c
@@ -342,7 +342,7 @@ CreateStandaloneExprContext(void)
* any previously computed pass-by-reference expression result will go away!
*
* If isCommit is false, we are being called in error cleanup, and should
- * not call callbacks but only release memory. (It might be better to call
+ * not call callbacks but only release memory.  (It might be better to call
* the callbacks and pass the isCommit flag to them, but that would require
* more invasive code changes than currently seems justified.)
*
@@ -371,7 +371,7 @@ FreeExprContext(ExprContext *econtext, bool isCommit)
* ReScanExprContext
*
* Reset an expression context in preparation for a rescan of its
- * plan node. This requires calling any registered shutdown callbacks,
+ * plan node.  This requires calling any registered shutdown callbacks,
* since any partially complete set-returning-functions must be canceled.
*
* Note we make no assumption about the caller's memory context.
@@ -412,7 +412,7 @@ MakePerTupleExprContext(EState *estate)
/* ----------------
* ExecAssignExprContext
*
- * This initializes the ps_ExprContext field. It is only necessary
+ * This initializes the ps_ExprContext field.  It is only necessary
* to do this for nodes which use ExecQual or ExecProject
* because those routines require an econtext. Other nodes that
* don't have to evaluate expressions don't need to do this.
@@ -458,7 +458,7 @@ ExecAssignResultTypeFromTL(PlanState *planstate)
/*
* ExecTypeFromTL needs the parse-time representation of the tlist, not a
- * list of ExprStates. This is good because some plan nodes don't bother
+ * list of ExprStates. This is good because some plan nodes don't bother
* to set up planstate->targetlist ...
*/
tupDesc = ExecTypeFromTL(planstate->plan->targetlist, hasoid);
@@ -486,7 +486,7 @@ ExecGetResultType(PlanState *planstate)
* the given tlist should be a list of ExprState nodes, not Expr nodes.
*
* inputDesc can be NULL, but if it is not, we check to see whether simple
- * Vars in the tlist match the descriptor. It is important to provide
+ * Vars in the tlist match the descriptor. It is important to provide
* inputDesc for relation-scan plan nodes, as a cross check that the relation
* hasn't been changed since the plan was made. At higher levels of a plan,
* there is no need to recheck.
@@ -692,7 +692,7 @@ ExecAssignProjectionInfo(PlanState *planstate,
*
* However ... there is no particular need to do it during ExecEndNode,
* because FreeExecutorState will free any remaining ExprContexts within
- * the EState. Letting FreeExecutorState do it allows the ExprContexts to
+ * the EState. Letting FreeExecutorState do it allows the ExprContexts to
* be freed in reverse order of creation, rather than order of creation as
* will happen if we delete them here, which saves O(N^2) work in the list
* cleanup inside FreeExprContext.
@@ -712,7 +712,7 @@ ExecFreeExprContext(PlanState *planstate)
* the following scan type support functions are for
* those nodes which are stubborn and return tuples in
* their Scan tuple slot instead of their Result tuple
- * slot.. luck fur us, these nodes do not do projections
+ * slot. Luckily for us, these nodes do not do projections
* so we don't have to worry about getting the ProjectionInfo
* right for them... -cim 6/3/91
* ----------------------------------------------------------------
@@ -1111,7 +1111,7 @@ ExecInsertIndexTuples(TupleTableSlot *slot,
/*
* If the index has an associated exclusion constraint, check that.
* This is simpler than the process for uniqueness checks since we
- * always insert first and then check. If the constraint is deferred,
+ * always insert first and then check. If the constraint is deferred,
* we check now anyway, but don't throw error on violation; instead
* we'll queue a recheck event.
*
@@ -1295,7 +1295,7 @@ retry:
/*
* If an in-progress transaction is affecting the visibility of this
- * tuple, we need to wait for it to complete and then recheck. For
+ * tuple, we need to wait for it to complete and then recheck. For
* simplicity we do rechecking by just restarting the whole scan ---
* this case probably doesn't happen often enough to be worth trying
* harder, and anyway we don't want to hold any index internal locks
@@ -1356,7 +1356,7 @@ retry:
/*
* Check existing tuple's index values to see if it really matches the
- * exclusion condition against the new_values. Returns true if conflict.
+ * exclusion condition against the new_values. Returns true if conflict.
*/
static bool
index_recheck_constraint(Relation index, Oid *constr_procs,
diff --git a/src/backend/executor/functions.c b/src/backend/executor/functions.c
index 91755e1b39d..ae17ea7722c 100644
--- a/src/backend/executor/functions.c
+++ b/src/backend/executor/functions.c
@@ -47,7 +47,7 @@ typedef struct
} DR_sqlfunction;
/*
- * We have an execution_state record for each query in a function. Each
+ * We have an execution_state record for each query in a function. Each
* record contains a plantree for its query. If the query is currently in
* F_EXEC_RUN state then there's a QueryDesc too.
*
@@ -466,7 +466,7 @@ sql_fn_resolve_param_name(SQLFunctionParseInfoPtr pinfo,
* Set up the per-query execution_state records for a SQL function.
*
* The input is a List of Lists of parsed and rewritten, but not planned,
- * querytrees. The sublist structure denotes the original query boundaries.
+ * querytrees. The sublist structure denotes the original query boundaries.
*/
static List *
init_execution_state(List *queryTree_list,
@@ -590,7 +590,7 @@ init_sql_fcache(FmgrInfo *finfo, Oid collation, bool lazyEvalOK)
bool isNull;
/*
- * Create memory context that holds all the SQLFunctionCache data. It
+ * Create memory context that holds all the SQLFunctionCache data. It
* must be a child of whatever context holds the FmgrInfo.
*/
fcontext = AllocSetContextCreate(finfo->fn_mcxt,
@@ -602,7 +602,7 @@ init_sql_fcache(FmgrInfo *finfo, Oid collation, bool lazyEvalOK)
oldcontext = MemoryContextSwitchTo(fcontext);
/*
- * Create the struct proper, link it to fcontext and fn_extra. Once this
+ * Create the struct proper, link it to fcontext and fn_extra. Once this
* is done, we'll be able to recover the memory after failure, even if the
* FmgrInfo is long-lived.
*/
@@ -672,7 +672,7 @@ init_sql_fcache(FmgrInfo *finfo, Oid collation, bool lazyEvalOK)
fcache->src = TextDatumGetCString(tmp);
/*
- * Parse and rewrite the queries in the function text. Use sublists to
+ * Parse and rewrite the queries in the function text. Use sublists to
* keep track of the original query boundaries. But we also build a
* "flat" list of the rewritten queries to pass to check_sql_fn_retval.
* This is because the last canSetTag query determines the result type
@@ -712,7 +712,7 @@ init_sql_fcache(FmgrInfo *finfo, Oid collation, bool lazyEvalOK)
* any polymorphic arguments.
*
* Note: we set fcache->returnsTuple according to whether we are returning
- * the whole tuple result or just a single column. In the latter case we
+ * the whole tuple result or just a single column. In the latter case we
* clear returnsTuple because we need not act different from the scalar
* result case, even if it's a rowtype column. (However, we have to force
* lazy eval mode in that case; otherwise we'd need extra code to expand
@@ -944,7 +944,7 @@ postquel_get_single_result(TupleTableSlot *slot,
/*
* Set up to return the function value. For pass-by-reference datatypes,
* be sure to allocate the result in resultcontext, not the current memory
- * context (which has query lifespan). We can't leave the data in the
+ * context (which has query lifespan). We can't leave the data in the
* TupleTableSlot because we intend to clear the slot before returning.
*/
oldcontext = MemoryContextSwitchTo(resultcontext);
@@ -1052,7 +1052,7 @@ fmgr_sql(PG_FUNCTION_ARGS)
/*
* Switch to context in which the fcache lives. This ensures that our
* tuplestore etc will have sufficient lifetime. The sub-executor is
- * responsible for deleting per-tuple information. (XXX in the case of a
+ * responsible for deleting per-tuple information. (XXX in the case of a
* long-lived FmgrInfo, this policy represents more memory leakage, but
* it's not entirely clear where to keep stuff instead.)
*/
@@ -1106,7 +1106,7 @@ fmgr_sql(PG_FUNCTION_ARGS)
* suspend execution before completion is if we are returning a row from a
* lazily-evaluated SELECT. So, when first entering this loop, we'll
* either start a new query (and push a fresh snapshot) or re-establish
- * the active snapshot from the existing query descriptor. If we need to
+ * the active snapshot from the existing query descriptor. If we need to
* start a new query in a subsequent execution of the loop, either we need
* a fresh snapshot (and pushed_snapshot is false) or the existing
* snapshot is on the active stack and we can just bump its command ID.
@@ -1162,7 +1162,7 @@ fmgr_sql(PG_FUNCTION_ARGS)
* Break from loop if we didn't shut down (implying we got a
* lazily-evaluated row). Otherwise we'll press on till the whole
* function is done, relying on the tuplestore to keep hold of the
- * data to eventually be returned. This is necessary since an
+ * data to eventually be returned. This is necessary since an
* INSERT/UPDATE/DELETE RETURNING that sets the result might be
* followed by additional rule-inserted commands, and we want to
* finish doing all those commands before we return anything.
@@ -1184,7 +1184,7 @@ fmgr_sql(PG_FUNCTION_ARGS)
/*
* Flush the current snapshot so that we will take a new one for
- * the new query list. This ensures that new snaps are taken at
+ * the new query list. This ensures that new snaps are taken at
* original-query boundaries, matching the behavior of interactive
* execution.
*/
@@ -1242,7 +1242,7 @@ fmgr_sql(PG_FUNCTION_ARGS)
else if (fcache->lazyEval)
{
/*
- * We are done with a lazy evaluation. Clean up.
+ * We are done with a lazy evaluation. Clean up.
*/
tuplestore_clear(fcache->tstore);
@@ -1266,8 +1266,8 @@ fmgr_sql(PG_FUNCTION_ARGS)
else
{
/*
- * We are done with a non-lazy evaluation. Return whatever is in
- * the tuplestore. (It is now caller's responsibility to free the
+ * We are done with a non-lazy evaluation. Return whatever is in
+ * the tuplestore. (It is now caller's responsibility to free the
* tuplestore when done.)
*/
rsi->returnMode = SFRM_Materialize;
@@ -1379,7 +1379,7 @@ sql_exec_error_callback(void *arg)
/*
* Try to determine where in the function we failed. If there is a query
- * with non-null QueryDesc, finger it. (We check this rather than looking
+ * with non-null QueryDesc, finger it. (We check this rather than looking
* for F_EXEC_RUN state, so that errors during ExecutorStart or
* ExecutorEnd are blamed on the appropriate query; see postquel_start and
* postquel_end.)
@@ -1671,7 +1671,7 @@ check_sql_fn_retval(Oid func_id, Oid rettype, List *queryTreeList,
* the function that's calling it.
*
* XXX Note that if rettype is RECORD, the IsBinaryCoercible check
- * will succeed for any composite restype. For the moment we rely on
+ * will succeed for any composite restype. For the moment we rely on
* runtime type checking to catch any discrepancy, but it'd be nice to
* do better at parse time.
*/
@@ -1717,7 +1717,7 @@ check_sql_fn_retval(Oid func_id, Oid rettype, List *queryTreeList,
/*
* Verify that the targetlist matches the return tuple type. We scan
* the non-deleted attributes to ensure that they match the datatypes
- * of the non-resjunk columns. For deleted attributes, insert NULL
+ * of the non-resjunk columns. For deleted attributes, insert NULL
* result columns if the caller asked for that.
*/
tupnatts = tupdesc->natts;
diff --git a/src/backend/executor/nodeAgg.c b/src/backend/executor/nodeAgg.c
index c741131b257..6fd67677b39 100644
--- a/src/backend/executor/nodeAgg.c
+++ b/src/backend/executor/nodeAgg.c
@@ -23,7 +23,7 @@
* The agg's first input type and transtype must be the same in this case!
*
* If transfunc is marked "strict" then NULL input_values are skipped,
- * keeping the previous transvalue. If transfunc is not strict then it
+ * keeping the previous transvalue. If transfunc is not strict then it
* is called for every input tuple and must deal with NULL initcond
* or NULL input_values for itself.
*
@@ -55,7 +55,7 @@
* it is completely forbidden for functions to modify pass-by-ref inputs,
* but in the aggregate case we know the left input is either the initial
* transition value or a previous function result, and in either case its
- * value need not be preserved. See int8inc() for an example. Notice that
+ * value need not be preserved. See int8inc() for an example. Notice that
* advance_transition_function() is coded to avoid a data copy step when
* the previous transition value pointer is returned. Also, some
* transition functions want to store working state in addition to the
@@ -176,7 +176,7 @@ typedef struct AggStatePerAggData
transtypeByVal;
/*
- * Stuff for evaluation of inputs. We used to just use ExecEvalExpr, but
+ * Stuff for evaluation of inputs. We used to just use ExecEvalExpr, but
* with the addition of ORDER BY we now need at least a slot for passing
* data to the sort object, which requires a tupledesc, so we might as
* well go whole hog and use ExecProject too.
@@ -196,7 +196,7 @@ typedef struct AggStatePerAggData
* input tuple group and updated for each input tuple.
*
* For a simple (non DISTINCT/ORDER BY) aggregate, we just feed the input
- * values straight to the transition function. If it's DISTINCT or
+ * values straight to the transition function. If it's DISTINCT or
* requires ORDER BY, we pass the input values into a Tuplesort object;
* then at completion of the input tuple group, we scan the sorted values,
* eliminate duplicates if needed, and run the transition function on the
@@ -231,7 +231,7 @@ typedef struct AggStatePerGroupData
/*
* Note: noTransValue initially has the same value as transValueIsNull,
- * and if true both are cleared to false at the same time. They are not
+ * and if true both are cleared to false at the same time. They are not
* the same though: if transfn later returns a NULL, we want to keep that
* NULL and not auto-replace it with a later input value. Only the first
* non-NULL input will be auto-substituted.
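Taken together, the strictness rules above reduce to a few lines of bookkeeping. A standalone sketch, with long standing in for a real Datum and addition standing in for a strict transition function (TransState and advance_strict are illustrative names only):

#include <stdbool.h>

typedef struct TransState
{
	long		transValue;
	bool		transValueIsNull;
	bool		noTransValue;	/* still waiting for a first input? */
} TransState;

static void
advance_strict(TransState *st, long input, bool inputIsNull)
{
	if (inputIsNull)
		return;					/* strict transfn skips NULL inputs */

	if (st->noTransValue)
	{
		/* auto-substitute the first non-NULL input as the transvalue */
		st->transValue = input;
		st->transValueIsNull = false;
		st->noTransValue = false;
		return;
	}
	if (st->transValueIsNull)
		return;					/* transfn itself returned NULL; keep it */

	st->transValue += input;	/* the strict transfn proper */
}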
@@ -241,7 +241,7 @@ typedef struct AggStatePerGroupData
/*
* To implement hashed aggregation, we need a hashtable that stores a
* representative tuple and an array of AggStatePerGroup structs for each
- * distinct set of GROUP BY column values. We compute the hash key from
+ * distinct set of GROUP BY column values. We compute the hash key from
* the GROUP BY columns.
*/
typedef struct AggHashEntryData *AggHashEntry;
@@ -444,7 +444,7 @@ advance_transition_function(AggState *aggstate,
/*
* If pass-by-ref datatype, must copy the new value into aggcontext and
- * pfree the prior transValue. But if transfn returned a pointer to its
+ * pfree the prior transValue. But if transfn returned a pointer to its
* first input, we don't need to do anything.
*/
if (!peraggstate->transtypeByVal &&
@@ -468,7 +468,7 @@ advance_transition_function(AggState *aggstate,
}
/*
- * Advance all the aggregates for one input tuple. The input tuple
+ * Advance all the aggregates for one input tuple. The input tuple
* has been stored in tmpcontext->ecxt_outertuple, so that it is accessible
* to ExecEvalExpr. pergroup is the array of per-group structs to use
* (this might be in a hashtable entry).
@@ -546,7 +546,7 @@ advance_aggregates(AggState *aggstate, AggStatePerGroup pergroup)
/*
* Run the transition function for a DISTINCT or ORDER BY aggregate
* with only one input. This is called after we have completed
- * entering all the input values into the sort object. We complete the
+ * entering all the input values into the sort object. We complete the
* sort, read out the values in sorted order, and run the transition
* function on each value (applying DISTINCT if appropriate).
*
@@ -643,7 +643,7 @@ process_ordered_aggregate_single(AggState *aggstate,
/*
* Run the transition function for a DISTINCT or ORDER BY aggregate
* with more than one input. This is called after we have completed
- * entering all the input values into the sort object. We complete the
+ * entering all the input values into the sort object. We complete the
* sort, read out the values in sorted order, and run the transition
* function on each value (applying DISTINCT if appropriate).
*
@@ -968,9 +968,9 @@ lookup_hash_entry(AggState *aggstate, TupleTableSlot *inputslot)
* the appropriate attribute for each aggregate function use (Aggref
* node) appearing in the targetlist or qual of the node. The number
* of tuples to aggregate over depends on whether grouped or plain
- * aggregation is selected. In grouped aggregation, we produce a result
+ * aggregation is selected. In grouped aggregation, we produce a result
* row for each group; in plain aggregation there's a single result row
- * for the whole query. In either case, the value of each aggregate is
+ * for the whole query. In either case, the value of each aggregate is
* stored in the expression context to be used when ExecProject evaluates
* the result tuple.
*/
@@ -995,7 +995,7 @@ ExecAgg(AggState *node)
}
/*
- * Exit if nothing left to do. (We must do the ps_TupFromTlist check
+ * Exit if nothing left to do. (We must do the ps_TupFromTlist check
* first, because in some cases agg_done gets set before we emit the final
* aggregate tuple, and we have to finish running SRFs for it.)
*/
@@ -1079,7 +1079,7 @@ agg_retrieve_direct(AggState *aggstate)
/*
* Clear the per-output-tuple context for each group, as well as
* aggcontext (which contains any pass-by-ref transvalues of the old
- * group). We also clear any child contexts of the aggcontext; some
+ * group). We also clear any child contexts of the aggcontext; some
* aggregate functions store working state in such contexts.
*/
ResetExprContext(econtext);
@@ -1177,7 +1177,7 @@ agg_retrieve_direct(AggState *aggstate)
/*
* Use the representative input tuple for any references to
- * non-aggregated input columns in the qual and tlist. (If we are not
+ * non-aggregated input columns in the qual and tlist. (If we are not
* grouping, and there are no input rows at all, we will come here
* with an empty firstSlot ... but if not grouping, there can't be any
* references to non-aggregated input columns, so no problem.)
@@ -1405,8 +1405,8 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
aggstate->hashtable = NULL;
/*
- * Create expression contexts. We need two, one for per-input-tuple
- * processing and one for per-output-tuple processing. We cheat a little
+ * Create expression contexts. We need two, one for per-input-tuple
+ * processing and one for per-output-tuple processing. We cheat a little
* by using ExecAssignExprContext() to build both.
*/
ExecAssignExprContext(estate, &aggstate->ss.ps);
@@ -1439,7 +1439,7 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
* initialize child expressions
*
* Note: ExecInitExpr finds Aggrefs for us, and also checks that no aggs
- * contain other agg calls in their arguments. This would make no sense
+ * contain other agg calls in their arguments. This would make no sense
* under SQL semantics anyway (and it's forbidden by the spec). Because
* that is true, we don't need to worry about evaluating the aggs in any
* particular order.
@@ -1486,7 +1486,7 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
* This is not an error condition: we might be using the Agg node just
* to do hash-based grouping. Even in the regular case,
* constant-expression simplification could optimize away all of the
- * Aggrefs in the targetlist and qual. So keep going, but force local
+ * Aggrefs in the targetlist and qual. So keep going, but force local
* copy of numaggs positive so that palloc()s below don't choke.
*/
numaggs = 1;
@@ -1599,7 +1599,7 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
peraggstate->sortstate = NULL;
/*
- * Get actual datatypes of the inputs. These could be different from
+ * Get actual datatypes of the inputs. These could be different from
* the agg's declared input types, when the agg accepts ANY or a
* polymorphic type.
*/
@@ -1727,7 +1727,7 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
* If the transfn is strict and the initval is NULL, make sure input
* type and transtype are the same (or at least binary-compatible), so
* that it's OK to use the first input value as the initial
- * transValue. This should have been checked at agg definition time,
+ * transValue. This should have been checked at agg definition time,
* but just in case...
*/
if (peraggstate->transfn.fn_strict && peraggstate->initValueIsNull)
@@ -2001,8 +2001,8 @@ ExecReScanAgg(AggState *node)
*
* The transition and/or final functions of an aggregate may want to verify
* that they are being called as aggregates, rather than as plain SQL
- * functions. They should use this function to do so. The return value
- * is nonzero if being called as an aggregate, or zero if not. (Specific
+ * functions. They should use this function to do so. The return value
+ * is nonzero if being called as an aggregate, or zero if not. (Specific
* nonzero values are AGG_CONTEXT_AGGREGATE or AGG_CONTEXT_WINDOW, but more
* values could conceivably appear in future.)
*
diff --git a/src/backend/executor/nodeAppend.c b/src/backend/executor/nodeAppend.c
index 27007776ff3..12dcba2b021 100644
--- a/src/backend/executor/nodeAppend.c
+++ b/src/backend/executor/nodeAppend.c
@@ -33,7 +33,7 @@
* /
* Append -------+------+------+--- nil
* / \ | | |
- * nil nil ... ... ...
+ * nil nil ... ... ...
* subplans
*
* Append nodes are currently used for unions, and to support
diff --git a/src/backend/executor/nodeBitmapHeapscan.c b/src/backend/executor/nodeBitmapHeapscan.c
index d2b27213ffe..064307e6a03 100644
--- a/src/backend/executor/nodeBitmapHeapscan.c
+++ b/src/backend/executor/nodeBitmapHeapscan.c
@@ -5,7 +5,7 @@
*
* NOTE: it is critical that this plan type only be used with MVCC-compliant
* snapshots (ie, regular snapshots, not SnapshotNow or one of the other
- * special snapshots). The reason is that since index and heap scans are
+ * special snapshots). The reason is that since index and heap scans are
* decoupled, there can be no assurance that the index tuple prompting a
* visit to a particular heap TID still exists when the visit is made.
* Therefore the tuple might not exist anymore either (which is OK because
@@ -335,7 +335,7 @@ bitgetpage(HeapScanDesc scan, TBMIterateResult *tbmres)
/*
* We must hold share lock on the buffer content while examining tuple
- * visibility. Afterwards, however, the tuples we have found to be
+ * visibility. Afterwards, however, the tuples we have found to be
* visible are guaranteed good as long as we hold the buffer pin.
*/
LockBuffer(buffer, BUFFER_LOCK_SHARE);
diff --git a/src/backend/executor/nodeForeignscan.c b/src/backend/executor/nodeForeignscan.c
index c0b3525e50d..c64b486dfd3 100644
--- a/src/backend/executor/nodeForeignscan.c
+++ b/src/backend/executor/nodeForeignscan.c
@@ -147,7 +147,7 @@ ExecInitForeignScan(ForeignScan *node, EState *estate, int eflags)
scanstate->ss.ss_currentRelation = currentRelation;
/*
- * get the scan type from the relation descriptor. (XXX at some point we
+ * get the scan type from the relation descriptor. (XXX at some point we
* might want to let the FDW editorialize on the scan tupdesc.)
*/
ExecAssignScanType(&scanstate->ss, RelationGetDescr(currentRelation));
diff --git a/src/backend/executor/nodeFunctionscan.c b/src/backend/executor/nodeFunctionscan.c
index 24325fee6eb..e09c16177c3 100644
--- a/src/backend/executor/nodeFunctionscan.c
+++ b/src/backend/executor/nodeFunctionscan.c
@@ -280,7 +280,7 @@ ExecReScanFunctionScan(FunctionScanState *node)
/*
* Here we have a choice whether to drop the tuplestore (and recompute the
* function outputs) or just rescan it. We must recompute if the
- * expression contains parameters, else we rescan. XXX maybe we should
+ * expression contains parameters, else we rescan. XXX maybe we should
* recompute if the function is volatile?
*/
if (node->ss.ps.chgParam != NULL)
diff --git a/src/backend/executor/nodeHash.c b/src/backend/executor/nodeHash.c
index 6a2f2367096..ce1fa4a9917 100644
--- a/src/backend/executor/nodeHash.c
+++ b/src/backend/executor/nodeHash.c
@@ -365,7 +365,7 @@ ExecHashTableCreate(Hash *node, List *hashOperators, bool keepNulls)
/*
* Set up for skew optimization, if possible and there's a need for more
- * than one batch. (In a one-batch join, there's no point in it.)
+ * than one batch. (In a one-batch join, there's no point in it.)
*/
if (nbatch > 1)
ExecHashBuildSkewHash(hashtable, node, num_skew_mcvs);
@@ -407,7 +407,7 @@ ExecChooseHashTableSize(double ntuples, int tupwidth, bool useskew,
/*
* Estimate tupsize based on footprint of tuple in hashtable... note this
- * does not allow for any palloc overhead. The manipulations of spaceUsed
+ * does not allow for any palloc overhead. The manipulations of spaceUsed
* don't count palloc overhead either.
*/
tupsize = HJTUPLE_OVERHEAD +
@@ -459,7 +459,7 @@ ExecChooseHashTableSize(double ntuples, int tupwidth, bool useskew,
/*
* Set nbuckets to achieve an average bucket load of NTUP_PER_BUCKET when
* memory is filled. Set nbatch to the smallest power of 2 that appears
- * sufficient. The Min() steps limit the results so that the pointer
+ * sufficient. The Min() steps limit the results so that the pointer
* arrays we'll try to allocate do not exceed work_mem.
*/
max_pointers = (work_mem * 1024L) / sizeof(void *);
@@ -498,8 +498,8 @@ ExecChooseHashTableSize(double ntuples, int tupwidth, bool useskew,
/*
* Both nbuckets and nbatch must be powers of 2 to make
- * ExecHashGetBucketAndBatch fast. We already fixed nbatch; now inflate
- * nbuckets to the next larger power of 2. We also force nbuckets to not
+ * ExecHashGetBucketAndBatch fast. We already fixed nbatch; now inflate
+ * nbuckets to the next larger power of 2. We also force nbuckets to not
* be real small, by starting the search at 2^10. (Note: above we made
* sure that nbuckets is not more than INT_MAX / 2, so this loop cannot
* overflow, nor can the final shift to recalculate nbuckets.)
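The power-of-2 rule exists so that bucket and batch numbers are bit masks rather than divisions, and inflating nbuckets is just a shift loop. A standalone sketch of both ideas; the names and the batch shift amount are illustrative, not the backend's exact choices:

#include <stdint.h>

/* Round v up to the next power of 2, starting the search at 2^10. */
static int
next_pow2(int v)
{
	int			result = 1024;

	while (result < v)
		result <<= 1;			/* safe if v <= INT_MAX / 2 */
	return result;
}

/* With power-of-2 sizes, bucket and batch come from simple masks. */
static void
get_bucket_and_batch(uint32_t hashvalue, int nbuckets, int nbatch,
					 int *bucketno, int *batchno)
{
	*bucketno = (int) (hashvalue & (nbuckets - 1));
	*batchno = (nbatch > 1) ? (int) ((hashvalue >> 16) & (nbatch - 1)) : 0;
}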
@@ -817,7 +817,7 @@ ExecHashGetHashValue(HashJoinTable hashtable,
* the hash support function as strict even if the operator is not.
*
* Note: currently, all hashjoinable operators must be strict since
- * the hash index AM assumes that. However, it takes so little extra
+ * the hash index AM assumes that. However, it takes so little extra
* code here to allow non-strict that we may as well do it.
*/
if (isNull)
@@ -1237,7 +1237,7 @@ ExecHashBuildSkewHash(HashJoinTable hashtable, Hash *node, int mcvsToUse)
/*
* While we have not hit a hole in the hashtable and have not hit
* the desired bucket, we have collided with some previous hash
- * value, so try the next bucket location. NB: this code must
+ * value, so try the next bucket location. NB: this code must
* match ExecHashGetSkewBucket.
*/
bucket = hashvalue & (nbuckets - 1);
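The probe loop above and its counterpart in ExecHashGetSkewBucket must walk buckets in exactly the same order, or lookups will miss entries that were displaced by collisions. A standalone sketch of the shared probe sequence (SkewSlot and skew_lookup are illustrative names):

#include <stdint.h>

typedef struct SkewSlot
{
	uint32_t	hashvalue;
	int			used;
} SkewSlot;

/* Probe from the home bucket until an empty slot or a hash match. */
static int
skew_lookup(const SkewSlot *slots, int nbuckets, uint32_t hashvalue)
{
	int			bucket = (int) (hashvalue & (nbuckets - 1));

	while (slots[bucket].used && slots[bucket].hashvalue != hashvalue)
		bucket = (bucket + 1) & (nbuckets - 1);		/* same step as insertion */

	return slots[bucket].used ? bucket : -1;	/* -1: not a skew value */
}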
@@ -1435,7 +1435,7 @@ ExecHashRemoveNextSkewBucket(HashJoinTable hashtable)
* NOTE: this is not nearly as simple as it looks on the surface, because
* of the possibility of collisions in the hashtable. Suppose that hash
* values A and B collide at a particular hashtable entry, and that A was
- * entered first so B gets shifted to a different table entry. If we were
+ * entered first so B gets shifted to a different table entry. If we were
* to remove A first then ExecHashGetSkewBucket would mistakenly start
* reporting that B is not in the hashtable, because it would hit the NULL
* before finding B. However, we always remove entries in the reverse
diff --git a/src/backend/executor/nodeHashjoin.c b/src/backend/executor/nodeHashjoin.c
index dab8ead6108..ff03f169bf6 100644
--- a/src/backend/executor/nodeHashjoin.c
+++ b/src/backend/executor/nodeHashjoin.c
@@ -126,7 +126,7 @@ ExecHashJoin(HashJoinState *node)
* check this when the outer relation's startup cost is less
* than the projected cost of building the hash table.
* Otherwise it's best to build the hash table first and see
- * if the inner relation is empty. (When it's a left join, we
+ * if the inner relation is empty. (When it's a left join, we
* should always make this check, since we aren't going to be
* able to skip the join on the strength of an empty inner
* relation anyway.)
@@ -530,7 +530,7 @@ ExecInitHashJoin(HashJoin *node, EState *estate, int eflags)
* tuple slot of the Hash node (which is our inner plan). we can do this
* because Hash nodes don't return tuples via ExecProcNode() -- instead
* the hash join node uses ExecScanHashBucket() to get at the contents of
- * the hash table. -cim 6/9/91
+ * the hash table. -cim 6/9/91
*/
{
HashState *hashstate = (HashState *) innerPlanState(hjstate);
@@ -896,7 +896,7 @@ ExecHashJoinSaveTuple(MinimalTuple tuple, uint32 hashvalue,
/*
* ExecHashJoinGetSavedTuple
- * read the next tuple from a batch file. Return NULL if no more.
+ * read the next tuple from a batch file. Return NULL if no more.
*
* On success, *hashvalue is set to the tuple's hash value, and the tuple
* itself is stored in the given slot.
diff --git a/src/backend/executor/nodeIndexonlyscan.c b/src/backend/executor/nodeIndexonlyscan.c
index 2f30c55c54a..f533a78ac84 100644
--- a/src/backend/executor/nodeIndexonlyscan.c
+++ b/src/backend/executor/nodeIndexonlyscan.c
@@ -88,7 +88,7 @@ IndexOnlyNext(IndexOnlyScanState *node)
* Note on Memory Ordering Effects: visibilitymap_test does not lock
* the visibility map buffer, and therefore the result we read here
* could be slightly stale. However, it can't be stale enough to
- * matter. It suffices to show that (1) there is a read barrier
+ * matter. It suffices to show that (1) there is a read barrier
* between the time we read the index TID and the time we test the
* visibility map; and (2) there is a write barrier between the time
* some other concurrent process clears the visibility map bit and the
@@ -113,7 +113,7 @@ IndexOnlyNext(IndexOnlyScanState *node)
/*
* Only MVCC snapshots are supported here, so there should be no
* need to keep following the HOT chain once a visible entry has
- * been found. If we did want to allow that, we'd need to keep
+ * been found. If we did want to allow that, we'd need to keep
* more state to remember not to call index_getnext_tid next time.
*/
if (scandesc->xs_continue_hot)
@@ -122,7 +122,7 @@ IndexOnlyNext(IndexOnlyScanState *node)
/*
* Note: at this point we are holding a pin on the heap page, as
* recorded in scandesc->xs_cbuf. We could release that pin now,
- * but it's not clear whether it's a win to do so. The next index
+ * but it's not clear whether it's a win to do so. The next index
* entry might require a visit to the same heap page.
*/
}
diff --git a/src/backend/executor/nodeIndexscan.c b/src/backend/executor/nodeIndexscan.c
index f1062f19f43..b1a4aa8b2d2 100644
--- a/src/backend/executor/nodeIndexscan.c
+++ b/src/backend/executor/nodeIndexscan.c
@@ -216,7 +216,7 @@ ExecIndexEvalRuntimeKeys(ExprContext *econtext,
/*
* For each run-time key, extract the run-time expression and evaluate
- * it with respect to the current context. We then stick the result
+ * it with respect to the current context. We then stick the result
* into the proper scan key.
*
* Note: the result of the eval could be a pass-by-ref value that's
@@ -349,7 +349,7 @@ ExecIndexAdvanceArrayKeys(IndexArrayKeyInfo *arrayKeys, int numArrayKeys)
/*
* Note we advance the rightmost array key most quickly, since it will
* correspond to the lowest-order index column among the available
- * qualifications. This is hypothesized to result in better locality of
+ * qualifications. This is hypothesized to result in better locality of
* access in the index.
*/
for (j = numArrayKeys - 1; j >= 0; j--)
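The loop that starts here is an odometer: bump the rightmost key, and on wraparound reset it and carry into the key to its left. A standalone sketch of the same control flow (advance_array_keys is an illustrative name; cur holds the current index into each key's value array):

#include <stdbool.h>

static bool
advance_array_keys(int *cur, const int *nvals, int nkeys)
{
	for (int j = nkeys - 1; j >= 0; j--)
	{
		if (++cur[j] < nvals[j])
			return true;		/* advanced this position; done */
		cur[j] = 0;				/* wrapped; carry into next key left */
	}
	return false;				/* all combinations exhausted */
}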
diff --git a/src/backend/executor/nodeLimit.c b/src/backend/executor/nodeLimit.c
index 41b3d9fe508..6b540e36dce 100644
--- a/src/backend/executor/nodeLimit.c
+++ b/src/backend/executor/nodeLimit.c
@@ -113,7 +113,7 @@ ExecLimit(LimitState *node)
/*
* The subplan is known to return no tuples (or not more than
- * OFFSET tuples, in general). So we return no tuples.
+ * OFFSET tuples, in general). So we return no tuples.
*/
return NULL;
diff --git a/src/backend/executor/nodeLockRows.c b/src/backend/executor/nodeLockRows.c
index 5b5c705a96d..2c4514261cd 100644
--- a/src/backend/executor/nodeLockRows.c
+++ b/src/backend/executor/nodeLockRows.c
@@ -182,7 +182,7 @@ lnext:
tuple.t_self = copyTuple->t_self;
/*
- * Need to run a recheck subquery. Initialize EPQ state if we
+ * Need to run a recheck subquery. Initialize EPQ state if we
* didn't do so already.
*/
if (!epq_started)
@@ -213,7 +213,7 @@ lnext:
{
/*
* First, fetch a copy of any rows that were successfully locked
- * without any update having occurred. (We do this in a separate pass
+ * without any update having occurred. (We do this in a separate pass
* so as to avoid overhead in the common case where there are no
* concurrent updates.)
*/
@@ -318,7 +318,7 @@ ExecInitLockRows(LockRows *node, EState *estate, int eflags)
/*
* Locate the ExecRowMark(s) that this node is responsible for, and
- * construct ExecAuxRowMarks for them. (InitPlan should already have
+ * construct ExecAuxRowMarks for them. (InitPlan should already have
* built the global list of ExecRowMarks.)
*/
lrstate->lr_arowMarks = NIL;
@@ -340,7 +340,7 @@ ExecInitLockRows(LockRows *node, EState *estate, int eflags)
aerm = ExecBuildAuxRowMark(erm, outerPlan->targetlist);
/*
- * Only locking rowmarks go into our own list. Non-locking marks are
+ * Only locking rowmarks go into our own list. Non-locking marks are
* passed off to the EvalPlanQual machinery. This is because we don't
* want to bother fetching non-locked rows unless we actually have to
* do an EPQ recheck.
diff --git a/src/backend/executor/nodeMaterial.c b/src/backend/executor/nodeMaterial.c
index 7a82f56c61b..ba11f9fe080 100644
--- a/src/backend/executor/nodeMaterial.c
+++ b/src/backend/executor/nodeMaterial.c
@@ -185,7 +185,7 @@ ExecInitMaterial(Material *node, EState *estate, int eflags)
/*
* Tuplestore's interpretation of the flag bits is subtly different from
* the general executor meaning: it doesn't think BACKWARD necessarily
- * means "backwards all the way to start". If told to support BACKWARD we
+ * means "backwards all the way to start". If told to support BACKWARD we
* must include REWIND in the tuplestore eflags, else tuplestore_trim
* might throw away too much.
*/
diff --git a/src/backend/executor/nodeMergeAppend.c b/src/backend/executor/nodeMergeAppend.c
index c3edd618591..eb108e57a42 100644
--- a/src/backend/executor/nodeMergeAppend.c
+++ b/src/backend/executor/nodeMergeAppend.c
@@ -32,7 +32,7 @@
* /
* MergeAppend---+------+------+--- nil
* / \ | | |
- * nil nil ... ... ...
+ * nil nil ... ... ...
* subplans
*/
diff --git a/src/backend/executor/nodeMergejoin.c b/src/backend/executor/nodeMergejoin.c
index e69bc64a89a..00c05478216 100644
--- a/src/backend/executor/nodeMergejoin.c
+++ b/src/backend/executor/nodeMergejoin.c
@@ -41,7 +41,7 @@
*
* Therefore, rather than directly executing the merge join clauses,
* we evaluate the left and right key expressions separately and then
- * compare the columns one at a time (see MJCompare). The planner
+ * compare the columns one at a time (see MJCompare). The planner
* passes us enough information about the sort ordering of the inputs
* to allow us to determine how to make the comparison. We may use the
* appropriate btree comparison function, since Postgres' only notion
@@ -269,7 +269,7 @@ MJExamineQuals(List *mergeclauses,
* input, since we assume mergejoin operators are strict. If the NULL
* is in the first join column, and that column sorts nulls last, then
* we can further conclude that no following tuple can match anything
- * either, since they must all have nulls in the first column. However,
+ * either, since they must all have nulls in the first column. However,
* that case is only interesting if we're not in FillOuter mode, else
* we have to visit all the tuples anyway.
*
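A standalone sketch of the column-at-a-time comparison described here, assuming an ASC NULLS LAST ordering and long in place of real datatypes (MergeKey and compare_keys are illustrative; the real MJCompare works through btree support functions and per-column sort options):

#include <stdbool.h>

typedef struct MergeKey
{
	long		value;
	bool		isnull;
} MergeKey;

static int
compare_keys(const MergeKey *a, const MergeKey *b, int nkeys)
{
	for (int i = 0; i < nkeys; i++)
	{
		if (a[i].isnull || b[i].isnull)
		{
			if (a[i].isnull && b[i].isnull)
				continue;		/* treat NULL = NULL for merging purposes */
			return a[i].isnull ? 1 : -1;	/* NULLs sort last */
		}
		if (a[i].value != b[i].value)
			return (a[i].value < b[i].value) ? -1 : 1;
	}
	return 0;
}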
@@ -325,7 +325,7 @@ MJEvalOuterValues(MergeJoinState *mergestate)
/*
* MJEvalInnerValues
*
- * Same as above, but for the inner tuple. Here, we have to be prepared
+ * Same as above, but for the inner tuple. Here, we have to be prepared
* to load data from either the true current inner, or the marked inner,
* so caller must tell us which slot to load from.
*/
@@ -736,7 +736,7 @@ ExecMergeJoin(MergeJoinState *node)
case MJEVAL_MATCHABLE:
/*
- * OK, we have the initial tuples. Begin by skipping
+ * OK, we have the initial tuples. Begin by skipping
* non-matching tuples.
*/
node->mj_JoinState = EXEC_MJ_SKIP_TEST;
@@ -1131,7 +1131,7 @@ ExecMergeJoin(MergeJoinState *node)
* which means that all subsequent outer tuples will be
* larger than our marked inner tuples. So we need not
* revisit any of the marked tuples but can proceed to
- * look for a match to the current inner. If there's
+ * look for a match to the current inner. If there's
* no more inners, no more matches are possible.
* ----------------
*/
@@ -1522,7 +1522,7 @@ ExecInitMergeJoin(MergeJoin *node, EState *estate, int eflags)
* For certain types of inner child nodes, it is advantageous to issue
* MARK every time we advance past an inner tuple we will never return to.
* For other types, MARK on a tuple we cannot return to is a waste of
- * cycles. Detect which case applies and set mj_ExtraMarks if we want to
+ * cycles. Detect which case applies and set mj_ExtraMarks if we want to
* issue "unnecessary" MARK calls.
*
* Currently, only Material wants the extra MARKs, and it will be helpful
diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c
index e934c7b9ab9..0997e3931f9 100644
--- a/src/backend/executor/nodeModifyTable.c
+++ b/src/backend/executor/nodeModifyTable.c
@@ -30,7 +30,7 @@
*
* If the query specifies RETURNING, then the ModifyTable returns a
* RETURNING tuple after completing each row insert, update, or delete.
- * It must be called again to continue the operation. Without RETURNING,
+ * It must be called again to continue the operation. Without RETURNING,
* we just loop within the node until all the work is done, then
* return NULL. This avoids useless call/return overhead.
*/
@@ -413,7 +413,7 @@ ldelete:;
* proceed. We don't want to discard the original DELETE
* while keeping the triggered actions based on its deletion;
* and it would be no better to allow the original DELETE
- * while discarding updates that it triggered. The row update
+ * while discarding updates that it triggered. The row update
* carries some information that might be important according
* to business rules; so throwing an error is the only safe
* course.
@@ -485,7 +485,7 @@ ldelete:;
{
/*
* We have to put the target tuple into a slot, which means first we
- * gotta fetch it. We can use the trigger tuple slot.
+ * gotta fetch it. We can use the trigger tuple slot.
*/
TupleTableSlot *rslot;
HeapTupleData deltuple;
@@ -546,7 +546,7 @@ ldelete:;
* note: we can't run UPDATE queries with transactions
* off because UPDATEs are actually INSERTs and our
* scan will mistakenly loop forever, updating the tuple
- * it just inserted.. This should be fixed but until it
+ * it just inserted. This should be fixed but until it
* is, we don't want to get stuck in an infinite loop
* which corrupts your database..
*
@@ -654,7 +654,7 @@ ExecUpdate(ItemPointer tupleid,
*
* If we generate a new candidate tuple after EvalPlanQual testing, we
* must loop back here and recheck constraints. (We don't need to
- * redo triggers, however. If there are any BEFORE triggers then
+ * redo triggers, however. If there are any BEFORE triggers then
* trigger.c will have done heap_lock_tuple to lock the correct tuple,
* so there's no need to do them again.)
*/
@@ -892,7 +892,7 @@ ExecModifyTable(ModifyTableState *node)
/*
* es_result_relation_info must point to the currently active result
- * relation while we are within this ModifyTable node. Even though
+ * relation while we are within this ModifyTable node. Even though
* ModifyTable nodes can't be nested statically, they can be nested
* dynamically (since our subplan could include a reference to a modifying
* CTE). So we have to save and restore the caller's value.
@@ -908,7 +908,7 @@ ExecModifyTable(ModifyTableState *node)
for (;;)
{
/*
- * Reset the per-output-tuple exprcontext. This is needed because
+ * Reset the per-output-tuple exprcontext. This is needed because
* triggers expect to use that context as workspace. It's a bit ugly
* to do this below the top level of the plan, however. We might need
* to rethink this later.
@@ -1075,7 +1075,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
* call ExecInitNode on each of the plans to be executed and save the
* results into the array "mt_plans". This is also a convenient place to
* verify that the proposed target relations are valid and open their
- * indexes for insertion of new index entries. Note we *must* set
+ * indexes for insertion of new index entries. Note we *must* set
* estate->es_result_relation_info correctly while we initialize each
* sub-plan; ExecContextForcesOids depends on that!
*/
@@ -1095,7 +1095,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
/*
* If there are indices on the result relation, open them and save
* descriptors in the result relation info, so that we can add new
- * index entries for the tuples we add/update. We need not do this
+ * index entries for the tuples we add/update. We need not do this
* for a DELETE, however, since deletion doesn't affect indexes. Also,
* inside an EvalPlanQual operation, the indexes might be open
* already, since we share the resultrel state with the original
@@ -1139,7 +1139,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
/*
* Initialize result tuple slot and assign its rowtype using the first
- * RETURNING list. We assume the rest will look the same.
+ * RETURNING list. We assume the rest will look the same.
*/
tupDesc = ExecTypeFromTL((List *) linitial(node->returningLists),
false);
@@ -1185,7 +1185,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
/*
* If we have any secondary relations in an UPDATE or DELETE, they need to
* be treated like non-locked relations in SELECT FOR UPDATE, ie, the
- * EvalPlanQual mechanism needs to be told about them. Locate the
+ * EvalPlanQual mechanism needs to be told about them. Locate the
* relevant ExecRowMarks.
*/
foreach(l, node->rowMarks)
@@ -1226,7 +1226,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
* attribute present --- no need to look first.
*
* If there are multiple result relations, each one needs its own junk
- * filter. Note multiple rels are only possible for UPDATE/DELETE, so we
+ * filter. Note multiple rels are only possible for UPDATE/DELETE, so we
* can't be fooled by some needing a filter and some not.
*
* This section of code is also a convenient place to verify that the
diff --git a/src/backend/executor/nodeRecursiveunion.c b/src/backend/executor/nodeRecursiveunion.c
index 78cef66a6eb..149246fd923 100644
--- a/src/backend/executor/nodeRecursiveunion.c
+++ b/src/backend/executor/nodeRecursiveunion.c
@@ -316,7 +316,7 @@ ExecReScanRecursiveUnion(RecursiveUnionState *node)
/*
* if chgParam of subnode is not null then plan will be re-scanned by
- * first ExecProcNode. Because of above, we only have to do this to the
+ * first ExecProcNode. Because of above, we only have to do this to the
* non-recursive term.
*/
if (outerPlan->chgParam == NULL)
diff --git a/src/backend/executor/nodeSetOp.c b/src/backend/executor/nodeSetOp.c
index d41cf510072..e2a26c4310b 100644
--- a/src/backend/executor/nodeSetOp.c
+++ b/src/backend/executor/nodeSetOp.c
@@ -5,7 +5,7 @@
*
* The input of a SetOp node consists of tuples from two relations,
* which have been combined into one dataset, with a junk attribute added
- * that shows which relation each tuple came from. In SETOP_SORTED mode,
+ * that shows which relation each tuple came from. In SETOP_SORTED mode,
* the input has furthermore been sorted according to all the grouping
* columns (ie, all the non-junk attributes). The SetOp node scans each
* group of identical tuples to determine how many came from each input
@@ -18,7 +18,7 @@
* relation is the left-hand one for EXCEPT, and tries to make the smaller
* input relation come first for INTERSECT. We build a hash table in memory
* with one entry for each group of identical tuples, and count the number of
- * tuples in the group from each relation. After seeing all the input, we
+ * tuples in the group from each relation. After seeing all the input, we
* scan the hashtable and generate the correct output using those counts.
* We can avoid making hashtable entries for any tuples appearing only in the
* second input relation, since they cannot result in any output.
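Once the two per-group counts are gathered, the output rule for each group is a small pure function. A standalone sketch covering the four variants (setop_output_copies is an illustrative name; numLeft is the group's count from the first input):

#include <stdbool.h>

static long
setop_output_copies(long numLeft, long numRight, bool is_except, bool is_all)
{
	if (is_except)
	{
		if (!is_all)
			return (numLeft > 0 && numRight == 0) ? 1 : 0;	/* EXCEPT */
		return (numLeft > numRight) ? numLeft - numRight : 0;	/* EXCEPT ALL */
	}
	if (!is_all)
		return (numLeft > 0 && numRight > 0) ? 1 : 0;	/* INTERSECT */
	return (numLeft < numRight) ? numLeft : numRight;	/* INTERSECT ALL */
}

This also shows why tuples appearing only in the second input need no hashtable entry: with numLeft == 0, every branch yields zero output rows.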
@@ -268,7 +268,7 @@ setop_retrieve_direct(SetOpState *setopstate)
/*
* Store the copied first input tuple in the tuple table slot reserved
- * for it. The tuple will be deleted when it is cleared from the
+ * for it. The tuple will be deleted when it is cleared from the
* slot.
*/
ExecStoreTuple(setopstate->grp_firstTuple,
diff --git a/src/backend/executor/nodeSubplan.c b/src/backend/executor/nodeSubplan.c
index 59aa134e78a..ebc241a6b53 100644
--- a/src/backend/executor/nodeSubplan.c
+++ b/src/backend/executor/nodeSubplan.c
@@ -261,12 +261,12 @@ ExecScanSubPlan(SubPlanState *node,
* semantics for ANY_SUBLINK or AND semantics for ALL_SUBLINK.
* (ROWCOMPARE_SUBLINK doesn't allow multiple tuples from the subplan.)
* NULL results from the combining operators are handled according to the
- * usual SQL semantics for OR and AND. The result for no input tuples is
+ * usual SQL semantics for OR and AND. The result for no input tuples is
* FALSE for ANY_SUBLINK, TRUE for ALL_SUBLINK, NULL for
* ROWCOMPARE_SUBLINK.
*
* For EXPR_SUBLINK we require the subplan to produce no more than one
- * tuple, else an error is raised. If zero tuples are produced, we return
+ * tuple, else an error is raised. If zero tuples are produced, we return
* NULL. Assuming we get a tuple, we just use its first column (there can
* be only one non-junk column in this case).
*
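The OR combining with NULLs follows SQL's three-valued logic: any TRUE row forces TRUE, otherwise any NULL forces NULL, otherwise the result stays FALSE (which is also the zero-rows answer for ANY). A standalone sketch (TriValue and any_combine are illustrative names; ALL_SUBLINK is the AND-flavored dual, starting from TRUE):

typedef enum
{
	TV_FALSE,
	TV_TRUE,
	TV_NULL
} TriValue;

static TriValue
any_combine(TriValue acc, TriValue rowresult)
{
	if (acc == TV_TRUE || rowresult == TV_TRUE)
		return TV_TRUE;			/* OR: one true input settles it */
	if (acc == TV_NULL || rowresult == TV_NULL)
		return TV_NULL;			/* otherwise NULL is contagious */
	return TV_FALSE;
}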
@@ -409,7 +409,7 @@ ExecScanSubPlan(SubPlanState *node,
else if (!found)
{
/*
- * deal with empty subplan result. result/isNull were previously
+ * deal with empty subplan result. result/isNull were previously
* initialized correctly for all sublink types except EXPR and
* ROWCOMPARE; for those, return NULL.
*/
@@ -894,7 +894,7 @@ ExecInitSubPlan(SubPlan *subplan, PlanState *parent)
*
* This is called from ExecEvalParamExec() when the value of a PARAM_EXEC
* parameter is requested and the param's execPlan field is set (indicating
- * that the param has not yet been evaluated). This allows lazy evaluation
+ * that the param has not yet been evaluated). This allows lazy evaluation
* of initplans: we don't run the subplan until/unless we need its output.
* Note that this routine MUST clear the execPlan fields of the plan's
* output parameters after evaluating them!
@@ -1122,7 +1122,7 @@ ExecInitAlternativeSubPlan(AlternativeSubPlan *asplan, PlanState *parent)
/*
* Select the one to be used. For this, we need an estimate of the number
* of executions of the subplan. We use the number of output rows
- * expected from the parent plan node. This is a good estimate if we are
+ * expected from the parent plan node. This is a good estimate if we are
* in the parent's targetlist, and an underestimate (but probably not by
* more than a factor of 2) if we are in the qual.
*/
diff --git a/src/backend/executor/nodeSubqueryscan.c b/src/backend/executor/nodeSubqueryscan.c
index eaed7480328..85aa96b181e 100644
--- a/src/backend/executor/nodeSubqueryscan.c
+++ b/src/backend/executor/nodeSubqueryscan.c
@@ -194,7 +194,7 @@ ExecReScanSubqueryScan(SubqueryScanState *node)
/*
* ExecReScan doesn't know about my subplan, so I have to do
- * changed-parameter signaling myself. This is just as well, because the
+ * changed-parameter signaling myself. This is just as well, because the
* subplan has its own memory context in which its chgParam state lives.
*/
if (node->ss.ps.chgParam != NULL)
diff --git a/src/backend/executor/nodeUnique.c b/src/backend/executor/nodeUnique.c
index 8ac271d22fa..2455fd32c8e 100644
--- a/src/backend/executor/nodeUnique.c
+++ b/src/backend/executor/nodeUnique.c
@@ -4,7 +4,7 @@
* Routines to handle unique'ing of queries where appropriate
*
* Unique is a very simple node type that just filters out duplicate
- * tuples from a stream of sorted tuples from its subplan. It's essentially
+ * tuples from a stream of sorted tuples from its subplan. It's essentially
* a dumbed-down form of Group: the duplicate-removal functionality is
* identical. However, Unique doesn't do projection nor qual checking,
* so it's marginally more efficient for cases where neither is needed.
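Sorted input is what makes the filter cheap: one remembered value suffices. A standalone sketch of the same logic over ints instead of tuple slots:

#include <stdbool.h>
#include <stdio.h>

int
main(void)
{
	int			sorted[] = {1, 1, 2, 3, 3, 3, 7};
	int			n = sizeof(sorted) / sizeof(sorted[0]);
	int			prev = 0;
	bool		have_prev = false;

	for (int i = 0; i < n; i++)
	{
		/* emit only values that differ from the last one emitted */
		if (!have_prev || sorted[i] != prev)
		{
			printf("%d\n", sorted[i]);
			prev = sorted[i];
			have_prev = true;
		}
	}
	return 0;					/* prints 1 2 3 7 */
}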
diff --git a/src/backend/executor/nodeValuesscan.c b/src/backend/executor/nodeValuesscan.c
index 802820fc6b3..363eca759b1 100644
--- a/src/backend/executor/nodeValuesscan.c
+++ b/src/backend/executor/nodeValuesscan.c
@@ -215,7 +215,7 @@ ExecInitValuesScan(ValuesScan *node, EState *estate, int eflags)
planstate = &scanstate->ss.ps;
/*
- * Create expression contexts. We need two, one for per-sublist
+ * Create expression contexts. We need two, one for per-sublist
* processing and one for execScan.c to use for quals and projections. We
* cheat a little by using ExecAssignExprContext() to build both.
*/
diff --git a/src/backend/executor/nodeWindowAgg.c b/src/backend/executor/nodeWindowAgg.c
index d9f0e79d10c..00e4f227ad6 100644
--- a/src/backend/executor/nodeWindowAgg.c
+++ b/src/backend/executor/nodeWindowAgg.c
@@ -4,7 +4,7 @@
* routines to handle WindowAgg nodes.
*
* A WindowAgg node evaluates "window functions" across suitable partitions
- * of the input tuple set. Any one WindowAgg works for just a single window
+ * of the input tuple set. Any one WindowAgg works for just a single window
* specification, though it can evaluate multiple window functions sharing
* identical window specifications. The input tuples are required to be
* delivered in sorted order, with the PARTITION BY columns (if any) as
@@ -14,7 +14,7 @@
*
* Since window functions can require access to any or all of the rows in
* the current partition, we accumulate rows of the partition into a
- * tuplestore. The window functions are called using the WindowObject API
+ * tuplestore. The window functions are called using the WindowObject API
* so that they can access those rows as needed.
*
* We also support using plain aggregate functions as window functions.
@@ -301,7 +301,7 @@ advance_windowaggregate(WindowAggState *winstate,
/*
* If pass-by-ref datatype, must copy the new value into aggcontext and
- * pfree the prior transValue. But if transfn returned a pointer to its
+ * pfree the prior transValue. But if transfn returned a pointer to its
* first input, we don't need to do anything.
*/
if (!peraggstate->transtypeByVal &&
@@ -443,7 +443,7 @@ eval_windowaggregates(WindowAggState *winstate)
* TODO: Rerunning aggregates from the frame start can be pretty slow. For
* some aggregates like SUM and COUNT we could avoid that by implementing
* a "negative transition function" that would be called for each row as
- * it exits the frame. We'd have to think about avoiding recalculation of
+ * it exits the frame. We'd have to think about avoiding recalculation of
* volatile arguments of aggregate functions, too.
*/
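For SUM and COUNT the proposal amounts to a sliding-window computation: apply the transition as rows enter the frame and an inverse as they leave, instead of rescanning from the frame start each time. A standalone sketch over a ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING frame:

#include <stdio.h>

int
main(void)
{
	long		values[] = {3, 1, 4, 1, 5, 9, 2, 6};
	int			n = 8;
	long		sum = 0;
	int			head = 0,
				tail = 0;		/* current frame is [head, tail) */

	for (int i = 0; i < n; i++)
	{
		int			newhead = (i > 0) ? i - 1 : 0;
		int			newtail = (i + 2 < n) ? i + 2 : n;

		while (tail < newtail)
			sum += values[tail++];	/* forward transition */
		while (head < newhead)
			sum -= values[head++];	/* the "negative" transition */

		printf("row %d: frame sum = %ld\n", i, sum);
	}
	return 0;
}

The caveat about volatile arguments is exactly why this can't be applied blindly: subtracting f(x) on frame exit only undoes adding f(x) on entry if f is stable.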
@@ -514,7 +514,7 @@ eval_windowaggregates(WindowAggState *winstate)
* Advance until we reach a row not in frame (or end of partition).
*
* Note the loop invariant: agg_row_slot is either empty or holds the row
- * at position aggregatedupto. We advance aggregatedupto after processing
+ * at position aggregatedupto. We advance aggregatedupto after processing
* a row.
*/
for (;;)
@@ -778,7 +778,7 @@ spool_tuples(WindowAggState *winstate, int64 pos)
/*
* If the tuplestore has spilled to disk, alternate reading and writing
- * becomes quite expensive due to frequent buffer flushes. It's cheaper
+ * becomes quite expensive due to frequent buffer flushes. It's cheaper
* to force the entire partition to get spooled in one go.
*
* XXX this is a horrid kluge --- it'd be better to fix the performance
@@ -870,7 +870,7 @@ release_partition(WindowAggState *winstate)
* to our window framing rule
*
* The caller must have already determined that the row is in the partition
- * and fetched it into a slot. This function just encapsulates the framing
+ * and fetched it into a slot. This function just encapsulates the framing
* rules.
*/
static bool
@@ -972,7 +972,7 @@ row_is_in_frame(WindowAggState *winstate, int64 pos, TupleTableSlot *slot)
*
* Uses the winobj's read pointer for any required fetches; hence, if the
* frame mode is one that requires row comparisons, the winobj's mark must
- * not be past the currently known frame head. Also uses the specified slot
+ * not be past the currently known frame head. Also uses the specified slot
* for any required fetches.
*/
static void
@@ -1077,7 +1077,7 @@ update_frameheadpos(WindowObject winobj, TupleTableSlot *slot)
*
* Uses the winobj's read pointer for any required fetches; hence, if the
* frame mode is one that requires row comparisons, the winobj's mark must
- * not be past the currently known frame tail. Also uses the specified slot
+ * not be past the currently known frame tail. Also uses the specified slot
* for any required fetches.
*/
static void
@@ -1420,8 +1420,8 @@ ExecInitWindowAgg(WindowAgg *node, EState *estate, int eflags)
winstate->ss.ps.state = estate;
/*
- * Create expression contexts. We need two, one for per-input-tuple
- * processing and one for per-output-tuple processing. We cheat a little
+ * Create expression contexts. We need two, one for per-input-tuple
+ * processing and one for per-output-tuple processing. We cheat a little
* by using ExecAssignExprContext() to build both.
*/
ExecAssignExprContext(estate, &winstate->ss.ps);
@@ -1975,7 +1975,7 @@ window_gettupleslot(WindowObject winobj, int64 pos, TupleTableSlot *slot)
* requested amount of space. Subsequent calls just return the same chunk.
*
* Memory obtained this way is normally used to hold state that should be
- * automatically reset for each new partition. If a window function wants
+ * automatically reset for each new partition. If a window function wants
* to hold state across the whole query, fcinfo->fn_extra can be used in the
* usual way for that.
*/
diff --git a/src/backend/executor/nodeWorktablescan.c b/src/backend/executor/nodeWorktablescan.c
index fac922c3e80..a1ed37420d2 100644
--- a/src/backend/executor/nodeWorktablescan.c
+++ b/src/backend/executor/nodeWorktablescan.c
@@ -82,7 +82,7 @@ ExecWorkTableScan(WorkTableScanState *node)
{
/*
* On the first call, find the ancestor RecursiveUnion's state via the
- * Param slot reserved for it. (We can't do this during node init because
+ * Param slot reserved for it. (We can't do this during node init because
* there are corner cases where we'll get the init call before the
* RecursiveUnion does.)
*/
diff --git a/src/backend/executor/spi.c b/src/backend/executor/spi.c
index 7567d94e498..09e331629e5 100644
--- a/src/backend/executor/spi.c
+++ b/src/backend/executor/spi.c
@@ -256,7 +256,7 @@ AtEOSubXact_SPI(bool isCommit, SubTransactionId mySubid)
}
/*
- * Pop the stack entry and reset global variables. Unlike
+ * Pop the stack entry and reset global variables. Unlike
* SPI_finish(), we don't risk switching to memory contexts that might
* be already gone.
*/
@@ -1300,7 +1300,7 @@ SPI_cursor_open_internal(const char *name, SPIPlanPtr plan,
}
/*
- * Disallow SCROLL with SELECT FOR UPDATE. This is not redundant with the
+ * Disallow SCROLL with SELECT FOR UPDATE. This is not redundant with the
* check in transformDeclareCursorStmt because the cursor options might
* not have come through there.
*/
@@ -1554,7 +1554,7 @@ SPI_plan_is_valid(SPIPlanPtr plan)
/*
* SPI_result_code_string --- convert any SPI return code to a string
*
- * This is often useful in error messages. Most callers will probably
+ * This is often useful in error messages. Most callers will probably
* only pass negative (error-case) codes, but for generality we recognize
* the success codes too.
*/
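A typical error-message use, sketched under the usual SPI conventions (assumes executor/spi.h; the statement text is only an example):

	int			ret = SPI_execute("UPDATE mytab SET hits = hits + 1",
								  false, 0);

	if (ret < 0)
		elog(ERROR, "SPI_execute failed: %s", SPI_result_code_string(ret));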
@@ -1624,7 +1624,7 @@ SPI_result_code_string(int code)
* CachedPlanSources.
*
* This is exported so that pl/pgsql can use it (this beats letting pl/pgsql
- * look directly into the SPIPlan for itself). It's not documented in
+ * look directly into the SPIPlan for itself). It's not documented in
* spi.sgml because we'd just as soon not have too many places using this.
*/
List *
@@ -1640,7 +1640,7 @@ SPI_plan_get_plan_sources(SPIPlanPtr plan)
* return NULL. Caller is responsible for doing ReleaseCachedPlan().
*
* This is exported so that pl/pgsql can use it (this beats letting pl/pgsql
- * look directly into the SPIPlan for itself). It's not documented in
+ * look directly into the SPIPlan for itself). It's not documented in
* spi.sgml because we'd just as soon not have too many places using this.
*/
CachedPlan *
@@ -2198,7 +2198,7 @@ _SPI_execute_plan(SPIPlanPtr plan, ParamListInfo paramLI,
/*
* The last canSetTag query sets the status values returned to the
- * caller. Be careful to free any tuptables not returned, to
+ * caller. Be careful to free any tuptables not returned, to
* avoid intratransaction memory leak.
*/
if (canSetTag)
diff --git a/src/backend/executor/tstoreReceiver.c b/src/backend/executor/tstoreReceiver.c
index 0b47a391cf8..c4892123806 100644
--- a/src/backend/executor/tstoreReceiver.c
+++ b/src/backend/executor/tstoreReceiver.c
@@ -5,7 +5,7 @@
* a Tuplestore.
*
* Optionally, we can force detoasting (but not decompression) of out-of-line
- * toasted values. This is to support cursors WITH HOLD, which must retain
+ * toasted values. This is to support cursors WITH HOLD, which must retain
* data even if the underlying table is dropped.
*
*
diff --git a/src/backend/lib/stringinfo.c b/src/backend/lib/stringinfo.c
index a5b9f2e03e1..42c08e9291c 100644
--- a/src/backend/lib/stringinfo.c
+++ b/src/backend/lib/stringinfo.c
@@ -99,7 +99,7 @@ appendStringInfo(StringInfo str, const char *fmt,...)
* appendStringInfoVA
*
* Attempt to format text data under the control of fmt (an sprintf-style
- * format string) and append it to whatever is already in str. If successful
+ * format string) and append it to whatever is already in str. If successful
* return true; if not (because there's not enough space), return false
* without modifying str. Typically the caller would enlarge str and retry
* on false return --- see appendStringInfo for standard usage pattern.
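[Editor's note: the enlarge-and-retry pattern referred to above looks roughly like this; a sketch modeled on appendStringInfo itself, which is the canonical caller.]

    void
    appendStringInfo(StringInfo str, const char *fmt,...)
    {
        for (;;)
        {
            va_list     args;
            bool        success;

            /* Try to format the data into the currently available space. */
            va_start(args, fmt);
            success = appendStringInfoVA(str, fmt, args);
            va_end(args);

            if (success)
                break;

            /* Out of space: double the buffer size and retry. */
            enlargeStringInfo(str, str->maxlen);
        }
    }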
@@ -255,7 +255,7 @@ enlargeStringInfo(StringInfo str, int needed)
int newlen;
/*
- * Guard against out-of-range "needed" values. Without this, we can get
+ * Guard against out-of-range "needed" values. Without this, we can get
* an overflow or infinite loop in the following.
*/
if (needed < 0) /* should not happen */
diff --git a/src/backend/libpq/auth.c b/src/backend/libpq/auth.c
index f3f3b71894b..32d77b4e2d9 100644
--- a/src/backend/libpq/auth.c
+++ b/src/backend/libpq/auth.c
@@ -49,7 +49,7 @@ static int recv_and_check_password_packet(Port *port);
/* Max size of username the ident server can return */
#define IDENT_USERNAME_MAX 512
-/* Standard TCP port number for Ident service. Assigned by IANA */
+/* Standard TCP port number for Ident service. Assigned by IANA */
#define IDENT_PORT 113
static int ident_inet(hbaPort *port);
@@ -708,7 +708,7 @@ recv_password_packet(Port *port)
(errmsg("received password packet")));
/*
- * Return the received string. Note we do not attempt to do any
+ * Return the received string. Note we do not attempt to do any
* character-set conversion on it; since we don't yet know the client's
* encoding, there wouldn't be much point.
*/
@@ -1600,7 +1600,7 @@ interpret_ident_response(const char *ident_response,
/*
* Talk to the ident server on host "remote_ip_addr" and find out who
* owns the TCP connection from its port "remote_port" to port
- * "local_port_addr" on host "local_ip_addr". Return the user name the
+ * "local_port_addr" on host "local_ip_addr". Return the user name the
* ident server gives as "*ident_user".
*
* IP addresses and port numbers are in network byte order.
diff --git a/src/backend/libpq/be-secure.c b/src/backend/libpq/be-secure.c
index 34f5b33c60b..d1beda871f8 100644
--- a/src/backend/libpq/be-secure.c
+++ b/src/backend/libpq/be-secure.c
@@ -30,13 +30,13 @@
* impersonations.
*
* Another benefit of EDH is that it allows the backend and
- * clients to use DSA keys. DSA keys can only provide digital
+ * clients to use DSA keys. DSA keys can only provide digital
* signatures, not encryption, and are often acceptable in
* jurisdictions where RSA keys are unacceptable.
*
* The downside to EDH is that it makes it impossible to
* use ssldump(1) if there's a problem establishing an SSL
- * session. In this case you'll need to temporarily disable
+ * session. In this case you'll need to temporarily disable
* EDH by commenting out the callback.
*
* ...
diff --git a/src/backend/libpq/hba.c b/src/backend/libpq/hba.c
index b288fb8f2e0..387b457ae04 100644
--- a/src/backend/libpq/hba.c
+++ b/src/backend/libpq/hba.c
@@ -1783,7 +1783,7 @@ check_hba(hbaPort *port)
* Read the config file and create a List of HbaLine records for the contents.
*
* The configuration is read into a temporary list, and if any parse error
- * occurs the old list is kept in place and false is returned. Only if the
+ * occurs the old list is kept in place and false is returned. Only if the
* whole file parses OK is the list replaced, and the function returns true.
*
* On a false result, caller will take care of reporting a FATAL error in case
@@ -2269,7 +2269,7 @@ load_ident(void)
/*
* Determine what authentication method should be used when accessing database
- * "database" from frontend "raddr", user "user". Return the method and
+ * "database" from frontend "raddr", user "user". Return the method and
* an optional argument (stored in fields of *port), and STATUS_OK.
*
* If the file does not contain any entry matching the request, we return
diff --git a/src/backend/libpq/md5.c b/src/backend/libpq/md5.c
index 4fc8318fa77..dacd335289d 100644
--- a/src/backend/libpq/md5.c
+++ b/src/backend/libpq/md5.c
@@ -2,7 +2,7 @@
* md5.c
*
* Implements the MD5 Message-Digest Algorithm as specified in
- * RFC 1321. This implementation is a simple one, in that it
+ * RFC 1321. This implementation is a simple one, in that it
* needs every input byte to be buffered before doing any
* calculations. I do not expect this file to be used for
* general purpose MD5'ing of large amounts of data, only for
diff --git a/src/backend/libpq/pqcomm.c b/src/backend/libpq/pqcomm.c
index 516c559d9f3..9ff10ac2f55 100644
--- a/src/backend/libpq/pqcomm.c
+++ b/src/backend/libpq/pqcomm.c
@@ -447,7 +447,7 @@ StreamServerPort(int family, char *hostName, unsigned short portNumber,
/*
* Note: This might fail on some OS's, like Linux older than
* 2.4.21-pre3, that don't have the IPV6_V6ONLY socket option, and map
- * ipv4 addresses to ipv6. It will show ::ffff:ipv4 for all ipv4
+ * ipv4 addresses to ipv6. It will show ::ffff:ipv4 for all ipv4
* connections.
*/
err = bind(fd, addr->ai_addr, addr->ai_addrlen);
@@ -1127,7 +1127,7 @@ pq_getmessage(StringInfo s, int maxlen)
if (len > 0)
{
/*
- * Allocate space for message. If we run out of room (ridiculously
+ * Allocate space for message. If we run out of room (ridiculously
* large message), we will elog(ERROR), but we want to discard the
* message body so as not to lose communication sync.
*/
diff --git a/src/backend/libpq/pqformat.c b/src/backend/libpq/pqformat.c
index ca79a7652f5..4e1a2f76407 100644
--- a/src/backend/libpq/pqformat.c
+++ b/src/backend/libpq/pqformat.c
@@ -120,7 +120,7 @@ pq_sendbytes(StringInfo buf, const char *data, int datalen)
* pq_sendcountedtext - append a counted text string (with character set conversion)
*
* The data sent to the frontend by this routine is a 4-byte count field
- * followed by the string. The count includes itself or not, as per the
+ * followed by the string. The count includes itself or not, as per the
* countincludesself flag (pre-3.0 protocol requires it to include itself).
* The passed text string need not be null-terminated, and the data sent
* to the frontend isn't either.
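[Editor's note: a hypothetical caller, for illustration; the message type byte is arbitrary here, and the byte count assumes no character-set conversion takes place.]

    StringInfoData buf;

    pq_beginmessage(&buf, 'T');                 /* any message type byte */
    /* emits a 4-byte count of 9 (4 + 5), then the 5 data bytes */
    pq_sendcountedtext(&buf, "hello", 5, true);
    pq_endmessage(&buf);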
diff --git a/src/backend/main/main.c b/src/backend/main/main.c
index 4d6a07e0af7..d92883ce472 100644
--- a/src/backend/main/main.c
+++ b/src/backend/main/main.c
@@ -70,7 +70,7 @@ main(int argc, char *argv[])
/*
* Remember the physical location of the initially given argv[] array for
- * possible use by ps display. On some platforms, the argv[] storage must
+ * possible use by ps display. On some platforms, the argv[] storage must
* be overwritten in order to set the process title for ps. In such cases
* save_ps_display_args makes and returns a new copy of the argv[] array.
*
@@ -99,10 +99,10 @@ main(int argc, char *argv[])
MemoryContextInit();
/*
- * Set up locale information from environment. Note that LC_CTYPE and
+ * Set up locale information from environment. Note that LC_CTYPE and
* LC_COLLATE will be overridden later from pg_control if we are in an
* already-initialized database. We set them here so that they will be
- * available to fill pg_control during initdb. LC_MESSAGES will get set
+ * available to fill pg_control during initdb. LC_MESSAGES will get set
* later during GUC option processing, but we set it here to allow startup
* error messages to be localized.
*/
@@ -230,9 +230,9 @@ main(int argc, char *argv[])
/*
- * Place platform-specific startup hacks here. This is the right
+ * Place platform-specific startup hacks here. This is the right
* place to put code that must be executed early in the launch of any new
- * server process. Note that this code will NOT be executed when a backend
+ * server process. Note that this code will NOT be executed when a backend
* or sub-bootstrap process is forked, unless we are in a fork/exec
* environment (ie EXEC_BACKEND is defined).
*
diff --git a/src/backend/nodes/bitmapset.c b/src/backend/nodes/bitmapset.c
index b18b7a50f1c..8f0877ccaae 100644
--- a/src/backend/nodes/bitmapset.c
+++ b/src/backend/nodes/bitmapset.c
@@ -38,7 +38,7 @@
* where x's are unspecified bits. The two's complement negative is formed
* by inverting all the bits and adding one. Inversion gives
* yyyyyy01111
- * where each y is the inverse of the corresponding x. Incrementing gives
+ * where each y is the inverse of the corresponding x. Incrementing gives
* yyyyyy10000
* and then ANDing with the original value gives
* 00000010000
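[Editor's note: the code captures this identity in a macro along these lines, quoted for reference; signedbitmapword is the signed counterpart of bitmapword, needed so the negation is well defined.]

    #define RIGHTMOST_ONE(x) ((signedbitmapword) (x) & -((signedbitmapword) (x)))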
@@ -797,7 +797,7 @@ bms_join(Bitmapset *a, Bitmapset *b)
/*----------
* bms_first_member - find and remove first member of a set
*
- * Returns -1 if set is empty. NB: set is destructively modified!
+ * Returns -1 if set is empty. NB: set is destructively modified!
*
* This is intended as support for iterating through the members of a set.
* The typical pattern is
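[Editor's note: the pattern the comment goes on to show is, in sketch form; inputset and process_member are placeholders.]

    Bitmapset  *tmpset = bms_copy(inputset);
    int         x;

    while ((x = bms_first_member(tmpset)) >= 0)
        process_member(x);          /* work on one member at a time */
    bms_free(tmpset);

The copy is what gets destructively consumed, leaving the original set intact.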
diff --git a/src/backend/nodes/copyfuncs.c b/src/backend/nodes/copyfuncs.c
index 3de7653a380..9dba72bb592 100644
--- a/src/backend/nodes/copyfuncs.c
+++ b/src/backend/nodes/copyfuncs.c
@@ -4,7 +4,7 @@
* Copy functions for Postgres tree nodes.
*
* NOTE: we currently support copying all node types found in parse and
- * plan trees. We do not support copying executor state trees; there
+ * plan trees. We do not support copying executor state trees; there
* is no need for that, and no point in maintaining all the code that
* would be needed. We also do not support copying Path trees, mainly
* because the circular linkages between RelOptInfo and Path nodes can't
@@ -30,7 +30,7 @@
/*
* Macros to simplify copying of different kinds of fields. Use these
- * wherever possible to reduce the chance for silly typos. Note that these
+ * wherever possible to reduce the chance for silly typos. Note that these
* hard-wire the convention that the local variables in a Copy routine are
* named 'newnode' and 'from'.
*/
@@ -1040,7 +1040,7 @@ _copyIntoClause(const IntoClause *from)
/*
* We don't need a _copyExpr because Expr is an abstract supertype which
- * should never actually get instantiated. Also, since it has no common
+ * should never actually get instantiated. Also, since it has no common
* fields except NodeTag, there's no need for a helper routine to factor
* out copying the common fields...
*/
diff --git a/src/backend/nodes/equalfuncs.c b/src/backend/nodes/equalfuncs.c
index 1e2ab2b8373..210c9b77f57 100644
--- a/src/backend/nodes/equalfuncs.c
+++ b/src/backend/nodes/equalfuncs.c
@@ -11,7 +11,7 @@
* be handled easily in a simple depth-first traversal.
*
* Currently, in fact, equal() doesn't know how to compare Plan trees
- * either. This might need to be fixed someday.
+ * either. This might need to be fixed someday.
*
* NOTE: it is intentional that parse location fields (in nodes that have
* one) are not compared. This is because we want, for example, a variable
@@ -34,8 +34,8 @@
/*
- * Macros to simplify comparison of different kinds of fields. Use these
- * wherever possible to reduce the chance for silly typos. Note that these
+ * Macros to simplify comparison of different kinds of fields. Use these
+ * wherever possible to reduce the chance for silly typos. Note that these
* hard-wire the convention that the local variables in an Equal routine are
* named 'a' and 'b'.
*/
@@ -131,7 +131,7 @@ _equalIntoClause(const IntoClause *a, const IntoClause *b)
/*
* We don't need an _equalExpr because Expr is an abstract supertype which
- * should never actually get instantiated. Also, since it has no common
+ * should never actually get instantiated. Also, since it has no common
* fields except NodeTag, there's no need for a helper routine to factor
* out comparing the common fields...
*/
@@ -759,9 +759,9 @@ static bool
_equalPlaceHolderVar(const PlaceHolderVar *a, const PlaceHolderVar *b)
{
/*
- * We intentionally do not compare phexpr. Two PlaceHolderVars with the
+ * We intentionally do not compare phexpr. Two PlaceHolderVars with the
* same ID and levelsup should be considered equal even if the contained
- * expressions have managed to mutate to different states. This will
+ * expressions have managed to mutate to different states. This will
* happen during final plan construction when there are nested PHVs, since
* the inner PHV will get replaced by a Param in some copies of the outer
* PHV. Another way in which it can happen is that initplan sublinks
diff --git a/src/backend/nodes/list.c b/src/backend/nodes/list.c
index 35eec4f1ed6..f5a585535a7 100644
--- a/src/backend/nodes/list.c
+++ b/src/backend/nodes/list.c
@@ -796,7 +796,7 @@ list_union_oid(const List *list1, const List *list2)
* "intersection" if list1 is known unique beforehand.
*
* This variant works on lists of pointers, and determines list
- * membership via equal(). Note that the list1 member will be pointed
+ * membership via equal(). Note that the list1 member will be pointed
* to in the result.
*/
List *
@@ -988,7 +988,7 @@ list_append_unique_oid(List *list, Oid datum)
* via equal().
*
* This is almost the same functionality as list_union(), but list1 is
- * modified in-place rather than being copied. Note also that list2's cells
+ * modified in-place rather than being copied. Note also that list2's cells
* are not inserted in list1, so the analogy to list_concat() isn't perfect.
*/
List *
diff --git a/src/backend/nodes/nodeFuncs.c b/src/backend/nodes/nodeFuncs.c
index 42d6621a822..6ad995fb127 100644
--- a/src/backend/nodes/nodeFuncs.c
+++ b/src/backend/nodes/nodeFuncs.c
@@ -239,7 +239,7 @@ exprType(const Node *expr)
/*
* exprTypmod -
* returns the type-specific modifier of the expression's result type,
- * if it can be determined. In many cases, it can't and we return -1.
+ * if it can be determined. In many cases, it can't and we return -1.
*/
int32
exprTypmod(const Node *expr)
@@ -1477,8 +1477,8 @@ leftmostLoc(int loc1, int loc2)
*
* The walker routine should return "false" to continue the tree walk, or
* "true" to abort the walk and immediately return "true" to the top-level
- * caller. This can be used to short-circuit the traversal if the walker
- * has found what it came for. "false" is returned to the top-level caller
+ * caller. This can be used to short-circuit the traversal if the walker
+ * has found what it came for. "false" is returned to the top-level caller
* iff no invocation of the walker returned "true".
*
* The node types handled by expression_tree_walker include all those
@@ -1516,7 +1516,7 @@ leftmostLoc(int loc1, int loc2)
*
* expression_tree_walker will handle SubPlan nodes by recursing normally
* into the "testexpr" and the "args" list (which are expressions belonging to
- * the outer plan). It will not touch the completed subplan, however. Since
+ * the outer plan). It will not touch the completed subplan, however. Since
* there is no link to the original Query, it is not possible to recurse into
* subselects of an already-planned expression tree. This is OK for current
* uses, but may need to be revisited in future.
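[Editor's note: a minimal walker that follows this contract might look like the following hypothetical example, which is not part of this patch.]

    static bool
    contains_var_walker(Node *node, void *context)
    {
        if (node == NULL)
            return false;           /* false = keep walking */
        if (IsA(node, Var))
            return true;            /* found one: abort the whole walk */
        return expression_tree_walker(node, contains_var_walker, context);
    }

Calling contains_var_walker((Node *) expr, NULL) then returns true iff a Var appears anywhere in expr.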
@@ -2553,7 +2553,7 @@ expression_tree_mutator(Node *node,
* This routine exists just to reduce the number of places that need to know
* where all the expression subtrees of a Query are. Note it can be used
* for starting a walk at top level of a Query regardless of whether the
- * mutator intends to descend into subqueries. It is also useful for
+ * mutator intends to descend into subqueries. It is also useful for
* descending into subqueries within a mutator.
*
* Some callers want to suppress mutating of certain items in the Query,
@@ -2563,7 +2563,7 @@ expression_tree_mutator(Node *node,
* indicated items. (More flag bits may be added as needed.)
*
* Normally the Query node itself is copied, but some callers want it to be
- * modified in-place; they must pass QTW_DONT_COPY_QUERY in flags. All
+ * modified in-place; they must pass QTW_DONT_COPY_QUERY in flags. All
* modified substructure is safely copied in any case.
*/
Query *
diff --git a/src/backend/nodes/outfuncs.c b/src/backend/nodes/outfuncs.c
index 94e65fcf84c..04959644b3e 100644
--- a/src/backend/nodes/outfuncs.c
+++ b/src/backend/nodes/outfuncs.c
@@ -13,7 +13,7 @@
* NOTES
* Every node type that can appear in stored rules' parsetrees *must*
* have an output function defined here (as well as an input function
- * in readfuncs.c). For use in debugging, we also provide output
+ * in readfuncs.c). For use in debugging, we also provide output
* functions for nodes that appear in raw parsetrees, path, and plan trees.
* These nodes however need not have input functions.
*
@@ -30,8 +30,8 @@
/*
- * Macros to simplify output of different kinds of fields. Use these
- * wherever possible to reduce the chance for silly typos. Note that these
+ * Macros to simplify output of different kinds of fields. Use these
+ * wherever possible to reduce the chance for silly typos. Note that these
* hard-wire conventions about the names of the local variables in an Out
* routine.
*/
diff --git a/src/backend/nodes/params.c b/src/backend/nodes/params.c
index 601659bb525..e4beaa13675 100644
--- a/src/backend/nodes/params.c
+++ b/src/backend/nodes/params.c
@@ -27,7 +27,7 @@
*
* Note: the intent of this function is to make a static, self-contained
* set of parameter values. If dynamic parameter hooks are present, we
- * intentionally do not copy them into the result. Rather, we forcibly
+ * intentionally do not copy them into the result. Rather, we forcibly
* instantiate all available parameter values and copy the datum values.
*/
ParamListInfo
diff --git a/src/backend/nodes/read.c b/src/backend/nodes/read.c
index 3861fb28e2d..32796d248ec 100644
--- a/src/backend/nodes/read.c
+++ b/src/backend/nodes/read.c
@@ -85,21 +85,21 @@ stringToNode(char *str)
* Backslashes themselves must also be backslashed for consistency.
* Any other character can be, but need not be, backslashed as well.
* * If the resulting token is '<>' (with no backslash), it is returned
- * as a non-NULL pointer to the token but with length == 0. Note that
+ * as a non-NULL pointer to the token but with length == 0. Note that
* there is no other way to get a zero-length token.
*
* Returns a pointer to the start of the next token, and the length of the
- * token (including any embedded backslashes!) in *length. If there are
+ * token (including any embedded backslashes!) in *length. If there are
* no more tokens, NULL and 0 are returned.
*
* NOTE: this routine doesn't remove backslashes; the caller must do so
* if necessary (see "debackslash").
*
* NOTE: prior to release 7.0, this routine also had a special case to treat
- * a token starting with '"' as extending to the next '"'. This code was
+ * a token starting with '"' as extending to the next '"'. This code was
* broken, however, since it would fail to cope with a string containing an
* embedded '"'. I have therefore removed this special case, and instead
- * introduced rules for using backslashes to quote characters. Higher-level
+ * introduced rules for using backslashes to quote characters. Higher-level
* code should add backslashes to a string constant to ensure it is treated
* as a single token.
*/
@@ -259,7 +259,7 @@ nodeTokenType(char *token, int length)
* Slightly higher-level reader.
*
* This routine applies some semantic knowledge on top of the purely
- * lexical tokenizer pg_strtok(). It can read
+ * lexical tokenizer pg_strtok(). It can read
* * Value token nodes (integers, floats, or strings);
* * General nodes (via parseNodeString() from readfuncs.c);
* * Lists of the above;
diff --git a/src/backend/nodes/readfuncs.c b/src/backend/nodes/readfuncs.c
index 3a16e9db524..6ecc580e007 100644
--- a/src/backend/nodes/readfuncs.c
+++ b/src/backend/nodes/readfuncs.c
@@ -12,7 +12,7 @@
*
* NOTES
* Path and Plan nodes do not have any readfuncs support, because we
- * never have occasion to read them in. (There was once code here that
+ * never have occasion to read them in. (There was once code here that
* claimed to read them, but it was broken as well as unused.) We
* never read executor state trees, either.
*
@@ -34,7 +34,7 @@
/*
* Macros to simplify reading of different kinds of fields. Use these
- * wherever possible to reduce the chance for silly typos. Note that these
+ * wherever possible to reduce the chance for silly typos. Note that these
* hard-wire conventions about the names of the local variables in a Read
* routine.
*/
@@ -128,7 +128,7 @@
/*
* NOTE: use atoi() to read values written with %d, or atoui() to read
* values written with %u in outfuncs.c. An exception is OID values,
- * for which use atooid(). (As of 7.1, outfuncs.c writes OIDs as %u,
+ * for which use atooid(). (As of 7.1, outfuncs.c writes OIDs as %u,
* but this will probably change in the future.)
*/
#define atoui(x) ((unsigned int) strtoul((x), NULL, 10))
@@ -578,7 +578,7 @@ _readOpExpr(void)
/*
* The opfuncid is stored in the textual format primarily for debugging
* and documentation reasons. We want to always read it as zero to force
- * it to be re-looked-up in the pg_operator entry. This ensures that
+ * it to be re-looked-up in the pg_operator entry. This ensures that
* stored rules don't have hidden dependencies on operators' functions.
* (We don't currently support an ALTER OPERATOR command, but might
* someday.)
@@ -609,7 +609,7 @@ _readDistinctExpr(void)
/*
* The opfuncid is stored in the textual format primarily for debugging
* and documentation reasons. We want to always read it as zero to force
- * it to be re-looked-up in the pg_operator entry. This ensures that
+ * it to be re-looked-up in the pg_operator entry. This ensures that
* stored rules don't have hidden dependencies on operators' functions.
* (We don't currently support an ALTER OPERATOR command, but might
* someday.)
@@ -640,7 +640,7 @@ _readNullIfExpr(void)
/*
* The opfuncid is stored in the textual format primarily for debugging
* and documentation reasons. We want to always read it as zero to force
- * it to be re-looked-up in the pg_operator entry. This ensures that
+ * it to be re-looked-up in the pg_operator entry. This ensures that
* stored rules don't have hidden dependencies on operators' functions.
* (We don't currently support an ALTER OPERATOR command, but might
* someday.)
@@ -671,7 +671,7 @@ _readScalarArrayOpExpr(void)
/*
* The opfuncid is stored in the textual format primarily for debugging
* and documentation reasons. We want to always read it as zero to force
- * it to be re-looked-up in the pg_operator entry. This ensures that
+ * it to be re-looked-up in the pg_operator entry. This ensures that
* stored rules don't have hidden dependencies on operators' functions.
* (We don't currently support an ALTER OPERATOR command, but might
* someday.)
diff --git a/src/backend/nodes/tidbitmap.c b/src/backend/nodes/tidbitmap.c
index 43628aceb46..abf98a3be36 100644
--- a/src/backend/nodes/tidbitmap.c
+++ b/src/backend/nodes/tidbitmap.c
@@ -19,7 +19,7 @@
* of lossiness. In theory we could fall back to page ranges at some
* point, but for now that seems useless complexity.
*
- * We also support the notion of candidate matches, or rechecking. This
+ * We also support the notion of candidate matches, or rechecking. This
* means we know that a search need visit only some tuples on a page,
* but we are not certain that all of those tuples are real matches.
* So the eventual heap scan must recheck the quals for these tuples only,
@@ -48,7 +48,7 @@
/*
* The maximum number of tuples per page is not large (typically 256 with
* 8K pages, or 1024 with 32K pages). So there's not much point in making
- * the per-page bitmaps variable size. We just legislate that the size
+ * the per-page bitmaps variable size. We just legislate that the size
* is this:
*/
#define MAX_TUPLES_PER_PAGE MaxHeapTuplesPerPage
@@ -61,10 +61,10 @@
* for that page in the page table.
*
* We actually store both exact pages and lossy chunks in the same hash
- * table, using identical data structures. (This is because dynahash.c's
+ * table, using identical data structures. (This is because dynahash.c's
* memory management doesn't allow space to be transferred easily from one
* hashtable to another.) Therefore it's best if PAGES_PER_CHUNK is the
- * same as MAX_TUPLES_PER_PAGE, or at least not too different. But we
+ * same as MAX_TUPLES_PER_PAGE, or at least not too different. But we
* also want PAGES_PER_CHUNK to be a power of 2 to avoid expensive integer
* remainder operations. So, define it like this:
*/
@@ -142,7 +142,7 @@ struct TIDBitmap
/*
* When iterating over a bitmap in sorted order, a TBMIterator is used to
- * track our progress. There can be several iterators scanning the same
+ * track our progress. There can be several iterators scanning the same
* bitmap concurrently. Note that the bitmap becomes read-only as soon as
* any iterator is created.
*/
@@ -790,7 +790,7 @@ tbm_find_pageentry(const TIDBitmap *tbm, BlockNumber pageno)
*
* If new, the entry is marked as an exact (non-chunk) entry.
*
- * This may cause the table to exceed the desired memory size. It is
+ * This may cause the table to exceed the desired memory size. It is
* up to the caller to call tbm_lossify() at the next safe point if so.
*/
static PagetableEntry *
@@ -870,7 +870,7 @@ tbm_page_is_lossy(const TIDBitmap *tbm, BlockNumber pageno)
/*
* tbm_mark_page_lossy - mark the page number as lossily stored
*
- * This may cause the table to exceed the desired memory size. It is
+ * This may cause the table to exceed the desired memory size. It is
* up to the caller to call tbm_lossify() at the next safe point if so.
*/
static void
@@ -891,7 +891,7 @@ tbm_mark_page_lossy(TIDBitmap *tbm, BlockNumber pageno)
chunk_pageno = pageno - bitno;
/*
- * Remove any extant non-lossy entry for the page. If the page is its own
+ * Remove any extant non-lossy entry for the page. If the page is its own
* chunk header, however, we skip this and handle the case below.
*/
if (bitno != 0)
@@ -956,7 +956,7 @@ tbm_lossify(TIDBitmap *tbm)
*
* Since we are called as soon as nentries exceeds maxentries, we should
* push nentries down to significantly less than maxentries, or else we'll
- * just end up doing this again very soon. We shoot for maxentries/2.
+ * just end up doing this again very soon. We shoot for maxentries/2.
*/
Assert(!tbm->iterating);
Assert(tbm->status == TBM_HASH);
diff --git a/src/backend/optimizer/geqo/geqo_eval.c b/src/backend/optimizer/geqo/geqo_eval.c
index a3ff5b3153c..e5c6bd9d20d 100644
--- a/src/backend/optimizer/geqo/geqo_eval.c
+++ b/src/backend/optimizer/geqo/geqo_eval.c
@@ -82,11 +82,11 @@ geqo_eval(PlannerInfo *root, Gene *tour, int num_gene)
* not already contain some entries. The newly added entries will be
* recycled by the MemoryContextDelete below, so we must ensure that the
* list is restored to its former state before exiting. We can do this by
- * truncating the list to its original length. NOTE this assumes that any
+ * truncating the list to its original length. NOTE this assumes that any
* added entries are appended at the end!
*
* We also must take care not to mess up the outer join_rel_hash, if there
- * is one. We can do this by just temporarily setting the link to NULL.
+ * is one. We can do this by just temporarily setting the link to NULL.
* (If we are dealing with enough join rels, which we very likely are, a
* new hash table will get built and used locally.)
*
@@ -217,7 +217,7 @@ gimme_tree(PlannerInfo *root, Gene *tour, int num_gene)
* Merge a "clump" into the list of existing clumps for gimme_tree.
*
* We try to merge the clump into some existing clump, and repeat if
- * successful. When no more merging is possible, insert the clump
+ * successful. When no more merging is possible, insert the clump
* into the list, preserving the list ordering rule (namely, that
* clumps of larger size appear earlier).
*
@@ -268,7 +268,7 @@ merge_clump(PlannerInfo *root, List *clumps, Clump *new_clump, bool force)
/*
* Recursively try to merge the enlarged old_clump with
- * others. When no further merge is possible, we'll reinsert
+ * others. When no further merge is possible, we'll reinsert
* it into the list.
*/
return merge_clump(root, clumps, old_clump, force);
@@ -279,7 +279,7 @@ merge_clump(PlannerInfo *root, List *clumps, Clump *new_clump, bool force)
/*
* No merging is possible, so add new_clump as an independent clump, in
- * proper order according to size. We can be fast for the common case
+ * proper order according to size. We can be fast for the common case
* where it has size 1 --- it should always go at the end.
*/
if (clumps == NIL || new_clump->size == 1)
diff --git a/src/backend/optimizer/path/allpaths.c b/src/backend/optimizer/path/allpaths.c
index bfd3809a007..5d7c6abad94 100644
--- a/src/backend/optimizer/path/allpaths.c
+++ b/src/backend/optimizer/path/allpaths.c
@@ -435,7 +435,7 @@ set_foreign_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
* set_append_rel_size
* Set size estimates for an "append relation"
*
- * The passed-in rel and RTE represent the entire append relation. The
+ * The passed-in rel and RTE represent the entire append relation. The
* relation's contents are computed by appending together the output of
* the individual member relations. Note that in the inheritance case,
* the first member relation is actually the same table as is mentioned in
@@ -499,7 +499,7 @@ set_append_rel_size(PlannerInfo *root, RelOptInfo *rel,
/*
* We have to copy the parent's targetlist and quals to the child,
- * with appropriate substitution of variables. However, only the
+ * with appropriate substitution of variables. However, only the
* baserestrictinfo quals are needed before we can check for
* constraint exclusion; so do that first and then check to see if we
* can disregard this child.
@@ -563,7 +563,7 @@ set_append_rel_size(PlannerInfo *root, RelOptInfo *rel,
/*
* We have to make child entries in the EquivalenceClass data
- * structures as well. This is needed either if the parent
+ * structures as well. This is needed either if the parent
* participates in some eclass joins (because we will want to consider
* inner-indexscan joins on the individual children) or if the parent
* has useful pathkeys (because we should try to build MergeAppend
@@ -604,7 +604,7 @@ set_append_rel_size(PlannerInfo *root, RelOptInfo *rel,
/*
* Accumulate per-column estimates too. We need not do anything
- * for PlaceHolderVars in the parent list. If child expression
+ * for PlaceHolderVars in the parent list. If child expression
* isn't a Var, or we didn't record a width estimate for it, we
* have to fall back on a datatype-based estimate.
*
@@ -680,7 +680,7 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
/*
* Generate access paths for each member relation, and remember the
- * cheapest path for each one. Also, identify all pathkeys (orderings)
+ * cheapest path for each one. Also, identify all pathkeys (orderings)
* and parameterizations (required_outer sets) available for the member
* relations.
*/
@@ -730,7 +730,7 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
/*
* Collect lists of all the available path orderings and
- * parameterizations for all the children. We use these as a
+ * parameterizations for all the children. We use these as a
* heuristic to indicate which sort orderings and parameterizations we
* should build Append and MergeAppend paths for.
*/
@@ -816,7 +816,7 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
* so that not that many cases actually get considered here.)
*
* The Append node itself cannot enforce quals, so all qual checking must
- * be done in the child paths. This means that to have a parameterized
+ * be done in the child paths. This means that to have a parameterized
* Append path, we must have the exact same parameterization for each
* child path; otherwise some children might be failing to check the
* moved-down quals. To make them match up, we can try to increase the
@@ -987,7 +987,7 @@ get_cheapest_parameterized_child_path(PlannerInfo *root, RelOptInfo *rel,
* joinquals to be checked within the path's scan. However, some existing
* paths might check the available joinquals already while others don't;
* therefore, it's not clear which existing path will be cheapest after
- * reparameterization. We have to go through them all and find out.
+ * reparameterization. We have to go through them all and find out.
*/
cheapest = NULL;
foreach(lc, rel->pathlist)
@@ -1101,7 +1101,7 @@ has_multiple_baserels(PlannerInfo *root)
*
* We don't currently support generating parameterized paths for subqueries
* by pushing join clauses down into them; it seems too expensive to re-plan
- * the subquery multiple times to consider different alternatives. So the
+ * the subquery multiple times to consider different alternatives. So the
* subquery will have exactly one path. (The path will be parameterized
* if the subquery contains LATERAL references, otherwise not.) Since there's
* no freedom of action here, there's no need for a separate set_subquery_size
@@ -1510,7 +1510,7 @@ make_rel_from_joinlist(PlannerInfo *root, List *joinlist)
* independent jointree items in the query. This is > 1.
*
* 'initial_rels' is a list of RelOptInfo nodes for each independent
- * jointree item. These are the components to be joined together.
+ * jointree item. These are the components to be joined together.
* Note that levels_needed == list_length(initial_rels).
*
* Returns the final level of join relations, i.e., the relation that is
@@ -1526,7 +1526,7 @@ make_rel_from_joinlist(PlannerInfo *root, List *joinlist)
* needed for these paths need have been instantiated.
*
* Note to plugin authors: the functions invoked during standard_join_search()
- * modify root->join_rel_list and root->join_rel_hash. If you want to do more
+ * modify root->join_rel_list and root->join_rel_hash. If you want to do more
* than one join-order search, you'll probably need to save and restore the
* original states of those data structures. See geqo_eval() for an example.
*/
@@ -1625,7 +1625,7 @@ standard_join_search(PlannerInfo *root, int levels_needed, List *initial_rels)
* column k is found to be unsafe to reference, we set unsafeColumns[k] to
* TRUE, but we don't reject the subquery overall since column k might
* not be referenced by some/all quals. The unsafeColumns[] array will be
- * consulted later by qual_is_pushdown_safe(). It's better to do it this
+ * consulted later by qual_is_pushdown_safe(). It's better to do it this
* way than to make the checks directly in qual_is_pushdown_safe(), because
* when the subquery involves set operations we have to check the output
* expressions in each arm of the set op.
@@ -1718,7 +1718,7 @@ recurse_pushdown_safe(Node *setOp, Query *topquery,
* check_output_expressions - check subquery's output expressions for safety
*
* There are several cases in which it's unsafe to push down an upper-level
- * qual if it references a particular output column of a subquery. We check
+ * qual if it references a particular output column of a subquery. We check
* each output column of the subquery and set unsafeColumns[k] to TRUE if
* that column is unsafe for a pushed-down qual to reference. The conditions
* checked here are:
@@ -1736,7 +1736,7 @@ recurse_pushdown_safe(Node *setOp, Query *topquery,
* of rows returned. (This condition is vacuous for DISTINCT, because then
* there are no non-DISTINCT output columns, so we needn't check. But note
* we are assuming that the qual can't distinguish values that the DISTINCT
- * operator sees as equal. This is a bit shaky but we have no way to test
+ * operator sees as equal. This is a bit shaky but we have no way to test
* for the case, and it's unlikely enough that we shouldn't refuse the
* optimization just because it could theoretically happen.)
*/
@@ -1853,7 +1853,7 @@ qual_is_pushdown_safe(Query *subquery, Index rti, Node *qual,
/*
* It would be unsafe to push down window function calls, but at least for
- * the moment we could never see any in a qual anyhow. (The same applies
+ * the moment we could never see any in a qual anyhow. (The same applies
* to aggregates, which we check for in pull_var_clause below.)
*/
Assert(!contain_window_function(qual));
diff --git a/src/backend/optimizer/path/clausesel.c b/src/backend/optimizer/path/clausesel.c
index a0879dfafdd..34308988d62 100644
--- a/src/backend/optimizer/path/clausesel.c
+++ b/src/backend/optimizer/path/clausesel.c
@@ -58,7 +58,7 @@ static void addRangeClause(RangeQueryClause **rqlist, Node *clause,
* See clause_selectivity() for the meaning of the additional parameters.
*
* Our basic approach is to take the product of the selectivities of the
- * subclauses. However, that's only right if the subclauses have independent
+ * subclauses. However, that's only right if the subclauses have independent
* probabilities, and in reality they are often NOT independent. So,
* we want to be smarter where we can.
@@ -75,12 +75,12 @@ static void addRangeClause(RangeQueryClause **rqlist, Node *clause,
* see that hisel is the fraction of the range below the high bound, while
* losel is the fraction above the low bound; so hisel can be interpreted
* directly as a 0..1 value but we need to convert losel to 1-losel before
- * interpreting it as a value. Then the available range is 1-losel to hisel.
+ * interpreting it as a value. Then the available range is 1-losel to hisel.
* However, this calculation double-excludes nulls, so really we need
* hisel + losel + null_frac - 1.)
*
* If either selectivity is exactly DEFAULT_INEQ_SEL, we forget this equation
- * and instead use DEFAULT_RANGE_INEQ_SEL. The same applies if the equation
+ * and instead use DEFAULT_RANGE_INEQ_SEL. The same applies if the equation
* yields an impossible (negative) result.
*
* A free side-effect is that we can recognize redundant inequalities such
@@ -174,7 +174,7 @@ clauselist_selectivity(PlannerInfo *root,
{
/*
* If it's not a "<" or ">" operator, just merge the
- * selectivity in generically. But if it's the right oprrest,
+ * selectivity in generically. But if it's the right oprrest,
* add the clause to rqlist for later processing.
*/
switch (get_oprrest(expr->opno))
@@ -459,14 +459,14 @@ treat_as_join_clause(Node *clause, RestrictInfo *rinfo,
* nestloop join's inner relation --- varRelid should then be the ID of the
* inner relation.
*
- * When varRelid is 0, all variables are treated as variables. This
+ * When varRelid is 0, all variables are treated as variables. This
* is appropriate for ordinary join clauses and restriction clauses.
*
* jointype is the join type, if the clause is a join clause. Pass JOIN_INNER
* if the clause isn't a join clause.
*
* sjinfo is NULL for a non-join clause, otherwise it provides additional
- * context information about the join being performed. There are some
+ * context information about the join being performed. There are some
* special cases:
* 1. For a special (not INNER) join, sjinfo is always a member of
* root->join_info_list.
@@ -501,7 +501,7 @@ clause_selectivity(PlannerInfo *root,
/*
* If the clause is marked pseudoconstant, then it will be used as a
* gating qual and should not affect selectivity estimates; hence
- * return 1.0. The only exception is that a constant FALSE may be
+ * return 1.0. The only exception is that a constant FALSE may be
* taken as having selectivity 0.0, since it will surely mean no rows
* out of the plan. This case is simple enough that we need not
* bother caching the result.
@@ -520,11 +520,11 @@ clause_selectivity(PlannerInfo *root,
/*
* If possible, cache the result of the selectivity calculation for
- * the clause. We can cache if varRelid is zero or the clause
+ * the clause. We can cache if varRelid is zero or the clause
* contains only vars of that relid --- otherwise varRelid will affect
* the result, so mustn't cache. Outer join quals might be examined
* with either their join's actual jointype or JOIN_INNER, so we need
- * two cache variables to remember both cases. Note: we assume the
+ * two cache variables to remember both cases. Note: we assume the
* result won't change if we are switching the input relations or
* considering a unique-ified case, so we only need one cache variable
* for all non-JOIN_INNER cases.
@@ -685,7 +685,7 @@ clause_selectivity(PlannerInfo *root,
/*
* This is not an operator, so we guess at the selectivity. THIS IS A
* HACK TO GET V4 OUT THE DOOR. FUNCS SHOULD BE ABLE TO HAVE
- * SELECTIVITIES THEMSELVES. -- JMH 7/9/92
+ * SELECTIVITIES THEMSELVES. -- JMH 7/9/92
*/
s1 = (Selectivity) 0.3333333;
}
diff --git a/src/backend/optimizer/path/costsize.c b/src/backend/optimizer/path/costsize.c
index a2cc6979594..53cd079ff22 100644
--- a/src/backend/optimizer/path/costsize.c
+++ b/src/backend/optimizer/path/costsize.c
@@ -24,7 +24,7 @@
*
* Obviously, taking constants for these values is an oversimplification,
* but it's tough enough to get any useful estimates even at this level of
- * detail. Note that all of these parameters are user-settable, in case
+ * detail. Note that all of these parameters are user-settable, in case
* the default values are drastically off for a particular platform.
*
* seq_page_cost and random_page_cost can also be overridden for an individual
@@ -491,7 +491,7 @@ cost_index(IndexPath *path, PlannerInfo *root, double loop_count)
* computed for us by query_planner.
*
* Caller is expected to have ensured that tuples_fetched is greater than zero
- * and rounded to integer (see clamp_row_est). The result will likewise be
+ * and rounded to integer (see clamp_row_est). The result will likewise be
* greater than zero and integral.
*/
double
@@ -692,7 +692,7 @@ cost_bitmap_heap_scan(Path *path, PlannerInfo *root, RelOptInfo *baserel,
/*
* For small numbers of pages we should charge spc_random_page_cost
* apiece, while if nearly all the table's pages are being read, it's more
- * appropriate to charge spc_seq_page_cost apiece. The effect is
+ * appropriate to charge spc_seq_page_cost apiece. The effect is
* nonlinear, too. For lack of a better idea, interpolate like this to
* determine the cost per page.
*/
@@ -767,7 +767,7 @@ cost_bitmap_tree_node(Path *path, Cost *cost, Selectivity *selec)
* Estimate the cost of a BitmapAnd node
*
* Note that this considers only the costs of index scanning and bitmap
- * creation, not the eventual heap access. In that sense the object isn't
+ * creation, not the eventual heap access. In that sense the object isn't
* truly a Path, but it has enough path-like properties (costs in particular)
* to warrant treating it as one. We don't bother to set the path rows field,
* however.
@@ -826,7 +826,7 @@ cost_bitmap_or_node(BitmapOrPath *path, PlannerInfo *root)
/*
* We estimate OR selectivity on the assumption that the inputs are
* non-overlapping, since that's often the case in "x IN (list)" type
- * situations. Of course, we clamp to 1.0 at the end.
+ * situations. Of course, we clamp to 1.0 at the end.
*
* The runtime cost of the BitmapOr itself is estimated at 100x
* cpu_operator_cost for each tbm_union needed. Probably too small,
@@ -915,7 +915,7 @@ cost_tidscan(Path *path, PlannerInfo *root,
/*
* We must force TID scan for WHERE CURRENT OF, because only nodeTidscan.c
- * understands how to do it correctly. Therefore, honor enable_tidscan
+ * understands how to do it correctly. Therefore, honor enable_tidscan
* only when CURRENT OF isn't present. Also note that cost_qual_eval
* counts a CurrentOfExpr as having startup cost disable_cost, which we
* subtract off here; that's to prevent other plan types such as seqscan
@@ -1034,7 +1034,7 @@ cost_functionscan(Path *path, PlannerInfo *root,
*
* Currently, nodeFunctionscan.c always executes the function to
* completion before returning any rows, and caches the results in a
- * tuplestore. So the function eval cost is all startup cost, and per-row
+ * tuplestore. So the function eval cost is all startup cost, and per-row
* costs are minimal.
*
* XXX in principle we ought to charge tuplestore spill costs if the
@@ -1106,7 +1106,7 @@ cost_valuesscan(Path *path, PlannerInfo *root,
*
* Note: this is used for both self-reference and regular CTEs; the
* possible cost differences are below the threshold of what we could
- * estimate accurately anyway. Note that the costs of evaluating the
+ * estimate accurately anyway. Note that the costs of evaluating the
* referenced CTE query are added into the final plan as initplan costs,
* and should NOT be counted here.
*/
@@ -1200,7 +1200,7 @@ cost_recursive_union(Plan *runion, Plan *nrterm, Plan *rterm)
* If the total volume exceeds sort_mem, we switch to a tape-style merge
* algorithm. There will still be about t*log2(t) tuple comparisons in
* total, but we will also need to write and read each tuple once per
- * merge pass. We expect about ceil(logM(r)) merge passes where r is the
+ * merge pass. We expect about ceil(logM(r)) merge passes where r is the
* number of initial runs formed and M is the merge order used by tuplesort.c.
* Since the average initial run should be about twice sort_mem, we have
* disk traffic = 2 * relsize * ceil(logM(p / (2*sort_mem)))
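[Editor's note: a worked example of that formula, with illustrative numbers only: for p = relsize = 1024MB, sort_mem = 16MB, and merge order M = 6, there are about p / (2*sort_mem) = 32 initial runs, ceil(log6(32)) = 2 merge passes, and so roughly 2 * 1024MB * 2 = 4GB of disk traffic.]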
@@ -1214,7 +1214,7 @@ cost_recursive_union(Plan *runion, Plan *nrterm, Plan *rterm)
* accesses (XXX can't we refine that guess?)
*
* By default, we charge two operator evals per tuple comparison, which should
- * be in the right ballpark in most cases. The caller can tweak this by
+ * be in the right ballpark in most cases. The caller can tweak this by
* specifying nonzero comparison_cost; typically that's used for any extra
* work that has to be done to prepare the inputs to the comparison operators.
*
@@ -1338,7 +1338,7 @@ cost_sort(Path *path, PlannerInfo *root,
* Determines and returns the cost of a MergeAppend node.
*
* MergeAppend merges several pre-sorted input streams, using a heap that
- * at any given instant holds the next tuple from each stream. If there
+ * at any given instant holds the next tuple from each stream. If there
* are N streams, we need about N*log2(N) tuple comparisons to construct
* the heap at startup, and then for each output tuple, about log2(N)
* comparisons to delete the top heap entry and another log2(N) comparisons
@@ -1497,7 +1497,7 @@ cost_agg(Path *path, PlannerInfo *root,
* group otherwise. We charge cpu_tuple_cost for each output tuple.
*
* Note: in this cost model, AGG_SORTED and AGG_HASHED have exactly the
- * same total CPU cost, but AGG_SORTED has lower startup cost. If the
+ * same total CPU cost, but AGG_SORTED has lower startup cost. If the
* input path is already sorted appropriately, AGG_SORTED should be
* preferred (since it has no risk of memory overflow). This will happen
* as long as the computed total costs are indeed exactly equal --- but if
@@ -2097,10 +2097,10 @@ initial_cost_mergejoin(PlannerInfo *root, JoinCostWorkspace *workspace,
* Unlike other costsize functions, this routine makes one actual decision:
* whether we should materialize the inner path. We do that either because
* the inner path can't support mark/restore, or because it's cheaper to
- * use an interposed Material node to handle mark/restore. When the decision
+ * use an interposed Material node to handle mark/restore. When the decision
* is cost-based it would be logically cleaner to build and cost two separate
* paths with and without that flag set; but that would require repeating most
- * of the cost calculations, which are not all that cheap. Since the choice
+ * of the cost calculations, which are not all that cheap. Since the choice
* will not affect output pathkeys or startup cost, only total cost, there is
* no possibility of wanting to keep both paths. So it seems best to make
* the decision here and record it in the path's materialize_inner field.
@@ -2164,7 +2164,7 @@ final_cost_mergejoin(PlannerInfo *root, MergePath *path,
qp_qual_cost.per_tuple -= merge_qual_cost.per_tuple;
/*
- * Get approx # tuples passing the mergequals. We use approx_tuple_count
+ * Get approx # tuples passing the mergequals. We use approx_tuple_count
* here because we need an estimate done with JOIN_INNER semantics.
*/
mergejointuples = approx_tuple_count(root, &path->jpath, mergeclauses);
@@ -2178,7 +2178,7 @@ final_cost_mergejoin(PlannerInfo *root, MergePath *path,
* estimated approximately as size of merge join output minus size of
* inner relation. Assume that the distinct key values are 1, 2, ..., and
* denote the number of values of each key in the outer relation as m1,
- * m2, ...; in the inner relation, n1, n2, ... Then we have
+ * m2, ...; in the inner relation, n1, n2, ... Then we have
*
* size of join = m1 * n1 + m2 * n2 + ...
*
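[Editor's note: to make the accounting concrete, with illustrative values only: for outer key counts m = (2, 1) and inner key counts n = (3, 2), size of join = 2*3 + 1*2 = 8, while the inner relation holds 5 tuples; the difference, 3, is exactly the first inner key group being fetched a second time for the second outer tuple of that key.]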
@@ -2189,7 +2189,7 @@ final_cost_mergejoin(PlannerInfo *root, MergePath *path,
* This equation works correctly for outer tuples having no inner match
* (nk = 0), but not for inner tuples having no outer match (mk = 0); we
* are effectively subtracting those from the number of rescanned tuples,
- * when we should not. Can we do better without expensive selectivity
+ * when we should not. Can we do better without expensive selectivity
* computations?
*
* The whole issue is moot if we are working from a unique-ified outer
@@ -2209,7 +2209,7 @@ final_cost_mergejoin(PlannerInfo *root, MergePath *path,
/*
* Decide whether we want to materialize the inner input to shield it from
- * mark/restore and performing re-fetches. Our cost model for regular
+ * mark/restore and performing re-fetches. Our cost model for regular
* re-fetches is that a re-fetch costs the same as an original fetch,
* which is probably an overestimate; but on the other hand we ignore the
* bookkeeping costs of mark/restore. Not clear if it's worth developing
@@ -2302,7 +2302,7 @@ final_cost_mergejoin(PlannerInfo *root, MergePath *path,
/*
* For each tuple that gets through the mergejoin proper, we charge
* cpu_tuple_cost plus the cost of evaluating additional restriction
- * clauses that are to be applied at the join. (This is pessimistic since
+ * clauses that are to be applied at the join. (This is pessimistic since
* not all of the quals may get evaluated at each tuple.)
*
* Note: we could adjust for SEMI/ANTI joins skipping some qual
@@ -2454,7 +2454,7 @@ initial_cost_hashjoin(PlannerInfo *root, JoinCostWorkspace *workspace,
* If inner relation is too big then we will need to "batch" the join,
* which implies writing and reading most of the tuples to disk an extra
* time. Charge seq_page_cost per page, since the I/O should be nice and
- * sequential. Writing the inner rel counts as startup cost, all the rest
+ * sequential. Writing the inner rel counts as startup cost, all the rest
* as run cost.
*/
if (numbatches > 1)
@@ -2685,7 +2685,7 @@ final_cost_hashjoin(PlannerInfo *root, HashPath *path,
/*
* For each tuple that gets through the hashjoin proper, we charge
* cpu_tuple_cost plus the cost of evaluating additional restriction
- * clauses that are to be applied at the join. (This is pessimistic since
+ * clauses that are to be applied at the join. (This is pessimistic since
* not all of the quals may get evaluated at each tuple.)
*/
startup_cost += qp_qual_cost.startup;
@@ -2738,7 +2738,7 @@ cost_subplan(PlannerInfo *root, SubPlan *subplan, Plan *plan)
{
/*
* Otherwise we will be rescanning the subplan output on each
- * evaluation. We need to estimate how much of the output we will
+ * evaluation. We need to estimate how much of the output we will
* actually need to scan. NOTE: this logic should agree with the
* tuple_fraction estimates used by make_subplan() in
* plan/subselect.c.
@@ -2786,10 +2786,10 @@ cost_subplan(PlannerInfo *root, SubPlan *subplan, Plan *plan)
/*
* cost_rescan
* Given a finished Path, estimate the costs of rescanning it after
- * having done so the first time. For some Path types a rescan is
+ * having done so the first time. For some Path types a rescan is
* cheaper than an original scan (if no parameters change), and this
* function embodies knowledge about that. The default is to return
- * the same costs stored in the Path. (Note that the cost estimates
+ * the same costs stored in the Path. (Note that the cost estimates
* actually stored in Paths are always for first scans.)
*
* This function is not currently intended to model effects such as rescans
@@ -2830,7 +2830,7 @@ cost_rescan(PlannerInfo *root, Path *path,
{
/*
* These plan types materialize their final result in a
- * tuplestore or tuplesort object. So the rescan cost is only
+ * tuplestore or tuplesort object. So the rescan cost is only
* cpu_tuple_cost per tuple, unless the result is large enough
* to spill to disk.
*/
@@ -2855,8 +2855,8 @@ cost_rescan(PlannerInfo *root, Path *path,
{
/*
* These plan types not only materialize their results, but do
- * not implement qual filtering or projection. So they are
- * even cheaper to rescan than the ones above. We charge only
+ * not implement qual filtering or projection. So they are
+ * even cheaper to rescan than the ones above. We charge only
* cpu_operator_cost per tuple. (Note: keep that in sync with
* the run_cost charge in cost_sort, and also see comments in
* cost_material before you change it.)
@@ -2997,7 +2997,7 @@ cost_qual_eval_walker(Node *node, cost_qual_eval_context *context)
* evaluation of AND/OR? Probably *not*, because that would make the
* results depend on the clause ordering, and we are not in any position
* to expect that the current ordering of the clauses is the one that's
- * going to end up being used. The above per-RestrictInfo caching would
+ * going to end up being used. The above per-RestrictInfo caching would
* not mix well with trying to re-order clauses anyway.
*
* Another issue that is entirely ignored here is that if a set-returning
@@ -3119,7 +3119,7 @@ cost_qual_eval_walker(Node *node, cost_qual_eval_context *context)
else if (IsA(node, AlternativeSubPlan))
{
/*
- * Arbitrarily use the first alternative plan for costing. (We should
+ * Arbitrarily use the first alternative plan for costing. (We should
* certainly only include one alternative, and we don't yet have
* enough information to know which one the executor is most likely to
* use.)
@@ -3263,13 +3263,13 @@ compute_semi_anti_join_factors(PlannerInfo *root,
/*
* jselec can be interpreted as the fraction of outer-rel rows that have
* any matches (this is true for both SEMI and ANTI cases). And nselec is
- * the fraction of the Cartesian product that matches. So, the average
+ * the fraction of the Cartesian product that matches. So, the average
* number of matches for each outer-rel row that has at least one match is
* nselec * inner_rows / jselec.
*
* Note: it is correct to use the inner rel's "rows" count here, even
* though we might later be considering a parameterized inner path with
- * fewer rows. This is because we have included all the join clauses in
+ * fewer rows. This is because we have included all the join clauses in
* the selectivity estimate.
*/
if (jselec > 0) /* protect against zero divide */
@@ -3597,7 +3597,7 @@ calc_joinrel_size_estimate(PlannerInfo *root,
double nrows;
/*
- * Compute joinclause selectivity. Note that we are only considering
+ * Compute joinclause selectivity. Note that we are only considering
* clauses that become restriction clauses at this join level; we are not
* double-counting them because they were not considered in estimating the
* sizes of the component rels.
@@ -3655,7 +3655,7 @@ calc_joinrel_size_estimate(PlannerInfo *root,
*
* If we are doing an outer join, take that into account: the joinqual
* selectivity has to be clamped using the knowledge that the output must
- * be at least as large as the non-nullable input. However, any
+ * be at least as large as the non-nullable input. However, any
* pushed-down quals are applied after the outer join, so their
* selectivity applies fully.
*
@@ -3726,7 +3726,7 @@ set_subquery_size_estimates(PlannerInfo *root, RelOptInfo *rel)
/*
* Compute per-output-column width estimates by examining the subquery's
- * targetlist. For any output that is a plain Var, get the width estimate
+ * targetlist. For any output that is a plain Var, get the width estimate
* that was made while planning the subquery. Otherwise, we leave it to
* set_rel_width to fill in a datatype-based default estimate.
*/
@@ -3745,7 +3745,7 @@ set_subquery_size_estimates(PlannerInfo *root, RelOptInfo *rel)
* The subquery could be an expansion of a view that's had columns
* added to it since the current query was parsed, so that there are
* non-junk tlist columns in it that don't correspond to any column
- * visible at our query level. Ignore such columns.
+ * visible at our query level. Ignore such columns.
*/
if (te->resno < rel->min_attr || te->resno > rel->max_attr)
continue;
@@ -3882,7 +3882,7 @@ set_cte_size_estimates(PlannerInfo *root, RelOptInfo *rel, Plan *cteplan)
* of estimating baserestrictcost, so we set that, and we also set up width
* using what will be purely datatype-driven estimates from the targetlist.
* There is no way to do anything sane with the rows value, so we just put
- * a default estimate and hope that the wrapper can improve on it. The
+ * a default estimate and hope that the wrapper can improve on it. The
* wrapper's GetForeignRelSize function will be called momentarily.
*
* The rel's targetlist and restrictinfo list must have been constructed
@@ -4003,7 +4003,7 @@ set_rel_width(PlannerInfo *root, RelOptInfo *rel)
{
/*
* We could be looking at an expression pulled up from a subquery,
- * or a ROW() representing a whole-row child Var, etc. Do what we
+ * or a ROW() representing a whole-row child Var, etc. Do what we
* can using the expression type information.
*/
int32 item_width;
diff --git a/src/backend/optimizer/path/equivclass.c b/src/backend/optimizer/path/equivclass.c
index baddd34a741..257563ce6d1 100644
--- a/src/backend/optimizer/path/equivclass.c
+++ b/src/backend/optimizer/path/equivclass.c
@@ -74,7 +74,7 @@ static bool reconsider_full_join_clause(PlannerInfo *root,
*
* If below_outer_join is true, then the clause was found below the nullable
* side of an outer join, so its sides might validly be both NULL rather than
- * strictly equal. We can still deduce equalities in such cases, but we take
+ * strictly equal. We can still deduce equalities in such cases, but we take
* care to mark an EquivalenceClass if it came from any such clauses. Also,
* we have to check that both sides are either pseudo-constants or strict
* functions of Vars, else they might not both go to NULL above the outer
@@ -141,9 +141,9 @@ process_equivalence(PlannerInfo *root, RestrictInfo *restrictinfo,
collation);
/*
- * Reject clauses of the form X=X. These are not as redundant as they
+ * Reject clauses of the form X=X. These are not as redundant as they
* might seem at first glance: assuming the operator is strict, this is
- * really an expensive way to write X IS NOT NULL. So we must not risk
+ * really an expensive way to write X IS NOT NULL. So we must not risk
* just losing the clause, which would be possible if there is already a
* single-element EquivalenceClass containing X. The case is not common
* enough to be worth contorting the EC machinery for, so just reject the
@@ -187,14 +187,14 @@ process_equivalence(PlannerInfo *root, RestrictInfo *restrictinfo,
* Sweep through the existing EquivalenceClasses looking for matches to
* item1 and item2. These are the possible outcomes:
*
- * 1. We find both in the same EC. The equivalence is already known, so
+ * 1. We find both in the same EC. The equivalence is already known, so
* there's nothing to do.
*
* 2. We find both in different ECs. Merge the two ECs together.
*
* 3. We find just one. Add the other to its EC.
*
- * 4. We find neither. Make a new, two-entry EC.
+ * 4. We find neither. Make a new, two-entry EC.
*
* Note: since all ECs are built through this process or the similar
* search in get_eclass_for_sort_expr(), it's impossible that we'd match
@@ -294,7 +294,7 @@ process_equivalence(PlannerInfo *root, RestrictInfo *restrictinfo,
/*
* We add ec2's items to ec1, then set ec2's ec_merged link to point
- * to ec1 and remove ec2 from the eq_classes list. We cannot simply
+ * to ec1 and remove ec2 from the eq_classes list. We cannot simply
* delete ec2 because that could leave dangling pointers in existing
* PathKeys. We leave it behind with a link so that the merged EC can
* be found.
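The merge itself is mostly list surgery; a minimal sketch, assuming the EquivalenceClass fields named in the surrounding comments (the real code also merges ec_derives, ec_relids, and several flags):

    ec1->ec_members = list_concat(ec1->ec_members, ec2->ec_members);
    ec1->ec_sources = list_concat(ec1->ec_sources, ec2->ec_sources);
    root->eq_classes = list_delete_ptr(root->eq_classes, ec2);
    ec2->ec_merged = ec1;    /* forwarding link for existing PathKeys */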
@@ -406,7 +406,7 @@ process_equivalence(PlannerInfo *root, RestrictInfo *restrictinfo,
* Also, the expression's exposed collation must match the EC's collation.
* This is important because in comparisons like "foo < bar COLLATE baz",
* only one of the expressions has the correct exposed collation as we receive
- * it from the parser. Forcing both of them to have it ensures that all
+ * it from the parser. Forcing both of them to have it ensures that all
* variant spellings of such a construct behave the same. Again, we can
* stick on a RelabelType to force the right exposed collation. (It might
* work to not label the collation at all in EC members, but this is risky
@@ -511,22 +511,22 @@ add_eq_member(EquivalenceClass *ec, Expr *expr, Relids relids,
* single-member EquivalenceClass for it.
*
* expr is the expression, and nullable_relids is the set of base relids
- * that are potentially nullable below it. We actually only care about
+ * that are potentially nullable below it. We actually only care about
* the set of such relids that are used in the expression; but for caller
* convenience, we perform that intersection step here. The caller need
* only be sure that nullable_relids doesn't omit any nullable rels that
* might appear in the expr.
*
* sortref is the SortGroupRef of the originating SortGroupClause, if any,
- * or zero if not. (It should never be zero if the expression is volatile!)
+ * or zero if not. (It should never be zero if the expression is volatile!)
*
* If rel is not NULL, it identifies a specific relation we're considering
* a path for, and indicates that child EC members for that relation can be
- * considered. Otherwise child members are ignored. (Note: since child EC
+ * considered. Otherwise child members are ignored. (Note: since child EC
* members aren't guaranteed unique, a non-NULL value means that there could
* be more than one EC that matches the expression; if so it's order-dependent
* which one you get. This is annoying but it only happens in corner cases,
- * so for now we live with just reporting the first match. See also
+ * so for now we live with just reporting the first match. See also
* generate_implied_equalities_for_column and match_pathkeys_to_index.)
*
* If create_it is TRUE, we'll build a new EquivalenceClass when there is no
@@ -680,7 +680,7 @@ get_eclass_for_sort_expr(PlannerInfo *root,
*
* When an EC contains pseudoconstants, our strategy is to generate
* "member = const1" clauses where const1 is the first constant member, for
- * every other member (including other constants). If we are able to do this
+ * every other member (including other constants). If we are able to do this
* then we don't need any "var = var" comparisons because we've successfully
* constrained all the vars at their points of creation. If we fail to
* generate any of these clauses due to lack of cross-type operators, we fall
@@ -705,7 +705,7 @@ get_eclass_for_sort_expr(PlannerInfo *root,
* "WHERE a.x = b.y AND b.y = a.z", the scheme breaks down if we cannot
* generate "a.x = a.z" as a restriction clause for A.) In this case we mark
* the EC "ec_broken" and fall back to regurgitating its original source
- * RestrictInfos at appropriate times. We do not try to retract any derived
+ * RestrictInfos at appropriate times. We do not try to retract any derived
* clauses already generated from the broken EC, so the resulting plan could
* be poor due to bad selectivity estimates caused by redundant clauses. But
* the correct solution to that is to fix the opfamilies ...
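A concrete instance of both strategies, as an illustration of my own rather than text from the file:

    /*
     * Given      WHERE a.x = b.y AND b.y = 42
     * the EC is  {a.x, b.y, 42}; taking const1 = 42 we emit
     *    a.x = 42        -- restriction clause on a
     *    b.y = 42        -- restriction clause on b
     * and no "a.x = b.y" joinclause is needed.  If a usable "="
     * operator were missing for one of those clauses, the EC would
     * be marked ec_broken and its source clauses reused as-is.
     */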
@@ -968,8 +968,8 @@ generate_base_implied_equalities_broken(PlannerInfo *root,
* built any join RelOptInfos.
*
* An annoying special case for parameterized scans is that the inner rel can
- * be an appendrel child (an "other rel"). In this case we must generate
- * appropriate clauses using child EC members. add_child_rel_equivalences
+ * be an appendrel child (an "other rel"). In this case we must generate
+ * appropriate clauses using child EC members. add_child_rel_equivalences
* must already have been done for the child rel.
*
* The results are sufficient for use in merge, hash, and plain nestloop join
@@ -983,7 +983,7 @@ generate_base_implied_equalities_broken(PlannerInfo *root,
* we consider different join paths, we avoid generating multiple copies:
* whenever we select a particular pair of EquivalenceMembers to join,
* we check to see if the pair matches any original clause (in ec_sources)
- * or previously-built clause (in ec_derives). This saves memory and allows
+ * or previously-built clause (in ec_derives). This saves memory and allows
* re-use of information cached in RestrictInfos.
*
* join_relids should always equal bms_union(outer_relids, inner_rel->relids).
@@ -1079,7 +1079,7 @@ generate_join_implied_equalities_normal(PlannerInfo *root,
* First, scan the EC to identify member values that are computable at the
* outer rel, at the inner rel, or at this relation but not in either
* input rel. The outer-rel members should already be enforced equal,
- * likewise for the inner-rel members. We'll need to create clauses to
+ * likewise for the inner-rel members. We'll need to create clauses to
* enforce that any newly computable members are all equal to each other
* as well as to at least one input member, plus enforce at least one
* outer-rel member equal to at least one inner-rel member.
@@ -1105,7 +1105,7 @@ generate_join_implied_equalities_normal(PlannerInfo *root,
}
/*
- * First, select the joinclause if needed. We can equate any one outer
+ * First, select the joinclause if needed. We can equate any one outer
* member to any one inner member, but we have to find a datatype
* combination for which an opfamily member operator exists. If we have
* choices, we prefer simple Var members (possibly with RelabelType) since
@@ -1323,8 +1323,8 @@ create_join_clause(PlannerInfo *root,
/*
* Search to see if we already built a RestrictInfo for this pair of
- * EquivalenceMembers. We can use either original source clauses or
- * previously-derived clauses. The check on opno is probably redundant,
+ * EquivalenceMembers. We can use either original source clauses or
+ * previously-derived clauses. The check on opno is probably redundant,
* but be safe ...
*/
foreach(lc, ec->ec_sources)
@@ -1455,7 +1455,7 @@ create_join_clause(PlannerInfo *root,
*
* Outer join clauses that are marked outerjoin_delayed are special: this
* condition means that one or both VARs might go to null due to a lower
- * outer join. We can still push a constant through the clause, but only
+ * outer join. We can still push a constant through the clause, but only
* if its operator is strict; and we *have to* throw the clause back into
* regular joinclause processing. By keeping the strict join clause,
* we ensure that any null-extended rows that are mistakenly generated due
@@ -1649,7 +1649,7 @@ reconsider_outer_join_clause(PlannerInfo *root, RestrictInfo *rinfo,
/*
* Yes it does! Try to generate a clause INNERVAR = CONSTANT for each
- * CONSTANT in the EC. Note that we must succeed with at least one
+ * CONSTANT in the EC. Note that we must succeed with at least one
* constant before we can decide to throw away the outer-join clause.
*/
match = false;
@@ -2051,7 +2051,7 @@ mutate_eclass_expressions(PlannerInfo *root,
* is a redundant list of clauses equating the table/index column to each of
* the other-relation values it is known to be equal to. Any one of
* these clauses can be used to create a parameterized path, and there
- * is no value in using more than one. (But it *is* worthwhile to create
+ * is no value in using more than one. (But it *is* worthwhile to create
* a separate parameterized path for each one, since that leads to different
* join orders.)
*
@@ -2098,12 +2098,12 @@ generate_implied_equalities_for_column(PlannerInfo *root,
continue;
/*
- * Scan members, looking for a match to the target column. Note that
+ * Scan members, looking for a match to the target column. Note that
* child EC members are considered, but only when they belong to the
* target relation. (Unlike regular members, the same expression
* could be a child member of more than one EC. Therefore, it's
* potentially order-dependent which EC a child relation's target
- * column gets matched to. This is annoying but it only happens in
+ * column gets matched to. This is annoying but it only happens in
* corner cases, so for now we live with just reporting the first
* match. See also get_eclass_for_sort_expr.)
*/
@@ -2182,7 +2182,7 @@ generate_implied_equalities_for_column(PlannerInfo *root,
* a joinclause involving the two given relations.
*
* This is essentially a very cut-down version of
- * generate_join_implied_equalities(). Note it's OK to occasionally say "yes"
+ * generate_join_implied_equalities(). Note it's OK to occasionally say "yes"
* incorrectly. Hence we don't bother with details like whether the lack of a
* cross-type operator might prevent the clause from actually being generated.
*/
@@ -2218,7 +2218,7 @@ have_relevant_eclass_joinclause(PlannerInfo *root,
* OK as a possibly-overoptimistic heuristic.
*
* We don't test ec_has_const either, even though a const eclass won't
- * generate real join clauses. This is because if we had "WHERE a.x =
+ * generate real join clauses. This is because if we had "WHERE a.x =
* b.y and a.x = 42", it is worth considering a join between a and b,
* since the join result is likely to be small even though it'll end
* up being an unqualified nestloop.
@@ -2275,7 +2275,7 @@ has_relevant_eclass_joinclause(PlannerInfo *root, RelOptInfo *rel1)
* against the specified relation.
*
* This is just a heuristic test and doesn't have to be exact; it's better
- * to say "yes" incorrectly than "no". Hence we don't bother with details
+ * to say "yes" incorrectly than "no". Hence we don't bother with details
* like whether the lack of a cross-type operator might prevent the clause
* from actually being generated.
*/
@@ -2296,7 +2296,7 @@ eclass_useful_for_merging(EquivalenceClass *eclass,
/*
* Note we don't test ec_broken; if we did, we'd need a separate code path
- * to look through ec_sources. Checking the members anyway is OK as a
+ * to look through ec_sources. Checking the members anyway is OK as a
* possibly-overoptimistic heuristic.
*/
diff --git a/src/backend/optimizer/path/indxpath.c b/src/backend/optimizer/path/indxpath.c
index 606734a1221..3387ae2cae0 100644
--- a/src/backend/optimizer/path/indxpath.c
+++ b/src/backend/optimizer/path/indxpath.c
@@ -221,7 +221,7 @@ static Const *string_to_const(const char *str, Oid datatype);
* Note: in cases involving LATERAL references in the relation's tlist, it's
* possible that rel->lateral_relids is nonempty. Currently, we include
* lateral_relids into the parameterization reported for each path, but don't
- * take it into account otherwise. The fact that any such rels *must* be
+ * take it into account otherwise. The fact that any such rels *must* be
* available as parameter sources perhaps should influence our choices of
* index quals ... but for now, it doesn't seem worth troubling over.
* In particular, comments below about "unparameterized" paths should be read
@@ -269,7 +269,7 @@ create_index_paths(PlannerInfo *root, RelOptInfo *rel)
match_restriction_clauses_to_index(rel, index, &rclauseset);
/*
- * Build index paths from the restriction clauses. These will be
+ * Build index paths from the restriction clauses. These will be
* non-parameterized paths. Plain paths go directly to add_path(),
* bitmap paths are added to bitindexpaths to be handled below.
*/
@@ -277,10 +277,10 @@ create_index_paths(PlannerInfo *root, RelOptInfo *rel)
&bitindexpaths);
/*
- * Identify the join clauses that can match the index. For the moment
- * we keep them separate from the restriction clauses. Note that this
+ * Identify the join clauses that can match the index. For the moment
+ * we keep them separate from the restriction clauses. Note that this
* step finds only "loose" join clauses that have not been merged into
- * EquivalenceClasses. Also, collect join OR clauses for later.
+ * EquivalenceClasses. Also, collect join OR clauses for later.
*/
MemSet(&jclauseset, 0, sizeof(jclauseset));
match_join_clauses_to_index(root, rel, index,
@@ -344,9 +344,9 @@ create_index_paths(PlannerInfo *root, RelOptInfo *rel)
/*
* Likewise, if we found anything usable, generate BitmapHeapPaths for the
- * most promising combinations of join bitmap index paths. Our strategy
+ * most promising combinations of join bitmap index paths. Our strategy
* is to generate one such path for each distinct parameterization seen
- * among the available bitmap index paths. This may look pretty
+ * among the available bitmap index paths. This may look pretty
* expensive, but usually there won't be very many distinct
* parameterizations. (This logic is quite similar to that in
* consider_index_join_clauses, but we're working with whole paths not
@@ -462,7 +462,7 @@ consider_index_join_clauses(PlannerInfo *root, RelOptInfo *rel,
*
* For simplicity in selecting relevant clauses, we represent each set of
* outer rels as a maximum set of clause_relids --- that is, the indexed
- * relation itself is also included in the relids set. considered_relids
+ * relation itself is also included in the relids set. considered_relids
* lists all relids sets we've already tried.
*/
for (indexcol = 0; indexcol < index->ncolumns; indexcol++)
@@ -551,7 +551,7 @@ consider_index_join_outer_rels(PlannerInfo *root, RelOptInfo *rel,
/*
* If this clause was derived from an equivalence class, the
* clause list may contain other clauses derived from the same
- * eclass. We should not consider that combining this clause with
+ * eclass. We should not consider that combining this clause with
* one of those clauses generates a usefully different
* parameterization; so skip if any clause derived from the same
* eclass would already have been included when using oldrelids.
@@ -634,9 +634,9 @@ get_join_index_paths(PlannerInfo *root, RelOptInfo *rel,
}
/*
- * Add applicable eclass join clauses. The clauses generated for each
+ * Add applicable eclass join clauses. The clauses generated for each
* column are redundant (cf generate_implied_equalities_for_column),
- * so we need at most one. This is the only exception to the general
+ * so we need at most one. This is the only exception to the general
* rule of using all available index clauses.
*/
foreach(lc, eclauseset->indexclauses[indexcol])
@@ -723,7 +723,7 @@ bms_equal_any(Relids relids, List *relids_list)
* bitmap indexpaths are added to *bitindexpaths for later processing.
*
* This is a fairly simple frontend to build_index_paths(). Its reason for
- * existence is mainly to handle ScalarArrayOpExpr quals properly. If the
+ * existence is mainly to handle ScalarArrayOpExpr quals properly. If the
* index AM supports them natively, we should just include them in simple
* index paths. If not, we should exclude them while building simple index
* paths, and then make a separate attempt to include them in bitmap paths.
@@ -737,7 +737,7 @@ get_index_paths(PlannerInfo *root, RelOptInfo *rel,
ListCell *lc;
/*
- * Build simple index paths using the clauses. Allow ScalarArrayOpExpr
+ * Build simple index paths using the clauses. Allow ScalarArrayOpExpr
* clauses only if the index AM supports them natively.
*/
indexpaths = build_index_paths(root, rel,
@@ -749,7 +749,7 @@ get_index_paths(PlannerInfo *root, RelOptInfo *rel,
* Submit all the ones that can form plain IndexScan plans to add_path. (A
* plain IndexPath can represent either a plain IndexScan or an
* IndexOnlyScan, but for our purposes here that distinction does not
- * matter. However, some of the indexes might support only bitmap scans,
+ * matter. However, some of the indexes might support only bitmap scans,
* and those we mustn't submit to add_path here.)
*
* Also, pick out the ones that are usable as bitmap scans. For that, we
@@ -793,7 +793,7 @@ get_index_paths(PlannerInfo *root, RelOptInfo *rel,
* We return a list of paths because (1) this routine checks some cases
* that should cause us to not generate any IndexPath, and (2) in some
* cases we want to consider both a forward and a backward scan, so as
- * to obtain both sort orders. Note that the paths are just returned
+ * to obtain both sort orders. Note that the paths are just returned
* to the caller and not immediately fed to add_path().
*
* At top level, useful_predicate should be exactly the index's predOK flag
@@ -976,7 +976,7 @@ build_index_paths(PlannerInfo *root, RelOptInfo *rel,
}
/*
- * 3. Check if an index-only scan is possible. If we're not building
+ * 3. Check if an index-only scan is possible. If we're not building
* plain indexscans, this isn't relevant since bitmap scans don't support
* index data retrieval anyway.
*/
@@ -1081,13 +1081,13 @@ build_paths_for_OR(PlannerInfo *root, RelOptInfo *rel,
continue;
/*
- * Ignore partial indexes that do not match the query. If a partial
+ * Ignore partial indexes that do not match the query. If a partial
* index is marked predOK then we know it's OK. Otherwise, we have to
* test whether the added clauses are sufficient to imply the
* predicate. If so, we can use the index in the current context.
*
* We set useful_predicate to true iff the predicate was proven using
- * the current set of clauses. This is needed to prevent matching a
+ * the current set of clauses. This is needed to prevent matching a
* predOK index to an arm of an OR, which would be a legal but
* pointlessly inefficient plan. (A better plan will be generated by
* just scanning the predOK index alone, no OR.)
@@ -1270,7 +1270,7 @@ generate_bitmap_or_paths(PlannerInfo *root, RelOptInfo *rel,
*
* This is a helper for generate_bitmap_or_paths(). We leave OR clauses
* in the list whether they are joins or not, since we might be able to
- * extract a restriction item from an OR list. It's safe to leave such
+ * extract a restriction item from an OR list. It's safe to leave such
* clauses in the list because match_clauses_to_index() will ignore them,
* so there's no harm in passing such clauses to build_paths_for_OR().
*/
@@ -1298,7 +1298,7 @@ drop_indexable_join_clauses(RelOptInfo *rel, List *clauses)
* Given a nonempty list of bitmap paths, AND them into one path.
*
* This is a nontrivial decision since we can legally use any subset of the
- * given path set. We want to choose a good tradeoff between selectivity
+ * given path set. We want to choose a good tradeoff between selectivity
* and cost of computing the bitmap.
*
* The result is either a single one of the inputs, or a BitmapAndPath
@@ -1325,12 +1325,12 @@ choose_bitmap_and(PlannerInfo *root, RelOptInfo *rel, List *paths)
* In theory we should consider every nonempty subset of the given paths.
* In practice that seems like overkill, given the crude nature of the
* estimates, not to mention the possible effects of higher-level AND and
- * OR clauses. Moreover, it's completely impractical if there are a large
+ * OR clauses. Moreover, it's completely impractical if there are a large
* number of paths, since the work would grow as O(2^N).
*
* As a heuristic, we first check for paths using exactly the same sets of
* WHERE clauses + index predicate conditions, and reject all but the
- * cheapest-to-scan in any such group. This primarily gets rid of indexes
+ * cheapest-to-scan in any such group. This primarily gets rid of indexes
* that include the interesting columns but also irrelevant columns. (In
* situations where the DBA has gone overboard on creating variant
* indexes, this can make for a very large reduction in the number of
@@ -1350,14 +1350,14 @@ choose_bitmap_and(PlannerInfo *root, RelOptInfo *rel, List *paths)
* costsize.c and clausesel.c aren't very smart about redundant clauses.
* They will usually double-count the redundant clauses, producing a
* too-small selectivity that makes a redundant AND step look like it
- * reduces the total cost. Perhaps someday that code will be smarter and
+ * reduces the total cost. Perhaps someday that code will be smarter and
* we can remove this limitation. (But note that this also defends
* against flat-out duplicate input paths, which can happen because
* match_join_clauses_to_index will find the same OR join clauses that
* create_or_index_quals has pulled OR restriction clauses out of.)
*
* For the same reason, we reject AND combinations in which an index
- * predicate clause duplicates another clause. Here we find it necessary
+ * predicate clause duplicates another clause. Here we find it necessary
* to be even stricter: we'll reject a partial index if any of its
* predicate clauses are implied by the set of WHERE clauses and predicate
* clauses used so far. This covers cases such as a condition "x = 42"
@@ -1420,7 +1420,7 @@ choose_bitmap_and(PlannerInfo *root, RelOptInfo *rel, List *paths)
/*
* For each surviving index, consider it as an "AND group leader", and see
* whether adding on any of the later indexes results in an AND path with
- * cheaper total cost than before. Then take the cheapest AND group.
+ * cheaper total cost than before. Then take the cheapest AND group.
*/
for (i = 0; i < npaths; i++)
{
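Schematically, the search described above runs like this (pseudocode of mine, not the function body):

    /*
     * sort surviving paths by estimated cost;
     * for each path i:                      -- the AND group leader
     *     group = {i}; best = cost(group);
     *     for each later path j:
     *         if cost(group + {j}) < best:
     *             group += {j}; best = cost(group);   -- keep j
     * answer = cheapest group found over all leaders;
     */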
@@ -1753,7 +1753,7 @@ find_indexpath_quals(Path *bitmapqual, List **quals, List **preds)
/*
* find_list_position
* Return the given node's position (counting from 0) in the given
- * list of nodes. If it's not equal() to any existing list member,
+ * list of nodes. If it's not equal() to any existing list member,
* add it at the end, and return that position.
*/
static int
@@ -1859,7 +1859,7 @@ check_index_only(RelOptInfo *rel, IndexOptInfo *index)
* Since we produce parameterized paths before we've begun to generate join
* relations, it's impossible to predict exactly how many times a parameterized
* path will be iterated; we don't know the size of the relation that will be
- * on the outside of the nestloop. However, we should try to account for
+ * on the outside of the nestloop. However, we should try to account for
* multiple iterations somehow in costing the path. The heuristic embodied
* here is to use the rowcount of the smallest other base relation needed in
* the join clauses used by the path. (We could alternatively consider the
@@ -2074,7 +2074,7 @@ match_clause_to_index(IndexOptInfo *index,
* doesn't involve a volatile function or a Var of the index's relation.
* In particular, Vars belonging to other relations of the query are
* accepted here, since a clause of that form can be used in a
- * parameterized indexscan. It's the responsibility of higher code levels
+ * parameterized indexscan. It's the responsibility of higher code levels
* to manage restriction and join clauses appropriately.
*
* Note: we do need to check for Vars of the index's relation on the
@@ -2098,7 +2098,7 @@ match_clause_to_index(IndexOptInfo *index,
* It is also possible to match RowCompareExpr clauses to indexes (but
* currently, only btree indexes handle this). In this routine we will
* report a match if the first column of the row comparison matches the
- * target index column. This is sufficient to guarantee that some index
+ * target index column. This is sufficient to guarantee that some index
* condition can be constructed from the RowCompareExpr --- whether the
* remaining columns match the index too is considered in
* adjust_rowcompare_for_index().
@@ -2136,7 +2136,7 @@ match_clause_to_indexcol(IndexOptInfo *index,
bool plain_op;
/*
- * Never match pseudoconstants to indexes. (Normally this could not
+ * Never match pseudoconstants to indexes. (Normally this could not
* happen anyway, since a pseudoconstant clause couldn't contain a Var,
* but what if someone builds an expression index on a constant? It's not
* totally unreasonable to do so with a partial index, either.)
@@ -2420,7 +2420,7 @@ match_pathkeys_to_index(IndexOptInfo *index, List *pathkeys,
* We allow any column of the index to match each pathkey; they
* don't have to match left-to-right as you might expect. This is
* correct for GiST, which is the sole existing AM supporting
- * amcanorderbyop. We might need different logic in future for
+ * amcanorderbyop. We might need different logic in future for
* other implementations.
*/
for (indexcol = 0; indexcol < index->ncolumns; indexcol++)
@@ -2471,7 +2471,7 @@ match_pathkeys_to_index(IndexOptInfo *index, List *pathkeys,
* Note that we currently do not consider the collation of the ordering
* operator's result. In practical cases the result type will be numeric
* and thus have no collation, and it's not very clear what to match to
- * if it did have a collation. The index's collation should match the
+ * if it did have a collation. The index's collation should match the
* ordering operator's input collation, not its result.
*
* If successful, return 'clause' as-is if the indexkey is on the left,
@@ -2721,7 +2721,7 @@ ec_member_matches_indexcol(PlannerInfo *root, RelOptInfo *rel,
* if it is true.
* 2. A list of expressions in this relation, and a corresponding list of
* equality operators. The caller must have already checked that the operators
- * represent equality. (Note: the operators could be cross-type; the
+ * represent equality. (Note: the operators could be cross-type; the
* expressions should correspond to their RHS inputs.)
*
* The caller need only supply equality conditions arising from joins;
@@ -2910,7 +2910,7 @@ match_index_to_operand(Node *operand,
int indkey;
/*
- * Ignore any RelabelType node above the operand. This is needed to be
+ * Ignore any RelabelType node above the operand. This is needed to be
* able to apply indexscanning in binary-compatible-operator cases. Note:
* we can assume there is at most one RelabelType node;
* eval_const_expressions() will have simplified if more than one.
@@ -2977,10 +2977,10 @@ match_index_to_operand(Node *operand,
* indexscan machinery. The key idea is that these operators allow us
* to derive approximate indexscan qual clauses, such that any tuples
* that pass the operator clause itself must also satisfy the simpler
- * indexscan condition(s). Then we can use the indexscan machinery
+ * indexscan condition(s). Then we can use the indexscan machinery
* to avoid scanning as much of the table as we'd otherwise have to,
* while applying the original operator as a qpqual condition to ensure
- * we deliver only the tuples we want. (In essence, we're using a regular
+ * we deliver only the tuples we want. (In essence, we're using a regular
* index as if it were a lossy index.)
*
* An example of what we're doing is
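The classic instance of such a derived qual is a prefix LIKE; my illustration of the kind of transformation the comment is describing:

    /*
     *    WHERE col LIKE 'abc%'
     * implies the btree-indexable range
     *    col >= 'abc' AND col < 'abd'
     * which becomes the indexqual, while the original LIKE clause is
     * kept as a qpqual recheck: the regular index is used as if it
     * were lossy, exactly as described above.
     */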
@@ -2994,7 +2994,7 @@ match_index_to_operand(Node *operand,
*
* Another thing that we do with this machinery is to provide special
* smarts for "boolean" indexes (that is, indexes on boolean columns
- * that support boolean equality). We can transform a plain reference
+ * that support boolean equality). We can transform a plain reference
* to the indexkey into "indexkey = true", or "NOT indexkey" into
* "indexkey = false", so as to make the expression indexable using the
* regular index operators. (As of Postgres 8.1, we must do this here
@@ -3416,7 +3416,7 @@ expand_indexqual_opclause(RestrictInfo *rinfo, Oid opfamily, Oid idxcollation)
/*
* LIKE and regex operators are not members of any btree index opfamily,
* but they can be members of opfamilies for more exotic index types such
- * as GIN. Therefore, we should only do expansion if the operator is
+ * as GIN. Therefore, we should only do expansion if the operator is
* actually not in the opfamily. But checking that requires a syscache
* lookup, so it's best to first see if the operator is one we are
* interested in.
@@ -3534,7 +3534,7 @@ expand_indexqual_rowcompare(RestrictInfo *rinfo,
* column matches) or a simple OpExpr (if the first-column match is all
* there is). In these cases the modified clause is always "<=" or ">="
* even when the original was "<" or ">" --- this is necessary to match all
- * the rows that could match the original. (We are essentially building a
+ * the rows that could match the original. (We are essentially building a
* lossy version of the row comparison when we do this.)
*
* *indexcolnos receives an integer list of the index column numbers (zero
diff --git a/src/backend/optimizer/path/joinpath.c b/src/backend/optimizer/path/joinpath.c
index 5b477e52d3f..c02c9052262 100644
--- a/src/backend/optimizer/path/joinpath.c
+++ b/src/backend/optimizer/path/joinpath.c
@@ -107,7 +107,7 @@ add_paths_to_joinrel(PlannerInfo *root,
/*
* If it's SEMI or ANTI join, compute correction factors for cost
- * estimation. These will be the same for all paths.
+ * estimation. These will be the same for all paths.
*/
if (jointype == JOIN_SEMI || jointype == JOIN_ANTI)
compute_semi_anti_join_factors(root, outerrel, innerrel,
@@ -122,7 +122,7 @@ add_paths_to_joinrel(PlannerInfo *root,
* to the parameter source rel instead of joining to the other input rel.
* This restriction reduces the number of parameterized paths we have to
* deal with at higher join levels, without compromising the quality of
- * the resulting plan. We express the restriction as a Relids set that
+ * the resulting plan. We express the restriction as a Relids set that
* must overlap the parameterization of any proposed join path.
*/
foreach(lc, root->join_info_list)
@@ -155,7 +155,7 @@ add_paths_to_joinrel(PlannerInfo *root,
* However, when a LATERAL subquery is involved, we have to be a bit
* laxer, because there will simply not be any paths for the joinrel that
* aren't parameterized by whatever the subquery is parameterized by,
- * unless its parameterization is resolved within the joinrel. Hence, add
+ * unless its parameterization is resolved within the joinrel. Hence, add
* to param_source_rels anything that is laterally referenced in either
* input and is not in the join already.
*/
@@ -208,7 +208,7 @@ add_paths_to_joinrel(PlannerInfo *root,
/*
* 1. Consider mergejoin paths where both relations must be explicitly
- * sorted. Skip this if we can't mergejoin.
+ * sorted. Skip this if we can't mergejoin.
*/
if (mergejoin_allowed)
sort_inner_and_outer(root, joinrel, outerrel, innerrel,
@@ -233,7 +233,7 @@ add_paths_to_joinrel(PlannerInfo *root,
/*
* 3. Consider paths where the inner relation need not be explicitly
- * sorted. This includes mergejoins only (nestloops were already built in
+ * sorted. This includes mergejoins only (nestloops were already built in
* match_unsorted_outer).
*
* Diked out as redundant 2/13/2000 -- tgl. There isn't any really
@@ -507,7 +507,7 @@ try_hashjoin_path(PlannerInfo *root,
* We already know that the clause is a binary opclause referencing only the
* rels in the current join. The point here is to check whether it has the
* form "outerrel_expr op innerrel_expr" or "innerrel_expr op outerrel_expr",
- * rather than mixing outer and inner vars on either side. If it matches,
+ * rather than mixing outer and inner vars on either side. If it matches,
* we set the transient flag outer_is_left to identify which side is which.
*/
static inline bool
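The hunk stops at the function's opening line; the test it goes on to implement is small enough to sketch here (field names from RestrictInfo; a close paraphrase, not the verbatim body):

    if (bms_is_subset(rinfo->left_relids, outerrel->relids) &&
        bms_is_subset(rinfo->right_relids, innerrel->relids))
        rinfo->outer_is_left = true;     /* outer op inner */
    else if (bms_is_subset(rinfo->left_relids, innerrel->relids) &&
             bms_is_subset(rinfo->right_relids, outerrel->relids))
        rinfo->outer_is_left = false;    /* inner op outer */
    else
        return false;                    /* sides are mixed; unusable */
    return true;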
@@ -572,7 +572,7 @@ sort_inner_and_outer(PlannerInfo *root,
* sort.
*
* This function intentionally does not consider parameterized input
- * paths, except when the cheapest-total is parameterized. If we did so,
+ * paths, except when the cheapest-total is parameterized. If we did so,
* we'd have a combinatorial explosion of mergejoin paths of dubious
* value. This interacts with decisions elsewhere that also discriminate
* against mergejoins with parameterized inputs; see comments in
@@ -619,7 +619,7 @@ sort_inner_and_outer(PlannerInfo *root,
*
* Actually, it's not quite true that every mergeclause ordering will
* generate a different path order, because some of the clauses may be
- * partially redundant (refer to the same EquivalenceClasses). Therefore,
+ * partially redundant (refer to the same EquivalenceClasses). Therefore,
* what we do is convert the mergeclause list to a list of canonical
* pathkeys, and then consider different orderings of the pathkeys.
*
@@ -713,7 +713,7 @@ sort_inner_and_outer(PlannerInfo *root,
* cheapest-total inner-indexscan path (if any), and one on the
* cheapest-startup inner-indexscan path (if different).
*
- * We also consider mergejoins if mergejoin clauses are available. We have
+ * We also consider mergejoins if mergejoin clauses are available. We have
* two ways to generate the inner path for a mergejoin: sort the cheapest
* inner path, or use an inner path that is already suitably ordered for the
* merge. If we have several mergeclauses, it could be that there is no inner
@@ -845,8 +845,8 @@ match_unsorted_outer(PlannerInfo *root,
/*
* If we need to unique-ify the outer path, it's pointless to consider
- * any but the cheapest outer. (XXX we don't consider parameterized
- * outers, nor inners, for unique-ified cases. Should we?)
+ * any but the cheapest outer. (XXX we don't consider parameterized
+ * outers, nor inners, for unique-ified cases. Should we?)
*/
if (save_jointype == JOIN_UNIQUE_OUTER)
{
@@ -887,7 +887,7 @@ match_unsorted_outer(PlannerInfo *root,
{
/*
* Consider nestloop joins using this outer path and various
- * available paths for the inner relation. We consider the
+ * available paths for the inner relation. We consider the
* cheapest-total paths for each available parameterization of the
* inner relation, including the unparameterized case.
*/
@@ -1042,7 +1042,7 @@ match_unsorted_outer(PlannerInfo *root,
/*
* Look for an inner path ordered well enough for the first
- * 'sortkeycnt' innersortkeys. NB: trialsortkeys list is modified
+ * 'sortkeycnt' innersortkeys. NB: trialsortkeys list is modified
* destructively, which is why we made a copy...
*/
trialsortkeys = list_truncate(trialsortkeys, sortkeycnt);
diff --git a/src/backend/optimizer/path/joinrels.c b/src/backend/optimizer/path/joinrels.c
index d627f9e130c..fd6a4f19b1d 100644
--- a/src/backend/optimizer/path/joinrels.c
+++ b/src/backend/optimizer/path/joinrels.c
@@ -213,7 +213,7 @@ join_search_one_level(PlannerInfo *root, int level)
/*----------
* When special joins are involved, there may be no legal way
- * to make an N-way join for some values of N. For example consider
+ * to make an N-way join for some values of N. For example consider
*
* SELECT ... FROM t1 WHERE
* x IN (SELECT ... FROM t2,t3 WHERE ...) AND
@@ -337,7 +337,7 @@ join_is_legal(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2,
ListCell *l;
/*
- * Ensure output params are set on failure return. This is just to
+ * Ensure output params are set on failure return. This is just to
* suppress uninitialized-variable warnings from overly anal compilers.
*/
*sjinfo_p = NULL;
@@ -345,7 +345,7 @@ join_is_legal(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2,
/*
* If we have any special joins, the proposed join might be illegal; and
- * in any case we have to determine its join type. Scan the join info
+ * in any case we have to determine its join type. Scan the join info
* list for conflicts.
*/
match_sjinfo = NULL;
@@ -609,7 +609,7 @@ make_join_rel(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2)
/*
* If it's a plain inner join, then we won't have found anything in
- * join_info_list. Make up a SpecialJoinInfo so that selectivity
+ * join_info_list. Make up a SpecialJoinInfo so that selectivity
* estimation functions will know what's being joined.
*/
if (sjinfo == NULL)
@@ -916,7 +916,7 @@ have_join_order_restriction(PlannerInfo *root,
*
* Essentially, this tests whether have_join_order_restriction() could
* succeed with this rel and some other one. It's OK if we sometimes
- * say "true" incorrectly. (Therefore, we don't bother with the relatively
+ * say "true" incorrectly. (Therefore, we don't bother with the relatively
* expensive has_legal_joinclause test.)
*/
static bool
@@ -1027,7 +1027,7 @@ is_dummy_rel(RelOptInfo *rel)
* dummy.
*
* Also, when called during GEQO join planning, we are in a short-lived
- * memory context. We must make sure that the dummy path attached to a
+ * memory context. We must make sure that the dummy path attached to a
* baserel survives the GEQO cycle, else the baserel is trashed for future
* GEQO cycles. On the other hand, when we are marking a joinrel during GEQO,
* we don't want the dummy path to clutter the main planning context. Upshot
diff --git a/src/backend/optimizer/path/orindxpath.c b/src/backend/optimizer/path/orindxpath.c
index 16f29d350fd..8cebe8aa4b0 100644
--- a/src/backend/optimizer/path/orindxpath.c
+++ b/src/backend/optimizer/path/orindxpath.c
@@ -41,7 +41,7 @@
*
* The added quals are partially redundant with the original OR, and therefore
* will cause the size of the joinrel to be underestimated when it is finally
- * formed. (This would be true of a full transformation to CNF as well; the
+ * formed. (This would be true of a full transformation to CNF as well; the
* fault is not really in the transformation, but in clauselist_selectivity's
* inability to recognize redundant conditions.) To minimize the collateral
* damage, we want to minimize the number of quals added. Therefore we do
@@ -56,7 +56,7 @@
* it is finally formed. This is a MAJOR HACK: it depends on the fact
* that clause selectivities are cached and on the fact that the same
* RestrictInfo node will appear in every joininfo list that might be used
- * when the joinrel is formed. And it probably isn't right in cases where
+ * when the joinrel is formed. And it probably isn't right in cases where
* the size estimation is nonlinear (i.e., outer and IN joins). But it
* beats not doing anything.
*
@@ -109,7 +109,7 @@ create_or_index_quals(PlannerInfo *root, RelOptInfo *rel)
* Use the generate_bitmap_or_paths() machinery to estimate the
* value of each OR clause. We can use regular restriction
* clauses along with the OR clause contents to generate
- * indexquals. We pass restriction_only = true so that any
+ * indexquals. We pass restriction_only = true so that any
* sub-clauses that are actually joins will be ignored.
*/
List *orpaths;
diff --git a/src/backend/optimizer/path/pathkeys.c b/src/backend/optimizer/path/pathkeys.c
index 032b2cdc133..897be6cb985 100644
--- a/src/backend/optimizer/path/pathkeys.c
+++ b/src/backend/optimizer/path/pathkeys.c
@@ -46,7 +46,7 @@ static bool right_merge_direction(PlannerInfo *root, PathKey *pathkey);
* entry if there's not one already.
*
* Note that this function must not be used until after we have completed
- * merging EquivalenceClasses. (We don't try to enforce that here; instead,
+ * merging EquivalenceClasses. (We don't try to enforce that here; instead,
* equivclass.c will complain if a merge occurs after root->canon_pathkeys
* has become nonempty.)
*/
@@ -120,7 +120,7 @@ make_canonical_pathkey(PlannerInfo *root,
*
* Both the given pathkey and the list members must be canonical for this
* to work properly, but that's okay since we no longer ever construct any
- * non-canonical pathkeys. (Note: the notion of a pathkey *list* being
+ * non-canonical pathkeys. (Note: the notion of a pathkey *list* being
* canonical includes the additional requirement of no redundant entries,
* which is exactly what we are checking for here.)
*
@@ -162,7 +162,7 @@ pathkey_is_redundant(PathKey *new_pathkey, List *pathkeys)
*
* If rel is not NULL, it identifies a specific relation we're considering
* a path for, and indicates that child EC members for that relation can be
- * considered. Otherwise child members are ignored. (See the comments for
+ * considered. Otherwise child members are ignored. (See the comments for
* get_eclass_for_sort_expr.)
*
* create_it is TRUE if we should create any missing EquivalenceClass
@@ -192,7 +192,7 @@ make_pathkey_from_sortinfo(PlannerInfo *root,
/*
* EquivalenceClasses need to contain opfamily lists based on the family
* membership of mergejoinable equality operators, which could belong to
- * more than one opfamily. So we have to look up the opfamily's equality
+ * more than one opfamily. So we have to look up the opfamily's equality
* operator and get its membership.
*/
equality_op = get_opfamily_member(opfamily,
@@ -355,7 +355,7 @@ get_cheapest_path_for_pathkeys(List *paths, List *pathkeys,
/*
* Since cost comparison is a lot cheaper than pathkey comparison, do
- * that first. (XXX is that still true?)
+ * that first. (XXX is that still true?)
*/
if (matched_path != NULL &&
compare_path_costs(matched_path, path, cost_criterion) <= 0)
@@ -397,7 +397,7 @@ get_cheapest_fractional_path_for_pathkeys(List *paths,
/*
* Since cost comparison is a lot cheaper than pathkey comparison, do
- * that first. (XXX is that still true?)
+ * that first. (XXX is that still true?)
*/
if (matched_path != NULL &&
compare_fractional_path_costs(matched_path, path, fraction) <= 0)
@@ -504,7 +504,7 @@ build_index_pathkeys(PlannerInfo *root,
/*
* convert_subquery_pathkeys
* Build a pathkeys list that describes the ordering of a subquery's
- * result, in the terms of the outer query. This is essentially a
+ * result, in the terms of the outer query. This is essentially a
* task of conversion.
*
* 'rel': outer query's RelOptInfo for the subquery relation.
@@ -557,7 +557,7 @@ convert_subquery_pathkeys(PlannerInfo *root, RelOptInfo *rel,
/*
* Note: it might look funny to be setting sortref = 0 for a
- * reference to a volatile sub_eclass. However, the
+ * reference to a volatile sub_eclass. However, the
* expression is *not* volatile in the outer query: it's just
* a Var referencing whatever the subquery emitted. (IOW, the
* outer query isn't going to re-execute the volatile
@@ -594,7 +594,7 @@ convert_subquery_pathkeys(PlannerInfo *root, RelOptInfo *rel,
/*
* Otherwise, the sub_pathkey's EquivalenceClass could contain
* multiple elements (representing knowledge that multiple items
- * are effectively equal). Each element might match none, one, or
+ * are effectively equal). Each element might match none, one, or
* more of the output columns that are visible to the outer query.
* This means we may have multiple possible representations of the
* sub_pathkey in the context of the outer query. Ideally we
@@ -822,7 +822,7 @@ make_pathkeys_for_sortclauses(PlannerInfo *root,
* right sides.
*
* Note this is called before EC merging is complete, so the links won't
- * necessarily point to canonical ECs. Before they are actually used for
+ * necessarily point to canonical ECs. Before they are actually used for
* anything, update_mergeclause_eclasses must be called to ensure that
* they've been updated to point to canonical ECs.
*/
@@ -956,7 +956,7 @@ find_mergeclauses_for_pathkeys(PlannerInfo *root,
* It's possible that multiple matching clauses might have different
* ECs on the other side, in which case the order we put them into our
* result makes a difference in the pathkeys required for the other
- * input path. However this routine hasn't got any info about which
+ * input path. However this routine hasn't got any info about which
* order would be best, so we don't worry about that.
*
* It's also possible that the selected mergejoin clauses produce
@@ -987,7 +987,7 @@ find_mergeclauses_for_pathkeys(PlannerInfo *root,
/*
* If we didn't find a mergeclause, we're done --- any additional
- * sort-key positions in the pathkeys are useless. (But we can still
+ * sort-key positions in the pathkeys are useless. (But we can still
* mergejoin if we found at least one mergeclause.)
*/
if (matched_restrictinfos == NIL)
@@ -1019,7 +1019,7 @@ find_mergeclauses_for_pathkeys(PlannerInfo *root,
* Returns a pathkeys list that can be applied to the outer relation.
*
* Since we assume here that a sort is required, there is no particular use
- * in matching any available ordering of the outerrel. (joinpath.c has an
+ * in matching any available ordering of the outerrel. (joinpath.c has an
* entirely separate code path for considering sort-free mergejoins.) Rather,
* it's interesting to try to match the requested query_pathkeys so that a
* second output sort may be avoided; and failing that, we try to list "more
@@ -1350,7 +1350,7 @@ pathkeys_useful_for_merging(PlannerInfo *root, RelOptInfo *rel, List *pathkeys)
/*
* If we didn't find a mergeclause, we're done --- any additional
- * sort-key positions in the pathkeys are useless. (But we can still
+ * sort-key positions in the pathkeys are useless. (But we can still
* mergejoin if we found at least one mergeclause.)
*/
if (matched)
@@ -1380,7 +1380,7 @@ right_merge_direction(PlannerInfo *root, PathKey *pathkey)
pathkey->pk_opfamily == query_pathkey->pk_opfamily)
{
/*
- * Found a matching query sort column. Prefer this pathkey's
+ * Found a matching query sort column. Prefer this pathkey's
* direction iff it matches. Note that we ignore pk_nulls_first,
* which means that a sort might be needed anyway ... but we still
* want to prefer only one of the two possible directions, and we
@@ -1456,13 +1456,13 @@ truncate_useless_pathkeys(PlannerInfo *root,
* useful according to truncate_useless_pathkeys().
*
* This is a cheap test that lets us skip building pathkeys at all in very
- * simple queries. It's OK to err in the direction of returning "true" when
+ * simple queries. It's OK to err in the direction of returning "true" when
* there really aren't any usable pathkeys, but erring in the other direction
* is bad --- so keep this in sync with the routines above!
*
* We could make the test more complex, for example checking to see if any of
* the joinclauses are really mergejoinable, but that likely wouldn't win
- * often enough to repay the extra cycles. Queries with neither a join nor
+ * often enough to repay the extra cycles. Queries with neither a join nor
* a sort are reasonably common, though, so this much work seems worthwhile.
*/
bool
diff --git a/src/backend/optimizer/path/tidpath.c b/src/backend/optimizer/path/tidpath.c
index 256856da35e..948169faacf 100644
--- a/src/backend/optimizer/path/tidpath.c
+++ b/src/backend/optimizer/path/tidpath.c
@@ -19,7 +19,7 @@
* representation all the way through to execution.
*
* There is currently no special support for joins involving CTID; in
- * particular nothing corresponding to best_inner_indexscan(). Since it's
+ * particular nothing corresponding to best_inner_indexscan(). Since it's
* not very useful to store TIDs of one table in another table, there
* doesn't seem to be enough use-case to justify adding a lot of code
* for that.
@@ -57,7 +57,7 @@ static List *TidQualFromRestrictinfo(List *restrictinfo, int varno);
* or
* pseudoconstant = CTID
*
- * We check that the CTID Var belongs to relation "varno". That is probably
+ * We check that the CTID Var belongs to relation "varno". That is probably
* redundant considering this is only applied to restriction clauses, but
* let's be safe.
*/
diff --git a/src/backend/optimizer/plan/analyzejoins.c b/src/backend/optimizer/plan/analyzejoins.c
index 2271a7c35e0..35d730ddc2f 100644
--- a/src/backend/optimizer/plan/analyzejoins.c
+++ b/src/backend/optimizer/plan/analyzejoins.c
@@ -40,7 +40,7 @@ static List *remove_rel_from_joinlist(List *joinlist, int relid, int *nremoved);
* Check for relations that don't actually need to be joined at all,
* and remove them from the query.
*
- * We are passed the current joinlist and return the updated list. Other
+ * We are passed the current joinlist and return the updated list. Other
* data structures that have to be updated are accessible via "root".
*/
List *
@@ -90,7 +90,7 @@ restart:
* Restart the scan. This is necessary to ensure we find all
* removable joins independently of ordering of the join_info_list
* (note that removal of attr_needed bits may make a join appear
- * removable that did not before). Also, since we just deleted the
+ * removable that did not before). Also, since we just deleted the
* current list cell, we'd have to have some kluge to continue the
* list scan anyway.
*/
@@ -107,7 +107,7 @@ restart:
* We already know that the clause is a binary opclause referencing only the
* rels in the current join. The point here is to check whether it has the
* form "outerrel_expr op innerrel_expr" or "innerrel_expr op outerrel_expr",
- * rather than mixing outer and inner vars on either side. If it matches,
+ * rather than mixing outer and inner vars on either side. If it matches,
* we set the transient flag outer_is_left to identify which side is which.
*/
static inline bool
@@ -154,7 +154,7 @@ join_is_removable(PlannerInfo *root, SpecialJoinInfo *sjinfo)
/*
* Currently, we only know how to remove left joins to a baserel with
- * unique indexes. We can check most of these criteria pretty trivially
+ * unique indexes. We can check most of these criteria pretty trivially
* to avoid doing useless extra work. But checking whether any of the
* indexes are unique would require iterating over the indexlist, so for
* now we just make sure there are indexes of some sort or other. If none
@@ -203,7 +203,7 @@ join_is_removable(PlannerInfo *root, SpecialJoinInfo *sjinfo)
* actually references some inner-rel attributes; but the correct check
* for that is relatively expensive, so we first check against ph_eval_at,
* which must mention the inner rel if the PHV uses any inner-rel attrs as
- * non-lateral references. Note that if the PHV's syntactic scope is just
+ * non-lateral references. Note that if the PHV's syntactic scope is just
* the inner rel, we can't drop the rel even if the PHV is variable-free.
*/
foreach(l, root->placeholder_list)
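Putting the criteria of this function together, a minimal removable case looks like this (my illustration):

    /*
     *    SELECT a.* FROM a LEFT JOIN b ON a.bid = b.id;
     * With a unique index on b.id and no reference to b above the
     * join, each row of a matches at most one row of b and cannot be
     * filtered out, so the join to b can be dropped entirely.
     */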
diff --git a/src/backend/optimizer/plan/createplan.c b/src/backend/optimizer/plan/createplan.c
index c501737a267..46df0daf280 100644
--- a/src/backend/optimizer/plan/createplan.c
+++ b/src/backend/optimizer/plan/createplan.c
@@ -173,7 +173,7 @@ static Material *make_material(Plan *lefttree);
/*
* create_plan
* Creates the access plan for a query by recursively processing the
- * desired tree of pathnodes, starting at the node 'best_path'. For
+ * desired tree of pathnodes, starting at the node 'best_path'. For
* every pathnode found, we create a corresponding plan node containing
* appropriate id, target list, and qualification information.
*
@@ -288,7 +288,7 @@ create_scan_plan(PlannerInfo *root, Path *best_path)
/*
* For table scans, rather than using the relation targetlist (which is
* only those Vars actually needed by the query), we prefer to generate a
- * tlist containing all Vars in order. This will allow the executor to
+ * tlist containing all Vars in order. This will allow the executor to
* optimize away projection of the table tuples, if possible. (Note that
* planner.c may replace the tlist we generate here, forcing projection to
* occur.)
@@ -525,7 +525,7 @@ use_physical_tlist(PlannerInfo *root, RelOptInfo *rel)
*
* If the plan node immediately above a scan would prefer to get only
* needed Vars and not a physical tlist, it must call this routine to
- * undo the decision made by use_physical_tlist(). Currently, Hash, Sort,
+ * undo the decision made by use_physical_tlist(). Currently, Hash, Sort,
* and Material nodes want this, so they don't have to store useless columns.
*/
static void
@@ -656,7 +656,7 @@ create_join_plan(PlannerInfo *root, JoinPath *best_path)
/*
* * Expensive function pullups may have pulled local predicates * into
- * this path node. Put them in the qpqual of the plan node. * JMH,
+ * this path node. Put them in the qpqual of the plan node. * JMH,
* 6/15/92
*/
if (get_loc_restrictinfo(best_path) != NIL)
@@ -1172,10 +1172,10 @@ create_indexscan_plan(PlannerInfo *root,
/*
* The qpqual list must contain all restrictions not automatically handled
* by the index, other than pseudoconstant clauses which will be handled
- * by a separate gating plan node. All the predicates in the indexquals
+ * by a separate gating plan node. All the predicates in the indexquals
* will be checked (either by the index itself, or by nodeIndexscan.c),
* but if there are any "special" operators involved then they must be
- * included in qpqual. The upshot is that qpqual must contain
+ * included in qpqual. The upshot is that qpqual must contain
* scan_clauses minus whatever appears in indexquals.
*
* In normal cases simple pointer equality checks will be enough to spot
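Informally, the partition of the scan clauses that this comment describes is (summary of mine):

    /*
     *    indexquals       -> enforced by the index or nodeIndexscan.c
     *    pseudoconstants  -> evaluated once by a gating plan node
     *    qpqual           =  scan_clauses - indexquals - pseudoconstants
     * except that a clause whose indexqual is only a lossy derivation
     * (the "special" operators) stays in qpqual for rechecking.
     */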
@@ -1312,15 +1312,15 @@ create_bitmap_scan_plan(PlannerInfo *root,
/*
* The qpqual list must contain all restrictions not automatically handled
* by the index, other than pseudoconstant clauses which will be handled
- * by a separate gating plan node. All the predicates in the indexquals
+ * by a separate gating plan node. All the predicates in the indexquals
* will be checked (either by the index itself, or by
* nodeBitmapHeapscan.c), but if there are any "special" operators
- * involved then they must be added to qpqual. The upshot is that qpqual
+ * involved then they must be added to qpqual. The upshot is that qpqual
* must contain scan_clauses minus whatever appears in indexquals.
*
* This loop is similar to the comparable code in create_indexscan_plan(),
* but with some differences because it has to compare the scan clauses to
- * stripped (no RestrictInfos) indexquals. See comments there for more
+ * stripped (no RestrictInfos) indexquals. See comments there for more
* info.
*
* In normal cases simple equal() checks will be enough to spot duplicate
@@ -1365,7 +1365,7 @@ create_bitmap_scan_plan(PlannerInfo *root,
/*
* When dealing with special operators, we will at this point have
- * duplicate clauses in qpqual and bitmapqualorig. We may as well drop
+ * duplicate clauses in qpqual and bitmapqualorig. We may as well drop
* 'em from bitmapqualorig, since there's no point in making the tests
* twice.
*/
@@ -1480,7 +1480,7 @@ create_bitmap_subplan(PlannerInfo *root, Path *bitmapqual,
/*
* Here, we only detect qual-free subplans. A qual-free subplan would
* cause us to generate "... OR true ..." which we may as well reduce
- * to just "true". We do not try to eliminate redundant subclauses
+ * to just "true". We do not try to eliminate redundant subclauses
* because (a) it's not as likely as in the AND case, and (b) we might
* well be working with hundreds or even thousands of OR conditions,
* perhaps from a long IN list. The performance of list_append_unique
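[Annotation: the simplification described in the hunk above is cheap to state on its own: while assembling the arms of an OR, one qual-free arm makes the whole disjunction true. A toy sketch where a NULL qual string stands in for a qual-free subplan; the function name and representation are invented, not PostgreSQL's.]

#include <stdbool.h>
#include <stdio.h>

/* Toy model: each OR arm carries a qual; NULL means "no qual at all". */
static bool
or_list_reduces_to_true(const char *const quals[], int n)
{
    for (int i = 0; i < n; i++)
        if (quals[i] == NULL)       /* qual-free arm => "... OR true ..." */
            return true;            /* so the whole disjunction is true */
    return false;
}

int
main(void)
{
    const char *arms[] = {"a = 1", NULL, "b > 2"};

    printf("%s\n", or_list_reduces_to_true(arms, 3) ? "true" : "keep OR list");
    return 0;
}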
@@ -1576,7 +1576,7 @@ create_bitmap_subplan(PlannerInfo *root, Path *bitmapqual,
/*
* We know that the index predicate must have been implied by the
* query condition as a whole, but it may or may not be implied by
- * the conditions that got pushed into the bitmapqual. Avoid
+ * the conditions that got pushed into the bitmapqual. Avoid
* generating redundant conditions.
*/
if (!predicate_implied_by(list_make1(pred), ipath->indexclauses))
@@ -1963,14 +1963,14 @@ create_foreignscan_plan(PlannerInfo *root, ForeignPath *best_path,
Assert(rte->rtekind == RTE_RELATION);
/*
- * Sort clauses into best execution order. We do this first since the FDW
+ * Sort clauses into best execution order. We do this first since the FDW
* might have more info than we do and wish to adjust the ordering.
*/
scan_clauses = order_qual_clauses(root, scan_clauses);
/*
* Let the FDW perform its processing on the restriction clauses and
- * generate the plan node. Note that the FDW might remove restriction
+ * generate the plan node. Note that the FDW might remove restriction
* clauses that it intends to execute remotely, or even add more (if it
* has selected some join clauses for remote use but also wants them
* rechecked locally).
@@ -2624,7 +2624,7 @@ replace_nestloop_params_mutator(Node *node, PlannerInfo *root)
*
* Note that after doing this, we might have different
* representations of the contents of the same PHV in different
- * parts of the plan tree. This is OK because equal() will just
+ * parts of the plan tree. This is OK because equal() will just
* match on phid/phlevelsup, so setrefs.c will still recognize an
* upper-level reference to a lower-level copy of the same PHV.
*/
@@ -2802,7 +2802,7 @@ fix_indexqual_references(PlannerInfo *root, IndexPath *index_path)
/*
* Check to see if the indexkey is on the right; if so, commute
- * the clause. The indexkey should be the side that refers to
+ * the clause. The indexkey should be the side that refers to
* (only) the base relation.
*/
if (!bms_equal(rinfo->left_relids, index->rel->relids))
@@ -2896,7 +2896,7 @@ fix_indexqual_references(PlannerInfo *root, IndexPath *index_path)
*
* This is a simplified version of fix_indexqual_references. The input does
* not have RestrictInfo nodes, and we assume that indxpath.c already
- * commuted the clauses to put the index keys on the left. Also, we don't
+ * commuted the clauses to put the index keys on the left. Also, we don't
* bother to support any cases except simple OpExprs, since nothing else
* is allowed for ordering operators.
*/
@@ -3135,7 +3135,7 @@ order_qual_clauses(PlannerInfo *root, List *clauses)
/*
* Sort. We don't use qsort() because it's not guaranteed stable for
- * equal keys. The expected number of entries is small enough that a
+ * equal keys. The expected number of entries is small enough that a
* simple insertion sort should be good enough.
*/
for (i = 1; i < nitems; i++)
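[Annotation: the loop opening at the end of this hunk is a textbook insertion sort, chosen over qsort() precisely because insertion sort is stable for equal keys. A self-contained analogue sorting toy (clause, cost) pairs; QualItem and its fields are invented stand-ins for RestrictInfo and its eval_cost.]

#include <stdio.h>

typedef struct QualItem
{
    const char *clause;     /* stand-in for a RestrictInfo */
    double      cost;       /* stand-in for its evaluation cost */
} QualItem;

/* Stable insertion sort by ascending cost. */
static void
sort_quals(QualItem *items, int nitems)
{
    for (int i = 1; i < nitems; i++)
    {
        QualItem    newitem = items[i];
        int         j;

        /* strict > keeps equal-cost items in their original order */
        for (j = i; j > 0 && items[j - 1].cost > newitem.cost; j--)
            items[j] = items[j - 1];
        items[j] = newitem;
    }
}

int
main(void)
{
    QualItem    q[] = {{"f(x)", 100.0}, {"a = 1", 0.0025}, {"b = 2", 0.0025}};

    sort_quals(q, 3);
    for (int i = 0; i < 3; i++)
        printf("%s (%.4f)\n", q[i].clause, q[i].cost);
    return 0;   /* "a = 1" stays ahead of the equal-cost "b = 2" */
}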
@@ -3786,7 +3786,7 @@ make_sort(PlannerInfo *root, Plan *lefttree, int numCols,
* prepare_sort_from_pathkeys
* Prepare to sort according to given pathkeys
*
- * This is used to set up for both Sort and MergeAppend nodes. It calculates
+ * This is used to set up for both Sort and MergeAppend nodes. It calculates
* the executor's representation of the sort key information, and adjusts the
* plan targetlist if needed to add resjunk sort columns.
*
@@ -3799,7 +3799,7 @@ make_sort(PlannerInfo *root, Plan *lefttree, int numCols,
*
* We must convert the pathkey information into arrays of sort key column
* numbers, sort operator OIDs, collation OIDs, and nulls-first flags,
- * which is the representation the executor wants. These are returned into
+ * which is the representation the executor wants. These are returned into
* the output parameters *p_numsortkeys etc.
*
* When looking for matches to an EquivalenceClass's members, we will only
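[Annotation: per the comment above, the executor wants the sort specification as parallel arrays rather than pathkey nodes. A rough sketch of that target representation; the SortSpec struct and field names are invented (the real code returns these through output parameters like *p_numsortkeys), and operator OID 97 is just an example value.]

#include <stdbool.h>
#include <stdlib.h>

typedef unsigned int Oid;

/* One slot per sort column: the shape the executor wants. */
typedef struct SortSpec
{
    int     numCols;
    int    *sortColIdx;     /* targetlist positions of the sort keys */
    Oid    *sortOperators;  /* "<" or ">" operator OIDs */
    Oid    *collations;     /* collation OIDs */
    bool   *nullsFirst;     /* nulls-first flag per key */
} SortSpec;

static SortSpec
alloc_sort_spec(int numCols)
{
    SortSpec    spec;

    spec.numCols = numCols;
    spec.sortColIdx = malloc(numCols * sizeof(int));
    spec.sortOperators = malloc(numCols * sizeof(Oid));
    spec.collations = malloc(numCols * sizeof(Oid));
    spec.nullsFirst = malloc(numCols * sizeof(bool));
    return spec;
}

int
main(void)
{
    SortSpec    spec = alloc_sort_spec(1);

    spec.sortColIdx[0] = 1;     /* first tlist column */
    spec.sortOperators[0] = 97; /* e.g. int4 "<" in a stock catalog */
    spec.collations[0] = 0;     /* InvalidOid: not a collatable type */
    spec.nullsFirst[0] = false; /* ASC defaults to NULLS LAST */
    return 0;
}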
@@ -4241,7 +4241,7 @@ make_material(Plan *lefttree)
* materialize_finished_plan: stick a Material node atop a completed plan
*
* There are a couple of places where we want to attach a Material node
- * after completion of subquery_planner(). This currently requires hackery.
+ * after completion of subquery_planner(). This currently requires hackery.
* Since subquery_planner has already run SS_finalize_plan on the subplan
* tree, we have to kluge up parameter lists for the Material node.
* Possibly this could be fixed by postponing SS_finalize_plan processing
@@ -4447,7 +4447,7 @@ make_group(PlannerInfo *root,
/*
* distinctList is a list of SortGroupClauses, identifying the targetlist items
- * that should be considered by the Unique filter. The input path must
+ * that should be considered by the Unique filter. The input path must
* already be sorted accordingly.
*/
Unique *
@@ -4465,7 +4465,7 @@ make_unique(Plan *lefttree, List *distinctList)
/*
* Charge one cpu_operator_cost per comparison per input tuple. We assume
- * all columns get compared at most of the tuples. (XXX probably this is
+ * all columns get compared at most of the tuples. (XXX probably this is
* an overestimate.)
*/
plan->total_cost += cpu_operator_cost * plan->plan_rows * numCols;
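[Annotation: the costing rule closing this hunk is simple arithmetic — one cpu_operator_cost per column comparison per input row. A worked version using the stock default of 0.0025; the row and column counts are made up for illustration.]

#include <stdio.h>

int
main(void)
{
    const double cpu_operator_cost = 0.0025;    /* default GUC value */
    double      plan_rows = 10000.0;            /* input rows to Unique */
    int         numCols = 3;                    /* columns compared per row */
    double      total_cost = 0.0;

    /* one comparison per column per input tuple, as the comment says */
    total_cost += cpu_operator_cost * plan_rows * numCols;
    printf("added cost: %.1f\n", total_cost);   /* 0.0025 * 10000 * 3 = 75.0 */
    return 0;
}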
@@ -4721,7 +4721,7 @@ make_result(PlannerInfo *root,
* Build a ModifyTable plan node
*
* Currently, we don't charge anything extra for the actual table modification
- * work, nor for the RETURNING expressions if any. It would only be window
+ * work, nor for the RETURNING expressions if any. It would only be window
* dressing, since these are always top-level nodes and there is no way for
* the costs to change any higher-level planning choices. But we might want
* to make it look better sometime.
diff --git a/src/backend/optimizer/plan/initsplan.c b/src/backend/optimizer/plan/initsplan.c
index 69226f6f3bd..1ce12c92202 100644
--- a/src/backend/optimizer/plan/initsplan.c
+++ b/src/backend/optimizer/plan/initsplan.c
@@ -87,12 +87,12 @@ static void check_hashjoinable(RestrictInfo *restrictinfo);
* appearing in the jointree.
*
* The initial invocation must pass root->parse->jointree as the value of
- * jtnode. Internally, the function recurses through the jointree.
+ * jtnode. Internally, the function recurses through the jointree.
*
* At the end of this process, there should be one baserel RelOptInfo for
* every non-join RTE that is used in the query. Therefore, this routine
* is the only place that should call build_simple_rel with reloptkind
- * RELOPT_BASEREL. (Note: build_simple_rel recurses internally to build
+ * RELOPT_BASEREL. (Note: build_simple_rel recurses internally to build
* "other rel" RelOptInfos for the members of any appendrels we find here.)
*/
void
@@ -234,10 +234,10 @@ add_vars_to_targetlist(PlannerInfo *root, List *vars,
* means setting suitable where_needed values for them.
*
* Note that this only deals with lateral references in unflattened LATERAL
- * subqueries. When we flatten a LATERAL subquery, its lateral references
+ * subqueries. When we flatten a LATERAL subquery, its lateral references
* become plain Vars in the parent query, but they may have to be wrapped in
* PlaceHolderVars if they need to be forced NULL by outer joins that don't
- * also null the LATERAL subquery. That's all handled elsewhere.
+ * also null the LATERAL subquery. That's all handled elsewhere.
*
* This has to run before deconstruct_jointree, since it might result in
* creation of PlaceHolderInfos.
@@ -360,7 +360,7 @@ extract_lateral_references(PlannerInfo *root, RelOptInfo *brel, Index rtindex)
/*
* We mark the Vars as being "needed" at the LATERAL RTE. This is a bit
* of a cheat: a more formal approach would be to mark each one as needed
- * at the join of the LATERAL RTE with its source RTE. But it will work,
+ * at the join of the LATERAL RTE with its source RTE. But it will work,
* and it's much less tedious than computing a separate where_needed for
* each Var.
*/
@@ -568,7 +568,7 @@ create_lateral_join_info(PlannerInfo *root)
* add_lateral_info
* Add a LateralJoinInfo to root->lateral_info_list, if needed
*
- * We suppress redundant list entries. The passed Relids are copied if saved.
+ * We suppress redundant list entries. The passed Relids are copied if saved.
*/
static void
add_lateral_info(PlannerInfo *root, Relids lhs, Relids rhs)
@@ -615,7 +615,7 @@ add_lateral_info(PlannerInfo *root, Relids lhs, Relids rhs)
* deconstruct_jointree
* Recursively scan the query's join tree for WHERE and JOIN/ON qual
* clauses, and add these to the appropriate restrictinfo and joininfo
- * lists belonging to base RelOptInfos. Also, add SpecialJoinInfo nodes
+ * lists belonging to base RelOptInfos. Also, add SpecialJoinInfo nodes
* to root->join_info_list for any outer joins appearing in the query tree.
* Return a "joinlist" data structure showing the join order decisions
* that need to be made by make_one_rel().
@@ -632,9 +632,9 @@ add_lateral_info(PlannerInfo *root, Relids lhs, Relids rhs)
* be evaluated at the lowest level where all the variables it mentions are
* available. However, we cannot push a qual down into the nullable side(s)
* of an outer join since the qual might eliminate matching rows and cause a
- * NULL row to be incorrectly emitted by the join. Therefore, we artificially
+ * NULL row to be incorrectly emitted by the join. Therefore, we artificially
* OR the minimum-relids of such an outer join into the required_relids of
- * clauses appearing above it. This forces those clauses to be delayed until
+ * clauses appearing above it. This forces those clauses to be delayed until
* application of the outer join (or maybe even higher in the join tree).
*/
List *
@@ -755,7 +755,7 @@ deconstruct_recurse(PlannerInfo *root, Node *jtnode, bool below_outer_join,
*inner_join_rels = *qualscope;
/*
- * Try to process any quals postponed by children. If they need
+ * Try to process any quals postponed by children. If they need
* further postponement, add them to my output postponed_qual_list.
*/
foreach(l, child_postponed_quals)
@@ -807,7 +807,7 @@ deconstruct_recurse(PlannerInfo *root, Node *jtnode, bool below_outer_join,
* regard for whether this level is an outer join, which is correct.
* Then we place our own join quals, which are restricted by lower
* outer joins in any case, and are forced to this level if this is an
- * outer join and they mention the outer side. Finally, if this is an
+ * outer join and they mention the outer side. Finally, if this is an
* outer join, we create a join_info_list entry for the join. This
* will prevent quals above us in the join tree that use those rels
* from being pushed down below this level. (It's okay for upper
@@ -897,7 +897,7 @@ deconstruct_recurse(PlannerInfo *root, Node *jtnode, bool below_outer_join,
nullable_rels);
/*
- * Try to process any quals postponed by children. If they need
+ * Try to process any quals postponed by children. If they need
* further postponement, add them to my output postponed_qual_list.
* Quals that can be processed now must be included in my_quals, so
* that they'll be handled properly in make_outerjoininfo.
@@ -1059,7 +1059,7 @@ make_outerjoininfo(PlannerInfo *root,
* complain if any nullable rel is FOR [KEY] UPDATE/SHARE.
*
* You might be wondering why this test isn't made far upstream in the
- * parser. It's because the parser hasn't got enough info --- consider
+ * parser. It's because the parser hasn't got enough info --- consider
* FOR UPDATE applied to a view. Only after rewriting and flattening do
* we know whether the view contains an outer join.
*
@@ -1117,7 +1117,7 @@ make_outerjoininfo(PlannerInfo *root,
min_lefthand = bms_intersect(clause_relids, left_rels);
/*
- * Similarly for required RHS. But here, we must also include any lower
+ * Similarly for required RHS. But here, we must also include any lower
* inner joins, to ensure we don't try to commute with any of them.
*/
min_righthand = bms_int_members(bms_union(clause_relids, inner_join_rels),
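[Annotation: the statement split across the end of this hunk computes min_righthand as (clause_relids union inner_join_rels) intersected with right_rels. With relid sets modeled as 64-bit masks instead of variable-length Bitmapsets, the same computation is one line; this is a toy analogue, not the bms_* API.]

#include <stdint.h>
#include <stdio.h>

typedef uint64_t Relids;        /* toy: one bit per base relation id */

int
main(void)
{
    Relids  clause_relids = 0x06;   /* rels {2,3} used in the join clause */
    Relids  inner_join_rels = 0x08; /* rel {4} from a lower inner join */
    Relids  right_rels = 0x0C;      /* syntactic RHS: rels {3,4} */

    /* bms_int_members(bms_union(...), right_rels), in mask form */
    Relids  min_righthand = (clause_relids | inner_join_rels) & right_rels;

    printf("min_righthand = 0x%llx\n", (unsigned long long) min_righthand);
    return 0;   /* prints 0xc: both RHS rels must stay on the right */
}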
@@ -1169,7 +1169,7 @@ make_outerjoininfo(PlannerInfo *root,
* Here, we have to consider that "our join condition" includes any
* clauses that syntactically appeared above the lower OJ and below
* ours; those are equivalent to degenerate clauses in our OJ and must
- * be treated as such. Such clauses obviously can't reference our
+ * be treated as such. Such clauses obviously can't reference our
* LHS, and they must be non-strict for the lower OJ's RHS (else
* reduce_outer_joins would have reduced the lower OJ to a plain
* join). Hence the other ways in which we handle clauses within our
@@ -1248,7 +1248,7 @@ make_outerjoininfo(PlannerInfo *root,
* distribute_qual_to_rels
* Add clause information to either the baserestrictinfo or joininfo list
* (depending on whether the clause is a join) of each base relation
- * mentioned in the clause. A RestrictInfo node is created and added to
+ * mentioned in the clause. A RestrictInfo node is created and added to
* the appropriate list for each rel. Alternatively, if the clause uses a
* mergejoinable operator and is not delayed by outer-join rules, enter
* the left- and right-side expressions into the query's list of
@@ -1313,7 +1313,7 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause,
* In ordinary SQL, a WHERE or JOIN/ON clause can't reference any rels
* that aren't within its syntactic scope; however, if we pulled up a
* LATERAL subquery then we might find such references in quals that have
- * been pulled up. We need to treat such quals as belonging to the join
+ * been pulled up. We need to treat such quals as belonging to the join
* level that includes every rel they reference. Although we could make
* pull_up_subqueries() place such quals correctly to begin with, it's
* easier to handle it here. When we find a clause that contains Vars
@@ -1357,10 +1357,10 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause,
* gating Result plan node. We put such a clause into the regular
* RestrictInfo lists for the moment, but eventually createplan.c will
* pull it out and make a gating Result node immediately above whatever
- * plan node the pseudoconstant clause is assigned to. It's usually best
+ * plan node the pseudoconstant clause is assigned to. It's usually best
* to put a gating node as high in the plan tree as possible. If we are
* not below an outer join, we can actually push the pseudoconstant qual
- * all the way to the top of the tree. If we are below an outer join, we
+ * all the way to the top of the tree. If we are below an outer join, we
* leave the qual at its original syntactic level (we could push it up to
* just below the outer join, but that seems more complex than it's
* worth).
@@ -1414,7 +1414,7 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause,
* Note: it is not immediately obvious that a simple boolean is enough
* for this: if for some reason we were to attach a degenerate qual to
* its original join level, it would need to be treated as an outer join
- * qual there. However, this cannot happen, because all the rels the
+ * qual there. However, this cannot happen, because all the rels the
* clause mentions must be in the outer join's min_righthand, therefore
* the join it needs must be formed before the outer join; and we always
* attach quals to the lowest level where they can be evaluated. But
@@ -1448,7 +1448,7 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause,
* We can't use such a clause to deduce equivalence (the left and
* right sides might be unequal above the join because one of them has
* gone to NULL) ... but we might be able to use it for more limited
- * deductions, if it is mergejoinable. So consider adding it to the
+ * deductions, if it is mergejoinable. So consider adding it to the
* lists of set-aside outer-join clauses.
*/
is_pushed_down = false;
@@ -1478,7 +1478,7 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause,
else
{
/*
- * Normal qual clause or degenerate outer-join clause. Either way, we
+ * Normal qual clause or degenerate outer-join clause. Either way, we
* can mark it as pushed-down.
*/
is_pushed_down = true;
@@ -1598,7 +1598,7 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause,
*
* In all cases, it's important to initialize the left_ec and right_ec
* fields of a mergejoinable clause, so that all possibly mergejoinable
- * expressions have representations in EquivalenceClasses. If
+ * expressions have representations in EquivalenceClasses. If
* process_equivalence is successful, it will take care of that;
* otherwise, we have to call initialize_mergeclause_eclasses to do it.
*/
@@ -1674,7 +1674,7 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause,
* For an is_pushed_down qual, we can evaluate the qual as soon as (1) we have
* all the rels it mentions, and (2) we are at or above any outer joins that
* can null any of these rels and are below the syntactic location of the
- * given qual. We must enforce (2) because pushing down such a clause below
+ * given qual. We must enforce (2) because pushing down such a clause below
* the OJ might cause the OJ to emit null-extended rows that should not have
* been formed, or that should have been rejected by the clause. (This is
* only an issue for non-strict quals, since if we can prove a qual mentioning
@@ -1700,7 +1700,7 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause,
* required relids overlap the LHS too) causes that OJ's delay_upper_joins
* flag to be set TRUE. This will prevent any higher-level OJs from
* being interchanged with that OJ, which would result in not having any
- * correct place to evaluate the qual. (The case we care about here is a
+ * correct place to evaluate the qual. (The case we care about here is a
* sub-select WHERE clause within the RHS of some outer join. The WHERE
* clause must effectively be treated as a degenerate clause of that outer
* join's condition. Rather than trying to match such clauses with joins
@@ -1928,7 +1928,7 @@ distribute_restrictinfo_to_rels(PlannerInfo *root,
* that provides all its variables.
*
* "nullable_relids" is the set of relids used in the expressions that are
- * potentially nullable below the expressions. (This has to be supplied by
+ * potentially nullable below the expressions. (This has to be supplied by
* caller because this function is used after deconstruct_jointree, so we
* don't have knowledge of where the clause items came from.)
*
@@ -2098,7 +2098,7 @@ check_mergejoinable(RestrictInfo *restrictinfo)
* info fields in the restrictinfo.
*
* Currently, we support hashjoin for binary opclauses where
- * the operator is a hashjoinable operator. The arguments can be
+ * the operator is a hashjoinable operator. The arguments can be
* anything --- as long as there are no volatile functions in them.
*/
static void
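[Annotation: the test described above — binary opclause, hashjoinable operator, no volatile functions in the arguments — reduces to three boolean conditions. A stripped-down sketch; OpClause, the flat args_volatile flag, and the stub catalog lookup are all invented (operator OID 96 is int4's "=" in a stock catalog, used only as an example).]

#include <stdbool.h>
#include <stdio.h>

typedef unsigned int Oid;

/* Toy clause: just enough state for the hashjoinability check. */
typedef struct OpClause
{
    Oid     opno;           /* the operator */
    int     nargs;          /* must be 2 for a binary opclause */
    bool    args_volatile;  /* any volatile function in either argument? */
} OpClause;

/* Stand-in for a catalog lookup of pg_operator.oprcanhash. */
static bool
op_is_hashjoinable(Oid opno)
{
    return opno == 96;      /* pretend int4eq's "=" is the only one known */
}

static bool
clause_is_hashjoinable(const OpClause *c)
{
    return c->nargs == 2 &&
           op_is_hashjoinable(c->opno) &&
           !c->args_volatile;
}

int
main(void)
{
    OpClause    c = {96, 2, false};

    printf("%s\n", clause_is_hashjoinable(&c) ? "hashable" : "not hashable");
    return 0;
}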
diff --git a/src/backend/optimizer/plan/planagg.c b/src/backend/optimizer/plan/planagg.c
index e5c57280baf..9ae1bc0bd3f 100644
--- a/src/backend/optimizer/plan/planagg.c
+++ b/src/backend/optimizer/plan/planagg.c
@@ -10,9 +10,9 @@
* ORDER BY col ASC/DESC
* LIMIT 1)
* Given a suitable index on tab.col, this can be much faster than the
- * generic scan-all-the-rows aggregation plan. We can handle multiple
+ * generic scan-all-the-rows aggregation plan. We can handle multiple
* MIN/MAX aggregates by generating multiple subqueries, and their
- * orderings can be different. However, if the query contains any
+ * orderings can be different. However, if the query contains any
* non-optimizable aggregates, there's no point since we'll have to
* scan all the rows anyway.
*
@@ -128,7 +128,7 @@ preprocess_minmax_aggregates(PlannerInfo *root, List *tlist)
/*
* Scan the tlist and HAVING qual to find all the aggregates and verify
- * all are MIN/MAX aggregates. Stop as soon as we find one that isn't.
+ * all are MIN/MAX aggregates. Stop as soon as we find one that isn't.
*/
aggs_list = NIL;
if (find_minmax_aggs_walker((Node *) tlist, &aggs_list))
@@ -163,7 +163,7 @@ preprocess_minmax_aggregates(PlannerInfo *root, List *tlist)
* We can use either an ordering that gives NULLS FIRST or one that
* gives NULLS LAST; furthermore there's unlikely to be much
* performance difference between them, so it doesn't seem worth
- * costing out both ways if we get a hit on the first one. NULLS
+ * costing out both ways if we get a hit on the first one. NULLS
* FIRST is more likely to be available if the operator is a
* reverse-sort operator, so try that first if reverse.
*/
diff --git a/src/backend/optimizer/plan/planmain.c b/src/backend/optimizer/plan/planmain.c
index 284929f125e..16d3af32ed0 100644
--- a/src/backend/optimizer/plan/planmain.c
+++ b/src/backend/optimizer/plan/planmain.c
@@ -36,7 +36,7 @@
* which may involve joins but not any fancier features.
*
* Since query_planner does not handle the toplevel processing (grouping,
- * sorting, etc) it cannot select the best path by itself. It selects
+ * sorting, etc) it cannot select the best path by itself. It selects
* two paths: the cheapest path that produces all the required tuples,
* independent of any ordering considerations, and the cheapest path that
* produces the expected fraction of the required tuples in the required
@@ -100,7 +100,7 @@ query_planner(PlannerInfo *root, List *tlist,
/*
* If the query has an empty join tree, then it's something easy like
- * "SELECT 2+2;" or "INSERT ... VALUES()". Fall through quickly.
+ * "SELECT 2+2;" or "INSERT ... VALUES()". Fall through quickly.
*/
if (parse->jointree->fromlist == NIL)
{
@@ -160,7 +160,7 @@ query_planner(PlannerInfo *root, List *tlist,
/*
* Examine the targetlist and join tree, adding entries to baserel
* targetlists for all referenced Vars, and generating PlaceHolderInfo
- * entries for all referenced PlaceHolderVars. Restrict and join clauses
+ * entries for all referenced PlaceHolderVars. Restrict and join clauses
* are added to appropriate lists belonging to the mentioned relations. We
* also build EquivalenceClasses for provably equivalent expressions. The
* SpecialJoinInfo list is also built to hold information about join order
@@ -184,7 +184,7 @@ query_planner(PlannerInfo *root, List *tlist,
/*
* If we formed any equivalence classes, generate additional restriction
- * clauses as appropriate. (Implied join clauses are formed on-the-fly
+ * clauses as appropriate. (Implied join clauses are formed on-the-fly
* later.)
*/
generate_base_implied_equalities(root);
@@ -199,14 +199,14 @@ query_planner(PlannerInfo *root, List *tlist,
/*
* Examine any "placeholder" expressions generated during subquery pullup.
* Make sure that the Vars they need are marked as needed at the relevant
- * join level. This must be done before join removal because it might
+ * join level. This must be done before join removal because it might
* cause Vars or placeholders to be needed above a join when they weren't
* so marked before.
*/
fix_placeholder_input_needed_levels(root);
/*
- * Remove any useless outer joins. Ideally this would be done during
+ * Remove any useless outer joins. Ideally this would be done during
* jointree preprocessing, but the necessary information isn't available
* until we've built baserel data structures and classified qual clauses.
*/
@@ -299,7 +299,7 @@ query_planner(PlannerInfo *root, List *tlist,
/*
* If both GROUP BY and ORDER BY are specified, we will need two
* levels of sort --- and, therefore, certainly need to read all the
- * tuples --- unless ORDER BY is a subset of GROUP BY. Likewise if we
+ * tuples --- unless ORDER BY is a subset of GROUP BY. Likewise if we
* have both DISTINCT and GROUP BY, or if we have a window
* specification not compatible with the GROUP BY.
*/
diff --git a/src/backend/optimizer/plan/planner.c b/src/backend/optimizer/plan/planner.c
index 4a390c493dc..686e176e703 100644
--- a/src/backend/optimizer/plan/planner.c
+++ b/src/backend/optimizer/plan/planner.c
@@ -191,7 +191,7 @@ standard_planner(Query *parse, int cursorOptions, ParamListInfo boundParams)
/*
* We document cursor_tuple_fraction as simply being a fraction, which
- * means the edge cases 0 and 1 have to be treated specially here. We
+ * means the edge cases 0 and 1 have to be treated specially here. We
* convert 1 to 0 ("all the tuples") and 0 to a very small fraction.
*/
if (tuple_fraction >= 1.0)
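[Annotation: the if statement that closes this hunk begins the special-casing the comment describes. A guess at the full clamping logic, consistent with "convert 1 to 0 ('all the tuples') and 0 to a very small fraction"; the 1e-10 floor is an assumption, not quoted from the patch.]

#include <stdio.h>

/* Map the documented edge cases of cursor_tuple_fraction. */
static double
clamp_tuple_fraction(double tuple_fraction)
{
    if (tuple_fraction >= 1.0)
        return 0.0;             /* 1 means "all the tuples" */
    if (tuple_fraction <= 0.0)
        return 1e-10;           /* 0 becomes a very small fraction (assumed) */
    return tuple_fraction;
}

int
main(void)
{
    printf("%g %g %g\n",
           clamp_tuple_fraction(1.0),   /* 0 */
           clamp_tuple_fraction(0.0),   /* 1e-10 */
           clamp_tuple_fraction(0.1));  /* 0.1, unchanged */
    return 0;
}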
@@ -384,7 +384,7 @@ subquery_planner(PlannerGlobal *glob, Query *parse,
}
/*
- * Preprocess RowMark information. We need to do this after subquery
+ * Preprocess RowMark information. We need to do this after subquery
* pullup (so that all non-inherited RTEs are present) and before
* inheritance expansion (so that the info is available for
* expand_inherited_tables to examine and modify).
@@ -492,7 +492,7 @@ subquery_planner(PlannerGlobal *glob, Query *parse,
* to execute that we're better off doing it only once per group, despite
* the loss of selectivity. This is hard to estimate short of doing the
* entire planning process twice, so we use a heuristic: clauses
- * containing subplans are left in HAVING. Otherwise, we move or copy the
+ * containing subplans are left in HAVING. Otherwise, we move or copy the
* HAVING clause into WHERE, in hopes of eliminating tuples before
* aggregation instead of after.
*
@@ -1044,7 +1044,7 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
/*
* If there's a top-level ORDER BY, assume we have to fetch all the
- * tuples. This might be too simplistic given all the hackery below
+ * tuples. This might be too simplistic given all the hackery below
* to possibly avoid the sort; but the odds of accurate estimates here
* are pretty low anyway.
*/
@@ -1071,7 +1071,7 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
/*
* We should not need to call preprocess_targetlist, since we must be
- * in a SELECT query node. Instead, use the targetlist returned by
+ * in a SELECT query node. Instead, use the targetlist returned by
* plan_set_operations (since this tells whether it returned any
* resjunk columns!), and transfer any sort key information from the
* original tlist.
@@ -1467,7 +1467,7 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
* Furthermore, there cannot be any variables in either HAVING
* or the targetlist, so we actually do not need the FROM
* table at all! We can just throw away the plan-so-far and
- * generate a Result node. This is a sufficiently unusual
+ * generate a Result node. This is a sufficiently unusual
* corner case that it's not worth contorting the structure of
* this routine to avoid having to generate the plan in the
* first place.
@@ -1511,14 +1511,14 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
/*
* The "base" targetlist for all steps of the windowing process is
- * a flat tlist of all Vars and Aggs needed in the result. (In
+ * a flat tlist of all Vars and Aggs needed in the result. (In
* some cases we wouldn't need to propagate all of these all the
* way to the top, since they might only be needed as inputs to
* WindowFuncs. It's probably not worth trying to optimize that
* though.) We also add window partitioning and sorting
* expressions to the base tlist, to ensure they're computed only
* once at the bottom of the stack (that's critical for volatile
- * functions). As we climb up the stack, we'll add outputs for
+ * functions). As we climb up the stack, we'll add outputs for
* the WindowFuncs computed at each level.
*/
window_tlist = make_windowInputTargetList(root,
@@ -1527,7 +1527,7 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
/*
* The copyObject steps here are needed to ensure that each plan
- * node has a separately modifiable tlist. (XXX wouldn't a
+ * node has a separately modifiable tlist. (XXX wouldn't a
* shallow list copy do for that?)
*/
result_plan->targetlist = (List *) copyObject(window_tlist);
@@ -1812,7 +1812,7 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
*
* Once grouping_planner() has applied a general tlist to the topmost
* scan/join plan node, any tlist eval cost for added-on nodes should be
- * accounted for as we create those nodes. Presently, of the node types we
+ * accounted for as we create those nodes. Presently, of the node types we
* can add on later, only Agg, WindowAgg, and Group project new tlists (the
* rest just copy their input tuples) --- so make_agg(), make_windowagg() and
* make_group() are responsible for calling this function to account for their
@@ -1978,7 +1978,7 @@ preprocess_rowmarks(PlannerInfo *root)
/*
* Currently, it is syntactically impossible to have FOR UPDATE et al
- * applied to an update/delete target rel. If that ever becomes
+ * applied to an update/delete target rel. If that ever becomes
* possible, we should drop the target from the PlanRowMark list.
*/
Assert(rc->rti != parse->resultRelation);
@@ -2062,7 +2062,7 @@ preprocess_rowmarks(PlannerInfo *root)
* preprocess_limit - do pre-estimation for LIMIT and/or OFFSET clauses
*
* We try to estimate the values of the LIMIT/OFFSET clauses, and pass the
- * results back in *count_est and *offset_est. These variables are set to
+ * results back in *count_est and *offset_est. These variables are set to
* 0 if the corresponding clause is not present, and -1 if it's present
* but we couldn't estimate the value for it. (The "0" convention is OK
* for OFFSET but a little bit bogus for LIMIT: effectively we estimate
@@ -2071,7 +2071,7 @@ preprocess_rowmarks(PlannerInfo *root)
* be passed to make_limit, which see if you change this code.
*
* The return value is the suitably adjusted tuple_fraction to use for
- * planning the query. This adjustment is not overridable, since it reflects
+ * planning the query. This adjustment is not overridable, since it reflects
* plan actions that grouping_planner() will certainly take, not assumptions
* about context.
*/
@@ -2195,7 +2195,7 @@ preprocess_limit(PlannerInfo *root, double tuple_fraction,
else if (*offset_est != 0 && tuple_fraction > 0.0)
{
/*
- * We have an OFFSET but no LIMIT. This acts entirely differently
+ * We have an OFFSET but no LIMIT. This acts entirely differently
* from the LIMIT case: here, we need to increase rather than decrease
* the caller's tuple_fraction, because the OFFSET acts to cause more
* tuples to be fetched instead of fewer. This only matters if we got
@@ -2210,7 +2210,7 @@ preprocess_limit(PlannerInfo *root, double tuple_fraction,
/*
* If we have absolute counts from both caller and OFFSET, add them
- * together; likewise if they are both fractional. If one is
+ * together; likewise if they are both fractional. If one is
* fractional and the other absolute, we want to take the larger, and
* we heuristically assume that's the fractional one.
*/
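[Annotation: the rule in the hunk above has three arms — two absolute row counts add, two fractions add, and a mixed pair resolves to the fractional one, heuristically assumed larger. A small sketch under the planner-wide convention that values >= 1 are absolute counts and values < 1 are fractions; the function name is invented.]

#include <stdio.h>

/* Combine the caller's tuple_fraction with an estimated OFFSET count. */
static double
combine_fetch_estimates(double caller, double offset)
{
    int     caller_abs = (caller >= 1.0);
    int     offset_abs = (offset >= 1.0);

    if (caller_abs == offset_abs)
        return caller + offset;     /* both absolute, or both fractional */
    /* mixed: take the larger, heuristically the fractional one */
    return caller_abs ? offset : caller;
}

int
main(void)
{
    printf("%g\n", combine_fetch_estimates(100.0, 50.0)); /* 150 rows */
    printf("%g\n", combine_fetch_estimates(0.10, 0.05));  /* 0.15 */
    printf("%g\n", combine_fetch_estimates(100.0, 0.25)); /* 0.25 */
    return 0;
}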
@@ -2251,7 +2251,7 @@ preprocess_limit(PlannerInfo *root, double tuple_fraction,
*
* If we have constant-zero OFFSET and constant-null LIMIT, we can skip adding
* a Limit node. This is worth checking for because "OFFSET 0" is a common
- * locution for an optimization fence. (Because other places in the planner
+ * locution for an optimization fence. (Because other places in the planner
* merely check whether parse->limitOffset isn't NULL, it will still work as
* an optimization fence --- we're just suppressing unnecessary run-time
* overhead.)
@@ -2494,7 +2494,7 @@ choose_hashed_grouping(PlannerInfo *root,
/*
* Executor doesn't support hashed aggregation with DISTINCT or ORDER BY
- * aggregates. (Doing so would imply storing *all* the input values in
+ * aggregates. (Doing so would imply storing *all* the input values in
* the hash table, and/or running many sorts in parallel, either of which
* seems like a certain loser.)
*/
@@ -2636,7 +2636,7 @@ choose_hashed_grouping(PlannerInfo *root,
* pass in the costs as individual variables.)
*
* But note that making the two choices independently is a bit bogus in
- * itself. If the two could be combined into a single choice operation
+ * itself. If the two could be combined into a single choice operation
* it'd probably be better, but that seems far too unwieldy to be practical,
* especially considering that the combination of GROUP BY and DISTINCT
* isn't very common in real queries. By separating them, we are giving
@@ -2733,7 +2733,7 @@ choose_hashed_distinct(PlannerInfo *root,
0.0, work_mem, limit_tuples);
/*
- * Now for the GROUP case. See comments in grouping_planner about the
+ * Now for the GROUP case. See comments in grouping_planner about the
* sorting choices here --- this code should match that code.
*/
sorted_p.startup_cost = sorted_startup_cost;
@@ -2927,7 +2927,7 @@ make_subplanTargetList(PlannerInfo *root,
* add them to the result tlist if not already present. (A Var used
* directly as a GROUP BY item will be present already.) Note this
* includes Vars used in resjunk items, so we are covering the needs of
- * ORDER BY and window specifications. Vars used within Aggrefs will be
+ * ORDER BY and window specifications. Vars used within Aggrefs will be
* pulled out here, too.
*/
non_group_vars = pull_var_clause((Node *) non_group_cols,
@@ -2978,7 +2978,7 @@ get_grouping_column_index(Query *parse, TargetEntry *tle)
* Locate grouping columns in the tlist chosen by create_plan.
*
* This is only needed if we don't use the sub_tlist chosen by
- * make_subplanTargetList. We have to forget the column indexes found
+ * make_subplanTargetList. We have to forget the column indexes found
* by that routine and re-locate the grouping exprs in the real sub_tlist.
* We assume the grouping exprs are just Vars (see make_subplanTargetList).
*/
@@ -3009,11 +3009,11 @@ locate_grouping_columns(PlannerInfo *root,
/*
* The grouping column returned by create_plan might not have the same
- * typmod as the original Var. (This can happen in cases where a
+ * typmod as the original Var. (This can happen in cases where a
* set-returning function has been inlined, so that we now have more
* knowledge about what it returns than we did when the original Var
* was created.) So we can't use tlist_member() to search the tlist;
- * instead use tlist_member_match_var. For safety, still check that
+ * instead use tlist_member_match_var. For safety, still check that
* the vartype matches.
*/
if (!(groupexpr && IsA(groupexpr, Var)))
@@ -3139,7 +3139,7 @@ select_active_windows(PlannerInfo *root, WindowFuncLists *wflists)
*
* When grouping_planner inserts one or more WindowAgg nodes into the plan,
* this function computes the initial target list to be computed by the node
- * just below the first WindowAgg. This list must contain all values needed
+ * just below the first WindowAgg. This list must contain all values needed
* to evaluate the window functions, compute the final target list, and
* perform any required final sort step. If multiple WindowAggs are needed,
* each intermediate one adds its window function results onto this tlist;
@@ -3147,7 +3147,7 @@ select_active_windows(PlannerInfo *root, WindowFuncLists *wflists)
*
* This function is much like make_subplanTargetList, though not quite enough
* like it to share code. As in that function, we flatten most expressions
- * into their component variables. But we do not want to flatten window
+ * into their component variables. But we do not want to flatten window
* PARTITION BY/ORDER BY clauses, since that might result in multiple
* evaluations of them, which would be bad (possibly even resulting in
* inconsistent answers, if they contain volatile functions). Also, we must
@@ -3320,7 +3320,7 @@ make_pathkeys_for_window(PlannerInfo *root, WindowClause *wc,
* This depends on the behavior of make_pathkeys_for_window()!
*
* We are given the target WindowClause and an array of the input column
- * numbers associated with the resulting pathkeys. In the easy case, there
+ * numbers associated with the resulting pathkeys. In the easy case, there
* are the same number of pathkey columns as partitioning + ordering columns
* and we just have to copy some data around. However, it's possible that
* some of the original partitioning + ordering columns were eliminated as
@@ -3332,7 +3332,7 @@ make_pathkeys_for_window(PlannerInfo *root, WindowClause *wc,
* determine which keys are significant.
*
* The method used here is a bit brute-force: add the sort columns to a list
- * one at a time and note when the resulting pathkey list gets longer. But
+ * one at a time and note when the resulting pathkey list gets longer. But
* it's a sufficiently uncommon case that a faster way doesn't seem worth
* the amount of code refactoring that'd be needed.
*----------
diff --git a/src/backend/optimizer/plan/setrefs.c b/src/backend/optimizer/plan/setrefs.c
index 36f1d84adbb..21512bd7de9 100644
--- a/src/backend/optimizer/plan/setrefs.c
+++ b/src/backend/optimizer/plan/setrefs.c
@@ -145,7 +145,7 @@ static bool extract_query_dependencies_walker(Node *node,
/*
* set_plan_references
*
- * This is the final processing pass of the planner/optimizer. The plan
+ * This is the final processing pass of the planner/optimizer. The plan
* tree is complete; we just have to adjust some representational details
* for the convenience of the executor:
*
@@ -189,7 +189,7 @@ static bool extract_query_dependencies_walker(Node *node,
* and root->glob->invalItems (for everything else).
*
* Notice that we modify Plan nodes in-place, but use expression_tree_mutator
- * to process targetlist and qual expressions. We can assume that the Plan
+ * to process targetlist and qual expressions. We can assume that the Plan
* nodes were just built by the planner and are not multiply referenced, but
* it's not so safe to assume that for expression tree nodes.
*/
@@ -262,7 +262,7 @@ add_rtes_to_flat_rtable(PlannerInfo *root, bool recursing)
/*
* If there are any dead subqueries, they are not referenced in the Plan
* tree, so we must add RTEs contained in them to the flattened rtable
- * separately. (If we failed to do this, the executor would not perform
+ * separately. (If we failed to do this, the executor would not perform
* expected permission checks for tables mentioned in such subqueries.)
*
* Note: this pass over the rangetable can't be combined with the previous
@@ -292,7 +292,7 @@ add_rtes_to_flat_rtable(PlannerInfo *root, bool recursing)
/*
* The subquery might never have been planned at all, if it
* was excluded on the basis of self-contradictory constraints
- * in our query level. In this case apply
+ * in our query level. In this case apply
* flatten_unplanned_rtes.
*
* If it was planned but the plan is dummy, we assume that it
@@ -594,7 +594,7 @@ set_plan_refs(PlannerInfo *root, Plan *plan, int rtoffset)
/*
* These plan types don't actually bother to evaluate their
* targetlists, because they just return their unmodified input
- * tuples. Even though the targetlist won't be used by the
+ * tuples. Even though the targetlist won't be used by the
* executor, we fix it up for possible use by EXPLAIN (not to
* mention ease of debugging --- wrong varnos are very confusing).
*/
@@ -612,7 +612,7 @@ set_plan_refs(PlannerInfo *root, Plan *plan, int rtoffset)
/*
* Like the plan types above, LockRows doesn't evaluate its
- * tlist or quals. But we have to fix up the RT indexes in
+ * tlist or quals. But we have to fix up the RT indexes in
* its rowmarks.
*/
set_dummy_tlist_references(plan, rtoffset);
@@ -730,7 +730,7 @@ set_plan_refs(PlannerInfo *root, Plan *plan, int rtoffset)
* Set up the visible plan targetlist as being the same as
* the first RETURNING list. This is for the use of
* EXPLAIN; the executor won't pay any attention to the
- * targetlist. We postpone this step until here so that
+ * targetlist. We postpone this step until here so that
* we don't have to do set_returning_clause_references()
* twice on identical targetlists.
*/
@@ -956,7 +956,7 @@ set_subqueryscan_references(PlannerInfo *root,
else
{
/*
- * Keep the SubqueryScan node. We have to do the processing that
+ * Keep the SubqueryScan node. We have to do the processing that
* set_plan_references would otherwise have done on it. Notice we do
* not do set_upper_references() here, because a SubqueryScan will
* always have been created with correct references to its subplan's
@@ -1428,7 +1428,7 @@ set_dummy_tlist_references(Plan *plan, int rtoffset)
*
* In most cases, subplan tlists will be "flat" tlists with only Vars,
* so we try to optimize that case by extracting information about Vars
- * in advance. Matching a parent tlist to a child is still an O(N^2)
+ * in advance. Matching a parent tlist to a child is still an O(N^2)
* operation, but at least with a much smaller constant factor than plain
* tlist_member() searches.
*
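[Annotation: the optimization described above precomputes each child tlist Var's identity so that parent-to-child matching compares a couple of integers per candidate instead of running a full expression-equality walk — still O(N^2), but with a tiny constant. A stripped-down analogue; the struct and function names are stand-ins for setrefs.c's tlist_vinfo machinery.]

#include <stdio.h>

/* Precomputed identity of one Var in the child targetlist. */
typedef struct tlist_vinfo
{
    int varno;      /* range-table index the Var refers to */
    int varattno;   /* column number within that relation */
    int resno;      /* position in the child targetlist */
} tlist_vinfo;

/* Two int compares per candidate, instead of tlist_member()'s full walk. */
static int
search_indexed_tlist(const tlist_vinfo *vis, int n, int varno, int varattno)
{
    for (int i = 0; i < n; i++)
        if (vis[i].varno == varno && vis[i].varattno == varattno)
            return vis[i].resno;
    return 0;       /* not found */
}

int
main(void)
{
    tlist_vinfo child[] = {{1, 1, 1}, {1, 3, 2}, {2, 2, 3}};

    printf("resno = %d\n", search_indexed_tlist(child, 3, 1, 3)); /* 2 */
    return 0;
}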
@@ -1873,7 +1873,7 @@ fix_upper_expr_mutator(Node *node, fix_upper_expr_context *context)
* adjust any Vars that refer to other tables to reference junk tlist
* entries in the top subplan's targetlist. Vars referencing the result
* table should be left alone, however (the executor will evaluate them
- * using the actual heap tuple, after firing triggers if any). In the
+ * using the actual heap tuple, after firing triggers if any). In the
* adjusted RETURNING list, result-table Vars will have their original
* varno (plus rtoffset), but Vars for other rels will have varno OUTER_VAR.
*
diff --git a/src/backend/optimizer/plan/subselect.c b/src/backend/optimizer/plan/subselect.c
index 5102f05d745..b9255abcb6c 100644
--- a/src/backend/optimizer/plan/subselect.c
+++ b/src/backend/optimizer/plan/subselect.c
@@ -434,7 +434,7 @@ make_subplan(PlannerInfo *root, Query *orig_subquery, SubLinkType subLinkType,
Node *result;
/*
- * Copy the source Query node. This is a quick and dirty kluge to resolve
+ * Copy the source Query node. This is a quick and dirty kluge to resolve
* the fact that the parser can generate trees with multiple links to the
* same sub-Query node, but the planner wants to scribble on the Query.
* Try to clean this up when we do querytree redesign...
@@ -459,7 +459,7 @@ make_subplan(PlannerInfo *root, Query *orig_subquery, SubLinkType subLinkType,
* path/costsize.c.
*
* XXX If an ANY subplan is uncorrelated, build_subplan may decide to hash
- * its output. In that case it would've been better to specify full
+ * its output. In that case it would've been better to specify full
* retrieval. At present, however, we can only check hashability after
* we've made the subplan :-(. (Determining whether it'll fit in work_mem
* is the really hard part.) Therefore, we don't want to be too
@@ -496,7 +496,7 @@ make_subplan(PlannerInfo *root, Query *orig_subquery, SubLinkType subLinkType,
/*
* If it's a correlated EXISTS with an unimportant targetlist, we might be
* able to transform it to the equivalent of an IN and then implement it
- * by hashing. We don't have enough information yet to tell which way is
+ * by hashing. We don't have enough information yet to tell which way is
* likely to be better (it depends on the expected number of executions of
* the EXISTS qual, and we are much too early in planning the outer query
* to be able to guess that). So we generate both plans, if possible, and
@@ -724,7 +724,7 @@ build_subplan(PlannerInfo *root, Plan *plan, PlannerInfo *subroot,
* Otherwise, we have the option to tack a Material node onto the top
* of the subplan, to reduce the cost of reading it repeatedly. This
* is pointless for a direct-correlated subplan, since we'd have to
- * recompute its results each time anyway. For uncorrelated/undirect
+ * recompute its results each time anyway. For uncorrelated/undirect
* correlated subplans, we add Material unless the subplan's top plan
* node would materialize its output anyway. Also, if enable_material
* is false, then the user does not want us to materialize anything
@@ -750,10 +750,10 @@ build_subplan(PlannerInfo *root, Plan *plan, PlannerInfo *subroot,
/*
* A parameterless subplan (not initplan) should be prepared to handle
- * REWIND efficiently. If it has direct parameters then there's no point
+ * REWIND efficiently. If it has direct parameters then there's no point
* since it'll be reset on each scan anyway; and if it's an initplan then
* there's no point since it won't get re-run without parameter changes
- * anyway. The input of a hashed subplan doesn't need REWIND either.
+ * anyway. The input of a hashed subplan doesn't need REWIND either.
*/
if (splan->parParam == NIL && !isInitPlan && !splan->useHashTable)
root->glob->rewindPlanIDs = bms_add_member(root->glob->rewindPlanIDs,
@@ -853,7 +853,7 @@ generate_subquery_vars(PlannerInfo *root, List *tlist, Index varno)
/*
* convert_testexpr: convert the testexpr given by the parser into
* actually executable form. This entails replacing PARAM_SUBLINK Params
- * with Params or Vars representing the results of the sub-select. The
+ * with Params or Vars representing the results of the sub-select. The
* nodes to be substituted are passed in as the List result from
* generate_subquery_params or generate_subquery_vars.
*/
@@ -955,7 +955,7 @@ testexpr_is_hashable(Node *testexpr)
*
* The combining operators must be hashable and strict. The need for
* hashability is obvious, since we want to use hashing. Without
- * strictness, behavior in the presence of nulls is too unpredictable. We
+ * strictness, behavior in the presence of nulls is too unpredictable. We
* actually must assume even more than plain strictness: they can't yield
* NULL for non-null inputs, either (see nodeSubplan.c). However, hash
* indexes and hash joins assume that too.
@@ -1063,7 +1063,7 @@ SS_process_ctes(PlannerInfo *root)
}
/*
- * Copy the source Query node. Probably not necessary, but let's keep
+ * Copy the source Query node. Probably not necessary, but let's keep
* this similar to make_subplan.
*/
subquery = (Query *) copyObject(cte->ctequery);
@@ -1089,7 +1089,7 @@ SS_process_ctes(PlannerInfo *root)
elog(ERROR, "unexpected outer reference in CTE query");
/*
- * Make a SubPlan node for it. This is just enough unlike
+ * Make a SubPlan node for it. This is just enough unlike
* build_subplan that we can't share code.
*
* Note plan_id, plan_name, and cost fields are set further down.
@@ -1313,7 +1313,7 @@ convert_EXISTS_sublink_to_join(PlannerInfo *root, SubLink *sublink,
/*
* See if the subquery can be simplified based on the knowledge that it's
- * being used in EXISTS(). If we aren't able to get rid of its
+ * being used in EXISTS(). If we aren't able to get rid of its
* targetlist, we have to fail, because the pullup operation leaves us
* with noplace to evaluate the targetlist.
*/
@@ -1362,9 +1362,9 @@ convert_EXISTS_sublink_to_join(PlannerInfo *root, SubLink *sublink,
* what pull_up_subqueries has to go through.
*
* In fact, it's even easier than what convert_ANY_sublink_to_join has to
- * do. The machinations of simplify_EXISTS_query ensured that there is
+ * do. The machinations of simplify_EXISTS_query ensured that there is
* nothing interesting in the subquery except an rtable and jointree, and
- * even the jointree FromExpr no longer has quals. So we can just append
+ * even the jointree FromExpr no longer has quals. So we can just append
* the rtable to our own and use the FromExpr in our jointree. But first,
* adjust all level-zero varnos in the subquery to account for the rtable
* merger.
@@ -1495,7 +1495,7 @@ simplify_EXISTS_query(Query *query)
*
* On success, the modified subselect is returned, and we store a suitable
* upper-level test expression at *testexpr, plus a list of the subselect's
- * output Params at *paramIds. (The test expression is already Param-ified
+ * output Params at *paramIds. (The test expression is already Param-ified
* and hence need not go through convert_testexpr, which is why we have to
* deal with the Param IDs specially.)
*
@@ -1658,7 +1658,7 @@ convert_EXISTS_to_ANY(PlannerInfo *root, Query *subselect,
return NULL;
/*
- * Also reject sublinks in the stuff we intend to pull up. (It might be
+ * Also reject sublinks in the stuff we intend to pull up. (It might be
* possible to support this, but doesn't seem worth the complication.)
*/
if (contain_subplans((Node *) leftargs))
@@ -1860,7 +1860,7 @@ process_sublinks_mutator(Node *node, process_sublinks_context *context)
* is needed for a bare List.)
*
* Anywhere within the top-level AND/OR clause structure, we can tell
- * make_subplan() that NULL and FALSE are interchangeable. So isTopQual
+ * make_subplan() that NULL and FALSE are interchangeable. So isTopQual
* propagates down in both cases. (Note that this is unlike the meaning
* of "top level qual" used in most other places in Postgres.)
*/
@@ -1966,7 +1966,7 @@ SS_finalize_plan(PlannerInfo *root, Plan *plan, bool attach_initplans)
* Now determine the set of params that are validly referenceable in this
* query level; to wit, those available from outer query levels plus the
* output parameters of any local initPlans. (We do not include output
- * parameters of regular subplans. Those should only appear within the
+ * parameters of regular subplans. Those should only appear within the
* testexpr of SubPlan nodes, and are taken care of locally within
* finalize_primnode. Likewise, special parameters that are generated by
* nodes such as ModifyTable are handled within finalize_plan.)
@@ -2142,7 +2142,7 @@ finalize_plan(PlannerInfo *root, Plan *plan, Bitmapset *valid_params,
/*
* In a SubqueryScan, SS_finalize_plan has already been run on the
* subplan by the inner invocation of subquery_planner, so there's
- * no need to do it again. Instead, just pull out the subplan's
+ * no need to do it again. Instead, just pull out the subplan's
* extParams list, which represents the params it needs from my
* level and higher levels.
*/
@@ -2476,7 +2476,7 @@ finalize_primnode(Node *node, finalize_primnode_context *context)
/*
* Remove any param IDs of output parameters of the subplan that were
- * referenced in the testexpr. These are not interesting for
+ * referenced in the testexpr. These are not interesting for
* parameter change signaling since we always re-evaluate the subplan.
* Note that this wouldn't work too well if there might be uses of the
* same param IDs elsewhere in the plan, but that can't happen because
diff --git a/src/backend/optimizer/prep/prepjointree.c b/src/backend/optimizer/prep/prepjointree.c
index b7112c45d4b..393bed10e63 100644
--- a/src/backend/optimizer/prep/prepjointree.c
+++ b/src/backend/optimizer/prep/prepjointree.c
@@ -116,7 +116,7 @@ static Node *find_jointree_node_for_rel(Node *jtnode, int relid);
*
* A clause "foo op ANY (sub-SELECT)" can be processed by pulling the
* sub-SELECT up to become a rangetable entry and treating the implied
- * comparisons as quals of a semijoin. However, this optimization *only*
+ * comparisons as quals of a semijoin. However, this optimization *only*
* works at the top level of WHERE or a JOIN/ON clause, because we cannot
* distinguish whether the ANY ought to return FALSE or NULL in cases
* involving NULL inputs. Also, in an outer join's ON clause we can only
@@ -133,7 +133,7 @@ static Node *find_jointree_node_for_rel(Node *jtnode, int relid);
* transformations if any are found.
*
* This routine has to run before preprocess_expression(), so the quals
- * clauses are not yet reduced to implicit-AND format. That means we need
+ * clauses are not yet reduced to implicit-AND format. That means we need
* to recursively search through explicit AND clauses, which are
* probably only binary ANDs. We stop as soon as we hit a non-AND item.
*/
@@ -287,7 +287,7 @@ pull_up_sublinks_jointree_recurse(PlannerInfo *root, Node *jtnode,
/*
* Although we could include the pulled-up subqueries in the returned
* relids, there's no need since upper quals couldn't refer to their
- * outputs anyway. But we *do* need to include the join's own rtindex
+ * outputs anyway. But we *do* need to include the join's own rtindex
* because we haven't yet collapsed join alias variables, so upper
* levels would mistakenly think they couldn't use references to this
* join.
@@ -612,7 +612,7 @@ pull_up_subqueries(PlannerInfo *root, Node *jtnode)
*
* If this jointree node is within either side of an outer join, then
* lowest_outer_join references the lowest such JoinExpr node; otherwise
- * it is NULL. We use this to constrain the effects of LATERAL subqueries.
+ * it is NULL. We use this to constrain the effects of LATERAL subqueries.
*
* If this jointree node is within the nullable side of an outer join, then
* lowest_nulling_outer_join references the lowest such JoinExpr node;
@@ -762,7 +762,7 @@ pull_up_subqueries_recurse(PlannerInfo *root, Node *jtnode,
* Attempt to pull up a single simple subquery.
*
* jtnode is a RangeTblRef that has been tentatively identified as a simple
- * subquery by pull_up_subqueries. We return the replacement jointree node,
+ * subquery by pull_up_subqueries. We return the replacement jointree node,
* or jtnode itself if we determine that the subquery can't be pulled up after
* all.
*
@@ -795,7 +795,7 @@ pull_up_simple_subquery(PlannerInfo *root, Node *jtnode, RangeTblEntry *rte,
* Create a PlannerInfo data structure for this subquery.
*
* NOTE: the next few steps should match the first processing in
- * subquery_planner(). Can we refactor to avoid code duplication, or
+ * subquery_planner(). Can we refactor to avoid code duplication, or
* would that just make things uglier?
*/
subroot = makeNode(PlannerInfo);
@@ -845,7 +845,7 @@ pull_up_simple_subquery(PlannerInfo *root, Node *jtnode, RangeTblEntry *rte,
/*
* Now we must recheck whether the subquery is still simple enough to pull
- * up. If not, abandon processing it.
+ * up. If not, abandon processing it.
*
* We don't really need to recheck all the conditions involved, but it's
* easier just to keep this "if" looking the same as the one in
@@ -862,7 +862,7 @@ pull_up_simple_subquery(PlannerInfo *root, Node *jtnode, RangeTblEntry *rte,
* Give up, return unmodified RangeTblRef.
*
* Note: The work we just did will be redone when the subquery gets
- * planned on its own. Perhaps we could avoid that by storing the
+ * planned on its own. Perhaps we could avoid that by storing the
* modified subquery back into the rangetable, but I'm not gonna risk
* it now.
*/
@@ -903,7 +903,7 @@ pull_up_simple_subquery(PlannerInfo *root, Node *jtnode, RangeTblEntry *rte,
* non-nullable items and lateral references may have to be turned into
* PlaceHolderVars. If we are dealing with an appendrel member then
* anything that's not a simple Var has to be turned into a
- * PlaceHolderVar. Set up required context data for pullup_replace_vars.
+ * PlaceHolderVar. Set up required context data for pullup_replace_vars.
*/
rvcontext.root = root;
rvcontext.targetlist = subquery->targetList;
@@ -928,7 +928,7 @@ pull_up_simple_subquery(PlannerInfo *root, Node *jtnode, RangeTblEntry *rte,
* replace any of the jointree structure. (This'd be a lot cleaner if we
* could use query_tree_mutator.) We have to use PHVs in the targetList,
* returningList, and havingQual, since those are certainly above any
- * outer join. replace_vars_in_jointree tracks its location in the
+ * outer join. replace_vars_in_jointree tracks its location in the
* jointree and uses PHVs or not appropriately.
*/
parse->targetList = (List *)
@@ -1087,7 +1087,7 @@ pull_up_simple_subquery(PlannerInfo *root, Node *jtnode, RangeTblEntry *rte,
* Pull up a single simple UNION ALL subquery.
*
* jtnode is a RangeTblRef that has been identified as a simple UNION ALL
- * subquery by pull_up_subqueries. We pull up the leaf subqueries and
+ * subquery by pull_up_subqueries. We pull up the leaf subqueries and
* build an "append relation" for the union set. The result value is just
* jtnode, since we don't actually need to change the query jointree.
*/
@@ -1101,7 +1101,7 @@ pull_up_simple_union_all(PlannerInfo *root, Node *jtnode, RangeTblEntry *rte)
/*
* Make a modifiable copy of the subquery's rtable, so we can adjust
- * upper-level Vars in it. There are no such Vars in the setOperations
+ * upper-level Vars in it. There are no such Vars in the setOperations
* tree proper, so fixing the rtable should be sufficient.
*/
rtable = copyObject(subquery->rtable);
@@ -1373,7 +1373,7 @@ is_simple_subquery(Query *subquery, RangeTblEntry *rte,
/*
* Don't pull up a subquery that has any set-returning functions in its
- * targetlist. Otherwise we might well wind up inserting set-returning
+ * targetlist. Otherwise we might well wind up inserting set-returning
* functions into places where they mustn't go, such as quals of higher
* queries.
*/
@@ -1382,7 +1382,7 @@ is_simple_subquery(Query *subquery, RangeTblEntry *rte,
/*
* Don't pull up a subquery that has any volatile functions in its
- * targetlist. Otherwise we might introduce multiple evaluations of these
+ * targetlist. Otherwise we might introduce multiple evaluations of these
* functions, if they get copied to multiple places in the upper query,
* leading to surprising results. (Note: the PlaceHolderVar mechanism
* doesn't quite guarantee single evaluation; else we could pull up anyway
@@ -1612,7 +1612,7 @@ replace_vars_in_jointree(Node *jtnode,
/*
* If the RangeTblRef refers to a LATERAL subquery (that isn't the
* same subquery we're pulling up), it might contain references to the
- * target subquery, which we must replace. We drive this from the
+ * target subquery, which we must replace. We drive this from the
* jointree scan, rather than a scan of the rtable, for a couple of
* reasons: we can avoid processing no-longer-referenced RTEs, and we
* can use the appropriate setting of need_phvs depending on whether
@@ -1773,7 +1773,7 @@ pullup_replace_vars_callback(Var *var,
/*
* Insert PlaceHolderVar if needed. Notice that we are wrapping one
* PlaceHolderVar around the whole RowExpr, rather than putting one
- * around each element of the row. This is because we need the
+ * around each element of the row. This is because we need the
* expression to yield NULL, not ROW(NULL,NULL,...) when it is forced
* to null by an outer join.
*/
@@ -1875,7 +1875,7 @@ pullup_replace_vars_callback(Var *var,
/*
* Cache it if possible (ie, if the attno is in range, which it
- * probably always should be). We can cache the value even if we
+ * probably always should be). We can cache the value even if we
* decided we didn't need a PHV, since this result will be
* suitable for any request that has need_phvs.
*/
@@ -1918,7 +1918,7 @@ pullup_replace_vars_subquery(Query *query,
*
* If a query's setOperations tree consists entirely of simple UNION ALL
* operations, flatten it into an append relation, which we can process more
- * intelligently than the general setops case. Otherwise, do nothing.
+ * intelligently than the general setops case. Otherwise, do nothing.
*
* In most cases, this can succeed only for a top-level query, because for a
* subquery in FROM, the parent query's invocation of pull_up_subqueries would
@@ -2030,7 +2030,7 @@ flatten_simple_union_all(PlannerInfo *root)
* SELECT ... FROM a LEFT JOIN b ON (a.x = b.y) WHERE b.y IS NULL;
* If the join clause is strict for b.y, then only null-extended rows could
* pass the upper WHERE, and we can conclude that what the query is really
- * specifying is an anti-semijoin. We change the join type from JOIN_LEFT
+ * specifying is an anti-semijoin. We change the join type from JOIN_LEFT
* to JOIN_ANTI. The IS NULL clause then becomes redundant, and must be
* removed to prevent bogus selectivity calculations, but we leave it to
* distribute_qual_to_rels to get rid of such clauses.
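Seen in miniature, the rewrite is just this: a left join filtered to its null-extended rows emits exactly the anti-join. A standalone C sketch, with the two "tables" reduced to int arrays (nothing here is planner code):

#include <stdbool.h>
#include <stdio.h>

static const int a_x[] = {1, 2, 3, 4};
static const int b_y[] = {2, 4};
#define NA 4
#define NB 2

int main(void)
{
    /* Form 1: LEFT JOIN a/b ON a.x = b.y, then WHERE b.y IS NULL.
     * Unmatched a-rows are null-extended, and only those survive. */
    printf("left join + IS NULL:");
    for (int i = 0; i < NA; i++)
    {
        bool matched = false;
        for (int j = 0; j < NB; j++)
            if (a_x[i] == b_y[j])
                matched = true;        /* joined row carries a real b.y */
        if (!matched)                  /* null-extended: b.y IS NULL */
            printf(" %d", a_x[i]);
    }

    /* Form 2: JOIN_ANTI emits a-rows lacking any match directly; the
     * IS NULL filter has become redundant. */
    printf("\nanti join:          ");
    for (int i = 0; i < NA; i++)
    {
        bool matched = false;
        for (int j = 0; j < NB; j++)
            if (a_x[i] == b_y[j])
                matched = true;
        if (!matched)
            printf(" %d", a_x[i]);
    }
    printf("\n");
    return 0;
}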
@@ -2270,7 +2270,7 @@ reduce_outer_joins_pass2(Node *jtnode,
/*
* See if we can reduce JOIN_LEFT to JOIN_ANTI. This is the case if
* the join's own quals are strict for any var that was forced null by
- * higher qual levels. NOTE: there are other ways that we could
+ * higher qual levels. NOTE: there are other ways that we could
* detect an anti-join, in particular if we were to check whether Vars
* coming from the RHS must be non-null because of table constraints.
* That seems complicated and expensive though (in particular, one
@@ -2428,7 +2428,7 @@ reduce_outer_joins_pass2(Node *jtnode,
* pulled-up relid, and change them to reference the replacement relid(s).
*
* NOTE: although this has the form of a walker, we cheat and modify the
- * nodes in-place. This should be OK since the tree was copied by
+ * nodes in-place. This should be OK since the tree was copied by
* pullup_replace_vars earlier. Avoid scribbling on the original values of
* the bitmapsets, though, because expression_tree_mutator doesn't copy those.
*/
diff --git a/src/backend/optimizer/prep/prepqual.c b/src/backend/optimizer/prep/prepqual.c
index b8733aeba9c..684b1a5e4c5 100644
--- a/src/backend/optimizer/prep/prepqual.c
+++ b/src/backend/optimizer/prep/prepqual.c
@@ -54,12 +54,12 @@ static Expr *process_duplicate_ors(List *orlist);
* Although this can be invoked on its own, it's mainly intended as a helper
* for eval_const_expressions(), and that context drives several design
* decisions. In particular, if the input is already AND/OR flat, we must
- * preserve that property. We also don't bother to recurse in situations
+ * preserve that property. We also don't bother to recurse in situations
* where we can assume that lower-level executions of eval_const_expressions
* would already have simplified sub-clauses of the input.
*
* The difference between this and a simple make_notclause() is that this
- * tries to get rid of the NOT node by logical simplification. It's clearly
+ * tries to get rid of the NOT node by logical simplification. It's clearly
* always a win if the NOT node can be eliminated altogether. However, our
* use of DeMorgan's laws could result in having more NOT nodes rather than
* fewer. We do that unconditionally anyway, because in WHERE clauses it's
@@ -152,7 +152,7 @@ negate_clause(Node *node)
* those properties. For example, if no direct child of
* the given AND clause is an AND or a NOT-above-OR, then
* the recursive calls of negate_clause() can't return any
- * OR clauses. So we needn't call pull_ors() before
+ * OR clauses. So we needn't call pull_ors() before
* building a new OR clause. Similarly for the OR case.
*--------------------
*/
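For illustration, a toy version of the De Morgan push-down that negate_clause performs, on an invented three-constructor clause representation (VAR/NOT/AND/OR here are not the real planner Node tree):

#include <stdio.h>
#include <stdlib.h>

typedef enum { VAR, NOT, AND, OR } Kind;

typedef struct Expr {
    Kind kind;
    char name;               /* payload for VAR */
    struct Expr *l, *r;      /* children: NOT uses l only */
} Expr;

static Expr *mk(Kind k, char name, Expr *l, Expr *r)
{
    Expr *e = malloc(sizeof(Expr));
    e->kind = k; e->name = name; e->l = l; e->r = r;
    return e;
}

/* Return the negation with the NOT pushed down one level. */
static Expr *negate(Expr *e)
{
    switch (e->kind)
    {
        case VAR: return mk(NOT, 0, e, NULL);
        case NOT: return e->l;                            /* NOT NOT x => x */
        case AND: return mk(OR, 0, negate(e->l), negate(e->r));
        case OR:  return mk(AND, 0, negate(e->l), negate(e->r));
    }
    return NULL;
}

static void show(Expr *e)
{
    switch (e->kind)
    {
        case VAR: putchar(e->name); break;
        case NOT: printf("NOT "); show(e->l); break;
        case AND: putchar('('); show(e->l); printf(" AND "); show(e->r); putchar(')'); break;
        case OR:  putchar('('); show(e->l); printf(" OR ");  show(e->r); putchar(')'); break;
    }
}

int main(void)
{
    /* NOT (a AND (b OR c))  =>  (NOT a OR (NOT b AND NOT c)) */
    Expr *e = mk(AND, 0, mk(VAR, 'a', NULL, NULL),
                 mk(OR, 0, mk(VAR, 'b', NULL, NULL), mk(VAR, 'c', NULL, NULL)));
    show(negate(e));
    putchar('\n');
    return 0;
}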
@@ -293,7 +293,7 @@ canonicalize_qual(Expr *qual)
/*
* Pull up redundant subclauses in OR-of-AND trees. We do this only
* within the top-level AND/OR structure; there's no point in looking
- * deeper. Also remove any NULL constants in the top-level structure.
+ * deeper. Also remove any NULL constants in the top-level structure.
*/
newqual = find_duplicate_ors(qual);
@@ -374,7 +374,7 @@ pull_ors(List *orlist)
*
* This may seem like a fairly useless activity, but it turns out to be
* applicable to many machine-generated queries, and there are also queries
- * in some of the TPC benchmarks that need it. This was in fact almost the
+ * in some of the TPC benchmarks that need it. This was in fact almost the
* sole useful side-effect of the old prepqual code that tried to force
* the query into canonical AND-of-ORs form: the canonical equivalent of
* ((A AND B) OR (A AND C))
@@ -400,7 +400,7 @@ pull_ors(List *orlist)
* results, so it's valid to treat NULL::boolean the same as FALSE and then
* simplify AND/OR accordingly.
*
- * Returns the modified qualification. AND/OR flatness is preserved.
+ * Returns the modified qualification. AND/OR flatness is preserved.
*/
static Expr *
find_duplicate_ors(Expr *qual)
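The factoring that find_duplicate_ors performs can be sketched with bitmasks: treat each OR arm as a set of ANDed atoms, intersect the sets, and pull the common atoms out front. A minimal standalone example (the atom bits A/B/C are invented):

#include <stdio.h>

int main(void)
{
    /* Atoms: bit 0 = A, bit 1 = B, bit 2 = C. */
    unsigned arms[] = { 0x1 | 0x2,     /* arm 1: A AND B */
                        0x1 | 0x4 };   /* arm 2: A AND C */
    int      narms = 2;

    unsigned common = arms[0];
    for (int i = 1; i < narms; i++)
        common &= arms[i];             /* atoms shared by every OR arm */

    /* (A AND B) OR (A AND C)  =>  A AND (B OR C) */
    printf("factored out: %#x\n", common);                     /* A */
    for (int i = 0; i < narms; i++)
        printf("arm %d keeps: %#x\n", i, arms[i] & ~common);   /* B, C */
    return 0;
}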
diff --git a/src/backend/optimizer/prep/preptlist.c b/src/backend/optimizer/prep/preptlist.c
index fb67f9e4447..09c910d93ab 100644
--- a/src/backend/optimizer/prep/preptlist.c
+++ b/src/backend/optimizer/prep/preptlist.c
@@ -4,7 +4,7 @@
* Routines to preprocess the parse tree target list
*
* For INSERT and UPDATE queries, the targetlist must contain an entry for
- * each attribute of the target relation in the correct order. For all query
+ * each attribute of the target relation in the correct order. For all query
* types, we may need to add junk tlist entries for Vars used in the RETURNING
* list and row ID information needed for SELECT FOR UPDATE locking and/or
* EvalPlanQual checking.
@@ -79,7 +79,7 @@ preprocess_targetlist(PlannerInfo *root, List *tlist)
/*
* Add necessary junk columns for rowmarked rels. These values are needed
* for locking of rels selected FOR UPDATE/SHARE, and to do EvalPlanQual
- * rechecking. See comments for PlanRowMark in plannodes.h.
+ * rechecking. See comments for PlanRowMark in plannodes.h.
*/
foreach(lc, root->rowMarks)
{
@@ -144,7 +144,7 @@ preprocess_targetlist(PlannerInfo *root, List *tlist)
/*
* If the query has a RETURNING list, add resjunk entries for any Vars
* used in RETURNING that belong to other relations. We need to do this
- * to make these Vars available for the RETURNING calculation. Vars that
+ * to make these Vars available for the RETURNING calculation. Vars that
* belong to the result rel don't need to be added, because they will be
* made to refer to the actual heap tuple.
*/
@@ -252,9 +252,9 @@ expand_targetlist(List *tlist, int command_type,
* When generating a NULL constant for a dropped column, we label
* it INT4 (any other guaranteed-to-exist datatype would do as
* well). We can't label it with the dropped column's datatype
- * since that might not exist anymore. It does not really matter
+ * since that might not exist anymore. It does not really matter
* what we claim the type is, since NULL is NULL --- its
- * representation is datatype-independent. This could perhaps
+ * representation is datatype-independent. This could perhaps
* confuse code comparing the finished plan to the target
* relation, however.
*/
@@ -336,7 +336,7 @@ expand_targetlist(List *tlist, int command_type,
/*
* The remaining tlist entries should be resjunk; append them all to the
* end of the new tlist, making sure they have resnos higher than the last
- * real attribute. (Note: although the rewriter already did such
+ * real attribute. (Note: although the rewriter already did such
* renumbering, we have to do it again here in case we are doing an UPDATE
* in a table with dropped columns, or an inheritance child table with
* extra columns.)
diff --git a/src/backend/optimizer/prep/prepunion.c b/src/backend/optimizer/prep/prepunion.c
index e2496280b84..620725c642e 100644
--- a/src/backend/optimizer/prep/prepunion.c
+++ b/src/backend/optimizer/prep/prepunion.c
@@ -6,14 +6,14 @@
*
* There are two code paths in the planner for set-operation queries.
* If a subquery consists entirely of simple UNION ALL operations, it
- * is converted into an "append relation". Otherwise, it is handled
+ * is converted into an "append relation". Otherwise, it is handled
* by the general code in this module (plan_set_operations and its
* subroutines). There is some support code here for the append-relation
* case, but most of the heavy lifting for that is done elsewhere,
* notably in prepjointree.c and allpaths.c.
*
* There is also some code here to support planning of queries that use
- * inheritance (SELECT FROM foo*). Inheritance trees are converted into
+ * inheritance (SELECT FROM foo*). Inheritance trees are converted into
* append relations, and thenceforth share code with the UNION ALL case.
*
*
@@ -576,7 +576,7 @@ generate_nonunion_plan(SetOperationStmt *op, PlannerInfo *root,
*
* The tlist for an Append plan isn't important as far as the Append is
* concerned, but we must make it look real anyway for the benefit of the
- * next plan level up. In fact, it has to be real enough that the flag
+ * next plan level up. In fact, it has to be real enough that the flag
* column is shown as a variable not a constant, else setrefs.c will get
* confused.
*/
@@ -969,7 +969,7 @@ generate_setop_tlist(List *colTypes, List *colCollations,
* Ensure the tlist entry's exposed collation matches the set-op. This
* is necessary because plan_set_operations() reports the result
* ordering as a list of SortGroupClauses, which don't carry collation
- * themselves but just refer to tlist entries. If we don't show the
+ * themselves but just refer to tlist entries. If we don't show the
* right collation then planner.c might do the wrong thing in
* higher-level queries.
*
@@ -1183,7 +1183,7 @@ generate_setop_grouplist(SetOperationStmt *op, List *targetlist)
/*
* expand_inherited_tables
* Expand each rangetable entry that represents an inheritance set
- * into an "append relation". At the conclusion of this process,
+ * into an "append relation". At the conclusion of this process,
* the "inh" flag is set in all and only those RTEs that are append
* relation parents.
*/
@@ -1215,7 +1215,7 @@ expand_inherited_tables(PlannerInfo *root)
* Check whether a rangetable entry represents an inheritance set.
* If so, add entries for all the child tables to the query's
* rangetable, and build AppendRelInfo nodes for all the child tables
- * and add them to root->append_rel_list. If not, clear the entry's
+ * and add them to root->append_rel_list. If not, clear the entry's
* "inh" flag to prevent later code from looking for AppendRelInfos.
*
* Note that the original RTE is considered to represent the whole
@@ -1526,7 +1526,7 @@ make_inh_translation_list(Relation oldrelation, Relation newrelation,
* parent rel's attribute numbering to the child's.
*
* The only surprise here is that we don't translate a parent whole-row
- * reference into a child whole-row reference. That would mean requiring
+ * reference into a child whole-row reference. That would mean requiring
* permissions on all child columns, which is overly strict, since the
* query is really only going to reference the inherited columns. Instead
* we set the per-column bits for all inherited columns.
@@ -1855,7 +1855,7 @@ adjust_relid_set(Relids relids, Index oldrelid, Index newrelid)
*
* The expressions have already been fixed, but we have to make sure that
* the target resnos match the child table (they may not, in the case of
- * a column that was added after-the-fact by ALTER TABLE). In some cases
+ * a column that was added after-the-fact by ALTER TABLE). In some cases
* this can force us to re-order the tlist to preserve resno ordering.
* (We do all this work in special cases so that preptlist.c is fast for
* the typical case.)
diff --git a/src/backend/optimizer/util/clauses.c b/src/backend/optimizer/util/clauses.c
index 098c23d2c73..dfef0778afa 100644
--- a/src/backend/optimizer/util/clauses.c
+++ b/src/backend/optimizer/util/clauses.c
@@ -526,7 +526,7 @@ count_agg_clauses_walker(Node *node, count_agg_clauses_context *context)
/*
* If the transition type is pass-by-value then it doesn't add
- * anything to the required size of the hashtable. If it is
+ * anything to the required size of the hashtable. If it is
* pass-by-reference then we have to add the estimated size of the
* value itself, plus palloc overhead.
*/
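As a rough standalone sketch of that accounting — the overhead constants are invented here, not PostgreSQL's actual numbers:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define ENTRY_OVERHEAD  64   /* assumed per-hash-entry base size */
#define PALLOC_OVERHEAD 16   /* assumed allocator header per chunk */

static size_t
est_entry_size(bool transtype_byval, size_t transtype_size)
{
    size_t sz = ENTRY_OVERHEAD;

    /* pass-by-value fits in the entry; by-reference lives out-of-line */
    if (!transtype_byval)
        sz += transtype_size + PALLOC_OVERHEAD;
    return sz;
}

int main(void)
{
    printf("int8 (by value):  %zu bytes\n", est_entry_size(true, 8));
    printf("numeric (by ref): %zu bytes\n", est_entry_size(false, 24));
    return 0;
}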
@@ -818,7 +818,7 @@ contain_subplans_walker(Node *node, void *context)
* Recursively search for mutable functions within a clause.
*
* Returns true if any mutable function (or operator implemented by a
- * mutable function) is found. This test is needed so that we don't
+ * mutable function) is found. This test is needed so that we don't
* mistakenly think that something like "WHERE random() < 0.5" can be treated
* as a constant qualification.
*
@@ -945,7 +945,7 @@ contain_mutable_functions_walker(Node *node, void *context)
* invalid conversions of volatile expressions into indexscan quals.
*
* We will recursively look into Query nodes (i.e., SubLink sub-selects)
- * but not into SubPlans. This is a bit odd, but intentional. If we are
+ * but not into SubPlans. This is a bit odd, but intentional. If we are
* looking at a SubLink, we are probably deciding whether a query tree
* transformation is safe, and a contained sub-select should affect that;
* for example, duplicating a sub-select containing a volatile function
@@ -1076,7 +1076,7 @@ contain_volatile_functions_walker(Node *node, void *context)
* The idea here is that the caller has verified that the expression contains
* one or more Var or Param nodes (as appropriate for the caller's need), and
* now wishes to prove that the expression result will be NULL if any of these
- * inputs is NULL. If we return false, then the proof succeeded.
+ * inputs is NULL. If we return false, then the proof succeeded.
*/
bool
contain_nonstrict_functions(Node *clause)
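A minimal model of the strictness property this proof relies on, with a has-value flag standing in for SQL NULL; strict_add and coalesce2 are invented stand-ins for a strict and a non-strict function:

#include <stdbool.h>
#include <stdio.h>

typedef struct { bool isnull; int value; } Datum;

/* Strict: any NULL input forces a NULL result. */
static Datum strict_add(Datum a, Datum b)
{
    Datum r = {true, 0};
    if (a.isnull || b.isnull)
        return r;
    r.isnull = false;
    r.value = a.value + b.value;
    return r;
}

/* Non-strict: COALESCE-like, can hide a NULL input and break the proof. */
static Datum coalesce2(Datum a, Datum b)
{
    return a.isnull ? b : a;
}

int main(void)
{
    Datum x   = {true, 0};     /* x IS NULL */
    Datum one = {false, 1};

    printf("strict:     x + 1          is %s\n",
           strict_add(x, one).isnull ? "NULL" : "non-NULL");
    printf("non-strict: coalesce(x, 1) is %s\n",
           coalesce2(x, one).isnull ? "NULL" : "non-NULL");
    return 0;
}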
@@ -1195,7 +1195,7 @@ contain_nonstrict_functions_walker(Node *node, void *context)
* Recursively search for leaky functions within a clause.
*
* Returns true if any function call with side-effect may be present in the
- * clause. Qualifiers from outside a security_barrier view should not
+ * clause. Qualifiers from outside a security_barrier view should not

* be pushed down into the view, lest the contents of tuples intended to be
* filtered out be revealed via side effects.
*/
@@ -1334,7 +1334,7 @@ contain_leaky_functions_walker(Node *node, void *context)
*
* Returns the set of all Relids that are referenced in the clause in such
* a way that the clause cannot possibly return TRUE if any of these Relids
- * is an all-NULL row. (It is OK to err on the side of conservatism; hence
+ * is an all-NULL row. (It is OK to err on the side of conservatism; hence
* the analysis here is simplistic.)
*
* The semantics here are subtly different from contain_nonstrict_functions:
@@ -1440,7 +1440,7 @@ find_nonnullable_rels_walker(Node *node, bool top_level)
* could be FALSE (hence not NULL). However, if *all* the
* arms produce NULL then the result is NULL, so we can take
* the intersection of the sets of nonnullable rels, just as
- * for OR. Fall through to share code.
+ * for OR. Fall through to share code.
*/
/* FALL THRU */
case OR_EXPR:
@@ -1648,7 +1648,7 @@ find_nonnullable_vars_walker(Node *node, bool top_level)
* could be FALSE (hence not NULL). However, if *all* the
* arms produce NULL then the result is NULL, so we can take
* the intersection of the sets of nonnullable vars, just as
- * for OR. Fall through to share code.
+ * for OR. Fall through to share code.
*/
/* FALL THRU */
case OR_EXPR:
@@ -1918,7 +1918,7 @@ is_strict_saop(ScalarArrayOpExpr *expr, bool falseOK)
* variables of the current query level and no uses of volatile functions.
* Such an expr is not necessarily a true constant: it can still contain
* Params and outer-level Vars, not to mention functions whose results
- * may vary from one statement to the next. However, the expr's value
+ * may vary from one statement to the next. However, the expr's value
* will be constant over any one scan of the current query, so it can be
* used as, eg, an indexscan key.
*
@@ -2180,7 +2180,7 @@ rowtype_field_matches(Oid rowtypeid, int fieldnum,
* expression tree, for example "2 + 2" => "4". More interestingly,
* we can reduce certain boolean expressions even when they contain
* non-constant subexpressions: "x OR true" => "true" no matter what
- * the subexpression x is. (XXX We assume that no such subexpression
+ * the subexpression x is. (XXX We assume that no such subexpression
* will have important side-effects, which is not necessarily a good
* assumption in the presence of user-defined functions; do we need a
* pg_proc flag that prevents discarding the execution of a function?)
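Both reductions can be shown on a toy tagged-union expression type (no relation to the real Node tree); note that the OR case folds without ever looking inside x:

#include <stdio.h>
#include <stdlib.h>

typedef enum { CONST_INT, CONST_BOOL, VAR_BOOL, ADD, OR } Tag;

typedef struct E { Tag tag; int val; struct E *l, *r; } E;

static E *mk(Tag tag, int val, E *l, E *r)
{
    E *e = malloc(sizeof(E));
    e->tag = tag; e->val = val; e->l = l; e->r = r;
    return e;
}

static E *fold(E *e)
{
    if (e->tag == ADD || e->tag == OR)
    {
        E *l = fold(e->l);
        E *r = fold(e->r);

        if (e->tag == ADD && l->tag == CONST_INT && r->tag == CONST_INT)
            return mk(CONST_INT, l->val + r->val, NULL, NULL); /* 2+2 => 4 */

        /* "x OR true" => "true", whatever subexpression x is */
        if (e->tag == OR && ((l->tag == CONST_BOOL && l->val) ||
                             (r->tag == CONST_BOOL && r->val)))
            return mk(CONST_BOOL, 1, NULL, NULL);

        return mk(e->tag, 0, l, r);
    }
    return e;
}

int main(void)
{
    E *sum = fold(mk(ADD, 0, mk(CONST_INT, 2, NULL, NULL),
                     mk(CONST_INT, 2, NULL, NULL)));
    E *orx = fold(mk(OR, 0, mk(VAR_BOOL, 0, NULL, NULL),
                     mk(CONST_BOOL, 1, NULL, NULL)));

    printf("2 + 2     => %d\n", sum->val);
    printf("x OR true => const %d\n", orx->val);
    return 0;
}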
@@ -2193,7 +2193,7 @@ rowtype_field_matches(Oid rowtypeid, int fieldnum,
*
* Whenever a function is eliminated from the expression by means of
* constant-expression evaluation or inlining, we add the function to
- * root->glob->invalItems. This ensures the plan is known to depend on
+ * root->glob->invalItems. This ensures the plan is known to depend on
* such functions, even though they aren't referenced anymore.
*
* We assume that the tree has already been type-checked and contains
@@ -2370,7 +2370,7 @@ eval_const_expressions_mutator(Node *node,
/*
* Code for op/func reduction is pretty bulky, so split it out
- * as a separate function. Note: exprTypmod normally returns
+ * as a separate function. Note: exprTypmod normally returns
* -1 for a FuncExpr, but not when the node is recognizably a
* length coercion; we want to preserve the typmod in the
* eventual Const if so.
@@ -2414,7 +2414,7 @@ eval_const_expressions_mutator(Node *node,
OpExpr *newexpr;
/*
- * Need to get OID of underlying function. Okay to scribble
+ * Need to get OID of underlying function. Okay to scribble
* on input to this extent.
*/
set_opfuncid(expr);
@@ -2517,7 +2517,7 @@ eval_const_expressions_mutator(Node *node,
/* (NOT okay to try to inline it, though!) */
/*
- * Need to get OID of underlying function. Okay to
+ * Need to get OID of underlying function. Okay to
* scribble on input to this extent.
*/
set_opfuncid((OpExpr *) expr); /* rely on struct
@@ -2882,13 +2882,13 @@ eval_const_expressions_mutator(Node *node,
* TRUE: drop all remaining alternatives
* If the first non-FALSE alternative is a constant TRUE,
* we can simplify the entire CASE to that alternative's
- * expression. If there are no non-FALSE alternatives,
+ * expression. If there are no non-FALSE alternatives,
* we simplify the entire CASE to the default result (ELSE).
*
* If we have a simple-form CASE with constant test
* expression, we substitute the constant value for contained
* CaseTestExpr placeholder nodes, so that we have the
- * opportunity to reduce constant test conditions. For
+ * opportunity to reduce constant test conditions. For
* example this allows
* CASE 0 WHEN 0 THEN 1 ELSE 1/0 END
* to reduce to 1 rather than drawing a divide-by-0 error.
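The point is that the fold must pick the live arm before evaluating anything. A standalone sketch, with function pointers standing in for unevaluated arms (the zero variable defeats the compiler's own constant folding):

#include <stdio.h>

static int zero = 0;
static int then_arm(void) { return 1; }
static int else_arm(void) { return 1 / zero; }   /* would trap if ever run */

static int fold_case(int test_expr, int when_const,
                     int (*then_fn)(void), int (*else_fn)(void))
{
    /* The test expression is constant, so choose the live arm now and
     * never evaluate the dead one. */
    return (test_expr == when_const) ? then_fn() : else_fn();
}

int main(void)
{
    /* CASE 0 WHEN 0 THEN 1 ELSE 1/0 END  =>  1, with no division error */
    printf("%d\n", fold_case(0, 0, then_arm, else_arm));
    return 0;
}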
@@ -3110,7 +3110,7 @@ eval_const_expressions_mutator(Node *node,
{
/*
* We can optimize field selection from a whole-row Var into a
- * simple Var. (This case won't be generated directly by the
+ * simple Var. (This case won't be generated directly by the
* parser, because ParseComplexProjection short-circuits it.
* But it can arise while simplifying functions.) Also, we
* can optimize field selection from a RowExpr construct.
@@ -3368,7 +3368,7 @@ simplify_or_arguments(List *args,
/*
* Since the parser considers OR to be a binary operator, long OR lists
* become deeply nested expressions. We must flatten these into long
- * argument lists of a single OR operator. To avoid blowing out the stack
+ * argument lists of a single OR operator. To avoid blowing out the stack
* with recursion of eval_const_expressions, we resort to some tenseness
* here: we keep a list of not-yet-processed inputs, and handle flattening
* of nested ORs by prepending to the to-do list instead of recursing.
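The same trick in miniature: walk a deeply right-nested OR tree with an explicit to-do stack rather than recursion. This sketch uses a small array where the real code prepends to a List, but the shape is the same:

#include <stdio.h>
#include <stdlib.h>

typedef struct Node {
    int leaf;                  /* nonzero: a leaf argument with this id */
    struct Node *l, *r;        /* children when the node is an OR */
} Node;

static Node *mk_leaf(int id)
{
    Node *n = calloc(1, sizeof(Node)); n->leaf = id; return n;
}
static Node *mk_or(Node *l, Node *r)
{
    Node *n = calloc(1, sizeof(Node)); n->l = l; n->r = r; return n;
}

int main(void)
{
    /* Build OR(1, OR(2, ... OR(99, 100))) -- the deeply nested shape a
     * binary OR parser produces for a long list. */
    Node *tree = mk_leaf(100);
    for (int i = 99; i >= 1; i--)
        tree = mk_or(mk_leaf(i), tree);

    /* The to-do stack replaces recursion: pop a node; emit leaves, and
     * push an OR's children back (right first, preserving order). */
    Node *todo[256];
    int   top = 0;
    todo[top++] = tree;
    while (top > 0)
    {
        Node *n = todo[--top];
        if (n->leaf)
            printf("%d ", n->leaf);
        else
        {
            todo[top++] = n->r;
            todo[top++] = n->l;
        }
    }
    putchar('\n');
    return 0;
}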
@@ -3416,7 +3416,7 @@ simplify_or_arguments(List *args,
}
/*
- * OK, we have a const-simplified non-OR argument. Process it per
+ * OK, we have a const-simplified non-OR argument. Process it per
* comments above.
*/
if (IsA(arg, Const))
@@ -3651,7 +3651,7 @@ simplify_function(Oid funcid, Oid result_type, int32 result_typmod,
* deliver a constant result, use a transform function to generate a
* substitute node tree, or expand in-line the body of the function
* definition (which only works for simple SQL-language functions, but
- * that is a common case). Each case needs access to the function's
+ * that is a common case). Each case needs access to the function's
* pg_proc tuple, so fetch it just once.
*
* Note: the allow_non_const flag suppresses both the second and third
@@ -3689,7 +3689,7 @@ simplify_function(Oid funcid, Oid result_type, int32 result_typmod,
if (!newexpr && allow_non_const && OidIsValid(func_form->protransform))
{
/*
- * Build a dummy FuncExpr node containing the simplified arg list. We
+ * Build a dummy FuncExpr node containing the simplified arg list. We
* use this approach to present a uniform interface to the transform
* function regardless of how the function is actually being invoked.
*/
@@ -3897,7 +3897,7 @@ fetch_function_defaults(HeapTuple func_tuple)
*
* It is possible for some of the defaulted arguments to be polymorphic;
* therefore we can't assume that the default expressions have the correct
- * data types already. We have to re-resolve polymorphics and do coercion
+ * data types already. We have to re-resolve polymorphics and do coercion
* just like the parser did.
*
* This should be a no-op if there are no polymorphic arguments,
@@ -4060,7 +4060,7 @@ evaluate_function(Oid funcid, Oid result_type, int32 result_typmod,
* do not re-expand them. Also, if a parameter is used more than once
* in the SQL-function body, we require it not to contain any volatile
* functions (volatiles might deliver inconsistent answers) nor to be
- * unreasonably expensive to evaluate. The expensiveness check not only
+ * unreasonably expensive to evaluate. The expensiveness check not only
* prevents us from doing multiple evaluations of an expensive parameter
* at runtime, but is a safety valve to limit growth of an expression due
* to repeated inlining.
@@ -4103,7 +4103,7 @@ inline_function(Oid funcid, Oid result_type, Oid result_collid,
/*
* Forget it if the function is not SQL-language or has other showstopper
- * properties. (The nargs check is just paranoia.)
+ * properties. (The nargs check is just paranoia.)
*/
if (funcform->prolang != SQLlanguageId ||
funcform->prosecdef ||
@@ -4181,7 +4181,7 @@ inline_function(Oid funcid, Oid result_type, Oid result_collid,
/*
* We just do parsing and parse analysis, not rewriting, because rewriting
* will not affect table-free-SELECT-only queries, which is all that we
- * care about. Also, we can punt as soon as we detect more than one
+ * care about. Also, we can punt as soon as we detect more than one
* command in the function body.
*/
raw_parsetree_list = pg_parse_query(src);
@@ -4223,7 +4223,7 @@ inline_function(Oid funcid, Oid result_type, Oid result_collid,
/*
* Make sure the function (still) returns what it's declared to. This
* will raise an error if wrong, but that's okay since the function would
- * fail at runtime anyway. Note that check_sql_fn_retval will also insert
+ * fail at runtime anyway. Note that check_sql_fn_retval will also insert
* a RelabelType if needed to make the tlist expression match the declared
* type of the function.
*
@@ -4268,7 +4268,7 @@ inline_function(Oid funcid, Oid result_type, Oid result_collid,
/*
* We may be able to do it; there are still checks on parameter usage to
* make, but those are most easily done in combination with the actual
- * substitution of the inputs. So start building expression with inputs
+ * substitution of the inputs. So start building expression with inputs
* substituted.
*/
usecounts = (int *) palloc0(funcform->pronargs * sizeof(int));
@@ -4468,7 +4468,7 @@ evaluate_expr(Expr *expr, Oid result_type, int32 result_typmod,
fix_opfuncids((Node *) expr);
/*
- * Prepare expr for execution. (Note: we can't use ExecPrepareExpr
+ * Prepare expr for execution. (Note: we can't use ExecPrepareExpr
* because it'd result in recursively invoking eval_const_expressions.)
*/
exprstate = ExecInitExpr(expr, NULL);
@@ -4580,7 +4580,7 @@ inline_set_returning_function(PlannerInfo *root, RangeTblEntry *rte)
* Refuse to inline if the arguments contain any volatile functions or
* sub-selects. Volatile functions are rejected because inlining may
* result in the arguments being evaluated multiple times, risking a
- * change in behavior. Sub-selects are rejected partly for implementation
+ * change in behavior. Sub-selects are rejected partly for implementation
* reasons (pushing them down another level might change their behavior)
* and partly because they're likely to be expensive and so multiple
* evaluation would be bad.
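The hazard is easy to reproduce outside the planner: duplicating a call to a side-effecting function is observably different from evaluating it once and reusing the result. next_val() below is an invented stand-in for a volatile function such as random():

#include <stdio.h>

static int counter = 0;

/* "Volatile": returns a different value on every call. */
static int next_val(void) { return counter++; }

int main(void)
{
    /* Evaluate once, then reference the result twice: both uses agree. */
    int v = next_val();
    printf("single evaluation:     %d == %d\n", v, v);

    /* Textually duplicate the call, as a naive rewrite would: the two
     * "copies" of the same expression now disagree. */
    printf("duplicated evaluation: %d vs %d\n", next_val(), next_val());
    return 0;
}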
@@ -4607,7 +4607,7 @@ inline_set_returning_function(PlannerInfo *root, RangeTblEntry *rte)
/*
* Forget it if the function is not SQL-language or has other showstopper
- * properties. In particular it mustn't be declared STRICT, since we
+ * properties. In particular it mustn't be declared STRICT, since we
* couldn't enforce that. It also mustn't be VOLATILE, because that is
* supposed to cause it to be executed with its own snapshot, rather than
* sharing the snapshot of the calling query. (Rechecking proretset is
@@ -4637,9 +4637,9 @@ inline_set_returning_function(PlannerInfo *root, RangeTblEntry *rte)
/*
* When we call eval_const_expressions below, it might try to add items to
- * root->glob->invalItems. Since it is running in the temp context, those
+ * root->glob->invalItems. Since it is running in the temp context, those
* items will be in that context, and will need to be copied out if we're
- * successful. Temporarily reset the list so that we can keep those items
+ * successful. Temporarily reset the list so that we can keep those items
* separate from the pre-existing list contents.
*/
saveInvalItems = root->glob->invalItems;
@@ -4669,7 +4669,7 @@ inline_set_returning_function(PlannerInfo *root, RangeTblEntry *rte)
/*
* Run eval_const_expressions on the function call. This is necessary to
* ensure that named-argument notation is converted to positional notation
- * and any default arguments are inserted. It's a bit of overkill for the
+ * and any default arguments are inserted. It's a bit of overkill for the
* arguments, since they'll get processed again later, but no harm will be
* done.
*/
@@ -4721,7 +4721,7 @@ inline_set_returning_function(PlannerInfo *root, RangeTblEntry *rte)
/*
* Make sure the function (still) returns what it's declared to. This
* will raise an error if wrong, but that's okay since the function would
- * fail at runtime anyway. Note that check_sql_fn_retval will also insert
+ * fail at runtime anyway. Note that check_sql_fn_retval will also insert
* RelabelType(s) and/or NULL columns if needed to make the tlist
* expression(s) match the declared type of the function.
*
diff --git a/src/backend/optimizer/util/joininfo.c b/src/backend/optimizer/util/joininfo.c
index 1c57c7a44b9..d5f92b34234 100644
--- a/src/backend/optimizer/util/joininfo.c
+++ b/src/backend/optimizer/util/joininfo.c
@@ -83,7 +83,7 @@ have_relevant_joinclause(PlannerInfo *root,
* Add 'restrictinfo' to the joininfo list of each relation it requires.
*
* Note that the same copy of the restrictinfo node is linked to by all the
- * lists it is in. This allows us to exploit caching of information about
+ * lists it is in. This allows us to exploit caching of information about
* the restriction clause (but we must be careful that the information does
* not depend on context).
*
diff --git a/src/backend/optimizer/util/pathnode.c b/src/backend/optimizer/util/pathnode.c
index 64b17051913..0652a2d3598 100644
--- a/src/backend/optimizer/util/pathnode.c
+++ b/src/backend/optimizer/util/pathnode.c
@@ -127,11 +127,11 @@ compare_fractional_path_costs(Path *path1, Path *path2,
*
* The fuzz_factor argument must be 1.0 plus delta, where delta is the
* fraction of the smaller cost that is considered to be a significant
- * difference. For example, fuzz_factor = 1.01 makes the fuzziness limit
+ * difference. For example, fuzz_factor = 1.01 makes the fuzziness limit
* be 1% of the smaller cost.
*
* The two paths are said to have "equal" costs if both startup and total
- * costs are fuzzily the same. Path1 is said to be better than path2 if
+ * costs are fuzzily the same. Path1 is said to be better than path2 if
* it has fuzzily better startup cost and fuzzily no worse total cost,
* or if it has fuzzily better total cost and fuzzily no worse startup cost.
* Path2 is better than path1 if the reverse holds. Finally, if one path
@@ -207,12 +207,12 @@ compare_path_costs_fuzzily(Path *path1, Path *path2, double fuzz_factor,
*
* cheapest_total_path is normally the cheapest-total-cost unparameterized
* path; but if there are no unparameterized paths, we assign it to be the
- * best (cheapest least-parameterized) parameterized path. However, only
+ * best (cheapest least-parameterized) parameterized path. However, only
* unparameterized paths are considered candidates for cheapest_startup_path,
* so that will be NULL if there are no unparameterized paths.
*
* The cheapest_parameterized_paths list collects all parameterized paths
- * that have survived the add_path() tournament for this relation. (Since
+ * that have survived the add_path() tournament for this relation. (Since
* add_path ignores pathkeys and startup cost for a parameterized path,
* these will be paths that have best total cost or best row count for their
* parameterization.) cheapest_parameterized_paths always includes the
@@ -431,7 +431,7 @@ add_path(RelOptInfo *parent_rel, Path *new_path)
p1_next = lnext(p1);
/*
- * Do a fuzzy cost comparison with 1% fuzziness limit. (XXX does this
+ * Do a fuzzy cost comparison with 1% fuzziness limit. (XXX does this
* percentage need to be user-configurable?)
*/
costcmp = compare_path_costs_fuzzily(new_path, old_path, 1.01,
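The comparison rule reads more easily in isolation. A standalone sketch with a stub Path carrying just the two costs — this is the contract described above, not the actual implementation:

#include <stdio.h>

typedef struct { double startup_cost; double total_cost; } Path;

/* -1: p1 fuzzily better, +1: p2 fuzzily better, 0: fuzzily equal */
static int
compare_costs_fuzzily(const Path *p1, const Path *p2, double fuzz_factor)
{
    int startup = 0;   /* +1 means p1's startup cost is fuzzily worse */
    int total = 0;

    if (p1->startup_cost > p2->startup_cost * fuzz_factor)
        startup = 1;
    else if (p2->startup_cost > p1->startup_cost * fuzz_factor)
        startup = -1;

    if (p1->total_cost > p2->total_cost * fuzz_factor)
        total = 1;
    else if (p2->total_cost > p1->total_cost * fuzz_factor)
        total = -1;

    /* Fuzzily better on one measure and no worse on the other wins. */
    if ((startup < 0 || total < 0) && startup <= 0 && total <= 0)
        return -1;
    if ((startup > 0 || total > 0) && startup >= 0 && total >= 0)
        return 1;
    return 0;
}

int main(void)
{
    Path a = {100.0, 1000.0};
    Path b = {100.5, 1005.0};    /* within 1% on both: fuzzily equal */
    Path c = {100.0, 1100.0};    /* total cost clearly worse than a */

    printf("a vs b: %d\n", compare_costs_fuzzily(&a, &b, 1.01));  /* 0 */
    printf("a vs c: %d\n", compare_costs_fuzzily(&a, &c, 1.01));  /* -1 */
    return 0;
}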
@@ -607,7 +607,7 @@ add_path(RelOptInfo *parent_rel, Path *new_path)
* and have lower bounds for its costs.
*
* Note that we do not know the path's rowcount, since getting an estimate for
- * that is too expensive to do before prechecking. We assume here that paths
+ * that is too expensive to do before prechecking. We assume here that paths
* of a superset parameterization will generate fewer rows; if that holds,
* then paths with different parameterizations cannot dominate each other
* and so we can simply ignore existing paths of another parameterization.
@@ -907,7 +907,7 @@ create_append_path(RelOptInfo *rel, List *subpaths, Relids required_outer)
* Compute rows and costs as sums of subplan rows and costs. We charge
* nothing extra for the Append itself, which perhaps is too optimistic,
* but since it doesn't do any selection or projection, it is a pretty
- * cheap node. If you change this, see also make_append().
+ * cheap node. If you change this, see also make_append().
*/
pathnode->path.rows = 0;
pathnode->path.startup_cost = 0;
@@ -1456,7 +1456,7 @@ translate_sub_tlist(List *tlist, int relid)
*
* colnos is an integer list of output column numbers (resno's). We are
* interested in whether rows consisting of just these columns are certain
- * to be distinct. "Distinctness" is defined according to whether the
+ * to be distinct. "Distinctness" is defined according to whether the
* corresponding upper-level equality operators listed in opids would think
* the values are distinct. (Note: the opids entries could be cross-type
* operators, and thus not exactly the equality operators that the subquery
@@ -1577,7 +1577,7 @@ query_is_distinct_for(Query *query, List *colnos, List *opids)
* distinct_col_search - subroutine for query_is_distinct_for
*
* If colno is in colnos, return the corresponding element of opids,
- * else return InvalidOid. (We expect colnos does not contain duplicates,
+ * else return InvalidOid. (We expect colnos does not contain duplicates,
* so the result is well-defined.)
*/
static Oid
@@ -1977,10 +1977,10 @@ create_hashjoin_path(PlannerInfo *root,
/*
* A hashjoin never has pathkeys, since its output ordering is
- * unpredictable due to possible batching. XXX If the inner relation is
+ * unpredictable due to possible batching. XXX If the inner relation is
* small enough, we could instruct the executor that it must not batch,
* and then we could assume that the output inherits the outer relation's
- * ordering, which might save a sort step. However there is considerable
+ * ordering, which might save a sort step. However there is considerable
* downside if our estimate of the inner relation size is badly off. For
* the moment we don't risk it. (Note also that if we wanted to take this
* seriously, joinpath.c would have to consider many more paths for the
@@ -2007,7 +2007,7 @@ create_hashjoin_path(PlannerInfo *root,
* same parameterization level, ensuring that they all enforce the same set
* of join quals (and thus that that parameterization can be attributed to
* an append path built from such paths). Currently, only a few path types
- * are supported here, though more could be added at need. We return NULL
+ * are supported here, though more could be added at need. We return NULL
* if we can't reparameterize the given path.
*
* Note: we intentionally do not pass created paths to add_path(); it would
@@ -2039,7 +2039,7 @@ reparameterize_path(PlannerInfo *root, Path *path,
/*
* We can't use create_index_path directly, and would not want
* to because it would re-compute the indexqual conditions
- * which is wasted effort. Instead we hack things a bit:
+ * which is wasted effort. Instead we hack things a bit:
* flat-copy the path node, revise its param_info, and redo
* the cost estimate.
*/
diff --git a/src/backend/optimizer/util/placeholder.c b/src/backend/optimizer/util/placeholder.c
index 5049ba1c5a9..983c0ee61b7 100644
--- a/src/backend/optimizer/util/placeholder.c
+++ b/src/backend/optimizer/util/placeholder.c
@@ -60,7 +60,7 @@ make_placeholder_expr(PlannerInfo *root, Expr *expr, Relids phrels)
* We build PlaceHolderInfos only for PHVs that are still present in the
* simplified query passed to query_planner().
*
- * Note: this should only be called after query_planner() has started. Also,
+ * Note: this should only be called after query_planner() has started. Also,
* create_new_ph must not be TRUE after deconstruct_jointree begins, because
* make_outerjoininfo assumes that we already know about all placeholders.
*/
@@ -94,7 +94,7 @@ find_placeholder_info(PlannerInfo *root, PlaceHolderVar *phv,
/*
* Any referenced rels that are outside the PHV's syntactic scope are
* LATERAL references, which should be included in ph_lateral but not in
- * ph_eval_at. If no referenced rels are within the syntactic scope,
+ * ph_eval_at. If no referenced rels are within the syntactic scope,
* force evaluation at the syntactic location.
*/
rels_used = pull_varnos((Node *) phv->phexpr);
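The partitioning described here is plain bitmask arithmetic. A sketch with invented relid bits:

#include <stdio.h>

int main(void)
{
    unsigned rels_used = 0x1 | 0x4 | 0x8;  /* rels 1, 3, 4 referenced */
    unsigned syn_scope = 0x4 | 0x8;        /* rels 3, 4 are in scope  */

    unsigned lateral = rels_used & ~syn_scope;  /* outside: ph_lateral */
    unsigned eval_at = rels_used & syn_scope;   /* inside:  ph_eval_at */

    if (eval_at == 0)
        eval_at = syn_scope;   /* nothing inside: force syntactic location */

    printf("ph_lateral = %#x, ph_eval_at = %#x\n", lateral, eval_at);
    return 0;
}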
diff --git a/src/backend/optimizer/util/plancat.c b/src/backend/optimizer/util/plancat.c
index 954666ce04c..c8ebe535e93 100644
--- a/src/backend/optimizer/util/plancat.c
+++ b/src/backend/optimizer/util/plancat.c
@@ -427,12 +427,12 @@ estimate_rel_size(Relation rel, int32 *attr_widths,
* minimum size estimate of 10 pages. The idea here is to avoid
* assuming a newly-created table is really small, even if it
* currently is, because that may not be true once some data gets
- * loaded into it. Once a vacuum or analyze cycle has been done
+ * loaded into it. Once a vacuum or analyze cycle has been done
* on it, it's more reasonable to believe the size is somewhat
* stable.
*
* (Note that this is only an issue if the plan gets cached and
- * used again after the table has been filled. What we're trying
+ * used again after the table has been filled. What we're trying
* to avoid is using a nestloop-type plan on a table that has
* grown substantially since the plan was made. Normally,
* autovacuum/autoanalyze will occur once enough inserts have
@@ -441,7 +441,7 @@ estimate_rel_size(Relation rel, int32 *attr_widths,
* such as temporary tables.)
*
* We approximate "never vacuumed" by "has relpages = 0", which
- * means this will also fire on genuinely empty relations. Not
+ * means this will also fire on genuinely empty relations. Not
* great, but fortunately that's a seldom-seen case in the real
* world, and it shouldn't degrade the quality of the plan too
* much anyway to err in this direction.
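A standalone sketch of the defensive clamp — the 10-page floor and the "relpages == 0 means never vacuumed" test are the rules quoted above; the function around them is invented:

#include <stdio.h>

static unsigned
estimate_pages(unsigned relpages, unsigned curpages)
{
    /* "Never vacuumed" approximated by relpages == 0: don't believe a
     * tiny current size, since the cached plan may outlive it. */
    if (relpages == 0 && curpages < 10)
        return 10;
    return curpages;
}

int main(void)
{
    printf("fresh empty table:    %u pages\n", estimate_pages(0, 0));
    printf("fresh small table:    %u pages\n", estimate_pages(0, 3));
    printf("analyzed small table: %u pages\n", estimate_pages(3, 3));
    return 0;
}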
@@ -786,7 +786,7 @@ relation_excluded_by_constraints(PlannerInfo *root,
return false;
/*
- * OK to fetch the constraint expressions. Include "col IS NOT NULL"
+ * OK to fetch the constraint expressions. Include "col IS NOT NULL"
* expressions for attnotnull columns, in case we can refute those.
*/
constraint_pred = get_relation_constraints(root, rte->relid, rel, true);
@@ -834,7 +834,7 @@ relation_excluded_by_constraints(PlannerInfo *root,
* Exception: if there are any dropped columns, we punt and return NIL.
* Ideally we would like to handle the dropped-column case too. However this
* creates problems for ExecTypeFromTL, which may be asked to build a tupdesc
- * for a tlist that includes vars of no-longer-existent types. In theory we
+ * for a tlist that includes vars of no-longer-existent types. In theory we
* could dig out the required info from the pg_attribute entries of the
* relation, but that data is not readily available to ExecTypeFromTL.
* For now, we don't apply the physical-tlist optimization when there are
diff --git a/src/backend/optimizer/util/predtest.c b/src/backend/optimizer/util/predtest.c
index 2ea2d7bafd9..20dd44dac57 100644
--- a/src/backend/optimizer/util/predtest.c
+++ b/src/backend/optimizer/util/predtest.c
@@ -133,7 +133,7 @@ predicate_implied_by(List *predicate_list, List *restrictinfo_list)
/*
* If either input is a single-element list, replace it with its lone
- * member; this avoids one useless level of AND-recursion. We only need
+ * member; this avoids one useless level of AND-recursion. We only need
* to worry about this at top level, since eval_const_expressions should
* have gotten rid of any trivial ANDs or ORs below that.
*/
@@ -191,7 +191,7 @@ predicate_refuted_by(List *predicate_list, List *restrictinfo_list)
/*
* If either input is a single-element list, replace it with its lone
- * member; this avoids one useless level of AND-recursion. We only need
+ * member; this avoids one useless level of AND-recursion. We only need
* to worry about this at top level, since eval_const_expressions should
* have gotten rid of any trivial ANDs or ORs below that.
*/
@@ -225,7 +225,7 @@ predicate_refuted_by(List *predicate_list, List *restrictinfo_list)
* OR-expr A => AND-expr B iff: A => each of B's components
* OR-expr A => OR-expr B iff: each of A's components => any of B's
*
- * An "atom" is anything other than an AND or OR node. Notice that we don't
+ * An "atom" is anything other than an AND or OR node. Notice that we don't
* have any special logic to handle NOT nodes; these should have been pushed
* down or eliminated where feasible by prepqual.c.
*
@@ -658,7 +658,7 @@ predicate_refuted_by_recurse(Node *clause, Node *predicate)
* We cannot make the stronger conclusion that B is refuted if B
* implies A's arg; that would only prove that B is not-TRUE, not
* that it's not NULL either. Hence use equal() rather than
- * predicate_implied_by_recurse(). We could do the latter if we
+ * predicate_implied_by_recurse(). We could do the latter if we
* ever had a need for the weak form of refutation.
*/
not_arg = extract_strong_not_arg(clause);
@@ -820,7 +820,7 @@ predicate_classify(Node *clause, PredIterInfo info)
}
/*
- * PredIterInfo routines for iterating over regular Lists. The iteration
+ * PredIterInfo routines for iterating over regular Lists. The iteration
* state variable is the next ListCell to visit.
*/
static void
@@ -1014,13 +1014,13 @@ arrayexpr_cleanup_fn(PredIterInfo info)
* implies another:
*
* A simple and general way is to see if they are equal(); this works for any
- * kind of expression. (Actually, there is an implied assumption that the
+ * kind of expression. (Actually, there is an implied assumption that the
* functions in the expression are immutable, ie dependent only on their input
* arguments --- but this was checked for the predicate by the caller.)
*
* When the predicate is of the form "foo IS NOT NULL", we can conclude that
* the predicate is implied if the clause is a strict operator or function
- * that has "foo" as an input. In this case the clause must yield NULL when
+ * that has "foo" as an input. In this case the clause must yield NULL when
* "foo" is NULL, which we can take as equivalent to FALSE because we know
* we are within an AND/OR subtree of a WHERE clause. (Again, "foo" is
* already known immutable, so the clause will certainly always fail.)
@@ -1244,7 +1244,7 @@ list_member_strip(List *list, Expr *datum)
*
* The strategy numbers defined by btree indexes (see access/skey.h) are:
* (1) < (2) <= (3) = (4) >= (5) >
- * and in addition we use (6) to represent <>. <> is not a btree-indexable
+ * and in addition we use (6) to represent <>. <> is not a btree-indexable
* operator, but we assume here that if an equality operator of a btree
* opfamily has a negator operator, the negator behaves as <> for the opfamily.
* (This convention is also known to get_op_btree_interpretation().)
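One cell of the implication-table idea, checked by brute force: with clause "x < c1" and predicate "x <= c2", the table's answer is to compare the constants with "<=". The ranges below are arbitrary test bounds:

#include <stdbool.h>
#include <stdio.h>

/* Does "x < c1" imply "x <= c2" for every x in a test range? */
static bool implies_bruteforce(int c1, int c2)
{
    for (int x = -1000; x <= 1000; x++)
        if (x < c1 && !(x <= c2))
            return false;
    return true;
}

int main(void)
{
    for (int c1 = -3; c1 <= 3; c1++)
        for (int c2 = -3; c2 <= 3; c2++)
        {
            bool rule  = (c1 <= c2);             /* constants via "<=" */
            bool truth = implies_bruteforce(c1, c2);
            if (rule && !truth)                  /* must never happen */
                printf("unsound at c1=%d c2=%d\n", c1, c2);
        }
    /* For integer x the rule is conservative (it misses c1 == c2 + 1),
     * which is fine: failing to prove an implication is always safe. */
    printf("rule \"compare constants with <=\" is sound\n");
    return 0;
}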
@@ -1328,7 +1328,7 @@ static const StrategyNumber BT_refute_table[6][6] = {
* if not able to prove it.
*
* What we look for here is binary boolean opclauses of the form
- * "foo op constant", where "foo" is the same in both clauses. The operators
+ * "foo op constant", where "foo" is the same in both clauses. The operators
* and constants can be different but the operators must be in the same btree
* operator family. We use the above operator implication tables to
* derive implications between nonidentical clauses. (Note: "foo" is known
@@ -1418,7 +1418,7 @@ btree_predicate_proof(Expr *predicate, Node *clause, bool refute_it)
/*
* Check for matching subexpressions on the non-Const sides. We used to
* only allow a simple Var, but it's about as easy to allow any
- * expression. Remember we already know that the pred expression does not
+ * expression. Remember we already know that the pred expression does not
* contain any non-immutable functions, so identical expressions should
* yield identical results.
*/
@@ -1690,7 +1690,7 @@ get_btree_test_op(Oid pred_op, Oid clause_op, bool refute_it)
* Last check: test_op must be immutable.
*
* Note that we require only the test_op to be immutable, not the
- * original clause_op. (pred_op is assumed to have been checked
+ * original clause_op. (pred_op is assumed to have been checked
* immutable by the caller.) Essentially we are assuming that the
* opfamily is consistent even if it contains operators that are
* merely stable.
diff --git a/src/backend/optimizer/util/relnode.c b/src/backend/optimizer/util/relnode.c
index b6265b31675..1fd3160d22f 100644
--- a/src/backend/optimizer/util/relnode.c
+++ b/src/backend/optimizer/util/relnode.c
@@ -262,7 +262,7 @@ RelOptInfo *
find_join_rel(PlannerInfo *root, Relids relids)
{
/*
- * Switch to using hash lookup when list grows "too long". The threshold
+ * Switch to using hash lookup when list grows "too long". The threshold
* is arbitrary and is known only here.
*/
if (!root->join_rel_hash && list_length(root->join_rel_list) > 32)
@@ -448,7 +448,7 @@ build_join_rel(PlannerInfo *root,
/*
* Also, if dynamic-programming join search is active, add the new joinrel
- * to the appropriate sublist. Note: you might think the Assert on number
+ * to the appropriate sublist. Note: you might think the Assert on number
* of members should be for equality, but some of the level 1 rels might
* have been joinrels already, so we can only assert <=.
*/
@@ -529,7 +529,7 @@ build_joinrel_tlist(PlannerInfo *root, RelOptInfo *joinrel,
* the join list need only be computed once for any join RelOptInfo.
* The join list is fully determined by the set of rels making up the
* joinrel, so we should get the same results (up to ordering) from any
- * candidate pair of sub-relations. But the restriction list is whatever
+ * candidate pair of sub-relations. But the restriction list is whatever
* is not handled in the sub-relations, so it depends on which
* sub-relations are considered.
*
@@ -538,7 +538,7 @@ build_joinrel_tlist(PlannerInfo *root, RelOptInfo *joinrel,
* we put it into the joininfo list for the joinrel. Otherwise,
* the clause is now a restrict clause for the joined relation, and we
* return it to the caller of build_joinrel_restrictlist() to be stored in
- * join paths made from this pair of sub-relations. (It will not need to
+ * join paths made from this pair of sub-relations. (It will not need to
* be considered further up the join tree.)
*
* In many cases we will find the same RestrictInfos in both input
@@ -557,7 +557,7 @@ build_joinrel_tlist(PlannerInfo *root, RelOptInfo *joinrel,
*
* NB: Formerly, we made deep(!) copies of each input RestrictInfo to pass
* up to the join relation. I believe this is no longer necessary, because
- * RestrictInfo nodes are no longer context-dependent. Instead, just include
+ * RestrictInfo nodes are no longer context-dependent. Instead, just include
* the original nodes in the lists made for the join relation.
*/
static List *
@@ -577,7 +577,7 @@ build_joinrel_restrictlist(PlannerInfo *root,
result = subbuild_joinrel_restrictlist(joinrel, inner_rel->joininfo, result);
/*
- * Add on any clauses derived from EquivalenceClasses. These cannot be
+ * Add on any clauses derived from EquivalenceClasses. These cannot be
* redundant with the clauses in the joininfo lists, so don't bother
* checking.
*/
@@ -915,7 +915,7 @@ get_joinrel_parampathinfo(PlannerInfo *root, RelOptInfo *joinrel,
*restrict_clauses);
/*
- * And now we can build the ParamPathInfo. No point in saving the
+ * And now we can build the ParamPathInfo. No point in saving the
* input-pair-dependent clause list, though.
*
* Note: in GEQO mode, we'll be called in a temporary memory context, but
@@ -935,8 +935,8 @@ get_joinrel_parampathinfo(PlannerInfo *root, RelOptInfo *joinrel,
* Get the ParamPathInfo for a parameterized path for an append relation.
*
* For an append relation, the rowcount estimate will just be the sum of
- * the estimates for its children. However, we still need a ParamPathInfo
- * to flag the fact that the path requires parameters. So this just creates
+ * the estimates for its children. However, we still need a ParamPathInfo
+ * to flag the fact that the path requires parameters. So this just creates
* a suitable struct with zero ppi_rows (and no ppi_clauses either, since
* the Append node isn't responsible for checking quals).
*/
diff --git a/src/backend/optimizer/util/restrictinfo.c b/src/backend/optimizer/util/restrictinfo.c
index 55ce9d86543..aaf26b6fcd4 100644
--- a/src/backend/optimizer/util/restrictinfo.c
+++ b/src/backend/optimizer/util/restrictinfo.c
@@ -152,7 +152,7 @@ make_restrictinfo_from_bitmapqual(Path *bitmapqual,
/*
* Here, we only detect qual-free subplans. A qual-free subplan would
* cause us to generate "... OR true ..." which we may as well reduce
- * to just "true". We do not try to eliminate redundant subclauses
+ * to just "true". We do not try to eliminate redundant subclauses
* because (a) it's not as likely as in the AND case, and (b) we might
* well be working with hundreds or even thousands of OR conditions,
* perhaps from a long IN list. The performance of list_append_unique
@@ -250,7 +250,7 @@ make_restrictinfo_from_bitmapqual(Path *bitmapqual,
* We know that the index predicate must have been implied by
* the query condition as a whole, but it may or may not be
* implied by the conditions that got pushed into the
- * bitmapqual. Avoid generating redundant conditions.
+ * bitmapqual. Avoid generating redundant conditions.
*/
if (!predicate_implied_by(list_make1(pred), result))
result = lappend(result,
@@ -397,7 +397,7 @@ make_restrictinfo_internal(Expr *clause,
/*
* Fill in all the cacheable fields with "not yet set" markers. None of
- * these will be computed until/unless needed. Note in particular that we
+ * these will be computed until/unless needed. Note in particular that we
* don't mark a binary opclause as mergejoinable or hashjoinable here;
* that happens only if it appears in the right context (top level of a
* joinclause list).
diff --git a/src/backend/optimizer/util/tlist.c b/src/backend/optimizer/util/tlist.c
index 5cc3cdc15a6..c6cf64951c9 100644
--- a/src/backend/optimizer/util/tlist.c
+++ b/src/backend/optimizer/util/tlist.c
@@ -26,7 +26,7 @@
/*
* tlist_member
* Finds the (first) member of the given tlist whose expression is
- * equal() to the given expression. Result is NULL if no such member.
+ * equal() to the given expression. Result is NULL if no such member.
*/
TargetEntry *
tlist_member(Node *node, List *targetlist)
diff --git a/src/backend/optimizer/util/var.c b/src/backend/optimizer/util/var.c
index 4a3d5c8408e..5cfefd34519 100644
--- a/src/backend/optimizer/util/var.c
+++ b/src/backend/optimizer/util/var.c
@@ -165,7 +165,7 @@ pull_varnos_walker(Node *node, pull_varnos_context *context)
* lower than that if it references only a subset of the rels in its
* syntactic scope. It might also contain lateral references, but we
* should ignore such references when computing the set of varnos in
- * an expression tree. Also, if the PHV contains no variables within
+ * an expression tree. Also, if the PHV contains no variables within
* its syntactic scope, it will be forced to be evaluated exactly at
* the syntactic scope, so take that as the relid set.
*/
@@ -364,7 +364,7 @@ contain_var_clause_walker(Node *node, void *context)
*
* Returns true if any such Var found.
*
- * Will recurse into sublinks. Also, may be invoked directly on a Query.
+ * Will recurse into sublinks. Also, may be invoked directly on a Query.
*/
bool
contain_vars_of_level(Node *node, int levelsup)
@@ -424,10 +424,10 @@ contain_vars_of_level_walker(Node *node, int *sublevels_up)
* Find the parse location of any Var of the specified query level.
*
* Returns -1 if no such Var is in the querytree, or if they all have
- * unknown parse location. (The former case is probably caller error,
+ * unknown parse location. (The former case is probably caller error,
* but we don't bother to distinguish it from the latter case.)
*
- * Will recurse into sublinks. Also, may be invoked directly on a Query.
+ * Will recurse into sublinks. Also, may be invoked directly on a Query.
*
* Note: it might seem appropriate to merge this functionality into
* contain_vars_of_level, but that would complicate that function's API.
@@ -514,7 +514,7 @@ locate_var_of_level_walker(Node *node,
* Upper-level vars (with varlevelsup > 0) should not be seen here,
* likewise for upper-level Aggrefs and PlaceHolderVars.
*
- * Returns list of nodes found. Note the nodes themselves are not
+ * Returns list of nodes found. Note the nodes themselves are not
* copied, only referenced.
*
* Does not examine subqueries, therefore must only be used after reduction
@@ -591,7 +591,7 @@ pull_var_clause_walker(Node *node, pull_var_clause_context *context)
* flatten_join_alias_vars
* Replace Vars that reference JOIN outputs with references to the original
* relation variables instead. This allows quals involving such vars to be
- * pushed down. Whole-row Vars that reference JOIN relations are expanded
+ * pushed down. Whole-row Vars that reference JOIN relations are expanded
* into RowExpr constructs that name the individual output Vars. This
* is necessary since we will not scan the JOIN as a base relation, which
* is the only way that the executor can directly handle whole-row Vars.
@@ -603,7 +603,7 @@ pull_var_clause_walker(Node *node, pull_var_clause_context *context)
* entries might now be arbitrary expressions, not just Vars. This affects
* this function in one important way: we might find ourselves inserting
* SubLink expressions into subqueries, and we must make sure that their
- * Query.hasSubLinks fields get set to TRUE if so. If there are any
+ * Query.hasSubLinks fields get set to TRUE if so. If there are any
* SubLinks in the join alias lists, the outer Query should already have
* hasSubLinks = TRUE, so this is only relevant to un-flattened subqueries.
*
diff --git a/src/backend/parser/analyze.c b/src/backend/parser/analyze.c
index 7c31b9d65e8..655bb55c287 100644
--- a/src/backend/parser/analyze.c
+++ b/src/backend/parser/analyze.c
@@ -79,7 +79,7 @@ static void transformLockingClause(ParseState *pstate, Query *qry,
* Optionally, information about $n parameter types can be supplied.
* References to $n indexes not defined by paramTypes[] are disallowed.
*
- * The result is a Query node. Optimizable statements require considerable
+ * The result is a Query node. Optimizable statements require considerable
* transformation, while utility-type statements are simply hung off
* a dummy CMD_UTILITY Query node.
*/
@@ -457,7 +457,7 @@ transformInsertStmt(ParseState *pstate, InsertStmt *stmt)
/*
* If a non-nil rangetable/namespace was passed in, and we are doing
* INSERT/SELECT, arrange to pass the rangetable/namespace down to the
- * SELECT. This can only happen if we are inside a CREATE RULE, and in
+ * SELECT. This can only happen if we are inside a CREATE RULE, and in
* that case we want the rule's OLD and NEW rtable entries to appear as
* part of the SELECT's rtable, not as outer references for it. (Kluge!)
* The SELECT's joinlist is not affected however. We must do this before
@@ -642,7 +642,7 @@ transformInsertStmt(ParseState *pstate, InsertStmt *stmt)
* We must assign collations now because assign_query_collations
* doesn't process rangetable entries. We just assign all the
* collations independently in each row, and don't worry about
- * whether they are consistent vertically. The outer INSERT query
+ * whether they are consistent vertically. The outer INSERT query
* isn't going to care about the collations of the VALUES columns,
* so it's not worth the effort to identify a common collation for
* each one here. (But note this does have one user-visible
@@ -691,7 +691,7 @@ transformInsertStmt(ParseState *pstate, InsertStmt *stmt)
else
{
/*
- * Process INSERT ... VALUES with a single VALUES sublist. We treat
+ * Process INSERT ... VALUES with a single VALUES sublist. We treat
* this case separately for efficiency. The sublist is just computed
* directly as the Query's targetlist, with no VALUES RTE. So it
* works just like a SELECT without any FROM.
@@ -789,7 +789,7 @@ transformInsertRow(ParseState *pstate, List *exprlist,
* Check length of expr list. It must not have more expressions than
* there are target columns. We allow fewer, but only if no explicit
* columns list was given (the remaining columns are implicitly
- * defaulted). Note we must check this *after* transformation because
+ * defaulted). Note we must check this *after* transformation because
* that could expand '*' into multiple items.
*/
if (list_length(exprlist) > list_length(icolumns))
@@ -859,7 +859,7 @@ transformInsertRow(ParseState *pstate, List *exprlist,
* return -1 if expression isn't a RowExpr or a Var referencing one.
*
* This is currently used only for hint purposes, so we aren't terribly
- * tense about recognizing all possible cases. The Var case is interesting
+ * tense about recognizing all possible cases. The Var case is interesting
* because that's what we'll get in the INSERT ... SELECT (...) case.
*/
static int
@@ -1191,7 +1191,7 @@ transformValuesClause(ParseState *pstate, SelectStmt *stmt)
/*
* Ordinarily there can't be any current-level Vars in the expression
* lists, because the namespace was empty ... but if we're inside CREATE
- * RULE, then NEW/OLD references might appear. In that case we have to
+ * RULE, then NEW/OLD references might appear. In that case we have to
* mark the VALUES RTE as LATERAL.
*/
if (pstate->p_rtable != NIL &&
@@ -1413,7 +1413,7 @@ transformSetOperationStmt(ParseState *pstate, SelectStmt *stmt)
/*
* As a first step towards supporting sort clauses that are expressions
* using the output columns, generate a namespace entry that makes the
- * output columns visible. A Join RTE node is handy for this, since we
+ * output columns visible. A Join RTE node is handy for this, since we
* can easily control the Vars generated upon matches.
*
* Note: we don't yet do anything useful with such cases, but at least
@@ -1493,7 +1493,7 @@ transformSetOperationStmt(ParseState *pstate, SelectStmt *stmt)
* Recursively transform leaves and internal nodes of a set-op tree
*
* In addition to returning the transformed node, if targetlist isn't NULL
- * then we return a list of its non-resjunk TargetEntry nodes. For a leaf
+ * then we return a list of its non-resjunk TargetEntry nodes. For a leaf
* set-op node these are the actual targetlist entries; otherwise they are
* dummy entries created to carry the type, typmod, collation, and location
* (for error messages) of each output column of the set-op node. This info
@@ -1536,7 +1536,7 @@ transformSetOperationTree(ParseState *pstate, SelectStmt *stmt,
/*
* If an internal node of a set-op tree has ORDER BY, LIMIT, FOR UPDATE,
* or WITH clauses attached, we need to treat it like a leaf node to
- * generate an independent sub-Query tree. Otherwise, it can be
+ * generate an independent sub-Query tree. Otherwise, it can be
* represented by a SetOperationStmt node underneath the parent Query.
*/
if (stmt->op == SETOP_NONE)
@@ -1712,7 +1712,7 @@ transformSetOperationTree(ParseState *pstate, SelectStmt *stmt,
rescoltypmod = -1;
/*
- * Verify the coercions are actually possible. If not, we'd fail
+ * Verify the coercions are actually possible. If not, we'd fail
* later anyway, but we want to fail now while we have sufficient
* context to produce an error cursor position.
*
@@ -1721,7 +1721,7 @@ transformSetOperationTree(ParseState *pstate, SelectStmt *stmt,
* child query's semantics.
*
* If a child expression is an UNKNOWN-type Const or Param, we
- * want to replace it with the coerced expression. This can only
+ * want to replace it with the coerced expression. This can only
* happen when the child is a leaf set-op node. It's safe to
* replace the expression because if the child query's semantics
* depended on the type of this output column, it'd have already
@@ -2207,7 +2207,7 @@ transformCreateTableAsStmt(ParseState *pstate, CreateTableAsStmt *stmt)
/*
* A materialized view would either need to save parameters for use in
- * maintaining/loading the data or prohibit them entirely. The latter
+ * maintaining/loading the data or prohibit them entirely. The latter
* seems safer and more sane.
*/
if (query_contains_extern_params(query))
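Before moving on to kwlookup.c: the transformInsertRow hunk above quotes the rule that an INSERT's expression list may not be longer than the target-column list, and may be shorter only when no explicit column list was given. A minimal sketch of that check, with illustrative names and plain counts standing in for Lists:

#include <stdbool.h>
#include <stdio.h>

/*
 * Sketch of transformInsertRow's length rule (illustrative names): extra
 * expressions are always an error, while missing ones are tolerated only
 * when no explicit column list was written (the tail is defaulted).
 */
static bool
insert_row_length_ok(int nexprs, int ncolumns, bool explicit_cols)
{
    if (nexprs > ncolumns)
        return false;           /* more expressions than target columns */
    if (nexprs < ncolumns && explicit_cols)
        return false;           /* explicit column list left unmatched */
    return true;
}

int
main(void)
{
    /* three-column table, two expressions */
    printf("%d\n", insert_row_length_ok(2, 3, false));  /* 1: defaults fill in */
    printf("%d\n", insert_row_length_ok(2, 3, true));   /* 0: error */
    return 0;
}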
diff --git a/src/backend/parser/kwlookup.c b/src/backend/parser/kwlookup.c
index 4ac195efa21..4c6bf6c60dc 100644
--- a/src/backend/parser/kwlookup.c
+++ b/src/backend/parser/kwlookup.c
@@ -52,7 +52,7 @@ ScanKeywordLookup(const char *text,
return NULL;
/*
- * Apply an ASCII-only downcasing. We must not use tolower() since it may
+ * Apply an ASCII-only downcasing. We must not use tolower() since it may
* produce the wrong translation in some locales (eg, Turkish).
*/
for (i = 0; i < len; i++)
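The loop shown above applies the downcasing byte by byte. A self-contained sketch of ASCII-only downcasing (names are illustrative; tolower() is avoided because its result is locale-dependent, e.g. Turkish locales map 'I' to a dotless i):

#include <stdio.h>

/* ASCII-only downcase: shift only the bytes 'A'..'Z', leave the rest alone */
static void
downcase_ascii(const char *src, char *dst, int len)
{
    int         i;

    for (i = 0; i < len; i++)
    {
        char        ch = src[i];

        if (ch >= 'A' && ch <= 'Z')
            ch += 'a' - 'A';
        dst[i] = ch;
    }
    dst[len] = '\0';
}

int
main(void)
{
    char        buf[16];

    downcase_ascii("SELECT", buf, 6);
    printf("%s\n", buf);        /* prints "select" */
    return 0;
}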
diff --git a/src/backend/parser/parse_agg.c b/src/backend/parser/parse_agg.c
index 7380618fae3..d2520c9c76b 100644
--- a/src/backend/parser/parse_agg.c
+++ b/src/backend/parser/parse_agg.c
@@ -66,7 +66,7 @@ static bool check_ungrouped_columns_walker(Node *node,
*
* Here we convert the args list into a targetlist by inserting TargetEntry
* nodes, and then transform the aggorder and agg_distinct specifications to
- * produce lists of SortGroupClause nodes. (That might also result in adding
+ * produce lists of SortGroupClause nodes. (That might also result in adding
* resjunk expressions to the targetlist.)
*
* We must also determine which query level the aggregate actually belongs to,
@@ -690,7 +690,7 @@ parseCheckAggregates(ParseState *pstate, Query *qry)
/*
* If there are join alias vars involved, we have to flatten them to the
* underlying vars, so that aliased and unaliased vars will be correctly
- * taken as equal. We can skip the expense of doing this if no rangetable
+ * taken as equal. We can skip the expense of doing this if no rangetable
* entries are RTE_JOIN kind. We use the planner's flatten_join_alias_vars
* routine to do the flattening; it wants a PlannerInfo root node, which
* fortunately can be mostly dummy.
@@ -728,7 +728,7 @@ parseCheckAggregates(ParseState *pstate, Query *qry)
*
* Note: because we check resjunk tlist elements as well as regular ones,
* this will also find ungrouped variables that came from ORDER BY and
- * WINDOW clauses. For that matter, it's also going to examine the
+ * WINDOW clauses. For that matter, it's also going to examine the
* grouping expressions themselves --- but they'll all pass the test ...
*/
clause = (Node *) qry->targetList;
@@ -836,7 +836,7 @@ check_ungrouped_columns_walker(Node *node,
/*
* If we have an ungrouped Var of the original query level, we have a
* failure. Vars below the original query level are not a problem, and
- * neither are Vars from above it. (If such Vars are ungrouped as far as
+ * neither are Vars from above it. (If such Vars are ungrouped as far as
* their own query level is concerned, that's someone else's problem...)
*/
if (IsA(node, Var))
@@ -867,7 +867,7 @@ check_ungrouped_columns_walker(Node *node,
/*
* Check whether the Var is known functionally dependent on the GROUP
- * BY columns. If so, we can allow the Var to be used, because the
+ * BY columns. If so, we can allow the Var to be used, because the
* grouping is really a no-op for this table. However, this deduction
* depends on one or more constraints of the table, so we have to add
* those constraints to the query's constraintDeps list, because it's
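The hunk above concerns Vars that are known functionally dependent on the GROUP BY columns. A hedged sketch of that test, assuming (as PostgreSQL's implementation does) that the deduction rests on a primary-key constraint; the function and parameter names are illustrative:

#include <stdbool.h>
#include <stdio.h>

/*
 * If every key column of the Var's table appears in GROUP BY, all other
 * columns of that table are determined by the grouping and may be used
 * ungrouped.  The real code must also record the constraint it relied on
 * in the query's constraintDeps list; that bookkeeping is omitted here.
 */
static bool
key_covered_by_group_by(const int *keycols, int nkey,
                        const int *groupcols, int ngroup)
{
    int         i,
                j;

    for (i = 0; i < nkey; i++)
    {
        bool        found = false;

        for (j = 0; j < ngroup; j++)
            if (keycols[i] == groupcols[j])
                found = true;
        if (!found)
            return false;
    }
    return true;
}

int
main(void)
{
    int         key[] = {1};    /* table's primary key: column 1 */
    int         grp[] = {1, 3}; /* GROUP BY covers it */

    printf("%d\n", key_covered_by_group_by(key, 1, grp, 2));    /* 1 */
    return 0;
}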
diff --git a/src/backend/parser/parse_clause.c b/src/backend/parser/parse_clause.c
index f2c506f940c..29fd7b1d619 100644
--- a/src/backend/parser/parse_clause.c
+++ b/src/backend/parser/parse_clause.c
@@ -152,7 +152,7 @@ transformFromClause(ParseState *pstate, List *frmList)
*
* If alsoSource is true, add the target to the query's joinlist and
* namespace. For INSERT, we don't want the target to be joined to;
- * it's a destination of tuples, not a source. For UPDATE/DELETE,
+ * it's a destination of tuples, not a source. For UPDATE/DELETE,
* we do need to scan or join the target. (NOTE: we do not bother
* to check for namespace conflict; we assume that the namespace was
* initially empty in these cases.)
@@ -222,7 +222,7 @@ setTargetTable(ParseState *pstate, RangeVar *relation,
* Simplify InhOption (yes/no/default) into boolean yes/no.
*
* The reason we do things this way is that we don't want to examine the
- * SQL_inheritance option flag until parse_analyze() is run. Otherwise,
+ * SQL_inheritance option flag until parse_analyze() is run. Otherwise,
* we'd do the wrong thing with query strings that intermix SET commands
* with queries.
*/
@@ -399,7 +399,7 @@ transformJoinOnClause(ParseState *pstate, JoinExpr *j, List *namespace)
/*
* The namespace that the join expression should see is just the two
* subtrees of the JOIN plus any outer references from upper pstate
- * levels. Temporarily set this pstate's namespace accordingly. (We need
+ * levels. Temporarily set this pstate's namespace accordingly. (We need
* not check for refname conflicts, because transformFromClauseItem()
* already did.) All namespace items are marked visible regardless of
* LATERAL state.
@@ -493,7 +493,7 @@ transformRangeSubselect(ParseState *pstate, RangeSubselect *r)
pstate->p_expr_kind = EXPR_KIND_NONE;
/*
- * Check that we got something reasonable. Many of these conditions are
+ * Check that we got something reasonable. Many of these conditions are
* impossible given restrictions of the grammar, but check 'em anyway.
*/
if (!IsA(query, Query) ||
@@ -527,7 +527,7 @@ transformRangeFunction(ParseState *pstate, RangeFunction *r)
/*
* Get function name for possible use as alias. We use the same
- * transformation rules as for a SELECT output expression. For a FuncCall
+ * transformation rules as for a SELECT output expression. For a FuncCall
* node, the result will be the function name, but it is possible for the
* grammar to hand back other node types.
*/
@@ -605,10 +605,10 @@ transformRangeFunction(ParseState *pstate, RangeFunction *r)
* (We could extract this from the function return node, but it saves cycles
* to pass it back separately.)
*
- * *top_rti: receives the rangetable index of top_rte. (Ditto.)
+ * *top_rti: receives the rangetable index of top_rte. (Ditto.)
*
* *namespace: receives a List of ParseNamespaceItems for the RTEs exposed
- * as table/column names by this item. (The lateral_only flags in these items
+ * as table/column names by this item. (The lateral_only flags in these items
* are indeterminate and should be explicitly set by the caller before use.)
*/
static Node *
@@ -721,7 +721,7 @@ transformFromClauseItem(ParseState *pstate, Node *n,
* right side, by temporarily adding them to the pstate's namespace
* list. Per SQL:2008, if the join type is not INNER or LEFT then the
* left-side names must still be exposed, but it's an error to
- * reference them. (Stupid design, but that's what it says.) Hence,
+ * reference them. (Stupid design, but that's what it says.) Hence,
* we always push them into the namespace, but mark them as not
* lateral_ok if the jointype is wrong.
*
@@ -985,7 +985,7 @@ transformFromClauseItem(ParseState *pstate, Node *n,
*
* Note: if there are nested alias-less JOINs, the lower-level ones
* will remain in the list although they have neither p_rel_visible
- * nor p_cols_visible set. We could delete such list items, but it's
+ * nor p_cols_visible set. We could delete such list items, but it's
* unclear that it's worth expending cycles to do so.
*/
if (j->alias != NULL)
@@ -1322,9 +1322,9 @@ checkTargetlistEntrySQL92(ParseState *pstate, TargetEntry *tle,
*
* This function supports the old SQL92 ORDER BY interpretation, where the
* expression is an output column name or number. If we fail to find a
- * match of that sort, we fall through to the SQL99 rules. For historical
+ * match of that sort, we fall through to the SQL99 rules. For historical
* reasons, Postgres also allows this interpretation for GROUP BY, though
- * the standard never did. However, for GROUP BY we prefer a SQL99 match.
+ * the standard never did. However, for GROUP BY we prefer a SQL99 match.
* This function is *not* used for WINDOW definitions.
*
* node the ORDER BY, GROUP BY, or DISTINCT ON expression to be matched
@@ -1342,7 +1342,7 @@ findTargetlistEntrySQL92(ParseState *pstate, Node *node, List **tlist,
*
* 1. Bare ColumnName (no qualifier or subscripts)
* For a bare identifier, we search for a matching column name
- * in the existing target list. Multiple matches are an error
+ * in the existing target list. Multiple matches are an error
* unless they refer to identical values; for example,
* we allow SELECT a, a FROM table ORDER BY a
* but not SELECT a AS b, b FROM table ORDER BY b
@@ -1351,7 +1351,7 @@ findTargetlistEntrySQL92(ParseState *pstate, Node *node, List **tlist,
* For GROUP BY, it is incorrect to match the grouping item against
* targetlist entries: according to SQL92, an identifier in GROUP BY
* is a reference to a column name exposed by FROM, not to a target
- * list column. However, many implementations (including pre-7.0
+ * list column. However, many implementations (including pre-7.0
* PostgreSQL) accept this anyway. So for GROUP BY, we look first
* to see if the identifier matches any FROM column name, and only
* try for a targetlist name if it doesn't. This ensures that we
@@ -1509,7 +1509,7 @@ findTargetlistEntrySQL99(ParseState *pstate, Node *node, List **tlist,
/*
* Convert the untransformed node to a transformed expression, and search
* for a match in the tlist. NOTE: it doesn't really matter whether there
- * is more than one match. Also, we are willing to match an existing
+ * is more than one match. Also, we are willing to match an existing
* resjunk target here, though the SQL92 cases above must ignore resjunk
* targets.
*/
@@ -1537,7 +1537,7 @@ findTargetlistEntrySQL99(ParseState *pstate, Node *node, List **tlist,
/*
* If no matches, construct a new target entry which is appended to the
- * end of the target list. This target is given resjunk = TRUE so that it
+ * end of the target list. This target is given resjunk = TRUE so that it
* will not be projected into the final tuple.
*/
target_result = transformTargetEntry(pstate, node, expr, exprKind,
@@ -1748,7 +1748,7 @@ transformWindowDefinitions(ParseState *pstate,
* <window clause> syntax rule 10 and general rule 1. The frame
* clause rule is especially bizarre because it makes "OVER foo"
* different from "OVER (foo)", and requires the latter to throw an
- * error if foo has a nondefault frame clause. Well, ours not to
+ * error if foo has a nondefault frame clause. Well, ours not to
* reason why, but we do go out of our way to throw a useful error
* message for such cases.
*/
@@ -1851,7 +1851,7 @@ transformDistinctClause(ParseState *pstate,
/*
* The distinctClause should consist of all ORDER BY items followed by all
- * other non-resjunk targetlist items. There must not be any resjunk
+ * other non-resjunk targetlist items. There must not be any resjunk
* ORDER BY items --- that would imply that we are sorting by a value that
* isn't necessarily unique within a DISTINCT group, so the results
* wouldn't be well-defined. This construction ensures we follow the rule
@@ -1974,7 +1974,7 @@ transformDistinctOnClause(ParseState *pstate, List *distinctlist,
/*
* Now add any remaining DISTINCT ON items, using default sort/group
- * semantics for their data types. (Note: this is pretty questionable; if
+ * semantics for their data types. (Note: this is pretty questionable; if
* the ORDER BY list doesn't include all the DISTINCT ON items and more
* besides, you certainly aren't using DISTINCT ON in the intended way,
* and you probably aren't going to get consistent results. It might be
diff --git a/src/backend/parser/parse_coerce.c b/src/backend/parser/parse_coerce.c
index b6df2c60b46..909f4ebb01e 100644
--- a/src/backend/parser/parse_coerce.c
+++ b/src/backend/parser/parse_coerce.c
@@ -56,12 +56,12 @@ static bool typeIsOfTypedTable(Oid reltypeId, Oid reloftypeId);
* Convert an expression to a target type and typmod.
*
* This is the general-purpose entry point for arbitrary type coercion
- * operations. Direct use of the component operations can_coerce_type,
+ * operations. Direct use of the component operations can_coerce_type,
* coerce_type, and coerce_type_typmod should be restricted to special
* cases (eg, when the conversion is expected to succeed).
*
* Returns the possibly-transformed expression tree, or NULL if the type
- * conversion is not possible. (We do this, rather than ereport'ing directly,
+ * conversion is not possible. (We do this, rather than ereport'ing directly,
* so that callers can generate custom error messages indicating context.)
*
* pstate - parse state (can be NULL, see coerce_type)
@@ -145,7 +145,7 @@ coerce_to_target_type(ParseState *pstate, Node *expr, Oid exprtype,
* already be properly coerced to the specified typmod.
*
* pstate is only used in the case that we are able to resolve the type of
- * a previously UNKNOWN Param. It is okay to pass pstate = NULL if the
+ * a previously UNKNOWN Param. It is okay to pass pstate = NULL if the
* caller does not want type information updated for Params.
*
* Note: this function must not modify the given expression tree, only add
@@ -175,7 +175,7 @@ coerce_type(ParseState *pstate, Node *node,
*
* Note: by returning the unmodified node here, we are saying that
* it's OK to treat an UNKNOWN constant as a valid input for a
- * function accepting ANY, ANYELEMENT, or ANYNONARRAY. This should be
+ * function accepting ANY, ANYELEMENT, or ANYNONARRAY. This should be
* all right, since an UNKNOWN value is still a perfectly valid Datum.
*
* NB: we do NOT want a RelabelType here: the exposed type of the
@@ -250,7 +250,7 @@ coerce_type(ParseState *pstate, Node *node,
/*
* If the target type is a domain, we want to call its base type's
- * input routine, not domain_in(). This is to avoid premature failure
+ * input routine, not domain_in(). This is to avoid premature failure
* when the domain applies a typmod: existing input routines follow
* implicit-coercion semantics for length checks, which is not always
* what we want here. The needed check will be applied properly
@@ -263,7 +263,7 @@ coerce_type(ParseState *pstate, Node *node,
* For most types we pass typmod -1 to the input routine, because
* existing input routines follow implicit-coercion semantics for
* length checks, which is not always what we want here. Any length
- * constraint will be applied later by our caller. An exception
+ * constraint will be applied later by our caller. An exception
* however is the INTERVAL type, for which we *must* pass the typmod
* or it won't be able to obey the bizarre SQL-spec input rules. (Ugly
* as sin, but so is this part of the spec...)
@@ -343,7 +343,7 @@ coerce_type(ParseState *pstate, Node *node,
{
/*
* If we have a COLLATE clause, we have to push the coercion
- * underneath the COLLATE. This is really ugly, but there is little
+ * underneath the COLLATE. This is really ugly, but there is little
* choice because the above hacks on Consts and Params wouldn't happen
* otherwise. This kluge has consequences in coerce_to_target_type.
*/
@@ -366,7 +366,7 @@ coerce_type(ParseState *pstate, Node *node,
{
/*
* Generate an expression tree representing run-time application
- * of the conversion function. If we are dealing with a domain
+ * of the conversion function. If we are dealing with a domain
* target type, the conversion function will yield the base type,
* and we need to extract the correct typmod to use from the
* domain's typtypmod.
@@ -402,7 +402,7 @@ coerce_type(ParseState *pstate, Node *node,
* to have the intended type when inspected by higher-level code.
*
* Also, domains may have value restrictions beyond the base type
- * that must be accounted for. If the destination is a domain
+ * that must be accounted for. If the destination is a domain
* then we won't need a RelabelType node.
*/
result = coerce_to_domain(node, InvalidOid, -1, targetTypeId,
@@ -649,7 +649,7 @@ coerce_to_domain(Node *arg, Oid baseTypeId, int32 baseTypeMod, Oid typeId,
}
/*
- * Now build the domain coercion node. This represents run-time checking
+ * Now build the domain coercion node. This represents run-time checking
* of any constraints currently attached to the domain. This also ensures
* that the expression is properly labeled as to result type.
*/
@@ -722,7 +722,7 @@ coerce_type_typmod(Node *node, Oid targetTypeId, int32 targetTypMod,
* Mark a coercion node as IMPLICIT so it will never be displayed by
* ruleutils.c. We use this when we generate a nest of coercion nodes
* to implement what is logically one conversion; the inner nodes are
- * forced to IMPLICIT_CAST format. This does not change their semantics,
+ * forced to IMPLICIT_CAST format. This does not change their semantics,
* only display behavior.
*
* It is caller error to call this on something that doesn't have a
@@ -1181,7 +1181,7 @@ select_common_type(ParseState *pstate, List *exprs, const char *context,
}
/*
- * Nope, so set up for the full algorithm. Note that at this point, lc
+ * Nope, so set up for the full algorithm. Note that at this point, lc
* points to the first list item with type different from pexpr's; we need
* not re-examine any items the previous loop advanced over.
*/
@@ -1476,7 +1476,7 @@ check_generic_type_consistency(Oid *actual_arg_types,
*
* If any polymorphic pseudotype is used in a function's arguments or
* return type, we make sure the actual data types are consistent with
- * each other. The argument consistency rules are shown above for
+ * each other. The argument consistency rules are shown above for
* check_generic_type_consistency().
*
* If we have UNKNOWN input (ie, an untyped literal) for any polymorphic
@@ -1498,7 +1498,7 @@ check_generic_type_consistency(Oid *actual_arg_types,
* impossible to determine the range type from the subtype alone.)
* 4) If return type is ANYARRAY, but no argument is ANYARRAY or ANYELEMENT,
* generate an error. Similarly, if return type is ANYRANGE, but no
- * argument is ANYRANGE, generate an error. (These conditions are
+ * argument is ANYRANGE, generate an error. (These conditions are
* prevented by CREATE FUNCTION and therefore are not expected here.)
* 5) If return type is ANYELEMENT, and any argument is ANYELEMENT, use the
* argument's actual type as the function's return type.
@@ -1508,7 +1508,7 @@ check_generic_type_consistency(Oid *actual_arg_types,
* type or the range type's corresponding subtype (or both, in which case
* they must match).
* 7) If return type is ANYELEMENT, no argument is ANYELEMENT, ANYARRAY, or
- * ANYRANGE, generate an error. (This condition is prevented by CREATE
+ * ANYRANGE, generate an error. (This condition is prevented by CREATE
* FUNCTION and therefore is not expected here.)
* 8) ANYENUM is treated the same as ANYELEMENT except that if it is used
* (alone or in combination with plain ANYELEMENT), we add the extra
@@ -1525,14 +1525,14 @@ check_generic_type_consistency(Oid *actual_arg_types,
*
* When allow_poly is false, we are not expecting any of the actual_arg_types
* to be polymorphic, and we should not return a polymorphic result type
- * either. When allow_poly is true, it is okay to have polymorphic "actual"
+ * either. When allow_poly is true, it is okay to have polymorphic "actual"
* arg types, and we can return ANYARRAY, ANYRANGE, or ANYELEMENT as the
- * result. (This case is currently used only to check compatibility of an
+ * result. (This case is currently used only to check compatibility of an
* aggregate's declaration with the underlying transfn.)
*
* A special case is that we could see ANYARRAY as an actual_arg_type even
* when allow_poly is false (this is possible only because pg_statistic has
- * columns shown as anyarray in the catalogs). We allow this to match a
+ * columns shown as anyarray in the catalogs). We allow this to match a
* declared ANYARRAY argument, but only if there is no ANYELEMENT argument
* or result (since we can't determine a specific element type to match to
* ANYELEMENT). Note this means that functions taking ANYARRAY had better
@@ -1638,7 +1638,7 @@ enforce_generic_type_consistency(Oid *actual_arg_types,
/*
* Fast Track: if none of the arguments are polymorphic, return the
- * unmodified rettype. We assume it can't be polymorphic either.
+ * unmodified rettype. We assume it can't be polymorphic either.
*/
if (!have_generics)
return rettype;
@@ -1981,8 +1981,8 @@ IsPreferredType(TYPCATEGORY category, Oid type)
* Check if srctype is binary-coercible to targettype.
*
* This notion allows us to cheat and directly exchange values without
- * going through the trouble of calling a conversion function. Note that
- * in general, this should only be an implementation shortcut. Before 7.4,
+ * going through the trouble of calling a conversion function. Note that
+ * in general, this should only be an implementation shortcut. Before 7.4,
* this was also used as a heuristic for resolving overloaded functions and
* operators, but that's basically a bad idea.
*
@@ -1995,7 +1995,7 @@ IsPreferredType(TYPCATEGORY category, Oid type)
* types.
*
* This function replaces IsBinaryCompatible(), which was an inherently
- * symmetric test. Since the pg_cast entries aren't necessarily symmetric,
+ * symmetric test. Since the pg_cast entries aren't necessarily symmetric,
* the order of the operands is now significant.
*/
bool
@@ -2177,7 +2177,7 @@ find_coercion_pathway(Oid targetTypeId, Oid sourceTypeId,
* Hack: disallow coercions to oidvector and int2vector, which
* otherwise tend to capture coercions that should go to "real" array
* types. We want those types to be considered "real" arrays for many
- * purposes, but not this one. (Also, ArrayCoerceExpr isn't
+ * purposes, but not this one. (Also, ArrayCoerceExpr isn't
* guaranteed to produce an output that meets the restrictions of
* these datatypes, such as being 1-dimensional.)
*/
diff --git a/src/backend/parser/parse_collate.c b/src/backend/parser/parse_collate.c
index aed1407efbe..b3b9d50f3a7 100644
--- a/src/backend/parser/parse_collate.c
+++ b/src/backend/parser/parse_collate.c
@@ -14,19 +14,19 @@
* 1. The output collation of each expression node, or InvalidOid if it
* returns a noncollatable data type. This can also be InvalidOid if the
* result type is collatable but the collation is indeterminate.
- * 2. The collation to be used in executing each function. InvalidOid means
+ * 2. The collation to be used in executing each function. InvalidOid means
* that there are no collatable inputs or their collation is indeterminate.
* This value is only stored in node types that might call collation-using
* functions.
*
* You might think we could get away with storing only one collation per
- * node, but the two concepts really need to be kept distinct. Otherwise
+ * node, but the two concepts really need to be kept distinct. Otherwise
* it's too confusing when a function produces a collatable output type but
* has no collatable inputs or produces noncollatable output from collatable
* inputs.
*
* Cases with indeterminate collation might result in an error being thrown
- * at runtime. If we knew exactly which functions require collation
+ * at runtime. If we knew exactly which functions require collation
* information, we could throw those errors at parse time instead.
*
* Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
@@ -231,7 +231,7 @@ select_common_collation(ParseState *pstate, List *exprs, bool none_ok)
* Recursive guts of collation processing.
*
* Nodes with no children (eg, Vars, Consts, Params) must have been marked
- * when built. All upper-level nodes are marked here.
+ * when built. All upper-level nodes are marked here.
*
* Note: if this is invoked directly on a List, it will attempt to infer a
* common collation for all the list members. In particular, it will throw
@@ -431,7 +431,7 @@ assign_collations_walker(Node *node, assign_collations_context *context)
/*
* TargetEntry can have only one child, and should bubble that
- * state up to its parent. We can't use the general-case code
+ * state up to its parent. We can't use the general-case code
* below because exprType and friends don't work on TargetEntry.
*/
collation = loccontext.collation;
@@ -446,7 +446,7 @@ assign_collations_walker(Node *node, assign_collations_context *context)
* There are some cases where there might not be a failure, for
* example if the planner chooses to use hash aggregation instead
* of sorting for grouping; but it seems better to predictably
- * throw an error. (Compare transformSetOperationTree, which will
+ * throw an error. (Compare transformSetOperationTree, which will
* throw error for indeterminate collation of set-op columns, even
* though the planner might be able to implement the set-op
* without sorting.)
@@ -484,7 +484,7 @@ assign_collations_walker(Node *node, assign_collations_context *context)
* SubLink. Act as though the Query returns its first output
* column, which indeed is what it does for EXPR_SUBLINK and
* ARRAY_SUBLINK cases. In the cases where the SubLink
- * returns boolean, this info will be ignored. Special case:
+ * returns boolean, this info will be ignored. Special case:
* in EXISTS, the Query might return no columns, in which case
* we need do nothing.
*
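As a hedged sketch of the two-collation bookkeeping described above, combining the collations of two inputs might look like the following. PostgreSQL's real code also tracks explicit-vs-implicit strength and error positions, which are omitted here:

#include <stdio.h>

typedef unsigned int Oid;

#define InvalidOid  ((Oid) 0)

/*
 * Combine the collations of two inputs; InvalidOid represents "none or
 * indeterminate".  Conflicting collations yield an indeterminate result,
 * which may surface as a runtime error only if a collation-sensitive
 * function actually needs the information.
 */
static Oid
merge_input_collations(Oid a, Oid b)
{
    if (a == InvalidOid)
        return b;               /* at most one collatable input */
    if (b == InvalidOid || a == b)
        return a;
    return InvalidOid;          /* conflict: indeterminate */
}

int
main(void)
{
    printf("%u\n", merge_input_collations(100, 0));     /* 100 */
    printf("%u\n", merge_input_collations(100, 101));   /* 0 */
    return 0;
}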
diff --git a/src/backend/parser/parse_cte.c b/src/backend/parser/parse_cte.c
index 0e4080293dd..24a847a54ea 100644
--- a/src/backend/parser/parse_cte.c
+++ b/src/backend/parser/parse_cte.c
@@ -181,7 +181,7 @@ transformWithClause(ParseState *pstate, WithClause *withClause)
checkWellFormedRecursion(&cstate);
/*
- * Set up the ctenamespace for parse analysis. Per spec, all the WITH
+ * Set up the ctenamespace for parse analysis. Per spec, all the WITH
* items are visible to all others, so stuff them all in before parse
* analysis. We build the list in safe processing order so that the
* planner can process the queries in sequence.
@@ -207,7 +207,7 @@ transformWithClause(ParseState *pstate, WithClause *withClause)
{
/*
* For non-recursive WITH, just analyze each CTE in sequence and then
- * add it to the ctenamespace. This corresponds to the spec's
+ * add it to the ctenamespace. This corresponds to the spec's
* definition of the scope of each WITH name. However, to allow error
* reports to be aware of the possibility of an erroneous reference,
* we maintain a list in p_future_ctes of the not-yet-visible CTEs.
@@ -245,7 +245,7 @@ analyzeCTE(ParseState *pstate, CommonTableExpr *cte)
cte->ctequery = (Node *) query;
/*
- * Check that we got something reasonable. These first two cases should
+ * Check that we got something reasonable. These first two cases should
* be prevented by the grammar.
*/
if (!IsA(query, Query))
@@ -393,7 +393,7 @@ analyzeCTETargetList(ParseState *pstate, CommonTableExpr *cte, List *tlist)
/*
* If the CTE is recursive, force the exposed column type of any
- * "unknown" column to "text". This corresponds to the fact that
+ * "unknown" column to "text". This corresponds to the fact that
* SELECT 'foo' UNION SELECT 'bar' will ultimately produce text. We
* might see "unknown" as a result of an untyped literal in the
* non-recursive term's select list, and if we don't convert to text
diff --git a/src/backend/parser/parse_expr.c b/src/backend/parser/parse_expr.c
index 06f6512c4e4..5c898dd5956 100644
--- a/src/backend/parser/parse_expr.c
+++ b/src/backend/parser/parse_expr.c
@@ -505,7 +505,7 @@ transformColumnRef(ParseState *pstate, ColumnRef *cref)
} crerr = CRERR_NO_COLUMN;
/*
- * Give the PreParseColumnRefHook, if any, first shot. If it returns
+ * Give the PreParseColumnRefHook, if any, first shot. If it returns
* non-null then that's all, folks.
*/
if (pstate->p_pre_columnref_hook != NULL)
@@ -576,7 +576,7 @@ transformColumnRef(ParseState *pstate, ColumnRef *cref)
}
/*
- * Try to find the name as a relation. Note that only
+ * Try to find the name as a relation. Note that only
* relations already entered into the rangetable will be
* recognized.
*
@@ -807,7 +807,7 @@ transformParamRef(ParseState *pstate, ParamRef *pref)
Node *result;
/*
- * The core parser knows nothing about Params. If a hook is supplied,
+ * The core parser knows nothing about Params. If a hook is supplied,
* call it. If not, or if the hook returns NULL, throw a generic error.
*/
if (pstate->p_paramref_hook != NULL)
@@ -1107,7 +1107,7 @@ transformAExprIn(ParseState *pstate, A_Expr *a)
* We try to generate a ScalarArrayOpExpr from IN/NOT IN, but this is only
* possible if there is a suitable array type available. If not, we fall
* back to a boolean condition tree with multiple copies of the lefthand
- * expression. Also, any IN-list items that contain Vars are handled as
+ * expression. Also, any IN-list items that contain Vars are handled as
* separate boolean conditions, because that gives the planner more scope
* for optimization on such clauses.
*
@@ -1138,7 +1138,7 @@ transformAExprIn(ParseState *pstate, A_Expr *a)
Oid array_type;
/*
- * Try to select a common type for the array elements. Note that
+ * Try to select a common type for the array elements. Note that
* since the LHS' type is first in the list, it will be preferred when
* there is doubt (eg, when all the RHS items are unknown literals).
*
@@ -1493,7 +1493,7 @@ transformSubLink(ParseState *pstate, SubLink *sublink)
qtree = parse_sub_analyze(sublink->subselect, pstate, NULL, false);
/*
- * Check that we got something reasonable. Many of these conditions are
+ * Check that we got something reasonable. Many of these conditions are
* impossible given restrictions of the grammar, but check 'em anyway.
*/
if (!IsA(qtree, Query) ||
@@ -1908,7 +1908,7 @@ transformXmlExpr(ParseState *pstate, XmlExpr *x)
newx->location = x->location;
/*
- * gram.y built the named args as a list of ResTarget. Transform each,
+ * gram.y built the named args as a list of ResTarget. Transform each,
* and break the names out as a separate list.
*/
newx->named_args = NIL;
@@ -2171,9 +2171,9 @@ transformWholeRowRef(ParseState *pstate, RangeTblEntry *rte, int location)
vnum = RTERangeTablePosn(pstate, rte, &sublevels_up);
/*
- * Build the appropriate referencing node. Note that if the RTE is a
+ * Build the appropriate referencing node. Note that if the RTE is a
* function returning scalar, we create just a plain reference to the
- * function value, not a composite containing a single column. This is
+ * function value, not a composite containing a single column. This is
* pretty inconsistent at first sight, but it's what we've done
* historically. One argument for it is that "rel" and "rel.*" mean the
* same thing for composite relations, so why not for scalar functions...
@@ -2357,7 +2357,7 @@ make_row_comparison_op(ParseState *pstate, List *opname,
/*
* Now we must determine which row comparison semantics (= <> < <= > >=)
- * apply to this set of operators. We look for btree opfamilies
+ * apply to this set of operators. We look for btree opfamilies
* containing the operators, and see which interpretations (strategy
* numbers) exist for each operator.
*/
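The transformAExprIn hunk above describes splitting an IN list: Var-containing items become separate OR'ed conditions, while the rest are batched into a single "lhs = ANY(array)" test. A sketch of the partitioning, with illustrative types and names:

#include <stdbool.h>
#include <stdio.h>

typedef struct InItem
{
    const char *text;           /* item as written in the query */
    bool        contains_var;   /* does it reference a column? */
} InItem;

/*
 * Batch Var-free items into one array condition; emit each
 * Var-containing item as its own "lhs = item" condition, which gives
 * the planner more scope for optimization.
 */
static void
split_in_list(const InItem *items, int nitems)
{
    int         i;

    printf("array items:");
    for (i = 0; i < nitems; i++)
        if (!items[i].contains_var)
            printf(" %s", items[i].text);
    printf("\nseparate conditions:");
    for (i = 0; i < nitems; i++)
        if (items[i].contains_var)
            printf(" lhs = %s", items[i].text);
    printf("\n");
}

int
main(void)
{
    InItem      items[] = {{"1", false}, {"2", false}, {"other_col", true}};

    split_in_list(items, 3);
    return 0;
}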
diff --git a/src/backend/parser/parse_func.c b/src/backend/parser/parse_func.c
index de7cf4fc058..dc9a2e82fc0 100644
--- a/src/backend/parser/parse_func.c
+++ b/src/backend/parser/parse_func.c
@@ -83,7 +83,7 @@ ParseFuncOrColumn(ParseState *pstate, List *funcname, List *fargs,
/*
* Most of the rest of the parser just assumes that functions do not have
- * more than FUNC_MAX_ARGS parameters. We have to test here to protect
+ * more than FUNC_MAX_ARGS parameters. We have to test here to protect
* against array overruns, etc. Of course, this may not be a function,
* but the test doesn't hurt.
*/
@@ -100,7 +100,7 @@ ParseFuncOrColumn(ParseState *pstate, List *funcname, List *fargs,
* Extract arg type info in preparation for function lookup.
*
* If any arguments are Param markers of type VOID, we discard them from
- * the parameter list. This is a hack to allow the JDBC driver to not
+ * the parameter list. This is a hack to allow the JDBC driver to not
* have to distinguish "input" and "output" parameter symbols while
* parsing function-call constructs. We can't use foreach() because we
* may modify the list ...
@@ -310,7 +310,7 @@ ParseFuncOrColumn(ParseState *pstate, List *funcname, List *fargs,
* If there are default arguments, we have to include their types in
* actual_arg_types for the purpose of checking generic type consistency.
* However, we do NOT put them into the generated parse node, because
- * their actual values might change before the query gets run. The
+ * their actual values might change before the query gets run. The
* planner has to insert the up-to-date values at plan time.
*/
nargsplusdefs = nargs;
@@ -412,7 +412,7 @@ ParseFuncOrColumn(ParseState *pstate, List *funcname, List *fargs,
/*
* Reject attempt to call a parameterless aggregate without (*)
- * syntax. This is mere pedantry but some folks insisted ...
+ * syntax. This is mere pedantry but some folks insisted ...
*/
if (fargs == NIL && !agg_star)
ereport(ERROR,
@@ -478,7 +478,7 @@ ParseFuncOrColumn(ParseState *pstate, List *funcname, List *fargs,
/*
* Reject attempt to call a parameterless aggregate without (*)
- * syntax. This is mere pedantry but some folks insisted ...
+ * syntax. This is mere pedantry but some folks insisted ...
*/
if (wfunc->winagg && fargs == NIL && !agg_star)
ereport(ERROR,
@@ -647,7 +647,7 @@ func_select_candidate(int nargs,
* matches" in the exact-match heuristic; it also makes it possible to do
* something useful with the type-category heuristics. Note that this
* makes it difficult, but not impossible, to use functions declared to
- * take a domain as an input datatype. Such a function will be selected
+ * take a domain as an input datatype. Such a function will be selected
* over the base-type function only if it is an exact match at all
* argument positions, and so was already chosen by our caller.
*
@@ -771,7 +771,7 @@ func_select_candidate(int nargs,
/*
* The next step examines each unknown argument position to see if we can
- * determine a "type category" for it. If any candidate has an input
+ * determine a "type category" for it. If any candidate has an input
* datatype of STRING category, use STRING category (this bias towards
* STRING is appropriate since unknown-type literals look like strings).
* Otherwise, if all the candidates agree on the type category of this
@@ -782,7 +782,7 @@ func_select_candidate(int nargs,
* the candidates takes a preferred datatype within the category.
*
* Having completed this examination, remove candidates that accept the
- * wrong category at any unknown position. Also, if at least one
+ * wrong category at any unknown position. Also, if at least one
* candidate accepted a preferred type at a position, remove candidates
* that accept non-preferred types. If just one candidate remains, return
* that one. However, if this rule turns out to reject all candidates,
@@ -911,7 +911,7 @@ func_select_candidate(int nargs,
* type, and see if that gives us a unique match. If so, use that match.
*
* NOTE: for a binary operator with one unknown and one non-unknown input,
- * we already tried this heuristic in binary_oper_exact(). However, that
+ * we already tried this heuristic in binary_oper_exact(). However, that
* code only finds exact matches, whereas here we will handle matches that
* involve coercion, polymorphic type resolution, etc.
*/
@@ -1077,7 +1077,7 @@ func_get_detail(List *funcname,
*
* NB: it's important that this code does not exceed what coerce_type
* can do, because the caller will try to apply coerce_type if we
- * return FUNCDETAIL_COERCION. If we return that result for something
+ * return FUNCDETAIL_COERCION. If we return that result for something
* coerce_type can't handle, we'll cause infinite recursion between
* this module and coerce_type!
*/
@@ -1253,7 +1253,7 @@ func_get_detail(List *funcname,
{
/*
* This is a bit tricky in named notation, since the supplied
- * arguments could replace any subset of the defaults. We
+ * arguments could replace any subset of the defaults. We
* work by making a bitmapset of the argnumbers of defaulted
* arguments, then scanning the defaults list and selecting
* the needed items. (This assumes that defaulted arguments
@@ -1403,7 +1403,7 @@ FuncNameAsType(List *funcname)
* ParseComplexProjection -
* handles function calls with a single argument that is of complex type.
* If the function call is actually a column projection, return a suitably
- * transformed expression tree. If not, return NULL.
+ * transformed expression tree. If not, return NULL.
*/
static Node *
ParseComplexProjection(ParseState *pstate, char *funcname, Node *first_arg,
@@ -1477,7 +1477,7 @@ ParseComplexProjection(ParseState *pstate, char *funcname, Node *first_arg,
* The result is something like "foo(integer)".
*
* If argnames isn't NIL, it is a list of C strings representing the actual
- * arg names for the last N arguments. This must be considered part of the
+ * arg names for the last N arguments. This must be considered part of the
* function signature too, when dealing with named-notation function calls.
*
* This is typically used in the construction of function-not-found error
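The named-notation hunk above works "by making a bitmapset of the argnumbers of defaulted arguments, then scanning the defaults list and selecting the needed items." A sketch with a plain bit mask standing in for a Bitmapset; all names are illustrative:

#include <stdio.h>

/*
 * Argument numbers are bit positions (arg n is bit n-1).  Supplied
 * arguments knock their bits out of the defaulted set; whatever remains
 * must be filled from the function's default expressions.
 */
static unsigned
defaults_still_needed(unsigned defaultable_args, unsigned supplied_args)
{
    return defaultable_args & ~supplied_args;
}

int
main(void)
{
    unsigned    defaultable = 0x1C; /* args 3..5 have defaults */
    unsigned    supplied = 0x08;    /* arg 4 was passed by name */
    unsigned    needed = defaults_still_needed(defaultable, supplied);
    int         argno;

    for (argno = 0; argno < 32; argno++)
        if (needed & (1u << argno))
            printf("use default for argument %d\n", argno + 1);
    return 0;
}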
diff --git a/src/backend/parser/parse_node.c b/src/backend/parser/parse_node.c
index e0ea43a8104..54f7bcbca25 100644
--- a/src/backend/parser/parse_node.c
+++ b/src/backend/parser/parse_node.c
@@ -99,8 +99,8 @@ free_parsestate(ParseState *pstate)
* is a dummy (always 0, in fact).
*
* The locations stored in raw parsetrees are byte offsets into the source
- * string. We have to convert them to 1-based character indexes for reporting
- * to clients. (We do things this way to avoid unnecessary overhead in the
+ * string. We have to convert them to 1-based character indexes for reporting
+ * to clients. (We do things this way to avoid unnecessary overhead in the
* normal non-error case: computing character indexes would be much more
* expensive than storing token offsets.)
*/
@@ -129,7 +129,7 @@ parser_errposition(ParseState *pstate, int location)
* Sometimes the parser calls functions that aren't part of the parser
* subsystem and can't reasonably be passed a ParseState; yet we would
* like any errors thrown in those functions to be tagged with a parse
- * error location. Use this function to set up an error context stack
+ * error location. Use this function to set up an error context stack
* entry that will accomplish that. Usage pattern:
*
* declare a local variable "ParseCallbackState pcbstate"
@@ -221,7 +221,7 @@ transformArrayType(Oid *arrayType, int32 *arrayTypmod)
* If the input is a domain, smash to base type, and extract the actual
* typmod to be applied to the base type. Subscripting a domain is an
* operation that necessarily works on the base array type, not the domain
- * itself. (Note that we provide no method whereby the creator of a
+ * itself. (Note that we provide no method whereby the creator of a
* domain over an array type could hide its ability to be subscripted.)
*/
*arrayType = getBaseTypeAndTypmod(*arrayType, arrayTypmod);
@@ -269,7 +269,7 @@ transformArrayType(Oid *arrayType, int32 *arrayTypmod)
*
* In an array assignment, we are given a destination array value plus a
* source value that is to be assigned to a single element or a slice of
- * that array. We produce an expression that represents the new array value
+ * that array. We produce an expression that represents the new array value
* with the source data inserted into the right part of the array.
*
* For both cases, if the source array is of a domain-over-array type,
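The parse_node.c hunk above notes that raw parse locations are byte offsets, converted to 1-based character indexes only when reporting errors. A sketch of that conversion, assuming a UTF-8 source string (continuation bytes match 10xxxxxx, so counting the non-continuation bytes before the offset yields the character number):

#include <stdio.h>

static int
byte_offset_to_charno(const char *str, int byteoff)
{
    int         charno = 1;     /* 1-based, per the comment above */
    int         i;

    for (i = 0; i < byteoff; i++)
        if (((unsigned char) str[i] & 0xC0) != 0x80)
            charno++;
    return charno;
}

int
main(void)
{
    /* "héllo": 'h' is 1 byte, 'é' is 2; byte offset 3 starts the first 'l' */
    printf("%d\n", byte_offset_to_charno("h\xc3\xa9llo", 3));   /* 3 */
    return 0;
}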
diff --git a/src/backend/parser/parse_oper.c b/src/backend/parser/parse_oper.c
index dd80fa9f95d..31e558bd1f5 100644
--- a/src/backend/parser/parse_oper.c
+++ b/src/backend/parser/parse_oper.c
@@ -447,7 +447,7 @@ oper(ParseState *pstate, List *opname, Oid ltypeId, Oid rtypeId,
*
* This is tighter than oper() because it will not return an operator that
* requires coercion of the input datatypes (but binary-compatible operators
- * are accepted). Otherwise, the semantics are the same.
+ * are accepted). Otherwise, the semantics are the same.
*/
Operator
compatible_oper(ParseState *pstate, List *op, Oid arg1, Oid arg2,
@@ -980,7 +980,7 @@ make_scalar_array_op(ParseState *pstate, List *opname,
* mapping is pretty expensive to compute, especially for ambiguous operators;
* this is mainly because there are a *lot* of instances of popular operator
* names such as "=", and we have to check each one to see which is the
- * best match. So once we have identified the correct mapping, we save it
+ * best match. So once we have identified the correct mapping, we save it
* in a cache that need only be flushed on pg_operator or pg_cast change.
* (pg_cast must be considered because changes in the set of implicit casts
* affect the set of applicable operators for any given input datatype.)
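The parse_oper.c hunk above motivates caching operator resolutions and flushing the cache wholesale on pg_operator or pg_cast change. A sketch with a tiny direct-mapped table; all names, sizes, and OID values are illustrative, not PostgreSQL's:

#include <stdio.h>
#include <string.h>

#define NCACHE 64

typedef struct OprCacheEntry
{
    char        opname[16];
    unsigned    ltype, rtype;   /* input type OIDs */
    unsigned    oproid;         /* cached resolution; 0 = empty slot */
} OprCacheEntry;

static OprCacheEntry cache[NCACHE];

static unsigned
hash_key(const char *name, unsigned l, unsigned r)
{
    unsigned    h = 5381;

    while (*name)
        h = h * 33 + (unsigned char) *name++;
    return (h ^ (l * 31) ^ r) % NCACHE;
}

/* return the cached resolution, or 0 to make the caller do the full search */
static unsigned
lookup_oper(const char *name, unsigned l, unsigned r)
{
    OprCacheEntry *e = &cache[hash_key(name, l, r)];

    if (e->oproid != 0 && e->ltype == l && e->rtype == r &&
        strcmp(e->opname, name) == 0)
        return e->oproid;
    return 0;
}

static void
remember_oper(const char *name, unsigned l, unsigned r, unsigned oproid)
{
    OprCacheEntry *e = &cache[hash_key(name, l, r)];

    snprintf(e->opname, sizeof(e->opname), "%s", name);
    e->ltype = l;
    e->rtype = r;
    e->oproid = oproid;
}

/* flush wholesale on catalog change, per the comment above */
static void
flush_oper_cache(void)
{
    memset(cache, 0, sizeof(cache));
}

int
main(void)
{
    remember_oper("=", 23, 23, 96);             /* illustrative OIDs */
    printf("%u\n", lookup_oper("=", 23, 23));   /* 96: hit */
    flush_oper_cache();
    printf("%u\n", lookup_oper("=", 23, 23));   /* 0: miss */
    return 0;
}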
diff --git a/src/backend/parser/parse_param.c b/src/backend/parser/parse_param.c
index 4f9168b074a..f9e7d47beb8 100644
--- a/src/backend/parser/parse_param.c
+++ b/src/backend/parser/parse_param.c
@@ -256,7 +256,7 @@ variable_coerce_param_hook(ParseState *pstate, Param *param,
* of parsing with parse_variable_parameters.
*
* Note: this code intentionally does not check that all parameter positions
- * were used, nor that all got non-UNKNOWN types assigned. Caller of parser
+ * were used, nor that all got non-UNKNOWN types assigned. Caller of parser
* should enforce that if it's important.
*/
void
diff --git a/src/backend/parser/parse_relation.c b/src/backend/parser/parse_relation.c
index e2cfa990780..ebfa4affb3f 100644
--- a/src/backend/parser/parse_relation.c
+++ b/src/backend/parser/parse_relation.c
@@ -71,7 +71,7 @@ static bool isQueryUsingTempRelation_walker(Node *node, void *context);
*
* A qualified refname (schemaname != NULL) can only match a relation RTE
* that (a) has no alias and (b) is for the same relation identified by
- * schemaname.refname. In this case we convert schemaname.refname to a
+ * schemaname.refname. In this case we convert schemaname.refname to a
* relation OID and search by relid, rather than by alias name. This is
* peculiar, but it's what SQL says to do.
*/
@@ -181,7 +181,7 @@ scanNameSpaceForRefname(ParseState *pstate, const char *refname, int location)
/*
* Search the query's table namespace for a relation RTE matching the
- * given relation OID. Return the RTE if a unique match, or NULL
+ * given relation OID. Return the RTE if a unique match, or NULL
* if no match. Raise error if multiple matches.
*
* See the comments for refnameRangeTblEntry to understand why this
@@ -285,7 +285,7 @@ isFutureCTE(ParseState *pstate, const char *refname)
*
* This is different from refnameRangeTblEntry in that it considers every
* entry in the ParseState's rangetable(s), not only those that are currently
- * visible in the p_namespace list(s). This behavior is invalid per the SQL
+ * visible in the p_namespace list(s). This behavior is invalid per the SQL
* spec, and it may give ambiguous results (there might be multiple equally
* valid matches, but only one will be returned). This must be used ONLY
* as a heuristic in giving suitable error messages. See errorMissingRTE.
@@ -308,8 +308,8 @@ searchRangeTableForRel(ParseState *pstate, RangeVar *relation)
* relation.
*
* NB: It's not critical that RangeVarGetRelid return the correct answer
- * here in the face of concurrent DDL. If it doesn't, the worst case
- * scenario is a less-clear error message. Also, the tables involved in
+ * here in the face of concurrent DDL. If it doesn't, the worst case
+ * scenario is a less-clear error message. Also, the tables involved in
* the query are already locked, which reduces the number of cases in
* which surprising behavior can occur. So we do the name lookup
* unlocked.
@@ -431,7 +431,7 @@ check_lateral_ref_ok(ParseState *pstate, ParseNamespaceItem *nsitem,
/*
* given an RTE, return RT index (starting with 1) of the entry,
- * and optionally get its nesting depth (0 = current). If sublevels_up
+ * and optionally get its nesting depth (0 = current). If sublevels_up
* is NULL, only consider rels at the current nesting level.
* Raises error if RTE not found.
*/
@@ -663,7 +663,7 @@ colNameToVar(ParseState *pstate, char *colname, bool localonly,
*
* This is different from colNameToVar in that it considers every entry in
* the ParseState's rangetable(s), not only those that are currently visible
- * in the p_namespace list(s). This behavior is invalid per the SQL spec,
+ * in the p_namespace list(s). This behavior is invalid per the SQL spec,
* and it may give ambiguous results (there might be multiple equally valid
* matches, but only one will be returned). This must be used ONLY as a
* heuristic in giving suitable error messages. See errorMissingColumn.
@@ -1014,7 +1014,7 @@ addRangeTableEntry(ParseState *pstate,
/*
* Get the rel's OID. This access also ensures that we have an up-to-date
- * relcache entry for the rel. Since this is typically the first access
+ * relcache entry for the rel. Since this is typically the first access
* to a rel in a statement, be careful to get the right access level
* depending on whether we're doing SELECT FOR UPDATE/SHARE.
*/
@@ -2574,7 +2574,7 @@ errorMissingRTE(ParseState *pstate, RangeVar *relation)
/*
* Check to see if there are any potential matches in the query's
- * rangetable. (Note: cases involving a bad schema name in the RangeVar
+ * rangetable. (Note: cases involving a bad schema name in the RangeVar
* will throw error immediately here. That seems OK.)
*/
rte = searchRangeTableForRel(pstate, relation);
@@ -2628,7 +2628,7 @@ errorMissingColumn(ParseState *pstate,
RangeTblEntry *rte;
/*
- * If relname was given, just play dumb and report it. (In practice, a
+ * If relname was given, just play dumb and report it. (In practice, a
* bad qualification name should end up at errorMissingRTE, not here, so
* no need to work hard on this case.)
*/
diff --git a/src/backend/parser/parse_target.c b/src/backend/parser/parse_target.c
index d56e00ffde6..e6374c2eca5 100644
--- a/src/backend/parser/parse_target.c
+++ b/src/backend/parser/parse_target.c
@@ -182,7 +182,7 @@ transformTargetList(ParseState *pstate, List *targetlist,
* This is the identical transformation to transformTargetList, except that
* the input list elements are bare expressions without ResTarget decoration,
* and the output elements are likewise just expressions without TargetEntry
- * decoration. We use this for ROW() and VALUES() constructs.
+ * decoration. We use this for ROW() and VALUES() constructs.
*/
List *
transformExpressionList(ParseState *pstate, List *exprlist,
@@ -348,7 +348,7 @@ markTargetListOrigin(ParseState *pstate, TargetEntry *tle,
/*
* transformAssignedExpr()
- * This is used in INSERT and UPDATE statements only. It prepares an
+ * This is used in INSERT and UPDATE statements only. It prepares an
* expression for assignment to a column of the target table.
* This includes coercing the given value to the target column's type
* (if necessary), and dealing with any subfield names or subscripts
@@ -367,7 +367,7 @@ markTargetListOrigin(ParseState *pstate, TargetEntry *tle,
*
* Note: location points at the target column name (SET target or INSERT
* column name list entry), and must therefore be -1 in an INSERT that
- * omits the column name list. So we should usually prefer to use
+ * omits the column name list. So we should usually prefer to use
* exprLocation(expr) for errors that can happen in a default INSERT.
*/
Expr *
@@ -442,7 +442,7 @@ transformAssignedExpr(ParseState *pstate,
/*
* If there is indirection on the target column, prepare an array or
- * subfield assignment expression. This will generate a new column value
+ * subfield assignment expression. This will generate a new column value
* that the source value has been inserted into, which can then be placed
* in the new tuple constructed by INSERT or UPDATE.
*/
@@ -550,7 +550,7 @@ updateTargetListEntry(ParseState *pstate,
/*
* Set the resno to identify the target column --- the rewriter and
- * planner depend on this. We also set the resname to identify the target
+ * planner depend on this. We also set the resname to identify the target
* column, but this is only for debugging purposes; it should not be
* relied on. (In particular, it might be out of date in a stored rule.)
*/
@@ -998,7 +998,7 @@ ExpandColumnRefStar(ParseState *pstate, ColumnRef *cref,
*
* Note: this code is a lot like transformColumnRef; it's tempting to
* call that instead and then replace the resulting whole-row Var with
- * a list of Vars. However, that would leave us with the RTE's
+ * a list of Vars. However, that would leave us with the RTE's
* selectedCols bitmap showing the whole row as needing select
* permission, as well as the individual columns. That would be
* incorrect (since columns added later shouldn't need select
@@ -1017,7 +1017,7 @@ ExpandColumnRefStar(ParseState *pstate, ColumnRef *cref,
} crserr = CRSERR_NO_RTE;
/*
- * Give the PreParseColumnRefHook, if any, first shot. If it returns
+ * Give the PreParseColumnRefHook, if any, first shot. If it returns
* non-null then we should use that expression.
*/
if (pstate->p_pre_columnref_hook != NULL)
@@ -1133,7 +1133,7 @@ ExpandColumnRefStar(ParseState *pstate, ColumnRef *cref,
* Transforms '*' (in the target list) into a list of targetlist entries.
*
* tlist entries are generated for each relation visible for unqualified
- * column name access. We do not consider qualified-name-only entries because
+ * column name access. We do not consider qualified-name-only entries because
* that would include input tables of aliasless JOINs, NEW/OLD pseudo-entries,
* etc.
*
@@ -1280,7 +1280,7 @@ ExpandRowReference(ParseState *pstate, Node *expr,
/*
* If the rowtype expression is a whole-row Var, we can expand the fields
- * as simple Vars. Note: if the RTE is a relation, this case leaves us
+ * as simple Vars. Note: if the RTE is a relation, this case leaves us
* with the RTE's selectedCols bitmap showing the whole row as needing
* select permission, as well as the individual columns. However, we can
* only get here for weird notations like (table.*).*, so it's not worth
@@ -1362,7 +1362,7 @@ ExpandRowReference(ParseState *pstate, Node *expr,
* Get the tuple descriptor for a Var of type RECORD, if possible.
*
* Since no actual table or view column is allowed to have type RECORD, such
- * a Var must refer to a JOIN or FUNCTION RTE or to a subquery output. We
+ * a Var must refer to a JOIN or FUNCTION RTE or to a subquery output. We
* drill down to find the ultimate defining expression and attempt to infer
* the tupdesc from it. We ereport if we can't determine the tupdesc.
*
@@ -1445,7 +1445,7 @@ expandRecordVariable(ParseState *pstate, Var *var, int levelsup)
{
/*
* Recurse into the sub-select to see what its Var refers
- * to. We have to build an additional level of ParseState
+ * to. We have to build an additional level of ParseState
* to keep in step with varlevelsup in the subselect.
*/
ParseState mypstate;
@@ -1519,7 +1519,7 @@ expandRecordVariable(ParseState *pstate, Var *var, int levelsup)
/*
* We now have an expression we can't expand any more, so see if
- * get_expr_result_type() can do anything with it. If not, pass to
+ * get_expr_result_type() can do anything with it. If not, pass to
* lookup_rowtype_tupdesc() which will probably fail, but will give an
* appropriate error message while failing.
*/
diff --git a/src/backend/parser/parse_type.c b/src/backend/parser/parse_type.c
index 07fce8a0112..c4fd125b120 100644
--- a/src/backend/parser/parse_type.c
+++ b/src/backend/parser/parse_type.c
@@ -35,7 +35,7 @@ static int32 typenameTypeMod(ParseState *pstate, const TypeName *typeName,
/*
* LookupTypeName
* Given a TypeName object, lookup the pg_type syscache entry of the type.
- * Returns NULL if no such type can be found. If the type is found,
+ * Returns NULL if no such type can be found. If the type is found,
* the typmod value represented in the TypeName struct is computed and
* stored into *typmod_p.
*
@@ -48,7 +48,7 @@ static int32 typenameTypeMod(ParseState *pstate, const TypeName *typeName,
*
* typmod_p can be passed as NULL if the caller does not care to know the
* typmod value, but the typmod decoration (if any) will be validated anyway,
- * except in the case where the type is not found. Note that if the type is
+ * except in the case where the type is not found. Note that if the type is
* found but is a shell, and there is typmod decoration, an error will be
* thrown --- this is intentional.
*
@@ -113,7 +113,7 @@ LookupTypeName(ParseState *pstate, const TypeName *typeName,
* Look up the field.
*
* XXX: As no lock is taken here, this might fail in the presence of
- * concurrent DDL. But taking a lock would carry a performance
+ * concurrent DDL. But taking a lock would carry a performance
* penalty and would also require a permissions check.
*/
relid = RangeVarGetRelid(rel, NoLock, false);
@@ -578,7 +578,7 @@ typeTypeCollation(Type typ)
/*
* Given a type structure and a string, returns the internal representation
- * of that string. The "string" can be NULL to perform conversion of a NULL
+ * of that string. The "string" can be NULL to perform conversion of a NULL
* (which might result in failure, if the input function rejects NULLs).
*/
Datum
@@ -602,7 +602,7 @@ stringTypeDatum(Type tp, char *string, int32 atttypmod)
* instability in the input function is that comparison of Const nodes
* relies on bytewise comparison of the datums, so if the input function
* leaves garbage then subexpressions that should be identical may not get
- * recognized as such. See pgsql-hackers discussion of 2008-04-04.
+ * recognized as such. See pgsql-hackers discussion of 2008-04-04.
*/
if (string && !typform->typbyval)
{
@@ -649,7 +649,7 @@ pts_error_callback(void *arg)
/*
* Currently we just suppress any syntax error position report, rather
- * than transforming to an "internal query" error. It's unlikely that a
+ * than transforming to an "internal query" error. It's unlikely that a
* type name is complex enough to need positioning.
*/
errposition(0);
diff --git a/src/backend/parser/parse_utilcmd.c b/src/backend/parser/parse_utilcmd.c
index ee2a3357c3d..333136fca33 100644
--- a/src/backend/parser/parse_utilcmd.c
+++ b/src/backend/parser/parse_utilcmd.c
@@ -157,7 +157,7 @@ transformCreateStmt(CreateStmt *stmt, const char *queryString)
stmt = (CreateStmt *) copyObject(stmt);
/*
- * Look up the creation namespace. This also checks permissions on the
+ * Look up the creation namespace. This also checks permissions on the
* target namespace, locks it against concurrent drops, checks for a
* preexisting relation in that namespace with the same name, and updates
* stmt->relation->relpersistence if the selected namespace is temporary.
@@ -183,7 +183,7 @@ transformCreateStmt(CreateStmt *stmt, const char *queryString)
* If the target relation name isn't schema-qualified, make it so. This
* prevents some corner cases in which added-on rewritten commands might
* think they should apply to other relations that have the same name and
- * are earlier in the search path. But a local temp table is effectively
+ * are earlier in the search path. But a local temp table is effectively
* specified to be in pg_temp, so no need for anything extra in that case.
*/
if (stmt->relation->schemaname == NULL
@@ -719,7 +719,7 @@ transformTableLikeClause(CreateStmtContext *cxt, TableLikeClause *table_like_cla
constr = tupleDesc->constr;
/*
- * Initialize column number map for map_variable_attnos(). We need this
+ * Initialize column number map for map_variable_attnos(). We need this
* since dropped columns in the source table aren't copied, so the new
* table can have different column numbers.
*/
@@ -933,7 +933,7 @@ transformTableLikeClause(CreateStmtContext *cxt, TableLikeClause *table_like_cla
/*
* Close the parent rel, but keep our AccessShareLock on it until xact
- * commit. That will prevent someone else from deleting or ALTERing the
+ * commit. That will prevent someone else from deleting or ALTERing the
* parent before the child is committed.
*/
heap_close(relation, NoLock);
@@ -1613,7 +1613,7 @@ transformIndexConstraint(Constraint *constraint, CreateStmtContext *cxt)
parser_errposition(cxt->pstate, constraint->location)));
/*
- * Insist on it being a btree. That's the only kind that supports
+ * Insist on it being a btree. That's the only kind that supports
* uniqueness at the moment anyway; but we must have an index that
* exactly matches what you'd get from plain ADD CONSTRAINT syntax,
* else dump and reload will produce a different index (breaking
@@ -1640,7 +1640,7 @@ transformIndexConstraint(Constraint *constraint, CreateStmtContext *cxt)
/*
* We shouldn't see attnum == 0 here, since we already rejected
- * expression indexes. If we do, SystemAttributeDefinition will
+ * expression indexes. If we do, SystemAttributeDefinition will
* throw an error.
*/
if (attnum > 0)
@@ -1654,7 +1654,7 @@ transformIndexConstraint(Constraint *constraint, CreateStmtContext *cxt)
attname = pstrdup(NameStr(attform->attname));
/*
- * Insist on default opclass and sort options. While the index
+ * Insist on default opclass and sort options. While the index
* would still work as a constraint with non-default settings, it
* might not provide exactly the same uniqueness semantics as
* you'd get from a normally-created constraint; and there's also
@@ -1905,7 +1905,7 @@ transformFKConstraints(CreateStmtContext *cxt,
* transformIndexStmt - parse analysis for CREATE INDEX and ALTER TABLE
*
* Note: this is a no-op for an index not using either index expressions or
- * a predicate expression. There are several code paths that create indexes
+ * a predicate expression. There are several code paths that create indexes
* without bothering to call this, because they know they don't have any
* such expressions to deal with.
*
@@ -2028,7 +2028,7 @@ transformRuleStmt(RuleStmt *stmt, const char *queryString,
/*
* To avoid deadlock, make sure the first thing we do is grab
- * AccessExclusiveLock on the target relation. This will be needed by
+ * AccessExclusiveLock on the target relation. This will be needed by
* DefineQueryRewrite(), and we don't want to grab a lesser lock
* beforehand.
*/
diff --git a/src/backend/parser/parser.c b/src/backend/parser/parser.c
index b8ec7904666..50a8dac218a 100644
--- a/src/backend/parser/parser.c
+++ b/src/backend/parser/parser.c
@@ -65,7 +65,7 @@ raw_parser(const char *str)
* Intermediate filter between parser and core lexer (core_yylex in scan.l).
*
* The filter is needed because in some cases the standard SQL grammar
- * requires more than one token lookahead. We reduce these cases to one-token
+ * requires more than one token lookahead. We reduce these cases to one-token
* lookahead by combining tokens here, in order to keep the grammar LALR(1).
*
* Using a filter is simpler than trying to recognize multiword tokens
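To make the filtering idea concrete, here is a minimal standalone sketch (not the PostgreSQL lexer; the token names and the input stream are invented) of reducing a two-token construct to a single combined token so the grammar itself never needs two-token lookahead:

	#include <stdio.h>

	enum token { TOK_EOF, TOK_NULLS, TOK_FIRST, TOK_NULLS_FIRST, TOK_OTHER };

	/* stand-in for the core lexer: return the next token, advance the stream */
	static enum token
	base_lex(const enum token **stream)
	{
		return *(*stream)++;
	}

	/* the filter: peek one token ahead and combine when possible */
	static enum token
	filtered_lex(const enum token **stream)
	{
		enum token	cur = base_lex(stream);

		if (cur == TOK_NULLS)
		{
			const enum token *save = *stream;	/* remember position */

			if (base_lex(stream) == TOK_FIRST)
				return TOK_NULLS_FIRST;		/* two tokens become one */
			*stream = save;					/* push the lookahead back */
		}
		return cur;
	}

	int
	main(void)
	{
		const enum token input[] = {TOK_NULLS, TOK_FIRST, TOK_OTHER, TOK_EOF};
		const enum token *p = input;
		enum token	t;

		while ((t = filtered_lex(&p)) != TOK_EOF)
			printf("token %d\n", (int) t);
		return 0;
	}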
diff --git a/src/backend/port/darwin/system.c b/src/backend/port/darwin/system.c
index d571f26ef8c..1cd52669290 100644
--- a/src/backend/port/darwin/system.c
+++ b/src/backend/port/darwin/system.c
@@ -24,7 +24,7 @@
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/src/backend/port/dynloader/darwin.c b/src/backend/port/dynloader/darwin.c
index 484eb43b5c3..ccd92c39d4b 100644
--- a/src/backend/port/dynloader/darwin.c
+++ b/src/backend/port/dynloader/darwin.c
@@ -47,7 +47,7 @@ pg_dlerror(void)
/*
* These routines were taken from the Apache source, but were made
- * available with a PostgreSQL-compatible license. Kudos Wilfredo
+ * available with a PostgreSQL-compatible license. Kudos Wilfredo
* Sánchez <wsanchez@apple.com>.
*/
diff --git a/src/backend/port/dynloader/freebsd.c b/src/backend/port/dynloader/freebsd.c
index c76f87b5898..f0d42133dbc 100644
--- a/src/backend/port/dynloader/freebsd.c
+++ b/src/backend/port/dynloader/freebsd.c
@@ -20,7 +20,7 @@
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/src/backend/port/dynloader/netbsd.c b/src/backend/port/dynloader/netbsd.c
index 2d3d096b26f..77e27374743 100644
--- a/src/backend/port/dynloader/netbsd.c
+++ b/src/backend/port/dynloader/netbsd.c
@@ -20,7 +20,7 @@
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/src/backend/port/dynloader/openbsd.c b/src/backend/port/dynloader/openbsd.c
index f0e7dc29ef9..5a71b54c67b 100644
--- a/src/backend/port/dynloader/openbsd.c
+++ b/src/backend/port/dynloader/openbsd.c
@@ -20,7 +20,7 @@
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/src/backend/port/posix_sema.c b/src/backend/port/posix_sema.c
index 061fd2d3fdf..3c4cefe4afe 100644
--- a/src/backend/port/posix_sema.c
+++ b/src/backend/port/posix_sema.c
@@ -138,7 +138,7 @@ PosixSemaphoreKill(sem_t * sem)
*
* This is called during postmaster start or shared memory reinitialization.
* It should do whatever is needed to be able to support up to maxSemas
- * subsequent PGSemaphoreCreate calls. Also, if any system resources
+ * subsequent PGSemaphoreCreate calls. Also, if any system resources
* are acquired here or in PGSemaphoreCreate, register an on_shmem_exit
* callback to release them.
*
diff --git a/src/backend/port/sysv_sema.c b/src/backend/port/sysv_sema.c
index a7fccdfc059..59a3125cf52 100644
--- a/src/backend/port/sysv_sema.c
+++ b/src/backend/port/sysv_sema.c
@@ -253,7 +253,7 @@ IpcSemaphoreCreate(int numSems)
/*
* Can only get here if some other process managed to create the same
- * sema key before we did. Let him have that one, loop around to try
+ * sema key before we did. Let him have that one, loop around to try
* next key.
*/
}
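The collision handling above can be illustrated with a standalone program that walks candidate keys until semget() creates a fresh set; the key range and set size are arbitrary demo values, not PostgreSQL's:

	#include <stdio.h>
	#include <errno.h>
	#include <sys/types.h>
	#include <sys/ipc.h>
	#include <sys/sem.h>

	int
	main(void)
	{
		key_t		key;
		int			semid = -1;

		for (key = 5432000; key < 5432100; key++)
		{
			semid = semget(key, 4, IPC_CREAT | IPC_EXCL | 0600);
			if (semid >= 0)
				break;			/* created a fresh set */
			if (errno != EEXIST)
			{
				perror("semget");
				return 1;
			}
			/* someone else owns this key; loop around to try the next one */
		}
		if (semid < 0)
		{
			fprintf(stderr, "no free key in range\n");
			return 1;
		}
		printf("created semaphore set %d with key %ld\n", semid, (long) key);
		semctl(semid, 0, IPC_RMID);	/* clean up the demo set */
		return 0;
	}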
@@ -278,12 +278,12 @@ IpcSemaphoreCreate(int numSems)
*
* This is called during postmaster start or shared memory reinitialization.
* It should do whatever is needed to be able to support up to maxSemas
- * subsequent PGSemaphoreCreate calls. Also, if any system resources
+ * subsequent PGSemaphoreCreate calls. Also, if any system resources
* are acquired here or in PGSemaphoreCreate, register an on_shmem_exit
* callback to release them.
*
* The port number is passed for possible use as a key (for SysV, we use
- * it to generate the starting semaphore key). In a standalone backend,
+ * it to generate the starting semaphore key). In a standalone backend,
* zero will be passed.
*
* In the SysV implementation, we acquire semaphore sets on-demand; the
@@ -378,7 +378,7 @@ PGSemaphoreLock(PGSemaphore sema, bool interruptOK)
* from the operation prematurely because we were sent a signal. So we
* try and lock the semaphore again.
*
- * Each time around the loop, we check for a cancel/die interrupt. On
+ * Each time around the loop, we check for a cancel/die interrupt. On
* some platforms, if such an interrupt comes in while we are waiting, it
* will cause the semop() call to exit with errno == EINTR, allowing us to
* service the interrupt (if not in a critical section already) during the
@@ -396,7 +396,7 @@ PGSemaphoreLock(PGSemaphore sema, bool interruptOK)
* do CHECK_FOR_INTERRUPTS; then, a die() interrupt in this interval will
* execute directly. However, there is a huge pitfall: there is another
* window of a few instructions after the semop() before we are able to
- * reset ImmediateInterruptOK. If an interrupt occurs then, we'll lose
+ * reset ImmediateInterruptOK. If an interrupt occurs then, we'll lose
* control, which means that the lock has been acquired but our caller did
* not get a chance to record the fact. Therefore, we only set
* ImmediateInterruptOK if the caller tells us it's OK to do so, ie, the
@@ -409,9 +409,9 @@ PGSemaphoreLock(PGSemaphore sema, bool interruptOK)
* On some platforms, signals marked SA_RESTART (which is most, for us)
* will not interrupt the semop(); it will just keep waiting. Therefore
* it's necessary for cancel/die interrupts to be serviced directly by the
- * signal handler. On these platforms the behavior is really the same
+ * signal handler. On these platforms the behavior is really the same
* whether the signal arrives just before the semop() begins, or while it
- * is waiting. The loop on EINTR is thus important only for platforms
+ * is waiting. The loop on EINTR is thus important only for platforms
* without SA_RESTART.
*/
do
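Here is a minimal standalone sketch of that retry-on-EINTR loop; the interrupt flag is a placeholder for the real cancel/die machinery, and the loop matters only on platforms without SA_RESTART:

	#include <errno.h>
	#include <signal.h>
	#include <stdio.h>
	#include <sys/types.h>
	#include <sys/ipc.h>
	#include <sys/sem.h>

	union semun
	{
		int			val;
		struct semid_ds *buf;
		unsigned short *array;
	};

	static volatile sig_atomic_t interrupt_pending = 0;	/* set by a handler */

	static int
	sem_lock(int semid)
	{
		struct sembuf op = {0, -1, 0};	/* decrement semaphore 0 */
		int			rc;

		do
		{
			if (interrupt_pending)
			{
				/* a real backend would service cancel/die interrupts here */
			}
			rc = semop(semid, &op, 1);
		} while (rc < 0 && errno == EINTR);

		return rc;
	}

	int
	main(void)
	{
		int			semid = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);
		union semun arg;

		arg.val = 1;
		semctl(semid, 0, SETVAL, arg);
		if (sem_lock(semid) == 0)
			puts("semaphore acquired");
		semctl(semid, 0, IPC_RMID);
		return 0;
	}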
diff --git a/src/backend/port/sysv_shmem.c b/src/backend/port/sysv_shmem.c
index 9d170ca69c1..f746c811ec7 100644
--- a/src/backend/port/sysv_shmem.c
+++ b/src/backend/port/sysv_shmem.c
@@ -250,7 +250,7 @@ IpcMemoryDelete(int status, Datum shmId)
* Is a previously-existing shmem segment still existing and in use?
*
* The point of this exercise is to detect the case where a prior postmaster
- * crashed, but it left child backends that are still running. Therefore
+ * crashed, but it left child backends that are still running. Therefore
* we only care about shmem segments that are associated with the intended
* DataDir. This is an important consideration since accidental matches of
* shmem segment IDs are reasonably common.
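One way to phrase the "still in use?" test, sketched standalone: IPC_STAT the candidate segment and look at shm_nattch. The real code additionally checks that the segment belongs to the intended DataDir, which is omitted here:

	#include <stdio.h>
	#include <sys/types.h>
	#include <sys/ipc.h>
	#include <sys/shm.h>

	/* does any process still have this segment attached? */
	static int
	segment_in_use(int shmid)
	{
		struct shmid_ds ds;

		if (shmctl(shmid, IPC_STAT, &ds) < 0)
			return 0;			/* gone, or not ours to inspect */
		return ds.shm_nattch > 0;
	}

	int
	main(void)
	{
		int			shmid = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);

		printf("in use: %d\n", segment_in_use(shmid));	/* 0: nothing attached */
		shmctl(shmid, IPC_RMID, NULL);
		return 0;
	}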
@@ -349,14 +349,14 @@ PGSharedMemoryIsInUse(unsigned long id1, unsigned long id2)
* the storage.
*
* Dead Postgres segments are recycled if found, but we do not fail upon
- * collision with non-Postgres shmem segments. The idea here is to detect and
+ * collision with non-Postgres shmem segments. The idea here is to detect and
* re-use keys that may have been assigned by a crashed postmaster or backend.
*
* makePrivate means to always create a new segment, rather than attach to
* or recycle any existing segment.
*
* The port number is passed for possible use as a key (for SysV, we use
- * it to generate the starting shmem key). In a standalone backend,
+ * it to generate the starting shmem key). In a standalone backend,
* zero will be passed.
*/
PGShmemHeader *
@@ -537,7 +537,7 @@ PGSharedMemoryCreate(Size size, bool makePrivate, int port)
/*
* PGSharedMemoryReAttach
*
- * Re-attach to an already existing shared memory segment. In the non
+ * Re-attach to an already existing shared memory segment. In the non
* EXEC_BACKEND case this is not used, because postmaster children inherit
* the shared memory segment attachment via fork().
*
@@ -579,7 +579,7 @@ PGSharedMemoryReAttach(void)
*
* Detach from the shared memory segment, if still attached. This is not
* intended for use by the process that originally created the segment
- * (it will have an on_shmem_exit callback registered to do that). Rather,
+ * (it will have an on_shmem_exit callback registered to do that). Rather,
* this is for subprocesses that have inherited an attachment and want to
* get rid of it.
*/
diff --git a/src/backend/port/unix_latch.c b/src/backend/port/unix_latch.c
index 842ed0334b2..850f11d052d 100644
--- a/src/backend/port/unix_latch.c
+++ b/src/backend/port/unix_latch.c
@@ -239,7 +239,7 @@ WaitLatchOrSocket(volatile Latch *latch, int wakeEvents, pgsocket sock,
/*
* Initialize timeout if requested. We must record the current time so
* that we can determine the remaining timeout if the poll() or select()
- * is interrupted. (On some platforms, select() will update the contents
+ * is interrupted. (On some platforms, select() will update the contents
* of "tv" for us, but unfortunately we can't rely on that.)
*/
if (wakeEvents & WL_TIMEOUT)
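The remaining-timeout computation that motivates recording the start time can be sketched standalone; gettimeofday() and millisecond granularity are illustrative choices:

	#include <stdio.h>
	#include <sys/time.h>

	static long
	timeout_remaining_ms(const struct timeval *start, long timeout_ms)
	{
		struct timeval now;
		long		elapsed_ms;

		gettimeofday(&now, NULL);
		elapsed_ms = (now.tv_sec - start->tv_sec) * 1000 +
			(now.tv_usec - start->tv_usec) / 1000;
		return (elapsed_ms >= timeout_ms) ? 0 : timeout_ms - elapsed_ms;
	}

	int
	main(void)
	{
		struct timeval start;

		gettimeofday(&start, NULL);
		/* ... poll()/select() returns early with EINTR here ... */
		printf("remaining: %ld ms\n", timeout_remaining_ms(&start, 5000));
		return 0;
	}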
@@ -500,7 +500,7 @@ SetLatch(volatile Latch *latch)
/*
* XXX there really ought to be a memory barrier operation right here, to
* ensure that any flag variables we might have changed get flushed to
- * main memory before we check/set is_set. Without that, we have to
+ * main memory before we check/set is_set. Without that, we have to
* require that callers provide their own synchronization for machines
* with weak memory ordering (see latch.h).
*/
@@ -559,7 +559,7 @@ ResetLatch(volatile Latch *latch)
/*
* XXX there really ought to be a memory barrier operation right here, to
* ensure that the write to is_set gets flushed to main memory before we
- * examine any flag variables. Otherwise a concurrent SetLatch might
+ * examine any flag variables. Otherwise a concurrent SetLatch might
* falsely conclude that it needn't signal us, even though we have missed
* seeing some flag updates that SetLatch was supposed to inform us of.
* For the moment, callers must supply their own synchronization of flag
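The ordering problem both comments describe can be sketched with C11 atomics, where explicit fences stand in for the barrier the comments wish for; the flag and latch names are illustrative, not PostgreSQL's:

	#include <stdatomic.h>
	#include <stdbool.h>

	static _Atomic bool work_flag = false;	/* a "flag variable" */
	static _Atomic bool is_set = false;		/* the latch state */

	void
	set_latch(void)
	{
		/* flag writes must reach memory before we check/set is_set */
		atomic_thread_fence(memory_order_seq_cst);
		if (!atomic_exchange(&is_set, true))
		{
			/* first setter would wake the waiting process here */
		}
	}

	void
	reset_latch(void)
	{
		atomic_store(&is_set, false);
		/* the is_set write must be visible before we examine flags */
		atomic_thread_fence(memory_order_seq_cst);
		if (atomic_load(&work_flag))
		{
			/* a flag arrived concurrently; the caller must not sleep yet */
		}
	}

	int
	main(void)
	{
		atomic_store(&work_flag, true);
		set_latch();
		reset_latch();
		return 0;
	}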
diff --git a/src/backend/port/win32/socket.c b/src/backend/port/win32/socket.c
index e349511fedf..f48a7ce9062 100644
--- a/src/backend/port/win32/socket.c
+++ b/src/backend/port/win32/socket.c
@@ -151,7 +151,7 @@ pgwin32_waitforsinglesocket(SOCKET s, int what, int timeout)
(errmsg_internal("could not reset socket waiting event: error code %lu", GetLastError())));
/*
- * Track whether socket is UDP or not. (NB: most likely, this is both
+ * Track whether socket is UDP or not. (NB: most likely, this is both
* useless and wrong; there is no reason to think that the behavior of
* WSAEventSelect is different for TCP and UDP.)
*/
@@ -160,7 +160,7 @@ pgwin32_waitforsinglesocket(SOCKET s, int what, int timeout)
current_socket = s;
/*
- * Attach event to socket. NOTE: we must detach it again before
+ * Attach event to socket. NOTE: we must detach it again before
* returning, since other bits of code may try to attach other events to
* the socket.
*/
diff --git a/src/backend/port/win32_latch.c b/src/backend/port/win32_latch.c
index 6c417b92038..1d62bf998cf 100644
--- a/src/backend/port/win32_latch.c
+++ b/src/backend/port/win32_latch.c
@@ -246,7 +246,7 @@ WaitLatchOrSocket(volatile Latch *latch, int wakeEvents, pgsocket sock,
rc == WAIT_OBJECT_0 + pmdeath_eventno)
{
/*
- * Postmaster apparently died. Since the consequences of falsely
+ * Postmaster apparently died. Since the consequences of falsely
* returning WL_POSTMASTER_DEATH could be pretty unpleasant, we
* take the trouble to positively verify this with
* PostmasterIsAlive(), even though there is no known reason to
diff --git a/src/backend/port/win32_shmem.c b/src/backend/port/win32_shmem.c
index 0db8e8f8ddc..95e1d6fa09c 100644
--- a/src/backend/port/win32_shmem.c
+++ b/src/backend/port/win32_shmem.c
@@ -78,7 +78,7 @@ GetSharedMemName(void)
* Is a previously-existing shmem segment still existing and in use?
*
* The point of this exercise is to detect the case where a prior postmaster
- * crashed, but it left child backends that are still running. Therefore
+ * crashed, but it left child backends that are still running. Therefore
* we only care about shmem segments that are associated with the intended
* DataDir. This is an important consideration since accidental matches of
* shmem segment IDs are reasonably common.
diff --git a/src/backend/postmaster/autovacuum.c b/src/backend/postmaster/autovacuum.c
index f29032c5ccc..88ea2fb16a4 100644
--- a/src/backend/postmaster/autovacuum.c
+++ b/src/backend/postmaster/autovacuum.c
@@ -21,21 +21,21 @@
* There is an autovacuum shared memory area, where the launcher stores
* information about the database it wants vacuumed. When it wants a new
* worker to start, it sets a flag in shared memory and sends a signal to the
- * postmaster. The postmaster then knows nothing more than that it must start
- * a worker; so it forks a new child, which turns into a worker. This new
+ * postmaster. The postmaster then knows nothing more than that it must start
+ * a worker; so it forks a new child, which turns into a worker. This new
* connects to shared memory, and there it can inspect the information that the
* launcher has set up.
*
* If the fork() call fails in the postmaster, it sets a flag in the shared
* memory area, and sends a signal to the launcher. The launcher, upon
* noticing the flag, can try starting the worker again by resending the
- * signal. Note that the failure can only be transient (fork failure due to
+ * signal. Note that the failure can only be transient (fork failure due to
* high load, memory pressure, too many processes, etc); more permanent
* problems, like failure to connect to a database, are detected later in the
* worker and dealt with just by having the worker exit normally. The launcher
* will launch a new worker again later, per schedule.
*
- * When the worker is done vacuuming it sends SIGUSR2 to the launcher. The
+ * When the worker is done vacuuming it sends SIGUSR2 to the launcher. The
* launcher then wakes up and is able to launch another worker, if the schedule
* is so tight that a new worker is needed immediately. At this time the
* launcher can also balance the settings for the various remaining workers'
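A standalone sketch of that flag-in-shared-memory-plus-signal handshake; the struct, the signal choices, and the self-signaling demo are all illustrative stand-ins, not the real WorkerInfo machinery:

	#include <signal.h>
	#include <stdatomic.h>
	#include <sys/types.h>
	#include <unistd.h>

	struct shmem_ctl
	{
		_Atomic int start_worker_requested; /* launcher -> postmaster */
		_Atomic int fork_failed;			/* postmaster -> launcher */
	};

	static struct shmem_ctl *ctl;			/* assumed mapped in all processes */

	static void
	launcher_request_worker(pid_t postmaster_pid)
	{
		atomic_store(&ctl->start_worker_requested, 1);
		kill(postmaster_pid, SIGUSR1);		/* "please fork a worker" */
	}

	static void
	postmaster_on_fork_failure(pid_t launcher_pid)
	{
		atomic_store(&ctl->fork_failed, 1);
		kill(launcher_pid, SIGUSR2);		/* launcher will retry later */
	}

	int
	main(void)
	{
		static struct shmem_ctl local;		/* stand-in for a shared mapping */

		ctl = &local;
		signal(SIGUSR1, SIG_IGN);			/* demo only: survive our own signals */
		signal(SIGUSR2, SIG_IGN);
		launcher_request_worker(getpid());
		postmaster_on_fork_failure(getpid());
		return 0;
	}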
@@ -243,7 +243,7 @@ typedef enum
/*-------------
* The main autovacuum shmem struct. On shared memory we store this main
- * struct and the array of WorkerInfo structs. This struct keeps:
+ * struct and the array of WorkerInfo structs. This struct keeps:
*
* av_signal set by other processes to indicate various conditions
* av_launcherpid the PID of the autovacuum launcher
@@ -430,7 +430,7 @@ AutoVacLauncherMain(int argc, char *argv[])
/*
* If possible, make this process a group leader, so that the postmaster
- * can signal any child processes too. (autovacuum probably never has any
+ * can signal any child processes too. (autovacuum probably never has any
* child processes, but for consistency we make all postmaster child
* processes do this.)
*/
@@ -440,7 +440,7 @@ AutoVacLauncherMain(int argc, char *argv[])
#endif
/*
- * Set up signal handlers. We operate on databases much like a regular
+ * Set up signal handlers. We operate on databases much like a regular
* backend, so we use the same signal handling. See equivalent code in
* tcop/postgres.c.
*/
@@ -547,7 +547,7 @@ AutoVacLauncherMain(int argc, char *argv[])
/*
* Force zero_damaged_pages OFF in the autovac process, even if it is set
- * in postgresql.conf. We don't really want such a dangerous option being
+ * in postgresql.conf. We don't really want such a dangerous option being
* applied non-interactively.
*/
SetConfigOption("zero_damaged_pages", "false", PGC_SUSET, PGC_S_OVERRIDE);
@@ -870,7 +870,7 @@ launcher_determine_sleep(bool canlaunch, bool recursing, struct timeval * nap)
* this the "new" database, because when the database was already present on
* the list, we expect that this function is not called at all). The
* preexisting list, if any, will be used to preserve the order of the
- * databases in the autovacuum_naptime period. The new database is put at the
+ * databases in the autovacuum_naptime period. The new database is put at the
* end of the interval. The actual values are not saved, which should not be
* much of a problem.
*/
@@ -1074,7 +1074,7 @@ db_comparator(const void *a, const void *b)
*
* Bare-bones procedure for starting an autovacuum worker from the launcher.
* It determines what database to work on, sets up shared memory stuff and
- * signals postmaster to start the worker. It fails gracefully if invoked when
+ * signals postmaster to start the worker. It fails gracefully if invoked when
* autovacuum_workers are already active.
*
* Return value is the OID of the database that the worker is going to process,
@@ -1346,7 +1346,7 @@ launch_worker(TimestampTz now)
/*
* Called from postmaster to signal a failure to fork a process to become
- * worker. The postmaster should kill(SIGUSR2) the launcher shortly
+ * worker. The postmaster should kill(SIGUSR2) the launcher shortly
* after calling this function.
*/
void
@@ -1498,7 +1498,7 @@ AutoVacWorkerMain(int argc, char *argv[])
/*
* If possible, make this process a group leader, so that the postmaster
- * can signal any child processes too. (autovacuum probably never has any
+ * can signal any child processes too. (autovacuum probably never has any
* child processes, but for consistency we make all postmaster child
* processes do this.)
*/
@@ -1508,7 +1508,7 @@ AutoVacWorkerMain(int argc, char *argv[])
#endif
/*
- * Set up signal handlers. We operate on databases much like a regular
+ * Set up signal handlers. We operate on databases much like a regular
* backend, so we use the same signal handling. See equivalent code in
* tcop/postgres.c.
*
@@ -1559,7 +1559,7 @@ AutoVacWorkerMain(int argc, char *argv[])
EmitErrorReport();
/*
- * We can now go away. Note that because we called InitProcess, a
+ * We can now go away. Note that because we called InitProcess, a
* callback was registered to do ProcKill, which will clean up
* necessary state.
*/
@@ -1573,7 +1573,7 @@ AutoVacWorkerMain(int argc, char *argv[])
/*
* Force zero_damaged_pages OFF in the autovac process, even if it is set
- * in postgresql.conf. We don't really want such a dangerous option being
+ * in postgresql.conf. We don't really want such a dangerous option being
* applied non-interactively.
*/
SetConfigOption("zero_damaged_pages", "false", PGC_SUSET, PGC_S_OVERRIDE);
@@ -1701,7 +1701,7 @@ FreeWorkerInfo(int code, Datum arg)
/*
* Wake the launcher up so that he can launch a new worker immediately
* if required. We only save the launcher's PID in local memory here;
- * the actual signal will be sent when the PGPROC is recycled. Note
+ * the actual signal will be sent when the PGPROC is recycled. Note
* that we always do this, so that the launcher can rebalance the cost
* limit setting of the remaining workers.
*
@@ -1809,7 +1809,7 @@ autovac_balance_cost(void)
/*
* We put a lower bound of 1 on the cost_limit, to avoid division-
- * by-zero in the vacuum code. Also, in case of roundoff trouble
+ * by-zero in the vacuum code. Also, in case of roundoff trouble
* in these calculations, let's be sure we don't ever set
* cost_limit to more than the base value.
*/
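The clamping rule is simple enough to state exactly; a standalone sketch with invented numbers:

	#include <stdio.h>

	static int
	clamp_cost_limit(double computed, int base)
	{
		if (computed < 1)
			return 1;			/* avoid division by zero downstream */
		if (computed > base)
			return base;		/* guard against roundoff overshoot */
		return (int) computed;
	}

	int
	main(void)
	{
		printf("%d %d %d\n",
			   clamp_cost_limit(0.3, 200),		/* -> 1 */
			   clamp_cost_limit(120.7, 200),	/* -> 120 */
			   clamp_cost_limit(250.0, 200));	/* -> 200 */
		return 0;
	}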
@@ -1852,7 +1852,7 @@ get_database_list(void)
/*
* Start a transaction so we can access pg_database, and get a snapshot.
* We don't have a use for the snapshot itself, but we're interested in
- * the secondary effect that it sets RecentGlobalXmin. (This is critical
+ * the secondary effect that it sets RecentGlobalXmin. (This is critical
* for anything that reads heap pages, because HOT may decide to prune
* them even if the process doesn't attempt to modify any tuples.)
*/
@@ -2269,14 +2269,14 @@ do_autovacuum(void)
}
/*
- * Ok, good to go. Store the table in shared memory before releasing
+ * Ok, good to go. Store the table in shared memory before releasing
* the lock so that other workers don't vacuum it concurrently.
*/
MyWorkerInfo->wi_tableoid = relid;
LWLockRelease(AutovacuumScheduleLock);
/*
- * Remember the prevailing values of the vacuum cost GUCs. We have to
+ * Remember the prevailing values of the vacuum cost GUCs. We have to
* restore these at the bottom of the loop, else we'll compute wrong
* values in the next iteration of autovac_balance_cost().
*/
@@ -2305,7 +2305,7 @@ do_autovacuum(void)
/*
* Save the relation name for a possible error message, to avoid a
- * catalog lookup in case of an error. If any of these return NULL,
+ * catalog lookup in case of an error. If any of these return NULL,
* then the relation has been dropped since last we checked; skip it.
* Note: they must live in a long-lived memory context because we call
* vacuum and analyze in different transactions.
@@ -2759,7 +2759,7 @@ relation_needs_vacanalyze(Oid relid,
{
/*
* Skip a table not found in stat hash, unless we have to force vacuum
- * for anti-wrap purposes. If it's not acted upon, there's no need to
+ * for anti-wrap purposes. If it's not acted upon, there's no need to
* vacuum it.
*/
*dovacuum = force_vacuum;
@@ -2961,7 +2961,7 @@ AutoVacuumShmemInit(void)
* Refresh pgstats data for an autovacuum process
*
* Cause the next pgstats read operation to obtain fresh data, but throttle
- * such refreshing in the autovacuum launcher. This is mostly to avoid
+ * such refreshing in the autovacuum launcher. This is mostly to avoid
* rereading the pgstats files too many times in quick succession when there
* are many databases.
*
diff --git a/src/backend/postmaster/bgwriter.c b/src/backend/postmaster/bgwriter.c
index 286ae867955..9576251780c 100644
--- a/src/backend/postmaster/bgwriter.c
+++ b/src/backend/postmaster/bgwriter.c
@@ -2,11 +2,11 @@
*
* bgwriter.c
*
- * The background writer (bgwriter) is new as of Postgres 8.0. It attempts
+ * The background writer (bgwriter) is new as of Postgres 8.0. It attempts
* to keep regular backends from having to write out dirty shared buffers
* (which they would only do when needing to free a shared buffer to read in
* another page). In the best scenario all writes from shared buffers will
- * be issued by the background writer process. However, regular backends are
+ * be issued by the background writer process. However, regular backends are
* still empowered to issue writes if the bgwriter fails to maintain enough
* clean shared buffers.
*
@@ -99,7 +99,7 @@ BackgroundWriterMain(void)
/*
* If possible, make this process a group leader, so that the postmaster
- * can signal any child processes too. (bgwriter probably never has any
+ * can signal any child processes too. (bgwriter probably never has any
* child processes, but for consistency we make all postmaster child
* processes do this.)
*/
@@ -172,7 +172,7 @@ BackgroundWriterMain(void)
/*
* These operations are really just a minimal subset of
- * AbortTransaction(). We don't have very many resources to worry
+ * AbortTransaction(). We don't have very many resources to worry
* about in bgwriter, but we do have LWLocks, buffers, and temp files.
*/
LWLockReleaseAll();
@@ -269,7 +269,7 @@ BackgroundWriterMain(void)
if (FirstCallSinceLastCheckpoint())
{
/*
- * After any checkpoint, close all smgr files. This is so we
+ * After any checkpoint, close all smgr files. This is so we
* won't hang onto smgr references to deleted files indefinitely.
*/
smgrcloseall();
@@ -304,7 +304,7 @@ BackgroundWriterMain(void)
* and the time we call StrategyNotifyBgWriter. While it's not
* critical that we not hibernate anyway, we try to reduce the odds of
* that by only hibernating when BgBufferSync says nothing's happening
- * for two consecutive cycles. Also, we mitigate any possible
+ * for two consecutive cycles. Also, we mitigate any possible
* consequences of a missed wakeup by not hibernating forever.
*/
if (rc == WL_TIMEOUT && can_hibernate && prev_hibernate)
@@ -358,7 +358,7 @@ bg_quickdie(SIGNAL_ARGS)
on_exit_reset();
/*
- * Note we do exit(2) not exit(0). This is to force the postmaster into a
+ * Note we do exit(2) not exit(0). This is to force the postmaster into a
* system reset cycle if some idiot DBA sends a manual SIGQUIT to a random
* backend. This is necessary precisely because we don't clean up our
* shared memory state. (The "dead man switch" mechanism in pmsignal.c
diff --git a/src/backend/postmaster/checkpointer.c b/src/backend/postmaster/checkpointer.c
index fdf6625c58b..1dbd23cb773 100644
--- a/src/backend/postmaster/checkpointer.c
+++ b/src/backend/postmaster/checkpointer.c
@@ -2,7 +2,7 @@
*
* checkpointer.c
*
- * The checkpointer is new as of Postgres 9.2. It handles all checkpoints.
+ * The checkpointer is new as of Postgres 9.2. It handles all checkpoints.
* Checkpoints are automatically dispatched after a certain amount of time has
* elapsed since the last one, and it can be signaled to perform requested
* checkpoints as well. (The GUC parameter that mandates a checkpoint every
@@ -14,7 +14,7 @@
* subprocess finishes, or as soon as recovery begins if we are doing archive
* recovery. It remains alive until the postmaster commands it to terminate.
* Normal termination is by SIGUSR2, which instructs the checkpointer to
- * execute a shutdown checkpoint and then exit(0). (All backends must be
+ * execute a shutdown checkpoint and then exit(0). (All backends must be
* stopped before SIGUSR2 is issued!) Emergency termination is by SIGQUIT;
* like any backend, the checkpointer will simply abort and exit on SIGQUIT.
*
@@ -198,7 +198,7 @@ CheckpointerMain(void)
/*
* If possible, make this process a group leader, so that the postmaster
- * can signal any child processes too. (checkpointer probably never has
+ * can signal any child processes too. (checkpointer probably never has
* any child processes, but for consistency we make all postmaster child
* processes do this.)
*/
@@ -211,7 +211,7 @@ CheckpointerMain(void)
* Properly accept or ignore signals the postmaster might send us
*
* Note: we deliberately ignore SIGTERM, because during a standard Unix
- * system shutdown cycle, init will SIGTERM all processes at once. We
+ * system shutdown cycle, init will SIGTERM all processes at once. We
* want to wait for the backends to exit, whereupon the postmaster will
* tell us it's okay to shut down (via SIGUSR2).
*/
@@ -279,7 +279,7 @@ CheckpointerMain(void)
/*
* These operations are really just a minimal subset of
- * AbortTransaction(). We don't have very many resources to worry
+ * AbortTransaction(). We don't have very many resources to worry
* about in checkpointer, but we do have LWLocks, buffers, and temp
* files.
*/
@@ -506,7 +506,7 @@ CheckpointerMain(void)
ckpt_performed = CreateRestartPoint(flags);
/*
- * After any checkpoint, close all smgr files. This is so we
+ * After any checkpoint, close all smgr files. This is so we
* won't hang onto smgr references to deleted files indefinitely.
*/
smgrcloseall();
@@ -639,7 +639,7 @@ CheckArchiveTimeout(void)
}
/*
- * Returns true if an immediate checkpoint request is pending. (Note that
+ * Returns true if an immediate checkpoint request is pending. (Note that
* this does not check the *current* checkpoint's IMMEDIATE flag, but whether
* there is one pending behind it.)
*/
@@ -826,7 +826,7 @@ chkpt_quickdie(SIGNAL_ARGS)
on_exit_reset();
/*
- * Note we do exit(2) not exit(0). This is to force the postmaster into a
+ * Note we do exit(2) not exit(0). This is to force the postmaster into a
* system reset cycle if some idiot DBA sends a manual SIGQUIT to a random
* backend. This is necessary precisely because we don't clean up our
* shared memory state. (The "dead man switch" mechanism in pmsignal.c
@@ -977,7 +977,7 @@ RequestCheckpoint(int flags)
CreateCheckPoint(flags | CHECKPOINT_IMMEDIATE);
/*
- * After any checkpoint, close all smgr files. This is so we won't
+ * After any checkpoint, close all smgr files. This is so we won't
* hang onto smgr references to deleted files indefinitely.
*/
smgrcloseall();
@@ -1108,7 +1108,7 @@ RequestCheckpoint(int flags)
* to the requests[] queue without checking for duplicates. The checkpointer
* will have to eliminate dups internally anyway. However, if we discover
* that the queue is full, we make a pass over the entire queue to compact
- * it. This is somewhat expensive, but the alternative is for the backend
+ * it. This is somewhat expensive, but the alternative is for the backend
* to perform its own fsync, which is far more expensive in practice. It
* is theoretically possible a backend fsync might still be necessary, if
* the queue is full and contains no duplicate entries. In that case, we
@@ -1134,7 +1134,7 @@ ForwardFsyncRequest(RelFileNode rnode, ForkNumber forknum, BlockNumber segno)
/*
* If the checkpointer isn't running or the request queue is full, the
- * backend will have to perform its own fsync request. But before forcing
+ * backend will have to perform its own fsync request. But before forcing
* that to happen, we can try to compact the request queue.
*/
if (CheckpointerShmem->checkpointer_pid == 0 ||
@@ -1178,7 +1178,7 @@ ForwardFsyncRequest(RelFileNode rnode, ForkNumber forknum, BlockNumber segno)
* Although a full fsync request queue is not common, it can lead to severe
* performance problems when it does happen. So far, this situation has
* only been observed to occur when the system is under heavy write load,
- * and especially during the "sync" phase of a checkpoint. Without this
+ * and especially during the "sync" phase of a checkpoint. Without this
* logic, each backend begins doing an fsync for every block written, which
* gets very expensive and can slow down the whole system.
*
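The compaction pass amounts to de-duplicating a fixed-size array in a single scan; here is a standalone sketch with small integers standing in for the real (relation, fork, segment) requests:

	#include <stdbool.h>
	#include <stdio.h>

	#define QUEUE_MAX 8

	static int	queue[QUEUE_MAX];
	static int	queue_len;

	static bool
	compact_queue(void)
	{
		bool		seen[1024] = {false};	/* assumes request ids < 1024 */
		int			keep = 0;
		int			i;

		for (i = 0; i < queue_len; i++)
		{
			if (!seen[queue[i]])
			{
				seen[queue[i]] = true;
				queue[keep++] = queue[i];	/* first occurrence survives */
			}
		}
		if (keep == queue_len)
			return false;		/* no duplicates: caller must fsync itself */
		queue_len = keep;
		return true;
	}

	int
	main(void)
	{
		int			sample[] = {3, 5, 3, 7, 5, 3, 9, 7};
		int			i;

		queue_len = QUEUE_MAX;
		for (i = 0; i < QUEUE_MAX; i++)
			queue[i] = sample[i];
		printf("compacted: %s, new length %d\n",
			   compact_queue() ? "yes" : "no", queue_len);
		return 0;
	}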
diff --git a/src/backend/postmaster/fork_process.c b/src/backend/postmaster/fork_process.c
index aa54721f5a5..afd79279472 100644
--- a/src/backend/postmaster/fork_process.c
+++ b/src/backend/postmaster/fork_process.c
@@ -101,7 +101,7 @@ fork_process(void)
#endif /* LINUX_OOM_SCORE_ADJ */
/*
- * Older Linux kernels have oom_adj not oom_score_adj. This works
+ * Older Linux kernels have oom_adj not oom_score_adj. This works
* similarly except with a different scale of adjustment values. If
* it's necessary to build Postgres to work with either API, you can
* define both LINUX_OOM_SCORE_ADJ and LINUX_OOM_ADJ.
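A standalone sketch of falling back between the two Linux interfaces; the value written is an arbitrary demo choice, not the compile-time setting the real code uses:

	#include <stdio.h>

	static void
	reset_oom_score(void)
	{
		FILE	   *fp = fopen("/proc/self/oom_score_adj", "w");

		if (fp == NULL)			/* older kernel: try the previous API */
			fp = fopen("/proc/self/oom_adj", "w");
		if (fp != NULL)
		{
			fputs("0\n", fp);
			fclose(fp);
		}
	}

	int
	main(void)
	{
		reset_oom_score();
		puts("oom score adjustment attempted");
		return 0;
	}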
diff --git a/src/backend/postmaster/pgarch.c b/src/backend/postmaster/pgarch.c
index 2bb572ef686..c6ef9e1083e 100644
--- a/src/backend/postmaster/pgarch.c
+++ b/src/backend/postmaster/pgarch.c
@@ -593,9 +593,9 @@ pgarch_archiveXlog(char *xlog)
{
/*
* If either the shell itself, or a called command, died on a signal,
- * abort the archiver. We do this because system() ignores SIGINT and
+ * abort the archiver. We do this because system() ignores SIGINT and
* SIGQUIT while waiting; so a signal is very likely something that
- * should have interrupted us too. If we overreact it's no big deal,
+ * should have interrupted us too. If we overreact it's no big deal,
* the postmaster will just start the archiver again.
*
* Per the Single Unix Spec, shells report exit status > 128 when a
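Those two status tests look like this in a standalone sketch; the command is a trivial stand-in for the real archive command:

	#include <stdio.h>
	#include <stdlib.h>
	#include <sys/wait.h>

	int
	main(void)
	{
		int			rc = system("exit 0");	/* stand-in archive command */

		if (rc != 0)
		{
			if (WIFSIGNALED(rc))
				fprintf(stderr, "shell died on signal %d\n", WTERMSIG(rc));
			else if (WIFEXITED(rc) && WEXITSTATUS(rc) > 128)
				fprintf(stderr, "command died on signal %d\n",
						WEXITSTATUS(rc) - 128);
			else
				fprintf(stderr, "command failed with status %d\n",
						WEXITSTATUS(rc));
			return 1;
		}
		puts("archived ok");
		return 0;
	}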
diff --git a/src/backend/postmaster/pgstat.c b/src/backend/postmaster/pgstat.c
index ea3d2d7657b..99701bb5322 100644
--- a/src/backend/postmaster/pgstat.c
+++ b/src/backend/postmaster/pgstat.c
@@ -352,7 +352,7 @@ pgstat_init(void)
* On some platforms, pg_getaddrinfo_all() may return multiple addresses
* only one of which will actually work (eg, both IPv6 and IPv4 addresses
* when kernel will reject IPv6). Worse, the failure may occur at the
- * bind() or perhaps even connect() stage. So we must loop through the
+ * bind() or perhaps even connect() stage. So we must loop through the
* results till we find a working combination. We will generate LOG
* messages, but no error, for bogus combinations.
*/
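A standalone sketch of that try-every-address loop; UDP, "localhost", and port 9999 are demo choices (the real code binds an ephemeral port on loopback):

	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <netdb.h>
	#include <sys/socket.h>

	int
	main(void)
	{
		struct addrinfo hints,
				   *addrs,
				   *a;
		int			fd = -1;

		memset(&hints, 0, sizeof(hints));
		hints.ai_family = AF_UNSPEC;
		hints.ai_socktype = SOCK_DGRAM;
		if (getaddrinfo("localhost", "9999", &hints, &addrs) != 0)
			return 1;

		for (a = addrs; a != NULL; a = a->ai_next)
		{
			fd = socket(a->ai_family, a->ai_socktype, a->ai_protocol);
			if (fd < 0)
				continue;		/* log and try the next combination */
			if (bind(fd, a->ai_addr, a->ai_addrlen) == 0 &&
				connect(fd, a->ai_addr, a->ai_addrlen) == 0)
				break;			/* found a working combination */
			close(fd);
			fd = -1;
		}
		freeaddrinfo(addrs);
		printf(fd >= 0 ? "socket ready\n" : "no working address\n");
		if (fd >= 0)
			close(fd);
		return fd >= 0 ? 0 : 1;
	}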
@@ -600,7 +600,7 @@ pgstat_reset_remove_files(const char *directory)
/*
* pgstat_reset_all() -
*
- * Remove the stats files. This is currently used only if WAL
+ * Remove the stats files. This is currently used only if WAL
* recovery is needed after a crash.
*/
void
@@ -661,7 +661,7 @@ pgstat_start(void)
/*
* Do nothing if too soon since last collector start. This is a safety
* valve to protect against continuous respawn attempts if the collector
- * is dying immediately at launch. Note that since we will be re-called
+ * is dying immediately at launch. Note that since we will be re-called
* from the postmaster main loop, we will get another chance later.
*/
curtime = time(NULL);
@@ -1105,7 +1105,7 @@ pgstat_vacuum_stat(void)
*
* Collect the OIDs of all objects listed in the specified system catalog
* into a temporary hash table. Caller should hash_destroy the result
- * when done with it. (However, we make the table in CurrentMemoryContext
+ * when done with it. (However, we make the table in CurrentMemoryContext
* so that it will be freed properly in event of an error.)
* ----------
*/
@@ -1350,7 +1350,7 @@ pgstat_report_analyze(Relation rel,
* have counted such rows as live or dead respectively. Because we will
* report our counts of such rows at transaction end, we should subtract
* off these counts from what we send to the collector now, else they'll
- * be double-counted after commit. (This approach also ensures that the
+ * be double-counted after commit. (This approach also ensures that the
* collector ends up with the right numbers if we abort instead of
* committing.)
*/
@@ -1581,7 +1581,7 @@ pgstat_end_function_usage(PgStat_FunctionCallUsage *fcu, bool finalize)
/*
* Compute the new f_total_time as the total elapsed time added to the
- * pre-call value of f_total_time. This is necessary to avoid
+ * pre-call value of f_total_time. This is necessary to avoid
* double-counting any time taken by recursive calls of myself. (We do
* not need any similar kluge for self time, since that already excludes
* any recursive calls.)
@@ -2067,7 +2067,7 @@ AtPrepare_PgStat(void)
* Clean up after successful PREPARE.
*
* All we need do here is unlink the transaction stats state from the
- * nontransactional state. The nontransactional action counts will be
+ * nontransactional state. The nontransactional action counts will be
* reported to the stats collector immediately, while the effects on live
* and dead tuple counts are preserved in the 2PC state file.
*
@@ -2607,7 +2607,7 @@ pgstat_report_activity(BackendState state, const char *cmd_str)
{
/*
* track_activities is disabled, but we last reported a
- * non-disabled state. As our final update, change the state and
+ * non-disabled state. As our final update, change the state and
* clear fields we will not be updating anymore.
*/
beentry->st_changecount++;
@@ -2828,12 +2828,12 @@ pgstat_read_current_status(void)
* pgstat_get_backend_current_activity() -
*
* Return a string representing the current activity of the backend with
- * the specified PID. This looks directly at the BackendStatusArray,
+ * the specified PID. This looks directly at the BackendStatusArray,
* and so will provide current information regardless of the age of our
* transaction's snapshot of the status array.
*
* It is the caller's responsibility to invoke this only for backends whose
- * state is expected to remain stable while the result is in use. The
+ * state is expected to remain stable while the result is in use. The
* only current use is in deadlock reporting, where we can expect that
* the target backend is blocked on a lock. (There are corner cases
* where the target's wait could get aborted while we are looking at it,
@@ -2901,7 +2901,7 @@ pgstat_get_backend_current_activity(int pid, bool checkUser)
* pgstat_get_crashed_backend_activity() -
*
* Return a string representing the current activity of the backend with
- * the specified PID. Like the function above, but reads shared memory with
+ * the specified PID. Like the function above, but reads shared memory with
* the expectation that it may be corrupt. On success, copy the string
* into the "buffer" argument and return that pointer. On failure,
* return NULL.
@@ -2910,7 +2910,7 @@ pgstat_get_backend_current_activity(int pid, bool checkUser)
* query that crashed a backend. In particular, no attempt is made to
* follow the correct concurrency protocol when accessing the
* BackendStatusArray. But that's OK, in the worst case we'll return a
- * corrupted message. We also must take care not to trip on ereport(ERROR).
+ * corrupted message. We also must take care not to trip on ereport(ERROR).
* ----------
*/
const char *
@@ -3056,7 +3056,7 @@ pgstat_send_bgwriter(void)
/* ----------
* PgstatCollectorMain() -
*
- * Start up the statistics collector process. This is the body of the
+ * Start up the statistics collector process. This is the body of the
* postmaster child process.
*
* The argc/argv parameters are valid only in EXEC_BACKEND case.
@@ -3077,7 +3077,7 @@ PgstatCollectorMain(int argc, char *argv[])
/*
* If possible, make this process a group leader, so that the postmaster
- * can signal any child processes too. (pgstat probably never has any
+ * can signal any child processes too. (pgstat probably never has any
* child processes, but for consistency we make all postmaster child
* processes do this.)
*/
@@ -3302,7 +3302,7 @@ PgstatCollectorMain(int argc, char *argv[])
/*
* Windows, at least in its Windows Server 2003 R2 incarnation,
- * sometimes loses FD_READ events. Waking up and retrying the recv()
+ * sometimes loses FD_READ events. Waking up and retrying the recv()
* fixes that, so don't sleep indefinitely. This is a crock of the
* first water, but until somebody wants to debug exactly what's
* happening there, this is the best we can do. The two-second
@@ -4159,7 +4159,7 @@ pgstat_read_db_statsfile_timestamp(Oid databaseid, bool permanent,
const char *statfile = permanent ? PGSTAT_STAT_PERMANENT_FILENAME : pgstat_stat_filename;
/*
- * Try to open the stats file. As above, anything but ENOENT is worthy of
+ * Try to open the stats file. As above, anything but ENOENT is worthy of
* complaining about.
*/
if ((fpin = AllocateFile(statfile, PG_BINARY_R)) == NULL)
@@ -4295,7 +4295,7 @@ backend_read_statsfile(void)
*
* We don't recompute min_ts after sleeping, except in the
* unlikely case that cur_ts went backwards. So we might end up
- * accepting a file a bit older than PGSTAT_STAT_INTERVAL. In
+ * accepting a file a bit older than PGSTAT_STAT_INTERVAL. In
* practice that shouldn't happen, though, as long as the sleep
* time is less than PGSTAT_STAT_INTERVAL; and we don't want to
* tell the collector that our cutoff time is less than what we'd
@@ -4388,7 +4388,7 @@ pgstat_setup_memcxt(void)
/* ----------
* pgstat_clear_snapshot() -
*
- * Discard any data collected in the current transaction. Any subsequent
+ * Discard any data collected in the current transaction. Any subsequent
* request will cause new snapshots to be read.
*
* This is also invoked during transaction commit or abort to discard
diff --git a/src/backend/postmaster/postmaster.c b/src/backend/postmaster/postmaster.c
index 6977e43a8f6..7d587ef652e 100644
--- a/src/backend/postmaster/postmaster.c
+++ b/src/backend/postmaster/postmaster.c
@@ -2,7 +2,7 @@
*
* postmaster.c
* This program acts as a clearing house for requests to the
- * POSTGRES system. Frontend programs send a startup message
+ * POSTGRES system. Frontend programs send a startup message
* to the Postmaster and the postmaster uses the info in the
* message to setup a backend process.
*
@@ -15,7 +15,7 @@
* The postmaster process creates the shared memory and semaphore
* pools during startup, but as a rule does not touch them itself.
* In particular, it is not a member of the PGPROC array of backends
- * and so it cannot participate in lock-manager operations. Keeping
+ * and so it cannot participate in lock-manager operations. Keeping
* the postmaster away from shared memory operations makes it simpler
* and more reliable. The postmaster is almost always able to recover
* from crashes of individual backends by resetting shared memory;
@@ -146,7 +146,7 @@
* children we have and send them appropriate signals when necessary.
*
* "Special" children such as the startup, bgwriter and autovacuum launcher
- * tasks are not in this list. Autovacuum worker and walsender are in it.
+ * tasks are not in this list. Autovacuum worker and walsender are in it.
* Also, "dead_end" children are in it: these are children launched just for
* the purpose of sending a friendly rejection message to a would-be client.
* We must track them because they are attached to shared memory, but we know
@@ -163,7 +163,7 @@ typedef struct bkend
int child_slot; /* PMChildSlot for this backend, if any */
/*
- * Flavor of backend or auxiliary process. Note that BACKEND_TYPE_WALSND
+ * Flavor of backend or auxiliary process. Note that BACKEND_TYPE_WALSND
* backends initially announce themselves as BACKEND_TYPE_NORMAL, so if
* bkend_type is normal, you should check for a recent transition.
*/
@@ -183,7 +183,7 @@ static Backend *ShmemBackendArray;
* List of background workers.
*
* A worker that requests a database connection during registration will have
- * rw_backend set, and will be present in BackendList. Note: do not rely on
+ * rw_backend set, and will be present in BackendList. Note: do not rely on
* rw_backend being non-NULL for shmem-connected workers!
*/
typedef struct RegisteredBgWorker
@@ -236,10 +236,10 @@ static char ExtraOptions[MAXPGPATH];
/*
* These globals control the behavior of the postmaster in case some
- * backend dumps core. Normally, it kills all peers of the dead backend
+ * backend dumps core. Normally, it kills all peers of the dead backend
* and reinitializes shared memory. By specifying -s or -n, we can have
* the postmaster stop (rather than kill) peers and not reinitialize
- * shared data structures. (Reinit is currently dead code, though.)
+ * shared data structures. (Reinit is currently dead code, though.)
*/
static bool Reinit = true;
static int SendStop = false;
@@ -289,7 +289,7 @@ static bool RecoveryError = false; /* T if WAL recovery failed */
* state and the startup process is launched. The startup process begins by
* reading the control file and other preliminary initialization steps.
* In a normal startup, or after crash recovery, the startup process exits
- * with exit code 0 and we switch to PM_RUN state. However, archive recovery
+ * with exit code 0 and we switch to PM_RUN state. However, archive recovery
* is handled specially since it takes much longer and we would like to support
* hot standby during archive recovery.
*
@@ -298,7 +298,7 @@ static bool RecoveryError = false; /* T if WAL recovery failed */
* checkpointer are launched, while the startup process continues applying WAL.
* If Hot Standby is enabled, then, after reaching a consistent point in WAL
* redo, startup process signals us again, and we switch to PM_HOT_STANDBY
- * state and begin accepting connections to perform read-only queries. When
+ * state and begin accepting connections to perform read-only queries. When
* archive recovery is finished, the startup process exits with exit code 0
* and we switch to PM_RUN state.
*
@@ -629,7 +629,7 @@ PostmasterMain(int argc, char *argv[])
opterr = 1;
/*
- * Parse command-line options. CAUTION: keep this in sync with
+ * Parse command-line options. CAUTION: keep this in sync with
* tcop/postgres.c (the option sets should not conflict) and with the
* common help() function in main/main.c.
*/
@@ -1228,7 +1228,7 @@ PostmasterMain(int argc, char *argv[])
/*
- * Remove old temporary files. At this point there can be no other
+ * Remove old temporary files. At this point there can be no other
* Postgres processes running in this directory, so this should be safe.
*/
RemovePgTempFiles();
@@ -1452,7 +1452,7 @@ DetermineSleepTime(struct timeval * timeout)
/*
* When there are crashed bgworkers, we sleep just long enough that
- * they are restarted when they request to be. Scan the list to
+ * they are restarted when they request to be. Scan the list to
* determine the minimum of all wakeup times according to most recent
* crash time and requested restart interval.
*/
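A standalone sketch of that minimum-of-wakeup-times scan; the worker array and the 60-second cap are invented:

	#include <stdio.h>
	#include <time.h>

	struct worker
	{
		time_t		crashed_at;		/* 0 means not crashed */
		int			restart_interval;	/* seconds */
	};

	static time_t
	next_wakeup(const struct worker *w, int n, time_t now)
	{
		time_t		earliest = now + 60;	/* default maximum sleep */
		int			i;

		for (i = 0; i < n; i++)
		{
			time_t		t;

			if (w[i].crashed_at == 0)
				continue;
			t = w[i].crashed_at + w[i].restart_interval;
			if (t < earliest)
				earliest = t;
		}
		return (earliest < now) ? now : earliest;
	}

	int
	main(void)
	{
		time_t		now = time(NULL);
		struct worker ws[] = {{now - 5, 10}, {0, 0}, {now - 2, 3}};

		printf("sleep for %ld s\n", (long) (next_wakeup(ws, 3, now) - now));
		return 0;
	}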
@@ -1723,7 +1723,7 @@ ProcessStartupPacket(Port *port, bool SSLdone)
{
/*
* EOF after SSLdone probably means the client didn't like our
- * response to NEGOTIATE_SSL_CODE. That's not an error condition, so
+ * response to NEGOTIATE_SSL_CODE. That's not an error condition, so
* don't clutter the log with a complaint.
*/
if (!SSLdone)
@@ -1848,7 +1848,7 @@ retry1:
int32 offset = sizeof(ProtocolVersion);
/*
- * Scan packet body for name/option pairs. We can assume any string
+ * Scan packet body for name/option pairs. We can assume any string
* beginning within the packet body is null-terminated, thanks to
* zeroing extra byte above.
*/
@@ -2268,7 +2268,7 @@ reset_shared(int port)
*
* Note: in each "cycle of life" we will normally assign the same IPC keys
* (if using SysV shmem and/or semas), since the port number is used to
- * determine IPC keys. This helps ensure that we will clean up dead IPC
+ * determine IPC keys. This helps ensure that we will clean up dead IPC
* objects if the postmaster crashes and is restarted.
*/
CreateSharedMemoryAndSemaphores(false, port);
@@ -2629,7 +2629,7 @@ reaper(SIGNAL_ARGS)
/*
* OK, we saw normal exit of the checkpointer after it's been
* told to shut down. We expect that it wrote a shutdown
- * checkpoint. (If for some reason it didn't, recovery will
+ * checkpoint. (If for some reason it didn't, recovery will
* occur on next postmaster start.)
*
* At this point we should have no normal backend children
@@ -2705,7 +2705,7 @@ reaper(SIGNAL_ARGS)
/*
* Was it the autovacuum launcher? Normal exit can be ignored; we'll
* start a new one at the next iteration of the postmaster's main
- * loop, if necessary. Any other exit condition is treated as a
+ * loop, if necessary. Any other exit condition is treated as a
* crash.
*/
if (pid == AutoVacPID)
@@ -2847,7 +2847,7 @@ CleanupBackgroundWorker(int pid,
if (!ReleasePostmasterChildSlot(rw->rw_child_slot))
{
/*
- * Uh-oh, the child failed to clean itself up. Treat as a crash
+ * Uh-oh, the child failed to clean itself up. Treat as a crash
* after all.
*/
rw->rw_crashed_at = GetCurrentTimestamp();
@@ -2931,7 +2931,7 @@ CleanupBackend(int pid,
if (!ReleasePostmasterChildSlot(bp->child_slot))
{
/*
- * Uh-oh, the child failed to clean itself up. Treat as a
+ * Uh-oh, the child failed to clean itself up. Treat as a
* crash after all.
*/
HandleChildCrash(pid, exitstatus, _("server process"));
@@ -3004,7 +3004,7 @@ HandleChildCrash(int pid, int exitstatus, const char *procname)
else
{
/*
- * This worker is still alive. Unless we did so already, tell it
+ * This worker is still alive. Unless we did so already, tell it
* to commit hara-kiri.
*
* SIGQUIT is the special signal that says exit without proc_exit
@@ -3328,7 +3328,7 @@ PostmasterStateMachine(void)
if (FatalError)
{
/*
- * Start waiting for dead_end children to die. This state
+ * Start waiting for dead_end children to die. This state
* change causes ServerLoop to stop creating new ones.
*/
pmState = PM_WAIT_DEAD_END;
@@ -3427,7 +3427,7 @@ PostmasterStateMachine(void)
/*
* If we've been told to shut down, we exit as soon as there are no
- * remaining children. If there was a crash, cleanup will occur at the
+ * remaining children. If there was a crash, cleanup will occur at the
* next startup. (Before PostgreSQL 8.3, we tried to recover from the
* crash before exiting, but that seems unwise if we are quitting because
* we got SIGTERM from init --- there may well not be time for recovery
@@ -3503,7 +3503,7 @@ PostmasterStateMachine(void)
* system().
*
* There is a race condition for recently-forked children: they might not
- * have executed setsid() yet. So we signal the child directly as well as
+ * have executed setsid() yet. So we signal the child directly as well as
* the group. We assume such a child will handle the signal before trying
* to spawn any grandchild processes. We also assume that signaling the
* child twice will not cause any problems.
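A standalone sketch of signaling both the child and its process group; the fork/setsid scaffolding and the one-second grace period are demo-only:

	#include <signal.h>
	#include <sys/types.h>
	#include <unistd.h>

	static void
	signal_child_and_group(pid_t child, int sig)
	{
		kill(child, sig);		/* the child itself */
		kill(-child, sig);		/* its process group, if setsid() ran */
	}

	int
	main(void)
	{
		pid_t		child = fork();

		if (child == 0)
		{
			setsid();			/* become a group leader, like a backend */
			pause();			/* wait to be signaled */
			_exit(0);
		}
		sleep(1);				/* crude: give the child time to setsid() */
		signal_child_and_group(child, SIGTERM);
		return 0;
	}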
@@ -3724,7 +3724,7 @@ BackendStartup(Port *port)
/*
* Try to report backend fork() failure to client before we close the
- * connection. Since we do not care to risk blocking the postmaster on
+ * connection. Since we do not care to risk blocking the postmaster on
* this connection, we set the connection to non-blocking and try only once.
*
* This is grungy special-purpose code; we cannot use backend libpq since
@@ -3778,7 +3778,7 @@ BackendInitialize(Port *port)
/*
* PreAuthDelay is a debugging aid for investigating problems in the
* authentication cycle: it can be set in postgresql.conf to allow time to
- * attach to the newly-forked backend with a debugger. (See also
+ * attach to the newly-forked backend with a debugger. (See also
* PostAuthDelay, which we allow clients to pass through PGOPTIONS, but it
* is not honored until after authentication.)
*/
@@ -3805,7 +3805,7 @@ BackendInitialize(Port *port)
/*
* If possible, make this process a group leader, so that the postmaster
- * can signal any child processes too. (We do this now on the off chance
+ * can signal any child processes too.  (We do this now on the off chance
* that something might spawn a child process during authentication.)
*/
#ifdef HAVE_SETSID
@@ -3815,7 +3815,7 @@ BackendInitialize(Port *port)
/*
* We arrange for a simple exit(1) if we receive SIGTERM or SIGQUIT or
- * timeout while trying to collect the startup packet. Otherwise the
+ * timeout while trying to collect the startup packet.  Otherwise the
* postmaster cannot shut down the database FAST or IMMED cleanly if a
* buggy client fails to send the packet promptly.
*/
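
One way to read that arrangement, as a standalone sketch (the server routes this through its own signal and timeout machinery rather than raw signal()/alarm(), so treat names and calls here as illustrative):

#include <signal.h>
#include <unistd.h>

/* Sketch: during startup-packet collection, SIGTERM, SIGQUIT, or an
 * alarm timeout all produce an immediate exit, so a client that never
 * sends its packet cannot stall a fast or immediate shutdown. */
static void
startup_packet_die(int signum)
{
    (void) signum;
    _exit(1);                   /* _exit, not exit: async-signal-safe */
}

static void
arm_startup_packet_guard(unsigned int seconds)
{
    signal(SIGTERM, startup_packet_die);
    signal(SIGQUIT, startup_packet_die);
    signal(SIGALRM, startup_packet_die);
    alarm(seconds);             /* SIGALRM fires if the packet never arrives */
}
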
@@ -3902,7 +3902,7 @@ BackendInitialize(Port *port)
status = ProcessStartupPacket(port, false);
/*
- * Stop here if it was bad or a cancel packet. ProcessStartupPacket
+ * Stop here if it was bad or a cancel packet.  ProcessStartupPacket
* already did any appropriate error reporting.
*/
if (status != STATUS_OK)
@@ -4453,7 +4453,7 @@ SubPostmasterMain(int argc, char *argv[])
read_nondefault_variables();
/*
- * Reload any libraries that were preloaded by the postmaster. Since we
+ * Reload any libraries that were preloaded by the postmaster.  Since we
* exec'd this process, those libraries didn't come along with us; but we
* should load them into all child processes to be consistent with the
* non-EXEC_BACKEND behavior.
@@ -4506,7 +4506,7 @@ SubPostmasterMain(int argc, char *argv[])
*
* This prevents a randomized stack base address that causes child
* shared memory to be at a different address than the parent, making
- * it impossible to attach to shared memory. Return the value to
+ * it impossible to attach to shared memory.  Return the value to
* '1' when finished.
*/
CreateSharedMemoryAndSemaphores(false, 0);
@@ -4626,7 +4626,7 @@ ExitPostmaster(int status)
/* should cleanup shared memory and kill all backends */
/*
- * Not sure of the semantics here. When the Postmaster dies, should the
+ * Not sure of the semantics here.  When the Postmaster dies, should the
* backends all be killed? probably not.
*
* MUST -- vadim 05-10-1999
@@ -4924,7 +4924,7 @@ CountChildren(int target)
/*
* StartChildProcess -- start an auxiliary process for the postmaster
*
- * xlop determines what kind of child will be started. All child types
+ * xlop determines what kind of child will be started.  All child types
* initially go to AuxiliaryProcessMain, which will handle common setup.
*
* Return value of StartChildProcess is subprocess' PID, or 0 if failed
@@ -5148,7 +5148,7 @@ CreateOptsFile(int argc, char *argv[], char *fullprogname)
* These arrays include regular backends, autovac workers, walsenders
* and background workers, but not special children nor dead_end children.
* This allows the arrays to have a fixed maximum size, to wit the same
- * too-many-children limit enforced by canAcceptConnections(). The exact value
+ * too-many-children limit enforced by canAcceptConnections().  The exact value
* isn't too critical as long as it's more than MaxBackends.
*/
int
@@ -5355,9 +5355,9 @@ bgworker_quickdie(SIGNAL_ARGS)
on_exit_reset();
/*
- * Note we do exit(0) here, not exit(2) like quickdie. The reason is that
+ * Note we do exit(0) here, not exit(2) like quickdie.  The reason is that
* we don't want this worker to be seen as independently crashed, because
- * then postmaster would delay restarting it again afterwards. If some
+ * then postmaster would delay restarting it again afterwards.  If some
* idiot DBA manually sends SIGQUIT to a random bgworker, the "dead man
* switch" will ensure that postmaster sees this as a crash.
*/
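
The exit-status convention used here and in startup.c below can be sketched generically (function names hypothetical; as the surrounding code shows, the real handlers also reset atexit callbacks via on_exit_reset() first):

#include <unistd.h>

/* Sketch of the two SIGQUIT conventions: a background worker exits 0
 * so the postmaster does not treat the death as an independent crash
 * (the shared-memory "dead man switch" reports the crash if there was
 * one), while other backends exit 2 to force a crash-restart cycle,
 * since they skip all shared-memory cleanup. */
static void
worker_quickdie_sketch(int signum)
{
    (void) signum;
    _exit(0);                   /* restart without a crash-restart cycle */
}

static void
backend_quickdie_sketch(int signum)
{
    (void) signum;
    _exit(2);                   /* nonzero: postmaster runs a system reset */
}
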
@@ -5508,7 +5508,7 @@ StartBackgroundWorker(void)
#endif
/*
- * Note that in normal processes, we would call InitPostgres here. For a
+ * Note that in normal processes, we would call InitPostgres here.  For a
* worker, however, we don't know what database to connect to, yet; so we
* need to wait until the user code does it via
* BackgroundWorkerInitializeConnection().
@@ -6172,7 +6172,7 @@ ShmemBackendArrayRemove(Backend *bn)
#ifdef WIN32
/*
- * Subset implementation of waitpid() for Windows. We assume pid is -1
+ * Subset implementation of waitpid() for Windows.  We assume pid is -1
* (that is, check all child processes) and options is WNOHANG (don't wait).
*/
static pid_t
diff --git a/src/backend/postmaster/startup.c b/src/backend/postmaster/startup.c
index 7ebf5004418..b0104d384ea 100644
--- a/src/backend/postmaster/startup.c
+++ b/src/backend/postmaster/startup.c
@@ -81,7 +81,7 @@ startupproc_quickdie(SIGNAL_ARGS)
on_exit_reset();
/*
- * Note we do exit(2) not exit(0). This is to force the postmaster into a
+ * Note we do exit(2) not exit(0).  This is to force the postmaster into a
* system reset cycle if some idiot DBA sends a manual SIGQUIT to a random
* backend. This is necessary precisely because we don't clean up our
* shared memory state. (The "dead man switch" mechanism in pmsignal.c
diff --git a/src/backend/postmaster/syslogger.c b/src/backend/postmaster/syslogger.c
index 8b00aa525b2..81c55bbd29e 100644
--- a/src/backend/postmaster/syslogger.c
+++ b/src/backend/postmaster/syslogger.c
@@ -66,7 +66,7 @@
/*
- * GUC parameters. Logging_collector cannot be changed after postmaster
+ * GUC parameters.  Logging_collector cannot be changed after postmaster
* start, but the rest can change at SIGHUP.
*/
bool Logging_collector = false;
@@ -192,7 +192,7 @@ SysLoggerMain(int argc, char *argv[])
/*
* If we restarted, our stderr is already redirected into our own input
* pipe. This is of course pretty useless, not to mention that it
- * interferes with detecting pipe EOF. Point stderr to /dev/null. This
+ * interferes with detecting pipe EOF.  Point stderr to /dev/null.  This
* assumes that all interesting messages generated in the syslogger will
* come through elog.c and will be sent to write_syslogger_file.
*/
@@ -202,7 +202,7 @@ SysLoggerMain(int argc, char *argv[])
/*
* The closes might look redundant, but they are not: we want to be
- * darn sure the pipe gets closed even if the open failed. We can
+ * darn sure the pipe gets closed even if the open failed.  We can
* survive running with stderr pointing nowhere, but we can't afford
* to have extra pipe input descriptors hanging around.
*/
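
As a standalone POSIX sketch of the same dance (names illustrative):

#include <fcntl.h>
#include <unistd.h>

/* Sketch: repoint stderr at /dev/null, then close both pipe ends
 * unconditionally.  The unconditional closes are the point: running
 * with stderr aimed nowhere is survivable, but a leaked read end of
 * the pipe would keep EOF from ever being detected. */
static void
detach_stderr_sketch(int pipefd[2])
{
    int         fd = open("/dev/null", O_WRONLY);

    if (fd >= 0)
    {
        (void) dup2(fd, 2);     /* fd 2 == stderr */
        close(fd);
    }
    close(pipefd[0]);
    close(pipefd[1]);
}
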
@@ -243,7 +243,7 @@ SysLoggerMain(int argc, char *argv[])
/*
* If possible, make this process a group leader, so that the postmaster
- * can signal any child processes too. (syslogger probably never has any
+ * can signal any child processes too.  (syslogger probably never has any
* child processes, but for consistency we make all postmaster child
* processes do this.)
*/
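
The convention, sketched under the assumption of POSIX setsid() (the HAVE_SETSID guard mirrors the surrounding code):

#include <stdio.h>
#include <unistd.h>

/* Sketch: each forked child makes itself a process-group leader so
 * the postmaster can signal the whole group with kill(-pid, sig). */
static void
become_group_leader_sketch(void)
{
#ifdef HAVE_SETSID
    if (setsid() < 0)
        perror("setsid");       /* the server treats this as a fatal error */
#endif
}
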
@@ -413,7 +413,7 @@ SysLoggerMain(int argc, char *argv[])
/*
* Calculate time till next time-based rotation, so that we don't
- * sleep longer than that. We assume the value of "now" obtained
+ * sleep longer than that.  We assume the value of "now" obtained
* above is still close enough. Note we can't make this calculation
* until after calling logfile_rotate(), since it will advance
* next_rotation_time.
@@ -517,7 +517,7 @@ SysLoggerMain(int argc, char *argv[])
(errmsg("logger shutting down")));
/*
- * Normal exit from the syslogger is here. Note that we
+ * Normal exit from the syslogger is here.  Note that we
* deliberately do not close syslogFile before exiting; this is to
* allow for the possibility of elog messages being generated
* inside proc_exit. Regular exit() will take care of flushing
@@ -1347,7 +1347,7 @@ set_next_rotation_time(void)
/*
* The requirements here are to choose the next time > now that is a
* "multiple" of the log rotation interval. "Multiple" can be interpreted
- * fairly loosely. In this version we align to log_timezone rather than
+ * fairly loosely.  In this version we align to log_timezone rather than
* GMT.
*/
rotinterval = Log_RotationAge * SECS_PER_MINUTE; /* convert to seconds */
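
The rounding itself is simple modular arithmetic; a minimal sketch, ignoring the log_timezone adjustment mentioned above:

#include <time.h>

/* Sketch: advance "now" to the next strictly later multiple of the
 * rotation interval.  When now falls exactly on a boundary, the
 * result is the following boundary, satisfying "next time > now". */
static time_t
next_rotation_sketch(time_t now, time_t rotinterval)
{
    return now - (now % rotinterval) + rotinterval;
}
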
diff --git a/src/backend/postmaster/walwriter.c b/src/backend/postmaster/walwriter.c
index 8359da6d285..9e7ad189cd1 100644
--- a/src/backend/postmaster/walwriter.c
+++ b/src/backend/postmaster/walwriter.c
@@ -103,7 +103,7 @@ WalWriterMain(void)
/*
* If possible, make this process a group leader, so that the postmaster
- * can signal any child processes too. (walwriter probably never has any
+ * can signal any child processes too.  (walwriter probably never has any
* child processes, but for consistency we make all postmaster child
* processes do this.)
*/
@@ -176,7 +176,7 @@ WalWriterMain(void)
/*
* These operations are really just a minimal subset of
- * AbortTransaction(). We don't have very many resources to worry
+ * AbortTransaction().  We don't have very many resources to worry
* about in walwriter, but we do have LWLocks, and perhaps buffers?
*/
LWLockReleaseAll();
@@ -250,7 +250,7 @@ WalWriterMain(void)
int rc;
/*
- * Advertise whether we might hibernate in this cycle. We do this
+ * Advertise whether we might hibernate in this cycle.  We do this
* before resetting the latch to ensure that any async commits will
* see the flag set if they might possibly need to wake us up, and
* that we won't miss any signal they send us. (If we discover work
@@ -341,7 +341,7 @@ wal_quickdie(SIGNAL_ARGS)
on_exit_reset();
/*
- * Note we do exit(2) not exit(0). This is to force the postmaster into a
+ * Note we do exit(2) not exit(0).  This is to force the postmaster into a
* system reset cycle if some idiot DBA sends a manual SIGQUIT to a random
* backend. This is necessary precisely because we don't clean up our
* shared memory state. (The "dead man switch" mechanism in pmsignal.c
diff --git a/src/backend/regex/regc_color.c b/src/backend/regex/regc_color.c
index e6aa899518f..c495cee3003 100644
--- a/src/backend/regex/regc_color.c
+++ b/src/backend/regex/regc_color.c
@@ -2,7 +2,7 @@
* colorings of characters
* This file is #included by regcomp.c.
*
- * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
+ * Copyright (c) 1998, 1999 Henry Spencer.  All rights reserved.
*
* Development of this software was funded, in part, by Cray Research Inc.,
* UUNET Communications Services Inc., Sun Microsystems Inc., and Scriptics
diff --git a/src/backend/regex/regc_cvec.c b/src/backend/regex/regc_cvec.c
index 580a693161e..921a7d7f92a 100644
--- a/src/backend/regex/regc_cvec.c
+++ b/src/backend/regex/regc_cvec.c
@@ -2,7 +2,7 @@
* Utility functions for handling cvecs
* This file is #included by regcomp.c.
*
- * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
+ * Copyright (c) 1998, 1999 Henry Spencer.  All rights reserved.
*
* Development of this software was funded, in part, by Cray Research Inc.,
* UUNET Communications Services Inc., Sun Microsystems Inc., and Scriptics
diff --git a/src/backend/regex/regc_lex.c b/src/backend/regex/regc_lex.c
index 3360cfb0e9f..0e87ad2deba 100644
--- a/src/backend/regex/regc_lex.c
+++ b/src/backend/regex/regc_lex.c
@@ -2,7 +2,7 @@
* lexical analyzer
* This file is #included by regcomp.c.
*
- * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
+ * Copyright (c) 1998, 1999 Henry Spencer.  All rights reserved.
*
* Development of this software was funded, in part, by Cray Research Inc.,
* UUNET Communications Services Inc., Sun Microsystems Inc., and Scriptics
diff --git a/src/backend/regex/regc_locale.c b/src/backend/regex/regc_locale.c
index da597053448..e7bbb50ef46 100644
--- a/src/backend/regex/regc_locale.c
+++ b/src/backend/regex/regc_locale.c
@@ -30,7 +30,7 @@
*
* THE AUTHORS AND DISTRIBUTORS SPECIFICALLY DISCLAIM ANY WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. THIS SOFTWARE
+ * FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT.  THIS SOFTWARE
* IS PROVIDED ON AN "AS IS" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE
* NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR
* MODIFICATIONS.
@@ -38,7 +38,7 @@
* GOVERNMENT USE: If you are acquiring this software on behalf of the
* U.S. government, the Government shall have only "Restricted Rights"
* in the software and related documentation as defined in the Federal
- * Acquisition Regulations (FARs) in Clause 52.227.19 (c) (2). If you
+ * Acquisition Regulations (FARs) in Clause 52.227.19 (c) (2).  If you
* are acquiring the software on behalf of the Department of Defense, the
* software shall be classified as "Commercial Computer Software" and the
* Government shall have only "Restricted Rights" as defined in Clause
@@ -667,7 +667,7 @@ allcases(struct vars * v, /* context */
/*
* cmp - chr-substring compare
*
- * Backrefs need this. It should preferably be efficient.
+ * Backrefs need this.  It should preferably be efficient.
* Note that it does not need to report anything except equal/unequal.
* Note also that the length is exact, and the comparison should not
* stop at embedded NULs!
diff --git a/src/backend/regex/regc_nfa.c b/src/backend/regex/regc_nfa.c
index f6dad013b54..3487734a64e 100644
--- a/src/backend/regex/regc_nfa.c
+++ b/src/backend/regex/regc_nfa.c
@@ -2,7 +2,7 @@
* NFA utilities.
* This file is #included by regcomp.c.
*
- * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
+ * Copyright (c) 1998, 1999 Henry Spencer.  All rights reserved.
*
* Development of this software was funded, in part, by Cray Research Inc.,
* UUNET Communications Services Inc., Sun Microsystems Inc., and Scriptics
@@ -1304,7 +1304,7 @@ fixempties(struct nfa * nfa,
}
/*
- * And remove any states that have become useless. (This cleanup is not
+ * And remove any states that have become useless.  (This cleanup is not
* very thorough, and would be even less so if we tried to combine it with
* the previous step; but cleanup() will take care of anything we miss.)
*/
@@ -1372,7 +1372,7 @@ replaceempty(struct nfa * nfa,
* non-EMPTY out-arcs), we must keep it so, so always push forward in that
* case.
*
- * The fan-out/fan-in comparison should count only non-EMPTY arcs. If
+ * The fan-out/fan-in comparison should count only non-EMPTY arcs.  If
* "from" is doomed, we can skip counting "to"'s arcs, since we want to
* force taking the copyins path in that case.
*/
diff --git a/src/backend/regex/regc_pg_locale.c b/src/backend/regex/regc_pg_locale.c
index c1b2862815d..6f78eeb679a 100644
--- a/src/backend/regex/regc_pg_locale.c
+++ b/src/backend/regex/regc_pg_locale.c
@@ -24,7 +24,7 @@
* several implementation strategies depending on the situation:
*
* 1. In C/POSIX collations, we use hard-wired code. We can't depend on
- * the <ctype.h> functions since those will obey LC_CTYPE. Note that these
+ * the <ctype.h> functions since those will obey LC_CTYPE.  Note that these
* collations don't give a fig about multibyte characters.
*
* 2. In the "default" collation (which is supposed to obey LC_CTYPE):
@@ -36,10 +36,10 @@
*
* 2b. In all other encodings, or on machines that lack <wctype.h>, we use
* the <ctype.h> functions for pg_wchar values up to 255, and punt for values
- * above that. This is only 100% correct in single-byte encodings such as
- * LATINn. However, non-Unicode multibyte encodings are mostly Far Eastern
+ * above that.  This is only 100% correct in single-byte encodings such as
+ * LATINn.  However, non-Unicode multibyte encodings are mostly Far Eastern
* character sets for which the properties being tested here aren't very
- * relevant for higher code values anyway. The difficulty with using the
+ * relevant for higher code values anyway.  The difficulty with using the
* <wctype.h> functions with non-Unicode multibyte encodings is that we can
* have no certainty that the platform's wchar_t representation matches
* what we do in pg_wchar conversions.
@@ -730,7 +730,7 @@ store_match(pg_ctype_cache *pcc, pg_wchar chr1, int nchrs)
/*
* Given a probe function (e.g., pg_wc_isalpha) get a struct cvec for all
- * chrs satisfying the probe function. The active collation is the one
+ * chrs satisfying the probe function.  The active collation is the one
* previously set by pg_set_regex_collation. Return NULL if out of memory.
*
* Note that the result must not be freed or modified by caller.
@@ -777,7 +777,7 @@ pg_ctype_get_cache(pg_wc_probefunc probefunc)
* UTF8 go up to 0x7FF, which is a pretty arbitrary cutoff but we cannot
* extend it as far as we'd like (say, 0xFFFF, the end of the Basic
* Multilingual Plane) without creating significant performance issues due
- * to too many characters being fed through the colormap code. This will
+ * to too many characters being fed through the colormap code.  This will
* need redesign to fix reasonably, but at least for the moment we have
* all common European languages covered. Otherwise (not C, not UTF8) go
* up to 255. These limits are interrelated with restrictions discussed
diff --git a/src/backend/regex/regcomp.c b/src/backend/regex/regcomp.c
index bd2bb60f5b3..69e73a32409 100644
--- a/src/backend/regex/regcomp.c
+++ b/src/backend/regex/regcomp.c
@@ -2,7 +2,7 @@
* re_*comp and friends - compile REs
* This file #includes several others (see the bottom).
*
- * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
+ * Copyright (c) 1998, 1999 Henry Spencer.  All rights reserved.
*
* Development of this software was funded, in part, by Cray Research Inc.,
* UUNET Communications Services Inc., Sun Microsystems Inc., and Scriptics
@@ -564,7 +564,7 @@ makesearch(struct vars * v,
* constraints, often knowing when you were in the pre state tells you
* little; it's the next state(s) that are informative. But some of them
* may have other inarcs, i.e. it may be possible to make actual progress
- * and then return to one of them. We must de-optimize such cases,
+ * and then return to one of them.  We must de-optimize such cases,
* splitting each such state into progress and no-progress states.
*/
@@ -610,7 +610,7 @@ makesearch(struct vars * v,
* parse - parse an RE
*
* This is actually just the top level, which parses a bunch of branches
- * tied together with '|'. They appear in the tree as the left children
+ * tied together with '|'.  They appear in the tree as the left children
* of a chain of '|' subres.
*/
static struct subre *
@@ -1352,7 +1352,7 @@ bracket(struct vars * v,
/*
* cbracket - handle complemented bracket expression
* We do it by calling bracket() with dummy endpoints, and then complementing
- * the result. The alternative would be to invoke rainbow(), and then delete
+ * the result.  The alternative would be to invoke rainbow(), and then delete
* arcs as the b.e. is seen... but that gets messy.
*/
static void
diff --git a/src/backend/regex/rege_dfa.c b/src/backend/regex/rege_dfa.c
index 7a7ba5b89cf..d367a77e854 100644
--- a/src/backend/regex/rege_dfa.c
+++ b/src/backend/regex/rege_dfa.c
@@ -2,7 +2,7 @@
* DFA routines
* This file is #included by regexec.c.
*
- * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
+ * Copyright (c) 1998, 1999 Henry Spencer.  All rights reserved.
*
* Development of this software was funded, in part, by Cray Research Inc.,
* UUNET Communications Services Inc., Sun Microsystems Inc., and Scriptics
diff --git a/src/backend/regex/regerror.c b/src/backend/regex/regerror.c
index f6a3f2667f8..5f3079399dd 100644
--- a/src/backend/regex/regerror.c
+++ b/src/backend/regex/regerror.c
@@ -1,7 +1,7 @@
/*
* regerror - error-code expansion
*
- * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
+ * Copyright (c) 1998, 1999 Henry Spencer.  All rights reserved.
*
* Development of this software was funded, in part, by Cray Research Inc.,
* UUNET Communications Services Inc., Sun Microsystems Inc., and Scriptics
diff --git a/src/backend/regex/regexec.c b/src/backend/regex/regexec.c
index 2e976627f52..7f41437cb58 100644
--- a/src/backend/regex/regexec.c
+++ b/src/backend/regex/regexec.c
@@ -1,7 +1,7 @@
/*
* re_*exec and friends - match REs
*
- * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
+ * Copyright (c) 1998, 1999 Henry Spencer.  All rights reserved.
*
* Development of this software was funded, in part, by Cray Research Inc.,
* UUNET Communications Services Inc., Sun Microsystems Inc., and Scriptics
@@ -955,7 +955,7 @@ citerdissect(struct vars * v,
}
/*
- * We need workspace to track the endpoints of each sub-match. Normally
+ * We need workspace to track the endpoints of each sub-match.  Normally
* we consider only nonzero-length sub-matches, so there can be at most
* end-begin of them. However, if min is larger than that, we will also
* consider zero-length sub-matches in order to find enough matches.
@@ -984,8 +984,8 @@ citerdissect(struct vars * v,
/*
* Our strategy is to first find a set of sub-match endpoints that are
* valid according to the child node's DFA, and then recursively dissect
- * each sub-match to confirm validity. If any validity check fails,
- * backtrack the last sub-match and try again. And, when we next try for
+ * each sub-match to confirm validity.  If any validity check fails,
+ * backtrack the last sub-match and try again.  And, when we next try for
* a validity check, we need not recheck any successfully verified
* sub-matches that we didn't move the endpoints of. nverified remembers
* how many sub-matches are currently known okay.
@@ -1036,7 +1036,7 @@ citerdissect(struct vars * v,
/*
* We've identified a way to divide the string into k sub-matches that
- * works so far as the child DFA can tell. If k is an allowed number
+ * works so far as the child DFA can tell.  If k is an allowed number
* of matches, start the slow part: recurse to verify each sub-match.
* We always have k <= max_matches, needn't check that.
*/
@@ -1140,7 +1140,7 @@ creviterdissect(struct vars * v,
}
/*
- * We need workspace to track the endpoints of each sub-match. Normally
+ * We need workspace to track the endpoints of each sub-match.  Normally
* we consider only nonzero-length sub-matches, so there can be at most
* end-begin of them. However, if min is larger than that, we will also
* consider zero-length sub-matches in order to find enough matches.
@@ -1169,8 +1169,8 @@ creviterdissect(struct vars * v,
/*
* Our strategy is to first find a set of sub-match endpoints that are
* valid according to the child node's DFA, and then recursively dissect
- * each sub-match to confirm validity. If any validity check fails,
- * backtrack the last sub-match and try again. And, when we next try for
+ * each sub-match to confirm validity. If any validity check fails,
+ * backtrack the last sub-match and try again. And, when we next try for
* a validity check, we need not recheck any successfully verified
* sub-matches that we didn't move the endpoints of. nverified remembers
* how many sub-matches are currently known okay.
@@ -1223,7 +1223,7 @@ creviterdissect(struct vars * v,
/*
* We've identified a way to divide the string into k sub-matches that
- * works so far as the child DFA can tell. If k is an allowed number
+ * works so far as the child DFA can tell.  If k is an allowed number
* of matches, start the slow part: recurse to verify each sub-match.
* We always have k <= max_matches, needn't check that.
*/
diff --git a/src/backend/regex/regfree.c b/src/backend/regex/regfree.c
index b291749bd1a..ae17ae70eb6 100644
--- a/src/backend/regex/regfree.c
+++ b/src/backend/regex/regfree.c
@@ -1,7 +1,7 @@
/*
* regfree - free an RE
*
- * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
+ * Copyright (c) 1998, 1999 Henry Spencer.  All rights reserved.
*
* Development of this software was funded, in part, by Cray Research Inc.,
* UUNET Communications Services Inc., Sun Microsystems Inc., and Scriptics
diff --git a/src/backend/regex/regprefix.c b/src/backend/regex/regprefix.c
index abda80c094e..dc96f9ff0a8 100644
--- a/src/backend/regex/regprefix.c
+++ b/src/backend/regex/regprefix.c
@@ -38,7 +38,7 @@ static int findprefix(struct cnfa * cnfa, struct colormap * cm,
*
* This function does not analyze all complex cases (such as lookahead
* constraints) exactly. Therefore it is possible that some strings matching
- * the reported prefix or exact-match string do not satisfy the regex. But
+ * the reported prefix or exact-match string do not satisfy the regex.  But
* it should never be the case that a string satisfying the regex does not
* match the reported prefix or exact-match string.
*/
@@ -150,7 +150,7 @@ findprefix(struct cnfa * cnfa,
* We could find a state with multiple out-arcs that are all labeled with
* the same singleton color; this comes from patterns like "^ab(cde|cxy)".
* In that case we add the chr "c" to the output string but then exit the
- * loop with nextst == -1. This leaves a little bit on the table: if the
+ * loop with nextst == -1.  This leaves a little bit on the table: if the
* pattern is like "^ab(cde|cdy)", we won't notice that "d" could be added
* to the prefix. But chasing multiple parallel state chains doesn't seem
* worth the trouble.
@@ -201,14 +201,14 @@ findprefix(struct cnfa * cnfa,
/*
* Identify the color's sole member chr and add it to the prefix
- * string. In general the colormap data structure doesn't provide a
+ * string.  In general the colormap data structure doesn't provide a
* way to find color member chrs, except by trying GETCOLOR() on each
* possible chr value, which won't do at all. However, for the cases
* we care about it should be sufficient to test the "firstchr" value,
* that is the first chr ever added to the color. There are cases
* where this might no longer be a member of the color (so we do need
* to test), but none of them are likely to arise for a character that
- * is a member of a common prefix. If we do hit such a corner case,
+ * is a member of a common prefix.  If we do hit such a corner case,
* we just fall out without adding anything to the prefix string.
*/
c = cm->cd[thiscolor].firstchr;
diff --git a/src/backend/replication/basebackup.c b/src/backend/replication/basebackup.c
index 7c106904c39..1fca9411225 100644
--- a/src/backend/replication/basebackup.c
+++ b/src/backend/replication/basebackup.c
@@ -758,7 +758,7 @@ sendFileWithContent(const char *filename, const char *content)
/*
* Include the tablespace directory pointed to by 'path' in the output tar
- * stream. If 'sizeonly' is true, we just calculate a total length and return
+ * stream.  If 'sizeonly' is true, we just calculate a total length and return
* it, without actually sending anything.
*
* Only used to send auxiliary tablespaces, not PGDATA.
diff --git a/src/backend/replication/syncrep.c b/src/backend/replication/syncrep.c
index 5424281b425..01d876f447a 100644
--- a/src/backend/replication/syncrep.c
+++ b/src/backend/replication/syncrep.c
@@ -516,7 +516,7 @@ SyncRepGetStandbyPriority(void)
}
/*
- * Walk the specified queue from head. Set the state of any backends that
+ * Walk the specified queue from head.  Set the state of any backends that
* need to be woken, remove them from the queue, and then wake them.
* Pass all = true to wake whole queue; otherwise, just wake up to
* the walsender's LSN.
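
A self-contained sketch of that walk, with the LSN modeled as a plain integer and the queue assumed sorted by ascending wait-LSN (as the syncrep queue is kept):

/* Toy model: detach and wake waiters from the head while their target
 * LSN is already safely past, or all of them when "all" is set. */
typedef unsigned long long SketchLSN;

struct sketch_waiter
{
    SketchLSN   wait_lsn;
    int         awake;
    struct sketch_waiter *next;
};

static int
wake_queue_sketch(struct sketch_waiter **head, SketchLSN done_lsn, int all)
{
    int         nwoken = 0;

    while (*head != NULL && (all || (*head)->wait_lsn <= done_lsn))
    {
        struct sketch_waiter *w = *head;

        *head = w->next;        /* remove from the queue before waking */
        w->next = NULL;
        w->awake = 1;           /* stands in for setting the proc latch */
        nwoken++;
    }
    return nwoken;
}
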
diff --git a/src/backend/replication/walreceiver.c b/src/backend/replication/walreceiver.c
index 60ad86d2a60..62d642ad5ec 100644
--- a/src/backend/replication/walreceiver.c
+++ b/src/backend/replication/walreceiver.c
@@ -256,7 +256,7 @@ WalReceiverMain(void)
/*
* If possible, make this process a group leader, so that the postmaster
- * can signal any child processes too. (walreceiver probably never has
+ * can signal any child processes too.  (walreceiver probably never has
* any child processes, but for consistency we make all postmaster child
* processes do this.)
*/
@@ -783,7 +783,7 @@ WalRcvQuickDieHandler(SIGNAL_ARGS)
on_exit_reset();
/*
- * Note we do exit(2) not exit(0). This is to force the postmaster into a
+ * Note we do exit(2) not exit(0).  This is to force the postmaster into a
* system reset cycle if some idiot DBA sends a manual SIGQUIT to a random
* backend. This is necessary precisely because we don't clean up our
* shared memory state. (The "dead man switch" mechanism in pmsignal.c
diff --git a/src/backend/replication/walreceiverfuncs.c b/src/backend/replication/walreceiverfuncs.c
index e5ad84393fa..9870d9cf894 100644
--- a/src/backend/replication/walreceiverfuncs.c
+++ b/src/backend/replication/walreceiverfuncs.c
@@ -284,7 +284,7 @@ RequestXLogStreaming(TimeLineID tli, XLogRecPtr recptr, const char *conninfo)
* Returns the last+1 byte position that walreceiver has written.
*
* Optionally, returns the previous chunk start, that is the first byte
- * written in the most recent walreceiver flush cycle. Callers not
+ * written in the most recent walreceiver flush cycle.  Callers not
* interested in that value may pass NULL for latestChunkStart. Same for
* receiveTLI.
*/
diff --git a/src/backend/replication/walsender.c b/src/backend/replication/walsender.c
index 4abb0007c7c..057a7ea2c32 100644
--- a/src/backend/replication/walsender.c
+++ b/src/backend/replication/walsender.c
@@ -72,7 +72,7 @@
#include "utils/timestamp.h"
/*
- * Maximum data payload in a WAL data message. Must be >= XLOG_BLCKSZ.
+ * Maximum data payload in a WAL data message.  Must be >= XLOG_BLCKSZ.
*
* We don't have a good idea of what a good value would be; there's some
* overhead per message in both walsender and walreceiver, but on the other
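
For scale, one plausible sizing (the multiplier is an assumption, not necessarily the value this file uses), with a compile-time check of the stated invariant:

#define XLOG_BLCKSZ_SKETCH   8192   /* assumed default WAL block size */
#define MAX_SEND_SIZE_SKETCH (XLOG_BLCKSZ_SKETCH * 16)  /* hypothetical cap */

/* fails to compile if the cap ever drops below one WAL block */
typedef char max_send_size_ok[(MAX_SEND_SIZE_SKETCH >= XLOG_BLCKSZ_SKETCH) ? 1 : -1];
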
@@ -942,7 +942,7 @@ ProcessStandbyHSFeedbackMessage(void)
* perhaps far enough to make feedbackXmin wrap around. In that case the
* xmin we set here would be "in the future" and have no effect. No point
* in worrying about this since it's too late to save the desired data
- * anyway. Assuming that the standby sends us an increasing sequence of
+ * anyway.  Assuming that the standby sends us an increasing sequence of
* xmins, this could only happen during the first reply cycle, else our
* own xmin would prevent nextXid from advancing so far.
*
@@ -1526,7 +1526,7 @@ XLogSend(bool *caughtup)
*
* Attempt to send all data that's already been written out and
* fsync'd to disk. We cannot go further than what's been written out
- * given the current implementation of XLogRead(). And in any case
+ * given the current implementation of XLogRead().  And in any case
* it's unsafe to send WAL that is not securely down to disk on the
* master: if the master subsequently crashes and restarts, slaves
* must not have applied any WAL that gets lost on the master.
diff --git a/src/backend/rewrite/rewriteDefine.c b/src/backend/rewrite/rewriteDefine.c
index 4d39724ce53..35698f37abc 100644
--- a/src/backend/rewrite/rewriteDefine.c
+++ b/src/backend/rewrite/rewriteDefine.c
@@ -204,7 +204,7 @@ DefineRule(RuleStmt *stmt, const char *queryString)
transformRuleStmt(stmt, queryString, &actions, &whereClause);
/*
- * Find and lock the relation. Lock level should match
+ * Find and lock the relation.  Lock level should match
* DefineQueryRewrite.
*/
relId = RangeVarGetRelid(stmt->relation, AccessExclusiveLock, false);
@@ -412,7 +412,7 @@ DefineQueryRewrite(char *rulename,
*
* If so, check that the relation is empty because the storage for the
* relation is going to be deleted. Also insist that the rel not have
- * any triggers, indexes, or child tables. (Note: these tests are too
+ * any triggers, indexes, or child tables.  (Note: these tests are too
* strict, because they will reject relations that once had such but
* don't anymore. But we don't really care, because this whole
* business of converting relations to views is just a kluge to allow
@@ -715,7 +715,7 @@ checkRuleResultList(List *targetList, TupleDesc resultDesc, bool isSelect,
* Note: for a view (ON SELECT rule), the checkAsUser field of the OLD
* RTE entry will be overridden when the view rule is expanded, and the
* checkAsUser field of the NEW entry is irrelevant because that entry's
- * requiredPerms bits will always be zero. However, for other types of rules
+ * requiredPerms bits will always be zero.  However, for other types of rules
* it's important to set these fields to match the rule owner. So we just set
* them always.
*/
diff --git a/src/backend/rewrite/rewriteHandler.c b/src/backend/rewrite/rewriteHandler.c
index 1ac048d78e5..6a31c62331e 100644
--- a/src/backend/rewrite/rewriteHandler.c
+++ b/src/backend/rewrite/rewriteHandler.c
@@ -208,7 +208,7 @@ AcquireRewriteLocks(Query *parsetree,
/*
* The elements of an alias list have to refer to
* earlier RTEs of the same rtable, because that's the
- * order the planner builds things in. So we already
+ * order the planner builds things in.  So we already
* processed the referenced RTE, and so it's safe to
* use get_rte_attribute_is_dropped on it. (This might
* not hold after rewriting or planning, but it's OK
@@ -370,7 +370,7 @@ rewriteRuleAction(Query *parsetree,
/*
* Generate expanded rtable consisting of main parsetree's rtable plus
* rule action's rtable; this becomes the complete rtable for the rule
- * action. Some of the entries may be unused after we finish rewriting,
+ * action.  Some of the entries may be unused after we finish rewriting,
* but we leave them all in place for two reasons:
*
* We'd have a much harder job to adjust the query's varnos if we
@@ -436,7 +436,7 @@ rewriteRuleAction(Query *parsetree,
* that if the rule action refers to OLD, its jointree will add a
* reference to rt_index. If the rule action doesn't refer to OLD, but
* either the rule_qual or the user query quals do, then we need to keep
- * the original rtindex in the jointree to provide data for the quals. We
+ * the original rtindex in the jointree to provide data for the quals.  We
* don't want the original rtindex to be joined twice, however, so avoid
* keeping it if the rule action mentions it.
*
@@ -458,7 +458,7 @@ rewriteRuleAction(Query *parsetree,
{
/*
* If sub_action is a setop, manipulating its jointree will do no
- * good at all, because the jointree is dummy. (Perhaps someday
+ * good at all, because the jointree is dummy.  (Perhaps someday
* we could push the joining and quals down to the member
* statements of the setop?)
*/
@@ -661,7 +661,7 @@ adjustJoinTreeList(Query *parsetree, bool removert, int rt_index)
* then junk fields (these in no particular order).
*
* We must do items 1,2,3 before firing rewrite rules, else rewritten
- * references to NEW.foo will produce wrong or incomplete results. Item 4
+ * references to NEW.foo will produce wrong or incomplete results.  Item 4
* is not needed for rewriting, but will be needed by the planner, and we
* can do it essentially for free while handling the other items.
*
@@ -868,7 +868,7 @@ process_matched_tle(TargetEntry *src_tle,
}
/*----------
- * Multiple assignments to same attribute. Allow only if all are
+ * Multiple assignments to same attribute.  Allow only if all are
* FieldStore or ArrayRef assignment operations. This is a bit
* tricky because what we may actually be looking at is a nest of
* such nodes; consider
@@ -886,7 +886,7 @@ process_matched_tle(TargetEntry *src_tle,
* assignments appear to occur left-to-right.
*
* For FieldStore, instead of nesting we can generate a single
- * FieldStore with multiple target fields. We must nest when
+ * FieldStore with multiple target fields.  We must nest when
* ArrayRefs are involved though.
*----------
*/
@@ -1178,7 +1178,7 @@ rewriteValuesRTE(RangeTblEntry *rte, Relation target_relation, List *attrnos)
* rewriteTargetListUD - rewrite UPDATE/DELETE targetlist as needed
*
* This function adds a "junk" TLE that is needed to allow the executor to
- * find the original row for the update or delete. When the target relation
+ * find the original row for the update or delete.  When the target relation
* is a regular table, the junk TLE emits the ctid attribute of the original
* row. When the target relation is a view, there is no ctid, so we instead
* emit a whole-row Var that will contain the "old" values of the view row.
@@ -1351,9 +1351,9 @@ ApplyRetrieveRule(Query *parsetree,
* fine as the result relation.
*
* For UPDATE/DELETE, we need to expand the view so as to have source
- * data for the operation. But we also need an unmodified RTE to
+ * data for the operation.  But we also need an unmodified RTE to
* serve as the target. So, copy the RTE and add the copy to the
- * rangetable. Note that the copy does not get added to the jointree.
+ * rangetable.  Note that the copy does not get added to the jointree.
* Also note that there's a hack in fireRIRrules to avoid calling this
* function again when it arrives at the copied RTE.
*/
@@ -1525,7 +1525,7 @@ markQueryForLocking(Query *qry, Node *jtnode,
* in the given tree.
*
* NOTE: although this has the form of a walker, we cheat and modify the
- * SubLink nodes in-place. It is caller's responsibility to ensure that
+ * SubLink nodes in-place.  It is caller's responsibility to ensure that
* no unwanted side-effects occur!
*
* This is unlike most of the other routines that recurse into subselects,
@@ -1730,7 +1730,7 @@ fireRIRrules(Query *parsetree, List *activeRIRs, bool forUpdatePushedDown)
* not just "NOT x" which the planner is much smarter about, else we will
* do the wrong thing when the qual evaluates to NULL.)
*
- * The rule_qual may contain references to OLD or NEW. OLD references are
+ * The rule_qual may contain references to OLD or NEW.  OLD references are
* replaced by references to the specified rt_index (the relation that the
* rule applies to). NEW references are only possible for INSERT and UPDATE
* queries on the relation itself, and so they should be replaced by copies
@@ -1803,7 +1803,7 @@ CopyAndAddInvertedQual(Query *parsetree,
* rows that the qualified action doesn't act on. (If there are multiple
* qualified INSTEAD rules, we AND all the negated quals onto a single
* modified original query.) We won't execute the original, unmodified
- * query if we find either qualified or unqualified INSTEAD rules. If
+ * query if we find either qualified or unqualified INSTEAD rules.  If
* we find both, the modified original query is discarded too.
*/
static List *
@@ -1961,7 +1961,7 @@ view_has_instead_trigger(Relation view, CmdType event)
*
* Caller must have verified that relation is a view!
*
- * Note that the checks performed here are local to this view. We do not
+ * Note that the checks performed here are local to this view.  We do not
* check whether the view's underlying base relation is updatable; that
* will be dealt with in later, recursive processing.
*
@@ -2051,7 +2051,7 @@ view_is_auto_updatable(Relation view)
* columns of the base relation, and no two should refer to the same
* column.
*
- * Note however that we should ignore resjunk entries. This proviso is
+ * Note however that we should ignore resjunk entries.  This proviso is
* relevant because ORDER BY is not disallowed, and we shouldn't reject a
* view defined like "SELECT * FROM t ORDER BY a+b".
*/
@@ -2091,7 +2091,7 @@ view_is_auto_updatable(Relation view)
* relation supports.
*
* This is used for the information_schema views, which have separate concepts
- * of "updatable" and "trigger updatable". A relation is "updatable" if it
+ * of "updatable" and "trigger updatable".  A relation is "updatable" if it
* can be updated without the need for triggers (either because it has a
* suitable RULE, or because it is simple enough to be automatically updated).
* A relation is "trigger updatable" if it has a suitable INSTEAD OF trigger.
@@ -2103,7 +2103,7 @@ view_is_auto_updatable(Relation view)
* to have trigger updatability included in the result.
*
* The return value is a bitmask of rule event numbers indicating which of
- * the INSERT, UPDATE and DELETE operations are supported. (We do it this way
+ * the INSERT, UPDATE and DELETE operations are supported.  (We do it this way
* so that we can test for UPDATE plus DELETE support in a single call.)
*/
int
@@ -2315,7 +2315,7 @@ adjust_view_column_set(Bitmapset *cols, List *targetlist)
* the view's base relation becomes the target relation.
*
* Note that the base relation here may itself be a view, which may or may not
- * have INSTEAD OF triggers or rules to handle the update. That is handled by
+ * have INSTEAD OF triggers or rules to handle the update.  That is handled by
* the recursion in RewriteQuery.
*/
static Query *
@@ -2467,7 +2467,7 @@ rewriteTargetView(Query *parsetree, Relation view)
* that does not correspond to what happens in ordinary SELECT usage of a
* view: all referenced columns must have read permission, even if
* optimization finds that some of them can be discarded during query
- * transformation. The flattening we're doing here is an optional
+ * transformation.  The flattening we're doing here is an optional
* optimization, too. (If you are unpersuaded and want to change this,
* note that applying adjust_view_column_set to view_rte->selectedCols is
* clearly *not* the right answer, since that neglects base-rel columns
@@ -2770,7 +2770,7 @@ RewriteQuery(Query *parsetree, List *rewrite_events)
/*
* At this point product_queries contains any DO ALSO rule
- * actions. Add the rewritten query before or after those. This
+ * actions.  Add the rewritten query before or after those.  This
* must match the handling the original query would have gotten
* below, if we allowed it to be included again.
*/
@@ -2990,7 +2990,7 @@ QueryRewrite(Query *parsetree)
*
* If the original query is still in the list, it sets the command tag.
* Otherwise, the last INSTEAD query of the same kind as the original is
- * allowed to set the tag. (Note these rules can leave us with no query
+ * allowed to set the tag.  (Note these rules can leave us with no query
* setting the tag. The tcop code has to cope with this by setting up a
* default tag based on the original un-rewritten query.)
*
diff --git a/src/backend/rewrite/rewriteManip.c b/src/backend/rewrite/rewriteManip.c
index 6ea91f5b211..4da9c65a771 100644
--- a/src/backend/rewrite/rewriteManip.c
+++ b/src/backend/rewrite/rewriteManip.c
@@ -58,7 +58,7 @@ static Relids adjust_relid_set(Relids relids, int oldrelid, int newrelid);
* specified query level.
*
* The objective of this routine is to detect whether there are aggregates
- * belonging to the given query level. Aggregates belonging to subqueries
+ * belonging to the given query level.  Aggregates belonging to subqueries
* or outer queries do NOT cause a true result. We must recurse into
* subqueries to detect outer-reference aggregates that logically belong to
* the specified query level.
@@ -113,7 +113,7 @@ contain_aggs_of_level_walker(Node *node,
* Find the parse location of any aggregate of the specified query level.
*
* Returns -1 if no such agg is in the querytree, or if they all have
- * unknown parse location. (The former case is probably caller error,
+ * unknown parse location.  (The former case is probably caller error,
* but we don't bother to distinguish it from the latter case.)
*
* Note: it might seem appropriate to merge this functionality into
@@ -208,7 +208,7 @@ contain_windowfuncs_walker(Node *node, void *context)
* Find the parse location of any windowfunc of the current query level.
*
* Returns -1 if no such windowfunc is in the querytree, or if they all have
- * unknown parse location. (The former case is probably caller error,
+ * unknown parse location.  (The former case is probably caller error,
* but we don't bother to distinguish it from the latter case.)
*
* Note: it might seem appropriate to merge this functionality into
@@ -287,11 +287,11 @@ checkExprHasSubLink_walker(Node *node, void *context)
*
* Find all Var nodes in the given tree with varlevelsup == sublevels_up,
* and increment their varno fields (rangetable indexes) by 'offset'.
- * The varnoold fields are adjusted similarly. Also, adjust other nodes
+ * The varnoold fields are adjusted similarly.  Also, adjust other nodes
* that contain rangetable indexes, such as RangeTblRef and JoinExpr.
*
* NOTE: although this has the form of a walker, we cheat and modify the
- * nodes in-place. The given expression tree should have been copied
+ * nodes in-place.  The given expression tree should have been copied
* earlier to ensure that no unwanted side-effects occur!
*/
@@ -449,11 +449,11 @@ offset_relid_set(Relids relids, int offset)
*
* Find all Var nodes in the given tree belonging to a specific relation
* (identified by sublevels_up and rt_index), and change their varno fields
- * to 'new_index'. The varnoold fields are changed too. Also, adjust other
+ * to 'new_index'.  The varnoold fields are changed too.  Also, adjust other
* nodes that contain rangetable indexes, such as RangeTblRef and JoinExpr.
*
* NOTE: although this has the form of a walker, we cheat and modify the
- * nodes in-place. The given expression tree should have been copied
+ * nodes in-place.  The given expression tree should have been copied
* earlier to ensure that no unwanted side-effects occur!
*/
@@ -646,7 +646,7 @@ adjust_relid_set(Relids relids, int oldrelid, int newrelid)
* Likewise for other nodes containing levelsup fields, such as Aggref.
*
* NOTE: although this has the form of a walker, we cheat and modify the
- * Var nodes in-place. The given expression tree should have been copied
+ * Var nodes in-place.  The given expression tree should have been copied
* earlier to ensure that no unwanted side-effects occur!
*/
@@ -1221,7 +1221,7 @@ replace_rte_variables_mutator(Node *node,
* If the expression tree contains a whole-row Var for the target RTE,
* the Var is not changed but *found_whole_row is returned as TRUE.
* For most callers this is an error condition, but we leave it to the caller
- * to report the error so that useful context can be provided. (In some
+ * to report the error so that useful context can be provided.  (In some
* usages it would be appropriate to modify the Var's vartype and insert a
* ConvertRowtypeExpr node to map back to the original vartype. We might
* someday extend this function's API to support that. For now, the only
diff --git a/src/backend/rewrite/rewriteSupport.c b/src/backend/rewrite/rewriteSupport.c
index f481c531ac7..e796cb8346c 100644
--- a/src/backend/rewrite/rewriteSupport.c
+++ b/src/backend/rewrite/rewriteSupport.c
@@ -122,7 +122,7 @@ get_rewrite_oid(Oid relid, const char *rulename, bool missing_ok)
* Find rule oid, given only a rule name but no rel OID.
*
* If there's more than one, it's an error. If there aren't any, that's an
- * error, too. In general, this should be avoided - it is provided to support
+ * error, too.  In general, this should be avoided - it is provided to support
* syntax that is compatible with pre-7.3 versions of PG, where rule names
* were unique across the entire database.
*/
diff --git a/src/backend/storage/buffer/buf_init.c b/src/backend/storage/buffer/buf_init.c
index 5daa46eeed1..26132f849b2 100644
--- a/src/backend/storage/buffer/buf_init.c
+++ b/src/backend/storage/buffer/buf_init.c
@@ -44,7 +44,7 @@ int32 *PrivateRefCount;
*
* IO_IN_PROGRESS -- this is a flag in the buffer descriptor.
* It must be set when an IO is initiated and cleared at
- * the end of the IO. It is there to make sure that one
+ * the end of the IO.  It is there to make sure that one
* process doesn't start to use a buffer while another is
* faulting it in. see WaitIO and related routines.
*
@@ -54,7 +54,7 @@ int32 *PrivateRefCount;
*
* PrivateRefCount -- Each buffer also has a private refcount that keeps
* track of the number of times the buffer is pinned in the current
- * process. This is used for two purposes: first, if we pin a
+ * process.  This is used for two purposes: first, if we pin a
* buffer more than once, we only need to change the shared refcount
* once, thus only lock the shared state once; second, when a transaction
* aborts, it should only unpin the buffers exactly the number of times it
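
The two-level counting can be sketched in isolation (array sizes and names are illustrative; the real shared count lives in the buffer header and must be changed under its spinlock):

#define SKETCH_NBUFFERS 1024

static int  private_refcount[SKETCH_NBUFFERS];  /* per-backend */
static int  shared_refcount[SKETCH_NBUFFERS];   /* stand-in for shared memory */

/* Only the 0 -> 1 transition of the private count touches the shared
 * count, so re-pinning an already-pinned buffer stays process-local. */
static void
pin_sketch(int buf)
{
    if (private_refcount[buf]++ == 0)
        shared_refcount[buf]++;
}

static void
unpin_sketch(int buf)
{
    if (--private_refcount[buf] == 0)
        shared_refcount[buf]--;
}
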
diff --git a/src/backend/storage/buffer/buf_table.c b/src/backend/storage/buffer/buf_table.c
index 850764f7545..4237d417c19 100644
--- a/src/backend/storage/buffer/buf_table.c
+++ b/src/backend/storage/buffer/buf_table.c
@@ -3,7 +3,7 @@
* buf_table.c
* routines for mapping BufferTags to buffer indexes.
*
- * Note: the routines in this file do no locking of their own. The caller
+ * Note: the routines in this file do no locking of their own.  The caller
* must hold a suitable lock on the appropriate BufMappingLock, as specified
* in the comments. We can't do the locking inside these functions because
* in most cases the caller needs to adjust the buffer header contents
@@ -112,7 +112,7 @@ BufTableLookup(BufferTag *tagPtr, uint32 hashcode)
* Insert a hashtable entry for given tag and buffer ID,
* unless an entry already exists for that tag
*
- * Returns -1 on successful insertion. If a conflicting entry exists
+ * Returns -1 on successful insertion.  If a conflicting entry exists
* already, returns the buffer ID in that entry.
*
* Caller must hold exclusive lock on BufMappingLock for tag's partition
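
The return convention is easy to misread, so here is a toy table with the same contract (collision handling and the tag key are elided; only the -1-on-success shape is the point):

#define SKETCH_TABSIZE 64

struct sketch_slot
{
    int         used;
    int         id;
};

static struct sketch_slot sketch_tab[SKETCH_TABSIZE];

/* Mirrors the contract above: -1 means "your entry went in"; any
 * other value is the id already stored for that tag. */
static int
sketch_insert(unsigned int hashcode, int id)
{
    struct sketch_slot *slot = &sketch_tab[hashcode % SKETCH_TABSIZE];

    if (slot->used)
        return slot->id;        /* conflicting entry: report existing id */
    slot->used = 1;
    slot->id = id;
    return -1;                  /* successful insertion */
}
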
diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c
index f2f42659c14..0f593b20940 100644
--- a/src/backend/storage/buffer/bufmgr.c
+++ b/src/backend/storage/buffer/bufmgr.c
@@ -117,7 +117,7 @@ static int rnode_comparator(const void *p1, const void *p2);
* PrefetchBuffer -- initiate asynchronous read of a block of a relation
*
* This is named by analogy to ReadBuffer but doesn't actually allocate a
- * buffer. Instead it tries to ensure that a future ReadBuffer for the given
+ * buffer.  Instead it tries to ensure that a future ReadBuffer for the given
* block will not be delayed by the I/O. Prefetching is optional.
* No-op if prefetching isn't compiled in.
*/
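
On platforms with posix_fadvise, the underlying hint amounts to something like the sketch below; the server actually routes the request through smgrprefetch() rather than touching file descriptors directly.

#include <fcntl.h>
#include <sys/types.h>

/* Sketch: ask the kernel to start reading one block so that a later
 * read() of the same range finds it already cached.  Purely advisory,
 * and a no-op on platforms without the call. */
static void
prefetch_block_sketch(int fd, off_t blkno, int blcksz)
{
#ifdef POSIX_FADV_WILLNEED
    (void) posix_fadvise(fd, blkno * blcksz, blcksz, POSIX_FADV_WILLNEED);
#endif
}
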
@@ -207,7 +207,7 @@ ReadBuffer(Relation reln, BlockNumber blockNum)
* Assume when this function is called, that reln has been opened already.
*
* In RBM_NORMAL mode, the page is read from disk, and the page header is
- * validated. An error is thrown if the page header is not valid. (But
+ * validated.  An error is thrown if the page header is not valid.  (But
* note that an all-zero page is considered "valid"; see PageIsVerified().)
*
* RBM_ZERO_ON_ERROR is like the normal mode, but if the page header is not
@@ -215,7 +215,7 @@ ReadBuffer(Relation reln, BlockNumber blockNum)
* for non-critical data, where the caller is prepared to repair errors.
*
* In RBM_ZERO mode, if the page isn't in buffer cache already, it's filled
- * with zeros instead of reading it from disk. Useful when the caller is
+ * with zeros instead of reading it from disk.  Useful when the caller is
* going to fill the page from scratch, since this saves I/O and avoids
* unnecessary failure if the page-on-disk has corrupt page headers.
* Caution: do not use this mode to read a page that is beyond the relation's
@@ -372,7 +372,7 @@ ReadBuffer_common(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
* This can happen because mdread doesn't complain about reads beyond
* EOF (when zero_damaged_pages is ON) and so a previous attempt to
* read a block beyond EOF could have left a "valid" zero-filled
- * buffer. Unfortunately, we have also seen this case occurring
+ * buffer.  Unfortunately, we have also seen this case occurring
* because of buggy Linux kernels that sometimes return an
* lseek(SEEK_END) result that doesn't account for a recent write. In
* that situation, the pre-existing buffer would contain valid data
@@ -598,7 +598,7 @@ BufferAlloc(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
/*
* Didn't find it in the buffer pool. We'll have to initialize a new
- * buffer. Remember to unlock the mapping lock while doing the work.
+ * buffer.  Remember to unlock the mapping lock while doing the work.
*/
LWLockRelease(newPartitionLock);
@@ -608,7 +608,7 @@ BufferAlloc(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
bool lock_held;
/*
- * Select a victim buffer. The buffer is returned with its header
+ * Select a victim buffer.  The buffer is returned with its header
* spinlock still held! Also (in most cases) the BufFreelistLock is
* still held, since it would be bad to hold the spinlock while
* possibly waking up other processes.
@@ -657,7 +657,7 @@ BufferAlloc(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
* If using a nondefault strategy, and writing the buffer
* would require a WAL flush, let the strategy decide whether
* to go ahead and write/reuse the buffer or to choose another
- * victim. We need lock to inspect the page LSN, so this
+ * victim.  We need lock to inspect the page LSN, so this
* can't be done inside StrategyGetBuffer.
*/
if (strategy != NULL)
@@ -787,7 +787,7 @@ BufferAlloc(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
{
/*
* We can only get here if (a) someone else is still reading
- * in the page, or (b) a previous read attempt failed. We
+ * in the page, or (b) a previous read attempt failed.  We
* have to wait for any active read attempt to finish, and
* then set up our own read attempt if the page is still not
* BM_VALID. StartBufferIO does it all.
@@ -880,7 +880,7 @@ BufferAlloc(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
* This is used only in contexts such as dropping a relation. We assume
* that no other backend could possibly be interested in using the page,
* so the only reason the buffer might be pinned is if someone else is
- * trying to write it out. We have to let them finish before we can
+ * trying to write it out.  We have to let them finish before we can
* reclaim the buffer.
*
* The buffer could get reclaimed by someone else while we are waiting
@@ -979,7 +979,7 @@ retry:
*
* Marks buffer contents as dirty (actual write happens later).
*
- * Buffer must be pinned and exclusive-locked. (If caller does not hold
+ * Buffer must be pinned and exclusive-locked.  (If caller does not hold
* exclusive lock, then somebody could be in process of writing the buffer,
* leading to risk of bad data written to disk.)
*/
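
In server or extension code, that contract translates into a fixed call order; a sketch assuming the standard bufmgr API, with WAL-logging and error handling omitted:

#include "postgres.h"
#include "storage/bufmgr.h"

/* Sketch: pin (ReadBuffer), take the exclusive content lock, modify
 * the page, mark it dirty while still locked, then unlock and unpin. */
static void
dirty_page_sketch(Relation rel, BlockNumber blkno)
{
    Buffer      buf = ReadBuffer(rel, blkno);   /* pins the buffer */

    LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
    /* ... change something on BufferGetPage(buf) here ... */
    MarkBufferDirty(buf);       /* contents dirty; physical write comes later */
    UnlockReleaseBuffer(buf);   /* release the lock, then the pin */
}
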
@@ -1028,7 +1028,7 @@ MarkBufferDirty(Buffer buffer)
*
* Formerly, this saved one cycle of acquiring/releasing the BufMgrLock
* compared to calling the two routines separately. Now it's mainly just
- * a convenience function. However, if the passed buffer is valid and
+ * a convenience function.  However, if the passed buffer is valid and
* already contains the desired block, we just return it as-is; and that
* does save considerable work compared to a full release and reacquire.
*
@@ -1080,7 +1080,7 @@ ReleaseAndReadBuffer(Buffer buffer,
* when we first pin it; for other strategies we just make sure the usage_count
* isn't zero. (The idea of the latter is that we don't want synchronized
* heap scans to inflate the count, but we need it to not be zero to discourage
- * other backends from stealing buffers from our ring. As long as we cycle
+ * other backends from stealing buffers from our ring.  As long as we cycle
* through the ring faster than the global clock-sweep cycles, buffers in
* our ring won't be chosen as victims for replacement by other backends.)
*
@@ -1088,7 +1088,7 @@ ReleaseAndReadBuffer(Buffer buffer,
*
* Note that ResourceOwnerEnlargeBuffers must have been done already.
*
- * Returns TRUE if buffer is BM_VALID, else FALSE. This provision allows
+ * Returns TRUE if buffer is BM_VALID, else FALSE.  This provision allows
* some callers to avoid an extra spinlock cycle.
*/
static bool
@@ -1242,7 +1242,7 @@ BufferSync(int flags)
* have the flag set.
*
* Note that if we fail to write some buffer, we may leave buffers with
- * BM_CHECKPOINT_NEEDED still set. This is OK since any such buffer would
+ * BM_CHECKPOINT_NEEDED still set.  This is OK since any such buffer would
* certainly need to be written for the next checkpoint attempt, too.
*/
num_to_write = 0;
@@ -1345,7 +1345,7 @@ BufferSync(int flags)
* This is called periodically by the background writer process.
*
* Returns true if it's appropriate for the bgwriter process to go into
- * low-power hibernation mode. (This happens if the strategy clock sweep
+ * low-power hibernation mode.  (This happens if the strategy clock sweep
* has been "lapped" and no buffer allocations have occurred recently,
* or if the bgwriter has been effectively disabled by setting
* bgwriter_lru_maxpages to 0.)
@@ -2111,7 +2111,7 @@ BufferGetLSNAtomic(Buffer buffer)
* specified relation fork that have block numbers >= firstDelBlock.
* (In particular, with firstDelBlock = 0, all pages are removed.)
* Dirty pages are simply dropped, without bothering to write them
- * out first. Therefore, this is NOT rollback-able, and so should be
+ * out first.  Therefore, this is NOT rollback-able, and so should be
* used only with extreme caution!
*
* Currently, this is called only from smgr.c when the underlying file
@@ -2120,7 +2120,7 @@ BufferGetLSNAtomic(Buffer buffer)
* be deleted momentarily anyway, and there is no point in writing it.
* It is the responsibility of higher-level code to ensure that the
* deletion or truncation does not lose any data that could be needed
- * later. It is also the responsibility of higher-level code to ensure
+ * later.  It is also the responsibility of higher-level code to ensure
* that no other process could be trying to load more pages of the
* relation into buffers.
*
@@ -2282,9 +2282,9 @@ DropRelFileNodesAllBuffers(RelFileNodeBackend *rnodes, int nnodes)
*
* This function removes all the buffers in the buffer cache for a
* particular database. Dirty pages are simply dropped, without
- * bothering to write them out first. This is used when we destroy a
+ * bothering to write them out first. This is used when we destroy a
* database, to avoid trying to flush data to disk when the directory
- * tree no longer exists. Implementation is pretty similar to
+ * tree no longer exists. Implementation is pretty similar to
* DropRelFileNodeBuffers() which is for destroying just one relation.
* --------------------------------------------------------------------
*/
diff --git a/src/backend/storage/buffer/freelist.c b/src/backend/storage/buffer/freelist.c
index c76aaf725b4..a07212dc8ec 100644
--- a/src/backend/storage/buffer/freelist.c
+++ b/src/backend/storage/buffer/freelist.c
@@ -36,7 +36,7 @@ typedef struct
*/
/*
- * Statistics. These counters should be wide enough that they can't
+ * Statistics. These counters should be wide enough that they can't
* overflow during a single bgwriter cycle.
*/
uint32 completePasses; /* Complete cycles of the clock sweep */
@@ -135,7 +135,7 @@ StrategyGetBuffer(BufferAccessStrategy strategy, bool *lock_held)
/*
* We count buffer allocation requests so that the bgwriter can estimate
- * the rate of buffer consumption. Note that buffers recycled by a
+ * the rate of buffer consumption. Note that buffers recycled by a
* strategy object are intentionally not counted here.
*/
StrategyControl->numBufferAllocs++;
@@ -266,7 +266,7 @@ StrategyFreeBuffer(volatile BufferDesc *buf)
*
* In addition, we return the completed-pass count (which is effectively
* the higher-order bits of nextVictimBuffer) and the count of recent buffer
- * allocs if non-NULL pointers are passed. The alloc count is reset after
+ * allocs if non-NULL pointers are passed. The alloc count is reset after
* being read.
*/
int
@@ -291,7 +291,7 @@ StrategySyncStart(uint32 *complete_passes, uint32 *num_buf_alloc)
* StrategyNotifyBgWriter -- set or clear allocation notification latch
*
* If bgwriterLatch isn't NULL, the next invocation of StrategyGetBuffer will
- * set that latch. Pass NULL to clear the pending notification before it
+ * set that latch. Pass NULL to clear the pending notification before it
* happens. This feature is used by the bgwriter process to wake itself up
* from hibernation, and is not meant for anybody else to use.
*/
@@ -484,7 +484,7 @@ GetBufferFromRing(BufferAccessStrategy strategy)
/*
* If the slot hasn't been filled yet, tell the caller to allocate a new
- * buffer with the normal allocation strategy. He will then fill this
+ * buffer with the normal allocation strategy. The caller will then fill this
* slot by calling AddBufferToRing with the new buffer.
*/
bufnum = strategy->buffers[strategy->current];
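
A hedged sketch of the fill-on-demand protocol this comment describes: an unfilled slot sends the caller to the normal allocation path, and AddBufferToRing-style code then records the chosen buffer in the slot just examined. Names and the fixed ring size are illustrative, not PostgreSQL's:

#define RING_SIZE      32
#define INVALID_BUFFER (-1)         /* marks a slot not yet filled */

typedef struct
{
    int current;                    /* slot most recently handed out */
    int buffers[RING_SIZE];
} Ring;

static int
ring_get(Ring *ring)
{
    if (++ring->current >= RING_SIZE)
        ring->current = 0;
    return ring->buffers[ring->current];    /* may be INVALID_BUFFER */
}

static void
ring_add(Ring *ring, int buf)
{
    ring->buffers[ring->current] = buf;     /* fill the slot just examined */
}
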
@@ -537,7 +537,7 @@ AddBufferToRing(BufferAccessStrategy strategy, volatile BufferDesc *buf)
*
* When a nondefault strategy is used, the buffer manager calls this function
* when it turns out that the buffer selected by StrategyGetBuffer needs to
- * be written out and doing so would require flushing WAL too. This gives us
+ * be written out and doing so would require flushing WAL too. This gives us
* a chance to choose a different victim.
*
* Returns true if buffer manager should ask for a new victim, and false
diff --git a/src/backend/storage/buffer/localbuf.c b/src/backend/storage/buffer/localbuf.c
index 44eecee3cac..c9ba7c23eb3 100644
--- a/src/backend/storage/buffer/localbuf.c
+++ b/src/backend/storage/buffer/localbuf.c
@@ -95,7 +95,7 @@ LocalPrefetchBuffer(SMgrRelation smgr, ForkNumber forkNum,
* Find or create a local buffer for the given page of the given relation.
*
* API is similar to bufmgr.c's BufferAlloc, except that we do not need
- * to do any locking since this is all local. Also, IO_IN_PROGRESS
+ * to do any locking since this is all local. Also, IO_IN_PROGRESS
* does not get set. Lastly, we support only default access strategy
* (hence, usage_count is always advanced).
*/
@@ -293,7 +293,7 @@ MarkLocalBufferDirty(Buffer buffer)
* specified relation that have block numbers >= firstDelBlock.
* (In particular, with firstDelBlock = 0, all pages are removed.)
* Dirty pages are simply dropped, without bothering to write them
- * out first. Therefore, this is NOT rollback-able, and so should be
+ * out first. Therefore, this is NOT rollback-able, and so should be
* used only with extreme caution!
*
* See DropRelFileNodeBuffers in bufmgr.c for more notes.
@@ -460,7 +460,7 @@ GetLocalBufferStorage(void)
/*
* We allocate local buffers in a context of their own, so that the
* space eaten for them is easily recognizable in MemoryContextStats
- * output. Create the context on first use.
+ * output. Create the context on first use.
*/
if (LocalBufferContext == NULL)
LocalBufferContext =
diff --git a/src/backend/storage/file/buffile.c b/src/backend/storage/file/buffile.c
index 6a80d3bba1f..81a30a5273e 100644
--- a/src/backend/storage/file/buffile.c
+++ b/src/backend/storage/file/buffile.c
@@ -29,7 +29,7 @@
* that was current at that time.
*
* BufFile also supports temporary files that exceed the OS file size limit
- * (by opening multiple fd.c temporary files). This is an essential feature
+ * (by opening multiple fd.c temporary files). This is an essential feature
* for sorts and hashjoins on large amounts of data.
*-------------------------------------------------------------------------
*/
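
The multi-file scheme mentioned here reduces to simple division on the logical offset. A standalone sketch, assuming an illustrative 1 GB segment size (fd.c's actual constant may differ):

#include <stdint.h>

#define SEG_SIZE ((int64_t) 1024 * 1024 * 1024)    /* assumed segment size */

/* Map a logical BufFile offset to (segment number, offset within segment). */
static void
locate_segment(int64_t logical, int *fileno, int64_t *offset)
{
    *fileno = (int) (logical / SEG_SIZE);
    *offset = logical % SEG_SIZE;
}
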
@@ -72,7 +72,7 @@ struct BufFile
bool dirty; /* does buffer need to be written? */
/*
- * resowner is the ResourceOwner to use for underlying temp files. (We
+ * resowner is the ResourceOwner to use for underlying temp files. (We
* don't need to remember the memory context we're using explicitly,
* because after creation we only repalloc our arrays larger.)
*/
@@ -519,7 +519,7 @@ BufFileSeek(BufFile *file, int fileno, off_t offset, int whence)
{
/*
* Seek is to a point within existing buffer; we can just adjust
- * pos-within-buffer, without flushing buffer. Note this is OK
+ * pos-within-buffer, without flushing buffer. Note this is OK
* whether reading or writing, but buffer remains dirty if we were
* writing.
*/
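
The fast path in this hunk is a pure range check against the buffered window: only the in-buffer position moves, and the dirty flag is untouched. A minimal sketch with illustrative field names:

#include <stdint.h>

typedef struct
{
    int64_t curOffset;  /* file offset of the buffer's first byte */
    int     pos;        /* current position within the buffer */
    int     nbytes;     /* valid bytes currently buffered */
    int     dirty;      /* buffer modified since load? */
} SketchBufFile;

/* Return 1 if the target lies inside the buffered window (no flush needed). */
static int
seek_within_buffer(SketchBufFile *f, int64_t target)
{
    if (target >= f->curOffset && target <= f->curOffset + f->nbytes)
    {
        f->pos = (int) (target - f->curOffset);
        return 1;       /* dirty stays set if we had been writing */
    }
    return 0;           /* caller must flush (if dirty) and reload */
}
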
diff --git a/src/backend/storage/file/fd.c b/src/backend/storage/file/fd.c
index a2907957ae4..fa0054701cd 100644
--- a/src/backend/storage/file/fd.c
+++ b/src/backend/storage/file/fd.c
@@ -84,7 +84,7 @@
* and other code that tries to open files without consulting fd.c. This
* is the number left free. (While we can be pretty sure we won't get
* EMFILE, there's never any guarantee that we won't get ENFILE due to
- * other processes chewing up FDs. So it's a bad idea to try to open files
+ * other processes chewing up FDs. So it's a bad idea to try to open files
* without consulting fd.c. Nonetheless we cannot control all code.)
*
* Because this is just a fixed setting, we are effectively assuming that
@@ -169,8 +169,8 @@ typedef struct vfd
} Vfd;
/*
- * Virtual File Descriptor array pointer and size. This grows as
- * needed. 'File' values are indexes into this array.
+ * Virtual File Descriptor array pointer and size. This grows as
+ * needed. 'File' values are indexes into this array.
* Note that VfdCache[0] is not a usable VFD, just a list header.
*/
static Vfd *VfdCache;
@@ -190,7 +190,7 @@ static bool have_xact_temporary_files = false;
/*
* Tracks the total size of all temporary files. Note: when temp_file_limit
* is being enforced, this cannot overflow since the limit cannot be more
- * than INT_MAX kilobytes. When not enforcing, it could theoretically
+ * than INT_MAX kilobytes. When not enforcing, it could theoretically
* overflow, but we don't care.
*/
static uint64 temporary_files_size = 0;
@@ -253,7 +253,7 @@ static int nextTempTableSpace = 0;
*
* The Least Recently Used ring is a doubly linked list that begins and
* ends on element zero. Element zero is special -- it doesn't represent
- * a file and its "fd" field always == VFD_CLOSED. Element zero is just an
+ * a file and its "fd" field always == VFD_CLOSED. Element zero is just an
* anchor that shows us the beginning/end of the ring.
* Only VFD elements that are currently really open (have an FD assigned) are
* in the Lru ring. Elements that are "virtually" open can be recognized
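
The ring described here is a doubly linked list threaded through the VFD array by index, with element zero serving purely as the anchor. A compact sketch of initialization and the insert-at-most-recently-used step; field names mirror the description, and the array size is illustrative:

#define VFD_CLOSED (-1)

typedef struct
{
    int fd;                 /* real FD, or VFD_CLOSED */
    int lruMoreRecently;
    int lruLessRecently;
} SketchVfd;

static SketchVfd cache[100];

static void
lru_init(void)
{
    cache[0].fd = VFD_CLOSED;       /* anchor only, never a usable VFD */
    cache[0].lruMoreRecently = 0;
    cache[0].lruLessRecently = 0;   /* empty ring points at itself */
}

/* Insert slot i at the most-recently-used end, just after the anchor. */
static void
lru_insert(int i)
{
    cache[i].lruMoreRecently = 0;
    cache[i].lruLessRecently = cache[0].lruLessRecently;
    cache[0].lruLessRecently = i;
    cache[cache[i].lruLessRecently].lruMoreRecently = i;
}
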
@@ -418,7 +418,7 @@ InitFileAccess(void)
* We stop counting if usable_fds reaches max_to_probe. Note: a small
* value of max_to_probe might result in an underestimate of already_open;
* we must fill in any "gaps" in the set of used FDs before the calculation
- * of already_open will give the right answer. In practice, max_to_probe
+ * of already_open will give the right answer. In practice, max_to_probe
* of a couple of dozen should be enough to ensure good results.
*
* We assume stdin (FD 0) is available for dup'ing
@@ -495,7 +495,7 @@ count_usable_fds(int max_to_probe, int *usable_fds, int *already_open)
pfree(fd);
/*
- * Return results. usable_fds is just the number of successful dups. We
+ * Return results. usable_fds is just the number of successful dups. We
* assume that the system limit is highestfd+1 (remember 0 is a legal FD
* number) and so already_open is highestfd+1 - usable_fds.
*/
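
The probe amounts to dup'ing stdin until failure, then applying the arithmetic stated in the comment. A standalone sketch under the same assumption that FD 0 is available:

#include <unistd.h>

/* usable = successful dups; already_open = highestfd + 1 - usable. */
static void
probe_fds(int max_to_probe, int *usable, int *already_open)
{
    int fds[1024];
    int n = 0;
    int highestfd = 0;

    while (n < max_to_probe && n < (int) (sizeof fds / sizeof fds[0]))
    {
        int fd = dup(0);        /* assumes FD 0 (stdin) can be dup'ed */

        if (fd < 0)
            break;              /* EMFILE/ENFILE: ran out of FDs */
        fds[n++] = fd;
        if (fd > highestfd)
            highestfd = fd;
    }

    *usable = n;
    *already_open = highestfd + 1 - n;  /* FD numbering starts at 0 */

    while (n > 0)
        close(fds[--n]);
}
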
@@ -990,7 +990,7 @@ OpenTemporaryFile(bool interXact)
/*
* If not, or if tablespace is bad, create in database's default
- * tablespace. MyDatabaseTableSpace should normally be set before we get
+ * tablespace. MyDatabaseTableSpace should normally be set before we get
* here, but just in case it isn't, fall back to pg_default tablespace.
*/
if (file <= 0)
@@ -1284,7 +1284,7 @@ FileWrite(File file, char *buffer, int amount)
/*
* If enforcing temp_file_limit and it's a temp file, check to see if the
- * write would overrun temp_file_limit, and throw error if so. Note: it's
+ * write would overrun temp_file_limit, and throw error if so. Note: it's
* really a modularity violation to throw error here; we should set errno
* and return -1. However, there's no way to report a suitable error
* message if we do that. All current callers would just throw error
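
The check being described compares the projected total temp-file size, in bytes, against a limit expressed in kilobytes. A simplified sketch; it ignores the overwrite-in-place case, where already-counted bytes are rewritten rather than added, which the real code handles:

#include <stdint.h>

static uint64_t temporary_files_size = 0;   /* total temp bytes in use */

/* limit_kb < 0 means "no limit", matching the GUC convention. */
static int
would_exceed_temp_limit(int64_t limit_kb, int write_amount)
{
    if (limit_kb < 0)
        return 0;
    return temporary_files_size + (uint64_t) write_amount
           > (uint64_t) limit_kb * 1024;
}
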
@@ -1563,7 +1563,7 @@ reserveAllocatedDesc(void)
/*
* Routines that want to use stdio (ie, FILE*) should use AllocateFile
* rather than plain fopen(). This lets fd.c deal with freeing FDs if
- * necessary to open the file. When done, call FreeFile rather than fclose.
+ * necessary to open the file. When done, call FreeFile rather than fclose.
*
* Note that files that will be open for any significant length of time
* should NOT be handled this way, since they cannot share kernel file
@@ -1868,7 +1868,7 @@ TryAgain:
* Read a directory opened with AllocateDir, ereport'ing any error.
*
* This is easier to use than raw readdir() since it takes care of some
- * otherwise rather tedious and error-prone manipulation of errno. Also,
+ * otherwise rather tedious and error-prone manipulation of errno. Also,
* if you are happy with a generic error message for AllocateDir failure,
* you can just do
*
@@ -2009,7 +2009,7 @@ SetTempTablespaces(Oid *tableSpaces, int numSpaces)
numTempTableSpaces = numSpaces;
/*
- * Select a random starting point in the list. This is to minimize
+ * Select a random starting point in the list. This is to minimize
* conflicts between backends that are most likely sharing the same list
* of temp tablespaces. Note that if we create multiple temp files in the
* same transaction, we'll advance circularly through the list --- this
@@ -2038,7 +2038,7 @@ TempTablespacesAreSet(void)
/*
* GetNextTempTableSpace
*
- * Select the next temp tablespace to use. A result of InvalidOid means
+ * Select the next temp tablespace to use. A result of InvalidOid means
* to use the current database's default tablespace.
*/
Oid
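
Taken together, the two hunks above describe a randomized starting index plus circular advance through the tablespace list. A sketch with plain ints standing in for OIDs, where 0 plays the role of InvalidOid:

#include <stdlib.h>

#define MAX_SPACES 16

static int tempTableSpaces[MAX_SPACES];
static int numTempTableSpaces = 0;
static int nextTempTableSpace = 0;

static void
set_temp_tablespaces(const int *oids, int n)
{
    for (int i = 0; i < n; i++)
        tempTableSpaces[i] = oids[i];
    numTempTableSpaces = n;
    /* Random start point: backends sharing a list won't all pile onto the
     * same tablespace for their first temp file. */
    nextTempTableSpace = (n > 0) ? rand() % n : 0;
}

static int
get_next_temp_tablespace(void)
{
    if (numTempTableSpaces <= 0)
        return 0;                       /* "use the database default" */
    if (nextTempTableSpace >= numTempTableSpaces)
        nextTempTableSpace = 0;         /* advance circularly */
    return tempTableSpaces[nextTempTableSpace++];
}
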
diff --git a/src/backend/storage/freespace/freespace.c b/src/backend/storage/freespace/freespace.c
index b15cf8fe452..4845f93a598 100644
--- a/src/backend/storage/freespace/freespace.c
+++ b/src/backend/storage/freespace/freespace.c
@@ -48,7 +48,7 @@
* Range Category
* 0 - 31 0
* 32 - 63 1
- * ... ... ...
+ * ... ... ...
* 8096 - 8127 253
* 8128 - 8163 254
* 8164 - 8192 255
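
The table maps each 32-byte step of available space onto one of 256 categories. Apart from the top rows, which the real code special-cases (note that 8128-8163 and 8164-8192 are not exact 32-byte bins), the mapping is plain integer division. A sketch:

#define FSM_CAT_STEP 32
#define FSM_MAX_CAT  255

static unsigned char
space_to_category(int avail)    /* avail: free bytes on the page */
{
    int cat = avail / FSM_CAT_STEP;

    return (unsigned char) (cat > FSM_MAX_CAT ? FSM_MAX_CAT : cat);
}
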
@@ -123,7 +123,7 @@ static uint8 fsm_vacuum_page(Relation rel, FSMAddress addr, bool *eof);
* will turn out to have too little space available by the time the caller
* gets a lock on it. In that case, the caller should report the actual
* amount of free space available on that page and then try again (see
- * RecordAndGetPageWithFreeSpace). If InvalidBlockNumber is returned,
+ * RecordAndGetPageWithFreeSpace). If InvalidBlockNumber is returned,
* extend the relation.
*/
BlockNumber
diff --git a/src/backend/storage/freespace/fsmpage.c b/src/backend/storage/freespace/fsmpage.c
index 8376a7fc0f8..374fe2671d3 100644
--- a/src/backend/storage/freespace/fsmpage.c
+++ b/src/backend/storage/freespace/fsmpage.c
@@ -185,13 +185,13 @@ restart:
/*----------
* Start the search from the target slot. At every step, move one
- * node to the right, then climb up to the parent. Stop when we reach
+ * node to the right, then climb up to the parent. Stop when we reach
* a node with enough free space (as we must, since the root has enough
* space).
*
* The idea is to gradually expand our "search triangle", that is, all
* nodes covered by the current node, and to be sure we search to the
- * right from the start point. At the first step, only the target slot
+ * right from the start point. At the first step, only the target slot
* is examined. When we move up from a left child to its parent, we are
* adding the right-hand subtree of that parent to the search triangle.
* When we move right then up from a right child, we are dropping the
diff --git a/src/backend/storage/ipc/ipc.c b/src/backend/storage/ipc/ipc.c
index c339e9c780b..819dee1819b 100644
--- a/src/backend/storage/ipc/ipc.c
+++ b/src/backend/storage/ipc/ipc.c
@@ -4,7 +4,7 @@
* POSTGRES inter-process communication definitions.
*
* This file is misnamed, as it no longer has much of anything directly
- * to do with IPC. The functionality here is concerned with managing
+ * to do with IPC. The functionality here is concerned with managing
* exit-time cleanup for either a postmaster or a backend.
*
*
@@ -84,7 +84,7 @@ static int on_proc_exit_index,
* -cim 2/6/90
*
* Unfortunately, we can't really guarantee that add-on code
- * obeys the rule of not calling exit() directly. So, while
+ * obeys the rule of not calling exit() directly. So, while
* this is the preferred way out of the system, we also register
* an atexit callback that will make sure cleanup happens.
* ----------------------------------------------------------------
@@ -103,7 +103,7 @@ proc_exit(int code)
* fixed file name, each backend will overwrite earlier profiles. To
* fix that, we create a separate subdirectory for each backend
* (./gprof/pid) and 'cd' to that subdirectory before we exit() - that
- * forces mcleanup() to write each profile into its own directory. We
+ * forces mcleanup() to write each profile into its own directory. We
* end up with something like: $PGDATA/gprof/8829/gmon.out
* $PGDATA/gprof/8845/gmon.out ...
*
@@ -246,7 +246,7 @@ atexit_callback(void)
* on_proc_exit
*
* this function adds a callback function to the list of
- * functions invoked by proc_exit(). -cim 2/6/90
+ * functions invoked by proc_exit(). -cim 2/6/90
* ----------------------------------------------------------------
*/
void
@@ -273,7 +273,7 @@ on_proc_exit(pg_on_exit_callback function, Datum arg)
* on_shmem_exit
*
* this function adds a callback function to the list of
- * functions invoked by shmem_exit(). -cim 2/6/90
+ * functions invoked by shmem_exit(). -cim 2/6/90
* ----------------------------------------------------------------
*/
void
diff --git a/src/backend/storage/ipc/ipci.c b/src/backend/storage/ipc/ipci.c
index b34ba447127..918ac51b194 100644
--- a/src/backend/storage/ipc/ipci.c
+++ b/src/backend/storage/ipc/ipci.c
@@ -52,7 +52,7 @@ static bool addin_request_allowed = true;
* a loadable module.
*
* This is only useful if called from the _PG_init hook of a library that
- * is loaded into the postmaster via shared_preload_libraries. Once
+ * is loaded into the postmaster via shared_preload_libraries. Once
* shared memory has been allocated, calls will be ignored. (We could
* raise an error, but it seems better to make it a no-op, so that
* libraries containing such calls can be reloaded if needed.)
@@ -82,7 +82,7 @@ RequestAddinShmemSpace(Size size)
* This is a bit code-wasteful and could be cleaned up.)
*
* If "makePrivate" is true then we only need private memory, not shared
- * memory. This is true for a standalone backend, false for a postmaster.
+ * memory. This is true for a standalone backend, false for a postmaster.
*/
void
CreateSharedMemoryAndSemaphores(bool makePrivate, int port)
diff --git a/src/backend/storage/ipc/pmsignal.c b/src/backend/storage/ipc/pmsignal.c
index 6efb35d43c2..507ba14060c 100644
--- a/src/backend/storage/ipc/pmsignal.c
+++ b/src/backend/storage/ipc/pmsignal.c
@@ -26,9 +26,9 @@
/*
* The postmaster is signaled by its children by sending SIGUSR1. The
- * specific reason is communicated via flags in shared memory. We keep
+ * specific reason is communicated via flags in shared memory. We keep
* a boolean flag for each possible "reason", so that different reasons
- * can be signaled by different backends at the same time. (However,
+ * can be signaled by different backends at the same time. (However,
* if the same reason is signaled more than once simultaneously, the
* postmaster will observe it only once.)
*
@@ -42,7 +42,7 @@
* have three possible states: UNUSED, ASSIGNED, ACTIVE. An UNUSED slot is
* available for assignment. An ASSIGNED slot is associated with a postmaster
* child process, but either the process has not touched shared memory yet,
- * or it has successfully cleaned up after itself. A ACTIVE slot means the
+ * or it has successfully cleaned up after itself. An ACTIVE slot means the
* process is actively using shared memory. The slots are assigned to
* child processes at random, and postmaster.c is responsible for tracking
* which one goes with which PID.
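
The mechanism reduces to setting a per-reason flag in shared memory and then sending SIGUSR1. Because each flag is boolean, simultaneous signals for the same reason collapse into a single observation, as the comment notes. A minimal sketch:

#include <signal.h>
#include <sys/types.h>

#define NUM_REASONS 16

/* One flag per reason; in reality the array lives in shared memory. */
static volatile sig_atomic_t PMSignalFlags[NUM_REASONS];

static void
send_pmsignal(int reason, pid_t postmaster_pid)
{
    PMSignalFlags[reason] = 1;      /* record why we are signaling... */
    kill(postmaster_pid, SIGUSR1);  /* ...then kick the postmaster */
}
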
diff --git a/src/backend/storage/ipc/procarray.c b/src/backend/storage/ipc/procarray.c
index e801c8def0c..47c1059cc7b 100644
--- a/src/backend/storage/ipc/procarray.c
+++ b/src/backend/storage/ipc/procarray.c
@@ -19,11 +19,11 @@
*
* During hot standby, we also keep a list of XIDs representing transactions
* that are known to be running in the master (or more precisely, were running
- * as of the current point in the WAL stream). This list is kept in the
+ * as of the current point in the WAL stream). This list is kept in the
* KnownAssignedXids array, and is updated by watching the sequence of
* arriving XIDs. This is necessary because if we leave those XIDs out of
* snapshots taken for standby queries, then they will appear to be already
- * complete, leading to MVCC failures. Note that in hot standby, the PGPROC
+ * complete, leading to MVCC failures. Note that in hot standby, the PGPROC
* array represents standby processes, which by definition are not running
* transactions that have XIDs.
*
@@ -268,7 +268,7 @@ ProcArrayAdd(PGPROC *proc)
if (arrayP->numProcs >= arrayP->maxProcs)
{
/*
- * Ooops, no room. (This really shouldn't happen, since there is a
+ * Oops, no room. (This really shouldn't happen, since there is a
* fixed supply of PGPROC structs too, and so we should have failed
* earlier.)
*/
@@ -725,7 +725,7 @@ ProcArrayApplyRecoveryInfo(RunningTransactions running)
* ShmemVariableCache->nextXid must be beyond any observed xid.
*
* We don't expect anyone else to modify nextXid, hence we don't need to
- * hold a lock while examining it. We still acquire the lock to modify
+ * hold a lock while examining it. We still acquire the lock to modify
* it, though.
*/
nextXid = latestObservedXid;
@@ -1111,9 +1111,9 @@ TransactionIdIsActive(TransactionId xid)
* ignored.
*
* This is used by VACUUM to decide which deleted tuples must be preserved
- * in a table. allDbs = TRUE is needed for shared relations, but allDbs =
+ * in a table. allDbs = TRUE is needed for shared relations, but allDbs =
* FALSE is sufficient for non-shared relations, since only backends in my
- * own database could ever see the tuples in them. Also, we can ignore
+ * own database could ever see the tuples in them. Also, we can ignore
* concurrently running lazy VACUUMs because (a) they must be working on other
* tables, and (b) they don't need to do snapshot-based lookups.
*
@@ -1424,7 +1424,7 @@ GetSnapshotData(Snapshot snapshot)
* do that much work while holding the ProcArrayLock.
*
* The other backend can add more subxids concurrently, but cannot
- * remove any. Hence it's important to fetch nxids just once.
+ * remove any. Hence it's important to fetch nxids just once.
* Should be safe to use memcpy, though. (We needn't worry about
* missing any xids added concurrently, because they must postdate
* xmax.)
@@ -1973,7 +1973,7 @@ BackendPidGetProc(int pid)
* Only main transaction Ids are considered. This function is mainly
* useful for determining what backend owns a lock.
*
- * Beware that not every xact has an XID assigned. However, as long as you
+ * Beware that not every xact has an XID assigned. However, as long as you
* only call this using an XID found on disk, you're safe.
*/
int
@@ -2037,7 +2037,7 @@ IsBackendPid(int pid)
* some snapshot we have. Since we examine the procarray with only shared
* lock, there are race conditions: a backend could set its xmin just after
* we look. Indeed, on multiprocessors with weak memory ordering, the
- * other backend could have set its xmin *before* we look. We know however
+ * other backend could have set its xmin *before* we look. We know however
* that such a backend must have held shared ProcArrayLock overlapping our
* own hold of ProcArrayLock, else we would see its xmin update. Therefore,
* any snapshot the other backend is taking concurrently with our scan cannot
@@ -2502,7 +2502,7 @@ CountOtherDBBackends(Oid databaseId, int *nbackends, int *nprepared)
* XidCacheRemoveRunningXids
*
* Remove a bunch of TransactionIds from the list of known-running
- * subtransactions for my backend. Both the specified xid and those in
+ * subtransactions for my backend. Both the specified xid and those in
* the xids[] array (of length nxids) are removed from the subxids cache.
* latestXid must be the latest XID among the group.
*/
@@ -2608,7 +2608,7 @@ DisplayXidCache(void)
* treated as running by standby transactions, even though they are not in
* the standby server's PGXACT array.
*
- * We record all XIDs that we know have been assigned. That includes all the
+ * We record all XIDs that we know have been assigned. That includes all the
* XIDs seen in WAL records, plus all unobserved XIDs that we can deduce have
* been assigned. We can deduce the existence of unobserved XIDs because we
* know XIDs are assigned in sequence, with no gaps. The KnownAssignedXids
@@ -2617,7 +2617,7 @@ DisplayXidCache(void)
*
* During hot standby we do not fret too much about the distinction between
* top-level XIDs and subtransaction XIDs. We store both together in the
- * KnownAssignedXids list. In backends, this is copied into snapshots in
+ * KnownAssignedXids list. In backends, this is copied into snapshots in
* GetSnapshotData(), taking advantage of the fact that XidInMVCCSnapshot()
* doesn't care about the distinction either. Subtransaction XIDs are
* effectively treated as top-level XIDs and in the typical case pg_subtrans
@@ -2832,14 +2832,14 @@ ExpireOldKnownAssignedTransactionIds(TransactionId xid)
* must hold shared ProcArrayLock to examine the array. To remove XIDs from
* the array, the startup process must hold ProcArrayLock exclusively, for
* the usual transactional reasons (compare commit/abort of a transaction
- * during normal running). Compressing unused entries out of the array
+ * during normal running). Compressing unused entries out of the array
* likewise requires exclusive lock. To add XIDs to the array, we just insert
* them into slots to the right of the head pointer and then advance the head
* pointer. This wouldn't require any lock at all, except that on machines
* with weak memory ordering we need to be careful that other processors
* see the array element changes before they see the head pointer change.
* We handle this by using a spinlock to protect reads and writes of the
- * head/tail pointers. (We could dispense with the spinlock if we were to
+ * head/tail pointers. (We could dispense with the spinlock if we were to
* create suitable memory access barrier primitives and use those instead.)
* The spinlock must be taken to read or write the head/tail pointers unless
* the caller holds ProcArrayLock exclusively.
@@ -2936,7 +2936,7 @@ KnownAssignedXidsCompress(bool force)
* If exclusive_lock is true then caller already holds ProcArrayLock in
* exclusive mode, so we need no extra locking here. Else caller holds no
* lock, so we need to be sure we maintain sufficient interlocks against
- * concurrent readers. (Only the startup process ever calls this, so no need
+ * concurrent readers. (Only the startup process ever calls this, so no need
* to worry about concurrent writers.)
*/
static void
@@ -2982,7 +2982,7 @@ KnownAssignedXidsAdd(TransactionId from_xid, TransactionId to_xid,
Assert(tail >= 0 && tail < pArray->maxKnownAssignedXids);
/*
- * Verify that insertions occur in TransactionId sequence. Note that even
+ * Verify that insertions occur in TransactionId sequence. Note that even
* if the last existing element is marked invalid, it must still have a
* correctly sequenced XID value.
*/
@@ -3085,7 +3085,7 @@ KnownAssignedXidsSearch(TransactionId xid, bool remove)
}
/*
- * Standard binary search. Note we can ignore the KnownAssignedXidsValid
+ * Standard binary search. Note we can ignore the KnownAssignedXidsValid
* array here, since even invalid entries will contain sorted XIDs.
*/
first = tail;
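
A sketch of the search this comment describes, over a sorted window [tail, head) of the array. It deliberately ignores the valid flags, as the real code does, and for simplicity it also ignores XID wraparound comparisons and the circular indexing of the real array:

typedef unsigned int TransactionId;

static int
known_xids_search(const TransactionId *xids, int tail, int head,
                  TransactionId xid)
{
    int first = tail;
    int last = head - 1;        /* head points past the last used slot */

    while (first <= last)
    {
        int mid = first + (last - first) / 2;

        if (xids[mid] == xid)
            return mid;         /* caller still checks the valid flag */
        if (xids[mid] < xid)
            first = mid + 1;
        else
            last = mid - 1;
    }
    return -1;                  /* not present */
}
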
diff --git a/src/backend/storage/ipc/shmem.c b/src/backend/storage/ipc/shmem.c
index 18ba42611c1..129d9f83705 100644
--- a/src/backend/storage/ipc/shmem.c
+++ b/src/backend/storage/ipc/shmem.c
@@ -26,7 +26,7 @@
* for a module and should never be allocated after the shared memory
* initialization phase. Hash tables have a fixed maximum size, but
* their actual size can vary dynamically. When entries are added
- * to the table, more space is allocated. Queues link data structures
+ * to the table, more space is allocated. Queues link data structures
* that have been allocated either within fixed-size structures or as hash
* buckets. Each shared data structure has a string name to identify
* it (assigned in the module that declares it).
@@ -40,7 +40,7 @@
* The shmem index has two purposes: first, it gives us
* a simple model of how the world looks when a backend process
* initializes. If something is present in the shmem index,
- * it is initialized. If it is not, it is uninitialized. Second,
+ * it is initialized. If it is not, it is uninitialized. Second,
* the shmem index allows us to allocate shared memory on demand
* instead of trying to preallocate structures and hard-wire the
* sizes and locations in header files. If you are using a lot
@@ -55,8 +55,8 @@
* pointers using the method described in (b) above.
*
* (d) memory allocation model: shared memory can never be
- * freed, once allocated. Each hash table has its own free list,
- * so hash buckets can be reused when an item is deleted. However,
+ * freed, once allocated. Each hash table has its own free list,
+ * so hash buckets can be reused when an item is deleted. However,
* if one hash table grows very large and then shrinks, its space
* cannot be redistributed to other tables. We could build a simple
* hash bucket garbage collector if need be. Right now, it seems
@@ -116,7 +116,7 @@ InitShmemAllocation(void)
Assert(shmhdr != NULL);
/*
- * Initialize the spinlock used by ShmemAlloc. We have to do the space
+ * Initialize the spinlock used by ShmemAlloc. We have to do the space
* allocation the hard way, since obviously ShmemAlloc can't be called
* yet.
*/
@@ -217,7 +217,7 @@ InitShmemIndex(void)
*
* Since ShmemInitHash calls ShmemInitStruct, which expects the ShmemIndex
* hashtable to exist already, we have a bit of a circularity problem in
- * initializing the ShmemIndex itself. The special "ShmemIndex" hash
+ * initializing the ShmemIndex itself. The special "ShmemIndex" hash
* table name will tell ShmemInitStruct to fake it.
*/
info.keysize = SHMEM_INDEX_KEYSIZE;
@@ -294,7 +294,7 @@ ShmemInitHash(const char *name, /* table string name for shmem index */
* ShmemInitStruct -- Create/attach to a structure in shared memory.
*
* This is called during initialization to find or allocate
- * a data structure in shared memory. If no other process
+ * a data structure in shared memory. If no other process
* has created the structure, this routine allocates space
* for it. If it exists already, a pointer to the existing
* structure is returned.
@@ -303,7 +303,7 @@ ShmemInitHash(const char *name, /* table string name for shmem index */
* already in the shmem index (hence, already initialized).
*
* Note: before Postgres 9.0, this function returned NULL for some failure
- * cases. Now, it always throws error instead, so callers need not check
+ * cases. Now, it always throws error instead, so callers need not check
* for NULL.
*/
void *
@@ -335,7 +335,7 @@ ShmemInitStruct(const char *name, Size size, bool *foundPtr)
* be trying to init the shmem index itself.
*
* Notice that the ShmemIndexLock is released before the shmem
- * index has been initialized. This should be OK because no other
+ * index has been initialized. This should be OK because no other
* process can be accessing shared memory yet.
*/
Assert(shmemseghdr->index == NULL);
diff --git a/src/backend/storage/ipc/shmqueue.c b/src/backend/storage/ipc/shmqueue.c
index 72202c2ccdd..5bfcd34239f 100644
--- a/src/backend/storage/ipc/shmqueue.c
+++ b/src/backend/storage/ipc/shmqueue.c
@@ -14,7 +14,7 @@
*
* Package for managing doubly-linked lists in shared memory.
* The only tricky thing is that SHM_QUEUE will usually be a field
- * in a larger record. SHMQueueNext has to return a pointer
+ * in a larger record. SHMQueueNext has to return a pointer
* to the record itself instead of a pointer to the SHMQueue field
* of the record. It takes an extra parameter and does some extra
* pointer arithmetic to do this correctly.
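
The "extra pointer arithmetic" is the classic container-of computation: subtract the embedded link's offset from the link's address to recover the enclosing record. A standalone sketch:

#include <stddef.h>

typedef struct SHM_QUEUE
{
    struct SHM_QUEUE *prev;
    struct SHM_QUEUE *next;
} SHM_QUEUE;

typedef struct
{
    int        payload;
    SHM_QUEUE  links;           /* the queue link is a field mid-record */
} SketchElem;

static SketchElem *
elem_from_link(SHM_QUEUE *link)
{
    return (SketchElem *) ((char *) link - offsetof(SketchElem, links));
}
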
diff --git a/src/backend/storage/ipc/sinval.c b/src/backend/storage/ipc/sinval.c
index 1b82241280f..e85ab669388 100644
--- a/src/backend/storage/ipc/sinval.c
+++ b/src/backend/storage/ipc/sinval.c
@@ -29,7 +29,7 @@ uint64 SharedInvalidMessageCounter;
* Because backends sitting idle will not be reading sinval events, we
* need a way to give an idle backend a swift kick in the rear and make
* it catch up before the sinval queue overflows and forces it to go
- * through a cache reset exercise. This is done by sending
+ * through a cache reset exercise. This is done by sending
* PROCSIG_CATCHUP_INTERRUPT to any backend that gets too far behind.
*
* State for catchup events consists of two flags: one saying whether
@@ -68,7 +68,7 @@ SendSharedInvalidMessages(const SharedInvalidationMessage *msgs, int n)
* NOTE: it is entirely possible for this routine to be invoked recursively
* as a consequence of processing inside the invalFunction or resetFunction.
* Furthermore, such a recursive call must guarantee that all outstanding
- * inval messages have been processed before it exits. This is the reason
+ * inval messages have been processed before it exits. This is the reason
* for the strange-looking choice to use a statically allocated buffer array
* and counters; it's so that a recursive call can process messages already
* sucked out of sinvaladt.c.
@@ -137,7 +137,7 @@ ReceiveSharedInvalidMessages(
* We are now caught up. If we received a catchup signal, reset that
* flag, and call SICleanupQueue(). This is not so much because we need
* to flush dead messages right now, as that we want to pass on the
- * catchup signal to the next slowest backend. "Daisy chaining" the
+ * catchup signal to the next slowest backend. "Daisy chaining" the
* catchup signal this way avoids creating spikes in system load for what
* should be just a background maintenance activity.
*/
@@ -157,7 +157,7 @@ ReceiveSharedInvalidMessages(
*
* If we are idle (catchupInterruptEnabled is set), we can safely
* invoke ProcessCatchupEvent directly. Otherwise, just set a flag
- * to do it later. (Note that it's quite possible for normal processing
+ * to do it later. (Note that it's quite possible for normal processing
* of the current transaction to cause ReceiveSharedInvalidMessages()
* to be run later on; in that case the flag will get cleared again,
* since there's no longer any reason to do anything.)
@@ -233,7 +233,7 @@ HandleCatchupInterrupt(void)
* EnableCatchupInterrupt
*
* This is called by the PostgresMain main loop just before waiting
- * for a frontend command. We process any pending catchup events,
+ * for a frontend command. We process any pending catchup events,
* and enable the signal handler to process future events directly.
*
* NOTE: the signal handler starts out disabled, and stays so until
@@ -278,7 +278,7 @@ EnableCatchupInterrupt(void)
* DisableCatchupInterrupt
*
* This is called by the PostgresMain main loop just after receiving
- * a frontend command. Signal handler execution of catchup events
+ * a frontend command. Signal handler execution of catchup events
* is disabled until the next EnableCatchupInterrupt call.
*
* The PROCSIG_NOTIFY_INTERRUPT signal handler also needs to call this,
diff --git a/src/backend/storage/ipc/sinvaladt.c b/src/backend/storage/ipc/sinvaladt.c
index 09f41c1d387..1a9e420d3cb 100644
--- a/src/backend/storage/ipc/sinvaladt.c
+++ b/src/backend/storage/ipc/sinvaladt.c
@@ -45,7 +45,7 @@
* In reality, the messages are stored in a circular buffer of MAXNUMMESSAGES
* entries. We translate MsgNum values into circular-buffer indexes by
* computing MsgNum % MAXNUMMESSAGES (this should be fast as long as
- * MAXNUMMESSAGES is a constant and a power of 2). As long as maxMsgNum
+ * MAXNUMMESSAGES is a constant and a power of 2). As long as maxMsgNum
* doesn't exceed minMsgNum by more than MAXNUMMESSAGES, we have enough space
* in the buffer. If the buffer does overflow, we recover by setting the
* "reset" flag for each backend that has fallen too far behind. A backend
@@ -58,7 +58,7 @@
* normal behavior is that at most one such interrupt is in flight at a time;
* when a backend completes processing a catchup interrupt, it executes
* SICleanupQueue, which will signal the next-furthest-behind backend if
- * needed. This avoids undue contention from multiple backends all trying
+ * needed. This avoids undue contention from multiple backends all trying
* to catch up at once. However, the furthest-back backend might be stuck
* in a state where it can't catch up. Eventually it will get reset, so it
* won't cause any more problems for anyone but itself. But we don't want
@@ -89,7 +89,7 @@
* the writer wants to change maxMsgNum while readers need to read it.
* We deal with that by having a spinlock that readers must take for just
* long enough to read maxMsgNum, while writers take it for just long enough
- * to write maxMsgNum. (The exact rule is that you need the spinlock to
+ * to write maxMsgNum. (The exact rule is that you need the spinlock to
* read maxMsgNum if you are not holding SInvalWriteLock, and you need the
* spinlock to write maxMsgNum unless you are holding both locks.)
*
@@ -410,7 +410,7 @@ SIInsertDataEntries(const SharedInvalidationMessage *data, int n)
SISeg *segP = shmInvalBuffer;
/*
- * N can be arbitrarily large. We divide the work into groups of no more
+ * N can be arbitrarily large. We divide the work into groups of no more
* than WRITE_QUANTUM messages, to be sure that we don't hold the lock for
* an unreasonably long time. (This is not so much because we care about
* letting in other writers, as that some just-caught-up backend might be
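
The bounded-hold strategy splits an arbitrarily large batch into WRITE_QUANTUM-sized groups, reacquiring the lock once per group. A sketch with a stub standing in for the locked copy; the constant's value is illustrative:

#define WRITE_QUANTUM 64

/* Stand-in for "acquire lock, copy into circular buffer, release lock". */
static void
insert_locked(const int *msgs, int count)
{
    (void) msgs;
    (void) count;
}

static void
insert_in_quanta(const int *msgs, int n)
{
    while (n > 0)
    {
        int nthistime = (n > WRITE_QUANTUM) ? WRITE_QUANTUM : n;

        insert_locked(msgs, nthistime);   /* lock held only for this group */
        msgs += nthistime;
        n -= nthistime;
    }
}
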
@@ -433,7 +433,7 @@ SIInsertDataEntries(const SharedInvalidationMessage *data, int n)
* If the buffer is full, we *must* acquire some space. Clean the
* queue and reset anyone who is preventing space from being freed.
* Otherwise, clean the queue only when it's exceeded the next
- * fullness threshold. We have to loop and recheck the buffer state
+ * fullness threshold. We have to loop and recheck the buffer state
* after any call of SICleanupQueue.
*/
for (;;)
@@ -501,11 +501,11 @@ SIInsertDataEntries(const SharedInvalidationMessage *data, int n)
* executing on behalf of other backends, since each instance will modify only
* fields of its own backend's ProcState, and no instance will look at fields
* of other backends' ProcStates. We express this by grabbing SInvalReadLock
- * in shared mode. Note that this is not exactly the normal (read-only)
+ * in shared mode. Note that this is not exactly the normal (read-only)
* interpretation of a shared lock! Look closely at the interactions before
* allowing SInvalReadLock to be grabbed in shared mode for any other reason!
*
- * NB: this can also run in parallel with SIInsertDataEntries. It is not
+ * NB: this can also run in parallel with SIInsertDataEntries. It is not
* guaranteed that we will return any messages added after the routine is
* entered.
*
@@ -525,10 +525,10 @@ SIGetDataEntries(SharedInvalidationMessage *data, int datasize)
/*
* Before starting to take locks, do a quick, unlocked test to see whether
- * there can possibly be anything to read. On a multiprocessor system,
+ * there can possibly be anything to read. On a multiprocessor system,
* it's possible that this load could migrate backwards and occur before
* we actually enter this function, so we might miss a sinval message that
- * was just added by some other processor. But they can't migrate
+ * was just added by some other processor. But they can't migrate
* backwards over a preceding lock acquisition, so it should be OK. If we
* haven't acquired a lock preventing against further relevant
* invalidations, any such occurrence is not much different than if the
@@ -619,7 +619,7 @@ SIGetDataEntries(SharedInvalidationMessage *data, int datasize)
*
* Caution: because we transiently release write lock when we have to signal
* some other backend, it is NOT guaranteed that there are still minFree
- * free message slots at exit. Caller must recheck and perhaps retry.
+ * free message slots at exit. Caller must recheck and perhaps retry.
*/
void
SICleanupQueue(bool callerHasWriteLock, int minFree)
@@ -640,7 +640,7 @@ SICleanupQueue(bool callerHasWriteLock, int minFree)
/*
* Recompute minMsgNum = minimum of all backends' nextMsgNum, identify the
* furthest-back backend that needs signaling (if any), and reset any
- * backends that are too far back. Note that because we ignore sendOnly
+ * backends that are too far back. Note that because we ignore sendOnly
* backends here it is possible for them to keep sending messages without
* a problem even when they are the only active backend.
*/
diff --git a/src/backend/storage/ipc/standby.c b/src/backend/storage/ipc/standby.c
index c704412366d..bc57e7aec33 100644
--- a/src/backend/storage/ipc/standby.c
+++ b/src/backend/storage/ipc/standby.c
@@ -130,7 +130,7 @@ GetStandbyLimitTime(void)
/*
* The cutoff time is the last WAL data receipt time plus the appropriate
- * delay variable. Delay of -1 means wait forever.
+ * delay variable. Delay of -1 means wait forever.
*/
GetXLogReceiptTime(&rtime, &fromStream);
if (fromStream)
@@ -475,7 +475,7 @@ SendRecoveryConflictWithBufferPin(ProcSignalReason reason)
* determine whether an actual deadlock condition is present: the lock we
* need to wait for might be unrelated to any held by the Startup process.
* Sooner or later, this mechanism should get ripped out in favor of somehow
- * accounting for buffer locks in DeadLockCheck(). However, errors here
+ * accounting for buffer locks in DeadLockCheck(). However, errors here
* seem to be very low-probability in practice, so for now it's not worth
* the trouble.
*/
diff --git a/src/backend/storage/large_object/inv_api.c b/src/backend/storage/large_object/inv_api.c
index 8cb34482bd2..d0bedf48895 100644
--- a/src/backend/storage/large_object/inv_api.c
+++ b/src/backend/storage/large_object/inv_api.c
@@ -810,7 +810,7 @@ inv_truncate(LargeObjectDesc *obj_desc, int64 len)
/*
* If we found the page of the truncation point we need to truncate the
- * data in it. Otherwise if we're in a hole, we need to create a page to
+ * data in it. Otherwise if we're in a hole, we need to create a page to
* mark the end of data.
*/
if (olddata != NULL && olddata->pageno == pageno)
diff --git a/src/backend/storage/lmgr/deadlock.c b/src/backend/storage/lmgr/deadlock.c
index a0f7d34ab07..3d463b3dc3d 100644
--- a/src/backend/storage/lmgr/deadlock.c
+++ b/src/backend/storage/lmgr/deadlock.c
@@ -51,7 +51,7 @@ typedef struct
} WAIT_ORDER;
/*
- * Information saved about each edge in a detected deadlock cycle. This
+ * Information saved about each edge in a detected deadlock cycle. This
* is used to print a diagnostic message upon failure.
*
* Note: because we want to examine this info after releasing the lock
@@ -119,7 +119,7 @@ static PGPROC *blocking_autovacuum_proc = NULL;
* InitDeadLockChecking -- initialize deadlock checker during backend startup
*
* This does per-backend initialization of the deadlock checker; primarily,
- * allocation of working memory for DeadLockCheck. We do this per-backend
+ * allocation of working memory for DeadLockCheck. We do this per-backend
* since there's no percentage in making the kernel do copy-on-write
* inheritance of workspace from the postmaster. We want to allocate the
* space at startup because (a) the deadlock checker might be invoked when
@@ -291,10 +291,10 @@ GetBlockingAutoVacuumPgproc(void)
* DeadLockCheckRecurse -- recursively search for valid orderings
*
* curConstraints[] holds the current set of constraints being considered
- * by an outer level of recursion. Add to this each possible solution
+ * by an outer level of recursion. Add to this each possible solution
* constraint for any cycle detected at this level.
*
- * Returns TRUE if no solution exists. Returns FALSE if a deadlock-free
+ * Returns TRUE if no solution exists. Returns FALSE if a deadlock-free
* state is attainable, in which case waitOrders[] shows the required
* rearrangements of lock wait queues (if any).
*/
@@ -429,7 +429,7 @@ TestConfiguration(PGPROC *startProc)
*
* Since we need to be able to check hypothetical configurations that would
* exist after wait queue rearrangement, the routine pays attention to the
- * table of hypothetical queue orders in waitOrders[]. These orders will
+ * table of hypothetical queue orders in waitOrders[]. These orders will
* be believed in preference to the actual ordering seen in the locktable.
*/
static bool
@@ -506,7 +506,7 @@ FindLockCycleRecurse(PGPROC *checkProc,
conflictMask = lockMethodTable->conflictTab[checkProc->waitLockMode];
/*
- * Scan for procs that already hold conflicting locks. These are "hard"
+ * Scan for procs that already hold conflicting locks. These are "hard"
* edges in the waits-for graph.
*/
procLocks = &(lock->procLocks);
@@ -705,7 +705,7 @@ ExpandConstraints(EDGE *constraints,
nWaitOrders = 0;
/*
- * Scan constraint list backwards. This is because the last-added
+ * Scan constraint list backwards. This is because the last-added
* constraint is the only one that could fail, and so we want to test it
* for inconsistency first.
*/
@@ -759,7 +759,7 @@ ExpandConstraints(EDGE *constraints,
* The initial queue ordering is taken directly from the lock's wait queue.
* The output is an array of PGPROC pointers, of length equal to the lock's
* wait queue length (the caller is responsible for providing this space).
- * The partial order is specified by an array of EDGE structs. Each EDGE
+ * The partial order is specified by an array of EDGE structs. Each EDGE
* is one that we need to reverse, therefore the "waiter" must appear before
* the "blocker" in the output array. The EDGE array may well contain
* edges associated with other locks; these should be ignored.
@@ -829,7 +829,7 @@ TopoSort(LOCK *lock,
afterConstraints[k] = i + 1;
}
/*--------------------
- * Now scan the topoProcs array backwards. At each step, output the
+ * Now scan the topoProcs array backwards. At each step, output the
* last proc that has no remaining before-constraints, and decrease
* the beforeConstraints count of each of the procs it was constrained
* against.
diff --git a/src/backend/storage/lmgr/lmgr.c b/src/backend/storage/lmgr/lmgr.c
index 2b7a1db3eba..a6bbb7ecf52 100644
--- a/src/backend/storage/lmgr/lmgr.c
+++ b/src/backend/storage/lmgr/lmgr.c
@@ -65,7 +65,7 @@ SetLocktagRelationOid(LOCKTAG *tag, Oid relid)
/*
* LockRelationOid
*
- * Lock a relation given only its OID. This should generally be used
+ * Lock a relation given only its OID. This should generally be used
* before attempting to open the relation's relcache entry.
*/
void
@@ -253,7 +253,7 @@ LockHasWaitersRelation(Relation relation, LOCKMODE lockmode)
/*
* LockRelationIdForSession
*
- * This routine grabs a session-level lock on the target relation. The
+ * This routine grabs a session-level lock on the target relation. The
* session lock persists across transaction boundaries. It will be removed
* when UnlockRelationIdForSession() is called, or if an ereport(ERROR) occurs,
* or if the backend exits.
@@ -456,7 +456,7 @@ XactLockTableInsert(TransactionId xid)
*
* Delete the lock showing that the given transaction ID is running.
* (This is never used for main transaction IDs; those locks are only
- * released implicitly at transaction end. But we do use it for subtrans IDs.)
+ * released implicitly at transaction end. But we do use it for subtrans IDs.)
*/
void
XactLockTableDelete(TransactionId xid)
@@ -477,7 +477,7 @@ XactLockTableDelete(TransactionId xid)
* subtransaction, we will exit as soon as it aborts or its top parent commits.
* It takes some extra work to ensure this, because to save on shared memory
* the XID lock of a subtransaction is released when it ends, whether
- * successfully or unsuccessfully. So we have to check if it's "still running"
+ * successfully or unsuccessfully. So we have to check if it's "still running"
* and if so wait for its parent.
*/
void
diff --git a/src/backend/storage/lmgr/lock.c b/src/backend/storage/lmgr/lock.c
index 676b3a49382..4327986a20c 100644
--- a/src/backend/storage/lmgr/lock.c
+++ b/src/backend/storage/lmgr/lock.c
@@ -187,7 +187,7 @@ static int FastPathLocalUseCount = 0;
/*
* The fast-path lock mechanism is concerned only with relation locks on
- * unshared relations by backends bound to a database. The fast-path
+ * unshared relations by backends bound to a database. The fast-path
* mechanism exists mostly to accelerate acquisition and release of locks
* that rarely conflict. Because ShareUpdateExclusiveLock is
* self-conflicting, it can't use the fast-path mechanism; but it also does
@@ -914,7 +914,7 @@ LockAcquireExtended(const LOCKTAG *locktag,
/*
* If lock requested conflicts with locks requested by waiters, must join
- * wait queue. Otherwise, check for conflict with already-held locks.
+ * wait queue. Otherwise, check for conflict with already-held locks.
* (That's last because most complex check.)
*/
if (lockMethodTable->conflictTab[lockmode] & lock->waitMask)
@@ -995,7 +995,7 @@ LockAcquireExtended(const LOCKTAG *locktag,
/*
* NOTE: do not do any material change of state between here and
- * return. All required changes in locktable state must have been
+ * return. All required changes in locktable state must have been
* done when the lock was granted to us --- see notes in WaitOnLock.
*/
@@ -1032,7 +1032,7 @@ LockAcquireExtended(const LOCKTAG *locktag,
{
/*
* Decode the locktag back to the original values, to avoid sending
- * lots of empty bytes with every message. See lock.h to check how a
+ * lots of empty bytes with every message. See lock.h to check how a
* locktag is defined for LOCKTAG_RELATION
*/
LogAccessExclusiveLock(locktag->locktag_field1,
@@ -1290,7 +1290,7 @@ LockCheckConflicts(LockMethod lockMethodTable,
}
/*
- * Rats. Something conflicts. But it could still be my own lock. We have
+ * Rats. Something conflicts. But it could still be my own lock. We have
* to construct a conflict mask that does not reflect our own locks, but
* only lock types held by other processes.
*/
@@ -1382,7 +1382,7 @@ UnGrantLock(LOCK *lock, LOCKMODE lockmode,
/*
* We need only run ProcLockWakeup if the released lock conflicts with at
- * least one of the lock types requested by waiter(s). Otherwise whatever
+ * least one of the lock types requested by waiter(s). Otherwise whatever
* conflict made them wait must still exist. NOTE: before MVCC, we could
* skip wakeup if lock->granted[lockmode] was still positive. But that's
* not true anymore, because the remaining granted locks might belong to
@@ -1402,7 +1402,7 @@ UnGrantLock(LOCK *lock, LOCKMODE lockmode,
}
/*
- * CleanUpLock -- clean up after releasing a lock. We garbage-collect the
+ * CleanUpLock -- clean up after releasing a lock. We garbage-collect the
* proclock and lock objects if possible, and call ProcLockWakeup if there
* are remaining requests and the caller says it's OK. (Normally, this
* should be called after UnGrantLock, and wakeupNeeded is the result from
@@ -1824,7 +1824,7 @@ LockRelease(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
}
/*
- * Decrease the total local count. If we're still holding the lock, we're
+ * Decrease the total local count. If we're still holding the lock, we're
* done.
*/
locallock->nLocks--;
@@ -1956,7 +1956,7 @@ LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
#endif
/*
- * Get rid of our fast-path VXID lock, if appropriate. Note that this is
+ * Get rid of our fast-path VXID lock, if appropriate. Note that this is
* the only way that the lock we hold on our own VXID can ever get
* released: it is always and only released when a toplevel transaction
* ends.
@@ -2043,7 +2043,7 @@ LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
* fast-path data structures, we must acquire it before attempting
* to release the lock via the fast-path. We will continue to
* hold the LWLock until we're done scanning the locallock table,
- * unless we hit a transferred fast-path lock. (XXX is this
+ * unless we hit a transferred fast-path lock. (XXX is this
* really such a good idea? There could be a lot of entries ...)
*/
if (!have_fast_path_lwlock)
@@ -2062,7 +2062,7 @@ LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
/*
* Our lock, originally taken via the fast path, has been
- * transferred to the main lock table. That's going to require
+ * transferred to the main lock table. That's going to require
* some extra work, so release our fast-path lock before starting.
*/
LWLockRelease(MyProc->backendLock);
@@ -2071,7 +2071,7 @@ LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
/*
* Now dump the lock. We haven't got a pointer to the LOCK or
* PROCLOCK in this case, so we have to handle this a bit
- * differently than a normal lock release. Unfortunately, this
+ * differently than a normal lock release. Unfortunately, this
* requires an extra LWLock acquire-and-release cycle on the
* partitionLock, but hopefully it shouldn't happen often.
*/
@@ -2504,9 +2504,9 @@ FastPathTransferRelationLocks(LockMethod lockMethodTable, const LOCKTAG *locktag
* acquiring proc->backendLock. In particular, it's certainly safe to
* assume that if the target backend holds any fast-path locks, it
* must have performed a memory-fencing operation (in particular, an
- * LWLock acquisition) since setting proc->databaseId. However, it's
+ * LWLock acquisition) since setting proc->databaseId. However, it's
* less clear that our backend is certain to have performed a memory
- * fencing operation since the other backend set proc->databaseId. So
+ * fencing operation since the other backend set proc->databaseId. So
* for now, we test it after acquiring the LWLock just to be safe.
*/
if (proc->databaseId != locktag->locktag_field1)
@@ -3020,7 +3020,7 @@ AtPrepare_Locks(void)
continue;
/*
- * If we have both session- and transaction-level locks, fail. This
+ * If we have both session- and transaction-level locks, fail. This
* should never happen with regular locks, since we only take those at
* session level in some special operations like VACUUM. It's
* possible to hit this with advisory locks, though.
@@ -3029,7 +3029,7 @@ AtPrepare_Locks(void)
* the transactional hold to the prepared xact. However, that would
* require two PROCLOCK objects, and we cannot be sure that another
* PROCLOCK will be available when it comes time for PostPrepare_Locks
- * to do the deed. So for now, we error out while we can still do so
+ * to do the deed. So for now, we error out while we can still do so
* safely.
*/
if (haveSessionLock)
@@ -3216,7 +3216,7 @@ PostPrepare_Locks(TransactionId xid)
/*
* We cannot simply modify proclock->tag.myProc to reassign
* ownership of the lock, because that's part of the hash key and
- * the proclock would then be in the wrong hash chain. Instead
+ * the proclock would then be in the wrong hash chain. Instead
* use hash_update_hash_key. (We used to create a new hash entry,
* but that risks out-of-memory failure if other processes are
* busy making proclocks too.) We must unlink the proclock from
@@ -3316,7 +3316,7 @@ GetLockStatusData(void)
/*
* First, we iterate through the per-backend fast-path arrays, locking
- * them one at a time. This might produce an inconsistent picture of the
+ * them one at a time. This might produce an inconsistent picture of the
* system state, but taking all of those LWLocks at the same time seems
* impractical (in particular, note MAX_SIMUL_LWLOCKS). It shouldn't
* matter too much, because none of these locks can be involved in lock
@@ -3395,7 +3395,7 @@ GetLockStatusData(void)
* will be self-consistent.
*
* Since this is a read-only operation, we take shared instead of
- * exclusive lock. There's not a whole lot of point to this, because all
+ * exclusive lock. There's not a whole lot of point to this, because all
* the normal operations require exclusive lock, but it doesn't hurt
* anything either. It will at least allow two backends to do
* GetLockStatusData in parallel.
@@ -3914,7 +3914,7 @@ lock_twophase_postabort(TransactionId xid, uint16 info,
* as MyProc->lxid, you might wonder if we really need both. The
* difference is that MyProc->lxid is set and cleared unlocked, and
* examined by procarray.c, while fpLocalTransactionId is protected by
- * backendLock and is used only by the locking subsystem. Doing it this
+ * backendLock and is used only by the locking subsystem. Doing it this
* way makes it easier to verify that there are no funny race conditions.
*
* We don't bother recording this lock in the local lock table, since it's
diff --git a/src/backend/storage/lmgr/lwlock.c b/src/backend/storage/lmgr/lwlock.c
index 4f88d3f1225..62fafca2c4b 100644
--- a/src/backend/storage/lmgr/lwlock.c
+++ b/src/backend/storage/lmgr/lwlock.c
@@ -6,7 +6,7 @@
* Lightweight locks are intended primarily to provide mutual exclusion of
* access to shared-memory data structures. Therefore, they offer both
* exclusive and shared lock modes (to support read/write and read-only
- * access to a shared object). There are few other frammishes. User-level
+ * access to a shared object). There are few other frammishes. User-level
* locking should be done with the full lock manager --- which depends on
* LWLocks to protect its shared state.
*
@@ -53,7 +53,7 @@ typedef struct LWLock
* (LWLockIds are indexes into the array.) We force the array stride to
* be a power of 2, which saves a few cycles in indexing, but more
* importantly also ensures that individual LWLocks don't cross cache line
- * boundaries. This reduces cache contention problems, especially on AMD
+ * boundaries. This reduces cache contention problems, especially on AMD
* Opterons. (Of course, we have to also ensure that the array start
* address is suitably aligned.)
*
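
The power-of-2 stride is typically achieved by padding each lock to a fixed size inside a union, so array indexing compiles to a shift and no element straddles a cache line. A sketch with illustrative field names and sizes:

typedef struct
{
    int mutex;                  /* stand-ins for the real lock fields */
    int shared_holders;
    int exclusive_holder;
} LWLockSketch;

#define LWLOCK_STRIDE 32        /* power of 2, >= sizeof(LWLockSketch) */

typedef union
{
    LWLockSketch lock;
    char         pad[LWLOCK_STRIDE];
} LWLockPadded;

/* With a power-of-2 stride, locks[i] indexing is a shift, and a 32-byte
 * element never crosses a 64-byte cache line, provided the array base is
 * suitably aligned (as the comment above notes). */
static LWLockPadded locks[128];
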
@@ -221,7 +221,7 @@ NumLWLocks(void)
* a loadable module.
*
* This is only useful if called from the _PG_init hook of a library that
- * is loaded into the postmaster via shared_preload_libraries. Once
+ * is loaded into the postmaster via shared_preload_libraries. Once
* shared memory has been allocated, calls will be ignored. (We could
* raise an error, but it seems better to make it a no-op, so that
* libraries containing such calls can be reloaded if needed.)
@@ -390,7 +390,7 @@ LWLockAcquire(LWLockId lockid, LWLockMode mode)
* in the presence of contention. The efficiency of being able to do that
* outweighs the inefficiency of sometimes wasting a process dispatch
* cycle because the lock is not free when a released waiter finally gets
- * to run. See pgsql-hackers archives for 29-Dec-01.
+ * to run. See pgsql-hackers archives for 29-Dec-01.
*/
for (;;)
{
@@ -580,7 +580,7 @@ LWLockConditionalAcquire(LWLockId lockid, LWLockMode mode)
/*
* LWLockAcquireOrWait - Acquire lock, or wait until it's free
*
- * The semantics of this function are a bit funky. If the lock is currently
+ * The semantics of this function are a bit funky. If the lock is currently
* free, it is acquired in the given mode, and the function returns true. If
* the lock isn't immediately free, the function waits until it is released
* and returns false, but does not acquire the lock.
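A hedged sketch of the intended call pattern, modeled on WAL flushing; the lock name and the work done under it are illustrative.

    if (LWLockAcquireOrWait(WALWriteLock, LW_EXCLUSIVE))
    {
        /* Lock was free: do the flush ourselves. */
        /* ... write and fsync WAL up to the target position ... */
        LWLockRelease(WALWriteLock);
    }
    else
    {
        /*
         * Lock was just released by another backend, which has probably
         * done our work for us; recheck the flushed position before
         * deciding whether to retry.
         */
    }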
diff --git a/src/backend/storage/lmgr/predicate.c b/src/backend/storage/lmgr/predicate.c
index 489078bf178..1347f584c76 100644
--- a/src/backend/storage/lmgr/predicate.c
+++ b/src/backend/storage/lmgr/predicate.c
@@ -32,11 +32,11 @@
* examining the MVCC data.)
*
* (1) Besides tuples actually read, they must cover ranges of tuples
- * which would have been read based on the predicate. This will
+ * which would have been read based on the predicate. This will
* require modelling the predicates through locks against database
* objects such as pages, index ranges, or entire tables.
*
- * (2) They must be kept in RAM for quick access. Because of this, it
+ * (2) They must be kept in RAM for quick access. Because of this, it
* isn't possible to always maintain tuple-level granularity -- when
* the space allocated to store these approaches exhaustion, a
* request for a lock may need to scan for situations where a single
@@ -49,7 +49,7 @@
*
* (4) While they are associated with a transaction, they must survive
* a successful COMMIT of that transaction, and remain until all
- * overlapping transactions complete. This even means that they
+ * overlapping transactions complete. This even means that they
* must survive termination of the transaction's process. If a
* top level transaction is rolled back, however, it is immediately
* flagged so that it can be ignored, and its SIREAD locks can be
@@ -90,7 +90,7 @@
* may yet matter because they overlap still-active transactions.
*
* SerializablePredicateLockListLock
- * - Protects the linked list of locks held by a transaction. Note
+ * - Protects the linked list of locks held by a transaction. Note
* that the locks themselves are also covered by the partition
* locks of their respective lock targets; this lock only affects
* the linked list connecting the locks related to a transaction.
@@ -101,11 +101,11 @@
* - It is relatively infrequent that another process needs to
* modify the list for a transaction, but it does happen for such
* things as index page splits for pages with predicate locks and
- * freeing of predicate locked pages by a vacuum process. When
+ * freeing of predicate locked pages by a vacuum process. When
* removing a lock in such cases, the lock itself contains the
* pointers needed to remove it from the list. When adding a
* lock in such cases, the lock can be added using the anchor in
- * the transaction structure. Neither requires walking the list.
+ * the transaction structure. Neither requires walking the list.
* - Cleaning up the list for a terminated transaction is sometimes
* not done on a retail basis, in which case no lock is required.
* - Due to the above, a process accessing its active transaction's
@@ -352,7 +352,7 @@ int max_predicate_locks_per_xact; /* set by guc.c */
/*
* This provides a list of objects in order to track transactions
- * participating in predicate locking. Entries in the list are fixed size,
+ * participating in predicate locking. Entries in the list are fixed size,
* and reside in shared memory. The memory address of an entry must remain
* fixed during its lifetime. The list will be protected from concurrent
* update externally; no provision is made in this code to manage that. The
@@ -544,7 +544,7 @@ SerializationNeededForWrite(Relation relation)
/*
* These functions are a simple implementation of a list for this specific
- * type of struct. If there is ever a generalized shared memory list, we
+ * type of struct. If there is ever a generalized shared memory list, we
* should probably switch to that.
*/
static SERIALIZABLEXACT *
@@ -764,7 +764,7 @@ OldSerXidPagePrecedesLogically(int p, int q)
int diff;
/*
- * We have to compare modulo (OLDSERXID_MAX_PAGE+1)/2. Both inputs should
+ * We have to compare modulo (OLDSERXID_MAX_PAGE+1)/2. Both inputs should
* be in the range 0..OLDSERXID_MAX_PAGE.
*/
Assert(p >= 0 && p <= OLDSERXID_MAX_PAGE);
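One way to spell out that circular comparison, consistent with the Assert above: differences of half the page space or more are treated as wraparound.

    diff = p - q;
    if (diff >= ((OLDSERXID_MAX_PAGE + 1) / 2))
        diff -= OLDSERXID_MAX_PAGE + 1;
    else if (diff < -((OLDSERXID_MAX_PAGE + 1) / 2))
        diff += OLDSERXID_MAX_PAGE + 1;
    return diff < 0;                    /* p logically precedes q */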
@@ -926,7 +926,7 @@ OldSerXidAdd(TransactionId xid, SerCommitSeqNo minConflictCommitSeqNo)
}
/*
- * Get the minimum commitSeqNo for any conflict out for the given xid. For
+ * Get the minimum commitSeqNo for any conflict out for the given xid. For
* a transaction which exists but has no conflict out, InvalidSerCommitSeqNo
* will be returned.
*/
@@ -979,7 +979,7 @@ OldSerXidSetActiveSerXmin(TransactionId xid)
/*
* When no sxacts are active, nothing overlaps, set the xid values to
* invalid to show that there are no valid entries. Don't clear headPage,
- * though. A new xmin might still land on that page, and we don't want to
+ * though. A new xmin might still land on that page, and we don't want to
* repeatedly zero out the same page.
*/
if (!TransactionIdIsValid(xid))
@@ -1464,7 +1464,7 @@ SummarizeOldestCommittedSxact(void)
/*
* Grab the first sxact off the finished list -- this will be the earliest
- * commit. Remove it from the list.
+ * commit. Remove it from the list.
*/
sxact = (SERIALIZABLEXACT *)
SHMQueueNext(FinishedSerializableTransactions,
@@ -1617,7 +1617,7 @@ SetSerializableTransactionSnapshot(Snapshot snapshot,
/*
* We do not allow SERIALIZABLE READ ONLY DEFERRABLE transactions to
* import snapshots, since there's no way to wait for a safe snapshot when
- * we're using the snap we're told to. (XXX instead of throwing an error,
+ * we're using the snap we're told to. (XXX instead of throwing an error,
* we could just ignore the XactDeferrable flag?)
*/
if (XactReadOnly && XactDeferrable)
@@ -1666,7 +1666,7 @@ GetSerializableTransactionSnapshotInt(Snapshot snapshot,
* release SerializableXactHashLock to call SummarizeOldestCommittedSxact,
* this means we have to create the sxact first, which is a bit annoying
* (in particular, an elog(ERROR) in procarray.c would cause us to leak
- * the sxact). Consider refactoring to avoid this.
+ * the sxact). Consider refactoring to avoid this.
*/
#ifdef TEST_OLDSERXID
SummarizeOldestCommittedSxact();
@@ -2048,7 +2048,7 @@ RemoveTargetIfNoLongerUsed(PREDICATELOCKTARGET *target, uint32 targettaghash)
/*
* Delete child target locks owned by this process.
* This implementation is assuming that the usage of each target tag field
- * is uniform. No need to make this hard if we don't have to.
+ * is uniform. No need to make this hard if we don't have to.
*
* We aren't acquiring lightweight locks for the predicate lock or lock
* target structures associated with this transaction unless we're going
@@ -2494,7 +2494,7 @@ PredicateLockTuple(Relation relation, HeapTuple tuple, Snapshot snapshot)
}
/*
- * Do quick-but-not-definitive test for a relation lock first. This will
+ * Do quick-but-not-definitive test for a relation lock first. This will
* never cause a return when the relation is *not* locked, but will
* occasionally let the check continue when there really *is* a relation
* level lock.
@@ -2806,7 +2806,7 @@ exit:
* transaction which is not serializable.
*
* NOTE: This is currently only called with transfer set to true, but that may
- * change. If we decide to clean up the locks from a table on commit of a
+ * change. If we decide to clean up the locks from a table on commit of a
* transaction which executed DROP TABLE, the false condition will be useful.
*/
static void
@@ -2887,7 +2887,7 @@ DropAllPredicateLocksFromTable(Relation relation, bool transfer)
continue; /* already the right lock */
/*
- * If we made it here, we have work to do. We make sure the heap
+ * If we made it here, we have work to do. We make sure the heap
* relation lock exists, then we walk the list of predicate locks for
* the old target we found, moving all locks to the heap relation lock
* -- unless they already hold that.
@@ -3232,7 +3232,7 @@ ReleasePredicateLocks(bool isCommit)
* If this value is changing, we don't care that much whether we get the
* old or new value -- it is just used to determine how far
* GlobalSerializableXmin must advance before this transaction can be fully
- * cleaned up. The worst that could happen is we wait for one more
+ * cleaned up. The worst that could happen is we wait for one more
* transaction to complete before freeing some RAM; correctness of visible
* behavior is not affected.
*/
@@ -3335,7 +3335,7 @@ ReleasePredicateLocks(bool isCommit)
}
/*
- * Release all outConflicts to committed transactions. If we're rolling
+ * Release all outConflicts to committed transactions. If we're rolling
* back clear them all. Set SXACT_FLAG_CONFLICT_OUT if any point to
* previously committed transactions.
*/
@@ -3654,7 +3654,7 @@ ClearOldPredicateLocks(void)
* matter -- but keep the transaction entry itself and any outConflicts.
*
* When the summarize flag is set, we've run short of room for sxact data
- * and must summarize to the SLRU. Predicate locks are transferred to a
+ * and must summarize to the SLRU. Predicate locks are transferred to a
* dummy "old" transaction, with duplicate locks on a single target
* collapsing to a single lock with the "latest" commitSeqNo from among
* the conflicting locks.
@@ -3847,7 +3847,7 @@ XidIsConcurrent(TransactionId xid)
/*
* CheckForSerializableConflictOut
* We are reading a tuple which has been modified. If it is visible to
- * us but has been deleted, that indicates a rw-conflict out. If it's
+ * us but has been deleted, that indicates a rw-conflict out. If it's
* not visible and was created by a concurrent (overlapping)
* serializable transaction, that is also a rw-conflict out,
*
@@ -3934,7 +3934,7 @@ CheckForSerializableConflictOut(bool visible, Relation relation,
Assert(TransactionIdFollowsOrEquals(xid, TransactionXmin));
/*
- * Find top level xid. Bail out if xid is too early to be a conflict, or
+ * Find top level xid. Bail out if xid is too early to be a conflict, or
* if it's our own xid.
*/
if (TransactionIdEquals(xid, GetTopTransactionIdIfAny()))
@@ -3999,7 +3999,7 @@ CheckForSerializableConflictOut(bool visible, Relation relation,
/*
* We have a conflict out to a transaction which has a conflict out to a
- * summarized transaction. That summarized transaction must have
+ * summarized transaction. That summarized transaction must have
* committed first, and we can't tell when it committed in relation to our
* snapshot acquisition, so something needs to be canceled.
*/
@@ -4033,7 +4033,7 @@ CheckForSerializableConflictOut(bool visible, Relation relation,
&& (!SxactHasConflictOut(sxact)
|| MySerializableXact->SeqNo.lastCommitBeforeSnapshot < sxact->SeqNo.earliestOutConflictCommit))
{
- /* Read-only transaction will appear to run first. No conflict. */
+ /* Read-only transaction will appear to run first. No conflict. */
LWLockRelease(SerializableXactHashLock);
return;
}
@@ -4624,7 +4624,7 @@ OnConflict_CheckForSerializationFailure(const SERIALIZABLEXACT *reader,
*
* If a dangerous structure is found, the pivot (the near conflict) is
* marked for death, because rolling back another transaction might mean
- * that we flail without ever making progress. This transaction is
+ * that we flail without ever making progress. This transaction is
* committing writes, so letting it commit ensures progress. If we
* canceled the far conflict, it might immediately fail again on retry.
*/
diff --git a/src/backend/storage/lmgr/proc.c b/src/backend/storage/lmgr/proc.c
index 69ef93b0f84..d23d866c4b4 100644
--- a/src/backend/storage/lmgr/proc.c
+++ b/src/backend/storage/lmgr/proc.c
@@ -229,10 +229,10 @@ InitProcGlobal(void)
/*
* Newly created PGPROCs for normal backends, autovacuum and bgworkers
- * must be queued up on the appropriate free list. Because there can
+ * must be queued up on the appropriate free list. Because there can
* only ever be a small, fixed number of auxiliary processes, no free
* list is used in that case; InitAuxiliaryProcess() instead uses a
- * linear search. PGPROCs for prepared transactions are added to a
+ * linear search. PGPROCs for prepared transactions are added to a
* free list by TwoPhaseShmemInit().
*/
if (i < MaxConnections)
@@ -291,7 +291,7 @@ InitProcess(void)
elog(ERROR, "you already exist");
/*
- * Initialize process-local latch support. This could fail if the kernel
+ * Initialize process-local latch support. This could fail if the kernel
* is low on resources, and if so we want to exit cleanly before acquiring
* any shared-memory resources.
*/
@@ -400,7 +400,7 @@ InitProcess(void)
/*
* We might be reusing a semaphore that belonged to a failed process. So
- * be careful and reinitialize its value here. (This is not strictly
+ * be careful and reinitialize its value here. (This is not strictly
* necessary anymore, but seems like a good idea for cleanliness.)
*/
PGSemaphoreReset(&MyProc->sem);
@@ -450,7 +450,7 @@ InitProcessPhase2(void)
*
* Auxiliary processes are presently not expected to wait for real (lockmgr)
* locks, so we need not set up the deadlock checker. They are never added
- * to the ProcArray or the sinval messaging mechanism, either. They also
+ * to the ProcArray or the sinval messaging mechanism, either. They also
* don't get a VXID assigned, since this is only useful when we actually
* hold lockmgr locks.
*
@@ -476,7 +476,7 @@ InitAuxiliaryProcess(void)
elog(ERROR, "you already exist");
/*
- * Initialize process-local latch support. This could fail if the kernel
+ * Initialize process-local latch support. This could fail if the kernel
* is low on resources, and if so we want to exit cleanly before acquiring
* any shared-memory resources.
*/
@@ -557,7 +557,7 @@ InitAuxiliaryProcess(void)
/*
* We might be reusing a semaphore that belonged to a failed process. So
- * be careful and reinitialize its value here. (This is not strictly
+ * be careful and reinitialize its value here. (This is not strictly
* necessary anymore, but seems like a good idea for cleanliness.)
*/
PGSemaphoreReset(&MyProc->sem);
@@ -715,7 +715,7 @@ LockErrorCleanup(void)
/*
* We used to do PGSemaphoreReset() here to ensure that our proc's wait
- * semaphore is reset to zero. This prevented a leftover wakeup signal
+ * semaphore is reset to zero. This prevented a leftover wakeup signal
* from remaining in the semaphore if someone else had granted us the lock
* we wanted before we were able to remove ourselves from the wait-list.
* However, now that ProcSleep loops until waitStatus changes, a leftover
@@ -847,7 +847,7 @@ ProcKill(int code, Datum arg)
/*
* AuxiliaryProcKill() -- Cut-down version of ProcKill for auxiliary
- * processes (bgwriter, etc). The PGPROC and sema are not released, only
+ * processes (bgwriter, etc). The PGPROC and sema are not released, only
* marked as not-in-use.
*/
static void
@@ -973,7 +973,7 @@ ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable)
*
* Special case: if I find I should go in front of some waiter, check to
* see if I conflict with already-held locks or the requests before that
- * waiter. If not, then just grant myself the requested lock immediately.
+ * waiter. If not, then just grant myself the requested lock immediately.
* This is the same as the test for immediate grant in LockAcquire, except
* we are only considering the part of the wait queue before my insertion
* point.
@@ -992,7 +992,7 @@ ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable)
if (lockMethodTable->conflictTab[lockmode] & proc->heldLocks)
{
/*
- * Yes, so we have a deadlock. Easiest way to clean up
+ * Yes, so we have a deadlock. Easiest way to clean up
* correctly is to call RemoveFromWaitQueue(), but we
* can't do that until we are *on* the wait queue. So, set
* a flag to check below, and break out of loop. Also,
@@ -1114,8 +1114,8 @@ ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable)
/*
* If someone wakes us between LWLockRelease and PGSemaphoreLock,
- * PGSemaphoreLock will not block. The wakeup is "saved" by the semaphore
- * implementation. While this is normally good, there are cases where a
+ * PGSemaphoreLock will not block. The wakeup is "saved" by the semaphore
+ * implementation. While this is normally good, there are cases where a
* saved wakeup might be leftover from a previous operation (for example,
* we aborted ProcWaitForSignal just before someone did ProcSendSignal).
* So, loop to wait again if the waitStatus shows we haven't been granted
@@ -1135,7 +1135,7 @@ ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable)
/*
* waitStatus could change from STATUS_WAITING to something else
- * asynchronously. Read it just once per loop to prevent surprising
+ * asynchronously. Read it just once per loop to prevent surprising
* behavior (such as missing log messages).
*/
myWaitStatus = MyProc->waitStatus;
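A sketch of the resulting wait loop (semaphore API per this era's backend code; deadlock-timeout handling elided):

    do
    {
        PGSemaphoreLock(&MyProc->sem, true);    /* may return on a saved wakeup */

        /* read waitStatus only once per iteration, per the comment above */
        myWaitStatus = MyProc->waitStatus;

        /* ... deadlock check and log-message handling elided ... */
    } while (myWaitStatus == STATUS_WAITING);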
@@ -1544,10 +1544,10 @@ check_done:
* This can share the semaphore normally used for waiting for locks,
* since a backend could never be waiting for a lock and a signal at
* the same time. As with locks, it's OK if the signal arrives just
- * before we actually reach the waiting state. Also as with locks,
+ * before we actually reach the waiting state. Also as with locks,
* it's necessary that the caller be robust against bogus wakeups:
* always check that the desired state has occurred, and wait again
- * if not. This copes with possible "leftover" wakeups.
+ * if not. This copes with possible "leftover" wakeups.
*/
void
ProcWaitForSignal(void)
diff --git a/src/backend/storage/lmgr/s_lock.c b/src/backend/storage/lmgr/s_lock.c
index ed1f56aa8b6..a82d7679ca2 100644
--- a/src/backend/storage/lmgr/s_lock.c
+++ b/src/backend/storage/lmgr/s_lock.c
@@ -78,7 +78,7 @@ s_lock(volatile slock_t *lock, const char *file, int line)
*
* We time out and declare error after NUM_DELAYS delays (thus, exactly
* that many tries). With the given settings, this will usually take 2 or
- * so minutes. It seems better to fix the total number of tries (and thus
+ * so minutes. It seems better to fix the total number of tries (and thus
* the probability of unintended failure) than to fix the total time
* spent.
*
@@ -141,7 +141,7 @@ s_lock(volatile slock_t *lock, const char *file, int line)
* Note: spins_per_delay is local within our current process. We want to
* average these observations across multiple backends, since it's
* relatively rare for this function to even get entered, and so a single
- * backend might not live long enough to converge on a good value. That
+ * backend might not live long enough to converge on a good value. That
* is handled by the two routines below.
*/
if (cur_delay == 0)
@@ -181,7 +181,7 @@ update_spins_per_delay(int shared_spins_per_delay)
/*
* We use an exponential moving average with a relatively slow adaption
* rate, so that noise in any one backend's result won't affect the shared
- * value too much. As long as both inputs are within the allowed range,
+ * value too much. As long as both inputs are within the allowed range,
* the result must be too, so we need not worry about clamping the result.
*
* We deliberately truncate rather than rounding; this is so that single
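The averaging step itself is one line; a sketch with a slow 15/16 adaptation weight, where integer division supplies the deliberate truncation:

    /* keep 15 parts old estimate, 1 part new shared observation */
    spins_per_delay = (spins_per_delay * 15 + shared_spins_per_delay) / 16;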
diff --git a/src/backend/storage/lmgr/spin.c b/src/backend/storage/lmgr/spin.c
index 5503925788e..2864790d82d 100644
--- a/src/backend/storage/lmgr/spin.c
+++ b/src/backend/storage/lmgr/spin.c
@@ -5,7 +5,7 @@
*
*
* For machines that have test-and-set (TAS) instructions, s_lock.h/.c
- * define the spinlock implementation. This file contains only a stub
+ * define the spinlock implementation. This file contains only a stub
* implementation for spinlocks using PGSemaphores. Unless semaphores
* are implemented in a way that doesn't involve a kernel call, this
* is too slow to be very useful :-(
diff --git a/src/backend/storage/page/bufpage.c b/src/backend/storage/page/bufpage.c
index 97ac784d89f..93fdb54fded 100644
--- a/src/backend/storage/page/bufpage.c
+++ b/src/backend/storage/page/bufpage.c
@@ -62,7 +62,7 @@ PageInit(Page page, Size pageSize, Size specialSize)
* PageIsVerified
* Check that the page header and checksum (if any) appear valid.
*
- * This is called when a page has just been read in from disk. The idea is
+ * This is called when a page has just been read in from disk. The idea is
* to cheaply detect trashed pages before we go nuts following bogus item
* pointers, testing invalid transaction identifiers, etc.
*
@@ -154,7 +154,7 @@ PageIsVerified(Page page, BlockNumber blkno)
/*
* PageAddItem
*
- * Add an item to a page. Return value is offset at which it was
+ * Add an item to a page. Return value is offset at which it was
* inserted, or InvalidOffsetNumber if there's not room to insert.
*
* If overwrite is true, we just store the item at the specified
@@ -754,7 +754,7 @@ PageIndexTupleDelete(Page page, OffsetNumber offnum)
* PageIndexMultiDelete
*
* This routine handles the case of deleting multiple tuples from an
- * index page at once. It is considerably faster than a loop around
+ * index page at once. It is considerably faster than a loop around
* PageIndexTupleDelete ... however, the caller *must* supply the array
* of item numbers to be deleted in item number order!
*/
@@ -888,7 +888,7 @@ PageIndexMultiDelete(Page page, OffsetNumber *itemnos, int nitems)
* If checksums are disabled, or if the page is not initialized, just return
* the input. Otherwise, we must make a copy of the page before calculating
* the checksum, to prevent concurrent modifications (e.g. setting hint bits)
- * from making the final checksum invalid. It doesn't matter if we include or
+ * from making the final checksum invalid. It doesn't matter if we include or
* exclude hints during the copy, as long as we write a valid page and
* associated checksum.
*
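A sketch of the copy-then-checksum step described above; the static scratch buffer and the simplified control flow are assumptions.

    static char pageCopy[BLCKSZ];       /* assumed scratch buffer */

    memcpy(pageCopy, (char *) page, BLCKSZ);
    ((PageHeader) pageCopy)->pd_checksum = pg_checksum_page(pageCopy, blkno);
    return pageCopy;                    /* caller writes the copy, not the original */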
diff --git a/src/backend/storage/smgr/md.c b/src/backend/storage/smgr/md.c
index e62918195ea..ef23f8c40c4 100644
--- a/src/backend/storage/smgr/md.c
+++ b/src/backend/storage/smgr/md.c
@@ -87,7 +87,7 @@
* not needed because of an mdtruncate() operation. The reason for leaving
* them present at size zero, rather than unlinking them, is that other
* backends and/or the checkpointer might be holding open file references to
- * such segments. If the relation expands again after mdtruncate(), such
+ * such segments. If the relation expands again after mdtruncate(), such
* that a deactivated segment becomes active again, it is important that
* such file references still be valid --- else data might get written
* out to an unlinked old copy of a segment file that will eventually
@@ -124,7 +124,7 @@ static MemoryContext MdCxt; /* context for all md.c allocations */
* we keep track of pending fsync operations: we need to remember all relation
* segments that have been written since the last checkpoint, so that we can
* fsync them down to disk before completing the next checkpoint. This hash
- * table remembers the pending operations. We use a hash table mostly as
+ * table remembers the pending operations. We use a hash table mostly as
* a convenient way of merging duplicate requests.
*
* We use a similar mechanism to remember no-longer-needed files that can
@@ -292,7 +292,7 @@ mdcreate(SMgrRelation reln, ForkNumber forkNum, bool isRedo)
* During bootstrap, there are cases where a system relation will be
* accessed (by internal backend processes) before the bootstrap
* script nominally creates it. Therefore, allow the file to exist
- * already, even if isRedo is not set. (See also mdopen)
+ * already, even if isRedo is not set. (See also mdopen)
*/
if (isRedo || IsBootstrapProcessingMode())
fd = PathNameOpenFile(path, O_RDWR | PG_BINARY, 0600);
@@ -337,7 +337,7 @@ mdcreate(SMgrRelation reln, ForkNumber forkNum, bool isRedo)
* if the contents of the file were repopulated by subsequent WAL entries.
* But if we didn't WAL-log insertions, but instead relied on fsyncing the
* file after populating it (as for instance CLUSTER and CREATE INDEX do),
- * the contents of the file would be lost forever. By leaving the empty file
+ * the contents of the file would be lost forever. By leaving the empty file
* until after the next checkpoint, we prevent reassignment of the relfilenode
* number until it's safe, because relfilenode assignment skips over any
* existing file.
@@ -350,7 +350,7 @@ mdcreate(SMgrRelation reln, ForkNumber forkNum, bool isRedo)
*
* All the above applies only to the relation's main fork; other forks can
* just be removed immediately, since they are not needed to prevent the
- * relfilenode number from being recycled. Also, we do not carefully
+ * relfilenode number from being recycled. Also, we do not carefully
* track whether other forks have been created or not, but just attempt to
* unlink them unconditionally; so we should never complain about ENOENT.
*
@@ -367,7 +367,7 @@ mdunlink(RelFileNodeBackend rnode, ForkNumber forkNum, bool isRedo)
{
/*
* We have to clean out any pending fsync requests for the doomed
- * relation, else the next mdsync() will fail. There can't be any such
+ * relation, else the next mdsync() will fail. There can't be any such
* requests for a temp relation, though. We can send just one request
* even when deleting multiple forks, since the fsync queuing code accepts
* the "InvalidForkNumber = all forks" convention.
@@ -504,7 +504,7 @@ mdextend(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum,
/*
* Note: because caller usually obtained blocknum by calling mdnblocks,
* which did a seek(SEEK_END), this seek is often redundant and will be
- * optimized away by fd.c. It's not redundant, however, if there is a
+ * optimized away by fd.c. It's not redundant, however, if there is a
* partial page at the end of the file. In that case we want to try to
* overwrite the partial page with a full page. It's also not redundant
* if bufmgr.c had to dump another buffer of the same file to make room
@@ -804,9 +804,9 @@ mdnblocks(SMgrRelation reln, ForkNumber forknum)
* exactly RELSEG_SIZE long, and it's useless to recheck that each time.
*
* NOTE: this assumption could only be wrong if another backend has
- * truncated the relation. We rely on higher code levels to handle that
+ * truncated the relation. We rely on higher code levels to handle that
* scenario by closing and re-opening the md fd, which is handled via
- * relcache flush. (Since the checkpointer doesn't participate in
+ * relcache flush. (Since the checkpointer doesn't participate in
* relcache flush, it could have segment chain entries for inactive
* segments; that's OK because the checkpointer never needs to compute
* relation size.)
@@ -1000,7 +1000,7 @@ mdsync(void)
/*
* If we are in the checkpointer, the sync had better include all fsync
- * requests that were queued by backends up to this point. The tightest
+ * requests that were queued by backends up to this point. The tightest
* race condition that could occur is that a buffer that must be written
* and fsync'd for the checkpoint could have been dumped by a backend just
* before it was visited by BufferSync(). We know the backend will have
@@ -1116,7 +1116,7 @@ mdsync(void)
* that have been deleted (unlinked) by the time we get to
* them. Rather than just hoping an ENOENT (or EACCES on
* Windows) error can be ignored, what we do on error is
- * absorb pending requests and then retry. Since mdunlink()
+ * absorb pending requests and then retry. Since mdunlink()
* queues a "cancel" message before actually unlinking, the
* fsync request is guaranteed to be marked canceled after the
* absorb if it really was this case. DROP DATABASE likewise
@@ -1220,7 +1220,7 @@ mdsync(void)
/*
* We've finished everything that was requested before we started to
- * scan the entry. If no new requests have been inserted meanwhile,
+ * scan the entry. If no new requests have been inserted meanwhile,
* remove the entry. Otherwise, update its cycle counter, as all the
* requests now in it must have arrived during this cycle.
*/
@@ -1325,7 +1325,7 @@ mdpostckpt(void)
/*
* As in mdsync, we don't want to stop absorbing fsync requests for a
- * long time when there are many deletions to be done. We can safely
+ * long time when there are many deletions to be done. We can safely
* call AbsorbFsyncRequests() at this point in the loop (note it might
* try to delete list entries).
*/
@@ -1450,7 +1450,7 @@ RememberFsyncRequest(RelFileNode rnode, ForkNumber forknum, BlockNumber segno)
/*
* We can't just delete the entry since mdsync could have an
* active hashtable scan. Instead we delete the bitmapsets; this
- * is safe because of the way mdsync is coded. We also set the
+ * is safe because of the way mdsync is coded. We also set the
* "canceled" flags so that mdsync can tell that a cancel arrived
* for the fork(s).
*/
@@ -1552,7 +1552,7 @@ RememberFsyncRequest(RelFileNode rnode, ForkNumber forknum, BlockNumber segno)
/*
* NB: it's intentional that we don't change cycle_ctr if the entry
- * already exists. The cycle_ctr must represent the oldest fsync
+ * already exists. The cycle_ctr must represent the oldest fsync
* request that could be in the entry.
*/
@@ -1723,7 +1723,7 @@ _mdfd_getseg(SMgrRelation reln, ForkNumber forknum, BlockNumber blkno,
{
/*
* Normally we will create new segments only if authorized by the
- * caller (i.e., we are doing mdextend()). But when doing WAL
+ * caller (i.e., we are doing mdextend()). But when doing WAL
* recovery, create segments anyway; this allows cases such as
* replaying WAL data that has a write into a high-numbered
* segment of a relation that was later deleted. We want to go
diff --git a/src/backend/storage/smgr/smgr.c b/src/backend/storage/smgr/smgr.c
index 4cc5d7e1ec4..f119889b314 100644
--- a/src/backend/storage/smgr/smgr.c
+++ b/src/backend/storage/smgr/smgr.c
@@ -494,7 +494,7 @@ smgrdounlinkall(SMgrRelation *rels, int nrels, bool isRedo)
}
/*
- * Get rid of any remaining buffers for the relations. bufmgr will just
+ * Get rid of any remaining buffers for the relations. bufmgr will just
* drop them without bothering to write the contents.
*/
DropRelFileNodesAllBuffers(rnodes, nrels);
@@ -679,7 +679,7 @@ smgrtruncate(SMgrRelation reln, ForkNumber forknum, BlockNumber nblocks)
* Send a shared-inval message to force other backends to close any smgr
* references they may have for this rel. This is useful because they
* might have open file pointers to segments that got removed, and/or
- * smgr_targblock variables pointing past the new rel end. (The inval
+ * smgr_targblock variables pointing past the new rel end. (The inval
* message will come back to our backend, too, causing a
* probably-unnecessary local smgr flush. But we don't expect that this
* is a performance-critical path.) As in the unlink code, we want to be
diff --git a/src/backend/tcop/fastpath.c b/src/backend/tcop/fastpath.c
index bd0801c0bf7..9ce817ba276 100644
--- a/src/backend/tcop/fastpath.c
+++ b/src/backend/tcop/fastpath.c
@@ -44,8 +44,8 @@
* each fastpath call as a separate transaction command, and so the
* cached data could never actually have been reused. If it had worked
* as intended, it would have had problems anyway with dangling references
- * in the FmgrInfo struct. So, forget about caching and just repeat the
- * syscache fetches on each usage. They're not *that* expensive.
+ * in the FmgrInfo struct. So, forget about caching and just repeat the
+ * syscache fetches on each usage. They're not *that* expensive.
*/
struct fp_info
{
@@ -205,7 +205,7 @@ fetch_fp_info(Oid func_id, struct fp_info * fip)
/*
* Since the validity of this structure is determined by whether the
- * funcid is OK, we clear the funcid here. It must not be set to the
+ * funcid is OK, we clear the funcid here. It must not be set to the
* correct value until we are about to return with a good struct fp_info,
* since we can be interrupted (i.e., with an ereport(ERROR, ...)) at any
* time. [No longer really an issue since we don't save the struct
@@ -257,7 +257,7 @@ fetch_fp_info(Oid func_id, struct fp_info * fip)
* RETURNS:
* 0 if successful completion, EOF if frontend connection lost.
*
- * Note: All ordinary errors result in ereport(ERROR,...). However,
+ * Note: All ordinary errors result in ereport(ERROR,...). However,
* if we lose the frontend connection there is no one to ereport to,
* and no use in proceeding...
*
@@ -526,7 +526,7 @@ parse_fcall_arguments(StringInfo msgBuf, struct fp_info * fip,
/*
* Since stringinfo.c keeps a trailing null in place even for
- * binary data, the contents of abuf are a valid C string. We
+ * binary data, the contents of abuf are a valid C string. We
* have to do encoding conversion before calling the typinput
* routine, though.
*/
diff --git a/src/backend/tcop/postgres.c b/src/backend/tcop/postgres.c
index c466453a767..737feb6f69f 100644
--- a/src/backend/tcop/postgres.c
+++ b/src/backend/tcop/postgres.c
@@ -452,7 +452,7 @@ SocketBackend(StringInfo inBuf)
default:
/*
- * Otherwise we got garbage from the frontend. We treat this as
+ * Otherwise we got garbage from the frontend. We treat this as
* fatal because we have probably lost message boundary sync, and
* there's no good way to recover.
*/
@@ -854,7 +854,7 @@ exec_simple_query(const char *query_string)
ResetUsage();
/*
- * Start up a transaction command. All queries generated by the
+ * Start up a transaction command. All queries generated by the
* query_string will be in this same command block, *unless* we find a
* BEGIN/COMMIT/ABORT statement; we have to force a new xact command after
* one of those, else bad things will happen in xact.c. (Note that this
@@ -863,7 +863,7 @@ exec_simple_query(const char *query_string)
start_xact_command();
/*
- * Zap any pre-existing unnamed statement. (While not strictly necessary,
+ * Zap any pre-existing unnamed statement. (While not strictly necessary,
* it seems best to define simple-Query mode as if it used the unnamed
* statement and portal; this ensures we recover any storage used by prior
* unnamed operations.)
@@ -922,7 +922,7 @@ exec_simple_query(const char *query_string)
/*
* Get the command name for use in status display (it also becomes the
- * default completion tag, down inside PortalRun). Set ps_status and
+ * default completion tag, down inside PortalRun). Set ps_status and
* do any special start-of-SQL-command processing needed by the
* destination.
*/
@@ -1010,7 +1010,7 @@ exec_simple_query(const char *query_string)
/*
* Select the appropriate output format: text unless we are doing a
- * FETCH from a binary cursor. (Pretty grotty to have to do this here
+ * FETCH from a binary cursor. (Pretty grotty to have to do this here
* --- but it avoids grottiness in other places. Ah, the joys of
* backward compatibility...)
*/
@@ -1311,7 +1311,7 @@ exec_parse_message(const char *query_string, /* string to execute */
}
else
{
- /* Empty input string. This is legal. */
+ /* Empty input string. This is legal. */
raw_parse_tree = NULL;
commandTag = NULL;
psrc = CreateCachedPlan(raw_parse_tree, query_string, commandTag);
@@ -1361,7 +1361,7 @@ exec_parse_message(const char *query_string, /* string to execute */
/*
* We do NOT close the open transaction command here; that only happens
- * when the client sends Sync. Instead, do CommandCounterIncrement just
+ * when the client sends Sync. Instead, do CommandCounterIncrement just
* in case something happened during parse/plan.
*/
CommandCounterIncrement();
@@ -1503,7 +1503,7 @@ exec_bind_message(StringInfo input_message)
* If we are in aborted transaction state, the only portals we can
* actually run are those containing COMMIT or ROLLBACK commands. We
* disallow binding anything else to avoid problems with infrastructure
- * that expects to run inside a valid transaction. We also disallow
+ * that expects to run inside a valid transaction. We also disallow
* binding any parameters, since we can't risk calling user-defined I/O
* functions.
*/
@@ -1592,7 +1592,7 @@ exec_bind_message(StringInfo input_message)
/*
* Rather than copying data around, we just set up a phony
* StringInfo pointing to the correct portion of the message
- * buffer. We assume we can scribble on the message buffer so
+ * buffer. We assume we can scribble on the message buffer so
* as to maintain the convention that StringInfos have a
* trailing null. This is grotty but is a big win when
* dealing with very large parameter strings.
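The phony StringInfo amounts to aiming the struct's fields at the parameter's bytes inside the message buffer and temporarily scribbling a trailing null; a sketch, where pvalue and plength denote the parameter's location and length as in the surrounding code:

    StringInfoData pbuf;
    char        csave;

    pbuf.data = (char *) pvalue;        /* no copy: points into the message */
    pbuf.maxlen = plength + 1;
    pbuf.len = plength;
    pbuf.cursor = 0;

    csave = pbuf.data[plength];         /* save the byte we overwrite ... */
    pbuf.data[plength] = '\0';          /* ... to fake the trailing null */
    /* ... invoke the type's input/receive function on pbuf ... */
    pbuf.data[plength] = csave;         /* restore the message buffer */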
@@ -1942,7 +1942,7 @@ exec_execute_message(const char *portal_name, long max_rows)
if (is_xact_command)
{
/*
- * If this was a transaction control statement, commit it. We
+ * If this was a transaction control statement, commit it. We
* will start a new xact command for the next command (if any).
*/
finish_xact_command();
@@ -2348,7 +2348,7 @@ exec_describe_portal_message(const char *portal_name)
/*
* If we are in aborted transaction state, we can't run
* SendRowDescriptionMessage(), because that needs catalog accesses.
- * Hence, refuse to Describe portals that return data. (We shouldn't just
+ * Hence, refuse to Describe portals that return data. (We shouldn't just
* refuse all Describes, since that might break the ability of some
* clients to issue COMMIT or ROLLBACK commands, if they use code that
* blindly Describes whatever it does.)
@@ -2565,7 +2565,7 @@ quickdie(SIGNAL_ARGS)
on_exit_reset();
/*
- * Note we do exit(2) not exit(0). This is to force the postmaster into a
+ * Note we do exit(2) not exit(0). This is to force the postmaster into a
* system reset cycle if some idiot DBA sends a manual SIGQUIT to a random
* backend. This is necessary precisely because we don't clean up our
* shared memory state. (The "dead man switch" mechanism in pmsignal.c
@@ -3294,7 +3294,7 @@ process_postgres_switches(int argc, char *argv[], GucContext ctx,
#endif
/*
- * Parse command-line options. CAUTION: keep this in sync with
+ * Parse command-line options. CAUTION: keep this in sync with
* postmaster/postmaster.c (the option sets should not conflict) and with
* the common help() function in main/main.c.
*/
@@ -3597,7 +3597,7 @@ PostgresMain(int argc, char *argv[],
* we have set up the handler.
*
* Also note: it's best not to use any signals that are SIG_IGNored in the
- * postmaster. If such a signal arrives before we are able to change the
+ * postmaster. If such a signal arrives before we are able to change the
* handler to non-SIG_IGN, it'll get dropped. Instead, make a dummy
* handler in the postmaster to reserve the signal. (Of course, this isn't
* an issue for signals that are locally generated, such as SIGALRM and
@@ -3803,7 +3803,7 @@ PostgresMain(int argc, char *argv[],
/*
* NOTE: if you are tempted to add more code in this if-block,
* consider the high probability that it should be in
- * AbortTransaction() instead. The only stuff done directly here
+ * AbortTransaction() instead. The only stuff done directly here
* should be stuff that is guaranteed to apply *only* for outer-level
* error recovery, such as adjusting the FE/BE protocol status.
*/
@@ -3916,7 +3916,7 @@ PostgresMain(int argc, char *argv[],
* collector, and to update the PS stats display. We avoid doing
* those every time through the message loop because it'd slow down
* processing of batched messages, and because we don't want to report
- * uncommitted updates (that confuses autovacuum). The notification
+ * uncommitted updates (that confuses autovacuum). The notification
* processor wants a call too, if we are not in a transaction block.
*/
if (send_ready_for_query)
diff --git a/src/backend/tcop/pquery.c b/src/backend/tcop/pquery.c
index f400e935d13..94022bccdb9 100644
--- a/src/backend/tcop/pquery.c
+++ b/src/backend/tcop/pquery.c
@@ -558,7 +558,7 @@ PortalStart(Portal portal, ParamListInfo params,
/*
* We don't start the executor until we are told to run the
- * portal. We do need to set up the result tupdesc.
+ * portal. We do need to set up the result tupdesc.
*/
{
PlannedStmt *pstmt;
@@ -908,7 +908,7 @@ PortalRunSelect(Portal portal,
Assert(queryDesc || portal->holdStore);
/*
- * Force the queryDesc destination to the right thing. This supports
+ * Force the queryDesc destination to the right thing. This supports
* MOVE, for example, which will pass in dest = DestNone. This is okay to
* change as long as we do it on every fetch. (The Executor must not
* assume that dest never changes.)
@@ -1156,12 +1156,12 @@ PortalRunUtility(Portal portal, Node *utilityStmt, bool isTopLevel,
elog(DEBUG3, "ProcessUtility");
/*
- * Set snapshot if utility stmt needs one. Most reliable way to do this
+ * Set snapshot if utility stmt needs one. Most reliable way to do this
* seems to be to enumerate those that do not need one; this is a short
* list. Transaction control, LOCK, and SET must *not* set a snapshot
* since they need to be executable at the start of a transaction-snapshot
* mode transaction without freezing a snapshot. By extension we allow
- * SHOW not to set a snapshot. The other stmts listed are just efficiency
+ * SHOW not to set a snapshot. The other stmts listed are just efficiency
* hacks. Beware of listing anything that can modify the database --- if,
* say, it has to update an index with expressions that invoke
* user-defined functions, then it had better have a snapshot.
@@ -1196,7 +1196,7 @@ PortalRunUtility(Portal portal, Node *utilityStmt, bool isTopLevel,
/*
* Some utility commands may pop the ActiveSnapshot stack from under us,
- * so we only pop the stack if we actually see a snapshot set. Note that
+ * so we only pop the stack if we actually see a snapshot set. Note that
* the set of utility commands that do this must be the same set
* disallowed to run inside a transaction; otherwise, we could be popping
* a snapshot that belongs to some other operation.
@@ -1518,7 +1518,7 @@ DoPortalRunFetch(Portal portal,
* Definition: Rewind to start, advance count-1 rows, return
* next row (if any). In practice, if the goal is less than
* halfway back to the start, it's better to scan from where
- * we are. In any case, we arrange to fetch the target row
+ * we are. In any case, we arrange to fetch the target row
* going forwards.
*/
if (portal->posOverflow || portal->portalPos == LONG_MAX ||
@@ -1625,7 +1625,7 @@ DoPortalRunFetch(Portal portal,
* If we are sitting on a row, back up one so we can re-fetch it.
* If we are not sitting on a row, we still have to start up and
* shut down the executor so that the destination is initialized
- * and shut down correctly; so keep going. To PortalRunSelect,
+ * and shut down correctly; so keep going. To PortalRunSelect,
* count == 0 means we will retrieve no row.
*/
if (on_row)
diff --git a/src/backend/tcop/utility.c b/src/backend/tcop/utility.c
index c86a11c2f30..c900c85d81d 100644
--- a/src/backend/tcop/utility.c
+++ b/src/backend/tcop/utility.c
@@ -231,7 +231,7 @@ PreventCommandIfReadOnly(const char *cmdname)
* PreventCommandDuringRecovery: throw error if RecoveryInProgress
*
* The majority of operations that are unsafe in a Hot Standby slave
- * will be rejected by XactReadOnly tests. However there are a few
+ * will be rejected by XactReadOnly tests. However there are a few
* commands that are allowed in "read-only" xacts but cannot be allowed
* in Hot Standby mode. Those commands should call this function.
*/
@@ -953,7 +953,7 @@ ProcessUtilitySlow(Node *parsetree,
LOCKMODE lockmode;
/*
- * Figure out lock mode, and acquire lock. This also does
+ * Figure out lock mode, and acquire lock. This also does
* basic permissions checks, so that we won't wait for a
* lock on (for example) a relation on which we have no
* permissions.
diff --git a/src/backend/tsearch/ts_locale.c b/src/backend/tsearch/ts_locale.c
index 63d32edd3c1..d9281401baf 100644
--- a/src/backend/tsearch/ts_locale.c
+++ b/src/backend/tsearch/ts_locale.c
@@ -90,7 +90,7 @@ t_isprint(const char *ptr)
/*
- * Set up to read a file using tsearch_readline(). This facility is
+ * Set up to read a file using tsearch_readline(). This facility is
* better than just reading the file directly because it provides error
* context pointing to the specific line where a problem is detected.
*
@@ -168,7 +168,7 @@ tsearch_readline_callback(void *arg)
/*
* We can't include the text of the config line for errors that occur
- * during t_readline() itself. This is only partly a consequence of our
+ * during t_readline() itself. This is only partly a consequence of our
* arms-length use of that routine: the major cause of such errors is
* encoding violations, and we daren't try to print error messages
* containing badly-encoded data.
diff --git a/src/backend/tsearch/ts_selfuncs.c b/src/backend/tsearch/ts_selfuncs.c
index 8fb8875b66d..b3fcadc1674 100644
--- a/src/backend/tsearch/ts_selfuncs.c
+++ b/src/backend/tsearch/ts_selfuncs.c
@@ -319,7 +319,7 @@ tsquery_opr_selec(QueryItem *item, char *operand,
* exclusive. We treat occurrences as independent events.
*
* This is only a good plan if we have a pretty fair number of
- * MCELEMs available; we set the threshold at 100. If no stats or
+ * MCELEMs available; we set the threshold at 100. If no stats or
* insufficient stats, arbitrarily use DEFAULT_TS_MATCH_SEL*4.
*/
if (lookup == NULL || length < 100)
diff --git a/src/backend/tsearch/ts_typanalyze.c b/src/backend/tsearch/ts_typanalyze.c
index c8caa8003f3..54364327b5a 100644
--- a/src/backend/tsearch/ts_typanalyze.c
+++ b/src/backend/tsearch/ts_typanalyze.c
@@ -114,13 +114,13 @@ ts_typanalyze(PG_FUNCTION_ARGS)
* language's frequency table, where K is the target number of entries in
* the MCELEM array plus an arbitrary constant, meant to reflect the fact
* that the most common words in any language would usually be stopwords
- * so we will not actually see them in the input. We assume that the
+ * so we will not actually see them in the input. We assume that the
* distribution of word frequencies (including the stopwords) follows Zipf's
* law with an exponent of 1.
*
* Assuming Zipfian distribution, the frequency of the K'th word is equal
* to 1/(K * H(W)) where H(n) is 1/2 + 1/3 + ... + 1/n and W is the number of
- * words in the language. Putting W as one million, we get roughly 0.07/K.
+ * words in the language. Putting W as one million, we get roughly 0.07/K.
* Assuming top 10 words are stopwords gives s = 0.07/(K + 10). We set
* epsilon = s/10, which gives bucket width w = (K + 10)/0.007 and
* maximum expected hashtable size of about 1000 * (K + 10).
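Plugging in the default statistics_target of 100 (so K = 1000, per the multiplier discussed below) gives a feel for the magnitudes:

    s       = 0.07 / (K + 10)  = 0.07 / 1010   ~ 6.9e-5
    epsilon = s / 10                           ~ 6.9e-6
    w       = (K + 10) / 0.007 = 1 / epsilon   ~ 144,000 lexemes per bucket
    max expected hashtable size ~ 1000 * (K + 10) = 1,010,000 entries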
@@ -161,7 +161,7 @@ compute_tsvector_stats(VacAttrStats *stats,
TrackItem *item;
/*
- * We want statistics_target * 10 lexemes in the MCELEM array. This
+ * We want statistics_target * 10 lexemes in the MCELEM array. This
* multiplier is pretty arbitrary, but is meant to reflect the fact that
* the number of individual lexeme values tracked in pg_statistic ought to
* be more than the number of values for a simple scalar column.
@@ -232,7 +232,7 @@ compute_tsvector_stats(VacAttrStats *stats,
/*
* We loop through the lexemes in the tsvector and add them to our
- * tracking hashtable. Note: the hashtable entries will point into
+ * tracking hashtable. Note: the hashtable entries will point into
* the (detoasted) tsvector value, therefore we cannot free that
* storage until we're done.
*/
@@ -299,7 +299,7 @@ compute_tsvector_stats(VacAttrStats *stats,
/*
* Construct an array of the interesting hashtable items, that is,
- * those meeting the cutoff frequency (s - epsilon)*N. Also identify
+ * those meeting the cutoff frequency (s - epsilon)*N. Also identify
* the minimum and maximum frequencies among these items.
*
* Since epsilon = s/10 and bucket_width = 1/epsilon, the cutoff
@@ -332,7 +332,7 @@ compute_tsvector_stats(VacAttrStats *stats,
/*
* If we obtained more lexemes than we really want, get rid of those
- * with least frequencies. The easiest way is to qsort the array into
+ * with least frequencies. The easiest way is to qsort the array into
* descending frequency order and truncate the array.
*/
if (num_mcelem < track_len)
@@ -363,7 +363,7 @@ compute_tsvector_stats(VacAttrStats *stats,
* they get sorted on frequencies. The rationale is that we
* usually search through most common elements looking for a
* specific value, so we can grab its frequency. When values are
- * presorted we can employ binary search for that. See
+ * presorted we can employ binary search for that. See
* ts_selfuncs.c for a real usage scenario.
*/
qsort(sort_table, num_mcelem, sizeof(TrackItem *),
diff --git a/src/backend/tsearch/ts_utils.c b/src/backend/tsearch/ts_utils.c
index b92a547e92e..abc6e812f74 100644
--- a/src/backend/tsearch/ts_utils.c
+++ b/src/backend/tsearch/ts_utils.c
@@ -23,8 +23,8 @@
/*
* Given the base name and extension of a tsearch config file, return
- * its full path name. The base name is assumed to be user-supplied,
- * and is checked to prevent pathname attacks. The extension is assumed
+ * its full path name. The base name is assumed to be user-supplied,
+ * and is checked to prevent pathname attacks. The extension is assumed
* to be safe.
*
* The result is a palloc'd string.
@@ -37,7 +37,7 @@ get_tsearch_config_filename(const char *basename,
char *result;
/*
- * We limit the basename to contain a-z, 0-9, and underscores. This may
+ * We limit the basename to contain a-z, 0-9, and underscores. This may
* be overly restrictive, but we don't want to allow access to anything
* outside the tsearch_data directory, so for instance '/' *must* be
* rejected, and on some platforms '\' and ':' are risky as well. Allowing
@@ -61,7 +61,7 @@ get_tsearch_config_filename(const char *basename,
/*
* Reads a stop-word file. Each word is run through 'wordop'
- * function, if given. wordop may either modify the input in-place,
+ * function, if given. wordop may either modify the input in-place,
* or palloc a new version.
*/
void
diff --git a/src/backend/tsearch/wparser_def.c b/src/backend/tsearch/wparser_def.c
index a8e9344f501..cfd07fd4bf3 100644
--- a/src/backend/tsearch/wparser_def.c
+++ b/src/backend/tsearch/wparser_def.c
@@ -330,7 +330,7 @@ TParserInit(char *str, int len)
/*
* Use of %.*s here is a bit risky since it can misbehave if the data is
- * not in what libc thinks is the prevailing encoding. However, since
+ * not in what libc thinks is the prevailing encoding. However, since
* this is just a debugging aid, we choose to live with that.
*/
fprintf(stderr, "parsing \"%.*s\"\n", len, str);
diff --git a/src/backend/utils/adt/acl.c b/src/backend/utils/adt/acl.c
index 5869ec2e589..23b66b3e721 100644
--- a/src/backend/utils/adt/acl.c
+++ b/src/backend/utils/adt/acl.c
@@ -123,7 +123,7 @@ static Oid get_role_oid_or_public(const char *rolname);
/*
* getid
* Consumes the first alphanumeric string (identifier) found in string
- * 's', ignoring any leading white space. If it finds a double quote
+ * 's', ignoring any leading white space. If it finds a double quote
* it returns the word inside the quotes.
*
* RETURNS:
@@ -229,7 +229,7 @@ putid(char *p, const char *s)
*
* RETURNS:
* the string position in 's' immediately following the ACL
- * specification. Also:
+ * specification. Also:
* - loads the structure pointed to by 'aip' with the appropriate
* UID/GID, id type identifier and mode type values.
*/
@@ -837,7 +837,7 @@ acldefault(GrantObjectType objtype, Oid ownerId)
/*
- * SQL-accessible version of acldefault(). Hackish mapping from "char" type to
+ * SQL-accessible version of acldefault(). Hackish mapping from "char" type to
* ACL_OBJECT_* values, but it's only used in the information schema, not
* documented for general use.
*/
@@ -1006,7 +1006,7 @@ aclupdate(const Acl *old_acl, const AclItem *mod_aip,
}
/*
- * Remove abandoned privileges (cascading revoke). Currently we can only
+ * Remove abandoned privileges (cascading revoke). Currently we can only
* handle this when the grantee is not PUBLIC.
*/
if ((old_goptions & ~new_goptions) != 0)
@@ -1072,7 +1072,7 @@ aclnewowner(const Acl *old_acl, Oid oldOwnerId, Oid newOwnerId)
/*
* If the old ACL contained any references to the new owner, then we may
- * now have generated an ACL containing duplicate entries. Find them and
+ * now have generated an ACL containing duplicate entries. Find them and
* merge them so that there are not duplicates. (This is relatively
* expensive since we use a stupid O(N^2) algorithm, but it's unlikely to
* be the normal case.)
@@ -1083,7 +1083,7 @@ aclnewowner(const Acl *old_acl, Oid oldOwnerId, Oid newOwnerId)
* remove privilege-free entries, should there be any in the input.) dst
* is the next output slot, targ is the currently considered input slot
* (always >= dst), and src scans entries to the right of targ looking for
- * duplicates. Once an entry has been emitted to dst it is known
+ * duplicates. Once an entry has been emitted to dst it is known
* duplicate-free and need not be considered anymore.
*/
if (newpresent)
@@ -2468,7 +2468,7 @@ column_privilege_check(Oid tableoid, AttrNumber attnum,
* existence of the pg_class row before risking calling pg_class_aclcheck.
* Note: it might seem there's a race condition against concurrent DROP,
* but really it's safe because there will be no syscache flush between
- * here and there. So if we see the row in the syscache, so will
+ * here and there. So if we see the row in the syscache, so will
* pg_class_aclcheck.
*/
if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(tableoid)))
@@ -5015,14 +5015,14 @@ count_one_bits(AclMode mask)
* The grantor must always be either the object owner or some role that has
* been explicitly granted grant options. This ensures that all granted
* privileges appear to flow from the object owner, and there are never
- * multiple "original sources" of a privilege. Therefore, if the would-be
+ * multiple "original sources" of a privilege. Therefore, if the would-be
* grantor is a member of a role that has the needed grant options, we have
* to do the grant as that role instead.
*
* It is possible that the would-be grantor is a member of several roles
* that have different subsets of the desired grant options, but no one
* role has 'em all. In this case we pick a role with the largest number
- * of desired options. Ties are broken in favor of closer ancestors.
+ * of desired options. Ties are broken in favor of closer ancestors.
*
* roleId: the role attempting to do the GRANT/REVOKE
* privileges: the privileges to be granted/revoked
diff --git a/src/backend/utils/adt/array_selfuncs.c b/src/backend/utils/adt/array_selfuncs.c
index a66bbe32802..4cb5f419321 100644
--- a/src/backend/utils/adt/array_selfuncs.c
+++ b/src/backend/utils/adt/array_selfuncs.c
@@ -524,7 +524,7 @@ mcelem_array_selec(ArrayType *array, TypeCacheEntry *typentry,
/*
* Estimate selectivity of "column @> const" and "column && const" based on
- * most common element statistics. This estimation assumes element
+ * most common element statistics. This estimation assumes element
* occurrences are independent.
*
* mcelem (of length nmcelem) and numbers (of length nnumbers) are from
@@ -689,7 +689,7 @@ mcelem_array_contain_overlap_selec(Datum *mcelem, int nmcelem,
* In the "column @> const" and "column && const" cases, we usually have a
* "const" with low number of elements (otherwise we have selectivity close
* to 0 or 1 respectively). That's why the effect of dependence related
- * to distinct element count distribution is negligible there. In the
+ * to distinct element count distribution is negligible there. In the
* "column <@ const" case, number of elements is usually high (otherwise we
* have selectivity close to 0). That's why we should do a correction with
* the array distinct element count distribution here.
@@ -848,7 +848,7 @@ mcelem_array_contained_selec(Datum *mcelem, int nmcelem,
/*
* The presence of many distinct rare elements materially decreases
* selectivity. Use the Poisson distribution to estimate the probability
- * of a column value having zero occurrences of such elements. See above
+ * of a column value having zero occurrences of such elements. See above
* for the definition of "rest".
*/
mult *= exp(-rest);
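In other words, modeling occurrences of those rare elements as Poisson with mean "rest", the chance that a column value contains none of them is

    P(X = 0) = exp(-rest)

which is exactly the factor applied to mult above.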
@@ -856,7 +856,7 @@ mcelem_array_contained_selec(Datum *mcelem, int nmcelem,
/*----------
* Using the distinct element count histogram requires
* O(unique_nitems * (nmcelem + unique_nitems))
- * operations. Beyond a certain computational cost threshold, it's
+ * operations. Beyond a certain computational cost threshold, it's
* reasonable to sacrifice accuracy for decreased planning time. We limit
* the number of operations to EFFORT * nmcelem; since nmcelem is limited
* by the column's statistics target, the work done is user-controllable.
@@ -868,7 +868,7 @@ mcelem_array_contained_selec(Datum *mcelem, int nmcelem,
* elements to start with, we'd have to remove any discarded elements'
* frequencies from "mult", but since this is only an approximation
* anyway, we don't bother with that. Therefore it's sufficient to qsort
- * elem_selec[] and take the largest elements. (They will no longer match
+ * elem_selec[] and take the largest elements. (They will no longer match
* up with the elements of array_data[], but we don't care.)
*----------
*/
@@ -878,7 +878,7 @@ mcelem_array_contained_selec(Datum *mcelem, int nmcelem,
unique_nitems > EFFORT * nmcelem / (nmcelem + unique_nitems))
{
/*
- * Use the quadratic formula to solve for largest allowable N. We
+ * Use the quadratic formula to solve for largest allowable N. We
* have A = 1, B = nmcelem, C = - EFFORT * nmcelem.
*/
double b = (double) nmcelem;
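
[Editorial note: a standalone sanity check of the root taken here. EFFORT = 100 is assumed purely for illustration; the real constant is defined elsewhere in array_selfuncs.c.]

#include <math.h>
#include <stdio.h>

#define EFFORT 100              /* assumed value, for illustration only */

int main(void)
{
    double b = 1000.0;          /* B = nmcelem */
    double c = -EFFORT * b;     /* C = -EFFORT * nmcelem */
    /* positive root of N^2 + B*N + C = 0 */
    double n = (-b + sqrt(b * b - 4.0 * c)) / 2.0;

    printf("largest allowable N = %d\n", (int) n);   /* 91 */
    return 0;
}

Checking the bound: 91 * (1000 + 91) = 99281 stays within EFFORT * nmcelem = 100000, while N = 92 would exceed it.
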
@@ -953,7 +953,7 @@ calc_hist(const float4 *hist, int nhist, int n)
/*
* frac is a probability contribution for each interval between histogram
- * values. We have nhist - 1 intervals, so contribution of each one will
+ * values. We have nhist - 1 intervals, so contribution of each one will
* be 1 / (nhist - 1).
*/
frac = 1.0f / ((float) (nhist - 1));
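
[Editorial note: for example, a histogram with nhist = 11 boundary values has 10 intervals, so each interval contributes frac = 1/10 = 0.1 of the total probability mass.]
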
@@ -1020,8 +1020,8 @@ calc_hist(const float4 *hist, int nhist, int n)
* "rest" is the sum of the probabilities of all low-probability events not
* included in p.
*
- * Imagine matrix M of size (n + 1) x (m + 1). Element M[i,j] denotes the
- * probability that exactly j of first i events occur. Obviously M[0,0] = 1.
+ * Imagine matrix M of size (n + 1) x (m + 1). Element M[i,j] denotes the
+ * probability that exactly j of first i events occur. Obviously M[0,0] = 1.
* For any constant j, each increment of i increases the probability iff the
* event occurs. So, by the law of total probability:
* M[i,j] = M[i - 1, j] * (1 - p[i]) + M[i - 1, j - 1] * p[i]
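
[Editorial note: a minimal sketch of that recurrence, filled in row by row over a single array. The function name is hypothetical; the real calc_distr() additionally applies the "rest" correction to the result.]

#include <stdio.h>
#include <stdlib.h>

/* Probability that exactly j of the n events with probabilities p[]
 * occur, for j = 0..n, via M[i,j] = M[i-1,j]*(1-p[i]) + M[i-1,j-1]*p[i]. */
static double *
exact_count_distr(const double *p, int n)
{
    double *m = calloc(n + 1, sizeof(double));  /* row i of M, indexed by j */

    m[0] = 1.0;                                 /* M[0,0] = 1 */
    for (int i = 1; i <= n; i++)
        for (int j = i; j >= 0; j--)            /* descend so row i-1 survives */
            m[j] = m[j] * (1.0 - p[i - 1]) +
                (j > 0 ? m[j - 1] * p[i - 1] : 0.0);
    return m;
}

int main(void)
{
    double p[] = {0.5, 0.25};
    double *m = exact_count_distr(p, 2);

    for (int j = 0; j <= 2; j++)
        printf("P(exactly %d of 2) = %g\n", j, m[j]);  /* 0.375, 0.5, 0.125 */
    free(m);
    return 0;
}
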
@@ -1143,7 +1143,7 @@ floor_log2(uint32 n)
/*
* find_next_mcelem binary-searches a most common elements array, starting
- * from *index, for the first member >= value. It saves the position of the
+ * from *index, for the first member >= value. It saves the position of the
* match into *index and returns true if it's an exact match. (Note: we
* assume the mcelem elements are distinct so there can't be more than one
* exact match.)
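
[Editorial note: a self-contained sketch of the same contract --- first member >= value, resuming from *index, distinct elements assumed. Names are hypothetical.]

#include <stdbool.h>
#include <stdio.h>

static bool
find_next_ge(const int *a, int n, int value, int *index)
{
    int lo = *index;
    int hi = n;

    while (lo < hi)                 /* invariant: answer lies in [lo, hi] */
    {
        int mid = lo + (hi - lo) / 2;

        if (a[mid] < value)
            lo = mid + 1;
        else
            hi = mid;
    }
    *index = lo;                    /* save position for the next call */
    return lo < n && a[lo] == value;
}

int main(void)
{
    int mcelem[] = {2, 5, 7, 11};
    int idx = 0;

    printf("%d %d\n", find_next_ge(mcelem, 4, 7, &idx), idx);  /* 1 2 */
    printf("%d %d\n", find_next_ge(mcelem, 4, 8, &idx), idx);  /* 0 3 */
    return 0;
}
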
diff --git a/src/backend/utils/adt/array_typanalyze.c b/src/backend/utils/adt/array_typanalyze.c
index ae7bb8a8b81..6b7cd9791da 100644
--- a/src/backend/utils/adt/array_typanalyze.c
+++ b/src/backend/utils/adt/array_typanalyze.c
@@ -160,13 +160,13 @@ array_typanalyze(PG_FUNCTION_ARGS)
 * compute_array_stats() -- compute statistics for an array column
 * compute_array_stats() -- compute statistics for an array column
*
* This function computes statistics useful for determining selectivity of
- * the array operators <@, &&, and @>. It is invoked by ANALYZE via the
+ * the array operators <@, &&, and @>. It is invoked by ANALYZE via the
* compute_stats hook after sample rows have been collected.
*
* We also invoke the standard compute_stats function, which will compute
* "scalar" statistics relevant to the btree-style array comparison operators.
* However, exact duplicates of an entire array may be rare despite many
- * arrays sharing individual elements. This especially afflicts long arrays,
+ * arrays sharing individual elements. This especially afflicts long arrays,
* which are also liable to lack all scalar statistics due to the low
* WIDTH_THRESHOLD used in analyze.c. So, in addition to the standard stats,
* we find the most common array elements and compute a histogram of distinct
@@ -201,7 +201,7 @@ array_typanalyze(PG_FUNCTION_ARGS)
* In the absence of a principled basis for other particular values, we
* follow ts_typanalyze() and use parameters s = 0.07/K, epsilon = s/10.
* But we leave out the correction for stopwords, which do not apply to
- * arrays. These parameters give bucket width w = K/0.007 and maximum
+ * arrays. These parameters give bucket width w = K/0.007 and maximum
* expected hashtable size of about 1000 * K.
*
* Elements may repeat within an array. Since duplicates do not change the
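
[Editorial note: working those parameters through, taking K = 100 purely for illustration: s = 0.07/100 = 0.0007 and epsilon = s/10 = 0.00007, so the bucket width is w = 1/epsilon = 100/0.007 ~= 14286 and the expected hashtable ceiling is about 1000 * 100 = 100000 entries, matching the formulas in the comment.]
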
@@ -463,7 +463,7 @@ compute_array_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
/*
* Construct an array of the interesting hashtable items, that is,
- * those meeting the cutoff frequency (s - epsilon)*N. Also identify
+ * those meeting the cutoff frequency (s - epsilon)*N. Also identify
* the minimum and maximum frequencies among these items.
*
* Since epsilon = s/10 and bucket_width = 1/epsilon, the cutoff
@@ -498,7 +498,7 @@ compute_array_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
/*
* If we obtained more elements than we really want, get rid of those
- * with least frequencies. The easiest way is to qsort the array into
+ * with least frequencies. The easiest way is to qsort the array into
* descending frequency order and truncate the array.
*/
if (num_mcelem < track_len)
@@ -532,7 +532,7 @@ compute_array_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
/*
* We sorted statistics on the element value, but we want to be
* able to find the minimal and maximal frequencies without going
- * through all the values. We also want the frequency of null
+ * through all the values. We also want the frequency of null
* elements. Store these three values at the end of mcelem_freqs.
*/
mcelem_values = (Datum *) palloc(num_mcelem * sizeof(Datum));
@@ -623,7 +623,7 @@ compute_array_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
* (compare the histogram-making loop in compute_scalar_stats()).
* But instead of that we have the sorted_count_items[] array,
* which holds unique DEC values with their frequencies (that is,
- * a run-length-compressed version of the full array). So we
+ * a run-length-compressed version of the full array). So we
* control advancing through sorted_count_items[] with the
* variable "frac", which is defined as (x - y) * (num_hist - 1),
* where x is the index in the notional DECs array corresponding
diff --git a/src/backend/utils/adt/array_userfuncs.c b/src/backend/utils/adt/array_userfuncs.c
index 7ce1cce9876..10d5de9d60d 100644
--- a/src/backend/utils/adt/array_userfuncs.c
+++ b/src/backend/utils/adt/array_userfuncs.c
@@ -502,7 +502,7 @@ array_agg_transfn(PG_FUNCTION_ARGS)
/*
* The transition type for array_agg() is declared to be "internal", which
- * is a pass-by-value type the same size as a pointer. So we can safely
+ * is a pass-by-value type the same size as a pointer. So we can safely
* pass the ArrayBuildState pointer through nodeAgg.c's machinations.
*/
PG_RETURN_POINTER(state);
@@ -517,7 +517,7 @@ array_agg_finalfn(PG_FUNCTION_ARGS)
int lbs[1];
/*
- * Test for null before Asserting we are in right context. This is to
+ * Test for null before Asserting we are in right context. This is to
* avoid possible Assert failure in 8.4beta installations, where it is
* possible for users to create NULL constants of type internal.
*/
diff --git a/src/backend/utils/adt/arrayfuncs.c b/src/backend/utils/adt/arrayfuncs.c
index 438c3d0e9e6..1cb29ed4416 100644
--- a/src/backend/utils/adt/arrayfuncs.c
+++ b/src/backend/utils/adt/arrayfuncs.c
@@ -694,7 +694,7 @@ ReadArrayStr(char *arrayStr,
/*
* We have to remove " and \ characters to create a clean item value to
- * pass to the datatype input routine. We overwrite each item value
+ * pass to the datatype input routine. We overwrite each item value
* in-place within arrayStr to do this. srcptr is the current scan point,
* and dstptr is where we are copying to.
*
@@ -894,7 +894,7 @@ ReadArrayStr(char *arrayStr,
* referenced by Datums after copying them.
*
* If the input data is of varlena type, the caller must have ensured that
- * the values are not toasted. (Doing it here doesn't work since the
+ * the values are not toasted. (Doing it here doesn't work since the
* caller has already allocated space for the array...)
*/
static void
@@ -1990,7 +1990,7 @@ array_get_slice(ArrayType *array,
memcpy(ARR_DIMS(newarray), span, ndim * sizeof(int));
/*
- * Lower bounds of the new array are set to 1. Formerly (before 7.3) we
+ * Lower bounds of the new array are set to 1. Formerly (before 7.3) we
* copied the given lowerIndx values ... but that seems confusing.
*/
newlb = ARR_LBOUND(newarray);
@@ -2622,7 +2622,7 @@ array_set_slice(ArrayType *array,
/*
* array_map()
*
- * Map an array through an arbitrary function. Return a new array with
+ * Map an array through an arbitrary function. Return a new array with
* same dimensions and each source element transformed by fn(). Each
* source element is passed as the first argument to fn(); additional
* arguments to be passed to fn() can be specified by the caller.
@@ -2637,9 +2637,9 @@ array_set_slice(ArrayType *array,
* first argument position initially holds the input array value.
* * inpType: OID of element type of input array. This must be the same as,
* or binary-compatible with, the first argument type of fn().
- * * retType: OID of element type of output array. This must be the same as,
+ * * retType: OID of element type of output array. This must be the same as,
* or binary-compatible with, the result type of fn().
- * * amstate: workspace for array_map. Must be zeroed by caller before
+ * * amstate: workspace for array_map. Must be zeroed by caller before
* first call, and not touched after that.
*
* It is legitimate to pass a freshly-zeroed ArrayMapState on each call,
@@ -3493,7 +3493,7 @@ array_cmp(FunctionCallInfo fcinfo)
/*
* If arrays contain same data (up to end of shorter one), apply
- * additional rules to sort by dimensionality. The relative significance
+ * additional rules to sort by dimensionality. The relative significance
* of the different bits of information is historical; mainly we just care
* that we don't say "equal" for arrays of different dimensionality.
*/
@@ -3755,7 +3755,7 @@ array_contain_compare(ArrayType *array1, ArrayType *array2, Oid collation,
/*
* We assume that the comparison operator is strict, so a NULL can't
- * match anything. XXX this diverges from the "NULL=NULL" behavior of
+ * match anything. XXX this diverges from the "NULL=NULL" behavior of
* array_eq, should we act like that?
*/
if (isnull1)
@@ -4246,7 +4246,7 @@ array_copy(char *destptr, int nitems,
*
* Note: this could certainly be optimized using standard bitblt methods.
* However, it's not clear that the typical Postgres array has enough elements
- * to make it worth worrying too much. For the moment, KISS.
+ * to make it worth worrying too much. For the moment, KISS.
*/
void
array_bitmap_copy(bits8 *destbitmap, int destoffset,
@@ -4443,7 +4443,7 @@ array_extract_slice(ArrayType *newarray,
* Insert a slice into an array.
*
* ndim/dim[]/lb[] are dimensions of the original array. A new array with
- * those same dimensions is to be constructed. destArray must already
+ * those same dimensions is to be constructed. destArray must already
* have been allocated and its header initialized.
*
* st[]/endp[] identify the slice to be replaced. Elements within the slice
@@ -5111,7 +5111,7 @@ array_unnest(PG_FUNCTION_ARGS)
* Get the array value and detoast if needed. We can't do this
* earlier because if we have to detoast, we want the detoasted copy
* to be in multi_call_memory_ctx, so it will go away when we're done
- * and not before. (If no detoast happens, we assume the originally
+ * and not before. (If no detoast happens, we assume the originally
* passed array will stick around till then.)
*/
arr = PG_GETARG_ARRAYTYPE_P(0);
@@ -5187,7 +5187,7 @@ array_unnest(PG_FUNCTION_ARGS)
*
* Find all array entries matching (not distinct from) search/search_isnull,
* and delete them if remove is true, else replace them with
- * replace/replace_isnull. Comparisons are done using the specified
+ * replace/replace_isnull. Comparisons are done using the specified
* collation. fcinfo is passed only for caching purposes.
*/
static ArrayType *
@@ -5259,7 +5259,7 @@ array_replace_internal(ArrayType *array,
typalign = typentry->typalign;
/*
- * Detoast values if they are toasted. The replacement value must be
+ * Detoast values if they are toasted. The replacement value must be
* detoasted for insertion into the result array, while detoasting the
* search value only once saves cycles.
*/
diff --git a/src/backend/utils/adt/arrayutils.c b/src/backend/utils/adt/arrayutils.c
index 73e8d55be41..06e09b6d1f6 100644
--- a/src/backend/utils/adt/arrayutils.c
+++ b/src/backend/utils/adt/arrayutils.c
@@ -193,7 +193,7 @@ mda_next_tuple(int n, int *curr, const int *span)
/*
* ArrayGetIntegerTypmods: verify that argument is a 1-D cstring array,
- * and get the contents converted to integers. Returns a palloc'd array
+ * and get the contents converted to integers. Returns a palloc'd array
* and places the length at *n.
*/
int32 *
diff --git a/src/backend/utils/adt/char.c b/src/backend/utils/adt/char.c
index 4fe61c7aafe..7fedaf617c7 100644
--- a/src/backend/utils/adt/char.c
+++ b/src/backend/utils/adt/char.c
@@ -59,7 +59,7 @@ charout(PG_FUNCTION_ARGS)
* charrecv - converts external binary format to char
*
* The external representation is one byte, with no character set
- * conversion. This is somewhat dubious, perhaps, but in many
+ * conversion. This is somewhat dubious, perhaps, but in many
* cases people use char for a 1-byte binary type.
*/
Datum
diff --git a/src/backend/utils/adt/date.c b/src/backend/utils/adt/date.c
index 8677520cb6f..c92cc8d47fa 100644
--- a/src/backend/utils/adt/date.c
+++ b/src/backend/utils/adt/date.c
@@ -1290,7 +1290,7 @@ AdjustTimeForTypmod(TimeADT *time, int32 typmod)
* Note: this round-to-nearest code is not completely consistent about
* rounding values that are exactly halfway between integral values.
* On most platforms, rint() will implement round-to-nearest-even, but
- * the integer code always rounds up (away from zero). Is it worth
+ * the integer code always rounds up (away from zero). Is it worth
* trying to be consistent?
*/
#ifdef HAVE_INT64_TIMESTAMP
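
[Editorial note: to make the inconsistency concrete: round-to-nearest-even rint() maps 0.5 to 0 and 2.5 to 2, while the away-from-zero integer path maps them to 1 and 3; inputs that are not exactly halfway are unaffected.]
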
@@ -1638,7 +1638,7 @@ time_interval(PG_FUNCTION_ARGS)
* Convert interval to time data type.
*
* This is defined as producing the fractional-day portion of the interval.
- * Therefore, we can just ignore the months field.  It is not really clear
+ * Therefore, we can just ignore the months field. It is not really clear
* what to do with negative intervals, but we choose to subtract the floor,
* so that, say, '-2 hours' becomes '22:00:00'.
*/
@@ -2627,7 +2627,7 @@ timetz_zone(PG_FUNCTION_ARGS)
pg_tz *tzp;
/*
- * Look up the requested timezone. First we look in the date token table
+ * Look up the requested timezone. First we look in the date token table
* (to handle cases like "EST"), and if that fails, we look in the
* timezone database (to handle cases like "America/New_York"). (This
* matches the order in which timestamp input checks the cases; it's
diff --git a/src/backend/utils/adt/datetime.c b/src/backend/utils/adt/datetime.c
index 6979e921324..6df3823a08a 100644
--- a/src/backend/utils/adt/datetime.c
+++ b/src/backend/utils/adt/datetime.c
@@ -354,7 +354,7 @@ j2date(int jd, int *year, int *month, int *day)
* j2day - convert Julian date to day-of-week (0..6 == Sun..Sat)
*
* Note: various places use the locution j2day(date - 1) to produce a
- * result according to the convention 0..6 = Mon..Sun. This is a bit of
+ * result according to the convention 0..6 = Mon..Sun. This is a bit of
* a crock, but will work as long as the computation here is just a modulo.
*/
int
@@ -2480,7 +2480,7 @@ DecodeNumber(int flen, char *str, bool haveTextMonth, int fmask,
/*
* Nothing so far; make a decision about what we think the input
- * is. There used to be lots of heuristics here, but the
+ * is. There used to be lots of heuristics here, but the
* consensus now is to be paranoid. It *must* be either
* YYYY-MM-DD (with a more-than-two-digit year field), or the
* field order defined by DateOrder.
@@ -2513,9 +2513,9 @@ DecodeNumber(int flen, char *str, bool haveTextMonth, int fmask,
{
/*
* We are at the first numeric field of a date that included a
- * textual month name. We want to support the variants
+ * textual month name. We want to support the variants
* MON-DD-YYYY, DD-MON-YYYY, and YYYY-MON-DD as unambiguous
- * inputs. We will also accept MON-DD-YY or DD-MON-YY in
+ * inputs. We will also accept MON-DD-YY or DD-MON-YY in
* either DMY or MDY modes, as well as YY-MON-DD in YMD mode.
*/
if (flen >= 3 || DateOrder == DATEORDER_YMD)
@@ -2889,7 +2889,7 @@ DecodeInterval(char **field, int *ftype, int nf, int range,
Assert(*field[i] == '-' || *field[i] == '+');
/*
- * Check for signed hh:mm or hh:mm:ss. If so, process exactly
+ * Check for signed hh:mm or hh:mm:ss. If so, process exactly
* like DTK_TIME case above, plus handling the sign.
*/
if (strchr(field[i] + 1, ':') != NULL &&
@@ -3323,7 +3323,7 @@ DecodeISO8601Interval(char *str,
return dterr;
/*
- * Note: we could step off the end of the string here. Code below
+ * Note: we could step off the end of the string here. Code below
* *must* exit the loop if unit == '\0'.
*/
unit = *str++;
@@ -4126,7 +4126,7 @@ EncodeInterval(struct pg_tm * tm, fsec_t fsec, int style, char *str)
/*
* We've been burnt by stupid errors in the ordering of the datetkn tables
- * once too often. Arrange to check them during postmaster start.
+ * once too often. Arrange to check them during postmaster start.
*/
static bool
CheckDateTokenTable(const char *tablename, const datetkn *base, int nel)
diff --git a/src/backend/utils/adt/datum.c b/src/backend/utils/adt/datum.c
index 612b7ef7e5a..3a0dad29d88 100644
--- a/src/backend/utils/adt/datum.c
+++ b/src/backend/utils/adt/datum.c
@@ -181,7 +181,7 @@ datumIsEqual(Datum value1, Datum value2, bool typByVal, int typLen)
/*
* just compare the two datums. NOTE: just comparing "len" bytes will
* not do the work, because we do not know how these bytes are aligned
- * inside the "Datum". We assume instead that any given datatype is
+ * inside the "Datum". We assume instead that any given datatype is
* consistent about how it fills extraneous bits in the Datum.
*/
res = (value1 == value2);
diff --git a/src/backend/utils/adt/dbsize.c b/src/backend/utils/adt/dbsize.c
index 4c4e1ed8202..3d32dcad8d1 100644
--- a/src/backend/utils/adt/dbsize.c
+++ b/src/backend/utils/adt/dbsize.c
@@ -695,7 +695,7 @@ pg_size_pretty_numeric(PG_FUNCTION_ARGS)
* This is expected to be used in queries like
* SELECT pg_relation_filenode(oid) FROM pg_class;
* That leads to a couple of choices. We work from the pg_class row alone
- * rather than actually opening each relation, for efficiency. We don't
+ * rather than actually opening each relation, for efficiency. We don't
* fail if we can't find the relation --- some rows might be visible in
* the query's MVCC snapshot but already dead according to SnapshotNow.
* (Note: we could avoid using the catcache, but there's little point
diff --git a/src/backend/utils/adt/domains.c b/src/backend/utils/adt/domains.c
index 0a26222c39b..c7852185c54 100644
--- a/src/backend/utils/adt/domains.c
+++ b/src/backend/utils/adt/domains.c
@@ -12,11 +12,11 @@
* The overhead required for constraint checking can be high, since examining
* the catalogs to discover the constraints for a given domain is not cheap.
* We have three mechanisms for minimizing this cost:
- * 1. In a nest of domains, we flatten the checking of all the levels
+ * 1. In a nest of domains, we flatten the checking of all the levels
* into just one operation.
- * 2. We cache the list of constraint items in the FmgrInfo struct
+ * 2. We cache the list of constraint items in the FmgrInfo struct
* passed by the caller.
- * 3. If there are CHECK constraints, we cache a standalone ExprContext
+ * 3. If there are CHECK constraints, we cache a standalone ExprContext
* to evaluate them in.
*
*
@@ -311,7 +311,7 @@ domain_recv(PG_FUNCTION_ARGS)
/*
* domain_check - check that a datum satisfies the constraints of a
- * domain. extra and mcxt can be passed if they are available from,
+ * domain. extra and mcxt can be passed if they are available from,
* say, a FmgrInfo structure, or they can be NULL, in which case the
* setup is repeated for each call.
*/
diff --git a/src/backend/utils/adt/float.c b/src/backend/utils/adt/float.c
index b035e231d1e..6d1c92df9b6 100644
--- a/src/backend/utils/adt/float.c
+++ b/src/backend/utils/adt/float.c
@@ -276,7 +276,7 @@ float4in(PG_FUNCTION_ARGS)
/*
* Some platforms return ERANGE for denormalized numbers (those
* that are not zero, but are too close to zero to have full
- * precision). We'd prefer not to throw error for that, so try to
+ * precision). We'd prefer not to throw error for that, so try to
* detect whether it's a "real" out-of-range condition by checking
* to see if the result is zero or huge.
*/
@@ -309,7 +309,7 @@ float4in(PG_FUNCTION_ARGS)
/*
* In some IRIX versions, strtod() recognizes only "inf", so if the input
- * is "infinity" we have to skip over "inity". Also, it may return
+ * is "infinity" we have to skip over "inity". Also, it may return
* positive infinity for "-inf".
*/
if (isinf(val))
@@ -507,7 +507,7 @@ float8in(PG_FUNCTION_ARGS)
/*
* Some platforms return ERANGE for denormalized numbers (those
* that are not zero, but are too close to zero to have full
- * precision). We'd prefer not to throw error for that, so try to
+ * precision). We'd prefer not to throw error for that, so try to
* detect whether it's a "real" out-of-range condition by checking
* to see if the result is zero or huge.
*/
@@ -540,7 +540,7 @@ float8in(PG_FUNCTION_ARGS)
/*
* In some IRIX versions, strtod() recognizes only "inf", so if the input
- * is "infinity" we have to skip over "inity". Also, it may return
+ * is "infinity" we have to skip over "inity". Also, it may return
* positive infinity for "-inf".
*/
if (isinf(val))
@@ -2118,7 +2118,7 @@ float8_stddev_samp(PG_FUNCTION_ARGS)
* in that order. Note that Y is the first argument to the aggregates!
*
* It might seem attractive to optimize this by having multiple accumulator
- * functions that only calculate the sums actually needed. But on most
+ * functions that only calculate the sums actually needed. But on most
* modern machines, a couple of extra floating-point multiplies will be
* insignificant compared to the other per-tuple overhead, so I've chosen
* to minimize code space instead.
diff --git a/src/backend/utils/adt/format_type.c b/src/backend/utils/adt/format_type.c
index cd164c7e7ea..c69ba3c1439 100644
--- a/src/backend/utils/adt/format_type.c
+++ b/src/backend/utils/adt/format_type.c
@@ -48,14 +48,14 @@ __attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 3)));
* double quoted if it contains funny characters or matches a keyword.
*
* If typemod is NULL then we are formatting a type name in a context where
- * no typemod is available, eg a function argument or result type. This
+ * no typemod is available, eg a function argument or result type. This
* yields a slightly different result from specifying typemod = -1 in some
* cases. Given typemod = -1 we feel compelled to produce an output that
* the parser will interpret as having typemod -1, so that pg_dump will
- * produce CREATE TABLE commands that recreate the original state. But
+ * produce CREATE TABLE commands that recreate the original state. But
* given NULL typemod, we assume that the parser's interpretation of
* typemod doesn't matter, and so we are willing to output a slightly
- * "prettier" representation of the same type. For example, type = bpchar
+ * "prettier" representation of the same type. For example, type = bpchar
* and typemod = NULL gets you "character", whereas typemod = -1 gets you
* "bpchar" --- the former will be interpreted as character(1) by the
* parser, which does not yield typemod -1.
diff --git a/src/backend/utils/adt/formatting.c b/src/backend/utils/adt/formatting.c
index 7b854062f0d..a46db73e0cd 100644
--- a/src/backend/utils/adt/formatting.c
+++ b/src/backend/utils/adt/formatting.c
@@ -1828,7 +1828,7 @@ str_initcap(const char *buff, size_t nbytes, Oid collid)
/*
* Note: we assume that toupper_l()/tolower_l() will not be so broken
- * as to need guard tests. When using the default collation, we apply
+ * as to need guard tests. When using the default collation, we apply
* the traditional Postgres behavior that forces ASCII-style treatment
* of I/i, but in non-default collations you get exactly what the
* collation says.
@@ -3617,7 +3617,7 @@ do_to_timestamp(text *date_txt, text *fmt,
{
/*
* The month and day field have not been set, so we use the
- * day-of-year field to populate them. Depending on the date mode,
+ * day-of-year field to populate them. Depending on the date mode,
* this field may be interpreted as a Gregorian day-of-year, or an ISO
* week date day-of-year.
*/
diff --git a/src/backend/utils/adt/geo_selfuncs.c b/src/backend/utils/adt/geo_selfuncs.c
index bd30773c11e..7b30e3d951f 100644
--- a/src/backend/utils/adt/geo_selfuncs.c
+++ b/src/backend/utils/adt/geo_selfuncs.c
@@ -22,7 +22,7 @@
/*
- * Selectivity functions for geometric operators. These are bogus -- unless
+ * Selectivity functions for geometric operators. These are bogus -- unless
* we know the actual key distribution in the index, we can't make a good
* prediction of the selectivity of these operators.
*
@@ -34,7 +34,7 @@
* In general, GiST needs to search multiple subtrees in order to guarantee
* that all occurrences of the same key have been found. Because of this,
* the estimated cost for scanning the index ought to be higher than the
- * output selectivity would indicate. gistcostestimate(), over in selfuncs.c,
+ * output selectivity would indicate. gistcostestimate(), over in selfuncs.c,
* ought to be adjusted accordingly --- but until we can generate somewhat
* realistic numbers here, it hardly matters...
*/
diff --git a/src/backend/utils/adt/inet_cidr_ntop.c b/src/backend/utils/adt/inet_cidr_ntop.c
index 5f2a3d361d9..d33534ec173 100644
--- a/src/backend/utils/adt/inet_cidr_ntop.c
+++ b/src/backend/utils/adt/inet_cidr_ntop.c
@@ -196,7 +196,7 @@ inet_cidr_ntop_ipv6(const u_char *src, int bits, char *dst, size_t size)
}
else
{
- /* Copy src to private buffer. Zero host part. */
+ /* Copy src to private buffer. Zero host part. */
p = (bits + 7) / 8;
memcpy(inbuf, src, p);
memset(inbuf + p, 0, 16 - p);
diff --git a/src/backend/utils/adt/int.c b/src/backend/utils/adt/int.c
index 4a9bc0a969f..7beee67fe80 100644
--- a/src/backend/utils/adt/int.c
+++ b/src/backend/utils/adt/int.c
@@ -642,7 +642,7 @@ int4pl(PG_FUNCTION_ARGS)
result = arg1 + arg2;
/*
- * Overflow check. If the inputs are of different signs then their sum
+ * Overflow check. If the inputs are of different signs then their sum
* cannot overflow. If the inputs are of the same sign, their sum had
* better be that sign too.
*/
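
[Editorial note: a standalone sketch of the sign test described above, returning a flag where the backend would ereport(). SAMESIGN is reproduced from int.c; the unsigned detour merely keeps this demo clear of C-level signed-overflow undefined behavior.]

#include <limits.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SAMESIGN(a,b) (((a) < 0) == ((b) < 0))

static bool
add_overflows(int32_t arg1, int32_t arg2, int32_t *result)
{
    *result = (int32_t) ((uint32_t) arg1 + (uint32_t) arg2);
    return SAMESIGN(arg1, arg2) && !SAMESIGN(*result, arg1);
}

int main(void)
{
    int32_t r;

    printf("%d\n", add_overflows(INT32_MAX, 1, &r));  /* 1: same signs, result flipped */
    printf("%d\n", add_overflows(-5, 10, &r));        /* 0: different signs can't overflow */
    return 0;
}
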
@@ -663,8 +663,8 @@ int4mi(PG_FUNCTION_ARGS)
result = arg1 - arg2;
/*
- * Overflow check. If the inputs are of the same sign then their
- * difference cannot overflow. If they are of different signs then the
+ * Overflow check. If the inputs are of the same sign then their
+ * difference cannot overflow. If they are of different signs then the
* result should be of the same sign as the first input.
*/
if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
@@ -684,7 +684,7 @@ int4mul(PG_FUNCTION_ARGS)
result = arg1 * arg2;
/*
- * Overflow check. We basically check to see if result / arg2 gives arg1
+ * Overflow check. We basically check to see if result / arg2 gives arg1
* again. There are two cases where this fails: arg2 = 0 (which cannot
* overflow) and arg1 = INT_MIN, arg2 = -1 (where the division itself will
* overflow and thus incorrectly match).
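
[Editorial note: the division-based test described above, as a flag-returning sketch. The multiply is widened to 64 bits only so the demo itself stays free of undefined behavior; the two flagged special cases are handled before dividing.]

#include <limits.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool
mul_overflows(int32_t arg1, int32_t arg2, int32_t *result)
{
    *result = (int32_t) ((int64_t) arg1 * (int64_t) arg2);
    if (arg2 == 0)
        return false;               /* multiplying by 0 never overflows */
    if (arg1 == INT32_MIN && arg2 == -1)
        return true;                /* the check division itself would overflow */
    return *result / arg2 != arg1;  /* overflowed iff arg1 can't be recovered */
}

int main(void)
{
    int32_t r;

    printf("%d\n", mul_overflows(46341, 46341, &r));  /* 1: 46341^2 > INT32_MAX */
    printf("%d\n", mul_overflows(3, 7, &r));          /* 0 */
    return 0;
}
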
@@ -794,7 +794,7 @@ int2pl(PG_FUNCTION_ARGS)
result = arg1 + arg2;
/*
- * Overflow check. If the inputs are of different signs then their sum
+ * Overflow check. If the inputs are of different signs then their sum
* cannot overflow. If the inputs are of the same sign, their sum had
* better be that sign too.
*/
@@ -815,8 +815,8 @@ int2mi(PG_FUNCTION_ARGS)
result = arg1 - arg2;
/*
- * Overflow check. If the inputs are of the same sign then their
- * difference cannot overflow. If they are of different signs then the
+ * Overflow check. If the inputs are of the same sign then their
+ * difference cannot overflow. If they are of different signs then the
* result should be of the same sign as the first input.
*/
if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
@@ -897,7 +897,7 @@ int24pl(PG_FUNCTION_ARGS)
result = arg1 + arg2;
/*
- * Overflow check. If the inputs are of different signs then their sum
+ * Overflow check. If the inputs are of different signs then their sum
* cannot overflow. If the inputs are of the same sign, their sum had
* better be that sign too.
*/
@@ -918,8 +918,8 @@ int24mi(PG_FUNCTION_ARGS)
result = arg1 - arg2;
/*
- * Overflow check. If the inputs are of the same sign then their
- * difference cannot overflow. If they are of different signs then the
+ * Overflow check. If the inputs are of the same sign then their
+ * difference cannot overflow. If they are of different signs then the
* result should be of the same sign as the first input.
*/
if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
@@ -939,7 +939,7 @@ int24mul(PG_FUNCTION_ARGS)
result = arg1 * arg2;
/*
- * Overflow check. We basically check to see if result / arg2 gives arg1
+ * Overflow check. We basically check to see if result / arg2 gives arg1
* again. There is one case where this fails: arg2 = 0 (which cannot
* overflow).
*
@@ -985,7 +985,7 @@ int42pl(PG_FUNCTION_ARGS)
result = arg1 + arg2;
/*
- * Overflow check. If the inputs are of different signs then their sum
+ * Overflow check. If the inputs are of different signs then their sum
* cannot overflow. If the inputs are of the same sign, their sum had
* better be that sign too.
*/
@@ -1006,8 +1006,8 @@ int42mi(PG_FUNCTION_ARGS)
result = arg1 - arg2;
/*
- * Overflow check. If the inputs are of the same sign then their
- * difference cannot overflow. If they are of different signs then the
+ * Overflow check. If the inputs are of the same sign then their
+ * difference cannot overflow. If they are of different signs then the
* result should be of the same sign as the first input.
*/
if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
@@ -1027,7 +1027,7 @@ int42mul(PG_FUNCTION_ARGS)
result = arg1 * arg2;
/*
- * Overflow check. We basically check to see if result / arg1 gives arg2
+ * Overflow check. We basically check to see if result / arg1 gives arg2
* again. There is one case where this fails: arg1 = 0 (which cannot
* overflow).
*
diff --git a/src/backend/utils/adt/int8.c b/src/backend/utils/adt/int8.c
index 4b20616416c..0586d69bb54 100644
--- a/src/backend/utils/adt/int8.c
+++ b/src/backend/utils/adt/int8.c
@@ -73,7 +73,7 @@ scanint8(const char *str, bool errorOK, int64 *result)
ptr++;
/*
- * Do an explicit check for INT64_MIN. Ugly though this is, it's
+ * Do an explicit check for INT64_MIN. Ugly though this is, it's
* cleaner than trying to get the loop below to handle it portably.
*/
if (strncmp(ptr, "9223372036854775808", 19) == 0)
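
[Editorial note: why the special case is needed, as a simplified sketch: the magnitude of INT64_MIN exceeds INT64_MAX, so "-9223372036854775808" cannot be parsed by negating the positive value. The real scanint8 does further validation of the surrounding characters.]

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    const char *ptr = "9223372036854775808";   /* digits following a '-' sign */
    int64_t result = 0;

    /* the positive value would overflow int64, so match it textually */
    if (strncmp(ptr, "9223372036854775808", 19) == 0 && ptr[19] == '\0')
        result = INT64_MIN;
    printf("%lld\n", (long long) result);      /* -9223372036854775808 */
    return 0;
}
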
@@ -519,7 +519,7 @@ int8pl(PG_FUNCTION_ARGS)
result = arg1 + arg2;
/*
- * Overflow check. If the inputs are of different signs then their sum
+ * Overflow check. If the inputs are of different signs then their sum
* cannot overflow. If the inputs are of the same sign, their sum had
* better be that sign too.
*/
@@ -540,8 +540,8 @@ int8mi(PG_FUNCTION_ARGS)
result = arg1 - arg2;
/*
- * Overflow check. If the inputs are of the same sign then their
- * difference cannot overflow. If they are of different signs then the
+ * Overflow check. If the inputs are of the same sign then their
+ * difference cannot overflow. If they are of different signs then the
* result should be of the same sign as the first input.
*/
if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
@@ -561,7 +561,7 @@ int8mul(PG_FUNCTION_ARGS)
result = arg1 * arg2;
/*
- * Overflow check. We basically check to see if result / arg2 gives arg1
+ * Overflow check. We basically check to see if result / arg2 gives arg1
* again. There are two cases where this fails: arg2 = 0 (which cannot
* overflow) and arg1 = INT64_MIN, arg2 = -1 (where the division itself
* will overflow and thus incorrectly match).
@@ -719,7 +719,7 @@ int8inc(PG_FUNCTION_ARGS)
/*
* These functions are exactly like int8inc but are used for aggregates that
- * count only non-null values. Since the functions are declared strict,
+ * count only non-null values. Since the functions are declared strict,
* the null checks happen before we ever get here, and all we need do is
* increment the state value. We could actually make these pg_proc entries
* point right at int8inc, but then the opr_sanity regression test would
@@ -773,7 +773,7 @@ int84pl(PG_FUNCTION_ARGS)
result = arg1 + arg2;
/*
- * Overflow check. If the inputs are of different signs then their sum
+ * Overflow check. If the inputs are of different signs then their sum
* cannot overflow. If the inputs are of the same sign, their sum had
* better be that sign too.
*/
@@ -794,8 +794,8 @@ int84mi(PG_FUNCTION_ARGS)
result = arg1 - arg2;
/*
- * Overflow check. If the inputs are of the same sign then their
- * difference cannot overflow. If they are of different signs then the
+ * Overflow check. If the inputs are of the same sign then their
+ * difference cannot overflow. If they are of different signs then the
* result should be of the same sign as the first input.
*/
if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
@@ -815,7 +815,7 @@ int84mul(PG_FUNCTION_ARGS)
result = arg1 * arg2;
/*
- * Overflow check. We basically check to see if result / arg1 gives arg2
+ * Overflow check. We basically check to see if result / arg1 gives arg2
* again. There is one case where this fails: arg1 = 0 (which cannot
* overflow).
*
@@ -882,7 +882,7 @@ int48pl(PG_FUNCTION_ARGS)
result = arg1 + arg2;
/*
- * Overflow check. If the inputs are of different signs then their sum
+ * Overflow check. If the inputs are of different signs then their sum
* cannot overflow. If the inputs are of the same sign, their sum had
* better be that sign too.
*/
@@ -903,8 +903,8 @@ int48mi(PG_FUNCTION_ARGS)
result = arg1 - arg2;
/*
- * Overflow check. If the inputs are of the same sign then their
- * difference cannot overflow. If they are of different signs then the
+ * Overflow check. If the inputs are of the same sign then their
+ * difference cannot overflow. If they are of different signs then the
* result should be of the same sign as the first input.
*/
if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
@@ -924,7 +924,7 @@ int48mul(PG_FUNCTION_ARGS)
result = arg1 * arg2;
/*
- * Overflow check. We basically check to see if result / arg2 gives arg1
+ * Overflow check. We basically check to see if result / arg2 gives arg1
* again. There is one case where this fails: arg2 = 0 (which cannot
* overflow).
*
@@ -970,7 +970,7 @@ int82pl(PG_FUNCTION_ARGS)
result = arg1 + arg2;
/*
- * Overflow check. If the inputs are of different signs then their sum
+ * Overflow check. If the inputs are of different signs then their sum
* cannot overflow. If the inputs are of the same sign, their sum had
* better be that sign too.
*/
@@ -991,8 +991,8 @@ int82mi(PG_FUNCTION_ARGS)
result = arg1 - arg2;
/*
- * Overflow check. If the inputs are of the same sign then their
- * difference cannot overflow. If they are of different signs then the
+ * Overflow check. If the inputs are of the same sign then their
+ * difference cannot overflow. If they are of different signs then the
* result should be of the same sign as the first input.
*/
if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
@@ -1012,7 +1012,7 @@ int82mul(PG_FUNCTION_ARGS)
result = arg1 * arg2;
/*
- * Overflow check. We basically check to see if result / arg1 gives arg2
+ * Overflow check. We basically check to see if result / arg1 gives arg2
* again. There is one case where this fails: arg1 = 0 (which cannot
* overflow).
*
@@ -1079,7 +1079,7 @@ int28pl(PG_FUNCTION_ARGS)
result = arg1 + arg2;
/*
- * Overflow check. If the inputs are of different signs then their sum
+ * Overflow check. If the inputs are of different signs then their sum
* cannot overflow. If the inputs are of the same sign, their sum had
* better be that sign too.
*/
@@ -1100,8 +1100,8 @@ int28mi(PG_FUNCTION_ARGS)
result = arg1 - arg2;
/*
- * Overflow check. If the inputs are of the same sign then their
- * difference cannot overflow. If they are of different signs then the
+ * Overflow check. If the inputs are of the same sign then their
+ * difference cannot overflow. If they are of different signs then the
* result should be of the same sign as the first input.
*/
if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
@@ -1121,7 +1121,7 @@ int28mul(PG_FUNCTION_ARGS)
result = arg1 * arg2;
/*
- * Overflow check. We basically check to see if result / arg2 gives arg1
+ * Overflow check. We basically check to see if result / arg2 gives arg1
* again. There is one case where this fails: arg2 = 0 (which cannot
* overflow).
*
diff --git a/src/backend/utils/adt/json.c b/src/backend/utils/adt/json.c
index 7cdf13653e2..7ccbad49750 100644
--- a/src/backend/utils/adt/json.c
+++ b/src/backend/utils/adt/json.c
@@ -588,10 +588,10 @@ json_lex(JsonLexContext *lex)
/*
* We're not dealing with a string, number, legal
- * punctuation mark, or end of string. The only legal
+ * punctuation mark, or end of string. The only legal
* tokens we might find here are true, false, and null,
* but for error reporting purposes we scan until we see a
- * non-alphanumeric character. That way, we can report
+ * non-alphanumeric character. That way, we can report
* the whole word as an unexpected token, rather than just
* some unintuitive prefix thereof.
*/
@@ -885,12 +885,12 @@ json_lex_string(JsonLexContext *lex)
* begin with a '0'.
*
* (3) An optional decimal part, consisting of a period ('.') followed by
- * one or more digits. (Note: While this part can be omitted
+ * one or more digits. (Note: While this part can be omitted
* completely, it's not OK to have only the decimal point without
* any digits afterwards.)
*
* (4) An optional exponent part, consisting of 'e' or 'E', optionally
- * followed by '+' or '-', followed by one or more digits. (Note:
+ * followed by '+' or '-', followed by one or more digits. (Note:
* As with the decimal part, if 'e' or 'E' is present, it must be
* followed by at least one digit.)
*
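
[Editorial note: a compact validator sketch covering the numbered parts above, under the assumption that part (2) is an integer part that may not have leading zeroes. The helper is hypothetical; the real json_lex_number() reports an error position rather than a boolean.]

#include <ctype.h>
#include <stdbool.h>
#include <stdio.h>

static bool
json_number_ok(const char *s)
{
    if (*s == '-')
        s++;                                   /* (1) optional minus sign */
    if (*s == '0')
        s++;                                   /* (2) no leading zeroes */
    else if (isdigit((unsigned char) *s))
        while (isdigit((unsigned char) *s))
            s++;
    else
        return false;
    if (*s == '.')                             /* (3) optional decimal part */
    {
        s++;
        if (!isdigit((unsigned char) *s))
            return false;                      /* a lone decimal point is not OK */
        while (isdigit((unsigned char) *s))
            s++;
    }
    if (*s == 'e' || *s == 'E')                /* (4) optional exponent part */
    {
        s++;
        if (*s == '+' || *s == '-')
            s++;
        if (!isdigit((unsigned char) *s))
            return false;                      /* 'e' must be followed by a digit */
        while (isdigit((unsigned char) *s))
            s++;
    }
    return *s == '\0';
}

int main(void)
{
    printf("%d %d %d\n", json_number_ok("-0.5e+10"),
           json_number_ok("1."), json_number_ok("01"));  /* 1 0 0 */
    return 0;
}
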
@@ -968,7 +968,7 @@ json_lex_number(JsonLexContext *lex, char *s, bool *num_err)
}
/*
- * Check for trailing garbage. As in json_lex(), any alphanumeric stuff
+ * Check for trailing garbage. As in json_lex(), any alphanumeric stuff
* here should be considered part of the token for error-reporting
* purposes.
*/
@@ -1763,7 +1763,7 @@ json_agg_transfn(PG_FUNCTION_ARGS)
/*
* The transition type for array_agg() is declared to be "internal", which
- * is a pass-by-value type the same size as a pointer. So we can safely
+ * is a pass-by-value type the same size as a pointer. So we can safely
* pass the ArrayBuildState pointer through nodeAgg.c's machinations.
*/
PG_RETURN_POINTER(state);
diff --git a/src/backend/utils/adt/like.c b/src/backend/utils/adt/like.c
index 4f6c5b39a46..605204528e4 100644
--- a/src/backend/utils/adt/like.c
+++ b/src/backend/utils/adt/like.c
@@ -76,12 +76,12 @@ wchareq(char *p1, char *p2)
/*
* Formerly we had a routine iwchareq() here that tried to do case-insensitive
- * comparison of multibyte characters. It did not work at all, however,
+ * comparison of multibyte characters. It did not work at all, however,
* because it relied on tolower() which has a single-byte API ... and
* towlower() wouldn't be much better since we have no suitably cheap way
* of getting a single character transformed to the system's wchar_t format.
* So now, we just downcase the strings using lower() and apply regular LIKE
- * comparison. This should be revisited when we install better locale support.
+ * comparison. This should be revisited when we install better locale support.
*/
/*
diff --git a/src/backend/utils/adt/misc.c b/src/backend/utils/adt/misc.c
index aecdcd056cd..b47ddb6045f 100644
--- a/src/backend/utils/adt/misc.c
+++ b/src/backend/utils/adt/misc.c
@@ -96,7 +96,7 @@ pg_signal_backend(int pid, int sig)
/*
* BackendPidGetProc returns NULL if the pid isn't valid; but by the time
* we reach kill(), a process for which we get a valid proc here might
- * have terminated on its own. There's no way to acquire a lock on an
+ * have terminated on its own. There's no way to acquire a lock on an
* arbitrary process to prevent that. But since so far all the callers of
* this mechanism involve some request for ending the process anyway, that
* it might end on its own first is not a problem.
@@ -120,7 +120,7 @@ pg_signal_backend(int pid, int sig)
* recycled for a new process, before reaching here? Then we'd be trying
* to kill the wrong thing. Seems near impossible when sequential pid
 * assignment and wraparound are used. Perhaps it could happen on a system
- * where pid re-use is randomized. That race condition possibility seems
+ * where pid re-use is randomized. That race condition possibility seems
* too unlikely to worry about.
*/
@@ -140,7 +140,7 @@ pg_signal_backend(int pid, int sig)
}
/*
- * Signal to cancel a backend process. This is allowed if you are superuser or
+ * Signal to cancel a backend process. This is allowed if you are superuser or
* have the same role as the process being canceled.
*/
Datum
@@ -333,7 +333,7 @@ pg_tablespace_location(PG_FUNCTION_ARGS)
/*
* It's useful to apply this function to pg_class.reltablespace, wherein
- * zero means "the database's default tablespace". So, rather than
+ * zero means "the database's default tablespace". So, rather than
* throwing an error for zero, we choose to assume that's what is meant.
*/
if (tablespaceOid == InvalidOid)
@@ -391,7 +391,7 @@ pg_sleep(PG_FUNCTION_ARGS)
* loop.
*
* By computing the intended stop time initially, we avoid accumulation of
- * extra delay across multiple sleeps. This also ensures we won't delay
+ * extra delay across multiple sleeps. This also ensures we won't delay
* less than the specified time when WaitLatch is terminated early by a
* non-query-cancelling signal such as SIGHUP.
*/
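
[Editorial note: a drift-free sleep loop in the same spirit, with plain POSIX calls standing in for the latch machinery; the point is only that the stop time is computed once, up front.]

#include <stdio.h>
#include <time.h>

int main(void)
{
    double seconds = 1.5;
    struct timespec now;

    /* compute the intended stop time exactly once */
    clock_gettime(CLOCK_MONOTONIC, &now);
    double stop = now.tv_sec + now.tv_nsec / 1e9 + seconds;

    for (;;)
    {
        clock_gettime(CLOCK_MONOTONIC, &now);
        double left = stop - (now.tv_sec + now.tv_nsec / 1e9);

        if (left <= 0)
            break;                  /* target reached, however we were woken */
        struct timespec chunk = {0, (long) ((left > 0.1 ? 0.1 : left) * 1e9)};
        nanosleep(&chunk, NULL);    /* early wakeups just shorten this chunk */
    }
    printf("slept %.1fs total with no per-iteration drift\n", seconds);
    return 0;
}
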
@@ -558,7 +558,7 @@ pg_relation_is_updatable(PG_FUNCTION_ARGS)
* non-updatable columns.
*
* Also, this function encapsulates the decision about just what
- * information_schema.columns.is_updatable actually means. It's not clear
+ * information_schema.columns.is_updatable actually means. It's not clear
* whether deletability of the column's relation should be required, so
* we want that decision in C code where we could change it without initdb.
*/
diff --git a/src/backend/utils/adt/nabstime.c b/src/backend/utils/adt/nabstime.c
index 32f1726402e..6545d31fc9c 100644
--- a/src/backend/utils/adt/nabstime.c
+++ b/src/backend/utils/adt/nabstime.c
@@ -199,7 +199,7 @@ tm2abstime(struct pg_tm * tm, int tz)
sec = tm->tm_sec + tz + (tm->tm_min + (day * HOURS_PER_DAY + tm->tm_hour) * MINS_PER_HOUR) * SECS_PER_MINUTE;
/*
- * check for overflow. We need a little slop here because the H/M/S plus
+ * check for overflow. We need a little slop here because the H/M/S plus
* TZ offset could add up to more than 1 day.
*/
if ((day >= MAX_DAYNUM - 10 && sec < 0) ||
@@ -1164,7 +1164,7 @@ tintervalsame(PG_FUNCTION_ARGS)
* 1. The interval length computations overflow at 2^31 seconds, causing
* intervals longer than that to sort oddly compared to those shorter.
* 2. infinity and minus infinity (NOEND_ABSTIME and NOSTART_ABSTIME) are
- * just ordinary integers. Since this code doesn't handle them specially,
+ * just ordinary integers. Since this code doesn't handle them specially,
* it's possible for [a b] to be considered longer than [c infinity] for
* finite abstimes a, b, c. In combination with the previous point, the
* interval [-infinity infinity] is treated as being shorter than many finite
diff --git a/src/backend/utils/adt/network.c b/src/backend/utils/adt/network.c
index f2c337cb8d1..737eeff86da 100644
--- a/src/backend/utils/adt/network.c
+++ b/src/backend/utils/adt/network.c
@@ -29,7 +29,7 @@ static int ip_addrsize(inet *inetptr);
static inet *internal_inetpl(inet *ip, int64 addend);
/*
- * Access macros. We use VARDATA_ANY so that we can process short-header
+ * Access macros. We use VARDATA_ANY so that we can process short-header
* varlena values without detoasting them. This requires a trick:
* VARDATA_ANY assumes the varlena header is already filled in, which is
* not the case when constructing a new value (until SET_INET_VARSIZE is
@@ -88,7 +88,7 @@ network_in(char *src, bool is_cidr)
dst = (inet *) palloc0(sizeof(inet));
/*
- * First, check to see if this is an IPv6 or IPv4 address. IPv6 addresses
+ * First, check to see if this is an IPv6 or IPv4 address. IPv6 addresses
* will have a : somewhere in them (several, in fact) so if there is one
* present, assume it's V6, otherwise assume it's V4.
*/
@@ -193,7 +193,7 @@ cidr_out(PG_FUNCTION_ARGS)
* family, bits, is_cidr, address length, address in network byte order.
*
* Presence of is_cidr is largely for historical reasons, though it might
- * allow some code-sharing on the client side. We send it correctly on
+ * allow some code-sharing on the client side. We send it correctly on
* output, but ignore the value on input.
*/
static inet *
@@ -1392,7 +1392,7 @@ inetmi(PG_FUNCTION_ARGS)
/*
* We form the difference using the traditional complement, increment,
* and add rule, with the increment part being handled by starting the
- * carry off at 1. If you don't think integer arithmetic is done in
+ * carry off at 1. If you don't think integer arithmetic is done in
* two's complement, too bad.
*/
int nb = ip_addrsize(ip);
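
[Editorial note: a byte-array sketch of the complement-increment-add rule, with the carry seeded to 1 as described. IPv4-sized for brevity; the real code also range-checks results wider than int64.]

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint8_t a[4] = {192, 168, 1, 10};    /* 192.168.1.10 */
    uint8_t b[4] = {192, 168, 1, 1};     /* 192.168.1.1  */
    uint8_t diff[4];
    int carry = 1;                       /* the "increment" of a - b = a + ~b + 1 */
    int64_t res = 0;

    for (int i = 3; i >= 0; i--)         /* network byte order: walk right to left */
    {
        carry += a[i] + (uint8_t) ~b[i];
        diff[i] = (uint8_t) carry;
        carry >>= 8;
    }
    for (int i = 0; i < 4; i++)
        res = (res << 8) | diff[i];
    printf("%lld\n", (long long) res);   /* 9 */
    return 0;
}
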
@@ -1414,7 +1414,7 @@ inetmi(PG_FUNCTION_ARGS)
else
{
/*
- * Input wider than int64: check for overflow. All bytes to
+ * Input wider than int64: check for overflow. All bytes to
* the left of what will fit should be 0 or 0xFF, depending on
* sign of the now-complete result.
*/
@@ -1445,9 +1445,9 @@ inetmi(PG_FUNCTION_ARGS)
* XXX This should go away someday!
*
* This is a kluge needed because we don't yet support zones in stored inet
- * values. Since the result of getnameinfo() might include a zone spec,
+ * values. Since the result of getnameinfo() might include a zone spec,
* call this to remove it anywhere we want to feed getnameinfo's output to
- * network_in. Beats failing entirely.
+ * network_in. Beats failing entirely.
*
* An alternative approach would be to let network_in ignore %-parts for
* itself, but that would mean we'd silently drop zone specs in user input,
diff --git a/src/backend/utils/adt/numeric.c b/src/backend/utils/adt/numeric.c
index b4d639428ac..eddf7f0385e 100644
--- a/src/backend/utils/adt/numeric.c
+++ b/src/backend/utils/adt/numeric.c
@@ -50,7 +50,7 @@
* Numeric values are represented in a base-NBASE floating point format.
* Each "digit" ranges from 0 to NBASE-1. The type NumericDigit is signed
* and wide enough to store a digit. We assume that NBASE*NBASE can fit in
- * an int. Although the purely calculational routines could handle any even
+ * an int. Although the purely calculational routines could handle any even
* NBASE that's less than sqrt(INT_MAX), in practice we are only interested
* in NBASE a power of ten, so that I/O conversions and decimal rounding
* are easy. Also, it's actually more efficient if NBASE is rather less than
@@ -95,11 +95,11 @@ typedef int16 NumericDigit;
* If the high bits of the first word of a NumericChoice (n_header, or
* n_short.n_header, or n_long.n_sign_dscale) are NUMERIC_SHORT, then the
* numeric follows the NumericShort format; if they are NUMERIC_POS or
- * NUMERIC_NEG, it follows the NumericLong format. If they are NUMERIC_NAN,
+ * NUMERIC_NEG, it follows the NumericLong format. If they are NUMERIC_NAN,
* it is a NaN. We currently always store a NaN using just two bytes (i.e.
* only n_header), but previous releases used only the NumericLong format,
* so we might find 4-byte NaNs on disk if a database has been migrated using
- * pg_upgrade. In either case, when the high bits indicate a NaN, the
+ * pg_upgrade. In either case, when the high bits indicate a NaN, the
* remaining bits are never examined. Currently, we always initialize these
* to zero, but it might be possible to use them for some other purpose in
* the future.
@@ -207,19 +207,19 @@ struct NumericData
: ((n)->choice.n_long.n_weight))
/* ----------
- * NumericVar is the format we use for arithmetic. The digit-array part
+ * NumericVar is the format we use for arithmetic. The digit-array part
* is the same as the NumericData storage format, but the header is more
* complex.
*
* The value represented by a NumericVar is determined by the sign, weight,
* ndigits, and digits[] array.
* Note: the first digit of a NumericVar's value is assumed to be multiplied
- * by NBASE ** weight. Another way to say it is that there are weight+1
+ * by NBASE ** weight. Another way to say it is that there are weight+1
* digits before the decimal point. It is possible to have weight < 0.
*
* buf points at the physical start of the palloc'd digit buffer for the
- * NumericVar. digits points at the first digit in actual use (the one
- * with the specified weight). We normally leave an unused digit or two
+ * NumericVar. digits points at the first digit in actual use (the one
+ * with the specified weight). We normally leave an unused digit or two
* (preset to zeroes) between buf and digits, so that there is room to store
* a carry out of the top digit without reallocating space. We just need to
* decrement digits (and increment weight) to make room for the carry digit.
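
[Editorial note: a worked decoding of this layout, assuming NBASE = 10000: the value 123456.78 is stored as digits {12, 3456, 7800} with weight = 1, so weight+1 = 2 NBASE digits precede the decimal point. Sign and dscale are omitted from the sketch.]

#include <stdio.h>

int main(void)
{
    const double NBASE = 10000.0;
    short digits[] = {12, 3456, 7800};  /* 123456.78 */
    int weight = 1;
    int ndigits = 3;
    double value = 0.0;

    for (int i = 0; i < ndigits; i++)
    {
        double scale = 1.0;             /* digit i is scaled by NBASE^(weight - i) */

        for (int e = 0; e < weight - i; e++)
            scale *= NBASE;
        for (int e = 0; e > weight - i; e--)
            scale /= NBASE;
        value += digits[i] * scale;
    }
    printf("%.2f\n", value);            /* 123456.78 */
    return 0;
}
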
@@ -596,7 +596,7 @@ numeric_maximum_size(int32 typmod)
* In most cases, the size of a numeric will be smaller than the value
* computed below, because the varlena header will typically get toasted
* down to a single byte before being stored on disk, and it may also be
- * possible to use a short numeric header. But our job here is to compute
+ * possible to use a short numeric header. But our job here is to compute
* the worst case.
*/
return NUMERIC_HDRSZ + (numeric_digits * sizeof(NumericDigit));
@@ -716,7 +716,7 @@ numeric_send(PG_FUNCTION_ARGS)
*
* Flatten calls to numeric's length coercion function that solely represent
* increases in allowable precision. Scale changes mutate every datum, so
- * they are unoptimizable. Some values, e.g. 1E-1001, can only fit into an
+ * they are unoptimizable. Some values, e.g. 1E-1001, can only fit into an
* unconstrained numeric, so a change from an unconstrained numeric to any
* constrained numeric is also unoptimizable.
*/
@@ -746,7 +746,7 @@ numeric_transform(PG_FUNCTION_ARGS)
* If new_typmod < VARHDRSZ, the destination is unconstrained; that's
* always OK. If old_typmod >= VARHDRSZ, the source is constrained,
* and we're OK if the scale is unchanged and the precision is not
- * decreasing. See further notes in function header comment.
+ * decreasing. See further notes in function header comment.
*/
if (new_typmod < (int32) VARHDRSZ ||
(old_typmod >= (int32) VARHDRSZ &&
@@ -958,7 +958,7 @@ numeric_uminus(PG_FUNCTION_ARGS)
/*
* The packed format is known to be totally zero digit trimmed always. So
- * we can identify a ZERO by the fact that there are no digits at all. Do
+ * we can identify a ZERO by the fact that there are no digits at all. Do
* nothing to a zero.
*/
if (NUMERIC_NDIGITS(num) != 0)
@@ -1934,7 +1934,7 @@ numeric_sqrt(PG_FUNCTION_ARGS)
PG_RETURN_NUMERIC(make_result(&const_nan));
/*
- * Unpack the argument and determine the result scale. We choose a scale
+ * Unpack the argument and determine the result scale. We choose a scale
* to give at least NUMERIC_MIN_SIG_DIGITS significant digits; but in any
* case not less than the input's dscale.
*/
@@ -1985,7 +1985,7 @@ numeric_exp(PG_FUNCTION_ARGS)
PG_RETURN_NUMERIC(make_result(&const_nan));
/*
- * Unpack the argument and determine the result scale. We choose a scale
+ * Unpack the argument and determine the result scale. We choose a scale
* to give at least NUMERIC_MIN_SIG_DIGITS significant digits; but in any
* case not less than the input's dscale.
*/
@@ -2568,9 +2568,9 @@ numeric_avg_accum(PG_FUNCTION_ARGS)
/*
* Integer data types all use Numeric accumulators to share code and
- * avoid risk of overflow. For int2 and int4 inputs, Numeric accumulation
+ * avoid risk of overflow. For int2 and int4 inputs, Numeric accumulation
* is overkill for the N and sum(X) values, but definitely not overkill
- * for the sum(X*X) value. Hence, we use int2_accum and int4_accum only
+ * for the sum(X*X) value. Hence, we use int2_accum and int4_accum only
* for stddev/variance --- there are faster special-purpose accumulator
* routines for SUM and AVG of these datatypes.
*/
@@ -2828,7 +2828,7 @@ numeric_stddev_pop(PG_FUNCTION_ARGS)
* the initial condition of the transition data value needs to be NULL. This
* means we can't rely on ExecAgg to automatically insert the first non-null
* data value into the transition data: it doesn't know how to do the type
- * conversion. The upshot is that these routines have to be marked non-strict
+ * conversion. The upshot is that these routines have to be marked non-strict
* and handle substitution of the first non-null input themselves.
*/
@@ -3226,7 +3226,7 @@ set_var_from_str(const char *str, const char *cp, NumericVar *dest)
/*
* We first parse the string to extract decimal digits and determine the
- * correct decimal weight. Then convert to NBASE representation.
+ * correct decimal weight. Then convert to NBASE representation.
*/
switch (*cp)
{
@@ -3834,7 +3834,7 @@ apply_typmod(NumericVar *var, int32 typmod)
/*
* Convert numeric to int8, rounding if needed.
*
- * If overflow, return FALSE (no error is raised). Return TRUE if okay.
+ * If overflow, return FALSE (no error is raised). Return TRUE if okay.
*/
static bool
numericvar_to_int8(NumericVar *var, int64 *result)
@@ -4305,7 +4305,7 @@ sub_var(NumericVar *var1, NumericVar *var2, NumericVar *result)
* mul_var() -
*
* Multiplication on variable level. Product of var1 * var2 is stored
- * in result. Result is rounded to no more than rscale fractional digits.
+ * in result. Result is rounded to no more than rscale fractional digits.
*/
static void
mul_var(NumericVar *var1, NumericVar *var2, NumericVar *result,
@@ -4349,7 +4349,7 @@ mul_var(NumericVar *var1, NumericVar *var2, NumericVar *result,
/*
* Determine number of result digits to compute. If the exact result
* would have more than rscale fractional digits, truncate the computation
- * with MUL_GUARD_DIGITS guard digits. We do that by pretending that one
+ * with MUL_GUARD_DIGITS guard digits. We do that by pretending that one
* or both inputs have fewer digits than they really do.
*/
res_ndigits = var1ndigits + var2ndigits + 1;
@@ -4592,7 +4592,7 @@ div_var(NumericVar *var1, NumericVar *var2, NumericVar *result,
*
* We need the first divisor digit to be >= NBASE/2. If it isn't,
* make it so by scaling up both the divisor and dividend by the
- * factor "d". (The reason for allocating dividend[0] above is to
+ * factor "d". (The reason for allocating dividend[0] above is to
* leave room for possible carry here.)
*/
if (divisor[1] < HALF_NBASE)
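
[Editorial note: a base-10 illustration of the normalization described here. The scale factor shown is the conventional Knuth choice, d = NBASE / (v1 + 1), stated here as an assumption; the quotient is unchanged because both operands scale together.]

#include <stdio.h>

int main(void)
{
    const int NBASE = 10;
    int dividend = 137;
    int divisor = 23;                    /* first divisor digit 2 < NBASE/2 */
    int d = NBASE / (2 + 1);             /* d = 3 */

    printf("%d/%d and %d/%d both give %d\n",
           dividend, divisor,
           dividend * d, divisor * d,    /* 411/69: first digit is now 6 >= 5 */
           (dividend * d) / (divisor * d));
    return 0;
}
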
@@ -4636,7 +4636,7 @@ div_var(NumericVar *var1, NumericVar *var2, NumericVar *result,
/*
* If next2digits are 0, then quotient digit must be 0 and there's
- * no need to adjust the working dividend. It's worth testing
+ * no need to adjust the working dividend. It's worth testing
* here to fall out ASAP when processing trailing zeroes in a
* dividend.
*/
@@ -4654,7 +4654,7 @@ div_var(NumericVar *var1, NumericVar *var2, NumericVar *result,
/*
* Adjust quotient digit if it's too large. Knuth proves that
* after this step, the quotient digit will be either correct or
- * just one too large. (Note: it's OK to use dividend[j+2] here
+ * just one too large. (Note: it's OK to use dividend[j+2] here
* because we know the divisor length is at least 2.)
*/
while (divisor2 * qhat >
@@ -4829,7 +4829,7 @@ div_var_fast(NumericVar *var1, NumericVar *var2, NumericVar *result,
* dividend's digits (plus appended zeroes to reach the desired precision
* including guard digits). Each step of the main loop computes an
* (approximate) quotient digit and stores it into div[], removing one
- * position of dividend space. A final pass of carry propagation takes
+ * position of dividend space. A final pass of carry propagation takes
* care of any mistaken quotient digits.
*/
div = (int *) palloc0((div_ndigits + 1) * sizeof(int));
@@ -5679,7 +5679,7 @@ power_var_int(NumericVar *base, int exp, NumericVar *result, int rscale)
/*
* The general case repeatedly multiplies base according to the bit
- * pattern of exp. We do the multiplications with some extra precision.
+ * pattern of exp. We do the multiplications with some extra precision.
*/
neg = (exp < 0);
exp = Abs(exp);
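The bit-pattern walk described in this comment is exponentiation by squaring. A self-contained sketch over plain doubles, leaving out the extra-precision bookkeeping and the INT_MIN edge case that the real code sidesteps with Abs():

    #include <stdbool.h>

    static double
    ipow(double base, int exp)
    {
        bool    neg = (exp < 0);
        double  result = 1.0;

        if (neg)
            exp = -exp;
        while (exp > 0)
        {
            if (exp & 1)
                result *= base;     /* low bit set: fold this power in */
            exp >>= 1;
            if (exp > 0)
                base *= base;       /* square for the next bit */
        }
        return neg ? 1.0 / result : result;
    }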
diff --git a/src/backend/utils/adt/oid.c b/src/backend/utils/adt/oid.c
index 399973b74b9..0373ea52ee5 100644
--- a/src/backend/utils/adt/oid.c
+++ b/src/backend/utils/adt/oid.c
@@ -318,7 +318,7 @@ oidparse(Node *node)
/*
* Values too large for int4 will be represented as Float
- * constants by the lexer. Accept these if they are valid OID
+ * constants by the lexer. Accept these if they are valid OID
* strings.
*/
return oidin_subr(strVal(node), NULL);
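A minimal sketch of that kind of re-parse, with parse_oid_str() as a hypothetical stand-in for oidin_subr(): accept only a clean decimal string that fits in an unsigned 32-bit OID:

    #include <errno.h>
    #include <limits.h>
    #include <stdbool.h>
    #include <stdlib.h>

    static bool
    parse_oid_str(const char *s, unsigned long *result)
    {
        char   *end;

        if (*s == '-')
            return false;           /* strtoul would silently negate */
        errno = 0;
        *result = strtoul(s, &end, 10);
        if (errno != 0 || end == s || *end != '\0')
            return false;           /* overflow, empty, or trailing junk */
    #if ULONG_MAX > 0xFFFFFFFFUL
        if (*result > 0xFFFFFFFFUL)
            return false;           /* fits in long but not in 32 bits */
    #endif
        return true;
    }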
diff --git a/src/backend/utils/adt/pg_locale.c b/src/backend/utils/adt/pg_locale.c
index 7081b00500b..9f0327bd1b1 100644
--- a/src/backend/utils/adt/pg_locale.c
+++ b/src/backend/utils/adt/pg_locale.c
@@ -20,12 +20,12 @@
*
* The other categories, LC_MONETARY, LC_NUMERIC, and LC_TIME are also
* settable at run-time. However, we don't actually set those locale
- * categories permanently. This would have bizarre effects like no
+ * categories permanently. This would have bizarre effects like no
* longer accepting standard floating-point literals in some locales.
* Instead, we only set the locales briefly when needed, cache the
* required information obtained from localeconv(), and set them back.
* The cached information is only used by the formatting functions
- * (to_char, etc.) and the money type. For the user, this should all be
+ * (to_char, etc.) and the money type. For the user, this should all be
* transparent.
*
* !!! NOW HEAR THIS !!!
@@ -39,7 +39,7 @@
* fail = true;
* setlocale(category, save);
* DOES NOT WORK RELIABLY: on some platforms the second setlocale() call
- * will change the memory save is pointing at. To do this sort of thing
+ * will change the memory save is pointing at. To do this sort of thing
* safely, you *must* pstrdup what setlocale returns the first time.
*
* FYI, The Open Group locale standard is defined here:
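A sketch of the safe save/restore pattern this warning prescribes, using plain strdup()/free() where backend code would use pstrdup(); LC_NUMERIC and the "C" locale are only example choices:

    #include <locale.h>
    #include <stdlib.h>
    #include <string.h>

    static void
    with_c_numeric(void (*work)(void))
    {
        char   *save = setlocale(LC_NUMERIC, NULL);

        if (save)
            save = strdup(save);    /* copy BEFORE the next setlocale() */
        setlocale(LC_NUMERIC, "C");
        work();                     /* locale-sensitive work happens here */
        if (save)
        {
            setlocale(LC_NUMERIC, save);    /* restore the original */
            free(save);
        }
    }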
@@ -225,7 +225,7 @@ pg_perm_setlocale(int category, const char *locale)
* Is the locale name valid for the locale category?
*
* If successful, and canonname isn't NULL, a palloc'd copy of the locale's
- * canonical name is stored there. This is especially useful for figuring out
+ * canonical name is stored there. This is especially useful for figuring out
* what locale name "" means (ie, the server environment value). (Actually,
* it seems that on most implementations that's the only thing it's good for;
* we could wish that setlocale gave back a canonically spelled version of
@@ -268,7 +268,7 @@ check_locale(int category, const char *locale, char **canonname)
*
* For most locale categories, the assign hook doesn't actually set the locale
* permanently, just reset flags so that the next use will cache the
- * appropriate values. (See explanation at the top of this file.)
+ * appropriate values. (See explanation at the top of this file.)
*
* Note: we accept value = "" as selecting the postmaster's environment
* value, whatever it was (so long as the environment setting is legal).
@@ -718,13 +718,13 @@ cache_locale_time(void)
* Convert a Windows setlocale() argument to a Unix-style one.
*
* Regardless of platform, we install message catalogs under a Unix-style
- * LL[_CC][.ENCODING][@VARIANT] naming convention. Only LC_MESSAGES settings
+ * LL[_CC][.ENCODING][@VARIANT] naming convention. Only LC_MESSAGES settings
* following that style will elicit localized interface strings.
*
* Before Visual Studio 2012 (msvcr110.dll), Windows setlocale() accepted "C"
* (but not "c") and strings of the form <Language>[_<Country>][.<CodePage>],
* case-insensitive. setlocale() returns the fully-qualified form; for
- * example, setlocale("thaI") returns "Thai_Thailand.874". Internally,
+ * example, setlocale("thaI") returns "Thai_Thailand.874". Internally,
* setlocale() and _create_locale() select a "locale identifier"[1] and store
* it in an undocumented _locale_t field. From that LCID, we can retrieve the
* ISO 639 language and the ISO 3166 country. Character encoding does not
@@ -735,12 +735,12 @@ cache_locale_time(void)
* Studio 2012, setlocale() accepts locale names in addition to the strings it
* accepted historically. It does not standardize them; setlocale("Th-tH")
* returns "Th-tH". setlocale(category, "") still returns a traditional
- * string. Furthermore, msvcr110.dll changed the undocumented _locale_t
+ * string. Furthermore, msvcr110.dll changed the undocumented _locale_t
* content to carry locale names instead of locale identifiers.
*
* MinGW headers declare _create_locale(), but msvcrt.dll lacks that symbol.
* IsoLocaleName() always fails in a MinGW-built postgres.exe, so only
- * Unix-style values of the lc_messages GUC can elicit localized messages. In
+ * Unix-style values of the lc_messages GUC can elicit localized messages. In
* particular, every lc_messages setting that initdb can select automatically
* will yield only C-locale messages. XXX This could be fixed by running the
* fully-qualified locale name through a lookup table.
@@ -784,7 +784,7 @@ IsoLocaleName(const char *winlocname)
* need not standardize letter case here. So long as we do not ship
* message catalogs for which it would matter, we also need not
* translate the script/variant portion, e.g. uz-Cyrl-UZ to
- * uz_UZ@cyrillic. Simply replace the hyphen with an underscore.
+ * uz_UZ@cyrillic. Simply replace the hyphen with an underscore.
*
* Note that the locale name can be less-specific than the value we
* would derive under earlier Visual Studio releases. For example,
@@ -839,7 +839,7 @@ IsoLocaleName(const char *winlocname)
* could fail if the locale is C, so str_tolower() shouldn't call it
* in that case.
*
- * Note that we currently lack any way to flush the cache. Since we don't
+ * Note that we currently lack any way to flush the cache. Since we don't
* support ALTER COLLATION, this is OK. The worst case is that someone
* drops a collation, and a useless cache entry hangs around in existing
* backends.
@@ -1033,7 +1033,7 @@ report_newlocale_failure(const char *localename)
/*
- * Create a locale_t from a collation OID. Results are cached for the
+ * Create a locale_t from a collation OID. Results are cached for the
* lifetime of the backend. Thus, do not free the result with freelocale().
*
* As a special optimization, the default/database collation returns 0.
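A one-entry cache sketch of that contract using POSIX newlocale(); the function name and the integer collation key are hypothetical, and the point is only that cached results live for the process lifetime and are never passed to freelocale():

    #include <locale.h>

    static unsigned int cached_collid = 0;
    static locale_t     cached_loc = (locale_t) 0;

    static locale_t
    locale_for_collation(unsigned int collid, const char *locale_name)
    {
        if (cached_loc != (locale_t) 0 && cached_collid == collid)
            return cached_loc;                  /* cache hit: reuse */
        cached_loc = newlocale(LC_COLLATE_MASK | LC_CTYPE_MASK,
                               locale_name, (locale_t) 0);
        cached_collid = collid;
        return cached_loc;                      /* 0 here means failure */
    }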
@@ -1216,7 +1216,7 @@ wchar2char(char *to, const wchar_t *from, size_t tolen, pg_locale_t locale)
* This has almost the API of mbstowcs_l(), except that *from need not be
* null-terminated; instead, the number of input bytes is specified as
* fromlen. Also, we ereport() rather than returning -1 for invalid
- * input encoding. tolen is the maximum number of wchar_t's to store at *to.
+ * input encoding. tolen is the maximum number of wchar_t's to store at *to.
* The output will be zero-terminated iff there is room.
*/
size_t
diff --git a/src/backend/utils/adt/pg_lzcompress.c b/src/backend/utils/adt/pg_lzcompress.c
index 66c64c198f0..70af1aa32c2 100644
--- a/src/backend/utils/adt/pg_lzcompress.c
+++ b/src/backend/utils/adt/pg_lzcompress.c
@@ -578,7 +578,7 @@ pglz_compress(const char *source, int32 slen, PGLZ_Header *dest,
/*
* If we've emitted more than first_success_by bytes without finding
- * anything compressible at all, fail. This lets us fall out
+ * anything compressible at all, fail. This lets us fall out
* reasonably quickly when looking at incompressible input (such as
* pre-compressed data).
*/
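A sketch of the bail-out test this comment describes; found_match, the output cursor bp/bstart (the "emitted" bytes), and strategy->first_success_by are assumptions borrowed from the surrounding function:

    if (!found_match && bp - bstart >= strategy->first_success_by)
        return false;       /* looks incompressible: store it raw instead */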
@@ -602,7 +602,7 @@ pglz_compress(const char *source, int32 slen, PGLZ_Header *dest,
hist_next, hist_recycle,
dp, dend);
dp++; /* Do not do this ++ in the line above! */
- /* The macro would do it four times - Jan. */
+ /* The macro would do it four times - Jan. */
}
found_match = true;
}
@@ -616,7 +616,7 @@ pglz_compress(const char *source, int32 slen, PGLZ_Header *dest,
hist_next, hist_recycle,
dp, dend);
dp++; /* Do not do this ++ in the line above! */
- /* The macro would do it four times - Jan. */
+ /* The macro would do it four times - Jan. */
}
}
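The twice-repeated "The macro would do it four times - Jan." warning is about multiple evaluation of macro arguments. A tiny self-contained illustration of the hazard, with MAX as a stand-in for the pglz output macros:

    #include <stdio.h>

    #define MAX(a, b)  ((a) > (b) ? (a) : (b))  /* evaluates one arg twice */

    int
    main(void)
    {
        int     i = 1;
        int     m = MAX(i++, 0);    /* expands to ((i++) > (0) ? (i++) : (0)) */

        printf("i=%d m=%d\n", i, m);    /* prints i=3 m=2: i++ ran twice */
        return 0;
    }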
diff --git a/src/backend/utils/adt/pseudotypes.c b/src/backend/utils/adt/pseudotypes.c
index 04650d8ba4a..5494d70c923 100644
--- a/src/backend/utils/adt/pseudotypes.c
+++ b/src/backend/utils/adt/pseudotypes.c
@@ -6,7 +6,7 @@
* A pseudo-type isn't really a type and never has any operations, but
* we do need to supply input and output functions to satisfy the links
* in the pseudo-type's entry in pg_type. In most cases the functions
- * just throw an error if invoked. (XXX the error messages here cover
+ * just throw an error if invoked. (XXX the error messages here cover
* the most common case, but might be confusing in some contexts. Can
* we do better?)
*
@@ -139,7 +139,7 @@ anyarray_out(PG_FUNCTION_ARGS)
* anyarray_recv - binary input routine for pseudo-type ANYARRAY.
*
* XXX this could actually be made to work, since the incoming array
- * data will contain the element type OID. Need to think through
+ * data will contain the element type OID. Need to think through
* type-safety issues before allowing it, however.
*/
Datum
@@ -216,7 +216,7 @@ anyrange_out(PG_FUNCTION_ARGS)
* void_in - input routine for pseudo-type VOID.
*
* We allow this so that PL functions can return VOID without any special
- * hack in the PL handler. Whatever value the PL thinks it's returning
+ * hack in the PL handler. Whatever value the PL thinks it's returning
* will just be ignored.
*/
Datum
diff --git a/src/backend/utils/adt/rangetypes.c b/src/backend/utils/adt/rangetypes.c
index 023df8c3e9e..3da08bb8370 100644
--- a/src/backend/utils/adt/rangetypes.c
+++ b/src/backend/utils/adt/rangetypes.c
@@ -1441,7 +1441,7 @@ tstzrange_subdiff(PG_FUNCTION_ARGS)
*
* This is for use by range-related functions that follow the convention
* of using the fn_extra field as a pointer to the type cache entry for
- * the range type. Functions that need to cache more information than
+ * the range type. Functions that need to cache more information than
* that must fend for themselves.
*/
TypeCacheEntry *
@@ -1465,7 +1465,7 @@ range_get_typcache(FunctionCallInfo fcinfo, Oid rngtypid)
* range_serialize: construct a range value from bounds and empty-flag
*
* This does not force canonicalization of the range value. In most cases,
- * external callers should only be canonicalization functions. Note that
+ * external callers should only be canonicalization functions. Note that
* we perform some datatype-independent canonicalization checks anyway.
*/
RangeType *
@@ -1802,7 +1802,7 @@ range_cmp_bounds(TypeCacheEntry *typcache, RangeBound *b1, RangeBound *b2)
* Compare two range boundary point values, returning <0, 0, or >0 according
* to whether b1 is less than, equal to, or greater than b2.
*
- * This is similar to but simpler than range_cmp_bounds(). We just compare
+ * This is similar to but simpler than range_cmp_bounds(). We just compare
* the values held in b1 and b2, ignoring inclusive/exclusive flags. The
* lower/upper flags only matter for infinities, where they tell us if the
* infinity is plus or minus.
@@ -2283,7 +2283,7 @@ range_contains_elem_internal(TypeCacheEntry *typcache, RangeType *r, Datum val)
/*
* datum_compute_size() and datum_write() are used to insert the bound
- * values into a range object. They are modeled after heaptuple.c's
+ * values into a range object. They are modeled after heaptuple.c's
* heap_compute_data_size() and heap_fill_tuple(), but we need not handle
* null values here. TYPE_IS_PACKABLE must test the same conditions as
* heaptuple.c's ATT_IS_PACKABLE macro.
diff --git a/src/backend/utils/adt/rangetypes_gist.c b/src/backend/utils/adt/rangetypes_gist.c
index 464b37fe1fd..ad659a1a89b 100644
--- a/src/backend/utils/adt/rangetypes_gist.c
+++ b/src/backend/utils/adt/rangetypes_gist.c
@@ -300,7 +300,7 @@ range_gist_penalty(PG_FUNCTION_ARGS)
else if (orig_lower.infinite && orig_upper.infinite)
{
/*
- * Original range requires broadening. (-inf; +inf) is the farthest
+ * Original range requires broadening. (-inf; +inf) is the farthest
* from a normal range in this case.
*/
*penalty = 2 * CONTAIN_EMPTY_PENALTY;
@@ -497,7 +497,7 @@ range_gist_penalty(PG_FUNCTION_ARGS)
/*
* The GiST PickSplit method for ranges
*
- * Primarily, we try to segregate ranges of different classes. If splitting
+ * Primarily, we try to segregate ranges of different classes. If splitting
* ranges of the same class, use the appropriate split method for that class.
*/
Datum
@@ -668,7 +668,7 @@ range_gist_same(PG_FUNCTION_ARGS)
/*
* range_eq will ignore the RANGE_CONTAIN_EMPTY flag, so we have to check
- * that for ourselves. More generally, if the entries have been properly
+ * that for ourselves. More generally, if the entries have been properly
* normalized, then unequal flags bytes must mean unequal ranges ... so
* let's just test all the flag bits at once.
*/
@@ -816,7 +816,7 @@ range_gist_consistent_int(TypeCacheEntry *typcache, StrategyNumber strategy,
/*
* Empty ranges are contained by anything, so if key is or
- * contains any empty ranges, we must descend into it. Otherwise,
+ * contains any empty ranges, we must descend into it. Otherwise,
* descend only if key overlaps the query.
*/
if (RangeIsOrContainsEmpty(key))
diff --git a/src/backend/utils/adt/regexp.c b/src/backend/utils/adt/regexp.c
index 4dbb1163bd3..30a05e6d584 100644
--- a/src/backend/utils/adt/regexp.c
+++ b/src/backend/utils/adt/regexp.c
@@ -142,7 +142,7 @@ RE_compile_and_cache(text *text_re, int cflags, Oid collation)
char errMsg[100];
/*
- * Look for a match among previously compiled REs. Since the data
+ * Look for a match among previously compiled REs. Since the data
* structure is self-organizing with most-used entries at the front, our
* search strategy can just be to scan from the front.
*/
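A self-contained sketch of such a self-organizing (move-to-front) scan; the integer key and entry layout are simplified stand-ins for the real pattern/flags/collation lookup:

    #include <string.h>

    #define MAX_CACHED 32

    typedef struct
    {
        int     key;            /* stands in for pattern+flags+collation */
        void   *compiled;
    } cached_re_t;

    static cached_re_t re_cache[MAX_CACHED];
    static int  num_cached = 0;

    static void *
    cache_lookup(int key)
    {
        for (int i = 0; i < num_cached; i++)
        {
            if (re_cache[i].key == key)
            {
                cached_re_t hit = re_cache[i];

                /* shift earlier entries right, put the hit at the front */
                memmove(&re_cache[1], &re_cache[0], i * sizeof(cached_re_t));
                re_cache[0] = hit;
                return re_cache[0].compiled;
            }
        }
        return NULL;            /* miss: caller compiles, inserts at front */
    }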
@@ -192,7 +192,7 @@ RE_compile_and_cache(text *text_re, int cflags, Oid collation)
/*
* Here and in other places in this file, do CHECK_FOR_INTERRUPTS
- * before reporting a regex error. This is so that if the regex
+ * before reporting a regex error. This is so that if the regex
* library aborts and returns REG_CANCEL, we don't print an error
* message that implies the regex was invalid.
*/
@@ -298,7 +298,7 @@ RE_wchar_execute(regex_t *re, pg_wchar *data, int data_len,
* dat_len --- the length of the data string
* nmatch, pmatch --- optional return area for match details
*
- * Data is given in the database encoding. We internally
+ * Data is given in the database encoding. We internally
* convert to array of pg_wchar which is what Spencer's regex package wants.
*/
static bool
diff --git a/src/backend/utils/adt/regproc.c b/src/backend/utils/adt/regproc.c
index 0d1ff61bf9f..88b1c2b5648 100644
--- a/src/backend/utils/adt/regproc.c
+++ b/src/backend/utils/adt/regproc.c
@@ -85,7 +85,7 @@ regprocin(PG_FUNCTION_ARGS)
/*
* In bootstrap mode we assume the given name is not schema-qualified, and
- * just search pg_proc for a unique match. This is needed for
+ * just search pg_proc for a unique match. This is needed for
* initializing other system catalogs (pg_namespace may not exist yet, and
* certainly there are no schemas other than pg_catalog).
*/
@@ -270,7 +270,7 @@ regprocedurein(PG_FUNCTION_ARGS)
/*
* Else it's a name and arguments. Parse the name and arguments, look up
* potential matches in the current namespace search list, and scan to see
- * which one exactly matches the given argument types. (There will not be
+ * which one exactly matches the given argument types. (There will not be
* more than one match.)
*
* XXX at present, this code will not work in bootstrap mode, hence this
@@ -319,7 +319,7 @@ format_procedure_qualified(Oid procedure_oid)
* Routine to produce regprocedure names; see format_procedure above.
*
* force_qualify says whether to schema-qualify; if true, the name is always
- * qualified regardless of search_path visibility. Otherwise the name is only
+ * qualified regardless of search_path visibility. Otherwise the name is only
* qualified if the function is not in path.
*/
static char *
@@ -453,7 +453,7 @@ regoperin(PG_FUNCTION_ARGS)
/*
* In bootstrap mode we assume the given name is not schema-qualified, and
- * just search pg_operator for a unique match. This is needed for
+ * just search pg_operator for a unique match. This is needed for
* initializing other system catalogs (pg_namespace may not exist yet, and
* certainly there are no schemas other than pg_catalog).
*/
@@ -642,7 +642,7 @@ regoperatorin(PG_FUNCTION_ARGS)
/*
* Else it's a name and arguments. Parse the name and arguments, look up
* potential matches in the current namespace search list, and scan to see
- * which one exactly matches the given argument types. (There will not be
+ * which one exactly matches the given argument types. (There will not be
* more than one match.)
*
* XXX at present, this code will not work in bootstrap mode, hence this
@@ -897,7 +897,7 @@ regclassout(PG_FUNCTION_ARGS)
/*
* In bootstrap mode, skip the fancy namespace stuff and just return
- * the class name. (This path is only needed for debugging output
+ * the class name. (This path is only needed for debugging output
* anyway.)
*/
if (IsBootstrapProcessingMode())
@@ -1389,7 +1389,7 @@ stringToQualifiedNameList(const char *string)
/*
* Given a C string, parse it into a qualified function or operator name
- * followed by a parenthesized list of type names. Reduce the
+ * followed by a parenthesized list of type names. Reduce the
* type names to an array of OIDs (returned into *nargs and *argtypes;
* the argtypes array should be of size FUNC_MAX_ARGS). The function or
* operator name is returned to *names as a List of Strings.
diff --git a/src/backend/utils/adt/ri_triggers.c b/src/backend/utils/adt/ri_triggers.c
index 65edc1fb04e..100e770bc7e 100644
--- a/src/backend/utils/adt/ri_triggers.c
+++ b/src/backend/utils/adt/ri_triggers.c
@@ -698,7 +698,7 @@ ri_restrict_del(TriggerData *trigdata, bool is_no_action)
/*
* If another PK row now exists providing the old key values, we
- * should not do anything. However, this check should only be
+ * should not do anything. However, this check should only be
* made in the NO ACTION case; in RESTRICT cases we don't wish to
* allow another row to be substituted.
*/
@@ -922,7 +922,7 @@ ri_restrict_upd(TriggerData *trigdata, bool is_no_action)
/*
* If another PK row now exists providing the old key values, we
- * should not do anything. However, this check should only be
+ * should not do anything. However, this check should only be
* made in the NO ACTION case; in RESTRICT cases we don't wish to
* allow another row to be substituted.
*/
@@ -1850,7 +1850,7 @@ RI_FKey_setdefault_del(PG_FUNCTION_ARGS)
* believe no check is necessary. So we need to do another lookup
* now and in case a reference still exists, abort the operation.
* That is already implemented in the NO ACTION trigger, so just
- * run it. (This recheck is only needed in the SET DEFAULT case,
+ * run it. (This recheck is only needed in the SET DEFAULT case,
* since CASCADE would remove such rows, while SET NULL is certain
* to result in rows that satisfy the FK constraint.)
*/
@@ -2041,7 +2041,7 @@ RI_FKey_setdefault_upd(PG_FUNCTION_ARGS)
* believe no check is necessary. So we need to do another lookup
* now and in case a reference still exists, abort the operation.
* That is already implemented in the NO ACTION trigger, so just
- * run it. (This recheck is only needed in the SET DEFAULT case,
+ * run it. (This recheck is only needed in the SET DEFAULT case,
* since CASCADE must change the FK key values, while SET NULL is
* certain to result in rows that satisfy the FK constraint.)
*/
@@ -2397,7 +2397,7 @@ RI_Initial_Check(Trigger *trigger, Relation fk_rel, Relation pk_rel)
* Temporarily increase work_mem so that the check query can be executed
* more efficiently. It seems okay to do this because the query is simple
* enough to not use a multiple of work_mem, and one typically would not
- * have many large foreign-key validations happening concurrently. So
+ * have many large foreign-key validations happening concurrently. So
* this seems to meet the criteria for being considered a "maintenance"
* operation, and accordingly we use maintenance_work_mem.
*
@@ -2451,7 +2451,7 @@ RI_Initial_Check(Trigger *trigger, Relation fk_rel, Relation pk_rel)
/*
* The columns to look at in the result tuple are 1..N, not whatever
- * they are in the fk_rel. Hack up riinfo so that the subroutines
+ * they are in the fk_rel. Hack up riinfo so that the subroutines
* called here will behave properly.
*
* In addition to this, we have to pass the correct tupdesc to
@@ -3180,7 +3180,7 @@ ri_ReportViolation(const RI_ConstraintInfo *riinfo,
errhint("This is most likely due to a rule having rewritten the query.")));
/*
- * Determine which relation to complain about. If tupdesc wasn't passed
+ * Determine which relation to complain about. If tupdesc wasn't passed
* by caller, assume the violator tuple came from there.
*/
onfk = (queryno == RI_PLAN_CHECK_LOOKUPPK);
diff --git a/src/backend/utils/adt/rowtypes.c b/src/backend/utils/adt/rowtypes.c
index 1ed0c5b205a..36ef4eda85e 100644
--- a/src/backend/utils/adt/rowtypes.c
+++ b/src/backend/utils/adt/rowtypes.c
@@ -278,7 +278,7 @@ record_in(PG_FUNCTION_ARGS)
/*
* We cannot return tuple->t_data because heap_form_tuple allocates it as
* part of a larger chunk, and our caller may expect to be able to pfree
- * our result. So must copy the info into a new palloc chunk.
+ * our result. So must copy the info into a new palloc chunk.
*/
result = (HeapTupleHeader) palloc(tuple->t_len);
memcpy(result, tuple->t_data, tuple->t_len);
@@ -622,7 +622,7 @@ record_recv(PG_FUNCTION_ARGS)
/*
* We cannot return tuple->t_data because heap_form_tuple allocates it as
* part of a larger chunk, and our caller may expect to be able to pfree
- * our result. So must copy the info into a new palloc chunk.
+ * our result. So must copy the info into a new palloc chunk.
*/
result = (HeapTupleHeader) palloc(tuple->t_len);
memcpy(result, tuple->t_data, tuple->t_len);
@@ -860,7 +860,7 @@ record_cmp(FunctionCallInfo fcinfo)
/*
* Scan corresponding columns, allowing for dropped columns in different
- * places in the two rows. i1 and i2 are physical column indexes, j is
+ * places in the two rows. i1 and i2 are physical column indexes, j is
* the logical column index.
*/
i1 = i2 = j = 0;
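A sketch of that scanning pattern, with ncols1/ncols2, dropped1/dropped2, and compare_column() as hypothetical stand-ins for the tuple-descriptor machinery:

    #include <stdbool.h>

    static int
    compare_rows(int ncols1, const bool *dropped1,
                 int ncols2, const bool *dropped2,
                 int (*compare_column)(int j, int i1, int i2))
    {
        int     i1 = 0,
                i2 = 0,
                j = 0;

        while (i1 < ncols1 && i2 < ncols2)
        {
            int     cmp;

            while (i1 < ncols1 && dropped1[i1])
                i1++;           /* skip dropped column in row 1 */
            while (i2 < ncols2 && dropped2[i2])
                i2++;           /* skip dropped column in row 2 */
            if (i1 >= ncols1 || i2 >= ncols2)
                break;
            cmp = compare_column(j, i1, i2);
            if (cmp != 0)
                return cmp;
            i1++, i2++, j++;    /* next logical column pair */
        }
        return 0;               /* tie-breaking on column count omitted */
    }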
@@ -1095,7 +1095,7 @@ record_eq(PG_FUNCTION_ARGS)
/*
* Scan corresponding columns, allowing for dropped columns in different
- * places in the two rows. i1 and i2 are physical column indexes, j is
+ * places in the two rows. i1 and i2 are physical column indexes, j is
* the logical column index.
*/
i1 = i2 = j = 0;
diff --git a/src/backend/utils/adt/ruleutils.c b/src/backend/utils/adt/ruleutils.c
index 9e451349489..bbd575077d9 100644
--- a/src/backend/utils/adt/ruleutils.c
+++ b/src/backend/utils/adt/ruleutils.c
@@ -153,11 +153,11 @@ typedef struct
*
* Selecting aliases is unreasonably complicated because of the need to dump
* rules/views whose underlying tables may have had columns added, deleted, or
- * renamed since the query was parsed. We must nonetheless print the rule/view
+ * renamed since the query was parsed. We must nonetheless print the rule/view
* in a form that can be reloaded and will produce the same results as before.
*
* For each RTE used in the query, we must assign column aliases that are
- * unique within that RTE. SQL does not require this of the original query,
+ * unique within that RTE. SQL does not require this of the original query,
* but due to factors such as *-expansion we need to be able to uniquely
* reference every column in a decompiled query. As long as we qualify all
* column references, per-RTE uniqueness is sufficient for that.
@@ -212,8 +212,8 @@ typedef struct
/*
* new_colnames is an array containing column aliases to use for columns
* that would exist if the query was re-parsed against the current
- * definitions of its base tables. This is what to print as the column
- * alias list for the RTE. This array does not include dropped columns,
+ * definitions of its base tables. This is what to print as the column
+ * alias list for the RTE. This array does not include dropped columns,
* but it will include columns added since original parsing. Indexes in
* it therefore have little to do with current varattno values. As above,
* entries are unique unless this is for an unnamed JOIN RTE. (In such an
@@ -1075,7 +1075,7 @@ pg_get_indexdef_worker(Oid indexrelid, int colno,
context = deparse_context_for(get_relation_name(indrelid), indrelid);
/*
- * Start the index definition. Note that the index's name should never be
+ * Start the index definition. Note that the index's name should never be
* schema-qualified, but the indexed rel's name may be.
*/
initStringInfo(&buf);
@@ -1776,7 +1776,7 @@ pg_get_serial_sequence(PG_FUNCTION_ARGS)
SysScanDesc scan;
HeapTuple tup;
- /* Look up table name. Can't lock it - we might not have privileges. */
+ /* Look up table name. Can't lock it - we might not have privileges. */
tablerv = makeRangeVarFromNameList(textToQualifiedNameList(tablename));
tableOid = RangeVarGetRelid(tablerv, NoLock, false);
@@ -2297,7 +2297,7 @@ deparse_expression(Node *expr, List *dpcontext,
* tree (ie, not the raw output of gram.y).
*
* dpcontext is a list of deparse_namespace nodes representing the context
- * for interpreting Vars in the node tree. It can be NIL if no Vars are
+ * for interpreting Vars in the node tree. It can be NIL if no Vars are
* expected.
*
* forceprefix is TRUE to force all Vars to be prefixed with their table names.
@@ -2337,7 +2337,7 @@ deparse_expression_pretty(Node *expr, List *dpcontext,
*
* Given the reference name (alias) and OID of a relation, build deparsing
* context for an expression referencing only that relation (as varno 1,
- * varlevelsup 0). This is sufficient for many uses of deparse_expression.
+ * varlevelsup 0). This is sufficient for many uses of deparse_expression.
* ----------
*/
List *
@@ -2408,7 +2408,7 @@ deparse_context_for_planstate(Node *planstate, List *ancestors,
dpns->ctes = NIL;
/*
- * Set up column name aliases. We will get rather bogus results for join
+ * Set up column name aliases. We will get rather bogus results for join
* RTEs, but that doesn't matter because plan trees don't contain any join
* alias Vars.
*/
@@ -2966,7 +2966,7 @@ set_relation_column_names(deparse_namespace *dpns, RangeTblEntry *rte,
/*
* Scan the columns, select a unique alias for each one, and store it in
* colinfo->colnames and colinfo->new_colnames. The former array has NULL
- * entries for dropped columns, the latter omits them. Also mark
+ * entries for dropped columns, the latter omits them. Also mark
* new_colnames entries as to whether they are new since parse time; this
* is the case for entries beyond the length of rte->eref->colnames.
*/
@@ -3021,7 +3021,7 @@ set_relation_column_names(deparse_namespace *dpns, RangeTblEntry *rte,
/*
* For a relation RTE, we need only print the alias column names if any
- * are different from the underlying "real" names. For a function RTE,
+ * are different from the underlying "real" names. For a function RTE,
* always emit a complete column alias list; this is to protect against
* possible instability of the default column names (eg, from altering
* parameter names). For other RTE types, print if we changed anything OR
@@ -3484,7 +3484,7 @@ identify_join_columns(JoinExpr *j, RangeTblEntry *jrte,
/*
* If there's a USING clause, deconstruct the join quals to identify the
- * merged columns. This is a tad painful but if we cannot rely on the
+ * merged columns. This is a tad painful but if we cannot rely on the
* column names, there is no other representation of which columns were
* joined by USING. (Unless the join type is FULL, we can't tell from the
* joinaliasvars list which columns are merged.) Note: we assume that the
@@ -3618,7 +3618,7 @@ set_deparse_planstate(deparse_namespace *dpns, PlanState *ps)
* We special-case Append and MergeAppend to pretend that the first child
* plan is the OUTER referent; we have to interpret OUTER Vars in their
* tlists according to one of the children, and the first one is the most
- * natural choice. Likewise special-case ModifyTable to pretend that the
+ * natural choice. Likewise special-case ModifyTable to pretend that the
* first child plan is the OUTER referent; this is to support RETURNING
* lists containing references to non-target relations.
*/
@@ -4034,8 +4034,8 @@ get_query_def(Query *query, StringInfo buf, List *parentnamespace,
/*
* Before we begin to examine the query, acquire locks on referenced
- * relations, and fix up deleted columns in JOIN RTEs. This ensures
- * consistent results. Note we assume it's OK to scribble on the passed
+ * relations, and fix up deleted columns in JOIN RTEs. This ensures
+ * consistent results. Note we assume it's OK to scribble on the passed
* querytree!
*
* We are only deparsing the query (we are not about to execute it), so we
@@ -4508,7 +4508,7 @@ get_target_list(List *targetList, deparse_context *context,
}
/*
- * Figure out what the result column should be called. In the context
+ * Figure out what the result column should be called. In the context
* of a view, use the view's tuple descriptor (so as to pick up the
* effects of any column RENAME that's been done on the view).
* Otherwise, just use what we can find in the TLE.
@@ -4730,7 +4730,7 @@ get_rule_sortgroupclause(SortGroupClause *srt, List *tlist, bool force_colno,
* expression is a constant, force it to be dumped with an explicit cast
* as decoration --- this is because a simple integer constant is
* ambiguous (and will be misinterpreted by findTargetlistEntry()) if we
- * dump it without any decoration. Otherwise, just dump the expression
+ * dump it without any decoration. Otherwise, just dump the expression
* normally.
*/
if (force_colno)
@@ -5425,8 +5425,8 @@ get_variable(Var *var, int levelsup, bool istoplevel, deparse_context *context)
/*
* If it's an unnamed join, look at the expansion of the alias variable.
* If it's a simple reference to one of the input vars, then recursively
- * print the name of that var instead. When it's not a simple reference,
- * we have to just print the unqualified join column name. (This can only
+ * print the name of that var instead. When it's not a simple reference,
+ * we have to just print the unqualified join column name. (This can only
* happen with "dangerous" merged columns in a JOIN USING; we took pains
* previously to make the unqualified column name unique in such cases.)
*
@@ -5454,7 +5454,7 @@ get_variable(Var *var, int levelsup, bool istoplevel, deparse_context *context)
/*
* Unnamed join has no refname. (Note: since it's unnamed, there is
* no way the user could have referenced it to create a whole-row Var
- * for it. So we don't have to cover that case below.)
+ * for it. So we don't have to cover that case below.)
*/
Assert(refname == NULL);
}
@@ -5495,7 +5495,7 @@ get_variable(Var *var, int levelsup, bool istoplevel, deparse_context *context)
/*
- * Get the name of a field of an expression of composite type. The
+ * Get the name of a field of an expression of composite type. The
* expression is usually a Var, but we handle other cases too.
*
* levelsup is an extra offset to interpret the Var's varlevelsup correctly.
@@ -5505,7 +5505,7 @@ get_variable(Var *var, int levelsup, bool istoplevel, deparse_context *context)
* could also be RECORD. Since no actual table or view column is allowed to
* have type RECORD, a Var of type RECORD must refer to a JOIN or FUNCTION RTE
* or to a subquery output. We drill down to find the ultimate defining
- * expression and attempt to infer the field name from it. We ereport if we
+ * expression and attempt to infer the field name from it. We ereport if we
* can't determine the name.
*
* Similarly, a PARAM of type RECORD has to refer to some expression of
@@ -5870,7 +5870,7 @@ get_name_for_var_field(Var *var, int fieldno,
/*
* We now have an expression we can't expand any more, so see if
- * get_expr_result_type() can do anything with it. If not, pass to
+ * get_expr_result_type() can do anything with it. If not, pass to
* lookup_rowtype_tupdesc() which will probably fail, but will give an
* appropriate error message while failing.
*/
@@ -5888,7 +5888,7 @@ get_name_for_var_field(Var *var, int fieldno,
* reference a parameter supplied by an upper NestLoop or SubPlan plan node.
*
* If successful, return the expression and set *dpns_p and *ancestor_cell_p
- * appropriately for calling push_ancestor_plan(). If no referent can be
+ * appropriately for calling push_ancestor_plan(). If no referent can be
* found, return NULL.
*/
static Node *
@@ -6020,7 +6020,7 @@ get_parameter(Param *param, deparse_context *context)
/*
* If it's a PARAM_EXEC parameter, try to locate the expression from which
- * the parameter was computed. Note that failing to find a referent isn't
+ * the parameter was computed. Note that failing to find a referent isn't
* an error, since the Param might well be a subplan output rather than an
* input.
*/
@@ -6498,10 +6498,10 @@ get_rule_expr(Node *node, deparse_context *context,
/*
* If there's a refassgnexpr, we want to print the node in the
- * format "array[subscripts] := refassgnexpr". This is not
+ * format "array[subscripts] := refassgnexpr". This is not
* legal SQL, so decompilation of INSERT or UPDATE statements
* should always use processIndirection as part of the
- * statement-level syntax. We should only see this when
+ * statement-level syntax. We should only see this when
* EXPLAIN tries to print the targetlist of a plan resulting
* from such a statement.
*/
@@ -6660,7 +6660,7 @@ get_rule_expr(Node *node, deparse_context *context,
/*
* We cannot see an already-planned subplan in rule deparsing,
- * only while EXPLAINing a query plan. We don't try to
+ * only while EXPLAINing a query plan. We don't try to
* reconstruct the original SQL, just reference the subplan
* that appears elsewhere in EXPLAIN's result.
*/
@@ -6733,14 +6733,14 @@ get_rule_expr(Node *node, deparse_context *context,
* There is no good way to represent a FieldStore as real SQL,
* so decompilation of INSERT or UPDATE statements should
* always use processIndirection as part of the
- * statement-level syntax. We should only get here when
+ * statement-level syntax. We should only get here when
* EXPLAIN tries to print the targetlist of a plan resulting
* from such a statement. The plan case is even harder than
* ordinary rules would be, because the planner tries to
* collapse multiple assignments to the same field or subfield
* into one FieldStore; so we can see a list of target fields
* not just one, and the arguments could be FieldStores
- * themselves. We don't bother to try to print the target
+ * themselves. We don't bother to try to print the target
* field names; we just print the source arguments, with a
* ROW() around them if there's more than one. This isn't
* terribly complete, but it's probably good enough for
@@ -7642,7 +7642,7 @@ get_coercion_expr(Node *arg, deparse_context *context,
* Since parse_coerce.c doesn't immediately collapse application of
* length-coercion functions to constants, what we'll typically see in
* such cases is a Const with typmod -1 and a length-coercion function
- * right above it. Avoid generating redundant output. However, beware of
+ * right above it. Avoid generating redundant output. However, beware of
* suppressing casts when the user actually wrote something like
* 'foo'::text::char(3).
*/
@@ -7724,7 +7724,7 @@ get_const_expr(Const *constval, deparse_context *context, int showtype)
/*
* These types are printed without quotes unless they contain
* values that aren't accepted by the scanner unquoted (e.g.,
- * 'NaN'). Note that strtod() and friends might accept NaN,
+ * 'NaN'). Note that strtod() and friends might accept NaN,
* so we can't use that to test.
*
* In reality we only need to defend against infinity and NaN,
@@ -8156,7 +8156,7 @@ get_from_clause_item(Node *jtnode, Query *query, deparse_context *context)
else if (rte->rtekind == RTE_FUNCTION)
{
/*
- * For a function RTE, always print alias. This covers possible
+ * For a function RTE, always print alias. This covers possible
* renaming of the function and/or instability of the
* FigureColname rules for things that aren't simple functions.
* Also note we'd need to force it anyway for the RECORD case.
@@ -8398,7 +8398,7 @@ get_opclass_name(Oid opclass, Oid actual_datatype,
if (!OidIsValid(actual_datatype) ||
GetDefaultOpClass(actual_datatype, opcrec->opcmethod) != opclass)
{
- /* Okay, we need the opclass name. Do we need to qualify it? */
+ /* Okay, we need the opclass name. Do we need to qualify it? */
opcname = NameStr(opcrec->opcname);
if (OpclassIsVisible(opclass))
appendStringInfo(buf, " %s", quote_identifier(opcname));
@@ -8693,13 +8693,13 @@ generate_relation_name(Oid relid, List *namespaces)
* generate_function_name
* Compute the name to display for a function specified by OID,
* given that it is being called with the specified actual arg names and
- * types. (Those matter because of ambiguous-function resolution rules.)
+ * types. (Those matter because of ambiguous-function resolution rules.)
*
* If we're dealing with a potentially variadic function (in practice, this
* means a FuncExpr and not some other way of calling the function), then
* was_variadic should specify whether variadic arguments have been merged,
* and *use_variadic_p will be set to indicate whether to print VARIADIC in
- * the output. For non-FuncExpr cases, was_variadic should be FALSE and
+ * the output. For non-FuncExpr cases, was_variadic should be FALSE and
* use_variadic_p can be NULL.
*
* The result includes all necessary quoting and schema-prefixing.
diff --git a/src/backend/utils/adt/selfuncs.c b/src/backend/utils/adt/selfuncs.c
index 9982bf54d4b..cc93fd5a44e 100644
--- a/src/backend/utils/adt/selfuncs.c
+++ b/src/backend/utils/adt/selfuncs.c
@@ -72,7 +72,7 @@
* float8 oprjoin (internal, oid, internal, int2, internal);
*
* (Before Postgres 8.4, join estimators had only the first four of these
- * parameters. That signature is still allowed, but deprecated.) The
+ * parameters. That signature is still allowed, but deprecated.) The
* relationship between jointype and sjinfo is explained in the comments for
* clause_selectivity() --- the short version is that jointype is usually
* best ignored in favor of examining sjinfo.
@@ -209,7 +209,7 @@ static List *add_predicate_to_quals(IndexOptInfo *index, List *indexQuals);
*
* Note: this routine is also used to estimate selectivity for some
* operators that are not "=" but have comparable selectivity behavior,
- * such as "~=" (geometric approximate-match). Even for "=", we must
+ * such as "~=" (geometric approximate-match). Even for "=", we must
* keep in mind that the left and right datatypes may differ.
*/
Datum
@@ -273,7 +273,7 @@ var_eq_const(VariableStatData *vardata, Oid operator,
/*
* If we matched the var to a unique index or DISTINCT clause, assume
- * there is exactly one match regardless of anything else. (This is
+ * there is exactly one match regardless of anything else. (This is
* slightly bogus, since the index or clause's equality operator might be
* different from ours, but it's much more likely to be right than
* ignoring the information.)
@@ -296,7 +296,7 @@ var_eq_const(VariableStatData *vardata, Oid operator,
/*
* Is the constant "=" to any of the column's most common values?
* (Although the given operator may not really be "=", we will assume
- * that seeing whether it returns TRUE is an appropriate test. If you
+ * that seeing whether it returns TRUE is an appropriate test. If you
* don't like this, maybe you shouldn't be using eqsel for your
* operator...)
*/
@@ -408,7 +408,7 @@ var_eq_non_const(VariableStatData *vardata, Oid operator,
/*
* If we matched the var to a unique index or DISTINCT clause, assume
- * there is exactly one match regardless of anything else. (This is
+ * there is exactly one match regardless of anything else. (This is
* slightly bogus, since the index or clause's equality operator might be
* different from ours, but it's much more likely to be right than
* ignoring the information.)
@@ -432,7 +432,7 @@ var_eq_non_const(VariableStatData *vardata, Oid operator,
* result averaged over all possible values whether common or
* uncommon. (Essentially, we are assuming that the not-yet-known
* comparison value is equally likely to be any of the possible
- * values, regardless of their frequency in the table. Is that a good
+ * values, regardless of their frequency in the table. Is that a good
* idea?)
*/
selec = 1.0 - stats->stanullfrac;
@@ -655,7 +655,7 @@ mcv_selectivity(VariableStatData *vardata, FmgrInfo *opproc,
* essentially using the histogram just as a representative sample. However,
* small histograms are unlikely to be all that representative, so the caller
* should be prepared to fall back on some other estimation approach when the
- * histogram is missing or very small. It may also be prudent to combine this
+ * histogram is missing or very small. It may also be prudent to combine this
* approach with another one when the histogram is small.
*
* If the actual histogram size is not at least min_hist_size, we won't bother
@@ -673,7 +673,7 @@ mcv_selectivity(VariableStatData *vardata, FmgrInfo *opproc,
*
* Note that the result disregards both the most-common-values (if any) and
* null entries. The caller is expected to combine this result with
- * statistics for those portions of the column population. It may also be
+ * statistics for those portions of the column population. It may also be
* prudent to clamp the result range, ie, disbelieve exact 0 or 1 outputs.
*/
double
@@ -786,7 +786,7 @@ ineq_histogram_selectivity(PlannerInfo *root,
*
* If the binary search accesses the first or last histogram
* entry, we try to replace that endpoint with the true column min
- * or max as found by get_actual_variable_range(). This
+ * or max as found by get_actual_variable_range(). This
* ameliorates misestimates when the min or max is moving as a
* result of changes since the last ANALYZE. Note that this could
* result in effectively including MCVs into the histogram that
@@ -890,7 +890,7 @@ ineq_histogram_selectivity(PlannerInfo *root,
/*
* Watch out for the possibility that we got a NaN or
- * Infinity from the division. This can happen
+ * Infinity from the division. This can happen
* despite the previous checks, if for example "low"
* is -Infinity.
*/
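A self-contained sketch of that guard, assuming the fraction comes from a linear interpolation (val - low) / (high - low) and that the midpoint is a reasonable fallback on NaN:

    #include <math.h>

    static double
    safe_fraction(double val, double low, double high)
    {
        double  frac = (val - low) / (high - low);

        if (isnan(frac))
            return 0.5;         /* e.g. both operands infinite: midpoint */
        if (frac < 0.0)
            return 0.0;         /* also catches -Infinity */
        if (frac > 1.0)
            return 1.0;         /* also catches +Infinity */
        return frac;
    }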
@@ -905,7 +905,7 @@ ineq_histogram_selectivity(PlannerInfo *root,
* Ideally we'd produce an error here, on the grounds that
* the given operator shouldn't have scalarXXsel
* registered as its selectivity func unless we can deal
- * with its operand types. But currently, all manner of
+ * with its operand types. But currently, all manner of
* stuff is invoking scalarXXsel, so give a default
* estimate until that can be fixed.
*/
@@ -931,7 +931,7 @@ ineq_histogram_selectivity(PlannerInfo *root,
/*
* The histogram boundaries are only approximate to begin with,
- * and may well be out of date anyway. Therefore, don't believe
+ * and may well be out of date anyway. Therefore, don't believe
* extremely small or large selectivity estimates --- unless we
* got actual current endpoint values from the table.
*/
@@ -1128,7 +1128,7 @@ patternsel(PG_FUNCTION_ARGS, Pattern_Type ptype, bool negate)
/*
* If this is for a NOT LIKE or similar operator, get the corresponding
- * positive-match operator and work with that. Set result to the correct
+ * positive-match operator and work with that. Set result to the correct
* default estimate, too.
*/
if (negate)
@@ -1214,7 +1214,7 @@ patternsel(PG_FUNCTION_ARGS, Pattern_Type ptype, bool negate)
/*
* Pull out any fixed prefix implied by the pattern, and estimate the
- * fractional selectivity of the remainder of the pattern. Unlike many of
+ * fractional selectivity of the remainder of the pattern. Unlike many of
* the other functions in this file, we use the pattern operator's actual
* collation for this step. This is not because we expect the collation
* to make a big difference in the selectivity estimate (it seldom would),
@@ -1332,7 +1332,7 @@ patternsel(PG_FUNCTION_ARGS, Pattern_Type ptype, bool negate)
/*
* If we have most-common-values info, add up the fractions of the MCV
* entries that satisfy MCV OP PATTERN. These fractions contribute
- * directly to the result selectivity. Also add up the total fraction
+ * directly to the result selectivity. Also add up the total fraction
* represented by MCV entries.
*/
mcv_selec = mcv_selectivity(&vardata, &opproc, constval, true,
@@ -1838,7 +1838,7 @@ scalararraysel(PlannerInfo *root,
/*
* For generic operators, we assume the probability of success is
- * independent for each array element. But for "= ANY" or "<> ALL",
+ * independent for each array element. But for "= ANY" or "<> ALL",
* if the array elements are distinct (which'd typically be the case)
* then the probabilities are disjoint, and we should just sum them.
*
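As a worked sketch of the two combination rules (independent per-element trials for generic operators vs. disjoint events for "= ANY" over distinct elements):

    #include <math.h>

    /* generic OP ANY: success = complement of every element failing */
    static double
    sel_any_independent(double s, int n)
    {
        return 1.0 - pow(1.0 - s, (double) n);
    }

    /* "= ANY" with distinct elements: disjoint events simply add */
    static double
    sel_any_disjoint(const double *s, int n)
    {
        double  total = 0.0;

        for (int i = 0; i < n; i++)
            total += s[i];
        return total < 1.0 ? total : 1.0;
    }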
@@ -2253,9 +2253,9 @@ eqjoinsel_inner(Oid operator,
if (have_mcvs1 && have_mcvs2)
{
/*
- * We have most-common-value lists for both relations. Run through
+ * We have most-common-value lists for both relations. Run through
* the lists to see which MCVs actually join to each other with the
- * given operator. This allows us to determine the exact join
+ * given operator. This allows us to determine the exact join
* selectivity for the portion of the relations represented by the MCV
* lists. We still have to estimate for the remaining population, but
* in a skewed distribution this gives us a big leg up in accuracy.
@@ -2287,7 +2287,7 @@ eqjoinsel_inner(Oid operator,
/*
* Note we assume that each MCV will match at most one member of the
- * other MCV list. If the operator isn't really equality, there could
+ * other MCV list. If the operator isn't really equality, there could
* be multiple matches --- but we don't look for them, both for speed
* and because the math wouldn't add up...
*/
@@ -2452,7 +2452,7 @@ eqjoinsel_semi(Oid operator,
/*
* We clamp nd2 to be not more than what we estimate the inner relation's
- * size to be. This is intuitively somewhat reasonable since obviously
+ * size to be. This is intuitively somewhat reasonable since obviously
* there can't be more than that many distinct values coming from the
* inner rel. The reason for the asymmetry (ie, that we don't clamp nd1
* likewise) is that this is the only pathway by which restriction clauses
@@ -2497,9 +2497,9 @@ eqjoinsel_semi(Oid operator,
if (have_mcvs1 && have_mcvs2 && OidIsValid(operator))
{
/*
- * We have most-common-value lists for both relations. Run through
+ * We have most-common-value lists for both relations. Run through
* the lists to see which MCVs actually join to each other with the
- * given operator. This allows us to determine the exact join
+ * given operator. This allows us to determine the exact join
* selectivity for the portion of the relations represented by the MCV
* lists. We still have to estimate for the remaining population, but
* in a skewed distribution this gives us a big leg up in accuracy.
@@ -2530,7 +2530,7 @@ eqjoinsel_semi(Oid operator,
/*
* Note we assume that each MCV will match at most one member of the
- * other MCV list. If the operator isn't really equality, there could
+ * other MCV list. If the operator isn't really equality, there could
* be multiple matches --- but we don't look for them, both for speed
* and because the math wouldn't add up...
*/
@@ -2567,7 +2567,7 @@ eqjoinsel_semi(Oid operator,
/*
* Now we need to estimate the fraction of relation 1 that has at
- * least one join partner. We know for certain that the matched MCVs
+ * least one join partner. We know for certain that the matched MCVs
* do, so that gives us a lower bound, but we're really in the dark
* about everything else. Our crude approach is: if nd1 <= nd2 then
* assume all non-null rel1 rows have join partners, else assume for
@@ -3165,11 +3165,11 @@ add_unique_group_var(PlannerInfo *root, List *varinfos,
* case (all possible cross-product terms actually appear as groups) since
* very often the grouped-by Vars are highly correlated. Our current approach
* is as follows:
- * 1. Expressions yielding boolean are assumed to contribute two groups,
+ * 1. Expressions yielding boolean are assumed to contribute two groups,
* independently of their content, and are ignored in the subsequent
- * steps. This is mainly because tests like "col IS NULL" break the
+ * steps. This is mainly because tests like "col IS NULL" break the
* heuristic used in step 2 especially badly.
- * 2. Reduce the given expressions to a list of unique Vars used. For
+ * 2. Reduce the given expressions to a list of unique Vars used. For
* example, GROUP BY a, a + b is treated the same as GROUP BY a, b.
* It is clearly correct not to count the same Var more than once.
* It is also reasonable to treat f(x) the same as x: f() cannot
@@ -3179,14 +3179,14 @@ add_unique_group_var(PlannerInfo *root, List *varinfos,
* As a special case, if a GROUP BY expression can be matched to an
* expressional index for which we have statistics, then we treat the
* whole expression as though it were just a Var.
- * 3. If the list contains Vars of different relations that are known equal
+ * 3. If the list contains Vars of different relations that are known equal
* due to equivalence classes, then drop all but one of the Vars from each
* known-equal set, keeping the one with smallest estimated # of values
* (since the extra values of the others can't appear in joined rows).
* Note the reason we only consider Vars of different relations is that
* if we considered ones of the same rel, we'd be double-counting the
* restriction selectivity of the equality in the next step.
- * 4. For Vars within a single source rel, we multiply together the numbers
+ * 4. For Vars within a single source rel, we multiply together the numbers
* of values, clamp to the number of rows in the rel (divided by 10 if
* more than one Var), and then multiply by the selectivity of the
* restriction clauses for that rel. When there's more than one Var,
@@ -3197,7 +3197,7 @@ add_unique_group_var(PlannerInfo *root, List *varinfos,
* by the restriction selectivity is effectively assuming that the
* restriction clauses are independent of the grouping, which is a crummy
* assumption, but it's hard to do better.
- * 5. If there are Vars from multiple rels, we repeat step 4 for each such
+ * 5. If there are Vars from multiple rels, we repeat step 4 for each such
* rel, and multiply the results together.
* Note that rels not containing grouped Vars are ignored completely, as are
* join clauses. Such rels cannot increase the number of groups, and we
@@ -3228,7 +3228,7 @@ estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows)
return 1.0;
/*
- * Count groups derived from boolean grouping expressions. For other
+ * Count groups derived from boolean grouping expressions. For other
* expressions, find the unique Vars used, treating an expression as a Var
* if we can find stats for it. For each one, record the statistical
* estimate of number of distinct values (total in its table, without
@@ -3317,7 +3317,7 @@ estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows)
* Group Vars by relation and estimate total numdistinct.
*
* For each iteration of the outer loop, we process the frontmost Var in
- * varinfos, plus all other Vars in the same relation. We remove these
+ * varinfos, plus all other Vars in the same relation. We remove these
* Vars from the newvarinfos list for the next iteration. This is the
* easiest way to group Vars of same rel together.
*/
@@ -3418,11 +3418,11 @@ estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows)
* distribution, so this will have to do for now.
*
* We are passed the number of buckets the executor will use for the given
- * input relation. If the data were perfectly distributed, with the same
+ * input relation. If the data were perfectly distributed, with the same
* number of tuples going into each available bucket, then the bucketsize
* fraction would be 1/nbuckets. But this happy state of affairs will occur
* only if (a) there are at least nbuckets distinct data values, and (b)
- * we have a not-too-skewed data distribution. Otherwise the buckets will
+ * we have a not-too-skewed data distribution. Otherwise the buckets will
* be nonuniformly occupied. If the other relation in the join has a key
* distribution similar to this one's, then the most-loaded buckets are
* exactly those that will be probed most often. Therefore, the "average"
@@ -3595,7 +3595,7 @@ convert_to_scalar(Datum value, Oid valuetypid, double *scaledvalue,
* operators to estimate selectivity for the other's. This is outright
* wrong in some cases --- in particular signed versus unsigned
* interpretation could trip us up. But it's useful enough in the
- * majority of cases that we do it anyway. Should think about more
+ * majority of cases that we do it anyway. Should think about more
* rigorous ways to do it.
*/
switch (valuetypid)
@@ -4179,7 +4179,7 @@ get_restriction_variable(PlannerInfo *root, List *args, int varRelid,
right = (Node *) lsecond(args);
/*
- * Examine both sides. Note that when varRelid is nonzero, Vars of other
+ * Examine both sides. Note that when varRelid is nonzero, Vars of other
* relations will be treated as pseudoconstants.
*/
examine_variable(root, left, varRelid, vardata);
@@ -4324,7 +4324,7 @@ examine_variable(PlannerInfo *root, Node *node, int varRelid,
/*
* Okay, it's a more complicated expression. Determine variable
- * membership. Note that when varRelid isn't zero, only vars of that
+ * membership. Note that when varRelid isn't zero, only vars of that
* relation are considered "real" vars.
*/
varnos = pull_varnos(basenode);
@@ -4373,13 +4373,13 @@ examine_variable(PlannerInfo *root, Node *node, int varRelid,
if (onerel)
{
/*
- * We have an expression in vars of a single relation. Try to match
+ * We have an expression in vars of a single relation. Try to match
* it to expressional index columns, in hopes of finding some
* statistics.
*
* XXX it's conceivable that there are multiple matches with different
* index opfamilies; if so, we need to pick one that matches the
- * operator we are estimating for. FIXME later.
+ * operator we are estimating for. FIXME later.
*/
ListCell *ilist;
@@ -4581,7 +4581,7 @@ examine_simple_variable(PlannerInfo *root, Var *var,
*
* This is probably a harsher restriction than necessary; it's
* certainly OK for the selectivity estimator (which is a C function,
- * and therefore omnipotent anyway) to look at the statistics. But
+ * and therefore omnipotent anyway) to look at the statistics. But
* many selectivity estimators will happily *invoke the operator
* function* to try to work out a good estimate - and that's not OK.
* So for now, don't dig down for stats.
@@ -4634,7 +4634,7 @@ get_variable_numdistinct(VariableStatData *vardata, bool *isdefault)
*isdefault = false;
/*
- * Determine the stadistinct value to use. There are cases where we can
+ * Determine the stadistinct value to use. There are cases where we can
* get an estimate even without a pg_statistic entry, or can get a better
* value than is in pg_statistic.
*/
@@ -4758,7 +4758,7 @@ get_variable_range(PlannerInfo *root, VariableStatData *vardata, Oid sortop,
/*
* XXX It's very tempting to try to use the actual column min and max, if
- * we can get them relatively-cheaply with an index probe. However, since
+ * we can get them relatively-cheaply with an index probe. However, since
* this function is called many times during join planning, that could
* have unpleasant effects on planning speed. Need more investigation
* before enabling this.
@@ -5009,7 +5009,7 @@ get_actual_variable_range(PlannerInfo *root, VariableStatData *vardata,
* and it can be very expensive if a lot of uncommitted rows
* exist at the end of the index (because we'll laboriously
* fetch each one and reject it). What seems like a good
- * compromise is to use SnapshotDirty. That will accept
+ * compromise is to use SnapshotDirty. That will accept
* uncommitted rows, and thus avoid fetching multiple heap
* tuples in this scenario. On the other hand, it will reject
* known-dead rows, and thus not give a bogus answer when the
@@ -5148,7 +5148,7 @@ find_join_input_rel(PlannerInfo *root, Relids relids)
* Check whether char is a letter (and, hence, subject to case-folding)
*
* In multibyte character sets, we can't use isalpha, and it does not seem
- * worth trying to convert to wchar_t to use iswalpha. Instead, just assume
+ * worth trying to convert to wchar_t to use iswalpha. Instead, just assume
* any multibyte char is potentially case-varying.
*/
static int
@@ -5400,7 +5400,7 @@ pattern_fixed_prefix(Const *patt, Pattern_Type ptype, Oid collation,
* together with info about MCVs and NULLs.
*
* We use the >= and < operators from the specified btree opfamily to do the
- * estimation. The given variable and Const must be of the associated
+ * estimation. The given variable and Const must be of the associated
* datatype.
*
* XXX Note: we make use of the upper bound to estimate operator selectivity
@@ -5459,7 +5459,7 @@ prefix_selectivity(PlannerInfo *root, VariableStatData *vardata,
/*
* Merge the two selectivities in the same way as for a range query
- * (see clauselist_selectivity()). Note that we don't need to worry
+ * (see clauselist_selectivity()). Note that we don't need to worry
* about double-exclusion of nulls, since ineq_histogram_selectivity
* doesn't count those anyway.
*/
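In the function itself, that merge comes down to a single statement; shown here as an illustrative sketch using the selectivity variables discussed above:

    /* sel(var >= prefix) + sel(var < greaterstr) - 1.0, as for a range qual */
    prefixsel = topsel + prefixsel - 1.0;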
@@ -5696,7 +5696,7 @@ byte_increment(unsigned char *ptr, int len)
* that is not a bulletproof guarantee that an extension of the string might
* not sort after it; an example is that "foo " is less than "foo!", but it
* is not clear that a "dictionary" sort ordering will consider "foo!" less
- * than "foo bar". CAUTION: Therefore, this function should be used only for
+ * than "foo bar". CAUTION: Therefore, this function should be used only for
* estimation purposes when working in a non-C collation.
*
* To try to catch most cases where an extended string might otherwise sort
@@ -5953,7 +5953,7 @@ string_to_bytea_const(const char *str, size_t str_len)
* genericcostestimate is a general-purpose estimator that can be used for
* most index types. In some cases we use genericcostestimate as the base
* code and then incorporate additional index-type-specific knowledge in
- * the type-specific calling function. To avoid code duplication, we make
+ * the type-specific calling function. To avoid code duplication, we make
* genericcostestimate return a number of intermediate values as well as
* its preliminary estimates of the output cost values. The GenericCosts
* struct includes all these values.
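A type-specific estimator can therefore call it and then adjust the returned fields; a minimal hedged sketch of that flow (argument list per the 9.3-era declaration; verify against your tree):

    GenericCosts costs;

    MemSet(&costs, 0, sizeof(costs));
    genericcostestimate(root, path, loop_count, &costs);
    /* ...apply index-type-specific adjustments to costs.indexStartupCost,
     * costs.indexTotalCost, etc., before copying them to the output... */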
@@ -6073,7 +6073,7 @@ genericcostestimate(PlannerInfo *root,
*
* In practice access to upper index levels is often nearly free because
* those tend to stay in cache under load; moreover, the cost involved is
- * highly dependent on index type. We therefore ignore such costs here
+ * highly dependent on index type. We therefore ignore such costs here
* and leave it to the caller to add a suitable charge if needed.
*/
if (index->pages > 1 && index->tuples > 1)
@@ -6092,9 +6092,9 @@ genericcostestimate(PlannerInfo *root,
* The above calculations are all per-index-scan. However, if we are in a
* nestloop inner scan, we can expect the scan to be repeated (with
* different search keys) for each row of the outer relation. Likewise,
- * ScalarArrayOpExpr quals result in multiple index scans. This creates
+ * ScalarArrayOpExpr quals result in multiple index scans. This creates
* the potential for cache effects to reduce the number of disk page
- * fetches needed. We want to estimate the average per-scan I/O cost in
+ * fetches needed. We want to estimate the average per-scan I/O cost in
* the presence of caching.
*
* We use the Mackert-Lohman formula (see costsize.c for details) to
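Concretely, the averaging can be sketched as follows; index_pages_fetched() is the Mackert-Lohman estimator exported by costsize.c, while the local variable names here are assumptions for illustration:

    double      num_scans = num_sa_scans * loop_count;  /* assumed names */

    if (num_scans > 1)
    {
        double      pages_fetched;

        /* total page fetches over all scans, adjusted for caching */
        pages_fetched = index_pages_fetched(numIndexPages * num_scans,
                                            index->pages,
                                            (double) index->pages,
                                            root);

        /* pro-rate the cached total back to a per-scan I/O cost */
        indexTotalCost = (pages_fetched * spc_random_page_cost) / num_scans;
    }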
@@ -6141,7 +6141,7 @@ genericcostestimate(PlannerInfo *root,
* evaluated once at the start of the scan to reduce them to runtime keys
* to pass to the index AM (see nodeIndexscan.c). We model the per-tuple
* CPU costs as cpu_index_tuple_cost plus one cpu_operator_cost per
- * indexqual operator. Because we have numIndexTuples as a per-scan
+ * indexqual operator. Because we have numIndexTuples as a per-scan
* number, we have to multiply by num_sa_scans to get the correct result
* for ScalarArrayOpExpr cases. Similarly add in costs for any index
* ORDER BY expressions.
@@ -6188,16 +6188,16 @@ genericcostestimate(PlannerInfo *root,
* ANDing the index predicate with the explicitly given indexquals produces
* a more accurate idea of the index's selectivity. However, we need to be
* careful not to insert redundant clauses, because clauselist_selectivity()
- * is easily fooled into computing a too-low selectivity estimate. Our
+ * is easily fooled into computing a too-low selectivity estimate. Our
* approach is to add only the predicate clause(s) that cannot be proven to
- * be implied by the given indexquals. This successfully handles cases such
+ * be implied by the given indexquals. This successfully handles cases such
* as a qual "x = 42" used with a partial index "WHERE x >= 40 AND x < 50".
* There are many other cases where we won't detect redundancy, leading to a
* too-low selectivity estimate, which will bias the system in favor of using
- * partial indexes where possible. That is not necessarily bad though.
+ * partial indexes where possible. That is not necessarily bad though.
*
* Note that indexQuals contains RestrictInfo nodes while the indpred
- * does not, so the output list will be mixed. This is OK for both
+ * does not, so the output list will be mixed. This is OK for both
* predicate_implied_by() and clauselist_selectivity(), but might be
* problematic if the result were passed to other things.
*/
@@ -6256,7 +6256,7 @@ btcostestimate(PG_FUNCTION_ARGS)
* the index scan). Additional quals can suppress visits to the heap, so
* it's OK to count them in indexSelectivity, but they should not count
* for estimating numIndexTuples. So we must examine the given indexquals
- * to find out which ones count as boundary quals. We rely on the
+ * to find out which ones count as boundary quals. We rely on the
* knowledge that they are given in index column order.
*
* For a RowCompareExpr, we consider only the first column, just as
@@ -6595,7 +6595,7 @@ hashcostestimate(PG_FUNCTION_ARGS)
* because the hash AM makes sure that's always one page.
*
* Likewise, we could consider charging some CPU for each index tuple in
- * the bucket, if we knew how many there were. But the per-tuple cost is
+ * the bucket, if we knew how many there were. But the per-tuple cost is
* just a hash value comparison, not a general datatype-dependent
* comparison, so any such charge ought to be quite a bit less than
* cpu_operator_cost; which makes it probably not worth worrying about.
@@ -6653,7 +6653,7 @@ gistcostestimate(PG_FUNCTION_ARGS)
/*
* Add a CPU-cost component to represent the costs of initial descent. We
* just use log(N) here not log2(N) since the branching factor isn't
- * necessarily two anyway. As for btree, charge once per SA scan.
+ * necessarily two anyway. As for btree, charge once per SA scan.
*/
if (index->tuples > 1) /* avoid computing log(0) */
{
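For reference, the body of this block charges the descent cost roughly like this (a sketch matching the 9.3-era code; illustrative, not authoritative):

    descentCost = ceil(log(index->tuples)) * cpu_operator_cost;
    costs.indexStartupCost += descentCost;
    costs.indexTotalCost += costs.num_sa_scans * descentCost;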
@@ -6715,7 +6715,7 @@ spgcostestimate(PG_FUNCTION_ARGS)
/*
* Add a CPU-cost component to represent the costs of initial descent. We
* just use log(N) here not log2(N) since the branching factor isn't
- * necessarily two anyway. As for btree, charge once per SA scan.
+ * necessarily two anyway. As for btree, charge once per SA scan.
*/
if (index->tuples > 1) /* avoid computing log(0) */
{
@@ -6792,7 +6792,7 @@ gincost_pattern(IndexOptInfo *index, int indexcol,
/*
* Get the operator's strategy number and declared input data types within
- * the index opfamily. (We don't need the latter, but we use
+ * the index opfamily. (We don't need the latter, but we use
* get_op_opfamily_properties because it will throw error if it fails to
* find a matching pg_amop entry.)
*/
@@ -6938,7 +6938,7 @@ gincost_opexpr(PlannerInfo *root, IndexOptInfo *index, OpExpr *clause,
* each of which involves one value from the RHS array, plus all the
* non-array quals (if any). To model this, we average the counts across
* the RHS elements, and add the averages to the counts in *counts (which
- * correspond to per-indexscan costs). We also multiply counts->arrayScans
+ * correspond to per-indexscan costs). We also multiply counts->arrayScans
* by N, causing gincostestimate to scale up its estimates accordingly.
*/
static bool
@@ -7108,7 +7108,7 @@ gincostestimate(PG_FUNCTION_ARGS)
/*
* nPendingPages can be trusted, but the other fields are as of the last
- * VACUUM. Scale them by the ratio numPages / nTotalPages to account for
+ * VACUUM. Scale them by the ratio numPages / nTotalPages to account for
* growth since then. If the fields are zero (implying no VACUUM at all,
* and an index created pre-9.1), assume all pages are entry pages.
*/
@@ -7253,7 +7253,7 @@ gincostestimate(PG_FUNCTION_ARGS)
/*
* Add an estimate of entry pages read by partial match algorithm. It's a
- * scan over leaf pages in entry tree. We haven't any useful stats here,
+ * scan over leaf pages in entry tree. We haven't any useful stats here,
* so estimate it as a proportion.
*/
entryPagesFetched += ceil(numEntryPages * counts.partialEntries / numEntries);
diff --git a/src/backend/utils/adt/timestamp.c b/src/backend/utils/adt/timestamp.c
index 94b2a3608a6..1dc4e4d7f46 100644
--- a/src/backend/utils/adt/timestamp.c
+++ b/src/backend/utils/adt/timestamp.c
@@ -385,7 +385,7 @@ AdjustTimestampForTypmod(Timestamp *time, int32 typmod)
* Note: this round-to-nearest code is not completely consistent about
* rounding values that are exactly halfway between integral values.
* On most platforms, rint() will implement round-to-nearest-even, but
- * the integer code always rounds up (away from zero). Is it worth
+ * the integer code always rounds up (away from zero). Is it worth
* trying to be consistent?
*/
#ifdef HAVE_INT64_TIMESTAMP
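The inconsistency noted here is easy to demonstrate in isolation; this standalone sketch (not code from this file) contrasts the two behaviors:

    #include <math.h>
    #include <stdio.h>

    int
    main(void)
    {
        long long   v = 2500000;    /* 2.5 seconds, in microseconds */
        long long   unit = 1000000; /* round to whole seconds */

        /* rint() under the default round-to-nearest-even mode: 2.5 -> 2 */
        printf("%g\n", rint(2.5));
        /* the integer path rounds halfway cases up: 2500000 -> 3000000 */
        printf("%lld\n", (v + unit / 2) / unit * unit);
        return 0;
    }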
@@ -757,7 +757,7 @@ interval_send(PG_FUNCTION_ARGS)
/*
* The interval typmod stores a "range" in its high 16 bits and a "precision"
- * in its low 16 bits. Both contribute to defining the resolution of the
+ * in its low 16 bits. Both contribute to defining the resolution of the
* type. Range addresses resolution granules larger than one second, and
* precision specifies resolution below one second. This representation can
* express all SQL standard resolutions, but we implement them all in terms of
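A sketch of packing and unpacking that encoding (macro names as in utils/timestamp.h; treat this as illustrative rather than authoritative):

    /* range lives in the high 16 bits, precision in the low 16 bits */
    int32       range = INTERVAL_RANGE(typmod);
    int32       precision = INTERVAL_PRECISION(typmod);

    /* and the reverse, building a typmod from the two parts */
    int32       packed = INTERVAL_TYPMOD(precision, range);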
@@ -965,7 +965,7 @@ interval_transform(PG_FUNCTION_ARGS)
/*
* Temporally-smaller fields occupy higher positions in the range
- * bitmap. Since only the temporally-smallest bit matters for length
+ * bitmap. Since only the temporally-smallest bit matters for length
* coercion purposes, we compare the last-set bits in the ranges.
* Precision, which is to say, sub-second precision, only affects
* ranges that include SECOND.
@@ -1054,7 +1054,7 @@ AdjustIntervalForTypmod(Interval *interval, int32 typmod)
* that fields to the right of the last one specified are zeroed out,
* but those to the left of it remain valid. Thus for example there
* is no operational difference between INTERVAL YEAR TO MONTH and
- * INTERVAL MONTH. In some cases we could meaningfully enforce that
+ * INTERVAL MONTH. In some cases we could meaningfully enforce that
* higher-order fields are zero; for example INTERVAL DAY could reject
* nonzero "month" field. However that seems a bit pointless when we
* can't do it consistently. (We cannot enforce a range limit on the
@@ -1064,9 +1064,9 @@ AdjustIntervalForTypmod(Interval *interval, int32 typmod)
*
* Note: before PG 8.4 we interpreted a limited set of fields as
* actually causing a "modulo" operation on a given value, potentially
- * losing high-order as well as low-order information. But there is
+ * losing high-order as well as low-order information. But there is
* no support for such behavior in the standard, and it seems fairly
- * undesirable on data consistency grounds anyway. Now we only
+ * undesirable on data consistency grounds anyway. Now we only
* perform truncation or rounding of low-order fields.
*/
if (range == INTERVAL_FULL_RANGE)
@@ -1186,7 +1186,7 @@ AdjustIntervalForTypmod(Interval *interval, int32 typmod)
/*
* Note: this round-to-nearest code is not completely consistent
* about rounding values that are exactly halfway between integral
- * values. On most platforms, rint() will implement
+ * values. On most platforms, rint() will implement
* round-to-nearest-even, but the integer code always rounds up
* (away from zero). Is it worth trying to be consistent?
*/
@@ -1440,7 +1440,7 @@ timestamptz_to_time_t(TimestampTz t)
* Produce a C-string representation of a TimestampTz.
*
* This is mostly for use in emitting messages. The primary difference
- * from timestamptz_out is that we force the output format to ISO. Note
+ * from timestamptz_out is that we force the output format to ISO. Note
* also that the result is in a static buffer, not pstrdup'd.
*/
const char *
@@ -1610,7 +1610,7 @@ recalc_t:
*
* First, convert to an integral timestamp, avoiding possibly
* platform-specific roundoff-in-wrong-direction errors, and adjust to
- * Unix epoch. Then see if we can convert to pg_time_t without loss. This
+ * Unix epoch. Then see if we can convert to pg_time_t without loss. This
* coding avoids hardwiring any assumptions about the width of pg_time_t,
* so it should behave sanely on machines without int64.
*/
@@ -4540,7 +4540,7 @@ timestamp_zone(PG_FUNCTION_ARGS)
PG_RETURN_TIMESTAMPTZ(timestamp);
/*
- * Look up the requested timezone. First we look in the date token table
+ * Look up the requested timezone. First we look in the date token table
* (to handle cases like "EST"), and if that fails, we look in the
* timezone database (to handle cases like "America/New_York"). (This
* matches the order in which timestamp input checks the cases; it's
@@ -4713,7 +4713,7 @@ timestamptz_zone(PG_FUNCTION_ARGS)
PG_RETURN_TIMESTAMP(timestamp);
/*
- * Look up the requested timezone. First we look in the date token table
+ * Look up the requested timezone. First we look in the date token table
* (to handle cases like "EST"), and if that fails, we look in the
* timezone database (to handle cases like "America/New_York"). (This
* matches the order in which timestamp input checks the cases; it's
diff --git a/src/backend/utils/adt/tsginidx.c b/src/backend/utils/adt/tsginidx.c
index cf118334d76..7d3845a313b 100644
--- a/src/backend/utils/adt/tsginidx.c
+++ b/src/backend/utils/adt/tsginidx.c
@@ -237,7 +237,7 @@ gin_tsquery_consistent(PG_FUNCTION_ARGS)
* Formerly, gin_extract_tsvector had only two arguments. Now it has three,
* but we still need a pg_proc entry with two args to support reloading
* pre-9.1 contrib/tsearch2 opclass declarations. This compatibility
- * function should go away eventually. (Note: you might say "hey, but the
+ * function should go away eventually. (Note: you might say "hey, but the
* code above is only *using* two args, so let's just declare it that way".
* If you try that you'll find the opr_sanity regression test complains.)
*/
diff --git a/src/backend/utils/adt/varchar.c b/src/backend/utils/adt/varchar.c
index 1a615934cd7..531fbbb7aa1 100644
--- a/src/backend/utils/adt/varchar.c
+++ b/src/backend/utils/adt/varchar.c
@@ -257,7 +257,7 @@ bpcharsend(PG_FUNCTION_ARGS)
*
* Truncation rules: for an explicit cast, silently truncate to the given
* length; for an implicit cast, raise error unless extra characters are
- * all spaces. (This is sort-of per SQL: the spec would actually have us
+ * all spaces. (This is sort-of per SQL: the spec would actually have us
* raise a "completion condition" for the explicit cast case, but Postgres
* hasn't got such a concept.)
*/
@@ -584,7 +584,7 @@ varchar_transform(PG_FUNCTION_ARGS)
*
* Truncation rules: for an explicit cast, silently truncate to the given
* length; for an implicit cast, raise error unless extra characters are
- * all spaces. (This is sort-of per SQL: the spec would actually have us
+ * all spaces. (This is sort-of per SQL: the spec would actually have us
* raise a "completion condition" for the explicit cast case, but Postgres
* hasn't got such a concept.)
*/
diff --git a/src/backend/utils/adt/varlena.c b/src/backend/utils/adt/varlena.c
index 6fc26cee6ed..f990877ffb7 100644
--- a/src/backend/utils/adt/varlena.c
+++ b/src/backend/utils/adt/varlena.c
@@ -591,7 +591,7 @@ textlen(PG_FUNCTION_ARGS)
* Does the real work for textlen()
*
* This is broken out so it can be called directly by other string processing
- * functions. Note that the argument is passed as a Datum, to indicate that
+ * functions. Note that the argument is passed as a Datum, to indicate that
* it may still be in compressed form. We can avoid decompressing it at all
* in some cases.
*/
@@ -763,7 +763,7 @@ text_substr_no_len(PG_FUNCTION_ARGS)
* Does the real work for text_substr() and text_substr_no_len()
*
* This is broken out so it can be called directly by other string processing
- * functions. Note that the argument is passed as a Datum, to indicate that
+ * functions. Note that the argument is passed as a Datum, to indicate that
* it may still be in compressed/toasted form. We can avoid detoasting all
* of it in some cases.
*
@@ -1113,7 +1113,7 @@ text_position_setup(text *t1, text *t2, TextPositionState *state)
* searched (t1) and the "needle" is the pattern being sought (t2).
*
* If the needle is empty or bigger than the haystack then there is no
- * point in wasting cycles initializing the table. We also choose not to
+ * point in wasting cycles initializing the table. We also choose not to
* use B-M-H for needles of length 1, since the skip table can't possibly
* save anything in that case.
*/
@@ -1129,7 +1129,7 @@ text_position_setup(text *t1, text *t2, TextPositionState *state)
* declaration of TextPositionState allows up to 256 elements, but for
* short search problems we don't really want to have to initialize so
* many elements --- it would take too long in comparison to the
- * actual search time. So we choose a useful skip table size based on
+ * actual search time. So we choose a useful skip table size based on
* the haystack length minus the needle length. The closer the needle
* length is to the haystack length the less useful skipping becomes.
*
@@ -1161,7 +1161,7 @@ text_position_setup(text *t1, text *t2, TextPositionState *state)
state->skiptable[i] = len2;
/*
- * Now examine the needle. For each character except the last one,
+ * Now examine the needle. For each character except the last one,
* set the corresponding table element to the appropriate skip
* distance. Note that when two characters share the same skip table
* entry, the one later in the needle must determine the skip
@@ -1249,11 +1249,11 @@ text_position_next(int start_pos, TextPositionState *state)
/*
* No match, so use the haystack char at hptr to decide how
- * far to advance. If the needle had any occurrence of that
+ * far to advance. If the needle had any occurrence of that
* character (or more precisely, one sharing the same
* skiptable entry) before its last character, then we advance
* far enough to align the last such needle character with
- * that haystack position. Otherwise we can advance by the
+ * that haystack position. Otherwise we can advance by the
* whole needle length.
*/
hptr += state->skiptable[(unsigned char) *hptr & skiptablemask];
@@ -1305,11 +1305,11 @@ text_position_next(int start_pos, TextPositionState *state)
/*
* No match, so use the haystack char at hptr to decide how
- * far to advance. If the needle had any occurrence of that
+ * far to advance. If the needle had any occurrence of that
* character (or more precisely, one sharing the same
* skiptable entry) before its last character, then we advance
* far enough to align the last such needle character with
- * that haystack position. Otherwise we can advance by the
+ * that haystack position. Otherwise we can advance by the
* whole needle length.
*/
hptr += state->skiptable[*hptr & skiptablemask];
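The skip logic above is classic Boyer-Moore-Horspool. A self-contained sketch of the same idea, independent of the TextPositionState machinery used here:

    #include <stddef.h>
    #include <string.h>

    /* Horspool search: returns pointer to the first match, or NULL. */
    static const char *
    bmh_search(const char *haystack, size_t hlen,
               const char *needle, size_t nlen)
    {
        size_t      skip[256];
        size_t      i;

        if (nlen == 0 || nlen > hlen)
            return NULL;

        /* default: a miss lets us advance by the whole needle length */
        for (i = 0; i < 256; i++)
            skip[i] = nlen;
        /* characters before the last one determine shorter, aligned skips */
        for (i = 0; i < nlen - 1; i++)
            skip[(unsigned char) needle[i]] = nlen - 1 - i;

        while (hlen >= nlen)
        {
            if (memcmp(haystack, needle, nlen) == 0)
                return haystack;
            /* advance by the skip of the haystack char aligned with the
             * needle's last position */
            i = skip[(unsigned char) haystack[nlen - 1]];
            haystack += i;
            hlen -= i;
        }
        return NULL;
    }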
@@ -1344,7 +1344,7 @@ varstr_cmp(char *arg1, int len1, char *arg2, int len2, Oid collid)
/*
* Unfortunately, there is no strncoll(), so in the non-C locale case we
- * have to do some memory copying. This turns out to be significantly
+ * have to do some memory copying. This turns out to be significantly
* slower, so we optimize the case where LC_COLLATE is C. We also try to
* optimize relatively-short strings by avoiding palloc/pfree overhead.
*/
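A hedged sketch of the copy-then-strcoll fallback described here, using the function's parameters; the strcmp tie-break mirrors the real function, since strcoll() can report equality for nonidentical strings:

    char       *a1p = palloc(len1 + 1);
    char       *a2p = palloc(len2 + 1);
    int         result;

    memcpy(a1p, arg1, len1);
    a1p[len1] = '\0';
    memcpy(a2p, arg2, len2);
    a2p[len2] = '\0';

    result = strcoll(a1p, a2p);
    if (result == 0)
        result = strcmp(a1p, a2p);  /* break ties deterministically */

    pfree(a1p);
    pfree(a2p);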
@@ -2334,7 +2334,7 @@ textToQualifiedNameList(text *textval)
* SplitIdentifierString --- parse a string containing identifiers
*
* This is the guts of textToQualifiedNameList, and is exported for use in
- * other situations such as parsing GUC variables. In the GUC case, it's
+ * other situations such as parsing GUC variables. In the GUC case, it's
* important to avoid memory leaks, so the API is designed to minimize the
* amount of stuff that needs to be allocated and freed.
*
@@ -2342,7 +2342,7 @@ textToQualifiedNameList(text *textval)
* rawstring: the input string; must be overwritable! On return, it's
* been modified to contain the separated identifiers.
* separator: the separator punctuation expected between identifiers
- * (typically '.' or ','). Whitespace may also appear around
+ * (typically '.' or ','). Whitespace may also appear around
* identifiers.
* Outputs:
* namelist: filled with a palloc'd list of pointers to identifiers within
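A minimal usage sketch (hypothetical caller; error handling abbreviated):

    char       *rawstring = pstrdup("public, pg_catalog");  /* writable copy */
    List       *namelist;

    if (!SplitIdentifierString(rawstring, ',', &namelist))
        elog(ERROR, "invalid list syntax");
    /* namelist points into rawstring, so keep rawstring alive while in use */
    list_free(namelist);
    pfree(rawstring);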
@@ -2411,7 +2411,7 @@ SplitIdentifierString(char *rawstring, char separator,
*
* XXX because we want to overwrite the input in-place, we cannot
* support a downcasing transformation that increases the string
- * length. This is not a problem given the current implementation
+ * length. This is not a problem given the current implementation
* of downcase_truncate_identifier, but we'll probably have to do
* something about this someday.
*/
@@ -2468,7 +2468,7 @@ SplitIdentifierString(char *rawstring, char separator,
* Inputs:
* rawstring: the input string; must be modifiable!
* separator: the separator punctuation expected between directories
- * (typically ',' or ';'). Whitespace may also appear around
+ * (typically ',' or ';'). Whitespace may also appear around
* directories.
* Outputs:
* namelist: filled with a palloc'd list of directory names.
@@ -2875,7 +2875,7 @@ check_replace_text_has_escape_char(const text *replace_text)
* appendStringInfoRegexpSubstr
*
* Append replace_text to str, substituting regexp back references for
- * \n escapes. start_ptr is the start of the match in the source string,
+ * \n escapes. start_ptr is the start of the match in the source string,
* at logical character position data_pos.
*/
static void
@@ -2958,7 +2958,7 @@ appendStringInfoRegexpSubstr(StringInfo str, text *replace_text,
if (so != -1 && eo != -1)
{
/*
- * Copy the text that the regexp back reference refers to. Note that so
+ * Copy the text that the regexp back reference refers to. Note that so
* and eo are counted in characters, not bytes.
*/
char *chunk_start;
@@ -4249,7 +4249,7 @@ text_format(PG_FUNCTION_ARGS)
/*
* Get the appropriate typOutput function, reusing previous one if
- * same type as previous argument. That's particularly useful in the
+ * same type as previous argument. That's particularly useful in the
* variadic-array case, but often saves work even for ordinary calls.
*/
if (typid != prev_type)
@@ -4341,12 +4341,12 @@ text_format_parse_digits(const char **ptr, const char *end_ptr, int *value)
*
* Inputs are start_ptr (the position after '%') and end_ptr (string end + 1).
* Output parameters:
- * argpos: argument position for value to be printed. -1 means unspecified.
- * widthpos: argument position for width. Zero means the argument position
+ * argpos: argument position for value to be printed. -1 means unspecified.
+ * widthpos: argument position for width. Zero means the argument position
* was unspecified (ie, take the next arg) and -1 means no width
* argument (width was omitted or specified as a constant).
* flags: bitmask of flags.
- * width: directly-specified width value. Zero means the width was omitted
+ * width: directly-specified width value. Zero means the width was omitted
* (note it's not necessary to distinguish this case from an explicit
* zero width value).
*
diff --git a/src/backend/utils/adt/xml.c b/src/backend/utils/adt/xml.c
index 25ab79b1979..335e4d38ca3 100644
--- a/src/backend/utils/adt/xml.c
+++ b/src/backend/utils/adt/xml.c
@@ -19,7 +19,7 @@
* fail. For one thing, this avoids having to manage variant catalog
* installations. But it also has nice effects such as that you can
* dump a database containing XML type data even if the server is not
- * linked with libxml. Thus, make sure xml_out() works even if nothing
+ * linked with libxml. Thus, make sure xml_out() works even if nothing
* else does.
*/
@@ -286,7 +286,7 @@ xml_out(PG_FUNCTION_ARGS)
xmltype *x = PG_GETARG_XML_P(0);
/*
- * xml_out removes the encoding property in all cases. This is because we
+ * xml_out removes the encoding property in all cases. This is because we
* cannot control from here whether the datum will be converted to a
* different client encoding, so we'd do more harm than good by including
* it.
@@ -457,7 +457,7 @@ xmlcomment(PG_FUNCTION_ARGS)
/*
* TODO: xmlconcat needs to merge the notations and unparsed entities
- * of the argument values. Not very important in practice, though.
+ * of the argument values. Not very important in practice, though.
*/
xmltype *
xmlconcat(List *args)
@@ -592,7 +592,7 @@ xmlelement(XmlExprState *xmlExpr, ExprContext *econtext)
/*
* We first evaluate all the arguments, then start up libxml and create
- * the result. This avoids issues if one of the arguments involves a call
+ * the result. This avoids issues if one of the arguments involves a call
* to some other function or subsystem that wants to use libxml on its own
* terms.
*/
@@ -929,7 +929,7 @@ pg_xml_init_library(void)
* pg_xml_init --- set up for use of libxml and register an error handler
*
* This should be called by each function that is about to use libxml
- * facilities and requires error handling. It initializes libxml with
+ * facilities and requires error handling. It initializes libxml with
* pg_xml_init_library() and establishes our libxml error handler.
*
* strictness determines which errors are reported and which are ignored.
@@ -975,7 +975,7 @@ pg_xml_init(PgXmlStrictness strictness)
/*
* Verify that xmlSetStructuredErrorFunc set the context variable we
- * expected it to. If not, the error context pointer we just saved is not
+ * expected it to. If not, the error context pointer we just saved is not
* the correct thing to restore, and since that leaves us without a way to
* restore the context in pg_xml_done, we must fail.
*
@@ -1132,7 +1132,7 @@ parse_xml_decl(const xmlChar *str, size_t *lenp,
int utf8len;
/*
- * Only initialize libxml. We don't need error handling here, but we do
+ * Only initialize libxml. We don't need error handling here, but we do
* need to make sure libxml is initialized before calling any of its
* functions. Note that this is safe (and a no-op) if caller has already
* done pg_xml_init().
@@ -1275,7 +1275,7 @@ finished:
/*
* Write an XML declaration. On output, we adjust the XML declaration
- * as follows. (These rules are the moral equivalent of the clause
+ * as follows. (These rules are the moral equivalent of the clause
* "Serialization of an XML value" in the SQL standard.)
*
* We try to avoid generating an XML declaration if possible. This is
@@ -1499,7 +1499,7 @@ xml_pstrdup(const char *string)
/*
* xmlPgEntityLoader --- entity loader callback function
*
- * Silently prevent any external entity URL from being loaded. We don't want
+ * Silently prevent any external entity URL from being loaded. We don't want
* to throw an error, so instead make the entity appear to expand to an empty
* string.
*
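One way to achieve that with libxml2, sketched to match the description above (the actual body may differ):

    static xmlParserInputPtr
    xmlPgEntityLoader(const char *URL, const char *ID,
                      xmlParserCtxtPtr ctxt)
    {
        /* make every external entity expand to an empty string */
        return xmlNewStringInputStream(ctxt, (const xmlChar *) "");
    }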
@@ -1668,8 +1668,8 @@ xml_errorHandler(void *data, xmlErrorPtr error)
chopStringInfoNewlines(errorBuf);
/*
- * Legacy error handling mode. err_occurred is never set, we just add the
- * message to err_buf. This mode exists because the xml2 contrib module
+ * Legacy error handling mode. err_occurred is never set, we just add the
+ * message to err_buf. This mode exists because the xml2 contrib module
* uses our error-handling infrastructure, but we don't want to change its
* behaviour since it's deprecated anyway. This is also why we don't
* distinguish between notices, warnings and errors here --- the old-style
@@ -1948,8 +1948,8 @@ map_xml_name_to_sql_identifier(char *name)
*
* When xml_escape_strings is true, then certain characters in string
* values are replaced by entity references (&lt; etc.), as specified
- * in SQL/XML:2008 section 9.8 GR 9) a) iii). This is normally what is
- * wanted. The false case is mainly useful when the resulting value
+ * in SQL/XML:2008 section 9.8 GR 9) a) iii). This is normally what is
+ * wanted. The false case is mainly useful when the resulting value
* is used with xmlTextWriterWriteAttribute() to write out an
* attribute, because that function does the escaping itself.
*/
@@ -2230,13 +2230,13 @@ _SPI_strdup(const char *s)
*
* There are two kinds of mappings: Mapping SQL data (table contents)
* to XML documents, and mapping SQL structure (the "schema") to XML
- * Schema. And there are functions that do both at the same time.
+ * Schema. And there are functions that do both at the same time.
*
* Then you can map a database, a schema, or a table, each in both
* ways. This breaks down recursively: Mapping a database invokes
* mapping schemas, which invokes mapping tables, which invokes
* mapping rows, which invokes mapping columns, although you can't
- * call the last two from the outside. Because of this, there are a
+ * call the last two from the outside. Because of this, there are a
* number of xyz_internal() functions which are to be called both from
* the function manager wrapper and from some upper layer in a
* recursive call.
@@ -2245,7 +2245,7 @@ _SPI_strdup(const char *s)
* nulls, tableforest, and targetns mean.
*
* Some style guidelines for XML output: Use double quotes for quoting
- * XML attributes. Indent XML elements by two spaces, but remember
+ * XML attributes. Indent XML elements by two spaces, but remember
* that a lot of code is called recursively at different levels, so
* it's better not to indent rather than create output that indents
* and outdents weirdly. Add newlines to make the output look nice.
@@ -2409,12 +2409,12 @@ cursor_to_xml(PG_FUNCTION_ARGS)
* Write the start tag of the root element of a data mapping.
*
* top_level means that this is the very top level of the eventual
- * output. For example, when the user calls table_to_xml, then a call
+ * output. For example, when the user calls table_to_xml, then a call
* with a table name to this function is the top level. When the user
* calls database_to_xml, then a call with a schema name to this
* function is not the top level. If top_level is false, then the XML
* namespace declarations are omitted, because they supposedly already
- * appeared earlier in the output. Repeating them is not wrong, but
+ * appeared earlier in the output. Repeating them is not wrong, but
* it looks ugly.
*/
static void
@@ -3357,7 +3357,7 @@ map_sql_typecoll_to_xmlschema_types(List *tupdesc_list)
* SQL/XML:2008 sections 9.5 and 9.6.
*
* (The distinction between 9.5 and 9.6 is basically that 9.6 adds
- * a name attribute, which this function does. The name-less version
+ * a name attribute, which this function does. The name-less version
* 9.5 doesn't appear to be required anywhere.)
*/
static const char *
@@ -3535,7 +3535,7 @@ map_sql_type_to_xmlschema_type(Oid typeoid, int typmod)
/*
* Map an SQL row to an XML element, taking the row from the active
- * SPI cursor. See also SQL/XML:2008 section 9.10.
+ * SPI cursor. See also SQL/XML:2008 section 9.10.
*/
static void
SPI_sql_row_to_xmlelement(int rownum, StringInfo result, char *tablename,
diff --git a/src/backend/utils/cache/attoptcache.c b/src/backend/utils/cache/attoptcache.c
index 134db1a1bb2..2ef3d08664f 100644
--- a/src/backend/utils/cache/attoptcache.c
+++ b/src/backend/utils/cache/attoptcache.c
@@ -46,7 +46,7 @@ typedef struct
* Flush all cache entries when pg_attribute is updated.
*
* When pg_attribute is updated, we must flush the cache entry at least
- * for that attribute. Currently, we just flush them all. Since attribute
+ * for that attribute. Currently, we just flush them all. Since attribute
* options are not currently used in performance-critical paths (such as
* query execution), this seems OK.
*/
diff --git a/src/backend/utils/cache/catcache.c b/src/backend/utils/cache/catcache.c
index cc91406582b..05b3b752579 100644
--- a/src/backend/utils/cache/catcache.c
+++ b/src/backend/utils/cache/catcache.c
@@ -816,7 +816,7 @@ InitCatCache(int id,
* CatalogCacheInitializeCache
*
* This function does final initialization of a catcache: obtain the tuple
- * descriptor and set up the hash and equality function links. We assume
+ * descriptor and set up the hash and equality function links. We assume
* that the relcache entry can be opened at this point!
*/
#ifdef CACHEDEBUG
@@ -1041,7 +1041,7 @@ IndexScanOK(CatCache *cache, ScanKey cur_skey)
* if necessary (on the first access to a particular cache).
*
* The result is NULL if not found, or a pointer to a HeapTuple in
- * the cache. The caller must not modify the tuple, and must call
+ * the cache. The caller must not modify the tuple, and must call
* ReleaseCatCache() when done with it.
*
* The search key values should be expressed as Datums of the key columns'
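Most callers reach this through the syscache wrappers, which follow the same lookup/release contract; a typical usage sketch:

    HeapTuple   tup;

    tup = SearchSysCache1(RELOID, ObjectIdGetDatum(relid));
    if (!HeapTupleIsValid(tup))
        elog(ERROR, "cache lookup failed for relation %u", relid);
    /* ...read, but do not modify, the tuple... */
    ReleaseSysCache(tup);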
@@ -1171,8 +1171,8 @@ SearchCatCache(CatCache *cache,
* the relation --- for example, due to shared-cache-inval messages being
* processed during heap_open(). This is OK. It's even possible for one
* of those lookups to find and enter the very same tuple we are trying to
- * fetch here. If that happens, we will enter a second copy of the tuple
- * into the cache. The first copy will never be referenced again, and
+ * fetch here. If that happens, we will enter a second copy of the tuple
+ * into the cache. The first copy will never be referenced again, and
* will eventually age out of the cache, so there's no functional problem.
* This case is rare enough that it's not worth expending extra cycles to
* detect.
@@ -1211,7 +1211,7 @@ SearchCatCache(CatCache *cache,
*
* In bootstrap mode, we don't build negative entries, because the cache
* invalidation mechanism isn't alive and can't clear them if the tuple
- * gets created later. (Bootstrap doesn't do UPDATEs, so it doesn't need
+ * gets created later. (Bootstrap doesn't do UPDATEs, so it doesn't need
* cache inval for that.)
*/
if (ct == NULL)
@@ -1541,7 +1541,7 @@ SearchCatCacheList(CatCache *cache,
/*
* We are now past the last thing that could trigger an elog before we
* have finished building the CatCList and remembering it in the
- * resource owner. So it's OK to fall out of the PG_TRY, and indeed
+ * resource owner. So it's OK to fall out of the PG_TRY, and indeed
* we'd better do so before we start marking the members as belonging
* to the list.
*/
@@ -1630,7 +1630,7 @@ ReleaseCatCacheList(CatCList *list)
/*
* CatalogCacheCreateEntry
* Create a new CatCTup entry, copying the given HeapTuple and other
- * supplied data into it. The new entry initially has refcount 0.
+ * supplied data into it. The new entry initially has refcount 0.
*/
static CatCTup *
CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp,
diff --git a/src/backend/utils/cache/evtcache.c b/src/backend/utils/cache/evtcache.c
index 2180f2abcc1..628e1bd1542 100644
--- a/src/backend/utils/cache/evtcache.c
+++ b/src/backend/utils/cache/evtcache.c
@@ -129,7 +129,7 @@ BuildEventTriggerCache(void)
HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT);
/*
- * Prepare to scan pg_event_trigger in name order. We use an MVCC
+ * Prepare to scan pg_event_trigger in name order. We use an MVCC
* snapshot to avoid getting inconsistent results if the table is being
* concurrently updated.
*/
diff --git a/src/backend/utils/cache/inval.c b/src/backend/utils/cache/inval.c
index e0dc1267076..62c4369b171 100644
--- a/src/backend/utils/cache/inval.c
+++ b/src/backend/utils/cache/inval.c
@@ -29,23 +29,23 @@
*
* If we successfully complete the transaction, we have to broadcast all
* these invalidation events to other backends (via the SI message queue)
- * so that they can flush obsolete entries from their caches. Note we have
+ * so that they can flush obsolete entries from their caches. Note we have
* to record the transaction commit before sending SI messages, otherwise
* the other backends won't see our updated tuples as good.
*
* When a subtransaction aborts, we can process and discard any events
- * it has queued. When a subtransaction commits, we just add its events
+ * it has queued. When a subtransaction commits, we just add its events
* to the pending lists of the parent transaction.
*
* In short, we need to remember until xact end every insert or delete
- * of a tuple that might be in the system caches. Updates are treated as
+ * of a tuple that might be in the system caches. Updates are treated as
* two events, delete + insert, for simplicity. (If the update doesn't
* change the tuple hash value, catcache.c optimizes this into one event.)
*
* We do not need to register EVERY tuple operation in this way, just those
- * on tuples in relations that have associated catcaches. We do, however,
+ * on tuples in relations that have associated catcaches. We do, however,
* have to register every operation on every tuple that *could* be in a
- * catcache, whether or not it currently is in our cache. Also, if the
+ * catcache, whether or not it currently is in our cache. Also, if the
* tuple is in a relation that has multiple catcaches, we need to register
* an invalidation message for each such catcache. catcache.c's
* PrepareToInvalidateCacheTuple() routine provides the knowledge of which
@@ -112,7 +112,7 @@
/*
* To minimize palloc traffic, we keep pending requests in successively-
* larger chunks (a slightly more sophisticated version of an expansible
- * array). All request types can be stored as SharedInvalidationMessage
+ * array). All request types can be stored as SharedInvalidationMessage
* records. The ordering of requests within a list is never significant.
*/
typedef struct InvalidationChunk
@@ -600,7 +600,7 @@ AcceptInvalidationMessages(void)
*
* If you're a glutton for punishment, try CLOBBER_CACHE_RECURSIVELY. This
* slows things by at least a factor of 10000, so I wouldn't suggest
- * trying to run the entire regression test suite that way. It's useful to try
+ * trying to run the entire regression test suite that way. It's useful to try
* a few simple tests, to make sure that cache reload isn't subject to
* internal cache-flush hazards, but after you've done a few thousand
* recursive reloads it's unlikely you'll learn more.
@@ -813,12 +813,12 @@ ProcessCommittedInvalidationMessages(SharedInvalidationMessage *msgs,
* If isCommit, we must send out the messages in our PriorCmdInvalidMsgs list
* to the shared invalidation message queue. Note that these will be read
* not only by other backends, but also by our own backend at the next
- * transaction start (via AcceptInvalidationMessages). This means that
+ * transaction start (via AcceptInvalidationMessages). This means that
* we can skip immediate local processing of anything that's still in
* CurrentCmdInvalidMsgs, and just send that list out too.
*
* If not isCommit, we are aborting, and must locally process the messages
- * in PriorCmdInvalidMsgs. No messages need be sent to other backends,
+ * in PriorCmdInvalidMsgs. No messages need be sent to other backends,
* since they'll not have seen our changed tuples anyway. We can forget
* about CurrentCmdInvalidMsgs too, since those changes haven't touched
* the caches yet.
@@ -877,11 +877,11 @@ AtEOXact_Inval(bool isCommit)
* parent's PriorCmdInvalidMsgs list.
*
* If not isCommit, we are aborting, and must locally process the messages
- * in PriorCmdInvalidMsgs. No messages need be sent to other backends.
+ * in PriorCmdInvalidMsgs. No messages need be sent to other backends.
* We can forget about CurrentCmdInvalidMsgs too, since those changes haven't
* touched the caches yet.
*
- * In any case, pop the transaction stack. We need not physically free memory
+ * In any case, pop the transaction stack. We need not physically free memory
* here, since CurTransactionContext is about to be emptied anyway
* (if aborting). Beware of the possibility of aborting the same nesting
* level twice, though.
@@ -937,7 +937,7 @@ AtEOSubXact_Inval(bool isCommit)
* in a transaction.
*
* Here, we send no messages to the shared queue, since we don't know yet if
- * we will commit. We do need to locally process the CurrentCmdInvalidMsgs
+ * we will commit. We do need to locally process the CurrentCmdInvalidMsgs
* list, so as to flush our caches of any entries we have outdated in the
* current command. We then move the current-cmd list over to become part
* of the prior-cmds list.
@@ -1039,7 +1039,7 @@ CacheInvalidateHeapTuple(Relation relation,
* This essentially means that only backends in this same database
* will react to the relcache flush request. This is in fact
* appropriate, since only those backends could see our pg_attribute
- * change anyway. It looks a bit ugly though. (In practice, shared
+ * change anyway. It looks a bit ugly though. (In practice, shared
* relations can't have schema changes after bootstrap, so we should
* never come here for a shared rel anyway.)
*/
@@ -1051,7 +1051,7 @@ CacheInvalidateHeapTuple(Relation relation,
/*
* When a pg_index row is updated, we should send out a relcache inval
- * for the index relation. As above, we don't know the shared status
+ * for the index relation. As above, we don't know the shared status
* of the index, but in practice it doesn't matter since indexes of
* shared catalogs can't have such updates.
*/
@@ -1159,7 +1159,7 @@ CacheInvalidateRelcacheByRelid(Oid relid)
*
* Sending this type of invalidation msg forces other backends to close open
* smgr entries for the rel. This should be done to flush dangling open-file
- * references when the physical rel is being dropped or truncated. Because
+ * references when the physical rel is being dropped or truncated. Because
* these are nontransactional (i.e., not-rollback-able) operations, we just
* send the inval message immediately without any queuing.
*
diff --git a/src/backend/utils/cache/lsyscache.c b/src/backend/utils/cache/lsyscache.c
index 586596258d3..0b45fcb4943 100644
--- a/src/backend/utils/cache/lsyscache.c
+++ b/src/backend/utils/cache/lsyscache.c
@@ -186,13 +186,13 @@ get_opfamily_member(Oid opfamily, Oid lefttype, Oid righttype,
* (This indicates that the operator is not a valid ordering operator.)
*
* Note: the operator could be registered in multiple families, for example
- * if someone were to build a "reverse sort" opfamily. This would result in
+ * if someone were to build a "reverse sort" opfamily. This would result in
* uncertainty as to whether "ORDER BY USING op" would default to NULLS FIRST
* or NULLS LAST, as well as inefficient planning due to failure to match up
* pathkeys that should be the same. So we want a determinate result here.
* Because of the way the syscache search works, we'll use the interpretation
* associated with the opfamily with smallest OID, which is probably
- * determinate enough. Since there is no longer any particularly good reason
+ * determinate enough. Since there is no longer any particularly good reason
* to build reverse-sort opfamilies, it doesn't seem worth expending any
* additional effort on ensuring consistency.
*/
@@ -403,7 +403,7 @@ get_ordering_op_for_equality_op(Oid opno, bool use_lhs_type)
*
* The planner currently uses simple equal() tests to compare the lists
* returned by this function, which makes the list order relevant, though
- * strictly speaking it should not be. Because of the way syscache list
+ * strictly speaking it should not be. Because of the way syscache list
* searches are handled, in normal operation the result will be sorted by OID
* so everything works fine. If running with system index usage disabled,
* the result ordering is unspecified and hence the planner might fail to
@@ -1212,7 +1212,7 @@ op_mergejoinable(Oid opno, Oid inputtype)
*
* In some cases (currently only array_eq), hashjoinability depends on the
* specific input data type the operator is invoked for, so that must be
- * passed as well. We currently assume that only one input's type is needed
+ * passed as well. We currently assume that only one input's type is needed
* to check this --- by convention, pass the left input's data type.
*/
bool
@@ -1861,7 +1861,7 @@ get_typbyval(Oid typid)
* A two-fer: given the type OID, return both typlen and typbyval.
*
* Since both pieces of info are needed to know how to copy a Datum,
- * many places need both. Might as well get them with one cache lookup
+ * many places need both. Might as well get them with one cache lookup
* instead of two. Also, this routine raises an error instead of
* returning a bogus value when given a bad type OID.
*/
diff --git a/src/backend/utils/cache/plancache.c b/src/backend/utils/cache/plancache.c
index cf740a94cae..0e210a0d1c5 100644
--- a/src/backend/utils/cache/plancache.c
+++ b/src/backend/utils/cache/plancache.c
@@ -11,7 +11,7 @@
* The logic for choosing generic or custom plans is in choose_custom_plan,
* which see for comments.
*
- * Cache invalidation is driven off sinval events. Any CachedPlanSource
+ * Cache invalidation is driven off sinval events. Any CachedPlanSource
* that matches the event is marked invalid, as is its generic CachedPlan
* if it has one. When (and if) the next demand for a cached plan occurs,
* parse analysis and rewrite is repeated to build a new valid query tree,
@@ -27,7 +27,7 @@
* caller to notice changes and cope with them.
*
* Currently, we track exactly the dependencies of plans on relations and
- * user-defined functions. On relcache invalidation events or pg_proc
+ * user-defined functions. On relcache invalidation events or pg_proc
* syscache invalidation events, we invalidate just those plans that depend
* on the particular object being modified. (Note: this scheme assumes
* that any table modification that requires replanning will generate a
@@ -123,7 +123,7 @@ InitPlanCache(void)
* CreateCachedPlan: initially create a plan cache entry.
*
* Creation of a cached plan is divided into two steps, CreateCachedPlan and
- * CompleteCachedPlan. CreateCachedPlan should be called after running the
+ * CompleteCachedPlan. CreateCachedPlan should be called after running the
* query through raw_parser, but before doing parse analysis and rewrite;
* CompleteCachedPlan is called after that. The reason for this arrangement
* is that it can save one round of copying of the raw parse tree, since
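The two-step flow looks roughly like this; a hedged sketch with the argument list abbreviated to the 9.3-era shape (see plancache.h for the real declarations):

    CachedPlanSource *plansource;

    plansource = CreateCachedPlan(raw_parse_tree, query_string, commandTag);
    /* ...parse analysis and rewrite run here, producing querytree_list... */
    CompleteCachedPlan(plansource, querytree_list,
                       NULL,        /* let plancache copy into its own context */
                       param_types, num_params,
                       NULL, NULL,  /* no parser setup hook */
                       0,           /* cursor_options */
                       true);       /* fixed_result */
    SaveCachedPlan(plansource);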
@@ -217,7 +217,7 @@ CreateCachedPlan(Node *raw_parse_tree,
* in that context.
*
* A one-shot plan cannot be saved or copied, since we make no effort to
- * preserve the raw parse tree unmodified. There is also no support for
+ * preserve the raw parse tree unmodified. There is also no support for
* invalidation, so plan use must be completed in the current transaction,
* and DDL that might invalidate the querytree_list must be avoided as well.
*
@@ -274,13 +274,13 @@ CreateOneShotCachedPlan(Node *raw_parse_tree,
* CompleteCachedPlan: second step of creating a plan cache entry.
*
* Pass in the analyzed-and-rewritten form of the query, as well as the
- * required subsidiary data about parameters and such. All passed values will
+ * required subsidiary data about parameters and such. All passed values will
* be copied into the CachedPlanSource's memory, except as specified below.
* After this is called, GetCachedPlan can be called to obtain a plan, and
* optionally the CachedPlanSource can be saved using SaveCachedPlan.
*
* If querytree_context is not NULL, the querytree_list must be stored in that
- * context (but the other parameters need not be). The querytree_list is not
+ * context (but the other parameters need not be). The querytree_list is not
* copied, rather the given context is kept as the initial query_context of
* the CachedPlanSource. (It should have been created as a child of the
* caller's working memory context, but it will now be reparented to belong
@@ -374,7 +374,7 @@ CompleteCachedPlan(CachedPlanSource *plansource,
&plansource->invalItems);
/*
- * Also save the current search_path in the query_context. (This
+ * Also save the current search_path in the query_context. (This
* should not generate much extra cruft either, since almost certainly
* the path is already valid.) Again, we don't really need this for
* one-shot plans; and we *must* skip this for transaction control
@@ -421,7 +421,7 @@ CompleteCachedPlan(CachedPlanSource *plansource,
* This is guaranteed not to throw error, except for the caller-error case
* of trying to save a one-shot plan. Callers typically depend on that
* since this is called just before or just after adding a pointer to the
- * CachedPlanSource to some permanent data structure of their own. Up until
+ * CachedPlanSource to some permanent data structure of their own. Up until
* this is done, a CachedPlanSource is just transient data that will go away
* automatically on transaction abort.
*/
@@ -442,13 +442,13 @@ SaveCachedPlan(CachedPlanSource *plansource)
* plans from the CachedPlanSource. If there is a generic plan, moving it
* into CacheMemoryContext would be pretty risky since it's unclear
* whether the caller has taken suitable care with making references
- * long-lived. Best thing to do seems to be to discard the plan.
+ * long-lived. Best thing to do seems to be to discard the plan.
*/
ReleaseGenericPlan(plansource);
/*
* Reparent the source memory context under CacheMemoryContext so that it
- * will live indefinitely. The query_context follows along since it's
+ * will live indefinitely. The query_context follows along since it's
* already a child of the other one.
*/
MemoryContextSetParent(plansource->context, CacheMemoryContext);
@@ -466,7 +466,7 @@ SaveCachedPlan(CachedPlanSource *plansource)
* DropCachedPlan: destroy a cached plan.
*
* Actually this only destroys the CachedPlanSource: any referenced CachedPlan
- * is released, but not destroyed until its refcount goes to zero. That
+ * is released, but not destroyed until its refcount goes to zero. That
* handles the situation where DropCachedPlan is called while the plan is
* still in use.
*/
@@ -617,7 +617,7 @@ RevalidateCachedQuery(CachedPlanSource *plansource)
plansource->search_path = NULL;
/*
- * Free the query_context. We don't really expect MemoryContextDelete to
+ * Free the query_context. We don't really expect MemoryContextDelete to
* fail, but just in case, make sure the CachedPlanSource is left in a
* reasonably sane state. (The generic plan won't get unlinked yet, but
* that's acceptable.)
@@ -675,7 +675,7 @@ RevalidateCachedQuery(CachedPlanSource *plansource)
PopActiveSnapshot();
/*
- * Check or update the result tupdesc. XXX should we use a weaker
+ * Check or update the result tupdesc. XXX should we use a weaker
* condition than equalTupleDescs() here?
*
* We assume the parameter types didn't change from the first time, so no
@@ -726,7 +726,7 @@ RevalidateCachedQuery(CachedPlanSource *plansource)
&plansource->invalItems);
/*
- * Also save the current search_path in the query_context. (This should
+ * Also save the current search_path in the query_context. (This should
* not generate much extra cruft either, since almost certainly the path
* is already valid.)
*/
@@ -860,7 +860,7 @@ BuildCachedPlan(CachedPlanSource *plansource, List *qlist,
* we ought to be holding sufficient locks to prevent any invalidation.
* However, if we're building a custom plan after having built and
* rejected a generic plan, it's possible to reach here with is_valid
- * false due to an invalidation while making the generic plan. In theory
+ * false due to an invalidation while making the generic plan. In theory
* the invalidation must be a false positive, perhaps a consequence of an
* sinval reset event or the CLOBBER_CACHE_ALWAYS debug code. But for
* safety, let's treat it as real and redo the RevalidateCachedQuery call.
@@ -1043,7 +1043,7 @@ cached_plan_cost(CachedPlan *plan, bool include_planner)
* on the number of relations in the finished plan's rangetable.
* Join planning effort actually scales much worse than linearly
* in the number of relations --- but only until the join collapse
- * limits kick in. Also, while inheritance child relations surely
+ * limits kick in. Also, while inheritance child relations surely
* add to planning effort, they don't make the join situation
* worse. So the actual shape of the planning cost curve versus
* number of relations isn't all that obvious. It will take
@@ -1153,7 +1153,7 @@ GetCachedPlan(CachedPlanSource *plansource, ParamListInfo boundParams,
/*
* If we choose to plan again, we need to re-copy the query_list,
- * since the planner probably scribbled on it. We can force
+ * since the planner probably scribbled on it. We can force
* BuildCachedPlan to do that by passing NIL.
*/
qlist = NIL;
@@ -1203,7 +1203,7 @@ GetCachedPlan(CachedPlanSource *plansource, ParamListInfo boundParams,
*
* Note: useResOwner = false is used for releasing references that are in
* persistent data structures, such as the parent CachedPlanSource or a
- * Portal. Transient references should be protected by a resource owner.
+ * Portal. Transient references should be protected by a resource owner.
*/
void
ReleaseCachedPlan(CachedPlan *plan, bool useResOwner)
@@ -1267,7 +1267,7 @@ CachedPlanSetParentContext(CachedPlanSource *plansource,
*
* This is a convenience routine that does the equivalent of
* CreateCachedPlan + CompleteCachedPlan, using the data stored in the
- * input CachedPlanSource. The result is therefore "unsaved" (regardless
+ * input CachedPlanSource. The result is therefore "unsaved" (regardless
* of the state of the source), and we don't copy any generic plan either.
* The result will be currently valid, or not, the same as the source.
*/
@@ -1420,7 +1420,7 @@ AcquireExecutorLocks(List *stmt_list, bool acquire)
{
/*
* Ignore utility statements, except those (such as EXPLAIN) that
- * contain a parsed-but-not-planned query. Note: it's okay to use
+ * contain a parsed-but-not-planned query. Note: it's okay to use
* ScanQueryForLocks, even though the query hasn't been through
* rule rewriting, because rewriting doesn't change the query
* representation.
@@ -1616,7 +1616,7 @@ plan_list_is_transient(List *stmt_list)
/*
* PlanCacheComputeResultDesc: given a list of analyzed-and-rewritten Queries,
- * determine the result tupledesc it will produce. Returns NULL if the
+ * determine the result tupledesc it will produce. Returns NULL if the
* execution will not return tuples.
*
* Note: the result is created or copied into current memory context.
diff --git a/src/backend/utils/cache/relcache.c b/src/backend/utils/cache/relcache.c
index f1140385883..8058169d067 100644
--- a/src/backend/utils/cache/relcache.c
+++ b/src/backend/utils/cache/relcache.c
@@ -124,7 +124,7 @@ bool criticalSharedRelcachesBuilt = false;
/*
* This counter counts relcache inval events received since backend startup
- * (but only for rels that are actually in cache). Presently, we use it only
+ * (but only for rels that are actually in cache). Presently, we use it only
* to detect whether data about to be written by write_relcache_init_file()
* might already be obsolete.
*/
@@ -472,7 +472,7 @@ RelationBuildTupleDesc(Relation relation)
Int16GetDatum(0));
/*
- * Open pg_attribute and begin a scan. Force heap scan if we haven't yet
+ * Open pg_attribute and begin a scan. Force heap scan if we haven't yet
* built the critical relcache entries (this includes initdb and startup
* without a pg_internal.init file).
*/
@@ -535,7 +535,7 @@ RelationBuildTupleDesc(Relation relation)
/*
* The attcacheoff values we read from pg_attribute should all be -1
- * ("unknown"). Verify this if assert checking is on. They will be
+ * ("unknown"). Verify this if assert checking is on. They will be
* computed when and if needed during tuple access.
*/
#ifdef USE_ASSERT_CHECKING
@@ -549,7 +549,7 @@ RelationBuildTupleDesc(Relation relation)
/*
* However, we can easily set the attcacheoff value for the first
- * attribute: it must be zero. This eliminates the need for special cases
+ * attribute: it must be zero. This eliminates the need for special cases
* for attnum=1 that used to exist in fastgetattr() and index_getattr().
*/
if (relation->rd_rel->relnatts > 0)
@@ -605,7 +605,7 @@ RelationBuildTupleDesc(Relation relation)
* each relcache entry that has associated rules. The context is used
* just for rule info, not for any other subsidiary data of the relcache
* entry, because that keeps the update logic in RelationClearRelation()
- * manageable. The other subsidiary data structures are simple enough
+ * manageable. The other subsidiary data structures are simple enough
* to be easy to free explicitly, anyway.
*/
static void
@@ -714,9 +714,9 @@ RelationBuildRuleLock(Relation relation)
/*
* We want the rule's table references to be checked as though by the
- * table owner, not the user referencing the rule. Therefore, scan
+ * table owner, not the user referencing the rule. Therefore, scan
* through the rule's actions and set the checkAsUser field on all
- * rtable entries. We have to look at the qual as well, in case it
+ * rtable entries. We have to look at the qual as well, in case it
* contains sublinks.
*
* The reason for doing this when the rule is loaded, rather than when
@@ -1059,7 +1059,7 @@ RelationInitIndexAccessInfo(Relation relation)
amsupport = aform->amsupport;
/*
- * Make the private context to hold index access info. The reason we need
+ * Make the private context to hold index access info. The reason we need
* a context, and not just a couple of pallocs, is so that we won't leak
* any subsidiary info attached to fmgr lookup records.
*
@@ -1107,7 +1107,7 @@ RelationInitIndexAccessInfo(Relation relation)
/*
* indcollation cannot be referenced directly through the C struct,
- * because it comes after the variable-width indkey field. Must extract
+ * because it comes after the variable-width indkey field. Must extract
* the datum the hard way...
*/
indcollDatum = fastgetattr(relation->rd_indextuple,
@@ -1132,7 +1132,7 @@ RelationInitIndexAccessInfo(Relation relation)
/*
* Fill the support procedure OID array, as well as the info about
- * opfamilies and opclass input types. (aminfo and supportinfo are left
+ * opfamilies and opclass input types. (aminfo and supportinfo are left
* as zeroes, and are filled on-the-fly when used)
*/
IndexSupportInitialize(indclass, relation->rd_support,
@@ -1220,7 +1220,7 @@ IndexSupportInitialize(oidvector *indclass,
* Note there is no provision for flushing the cache. This is OK at the
* moment because there is no way to ALTER any interesting properties of an
* existing opclass --- all you can do is drop it, which will result in
- * a useless but harmless dead entry in the cache. To support altering
+ * a useless but harmless dead entry in the cache. To support altering
* opclass membership (not the same as opfamily membership!), we'd need to
* be able to flush this cache as well as the contents of relcache entries
* for indexes.
@@ -1329,7 +1329,7 @@ LookupOpclassInfo(Oid operatorClassOid,
heap_close(rel, AccessShareLock);
/*
- * Scan pg_amproc to obtain support procs for the opclass. We only fetch
+ * Scan pg_amproc to obtain support procs for the opclass. We only fetch
* the default ones (those with lefttype = righttype = opcintype).
*/
if (numSupport > 0)
@@ -1855,7 +1855,7 @@ RelationDestroyRelation(Relation relation)
*
* NB: when rebuilding, we'd better hold some lock on the relation,
* else the catalog data we need to read could be changing under us.
- * Also, a rel to be rebuilt had better have refcnt > 0. This is because
+ * Also, a rel to be rebuilt had better have refcnt > 0. This is because
* an sinval reset could happen while we're accessing the catalogs, and
* the rel would get blown away underneath us by RelationCacheInvalidate
* if it has zero refcnt.
@@ -1878,7 +1878,7 @@ RelationClearRelation(Relation relation, bool rebuild)
/*
* Make sure smgr and lower levels close the relation's files, if they
* weren't closed already. If the relation is not getting deleted, the
- * next smgr access should reopen the files automatically. This ensures
+ * next smgr access should reopen the files automatically. This ensures
* that the low-level file access state is updated after, say, a vacuum
* truncation.
*/
@@ -1890,7 +1890,7 @@ RelationClearRelation(Relation relation, bool rebuild)
* in case it is a mapped relation whose mapping changed.
*
* If it's a nailed index, then we need to re-read the pg_class row to see
- * if its relfilenode changed. We can't necessarily do that here, because
+ * if its relfilenode changed. We can't necessarily do that here, because
* we might be in a failed transaction. We assume it's okay to do it if
* there are open references to the relcache entry (cf notes for
* AtEOXact_RelationCache). Otherwise just mark the entry as possibly
@@ -1951,7 +1951,7 @@ RelationClearRelation(Relation relation, bool rebuild)
* over from the old entry). This is to avoid trouble in case an
* error causes us to lose control partway through. The old entry
* will still be marked !rd_isvalid, so we'll try to rebuild it again
- * on next access. Meanwhile it's not any less valid than it was
+ * on next access. Meanwhile it's not any less valid than it was
* before, so any code that might expect to continue accessing it
* isn't hurt by the rebuild failure. (Consider for example a
* subtransaction that ALTERs a table and then gets canceled partway
@@ -2140,7 +2140,7 @@ RelationCacheInvalidateEntry(Oid relationId)
/*
* RelationCacheInvalidate
* Blow away cached relation descriptors that have zero reference counts,
- * and rebuild those with positive reference counts. Also reset the smgr
+ * and rebuild those with positive reference counts. Also reset the smgr
* relation cache and re-read relation mapping data.
*
* This is currently used only to recover from SI message buffer overflow,
@@ -2153,7 +2153,7 @@ RelationCacheInvalidateEntry(Oid relationId)
* We do this in two phases: the first pass deletes deletable items, and
* the second one rebuilds the rebuildable items. This is essential for
* safety, because hash_seq_search only copes with concurrent deletion of
- * the element it is currently visiting. If a second SI overflow were to
+ * the element it is currently visiting. If a second SI overflow were to
* occur while we are walking the table, resulting in recursive entry to
* this routine, we could crash because the inner invocation blows away
* the entry next to be visited by the outer scan. But this way is OK,
@@ -2313,7 +2313,7 @@ AtEOXact_RelationCache(bool isCommit)
* For simplicity, eoxact_list[] entries are not deleted till end of
* top-level transaction, even though we could remove them at
* subtransaction end in some cases, or remove relations from the list if
- * they are cleared for other reasons. Therefore we should expect the
+ * they are cleared for other reasons. Therefore we should expect the
* case that list entries are not found in the hashtable; if not, there's
* nothing to do for them.
*/
@@ -2363,7 +2363,7 @@ AtEOXact_cleanup(Relation relation, bool isCommit)
* transaction calls. (That seems bogus, but it's not worth fixing.)
*
* Note: ideally this check would be applied to every relcache entry, not
- * just those that have eoxact work to do. But it's not worth forcing a
+ * just those that have eoxact work to do. But it's not worth forcing a
* scan of the whole relcache just for this. (Moreover, doing so would
* mean that assert-enabled testing never tests the hash_search code path
* above, which seems a bad idea.)
@@ -2667,7 +2667,7 @@ RelationBuildLocalRelation(const char *relname,
/*
* Insert relation physical and logical identifiers (OIDs) into the right
- * places. For a mapped relation, we set relfilenode to zero and rely on
+ * places. For a mapped relation, we set relfilenode to zero and rely on
* RelationInitPhysicalAddr to consult the map.
*/
rel->rd_rel->relisshared = shared_relation;
@@ -2910,7 +2910,7 @@ RelationCacheInitializePhase2(void)
oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
/*
- * Try to load the shared relcache cache file. If unsuccessful, bootstrap
+ * Try to load the shared relcache cache file. If unsuccessful, bootstrap
* the cache with pre-made descriptors for the critical shared catalogs.
*/
if (!load_relcache_init_file(true))
@@ -2990,9 +2990,9 @@ RelationCacheInitializePhase3(void)
/*
* If we didn't get the critical system indexes loaded into relcache, do
- * so now. These are critical because the catcache and/or opclass cache
+ * so now. These are critical because the catcache and/or opclass cache
* depend on them for fetches done during relcache load. Thus, we have an
- * infinite-recursion problem. We can break the recursion by doing
+ * infinite-recursion problem. We can break the recursion by doing
* heapscans instead of indexscans at certain key spots. To avoid hobbling
* performance, we only want to do that until we have the critical indexes
* loaded into relcache. Thus, the flag criticalRelcachesBuilt is used to
@@ -3009,7 +3009,7 @@ RelationCacheInitializePhase3(void)
* RewriteRelRulenameIndexId and TriggerRelidNameIndexId are not critical
* in the same way as the others, because the critical catalogs don't
* (currently) have any rules or triggers, and so these indexes can be
- * rebuilt without inducing recursion. However they are used during
+ * rebuilt without inducing recursion. However they are used during
* relcache load when a rel does have rules or triggers, so we choose to
* nail them for performance reasons.
*/
@@ -3040,7 +3040,7 @@ RelationCacheInitializePhase3(void)
*
* DatabaseNameIndexId isn't critical for relcache loading, but rather for
* initial lookup of MyDatabaseId, without which we'll never find any
- * non-shared catalogs at all. Autovacuum calls InitPostgres with a
+ * non-shared catalogs at all. Autovacuum calls InitPostgres with a
* database OID, so it instead depends on DatabaseOidIndexId. We also
* need to nail up some indexes on pg_authid and pg_auth_members for use
* during client authentication.
@@ -3472,7 +3472,7 @@ RelationGetIndexList(Relation relation)
/*
* We build the list we intend to return (in the caller's context) while
- * doing the scan. After successfully completing the scan, we copy that
+ * doing the scan. After successfully completing the scan, we copy that
* list into the relcache entry. This avoids cache-context memory leakage
* if we get some sort of error partway through.
*/
@@ -3510,7 +3510,7 @@ RelationGetIndexList(Relation relation)
/*
* indclass cannot be referenced directly through the C struct,
- * because it comes after the variable-width indkey field. Must
+ * because it comes after the variable-width indkey field. Must
* extract the datum the hard way...
*/
indclassDatum = heap_getattr(htup,
@@ -4060,7 +4060,7 @@ errtablecol(Relation rel, int attnum)
* given directly rather than extracted from the relation's catalog data.
*
* Don't use this directly unless errtablecol() is inconvenient for some
- * reason. This might possibly be needed during intermediate states in ALTER
+ * reason. This might possibly be needed during intermediate states in ALTER
* TABLE, for instance.
*/
int
@@ -4480,7 +4480,7 @@ load_relcache_init_file(bool shared)
return true;
/*
- * init file is broken, so do it the hard way. We don't bother trying to
+ * init file is broken, so do it the hard way. We don't bother trying to
* free the clutter we just allocated; it's not in the relcache so it
* won't hurt.
*/
@@ -4545,7 +4545,7 @@ write_relcache_init_file(bool shared)
}
/*
- * Write a magic number to serve as a file version identifier. We can
+ * Write a magic number to serve as a file version identifier. We can
* change the magic number whenever the relcache layout changes.
*/
magic = RELCACHE_INIT_FILEMAGIC;
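As the comment says, the magic number is a coarse version check for the init file. A minimal sketch of the idea, with a made-up magic value and no claim to match the real file layout:

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

/* Hypothetical magic value; bump it whenever the file layout changes so
 * that files written by older binaries are rejected and rebuilt rather
 * than misread. */
#define INIT_FILEMAGIC 0x20240101

static bool
init_file_version_ok(FILE *fp)
{
	int32_t		magic;

	if (fread(&magic, sizeof(magic), 1, fp) != 1)
		return false;			/* unreadable: treat as broken */
	return magic == INIT_FILEMAGIC;
}

A mismatch only forces the cache to be rebuilt from the catalogs, so a stale file costs time, never correctness.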
@@ -4770,7 +4770,7 @@ RelationCacheInitFilePostInvalidate(void)
*
* We used to keep the init files across restarts, but that is unsafe in PITR
* scenarios, and even in simple crash-recovery cases there are windows for
- * the init files to become out-of-sync with the database. So now we just
+ * the init files to become out-of-sync with the database. So now we just
* remove them during startup and expect the first backend launch to rebuild
* them. Of course, this has to happen in each database of the cluster.
*/
diff --git a/src/backend/utils/cache/relmapper.c b/src/backend/utils/cache/relmapper.c
index 2c7d9f3287b..57ff7de4dbb 100644
--- a/src/backend/utils/cache/relmapper.c
+++ b/src/backend/utils/cache/relmapper.c
@@ -23,7 +23,7 @@
* mapped catalogs can only be relocated by operations such as VACUUM FULL
* and CLUSTER, which make no transactionally-significant changes: it must be
* safe for the new file to replace the old, even if the transaction itself
- * aborts. An important factor here is that the indexes and toast table of
+ * aborts. An important factor here is that the indexes and toast table of
* a mapped catalog must also be mapped, so that the rewrites/relocations of
* all these files commit in a single map file update rather than being tied
* to transaction commit.
@@ -57,13 +57,13 @@
/*
* The map file is critical data: we have no automatic method for recovering
* from loss or corruption of it. We use a CRC so that we can detect
- * corruption. To minimize the risk of failed updates, the map file should
+ * corruption. To minimize the risk of failed updates, the map file should
* be kept to no more than one standard-size disk sector (ie 512 bytes),
* and we use overwrite-in-place rather than playing renaming games.
* The struct layout below is designed to occupy exactly 512 bytes, which
* might make filesystem updates a bit more efficient.
*
- * Entries in the mappings[] array are in no particular order. We could
+ * Entries in the mappings[] array are in no particular order. We could
* speed searching by insisting on OID order, but it really shouldn't be
* worth the trouble given the intended size of the mapping sets.
*/
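A standalone sketch of the one-sector layout idea described above; the field names and entry count are illustrative, not the actual RelMapFile definition:

#include <stdint.h>

#define MAX_MAPPINGS 62			/* chosen so the whole struct is 512 bytes */

typedef struct
{
	uint32_t	mapoid;			/* OID of a mapped relation */
	uint32_t	mapfilenode;	/* its current physical file number */
} MapEntry;

typedef struct
{
	int32_t		magic;			/* format identifier */
	int32_t		num_mappings;	/* entries currently in use */
	MapEntry	mappings[MAX_MAPPINGS];
	int32_t		crc;			/* CRC of all bytes above, detects corruption */
	int32_t		pad;			/* pad to exactly 512 bytes */
} MapFileData;

/* One standard disk sector: an overwrite-in-place is then as close to
 * atomic as the hardware allows, which is why no rename dance is needed. */
_Static_assert(sizeof(MapFileData) == 512, "map file must fit one sector");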
@@ -90,7 +90,7 @@ typedef struct RelMapFile
/*
* The currently known contents of the shared map file and our database's
- * local map file are stored here. These can be reloaded from disk
+ * local map file are stored here. These can be reloaded from disk
* immediately whenever we receive an update sinval message.
*/
static RelMapFile shared_map;
@@ -293,7 +293,7 @@ merge_map_updates(RelMapFile *map, const RelMapFile *updates, bool add_okay)
* RelationMapRemoveMapping
*
* Remove a relation's entry in the map. This is only allowed for "active"
- * (but not committed) local mappings. We need it so we can back out the
+ * (but not committed) local mappings. We need it so we can back out the
* entry for the transient target file when doing VACUUM FULL/CLUSTER on
* a mapped relation.
*/
@@ -321,7 +321,7 @@ RelationMapRemoveMapping(Oid relationId)
* RelationMapInvalidate
*
* This routine is invoked for SI cache flush messages. We must re-read
- * the indicated map file. However, we might receive a SI message in a
+ * the indicated map file. However, we might receive a SI message in a
* process that hasn't yet, and might never, load the mapping files;
* for example the autovacuum launcher, which *must not* try to read
* a local map since it is attached to no particular database.
@@ -389,7 +389,7 @@ AtCCI_RelationMap(void)
*
* During commit, this must be called as late as possible before the actual
* transaction commit, so as to minimize the window where the transaction
- * could still roll back after committing map changes. Although nothing
+ * could still roll back after committing map changes. Although nothing
* critically bad happens in such a case, we still would prefer that it
* not happen, since we'd possibly be losing useful updates to the relations'
* pg_class row(s).
@@ -456,7 +456,7 @@ AtPrepare_RelationMap(void)
/*
* CheckPointRelationMap
*
- * This is called during a checkpoint. It must ensure that any relation map
+ * This is called during a checkpoint. It must ensure that any relation map
* updates that were WAL-logged before the start of the checkpoint are
* securely flushed to disk and will not need to be replayed later. This
* seems unlikely to be a performance-critical issue, so we use a simple
@@ -647,7 +647,7 @@ load_relmap_file(bool shared)
*
* Because this may be called during WAL replay when MyDatabaseId,
* DatabasePath, etc aren't valid, we require the caller to pass in suitable
- * values. The caller is also responsible for being sure no concurrent
+ * values. The caller is also responsible for being sure no concurrent
* map update could be happening.
*/
static void
@@ -767,7 +767,7 @@ write_relmap_file(bool shared, RelMapFile *newmap,
/*
* Make sure that the files listed in the map are not deleted if the outer
- * transaction aborts. This had better be within the critical section
+ * transaction aborts. This had better be within the critical section
* too: it's not likely to fail, but if it did, we'd arrive at transaction
* abort with the files still vulnerable. PANICing will leave things in a
* good state on-disk.
diff --git a/src/backend/utils/cache/spccache.c b/src/backend/utils/cache/spccache.c
index e689291b2ec..5f257749a1e 100644
--- a/src/backend/utils/cache/spccache.c
+++ b/src/backend/utils/cache/spccache.c
@@ -4,7 +4,7 @@
* Tablespace cache management.
*
* We cache the parsed version of spcoptions for each tablespace to avoid
- * needing to reparse on every lookup. Right now, there doesn't appear to
+ * needing to reparse on every lookup. Right now, there doesn't appear to
* be a measurable performance gain from doing this, but that might change
* in the future as we add more options.
*
@@ -128,7 +128,7 @@ get_tablespace(Oid spcid)
return spc;
/*
- * Not found in TableSpace cache. Check catcache. If we don't find a
+ * Not found in TableSpace cache. Check catcache. If we don't find a
* valid HeapTuple, it must mean someone has managed to request tablespace
* details for a non-existent tablespace. We'll just treat that case as
* if no options were specified.
@@ -158,7 +158,7 @@ get_tablespace(Oid spcid)
}
/*
- * Now create the cache entry. It's important to do this only after
+ * Now create the cache entry. It's important to do this only after
* reading the pg_tablespace entry, since doing so could cause a cache
* flush.
*/
diff --git a/src/backend/utils/cache/syscache.c b/src/backend/utils/cache/syscache.c
index ecb0f96d467..9aa5ee5ac05 100644
--- a/src/backend/utils/cache/syscache.c
+++ b/src/backend/utils/cache/syscache.c
@@ -801,7 +801,7 @@ static bool CacheInitialized = false;
* InitCatalogCache - initialize the caches
*
* Note that no database access is done here; we only allocate memory
- * and initialize the cache structure. Interrogation of the database
+ * and initialize the cache structure. Interrogation of the database
* to complete initialization of a cache happens upon first use
* of that cache.
*/
@@ -1038,7 +1038,7 @@ SearchSysCacheExistsAttName(Oid relid, const char *attname)
* extract a specific attribute.
*
* This is equivalent to using heap_getattr() on a tuple fetched
- * from a non-cached relation. Usually, this is only used for attributes
+ * from a non-cached relation. Usually, this is only used for attributes
* that could be NULL or variable length; the fixed-size attributes in
* a system table are accessed just by mapping the tuple onto the C struct
* declarations from include/catalog/.
diff --git a/src/backend/utils/cache/ts_cache.c b/src/backend/utils/cache/ts_cache.c
index 65a8ad7be1a..f6b61a2c3ed 100644
--- a/src/backend/utils/cache/ts_cache.c
+++ b/src/backend/utils/cache/ts_cache.c
@@ -605,7 +605,7 @@ check_TSCurrentConfig(char **newval, void **extra, GucSource source)
/*
* When source == PGC_S_TEST, we are checking the argument of an ALTER
- * DATABASE SET or ALTER USER SET command. It could be that the
+ * DATABASE SET or ALTER USER SET command. It could be that the
* intended use of the setting is for some other database, so we
* should not error out if the text search configuration is not
* present in the current database. We issue a NOTICE instead.
diff --git a/src/backend/utils/cache/typcache.c b/src/backend/utils/cache/typcache.c
index 2fa6d335350..fdda169cf38 100644
--- a/src/backend/utils/cache/typcache.c
+++ b/src/backend/utils/cache/typcache.c
@@ -11,7 +11,7 @@
*
* Several seemingly-odd choices have been made to support use of the type
* cache by generic array and record handling routines, such as array_eq(),
- * record_cmp(), and hash_array(). Because those routines are used as index
+ * record_cmp(), and hash_array(). Because those routines are used as index
* support operations, they cannot leak memory. To allow them to execute
* efficiently, all information that they would like to re-use across calls
* is kept in the type cache.
@@ -101,7 +101,7 @@ typedef struct TypeCacheEnumData
*
* Stored record types are remembered in a linear array of TupleDescs,
* which can be indexed quickly with the assigned typmod. There is also
- * a hash table to speed searches for matching TupleDescs. The hash key
+ * a hash table to speed searches for matching TupleDescs. The hash key
* uses just the first N columns' type OIDs, and so we may have multiple
* entries with the same hash key.
*/
@@ -482,7 +482,7 @@ load_typcache_tupdesc(TypeCacheEntry *typentry)
/*
* Link to the tupdesc and increment its refcount (we assert it's a
- * refcounted descriptor). We don't use IncrTupleDescRefCount() for this,
+ * refcounted descriptor). We don't use IncrTupleDescRefCount() for this,
* because the reference mustn't be entered in the current resource owner;
* it can outlive the current query.
*/
@@ -1074,7 +1074,7 @@ load_enum_cache_data(TypeCacheEntry *tcache)
/*
* Read all the information for members of the enum type. We collect the
* info in working memory in the caller's context, and then transfer it to
- * permanent memory in CacheMemoryContext. This minimizes the risk of
+ * permanent memory in CacheMemoryContext. This minimizes the risk of
* leaking memory from CacheMemoryContext in the event of an error partway
* through.
*/
diff --git a/src/backend/utils/error/elog.c b/src/backend/utils/error/elog.c
index 59415f698d2..c82707165fa 100644
--- a/src/backend/utils/error/elog.c
+++ b/src/backend/utils/error/elog.c
@@ -5,7 +5,7 @@
*
* Because of the extremely high rate at which log messages can be generated,
* we need to be mindful of the performance cost of obtaining any information
- * that may be logged. Also, it's important to keep in mind that this code may
+ * that may be logged. Also, it's important to keep in mind that this code may
* get called from within an aborted transaction, in which case operations
* such as syscache lookups are unsafe.
*
@@ -15,23 +15,23 @@
* if we run out of memory, it's important to be able to report that fact.
* There are a number of considerations that go into this.
*
- * First, distinguish between re-entrant use and actual recursion. It
+ * First, distinguish between re-entrant use and actual recursion. It
* is possible for an error or warning message to be emitted while the
- * parameters for an error message are being computed. In this case
+ * parameters for an error message are being computed. In this case
* errstart has been called for the outer message, and some field values
- * may have already been saved, but we are not actually recursing. We handle
- * this by providing a (small) stack of ErrorData records. The inner message
+ * may have already been saved, but we are not actually recursing. We handle
+ * this by providing a (small) stack of ErrorData records. The inner message
* can be computed and sent without disturbing the state of the outer message.
* (If the inner message is actually an error, this isn't very interesting
* because control won't come back to the outer message generator ... but
* if the inner message is only debug or log data, this is critical.)
*
* Second, actual recursion will occur if an error is reported by one of
- * the elog.c routines or something they call. By far the most probable
+ * the elog.c routines or something they call. By far the most probable
* scenario of this sort is "out of memory"; and it's also the nastiest
* to handle because we'd likely also run out of memory while trying to
* report this error! Our escape hatch for this case is to reset the
- * ErrorContext to empty before trying to process the inner error. Since
+ * ErrorContext to empty before trying to process the inner error. Since
* ErrorContext is guaranteed to have at least 8K of space in it (see mcxt.c),
* we should be able to process an "out of memory" message successfully.
* Since we lose the prior error state due to the reset, we won't be able
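A minimal sketch of the ErrorData-stack idea from the comment above, reduced to its bones (the real errstart/errfinish track far more state; the names below are illustrative):

#include <stdio.h>
#include <stdlib.h>

#define ERRORDATA_STACK_SIZE 5	/* small, matching the "(small) stack" above */

typedef struct
{
	int			elevel;
	const char *message;
} ErrorData;

static ErrorData errordata[ERRORDATA_STACK_SIZE];
static int	errordata_stack_depth = -1; /* -1 means no error in progress */

/* Begin one report; re-entrant use (a warning emitted while the fields of
 * another message are being computed) just pushes a second frame. */
static ErrorData *
error_start(int elevel, const char *message)
{
	if (++errordata_stack_depth >= ERRORDATA_STACK_SIZE)
	{
		/* Overflow suggests infinite recursion during error recovery. */
		fprintf(stderr, "error stack overflow\n");
		abort();
	}
	errordata[errordata_stack_depth].elevel = elevel;
	errordata[errordata_stack_depth].message = message;
	return &errordata[errordata_stack_depth];
}

/* Emit and pop the top frame; any outer frame is left undisturbed. */
static void
error_finish(void)
{
	ErrorData  *e = &errordata[errordata_stack_depth--];

	fprintf(stderr, "level %d: %s\n", e->elevel, e->message);
}

Re-entrant use pushes and pops frames; genuine recursion is what the depth limit and the ErrorContext reset guard against.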
@@ -116,7 +116,7 @@ char *Log_destination_string = NULL;
/*
* Max string length to send to syslog(). Note that this doesn't count the
* sequence-number prefix we add, and of course it doesn't count the prefix
- * added by syslog itself. Solaris and sysklogd truncate the final message
+ * added by syslog itself. Solaris and sysklogd truncate the final message
* at 1024 bytes, so this value leaves 124 bytes for those prefixes. (Most
* other syslog implementations seem to have limits of 2KB or so.)
*/
@@ -243,7 +243,7 @@ errstart(int elevel, const char *filename, int lineno,
{
/*
* If we are inside a critical section, all errors become PANIC
- * errors. See miscadmin.h.
+ * errors. See miscadmin.h.
*/
if (CritSectionCount > 0)
elevel = PANIC;
@@ -256,7 +256,7 @@ errstart(int elevel, const char *filename, int lineno,
*
* 2. ExitOnAnyError mode switch is set (initdb uses this).
*
- * 3. the error occurred after proc_exit has begun to run. (It's
+ * 3. the error occurred after proc_exit has begun to run. (It's
* proc_exit's responsibility to see that this doesn't turn into
* infinite recursion!)
*/
@@ -349,7 +349,7 @@ errstart(int elevel, const char *filename, int lineno,
if (++errordata_stack_depth >= ERRORDATA_STACK_SIZE)
{
/*
- * Wups, stack not big enough. We treat this as a PANIC condition
+ * Wups, stack not big enough. We treat this as a PANIC condition
* because it suggests an infinite loop of errors during error
* recovery.
*/
@@ -451,7 +451,7 @@ errfinish(int dummy,...)
*
* Reset InterruptHoldoffCount in case we ereport'd from inside an
* interrupt holdoff section. (We assume here that no handler will
- * itself be inside a holdoff section. If necessary, such a handler
+ * itself be inside a holdoff section. If necessary, such a handler
* could save and restore InterruptHoldoffCount for itself, but this
* should make life easier for most.)
*
@@ -477,7 +477,7 @@ errfinish(int dummy,...)
* progress, so that we can report the message before dying. (Without
* this, pq_putmessage will refuse to send the message at all, which is
* what we want for NOTICE messages, but not for fatal exits.) This hack
- * is necessary because of poor design of old-style copy protocol. Note
+ * is necessary because of poor design of old-style copy protocol. Note
* we must do this even if client is fool enough to have set
* client_min_messages above FATAL, so don't look at output_to_client.
*/
@@ -599,7 +599,7 @@ errcode(int sqlerrcode)
/*
* errcode_for_file_access --- add SQLSTATE error code to the current error
*
- * The SQLSTATE code is chosen based on the saved errno value. We assume
+ * The SQLSTATE code is chosen based on the saved errno value. We assume
* that the failing operation was some type of disk file access.
*
* NOTE: the primary error message string should generally include %m
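The mapping is essentially a switch on the errno captured at failure time. A sketch with invented class names standing in for the SQLSTATE codes:

#include <errno.h>

/* Coarse error classes; these names are placeholders for the sketch. */
typedef enum
{
	ERR_NO_FILE,
	ERR_NO_PERMISSION,
	ERR_DISK_FULL,
	ERR_IO
} FileErrClass;

/* Classify a saved errno from a failed file operation. */
static FileErrClass
errclass_for_file_access(int saved_errno)
{
	switch (saved_errno)
	{
		case ENOENT:
			return ERR_NO_FILE;
		case EACCES:
		case EPERM:
			return ERR_NO_PERMISSION;
		case ENOSPC:
			return ERR_DISK_FULL;
		default:
			return ERR_IO;		/* anything unrecognized */
	}
}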
@@ -670,7 +670,7 @@ errcode_for_file_access(void)
/*
* errcode_for_socket_access --- add SQLSTATE error code to the current error
*
- * The SQLSTATE code is chosen based on the saved errno value. We assume
+ * The SQLSTATE code is chosen based on the saved errno value. We assume
* that the failing operation was some type of socket access.
*
* NOTE: the primary error message string should generally include %m
@@ -708,7 +708,7 @@ errcode_for_socket_access(void)
* This macro handles expansion of a format string and associated parameters;
* it's common code for errmsg(), errdetail(), etc. Must be called inside
* a routine that is declared like "const char *fmt, ..." and has an edata
- * pointer set up. The message is assigned to edata->targetfield, or
+ * pointer set up. The message is assigned to edata->targetfield, or
* appended to it if appendval is true. The message is subject to translation
* if translateit is true.
*
@@ -1267,7 +1267,7 @@ elog_start(const char *filename, int lineno, const char *funcname)
if (++errordata_stack_depth >= ERRORDATA_STACK_SIZE)
{
/*
- * Wups, stack not big enough. We treat this as a PANIC condition
+ * Wups, stack not big enough. We treat this as a PANIC condition
* because it suggests an infinite loop of errors during error
* recovery. Note that the message is intentionally not localized,
* else failure to convert it to client encoding could cause further
@@ -1435,7 +1435,7 @@ EmitErrorReport(void)
/*
* CopyErrorData --- obtain a copy of the topmost error stack entry
*
- * This is only for use in error handler code. The data is copied into the
+ * This is only for use in error handler code. The data is copied into the
* current memory context, so callers should always switch away from
* ErrorContext first; otherwise it will be lost when FlushErrorState is done.
*/
@@ -1546,7 +1546,7 @@ FlushErrorState(void)
*
* A handler can do CopyErrorData/FlushErrorState to get out of the error
* subsystem, then do some processing, and finally ReThrowError to re-throw
- * the original error. This is slower than just PG_RE_THROW() but should
+ * the original error. This is slower than just PG_RE_THROW() but should
* be used if the "some processing" is likely to incur another error.
*/
void
@@ -1563,7 +1563,7 @@ ReThrowError(ErrorData *edata)
if (++errordata_stack_depth >= ERRORDATA_STACK_SIZE)
{
/*
- * Wups, stack not big enough. We treat this as a PANIC condition
+ * Wups, stack not big enough. We treat this as a PANIC condition
* because it suggests an infinite loop of errors during error
* recovery.
*/
@@ -1720,7 +1720,7 @@ set_syslog_parameters(const char *ident, int facility)
{
/*
* guc.c is likely to call us repeatedly with same parameters, so don't
- * thrash the syslog connection unnecessarily. Also, we do not re-open
+ * thrash the syslog connection unnecessarily. Also, we do not re-open
* the connection until needed, since this routine will get called whether
* or not Log_destination actually mentions syslog.
*
@@ -2654,7 +2654,7 @@ send_message_to_server_log(ErrorData *edata)
*
* Note: when there are multiple backends writing into the syslogger pipe,
* it's critical that each write go into the pipe indivisibly, and not
- * get interleaved with data from other processes. Fortunately, the POSIX
+ * get interleaved with data from other processes. Fortunately, the POSIX
* spec requires that writes to pipes be atomic so long as they are not
* more than PIPE_BUF bytes long. So we divide long messages into chunks
* that are no more than that length, and send one chunk per write() call.
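A sketch of the chunking loop this comment describes; the real code also prepends a small protocol header to each chunk so the syslogger can reassemble messages, which is omitted here:

#include <limits.h>				/* PIPE_BUF */
#include <unistd.h>

/* Write msg to fd in chunks no larger than PIPE_BUF, so that each write()
 * is atomic with respect to other writers on the same pipe (POSIX only
 * guarantees atomicity up to PIPE_BUF bytes). Error handling elided. */
static void
write_pipe_chunked(int fd, const char *msg, size_t len)
{
	while (len > 0)
	{
		size_t		chunk = len > PIPE_BUF ? PIPE_BUF : len;

		(void) write(fd, msg, chunk);
		msg += chunk;
		len -= chunk;
	}
}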
@@ -2974,7 +2974,7 @@ useful_strerror(int errnum)
str = strerror(errnum);
/*
- * Some strerror()s return an empty string for out-of-range errno. This
+ * Some strerror()s return an empty string for out-of-range errno. This
* is ANSI C spec compliant, but not exactly useful. Also, we may get
* back strings of question marks if libc cannot transcode the message to
* the codeset specified by LC_CTYPE. If we get nothing useful, first try
diff --git a/src/backend/utils/fmgr/dfmgr.c b/src/backend/utils/fmgr/dfmgr.c
index 562a7c9ab0c..5528eded636 100644
--- a/src/backend/utils/fmgr/dfmgr.c
+++ b/src/backend/utils/fmgr/dfmgr.c
@@ -131,7 +131,7 @@ load_external_function(char *filename, char *funcname,
/*
* This function loads a shlib file without looking up any particular
- * function in it. If the same shlib has previously been loaded,
+ * function in it. If the same shlib has previously been loaded,
* unload and reload it.
*
* When 'restricted' is true, only libraries in the presumed-secure
@@ -171,7 +171,7 @@ lookup_external_function(void *filehandle, char *funcname)
/*
* Load the specified dynamic-link library file, unless it already is
- * loaded. Return the pg_dl* handle for the file.
+ * loaded. Return the pg_dl* handle for the file.
*
* Note: libname is expected to be an exact name for the library file.
*/
@@ -473,7 +473,7 @@ file_exists(const char *name)
* If name contains a slash, check if the file exists, if so return
* the name. Else (no slash) try to expand using search path (see
* find_in_dynamic_libpath below); if that works, return the fully
- * expanded file name. If the previous failed, append DLSUFFIX and
+ * expanded file name. If the previous failed, append DLSUFFIX and
* try again. If all fails, just return the original name.
*
* The result will always be freshly palloc'd.
diff --git a/src/backend/utils/fmgr/fmgr.c b/src/backend/utils/fmgr/fmgr.c
index 778f75aefcd..0e804c19116 100644
--- a/src/backend/utils/fmgr/fmgr.c
+++ b/src/backend/utils/fmgr/fmgr.c
@@ -96,7 +96,7 @@ static Datum fmgr_security_definer(PG_FUNCTION_ARGS);
/*
- * Lookup routines for builtin-function table. We can search by either Oid
+ * Lookup routines for builtin-function table. We can search by either Oid
* or name, but search by Oid is much faster.
*/
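A sketch of why the OID path is faster: with the table sorted by OID, that search can be binary, while a name search has to scan linearly. The table contents here are placeholders, not the generated builtins table:

#include <stdint.h>
#include <stddef.h>

typedef uint32_t Oid;

typedef struct
{
	Oid			foid;
	const char *funcName;
} BuiltinEntry;

/* Hypothetical table; assumed sorted by OID. */
static const BuiltinEntry builtins[] = {
	{31, "byteaout"}, {33, "charout"}, {46, "textin"},
};
static const int nbuiltins = sizeof(builtins) / sizeof(builtins[0]);

/* O(log n) lookup by OID; a by-name lookup would walk all entries. */
static const BuiltinEntry *
lookup_by_oid(Oid id)
{
	int			low = 0,
				high = nbuiltins - 1;

	while (low <= high)
	{
		int			mid = (low + high) / 2;

		if (builtins[mid].foid == id)
			return &builtins[mid];
		if (builtins[mid].foid > id)
			high = mid - 1;
		else
			low = mid + 1;
	}
	return NULL;
}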
@@ -581,7 +581,7 @@ clear_external_function_hash(void *filehandle)
* Copy an FmgrInfo struct
*
* This is inherently somewhat bogus since we can't reliably duplicate
- * language-dependent subsidiary info. We cheat by zeroing fn_extra,
+ * language-dependent subsidiary info. We cheat by zeroing fn_extra,
* instead, meaning that subsidiary info will have to be recomputed.
*/
void
@@ -861,7 +861,7 @@ fmgr_oldstyle(PG_FUNCTION_ARGS)
/*
- * Support for security-definer and proconfig-using functions. We support
+ * Support for security-definer and proconfig-using functions. We support
* both of these features using the same call handler, because they are
* often used together and it would be inefficient (as well as notationally
* messy) to have two levels of call handler involved.
@@ -881,7 +881,7 @@ struct fmgr_security_definer_cache
* (All this info is cached for the duration of the current query.)
* To execute a call, we temporarily replace the flinfo with the cached
* and looked-up one, while keeping the outer fcinfo (which contains all
- * the actual arguments, etc.) intact. This is not re-entrant, but then
+ * the actual arguments, etc.) intact. This is not re-entrant, but then
* the fcinfo itself can't be used re-entrantly anyway.
*/
static Datum
@@ -961,7 +961,7 @@ fmgr_security_definer(PG_FUNCTION_ARGS)
/*
* We don't need to restore GUC or userid settings on error, because the
- * ensuing xact or subxact abort will do that. The PG_TRY block is only
+ * ensuing xact or subxact abort will do that. The PG_TRY block is only
* needed to clean up the flinfo link.
*/
save_flinfo = fcinfo->flinfo;
@@ -1014,7 +1014,7 @@ fmgr_security_definer(PG_FUNCTION_ARGS)
/*
* These are for invocation of a specifically named function with a
* directly-computed parameter list. Note that neither arguments nor result
- * are allowed to be NULL. Also, the function cannot be one that needs to
+ * are allowed to be NULL. Also, the function cannot be one that needs to
* look at FmgrInfo, since there won't be any.
*/
Datum
@@ -1559,8 +1559,8 @@ FunctionCall9Coll(FmgrInfo *flinfo, Oid collation, Datum arg1, Datum arg2,
/*
* These are for invocation of a function identified by OID with a
* directly-computed parameter list. Note that neither arguments nor result
- * are allowed to be NULL. These are essentially fmgr_info() followed
- * by FunctionCallN(). If the same function is to be invoked repeatedly,
+ * are allowed to be NULL. These are essentially fmgr_info() followed
+ * by FunctionCallN(). If the same function is to be invoked repeatedly,
* do the fmgr_info() once and then use FunctionCallN().
*/
Datum
@@ -1889,7 +1889,7 @@ OidFunctionCall9Coll(Oid functionId, Oid collation, Datum arg1, Datum arg2,
*
* One important difference from the bare function call is that we will
* push any active SPI context, allowing SPI-using I/O functions to be
- * called from other SPI functions without extra notation. This is a hack,
+ * called from other SPI functions without extra notation. This is a hack,
* but the alternative of expecting all SPI functions to do SPI_push/SPI_pop
* around I/O calls seems worse.
*/
diff --git a/src/backend/utils/fmgr/funcapi.c b/src/backend/utils/fmgr/funcapi.c
index 6347a8f1ac3..5341923fce8 100644
--- a/src/backend/utils/fmgr/funcapi.c
+++ b/src/backend/utils/fmgr/funcapi.c
@@ -136,7 +136,7 @@ per_MultiFuncCall(PG_FUNCTION_ARGS)
* FuncCallContext is pointing to it), but in most usage patterns the
* tuples stored in it will be in the function's per-tuple context. So at
* the beginning of each call, the Slot will hold a dangling pointer to an
- * already-recycled tuple. We clear it out here.
+ * already-recycled tuple. We clear it out here.
*
* Note: use of retval->slot is obsolete as of 8.0, and we expect that it
* will always be NULL. This is just here for backwards compatibility in
@@ -192,13 +192,13 @@ shutdown_MultiFuncCall(Datum arg)
* Given a function's call info record, determine the kind of datatype
* it is supposed to return. If resultTypeId isn't NULL, *resultTypeId
* receives the actual datatype OID (this is mainly useful for scalar
- * result types). If resultTupleDesc isn't NULL, *resultTupleDesc
+ * result types). If resultTupleDesc isn't NULL, *resultTupleDesc
* receives a pointer to a TupleDesc when the result is of a composite
* type, or NULL when it's a scalar result.
*
* One hard case that this handles is resolution of actual rowtypes for
* functions returning RECORD (from either the function's OUT parameter
- * list, or a ReturnSetInfo context node). TYPEFUNC_RECORD is returned
+ * list, or a ReturnSetInfo context node). TYPEFUNC_RECORD is returned
* only when we couldn't resolve the actual rowtype for lack of information.
*
* The other hard case that this handles is resolution of polymorphism.
@@ -281,7 +281,7 @@ get_func_result_type(Oid functionId,
/*
* internal_get_result_type -- workhorse code implementing all the above
*
- * funcid must always be supplied. call_expr and rsinfo can be NULL if not
+ * funcid must always be supplied. call_expr and rsinfo can be NULL if not
* available. We will return TYPEFUNC_RECORD, and store NULL into
* *resultTupleDesc, if we cannot deduce the complete result rowtype from
* the available information.
@@ -448,7 +448,7 @@ resolve_polymorphic_tupdesc(TupleDesc tupdesc, oidvector *declared_args,
return true;
/*
- * Otherwise, extract actual datatype(s) from input arguments. (We assume
+ * Otherwise, extract actual datatype(s) from input arguments. (We assume
* the parser already validated consistency of the arguments.)
*/
if (!call_expr)
diff --git a/src/backend/utils/hash/dynahash.c b/src/backend/utils/hash/dynahash.c
index 7c3f9206e5e..a3db973afe1 100644
--- a/src/backend/utils/hash/dynahash.c
+++ b/src/backend/utils/hash/dynahash.c
@@ -5,19 +5,19 @@
*
* dynahash.c supports both local-to-a-backend hash tables and hash tables in
* shared memory. For shared hash tables, it is the caller's responsibility
- * to provide appropriate access interlocking. The simplest convention is
- * that a single LWLock protects the whole hash table. Searches (HASH_FIND or
+ * to provide appropriate access interlocking. The simplest convention is
+ * that a single LWLock protects the whole hash table. Searches (HASH_FIND or
* hash_seq_search) need only shared lock, but any update requires exclusive
* lock. For heavily-used shared tables, the single-lock approach creates a
* concurrency bottleneck, so we also support "partitioned" locking wherein
* there are multiple LWLocks guarding distinct subsets of the table. To use
* a hash table in partitioned mode, the HASH_PARTITION flag must be given
- * to hash_create. This prevents any attempt to split buckets on-the-fly.
+ * to hash_create. This prevents any attempt to split buckets on-the-fly.
* Therefore, each hash bucket chain operates independently, and no fields
* of the hash header change after init except nentries and freeList.
* A partitioned table uses a spinlock to guard changes of those two fields.
* This lets any subset of the hash buckets be treated as a separately
- * lockable partition. We expect callers to use the low-order bits of a
+ * lockable partition. We expect callers to use the low-order bits of a
* lookup key's hash value as a partition number --- this will work because
* of the way calc_bucket() maps hash values to bucket numbers.
*
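A sketch of partition selection as described above, assuming a power-of-two lock count so the low-order bits of the hash value index the lock array directly:

#include <stdint.h>

#define NUM_PARTITIONS 16		/* must be a power of two */

typedef struct
{
	int			locked;			/* stand-in for the real LWLock type */
} LWLockStub;

static LWLockStub partition_locks[NUM_PARTITIONS];

/* The low-order bits pick the partition. This agrees with bucket
 * assignment because calc_bucket() also works from the low-order bits,
 * so one lock consistently covers one subset of the buckets. */
static LWLockStub *
partition_lock_for(uint32_t hashvalue)
{
	return &partition_locks[hashvalue & (NUM_PARTITIONS - 1)];
}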
@@ -81,7 +81,7 @@
* Constants
*
* A hash table has a top-level "directory", each of whose entries points
- * to a "segment" of ssize bucket headers. The maximum number of hash
+ * to a "segment" of ssize bucket headers. The maximum number of hash
* buckets is thus dsize * ssize (but dsize may be expansible). Of course,
* the number of records in the table can be larger, but we don't want a
* whole lot of records per bucket or performance goes down.
@@ -89,7 +89,7 @@
* In a hash table allocated in shared memory, the directory cannot be
* expanded because it must stay at a fixed address. The directory size
* should be selected using hash_select_dirsize (and you'd better have
- * a good idea of the maximum number of entries!). For non-shared hash
+ * a good idea of the maximum number of entries!). For non-shared hash
* tables, the initial directory size can be left at the default.
*/
#define DEF_SEGSIZE 256
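A sketch of how a bucket number resolves through the directory/segment structure; the shift and mask assume ssize is a power of two, matching the DEF_SEGSIZE value above:

#include <stdint.h>

#define SEGSIZE			256		/* matches DEF_SEGSIZE */
#define SEGSIZE_SHIFT	8		/* log2(SEGSIZE) */

typedef struct HashElement HashElement;	/* head of one bucket chain */
typedef HashElement **HashSegment;		/* a segment: SEGSIZE chain heads */

/* The directory picks the segment; the low-order bits index within it.
 * With dsize directory slots the table tops out at dsize * SEGSIZE
 * buckets, as the comment above says. */
static HashElement **
bucket_chain(HashSegment *directory, uint32_t bucket)
{
	uint32_t	segment_num = bucket >> SEGSIZE_SHIFT;
	uint32_t	segment_ndx = bucket & (SEGSIZE - 1);

	return &directory[segment_num][segment_ndx];
}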
@@ -341,7 +341,7 @@ hash_create(const char *tabname, long nelem, HASHCTL *info, int flags)
{
/*
* ctl structure and directory are preallocated for shared memory
- * tables. Note that HASH_DIRSIZE and HASH_ALLOC had better be set as
+ * tables. Note that HASH_DIRSIZE and HASH_ALLOC had better be set as
* well.
*/
hashp->hctl = info->hctl;
@@ -790,7 +790,7 @@ calc_bucket(HASHHDR *hctl, uint32 hash_val)
* the result is a dangling pointer that shouldn't be dereferenced!)
*
* HASH_ENTER will normally ereport a generic "out of memory" error if
- * it is unable to create a new entry. The HASH_ENTER_NULL operation is
+ * it is unable to create a new entry. The HASH_ENTER_NULL operation is
* the same except it will return NULL if out of memory. Note that
* HASH_ENTER_NULL cannot be used with the default palloc-based allocator,
* since palloc internally ereports on out-of-memory.
@@ -1042,7 +1042,7 @@ hash_update_hash_key(HTAB *hashp,
hashp->tabname);
/*
- * Lookup the existing element using its saved hash value. We need to do
+ * Lookup the existing element using its saved hash value. We need to do
* this to be able to unlink it from its hash chain, but as a side benefit
* we can verify the validity of the passed existingEntry pointer.
*/
@@ -1119,7 +1119,7 @@ hash_update_hash_key(HTAB *hashp,
/*
* If old and new hash values belong to the same bucket, we need not
* change any chain links, and indeed should not since this simplistic
- * update will corrupt the list if currBucket is the last element. (We
+ * update will corrupt the list if currBucket is the last element. (We
* cannot fall out earlier, however, since we need to scan the bucket to
* check for duplicate keys.)
*/
@@ -1405,7 +1405,7 @@ expand_table(HTAB *hashp)
}
/*
- * Relocate records to the new bucket. NOTE: because of the way the hash
+ * Relocate records to the new bucket. NOTE: because of the way the hash
* masking is done in calc_bucket, only one old bucket can need to be
* split at this point. With a different way of reducing the hash value,
* that might not be true!
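A sketch of the masking scheme that comment relies on. Because a bucket number is just the hash value under one of two masks, growing max_bucket by one changes the mapping for exactly one old bucket, which is the bucket expand_table() must relocate:

#include <stdint.h>

typedef struct
{
	uint32_t	max_bucket;		/* highest bucket number in use */
	uint32_t	high_mask;		/* mask for next-larger power of 2 */
	uint32_t	low_mask;		/* mask for current power of 2 */
} HashHeaderSketch;

/* Mirror of the calc_bucket masking idea: take the low-order bits under
 * the larger mask, and if that exceeds the highest existing bucket,
 * drop one more bit. */
static uint32_t
calc_bucket_sketch(const HashHeaderSketch *hctl, uint32_t hash_val)
{
	uint32_t	bucket = hash_val & hctl->high_mask;

	if (bucket > hctl->max_bucket)
		bucket = bucket & hctl->low_mask;
	return bucket;
}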
@@ -1554,7 +1554,7 @@ hash_corrupted(HTAB *hashp)
{
/*
* If the corruption is in a shared hashtable, we'd better force a
- * systemwide restart. Otherwise, just shut down this one backend.
+ * systemwide restart. Otherwise, just shut down this one backend.
*/
if (hashp->isshared)
elog(PANIC, "hash table \"%s\" corrupted", hashp->tabname);
@@ -1599,7 +1599,7 @@ next_pow2_int(long num)
/************************* SEQ SCAN TRACKING ************************/
/*
- * We track active hash_seq_search scans here. The need for this mechanism
+ * We track active hash_seq_search scans here. The need for this mechanism
* comes from the fact that a scan will get confused if a bucket split occurs
* while it's in progress: it might visit entries twice, or even miss some
* entirely (if it's partway through the same bucket that splits). Hence
@@ -1619,7 +1619,7 @@ next_pow2_int(long num)
*
* This arrangement is reasonably robust if a transient hashtable is deleted
* without notifying us. The absolute worst case is we might inhibit splits
- * in another table created later at exactly the same address. We will give
+ * in another table created later at exactly the same address. We will give
* a warning at transaction end for reference leaks, so any bugs leading to
* lack of notification should be easy to catch.
*/
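A sketch of the tracking array described above: scans register the table they are walking, and the split path declines to expand while any scan is active. Deregistration, error handling, and the end-of-transaction warning are elided:

#include <stdbool.h>
#include <stddef.h>

#define MAX_SEQ_SCANS 100

/* Tables with active scans, tracked by address; duplicates are fine. */
static void *seq_scan_tables[MAX_SEQ_SCANS];
static int	num_seq_scans = 0;

static bool
register_seq_scan(void *hashp)
{
	if (num_seq_scans >= MAX_SEQ_SCANS)
		return false;			/* too many concurrent scans */
	seq_scan_tables[num_seq_scans++] = hashp;
	return true;
}

/* The split path consults this and skips expansion if a scan is active,
 * so no scan ever sees a bucket split out from under it. */
static bool
has_seq_scans(const void *hashp)
{
	for (int i = 0; i < num_seq_scans; i++)
	{
		if (seq_scan_tables[i] == hashp)
			return true;
	}
	return false;
}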
diff --git a/src/backend/utils/init/miscinit.c b/src/backend/utils/init/miscinit.c
index cb78caf8ebd..cf566f1283e 100644
--- a/src/backend/utils/init/miscinit.c
+++ b/src/backend/utils/init/miscinit.c
@@ -59,7 +59,7 @@ static List *lock_files = NIL;
*
* NOTE: "ignoring system indexes" means we do not use the system indexes
* for lookups (either in hardwired catalog accesses or in planner-generated
- * plans). We do, however, still update the indexes when a catalog
+ * plans). We do, however, still update the indexes when a catalog
* modification is made.
* ----------------------------------------------------------------
*/
@@ -301,7 +301,7 @@ SetSessionUserId(Oid userid, bool is_superuser)
* Currently there are two valid bits in SecurityRestrictionContext:
*
* SECURITY_LOCAL_USERID_CHANGE indicates that we are inside an operation
- * that is temporarily changing CurrentUserId via these functions. This is
+ * that is temporarily changing CurrentUserId via these functions. This is
* needed to indicate that the actual value of CurrentUserId is not in sync
* with guc.c's internal state, so SET ROLE has to be disallowed.
*
@@ -322,7 +322,7 @@ SetSessionUserId(Oid userid, bool is_superuser)
* ever throw any kind of error. This is because they are used by
* StartTransaction and AbortTransaction to save/restore the settings,
* and during the first transaction within a backend, the value to be saved
- * and perhaps restored is indeed invalid. We have to be able to get
+ * and perhaps restored is indeed invalid. We have to be able to get
* through AbortTransaction without asserting in case InitPostgres fails.
*/
void
@@ -362,7 +362,7 @@ InSecurityRestrictedOperation(void)
/*
* These are obsolete versions of Get/SetUserIdAndSecContext that are
* only provided for bug-compatibility with some rather dubious code in
- * pljava. We allow the userid to be set, but only when not inside a
+ * pljava. We allow the userid to be set, but only when not inside a
* security restriction context.
*/
void
@@ -465,7 +465,7 @@ InitializeSessionUserId(const char *rolename)
* Check connection limit for this role.
*
* There is a race condition here --- we create our PGPROC before
- * checking for other PGPROCs. If two backends did this at about the
+ * checking for other PGPROCs. If two backends did this at about the
* same time, they might both think they were over the limit, while
* ideally one should succeed and one fail. Getting that to work
* exactly seems more trouble than it is worth, however; instead we
@@ -564,7 +564,7 @@ GetCurrentRoleId(void)
* Change Role ID while running (SET ROLE)
*
* If roleid is InvalidOid, we are doing SET ROLE NONE: revert to the
- * session user authorization. In this case the is_superuser argument
+ * session user authorization. In this case the is_superuser argument
* is ignored.
*
* When roleid is not InvalidOid, the caller must have checked whether
@@ -632,7 +632,7 @@ GetUserNameFromId(Oid roleid)
* ($DATADIR/postmaster.pid) and Unix-socket-file lockfiles ($SOCKFILE.lock).
* Both kinds of files contain the same info initially, although we can add
* more information to a data-directory lockfile after it's created, using
- * AddToDataDirLockFile(). See miscadmin.h for documentation of the contents
+ * AddToDataDirLockFile(). See miscadmin.h for documentation of the contents
* of these lockfiles.
*
* On successful lockfile creation, a proc_exit callback to remove the
@@ -721,7 +721,7 @@ CreateLockFile(const char *filename, bool amPostmaster,
my_gp_pid = 0;
/*
- * We need a loop here because of race conditions. But don't loop forever
+ * We need a loop here because of race conditions. But don't loop forever
* (for example, a non-writable $PGDATA directory might cause a failure
* that won't go away). 100 tries seems like plenty.
*/
@@ -730,7 +730,7 @@ CreateLockFile(const char *filename, bool amPostmaster,
/*
* Try to create the lock file --- O_EXCL makes this atomic.
*
- * Think not to make the file protection weaker than 0600. See
+ * Think not to make the file protection weaker than 0600. See
* comments below.
*/
fd = open(filename, O_RDWR | O_CREAT | O_EXCL, 0600);
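A sketch of the create-check-unlink-retry loop around that open() call. The real code's staleness checks are more elaborate (shared memory segments, parent PIDs); here a bare kill(pid, 0) liveness probe stands in for them:

#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

/* Create a lock file atomically; on collision, remove it only if the PID
 * recorded inside no longer exists. Loops because another would-be
 * creator can win the race between our unlink() and our next open(). */
static int
create_lock_file(const char *filename)
{
	for (int ntries = 0; ntries < 100; ntries++)
	{
		int			fd = open(filename, O_RDWR | O_CREAT | O_EXCL, 0600);
		FILE	   *fp;
		long		other_pid = 0;

		if (fd >= 0)
			return fd;			/* success: we own the lock file */

		/* Lock file exists: read the owner's PID out of it. */
		fp = fopen(filename, "r");
		if (fp == NULL)
			continue;			/* it vanished; retry the create */
		if (fscanf(fp, "%ld", &other_pid) != 1)
			other_pid = 0;
		fclose(fp);

		/* kill(pid, 0) tests for existence without sending a signal. */
		if (other_pid > 0 && kill((pid_t) other_pid, 0) == 0)
			return -1;			/* a live process holds the lock */

		if (unlink(filename) < 0)
			return -1;			/* couldn't remove the stale file */
	}
	return -1;					/* gave up after too many races */
}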
@@ -798,7 +798,7 @@ CreateLockFile(const char *filename, bool amPostmaster,
* implies that the existing process has a different userid than we
* do, which means it cannot be a competing postmaster. A postmaster
* cannot successfully attach to a data directory owned by a userid
- * other than its own. (This is now checked directly in
+ * other than its own. (This is now checked directly in
* checkDataDir(), but has been true for a long time because of the
* restriction that the data directory isn't group- or
* world-accessible.) Also, since we create the lockfiles mode 600,
@@ -836,9 +836,9 @@ CreateLockFile(const char *filename, bool amPostmaster,
}
/*
- * No, the creating process did not exist. However, it could be that
+ * No, the creating process did not exist. However, it could be that
* the postmaster crashed (or more likely was kill -9'd by a clueless
- * admin) but has left orphan backends behind. Check for this by
+ * admin) but has left orphan backends behind. Check for this by
* looking to see if there is an associated shmem segment that is
* still in use.
*
@@ -879,7 +879,7 @@ CreateLockFile(const char *filename, bool amPostmaster,
/*
* Looks like nobody's home. Unlink the file and try again to create
- * it. Need a loop because of possible race condition against other
+ * it. Need a loop because of possible race condition against other
* would-be creators.
*/
if (unlink(filename) < 0)
@@ -893,8 +893,8 @@ CreateLockFile(const char *filename, bool amPostmaster,
}
/*
- * Successfully created the file, now fill it. See comment in miscadmin.h
- * about the contents. Note that we write the same first five lines into
+ * Successfully created the file, now fill it. See comment in miscadmin.h
+ * about the contents. Note that we write the same first five lines into
* both datadir and socket lockfiles; although more stuff may get added to
* the datadir lockfile later.
*/
@@ -1263,7 +1263,7 @@ load_libraries(const char *libraries, const char *gucname, bool restricted)
/*
* Choose notice level: avoid repeat messages when re-loading a library
- * that was preloaded into the postmaster. (Only possible in EXEC_BACKEND
+ * that was preloaded into the postmaster. (Only possible in EXEC_BACKEND
* configurations)
*/
#ifdef EXEC_BACKEND
diff --git a/src/backend/utils/init/postinit.c b/src/backend/utils/init/postinit.c
index e0abff1145a..da5638173fd 100644
--- a/src/backend/utils/init/postinit.c
+++ b/src/backend/utils/init/postinit.c
@@ -80,7 +80,7 @@ static void process_settings(Oid databaseid, Oid roleid);
* GetDatabaseTuple -- fetch the pg_database row for a database
*
* This is used during backend startup when we don't yet have any access to
- * system catalogs in general. In the worst case, we can seqscan pg_database
+ * system catalogs in general. In the worst case, we can seqscan pg_database
* using nothing but the hard-wired descriptor that relcache.c creates for
* pg_database. In more typical cases, relcache.c was able to load
* descriptors for both pg_database and its indexes from the shared relcache
@@ -104,7 +104,7 @@ GetDatabaseTuple(const char *dbname)
CStringGetDatum(dbname));
/*
- * Open pg_database and fetch a tuple. Force heap scan if we haven't yet
+ * Open pg_database and fetch a tuple. Force heap scan if we haven't yet
* built the critical shared relcache entries (i.e., we're starting up
* without a shared relcache cache file).
*/
@@ -147,7 +147,7 @@ GetDatabaseTupleByOid(Oid dboid)
ObjectIdGetDatum(dboid));
/*
- * Open pg_database and fetch a tuple. Force heap scan if we haven't yet
+ * Open pg_database and fetch a tuple. Force heap scan if we haven't yet
* built the critical shared relcache entries (i.e., we're starting up
* without a shared relcache cache file).
*/
@@ -186,7 +186,7 @@ PerformAuthentication(Port *port)
* In EXEC_BACKEND case, we didn't inherit the contents of pg_hba.conf
* etcetera from the postmaster, and have to load them ourselves.
*
- * FIXME: [fork/exec] Ugh. Is there a way around this overhead?
+ * FIXME: [fork/exec] Ugh. Is there a way around this overhead?
*/
#ifdef EXEC_BACKEND
if (!load_hba())
@@ -292,7 +292,7 @@ CheckMyDatabase(const char *name, bool am_superuser)
name)));
/*
- * Check privilege to connect to the database. (The am_superuser test
+ * Check privilege to connect to the database. (The am_superuser test
* is redundant, but since we have the flag, might as well check it
* and save a few cycles.)
*/
@@ -308,7 +308,7 @@ CheckMyDatabase(const char *name, bool am_superuser)
* Check connection limit for this database.
*
* There is a race condition here --- we create our PGPROC before
- * checking for other PGPROCs. If two backends did this at about the
+ * checking for other PGPROCs. If two backends did this at about the
* same time, they might both think they were over the limit, while
* ideally one should succeed and one fail. Getting that to work
* exactly seems more trouble than it is worth, however; instead we
@@ -478,7 +478,7 @@ BaseInit(void)
* Initialize POSTGRES.
*
* The database can be specified by name, using the in_dbname parameter, or by
- * OID, using the dboid parameter. In the latter case, the actual database
+ * OID, using the dboid parameter. In the latter case, the actual database
* name can be returned to the caller in out_dbname. If out_dbname isn't
* NULL, it must point to a buffer of size NAMEDATALEN.
*
@@ -895,7 +895,7 @@ InitPostgres(const char *in_dbname, Oid dboid, const char *username,
/*
* Now process any command-line switches and any additional GUC variable
- * settings passed in the startup packet. We couldn't do this before
+ * settings passed in the startup packet. We couldn't do this before
* because we didn't know if client is a superuser.
*/
if (MyProcPort != NULL)
diff --git a/src/backend/utils/mb/conversion_procs/euc_tw_and_big5/big5.c b/src/backend/utils/mb/conversion_procs/euc_tw_and_big5/big5.c
index ca965390557..68615726552 100644
--- a/src/backend/utils/mb/conversion_procs/euc_tw_and_big5/big5.c
+++ b/src/backend/utils/mb/conversion_procs/euc_tw_and_big5/big5.c
@@ -231,7 +231,7 @@ static unsigned short BinarySearchRange
/*
* NOTE: big5 high_byte: 0xa1-0xfe, low_byte: 0x40-0x7e,
* 0xa1-0xfe (radicals: 0x00-0x3e, 0x3f-0x9c) big5 radix is
- * 0x9d. [region_low, region_high] We
+ * 0x9d. [region_low, region_high] We
* should remember big5 has two different regions (above).
* There is a bias for the distance between these regions.
* 0xa1 - 0x7e + bias = 1 (Distance between 0xa1 and 0x7e is
diff --git a/src/backend/utils/mb/mbutils.c b/src/backend/utils/mb/mbutils.c
index 4582219af73..96f7c74d2d1 100644
--- a/src/backend/utils/mb/mbutils.c
+++ b/src/backend/utils/mb/mbutils.c
@@ -29,7 +29,7 @@
/*
* We maintain a simple linked list caching the fmgr lookup info for the
* currently selected conversion functions, as well as any that have been
- * selected previously in the current session. (We remember previous
+ * selected previously in the current session. (We remember previous
* settings because we must be able to restore a previous setting during
* transaction rollback, without doing any fresh catalog accesses.)
*
@@ -76,7 +76,7 @@ static int cliplen(const char *str, int len, int limit);
/*
- * Prepare for a future call to SetClientEncoding. Success should mean
+ * Prepare for a future call to SetClientEncoding. Success should mean
* that SetClientEncoding is guaranteed to succeed for this encoding request.
*
* (But note that success before backend_startup_complete does not guarantee
@@ -148,7 +148,7 @@ PrepareClientEncoding(int encoding)
/*
* We cannot yet remove any older entry for the same encoding pair,
- * since it could still be in use. SetClientEncoding will clean up.
+ * since it could still be in use. SetClientEncoding will clean up.
*/
return 0; /* success */
@@ -157,8 +157,8 @@ PrepareClientEncoding(int encoding)
{
/*
* If we're not in a live transaction, the only thing we can do is
- * restore a previous setting using the cache. This covers all
- * transaction-rollback cases. The only case it might not work for is
+ * restore a previous setting using the cache. This covers all
+ * transaction-rollback cases. The only case it might not work for is
* trying to change client_encoding on the fly by editing
* postgresql.conf and SIGHUP'ing. Which would probably be a stupid
* thing to do anyway.
@@ -316,7 +316,7 @@ pg_get_client_encoding_name(void)
*
* CAUTION: although the presence of a length argument means that callers
* can pass non-null-terminated strings, care is required because the same
- * string will be passed back if no conversion occurs. Such callers *must*
+ * string will be passed back if no conversion occurs. Such callers *must*
* check whether result == src and handle that case differently.
*
* Note: we try to avoid raising error, since that could get us into
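
Concretely, the caution amounts to this calling pattern (use_string() is a
hypothetical consumer; only the converted copy is palloc'd and
null-terminated):

    char   *dst = pg_any_to_server(src, len, encoding);

    if (dst != src)
    {
        use_string(dst, strlen(dst));   /* fresh palloc'd copy */
        pfree(dst);
    }
    else
        use_string(src, len);           /* same storage; may lack a null */
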
@@ -572,7 +572,7 @@ pg_any_to_server(const char *s, int len, int encoding)
* the selected client_encoding. If the client encoding is ASCII-safe
* then we just do a straight validation under that encoding. For an
* ASCII-unsafe encoding we have a problem: we dare not pass such data
- * to the parser but we have no way to convert it. We compromise by
+ * to the parser but we have no way to convert it. We compromise by
* rejecting the data if it contains any non-ASCII characters.
*/
if (PG_VALID_BE_ENCODING(encoding))
diff --git a/src/backend/utils/mb/wchar.c b/src/backend/utils/mb/wchar.c
index 45bc3c1604b..dbb7b77d867 100644
--- a/src/backend/utils/mb/wchar.c
+++ b/src/backend/utils/mb/wchar.c
@@ -1512,7 +1512,7 @@ pg_utf8_islegal(const unsigned char *source, int length)
*
* Not knowing anything about the properties of the encoding in use, we just
* keep incrementing the last byte until we get a validly-encoded result,
- * or we run out of values to try. We don't bother to try incrementing
+ * or we run out of values to try. We don't bother to try incrementing
* higher-order bytes, so there's no growth in runtime for wider characters.
* (If we did try to do that, we'd need to consider the likelihood that 255
* is not a valid final byte in the encoding.)
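
A sketch of the loop just described; the real function fetches the encoding's
verifier from pg_wchar_table, abstracted here as a hypothetical
encoding_verifies():

    static bool
    charinc_sketch(unsigned char *charptr, int len)
    {
        unsigned char *lastbyte = charptr + len - 1;

        while (*lastbyte < (unsigned char) 255)
        {
            (*lastbyte)++;
            if (encoding_verifies(charptr, len))    /* stand-in for mbverify */
                return true;
        }
        return false;           /* ran out of values to try */
    }
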
@@ -1542,7 +1542,7 @@ pg_generic_charinc(unsigned char *charptr, int len)
* For a one-byte character less than 0x7F, we just increment the byte.
*
* For a multibyte character, every byte but the first must fall between 0x80
- * and 0xBF; and the first byte must be between 0xC0 and 0xF4. We increment
+ * and 0xBF; and the first byte must be between 0xC0 and 0xF4. We increment
* the last byte that's not already at its maximum value. If we can't find a
* byte that's less than the maximum allowable value, we simply fail. We also
* need some special-case logic to skip regions used for surrogate pair
diff --git a/src/backend/utils/mb/wstrcmp.c b/src/backend/utils/mb/wstrcmp.c
index 64a9cf848e2..dad3ae023a3 100644
--- a/src/backend/utils/mb/wstrcmp.c
+++ b/src/backend/utils/mb/wstrcmp.c
@@ -23,7 +23,7 @@
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/src/backend/utils/mb/wstrncmp.c b/src/backend/utils/mb/wstrncmp.c
index 87c1f5afdaa..ea4823fc6f8 100644
--- a/src/backend/utils/mb/wstrncmp.c
+++ b/src/backend/utils/mb/wstrncmp.c
@@ -22,7 +22,7 @@
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c
index a73f022d1a0..2b6527f012a 100644
--- a/src/backend/utils/misc/guc.c
+++ b/src/backend/utils/misc/guc.c
@@ -3640,7 +3640,7 @@ get_guc_variables(void)
/*
- * Build the sorted array. This is split out so that it could be
+ * Build the sorted array. This is split out so that it could be
* re-executed after startup (eg, we could allow loadable modules to
* add vars, and then we'd need to re-sort).
*/
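
The "build" step ends in little more than a qsort over the collected
pointers, essentially:

    qsort((void *) guc_variables, num_guc_variables,
          sizeof(struct config_generic *), guc_var_compare);

guc_var_compare is the same name-based comparator that find_option's
bsearch uses, which is what keeps lookups and rebuilds consistent.
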
@@ -3797,7 +3797,7 @@ add_placeholder_variable(const char *name, int elevel)
/*
* The char* is allocated at the end of the struct since we have no
- * 'static' place to point to. Note that the current value, as well as
+ * 'static' place to point to. Note that the current value, as well as
* the boot and reset values, start out NULL.
*/
var->variable = (char **) (var + 1);
@@ -3839,7 +3839,7 @@ find_option(const char *name, bool create_placeholders, int elevel)
return *res;
/*
- * See if the name is an obsolete name for a variable. We assume that the
+ * See if the name is an obsolete name for a variable. We assume that the
* set of supported old names is short enough that a brute-force search is
* the best way.
*/
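
The brute-force search is a walk over a flat array of old-name/new-name
pairs; a sketch consistent with guc.c's map_old_guc_names convention
(pairs of strings, NULL-terminated):

    for (i = 0; i < n && map_old_guc_names[i] != NULL; i += 2)
    {
        if (guc_name_compare(name, map_old_guc_names[i]) == 0)
            return find_option(map_old_guc_names[i + 1], false, elevel);
        /* pass false: an old name never maps to a placeholder */
    }
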
@@ -4495,7 +4495,7 @@ NewGUCNestLevel(void)
/*
* Do GUC processing at transaction or subtransaction commit or abort, or
* when exiting a function that has proconfig settings, or when undoing a
- * transient assignment to some GUC variables. (The name is thus a bit of
+ * transient assignment to some GUC variables. (The name is thus a bit of
* a misnomer; perhaps it should be ExitGUCNestLevel or some such.)
* During abort, we discard all GUC settings that were applied at nesting
* levels >= nestLevel. nestLevel == 1 corresponds to the main transaction.
@@ -5296,7 +5296,7 @@ set_config_option(const char *name, const char *value,
* If a PGC_BACKEND parameter is changed in the config file,
* we want to accept the new value in the postmaster (whence
* it will propagate to subsequently-started backends), but
- * ignore it in existing backends. This is a tad klugy, but
+ * ignore it in existing backends. This is a tad klugy, but
* necessary because we don't re-read the config file during
* backend start.
*
@@ -5353,7 +5353,7 @@ set_config_option(const char *name, const char *value,
* An exception might be made if the reset value is assumed to be "safe".
*
* Note: this flag is currently used for "session_authorization" and
- * "role". We need to prohibit changing these inside a local userid
+ * "role". We need to prohibit changing these inside a local userid
* context because when we exit it, GUC won't be notified, leaving things
* out of sync. (This could be fixed by forcing a new GUC nesting level,
* but that would change behavior in possibly-undesirable ways.) Also, we
@@ -6218,7 +6218,7 @@ flatten_set_variable_args(const char *name, List *args)
else
{
/*
- * Plain string literal or identifier. For quote mode,
+ * Plain string literal or identifier. For quote mode,
* quote it if it's not a vanilla identifier.
*/
if (flags & GUC_LIST_QUOTE)
@@ -6535,7 +6535,7 @@ define_custom_variable(struct config_generic * variable)
* variable. Essentially, we need to duplicate all the active and stacked
* values, but with appropriate validation and datatype adjustment.
*
- * If an assignment fails, we report a WARNING and keep going. We don't
+ * If an assignment fails, we report a WARNING and keep going. We don't
* want to throw ERROR for bad values, because it'd bollix the add-on
* module that's presumably halfway through getting loaded. In such cases
* the default or previous state will become active instead.
@@ -6563,7 +6563,7 @@ define_custom_variable(struct config_generic * variable)
/*
* Free up as much as we conveniently can of the placeholder structure.
* (This neglects any stack items, so it's possible for some memory to be
- * leaked. Since this can only happen once per session per variable, it
+ * leaked. Since this can only happen once per session per variable, it
* doesn't seem worth spending much code on.)
*/
set_string_field(pHolder, pHolder->variable, NULL);
@@ -6636,7 +6636,7 @@ reapply_stacked_values(struct config_generic * variable,
else
{
/*
- * We are at the end of the stack. If the active/previous value is
+ * We are at the end of the stack. If the active/previous value is
* different from the reset value, it must represent a previously
* committed session value. Apply it, and then drop the stack entry
* that set_config_option will have created under the impression that
@@ -7823,7 +7823,7 @@ ParseLongOption(const char *string, char **name, char **value)
/*
* Handle options fetched from pg_db_role_setting.setconfig,
- * pg_proc.proconfig, etc. Caller must specify proper context/source/action.
+ * pg_proc.proconfig, etc. Caller must specify proper context/source/action.
*
* The array parameter must be an array of TEXT (it must not be NULL).
*/
@@ -8105,7 +8105,7 @@ GUCArrayReset(ArrayType *array)
* Validate a proposed option setting for GUCArrayAdd/Delete/Reset.
*
* name is the option name. value is the proposed value for the Add case,
- * or NULL for the Delete/Reset cases. If skipIfNoPermissions is true, it's
+ * or NULL for the Delete/Reset cases. If skipIfNoPermissions is true, it's
* not an error to have no permissions to set the option.
*
* Returns TRUE if OK, FALSE if skipIfNoPermissions is true and user does not
@@ -8186,7 +8186,7 @@ validate_option_array_item(const char *name, const char *value,
* ERRCODE_INVALID_PARAMETER_VALUE SQLSTATE for check hook failures.
*
* Note that GUC_check_errmsg() etc are just macros that result in a direct
- * assignment to the associated variables. That is ugly, but forced by the
+ * assignment to the associated variables. That is ugly, but forced by the
* limitations of C's macro mechanisms.
*/
void
diff --git a/src/backend/utils/misc/ps_status.c b/src/backend/utils/misc/ps_status.c
index 57aa38435ac..33d275a999c 100644
--- a/src/backend/utils/misc/ps_status.c
+++ b/src/backend/utils/misc/ps_status.c
@@ -109,7 +109,7 @@ static char **save_argv;
* from being clobbered by subsequent ps_display actions.
*
* (The original argv[] will not be overwritten by this routine, but may be
- * overwritten during init_ps_display. Also, the physical location of the
+ * overwritten during init_ps_display. Also, the physical location of the
* environment strings may be moved, so this should be called before any code
* that might try to hang onto a getenv() result.)
*/
@@ -210,7 +210,7 @@ save_ps_display_args(int argc, char **argv)
/*
* Call this once during subprocess startup to set the identification
- * values. At this point, the original argv[] array may be overwritten.
+ * values. At this point, the original argv[] array may be overwritten.
*/
void
init_ps_display(const char *username, const char *dbname,
@@ -360,7 +360,7 @@ set_ps_display(const char *activity, bool force)
/*
* Returns what's currently in the ps display, in case someone needs
- * it. Note that only the activity part is returned. On some platforms
+ * it. Note that only the activity part is returned. On some platforms
* the string will not be null-terminated, so return the effective
* length into *displen.
*/
diff --git a/src/backend/utils/misc/rbtree.c b/src/backend/utils/misc/rbtree.c
index 58e797ce1ee..9d545e74d3e 100644
--- a/src/backend/utils/misc/rbtree.c
+++ b/src/backend/utils/misc/rbtree.c
@@ -13,7 +13,7 @@
*
* Red-black trees are a type of balanced binary tree wherein (1) any child of
* a red node is always black, and (2) every path from root to leaf traverses
- * an equal number of black nodes. From these properties, it follows that the
+ * an equal number of black nodes. From these properties, it follows that the
* longest path from root to leaf is only about twice as long as the shortest,
* so lookups are guaranteed to run in O(lg n) time.
*
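
(The standard argument, for reference: with black-height b, every
root-to-leaf path contains exactly b black nodes, so the shortest possible
path has b nodes, while a path that alternates black and red has at most 2b;
and since the black nodes alone force at least 2^b - 1 internal nodes,
b <= lg(n+1), giving total height at most 2 lg(n+1).)
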
@@ -102,7 +102,7 @@ static RBNode sentinel = {InitialState, RBBLACK, RBNIL, RBNIL, NULL};
* valid data! freefunc can be NULL if caller doesn't require retail
* space reclamation.
*
- * The RBTree node is palloc'd in the caller's memory context. Note that
+ * The RBTree node is palloc'd in the caller's memory context. Note that
* all contents of the tree are actually allocated by the caller, not here.
*
* Since tree contents are managed by the caller, there is currently not
@@ -282,10 +282,10 @@ rb_rotate_right(RBTree *rb, RBNode *x)
/*
* Maintain Red-Black tree balance after inserting node x.
*
- * The newly inserted node is always initially marked red. That may lead to
+ * The newly inserted node is always initially marked red. That may lead to
* a situation where a red node has a red child, which is prohibited. We can
* always fix the problem by a series of color changes and/or "rotations",
- * which move the problem progressively higher up in the tree. If one of the
+ * which move the problem progressively higher up in the tree. If one of the
* two red nodes is the root, we can always fix the problem by changing the
* root from red to black.
*
@@ -296,7 +296,7 @@ static void
rb_insert_fixup(RBTree *rb, RBNode *x)
{
/*
- * x is always a red node. Initially, it is the newly inserted node. Each
+ * x is always a red node. Initially, it is the newly inserted node. Each
* iteration of this loop moves it higher up in the tree.
*/
while (x != rb->root && x->parent->color == RBRED)
@@ -481,7 +481,7 @@ rb_delete_fixup(RBTree *rb, RBNode *x)
while (x != rb->root && x->color == RBBLACK)
{
/*
- * Left and right cases are symmetric. Any nodes that are children of
+ * Left and right cases are symmetric. Any nodes that are children of
* x have a black-height one less than the remainder of the nodes in
* the tree. We rotate and recolor nodes to move the problem up the
* tree: at some stage we'll either fix the problem, or reach the root
diff --git a/src/backend/utils/misc/timeout.c b/src/backend/utils/misc/timeout.c
index 05925390438..51f5df1c13b 100644
--- a/src/backend/utils/misc/timeout.c
+++ b/src/backend/utils/misc/timeout.c
@@ -57,7 +57,7 @@ static timeout_params *volatile active_timeouts[MAX_TIMEOUTS];
* Note that we don't bother to reset any pending timer interrupt when we
* disable the signal handler; it's not really worth the cycles to do so,
* since the probability of the interrupt actually occurring while we have
- * it disabled is low. See comments in schedule_alarm() about that.
+ * it disabled is low. See comments in schedule_alarm() about that.
*/
static volatile sig_atomic_t alarm_enabled = false;
@@ -69,7 +69,7 @@ static volatile sig_atomic_t alarm_enabled = false;
* Internal helper functions
*
* For all of these, it is caller's responsibility to protect them from
- * interruption by the signal handler. Generally, call disable_alarm()
+ * interruption by the signal handler. Generally, call disable_alarm()
* first to prevent interruption, then update state, and last call
* schedule_alarm(), which will re-enable the signal handler if needed.
*****************************************************************************/
@@ -144,7 +144,7 @@ enable_timeout(TimeoutId id, TimestampTz now, TimestampTz fin_time)
Assert(all_timeouts[id].timeout_handler != NULL);
/*
- * If this timeout was already active, momentarily disable it. We
+ * If this timeout was already active, momentarily disable it. We
* interpret the call as a directive to reschedule the timeout.
*/
i = find_active_timeout(id);
@@ -152,7 +152,7 @@ enable_timeout(TimeoutId id, TimestampTz now, TimestampTz fin_time)
remove_timeout_index(i);
/*
- * Find out the index where to insert the new timeout. We sort by
+ * Find out the index where to insert the new timeout. We sort by
* fin_time, and for equal fin_time by priority.
*/
for (i = 0; i < num_active_timeouts; i++)
@@ -214,18 +214,18 @@ schedule_alarm(TimestampTz now)
*
* Because we didn't bother to reset the timer in disable_alarm(),
* it's possible that a previously-set interrupt will fire between
- * enable_alarm() and setitimer(). This is safe, however. There are
+ * enable_alarm() and setitimer(). This is safe, however. There are
* two possible outcomes:
*
* 1. The signal handler finds nothing to do (because the nearest
* timeout event is still in the future). It will re-set the timer
- * and return. Then we'll overwrite the timer value with a new one.
+ * and return. Then we'll overwrite the timer value with a new one.
* This will mean that the timer fires a little later than we
* intended, but only by the amount of time it takes for the signal
* handler to do nothing useful, which shouldn't be much.
*
* 2. The signal handler executes and removes one or more timeout
- * events. When it returns, either the queue is now empty or the
+ * events. When it returns, either the queue is now empty or the
* frontmost event is later than the one we looked at above. So we'll
* overwrite the timer value with one that is too soon (plus or minus
* the signal handler's execution time), causing a useless interrupt
@@ -266,14 +266,14 @@ handle_sig_alarm(SIGNAL_ARGS)
* mainline is waiting for a lock). If SIGINT or similar arrives while
* this code is running, we'd lose control and perhaps leave our data
* structures in an inconsistent state. Disable immediate interrupts, and
- * just to be real sure, bump the holdoff counter as well. (The reason
+ * just to be real sure, bump the holdoff counter as well. (The reason
* for this belt-and-suspenders-too approach is to make sure that nothing
* bad happens if a timeout handler calls code that manipulates
* ImmediateInterruptOK.)
*
* Note: it's possible for a SIGINT to interrupt handle_sig_alarm before
* we manage to do this; the net effect would be as if the SIGALRM event
- * had been silently lost. Therefore error recovery must include some
+ * had been silently lost. Therefore error recovery must include some
* action that will allow any lost interrupt to be rescheduled. Disabling
* some or all timeouts is sufficient, or if that's not appropriate,
* reschedule_timeouts() can be called. Also, the signal blocking hazard
@@ -434,7 +434,7 @@ RegisterTimeout(TimeoutId id, timeout_handler_proc handler)
*
* This can be used during error recovery in case query cancel resulted in loss
* of a SIGALRM event (due to longjmp'ing out of handle_sig_alarm before it
- * could do anything). But note it's not necessary if any of the public
+ * could do anything). But note it's not necessary if any of the public
* enable_ or disable_timeout functions are called in the same area, since
* those all do schedule_alarm() internally if needed.
*/
@@ -503,7 +503,7 @@ enable_timeout_at(TimeoutId id, TimestampTz fin_time)
* Enable multiple timeouts at once.
*
* This works like calling enable_timeout_after() and/or enable_timeout_at()
- * multiple times. Use this to reduce the number of GetCurrentTimestamp()
+ * multiple times. Use this to reduce the number of GetCurrentTimestamp()
* and setitimer() calls needed to establish multiple timeouts.
*/
void
diff --git a/src/backend/utils/misc/tzparser.c b/src/backend/utils/misc/tzparser.c
index 93e42db7a51..8638cd92546 100644
--- a/src/backend/utils/misc/tzparser.c
+++ b/src/backend/utils/misc/tzparser.c
@@ -4,7 +4,7 @@
* Functions for parsing timezone offset files
*
* Note: this code is invoked from the check_hook for the GUC variable
- * timezone_abbreviations. Therefore, it should report problems using
+ * timezone_abbreviations. Therefore, it should report problems using
* GUC_check_errmsg() and related functions, and try to avoid throwing
* elog(ERROR). This is not completely bulletproof at present --- in
* particular out-of-memory will throw an error. Could probably fix with
@@ -179,7 +179,7 @@ addToArray(tzEntry **base, int *arraysize, int n,
/*
* Search the array for a duplicate; as a useful side effect, the array is
- * maintained in sorted order. We use strcmp() to ensure we match the
+ * maintained in sorted order. We use strcmp() to ensure we match the
* sort order datetime.c expects.
*/
arrayptr = *base;
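
The scan that follows is sketched below (duplicate handling abbreviated);
it walks the already-sorted prefix and stops at the first entry that sorts
after the new abbreviation:

    for (i = 0; i < n; i++, arrayptr++)
    {
        int     cmp = strcmp(abbrev, arrayptr->abbrev);

        if (cmp < 0)
            break;          /* found the insertion point */
        if (cmp == 0)
            /* duplicate: accept if identical, else report a conflict */ ;
    }
    /* shift entries [i..n) up one slot, insert the new entry at i */
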
diff --git a/src/backend/utils/mmgr/aset.c b/src/backend/utils/mmgr/aset.c
index 6a111c78329..94cc8117133 100644
--- a/src/backend/utils/mmgr/aset.c
+++ b/src/backend/utils/mmgr/aset.c
@@ -38,7 +38,7 @@
* request, even if it was much larger than necessary. This led to more
* and more wasted space in allocated chunks over time. To fix, get rid
* of the midrange behavior: we now handle only "small" power-of-2-size
- * chunks as chunks. Anything "large" is passed off to malloc(). Change
+ * chunks as chunks. Anything "large" is passed off to malloc(). Change
* the number of freelists to change the small/large boundary.
*
*
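
The resulting dispatch in AllocSetAlloc is a single boundary test, roughly
as below (the "large" path is abbreviated; allocChunkLimit is the set's
small/large boundary):

    if (size > set->allocChunkLimit)
    {
        /* "large": give the chunk its own malloc'd block */
        blksize = MAXALIGN(size) + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
        block = (AllocBlock) malloc(blksize);
        /* ... link the block in and return its sole chunk ... */
    }
    else
        fidx = AllocSetFreeIndex(size); /* route to a power-of-2 freelist */
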
@@ -54,7 +54,7 @@
* Thus, if someone makes the common error of writing past what they've
* requested, the problem is likely to go unnoticed ... until the day when
* there *isn't* any wasted space, perhaps because of different memory
- * alignment on a new platform, or some other effect. To catch this sort
+ * alignment on a new platform, or some other effect. To catch this sort
* of problem, the MEMORY_CONTEXT_CHECKING option stores 0x7E just beyond
* the requested space whenever the request is less than the actual chunk
* size, and verifies that the byte is undamaged when the chunk is freed.
@@ -153,7 +153,7 @@ typedef AllocSetContext *AllocSet;
/*
* AllocBlock
* An AllocBlock is the unit of memory that is obtained by aset.c
- * from malloc(). It contains one or more AllocChunks, which are
+ * from malloc(). It contains one or more AllocChunks, which are
* the units requested by palloc() and freed by pfree(). AllocChunks
* cannot be returned to malloc() individually, instead they are put
* on freelists by pfree() and re-used by the next palloc() that has
@@ -290,7 +290,7 @@ AllocSetFreeIndex(Size size)
/*
* At this point we need to obtain log2(tsize)+1, ie, the number of
- * not-all-zero bits at the right. We used to do this with a
+ * not-all-zero bits at the right. We used to do this with a
* shift-and-count loop, but this function is enough of a hotspot to
* justify micro-optimization effort. The best approach seems to be
* to use a lookup table. Note that this code assumes that
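
The table-driven computation referred to here works a byte at a time; a
sketch assuming LogTable256 is the classic 256-entry table giving
log2(k)+1, and that tsize fits in 16 bits (true for freelist-sized
requests):

    unsigned int t = tsize >> 8;

    idx = t ? LogTable256[t] + 8 : LogTable256[tsize];
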
@@ -457,7 +457,7 @@ AllocSetInit(MemoryContext context)
* Actually, this routine has some discretion about what to do.
* It should mark all allocated chunks freed, but it need not necessarily
* give back all the resources the set owns. Our actual implementation is
- * that we hang onto any "keeper" block specified for the set. In this way,
+ * that we hang onto any "keeper" block specified for the set. In this way,
* we don't thrash malloc() when a context is repeatedly reset after small
* allocations, which is typical behavior for per-tuple contexts.
*/
@@ -690,7 +690,7 @@ AllocSetAlloc(MemoryContext context, Size size)
/*
* In most cases, we'll get back the index of the next larger
- * freelist than the one we need to put this chunk on. The
+ * freelist than the one we need to put this chunk on. The
* exception is when availchunk is exactly a power of 2.
*/
if (availchunk != ((Size) 1 << (a_fidx + ALLOC_MINBITS)))
@@ -836,7 +836,7 @@ AllocSetFree(MemoryContext context, void *pointer)
{
/*
* Big chunks are certain to have been allocated as single-chunk
- * blocks. Find the containing block and return it to malloc().
+ * blocks. Find the containing block and return it to malloc().
*/
AllocBlock block = set->blocks;
AllocBlock prevblock = NULL;
@@ -932,7 +932,7 @@ AllocSetRealloc(MemoryContext context, void *pointer, Size size)
if (oldsize > set->allocChunkLimit)
{
/*
- * The chunk must have been allocated as a single-chunk block. Find
+ * The chunk must have been allocated as a single-chunk block. Find
* the containing block and use realloc() to make it bigger with
* minimum space wastage.
*/
diff --git a/src/backend/utils/mmgr/mcxt.c b/src/backend/utils/mmgr/mcxt.c
index 6118e12eaf8..7369ec36232 100644
--- a/src/backend/utils/mmgr/mcxt.c
+++ b/src/backend/utils/mmgr/mcxt.c
@@ -166,7 +166,7 @@ MemoryContextResetChildren(MemoryContext context)
*
* The type-specific delete routine removes all subsidiary storage
* for the context, but we have to delete the context node itself,
- * as well as recurse to get the children. We must also delink the
+ * as well as recurse to get the children. We must also delink the
* node from its parent, if it has one.
*/
void
@@ -476,22 +476,22 @@ MemoryContextContains(MemoryContext context, void *pointer)
* we want to be sure that we don't leave the context tree invalid
* in case of failure (such as insufficient memory to allocate the
* context node itself). The procedure goes like this:
- * 1. Context-type-specific routine first calls MemoryContextCreate(),
+ * 1. Context-type-specific routine first calls MemoryContextCreate(),
* passing the appropriate tag/size/methods values (the methods
* pointer will ordinarily point to statically allocated data).
* The parent and name parameters usually come from the caller.
- * 2. MemoryContextCreate() attempts to allocate the context node,
+ * 2. MemoryContextCreate() attempts to allocate the context node,
* plus space for the name. If this fails we can ereport() with no
* damage done.
- * 3. We fill in all of the type-independent MemoryContext fields.
- * 4. We call the type-specific init routine (using the methods pointer).
+ * 3. We fill in all of the type-independent MemoryContext fields.
+ * 4. We call the type-specific init routine (using the methods pointer).
* The init routine is required to make the node minimally valid
* with zero chance of failure --- it can't allocate more memory,
* for example.
- * 5. Now we have a minimally valid node that can behave correctly
+ * 5. Now we have a minimally valid node that can behave correctly
* when told to reset or delete itself. We link the node to its
* parent (if any), making the node part of the context tree.
- * 6. We return to the context-type-specific routine, which finishes
+ * 6. We return to the context-type-specific routine, which finishes
* up type-specific initialization. This routine can now do things
* that might fail (like allocate more memory), so long as it's
* sure the node is left in a state that delete will handle.
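
For illustration, a skeleton of steps 1 and 6 as seen from a
context-type-specific routine (the MyContext names and tag are hypothetical;
the MemoryContextCreate signature matches mcxt.c of this vintage):

    MemoryContext
    MyContextCreate(MemoryContext parent, const char *name)
    {
        MyContext  *ctx;

        /* steps 1-5: allocate and minimally initialize the node */
        ctx = (MyContext *) MemoryContextCreate(T_MyContext, /* hypothetical */
                                                sizeof(MyContext),
                                                &MyContextMethods,
                                                parent, name);

        /* step 6: type-specific setup; failure is now safe to handle */
        ctx->workspace = MemoryContextAlloc(TopMemoryContext, 8192);

        return (MemoryContext) ctx;
    }
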
@@ -503,7 +503,7 @@ MemoryContextContains(MemoryContext context, void *pointer)
*
* Normally, the context node and the name are allocated from
* TopMemoryContext (NOT from the parent context, since the node must
- * survive resets of its parent context!). However, this routine is itself
+ * survive resets of its parent context!). However, this routine is itself
* used to create TopMemoryContext! If we see that TopMemoryContext is NULL,
* we assume we are creating TopMemoryContext and use malloc() to allocate
* the node.
diff --git a/src/backend/utils/mmgr/portalmem.c b/src/backend/utils/mmgr/portalmem.c
index 54bf16ee2f7..e7cc11fa0ea 100644
--- a/src/backend/utils/mmgr/portalmem.c
+++ b/src/backend/utils/mmgr/portalmem.c
@@ -144,14 +144,14 @@ GetPortalByName(const char *name)
* Get the "primary" stmt within a portal, ie, the one marked canSetTag.
*
* Returns NULL if no such stmt. If multiple PlannedStmt structs within the
- * portal are marked canSetTag, returns the first one. Neither of these
+ * portal are marked canSetTag, returns the first one. Neither of these
* cases should occur in present usages of this function.
*
* Copes if given a list of Querys --- can't happen in a portal, but this
* code also supports plancache.c, which needs both cases.
*
* Note: the reason this is just handed a List is so that plancache.c
- * can share the code. For use with a portal, use PortalGetPrimaryStmt
+ * can share the code. For use with a portal, use PortalGetPrimaryStmt
* rather than calling this directly.
*/
Node *
@@ -277,7 +277,7 @@ CreateNewPortal(void)
* you can pass a constant string, perhaps "(query not available)".)
*
* commandTag shall be NULL if and only if the original query string
- * (before rewriting) was an empty string. Also, the passed commandTag must
+ * (before rewriting) was an empty string. Also, the passed commandTag must
* be a pointer to a constant string, since it is not copied.
*
* If cplan is provided, then it is a cached plan containing the stmts, and
@@ -480,14 +480,14 @@ PortalDrop(Portal portal, bool isTopCommit)
/*
* Allow portalcmds.c to clean up the state it knows about, in particular
- * shutting down the executor if still active. This step potentially runs
+ * shutting down the executor if still active. This step potentially runs
* user-defined code so failure has to be expected. It's the cleanup
* hook's responsibility to not try to do that more than once, in the case
* that failure occurs and then we come back to drop the portal again
* during transaction abort.
*
* Note: in most paths of control, this will have been done already in
- * MarkPortalDone or MarkPortalFailed. We're just making sure.
+ * MarkPortalDone or MarkPortalFailed. We're just making sure.
*/
if (PointerIsValid(portal->cleanup))
{
@@ -507,12 +507,12 @@ PortalDrop(Portal portal, bool isTopCommit)
PortalReleaseCachedPlan(portal);
/*
- * Release any resources still attached to the portal. There are several
+ * Release any resources still attached to the portal. There are several
* cases being covered here:
*
* Top transaction commit (indicated by isTopCommit): normally we should
* do nothing here and let the regular end-of-transaction resource
- * releasing mechanism handle these resources too. However, if we have a
+ * releasing mechanism handle these resources too. However, if we have a
* FAILED portal (eg, a cursor that got an error), we'd better clean up
* its resources to avoid resource-leakage warning messages.
*
@@ -524,7 +524,7 @@ PortalDrop(Portal portal, bool isTopCommit)
* cleaned up in transaction abort.
*
* Ordinary portal drop: must release resources. However, if the portal
- * is not FAILED then we do not release its locks. The locks become the
+ * is not FAILED then we do not release its locks. The locks become the
* responsibility of the transaction's ResourceOwner (since it is the
* parent of the portal's owner) and will be released when the transaction
* eventually ends.
@@ -611,7 +611,7 @@ PortalHashTableDeleteAll(void)
* Holdable cursors created in this transaction need to be converted to
* materialized form, since we are going to close down the executor and
* release locks. Non-holdable portals created in this transaction are
- * simply removed. Portals remaining from prior transactions should be
+ * simply removed. Portals remaining from prior transactions should be
* left untouched.
*
* Returns TRUE if any portals changed state (possibly causing user-defined
diff --git a/src/backend/utils/resowner/resowner.c b/src/backend/utils/resowner/resowner.c
index e7ec3931f12..fd2926d083a 100644
--- a/src/backend/utils/resowner/resowner.c
+++ b/src/backend/utils/resowner/resowner.c
@@ -171,7 +171,7 @@ ResourceOwnerCreate(ResourceOwner parent, const char *name)
* but don't delete the owner objects themselves.
*
* Note that this executes just one phase of release, and so typically
- * must be called three times. We do it this way because (a) we want to
+ * must be called three times. We do it this way because (a) we want to
* do all the recursion separately for each phase, thereby preserving
* the needed order of operations; and (b) xact.c may have other operations
* to do between the phases.
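
The three phases in calling order, as the end-of-xact code drives them (a
minimal sketch; isCommit and the isTopLevel flag depend on the caller's
situation):

    ResourceOwnerRelease(owner, RESOURCE_RELEASE_BEFORE_LOCKS, isCommit, true);
    /* ... xact.c releases locks and does related bookkeeping here ... */
    ResourceOwnerRelease(owner, RESOURCE_RELEASE_LOCKS, isCommit, true);
    ResourceOwnerRelease(owner, RESOURCE_RELEASE_AFTER_LOCKS, isCommit, true);
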
@@ -245,7 +245,7 @@ ResourceOwnerReleaseInternal(ResourceOwner owner,
*
* During a commit, there shouldn't be any remaining pins --- that
* would indicate failure to clean up the executor correctly --- so
- * issue warnings. In the abort case, just clean up quietly.
+ * issue warnings. In the abort case, just clean up quietly.
*
* We are careful to do the releasing back-to-front, so as to avoid
* O(N^2) behavior in ResourceOwnerForgetBuffer().
@@ -417,7 +417,7 @@ ResourceOwnerDelete(ResourceOwner owner)
/*
* We delink the owner from its parent before deleting it, so that if
* there's an error we won't have deleted/busted owners still attached to
- * the owner tree. Better a leak than a crash.
+ * the owner tree. Better a leak than a crash.
*/
ResourceOwnerNewParent(owner, NULL);
@@ -609,7 +609,7 @@ ResourceOwnerForgetBuffer(ResourceOwner owner, Buffer buffer)
/*
* Scan back-to-front because it's more likely we are releasing a
- * recently pinned buffer. This isn't always the case of course, but
+ * recently pinned buffer. This isn't always the case of course, but
* it's the way to bet.
*/
for (i = nb1; i >= 0; i--)
diff --git a/src/backend/utils/sort/logtape.c b/src/backend/utils/sort/logtape.c
index feaabe2f698..f84dc12f797 100644
--- a/src/backend/utils/sort/logtape.c
+++ b/src/backend/utils/sort/logtape.c
@@ -7,14 +7,14 @@
* tuplesort.c). Merging is an ideal algorithm for tape devices, but if
* we implement it on disk by creating a separate file for each "tape",
* there is an annoying problem: the peak space usage is at least twice
- * the volume of actual data to be sorted. (This must be so because each
+ * the volume of actual data to be sorted. (This must be so because each
* datum will appear in both the input and output tapes of the final
- * merge pass. For seven-tape polyphase merge, which is otherwise a
+ * merge pass. For seven-tape polyphase merge, which is otherwise a
* pretty good algorithm, peak usage is more like 4x actual data volume.)
*
* We can work around this problem by recognizing that any one tape
* dataset (with the possible exception of the final output) is written
- * and read exactly once in a perfectly sequential manner. Therefore,
+ * and read exactly once in a perfectly sequential manner. Therefore,
* a datum once read will not be required again, and we can recycle its
* space for use by the new tape dataset(s) being generated. In this way,
* the total space usage is essentially just the actual data volume, plus
@@ -55,7 +55,7 @@
* To support the above policy of writing to the lowest free block,
* ltsGetFreeBlock sorts the list of free block numbers into decreasing
* order each time it is asked for a block and the list isn't currently
- * sorted. This is an efficient way to handle it because we expect cycles
+ * sorted. This is an efficient way to handle it because we expect cycles
* of releasing many blocks followed by re-using many blocks, due to
* tuplesort.c's "preread" behavior.
*
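
A sketch of the resulting allocation routine (field and comparator names
follow logtape.c, but treat the details as schematic):

    static long
    ltsGetFreeBlock_sketch(LogicalTapeSet *lts)
    {
        if (lts->nFreeBlocks > 0)
        {
            if (!lts->blocksSorted)
            {
                qsort((void *) lts->freeBlocks, lts->nFreeBlocks,
                      sizeof(long), freeBlocksCompare);  /* decreasing */
                lts->blocksSorted = true;
            }
            return lts->freeBlocks[--lts->nFreeBlocks]; /* lowest free block */
        }
        return lts->nFileBlocks++;      /* nothing free: extend the file */
    }
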
@@ -117,7 +117,7 @@ typedef struct LogicalTape
/*
* The total data volume in the logical tape is numFullBlocks * BLCKSZ +
- * lastBlockBytes. BUT: we do not update lastBlockBytes during writing,
+ * lastBlockBytes. BUT: we do not update lastBlockBytes during writing,
* only at completion of a write phase.
*/
long numFullBlocks; /* number of complete blocks in log tape */
@@ -157,7 +157,7 @@ struct LogicalTapeSet
*
* If blocksSorted is true then the block numbers in freeBlocks are in
* *decreasing* order, so that removing the last entry gives us the lowest
- * free block. We re-sort the blocks whenever a block is demanded; this
+ * free block. We re-sort the blocks whenever a block is demanded; this
* should be reasonably efficient given the expected usage pattern.
*/
bool forgetFreeSpace; /* are we remembering free blocks? */
@@ -218,7 +218,7 @@ ltsWriteBlock(LogicalTapeSet *lts, long blocknum, void *buffer)
/*
* Read a block-sized buffer from the specified block of the underlying file.
*
- * No need for an error return convention; we ereport() on any error. This
+ * No need for an error return convention; we ereport() on any error. This
* module should never attempt to read a block it doesn't know is there.
*/
static void
@@ -353,7 +353,7 @@ ltsRecordBlockNum(LogicalTapeSet *lts, IndirectBlock *indirect,
/*
* Reset a logical tape's indirect-block hierarchy after a write pass
- * to prepare for reading. We dump out partly-filled blocks except
+ * to prepare for reading. We dump out partly-filled blocks except
* at the top of the hierarchy, and we rewind each level to the start.
* This call returns the first data block number, or -1L if the tape
* is empty.
@@ -540,7 +540,7 @@ LogicalTapeSetCreate(int ntapes)
/*
* Initialize per-tape structs. Note we allocate the I/O buffer and
* first-level indirect block for a tape only when it is first actually
- * written to. This avoids wasting memory space when tuplesort.c
+ * written to. This avoids wasting memory space when tuplesort.c
* overestimates the number of tapes needed.
*/
for (i = 0; i < ntapes; i++)
@@ -591,7 +591,7 @@ LogicalTapeSetClose(LogicalTapeSet *lts)
* Mark a logical tape set as not needing management of free space anymore.
*
* This should be called if the caller does not intend to write any more data
- * into the tape set, but is reading from un-frozen tapes. Since no more
+ * into the tape set, but is reading from un-frozen tapes. Since no more
* writes are planned, remembering free blocks is no longer useful. Setting
* this flag lets us avoid wasting time and space in ltsReleaseBlock(), which
* is not designed to handle large numbers of free blocks.
@@ -732,7 +732,7 @@ LogicalTapeRewind(LogicalTapeSet *lts, int tapenum, bool forWrite)
else
{
/*
- * Completion of a read phase. Rewind and prepare for write.
+ * Completion of a read phase. Rewind and prepare for write.
*
* NOTE: we assume the caller has read the tape to the end; otherwise
* untouched data and indirect blocks will not have been freed. We
@@ -826,7 +826,7 @@ LogicalTapeRead(LogicalTapeSet *lts, int tapenum,
*
* This *must* be called just at the end of a write pass, before the
* tape is rewound (after rewind is too late!). It performs a rewind
- * and switch to read mode "for free". An immediately following rewind-
+ * and switch to read mode "for free". An immediately following rewind-
* for-read call is OK but not necessary.
*/
void
@@ -862,7 +862,7 @@ LogicalTapeFreeze(LogicalTapeSet *lts, int tapenum)
}
/*
- * Backspace the tape a given number of bytes. (We also support a more
+ * Backspace the tape a given number of bytes. (We also support a more
* general seek interface, see below.)
*
* *Only* a frozen-for-read tape can be backed up; we don't support
@@ -966,7 +966,7 @@ LogicalTapeSeek(LogicalTapeSet *lts, int tapenum,
return false;
/*
- * OK, advance or back up to the target block. This implementation would
+ * OK, advance or back up to the target block. This implementation would
* be pretty inefficient for long seeks, but we really aren't expecting
* that (a seek over one tuple is typical).
*/
@@ -999,7 +999,7 @@ LogicalTapeSeek(LogicalTapeSet *lts, int tapenum,
* Obtain current position in a form suitable for a later LogicalTapeSeek.
*
* NOTE: it'd be OK to do this during write phase with intention of using
- * the position for a seek after freezing. Not clear if anyone needs that.
+ * the position for a seek after freezing. Not clear if anyone needs that.
*/
void
LogicalTapeTell(LogicalTapeSet *lts, int tapenum,
diff --git a/src/backend/utils/sort/tuplesort.c b/src/backend/utils/sort/tuplesort.c
index 5d42480e604..58b3896af73 100644
--- a/src/backend/utils/sort/tuplesort.c
+++ b/src/backend/utils/sort/tuplesort.c
@@ -6,7 +6,7 @@
* This module handles sorting of heap tuples, index tuples, or single
* Datums (and could easily support other kinds of sortable objects,
* if necessary). It works efficiently for both small and large amounts
- * of data. Small amounts are sorted in-memory using qsort(). Large
+ * of data. Small amounts are sorted in-memory using qsort(). Large
* amounts are sorted using temporary files and a standard external sort
* algorithm.
*
@@ -40,7 +40,7 @@
* into sorted runs in temporary tapes, emitting just enough tuples at each
* step to get back within the workMem limit. Whenever the run number at
* the top of the heap changes, we begin a new run with a new output tape
- * (selected per Algorithm D). After the end of the input is reached,
+ * (selected per Algorithm D). After the end of the input is reached,
* we dump out remaining tuples in memory into a final run (or two),
* then merge the runs using Algorithm D.
*
@@ -57,17 +57,17 @@
* access at all, defeating the read-ahead methods used by most Unix kernels.
* Worse, the output tape gets written into a very random sequence of blocks
* of the temp file, ensuring that things will be even worse when it comes
- * time to read that tape. A straightforward merge pass thus ends up doing a
+ * time to read that tape. A straightforward merge pass thus ends up doing a
* lot of waiting for disk seeks. We can improve matters by prereading from
* each source tape sequentially, loading about workMem/M bytes from each tape
* in turn. Then we run the merge algorithm, writing but not reading until
- * one of the preloaded tuple series runs out. Then we switch back to preread
+ * one of the preloaded tuple series runs out. Then we switch back to preread
* mode, fill memory again, and repeat. This approach helps to localize both
* read and write accesses.
*
* When the caller requests random access to the sort result, we form
* the final sorted run on a logical tape which is then "frozen", so
- * that we can access it randomly. When the caller does not need random
+ * that we can access it randomly. When the caller does not need random
* access, we return from tuplesort_performsort() as soon as we are down
* to one run per logical tape. The final merge is then performed
* on-the-fly as the caller repeatedly calls tuplesort_getXXX; this
@@ -77,7 +77,7 @@
* grounds that 7 is the "sweet spot" on the tapes-to-passes curve according
* to Knuth's figure 70 (section 5.4.2). However, Knuth is assuming that
* tape drives are expensive beasts, and in particular that there will always
- * be many more runs than tape drives. In our implementation a "tape drive"
+ * be many more runs than tape drives. In our implementation a "tape drive"
* doesn't cost much more than a few Kb of memory buffers, so we can afford
* to have lots of them. In particular, if we can have as many tape drives
* as sorted runs, we can eliminate any repeated I/O at all. In the current
@@ -134,28 +134,28 @@ bool optimize_bounded_sort = true;
/*
- * The objects we actually sort are SortTuple structs. These contain
+ * The objects we actually sort are SortTuple structs. These contain
* a pointer to the tuple proper (might be a MinimalTuple or IndexTuple),
* which is a separate palloc chunk --- we assume it is just one chunk and
* can be freed by a simple pfree(). SortTuples also contain the tuple's
* first key column in Datum/nullflag format, and an index integer.
*
* Storing the first key column lets us save heap_getattr or index_getattr
- * calls during tuple comparisons. We could extract and save all the key
+ * calls during tuple comparisons. We could extract and save all the key
* columns not just the first, but this would increase code complexity and
* overhead, and wouldn't actually save any comparison cycles in the common
* case where the first key determines the comparison result. Note that
* for a pass-by-reference datatype, datum1 points into the "tuple" storage.
*
* When sorting single Datums, the data value is represented directly by
- * datum1/isnull1. If the datatype is pass-by-reference and isnull1 is false,
+ * datum1/isnull1. If the datatype is pass-by-reference and isnull1 is false,
* then datum1 points to a separately palloc'd data value that is also pointed
* to by the "tuple" pointer; otherwise "tuple" is NULL.
*
* While building initial runs, tupindex holds the tuple's run number. During
* merge passes, we re-use it to hold the input tape number that each tuple in
* the heap was read from, or to hold the index of the next tuple pre-read
- * from the same tape in the case of pre-read entries. tupindex goes unused
+ * from the same tape in the case of pre-read entries. tupindex goes unused
* if the sort occurs entirely in memory.
*/
typedef struct
@@ -238,7 +238,7 @@ struct Tuplesortstate
void (*copytup) (Tuplesortstate *state, SortTuple *stup, void *tup);
/*
- * Function to write a stored tuple onto tape. The representation of the
+ * Function to write a stored tuple onto tape. The representation of the
* tuple on tape need not be the same as it is in memory; requirements on
* the tape representation are given below. After writing the tuple,
* pfree() the out-of-line data (not the SortTuple struct!), and increase
@@ -264,7 +264,7 @@ struct Tuplesortstate
void (*reversedirection) (Tuplesortstate *state);
/*
- * This array holds the tuples now in sort memory. If we are in state
+ * This array holds the tuples now in sort memory. If we are in state
* INITIAL, the tuples are in no particular order; if we are in state
* SORTEDINMEM, the tuples are in final sorted order; in states BUILDRUNS
* and FINALMERGE, the tuples are organized in "heap" order per Algorithm
@@ -412,7 +412,7 @@ struct Tuplesortstate
* If state->randomAccess is true, then the stored representation of the
* tuple must be followed by another "unsigned int" that is a copy of the
* length --- so the total tape space used is actually sizeof(unsigned int)
- * more than the stored length value. This allows read-backwards. When
+ * more than the stored length value. This allows read-backwards. When
* randomAccess is not true, the write/read routines may omit the extra
* length word.
*
@@ -422,7 +422,7 @@ struct Tuplesortstate
* the back length word (if present).
*
* The write/read routines can make use of the tuple description data
- * stored in the Tuplesortstate record, if needed. They are also expected
+ * stored in the Tuplesortstate record, if needed. They are also expected
* to adjust state->availMem by the amount of memory space (not tape space!)
* released or consumed. There is no error return from either writetup
* or readtup; they should ereport() on failure.
@@ -519,7 +519,7 @@ static void free_sort_tuple(Tuplesortstate *state, SortTuple *stup);
*
* After calling tuplesort_begin, the caller should call tuplesort_putXXX
* zero or more times, then call tuplesort_performsort when all the tuples
- * have been supplied. After performsort, retrieve the tuples in sorted
+ * have been supplied. After performsort, retrieve the tuples in sorted
* order by calling tuplesort_getXXX until it returns false/NULL. (If random
* access was requested, rescan, markpos, and restorepos can also be called.)
* Call tuplesort_end to terminate the operation and release memory/disk space.
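
In code, the protocol looks like this (heap-tuple flavor; the input loop and
slot setup are schematic):

    Tuplesortstate *sortstate;

    sortstate = tuplesort_begin_heap(tupDesc, nkeys, attNums, sortOperators,
                                     collations, nullsFirstFlags,
                                     work_mem, false);  /* no random access */

    while ((slot = next_input_slot()) != NULL)  /* hypothetical tuple source */
        tuplesort_puttupleslot(sortstate, slot);

    tuplesort_performsort(sortstate);

    while (tuplesort_gettupleslot(sortstate, true, outslot))
        ;                               /* consume tuples in sorted order */

    tuplesort_end(sortstate);
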
@@ -859,7 +859,7 @@ tuplesort_begin_datum(Oid datumType, Oid sortOperator, Oid sortCollation,
*
* Advise tuplesort that at most the first N result tuples are required.
*
- * Must be called before inserting any tuples. (Actually, we could allow it
+ * Must be called before inserting any tuples. (Actually, we could allow it
* as long as the sort hasn't spilled to disk, but there seems no need for
* delayed calls at the moment.)
*
@@ -1005,7 +1005,7 @@ grow_memtuples(Tuplesortstate *state)
* strategy and instead increase as much as we safely can.
*
* To stay within allowedMem, we can't increase memtupsize by more
- * than availMem / sizeof(SortTuple) elements. In practice, we want
+ * than availMem / sizeof(SortTuple) elements. In practice, we want
* to increase it by considerably less, because we need to leave some
* space for the tuples to which the new array slots will refer. We
* assume the new tuples will be about the same size as the tuples
@@ -1053,9 +1053,9 @@ grow_memtuples(Tuplesortstate *state)
* We need to be sure that we do not cause LACKMEM to become true, else
* the space management algorithm will go nuts. The code above should
* never generate a dangerous request, but to be safe, check explicitly
- * that the array growth fits within availMem. (We could still cause
+ * that the array growth fits within availMem. (We could still cause
* LACKMEM if the memory chunk overhead associated with the memtuples
- * array were to increase. That shouldn't happen with any sane value of
+ * array were to increase. That shouldn't happen with any sane value of
* allowedMem, because at any array size large enough to risk LACKMEM,
* palloc would be treating both old and new arrays as separate chunks.
* But we'll check LACKMEM explicitly below just in case.)
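
Sketched, the explicit safety check amounts to refusing any growth whose
array cost alone exceeds the remaining budget (the real code also has to
worry about the chunk-overhead caveat noted above):

    growth = (Size) (newmemtupsize - state->memtupsize) * sizeof(SortTuple);
    if (growth > (Size) state->availMem)
        return false;       /* don't risk LACKMEM; keep the current array */
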
@@ -1191,7 +1191,7 @@ puttuple_common(Tuplesortstate *state, SortTuple *tuple)
case TSS_INITIAL:
/*
- * Save the tuple into the unsorted array. First, grow the array
+ * Save the tuple into the unsorted array. First, grow the array
* as needed. Note that we try to grow the array when there is
* still one free slot remaining --- if we fail, there'll still be
* room to store the incoming tuple, and then we'll switch to
@@ -1212,7 +1212,7 @@ puttuple_common(Tuplesortstate *state, SortTuple *tuple)
* enough tuples to meet the bound.
*
* Note that once we enter TSS_BOUNDED state we will always try to
- * complete the sort that way. In the worst case, if later input
+ * complete the sort that way. In the worst case, if later input
* tuples are larger than earlier ones, this might cause us to
* exceed workMem significantly.
*/
@@ -1350,7 +1350,7 @@ tuplesort_performsort(Tuplesortstate *state)
/*
* We were able to accumulate all the tuples required for output
- * in memory, using a heap to eliminate excess tuples. Now we
+ * in memory, using a heap to eliminate excess tuples. Now we
* have to transform the heap to a properly-sorted array.
*/
sort_bounded_heap(state);
@@ -1364,7 +1364,7 @@ tuplesort_performsort(Tuplesortstate *state)
case TSS_BUILDRUNS:
/*
- * Finish tape-based sort. First, flush all tuples remaining in
+ * Finish tape-based sort. First, flush all tuples remaining in
* memory out to tape; then merge until we have a single remaining
* run (or, if !randomAccess, one run per tape). Note that
* mergeruns sets the correct state->status.
@@ -1425,7 +1425,7 @@ tuplesort_gettuple_common(Tuplesortstate *state, bool forward,
/*
* Complain if caller tries to retrieve more tuples than
- * originally asked for in a bounded sort. This is because
+ * originally asked for in a bounded sort. This is because
* returning EOF here might be the wrong thing.
*/
if (state->bounded && state->current >= state->bound)
@@ -1631,7 +1631,7 @@ tuplesort_gettupleslot(Tuplesortstate *state, bool forward,
/*
* Fetch the next tuple in either forward or back direction.
- * Returns NULL if no more tuples. If *should_free is set, the
+ * Returns NULL if no more tuples. If *should_free is set, the
* caller must pfree the returned tuple when done with it.
*/
HeapTuple
@@ -1650,7 +1650,7 @@ tuplesort_getheaptuple(Tuplesortstate *state, bool forward, bool *should_free)
/*
* Fetch the next index tuple in either forward or back direction.
- * Returns NULL if no more tuples. If *should_free is set, the
+ * Returns NULL if no more tuples. If *should_free is set, the
* caller must pfree the returned tuple when done with it.
*/
IndexTuple
@@ -1721,7 +1721,7 @@ tuplesort_merge_order(long allowedMem)
/*
* We need one tape for each merge input, plus another one for the output,
- * and each of these tapes needs buffer space. In addition we want
+ * and each of these tapes needs buffer space. In addition we want
* MERGE_BUFFER_SIZE workspace per input tape (but the output tape doesn't
* count).
*
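
The arithmetic comes out as below; a sketch assuming TAPE_BUFFER_OVERHEAD is
the per-tape buffer cost and MERGE_BUFFER_SIZE the per-input preread
workspace:

    mOrder = (allowedMem - TAPE_BUFFER_OVERHEAD) /
             (MERGE_BUFFER_SIZE + TAPE_BUFFER_OVERHEAD);
    mOrder = Max(mOrder, MINORDER);     /* never fewer than MINORDER tapes */
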
@@ -1775,7 +1775,7 @@ inittapes(Tuplesortstate *state)
* don't decrease it to the point that we have no room for tuples. (That
* case is only likely to occur if sorting pass-by-value Datums; in all
* other scenarios the memtuples[] array is unlikely to occupy more than
- * half of allowedMem. In the pass-by-value case it's not important to
+ * half of allowedMem. In the pass-by-value case it's not important to
* account for tuple space, so we don't care if LACKMEM becomes
* inaccurate.)
*/
@@ -1899,7 +1899,7 @@ mergeruns(Tuplesortstate *state)
/*
* If we produced only one initial run (quite likely if the total data
* volume is between 1X and 2X workMem), we can just use that tape as the
- * finished output, rather than doing a useless merge. (This obvious
+ * finished output, rather than doing a useless merge. (This obvious
* optimization is not in Knuth's algorithm.)
*/
if (state->currentRun == 1)
@@ -2005,7 +2005,7 @@ mergeruns(Tuplesortstate *state)
* the loop without performing the last iteration of step D6, we have not
* rearranged the tape unit assignment, and therefore the result is on
* TAPE[T]. We need to do it this way so that we can freeze the final
- * output tape while rewinding it. The last iteration of step D6 would be
+ * output tape while rewinding it. The last iteration of step D6 would be
* a waste of cycles anyway...
*/
state->result_tape = state->tp_tapenum[state->tapeRange];
@@ -2089,7 +2089,7 @@ mergeonerun(Tuplesortstate *state)
* beginmerge - initialize for a merge pass
*
* We decrease the counts of real and dummy runs for each tape, and mark
- * which tapes contain active input runs in mergeactive[]. Then, load
+ * which tapes contain active input runs in mergeactive[]. Then, load
* as many tuples as we can from each active input tape, and finally
* fill the merge heap with the first tuple from each active tape.
*/
@@ -2182,7 +2182,7 @@ beginmerge(Tuplesortstate *state)
* This routine exists to improve sequentiality of reads during a merge pass,
* as explained in the header comments of this file. Load tuples from each
* active source tape until the tape's run is exhausted or it has used up
- * its fair share of available memory. In any case, we guarantee that there
+ * its fair share of available memory. In any case, we guarantee that there
* is at least one preread tuple available from each unexhausted input tape.
*
* We invoke this routine at the start of a merge pass for initial load,
@@ -2445,7 +2445,7 @@ tuplesort_get_stats(Tuplesortstate *state,
* accurately once we have begun to return tuples to the caller (since we
* don't account for pfree's the caller is expected to do), so we cannot
* rely on availMem in a disk sort. This does not seem worth the overhead
- * to fix. Is it worth creating an API for the memory context code to
+ * to fix. Is it worth creating an API for the memory context code to
* tell us how much is actually used in sortcontext?
*/
if (state->tapeset)
@@ -2483,7 +2483,7 @@ tuplesort_get_stats(Tuplesortstate *state,
/*
* Heap manipulation routines, per Knuth's Algorithm 5.2.3H.
*
- * Compare two SortTuples. If checkIndex is true, use the tuple index
+ * Compare two SortTuples. If checkIndex is true, use the tuple index
* as the front of the sort key; otherwise, no.
*/
@@ -2588,7 +2588,7 @@ sort_bounded_heap(Tuplesortstate *state)
/*
* Insert a new tuple into an empty or existing heap, maintaining the
- * heap invariant. Caller is responsible for ensuring there's room.
+ * heap invariant. Caller is responsible for ensuring there's room.
*
* Note: we assume *tuple is a temporary variable that can be scribbled on.
* For some callers, tuple actually points to a memtuples[] entry above the
diff --git a/src/backend/utils/sort/tuplestore.c b/src/backend/utils/sort/tuplestore.c
index ea9bc04823d..549175c885c 100644
--- a/src/backend/utils/sort/tuplestore.c
+++ b/src/backend/utils/sort/tuplestore.c
@@ -8,7 +8,7 @@
* a dumbed-down version of tuplesort.c; it does no sorting of tuples
* but can only store and regurgitate a sequence of tuples. However,
* because no sort is required, it is allowed to start reading the sequence
- * before it has all been written. This is particularly useful for cursors,
+ * before it has all been written. This is particularly useful for cursors,
* because it allows random access within the already-scanned portion of
* a query without having to process the underlying scan to completion.
* Also, it is possible to support multiple independent read pointers.
@@ -17,7 +17,7 @@
* space limit specified by the caller.
*
* The (approximate) amount of memory allowed to the tuplestore is specified
- * in kilobytes by the caller. We absorb tuples and simply store them in an
+ * in kilobytes by the caller. We absorb tuples and simply store them in an
* in-memory array as long as we haven't exceeded maxKBytes. If we do exceed
* maxKBytes, we dump all the tuples into a temp file and then read from that
* when needed.
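
A minimal usage sketch (work_mem as the cap is the typical choice; error
handling omitted):

    Tuplestorestate *ts;

    ts = tuplestore_begin_heap(true,        /* allow rewind/random access */
                               false,       /* don't outlive the transaction */
                               work_mem);   /* spill to a temp file past this */

    tuplestore_puttuple(ts, tuple);         /* absorb; the tuple is copied */
    /* ... read back with tuplestore_gettupleslot, etc. ... */
    tuplestore_end(ts);
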
@@ -29,7 +29,7 @@
* When the caller requests backward-scan capability, we write the temp file
* in a format that allows either forward or backward scan. Otherwise, only
* forward scan is allowed. A request for backward scan must be made before
- * putting any tuples into the tuplestore. Rewind is normally allowed but
+ * putting any tuples into the tuplestore. Rewind is normally allowed but
* can be turned off via tuplestore_set_eflags; turning off rewind for all
* read pointers enables truncation of the tuplestore at the oldest read point
* for minimal memory usage. (The caller must explicitly call tuplestore_trim
@@ -63,7 +63,7 @@
/*
- * Possible states of a Tuplestore object. These denote the states that
+ * Possible states of a Tuplestore object. These denote the states that
* persist between calls of Tuplestore routines.
*/
typedef enum
@@ -82,7 +82,7 @@ typedef enum
*
* Special case: if eof_reached is true, then the pointer's read position is
* implicitly equal to the write position, and current/file/offset aren't
- * maintained. This way we need not update all the read pointers each time
+ * maintained. This way we need not update all the read pointers each time
* we write.
*/
typedef struct
@@ -127,7 +127,7 @@ struct Tuplestorestate
void *(*copytup) (Tuplestorestate *state, void *tup);
/*
- * Function to write a stored tuple onto tape. The representation of the
+ * Function to write a stored tuple onto tape. The representation of the
* tuple on tape need not be the same as it is in memory; requirements on
* the tape representation are given below. After writing the tuple,
* pfree() it, and increase state->availMem by the amount of memory space
@@ -196,7 +196,7 @@ struct Tuplestorestate
* If state->backward is true, then the stored representation of
* the tuple must be followed by another "unsigned int" that is a copy of the
* length --- so the total tape space used is actually sizeof(unsigned int)
- * more than the stored length value. This allows read-backwards. When
+ * more than the stored length value. This allows read-backwards. When
* state->backward is not set, the write/read routines may omit the extra
* length word.
*
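A standalone sketch of that tape layout, with the optional trailing length word (not PostgreSQL's actual writetup routines):

    #include <stdbool.h>
    #include <stdio.h>

    /* Sketch: write one tuple image with a leading length word, plus a
     * trailing copy of the length when backward scans must be supported. */
    static void
    tape_write_tuple(FILE *fp, const void *tup, unsigned int len, bool backward)
    {
        fwrite(&len, sizeof(len), 1, fp);
        fwrite(tup, len, 1, fp);
        if (backward)
            fwrite(&len, sizeof(len), 1, fp);   /* enables read-backwards */
    }

    /* Position fp at the start of the previous record and return its length.
     * The caller must not invoke this at the very start of the file. */
    static unsigned int
    tape_back_up(FILE *fp)
    {
        unsigned int len;

        fseek(fp, -(long) sizeof(len), SEEK_CUR);   /* to the trailing word */
        if (fread(&len, sizeof(len), 1, fp) != 1)
            return 0;                               /* I/O error */
        fseek(fp, -(long) (len + 2 * sizeof(len)), SEEK_CUR);
        return len;
    }

The trailing copy is what makes backward stepping cheap: back up one word, read the length, then back up over the whole record.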
@@ -294,7 +294,7 @@ tuplestore_begin_common(int eflags, bool interXact, int maxKBytes)
* tuple store are allowed.
*
* interXact: if true, the files used for on-disk storage persist beyond the
- * end of the current transaction. NOTE: It's the caller's responsibility to
+ * end of the current transaction. NOTE: It's the caller's responsibility to
* create such a tuplestore in a memory context and resource owner that will
* also survive transaction boundaries, and to ensure the tuplestore is closed
* when it's no longer wanted.
@@ -333,7 +333,7 @@ tuplestore_begin_heap(bool randomAccess, bool interXact, int maxKBytes)
* any data into the tuplestore.
*
* eflags is a bitmask following the meanings used for executor node
- * startup flags (see executor.h). tuplestore pays attention to these bits:
+ * startup flags (see executor.h). tuplestore pays attention to these bits:
* EXEC_FLAG_REWIND need rewind to start
* EXEC_FLAG_BACKWARD need backward fetch
* If tuplestore_set_eflags is not called, REWIND is allowed, and BACKWARD
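A usage fragment under those rules (assumes the usual PostgreSQL backend headers such as utils/tuplestore.h and executor/executor.h; not a complete program):

    /* A caller that never scans backward but does need rewind. */
    Tuplestorestate *ts;

    ts = tuplestore_begin_heap(false,       /* randomAccess: no backward scan */
                               false,       /* interXact: dies with the xact */
                               work_mem);
    tuplestore_set_eflags(ts, EXEC_FLAG_REWIND);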
@@ -623,9 +623,9 @@ grow_memtuples(Tuplestorestate *state)
* We need to be sure that we do not cause LACKMEM to become true, else
* the space management algorithm will go nuts. The code above should
* never generate a dangerous request, but to be safe, check explicitly
- * that the array growth fits within availMem. (We could still cause
+ * that the array growth fits within availMem. (We could still cause
* LACKMEM if the memory chunk overhead associated with the memtuples
- * array were to increase. That shouldn't happen with any sane value of
+ * array were to increase. That shouldn't happen with any sane value of
* allowedMem, because at any array size large enough to risk LACKMEM,
* palloc would be treating both old and new arrays as separate chunks.
* But we'll check LACKMEM explicitly below just in case.)
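A minimal sketch of such a guarded growth step (simplified to a flat pointer array and a byte budget; names are invented):

    #include <stdbool.h>
    #include <stdlib.h>

    /* Sketch: double a pointer array only if the growth fits the budget. */
    static bool
    grow_if_budget_allows(void ***arr, int *capacity, long *avail_mem)
    {
        int         newcap = *capacity * 2;
        long        growth = (long) (newcap - *capacity) * sizeof(void *);
        void      **newarr;

        if (growth > *avail_mem)
            return false;       /* would cause LACKMEM; keep current size */

        newarr = realloc(*arr, (size_t) newcap * sizeof(void *));
        if (newarr == NULL)
            return false;
        *arr = newarr;
        *avail_mem -= growth;
        *capacity = newcap;
        return true;
    }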
@@ -694,7 +694,7 @@ tuplestore_puttuple(Tuplestorestate *state, HeapTuple tuple)
MemoryContext oldcxt = MemoryContextSwitchTo(state->context);
/*
- * Copy the tuple. (Must do this even in WRITEFILE case. Note that
+ * Copy the tuple. (Must do this even in WRITEFILE case. Note that
* COPYTUP includes USEMEM, so we needn't do that here.)
*/
tuple = COPYTUP(state, tuple);
@@ -851,7 +851,7 @@ tuplestore_puttuple_common(Tuplestorestate *state, void *tuple)
/*
* Fetch the next tuple in either forward or back direction.
- * Returns NULL if no more tuples. If should_free is set, the
+ * Returns NULL if no more tuples. If should_free is set, the
* caller must pfree the returned tuple when done with it.
*
* Backward scan is only allowed if randomAccess was set true or
diff --git a/src/backend/utils/time/combocid.c b/src/backend/utils/time/combocid.c
index 923355d3ceb..44f7ef1bbb8 100644
--- a/src/backend/utils/time/combocid.c
+++ b/src/backend/utils/time/combocid.c
@@ -15,7 +15,7 @@
* this module.
*
* To allow reusing existing combo cids, we also keep a hash table that
- * maps cmin,cmax pairs to combo cids. This keeps the data structure size
+ * maps cmin,cmax pairs to combo cids. This keeps the data structure size
* reasonable in most cases, since the number of unique pairs used by any
* one transaction is likely to be small.
*
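As a toy model of the forward and reverse mappings (PostgreSQL uses a real hash table for the reverse lookup; a linear search stands in for it here, and bounds checks are omitted):

    #include <stdint.h>

    typedef uint32_t ToyCommandId;

    typedef struct ToyComboKey
    {
        ToyCommandId cmin;
        ToyCommandId cmax;
    } ToyComboKey;

    /* Forward mapping: combo id -> (cmin, cmax). */
    static ToyComboKey combocids[1024];
    static int  used_combocids = 0;

    /* Reverse mapping: reuse an existing combo id for (cmin, cmax) if one
     * exists, else mint a new one. Reuse is what keeps both structures
     * small, since one transaction rarely uses many distinct pairs. */
    static int
    get_combo_id(ToyCommandId cmin, ToyCommandId cmax)
    {
        for (int i = 0; i < used_combocids; i++)
            if (combocids[i].cmin == cmin && combocids[i].cmax == cmax)
                return i;
        combocids[used_combocids].cmin = cmin;
        combocids[used_combocids].cmax = cmax;
        return used_combocids++;
    }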
diff --git a/src/backend/utils/time/snapmgr.c b/src/backend/utils/time/snapmgr.c
index e739d2d192b..3bf42423313 100644
--- a/src/backend/utils/time/snapmgr.c
+++ b/src/backend/utils/time/snapmgr.c
@@ -11,7 +11,7 @@
* regd_count and count it in RegisteredSnapshots, but this reference is not
* tracked by a resource owner. We used to use the TopTransactionResourceOwner
* to track this snapshot reference, but that introduces logical circularity
- * and thus makes it impossible to clean up in a sane fashion. It's better to
+ * and thus makes it impossible to clean up in a sane fashion. It's better to
* handle this reference as an internally-tracked registration, so that this
* module is entirely lower-level than ResourceOwners.
*
@@ -20,9 +20,9 @@
* tracked by any resource owner.
*
* These arrangements let us reset MyPgXact->xmin when there are no snapshots
- * referenced by this transaction. (One possible improvement would be to be
+ * referenced by this transaction. (One possible improvement would be to be
* able to advance Xmin when the snapshot with the earliest Xmin is no longer
- * referenced. That's a bit harder though, it requires more locking, and
+ * referenced. That's a bit harder though, it requires more locking, and
* anyway it should be rather uncommon to keep temporary snapshots referenced
* for too long.)
*
@@ -57,7 +57,7 @@
* CurrentSnapshot points to the only snapshot taken in transaction-snapshot
* mode, and to the latest one taken in a read-committed transaction.
* SecondarySnapshot is a snapshot that's always up-to-date as of the current
- * instant, even in transaction-snapshot mode. It should only be used for
+ * instant, even in transaction-snapshot mode. It should only be used for
 * special-purpose code (say, RI checking).
*
* These SnapshotData structs are static to simplify memory allocation
@@ -76,7 +76,7 @@ static Snapshot SecondarySnapshot = NULL;
* mode, we don't want it to say that BootstrapTransactionId is in progress.
*
* RecentGlobalXmin is initialized to InvalidTransactionId, to ensure that no
- * one tries to use a stale value. Readers should ensure that it has been set
+ * one tries to use a stale value. Readers should ensure that it has been set
* to something else before using it.
*/
TransactionId TransactionXmin = FirstNormalTransactionId;
@@ -114,7 +114,7 @@ static int RegisteredSnapshots = 0;
bool FirstSnapshotSet = false;
/*
- * Remember the serializable transaction snapshot, if any. We cannot trust
+ * Remember the serializable transaction snapshot, if any. We cannot trust
* FirstSnapshotSet in combination with IsolationUsesXactSnapshot(), because
* GUC may be reset before us, changing the value of IsolationUsesXactSnapshot.
*/
@@ -286,7 +286,7 @@ SetTransactionSnapshot(Snapshot sourcesnap, TransactionId sourcexid)
/*
* In transaction-snapshot mode, the first snapshot must live until end of
- * xact, so we must make a copy of it. Furthermore, if we're running in
+ * xact, so we must make a copy of it. Furthermore, if we're running in
* serializable mode, predicate.c needs to do its own processing.
*/
if (IsolationUsesXactSnapshot())
@@ -382,7 +382,7 @@ FreeSnapshot(Snapshot snapshot)
*
* If the passed snapshot is a statically-allocated one, or it is possibly
* subject to a future command counter update, create a new long-lived copy
- * with active refcount=1. Otherwise, only increment the refcount.
+ * with active refcount=1. Otherwise, only increment the refcount.
*/
void
PushActiveSnapshot(Snapshot snap)
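A usage fragment showing the usual pairing (assumes utils/snapmgr.h in a backend; error paths omitted):

    PushActiveSnapshot(GetTransactionSnapshot());

    /* ... run a query, SPI call, etc. under the active snapshot ... */

    PopActiveSnapshot();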
@@ -751,7 +751,7 @@ ExportSnapshot(Snapshot snapshot)
* However, we haven't got enough information to do that, since we don't
* know if we're at top level or not. For example, we could be inside a
* plpgsql function that is going to fire off other transactions via
- * dblink. Rather than disallow perfectly legitimate usages, don't make a
+ * dblink. Rather than disallow perfectly legitimate usages, don't make a
* check.
*
* Also note that we don't make any restriction on the transaction's
@@ -964,7 +964,7 @@ parseXidFromText(const char *prefix, char **s, const char *filename)
/*
* ImportSnapshot
- * Import a previously exported snapshot. The argument should be a
+ * Import a previously exported snapshot. The argument should be a
* filename in SNAPSHOT_EXPORT_DIR. Load the snapshot from that file.
* This is called by "SET TRANSACTION SNAPSHOT 'foo'".
*/
diff --git a/src/backend/utils/time/tqual.c b/src/backend/utils/time/tqual.c
index 44b4ddcb02f..82e1a4feb03 100644
--- a/src/backend/utils/time/tqual.c
+++ b/src/backend/utils/time/tqual.c
@@ -20,7 +20,7 @@
* TransactionIdDidCommit will both return true. If we check only
* TransactionIdDidCommit, we could consider a tuple committed when a
* later GetSnapshotData call will still think the originating transaction
- * is in progress, which leads to application-level inconsistency. The
+ * is in progress, which leads to application-level inconsistency. The
* upshot is that we gotta check TransactionIdIsInProgress first in all
* code paths, except for a few cases where we are looking at
* subtransactions of our own main transaction and so there can't be any
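A sketch of that required ordering, with stubs standing in for the real transam checks (not the actual tqual.c code):

    #include <stdbool.h>
    #include <stdint.h>

    /* Stubs standing in for the real transam.c checks. */
    extern bool xact_is_in_progress(uint32_t xid);
    extern bool xact_did_commit(uint32_t xid);

    /* A concurrently-committing transaction can answer true to both
     * questions, so "in progress" must be asked first and must win. */
    static bool
    counts_as_committed(uint32_t xid)
    {
        if (xact_is_in_progress(xid))
            return false;       /* not visible yet, whatever CLOG says */
        if (xact_did_commit(xid))
            return true;
        return false;           /* aborted or crashed */
    }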
@@ -90,13 +90,13 @@ static bool XidInMVCCSnapshot(TransactionId xid, Snapshot snapshot);
* just refrain from setting the hint bit until some future re-examination
* of the tuple.
*
- * We can always set hint bits when marking a transaction aborted. (Some
+ * We can always set hint bits when marking a transaction aborted. (Some
* code in heapam.c relies on that!)
*
* Also, if we are cleaning up HEAP_MOVED_IN or HEAP_MOVED_OFF entries, then
* we can always set the hint bits, since pre-9.0 VACUUM FULL always used
* synchronous commits and didn't move tuples that weren't previously
- * hinted. (This is not known by this subroutine, but is applied by its
+ * hinted. (This is not known by this subroutine, but is applied by its
* callers.) Note: old-style VACUUM FULL is gone, but we have to keep this
* module's support for MOVED_OFF/MOVED_IN flag bits for as long as we
* support in-place update from pre-9.0 databases.
@@ -542,7 +542,7 @@ HeapTupleSatisfiesAny(HeapTupleHeader tuple, Snapshot snapshot, Buffer buffer)
* This is a simplified version that only checks for VACUUM moving conditions.
* It's appropriate for TOAST usage because TOAST really doesn't want to do
* its own time qual checks; if you can see the main table row that contains
- * a TOAST reference, you should be able to see the TOASTed value. However,
+ * a TOAST reference, you should be able to see the TOASTed value. However,
* vacuuming a TOAST table is independent of the main table, and in case such
* a vacuum fails partway through, we'd better do this much checking.
*
@@ -791,7 +791,7 @@ HeapTupleSatisfiesUpdate(HeapTupleHeader tuple, CommandId curcid,
{
/*
* If it's only locked but neither EXCL_LOCK nor KEYSHR_LOCK is
- * set, it cannot possibly be running. Otherwise we need to check.
+ * set, it cannot possibly be running. Otherwise we need to check.
*/
if ((tuple->t_infomask & (HEAP_XMAX_EXCL_LOCK |
HEAP_XMAX_KEYSHR_LOCK)) &&
@@ -1286,7 +1286,7 @@ HeapTupleSatisfiesMVCC(HeapTupleHeader tuple, Snapshot snapshot,
* we mainly want to know is if a tuple is potentially visible to *any*
* running transaction. If so, it can't be removed yet by VACUUM.
*
- * OldestXmin is a cutoff XID (obtained from GetOldestXmin()). Tuples
+ * OldestXmin is a cutoff XID (obtained from GetOldestXmin()). Tuples
* deleted by XIDs >= OldestXmin are deemed "recently dead"; they might
* still be visible to some open transaction, so we can't remove them,
* even if we see that the deleting transaction has committed.
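A sketch of that cutoff rule (a plain unsigned compare stands in for the wraparound-aware XID comparison the real code uses):

    /* Sketch: classify a committed-delete tuple against the vacuum cutoff. */
    typedef enum ToyTupleFate
    {
        TOY_DEAD,               /* safe for VACUUM to remove */
        TOY_RECENTLY_DEAD       /* an open snapshot may still see it */
    } ToyTupleFate;

    static ToyTupleFate
    classify_deleted(unsigned int xmax, unsigned int oldest_xmin)
    {
        if (xmax >= oldest_xmin)
            return TOY_RECENTLY_DEAD;
        return TOY_DEAD;
    }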
@@ -1374,7 +1374,7 @@ HeapTupleSatisfiesVacuum(HeapTupleHeader tuple, TransactionId OldestXmin,
}
/*
- * Okay, the inserter committed, so it was good at some point. Now what
+ * Okay, the inserter committed, so it was good at some point. Now what
* about the deleting transaction?
*/
if (tuple->t_infomask & HEAP_XMAX_INVALID)
@@ -1514,7 +1514,7 @@ HeapTupleSatisfiesVacuum(HeapTupleHeader tuple, TransactionId OldestXmin,
 * in lieu of HeapTupleSatisfiesVacuum when the tuple has just been
* tested by HeapTupleSatisfiesMVCC and, therefore, any hint bits that
 * can be set should already be set. We assume that if no hint bits are set,
- * either for xmin or xmax, the transaction is still running. This is
+ * either for xmin or xmax, the transaction is still running. This is
* therefore faster than HeapTupleSatisfiesVacuum, because we don't
* consult CLOG (and also because we don't need to give an exact answer,
* just whether or not the tuple is surely dead).
@@ -1575,7 +1575,7 @@ XidInMVCCSnapshot(TransactionId xid, Snapshot snapshot)
/*
* Make a quick range check to eliminate most XIDs without looking at the
- * xip arrays. Note that this is OK even if we convert a subxact XID to
+ * xip arrays. Note that this is OK even if we convert a subxact XID to
* its parent below, because a subxact with XID < xmin has surely also got
* a parent with XID < xmin, while one with XID >= xmax must belong to a
* parent that was not yet committed at the time of this snapshot.
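A standalone sketch of that fast path, including the wraparound-aware comparison that makes the range check valid (names invented; not the actual XidInMVCCSnapshot):

    #include <stdbool.h>
    #include <stdint.h>

    typedef uint32_t ToyXid;

    /* Wraparound-aware "a < b" for normal XIDs, per TransactionIdPrecedes. */
    static bool
    xid_precedes(ToyXid a, ToyXid b)
    {
        return (int32_t) (a - b) < 0;
    }

    /* Fast path: XIDs before xmin were already finished when the snapshot
     * was taken; XIDs at or beyond xmax had not yet started. Only the
     * survivors require searching the xip[] array. */
    static bool
    xid_in_snapshot_fastpath(ToyXid xid, ToyXid xmin, ToyXid xmax,
                             bool *need_xip_search)
    {
        *need_xip_search = false;
        if (xid_precedes(xid, xmin))
            return false;       /* surely finished: not in progress */
        if (!xid_precedes(xid, xmax))
            return true;        /* not yet started: treat as in progress */
        *need_xip_search = true;    /* must consult the xip array */
        return false;
    }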