Diffstat (limited to 'src/backend')
34 files changed, 49 insertions, 48 deletions
diff --git a/src/backend/access/gin/ginbtree.c b/src/backend/access/gin/ginbtree.c
index 86f938686c3..b7a5013896a 100644
--- a/src/backend/access/gin/ginbtree.c
+++ b/src/backend/access/gin/ginbtree.c
@@ -766,7 +766,7 @@ ginFinishSplit(GinBtree btree, GinBtreeStack *stack, bool freestack,
 /*
  * An entry point to ginFinishSplit() that is used when we stumble upon an
  * existing incompletely split page in the tree, as opposed to completing a
- * split that we just made outselves. The difference is that stack->buffer may
+ * split that we just made ourselves. The difference is that stack->buffer may
  * be merely share-locked on entry, and will be upgraded to exclusive mode.
  *
  * Note: Upgrading the lock momentarily releases it. Doing that in a scan
diff --git a/src/backend/access/heap/pruneheap.c b/src/backend/access/heap/pruneheap.c
index d2eecaf7ebc..3cdfc5b7f1b 100644
--- a/src/backend/access/heap/pruneheap.c
+++ b/src/backend/access/heap/pruneheap.c
@@ -33,7 +33,7 @@ typedef struct
 {
  /*-------------------------------------------------------
-  * Arguments passed to heap_page_and_freeze()
+  * Arguments passed to heap_page_prune_and_freeze()
   *-------------------------------------------------------
   */
 
@@ -306,7 +306,7 @@ heap_page_prune_opt(Relation relation, Buffer buffer)
  * If the HEAP_PRUNE_FREEZE option is set, we will also freeze tuples if it's
  * required in order to advance relfrozenxid / relminmxid, or if it's
  * considered advantageous for overall system performance to do so now. The
- * 'cutoffs', 'presult', 'new_refrozen_xid' and 'new_relmin_mxid' arguments
+ * 'cutoffs', 'presult', 'new_relfrozen_xid' and 'new_relmin_mxid' arguments
  * are required when freezing. When HEAP_PRUNE_FREEZE option is set, we also
  * set presult->all_visible and presult->all_frozen on exit, to indicate if
  * the VM bits can be set. They are always set to false when the
@@ -337,7 +337,7 @@ heap_page_prune_opt(Relation relation, Buffer buffer)
  * off_loc is the offset location required by the caller to use in error
  * callback.
  *
- * new_relfrozen_xid and new_relmin_xid must provided by the caller if the
+ * new_relfrozen_xid and new_relmin_mxid must provided by the caller if the
  * HEAP_PRUNE_FREEZE option is set. On entry, they contain the oldest XID and
  * multi-XID seen on the relation so far. They will be updated with oldest
  * values present on the page after pruning. After processing the whole
diff --git a/src/backend/access/nbtree/nbtutils.c b/src/backend/access/nbtree/nbtutils.c
index 498b8d20358..ecbbc2466d2 100644
--- a/src/backend/access/nbtree/nbtutils.c
+++ b/src/backend/access/nbtree/nbtutils.c
@@ -1756,7 +1756,7 @@ _bt_start_prim_scan(IndexScanDesc scan, ScanDirection dir)
  *
  * (The rules are the same for backwards scans, except that the operators are
  * flipped: just replace the precondition's >= operator with a <=, and the
- * postcondition's <= operator with with a >=. In other words, just swap the
+ * postcondition's <= operator with a >=. In other words, just swap the
  * precondition with the postcondition.)
  *
  * We also deal with "advancing" non-required arrays here. Callers whose
@@ -4133,7 +4133,7 @@ _bt_checkkeys_look_ahead(IndexScanDesc scan, BTReadPageState *pstate,
 else
 {
  /*
-  * Failure -- "ahead" tuple is too far ahead (we were too aggresive).
+  * Failure -- "ahead" tuple is too far ahead (we were too aggressive).
   *
   * Reset the number of rechecks, and aggressively reduce the target
   * distance (we're much more aggressive here than we were when the
diff --git a/src/backend/access/rmgrdesc/xactdesc.c b/src/backend/access/rmgrdesc/xactdesc.c
index 41b842d80ec..dccca201e05 100644
--- a/src/backend/access/rmgrdesc/xactdesc.c
+++ b/src/backend/access/rmgrdesc/xactdesc.c
@@ -25,7 +25,7 @@
  * Parse the WAL format of an xact commit and abort records into an easier to
  * understand format.
  *
- * This routines are in xactdesc.c because they're accessed in backend (when
+ * These routines are in xactdesc.c because they're accessed in backend (when
  * replaying WAL) and frontend (pg_waldump) code. This file is the only xact
  * specific one shared between both. They're complicated enough that
  * duplication would be bothersome.
diff --git a/src/backend/catalog/pg_constraint.c b/src/backend/catalog/pg_constraint.c
index 778b7c381df..45a99af774e 100644
--- a/src/backend/catalog/pg_constraint.c
+++ b/src/backend/catalog/pg_constraint.c
@@ -1668,7 +1668,7 @@ DeconstructFkConstraintRow(HeapTuple tuple, int *numfks,
 }
 
 /*
- * FindFkPeriodOpers -
+ * FindFKPeriodOpers -
  *
  * Looks up the operator oids used for the PERIOD part of a temporal foreign key.
  * The opclass should be the opclass of that PERIOD element.
diff --git a/src/backend/catalog/system_functions.sql b/src/backend/catalog/system_functions.sql
index fe2bb50f46d..ae099e328c2 100644
--- a/src/backend/catalog/system_functions.sql
+++ b/src/backend/catalog/system_functions.sql
@@ -5,7 +5,7 @@
  *
  * src/backend/catalog/system_functions.sql
  *
- * This file redefines certain built-in functions that it's impractical
+ * This file redefines certain built-in functions that are impractical
  * to fully define in pg_proc.dat. In most cases that's because they use
  * SQL-standard function bodies and/or default expressions. The node
  * tree representations of those are too unreadable, platform-dependent,
diff --git a/src/backend/commands/amcmds.c b/src/backend/commands/amcmds.c
index 10e386288a6..aaa0f9a1dc8 100644
--- a/src/backend/commands/amcmds.c
+++ b/src/backend/commands/amcmds.c
@@ -167,7 +167,7 @@ get_index_am_oid(const char *amname, bool missing_ok)
 
 /*
  * get_table_am_oid - given an access method name, look up its OID
- * and verify it corresponds to an table AM.
+ * and verify it corresponds to a table AM.
  */
 Oid
 get_table_am_oid(const char *amname, bool missing_ok)
diff --git a/src/backend/commands/copyfrom.c b/src/backend/commands/copyfrom.c
index 06bc14636d3..ce4d62e707c 100644
--- a/src/backend/commands/copyfrom.c
+++ b/src/backend/commands/copyfrom.c
@@ -996,7 +996,7 @@ CopyFrom(CopyFromState cstate)
 cstate->escontext->error_occurred)
 {
  /*
-  * Soft error occured, skip this tuple and deal with error
+  * Soft error occurred, skip this tuple and deal with error
   * information according to ON_ERROR.
   */
 if (cstate->opts.on_error == COPY_ON_ERROR_IGNORE)
diff --git a/src/backend/commands/dbcommands.c b/src/backend/commands/dbcommands.c
index 65464fac8e5..8229dfa1f22 100644
--- a/src/backend/commands/dbcommands.c
+++ b/src/backend/commands/dbcommands.c
@@ -3312,7 +3312,7 @@ dbase_redo(XLogReaderState *record)
  */
 FlushDatabaseBuffers(xlrec->src_db_id);
 
-/* Close all sgmr fds in all backends. */
+/* Close all smgr fds in all backends. */
 WaitForProcSignalBarrier(EmitProcSignalBarrier(PROCSIGNAL_BARRIER_SMGRRELEASE));
 
 /*
@@ -3378,7 +3378,7 @@ dbase_redo(XLogReaderState *record)
 /* Clean out the xlog relcache too */
 XLogDropDatabase(xlrec->db_id);
 
-/* Close all sgmr fds in all backends. */
+/* Close all smgr fds in all backends. */
 WaitForProcSignalBarrier(EmitProcSignalBarrier(PROCSIGNAL_BARRIER_SMGRRELEASE));
 
 for (i = 0; i < xlrec->ntablespaces; i++)
diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c
index f72b2dcadfb..fbffaef1966 100644
--- a/src/backend/commands/tablecmds.c
+++ b/src/backend/commands/tablecmds.c
@@ -5687,7 +5687,7 @@ ATParseTransformCmd(List **wqueue, AlteredTableInfo *tab, Relation rel,
 case AT_AddIndex:
 
  /*
-  * A primary key on a inheritance parent needs supporting NOT
+  * A primary key on an inheritance parent needs supporting NOT
   * NULL constraint on its children; enqueue commands to create
   * those or mark them inherited if they already exist.
   */
diff --git a/src/backend/commands/vacuumparallel.c b/src/backend/commands/vacuumparallel.c
index 5174a4e9753..f26070bff2a 100644
--- a/src/backend/commands/vacuumparallel.c
+++ b/src/backend/commands/vacuumparallel.c
@@ -8,7 +8,7 @@
  *
  * In a parallel vacuum, we perform both index bulk deletion and index cleanup
  * with parallel worker processes. Individual indexes are processed by one
- * vacuum process. ParalleVacuumState contains shared information as well as
+ * vacuum process. ParallelVacuumState contains shared information as well as
  * the memory space for storing dead items allocated in the DSA area. We
  * launch parallel worker processes at the start of parallel index
  * bulk-deletion and index cleanup and once all indexes are processed, the
diff --git a/src/backend/executor/execExpr.c b/src/backend/executor/execExpr.c
index 79087cc6d63..eb5ac208248 100644
--- a/src/backend/executor/execExpr.c
+++ b/src/backend/executor/execExpr.c
@@ -4400,7 +4400,7 @@ ExecInitJsonExpr(JsonExpr *jsexpr, ExprState *state,
 /*
  * Add a special step, if needed, to check if the coercion evaluation ran
  * into an error but was not thrown because the ON ERROR behavior is not
- * ERROR. It will set jsesestate->error if an error did occur.
+ * ERROR. It will set jsestate->error if an error did occur.
  */
 if (jsestate->jump_eval_coercion >= 0 && escontext != NULL)
 {
diff --git a/src/backend/optimizer/path/equivclass.c b/src/backend/optimizer/path/equivclass.c
index 1d6bedb399a..21ce1ae2e13 100644
--- a/src/backend/optimizer/path/equivclass.c
+++ b/src/backend/optimizer/path/equivclass.c
@@ -2885,7 +2885,7 @@ add_child_join_rel_equivalences(PlannerInfo *root,
 /*
  * add_setop_child_rel_equivalences
  *   Add equivalence members for each non-resjunk target in 'child_tlist'
- *   to the EquivalenceClass in the corresponding setop_pathkey's pk_class.
+ *   to the EquivalenceClass in the corresponding setop_pathkey's pk_eclass.
  *
  * 'root' is the PlannerInfo belonging to the top-level set operation.
  * 'child_rel' is the RelOptInfo of the child relation we're adding
diff --git a/src/backend/optimizer/path/pathkeys.c b/src/backend/optimizer/path/pathkeys.c
index 1d61881a6b6..8b258cbef92 100644
--- a/src/backend/optimizer/path/pathkeys.c
+++ b/src/backend/optimizer/path/pathkeys.c
@@ -384,7 +384,7 @@ group_keys_reorder_by_pathkeys(List *pathkeys, List **group_pathkeys,
  * *group_pathkeys containing grouping pathkeys altogether with aggregate
  * pathkeys. If we process aggregate pathkeys we could get an invalid
  * result of get_sortgroupref_clause_noerr(), because their
- * pathkey->pk_eclass->ec_sortref doesn't referece query targetlist. So,
+ * pathkey->pk_eclass->ec_sortref doesn't reference query targetlist. So,
  * we allocate a separate list of pathkeys for lookups.
  */
 grouping_pathkeys = list_copy_head(*group_pathkeys, num_groupby_pathkeys);
diff --git a/src/backend/optimizer/prep/prepunion.c b/src/backend/optimizer/prep/prepunion.c
index afcb5c0f0f0..3f14e90a45b 100644
--- a/src/backend/optimizer/prep/prepunion.c
+++ b/src/backend/optimizer/prep/prepunion.c
@@ -214,7 +214,8 @@ set_operation_ordered_results_useful(SetOperationStmt *setop)
  *
  * Returns a RelOptInfo for the subtree, as well as these output parameters:
  * *pTargetList: receives the fully-fledged tlist for the subtree's top plan
- * *istrivial_tlist: true iif datatypes between parent and child match.
+ * *istrivial_tlist: true if, and only if, datatypes between parent and child
+ * match.
  *
  * The pTargetList output parameter is mostly redundant with the pathtarget
  * of the returned RelOptInfo, but for the moment we need it because much of
diff --git a/src/backend/parser/parse_jsontable.c b/src/backend/parser/parse_jsontable.c
index 37f2cba0ef0..b2519c2f329 100644
--- a/src/backend/parser/parse_jsontable.c
+++ b/src/backend/parser/parse_jsontable.c
@@ -70,7 +70,7 @@ static JsonTablePlan *makeJsonTableSiblingJoin(JsonTablePlan *lplan,
  * (jt->context_item) and the column-generating expressions (jt->columns) to
  * populate TableFunc.docexpr and TableFunc.colvalexprs, respectively. Also,
  * the PASSING values (jt->passing) are transformed and added into
- * TableFunc.passvalexprs.
+ * TableFunc.passingvalexprs.
  */
 ParseNamespaceItem *
 transformJsonTable(ParseState *pstate, JsonTable *jt)
diff --git a/src/backend/parser/parse_utilcmd.c b/src/backend/parser/parse_utilcmd.c
index 7fd8fbc0b4b..fef084f5d52 100644
--- a/src/backend/parser/parse_utilcmd.c
+++ b/src/backend/parser/parse_utilcmd.c
@@ -3451,7 +3451,7 @@ checkPartition(Relation rel, Oid partRelOid)
 
 /*
  * transformPartitionCmdForSplit
- *   Analyze the ALTER TABLLE ... SPLIT PARTITION command
+ *   Analyze the ALTER TABLE ... SPLIT PARTITION command
  *
  * For each new partition sps->bound is set to the transformed value of bound.
  * Does checks for bounds of new partitions.
@@ -3490,7 +3490,7 @@ transformPartitionCmdForSplit(CreateStmtContext *cxt, PartitionCmd *partcmd)
 
 /*
  * transformPartitionCmdForMerge
- *   Analyze the ALTER TABLLE ... MERGE PARTITIONS command
+ *   Analyze the ALTER TABLE ... MERGE PARTITIONS command
  *
  * Does simple checks for merged partitions. Calculates bound of resulting
  * partition.
diff --git a/src/backend/partitioning/partbounds.c b/src/backend/partitioning/partbounds.c
index 0dbacf39c05..b08edf87a69 100644
--- a/src/backend/partitioning/partbounds.c
+++ b/src/backend/partitioning/partbounds.c
@@ -5146,7 +5146,7 @@ get_partition_bound_spec(Oid partOid, RangeVar *name)
 * the first of new partitions) then lower bound of "spec" should be equal (or
 * greater than or equal in case defaultPart=true) to lower bound of split
 * partition. If last=true (this means that "spec" is the last of new
- * partitions) then upper bound of of "spec" should be equal (or less than or
+ * partitions) then upper bound of "spec" should be equal (or less than or
 * equal in case defaultPart=true) to upper bound of split partition.
 *
 * parent: partitioned table
@@ -5245,8 +5245,8 @@ check_partition_bounds_for_split_range(Relation parent,
 false, split_upper);
 
 /*
- * Upper bound of of "spec" should be equal (or less than or equal
- * in case defaultPart=true) to upper bound of split partition.
+ * Upper bound of "spec" should be equal (or less than or equal in
+ * case defaultPart=true) to upper bound of split partition.
  */
 if ((!defaultPart && cmpval) || (defaultPart && cmpval > 0))
 overlap = true;
diff --git a/src/backend/partitioning/partprune.c b/src/backend/partitioning/partprune.c
index 9006afd9d21..9a1a7faac7a 100644
--- a/src/backend/partitioning/partprune.c
+++ b/src/backend/partitioning/partprune.c
@@ -1825,7 +1825,7 @@ match_clause_to_partition_key(GeneratePruningStepsContext *context,
 BooleanTest *new_booltest = (BooleanTest *) copyObject(clause);
 NullTest *nulltest;
 
-/* We expect 'noteq' to only be set to true for BooleanTests */
+/* We expect 'notclause' to only be set to true for BooleanTests */
 Assert(IsA(clause, BooleanTest));
 
 /* reverse the bool test */
diff --git a/src/backend/postmaster/launch_backend.c b/src/backend/postmaster/launch_backend.c
index cb0c3e2f8ab..4e9dde1517b 100644
--- a/src/backend/postmaster/launch_backend.c
+++ b/src/backend/postmaster/launch_backend.c
@@ -187,7 +187,7 @@ child_process_kind child_process_kinds[] = {
 /*
  * WAL senders start their life as regular backend processes, and change
  * their type after authenticating the client for replication. We list it
- * here forPostmasterChildName() but cannot launch them directly.
+ * here for PostmasterChildName() but cannot launch them directly.
  */
 [B_WAL_SENDER] = {"wal sender", NULL, true},
 [B_SLOTSYNC_WORKER] = {"slot sync worker", ReplSlotSyncWorkerMain, true},
diff --git a/src/backend/postmaster/walsummarizer.c b/src/backend/postmaster/walsummarizer.c
index 0cd5080fa78..72f6c04478d 100644
--- a/src/backend/postmaster/walsummarizer.c
+++ b/src/backend/postmaster/walsummarizer.c
@@ -108,7 +108,7 @@ static WalSummarizerData *WalSummarizerCtl;
 
 /*
  * When we reach end of WAL and need to read more, we sleep for a number of
- * milliseconds that is a integer multiple of MS_PER_SLEEP_QUANTUM. This is
+ * milliseconds that is an integer multiple of MS_PER_SLEEP_QUANTUM. This is
  * the multiplier. It should vary between 1 and MAX_SLEEP_QUANTA, depending
  * on system activity. See summarizer_wait_for_wal() for how we adjust this.
  */
diff --git a/src/backend/replication/logical/slotsync.c b/src/backend/replication/logical/slotsync.c
index bda0de52db9..cb39adcd0ea 100644
--- a/src/backend/replication/logical/slotsync.c
+++ b/src/backend/replication/logical/slotsync.c
@@ -88,7 +88,7 @@
  * overwrites.
  *
  * The 'last_start_time' is needed by postmaster to start the slot sync worker
- * once per SLOTSYNC_RESTART_INTERVAL_SEC. In cases where a immediate restart
+ * once per SLOTSYNC_RESTART_INTERVAL_SEC. In cases where an immediate restart
  * is expected (e.g., slot sync GUCs change), slot sync worker will reset
  * last_start_time before exiting, so that postmaster can start the worker
  * without waiting for SLOTSYNC_RESTART_INTERVAL_SEC.
diff --git a/src/backend/replication/walsender.c b/src/backend/replication/walsender.c
index bc40c454de4..9bf7c67f37d 100644
--- a/src/backend/replication/walsender.c
+++ b/src/backend/replication/walsender.c
@@ -3493,7 +3493,7 @@ WalSndDone(WalSndSendDataCallback send_data)
  * Returns the latest point in WAL that has been safely flushed to disk.
  * This should only be called when in recovery.
  *
- * This is called either by cascading walsender to find WAL postion to be sent
+ * This is called either by cascading walsender to find WAL position to be sent
  * to a cascaded standby or by slot synchronization operation to validate remote
  * slot's lsn before syncing it locally.
  *
diff --git a/src/backend/statistics/dependencies.c b/src/backend/statistics/dependencies.c
index 5eda06839ea..8d01a93b309 100644
--- a/src/backend/statistics/dependencies.c
+++ b/src/backend/statistics/dependencies.c
@@ -794,7 +794,7 @@ dependency_is_compatible_clause(Node *clause, Index relid, AttrNumber *attnum)
 }
 else if (IsA(clause, ScalarArrayOpExpr))
 {
-/* If it's an scalar array operator, check for Var IN Const. */
+/* If it's a scalar array operator, check for Var IN Const. */
 ScalarArrayOpExpr *expr = (ScalarArrayOpExpr *) clause;
 
 /*
@@ -1222,7 +1222,7 @@ dependency_is_compatible_expression(Node *clause, Index relid, List *statlist, N
 }
 else if (IsA(clause, ScalarArrayOpExpr))
 {
-/* If it's an scalar array operator, check for Var IN Const. */
+/* If it's a scalar array operator, check for Var IN Const. */
 ScalarArrayOpExpr *expr = (ScalarArrayOpExpr *) clause;
 
 /*
diff --git a/src/backend/storage/aio/read_stream.c b/src/backend/storage/aio/read_stream.c
index f54dacdd914..634cf4f0d10 100644
--- a/src/backend/storage/aio/read_stream.c
+++ b/src/backend/storage/aio/read_stream.c
@@ -541,9 +541,9 @@ read_stream_begin_relation(int flags,
 stream->distance = 1;
 
 /*
- * Since we always always access the same relation, we can initialize
- * parts of the ReadBuffersOperation objects and leave them that way, to
- * avoid wasting CPU cycles writing to them for each read.
+ * Since we always access the same relation, we can initialize parts of
+ * the ReadBuffersOperation objects and leave them that way, to avoid
+ * wasting CPU cycles writing to them for each read.
  */
 for (int i = 0; i < max_ios; ++i)
 {
diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c
index 901b7230fb9..49637284f91 100644
--- a/src/backend/storage/buffer/bufmgr.c
+++ b/src/backend/storage/buffer/bufmgr.c
@@ -1073,7 +1073,7 @@ PinBufferForBlock(Relation rel,
 
 /*
  * If there is no Relation it usually implies recovery and thus permanent,
- * but we take an argmument because CreateAndCopyRelationData can reach us
+ * but we take an argument because CreateAndCopyRelationData can reach us
  * with only an SMgrRelation for an unlogged relation that we don't want
  * to flag with BM_PERMANENT.
  */
diff --git a/src/backend/storage/lmgr/lock.c b/src/backend/storage/lmgr/lock.c
index 5022a50dd7b..5154353c844 100644
--- a/src/backend/storage/lmgr/lock.c
+++ b/src/backend/storage/lmgr/lock.c
@@ -1032,7 +1032,7 @@ LockAcquireExtended(const LOCKTAG *locktag,
 
 /*
  * Sleep till someone wakes me up. We do this even in the dontWait
- * case, beause while trying to go to sleep, we may discover that we
+ * case, because while trying to go to sleep, we may discover that we
  * can acquire the lock immediately after all.
  */
 
diff --git a/src/backend/storage/lmgr/proc.c b/src/backend/storage/lmgr/proc.c
index 162b1f919db..e4f256c63c7 100644
--- a/src/backend/storage/lmgr/proc.c
+++ b/src/backend/storage/lmgr/proc.c
@@ -1047,7 +1047,7 @@ AuxiliaryPidGetProc(int pid)
  * called, because it could be that when we try to find a position at which
  * to insert ourself into the wait queue, we discover that we must be inserted
  * ahead of everyone who wants a lock that conflict with ours. In that case,
- * we get the lock immediately. Beause of this, it's sensible for this function
+ * we get the lock immediately. Because of this, it's sensible for this function
  * to have a dontWait argument, despite the name.
  *
  * The lock table's partition lock must be held at entry, and will be held
diff --git a/src/backend/utils/adt/jsonpath_exec.c b/src/backend/utils/adt/jsonpath_exec.c
index 4daf1a68d9d..8a0a2dbc850 100644
--- a/src/backend/utils/adt/jsonpath_exec.c
+++ b/src/backend/utils/adt/jsonpath_exec.c
@@ -4221,7 +4221,7 @@ JsonTableSetDocument(TableFuncScanState *state, Datum value)
 }
 
 /*
- * Evaluate a JsonTablePlan's jsonpath to get a new row pattren from
+ * Evaluate a JsonTablePlan's jsonpath to get a new row pattern from
  * the given context item
  */
 static void
@@ -4339,7 +4339,7 @@ JsonTablePlanScanNextRow(JsonTablePlanState *planstate)
 /*
  * Now fetch the nested plan's current row to be joined against the
  * parent row. Any further nested plans' paths will be re-evaluated
- * reursively, level at a time, after setting each nested plan's
+ * recursively, level at a time, after setting each nested plan's
  * current row.
  */
 (void) JsonTablePlanNextRow(planstate->nested);
diff --git a/src/backend/utils/adt/multirangetypes.c b/src/backend/utils/adt/multirangetypes.c
index 8f12c953cad..558c6c18c34 100644
--- a/src/backend/utils/adt/multirangetypes.c
+++ b/src/backend/utils/adt/multirangetypes.c
@@ -330,7 +330,7 @@ multirange_out(PG_FUNCTION_ARGS)
 }
 
 /*
- * Binary representation: First a int32-sized count of ranges, followed by
+ * Binary representation: First an int32-sized count of ranges, followed by
  * ranges in their native binary representation.
  */
 Datum
diff --git a/src/backend/utils/adt/selfuncs.c b/src/backend/utils/adt/selfuncs.c
index 35f8f306ee4..5f5d7959d8e 100644
--- a/src/backend/utils/adt/selfuncs.c
+++ b/src/backend/utils/adt/selfuncs.c
@@ -6968,7 +6968,7 @@ btcostestimate(PlannerInfo *root, IndexPath *path, double loop_count,
  * btree scans, making the top-level scan look like a continuous scan
  * (as opposed to num_sa_scans-many primitive index scans). After
  * all, btree scans mostly work like that at runtime. However, such a
- * scheme would badly bias genericcostestimate's simplistic appraoch
+ * scheme would badly bias genericcostestimate's simplistic approach
  * to calculating numIndexPages through prorating.
  *
  * Stick with the approach taken by non-native SAOP scans for now.
diff --git a/src/backend/utils/mmgr/aset.c b/src/backend/utils/mmgr/aset.c
index 751cc3408c5..dede30dd86a 100644
--- a/src/backend/utils/mmgr/aset.c
+++ b/src/backend/utils/mmgr/aset.c
@@ -979,8 +979,8 @@ AllocSetAlloc(MemoryContext context, Size size, int flags)
 Assert(set->blocks != NULL);
 
 /*
- * If requested size exceeds maximum for chunks we hand the the request
- * off to AllocSetAllocLarge().
+ * If requested size exceeds maximum for chunks we hand the request off to
+ * AllocSetAllocLarge().
  */
 if (size > set->allocChunkLimit)
 return AllocSetAllocLarge(context, size, flags);
diff --git a/src/backend/utils/mmgr/bump.c b/src/backend/utils/mmgr/bump.c
index a98bafbcc03..c60c9c131e3 100644
--- a/src/backend/utils/mmgr/bump.c
+++ b/src/backend/utils/mmgr/bump.c
@@ -505,8 +505,8 @@ BumpAlloc(MemoryContext context, Size size, int flags)
 #endif
 
 /*
- * If requested size exceeds maximum for chunks we hand the the request
- * off to BumpAllocLarge().
+ * If requested size exceeds maximum for chunks we hand the request off to
+ * BumpAllocLarge().
  */
 if (chunk_size > set->allocChunkLimit)
 return BumpAllocLarge(context, size, flags);
diff --git a/src/backend/utils/mmgr/generation.c b/src/backend/utils/mmgr/generation.c
index 5d81af1f947..b858b8d0f7b 100644
--- a/src/backend/utils/mmgr/generation.c
+++ b/src/backend/utils/mmgr/generation.c
@@ -541,8 +541,8 @@ GenerationAlloc(MemoryContext context, Size size, int flags)
 #endif
 
 /*
- * If requested size exceeds maximum for chunks we hand the the request
- * off to GenerationAllocLarge().
+ * If requested size exceeds maximum for chunks we hand the request off to
+ * GenerationAllocLarge().
  */
 if (chunk_size > set->allocChunkLimit)
 return GenerationAllocLarge(context, size, flags);