Diffstat (limited to 'src/backend/executor')
30 files changed, 199 insertions, 199 deletions
diff --git a/src/backend/executor/execExpr.c b/src/backend/executor/execExpr.c
index 61c90a16009..a298b92af8c 100644
--- a/src/backend/executor/execExpr.c
+++ b/src/backend/executor/execExpr.c
@@ -777,7 +777,7 @@ ExecInitExprRec(Expr *node, PlanState *parent, ExprState *state,
 if (nfuncs != winstate->numfuncs)
 ereport(ERROR,
 (errcode(ERRCODE_WINDOWING_ERROR),
- errmsg("window function calls cannot be nested")));
+ errmsg("window function calls cannot be nested")));
 }
 else
 {
@@ -1545,8 +1545,8 @@ ExecInitExprRec(Expr *node, PlanState *parent, ExprState *state,
 ereport(ERROR,
 (errcode(ERRCODE_DATATYPE_MISMATCH),
 errmsg("ROW() column has type %s instead of type %s",
- format_type_be(exprType((Node *) e)),
- format_type_be(attrs[i]->atttypid))));
+ format_type_be(exprType((Node *) e)),
+ format_type_be(attrs[i]->atttypid))));
 }
 else
 {
@@ -2076,10 +2076,10 @@ ExecInitFunc(ExprEvalStep *scratch, Expr *node, List *args, Oid funcid,
 if (nargs > FUNC_MAX_ARGS)
 ereport(ERROR,
 (errcode(ERRCODE_TOO_MANY_ARGUMENTS),
- errmsg_plural("cannot pass more than %d argument to a function",
- "cannot pass more than %d arguments to a function",
- FUNC_MAX_ARGS,
- FUNC_MAX_ARGS)));
+ errmsg_plural("cannot pass more than %d argument to a function",
+ "cannot pass more than %d arguments to a function",
+ FUNC_MAX_ARGS,
+ FUNC_MAX_ARGS)));
 /* Allocate function lookup data and parameter workspace for this call */
 scratch->d.func.finfo = palloc0(sizeof(FmgrInfo));
@@ -2105,7 +2105,7 @@ ExecInitFunc(ExprEvalStep *scratch, Expr *node, List *args, Oid funcid,
 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
 errmsg("set-valued function called in context that cannot accept a set"),
 parent ? executor_errposition(parent->state,
- exprLocation((Node *) node)) : 0));
+ exprLocation((Node *) node)) : 0));
 /* Build code to evaluate arguments directly into the fcinfo struct */
 argno = 0;
@@ -2380,7 +2380,7 @@ ExecInitArrayRef(ExprEvalStep *scratch, ArrayRef *aref, PlanState *parent,
 /* Each subscript is evaluated into subscriptvalue/subscriptnull */
 ExecInitExprRec(e, parent, state,
- &arefstate->subscriptvalue, &arefstate->subscriptnull);
+ &arefstate->subscriptvalue, &arefstate->subscriptnull);
 /* ... and then ARRAYREF_SUBSCRIPT saves it into step's workspace */
 scratch->opcode = EEOP_ARRAYREF_SUBSCRIPT;
@@ -2413,7 +2413,7 @@ ExecInitArrayRef(ExprEvalStep *scratch, ArrayRef *aref, PlanState *parent,
 /* Each subscript is evaluated into subscriptvalue/subscriptnull */
 ExecInitExprRec(e, parent, state,
- &arefstate->subscriptvalue, &arefstate->subscriptnull);
+ &arefstate->subscriptvalue, &arefstate->subscriptnull);
 /* ... and then ARRAYREF_SUBSCRIPT saves it into step's workspace */
 scratch->opcode = EEOP_ARRAYREF_SUBSCRIPT;
diff --git a/src/backend/executor/execExprInterp.c b/src/backend/executor/execExprInterp.c
index 146d1a0bece..c227d9bdd99 100644
--- a/src/backend/executor/execExprInterp.c
+++ b/src/backend/executor/execExprInterp.c
@@ -1986,7 +1986,7 @@ ExecEvalCurrentOfExpr(ExprState *state, ExprEvalStep *op)
 {
 ereport(ERROR,
 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("WHERE CURRENT OF is not supported for this table type")));
+ errmsg("WHERE CURRENT OF is not supported for this table type")));
 }
 /*
@@ -2187,7 +2187,7 @@ ExecEvalArrayExpr(ExprState *state, ExprEvalStep *op)
 (errcode(ERRCODE_DATATYPE_MISMATCH),
 errmsg("cannot merge incompatible arrays"),
 errdetail("Array with element type %s cannot be "
- "included in ARRAY construct with element type %s.",
+ "included in ARRAY construct with element type %s.",
 format_type_be(ARR_ELEMTYPE(array)),
 format_type_be(element_type))));
@@ -2207,8 +2207,8 @@ ExecEvalArrayExpr(ExprState *state, ExprEvalStep *op)
 if (ndims <= 0 || ndims > MAXDIM)
 ereport(ERROR,
 (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
- errmsg("number of array dimensions (%d) exceeds " \
- "the maximum allowed (%d)", ndims, MAXDIM)));
+ errmsg("number of array dimensions (%d) exceeds " \
+ "the maximum allowed (%d)", ndims, MAXDIM)));
 elem_dims = (int *) palloc(elem_ndims * sizeof(int));
 memcpy(elem_dims, ARR_DIMS(array), elem_ndims * sizeof(int));
@@ -2601,7 +2601,7 @@ ExecEvalArrayRefSubscript(ExprState *state, ExprEvalStep *op)
 if (arefstate->isassignment)
 ereport(ERROR,
 (errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED),
- errmsg("array subscript in assignment must not be null")));
+ errmsg("array subscript in assignment must not be null")));
 *op->resnull = true;
 return false;
 }
@@ -2834,7 +2834,7 @@ ExecEvalConvertRowtype(ExprState *state, ExprEvalStep *op, ExprContext *econtext
 /* prepare map from old to new attribute numbers */
 op->d.convert_rowtype.map = convert_tuples_by_name(indesc, outdesc,
- gettext_noop("could not convert row type"));
+ gettext_noop("could not convert row type"));
 op->d.convert_rowtype.initialized = true;
 MemoryContextSwitchTo(old_cxt);
@@ -3049,9 +3049,9 @@ ExecEvalConstraintCheck(ExprState *state, ExprEvalStep *op)
 !DatumGetBool(*op->d.domaincheck.checkvalue))
 ereport(ERROR,
 (errcode(ERRCODE_CHECK_VIOLATION),
- errmsg("value for domain %s violates check constraint \"%s\"",
- format_type_be(op->d.domaincheck.resulttype),
- op->d.domaincheck.constraintname),
+ errmsg("value for domain %s violates check constraint \"%s\"",
+ format_type_be(op->d.domaincheck.resulttype),
+ op->d.domaincheck.constraintname),
 errdomainconstraint(op->d.domaincheck.resulttype,
 op->d.domaincheck.constraintname)));
 }
@@ -3116,7 +3116,7 @@ ExecEvalXmlExpr(ExprState *state, ExprEvalStep *op)
 appendStringInfo(&buf, "<%s>%s</%s>",
 argname,
 map_sql_value_to_xml_value(value,
- exprType((Node *) e), true),
+ exprType((Node *) e), true),
 argname);
 *op->resnull = false;
 }
@@ -3137,10 +3137,10 @@ ExecEvalXmlExpr(ExprState *state, ExprEvalStep *op)
 case IS_XMLELEMENT:
 *op->resvalue = PointerGetDatum(xmlelement(xexpr,
- op->d.xmlexpr.named_argvalue,
- op->d.xmlexpr.named_argnull,
+ op->d.xmlexpr.named_argvalue,
+ op->d.xmlexpr.named_argnull,
 op->d.xmlexpr.argvalue,
- op->d.xmlexpr.argnull));
+ op->d.xmlexpr.argnull));
 *op->resnull = false;
 break;
@@ -3166,7 +3166,7 @@ ExecEvalXmlExpr(ExprState *state, ExprEvalStep *op)
 *op->resvalue = PointerGetDatum(xmlparse(data,
 xexpr->xmloption,
- preserve_whitespace));
+ preserve_whitespace));
 *op->resnull = false;
 }
 break;
@@ -3243,8 +3243,8 @@ ExecEvalXmlExpr(ExprState *state, ExprEvalStep *op)
 value = argvalue[0];
 *op->resvalue = PointerGetDatum(
- xmltotext_with_xmloption(DatumGetXmlP(value),
- xexpr->xmloption));
+ xmltotext_with_xmloption(DatumGetXmlP(value),
+ xexpr->xmloption));
 *op->resnull = false;
 }
 break;
@@ -3418,7 +3418,7 @@ ExecEvalWholeRowVar(ExprState *state, ExprEvalStep *op, ExprContext *econtext)
 (errcode(ERRCODE_DATATYPE_MISMATCH),
 errmsg("table row type and query-specified row type do not match"),
 errdetail_plural("Table row contains %d attribute, but query expects %d.",
- "Table row contains %d attributes, but query expects %d.",
+ "Table row contains %d attributes, but query expects %d.",
 slot_tupdesc->natts,
 slot_tupdesc->natts,
 var_tupdesc->natts)));
@@ -3492,10 +3492,10 @@ ExecEvalWholeRowVar(ExprState *state, ExprEvalStep *op, ExprContext *econtext)
 * perhaps other places.)
 */
 if (econtext->ecxt_estate &&
- variable->varno <= list_length(econtext->ecxt_estate->es_range_table))
+ variable->varno <= list_length(econtext->ecxt_estate->es_range_table))
 {
 RangeTblEntry *rte = rt_fetch(variable->varno,
- econtext->ecxt_estate->es_range_table);
+ econtext->ecxt_estate->es_range_table);
 if (rte->eref)
 ExecTypeSetColNames(output_tupdesc, rte->eref->colnames);
diff --git a/src/backend/executor/execIndexing.c b/src/backend/executor/execIndexing.c
index 4d818309962..89e189fa715 100644
--- a/src/backend/executor/execIndexing.c
+++ b/src/backend/executor/execIndexing.c
@@ -432,7 +432,7 @@ ExecInsertIndexTuples(TupleTableSlot *slot,
 indexRelation, indexInfo,
 tupleid, values, isnull,
 estate, false,
- waitMode, violationOK, NULL);
+ waitMode, violationOK, NULL);
 }
 if ((checkUnique == UNIQUE_CHECK_PARTIAL ||
@@ -542,7 +542,7 @@ ExecCheckIndexConstraints(TupleTableSlot *slot,
 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
 errmsg("ON CONFLICT does not support deferrable unique constraints/exclusion constraints as arbiters"),
 errtableconstraint(heapRelation,
- RelationGetRelationName(indexRelation))));
+ RelationGetRelationName(indexRelation))));
 checkedIndex = true;
@@ -580,7 +580,7 @@ ExecCheckIndexConstraints(TupleTableSlot *slot,
 satisfiesConstraint =
 check_exclusion_or_unique_constraint(heapRelation, indexRelation,
 indexInfo, &invalidItemPtr,
- values, isnull, estate, false,
+ values, isnull, estate, false,
 CEOUC_WAIT, true,
 conflictTid);
 if (!satisfiesConstraint)
diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c
index 2e3717d4dd7..7f0d21f5166 100644
--- a/src/backend/executor/execMain.c
+++ b/src/backend/executor/execMain.c
@@ -698,14 +698,14 @@ ExecCheckRTEPerms(RangeTblEntry *rte)
 */
 if (remainingPerms & ACL_INSERT && !ExecCheckRTEPermsModified(relOid,
 userid,
- rte->insertedCols,
- ACL_INSERT))
+ rte->insertedCols,
+ ACL_INSERT))
 return false;
 if (remainingPerms & ACL_UPDATE && !ExecCheckRTEPermsModified(relOid,
 userid,
- rte->updatedCols,
- ACL_UPDATE))
+ rte->updatedCols,
+ ACL_UPDATE))
 return false;
 }
 return true;
@@ -1133,26 +1133,26 @@ CheckValidResultRel(Relation resultRel, CmdType operation)
 case CMD_INSERT:
 if (!trigDesc || !trigDesc->trig_insert_instead_row)
 ereport(ERROR,
- (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("cannot insert into view \"%s\"",
- RelationGetRelationName(resultRel)),
- errhint("To enable inserting into the view, provide an INSTEAD OF INSERT trigger or an unconditional ON INSERT DO INSTEAD rule.")));
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("cannot insert into view \"%s\"",
+ RelationGetRelationName(resultRel)),
+ errhint("To enable inserting into the view, provide an INSTEAD OF INSERT trigger or an unconditional ON INSERT DO INSTEAD rule.")));
 break;
 case CMD_UPDATE:
 if (!trigDesc || !trigDesc->trig_update_instead_row)
 ereport(ERROR,
- (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("cannot update view \"%s\"",
- RelationGetRelationName(resultRel)),
- errhint("To enable updating the view, provide an INSTEAD OF UPDATE trigger or an unconditional ON UPDATE DO INSTEAD rule.")));
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("cannot update view \"%s\"",
+ RelationGetRelationName(resultRel)),
+ errhint("To enable updating the view, provide an INSTEAD OF UPDATE trigger or an unconditional ON UPDATE DO INSTEAD rule.")));
 break;
 case CMD_DELETE:
 if (!trigDesc || !trigDesc->trig_delete_instead_row)
 ereport(ERROR,
- (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("cannot delete from view \"%s\"",
- RelationGetRelationName(resultRel)),
- errhint("To enable deleting from the view, provide an INSTEAD OF DELETE trigger or an unconditional ON DELETE DO INSTEAD rule.")));
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("cannot delete from view \"%s\"",
+ RelationGetRelationName(resultRel)),
+ errhint("To enable deleting from the view, provide an INSTEAD OF DELETE trigger or an unconditional ON DELETE DO INSTEAD rule.")));
 break;
 default:
 elog(ERROR, "unrecognized CmdType: %d", (int) operation);
@@ -1175,14 +1175,14 @@ CheckValidResultRel(Relation resultRel, CmdType operation)
 if (fdwroutine->ExecForeignInsert == NULL)
 ereport(ERROR,
 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot insert into foreign table \"%s\"",
- RelationGetRelationName(resultRel))));
+ errmsg("cannot insert into foreign table \"%s\"",
+ RelationGetRelationName(resultRel))));
 if (fdwroutine->IsForeignRelUpdatable != NULL &&
 (fdwroutine->IsForeignRelUpdatable(resultRel) & (1 << CMD_INSERT)) == 0)
 ereport(ERROR,
- (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("foreign table \"%s\" does not allow inserts",
- RelationGetRelationName(resultRel))));
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("foreign table \"%s\" does not allow inserts",
+ RelationGetRelationName(resultRel))));
 break;
 case CMD_UPDATE:
 if (fdwroutine->ExecForeignUpdate == NULL)
@@ -1193,22 +1193,22 @@ CheckValidResultRel(Relation resultRel, CmdType operation)
 if (fdwroutine->IsForeignRelUpdatable != NULL &&
 (fdwroutine->IsForeignRelUpdatable(resultRel) & (1 << CMD_UPDATE)) == 0)
 ereport(ERROR,
- (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("foreign table \"%s\" does not allow updates",
- RelationGetRelationName(resultRel))));
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("foreign table \"%s\" does not allow updates",
+ RelationGetRelationName(resultRel))));
 break;
 case CMD_DELETE:
 if (fdwroutine->ExecForeignDelete == NULL)
 ereport(ERROR,
 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot delete from foreign table \"%s\"",
- RelationGetRelationName(resultRel))));
+ errmsg("cannot delete from foreign table \"%s\"",
+ RelationGetRelationName(resultRel))));
 if (fdwroutine->IsForeignRelUpdatable != NULL &&
 (fdwroutine->IsForeignRelUpdatable(resultRel) & (1 << CMD_DELETE)) == 0)
 ereport(ERROR,
- (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("foreign table \"%s\" does not allow deletes",
- RelationGetRelationName(resultRel))));
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("foreign table \"%s\" does not allow deletes",
+ RelationGetRelationName(resultRel))));
 break;
 default:
 elog(ERROR, "unrecognized CmdType: %d", (int) operation);
@@ -1267,8 +1267,8 @@ CheckValidRowMarkRel(Relation rel, RowMarkType markType)
 if (markType != ROW_MARK_REFERENCE)
 ereport(ERROR,
 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("cannot lock rows in materialized view \"%s\"",
- RelationGetRelationName(rel))));
+ errmsg("cannot lock rows in materialized view \"%s\"",
+ RelationGetRelationName(rel))));
 break;
 case RELKIND_FOREIGN_TABLE:
 /* Okay only if the FDW supports it */
@@ -1875,7 +1875,7 @@ ExecPartitionCheck(ResultRelInfo *resultRelInfo, TupleTableSlot *slot,
 tupdesc = RelationGetDescr(rel);
 /* a reverse map */
 map = convert_tuples_by_name(old_tupdesc, tupdesc,
- gettext_noop("could not convert row type"));
+ gettext_noop("could not convert row type"));
 if (map != NULL)
 {
 tuple = do_convert_tuple(tuple, map);
@@ -1893,9 +1893,9 @@ ExecPartitionCheck(ResultRelInfo *resultRelInfo, TupleTableSlot *slot,
 64);
 ereport(ERROR,
 (errcode(ERRCODE_CHECK_VIOLATION),
- errmsg("new row for relation \"%s\" violates partition constraint",
- RelationGetRelationName(orig_rel)),
- val_desc ? errdetail("Failing row contains %s.", val_desc) : 0));
+ errmsg("new row for relation \"%s\" violates partition constraint",
+ RelationGetRelationName(orig_rel)),
+ val_desc ? errdetail("Failing row contains %s.", val_desc) : 0));
 }
 }
@@ -1952,7 +1952,7 @@ ExecConstraints(ResultRelInfo *resultRelInfo,
 tupdesc = RelationGetDescr(rel);
 /* a reverse map */
 map = convert_tuples_by_name(orig_tupdesc, tupdesc,
- gettext_noop("could not convert row type"));
+ gettext_noop("could not convert row type"));
 if (map != NULL)
 {
 tuple = do_convert_tuple(tuple, map);
@@ -1972,7 +1972,7 @@ ExecConstraints(ResultRelInfo *resultRelInfo,
 ereport(ERROR,
 (errcode(ERRCODE_NOT_NULL_VIOLATION),
 errmsg("null value in column \"%s\" violates not-null constraint",
- NameStr(orig_tupdesc->attrs[attrChk - 1]->attname)),
+ NameStr(orig_tupdesc->attrs[attrChk - 1]->attname)),
 val_desc ? errdetail("Failing row contains %s.", val_desc) : 0,
 errtablecol(orig_rel, attrChk)));
 }
@@ -1999,7 +1999,7 @@ ExecConstraints(ResultRelInfo *resultRelInfo,
 tupdesc = RelationGetDescr(rel);
 /* a reverse map */
 map = convert_tuples_by_name(old_tupdesc, tupdesc,
- gettext_noop("could not convert row type"));
+ gettext_noop("could not convert row type"));
 if (map != NULL)
 {
 tuple = do_convert_tuple(tuple, map);
@@ -2019,7 +2019,7 @@ ExecConstraints(ResultRelInfo *resultRelInfo,
 (errcode(ERRCODE_CHECK_VIOLATION),
 errmsg("new row for relation \"%s\" violates check constraint \"%s\"",
 RelationGetRelationName(orig_rel), failed),
- val_desc ? errdetail("Failing row contains %s.", val_desc) : 0,
+ val_desc ? errdetail("Failing row contains %s.", val_desc) : 0,
 errtableconstraint(orig_rel, failed)));
 }
 }
@@ -2108,8 +2108,8 @@ ExecWithCheckOptions(WCOKind kind, ResultRelInfo *resultRelInfo,
 ereport(ERROR,
 (errcode(ERRCODE_WITH_CHECK_OPTION_VIOLATION),
- errmsg("new row violates check option for view \"%s\"",
- wco->relname),
+ errmsg("new row violates check option for view \"%s\"",
+ wco->relname),
errdetail("Failing row contains %s.", val_desc) : 0)); break; @@ -2577,7 +2577,7 @@ EvalPlanQualFetch(EState *estate, Relation relation, int lockmode, ereport(ERROR, (errcode(ERRCODE_LOCK_NOT_AVAILABLE), errmsg("could not obtain lock on row in relation \"%s\"", - RelationGetRelationName(relation)))); + RelationGetRelationName(relation)))); break; } continue; /* loop back to repeat heap_fetch */ @@ -2875,8 +2875,8 @@ EvalPlanQualFetchRowMarks(EPQState *epqstate) if (fdwroutine->RefetchForeignRow == NULL) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("cannot lock rows in foreign table \"%s\"", - RelationGetRelationName(erm->relation)))); + errmsg("cannot lock rows in foreign table \"%s\"", + RelationGetRelationName(erm->relation)))); copyTuple = fdwroutine->RefetchForeignRow(epqstate->estate, erm, datum, @@ -3232,7 +3232,7 @@ ExecSetupPartitionTupleRouting(Relation rel, *partitions = (ResultRelInfo *) palloc(*num_partitions * sizeof(ResultRelInfo)); *tup_conv_maps = (TupleConversionMap **) palloc0(*num_partitions * - sizeof(TupleConversionMap *)); + sizeof(TupleConversionMap *)); /* * Initialize an empty slot that will be used to manipulate tuples of any @@ -3267,7 +3267,7 @@ ExecSetupPartitionTupleRouting(Relation rel, * partition from the parent's type to the partition's. */ (*tup_conv_maps)[i] = convert_tuples_by_name(tupDesc, part_tupdesc, - gettext_noop("could not convert row type")); + gettext_noop("could not convert row type")); InitResultRelInfo(leaf_part_rri, partrel, diff --git a/src/backend/executor/execParallel.c b/src/backend/executor/execParallel.c index 1c02fa140b0..ce47f1d4a8b 100644 --- a/src/backend/executor/execParallel.c +++ b/src/backend/executor/execParallel.c @@ -110,7 +110,7 @@ static bool ExecParallelInitializeDSM(PlanState *node, static shm_mq_handle **ExecParallelSetupTupleQueues(ParallelContext *pcxt, bool reinitialize); static bool ExecParallelRetrieveInstrumentation(PlanState *planstate, - SharedExecutorInstrumentation *instrumentation); + SharedExecutorInstrumentation *instrumentation); /* Helper function that runs in the parallel worker. */ static DestReceiver *ExecParallelGetReceiver(dsm_segment *seg, shm_toc *toc); @@ -446,7 +446,7 @@ ExecInitParallelPlan(PlanState *planstate, EState *estate, int nworkers) /* Estimate space for tuple queues. */ shm_toc_estimate_chunk(&pcxt->estimator, - mul_size(PARALLEL_TUPLE_QUEUE_SIZE, pcxt->nworkers)); + mul_size(PARALLEL_TUPLE_QUEUE_SIZE, pcxt->nworkers)); shm_toc_estimate_keys(&pcxt->estimator, 1); /* @@ -504,7 +504,7 @@ ExecInitParallelPlan(PlanState *planstate, EState *estate, int nworkers) /* Allocate space for each worker's BufferUsage; no need to initialize. 
*/ bufusage_space = shm_toc_allocate(pcxt->toc, - mul_size(sizeof(BufferUsage), pcxt->nworkers)); + mul_size(sizeof(BufferUsage), pcxt->nworkers)); shm_toc_insert(pcxt->toc, PARALLEL_KEY_BUFFER_USAGE, bufusage_space); pei->buffer_usage = bufusage_space; @@ -583,7 +583,7 @@ ExecInitParallelPlan(PlanState *planstate, EState *estate, int nworkers) */ static bool ExecParallelRetrieveInstrumentation(PlanState *planstate, - SharedExecutorInstrumentation *instrumentation) + SharedExecutorInstrumentation *instrumentation) { Instrumentation *instrument; int i; @@ -735,7 +735,7 @@ ExecParallelGetQueryDesc(shm_toc *toc, DestReceiver *receiver, */ static bool ExecParallelReportInstrumentation(PlanState *planstate, - SharedExecutorInstrumentation *instrumentation) + SharedExecutorInstrumentation *instrumentation) { int i; int plan_node_id = planstate->plan->plan_node_id; @@ -804,7 +804,7 @@ ExecParallelInitializeWorker(PlanState *planstate, shm_toc *toc) break; case T_BitmapHeapScanState: ExecBitmapHeapInitializeWorker( - (BitmapHeapScanState *) planstate, toc); + (BitmapHeapScanState *) planstate, toc); break; default: break; diff --git a/src/backend/executor/execProcnode.c b/src/backend/executor/execProcnode.c index 5469cde1e00..294ad2cff99 100644 --- a/src/backend/executor/execProcnode.c +++ b/src/backend/executor/execProcnode.c @@ -259,7 +259,7 @@ ExecInitNode(Plan *node, EState *estate, int eflags) case T_NamedTuplestoreScan: result = (PlanState *) ExecInitNamedTuplestoreScan((NamedTuplestoreScan *) node, - estate, eflags); + estate, eflags); break; case T_WorkTableScan: diff --git a/src/backend/executor/execReplication.c b/src/backend/executor/execReplication.c index c6a66b6195f..6dae79a8f00 100644 --- a/src/backend/executor/execReplication.c +++ b/src/backend/executor/execReplication.c @@ -568,6 +568,6 @@ CheckSubscriptionRelkind(char relkind, const char *nspname, if (relkind != RELKIND_RELATION) ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("logical replication target relation \"%s.%s\" is not a table", - nspname, relname))); + errmsg("logical replication target relation \"%s.%s\" is not a table", + nspname, relname))); } diff --git a/src/backend/executor/execSRF.c b/src/backend/executor/execSRF.c index 077ac208c13..138e86ac674 100644 --- a/src/backend/executor/execSRF.c +++ b/src/backend/executor/execSRF.c @@ -291,7 +291,7 @@ ExecMakeTableFunctionResult(SetExprState *setexpr, */ oldcontext = MemoryContextSwitchTo(econtext->ecxt_per_query_memory); tupdesc = lookup_rowtype_tupdesc_copy(HeapTupleHeaderGetTypeId(td), - HeapTupleHeaderGetTypMod(td)); + HeapTupleHeaderGetTypMod(td)); rsinfo.setDesc = tupdesc; MemoryContextSwitchTo(oldcontext); } @@ -667,10 +667,10 @@ init_sexpr(Oid foid, Oid input_collation, Expr *node, if (list_length(sexpr->args) > FUNC_MAX_ARGS) ereport(ERROR, (errcode(ERRCODE_TOO_MANY_ARGUMENTS), - errmsg_plural("cannot pass more than %d argument to a function", - "cannot pass more than %d arguments to a function", - FUNC_MAX_ARGS, - FUNC_MAX_ARGS))); + errmsg_plural("cannot pass more than %d argument to a function", + "cannot pass more than %d arguments to a function", + FUNC_MAX_ARGS, + FUNC_MAX_ARGS))); /* Set up the primary fmgr lookup information */ fmgr_info_cxt(foid, &(sexpr->func), sexprCxt); @@ -687,7 +687,7 @@ init_sexpr(Oid foid, Oid input_collation, Expr *node, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("set-valued function called in context that cannot accept a set"), parent ? 
- exprLocation((Node *) node)) : 0));
+ exprLocation((Node *) node)) : 0));
 /* Otherwise, caller should have marked the sexpr correctly */
 Assert(sexpr->func.fn_retset == sexpr->funcReturnsSet);
@@ -897,7 +897,7 @@ tupledesc_match(TupleDesc dst_tupdesc, TupleDesc src_tupdesc)
 (errcode(ERRCODE_DATATYPE_MISMATCH),
 errmsg("function return row and query-specified return row do not match"),
 errdetail_plural("Returned row contains %d attribute, but query expects %d.",
- "Returned row contains %d attributes, but query expects %d.",
+ "Returned row contains %d attributes, but query expects %d.",
 src_tupdesc->natts, src_tupdesc->natts,
 dst_tupdesc->natts)));
diff --git a/src/backend/executor/execTuples.c b/src/backend/executor/execTuples.c
index 9abf0aa15d2..7ae70a877a0 100644
--- a/src/backend/executor/execTuples.c
+++ b/src/backend/executor/execTuples.c
@@ -1213,7 +1213,7 @@ HeapTupleHeaderGetDatum(HeapTupleHeader tuple)
 /* And do the flattening */
 result = toast_flatten_tuple_to_datum(tuple,
- HeapTupleHeaderGetDatumLength(tuple),
+ HeapTupleHeaderGetDatumLength(tuple),
 tupDesc);
 ReleaseTupleDesc(tupDesc);
diff --git a/src/backend/executor/functions.c b/src/backend/executor/functions.c
index 7199ff6fd72..3630f5d9668 100644
--- a/src/backend/executor/functions.c
+++ b/src/backend/executor/functions.c
@@ -517,7 +517,7 @@ init_execution_state(List *queryTree_list,
 ((CopyStmt *) stmt->utilityStmt)->filename == NULL)
 ereport(ERROR,
 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot COPY to/from client in a SQL function")));
+ errmsg("cannot COPY to/from client in a SQL function")));
 if (IsA(stmt->utilityStmt, TransactionStmt))
 ereport(ERROR,
@@ -531,8 +531,8 @@ init_execution_state(List *queryTree_list,
 ereport(ERROR,
 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
 /* translator: %s is a SQL statement name */
- errmsg("%s is not allowed in a non-volatile function",
- CreateCommandTag((Node *) stmt))));
+ errmsg("%s is not allowed in a non-volatile function",
+ CreateCommandTag((Node *) stmt))));
 if (IsInParallelMode() && !CommandIsReadOnly(stmt))
 PreventCommandIfParallelMode(CreateCommandTag((Node *) stmt));
@@ -713,7 +713,7 @@ init_sql_fcache(FmgrInfo *finfo, Oid collation, bool lazyEvalOK)
 queryTree_sublist = pg_analyze_and_rewrite_params(parsetree,
 fcache->src,
- (ParserSetupHook) sql_fn_parser_setup,
+ (ParserSetupHook) sql_fn_parser_setup,
 fcache->pinfo,
 NULL);
 queryTree_list = lappend(queryTree_list, queryTree_sublist);
@@ -1594,8 +1594,8 @@ check_sql_fn_retval(Oid func_id, Oid rettype, List *queryTreeList,
 if (rettype != VOIDOID)
 ereport(ERROR,
 (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
- errmsg("return type mismatch in function declared to return %s",
- format_type_be(rettype)),
+ errmsg("return type mismatch in function declared to return %s",
+ format_type_be(rettype)),
 errdetail("Function's final statement must be SELECT or INSERT/UPDATE/DELETE RETURNING.")));
 return false;
 }
@@ -1631,9 +1631,9 @@ check_sql_fn_retval(Oid func_id, Oid rettype, List *queryTreeList,
 if (tlistlen != 1)
 ereport(ERROR,
 (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
- errmsg("return type mismatch in function declared to return %s",
- format_type_be(rettype)),
- errdetail("Final statement must return exactly one column.")));
+ errmsg("return type mismatch in function declared to return %s",
+ format_type_be(rettype)),
+ errdetail("Final statement must return exactly one column.")));
 /* We assume here that non-junk TLEs must come first in tlists */
 tle = (TargetEntry *) linitial(tlist);
@@ -1643,8 +1643,8 @@ check_sql_fn_retval(Oid func_id, Oid rettype, List *queryTreeList,
 if (!IsBinaryCoercible(restype, rettype))
 ereport(ERROR,
 (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
- errmsg("return type mismatch in function declared to return %s",
- format_type_be(rettype)),
+ errmsg("return type mismatch in function declared to return %s",
+ format_type_be(rettype)),
 errdetail("Actual return type is %s.",
 format_type_be(restype))));
 if (modifyTargetList && restype != rettype)
@@ -1698,8 +1698,8 @@ check_sql_fn_retval(Oid func_id, Oid rettype, List *queryTreeList,
 tle->expr = (Expr *) makeRelabelType(tle->expr,
 rettype,
 -1,
- get_typcollation(rettype),
- COERCE_IMPLICIT_CAST);
+ get_typcollation(rettype),
+ COERCE_IMPLICIT_CAST);
 /* Relabel is dangerous if sort/group or setop column */
 if (tle->ressortgroupref != 0 || parse->setOperations)
 *modifyTargetList = true;
@@ -1758,7 +1758,7 @@ check_sql_fn_retval(Oid func_id, Oid rettype, List *queryTreeList,
 (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
 errmsg("return type mismatch in function declared to return %s",
 format_type_be(rettype)),
- errdetail("Final statement returns too many columns.")));
+ errdetail("Final statement returns too many columns.")));
 attr = tupdesc->attrs[colindex - 1];
 if (attr->attisdropped && modifyTargetList)
 {
@@ -1802,8 +1802,8 @@ check_sql_fn_retval(Oid func_id, Oid rettype, List *queryTreeList,
 tle->expr = (Expr *) makeRelabelType(tle->expr,
 atttype,
 -1,
- get_typcollation(atttype),
- COERCE_IMPLICIT_CAST);
+ get_typcollation(atttype),
+ COERCE_IMPLICIT_CAST);
 /* Relabel is dangerous if sort/group or setop column */
 if (tle->ressortgroupref != 0 || parse->setOperations)
 *modifyTargetList = true;
@@ -1821,7 +1821,7 @@ check_sql_fn_retval(Oid func_id, Oid rettype, List *queryTreeList,
 (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
 errmsg("return type mismatch in function declared to return %s",
 format_type_be(rettype)),
- errdetail("Final statement returns too few columns.")));
+ errdetail("Final statement returns too few columns.")));
 if (modifyTargetList)
 {
 Expr *null_expr;
@@ -1861,7 +1861,7 @@ check_sql_fn_retval(Oid func_id, Oid rettype, List *queryTreeList,
 /* Set up junk filter if needed */
 if (junkFilter)
 *junkFilter = ExecInitJunkFilterConversion(tlist,
- CreateTupleDescCopy(tupdesc),
+ CreateTupleDescCopy(tupdesc),
 NULL);
 /* Report that we are returning entire tuple result */
diff --git a/src/backend/executor/nodeAgg.c b/src/backend/executor/nodeAgg.c
index b0f9520e530..de9a18e71c3 100644
--- a/src/backend/executor/nodeAgg.c
+++ b/src/backend/executor/nodeAgg.c
@@ -752,7 +752,7 @@ initialize_aggregate(AggState *aggstate, AggStatePerTrans pertrans,
 MemoryContext oldContext;
 oldContext = MemoryContextSwitchTo(
- aggstate->curaggcontext->ecxt_per_tuple_memory);
+ aggstate->curaggcontext->ecxt_per_tuple_memory);
 pergroupstate->transValue = datumCopy(pertrans->initValue,
 pertrans->transtypeByVal,
 pertrans->transtypeLen);
@@ -869,7 +869,7 @@ advance_transition_function(AggState *aggstate,
 * do not need to pfree the old transValue, since it's NULL.
 */
 oldContext = MemoryContextSwitchTo(
- aggstate->curaggcontext->ecxt_per_tuple_memory);
+ aggstate->curaggcontext->ecxt_per_tuple_memory);
 pergroupstate->transValue = datumCopy(fcinfo->arg[1],
 pertrans->transtypeByVal,
 pertrans->transtypeLen);
@@ -1200,9 +1200,9 @@ advance_combine_function(AggState *aggstate,
 if (!pertrans->transtypeByVal)
 {
 oldContext = MemoryContextSwitchTo(
- aggstate->curaggcontext->ecxt_per_tuple_memory);
+ aggstate->curaggcontext->ecxt_per_tuple_memory);
 pergroupstate->transValue = datumCopy(fcinfo->arg[1],
- pertrans->transtypeByVal,
+ pertrans->transtypeByVal,
 pertrans->transtypeLen);
 MemoryContextSwitchTo(oldContext);
 }
@@ -1530,7 +1530,7 @@ finalize_aggregate(AggState *aggstate,
 /* Fill in the transition state value */
 fcinfo.arg[0] = MakeExpandedObjectReadOnly(pergroupstate->transValue,
- pergroupstate->transValueIsNull,
+ pergroupstate->transValueIsNull,
 pertrans->transtypeLen);
 fcinfo.argnull[0] = pergroupstate->transValueIsNull;
 anynull |= pergroupstate->transValueIsNull;
@@ -1610,8 +1610,8 @@ finalize_partialaggregate(AggState *aggstate,
 FunctionCallInfo fcinfo = &pertrans->serialfn_fcinfo;
 fcinfo->arg[0] = MakeExpandedObjectReadOnly(pergroupstate->transValue,
- pergroupstate->transValueIsNull,
- pertrans->transtypeLen);
+ pergroupstate->transValueIsNull,
+ pertrans->transtypeLen);
 fcinfo->argnull[0] = pergroupstate->transValueIsNull;
 *resultVal = FunctionCallInvoke(fcinfo);
@@ -1872,9 +1872,9 @@ build_hash_table(AggState *aggstate)
 perhash->hashfunctions,
 perhash->aggnode->numGroups,
 additionalsize,
- aggstate->hashcontext->ecxt_per_tuple_memory,
+ aggstate->hashcontext->ecxt_per_tuple_memory,
 tmpmem,
- DO_AGGSPLIT_SKIPFINAL(aggstate->aggsplit));
+ DO_AGGSPLIT_SKIPFINAL(aggstate->aggsplit));
 }
 }
@@ -2051,7 +2051,7 @@ lookup_hash_entry(AggState *aggstate)
 {
 entry->additional = (AggStatePerGroup)
 MemoryContextAlloc(perhash->hashtable->tablecxt,
- sizeof(AggStatePerGroupData) * aggstate->numtrans);
+ sizeof(AggStatePerGroupData) * aggstate->numtrans);
 /* initialize aggregates for new tuple group */
 initialize_aggregates(aggstate, (AggStatePerGroup) entry->additional,
 -1);
@@ -2433,7 +2433,7 @@ agg_retrieve_direct(AggState *aggstate)
 node->numCols,
 node->grpColIdx,
 aggstate->phase->eqfunctions,
- tmpcontext->ecxt_per_tuple_memory))
+ tmpcontext->ecxt_per_tuple_memory))
 {
 aggstate->grp_firstTuple = ExecCopySlotTuple(outerslot);
 break;
@@ -2817,7 +2817,7 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
 ExecAssignScanTypeFromOuterPlan(&aggstate->ss);
 if (node->chain)
 ExecSetSlotDescriptor(aggstate->sort_slot,
- aggstate->ss.ss_ScanTupleSlot->tts_tupleDescriptor);
+ aggstate->ss.ss_ScanTupleSlot->tts_tupleDescriptor);
 /*
 * Initialize result tuple type and projection info.
@@ -2933,7 +2933,7 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
 }
 all_grouped_cols = bms_add_members(all_grouped_cols,
- phasedata->grouped_cols[0]);
+ phasedata->grouped_cols[0]);
 }
 else
 {
@@ -3304,8 +3304,8 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
 */
 existing_transno = find_compatible_pertrans(aggstate, aggref,
 transfn_oid, aggtranstype,
- serialfn_oid, deserialfn_oid,
- initValue, initValueIsNull,
+ serialfn_oid, deserialfn_oid,
+ initValue, initValueIsNull,
 same_input_transnos);
 if (existing_transno != -1)
 {
@@ -3977,7 +3977,7 @@ ExecReScanAgg(AggState *node)
 * Reset the per-group state (in particular, mark transvalues null)
 */
 MemSet(node->pergroup, 0,
- sizeof(AggStatePerGroupData) * node->numaggs * numGroupingSets);
+ sizeof(AggStatePerGroupData) * node->numaggs * numGroupingSets);
 /* reset to phase 1 */
 initialize_phase(node, 1);
diff --git a/src/backend/executor/nodeBitmapIndexscan.c b/src/backend/executor/nodeBitmapIndexscan.c
index c9f8b7c7fba..2411a2e5c1a 100644
--- a/src/backend/executor/nodeBitmapIndexscan.c
+++ b/src/backend/executor/nodeBitmapIndexscan.c
@@ -254,7 +254,7 @@ ExecInitBitmapIndexScan(BitmapIndexScan *node, EState *estate, int eflags)
 */
 relistarget = ExecRelationIsTargetRelation(estate, node->scan.scanrelid);
 indexstate->biss_RelationDesc = index_open(node->indexid,
- relistarget ? NoLock : AccessShareLock);
+ relistarget ? NoLock : AccessShareLock);
 /*
 * Initialize index-specific scan state
diff --git a/src/backend/executor/nodeFunctionscan.c b/src/backend/executor/nodeFunctionscan.c
index 426527d2a2a..3217d641d76 100644
--- a/src/backend/executor/nodeFunctionscan.c
+++ b/src/backend/executor/nodeFunctionscan.c
@@ -96,7 +96,7 @@ FunctionNext(FunctionScanState *node)
 node->ss.ps.ps_ExprContext,
 node->argcontext,
 node->funcstates[0].tupdesc,
- node->eflags & EXEC_FLAG_BACKWARD);
+ node->eflags & EXEC_FLAG_BACKWARD);
 /*
 * paranoia - cope if the function, which may have constructed the
@@ -155,7 +155,7 @@ FunctionNext(FunctionScanState *node)
 node->ss.ps.ps_ExprContext,
 node->argcontext,
 fs->tupdesc,
- node->eflags & EXEC_FLAG_BACKWARD);
+ node->eflags & EXEC_FLAG_BACKWARD);
 /*
 * paranoia - cope if the function, which may have constructed the
diff --git a/src/backend/executor/nodeGatherMerge.c b/src/backend/executor/nodeGatherMerge.c
index b1965f5e1c4..80ee1fc89b4 100644
--- a/src/backend/executor/nodeGatherMerge.c
+++ b/src/backend/executor/nodeGatherMerge.c
@@ -515,7 +515,7 @@ form_tuple_array(GatherMergeState *gm_state, int reader)
 tuple_buffer->tuple[i] = heap_copytuple(gm_readnext_tuple(gm_state,
 reader,
 false,
- &tuple_buffer->done));
+ &tuple_buffer->done));
 if (!HeapTupleIsValid(tuple_buffer->tuple[i]))
 break;
 tuple_buffer->nTuples++;
diff --git a/src/backend/executor/nodeHash.c b/src/backend/executor/nodeHash.c
index 6c84ad9989a..075f4ed11c8 100644
--- a/src/backend/executor/nodeHash.c
+++ b/src/backend/executor/nodeHash.c
@@ -657,7 +657,7 @@ ExecHashIncreaseNumBatches(HashJoinTable hashtable)
 hashtable->log2_nbuckets = hashtable->log2_nbuckets_optimal;
 hashtable->buckets = repalloc(hashtable->buckets,
- sizeof(HashJoinTuple) * hashtable->nbuckets);
+ sizeof(HashJoinTuple) * hashtable->nbuckets);
 }
 /*
@@ -783,7 +783,7 @@ ExecHashIncreaseNumBuckets(HashJoinTable hashtable)
 */
 hashtable->buckets =
 (HashJoinTuple *) repalloc(hashtable->buckets,
- hashtable->nbuckets * sizeof(HashJoinTuple));
+ hashtable->nbuckets * sizeof(HashJoinTuple));
 memset(hashtable->buckets, 0, hashtable->nbuckets * sizeof(HashJoinTuple));
@@ -1650,7 +1650,7 @@ dense_alloc(HashJoinTable hashtable, Size size)
 {
 /* allocate new chunk and put it at the beginning of the list */
 newChunk = (HashMemoryChunk) MemoryContextAlloc(hashtable->batchCxt,
- offsetof(HashMemoryChunkData, data) + size);
+ offsetof(HashMemoryChunkData, data) + size);
 newChunk->maxlen = size;
 newChunk->used = 0;
 newChunk->ntuples = 0;
@@ -1685,7 +1685,7 @@ dense_alloc(HashJoinTable hashtable, Size size)
 {
 /* allocate new chunk and put it at the beginning of the list */
 newChunk = (HashMemoryChunk) MemoryContextAlloc(hashtable->batchCxt,
- offsetof(HashMemoryChunkData, data) + HASH_CHUNK_SIZE);
+ offsetof(HashMemoryChunkData, data) + HASH_CHUNK_SIZE);
 newChunk->maxlen = HASH_CHUNK_SIZE;
 newChunk->used = size;
diff --git a/src/backend/executor/nodeHashjoin.c b/src/backend/executor/nodeHashjoin.c
index 8d2325398b8..668ed871e19 100644
--- a/src/backend/executor/nodeHashjoin.c
+++ b/src/backend/executor/nodeHashjoin.c
@@ -234,7 +234,7 @@ ExecHashJoin(HashJoinState *node)
 Assert(batchno > hashtable->curbatch);
 ExecHashJoinSaveTuple(ExecFetchSlotMinimalTuple(outerTupleSlot),
 hashvalue,
- &hashtable->outerBatchFile[batchno]);
+ &hashtable->outerBatchFile[batchno]);
 /* Loop around, staying in HJ_NEED_NEW_OUTER state */
 continue;
 }
@@ -452,20 +452,20 @@ ExecInitHashJoin(HashJoin *node, EState *estate, int eflags)
 case JOIN_ANTI:
 hjstate->hj_NullInnerTupleSlot =
 ExecInitNullTupleSlot(estate,
- ExecGetResultType(innerPlanState(hjstate)));
+ ExecGetResultType(innerPlanState(hjstate)));
 break;
 case JOIN_RIGHT:
 hjstate->hj_NullOuterTupleSlot =
 ExecInitNullTupleSlot(estate,
- ExecGetResultType(outerPlanState(hjstate)));
+ ExecGetResultType(outerPlanState(hjstate)));
 break;
 case JOIN_FULL:
 hjstate->hj_NullOuterTupleSlot =
 ExecInitNullTupleSlot(estate,
- ExecGetResultType(outerPlanState(hjstate)));
+ ExecGetResultType(outerPlanState(hjstate)));
 hjstate->hj_NullInnerTupleSlot =
 ExecInitNullTupleSlot(estate,
- ExecGetResultType(innerPlanState(hjstate)));
+ ExecGetResultType(innerPlanState(hjstate)));
 break;
 default:
 elog(ERROR, "unrecognized join type: %d",
@@ -764,7 +764,7 @@ ExecHashJoinNewBatch(HashJoinState *hjstate)
 if (BufFileSeek(innerFile, 0, 0L, SEEK_SET))
 ereport(ERROR,
 (errcode_for_file_access(),
- errmsg("could not rewind hash-join temporary file: %m")));
+ errmsg("could not rewind hash-join temporary file: %m")));
 while ((slot = ExecHashJoinGetSavedTuple(hjstate,
 innerFile,
@@ -794,7 +794,7 @@ ExecHashJoinNewBatch(HashJoinState *hjstate)
 if (BufFileSeek(hashtable->outerBatchFile[curbatch], 0, 0L, SEEK_SET))
 ereport(ERROR,
 (errcode_for_file_access(),
- errmsg("could not rewind hash-join temporary file: %m")));
+ errmsg("could not rewind hash-join temporary file: %m")));
 }
 return true;
diff --git a/src/backend/executor/nodeIndexonlyscan.c b/src/backend/executor/nodeIndexonlyscan.c
index fb3d3bb1218..890e54416a5 100644
--- a/src/backend/executor/nodeIndexonlyscan.c
+++ b/src/backend/executor/nodeIndexonlyscan.c
@@ -542,7 +542,7 @@ ExecInitIndexOnlyScan(IndexOnlyScan *node, EState *estate, int eflags)
 */
 relistarget = ExecRelationIsTargetRelation(estate, node->scan.scanrelid);
 indexstate->ioss_RelationDesc = index_open(node->indexid,
- relistarget ? NoLock : AccessShareLock);
+ relistarget ? NoLock : AccessShareLock);
 /*
 * Initialize index-specific scan state
diff --git a/src/backend/executor/nodeIndexscan.c b/src/backend/executor/nodeIndexscan.c
index 7e123758b4f..d8aceb1f2c8 100644
--- a/src/backend/executor/nodeIndexscan.c
+++ b/src/backend/executor/nodeIndexscan.c
@@ -970,7 +970,7 @@ ExecInitIndexScan(IndexScan *node, EState *estate, int eflags)
 */
 relistarget = ExecRelationIsTargetRelation(estate, node->scan.scanrelid);
 indexstate->iss_RelationDesc = index_open(node->indexid,
- relistarget ? NoLock : AccessShareLock);
+ relistarget ? NoLock : AccessShareLock);
 /*
 * Initialize index-specific scan state
diff --git a/src/backend/executor/nodeLimit.c b/src/backend/executor/nodeLimit.c
index aaec1322189..abd060d75f4 100644
--- a/src/backend/executor/nodeLimit.c
+++ b/src/backend/executor/nodeLimit.c
@@ -248,8 +248,8 @@ recompute_limits(LimitState *node)
 node->offset = DatumGetInt64(val);
 if (node->offset < 0)
 ereport(ERROR,
- (errcode(ERRCODE_INVALID_ROW_COUNT_IN_RESULT_OFFSET_CLAUSE),
- errmsg("OFFSET must not be negative")));
+ (errcode(ERRCODE_INVALID_ROW_COUNT_IN_RESULT_OFFSET_CLAUSE),
+ errmsg("OFFSET must not be negative")));
 }
 }
 else
diff --git a/src/backend/executor/nodeMergejoin.c b/src/backend/executor/nodeMergejoin.c
index 94a5e98e3e0..6a145ee33a2 100644
--- a/src/backend/executor/nodeMergejoin.c
+++ b/src/backend/executor/nodeMergejoin.c
@@ -1534,14 +1534,14 @@ ExecInitMergeJoin(MergeJoin *node, EState *estate, int eflags)
 mergestate->mj_FillInner = false;
 mergestate->mj_NullInnerTupleSlot =
 ExecInitNullTupleSlot(estate,
- ExecGetResultType(innerPlanState(mergestate)));
+ ExecGetResultType(innerPlanState(mergestate)));
 break;
 case JOIN_RIGHT:
 mergestate->mj_FillOuter = false;
 mergestate->mj_FillInner = true;
 mergestate->mj_NullOuterTupleSlot =
 ExecInitNullTupleSlot(estate,
- ExecGetResultType(outerPlanState(mergestate)));
+ ExecGetResultType(outerPlanState(mergestate)));
 /*
 * Can't handle right or full join with non-constant extra
@@ -1558,10 +1558,10 @@ ExecInitMergeJoin(MergeJoin *node, EState *estate, int eflags)
 mergestate->mj_FillInner = true;
 mergestate->mj_NullOuterTupleSlot =
 ExecInitNullTupleSlot(estate,
- ExecGetResultType(outerPlanState(mergestate)));
+ ExecGetResultType(outerPlanState(mergestate)));
 mergestate->mj_NullInnerTupleSlot =
 ExecInitNullTupleSlot(estate,
- ExecGetResultType(innerPlanState(mergestate)));
+ ExecGetResultType(innerPlanState(mergestate)));
 /*
 * Can't handle right or full join with non-constant extra
diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c
index 11594b58b57..5e43a069426 100644
--- a/src/backend/executor/nodeModifyTable.c
+++ b/src/backend/executor/nodeModifyTable.c
@@ -107,7 +107,7 @@ ExecCheckPlanOutput(Relation resultRel, List *targetList)
 errdetail("Table has type %s at ordinal position %d, but query expects %s.",
 format_type_be(attr->atttypid),
 attno,
- format_type_be(exprType((Node *) tle->expr)))));
+ format_type_be(exprType((Node *) tle->expr)))));
 }
 else
 {
@@ -128,7 +128,7 @@ ExecCheckPlanOutput(Relation resultRel, List *targetList)
 if (attno != resultDesc->natts)
 ereport(ERROR,
 (errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("table row type and query-specified row type do not match"),
+ errmsg("table row type and query-specified row type do not match"),
 errdetail("Query has too few columns.")));
 }
@@ -211,7 +211,7 @@ ExecCheckHeapTupleVisible(EState *estate,
 if (!TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetXmin(tuple->t_data)))
 ereport(ERROR,
 (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
- errmsg("could not serialize access due to concurrent update")));
+ errmsg("could not serialize access due to concurrent update")));
 }
 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
 }
@@ -291,7 +291,7 @@ ExecInsert(ModifyTableState *mtstate,
 * respectively.
 */
 leaf_part_index = ExecFindPartition(resultRelInfo,
- mtstate->mt_partition_dispatch_info,
+ mtstate->mt_partition_dispatch_info,
 slot,
 estate);
 Assert(leaf_part_index >= 0 &&
@@ -308,7 +308,7 @@ ExecInsert(ModifyTableState *mtstate,
 if (resultRelInfo->ri_FdwRoutine)
 ereport(ERROR,
 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot route inserted tuples to a foreign table")));
+ errmsg("cannot route inserted tuples to a foreign table")));
 /* For ExecInsertIndexTuples() to work on the partition's indexes */
 estate->es_result_relation_info = resultRelInfo;
@@ -529,7 +529,7 @@ ExecInsert(ModifyTableState *mtstate,
 /* insert index entries for tuple */
 recheckIndexes = ExecInsertIndexTuples(slot, &(tuple->t_self),
- estate, true, &specConflict,
+ estate, true, &specConflict,
 arbiterIndexes);
 /* adjust the tuple's state accordingly */
@@ -1614,16 +1614,16 @@ ExecModifyTable(ModifyTableState *node)
 {
 case CMD_INSERT:
 slot = ExecInsert(node, slot, planSlot,
- node->mt_arbiterindexes, node->mt_onconflict,
+ node->mt_arbiterindexes, node->mt_onconflict,
 estate, node->canSetTag);
 break;
 case CMD_UPDATE:
 slot = ExecUpdate(tupleid, oldtuple, slot, planSlot,
- &node->mt_epqstate, estate, node->canSetTag);
+ &node->mt_epqstate, estate, node->canSetTag);
 break;
 case CMD_DELETE:
 slot = ExecDelete(tupleid, oldtuple, planSlot,
- &node->mt_epqstate, estate, node->canSetTag);
+ &node->mt_epqstate, estate, node->canSetTag);
 break;
 default:
 elog(ERROR, "unknown operation");
@@ -1721,7 +1721,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
 /* Initialize the usesFdwDirectModify flag */
 resultRelInfo->ri_usesFdwDirectModify = bms_is_member(i,
- node->fdwDirectModifyPlans);
+ node->fdwDirectModifyPlans);
 /*
 * Verify result relation is a valid target for the current operation
@@ -1917,7 +1917,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
 resultRelInfo->ri_projectReturning =
 ExecBuildProjectionInfo(rlist, econtext, slot, &mtstate->ps,
- resultRelInfo->ri_RelationDesc->rd_att);
+ resultRelInfo->ri_RelationDesc->rd_att);
 resultRelInfo++;
 }
@@ -1941,7 +1941,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
 partrel, rel);
 resultRelInfo->ri_projectReturning =
 ExecBuildProjectionInfo(rlist, econtext, slot, &mtstate->ps,
- resultRelInfo->ri_RelationDesc->rd_att);
+ resultRelInfo->ri_RelationDesc->rd_att);
 resultRelInfo++;
 }
 }
@@ -1991,7 +1991,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
 /* create target slot for UPDATE SET projection */
 tupDesc = ExecTypeFromTL((List *) node->onConflictSet,
- resultRelInfo->ri_RelationDesc->rd_rel->relhasoids);
+ resultRelInfo->ri_RelationDesc->rd_rel->relhasoids);
 mtstate->mt_conflproj = ExecInitExtraTupleSlot(mtstate->ps.state);
 ExecSetSlotDescriptor(mtstate->mt_conflproj, tupDesc);
@@ -2100,7 +2100,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
 subplan->targetlist);
 j = ExecInitJunkFilter(subplan->targetlist,
- resultRelInfo->ri_RelationDesc->rd_att->tdhasoid,
+ resultRelInfo->ri_RelationDesc->rd_att->tdhasoid,
 ExecInitExtraTupleSlot(estate));
 if (operation == CMD_UPDATE || operation == CMD_DELETE)
diff --git a/src/backend/executor/nodeNestloop.c b/src/backend/executor/nodeNestloop.c
index 69d245358e6..0065fe601ec 100644
--- a/src/backend/executor/nodeNestloop.c
+++ b/src/backend/executor/nodeNestloop.c
@@ -326,7 +326,7 @@ ExecInitNestLoop(NestLoop *node, EState *estate, int eflags)
 case JOIN_ANTI:
 nlstate->nl_NullInnerTupleSlot =
 ExecInitNullTupleSlot(estate,
- ExecGetResultType(innerPlanState(nlstate)));
+ ExecGetResultType(innerPlanState(nlstate)));
 break;
 default:
 elog(ERROR, "unrecognized join type: %d",
diff --git a/src/backend/executor/nodeSamplescan.c b/src/backend/executor/nodeSamplescan.c
index 428fd98665e..b710ef7edf2 100644
--- a/src/backend/executor/nodeSamplescan.c
+++ b/src/backend/executor/nodeSamplescan.c
@@ -120,7 +120,7 @@ InitScanRelation(SampleScanState *node, EState *estate, int eflags)
 * open that relation and acquire appropriate lock on it.
 */
 currentRelation = ExecOpenScanRelation(estate,
- ((SampleScan *) node->ss.ps.plan)->scan.scanrelid,
+ ((SampleScan *) node->ss.ps.plan)->scan.scanrelid,
 eflags);
 node->ss.ss_currentRelation = currentRelation;
@@ -307,7 +307,7 @@ tablesample_init(SampleScanState *scanstate)
 if (isnull)
 ereport(ERROR,
 (errcode(ERRCODE_INVALID_TABLESAMPLE_REPEAT),
- errmsg("TABLESAMPLE REPEATABLE parameter cannot be null")));
+ errmsg("TABLESAMPLE REPEATABLE parameter cannot be null")));
 /*
 * The REPEATABLE parameter has been coerced to float8 by the parser.
diff --git a/src/backend/executor/nodeSeqscan.c b/src/backend/executor/nodeSeqscan.c
index 822c6bf20ac..307df87c82f 100644
--- a/src/backend/executor/nodeSeqscan.c
+++ b/src/backend/executor/nodeSeqscan.c
@@ -145,7 +145,7 @@ InitScanRelation(SeqScanState *node, EState *estate, int eflags)
 * open that relation and acquire appropriate lock on it.
 */
 currentRelation = ExecOpenScanRelation(estate,
- ((SeqScan *) node->ss.ps.plan)->scanrelid,
+ ((SeqScan *) node->ss.ps.plan)->scanrelid,
 eflags);
 node->ss.ss_currentRelation = currentRelation;
diff --git a/src/backend/executor/nodeTableFuncscan.c b/src/backend/executor/nodeTableFuncscan.c
index da557ceb6f1..bb016ec8f60 100644
--- a/src/backend/executor/nodeTableFuncscan.c
+++ b/src/backend/executor/nodeTableFuncscan.c
@@ -288,7 +288,7 @@ tfuncFetchRows(TableFuncScanState *tstate, ExprContext *econtext)
 PG_TRY();
 {
 routine->InitOpaque(tstate,
- tstate->ss.ss_ScanTupleSlot->tts_tupleDescriptor->natts);
+ tstate->ss.ss_ScanTupleSlot->tts_tupleDescriptor->natts);
 /*
 * If evaluating the document expression returns NULL, the table
@@ -398,9 +398,9 @@ tfuncInitialize(TableFuncScanState *tstate, ExprContext *econtext, Datum doc)
 if (isnull)
 ereport(ERROR,
 (errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED),
- errmsg("column filter expression must not be null"),
+ errmsg("column filter expression must not be null"),
 errdetail("Filter for column \"%s\" is null.",
- NameStr(tupdesc->attrs[colno]->attname))));
+ NameStr(tupdesc->attrs[colno]->attname))));
 colfilter = TextDatumGetCString(value);
 }
 else
@@ -460,8 +460,8 @@ tfuncLoadRows(TableFuncScanState *tstate, ExprContext *econtext)
 values[colno] = routine->GetValue(tstate,
 colno,
- tupdesc->attrs[colno]->atttypid,
- tupdesc->attrs[colno]->atttypmod,
+ tupdesc->attrs[colno]->atttypid,
+ tupdesc->attrs[colno]->atttypmod,
 &isnull);
 /* No value? Evaluate and apply the default, if any */
@@ -479,7 +479,7 @@ tfuncLoadRows(TableFuncScanState *tstate, ExprContext *econtext)
 ereport(ERROR,
 (errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED),
 errmsg("null is not allowed in column \"%s\"",
- NameStr(tupdesc->attrs[colno]->attname))));
+ NameStr(tupdesc->attrs[colno]->attname))));
 nulls[colno] = isnull;
 }
diff --git a/src/backend/executor/nodeTidscan.c b/src/backend/executor/nodeTidscan.c
index 64be613693c..96af2d21d9d 100644
--- a/src/backend/executor/nodeTidscan.c
+++ b/src/backend/executor/nodeTidscan.c
@@ -221,7 +221,7 @@ TidListEval(TidScanState *tidstate)
 Assert(tidexpr->cexpr);
 if (execCurrentOf(tidexpr->cexpr, econtext,
- RelationGetRelid(tidstate->ss.ss_currentRelation),
+ RelationGetRelid(tidstate->ss.ss_currentRelation),
 &cursor_tid))
 {
 if (numTids >= numAllocTids)
diff --git a/src/backend/executor/nodeWindowAgg.c b/src/backend/executor/nodeWindowAgg.c
index 1f2cbdde361..8f13fe0c732 100644
--- a/src/backend/executor/nodeWindowAgg.c
+++ b/src/backend/executor/nodeWindowAgg.c
@@ -350,7 +350,7 @@ advance_windowaggregate(WindowAggState *winstate,
 if (fcinfo->isnull && OidIsValid(peraggstate->invtransfn_oid))
 ereport(ERROR,
 (errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED),
- errmsg("moving-aggregate transition function must not return null")));
+ errmsg("moving-aggregate transition function must not return null")));
 /*
 * We must track the number of rows included in transValue, since to
@@ -599,7 +599,7 @@ finalize_windowaggregate(WindowAggState *winstate,
 perfuncstate->winCollation,
 (void *) winstate, NULL);
 fcinfo.arg[0] = MakeExpandedObjectReadOnly(peraggstate->transValue,
- peraggstate->transValueIsNull,
+ peraggstate->transValueIsNull,
 peraggstate->transtypeLen);
 fcinfo.argnull[0] = peraggstate->transValueIsNull;
 anynull = peraggstate->transValueIsNull;
@@ -1142,7 +1142,7 @@ begin_partition(WindowAggState *winstate)
 winobj->markptr = tuplestore_alloc_read_pointer(winstate->buffer,
 0);
 winobj->readptr = tuplestore_alloc_read_pointer(winstate->buffer,
- EXEC_FLAG_BACKWARD);
+ EXEC_FLAG_BACKWARD);
 winobj->markpos = -1;
 winobj->seekpos = -1;
 }
@@ -1631,7 +1631,7 @@ ExecWindowAgg(WindowAggState *winstate)
 if (offset < 0)
 ereport(ERROR,
 (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("frame starting offset must not be negative")));
+ errmsg("frame starting offset must not be negative")));
 }
 }
 if (frameOptions & FRAMEOPTION_END_VALUE)
@@ -1656,7 +1656,7 @@ ExecWindowAgg(WindowAggState *winstate)
 if (offset < 0)
 ereport(ERROR,
 (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("frame ending offset must not be negative")));
+ errmsg("frame ending offset must not be negative")));
 }
 }
 winstate->all_first = false;
@@ -1732,8 +1732,8 @@ ExecWindowAgg(WindowAggState *winstate)
 if (perfuncstate->plain_agg)
 continue;
 eval_windowfunction(winstate, perfuncstate,
- &(econtext->ecxt_aggvalues[perfuncstate->wfuncstate->wfuncno]),
- &(econtext->ecxt_aggnulls[perfuncstate->wfuncstate->wfuncno]));
+ &(econtext->ecxt_aggvalues[perfuncstate->wfuncstate->wfuncno]),
+ &(econtext->ecxt_aggnulls[perfuncstate->wfuncstate->wfuncno]));
 }
 /*
@@ -1863,7 +1863,7 @@ ExecInitWindowAgg(WindowAgg *node, EState *estate, int eflags)
 /* Set up data for comparing tuples */
 if (node->partNumCols > 0)
 winstate->partEqfunctions = execTuplesMatchPrepare(node->partNumCols,
- node->partOperators);
+ node->partOperators);
 if (node->ordNumCols > 0)
 winstate->ordEqfunctions = execTuplesMatchPrepare(node->ordNumCols,
 node->ordOperators);
diff --git a/src/backend/executor/spi.c b/src/backend/executor/spi.c
index 9db41813d5d..cd00a6d9f25 100644
--- a/src/backend/executor/spi.c
+++ b/src/backend/executor/spi.c
@@ -1230,7 +1230,7 @@ SPI_cursor_open_internal(const char *name, SPIPlanPtr plan,
 if (!(portal->cursorOptions & (CURSOR_OPT_SCROLL | CURSOR_OPT_NO_SCROLL)))
 {
 if (list_length(stmt_list) == 1 &&
- linitial_node(PlannedStmt, stmt_list)->commandType != CMD_UTILITY &&
+ linitial_node(PlannedStmt, stmt_list)->commandType != CMD_UTILITY &&
 linitial_node(PlannedStmt, stmt_list)->rowMarks == NIL &&
 ExecSupportsBackwardScan(linitial_node(PlannedStmt, stmt_list)->planTree))
 portal->cursorOptions |= CURSOR_OPT_SCROLL;
@@ -1246,7 +1246,7 @@ SPI_cursor_open_internal(const char *name, SPIPlanPtr plan,
 if (portal->cursorOptions & CURSOR_OPT_SCROLL)
 {
 if (list_length(stmt_list) == 1 &&
- linitial_node(PlannedStmt, stmt_list)->commandType != CMD_UTILITY &&
+ linitial_node(PlannedStmt, stmt_list)->commandType != CMD_UTILITY &&
 linitial_node(PlannedStmt, stmt_list)->rowMarks != NIL)
 ereport(ERROR,
 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
@@ -1279,8 +1279,8 @@ SPI_cursor_open_internal(const char *name, SPIPlanPtr plan,
 ereport(ERROR,
 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
 /* translator: %s is a SQL statement name */
- errmsg("%s is not allowed in a non-volatile function",
- CreateCommandTag((Node *) pstmt))));
+ errmsg("%s is not allowed in a non-volatile function",
+ CreateCommandTag((Node *) pstmt))));
 else
 PreventCommandIfParallelMode(CreateCommandTag((Node *) pstmt));
 }
@@ -1713,7 +1713,7 @@ spi_printtup(TupleTableSlot *slot, DestReceiver *self)
 tuptable->free = tuptable->alloced;
 tuptable->alloced += tuptable->free;
 tuptable->vals = (HeapTuple *) repalloc_huge(tuptable->vals,
- tuptable->alloced * sizeof(HeapTuple));
+ tuptable->alloced * sizeof(HeapTuple));
 }
 tuptable->vals[tuptable->alloced - tuptable->free] =
@@ -1879,7 +1879,7 @@ _SPI_prepare_oneshot_plan(const char *src, SPIPlanPtr plan)
 plansource = CreateOneShotCachedPlan(parsetree,
 src,
- CreateCommandTag(parsetree->stmt));
+ CreateCommandTag(parsetree->stmt));
 plancache_list = lappend(plancache_list, plansource);
 }
@@ -1990,8 +1990,8 @@ _SPI_execute_plan(SPIPlanPtr plan, ParamListInfo paramLI,
 stmt_list = pg_analyze_and_rewrite_params(parsetree,
 src,
 plan->parserSetup,
- plan->parserSetupArg,
- _SPI_current->queryEnv);
+ plan->parserSetupArg,
+ _SPI_current->queryEnv);
 }
 else
 {
@@ -2066,8 +2066,8 @@ _SPI_execute_plan(SPIPlanPtr plan, ParamListInfo paramLI,
 ereport(ERROR,
 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
 /* translator: %s is a SQL statement name */
- errmsg("%s is not allowed in a non-volatile function",
- CreateCommandTag((Node *) stmt))));
+ errmsg("%s is not allowed in a non-volatile function",
+ CreateCommandTag((Node *) stmt))));
 if (IsInParallelMode() && !CommandIsReadOnly(stmt))
 PreventCommandIfParallelMode(CreateCommandTag((Node *) stmt));
diff --git a/src/backend/executor/tqueue.c b/src/backend/executor/tqueue.c
index c086b5b682c..a4cfe9685ab 100644
--- a/src/backend/executor/tqueue.c
+++ b/src/backend/executor/tqueue.c
@@ -527,7 +527,7 @@ TQSendRecordInfo(TQueueDestReceiver *tqueue, int32 typmod, TupleDesc tupledesc)
 ctl.hcxt = tqueue->mycontext;
 tqueue->recordhtab = hash_create("tqueue sender record type hashtable",
 100, &ctl,
- HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
+ HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
 }
 /* Have we already seen this record type? If not, must report it. */
@@ -865,7 +865,7 @@ TQRemapArray(TupleQueueReader *reader, ArrayRemapInfo *remapinfo,
 /* Reconstruct and return the array. */
 *changed = true;
 arr = construct_md_array(elem_values, elem_nulls,
- ARR_NDIM(arr), ARR_DIMS(arr), ARR_LBOUND(arr),
+ ARR_NDIM(arr), ARR_DIMS(arr), ARR_LBOUND(arr),
 typid, remapinfo->typlen,
 remapinfo->typbyval, remapinfo->typalign);
 return PointerGetDatum(arr);
@@ -1099,7 +1099,7 @@ TupleQueueHandleControlMessage(TupleQueueReader *reader, Size nbytes,
 ctl.hcxt = reader->mycontext;
 reader->typmodmap = hash_create("tqueue receiver record type hashtable",
 100, &ctl,
- HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
+ HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
 }
 /* Create map entry. */
diff --git a/src/backend/executor/tstoreReceiver.c b/src/backend/executor/tstoreReceiver.c
index 1e641c9837f..eda38b1de13 100644
--- a/src/backend/executor/tstoreReceiver.c
+++ b/src/backend/executor/tstoreReceiver.c
@@ -135,7 +135,7 @@ tstoreReceiveSlot_detoast(TupleTableSlot *slot, DestReceiver *self)
 if (VARATT_IS_EXTERNAL(DatumGetPointer(val)))
 {
 val = PointerGetDatum(heap_tuple_fetch_attr((struct varlena *)
- DatumGetPointer(val)));
+ DatumGetPointer(val)));
 myState->tofree[nfree++] = val;
 }
 }