author     Tom Lane <tgl@sss.pgh.pa.us>    2020-05-14 13:06:38 -0400
committer  Tom Lane <tgl@sss.pgh.pa.us>    2020-05-14 13:06:50 -0400
commit     5cbfce562f7cd2aab0cdc4694ce298ec3567930e (patch)
tree       64e722d72fc5f1803cb6f6371d6cf12863e2812f
parent     1255466f8358ecac29581aa5ecec76628dc2e33c (diff)
download   postgresql-5cbfce562f7cd2aab0cdc4694ce298ec3567930e.tar.gz
           postgresql-5cbfce562f7cd2aab0cdc4694ce298ec3567930e.zip
Initial pgindent and pgperltidy run for v13.
Includes some manual cleanup of places that pgindent messed up,
most of which weren't per project style anyway.
Notably, it seems some people didn't absorb the style rules of
commit c9d297751, because there were a bunch of new occurrences
of function calls with a newline just after the left paren, all
with faulty expectations about how the rest of the call would get
indented.
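
[Editor's note: the rule referenced above forbids breaking the line immediately
after a function call's left parenthesis, because pgindent will re-indent the
continuation lines on its own terms rather than where the author put them.
A minimal sketch of the disallowed and preferred layouts, using hypothetical
names not taken from this commit; several nodeAgg.c hunks below fix exactly
this pattern.]

    /*
     * Disallowed since commit c9d297751: a newline directly after the left
     * paren.  pgindent will not keep the argument lines where the author
     * placed them.
     */
    result = compute_something(
        first_arg, second_arg, third_arg);

    /*
     * Project style: keep the first argument on the call line and align
     * continuation lines just past the left paren.
     */
    result = compute_something(first_arg, second_arg,
                               third_arg);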
198 files changed, 2021 insertions, 1788 deletions
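
[Editor's note: most of the churn below is mechanical.  pgindent aligns the
names in local variable declarations to a fixed tab stop, reflows multi-line
block comments to the project's line-length limit, wraps overlong end-of-line
comments onto a continuation line, and puts one space after each cast.  A
before/after sketch of those rewrites, with illustrative variable names,
mirroring hunks such as the dbasedesc.c and _int_bool.c changes below;
pgperltidy applies the analogous reflowing to the Perl scripts such as
mk_feature_tables.pl and genbki.pl.]

    /* Before pgindent: unaligned declaration, tight stacked casts */
    int i;
    len = (int)(intptr_t) siglen;

    /* After pgindent: name aligned to the declaration column, spaced casts */
    int         i;
    len = (int) (intptr_t) siglen;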
diff --git a/contrib/adminpack/adminpack.c b/contrib/adminpack/adminpack.c index 3f4b06fdbbd..d064b5a0806 100644 --- a/contrib/adminpack/adminpack.c +++ b/contrib/adminpack/adminpack.c @@ -217,7 +217,7 @@ Datum pg_file_sync(PG_FUNCTION_ARGS) { char *filename; - struct stat fst; + struct stat fst; filename = convert_and_check_filename(PG_GETARG_TEXT_PP(0)); diff --git a/contrib/intarray/_int_bool.c b/contrib/intarray/_int_bool.c index 58113892d3b..4b6a31080e4 100644 --- a/contrib/intarray/_int_bool.c +++ b/contrib/intarray/_int_bool.c @@ -256,7 +256,7 @@ checkcondition_arr(void *checkval, ITEM *item, void *options) static bool checkcondition_bit(void *checkval, ITEM *item, void *siglen) { - return GETBIT(checkval, HASHVAL(item->val, (int)(intptr_t) siglen)); + return GETBIT(checkval, HASHVAL(item->val, (int) (intptr_t) siglen)); } /* @@ -300,7 +300,7 @@ bool signconsistent(QUERYTYPE *query, BITVECP sign, int siglen, bool calcnot) { return execute(GETQUERY(query) + query->size - 1, - (void *) sign, (void *)(intptr_t) siglen, calcnot, + (void *) sign, (void *) (intptr_t) siglen, calcnot, checkcondition_bit); } diff --git a/contrib/ltree/_ltree_gist.c b/contrib/ltree/_ltree_gist.c index 95cc367dd81..72516c3b6b9 100644 --- a/contrib/ltree/_ltree_gist.c +++ b/contrib/ltree/_ltree_gist.c @@ -407,8 +407,8 @@ gist_te(ltree_gist *key, ltree *query, int siglen) typedef struct LtreeSignature { - BITVECP sign; - int siglen; + BITVECP sign; + int siglen; } LtreeSignature; static bool diff --git a/contrib/ltree/ltree.h b/contrib/ltree/ltree.h index 7eac7c94528..dc68a0c212f 100644 --- a/contrib/ltree/ltree.h +++ b/contrib/ltree/ltree.h @@ -272,7 +272,7 @@ typedef struct #define LTG_GETRNODE(x, siglen) ( LTG_ISONENODE(x) ? LTG_NODE(x) : LTG_RNODE(x, siglen) ) extern ltree_gist *ltree_gist_alloc(bool isalltrue, BITVECP sign, int siglen, - ltree *left, ltree *right); + ltree *left, ltree *right); /* GiST support for ltree[] */ diff --git a/contrib/ltree/ltree_gist.c b/contrib/ltree/ltree_gist.c index 041e28064ff..6cf181bc530 100644 --- a/contrib/ltree/ltree_gist.c +++ b/contrib/ltree/ltree_gist.c @@ -40,7 +40,7 @@ ltree_gist_alloc(bool isalltrue, BITVECP sign, int siglen, ltree *left, ltree *right) { int32 size = LTG_HDRSIZE + (isalltrue ? 0 : siglen) + - (left ? VARSIZE(left) + (right ? VARSIZE(right) : 0) : 0); + (left ? VARSIZE(left) + (right ? 
VARSIZE(right) : 0) : 0); ltree_gist *result = palloc(size); SET_VARSIZE(result, size); @@ -557,8 +557,8 @@ gist_between(ltree_gist *key, lquery *query, int siglen) typedef struct LtreeSignature { - BITVECP sign; - int siglen; + BITVECP sign; + int siglen; } LtreeSignature; static bool diff --git a/contrib/pg_stat_statements/pg_stat_statements.c b/contrib/pg_stat_statements/pg_stat_statements.c index 4ce25fb88aa..cef8bb5a49a 100644 --- a/contrib/pg_stat_statements/pg_stat_statements.c +++ b/contrib/pg_stat_statements/pg_stat_statements.c @@ -2681,6 +2681,7 @@ JumbleRowMarks(pgssJumbleState *jstate, List *rowMarks) foreach(lc, rowMarks) { RowMarkClause *rowmark = lfirst_node(RowMarkClause, lc); + if (!rowmark->pushedDown) { APP_JUMB(rowmark->rti); diff --git a/contrib/pg_visibility/pg_visibility.c b/contrib/pg_visibility/pg_visibility.c index 0cd1160ceb2..68d580ed1e0 100644 --- a/contrib/pg_visibility/pg_visibility.c +++ b/contrib/pg_visibility/pg_visibility.c @@ -384,7 +384,7 @@ pg_truncate_visibility_map(PG_FUNCTION_ARGS) Oid relid = PG_GETARG_OID(0); Relation rel; ForkNumber fork; - BlockNumber block; + BlockNumber block; rel = relation_open(relid, AccessExclusiveLock); diff --git a/contrib/postgres_fdw/connection.c b/contrib/postgres_fdw/connection.c index e45647f3eaf..52d1fe35631 100644 --- a/contrib/postgres_fdw/connection.c +++ b/contrib/postgres_fdw/connection.c @@ -303,8 +303,8 @@ connect_pg_server(ForeignServer *server, UserMapping *user) /* * Check that non-superuser has used password to establish connection; * otherwise, he's piggybacking on the postgres server's user - * identity. See also dblink_security_check() in contrib/dblink - * and check_conn_params. + * identity. See also dblink_security_check() in contrib/dblink and + * check_conn_params. */ if (!superuser_arg(user->userid) && UserMappingPasswordRequired(user) && !PQconnectionUsedPassword(conn)) @@ -361,6 +361,7 @@ UserMappingPasswordRequired(UserMapping *user) foreach(cell, user->options) { DefElem *def = (DefElem *) lfirst(cell); + if (strcmp(def->defname, "password_required") == 0) return defGetBoolean(def); } diff --git a/contrib/postgres_fdw/option.c b/contrib/postgres_fdw/option.c index c442af5bb96..1a03e02263e 100644 --- a/contrib/postgres_fdw/option.c +++ b/contrib/postgres_fdw/option.c @@ -144,13 +144,13 @@ postgres_fdw_validator(PG_FUNCTION_ARGS) } else if (strcmp(def->defname, "password_required") == 0) { - bool pw_required = defGetBoolean(def); + bool pw_required = defGetBoolean(def); /* * Only the superuser may set this option on a user mapping, or * alter a user mapping on which this option is set. We allow a - * user to clear this option if it's set - in fact, we don't have a - * choice since we can't see the old mapping when validating an + * user to clear this option if it's set - in fact, we don't have + * a choice since we can't see the old mapping when validating an * alter. */ if (!superuser() && !pw_required) @@ -204,11 +204,11 @@ InitPgFdwOptions(void) {"fetch_size", ForeignServerRelationId, false}, {"fetch_size", ForeignTableRelationId, false}, {"password_required", UserMappingRelationId, false}, + /* * sslcert and sslkey are in fact libpq options, but we repeat them - * here to allow them to appear in both foreign server context - * (when we generate libpq options) and user mapping context - * (from here). + * here to allow them to appear in both foreign server context (when + * we generate libpq options) and user mapping context (from here). 
*/ {"sslcert", UserMappingRelationId, true}, {"sslkey", UserMappingRelationId, true}, diff --git a/doc/src/sgml/mk_feature_tables.pl b/doc/src/sgml/mk_feature_tables.pl index a9dfebfbb31..5a16da0d060 100644 --- a/doc/src/sgml/mk_feature_tables.pl +++ b/doc/src/sgml/mk_feature_tables.pl @@ -55,8 +55,10 @@ while (<$feat>) print " <entry>$feature_id</entry>\n"; } print " <entry>", - defined($feature_packages{$feature_id}) ? $feature_packages{$feature_id} : "", - "</entry>\n"; + defined($feature_packages{$feature_id}) + ? $feature_packages{$feature_id} + : "", + "</entry>\n"; if ($subfeature_id) { print " <entry>$subfeature_name</entry>\n"; diff --git a/src/backend/access/common/detoast.c b/src/backend/access/common/detoast.c index 496240c7551..44c37edcbb4 100644 --- a/src/backend/access/common/detoast.c +++ b/src/backend/access/common/detoast.c @@ -201,7 +201,7 @@ detoast_attr(struct varlena *attr) */ struct varlena * detoast_attr_slice(struct varlena *attr, - int32 sliceoffset, int32 slicelength) + int32 sliceoffset, int32 slicelength) { struct varlena *preslice; struct varlena *result; @@ -220,12 +220,12 @@ detoast_attr_slice(struct varlena *attr, /* * For compressed values, we need to fetch enough slices to decompress - * at least the requested part (when a prefix is requested). Otherwise, - * just fetch all slices. + * at least the requested part (when a prefix is requested). + * Otherwise, just fetch all slices. */ if (slicelength > 0 && sliceoffset >= 0) { - int32 max_size; + int32 max_size; /* * Determine maximum amount of compressed data needed for a prefix @@ -253,7 +253,7 @@ detoast_attr_slice(struct varlena *attr, Assert(!VARATT_IS_EXTERNAL_INDIRECT(redirect.pointer)); return detoast_attr_slice(redirect.pointer, - sliceoffset, slicelength); + sliceoffset, slicelength); } else if (VARATT_IS_EXTERNAL_EXPANDED(attr)) { @@ -343,7 +343,8 @@ toast_fetch_datum(struct varlena *attr) SET_VARSIZE(result, attrsize + VARHDRSZ); if (attrsize == 0) - return result; /* Probably shouldn't happen, but just in case. */ + return result; /* Probably shouldn't happen, but just in + * case. */ /* * Open the toast relation and its indexes @@ -387,9 +388,9 @@ toast_fetch_datum_slice(struct varlena *attr, int32 sliceoffset, VARATT_EXTERNAL_GET_POINTER(toast_pointer, attr); /* - * It's nonsense to fetch slices of a compressed datum unless when it's - * a prefix -- this isn't lo_* we can't return a compressed datum which - * is meaningful to toast later. + * It's nonsense to fetch slices of a compressed datum unless when it's a + * prefix -- this isn't lo_* we can't return a compressed datum which is + * meaningful to toast later. */ Assert(!VARATT_EXTERNAL_IS_COMPRESSED(toast_pointer) || 0 == sliceoffset); diff --git a/src/backend/access/gist/gist.c b/src/backend/access/gist/gist.c index 9eee5381aea..79fe6eb8d62 100644 --- a/src/backend/access/gist/gist.c +++ b/src/backend/access/gist/gist.c @@ -1347,8 +1347,8 @@ gistfinishsplit(GISTInsertState *state, GISTInsertStack *stack, left->buf, right->buf, false, false)) { /* - * If the parent page was split, the existing downlink might - * have moved. + * If the parent page was split, the existing downlink might have + * moved. 
*/ stack->downlinkoffnum = InvalidOffsetNumber; } @@ -1370,9 +1370,10 @@ gistfinishsplit(GISTInsertState *state, GISTInsertStack *stack, tuples, 2, stack->downlinkoffnum, left->buf, right->buf, - true, /* Unlock parent */ - unlockbuf /* Unlock stack->buffer if caller wants that */ - )) + true, /* Unlock parent */ + unlockbuf /* Unlock stack->buffer if caller wants + * that */ + )) { /* * If the parent page was split, the downlink might have moved. diff --git a/src/backend/access/hash/hashutil.c b/src/backend/access/hash/hashutil.c index b23d19474e6..eb510be3324 100644 --- a/src/backend/access/hash/hashutil.c +++ b/src/backend/access/hash/hashutil.c @@ -144,6 +144,7 @@ _hash_spareindex(uint32 num_bucket) { uint32 splitpoint_group; uint32 splitpoint_phases; + splitpoint_group = pg_ceil_log2_32(num_bucket); if (splitpoint_group < HASH_SPLITPOINT_GROUPS_WITH_ONE_PHASE) diff --git a/src/backend/access/hash/hashvalidate.c b/src/backend/access/hash/hashvalidate.c index b3d1367fecb..6f14a9fb455 100644 --- a/src/backend/access/hash/hashvalidate.c +++ b/src/backend/access/hash/hashvalidate.c @@ -318,7 +318,7 @@ check_hash_func_signature(Oid funcid, int16 amprocnum, Oid argtype) argtype == XIDOID || argtype == CIDOID)) /* okay, allowed use of hashint4() */ ; else if ((funcid == F_HASHINT8 || funcid == F_HASHINT8EXTENDED) && - (argtype == XID8OID)) + (argtype == XID8OID)) /* okay, allowed use of hashint8() */ ; else if ((funcid == F_TIMESTAMP_HASH || funcid == F_TIMESTAMP_HASH_EXTENDED) && diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c index 0d4ed602d76..94eb37d48d2 100644 --- a/src/backend/access/heap/heapam.c +++ b/src/backend/access/heap/heapam.c @@ -2153,8 +2153,8 @@ heap_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples, RelationPutHeapTuple(relation, buffer, heaptuples[ndone], false); /* - * Note that heap_multi_insert is not used for catalog tuples yet, - * but this will cover the gap once that is the case. + * Note that heap_multi_insert is not used for catalog tuples yet, but + * this will cover the gap once that is the case. 
*/ if (needwal && need_cids) log_heap_new_cid(relation, heaptuples[ndone]); diff --git a/src/backend/access/index/indexam.c b/src/backend/access/index/indexam.c index a3f77169a79..6b9750c244a 100644 --- a/src/backend/access/index/indexam.c +++ b/src/backend/access/index/indexam.c @@ -943,7 +943,7 @@ index_opclass_options(Relation indrel, AttrNumber attnum, Datum attoptions, /* fetch options support procedure if specified */ if (amoptsprocnum != 0) - procid =index_getprocid(indrel, attnum, amoptsprocnum); + procid = index_getprocid(indrel, attnum, amoptsprocnum); if (!OidIsValid(procid)) { @@ -953,7 +953,7 @@ index_opclass_options(Relation indrel, AttrNumber attnum, Datum attoptions, bool isnull; if (!DatumGetPointer(attoptions)) - return NULL; /* ok, no options, no procedure */ + return NULL; /* ok, no options, no procedure */ /* * Report an error if the opclass's options-parsing procedure does not diff --git a/src/backend/access/nbtree/nbtpage.c b/src/backend/access/nbtree/nbtpage.c index 10abd61983a..93d2b706633 100644 --- a/src/backend/access/nbtree/nbtpage.c +++ b/src/backend/access/nbtree/nbtpage.c @@ -1566,7 +1566,8 @@ _bt_pagedel(Relation rel, Buffer leafbuf, TransactionId *oldestBtpoXact) BTScanInsert itup_key; ItemId itemid; IndexTuple targetkey; - BlockNumber leftsib, leafblkno; + BlockNumber leftsib, + leafblkno; Buffer sleafbuf; itemid = PageGetItemId(page, P_HIKEY); @@ -1777,6 +1778,7 @@ _bt_mark_page_halfdead(Relation rel, Buffer leafbuf, BTStack stack) opaque = (BTPageOpaque) PageGetSpecialPointer(page); #ifdef USE_ASSERT_CHECKING + /* * This is just an assertion because _bt_lock_subtree_parent should have * guaranteed tuple has the expected contents @@ -2368,7 +2370,8 @@ _bt_lock_subtree_parent(Relation rel, BlockNumber child, BTStack stack, Buffer *subtreeparent, OffsetNumber *poffset, BlockNumber *topparent, BlockNumber *topparentrightsib) { - BlockNumber parent, leftsibparent; + BlockNumber parent, + leftsibparent; OffsetNumber parentoffset, maxoff; Buffer pbuf; @@ -2439,9 +2442,9 @@ _bt_lock_subtree_parent(Relation rel, BlockNumber child, BTStack stack, /* * Now make sure that the parent deletion is itself safe by examining the * child's grandparent page. Recurse, passing the parent page as the - * child page (child's grandparent is the parent on the next level up). - * If parent deletion is unsafe, then child deletion must also be unsafe - * (in which case caller cannot delete any pages at all). + * child page (child's grandparent is the parent on the next level up). If + * parent deletion is unsafe, then child deletion must also be unsafe (in + * which case caller cannot delete any pages at all). 
*/ *topparent = parent; *topparentrightsib = opaque->btpo_next; diff --git a/src/backend/access/nbtree/nbtree.c b/src/backend/access/nbtree/nbtree.c index a1fc63d42ee..e947addef6b 100644 --- a/src/backend/access/nbtree/nbtree.c +++ b/src/backend/access/nbtree/nbtree.c @@ -1091,7 +1091,8 @@ btvacuumpage(BTVacState *vstate, BlockNumber scanblkno) void *callback_state = vstate->callback_state; Relation rel = info->index; bool attempt_pagedel; - BlockNumber blkno, backtrack_to; + BlockNumber blkno, + backtrack_to; Buffer buf; Page page; BTPageOpaque opaque; diff --git a/src/backend/access/nbtree/nbtsearch.c b/src/backend/access/nbtree/nbtsearch.c index 8ff49ce6d6f..45342248128 100644 --- a/src/backend/access/nbtree/nbtsearch.c +++ b/src/backend/access/nbtree/nbtsearch.c @@ -156,11 +156,10 @@ _bt_search(Relation rel, BTScanInsert key, Buffer *bufP, int access, /* * We need to save the location of the pivot tuple we chose in the - * parent page on a stack. If we need to split a page, we'll use - * the stack to work back up to its parent page. If caller ends up - * splitting a page one level down, it usually ends up inserting a - * new pivot tuple/downlink immediately after the location recorded - * here. + * parent page on a stack. If we need to split a page, we'll use the + * stack to work back up to its parent page. If caller ends up + * splitting a page one level down, it usually ends up inserting a new + * pivot tuple/downlink immediately after the location recorded here. */ new_stack = (BTStack) palloc(sizeof(BTStackData)); new_stack->bts_blkno = par_blkno; diff --git a/src/backend/access/nbtree/nbtsplitloc.c b/src/backend/access/nbtree/nbtsplitloc.c index 1a527e59e0c..fcfc23ce601 100644 --- a/src/backend/access/nbtree/nbtsplitloc.c +++ b/src/backend/access/nbtree/nbtsplitloc.c @@ -72,7 +72,7 @@ static bool _bt_afternewitemoff(FindSplitData *state, OffsetNumber maxoff, static bool _bt_adjacenthtid(ItemPointer lowhtid, ItemPointer highhtid); static OffsetNumber _bt_bestsplitloc(FindSplitData *state, int perfectpenalty, bool *newitemonleft, FindSplitStrat strategy); -static int _bt_defaultinterval(FindSplitData *state); +static int _bt_defaultinterval(FindSplitData *state); static int _bt_strategy(FindSplitData *state, SplitPoint *leftpage, SplitPoint *rightpage, FindSplitStrat *strategy); static void _bt_interval_edges(FindSplitData *state, diff --git a/src/backend/access/rmgrdesc/dbasedesc.c b/src/backend/access/rmgrdesc/dbasedesc.c index 73d2a4ca34b..d82484b9db4 100644 --- a/src/backend/access/rmgrdesc/dbasedesc.c +++ b/src/backend/access/rmgrdesc/dbasedesc.c @@ -35,7 +35,7 @@ dbase_desc(StringInfo buf, XLogReaderState *record) else if (info == XLOG_DBASE_DROP) { xl_dbase_drop_rec *xlrec = (xl_dbase_drop_rec *) rec; - int i; + int i; appendStringInfo(buf, "dir"); for (i = 0; i < xlrec->ntablespaces; i++) diff --git a/src/backend/access/rmgrdesc/xactdesc.c b/src/backend/access/rmgrdesc/xactdesc.c index fbc5942578b..9fce75565f4 100644 --- a/src/backend/access/rmgrdesc/xactdesc.c +++ b/src/backend/access/rmgrdesc/xactdesc.c @@ -251,7 +251,7 @@ static void xact_desc_relations(StringInfo buf, char *label, int nrels, RelFileNode *xnodes) { - int i; + int i; if (nrels > 0) { @@ -269,7 +269,7 @@ xact_desc_relations(StringInfo buf, char *label, int nrels, static void xact_desc_subxacts(StringInfo buf, int nsubxacts, TransactionId *subxacts) { - int i; + int i; if (nsubxacts > 0) { diff --git a/src/backend/access/spgist/spgvalidate.c b/src/backend/access/spgist/spgvalidate.c index 
3c433e94e76..f0cfd8b42b1 100644 --- a/src/backend/access/spgist/spgvalidate.c +++ b/src/backend/access/spgist/spgvalidate.c @@ -275,7 +275,7 @@ spgvalidate(Oid opclassoid) if ((thisgroup->functionset & (((uint64) 1) << i)) != 0) continue; /* got it */ if (i == SPGIST_OPTIONS_PROC) - continue; /* optional method */ + continue; /* optional method */ ereport(INFO, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), errmsg("operator family \"%s\" of access method %s is missing support function %d for type %s", diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c index 3984dd3e1a0..cd30b62d365 100644 --- a/src/backend/access/transam/xact.c +++ b/src/backend/access/transam/xact.c @@ -3750,7 +3750,7 @@ EndTransactionBlock(bool chain) if (chain) ereport(ERROR, (errcode(ERRCODE_NO_ACTIVE_SQL_TRANSACTION), - /* translator: %s represents an SQL statement name */ + /* translator: %s represents an SQL statement name */ errmsg("%s can only be used in transaction blocks", "COMMIT AND CHAIN"))); else @@ -3829,7 +3829,7 @@ EndTransactionBlock(bool chain) if (chain) ereport(ERROR, (errcode(ERRCODE_NO_ACTIVE_SQL_TRANSACTION), - /* translator: %s represents an SQL statement name */ + /* translator: %s represents an SQL statement name */ errmsg("%s can only be used in transaction blocks", "COMMIT AND CHAIN"))); else @@ -3952,7 +3952,7 @@ UserAbortTransactionBlock(bool chain) if (chain) ereport(ERROR, (errcode(ERRCODE_NO_ACTIVE_SQL_TRANSACTION), - /* translator: %s represents an SQL statement name */ + /* translator: %s represents an SQL statement name */ errmsg("%s can only be used in transaction blocks", "ROLLBACK AND CHAIN"))); else diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c index 4284659099b..ca09d81b08c 100644 --- a/src/backend/access/transam/xlog.c +++ b/src/backend/access/transam/xlog.c @@ -6071,7 +6071,7 @@ recoveryApplyDelay(XLogReaderState *record) { uint8 xact_info; TimestampTz xtime; - TimestampTz delayUntil; + TimestampTz delayUntil; long secs; int microsecs; @@ -6341,7 +6341,11 @@ StartupXLOG(void) switch (ControlFile->state) { case DB_SHUTDOWNED: - /* This is the expected case, so don't be chatty in standalone mode */ + + /* + * This is the expected case, so don't be chatty in standalone + * mode + */ ereport(IsPostmasterEnvironment ? LOG : NOTICE, (errmsg("database system was shut down at %s", str_time(ControlFile->time)))); @@ -10691,8 +10695,8 @@ do_pg_start_backup(const char *backupidstr, bool fast, TimeLineID *starttli_p, datadirpathlen = strlen(DataDir); /* - * Report that we are now estimating the total backup size - * if we're streaming base backup as requested by pg_basebackup + * Report that we are now estimating the total backup size if we're + * streaming base backup as requested by pg_basebackup */ if (tablespaces) pgstat_progress_update_param(PROGRESS_BASEBACKUP_PHASE, @@ -11410,7 +11414,7 @@ do_pg_stop_backup(char *labelfile, bool waitforarchive, TimeLineID *stoptli_p) void do_pg_abort_backup(int code, Datum arg) { - bool emit_warning = DatumGetBool(arg); + bool emit_warning = DatumGetBool(arg); /* * Quick exit if session is not keeping around a non-exclusive backup @@ -12154,8 +12158,8 @@ WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess, */ /* - * We should be able to move to XLOG_FROM_STREAM - * only in standby mode. + * We should be able to move to XLOG_FROM_STREAM only in + * standby mode. 
*/ Assert(StandbyMode); @@ -12242,6 +12246,7 @@ WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess, { case XLOG_FROM_ARCHIVE: case XLOG_FROM_PG_WAL: + /* * WAL receiver must not be running when reading WAL from * archive or pg_wal. @@ -12279,8 +12284,8 @@ WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess, bool havedata; /* - * We should be able to move to XLOG_FROM_STREAM - * only in standby mode. + * We should be able to move to XLOG_FROM_STREAM only in + * standby mode. */ Assert(StandbyMode); diff --git a/src/backend/access/transam/xlogarchive.c b/src/backend/access/transam/xlogarchive.c index 55becd65d4d..cdd586fcfba 100644 --- a/src/backend/access/transam/xlogarchive.c +++ b/src/backend/access/transam/xlogarchive.c @@ -64,8 +64,8 @@ RestoreArchivedFile(char *path, const char *xlogfname, TimeLineID restartTli; /* - * Ignore restore_command when not in archive recovery (meaning - * we are in crash recovery). + * Ignore restore_command when not in archive recovery (meaning we are in + * crash recovery). */ if (!ArchiveRecoveryRequested) goto not_available; diff --git a/src/backend/access/transam/xlogreader.c b/src/backend/access/transam/xlogreader.c index aae3fee24cd..5995798b585 100644 --- a/src/backend/access/transam/xlogreader.c +++ b/src/backend/access/transam/xlogreader.c @@ -1595,9 +1595,9 @@ RestoreBlockImage(XLogReaderState *record, uint8 block_id, char *page) FullTransactionId XLogRecGetFullXid(XLogReaderState *record) { - TransactionId xid, - next_xid; - uint32 epoch; + TransactionId xid, + next_xid; + uint32 epoch; /* * This function is only safe during replay, because it depends on the @@ -1610,8 +1610,8 @@ XLogRecGetFullXid(XLogReaderState *record) epoch = EpochFromFullTransactionId(ShmemVariableCache->nextFullXid); /* - * If xid is numerically greater than next_xid, it has to be from the - * last epoch. + * If xid is numerically greater than next_xid, it has to be from the last + * epoch. */ if (unlikely(xid > next_xid)) --epoch; diff --git a/src/backend/catalog/genbki.pl b/src/backend/catalog/genbki.pl index 8e03af4ffcd..b07537fbbac 100644 --- a/src/backend/catalog/genbki.pl +++ b/src/backend/catalog/genbki.pl @@ -109,7 +109,7 @@ foreach my $header (@ARGV) } else { - push @{ $catalog_data{pg_description}}, \%descr; + push @{ $catalog_data{pg_description} }, \%descr; } } @@ -679,8 +679,8 @@ close $bki; close $schemapg; # Finally, rename the completed files into place. -Catalog::RenameTempFile($bkifile, $tmpext); -Catalog::RenameTempFile($schemafile, $tmpext); +Catalog::RenameTempFile($bkifile, $tmpext); +Catalog::RenameTempFile($schemafile, $tmpext); exit 0; diff --git a/src/backend/catalog/heap.c b/src/backend/catalog/heap.c index 632c058b80a..e393c93a452 100644 --- a/src/backend/catalog/heap.c +++ b/src/backend/catalog/heap.c @@ -3464,7 +3464,7 @@ restart: */ foreach(cell, parent_cons) { - Oid parent = lfirst_oid(cell); + Oid parent = lfirst_oid(cell); ScanKeyInit(&key, Anum_pg_constraint_oid, @@ -3487,9 +3487,9 @@ restart: * * Because of this arrangement, we can correctly catch all * relevant relations by adding to 'parent_cons' all rows with - * valid conparentid, and to the 'oids' list all rows with a - * zero conparentid. If any oids are added to 'oids', redo the - * first loop above by setting 'restart'. + * valid conparentid, and to the 'oids' list all rows with a zero + * conparentid. If any oids are added to 'oids', redo the first + * loop above by setting 'restart'. 
*/ if (OidIsValid(con->conparentid)) parent_cons = list_append_unique_oid(parent_cons, diff --git a/src/backend/catalog/pg_cast.c b/src/backend/catalog/pg_cast.c index 38544556371..5ea2b82b083 100644 --- a/src/backend/catalog/pg_cast.c +++ b/src/backend/catalog/pg_cast.c @@ -43,13 +43,13 @@ ObjectAddress CastCreate(Oid sourcetypeid, Oid targettypeid, Oid funcid, char castcontext, char castmethod, DependencyType behavior) { - Relation relation; - HeapTuple tuple; - Oid castid; - Datum values[Natts_pg_cast]; - bool nulls[Natts_pg_cast]; - ObjectAddress myself, - referenced; + Relation relation; + HeapTuple tuple; + Oid castid; + Datum values[Natts_pg_cast]; + bool nulls[Natts_pg_cast]; + ObjectAddress myself, + referenced; relation = table_open(CastRelationId, RowExclusiveLock); diff --git a/src/backend/catalog/pg_depend.c b/src/backend/catalog/pg_depend.c index fa38ee94777..21cfdcace94 100644 --- a/src/backend/catalog/pg_depend.c +++ b/src/backend/catalog/pg_depend.c @@ -706,8 +706,8 @@ getAutoExtensionsOfObject(Oid classId, Oid objectId) { List *result = NIL; Relation depRel; - ScanKeyData key[2]; - SysScanDesc scan; + ScanKeyData key[2]; + SysScanDesc scan; HeapTuple tup; depRel = table_open(DependRelationId, AccessShareLock); diff --git a/src/backend/catalog/pg_shdepend.c b/src/backend/catalog/pg_shdepend.c index 61447f33a69..f776e821b3d 100644 --- a/src/backend/catalog/pg_shdepend.c +++ b/src/backend/catalog/pg_shdepend.c @@ -1324,6 +1324,7 @@ shdepDropOwned(List *roleids, DropBehavior behavior) sdepForm->objid); break; case SHARED_DEPENDENCY_POLICY: + /* * Try to remove role from policy; if unable to, remove * policy. @@ -1335,6 +1336,7 @@ shdepDropOwned(List *roleids, DropBehavior behavior) obj.classId = sdepForm->classid; obj.objectId = sdepForm->objid; obj.objectSubId = sdepForm->objsubid; + /* * Acquire lock on object, then verify this dependency * is still relevant. If not, the object might have diff --git a/src/backend/catalog/storage.c b/src/backend/catalog/storage.c index d713d5cade9..ec143b640aa 100644 --- a/src/backend/catalog/storage.c +++ b/src/backend/catalog/storage.c @@ -280,8 +280,8 @@ RelationTruncate(Relation rel, BlockNumber nblocks) bool vm; bool need_fsm_vacuum = false; ForkNumber forks[MAX_FORKNUM]; - BlockNumber blocks[MAX_FORKNUM]; - int nforks = 0; + BlockNumber blocks[MAX_FORKNUM]; + int nforks = 0; /* Open it at the smgr level if not already done */ RelationOpenSmgr(rel); @@ -298,7 +298,7 @@ RelationTruncate(Relation rel, BlockNumber nblocks) blocks[nforks] = nblocks; nforks++; - /* Prepare for truncation of the FSM if it exists */ + /* Prepare for truncation of the FSM if it exists */ fsm = smgrexists(rel->rd_smgr, FSM_FORKNUM); if (fsm) { @@ -367,9 +367,9 @@ RelationTruncate(Relation rel, BlockNumber nblocks) smgrtruncate(rel->rd_smgr, forks, nforks, blocks); /* - * Update upper-level FSM pages to account for the truncation. - * This is important because the just-truncated pages were likely - * marked as all-free, and would be preferentially selected. + * Update upper-level FSM pages to account for the truncation. This is + * important because the just-truncated pages were likely marked as + * all-free, and would be preferentially selected. 
*/ if (need_fsm_vacuum) FreeSpaceMapVacuumRange(rel, nblocks, InvalidBlockNumber); @@ -923,8 +923,8 @@ smgr_redo(XLogReaderState *record) SMgrRelation reln; Relation rel; ForkNumber forks[MAX_FORKNUM]; - BlockNumber blocks[MAX_FORKNUM]; - int nforks = 0; + BlockNumber blocks[MAX_FORKNUM]; + int nforks = 0; bool need_fsm_vacuum = false; reln = smgropen(xlrec->rnode, InvalidBackendId); @@ -995,9 +995,9 @@ smgr_redo(XLogReaderState *record) smgrtruncate(reln, forks, nforks, blocks); /* - * Update upper-level FSM pages to account for the truncation. - * This is important because the just-truncated pages were likely - * marked as all-free, and would be preferentially selected. + * Update upper-level FSM pages to account for the truncation. This is + * important because the just-truncated pages were likely marked as + * all-free, and would be preferentially selected. */ if (need_fsm_vacuum) FreeSpaceMapVacuumRange(rel, xlrec->blkno, diff --git a/src/backend/commands/alter.c b/src/backend/commands/alter.c index 951690b2b8d..b11ebf0f618 100644 --- a/src/backend/commands/alter.c +++ b/src/backend/commands/alter.c @@ -470,7 +470,7 @@ ExecAlterObjectDependsStmt(AlterObjectDependsStmt *stmt, ObjectAddress *refAddre } else { - List *currexts; + List *currexts; /* Avoid duplicates */ currexts = getAutoExtensionsOfObject(address.classId, diff --git a/src/backend/commands/dbcommands.c b/src/backend/commands/dbcommands.c index 367c30adb01..f27c3fe8c1c 100644 --- a/src/backend/commands/dbcommands.c +++ b/src/backend/commands/dbcommands.c @@ -1947,11 +1947,11 @@ remove_dbtablespaces(Oid db_id) Relation rel; TableScanDesc scan; HeapTuple tuple; - List *ltblspc = NIL; - ListCell *cell; - int ntblspc; - int i; - Oid *tablespace_ids; + List *ltblspc = NIL; + ListCell *cell; + int ntblspc; + int i; + Oid *tablespace_ids; rel = table_open(TableSpaceRelationId, AccessShareLock); scan = table_beginscan_catalog(rel, 0, NULL); diff --git a/src/backend/commands/event_trigger.c b/src/backend/commands/event_trigger.c index 91800d1fac1..918b9b4d8fa 100644 --- a/src/backend/commands/event_trigger.c +++ b/src/backend/commands/event_trigger.c @@ -72,12 +72,6 @@ typedef struct EventTriggerQueryState static EventTriggerQueryState *currentEventTriggerState = NULL; -typedef struct -{ - const char *obtypename; - bool supported; -} event_trigger_support_data; - /* Support for dropped objects */ typedef struct SQLDropObject { diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c index 05f4d4c2c2b..058610af9b0 100644 --- a/src/backend/commands/explain.c +++ b/src/backend/commands/explain.c @@ -2886,8 +2886,8 @@ show_incremental_sort_info(IncrementalSortState *incrsortstate, * we don't need to do anything if there were 0 full groups. * * We still have to continue after this block if there are no full groups, - * though, since it's possible that we have workers that did real work even - * if the leader didn't participate. + * though, since it's possible that we have workers that did real work + * even if the leader didn't participate. */ if (fullsortGroupInfo->groupCount > 0) { @@ -2914,8 +2914,8 @@ show_incremental_sort_info(IncrementalSortState *incrsortstate, &incrsortstate->shared_info->sinfo[n]; /* - * If a worker hasn't processed any sort groups at all, then exclude - * it from output since it either didn't launch or didn't + * If a worker hasn't processed any sort groups at all, then + * exclude it from output since it either didn't launch or didn't * contribute anything meaningful. 
*/ fullsortGroupInfo = &incsort_info->fullsortGroupInfo; @@ -2923,8 +2923,8 @@ show_incremental_sort_info(IncrementalSortState *incrsortstate, /* * Since we never have any prefix groups unless we've first sorted * a full groups and transitioned modes (copying the tuples into a - * prefix group), we don't need to do anything if there were 0 full - * groups. + * prefix group), we don't need to do anything if there were 0 + * full groups. */ if (fullsortGroupInfo->groupCount == 0) continue; @@ -3048,8 +3048,8 @@ show_hash_info(HashState *hashstate, ExplainState *es) static void show_hashagg_info(AggState *aggstate, ExplainState *es) { - Agg *agg = (Agg *)aggstate->ss.ps.plan; - int64 memPeakKb = (aggstate->hash_mem_peak + 1023) / 1024; + Agg *agg = (Agg *) aggstate->ss.ps.plan; + int64 memPeakKb = (aggstate->hash_mem_peak + 1023) / 1024; Assert(IsA(aggstate, AggState)); diff --git a/src/backend/commands/extension.c b/src/backend/commands/extension.c index 38cbea385ae..9b669d95b80 100644 --- a/src/backend/commands/extension.c +++ b/src/backend/commands/extension.c @@ -1402,39 +1402,39 @@ CreateExtensionInternal(char *extensionName, * does what is needed, we try to find a sequence of update scripts that * will get us there. */ - filename = get_extension_script_filename(pcontrol, NULL, versionName); - if (stat(filename, &fst) == 0) - { - /* Easy, no extra scripts */ - updateVersions = NIL; - } - else - { - /* Look for best way to install this version */ - List *evi_list; - ExtensionVersionInfo *evi_start; - ExtensionVersionInfo *evi_target; + filename = get_extension_script_filename(pcontrol, NULL, versionName); + if (stat(filename, &fst) == 0) + { + /* Easy, no extra scripts */ + updateVersions = NIL; + } + else + { + /* Look for best way to install this version */ + List *evi_list; + ExtensionVersionInfo *evi_start; + ExtensionVersionInfo *evi_target; - /* Extract the version update graph from the script directory */ - evi_list = get_ext_ver_list(pcontrol); + /* Extract the version update graph from the script directory */ + evi_list = get_ext_ver_list(pcontrol); - /* Identify the target version */ - evi_target = get_ext_ver_info(versionName, &evi_list); + /* Identify the target version */ + evi_target = get_ext_ver_info(versionName, &evi_list); - /* Identify best path to reach target */ - evi_start = find_install_path(evi_list, evi_target, - &updateVersions); + /* Identify best path to reach target */ + evi_start = find_install_path(evi_list, evi_target, + &updateVersions); - /* Fail if no path ... */ - if (evi_start == NULL) - ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("extension \"%s\" has no installation script nor update path for version \"%s\"", - pcontrol->name, versionName))); + /* Fail if no path ... 
*/ + if (evi_start == NULL) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("extension \"%s\" has no installation script nor update path for version \"%s\"", + pcontrol->name, versionName))); - /* Otherwise, install best starting point and then upgrade */ - versionName = evi_start->name; - } + /* Otherwise, install best starting point and then upgrade */ + versionName = evi_start->name; + } /* * Fetch control parameters for installation target version diff --git a/src/backend/commands/functioncmds.c b/src/backend/commands/functioncmds.c index 694114adedc..bb918388842 100644 --- a/src/backend/commands/functioncmds.c +++ b/src/backend/commands/functioncmds.c @@ -1417,7 +1417,7 @@ CreateCast(CreateCastStmt *stmt) char castmethod; HeapTuple tuple; AclResult aclresult; - ObjectAddress myself; + ObjectAddress myself; sourcetypeid = typenameTypeId(NULL, stmt->sourcetype); targettypeid = typenameTypeId(NULL, stmt->targettype); diff --git a/src/backend/commands/opclasscmds.c b/src/backend/commands/opclasscmds.c index 7322cbc154e..7d44d3a049b 100644 --- a/src/backend/commands/opclasscmds.c +++ b/src/backend/commands/opclasscmds.c @@ -338,7 +338,7 @@ DefineOpClass(CreateOpClassStmt *stmt) opfamilyoid, /* oid of containing opfamily */ opclassoid; /* oid of opclass we create */ int maxOpNumber, /* amstrategies value */ - optsProcNumber, /* amoptsprocnum value */ + optsProcNumber, /* amoptsprocnum value */ maxProcNumber; /* amsupport value */ bool amstorage; /* amstorage flag */ List *operators; /* OpFamilyMember list for operators */ @@ -779,7 +779,7 @@ AlterOpFamily(AlterOpFamilyStmt *stmt) Oid amoid, /* our AM's oid */ opfamilyoid; /* oid of opfamily */ int maxOpNumber, /* amstrategies value */ - optsProcNumber, /* amopclassopts value */ + optsProcNumber, /* amopclassopts value */ maxProcNumber; /* amsupport value */ HeapTuple tup; Form_pg_am amform; @@ -1252,6 +1252,7 @@ assignProcTypes(OpFamilyMember *member, Oid amoid, Oid typeoid, ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), errmsg("btree equal image functions must return boolean"))); + /* * pg_amproc functions are indexed by (lefttype, righttype), but * an equalimage function can only be called at CREATE INDEX time. diff --git a/src/backend/commands/publicationcmds.c b/src/backend/commands/publicationcmds.c index 771268f70a2..a5e29b5a827 100644 --- a/src/backend/commands/publicationcmds.c +++ b/src/backend/commands/publicationcmds.c @@ -322,8 +322,8 @@ AlterPublicationOptions(AlterPublicationStmt *stmt, Relation rel, * invalidate all partitions contained in the respective partition * trees, not just those explicitly mentioned in the publication. 
*/ - List *relids = GetPublicationRelations(pubform->oid, - PUBLICATION_PART_ALL); + List *relids = GetPublicationRelations(pubform->oid, + PUBLICATION_PART_ALL); /* * We don't want to send too many individual messages, at some point @@ -380,8 +380,8 @@ AlterPublicationTables(AlterPublicationStmt *stmt, Relation rel, PublicationDropTables(pubid, rels, false); else /* DEFELEM_SET */ { - List *oldrelids = GetPublicationRelations(pubid, - PUBLICATION_PART_ROOT); + List *oldrelids = GetPublicationRelations(pubid, + PUBLICATION_PART_ROOT); List *delrels = NIL; ListCell *oldlc; diff --git a/src/backend/commands/statscmds.c b/src/backend/commands/statscmds.c index 988cdba6f53..e1b1afafd7e 100644 --- a/src/backend/commands/statscmds.c +++ b/src/backend/commands/statscmds.c @@ -431,7 +431,7 @@ AlterStatistics(AlterStatsStmt *stmt) Datum repl_val[Natts_pg_statistic_ext]; bool repl_null[Natts_pg_statistic_ext]; bool repl_repl[Natts_pg_statistic_ext]; - ObjectAddress address; + ObjectAddress address; int newtarget = stmt->stxstattarget; /* Limit statistics target to a sane range */ @@ -455,9 +455,9 @@ AlterStatistics(AlterStatsStmt *stmt) stxoid = get_statistics_object_oid(stmt->defnames, stmt->missing_ok); /* - * If we got here and the OID is not valid, it means the statistics - * does not exist, but the command specified IF EXISTS. So report - * this as a simple NOTICE and we're done. + * If we got here and the OID is not valid, it means the statistics does + * not exist, but the command specified IF EXISTS. So report this as a + * simple NOTICE and we're done. */ if (!OidIsValid(stxoid)) { diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c index a518b552b3d..8d2ed986d18 100644 --- a/src/backend/commands/tablecmds.c +++ b/src/backend/commands/tablecmds.c @@ -177,7 +177,7 @@ typedef struct AlteredTableInfo List *changedIndexOids; /* OIDs of indexes to rebuild */ List *changedIndexDefs; /* string definitions of same */ char *replicaIdentityIndex; /* index to reset as REPLICA IDENTITY */ - char *clusterOnIndex; /* index to use for CLUSTER */ + char *clusterOnIndex; /* index to use for CLUSTER */ } AlteredTableInfo; /* Struct describing one new constraint to check in Phase 3 scan */ @@ -1265,9 +1265,9 @@ RemoveRelations(DropStmt *drop) if (drop->concurrent) { /* - * Note that for temporary relations this lock may get upgraded - * later on, but as no other session can access a temporary - * relation, this is actually fine. + * Note that for temporary relations this lock may get upgraded later + * on, but as no other session can access a temporary relation, this + * is actually fine. */ lockmode = ShareUpdateExclusiveLock; Assert(drop->removeType == OBJECT_INDEX); @@ -1620,10 +1620,10 @@ ExecuteTruncate(TruncateStmt *stmt) } /* - * Inherited TRUNCATE commands perform access - * permission checks on the parent table only. - * So we skip checking the children's permissions - * and don't call truncate_check_perms() here. + * Inherited TRUNCATE commands perform access permission + * checks on the parent table only. So we skip checking the + * children's permissions and don't call + * truncate_check_perms() here. */ truncate_check_rel(RelationGetRelid(rel), rel->rd_rel); truncate_check_activity(rel); @@ -2650,6 +2650,7 @@ MergeAttributes(List *schema, List *supers, char relpersistence, errmsg("column \"%s\" inherits from generated column but specifies identity", def->colname))); } + /* * If the parent column is not generated, then take whatever * the child column definition says. 
@@ -7500,8 +7501,8 @@ ATExecSetStorage(Relation rel, const char *colName, Node *newValue, LOCKMODE loc */ foreach(lc, RelationGetIndexList(rel)) { - Oid indexoid = lfirst_oid(lc); - Relation indrel; + Oid indexoid = lfirst_oid(lc); + Relation indrel; AttrNumber indattnum = 0; indrel = index_open(indexoid, lockmode); @@ -16993,7 +16994,7 @@ static void DropClonedTriggersFromPartition(Oid partitionId) { ScanKeyData skey; - SysScanDesc scan; + SysScanDesc scan; HeapTuple trigtup; Relation tgrel; ObjectAddresses *objects; diff --git a/src/backend/commands/trigger.c b/src/backend/commands/trigger.c index ed551ab73aa..672fccff5bd 100644 --- a/src/backend/commands/trigger.c +++ b/src/backend/commands/trigger.c @@ -2240,8 +2240,8 @@ ExecBRInsertTriggers(EState *estate, ResultRelInfo *relinfo, /* * After a tuple in a partition goes through a trigger, the user - * could have changed the partition key enough that the tuple - * no longer fits the partition. Verify that. + * could have changed the partition key enough that the tuple no + * longer fits the partition. Verify that. */ if (trigger->tgisclone && !ExecPartitionCheck(relinfo, slot, estate, false)) diff --git a/src/backend/executor/execExpr.c b/src/backend/executor/execExpr.c index c6a77bd66fa..236413f62aa 100644 --- a/src/backend/executor/execExpr.c +++ b/src/backend/executor/execExpr.c @@ -3238,7 +3238,7 @@ ExecBuildAggTransCall(ExprState *state, AggState *aggstate, bool nullcheck) { ExprContext *aggcontext; - int adjust_jumpnull = -1; + int adjust_jumpnull = -1; if (ishash) aggcontext = aggstate->hashcontext; diff --git a/src/backend/executor/execExprInterp.c b/src/backend/executor/execExprInterp.c index 113ed1547cb..b812bbaceef 100644 --- a/src/backend/executor/execExprInterp.c +++ b/src/backend/executor/execExprInterp.c @@ -167,15 +167,16 @@ static Datum ExecJustAssignOuterVarVirt(ExprState *state, ExprContext *econtext, static Datum ExecJustAssignScanVarVirt(ExprState *state, ExprContext *econtext, bool *isnull); /* execution helper functions */ -static pg_attribute_always_inline void -ExecAggPlainTransByVal(AggState *aggstate, AggStatePerTrans pertrans, - AggStatePerGroup pergroup, - ExprContext *aggcontext, int setno); - -static pg_attribute_always_inline void -ExecAggPlainTransByRef(AggState *aggstate, AggStatePerTrans pertrans, - AggStatePerGroup pergroup, - ExprContext *aggcontext, int setno); +static pg_attribute_always_inline void ExecAggPlainTransByVal(AggState *aggstate, + AggStatePerTrans pertrans, + AggStatePerGroup pergroup, + ExprContext *aggcontext, + int setno); +static pg_attribute_always_inline void ExecAggPlainTransByRef(AggState *aggstate, + AggStatePerTrans pertrans, + AggStatePerGroup pergroup, + ExprContext *aggcontext, + int setno); /* * Prepare ExprState for interpreted execution. 
@@ -1611,8 +1612,8 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull) EEO_CASE(EEOP_AGG_PLAIN_PERGROUP_NULLCHECK) { AggState *aggstate = castNode(AggState, state->parent); - AggStatePerGroup pergroup_allaggs = aggstate->all_pergroups - [op->d.agg_plain_pergroup_nullcheck.setoff]; + AggStatePerGroup pergroup_allaggs = + aggstate->all_pergroups[op->d.agg_plain_pergroup_nullcheck.setoff]; if (pergroup_allaggs == NULL) EEO_JUMP(op->d.agg_plain_pergroup_nullcheck.jumpnull); @@ -1636,9 +1637,8 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull) { AggState *aggstate = castNode(AggState, state->parent); AggStatePerTrans pertrans = op->d.agg_trans.pertrans; - AggStatePerGroup pergroup = &aggstate->all_pergroups - [op->d.agg_trans.setoff] - [op->d.agg_trans.transno]; + AggStatePerGroup pergroup = + &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno]; Assert(pertrans->transtypeByVal); @@ -1665,9 +1665,8 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull) { AggState *aggstate = castNode(AggState, state->parent); AggStatePerTrans pertrans = op->d.agg_trans.pertrans; - AggStatePerGroup pergroup = &aggstate->all_pergroups - [op->d.agg_trans.setoff] - [op->d.agg_trans.transno]; + AggStatePerGroup pergroup = + &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno]; Assert(pertrans->transtypeByVal); @@ -1684,9 +1683,8 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull) { AggState *aggstate = castNode(AggState, state->parent); AggStatePerTrans pertrans = op->d.agg_trans.pertrans; - AggStatePerGroup pergroup = &aggstate->all_pergroups - [op->d.agg_trans.setoff] - [op->d.agg_trans.transno]; + AggStatePerGroup pergroup = + &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno]; Assert(pertrans->transtypeByVal); @@ -1702,9 +1700,8 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull) { AggState *aggstate = castNode(AggState, state->parent); AggStatePerTrans pertrans = op->d.agg_trans.pertrans; - AggStatePerGroup pergroup = &aggstate->all_pergroups - [op->d.agg_trans.setoff] - [op->d.agg_trans.transno]; + AggStatePerGroup pergroup = + &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno]; Assert(!pertrans->transtypeByVal); @@ -1724,9 +1721,8 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull) { AggState *aggstate = castNode(AggState, state->parent); AggStatePerTrans pertrans = op->d.agg_trans.pertrans; - AggStatePerGroup pergroup = &aggstate->all_pergroups - [op->d.agg_trans.setoff] - [op->d.agg_trans.transno]; + AggStatePerGroup pergroup = + &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno]; Assert(!pertrans->transtypeByVal); @@ -1742,9 +1738,8 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull) { AggState *aggstate = castNode(AggState, state->parent); AggStatePerTrans pertrans = op->d.agg_trans.pertrans; - AggStatePerGroup pergroup = &aggstate->all_pergroups - [op->d.agg_trans.setoff] - [op->d.agg_trans.transno]; + AggStatePerGroup pergroup = + &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno]; Assert(!pertrans->transtypeByVal); @@ -4302,21 +4297,20 @@ ExecAggPlainTransByRef(AggState *aggstate, AggStatePerTrans pertrans, newVal = FunctionCallInvoke(fcinfo); /* - * For pass-by-ref datatype, must copy the new value into - * aggcontext and free the prior transValue. 
But if transfn - * returned a pointer to its first input, we don't need to do - * anything. Also, if transfn returned a pointer to a R/W - * expanded object that is already a child of the aggcontext, - * assume we can adopt that value without copying it. + * For pass-by-ref datatype, must copy the new value into aggcontext and + * free the prior transValue. But if transfn returned a pointer to its + * first input, we don't need to do anything. Also, if transfn returned a + * pointer to a R/W expanded object that is already a child of the + * aggcontext, assume we can adopt that value without copying it. * - * It's safe to compare newVal with pergroup->transValue without - * regard for either being NULL, because ExecAggTransReparent() - * takes care to set transValue to 0 when NULL. Otherwise we could - * end up accidentally not reparenting, when the transValue has - * the same numerical value as newValue, despite being NULL. This - * is a somewhat hot path, making it undesirable to instead solve - * this with another branch for the common case of the transition - * function returning its (modified) input argument. + * It's safe to compare newVal with pergroup->transValue without regard + * for either being NULL, because ExecAggTransReparent() takes care to set + * transValue to 0 when NULL. Otherwise we could end up accidentally not + * reparenting, when the transValue has the same numerical value as + * newValue, despite being NULL. This is a somewhat hot path, making it + * undesirable to instead solve this with another branch for the common + * case of the transition function returning its (modified) input + * argument. */ if (DatumGetPointer(newVal) != DatumGetPointer(pergroup->transValue)) newVal = ExecAggTransReparent(aggstate, pertrans, diff --git a/src/backend/executor/execGrouping.c b/src/backend/executor/execGrouping.c index 009d27b9a80..8be36ca7634 100644 --- a/src/backend/executor/execGrouping.c +++ b/src/backend/executor/execGrouping.c @@ -300,9 +300,9 @@ TupleHashEntry LookupTupleHashEntry(TupleHashTable hashtable, TupleTableSlot *slot, bool *isnew) { - TupleHashEntry entry; - MemoryContext oldContext; - uint32 hash; + TupleHashEntry entry; + MemoryContext oldContext; + uint32 hash; /* Need to run the hash functions in short-lived context */ oldContext = MemoryContextSwitchTo(hashtable->tempcxt); @@ -326,8 +326,8 @@ LookupTupleHashEntry(TupleHashTable hashtable, TupleTableSlot *slot, uint32 TupleHashTableHash(TupleHashTable hashtable, TupleTableSlot *slot) { - MemoryContext oldContext; - uint32 hash; + MemoryContext oldContext; + uint32 hash; hashtable->inputslot = slot; hashtable->in_hash_funcs = hashtable->tab_hash_funcs; @@ -350,8 +350,8 @@ TupleHashEntry LookupTupleHashEntryHash(TupleHashTable hashtable, TupleTableSlot *slot, bool *isnew, uint32 hash) { - TupleHashEntry entry; - MemoryContext oldContext; + TupleHashEntry entry; + MemoryContext oldContext; /* Need to run the hash functions in short-lived context */ oldContext = MemoryContextSwitchTo(hashtable->tempcxt); diff --git a/src/backend/executor/execSRF.c b/src/backend/executor/execSRF.c index 461c8601b4f..b0ea72de685 100644 --- a/src/backend/executor/execSRF.c +++ b/src/backend/executor/execSRF.c @@ -259,7 +259,7 @@ ExecMakeTableFunctionResult(SetExprState *setexpr, if (first_time) { MemoryContext oldcontext = - MemoryContextSwitchTo(econtext->ecxt_per_query_memory); + MemoryContextSwitchTo(econtext->ecxt_per_query_memory); tupstore = tuplestore_begin_heap(randomAccess, false, work_mem); rsinfo.setResult = 
tupstore; @@ -289,7 +289,7 @@ ExecMakeTableFunctionResult(SetExprState *setexpr, if (tupdesc == NULL) { MemoryContext oldcontext = - MemoryContextSwitchTo(econtext->ecxt_per_query_memory); + MemoryContextSwitchTo(econtext->ecxt_per_query_memory); /* * This is the first non-NULL result from the @@ -384,7 +384,7 @@ no_function_result: if (rsinfo.setResult == NULL) { MemoryContext oldcontext = - MemoryContextSwitchTo(econtext->ecxt_per_query_memory); + MemoryContextSwitchTo(econtext->ecxt_per_query_memory); tupstore = tuplestore_begin_heap(randomAccess, false, work_mem); rsinfo.setResult = tupstore; diff --git a/src/backend/executor/execUtils.c b/src/backend/executor/execUtils.c index ca973882d01..d0e65b86473 100644 --- a/src/backend/executor/execUtils.c +++ b/src/backend/executor/execUtils.c @@ -320,9 +320,9 @@ CreateExprContext(EState *estate) ExprContext * CreateWorkExprContext(EState *estate) { - Size minContextSize = ALLOCSET_DEFAULT_MINSIZE; - Size initBlockSize = ALLOCSET_DEFAULT_INITSIZE; - Size maxBlockSize = ALLOCSET_DEFAULT_MAXSIZE; + Size minContextSize = ALLOCSET_DEFAULT_MINSIZE; + Size initBlockSize = ALLOCSET_DEFAULT_INITSIZE; + Size maxBlockSize = ALLOCSET_DEFAULT_MAXSIZE; /* choose the maxBlockSize to be no larger than 1/16 of work_mem */ while (16 * maxBlockSize > work_mem * 1024L) diff --git a/src/backend/executor/nodeAgg.c b/src/backend/executor/nodeAgg.c index 9f4229de600..8553db0dd07 100644 --- a/src/backend/executor/nodeAgg.c +++ b/src/backend/executor/nodeAgg.c @@ -317,11 +317,11 @@ */ typedef struct HashTapeInfo { - LogicalTapeSet *tapeset; - int ntapes; - int *freetapes; - int nfreetapes; - int freetapes_alloc; + LogicalTapeSet *tapeset; + int ntapes; + int *freetapes; + int nfreetapes; + int freetapes_alloc; } HashTapeInfo; /* @@ -336,11 +336,11 @@ typedef struct HashTapeInfo typedef struct HashAggSpill { LogicalTapeSet *tapeset; /* borrowed reference to tape set */ - int npartitions; /* number of partitions */ - int *partitions; /* spill partition tape numbers */ - int64 *ntuples; /* number of tuples in each partition */ - uint32 mask; /* mask to find partition from hash value */ - int shift; /* after masking, shift by this amount */ + int npartitions; /* number of partitions */ + int *partitions; /* spill partition tape numbers */ + int64 *ntuples; /* number of tuples in each partition */ + uint32 mask; /* mask to find partition from hash value */ + int shift; /* after masking, shift by this amount */ } HashAggSpill; /* @@ -354,11 +354,11 @@ typedef struct HashAggSpill */ typedef struct HashAggBatch { - int setno; /* grouping set */ - int used_bits; /* number of bits of hash already used */ - LogicalTapeSet *tapeset; /* borrowed reference to tape set */ - int input_tapenum; /* input partition tape */ - int64 input_tuples; /* number of tuples in this batch */ + int setno; /* grouping set */ + int used_bits; /* number of bits of hash already used */ + LogicalTapeSet *tapeset; /* borrowed reference to tape set */ + int input_tapenum; /* input partition tape */ + int64 input_tuples; /* number of tuples in this batch */ } HashAggBatch; static void select_current_set(AggState *aggstate, int setno, bool is_hash); @@ -402,10 +402,10 @@ static void hashagg_recompile_expressions(AggState *aggstate, bool minslot, static long hash_choose_num_buckets(double hashentrysize, long estimated_nbuckets, Size memory); -static int hash_choose_num_partitions(uint64 input_groups, - double hashentrysize, - int used_bits, - int *log2_npartittions); +static int 
hash_choose_num_partitions(uint64 input_groups, + double hashentrysize, + int used_bits, + int *log2_npartittions); static AggStatePerGroup lookup_hash_entry(AggState *aggstate, uint32 hash, bool *in_hash_table); static void lookup_hash_entries(AggState *aggstate); @@ -786,14 +786,14 @@ advance_transition_function(AggState *aggstate, * pointer to a R/W expanded object that is already a child of the * aggcontext, assume we can adopt that value without copying it. * - * It's safe to compare newVal with pergroup->transValue without - * regard for either being NULL, because ExecAggTransReparent() - * takes care to set transValue to 0 when NULL. Otherwise we could - * end up accidentally not reparenting, when the transValue has - * the same numerical value as newValue, despite being NULL. This - * is a somewhat hot path, making it undesirable to instead solve - * this with another branch for the common case of the transition - * function returning its (modified) input argument. + * It's safe to compare newVal with pergroup->transValue without regard + * for either being NULL, because ExecAggTransReparent() takes care to set + * transValue to 0 when NULL. Otherwise we could end up accidentally not + * reparenting, when the transValue has the same numerical value as + * newValue, despite being NULL. This is a somewhat hot path, making it + * undesirable to instead solve this with another branch for the common + * case of the transition function returning its (modified) input + * argument. */ if (!pertrans->transtypeByVal && DatumGetPointer(newVal) != DatumGetPointer(pergroupstate->transValue)) @@ -1206,7 +1206,7 @@ prepare_hash_slot(AggState *aggstate) TupleTableSlot *inputslot = aggstate->tmpcontext->ecxt_outertuple; AggStatePerHash perhash = &aggstate->perhash[aggstate->current_set]; TupleTableSlot *hashslot = perhash->hashslot; - int i; + int i; /* transfer just the needed columns into hashslot */ slot_getsomeattrs(inputslot, perhash->largestGrpColIdx); @@ -1438,13 +1438,13 @@ find_unaggregated_cols_walker(Node *node, Bitmapset **colnos) static void build_hash_tables(AggState *aggstate) { - int setno; + int setno; for (setno = 0; setno < aggstate->num_hashes; ++setno) { AggStatePerHash perhash = &aggstate->perhash[setno]; - long nbuckets; - Size memory; + long nbuckets; + Size memory; if (perhash->hashtable != NULL) { @@ -1457,8 +1457,9 @@ build_hash_tables(AggState *aggstate) memory = aggstate->hash_mem_limit / aggstate->num_hashes; /* choose reasonable number of buckets per hashtable */ - nbuckets = hash_choose_num_buckets( - aggstate->hashentrysize, perhash->aggnode->numGroups, memory); + nbuckets = hash_choose_num_buckets(aggstate->hashentrysize, + perhash->aggnode->numGroups, + memory); build_hash_table(aggstate, setno, nbuckets); } @@ -1473,10 +1474,10 @@ static void build_hash_table(AggState *aggstate, int setno, long nbuckets) { AggStatePerHash perhash = &aggstate->perhash[setno]; - MemoryContext metacxt = aggstate->hash_metacxt; - MemoryContext hashcxt = aggstate->hashcontext->ecxt_per_tuple_memory; - MemoryContext tmpcxt = aggstate->tmpcontext->ecxt_per_tuple_memory; - Size additionalsize; + MemoryContext metacxt = aggstate->hash_metacxt; + MemoryContext hashcxt = aggstate->hashcontext->ecxt_per_tuple_memory; + MemoryContext tmpcxt = aggstate->tmpcontext->ecxt_per_tuple_memory; + Size additionalsize; Assert(aggstate->aggstrategy == AGG_HASHED || aggstate->aggstrategy == AGG_MIXED); @@ -1489,20 +1490,19 @@ build_hash_table(AggState *aggstate, int setno, long nbuckets) */ 
additionalsize = aggstate->numtrans * sizeof(AggStatePerGroupData); - perhash->hashtable = BuildTupleHashTableExt( - &aggstate->ss.ps, - perhash->hashslot->tts_tupleDescriptor, - perhash->numCols, - perhash->hashGrpColIdxHash, - perhash->eqfuncoids, - perhash->hashfunctions, - perhash->aggnode->grpCollations, - nbuckets, - additionalsize, - metacxt, - hashcxt, - tmpcxt, - DO_AGGSPLIT_SKIPFINAL(aggstate->aggsplit)); + perhash->hashtable = BuildTupleHashTableExt(&aggstate->ss.ps, + perhash->hashslot->tts_tupleDescriptor, + perhash->numCols, + perhash->hashGrpColIdxHash, + perhash->eqfuncoids, + perhash->hashfunctions, + perhash->aggnode->grpCollations, + nbuckets, + additionalsize, + metacxt, + hashcxt, + tmpcxt, + DO_AGGSPLIT_SKIPFINAL(aggstate->aggsplit)); } /* @@ -1648,12 +1648,12 @@ find_hash_columns(AggState *aggstate) Size hash_agg_entry_size(int numTrans, Size tupleWidth, Size transitionSpace) { - Size tupleChunkSize; - Size pergroupChunkSize; - Size transitionChunkSize; - Size tupleSize = (MAXALIGN(SizeofMinimalTupleHeader) + - tupleWidth); - Size pergroupSize = numTrans * sizeof(AggStatePerGroupData); + Size tupleChunkSize; + Size pergroupChunkSize; + Size transitionChunkSize; + Size tupleSize = (MAXALIGN(SizeofMinimalTupleHeader) + + tupleWidth); + Size pergroupSize = numTrans * sizeof(AggStatePerGroupData); tupleChunkSize = CHUNKHDRSZ + tupleSize; @@ -1695,24 +1695,24 @@ hash_agg_entry_size(int numTrans, Size tupleWidth, Size transitionSpace) static void hashagg_recompile_expressions(AggState *aggstate, bool minslot, bool nullcheck) { - AggStatePerPhase phase; - int i = minslot ? 1 : 0; - int j = nullcheck ? 1 : 0; + AggStatePerPhase phase; + int i = minslot ? 1 : 0; + int j = nullcheck ? 1 : 0; Assert(aggstate->aggstrategy == AGG_HASHED || aggstate->aggstrategy == AGG_MIXED); if (aggstate->aggstrategy == AGG_HASHED) phase = &aggstate->phases[0]; - else /* AGG_MIXED */ + else /* AGG_MIXED */ phase = &aggstate->phases[1]; if (phase->evaltrans_cache[i][j] == NULL) { - const TupleTableSlotOps *outerops = aggstate->ss.ps.outerops; - bool outerfixed = aggstate->ss.ps.outeropsfixed; - bool dohash = true; - bool dosort; + const TupleTableSlotOps *outerops = aggstate->ss.ps.outerops; + bool outerfixed = aggstate->ss.ps.outeropsfixed; + bool dohash = true; + bool dosort; dosort = aggstate->aggstrategy == AGG_MIXED ? true : false; @@ -1723,8 +1723,9 @@ hashagg_recompile_expressions(AggState *aggstate, bool minslot, bool nullcheck) aggstate->ss.ps.outeropsfixed = true; } - phase->evaltrans_cache[i][j] = ExecBuildAggTrans( - aggstate, phase, dosort, dohash, nullcheck); + phase->evaltrans_cache[i][j] = ExecBuildAggTrans(aggstate, phase, + dosort, dohash, + nullcheck); /* change back */ aggstate->ss.ps.outerops = outerops; @@ -1747,8 +1748,8 @@ hash_agg_set_limits(double hashentrysize, uint64 input_groups, int used_bits, Size *mem_limit, uint64 *ngroups_limit, int *num_partitions) { - int npartitions; - Size partition_mem; + int npartitions; + Size partition_mem; /* if not expected to spill, use all of work_mem */ if (input_groups * hashentrysize < work_mem * 1024L) @@ -1762,9 +1763,8 @@ hash_agg_set_limits(double hashentrysize, uint64 input_groups, int used_bits, /* * Calculate expected memory requirements for spilling, which is the size - * of the buffers needed for all the tapes that need to be open at - * once. Then, subtract that from the memory available for holding hash - * tables. + * of the buffers needed for all the tapes that need to be open at once. 
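
The hash_agg_entry_size hunk above estimates the memory cost of one hash-table group as the entry struct plus allocator chunks for the tuple and the per-group transition data. The conditional chunk terms are not visible in the hunk, so the sketch below approximates the overall shape under that assumption, with stand-in constants (CHUNKHDRSZ, the header and struct sizes are all invented here, not the server's real values):

#include <stdio.h>

#define CHUNKHDRSZ   16  /* assumed allocator chunk header */
#define TUPLE_HDR    24  /* stands in for MAXALIGN(SizeofMinimalTupleHeader) */
#define ENTRY_SZ     24  /* stands in for sizeof(TupleHashEntryData) */
#define PERGROUP_SZ  16  /* stands in for sizeof(AggStatePerGroupData) */

static size_t
entry_size(int numTrans, size_t tupleWidth, size_t transitionSpace)
{
    size_t  tupleChunk = CHUNKHDRSZ + TUPLE_HDR + tupleWidth;
    size_t  pergroupChunk = numTrans > 0 ? CHUNKHDRSZ + numTrans * PERGROUP_SZ : 0;
    size_t  transChunk = transitionSpace > 0 ? CHUNKHDRSZ + transitionSpace : 0;

    /* one hash entry plus one chunk per separately allocated piece */
    return ENTRY_SZ + tupleChunk + pergroupChunk + transChunk;
}

int
main(void)
{
    /* e.g. two transition states, 32-byte grouping key, no extra space */
    printf("estimated bytes per group: %zu\n", entry_size(2, 32, 0));
    return 0;
}
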
+ * Then, subtract that from the memory available for holding hash tables. */ npartitions = hash_choose_num_partitions(input_groups, hashentrysize, @@ -1803,11 +1803,11 @@ hash_agg_set_limits(double hashentrysize, uint64 input_groups, int used_bits, static void hash_agg_check_limits(AggState *aggstate) { - uint64 ngroups = aggstate->hash_ngroups_current; - Size meta_mem = MemoryContextMemAllocated( - aggstate->hash_metacxt, true); - Size hash_mem = MemoryContextMemAllocated( - aggstate->hashcontext->ecxt_per_tuple_memory, true); + uint64 ngroups = aggstate->hash_ngroups_current; + Size meta_mem = MemoryContextMemAllocated(aggstate->hash_metacxt, + true); + Size hash_mem = MemoryContextMemAllocated(aggstate->hashcontext->ecxt_per_tuple_memory, + true); /* * Don't spill unless there's at least one group in the hash table so we @@ -1841,13 +1841,12 @@ hash_agg_enter_spill_mode(AggState *aggstate) hashagg_tapeinfo_init(aggstate); - aggstate->hash_spills = palloc( - sizeof(HashAggSpill) * aggstate->num_hashes); + aggstate->hash_spills = palloc(sizeof(HashAggSpill) * aggstate->num_hashes); for (int setno = 0; setno < aggstate->num_hashes; setno++) { - AggStatePerHash perhash = &aggstate->perhash[setno]; - HashAggSpill *spill = &aggstate->hash_spills[setno]; + AggStatePerHash perhash = &aggstate->perhash[setno]; + HashAggSpill *spill = &aggstate->hash_spills[setno]; hashagg_spill_init(spill, aggstate->hash_tapeinfo, 0, perhash->aggnode->numGroups, @@ -1865,10 +1864,10 @@ hash_agg_enter_spill_mode(AggState *aggstate) static void hash_agg_update_metrics(AggState *aggstate, bool from_tape, int npartitions) { - Size meta_mem; - Size hash_mem; - Size buffer_mem; - Size total_mem; + Size meta_mem; + Size hash_mem; + Size buffer_mem; + Size total_mem; if (aggstate->aggstrategy != AGG_MIXED && aggstate->aggstrategy != AGG_HASHED) @@ -1878,8 +1877,7 @@ hash_agg_update_metrics(AggState *aggstate, bool from_tape, int npartitions) meta_mem = MemoryContextMemAllocated(aggstate->hash_metacxt, true); /* memory for the group keys and transition states */ - hash_mem = MemoryContextMemAllocated( - aggstate->hashcontext->ecxt_per_tuple_memory, true); + hash_mem = MemoryContextMemAllocated(aggstate->hashcontext->ecxt_per_tuple_memory, true); /* memory for read/write tape buffers, if spilled */ buffer_mem = npartitions * HASHAGG_WRITE_BUFFER_SIZE; @@ -1894,8 +1892,7 @@ hash_agg_update_metrics(AggState *aggstate, bool from_tape, int npartitions) /* update disk usage */ if (aggstate->hash_tapeinfo != NULL) { - uint64 disk_used = LogicalTapeSetBlocks( - aggstate->hash_tapeinfo->tapeset) * (BLCKSZ / 1024); + uint64 disk_used = LogicalTapeSetBlocks(aggstate->hash_tapeinfo->tapeset) * (BLCKSZ / 1024); if (aggstate->hash_disk_used < disk_used) aggstate->hash_disk_used = disk_used; @@ -1906,7 +1903,7 @@ hash_agg_update_metrics(AggState *aggstate, bool from_tape, int npartitions) { aggstate->hashentrysize = sizeof(TupleHashEntryData) + - (hash_mem / (double)aggstate->hash_ngroups_current); + (hash_mem / (double) aggstate->hash_ngroups_current); } } @@ -1916,8 +1913,8 @@ hash_agg_update_metrics(AggState *aggstate, bool from_tape, int npartitions) static long hash_choose_num_buckets(double hashentrysize, long ngroups, Size memory) { - long max_nbuckets; - long nbuckets = ngroups; + long max_nbuckets; + long nbuckets = ngroups; max_nbuckets = memory / hashentrysize; @@ -1943,10 +1940,10 @@ static int hash_choose_num_partitions(uint64 input_groups, double hashentrysize, int used_bits, int *log2_npartitions) { - Size mem_wanted; - 
int partition_limit; - int npartitions; - int partition_bits; + Size mem_wanted; + int partition_limit; + int npartitions; + int partition_bits; /* * Avoid creating so many partitions that the memory requirements of the @@ -2005,8 +2002,8 @@ lookup_hash_entry(AggState *aggstate, uint32 hash, bool *in_hash_table) AggStatePerHash perhash = &aggstate->perhash[aggstate->current_set]; TupleTableSlot *hashslot = perhash->hashslot; TupleHashEntryData *entry; - bool isnew = false; - bool *p_isnew; + bool isnew = false; + bool *p_isnew; /* if hash table already spilled, don't create new entries */ p_isnew = aggstate->hash_spill_mode ? NULL : &isnew; @@ -2025,8 +2022,8 @@ lookup_hash_entry(AggState *aggstate, uint32 hash, bool *in_hash_table) if (isnew) { - AggStatePerGroup pergroup; - int transno; + AggStatePerGroup pergroup; + int transno; aggstate->hash_ngroups_current++; hash_agg_check_limits(aggstate); @@ -2083,9 +2080,9 @@ lookup_hash_entries(AggState *aggstate) for (setno = 0; setno < aggstate->num_hashes; setno++) { - AggStatePerHash perhash = &aggstate->perhash[setno]; - uint32 hash; - bool in_hash_table; + AggStatePerHash perhash = &aggstate->perhash[setno]; + uint32 hash; + bool in_hash_table; select_current_set(aggstate, setno, true); prepare_hash_slot(aggstate); @@ -2095,8 +2092,8 @@ lookup_hash_entries(AggState *aggstate) /* check to see if we need to spill the tuple for this grouping set */ if (!in_hash_table) { - HashAggSpill *spill = &aggstate->hash_spills[setno]; - TupleTableSlot *slot = aggstate->tmpcontext->ecxt_outertuple; + HashAggSpill *spill = &aggstate->hash_spills[setno]; + TupleTableSlot *slot = aggstate->tmpcontext->ecxt_outertuple; if (spill->partitions == NULL) hashagg_spill_init(spill, aggstate->hash_tapeinfo, 0, @@ -2560,11 +2557,11 @@ agg_fill_hash_table(AggState *aggstate) static bool agg_refill_hash_table(AggState *aggstate) { - HashAggBatch *batch; - HashAggSpill spill; - HashTapeInfo *tapeinfo = aggstate->hash_tapeinfo; - uint64 ngroups_estimate; - bool spill_initialized = false; + HashAggBatch *batch; + HashAggSpill spill; + HashTapeInfo *tapeinfo = aggstate->hash_tapeinfo; + uint64 ngroups_estimate; + bool spill_initialized = false; if (aggstate->hash_batches == NIL) return false; @@ -2623,11 +2620,12 @@ agg_refill_hash_table(AggState *aggstate) LogicalTapeRewindForRead(tapeinfo->tapeset, batch->input_tapenum, HASHAGG_READ_BUFFER_SIZE); - for (;;) { - TupleTableSlot *slot = aggstate->hash_spill_slot; - MinimalTuple tuple; - uint32 hash; - bool in_hash_table; + for (;;) + { + TupleTableSlot *slot = aggstate->hash_spill_slot; + MinimalTuple tuple; + uint32 hash; + bool in_hash_table; CHECK_FOR_INTERRUPTS(); @@ -2639,8 +2637,8 @@ agg_refill_hash_table(AggState *aggstate) aggstate->tmpcontext->ecxt_outertuple = slot; prepare_hash_slot(aggstate); - aggstate->hash_pergroup[batch->setno] = lookup_hash_entry( - aggstate, hash, &in_hash_table); + aggstate->hash_pergroup[batch->setno] = + lookup_hash_entry(aggstate, hash, &in_hash_table); if (in_hash_table) { @@ -2657,7 +2655,7 @@ agg_refill_hash_table(AggState *aggstate) */ spill_initialized = true; hashagg_spill_init(&spill, tapeinfo, batch->used_bits, - ngroups_estimate, aggstate->hashentrysize); + ngroups_estimate, aggstate->hashentrysize); } /* no memory for a new group, spill */ hashagg_spill_tuple(&spill, slot, hash); @@ -2851,8 +2849,8 @@ agg_retrieve_hash_table_in_memory(AggState *aggstate) static void hashagg_tapeinfo_init(AggState *aggstate) { - HashTapeInfo *tapeinfo = palloc(sizeof(HashTapeInfo)); - int 
init_tapes = 16; /* expanded dynamically */ + HashTapeInfo *tapeinfo = palloc(sizeof(HashTapeInfo)); + int init_tapes = 16; /* expanded dynamically */ tapeinfo->tapeset = LogicalTapeSetCreate(init_tapes, NULL, NULL, -1); tapeinfo->ntapes = init_tapes; @@ -2873,7 +2871,7 @@ static void hashagg_tapeinfo_assign(HashTapeInfo *tapeinfo, int *partitions, int npartitions) { - int partidx = 0; + int partidx = 0; /* use free tapes if available */ while (partidx < npartitions && tapeinfo->nfreetapes > 0) @@ -2899,8 +2897,8 @@ hashagg_tapeinfo_release(HashTapeInfo *tapeinfo, int tapenum) if (tapeinfo->freetapes_alloc == tapeinfo->nfreetapes) { tapeinfo->freetapes_alloc <<= 1; - tapeinfo->freetapes = repalloc( - tapeinfo->freetapes, tapeinfo->freetapes_alloc * sizeof(int)); + tapeinfo->freetapes = repalloc(tapeinfo->freetapes, + tapeinfo->freetapes_alloc * sizeof(int)); } tapeinfo->freetapes[tapeinfo->nfreetapes++] = tapenum; } @@ -2915,11 +2913,11 @@ static void hashagg_spill_init(HashAggSpill *spill, HashTapeInfo *tapeinfo, int used_bits, uint64 input_groups, double hashentrysize) { - int npartitions; - int partition_bits; + int npartitions; + int partition_bits; - npartitions = hash_choose_num_partitions( - input_groups, hashentrysize, used_bits, &partition_bits); + npartitions = hash_choose_num_partitions(input_groups, hashentrysize, + used_bits, &partition_bits); spill->partitions = palloc0(sizeof(int) * npartitions); spill->ntuples = palloc0(sizeof(int64) * npartitions); @@ -2941,12 +2939,12 @@ hashagg_spill_init(HashAggSpill *spill, HashTapeInfo *tapeinfo, int used_bits, static Size hashagg_spill_tuple(HashAggSpill *spill, TupleTableSlot *slot, uint32 hash) { - LogicalTapeSet *tapeset = spill->tapeset; - int partition; - MinimalTuple tuple; - int tapenum; - int total_written = 0; - bool shouldFree; + LogicalTapeSet *tapeset = spill->tapeset; + int partition; + MinimalTuple tuple; + int tapenum; + int total_written = 0; + bool shouldFree; Assert(spill->partitions != NULL); @@ -2999,11 +2997,11 @@ static MinimalTuple hashagg_batch_read(HashAggBatch *batch, uint32 *hashp) { LogicalTapeSet *tapeset = batch->tapeset; - int tapenum = batch->input_tapenum; - MinimalTuple tuple; - uint32 t_len; - size_t nread; - uint32 hash; + int tapenum = batch->input_tapenum; + MinimalTuple tuple; + uint32 t_len; + size_t nread; + uint32 hash; nread = LogicalTapeRead(tapeset, tapenum, &hash, sizeof(uint32)); if (nread == 0) @@ -3027,7 +3025,7 @@ hashagg_batch_read(HashAggBatch *batch, uint32 *hashp) tuple->t_len = t_len; nread = LogicalTapeRead(tapeset, tapenum, - (void *)((char *)tuple + sizeof(uint32)), + (void *) ((char *) tuple + sizeof(uint32)), t_len - sizeof(uint32)); if (nread != t_len - sizeof(uint32)) ereport(ERROR, @@ -3048,14 +3046,15 @@ hashagg_batch_read(HashAggBatch *batch, uint32 *hashp) static void hashagg_finish_initial_spills(AggState *aggstate) { - int setno; - int total_npartitions = 0; + int setno; + int total_npartitions = 0; if (aggstate->hash_spills != NULL) { for (setno = 0; setno < aggstate->num_hashes; setno++) { HashAggSpill *spill = &aggstate->hash_spills[setno]; + total_npartitions += spill->npartitions; hashagg_spill_finish(aggstate, spill, setno); } @@ -3081,16 +3080,16 @@ hashagg_finish_initial_spills(AggState *aggstate) static void hashagg_spill_finish(AggState *aggstate, HashAggSpill *spill, int setno) { - int i; - int used_bits = 32 - spill->shift; + int i; + int used_bits = 32 - spill->shift; if (spill->npartitions == 0) - return; /* didn't spill */ + return; /* didn't spill */ 
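
The HashAggSpill fields reindented earlier in this file (mask, "mask to find partition from hash value"; shift, "after masking, shift by this amount") drive the partition choice when hashagg_spill_tuple writes a tuple to a spill tape: the as-yet-unused high bits of the 32-bit hash select one of a power-of-two number of partitions, so recursive spill passes consume fresh bits each time. A stand-alone sketch of that reading; the shift/mask derivation below is an assumption based on the struct comments, since the hunks do not show it:

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
    int      used_bits = 0;       /* hash bits consumed by earlier passes */
    int      partition_bits = 2;  /* log2 of the partition count */
    unsigned npartitions = 1u << partition_bits;  /* 4 partitions */
    int      shift = 32 - used_bits - partition_bits;
    uint32_t mask = (npartitions - 1) << shift;
    uint32_t hash = 0xDEADBEEF;   /* example tuple hash */

    /* mask first, then shift down, as the struct comments describe */
    printf("partition %u of %u\n",
           (unsigned) ((hash & mask) >> shift), npartitions);
    return 0;
}
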
for (i = 0; i < spill->npartitions; i++) { - int tapenum = spill->partitions[i]; - HashAggBatch *new_batch; + int tapenum = spill->partitions[i]; + HashAggBatch *new_batch; /* if the partition is empty, don't create a new batch of work */ if (spill->ntuples[i] == 0) @@ -3113,16 +3112,17 @@ hashagg_spill_finish(AggState *aggstate, HashAggSpill *spill, int setno) static void hashagg_reset_spill_state(AggState *aggstate) { - ListCell *lc; + ListCell *lc; /* free spills from initial pass */ if (aggstate->hash_spills != NULL) { - int setno; + int setno; for (setno = 0; setno < aggstate->num_hashes; setno++) { HashAggSpill *spill = &aggstate->hash_spills[setno]; + pfree(spill->ntuples); pfree(spill->partitions); } @@ -3133,7 +3133,8 @@ hashagg_reset_spill_state(AggState *aggstate) /* free batches */ foreach(lc, aggstate->hash_batches) { - HashAggBatch *batch = (HashAggBatch*) lfirst(lc); + HashAggBatch *batch = (HashAggBatch *) lfirst(lc); + pfree(batch); } list_free(aggstate->hash_batches); @@ -3142,7 +3143,7 @@ hashagg_reset_spill_state(AggState *aggstate) /* close tape set */ if (aggstate->hash_tapeinfo != NULL) { - HashTapeInfo *tapeinfo = aggstate->hash_tapeinfo; + HashTapeInfo *tapeinfo = aggstate->hash_tapeinfo; LogicalTapeSetClose(tapeinfo->tapeset); pfree(tapeinfo->freetapes); @@ -3558,22 +3559,22 @@ ExecInitAgg(Agg *node, EState *estate, int eflags) */ if (use_hashing) { - Plan *outerplan = outerPlan(node); - uint64 totalGroups = 0; - int i; + Plan *outerplan = outerPlan(node); + uint64 totalGroups = 0; + int i; - aggstate->hash_metacxt = AllocSetContextCreate( - aggstate->ss.ps.state->es_query_cxt, - "HashAgg meta context", - ALLOCSET_DEFAULT_SIZES); - aggstate->hash_spill_slot = ExecInitExtraTupleSlot( - estate, scanDesc, &TTSOpsMinimalTuple); + aggstate->hash_metacxt = AllocSetContextCreate(aggstate->ss.ps.state->es_query_cxt, + "HashAgg meta context", + ALLOCSET_DEFAULT_SIZES); + aggstate->hash_spill_slot = ExecInitExtraTupleSlot(estate, scanDesc, + &TTSOpsMinimalTuple); /* this is an array of pointers, not structures */ aggstate->hash_pergroup = pergroups; - aggstate->hashentrysize = hash_agg_entry_size( - aggstate->numtrans, outerplan->plan_width, node->transitionSpace); + aggstate->hashentrysize = hash_agg_entry_size(aggstate->numtrans, + outerplan->plan_width, + node->transitionSpace); /* * Consider all of the grouping sets together when setting the limits diff --git a/src/backend/executor/nodeBitmapHeapscan.c b/src/backend/executor/nodeBitmapHeapscan.c index 726d3a2d9a4..5a5c410106a 100644 --- a/src/backend/executor/nodeBitmapHeapscan.c +++ b/src/backend/executor/nodeBitmapHeapscan.c @@ -791,8 +791,8 @@ ExecInitBitmapHeapScan(BitmapHeapScan *node, EState *estate, int eflags) ExecInitQual(node->bitmapqualorig, (PlanState *) scanstate); /* - * Maximum number of prefetches for the tablespace if configured, otherwise - * the current value of the effective_io_concurrency GUC. + * Maximum number of prefetches for the tablespace if configured, + * otherwise the current value of the effective_io_concurrency GUC. 
*/ scanstate->prefetch_maximum = get_tablespace_io_concurrency(currentRelation->rd_rel->reltablespace); diff --git a/src/backend/executor/nodeIncrementalSort.c b/src/backend/executor/nodeIncrementalSort.c index 7af6a12a1e3..e056469448c 100644 --- a/src/backend/executor/nodeIncrementalSort.c +++ b/src/backend/executor/nodeIncrementalSort.c @@ -97,17 +97,24 @@ * - groupName: the token fullsort or prefixsort */ #define INSTRUMENT_SORT_GROUP(node, groupName) \ - if (node->ss.ps.instrument != NULL) \ - { \ - if (node->shared_info && node->am_worker) \ + do { \ + if ((node)->ss.ps.instrument != NULL) \ { \ - Assert(IsParallelWorker()); \ - Assert(ParallelWorkerNumber <= node->shared_info->num_workers); \ - instrumentSortedGroup(&node->shared_info->sinfo[ParallelWorkerNumber].groupName##GroupInfo, node->groupName##_state); \ - } else { \ - instrumentSortedGroup(&node->incsort_info.groupName##GroupInfo, node->groupName##_state); \ + if ((node)->shared_info && (node)->am_worker) \ + { \ + Assert(IsParallelWorker()); \ + Assert(ParallelWorkerNumber <= (node)->shared_info->num_workers); \ + instrumentSortedGroup(&(node)->shared_info->sinfo[ParallelWorkerNumber].groupName##GroupInfo, \ + (node)->groupName##_state); \ + } \ + else \ + { \ + instrumentSortedGroup(&(node)->incsort_info.groupName##GroupInfo, \ + (node)->groupName##_state); \ + } \ } \ - } + } while (0) + /* ---------------------------------------------------------------- * instrumentSortedGroup @@ -122,6 +129,7 @@ instrumentSortedGroup(IncrementalSortGroupInfo *groupInfo, Tuplesortstate *sortState) { TuplesortInstrumentation sort_instr; + groupInfo->groupCount++; tuplesort_get_stats(sortState, &sort_instr); @@ -444,7 +452,7 @@ switchToPresortedPrefixMode(PlanState *pstate) SO1_printf("Sorting presorted prefix tuplesort with %ld tuples\n", nTuples); tuplesort_performsort(node->prefixsort_state); - INSTRUMENT_SORT_GROUP(node, prefixsort) + INSTRUMENT_SORT_GROUP(node, prefixsort); if (node->bounded) { @@ -702,7 +710,7 @@ ExecIncrementalSort(PlanState *pstate) SO1_printf("Sorting fullsort with %ld tuples\n", nTuples); tuplesort_performsort(fullsort_state); - INSTRUMENT_SORT_GROUP(node, fullsort) + INSTRUMENT_SORT_GROUP(node, fullsort); SO_printf("Setting execution_status to INCSORT_READFULLSORT (final tuple)\n"); node->execution_status = INCSORT_READFULLSORT; @@ -783,7 +791,7 @@ ExecIncrementalSort(PlanState *pstate) nTuples); tuplesort_performsort(fullsort_state); - INSTRUMENT_SORT_GROUP(node, fullsort) + INSTRUMENT_SORT_GROUP(node, fullsort); SO_printf("Setting execution_status to INCSORT_READFULLSORT (found end of group)\n"); node->execution_status = INCSORT_READFULLSORT; @@ -792,8 +800,8 @@ ExecIncrementalSort(PlanState *pstate) } /* - * Unless we've already transitioned modes to reading from the full - * sort state, then we assume that having read at least + * Unless we've already transitioned modes to reading from the + * full sort state, then we assume that having read at least * DEFAULT_MAX_FULL_SORT_GROUP_SIZE tuples means it's likely we're * processing a large group of tuples all having equal prefix keys * (but haven't yet found the final tuple in that prefix key @@ -823,7 +831,7 @@ ExecIncrementalSort(PlanState *pstate) SO1_printf("Sorting fullsort tuplesort with %ld tuples\n", nTuples); tuplesort_performsort(fullsort_state); - INSTRUMENT_SORT_GROUP(node, fullsort) + INSTRUMENT_SORT_GROUP(node, fullsort); /* * If the full sort tuplesort happened to switch into top-n @@ -849,8 +857,9 @@ ExecIncrementalSort(PlanState *pstate) /* * We 
might have multiple prefix key groups in the full sort - * state, so the mode transition function needs to know that it - * needs to move from the fullsort to presorted prefix sort. + * state, so the mode transition function needs to know that + * it needs to move from the fullsort to presorted prefix + * sort. */ node->n_fullsort_remaining = nTuples; @@ -936,7 +945,7 @@ ExecIncrementalSort(PlanState *pstate) SO1_printf("Sorting presorted prefix tuplesort with >= %ld tuples\n", nTuples); tuplesort_performsort(node->prefixsort_state); - INSTRUMENT_SORT_GROUP(node, prefixsort) + INSTRUMENT_SORT_GROUP(node, prefixsort); SO_printf("Setting execution_status to INCSORT_READPREFIXSORT (found end of group)\n"); node->execution_status = INCSORT_READPREFIXSORT; @@ -986,9 +995,9 @@ ExecInitIncrementalSort(IncrementalSort *node, EState *estate, int eflags) SO_printf("ExecInitIncrementalSort: initializing sort node\n"); /* - * Incremental sort can't be used with EXEC_FLAG_BACKWARD or EXEC_FLAG_MARK, - * because the current sort state contains only one sort batch rather than - * the full result set. + * Incremental sort can't be used with EXEC_FLAG_BACKWARD or + * EXEC_FLAG_MARK, because the current sort state contains only one sort + * batch rather than the full result set. */ Assert((eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)) == 0); @@ -1041,8 +1050,8 @@ ExecInitIncrementalSort(IncrementalSort *node, EState *estate, int eflags) * Initialize child nodes. * * Incremental sort does not support backwards scans and mark/restore, so - * we don't bother removing the flags from eflags here. We allow passing - * a REWIND flag, because although incremental sort can't use it, the child + * we don't bother removing the flags from eflags here. We allow passing a + * REWIND flag, because although incremental sort can't use it, the child * nodes may be able to do something more useful. */ outerPlanState(incrsortstate) = ExecInitNode(outerPlan(node), estate, eflags); @@ -1128,10 +1137,10 @@ ExecReScanIncrementalSort(IncrementalSortState *node) * re-execute the sort along with the child node. Incremental sort itself * can't do anything smarter, but maybe the child nodes can. * - * In theory if we've only filled the full sort with one batch (and haven't - * reset it for a new batch yet) then we could efficiently rewind, but - * that seems a narrow enough case that it's not worth handling specially - * at this time. + * In theory if we've only filled the full sort with one batch (and + * haven't reset it for a new batch yet) then we could efficiently rewind, + * but that seems a narrow enough case that it's not worth handling + * specially at this time. */ /* must drop pointer to sort result tuple */ @@ -1152,10 +1161,10 @@ ExecReScanIncrementalSort(IncrementalSortState *node) /* * If we've set up either of the sort states yet, we need to reset them. * We could end them and null out the pointers, but there's no reason to - * repay the setup cost, and because ExecIncrementalSort guards - * presorted column functions by checking to see if the full sort state - * has been initialized yet, setting the sort states to null here might - * actually cause a leak. + * repay the setup cost, and because ExecIncrementalSort guards presorted + * column functions by checking to see if the full sort state has been + * initialized yet, setting the sort states to null here might actually + * cause a leak. 
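
The INSTRUMENT_SORT_GROUP hunk above converts a bare if/else macro body into the do { ... } while (0) form, parenthesizes (node), and gives every call site a trailing semicolon. A small self-contained example of why that idiom matters (LOG_IF_BROKEN and LOG_IF_OK are made-up names for illustration):

#include <stdio.h>

#define LOG_IF_BROKEN(cond, msg) \
    if (cond) \
        printf("%s\n", (msg))

#define LOG_IF_OK(cond, msg) \
    do { \
        if (cond) \
            printf("%s\n", (msg)); \
    } while (0)

int
main(void)
{
    int     x = 0;

    /*
     * With LOG_IF_BROKEN here, the trailing semicolon would end the outer
     * "if", leaving the "else" below with no "if" to attach to -- a
     * compile error; dropping the semicolon instead would make the "else"
     * bind silently to the macro's internal "if". The do/while form
     * expands to exactly one statement, so it parses as written.
     */
    if (x)
        LOG_IF_OK(x > 1, "big");
    else
        printf("x is zero\n");
    return 0;
}
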
*/ if (node->fullsort_state != NULL) { diff --git a/src/backend/executor/nodeTidscan.c b/src/backend/executor/nodeTidscan.c index b53a6bbe1d6..8049fdc64ea 100644 --- a/src/backend/executor/nodeTidscan.c +++ b/src/backend/executor/nodeTidscan.c @@ -144,7 +144,7 @@ TidListEval(TidScanState *tidstate) if (tidstate->ss.ss_currentScanDesc == NULL) tidstate->ss.ss_currentScanDesc = table_beginscan_tid(tidstate->ss.ss_currentRelation, - tidstate->ss.ps.state->es_snapshot); + tidstate->ss.ps.state->es_snapshot); scan = tidstate->ss.ss_currentScanDesc; /* diff --git a/src/backend/jit/llvm/llvmjit_expr.c b/src/backend/jit/llvm/llvmjit_expr.c index b855e739571..0bc7a06aee3 100644 --- a/src/backend/jit/llvm/llvmjit_expr.c +++ b/src/backend/jit/llvm/llvmjit_expr.c @@ -2048,11 +2048,11 @@ llvm_compile_expr(ExprState *state) case EEOP_AGG_PLAIN_PERGROUP_NULLCHECK: { - int jumpnull; - LLVMValueRef v_aggstatep; - LLVMValueRef v_allpergroupsp; - LLVMValueRef v_pergroup_allaggs; - LLVMValueRef v_setoff; + int jumpnull; + LLVMValueRef v_aggstatep; + LLVMValueRef v_allpergroupsp; + LLVMValueRef v_pergroup_allaggs; + LLVMValueRef v_setoff; jumpnull = op->d.agg_plain_pergroup_nullcheck.jumpnull; @@ -2060,28 +2060,23 @@ llvm_compile_expr(ExprState *state) * pergroup_allaggs = aggstate->all_pergroups * [op->d.agg_plain_pergroup_nullcheck.setoff]; */ - v_aggstatep = LLVMBuildBitCast( - b, v_parent, l_ptr(StructAggState), ""); + v_aggstatep = LLVMBuildBitCast(b, v_parent, + l_ptr(StructAggState), ""); - v_allpergroupsp = l_load_struct_gep( - b, v_aggstatep, - FIELDNO_AGGSTATE_ALL_PERGROUPS, - "aggstate.all_pergroups"); + v_allpergroupsp = l_load_struct_gep(b, v_aggstatep, + FIELDNO_AGGSTATE_ALL_PERGROUPS, + "aggstate.all_pergroups"); - v_setoff = l_int32_const( - op->d.agg_plain_pergroup_nullcheck.setoff); + v_setoff = l_int32_const(op->d.agg_plain_pergroup_nullcheck.setoff); - v_pergroup_allaggs = l_load_gep1( - b, v_allpergroupsp, v_setoff, ""); + v_pergroup_allaggs = l_load_gep1(b, v_allpergroupsp, v_setoff, ""); - LLVMBuildCondBr( - b, - LLVMBuildICmp(b, LLVMIntEQ, - LLVMBuildPtrToInt( - b, v_pergroup_allaggs, TypeSizeT, ""), - l_sizet_const(0), ""), - opblocks[jumpnull], - opblocks[opno + 1]); + LLVMBuildCondBr(b, + LLVMBuildICmp(b, LLVMIntEQ, + LLVMBuildPtrToInt(b, v_pergroup_allaggs, TypeSizeT, ""), + l_sizet_const(0), ""), + opblocks[jumpnull], + opblocks[opno + 1]); break; } diff --git a/src/backend/libpq/auth-scram.c b/src/backend/libpq/auth-scram.c index 5e5119e8ea7..5214d328656 100644 --- a/src/backend/libpq/auth-scram.c +++ b/src/backend/libpq/auth-scram.c @@ -162,7 +162,7 @@ static char *build_server_final_message(scram_state *state); static bool verify_client_proof(scram_state *state); static bool verify_final_nonce(scram_state *state); static void mock_scram_secret(const char *username, int *iterations, - char **salt, uint8 *stored_key, uint8 *server_key); + char **salt, uint8 *stored_key, uint8 *server_key); static bool is_scram_printable(char *p); static char *sanitize_char(char c); static char *sanitize_str(const char *s); @@ -257,7 +257,7 @@ pg_be_scram_init(Port *port, if (password_type == PASSWORD_TYPE_SCRAM_SHA_256) { if (parse_scram_secret(shadow_pass, &state->iterations, &state->salt, - state->StoredKey, state->ServerKey)) + state->StoredKey, state->ServerKey)) got_secret = true; else { @@ -293,15 +293,15 @@ pg_be_scram_init(Port *port, } /* - * If the user did not have a valid SCRAM secret, we still go through - * the motions with a mock one, and fail as if the client supplied an + * If the 
user did not have a valid SCRAM secret, we still go through the + * motions with a mock one, and fail as if the client supplied an * incorrect password. This is to avoid revealing information to an * attacker. */ if (!got_secret) { mock_scram_secret(state->port->user_name, &state->iterations, - &state->salt, state->StoredKey, state->ServerKey); + &state->salt, state->StoredKey, state->ServerKey); state->doomed = true; } @@ -471,7 +471,7 @@ pg_be_scram_build_secret(const char *password) errmsg("could not generate random salt"))); result = scram_build_secret(saltbuf, SCRAM_DEFAULT_SALT_LEN, - SCRAM_DEFAULT_ITERATIONS, password); + SCRAM_DEFAULT_ITERATIONS, password); if (prep_password) pfree(prep_password); @@ -500,7 +500,7 @@ scram_verify_plain_password(const char *username, const char *password, pg_saslprep_rc rc; if (!parse_scram_secret(secret, &iterations, &encoded_salt, - stored_key, server_key)) + stored_key, server_key)) { /* * The password looked like a SCRAM secret, but could not be parsed. @@ -554,7 +554,7 @@ scram_verify_plain_password(const char *username, const char *password, */ bool parse_scram_secret(const char *secret, int *iterations, char **salt, - uint8 *stored_key, uint8 *server_key) + uint8 *stored_key, uint8 *server_key) { char *v; char *p; @@ -645,7 +645,7 @@ invalid_secret: */ static void mock_scram_secret(const char *username, int *iterations, char **salt, - uint8 *stored_key, uint8 *server_key) + uint8 *stored_key, uint8 *server_key) { char *raw_salt; char *encoded_salt; diff --git a/src/backend/libpq/be-secure-openssl.c b/src/backend/libpq/be-secure-openssl.c index 42c5c07e580..7e15778da52 100644 --- a/src/backend/libpq/be-secure-openssl.c +++ b/src/backend/libpq/be-secure-openssl.c @@ -46,7 +46,7 @@ #include "utils/memutils.h" /* default init hook can be overridden by a shared library */ -static void default_openssl_tls_init(SSL_CTX *context, bool isServerStart); +static void default_openssl_tls_init(SSL_CTX *context, bool isServerStart); openssl_tls_init_hook_typ openssl_tls_init_hook = default_openssl_tls_init; static int my_sock_read(BIO *h, char *buf, int size); @@ -122,7 +122,7 @@ be_tls_init(bool isServerStart) /* * Call init hook (usually to set password callback) */ - (* openssl_tls_init_hook)(context, isServerStart); + (*openssl_tls_init_hook) (context, isServerStart); /* used by the callback */ ssl_is_server_start = isServerStart; @@ -1341,6 +1341,7 @@ default_openssl_tls_init(SSL_CTX *context, bool isServerStart) if (ssl_passphrase_command[0] && ssl_passphrase_command_supports_reload) SSL_CTX_set_default_passwd_cb(context, ssl_external_passwd_cb); else + /* * If reloading and no external command is configured, override * OpenSSL's default handling of passphrase-protected files, diff --git a/src/backend/libpq/crypt.c b/src/backend/libpq/crypt.c index ff755320388..17b91ac9e60 100644 --- a/src/backend/libpq/crypt.c +++ b/src/backend/libpq/crypt.c @@ -98,7 +98,7 @@ get_password_type(const char *shadow_pass) strspn(shadow_pass + 3, MD5_PASSWD_CHARSET) == MD5_PASSWD_LEN - 3) return PASSWORD_TYPE_MD5; if (parse_scram_secret(shadow_pass, &iterations, &encoded_salt, - stored_key, server_key)) + stored_key, server_key)) return PASSWORD_TYPE_SCRAM_SHA_256; return PASSWORD_TYPE_PLAINTEXT; } diff --git a/src/backend/optimizer/path/allpaths.c b/src/backend/optimizer/path/allpaths.c index 255f56b8276..d984da25d77 100644 --- a/src/backend/optimizer/path/allpaths.c +++ b/src/backend/optimizer/path/allpaths.c @@ -2751,13 +2751,14 @@ 
get_useful_pathkeys_for_relation(PlannerInfo *root, RelOptInfo *rel) List *useful_pathkeys_list = NIL; /* - * Considering query_pathkeys is always worth it, because it might allow us - * to avoid a total sort when we have a partially presorted path available. + * Considering query_pathkeys is always worth it, because it might allow + * us to avoid a total sort when we have a partially presorted path + * available. */ if (root->query_pathkeys) { ListCell *lc; - int npathkeys = 0; /* useful pathkeys */ + int npathkeys = 0; /* useful pathkeys */ foreach(lc, root->query_pathkeys) { @@ -2765,15 +2766,15 @@ get_useful_pathkeys_for_relation(PlannerInfo *root, RelOptInfo *rel) EquivalenceClass *pathkey_ec = pathkey->pk_eclass; /* - * We can only build an Incremental Sort for pathkeys which contain - * an EC member in the current relation, so ignore any suffix of the - * list as soon as we find a pathkey without an EC member the - * relation. + * We can only build an Incremental Sort for pathkeys which + * contain an EC member in the current relation, so ignore any + * suffix of the list as soon as we find a pathkey without an EC + * member the relation. * - * By still returning the prefix of the pathkeys list that does meet - * criteria of EC membership in the current relation, we enable not - * just an incremental sort on the entirety of query_pathkeys but - * also incremental sort below a JOIN. + * By still returning the prefix of the pathkeys list that does + * meet criteria of EC membership in the current relation, we + * enable not just an incremental sort on the entirety of + * query_pathkeys but also incremental sort below a JOIN. */ if (!find_em_expr_for_rel(pathkey_ec, rel)) break; @@ -2782,9 +2783,9 @@ get_useful_pathkeys_for_relation(PlannerInfo *root, RelOptInfo *rel) } /* - * The whole query_pathkeys list matches, so append it directly, to allow - * comparing pathkeys easily by comparing list pointer. If we have to truncate - * the pathkeys, we gotta do a copy though. + * The whole query_pathkeys list matches, so append it directly, to + * allow comparing pathkeys easily by comparing list pointer. If we + * have to truncate the pathkeys, we gotta do a copy though. */ if (npathkeys == list_length(root->query_pathkeys)) useful_pathkeys_list = lappend(useful_pathkeys_list, @@ -2851,14 +2852,15 @@ generate_useful_gather_paths(PlannerInfo *root, RelOptInfo *rel, bool override_r /* * If the path has no ordering at all, then we can't use either - * incremental sort or rely on implict sorting with a gather merge. + * incremental sort or rely on implict sorting with a gather + * merge. */ if (subpath->pathkeys == NIL) continue; is_sorted = pathkeys_count_contained_in(useful_pathkeys, - subpath->pathkeys, - &presorted_keys); + subpath->pathkeys, + &presorted_keys); /* * We don't need to consider the case where a subpath is already @@ -2915,8 +2917,9 @@ generate_useful_gather_paths(PlannerInfo *root, RelOptInfo *rel, bool override_r Path *tmp; /* - * We should have already excluded pathkeys of length 1 because - * then presorted_keys > 0 would imply is_sorted was true. + * We should have already excluded pathkeys of length 1 + * because then presorted_keys > 0 would imply is_sorted was + * true. 
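
Several hunks in this commit call pathkeys_count_contained_in(...) and then branch on is_sorted and presorted_keys: fully sorted paths need no sort, paths with a non-empty common prefix are candidates for incremental sort. As a toy model of that contract, over plain int arrays rather than PathKey lists:

#include <stdbool.h>
#include <stdio.h>

static bool
count_contained(const int *required, int nreq,
                const int *provided, int nprov, int *n_common)
{
    int     i;

    for (i = 0; i < nreq && i < nprov; i++)
        if (required[i] != provided[i])
            break;
    *n_common = i;              /* length of the presorted prefix */
    return i == nreq;           /* already fully sorted? */
}

int
main(void)
{
    int     required[] = {1, 2, 3};
    int     provided[] = {1, 2, 7};
    int     presorted_keys;
    bool    is_sorted = count_contained(required, 3, provided, 3,
                                        &presorted_keys);

    /* prints: is_sorted=0 presorted_keys=2 */
    printf("is_sorted=%d presorted_keys=%d\n", is_sorted, presorted_keys);
    return 0;
}

This also shows why the hunks can skip single-element pathkey lists entirely: with one required key, any non-empty common prefix already implies is_sorted.
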
*/ Assert(list_length(useful_pathkeys) != 1); diff --git a/src/backend/optimizer/path/costsize.c b/src/backend/optimizer/path/costsize.c index b10b14a7fb8..f4d4a4df66c 100644 --- a/src/backend/optimizer/path/costsize.c +++ b/src/backend/optimizer/path/costsize.c @@ -1821,19 +1821,19 @@ cost_incremental_sort(Path *path, /* * Extract presorted keys as list of expressions. * - * We need to be careful about Vars containing "varno 0" which might - * have been introduced by generate_append_tlist, which would confuse + * We need to be careful about Vars containing "varno 0" which might have + * been introduced by generate_append_tlist, which would confuse * estimate_num_groups (in fact it'd fail for such expressions). See * recurse_set_operations which has to deal with the same issue. * - * Unlike recurse_set_operations we can't access the original target - * list here, and even if we could it's not very clear how useful would - * that be for a set operation combining multiple tables. So we simply - * detect if there are any expressions with "varno 0" and use the - * default DEFAULT_NUM_DISTINCT in that case. + * Unlike recurse_set_operations we can't access the original target list + * here, and even if we could it's not very clear how useful would that be + * for a set operation combining multiple tables. So we simply detect if + * there are any expressions with "varno 0" and use the default + * DEFAULT_NUM_DISTINCT in that case. * - * We might also use either 1.0 (a single group) or input_tuples (each - * row being a separate group), pretty much the worst and best case for + * We might also use either 1.0 (a single group) or input_tuples (each row + * being a separate group), pretty much the worst and best case for * incremental sort. But those are extreme cases and using something in * between seems reasonable. Furthermore, generate_append_tlist is used * for set operations, which are likely to produce mostly unique output @@ -2403,40 +2403,40 @@ cost_agg(Path *path, PlannerInfo *root, /* * Add the disk costs of hash aggregation that spills to disk. * - * Groups that go into the hash table stay in memory until finalized, - * so spilling and reprocessing tuples doesn't incur additional - * invocations of transCost or finalCost. Furthermore, the computed - * hash value is stored with the spilled tuples, so we don't incur - * extra invocations of the hash function. + * Groups that go into the hash table stay in memory until finalized, so + * spilling and reprocessing tuples doesn't incur additional invocations + * of transCost or finalCost. Furthermore, the computed hash value is + * stored with the spilled tuples, so we don't incur extra invocations of + * the hash function. * - * Hash Agg begins returning tuples after the first batch is - * complete. Accrue writes (spilled tuples) to startup_cost and to - * total_cost; accrue reads only to total_cost. + * Hash Agg begins returning tuples after the first batch is complete. + * Accrue writes (spilled tuples) to startup_cost and to total_cost; + * accrue reads only to total_cost. 
*/ if (aggstrategy == AGG_HASHED || aggstrategy == AGG_MIXED) { - double pages; - double pages_written = 0.0; - double pages_read = 0.0; - double hashentrysize; - double nbatches; - Size mem_limit; - uint64 ngroups_limit; - int num_partitions; - int depth; + double pages; + double pages_written = 0.0; + double pages_read = 0.0; + double hashentrysize; + double nbatches; + Size mem_limit; + uint64 ngroups_limit; + int num_partitions; + int depth; /* * Estimate number of batches based on the computed limits. If less * than or equal to one, all groups are expected to fit in memory; * otherwise we expect to spill. */ - hashentrysize = hash_agg_entry_size( - aggcosts->numAggs, input_width, aggcosts->transitionSpace); + hashentrysize = hash_agg_entry_size(aggcosts->numAggs, input_width, + aggcosts->transitionSpace); hash_agg_set_limits(hashentrysize, numGroups, 0, &mem_limit, &ngroups_limit, &num_partitions); - nbatches = Max( (numGroups * hashentrysize) / mem_limit, - numGroups / ngroups_limit ); + nbatches = Max((numGroups * hashentrysize) / mem_limit, + numGroups / ngroups_limit); nbatches = Max(ceil(nbatches), 1.0); num_partitions = Max(num_partitions, 2); @@ -2446,7 +2446,7 @@ cost_agg(Path *path, PlannerInfo *root, * recursion; but for the purposes of this calculation assume it stays * constant. */ - depth = ceil( log(nbatches) / log(num_partitions) ); + depth = ceil(log(nbatches) / log(num_partitions)); /* * Estimate number of pages read and written. For each level of diff --git a/src/backend/optimizer/path/joinrels.c b/src/backend/optimizer/path/joinrels.c index 4e1650994d5..2d343cd2934 100644 --- a/src/backend/optimizer/path/joinrels.c +++ b/src/backend/optimizer/path/joinrels.c @@ -1378,8 +1378,8 @@ try_partitionwise_join(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2, Assert(joinrel->consider_partitionwise_join); /* - * We can not perform partitionwise join if either of the joining relations - * is not partitioned. + * We can not perform partitionwise join if either of the joining + * relations is not partitioned. */ if (!IS_PARTITIONED_REL(rel1) || !IS_PARTITIONED_REL(rel2)) return; @@ -1622,8 +1622,8 @@ compute_partition_bounds(PlannerInfo *root, RelOptInfo *rel1, * partition bounds as inputs, and the partitions with the same * cardinal positions form the pairs. * - * Note: even in cases where one or both inputs have merged bounds, - * it would be possible for both the bounds to be exactly the same, but + * Note: even in cases where one or both inputs have merged bounds, it + * would be possible for both the bounds to be exactly the same, but * it seems unlikely to be worth the cycles to check. */ if (!rel1->partbounds_merged && @@ -1670,8 +1670,8 @@ compute_partition_bounds(PlannerInfo *root, RelOptInfo *rel1, /* * If the join rel's partbounds_merged flag is true, it means inputs * are not guaranteed to have the same partition bounds, therefore we - * can't assume that the partitions at the same cardinal positions form - * the pairs; let get_matching_part_pairs() generate the pairs. + * can't assume that the partitions at the same cardinal positions + * form the pairs; let get_matching_part_pairs() generate the pairs. * Otherwise, nothing to do since we can assume that. 
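
The cost_agg hunk above estimates spill I/O from two quantities: the number of batches implied by the memory and group-count limits, and the recursion depth needed to shrink those batches by a factor of num_partitions per pass. A worked numeric version of those two formulas, with invented inputs (link with -lm):

#include <math.h>
#include <stdio.h>

int
main(void)
{
    double  numGroups = 1e6;            /* estimated distinct groups */
    double  hashentrysize = 64;         /* bytes per in-memory group */
    double  mem_limit = 4 * 1024 * 1024;    /* 4MB hash memory */
    double  ngroups_limit = 65536;      /* group cap before spilling */
    int     num_partitions = 32;        /* spill fan-out per pass */
    double  nbatches;
    int     depth;

    /* batches needed, whichever limit (bytes or group count) binds first */
    nbatches = fmax(numGroups * hashentrysize / mem_limit,
                    numGroups / ngroups_limit);
    nbatches = fmax(ceil(nbatches), 1.0);
    if (num_partitions < 2)
        num_partitions = 2;

    /* each pass divides a batch by num_partitions, hence the log ratio */
    depth = (int) ceil(log(nbatches) / log(num_partitions));
    printf("nbatches=%.0f depth=%d\n", nbatches, depth);   /* 16 and 1 */
    return 0;
}
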
*/ if (joinrel->partbounds_merged) @@ -1695,7 +1695,7 @@ get_matching_part_pairs(PlannerInfo *root, RelOptInfo *joinrel, { bool rel1_is_simple = IS_SIMPLE_REL(rel1); bool rel2_is_simple = IS_SIMPLE_REL(rel2); - int cnt_parts; + int cnt_parts; *parts1 = NIL; *parts2 = NIL; @@ -1735,9 +1735,10 @@ get_matching_part_pairs(PlannerInfo *root, RelOptInfo *joinrel, * Get a child rel for rel1 with the relids. Note that we should have * the child rel even if rel1 is a join rel, because in that case the * partitions specified in the relids would have matching/overlapping - * boundaries, so the specified partitions should be considered as ones - * to be joined when planning partitionwise joins of rel1, meaning that - * the child rel would have been built by the time we get here. + * boundaries, so the specified partitions should be considered as + * ones to be joined when planning partitionwise joins of rel1, + * meaning that the child rel would have been built by the time we get + * here. */ if (rel1_is_simple) { diff --git a/src/backend/optimizer/path/pathkeys.c b/src/backend/optimizer/path/pathkeys.c index 21e3f5a987c..ce9bf87e9b6 100644 --- a/src/backend/optimizer/path/pathkeys.c +++ b/src/backend/optimizer/path/pathkeys.c @@ -1857,7 +1857,7 @@ pathkeys_useful_for_ordering(PlannerInfo *root, List *pathkeys) return 0; /* unordered path */ (void) pathkeys_count_contained_in(root->query_pathkeys, pathkeys, - &n_common_pathkeys); + &n_common_pathkeys); return n_common_pathkeys; } diff --git a/src/backend/optimizer/plan/planner.c b/src/backend/optimizer/plan/planner.c index e664eb18c05..357850624cc 100644 --- a/src/backend/optimizer/plan/planner.c +++ b/src/backend/optimizer/plan/planner.c @@ -4866,8 +4866,7 @@ create_distinct_paths(PlannerInfo *root, allow_hash = false; /* policy-based decision not to hash */ else { - Size hashentrysize = hash_agg_entry_size( - 0, cheapest_input_path->pathtarget->width, 0); + Size hashentrysize = hash_agg_entry_size(0, cheapest_input_path->pathtarget->width, 0); allow_hash = enable_hashagg_disk || (hashentrysize * numDistinctRows <= work_mem * 1024L); @@ -4972,7 +4971,7 @@ create_ordered_paths(PlannerInfo *root, int presorted_keys; is_sorted = pathkeys_count_contained_in(root->sort_pathkeys, - input_path->pathkeys, &presorted_keys); + input_path->pathkeys, &presorted_keys); if (is_sorted) { @@ -4986,9 +4985,9 @@ create_ordered_paths(PlannerInfo *root, else { /* - * Try adding an explicit sort, but only to the cheapest total path - * since a full sort should generally add the same cost to all - * paths. + * Try adding an explicit sort, but only to the cheapest total + * path since a full sort should generally add the same cost to + * all paths. */ if (input_path == cheapest_input_path) { @@ -5010,11 +5009,11 @@ create_ordered_paths(PlannerInfo *root, } /* - * If incremental sort is enabled, then try it as well. Unlike with - * regular sorts, we can't just look at the cheapest path, because - * the cost of incremental sort depends on how well presorted the - * path is. Additionally incremental sort may enable a cheaper - * startup path to win out despite higher total cost. + * If incremental sort is enabled, then try it as well. Unlike + * with regular sorts, we can't just look at the cheapest path, + * because the cost of incremental sort depends on how well + * presorted the path is. Additionally incremental sort may enable + * a cheaper startup path to win out despite higher total cost. 
*/ if (!enable_incrementalsort) continue; @@ -5110,15 +5109,15 @@ create_ordered_paths(PlannerInfo *root, double total_groups; /* - * We don't care if this is the cheapest partial path - we can't - * simply skip it, because it may be partially sorted in which - * case we want to consider adding incremental sort (instead of - * full sort, which is what happens above). + * We don't care if this is the cheapest partial path - we + * can't simply skip it, because it may be partially sorted in + * which case we want to consider adding incremental sort + * (instead of full sort, which is what happens above). */ is_sorted = pathkeys_count_contained_in(root->sort_pathkeys, - input_path->pathkeys, - &presorted_keys); + input_path->pathkeys, + &presorted_keys); /* No point in adding incremental sort on fully sorted paths. */ if (is_sorted) @@ -6510,8 +6509,8 @@ add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel, int presorted_keys; is_sorted = pathkeys_count_contained_in(root->group_pathkeys, - path->pathkeys, - &presorted_keys); + path->pathkeys, + &presorted_keys); if (path == cheapest_path || is_sorted) { @@ -6607,8 +6606,8 @@ add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel, else if (parse->hasAggs) { /* - * We have aggregation, possibly with plain GROUP BY. Make - * an AggPath. + * We have aggregation, possibly with plain GROUP BY. Make an + * AggPath. */ add_path(grouped_rel, (Path *) create_agg_path(root, @@ -6625,8 +6624,8 @@ add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel, else if (parse->groupClause) { /* - * We have GROUP BY without aggregation or grouping sets. - * Make a GroupPath. + * We have GROUP BY without aggregation or grouping sets. Make + * a GroupPath. */ add_path(grouped_rel, (Path *) create_group_path(root, @@ -6657,8 +6656,8 @@ add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel, int presorted_keys; is_sorted = pathkeys_count_contained_in(root->group_pathkeys, - path->pathkeys, - &presorted_keys); + path->pathkeys, + &presorted_keys); /* * Insert a Sort node, if required. But there's no point in @@ -6712,8 +6711,9 @@ add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel, continue; /* - * We should have already excluded pathkeys of length 1 because - * then presorted_keys > 0 would imply is_sorted was true. + * We should have already excluded pathkeys of length 1 + * because then presorted_keys > 0 would imply is_sorted was + * true. */ Assert(list_length(root->group_pathkeys) != 1); @@ -7032,8 +7032,8 @@ create_partial_grouping_paths(PlannerInfo *root, int presorted_keys; is_sorted = pathkeys_count_contained_in(root->group_pathkeys, - path->pathkeys, - &presorted_keys); + path->pathkeys, + &presorted_keys); /* Ignore already sorted paths */ if (is_sorted) @@ -7086,8 +7086,8 @@ create_partial_grouping_paths(PlannerInfo *root, int presorted_keys; is_sorted = pathkeys_count_contained_in(root->group_pathkeys, - path->pathkeys, - &presorted_keys); + path->pathkeys, + &presorted_keys); if (path == cheapest_partial_path || is_sorted) { @@ -7301,8 +7301,8 @@ gather_grouping_paths(PlannerInfo *root, RelOptInfo *rel) * Consider incremental sort on all partial paths, if enabled. * * We can also skip the entire loop when we only have a single-item - * group_pathkeys because then we can't possibly have a presorted - * prefix of the list without having the list be fully sorted. + * group_pathkeys because then we can't possibly have a presorted prefix + * of the list without having the list be fully sorted. 
*/ if (!enable_incrementalsort || list_length(root->group_pathkeys) == 1) return; @@ -7316,8 +7316,8 @@ gather_grouping_paths(PlannerInfo *root, RelOptInfo *rel) double total_groups; is_sorted = pathkeys_count_contained_in(root->group_pathkeys, - path->pathkeys, - &presorted_keys); + path->pathkeys, + &presorted_keys); if (is_sorted) continue; diff --git a/src/backend/parser/parse_clause.c b/src/backend/parser/parse_clause.c index 6fff13479e4..96ae3a7e64d 100644 --- a/src/backend/parser/parse_clause.c +++ b/src/backend/parser/parse_clause.c @@ -1767,7 +1767,7 @@ transformLimitClause(ParseState *pstate, Node *clause, * unadorned NULL that's not accepted back by the grammar. */ if (exprKind == EXPR_KIND_LIMIT && limitOption == LIMIT_OPTION_WITH_TIES && - IsA(clause, A_Const) && ((A_Const *) clause)->val.type == T_Null) + IsA(clause, A_Const) &&((A_Const *) clause)->val.type == T_Null) ereport(ERROR, (errcode(ERRCODE_INVALID_ROW_COUNT_IN_LIMIT_CLAUSE), errmsg("row count cannot be NULL in FETCH FIRST ... WITH TIES clause"))); diff --git a/src/backend/parser/parse_utilcmd.c b/src/backend/parser/parse_utilcmd.c index 75c122fe348..0e4caa6ad47 100644 --- a/src/backend/parser/parse_utilcmd.c +++ b/src/backend/parser/parse_utilcmd.c @@ -990,7 +990,7 @@ transformTableLikeClause(CreateStmtContext *cxt, TableLikeClause *table_like_cla /* * We must fill the attmap now so that it can be used to process generated * column default expressions in the per-column loop below. - */ + */ new_attno = 1; for (parent_attno = 1; parent_attno <= tupleDesc->natts; parent_attno++) @@ -2194,7 +2194,7 @@ transformIndexConstraint(Constraint *constraint, CreateStmtContext *cxt) * mentioned above. */ Datum attoptions = - get_attoptions(RelationGetRelid(index_rel), i + 1); + get_attoptions(RelationGetRelid(index_rel), i + 1); defopclass = GetDefaultOpClass(attform->atttypid, index_rel->rd_rel->relam); diff --git a/src/backend/partitioning/partbounds.c b/src/backend/partitioning/partbounds.c index b9c65ff87c5..7553d559877 100644 --- a/src/backend/partitioning/partbounds.c +++ b/src/backend/partitioning/partbounds.c @@ -76,7 +76,7 @@ typedef struct PartitionRangeBound typedef struct PartitionMap { int nparts; /* number of partitions */ - int *merged_indexes; /* indexes of merged partitions */ + int *merged_indexes; /* indexes of merged partitions */ bool *merged; /* flags to indicate whether partitions are * merged with non-dummy partitions */ bool did_remapping; /* did we re-map partitions? 
*/ @@ -120,29 +120,29 @@ static PartitionBoundInfo merge_range_bounds(int partnatts, static void init_partition_map(RelOptInfo *rel, PartitionMap *map); static void free_partition_map(PartitionMap *map); static bool is_dummy_partition(RelOptInfo *rel, int part_index); -static int merge_matching_partitions(PartitionMap *outer_map, - PartitionMap *inner_map, - int outer_part, - int inner_part, - int *next_index); -static int process_outer_partition(PartitionMap *outer_map, - PartitionMap *inner_map, - bool outer_has_default, - bool inner_has_default, - int outer_index, - int inner_default, - JoinType jointype, - int *next_index, - int *default_index); -static int process_inner_partition(PartitionMap *outer_map, - PartitionMap *inner_map, - bool outer_has_default, - bool inner_has_default, - int inner_index, - int outer_default, - JoinType jointype, - int *next_index, - int *default_index); +static int merge_matching_partitions(PartitionMap *outer_map, + PartitionMap *inner_map, + int outer_part, + int inner_part, + int *next_index); +static int process_outer_partition(PartitionMap *outer_map, + PartitionMap *inner_map, + bool outer_has_default, + bool inner_has_default, + int outer_index, + int inner_default, + JoinType jointype, + int *next_index, + int *default_index); +static int process_inner_partition(PartitionMap *outer_map, + PartitionMap *inner_map, + bool outer_has_default, + bool inner_has_default, + int inner_index, + int outer_default, + JoinType jointype, + int *next_index, + int *default_index); static void merge_null_partitions(PartitionMap *outer_map, PartitionMap *inner_map, bool outer_has_null, @@ -161,8 +161,8 @@ static void merge_default_partitions(PartitionMap *outer_map, JoinType jointype, int *next_index, int *default_index); -static int merge_partition_with_dummy(PartitionMap *map, int index, - int *next_index); +static int merge_partition_with_dummy(PartitionMap *map, int index, + int *next_index); static void fix_merged_indexes(PartitionMap *outer_map, PartitionMap *inner_map, int nmerged, List *merged_indexes); @@ -179,15 +179,15 @@ static PartitionBoundInfo build_merged_partition_bounds(char strategy, List *merged_indexes, int null_index, int default_index); -static int get_range_partition(RelOptInfo *rel, - PartitionBoundInfo bi, - int *lb_pos, - PartitionRangeBound *lb, - PartitionRangeBound *ub); -static int get_range_partition_internal(PartitionBoundInfo bi, - int *lb_pos, - PartitionRangeBound *lb, - PartitionRangeBound *ub); +static int get_range_partition(RelOptInfo *rel, + PartitionBoundInfo bi, + int *lb_pos, + PartitionRangeBound *lb, + PartitionRangeBound *ub); +static int get_range_partition_internal(PartitionBoundInfo bi, + int *lb_pos, + PartitionRangeBound *lb, + PartitionRangeBound *ub); static bool compare_range_partitions(int partnatts, FmgrInfo *partsupfuncs, Oid *partcollations, PartitionRangeBound *outer_lb, @@ -201,7 +201,7 @@ static void get_merged_range_bounds(int partnatts, FmgrInfo *partsupfuncs, PartitionRangeBound *outer_ub, PartitionRangeBound *inner_lb, PartitionRangeBound *inner_ub, - int lb_cmpval, int ub_cmpval, + int lb_cmpval, int ub_cmpval, PartitionRangeBound *merged_lb, PartitionRangeBound *merged_ub); static void add_merged_range_bounds(int partnatts, FmgrInfo *partsupfuncs, @@ -955,8 +955,8 @@ partition_bounds_copy(PartitionBoundInfo src, dest->kind = NULL; /* - * For hash partitioning, datums array will have two elements - modulus and - * remainder. 
+ * For hash partitioning, datums array will have two elements - modulus + * and remainder. */ hash_part = (key->strategy == PARTITION_STRATEGY_HASH); natts = hash_part ? 2 : partnatts; @@ -1076,7 +1076,7 @@ partition_bounds_merge(int partnatts, default: elog(ERROR, "unexpected partition strategy: %d", (int) outer_binfo->strategy); - return NULL; /* keep compiler quiet */ + return NULL; /* keep compiler quiet */ } } @@ -1144,10 +1144,10 @@ merge_list_bounds(FmgrInfo *partsupfunc, Oid *partcollation, /* * Merge partitions from both sides. In each iteration we compare a pair - * of list values, one from each side, and decide whether the corresponding - * partitions match or not. If the two values match exactly, move to the - * next pair of list values, otherwise move to the next list value on the - * side with a smaller list value. + * of list values, one from each side, and decide whether the + * corresponding partitions match or not. If the two values match + * exactly, move to the next pair of list values, otherwise move to the + * next list value on the side with a smaller list value. */ outer_pos = inner_pos = 0; while (outer_pos < outer_bi->ndatums || inner_pos < inner_bi->ndatums) @@ -1163,8 +1163,8 @@ merge_list_bounds(FmgrInfo *partsupfunc, Oid *partcollation, if (outer_pos < outer_bi->ndatums) { /* - * If the partition on the outer side has been proven empty, ignore - * it and move to the next datum on the outer side. + * If the partition on the outer side has been proven empty, + * ignore it and move to the next datum on the outer side. */ outer_index = outer_bi->indexes[outer_pos]; if (is_dummy_partition(outer_rel, outer_index)) @@ -1176,8 +1176,8 @@ merge_list_bounds(FmgrInfo *partsupfunc, Oid *partcollation, if (inner_pos < inner_bi->ndatums) { /* - * If the partition on the inner side has been proven empty, ignore - * it and move to the next datum on the inner side. + * If the partition on the inner side has been proven empty, + * ignore it and move to the next datum on the inner side. */ inner_index = inner_bi->indexes[inner_pos]; if (is_dummy_partition(inner_rel, inner_index)) @@ -1197,10 +1197,10 @@ merge_list_bounds(FmgrInfo *partsupfunc, Oid *partcollation, * We run this loop till both sides finish. This allows us to avoid * duplicating code to handle the remaining values on the side which * finishes later. For that we set the comparison parameter cmpval in - * such a way that it appears as if the side which finishes earlier has - * an extra value higher than any other value on the unfinished side. - * That way we advance the values on the unfinished side till all of - * its values are exhausted. + * such a way that it appears as if the side which finishes earlier + * has an extra value higher than any other value on the unfinished + * side. That way we advance the values on the unfinished side till + * all of its values are exhausted. */ if (outer_pos >= outer_bi->ndatums) cmpval = 1; @@ -1245,10 +1245,10 @@ merge_list_bounds(FmgrInfo *partsupfunc, Oid *partcollation, Assert(outer_pos < outer_bi->ndatums); /* - * If the inner side has the default partition, or this is an outer - * join, try to assign a merged partition to the outer partition - * (see process_outer_partition()). Otherwise, the outer partition - * will not contribute to the result. + * If the inner side has the default partition, or this is an + * outer join, try to assign a merged partition to the outer + * partition (see process_outer_partition()). 
Otherwise, the + * outer partition will not contribute to the result. */ if (inner_has_default || IS_OUTER_JOIN(jointype)) { @@ -1281,8 +1281,8 @@ merge_list_bounds(FmgrInfo *partsupfunc, Oid *partcollation, /* * If the outer side has the default partition, or this is a FULL * join, try to assign a merged partition to the inner partition - * (see process_inner_partition()). Otherwise, the inner partition - * will not contribute to the result. + * (see process_inner_partition()). Otherwise, the inner + * partition will not contribute to the result. */ if (outer_has_default || jointype == JOIN_FULL) { @@ -1459,8 +1459,8 @@ merge_range_bounds(int partnatts, FmgrInfo *partsupfuncs, * partitions match or not. If the two ranges overlap, move to the next * pair of ranges, otherwise move to the next range on the side with a * lower range. outer_lb_pos/inner_lb_pos keep track of the positions of - * lower bounds in the datums arrays in the outer/inner PartitionBoundInfos - * respectively. + * lower bounds in the datums arrays in the outer/inner + * PartitionBoundInfos respectively. */ outer_lb_pos = inner_lb_pos = 0; outer_index = get_range_partition(outer_rel, outer_bi, &outer_lb_pos, @@ -1480,10 +1480,10 @@ merge_range_bounds(int partnatts, FmgrInfo *partsupfuncs, * We run this loop till both sides finish. This allows us to avoid * duplicating code to handle the remaining ranges on the side which * finishes later. For that we set the comparison parameter cmpval in - * such a way that it appears as if the side which finishes earlier has - * an extra range higher than any other range on the unfinished side. - * That way we advance the ranges on the unfinished side till all of - * its ranges are exhausted. + * such a way that it appears as if the side which finishes earlier + * has an extra range higher than any other range on the unfinished + * side. That way we advance the ranges on the unfinished side till + * all of its ranges are exhausted. */ if (outer_index == -1) { @@ -1563,10 +1563,10 @@ merge_range_bounds(int partnatts, FmgrInfo *partsupfuncs, goto cleanup; /* - * A row from a non-overlapping portion (if any) of a partition - * on one side might find its join partner in the default - * partition (if any) on the other side, causing the same - * situation as above; give up in that case. + * A row from a non-overlapping portion (if any) of a partition on + * one side might find its join partner in the default partition + * (if any) on the other side, causing the same situation as + * above; give up in that case. */ if ((outer_has_default && (lb_cmpval > 0 || ub_cmpval < 0)) || (inner_has_default && (lb_cmpval < 0 || ub_cmpval > 0))) @@ -1582,10 +1582,10 @@ merge_range_bounds(int partnatts, FmgrInfo *partsupfuncs, outer_map.merged[outer_index] == false); /* - * If the inner side has the default partition, or this is an outer - * join, try to assign a merged partition to the outer partition - * (see process_outer_partition()). Otherwise, the outer partition - * will not contribute to the result. + * If the inner side has the default partition, or this is an + * outer join, try to assign a merged partition to the outer + * partition (see process_outer_partition()). Otherwise, the + * outer partition will not contribute to the result. 
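Both merge loops above (the list and range variants) rely on the same two-pointer walk and the sentinel cmpval trick described in the rewrapped comments. A minimal, self-contained sketch, with illustrative names and plain ints standing in for partition bound datums:

    #include <stdio.h>

    /*
     * Walk two sorted arrays in lockstep. Once one side runs out, cmpval is
     * forced so that it looks as if the exhausted side still had one value
     * higher than anything left on the other side; that drains the
     * unfinished side without duplicating the loop body.
     */
    static void
    merge_sorted_sides(const int *outer, int nouter, const int *inner, int ninner)
    {
        int o = 0, i = 0;

        while (o < nouter || i < ninner)
        {
            int cmpval;

            if (o >= nouter)
                cmpval = 1;        /* imaginary extra outer value, larger than all */
            else if (i >= ninner)
                cmpval = -1;       /* imaginary extra inner value, larger than all */
            else
                cmpval = (outer[o] > inner[i]) - (outer[o] < inner[i]);

            if (cmpval == 0)
            {
                printf("both sides have %d\n", outer[o]);
                o++;
                i++;
            }
            else if (cmpval < 0)
                o++;               /* outer value is smaller; advance outer only */
            else
                i++;               /* inner value is smaller; advance inner only */
        }
    }

    int
    main(void)
    {
        int a[] = {1, 3, 5};
        int b[] = {3, 4, 5, 7};

        merge_sorted_sides(a, 3, b, 4);
        return 0;
    }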
*/ if (inner_has_default || IS_OUTER_JOIN(jointype)) { @@ -1621,8 +1621,8 @@ merge_range_bounds(int partnatts, FmgrInfo *partsupfuncs, /* * If the outer side has the default partition, or this is a FULL * join, try to assign a merged partition to the inner partition - * (see process_inner_partition()). Otherwise, the inner partition - * will not contribute to the result. + * (see process_inner_partition()). Otherwise, the inner + * partition will not contribute to the result. */ if (outer_has_default || jointype == JOIN_FULL) { @@ -1647,8 +1647,8 @@ merge_range_bounds(int partnatts, FmgrInfo *partsupfuncs, } /* - * If we assigned a merged partition, add the range bounds and index of - * the merged partition if appropriate. + * If we assigned a merged partition, add the range bounds and index + * of the merged partition if appropriate. */ if (merged_index >= 0 && merged_index != default_index) add_merged_range_bounds(partnatts, partsupfuncs, partcollations, @@ -1766,10 +1766,10 @@ static int merge_matching_partitions(PartitionMap *outer_map, PartitionMap *inner_map, int outer_index, int inner_index, int *next_index) { - int outer_merged_index; - int inner_merged_index; - bool outer_merged; - bool inner_merged; + int outer_merged_index; + int inner_merged_index; + bool outer_merged; + bool inner_merged; Assert(outer_index >= 0 && outer_index < outer_map->nparts); outer_merged_index = outer_map->merged_indexes[outer_index]; @@ -1839,7 +1839,7 @@ merge_matching_partitions(PartitionMap *outer_map, PartitionMap *inner_map, */ if (outer_merged_index == -1 && inner_merged_index == -1) { - int merged_index = *next_index; + int merged_index = *next_index; Assert(!outer_merged); Assert(!inner_merged); @@ -1891,16 +1891,16 @@ process_outer_partition(PartitionMap *outer_map, int *next_index, int *default_index) { - int merged_index = -1; + int merged_index = -1; Assert(outer_index >= 0); /* * If the inner side has the default partition, a row from the outer * partition might find its join partner in the default partition; try - * merging the outer partition with the default partition. Otherwise, this - * should be an outer join, in which case the outer partition has to be - * scanned all the way anyway; merge the outer partition with a dummy + * merging the outer partition with the default partition. Otherwise, + * this should be an outer join, in which case the outer partition has to + * be scanned all the way anyway; merge the outer partition with a dummy * partition on the other side. */ if (inner_has_default) @@ -1909,9 +1909,10 @@ process_outer_partition(PartitionMap *outer_map, /* * If the outer side has the default partition as well, the default - * partition on the inner side will have two matching partitions on the - * other side: the outer partition and the default partition on the - * outer side. Partitionwise join doesn't handle this scenario yet. + * partition on the inner side will have two matching partitions on + * the other side: the outer partition and the default partition on + * the outer side. Partitionwise join doesn't handle this scenario + * yet. */ if (outer_has_default) return -1; @@ -1923,10 +1924,10 @@ process_outer_partition(PartitionMap *outer_map, return -1; /* - * If this is a FULL join, the default partition on the inner side - * has to be scanned all the way anyway, so the resulting partition - * will contain all key values from the default partition, which any - * other partition of the join relation will not contain. 
Thus the + * If this is a FULL join, the default partition on the inner side has + * to be scanned all the way anyway, so the resulting partition will + * contain all key values from the default partition, which any other + * partition of the join relation will not contain. Thus the * resulting partition will act as the default partition of the join * relation; record the index in *default_index if not already done. */ @@ -1972,15 +1973,15 @@ process_inner_partition(PartitionMap *outer_map, int *next_index, int *default_index) { - int merged_index = -1; + int merged_index = -1; Assert(inner_index >= 0); /* * If the outer side has the default partition, a row from the inner * partition might find its join partner in the default partition; try - * merging the inner partition with the default partition. Otherwise, this - * should be a FULL join, in which case the inner partition has to be + * merging the inner partition with the default partition. Otherwise, + * this should be a FULL join, in which case the inner partition has to be * scanned all the way anyway; merge the inner partition with a dummy * partition on the other side. */ @@ -1990,9 +1991,10 @@ process_inner_partition(PartitionMap *outer_map, /* * If the inner side has the default partition as well, the default - * partition on the outer side will have two matching partitions on the - * other side: the inner partition and the default partition on the - * inner side. Partitionwise join doesn't handle this scenario yet. + * partition on the outer side will have two matching partitions on + * the other side: the inner partition and the default partition on + * the inner side. Partitionwise join doesn't handle this scenario + * yet. */ if (inner_has_default) return -1; @@ -2056,8 +2058,8 @@ merge_null_partitions(PartitionMap *outer_map, int *next_index, int *null_index) { - bool consider_outer_null = false; - bool consider_inner_null = false; + bool consider_outer_null = false; + bool consider_inner_null = false; Assert(outer_has_null || inner_has_null); Assert(*null_index == -1); @@ -2090,10 +2092,10 @@ merge_null_partitions(PartitionMap *outer_map, /* * If this is an outer join, the NULL partition on the outer side has * to be scanned all the way anyway; merge the NULL partition with a - * dummy partition on the other side. In that case consider_outer_null - * means that the NULL partition only contains NULL values as the key - * values, so the merged partition will do so; treat it as the NULL - * partition of the join relation. + * dummy partition on the other side. In that case + * consider_outer_null means that the NULL partition only contains + * NULL values as the key values, so the merged partition will do so; + * treat it as the NULL partition of the join relation. */ if (IS_OUTER_JOIN(jointype)) { @@ -2107,12 +2109,12 @@ merge_null_partitions(PartitionMap *outer_map, Assert(inner_has_null); /* - * If this is a FULL join, the NULL partition on the inner side has - * to be scanned all the way anyway; merge the NULL partition with a - * dummy partition on the other side. In that case consider_inner_null - * means that the NULL partition only contains NULL values as the key - * values, so the merged partition will do so; treat it as the NULL - * partition of the join relation. + * If this is a FULL join, the NULL partition on the inner side has to + * be scanned all the way anyway; merge the NULL partition with a + * dummy partition on the other side. 
In that case + * consider_inner_null means that the NULL partition only contains + * NULL values as the key values, so the merged partition will do so; + * treat it as the NULL partition of the join relation. */ if (jointype == JOIN_FULL) *null_index = merge_partition_with_dummy(inner_map, inner_null, @@ -2166,8 +2168,8 @@ merge_default_partitions(PartitionMap *outer_map, int *next_index, int *default_index) { - int outer_merged_index = -1; - int inner_merged_index = -1; + int outer_merged_index = -1; + int inner_merged_index = -1; Assert(outer_has_default || inner_has_default); @@ -2188,9 +2190,10 @@ merge_default_partitions(PartitionMap *outer_map, /* * If this is an outer join, the default partition on the outer side * has to be scanned all the way anyway; if we have not yet assigned a - * partition, merge the default partition with a dummy partition on the - * other side. The merged partition will act as the default partition - * of the join relation (see comments in process_inner_partition()). + * partition, merge the default partition with a dummy partition on + * the other side. The merged partition will act as the default + * partition of the join relation (see comments in + * process_inner_partition()). */ if (IS_OUTER_JOIN(jointype)) { @@ -2211,11 +2214,12 @@ merge_default_partitions(PartitionMap *outer_map, else if (!outer_has_default && inner_has_default) { /* - * If this is a FULL join, the default partition on the inner side - * has to be scanned all the way anyway; if we have not yet assigned a - * partition, merge the default partition with a dummy partition on the - * other side. The merged partition will act as the default partition - * of the join relation (see comments in process_outer_partition()). + * If this is a FULL join, the default partition on the inner side has + * to be scanned all the way anyway; if we have not yet assigned a + * partition, merge the default partition with a dummy partition on + * the other side. The merged partition will act as the default + * partition of the join relation (see comments in + * process_outer_partition()). */ if (jointype == JOIN_FULL) { @@ -2266,7 +2270,7 @@ merge_default_partitions(PartitionMap *outer_map, static int merge_partition_with_dummy(PartitionMap *map, int index, int *next_index) { - int merged_index = *next_index; + int merged_index = *next_index; Assert(index >= 0 && index < map->nparts); Assert(map->merged_indexes[index] == -1); @@ -2346,7 +2350,7 @@ generate_matching_part_pairs(RelOptInfo *outer_rel, RelOptInfo *inner_rel, int *outer_indexes; int *inner_indexes; int max_nparts; - int i; + int i; Assert(nmerged > 0); Assert(*outer_parts == NIL); @@ -2365,7 +2369,7 @@ generate_matching_part_pairs(RelOptInfo *outer_rel, RelOptInfo *inner_rel, { if (i < outer_nparts) { - int merged_index = outer_map->merged_indexes[i]; + int merged_index = outer_map->merged_indexes[i]; if (merged_index >= 0) { @@ -2375,7 +2379,7 @@ generate_matching_part_pairs(RelOptInfo *outer_rel, RelOptInfo *inner_rel, } if (i < inner_nparts) { - int merged_index = inner_map->merged_indexes[i]; + int merged_index = inner_map->merged_indexes[i]; if (merged_index >= 0) { @@ -2392,10 +2396,10 @@ generate_matching_part_pairs(RelOptInfo *outer_rel, RelOptInfo *inner_rel, int inner_index = inner_indexes[i]; /* - * If both partitions are dummy, it means the merged partition that had - * been assigned to the outer/inner partition was removed when - * re-merging the outer/inner partition in merge_matching_partitions(); - * ignore the merged partition. 
+ * If both partitions are dummy, it means the merged partition that + * had been assigned to the outer/inner partition was removed when + * re-merging the outer/inner partition in + * merge_matching_partitions(); ignore the merged partition. */ if (outer_index == -1 && inner_index == -1) continue; @@ -2484,7 +2488,8 @@ get_range_partition(RelOptInfo *rel, Assert(bi->strategy == PARTITION_STRATEGY_RANGE); - do { + do + { part_index = get_range_partition_internal(bi, lb_pos, lb, ub); if (part_index == -1) return -1; @@ -2609,7 +2614,7 @@ get_merged_range_bounds(int partnatts, FmgrInfo *partsupfuncs, PartitionRangeBound *outer_ub, PartitionRangeBound *inner_lb, PartitionRangeBound *inner_ub, - int lb_cmpval, int ub_cmpval, + int lb_cmpval, int ub_cmpval, PartitionRangeBound *merged_lb, PartitionRangeBound *merged_ub) { @@ -2638,8 +2643,8 @@ get_merged_range_bounds(int partnatts, FmgrInfo *partsupfuncs, /* * A LEFT/ANTI join will have all the rows from the outer side, so - * the bounds of the merged partition will be the same as the outer - * bounds. + * the bounds of the merged partition will be the same as the + * outer bounds. */ *merged_lb = *outer_lb; *merged_ub = *outer_ub; @@ -2648,10 +2653,10 @@ get_merged_range_bounds(int partnatts, FmgrInfo *partsupfuncs, case JOIN_FULL: /* - * A FULL join will have all the rows from both sides, so the lower - * bound of the merged partition will be the lower of the two lower - * bounds, and the upper bound of the merged partition will be the - * higher of the two upper bounds. + * A FULL join will have all the rows from both sides, so the + * lower bound of the merged partition will be the lower of the + * two lower bounds, and the upper bound of the merged partition + * will be the higher of the two upper bounds. */ *merged_lb = (lb_cmpval < 0) ? *outer_lb : *inner_lb; *merged_ub = (ub_cmpval > 0) ? *outer_ub : *inner_ub; @@ -2687,7 +2692,7 @@ add_merged_range_bounds(int partnatts, FmgrInfo *partsupfuncs, } else { - PartitionRangeBound prev_ub; + PartitionRangeBound prev_ub; Assert(*merged_datums); Assert(*merged_kinds); diff --git a/src/backend/partitioning/partprune.c b/src/backend/partitioning/partprune.c index eac52e6ec85..badd31a44c3 100644 --- a/src/backend/partitioning/partprune.c +++ b/src/backend/partitioning/partprune.c @@ -854,8 +854,8 @@ gen_partprune_steps_internal(GeneratePruningStepsContext *context, ListCell *lc; /* - * If this partitioned relation has a default partition and is itself - * a partition (as evidenced by partition_qual being not NIL), we first + * If this partitioned relation has a default partition and is itself a + * partition (as evidenced by partition_qual being not NIL), we first * check if the clauses contradict the partition constraint. If they do, * there's no need to generate any steps as it'd already be proven that no * partitions need to be scanned. diff --git a/src/backend/postmaster/autovacuum.c b/src/backend/postmaster/autovacuum.c index 7e97ffab27d..6829ff3e8f8 100644 --- a/src/backend/postmaster/autovacuum.c +++ b/src/backend/postmaster/autovacuum.c @@ -3097,7 +3097,7 @@ relation_needs_vacanalyze(Oid relid, /* Determine if this table needs vacuum or analyze. 
*/ *dovacuum = force_vacuum || (vactuples > vacthresh) || - (vac_ins_base_thresh >= 0 && instuples > vacinsthresh); + (vac_ins_base_thresh >= 0 && instuples > vacinsthresh); *doanalyze = (anltuples > anlthresh); } else diff --git a/src/backend/postmaster/checkpointer.c b/src/backend/postmaster/checkpointer.c index 2b552a7ff90..34ed9f78878 100644 --- a/src/backend/postmaster/checkpointer.c +++ b/src/backend/postmaster/checkpointer.c @@ -540,29 +540,29 @@ HandleCheckpointerInterrupts(void) ProcessConfigFile(PGC_SIGHUP); /* - * Checkpointer is the last process to shut down, so we ask it to - * hold the keys for a range of other tasks required most of which - * have nothing to do with checkpointing at all. + * Checkpointer is the last process to shut down, so we ask it to hold + * the keys for a range of other tasks required most of which have + * nothing to do with checkpointing at all. * - * For various reasons, some config values can change dynamically - * so the primary copy of them is held in shared memory to make - * sure all backends see the same value. We make Checkpointer - * responsible for updating the shared memory copy if the - * parameter setting changes because of SIGHUP. + * For various reasons, some config values can change dynamically so + * the primary copy of them is held in shared memory to make sure all + * backends see the same value. We make Checkpointer responsible for + * updating the shared memory copy if the parameter setting changes + * because of SIGHUP. */ UpdateSharedMemoryConfig(); } if (ShutdownRequestPending) { /* - * From here on, elog(ERROR) should end with exit(1), not send - * control back to the sigsetjmp block above + * From here on, elog(ERROR) should end with exit(1), not send control + * back to the sigsetjmp block above */ ExitOnAnyError = true; /* Close down the database */ ShutdownXLOG(0, 0); /* Normal exit from the checkpointer is here */ - proc_exit(0); /* done */ + proc_exit(0); /* done */ } } diff --git a/src/backend/postmaster/pgstat.c b/src/backend/postmaster/pgstat.c index 0ecd29a1d99..e246be388b5 100644 --- a/src/backend/postmaster/pgstat.c +++ b/src/backend/postmaster/pgstat.c @@ -6235,7 +6235,7 @@ static void pgstat_recv_resetslrucounter(PgStat_MsgResetslrucounter *msg, int len) { int i; - TimestampTz ts = GetCurrentTimestamp(); + TimestampTz ts = GetCurrentTimestamp(); for (i = 0; i < SLRU_NUM_ELEMENTS; i++) { @@ -6292,10 +6292,10 @@ pgstat_recv_vacuum(PgStat_MsgVacuum *msg, int len) /* * It is quite possible that a non-aggressive VACUUM ended up skipping * various pages, however, we'll zero the insert counter here regardless. - * It's currently used only to track when we need to perform an - * "insert" autovacuum, which are mainly intended to freeze newly inserted - * tuples. Zeroing this may just mean we'll not try to vacuum the table - * again until enough tuples have been inserted to trigger another insert + * It's currently used only to track when we need to perform an "insert" + * autovacuum, which are mainly intended to freeze newly inserted tuples. + * Zeroing this may just mean we'll not try to vacuum the table again + * until enough tuples have been inserted to trigger another insert * autovacuum. An anti-wraparound autovacuum will catch any persistent * stragglers. 
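For context on the *dovacuum test earlier in this run of hunks: the thresholds it compares against are, per the autovacuum documentation, a fixed base plus a scale factor times the table's reltuples, with a negative insert base threshold disabling the insert-driven path. A sketch under those assumptions (the names are illustrative, not the variables of relation_needs_vacanalyze()):

    #include <stdbool.h>

    static bool
    needs_vacuum(double reltuples, double dead_tuples, double inserted_tuples,
                 double base_thresh, double scale_factor,
                 double ins_base_thresh, double ins_scale_factor,
                 bool force_vacuum)
    {
        /* threshold = base + scale_factor * reltuples, per the documentation */
        double vacthresh = base_thresh + scale_factor * reltuples;
        double vacinsthresh = ins_base_thresh + ins_scale_factor * reltuples;

        return force_vacuum ||
            dead_tuples > vacthresh ||
            (ins_base_thresh >= 0 && inserted_tuples > vacinsthresh);
    }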
*/ @@ -6687,7 +6687,7 @@ pgstat_clip_activity(const char *raw_activity) int pgstat_slru_index(const char *name) { - int i; + int i; for (i = 0; i < SLRU_NUM_ELEMENTS; i++) { diff --git a/src/backend/postmaster/postmaster.c b/src/backend/postmaster/postmaster.c index e19d5dc1a64..160afe9f392 100644 --- a/src/backend/postmaster/postmaster.c +++ b/src/backend/postmaster/postmaster.c @@ -2036,6 +2036,7 @@ retry1: if (SSLok == 'S' && secure_open_server(port) == -1) return STATUS_ERROR; #endif + /* * regular startup packet, cancel, etc packet should follow, but not * another SSL negotiation request, and a GSS request should only @@ -2066,6 +2067,7 @@ retry1: if (GSSok == 'G' && secure_open_gssapi(port) == -1) return STATUS_ERROR; #endif + /* * regular startup packet, cancel, etc packet should follow, but not * another GSS negotiation request, and an SSL request should only diff --git a/src/backend/replication/backup_manifest.c b/src/backend/replication/backup_manifest.c index d2f454c60ef..9fc0e179ff0 100644 --- a/src/backend/replication/backup_manifest.c +++ b/src/backend/replication/backup_manifest.c @@ -80,7 +80,7 @@ InitializeBackupManifest(backup_manifest_info *manifest, void AddFileToBackupManifest(backup_manifest_info *manifest, const char *spcoid, const char *pathname, size_t size, pg_time_t mtime, - pg_checksum_context * checksum_ctx) + pg_checksum_context *checksum_ctx) { char pathbuf[MAXPGPATH]; int pathlen; @@ -103,11 +103,11 @@ AddFileToBackupManifest(backup_manifest_info *manifest, const char *spcoid, } /* - * Each file's entry needs to be separated from any entry that follows by a - * comma, but there's no comma before the first one or after the last one. - * To make that work, adding a file to the manifest starts by terminating - * the most recently added line, with a comma if appropriate, but does not - * terminate the line inserted for this file. + * Each file's entry needs to be separated from any entry that follows by + * a comma, but there's no comma before the first one or after the last + * one. To make that work, adding a file to the manifest starts by + * terminating the most recently added line, with a comma if appropriate, + * but does not terminate the line inserted for this file. */ initStringInfo(&buf); if (manifest->first_file) diff --git a/src/backend/replication/logical/relation.c b/src/backend/replication/logical/relation.c index fec39354c03..5781b42af62 100644 --- a/src/backend/replication/logical/relation.c +++ b/src/backend/replication/logical/relation.c @@ -52,7 +52,7 @@ typedef struct LogicalRepPartMapEntry { Oid partoid; /* LogicalRepPartMap's key */ LogicalRepRelMapEntry relmapentry; -} LogicalRepPartMapEntry; +} LogicalRepPartMapEntry; /* * Relcache invalidation callback for our relation map cache. diff --git a/src/backend/replication/pgoutput/pgoutput.c b/src/backend/replication/pgoutput/pgoutput.c index 77b85fc6557..15379e31181 100644 --- a/src/backend/replication/pgoutput/pgoutput.c +++ b/src/backend/replication/pgoutput/pgoutput.c @@ -692,7 +692,7 @@ get_rel_sync_entry(PGOutputData *data, Oid relid) if (!publish) { - bool ancestor_published = false; + bool ancestor_published = false; /* * For a partition, check if any of the ancestors are @@ -702,13 +702,16 @@ get_rel_sync_entry(PGOutputData *data, Oid relid) */ if (am_partition) { - List *ancestors = get_partition_ancestors(relid); - ListCell *lc2; + List *ancestors = get_partition_ancestors(relid); + ListCell *lc2; - /* Find the "topmost" ancestor that is in this publication. 
*/ + /* + * Find the "topmost" ancestor that is in this + * publication. + */ foreach(lc2, ancestors) { - Oid ancestor = lfirst_oid(lc2); + Oid ancestor = lfirst_oid(lc2); if (list_member_oid(GetRelationPublications(ancestor), pub->oid)) diff --git a/src/backend/replication/slotfuncs.c b/src/backend/replication/slotfuncs.c index 26890dffb45..6fed3cfd23b 100644 --- a/src/backend/replication/slotfuncs.c +++ b/src/backend/replication/slotfuncs.c @@ -425,10 +425,9 @@ pg_physical_replication_slot_advance(XLogRecPtr moveto) retlsn = moveto; /* - * Dirty the slot so as it is written out at the next checkpoint. - * Note that the LSN position advanced may still be lost in the - * event of a crash, but this makes the data consistent after a - * clean shutdown. + * Dirty the slot so as it is written out at the next checkpoint. Note + * that the LSN position advanced may still be lost in the event of a + * crash, but this makes the data consistent after a clean shutdown. */ ReplicationSlotMarkDirty(); } @@ -532,9 +531,9 @@ pg_logical_replication_slot_advance(XLogRecPtr moveto) * keep track of their progress, so we should make more of an * effort to save it for them. * - * Dirty the slot so it is written out at the next checkpoint. - * The LSN position advanced to may still be lost on a crash - * but this makes the data consistent after a clean shutdown. + * Dirty the slot so it is written out at the next checkpoint. The + * LSN position advanced to may still be lost on a crash but this + * makes the data consistent after a clean shutdown. */ ReplicationSlotMarkDirty(); } diff --git a/src/backend/replication/walreceiverfuncs.c b/src/backend/replication/walreceiverfuncs.c index 4afad83539c..e6757573010 100644 --- a/src/backend/replication/walreceiverfuncs.c +++ b/src/backend/replication/walreceiverfuncs.c @@ -255,10 +255,10 @@ RequestXLogStreaming(TimeLineID tli, XLogRecPtr recptr, const char *conninfo, walrcv->conninfo[0] = '\0'; /* - * Use configured replication slot if present, and ignore the value - * of create_temp_slot as the slot name should be persistent. Otherwise, - * use create_temp_slot to determine whether this WAL receiver should - * create a temporary slot by itself and use it, or not. + * Use configured replication slot if present, and ignore the value of + * create_temp_slot as the slot name should be persistent. Otherwise, use + * create_temp_slot to determine whether this WAL receiver should create a + * temporary slot by itself and use it, or not. */ if (slotname != NULL && slotname[0] != '\0') { diff --git a/src/backend/replication/walsender.c b/src/backend/replication/walsender.c index 3367aa98f8a..a4ca8daea77 100644 --- a/src/backend/replication/walsender.c +++ b/src/backend/replication/walsender.c @@ -346,14 +346,14 @@ WalSndErrorCleanup(void) void WalSndResourceCleanup(bool isCommit) { - ResourceOwner resowner; + ResourceOwner resowner; if (CurrentResourceOwner == NULL) return; /* - * Deleting CurrentResourceOwner is not allowed, so we must save a - * pointer in a local variable and clear it first. + * Deleting CurrentResourceOwner is not allowed, so we must save a pointer + * in a local variable and clear it first. 
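The walsender cleanup above is an instance of a general pattern: a globally visible "current" pointer must be detached before the object it points to is destroyed. A stripped-down sketch, with hypothetical names in place of the resource-owner API:

    #include <stdlib.h>

    typedef struct ResOwnerSketch ResOwnerSketch;

    static ResOwnerSketch *current_owner = NULL;

    static void
    destroy_owner(ResOwnerSketch *owner)
    {
        free(owner);               /* stand-in for releasing and deleting */
    }

    static void
    cleanup_current_owner(void)
    {
        ResOwnerSketch *saved;

        if (current_owner == NULL)
            return;

        /* Detach first: destroy_owner() must never see the global pointer. */
        saved = current_owner;
        current_owner = NULL;
        destroy_owner(saved);
    }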
*/ resowner = CurrentResourceOwner; CurrentResourceOwner = NULL; diff --git a/src/backend/statistics/dependencies.c b/src/backend/statistics/dependencies.c index ada1e78f6fd..3e37e2758ca 100644 --- a/src/backend/statistics/dependencies.c +++ b/src/backend/statistics/dependencies.c @@ -800,7 +800,7 @@ dependency_is_compatible_clause(Node *clause, Index relid, AttrNumber *attnum) else if (IsA(clause, ScalarArrayOpExpr)) { /* If it's an scalar array operator, check for Var IN Const. */ - ScalarArrayOpExpr *expr = (ScalarArrayOpExpr *) clause; + ScalarArrayOpExpr *expr = (ScalarArrayOpExpr *) clause; /* * Reject ALL() variant, we only care about ANY/IN. @@ -827,8 +827,9 @@ dependency_is_compatible_clause(Node *clause, Index relid, AttrNumber *attnum) /* * If it's not an "=" operator, just ignore the clause, as it's not * compatible with functional dependencies. The operator is identified - * simply by looking at which function it uses to estimate selectivity. - * That's a bit strange, but it's what other similar places do. + * simply by looking at which function it uses to estimate + * selectivity. That's a bit strange, but it's what other similar + * places do. */ if (get_oprrest(expr->opno) != F_EQSEL) return false; @@ -929,7 +930,8 @@ static MVDependency * find_strongest_dependency(MVDependencies **dependencies, int ndependencies, Bitmapset *attnums) { - int i, j; + int i, + j; MVDependency *strongest = NULL; /* number of attnums in clauses */ @@ -967,8 +969,8 @@ find_strongest_dependency(MVDependencies **dependencies, int ndependencies, /* * this dependency is stronger, but we must still check that it's - * fully matched to these attnums. We perform this check last as it's - * slightly more expensive than the previous checks. + * fully matched to these attnums. We perform this check last as + * it's slightly more expensive than the previous checks. */ if (dependency_is_fully_matched(dependency, attnums)) strongest = dependency; /* save new best match */ diff --git a/src/backend/statistics/extended_stats.c b/src/backend/statistics/extended_stats.c index d1f818d49a8..c7e16f2f212 100644 --- a/src/backend/statistics/extended_stats.c +++ b/src/backend/statistics/extended_stats.c @@ -75,8 +75,8 @@ static VacAttrStats **lookup_var_attr_stats(Relation rel, Bitmapset *attrs, static void statext_store(Oid relid, MVNDistinct *ndistinct, MVDependencies *dependencies, MCVList *mcv, VacAttrStats **stats); -static int statext_compute_stattarget(int stattarget, - int natts, VacAttrStats **stats); +static int statext_compute_stattarget(int stattarget, + int natts, VacAttrStats **stats); /* * Compute requested extended stats, using the rows sampled for the plain @@ -160,9 +160,9 @@ BuildRelationExtStatistics(Relation onerel, double totalrows, stats); /* - * Don't rebuild statistics objects with statistics target set to 0 (we - * just leave the existing values around, just like we do for regular - * per-column statistics). + * Don't rebuild statistics objects with statistics target set to 0 + * (we just leave the existing values around, just like we do for + * regular per-column statistics). 
*/ if (stattarget == 0) continue; @@ -231,10 +231,10 @@ ComputeExtStatisticsRows(Relation onerel, foreach(lc, lstats) { - StatExtEntry *stat = (StatExtEntry *) lfirst(lc); - int stattarget = stat->stattarget; - VacAttrStats **stats; - int nattrs = bms_num_members(stat->columns); + StatExtEntry *stat = (StatExtEntry *) lfirst(lc); + int stattarget = stat->stattarget; + VacAttrStats **stats; + int nattrs = bms_num_members(stat->columns); /* * Check if we can build this statistics object based on the columns @@ -291,19 +291,19 @@ ComputeExtStatisticsRows(Relation onerel, static int statext_compute_stattarget(int stattarget, int nattrs, VacAttrStats **stats) { - int i; + int i; /* - * If there's statistics target set for the statistics object, use it. - * It may be set to 0 which disables building of that statistic. + * If there's statistics target set for the statistics object, use it. It + * may be set to 0 which disables building of that statistic. */ if (stattarget >= 0) return stattarget; /* * The target for the statistics object is set to -1, in which case we - * look at the maximum target set for any of the attributes the object - * is defined on. + * look at the maximum target set for any of the attributes the object is + * defined on. */ for (i = 0; i < nattrs; i++) { @@ -1041,8 +1041,8 @@ statext_is_compatible_clause_internal(PlannerInfo *root, Node *clause, /* Var IN Array */ if (IsA(clause, ScalarArrayOpExpr)) { - RangeTblEntry *rte = root->simple_rte_array[relid]; - ScalarArrayOpExpr *expr = (ScalarArrayOpExpr *) clause; + RangeTblEntry *rte = root->simple_rte_array[relid]; + ScalarArrayOpExpr *expr = (ScalarArrayOpExpr *) clause; Var *var; /* Only expressions with two arguments are considered compatible. */ @@ -1287,7 +1287,7 @@ statext_mcv_clauselist_selectivity(PlannerInfo *root, List *clauses, int varReli ListCell *l; Bitmapset **list_attnums; int listidx; - Selectivity sel = 1.0; + Selectivity sel = 1.0; /* check if there's any stats that might be useful for us. */ if (!has_stats_of_kind(rel->statlist, STATS_EXT_MCV)) @@ -1338,7 +1338,10 @@ statext_mcv_clauselist_selectivity(PlannerInfo *root, List *clauses, int varReli stat = choose_best_statistics(rel->statlist, STATS_EXT_MCV, list_attnums, list_length(clauses)); - /* if no (additional) matching stats could be found then we've nothing to do */ + /* + * if no (additional) matching stats could be found then we've nothing + * to do + */ if (!stat) break; @@ -1352,8 +1355,8 @@ statext_mcv_clauselist_selectivity(PlannerInfo *root, List *clauses, int varReli foreach(l, clauses) { /* - * If the clause is compatible with the selected statistics, mark it - * as estimated and add it to the list to estimate. + * If the clause is compatible with the selected statistics, mark + * it as estimated and add it to the list to estimate. */ if (list_attnums[listidx] != NULL && bms_is_subset(list_attnums[listidx], stat->keys)) @@ -1371,15 +1374,15 @@ statext_mcv_clauselist_selectivity(PlannerInfo *root, List *clauses, int varReli /* * First compute "simple" selectivity, i.e. without the extended * statistics, and essentially assuming independence of the - * columns/clauses. We'll then use the various selectivities computed from - * MCV list to improve it. + * columns/clauses. We'll then use the various selectivities computed + * from MCV list to improve it. 
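The arithmetic that follows this comment combines the two estimates; reduced to a standalone function it looks roughly like the sketch below. The subtraction of the MCV base selectivity from the simple estimate is inferred from how other_sel is clamped in the surrounding hunk, so treat it as an assumption rather than the exact implementation:

    static double
    combine_selectivity(double simple_sel, double mcv_sel,
                        double mcv_basesel, double mcv_totalsel)
    {
        /* part of the simple estimate not already covered by the MCV list */
        double other_sel = simple_sel - mcv_basesel;

        if (other_sel < 0.0)
            other_sel = 0.0;
        /* cannot exceed the probability mass that lies outside the MCV list */
        if (other_sel > 1.0 - mcv_totalsel)
            other_sel = 1.0 - mcv_totalsel;

        return mcv_sel + other_sel; /* the caller still clamps to [0, 1] */
    }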
*/ simple_sel = clauselist_selectivity_simple(root, stat_clauses, varRelid, - jointype, sjinfo, NULL); + jointype, sjinfo, NULL); /* - * Now compute the multi-column estimate from the MCV list, along with the - * other selectivities (base & total selectivity). + * Now compute the multi-column estimate from the MCV list, along with + * the other selectivities (base & total selectivity). */ mcv_sel = mcv_clauselist_selectivity(root, stat, stat_clauses, varRelid, jointype, sjinfo, rel, @@ -1393,7 +1396,10 @@ statext_mcv_clauselist_selectivity(PlannerInfo *root, List *clauses, int varReli if (other_sel > 1.0 - mcv_totalsel) other_sel = 1.0 - mcv_totalsel; - /* Overall selectivity is the combination of MCV and non-MCV estimates. */ + /* + * Overall selectivity is the combination of MCV and non-MCV + * estimates. + */ stat_sel = mcv_sel + other_sel; CLAMP_PROBABILITY(stat_sel); @@ -1454,11 +1460,11 @@ statext_clauselist_selectivity(PlannerInfo *root, List *clauses, int varRelid, bool examine_clause_args(List *args, Var **varp, Const **cstp, bool *varonleftp) { - Var *var; - Const *cst; - bool varonleft; - Node *leftop, - *rightop; + Var *var; + Const *cst; + bool varonleft; + Node *leftop, + *rightop; /* enforced by statext_is_compatible_clause_internal */ Assert(list_length(args) == 2); @@ -1473,13 +1479,13 @@ examine_clause_args(List *args, Var **varp, Const **cstp, bool *varonleftp) if (IsA(rightop, RelabelType)) rightop = (Node *) ((RelabelType *) rightop)->arg; - if (IsA(leftop, Var) && IsA(rightop, Const)) + if (IsA(leftop, Var) &&IsA(rightop, Const)) { var = (Var *) leftop; cst = (Const *) rightop; varonleft = true; } - else if (IsA(leftop, Const) && IsA(rightop, Var)) + else if (IsA(leftop, Const) &&IsA(rightop, Var)) { var = (Var *) rightop; cst = (Const *) leftop; diff --git a/src/backend/statistics/mcv.c b/src/backend/statistics/mcv.c index 3147d8fedc6..6a262f15436 100644 --- a/src/backend/statistics/mcv.c +++ b/src/backend/statistics/mcv.c @@ -210,8 +210,8 @@ statext_mcv_build(int numrows, HeapTuple *rows, Bitmapset *attrs, groups = build_distinct_groups(nitems, items, mss, &ngroups); /* - * Maximum number of MCV items to store, based on the statistics target - * we computed for the statistics object (from target set for the object + * Maximum number of MCV items to store, based on the statistics target we + * computed for the statistics object (from target set for the object * itself, attributes and the system default). In any case, we can't keep * more groups than we have available. 
*/ @@ -261,7 +261,7 @@ statext_mcv_build(int numrows, HeapTuple *rows, Bitmapset *attrs, { int j; SortItem key; - MultiSortSupport tmp; + MultiSortSupport tmp; /* frequencies for values in each attribute */ SortItem **freqs; @@ -463,7 +463,7 @@ build_distinct_groups(int numrows, SortItem *items, MultiSortSupport mss, static int sort_item_compare(const void *a, const void *b, void *arg) { - SortSupport ssup = (SortSupport) arg; + SortSupport ssup = (SortSupport) arg; SortItem *ia = (SortItem *) a; SortItem *ib = (SortItem *) b; @@ -499,7 +499,7 @@ build_column_frequencies(SortItem *groups, int ngroups, /* allocate arrays for all columns as a single chunk */ ptr = palloc(MAXALIGN(sizeof(SortItem *) * mss->ndims) + - mss->ndims * MAXALIGN(sizeof(SortItem) * ngroups)); + mss->ndims * MAXALIGN(sizeof(SortItem) * ngroups)); /* initial array of pointers */ result = (SortItem **) ptr; @@ -507,7 +507,7 @@ build_column_frequencies(SortItem *groups, int ngroups, for (dim = 0; dim < mss->ndims; dim++) { - SortSupport ssup = &mss->ssup[dim]; + SortSupport ssup = &mss->ssup[dim]; /* array of values for a single column */ result[dim] = (SortItem *) ptr; @@ -528,15 +528,15 @@ build_column_frequencies(SortItem *groups, int ngroups, /* * Identify distinct values, compute frequency (there might be - * multiple MCV items containing this value, so we need to sum - * counts from all of them. + * multiple MCV items containing this value, so we need to sum counts + * from all of them. */ ncounts[dim] = 1; for (i = 1; i < ngroups; i++) { - if (sort_item_compare(&result[dim][i-1], &result[dim][i], ssup) == 0) + if (sort_item_compare(&result[dim][i - 1], &result[dim][i], ssup) == 0) { - result[dim][ncounts[dim]-1].count += result[dim][i].count; + result[dim][ncounts[dim] - 1].count += result[dim][i].count; continue; } @@ -723,23 +723,23 @@ statext_mcv_serialize(MCVList *mcvlist, VacAttrStats **stats) */ info[dim].nvalues = ndistinct; - if (info[dim].typbyval) /* by-value data types */ + if (info[dim].typbyval) /* by-value data types */ { info[dim].nbytes = info[dim].nvalues * info[dim].typlen; /* * We copy the data into the MCV item during deserialization, so * we don't need to allocate any extra space. - */ + */ info[dim].nbytes_aligned = 0; } - else if (info[dim].typlen > 0) /* fixed-length by-ref */ + else if (info[dim].typlen > 0) /* fixed-length by-ref */ { /* * We don't care about alignment in the serialized data, so we * pack the data as much as possible. But we also track how much - * data will be needed after deserialization, and in that case - * we need to account for alignment of each item. + * data will be needed after deserialization, and in that case we + * need to account for alignment of each item. * * Note: As the items are fixed-length, we could easily compute * this during deserialization, but we do it here anyway. 
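The nbytes/nbytes_aligned bookkeeping in these serialization hunks follows one rule: variable-length values are packed as a uint32 length plus the raw bytes, while a second counter reserves MAXALIGN'd space for the properly aligned copies built at deserialization time. A sketch for the C-string case, with a local stand-in for MAXALIGN that assumes 8-byte alignment:

    #include <stdint.h>
    #include <string.h>

    #define MAXALIGN_SKETCH(x)  (((x) + 7) & ~(size_t) 7)   /* assumes 8-byte alignment */

    static void
    account_cstring(const char *value, size_t *nbytes, size_t *nbytes_aligned)
    {
        size_t len = strlen(value) + 1;     /* C strings include the terminator */

        *nbytes += sizeof(uint32_t) + len;          /* packed: length word + bytes */
        *nbytes_aligned += MAXALIGN_SKETCH(len);    /* aligned deserialized copy */
    }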
@@ -765,8 +765,8 @@ statext_mcv_serialize(MCVList *mcvlist, VacAttrStats **stats) /* serialized length (uint32 length + data) */ len = VARSIZE_ANY_EXHDR(values[dim][i]); - info[dim].nbytes += sizeof(uint32); /* length */ - info[dim].nbytes += len; /* value (no header) */ + info[dim].nbytes += sizeof(uint32); /* length */ + info[dim].nbytes += len; /* value (no header) */ /* * During deserialization we'll build regular varlena values @@ -792,8 +792,8 @@ statext_mcv_serialize(MCVList *mcvlist, VacAttrStats **stats) /* c-strings include terminator, so +1 byte */ len = strlen(DatumGetCString(values[dim][i])) + 1; - info[dim].nbytes += sizeof(uint32); /* length */ - info[dim].nbytes += len; /* value */ + info[dim].nbytes += sizeof(uint32); /* length */ + info[dim].nbytes += len; /* value */ /* space needed for properly aligned deserialized copies */ info[dim].nbytes_aligned += MAXALIGN(len); @@ -809,9 +809,9 @@ statext_mcv_serialize(MCVList *mcvlist, VacAttrStats **stats) * whole serialized MCV list (varlena header, MCV header, dimension info * for each attribute, deduplicated values and items). */ - total_length = (3 * sizeof(uint32)) /* magic + type + nitems */ - + sizeof(AttrNumber) /* ndimensions */ - + (ndims * sizeof(Oid)); /* attribute types */ + total_length = (3 * sizeof(uint32)) /* magic + type + nitems */ + + sizeof(AttrNumber) /* ndimensions */ + + (ndims * sizeof(Oid)); /* attribute types */ /* dimension info */ total_length += ndims * sizeof(DimensionInfo); @@ -954,7 +954,8 @@ statext_mcv_serialize(MCVList *mcvlist, VacAttrStats **stats) info[dim].nvalues, sizeof(Datum), compare_scalars_simple, &ssup[dim]); - Assert(value != NULL); /* serialization or deduplication error */ + Assert(value != NULL); /* serialization or deduplication + * error */ /* compute index within the deduplicated array */ index = (uint16) (value - values[dim]); @@ -1147,8 +1148,8 @@ statext_mcv_deserialize(bytea *data) * serialized data - it's not aligned properly, and it may disappear while * we're still using the MCV list, e.g. due to catcache release. * - * We do care about alignment here, because we will allocate all the pieces - * at once, but then use pointers to different parts. + * We do care about alignment here, because we will allocate all the + * pieces at once, but then use pointers to different parts. 
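The mcvlen computation that follows, like the palloc in build_column_frequencies earlier, carves several arrays out of a single allocation, placing each sub-array at an aligned offset so interior pointers stay valid while one free releases the whole chunk. A minimal sketch (error handling omitted, names illustrative):

    #include <stdlib.h>

    #define MAXALIGN_SKETCH(x)  (((x) + 7) & ~(size_t) 7)   /* assumes 8-byte alignment */

    static int **
    alloc_column_arrays(int ndims, int nitems)
    {
        size_t header = MAXALIGN_SKETCH(sizeof(int *) * (size_t) ndims);
        size_t per_dim = MAXALIGN_SKETCH(sizeof(int) * (size_t) nitems);
        char *chunk = malloc(header + (size_t) ndims * per_dim);
        int **result = (int **) chunk;
        int dim;

        /* pointer array at the front, one aligned value array per dimension */
        for (dim = 0; dim < ndims; dim++)
            result[dim] = (int *) (chunk + header + (size_t) dim * per_dim);

        return result;          /* a single free(result) releases everything */
    }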
*/ mcvlen = MAXALIGN(offsetof(MCVList, items) + (sizeof(MCVItem) * nitems)); @@ -1291,7 +1292,7 @@ statext_mcv_deserialize(bytea *data) /* finally translate the indexes (for non-NULL only) */ for (dim = 0; dim < ndims; dim++) { - uint16 index; + uint16 index; memcpy(&index, ptr, sizeof(uint16)); ptr += sizeof(uint16); @@ -1377,7 +1378,8 @@ pg_stats_ext_mcvlist_items(PG_FUNCTION_ARGS) /* stuff done on every call of the function */ funcctx = SRF_PERCALL_SETUP(); - if (funcctx->call_cntr < funcctx->max_calls) /* do when there is more left to send */ + if (funcctx->call_cntr < funcctx->max_calls) /* do when there is more + * left to send */ { Datum values[5]; bool nulls[5]; @@ -1400,10 +1402,10 @@ pg_stats_ext_mcvlist_items(PG_FUNCTION_ARGS) { astate_nulls = accumArrayResult(astate_nulls, - BoolGetDatum(item->isnull[i]), - false, - BOOLOID, - CurrentMemoryContext); + BoolGetDatum(item->isnull[i]), + false, + BOOLOID, + CurrentMemoryContext); if (!item->isnull[i]) { @@ -1421,17 +1423,17 @@ pg_stats_ext_mcvlist_items(PG_FUNCTION_ARGS) txt = cstring_to_text(DatumGetPointer(val)); astate_values = accumArrayResult(astate_values, - PointerGetDatum(txt), - false, - TEXTOID, - CurrentMemoryContext); + PointerGetDatum(txt), + false, + TEXTOID, + CurrentMemoryContext); } else astate_values = accumArrayResult(astate_values, - (Datum) 0, - true, - TEXTOID, - CurrentMemoryContext); + (Datum) 0, + true, + TEXTOID, + CurrentMemoryContext); } values[0] = Int32GetDatum(funcctx->call_cntr); @@ -1606,9 +1608,9 @@ mcv_get_match_bitmap(PlannerInfo *root, List *clauses, MCVItem *item = &mcvlist->items[i]; /* - * When the MCV item or the Const value is NULL we can treat - * this as a mismatch. We must not call the operator because - * of strictness. + * When the MCV item or the Const value is NULL we can + * treat this as a mismatch. We must not call the operator + * because of strictness. */ if (item->isnull[idx] || cst->constisnull) { @@ -1631,10 +1633,10 @@ mcv_get_match_bitmap(PlannerInfo *root, List *clauses, * * We don't store collations used to build the statistics, * but we can use the collation for the attribute itself, - * as stored in varcollid. We do reset the statistics after - * a type change (including collation change), so this is - * OK. We may need to relax this after allowing extended - * statistics on expressions. + * as stored in varcollid. We do reset the statistics + * after a type change (including collation change), so + * this is OK. We may need to relax this after allowing + * extended statistics on expressions. */ if (varonleft) match = DatumGetBool(FunctionCall2Coll(&opproc, @@ -1654,7 +1656,7 @@ mcv_get_match_bitmap(PlannerInfo *root, List *clauses, } else if (IsA(clause, ScalarArrayOpExpr)) { - ScalarArrayOpExpr *expr = (ScalarArrayOpExpr *) clause; + ScalarArrayOpExpr *expr = (ScalarArrayOpExpr *) clause; FmgrInfo opproc; /* valid only after examine_clause_args returns true */ @@ -1707,9 +1709,9 @@ mcv_get_match_bitmap(PlannerInfo *root, List *clauses, MCVItem *item = &mcvlist->items[i]; /* - * When the MCV item or the Const value is NULL we can treat - * this as a mismatch. We must not call the operator because - * of strictness. + * When the MCV item or the Const value is NULL we can + * treat this as a mismatch. We must not call the operator + * because of strictness. 
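The strictness rule being restated above: a strict operator is guaranteed to return NULL, not false, for NULL input, so the code records a mismatch directly instead of calling it through FunctionCall2Coll(). The same decision, isolated (ints stand in for Datum values):

    #include <stdbool.h>

    static bool
    match_one_item(bool item_isnull, bool const_isnull,
                   int item_value, int const_value)
    {
        /* never invoke a strict operator on NULL input; treat it as a mismatch */
        if (item_isnull || const_isnull)
            return false;

        return item_value == const_value;   /* the real code calls the operator here */
    }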
*/ if (item->isnull[idx] || cst->constisnull) { @@ -1727,9 +1729,9 @@ mcv_get_match_bitmap(PlannerInfo *root, List *clauses, for (j = 0; j < num_elems; j++) { - Datum elem_value = elem_values[j]; - bool elem_isnull = elem_nulls[j]; - bool elem_match; + Datum elem_value = elem_values[j]; + bool elem_isnull = elem_nulls[j]; + bool elem_match; /* NULL values always evaluate as not matching. */ if (elem_isnull) diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c index f9980cf80ce..29c920800a6 100644 --- a/src/backend/storage/buffer/bufmgr.c +++ b/src/backend/storage/buffer/bufmgr.c @@ -2994,7 +2994,7 @@ DropRelFileNodeBuffers(RelFileNodeBackend rnode, ForkNumber *forkNum, bufHdr->tag.forkNum == forkNum[j] && bufHdr->tag.blockNum >= firstDelBlock[j]) { - InvalidateBuffer(bufHdr); /* releases spinlock */ + InvalidateBuffer(bufHdr); /* releases spinlock */ break; } } diff --git a/src/backend/storage/freespace/freespace.c b/src/backend/storage/freespace/freespace.c index a5083db02b1..95a21f6cc38 100644 --- a/src/backend/storage/freespace/freespace.c +++ b/src/backend/storage/freespace/freespace.c @@ -287,7 +287,8 @@ FreeSpaceMapPrepareTruncateRel(Relation rel, BlockNumber nblocks) { buf = fsm_readbuf(rel, first_removed_address, false); if (!BufferIsValid(buf)) - return InvalidBlockNumber; /* nothing to do; the FSM was already smaller */ + return InvalidBlockNumber; /* nothing to do; the FSM was already + * smaller */ LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE); /* NO EREPORT(ERROR) from here till changes are logged */ @@ -317,7 +318,8 @@ FreeSpaceMapPrepareTruncateRel(Relation rel, BlockNumber nblocks) { new_nfsmblocks = fsm_logical_to_physical(first_removed_address); if (smgrnblocks(rel->rd_smgr, FSM_FORKNUM) <= new_nfsmblocks) - return InvalidBlockNumber; /* nothing to do; the FSM was already smaller */ + return InvalidBlockNumber; /* nothing to do; the FSM was already + * smaller */ } return new_nfsmblocks; diff --git a/src/backend/storage/ipc/latch.c b/src/backend/storage/ipc/latch.c index e2f4b116b9b..05df5017c41 100644 --- a/src/backend/storage/ipc/latch.c +++ b/src/backend/storage/ipc/latch.c @@ -1099,9 +1099,9 @@ WaitEventAdjustKqueue(WaitEventSet *set, WaitEvent *event, int old_events) !PostmasterIsAlive()) { /* - * The extra PostmasterIsAliveInternal() check prevents false alarms on - * systems that give a different value for getppid() while being traced - * by a debugger. + * The extra PostmasterIsAliveInternal() check prevents false alarms + * on systems that give a different value for getppid() while being + * traced by a debugger. 
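The kqueue hunk above guards against exactly the debugger case its comment describes: getppid() alone is not trustworthy, so the parent is declared dead only when an explicit liveness probe agrees. A sketch, with postmaster_is_alive() as a hypothetical stand-in for PostmasterIsAliveInternal():

    #include <stdbool.h>
    #include <sys/types.h>
    #include <unistd.h>

    static bool
    parent_definitely_dead(pid_t parent_pid, bool (*postmaster_is_alive) (void))
    {
        /* getppid() may report a tracing debugger, so double-check liveness */
        return getppid() != parent_pid && !postmaster_is_alive();
    }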
*/ set->report_postmaster_not_running = true; } diff --git a/src/backend/storage/ipc/procarray.c b/src/backend/storage/ipc/procarray.c index 6a94448565b..3c2b369615f 100644 --- a/src/backend/storage/ipc/procarray.c +++ b/src/backend/storage/ipc/procarray.c @@ -434,7 +434,7 @@ ProcArrayEndTransaction(PGPROC *proc, TransactionId latestXid) pgxact->xmin = InvalidTransactionId; /* must be cleared with xid/xmin: */ pgxact->vacuumFlags &= ~PROC_VACUUM_STATE_MASK; - proc->delayChkpt = false; /* be sure this is cleared in abort */ + proc->delayChkpt = false; /* be sure this is cleared in abort */ proc->recoveryConflictPending = false; Assert(pgxact->nxids == 0); @@ -456,7 +456,7 @@ ProcArrayEndTransactionInternal(PGPROC *proc, PGXACT *pgxact, pgxact->xmin = InvalidTransactionId; /* must be cleared with xid/xmin: */ pgxact->vacuumFlags &= ~PROC_VACUUM_STATE_MASK; - proc->delayChkpt = false; /* be sure this is cleared in abort */ + proc->delayChkpt = false; /* be sure this is cleared in abort */ proc->recoveryConflictPending = false; /* Clear the subtransaction-XID cache too while holding the lock */ diff --git a/src/backend/storage/ipc/procsignal.c b/src/backend/storage/ipc/procsignal.c index 65d3946386c..7b0c6ffce7a 100644 --- a/src/backend/storage/ipc/procsignal.c +++ b/src/backend/storage/ipc/procsignal.c @@ -60,8 +60,8 @@ typedef struct { pid_t pss_pid; sig_atomic_t pss_signalFlags[NUM_PROCSIGNALS]; - pg_atomic_uint64 pss_barrierGeneration; - pg_atomic_uint32 pss_barrierCheckMask; + pg_atomic_uint64 pss_barrierGeneration; + pg_atomic_uint32 pss_barrierCheckMask; } ProcSignalSlot; /* @@ -72,8 +72,8 @@ typedef struct */ typedef struct { - pg_atomic_uint64 psh_barrierGeneration; - ProcSignalSlot psh_slot[FLEXIBLE_ARRAY_MEMBER]; + pg_atomic_uint64 psh_barrierGeneration; + ProcSignalSlot psh_slot[FLEXIBLE_ARRAY_MEMBER]; } ProcSignalHeader; /* @@ -101,7 +101,7 @@ static void ProcessBarrierPlaceholder(void); Size ProcSignalShmemSize(void) { - Size size; + Size size; size = mul_size(NumProcSignalSlots, sizeof(ProcSignalSlot)); size = add_size(size, offsetof(ProcSignalHeader, psh_slot)); @@ -124,7 +124,7 @@ ProcSignalShmemInit(void) /* If we're first, initialize. */ if (!found) { - int i; + int i; pg_atomic_init_u64(&ProcSignal->psh_barrierGeneration, 0); @@ -168,13 +168,13 @@ ProcSignalInit(int pss_idx) /* * Initialize barrier state. Since we're a brand-new process, there * shouldn't be any leftover backend-private state that needs to be - * updated. Therefore, we can broadcast the latest barrier generation - * and disregard any previously-set check bits. + * updated. Therefore, we can broadcast the latest barrier generation and + * disregard any previously-set check bits. * * NB: This only works if this initialization happens early enough in the * startup sequence that we haven't yet cached any state that might need - * to be invalidated. That's also why we have a memory barrier here, to - * be sure that any later reads of memory happen strictly after this. + * to be invalidated. That's also why we have a memory barrier here, to be + * sure that any later reads of memory happen strictly after this. */ pg_atomic_write_u32(&slot->pss_barrierCheckMask, 0); barrier_generation = @@ -320,16 +320,16 @@ SendProcSignal(pid_t pid, ProcSignalReason reason, BackendId backendId) uint64 EmitProcSignalBarrier(ProcSignalBarrierType type) { - uint64 flagbit = UINT64CONST(1) << (uint64) type; - uint64 generation; + uint64 flagbit = UINT64CONST(1) << (uint64) type; + uint64 generation; /* * Set all the flags. 
* - * Note that pg_atomic_fetch_or_u32 has full barrier semantics, so this - * is totally ordered with respect to anything the caller did before, and - * anything that we do afterwards. (This is also true of the later call - * to pg_atomic_add_fetch_u64.) + * Note that pg_atomic_fetch_or_u32 has full barrier semantics, so this is + * totally ordered with respect to anything the caller did before, and + * anything that we do afterwards. (This is also true of the later call to + * pg_atomic_add_fetch_u64.) */ for (int i = 0; i < NumProcSignalSlots; i++) { @@ -349,18 +349,18 @@ EmitProcSignalBarrier(ProcSignalBarrierType type) * generation. * * Concurrency is not a problem here. Backends that have exited don't - * matter, and new backends that have joined since we entered this function - * must already have current state, since the caller is responsible for - * making sure that the relevant state is entirely visible before calling - * this function in the first place. We still have to wake them up - - * because we can't distinguish between such backends and older backends - * that need to update state - but they won't actually need to change - * any state. + * matter, and new backends that have joined since we entered this + * function must already have current state, since the caller is + * responsible for making sure that the relevant state is entirely visible + * before calling this function in the first place. We still have to wake + * them up - because we can't distinguish between such backends and older + * backends that need to update state - but they won't actually need to + * change any state. */ for (int i = NumProcSignalSlots - 1; i >= 0; i--) { volatile ProcSignalSlot *slot = &ProcSignal->psh_slot[i]; - pid_t pid = slot->pss_pid; + pid_t pid = slot->pss_pid; if (pid != 0) kill(pid, SIGUSR1); @@ -381,17 +381,17 @@ EmitProcSignalBarrier(ProcSignalBarrierType type) void WaitForProcSignalBarrier(uint64 generation) { - long timeout = 125L; + long timeout = 125L; for (int i = NumProcSignalSlots - 1; i >= 0; i--) { volatile ProcSignalSlot *slot = &ProcSignal->psh_slot[i]; - uint64 oldval; + uint64 oldval; oldval = pg_atomic_read_u64(&slot->pss_barrierGeneration); while (oldval < generation) { - int events; + int events; CHECK_FOR_INTERRUPTS(); @@ -408,11 +408,11 @@ WaitForProcSignalBarrier(uint64 generation) } /* - * The caller is probably calling this function because it wants to - * read the shared state or perform further writes to shared state once - * all backends are known to have absorbed the barrier. However, the - * read of pss_barrierGeneration was performed unlocked; insert a memory - * barrier to separate it from whatever follows. + * The caller is probably calling this function because it wants to read + * the shared state or perform further writes to shared state once all + * backends are known to have absorbed the barrier. However, the read of + * pss_barrierGeneration was performed unlocked; insert a memory barrier + * to separate it from whatever follows. */ pg_memory_barrier(); } @@ -428,8 +428,8 @@ WaitForProcSignalBarrier(uint64 generation) void ProcessProcSignalBarrier(void) { - uint64 generation; - uint32 flags; + uint64 generation; + uint32 flags; /* Exit quickly if there's no work to do. */ if (!ProcSignalBarrierPending) @@ -437,8 +437,8 @@ ProcessProcSignalBarrier(void) ProcSignalBarrierPending = false; /* - * Read the current barrier generation, and then get the flags that - * are set for this backend. 
Note that pg_atomic_exchange_u32 is a full + * Read the current barrier generation, and then get the flags that are + * set for this backend. Note that pg_atomic_exchange_u32 is a full * barrier, so we're guaranteed that the read of the barrier generation * happens before we atomically extract the flags, and that any subsequent * state changes happen afterward. @@ -477,8 +477,8 @@ ProcessBarrierPlaceholder(void) * machinery gets committed. Rename PROCSIGNAL_BARRIER_PLACEHOLDER to * PROCSIGNAL_BARRIER_SOMETHING_ELSE where SOMETHING_ELSE is something * appropriately descriptive. Get rid of this function and instead have - * ProcessBarrierSomethingElse. Most likely, that function should live - * in the file pertaining to that subsystem, rather than here. + * ProcessBarrierSomethingElse. Most likely, that function should live in + * the file pertaining to that subsystem, rather than here. */ } @@ -515,8 +515,8 @@ CheckProcSignalBarrier(void) if (slot != NULL) { - uint64 mygen; - uint64 curgen; + uint64 mygen; + uint64 curgen; mygen = pg_atomic_read_u64(&slot->pss_barrierGeneration); curgen = pg_atomic_read_u64(&ProcSignal->psh_barrierGeneration); diff --git a/src/backend/storage/ipc/shmem.c b/src/backend/storage/ipc/shmem.c index 2892a573e4a..97716f6aefd 100644 --- a/src/backend/storage/ipc/shmem.c +++ b/src/backend/storage/ipc/shmem.c @@ -461,7 +461,7 @@ ShmemInitStruct(const char *name, Size size, bool *foundPtr) } else { - Size allocated_size; + Size allocated_size; /* It isn't in the table yet. allocate and initialize it */ structPtr = ShmemAllocRaw(size, &allocated_size); @@ -539,7 +539,7 @@ pg_get_shmem_allocations(PG_FUNCTION_ARGS) MemoryContext oldcontext; HASH_SEQ_STATUS hstat; ShmemIndexEnt *ent; - Size named_allocated = 0; + Size named_allocated = 0; Datum values[PG_GET_SHMEM_SIZES_COLS]; bool nulls[PG_GET_SHMEM_SIZES_COLS]; diff --git a/src/backend/storage/lmgr/lock.c b/src/backend/storage/lmgr/lock.c index efb44a25c42..fab387b5dfe 100644 --- a/src/backend/storage/lmgr/lock.c +++ b/src/backend/storage/lmgr/lock.c @@ -1035,7 +1035,7 @@ LockAcquireExtended(const LOCKTAG *locktag, found_conflict = true; else found_conflict = LockCheckConflicts(lockMethodTable, lockmode, - lock, proclock); + lock, proclock); if (!found_conflict) { diff --git a/src/backend/storage/smgr/smgr.c b/src/backend/storage/smgr/smgr.c index e6fa2148fc7..7d667c6586f 100644 --- a/src/backend/storage/smgr/smgr.c +++ b/src/backend/storage/smgr/smgr.c @@ -553,7 +553,7 @@ smgrnblocks(SMgrRelation reln, ForkNumber forknum) void smgrtruncate(SMgrRelation reln, ForkNumber *forknum, int nforks, BlockNumber *nblocks) { - int i; + int i; /* * Get rid of any buffers for the about-to-be-deleted blocks. bufmgr will @@ -580,11 +580,11 @@ smgrtruncate(SMgrRelation reln, ForkNumber *forknum, int nforks, BlockNumber *nb /* * We might as well update the local smgr_fsm_nblocks and - * smgr_vm_nblocks settings. The smgr cache inval message that - * this function sent will cause other backends to invalidate - * their copies of smgr_fsm_nblocks and smgr_vm_nblocks, - * and these ones too at the next command boundary. - * But these ensure they aren't outright wrong until then. + * smgr_vm_nblocks settings. The smgr cache inval message that this + * function sent will cause other backends to invalidate their copies + * of smgr_fsm_nblocks and smgr_vm_nblocks, and these ones too at the + * next command boundary. But these ensure they aren't outright wrong + * until then. 
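The fork-number test that follows keeps the backend's own cached relation sizes from going stale between the truncation and the next command boundary, when the invalidation message lands. A rough sketch of that refresh, with illustrative types and fork numbers:

    typedef unsigned int BlockNumberSketch;

    struct smgr_cache_sketch
    {
        BlockNumberSketch fsm_nblocks;
        BlockNumberSketch vm_nblocks;
    };

    enum fork_sketch
    {
        FSM_FORK_SKETCH = 1,        /* stand-in for FSM_FORKNUM */
        VM_FORK_SKETCH = 2          /* stand-in for VISIBILITYMAP_FORKNUM */
    };

    static void
    refresh_cached_size(struct smgr_cache_sketch *rel, enum fork_sketch fork,
                        BlockNumberSketch nblocks)
    {
        /* overwrite the local copy now; peers refresh via cache invalidation */
        if (fork == FSM_FORK_SKETCH)
            rel->fsm_nblocks = nblocks;
        else if (fork == VM_FORK_SKETCH)
            rel->vm_nblocks = nblocks;
    }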
*/ if (forknum[i] == FSM_FORKNUM) reln->smgr_fsm_nblocks = nblocks[i]; diff --git a/src/backend/tcop/utility.c b/src/backend/tcop/utility.c index b1f7f6e2d01..97cbaa3072b 100644 --- a/src/backend/tcop/utility.c +++ b/src/backend/tcop/utility.c @@ -224,8 +224,8 @@ ClassifyUtilityCommandAsReadOnly(Node *parsetree) /* * Surprisingly, ALTER SYSTEM meets all our definitions of * read-only: it changes nothing that affects the output of - * pg_dump, it doesn't write WAL or imperil the application - * of future WAL, and it doesn't depend on any state that needs + * pg_dump, it doesn't write WAL or imperil the application of + * future WAL, and it doesn't depend on any state that needs * to be synchronized with parallel workers. * * So, despite the fact that it writes to a file, it's read @@ -271,10 +271,10 @@ ClassifyUtilityCommandAsReadOnly(Node *parsetree) case T_VariableSetStmt: { /* - * These modify only backend-local state, so they're OK to - * run in a read-only transaction or on a standby. However, - * they are disallowed in parallel mode, because they either - * rely upon or modify backend-local state that might not be + * These modify only backend-local state, so they're OK to run + * in a read-only transaction or on a standby. However, they + * are disallowed in parallel mode, because they either rely + * upon or modify backend-local state that might not be * synchronized among cooperating backends. */ return COMMAND_OK_IN_RECOVERY | COMMAND_OK_IN_READ_ONLY_TXN; @@ -285,8 +285,9 @@ ClassifyUtilityCommandAsReadOnly(Node *parsetree) case T_VacuumStmt: { /* - * These commands write WAL, so they're not strictly read-only, - * and running them in parallel workers isn't supported. + * These commands write WAL, so they're not strictly + * read-only, and running them in parallel workers isn't + * supported. * * However, they don't change the database state in a way that * would affect pg_dump output, so it's fine to run them in a @@ -299,11 +300,11 @@ ClassifyUtilityCommandAsReadOnly(Node *parsetree) case T_CopyStmt: { - CopyStmt *stmt = (CopyStmt *) parsetree; + CopyStmt *stmt = (CopyStmt *) parsetree; /* - * You might think that COPY FROM is not at all read only, - * but it's OK to copy into a temporary table, because that + * You might think that COPY FROM is not at all read only, but + * it's OK to copy into a temporary table, because that * wouldn't change the output of pg_dump. If the target table * turns out to be non-temporary, DoCopy itself will call * PreventCommandIfReadOnly. @@ -318,8 +319,8 @@ ClassifyUtilityCommandAsReadOnly(Node *parsetree) case T_VariableShowStmt: { /* - * These commands don't modify any data and are safe to run - * in a parallel worker. + * These commands don't modify any data and are safe to run in + * a parallel worker. */ return COMMAND_IS_STRICTLY_READ_ONLY; } @@ -329,8 +330,8 @@ ClassifyUtilityCommandAsReadOnly(Node *parsetree) { /* * NOTIFY requires an XID assignment, so it can't be permitted - * on a standby. Perhaps LISTEN could, since without NOTIFY - * it would be OK to just do nothing, at least until promotion, + * on a standby. Perhaps LISTEN could, since without NOTIFY it + * would be OK to just do nothing, at least until promotion, * but we currently prohibit it lest the user get the wrong * idea. * @@ -342,11 +343,12 @@ ClassifyUtilityCommandAsReadOnly(Node *parsetree) case T_LockStmt: { - LockStmt *stmt = (LockStmt *) parsetree; + LockStmt *stmt = (LockStmt *) parsetree; /* * Only weaker locker modes are allowed during recovery. 
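The rewrapped T_LockStmt comment states the rule compactly: during recovery only the weaker lock modes are allowed, and LockAcquireExtended() enforces the same cutoff. A compilable sketch of the classification follows; the constants are stand-ins with illustrative values, and the fall-through return is modeled on the T_VariableSetStmt case shown earlier in this hunk, since the hunk itself only shows the read-only branch:

    /* Stand-in constants; values illustrative. */
    typedef enum
    {
        RowShareLock = 2,
        RowExclusiveLock = 3,
        AccessExclusiveLock = 8
    } LockMode;

    #define COMMAND_OK_IN_READ_ONLY_TXN 0x1
    #define COMMAND_OK_IN_RECOVERY      0x2

    static int
    classify_lock_stmt(LockMode mode)
    {
        if (mode > RowExclusiveLock)
            return COMMAND_OK_IN_READ_ONLY_TXN; /* too strong for a standby */
        return COMMAND_OK_IN_RECOVERY | COMMAND_OK_IN_READ_ONLY_TXN;
    }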
The - * restrictions here must match those in LockAcquireExtended(). + * restrictions here must match those in + * LockAcquireExtended(). */ if (stmt->mode > RowExclusiveLock) return COMMAND_OK_IN_READ_ONLY_TXN; @@ -359,10 +361,10 @@ ClassifyUtilityCommandAsReadOnly(Node *parsetree) TransactionStmt *stmt = (TransactionStmt *) parsetree; /* - * PREPARE, COMMIT PREPARED, and ROLLBACK PREPARED all - * write WAL, so they're not read-only in the strict sense; - * but the first and third do not change pg_dump output, so - * they're OK in a read-only transactions. + * PREPARE, COMMIT PREPARED, and ROLLBACK PREPARED all write + * WAL, so they're not read-only in the strict sense; but the + * first and third do not change pg_dump output, so they're OK + * in a read-only transactions. * * We also consider COMMIT PREPARED to be OK in a read-only * transaction environment, by way of exception. diff --git a/src/backend/utils/adt/datetime.c b/src/backend/utils/adt/datetime.c index 9c808942819..7a965973cd2 100644 --- a/src/backend/utils/adt/datetime.c +++ b/src/backend/utils/adt/datetime.c @@ -3862,7 +3862,7 @@ EncodeDateOnly(struct pg_tm *tm, int style, char *str) case USE_XSD_DATES: /* compatible with ISO date formats */ str = pg_ultostr_zeropad(str, - (tm->tm_year > 0) ? tm->tm_year : -(tm->tm_year - 1), 4); + (tm->tm_year > 0) ? tm->tm_year : -(tm->tm_year - 1), 4); *str++ = '-'; str = pg_ultostr_zeropad(str, tm->tm_mon, 2); *str++ = '-'; @@ -3885,7 +3885,7 @@ EncodeDateOnly(struct pg_tm *tm, int style, char *str) } *str++ = '/'; str = pg_ultostr_zeropad(str, - (tm->tm_year > 0) ? tm->tm_year : -(tm->tm_year - 1), 4); + (tm->tm_year > 0) ? tm->tm_year : -(tm->tm_year - 1), 4); break; case USE_GERMAN_DATES: @@ -3895,7 +3895,7 @@ EncodeDateOnly(struct pg_tm *tm, int style, char *str) str = pg_ultostr_zeropad(str, tm->tm_mon, 2); *str++ = '.'; str = pg_ultostr_zeropad(str, - (tm->tm_year > 0) ? tm->tm_year : -(tm->tm_year - 1), 4); + (tm->tm_year > 0) ? tm->tm_year : -(tm->tm_year - 1), 4); break; case USE_POSTGRES_DATES: @@ -3915,7 +3915,7 @@ EncodeDateOnly(struct pg_tm *tm, int style, char *str) } *str++ = '-'; str = pg_ultostr_zeropad(str, - (tm->tm_year > 0) ? tm->tm_year : -(tm->tm_year - 1), 4); + (tm->tm_year > 0) ? tm->tm_year : -(tm->tm_year - 1), 4); break; } @@ -3985,7 +3985,7 @@ EncodeDateTime(struct pg_tm *tm, fsec_t fsec, bool print_tz, int tz, const char case USE_XSD_DATES: /* Compatible with ISO-8601 date formats */ str = pg_ultostr_zeropad(str, - (tm->tm_year > 0) ? tm->tm_year : -(tm->tm_year - 1), 4); + (tm->tm_year > 0) ? tm->tm_year : -(tm->tm_year - 1), 4); *str++ = '-'; str = pg_ultostr_zeropad(str, tm->tm_mon, 2); *str++ = '-'; @@ -4016,7 +4016,7 @@ EncodeDateTime(struct pg_tm *tm, fsec_t fsec, bool print_tz, int tz, const char } *str++ = '/'; str = pg_ultostr_zeropad(str, - (tm->tm_year > 0) ? tm->tm_year : -(tm->tm_year - 1), 4); + (tm->tm_year > 0) ? tm->tm_year : -(tm->tm_year - 1), 4); *str++ = ' '; str = pg_ultostr_zeropad(str, tm->tm_hour, 2); *str++ = ':'; @@ -4048,7 +4048,7 @@ EncodeDateTime(struct pg_tm *tm, fsec_t fsec, bool print_tz, int tz, const char str = pg_ultostr_zeropad(str, tm->tm_mon, 2); *str++ = '.'; str = pg_ultostr_zeropad(str, - (tm->tm_year > 0) ? tm->tm_year : -(tm->tm_year - 1), 4); + (tm->tm_year > 0) ? 
tm->tm_year : -(tm->tm_year - 1), 4); *str++ = ' '; str = pg_ultostr_zeropad(str, tm->tm_hour, 2); *str++ = ':'; @@ -4098,7 +4098,7 @@ EncodeDateTime(struct pg_tm *tm, fsec_t fsec, bool print_tz, int tz, const char str = AppendTimestampSeconds(str, tm, fsec); *str++ = ' '; str = pg_ultostr_zeropad(str, - (tm->tm_year > 0) ? tm->tm_year : -(tm->tm_year - 1), 4); + (tm->tm_year > 0) ? tm->tm_year : -(tm->tm_year - 1), 4); if (print_tz) { diff --git a/src/backend/utils/adt/int.c b/src/backend/utils/adt/int.c index 4acbc27d426..63c59c56b3f 100644 --- a/src/backend/utils/adt/int.c +++ b/src/backend/utils/adt/int.c @@ -1213,8 +1213,9 @@ int2abs(PG_FUNCTION_ARGS) static int32 int4gcd_internal(int32 arg1, int32 arg2) { - int32 swap; - int32 a1, a2; + int32 swap; + int32 a1, + a2; /* * Put the greater absolute value in arg1. @@ -1273,9 +1274,9 @@ int4gcd_internal(int32 arg1, int32 arg2) Datum int4gcd(PG_FUNCTION_ARGS) { - int32 arg1 = PG_GETARG_INT32(0); - int32 arg2 = PG_GETARG_INT32(1); - int32 result; + int32 arg1 = PG_GETARG_INT32(0); + int32 arg2 = PG_GETARG_INT32(1); + int32 result; result = int4gcd_internal(arg1, arg2); @@ -1288,10 +1289,10 @@ int4gcd(PG_FUNCTION_ARGS) Datum int4lcm(PG_FUNCTION_ARGS) { - int32 arg1 = PG_GETARG_INT32(0); - int32 arg2 = PG_GETARG_INT32(1); - int32 gcd; - int32 result; + int32 arg1 = PG_GETARG_INT32(0); + int32 arg2 = PG_GETARG_INT32(1); + int32 gcd; + int32 result; /* * Handle lcm(x, 0) = lcm(0, x) = 0 as a special case. This prevents a diff --git a/src/backend/utils/adt/int8.c b/src/backend/utils/adt/int8.c index 55e0eb05dac..abba8f1df04 100644 --- a/src/backend/utils/adt/int8.c +++ b/src/backend/utils/adt/int8.c @@ -684,8 +684,9 @@ int8mod(PG_FUNCTION_ARGS) static int64 int8gcd_internal(int64 arg1, int64 arg2) { - int64 swap; - int64 a1, a2; + int64 swap; + int64 a1, + a2; /* * Put the greater absolute value in arg1. @@ -744,9 +745,9 @@ int8gcd_internal(int64 arg1, int64 arg2) Datum int8gcd(PG_FUNCTION_ARGS) { - int64 arg1 = PG_GETARG_INT64(0); - int64 arg2 = PG_GETARG_INT64(1); - int64 result; + int64 arg1 = PG_GETARG_INT64(0); + int64 arg2 = PG_GETARG_INT64(1); + int64 result; result = int8gcd_internal(arg1, arg2); @@ -759,10 +760,10 @@ int8gcd(PG_FUNCTION_ARGS) Datum int8lcm(PG_FUNCTION_ARGS) { - int64 arg1 = PG_GETARG_INT64(0); - int64 arg2 = PG_GETARG_INT64(1); - int64 gcd; - int64 result; + int64 arg1 = PG_GETARG_INT64(0); + int64 arg2 = PG_GETARG_INT64(1); + int64 gcd; + int64 result; /* * Handle lcm(x, 0) = lcm(0, x) = 0 as a special case. 
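The gcd/lcm hunks in int.c and int8.c above are pure re-indentation, but the scheme they reformat is worth a sketch: a Euclid loop in the *gcd_internal helpers, and an lcm that returns 0 for a zero argument before it divides, exactly as the comment says. A minimal standalone version; unlike the server functions it does not defend against the INT64_MIN corner cases:

    #include <stdint.h>

    /* Euclid's algorithm, as in the *gcd_internal helpers above. */
    static int64_t
    gcd64(int64_t a, int64_t b)
    {
        while (b != 0)
        {
            int64_t     t = a % b;

            a = b;
            b = t;
        }
        return a < 0 ? -a : a;
    }

    static int64_t
    lcm64(int64_t a, int64_t b)
    {
        if (a == 0 || b == 0)
            return 0;           /* lcm(x, 0) = lcm(0, x) = 0; also avoids
                                 * dividing by zero below */
        a = a / gcd64(a, b) * b;    /* divide first to delay overflow */
        return a < 0 ? -a : a;
    }

Dividing by the gcd before multiplying keeps the intermediate value in range whenever the final result fits.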
This prevents a diff --git a/src/backend/utils/adt/json.c b/src/backend/utils/adt/json.c index 8bb00abb6b3..641ae3fdf8e 100644 --- a/src/backend/utils/adt/json.c +++ b/src/backend/utils/adt/json.c @@ -1337,7 +1337,7 @@ json_typeof(PG_FUNCTION_ARGS) JsonLexContext *lex; JsonTokenType tok; char *type; - JsonParseErrorType result; + JsonParseErrorType result; json = PG_GETARG_TEXT_PP(0); lex = makeJsonLexContext(json, false); diff --git a/src/backend/utils/adt/jsonfuncs.c b/src/backend/utils/adt/jsonfuncs.c index 9b86bdcb22a..5a09d65fdce 100644 --- a/src/backend/utils/adt/jsonfuncs.c +++ b/src/backend/utils/adt/jsonfuncs.c @@ -496,7 +496,7 @@ static void transform_string_values_scalar(void *state, char *token, JsonTokenTy void pg_parse_json_or_ereport(JsonLexContext *lex, JsonSemAction *sem) { - JsonParseErrorType result; + JsonParseErrorType result; result = pg_parse_json(lex, sem); if (result != JSON_SUCCESS) @@ -4524,8 +4524,8 @@ jsonb_set_lax(PG_FUNCTION_ARGS) /* ArrayType *path = PG_GETARG_ARRAYTYPE_P(1); */ /* Jsonb *newval = PG_GETARG_JSONB_P(2); */ /* bool create = PG_GETARG_BOOL(3); */ - text *handle_null; - char *handle_val; + text *handle_null; + char *handle_val; if (PG_ARGISNULL(0) || PG_ARGISNULL(1) || PG_ARGISNULL(3)) PG_RETURN_NULL(); @@ -4537,13 +4537,13 @@ jsonb_set_lax(PG_FUNCTION_ARGS) errmsg("null_value_treatment must be \"delete_key\", \"return_target\", \"use_json_null\", or \"raise_exception\""))); /* if the new value isn't an SQL NULL just call jsonb_set */ - if (! PG_ARGISNULL(2)) + if (!PG_ARGISNULL(2)) return jsonb_set(fcinfo); handle_null = PG_GETARG_TEXT_P(4); handle_val = text_to_cstring(handle_null); - if (strcmp(handle_val,"raise_exception") == 0) + if (strcmp(handle_val, "raise_exception") == 0) { ereport(ERROR, (errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED), @@ -4554,7 +4554,7 @@ jsonb_set_lax(PG_FUNCTION_ARGS) } else if (strcmp(handle_val, "use_json_null") == 0) { - Datum newval; + Datum newval; newval = DirectFunctionCall1(jsonb_in, CStringGetDatum("null")); @@ -4569,6 +4569,7 @@ jsonb_set_lax(PG_FUNCTION_ARGS) else if (strcmp(handle_val, "return_target") == 0) { Jsonb *in = PG_GETARG_JSONB_P(0); + PG_RETURN_JSONB_P(in); } else diff --git a/src/backend/utils/adt/numutils.c b/src/backend/utils/adt/numutils.c index a7467adb037..f4f76845a7f 100644 --- a/src/backend/utils/adt/numutils.c +++ b/src/backend/utils/adt/numutils.c @@ -45,11 +45,13 @@ static inline int decimalLength32(const uint32 v) { int t; - static uint32 PowersOfTen[] = - {1, 10, 100, - 1000, 10000, 100000, - 1000000, 10000000, 100000000, - 1000000000}; + static const uint32 PowersOfTen[] = { + 1, 10, 100, + 1000, 10000, 100000, + 1000000, 10000000, 100000000, + 1000000000 + }; + /* * Compute base-10 logarithm by dividing the base-2 logarithm by a * good-enough approximation of the base-2 logarithm of 10 @@ -62,16 +64,16 @@ static inline int decimalLength64(const uint64 v) { int t; - static uint64 PowersOfTen[] = { - UINT64CONST(1), UINT64CONST(10), - UINT64CONST(100), UINT64CONST(1000), - UINT64CONST(10000), UINT64CONST(100000), - UINT64CONST(1000000), UINT64CONST(10000000), - UINT64CONST(100000000), UINT64CONST(1000000000), - UINT64CONST(10000000000), UINT64CONST(100000000000), - UINT64CONST(1000000000000), UINT64CONST(10000000000000), - UINT64CONST(100000000000000), UINT64CONST(1000000000000000), - UINT64CONST(10000000000000000), UINT64CONST(100000000000000000), + static const uint64 PowersOfTen[] = { + UINT64CONST(1), UINT64CONST(10), + UINT64CONST(100), UINT64CONST(1000), + UINT64CONST(10000), 
UINT64CONST(100000), + UINT64CONST(1000000), UINT64CONST(10000000), + UINT64CONST(100000000), UINT64CONST(1000000000), + UINT64CONST(10000000000), UINT64CONST(100000000000), + UINT64CONST(1000000000000), UINT64CONST(10000000000000), + UINT64CONST(100000000000000), UINT64CONST(1000000000000000), + UINT64CONST(10000000000000000), UINT64CONST(100000000000000000), UINT64CONST(1000000000000000000), UINT64CONST(10000000000000000000) }; diff --git a/src/backend/utils/adt/pgstatfuncs.c b/src/backend/utils/adt/pgstatfuncs.c index 988b1fe902a..2aff739466f 100644 --- a/src/backend/utils/adt/pgstatfuncs.c +++ b/src/backend/utils/adt/pgstatfuncs.c @@ -1698,12 +1698,12 @@ Datum pg_stat_get_slru(PG_FUNCTION_ARGS) { #define PG_STAT_GET_SLRU_COLS 9 - ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo; - TupleDesc tupdesc; + ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo; + TupleDesc tupdesc; Tuplestorestate *tupstore; - MemoryContext per_query_ctx; - MemoryContext oldcontext; - int i; + MemoryContext per_query_ctx; + MemoryContext oldcontext; + int i; PgStat_SLRUStats *stats; /* check to see if caller supports us returning a tuplestore */ @@ -1733,12 +1733,12 @@ pg_stat_get_slru(PG_FUNCTION_ARGS) /* request SLRU stats from the stat collector */ stats = pgstat_fetch_slru(); - for (i = 0; ; i++) + for (i = 0;; i++) { /* for each row */ Datum values[PG_STAT_GET_SLRU_COLS]; bool nulls[PG_STAT_GET_SLRU_COLS]; - PgStat_SLRUStats stat = stats[i]; + PgStat_SLRUStats stat = stats[i]; const char *name; name = pgstat_slru_name(i); diff --git a/src/backend/utils/adt/rangetypes_spgist.c b/src/backend/utils/adt/rangetypes_spgist.c index dd2bc742aab..9bbef531495 100644 --- a/src/backend/utils/adt/rangetypes_spgist.c +++ b/src/backend/utils/adt/rangetypes_spgist.c @@ -47,11 +47,11 @@ static int16 getQuadrant(TypeCacheEntry *typcache, const RangeType *centroid, const RangeType *tst); static int bound_cmp(const void *a, const void *b, void *arg); -static int adjacent_inner_consistent(TypeCacheEntry *typcache, - const RangeBound *arg, const RangeBound *centroid, - const RangeBound *prev); -static int adjacent_cmp_bounds(TypeCacheEntry *typcache, const RangeBound *arg, - const RangeBound *centroid); +static int adjacent_inner_consistent(TypeCacheEntry *typcache, + const RangeBound *arg, const RangeBound *centroid, + const RangeBound *prev); +static int adjacent_cmp_bounds(TypeCacheEntry *typcache, const RangeBound *arg, + const RangeBound *centroid); /* * SP-GiST 'config' interface function. diff --git a/src/backend/utils/adt/regproc.c b/src/backend/utils/adt/regproc.c index da8cc0cf6b2..c800d797acc 100644 --- a/src/backend/utils/adt/regproc.c +++ b/src/backend/utils/adt/regproc.c @@ -1152,7 +1152,8 @@ regcollationout(PG_FUNCTION_ARGS) char *nspname; /* - * Would this collation be found by regcollationin? If not, qualify it. + * Would this collation be found by regcollationin? If not, + * qualify it. 
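The regcollationout hunk shows the rule all the reg*out functions share: emit the bare name only when an unqualified lookup (regcollationin here, hence the CollationIsVisible test below) would find the object, and otherwise schema-qualify it. A toy standalone illustration, with "visible" standing in for the catalog visibility check:

    #include <stdio.h>

    /* Toy illustration of the reg*out qualification rule. */
    static void
    print_object_name(const char *nspname, const char *objname, int visible)
    {
        if (visible)
            printf("%s\n", objname);    /* an unqualified lookup finds it */
        else
            printf("%s.%s\n", nspname, objname);    /* must schema-qualify */
    }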
*/ if (CollationIsVisible(collationid)) nspname = NULL; diff --git a/src/backend/utils/adt/ruleutils.c b/src/backend/utils/adt/ruleutils.c index 695d8c32284..723a8fa48cf 100644 --- a/src/backend/utils/adt/ruleutils.c +++ b/src/backend/utils/adt/ruleutils.c @@ -10611,7 +10611,7 @@ generate_opclass_name(Oid opclass) initStringInfo(&buf); get_opclass_name(opclass, InvalidOid, &buf); - return &buf.data[1]; /* get_opclass_name() prepends space */ + return &buf.data[1]; /* get_opclass_name() prepends space */ } /* @@ -11313,8 +11313,8 @@ get_reloptions(StringInfo buf, Datum reloptions) char *value; /* - * Each array element should have the form name=value. If the "=" - * is missing for some reason, treat it like an empty value. + * Each array element should have the form name=value. If the "=" is + * missing for some reason, treat it like an empty value. */ name = option; separator = strchr(option, '='); @@ -11332,11 +11332,11 @@ get_reloptions(StringInfo buf, Datum reloptions) /* * In general we need to quote the value; but to avoid unnecessary - * clutter, do not quote if it is an identifier that would not - * need quoting. (We could also allow numbers, but that is a bit - * trickier than it looks --- for example, are leading zeroes - * significant? We don't want to assume very much here about what - * custom reloptions might mean.) + * clutter, do not quote if it is an identifier that would not need + * quoting. (We could also allow numbers, but that is a bit trickier + * than it looks --- for example, are leading zeroes significant? We + * don't want to assume very much here about what custom reloptions + * might mean.) */ if (quote_identifier(value) == value) appendStringInfoString(buf, value); diff --git a/src/backend/utils/adt/tsgistidx.c b/src/backend/utils/adt/tsgistidx.c index 2e0bc3ebd07..c3f25800e7b 100644 --- a/src/backend/utils/adt/tsgistidx.c +++ b/src/backend/utils/adt/tsgistidx.c @@ -307,7 +307,7 @@ checkcondition_arr(void *checkval, QueryOperand *val, ExecPhraseData *data) static bool checkcondition_bit(void *checkval, QueryOperand *val, ExecPhraseData *data) { - void *key = (SignTSVector *) checkval; + void *key = (SignTSVector *) checkval; /* * we are not able to find a prefix in signature tree @@ -499,8 +499,8 @@ hemdistsign(BITVECP a, BITVECP b, int siglen) static int hemdist(SignTSVector *a, SignTSVector *b) { - int siglena = GETSIGLEN(a); - int siglenb = GETSIGLEN(b); + int siglena = GETSIGLEN(a); + int siglenb = GETSIGLEN(b); if (ISALLTRUE(a)) { @@ -721,9 +721,9 @@ gtsvector_picksplit(PG_FUNCTION_ARGS) else size_alpha = SIGLENBIT(siglen) - sizebitvec((cache[j].allistrue) ? 
- GETSIGN(datum_l) : - GETSIGN(cache[j].sign), - siglen); + GETSIGN(datum_l) : + GETSIGN(cache[j].sign), + siglen); } else size_alpha = hemdistsign(cache[j].sign, GETSIGN(datum_l), siglen); diff --git a/src/backend/utils/cache/lsyscache.c b/src/backend/utils/cache/lsyscache.c index a7d63f107f2..63d1263502d 100644 --- a/src/backend/utils/cache/lsyscache.c +++ b/src/backend/utils/cache/lsyscache.c @@ -937,7 +937,7 @@ get_attoptions(Oid relid, int16 attnum) if (isnull) result = (Datum) 0; else - result = datumCopy(attopts, false, -1); /* text[] */ + result = datumCopy(attopts, false, -1); /* text[] */ ReleaseSysCache(tuple); @@ -3297,9 +3297,9 @@ get_index_column_opclass(Oid index_oid, int attno) bool get_index_isreplident(Oid index_oid) { - HeapTuple tuple; - Form_pg_index rd_index; - bool result; + HeapTuple tuple; + Form_pg_index rd_index; + bool result; tuple = SearchSysCache1(INDEXRELID, ObjectIdGetDatum(index_oid)); if (!HeapTupleIsValid(tuple)) diff --git a/src/backend/utils/cache/relcache.c b/src/backend/utils/cache/relcache.c index 9f1f11d0c14..0b9eb00d2de 100644 --- a/src/backend/utils/cache/relcache.c +++ b/src/backend/utils/cache/relcache.c @@ -5318,12 +5318,12 @@ GetRelationPublicationActions(Relation relation) if (relation->rd_rel->relispartition) { /* Add publications that the ancestors are in too. */ - List *ancestors = get_partition_ancestors(RelationGetRelid(relation)); - ListCell *lc; + List *ancestors = get_partition_ancestors(RelationGetRelid(relation)); + ListCell *lc; foreach(lc, ancestors) { - Oid ancestor = lfirst_oid(lc); + Oid ancestor = lfirst_oid(lc); puboids = list_concat_unique_oid(puboids, GetRelationPublications(ancestor)); @@ -5424,13 +5424,14 @@ CopyIndexAttOptions(bytea **srcopts, int natts) * RelationGetIndexAttOptions * get AM/opclass-specific options for an index parsed into a binary form */ -bytea ** +bytea ** RelationGetIndexAttOptions(Relation relation, bool copy) { MemoryContext oldcxt; bytea **opts = relation->rd_opcoptions; Oid relid = RelationGetRelid(relation); - int natts = RelationGetNumberOfAttributes(relation); /* XXX IndexRelationGetNumberOfKeyAttributes */ + int natts = RelationGetNumberOfAttributes(relation); /* XXX + * IndexRelationGetNumberOfKeyAttributes */ int i; /* Try to copy cached options. */ diff --git a/src/backend/utils/error/elog.c b/src/backend/utils/error/elog.c index b8858b132b8..e9762010309 100644 --- a/src/backend/utils/error/elog.c +++ b/src/backend/utils/error/elog.c @@ -403,7 +403,7 @@ matches_backtrace_functions(const char *funcname) p = backtrace_symbol_list; for (;;) { - if (*p == '\0') /* end of backtrace_symbol_list */ + if (*p == '\0') /* end of backtrace_symbol_list */ break; if (strcmp(funcname, p) == 0) @@ -845,7 +845,7 @@ errmsg(const char *fmt,...) 
int errbacktrace(void) { - ErrorData *edata = &errordata[errordata_stack_depth]; + ErrorData *edata = &errordata[errordata_stack_depth]; MemoryContext oldcontext; recursion_depth++; diff --git a/src/backend/utils/hash/dynahash.c b/src/backend/utils/hash/dynahash.c index 5b4b9e487f5..2688e277267 100644 --- a/src/backend/utils/hash/dynahash.c +++ b/src/backend/utils/hash/dynahash.c @@ -1719,7 +1719,10 @@ hash_corrupted(HTAB *hashp) int my_log2(long num) { - /* guard against too-large input, which would be invalid for pg_ceil_log2_*() */ + /* + * guard against too-large input, which would be invalid for + * pg_ceil_log2_*() + */ if (num > LONG_MAX / 2) num = LONG_MAX / 2; diff --git a/src/backend/utils/init/miscinit.c b/src/backend/utils/init/miscinit.c index 6fe25c023a4..cca9704d2d7 100644 --- a/src/backend/utils/init/miscinit.c +++ b/src/backend/utils/init/miscinit.c @@ -57,7 +57,7 @@ ProcessingMode Mode = InitProcessing; -BackendType MyBackendType; +BackendType MyBackendType; /* List of lock files to be removed at proc exit */ static List *lock_files = NIL; diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c index 5bdc02fce2f..2f3e0a70e00 100644 --- a/src/backend/utils/misc/guc.c +++ b/src/backend/utils/misc/guc.c @@ -11718,7 +11718,7 @@ check_backtrace_functions(char **newval, void **extra, GucSource source) else if ((*newval)[i] == ' ' || (*newval)[i] == '\n' || (*newval)[i] == '\t') - ; /* ignore these */ + ; /* ignore these */ else someval[j++] = (*newval)[i]; /* copy anything else */ } diff --git a/src/backend/utils/mmgr/aset.c b/src/backend/utils/mmgr/aset.c index dd8d7a33a4f..60a761caba4 100644 --- a/src/backend/utils/mmgr/aset.c +++ b/src/backend/utils/mmgr/aset.c @@ -560,7 +560,7 @@ AllocSetReset(MemoryContext context) AllocSet set = (AllocSet) context; AllocBlock block; Size keepersize PG_USED_FOR_ASSERTS_ONLY - = set->keeper->endptr - ((char *) set); + = set->keeper->endptr - ((char *) set); AssertArg(AllocSetIsValid(set)); @@ -599,7 +599,7 @@ AllocSetReset(MemoryContext context) else { /* Normal case, release the block */ - context->mem_allocated -= block->endptr - ((char*) block); + context->mem_allocated -= block->endptr - ((char *) block); #ifdef CLOBBER_FREED_MEMORY wipe_mem(block, block->freeptr - ((char *) block)); @@ -628,7 +628,7 @@ AllocSetDelete(MemoryContext context) AllocSet set = (AllocSet) context; AllocBlock block = set->blocks; Size keepersize PG_USED_FOR_ASSERTS_ONLY - = set->keeper->endptr - ((char *) set); + = set->keeper->endptr - ((char *) set); AssertArg(AllocSetIsValid(set)); @@ -1032,7 +1032,7 @@ AllocSetFree(MemoryContext context, void *pointer) if (block->next) block->next->prev = block->prev; - context->mem_allocated -= block->endptr - ((char*) block); + context->mem_allocated -= block->endptr - ((char *) block); #ifdef CLOBBER_FREED_MEMORY wipe_mem(block, block->freeptr - ((char *) block)); @@ -1124,7 +1124,7 @@ AllocSetRealloc(MemoryContext context, void *pointer, Size size) /* Do the realloc */ blksize = chksize + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ; - oldblksize = block->endptr - ((char *)block); + oldblksize = block->endptr - ((char *) block); block = (AllocBlock) realloc(block, blksize); if (block == NULL) diff --git a/src/backend/utils/mmgr/mcxt.c b/src/backend/utils/mmgr/mcxt.c index 9e24fec72d6..abda22fa570 100644 --- a/src/backend/utils/mmgr/mcxt.c +++ b/src/backend/utils/mmgr/mcxt.c @@ -469,7 +469,7 @@ MemoryContextIsEmpty(MemoryContext context) Size MemoryContextMemAllocated(MemoryContext context, bool recurse) { - 
Size total = context->mem_allocated; + Size total = context->mem_allocated; AssertArg(MemoryContextIsValid(context)); diff --git a/src/backend/utils/mmgr/slab.c b/src/backend/utils/mmgr/slab.c index e9e962d7674..f8d801c4196 100644 --- a/src/backend/utils/mmgr/slab.c +++ b/src/backend/utils/mmgr/slab.c @@ -217,10 +217,11 @@ SlabContextCreate(MemoryContext parent, headerSize = offsetof(SlabContext, freelist) + freelistSize; #ifdef MEMORY_CONTEXT_CHECKING + /* - * With memory checking, we need to allocate extra space for the bitmap - * of free chunks. The bitmap is an array of bools, so we don't need to - * worry about alignment. + * With memory checking, we need to allocate extra space for the bitmap of + * free chunks. The bitmap is an array of bools, so we don't need to worry + * about alignment. */ headerSize += chunksPerBlock * sizeof(bool); #endif diff --git a/src/backend/utils/sort/logtape.c b/src/backend/utils/sort/logtape.c index 48ae0de305b..bc8d56807e2 100644 --- a/src/backend/utils/sort/logtape.c +++ b/src/backend/utils/sort/logtape.c @@ -191,8 +191,8 @@ struct LogicalTapeSet Size freeBlocksLen; /* current allocated length of freeBlocks[] */ /* The array of logical tapes. */ - int nTapes; /* # of logical tapes in set */ - LogicalTape *tapes; /* has nTapes nentries */ + int nTapes; /* # of logical tapes in set */ + LogicalTape *tapes; /* has nTapes nentries */ }; static void ltsWriteBlock(LogicalTapeSet *lts, long blocknum, void *buffer); @@ -348,9 +348,9 @@ parent_offset(unsigned long i) static long ltsGetFreeBlock(LogicalTapeSet *lts) { - long *heap = lts->freeBlocks; - long blocknum; - int heapsize; + long *heap = lts->freeBlocks; + long blocknum; + int heapsize; unsigned long pos; /* freelist empty; allocate a new block */ @@ -374,7 +374,7 @@ ltsGetFreeBlock(LogicalTapeSet *lts) heapsize = lts->nFreeBlocks; while (true) { - unsigned long left = left_offset(pos); + unsigned long left = left_offset(pos); unsigned long right = right_offset(pos); unsigned long min_child; @@ -403,7 +403,7 @@ ltsGetFreeBlock(LogicalTapeSet *lts) static void ltsReleaseBlock(LogicalTapeSet *lts, long blocknum) { - long *heap; + long *heap; unsigned long pos; /* @@ -440,6 +440,7 @@ ltsReleaseBlock(LogicalTapeSet *lts, long blocknum) while (pos != 0) { unsigned long parent = parent_offset(pos); + if (heap[parent] < heap[pos]) break; @@ -543,19 +544,19 @@ ltsConcatWorkerTapes(LogicalTapeSet *lts, TapeShare *shared, static void ltsInitTape(LogicalTape *lt) { - lt->writing = true; - lt->frozen = false; - lt->dirty = false; - lt->firstBlockNumber = -1L; - lt->curBlockNumber = -1L; - lt->nextBlockNumber = -1L; + lt->writing = true; + lt->frozen = false; + lt->dirty = false; + lt->firstBlockNumber = -1L; + lt->curBlockNumber = -1L; + lt->nextBlockNumber = -1L; lt->offsetBlockNumber = 0L; - lt->buffer = NULL; - lt->buffer_size = 0; + lt->buffer = NULL; + lt->buffer_size = 0; /* palloc() larger than MaxAllocSize would fail */ - lt->max_size = MaxAllocSize; - lt->pos = 0; - lt->nbytes = 0; + lt->max_size = MaxAllocSize; + lt->pos = 0; + lt->nbytes = 0; } /* @@ -1012,13 +1013,13 @@ LogicalTapeFreeze(LogicalTapeSet *lts, int tapenum, TapeShare *share) void LogicalTapeSetExtend(LogicalTapeSet *lts, int nAdditional) { - int i; - int nTapesOrig = lts->nTapes; + int i; + int nTapesOrig = lts->nTapes; lts->nTapes += nAdditional; - lts->tapes = (LogicalTape *) repalloc( - lts->tapes, lts->nTapes * sizeof(LogicalTape)); + lts->tapes = (LogicalTape *) repalloc(lts->tapes, + lts->nTapes * sizeof(LogicalTape)); for (i = 
nTapesOrig; i < lts->nTapes; i++) ltsInitTape(<s->tapes[i]); diff --git a/src/backend/utils/sort/tuplesort.c b/src/backend/utils/sort/tuplesort.c index d59e3d5a8d5..3c49476483b 100644 --- a/src/backend/utils/sort/tuplesort.c +++ b/src/backend/utils/sort/tuplesort.c @@ -256,8 +256,8 @@ struct Tuplesortstate bool isMaxSpaceDisk; /* true when maxSpace is value for on-disk * space, false when it's value for in-memory * space */ - TupSortStatus maxSpaceStatus; /* sort status when maxSpace was reached */ - MemoryContext maincontext; /* memory context for tuple sort metadata that + TupSortStatus maxSpaceStatus; /* sort status when maxSpace was reached */ + MemoryContext maincontext; /* memory context for tuple sort metadata that * persists across multiple batches */ MemoryContext sortcontext; /* memory context holding most sort data */ MemoryContext tuplecontext; /* sub-context of sortcontext for tuple data */ @@ -1429,11 +1429,11 @@ tuplesort_updatemax(Tuplesortstate *state) /* * Sort evicts data to the disk when it wasn't able to fit that data into - * main memory. This is why we assume space used on the disk to be - * more important for tracking resource usage than space used in memory. - * Note that the amount of space occupied by some tupleset on the disk might - * be less than amount of space occupied by the same tupleset in - * memory due to more compact representation. + * main memory. This is why we assume space used on the disk to be more + * important for tracking resource usage than space used in memory. Note + * that the amount of space occupied by some tupleset on the disk might be + * less than amount of space occupied by the same tupleset in memory due + * to more compact representation. */ if ((isSpaceDisk && !state->isMaxSpaceDisk) || (isSpaceDisk == state->isMaxSpaceDisk && spaceUsed > state->maxSpace)) diff --git a/src/bin/initdb/initdb.c b/src/bin/initdb/initdb.c index a66dd078a79..67021a6dc13 100644 --- a/src/bin/initdb/initdb.c +++ b/src/bin/initdb/initdb.c @@ -1460,7 +1460,7 @@ setup_auth(FILE *cmdfd) if (superuser_password) PG_CMD_PRINTF("ALTER USER \"%s\" WITH PASSWORD E'%s';\n\n", - username, escape_quotes(superuser_password)); + username, escape_quotes(superuser_password)); } /* @@ -1674,8 +1674,8 @@ setup_collation(FILE *cmdfd) * that it wins if libc defines a locale named ucs_basic. 
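Earlier in this initdb diff, the superuser password is spliced into an E'...' literal only after passing through escape_quotes(). The invariant such a helper must maintain is that every single quote and backslash is doubled before it enters the literal. A standalone sketch of that idea (an illustration, not initdb's actual implementation):

    #include <stdlib.h>
    #include <string.h>

    /* Double every single quote and backslash so the result is safe
     * inside an E'...' string literal. */
    static char *
    escape_squotes(const char *src)
    {
        char       *dst = malloc(2 * strlen(src) + 1);
        char       *p = dst;

        if (dst == NULL)
            return NULL;
        for (; *src; src++)
        {
            if (*src == '\'' || *src == '\\')
                *p++ = *src;    /* write the character twice */
            *p++ = *src;
        }
        *p = '\0';
        return dst;
    }

Backslashes need doubling too because E'...' strings treat backslash as an escape character.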
*/ PG_CMD_PRINTF("INSERT INTO pg_collation (oid, collname, collnamespace, collowner, collprovider, collisdeterministic, collencoding, collcollate, collctype)" - "VALUES (pg_nextoid('pg_catalog.pg_collation', 'oid', 'pg_catalog.pg_collation_oid_index'), 'ucs_basic', 'pg_catalog'::regnamespace, %u, '%c', true, %d, 'C', 'C');\n\n", - BOOTSTRAP_SUPERUSERID, COLLPROVIDER_LIBC, PG_UTF8); + "VALUES (pg_nextoid('pg_catalog.pg_collation', 'oid', 'pg_catalog.pg_collation_oid_index'), 'ucs_basic', 'pg_catalog'::regnamespace, %u, '%c', true, %d, 'C', 'C');\n\n", + BOOTSTRAP_SUPERUSERID, COLLPROVIDER_LIBC, PG_UTF8); /* Now import all collations we can find in the operating system */ PG_CMD_PUTS("SELECT pg_import_system_collations('pg_catalog');\n\n"); @@ -1918,15 +1918,15 @@ setup_schema(FILE *cmdfd) free(lines); PG_CMD_PRINTF("UPDATE information_schema.sql_implementation_info " - " SET character_value = '%s' " - " WHERE implementation_info_name = 'DBMS VERSION';\n\n", - infoversion); + " SET character_value = '%s' " + " WHERE implementation_info_name = 'DBMS VERSION';\n\n", + infoversion); PG_CMD_PRINTF("COPY information_schema.sql_features " - " (feature_id, feature_name, sub_feature_id, " - " sub_feature_name, is_supported, comments) " - " FROM E'%s';\n\n", - escape_quotes(features_file)); + " (feature_id, feature_name, sub_feature_id, " + " sub_feature_name, is_supported, comments) " + " FROM E'%s';\n\n", + escape_quotes(features_file)); } /* diff --git a/src/bin/pg_basebackup/pg_basebackup.c b/src/bin/pg_basebackup/pg_basebackup.c index 2e9035d6137..b9ec640d2fc 100644 --- a/src/bin/pg_basebackup/pg_basebackup.c +++ b/src/bin/pg_basebackup/pg_basebackup.c @@ -1050,7 +1050,8 @@ ReceiveTarFile(PGconn *conn, PGresult *res, int rownum) #ifdef HAVE_LIBZ if (compresslevel != 0) { - int fd = dup(fileno(stdout)); + int fd = dup(fileno(stdout)); + if (fd < 0) { pg_log_error("could not duplicate stdout: %m"); @@ -1224,7 +1225,7 @@ ReceiveTarFile(PGconn *conn, PGresult *res, int rownum) if (strcmp(basedir, "-") == 0 && manifest) { char header[512]; - PQExpBufferData buf; + PQExpBufferData buf; initPQExpBuffer(&buf); ReceiveBackupManifestInMemory(conn, &buf); diff --git a/src/bin/pg_basebackup/t/010_pg_basebackup.pl b/src/bin/pg_basebackup/t/010_pg_basebackup.pl index 63381764e97..208df557b85 100644 --- a/src/bin/pg_basebackup/t/010_pg_basebackup.pl +++ b/src/bin/pg_basebackup/t/010_pg_basebackup.pl @@ -103,7 +103,7 @@ foreach my $filename (@tempRelationFiles) # Run base backup. $node->command_ok([ 'pg_basebackup', '-D', "$tempdir/backup", '-X', 'none' ], 'pg_basebackup runs'); -ok(-f "$tempdir/backup/PG_VERSION", 'backup was created'); +ok(-f "$tempdir/backup/PG_VERSION", 'backup was created'); ok(-f "$tempdir/backup/backup_manifest", 'backup manifest included'); # Permissions on backup should be default @@ -161,13 +161,14 @@ rmtree("$tempdir/backup"); $node->command_ok( [ - 'pg_basebackup', '-D', "$tempdir/backup2", '--no-manifest', - '--waldir', "$tempdir/xlog2" + 'pg_basebackup', '-D', + "$tempdir/backup2", '--no-manifest', + '--waldir', "$tempdir/xlog2" ], 'separate xlog directory'); -ok(-f "$tempdir/backup2/PG_VERSION", 'backup was created'); -ok(! 
-f "$tempdir/backup2/backup_manifest", 'manifest was suppressed'); -ok(-d "$tempdir/xlog2/", 'xlog directory was created'); +ok(-f "$tempdir/backup2/PG_VERSION", 'backup was created'); +ok(!-f "$tempdir/backup2/backup_manifest", 'manifest was suppressed'); +ok(-d "$tempdir/xlog2/", 'xlog directory was created'); rmtree("$tempdir/backup2"); rmtree("$tempdir/xlog2"); diff --git a/src/bin/pg_checksums/pg_checksums.c b/src/bin/pg_checksums/pg_checksums.c index 9aa9f756f66..1daa5aed0e0 100644 --- a/src/bin/pg_checksums/pg_checksums.c +++ b/src/bin/pg_checksums/pg_checksums.c @@ -244,7 +244,7 @@ scan_file(const char *fn, BlockNumber segmentno) } else if (mode == PG_MODE_ENABLE) { - int w; + int w; /* Set checksum in page header */ header->pd_checksum = csum; diff --git a/src/bin/pg_dump/pg_dump.c b/src/bin/pg_dump/pg_dump.c index fa1d569f0f8..f33c2463a72 100644 --- a/src/bin/pg_dump/pg_dump.c +++ b/src/bin/pg_dump/pg_dump.c @@ -2412,8 +2412,8 @@ makeTableDataInfo(DumpOptions *dopt, TableInfo *tbinfo) /* Skip FOREIGN TABLEs (no data to dump) unless requested explicitly */ if (tbinfo->relkind == RELKIND_FOREIGN_TABLE && (foreign_servers_include_oids.head == NULL || - !simple_oid_list_member(&foreign_servers_include_oids, - tbinfo->foreign_server))) + !simple_oid_list_member(&foreign_servers_include_oids, + tbinfo->foreign_server))) return; /* Skip partitioned tables (data in partitions) */ if (tbinfo->relkind == RELKIND_PARTITIONED_TABLE) @@ -4074,8 +4074,7 @@ getPublicationTables(Archive *fout, TableInfo tblinfo[], int numTables) TableInfo *tbinfo = &tblinfo[i]; /* - * Only regular and partitioned tables can be added to - * publications. + * Only regular and partitioned tables can be added to publications. */ if (tbinfo->relkind != RELKIND_RELATION && tbinfo->relkind != RELKIND_PARTITIONED_TABLE) @@ -4397,12 +4396,12 @@ append_depends_on_extension(Archive *fout, { if (dobj->depends_on_ext) { - char *nm; + char *nm; PGresult *res; - PQExpBuffer query; - int ntups; - int i_extname; - int i; + PQExpBuffer query; + int ntups; + int i_extname; + int i; /* dodge fmtId() non-reentrancy */ nm = pg_strdup(objname); @@ -7294,7 +7293,10 @@ getIndexes(Archive *fout, TableInfo tblinfo[], int numTables) indxinfo[j].indisclustered = (PQgetvalue(res, j, i_indisclustered)[0] == 't'); indxinfo[j].indisreplident = (PQgetvalue(res, j, i_indisreplident)[0] == 't'); indxinfo[j].parentidx = atooid(PQgetvalue(res, j, i_parentidx)); - indxinfo[j].partattaches = (SimplePtrList) { NULL, NULL }; + indxinfo[j].partattaches = (SimplePtrList) + { + NULL, NULL + }; contype = *(PQgetvalue(res, j, i_contype)); if (contype == 'p' || contype == 'u' || contype == 'x') @@ -7492,7 +7494,7 @@ getConstraints(Archive *fout, TableInfo tblinfo[], int numTables) for (j = 0; j < ntups; j++) { - TableInfo *reftable; + TableInfo *reftable; constrinfo[j].dobj.objType = DO_FK_CONSTRAINT; constrinfo[j].dobj.catId.tableoid = atooid(PQgetvalue(res, j, i_contableoid)); @@ -16802,7 +16804,7 @@ dumpConstraint(Archive *fout, ConstraintInfo *coninfo) delq = createPQExpBuffer(); foreign = tbinfo && - tbinfo->relkind == RELKIND_FOREIGN_TABLE ? "FOREIGN " : ""; + tbinfo->relkind == RELKIND_FOREIGN_TABLE ? 
"FOREIGN " : ""; if (coninfo->contype == 'p' || coninfo->contype == 'u' || diff --git a/src/bin/pg_dump/pg_dump.h b/src/bin/pg_dump/pg_dump.h index 61c909e06d8..5f70400b257 100644 --- a/src/bin/pg_dump/pg_dump.h +++ b/src/bin/pg_dump/pg_dump.h @@ -132,7 +132,7 @@ typedef struct _dumpableObject DumpComponents dump; /* bitmask of components to dump */ DumpComponents dump_contains; /* as above, but for contained objects */ bool ext_member; /* true if object is member of extension */ - bool depends_on_ext; /* true if object depends on an extension */ + bool depends_on_ext; /* true if object depends on an extension */ DumpId *dependencies; /* dumpIds of objects this one depends on */ int nDeps; /* number of valid dependencies */ int allocDeps; /* allocated size of dependencies[] */ @@ -369,7 +369,7 @@ typedef struct _indxInfo bool indisclustered; bool indisreplident; Oid parentidx; /* if partitioned, parent index OID */ - SimplePtrList partattaches; /* if partitioned, partition attach objects */ + SimplePtrList partattaches; /* if partitioned, partition attach objects */ /* if there is an associated constraint object, its dumpId: */ DumpId indexconstraint; diff --git a/src/bin/pg_dump/t/002_pg_dump.pl b/src/bin/pg_dump/t/002_pg_dump.pl index 1b90cbd9b58..e116235769b 100644 --- a/src/bin/pg_dump/t/002_pg_dump.pl +++ b/src/bin/pg_dump/t/002_pg_dump.pl @@ -1378,7 +1378,7 @@ my %tests = ( 'CREATE COLLATION test0 FROM "C"' => { create_order => 76, create_sql => 'CREATE COLLATION test0 FROM "C";', - regexp => + regexp => qr/CREATE COLLATION public.test0 \(provider = libc, locale = 'C'(, version = '[^']*')?\);/m, collation => 1, like => { %full_runs, section_pre_data => 1, }, @@ -1411,8 +1411,9 @@ my %tests = ( "CREATE DATABASE dump_test2 LOCALE = 'C'" => { create_order => 47, - create_sql => "CREATE DATABASE dump_test2 LOCALE = 'C' TEMPLATE = template0;", - regexp => qr/^ + create_sql => + "CREATE DATABASE dump_test2 LOCALE = 'C' TEMPLATE = template0;", + regexp => qr/^ \QCREATE DATABASE dump_test2 \E.*\QLOCALE = 'C';\E /xm, like => { pg_dumpall_dbprivs => 1, }, @@ -1575,7 +1576,7 @@ my %tests = ( unlike => { exclude_dump_test_schema => 1, }, }, - # verify that a custom operator/opclass/range type is dumped in right order + # verify that a custom operator/opclass/range type is dumped in right order 'CREATE OPERATOR CLASS dump_test.op_class_custom' => { create_order => 74, create_sql => 'CREATE OPERATOR dump_test.~~ ( @@ -2574,7 +2575,8 @@ my %tests = ( 'ALTER STATISTICS extended_stats_options' => { create_order => 98, - create_sql => 'ALTER STATISTICS dump_test.test_ext_stats_opts SET STATISTICS 1000', + create_sql => + 'ALTER STATISTICS dump_test.test_ext_stats_opts SET STATISTICS 1000', regexp => qr/^ \QALTER STATISTICS dump_test.test_ext_stats_opts SET STATISTICS 1000;\E /xms, diff --git a/src/bin/pg_dump/t/003_pg_dump_with_server.pl b/src/bin/pg_dump/t/003_pg_dump_with_server.pl index 8c8063908b9..dd9a60a2c9f 100644 --- a/src/bin/pg_dump/t/003_pg_dump_with_server.pl +++ b/src/bin/pg_dump/t/003_pg_dump_with_server.pl @@ -18,19 +18,19 @@ $node->start; # Verify that dumping foreign data includes only foreign tables of # matching servers -$node->safe_psql( 'postgres', "CREATE FOREIGN DATA WRAPPER dummy"); -$node->safe_psql( 'postgres', "CREATE SERVER s0 FOREIGN DATA WRAPPER dummy"); -$node->safe_psql( 'postgres', "CREATE SERVER s1 FOREIGN DATA WRAPPER dummy"); -$node->safe_psql( 'postgres', "CREATE SERVER s2 FOREIGN DATA WRAPPER dummy"); -$node->safe_psql( 'postgres', "CREATE FOREIGN TABLE t0 (a int) 
SERVER s0"); -$node->safe_psql( 'postgres', "CREATE FOREIGN TABLE t1 (a int) SERVER s1"); +$node->safe_psql('postgres', "CREATE FOREIGN DATA WRAPPER dummy"); +$node->safe_psql('postgres', "CREATE SERVER s0 FOREIGN DATA WRAPPER dummy"); +$node->safe_psql('postgres', "CREATE SERVER s1 FOREIGN DATA WRAPPER dummy"); +$node->safe_psql('postgres', "CREATE SERVER s2 FOREIGN DATA WRAPPER dummy"); +$node->safe_psql('postgres', "CREATE FOREIGN TABLE t0 (a int) SERVER s0"); +$node->safe_psql('postgres', "CREATE FOREIGN TABLE t1 (a int) SERVER s1"); my ($cmd, $stdout, $stderr, $result); command_fails_like( - [ "pg_dump", '-p', $port, '--include-foreign-data=s0', 'postgres' ], + [ "pg_dump", '-p', $port, '--include-foreign-data=s0', 'postgres' ], qr/foreign-data wrapper \"dummy\" has no handler\r?\npg_dump: error: query was:.*t0/, "correctly fails to dump a foreign table from a dummy FDW"); command_ok( - [ "pg_dump", '-p', $port, '-a', '--include-foreign-data=s2', 'postgres' ] , + [ "pg_dump", '-p', $port, '-a', '--include-foreign-data=s2', 'postgres' ], "dump foreign server with no tables"); diff --git a/src/bin/pg_rewind/libpq_fetch.c b/src/bin/pg_rewind/libpq_fetch.c index a17799a5d71..1dbbceab0bd 100644 --- a/src/bin/pg_rewind/libpq_fetch.c +++ b/src/bin/pg_rewind/libpq_fetch.c @@ -23,7 +23,7 @@ #include "pg_rewind.h" #include "port/pg_bswap.h" -PGconn *conn = NULL; +PGconn *conn = NULL; /* * Files are fetched max CHUNKSIZE bytes at a time. diff --git a/src/bin/pg_verifybackup/parse_manifest.h b/src/bin/pg_verifybackup/parse_manifest.h index 7d38194907e..cbb7ca1397e 100644 --- a/src/bin/pg_verifybackup/parse_manifest.h +++ b/src/bin/pg_verifybackup/parse_manifest.h @@ -21,16 +21,16 @@ struct JsonManifestParseContext; typedef struct JsonManifestParseContext JsonManifestParseContext; -typedef void (*json_manifest_perfile_callback)(JsonManifestParseContext *, - char *pathname, - size_t size, pg_checksum_type checksum_type, - int checksum_length, uint8 *checksum_payload); -typedef void (*json_manifest_perwalrange_callback)(JsonManifestParseContext *, - TimeLineID tli, - XLogRecPtr start_lsn, XLogRecPtr end_lsn); -typedef void (*json_manifest_error_callback)(JsonManifestParseContext *, - const char *fmt, ...) pg_attribute_printf(2, 3) - pg_attribute_noreturn(); +typedef void (*json_manifest_perfile_callback) (JsonManifestParseContext *, + char *pathname, + size_t size, pg_checksum_type checksum_type, + int checksum_length, uint8 *checksum_payload); +typedef void (*json_manifest_perwalrange_callback) (JsonManifestParseContext *, + TimeLineID tli, + XLogRecPtr start_lsn, XLogRecPtr end_lsn); +typedef void (*json_manifest_error_callback) (JsonManifestParseContext *, + const char *fmt,...) pg_attribute_printf(2, 3) + pg_attribute_noreturn(); struct JsonManifestParseContext { diff --git a/src/bin/pg_verifybackup/pg_verifybackup.c b/src/bin/pg_verifybackup/pg_verifybackup.c index 2fa2aa6ae04..70b6ffdec00 100644 --- a/src/bin/pg_verifybackup/pg_verifybackup.c +++ b/src/bin/pg_verifybackup/pg_verifybackup.c @@ -644,11 +644,10 @@ verify_backup_file(verifier_context *context, char *relpath, char *fullpath) } /* - * We don't verify checksums at this stage. We first finish verifying - * that we have the expected set of files with the expected sizes, and - * only afterwards verify the checksums. That's because computing - * checksums may take a while, and we'd like to report more obvious - * problems quickly. + * We don't verify checksums at this stage. 
We first finish verifying that + * we have the expected set of files with the expected sizes, and only + * afterwards verify the checksums. That's because computing checksums may + * take a while, and we'd like to report more obvious problems quickly. */ } @@ -707,7 +706,7 @@ verify_backup_checksums(verifier_context *context) */ static void verify_file_checksum(verifier_context *context, manifest_file *m, - char *fullpath) + char *fullpath) { pg_checksum_context checksum_ctx; char *relpath = m->pathname; diff --git a/src/bin/pg_verifybackup/t/001_basic.pl b/src/bin/pg_verifybackup/t/001_basic.pl index 0d4d71aaa10..0c35062dc0a 100644 --- a/src/bin/pg_verifybackup/t/001_basic.pl +++ b/src/bin/pg_verifybackup/t/001_basic.pl @@ -9,22 +9,25 @@ program_help_ok('pg_verifybackup'); program_version_ok('pg_verifybackup'); program_options_handling_ok('pg_verifybackup'); -command_fails_like(['pg_verifybackup'], - qr/no backup directory specified/, - 'target directory must be specified'); -command_fails_like(['pg_verifybackup', $tempdir], - qr/could not open file.*\/backup_manifest\"/, - 'pg_verifybackup requires a manifest'); -command_fails_like(['pg_verifybackup', $tempdir, $tempdir], - qr/too many command-line arguments/, - 'multiple target directories not allowed'); +command_fails_like( + ['pg_verifybackup'], + qr/no backup directory specified/, + 'target directory must be specified'); +command_fails_like( + [ 'pg_verifybackup', $tempdir ], + qr/could not open file.*\/backup_manifest\"/, + 'pg_verifybackup requires a manifest'); +command_fails_like( + [ 'pg_verifybackup', $tempdir, $tempdir ], + qr/too many command-line arguments/, + 'multiple target directories not allowed'); # create fake manifest file open(my $fh, '>', "$tempdir/backup_manifest") || die "open: $!"; close($fh); # but then try to use an alternate, nonexisting manifest -command_fails_like(['pg_verifybackup', '-m', "$tempdir/not_the_manifest", - $tempdir], - qr/could not open file.*\/not_the_manifest\"/, - 'pg_verifybackup respects -m flag'); +command_fails_like( + [ 'pg_verifybackup', '-m', "$tempdir/not_the_manifest", $tempdir ], + qr/could not open file.*\/not_the_manifest\"/, + 'pg_verifybackup respects -m flag'); diff --git a/src/bin/pg_verifybackup/t/002_algorithm.pl b/src/bin/pg_verifybackup/t/002_algorithm.pl index ee82dcee376..d0c97ae3cc3 100644 --- a/src/bin/pg_verifybackup/t/002_algorithm.pl +++ b/src/bin/pg_verifybackup/t/002_algorithm.pl @@ -16,16 +16,16 @@ $master->start; for my $algorithm (qw(bogus none crc32c sha224 sha256 sha384 sha512)) { my $backup_path = $master->backup_dir . '/' . $algorithm; - my @backup = ('pg_basebackup', '-D', $backup_path, - '--manifest-checksums', $algorithm, - '--no-sync'); + my @backup = ( + 'pg_basebackup', '-D', $backup_path, + '--manifest-checksums', $algorithm, '--no-sync'); my @verify = ('pg_verifybackup', '-e', $backup_path); # A backup with a bogus algorithm should fail. 
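The loop above runs pg_basebackup once per manifest checksum algorithm and expects the name "bogus" to fail up front, before any data is copied. Schematically, the option string is mapped onto an internal tag and anything unknown is rejected immediately. A toy version of that dispatch; the enum is illustrative, the matching here is lowercase-only, and only a subset of the real list (none, crc32c, sha224/256/384/512) is shown:

    #include <string.h>

    /* Toy dispatch: algorithm name -> internal tag, unknowns rejected. */
    typedef enum
    {
        CK_NONE,
        CK_CRC32C,
        CK_SHA256
    } checksum_type;

    static int
    parse_checksum_type(const char *name, checksum_type *out)
    {
        if (strcmp(name, "none") == 0)
            *out = CK_NONE;
        else if (strcmp(name, "crc32c") == 0)
            *out = CK_CRC32C;
        else if (strcmp(name, "sha256") == 0)
            *out = CK_SHA256;
        else
            return -1;          /* "bogus" fails here, before any copying */
        return 0;
    }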
if ($algorithm eq 'bogus') { $master->command_fails(\@backup, - "backup fails with algorithm \"$algorithm\""); + "backup fails with algorithm \"$algorithm\""); next; } @@ -44,14 +44,14 @@ for my $algorithm (qw(bogus none crc32c sha224 sha256 sha384 sha512)) { my $manifest = slurp_file("$backup_path/backup_manifest"); my $count_of_algorithm_in_manifest = - (() = $manifest =~ /$algorithm/mig); - cmp_ok($count_of_algorithm_in_manifest, '>', 100, - "$algorithm is mentioned many times in the manifest"); + (() = $manifest =~ /$algorithm/mig); + cmp_ok($count_of_algorithm_in_manifest, + '>', 100, "$algorithm is mentioned many times in the manifest"); } # Make sure that it verifies OK. $master->command_ok(\@verify, - "verify backup with algorithm \"$algorithm\""); + "verify backup with algorithm \"$algorithm\""); # Remove backup immediately to save disk space. rmtree($backup_path); diff --git a/src/bin/pg_verifybackup/t/003_corruption.pl b/src/bin/pg_verifybackup/t/003_corruption.pl index 113959420ee..c2e04d0be20 100644 --- a/src/bin/pg_verifybackup/t/003_corruption.pl +++ b/src/bin/pg_verifybackup/t/003_corruption.pl @@ -15,7 +15,7 @@ $master->start; # Include a user-defined tablespace in the hopes of detecting problems in that # area. -my $source_ts_path = TestLib::perl2host(TestLib::tempdir_short()); +my $source_ts_path = TestLib::perl2host(TestLib::tempdir_short()); my $source_ts_prefix = $source_ts_path; $source_ts_prefix =~ s!(^[A-Z]:/[^/]*)/.*!$1!; @@ -29,106 +29,107 @@ EOM my @scenario = ( { - 'name' => 'extra_file', + 'name' => 'extra_file', 'mutilate' => \&mutilate_extra_file, 'fails_like' => - qr/extra_file.*present on disk but not in the manifest/ + qr/extra_file.*present on disk but not in the manifest/ }, { - 'name' => 'extra_tablespace_file', + 'name' => 'extra_tablespace_file', 'mutilate' => \&mutilate_extra_tablespace_file, 'fails_like' => - qr/extra_ts_file.*present on disk but not in the manifest/ + qr/extra_ts_file.*present on disk but not in the manifest/ }, { - 'name' => 'missing_file', + 'name' => 'missing_file', 'mutilate' => \&mutilate_missing_file, 'fails_like' => - qr/pg_xact\/0000.*present in the manifest but not on disk/ + qr/pg_xact\/0000.*present in the manifest but not on disk/ }, { - 'name' => 'missing_tablespace', + 'name' => 'missing_tablespace', 'mutilate' => \&mutilate_missing_tablespace, 'fails_like' => - qr/pg_tblspc.*present in the manifest but not on disk/ + qr/pg_tblspc.*present in the manifest but not on disk/ }, { - 'name' => 'append_to_file', - 'mutilate' => \&mutilate_append_to_file, - 'fails_like' => - qr/has size \d+ on disk but size \d+ in the manifest/ + 'name' => 'append_to_file', + 'mutilate' => \&mutilate_append_to_file, + 'fails_like' => qr/has size \d+ on disk but size \d+ in the manifest/ }, { - 'name' => 'truncate_file', - 'mutilate' => \&mutilate_truncate_file, - 'fails_like' => - qr/has size 0 on disk but size \d+ in the manifest/ + 'name' => 'truncate_file', + 'mutilate' => \&mutilate_truncate_file, + 'fails_like' => qr/has size 0 on disk but size \d+ in the manifest/ }, { - 'name' => 'replace_file', - 'mutilate' => \&mutilate_replace_file, + 'name' => 'replace_file', + 'mutilate' => \&mutilate_replace_file, 'fails_like' => qr/checksum mismatch for file/ }, { - 'name' => 'bad_manifest', - 'mutilate' => \&mutilate_bad_manifest, + 'name' => 'bad_manifest', + 'mutilate' => \&mutilate_bad_manifest, 'fails_like' => qr/manifest checksum mismatch/ }, { - 'name' => 'open_file_fails', - 'mutilate' => \&mutilate_open_file_fails, - 'fails_like' => 
qr/could not open file/, + 'name' => 'open_file_fails', + 'mutilate' => \&mutilate_open_file_fails, + 'fails_like' => qr/could not open file/, 'skip_on_windows' => 1 }, { - 'name' => 'open_directory_fails', - 'mutilate' => \&mutilate_open_directory_fails, - 'cleanup' => \&cleanup_open_directory_fails, - 'fails_like' => qr/could not open directory/, + 'name' => 'open_directory_fails', + 'mutilate' => \&mutilate_open_directory_fails, + 'cleanup' => \&cleanup_open_directory_fails, + 'fails_like' => qr/could not open directory/, 'skip_on_windows' => 1 }, { - 'name' => 'search_directory_fails', - 'mutilate' => \&mutilate_search_directory_fails, - 'cleanup' => \&cleanup_search_directory_fails, - 'fails_like' => qr/could not stat file or directory/, + 'name' => 'search_directory_fails', + 'mutilate' => \&mutilate_search_directory_fails, + 'cleanup' => \&cleanup_search_directory_fails, + 'fails_like' => qr/could not stat file or directory/, 'skip_on_windows' => 1 - } -); + }); for my $scenario (@scenario) { my $name = $scenario->{'name'}; - SKIP: + SKIP: { skip "unix-style permissions not supported on Windows", 4 - if $scenario->{'skip_on_windows'} && $windows_os; + if $scenario->{'skip_on_windows'} && $windows_os; # Take a backup and check that it verifies OK. - my $backup_path = $master->backup_dir . '/' . $name; + my $backup_path = $master->backup_dir . '/' . $name; my $backup_ts_path = TestLib::perl2host(TestLib::tempdir_short()); # The tablespace map parameter confuses Msys2, which tries to mangle # it. Tell it not to. # See https://www.msys2.org/wiki/Porting/#filesystem-namespaces local $ENV{MSYS2_ARG_CONV_EXCL} = $source_ts_prefix; - $master->command_ok(['pg_basebackup', '-D', $backup_path, '--no-sync', - '-T', "${source_ts_path}=${backup_ts_path}"], - "base backup ok"); - command_ok(['pg_verifybackup', $backup_path ], - "intact backup verified"); + $master->command_ok( + [ + 'pg_basebackup', '-D', $backup_path, '--no-sync', + '-T', "${source_ts_path}=${backup_ts_path}" + ], + "base backup ok"); + command_ok([ 'pg_verifybackup', $backup_path ], + "intact backup verified"); # Mutilate the backup in some way. $scenario->{'mutilate'}->($backup_path); # Now check that the backup no longer verifies. - command_fails_like(['pg_verifybackup', $backup_path ], - $scenario->{'fails_like'}, - "corrupt backup fails verification: $name"); + command_fails_like( + [ 'pg_verifybackup', $backup_path ], + $scenario->{'fails_like'}, + "corrupt backup fails verification: $name"); # Run cleanup hook, if provided. $scenario->{'cleanup'}->($backup_path) - if exists $scenario->{'cleanup'}; + if exists $scenario->{'cleanup'}; # Finally, use rmtree to reclaim space. rmtree($backup_path); @@ -157,14 +158,14 @@ sub mutilate_extra_file sub mutilate_extra_tablespace_file { my ($backup_path) = @_; - my ($tsoid) = grep { $_ ne '.' && $_ ne '..' } - slurp_dir("$backup_path/pg_tblspc"); + my ($tsoid) = + grep { $_ ne '.' && $_ ne '..' } slurp_dir("$backup_path/pg_tblspc"); my ($catvdir) = grep { $_ ne '.' && $_ ne '..' } - slurp_dir("$backup_path/pg_tblspc/$tsoid"); + slurp_dir("$backup_path/pg_tblspc/$tsoid"); my ($tsdboid) = grep { $_ ne '.' && $_ ne '..' 
} - slurp_dir("$backup_path/pg_tblspc/$tsoid/$catvdir"); + slurp_dir("$backup_path/pg_tblspc/$tsoid/$catvdir"); create_extra_file($backup_path, - "pg_tblspc/$tsoid/$catvdir/$tsdboid/extra_ts_file"); + "pg_tblspc/$tsoid/$catvdir/$tsdboid/extra_ts_file"); return; } @@ -181,8 +182,8 @@ sub mutilate_missing_file sub mutilate_missing_tablespace { my ($backup_path) = @_; - my ($tsoid) = grep { $_ ne '.' && $_ ne '..' } - slurp_dir("$backup_path/pg_tblspc"); + my ($tsoid) = + grep { $_ ne '.' && $_ ne '..' } slurp_dir("$backup_path/pg_tblspc"); my $pathname = "$backup_path/pg_tblspc/$tsoid"; if ($windows_os) { @@ -226,8 +227,8 @@ sub mutilate_truncate_file sub mutilate_replace_file { my ($backup_path) = @_; - my $pathname = "$backup_path/PG_VERSION"; - my $contents = slurp_file($pathname); + my $pathname = "$backup_path/PG_VERSION"; + my $contents = slurp_file($pathname); open(my $fh, '>', $pathname) || die "open $pathname: $!"; print $fh 'q' x length($contents); close($fh); @@ -279,7 +280,7 @@ sub mutilate_search_directory_fails } # rmtree can't cope with a mode 400 directory, so change back to 700. -sub cleanup_search_directory_fails +sub cleanup_search_directory_fails { my ($backup_path) = @_; my $pathname = "$backup_path/base"; diff --git a/src/bin/pg_verifybackup/t/004_options.pl b/src/bin/pg_verifybackup/t/004_options.pl index 9bae8eb565b..271b7ee5043 100644 --- a/src/bin/pg_verifybackup/t/004_options.pl +++ b/src/bin/pg_verifybackup/t/004_options.pl @@ -14,14 +14,14 @@ my $master = get_new_node('master'); $master->init(allows_streaming => 1); $master->start; my $backup_path = $master->backup_dir . '/test_options'; -$master->command_ok(['pg_basebackup', '-D', $backup_path, '--no-sync' ], - "base backup ok"); +$master->command_ok([ 'pg_basebackup', '-D', $backup_path, '--no-sync' ], + "base backup ok"); # Verify that pg_verifybackup -q succeeds and produces no output. my $stdout; my $stderr; -my $result = IPC::Run::run ['pg_verifybackup', '-q', $backup_path ], - '>', \$stdout, '2>', \$stderr; +my $result = IPC::Run::run [ 'pg_verifybackup', '-q', $backup_path ], + '>', \$stdout, '2>', \$stderr; ok($result, "-q succeeds: exit code 0"); is($stdout, '', "-q succeeds: no stdout"); is($stderr, '', "-q succeeds: no stderr"); @@ -34,56 +34,71 @@ print $fh 'q' x length($version_contents); close($fh); # Verify that pg_verifybackup -q now fails. -command_fails_like(['pg_verifybackup', '-q', $backup_path ], - qr/checksum mismatch for file \"PG_VERSION\"/, - '-q checksum mismatch'); +command_fails_like( + [ 'pg_verifybackup', '-q', $backup_path ], + qr/checksum mismatch for file \"PG_VERSION\"/, + '-q checksum mismatch'); # Since we didn't change the length of the file, verification should succeed # if we ignore checksums. Check that we get the right message, too. -command_like(['pg_verifybackup', '-s', $backup_path ], - qr/backup successfully verified/, - '-s skips checksumming'); +command_like( + [ 'pg_verifybackup', '-s', $backup_path ], + qr/backup successfully verified/, + '-s skips checksumming'); # Validation should succeed if we ignore the problem file. -command_like(['pg_verifybackup', '-i', 'PG_VERSION', $backup_path ], - qr/backup successfully verified/, - '-i ignores problem file'); +command_like( + [ 'pg_verifybackup', '-i', 'PG_VERSION', $backup_path ], + qr/backup successfully verified/, + '-i ignores problem file'); # PG_VERSION is already corrupt; let's try also removing all of pg_xact. rmtree($backup_path . 
"/pg_xact"); # We're ignoring the problem with PG_VERSION, but not the problem with # pg_xact, so verification should fail here. -command_fails_like(['pg_verifybackup', '-i', 'PG_VERSION', $backup_path ], - qr/pg_xact.*is present in the manifest but not on disk/, - '-i does not ignore all problems'); +command_fails_like( + [ 'pg_verifybackup', '-i', 'PG_VERSION', $backup_path ], + qr/pg_xact.*is present in the manifest but not on disk/, + '-i does not ignore all problems'); # If we use -i twice, we should be able to ignore all of the problems. -command_like(['pg_verifybackup', '-i', 'PG_VERSION', '-i', 'pg_xact', - $backup_path ], - qr/backup successfully verified/, - 'multiple -i options work'); +command_like( + [ 'pg_verifybackup', '-i', 'PG_VERSION', '-i', 'pg_xact', $backup_path ], + qr/backup successfully verified/, + 'multiple -i options work'); # Verify that when -i is not used, both problems are reported. -$result = IPC::Run::run ['pg_verifybackup', $backup_path ], - '>', \$stdout, '2>', \$stderr; +$result = IPC::Run::run [ 'pg_verifybackup', $backup_path ], + '>', \$stdout, '2>', \$stderr; ok(!$result, "multiple problems: fails"); -like($stderr, qr/pg_xact.*is present in the manifest but not on disk/, - "multiple problems: missing files reported"); -like($stderr, qr/checksum mismatch for file \"PG_VERSION\"/, - "multiple problems: checksum mismatch reported"); +like( + $stderr, + qr/pg_xact.*is present in the manifest but not on disk/, + "multiple problems: missing files reported"); +like( + $stderr, + qr/checksum mismatch for file \"PG_VERSION\"/, + "multiple problems: checksum mismatch reported"); # Verify that when -e is used, only the problem detected first is reported. -$result = IPC::Run::run ['pg_verifybackup', '-e', $backup_path ], - '>', \$stdout, '2>', \$stderr; +$result = IPC::Run::run [ 'pg_verifybackup', '-e', $backup_path ], + '>', \$stdout, '2>', \$stderr; ok(!$result, "-e reports 1 error: fails"); -like($stderr, qr/pg_xact.*is present in the manifest but not on disk/, - "-e reports 1 error: missing files reported"); -unlike($stderr, qr/checksum mismatch for file \"PG_VERSION\"/, - "-e reports 1 error: checksum mismatch not reported"); +like( + $stderr, + qr/pg_xact.*is present in the manifest but not on disk/, + "-e reports 1 error: missing files reported"); +unlike( + $stderr, + qr/checksum mismatch for file \"PG_VERSION\"/, + "-e reports 1 error: checksum mismatch not reported"); # Test valid manifest with nonexistent backup directory. 
-command_fails_like(['pg_verifybackup', '-m', "$backup_path/backup_manifest",
-				   "$backup_path/fake" ],
-				   qr/could not open directory/,
-				   'nonexistent backup directory');
+command_fails_like(
+	[
+		'pg_verifybackup', '-m',
+		"$backup_path/backup_manifest", "$backup_path/fake"
+	],
+	qr/could not open directory/,
+	'nonexistent backup directory');
diff --git a/src/bin/pg_verifybackup/t/005_bad_manifest.pl b/src/bin/pg_verifybackup/t/005_bad_manifest.pl
index 3dd2b5a20df..afd64d1a96b 100644
--- a/src/bin/pg_verifybackup/t/005_bad_manifest.pl
+++ b/src/bin/pg_verifybackup/t/005_bad_manifest.pl
@@ -11,9 +11,10 @@ use Test::More tests => 58;
 
 my $tempdir = TestLib::tempdir;
 
-test_bad_manifest('input string ended unexpectedly',
-	qr/could not parse backup manifest: The input string ended unexpectedly/,
-	<<EOM);
+test_bad_manifest(
+	'input string ended unexpectedly',
+	qr/could not parse backup manifest: The input string ended unexpectedly/,
+	<<EOM);
 {
 EOM
 
@@ -163,7 +164,7 @@ my $manifest_without_newline = <<EOM;
 EOM
 chomp($manifest_without_newline);
 test_parse_error('last line not newline-terminated',
-				 $manifest_without_newline);
+	$manifest_without_newline);
 
 test_fatal_error('invalid manifest checksum', <<EOM);
 {"PostgreSQL-Backup-Manifest-Version": 1, "Files": [],
@@ -175,8 +176,8 @@ sub test_parse_error
 	my ($test_name, $manifest_contents) = @_;
 
 	test_bad_manifest($test_name,
-					  qr/could not parse backup manifest: $test_name/,
-					  $manifest_contents);
+		qr/could not parse backup manifest: $test_name/,
+		$manifest_contents);
 	return;
 }
 
@@ -184,9 +185,7 @@ sub test_fatal_error
 {
 	my ($test_name, $manifest_contents) = @_;
 
-	test_bad_manifest($test_name,
-					  qr/fatal: $test_name/,
-					  $manifest_contents);
+	test_bad_manifest($test_name, qr/fatal: $test_name/, $manifest_contents);
 	return;
 }
 
@@ -198,7 +197,6 @@ sub test_bad_manifest
 	print $fh $manifest_contents;
 	close($fh);
 
-	command_fails_like(['pg_verifybackup', $tempdir], $regexp,
-					   $test_name);
+	command_fails_like([ 'pg_verifybackup', $tempdir ], $regexp, $test_name);
 	return;
 }
diff --git a/src/bin/pg_verifybackup/t/006_encoding.pl b/src/bin/pg_verifybackup/t/006_encoding.pl
index 3c6b57adcd4..5ab9649ab6f 100644
--- a/src/bin/pg_verifybackup/t/006_encoding.pl
+++ b/src/bin/pg_verifybackup/t/006_encoding.pl
@@ -12,16 +12,20 @@ my $master = get_new_node('master');
 $master->init(allows_streaming => 1);
 $master->start;
 my $backup_path = $master->backup_dir . '/test_encoding';
-$master->command_ok(['pg_basebackup', '-D', $backup_path, '--no-sync',
-					'--manifest-force-encode' ],
-					"backup ok with forced hex encoding");
+$master->command_ok(
+	[
+		'pg_basebackup', '-D',
+		$backup_path, '--no-sync',
+		'--manifest-force-encode'
+	],
+	"backup ok with forced hex encoding");
 
 my $manifest = slurp_file("$backup_path/backup_manifest");
-my $count_of_encoded_path_in_manifest =
-	(() = $manifest =~ /Encoded-Path/mig);
-cmp_ok($count_of_encoded_path_in_manifest, '>', 100,
-	   "many paths are encoded in the manifest");
+my $count_of_encoded_path_in_manifest = (() = $manifest =~ /Encoded-Path/mig);
+cmp_ok($count_of_encoded_path_in_manifest,
+	'>', 100, "many paths are encoded in the manifest");
 
-command_like(['pg_verifybackup', '-s', $backup_path ],
-			 qr/backup successfully verified/,
-			 'backup with forced encoding verified');
+command_like(
+	[ 'pg_verifybackup', '-s', $backup_path ],
+	qr/backup successfully verified/,
+	'backup with forced encoding verified');
diff --git a/src/bin/pg_verifybackup/t/007_wal.pl b/src/bin/pg_verifybackup/t/007_wal.pl
index 5e891d1b6f4..56d536675c9 100644
--- a/src/bin/pg_verifybackup/t/007_wal.pl
+++ b/src/bin/pg_verifybackup/t/007_wal.pl
@@ -14,26 +14,28 @@ my $master = get_new_node('master');
 $master->init(allows_streaming => 1);
 $master->start;
 my $backup_path = $master->backup_dir . '/test_wal';
-$master->command_ok(['pg_basebackup', '-D', $backup_path, '--no-sync' ],
-					"base backup ok");
+$master->command_ok([ 'pg_basebackup', '-D', $backup_path, '--no-sync' ],
+	"base backup ok");
 
 # Rename pg_wal.
-my $original_pg_wal = $backup_path . '/pg_wal';
+my $original_pg_wal  = $backup_path . '/pg_wal';
 my $relocated_pg_wal = $master->backup_dir . '/relocated_pg_wal';
 rename($original_pg_wal, $relocated_pg_wal) || die "rename pg_wal: $!";
 
 # WAL verification should fail.
-command_fails_like(['pg_verifybackup', $backup_path ],
-				   qr/WAL parsing failed for timeline 1/,
-				   'missing pg_wal causes failure');
+command_fails_like(
+	[ 'pg_verifybackup', $backup_path ],
+	qr/WAL parsing failed for timeline 1/,
+	'missing pg_wal causes failure');
 
 # Should work if we skip WAL verification.
-command_ok(['pg_verifybackup', '-n', $backup_path ],
-		   'missing pg_wal OK if not verifying WAL');
+command_ok(
+	[ 'pg_verifybackup', '-n', $backup_path ],
+	'missing pg_wal OK if not verifying WAL');
 
 # Should also work if we specify the correct WAL location.
-command_ok(['pg_verifybackup', '-w', $relocated_pg_wal, $backup_path ],
-		   '-w can be used to specify WAL directory');
+command_ok([ 'pg_verifybackup', '-w', $relocated_pg_wal, $backup_path ],
+	'-w can be used to specify WAL directory');
 
 # Move directory back to original location.
 rename($relocated_pg_wal, $original_pg_wal) || die "rename pg_wal back: $!";
 
@@ -43,13 +45,14 @@ my @walfiles = grep { /^[0-9A-F]{24}$/ } slurp_dir($original_pg_wal);
 
 # Replace the contents of one of the files with garbage of equal length.
 my $wal_corruption_target = $original_pg_wal . '/' . $walfiles[0];
-my $wal_size = -s $wal_corruption_target;
+my $wal_size              = -s $wal_corruption_target;
 open(my $fh, '>', $wal_corruption_target)
-	|| die "open $wal_corruption_target: $!";
+  || die "open $wal_corruption_target: $!";
 print $fh 'w' x $wal_size;
 close($fh);
 
 # WAL verification should fail.
-command_fails_like(['pg_verifybackup', $backup_path ],
-				   qr/WAL parsing failed for timeline 1/,
-				   'corrupt WAL file causes failure');
+command_fails_like(
+	[ 'pg_verifybackup', $backup_path ],
+	qr/WAL parsing failed for timeline 1/,
+	'corrupt WAL file causes failure');
diff --git a/src/bin/pgbench/pgbench.c b/src/bin/pgbench/pgbench.c
index ef8ef447f66..08a5947a9e6 100644
--- a/src/bin/pgbench/pgbench.c
+++ b/src/bin/pgbench/pgbench.c
@@ -200,7 +200,7 @@ typedef enum
 	PART_NONE,					/* no partitioning */
 	PART_RANGE,					/* range partitioning */
 	PART_HASH					/* hash partitioning */
-} partition_method_t;
+}			partition_method_t;
 
 static partition_method_t partition_method = PART_NONE;
 static const char *PARTITION_METHOD[] = {"none", "range", "hash"};
 
@@ -3321,7 +3321,7 @@ executeMetaCommand(CState *st, instr_time *now)
 
 	if (unlikely(__pg_log_level <= PG_LOG_DEBUG))
 	{
-		PQExpBufferData buf;
+		PQExpBufferData buf;
 
 		initPQExpBuffer(&buf);
 
@@ -3992,7 +3992,7 @@ initGenerateDataServerSide(PGconn *con)
 	snprintf(sql, sizeof(sql),
 			 "insert into pgbench_accounts(aid,bid,abalance,filler) "
 			 "select aid, (aid - 1) / %d + 1, 0, '' "
-			 "from generate_series(1, "INT64_FORMAT") as aid",
+			 "from generate_series(1, " INT64_FORMAT ") as aid",
 			 naccounts, (int64) naccounts * scale);
 	executeStatement(con, sql);
 
@@ -4390,7 +4390,7 @@ syntax_error(const char *source, int lineno,
 	{
 		fprintf(stderr, "%s\n", line);
 		if (column >= 0)
-			fprintf(stderr, "%*c error found here\n", column+1, '^');
+			fprintf(stderr, "%*c error found here\n", column + 1, '^');
 	}
 
 	exit(1);
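The syntax_error() hunk just above depends on printf field-width padding to position the caret under the offending column. A minimal standalone sketch of that idiom (the sample line and column are invented for illustration):

#include <stdio.h>

int
main(void)
{
	const char *line = "select 1 from;";
	int			column = 13;	/* 0-based offset of the bad token */

	fprintf(stderr, "%s\n", line);

	/*
	 * "%*c" prints '^' right-aligned in a field of width column + 1, so the
	 * caret lands directly under the offending character of the echoed line.
	 */
	fprintf(stderr, "%*c error found here\n", column + 1, '^');
	return 0;
}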
"log_parameter_max_length_on_error = 0"); $node->reload; pgbench( - '-n -t1 -c1 -M prepared', - 2, - [], - [ + '-n -t1 -c1 -M prepared', + 2, + [], + [ qr{ERROR: invalid input syntax for type json}, qr{(?!extended query with parameters)} - ], - 'server parameter logging', - { - '001_param_1' => q[select '{ invalid ' as value \gset + ], + 'server parameter logging', + { + '001_param_1' => q[select '{ invalid ' as value \gset select $$'Valame Dios!' dijo Sancho; 'no le dije yo a vuestra merced que mirase bien lo que hacia?'$$ as long \gset select column1::jsonb from (values (:value), (:long)) as q; ] }); my $log = TestLib::slurp_file($node->logfile); -unlike($log, qr[DETAIL: parameters: \$1 = '\{ invalid ',], "no parameters logged"); +unlike( + $log, + qr[DETAIL: parameters: \$1 = '\{ invalid ',], + "no parameters logged"); $log = undef; # 2. Logging truncated parameters on error, full with statements $node->append_conf('postgresql.conf', - "log_parameter_max_length = -1\n" . - "log_parameter_max_length_on_error = 64"); + "log_parameter_max_length = -1\n" + . "log_parameter_max_length_on_error = 64"); $node->reload; pgbench( - '-n -t1 -c1 -M prepared', - 2, - [], - [ + '-n -t1 -c1 -M prepared', + 2, + [], + [ qr{ERROR: division by zero}, qr{CONTEXT: extended query with parameters: \$1 = '1', \$2 = NULL} - ], - 'server parameter logging', - { - '001_param_2' => q{select '1' as one \gset + ], + 'server parameter logging', + { + '001_param_2' => q{select '1' as one \gset SELECT 1 / (random() / 2)::int, :one::int, :two::int; } }); pgbench( - '-n -t1 -c1 -M prepared', - 2, - [], - [ + '-n -t1 -c1 -M prepared', + 2, + [], + [ qr{ERROR: invalid input syntax for type json}, qr[CONTEXT: JSON data, line 1: \{ invalid\.\.\.[\r\n]+extended query with parameters: \$1 = '\{ invalid ', \$2 = '''Valame Dios!'' dijo Sancho; ''no le dije yo a vuestra merced que \.\.\.']m - ], - 'server parameter logging', - { - '001_param_3' => q[select '{ invalid ' as value \gset + ], + 'server parameter logging', + { + '001_param_3' => q[select '{ invalid ' as value \gset select $$'Valame Dios!' dijo Sancho; 'no le dije yo a vuestra merced que mirase bien lo que hacia?'$$ as long \gset select column1::jsonb from (values (:value), (:long)) as q; ] }); $log = TestLib::slurp_file($node->logfile); -like($log, qr[DETAIL: parameters: \$1 = '\{ invalid ', \$2 = '''Valame Dios!'' dijo Sancho; ''no le dije yo a vuestra merced que mirase bien lo que hacia\?'''], - "parameter report does not truncate"); +like( + $log, + qr[DETAIL: parameters: \$1 = '\{ invalid ', \$2 = '''Valame Dios!'' dijo Sancho; ''no le dije yo a vuestra merced que mirase bien lo que hacia\?'''], + "parameter report does not truncate"); $log = undef; # 3. Logging full parameters on error, truncated with statements $node->append_conf('postgresql.conf', - "log_min_duration_statement = -1\n" . - "log_parameter_max_length = 7\n" . - "log_parameter_max_length_on_error = -1"); + "log_min_duration_statement = -1\n" + . "log_parameter_max_length = 7\n" + . 
"log_parameter_max_length_on_error = -1"); $node->reload; pgbench( - '-n -t1 -c1 -M prepared', - 2, - [], - [ + '-n -t1 -c1 -M prepared', + 2, + [], + [ qr{ERROR: division by zero}, qr{CONTEXT: extended query with parameters: \$1 = '1', \$2 = NULL} - ], - 'server parameter logging', - { - '001_param_4' => q{select '1' as one \gset + ], + 'server parameter logging', + { + '001_param_4' => q{select '1' as one \gset SELECT 1 / (random() / 2)::int, :one::int, :two::int; } }); @@ -362,30 +368,32 @@ SELECT 1 / (random() / 2)::int, :one::int, :two::int; $node->append_conf('postgresql.conf', "log_min_duration_statement = 0"); $node->reload; pgbench( - '-n -t1 -c1 -M prepared', - 2, - [], - [ + '-n -t1 -c1 -M prepared', + 2, + [], + [ qr{ERROR: invalid input syntax for type json}, qr[CONTEXT: JSON data, line 1: \{ invalid\.\.\.[\r\n]+extended query with parameters: \$1 = '\{ invalid ', \$2 = '''Valame Dios!'' dijo Sancho; ''no le dije yo a vuestra merced que mirase bien lo que hacia\?']m - ], - 'server parameter logging', - { - '001_param_5' => q[select '{ invalid ' as value \gset + ], + 'server parameter logging', + { + '001_param_5' => q[select '{ invalid ' as value \gset select $$'Valame Dios!' dijo Sancho; 'no le dije yo a vuestra merced que mirase bien lo que hacia?'$$ as long \gset select column1::jsonb from (values (:value), (:long)) as q; ] }); $log = TestLib::slurp_file($node->logfile); -like($log, qr[DETAIL: parameters: \$1 = '\{ inval\.\.\.', \$2 = '''Valame\.\.\.'], - "parameter report truncates"); +like( + $log, + qr[DETAIL: parameters: \$1 = '\{ inval\.\.\.', \$2 = '''Valame\.\.\.'], + "parameter report truncates"); $log = undef; # Restore default logging config $node->append_conf('postgresql.conf', - "log_min_duration_statement = -1\n" . - "log_parameter_max_length_on_error = 0\n" . - "log_parameter_max_length = -1"); + "log_min_duration_statement = -1\n" + . "log_parameter_max_length_on_error = 0\n" + . 
"log_parameter_max_length = -1"); $node->reload; # test expressions diff --git a/src/bin/pgbench/t/002_pgbench_no_server.pl b/src/bin/pgbench/t/002_pgbench_no_server.pl index 66b1bd6ff6e..e38c7d77d1c 100644 --- a/src/bin/pgbench/t/002_pgbench_no_server.pl +++ b/src/bin/pgbench/t/002_pgbench_no_server.pl @@ -147,7 +147,10 @@ my @options = ( [ 'invalid init step', '-i -I dta', - [ qr{unrecognized initialization step}, qr{Allowed step characters are} ] + [ + qr{unrecognized initialization step}, + qr{Allowed step characters are} + ] ], [ 'bad random seed', @@ -158,12 +161,20 @@ my @options = ( qr{error while setting random seed from --random-seed option} ] ], - [ 'bad partition method', '-i --partition-method=BAD', [qr{"range"}, qr{"hash"}, qr{"BAD"}] ], - [ 'bad partition number', '-i --partitions -1', [ qr{invalid number of partitions: "-1"} ] ], + [ + 'bad partition method', + '-i --partition-method=BAD', + [ qr{"range"}, qr{"hash"}, qr{"BAD"} ] + ], + [ + 'bad partition number', + '-i --partitions -1', + [qr{invalid number of partitions: "-1"}] + ], [ 'partition method without partitioning', '-i --partition-method=hash', - [ qr{partition-method requires greater than zero --partitions} ] + [qr{partition-method requires greater than zero --partitions}] ], # logging sub-options @@ -231,8 +242,10 @@ pgbench( '--show-script se', 0, [qr{^$}], - [ qr{select-only: }, qr{SELECT abalance FROM pgbench_accounts WHERE}, - qr{(?!UPDATE)}, qr{(?!INSERT)} ], + [ + qr{select-only: }, qr{SELECT abalance FROM pgbench_accounts WHERE}, + qr{(?!UPDATE)}, qr{(?!INSERT)} + ], 'pgbench builtin listing'); my @script_tests = ( diff --git a/src/bin/psql/common.c b/src/bin/psql/common.c index 621a33f7e83..06f801764b6 100644 --- a/src/bin/psql/common.c +++ b/src/bin/psql/common.c @@ -707,7 +707,7 @@ PrintNotifications(void) static bool PrintQueryTuples(const PGresult *results) { - bool result = true; + bool result = true; /* write output to \g argument, if any */ if (pset.gfname) diff --git a/src/bin/psql/describe.c b/src/bin/psql/describe.c index 8dca6d8bb43..9b526e40cdd 100644 --- a/src/bin/psql/describe.c +++ b/src/bin/psql/describe.c @@ -3074,7 +3074,7 @@ describeOneTableDetails(const char *schemaname, /* Visually distinguish inherited triggers */ if (!PQgetisnull(result, i, 4)) appendPQExpBuffer(&buf, ", ON TABLE %s", - PQgetvalue(result, i, 4)); + PQgetvalue(result, i, 4)); printTableAddFooter(&cont, buf.data); } diff --git a/src/bin/psql/mainloop.c b/src/bin/psql/mainloop.c index bdf803a0535..7abe016e403 100644 --- a/src/bin/psql/mainloop.c +++ b/src/bin/psql/mainloop.c @@ -238,11 +238,10 @@ MainLoop(FILE *source) bool found_q = false; /* - * The assistance words, help/exit/quit, must have no - * whitespace before them, and only whitespace after, with an - * optional semicolon. This prevents indented use of these - * words, perhaps as identifiers, from invoking the assistance - * behavior. + * The assistance words, help/exit/quit, must have no whitespace + * before them, and only whitespace after, with an optional + * semicolon. This prevents indented use of these words, perhaps + * as identifiers, from invoking the assistance behavior. 
diff --git a/src/bin/psql/tab-complete.c b/src/bin/psql/tab-complete.c
index f6fd623c98b..1e931a56cb6 100644
--- a/src/bin/psql/tab-complete.c
+++ b/src/bin/psql/tab-complete.c
@@ -1743,14 +1743,14 @@ psql_completion(const char *text, int start, int end)
	/* ALTER INDEX <foo> SET|RESET ( */
	else if (Matches("ALTER", "INDEX", MatchAny, "RESET", "("))
		COMPLETE_WITH("fillfactor",
-					  "vacuum_cleanup_index_scale_factor", "deduplicate_items", /* BTREE */
+					  "vacuum_cleanup_index_scale_factor", "deduplicate_items",	/* BTREE */
					  "fastupdate", "gin_pending_list_limit",	/* GIN */
					  "buffering",	/* GiST */
					  "pages_per_range", "autosummarize"	/* BRIN */
			);
	else if (Matches("ALTER", "INDEX", MatchAny, "SET", "("))
		COMPLETE_WITH("fillfactor =",
-					  "vacuum_cleanup_index_scale_factor =", "deduplicate_items =", /* BTREE */
+					  "vacuum_cleanup_index_scale_factor =", "deduplicate_items =",	/* BTREE */
					  "fastupdate =", "gin_pending_list_limit =",	/* GIN */
					  "buffering =",	/* GiST */
					  "pages_per_range =", "autosummarize ="	/* BRIN */
diff --git a/src/bin/scripts/createuser.c b/src/bin/scripts/createuser.c
index ebdb72a45cc..9ced079ac75 100644
--- a/src/bin/scripts/createuser.c
+++ b/src/bin/scripts/createuser.c
@@ -88,7 +88,7 @@ main(int argc, char *argv[])
	while ((c = getopt_long(argc, argv, "h:p:U:g:wWedDsSrRiIlLc:PE",
							long_options, &optindex)) != -1)
	{
-		char *endptr;
+		char	   *endptr;
 
		switch (c)
		{
@@ -145,7 +145,7 @@ main(int argc, char *argv[])
				break;
			case 'c':
				conn_limit = strtol(optarg, &endptr, 10);
-				if (*endptr != '\0' || conn_limit < -1) /* minimum valid value */
+				if (*endptr != '\0' || conn_limit < -1) /* minimum valid value */
				{
					pg_log_error("invalid value for --connection-limit: %s",
								 optarg);
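The createuser.c hunk above uses the classic strtol()/endptr validation idiom. A self-contained sketch of the same check (the option value is invented for illustration):

#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	const char *optarg = "100";	/* stands in for the command-line value */
	char	   *endptr;
	long		conn_limit = strtol(optarg, &endptr, 10);

	/*
	 * endptr points at the first unconsumed character; a fully numeric
	 * string leaves it on the terminating NUL, so anything else means the
	 * argument was not a plain integer.
	 */
	if (*endptr != '\0' || conn_limit < -1)
		fprintf(stderr, "invalid value for --connection-limit: %s\n", optarg);
	else
		printf("connection limit = %ld\n", conn_limit);
	return 0;
}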
diff --git a/src/bin/scripts/t/090_reindexdb.pl b/src/bin/scripts/t/090_reindexdb.pl
index c20ffbd505c..87417c86ff4 100644
--- a/src/bin/scripts/t/090_reindexdb.pl
+++ b/src/bin/scripts/t/090_reindexdb.pl
@@ -109,7 +109,7 @@ $node->issues_sql_like(
	qr/statement:\ REINDEX TABLE s1.t1;/,
	'parallel reindexdb for schemas does a per-table REINDEX');
 $node->command_ok(
-	['reindexdb', '-j', '2', '-S', 's3'],
+	[ 'reindexdb', '-j', '2', '-S', 's3' ],
	'parallel reindexdb with empty schema');
 $node->command_checks_all(
	[ 'reindexdb', '-j', '2', '--concurrently', '-d', 'postgres' ],
diff --git a/src/bin/scripts/t/100_vacuumdb.pl b/src/bin/scripts/t/100_vacuumdb.pl
index c2284c8195f..b136bd44570 100644
--- a/src/bin/scripts/t/100_vacuumdb.pl
+++ b/src/bin/scripts/t/100_vacuumdb.pl
@@ -89,8 +89,7 @@ $node->command_fails(
 $node->command_fails(
	[ 'vacuumdb', '--analyze', '--table', 'vactable(c)', 'postgres' ],
	'incorrect column name with ANALYZE');
-$node->command_fails(
-	[ 'vacuumdb', '-P', -1, 'postgres' ],
+$node->command_fails([ 'vacuumdb', '-P', -1, 'postgres' ],
	'negative parallel degree');
 $node->issues_sql_like(
	[ 'vacuumdb', '--analyze', '--table', 'vactable(a, b)', 'postgres' ],
diff --git a/src/common/jsonapi.c b/src/common/jsonapi.c
index 7df231c3851..aa917d0fc9d 100644
--- a/src/common/jsonapi.c
+++ b/src/common/jsonapi.c
@@ -54,7 +54,7 @@ typedef enum			/* contexts of JSON parser */
 
 static inline JsonParseErrorType json_lex_string(JsonLexContext *lex);
 static inline JsonParseErrorType json_lex_number(JsonLexContext *lex, char *s,
-								bool *num_err, int *total_len);
+												 bool *num_err, int *total_len);
 static inline JsonParseErrorType parse_scalar(JsonLexContext *lex, JsonSemAction *sem);
 static JsonParseErrorType parse_object_field(JsonLexContext *lex, JsonSemAction *sem);
 static JsonParseErrorType parse_object(JsonLexContext *lex, JsonSemAction *sem);
@@ -179,7 +179,7 @@ JsonParseErrorType
 pg_parse_json(JsonLexContext *lex, JsonSemAction *sem)
 {
	JsonTokenType tok;
-	JsonParseErrorType result;
+	JsonParseErrorType result;
 
	/* get the initial token */
	result = json_lex(lex);
@@ -198,7 +198,7 @@ pg_parse_json(JsonLexContext *lex, JsonSemAction *sem)
			result = parse_array(lex, sem);
			break;
		default:
-			result = parse_scalar(lex, sem); /* json can be a bare scalar */
+			result = parse_scalar(lex, sem);	/* json can be a bare scalar */
	}
 
	if (result == JSON_SUCCESS)
@@ -220,7 +220,7 @@ json_count_array_elements(JsonLexContext *lex, int *elements)
 {
	JsonLexContext copylex;
	int			count;
-	JsonParseErrorType result;
+	JsonParseErrorType result;
 
	/*
	 * It's safe to do this with a shallow copy because the lexical routines
@@ -252,7 +252,7 @@ json_count_array_elements(JsonLexContext *lex, int *elements)
		}
	}
	result = lex_expect(JSON_PARSE_ARRAY_NEXT, &copylex,
-						JSON_TOKEN_ARRAY_END);
+						JSON_TOKEN_ARRAY_END);
	if (result != JSON_SUCCESS)
		return result;
 
@@ -527,7 +527,7 @@ json_lex(JsonLexContext *lex)
 {
	char	   *s;
	int			len;
-	JsonParseErrorType result;
+	JsonParseErrorType result;
 
	/* Skip leading whitespace. */
	s = lex->token_terminator;
@@ -1123,8 +1123,8 @@ json_errdetail(JsonParseErrorType error, JsonLexContext *lex)
 static char *
 extract_token(JsonLexContext *lex)
 {
-	int toklen = lex->token_terminator - lex->token_start;
-	char *token = palloc(toklen + 1);
+	int			toklen = lex->token_terminator - lex->token_start;
+	char	   *token = palloc(toklen + 1);
 
	memcpy(token, lex->token_start, toklen);
	token[toklen] = '\0';
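For context, pg_parse_json() is driven by a JsonSemAction callback table; passing the null action table turns the parser into a pure validator. A hedged sketch, assuming makeJsonLexContextCstringLen() and nullSemAction as declared in src/include/common/jsonapi.h at this point in v13:

#include <string.h>

#include "common/jsonapi.h"
#include "mb/pg_wchar.h"		/* for PG_UTF8 */

/* Returns NULL on success, else a detail message (hypothetical wrapper). */
static char *
validate_json(char *json)
{
	JsonLexContext *lex;
	JsonParseErrorType err;

	/* lex the string in place; no unescaped token copies needed */
	lex = makeJsonLexContextCstringLen(json, strlen(json), PG_UTF8, false);
	err = pg_parse_json(lex, &nullSemAction);
	return (err == JSON_SUCCESS) ? NULL : json_errdetail(err, lex);
}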
diff --git a/src/common/pg_lzcompress.c b/src/common/pg_lzcompress.c
index 80c86d7924e..d24d4803a98 100644
--- a/src/common/pg_lzcompress.c
+++ b/src/common/pg_lzcompress.c
@@ -820,7 +820,7 @@ pglz_decompress(const char *source, int32 slen, char *dest,
 int32
 pglz_maximum_compressed_size(int32 rawsize, int32 total_compressed_size)
 {
-	int32 compressed_size;
+	int32		compressed_size;
 
	/*
	 * pglz uses one control bit per byte, so we need (rawsize * 9) bits. We
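The control-bit arithmetic in that comment gives pglz a hard bound: at most nine bits of compressed input per decompressed byte. A hedged restatement of the reasoning as standalone C (names are mine, not the function's actual body):

#include <stdint.h>

/*
 * Upper bound on the compressed bytes that must be examined to reconstruct
 * 'rawsize' bytes: one control bit per output byte means rawsize * 9 bits,
 * rounded up to whole bytes, and never more than the whole compressed datum.
 */
static int32_t
max_compressed_bytes(int32_t rawsize, int32_t total_compressed_size)
{
	int32_t		bound = (rawsize * 9 + 7) / 8;	/* ceil(rawsize * 9 / 8) */

	return (bound < total_compressed_size) ? bound : total_compressed_size;
}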
diff --git a/src/common/scram-common.c b/src/common/scram-common.c
index fe84fe9eac7..4971134b22a 100644
--- a/src/common/scram-common.c
+++ b/src/common/scram-common.c
@@ -190,7 +190,7 @@ scram_ServerKey(const uint8 *salted_password, uint8 *result)
 */
 char *
 scram_build_secret(const char *salt, int saltlen, int iterations,
-				   const char *password)
+				   const char *password)
 {
	uint8		salted_password[SCRAM_KEY_LEN];
	uint8		stored_key[SCRAM_KEY_LEN];
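scram_build_secret() chains the RFC 5802 key derivation before serializing the result. A hedged outline using the helpers this header family exports in v13; scram_SaltedPassword() and scram_H() are assumed from common/scram-common.h (scram_ClientKey()/scram_ServerKey() appear in the hunks here), and this mirrors the derivation order, not the real function body:

#include "common/scram-common.h"

static void
derive_scram_keys(const char *password, const char *salt, int saltlen,
				  int iterations,
				  uint8 stored_key[SCRAM_KEY_LEN],
				  uint8 server_key[SCRAM_KEY_LEN])
{
	uint8		salted_password[SCRAM_KEY_LEN];
	uint8		client_key[SCRAM_KEY_LEN];

	/* SaltedPassword = Hi(password, salt, iterations) */
	scram_SaltedPassword(password, salt, saltlen, iterations, salted_password);
	scram_ClientKey(salted_password, client_key);
	scram_H(client_key, SCRAM_KEY_LEN, stored_key); /* StoredKey = H(ClientKey) */
	scram_ServerKey(salted_password, server_key);

	/*
	 * The secret is then serialized as
	 * SCRAM-SHA-256$<iterations>:<salt>$<StoredKey>:<ServerKey>
	 * with salt and keys base64-encoded.
	 */
}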
diff --git a/src/common/unicode/generate-norm_test_table.pl b/src/common/unicode/generate-norm_test_table.pl
index 6417b3f0cd1..acc67967b7b 100644
--- a/src/common/unicode/generate-norm_test_table.pl
+++ b/src/common/unicode/generate-norm_test_table.pl
@@ -94,7 +94,8 @@ while (my $line = <$INPUT>)
	my $nfkc_utf8 = codepoint_string_to_hex($nfkc);
	my $nfkd_utf8 = codepoint_string_to_hex($nfkd);
 
-	print $OUTPUT "\t{ $linenum, { $source_utf8 }, { { $nfc_utf8 }, { $nfd_utf8 }, { $nfkc_utf8 }, { $nfkd_utf8 } } },\n";
+	print $OUTPUT
+	  "\t{ $linenum, { $source_utf8 }, { { $nfc_utf8 }, { $nfd_utf8 }, { $nfkc_utf8 }, { $nfkd_utf8 } } },\n";
 }
 
 # Output terminator entry
diff --git a/src/common/unicode/generate-unicode_combining_table.pl b/src/common/unicode/generate-unicode_combining_table.pl
index e468a5f8c99..c0fc3cc2259 100644
--- a/src/common/unicode/generate-unicode_combining_table.pl
+++ b/src/common/unicode/generate-unicode_combining_table.pl
@@ -14,39 +14,40 @@ my $codepoint;
 my $prev_codepoint;
 my $count = 0;
 
-print "/* generated by src/common/unicode/generate-unicode_combining_table.pl, do not edit */\n\n";
+print
+  "/* generated by src/common/unicode/generate-unicode_combining_table.pl, do not edit */\n\n";
 
 print "static const struct mbinterval combining[] = {\n";
 
 foreach my $line (<ARGV>)
 {
-	chomp $line;
-	my @fields = split ';', $line;
-	$codepoint = hex $fields[0];
-
-	next if $codepoint > 0xFFFF;
-
-	if ($fields[2] eq 'Me' || $fields[2] eq 'Mn')
-	{
-		# combining character, save for start of range
-		if (!defined($range_start))
-		{
-			$range_start = $codepoint;
-		}
-	}
-	else
-	{
-		# not a combining character, print out previous range if any
-		if (defined($range_start))
-		{
-			printf "\t{0x%04X, 0x%04X},\n", $range_start, $prev_codepoint;
-			$range_start = undef;
-		}
-	}
+	chomp $line;
+	my @fields = split ';', $line;
+	$codepoint = hex $fields[0];
+
+	next if $codepoint > 0xFFFF;
+
+	if ($fields[2] eq 'Me' || $fields[2] eq 'Mn')
+	{
+		# combining character, save for start of range
+		if (!defined($range_start))
+		{
+			$range_start = $codepoint;
+		}
+	}
+	else
+	{
+		# not a combining character, print out previous range if any
+		if (defined($range_start))
+		{
+			printf "\t{0x%04X, 0x%04X},\n", $range_start, $prev_codepoint;
+			$range_start = undef;
+		}
+	}
 }
 continue
 {
-	$prev_codepoint = $codepoint;
+	$prev_codepoint = $codepoint;
 }
 
 print "};\n";
diff --git a/src/common/unicode/generate-unicode_norm_table.pl b/src/common/unicode/generate-unicode_norm_table.pl
index cd5f502d540..7ce15e1a039 100644
--- a/src/common/unicode/generate-unicode_norm_table.pl
+++ b/src/common/unicode/generate-unicode_norm_table.pl
@@ -138,7 +138,7 @@ foreach my $char (@characters)
	# Decomposition size
	# Print size of decomposition
	my $decomp_size = scalar(@decomp_elts);
-	die if $decomp_size > 0x1F; # to not overrun bitmask
+	die if $decomp_size > 0x1F;    # to not overrun bitmask
 
	my $first_decomp = shift @decomp_elts;
 
@@ -153,7 +153,7 @@ foreach my $char (@characters)
		if ($decomp_size == 2)
		{
			# Should this be used for recomposition?
-			if ($character_hash{$first_decomp}
+			if (   $character_hash{$first_decomp}
				&& $character_hash{$first_decomp}->{class} != 0)
			{
				$flags .= " | DECOMP_NO_COMPOSE";
diff --git a/src/common/unicode/generate-unicode_normprops_table.pl b/src/common/unicode/generate-unicode_normprops_table.pl
index c07a04a58aa..e8e5097c094 100644
--- a/src/common/unicode/generate-unicode_normprops_table.pl
+++ b/src/common/unicode/generate-unicode_normprops_table.pl
@@ -11,7 +11,8 @@ use warnings;
 
 my %data;
 
-print "/* generated by src/common/unicode/generate-unicode_normprops_table.pl, do not edit */\n\n";
+print
+  "/* generated by src/common/unicode/generate-unicode_normprops_table.pl, do not edit */\n\n";
 
 print <<EOS;
 #include "common/unicode_norm.h"
@@ -44,7 +45,7 @@ foreach my $line (<ARGV>)
		$first = $last = $codepoint;
	}
 
-	foreach my $cp (hex($first)..hex($last))
+	foreach my $cp (hex($first) .. hex($last))
	{
		$data{$prop}{$cp} = $value;
	}
@@ -61,9 +62,10 @@ foreach my $prop (sort keys %data)
	next if $prop eq "NFD_QC" || $prop eq "NFKD_QC";
 
	print "\n";
-	print "static const pg_unicode_normprops UnicodeNormProps_${prop}[] = {\n";
+	print
+	  "static const pg_unicode_normprops UnicodeNormProps_${prop}[] = {\n";
 
-	my %subdata = %{$data{$prop}};
+	my %subdata = %{ $data{$prop} };
	foreach my $cp (sort { $a <=> $b } keys %subdata)
	{
		my $qc;
diff --git a/src/common/unicode_norm.c b/src/common/unicode_norm.c
index 4f4c029075b..ab5ce593456 100644
--- a/src/common/unicode_norm.c
+++ b/src/common/unicode_norm.c
@@ -112,8 +112,8 @@ get_decomposed_size(pg_wchar code, bool compat)
	/*
	 * Fast path for Hangul characters not stored in tables to save memory as
	 * decomposition is algorithmic. See
-	 * https://www.unicode.org/reports/tr15/tr15-18.html, annex 10 for details on
-	 * the matter.
+	 * https://www.unicode.org/reports/tr15/tr15-18.html, annex 10 for details
+	 * on the matter.
	 */
	if (code >= SBASE && code < SBASE + SCOUNT)
	{
@@ -238,8 +238,8 @@ decompose_code(pg_wchar code, bool compat, pg_wchar **result, int *current)
	/*
	 * Fast path for Hangul characters not stored in tables to save memory as
	 * decomposition is algorithmic. See
-	 * https://www.unicode.org/reports/tr15/tr15-18.html, annex 10 for details on
-	 * the matter.
+	 * https://www.unicode.org/reports/tr15/tr15-18.html, annex 10 for details
+	 * on the matter.
	 */
	if (code >= SBASE && code < SBASE + SCOUNT)
	{
@@ -369,8 +369,8 @@ unicode_normalize(UnicodeNormalizationForm form, const pg_wchar *input)
			continue;
 
		/*
-		 * Per Unicode (https://www.unicode.org/reports/tr15/tr15-18.html) annex 4,
-		 * a sequence of two adjacent characters in a string is an
+		 * Per Unicode (https://www.unicode.org/reports/tr15/tr15-18.html)
+		 * annex 4, a sequence of two adjacent characters in a string is an
		 * exchangeable pair if the combining class (from the Unicode
		 * Character Database) for the first character is greater than the
		 * combining class for the second, and the second is not a starter. A
@@ -396,10 +396,10 @@ unicode_normalize(UnicodeNormalizationForm form, const pg_wchar *input)
		return decomp_chars;
 
	/*
-	 * The last phase of NFC and NFKC is the recomposition of the reordered Unicode
-	 * string using combining classes. The recomposed string cannot be longer
-	 * than the decomposed one, so make the allocation of the output string
-	 * based on that assumption.
+	 * The last phase of NFC and NFKC is the recomposition of the reordered
+	 * Unicode string using combining classes. The recomposed string cannot be
+	 * longer than the decomposed one, so make the allocation of the output
+	 * string based on that assumption.
	 */
	recomp_chars = (pg_wchar *) ALLOC((decomp_size + 1) * sizeof(pg_wchar));
	if (!recomp_chars)
@@ -551,4 +551,4 @@ unicode_is_normalized_quickcheck(UnicodeNormalizationForm form, const pg_wchar *
	return result;
 }
 
-#endif   /* !FRONTEND */
+#endif							/* !FRONTEND */
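The "decomposition is algorithmic" fast path those comments cite is the Hangul syllable arithmetic from Unicode TR15 annex 10. A hedged standalone sketch; the constants come from the Unicode standard, not from this file:

#define SBASE  0xAC00
#define LBASE  0x1100
#define VBASE  0x1161
#define TBASE  0x11A7
#define LCOUNT 19
#define VCOUNT 21
#define TCOUNT 28
#define NCOUNT (VCOUNT * TCOUNT)	/* 588 */
#define SCOUNT (LCOUNT * NCOUNT)	/* 11172 */

/* Decompose a precomposed Hangul syllable into 2 or 3 jamo codepoints. */
static int
hangul_decompose(unsigned int s, unsigned int out[3])
{
	unsigned int sindex = s - SBASE;	/* caller checks the SBASE..SCOUNT range */
	int			n = 0;

	out[n++] = LBASE + sindex / NCOUNT;				/* leading consonant */
	out[n++] = VBASE + (sindex % NCOUNT) / TCOUNT;	/* vowel */
	if (sindex % TCOUNT != 0)
		out[n++] = TBASE + sindex % TCOUNT;			/* trailing consonant */
	return n;
}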
diff --git a/src/include/access/tableam.h b/src/include/access/tableam.h
index 94903dd8de1..8c34935c343 100644
--- a/src/include/access/tableam.h
+++ b/src/include/access/tableam.h
@@ -482,9 +482,9 @@ typedef struct TableAmRoutine
											  double *tups_recently_dead);
 
	/*
-	 * React to VACUUM command on the relation. The VACUUM can be
-	 * triggered by a user or by autovacuum. The specific actions
-	 * performed by the AM will depend heavily on the individual AM.
+	 * React to VACUUM command on the relation. The VACUUM can be triggered by
+	 * a user or by autovacuum. The specific actions performed by the AM will
+	 * depend heavily on the individual AM.
	 *
	 * On entry a transaction is already established, and the relation is
	 * locked with a ShareUpdateExclusive lock.
@@ -586,7 +586,7 @@ typedef struct TableAmRoutine
	 * TOAST tables for this AM. If the relation_needs_toast_table callback
	 * always returns false, this callback is not required.
	 */
-	Oid (*relation_toast_am) (Relation rel);
+	Oid			(*relation_toast_am) (Relation rel);
 
	/*
	 * This callback is invoked when detoasting a value stored in a toast
diff --git a/src/include/access/visibilitymap.h b/src/include/access/visibilitymap.h
index 22eed9abdd9..b462e90ee8f 100644
--- a/src/include/access/visibilitymap.h
+++ b/src/include/access/visibilitymap.h
@@ -45,6 +45,6 @@ extern void visibilitymap_set(Relation rel, BlockNumber heapBlk, Buffer heapBuf,
 extern uint8 visibilitymap_get_status(Relation rel, BlockNumber heapBlk, Buffer *vmbuf);
 extern void visibilitymap_count(Relation rel, BlockNumber *all_visible, BlockNumber *all_frozen);
 extern BlockNumber visibilitymap_prepare_truncate(Relation rel,
-									BlockNumber nheapblocks);
+												  BlockNumber nheapblocks);
 
 #endif							/* VISIBILITYMAP_H */
diff --git a/src/include/catalog/pg_publication.h b/src/include/catalog/pg_publication.h
index ec02f48da0f..5955ba0cf21 100644
--- a/src/include/catalog/pg_publication.h
+++ b/src/include/catalog/pg_publication.h
@@ -98,7 +98,7 @@ typedef enum PublicationPartOpt
	PUBLICATION_PART_ROOT,
	PUBLICATION_PART_LEAF,
	PUBLICATION_PART_ALL,
-} PublicationPartOpt;
+}			PublicationPartOpt;
 
 extern List *GetPublicationRelations(Oid pubid, PublicationPartOpt pub_partopt);
 extern List *GetAllTablesPublications(void);
diff --git a/src/include/catalog/pg_statistic_ext.h b/src/include/catalog/pg_statistic_ext.h
index e9491a0a874..a8cb16997a7 100644
--- a/src/include/catalog/pg_statistic_ext.h
+++ b/src/include/catalog/pg_statistic_ext.h
@@ -41,7 +41,7 @@ CATALOG(pg_statistic_ext,3381,StatisticExtRelationId)
	Oid			stxnamespace;	/* OID of statistics object's namespace */
	Oid			stxowner;		/* statistics object's owner */
-	int32 stxstattarget BKI_DEFAULT(-1); /* statistics target */
+	int32		stxstattarget BKI_DEFAULT(-1);	/* statistics target */
 
	/*
	 * variable-length fields start here, but we allow direct access to
diff --git a/src/include/commands/dbcommands_xlog.h b/src/include/commands/dbcommands_xlog.h
index 4f01567eba9..7438d55ae7f 100644
--- a/src/include/commands/dbcommands_xlog.h
+++ b/src/include/commands/dbcommands_xlog.h
@@ -33,7 +33,7 @@ typedef struct xl_dbase_create_rec
 typedef struct xl_dbase_drop_rec
 {
	Oid			db_id;
-	int ntablespaces;			/* number of tablespace IDs */
+	int			ntablespaces;	/* number of tablespace IDs */
	Oid			tablespace_ids[FLEXIBLE_ARRAY_MEMBER];
 } xl_dbase_drop_rec;
 #define MinSizeOfDbaseDropRec offsetof(xl_dbase_drop_rec, tablespace_ids)
diff --git a/src/include/common/scram-common.h b/src/include/common/scram-common.h
index 1d24b7f67e8..2edae2dd3c0 100644
--- a/src/include/common/scram-common.h
+++ b/src/include/common/scram-common.h
@@ -65,6 +65,6 @@ extern void scram_ClientKey(const uint8 *salted_password, uint8 *result);
 extern void scram_ServerKey(const uint8 *salted_password, uint8 *result);
 
 extern char *scram_build_secret(const char *salt, int saltlen, int iterations,
-								const char *password);
+								const char *password);
 
 #endif							/* SCRAM_COMMON_H */
diff --git a/src/include/common/unicode_normprops_table.h b/src/include/common/unicode_normprops_table.h
index 74cfaa35bf1..93a2e55b758 100644
--- a/src/include/common/unicode_normprops_table.h
+++ b/src/include/common/unicode_normprops_table.h
@@ -9,7 +9,7 @@ typedef struct
 {
	unsigned int codepoint:21;
	signed int	quickcheck:4;	/* really UnicodeNormalizationQC */
-} pg_unicode_normprops;
+}			pg_unicode_normprops;
 
 static const pg_unicode_normprops UnicodeNormProps_NFC_QC[] = {
	{0x0300, UNICODE_NORM_QC_MAYBE},
diff --git a/src/include/executor/nodeAgg.h b/src/include/executor/nodeAgg.h
index c2b55728bfa..92c2337fd3a 100644
--- a/src/include/executor/nodeAgg.h
+++ b/src/include/executor/nodeAgg.h
@@ -281,10 +281,13 @@ typedef struct AggStatePerPhaseData
 
	ExprState  *evaltrans;		/* evaluation of transition functions */
 
-	/* cached variants of the compiled expression */
-	ExprState  *evaltrans_cache
-		[2]						/* 0: outerops; 1: TTSOpsMinimalTuple */
-		[2];					/* 0: no NULL check; 1: with NULL check */
+	/*----------
+	 * Cached variants of the compiled expression.
+	 * first subscript: 0: outerops; 1: TTSOpsMinimalTuple
+	 * second subscript: 0: no NULL check; 1: with NULL check
+	 *----------
+	 */
+	ExprState  *evaltrans_cache[2][2];
 }			AggStatePerPhaseData;
 
 /*
diff --git a/src/include/lib/simplehash.h b/src/include/lib/simplehash.h
index f7af921f5ac..90dfa8a695d 100644
--- a/src/include/lib/simplehash.h
+++ b/src/include/lib/simplehash.h
@@ -515,7 +515,7 @@ SH_GROW(SH_TYPE * tb, uint32 newsize)
 * This is a separate static inline function, so it can be reliably be inlined
 * into its wrapper functions even if SH_SCOPE is extern.
 */
-static inline SH_ELEMENT_TYPE *
+static inline SH_ELEMENT_TYPE *
 SH_INSERT_HASH_INTERNAL(SH_TYPE * tb, SH_KEY_TYPE key, uint32 hash, bool *found)
 {
	uint32		startelem;
@@ -689,7 +689,7 @@ restart:
 SH_SCOPE SH_ELEMENT_TYPE *
 SH_INSERT(SH_TYPE * tb, SH_KEY_TYPE key, bool *found)
 {
-	uint32 hash = SH_HASH_KEY(tb, key);
+	uint32		hash = SH_HASH_KEY(tb, key);
 
	return SH_INSERT_HASH_INTERNAL(tb, key, hash, found);
 }
@@ -709,7 +709,7 @@ SH_INSERT_HASH(SH_TYPE * tb, SH_KEY_TYPE key, uint32 hash, bool *found)
 * This is a separate static inline function, so it can be reliably be inlined
 * into its wrapper functions even if SH_SCOPE is extern.
 */
-static inline SH_ELEMENT_TYPE *
+static inline SH_ELEMENT_TYPE *
 SH_LOOKUP_HASH_INTERNAL(SH_TYPE * tb, SH_KEY_TYPE key, uint32 hash)
 {
	const uint32 startelem = SH_INITIAL_BUCKET(tb, hash);
@@ -746,7 +746,7 @@ SH_LOOKUP_HASH_INTERNAL(SH_TYPE * tb, SH_KEY_TYPE key, uint32 hash)
 SH_SCOPE SH_ELEMENT_TYPE *
 SH_LOOKUP(SH_TYPE * tb, SH_KEY_TYPE key)
 {
-	uint32 hash = SH_HASH_KEY(tb, key);
+	uint32		hash = SH_HASH_KEY(tb, key);
 
	return SH_LOOKUP_HASH_INTERNAL(tb, key, hash);
 }
@@ -991,8 +991,8 @@ SH_STAT(SH_TYPE * tb)
	}
 
	sh_log("size: " UINT64_FORMAT ", members: %u, filled: %f, total chain: %u, max chain: %u, avg chain: %f, total_collisions: %u, max_collisions: %i, avg_collisions: %f",
-		   tb->size, tb->members, fillfactor, total_chain_length, max_chain_length, avg_chain_length,
-		   total_collisions, max_collisions, avg_collisions);
+		   tb->size, tb->members, fillfactor, total_chain_length, max_chain_length, avg_chain_length,
+		   total_collisions, max_collisions, avg_collisions);
 }
 
 #endif							/* SH_DEFINE */
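SH_INSERT/SH_LOOKUP above are stamped out once per instantiation of the header. A hedged sketch of a minimal instantiation, assuming hash_bytes_uint32() from common/hashfn.h; the element's status field is simplehash's own bookkeeping slot:

#include "postgres.h"
#include "common/hashfn.h"

typedef struct DemoEntry
{
	uint32		key;			/* lookup key (named by SH_KEY below) */
	int			value;			/* payload */
	char		status;			/* used internally by simplehash */
} DemoEntry;

#define SH_PREFIX demo
#define SH_ELEMENT_TYPE DemoEntry
#define SH_KEY_TYPE uint32
#define SH_KEY key
#define SH_HASH_KEY(tb, k) hash_bytes_uint32(k)
#define SH_EQUAL(tb, a, b) ((a) == (b))
#define SH_SCOPE static inline
#define SH_DECLARE
#define SH_DEFINE
#include "lib/simplehash.h"

Including the header with those macros set generates demo_create(), demo_insert(), demo_lookup() and friends with the chosen prefix, which is why the wrapper functions discussed in the comments must be inlinable even when SH_SCOPE is extern.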
diff --git a/src/include/libpq/libpq-be.h b/src/include/libpq/libpq-be.h
index ffd943ac3bd..179ebaa104b 100644
--- a/src/include/libpq/libpq-be.h
+++ b/src/include/libpq/libpq-be.h
@@ -289,7 +289,7 @@ extern char *be_tls_get_certificate_hash(Port *port, size_t *len);
 
 /* init hook for SSL, the default sets the password callback if appropriate */
 #ifdef USE_OPENSSL
-typedef void(* openssl_tls_init_hook_typ)(SSL_CTX *context, bool isServerStart);
+typedef void (*openssl_tls_init_hook_typ) (SSL_CTX *context, bool isServerStart);
 extern PGDLLIMPORT openssl_tls_init_hook_typ openssl_tls_init_hook;
 #endif
 
diff --git a/src/include/libpq/scram.h b/src/include/libpq/scram.h
index 63eccd4a77b..83f1bbc996c 100644
--- a/src/include/libpq/scram.h
+++ b/src/include/libpq/scram.h
@@ -30,7 +30,7 @@ extern int pg_be_scram_exchange(void *opaq, const char *input, int inputlen,
 /* Routines to handle and check SCRAM-SHA-256 secret */
 extern char *pg_be_scram_build_secret(const char *password);
 extern bool parse_scram_secret(const char *secret, int *iterations, char **salt,
-							   uint8 *stored_key, uint8 *server_key);
+							   uint8 *stored_key, uint8 *server_key);
 extern bool scram_verify_plain_password(const char *username,
										const char *password, const char *secret);
diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h
index 4fee043bb2b..98e0072b8ad 100644
--- a/src/include/nodes/execnodes.h
+++ b/src/include/nodes/execnodes.h
@@ -2036,7 +2036,7 @@ typedef struct IncrementalSortGroupInfo
	long		totalDiskSpaceUsed;
	long		maxMemorySpaceUsed;
	long		totalMemorySpaceUsed;
-	bits32 sortMethods;			/* bitmask of TuplesortMethod */
+	bits32		sortMethods;	/* bitmask of TuplesortMethod */
 } IncrementalSortGroupInfo;
 
 typedef struct IncrementalSortInfo
@@ -2161,23 +2161,23 @@ typedef struct AggState
	/* these fields are used in AGG_HASHED and AGG_MIXED modes: */
	bool		table_filled;	/* hash table filled yet? */
	int			num_hashes;
-	MemoryContext hash_metacxt; /* memory for hash table itself */
+	MemoryContext hash_metacxt; /* memory for hash table itself */
	struct HashTapeInfo *hash_tapeinfo;	/* metadata for spill tapes */
-	struct HashAggSpill *hash_spills; /* HashAggSpill for each grouping set,
-									   exists only during first pass */
-	TupleTableSlot *hash_spill_slot; /* slot for reading from spill files */
+	struct HashAggSpill *hash_spills;	/* HashAggSpill for each grouping set,
+										 * exists only during first pass */
+	TupleTableSlot *hash_spill_slot;	/* slot for reading from spill files */
	List	   *hash_batches;	/* hash batches remaining to be processed */
	bool		hash_ever_spilled;	/* ever spilled during this execution? */
	bool		hash_spill_mode;	/* we hit a limit during the current batch
-									 and we must not create new groups */
-	Size		hash_mem_limit;	/* limit before spilling hash table */
-	uint64		hash_ngroups_limit;	/* limit before spilling hash table */
-	int			hash_planned_partitions; /* number of partitions planned
-										  for first pass */
+									 * and we must not create new groups */
+	Size		hash_mem_limit; /* limit before spilling hash table */
+	uint64		hash_ngroups_limit; /* limit before spilling hash table */
+	int			hash_planned_partitions;	/* number of partitions planned
+											 * for first pass */
	double		hashentrysize;	/* estimate revised during execution */
	Size		hash_mem_peak;	/* peak hash table memory usage */
	uint64		hash_ngroups_current;	/* number of groups currently in
-										 memory in all hash tables */
+										 * memory in all hash tables */
	uint64		hash_disk_used; /* kB of disk space used */
	int			hash_batches_used;	/* batches used during entire execution */
diff --git a/src/include/nodes/params.h b/src/include/nodes/params.h
index 96e5825a6b2..4898d90848e 100644
--- a/src/include/nodes/params.h
+++ b/src/include/nodes/params.h
@@ -115,7 +115,7 @@ typedef struct ParamListInfoData
	void	   *paramCompileArg;
	ParserSetupHook parserSetup;	/* parser setup hook */
	void	   *parserSetupArg;
-	char *paramValuesStr;		/* params as a single string for errors */
+	char	   *paramValuesStr; /* params as a single string for errors */
	int			numParams;		/* nominal/maximum # of Params represented */
 
 /*
@@ -153,7 +153,7 @@ typedef struct ParamExecData
 /* type of argument for ParamsErrorCallback */
 typedef struct ParamsErrorCbData
 {
-	const char *portalName;
+	const char *portalName;
	ParamListInfo params;
 } ParamsErrorCbData;
 
@@ -164,7 +164,7 @@ extern Size EstimateParamListSpace(ParamListInfo paramLI);
 extern void SerializeParamList(ParamListInfo paramLI, char **start_address);
 extern ParamListInfo RestoreParamList(char **start_address);
 extern char *BuildParamLogString(ParamListInfo params, char **paramTextValues,
-								 int valueLen);
+								 int valueLen);
 extern void ParamsErrorCallback(void *arg);
 
 #endif							/* PARAMS_H */
diff --git a/src/include/nodes/pathnodes.h b/src/include/nodes/pathnodes.h
index 1c83772d62b..485d1b06c91 100644
--- a/src/include/nodes/pathnodes.h
+++ b/src/include/nodes/pathnodes.h
@@ -740,8 +740,8 @@ typedef struct RelOptInfo
 
	/* used for partitioned relations: */
	PartitionScheme part_scheme;	/* Partitioning scheme */
-	int nparts;					/* Number of partitions; -1 if not yet set;
-								 * in case of a join relation 0 means it's
+	int			nparts;			/* Number of partitions; -1 if not yet set; in
+								 * case of a join relation 0 means it's
								 * considered unpartitioned */
	struct PartitionBoundInfoData *boundinfo;	/* Partition bounds */
	bool		partbounds_merged;	/* True if partition bounds were created
@@ -1654,7 +1654,7 @@ typedef struct SortPath
 typedef struct IncrementalSortPath
 {
	SortPath	spath;
-	int nPresortedCols;			/* number of presorted columns */
+	int			nPresortedCols; /* number of presorted columns */
 } IncrementalSortPath;
 
 /*
diff --git a/src/include/nodes/plannodes.h b/src/include/nodes/plannodes.h
index 55f363f70c5..83e01074ed1 100644
--- a/src/include/nodes/plannodes.h
+++ b/src/include/nodes/plannodes.h
@@ -781,7 +781,7 @@ typedef struct Sort
 typedef struct IncrementalSort
 {
	Sort		sort;
-	int nPresortedCols;			/* number of presorted columns */
+	int			nPresortedCols; /* number of presorted columns */
 } IncrementalSort;
 
 /* ---------------
diff --git a/src/include/port.h b/src/include/port.h
index 20a5de1b7a7..271ff0d00bc 100644
--- a/src/include/port.h
+++ b/src/include/port.h
@@ -383,7 +383,7 @@ extern float pg_strtof(const char *nptr, char **endptr);
 #endif
 
 #ifndef HAVE_LINK
-extern int link(const char *src, const char *dst);
+extern int	link(const char *src, const char *dst);
 #endif
 
 #ifndef HAVE_MKDTEMP
diff --git a/src/include/port/win32.h b/src/include/port/win32.h
index c280c131c03..d8ae49e22d1 100644
--- a/src/include/port/win32.h
+++ b/src/include/port/win32.h
@@ -64,6 +64,6 @@
 struct sockaddr_un
 {
	unsigned short sun_family;
-	char sun_path[108];
+	char		sun_path[108];
 };
 #define HAVE_STRUCT_SOCKADDR_UN 1
 
diff --git a/src/include/replication/backup_manifest.h b/src/include/replication/backup_manifest.h
index 06b114f3d77..fb1291cbe4d 100644
--- a/src/include/replication/backup_manifest.h
+++ b/src/include/replication/backup_manifest.h
@@ -42,7 +42,7 @@ extern void AddFileToBackupManifest(backup_manifest_info *manifest,
									const char *spcoid,
									const char *pathname, size_t size,
									pg_time_t mtime,
-									pg_checksum_context * checksum_ctx);
+									pg_checksum_context *checksum_ctx);
 extern void AddWALInfoToBackupManifest(backup_manifest_info *manifest,
									   XLogRecPtr startptr,
									   TimeLineID starttli, XLogRecPtr endptr,
diff --git a/src/include/replication/logicalrelation.h b/src/include/replication/logicalrelation.h
index 4650b4f9e1b..a6b44b12bd1 100644
--- a/src/include/replication/logicalrelation.h
+++ b/src/include/replication/logicalrelation.h
@@ -35,7 +35,7 @@ extern void logicalrep_relmap_update(LogicalRepRelation *remoterel);
 extern LogicalRepRelMapEntry *logicalrep_rel_open(LogicalRepRelId remoteid,
												  LOCKMODE lockmode);
 extern LogicalRepRelMapEntry *logicalrep_partition_open(LogicalRepRelMapEntry *root,
-									Relation partrel, AttrMap *map);
+														Relation partrel, AttrMap *map);
 extern void logicalrep_rel_close(LogicalRepRelMapEntry *rel,
								 LOCKMODE lockmode);
 
diff --git a/src/include/replication/walreceiver.h b/src/include/replication/walreceiver.h
index f1aa6e9977c..ac1acbb27ec 100644
--- a/src/include/replication/walreceiver.h
+++ b/src/include/replication/walreceiver.h
@@ -74,11 +74,11 @@ typedef struct
	TimeLineID	receiveStartTLI;
 
	/*
-	 * flushedUpto-1 is the last byte position that has already been
-	 * received, and receivedTLI is the timeline it came from.  At the first
-	 * startup of walreceiver, these are set to receiveStart and
-	 * receiveStartTLI. After that, walreceiver updates these whenever it
-	 * flushes the received WAL to disk.
+	 * flushedUpto-1 is the last byte position that has already been received,
+	 * and receivedTLI is the timeline it came from.  At the first startup of
+	 * walreceiver, these are set to receiveStart and receiveStartTLI. After
+	 * that, walreceiver updates these whenever it flushes the received WAL to
+	 * disk.
	 */
	XLogRecPtr	flushedUpto;
	TimeLineID	receivedTLI;
diff --git a/src/include/statistics/extended_stats_internal.h b/src/include/statistics/extended_stats_internal.h
index 2b14ab238cb..61e69696cfe 100644
--- a/src/include/statistics/extended_stats_internal.h
+++ b/src/include/statistics/extended_stats_internal.h
@@ -35,7 +35,7 @@ typedef struct DimensionInfo
 {
	int			nvalues;		/* number of deduplicated values */
	int			nbytes;			/* number of bytes (serialized) */
-	int nbytes_aligned;			/* size of deserialized data with alignment */
+	int			nbytes_aligned; /* size of deserialized data with alignment */
	int			typlen;			/* pg_type.typlen */
	bool		typbyval;		/* pg_type.typbyval */
 } DimensionInfo;
diff --git a/src/include/statistics/statistics.h b/src/include/statistics/statistics.h
index f5d9b6c73a9..50fce4935f3 100644
--- a/src/include/statistics/statistics.h
+++ b/src/include/statistics/statistics.h
@@ -100,8 +100,8 @@ extern MCVList *statext_mcv_load(Oid mvoid);
 extern void BuildRelationExtStatistics(Relation onerel, double totalrows,
									   int numrows, HeapTuple *rows,
									   int natts, VacAttrStats **vacattrstats);
-extern int ComputeExtStatisticsRows(Relation onerel,
-									int natts, VacAttrStats **stats);
+extern int	ComputeExtStatisticsRows(Relation onerel,
									 int natts, VacAttrStats **stats);
 extern bool statext_is_kind_built(HeapTuple htup, char kind);
 extern Selectivity dependencies_clauselist_selectivity(PlannerInfo *root,
													   List *clauses,
diff --git a/src/include/storage/shmem.h b/src/include/storage/shmem.h
index 0c1af892062..e9e32ab8f1f 100644
--- a/src/include/storage/shmem.h
+++ b/src/include/storage/shmem.h
@@ -60,7 +60,7 @@ typedef struct
	char		key[SHMEM_INDEX_KEYSIZE];	/* string name */
	void	   *location;		/* location in shared mem */
	Size		size;			/* # bytes requested for the structure */
-	Size allocated_size;		/* # bytes actually allocated */
+	Size		allocated_size; /* # bytes actually allocated */
 } ShmemIndexEnt;
 
 /*
diff --git a/src/include/utils/lsyscache.h b/src/include/utils/lsyscache.h
index c9c68e2f4f8..91aed1f5a51 100644
--- a/src/include/utils/lsyscache.h
+++ b/src/include/utils/lsyscache.h
@@ -183,7 +183,7 @@ extern char *get_namespace_name_or_temp(Oid nspid);
 extern Oid	get_range_subtype(Oid rangeOid);
 extern Oid	get_range_collation(Oid rangeOid);
 extern Oid	get_index_column_opclass(Oid index_oid, int attno);
-extern bool get_index_isreplident(Oid index_oid);
+extern bool get_index_isreplident(Oid index_oid);
 extern bool get_index_isvalid(Oid index_oid);
 extern bool get_index_isclustered(Oid index_oid);
 
diff --git a/src/include/utils/rangetypes.h b/src/include/utils/rangetypes.h
index 0cbbf090333..b77c41cf1b8 100644
--- a/src/include/utils/rangetypes.h
+++ b/src/include/utils/rangetypes.h
@@ -128,10 +128,10 @@ extern char range_get_flags(const RangeType *range);
 extern void range_set_contain_empty(RangeType *range);
 extern RangeType *make_range(TypeCacheEntry *typcache, RangeBound *lower,
							 RangeBound *upper, bool empty);
-extern int range_cmp_bounds(TypeCacheEntry *typcache, const RangeBound *b1,
-							const RangeBound *b2);
-extern int range_cmp_bound_values(TypeCacheEntry *typcache, const RangeBound *b1,
-								  const RangeBound *b2);
+extern int	range_cmp_bounds(TypeCacheEntry *typcache, const RangeBound *b1,
							 const RangeBound *b2);
+extern int	range_cmp_bound_values(TypeCacheEntry *typcache, const RangeBound *b1,
								   const RangeBound *b2);
 extern bool bounds_adjacent(TypeCacheEntry *typcache, RangeBound bound1,
							RangeBound bound2);
 extern RangeType *make_empty_range(TypeCacheEntry *typcache);
diff --git a/src/interfaces/ecpg/compatlib/informix.c b/src/interfaces/ecpg/compatlib/informix.c
index b2a19a1dd3a..0bca383ebe5 100644
--- a/src/interfaces/ecpg/compatlib/informix.c
+++ b/src/interfaces/ecpg/compatlib/informix.c
@@ -186,8 +186,8 @@ deccvasc(const char *cp, int len, decimal *np)
	if (risnull(CSTRINGTYPE, cp))
		return 0;
 
-	str = pnstrdup(cp, len);	/* decimal_in always converts the complete
-								 * string */
+	str = pnstrdup(cp, len);	/* decimal_in always converts the complete
								 * string */
	if (!str)
		ret = ECPG_INFORMIX_NUM_UNDERFLOW;
	else
diff --git a/src/interfaces/ecpg/pgtypeslib/dt_common.c b/src/interfaces/ecpg/pgtypeslib/dt_common.c
index c1a3a3e2cb7..81bd7aa526f 100644
--- a/src/interfaces/ecpg/pgtypeslib/dt_common.c
+++ b/src/interfaces/ecpg/pgtypeslib/dt_common.c
@@ -1039,7 +1039,8 @@ abstime2tm(AbsoluteTime _time, int *tzp, struct tm *tm, char **tzn)
	}
	else
		tm->tm_isdst = -1;
-#else							/* not (HAVE_STRUCT_TM_TM_ZONE || HAVE_INT_TIMEZONE) */
+#else							/* not (HAVE_STRUCT_TM_TM_ZONE ||
								 * HAVE_INT_TIMEZONE) */
	if (tzp != NULL)
	{
		/* default to UTC */
diff --git a/src/interfaces/ecpg/pgtypeslib/timestamp.c b/src/interfaces/ecpg/pgtypeslib/timestamp.c
index ddb82a1ad6e..3f82ee54a5f 100644
--- a/src/interfaces/ecpg/pgtypeslib/timestamp.c
+++ b/src/interfaces/ecpg/pgtypeslib/timestamp.c
@@ -158,7 +158,8 @@ timestamp2tm(timestamp dt, int *tzp, struct tm *tm, fsec_t *fsec, const char **t
		if (tzn != NULL)
			*tzn = TZNAME_GLOBAL[(tm->tm_isdst > 0)];
 #endif
-#else							/* not (HAVE_STRUCT_TM_TM_ZONE || HAVE_INT_TIMEZONE) */
+#else							/* not (HAVE_STRUCT_TM_TM_ZONE ||
								 * HAVE_INT_TIMEZONE) */
 
	*tzp = 0;
	/* Mark this as *no* time zone available */
	tm->tm_isdst = -1;
diff --git a/src/interfaces/libpq/fe-auth-scram.c b/src/interfaces/libpq/fe-auth-scram.c
index 69fcbdf6e2e..6d266e97965 100644
--- a/src/interfaces/libpq/fe-auth-scram.c
+++ b/src/interfaces/libpq/fe-auth-scram.c
@@ -851,7 +851,7 @@ pg_fe_scram_build_secret(const char *password)
	}
 
	result = scram_build_secret(saltbuf, SCRAM_DEFAULT_SALT_LEN,
-								SCRAM_DEFAULT_ITERATIONS, password);
+								SCRAM_DEFAULT_ITERATIONS, password);
 
	if (prep_password)
		free(prep_password);
diff --git a/src/interfaces/libpq/fe-secure-openssl.c b/src/interfaces/libpq/fe-secure-openssl.c
index 6d36e1439ef..df1ac209f91 100644
--- a/src/interfaces/libpq/fe-secure-openssl.c
+++ b/src/interfaces/libpq/fe-secure-openssl.c
@@ -71,7 +71,7 @@ static int	initialize_SSL(PGconn *conn);
 static PostgresPollingStatusType open_client_SSL(PGconn *);
 static char *SSLerrmessage(unsigned long ecode);
 static void SSLerrfree(char *buf);
-static int PQssl_passwd_cb(char *buf, int size, int rwflag, void *userdata);
+static int	PQssl_passwd_cb(char *buf, int size, int rwflag, void *userdata);
 
 static int	my_sock_read(BIO *h, char *buf, int size);
 static int	my_sock_write(BIO *h, const char *buf, int size);
@@ -819,17 +819,16 @@ initialize_SSL(PGconn *conn)
	}
 
	/*
-	 * Delegate the client cert password prompt to the libpq wrapper
-	 * callback if any is defined.
+	 * Delegate the client cert password prompt to the libpq wrapper callback
+	 * if any is defined.
	 *
	 * If the application hasn't installed its own and the sslpassword
-	 * parameter is non-null, we install ours now to make sure we
-	 * supply PGconn->sslpassword to OpenSSL instead of letting it
-	 * prompt on stdin.
+	 * parameter is non-null, we install ours now to make sure we supply
+	 * PGconn->sslpassword to OpenSSL instead of letting it prompt on stdin.
	 *
-	 * This will replace OpenSSL's default PEM_def_callback (which
-	 * prompts on stdin), but we're only setting it for this SSL
-	 * context so it's harmless.
+	 * This will replace OpenSSL's default PEM_def_callback (which prompts on
+	 * stdin), but we're only setting it for this SSL context so it's
+	 * harmless.
	 */
	if (PQsslKeyPassHook
		|| (conn->sslpassword && strlen(conn->sslpassword) > 0))
@@ -1205,14 +1204,14 @@ initialize_SSL(PGconn *conn)
			/*
			 * We'll try to load the file in DER (binary ASN.1) format, and if
			 * that fails too, report the original error. This could mask
-			 * issues where there's something wrong with a DER-format cert, but
-			 * we'd have to duplicate openssl's format detection to be smarter
-			 * than this. We can't just probe for a leading -----BEGIN because
-			 * PEM can have leading non-matching lines and blanks. OpenSSL
-			 * doesn't expose its get_name(...) and its PEM routines don't
-			 * differentiate between failure modes in enough detail to let us
-			 * tell the difference between "not PEM, try DER" and "wrong
-			 * password".
+			 * issues where there's something wrong with a DER-format cert,
+			 * but we'd have to duplicate openssl's format detection to be
+			 * smarter than this. We can't just probe for a leading -----BEGIN
+			 * because PEM can have leading non-matching lines and blanks.
+			 * OpenSSL doesn't expose its get_name(...) and its PEM routines
+			 * don't differentiate between failure modes in enough detail to
+			 * let us tell the difference between "not PEM, try DER" and
+			 * "wrong password".
			 */
			if (SSL_use_PrivateKey_file(conn->ssl, fnbuf, SSL_FILETYPE_ASN1) != 1)
			{
@@ -1677,7 +1676,7 @@ PQdefaultSSLKeyPassHook(char *buf, int size, PGconn *conn)
		if (strlen(conn->sslpassword) + 1 > size)
			fprintf(stderr, libpq_gettext("WARNING: sslpassword truncated\n"));
		strncpy(buf, conn->sslpassword, size);
-		buf[size-1] = '\0';
+		buf[size - 1] = '\0';
		return strlen(buf);
	}
	else
@@ -1707,7 +1706,7 @@ PQsetSSLKeyPassHook(PQsslKeyPassHook_type hook)
 static int
 PQssl_passwd_cb(char *buf, int size, int rwflag, void *userdata)
 {
-	PGconn *conn = userdata;
+	PGconn	   *conn = userdata;
 
	if (PQsslKeyPassHook)
		return PQsslKeyPassHook(buf, size, conn);
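PQssl_passwd_cb() is what libpq registers with OpenSSL; applications hook in one level higher via PQsetSSLKeyPassHook(). A hedged sketch of installing a key-passphrase callback with the API reformatted here; the environment-variable passphrase source is invented for illustration:

#include <stdlib.h>
#include <string.h>

#include "libpq-fe.h"

static int
my_ssl_key_pass(char *buf, int size, PGconn *conn)
{
	const char *pw = getenv("MY_SSLKEY_PASSPHRASE");	/* hypothetical source */

	if (pw == NULL || (int) strlen(pw) + 1 > size)
		return PQdefaultSSLKeyPassHook(buf, size, conn);	/* fall back */
	strncpy(buf, pw, size);
	buf[size - 1] = '\0';
	return (int) strlen(buf);
}

/* during application startup, before the first connection attempt: */
/*   PQsetSSLKeyPassHook(my_ssl_key_pass); */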
*/ -typedef int (*PQsslKeyPassHook_type)(char *buf, int size, PGconn *conn); +typedef int (*PQsslKeyPassHook_type) (char *buf, int size, PGconn *conn); extern PQsslKeyPassHook_type PQgetSSLKeyPassHook(void); extern void PQsetSSLKeyPassHook(PQsslKeyPassHook_type hook); -extern int PQdefaultSSLKeyPassHook(char *buf, int size, PGconn *conn); +extern int PQdefaultSSLKeyPassHook(char *buf, int size, PGconn *conn); #ifdef __cplusplus } diff --git a/src/pl/tcl/pltcl.c b/src/pl/tcl/pltcl.c index 99d11c8e3b7..24d4b57f1a5 100644 --- a/src/pl/tcl/pltcl.c +++ b/src/pl/tcl/pltcl.c @@ -768,7 +768,10 @@ pltcl_handler(PG_FUNCTION_ARGS, bool pltrusted) PG_FINALLY(); { /* Restore static pointer, then clean up the prodesc refcount if any */ - /* (We're being paranoid in case an error is thrown in context deletion) */ + /* + * (We're being paranoid in case an error is thrown in context + * deletion) + */ pltcl_current_call_state = save_call_state; if (current_call_state.prodesc != NULL) { @@ -2780,9 +2783,8 @@ pltcl_SPI_execute_plan(ClientData cdata, Tcl_Interp *interp, if (callObjc != qdesc->nargs) { Tcl_SetObjResult(interp, - Tcl_NewStringObj( - "argument list length doesn't match number of arguments for query" - ,-1)); + Tcl_NewStringObj("argument list length doesn't match number of arguments for query", + -1)); return TCL_ERROR; } } diff --git a/src/port/explicit_bzero.c b/src/port/explicit_bzero.c index 35e6d884a3a..6bd8b0dd9d2 100644 --- a/src/port/explicit_bzero.c +++ b/src/port/explicit_bzero.c @@ -44,7 +44,7 @@ bzero2(void *buf, size_t len) memset(buf, 0, len); } -static void (* volatile bzero_p)(void *, size_t) = bzero2; +static void (*volatile bzero_p) (void *, size_t) = bzero2; void explicit_bzero(void *buf, size_t len) diff --git a/src/test/authentication/t/001_password.pl b/src/test/authentication/t/001_password.pl index b8d6cc52e9d..82536eb60fb 100644 --- a/src/test/authentication/t/001_password.pl +++ b/src/test/authentication/t/001_password.pl @@ -12,7 +12,8 @@ use TestLib; use Test::More; if (!$use_unix_sockets) { - plan skip_all => "authentication tests cannot run without Unix-domain sockets"; + plan skip_all => + "authentication tests cannot run without Unix-domain sockets"; } else { diff --git a/src/test/authentication/t/002_saslprep.pl b/src/test/authentication/t/002_saslprep.pl index bf57933d94b..32d4e43fc7d 100644 --- a/src/test/authentication/t/002_saslprep.pl +++ b/src/test/authentication/t/002_saslprep.pl @@ -9,7 +9,8 @@ use TestLib; use Test::More; if (!$use_unix_sockets) { - plan skip_all => "authentication tests cannot run without Unix-domain sockets"; + plan skip_all => + "authentication tests cannot run without Unix-domain sockets"; } else { diff --git a/src/test/modules/dummy_index_am/dummy_index_am.c b/src/test/modules/dummy_index_am/dummy_index_am.c index f32632089b1..e97a32d5be2 100644 --- a/src/test/modules/dummy_index_am/dummy_index_am.c +++ b/src/test/modules/dummy_index_am/dummy_index_am.c @@ -31,10 +31,11 @@ relopt_parse_elt di_relopt_tab[6]; /* Kind of relation options for dummy index */ relopt_kind di_relopt_kind; -typedef enum DummyAmEnum { +typedef enum DummyAmEnum +{ DUMMY_AM_ENUM_ONE, DUMMY_AM_ENUM_TWO -} DummyAmEnum; +} DummyAmEnum; /* Dummy index options */ typedef struct DummyIndexOptions @@ -43,16 +44,16 @@ typedef struct DummyIndexOptions int option_int; double option_real; bool option_bool; - DummyAmEnum option_enum; + DummyAmEnum option_enum; int option_string_val_offset; int option_string_null_offset; -} DummyIndexOptions; +} DummyIndexOptions; 
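[Editor's note on the fe-secure-openssl.c hunk above] The reflowed comment describes libpq's private-key loading fallback: try the file as PEM first, and only if that fails retry it as DER (binary ASN.1), reporting the original PEM error since the two failure modes can't be told apart. A minimal sketch of that pattern under illustrative names (load_private_key, ecode are not libpq's):

#include <openssl/err.h>
#include <openssl/ssl.h>

/*
 * Try a client key as PEM, then fall back to DER.  On failure, *ecode
 * holds the error from the PEM attempt, the more useful one to report.
 * Sketch only; error texts and cleanup are omitted.
 */
static int
load_private_key(SSL *ssl, const char *fname, unsigned long *ecode)
{
	if (SSL_use_PrivateKey_file(ssl, fname, SSL_FILETYPE_PEM) == 1)
		return 0;

	/* stash the PEM error before the DER retry clobbers the queue */
	*ecode = ERR_get_error();

	if (SSL_use_PrivateKey_file(ssl, fname, SSL_FILETYPE_ASN1) == 1)
		return 0;

	return -1;					/* caller reports *ecode */
}

As the comment notes, probing for a leading -----BEGIN header would not be a reliable format test, since PEM files may carry leading non-matching lines and blanks.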
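[Editor's note on the libpq-fe.h hunk above] The declarations being reindented are the sslpassword-override API, as spelled at the time of this commit: an application installs a PQsslKeyPassHook_type callback with PQsetSSLKeyPassHook() and may delegate to PQdefaultSSLKeyPassHook() for the stock behavior. A hedged sketch of a caller-side hook; the function name and the MY_SSLKEY_PASSPHRASE environment variable are illustrative assumptions:

#include <stdlib.h>
#include <string.h>
#include "libpq-fe.h"

/*
 * Supply the client key passphrase from the environment, falling back
 * to libpq's default (the connection's sslpassword parameter).
 * Illustrative sketch, not part of libpq.
 */
static int
my_key_pass_hook(char *buf, int size, PGconn *conn)
{
	const char *pw = getenv("MY_SSLKEY_PASSPHRASE");

	if (pw == NULL)
		return PQdefaultSSLKeyPassHook(buf, size, conn);

	strncpy(buf, pw, size);
	buf[size - 1] = '\0';
	return (int) strlen(buf);
}

int
main(void)
{
	/* install before the first SSL connection attempt */
	PQsetSSLKeyPassHook(my_key_pass_hook);
	/* ... then PQconnectdb("... sslkey=client.key ...") as usual ... */
	return 0;
}

Per the initialize_SSL() hunk above, such a hook (or a non-empty sslpassword) replaces OpenSSL's default PEM_def_callback, so the key passphrase is never prompted for on stdin.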
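[Editor's note on the src/port/explicit_bzero.c hunk above] The line being reindented is the heart of the fallback implementation: the zeroing call is routed through a volatile function pointer, so the compiler cannot prove which function runs at the call site and must keep what would otherwise be a removable dead store. A standalone sketch of the same technique, with illustrative names (wipe2, wipe_p, secure_wipe):

#include <string.h>

static void
wipe2(void *buf, size_t len)
{
	memset(buf, 0, len);
}

/* volatile forces an opaque indirect call the optimizer must preserve */
static void (*volatile wipe_p) (void *, size_t) = wipe2;

void
secure_wipe(void *buf, size_t len)
{
	(*wipe_p) (buf, len);
}

A direct memset(buf, 0, len) just before buf goes out of scope is exactly the store compilers are entitled to delete; the indirection is what makes the wipe stick.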
relopt_enum_elt_def dummyAmEnumValues[] = { {"one", DUMMY_AM_ENUM_ONE}, {"two", DUMMY_AM_ENUM_TWO}, - {(const char *)NULL} /* list terminator */ + {(const char *) NULL} /* list terminator */ }; /* Handler for index AM */ diff --git a/src/test/modules/ssl_passphrase_callback/ssl_passphrase_func.c b/src/test/modules/ssl_passphrase_callback/ssl_passphrase_func.c index c95cb509458..563ff144cc1 100644 --- a/src/test/modules/ssl_passphrase_callback/ssl_passphrase_func.c +++ b/src/test/modules/ssl_passphrase_callback/ssl_passphrase_func.c @@ -26,9 +26,11 @@ void _PG_fini(void); static char *ssl_passphrase = NULL; /* callback function */ -static int rot13_passphrase(char *buf, int size, int rwflag, void *userdata); +static int rot13_passphrase(char *buf, int size, int rwflag, void *userdata); + /* hook function to set the callback */ static void set_rot13(SSL_CTX *context, bool isServerStart); + /* * Module load callback */ @@ -60,7 +62,7 @@ static void set_rot13(SSL_CTX *context, bool isServerStart) { /* warn if the user has set ssl_passphrase_command */ - if(ssl_passphrase_command[0]) + if (ssl_passphrase_command[0]) ereport(WARNING, (errmsg("ssl_passphrase_command setting ignored by ssl_passphrase_func module"))); diff --git a/src/test/perl/PostgresNode.pm b/src/test/perl/PostgresNode.pm index 1d5450758e4..3f3a1d81f68 100644 --- a/src/test/perl/PostgresNode.pm +++ b/src/test/perl/PostgresNode.pm @@ -672,7 +672,7 @@ sub init_from_backup $params{has_streaming} = 0 unless defined $params{has_streaming}; $params{has_restoring} = 0 unless defined $params{has_restoring}; - $params{standby} = 1 unless defined $params{standby}; + $params{standby} = 1 unless defined $params{standby}; print "# Initializing node \"$node_name\" from backup \"$backup_name\" of node \"$root_name\"\n"; @@ -703,7 +703,8 @@ port = $port "unix_socket_directories = '$host'"); } $self->enable_streaming($root_node) if $params{has_streaming}; - $self->enable_restoring($root_node, $params{standby}) if $params{has_restoring}; + $self->enable_restoring($root_node, $params{standby}) + if $params{has_restoring}; return; } diff --git a/src/test/perl/TestLib.pm b/src/test/perl/TestLib.pm index 0e6c4819e4c..d579d5c177b 100644 --- a/src/test/perl/TestLib.pm +++ b/src/test/perl/TestLib.pm @@ -1,3 +1,4 @@ + =pod =head1 NAME @@ -122,7 +123,8 @@ BEGIN # Specifies whether to use Unix sockets for test setups. On # Windows we don't use them by default since it's not universally # supported, but it can be overridden if desired. 
- $use_unix_sockets = (!$windows_os || defined $ENV{PG_TEST_USE_UNIX_SOCKETS}); + $use_unix_sockets = + (!$windows_os || defined $ENV{PG_TEST_USE_UNIX_SOCKETS}); } =pod diff --git a/src/test/recovery/t/001_stream_rep.pl b/src/test/recovery/t/001_stream_rep.pl index 52585a10149..0c316c18082 100644 --- a/src/test/recovery/t/001_stream_rep.pl +++ b/src/test/recovery/t/001_stream_rep.pl @@ -348,8 +348,7 @@ is($catalog_xmin, '', 'catalog xmin of cascaded slot still null with hs_feedback reset'); note "check change primary_conninfo without restart"; -$node_standby_2->append_conf('postgresql.conf', - "primary_slot_name = ''"); +$node_standby_2->append_conf('postgresql.conf', "primary_slot_name = ''"); $node_standby_2->enable_streaming($node_master); $node_standby_2->reload; @@ -357,7 +356,7 @@ $node_standby_2->reload; $node_standby_1->stop; my $newval = $node_master->safe_psql('postgres', -'INSERT INTO replayed(val) SELECT coalesce(max(val),0) + 1 AS newval FROM replayed RETURNING val' + 'INSERT INTO replayed(val) SELECT coalesce(max(val),0) + 1 AS newval FROM replayed RETURNING val' ); $node_master->wait_for_catchup($node_standby_2, 'replay', $node_master->lsn('insert')); @@ -370,22 +369,26 @@ is($is_replayed, qq(1), "standby_2 didn't replay master value $newval"); my $phys_slot = 'phys_slot'; $node_master->safe_psql('postgres', "SELECT pg_create_physical_replication_slot('$phys_slot', true);"); -$node_master->psql('postgres', " +$node_master->psql( + 'postgres', " CREATE TABLE tab_phys_slot (a int); INSERT INTO tab_phys_slot VALUES (generate_series(1,10));"); -my $current_lsn = $node_master->safe_psql('postgres', - "SELECT pg_current_wal_lsn();"); +my $current_lsn = + $node_master->safe_psql('postgres', "SELECT pg_current_wal_lsn();"); chomp($current_lsn); my $psql_rc = $node_master->psql('postgres', - "SELECT pg_replication_slot_advance('$phys_slot', '$current_lsn'::pg_lsn);"); + "SELECT pg_replication_slot_advance('$phys_slot', '$current_lsn'::pg_lsn);" +); is($psql_rc, '0', 'slot advancing with physical slot'); my $phys_restart_lsn_pre = $node_master->safe_psql('postgres', - "SELECT restart_lsn from pg_replication_slots WHERE slot_name = '$phys_slot';"); + "SELECT restart_lsn from pg_replication_slots WHERE slot_name = '$phys_slot';" +); chomp($phys_restart_lsn_pre); # Slot advance should persist across clean restarts. 
$node_master->restart; my $phys_restart_lsn_post = $node_master->safe_psql('postgres', - "SELECT restart_lsn from pg_replication_slots WHERE slot_name = '$phys_slot';"); + "SELECT restart_lsn from pg_replication_slots WHERE slot_name = '$phys_slot';" +); chomp($phys_restart_lsn_post); -ok(($phys_restart_lsn_pre cmp $phys_restart_lsn_post) == 0, +ok( ($phys_restart_lsn_pre cmp $phys_restart_lsn_post) == 0, "physical slot advance persists across restarts"); diff --git a/src/test/recovery/t/003_recovery_targets.pl b/src/test/recovery/t/003_recovery_targets.pl index 86333e7758d..8d114eb7ad5 100644 --- a/src/test/recovery/t/003_recovery_targets.pl +++ b/src/test/recovery/t/003_recovery_targets.pl @@ -150,20 +150,26 @@ ok($logfile =~ qr/multiple recovery targets specified/, # Check behavior when recovery ends before target is reached $node_standby = get_new_node('standby_8'); -$node_standby->init_from_backup($node_master, 'my_backup', - has_restoring => 1, standby => 0); +$node_standby->init_from_backup( + $node_master, 'my_backup', + has_restoring => 1, + standby => 0); $node_standby->append_conf('postgresql.conf', - "recovery_target_name = 'does_not_exist'"); + "recovery_target_name = 'does_not_exist'"); -run_log(['pg_ctl', '-D', $node_standby->data_dir, - '-l', $node_standby->logfile, 'start']); +run_log( + [ + 'pg_ctl', '-D', $node_standby->data_dir, '-l', + $node_standby->logfile, 'start' + ]); # wait up to 180s for postgres to terminate -foreach my $i (0..1800) +foreach my $i (0 .. 1800) { - last if ! -f $node_standby->data_dir . '/postmaster.pid'; + last if !-f $node_standby->data_dir . '/postmaster.pid'; usleep(100_000); } $logfile = slurp_file($node_standby->logfile()); -ok($logfile =~ qr/FATAL: recovery ended before configured recovery target was reached/, +ok( $logfile =~ + qr/FATAL: recovery ended before configured recovery target was reached/, 'recovery end before target reached is a fatal error'); diff --git a/src/test/recovery/t/006_logical_decoding.pl b/src/test/recovery/t/006_logical_decoding.pl index 1334bf6a601..d40a500ed47 100644 --- a/src/test/recovery/t/006_logical_decoding.pl +++ b/src/test/recovery/t/006_logical_decoding.pl @@ -93,8 +93,7 @@ $stdout_recv = $node_master->pg_recvlogical_upto( 'include-xids' => '0', 'skip-empty-xacts' => '1'); chomp($stdout_recv); -is($stdout_recv, '', - 'pg_recvlogical acknowledged changes'); +is($stdout_recv, '', 'pg_recvlogical acknowledged changes'); $node_master->safe_psql('postgres', 'CREATE DATABASE otherdb'); @@ -143,23 +142,28 @@ is($node_master->slot('otherdb_slot')->{'slot_name'}, # Test logical slot advancing and its durability. 
my $logical_slot = 'logical_slot'; $node_master->safe_psql('postgres', - "SELECT pg_create_logical_replication_slot('$logical_slot', 'test_decoding', false);"); -$node_master->psql('postgres', " + "SELECT pg_create_logical_replication_slot('$logical_slot', 'test_decoding', false);" +); +$node_master->psql( + 'postgres', " CREATE TABLE tab_logical_slot (a int); INSERT INTO tab_logical_slot VALUES (generate_series(1,10));"); -my $current_lsn = $node_master->safe_psql('postgres', - "SELECT pg_current_wal_lsn();"); +my $current_lsn = + $node_master->safe_psql('postgres', "SELECT pg_current_wal_lsn();"); chomp($current_lsn); my $psql_rc = $node_master->psql('postgres', - "SELECT pg_replication_slot_advance('$logical_slot', '$current_lsn'::pg_lsn);"); + "SELECT pg_replication_slot_advance('$logical_slot', '$current_lsn'::pg_lsn);" +); is($psql_rc, '0', 'slot advancing with logical slot'); my $logical_restart_lsn_pre = $node_master->safe_psql('postgres', - "SELECT restart_lsn from pg_replication_slots WHERE slot_name = '$logical_slot';"); + "SELECT restart_lsn from pg_replication_slots WHERE slot_name = '$logical_slot';" +); chomp($logical_restart_lsn_pre); # Slot advance should persist across clean restarts. $node_master->restart; my $logical_restart_lsn_post = $node_master->safe_psql('postgres', - "SELECT restart_lsn from pg_replication_slots WHERE slot_name = '$logical_slot';"); + "SELECT restart_lsn from pg_replication_slots WHERE slot_name = '$logical_slot';" +); chomp($logical_restart_lsn_post); ok(($logical_restart_lsn_pre cmp $logical_restart_lsn_post) == 0, "logical slot advance persists across restarts"); diff --git a/src/test/recovery/t/019_replslot_limit.pl b/src/test/recovery/t/019_replslot_limit.pl index 634f2bec8bb..cba7df920c0 100644 --- a/src/test/recovery/t/019_replslot_limit.pl +++ b/src/test/recovery/t/019_replslot_limit.pl @@ -16,16 +16,20 @@ $ENV{PGDATABASE} = 'postgres'; # Initialize master node, setting wal-segsize to 1MB my $node_master = get_new_node('master'); $node_master->init(allows_streaming => 1, extra => ['--wal-segsize=1']); -$node_master->append_conf('postgresql.conf', qq( +$node_master->append_conf( + 'postgresql.conf', qq( min_wal_size = 2MB max_wal_size = 4MB log_checkpoints = yes )); $node_master->start; -$node_master->safe_psql('postgres', "SELECT pg_create_physical_replication_slot('rep1')"); +$node_master->safe_psql('postgres', + "SELECT pg_create_physical_replication_slot('rep1')"); # The slot state and remain should be null before the first connection -my $result = $node_master->safe_psql('postgres', "SELECT restart_lsn IS NULL, wal_status is NULL, min_safe_lsn is NULL FROM pg_replication_slots WHERE slot_name = 'rep1'"); +my $result = $node_master->safe_psql('postgres', + "SELECT restart_lsn IS NULL, wal_status is NULL, min_safe_lsn is NULL FROM pg_replication_slots WHERE slot_name = 'rep1'" +); is($result, "t|t|t", 'check the state of non-reserved slot is "unknown"'); @@ -35,7 +39,8 @@ $node_master->backup($backup_name); # Create a standby linking to it using the replication slot my $node_standby = get_new_node('standby_1'); -$node_standby->init_from_backup($node_master, $backup_name, has_streaming => 1); +$node_standby->init_from_backup($node_master, $backup_name, + has_streaming => 1); $node_standby->append_conf('postgresql.conf', "primary_slot_name = 'rep1'"); $node_standby->start; @@ -48,7 +53,9 @@ $node_master->wait_for_catchup($node_standby, 'replay', $start_lsn); $node_standby->stop; # Preparation done, the slot is the state "normal" now 
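[Editor's note on the two TAP hunks above] Both 001_stream_rep.pl and 006_logical_decoding.pl follow the same shape: advance a slot with pg_replication_slot_advance(), capture restart_lsn, restart the server cleanly, and expect the same restart_lsn back. For reference, the equivalent check driven from a libpq client might look like the sketch below; the conninfo and slot name are assumptions, and error checks are omitted for brevity:

#include <stdio.h>
#include "libpq-fe.h"

static void
get_restart_lsn(PGconn *conn, const char *slot, char *lsn, size_t len)
{
	const char *params[1] = {slot};
	PGresult   *res;

	res = PQexecParams(conn,
					   "SELECT restart_lsn FROM pg_replication_slots "
					   "WHERE slot_name = $1",
					   1, NULL, params, NULL, NULL, 0);
	snprintf(lsn, len, "%s", PQgetvalue(res, 0, 0));
	PQclear(res);
}

int
main(void)
{
	PGconn	   *conn = PQconnectdb("dbname=postgres");
	char		before[64];

	PQclear(PQexec(conn,
				   "SELECT pg_replication_slot_advance('phys_slot', "
				   "pg_current_wal_lsn())"));
	get_restart_lsn(conn, "phys_slot", before, sizeof(before));
	printf("restart_lsn after advance: %s\n", before);

	/* restart the server here, reconnect, and expect the same value */
	PQfinish(conn);
	return 0;
}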
-$result = $node_master->safe_psql('postgres', "SELECT wal_status, min_safe_lsn is NULL FROM pg_replication_slots WHERE slot_name = 'rep1'"); +$result = $node_master->safe_psql('postgres', + "SELECT wal_status, min_safe_lsn is NULL FROM pg_replication_slots WHERE slot_name = 'rep1'" +); is($result, "normal|t", 'check the catching-up state'); # Advance WAL by five segments (= 5MB) on master @@ -56,14 +63,18 @@ advance_wal($node_master, 1); $node_master->safe_psql('postgres', "CHECKPOINT;"); # The slot is always "safe" when fitting max_wal_size -$result = $node_master->safe_psql('postgres', "SELECT wal_status, min_safe_lsn is NULL FROM pg_replication_slots WHERE slot_name = 'rep1'"); +$result = $node_master->safe_psql('postgres', + "SELECT wal_status, min_safe_lsn is NULL FROM pg_replication_slots WHERE slot_name = 'rep1'" +); is($result, "normal|t", 'check that it is safe if WAL fits in max_wal_size'); advance_wal($node_master, 4); $node_master->safe_psql('postgres', "CHECKPOINT;"); # The slot is always "safe" when max_slot_wal_keep_size is not set -$result = $node_master->safe_psql('postgres', "SELECT wal_status, min_safe_lsn is NULL FROM pg_replication_slots WHERE slot_name = 'rep1'"); +$result = $node_master->safe_psql('postgres', + "SELECT wal_status, min_safe_lsn is NULL FROM pg_replication_slots WHERE slot_name = 'rep1'" +); is($result, "normal|t", 'check that slot is working'); # The standby can reconnect to master @@ -76,7 +87,8 @@ $node_standby->stop; # Set max_slot_wal_keep_size on master my $max_slot_wal_keep_size_mb = 6; -$node_master->append_conf('postgresql.conf', qq( +$node_master->append_conf( + 'postgresql.conf', qq( max_slot_wal_keep_size = ${max_slot_wal_keep_size_mb}MB )); $node_master->reload; @@ -85,7 +97,8 @@ $node_master->reload; # be as almost (max_slot_wal_keep_size - 1) times large as the segment # size -$result = $node_master->safe_psql('postgres', "SELECT wal_status FROM pg_replication_slots WHERE slot_name = 'rep1'"); +$result = $node_master->safe_psql('postgres', + "SELECT wal_status FROM pg_replication_slots WHERE slot_name = 'rep1'"); is($result, "normal", 'check that max_slot_wal_keep_size is working'); # Advance WAL again then checkpoint, reducing remain by 2 MB. @@ -93,8 +106,10 @@ advance_wal($node_master, 2); $node_master->safe_psql('postgres', "CHECKPOINT;"); # The slot is still working -$result = $node_master->safe_psql('postgres', "SELECT wal_status FROM pg_replication_slots WHERE slot_name = 'rep1'"); -is($result, "normal", 'check that min_safe_lsn gets close to the current LSN'); +$result = $node_master->safe_psql('postgres', + "SELECT wal_status FROM pg_replication_slots WHERE slot_name = 'rep1'"); +is($result, "normal", + 'check that min_safe_lsn gets close to the current LSN'); # The standby can reconnect to master $node_standby->start; @@ -103,13 +118,18 @@ $node_master->wait_for_catchup($node_standby, 'replay', $start_lsn); $node_standby->stop; # wal_keep_segments overrides max_slot_wal_keep_size -$result = $node_master->safe_psql('postgres', "ALTER SYSTEM SET wal_keep_segments to 8; SELECT pg_reload_conf();"); +$result = $node_master->safe_psql('postgres', + "ALTER SYSTEM SET wal_keep_segments to 8; SELECT pg_reload_conf();"); # Advance WAL again then checkpoint, reducing remain by 6 MB. 
advance_wal($node_master, 6); -$result = $node_master->safe_psql('postgres', "SELECT wal_status as remain FROM pg_replication_slots WHERE slot_name = 'rep1'"); -is($result, "normal", 'check that wal_keep_segments overrides max_slot_wal_keep_size'); +$result = $node_master->safe_psql('postgres', + "SELECT wal_status as remain FROM pg_replication_slots WHERE slot_name = 'rep1'" +); +is($result, "normal", + 'check that wal_keep_segments overrides max_slot_wal_keep_size'); # restore wal_keep_segments -$result = $node_master->safe_psql('postgres', "ALTER SYSTEM SET wal_keep_segments to 0; SELECT pg_reload_conf();"); +$result = $node_master->safe_psql('postgres', + "ALTER SYSTEM SET wal_keep_segments to 0; SELECT pg_reload_conf();"); # The standby can reconnect to master $node_standby->start; @@ -121,7 +141,8 @@ $node_standby->stop; advance_wal($node_master, 6); # Slot gets into 'reserved' state -$result = $node_master->safe_psql('postgres', "SELECT wal_status FROM pg_replication_slots WHERE slot_name = 'rep1'"); +$result = $node_master->safe_psql('postgres', + "SELECT wal_status FROM pg_replication_slots WHERE slot_name = 'rep1'"); is($result, "reserved", 'check that the slot state changes to "reserved"'); # do checkpoint so that the next checkpoint runs too early @@ -131,7 +152,9 @@ $node_master->safe_psql('postgres', "CHECKPOINT;"); advance_wal($node_master, 1); # Slot gets into 'lost' state -$result = $node_master->safe_psql('postgres', "SELECT wal_status, min_safe_lsn is NULL FROM pg_replication_slots WHERE slot_name = 'rep1'"); +$result = $node_master->safe_psql('postgres', + "SELECT wal_status, min_safe_lsn is NULL FROM pg_replication_slots WHERE slot_name = 'rep1'" +); is($result, "lost|t", 'check that the slot state changes to "lost"'); # The standby still can connect to master before a checkpoint @@ -142,9 +165,10 @@ $node_master->wait_for_catchup($node_standby, 'replay', $start_lsn); $node_standby->stop; -ok(!find_in_log($node_standby, - "requested WAL segment [0-9A-F]+ has already been removed"), - 'check that required WAL segments are still available'); +ok( !find_in_log( + $node_standby, + "requested WAL segment [0-9A-F]+ has already been removed"), + 'check that required WAL segments are still available'); # Advance WAL again, the slot loses the oldest segment. 
my $logstart = get_log_size($node_master); @@ -152,13 +176,16 @@ advance_wal($node_master, 7); $node_master->safe_psql('postgres', "CHECKPOINT;"); # WARNING should be issued -ok(find_in_log($node_master, - "invalidating slot \"rep1\" because its restart_lsn [0-9A-F/]+ exceeds max_slot_wal_keep_size", - $logstart), - 'check that the warning is logged'); +ok( find_in_log( + $node_master, + "invalidating slot \"rep1\" because its restart_lsn [0-9A-F/]+ exceeds max_slot_wal_keep_size", + $logstart), + 'check that the warning is logged'); # This slot should be broken -$result = $node_master->safe_psql('postgres', "SELECT slot_name, active, restart_lsn IS NULL, wal_status, min_safe_lsn FROM pg_replication_slots WHERE slot_name = 'rep1'"); +$result = $node_master->safe_psql('postgres', + "SELECT slot_name, active, restart_lsn IS NULL, wal_status, min_safe_lsn FROM pg_replication_slots WHERE slot_name = 'rep1'" +); is($result, "rep1|f|t||", 'check that the slot became inactive'); # The standby no longer can connect to the master @@ -168,9 +195,10 @@ $node_standby->start; my $failed = 0; for (my $i = 0; $i < 10000; $i++) { - if (find_in_log($node_standby, - "requested WAL segment [0-9A-F]+ has already been removed", - $logstart)) + if (find_in_log( + $node_standby, + "requested WAL segment [0-9A-F]+ has already been removed", + $logstart)) { $failed = 1; last; @@ -228,9 +256,10 @@ sub advance_wal my ($node, $n) = @_; # Advance by $n segments (= (16 * $n) MB) on master - for (my $i = 0 ; $i < $n ; $i++) + for (my $i = 0; $i < $n; $i++) { - $node->safe_psql('postgres', "CREATE TABLE t (); DROP TABLE t; SELECT pg_switch_wal();"); + $node->safe_psql('postgres', + "CREATE TABLE t (); DROP TABLE t; SELECT pg_switch_wal();"); } return; } diff --git a/src/test/ssl/t/001_ssltests.pl b/src/test/ssl/t/001_ssltests.pl index 3e68a49ca93..a454bb0274a 100644 --- a/src/test/ssl/t/001_ssltests.pl +++ b/src/test/ssl/t/001_ssltests.pl @@ -37,13 +37,17 @@ my $common_connstr; # # This changes ssl/client.key to ssl/client_tmp.key etc for the rest # of the tests. -my @keys = ("client", "client-revoked", "client-der", "client-encrypted-pem", "client-encrypted-der"); +my @keys = ( + "client", "client-revoked", + "client-der", "client-encrypted-pem", + "client-encrypted-der"); foreach my $key (@keys) { - copy("ssl/${key}.key", "ssl/${key}_tmp.key") - or die "couldn't copy ssl/${key}.key to ssl/${key}_tmp.key for permissions change: $!"; - chmod 0600, "ssl/${key}_tmp.key" - or die "failed to change permissions on ssl/${key}_tmp.key: $!"; + copy("ssl/${key}.key", "ssl/${key}_tmp.key") + or die + "couldn't copy ssl/${key}.key to ssl/${key}_tmp.key for permissions change: $!"; + chmod 0600, "ssl/${key}_tmp.key" + or die "failed to change permissions on ssl/${key}_tmp.key: $!"; } # Also make a copy of that explicitly world-readable. We can't @@ -99,15 +103,17 @@ $node->_update_pid(1); # Test compatibility of SSL protocols. # TLSv1.1 is lower than TLSv1.2, so it won't work. -$node->append_conf('postgresql.conf', - qq{ssl_min_protocol_version='TLSv1.2' +$node->append_conf( + 'postgresql.conf', + qq{ssl_min_protocol_version='TLSv1.2' ssl_max_protocol_version='TLSv1.1'}); command_fails( [ 'pg_ctl', '-D', $node->data_dir, '-l', $node->logfile, 'restart' ], 'restart fails with incorrect SSL protocol bounds'); # Go back to the defaults, this works. 
-$node->append_conf('postgresql.conf', - qq{ssl_min_protocol_version='TLSv1.2' +$node->append_conf( + 'postgresql.conf', + qq{ssl_min_protocol_version='TLSv1.2' ssl_max_protocol_version=''}); command_ok( [ 'pg_ctl', '-D', $node->data_dir, '-l', $node->logfile, 'restart' ], @@ -395,32 +401,37 @@ test_connect_fails( test_connect_ok( $common_connstr, "user=ssltestuser sslcert=ssl/client.crt sslkey=ssl/client_tmp.key", - "certificate authorization succeeds with correct client cert in PEM format"); + "certificate authorization succeeds with correct client cert in PEM format" +); # correct client cert in unencrypted DER test_connect_ok( $common_connstr, "user=ssltestuser sslcert=ssl/client.crt sslkey=ssl/client-der_tmp.key", - "certificate authorization succeeds with correct client cert in DER format"); + "certificate authorization succeeds with correct client cert in DER format" +); # correct client cert in encrypted PEM test_connect_ok( $common_connstr, "user=ssltestuser sslcert=ssl/client.crt sslkey=ssl/client-encrypted-pem_tmp.key sslpassword='dUmmyP^#+'", - "certificate authorization succeeds with correct client cert in encrypted PEM format"); + "certificate authorization succeeds with correct client cert in encrypted PEM format" +); # correct client cert in encrypted DER test_connect_ok( $common_connstr, "user=ssltestuser sslcert=ssl/client.crt sslkey=ssl/client-encrypted-der_tmp.key sslpassword='dUmmyP^#+'", - "certificate authorization succeeds with correct client cert in encrypted DER format"); + "certificate authorization succeeds with correct client cert in encrypted DER format" +); # correct client cert in encrypted PEM with wrong password test_connect_fails( $common_connstr, "user=ssltestuser sslcert=ssl/client.crt sslkey=ssl/client-encrypted-pem_tmp.key sslpassword='wrong'", qr!\Qprivate key file "ssl/client-encrypted-pem_tmp.key": bad decrypt\E!, - "certificate authorization fails with correct client cert and wrong password in encrypted PEM format"); + "certificate authorization fails with correct client cert and wrong password in encrypted PEM format" +); TODO: { @@ -434,14 +445,16 @@ TODO: $common_connstr, "user=ssltestuser sslcert=ssl/client.crt sslkey=ssl/client-encrypted-pem_tmp.key sslpassword=''", qr!\Qprivate key file "ssl/client-encrypted-pem_tmp.key": processing error\E!, - "certificate authorization fails with correct client cert and empty password in encrypted PEM format"); + "certificate authorization fails with correct client cert and empty password in encrypted PEM format" + ); # correct client cert in encrypted PEM with no password test_connect_fails( $common_connstr, "user=ssltestuser sslcert=ssl/client.crt sslkey=ssl/client-encrypted-pem_tmp.key", qr!\Qprivate key file "ssl/client-encrypted-pem_tmp.key": processing error\E!, - "certificate authorization fails with correct client cert and no password in encrypted PEM format"); + "certificate authorization fails with correct client cert and no password in encrypted PEM format" + ); } @@ -533,5 +546,5 @@ test_connect_fails($common_connstr, "sslmode=require sslcert=ssl/client.crt", # clean up foreach my $key (@keys) { - unlink("ssl/${key}_tmp.key"); + unlink("ssl/${key}_tmp.key"); } diff --git a/src/test/subscription/t/003_constraints.pl b/src/test/subscription/t/003_constraints.pl index 34ab11e7fed..3a590f871a5 100644 --- a/src/test/subscription/t/003_constraints.pl +++ b/src/test/subscription/t/003_constraints.pl @@ -115,7 +115,8 @@ $node_publisher->wait_for_catchup('tap_sub'); # The trigger should cause the update 
to be skipped on subscriber $result = $node_subscriber->safe_psql('postgres', "SELECT count(*), min(bid), max(bid) FROM tab_fk_ref;"); -is($result, qq(2|1|2), 'check replica update column trigger applied on subscriber'); +is($result, qq(2|1|2), + 'check replica update column trigger applied on subscriber'); # Update on a column not specified in the trigger, but it will trigger # anyway because logical replication ships all columns in an update. @@ -126,7 +127,8 @@ $node_publisher->wait_for_catchup('tap_sub'); $result = $node_subscriber->safe_psql('postgres', "SELECT count(*), min(id), max(id) FROM tab_fk_ref;"); -is($result, qq(2|1|2), 'check column trigger applied on even for other column'); +is($result, qq(2|1|2), + 'check column trigger applied on even for other column'); $node_subscriber->stop('fast'); $node_publisher->stop('fast'); diff --git a/src/test/subscription/t/008_diff_schema.pl b/src/test/subscription/t/008_diff_schema.pl index 81520a73329..963334ed89d 100644 --- a/src/test/subscription/t/008_diff_schema.pl +++ b/src/test/subscription/t/008_diff_schema.pl @@ -95,11 +95,9 @@ is($result, qq(3|3|3|3), # progressing. # (https://www.postgresql.org/message-id/flat/a9139c29-7ddd-973b-aa7f-71fed9c38d75%40minerva.info) -$node_publisher->safe_psql('postgres', - "CREATE TABLE test_tab2 (a int)"); +$node_publisher->safe_psql('postgres', "CREATE TABLE test_tab2 (a int)"); -$node_subscriber->safe_psql('postgres', - "CREATE TABLE test_tab2 (a int)"); +$node_subscriber->safe_psql('postgres', "CREATE TABLE test_tab2 (a int)"); $node_subscriber->safe_psql('postgres', "ALTER SUBSCRIPTION tap_sub REFRESH PUBLICATION"); @@ -113,15 +111,14 @@ $node_subscriber->poll_query_until('postgres', $synced_query) $node_subscriber->safe_psql('postgres', "ALTER TABLE test_tab2 ADD COLUMN b serial PRIMARY KEY"); -$node_publisher->safe_psql('postgres', - "INSERT INTO test_tab2 VALUES (1)"); +$node_publisher->safe_psql('postgres', "INSERT INTO test_tab2 VALUES (1)"); $node_publisher->wait_for_catchup('tap_sub'); -is($node_subscriber->safe_psql('postgres', - "SELECT count(*), min(a), max(a) FROM test_tab2"), - qq(1|1|1), - 'check replicated inserts on subscriber'); +is( $node_subscriber->safe_psql( + 'postgres', "SELECT count(*), min(a), max(a) FROM test_tab2"), + qq(1|1|1), + 'check replicated inserts on subscriber'); $node_subscriber->stop; diff --git a/src/test/subscription/t/013_partition.pl b/src/test/subscription/t/013_partition.pl index 208bb556ce4..a04c03a7e24 100644 --- a/src/test/subscription/t/013_partition.pl +++ b/src/test/subscription/t/013_partition.pl @@ -22,8 +22,7 @@ $node_subscriber2->start; my $publisher_connstr = $node_publisher->connstr . ' dbname=postgres'; # publisher -$node_publisher->safe_psql('postgres', - "CREATE PUBLICATION pub1"); +$node_publisher->safe_psql('postgres', "CREATE PUBLICATION pub1"); $node_publisher->safe_psql('postgres', "CREATE PUBLICATION pub_all FOR ALL TABLES"); $node_publisher->safe_psql('postgres', @@ -45,13 +44,16 @@ $node_publisher->safe_psql('postgres', # subpartitioned. This tests the tuple routing code on the # subscriber. 
$node_subscriber1->safe_psql('postgres', - "CREATE TABLE tab1 (c text, a int PRIMARY KEY, b text) PARTITION BY LIST (a)"); + "CREATE TABLE tab1 (c text, a int PRIMARY KEY, b text) PARTITION BY LIST (a)" +); $node_subscriber1->safe_psql('postgres', - "CREATE TABLE tab1_1 (b text, c text DEFAULT 'sub1_tab1', a int NOT NULL)"); + "CREATE TABLE tab1_1 (b text, c text DEFAULT 'sub1_tab1', a int NOT NULL)" +); $node_subscriber1->safe_psql('postgres', "ALTER TABLE tab1 ATTACH PARTITION tab1_1 FOR VALUES IN (1, 2, 3)"); $node_subscriber1->safe_psql('postgres', - "CREATE TABLE tab1_2 PARTITION OF tab1 (c DEFAULT 'sub1_tab1') FOR VALUES IN (4, 5, 6) PARTITION BY LIST (a)"); + "CREATE TABLE tab1_2 PARTITION OF tab1 (c DEFAULT 'sub1_tab1') FOR VALUES IN (4, 5, 6) PARTITION BY LIST (a)" +); $node_subscriber1->safe_psql('postgres', "CREATE TABLE tab1_2_1 (c text, b text, a int NOT NULL)"); $node_subscriber1->safe_psql('postgres', @@ -59,24 +61,31 @@ $node_subscriber1->safe_psql('postgres', $node_subscriber1->safe_psql('postgres', "CREATE TABLE tab1_2_2 PARTITION OF tab1_2 FOR VALUES IN (4, 6)"); $node_subscriber1->safe_psql('postgres', - "CREATE TABLE tab1_def PARTITION OF tab1 (c DEFAULT 'sub1_tab1') DEFAULT"); + "CREATE TABLE tab1_def PARTITION OF tab1 (c DEFAULT 'sub1_tab1') DEFAULT" +); $node_subscriber1->safe_psql('postgres', - "CREATE SUBSCRIPTION sub1 CONNECTION '$publisher_connstr' PUBLICATION pub1"); + "CREATE SUBSCRIPTION sub1 CONNECTION '$publisher_connstr' PUBLICATION pub1" +); # subscriber 2 # # This does not use partitioning. The tables match the leaf tables on # the publisher. $node_subscriber2->safe_psql('postgres', - "CREATE TABLE tab1 (a int PRIMARY KEY, c text DEFAULT 'sub2_tab1', b text)"); + "CREATE TABLE tab1 (a int PRIMARY KEY, c text DEFAULT 'sub2_tab1', b text)" +); $node_subscriber2->safe_psql('postgres', - "CREATE TABLE tab1_1 (a int PRIMARY KEY, c text DEFAULT 'sub2_tab1_1', b text)"); + "CREATE TABLE tab1_1 (a int PRIMARY KEY, c text DEFAULT 'sub2_tab1_1', b text)" +); $node_subscriber2->safe_psql('postgres', - "CREATE TABLE tab1_2 (a int PRIMARY KEY, c text DEFAULT 'sub2_tab1_2', b text)"); + "CREATE TABLE tab1_2 (a int PRIMARY KEY, c text DEFAULT 'sub2_tab1_2', b text)" +); $node_subscriber2->safe_psql('postgres', - "CREATE TABLE tab1_def (a int PRIMARY KEY, b text, c text DEFAULT 'sub2_tab1_def')"); + "CREATE TABLE tab1_def (a int PRIMARY KEY, b text, c text DEFAULT 'sub2_tab1_def')" +); $node_subscriber2->safe_psql('postgres', - "CREATE SUBSCRIPTION sub2 CONNECTION '$publisher_connstr' PUBLICATION pub_all"); + "CREATE SUBSCRIPTION sub2 CONNECTION '$publisher_connstr' PUBLICATION pub_all" +); # Wait for initial sync of all subscriptions my $synced_query = @@ -89,21 +98,17 @@ $node_subscriber2->poll_query_until('postgres', $synced_query) # Tests for replication using leaf partition identity and schema # insert -$node_publisher->safe_psql('postgres', - "INSERT INTO tab1 VALUES (1)"); -$node_publisher->safe_psql('postgres', - "INSERT INTO tab1_1 (a) VALUES (3)"); -$node_publisher->safe_psql('postgres', - "INSERT INTO tab1_2 VALUES (5)"); -$node_publisher->safe_psql('postgres', - "INSERT INTO tab1 VALUES (0)"); +$node_publisher->safe_psql('postgres', "INSERT INTO tab1 VALUES (1)"); +$node_publisher->safe_psql('postgres', "INSERT INTO tab1_1 (a) VALUES (3)"); +$node_publisher->safe_psql('postgres', "INSERT INTO tab1_2 VALUES (5)"); +$node_publisher->safe_psql('postgres', "INSERT INTO tab1 VALUES (0)"); $node_publisher->wait_for_catchup('sub1'); 
$node_publisher->wait_for_catchup('sub2'); my $result = $node_subscriber1->safe_psql('postgres', "SELECT c, a FROM tab1 ORDER BY 1, 2"); -is($result, qq(sub1_tab1|0 +is( $result, qq(sub1_tab1|0 sub1_tab1|1 sub1_tab1|3 sub1_tab1|5), 'inserts into tab1 and its partitions replicated'); @@ -118,7 +123,7 @@ is($result, qq(), 'inserts into tab1_2 replicated into tab1_2_2 correctly'); $result = $node_subscriber2->safe_psql('postgres', "SELECT c, a FROM tab1_1 ORDER BY 1, 2"); -is($result, qq(sub2_tab1_1|1 +is( $result, qq(sub2_tab1_1|1 sub2_tab1_1|3), 'inserts into tab1_1 replicated'); $result = $node_subscriber2->safe_psql('postgres', @@ -130,24 +135,20 @@ $result = $node_subscriber2->safe_psql('postgres', is($result, qq(sub2_tab1_def|0), 'inserts into tab1_def replicated'); # update (replicated as update) -$node_publisher->safe_psql('postgres', - "UPDATE tab1 SET a = 2 WHERE a = 1"); +$node_publisher->safe_psql('postgres', "UPDATE tab1 SET a = 2 WHERE a = 1"); # All of the following cause an update to be applied to a partitioned # table on subscriber1: tab1_2 is leaf partition on publisher, whereas # it's sub-partitioned on subscriber1. -$node_publisher->safe_psql('postgres', - "UPDATE tab1 SET a = 6 WHERE a = 5"); -$node_publisher->safe_psql('postgres', - "UPDATE tab1 SET a = 4 WHERE a = 6"); -$node_publisher->safe_psql('postgres', - "UPDATE tab1 SET a = 6 WHERE a = 4"); +$node_publisher->safe_psql('postgres', "UPDATE tab1 SET a = 6 WHERE a = 5"); +$node_publisher->safe_psql('postgres', "UPDATE tab1 SET a = 4 WHERE a = 6"); +$node_publisher->safe_psql('postgres', "UPDATE tab1 SET a = 6 WHERE a = 4"); $node_publisher->wait_for_catchup('sub1'); $node_publisher->wait_for_catchup('sub2'); $result = $node_subscriber1->safe_psql('postgres', "SELECT c, a FROM tab1 ORDER BY 1, 2"); -is($result, qq(sub1_tab1|0 +is( $result, qq(sub1_tab1|0 sub1_tab1|2 sub1_tab1|3 sub1_tab1|6), 'update of tab1_1, tab1_2 replicated'); @@ -162,7 +163,7 @@ is($result, qq(6), 'updates of tab1_2 replicated into tab1_2_2 correctly'); $result = $node_subscriber2->safe_psql('postgres', "SELECT c, a FROM tab1_1 ORDER BY 1, 2"); -is($result, qq(sub2_tab1_1|2 +is( $result, qq(sub2_tab1_1|2 sub2_tab1_1|3), 'update of tab1_1 replicated'); $result = $node_subscriber2->safe_psql('postgres', @@ -174,34 +175,33 @@ $result = $node_subscriber2->safe_psql('postgres', is($result, qq(sub2_tab1_def|0), 'tab1_def unchanged'); # update (replicated as delete+insert) -$node_publisher->safe_psql('postgres', - "UPDATE tab1 SET a = 1 WHERE a = 0"); -$node_publisher->safe_psql('postgres', - "UPDATE tab1 SET a = 4 WHERE a = 1"); +$node_publisher->safe_psql('postgres', "UPDATE tab1 SET a = 1 WHERE a = 0"); +$node_publisher->safe_psql('postgres', "UPDATE tab1 SET a = 4 WHERE a = 1"); $node_publisher->wait_for_catchup('sub1'); $node_publisher->wait_for_catchup('sub2'); $result = $node_subscriber1->safe_psql('postgres', "SELECT c, a FROM tab1 ORDER BY 1, 2"); -is($result, qq(sub1_tab1|2 +is( $result, qq(sub1_tab1|2 sub1_tab1|3 sub1_tab1|4 -sub1_tab1|6), 'update of tab1 (delete from tab1_def + insert into tab1_1) replicated'); +sub1_tab1|6), + 'update of tab1 (delete from tab1_def + insert into tab1_1) replicated'); $result = $node_subscriber1->safe_psql('postgres', "SELECT a FROM tab1_2_2 ORDER BY 1"); -is($result, qq(4 +is( $result, qq(4 6), 'updates of tab1 (delete + insert) replicated into tab1_2_2 correctly'); $result = $node_subscriber2->safe_psql('postgres', "SELECT c, a FROM tab1_1 ORDER BY 1, 2"); -is($result, qq(sub2_tab1_1|2 +is( $result, 
qq(sub2_tab1_1|2 sub2_tab1_1|3), 'tab1_1 unchanged'); $result = $node_subscriber2->safe_psql('postgres', "SELECT c, a FROM tab1_2 ORDER BY 1, 2"); -is($result, qq(sub2_tab1_2|4 +is( $result, qq(sub2_tab1_2|4 sub2_tab1_2|6), 'insert into tab1_2 replicated'); $result = $node_subscriber2->safe_psql('postgres', @@ -211,62 +211,54 @@ is($result, qq(), 'delete from tab1_def replicated'); # delete $node_publisher->safe_psql('postgres', "DELETE FROM tab1 WHERE a IN (2, 3, 5)"); -$node_publisher->safe_psql('postgres', - "DELETE FROM tab1_2"); +$node_publisher->safe_psql('postgres', "DELETE FROM tab1_2"); $node_publisher->wait_for_catchup('sub1'); $node_publisher->wait_for_catchup('sub2'); -$result = $node_subscriber1->safe_psql('postgres', - "SELECT a FROM tab1"); +$result = $node_subscriber1->safe_psql('postgres', "SELECT a FROM tab1"); is($result, qq(), 'delete from tab1_1, tab1_2 replicated'); -$result = $node_subscriber2->safe_psql('postgres', - "SELECT a FROM tab1_1"); +$result = $node_subscriber2->safe_psql('postgres', "SELECT a FROM tab1_1"); is($result, qq(), 'delete from tab1_1 replicated'); -$result = $node_subscriber2->safe_psql('postgres', - "SELECT a FROM tab1_2"); +$result = $node_subscriber2->safe_psql('postgres', "SELECT a FROM tab1_2"); is($result, qq(), 'delete from tab1_2 replicated'); # truncate $node_subscriber1->safe_psql('postgres', "INSERT INTO tab1 (a) VALUES (1), (2), (5)"); -$node_subscriber2->safe_psql('postgres', - "INSERT INTO tab1_2 (a) VALUES (2)"); -$node_publisher->safe_psql('postgres', - "TRUNCATE tab1_2"); +$node_subscriber2->safe_psql('postgres', "INSERT INTO tab1_2 (a) VALUES (2)"); +$node_publisher->safe_psql('postgres', "TRUNCATE tab1_2"); $node_publisher->wait_for_catchup('sub1'); $node_publisher->wait_for_catchup('sub2'); -$result = $node_subscriber1->safe_psql('postgres', - "SELECT a FROM tab1 ORDER BY 1"); -is($result, qq(1 +$result = + $node_subscriber1->safe_psql('postgres', "SELECT a FROM tab1 ORDER BY 1"); +is( $result, qq(1 2), 'truncate of tab1_2 replicated'); -$result = $node_subscriber2->safe_psql('postgres', - "SELECT a FROM tab1_2 ORDER BY 1"); +$result = + $node_subscriber2->safe_psql('postgres', "SELECT a FROM tab1_2 ORDER BY 1"); is($result, qq(), 'truncate of tab1_2 replicated'); -$node_publisher->safe_psql('postgres', - "TRUNCATE tab1"); +$node_publisher->safe_psql('postgres', "TRUNCATE tab1"); $node_publisher->wait_for_catchup('sub1'); $node_publisher->wait_for_catchup('sub2'); -$result = $node_subscriber1->safe_psql('postgres', - "SELECT a FROM tab1 ORDER BY 1"); +$result = + $node_subscriber1->safe_psql('postgres', "SELECT a FROM tab1 ORDER BY 1"); is($result, qq(), 'truncate of tab1_1 replicated'); -$result = $node_subscriber2->safe_psql('postgres', - "SELECT a FROM tab1 ORDER BY 1"); +$result = + $node_subscriber2->safe_psql('postgres', "SELECT a FROM tab1 ORDER BY 1"); is($result, qq(), 'truncate of tab1 replicated'); # Tests for replication using root table identity and schema # publisher -$node_publisher->safe_psql('postgres', - "DROP PUBLICATION pub1"); +$node_publisher->safe_psql('postgres', "DROP PUBLICATION pub1"); $node_publisher->safe_psql('postgres', "CREATE TABLE tab2 (a int PRIMARY KEY, b text) PARTITION BY LIST (a)"); $node_publisher->safe_psql('postgres', @@ -285,40 +277,49 @@ $node_publisher->safe_psql('postgres', # Note: tab3_1's parent is not in the publication, in which case its # changes are published using own identity. 
$node_publisher->safe_psql('postgres', - "CREATE PUBLICATION pub_viaroot FOR TABLE tab2, tab3_1 WITH (publish_via_partition_root = true)"); + "CREATE PUBLICATION pub_viaroot FOR TABLE tab2, tab3_1 WITH (publish_via_partition_root = true)" +); # subscriber 1 +$node_subscriber1->safe_psql('postgres', "DROP SUBSCRIPTION sub1"); $node_subscriber1->safe_psql('postgres', - "DROP SUBSCRIPTION sub1"); + "CREATE TABLE tab2 (a int PRIMARY KEY, c text DEFAULT 'sub1_tab2', b text) PARTITION BY RANGE (a)" +); $node_subscriber1->safe_psql('postgres', - "CREATE TABLE tab2 (a int PRIMARY KEY, c text DEFAULT 'sub1_tab2', b text) PARTITION BY RANGE (a)"); -$node_subscriber1->safe_psql('postgres', - "CREATE TABLE tab2_1 (c text DEFAULT 'sub1_tab2', b text, a int NOT NULL)"); + "CREATE TABLE tab2_1 (c text DEFAULT 'sub1_tab2', b text, a int NOT NULL)" +); $node_subscriber1->safe_psql('postgres', "ALTER TABLE tab2 ATTACH PARTITION tab2_1 FOR VALUES FROM (0) TO (10)"); $node_subscriber1->safe_psql('postgres', - "CREATE TABLE tab3_1 (c text DEFAULT 'sub1_tab3_1', b text, a int NOT NULL PRIMARY KEY)"); + "CREATE TABLE tab3_1 (c text DEFAULT 'sub1_tab3_1', b text, a int NOT NULL PRIMARY KEY)" +); $node_subscriber1->safe_psql('postgres', - "CREATE SUBSCRIPTION sub_viaroot CONNECTION '$publisher_connstr' PUBLICATION pub_viaroot"); + "CREATE SUBSCRIPTION sub_viaroot CONNECTION '$publisher_connstr' PUBLICATION pub_viaroot" +); # subscriber 2 +$node_subscriber2->safe_psql('postgres', "DROP TABLE tab1"); $node_subscriber2->safe_psql('postgres', - "DROP TABLE tab1"); -$node_subscriber2->safe_psql('postgres', - "CREATE TABLE tab1 (a int PRIMARY KEY, c text DEFAULT 'sub2_tab1', b text) PARTITION BY HASH (a)"); + "CREATE TABLE tab1 (a int PRIMARY KEY, c text DEFAULT 'sub2_tab1', b text) PARTITION BY HASH (a)" +); # Note: tab1's partitions are named tab1_1 and tab1_2 on the publisher. $node_subscriber2->safe_psql('postgres', "CREATE TABLE tab1_part1 (b text, c text, a int NOT NULL)"); $node_subscriber2->safe_psql('postgres', - "ALTER TABLE tab1 ATTACH PARTITION tab1_part1 FOR VALUES WITH (MODULUS 2, REMAINDER 0)"); + "ALTER TABLE tab1 ATTACH PARTITION tab1_part1 FOR VALUES WITH (MODULUS 2, REMAINDER 0)" +); $node_subscriber2->safe_psql('postgres', - "CREATE TABLE tab1_part2 PARTITION OF tab1 FOR VALUES WITH (MODULUS 2, REMAINDER 1)"); + "CREATE TABLE tab1_part2 PARTITION OF tab1 FOR VALUES WITH (MODULUS 2, REMAINDER 1)" +); $node_subscriber2->safe_psql('postgres', - "CREATE TABLE tab2 (a int PRIMARY KEY, c text DEFAULT 'sub2_tab2', b text)"); + "CREATE TABLE tab2 (a int PRIMARY KEY, c text DEFAULT 'sub2_tab2', b text)" +); $node_subscriber2->safe_psql('postgres', - "CREATE TABLE tab3 (a int PRIMARY KEY, c text DEFAULT 'sub2_tab3', b text)"); + "CREATE TABLE tab3 (a int PRIMARY KEY, c text DEFAULT 'sub2_tab3', b text)" +); $node_subscriber2->safe_psql('postgres', - "CREATE TABLE tab3_1 (a int PRIMARY KEY, c text DEFAULT 'sub2_tab3_1', b text)"); + "CREATE TABLE tab3_1 (a int PRIMARY KEY, c text DEFAULT 'sub2_tab3_1', b text)" +); # Publication that sub2 points to now publishes via root, so must update # subscription target relations. 
$node_subscriber2->safe_psql('postgres', @@ -331,12 +332,9 @@ $node_subscriber2->poll_query_until('postgres', $synced_query) or die "Timed out while waiting for subscriber to synchronize data"; # insert -$node_publisher->safe_psql('postgres', - "INSERT INTO tab1 VALUES (1), (0)"); -$node_publisher->safe_psql('postgres', - "INSERT INTO tab1_1 (a) VALUES (3)"); -$node_publisher->safe_psql('postgres', - "INSERT INTO tab1_2 VALUES (5)"); +$node_publisher->safe_psql('postgres', "INSERT INTO tab1 VALUES (1), (0)"); +$node_publisher->safe_psql('postgres', "INSERT INTO tab1_1 (a) VALUES (3)"); +$node_publisher->safe_psql('postgres', "INSERT INTO tab1_2 VALUES (5)"); $node_publisher->safe_psql('postgres', "INSERT INTO tab2 VALUES (1), (0), (3), (5)"); $node_publisher->safe_psql('postgres', @@ -347,156 +345,143 @@ $node_publisher->wait_for_catchup('sub2'); $result = $node_subscriber1->safe_psql('postgres', "SELECT c, a FROM tab2 ORDER BY 1, 2"); -is($result, qq(sub1_tab2|0 +is( $result, qq(sub1_tab2|0 sub1_tab2|1 sub1_tab2|3 sub1_tab2|5), 'inserts into tab2 replicated'); $result = $node_subscriber1->safe_psql('postgres', "SELECT c, a FROM tab3_1 ORDER BY 1, 2"); -is($result, qq(sub1_tab3_1|0 +is( $result, qq(sub1_tab3_1|0 sub1_tab3_1|1 sub1_tab3_1|3 sub1_tab3_1|5), 'inserts into tab3_1 replicated'); $result = $node_subscriber2->safe_psql('postgres', "SELECT c, a FROM tab1 ORDER BY 1, 2"); -is($result, qq(sub2_tab1|0 +is( $result, qq(sub2_tab1|0 sub2_tab1|1 sub2_tab1|3 sub2_tab1|5), 'inserts into tab1 replicated'); $result = $node_subscriber2->safe_psql('postgres', "SELECT c, a FROM tab2 ORDER BY 1, 2"); -is($result, qq(sub2_tab2|0 +is( $result, qq(sub2_tab2|0 sub2_tab2|1 sub2_tab2|3 sub2_tab2|5), 'inserts into tab2 replicated'); $result = $node_subscriber2->safe_psql('postgres', "SELECT c, a FROM tab3 ORDER BY 1, 2"); -is($result, qq(sub2_tab3|0 +is( $result, qq(sub2_tab3|0 sub2_tab3|1 sub2_tab3|3 sub2_tab3|5), 'inserts into tab3 replicated'); # update (replicated as update) -$node_publisher->safe_psql('postgres', - "UPDATE tab1 SET a = 6 WHERE a = 5"); -$node_publisher->safe_psql('postgres', - "UPDATE tab2 SET a = 6 WHERE a = 5"); -$node_publisher->safe_psql('postgres', - "UPDATE tab3 SET a = 6 WHERE a = 5"); +$node_publisher->safe_psql('postgres', "UPDATE tab1 SET a = 6 WHERE a = 5"); +$node_publisher->safe_psql('postgres', "UPDATE tab2 SET a = 6 WHERE a = 5"); +$node_publisher->safe_psql('postgres', "UPDATE tab3 SET a = 6 WHERE a = 5"); $node_publisher->wait_for_catchup('sub_viaroot'); $node_publisher->wait_for_catchup('sub2'); $result = $node_subscriber1->safe_psql('postgres', "SELECT c, a FROM tab2 ORDER BY 1, 2"); -is($result, qq(sub1_tab2|0 +is( $result, qq(sub1_tab2|0 sub1_tab2|1 sub1_tab2|3 sub1_tab2|6), 'update of tab2 replicated'); $result = $node_subscriber1->safe_psql('postgres', "SELECT c, a FROM tab3_1 ORDER BY 1, 2"); -is($result, qq(sub1_tab3_1|0 +is( $result, qq(sub1_tab3_1|0 sub1_tab3_1|1 sub1_tab3_1|3 sub1_tab3_1|6), 'update of tab3_1 replicated'); $result = $node_subscriber2->safe_psql('postgres', "SELECT c, a FROM tab1 ORDER BY 1, 2"); -is($result, qq(sub2_tab1|0 +is( $result, qq(sub2_tab1|0 sub2_tab1|1 sub2_tab1|3 sub2_tab1|6), 'inserts into tab1 replicated'); $result = $node_subscriber2->safe_psql('postgres', "SELECT c, a FROM tab2 ORDER BY 1, 2"); -is($result, qq(sub2_tab2|0 +is( $result, qq(sub2_tab2|0 sub2_tab2|1 sub2_tab2|3 sub2_tab2|6), 'inserts into tab2 replicated'); $result = $node_subscriber2->safe_psql('postgres', "SELECT c, a FROM tab3 ORDER BY 1, 2"); -is($result, 
qq(sub2_tab3|0 +is( $result, qq(sub2_tab3|0 sub2_tab3|1 sub2_tab3|3 sub2_tab3|6), 'inserts into tab3 replicated'); # update (replicated as delete+insert) -$node_publisher->safe_psql('postgres', - "UPDATE tab1 SET a = 2 WHERE a = 6"); -$node_publisher->safe_psql('postgres', - "UPDATE tab2 SET a = 2 WHERE a = 6"); -$node_publisher->safe_psql('postgres', - "UPDATE tab3 SET a = 2 WHERE a = 6"); +$node_publisher->safe_psql('postgres', "UPDATE tab1 SET a = 2 WHERE a = 6"); +$node_publisher->safe_psql('postgres', "UPDATE tab2 SET a = 2 WHERE a = 6"); +$node_publisher->safe_psql('postgres', "UPDATE tab3 SET a = 2 WHERE a = 6"); $node_publisher->wait_for_catchup('sub_viaroot'); $node_publisher->wait_for_catchup('sub2'); $result = $node_subscriber1->safe_psql('postgres', "SELECT c, a FROM tab2 ORDER BY 1, 2"); -is($result, qq(sub1_tab2|0 +is( $result, qq(sub1_tab2|0 sub1_tab2|1 sub1_tab2|2 sub1_tab2|3), 'update of tab2 replicated'); $result = $node_subscriber1->safe_psql('postgres', "SELECT c, a FROM tab3_1 ORDER BY 1, 2"); -is($result, qq(sub1_tab3_1|0 +is( $result, qq(sub1_tab3_1|0 sub1_tab3_1|1 sub1_tab3_1|2 sub1_tab3_1|3), 'update of tab3_1 replicated'); $result = $node_subscriber2->safe_psql('postgres', "SELECT c, a FROM tab1 ORDER BY 1, 2"); -is($result, qq(sub2_tab1|0 +is( $result, qq(sub2_tab1|0 sub2_tab1|1 sub2_tab1|2 sub2_tab1|3), 'update of tab1 replicated'); $result = $node_subscriber2->safe_psql('postgres', "SELECT c, a FROM tab2 ORDER BY 1, 2"); -is($result, qq(sub2_tab2|0 +is( $result, qq(sub2_tab2|0 sub2_tab2|1 sub2_tab2|2 sub2_tab2|3), 'update of tab2 replicated'); $result = $node_subscriber2->safe_psql('postgres', "SELECT c, a FROM tab3 ORDER BY 1, 2"); -is($result, qq(sub2_tab3|0 +is( $result, qq(sub2_tab3|0 sub2_tab3|1 sub2_tab3|2 sub2_tab3|3), 'update of tab3 replicated'); # delete -$node_publisher->safe_psql('postgres', - "DELETE FROM tab1"); -$node_publisher->safe_psql('postgres', - "DELETE FROM tab2"); -$node_publisher->safe_psql('postgres', - "DELETE FROM tab3"); +$node_publisher->safe_psql('postgres', "DELETE FROM tab1"); +$node_publisher->safe_psql('postgres', "DELETE FROM tab2"); +$node_publisher->safe_psql('postgres', "DELETE FROM tab3"); $node_publisher->wait_for_catchup('sub_viaroot'); $node_publisher->wait_for_catchup('sub2'); -$result = $node_subscriber1->safe_psql('postgres', - "SELECT a FROM tab2"); +$result = $node_subscriber1->safe_psql('postgres', "SELECT a FROM tab2"); is($result, qq(), 'delete tab2 replicated'); -$result = $node_subscriber2->safe_psql('postgres', - "SELECT a FROM tab1"); +$result = $node_subscriber2->safe_psql('postgres', "SELECT a FROM tab1"); is($result, qq(), 'delete from tab1 replicated'); -$result = $node_subscriber2->safe_psql('postgres', - "SELECT a FROM tab2"); +$result = $node_subscriber2->safe_psql('postgres', "SELECT a FROM tab2"); is($result, qq(), 'delete from tab2 replicated'); -$result = $node_subscriber2->safe_psql('postgres', - "SELECT a FROM tab3"); +$result = $node_subscriber2->safe_psql('postgres', "SELECT a FROM tab3"); is($result, qq(), 'delete from tab3 replicated'); # truncate @@ -505,52 +490,45 @@ $node_publisher->safe_psql('postgres', $node_publisher->safe_psql('postgres', "INSERT INTO tab2 VALUES (1), (2), (5)"); # these will NOT be replicated -$node_publisher->safe_psql('postgres', - "TRUNCATE tab1_2, tab2_1, tab3_1"); +$node_publisher->safe_psql('postgres', "TRUNCATE tab1_2, tab2_1, tab3_1"); $node_publisher->wait_for_catchup('sub_viaroot'); $node_publisher->wait_for_catchup('sub2'); -$result = 
$node_subscriber1->safe_psql('postgres', - "SELECT a FROM tab2 ORDER BY 1"); -is($result, qq(1 +$result = + $node_subscriber1->safe_psql('postgres', "SELECT a FROM tab2 ORDER BY 1"); +is( $result, qq(1 2 5), 'truncate of tab2_1 NOT replicated'); -$result = $node_subscriber2->safe_psql('postgres', - "SELECT a FROM tab1 ORDER BY 1"); -is($result, qq(1 +$result = + $node_subscriber2->safe_psql('postgres', "SELECT a FROM tab1 ORDER BY 1"); +is( $result, qq(1 2 5), 'truncate of tab1_2 NOT replicated'); -$result = $node_subscriber2->safe_psql('postgres', - "SELECT a FROM tab2 ORDER BY 1"); -is($result, qq(1 +$result = + $node_subscriber2->safe_psql('postgres', "SELECT a FROM tab2 ORDER BY 1"); +is( $result, qq(1 2 5), 'truncate of tab2_1 NOT replicated'); -$node_publisher->safe_psql('postgres', - "TRUNCATE tab1, tab2, tab3"); +$node_publisher->safe_psql('postgres', "TRUNCATE tab1, tab2, tab3"); $node_publisher->wait_for_catchup('sub_viaroot'); $node_publisher->wait_for_catchup('sub2'); -$result = $node_subscriber1->safe_psql('postgres', - "SELECT a FROM tab2"); +$result = $node_subscriber1->safe_psql('postgres', "SELECT a FROM tab2"); is($result, qq(), 'truncate of tab2 replicated'); -$result = $node_subscriber2->safe_psql('postgres', - "SELECT a FROM tab1"); +$result = $node_subscriber2->safe_psql('postgres', "SELECT a FROM tab1"); is($result, qq(), 'truncate of tab1 replicated'); -$result = $node_subscriber2->safe_psql('postgres', - "SELECT a FROM tab2"); +$result = $node_subscriber2->safe_psql('postgres', "SELECT a FROM tab2"); is($result, qq(), 'truncate of tab2 replicated'); -$result = $node_subscriber2->safe_psql('postgres', - "SELECT a FROM tab3"); +$result = $node_subscriber2->safe_psql('postgres', "SELECT a FROM tab3"); is($result, qq(), 'truncate of tab3 replicated'); -$result = $node_subscriber2->safe_psql('postgres', - "SELECT a FROM tab3_1"); +$result = $node_subscriber2->safe_psql('postgres', "SELECT a FROM tab3_1"); is($result, qq(), 'truncate of tab3_1 replicated'); diff --git a/src/tools/msvc/Mkvcbuild.pm b/src/tools/msvc/Mkvcbuild.pm index 6daa18f70ec..c21c94dc1f7 100644 --- a/src/tools/msvc/Mkvcbuild.pm +++ b/src/tools/msvc/Mkvcbuild.pm @@ -43,15 +43,14 @@ my $contrib_extrasource = { 'seg' => [ 'contrib/seg/segscan.l', 'contrib/seg/segparse.y' ], }; my @contrib_excludes = ( - 'bool_plperl', - 'commit_ts', 'hstore_plperl', - 'hstore_plpython', 'intagg', - 'jsonb_plperl', 'jsonb_plpython', - 'ltree_plpython', 'pgcrypto', - 'sepgsql', 'brin', - 'test_extensions', 'test_misc', - 'test_pg_dump', 'snapshot_too_old', - 'unsafe_tests'); + 'bool_plperl', 'commit_ts', + 'hstore_plperl', 'hstore_plpython', + 'intagg', 'jsonb_plperl', + 'jsonb_plpython', 'ltree_plpython', + 'pgcrypto', 'sepgsql', + 'brin', 'test_extensions', + 'test_misc', 'test_pg_dump', + 'snapshot_too_old', 'unsafe_tests'); # Set of variables for frontend modules my $frontend_defines = { 'initdb' => 'FRONTEND' }; @@ -121,7 +120,7 @@ sub mkvcbuild our @pgcommonallfiles = qw( archive.c base64.c checksum_helper.c - config_info.c controldata_utils.c d2s.c encnames.c exec.c + config_info.c controldata_utils.c d2s.c encnames.c exec.c f2s.c file_perm.c hashfn.c ip.c jsonapi.c keywords.c kwlookup.c link-canary.c md5.c pg_lzcompress.c pgfnames.c psprintf.c relpath.c rmtree.c @@ -303,7 +302,8 @@ sub mkvcbuild $libecpgcompat->AddIncludeDir('src/interfaces/ecpg/include'); $libecpgcompat->AddIncludeDir('src/interfaces/libpq'); $libecpgcompat->UseDef('src/interfaces/ecpg/compatlib/compatlib.def'); - 
$libecpgcompat->AddReference($pgtypes, $libecpg, $libpgport, $libpgcommon); + $libecpgcompat->AddReference($pgtypes, $libecpg, $libpgport, + $libpgcommon); my $ecpg = $solution->AddProject('ecpg', 'exe', 'interfaces', 'src/interfaces/ecpg/preproc'); @@ -651,11 +651,13 @@ sub mkvcbuild # 'Can't spawn "conftest.exe"'; suppress that. no warnings; - no strict 'subs'; ## no critic (ProhibitNoStrict) + no strict 'subs'; ## no critic (ProhibitNoStrict) # Disable error dialog boxes like we do in the postmaster. # Here, we run code that triggers relevant errors. - use if ($^O eq "MSWin32"), 'Win32API::File', qw(SetErrorMode :SEM_); + use + if ($^O eq "MSWin32"), 'Win32API::File', + qw(SetErrorMode :SEM_); my $oldmode = SetErrorMode( SEM_FAILCRITICALERRORS | SEM_NOGPFAULTERRORBOX); system(".\\$exe"); @@ -766,8 +768,8 @@ sub mkvcbuild # Add transform modules dependent on plperl my $bool_plperl = AddTransformModule( - 'bool_plperl', 'contrib/bool_plperl', - 'plperl', 'src/pl/plperl'); + 'bool_plperl', 'contrib/bool_plperl', + 'plperl', 'src/pl/plperl'); my $hstore_plperl = AddTransformModule( 'hstore_plperl', 'contrib/hstore_plperl', 'plperl', 'src/pl/plperl', diff --git a/src/tools/msvc/Solution.pm b/src/tools/msvc/Solution.pm index 545bdcef7be..a13ca6e02e8 100644 --- a/src/tools/msvc/Solution.pm +++ b/src/tools/msvc/Solution.pm @@ -64,7 +64,8 @@ sub DeterminePlatform # Examine CL help output to determine if we are in 32 or 64-bit mode. my $output = `cl /? 2>&1`; $? >> 8 == 0 or die "cl command not found"; - $self->{platform} = ($output =~ /^\/favor:<.+AMD64/m) ? 'x64' : 'Win32'; + $self->{platform} = + ($output =~ /^\/favor:<.+AMD64/m) ? 'x64' : 'Win32'; } else { @@ -143,8 +144,8 @@ sub GetOpenSSLVersion sub GenerateFiles { - my $self = shift; - my $bits = $self->{platform} eq 'Win32' ? 32 : 64; + my $self = shift; + my $bits = $self->{platform} eq 'Win32' ? 32 : 64; my $ac_init_found = 0; my $package_name; my $package_version; @@ -157,7 +158,8 @@ sub GenerateFiles || confess("Could not open configure.in for reading\n"); while (<$c>) { - if (/^AC_INIT\(\[([^\]]+)\], \[([^\]]+)\], \[([^\]]+)\], \[([^\]]*)\], \[([^\]]+)\]/) + if (/^AC_INIT\(\[([^\]]+)\], \[([^\]]+)\], \[([^\]]+)\], \[([^\]]*)\], \[([^\]]+)\]/ + ) { $ac_init_found = 1; @@ -165,7 +167,7 @@ sub GenerateFiles $package_version = $2; $package_bugreport = $3; #$package_tarname = $4; - $package_url = $5; + $package_url = $5; if ($package_version !~ /^(\d+)(?:\.(\d+))?/) { @@ -494,8 +496,8 @@ sub GenerateFiles inline => '__inline', pg_restrict => '__restrict', # not defined, because it'd conflict with __declspec(restrict) - restrict => undef, - typeof => undef,); + restrict => undef, + typeof => undef,); if ($self->{options}->{uuid}) { @@ -528,9 +530,10 @@ sub GenerateFiles } } - $self->GenerateConfigHeader('src/include/pg_config.h', \%define, 1); + $self->GenerateConfigHeader('src/include/pg_config.h', \%define, 1); $self->GenerateConfigHeader('src/include/pg_config_ext.h', \%define, 0); - $self->GenerateConfigHeader('src/interfaces/ecpg/include/ecpg_config.h', \%define, 0); + $self->GenerateConfigHeader('src/interfaces/ecpg/include/ecpg_config.h', + \%define, 0); $self->GenerateDefFile( "src/interfaces/libpq/libpqdll.def", @@ -835,8 +838,8 @@ sub GenerateConfigHeader my $config_header_in = $config_header . 
     my $config_header_in = $config_header . '.in';
 
-    if (IsNewer($config_header, $config_header_in) ||
-        IsNewer($config_header, __FILE__))
+    if ( IsNewer($config_header, $config_header_in)
+      || IsNewer($config_header, __FILE__))
     {
         my %defines_copy = %$defines;
@@ -858,7 +861,8 @@
             {
                 if (defined $defines->{$macro})
                 {
-                    print $o "#${ws}define $macro ", $defines->{$macro}, "\n";
+                    print $o "#${ws}define $macro ", $defines->{$macro},
+                      "\n";
                 }
                 else
                 {
@@ -868,7 +872,8 @@
                 }
                 else
                 {
-                    croak "undefined symbol: $macro at $config_header line $.";
+                    croak
+                      "undefined symbol: $macro at $config_header line $.";
                 }
             }
             else
diff --git a/src/tools/msvc/vcregress.pl b/src/tools/msvc/vcregress.pl
index f95f7a5c7ab..0a98f6e37d5 100644
--- a/src/tools/msvc/vcregress.pl
+++ b/src/tools/msvc/vcregress.pl
@@ -603,9 +603,7 @@ sub upgradecheck
     print "\nSetting up new cluster\n\n";
     standard_initdb() or exit 1;
     print "\nRunning pg_upgrade\n\n";
-    @args = (
-        'pg_upgrade', '-d', "$data.old", '-D', $data, '-b',
-        $bindir);
+    @args = ('pg_upgrade', '-d', "$data.old", '-D', $data, '-b', $bindir);
     system(@args) == 0 or exit 1;
     print "\nStarting new cluster\n\n";
     @args = ('pg_ctl', '-l', "$logdir/postmaster2.log", 'start');
diff --git a/src/tools/pgindent/typedefs.list b/src/tools/pgindent/typedefs.list
index b319266bb35..ac9fb292ce5 100644
--- a/src/tools/pgindent/typedefs.list
+++ b/src/tools/pgindent/typedefs.list
@@ -1,4 +1,3 @@
-ABITVEC
 ACCESS_ALLOWED_ACE
 ACL
 ACL_SIZE_INFORMATION
@@ -23,6 +22,7 @@ AclMaskHow
 AclMode
 AclResult
 AcquireSampleRowsFunc
+ActionList
 ActiveSnapshotElt
 AddForeignUpdateTargets_function
 AffixNode
@@ -83,6 +83,7 @@ AlterPublicationStmt
 AlterRoleSetStmt
 AlterRoleStmt
 AlterSeqStmt
+AlterStatsStmt
 AlterSubscriptionStmt
 AlterSubscriptionType
 AlterSystemStmt
@@ -94,6 +95,9 @@ AlterTableMoveAllStmt
 AlterTableSpaceOptionsStmt
 AlterTableStmt
 AlterTableType
+AlterTableUtilityContext
+AlterTypeRecurseParams
+AlterTypeStmt
 AlterUserMappingStmt
 AlteredTableInfo
 AlternativeSubPlan
@@ -155,7 +159,6 @@ BF_word_signed
 BIGNUM
 BIO
 BIO_METHOD
-BITVEC
 BITVECP
 BMS_Comparison
 BMS_Membership
@@ -166,6 +169,9 @@ BOX
 BTArrayKeyInfo
 BTBuildState
 BTCycleId
+BTDedupInterval
+BTDedupState
+BTDedupStateData
 BTIndexStat
 BTInsertState
 BTInsertStateData
@@ -193,6 +199,8 @@ BTStack
 BTStackData
 BTVacInfo
 BTVacState
+BTVacuumPosting
+BTVacuumPostingData
 BTWriteState
 BYTE
 Backend
@@ -305,7 +313,6 @@ CRSSnapshotAction
 CState
 CTEMaterialize
 CV
-C_block
 CachedExpression
 CachedPlan
 CachedPlanSource
@@ -373,6 +380,8 @@ ComboCidKeyData
 Command
 CommandDest
 CommandId
+CommandTag
+CommandTagBehavior
 CommentItem
 CommentStmt
 CommitTimestampEntry
@@ -537,8 +546,6 @@ DropSubscriptionStmt
 DropTableSpaceStmt
 DropUserMappingStmt
 DropdbStmt
-DummyAmEnum
-DummyIndexOptions
 DumpComponents
 DumpId
 DumpOptions
@@ -627,6 +634,7 @@ ExplainFormat
 ExplainOneQuery_hook_type
 ExplainState
 ExplainStmt
+ExplainWorkersState
 ExportedSnapshot
 Expr
 ExprContext
@@ -659,7 +667,6 @@ FdwInfo
 FdwRoutine
 FetchDirection
 FetchStmt
-FieldNot
 FieldSelect
 FieldStore
 File
@@ -678,6 +685,12 @@ FlushPosition
 FmgrBuiltin
 FmgrHookEventType
 FmgrInfo
+ForBothCellState
+ForBothState
+ForEachState
+ForFiveState
+ForFourState
+ForThreeState
 ForeignDataWrapper
 ForeignKeyCacheInfo
 ForeignKeyOptInfo
@@ -720,7 +733,6 @@ FormData_pg_opclass
 FormData_pg_operator
 FormData_pg_opfamily
 FormData_pg_partitioned_table
-FormData_pg_pltemplate
 FormData_pg_policy
 FormData_pg_proc
 FormData_pg_publication
@@ -777,7 +789,6 @@ Form_pg_opclass
 Form_pg_operator
 Form_pg_opfamily
 Form_pg_partitioned_table
-Form_pg_pltemplate
 Form_pg_policy
 Form_pg_proc
 Form_pg_publication
@@ -838,6 +849,7 @@ GBT_VARKEY_R
 GENERAL_NAME
 GISTBuildBuffers
 GISTBuildState
+GISTDeletedPageContents
 GISTENTRY
 GISTInsertStack
 GISTInsertState
@@ -908,7 +920,6 @@ GinTernaryValue
 GinTupleCollector
 GinVacuumState
 GistBufferingMode
-GistBulkDeleteResult
 GistEntryVector
 GistHstoreOptions
 GistInetKey
@@ -973,6 +984,8 @@ HTAB
 HTSV_Result
 HV
 Hash
+HashAggBatch
+HashAggSpill
 HashAllocFunc
 HashBuildState
 HashCompareFunc
@@ -997,6 +1010,7 @@ HashScanPosData
 HashScanPosItem
 HashSkewBucket
 HashState
+HashTapeInfo
 HashValueFunc
 HbaLine
 HbaToken
@@ -1033,6 +1047,12 @@ ImportQual
 IncludeWal
 InclusionOpaque
 IncrementVarSublevelsUp_context
+IncrementalSort
+IncrementalSortExecutionStatus
+IncrementalSortGroupInfo
+IncrementalSortInfo
+IncrementalSortPath
+IncrementalSortState
 Index
 IndexAMProperty
 IndexAmRoutine
@@ -1133,7 +1153,13 @@ JsonHashEntry
 JsonIterateStringValuesAction
 JsonLexContext
 JsonLikeRegexContext
+JsonManifestFileField
+JsonManifestParseContext
+JsonManifestParseState
+JsonManifestSemanticState
+JsonManifestWALRangeField
 JsonParseContext
+JsonParseErrorType
 JsonPath
 JsonPathBool
 JsonPathExecContext
@@ -1216,7 +1242,6 @@ LPDWORD
 LPSECURITY_ATTRIBUTES
 LPSERVICE_STATUS
 LPSTR
-LPTHREAD_START_ROUTINE
 LPTSTR
 LPVOID
 LPWSTR
@@ -1246,6 +1271,7 @@ LexemeKey
 LexizeData
 LibraryInfo
 Limit
+LimitOption
 LimitPath
 LimitState
 LimitStateCond
@@ -1297,6 +1323,7 @@ LogicalOutputPluginWriterWrite
 LogicalRepBeginData
 LogicalRepCommitData
 LogicalRepCtxStruct
+LogicalRepPartMapEntry
 LogicalRepRelId
 LogicalRepRelMapEntry
 LogicalRepRelation
@@ -1362,7 +1389,6 @@ MultiXactMember
 MultiXactOffset
 MultiXactStateData
 MultiXactStatus
-MyData
 NDBOX
 NODE
 NUMCacheEntry
@@ -1391,6 +1417,8 @@ Node
 NodeTag
 NonEmptyRange
 Notification
+NotificationHash
+NotificationList
 NotifyStmt
 Nsrt
 NullIfExpr
@@ -1410,7 +1438,6 @@ OSAPerQueryState
 OSInfo
 OSSLCipher
 OSSLDigest
-OSVERSIONINFO
 OVERLAPPED
 ObjectAccessDrop
 ObjectAccessNamespaceSearch
@@ -1436,7 +1463,6 @@ OldSerXidControl
 OldSnapshotControlData
 OldToNewMapping
 OldToNewMappingData
-OldTriggerInfo
 OnCommitAction
 OnCommitItem
 OnConflictAction
@@ -1524,7 +1550,6 @@ PGresult
 PGresult_data
 PHANDLE
 PLAINTREE
-PLTemplate
 PLUID_AND_ATTRIBUTES
 PLcword
 PLpgSQL_arrayelem
@@ -1634,6 +1659,7 @@ PQconninfoOption
 PQnoticeProcessor
 PQnoticeReceiver
 PQprintOpt
+PQsslKeyPassHook_type
 PREDICATELOCK
 PREDICATELOCKTAG
 PREDICATELOCKTARGET
@@ -1695,9 +1721,11 @@ ParamKind
 ParamListInfo
 ParamPathInfo
 ParamRef
+ParamsErrorCbData
 ParentMapEntry
 ParseCallbackState
 ParseExprKind
+ParseNamespaceColumn
 ParseNamespaceItem
 ParseParamRefHook
 ParseState
@@ -1723,6 +1751,7 @@ PartitionElem
 PartitionHashBound
 PartitionKey
 PartitionListValue
+PartitionMap
 PartitionPruneCombineOp
 PartitionPruneContext
 PartitionPruneInfo
@@ -1753,6 +1782,7 @@ Pattern_Prefix_Status
 Pattern_Type
 PendingFsyncEntry
 PendingRelDelete
+PendingRelSync
 PendingUnlinkEntry
 PendingWriteback
 PerlInterpreter
@@ -1802,10 +1832,13 @@ PgStat_MsgRecoveryConflict
 PgStat_MsgResetcounter
 PgStat_MsgResetsharedcounter
 PgStat_MsgResetsinglecounter
+PgStat_MsgResetslrucounter
+PgStat_MsgSLRU
 PgStat_MsgTabpurge
 PgStat_MsgTabstat
 PgStat_MsgTempFile
 PgStat_MsgVacuum
+PgStat_SLRUStats
 PgStat_Shared_Reset_Target
 PgStat_Single_Reset_Type
 PgStat_StatDBEntry
@@ -1842,7 +1875,6 @@ Pool
 PopulateArrayContext
 PopulateArrayState
 PopulateRecordCache
-PopulateRecordsetCache
 PopulateRecordsetState
 Port
 Portal
@@ -1861,10 +1893,12 @@ PredXactList
 PredXactListElement
 PredicateLockData
 PredicateLockTargetType
+PrefetchBufferResult
 PrepParallelRestorePtrType
 PrepareStmt
 PreparedParamsData
 PreparedStatement
+PresortedKeyData
 PrewarmType
 PrintExtraTocPtrType
 PrintTocDataPtrType
@@ -1876,6 +1910,8 @@ PrivTarget
 PrivateRefCountEntry
 ProcArrayStruct
 ProcLangInfo
+ProcSignalBarrierType
+ProcSignalHeader
 ProcSignalReason
 ProcSignalSlot
 ProcState
@@ -1901,6 +1937,7 @@ PsqlSettings
 Publication
 PublicationActions
 PublicationInfo
+PublicationPartOpt
 PublicationRelInfo
 PullFilter
 PullFilterOps
@@ -1924,6 +1961,7 @@ QUERY_SECURITY_CONTEXT_TOKEN_FN
 QualCost
 QualItem
 Query
+QueryCompletion
 QueryDesc
 QueryEnvironment
 QueryInfo
@@ -2118,6 +2156,7 @@ SISeg
 SIZE_T
 SMgrRelation
 SMgrRelationData
+SMgrSortArray
 SOCKADDR
 SOCKET
 SPELL
@@ -2156,12 +2195,14 @@ ScanKeywordHashFunc
 ScanKeywordList
 ScanState
 ScanTypeControl
+ScannerCallbackState
 SchemaQuery
 SecBuffer
 SecBufferDesc
 SecLabelItem
 SecLabelStmt
 SeenRelsEntry
+SelectLimit
 SelectStmt
 Selectivity
 SemTPadded
@@ -2200,6 +2241,7 @@ SharedDependencyType
 SharedExecutorInstrumentation
 SharedFileSet
 SharedHashInfo
+SharedIncrementalSortInfo
 SharedInvalCatalogMsg
 SharedInvalCatcacheMsg
 SharedInvalRelcacheMsg
@@ -2231,6 +2273,8 @@ SimpleActionListCell
 SimpleEcontextStackEntry
 SimpleOidList
 SimpleOidListCell
+SimplePtrList
+SimplePtrListCell
 SimpleStats
 SimpleStringList
 SimpleStringListCell
@@ -2240,6 +2284,7 @@ SlabBlock
 SlabChunk
 SlabContext
 SlabSlot
+SlotAcquireBehavior
 SlotErrCallbackArg
 SlotNumber
 SlruCtl
@@ -2351,6 +2396,7 @@ SupportRequestSimplify
 Syn
 SyncOps
 SyncRepConfigData
+SyncRepStandbyData
 SyncRequestType
 SysScanDesc
 SyscacheCallbackFunction
@@ -2396,6 +2442,7 @@ TSQueryParserState
 TSQuerySign
 TSReadPointer
 TSTemplateInfo
+TSTernaryValue
 TSTokenTypeStorage
 TSVector
 TSVectorBuildState
@@ -2404,6 +2451,7 @@ TSVectorParseState
 TSVectorStat
 TState
 TStoreState
+TXNEntryFile
 TYPCATEGORY
 T_Action
 T_WorkerStatus
@@ -2438,6 +2486,7 @@ Tcl_Interp
 Tcl_NotifierProcs
 Tcl_Obj
 Tcl_Time
+TempNamespaceStatus
 TestDecodingData
 TestSpec
 TextFreq
@@ -2532,8 +2581,6 @@ TwoPhasePredicateXactRecord
 TwoPhaseRecordOnDisk
 TwoPhaseRmgrId
 TwoPhaseStateData
-TxidEpoch
-TxidSnapshot
 Type
 TypeCacheEntry
 TypeCacheEnumData
@@ -2558,11 +2605,14 @@ ULONG
 ULONG_PTR
 UV
 UVersionInfo
+UnicodeNormalizationForm
+UnicodeNormalizationQC
 Unique
 UniquePath
 UniquePathMethod
 UniqueState
 UnlistenStmt
+UnpackTarState
 UnresolvedTup
 UnresolvedTupData
 UpdateStmt
@@ -2607,10 +2657,15 @@ ViewStmt
 VirtualTransactionId
 VirtualTupleTableSlot
 Vsrt
-WAITORTIMERCALLBACK
 WAIT_ORDER
+WALAvailability
 WALInsertLock
 WALInsertLockPadded
+WALOpenSegment
+WALReadError
+WALSegmentCloseCB
+WALSegmentContext
+WALSegmentOpenCB
 WCHAR
 WCOKind
 WFW_WaitOption
@@ -2678,9 +2733,12 @@ WorkerJobRestorePtrType
 Working_State
 WriteBufPtrType
 WriteBytePtrType
+WriteDataCallback
 WriteDataPtrType
 WriteExtraTocPtrType
 WriteFunc
+WriteManifestState
+WriteTarState
 WritebackContext
 X509
 X509_EXTENSION
@@ -2700,6 +2758,7 @@ XLogPageHeader
 XLogPageHeaderData
 XLogPageReadCB
 XLogPageReadPrivate
+XLogReaderRoutine
 XLogReaderState
 XLogRecData
 XLogRecPtr
@@ -2736,7 +2795,6 @@ __CreateJobObject
 __CreateRestrictedToken
 __IsProcessInJob
 __QueryInformationJobObject
-__RegisterWaitForSingleObject
 __SetInformationJobObject
 _resultmap
 _stringlist
@@ -2775,6 +2833,8 @@ avl_node
 avl_tree
 avw_dbase
 backslashResult
+backup_manifest_info
+backup_manifest_option
 base_yy_extra_type
 basebackup_options
 bgworker_main_type
@@ -2831,7 +2891,6 @@ deparse_namespace
 destructor
 dev_t
 digit
-directory_fctx
 disassembledLeaf
 dlist_head
 dlist_iter
@@ -2871,8 +2930,6 @@ ec_member_foreign_arg
 ec_member_matches_arg
 emit_log_hook_type
 eval_const_expressions_context
-event_trigger_command_tag_check_result
-event_trigger_support_data
 exec_thread_arg
 execution_state
 explain_get_index_name_hook_type
@@ -2996,6 +3053,9 @@ iterator
 jmp_buf
 join_search_hook_type
 json_aelem_action
+json_manifest_error_callback
+json_manifest_perfile_callback
+json_manifest_perwalrange_callback
 json_ofield_action
 json_scalar_action
 json_struct_action
@@ -3007,7 +3067,7 @@ leafSegmentInfo
 leaf_item
 line_t
 lineno_t
-list_qsort_comparator
+list_sort_comparator
 local_relopt
 local_relopts
 locale_t
@@ -3028,6 +3088,10 @@ macKEY
 macaddr
 macaddr8
 macaddr_sortsupport_state
+manifest_file
+manifest_files_hash
+manifest_files_iterator
+manifest_wal_range
 map_variable_attnos_context
 max_parallel_hazard_context
 mb2wchar_with_len_converter
@@ -3053,6 +3117,7 @@ mpz_t
 mxact
 mxtruncinfo
 needs_fmgr_hook_type
+network_sortsupport_state
 nodeitem
 normal_rand_fctx
 ntile_context
@@ -3063,6 +3128,7 @@ oidKEY
 oidvector
 on_dsm_detach_callback
 on_exit_nicely_callback
+openssl_tls_init_hook_typ
 ossl_EVP_cipher_func
 other
 output_type
@@ -3073,11 +3139,16 @@ pairingheap_comparator
 pairingheap_node
 parallel_worker_main_type
 parse_error_callback_arg
+parser_context
+partition_method_t
 pendingPosition
 pgParameterStatus
 pg_atomic_flag
 pg_atomic_uint32
 pg_atomic_uint64
+pg_checksum_context
+pg_checksum_raw_context
+pg_checksum_type
 pg_conn_host
 pg_conn_host_type
 pg_conv_map
@@ -3107,6 +3178,7 @@ pg_tz
 pg_tz_cache
 pg_tzenum
 pg_unicode_decomposition
+pg_unicode_normprops
 pg_utf_to_local_combined
 pg_uuid_t
 pg_wc_probefunc
@@ -3147,6 +3219,7 @@ pltcl_proc_key
 pltcl_proc_ptr
 pltcl_query_desc
 pointer
+polymorphic_actuals
 pos_trgm
 post_parse_analyze_hook_type
 pqbool
@@ -3180,6 +3253,7 @@ pull_vars_context
 pullup_replace_vars_context
 pushdown_safety_info
 qsort_arg_comparator
+qsort_comparator
 query_pathkeys_callback
 radius_attribute
 radius_packet
@@ -3222,6 +3296,7 @@ rijndael_ctx
 rm_detail_t
 role_auth_extra
 row_security_policy_hook_type
+rsv_callback
 save_buffer
 scram_HMAC_ctx
 scram_state
@@ -3314,14 +3389,12 @@ trgm
 trgm_mb_char
 trivalue
 tsKEY
-ts_db_fctx
 ts_parserstate
 ts_tokenizer
 ts_tokentype
 tsearch_readline_state
 tuplehash_hash
 tuplehash_iterator
-txid
 type
 tzEntry
 u1byte
@@ -3360,12 +3433,14 @@ varattrib_1b
 varattrib_1b_e
 varattrib_4b
 vbits
+verifier_context
 walrcv_check_conninfo_fn
 walrcv_connect_fn
 walrcv_create_slot_fn
 walrcv_disconnect_fn
 walrcv_endstreaming_fn
 walrcv_exec_fn
+walrcv_get_backend_pid_fn
 walrcv_get_conninfo_fn
 walrcv_get_senderinfo_fn
 walrcv_identify_system_fn
@@ -3388,6 +3463,7 @@ xl_brin_insert
 xl_brin_revmap_extend
 xl_brin_samepage_update
 xl_brin_update
+xl_btree_dedup
 xl_btree_delete
 xl_btree_insert
 xl_btree_mark_page_halfdead
@@ -3396,6 +3472,7 @@ xl_btree_newroot
 xl_btree_reuse_page
 xl_btree_split
 xl_btree_unlink_page
+xl_btree_update
 xl_btree_vacuum
 xl_clog_truncate
 xl_commit_ts_set
@@ -3460,6 +3537,7 @@ xl_xact_origin
 xl_xact_parsed_abort
 xl_xact_parsed_commit
 xl_xact_parsed_prepare
+xl_xact_prepare
 xl_xact_relfilenodes
 xl_xact_subxacts
 xl_xact_twophase
diff --git a/src/tools/version_stamp.pl b/src/tools/version_stamp.pl
index fcd3f180487..80a8efc6d52 100755
--- a/src/tools/version_stamp.pl
+++ b/src/tools/version_stamp.pl
@@ -35,23 +35,23 @@ my ($dotneeded);
 
 if ($minor =~ m/^\d+$/)
 {
-    $dotneeded = 1;
+    $dotneeded = 1;
 }
 elsif ($minor eq "devel")
 {
-    $dotneeded = 0;
+    $dotneeded = 0;
 }
 elsif ($minor =~ m/^alpha\d+$/)
 {
-    $dotneeded = 0;
+    $dotneeded = 0;
 }
 elsif ($minor =~ m/^beta\d+$/)
 {
-    $dotneeded = 0;
+    $dotneeded = 0;
 }
 elsif ($minor =~ m/^rc\d+$/)
 {
-    $dotneeded = 0;
+    $dotneeded = 0;
 }
 else
 {
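The TAP-test hunks at the top of this page only re-wrap PostgreSQL's standard Perl test idiom: safe_psql() runs a query against a test node and returns its stdout, and Test::More's is() compares that output with a qq() string. A minimal self-contained sketch of the idiom follows, assuming the v13-era PostgresNode module from src/test/perl is on @INC and the script runs under the tree's TAP harness; the node and table names here are hypothetical, not taken from the commit:

    # Sketch only: the safe_psql()/is() pattern seen in the reformatted
    # tests above.  "demo" and "t" are hypothetical names.
    use strict;
    use warnings;
    use PostgresNode;
    use Test::More tests => 1;

    # Create, initialize, and start a throwaway cluster.
    my $node = get_new_node('demo');
    $node->init;
    $node->start;

    $node->safe_psql('postgres',
        "CREATE TABLE t (a int); INSERT INTO t VALUES (1), (2)");

    # safe_psql() returns the query's stdout; qq() interpolates \n, so the
    # expected value is the two rows exactly as psql prints them.
    my $result = $node->safe_psql('postgres', "SELECT a FROM t ORDER BY 1");
    is($result, qq(1\n2), 'rows come back in order');

    $node->stop;

Since pgperltidy only rewraps and realigns such calls, every Perl hunk above pairs a removed line with an added line that differs only in whitespace or line-break placement.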