| author | Tom Lane <tgl@sss.pgh.pa.us> | 2019-05-22 12:55:34 -0400 |
|---|---|---|
| committer | Tom Lane <tgl@sss.pgh.pa.us> | 2019-05-22 12:55:34 -0400 |
| commit | be76af171cdb3e7465c4ef234af403f97ad79b7b (patch) | |
| tree | 1fa62d2b7a6680a4237a1548f7002fa0b234b143 /src/backend | |
| parent | 66a4bad83aaa6613a45a00a488c04427f9969fb4 (diff) | |
| download | postgresql-be76af171cdb3e7465c4ef234af403f97ad79b7b.tar.gz postgresql-be76af171cdb3e7465c4ef234af403f97ad79b7b.zip | |
Initial pgindent run for v12.
This is still using the 2.0 version of pg_bsd_indent.
I thought it would be good to commit this separately,
so as to document the differences between 2.0 and 2.1 behavior.
Discussion: https://postgr.es/m/16296.1558103386@sss.pgh.pa.us
Diffstat (limited to 'src/backend')
109 files changed, 599 insertions(+), 560 deletions(-)
diff --git a/src/backend/access/common/heaptuple.c b/src/backend/access/common/heaptuple.c index 783b04a3cb9..a48a6cd757f 100644 --- a/src/backend/access/common/heaptuple.c +++ b/src/backend/access/common/heaptuple.c @@ -787,8 +787,8 @@ expand_tuple(HeapTuple *targetHeapTuple, } /* - * Now walk the missing attributes. If there is a missing value - * make space for it. Otherwise, it's going to be NULL. + * Now walk the missing attributes. If there is a missing value make + * space for it. Otherwise, it's going to be NULL. */ for (attnum = firstmissingnum; attnum < natts; diff --git a/src/backend/access/gin/ginvacuum.c b/src/backend/access/gin/ginvacuum.c index b9a28d18633..dc46f2460e2 100644 --- a/src/backend/access/gin/ginvacuum.c +++ b/src/backend/access/gin/ginvacuum.c @@ -394,17 +394,17 @@ ginVacuumPostingTree(GinVacuumState *gvs, BlockNumber rootBlkno) * There is at least one empty page. So we have to rescan the tree * deleting empty pages. */ - Buffer buffer; + Buffer buffer; DataPageDeleteStack root, - *ptr, - *tmp; + *ptr, + *tmp; buffer = ReadBufferExtended(gvs->index, MAIN_FORKNUM, rootBlkno, RBM_NORMAL, gvs->strategy); /* - * Lock posting tree root for cleanup to ensure there are no concurrent - * inserts. + * Lock posting tree root for cleanup to ensure there are no + * concurrent inserts. */ LockBufferForCleanup(buffer); diff --git a/src/backend/access/gin/ginxlog.c b/src/backend/access/gin/ginxlog.c index b648af1ff65..c945b282721 100644 --- a/src/backend/access/gin/ginxlog.c +++ b/src/backend/access/gin/ginxlog.c @@ -205,8 +205,8 @@ ginRedoRecompress(Page page, ginxlogRecompressDataLeaf *data) while (segno < a_segno) { /* - * Once modification is started and page tail is copied, we've - * to copy unmodified segments. + * Once modification is started and page tail is copied, we've to + * copy unmodified segments. */ segsize = SizeOfGinPostingList(oldseg); if (tailCopy) @@ -257,12 +257,12 @@ ginRedoRecompress(Page page, ginxlogRecompressDataLeaf *data) } /* - * We're about to start modification of the page. So, copy tail of the - * page if it's not done already. + * We're about to start modification of the page. So, copy tail of + * the page if it's not done already. */ if (!tailCopy && segptr != segmentend) { - int tailSize = segmentend - segptr; + int tailSize = segmentend - segptr; tailCopy = (Pointer) palloc(tailSize); memcpy(tailCopy, segptr, tailSize); @@ -304,7 +304,7 @@ ginRedoRecompress(Page page, ginxlogRecompressDataLeaf *data) segptr = (Pointer) oldseg; if (segptr != segmentend && tailCopy) { - int restSize = segmentend - segptr; + int restSize = segmentend - segptr; Assert(writePtr + restSize <= PageGetSpecialPointer(page)); memcpy(writePtr, segptr, restSize); diff --git a/src/backend/access/gist/gistutil.c b/src/backend/access/gist/gistutil.c index 94b6ad6a59b..49df05653b3 100644 --- a/src/backend/access/gist/gistutil.c +++ b/src/backend/access/gist/gistutil.c @@ -839,16 +839,16 @@ gistNewBuffer(Relation r) gistcheckpage(r, buffer); /* - * Otherwise, recycle it if deleted, and too old to have any processes - * interested in it. + * Otherwise, recycle it if deleted, and too old to have any + * processes interested in it. */ if (gistPageRecyclable(page)) { /* - * If we are generating WAL for Hot Standby then create a - * WAL record that will allow us to conflict with queries - * running on standby, in case they have snapshots older - * than the page's deleteXid. 
+ * If we are generating WAL for Hot Standby then create a WAL + * record that will allow us to conflict with queries running + * on standby, in case they have snapshots older than the + * page's deleteXid. */ if (XLogStandbyInfoActive() && RelationNeedsWAL(r)) gistXLogPageReuse(r, blkno, GistPageGetDeleteXid(page)); diff --git a/src/backend/access/hash/hashfunc.c b/src/backend/access/hash/hashfunc.c index 0bf15ae7236..6ec1ec3df3a 100644 --- a/src/backend/access/hash/hashfunc.c +++ b/src/backend/access/hash/hashfunc.c @@ -246,7 +246,7 @@ hashtext(PG_FUNCTION_ARGS) { text *key = PG_GETARG_TEXT_PP(0); Oid collid = PG_GET_COLLATION(); - pg_locale_t mylocale = 0; + pg_locale_t mylocale = 0; Datum result; if (!collid) @@ -271,7 +271,7 @@ hashtext(PG_FUNCTION_ARGS) int32_t ulen = -1; UChar *uchar = NULL; Size bsize; - uint8_t *buf; + uint8_t *buf; ulen = icu_to_uchar(&uchar, VARDATA_ANY(key), VARSIZE_ANY_EXHDR(key)); @@ -302,7 +302,7 @@ hashtextextended(PG_FUNCTION_ARGS) { text *key = PG_GETARG_TEXT_PP(0); Oid collid = PG_GET_COLLATION(); - pg_locale_t mylocale = 0; + pg_locale_t mylocale = 0; Datum result; if (!collid) @@ -328,7 +328,7 @@ hashtextextended(PG_FUNCTION_ARGS) int32_t ulen = -1; UChar *uchar = NULL; Size bsize; - uint8_t *buf; + uint8_t *buf; ulen = icu_to_uchar(&uchar, VARDATA_ANY(key), VARSIZE_ANY_EXHDR(key)); diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c index 19d2c529d80..723e153705d 100644 --- a/src/backend/access/heap/heapam.c +++ b/src/backend/access/heap/heapam.c @@ -1684,8 +1684,8 @@ void heap_get_latest_tid(TableScanDesc sscan, ItemPointer tid) { - Relation relation = sscan->rs_rd; - Snapshot snapshot = sscan->rs_snapshot; + Relation relation = sscan->rs_rd; + Snapshot snapshot = sscan->rs_snapshot; ItemPointerData ctid; TransactionId priorXmax; diff --git a/src/backend/access/heap/heapam_handler.c b/src/backend/access/heap/heapam_handler.c index 56b2abda5fb..674c1d3a818 100644 --- a/src/backend/access/heap/heapam_handler.c +++ b/src/backend/access/heap/heapam_handler.c @@ -474,6 +474,7 @@ tuple_lock_retry: HeapTupleHeaderGetCmin(tuple->t_data) >= cid) { tmfd->xmax = priorXmax; + /* * Cmin is the problematic value, so store that. See * above. @@ -1172,7 +1173,7 @@ heapam_index_build_range_scan(Relation heapRelation, Snapshot snapshot; bool need_unregister_snapshot = false; TransactionId OldestXmin; - BlockNumber previous_blkno = InvalidBlockNumber; + BlockNumber previous_blkno = InvalidBlockNumber; BlockNumber root_blkno = InvalidBlockNumber; OffsetNumber root_offsets[MaxHeapTuplesPerPage]; @@ -1263,7 +1264,7 @@ heapam_index_build_range_scan(Relation heapRelation, /* Publish number of blocks to scan */ if (progress) { - BlockNumber nblocks; + BlockNumber nblocks; if (hscan->rs_base.rs_parallel != NULL) { @@ -1314,7 +1315,7 @@ heapam_index_build_range_scan(Relation heapRelation, /* Report scan progress, if asked to. */ if (progress) { - BlockNumber blocks_done = heapam_scan_get_blocks_done(hscan); + BlockNumber blocks_done = heapam_scan_get_blocks_done(hscan); if (blocks_done != previous_blkno) { @@ -1668,7 +1669,7 @@ heapam_index_build_range_scan(Relation heapRelation, /* Report scan progress one last time. 
*/ if (progress) { - BlockNumber blks_done; + BlockNumber blks_done; if (hscan->rs_base.rs_parallel != NULL) { @@ -1720,7 +1721,7 @@ heapam_index_validate_scan(Relation heapRelation, BlockNumber root_blkno = InvalidBlockNumber; OffsetNumber root_offsets[MaxHeapTuplesPerPage]; bool in_index[MaxHeapTuplesPerPage]; - BlockNumber previous_blkno = InvalidBlockNumber; + BlockNumber previous_blkno = InvalidBlockNumber; /* state variables for the merge */ ItemPointer indexcursor = NULL; @@ -1955,8 +1956,8 @@ static BlockNumber heapam_scan_get_blocks_done(HeapScanDesc hscan) { ParallelBlockTableScanDesc bpscan = NULL; - BlockNumber startblock; - BlockNumber blocks_done; + BlockNumber startblock; + BlockNumber blocks_done; if (hscan->rs_base.rs_parallel != NULL) { @@ -1974,7 +1975,7 @@ heapam_scan_get_blocks_done(HeapScanDesc hscan) blocks_done = hscan->rs_cblock - startblock; else { - BlockNumber nblocks; + BlockNumber nblocks; nblocks = bpscan != NULL ? bpscan->phs_nblocks : hscan->rs_nblocks; blocks_done = nblocks - startblock + diff --git a/src/backend/access/heap/rewriteheap.c b/src/backend/access/heap/rewriteheap.c index bce4274362c..131ec7b8d7f 100644 --- a/src/backend/access/heap/rewriteheap.c +++ b/src/backend/access/heap/rewriteheap.c @@ -652,7 +652,7 @@ raw_heap_insert(RewriteState state, HeapTuple tup) } else if (HeapTupleHasExternal(tup) || tup->t_len > TOAST_TUPLE_THRESHOLD) { - int options = HEAP_INSERT_SKIP_FSM; + int options = HEAP_INSERT_SKIP_FSM; if (!state->rs_use_wal) options |= HEAP_INSERT_SKIP_WAL; diff --git a/src/backend/access/heap/tuptoaster.c b/src/backend/access/heap/tuptoaster.c index 74e957abb72..e10715a7755 100644 --- a/src/backend/access/heap/tuptoaster.c +++ b/src/backend/access/heap/tuptoaster.c @@ -2295,16 +2295,16 @@ static struct varlena * toast_decompress_datum_slice(struct varlena *attr, int32 slicelength) { struct varlena *result; - int32 rawsize; + int32 rawsize; Assert(VARATT_IS_COMPRESSED(attr)); result = (struct varlena *) palloc(slicelength + VARHDRSZ); rawsize = pglz_decompress(TOAST_COMPRESS_RAWDATA(attr), - VARSIZE(attr) - TOAST_COMPRESS_HDRSZ, - VARDATA(result), - slicelength, false); + VARSIZE(attr) - TOAST_COMPRESS_HDRSZ, + VARDATA(result), + slicelength, false); if (rawsize < 0) elog(ERROR, "compressed data is corrupted"); diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c index 9e17acc110e..637e47c08ce 100644 --- a/src/backend/access/heap/vacuumlazy.c +++ b/src/backend/access/heap/vacuumlazy.c @@ -164,7 +164,7 @@ static void lazy_cleanup_index(Relation indrel, static int lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer, int tupindex, LVRelStats *vacrelstats, Buffer *vmbuffer); static bool should_attempt_truncation(VacuumParams *params, - LVRelStats *vacrelstats); + LVRelStats *vacrelstats); static void lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats); static BlockNumber count_nondeletable_pages(Relation onerel, LVRelStats *vacrelstats); @@ -1067,9 +1067,9 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats, * cheaper to get rid of it in the next pruning pass than * to treat it like an indexed tuple. Finally, if index * cleanup is disabled, the second heap pass will not - * execute, and the tuple will not get removed, so we - * must treat it like any other dead tuple that we choose - * to keep. + * execute, and the tuple will not get removed, so we must + * treat it like any other dead tuple that we choose to + * keep. 
* * If this were to happen for a tuple that actually needed * to be deleted, we'd be in trouble, because it'd @@ -1087,6 +1087,7 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats, all_visible = false; break; case HEAPTUPLE_LIVE: + /* * Count it as live. Not only is this natural, but it's * also what acquire_sample_rows() does. @@ -1251,13 +1252,14 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats, else { /* - * Here, we have indexes but index cleanup is disabled. Instead of - * vacuuming the dead tuples on the heap, we just forget them. + * Here, we have indexes but index cleanup is disabled. + * Instead of vacuuming the dead tuples on the heap, we just + * forget them. * * Note that vacrelstats->dead_tuples could have tuples which * became dead after HOT-pruning but are not marked dead yet. - * We do not process them because it's a very rare condition, and - * the next vacuum will process them anyway. + * We do not process them because it's a very rare condition, + * and the next vacuum will process them anyway. */ Assert(params->index_cleanup == VACOPT_TERNARY_DISABLED); } diff --git a/src/backend/access/nbtree/nbtinsert.c b/src/backend/access/nbtree/nbtinsert.c index 0a9472c71b5..36a570045ac 100644 --- a/src/backend/access/nbtree/nbtinsert.c +++ b/src/backend/access/nbtree/nbtinsert.c @@ -1811,11 +1811,11 @@ _bt_insert_parent(Relation rel, /* * Re-find and write lock the parent of buf. * - * It's possible that the location of buf's downlink has changed - * since our initial _bt_search() descent. _bt_getstackbuf() will - * detect and recover from this, updating the stack, which ensures - * that the new downlink will be inserted at the correct offset. - * Even buf's parent may have changed. + * It's possible that the location of buf's downlink has changed since + * our initial _bt_search() descent. _bt_getstackbuf() will detect + * and recover from this, updating the stack, which ensures that the + * new downlink will be inserted at the correct offset. Even buf's + * parent may have changed. */ stack->bts_btentry = bknum; pbuf = _bt_getstackbuf(rel, stack); diff --git a/src/backend/access/nbtree/nbtsearch.c b/src/backend/access/nbtree/nbtsearch.c index 5906c41f316..dc42213ac6c 100644 --- a/src/backend/access/nbtree/nbtsearch.c +++ b/src/backend/access/nbtree/nbtsearch.c @@ -166,8 +166,8 @@ _bt_search(Relation rel, BTScanInsert key, Buffer *bufP, int access, new_stack->bts_parent = stack_in; /* - * Page level 1 is lowest non-leaf page level prior to leaves. So, - * if we're on the level 1 and asked to lock leaf page in write mode, + * Page level 1 is lowest non-leaf page level prior to leaves. So, if + * we're on the level 1 and asked to lock leaf page in write mode, * then lock next page in write mode, because it must be a leaf. */ if (opaque->btpo.level == 1 && access == BT_WRITE) @@ -1235,7 +1235,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir) /* Initialize remaining insertion scan key fields */ inskey.heapkeyspace = _bt_heapkeyspace(rel); - inskey.anynullkeys = false; /* unusued */ + inskey.anynullkeys = false; /* unused */ inskey.nextkey = nextkey; inskey.pivotsearch = false; inskey.scantid = NULL; diff --git a/src/backend/access/nbtree/nbtsort.c b/src/backend/access/nbtree/nbtsort.c index 0b5be776d63..d6fa5742384 100644 --- a/src/backend/access/nbtree/nbtsort.c +++ b/src/backend/access/nbtree/nbtsort.c @@ -962,10 +962,10 @@ _bt_buildadd(BTWriteState *wstate, BTPageState *state, IndexTuple itup) * much smaller. 
* * Since the truncated tuple is often smaller than the original - * tuple, it cannot just be copied in place (besides, we want - * to actually save space on the leaf page). We delete the - * original high key, and add our own truncated high key at the - * same offset. + * tuple, it cannot just be copied in place (besides, we want to + * actually save space on the leaf page). We delete the original + * high key, and add our own truncated high key at the same + * offset. * * Note that the page layout won't be changed very much. oitup is * already located at the physical beginning of tuple space, so we diff --git a/src/backend/access/nbtree/nbtutils.c b/src/backend/access/nbtree/nbtutils.c index 77c9c7285cd..1238d544cd3 100644 --- a/src/backend/access/nbtree/nbtutils.c +++ b/src/backend/access/nbtree/nbtutils.c @@ -107,7 +107,7 @@ _bt_mkscankey(Relation rel, IndexTuple itup) key = palloc(offsetof(BTScanInsertData, scankeys) + sizeof(ScanKeyData) * indnkeyatts); key->heapkeyspace = itup == NULL || _bt_heapkeyspace(rel); - key->anynullkeys = false; /* initial assumption */ + key->anynullkeys = false; /* initial assumption */ key->nextkey = false; key->pivotsearch = false; key->keysz = Min(indnkeyatts, tupnatts); diff --git a/src/backend/access/spgist/spgscan.c b/src/backend/access/spgist/spgscan.c index 9365bc57ad5..7bc5ec09bf9 100644 --- a/src/backend/access/spgist/spgscan.c +++ b/src/backend/access/spgist/spgscan.c @@ -39,8 +39,8 @@ static int pairingheap_SpGistSearchItem_cmp(const pairingheap_node *a, const pairingheap_node *b, void *arg) { - const SpGistSearchItem *sa = (const SpGistSearchItem *) a; - const SpGistSearchItem *sb = (const SpGistSearchItem *) b; + const SpGistSearchItem *sa = (const SpGistSearchItem *) a; + const SpGistSearchItem *sb = (const SpGistSearchItem *) b; SpGistScanOpaque so = (SpGistScanOpaque) arg; int i; @@ -79,7 +79,7 @@ pairingheap_SpGistSearchItem_cmp(const pairingheap_node *a, } static void -spgFreeSearchItem(SpGistScanOpaque so, SpGistSearchItem * item) +spgFreeSearchItem(SpGistScanOpaque so, SpGistSearchItem *item) { if (!so->state.attLeafType.attbyval && DatumGetPointer(item->value) != NULL) @@ -97,7 +97,7 @@ spgFreeSearchItem(SpGistScanOpaque so, SpGistSearchItem * item) * Called in queue context */ static void -spgAddSearchItemToQueue(SpGistScanOpaque so, SpGistSearchItem * item) +spgAddSearchItemToQueue(SpGistScanOpaque so, SpGistSearchItem *item) { pairingheap_add(so->scanQueue, &item->phNode); } @@ -439,7 +439,7 @@ spgNewHeapItem(SpGistScanOpaque so, int level, ItemPointer heapPtr, * the scan is not ordered AND the item satisfies the scankeys */ static bool -spgLeafTest(SpGistScanOpaque so, SpGistSearchItem * item, +spgLeafTest(SpGistScanOpaque so, SpGistSearchItem *item, SpGistLeafTuple leafTuple, bool isnull, bool *reportedSome, storeRes_func storeRes) { @@ -530,7 +530,7 @@ spgLeafTest(SpGistScanOpaque so, SpGistSearchItem * item, static void spgInitInnerConsistentIn(spgInnerConsistentIn *in, SpGistScanOpaque so, - SpGistSearchItem * item, + SpGistSearchItem *item, SpGistInnerTuple innerTuple) { in->scankeys = so->keyData; @@ -551,7 +551,7 @@ spgInitInnerConsistentIn(spgInnerConsistentIn *in, static SpGistSearchItem * spgMakeInnerItem(SpGistScanOpaque so, - SpGistSearchItem * parentItem, + SpGistSearchItem *parentItem, SpGistNodeTuple tuple, spgInnerConsistentOut *out, int i, bool isnull, double *distances) @@ -585,7 +585,7 @@ spgMakeInnerItem(SpGistScanOpaque so, } static void -spgInnerTest(SpGistScanOpaque so, SpGistSearchItem * item, 
+spgInnerTest(SpGistScanOpaque so, SpGistSearchItem *item, SpGistInnerTuple innerTuple, bool isnull) { MemoryContext oldCxt = MemoryContextSwitchTo(so->tempCxt); @@ -683,7 +683,7 @@ enum SpGistSpecialOffsetNumbers static OffsetNumber spgTestLeafTuple(SpGistScanOpaque so, - SpGistSearchItem * item, + SpGistSearchItem *item, Page page, OffsetNumber offset, bool isnull, bool isroot, bool *reportedSome, diff --git a/src/backend/access/spgist/spgtextproc.c b/src/backend/access/spgist/spgtextproc.c index d22998c54bf..a7c1a09e05f 100644 --- a/src/backend/access/spgist/spgtextproc.c +++ b/src/backend/access/spgist/spgtextproc.c @@ -632,8 +632,8 @@ spg_text_leaf_consistent(PG_FUNCTION_ARGS) res = (level >= queryLen) || DatumGetBool(DirectFunctionCall2Coll(text_starts_with, PG_GET_COLLATION(), - out->leafValue, - PointerGetDatum(query))); + out->leafValue, + PointerGetDatum(query))); if (!res) /* no need to consider remaining conditions */ break; diff --git a/src/backend/access/spgist/spgvacuum.c b/src/backend/access/spgist/spgvacuum.c index fc85c6f9407..2b1662a267d 100644 --- a/src/backend/access/spgist/spgvacuum.c +++ b/src/backend/access/spgist/spgvacuum.c @@ -192,9 +192,9 @@ vacuumLeafPage(spgBulkDeleteState *bds, Relation index, Buffer buffer, * happened since VACUUM started. * * Note: we could make a tighter test by seeing if the xid is - * "running" according to the active snapshot; but snapmgr.c doesn't - * currently export a suitable API, and it's not entirely clear - * that a tighter test is worth the cycles anyway. + * "running" according to the active snapshot; but snapmgr.c + * doesn't currently export a suitable API, and it's not entirely + * clear that a tighter test is worth the cycles anyway. */ if (TransactionIdFollowsOrEquals(dt->xid, bds->myXmin)) spgAddPendingTID(bds, &dt->pointer); diff --git a/src/backend/access/table/tableam.c b/src/backend/access/table/tableam.c index c3455bc48ba..12adf590853 100644 --- a/src/backend/access/table/tableam.c +++ b/src/backend/access/table/tableam.c @@ -94,7 +94,7 @@ TableScanDesc table_beginscan_catalog(Relation relation, int nkeys, struct ScanKeyData *key) { uint32 flags = SO_TYPE_SEQSCAN | - SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE | SO_TEMP_SNAPSHOT; + SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE | SO_TEMP_SNAPSHOT; Oid relid = RelationGetRelid(relation); Snapshot snapshot = RegisterSnapshot(GetCatalogSnapshot(relid)); @@ -158,7 +158,7 @@ table_beginscan_parallel(Relation relation, ParallelTableScanDesc parallel_scan) { Snapshot snapshot; uint32 flags = SO_TYPE_SEQSCAN | - SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE; + SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE; Assert(RelationGetRelid(relation) == parallel_scan->phs_relid); @@ -223,7 +223,7 @@ table_index_fetch_tuple_check(Relation rel, void table_get_latest_tid(TableScanDesc scan, ItemPointer tid) { - Relation rel = scan->rs_rd; + Relation rel = scan->rs_rd; const TableAmRoutine *tableam = rel->rd_tableam; /* diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c index 20feeec3270..b40da74e092 100644 --- a/src/backend/access/transam/xact.c +++ b/src/backend/access/transam/xact.c @@ -570,9 +570,9 @@ AssignTransactionId(TransactionState s) /* * Ensure parent(s) have XIDs, so that a child always has an XID later - * than its parent. Mustn't recurse here, or we might get a stack overflow - * if we're at the bottom of a huge stack of subtransactions none of which - * have XIDs yet. + * than its parent. 
Mustn't recurse here, or we might get a stack + * overflow if we're at the bottom of a huge stack of subtransactions none + * of which have XIDs yet. */ if (isSubXact && !FullTransactionIdIsValid(s->parent->fullTransactionId)) { @@ -2868,8 +2868,8 @@ StartTransactionCommand(void) * just skipping the reset in StartTransaction() won't work.) */ static int save_XactIsoLevel; -static bool save_XactReadOnly; -static bool save_XactDeferrable; +static bool save_XactReadOnly; +static bool save_XactDeferrable; void SaveTransactionCharacteristics(void) @@ -5193,7 +5193,7 @@ SerializeTransactionState(Size maxsize, char *start_address) nxids = add_size(nxids, s->nChildXids); } Assert(SerializedTransactionStateHeaderSize + nxids * sizeof(TransactionId) - <= maxsize); + <= maxsize); /* Copy them to our scratch space. */ workspace = palloc(nxids * sizeof(TransactionId)); diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c index 527522f1655..c7c9e91b6a4 100644 --- a/src/backend/access/transam/xlog.c +++ b/src/backend/access/transam/xlog.c @@ -6397,9 +6397,9 @@ StartupXLOG(void) ereport(FATAL, (errmsg("could not find redo location referenced by checkpoint record"), errhint("If you are restoring from a backup, touch \"%s/recovery.signal\" and add required recovery options.\n" - "If you are not restoring from a backup, try removing the file \"%s/backup_label\".\n" - "Be careful: removing \"%s/backup_label\" will result in a corrupt cluster if restoring from a backup.", - DataDir, DataDir, DataDir))); + "If you are not restoring from a backup, try removing the file \"%s/backup_label\".\n" + "Be careful: removing \"%s/backup_label\" will result in a corrupt cluster if restoring from a backup.", + DataDir, DataDir, DataDir))); } } else diff --git a/src/backend/catalog/aclchk.c b/src/backend/catalog/aclchk.c index a600f43a675..f0fdda1eb91 100644 --- a/src/backend/catalog/aclchk.c +++ b/src/backend/catalog/aclchk.c @@ -848,7 +848,7 @@ objectsInSchemaToOids(ObjectType objtype, List *nspnames) while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL) { - Oid oid = ((Form_pg_proc) GETSTRUCT(tuple))->oid; + Oid oid = ((Form_pg_proc) GETSTRUCT(tuple))->oid; objects = lappend_oid(objects, oid); } @@ -895,7 +895,7 @@ getRelationsInNamespace(Oid namespaceId, char relkind) while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL) { - Oid oid = ((Form_pg_class) GETSTRUCT(tuple))->oid; + Oid oid = ((Form_pg_class) GETSTRUCT(tuple))->oid; relations = lappend_oid(relations, oid); } @@ -1311,7 +1311,7 @@ SetDefaultACL(InternalDefaultACL *iacls) } else { - Oid defAclOid; + Oid defAclOid; /* Prepare to insert or update pg_default_acl entry */ MemSet(values, 0, sizeof(values)); @@ -1384,7 +1384,7 @@ SetDefaultACL(InternalDefaultACL *iacls) if (isNew) InvokeObjectPostCreateHook(DefaultAclRelationId, defAclOid, 0); else - InvokeObjectPostAlterHook(DefaultAclRelationId, defAclOid, 0); + InvokeObjectPostAlterHook(DefaultAclRelationId, defAclOid, 0); } if (HeapTupleIsValid(tuple)) diff --git a/src/backend/catalog/catalog.c b/src/backend/catalog/catalog.c index 2878e6a5b03..11936a65713 100644 --- a/src/backend/catalog/catalog.c +++ b/src/backend/catalog/catalog.c @@ -476,15 +476,15 @@ GetNewRelFileNode(Oid reltablespace, Relation pg_class, char relpersistence) Datum pg_nextoid(PG_FUNCTION_ARGS) { - Oid reloid = PG_GETARG_OID(0); - Name attname = PG_GETARG_NAME(1); - Oid idxoid = PG_GETARG_OID(2); - Relation rel; - Relation idx; - HeapTuple atttuple; + Oid reloid = PG_GETARG_OID(0); + 
Name attname = PG_GETARG_NAME(1); + Oid idxoid = PG_GETARG_OID(2); + Relation rel; + Relation idx; + HeapTuple atttuple; Form_pg_attribute attform; - AttrNumber attno; - Oid newoid; + AttrNumber attno; + Oid newoid; /* * As this function is not intended to be used during normal running, and diff --git a/src/backend/catalog/heap.c b/src/backend/catalog/heap.c index 6cffe550b31..3c46c251071 100644 --- a/src/backend/catalog/heap.c +++ b/src/backend/catalog/heap.c @@ -2550,8 +2550,8 @@ AddRelationNewConstraints(Relation rel, /* * If the expression is just a NULL constant, we do not bother to make * an explicit pg_attrdef entry, since the default behavior is - * equivalent. This applies to column defaults, but not for generation - * expressions. + * equivalent. This applies to column defaults, but not for + * generation expressions. * * Note a nonobvious property of this test: if the column is of a * domain type, what we'll get is not a bare null Const but a diff --git a/src/backend/catalog/index.c b/src/backend/catalog/index.c index c8d22e1b655..b7d1ac0923f 100644 --- a/src/backend/catalog/index.c +++ b/src/backend/catalog/index.c @@ -1236,8 +1236,7 @@ index_concurrently_create_copy(Relation heapRelation, Oid oldIndexId, const char Anum_pg_class_reloptions, &isnull); /* - * Extract the list of column names to be used for the index - * creation. + * Extract the list of column names to be used for the index creation. */ for (int i = 0; i < indexInfo->ii_NumIndexAttrs; i++) { @@ -1270,8 +1269,8 @@ index_concurrently_create_copy(Relation heapRelation, Oid oldIndexId, const char optionDatum, INDEX_CREATE_SKIP_BUILD | INDEX_CREATE_CONCURRENT, 0, - true, /* allow table to be a system catalog? */ - false, /* is_internal? */ + true, /* allow table to be a system catalog? */ + false, /* is_internal? */ NULL); /* Close the relations used and clean up */ @@ -1540,7 +1539,7 @@ index_concurrently_swap(Oid newIndexId, Oid oldIndexId, const char *oldName) values, nulls, replaces); CatalogTupleUpdate(description, &tuple->t_self, tuple); - break; /* Assume there can be only one match */ + break; /* Assume there can be only one match */ } systable_endscan(sd); @@ -1552,8 +1551,8 @@ index_concurrently_swap(Oid newIndexId, Oid oldIndexId, const char *oldName) */ if (get_rel_relispartition(oldIndexId)) { - List *ancestors = get_partition_ancestors(oldIndexId); - Oid parentIndexRelid = linitial_oid(ancestors); + List *ancestors = get_partition_ancestors(oldIndexId); + Oid parentIndexRelid = linitial_oid(ancestors); DeleteInheritsTuple(oldIndexId, parentIndexRelid); StoreSingleInheritance(newIndexId, parentIndexRelid, 1); @@ -1583,7 +1582,11 @@ index_concurrently_swap(Oid newIndexId, Oid oldIndexId, const char *oldName) newClassRel->pgstat_info->t_counts.t_tuples_fetched = tabentry->tuples_fetched; newClassRel->pgstat_info->t_counts.t_blocks_fetched = tabentry->blocks_fetched; newClassRel->pgstat_info->t_counts.t_blocks_hit = tabentry->blocks_hit; - /* The data will be sent by the next pgstat_report_stat() call. */ + + /* + * The data will be sent by the next pgstat_report_stat() + * call. + */ } } } @@ -1614,27 +1617,26 @@ index_concurrently_set_dead(Oid heapId, Oid indexId) Relation userIndexRelation; /* - * No more predicate locks will be acquired on this index, and we're - * about to stop doing inserts into the index which could show - * conflicts with existing predicate locks, so now is the time to move - * them to the heap relation. 
+ * No more predicate locks will be acquired on this index, and we're about + * to stop doing inserts into the index which could show conflicts with + * existing predicate locks, so now is the time to move them to the heap + * relation. */ userHeapRelation = table_open(heapId, ShareUpdateExclusiveLock); userIndexRelation = index_open(indexId, ShareUpdateExclusiveLock); TransferPredicateLocksToHeapRelation(userIndexRelation); /* - * Now we are sure that nobody uses the index for queries; they just - * might have it open for updating it. So now we can unset indisready - * and indislive, then wait till nobody could be using it at all - * anymore. + * Now we are sure that nobody uses the index for queries; they just might + * have it open for updating it. So now we can unset indisready and + * indislive, then wait till nobody could be using it at all anymore. */ index_set_state_flags(indexId, INDEX_DROP_SET_DEAD); /* - * Invalidate the relcache for the table, so that after this commit - * all sessions will refresh the table's index list. Forgetting just - * the index's relcache entry is not enough. + * Invalidate the relcache for the table, so that after this commit all + * sessions will refresh the table's index list. Forgetting just the + * index's relcache entry is not enough. */ CacheInvalidateRelcache(userHeapRelation); @@ -1786,7 +1788,7 @@ index_constraint_create(Relation heapRelation, */ if (OidIsValid(parentConstraintId)) { - ObjectAddress referenced; + ObjectAddress referenced; ObjectAddressSet(referenced, ConstraintRelationId, parentConstraintId); recordDependencyOn(&myself, &referenced, DEPENDENCY_PARTITION_PRI); @@ -2709,7 +2711,7 @@ index_build(Relation heapRelation, PROGRESS_SCAN_BLOCKS_DONE, PROGRESS_SCAN_BLOCKS_TOTAL }; - const int64 val[] = { + const int64 val[] = { PROGRESS_CREATEIDX_PHASE_BUILD, PROGRESS_CREATEIDX_SUBPHASE_INITIALIZE, 0, 0, 0, 0 @@ -3014,10 +3016,11 @@ validate_index(Oid heapId, Oid indexId, Snapshot snapshot) PROGRESS_SCAN_BLOCKS_DONE, PROGRESS_SCAN_BLOCKS_TOTAL }; - const int64 val[] = { + const int64 val[] = { PROGRESS_CREATEIDX_PHASE_VALIDATE_IDXSCAN, 0, 0, 0, 0 }; + pgstat_progress_update_multi_param(5, index, val); } @@ -3080,7 +3083,7 @@ validate_index(Oid heapId, Oid indexId, Snapshot snapshot) PROGRESS_SCAN_BLOCKS_DONE, PROGRESS_SCAN_BLOCKS_TOTAL }; - const int64 val[] = { + const int64 val[] = { PROGRESS_CREATEIDX_PHASE_VALIDATE_SORT, 0, 0 }; diff --git a/src/backend/catalog/objectaddress.c b/src/backend/catalog/objectaddress.c index 8b51ec7f394..7a32ac1fb13 100644 --- a/src/backend/catalog/objectaddress.c +++ b/src/backend/catalog/objectaddress.c @@ -3050,7 +3050,7 @@ getObjectDescription(const ObjectAddress *object) StringInfoData opfam; amprocDesc = table_open(AccessMethodProcedureRelationId, - AccessShareLock); + AccessShareLock); ScanKeyInit(&skey[0], Anum_pg_amproc_oid, diff --git a/src/backend/catalog/pg_aggregate.c b/src/backend/catalog/pg_aggregate.c index cdc8d9453d9..310d45266ff 100644 --- a/src/backend/catalog/pg_aggregate.c +++ b/src/backend/catalog/pg_aggregate.c @@ -612,7 +612,7 @@ AggregateCreate(const char *aggName, myself = ProcedureCreate(aggName, aggNamespace, - replace, /* maybe replacement */ + replace, /* maybe replacement */ false, /* doesn't return a set */ finaltype, /* returnType */ GetUserId(), /* proowner */ @@ -693,10 +693,9 @@ AggregateCreate(const char *aggName, /* * If we're replacing an existing entry, we need to validate that - * we're not changing anything that would break callers. 
- * Specifically we must not change aggkind or aggnumdirectargs, - * which affect how an aggregate call is treated in parse - * analysis. + * we're not changing anything that would break callers. Specifically + * we must not change aggkind or aggnumdirectargs, which affect how an + * aggregate call is treated in parse analysis. */ if (aggKind != oldagg->aggkind) ereport(ERROR, diff --git a/src/backend/catalog/pg_proc.c b/src/backend/catalog/pg_proc.c index fb22035a2a6..3487caf82f6 100644 --- a/src/backend/catalog/pg_proc.c +++ b/src/backend/catalog/pg_proc.c @@ -423,7 +423,11 @@ ProcedureCreate(const char *procedureName, prokind == PROKIND_PROCEDURE ? errmsg("cannot change whether a procedure has output parameters") : errmsg("cannot change return type of existing function"), - /* translator: first %s is DROP FUNCTION, DROP PROCEDURE or DROP AGGREGATE */ + + /* + * translator: first %s is DROP FUNCTION, DROP PROCEDURE or DROP + * AGGREGATE + */ errhint("Use %s %s first.", dropcmd, format_procedure(oldproc->oid)))); @@ -450,7 +454,7 @@ ProcedureCreate(const char *procedureName, (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION), errmsg("cannot change return type of existing function"), errdetail("Row type defined by OUT parameters is different."), - /* translator: first %s is DROP FUNCTION or DROP PROCEDURE */ + /* translator: first %s is DROP FUNCTION or DROP PROCEDURE */ errhint("Use %s %s first.", dropcmd, format_procedure(oldproc->oid)))); @@ -495,7 +499,7 @@ ProcedureCreate(const char *procedureName, (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION), errmsg("cannot change name of input parameter \"%s\"", old_arg_names[j]), - /* translator: first %s is DROP FUNCTION or DROP PROCEDURE */ + /* translator: first %s is DROP FUNCTION or DROP PROCEDURE */ errhint("Use %s %s first.", dropcmd, format_procedure(oldproc->oid)))); @@ -521,7 +525,7 @@ ProcedureCreate(const char *procedureName, ereport(ERROR, (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION), errmsg("cannot remove parameter defaults from existing function"), - /* translator: first %s is DROP FUNCTION or DROP PROCEDURE */ + /* translator: first %s is DROP FUNCTION or DROP PROCEDURE */ errhint("Use %s %s first.", dropcmd, format_procedure(oldproc->oid)))); @@ -549,7 +553,7 @@ ProcedureCreate(const char *procedureName, ereport(ERROR, (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION), errmsg("cannot change data type of existing parameter default value"), - /* translator: first %s is DROP FUNCTION or DROP PROCEDURE */ + /* translator: first %s is DROP FUNCTION or DROP PROCEDURE */ errhint("Use %s %s first.", dropcmd, format_procedure(oldproc->oid)))); @@ -575,7 +579,7 @@ ProcedureCreate(const char *procedureName, else { /* Creating a new procedure */ - Oid newOid; + Oid newOid; /* First, get default permissions and set up proacl */ proacl = get_user_default_acl(OBJECT_FUNCTION, proowner, diff --git a/src/backend/catalog/pg_publication.c b/src/backend/catalog/pg_publication.c index f8475c1aba2..b3bf81ae633 100644 --- a/src/backend/catalog/pg_publication.c +++ b/src/backend/catalog/pg_publication.c @@ -317,7 +317,7 @@ GetAllTablesPublications(void) result = NIL; while (HeapTupleIsValid(tup = systable_getnext(scan))) { - Oid oid = ((Form_pg_publication) GETSTRUCT(tup))->oid; + Oid oid = ((Form_pg_publication) GETSTRUCT(tup))->oid; result = lappend_oid(result, oid); } diff --git a/src/backend/catalog/storage.c b/src/backend/catalog/storage.c index fb41f223ada..3cc886f7fe2 100644 --- a/src/backend/catalog/storage.c +++ b/src/backend/catalog/storage.c @@ -99,7 
+99,7 @@ RelationCreateStorage(RelFileNode rnode, char relpersistence) break; default: elog(ERROR, "invalid relpersistence: %c", relpersistence); - return NULL; /* placate compiler */ + return NULL; /* placate compiler */ } srel = smgropen(rnode, backend); diff --git a/src/backend/commands/amcmds.c b/src/backend/commands/amcmds.c index c1603737eb5..c0e40980d5f 100644 --- a/src/backend/commands/amcmds.c +++ b/src/backend/commands/amcmds.c @@ -61,7 +61,7 @@ CreateAccessMethod(CreateAmStmt *stmt) errhint("Must be superuser to create an access method."))); /* Check if name is used */ - amoid = GetSysCacheOid1(AMNAME, Anum_pg_am_oid, + amoid = GetSysCacheOid1(AMNAME, Anum_pg_am_oid, CStringGetDatum(stmt->amname)); if (OidIsValid(amoid)) { diff --git a/src/backend/commands/cluster.c b/src/backend/commands/cluster.c index 3ee70560476..cacc023619c 100644 --- a/src/backend/commands/cluster.c +++ b/src/backend/commands/cluster.c @@ -70,8 +70,8 @@ typedef struct static void rebuild_relation(Relation OldHeap, Oid indexOid, bool verbose); static void copy_table_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex, - bool verbose, bool *pSwapToastByContent, - TransactionId *pFreezeXid, MultiXactId *pCutoffMulti); + bool verbose, bool *pSwapToastByContent, + TransactionId *pFreezeXid, MultiXactId *pCutoffMulti); static List *get_tables_to_cluster(MemoryContext cluster_context); @@ -614,7 +614,7 @@ rebuild_relation(Relation OldHeap, Oid indexOid, bool verbose) /* Copy the heap data into the new table in the desired order */ copy_table_data(OIDNewHeap, tableOid, indexOid, verbose, - &swap_toast_by_content, &frozenXid, &cutoffMulti); + &swap_toast_by_content, &frozenXid, &cutoffMulti); /* * Swap the physical files of the target and transient tables, then diff --git a/src/backend/commands/constraint.c b/src/backend/commands/constraint.c index cd04e4ea81b..806962a686b 100644 --- a/src/backend/commands/constraint.c +++ b/src/backend/commands/constraint.c @@ -83,7 +83,7 @@ unique_key_recheck(PG_FUNCTION_ARGS) (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED), errmsg("function \"%s\" must be fired for INSERT or UPDATE", funcname))); - ItemPointerSetInvalid(&checktid); /* keep compiler quiet */ + ItemPointerSetInvalid(&checktid); /* keep compiler quiet */ } slot = table_slot_create(trigdata->tg_relation, NULL); @@ -109,7 +109,7 @@ unique_key_recheck(PG_FUNCTION_ARGS) tmptid = checktid; { IndexFetchTableData *scan = table_index_fetch_begin(trigdata->tg_relation); - bool call_again = false; + bool call_again = false; if (!table_index_fetch_tuple(scan, &tmptid, SnapshotSelf, slot, &call_again, NULL)) diff --git a/src/backend/commands/dbcommands.c b/src/backend/commands/dbcommands.c index 9707afabd98..5015e5b3b60 100644 --- a/src/backend/commands/dbcommands.c +++ b/src/backend/commands/dbcommands.c @@ -2033,7 +2033,7 @@ get_database_oid(const char *dbname, bool missing_ok) /* We assume that there can be at most one matching tuple */ if (HeapTupleIsValid(dbtuple)) - oid = ((Form_pg_database)GETSTRUCT(dbtuple))->oid; + oid = ((Form_pg_database) GETSTRUCT(dbtuple))->oid; else oid = InvalidOid; diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c index a6c6de78f11..039a87c1551 100644 --- a/src/backend/commands/explain.c +++ b/src/backend/commands/explain.c @@ -606,7 +606,7 @@ ExplainOnePlan(PlannedStmt *plannedstmt, IntoClause *into, ExplainState *es, static void ExplainPrintSettings(ExplainState *es) { - int num; + int num; struct config_generic **gucs; /* bail out if information about settings 
not requested */ @@ -622,13 +622,13 @@ ExplainPrintSettings(ExplainState *es) if (es->format != EXPLAIN_FORMAT_TEXT) { - int i; + int i; ExplainOpenGroup("Settings", "Settings", true, es); for (i = 0; i < num; i++) { - char *setting; + char *setting; struct config_generic *conf = gucs[i]; setting = GetConfigOptionByName(conf->name, NULL, true); @@ -640,14 +640,14 @@ ExplainPrintSettings(ExplainState *es) } else { - int i; - StringInfoData str; + int i; + StringInfoData str; initStringInfo(&str); for (i = 0; i < num; i++) { - char *setting; + char *setting; struct config_generic *conf = gucs[i]; if (i > 0) @@ -705,8 +705,8 @@ ExplainPrintPlan(ExplainState *es, QueryDesc *queryDesc) ExplainNode(ps, NIL, NULL, NULL, es); /* - * If requested, include information about GUC parameters with values - * that don't match the built-in defaults. + * If requested, include information about GUC parameters with values that + * don't match the built-in defaults. */ ExplainPrintSettings(es); } @@ -1674,7 +1674,7 @@ ExplainNode(PlanState *planstate, List *ancestors, if (es->costs && es->verbose && outerPlanState(planstate)->worker_jit_instrument) { - PlanState *child = outerPlanState(planstate); + PlanState *child = outerPlanState(planstate); int n; SharedJitInstrumentation *w = child->worker_jit_instrument; diff --git a/src/backend/commands/extension.c b/src/backend/commands/extension.c index d4723fced89..300bb1261f7 100644 --- a/src/backend/commands/extension.c +++ b/src/backend/commands/extension.c @@ -903,9 +903,9 @@ execute_extension_script(Oid extensionOid, ExtensionControlFile *control, t_sql = DirectFunctionCall3Coll(replace_text, C_COLLATION_OID, - t_sql, - CStringGetTextDatum("@extschema@"), - CStringGetTextDatum(qSchemaName)); + t_sql, + CStringGetTextDatum("@extschema@"), + CStringGetTextDatum(qSchemaName)); } /* @@ -916,9 +916,9 @@ execute_extension_script(Oid extensionOid, ExtensionControlFile *control, { t_sql = DirectFunctionCall3Coll(replace_text, C_COLLATION_OID, - t_sql, - CStringGetTextDatum("MODULE_PATHNAME"), - CStringGetTextDatum(control->module_pathname)); + t_sql, + CStringGetTextDatum("MODULE_PATHNAME"), + CStringGetTextDatum(control->module_pathname)); } /* And now back to C string */ diff --git a/src/backend/commands/indexcmds.c b/src/backend/commands/indexcmds.c index 7e7c03ef124..62a4c4fb9be 100644 --- a/src/backend/commands/indexcmds.c +++ b/src/backend/commands/indexcmds.c @@ -96,8 +96,8 @@ static void update_relispartition(Oid relationId, bool newval); */ struct ReindexIndexCallbackState { - bool concurrent; /* flag from statement */ - Oid locked_table_oid; /* tracks previously locked table */ + bool concurrent; /* flag from statement */ + Oid locked_table_oid; /* tracks previously locked table */ }; /* @@ -396,7 +396,7 @@ WaitForOlderSnapshots(TransactionId limitXmin, bool progress) { if (progress) { - PGPROC *holder = BackendIdGetProc(old_snapshots[i].backendId); + PGPROC *holder = BackendIdGetProc(old_snapshots[i].backendId); pgstat_progress_update_param(PROGRESS_WAITFOR_CURRENT_PID, holder->pid); @@ -984,7 +984,7 @@ DefineIndex(Oid relationId, */ if (partitioned && stmt->relation && !stmt->relation->inh) { - PartitionDesc pd = RelationGetPartitionDesc(rel); + PartitionDesc pd = RelationGetPartitionDesc(rel); if (pd->nparts != 0) flags |= INDEX_CREATE_INVALID; @@ -3003,7 +3003,7 @@ ReindexRelationConcurrently(Oid relationOid, int options) /* Get a session-level lock on each table. 
*/ foreach(lc, relationLocks) { - LockRelId *lockrelid = (LockRelId *) lfirst(lc); + LockRelId *lockrelid = (LockRelId *) lfirst(lc); LockRelationIdForSession(lockrelid, ShareUpdateExclusiveLock); } @@ -3112,8 +3112,8 @@ ReindexRelationConcurrently(Oid relationOid, int options) /* * The index is now valid in the sense that it contains all currently - * interesting tuples. But since it might not contain tuples deleted just - * before the reference snap was taken, we have to wait out any + * interesting tuples. But since it might not contain tuples deleted + * just before the reference snap was taken, we have to wait out any * transactions that might have older snapshots. */ pgstat_progress_update_param(PROGRESS_CREATEIDX_PHASE, @@ -3250,7 +3250,7 @@ ReindexRelationConcurrently(Oid relationOid, int options) */ foreach(lc, relationLocks) { - LockRelId *lockrelid = (LockRelId *) lfirst(lc); + LockRelId *lockrelid = (LockRelId *) lfirst(lc); UnlockRelationIdForSession(lockrelid, ShareUpdateExclusiveLock); } diff --git a/src/backend/commands/statscmds.c b/src/backend/commands/statscmds.c index a191916d032..95ec352abe6 100644 --- a/src/backend/commands/statscmds.c +++ b/src/backend/commands/statscmds.c @@ -465,9 +465,9 @@ UpdateStatisticsForTypeChange(Oid statsOid, Oid relationOid, int attnum, elog(ERROR, "cache lookup failed for statistics object %u", statsOid); /* - * When none of the defined statistics types contain datum values - * from the table's columns then there's no need to reset the stats. - * Functional dependencies and ndistinct stats should still hold true. + * When none of the defined statistics types contain datum values from the + * table's columns then there's no need to reset the stats. Functional + * dependencies and ndistinct stats should still hold true. */ if (!statext_is_kind_built(oldtup, STATS_EXT_MCV)) { diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c index bfcf9472d7a..7fa8dcce614 100644 --- a/src/backend/commands/tablecmds.c +++ b/src/backend/commands/tablecmds.c @@ -379,7 +379,7 @@ static void ATExecCheckNotNull(AlteredTableInfo *tab, Relation rel, const char *colName, LOCKMODE lockmode); static bool NotNullImpliedByRelConstraints(Relation rel, Form_pg_attribute attr); static bool ConstraintImpliedByRelConstraint(Relation scanrel, - List *partConstraint, List *existedConstraints); + List *partConstraint, List *existedConstraints); static ObjectAddress ATExecColumnDefault(Relation rel, const char *colName, Node *newDefault, LOCKMODE lockmode); static ObjectAddress ATExecAddIdentity(Relation rel, const char *colName, @@ -1099,9 +1099,9 @@ DefineRelation(CreateStmt *stmt, char relkind, Oid ownerId, } /* - * Now add any newly specified CHECK constraints to the new relation. - * Same as for defaults above, but these need to come after partitioning - * is set up. + * Now add any newly specified CHECK constraints to the new relation. Same + * as for defaults above, but these need to come after partitioning is set + * up. 
*/ if (stmt->constraints) AddRelationNewConstraints(rel, NIL, stmt->constraints, @@ -1401,9 +1401,9 @@ RangeVarCallbackForDropRelation(const RangeVar *rel, Oid relOid, Oid oldRelOid, */ if (IsSystemClass(relOid, classform) && relkind == RELKIND_INDEX) { - HeapTuple locTuple; - Form_pg_index indexform; - bool indisvalid; + HeapTuple locTuple; + Form_pg_index indexform; + bool indisvalid; locTuple = SearchSysCache1(INDEXRELID, ObjectIdGetDatum(relOid)); if (!HeapTupleIsValid(locTuple)) @@ -1786,6 +1786,7 @@ ExecuteTruncateGuts(List *explicit_rels, List *relids, List *relids_logged, { Relation toastrel = relation_open(toast_relid, AccessExclusiveLock); + RelationSetNewRelfilenode(toastrel, toastrel->rd_rel->relpersistence); table_close(toastrel, NoLock); @@ -4336,6 +4337,7 @@ ATExecCmd(List **wqueue, AlteredTableInfo *tab, Relation rel, /* nothing to do here, oid columns don't exist anymore */ break; case AT_SetTableSpace: /* SET TABLESPACE */ + /* * Only do this for partitioned tables and indexes, for which this * is just a catalog change. Other relation types which have @@ -4626,8 +4628,8 @@ ATRewriteTables(AlterTableStmt *parsetree, List **wqueue, LOCKMODE lockmode) { /* * If required, test the current data within the table against new - * constraints generated by ALTER TABLE commands, but don't rebuild - * data. + * constraints generated by ALTER TABLE commands, but don't + * rebuild data. */ if (tab->constraints != NIL || tab->verify_new_notnull || tab->partition_constraint != NULL) @@ -4798,8 +4800,8 @@ ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap, LOCKMODE lockmode) { /* * If we are rebuilding the tuples OR if we added any new but not - * verified NOT NULL constraints, check all not-null constraints. - * This is a bit of overkill but it minimizes risk of bugs, and + * verified NOT NULL constraints, check all not-null constraints. This + * is a bit of overkill but it minimizes risk of bugs, and * heap_attisnull is a pretty cheap test anyway. */ for (i = 0; i < newTupDesc->natts; i++) @@ -4941,8 +4943,8 @@ ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap, LOCKMODE lockmode) { /* * If there's no rewrite, old and new table are guaranteed to - * have the same AM, so we can just use the old slot to - * verify new constraints etc. + * have the same AM, so we can just use the old slot to verify + * new constraints etc. */ insertslot = oldslot; } @@ -6209,9 +6211,8 @@ ATExecSetNotNull(AlteredTableInfo *tab, Relation rel, /* * Ordinarily phase 3 must ensure that no NULLs exist in columns that * are set NOT NULL; however, if we can find a constraint which proves - * this then we can skip that. We needn't bother looking if - * we've already found that we must verify some other NOT NULL - * constraint. + * this then we can skip that. We needn't bother looking if we've + * already found that we must verify some other NOT NULL constraint. */ if (!tab->verify_new_notnull && !NotNullImpliedByRelConstraints(rel, (Form_pg_attribute) GETSTRUCT(tuple))) @@ -10503,7 +10504,7 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel, */ if (tab->rewrite) { - Relation newrel; + Relation newrel; newrel = table_open(RelationGetRelid(rel), NoLock); RelationClearMissing(newrel); @@ -10657,8 +10658,8 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel, { /* * Changing the type of a column that is used by a - * generated column is not allowed by SQL standard. - * It might be doable with some thinking and effort. + * generated column is not allowed by SQL standard. 
It + * might be doable with some thinking and effort. */ ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), @@ -10862,13 +10863,13 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel, /* * Here we go --- change the recorded column type and collation. (Note - * heapTup is a copy of the syscache entry, so okay to scribble on.) - * First fix up the missing value if any. + * heapTup is a copy of the syscache entry, so okay to scribble on.) First + * fix up the missing value if any. */ if (attTup->atthasmissing) { - Datum missingval; - bool missingNull; + Datum missingval; + bool missingNull; /* if rewrite is true the missing value should already be cleared */ Assert(tab->rewrite == 0); @@ -10881,7 +10882,7 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel, /* if it's a null array there is nothing to do */ - if (! missingNull) + if (!missingNull) { /* * Get the datum out of the array and repack it in a new array @@ -10890,12 +10891,12 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel, * changed, only the array metadata. */ - int one = 1; - bool isNull; - Datum valuesAtt[Natts_pg_attribute]; - bool nullsAtt[Natts_pg_attribute]; - bool replacesAtt[Natts_pg_attribute]; - HeapTuple newTup; + int one = 1; + bool isNull; + Datum valuesAtt[Natts_pg_attribute]; + bool nullsAtt[Natts_pg_attribute]; + bool replacesAtt[Natts_pg_attribute]; + HeapTuple newTup; MemSet(valuesAtt, 0, sizeof(valuesAtt)); MemSet(nullsAtt, false, sizeof(nullsAtt)); @@ -10910,12 +10911,12 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel, attTup->attalign, &isNull); missingval = PointerGetDatum( - construct_array(&missingval, - 1, - targettype, - tform->typlen, - tform->typbyval, - tform->typalign)); + construct_array(&missingval, + 1, + targettype, + tform->typlen, + tform->typbyval, + tform->typalign)); valuesAtt[Anum_pg_attribute_attmissingval - 1] = missingval; replacesAtt[Anum_pg_attribute_attmissingval - 1] = true; @@ -12311,16 +12312,16 @@ ATExecSetTableSpaceNoStorage(Relation rel, Oid newTableSpace) Oid reloid = RelationGetRelid(rel); /* - * Shouldn't be called on relations having storage; these are processed - * in phase 3. + * Shouldn't be called on relations having storage; these are processed in + * phase 3. */ Assert(!RELKIND_HAS_STORAGE(rel->rd_rel->relkind)); /* Can't allow a non-shared relation in pg_global */ if (newTableSpace == GLOBALTABLESPACE_OID) ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("only shared relations can be placed in pg_global tablespace"))); + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("only shared relations can be placed in pg_global tablespace"))); /* * No work if no change in tablespace. 
@@ -15044,7 +15045,7 @@ ComputePartitionAttrs(ParseState *pstate, Relation rel, List *partParams, AttrNu i = -1; while ((i = bms_next_member(expr_attrs, i)) >= 0) { - AttrNumber attno = i + FirstLowInvalidHeapAttributeNumber; + AttrNumber attno = i + FirstLowInvalidHeapAttributeNumber; if (TupleDescAttr(RelationGetDescr(rel), attno - 1)->attgenerated) ereport(ERROR, @@ -15202,7 +15203,7 @@ PartConstraintImpliedByRelConstraint(Relation scanrel, bool ConstraintImpliedByRelConstraint(Relation scanrel, List *testConstraint, List *provenConstraint) { - List *existConstraint = list_copy(provenConstraint); + List *existConstraint = list_copy(provenConstraint); TupleConstr *constr = RelationGetDescr(scanrel)->constr; int num_check, i; @@ -15240,8 +15241,8 @@ ConstraintImpliedByRelConstraint(Relation scanrel, List *testConstraint, List *p * not-false and try to prove the same for testConstraint. * * Note that predicate_implied_by assumes its first argument is known - * immutable. That should always be true for both NOT NULL and - * partition constraints, so we don't test it here. + * immutable. That should always be true for both NOT NULL and partition + * constraints, so we don't test it here. */ return predicate_implied_by(testConstraint, existConstraint, true); } diff --git a/src/backend/commands/tablespace.c b/src/backend/commands/tablespace.c index 8ec963f1cfb..33df2ec0af3 100644 --- a/src/backend/commands/tablespace.c +++ b/src/backend/commands/tablespace.c @@ -1143,9 +1143,9 @@ GetDefaultTablespace(char relpersistence, bool partitioned) /* * Allow explicit specification of database's default tablespace in - * default_tablespace without triggering permissions checks. Don't - * allow specifying that when creating a partitioned table, however, - * since the result is confusing. + * default_tablespace without triggering permissions checks. Don't allow + * specifying that when creating a partitioned table, however, since the + * result is confusing. */ if (result == MyDatabaseTableSpace) { diff --git a/src/backend/commands/trigger.c b/src/backend/commands/trigger.c index 2beb3781450..209021a61a9 100644 --- a/src/backend/commands/trigger.c +++ b/src/backend/commands/trigger.c @@ -4245,9 +4245,9 @@ AfterTriggerExecute(EState *estate, case AFTER_TRIGGER_FDW_REUSE: /* - * Store tuple in the slot so that tg_trigtuple does not - * reference tuplestore memory. (It is formally possible for the - * trigger function to queue trigger events that add to the same + * Store tuple in the slot so that tg_trigtuple does not reference + * tuplestore memory. (It is formally possible for the trigger + * function to queue trigger events that add to the same * tuplestore, which can push other tuples out of memory.) 
The * distinction is academic, because we start with a minimal tuple * that is stored as a heap tuple, constructed in different memory diff --git a/src/backend/commands/vacuum.c b/src/backend/commands/vacuum.c index afdd3307acd..d69a73d13ed 100644 --- a/src/backend/commands/vacuum.c +++ b/src/backend/commands/vacuum.c @@ -88,13 +88,13 @@ void ExecVacuum(ParseState *pstate, VacuumStmt *vacstmt, bool isTopLevel) { VacuumParams params; - bool verbose = false; - bool skip_locked = false; - bool analyze = false; - bool freeze = false; - bool full = false; - bool disable_page_skipping = false; - ListCell *lc; + bool verbose = false; + bool skip_locked = false; + bool analyze = false; + bool freeze = false; + bool full = false; + bool disable_page_skipping = false; + ListCell *lc; /* Set default value */ params.index_cleanup = VACOPT_TERNARY_DEFAULT; @@ -103,7 +103,7 @@ ExecVacuum(ParseState *pstate, VacuumStmt *vacstmt, bool isTopLevel) /* Parse options list */ foreach(lc, vacstmt->options) { - DefElem *opt = (DefElem *) lfirst(lc); + DefElem *opt = (DefElem *) lfirst(lc); /* Parse common options for VACUUM and ANALYZE */ if (strcmp(opt->defname, "verbose") == 0) @@ -593,8 +593,9 @@ vacuum_open_relation(Oid relid, RangeVar *relation, int options, /* * Determine the log level. * - * For manual VACUUM or ANALYZE, we emit a WARNING to match the log statements - * in the permission checks; otherwise, only log if the caller so requested. + * For manual VACUUM or ANALYZE, we emit a WARNING to match the log + * statements in the permission checks; otherwise, only log if the caller + * so requested. */ if (!IsAutoVacuumWorkerProcess()) elevel = WARNING; @@ -1328,9 +1329,9 @@ vac_update_datfrozenxid(void) } /* - * Some table AMs might not need per-relation xid / multixid - * horizons. It therefore seems reasonable to allow relfrozenxid and - * relminmxid to not be set (i.e. set to their respective Invalid*Id) + * Some table AMs might not need per-relation xid / multixid horizons. + * It therefore seems reasonable to allow relfrozenxid and relminmxid + * to not be set (i.e. set to their respective Invalid*Id) * independently. Thus validate and compute horizon for each only if * set. 
* diff --git a/src/backend/executor/execExpr.c b/src/backend/executor/execExpr.c index 0a7b2b8f477..5d64471eeda 100644 --- a/src/backend/executor/execExpr.c +++ b/src/backend/executor/execExpr.c @@ -2367,10 +2367,10 @@ get_last_attnums_walker(Node *node, LastAttnumInfo *info) static void ExecComputeSlotInfo(ExprState *state, ExprEvalStep *op) { - PlanState *parent = state->parent; + PlanState *parent = state->parent; TupleDesc desc = NULL; const TupleTableSlotOps *tts_ops = NULL; - bool isfixed = false; + bool isfixed = false; if (op->d.fetch.known_desc != NULL) { @@ -3313,7 +3313,7 @@ ExecBuildAggTransCall(ExprState *state, AggState *aggstate, */ ExprState * ExecBuildGroupingEqual(TupleDesc ldesc, TupleDesc rdesc, - const TupleTableSlotOps * lops, const TupleTableSlotOps * rops, + const TupleTableSlotOps *lops, const TupleTableSlotOps *rops, int numCols, const AttrNumber *keyColIdx, const Oid *eqfunctions, diff --git a/src/backend/executor/execExprInterp.c b/src/backend/executor/execExprInterp.c index a018925d4ed..612a88456eb 100644 --- a/src/backend/executor/execExprInterp.c +++ b/src/backend/executor/execExprInterp.c @@ -4038,7 +4038,7 @@ void ExecEvalSysVar(ExprState *state, ExprEvalStep *op, ExprContext *econtext, TupleTableSlot *slot) { - Datum d; + Datum d; /* slot_getsysattr has sufficient defenses against bad attnums */ d = slot_getsysattr(slot, diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c index ed7c0606bf1..44e4a6d104c 100644 --- a/src/backend/executor/execMain.c +++ b/src/backend/executor/execMain.c @@ -2551,7 +2551,7 @@ EvalPlanQualSlot(EPQState *epqstate, if (relation) *slot = table_slot_create(relation, - &epqstate->estate->es_tupleTable); + &epqstate->estate->es_tupleTable); else *slot = ExecAllocTableSlot(&epqstate->estate->es_tupleTable, epqstate->origslot->tts_tupleDescriptor, diff --git a/src/backend/executor/execParallel.c b/src/backend/executor/execParallel.c index 3d4b01cb4d6..da9074c54c9 100644 --- a/src/backend/executor/execParallel.c +++ b/src/backend/executor/execParallel.c @@ -1058,7 +1058,7 @@ ExecParallelRetrieveJitInstrumentation(PlanState *planstate, * instrumentation in per-query context. */ ibytes = offsetof(SharedJitInstrumentation, jit_instr) - + mul_size(shared_jit->num_workers, sizeof(JitInstrumentation)); + + mul_size(shared_jit->num_workers, sizeof(JitInstrumentation)); planstate->worker_jit_instrument = MemoryContextAlloc(planstate->state->es_query_cxt, ibytes); @@ -1133,7 +1133,7 @@ ExecParallelCleanup(ParallelExecutorInfo *pei) /* Accumulate JIT instrumentation, if any. */ if (pei->jit_instrumentation) ExecParallelRetrieveJitInstrumentation(pei->planstate, - pei->jit_instrumentation); + pei->jit_instrumentation); /* Free any serialized parameters. 
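The ibytes computation in the ExecParallelRetrieveJitInstrumentation() hunk above is the standard C flexible-array-member sizing idiom: offsetof of the array plus n elements (the backend's mul_size() additionally guards the multiplication against overflow). A minimal sketch with stand-in struct definitions, not the real backend types:

    #include <stdio.h>
    #include <stdlib.h>
    #include <stddef.h>

    typedef struct JitInstr { long counters[4]; } JitInstr;
    typedef struct SharedJitInstr
    {
        int      num_workers;
        JitInstr jit_instr[];       /* flexible array member */
    } SharedJitInstr;

    int main(void)
    {
        int    nworkers = 8;
        /* size of the fixed header plus one element per worker */
        size_t ibytes = offsetof(SharedJitInstr, jit_instr)
            + (size_t) nworkers * sizeof(JitInstr);
        SharedJitInstr *si = calloc(1, ibytes);

        si->num_workers = nworkers;
        printf("allocated %zu bytes for %d workers\n", ibytes, nworkers);
        free(si);
        return 0;
    }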
*/ if (DsaPointerIsValid(pei->param_exec)) diff --git a/src/backend/executor/execPartition.c b/src/backend/executor/execPartition.c index 6cdbb9db421..73ba298c5da 100644 --- a/src/backend/executor/execPartition.c +++ b/src/backend/executor/execPartition.c @@ -145,12 +145,12 @@ typedef struct PartitionDispatchData TupleTableSlot *tupslot; AttrNumber *tupmap; int indexes[FLEXIBLE_ARRAY_MEMBER]; -} PartitionDispatchData; +} PartitionDispatchData; /* struct to hold result relations coming from UPDATE subplans */ typedef struct SubplanResultRelHashElem { - Oid relid; /* hash key -- must be first */ + Oid relid; /* hash key -- must be first */ ResultRelInfo *rri; } SubplanResultRelHashElem; @@ -375,7 +375,7 @@ ExecFindPartition(ModifyTableState *mtstate, if (proute->subplan_resultrel_htab) { Oid partoid = partdesc->oids[partidx]; - SubplanResultRelHashElem *elem; + SubplanResultRelHashElem *elem; elem = hash_search(proute->subplan_resultrel_htab, &partoid, HASH_FIND, NULL); @@ -474,7 +474,7 @@ ExecHashSubPlanResultRelsByOid(ModifyTableState *mtstate, ResultRelInfo *rri = &mtstate->resultRelInfo[i]; bool found; Oid partoid = RelationGetRelid(rri->ri_RelationDesc); - SubplanResultRelHashElem *elem; + SubplanResultRelHashElem *elem; elem = (SubplanResultRelHashElem *) hash_search(htab, &partoid, HASH_ENTER, &found); @@ -762,9 +762,9 @@ ExecInitPartitionInfo(ModifyTableState *mtstate, EState *estate, * It's safe to reuse these from the partition root, as we * only process one tuple at a time (therefore we won't * overwrite needed data in slots), and the results of - * projections are independent of the underlying - * storage. Projections and where clauses themselves don't - * store state / are independent of the underlying storage. + * projections are independent of the underlying storage. + * Projections and where clauses themselves don't store state + * / are independent of the underlying storage. */ leaf_part_rri->ri_onConflict->oc_ProjSlot = rootResultRelInfo->ri_onConflict->oc_ProjSlot; @@ -892,7 +892,7 @@ ExecInitRoutingInfo(ModifyTableState *mtstate, { MemoryContext oldcxt; PartitionRoutingInfo *partrouteinfo; - int rri_index; + int rri_index; oldcxt = MemoryContextSwitchTo(proute->memcxt); @@ -1668,16 +1668,16 @@ ExecCreatePartitionPruneState(PlanState *planstate, } else { - int pd_idx = 0; - int pp_idx; + int pd_idx = 0; + int pp_idx; /* * Some new partitions have appeared since plan time, and * those are reflected in our PartitionDesc but were not * present in the one used to construct subplan_map and * subpart_map. So we must construct new and longer arrays - * where the partitions that were originally present map to the - * same place, and any added indexes map to -1, as if the + * where the partitions that were originally present map to + * the same place, and any added indexes map to -1, as if the * new partitions had been pruned. 
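The subplan_map rebuild described in the ExecCreatePartitionPruneState() hunk can be sketched as a single merge over two OID lists kept in the same (bound) order, which is what the real code assumes; partitions unknown at plan time map to -1, as if pruned. Toy data, simplified matching:

    #include <stdio.h>

    int main(void)
    {
        /* OIDs the planner saw, and the longer set seen at execution time */
        unsigned plan_oids[] = {101, 102, 104};
        unsigned exec_oids[] = {101, 102, 103, 104, 105};
        int      plan_map[]  = {0, 1, 2};   /* subplan index per planned part */
        int      exec_map[5];

        int pd_idx = 0;
        for (int pp_idx = 0; pp_idx < 5; pp_idx++)
        {
            if (pd_idx < 3 && exec_oids[pp_idx] == plan_oids[pd_idx])
                exec_map[pp_idx] = plan_map[pd_idx++];
            else
                exec_map[pp_idx] = -1;  /* added since plan time: as if pruned */
        }
        for (int i = 0; i < 5; i++)
            printf("partition %u -> subplan %d\n", exec_oids[i], exec_map[i]);
        return 0;
    }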
*/ pprune->subpart_map = palloc(sizeof(int) * partdesc->nparts); diff --git a/src/backend/executor/execReplication.c b/src/backend/executor/execReplication.c index f8f6463358f..0326284c83f 100644 --- a/src/backend/executor/execReplication.c +++ b/src/backend/executor/execReplication.c @@ -227,7 +227,7 @@ retry: static bool tuples_equal(TupleTableSlot *slot1, TupleTableSlot *slot2) { - int attrnum; + int attrnum; Assert(slot1->tts_tupleDescriptor->natts == slot2->tts_tupleDescriptor->natts); @@ -265,8 +265,8 @@ tuples_equal(TupleTableSlot *slot1, TupleTableSlot *slot2) if (!DatumGetBool(FunctionCall2Coll(&typentry->eq_opr_finfo, att->attcollation, - slot1->tts_values[attrnum], - slot2->tts_values[attrnum]))) + slot1->tts_values[attrnum], + slot2->tts_values[attrnum]))) return false; } @@ -406,7 +406,7 @@ ExecSimpleRelationInsert(EState *estate, TupleTableSlot *slot) resultRelInfo->ri_TrigDesc->trig_insert_before_row) { if (!ExecBRInsertTriggers(estate, resultRelInfo, slot)) - skip_tuple = true; /* "do nothing" */ + skip_tuple = true; /* "do nothing" */ } if (!skip_tuple) @@ -471,7 +471,7 @@ ExecSimpleRelationUpdate(EState *estate, EPQState *epqstate, { if (!ExecBRUpdateTriggers(estate, epqstate, resultRelInfo, tid, NULL, slot)) - skip_tuple = true; /* "do nothing" */ + skip_tuple = true; /* "do nothing" */ } if (!skip_tuple) @@ -490,7 +490,7 @@ ExecSimpleRelationUpdate(EState *estate, EPQState *epqstate, if (resultRelInfo->ri_PartitionCheck) ExecPartitionCheck(resultRelInfo, slot, estate, true); - simple_table_update(rel, tid, slot,estate->es_snapshot, + simple_table_update(rel, tid, slot, estate->es_snapshot, &update_indexes); if (resultRelInfo->ri_NumIndices > 0 && update_indexes) @@ -591,8 +591,8 @@ CheckSubscriptionRelkind(char relkind, const char *nspname, const char *relname) { /* - * We currently only support writing to regular tables. However, give - * a more specific error for partitioned and foreign tables. + * We currently only support writing to regular tables. However, give a + * more specific error for partitioned and foreign tables. 
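tuples_equal() in the execReplication.c hunk above compares two slots column by column, treating a NULL/non-NULL mismatch as unequal and two NULLs as equal. A simplified standalone version of that shape, with plain int comparison standing in for the per-type equality operator the real code looks up via the typcache:

    #include <stdbool.h>
    #include <stdio.h>

    static bool
    rows_equal(const int *v1, const bool *n1,
               const int *v2, const bool *n2, int natts)
    {
        for (int i = 0; i < natts; i++)
        {
            if (n1[i] != n2[i])
                return false;       /* NULL vs non-NULL */
            if (n1[i])
                continue;           /* both NULL: treated as equal here */
            if (v1[i] != v2[i])     /* real code calls the type's = operator */
                return false;
        }
        return true;
    }

    int main(void)
    {
        int  v1[] = {1, 2, 3}, v2[] = {1, 9, 3};
        bool n1[] = {false, true, false}, n2[] = {false, true, false};

        /* column 1 is NULL in both rows, so its values are never compared */
        printf("%s\n", rows_equal(v1, n1, v2, n2, 3) ? "equal" : "unequal");
        return 0;
    }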
*/ if (relkind == RELKIND_PARTITIONED_TABLE) ereport(ERROR, @@ -600,14 +600,14 @@ CheckSubscriptionRelkind(char relkind, const char *nspname, errmsg("cannot use relation \"%s.%s\" as logical replication target", nspname, relname), errdetail("\"%s.%s\" is a partitioned table.", - nspname, relname))); + nspname, relname))); else if (relkind == RELKIND_FOREIGN_TABLE) ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("cannot use relation \"%s.%s\" as logical replication target", nspname, relname), errdetail("\"%s.%s\" is a foreign table.", - nspname, relname))); + nspname, relname))); if (relkind != RELKIND_RELATION) ereport(ERROR, @@ -615,5 +615,5 @@ CheckSubscriptionRelkind(char relkind, const char *nspname, errmsg("cannot use relation \"%s.%s\" as logical replication target", nspname, relname), errdetail("\"%s.%s\" is not a table.", - nspname, relname))); + nspname, relname))); } diff --git a/src/backend/executor/execScan.c b/src/backend/executor/execScan.c index 881131aff26..67c4be5108f 100644 --- a/src/backend/executor/execScan.c +++ b/src/backend/executor/execScan.c @@ -81,7 +81,8 @@ ExecScanFetch(ScanState *node, /* Check if it meets the access-method conditions */ if (!(*recheckMtd) (node, slot)) - return ExecClearTuple(slot); /* would not be returned by scan */ + return ExecClearTuple(slot); /* would not be returned by + * scan */ return slot; } diff --git a/src/backend/executor/execTuples.c b/src/backend/executor/execTuples.c index 55d1669db09..ad13fd9a05b 100644 --- a/src/backend/executor/execTuples.c +++ b/src/backend/executor/execTuples.c @@ -71,13 +71,12 @@ static TupleDesc ExecTypeFromTLInternal(List *targetList, bool skipjunk); -static pg_attribute_always_inline void -slot_deform_heap_tuple(TupleTableSlot *slot, HeapTuple tuple, uint32 *offp, +static pg_attribute_always_inline void slot_deform_heap_tuple(TupleTableSlot *slot, HeapTuple tuple, uint32 *offp, int natts); static inline void tts_buffer_heap_store_tuple(TupleTableSlot *slot, - HeapTuple tuple, - Buffer buffer, - bool transfer_pin); + HeapTuple tuple, + Buffer buffer, + bool transfer_pin); static void tts_heap_store_tuple(TupleTableSlot *slot, HeapTuple tuple, bool shouldFree); @@ -138,7 +137,7 @@ tts_virtual_getsysattr(TupleTableSlot *slot, int attnum, bool *isnull) { elog(ERROR, "virtual tuple table slot does not have system attributes"); - return 0; /* silence compiler warnings */ + return 0; /* silence compiler warnings */ } /* @@ -164,7 +163,7 @@ tts_virtual_materialize(TupleTableSlot *slot) for (int natt = 0; natt < desc->natts; natt++) { Form_pg_attribute att = TupleDescAttr(desc, natt); - Datum val; + Datum val; if (att->attbyval || slot->tts_isnull[natt]) continue; @@ -200,7 +199,7 @@ tts_virtual_materialize(TupleTableSlot *slot) for (int natt = 0; natt < desc->natts; natt++) { Form_pg_attribute att = TupleDescAttr(desc, natt); - Datum val; + Datum val; if (att->attbyval || slot->tts_isnull[natt]) continue; @@ -210,7 +209,7 @@ tts_virtual_materialize(TupleTableSlot *slot) if (att->attlen == -1 && VARATT_IS_EXTERNAL_EXPANDED(DatumGetPointer(val))) { - Size data_length; + Size data_length; /* * We want to flatten the expanded value so that the materialized @@ -228,7 +227,7 @@ tts_virtual_materialize(TupleTableSlot *slot) } else { - Size data_length = 0; + Size data_length = 0; data = (char *) att_align_nominal(data, att->attalign); data_length = att_addlength_datum(data_length, att->attlen, val); @@ -382,7 +381,7 @@ tts_heap_materialize(TupleTableSlot *slot) static void tts_heap_copyslot(TupleTableSlot 
*dstslot, TupleTableSlot *srcslot) { - HeapTuple tuple; + HeapTuple tuple; MemoryContext oldcontext; oldcontext = MemoryContextSwitchTo(dstslot->tts_mcxt); @@ -499,7 +498,7 @@ tts_minimal_getsysattr(TupleTableSlot *slot, int attnum, bool *isnull) { elog(ERROR, "minimal tuple table slot does not have system attributes"); - return 0; /* silence compiler warnings */ + return 0; /* silence compiler warnings */ } static void @@ -1077,8 +1076,10 @@ TupleTableSlot * MakeTupleTableSlot(TupleDesc tupleDesc, const TupleTableSlotOps *tts_ops) { - Size basesz, allocsz; + Size basesz, + allocsz; TupleTableSlot *slot; + basesz = tts_ops->base_slot_size; /* @@ -1866,7 +1867,7 @@ void slot_getsomeattrs_int(TupleTableSlot *slot, int attnum) { /* Check for caller errors */ - Assert(slot->tts_nvalid < attnum); /* slot_getsomeattr checked */ + Assert(slot->tts_nvalid < attnum); /* slot_getsomeattr checked */ Assert(attnum > 0); if (unlikely(attnum > slot->tts_tupleDescriptor->natts)) @@ -1876,8 +1877,8 @@ slot_getsomeattrs_int(TupleTableSlot *slot, int attnum) slot->tts_ops->getsomeattrs(slot, attnum); /* - * If the underlying tuple doesn't have enough attributes, tuple descriptor - * must have the missing attributes. + * If the underlying tuple doesn't have enough attributes, tuple + * descriptor must have the missing attributes. */ if (unlikely(slot->tts_nvalid < attnum)) { diff --git a/src/backend/executor/functions.c b/src/backend/executor/functions.c index 965e5dea70e..b34f565bfe7 100644 --- a/src/backend/executor/functions.c +++ b/src/backend/executor/functions.c @@ -1762,7 +1762,7 @@ check_sql_fn_retval(Oid func_id, Oid rettype, List *queryTreeList, if (junkFilter) { TupleTableSlot *slot = - MakeSingleTupleTableSlot(NULL, &TTSOpsMinimalTuple); + MakeSingleTupleTableSlot(NULL, &TTSOpsMinimalTuple); *junkFilter = ExecInitJunkFilter(tlist, slot); } @@ -1929,7 +1929,7 @@ check_sql_fn_retval(Oid func_id, Oid rettype, List *queryTreeList, if (junkFilter) { TupleTableSlot *slot = - MakeSingleTupleTableSlot(NULL, &TTSOpsMinimalTuple); + MakeSingleTupleTableSlot(NULL, &TTSOpsMinimalTuple); *junkFilter = ExecInitJunkFilterConversion(tlist, CreateTupleDescCopy(tupdesc), diff --git a/src/backend/executor/nodeAgg.c b/src/backend/executor/nodeAgg.c index fd3c71e7641..43ab9fb3924 100644 --- a/src/backend/executor/nodeAgg.c +++ b/src/backend/executor/nodeAgg.c @@ -754,7 +754,7 @@ process_ordered_aggregate_single(AggState *aggstate, oldAbbrevVal == newAbbrevVal && DatumGetBool(FunctionCall2Coll(&pertrans->equalfnOne, pertrans->aggCollation, - oldVal, *newVal))))) + oldVal, *newVal))))) { /* equal to prior, so forget this one */ if (!pertrans->inputtypeByVal && !*isNull) diff --git a/src/backend/executor/nodeGatherMerge.c b/src/backend/executor/nodeGatherMerge.c index 4de1d2b484d..d2da5d3a951 100644 --- a/src/backend/executor/nodeGatherMerge.c +++ b/src/backend/executor/nodeGatherMerge.c @@ -700,10 +700,10 @@ gather_merge_readnext(GatherMergeState *gm_state, int reader, bool nowait) Assert(HeapTupleIsValid(tup)); /* Build the TupleTableSlot for the given tuple */ - ExecStoreHeapTuple(tup, /* tuple to store */ + ExecStoreHeapTuple(tup, /* tuple to store */ gm_state->gm_slots[reader], /* slot in which to store * the tuple */ - true); /* pfree tuple when done with it */ + true); /* pfree tuple when done with it */ return true; } diff --git a/src/backend/executor/nodeHashjoin.c b/src/backend/executor/nodeHashjoin.c index aa43296e26c..5ccdc1af2e8 100644 --- a/src/backend/executor/nodeHashjoin.c +++ 
b/src/backend/executor/nodeHashjoin.c @@ -750,7 +750,7 @@ ExecInitHashJoin(HashJoin *node, EState *estate, int eflags) rclauses = lappend(rclauses, ExecInitExpr(lsecond(hclause->args), (PlanState *) hjstate)); rhclauses = lappend(rhclauses, ExecInitExpr(lsecond(hclause->args), - innerPlanState(hjstate))); + innerPlanState(hjstate))); hoperators = lappend_oid(hoperators, hclause->opno); hcollations = lappend_oid(hcollations, hclause->inputcollid); } diff --git a/src/backend/executor/nodeIndexonlyscan.c b/src/backend/executor/nodeIndexonlyscan.c index 8fd52e9c803..5dce284fe73 100644 --- a/src/backend/executor/nodeIndexonlyscan.c +++ b/src/backend/executor/nodeIndexonlyscan.c @@ -192,9 +192,9 @@ IndexOnlyNext(IndexOnlyScanState *node) /* * Fill the scan tuple slot with data from the index. This might be - * provided in either HeapTuple or IndexTuple format. Conceivably - * an index AM might fill both fields, in which case we prefer the - * heap format, since it's probably a bit cheaper to fill a slot from. + * provided in either HeapTuple or IndexTuple format. Conceivably an + * index AM might fill both fields, in which case we prefer the heap + * format, since it's probably a bit cheaper to fill a slot from. */ if (scandesc->xs_hitup) { diff --git a/src/backend/executor/nodeIndexscan.c b/src/backend/executor/nodeIndexscan.c index c97eb60f779..73bfd424d98 100644 --- a/src/backend/executor/nodeIndexscan.c +++ b/src/backend/executor/nodeIndexscan.c @@ -242,7 +242,7 @@ IndexNextWithReorder(IndexScanState *node) scandesc->xs_orderbynulls, node) <= 0) { - HeapTuple tuple; + HeapTuple tuple; tuple = reorderqueue_pop(node); diff --git a/src/backend/executor/nodeLockRows.c b/src/backend/executor/nodeLockRows.c index 7674ac893c2..4067554ed94 100644 --- a/src/backend/executor/nodeLockRows.c +++ b/src/backend/executor/nodeLockRows.c @@ -327,7 +327,7 @@ ExecInitLockRows(LockRows *node, EState *estate, int eflags) /* node returns unmodified slots from the outer plan */ lrstate->ps.resultopsset = true; lrstate->ps.resultops = ExecGetResultSlotOps(outerPlanState(lrstate), - &lrstate->ps.resultopsfixed); + &lrstate->ps.resultopsfixed); /* * LockRows nodes do no projections, so initialize projection info for diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c index d3a0dece5ad..8acdaf20573 100644 --- a/src/backend/executor/nodeModifyTable.c +++ b/src/backend/executor/nodeModifyTable.c @@ -865,6 +865,7 @@ ldelete:; goto ldelete; case TM_SelfModified: + /* * This can be reached when following an update * chain from a tuple updated by another session, @@ -1070,7 +1071,7 @@ ExecUpdate(ModifyTableState *mtstate, { if (!ExecBRUpdateTriggers(estate, epqstate, resultRelInfo, tupleid, oldtuple, slot)) - return NULL; /* "do nothing" */ + return NULL; /* "do nothing" */ } /* INSTEAD OF ROW UPDATE Triggers */ @@ -1079,7 +1080,7 @@ ExecUpdate(ModifyTableState *mtstate, { if (!ExecIRUpdateTriggers(estate, resultRelInfo, oldtuple, slot)) - return NULL; /* "do nothing" */ + return NULL; /* "do nothing" */ } else if (resultRelInfo->ri_FdwRoutine) { @@ -1401,6 +1402,7 @@ lreplace:; return NULL; case TM_SelfModified: + /* * This can be reached when following an update * chain from a tuple updated by another session, diff --git a/src/backend/executor/nodeSeqscan.c b/src/backend/executor/nodeSeqscan.c index 8bd7430a918..436b43f8ca5 100644 --- a/src/backend/executor/nodeSeqscan.c +++ b/src/backend/executor/nodeSeqscan.c @@ -228,8 +228,8 @@ ExecReScanSeqScan(SeqScanState *node) scan = 
node->ss.ss_currentScanDesc; if (scan != NULL) - table_rescan(scan, /* scan desc */ - NULL); /* new scan keys */ + table_rescan(scan, /* scan desc */ + NULL); /* new scan keys */ ExecScanReScan((ScanState *) node); } diff --git a/src/backend/executor/nodeSubplan.c b/src/backend/executor/nodeSubplan.c index 749b4eced34..3662fcada8c 100644 --- a/src/backend/executor/nodeSubplan.c +++ b/src/backend/executor/nodeSubplan.c @@ -684,7 +684,7 @@ execTuplesUnequal(TupleTableSlot *slot1, /* Apply the type-specific equality function */ if (!DatumGetBool(FunctionCall2Coll(&eqfunctions[i], collations[i], - attr1, attr2))) + attr1, attr2))) { result = true; /* they are unequal */ break; diff --git a/src/backend/executor/nodeSubqueryscan.c b/src/backend/executor/nodeSubqueryscan.c index 707ec0d1901..de8c0060510 100644 --- a/src/backend/executor/nodeSubqueryscan.c +++ b/src/backend/executor/nodeSubqueryscan.c @@ -131,6 +131,7 @@ ExecInitSubqueryScan(SubqueryScan *node, EState *estate, int eflags) ExecInitScanTupleSlot(estate, &subquerystate->ss, ExecGetResultType(subquerystate->subplan), ExecGetResultSlotOps(subquerystate->subplan, NULL)); + /* * The slot used as the scantuple isn't the slot above (outside of EPQ), * but the one from the node below. diff --git a/src/backend/libpq/auth.c b/src/backend/libpq/auth.c index 62466be7023..caf3b71f9e1 100644 --- a/src/backend/libpq/auth.c +++ b/src/backend/libpq/auth.c @@ -2154,8 +2154,8 @@ CheckPAMAuth(Port *port, const char *user, const char *password) * later used inside the PAM conversation to pass the password to the * authentication module. */ - pam_passw_conv.appdata_ptr = unconstify(char *, password); /* from password above, - * not allocated */ + pam_passw_conv.appdata_ptr = unconstify(char *, password); /* from password above, + * not allocated */ /* Optionally, one can set the service name in pg_hba.conf */ if (port->hba->pamservice && port->hba->pamservice[0] != '\0') diff --git a/src/backend/libpq/be-secure-openssl.c b/src/backend/libpq/be-secure-openssl.c index c38a71df587..673066a4561 100644 --- a/src/backend/libpq/be-secure-openssl.c +++ b/src/backend/libpq/be-secure-openssl.c @@ -68,10 +68,10 @@ static bool dummy_ssl_passwd_cb_called = false; static bool ssl_is_server_start; static int ssl_protocol_version_to_openssl(int v, const char *guc_name, - int loglevel); + int loglevel); #ifndef SSL_CTX_set_min_proto_version -static int SSL_CTX_set_min_proto_version(SSL_CTX *ctx, int version); -static int SSL_CTX_set_max_proto_version(SSL_CTX *ctx, int version); +static int SSL_CTX_set_min_proto_version(SSL_CTX *ctx, int version); +static int SSL_CTX_set_max_proto_version(SSL_CTX *ctx, int version); #endif @@ -192,9 +192,10 @@ be_tls_init(bool isServerStart) if (ssl_min_protocol_version) { - int ssl_ver = ssl_protocol_version_to_openssl(ssl_min_protocol_version, - "ssl_min_protocol_version", - isServerStart ? FATAL : LOG); + int ssl_ver = ssl_protocol_version_to_openssl(ssl_min_protocol_version, + "ssl_min_protocol_version", + isServerStart ? FATAL : LOG); + if (ssl_ver == -1) goto error; SSL_CTX_set_min_proto_version(context, ssl_ver); @@ -202,9 +203,10 @@ be_tls_init(bool isServerStart) if (ssl_max_protocol_version) { - int ssl_ver = ssl_protocol_version_to_openssl(ssl_max_protocol_version, - "ssl_max_protocol_version", - isServerStart ? FATAL : LOG); + int ssl_ver = ssl_protocol_version_to_openssl(ssl_max_protocol_version, + "ssl_max_protocol_version", + isServerStart ? 
FATAL : LOG); + if (ssl_ver == -1) goto error; SSL_CTX_set_max_proto_version(context, ssl_ver); @@ -1150,6 +1152,7 @@ be_tls_get_peer_serial(Port *port, char *ptr, size_t len) serial = X509_get_serialNumber(port->peer); b = ASN1_INTEGER_to_BN(serial, NULL); decimal = BN_bn2dec(b); + BN_free(b); strlcpy(ptr, decimal, len); OPENSSL_free(decimal); diff --git a/src/backend/nodes/bitmapset.c b/src/backend/nodes/bitmapset.c index 4ad17d0c31e..665149defe2 100644 --- a/src/backend/nodes/bitmapset.c +++ b/src/backend/nodes/bitmapset.c @@ -476,10 +476,10 @@ bms_member_index(Bitmapset *a, int x) } /* - * Now add bits of the last word, but only those before the item. - * We can do that by applying a mask and then using popcount again. - * To get 0-based index, we want to count only preceding bits, not - * the item itself, so we subtract 1. + * Now add bits of the last word, but only those before the item. We can + * do that by applying a mask and then using popcount again. To get + * 0-based index, we want to count only preceding bits, not the item + * itself, so we subtract 1. */ mask = ((bitmapword) 1 << bitnum) - 1; result += bmw_popcount(a->words[wordnum] & mask); diff --git a/src/backend/optimizer/path/clausesel.c b/src/backend/optimizer/path/clausesel.c index 9dae586a518..bfad6b79878 100644 --- a/src/backend/optimizer/path/clausesel.c +++ b/src/backend/optimizer/path/clausesel.c @@ -161,9 +161,9 @@ clauselist_selectivity_simple(PlannerInfo *root, int listidx; /* - * If there's exactly one clause (and it was not estimated yet), just - * go directly to clause_selectivity(). None of what we might do below - * is relevant. + * If there's exactly one clause (and it was not estimated yet), just go + * directly to clause_selectivity(). None of what we might do below is + * relevant. */ if ((list_length(clauses) == 1) && bms_num_members(estimatedclauses) == 0) diff --git a/src/backend/optimizer/util/inherit.c b/src/backend/optimizer/util/inherit.c index ccc8c11a985..bbf204ddfb8 100644 --- a/src/backend/optimizer/util/inherit.c +++ b/src/backend/optimizer/util/inherit.c @@ -311,6 +311,7 @@ expand_partitioned_rtentry(PlannerInfo *root, RelOptInfo *relinfo, if (!root->partColsUpdated) root->partColsUpdated = has_partition_attrs(parentrel, parentrte->updatedCols, NULL); + /* * There shouldn't be any generated columns in the partition key. */ diff --git a/src/backend/optimizer/util/pathnode.c b/src/backend/optimizer/util/pathnode.c index 36aee35d462..d66471c7a46 100644 --- a/src/backend/optimizer/util/pathnode.c +++ b/src/backend/optimizer/util/pathnode.c @@ -3621,7 +3621,7 @@ create_limit_path(PlannerInfo *root, RelOptInfo *rel, */ void adjust_limit_rows_costs(double *rows, /* in/out parameter */ - Cost *startup_cost, /* in/out parameter */ + Cost *startup_cost, /* in/out parameter */ Cost *total_cost, /* in/out parameter */ int64 offset_est, int64 count_est) diff --git a/src/backend/parser/parse_utilcmd.c b/src/backend/parser/parse_utilcmd.c index 4564c0ae815..bbeaada2ae1 100644 --- a/src/backend/parser/parse_utilcmd.c +++ b/src/backend/parser/parse_utilcmd.c @@ -1053,8 +1053,8 @@ transformTableLikeClause(CreateStmtContext *cxt, TableLikeClause *table_like_cla InvalidOid, &found_whole_row); /* - * Prevent this for the same reason as for constraints below. - * Note that defaults cannot contain any vars, so it's OK that the + * Prevent this for the same reason as for constraints below. Note + * that defaults cannot contain any vars, so it's OK that the * error message refers to generated columns. 
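The bms_member_index() comment above describes a mask-and-popcount step. Reduced to one word: mask off the bit itself and everything above it, then count what remains to get the member's 0-based index. A standalone sketch, assuming GCC/Clang for __builtin_popcountll (the real function also sums full preceding words first):

    #include <stdio.h>
    #include <stdint.h>

    /* 0-based index of bit x among the set bits of a one-word bitmap */
    static int
    member_index(uint64_t word, int x)
    {
        uint64_t mask = ((uint64_t) 1 << x) - 1;    /* bits strictly below x */

        return __builtin_popcountll(word & mask);
    }

    int main(void)
    {
        uint64_t bits = (1ull << 2) | (1ull << 5) | (1ull << 9);

        printf("%d %d %d\n",
               member_index(bits, 2),   /* 0 */
               member_index(bits, 5),   /* 1 */
               member_index(bits, 9));  /* 2 */
        return 0;
    }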
*/ if (found_whole_row) @@ -3845,11 +3845,11 @@ transformPartitionBound(ParseState *pstate, Relation parent, * any necessary validation. */ result_spec->lowerdatums = - transformPartitionRangeBounds(pstate, spec->lowerdatums, - parent); + transformPartitionRangeBounds(pstate, spec->lowerdatums, + parent); result_spec->upperdatums = - transformPartitionRangeBounds(pstate, spec->upperdatums, - parent); + transformPartitionRangeBounds(pstate, spec->upperdatums, + parent); } else elog(ERROR, "unexpected partition strategy: %d", (int) strategy); @@ -3876,17 +3876,17 @@ transformPartitionRangeBounds(ParseState *pstate, List *blist, i = j = 0; foreach(lc, blist) { - Node *expr = lfirst(lc); + Node *expr = lfirst(lc); PartitionRangeDatum *prd = NULL; /* - * Infinite range bounds -- "minvalue" and "maxvalue" -- get passed - * in as ColumnRefs. + * Infinite range bounds -- "minvalue" and "maxvalue" -- get passed in + * as ColumnRefs. */ if (IsA(expr, ColumnRef)) { - ColumnRef *cref = (ColumnRef *) expr; - char *cname = NULL; + ColumnRef *cref = (ColumnRef *) expr; + char *cname = NULL; /* * There should be a single field named either "minvalue" or @@ -3899,8 +3899,8 @@ transformPartitionRangeBounds(ParseState *pstate, List *blist, if (cname == NULL) { /* - * ColumnRef is not in the desired single-field-name form. - * For consistency between all partition strategies, let the + * ColumnRef is not in the desired single-field-name form. For + * consistency between all partition strategies, let the * expression transformation report any errors rather than * doing it ourselves. */ @@ -3965,8 +3965,8 @@ transformPartitionRangeBounds(ParseState *pstate, List *blist, } /* - * Once we see MINVALUE or MAXVALUE for one column, the remaining - * columns must be the same. + * Once we see MINVALUE or MAXVALUE for one column, the remaining columns + * must be the same. */ validateInfiniteBounds(pstate, result); @@ -4030,13 +4030,13 @@ transformPartitionBoundValue(ParseState *pstate, Node *val, /* * Check that the input expression's collation is compatible with one - * specified for the parent's partition key (partcollation). Don't - * throw an error if it's the default collation which we'll replace with - * the parent's collation anyway. + * specified for the parent's partition key (partcollation). Don't throw + * an error if it's the default collation which we'll replace with the + * parent's collation anyway. */ if (IsA(value, CollateExpr)) { - Oid exprCollOid = exprCollation(value); + Oid exprCollOid = exprCollation(value); if (OidIsValid(exprCollOid) && exprCollOid != DEFAULT_COLLATION_OID && diff --git a/src/backend/partitioning/partdesc.c b/src/backend/partitioning/partdesc.c index 4d6595b2497..b207b765f2a 100644 --- a/src/backend/partitioning/partdesc.c +++ b/src/backend/partitioning/partdesc.c @@ -38,7 +38,7 @@ typedef struct PartitionDirectoryData { MemoryContext pdir_mcxt; HTAB *pdir_hash; -} PartitionDirectoryData; +} PartitionDirectoryData; typedef struct PartitionDirectoryEntry { @@ -74,9 +74,9 @@ RelationBuildPartitionDesc(Relation rel) /* * Get partition oids from pg_inherits. This uses a single snapshot to - * fetch the list of children, so while more children may be getting - * added concurrently, whatever this function returns will be accurate - * as of some well-defined point in time. + * fetch the list of children, so while more children may be getting added + * concurrently, whatever this function returns will be accurate as of + * some well-defined point in time. 
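The rule enforced by validateInfiniteBounds() earlier in this chunk -- once MINVALUE or MAXVALUE appears in a range bound, every later column must repeat the same marker -- can be checked with a single pass. A standalone sketch of that check, with a toy enum in place of the real PartitionRangeDatum kinds:

    #include <stdio.h>
    #include <stdbool.h>

    typedef enum { BOUND_MIN = -1, BOUND_VALUE = 0, BOUND_MAX = 1 } BoundKind;

    static bool
    bounds_valid(const BoundKind *cols, int ncols)
    {
        BoundKind seen = BOUND_VALUE;

        for (int i = 0; i < ncols; i++)
        {
            /* after the first infinite marker, only that marker is allowed */
            if (seen != BOUND_VALUE && cols[i] != seen)
                return false;
            if (cols[i] != BOUND_VALUE)
                seen = cols[i];
        }
        return true;
    }

    int main(void)
    {
        BoundKind ok[]  = {BOUND_VALUE, BOUND_MIN, BOUND_MIN};
        BoundKind bad[] = {BOUND_VALUE, BOUND_MIN, BOUND_VALUE};

        printf("%d %d\n", (int) bounds_valid(ok, 3), (int) bounds_valid(bad, 3));
        return 0;
    }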
*/ inhoids = find_inheritance_children(RelationGetRelid(rel), NoLock); nparts = list_length(inhoids); @@ -122,14 +122,14 @@ RelationBuildPartitionDesc(Relation rel) * * Note that this algorithm assumes that PartitionBoundSpec we manage * to fetch is the right one -- so this is only good enough for - * concurrent ATTACH PARTITION, not concurrent DETACH PARTITION - * or some hypothetical operation that changes the partition bounds. + * concurrent ATTACH PARTITION, not concurrent DETACH PARTITION or + * some hypothetical operation that changes the partition bounds. */ if (boundspec == NULL) { Relation pg_class; - SysScanDesc scan; - ScanKeyData key[1]; + SysScanDesc scan; + ScanKeyData key[1]; Datum datum; bool isnull; @@ -301,7 +301,7 @@ PartitionDirectoryLookup(PartitionDirectory pdir, Relation rel) void DestroyPartitionDirectory(PartitionDirectory pdir) { - HASH_SEQ_STATUS status; + HASH_SEQ_STATUS status; PartitionDirectoryEntry *pde; hash_seq_init(&status, pdir->pdir_hash); diff --git a/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c b/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c index 765d58d120c..b455c59cd70 100644 --- a/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c +++ b/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c @@ -57,7 +57,7 @@ static void libpqrcv_get_senderinfo(WalReceiverConn *conn, char **sender_host, int *sender_port); static char *libpqrcv_identify_system(WalReceiverConn *conn, TimeLineID *primary_tli); -static int libpqrcv_server_version(WalReceiverConn *conn); +static int libpqrcv_server_version(WalReceiverConn *conn); static void libpqrcv_readtimelinehistoryfile(WalReceiverConn *conn, TimeLineID tli, char **filename, char **content, int *len); diff --git a/src/backend/replication/logical/reorderbuffer.c b/src/backend/replication/logical/reorderbuffer.c index 65f86ad73db..acebf5893e9 100644 --- a/src/backend/replication/logical/reorderbuffer.c +++ b/src/backend/replication/logical/reorderbuffer.c @@ -468,8 +468,8 @@ ReorderBufferReturnTupleBuf(ReorderBuffer *rb, ReorderBufferTupleBuf *tuple) Oid * ReorderBufferGetRelids(ReorderBuffer *rb, int nrelids) { - Oid *relids; - Size alloc_len; + Oid *relids; + Size alloc_len; alloc_len = sizeof(Oid) * nrelids; @@ -1327,8 +1327,8 @@ ReorderBufferBuildTupleCidHash(ReorderBuffer *rb, ReorderBufferTXN *txn) else { /* - * Maybe we already saw this tuple before in this transaction, - * but if so it must have the same cmin. + * Maybe we already saw this tuple before in this transaction, but + * if so it must have the same cmin. 
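The Assert at the end of the ReorderBufferBuildTupleCidHash() hunk encodes an insert-or-verify pattern: the first sighting of a key records its cmin, and any later sighting must agree. A toy fixed-size map showing just that shape (keys are assumed to be small nonnegative ints; the real code uses a dynahash keyed by relfilenode and ctid):

    #include <assert.h>
    #include <stdio.h>

    #define NKEYS 16
    static int cmin_by_key[NKEYS];
    static int seen[NKEYS];

    static void
    record_cmin(int key, int cmin)
    {
        if (!seen[key])
        {
            seen[key] = 1;
            cmin_by_key[key] = cmin;    /* first sighting: record it */
        }
        else
            assert(cmin_by_key[key] == cmin);   /* same tuple => same cmin */
    }

    int main(void)
    {
        record_cmin(3, 7);
        record_cmin(3, 7);      /* fine: agrees with the recorded value */
        printf("cmin for key 3: %d\n", cmin_by_key[3]);
        return 0;
    }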
*/ Assert(ent->cmin == change->data.tuplecid.cmin); @@ -2464,8 +2464,8 @@ ReorderBufferSerializeChange(ReorderBuffer *rb, ReorderBufferTXN *txn, } case REORDER_BUFFER_CHANGE_TRUNCATE: { - Size size; - char *data; + Size size; + char *data; /* account for the OIDs of truncated relations */ size = sizeof(Oid) * change->data.truncate.nrelids; @@ -2767,7 +2767,7 @@ ReorderBufferRestoreChange(ReorderBuffer *rb, ReorderBufferTXN *txn, /* the base struct contains all the data, easy peasy */ case REORDER_BUFFER_CHANGE_TRUNCATE: { - Oid *relids; + Oid *relids; relids = ReorderBufferGetRelids(rb, change->data.truncate.nrelids); diff --git a/src/backend/replication/slotfuncs.c b/src/backend/replication/slotfuncs.c index 182fe5bc825..808a6f5b836 100644 --- a/src/backend/replication/slotfuncs.c +++ b/src/backend/replication/slotfuncs.c @@ -730,11 +730,11 @@ copy_replication_slot(FunctionCallInfo fcinfo, bool logical_slot) SpinLockRelease(&src->mutex); /* - * Check if the source slot still exists and is valid. We regard it - * as invalid if the type of replication slot or name has been - * changed, or the restart_lsn either is invalid or has gone backward. - * (The restart_lsn could go backwards if the source slot is dropped - * and copied from an older slot during installation.) + * Check if the source slot still exists and is valid. We regard it as + * invalid if the type of replication slot or name has been changed, + * or the restart_lsn either is invalid or has gone backward. (The + * restart_lsn could go backwards if the source slot is dropped and + * copied from an older slot during installation.) * * Since erroring out will release and drop the destination slot we * don't need to release it here. diff --git a/src/backend/replication/syncrep.c b/src/backend/replication/syncrep.c index 6c160c13c6f..83734575c28 100644 --- a/src/backend/replication/syncrep.c +++ b/src/backend/replication/syncrep.c @@ -276,9 +276,8 @@ SyncRepWaitForLSN(XLogRecPtr lsn, bool commit) WAIT_EVENT_SYNC_REP); /* - * If the postmaster dies, we'll probably never get an - * acknowledgment, because all the wal sender processes will exit. So - * just bail out. + * If the postmaster dies, we'll probably never get an acknowledgment, + * because all the wal sender processes will exit. So just bail out. */ if (rc & WL_POSTMASTER_DEATH) { diff --git a/src/backend/replication/walreceiver.c b/src/backend/replication/walreceiver.c index d52ec7b2cf1..6abc7807783 100644 --- a/src/backend/replication/walreceiver.c +++ b/src/backend/replication/walreceiver.c @@ -808,11 +808,11 @@ WalRcvQuickDieHandler(SIGNAL_ARGS) * anyway. * * Note we use _exit(2) not _exit(0). This is to force the postmaster - * into a system reset cycle if someone sends a manual SIGQUIT to a - * random backend. This is necessary precisely because we don't clean up - * our shared memory state. (The "dead man switch" mechanism in - * pmsignal.c should ensure the postmaster sees this as a crash, too, but - * no harm in being doubly sure.) + * into a system reset cycle if someone sends a manual SIGQUIT to a random + * backend. This is necessary precisely because we don't clean up our + * shared memory state. (The "dead man switch" mechanism in pmsignal.c + * should ensure the postmaster sees this as a crash, too, but no harm in + * being doubly sure.) 
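WalRcvQuickDieHandler()'s _exit(2) is the generic "die with a nonzero status so the supervisor sees a crash and resets shared state" pattern. A minimal POSIX sketch of the idea; signal() and pause() here are illustrative stand-ins, not the backend's pqsignal machinery:

    #include <signal.h>
    #include <stdio.h>
    #include <unistd.h>

    /* Exit with a nonzero status so the parent treats this as a crash,
     * not a clean shutdown -- we deliberately clean nothing up. */
    static void
    quick_die(int signo)
    {
        (void) signo;
        _exit(2);
    }

    int main(void)
    {
        signal(SIGQUIT, quick_die);
        printf("pid %d: send SIGQUIT to test\n", (int) getpid());
        pause();
        return 0;
    }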
*/ _exit(2); } diff --git a/src/backend/replication/walsender.c b/src/backend/replication/walsender.c index 8aa12ec9124..3f313680223 100644 --- a/src/backend/replication/walsender.c +++ b/src/backend/replication/walsender.c @@ -218,7 +218,7 @@ typedef struct int write_head; int read_heads[NUM_SYNC_REP_WAIT_MODE]; WalTimeSample last_read[NUM_SYNC_REP_WAIT_MODE]; -} LagTracker; +} LagTracker; static LagTracker *lag_tracker; @@ -1407,7 +1407,7 @@ WalSndWaitForWal(XLogRecPtr loc) sleeptime = WalSndComputeSleeptime(GetCurrentTimestamp()); wakeEvents = WL_LATCH_SET | WL_EXIT_ON_PM_DEATH | - WL_SOCKET_READABLE | WL_TIMEOUT; + WL_SOCKET_READABLE | WL_TIMEOUT; if (pq_is_send_pending()) wakeEvents |= WL_SOCKET_WRITEABLE; @@ -2255,7 +2255,7 @@ WalSndLoop(WalSndSendDataCallback send_data) int wakeEvents; wakeEvents = WL_LATCH_SET | WL_EXIT_ON_PM_DEATH | WL_TIMEOUT | - WL_SOCKET_READABLE; + WL_SOCKET_READABLE; /* * Use fresh timestamp, not last_processed, to reduce the chance diff --git a/src/backend/statistics/dependencies.c b/src/backend/statistics/dependencies.c index 0b26e4166d9..8a71c2b534a 100644 --- a/src/backend/statistics/dependencies.c +++ b/src/backend/statistics/dependencies.c @@ -279,8 +279,8 @@ dependency_degree(int numrows, HeapTuple *rows, int k, AttrNumber *dependency, * build an array of SortItem(s) sorted using the multi-sort support * * XXX This relies on all stats entries pointing to the same tuple - * descriptor. For now that assumption holds, but it might change in - * the future for example if we support statistics on multiple tables. + * descriptor. For now that assumption holds, but it might change in the + * future for example if we support statistics on multiple tables. */ items = build_sorted_items(numrows, &nitems, rows, stats[0]->tupDesc, mss, k, attnums_dep); @@ -300,8 +300,8 @@ dependency_degree(int numrows, HeapTuple *rows, int k, AttrNumber *dependency, { /* * Check if the group ended, which may be either because we processed - * all the items (i==nitems), or because the i-th item is not equal - * to the preceding one. + * all the items (i==nitems), or because the i-th item is not equal to + * the preceding one. */ if (i == nitems || multi_sort_compare_dims(0, k - 2, &items[i - 1], &items[i], mss) != 0) diff --git a/src/backend/statistics/extended_stats.c b/src/backend/statistics/extended_stats.c index ac0ae52ecfd..cc6112df3b3 100644 --- a/src/backend/statistics/extended_stats.c +++ b/src/backend/statistics/extended_stats.c @@ -67,7 +67,7 @@ static VacAttrStats **lookup_var_attr_stats(Relation rel, Bitmapset *attrs, int nvacatts, VacAttrStats **vacatts); static void statext_store(Relation pg_stext, Oid relid, MVNDistinct *ndistinct, MVDependencies *dependencies, - MCVList * mcvlist, VacAttrStats **stats); + MCVList *mcvlist, VacAttrStats **stats); /* @@ -317,7 +317,7 @@ lookup_var_attr_stats(Relation rel, Bitmapset *attrs, static void statext_store(Relation pg_stext, Oid statOid, MVNDistinct *ndistinct, MVDependencies *dependencies, - MCVList * mcv, VacAttrStats **stats) + MCVList *mcv, VacAttrStats **stats) { HeapTuple stup, oldtup; @@ -538,9 +538,9 @@ build_attnums_array(Bitmapset *attrs, int *numattrs) { /* * Make sure the bitmap contains only user-defined attributes. As - * bitmaps can't contain negative values, this can be violated in - * two ways. Firstly, the bitmap might contain 0 as a member, and - * secondly the integer value might be larger than MaxAttrNumber. + * bitmaps can't contain negative values, this can be violated in two + * ways. 
Firstly, the bitmap might contain 0 as a member, and secondly + * the integer value might be larger than MaxAttrNumber. */ Assert(AttrNumberIsForUserDefinedAttr(j)); Assert(j <= MaxAttrNumber); @@ -600,7 +600,7 @@ build_sorted_items(int numrows, int *nitems, HeapTuple *rows, TupleDesc tdesc, idx = 0; for (i = 0; i < numrows; i++) { - bool toowide = false; + bool toowide = false; items[idx].values = &values[idx * numattrs]; items[idx].isnull = &isnull[idx * numattrs]; @@ -608,8 +608,8 @@ build_sorted_items(int numrows, int *nitems, HeapTuple *rows, TupleDesc tdesc, /* load the values/null flags from sample rows */ for (j = 0; j < numattrs; j++) { - Datum value; - bool isnull; + Datum value; + bool isnull; value = heap_getattr(rows[i], attnums[j], tdesc, &isnull); @@ -988,7 +988,7 @@ statext_mcv_clauselist_selectivity(PlannerInfo *root, List *clauses, int varReli int listidx; StatisticExtInfo *stat; List *stat_clauses; - Selectivity simple_sel, + Selectivity simple_sel, mcv_sel, mcv_basesel, mcv_totalsel, @@ -1006,9 +1006,9 @@ statext_mcv_clauselist_selectivity(PlannerInfo *root, List *clauses, int varReli * Pre-process the clauses list to extract the attnums seen in each item. * We need to determine if there's any clauses which will be useful for * selectivity estimations with extended stats. Along the way we'll record - * all of the attnums for each clause in a list which we'll reference later - * so we don't need to repeat the same work again. We'll also keep track of - * all attnums seen. + * all of the attnums for each clause in a list which we'll reference + * later so we don't need to repeat the same work again. We'll also keep + * track of all attnums seen. * * We also skip clauses that we already estimated using different types of * statistics (we treat them as incompatible). @@ -1066,9 +1066,10 @@ statext_mcv_clauselist_selectivity(PlannerInfo *root, List *clauses, int varReli } /* - * First compute "simple" selectivity, i.e. without the extended statistics, - * and essentially assuming independence of the columns/clauses. We'll then - * use the various selectivities computed from MCV list to improve it. + * First compute "simple" selectivity, i.e. without the extended + * statistics, and essentially assuming independence of the + * columns/clauses. We'll then use the various selectivities computed from + * MCV list to improve it. */ simple_sel = clauselist_selectivity_simple(root, stat_clauses, varRelid, jointype, sjinfo, NULL); @@ -1105,16 +1106,16 @@ statext_clauselist_selectivity(PlannerInfo *root, List *clauses, int varRelid, JoinType jointype, SpecialJoinInfo *sjinfo, RelOptInfo *rel, Bitmapset **estimatedclauses) { - Selectivity sel; + Selectivity sel; /* First, try estimating clauses using a multivariate MCV list. */ sel = statext_mcv_clauselist_selectivity(root, clauses, varRelid, jointype, sjinfo, rel, estimatedclauses); /* - * Then, apply functional dependencies on the remaining clauses by - * calling dependencies_clauselist_selectivity. Pass 'estimatedclauses' - * so the function can properly skip clauses already estimated above. + * Then, apply functional dependencies on the remaining clauses by calling + * dependencies_clauselist_selectivity. Pass 'estimatedclauses' so the + * function can properly skip clauses already estimated above. 
* * The reasoning for applying dependencies last is that the more complex * stats can track more complex correlations between the attributes, and diff --git a/src/backend/statistics/mcv.c b/src/backend/statistics/mcv.c index 05ab6c9bb7a..d22820dec74 100644 --- a/src/backend/statistics/mcv.c +++ b/src/backend/statistics/mcv.c @@ -209,20 +209,20 @@ statext_mcv_build(int numrows, HeapTuple *rows, Bitmapset *attrs, * * Using the same algorithm might exclude items that are close to the * "average" frequency of the sample. But that does not say whether the - * observed frequency is close to the base frequency or not. We also - * need to consider unexpectedly uncommon items (again, compared to the - * base frequency), and the single-column algorithm does not have to. + * observed frequency is close to the base frequency or not. We also need + * to consider unexpectedly uncommon items (again, compared to the base + * frequency), and the single-column algorithm does not have to. * * We simply decide how many items to keep by computing minimum count - * using get_mincount_for_mcv_list() and then keep all items that seem - * to be more common than that. + * using get_mincount_for_mcv_list() and then keep all items that seem to + * be more common than that. */ mincount = get_mincount_for_mcv_list(numrows, totalrows); /* - * Walk the groups until we find the first group with a count below - * the mincount threshold (the index of that group is the number of - * groups we want to keep). + * Walk the groups until we find the first group with a count below the + * mincount threshold (the index of that group is the number of groups we + * want to keep). */ for (i = 0; i < nitems; i++) { @@ -240,7 +240,7 @@ statext_mcv_build(int numrows, HeapTuple *rows, Bitmapset *attrs, */ if (nitems > 0) { - int j; + int j; /* * Allocate the MCV list structure, set the global parameters. @@ -485,7 +485,7 @@ statext_mcv_load(Oid mvoid) * (or a longer type) instead of using an array of bool items. */ bytea * -statext_mcv_serialize(MCVList * mcvlist, VacAttrStats **stats) +statext_mcv_serialize(MCVList *mcvlist, VacAttrStats **stats) { int i; int dim; @@ -603,7 +603,7 @@ statext_mcv_serialize(MCVList * mcvlist, VacAttrStats **stats) info[dim].nbytes = 0; for (i = 0; i < info[dim].nvalues; i++) { - Size len; + Size len; values[dim][i] = PointerGetDatum(PG_DETOAST_DATUM(values[dim][i])); @@ -616,7 +616,7 @@ statext_mcv_serialize(MCVList * mcvlist, VacAttrStats **stats) info[dim].nbytes = 0; for (i = 0; i < info[dim].nvalues; i++) { - Size len; + Size len; /* c-strings include terminator, so +1 byte */ values[dim][i] = PointerGetDatum(PG_DETOAST_DATUM(values[dim][i])); @@ -636,11 +636,11 @@ statext_mcv_serialize(MCVList * mcvlist, VacAttrStats **stats) * for each attribute, deduplicated values and items). * * The header fields are copied one by one, so that we don't need any - * explicit alignment (we copy them while deserializing). All fields - * after this need to be properly aligned, for direct access. + * explicit alignment (we copy them while deserializing). All fields after + * this need to be properly aligned, for direct access. 
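The serialized-MCV layout described above leans on MAXALIGN to keep every post-header field directly addressable. A one-macro sketch of the round-up, assuming a maximum alignment of 8 bytes (the backend's MAXALIGN is driven by the platform's MAXIMUM_ALIGNOF instead of a hard-coded constant):

    #include <stdio.h>
    #include <stddef.h>

    /* Round a size up to the next 8-byte boundary. */
    #define MAXALIGN_UP(len)  (((size_t) (len) + 7) & ~(size_t) 7)

    int main(void)
    {
        size_t header = 3 * sizeof(int) + sizeof(short);    /* 14: odd-sized */
        size_t aligned = MAXALIGN_UP(header);

        printf("header %zu -> aligned %zu\n", header, aligned);
        return 0;
    }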
*/ total_length = MAXALIGN(VARHDRSZ + (3 * sizeof(uint32)) - + sizeof(AttrNumber) + (ndims * sizeof(Oid))); + + sizeof(AttrNumber) + (ndims * sizeof(Oid))); /* dimension info */ total_length += MAXALIGN(ndims * sizeof(DimensionInfo)); @@ -650,14 +650,14 @@ statext_mcv_serialize(MCVList * mcvlist, VacAttrStats **stats) total_length += MAXALIGN(info[i].nbytes); /* - * And finally the items (no additional alignment needed, we start - * at proper alignment and the itemsize formula uses MAXALIGN) + * And finally the items (no additional alignment needed, we start at + * proper alignment and the itemsize formula uses MAXALIGN) */ total_length += mcvlist->nitems * itemsize; /* - * Allocate space for the whole serialized MCV list (we'll skip bytes, - * so we set them to zero to make the result more compressible). + * Allocate space for the whole serialized MCV list (we'll skip bytes, so + * we set them to zero to make the result more compressible). */ raw = palloc0(total_length); SET_VARSIZE(raw, total_length); @@ -1189,8 +1189,8 @@ pg_stats_ext_mcvlist_items(PG_FUNCTION_ARGS) HeapTuple tuple; Datum result; - StringInfoData itemValues; - StringInfoData itemNulls; + StringInfoData itemValues; + StringInfoData itemNulls; int i; @@ -1213,9 +1213,9 @@ pg_stats_ext_mcvlist_items(PG_FUNCTION_ARGS) */ values = (char **) palloc0(5 * sizeof(char *)); - values[0] = (char *) palloc(64 * sizeof(char)); /* item index */ - values[3] = (char *) palloc(64 * sizeof(char)); /* frequency */ - values[4] = (char *) palloc(64 * sizeof(char)); /* base frequency */ + values[0] = (char *) palloc(64 * sizeof(char)); /* item index */ + values[3] = (char *) palloc(64 * sizeof(char)); /* frequency */ + values[4] = (char *) palloc(64 * sizeof(char)); /* base frequency */ outfuncs = (Oid *) palloc0(sizeof(Oid) * mcvlist->ndimensions); fmgrinfo = (FmgrInfo *) palloc0(sizeof(FmgrInfo) * mcvlist->ndimensions); @@ -1376,7 +1376,7 @@ pg_mcv_list_send(PG_FUNCTION_ARGS) */ static bool * mcv_get_match_bitmap(PlannerInfo *root, List *clauses, - Bitmapset *keys, MCVList * mcvlist, bool is_or) + Bitmapset *keys, MCVList *mcvlist, bool is_or) { int i; ListCell *l; diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c index 33d7941a405..bee79d84dc9 100644 --- a/src/backend/storage/buffer/bufmgr.c +++ b/src/backend/storage/buffer/bufmgr.c @@ -2813,12 +2813,12 @@ RelationGetNumberOfBlocksInFork(Relation relation, ForkNumber forkNum) case RELKIND_MATVIEW: { /* - * Not every table AM uses BLCKSZ wide fixed size - * blocks. Therefore tableam returns the size in bytes - but - * for the purpose of this routine, we want the number of - * blocks. Therefore divide, rounding up. + * Not every table AM uses BLCKSZ wide fixed size blocks. + * Therefore tableam returns the size in bytes - but for the + * purpose of this routine, we want the number of blocks. + * Therefore divide, rounding up. */ - uint64 szbytes; + uint64 szbytes; szbytes = table_relation_size(relation, forkNum); diff --git a/src/backend/storage/file/fd.c b/src/backend/storage/file/fd.c index fdac9850e02..ffae52089f7 100644 --- a/src/backend/storage/file/fd.c +++ b/src/backend/storage/file/fd.c @@ -1731,7 +1731,7 @@ FileClose(File file) * see LruDelete. */ elog(vfdP->fdstate & FD_TEMP_FILE_LIMIT ? 
LOG : data_sync_elevel(LOG), - "could not close file \"%s\": %m", vfdP->fileName); + "could not close file \"%s\": %m", vfdP->fileName); } --nfile; diff --git a/src/backend/storage/ipc/latch.c b/src/backend/storage/ipc/latch.c index e0712f906a1..bff254c2b28 100644 --- a/src/backend/storage/ipc/latch.c +++ b/src/backend/storage/ipc/latch.c @@ -856,7 +856,7 @@ WaitEventAdjustEpoll(WaitEventSet *set, WaitEvent *event, int action) if (rc < 0) ereport(ERROR, (errcode_for_socket_access(), - /* translator: %s is a syscall name, such as "poll()" */ + /* translator: %s is a syscall name, such as "poll()" */ errmsg("%s failed: %m", "epoll_ctl()"))); } @@ -1089,7 +1089,7 @@ WaitEventSetWaitBlock(WaitEventSet *set, int cur_timeout, waiting = false; ereport(ERROR, (errcode_for_socket_access(), - /* translator: %s is a syscall name, such as "poll()" */ + /* translator: %s is a syscall name, such as "poll()" */ errmsg("%s failed: %m", "epoll_wait()"))); } @@ -1215,7 +1215,7 @@ WaitEventSetWaitBlock(WaitEventSet *set, int cur_timeout, waiting = false; ereport(ERROR, (errcode_for_socket_access(), - /* translator: %s is a syscall name, such as "poll()" */ + /* translator: %s is a syscall name, such as "poll()" */ errmsg("%s failed: %m", "poll()"))); } diff --git a/src/backend/storage/ipc/pmsignal.c b/src/backend/storage/ipc/pmsignal.c index 48f43114642..86acec09f38 100644 --- a/src/backend/storage/ipc/pmsignal.c +++ b/src/backend/storage/ipc/pmsignal.c @@ -370,7 +370,7 @@ void PostmasterDeathSignalInit(void) { #ifdef USE_POSTMASTER_DEATH_SIGNAL - int signum = POSTMASTER_DEATH_SIGNAL; + int signum = POSTMASTER_DEATH_SIGNAL; /* Register our signal handler. */ pqsignal(signum, postmaster_death_handler); diff --git a/src/backend/storage/ipc/signalfuncs.c b/src/backend/storage/ipc/signalfuncs.c index 4bfbd57464c..ade8d713aae 100644 --- a/src/backend/storage/ipc/signalfuncs.c +++ b/src/backend/storage/ipc/signalfuncs.c @@ -181,7 +181,7 @@ pg_rotate_logfile(PG_FUNCTION_ARGS) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), (errmsg("must be superuser to rotate log files with adminpack 1.0"), - /* translator: %s is a SQL function name */ + /* translator: %s is a SQL function name */ errhint("Consider using %s, which is part of core, instead.", "pg_logfile_rotate()")))); diff --git a/src/backend/storage/lmgr/lmgr.c b/src/backend/storage/lmgr/lmgr.c index 106d227a5ac..f838b0f758a 100644 --- a/src/backend/storage/lmgr/lmgr.c +++ b/src/backend/storage/lmgr/lmgr.c @@ -906,7 +906,7 @@ WaitForLockersMultiple(List *locktags, LOCKMODE lockmode, bool progress) */ if (progress) { - PGPROC *holder = BackendIdGetProc(lockholders->backendId); + PGPROC *holder = BackendIdGetProc(lockholders->backendId); pgstat_progress_update_param(PROGRESS_WAITFOR_CURRENT_PID, holder->pid); @@ -925,9 +925,10 @@ WaitForLockersMultiple(List *locktags, LOCKMODE lockmode, bool progress) PROGRESS_WAITFOR_DONE, PROGRESS_WAITFOR_CURRENT_PID }; - const int64 values[] = { + const int64 values[] = { 0, 0, 0 }; + pgstat_progress_update_multi_param(3, index, values); } diff --git a/src/backend/storage/smgr/smgr.c b/src/backend/storage/smgr/smgr.c index 8191118b619..dba8c397feb 100644 --- a/src/backend/storage/smgr/smgr.c +++ b/src/backend/storage/smgr/smgr.c @@ -91,7 +91,7 @@ static const int NSmgr = lengthof(smgrsw); */ static HTAB *SMgrRelationHash = NULL; -static dlist_head unowned_relns; +static dlist_head unowned_relns; /* local function prototypes */ static void smgrshutdown(int code, Datum arg); @@ -713,7 +713,7 @@ smgrimmedsync(SMgrRelation 
reln, ForkNumber forknum) void AtEOXact_SMgr(void) { - dlist_mutable_iter iter; + dlist_mutable_iter iter; /* * Zap all unowned SMgrRelations. We rely on smgrclose() to remove each @@ -721,8 +721,8 @@ AtEOXact_SMgr(void) */ dlist_foreach_modify(iter, &unowned_relns) { - SMgrRelation rel = dlist_container(SMgrRelationData, node, - iter.cur); + SMgrRelation rel = dlist_container(SMgrRelationData, node, + iter.cur); Assert(rel->smgr_owner == NULL); diff --git a/src/backend/storage/sync/sync.c b/src/backend/storage/sync/sync.c index 096735c8071..705f229b27f 100644 --- a/src/backend/storage/sync/sync.c +++ b/src/backend/storage/sync/sync.c @@ -548,8 +548,8 @@ RegisterSyncRequest(const FileTag *ftag, SyncRequestType type, for (;;) { /* - * Notify the checkpointer about it. If we fail to queue a message - * in retryOnError mode, we have to sleep and try again ... ugly, but + * Notify the checkpointer about it. If we fail to queue a message in + * retryOnError mode, we have to sleep and try again ... ugly, but * hopefully won't happen often. * * XXX should we CHECK_FOR_INTERRUPTS in this loop? Escaping with an diff --git a/src/backend/tcop/dest.c b/src/backend/tcop/dest.c index ee9e349a5ba..7bb81df9707 100644 --- a/src/backend/tcop/dest.c +++ b/src/backend/tcop/dest.c @@ -113,8 +113,8 @@ DestReceiver * CreateDestReceiver(CommandDest dest) { /* - * It's ok to cast the constness away as any modification of the none receiver - * would be a bug (which gets easier to catch this way). + * It's ok to cast the constness away as any modification of the none + * receiver would be a bug (which gets easier to catch this way). */ switch (dest) diff --git a/src/backend/utils/adt/formatting.c b/src/backend/utils/adt/formatting.c index 69a691f18e7..3a6a878ffac 100644 --- a/src/backend/utils/adt/formatting.c +++ b/src/backend/utils/adt/formatting.c @@ -3023,6 +3023,7 @@ DCH_from_char(FormatNode *node, char *in, TmFromChar *out) int len, value; bool fx_mode = false; + /* number of extra skipped characters (more than given in format string) */ int extra_skip = 0; @@ -3049,8 +3050,8 @@ DCH_from_char(FormatNode *node, char *in, TmFromChar *out) /* * In non FX (fixed format) mode one format string space or * separator match to one space or separator in input string. - * Or match nothing if there is no space or separator in - * the current position of input string. + * Or match nothing if there is no space or separator in the + * current position of input string. */ extra_skip--; if (isspace((unsigned char) *s) || is_separator_char(s)) @@ -3176,11 +3177,13 @@ DCH_from_char(FormatNode *node, char *in, TmFromChar *out) n->key->name))); break; case DCH_TZH: + /* * Value of TZH might be negative. And the issue is that we * might swallow minus sign as the separator. So, if we have - * skipped more characters than specified in the format string, - * then we consider prepending last skipped minus to TZH. + * skipped more characters than specified in the format + * string, then we consider prepending last skipped minus to + * TZH. 
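The DCH_from_char() TZH note above is about reclaiming a minus sign swallowed by separator matching. A toy parser showing the idea; extra_skip and last_skipped are simplified stand-ins for the state the real parser keeps while walking the format string:

    #include <stdio.h>
    #include <ctype.h>

    static int
    parse_tzh(const char *s, int extra_skip, char last_skipped)
    {
        int sign = 1, val = 0;

        if (*s == '+' || *s == '-')
            sign = (*s++ == '-') ? -1 : 1;
        else if (extra_skip > 0 && last_skipped == '-')
            sign = -1;              /* reclaim the swallowed minus */

        while (isdigit((unsigned char) *s))
            val = val * 10 + (*s++ - '0');
        return sign * val;
    }

    int main(void)
    {
        printf("%d\n", parse_tzh("05", 1, '-'));    /* -5 */
        printf("%d\n", parse_tzh("+03", 0, 0));     /* 3 */
        return 0;
    }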
*/ if (*s == '+' || *s == '-' || *s == ' ') { diff --git a/src/backend/utils/adt/genfile.c b/src/backend/utils/adt/genfile.c index a3c6adaf640..f5261065305 100644 --- a/src/backend/utils/adt/genfile.c +++ b/src/backend/utils/adt/genfile.c @@ -219,7 +219,7 @@ pg_read_file(PG_FUNCTION_ARGS) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), (errmsg("must be superuser to read files with adminpack 1.0"), - /* translator: %s is a SQL function name */ + /* translator: %s is a SQL function name */ errhint("Consider using %s, which is part of core, instead.", "pg_file_read()")))); diff --git a/src/backend/utils/adt/geo_ops.c b/src/backend/utils/adt/geo_ops.c index 28e85e397e5..f2be6143105 100644 --- a/src/backend/utils/adt/geo_ops.c +++ b/src/backend/utils/adt/geo_ops.c @@ -88,7 +88,7 @@ static int point_inside(Point *p, int npts, Point *plist); static inline void line_construct(LINE *result, Point *pt, float8 m); static inline float8 line_sl(LINE *line); static inline float8 line_invsl(LINE *line); -static bool line_interpt_line(Point *result, LINE *l1, LINE *l2); +static bool line_interpt_line(Point *result, LINE *l1, LINE *l2); static bool line_contain_point(LINE *line, Point *point); static float8 line_closept_point(Point *result, LINE *line, Point *pt); @@ -96,10 +96,10 @@ static float8 line_closept_point(Point *result, LINE *line, Point *pt); static inline void statlseg_construct(LSEG *lseg, Point *pt1, Point *pt2); static inline float8 lseg_sl(LSEG *lseg); static inline float8 lseg_invsl(LSEG *lseg); -static bool lseg_interpt_line(Point *result, LSEG *lseg, LINE *line); -static bool lseg_interpt_lseg(Point *result, LSEG *l1, LSEG *l2); +static bool lseg_interpt_line(Point *result, LSEG *lseg, LINE *line); +static bool lseg_interpt_lseg(Point *result, LSEG *l1, LSEG *l2); static int lseg_crossing(float8 x, float8 y, float8 px, float8 py); -static bool lseg_contain_point(LSEG *lseg, Point *point); +static bool lseg_contain_point(LSEG *lseg, Point *point); static float8 lseg_closept_point(Point *result, LSEG *lseg, Point *pt); static float8 lseg_closept_line(Point *result, LSEG *lseg, LINE *line); static float8 lseg_closept_lseg(Point *result, LSEG *on_lseg, LSEG *to_lseg); @@ -692,9 +692,9 @@ static bool box_contain_box(BOX *contains_box, BOX *contained_box) { return FPge(contains_box->high.x, contained_box->high.x) && - FPle(contains_box->low.x, contained_box->low.x) && - FPge(contains_box->high.y, contained_box->high.y) && - FPle(contains_box->low.y, contained_box->low.y); + FPle(contains_box->low.x, contained_box->low.x) && + FPge(contains_box->high.y, contained_box->high.y) && + FPle(contains_box->low.y, contained_box->low.y); } @@ -2378,8 +2378,8 @@ dist_ppath(PG_FUNCTION_ARGS) Assert(path->npts > 0); /* - * The distance from a point to a path is the smallest distance - * from the point to any of its constituent segments. + * The distance from a point to a path is the smallest distance from the + * point to any of its constituent segments. */ for (i = 0; i < path->npts; i++) { @@ -2553,9 +2553,9 @@ lseg_interpt_line(Point *result, LSEG *lseg, LINE *line) LINE tmp; /* - * First, we promote the line segment to a line, because we know how - * to find the intersection point of two lines. If they don't have - * an intersection point, we are done. + * First, we promote the line segment to a line, because we know how to + * find the intersection point of two lines. If they don't have an + * intersection point, we are done. 
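lseg_interpt_line(), quoted above, promotes the segment to an infinite line and reuses line-line intersection. The core of that step for lines in a*x + b*y = c form, standalone, with a small epsilon for the parallel case (the real code also verifies afterwards that the point lies on the segment):

    #include <stdio.h>
    #include <stdbool.h>
    #include <math.h>

    typedef struct { double x, y; } Pt;

    /* Intersect two infinite lines; false if (nearly) parallel. */
    static bool
    line_interpt(double a1, double b1, double c1,
                 double a2, double b2, double c2, Pt *out)
    {
        double det = a1 * b2 - a2 * b1;

        if (fabs(det) < 1e-12)
            return false;
        out->x = (c1 * b2 - c2 * b1) / det;
        out->y = (a1 * c2 - a2 * c1) / det;
        return true;
    }

    int main(void)
    {
        Pt p;

        if (line_interpt(1, -1, 0,      /* y = x  */
                         0,  1, 2,      /* y = 2  */
                         &p))
            printf("intersection: (%g, %g)\n", p.x, p.y);   /* (2, 2) */
        return 0;
    }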
*/ line_construct(&tmp, &lseg->p[0], lseg_sl(lseg)); if (!line_interpt_line(&interpt, &tmp, line)) @@ -2602,8 +2602,8 @@ line_closept_point(Point *result, LINE *line, Point *point) LINE tmp; /* - * We drop a perpendicular to find the intersection point. Ordinarily - * we should always find it, but that can fail in the presence of NaN + * We drop a perpendicular to find the intersection point. Ordinarily we + * should always find it, but that can fail in the presence of NaN * coordinates, and perhaps even from simple roundoff issues. */ line_construct(&tmp, point, line_invsl(line)); @@ -2693,8 +2693,8 @@ lseg_closept_lseg(Point *result, LSEG *on_lseg, LSEG *to_lseg) return 0.0; /* - * Then, we find the closest points from the endpoints of the second - * line segment, and keep the closest one. + * Then, we find the closest points from the endpoints of the second line + * segment, and keep the closest one. */ dist = lseg_closept_point(result, on_lseg, &to_lseg->p[0]); d = lseg_closept_point(&point, on_lseg, &to_lseg->p[1]); @@ -3063,7 +3063,7 @@ static bool box_contain_point(BOX *box, Point *point) { return box->high.x >= point->x && box->low.x <= point->x && - box->high.y >= point->y && box->low.y <= point-> y; + box->high.y >= point->y && box->low.y <= point->y; } Datum @@ -3150,7 +3150,7 @@ static bool box_contain_lseg(BOX *box, LSEG *lseg) { return box_contain_point(box, &lseg->p[0]) && - box_contain_point(box, &lseg->p[1]); + box_contain_point(box, &lseg->p[1]); } Datum diff --git a/src/backend/utils/adt/json.c b/src/backend/utils/adt/json.c index bb4bac85f7d..a9784d067c7 100644 --- a/src/backend/utils/adt/json.c +++ b/src/backend/utils/adt/json.c @@ -207,7 +207,7 @@ IsValidJsonNumber(const char *str, int len) */ if (*str == '-') { - dummy_lex.input = unconstify(char *, str) + 1; + dummy_lex.input = unconstify(char *, str) +1; dummy_lex.input_length = len - 1; } else @@ -2192,7 +2192,7 @@ json_build_object(PG_FUNCTION_ARGS) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("argument list must have even number of elements"), - /* translator: %s is a SQL function name */ + /* translator: %s is a SQL function name */ errhint("The arguments of %s must consist of alternating keys and values.", "json_build_object()"))); diff --git a/src/backend/utils/adt/jsonb.c b/src/backend/utils/adt/jsonb.c index 036d771386f..c742172bd8c 100644 --- a/src/backend/utils/adt/jsonb.c +++ b/src/backend/utils/adt/jsonb.c @@ -1155,7 +1155,7 @@ jsonb_build_object(PG_FUNCTION_ARGS) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("argument list must have even number of elements"), - /* translator: %s is a SQL function name */ + /* translator: %s is a SQL function name */ errhint("The arguments of %s must consist of alternating keys and values.", "jsonb_build_object()"))); diff --git a/src/backend/utils/adt/like.c b/src/backend/utils/adt/like.c index 704e5720cf5..f4dfc504d6e 100644 --- a/src/backend/utils/adt/like.c +++ b/src/backend/utils/adt/like.c @@ -152,7 +152,7 @@ GenericMatchText(const char *s, int slen, const char *p, int plen, Oid collation { if (collation && !lc_ctype_is_c(collation) && collation != DEFAULT_COLLATION_OID) { - pg_locale_t locale = pg_newlocale_from_collation(collation); + pg_locale_t locale = pg_newlocale_from_collation(collation); if (locale && !locale->deterministic) ereport(ERROR, diff --git a/src/backend/utils/adt/like_support.c b/src/backend/utils/adt/like_support.c index 7528c80f7c3..e2583bc680a 100644 --- a/src/backend/utils/adt/like_support.c +++ 
b/src/backend/utils/adt/like_support.c @@ -262,9 +262,9 @@ match_pattern_prefix(Node *leftop, * optimized equality or prefix tests use bytewise comparisons, which is * not consistent with nondeterministic collations. The actual * pattern-matching implementation functions will later error out that - * pattern-matching is not supported with nondeterministic collations. - * (We could also error out here, but by doing it later we get more - * precise error messages.) (It should be possible to support at least + * pattern-matching is not supported with nondeterministic collations. (We + * could also error out here, but by doing it later we get more precise + * error messages.) (It should be possible to support at least * Pattern_Prefix_Exact, but no point as along as the actual * pattern-matching implementations don't support it.) * diff --git a/src/backend/utils/adt/numutils.c b/src/backend/utils/adt/numutils.c index c7df630c3c3..70138feb29e 100644 --- a/src/backend/utils/adt/numutils.c +++ b/src/backend/utils/adt/numutils.c @@ -182,7 +182,7 @@ invalid_syntax: errmsg("invalid input syntax for type %s: \"%s\"", "smallint", s))); - return 0; /* keep compiler quiet */ + return 0; /* keep compiler quiet */ } /* @@ -258,7 +258,7 @@ invalid_syntax: errmsg("invalid input syntax for type %s: \"%s\"", "integer", s))); - return 0; /* keep compiler quiet */ + return 0; /* keep compiler quiet */ } /* diff --git a/src/backend/utils/adt/regexp.c b/src/backend/utils/adt/regexp.c index c5be472bced..00a9a33eccc 100644 --- a/src/backend/utils/adt/regexp.c +++ b/src/backend/utils/adt/regexp.c @@ -1101,8 +1101,8 @@ setup_regexp_matches(text *orig_str, text *pattern, pg_re_flags *re_flags, /* enlarge output space if needed */ while (array_idx + matchctx->npatterns * 2 + 1 > array_len) { - array_len += array_len + 1; /* 2^n-1 => 2^(n+1)-1 */ - if (array_len > MaxAllocSize/sizeof(int)) + array_len += array_len + 1; /* 2^n-1 => 2^(n+1)-1 */ + if (array_len > MaxAllocSize / sizeof(int)) ereport(ERROR, (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), errmsg("too many regular expression matches"))); @@ -1117,8 +1117,9 @@ setup_regexp_matches(text *orig_str, text *pattern, pg_re_flags *re_flags, for (i = 1; i <= matchctx->npatterns; i++) { - int so = pmatch[i].rm_so; - int eo = pmatch[i].rm_eo; + int so = pmatch[i].rm_so; + int eo = pmatch[i].rm_eo; + matchctx->match_locs[array_idx++] = so; matchctx->match_locs[array_idx++] = eo; if (so >= 0 && eo >= 0 && (eo - so) > maxlen) @@ -1127,8 +1128,9 @@ setup_regexp_matches(text *orig_str, text *pattern, pg_re_flags *re_flags, } else { - int so = pmatch[0].rm_so; - int eo = pmatch[0].rm_eo; + int so = pmatch[0].rm_so; + int eo = pmatch[0].rm_eo; + matchctx->match_locs[array_idx++] = so; matchctx->match_locs[array_idx++] = eo; if (so >= 0 && eo >= 0 && (eo - so) > maxlen) @@ -1190,10 +1192,10 @@ setup_regexp_matches(text *orig_str, text *pattern, pg_re_flags *re_flags, * interest. * * Worst case: assume we need the maximum size (maxlen*eml), but take - * advantage of the fact that the original string length in bytes is an - * upper bound on the byte length of any fetched substring (and we know - * that len+1 is safe to allocate because the varlena header is longer - * than 1 byte). + * advantage of the fact that the original string length in bytes is + * an upper bound on the byte length of any fetched substring (and we + * know that len+1 is safe to allocate because the varlena header is + * longer than 1 byte). 
*/ if (maxsiz > orig_len) conv_bufsiz = orig_len + 1; @@ -1248,9 +1250,10 @@ build_regexp_match_result(regexp_matches_ctx *matchctx) } else if (buf) { - int len = pg_wchar2mb_with_len(matchctx->wide_str + so, - buf, - eo - so); + int len = pg_wchar2mb_with_len(matchctx->wide_str + so, + buf, + eo - so); + Assert(len < bufsiz); elems[i] = PointerGetDatum(cstring_to_text_with_len(buf, len)); nulls[i] = false; @@ -1409,15 +1412,15 @@ build_regexp_split_result(regexp_matches_ctx *splitctx) if (buf) { - int bufsiz PG_USED_FOR_ASSERTS_ONLY = splitctx->conv_bufsiz; - int len; + int bufsiz PG_USED_FOR_ASSERTS_ONLY = splitctx->conv_bufsiz; + int len; endpos = splitctx->match_locs[splitctx->next_match * 2]; if (endpos < startpos) elog(ERROR, "invalid match starting position"); len = pg_wchar2mb_with_len(splitctx->wide_str + startpos, buf, - endpos-startpos); + endpos - startpos); Assert(len < bufsiz); return PointerGetDatum(cstring_to_text_with_len(buf, len)); } diff --git a/src/backend/utils/adt/ri_triggers.c b/src/backend/utils/adt/ri_triggers.c index 095334b3363..b9e0f5c048d 100644 --- a/src/backend/utils/adt/ri_triggers.c +++ b/src/backend/utils/adt/ri_triggers.c @@ -635,10 +635,10 @@ ri_restrict(TriggerData *trigdata, bool is_no_action) oldslot = trigdata->tg_trigslot; /* - * If another PK row now exists providing the old key values, we - * should not do anything. However, this check should only be - * made in the NO ACTION case; in RESTRICT cases we don't wish to - * allow another row to be substituted. + * If another PK row now exists providing the old key values, we should + * not do anything. However, this check should only be made in the NO + * ACTION case; in RESTRICT cases we don't wish to allow another row to be + * substituted. */ if (is_no_action && ri_Check_Pk_Match(pk_rel, fk_rel, oldslot, riinfo)) @@ -651,8 +651,8 @@ ri_restrict(TriggerData *trigdata, bool is_no_action) elog(ERROR, "SPI_connect failed"); /* - * Fetch or prepare a saved plan for the restrict lookup (it's the - * same query for delete and update cases) + * Fetch or prepare a saved plan for the restrict lookup (it's the same + * query for delete and update cases) */ ri_BuildQueryKey(&qkey, riinfo, RI_PLAN_RESTRICT_CHECKREF); @@ -713,7 +713,7 @@ ri_restrict(TriggerData *trigdata, bool is_no_action) ri_PerformCheck(riinfo, &qkey, qplan, fk_rel, pk_rel, oldslot, NULL, - true, /* must detect new rows */ + true, /* must detect new rows */ SPI_OK_SELECT); if (SPI_finish() != SPI_OK_FINISH) @@ -813,13 +813,13 @@ RI_FKey_cascade_del(PG_FUNCTION_ARGS) } /* - * We have a plan now. Build up the arguments from the key values - * in the deleted PK tuple and delete the referencing rows + * We have a plan now. 
Build up the arguments from the key values in the + * deleted PK tuple and delete the referencing rows */ ri_PerformCheck(riinfo, &qkey, qplan, fk_rel, pk_rel, oldslot, NULL, - true, /* must detect new rows */ + true, /* must detect new rows */ SPI_OK_DELETE); if (SPI_finish() != SPI_OK_FINISH) @@ -940,7 +940,7 @@ RI_FKey_cascade_upd(PG_FUNCTION_ARGS) ri_PerformCheck(riinfo, &qkey, qplan, fk_rel, pk_rel, oldslot, newslot, - true, /* must detect new rows */ + true, /* must detect new rows */ SPI_OK_UPDATE); if (SPI_finish() != SPI_OK_FINISH) @@ -1119,7 +1119,7 @@ ri_set(TriggerData *trigdata, bool is_set_null) ri_PerformCheck(riinfo, &qkey, qplan, fk_rel, pk_rel, oldslot, NULL, - true, /* must detect new rows */ + true, /* must detect new rows */ SPI_OK_UPDATE); if (SPI_finish() != SPI_OK_FINISH) @@ -1132,18 +1132,17 @@ ri_set(TriggerData *trigdata, bool is_set_null) else { /* - * If we just deleted or updated the PK row whose key was equal to - * the FK columns' default values, and a referencing row exists in - * the FK table, we would have updated that row to the same values - * it already had --- and RI_FKey_fk_upd_check_required would - * hence believe no check is necessary. So we need to do another - * lookup now and in case a reference still exists, abort the - * operation. That is already implemented in the NO ACTION - * trigger, so just run it. (This recheck is only needed in the - * SET DEFAULT case, since CASCADE would remove such rows in case - * of a DELETE operation or would change the FK key values in case - * of an UPDATE, while SET NULL is certain to result in rows that - * satisfy the FK constraint.) + * If we just deleted or updated the PK row whose key was equal to the + * FK columns' default values, and a referencing row exists in the FK + * table, we would have updated that row to the same values it already + * had --- and RI_FKey_fk_upd_check_required would hence believe no + * check is necessary. So we need to do another lookup now and in + * case a reference still exists, abort the operation. That is + * already implemented in the NO ACTION trigger, so just run it. (This + * recheck is only needed in the SET DEFAULT case, since CASCADE would + * remove such rows in case of a DELETE operation or would change the + * FK key values in case of an UPDATE, while SET NULL is certain to + * result in rows that satisfy the FK constraint.) */ return ri_restrict(trigdata, true); } @@ -1170,8 +1169,8 @@ RI_FKey_pk_upd_check_required(Trigger *trigger, Relation pk_rel, riinfo = ri_FetchConstraintInfo(trigger, pk_rel, true); /* - * If any old key value is NULL, the row could not have been - * referenced by an FK row, so no check is needed. + * If any old key value is NULL, the row could not have been referenced by + * an FK row, so no check is needed. */ if (ri_NullCheck(RelationGetDescr(pk_rel), oldslot, riinfo, true) != RI_KEYS_NONE_NULL) return false; @@ -1213,14 +1212,17 @@ RI_FKey_fk_upd_check_required(Trigger *trigger, Relation fk_rel, */ if (ri_nullcheck == RI_KEYS_ALL_NULL) return false; + /* - * If some new key values are NULL, the behavior depends on the match type. + * If some new key values are NULL, the behavior depends on the match + * type. */ else if (ri_nullcheck == RI_KEYS_SOME_NULL) { switch (riinfo->confmatchtype) { case FKCONSTR_MATCH_SIMPLE: + /* * If any new key value is NULL, the row must satisfy the * constraint, so no check is needed. 
@@ -1228,12 +1230,14 @@ RI_FKey_fk_upd_check_required(Trigger *trigger, Relation fk_rel, return false; case FKCONSTR_MATCH_PARTIAL: + /* * Don't know, must run full check. */ break; case FKCONSTR_MATCH_FULL: + /* * If some new key values are NULL, the row fails the * constraint. We must not throw error here, because the row @@ -1251,12 +1255,12 @@ RI_FKey_fk_upd_check_required(Trigger *trigger, Relation fk_rel, */ /* - * If the original row was inserted by our own transaction, we - * must fire the trigger whether or not the keys are equal. This - * is because our UPDATE will invalidate the INSERT so that the - * INSERT RI trigger will not do anything; so we had better do the - * UPDATE check. (We could skip this if we knew the INSERT - * trigger already fired, but there is no easy way to know that.) + * If the original row was inserted by our own transaction, we must fire + * the trigger whether or not the keys are equal. This is because our + * UPDATE will invalidate the INSERT so that the INSERT RI trigger will + * not do anything; so we had better do the UPDATE check. (We could skip + * this if we knew the INSERT trigger already fired, but there is no easy + * way to know that.) */ xminDatum = slot_getsysattr(oldslot, MinTransactionIdAttributeNumber, &isnull); Assert(!isnull); diff --git a/src/backend/utils/adt/ruleutils.c b/src/backend/utils/adt/ruleutils.c index 1e3bcb47b86..f911511158d 100644 --- a/src/backend/utils/adt/ruleutils.c +++ b/src/backend/utils/adt/ruleutils.c @@ -1566,7 +1566,7 @@ pg_get_statisticsobj_worker(Oid statextid, bool missing_ok) */ if (!ndistinct_enabled || !dependencies_enabled || !mcv_enabled) { - bool gotone = false; + bool gotone = false; appendStringInfoString(&buf, " ("); diff --git a/src/backend/utils/adt/varchar.c b/src/backend/utils/adt/varchar.c index 4003631d8f5..332dc860c4f 100644 --- a/src/backend/utils/adt/varchar.c +++ b/src/backend/utils/adt/varchar.c @@ -755,8 +755,8 @@ bpchareq(PG_FUNCTION_ARGS) pg_newlocale_from_collation(collid)->deterministic) { /* - * Since we only care about equality or not-equality, we can avoid all the - * expense of strcoll() here, and just do bitwise comparison. + * Since we only care about equality or not-equality, we can avoid all + * the expense of strcoll() here, and just do bitwise comparison. */ if (len1 != len2) result = false; @@ -793,8 +793,8 @@ bpcharne(PG_FUNCTION_ARGS) pg_newlocale_from_collation(collid)->deterministic) { /* - * Since we only care about equality or not-equality, we can avoid all the - * expense of strcoll() here, and just do bitwise comparison. + * Since we only care about equality or not-equality, we can avoid all + * the expense of strcoll() here, and just do bitwise comparison. 
*/ if (len1 != len2) result = true; @@ -983,7 +983,7 @@ hashbpchar(PG_FUNCTION_ARGS) Oid collid = PG_GET_COLLATION(); char *keydata; int keylen; - pg_locale_t mylocale = 0; + pg_locale_t mylocale = 0; Datum result; if (!collid) @@ -1010,7 +1010,7 @@ hashbpchar(PG_FUNCTION_ARGS) int32_t ulen = -1; UChar *uchar = NULL; Size bsize; - uint8_t *buf; + uint8_t *buf; ulen = icu_to_uchar(&uchar, keydata, keylen); @@ -1043,7 +1043,7 @@ hashbpcharextended(PG_FUNCTION_ARGS) Oid collid = PG_GET_COLLATION(); char *keydata; int keylen; - pg_locale_t mylocale = 0; + pg_locale_t mylocale = 0; Datum result; if (!collid) @@ -1071,7 +1071,7 @@ hashbpcharextended(PG_FUNCTION_ARGS) int32_t ulen = -1; UChar *uchar = NULL; Size bsize; - uint8_t *buf; + uint8_t *buf; ulen = icu_to_uchar(&uchar, VARDATA_ANY(key), VARSIZE_ANY_EXHDR(key)); diff --git a/src/backend/utils/adt/varlena.c b/src/backend/utils/adt/varlena.c index f82ce92ce3d..e166effa5e5 100644 --- a/src/backend/utils/adt/varlena.c +++ b/src/backend/utils/adt/varlena.c @@ -1152,7 +1152,7 @@ text_position_setup(text *t1, text *t2, Oid collid, TextPositionState *state) { int len1 = VARSIZE_ANY_EXHDR(t1); int len2 = VARSIZE_ANY_EXHDR(t2); - pg_locale_t mylocale = 0; + pg_locale_t mylocale = 0; check_collation_set(collid); @@ -1723,11 +1723,11 @@ texteq(PG_FUNCTION_ARGS) len2; /* - * Since we only care about equality or not-equality, we can avoid all the - * expense of strcoll() here, and just do bitwise comparison. In fact, we - * don't even have to do a bitwise comparison if we can show the lengths - * of the strings are unequal; which might save us from having to detoast - * one or both values. + * Since we only care about equality or not-equality, we can avoid all + * the expense of strcoll() here, and just do bitwise comparison. In + * fact, we don't even have to do a bitwise comparison if we can show + * the lengths of the strings are unequal; which might save us from + * having to detoast one or both values. */ len1 = toast_raw_datum_size(arg1); len2 = toast_raw_datum_size(arg2); @@ -1873,7 +1873,7 @@ text_starts_with(PG_FUNCTION_ARGS) Datum arg1 = PG_GETARG_DATUM(0); Datum arg2 = PG_GETARG_DATUM(1); Oid collid = PG_GET_COLLATION(); - pg_locale_t mylocale = 0; + pg_locale_t mylocale = 0; bool result; Size len1, len2; @@ -5346,7 +5346,7 @@ text_concat_ws(PG_FUNCTION_ARGS) Datum text_left(PG_FUNCTION_ARGS) { - int n = PG_GETARG_INT32(1); + int n = PG_GETARG_INT32(1); if (n < 0) { diff --git a/src/backend/utils/cache/relcache.c b/src/backend/utils/cache/relcache.c index d0f6f715e6f..969884d4856 100644 --- a/src/backend/utils/cache/relcache.c +++ b/src/backend/utils/cache/relcache.c @@ -2635,9 +2635,9 @@ RelationClearRelation(Relation relation, bool rebuild) * there should be no PartitionDirectory with a pointer to the old * entry. * - * Note that newrel and relation have already been swapped, so - * the "old" partition descriptor is actually the one hanging off - * of newrel. + * Note that newrel and relation have already been swapped, so the + * "old" partition descriptor is actually the one hanging off of + * newrel. 
*/ MemoryContextSetParent(newrel->rd_pdcxt, relation->rd_pdcxt); newrel->rd_partdesc = NULL; diff --git a/src/backend/utils/cache/relmapper.c b/src/backend/utils/cache/relmapper.c index f870a07d2a1..7ad0aa0b944 100644 --- a/src/backend/utils/cache/relmapper.c +++ b/src/backend/utils/cache/relmapper.c @@ -656,7 +656,7 @@ EstimateRelationMapSpace(void) void SerializeRelationMap(Size maxSize, char *startAddress) { - SerializedActiveRelMaps *relmaps; + SerializedActiveRelMaps *relmaps; Assert(maxSize >= EstimateRelationMapSpace()); @@ -673,7 +673,7 @@ SerializeRelationMap(Size maxSize, char *startAddress) void RestoreRelationMap(char *startAddress) { - SerializedActiveRelMaps *relmaps; + SerializedActiveRelMaps *relmaps; if (active_shared_updates.num_mappings != 0 || active_local_updates.num_mappings != 0 || diff --git a/src/backend/utils/cache/syscache.c b/src/backend/utils/cache/syscache.c index ac98c19155f..476538354dd 100644 --- a/src/backend/utils/cache/syscache.c +++ b/src/backend/utils/cache/syscache.c @@ -1235,7 +1235,7 @@ GetSysCacheOid(int cacheId, result = heap_getattr(tuple, oidcol, SysCache[cacheId]->cc_tupdesc, &isNull); - Assert(!isNull); /* columns used as oids should never be NULL */ + Assert(!isNull); /* columns used as oids should never be NULL */ ReleaseSysCache(tuple); return result; } diff --git a/src/backend/utils/fmgr/fmgr.c b/src/backend/utils/fmgr/fmgr.c index ead8b371a73..f039567e209 100644 --- a/src/backend/utils/fmgr/fmgr.c +++ b/src/backend/utils/fmgr/fmgr.c @@ -554,7 +554,7 @@ static void record_C_func(HeapTuple procedureTuple, PGFunction user_fn, const Pg_finfo_record *inforec) { - Oid fn_oid = ((Form_pg_proc) GETSTRUCT(procedureTuple))->oid; + Oid fn_oid = ((Form_pg_proc) GETSTRUCT(procedureTuple))->oid; CFuncHashTabEntry *entry; bool found; diff --git a/src/backend/utils/hash/hashfn.c b/src/backend/utils/hash/hashfn.c index 9f5e2925dec..66985cc2e92 100644 --- a/src/backend/utils/hash/hashfn.c +++ b/src/backend/utils/hash/hashfn.c @@ -653,6 +653,7 @@ hash_uint32_extended(uint32 k, uint64 seed) /* report the result */ PG_RETURN_UINT64(((uint64) b << 32) | c); } + /* * string_hash: hash function for keys that are NUL-terminated strings. * diff --git a/src/backend/utils/init/globals.c b/src/backend/utils/init/globals.c index a5950c1e8c2..3bf96de256d 100644 --- a/src/backend/utils/init/globals.c +++ b/src/backend/utils/init/globals.c @@ -39,7 +39,7 @@ volatile uint32 CritSectionCount = 0; int MyProcPid; pg_time_t MyStartTime; -TimestampTz MyStartTimestamp; +TimestampTz MyStartTimestamp; struct Port *MyProcPort; int32 MyCancelKey; int MyPMChildSlot; diff --git a/src/backend/utils/init/miscinit.c b/src/backend/utils/init/miscinit.c index c180a9910d4..83c95148562 100644 --- a/src/backend/utils/init/miscinit.c +++ b/src/backend/utils/init/miscinit.c @@ -592,8 +592,8 @@ InitializeSessionUserId(const char *rolename, Oid roleid) AssertState(!OidIsValid(AuthenticatedUserId)); /* - * Make sure syscache entries are flushed for recent catalog changes. - * This allows us to find roles that were created on-the-fly during + * Make sure syscache entries are flushed for recent catalog changes. This + * allows us to find roles that were created on-the-fly during * authentication. 
*/ AcceptInvalidationMessages(); diff --git a/src/backend/utils/mb/mbutils.c b/src/backend/utils/mb/mbutils.c index dea5dcb3685..bc2be43e216 100644 --- a/src/backend/utils/mb/mbutils.c +++ b/src/backend/utils/mb/mbutils.c @@ -561,7 +561,7 @@ char * pg_any_to_server(const char *s, int len, int encoding) { if (len <= 0) - return unconstify(char *, s); /* empty string is always valid */ + return unconstify(char *, s); /* empty string is always valid */ if (encoding == DatabaseEncoding->encoding || encoding == PG_SQL_ASCII) @@ -634,11 +634,11 @@ char * pg_server_to_any(const char *s, int len, int encoding) { if (len <= 0) - return unconstify(char *, s); /* empty string is always valid */ + return unconstify(char *, s); /* empty string is always valid */ if (encoding == DatabaseEncoding->encoding || encoding == PG_SQL_ASCII) - return unconstify(char *, s); /* assume data is valid */ + return unconstify(char *, s); /* assume data is valid */ if (DatabaseEncoding->encoding == PG_SQL_ASCII) { diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c index ed51da42341..8acfa303c5d 100644 --- a/src/backend/utils/misc/guc.c +++ b/src/backend/utils/misc/guc.c @@ -459,13 +459,13 @@ const struct config_enum_entry ssl_protocol_versions_info[] = { static struct config_enum_entry shared_memory_options[] = { #ifndef WIN32 - { "sysv", SHMEM_TYPE_SYSV, false}, + {"sysv", SHMEM_TYPE_SYSV, false}, #endif #ifndef EXEC_BACKEND - { "mmap", SHMEM_TYPE_MMAP, false}, + {"mmap", SHMEM_TYPE_MMAP, false}, #endif #ifdef WIN32 - { "windows", SHMEM_TYPE_WINDOWS, false}, + {"windows", SHMEM_TYPE_WINDOWS, false}, #endif {NULL, 0, false} }; @@ -1599,6 +1599,7 @@ static struct config_bool ConfigureNamesBool[] = true, NULL, NULL, NULL }, + /* * WITH OIDS support, and consequently default_with_oids, was removed in * PostgreSQL 12, but we tolerate the parameter being set to false to @@ -8894,21 +8895,21 @@ ShowAllGUCConfig(DestReceiver *dest) struct config_generic ** get_explain_guc_options(int *num) { - int i; + int i; struct config_generic **result; *num = 0; /* - * Allocate enough space to fit all GUC_EXPLAIN options. We may not - * need all the space, but there are fairly few such options so we - * don't waste a lot of memory. + * Allocate enough space to fit all GUC_EXPLAIN options. We may not need + * all the space, but there are fairly few such options so we don't waste + * a lot of memory. 
*/ result = palloc(sizeof(struct config_generic *) * num_guc_explain_variables); for (i = 0; i < num_guc_variables; i++) { - bool modified; + bool modified; struct config_generic *conf = guc_variables[i]; /* return only options visible to the user */ @@ -8927,15 +8928,17 @@ get_explain_guc_options(int *num) switch (conf->vartype) { case PGC_BOOL: - { - struct config_bool *lconf = (struct config_bool *) conf; - modified = (lconf->boot_val != *(lconf->variable)); - } - break; + { + struct config_bool *lconf = (struct config_bool *) conf; + + modified = (lconf->boot_val != *(lconf->variable)); + } + break; case PGC_INT: { struct config_int *lconf = (struct config_int *) conf; + modified = (lconf->boot_val != *(lconf->variable)); } break; @@ -8943,6 +8946,7 @@ get_explain_guc_options(int *num) case PGC_REAL: { struct config_real *lconf = (struct config_real *) conf; + modified = (lconf->boot_val != *(lconf->variable)); } break; @@ -8950,6 +8954,7 @@ get_explain_guc_options(int *num) case PGC_STRING: { struct config_string *lconf = (struct config_string *) conf; + modified = (strcmp(lconf->boot_val, *(lconf->variable)) != 0); } break; @@ -8957,6 +8962,7 @@ get_explain_guc_options(int *num) case PGC_ENUM: { struct config_enum *lconf = (struct config_enum *) conf; + modified = (lconf->boot_val != *(lconf->variable)); } break; diff --git a/src/backend/utils/mmgr/dsa.c b/src/backend/utils/mmgr/dsa.c index 62e6b652af9..1135ca91227 100644 --- a/src/backend/utils/mmgr/dsa.c +++ b/src/backend/utils/mmgr/dsa.c @@ -1676,6 +1676,7 @@ ensure_active_superblock(dsa_area *area, dsa_area_pool *pool, return false; } } + /* * This shouldn't happen: get_best_segment() or make_new_segment() * promised that we can successfully allocate npages. @@ -2267,7 +2268,7 @@ static void check_for_freed_segments_locked(dsa_area *area) { size_t freed_segment_counter; - int i; + int i; Assert(LWLockHeldByMe(DSA_AREA_LOCK(area))); freed_segment_counter = area->control->freed_segment_counter; |
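Every hunk above is mechanical, and the same few shapes of change recur throughout. As a reading aid, here is a hypothetical function (not taken from this commit) shown before and after a pgindent-style pass: the comment block is re-wrapped to fill the available width, the declared variable name is aligned onto a tab stop, and a blank line is inserted between the declarations and the first statement of the block.

#include <stdbool.h>

/* Before: hand-wrapped comment, unaligned declaration, no blank line. */
static bool
example_has_pair_before(const int *vals, int nvals)
{
    int i;
    /* Scan the array
     * for two equal
     * neighbours. */
    for (i = 1; i < nvals; i++)
    {
        if (vals[i] == vals[i - 1])
            return true;
    }
    return false;
}

/* After: the same code as pgindent would emit it. */
static bool
example_has_pair_after(const int *vals, int nvals)
{
    int         i;

    /*
     * Scan the array for two equal neighbours.
     */
    for (i = 1; i < nvals; i++)
    {
        if (vals[i] == vals[i - 1])
            return true;
    }
    return false;
}

The json.c hunk is the one visible oddity in this section: "unconstify(char *, str) + 1" comes out as "unconstify(char *, str) +1". That looks like pg_bsd_indent 2.0 taking the parenthesized expression for a cast and gluing the apparently-unary "+" to its operand; quirks of this sort are presumably among the 2.0-versus-2.1 behavior differences that committing this run separately is meant to document.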