Diffstat (limited to 'src/backend')
-rw-r--r--  src/backend/access/brin/brin.c | 18
-rw-r--r--  src/backend/access/brin/brin_bloom.c | 8
-rw-r--r--  src/backend/access/brin/brin_minmax_multi.c | 38
-rw-r--r--  src/backend/access/brin/brin_revmap.c | 1
-rw-r--r--  src/backend/access/brin/brin_tuple.c | 20
-rw-r--r--  src/backend/access/common/indextuple.c | 8
-rw-r--r--  src/backend/access/common/toast_compression.c | 12
-rw-r--r--  src/backend/access/common/toast_internals.c | 2
-rw-r--r--  src/backend/access/common/tupconvert.c | 2
-rw-r--r--  src/backend/access/gist/gistproc.c | 4
-rw-r--r--  src/backend/access/gist/gistvalidate.c | 2
-rw-r--r--  src/backend/access/heap/heapam.c | 71
-rw-r--r--  src/backend/access/heap/heapam_handler.c | 12
-rw-r--r--  src/backend/access/heap/heapam_visibility.c | 12
-rw-r--r--  src/backend/access/heap/hio.c | 4
-rw-r--r--  src/backend/access/heap/pruneheap.c | 4
-rw-r--r--  src/backend/access/heap/vacuumlazy.c | 6
-rw-r--r--  src/backend/access/index/genam.c | 4
-rw-r--r--  src/backend/access/nbtree/nbtpage.c | 30
-rw-r--r--  src/backend/access/nbtree/nbtxlog.c | 4
-rw-r--r--  src/backend/access/transam/multixact.c | 12
-rw-r--r--  src/backend/access/transam/twophase.c | 6
-rw-r--r--  src/backend/access/transam/varsup.c | 24
-rw-r--r--  src/backend/access/transam/xlog.c | 113
-rw-r--r--  src/backend/access/transam/xlogfuncs.c | 4
-rw-r--r--  src/backend/access/transam/xloginsert.c | 4
-rw-r--r--  src/backend/bootstrap/bootstrap.c | 18
-rw-r--r--  src/backend/catalog/Catalog.pm | 11
-rw-r--r--  src/backend/catalog/aclchk.c | 26
-rw-r--r--  src/backend/catalog/dependency.c | 2
-rw-r--r--  src/backend/catalog/genbki.pl | 6
-rw-r--r--  src/backend/catalog/index.c | 17
-rw-r--r--  src/backend/catalog/objectaddress.c | 10
-rw-r--r--  src/backend/catalog/pg_inherits.c | 4
-rw-r--r--  src/backend/catalog/pg_proc.c | 12
-rw-r--r--  src/backend/catalog/pg_shdepend.c | 2
-rw-r--r--  src/backend/catalog/pg_subscription.c | 1
-rw-r--r--  src/backend/catalog/toasting.c | 11
-rw-r--r--  src/backend/commands/analyze.c | 15
-rw-r--r--  src/backend/commands/copyto.c | 14
-rw-r--r--  src/backend/commands/explain.c | 8
-rw-r--r--  src/backend/commands/extension.c | 4
-rw-r--r--  src/backend/commands/indexcmds.c | 6
-rw-r--r--  src/backend/commands/subscriptioncmds.c | 8
-rw-r--r--  src/backend/commands/tablecmds.c | 52
-rw-r--r--  src/backend/commands/trigger.c | 2
-rw-r--r--  src/backend/commands/typecmds.c | 14
-rw-r--r--  src/backend/commands/vacuum.c | 4
-rw-r--r--  src/backend/executor/execAmi.c | 1
-rw-r--r--  src/backend/executor/execAsync.c | 4
-rw-r--r--  src/backend/executor/execMain.c | 4
-rw-r--r--  src/backend/executor/execPartition.c | 4
-rw-r--r--  src/backend/executor/nodeAgg.c | 21
-rw-r--r--  src/backend/executor/nodeAppend.c | 16
-rw-r--r--  src/backend/executor/nodeGather.c | 4
-rw-r--r--  src/backend/executor/nodeGatherMerge.c | 6
-rw-r--r--  src/backend/executor/nodeIncrementalSort.c | 4
-rw-r--r--  src/backend/executor/nodeModifyTable.c | 66
-rw-r--r--  src/backend/jit/llvm/llvmjit.c | 8
-rw-r--r--  src/backend/libpq/auth.c | 9
-rw-r--r--  src/backend/libpq/be-secure-openssl.c | 1
-rw-r--r--  src/backend/libpq/pqcomm.c | 4
-rw-r--r--  src/backend/optimizer/plan/createplan.c | 10
-rw-r--r--  src/backend/optimizer/prep/preptlist.c | 56
-rw-r--r--  src/backend/optimizer/util/clauses.c | 130
-rw-r--r--  src/backend/parser/analyze.c | 4
-rw-r--r--  src/backend/parser/parse_agg.c | 15
-rw-r--r--  src/backend/parser/parse_cte.c | 2
-rw-r--r--  src/backend/parser/parse_relation.c | 4
-rw-r--r--  src/backend/parser/parse_utilcmd.c | 4
-rw-r--r--  src/backend/partitioning/partbounds.c | 9
-rw-r--r--  src/backend/partitioning/partdesc.c | 4
-rw-r--r--  src/backend/port/win32_shmem.c | 6
-rw-r--r--  src/backend/postmaster/bgworker.c | 4
-rw-r--r--  src/backend/postmaster/checkpointer.c | 2
-rw-r--r--  src/backend/postmaster/pgstat.c | 2
-rw-r--r--  src/backend/postmaster/postmaster.c | 3
-rw-r--r--  src/backend/postmaster/syslogger.c | 3
-rw-r--r--  src/backend/replication/basebackup.c | 2
-rw-r--r--  src/backend/replication/logical/launcher.c | 8
-rw-r--r--  src/backend/replication/logical/origin.c | 4
-rw-r--r--  src/backend/replication/logical/reorderbuffer.c | 10
-rw-r--r--  src/backend/replication/logical/snapbuild.c | 4
-rw-r--r--  src/backend/replication/slot.c | 50
-rw-r--r--  src/backend/replication/slotfuncs.c | 10
-rw-r--r--  src/backend/replication/syncrep.c | 13
-rw-r--r--  src/backend/replication/walreceiver.c | 4
-rw-r--r--  src/backend/replication/walsender.c | 4
-rw-r--r--  src/backend/statistics/dependencies.c | 4
-rw-r--r--  src/backend/statistics/extended_stats.c | 18
-rw-r--r--  src/backend/storage/buffer/bufmgr.c | 4
-rw-r--r--  src/backend/storage/file/fd.c | 4
-rw-r--r--  src/backend/storage/file/sharedfileset.c | 4
-rw-r--r--  src/backend/storage/ipc/latch.c | 6
-rw-r--r--  src/backend/storage/ipc/procarray.c | 14
-rw-r--r--  src/backend/storage/ipc/procsignal.c | 38
-rw-r--r--  src/backend/storage/ipc/signalfuncs.c | 23
-rw-r--r--  src/backend/storage/ipc/standby.c | 2
-rw-r--r--  src/backend/storage/lmgr/proc.c | 31
-rw-r--r--  src/backend/storage/lmgr/spin.c | 2
-rw-r--r--  src/backend/storage/page/bufpage.c | 2
-rw-r--r--  src/backend/storage/sync/sync.c | 2
-rw-r--r--  src/backend/tcop/postgres.c | 10
-rw-r--r--  src/backend/utils/activity/backend_progress.c | 2
-rw-r--r--  src/backend/utils/activity/backend_status.c | 13
-rw-r--r--  src/backend/utils/activity/wait_event.c | 4
-rw-r--r--  src/backend/utils/adt/acl.c | 6
-rw-r--r--  src/backend/utils/adt/dbsize.c | 14
-rw-r--r--  src/backend/utils/adt/genfile.c | 13
-rw-r--r--  src/backend/utils/adt/lockfuncs.c | 8
-rw-r--r--  src/backend/utils/adt/mcxtfuncs.c | 14
-rw-r--r--  src/backend/utils/adt/name.c | 2
-rw-r--r--  src/backend/utils/adt/pg_locale.c | 10
-rw-r--r--  src/backend/utils/adt/rangetypes_typanalyze.c | 2
-rw-r--r--  src/backend/utils/adt/ri_triggers.c | 4
-rw-r--r--  src/backend/utils/adt/rowtypes.c | 8
-rw-r--r--  src/backend/utils/adt/ruleutils.c | 57
-rw-r--r--  src/backend/utils/adt/selfuncs.c | 16
-rw-r--r--  src/backend/utils/adt/timestamp.c | 8
-rw-r--r--  src/backend/utils/adt/varlena.c | 2
-rw-r--r--  src/backend/utils/cache/inval.c | 35
-rw-r--r--  src/backend/utils/cache/plancache.c | 5
-rw-r--r--  src/backend/utils/cache/relcache.c | 6
-rw-r--r--  src/backend/utils/cache/typcache.c | 6
-rw-r--r--  src/backend/utils/error/elog.c | 4
-rw-r--r--  src/backend/utils/mb/Unicode/convutils.pm | 2
-rw-r--r--  src/backend/utils/misc/guc.c | 7
-rw-r--r--  src/backend/utils/misc/queryjumble.c | 20
-rw-r--r--  src/backend/utils/sort/logtape.c | 1
-rw-r--r--  src/backend/utils/time/snapmgr.c | 16
130 files changed, 835 insertions, 805 deletions
diff --git a/src/backend/access/brin/brin.c b/src/backend/access/brin/brin.c
index c23ea44866a..ccc9fa0959a 100644
--- a/src/backend/access/brin/brin.c
+++ b/src/backend/access/brin/brin.c
@@ -645,11 +645,11 @@ bringetbitmap(IndexScanDesc scan, TIDBitmap *tbm)
* range values; if so, have the pages in the range added
* to the output bitmap.
*
- * The opclass may or may not support processing of multiple
- * scan keys. We can determine that based on the number of
- * arguments - functions with extra parameter (number of scan
- * keys) do support this, otherwise we have to simply pass the
- * scan keys one by one.
+ * The opclass may or may not support processing of
+ * multiple scan keys. We can determine that based on the
+ * number of arguments - functions with extra parameter
+ * (number of scan keys) do support this, otherwise we
+ * have to simply pass the scan keys one by one.
*/
if (consistentFn[attno - 1].fn_nargs >= 4)
{
@@ -667,10 +667,10 @@ bringetbitmap(IndexScanDesc scan, TIDBitmap *tbm)
/*
* Check keys one by one
*
- * When there are multiple scan keys, failure to meet the
- * criteria for a single one of them is enough to discard
- * the range as a whole, so break out of the loop as soon
- * as a false return value is obtained.
+ * When there are multiple scan keys, failure to meet
+ * the criteria for a single one of them is enough to
+ * discard the range as a whole, so break out of the
+ * loop as soon as a false return value is obtained.
*/
int keyno;
diff --git a/src/backend/access/brin/brin_bloom.c b/src/backend/access/brin/brin_bloom.c
index e83c2b82e15..99b2543f767 100644
--- a/src/backend/access/brin/brin_bloom.c
+++ b/src/backend/access/brin/brin_bloom.c
@@ -258,7 +258,7 @@ typedef struct BloomFilter
/* data of the bloom filter */
char data[FLEXIBLE_ARRAY_MEMBER];
-} BloomFilter;
+} BloomFilter;
/*
@@ -341,7 +341,7 @@ bloom_init(int ndistinct, double false_positive_rate)
* Add value to the bloom filter.
*/
static BloomFilter *
-bloom_add_value(BloomFilter * filter, uint32 value, bool *updated)
+bloom_add_value(BloomFilter *filter, uint32 value, bool *updated)
{
int i;
uint64 h1,
@@ -378,7 +378,7 @@ bloom_add_value(BloomFilter * filter, uint32 value, bool *updated)
* Check if the bloom filter contains a particular value.
*/
static bool
-bloom_contains_value(BloomFilter * filter, uint32 value)
+bloom_contains_value(BloomFilter *filter, uint32 value)
{
int i;
uint64 h1,
@@ -414,7 +414,7 @@ typedef struct BloomOpaque
*/
FmgrInfo extra_procinfos[BLOOM_MAX_PROCNUMS];
bool extra_proc_missing[BLOOM_MAX_PROCNUMS];
-} BloomOpaque;
+} BloomOpaque;
static FmgrInfo *bloom_get_procinfo(BrinDesc *bdesc, uint16 attno,
uint16 procnum);
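
For readers skimming this hunk: bloom_add_value() and bloom_contains_value(), whose signatures are fixed above, follow the classic two-hash Bloom filter construction. A minimal standalone sketch of that scheme, using hypothetical names and assuming the usual h1 + i*h2 probe sequence rather than the exact brin_bloom.c layout:

    /*
     * Illustrative sketch only, not the brin_bloom.c code: double-hashing
     * Bloom filter membership, probing nhashes bit positions per value.
     */
    #include <stdint.h>
    #include <stdbool.h>

    typedef struct SketchBloom
    {
        uint32_t    nbits;          /* size of the bit array, in bits */
        uint32_t    nhashes;        /* number of probes per value */
        uint8_t     data[];         /* the bit array itself */
    } SketchBloom;

    static void
    sketch_bloom_add(SketchBloom *f, uint64_t h1, uint64_t h2)
    {
        for (uint32_t i = 0; i < f->nhashes; i++)
        {
            uint32_t    bit = (uint32_t) ((h1 + i * h2) % f->nbits);

            f->data[bit / 8] |= (uint8_t) (1 << (bit % 8));
        }
    }

    static bool
    sketch_bloom_contains(const SketchBloom *f, uint64_t h1, uint64_t h2)
    {
        for (uint32_t i = 0; i < f->nhashes; i++)
        {
            uint32_t    bit = (uint32_t) ((h1 + i * h2) % f->nbits);

            if ((f->data[bit / 8] & (1 << (bit % 8))) == 0)
                return false;       /* definitely absent */
        }
        return true;                /* probably present */
    }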
diff --git a/src/backend/access/brin/brin_minmax_multi.c b/src/backend/access/brin/brin_minmax_multi.c
index 5e4b234cc61..bd14184d767 100644
--- a/src/backend/access/brin/brin_minmax_multi.c
+++ b/src/backend/access/brin/brin_minmax_multi.c
@@ -114,7 +114,7 @@ typedef struct MinmaxMultiOpaque
bool extra_proc_missing[MINMAX_MAX_PROCNUMS];
Oid cached_subtype;
FmgrInfo strategy_procinfos[BTMaxStrategyNumber];
-} MinmaxMultiOpaque;
+} MinmaxMultiOpaque;
/*
* Storage type for BRIN's minmax reloptions
@@ -261,7 +261,7 @@ typedef struct compare_context
{
FmgrInfo *cmpFn;
Oid colloid;
-} compare_context;
+} compare_context;
static int compare_values(const void *a, const void *b, void *arg);
@@ -670,11 +670,11 @@ range_serialize(Ranges *range)
/*
* For values passed by value, we need to copy just the
* significant bytes - we can't use memcpy directly, as that
- * assumes little endian behavior. store_att_byval does
- * almost what we need, but it requires properly aligned
- * buffer - the output buffer does not guarantee that. So we
- * simply use a local Datum variable (which guarantees proper
- * alignment), and then copy the value from it.
+ * assumes little endian behavior. store_att_byval does almost
+ * what we need, but it requires properly aligned buffer - the
+ * output buffer does not guarantee that. So we simply use a local
+ * Datum variable (which guarantees proper alignment), and then
+ * copy the value from it.
*/
store_att_byval(&tmp, range->values[i], typlen);
@@ -771,7 +771,7 @@ range_deserialize(int maxvalues, SerializedRanges *serialized)
dataptr = NULL;
for (i = 0; (i < nvalues) && (!typbyval); i++)
{
- if (typlen > 0) /* fixed-length by-ref types */
+ if (typlen > 0) /* fixed-length by-ref types */
datalen += MAXALIGN(typlen);
else if (typlen == -1) /* varlena */
{
@@ -824,7 +824,8 @@ range_deserialize(int maxvalues, SerializedRanges *serialized)
}
else if (typlen == -2) /* cstring */
{
- Size slen = strlen(ptr) + 1;
+ Size slen = strlen(ptr) + 1;
+
range->values[i] = PointerGetDatum(dataptr);
memcpy(dataptr, ptr, slen);
@@ -2156,8 +2157,8 @@ brin_minmax_multi_distance_interval(PG_FUNCTION_ARGS)
/*
* Delta is (fractional) number of days between the intervals. Assume
- * months have 30 days for consistency with interval_cmp_internal.
- * We don't need to be exact, in the worst case we'll build a bit less
+ * months have 30 days for consistency with interval_cmp_internal. We
+ * don't need to be exact, in the worst case we'll build a bit less
* efficient ranges. But we should not contradict interval_cmp.
*/
dayfraction = result->time % USECS_PER_DAY;
@@ -2315,13 +2316,12 @@ brin_minmax_multi_distance_inet(PG_FUNCTION_ARGS)
/*
* The length is calculated from the mask length, because we sort the
- * addresses by first address in the range, so A.B.C.D/24 < A.B.C.1
- * (the first range starts at A.B.C.0, which is before A.B.C.1). We
- * don't want to produce negative delta in this case, so we just cut
- * the extra bytes.
+ * addresses by first address in the range, so A.B.C.D/24 < A.B.C.1 (the
+ * first range starts at A.B.C.0, which is before A.B.C.1). We don't want
+ * to produce negative delta in this case, so we just cut the extra bytes.
*
- * XXX Maybe this should be a bit more careful and cut the bits, not
- * just whole bytes.
+ * XXX Maybe this should be a bit more careful and cut the bits, not just
+ * whole bytes.
*/
lena = ip_bits(ipa);
lenb = ip_bits(ipb);
@@ -2331,8 +2331,8 @@ brin_minmax_multi_distance_inet(PG_FUNCTION_ARGS)
/* apply the network mask to both addresses */
for (i = 0; i < len; i++)
{
- unsigned char mask;
- int nbits;
+ unsigned char mask;
+ int nbits;
nbits = lena - (i * 8);
if (nbits < 8)
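
The reworded range_serialize comment above (the store_att_byval discussion) is easier to follow with a concrete illustration. A sketch of the endian-safe pattern it describes: narrow the by-value datum to its actual width in an aligned local, then memcpy the whole narrow object into the possibly-unaligned output buffer. Names are hypothetical, not the actual store_att_byval:

    /*
     * Sketch of copying only the significant bytes of a by-value datum.
     * A plain memcpy of the first typlen bytes of the 8-byte value would
     * grab the wrong bytes on big-endian machines; narrowing first makes
     * every copied byte significant regardless of endianness.
     */
    #include <stdint.h>
    #include <string.h>

    static char *
    copy_byval_datum(char *out, uint64_t value, int typlen)
    {
        switch (typlen)
        {
            case 1:
                {
                    uint8_t     v = (uint8_t) value;

                    memcpy(out, &v, sizeof(v));
                    break;
                }
            case 2:
                {
                    uint16_t    v = (uint16_t) value;

                    memcpy(out, &v, sizeof(v));
                    break;
                }
            case 4:
                {
                    uint32_t    v = (uint32_t) value;

                    memcpy(out, &v, sizeof(v));
                    break;
                }
            default:                /* full 8-byte datum */
                memcpy(out, &value, sizeof(value));
                break;
        }
        return out + typlen;        /* out need not be aligned */
    }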
diff --git a/src/backend/access/brin/brin_revmap.c b/src/backend/access/brin/brin_revmap.c
index bab2a88ee3f..c574c8a06ef 100644
--- a/src/backend/access/brin/brin_revmap.c
+++ b/src/backend/access/brin/brin_revmap.c
@@ -371,6 +371,7 @@ brinRevmapDesummarizeRange(Relation idxrel, BlockNumber heapBlk)
regBuf = ReadBuffer(idxrel, ItemPointerGetBlockNumber(iptr));
LockBuffer(regBuf, BUFFER_LOCK_EXCLUSIVE);
regPg = BufferGetPage(regBuf);
+
/*
* We're only removing data, not reading it, so there's no need to
* TestForOldSnapshot here.
diff --git a/src/backend/access/brin/brin_tuple.c b/src/backend/access/brin/brin_tuple.c
index 8c94e4aa8c5..ee05372f795 100644
--- a/src/backend/access/brin/brin_tuple.c
+++ b/src/backend/access/brin/brin_tuple.c
@@ -177,15 +177,15 @@ brin_form_tuple(BrinDesc *brdesc, BlockNumber blkno, BrinMemTuple *tuple,
datumno < brdesc->bd_info[keyno]->oi_nstored;
datumno++)
{
- Datum value = tuple->bt_columns[keyno].bv_values[datumno];
+ Datum value = tuple->bt_columns[keyno].bv_values[datumno];
#ifdef TOAST_INDEX_HACK
/* We must look at the stored type, not at the index descriptor. */
- TypeCacheEntry *atttype = brdesc->bd_info[keyno]->oi_typcache[datumno];
+ TypeCacheEntry *atttype = brdesc->bd_info[keyno]->oi_typcache[datumno];
/* Do we need to free the value at the end? */
- bool free_value = false;
+ bool free_value = false;
/* For non-varlena types we don't need to do anything special */
if (atttype->typlen != -1)
@@ -201,9 +201,9 @@ brin_form_tuple(BrinDesc *brdesc, BlockNumber blkno, BrinMemTuple *tuple,
* If value is stored EXTERNAL, must fetch it so we are not
* depending on outside storage.
*
- * XXX Is this actually true? Could it be that the summary is
- * NULL even for range with non-NULL data? E.g. degenerate bloom
- * filter may be thrown away, etc.
+ * XXX Is this actually true? Could it be that the summary is NULL
+ * even for range with non-NULL data? E.g. degenerate bloom filter
+ * may be thrown away, etc.
*/
if (VARATT_IS_EXTERNAL(DatumGetPointer(value)))
{
@@ -213,16 +213,16 @@ brin_form_tuple(BrinDesc *brdesc, BlockNumber blkno, BrinMemTuple *tuple,
}
/*
- * If value is above size target, and is of a compressible datatype,
- * try to compress it in-line.
+ * If value is above size target, and is of a compressible
+ * datatype, try to compress it in-line.
*/
if (!VARATT_IS_EXTENDED(DatumGetPointer(value)) &&
VARSIZE(DatumGetPointer(value)) > TOAST_INDEX_TARGET &&
(atttype->typstorage == TYPSTORAGE_EXTENDED ||
atttype->typstorage == TYPSTORAGE_MAIN))
{
- Datum cvalue;
- char compression;
+ Datum cvalue;
+ char compression;
Form_pg_attribute att = TupleDescAttr(brdesc->bd_tupdesc,
keyno);
diff --git a/src/backend/access/common/indextuple.c b/src/backend/access/common/indextuple.c
index a4cb8914cc6..52125604113 100644
--- a/src/backend/access/common/indextuple.c
+++ b/src/backend/access/common/indextuple.c
@@ -103,14 +103,14 @@ index_form_tuple(TupleDesc tupleDescriptor,
(att->attstorage == TYPSTORAGE_EXTENDED ||
att->attstorage == TYPSTORAGE_MAIN))
{
- Datum cvalue;
- char compression = att->attcompression;
+ Datum cvalue;
+ char compression = att->attcompression;
/*
* If the compression method is not valid, use the default. We
* don't expect this to happen for regular index columns, which
- * inherit the setting from the corresponding table column, but
- * we do expect it to happen whenever an expression is indexed.
+ * inherit the setting from the corresponding table column, but we
+ * do expect it to happen whenever an expression is indexed.
*/
if (!CompressionMethodIsValid(compression))
compression = GetDefaultToastCompression();
diff --git a/src/backend/access/common/toast_compression.c b/src/backend/access/common/toast_compression.c
index 682fd70e2ef..9e9d4457ace 100644
--- a/src/backend/access/common/toast_compression.c
+++ b/src/backend/access/common/toast_compression.c
@@ -24,7 +24,7 @@
#include "utils/builtins.h"
/* GUC */
-int default_toast_compression = TOAST_PGLZ_COMPRESSION;
+int default_toast_compression = TOAST_PGLZ_COMPRESSION;
#define NO_LZ4_SUPPORT() \
ereport(ERROR, \
@@ -109,7 +109,7 @@ pglz_decompress_datum(const struct varlena *value)
*/
struct varlena *
pglz_decompress_datum_slice(const struct varlena *value,
- int32 slicelength)
+ int32 slicelength)
{
struct varlena *result;
int32 rawsize;
@@ -255,12 +255,12 @@ lz4_decompress_datum_slice(const struct varlena *value, int32 slicelength)
ToastCompressionId
toast_get_compression_id(struct varlena *attr)
{
- ToastCompressionId cmid = TOAST_INVALID_COMPRESSION_ID;
+ ToastCompressionId cmid = TOAST_INVALID_COMPRESSION_ID;
/*
- * If it is stored externally then fetch the compression method id from the
- * external toast pointer. If compressed inline, fetch it from the toast
- * compression header.
+ * If it is stored externally then fetch the compression method id from
+ * the external toast pointer. If compressed inline, fetch it from the
+ * toast compression header.
*/
if (VARATT_IS_EXTERNAL_ONDISK(attr))
{
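
As context for toast_get_compression_id(): in this release the compression method id travels in the top two bits of the compressed varlena's 32-bit size word, leaving 30 bits for the raw size. A sketch of that bit layout, with illustrative macro and field names rather than the varatt.h definitions:

    /*
     * Sketch of the 2-bit method / 30-bit size split assumed by the
     * function above.  Names are hypothetical; only the bit widths are
     * taken from the PostgreSQL 14 layout.
     */
    #include <stdint.h>

    #define SKETCH_EXTSIZE_BITS 30

    static inline uint32_t
    sketch_compression_id(uint32_t tcinfo)
    {
        return tcinfo >> SKETCH_EXTSIZE_BITS;   /* 0 = pglz, 1 = lz4 */
    }

    static inline uint32_t
    sketch_rawsize(uint32_t tcinfo)
    {
        return tcinfo & ((1U << SKETCH_EXTSIZE_BITS) - 1);
    }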
diff --git a/src/backend/access/common/toast_internals.c b/src/backend/access/common/toast_internals.c
index 730cd04a2d7..c036319a0b8 100644
--- a/src/backend/access/common/toast_internals.c
+++ b/src/backend/access/common/toast_internals.c
@@ -48,7 +48,7 @@ toast_compress_datum(Datum value, char cmethod)
{
struct varlena *tmp = NULL;
int32 valsize;
- ToastCompressionId cmid = TOAST_INVALID_COMPRESSION_ID;
+ ToastCompressionId cmid = TOAST_INVALID_COMPRESSION_ID;
Assert(!VARATT_IS_EXTERNAL(DatumGetPointer(value)));
Assert(!VARATT_IS_COMPRESSED(DatumGetPointer(value)));
diff --git a/src/backend/access/common/tupconvert.c b/src/backend/access/common/tupconvert.c
index e055df2f323..64f54393f35 100644
--- a/src/backend/access/common/tupconvert.c
+++ b/src/backend/access/common/tupconvert.c
@@ -236,7 +236,7 @@ execute_attr_map_slot(AttrMap *attrMap,
Bitmapset *
execute_attr_map_cols(AttrMap *attrMap, Bitmapset *in_cols)
{
- Bitmapset *out_cols;
+ Bitmapset *out_cols;
int out_attnum;
/* fast path for the common trivial case */
diff --git a/src/backend/access/gist/gistproc.c b/src/backend/access/gist/gistproc.c
index b8a39cd5439..d474612b77d 100644
--- a/src/backend/access/gist/gistproc.c
+++ b/src/backend/access/gist/gistproc.c
@@ -35,9 +35,9 @@ static bool rtree_internal_consistent(BOX *key, BOX *query,
static uint64 point_zorder_internal(float4 x, float4 y);
static uint64 part_bits32_by2(uint32 x);
static uint32 ieee_float32_to_uint32(float f);
-static int gist_bbox_zorder_cmp(Datum a, Datum b, SortSupport ssup);
+static int gist_bbox_zorder_cmp(Datum a, Datum b, SortSupport ssup);
static Datum gist_bbox_zorder_abbrev_convert(Datum original, SortSupport ssup);
-static int gist_bbox_zorder_cmp_abbrev(Datum z1, Datum z2, SortSupport ssup);
+static int gist_bbox_zorder_cmp_abbrev(Datum z1, Datum z2, SortSupport ssup);
static bool gist_bbox_zorder_abbrev_abort(int memtupcount, SortSupport ssup);
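
The declarations re-indented above belong to the GiST sortsupport code that orders bounding boxes along a Z-order (Morton) curve. A sketch of the standard bit-interleaving trick behind a function like part_bits32_by2(), shown with hypothetical names and the usual magic-number constants rather than the gistproc.c source:

    /*
     * Sketch: spread the bits of a 32-bit value so they occupy the even
     * positions of a 64-bit word, then interleave two such words to form
     * a Z-order key.  Constants are the standard "binary magic numbers".
     */
    #include <stdint.h>

    static uint64_t
    sketch_part_bits32_by2(uint32_t x)
    {
        uint64_t    v = x;

        v = (v | (v << 16)) & 0x0000FFFF0000FFFFULL;
        v = (v | (v << 8)) & 0x00FF00FF00FF00FFULL;
        v = (v | (v << 4)) & 0x0F0F0F0F0F0F0F0FULL;
        v = (v | (v << 2)) & 0x3333333333333333ULL;
        v = (v | (v << 1)) & 0x5555555555555555ULL;
        return v;
    }

    static uint64_t
    sketch_zorder(uint32_t ix, uint32_t iy)
    {
        /* x bits land in the even positions, y bits in the odd ones */
        return sketch_part_bits32_by2(ix) | (sketch_part_bits32_by2(iy) << 1);
    }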
diff --git a/src/backend/access/gist/gistvalidate.c b/src/backend/access/gist/gistvalidate.c
index 7d83b1143c6..b885fa2b256 100644
--- a/src/backend/access/gist/gistvalidate.c
+++ b/src/backend/access/gist/gistvalidate.c
@@ -267,7 +267,7 @@ gistvalidate(Oid opclassoid)
continue; /* got it */
if (i == GIST_DISTANCE_PROC || i == GIST_FETCH_PROC ||
i == GIST_COMPRESS_PROC || i == GIST_DECOMPRESS_PROC ||
- i == GIST_OPTIONS_PROC || i == GIST_SORTSUPPORT_PROC)
+ i == GIST_OPTIONS_PROC || i == GIST_SORTSUPPORT_PROC)
continue; /* optional methods */
ereport(INFO,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index ba36da2b83c..6ac07f2fdac 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -432,11 +432,11 @@ heapgetpage(TableScanDesc sscan, BlockNumber page)
* transactions on the primary might still be invisible to a read-only
* transaction in the standby. We partly handle this problem by tracking
* the minimum xmin of visible tuples as the cut-off XID while marking a
- * page all-visible on the primary and WAL log that along with the visibility
- * map SET operation. In hot standby, we wait for (or abort) all
- * transactions that can potentially may not see one or more tuples on the
- * page. That's how index-only scans work fine in hot standby. A crucial
- * difference between index-only scans and heap scans is that the
+ * page all-visible on the primary and WAL log that along with the
+ * visibility map SET operation. In hot standby, we wait for (or abort)
+ * all transactions that can potentially may not see one or more tuples on
+ * the page. That's how index-only scans work fine in hot standby. A
+ * crucial difference between index-only scans and heap scans is that the
* index-only scan completely relies on the visibility map where as heap
* scan looks at the page-level PD_ALL_VISIBLE flag. We are not sure if
* the page-level flag can be trusted in the same way, because it might
@@ -2095,11 +2095,11 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid,
/*
- * If we're inserting frozen entry into an empty page,
- * set visibility map bits and PageAllVisible() hint.
+ * If we're inserting frozen entry into an empty page, set visibility map
+ * bits and PageAllVisible() hint.
*
- * If we're inserting frozen entry into already all_frozen page,
- * preserve this state.
+ * If we're inserting frozen entry into already all_frozen page, preserve
+ * this state.
*/
if (options & HEAP_INSERT_FROZEN)
{
@@ -2109,7 +2109,7 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid,
if (visibilitymap_pin_ok(BufferGetBlockNumber(buffer), vmbuffer))
vmstatus = visibilitymap_get_status(relation,
- BufferGetBlockNumber(buffer), &vmbuffer);
+ BufferGetBlockNumber(buffer), &vmbuffer);
if ((starting_with_empty_page || vmstatus & VISIBILITYMAP_ALL_FROZEN))
all_frozen_set = true;
@@ -2139,8 +2139,8 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid,
(options & HEAP_INSERT_SPECULATIVE) != 0);
/*
- * If the page is all visible, need to clear that, unless we're only
- * going to add further frozen rows to it.
+ * If the page is all visible, need to clear that, unless we're only going
+ * to add further frozen rows to it.
*
* If we're only adding already frozen rows to a page that was empty or
* marked as all visible, mark it as all-visible.
@@ -2258,11 +2258,11 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid,
END_CRIT_SECTION();
/*
- * If we've frozen everything on the page, update the visibilitymap.
- * We're already holding pin on the vmbuffer.
+ * If we've frozen everything on the page, update the visibilitymap. We're
+ * already holding pin on the vmbuffer.
*
- * No need to update the visibilitymap if it had all_frozen bit set
- * before this insertion.
+ * No need to update the visibilitymap if it had all_frozen bit set before
+ * this insertion.
*/
if (all_frozen_set && ((vmstatus & VISIBILITYMAP_ALL_FROZEN) == 0))
{
@@ -2270,14 +2270,14 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid,
Assert(visibilitymap_pin_ok(BufferGetBlockNumber(buffer), vmbuffer));
/*
- * It's fine to use InvalidTransactionId here - this is only used
- * when HEAP_INSERT_FROZEN is specified, which intentionally
- * violates visibility rules.
+ * It's fine to use InvalidTransactionId here - this is only used when
+ * HEAP_INSERT_FROZEN is specified, which intentionally violates
+ * visibility rules.
*/
visibilitymap_set(relation, BufferGetBlockNumber(buffer), buffer,
- InvalidXLogRecPtr, vmbuffer,
- InvalidTransactionId,
- VISIBILITYMAP_ALL_VISIBLE | VISIBILITYMAP_ALL_FROZEN);
+ InvalidXLogRecPtr, vmbuffer,
+ InvalidTransactionId,
+ VISIBILITYMAP_ALL_VISIBLE | VISIBILITYMAP_ALL_FROZEN);
}
UnlockReleaseBuffer(buffer);
@@ -2547,7 +2547,7 @@ heap_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples,
tupledata = scratchptr;
/* check that the mutually exclusive flags are not both set */
- Assert (!(all_visible_cleared && all_frozen_set));
+ Assert(!(all_visible_cleared && all_frozen_set));
xlrec->flags = 0;
if (all_visible_cleared)
@@ -3063,7 +3063,10 @@ l1:
xl_heap_header xlhdr;
XLogRecPtr recptr;
- /* For logical decode we need combo CIDs to properly decode the catalog */
+ /*
+ * For logical decode we need combo CIDs to properly decode the
+ * catalog
+ */
if (RelationIsAccessibleInLogicalDecoding(relation))
log_heap_new_cid(relation, &tp);
@@ -7932,16 +7935,16 @@ bottomup_sort_and_shrink(TM_IndexDeleteOp *delstate)
* TIDs as each other. The goal is to ignore relatively small differences
* in the total number of promising entries, so that the whole process can
* give a little weight to heapam factors (like heap block locality)
- * instead. This isn't a trade-off, really -- we have nothing to lose.
- * It would be foolish to interpret small differences in npromisingtids
+ * instead. This isn't a trade-off, really -- we have nothing to lose. It
+ * would be foolish to interpret small differences in npromisingtids
* values as anything more than noise.
*
* We tiebreak on nhtids when sorting block group subsets that have the
* same npromisingtids, but this has the same issues as npromisingtids,
- * and so nhtids is subject to the same power-of-two bucketing scheme.
- * The only reason that we don't fix nhtids in the same way here too is
- * that we'll need accurate nhtids values after the sort. We handle
- * nhtids bucketization dynamically instead (in the sort comparator).
+ * and so nhtids is subject to the same power-of-two bucketing scheme. The
+ * only reason that we don't fix nhtids in the same way here too is that
+ * we'll need accurate nhtids values after the sort. We handle nhtids
+ * bucketization dynamically instead (in the sort comparator).
*
* See bottomup_nblocksfavorable() for a full explanation of when and how
* heap locality/favorable blocks can significantly influence when and how
@@ -8944,8 +8947,8 @@ heap_xlog_insert(XLogReaderState *record)
ItemPointerSetOffsetNumber(&target_tid, xlrec->offnum);
/* check that the mutually exclusive flags are not both set */
- Assert (!((xlrec->flags & XLH_INSERT_ALL_VISIBLE_CLEARED) &&
- (xlrec->flags & XLH_INSERT_ALL_FROZEN_SET)));
+ Assert(!((xlrec->flags & XLH_INSERT_ALL_VISIBLE_CLEARED) &&
+ (xlrec->flags & XLH_INSERT_ALL_FROZEN_SET)));
/*
* The visibility map may need to be fixed even if the heap page is
@@ -9072,8 +9075,8 @@ heap_xlog_multi_insert(XLogReaderState *record)
XLogRecGetBlockTag(record, 0, &rnode, NULL, &blkno);
/* check that the mutually exclusive flags are not both set */
- Assert (!((xlrec->flags & XLH_INSERT_ALL_VISIBLE_CLEARED) &&
- (xlrec->flags & XLH_INSERT_ALL_FROZEN_SET)));
+ Assert(!((xlrec->flags & XLH_INSERT_ALL_VISIBLE_CLEARED) &&
+ (xlrec->flags & XLH_INSERT_ALL_FROZEN_SET)));
/*
* The visibility map may need to be fixed even if the heap page is
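
One comment reflowed in this file, in bottomup_sort_and_shrink(), leans on power-of-two bucketing of npromisingtids so that near-equal counts sort as equal and heap-locality factors can break the tie. A sketch of that bucketing idea, as a hypothetical helper rather than the heapam.c code:

    /*
     * Sketch: round a count down to the largest power of two <= n, so
     * that small differences between counts compare as noise.
     */
    #include <stdint.h>

    static uint32_t
    bucketize_pow2(uint32_t n)
    {
        uint32_t    bucket = 1;

        if (n == 0)
            return 0;
        while (bucket <= n / 2)     /* n / 2 avoids overflow */
            bucket *= 2;
        return bucket;
    }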
diff --git a/src/backend/access/heap/heapam_handler.c b/src/backend/access/heap/heapam_handler.c
index 7a9a640989a..61d90448161 100644
--- a/src/backend/access/heap/heapam_handler.c
+++ b/src/backend/access/heap/heapam_handler.c
@@ -1659,13 +1659,13 @@ heapam_index_build_range_scan(Relation heapRelation,
offnum = ItemPointerGetOffsetNumber(&heapTuple->t_self);
/*
- * If a HOT tuple points to a root that we don't know
- * about, obtain root items afresh. If that still fails,
- * report it as corruption.
+ * If a HOT tuple points to a root that we don't know about,
+ * obtain root items afresh. If that still fails, report it as
+ * corruption.
*/
if (root_offsets[offnum - 1] == InvalidOffsetNumber)
{
- Page page = BufferGetPage(hscan->rs_cbuf);
+ Page page = BufferGetPage(hscan->rs_cbuf);
LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_SHARE);
heap_get_root_tuples(page, root_offsets);
@@ -2482,8 +2482,8 @@ reform_and_rewrite_tuple(HeapTuple tuple,
else if (!isnull[i] && TupleDescAttr(newTupDesc, i)->attlen == -1)
{
struct varlena *new_value;
- ToastCompressionId cmid;
- char cmethod;
+ ToastCompressionId cmid;
+ char cmethod;
new_value = (struct varlena *) DatumGetPointer(values[i]);
cmid = toast_get_compression_id(new_value);
diff --git a/src/backend/access/heap/heapam_visibility.c b/src/backend/access/heap/heapam_visibility.c
index cc0bed52435..d3c57cd16a8 100644
--- a/src/backend/access/heap/heapam_visibility.c
+++ b/src/backend/access/heap/heapam_visibility.c
@@ -1608,8 +1608,8 @@ HeapTupleSatisfiesHistoricMVCC(HeapTuple htup, Snapshot snapshot,
/*
* another transaction might have (tried to) delete this tuple or
- * cmin/cmax was stored in a combo CID. So we need to lookup the actual
- * values externally.
+ * cmin/cmax was stored in a combo CID. So we need to lookup the
+ * actual values externally.
*/
resolved = ResolveCminCmaxDuringDecoding(HistoricSnapshotGetTupleCids(), snapshot,
htup, buffer,
@@ -1629,8 +1629,8 @@ HeapTupleSatisfiesHistoricMVCC(HeapTuple htup, Snapshot snapshot,
* elog inside ResolveCminCmaxDuringDecoding.
*
* XXX For the streaming case, we can track the largest combo CID
- * assigned, and error out based on this (when unable to resolve
- * combo CID below that observed maximum value).
+ * assigned, and error out based on this (when unable to resolve combo
+ * CID below that observed maximum value).
*/
if (!resolved)
return false;
@@ -1717,8 +1717,8 @@ HeapTupleSatisfiesHistoricMVCC(HeapTuple htup, Snapshot snapshot,
* elog inside ResolveCminCmaxDuringDecoding.
*
* XXX For the streaming case, we can track the largest combo CID
- * assigned, and error out based on this (when unable to resolve
- * combo CID below that observed maximum value).
+ * assigned, and error out based on this (when unable to resolve combo
+ * CID below that observed maximum value).
*/
if (!resolved || cmax == InvalidCommandId)
return true;
diff --git a/src/backend/access/heap/hio.c b/src/backend/access/heap/hio.c
index ffc89685bff..d34edb4190c 100644
--- a/src/backend/access/heap/hio.c
+++ b/src/backend/access/heap/hio.c
@@ -410,8 +410,8 @@ RelationGetBufferForTuple(Relation relation, Size len,
}
/*
- * If the FSM knows nothing of the rel, try the last page before we
- * give up and extend. This avoids one-tuple-per-page syndrome during
+ * If the FSM knows nothing of the rel, try the last page before we give
+ * up and extend. This avoids one-tuple-per-page syndrome during
* bootstrapping or in a recently-started system.
*/
if (targetBlock == InvalidBlockNumber)
diff --git a/src/backend/access/heap/pruneheap.c b/src/backend/access/heap/pruneheap.c
index 0c8e49d3e6c..15ca1b304a0 100644
--- a/src/backend/access/heap/pruneheap.c
+++ b/src/backend/access/heap/pruneheap.c
@@ -95,8 +95,8 @@ heap_page_prune_opt(Relation relation, Buffer buffer)
/*
* We can't write WAL in recovery mode, so there's no point trying to
- * clean the page. The primary will likely issue a cleaning WAL record soon
- * anyway, so this is no particular loss.
+ * clean the page. The primary will likely issue a cleaning WAL record
+ * soon anyway, so this is no particular loss.
*/
if (RecoveryInProgress())
return;
diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c
index 4b4db4c81b5..17519a970fe 100644
--- a/src/backend/access/heap/vacuumlazy.c
+++ b/src/backend/access/heap/vacuumlazy.c
@@ -691,8 +691,8 @@ heap_vacuum_rel(Relation rel, VacuumParams *params,
*
* Deliberately avoid telling the stats collector about LP_DEAD items that
* remain in the table due to VACUUM bypassing index and heap vacuuming.
- * ANALYZE will consider the remaining LP_DEAD items to be dead tuples.
- * It seems like a good idea to err on the side of not vacuuming again too
+ * ANALYZE will consider the remaining LP_DEAD items to be dead tuples. It
+ * seems like a good idea to err on the side of not vacuuming again too
* soon in cases where the failsafe prevented significant amounts of heap
* vacuuming.
*/
@@ -2284,7 +2284,7 @@ static void
lazy_vacuum_heap_rel(LVRelState *vacrel)
{
int tupindex;
- BlockNumber vacuumed_pages;
+ BlockNumber vacuumed_pages;
PGRUsage ru0;
Buffer vmbuffer = InvalidBuffer;
LVSavedErrInfo saved_err_info;
diff --git a/src/backend/access/index/genam.c b/src/backend/access/index/genam.c
index 0aa26b448b7..b93288a6fe6 100644
--- a/src/backend/access/index/genam.c
+++ b/src/backend/access/index/genam.c
@@ -612,8 +612,8 @@ systable_endscan(SysScanDesc sysscan)
UnregisterSnapshot(sysscan->snapshot);
/*
- * Reset the bsysscan flag at the end of the systable scan. See
- * detailed comments in xact.c where these variables are declared.
+ * Reset the bsysscan flag at the end of the systable scan. See detailed
+ * comments in xact.c where these variables are declared.
*/
if (TransactionIdIsValid(CheckXidAlive))
bsysscan = false;
diff --git a/src/backend/access/nbtree/nbtpage.c b/src/backend/access/nbtree/nbtpage.c
index 706e16ae949..ebec8fa5b89 100644
--- a/src/backend/access/nbtree/nbtpage.c
+++ b/src/backend/access/nbtree/nbtpage.c
@@ -1054,22 +1054,22 @@ _bt_lockbuf(Relation rel, Buffer buf, int access)
LockBuffer(buf, access);
/*
- * It doesn't matter that _bt_unlockbuf() won't get called in the
- * event of an nbtree error (e.g. a unique violation error). That
- * won't cause Valgrind false positives.
+ * It doesn't matter that _bt_unlockbuf() won't get called in the event of
+ * an nbtree error (e.g. a unique violation error). That won't cause
+ * Valgrind false positives.
*
- * The nbtree client requests are superimposed on top of the
- * bufmgr.c buffer pin client requests. In the event of an nbtree
- * error the buffer will certainly get marked as defined when the
- * backend once again acquires its first pin on the buffer. (Of
- * course, if the backend never touches the buffer again then it
- * doesn't matter that it remains non-accessible to Valgrind.)
+ * The nbtree client requests are superimposed on top of the bufmgr.c
+ * buffer pin client requests. In the event of an nbtree error the buffer
+ * will certainly get marked as defined when the backend once again
+ * acquires its first pin on the buffer. (Of course, if the backend never
+ * touches the buffer again then it doesn't matter that it remains
+ * non-accessible to Valgrind.)
*
- * Note: When an IndexTuple C pointer gets computed using an
- * ItemId read from a page while a lock was held, the C pointer
- * becomes unsafe to dereference forever as soon as the lock is
- * released. Valgrind can only detect cases where the pointer
- * gets dereferenced with no _current_ lock/pin held, though.
+ * Note: When an IndexTuple C pointer gets computed using an ItemId read
+ * from a page while a lock was held, the C pointer becomes unsafe to
+ * dereference forever as soon as the lock is released. Valgrind can only
+ * detect cases where the pointer gets dereferenced with no _current_
+ * lock/pin held, though.
*/
if (!RelationUsesLocalBuffers(rel))
VALGRIND_MAKE_MEM_DEFINED(BufferGetPage(buf), BLCKSZ);
@@ -2395,7 +2395,7 @@ _bt_unlink_halfdead_page(Relation rel, Buffer leafbuf, BlockNumber scanblkno,
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
while (P_ISDELETED(opaque) || opaque->btpo_next != target)
{
- bool leftsibvalid = true;
+ bool leftsibvalid = true;
/*
* Before we follow the link from the page that was the left
diff --git a/src/backend/access/nbtree/nbtxlog.c b/src/backend/access/nbtree/nbtxlog.c
index 1779b6ba470..c2e920f159c 100644
--- a/src/backend/access/nbtree/nbtxlog.c
+++ b/src/backend/access/nbtree/nbtxlog.c
@@ -898,8 +898,8 @@ btree_xlog_unlink_page(uint8 info, XLogReaderState *record)
* top parent link when deleting leafbuf because it's the last page
* we'll delete in the subtree undergoing deletion.
*/
- Buffer leafbuf;
- IndexTupleData trunctuple;
+ Buffer leafbuf;
+ IndexTupleData trunctuple;
Assert(!isleaf);
diff --git a/src/backend/access/transam/multixact.c b/src/backend/access/transam/multixact.c
index 1f9f1a1fa10..daab546f296 100644
--- a/src/backend/access/transam/multixact.c
+++ b/src/backend/access/transam/multixact.c
@@ -2278,7 +2278,7 @@ SetMultiXactIdLimit(MultiXactId oldest_datminmxid, Oid oldest_datoid,
/* Log the info */
ereport(DEBUG1,
(errmsg_internal("MultiXactId wrap limit is %u, limited by database with OID %u",
- multiWrapLimit, oldest_datoid)));
+ multiWrapLimit, oldest_datoid)));
/*
* Computing the actual limits is only possible once the data directory is
@@ -2612,7 +2612,7 @@ SetOffsetVacuumLimit(bool is_startup)
if (oldestOffsetKnown)
ereport(DEBUG1,
(errmsg_internal("oldest MultiXactId member is at offset %u",
- oldestOffset)));
+ oldestOffset)));
else
ereport(LOG,
(errmsg("MultiXact member wraparound protections are disabled because oldest checkpointed MultiXact %u does not exist on disk",
@@ -2641,7 +2641,7 @@ SetOffsetVacuumLimit(bool is_startup)
ereport(DEBUG1,
(errmsg_internal("MultiXact member stop limit is now %u based on MultiXact %u",
- offsetStopLimit, oldestMultiXactId)));
+ offsetStopLimit, oldestMultiXactId)));
}
else if (prevOldestOffsetKnown)
{
@@ -3283,9 +3283,9 @@ multixact_redo(XLogReaderState *record)
xlrec->moff + xlrec->nmembers);
/*
- * Make sure nextXid is beyond any XID mentioned in the record.
- * This should be unnecessary, since any XID found here ought to have
- * other evidence in the XLOG, but let's be safe.
+ * Make sure nextXid is beyond any XID mentioned in the record. This
+ * should be unnecessary, since any XID found here ought to have other
+ * evidence in the XLOG, but let's be safe.
*/
max_xid = XLogRecGetXid(record);
for (i = 0; i < xlrec->nmembers; i++)
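
The multixact_redo comment above pairs with a simple scan-and-advance loop: find the largest XID mentioned in the record, then push nextXid past it. A sketch of that pattern with illustrative types; note that real TransactionId comparisons are circular modulo 2^32, which the plain > below ignores:

    /*
     * Sketch only: linear XID comparison stands in for PostgreSQL's
     * circular TransactionIdFollows() logic.
     */
    #include <stdint.h>

    typedef uint32_t TransactionIdSketch;

    static void
    advance_next_xid_past_record(TransactionIdSketch *next_xid,
                                 const TransactionIdSketch *members,
                                 int nmembers,
                                 TransactionIdSketch record_xid)
    {
        TransactionIdSketch max_xid = record_xid;

        for (int i = 0; i < nmembers; i++)
            if (members[i] > max_xid)
                max_xid = members[i];

        if (*next_xid <= max_xid)   /* ensure nextXid is beyond max_xid */
            *next_xid = max_xid + 1;
    }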
diff --git a/src/backend/access/transam/twophase.c b/src/backend/access/transam/twophase.c
index 46f3d082492..f67d813c564 100644
--- a/src/backend/access/transam/twophase.c
+++ b/src/backend/access/transam/twophase.c
@@ -1134,9 +1134,9 @@ EndPrepare(GlobalTransaction gxact)
gxact->prepare_start_lsn = ProcLastRecPtr;
/*
- * Mark the prepared transaction as valid. As soon as xact.c marks
- * MyProc as not running our XID (which it will do immediately after
- * this function returns), others can commit/rollback the xact.
+ * Mark the prepared transaction as valid. As soon as xact.c marks MyProc
+ * as not running our XID (which it will do immediately after this
+ * function returns), others can commit/rollback the xact.
*
* NB: a side effect of this is to make a dummy ProcArray entry for the
* prepared XID. This must happen before we clear the XID from MyProc /
diff --git a/src/backend/access/transam/varsup.c b/src/backend/access/transam/varsup.c
index 142da4aaff3..a22bf375f85 100644
--- a/src/backend/access/transam/varsup.c
+++ b/src/backend/access/transam/varsup.c
@@ -179,10 +179,10 @@ GetNewTransactionId(bool isSubXact)
ExtendSUBTRANS(xid);
/*
- * Now advance the nextXid counter. This must not happen until after
- * we have successfully completed ExtendCLOG() --- if that routine fails,
- * we want the next incoming transaction to try it again. We cannot
- * assign more XIDs until there is CLOG space for them.
+ * Now advance the nextXid counter. This must not happen until after we
+ * have successfully completed ExtendCLOG() --- if that routine fails, we
+ * want the next incoming transaction to try it again. We cannot assign
+ * more XIDs until there is CLOG space for them.
*/
FullTransactionIdAdvance(&ShmemVariableCache->nextXid);
@@ -192,8 +192,8 @@ GetNewTransactionId(bool isSubXact)
* latestCompletedXid is present in the ProcArray, which is essential for
* correct OldestXmin tracking; see src/backend/access/transam/README.
*
- * Note that readers of ProcGlobal->xids/PGPROC->xid should be careful
- * to fetch the value for each proc only once, rather than assume they can
+ * Note that readers of ProcGlobal->xids/PGPROC->xid should be careful to
+ * fetch the value for each proc only once, rather than assume they can
* read a value multiple times and get the same answer each time. Note we
* are assuming that TransactionId and int fetch/store are atomic.
*
@@ -281,9 +281,9 @@ AdvanceNextFullTransactionIdPastXid(TransactionId xid)
uint32 epoch;
/*
- * It is safe to read nextXid without a lock, because this is only
- * called from the startup process or single-process mode, meaning that no
- * other process can modify it.
+ * It is safe to read nextXid without a lock, because this is only called
+ * from the startup process or single-process mode, meaning that no other
+ * process can modify it.
*/
Assert(AmStartupProcess() || !IsUnderPostmaster);
@@ -426,7 +426,7 @@ SetTransactionIdLimit(TransactionId oldest_datfrozenxid, Oid oldest_datoid)
/* Log the info */
ereport(DEBUG1,
(errmsg_internal("transaction ID wrap limit is %u, limited by database with OID %u",
- xidWrapLimit, oldest_datoid)));
+ xidWrapLimit, oldest_datoid)));
/*
* If past the autovacuum force point, immediately signal an autovac
@@ -617,8 +617,8 @@ AssertTransactionIdInAllowableRange(TransactionId xid)
* We can't acquire XidGenLock, as this may be called with XidGenLock
* already held (or with other locks that don't allow XidGenLock to be
* nested). That's ok for our purposes though, since we already rely on
- * 32bit reads to be atomic. While nextXid is 64 bit, we only look at
- * the lower 32bit, so a skewed read doesn't hurt.
+ * 32bit reads to be atomic. While nextXid is 64 bit, we only look at the
+ * lower 32bit, so a skewed read doesn't hurt.
*
* There's no increased danger of falling outside [oldest, next] by
* accessing them without a lock. xid needs to have been created with
diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c
index c1d4415a433..8d163f190f3 100644
--- a/src/backend/access/transam/xlog.c
+++ b/src/backend/access/transam/xlog.c
@@ -723,7 +723,7 @@ typedef struct XLogCtlData
*/
TimestampTz currentChunkStartTime;
/* Recovery pause state */
- RecoveryPauseState recoveryPauseState;
+ RecoveryPauseState recoveryPauseState;
ConditionVariable recoveryNotPausedCV;
/*
@@ -2858,8 +2858,8 @@ UpdateMinRecoveryPoint(XLogRecPtr lsn, bool force)
ereport(DEBUG2,
(errmsg_internal("updated min recovery point to %X/%X on timeline %u",
- LSN_FORMAT_ARGS(minRecoveryPoint),
- newMinRecoveryPointTLI)));
+ LSN_FORMAT_ARGS(minRecoveryPoint),
+ newMinRecoveryPointTLI)));
}
}
LWLockRelease(ControlFileLock);
@@ -3357,7 +3357,7 @@ XLogFileInit(XLogSegNo logsegno, bool *use_existent, bool use_lock)
blocks = wal_segment_size / XLOG_BLCKSZ;
for (int i = 0; i < blocks;)
{
- int iovcnt = Min(blocks - i, lengthof(iov));
+ int iovcnt = Min(blocks - i, lengthof(iov));
off_t offset = i * XLOG_BLCKSZ;
if (pg_pwritev_with_retry(fd, iov, iovcnt, offset) < 0)
@@ -3814,8 +3814,8 @@ XLogFileReadAnyTLI(XLogSegNo segno, int emode, XLogSource source)
* however, unless we actually find a valid segment. That way if there is
* neither a timeline history file nor a WAL segment in the archive, and
* streaming replication is set up, we'll read the timeline history file
- * streamed from the primary when we start streaming, instead of recovering
- * with a dummy history generated here.
+ * streamed from the primary when we start streaming, instead of
+ * recovering with a dummy history generated here.
*/
if (expectedTLEs)
tles = expectedTLEs;
@@ -4229,7 +4229,7 @@ RemoveXlogFile(const char *segname, XLogSegNo recycleSegNo,
{
ereport(DEBUG2,
(errmsg_internal("recycled write-ahead log file \"%s\"",
- segname)));
+ segname)));
CheckpointStats.ckpt_segs_recycled++;
/* Needn't recheck that slot on future iterations */
(*endlogSegNo)++;
@@ -4241,7 +4241,7 @@ RemoveXlogFile(const char *segname, XLogSegNo recycleSegNo,
ereport(DEBUG2,
(errmsg_internal("removing write-ahead log file \"%s\"",
- segname)));
+ segname)));
#ifdef WIN32
@@ -6093,7 +6093,7 @@ recoveryPausesHere(bool endOfRecovery)
RecoveryPauseState
GetRecoveryPauseState(void)
{
- RecoveryPauseState state;
+ RecoveryPauseState state;
SpinLockAcquire(&XLogCtl->info_lck);
state = XLogCtl->recoveryPauseState;
@@ -6347,7 +6347,11 @@ RecoveryRequiresIntParameter(const char *param_name, int currValue, int minValue
ereport(WARNING,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("promotion is not possible because of insufficient parameter settings"),
- /* Repeat the detail from above so it's easy to find in the log. */
+
+ /*
+ * Repeat the detail from above so it's easy to find
+ * in the log.
+ */
errdetail("%s = %d is a lower setting than on the primary server, where its value was %d.",
param_name,
currValue,
@@ -6357,15 +6361,15 @@ RecoveryRequiresIntParameter(const char *param_name, int currValue, int minValue
}
/*
- * If recovery pause is requested then set it paused. While we
- * are in the loop, user might resume and pause again so set
- * this every time.
+ * If recovery pause is requested then set it paused. While
+ * we are in the loop, user might resume and pause again so
+ * set this every time.
*/
ConfirmRecoveryPaused();
/*
- * We wait on a condition variable that will wake us as soon as
- * the pause ends, but we use a timeout so we can check the
+ * We wait on a condition variable that will wake us as soon
+ * as the pause ends, but we use a timeout so we can check the
* above conditions periodically too.
*/
ConditionVariableTimedSleep(&XLogCtl->recoveryNotPausedCV, 1000,
@@ -6377,7 +6381,7 @@ RecoveryRequiresIntParameter(const char *param_name, int currValue, int minValue
ereport(FATAL,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("recovery aborted because of insufficient parameter settings"),
- /* Repeat the detail from above so it's easy to find in the log. */
+ /* Repeat the detail from above so it's easy to find in the log. */
errdetail("%s = %d is a lower setting than on the primary server, where its value was %d.",
param_name,
currValue,
@@ -6920,9 +6924,8 @@ StartupXLOG(void)
StartupReorderBuffer();
/*
- * Startup CLOG. This must be done after ShmemVariableCache->nextXid
- * has been initialized and before we accept connections or begin WAL
- * replay.
+ * Startup CLOG. This must be done after ShmemVariableCache->nextXid has
+ * been initialized and before we accept connections or begin WAL replay.
*/
StartupCLOG();
@@ -6969,11 +6972,11 @@ StartupXLOG(void)
* ourselves - the history file of the recovery target timeline covers all
* the previous timelines in the history too - a cascading standby server
* might be interested in them. Or, if you archive the WAL from this
- * server to a different archive than the primary, it'd be good for all the
- * history files to get archived there after failover, so that you can use
- * one of the old timelines as a PITR target. Timeline history files are
- * small, so it's better to copy them unnecessarily than not copy them and
- * regret later.
+ * server to a different archive than the primary, it'd be good for all
+ * the history files to get archived there after failover, so that you can
+ * use one of the old timelines as a PITR target. Timeline history files
+ * are small, so it's better to copy them unnecessarily than not copy them
+ * and regret later.
*/
restoreTimeLineHistoryFiles(ThisTimeLineID, recoveryTargetTLI);
@@ -7196,9 +7199,9 @@ StartupXLOG(void)
ProcArrayInitRecovery(XidFromFullTransactionId(ShmemVariableCache->nextXid));
/*
- * Startup subtrans only. CLOG, MultiXact and commit
- * timestamp have already been started up and other SLRUs are not
- * maintained during recovery and need not be started yet.
+ * Startup subtrans only. CLOG, MultiXact and commit timestamp
+ * have already been started up and other SLRUs are not maintained
+ * during recovery and need not be started yet.
*/
StartupSUBTRANS(oldestActiveXID);
@@ -7400,8 +7403,7 @@ StartupXLOG(void)
error_context_stack = &errcallback;
/*
- * ShmemVariableCache->nextXid must be beyond record's
- * xid.
+ * ShmemVariableCache->nextXid must be beyond record's xid.
*/
AdvanceNextFullTransactionIdPastXid(record->xl_xid);
@@ -8092,10 +8094,10 @@ StartupXLOG(void)
WalSndWakeup();
/*
- * If this was a promotion, request an (online) checkpoint now. This
- * isn't required for consistency, but the last restartpoint might be far
- * back, and in case of a crash, recovering from it might take a longer
- * than is appropriate now that we're not in standby mode anymore.
+ * If this was a promotion, request an (online) checkpoint now. This isn't
+ * required for consistency, but the last restartpoint might be far back,
+ * and in case of a crash, recovering from it might take a longer than is
+ * appropriate now that we're not in standby mode anymore.
*/
if (promoted)
RequestCheckpoint(CHECKPOINT_FORCE);
@@ -8674,7 +8676,7 @@ LogCheckpointStart(int flags, bool restartpoint)
{
if (restartpoint)
ereport(LOG,
- /* translator: the placeholders show checkpoint options */
+ /* translator: the placeholders show checkpoint options */
(errmsg("restartpoint starting:%s%s%s%s%s%s%s%s",
(flags & CHECKPOINT_IS_SHUTDOWN) ? " shutdown" : "",
(flags & CHECKPOINT_END_OF_RECOVERY) ? " end-of-recovery" : "",
@@ -8686,7 +8688,7 @@ LogCheckpointStart(int flags, bool restartpoint)
(flags & CHECKPOINT_FLUSH_ALL) ? " flush-all" : "")));
else
ereport(LOG,
- /* translator: the placeholders show checkpoint options */
+ /* translator: the placeholders show checkpoint options */
(errmsg("checkpoint starting:%s%s%s%s%s%s%s%s",
(flags & CHECKPOINT_IS_SHUTDOWN) ? " shutdown" : "",
(flags & CHECKPOINT_END_OF_RECOVERY) ? " end-of-recovery" : "",
@@ -11851,12 +11853,12 @@ read_backup_label(XLogRecPtr *checkPointLoc, bool *backupEndRequired,
if (fscanf(lfp, "START TIME: %127[^\n]\n", backuptime) == 1)
ereport(DEBUG1,
(errmsg_internal("backup time %s in file \"%s\"",
- backuptime, BACKUP_LABEL_FILE)));
+ backuptime, BACKUP_LABEL_FILE)));
if (fscanf(lfp, "LABEL: %1023[^\n]\n", backuplabel) == 1)
ereport(DEBUG1,
(errmsg_internal("backup label %s in file \"%s\"",
- backuplabel, BACKUP_LABEL_FILE)));
+ backuplabel, BACKUP_LABEL_FILE)));
/*
* START TIMELINE is new as of 11. Its parsing is not mandatory, still use
@@ -11873,7 +11875,7 @@ read_backup_label(XLogRecPtr *checkPointLoc, bool *backupEndRequired,
ereport(DEBUG1,
(errmsg_internal("backup timeline %u in file \"%s\"",
- tli_from_file, BACKUP_LABEL_FILE)));
+ tli_from_file, BACKUP_LABEL_FILE)));
}
if (ferror(lfp) || FreeFile(lfp))
@@ -12177,8 +12179,8 @@ retry:
Assert(readFile != -1);
/*
- * If the current segment is being streamed from the primary, calculate how
- * much of the current page we have received already. We know the
+ * If the current segment is being streamed from the primary, calculate
+ * how much of the current page we have received already. We know the
* requested record has been received, but this is for the benefit of
* future calls, to allow quick exit at the top of this function.
*/
@@ -12239,12 +12241,13 @@ retry:
* and replay reaches a record that's split across two WAL segments. The
* first page is only available locally, in pg_wal, because it's already
* been recycled on the primary. The second page, however, is not present
- * in pg_wal, and we should stream it from the primary. There is a recycled
- * WAL segment present in pg_wal, with garbage contents, however. We would
- * read the first page from the local WAL segment, but when reading the
- * second page, we would read the bogus, recycled, WAL segment. If we
- * didn't catch that case here, we would never recover, because
- * ReadRecord() would retry reading the whole record from the beginning.
+ * in pg_wal, and we should stream it from the primary. There is a
+ * recycled WAL segment present in pg_wal, with garbage contents, however.
+ * We would read the first page from the local WAL segment, but when
+ * reading the second page, we would read the bogus, recycled, WAL
+ * segment. If we didn't catch that case here, we would never recover,
+ * because ReadRecord() would retry reading the whole record from the
+ * beginning.
*
* Of course, this only catches errors in the page header, which is what
* happens in the case of a recycled WAL segment. Other kinds of errors or
@@ -12399,15 +12402,15 @@ WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess,
* Failure while streaming. Most likely, we got here
* because streaming replication was terminated, or
* promotion was triggered. But we also get here if we
- * find an invalid record in the WAL streamed from the primary,
- * in which case something is seriously wrong. There's
- * little chance that the problem will just go away, but
- * PANIC is not good for availability either, especially
- * in hot standby mode. So, we treat that the same as
- * disconnection, and retry from archive/pg_wal again. The
- * WAL in the archive should be identical to what was
- * streamed, so it's unlikely that it helps, but one can
- * hope...
+ * find an invalid record in the WAL streamed from the
+ * primary, in which case something is seriously wrong.
+ * There's little chance that the problem will just go
+ * away, but PANIC is not good for availability either,
+ * especially in hot standby mode. So, we treat that the
+ * same as disconnection, and retry from archive/pg_wal
+ * again. The WAL in the archive should be identical to
+ * what was streamed, so it's unlikely that it helps, but
+ * one can hope...
*/
/*
diff --git a/src/backend/access/transam/xlogfuncs.c b/src/backend/access/transam/xlogfuncs.c
index f363a4c6399..b98deb72ec6 100644
--- a/src/backend/access/transam/xlogfuncs.c
+++ b/src/backend/access/transam/xlogfuncs.c
@@ -600,7 +600,7 @@ pg_is_wal_replay_paused(PG_FUNCTION_ARGS)
Datum
pg_get_wal_replay_pause_state(PG_FUNCTION_ARGS)
{
- char *statestr = NULL;
+ char *statestr = NULL;
if (!RecoveryInProgress())
ereport(ERROR,
@@ -609,7 +609,7 @@ pg_get_wal_replay_pause_state(PG_FUNCTION_ARGS)
errhint("Recovery control functions can only be executed during recovery.")));
/* get the recovery pause state */
- switch(GetRecoveryPauseState())
+ switch (GetRecoveryPauseState())
{
case RECOVERY_NOT_PAUSED:
statestr = "not paused";
diff --git a/src/backend/access/transam/xloginsert.c b/src/backend/access/transam/xloginsert.c
index 7052dc245ee..32b4cc84e79 100644
--- a/src/backend/access/transam/xloginsert.c
+++ b/src/backend/access/transam/xloginsert.c
@@ -1065,8 +1065,8 @@ log_newpages(RelFileNode *rnode, ForkNumber forkNum, int num_pages,
for (j = batch_start; j < i; j++)
{
/*
- * The page may be uninitialized. If so, we can't set the LSN because that
- * would corrupt the page.
+ * The page may be uninitialized. If so, we can't set the LSN
+ * because that would corrupt the page.
*/
if (!PageIsNew(pages[j]))
{
diff --git a/src/backend/bootstrap/bootstrap.c b/src/backend/bootstrap/bootstrap.c
index 174727b501d..b1552374884 100644
--- a/src/backend/bootstrap/bootstrap.c
+++ b/src/backend/bootstrap/bootstrap.c
@@ -160,7 +160,7 @@ struct typmap
FormData_pg_type am_typ;
};
-static List *Typ = NIL; /* List of struct typmap* */
+static List *Typ = NIL; /* List of struct typmap* */
static struct typmap *Ap = NULL;
static Datum values[MAXATTR]; /* current row's attribute values */
@@ -926,11 +926,12 @@ gettype(char *type)
{
if (Typ != NIL)
{
- ListCell *lc;
+ ListCell *lc;
- foreach (lc, Typ)
+ foreach(lc, Typ)
{
struct typmap *app = lfirst(lc);
+
if (strncmp(NameStr(app->am_typ.typname), type, NAMEDATALEN) == 0)
{
Ap = app;
@@ -948,12 +949,13 @@ gettype(char *type)
populate_typ_list();
/*
- * Calling gettype would result in infinite recursion for types missing
- * in pg_type, so just repeat the lookup.
+ * Calling gettype would result in infinite recursion for types
+ * missing in pg_type, so just repeat the lookup.
*/
- foreach (lc, Typ)
+ foreach(lc, Typ)
{
struct typmap *app = lfirst(lc);
+
if (strncmp(NameStr(app->am_typ.typname), type, NAMEDATALEN) == 0)
{
Ap = app;
@@ -1004,9 +1006,9 @@ boot_get_type_io_data(Oid typid,
{
/* We have the boot-time contents of pg_type, so use it */
struct typmap *ap = NULL;
- ListCell *lc;
+ ListCell *lc;
- foreach (lc, Typ)
+ foreach(lc, Typ)
{
ap = lfirst(lc);
if (ap->am_oid == typid)
diff --git a/src/backend/catalog/Catalog.pm b/src/backend/catalog/Catalog.pm
index b44d568b544..a5e9869378b 100644
--- a/src/backend/catalog/Catalog.pm
+++ b/src/backend/catalog/Catalog.pm
@@ -94,18 +94,21 @@ sub ParseHeader
push @{ $catalog{toasting} },
{ parent_table => $1, toast_oid => $2, toast_index_oid => $3 };
}
- elsif (/^DECLARE_(UNIQUE_)?INDEX(_PKEY)?\(\s*(\w+),\s*(\d+),\s*(.+)\)/)
+ elsif (
+ /^DECLARE_(UNIQUE_)?INDEX(_PKEY)?\(\s*(\w+),\s*(\d+),\s*(.+)\)/)
{
push @{ $catalog{indexing} },
- {
+ {
is_unique => $1 ? 1 : 0,
- is_pkey => $2 ? 1 : 0,
+ is_pkey => $2 ? 1 : 0,
index_name => $3,
index_oid => $4,
index_decl => $5
};
}
- elsif (/^DECLARE_(ARRAY_)?FOREIGN_KEY(_OPT)?\(\s*\(([^)]+)\),\s*(\w+),\s*\(([^)]+)\)\)/)
+ elsif (
+ /^DECLARE_(ARRAY_)?FOREIGN_KEY(_OPT)?\(\s*\(([^)]+)\),\s*(\w+),\s*\(([^)]+)\)\)/
+ )
{
push @{ $catalog{foreign_keys} },
{
diff --git a/src/backend/catalog/aclchk.c b/src/backend/catalog/aclchk.c
index e1573eb3984..53392414f10 100644
--- a/src/backend/catalog/aclchk.c
+++ b/src/backend/catalog/aclchk.c
@@ -3926,8 +3926,8 @@ pg_class_aclmask_ext(Oid table_oid, Oid roleid, AclMode mask,
ReleaseSysCache(tuple);
/*
- * Check if ACL_SELECT is being checked and, if so, and not set already
- * as part of the result, then check if the user is a member of the
+ * Check if ACL_SELECT is being checked and, if so, and not set already as
+ * part of the result, then check if the user is a member of the
* pg_read_all_data role, which allows read access to all relations.
*/
if (mask & ACL_SELECT && !(result & ACL_SELECT) &&
@@ -3935,14 +3935,14 @@ pg_class_aclmask_ext(Oid table_oid, Oid roleid, AclMode mask,
result |= ACL_SELECT;
/*
- * Check if ACL_INSERT, ACL_UPDATE, or ACL_DELETE is being checked
- * and, if so, and not set already as part of the result, then check
- * if the user is a member of the pg_write_all_data role, which
- * allows INSERT/UPDATE/DELETE access to all relations (except
- * system catalogs, which requires superuser, see above).
+ * Check if ACL_INSERT, ACL_UPDATE, or ACL_DELETE is being checked and, if
+ * so, and not set already as part of the result, then check if the user
+ * is a member of the pg_write_all_data role, which allows
+ * INSERT/UPDATE/DELETE access to all relations (except system catalogs,
+ * which requires superuser, see above).
*/
if (mask & (ACL_INSERT | ACL_UPDATE | ACL_DELETE) &&
- !(result & (ACL_INSERT | ACL_UPDATE | ACL_DELETE)) &&
+ !(result & (ACL_INSERT | ACL_UPDATE | ACL_DELETE)) &&
has_privs_of_role(roleid, ROLE_PG_WRITE_ALL_DATA))
result |= (mask & (ACL_INSERT | ACL_UPDATE | ACL_DELETE));
@@ -4273,10 +4273,10 @@ pg_namespace_aclmask(Oid nsp_oid, Oid roleid,
ReleaseSysCache(tuple);
/*
- * Check if ACL_USAGE is being checked and, if so, and not set already
- * as part of the result, then check if the user is a member of the
- * pg_read_all_data or pg_write_all_data roles, which allow usage
- * access to all schemas.
+ * Check if ACL_USAGE is being checked and, if so, and not set already as
+ * part of the result, then check if the user is a member of the
+ * pg_read_all_data or pg_write_all_data roles, which allow usage access
+ * to all schemas.
*/
if (mask & ACL_USAGE && !(result & ACL_USAGE) &&
(has_privs_of_role(roleid, ROLE_PG_READ_ALL_DATA) ||
@@ -4568,7 +4568,7 @@ pg_attribute_aclcheck(Oid table_oid, AttrNumber attnum,
*/
AclResult
pg_attribute_aclcheck_ext(Oid table_oid, AttrNumber attnum,
- Oid roleid, AclMode mode, bool *is_missing)
+ Oid roleid, AclMode mode, bool *is_missing)
{
if (pg_attribute_aclmask_ext(table_oid, attnum, roleid, mode,
ACLMASK_ANY, is_missing) != 0)
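The reflowed comments in pg_class_aclmask_ext() and pg_namespace_aclmask() all describe one pattern: if a privilege bit is being asked for, is not already in the result mask, and the role is a member of the matching predefined role, OR the bit into the result. A compact standalone sketch of that shape, with invented bit names and a stubbed membership test in place of has_privs_of_role():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t DemoAclMode;	/* stand-in for AclMode */

#define DEMO_ACL_SELECT (1u << 0)
#define DEMO_ACL_INSERT (1u << 1)

/* Stub: pretend the role is a member of a "read all data" role. */
static bool
demo_member_of_read_all_data(int roleid)
{
	(void) roleid;
	return true;
}

static DemoAclMode
demo_aclmask(DemoAclMode mask, DemoAclMode result, int roleid)
{
	/*
	 * Same shape as the hunk above: only consult the predefined role if the
	 * bit was requested and not already granted by the object's own ACL.
	 */
	if ((mask & DEMO_ACL_SELECT) && !(result & DEMO_ACL_SELECT) &&
		demo_member_of_read_all_data(roleid))
		result |= DEMO_ACL_SELECT;
	return result;
}

int
main(void)
{
	DemoAclMode r = demo_aclmask(DEMO_ACL_SELECT, 0, 42);

	printf("granted select: %d\n", (r & DEMO_ACL_SELECT) != 0);
	return 0;
}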
diff --git a/src/backend/catalog/dependency.c b/src/backend/catalog/dependency.c
index 259cde33976..0c37fc1d53f 100644
--- a/src/backend/catalog/dependency.c
+++ b/src/backend/catalog/dependency.c
@@ -1120,7 +1120,7 @@ reportDependentObjects(const ObjectAddresses *targetObjects,
*/
ereport(DEBUG2,
(errmsg_internal("drop auto-cascades to %s",
- objDesc)));
+ objDesc)));
}
else if (behavior == DROP_RESTRICT)
{
diff --git a/src/backend/catalog/genbki.pl b/src/backend/catalog/genbki.pl
index 9586c29ad06..bf080b5f124 100644
--- a/src/backend/catalog/genbki.pl
+++ b/src/backend/catalog/genbki.pl
@@ -786,9 +786,9 @@ close $fk_info;
close $constraints;
# Finally, rename the completed files into place.
-Catalog::RenameTempFile($bkifile, $tmpext);
-Catalog::RenameTempFile($schemafile, $tmpext);
-Catalog::RenameTempFile($fk_info_file, $tmpext);
+Catalog::RenameTempFile($bkifile, $tmpext);
+Catalog::RenameTempFile($schemafile, $tmpext);
+Catalog::RenameTempFile($fk_info_file, $tmpext);
Catalog::RenameTempFile($constraints_file, $tmpext);
exit 0;
diff --git a/src/backend/catalog/index.c b/src/backend/catalog/index.c
index 8ded2b53d4c..0f8cfae4ec9 100644
--- a/src/backend/catalog/index.c
+++ b/src/backend/catalog/index.c
@@ -382,8 +382,8 @@ ConstructTupleDescriptor(Relation heapRelation,
* For expression columns, set attcompression invalid, since
* there's no table column from which to copy the value. Whenever
* we actually need to compress a value, we'll use whatever the
- * current value of default_compression_method is at that point
- * in time.
+ * current value of default_compression_method is at that point in
+ * time.
*/
to->attcompression = InvalidCompressionMethod;
@@ -2927,14 +2927,14 @@ index_build(Relation heapRelation,
if (indexInfo->ii_ParallelWorkers == 0)
ereport(DEBUG1,
(errmsg_internal("building index \"%s\" on table \"%s\" serially",
- RelationGetRelationName(indexRelation),
- RelationGetRelationName(heapRelation))));
+ RelationGetRelationName(indexRelation),
+ RelationGetRelationName(heapRelation))));
else
ereport(DEBUG1,
(errmsg_internal("building index \"%s\" on table \"%s\" with request for %d parallel workers",
- RelationGetRelationName(indexRelation),
- RelationGetRelationName(heapRelation),
- indexInfo->ii_ParallelWorkers)));
+ RelationGetRelationName(indexRelation),
+ RelationGetRelationName(heapRelation),
+ indexInfo->ii_ParallelWorkers)));
/*
* Switch to the table owner's userid, so that any index functions are run
@@ -3619,8 +3619,7 @@ reindex_index(Oid indexId, bool skip_constraint_checks, char persistence,
SetRelationTableSpace(iRel, params->tablespaceOid, InvalidOid);
/*
- * Schedule unlinking of the old index storage at transaction
- * commit.
+ * Schedule unlinking of the old index storage at transaction commit.
*/
RelationDropStorage(iRel);
RelationAssumeNewRelfilenode(iRel);
diff --git a/src/backend/catalog/objectaddress.c b/src/backend/catalog/objectaddress.c
index d1d7a10b438..d79c3cde7c6 100644
--- a/src/backend/catalog/objectaddress.c
+++ b/src/backend/catalog/objectaddress.c
@@ -96,7 +96,8 @@
*/
typedef struct
{
- const char *class_descr; /* string describing the catalog, for internal error messages */
+ const char *class_descr; /* string describing the catalog, for internal
+ * error messages */
Oid class_oid; /* oid of catalog */
Oid oid_index_oid; /* oid of index on system oid column */
int oid_catcache_id; /* id of catcache on system oid column */
@@ -2871,6 +2872,7 @@ getObjectDescription(const ObjectAddress *object, bool missing_ok)
char *attname = get_attname(object->objectId,
object->objectSubId,
missing_ok);
+
if (!attname)
break;
@@ -2888,6 +2890,7 @@ getObjectDescription(const ObjectAddress *object, bool missing_ok)
bits16 flags = FORMAT_PROC_INVALID_AS_NULL;
char *proname = format_procedure_extended(object->objectId,
flags);
+
if (proname == NULL)
break;
@@ -2900,6 +2903,7 @@ getObjectDescription(const ObjectAddress *object, bool missing_ok)
bits16 flags = FORMAT_TYPE_INVALID_AS_NULL;
char *typname = format_type_extended(object->objectId, -1,
flags);
+
if (typname == NULL)
break;
@@ -3861,6 +3865,7 @@ getObjectDescription(const ObjectAddress *object, bool missing_ok)
{
char *pubname = get_publication_name(object->objectId,
missing_ok);
+
if (pubname)
appendStringInfo(&buffer, _("publication %s"), pubname);
break;
@@ -3901,6 +3906,7 @@ getObjectDescription(const ObjectAddress *object, bool missing_ok)
{
char *subname = get_subscription_name(object->objectId,
missing_ok);
+
if (subname)
appendStringInfo(&buffer, _("subscription %s"), subname);
break;
@@ -4708,6 +4714,7 @@ getObjectIdentityParts(const ObjectAddress *object,
bits16 flags = FORMAT_PROC_FORCE_QUALIFY | FORMAT_PROC_INVALID_AS_NULL;
char *proname = format_procedure_extended(object->objectId,
flags);
+
if (proname == NULL)
break;
@@ -4957,6 +4964,7 @@ getObjectIdentityParts(const ObjectAddress *object,
bits16 flags = FORMAT_OPERATOR_FORCE_QUALIFY | FORMAT_OPERATOR_INVALID_AS_NULL;
char *oprname = format_operator_extended(object->objectId,
flags);
+
if (oprname == NULL)
break;
diff --git a/src/backend/catalog/pg_inherits.c b/src/backend/catalog/pg_inherits.c
index c373faf2d64..1c37a438c39 100644
--- a/src/backend/catalog/pg_inherits.c
+++ b/src/backend/catalog/pg_inherits.c
@@ -578,7 +578,7 @@ DeleteInheritsTuple(Oid inhrelid, Oid inhparent, bool expect_detach_pending,
parent = ((Form_pg_inherits) GETSTRUCT(inheritsTuple))->inhparent;
if (!OidIsValid(inhparent) || parent == inhparent)
{
- bool detach_pending;
+ bool detach_pending;
detach_pending =
((Form_pg_inherits) GETSTRUCT(inheritsTuple))->inhdetachpending;
@@ -640,7 +640,7 @@ PartitionHasPendingDetach(Oid partoid)
while (HeapTupleIsValid(inheritsTuple = systable_getnext(scan)))
{
- bool detached;
+ bool detached;
detached =
((Form_pg_inherits) GETSTRUCT(inheritsTuple))->inhdetachpending;
diff --git a/src/backend/catalog/pg_proc.c b/src/backend/catalog/pg_proc.c
index 478dbde3fe6..54031108204 100644
--- a/src/backend/catalog/pg_proc.c
+++ b/src/backend/catalog/pg_proc.c
@@ -903,10 +903,10 @@ fmgr_sql_validator(PG_FUNCTION_ARGS)
else
{
/*
- * We can't do full prechecking of the function definition if there
- * are any polymorphic input types, because actual datatypes of
- * expression results will be unresolvable. The check will be done at
- * runtime instead.
+ * We can't do full prechecking of the function definition if
+ * there are any polymorphic input types, because actual datatypes
+ * of expression results will be unresolvable. The check will be
+ * done at runtime instead.
*
* We can run the text through the raw parser though; this will at
* least catch silly syntactic errors.
@@ -917,8 +917,8 @@ fmgr_sql_validator(PG_FUNCTION_ARGS)
if (!haspolyarg)
{
/*
- * OK to do full precheck: analyze and rewrite the queries, then
- * verify the result type.
+ * OK to do full precheck: analyze and rewrite the queries,
+ * then verify the result type.
*/
SQLFunctionParseInfoPtr pinfo;
diff --git a/src/backend/catalog/pg_shdepend.c b/src/backend/catalog/pg_shdepend.c
index 90b7a5de299..420ad965653 100644
--- a/src/backend/catalog/pg_shdepend.c
+++ b/src/backend/catalog/pg_shdepend.c
@@ -361,7 +361,7 @@ void
recordDependencyOnTablespace(Oid classId, Oid objectId, Oid tablespace)
{
ObjectAddress myself,
- referenced;
+ referenced;
ObjectAddressSet(myself, classId, objectId);
ObjectAddressSet(referenced, TableSpaceRelationId, tablespace);
diff --git a/src/backend/catalog/pg_subscription.c b/src/backend/catalog/pg_subscription.c
index 7db1f7df08c..29fc4218cd4 100644
--- a/src/backend/catalog/pg_subscription.c
+++ b/src/backend/catalog/pg_subscription.c
@@ -433,6 +433,7 @@ RemoveSubscriptionRel(Oid subid, Oid relid)
get_subscription_name(subrel->srsubid, false)),
errdetail("Table synchronization for relation \"%s\" is in progress and is in state \"%c\".",
get_rel_name(relid), subrel->srsubstate),
+
/*
* translator: first %s is a SQL ALTER command and second %s is a
* SQL DROP command
diff --git a/src/backend/catalog/toasting.c b/src/backend/catalog/toasting.c
index 933a0734d1a..bf81f6ccc55 100644
--- a/src/backend/catalog/toasting.c
+++ b/src/backend/catalog/toasting.c
@@ -351,9 +351,8 @@ create_toast_table(Relation rel, Oid toastOid, Oid toastIndexOid,
table_close(class_rel, RowExclusiveLock);
/*
- * Register dependency from the toast table to the main, so that the
- * toast table will be deleted if the main is. Skip this in bootstrap
- * mode.
+ * Register dependency from the toast table to the main, so that the toast
+ * table will be deleted if the main is. Skip this in bootstrap mode.
*/
if (!IsBootstrapProcessingMode())
{
@@ -396,9 +395,9 @@ needs_toast_table(Relation rel)
/*
* Ignore attempts to create toast tables on catalog tables after initdb.
- * Which catalogs get toast tables is explicitly chosen in
- * catalog/pg_*.h. (We could get here via some ALTER TABLE command if
- * the catalog doesn't have a toast table.)
+ * Which catalogs get toast tables is explicitly chosen in catalog/pg_*.h.
+ * (We could get here via some ALTER TABLE command if the catalog doesn't
+ * have a toast table.)
*/
if (IsCatalogRelation(rel) && !IsBootstrapProcessingMode())
return false;
diff --git a/src/backend/commands/analyze.c b/src/backend/commands/analyze.c
index 8aa329a2a03..426c1e67109 100644
--- a/src/backend/commands/analyze.c
+++ b/src/backend/commands/analyze.c
@@ -617,11 +617,10 @@ do_analyze_rel(Relation onerel, VacuumParams *params,
*
* We assume that VACUUM hasn't set pg_class.reltuples already, even
* during a VACUUM ANALYZE. Although VACUUM often updates pg_class,
- * exceptions exist. A "VACUUM (ANALYZE, INDEX_CLEANUP OFF)" command
- * will never update pg_class entries for index relations. It's also
- * possible that an individual index's pg_class entry won't be updated
- * during VACUUM if the index AM returns NULL from its amvacuumcleanup()
- * routine.
+ * exceptions exist. A "VACUUM (ANALYZE, INDEX_CLEANUP OFF)" command will
+ * never update pg_class entries for index relations. It's also possible
+ * that an individual index's pg_class entry won't be updated during
+ * VACUUM if the index AM returns NULL from its amvacuumcleanup() routine.
*/
if (!inh)
{
@@ -659,9 +658,9 @@ do_analyze_rel(Relation onerel, VacuumParams *params,
else if (onerel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
{
/*
- * Partitioned tables don't have storage, so we don't set any fields in
- * their pg_class entries except for reltuples, which is necessary for
- * auto-analyze to work properly.
+ * Partitioned tables don't have storage, so we don't set any fields
+ * in their pg_class entries except for reltuples, which is necessary
+ * for auto-analyze to work properly.
*/
vac_update_relstats(onerel, -1, totalrows,
0, false, InvalidTransactionId,
diff --git a/src/backend/commands/copyto.c b/src/backend/commands/copyto.c
index 67bac9ccab6..89a4f8f810e 100644
--- a/src/backend/commands/copyto.c
+++ b/src/backend/commands/copyto.c
@@ -104,7 +104,7 @@ typedef struct CopyToStateData
typedef struct
{
DestReceiver pub; /* publicly-known function pointers */
- CopyToState cstate; /* CopyToStateData for the command */
+ CopyToState cstate; /* CopyToStateData for the command */
uint64 processed; /* # of tuples processed */
} DR_copy;
@@ -348,7 +348,7 @@ BeginCopyTo(ParseState *pstate,
List *attnamelist,
List *options)
{
- CopyToState cstate;
+ CopyToState cstate;
bool pipe = (filename == NULL);
TupleDesc tupDesc;
int num_phys_attrs;
@@ -415,7 +415,7 @@ BeginCopyTo(ParseState *pstate,
oldcontext = MemoryContextSwitchTo(cstate->copycontext);
/* Extract options from the statement node tree */
- ProcessCopyOptions(pstate, &cstate->opts, false /* is_from */, options);
+ ProcessCopyOptions(pstate, &cstate->opts, false /* is_from */ , options);
/* Process the source/target relation or query */
if (rel)
@@ -793,7 +793,7 @@ DoCopyTo(CopyToState cstate)
else
tupDesc = cstate->queryDesc->tupDesc;
num_phys_attrs = tupDesc->natts;
- cstate->opts.null_print_client = cstate->opts.null_print; /* default */
+ cstate->opts.null_print_client = cstate->opts.null_print; /* default */
/* We use fe_msgbuf as a per-row buffer regardless of copy_dest */
cstate->fe_msgbuf = makeStringInfo();
@@ -850,8 +850,8 @@ DoCopyTo(CopyToState cstate)
*/
if (cstate->need_transcoding)
cstate->opts.null_print_client = pg_server_to_any(cstate->opts.null_print,
- cstate->opts.null_print_len,
- cstate->file_encoding);
+ cstate->opts.null_print_len,
+ cstate->file_encoding);
/* if a header has been requested send the line */
if (cstate->opts.header_line)
@@ -1265,7 +1265,7 @@ static bool
copy_dest_receive(TupleTableSlot *slot, DestReceiver *self)
{
DR_copy *myState = (DR_copy *) self;
- CopyToState cstate = myState->cstate;
+ CopyToState cstate = myState->cstate;
/* Send the data */
CopyOneRowTo(cstate, slot);
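The BeginCopyTo() hunk shows one of the tool's stranger outputs: when a trailing comment sits between an argument and the following comma, pgindent emits "false /* is_from */ , options" with a space before the comma. That appears to be a quirk of the reindenting tool rather than a style anyone writes by hand. A tiny compilable illustration of the pattern, with invented names:

#include <stdio.h>

static void
demo_copy(int is_from, const char *options)
{
	printf("is_from=%d options=%s\n", is_from, options);
}

int
main(void)
{
	/* pgindent leaves a space before the comma after a trailing comment */
	demo_copy(0 /* is_from */ , "csv");
	return 0;
}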
diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c
index 9867da83bca..1202bf85a36 100644
--- a/src/backend/commands/explain.c
+++ b/src/backend/commands/explain.c
@@ -167,7 +167,7 @@ ExplainQuery(ParseState *pstate, ExplainStmt *stmt,
ExplainState *es = NewExplainState();
TupOutputState *tstate;
JumbleState *jstate = NULL;
- Query *query;
+ Query *query;
List *rewritten;
ListCell *lc;
bool timing_set = false;
@@ -458,7 +458,7 @@ ExplainOneUtility(Node *utilityStmt, IntoClause *into, ExplainState *es,
else if (ctas->objtype == OBJECT_MATVIEW)
ExplainDummyGroup("CREATE MATERIALIZED VIEW", NULL, es);
else
- elog(ERROR, "unexpected object type: %d",
+ elog(ERROR, "unexpected object type: %d",
(int) ctas->objtype);
return;
}
@@ -612,7 +612,7 @@ ExplainOnePlan(PlannedStmt *plannedstmt, IntoClause *into, ExplainState *es,
if (es->verbose && plannedstmt->queryId != UINT64CONST(0))
{
- char buf[MAXINT8LEN+1];
+ char buf[MAXINT8LEN + 1];
pg_lltoa(plannedstmt->queryId, buf);
ExplainPropertyText("Query Identifier", buf, es);
@@ -3298,7 +3298,7 @@ show_hashagg_info(AggState *aggstate, ExplainState *es)
if (aggstate->hash_batches_used > 1)
{
appendStringInfo(es->str, " Disk Usage: " UINT64_FORMAT "kB",
- aggstate->hash_disk_used);
+ aggstate->hash_disk_used);
}
}
diff --git a/src/backend/commands/extension.c b/src/backend/commands/extension.c
index 19db329fe6f..008505368c4 100644
--- a/src/backend/commands/extension.c
+++ b/src/backend/commands/extension.c
@@ -3293,8 +3293,8 @@ ExecAlterExtensionContentsStmt(AlterExtensionContentsStmt *stmt,
case OBJECT_SUBSCRIPTION:
case OBJECT_TABLESPACE:
ereport(ERROR,
- (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("cannot add an object of this type to an extension")));
+ (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
+ errmsg("cannot add an object of this type to an extension")));
break;
default:
/* OK */
diff --git a/src/backend/commands/indexcmds.c b/src/backend/commands/indexcmds.c
index 3edf61993ad..76774dce064 100644
--- a/src/backend/commands/indexcmds.c
+++ b/src/backend/commands/indexcmds.c
@@ -1086,9 +1086,9 @@ DefineIndex(Oid relationId,
ereport(DEBUG1,
(errmsg_internal("%s %s will create implicit index \"%s\" for table \"%s\"",
- is_alter_table ? "ALTER TABLE / ADD" : "CREATE TABLE /",
- constraint_type,
- indexRelationName, RelationGetRelationName(rel))));
+ is_alter_table ? "ALTER TABLE / ADD" : "CREATE TABLE /",
+ constraint_type,
+ indexRelationName, RelationGetRelationName(rel))));
}
/*
diff --git a/src/backend/commands/subscriptioncmds.c b/src/backend/commands/subscriptioncmds.c
index bbb2f5d029e..8aa6de17850 100644
--- a/src/backend/commands/subscriptioncmds.c
+++ b/src/backend/commands/subscriptioncmds.c
@@ -628,7 +628,7 @@ AlterSubscription_refresh(Subscription *sub, bool copy_data)
InvalidXLogRecPtr);
ereport(DEBUG1,
(errmsg_internal("table \"%s.%s\" added to subscription \"%s\"",
- rv->schemaname, rv->relname, sub->name)));
+ rv->schemaname, rv->relname, sub->name)));
}
}
@@ -702,9 +702,9 @@ AlterSubscription_refresh(Subscription *sub, bool copy_data)
ereport(DEBUG1,
(errmsg_internal("table \"%s.%s\" removed from subscription \"%s\"",
- get_namespace_name(get_rel_namespace(relid)),
- get_rel_name(relid),
- sub->name)));
+ get_namespace_name(get_rel_namespace(relid)),
+ get_rel_name(relid),
+ sub->name)));
}
}
diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c
index 591bf01189b..ebc62034d26 100644
--- a/src/backend/commands/tablecmds.c
+++ b/src/backend/commands/tablecmds.c
@@ -563,7 +563,7 @@ static void ATExecGenericOptions(Relation rel, List *options);
static void ATExecSetRowSecurity(Relation rel, bool rls);
static void ATExecForceNoForceRowSecurity(Relation rel, bool force_rls);
static ObjectAddress ATExecSetCompression(AlteredTableInfo *tab, Relation rel,
- const char *column, Node *newValue, LOCKMODE lockmode);
+ const char *column, Node *newValue, LOCKMODE lockmode);
static void index_copy_data(Relation rel, RelFileNode newrnode);
static const char *storage_name(char c);
@@ -2593,7 +2593,7 @@ MergeAttributes(List *schema, List *supers, char relpersistence,
if (CompressionMethodIsValid(attribute->attcompression))
{
const char *compression =
- GetCompressionMethodName(attribute->attcompression);
+ GetCompressionMethodName(attribute->attcompression);
if (def->compression == NULL)
def->compression = pstrdup(compression);
@@ -2641,7 +2641,7 @@ MergeAttributes(List *schema, List *supers, char relpersistence,
def->location = -1;
if (CompressionMethodIsValid(attribute->attcompression))
def->compression = pstrdup(GetCompressionMethodName(
- attribute->attcompression));
+ attribute->attcompression));
else
def->compression = NULL;
inhSchema = lappend(inhSchema, def);
@@ -4524,7 +4524,7 @@ ATPrepCmd(List **wqueue, Relation rel, AlterTableCmd *cmd,
/* No command-specific prep needed */
pass = AT_PASS_MISC;
break;
- case AT_SetCompression: /* ALTER COLUMN SET COMPRESSION */
+ case AT_SetCompression: /* ALTER COLUMN SET COMPRESSION */
ATSimplePermissions(rel, ATT_TABLE | ATT_MATVIEW);
/* This command never recurses */
/* No command-specific prep needed */
@@ -5666,11 +5666,11 @@ ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap, LOCKMODE lockmode)
if (newrel)
ereport(DEBUG1,
(errmsg_internal("rewriting table \"%s\"",
- RelationGetRelationName(oldrel))));
+ RelationGetRelationName(oldrel))));
else
ereport(DEBUG1,
(errmsg_internal("verifying table \"%s\"",
- RelationGetRelationName(oldrel))));
+ RelationGetRelationName(oldrel))));
if (newrel)
{
@@ -7297,7 +7297,7 @@ NotNullImpliedByRelConstraints(Relation rel, Form_pg_attribute attr)
{
ereport(DEBUG1,
(errmsg_internal("existing constraints on column \"%s.%s\" are sufficient to prove that it does not contain nulls",
- RelationGetRelationName(rel), NameStr(attr->attname))));
+ RelationGetRelationName(rel), NameStr(attr->attname))));
return true;
}
@@ -12876,7 +12876,7 @@ ATPostAlterTypeParse(Oid oldId, Oid oldRelId, Oid refRelId, char *cmd,
}
else if (IsA(stm, CreateStatsStmt))
{
- CreateStatsStmt *stmt = (CreateStatsStmt *) stm;
+ CreateStatsStmt *stmt = (CreateStatsStmt *) stm;
AlterTableCmd *newcmd;
/* keep the statistics object's comment */
@@ -14539,9 +14539,9 @@ MergeAttributesIntoExisting(Relation child_rel, Relation parent_rel)
if (strcmp(child_expr, parent_expr) != 0)
ereport(ERROR,
- (errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("column \"%s\" in child table has a conflicting generation expression",
- attributeName)));
+ (errcode(ERRCODE_DATATYPE_MISMATCH),
+ errmsg("column \"%s\" in child table has a conflicting generation expression",
+ attributeName)));
}
/*
@@ -14769,7 +14769,7 @@ static void
MarkInheritDetached(Relation child_rel, Relation parent_rel)
{
Relation catalogRelation;
- SysScanDesc scan;
+ SysScanDesc scan;
ScanKeyData key;
HeapTuple inheritsTuple;
bool found = false;
@@ -15645,7 +15645,7 @@ ATExecSetCompression(AlteredTableInfo *tab,
if (!IsStorageCompressible(typstorage))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("column data type %s does not support compression",
+ errmsg("column data type %s does not support compression",
format_type_be(atttableform->atttypid))));
/* get the attribute compression method. */
@@ -17010,11 +17010,11 @@ QueuePartitionConstraintValidation(List **wqueue, Relation scanrel,
if (!validate_default)
ereport(DEBUG1,
(errmsg_internal("partition constraint for table \"%s\" is implied by existing constraints",
- RelationGetRelationName(scanrel))));
+ RelationGetRelationName(scanrel))));
else
ereport(DEBUG1,
(errmsg_internal("updated partition constraint for default partition \"%s\" is implied by existing constraints",
- RelationGetRelationName(scanrel))));
+ RelationGetRelationName(scanrel))));
return;
}
@@ -17745,8 +17745,8 @@ ATExecDetachPartition(List **wqueue, AlteredTableInfo *tab, Relation rel,
AccessExclusiveLock);
/*
- * Check inheritance conditions and either delete the pg_inherits row
- * (in non-concurrent mode) or just set the inhdetachpending flag.
+ * Check inheritance conditions and either delete the pg_inherits row (in
+ * non-concurrent mode) or just set the inhdetachpending flag.
*/
if (!concurrent)
RemoveInheritance(partRel, rel, false);
@@ -17771,11 +17771,11 @@ ATExecDetachPartition(List **wqueue, AlteredTableInfo *tab, Relation rel,
*/
if (concurrent)
{
- Oid partrelid,
- parentrelid;
+ Oid partrelid,
+ parentrelid;
LOCKTAG tag;
- char *parentrelname;
- char *partrelname;
+ char *parentrelname;
+ char *partrelname;
/*
* Add a new constraint to the partition being detached, which
@@ -17815,10 +17815,10 @@ ATExecDetachPartition(List **wqueue, AlteredTableInfo *tab, Relation rel,
StartTransactionCommand();
/*
- * Now wait. This ensures that all queries that were planned including
- * the partition are finished before we remove the rest of catalog
- * entries. We don't need or indeed want to acquire this lock, though
- * -- that would block later queries.
+ * Now wait. This ensures that all queries that were planned
+ * including the partition are finished before we remove the rest of
+ * catalog entries. We don't need or indeed want to acquire this
+ * lock, though -- that would block later queries.
*
* We don't need to concern ourselves with waiting for a lock on the
* partition itself, since we will acquire AccessExclusiveLock below.
@@ -18046,7 +18046,7 @@ DetachPartitionFinalize(Relation rel, Relation partRel, bool concurrent,
static ObjectAddress
ATExecDetachPartitionFinalize(Relation rel, RangeVar *name)
{
- Relation partRel;
+ Relation partRel;
ObjectAddress address;
Snapshot snap = GetActiveSnapshot();
diff --git a/src/backend/commands/trigger.c b/src/backend/commands/trigger.c
index f305f8bc0f2..ef34421f1ca 100644
--- a/src/backend/commands/trigger.c
+++ b/src/backend/commands/trigger.c
@@ -4353,7 +4353,7 @@ GetAfterTriggersStoreSlot(AfterTriggersTableData *table,
/* Create it if not already done. */
if (!table->storeslot)
{
- MemoryContext oldcxt;
+ MemoryContext oldcxt;
/*
		 * We only need this slot until AfterTriggerEndQuery, but making
diff --git a/src/backend/commands/typecmds.c b/src/backend/commands/typecmds.c
index 036fa69d17d..58ec65c6afc 100644
--- a/src/backend/commands/typecmds.c
+++ b/src/backend/commands/typecmds.c
@@ -1569,7 +1569,7 @@ DefineRange(CreateRangeStmt *stmt)
/* Create the multirange that goes with it */
if (multirangeTypeName)
{
- Oid old_typoid;
+ Oid old_typoid;
/*
* Look to see if multirange type already exists.
@@ -1579,8 +1579,8 @@ DefineRange(CreateRangeStmt *stmt)
ObjectIdGetDatum(multirangeNamespace));
/*
- * If it's not a shell, see if it's an autogenerated array type, and if so
- * rename it out of the way.
+ * If it's not a shell, see if it's an autogenerated array type, and
+ * if so rename it out of the way.
*/
if (OidIsValid(old_typoid) && get_typisdefined(old_typoid))
{
@@ -1600,7 +1600,7 @@ DefineRange(CreateRangeStmt *stmt)
mltrngaddress =
TypeCreate(multirangeOid, /* force assignment of this type OID */
multirangeTypeName, /* type name */
- multirangeNamespace, /* namespace */
+ multirangeNamespace, /* namespace */
InvalidOid, /* relation oid (n/a here) */
0, /* relation kind (ditto) */
GetUserId(), /* owner's ID */
@@ -1682,7 +1682,7 @@ DefineRange(CreateRangeStmt *stmt)
TypeCreate(multirangeArrayOid, /* force assignment of this type OID */
multirangeArrayName, /* type name */
- multirangeNamespace, /* namespace */
+ multirangeNamespace, /* namespace */
InvalidOid, /* relation oid (n/a here) */
0, /* relation kind (ditto) */
GetUserId(), /* owner's ID */
@@ -1844,7 +1844,7 @@ makeMultirangeConstructors(const char *name, Oid namespace,
PROKIND_FUNCTION,
false, /* security_definer */
false, /* leakproof */
- true, /* isStrict */
+ true, /* isStrict */
PROVOLATILE_IMMUTABLE, /* volatility */
PROPARALLEL_SAFE, /* parallel safety */
argtypes, /* parameterTypes */
@@ -1929,7 +1929,7 @@ makeMultirangeConstructors(const char *name, Oid namespace,
PROKIND_FUNCTION,
false, /* security_definer */
false, /* leakproof */
- true, /* isStrict */
+ true, /* isStrict */
PROVOLATILE_IMMUTABLE, /* volatility */
PROPARALLEL_SAFE, /* parallel safety */
argtypes, /* parameterTypes */
diff --git a/src/backend/commands/vacuum.c b/src/backend/commands/vacuum.c
index 39df05c7352..d549d0d86fb 100644
--- a/src/backend/commands/vacuum.c
+++ b/src/backend/commands/vacuum.c
@@ -1173,8 +1173,8 @@ vacuum_xid_failsafe_check(TransactionId relfrozenxid, MultiXactId relminmxid)
/*
* Similar to above, determine the index skipping age to use for
- * multixact. In any case no less than autovacuum_multixact_freeze_max_age
- * * 1.05.
+ * multixact. In any case no less than autovacuum_multixact_freeze_max_age *
+ * 1.05.
*/
skip_index_vacuum = Max(vacuum_multixact_failsafe_age,
autovacuum_multixact_freeze_max_age * 1.05);
diff --git a/src/backend/executor/execAmi.c b/src/backend/executor/execAmi.c
index b3726a54f37..10f0b349b58 100644
--- a/src/backend/executor/execAmi.c
+++ b/src/backend/executor/execAmi.c
@@ -427,6 +427,7 @@ ExecSupportsMarkRestore(Path *pathnode)
{
case T_IndexScan:
case T_IndexOnlyScan:
+
/*
* Not all index types support mark/restore.
*/
diff --git a/src/backend/executor/execAsync.c b/src/backend/executor/execAsync.c
index 75108d36be2..94a284a31e1 100644
--- a/src/backend/executor/execAsync.c
+++ b/src/backend/executor/execAsync.c
@@ -26,7 +26,7 @@ void
ExecAsyncRequest(AsyncRequest *areq)
{
if (areq->requestee->chgParam != NULL) /* something changed? */
- ExecReScan(areq->requestee); /* let ReScan handle this */
+ ExecReScan(areq->requestee); /* let ReScan handle this */
/* must provide our own instrumentation support */
if (areq->requestee->instrument)
@@ -124,7 +124,7 @@ ExecAsyncResponse(AsyncRequest *areq)
default:
/* If the node doesn't support async, caller messed up. */
elog(ERROR, "unrecognized node type: %d",
- (int) nodeTag(areq->requestor));
+ (int) nodeTag(areq->requestor));
}
}
diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c
index 58b49687350..b3ce4bae530 100644
--- a/src/backend/executor/execMain.c
+++ b/src/backend/executor/execMain.c
@@ -132,8 +132,8 @@ ExecutorStart(QueryDesc *queryDesc, int eflags)
/*
* In some cases (e.g. an EXECUTE statement) a query execution will skip
* parse analysis, which means that the query_id won't be reported. Note
- * that it's harmless to report the query_id multiple times, as the call will
- * be ignored if the top level query_id has already been reported.
+ * that it's harmless to report the query_id multiple times, as the call
+ * will be ignored if the top level query_id has already been reported.
*/
pgstat_report_query_id(queryDesc->plannedstmt->queryId, false);
diff --git a/src/backend/executor/execPartition.c b/src/backend/executor/execPartition.c
index 8e2feafd28c..606c920b068 100644
--- a/src/backend/executor/execPartition.c
+++ b/src/backend/executor/execPartition.c
@@ -917,8 +917,8 @@ ExecInitRoutingInfo(ModifyTableState *mtstate,
partRelInfo->ri_FdwRoutine->BeginForeignInsert(mtstate, partRelInfo);
/*
- * Determine if the FDW supports batch insert and determine the batch
- * size (a FDW may support batching, but it may be disabled for the
+ * Determine if the FDW supports batch insert and determine the batch size
+ * (a FDW may support batching, but it may be disabled for the
* server/table or for this particular query).
*
* If the FDW does not support batching, we set the batch size to 1.
diff --git a/src/backend/executor/nodeAgg.c b/src/backend/executor/nodeAgg.c
index d80adc519dd..8440a76fbdc 100644
--- a/src/backend/executor/nodeAgg.c
+++ b/src/backend/executor/nodeAgg.c
@@ -349,7 +349,7 @@ typedef struct HashAggSpill
int64 *ntuples; /* number of tuples in each partition */
uint32 mask; /* mask to find partition from hash value */
int shift; /* after masking, shift by this amount */
- hyperLogLogState *hll_card; /* cardinality estimate for contents */
+ hyperLogLogState *hll_card; /* cardinality estimate for contents */
} HashAggSpill;
/*
@@ -374,9 +374,9 @@ typedef struct HashAggBatch
/* used to find referenced colnos */
typedef struct FindColsContext
{
- bool is_aggref; /* is under an aggref */
- Bitmapset *aggregated; /* column references under an aggref */
- Bitmapset *unaggregated; /* other column references */
+ bool is_aggref; /* is under an aggref */
+ Bitmapset *aggregated; /* column references under an aggref */
+ Bitmapset *unaggregated; /* other column references */
} FindColsContext;
static void select_current_set(AggState *aggstate, int setno, bool is_hash);
@@ -1397,7 +1397,7 @@ project_aggregates(AggState *aggstate)
static void
find_cols(AggState *aggstate, Bitmapset **aggregated, Bitmapset **unaggregated)
{
- Agg *agg = (Agg *) aggstate->ss.ps.plan;
+ Agg *agg = (Agg *) aggstate->ss.ps.plan;
FindColsContext context;
context.is_aggref = false;
@@ -1579,7 +1579,8 @@ find_hash_columns(AggState *aggstate)
for (int i = 0; i < scanDesc->natts; i++)
{
- int colno = i + 1;
+ int colno = i + 1;
+
if (bms_is_member(colno, aggstate->colnos_needed))
aggstate->max_colno_needed = colno;
else
@@ -3158,10 +3159,10 @@ hashagg_spill_finish(AggState *aggstate, HashAggSpill *spill, int setno)
for (i = 0; i < spill->npartitions; i++)
{
- LogicalTapeSet *tapeset = aggstate->hash_tapeinfo->tapeset;
- int tapenum = spill->partitions[i];
- HashAggBatch *new_batch;
- double cardinality;
+ LogicalTapeSet *tapeset = aggstate->hash_tapeinfo->tapeset;
+ int tapenum = spill->partitions[i];
+ HashAggBatch *new_batch;
+ double cardinality;
/* if the partition is empty, don't create a new batch of work */
if (spill->ntuples[i] == 0)
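The HashAggSpill fields realigned above (a mask to find the partition from the hash value, then a shift) imply a power-of-two partitioning of spilled tuples by a bit-field of the hash. A standalone sketch of that computation; the field names are from the struct, but the bit layout chosen here is an assumption for illustration:

#include <stdint.h>
#include <stdio.h>

/*
 * Pick a spill partition from a hash value: mask off a bit-field, then
 * shift it down, mirroring the HashAggSpill mask/shift fields.
 */
static int
demo_spill_partition(uint32_t hash, uint32_t mask, int shift)
{
	return (int) ((hash & mask) >> shift);
}

int
main(void)
{
	/* 4 partitions taken from bits 8..9 of the hash (assumed layout) */
	uint32_t	mask = 0x300;
	int			shift = 8;

	printf("partition = %d\n", demo_spill_partition(0xdeadbeef, mask, shift));
	return 0;
}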
diff --git a/src/backend/executor/nodeAppend.c b/src/backend/executor/nodeAppend.c
index 1558fafad1e..62335ed4c47 100644
--- a/src/backend/executor/nodeAppend.c
+++ b/src/backend/executor/nodeAppend.c
@@ -566,9 +566,9 @@ choose_next_subplan_locally(AppendState *node)
/*
* If first call then have the bms member function choose the first valid
- * sync subplan by initializing whichplan to -1. If there happen to be
- * no valid sync subplans then the bms member function will handle that
- * by returning a negative number which will allow us to exit returning a
+ * sync subplan by initializing whichplan to -1. If there happen to be no
+ * valid sync subplans then the bms member function will handle that by
+ * returning a negative number which will allow us to exit returning a
* false value.
*/
if (whichplan == INVALID_SUBPLAN_INDEX)
@@ -925,8 +925,8 @@ ExecAppendAsyncGetNext(AppendState *node, TupleTableSlot **result)
/*
* If all sync subplans are complete, we're totally done scanning the
- * given node. Otherwise, we're done with the asynchronous stuff but
- * must continue scanning the sync subplans.
+ * given node. Otherwise, we're done with the asynchronous stuff but must
+ * continue scanning the sync subplans.
*/
if (node->as_syncdone)
{
@@ -1003,7 +1003,7 @@ ExecAppendAsyncEventWait(AppendState *node)
{
int nevents = node->as_nasyncplans + 1;
long timeout = node->as_syncdone ? -1 : 0;
- WaitEvent occurred_event[EVENT_BUFFER_SIZE];
+ WaitEvent occurred_event[EVENT_BUFFER_SIZE];
int noccurred;
int i;
@@ -1054,8 +1054,8 @@ ExecAppendAsyncEventWait(AppendState *node)
/*
* Mark it as no longer needing a callback. We must do this
- * before dispatching the callback in case the callback resets
- * the flag.
+ * before dispatching the callback in case the callback resets the
+ * flag.
*/
Assert(areq->callback_pending);
areq->callback_pending = false;
diff --git a/src/backend/executor/nodeGather.c b/src/backend/executor/nodeGather.c
index 9e1dc464cb0..734142b7b16 100644
--- a/src/backend/executor/nodeGather.c
+++ b/src/backend/executor/nodeGather.c
@@ -266,7 +266,7 @@ gather_getnext(GatherState *gatherstate)
PlanState *outerPlan = outerPlanState(gatherstate);
TupleTableSlot *outerTupleSlot;
TupleTableSlot *fslot = gatherstate->funnel_slot;
- MinimalTuple tup;
+ MinimalTuple tup;
while (gatherstate->nreaders > 0 || gatherstate->need_to_scan_locally)
{
@@ -278,7 +278,7 @@ gather_getnext(GatherState *gatherstate)
if (HeapTupleIsValid(tup))
{
- ExecStoreMinimalTuple(tup, /* tuple to store */
+ ExecStoreMinimalTuple(tup, /* tuple to store */
fslot, /* slot to store the tuple */
false); /* don't pfree tuple */
return fslot;
diff --git a/src/backend/executor/nodeGatherMerge.c b/src/backend/executor/nodeGatherMerge.c
index aa5743cebfc..03f02a19aab 100644
--- a/src/backend/executor/nodeGatherMerge.c
+++ b/src/backend/executor/nodeGatherMerge.c
@@ -700,9 +700,9 @@ gather_merge_readnext(GatherMergeState *gm_state, int reader, bool nowait)
Assert(tup);
/* Build the TupleTableSlot for the given tuple */
- ExecStoreMinimalTuple(tup, /* tuple to store */
- gm_state->gm_slots[reader], /* slot in which to store
- * the tuple */
+ ExecStoreMinimalTuple(tup, /* tuple to store */
+ gm_state->gm_slots[reader], /* slot in which to
+ * store the tuple */
true); /* pfree tuple when done with it */
return true;
diff --git a/src/backend/executor/nodeIncrementalSort.c b/src/backend/executor/nodeIncrementalSort.c
index 18f246a8233..934426a6679 100644
--- a/src/backend/executor/nodeIncrementalSort.c
+++ b/src/backend/executor/nodeIncrementalSort.c
@@ -1162,8 +1162,8 @@ ExecReScanIncrementalSort(IncrementalSortState *node)
}
/*
- * If chgParam of subnode is not null, then the plan will be re-scanned
- * by the first ExecProcNode.
+ * If chgParam of subnode is not null, then the plan will be re-scanned by
+ * the first ExecProcNode.
*/
if (outerPlan->chgParam == NULL)
ExecReScan(outerPlan);
diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c
index a62928ae7ce..0816027f7f7 100644
--- a/src/backend/executor/nodeModifyTable.c
+++ b/src/backend/executor/nodeModifyTable.c
@@ -61,12 +61,12 @@ typedef struct MTTargetRelLookup
} MTTargetRelLookup;
static void ExecBatchInsert(ModifyTableState *mtstate,
- ResultRelInfo *resultRelInfo,
- TupleTableSlot **slots,
- TupleTableSlot **planSlots,
- int numSlots,
- EState *estate,
- bool canSetTag);
+ ResultRelInfo *resultRelInfo,
+ TupleTableSlot **slots,
+ TupleTableSlot **planSlots,
+ int numSlots,
+ EState *estate,
+ bool canSetTag);
static bool ExecOnConflictUpdate(ModifyTableState *mtstate,
ResultRelInfo *resultRelInfo,
ItemPointer conflictTid,
@@ -673,17 +673,17 @@ ExecInsert(ModifyTableState *mtstate,
if (resultRelInfo->ri_BatchSize > 1)
{
/*
- * If a certain number of tuples have already been accumulated,
- * or a tuple has come for a different relation than that for
- * the accumulated tuples, perform the batch insert
+ * If a certain number of tuples have already been accumulated, or
+ * a tuple has come for a different relation than that for the
+ * accumulated tuples, perform the batch insert
*/
if (resultRelInfo->ri_NumSlots == resultRelInfo->ri_BatchSize)
{
ExecBatchInsert(mtstate, resultRelInfo,
- resultRelInfo->ri_Slots,
- resultRelInfo->ri_PlanSlots,
- resultRelInfo->ri_NumSlots,
- estate, canSetTag);
+ resultRelInfo->ri_Slots,
+ resultRelInfo->ri_PlanSlots,
+ resultRelInfo->ri_NumSlots,
+ estate, canSetTag);
resultRelInfo->ri_NumSlots = 0;
}
@@ -692,9 +692,9 @@ ExecInsert(ModifyTableState *mtstate,
if (resultRelInfo->ri_Slots == NULL)
{
resultRelInfo->ri_Slots = palloc(sizeof(TupleTableSlot *) *
- resultRelInfo->ri_BatchSize);
+ resultRelInfo->ri_BatchSize);
resultRelInfo->ri_PlanSlots = palloc(sizeof(TupleTableSlot *) *
- resultRelInfo->ri_BatchSize);
+ resultRelInfo->ri_BatchSize);
}
resultRelInfo->ri_Slots[resultRelInfo->ri_NumSlots] =
@@ -982,12 +982,12 @@ ExecInsert(ModifyTableState *mtstate,
*/
static void
ExecBatchInsert(ModifyTableState *mtstate,
- ResultRelInfo *resultRelInfo,
- TupleTableSlot **slots,
- TupleTableSlot **planSlots,
- int numSlots,
- EState *estate,
- bool canSetTag)
+ ResultRelInfo *resultRelInfo,
+ TupleTableSlot **slots,
+ TupleTableSlot **planSlots,
+ int numSlots,
+ EState *estate,
+ bool canSetTag)
{
int i;
int numInserted = numSlots;
@@ -998,10 +998,10 @@ ExecBatchInsert(ModifyTableState *mtstate,
* insert into foreign table: let the FDW do it
*/
rslots = resultRelInfo->ri_FdwRoutine->ExecForeignBatchInsert(estate,
- resultRelInfo,
- slots,
- planSlots,
- &numInserted);
+ resultRelInfo,
+ slots,
+ planSlots,
+ &numInserted);
for (i = 0; i < numInserted; i++)
{
@@ -2604,10 +2604,10 @@ ExecModifyTable(PlanState *pstate)
resultRelInfo = lfirst(lc);
if (resultRelInfo->ri_NumSlots > 0)
ExecBatchInsert(node, resultRelInfo,
- resultRelInfo->ri_Slots,
- resultRelInfo->ri_PlanSlots,
- resultRelInfo->ri_NumSlots,
- estate, node->canSetTag);
+ resultRelInfo->ri_Slots,
+ resultRelInfo->ri_PlanSlots,
+ resultRelInfo->ri_NumSlots,
+ estate, node->canSetTag);
}
/*
@@ -3091,12 +3091,12 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
mtstate->mt_resultOidHash = NULL;
/*
- * Determine if the FDW supports batch insert and determine the batch
- * size (a FDW may support batching, but it may be disabled for the
+ * Determine if the FDW supports batch insert and determine the batch size
+ * (a FDW may support batching, but it may be disabled for the
* server/table).
*
- * We only do this for INSERT, so that for UPDATE/DELETE the batch
- * size remains set to 0.
+ * We only do this for INSERT, so that for UPDATE/DELETE the batch size
+ * remains set to 0.
*/
if (operation == CMD_INSERT)
{
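The comments rewrapped in ExecInsert() and ExecModifyTable() spell out the FDW batching contract: accumulate slots per result relation, flush through the batch-insert routine once ri_NumSlots reaches ri_BatchSize, and flush any remainder at end of statement. A minimal self-contained sketch of that accumulate-then-flush shape, with toy types in place of the executor's slots and FDW callback:

#include <stdio.h>

#define DEMO_BATCH_SIZE 3

typedef struct DemoBatch
{
	int			slots[DEMO_BATCH_SIZE]; /* stand-in for ri_Slots */
	int			nslots;			/* stand-in for ri_NumSlots */
} DemoBatch;

/* Stand-in for the FDW's batch-insert callback. */
static void
demo_flush(DemoBatch *batch)
{
	printf("flushing %d rows\n", batch->nslots);
	batch->nslots = 0;
}

static void
demo_insert(DemoBatch *batch, int row)
{
	/* Flush first if the batch is already full, as ExecInsert() does. */
	if (batch->nslots == DEMO_BATCH_SIZE)
		demo_flush(batch);
	batch->slots[batch->nslots++] = row;
}

int
main(void)
{
	DemoBatch	b = {{0}, 0};

	for (int i = 0; i < 7; i++)
		demo_insert(&b, i);
	if (b.nslots > 0)			/* final flush, as in ExecModifyTable() */
		demo_flush(&b);
	return 0;
}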
diff --git a/src/backend/jit/llvm/llvmjit.c b/src/backend/jit/llvm/llvmjit.c
index 98a27f08bfd..71029a39a98 100644
--- a/src/backend/jit/llvm/llvmjit.c
+++ b/src/backend/jit/llvm/llvmjit.c
@@ -769,9 +769,9 @@ llvm_compile_module(LLVMJitContext *context)
ereport(DEBUG1,
(errmsg_internal("time to inline: %.3fs, opt: %.3fs, emit: %.3fs",
- INSTR_TIME_GET_DOUBLE(context->base.instr.inlining_counter),
- INSTR_TIME_GET_DOUBLE(context->base.instr.optimization_counter),
- INSTR_TIME_GET_DOUBLE(context->base.instr.emission_counter)),
+ INSTR_TIME_GET_DOUBLE(context->base.instr.inlining_counter),
+ INSTR_TIME_GET_DOUBLE(context->base.instr.optimization_counter),
+ INSTR_TIME_GET_DOUBLE(context->base.instr.emission_counter)),
errhidestmt(true),
errhidecontext(true)));
}
@@ -1094,7 +1094,7 @@ llvm_resolve_symbol(const char *symname, void *ctx)
static LLVMErrorRef
llvm_resolve_symbols(LLVMOrcDefinitionGeneratorRef GeneratorObj, void *Ctx,
- LLVMOrcLookupStateRef *LookupState, LLVMOrcLookupKind Kind,
+ LLVMOrcLookupStateRef * LookupState, LLVMOrcLookupKind Kind,
LLVMOrcJITDylibRef JD, LLVMOrcJITDylibLookupFlags JDLookupFlags,
LLVMOrcCLookupSet LookupSet, size_t LookupSetSize)
{
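"LLVMOrcLookupStateRef *LookupState" coming back as "LLVMOrcLookupStateRef * LookupState" is the usual tell that a name is missing from pgindent's typedefs list: not recognizing the identifier as a type, the tool spaces the "*" as if it were a binary operator. A tiny illustration of the two spellings; both compile, only the layout differs:

typedef int DemoRef;

/* Recognized typedef: pgindent writes "DemoRef *arg". */
static int
demo_known(DemoRef *arg)
{
	return *arg;
}

/* Unrecognized typedef: pgindent would emit "DemoRef * arg" instead. */
static int
demo_unknown(DemoRef * arg)
{
	return *arg;
}

int
main(void)
{
	DemoRef		v = 1;

	return demo_known(&v) + demo_unknown(&v) - 2;
}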
diff --git a/src/backend/libpq/auth.c b/src/backend/libpq/auth.c
index 45a91235a45..68372fcea87 100644
--- a/src/backend/libpq/auth.c
+++ b/src/backend/libpq/auth.c
@@ -711,17 +711,16 @@ recv_password_packet(Port *port)
if (mtype != 'p')
{
/*
- * If the client just disconnects without offering a password,
- * don't make a log entry. This is legal per protocol spec and in
- * fact commonly done by psql, so complaining just clutters the
- * log.
+ * If the client just disconnects without offering a password, don't
+ * make a log entry. This is legal per protocol spec and in fact
+ * commonly done by psql, so complaining just clutters the log.
*/
if (mtype != EOF)
ereport(ERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
errmsg("expected password response, got message type %d",
mtype)));
- return NULL; /* EOF or bad message type */
+ return NULL; /* EOF or bad message type */
}
initStringInfo(&buf);
diff --git a/src/backend/libpq/be-secure-openssl.c b/src/backend/libpq/be-secure-openssl.c
index 40deab13c7d..c4e8113241d 100644
--- a/src/backend/libpq/be-secure-openssl.c
+++ b/src/backend/libpq/be-secure-openssl.c
@@ -602,6 +602,7 @@ aloop:
port->peer_cn = NULL;
return -1;
}
+
/*
* RFC2253 is the closest thing to an accepted standard format for
* DNs. We have documented how to produce this format from a
diff --git a/src/backend/libpq/pqcomm.c b/src/backend/libpq/pqcomm.c
index b9ccd4473f7..89a5f901aa0 100644
--- a/src/backend/libpq/pqcomm.c
+++ b/src/backend/libpq/pqcomm.c
@@ -175,8 +175,8 @@ WaitEventSet *FeBeWaitSet;
void
pq_init(void)
{
- int socket_pos PG_USED_FOR_ASSERTS_ONLY;
- int latch_pos PG_USED_FOR_ASSERTS_ONLY;
+ int socket_pos PG_USED_FOR_ASSERTS_ONLY;
+ int latch_pos PG_USED_FOR_ASSERTS_ONLY;
/* initialize state variables */
PqSendBufferSize = PQ_SEND_BUFFER_SIZE;
diff --git a/src/backend/optimizer/plan/createplan.c b/src/backend/optimizer/plan/createplan.c
index 7003238d76b..b02f7809c96 100644
--- a/src/backend/optimizer/plan/createplan.c
+++ b/src/backend/optimizer/plan/createplan.c
@@ -1907,11 +1907,11 @@ create_gather_merge_plan(PlannerInfo *root, GatherMergePath *best_path)
/*
- * All gather merge paths should have already guaranteed the necessary sort
- * order either by adding an explicit sort node or by using presorted input.
- * We can't simply add a sort here on additional pathkeys, because we can't
- * guarantee the sort would be safe. For example, expressions may be
- * volatile or otherwise parallel unsafe.
+ * All gather merge paths should have already guaranteed the necessary
+ * sort order either by adding an explicit sort node or by using presorted
+ * input. We can't simply add a sort here on additional pathkeys, because
+ * we can't guarantee the sort would be safe. For example, expressions may
+ * be volatile or otherwise parallel unsafe.
*/
if (!pathkeys_contained_in(pathkeys, best_path->subpath->pathkeys))
elog(ERROR, "gather merge input not sufficiently sorted");
diff --git a/src/backend/optimizer/prep/preptlist.c b/src/backend/optimizer/prep/preptlist.c
index aefb6f8d4e8..e9434580d6d 100644
--- a/src/backend/optimizer/prep/preptlist.c
+++ b/src/backend/optimizer/prep/preptlist.c
@@ -350,34 +350,34 @@ expand_insert_targetlist(List *tlist, Relation rel)
Oid attcollation = att_tup->attcollation;
Node *new_expr;
- if (!att_tup->attisdropped)
- {
- new_expr = (Node *) makeConst(atttype,
- -1,
- attcollation,
- att_tup->attlen,
- (Datum) 0,
- true, /* isnull */
- att_tup->attbyval);
- new_expr = coerce_to_domain(new_expr,
- InvalidOid, -1,
- atttype,
- COERCION_IMPLICIT,
- COERCE_IMPLICIT_CAST,
- -1,
- false);
- }
- else
- {
- /* Insert NULL for dropped column */
- new_expr = (Node *) makeConst(INT4OID,
- -1,
- InvalidOid,
- sizeof(int32),
- (Datum) 0,
- true, /* isnull */
- true /* byval */ );
- }
+ if (!att_tup->attisdropped)
+ {
+ new_expr = (Node *) makeConst(atttype,
+ -1,
+ attcollation,
+ att_tup->attlen,
+ (Datum) 0,
+ true, /* isnull */
+ att_tup->attbyval);
+ new_expr = coerce_to_domain(new_expr,
+ InvalidOid, -1,
+ atttype,
+ COERCION_IMPLICIT,
+ COERCE_IMPLICIT_CAST,
+ -1,
+ false);
+ }
+ else
+ {
+ /* Insert NULL for dropped column */
+ new_expr = (Node *) makeConst(INT4OID,
+ -1,
+ InvalidOid,
+ sizeof(int32),
+ (Datum) 0,
+ true, /* isnull */
+ true /* byval */ );
+ }
new_tle = makeTargetEntry((Expr *) new_expr,
attrno,
diff --git a/src/backend/optimizer/util/clauses.c b/src/backend/optimizer/util/clauses.c
index d9ad4efc5ea..e117ab976e6 100644
--- a/src/backend/optimizer/util/clauses.c
+++ b/src/backend/optimizer/util/clauses.c
@@ -2563,9 +2563,9 @@ eval_const_expressions_mutator(Node *node,
}
case T_NullIfExpr:
{
- NullIfExpr *expr;
- ListCell *arg;
- bool has_nonconst_input = false;
+ NullIfExpr *expr;
+ ListCell *arg;
+ bool has_nonconst_input = false;
/* Copy the node and const-simplify its arguments */
expr = (NullIfExpr *) ece_generic_processing(node);
@@ -4359,49 +4359,49 @@ inline_function(Oid funcid, Oid result_type, Oid result_collid,
}
else
{
- /*
- * Set up to handle parameters while parsing the function body. We need a
- * dummy FuncExpr node containing the already-simplified arguments to pass
- * to prepare_sql_fn_parse_info. (In some cases we don't really need
- * that, but for simplicity we always build it.)
- */
- fexpr = makeNode(FuncExpr);
- fexpr->funcid = funcid;
- fexpr->funcresulttype = result_type;
- fexpr->funcretset = false;
- fexpr->funcvariadic = funcvariadic;
- fexpr->funcformat = COERCE_EXPLICIT_CALL; /* doesn't matter */
- fexpr->funccollid = result_collid; /* doesn't matter */
- fexpr->inputcollid = input_collid;
- fexpr->args = args;
- fexpr->location = -1;
-
- pinfo = prepare_sql_fn_parse_info(func_tuple,
- (Node *) fexpr,
- input_collid);
-
- /* fexpr also provides a convenient way to resolve a composite result */
- (void) get_expr_result_type((Node *) fexpr,
- NULL,
- &rettupdesc);
+ /*
+ * Set up to handle parameters while parsing the function body. We
+ * need a dummy FuncExpr node containing the already-simplified
+ * arguments to pass to prepare_sql_fn_parse_info. (In some cases we
+ * don't really need that, but for simplicity we always build it.)
+ */
+ fexpr = makeNode(FuncExpr);
+ fexpr->funcid = funcid;
+ fexpr->funcresulttype = result_type;
+ fexpr->funcretset = false;
+ fexpr->funcvariadic = funcvariadic;
+ fexpr->funcformat = COERCE_EXPLICIT_CALL; /* doesn't matter */
+ fexpr->funccollid = result_collid; /* doesn't matter */
+ fexpr->inputcollid = input_collid;
+ fexpr->args = args;
+ fexpr->location = -1;
+
+ pinfo = prepare_sql_fn_parse_info(func_tuple,
+ (Node *) fexpr,
+ input_collid);
+
+ /* fexpr also provides a convenient way to resolve a composite result */
+ (void) get_expr_result_type((Node *) fexpr,
+ NULL,
+ &rettupdesc);
- /*
- * We just do parsing and parse analysis, not rewriting, because rewriting
- * will not affect table-free-SELECT-only queries, which is all that we
- * care about. Also, we can punt as soon as we detect more than one
- * command in the function body.
- */
- raw_parsetree_list = pg_parse_query(src);
- if (list_length(raw_parsetree_list) != 1)
- goto fail;
+ /*
+ * We just do parsing and parse analysis, not rewriting, because
+ * rewriting will not affect table-free-SELECT-only queries, which is
+ * all that we care about. Also, we can punt as soon as we detect
+ * more than one command in the function body.
+ */
+ raw_parsetree_list = pg_parse_query(src);
+ if (list_length(raw_parsetree_list) != 1)
+ goto fail;
- pstate = make_parsestate(NULL);
- pstate->p_sourcetext = src;
- sql_fn_parser_setup(pstate, pinfo);
+ pstate = make_parsestate(NULL);
+ pstate->p_sourcetext = src;
+ sql_fn_parser_setup(pstate, pinfo);
- querytree = transformTopLevelStmt(pstate, linitial(raw_parsetree_list));
+ querytree = transformTopLevelStmt(pstate, linitial(raw_parsetree_list));
- free_parsestate(pstate);
+ free_parsestate(pstate);
}
/*
@@ -4931,31 +4931,31 @@ inline_set_returning_function(PlannerInfo *root, RangeTblEntry *rte)
}
else
{
- /*
- * Set up to handle parameters while parsing the function body. We can
- * use the FuncExpr just created as the input for
- * prepare_sql_fn_parse_info.
- */
- pinfo = prepare_sql_fn_parse_info(func_tuple,
- (Node *) fexpr,
- fexpr->inputcollid);
+ /*
+ * Set up to handle parameters while parsing the function body. We
+ * can use the FuncExpr just created as the input for
+ * prepare_sql_fn_parse_info.
+ */
+ pinfo = prepare_sql_fn_parse_info(func_tuple,
+ (Node *) fexpr,
+ fexpr->inputcollid);
- /*
- * Parse, analyze, and rewrite (unlike inline_function(), we can't skip
- * rewriting here). We can fail as soon as we find more than one query,
- * though.
- */
- raw_parsetree_list = pg_parse_query(src);
- if (list_length(raw_parsetree_list) != 1)
- goto fail;
+ /*
+ * Parse, analyze, and rewrite (unlike inline_function(), we can't
+ * skip rewriting here). We can fail as soon as we find more than one
+ * query, though.
+ */
+ raw_parsetree_list = pg_parse_query(src);
+ if (list_length(raw_parsetree_list) != 1)
+ goto fail;
- querytree_list = pg_analyze_and_rewrite_params(linitial(raw_parsetree_list),
- src,
- (ParserSetupHook) sql_fn_parser_setup,
- pinfo, NULL);
- if (list_length(querytree_list) != 1)
- goto fail;
- querytree = linitial(querytree_list);
+ querytree_list = pg_analyze_and_rewrite_params(linitial(raw_parsetree_list),
+ src,
+ (ParserSetupHook) sql_fn_parser_setup,
+ pinfo, NULL);
+ if (list_length(querytree_list) != 1)
+ goto fail;
+ querytree = linitial(querytree_list);
}
/*
diff --git a/src/backend/parser/analyze.c b/src/backend/parser/analyze.c
index e415bc3df0f..168198acd14 100644
--- a/src/backend/parser/analyze.c
+++ b/src/backend/parser/analyze.c
@@ -2752,7 +2752,7 @@ transformDeclareCursorStmt(ParseState *pstate, DeclareCursorStmt *stmt)
(stmt->options & CURSOR_OPT_NO_SCROLL))
ereport(ERROR,
(errcode(ERRCODE_INVALID_CURSOR_DEFINITION),
- /* translator: %s is a SQL keyword */
+ /* translator: %s is a SQL keyword */
errmsg("cannot specify both %s and %s",
"SCROLL", "NO SCROLL")));
@@ -2760,7 +2760,7 @@ transformDeclareCursorStmt(ParseState *pstate, DeclareCursorStmt *stmt)
(stmt->options & CURSOR_OPT_INSENSITIVE))
ereport(ERROR,
(errcode(ERRCODE_INVALID_CURSOR_DEFINITION),
- /* translator: %s is a SQL keyword */
+ /* translator: %s is a SQL keyword */
errmsg("cannot specify both %s and %s",
"ASENSITIVE", "INSENSITIVE")));
diff --git a/src/backend/parser/parse_agg.c b/src/backend/parser/parse_agg.c
index ceb0bf597d6..9562ffcf3e2 100644
--- a/src/backend/parser/parse_agg.c
+++ b/src/backend/parser/parse_agg.c
@@ -1749,19 +1749,20 @@ cmp_list_len_asc(const ListCell *a, const ListCell *b)
static int
cmp_list_len_contents_asc(const ListCell *a, const ListCell *b)
{
- int res = cmp_list_len_asc(a, b);
+ int res = cmp_list_len_asc(a, b);
if (res == 0)
{
- List *la = (List *) lfirst(a);
- List *lb = (List *) lfirst(b);
- ListCell *lca;
- ListCell *lcb;
+ List *la = (List *) lfirst(a);
+ List *lb = (List *) lfirst(b);
+ ListCell *lca;
+ ListCell *lcb;
forboth(lca, la, lcb, lb)
{
- int va = lfirst_int(lca);
- int vb = lfirst_int(lcb);
+ int va = lfirst_int(lca);
+ int vb = lfirst_int(lcb);
+
if (va > vb)
return 1;
if (va < vb)
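cmp_list_len_contents_asc() above orders integer lists first by length and then elementwise on the first differing value. A standalone comparator over plain arrays capturing the same ordering, usable with qsort; the names are invented:

#include <stdio.h>
#include <stdlib.h>

typedef struct DemoIntList
{
	int			len;
	const int  *items;
} DemoIntList;

/* Order by length first, then by the first differing element. */
static int
demo_cmp_len_contents(const void *a, const void *b)
{
	const DemoIntList *la = a;
	const DemoIntList *lb = b;

	if (la->len != lb->len)
		return (la->len > lb->len) ? 1 : -1;
	for (int i = 0; i < la->len; i++)
	{
		if (la->items[i] != lb->items[i])
			return (la->items[i] > lb->items[i]) ? 1 : -1;
	}
	return 0;
}

int
main(void)
{
	static const int x[] = {1, 2};
	static const int y[] = {1, 3};
	DemoIntList lists[] = {{2, y}, {2, x}};

	qsort(lists, 2, sizeof(DemoIntList), demo_cmp_len_contents);
	printf("first list ends with %d\n", lists[0].items[1]);
	return 0;
}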
diff --git a/src/backend/parser/parse_cte.c b/src/backend/parser/parse_cte.c
index ee7613187aa..f6ae96333af 100644
--- a/src/backend/parser/parse_cte.c
+++ b/src/backend/parser/parse_cte.c
@@ -356,7 +356,7 @@ analyzeCTE(ParseState *pstate, CommonTableExpr *cte)
* than just being recursive. It basically means the query expression
* looks like
*
- * non-recursive query UNION [ALL] recursive query
+ * non-recursive query UNION [ALL] recursive query
*
* and that the recursive query is not itself a set operation.
*
diff --git a/src/backend/parser/parse_relation.c b/src/backend/parser/parse_relation.c
index d451f055f72..74659190447 100644
--- a/src/backend/parser/parse_relation.c
+++ b/src/backend/parser/parse_relation.c
@@ -2360,8 +2360,8 @@ addRangeTableEntryForCTE(ParseState *pstate,
* list --- caller must do that if appropriate.
*/
psi = buildNSItemFromLists(rte, list_length(pstate->p_rtable),
- rte->coltypes, rte->coltypmods,
- rte->colcollations);
+ rte->coltypes, rte->coltypmods,
+ rte->colcollations);
/*
* The columns added by search and cycle clauses are not included in star
diff --git a/src/backend/parser/parse_utilcmd.c b/src/backend/parser/parse_utilcmd.c
index 48cce4567b4..d5b67d48cfc 100644
--- a/src/backend/parser/parse_utilcmd.c
+++ b/src/backend/parser/parse_utilcmd.c
@@ -447,8 +447,8 @@ generateSerialExtraStmts(CreateStmtContext *cxt, ColumnDef *column,
ereport(DEBUG1,
(errmsg_internal("%s will create implicit sequence \"%s\" for serial column \"%s.%s\"",
- cxt->stmtType, sname,
- cxt->relation->relname, column->colname)));
+ cxt->stmtType, sname,
+ cxt->relation->relname, column->colname)));
/*
* Build a CREATE SEQUENCE command to create the sequence object, and add
diff --git a/src/backend/partitioning/partbounds.c b/src/backend/partitioning/partbounds.c
index d3dedfd7844..7925fcce3b3 100644
--- a/src/backend/partitioning/partbounds.c
+++ b/src/backend/partitioning/partbounds.c
@@ -2876,7 +2876,10 @@ check_new_partition_bound(char *relname, Relation parent,
{
int prev_modulus;
- /* We found the largest modulus less than or equal to ours. */
+ /*
+ * We found the largest modulus less than or equal to
+ * ours.
+ */
prev_modulus = DatumGetInt32(boundinfo->datums[offset][0]);
if (spec->modulus % prev_modulus != 0)
@@ -3171,7 +3174,7 @@ check_default_partition_contents(Relation parent, Relation default_rel,
{
ereport(DEBUG1,
(errmsg_internal("updated partition constraint for default partition \"%s\" is implied by existing constraints",
- RelationGetRelationName(default_rel))));
+ RelationGetRelationName(default_rel))));
return;
}
@@ -3222,7 +3225,7 @@ check_default_partition_contents(Relation parent, Relation default_rel,
{
ereport(DEBUG1,
(errmsg_internal("updated partition constraint for default partition \"%s\" is implied by existing constraints",
- RelationGetRelationName(part_rel))));
+ RelationGetRelationName(part_rel))));
table_close(part_rel, NoLock);
continue;
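The comment rebuilt in check_new_partition_bound() concerns hash-partition moduli: for any two moduli in the bound list the smaller must divide the larger, so after locating the greatest existing modulus less than or equal to the new one, "spec->modulus % prev_modulus != 0" means the new bound is rejected. A short worked check under that rule (moduli 4 and 8 can coexist; 6 cannot join them), as a hedged sketch rather than the backend's actual validation path:

#include <stdbool.h>
#include <stdio.h>

/*
 * A new hash-partition modulus is acceptable against an existing one iff
 * the smaller divides the larger, per the rule the comment describes.
 */
static bool
demo_modulus_compatible(int existing, int candidate)
{
	if (candidate >= existing)
		return candidate % existing == 0;
	return existing % candidate == 0;
}

int
main(void)
{
	printf("8 vs 4: %d\n", demo_modulus_compatible(4, 8));	/* 1: ok */
	printf("6 vs 4: %d\n", demo_modulus_compatible(4, 6));	/* 0: rejected */
	return 0;
}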
diff --git a/src/backend/partitioning/partdesc.c b/src/backend/partitioning/partdesc.c
index cf1ca0fe5f7..9a9d6a9643f 100644
--- a/src/backend/partitioning/partdesc.c
+++ b/src/backend/partitioning/partdesc.c
@@ -336,8 +336,8 @@ RelationBuildPartitionDesc(Relation rel, bool omit_detached)
* descriptor, it contains an old partition descriptor that may still be
* referenced somewhere. Preserve it, while not leaking it, by
* reattaching it as a child context of the new one. Eventually it will
- * get dropped by either RelationClose or RelationClearRelation.
- * (We keep the regular partdesc in rd_pdcxt, and the partdesc-excluding-
+ * get dropped by either RelationClose or RelationClearRelation. (We keep
+ * the regular partdesc in rd_pdcxt, and the partdesc-excluding-
* detached-partitions in rd_pddcxt.)
*/
if (is_omit)
diff --git a/src/backend/port/win32_shmem.c b/src/backend/port/win32_shmem.c
index 6140ee7617f..d7a71992d81 100644
--- a/src/backend/port/win32_shmem.c
+++ b/src/backend/port/win32_shmem.c
@@ -142,7 +142,11 @@ EnableLockPagesPrivilege(int elevel)
{
ereport(elevel,
(errmsg("could not enable user right \"%s\": error code %lu",
- /* translator: This is a term from Windows and should be translated to match the Windows localization. */
+
+ /*
+ * translator: This is a term from Windows and should be translated to
+ * match the Windows localization.
+ */
_("Lock pages in memory"),
GetLastError()),
errdetail("Failed system call was %s.", "OpenProcessToken")));
diff --git a/src/backend/postmaster/bgworker.c b/src/backend/postmaster/bgworker.c
index 11fc1b78637..2d2c450ba35 100644
--- a/src/backend/postmaster/bgworker.c
+++ b/src/backend/postmaster/bgworker.c
@@ -403,7 +403,7 @@ BackgroundWorkerStateChange(bool allow_new_workers)
/* Log it! */
ereport(DEBUG1,
(errmsg_internal("registering background worker \"%s\"",
- rw->rw_worker.bgw_name)));
+ rw->rw_worker.bgw_name)));
slist_push_head(&BackgroundWorkerList, &rw->rw_lnode);
}
@@ -435,7 +435,7 @@ ForgetBackgroundWorker(slist_mutable_iter *cur)
ereport(DEBUG1,
(errmsg_internal("unregistering background worker \"%s\"",
- rw->rw_worker.bgw_name)));
+ rw->rw_worker.bgw_name)));
slist_delete_current(cur);
free(rw);
diff --git a/src/backend/postmaster/checkpointer.c b/src/backend/postmaster/checkpointer.c
index e7e6a2a4594..cdd07770a01 100644
--- a/src/backend/postmaster/checkpointer.c
+++ b/src/backend/postmaster/checkpointer.c
@@ -1238,7 +1238,7 @@ CompactCheckpointerRequestQueue(void)
}
ereport(DEBUG1,
(errmsg_internal("compacted fsync request queue from %d entries to %d entries",
- CheckpointerShmem->num_requests, preserve_count)));
+ CheckpointerShmem->num_requests, preserve_count)));
CheckpointerShmem->num_requests = preserve_count;
/* Cleanup. */
diff --git a/src/backend/postmaster/pgstat.c b/src/backend/postmaster/pgstat.c
index e94f5f55c78..249b17c92b7 100644
--- a/src/backend/postmaster/pgstat.c
+++ b/src/backend/postmaster/pgstat.c
@@ -3708,7 +3708,7 @@ pgstat_write_statsfiles(bool permanent, bool allDbs)
{
fputc('R', fpout);
rc = fwrite(slotent, sizeof(PgStat_StatReplSlotEntry), 1, fpout);
- (void) rc; /* we'll check for error with ferror */
+ (void) rc; /* we'll check for error with ferror */
}
}
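The (void) rc idiom above defers error detection to a single ferror() check once the loop is done; the same pattern in isolation, with hypothetical names:

    #include <stdio.h>

    /* Write n records, then detect any failure once via ferror(). */
    static int
    write_all(FILE *fp, const int *vals, size_t n)
    {
    	for (size_t i = 0; i < n; i++)
    	{
    		size_t		rc = fwrite(&vals[i], sizeof(int), 1, fp);

    		(void) rc;			/* we'll check for error with ferror */
    	}
    	return ferror(fp) ? -1 : 0; /* one check covers every write */
    }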
diff --git a/src/backend/postmaster/postmaster.c b/src/backend/postmaster/postmaster.c
index b05db5a4735..6833f0f7f2d 100644
--- a/src/backend/postmaster/postmaster.c
+++ b/src/backend/postmaster/postmaster.c
@@ -660,6 +660,7 @@ PostmasterMain(int argc, char *argv[])
pqsignal_pm(SIGCHLD, reaper); /* handle child termination */
#ifdef SIGURG
+
/*
* Ignore SIGURG for now. Child processes may change this (see
* InitializeLatchSupport), but they will not receive any such signals
@@ -5780,7 +5781,7 @@ do_start_bgworker(RegisteredBgWorker *rw)
ereport(DEBUG1,
(errmsg_internal("starting background worker process \"%s\"",
- rw->rw_worker.bgw_name)));
+ rw->rw_worker.bgw_name)));
#ifdef EXEC_BACKEND
switch ((worker_pid = bgworker_forkexec(rw->rw_shmem_slot)))
diff --git a/src/backend/postmaster/syslogger.c b/src/backend/postmaster/syslogger.c
index e7a7486c354..cad43bdef23 100644
--- a/src/backend/postmaster/syslogger.c
+++ b/src/backend/postmaster/syslogger.c
@@ -239,7 +239,8 @@ SysLoggerMain(int argc, char *argv[])
* broken backends...
*/
- pqsignal(SIGHUP, SignalHandlerForConfigReload); /* set flag to read config file */
+ pqsignal(SIGHUP, SignalHandlerForConfigReload); /* set flag to read config
+ * file */
pqsignal(SIGINT, SIG_IGN);
pqsignal(SIGTERM, SIG_IGN);
pqsignal(SIGQUIT, SIG_IGN);
diff --git a/src/backend/replication/basebackup.c b/src/backend/replication/basebackup.c
index 767eac33e4f..e09108d0ece 100644
--- a/src/backend/replication/basebackup.c
+++ b/src/backend/replication/basebackup.c
@@ -414,7 +414,7 @@ perform_base_backup(basebackup_options *opt)
if (ti->path == NULL)
{
struct stat statbuf;
- bool sendtblspclinks = true;
+ bool sendtblspclinks = true;
/* In the main tar, include the backup_label first... */
sendFileWithContent(BACKUP_LABEL_FILE, labelfile->data,
diff --git a/src/backend/replication/logical/launcher.c b/src/backend/replication/logical/launcher.c
index cb462a052ad..85f325c3896 100644
--- a/src/backend/replication/logical/launcher.c
+++ b/src/backend/replication/logical/launcher.c
@@ -67,12 +67,6 @@ typedef struct LogicalRepCtxStruct
LogicalRepCtxStruct *LogicalRepCtx;
-typedef struct LogicalRepWorkerId
-{
- Oid subid;
- Oid relid;
-} LogicalRepWorkerId;
-
static void ApplyLauncherWakeup(void);
static void logicalrep_launcher_onexit(int code, Datum arg);
static void logicalrep_worker_onexit(int code, Datum arg);
@@ -283,7 +277,7 @@ logicalrep_worker_launch(Oid dbid, Oid subid, const char *subname, Oid userid,
ereport(DEBUG1,
(errmsg_internal("starting logical replication worker for subscription \"%s\"",
- subname)));
+ subname)));
/* Report this after the initial starting message for consistency. */
if (max_replication_slots == 0)
diff --git a/src/backend/replication/logical/origin.c b/src/backend/replication/logical/origin.c
index 39471fddad6..b955f434589 100644
--- a/src/backend/replication/logical/origin.c
+++ b/src/backend/replication/logical/origin.c
@@ -576,8 +576,8 @@ CheckPointReplicationOrigin(void)
tmppath)));
/*
- * no other backend can perform this at the same time; only one
- * checkpoint can happen at a time.
+ * no other backend can perform this at the same time; only one checkpoint
+ * can happen at a time.
*/
tmpfd = OpenTransientFile(tmppath,
O_CREAT | O_EXCL | O_WRONLY | PG_BINARY);
diff --git a/src/backend/replication/logical/reorderbuffer.c b/src/backend/replication/logical/reorderbuffer.c
index e80a195472e..b0ab91cc71b 100644
--- a/src/backend/replication/logical/reorderbuffer.c
+++ b/src/backend/replication/logical/reorderbuffer.c
@@ -2493,11 +2493,11 @@ ReorderBufferProcessTXN(ReorderBuffer *rb, ReorderBufferTXN *txn,
* need to do the cleanup and return gracefully on this error, see
* SetupCheckXidLive.
*
- * This error code can be thrown by one of the callbacks we call during
- * decoding so we need to ensure that we return gracefully only when we are
- * sending the data in streaming mode and the streaming is not finished yet
- * or when we are sending the data out on a PREPARE during a two-phase
- * commit.
+	 * This error code can be thrown by one of the callbacks we call
+	 * during decoding, so we need to ensure that we return gracefully
+	 * only when we are sending the data in streaming mode and the
+	 * streaming is not finished yet, or when we are sending the data out
+	 * on a PREPARE during a two-phase commit.
*/
if (errdata->sqlerrcode == ERRCODE_TRANSACTION_ROLLBACK &&
(stream_started || rbtxn_prepared(txn)))
diff --git a/src/backend/replication/logical/snapbuild.c b/src/backend/replication/logical/snapbuild.c
index 9118e214220..04f3355f602 100644
--- a/src/backend/replication/logical/snapbuild.c
+++ b/src/backend/replication/logical/snapbuild.c
@@ -1395,8 +1395,8 @@ SnapBuildWaitSnapshot(xl_running_xacts *running, TransactionId cutoff)
/*
* All transactions we needed to finish finished - try to ensure there is
* another xl_running_xacts record in a timely manner, without having to
- * wait for bgwriter or checkpointer to log one. During recovery we
- * can't enforce that, so we'll have to wait.
+ * wait for bgwriter or checkpointer to log one. During recovery we can't
+ * enforce that, so we'll have to wait.
*/
if (!RecoveryInProgress())
{
diff --git a/src/backend/replication/slot.c b/src/backend/replication/slot.c
index cf261e200e4..c88b803e5d0 100644
--- a/src/backend/replication/slot.c
+++ b/src/backend/replication/slot.c
@@ -99,8 +99,8 @@ ReplicationSlot *MyReplicationSlot = NULL;
int max_replication_slots = 0; /* the maximum number of replication
* slots */
-static int ReplicationSlotAcquireInternal(ReplicationSlot *slot,
- const char *name, SlotAcquireBehavior behavior);
+static int ReplicationSlotAcquireInternal(ReplicationSlot *slot,
+ const char *name, SlotAcquireBehavior behavior);
static void ReplicationSlotDropAcquired(void);
static void ReplicationSlotDropPtr(ReplicationSlot *slot);
@@ -451,8 +451,8 @@ retry:
/*
* If we found the slot but it's already active in another process, we
- * either error out, return the PID of the owning process, or retry
- * after a short wait, as caller specified.
+ * either error out, return the PID of the owning process, or retry after
+ * a short wait, as caller specified.
*/
if (active_pid != MyProcPid)
{
@@ -471,7 +471,7 @@ retry:
goto retry;
}
else if (behavior == SAB_Block)
- ConditionVariableCancelSleep(); /* no sleep needed after all */
+ ConditionVariableCancelSleep(); /* no sleep needed after all */
/* Let everybody know we've modified this slot */
ConditionVariableBroadcast(&s->active_cv);
@@ -1180,8 +1180,8 @@ restart:
ReplicationSlot *s = &ReplicationSlotCtl->replication_slots[i];
XLogRecPtr restart_lsn = InvalidXLogRecPtr;
NameData slotname;
- int wspid;
- int last_signaled_pid = 0;
+ int wspid;
+ int last_signaled_pid = 0;
if (!s->in_use)
continue;
@@ -1204,20 +1204,20 @@ restart:
/*
* Try to mark this slot as used by this process.
*
- * Note that ReplicationSlotAcquireInternal(SAB_Inquire)
- * should not cancel the prepared condition variable
- * if this slot is active in other process. Because in this case
- * we have to wait on that CV for the process owning
- * the slot to be terminated, later.
+			 * Note that ReplicationSlotAcquireInternal(SAB_Inquire) should
+			 * not cancel the prepared condition variable if this slot is
+			 * active in another process, because in that case we must later
+			 * wait on that CV for the process owning the slot to be
+			 * terminated.
*/
wspid = ReplicationSlotAcquireInternal(s, NULL, SAB_Inquire);
/*
- * Exit the loop if we successfully acquired the slot or
- * the slot was dropped during waiting for the owning process
- * to be terminated. For example, the latter case is likely to
- * happen when the slot is temporary because it's automatically
- * dropped by the termination of the owning process.
+			 * Exit the loop if we successfully acquired the slot or the slot
+			 * was dropped while we were waiting for the owning process to be
+			 * terminated. The latter is likely, for example, when the slot
+			 * is temporary, since temporary slots are dropped automatically
+			 * when the owning process terminates.
*/
if (wspid <= 0)
break;
@@ -1225,13 +1225,13 @@ restart:
/*
* Signal to terminate the process that owns the slot.
*
- * There is the race condition where other process may own
- * the slot after the process using it was terminated and before
- * this process owns it. To handle this case, we signal again
- * if the PID of the owning process is changed than the last.
+			 * There is a race condition where another process may acquire
+			 * the slot after the process using it was terminated and before
+			 * this process acquires it. To handle this case, we signal again
+			 * if the PID of the owning process has changed from the last one.
*
- * XXX This logic assumes that the same PID is not reused
- * very quickly.
+ * XXX This logic assumes that the same PID is not reused very
+ * quickly.
*/
if (last_signaled_pid != wspid)
{
@@ -1248,8 +1248,8 @@ restart:
ConditionVariableCancelSleep();
/*
- * Do nothing here and start from scratch if the slot has
- * already been dropped.
+ * Do nothing here and start from scratch if the slot has already been
+ * dropped.
*/
if (wspid == -1)
goto restart;
diff --git a/src/backend/replication/slotfuncs.c b/src/backend/replication/slotfuncs.c
index d9d36879ed7..e4e6632f82e 100644
--- a/src/backend/replication/slotfuncs.c
+++ b/src/backend/replication/slotfuncs.c
@@ -415,11 +415,11 @@ pg_get_replication_slots(PG_FUNCTION_ARGS)
nulls[i++] = true;
else
{
- XLogSegNo targetSeg;
- uint64 slotKeepSegs;
- uint64 keepSegs;
- XLogSegNo failSeg;
- XLogRecPtr failLSN;
+ XLogSegNo targetSeg;
+ uint64 slotKeepSegs;
+ uint64 keepSegs;
+ XLogSegNo failSeg;
+ XLogRecPtr failLSN;
XLByteToSeg(slot_contents.data.restart_lsn, targetSeg, wal_segment_size);
diff --git a/src/backend/replication/syncrep.c b/src/backend/replication/syncrep.c
index 7fa1a87cd82..bdbc9ef844b 100644
--- a/src/backend/replication/syncrep.c
+++ b/src/backend/replication/syncrep.c
@@ -165,12 +165,11 @@ SyncRepWaitForLSN(XLogRecPtr lsn, bool commit)
* Since this routine gets called every commit time, it's important to
* exit quickly if sync replication is not requested. So we check
* WalSndCtl->sync_standbys_defined flag without the lock and exit
- * immediately if it's false. If it's true, we need to check it again later
- * while holding the lock, to check the flag and operate the sync rep
- * queue atomically. This is necessary to avoid the race condition
- * described in SyncRepUpdateSyncStandbysDefined(). On the other
- * hand, if it's false, the lock is not necessary because we don't touch
- * the queue.
+ * immediately if it's false. If it's true, we need to check it again
+ * later while holding the lock, to check the flag and operate the sync
+ * rep queue atomically. This is necessary to avoid the race condition
+ * described in SyncRepUpdateSyncStandbysDefined(). On the other hand, if
+ * it's false, the lock is not necessary because we don't touch the queue.
*/
if (!SyncRepRequested() ||
!((volatile WalSndCtlData *) WalSndCtl)->sync_standbys_defined)
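The reflowed comment describes an unlocked fast-path test that is repeated under the lock before the queue is touched; a generic sketch of that double-check shape, with a pthread mutex standing in for the walsender lock (all names hypothetical):

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
    static volatile bool flag_defined;	/* plays the role of sync_standbys_defined */

    static void
    maybe_enqueue(void)
    {
    	/* Unlocked fast path: if the flag is clear we never touch the queue. */
    	if (!flag_defined)
    		return;

    	pthread_mutex_lock(&queue_lock);
    	/* Re-check under the lock so flag and queue are seen atomically. */
    	if (flag_defined)
    	{
    		/* ... add ourselves to the wait queue ... */
    	}
    	pthread_mutex_unlock(&queue_lock);
    }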
@@ -426,7 +425,7 @@ SyncRepInitConfig(void)
ereport(DEBUG1,
(errmsg_internal("standby \"%s\" now has synchronous standby priority %u",
- application_name, priority)));
+ application_name, priority)));
}
}
diff --git a/src/backend/replication/walreceiver.c b/src/backend/replication/walreceiver.c
index 9a0e3806fcf..b94910bfe9a 100644
--- a/src/backend/replication/walreceiver.c
+++ b/src/backend/replication/walreceiver.c
@@ -747,8 +747,8 @@ WalRcvFetchTimeLineHistoryFiles(TimeLineID first, TimeLineID last)
writeTimeLineHistoryFile(tli, content, len);
/*
- * Mark the streamed history file as ready for archiving
- * if archive_mode is always.
+ * Mark the streamed history file as ready for archiving if
+ * archive_mode is always.
*/
if (XLogArchiveMode != ARCHIVE_MODE_ALWAYS)
XLogArchiveForceDone(fname);
diff --git a/src/backend/replication/walsender.c b/src/backend/replication/walsender.c
index 628c8d49d98..e94069c366a 100644
--- a/src/backend/replication/walsender.c
+++ b/src/backend/replication/walsender.c
@@ -2326,7 +2326,7 @@ WalSndLoop(WalSndSendDataCallback send_data)
{
ereport(DEBUG1,
(errmsg_internal("\"%s\" has now caught up with upstream server",
- application_name)));
+ application_name)));
WalSndSetState(WALSNDSTATE_STREAMING);
}
@@ -3139,7 +3139,7 @@ WalSndWakeup(void)
static void
WalSndWait(uint32 socket_events, long timeout, uint32 wait_event)
{
- WaitEvent event;
+ WaitEvent event;
ModifyWaitEvent(FeBeWaitSet, FeBeWaitSetSocketPos, socket_events, NULL);
if (WaitEventSetWait(FeBeWaitSet, timeout, &event, 1, wait_event) == 1 &&
diff --git a/src/backend/statistics/dependencies.c b/src/backend/statistics/dependencies.c
index ba7decb6a4e..d703e9b9ba1 100644
--- a/src/backend/statistics/dependencies.c
+++ b/src/backend/statistics/dependencies.c
@@ -241,8 +241,8 @@ dependency_degree(StatsBuildData *data, int k, AttrNumber *dependency)
mss = multi_sort_init(k);
/*
- * Translate the array of indexes to regular attnums for the dependency (we
- * will need this to identify the columns in StatsBuildData).
+ * Translate the array of indexes to regular attnums for the dependency
+ * (we will need this to identify the columns in StatsBuildData).
*/
attnums_dep = (AttrNumber *) palloc(k * sizeof(AttrNumber));
for (i = 0; i < k; i++)
diff --git a/src/backend/statistics/extended_stats.c b/src/backend/statistics/extended_stats.c
index 5e53783ea66..b05e818ba9e 100644
--- a/src/backend/statistics/extended_stats.c
+++ b/src/backend/statistics/extended_stats.c
@@ -91,9 +91,9 @@ typedef struct AnlExprData
} AnlExprData;
static void compute_expr_stats(Relation onerel, double totalrows,
- AnlExprData * exprdata, int nexprs,
+ AnlExprData *exprdata, int nexprs,
HeapTuple *rows, int numrows);
-static Datum serialize_expr_stats(AnlExprData * exprdata, int nexprs);
+static Datum serialize_expr_stats(AnlExprData *exprdata, int nexprs);
static Datum expr_fetch_func(VacAttrStatsP stats, int rownum, bool *isNull);
static AnlExprData *build_expr_data(List *exprs, int stattarget);
@@ -539,9 +539,9 @@ examine_attribute(Node *expr)
/*
* When analyzing an expression, believe the expression tree's type not
- * the column datatype --- the latter might be the opckeytype storage
- * type of the opclass, which is not interesting for our purposes. (Note:
- * if we did anything with non-expression statistics columns, we'd need to
+ * the column datatype --- the latter might be the opckeytype storage type
+ * of the opclass, which is not interesting for our purposes. (Note: if
+ * we did anything with non-expression statistics columns, we'd need to
* figure out where to get the correct type info from, but for now that's
* not a problem.) It's not clear whether anyone will care about the
* typmod, but we store that too just in case.
@@ -1788,16 +1788,16 @@ statext_mcv_clauselist_selectivity(PlannerInfo *root, List *clauses, int varReli
* attnums of expressions from it. Ignore it if it's not fully
* covered by the chosen statistics.
*
- * We need to check both attributes and expressions, and reject
- * if either is not covered.
+ * We need to check both attributes and expressions, and reject if
+ * either is not covered.
*/
if (!bms_is_subset(list_attnums[listidx], stat->keys) ||
!stat_covers_expressions(stat, list_exprs[listidx], NULL))
continue;
/*
- * Now we know the clause is compatible (we have either attnums
- * or expressions extracted from it), and was not estimated yet.
+ * Now we know the clause is compatible (we have either attnums or
+ * expressions extracted from it), and was not estimated yet.
*/
/* record simple clauses (single column or expression) */
diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c
index 0c5b87864b9..4b296a22c45 100644
--- a/src/backend/storage/buffer/bufmgr.c
+++ b/src/backend/storage/buffer/bufmgr.c
@@ -3071,7 +3071,7 @@ DropRelFileNodeBuffers(SMgrRelation smgr_reln, ForkNumber *forkNum,
int j;
RelFileNodeBackend rnode;
BlockNumber nForkBlock[MAX_FORKNUM];
- uint64 nBlocksToInvalidate = 0;
+ uint64 nBlocksToInvalidate = 0;
rnode = smgr_reln->smgr_rnode;
@@ -3195,7 +3195,7 @@ DropRelFileNodesAllBuffers(SMgrRelation *smgr_reln, int nnodes)
int n = 0;
SMgrRelation *rels;
BlockNumber (*block)[MAX_FORKNUM + 1];
- uint64 nBlocksToInvalidate = 0;
+ uint64 nBlocksToInvalidate = 0;
RelFileNode *nodes;
bool cached = true;
bool use_bsearch;
diff --git a/src/backend/storage/file/fd.c b/src/backend/storage/file/fd.c
index 06b57ae71f1..e8cd7ef0886 100644
--- a/src/backend/storage/file/fd.c
+++ b/src/backend/storage/file/fd.c
@@ -3288,7 +3288,7 @@ looks_like_temp_rel_name(const char *name)
static void
do_syncfs(const char *path)
{
- int fd;
+ int fd;
fd = OpenTransientFile(path, O_RDONLY);
if (fd < 0)
@@ -3394,7 +3394,7 @@ SyncDataDirectory(void)
do_syncfs("pg_wal");
return;
}
-#endif /* !HAVE_SYNCFS */
+#endif /* !HAVE_SYNCFS */
/*
* If possible, hint to the kernel that we're soon going to fsync the data
diff --git a/src/backend/storage/file/sharedfileset.c b/src/backend/storage/file/sharedfileset.c
index de422b1ebdf..ed37c940adc 100644
--- a/src/backend/storage/file/sharedfileset.c
+++ b/src/backend/storage/file/sharedfileset.c
@@ -267,8 +267,8 @@ static void
SharedFileSetDeleteOnProcExit(int status, Datum arg)
{
/*
- * Remove all the pending shared fileset entries. We don't use foreach() here
- * because SharedFileSetDeleteAll will remove the current element in
+ * Remove all the pending shared fileset entries. We don't use foreach()
+ * here because SharedFileSetDeleteAll will remove the current element in
* filesetlist. Though we have used foreach_delete_current() to remove the
* element from filesetlist it could only fix up the state of one of the
* loops, see SharedFileSetUnregister.
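The hazard this comment explains is unlinking elements from a list while a foreach-style cursor walks it; one safe shape is to keep consuming the list head instead of iterating, sketched generically (hypothetical types and callback):

    struct node
    {
    	struct node *next;
    	/* ... payload ... */
    };

    static struct node *pending_head;	/* head of the pending list */

    /* Assumed to unlink n from pending_head as a side effect. */
    extern void destroy_entry(struct node *n);

    static void
    drain_pending(void)
    {
    	/* Always re-read the head; destroy_entry() mutates the list. */
    	while (pending_head != NULL)
    		destroy_entry(pending_head);
    }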
diff --git a/src/backend/storage/ipc/latch.c b/src/backend/storage/ipc/latch.c
index ad781131e2a..1d893cf863d 100644
--- a/src/backend/storage/ipc/latch.c
+++ b/src/backend/storage/ipc/latch.c
@@ -1655,9 +1655,9 @@ WaitEventSetWaitBlock(WaitEventSet *set, int cur_timeout,
(cur_kqueue_event->fflags & NOTE_EXIT) != 0)
{
/*
- * The kernel will tell this kqueue object only once about the exit
- * of the postmaster, so let's remember that for next time so that
- * we provide level-triggered semantics.
+ * The kernel will tell this kqueue object only once about the
+ * exit of the postmaster, so let's remember that for next time so
+ * that we provide level-triggered semantics.
*/
set->report_postmaster_not_running = true;
diff --git a/src/backend/storage/ipc/procarray.c b/src/backend/storage/ipc/procarray.c
index 5ff8cab394e..42a89fc5dc9 100644
--- a/src/backend/storage/ipc/procarray.c
+++ b/src/backend/storage/ipc/procarray.c
@@ -2056,7 +2056,7 @@ GetSnapshotDataInitOldSnapshot(Snapshot snapshot)
static bool
GetSnapshotDataReuse(Snapshot snapshot)
{
- uint64 curXactCompletionCount;
+ uint64 curXactCompletionCount;
Assert(LWLockHeldByMe(ProcArrayLock));
@@ -2080,8 +2080,8 @@ GetSnapshotDataReuse(Snapshot snapshot)
* holding ProcArrayLock) exclusively). Thus the xactCompletionCount check
* ensures we would detect if the snapshot would have changed.
*
- * As the snapshot contents are the same as it was before, it is safe
- * to re-enter the snapshot's xmin into the PGPROC array. None of the rows
+ * As the snapshot contents are the same as it was before, it is safe to
+ * re-enter the snapshot's xmin into the PGPROC array. None of the rows
* visible under the snapshot could already have been removed (that'd
* require the set of running transactions to change) and it fulfills the
* requirement that concurrent GetSnapshotData() calls yield the same
@@ -2259,10 +2259,10 @@ GetSnapshotData(Snapshot snapshot)
continue;
/*
- * The only way we are able to get here with a non-normal xid
- * is during bootstrap - with this backend using
- * BootstrapTransactionId. But the above test should filter
- * that out.
+ * The only way we are able to get here with a non-normal xid is
+ * during bootstrap - with this backend using
+ * BootstrapTransactionId. But the above test should filter that
+ * out.
*/
Assert(TransactionIdIsNormal(xid));
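For context, xactCompletionCount works as a generation counter: if no transaction has committed or aborted since a snapshot was built, that snapshot is still exact and can be handed back unchanged. A toy version of the reuse test (hypothetical names):

    #include <stdbool.h>
    #include <stdint.h>

    extern uint64_t shared_completion_count;	/* bumped at each commit/abort */

    typedef struct Snap
    {
    	uint64_t	completion_count;	/* generation when this was built */
    	/* ... xmin, xmax, running-xid array ... */
    } Snap;

    /* Reuse snap verbatim if no transaction ended since it was taken. */
    static bool
    snapshot_reusable(const Snap *snap)
    {
    	return snap->completion_count == shared_completion_count;
    }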
diff --git a/src/backend/storage/ipc/procsignal.c b/src/backend/storage/ipc/procsignal.c
index eac68951414..defb75aa26a 100644
--- a/src/backend/storage/ipc/procsignal.c
+++ b/src/backend/storage/ipc/procsignal.c
@@ -61,7 +61,7 @@
*/
typedef struct
{
- volatile pid_t pss_pid;
+ volatile pid_t pss_pid;
volatile sig_atomic_t pss_signalFlags[NUM_PROCSIGNALS];
pg_atomic_uint64 pss_barrierGeneration;
pg_atomic_uint32 pss_barrierCheckMask;
@@ -454,7 +454,7 @@ ProcessProcSignalBarrier(void)
{
uint64 local_gen;
uint64 shared_gen;
- volatile uint32 flags;
+ volatile uint32 flags;
Assert(MyProcSignalSlot);
@@ -484,15 +484,15 @@ ProcessProcSignalBarrier(void)
* extract the flags, and that any subsequent state changes happen
* afterward.
*
- * NB: In order to avoid race conditions, we must zero pss_barrierCheckMask
- * first and only afterwards try to do barrier processing. If we did it
- * in the other order, someone could send us another barrier of some
- * type right after we called the barrier-processing function but before
- * we cleared the bit. We would have no way of knowing that the bit needs
- * to stay set in that case, so the need to call the barrier-processing
- * function again would just get forgotten. So instead, we tentatively
- * clear all the bits and then put back any for which we don't manage
- * to successfully absorb the barrier.
+ * NB: In order to avoid race conditions, we must zero
+ * pss_barrierCheckMask first and only afterwards try to do barrier
+ * processing. If we did it in the other order, someone could send us
+ * another barrier of some type right after we called the
+ * barrier-processing function but before we cleared the bit. We would
+ * have no way of knowing that the bit needs to stay set in that case, so
+ * the need to call the barrier-processing function again would just get
+ * forgotten. So instead, we tentatively clear all the bits and then put
+ * back any for which we don't manage to successfully absorb the barrier.
*/
flags = pg_atomic_exchange_u32(&MyProcSignalSlot->pss_barrierCheckMask, 0);
@@ -503,15 +503,15 @@ ProcessProcSignalBarrier(void)
*/
if (flags != 0)
{
- bool success = true;
+ bool success = true;
PG_TRY();
{
/*
* Process each type of barrier. The barrier-processing functions
- * should normally return true, but may return false if the barrier
- * can't be absorbed at the current time. This should be rare,
- * because it's pretty expensive. Every single
+ * should normally return true, but may return false if the
+ * barrier can't be absorbed at the current time. This should be
+ * rare, because it's pretty expensive. Every single
* CHECK_FOR_INTERRUPTS() will return here until we manage to
* absorb the barrier, and that cost will add up in a hurry.
*
@@ -521,8 +521,8 @@ ProcessProcSignalBarrier(void)
*/
while (flags != 0)
{
- ProcSignalBarrierType type;
- bool processed = true;
+ ProcSignalBarrierType type;
+ bool processed = true;
type = (ProcSignalBarrierType) pg_rightmost_one_pos32(flags);
switch (type)
@@ -533,8 +533,8 @@ ProcessProcSignalBarrier(void)
}
/*
- * To avoid an infinite loop, we must always unset the bit
- * in flags.
+ * To avoid an infinite loop, we must always unset the bit in
+ * flags.
*/
BARRIER_CLEAR_BIT(flags, type);
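The NB comment spells out a swap-to-zero-then-restore protocol; a compact sketch of that scheme in C11 atomics, with __builtin_ctz standing in for pg_rightmost_one_pos32 (hypothetical names; GCC/Clang builtin assumed):

    #include <stdatomic.h>
    #include <stdbool.h>

    extern bool process_barrier(int type);	/* assumed per-type handler */

    static void
    absorb_barriers(atomic_uint *check_mask)
    {
    	/* Zero the mask first; requests arriving later set fresh bits. */
    	unsigned int flags = atomic_exchange(check_mask, 0);

    	while (flags != 0)
    	{
    		int			type = __builtin_ctz(flags);	/* rightmost set bit */

    		if (!process_barrier(type))
    			atomic_fetch_or(check_mask, 1U << type);	/* put the bit back */
    		flags &= flags - 1;	/* always clear the bit locally */
    	}
    }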
diff --git a/src/backend/storage/ipc/signalfuncs.c b/src/backend/storage/ipc/signalfuncs.c
index 0337b00226a..837699481ca 100644
--- a/src/backend/storage/ipc/signalfuncs.c
+++ b/src/backend/storage/ipc/signalfuncs.c
@@ -137,11 +137,12 @@ pg_wait_until_termination(int pid, int64 timeout)
* Wait in steps of waittime milliseconds until this function exits or
* timeout.
*/
- int64 waittime = 100;
+ int64 waittime = 100;
+
/*
* Initially remaining time is the entire timeout specified by the user.
*/
- int64 remainingtime = timeout;
+ int64 remainingtime = timeout;
/*
* Check existence of the backend. If the backend still exists, then wait
@@ -162,7 +163,7 @@ pg_wait_until_termination(int pid, int64 timeout)
ereport(ERROR,
(errcode(ERRCODE_INTERNAL_ERROR),
errmsg("could not check the existence of the backend with PID %d: %m",
- pid)));
+ pid)));
}
/* Process interrupts, if any, before waiting */
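pg_wait_until_termination polls in 100 ms steps until the target process exits or the timeout lapses; a reduced sketch of that loop, assuming a hypothetical still_running() probe:

    #include <stdbool.h>
    #include <unistd.h>

    extern bool still_running(int pid);	/* assumed existence check */

    /* Returns true once pid is gone, false if timeout_ms elapses first. */
    static bool
    wait_until_gone(int pid, long timeout_ms)
    {
    	const long	waittime_ms = 100;
    	long		remaining_ms = timeout_ms;

    	while (still_running(pid))
    	{
    		if (remaining_ms <= 0)
    			return false;	/* timed out */
    		usleep(waittime_ms * 1000);
    		remaining_ms -= waittime_ms;
    	}
    	return true;
    }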
@@ -198,9 +199,9 @@ pg_wait_until_termination(int pid, int64 timeout)
Datum
pg_terminate_backend(PG_FUNCTION_ARGS)
{
- int pid;
- int r;
- int timeout;
+ int pid;
+ int r;
+ int timeout;
pid = PG_GETARG_INT32(0);
timeout = PG_GETARG_INT64(1);
@@ -208,7 +209,7 @@ pg_terminate_backend(PG_FUNCTION_ARGS)
if (timeout < 0)
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
- errmsg("\"timeout\" must not be negative")));
+ errmsg("\"timeout\" must not be negative")));
r = pg_signal_backend(pid, SIGTERM);
@@ -240,9 +241,9 @@ pg_terminate_backend(PG_FUNCTION_ARGS)
Datum
pg_wait_for_backend_termination(PG_FUNCTION_ARGS)
{
- int pid;
- int64 timeout;
- PGPROC *proc = NULL;
+ int pid;
+ int64 timeout;
+ PGPROC *proc = NULL;
pid = PG_GETARG_INT32(0);
timeout = PG_GETARG_INT64(1);
@@ -250,7 +251,7 @@ pg_wait_for_backend_termination(PG_FUNCTION_ARGS)
if (timeout <= 0)
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
- errmsg("\"timeout\" must not be negative or zero")));
+ errmsg("\"timeout\" must not be negative or zero")));
proc = BackendPidGetProc(pid);
diff --git a/src/backend/storage/ipc/standby.c b/src/backend/storage/ipc/standby.c
index 1465ee44a12..553b6e54603 100644
--- a/src/backend/storage/ipc/standby.c
+++ b/src/backend/storage/ipc/standby.c
@@ -482,7 +482,7 @@ ResolveRecoveryConflictWithSnapshotFullXid(FullTransactionId latestRemovedFullXi
* snapshots that still see it.
*/
FullTransactionId nextXid = ReadNextFullTransactionId();
- uint64 diff;
+ uint64 diff;
diff = U64FromFullTransactionId(nextXid) -
U64FromFullTransactionId(latestRemovedFullXid);
diff --git a/src/backend/storage/lmgr/proc.c b/src/backend/storage/lmgr/proc.c
index 692f21ef6a8..2575ea1ca0d 100644
--- a/src/backend/storage/lmgr/proc.c
+++ b/src/backend/storage/lmgr/proc.c
@@ -103,7 +103,7 @@ ProcGlobalShmemSize(void)
{
Size size = 0;
Size TotalProcs =
- add_size(MaxBackends, add_size(NUM_AUXILIARY_PROCS, max_prepared_xacts));
+ add_size(MaxBackends, add_size(NUM_AUXILIARY_PROCS, max_prepared_xacts));
/* ProcGlobal */
size = add_size(size, sizeof(PROC_HDR));
@@ -1245,8 +1245,8 @@ ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable)
/*
* Set timer so we can wake up after awhile and check for a deadlock. If a
* deadlock is detected, the handler sets MyProc->waitStatus =
- * PROC_WAIT_STATUS_ERROR, allowing us to know that we must report failure rather
- * than success.
+ * PROC_WAIT_STATUS_ERROR, allowing us to know that we must report failure
+ * rather than success.
*
* By delaying the check until we've waited for a bit, we can avoid
* running the rather expensive deadlock-check code in most cases.
@@ -1371,9 +1371,9 @@ ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable)
}
/*
- * waitStatus could change from PROC_WAIT_STATUS_WAITING to something else
- * asynchronously. Read it just once per loop to prevent surprising
- * behavior (such as missing log messages).
+ * waitStatus could change from PROC_WAIT_STATUS_WAITING to something
+ * else asynchronously. Read it just once per loop to prevent
+ * surprising behavior (such as missing log messages).
*/
myWaitStatus = *((volatile ProcWaitStatus *) &MyProc->waitStatus);
@@ -1429,7 +1429,7 @@ ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable)
ereport(DEBUG1,
(errmsg_internal("sending cancel to blocking autovacuum PID %d",
- pid),
+ pid),
errdetail_log("%s", logbuf.data)));
pfree(locktagbuf.data);
@@ -1587,11 +1587,12 @@ ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable)
/*
* Currently, the deadlock checker always kicks its own
- * process, which means that we'll only see PROC_WAIT_STATUS_ERROR when
- * deadlock_state == DS_HARD_DEADLOCK, and there's no need to
- * print redundant messages. But for completeness and
- * future-proofing, print a message if it looks like someone
- * else kicked us off the lock.
+ * process, which means that we'll only see
+ * PROC_WAIT_STATUS_ERROR when deadlock_state ==
+ * DS_HARD_DEADLOCK, and there's no need to print redundant
+ * messages. But for completeness and future-proofing, print
+ * a message if it looks like someone else kicked us off the
+ * lock.
*/
if (deadlock_state != DS_HARD_DEADLOCK)
ereport(LOG,
@@ -1830,9 +1831,9 @@ CheckDeadLock(void)
* preserve the flexibility to kill some other transaction than the
* one detecting the deadlock.)
*
- * RemoveFromWaitQueue sets MyProc->waitStatus to PROC_WAIT_STATUS_ERROR, so
- * ProcSleep will report an error after we return from the signal
- * handler.
+ * RemoveFromWaitQueue sets MyProc->waitStatus to
+ * PROC_WAIT_STATUS_ERROR, so ProcSleep will report an error after we
+ * return from the signal handler.
*/
Assert(MyProc->waitLock != NULL);
RemoveFromWaitQueue(MyProc, LockTagHashCode(&(MyProc->waitLock->tag)));
diff --git a/src/backend/storage/lmgr/spin.c b/src/backend/storage/lmgr/spin.c
index 6fe0c6532c6..557672cadda 100644
--- a/src/backend/storage/lmgr/spin.c
+++ b/src/backend/storage/lmgr/spin.c
@@ -37,7 +37,7 @@
#define NUM_EMULATION_SEMAPHORES (NUM_SPINLOCK_SEMAPHORES + NUM_ATOMICS_SEMAPHORES)
#else
#define NUM_EMULATION_SEMAPHORES (NUM_SPINLOCK_SEMAPHORES)
-#endif /* DISABLE_ATOMICS */
+#endif /* DISABLE_ATOMICS */
PGSemaphore *SpinlockSemaArray;
diff --git a/src/backend/storage/page/bufpage.c b/src/backend/storage/page/bufpage.c
index b231c438f95..82ca91f5977 100644
--- a/src/backend/storage/page/bufpage.c
+++ b/src/backend/storage/page/bufpage.c
@@ -259,7 +259,7 @@ PageAddItemExtended(Page page,
* group at the end of the line pointer array.
*/
for (offsetNumber = FirstOffsetNumber;
- offsetNumber < limit; /* limit is maxoff+1 */
+ offsetNumber < limit; /* limit is maxoff+1 */
offsetNumber++)
{
itemId = PageGetItemId(phdr, offsetNumber);
diff --git a/src/backend/storage/sync/sync.c b/src/backend/storage/sync/sync.c
index 708215614db..bc3ceb27125 100644
--- a/src/backend/storage/sync/sync.c
+++ b/src/backend/storage/sync/sync.c
@@ -420,7 +420,7 @@ ProcessSyncRequests(void)
ereport(DEBUG1,
(errcode_for_file_access(),
errmsg_internal("could not fsync file \"%s\" but retrying: %m",
- path)));
+ path)));
/*
* Absorb incoming requests and check to see if a cancel
diff --git a/src/backend/tcop/postgres.c b/src/backend/tcop/postgres.c
index 2d6d145ecc0..6200699ddd7 100644
--- a/src/backend/tcop/postgres.c
+++ b/src/backend/tcop/postgres.c
@@ -454,7 +454,7 @@ SocketBackend(StringInfo inBuf)
* the type.
*/
if (pq_getmessage(inBuf, maxmsglen))
- return EOF; /* suitable message already logged */
+ return EOF; /* suitable message already logged */
RESUME_CANCEL_INTERRUPTS();
return qtype;
@@ -1350,8 +1350,8 @@ exec_parse_message(const char *query_string, /* string to execute */
ereport(DEBUG2,
(errmsg_internal("parse %s: %s",
- *stmt_name ? stmt_name : "<unnamed>",
- query_string)));
+ *stmt_name ? stmt_name : "<unnamed>",
+ query_string)));
/*
* Start up a transaction command so we can run parse analysis etc. (Note
@@ -1606,8 +1606,8 @@ exec_bind_message(StringInfo input_message)
ereport(DEBUG2,
(errmsg_internal("bind %s to %s",
- *portal_name ? portal_name : "<unnamed>",
- *stmt_name ? stmt_name : "<unnamed>")));
+ *portal_name ? portal_name : "<unnamed>",
+ *stmt_name ? stmt_name : "<unnamed>")));
/* Find prepared statement */
if (stmt_name[0] != '\0')
diff --git a/src/backend/utils/activity/backend_progress.c b/src/backend/utils/activity/backend_progress.c
index 293254993c7..6743e68cef6 100644
--- a/src/backend/utils/activity/backend_progress.c
+++ b/src/backend/utils/activity/backend_progress.c
@@ -10,7 +10,7 @@
*/
#include "postgres.h"
-#include "port/atomics.h" /* for memory barriers */
+#include "port/atomics.h" /* for memory barriers */
#include "utils/backend_progress.h"
#include "utils/backend_status.h"
diff --git a/src/backend/utils/activity/backend_status.c b/src/backend/utils/activity/backend_status.c
index a3681011030..2901f9f5a9f 100644
--- a/src/backend/utils/activity/backend_status.c
+++ b/src/backend/utils/activity/backend_status.c
@@ -16,13 +16,13 @@
#include "miscadmin.h"
#include "pg_trace.h"
#include "pgstat.h"
-#include "port/atomics.h" /* for memory barriers */
+#include "port/atomics.h" /* for memory barriers */
#include "storage/ipc.h"
-#include "storage/proc.h" /* for MyProc */
+#include "storage/proc.h" /* for MyProc */
#include "storage/sinvaladt.h"
#include "utils/ascii.h"
#include "utils/backend_status.h"
-#include "utils/guc.h" /* for application_name */
+#include "utils/guc.h" /* for application_name */
#include "utils/memutils.h"
@@ -498,8 +498,8 @@ pgstat_setup_backend_status_context(void)
{
if (!backendStatusSnapContext)
backendStatusSnapContext = AllocSetContextCreate(TopMemoryContext,
- "Backend Status Snapshot",
- ALLOCSET_SMALL_SIZES);
+ "Backend Status Snapshot",
+ ALLOCSET_SMALL_SIZES);
}
@@ -1033,7 +1033,8 @@ pgstat_get_my_query_id(void)
if (!MyBEEntry)
return 0;
- /* There's no need for a lock around pgstat_begin_read_activity /
+ /*
+ * There's no need for a lock around pgstat_begin_read_activity /
* pgstat_end_read_activity here as it's only called from
* pg_stat_get_activity which is already protected, or from the same
* backend which means that there won't be concurrent writes.
diff --git a/src/backend/utils/activity/wait_event.c b/src/backend/utils/activity/wait_event.c
index 89b5b8b7b9d..6baf67740c7 100644
--- a/src/backend/utils/activity/wait_event.c
+++ b/src/backend/utils/activity/wait_event.c
@@ -22,8 +22,8 @@
*/
#include "postgres.h"
-#include "storage/lmgr.h" /* for GetLockNameFromTagType */
-#include "storage/lwlock.h" /* for GetLWLockIdentifier */
+#include "storage/lmgr.h" /* for GetLockNameFromTagType */
+#include "storage/lwlock.h" /* for GetLWLockIdentifier */
#include "utils/wait_event.h"
diff --git a/src/backend/utils/adt/acl.c b/src/backend/utils/adt/acl.c
index 7861a0a613a..67f8b29434a 100644
--- a/src/backend/utils/adt/acl.c
+++ b/src/backend/utils/adt/acl.c
@@ -2453,9 +2453,9 @@ column_privilege_check(Oid tableoid, AttrNumber attnum,
return -1;
/*
- * Check for column-level privileges first. This serves in
- * part as a check on whether the column even exists, so we
- * need to do it before checking table-level privilege.
+ * Check for column-level privileges first. This serves in part as a check
+ * on whether the column even exists, so we need to do it before checking
+ * table-level privilege.
*/
aclresult = pg_attribute_aclcheck_ext(tableoid, attnum, roleid,
mode, &is_missing);
diff --git a/src/backend/utils/adt/dbsize.c b/src/backend/utils/adt/dbsize.c
index da1a879f1f6..3c70bb59433 100644
--- a/src/backend/utils/adt/dbsize.c
+++ b/src/backend/utils/adt/dbsize.c
@@ -867,7 +867,7 @@ pg_relation_filenode(PG_FUNCTION_ARGS)
{
if (relform->relfilenode)
result = relform->relfilenode;
- else /* Consult the relation mapper */
+ else /* Consult the relation mapper */
result = RelationMapOidToFilenode(relid,
relform->relisshared);
}
@@ -946,17 +946,17 @@ pg_relation_filepath(PG_FUNCTION_ARGS)
rnode.dbNode = MyDatabaseId;
if (relform->relfilenode)
rnode.relNode = relform->relfilenode;
- else /* Consult the relation mapper */
+ else /* Consult the relation mapper */
rnode.relNode = RelationMapOidToFilenode(relid,
relform->relisshared);
}
else
{
- /* no storage, return NULL */
- rnode.relNode = InvalidOid;
- /* some compilers generate warnings without these next two lines */
- rnode.dbNode = InvalidOid;
- rnode.spcNode = InvalidOid;
+ /* no storage, return NULL */
+ rnode.relNode = InvalidOid;
+ /* some compilers generate warnings without these next two lines */
+ rnode.dbNode = InvalidOid;
+ rnode.spcNode = InvalidOid;
}
if (!OidIsValid(rnode.relNode))
diff --git a/src/backend/utils/adt/genfile.c b/src/backend/utils/adt/genfile.c
index 322152ebd97..c436d9318b6 100644
--- a/src/backend/utils/adt/genfile.c
+++ b/src/backend/utils/adt/genfile.c
@@ -160,16 +160,15 @@ read_binary_file(const char *filename, int64 seek_offset, int64 bytes_to_read,
#define MIN_READ_SIZE 4096
/*
- * If not at end of file, and sbuf.len is equal to
- * MaxAllocSize - 1, then either the file is too large, or
- * there is nothing left to read. Attempt to read one more
- * byte to see if the end of file has been reached. If not,
- * the file is too large; we'd rather give the error message
- * for that ourselves.
+ * If not at end of file, and sbuf.len is equal to MaxAllocSize -
+ * 1, then either the file is too large, or there is nothing left
+ * to read. Attempt to read one more byte to see if the end of
+ * file has been reached. If not, the file is too large; we'd
+ * rather give the error message for that ourselves.
*/
if (sbuf.len == MaxAllocSize - 1)
{
- char rbuf[1];
+ char rbuf[1];
if (fread(rbuf, 1, 1, file) != 0 || !feof(file))
ereport(ERROR,
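The one-extra-byte probe the comment describes, isolated as a sketch: a zero-byte read with feof() set means true end of file; anything else means more data (or an error), i.e. the file really was too large to return:

    #include <stdbool.h>
    #include <stdio.h>

    /* True only if fp is positioned exactly at end of file. */
    static bool
    at_eof(FILE *fp)
    {
    	char		rbuf[1];

    	/* A successful read, or a read failing without EOF, means "not at end". */
    	if (fread(rbuf, 1, 1, fp) != 0 || !feof(fp))
    		return false;
    	return true;
    }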
diff --git a/src/backend/utils/adt/lockfuncs.c b/src/backend/utils/adt/lockfuncs.c
index 97f0265c12d..085fec3ea20 100644
--- a/src/backend/utils/adt/lockfuncs.c
+++ b/src/backend/utils/adt/lockfuncs.c
@@ -636,10 +636,10 @@ pg_isolation_test_session_is_blocked(PG_FUNCTION_ARGS)
* Check if any of these are in the list of interesting PIDs, that being
* the sessions that the isolation tester is running. We don't use
* "arrayoverlaps" here, because it would lead to cache lookups and one of
- * our goals is to run quickly with debug_invalidate_system_caches_always > 0. We expect
- * blocking_pids to be usually empty and otherwise a very small number in
- * isolation tester cases, so make that the outer loop of a naive search
- * for a match.
+	 * our goals is to run quickly with debug_invalidate_system_caches_always
+	 * > 0. We expect blocking_pids to usually be empty, and otherwise to
+	 * hold a very small number of PIDs in isolation tester cases, so make
+	 * that the outer loop of a naive search for a match.
*/
for (i = 0; i < num_blocking_pids; i++)
for (j = 0; j < num_interesting_pids; j++)
diff --git a/src/backend/utils/adt/mcxtfuncs.c b/src/backend/utils/adt/mcxtfuncs.c
index e2b87a7ed9f..2984768d199 100644
--- a/src/backend/utils/adt/mcxtfuncs.c
+++ b/src/backend/utils/adt/mcxtfuncs.c
@@ -34,8 +34,8 @@
*/
static void
PutMemoryContextsStatsTupleStore(Tuplestorestate *tupstore,
- TupleDesc tupdesc, MemoryContext context,
- const char *parent, int level)
+ TupleDesc tupdesc, MemoryContext context,
+ const char *parent, int level)
{
#define PG_GET_BACKEND_MEMORY_CONTEXTS_COLS 9
@@ -52,8 +52,8 @@ PutMemoryContextsStatsTupleStore(Tuplestorestate *tupstore,
ident = context->ident;
/*
- * To be consistent with logging output, we label dynahash contexts
- * with just the hash table name as with MemoryContextStatsPrint().
+ * To be consistent with logging output, we label dynahash contexts with
+ * just the hash table name as with MemoryContextStatsPrint().
*/
if (ident && strcmp(name, "dynahash") == 0)
{
@@ -75,7 +75,7 @@ PutMemoryContextsStatsTupleStore(Tuplestorestate *tupstore,
if (ident)
{
- int idlen = strlen(ident);
+ int idlen = strlen(ident);
char clipped_ident[MEMORY_CONTEXT_IDENT_DISPLAY_SIZE];
/*
@@ -108,7 +108,7 @@ PutMemoryContextsStatsTupleStore(Tuplestorestate *tupstore,
for (child = context->firstchild; child != NULL; child = child->nextchild)
{
PutMemoryContextsStatsTupleStore(tupstore, tupdesc,
- child, name, level + 1);
+ child, name, level + 1);
}
}
@@ -150,7 +150,7 @@ pg_get_backend_memory_contexts(PG_FUNCTION_ARGS)
MemoryContextSwitchTo(oldcontext);
PutMemoryContextsStatsTupleStore(tupstore, tupdesc,
- TopMemoryContext, NULL, 0);
+ TopMemoryContext, NULL, 0);
/* clean up and return the tuplestore */
tuplestore_donestoring(tupstore);
diff --git a/src/backend/utils/adt/name.c b/src/backend/utils/adt/name.c
index c93be3350ea..602a724d2f8 100644
--- a/src/backend/utils/adt/name.c
+++ b/src/backend/utils/adt/name.c
@@ -234,7 +234,7 @@ namestrcpy(Name name, const char *str)
{
/* NB: We need to zero-pad the destination. */
strncpy(NameStr(*name), str, NAMEDATALEN);
- NameStr(*name)[NAMEDATALEN-1] = '\0';
+ NameStr(*name)[NAMEDATALEN - 1] = '\0';
}
/*
diff --git a/src/backend/utils/adt/pg_locale.c b/src/backend/utils/adt/pg_locale.c
index caa09d6373e..453af401cab 100644
--- a/src/backend/utils/adt/pg_locale.c
+++ b/src/backend/utils/adt/pg_locale.c
@@ -1668,16 +1668,16 @@ get_collation_actual_version(char collprovider, const char *collcollate)
}
else
#endif
- if (collprovider == COLLPROVIDER_LIBC &&
- pg_strcasecmp("C", collcollate) != 0 &&
- pg_strncasecmp("C.", collcollate, 2) != 0 &&
- pg_strcasecmp("POSIX", collcollate) != 0)
+ if (collprovider == COLLPROVIDER_LIBC &&
+ pg_strcasecmp("C", collcollate) != 0 &&
+ pg_strncasecmp("C.", collcollate, 2) != 0 &&
+ pg_strcasecmp("POSIX", collcollate) != 0)
{
#if defined(__GLIBC__)
/* Use the glibc version because we don't have anything better. */
collversion = pstrdup(gnu_get_libc_version());
#elif defined(LC_VERSION_MASK)
- locale_t loc;
+ locale_t loc;
/* Look up FreeBSD collation version. */
loc = newlocale(LC_COLLATE, collcollate, NULL);
diff --git a/src/backend/utils/adt/rangetypes_typanalyze.c b/src/backend/utils/adt/rangetypes_typanalyze.c
index 2c10f2c867c..671fe6ddb7a 100644
--- a/src/backend/utils/adt/rangetypes_typanalyze.c
+++ b/src/backend/utils/adt/rangetypes_typanalyze.c
@@ -330,7 +330,7 @@ compute_range_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
stats->statypid[slot_idx] = typcache->type_id;
stats->statyplen[slot_idx] = typcache->typlen;
stats->statypbyval[slot_idx] = typcache->typbyval;
- stats->statypalign[slot_idx] = typcache->typalign;
+ stats->statypalign[slot_idx] = typcache->typalign;
slot_idx++;
}
diff --git a/src/backend/utils/adt/ri_triggers.c b/src/backend/utils/adt/ri_triggers.c
index 7c77c338cec..96269fc2adb 100644
--- a/src/backend/utils/adt/ri_triggers.c
+++ b/src/backend/utils/adt/ri_triggers.c
@@ -394,8 +394,8 @@ RI_FKey_check(TriggerData *trigdata)
* Now check that foreign key exists in PK table
*
* XXX detectNewRows must be true when a partitioned table is on the
- * referenced side. The reason is that our snapshot must be fresh
- * in order for the hack in find_inheritance_children() to work.
+ * referenced side. The reason is that our snapshot must be fresh in
+ * order for the hack in find_inheritance_children() to work.
*/
ri_PerformCheck(riinfo, &qkey, qplan,
fk_rel, pk_rel,
diff --git a/src/backend/utils/adt/rowtypes.c b/src/backend/utils/adt/rowtypes.c
index 23787a6ae7d..1a71fdbc33f 100644
--- a/src/backend/utils/adt/rowtypes.c
+++ b/src/backend/utils/adt/rowtypes.c
@@ -1802,8 +1802,8 @@ hash_record(PG_FUNCTION_ARGS)
tuple.t_data = record;
/*
- * We arrange to look up the needed hashing info just once per series
- * of calls, assuming the record type doesn't change underneath us.
+ * We arrange to look up the needed hashing info just once per series of
+ * calls, assuming the record type doesn't change underneath us.
*/
my_extra = (RecordCompareData *) fcinfo->flinfo->fn_extra;
if (my_extra == NULL ||
@@ -1923,8 +1923,8 @@ hash_record_extended(PG_FUNCTION_ARGS)
tuple.t_data = record;
/*
- * We arrange to look up the needed hashing info just once per series
- * of calls, assuming the record type doesn't change underneath us.
+ * We arrange to look up the needed hashing info just once per series of
+ * calls, assuming the record type doesn't change underneath us.
*/
my_extra = (RecordCompareData *) fcinfo->flinfo->fn_extra;
if (my_extra == NULL ||
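The once-per-series lookup these comments reference caches derived info in a per-call-site slot and refreshes it only when the record type changes; schematically (hypothetical types, allocation errors ignored):

    #include <stdlib.h>

    typedef struct HashInfoCache
    {
    	int			type_id;	/* record type this info was resolved for */
    	/* ... resolved per-field hash function info ... */
    } HashInfoCache;

    /* fn_extra_slot persists across calls at one call site. */
    static HashInfoCache *
    get_hash_info(void **fn_extra_slot, int type_id)
    {
    	HashInfoCache *cache = *fn_extra_slot;

    	if (cache == NULL || cache->type_id != type_id)
    	{
    		cache = malloc(sizeof(HashInfoCache));
    		cache->type_id = type_id;
    		/* ... do the expensive lookup exactly once ... */
    		*fn_extra_slot = cache;
    	}
    	return cache;
    }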
diff --git a/src/backend/utils/adt/ruleutils.c b/src/backend/utils/adt/ruleutils.c
index 881e8ec03d2..84ad62caea3 100644
--- a/src/backend/utils/adt/ruleutils.c
+++ b/src/backend/utils/adt/ruleutils.c
@@ -2980,37 +2980,38 @@ pg_get_functiondef(PG_FUNCTION_ARGS)
}
else
{
- appendStringInfoString(&buf, "AS ");
+ appendStringInfoString(&buf, "AS ");
- tmp = SysCacheGetAttr(PROCOID, proctup, Anum_pg_proc_probin, &isnull);
- if (!isnull)
- {
- simple_quote_literal(&buf, TextDatumGetCString(tmp));
- appendStringInfoString(&buf, ", "); /* assume prosrc isn't null */
- }
+ tmp = SysCacheGetAttr(PROCOID, proctup, Anum_pg_proc_probin, &isnull);
+ if (!isnull)
+ {
+ simple_quote_literal(&buf, TextDatumGetCString(tmp));
+ appendStringInfoString(&buf, ", "); /* assume prosrc isn't null */
+ }
- tmp = SysCacheGetAttr(PROCOID, proctup, Anum_pg_proc_prosrc, &isnull);
- if (isnull)
- elog(ERROR, "null prosrc");
- prosrc = TextDatumGetCString(tmp);
+ tmp = SysCacheGetAttr(PROCOID, proctup, Anum_pg_proc_prosrc, &isnull);
+ if (isnull)
+ elog(ERROR, "null prosrc");
+ prosrc = TextDatumGetCString(tmp);
- /*
- * We always use dollar quoting. Figure out a suitable delimiter.
- *
- * Since the user is likely to be editing the function body string, we
- * shouldn't use a short delimiter that he might easily create a conflict
- * with. Hence prefer "$function$"/"$procedure$", but extend if needed.
- */
- initStringInfo(&dq);
- appendStringInfoChar(&dq, '$');
- appendStringInfoString(&dq, (isfunction ? "function" : "procedure"));
- while (strstr(prosrc, dq.data) != NULL)
- appendStringInfoChar(&dq, 'x');
- appendStringInfoChar(&dq, '$');
-
- appendBinaryStringInfo(&buf, dq.data, dq.len);
- appendStringInfoString(&buf, prosrc);
- appendBinaryStringInfo(&buf, dq.data, dq.len);
+ /*
+ * We always use dollar quoting. Figure out a suitable delimiter.
+ *
+ * Since the user is likely to be editing the function body string, we
+ * shouldn't use a short delimiter that he might easily create a
+ * conflict with. Hence prefer "$function$"/"$procedure$", but extend
+ * if needed.
+ */
+ initStringInfo(&dq);
+ appendStringInfoChar(&dq, '$');
+ appendStringInfoString(&dq, (isfunction ? "function" : "procedure"));
+ while (strstr(prosrc, dq.data) != NULL)
+ appendStringInfoChar(&dq, 'x');
+ appendStringInfoChar(&dq, '$');
+
+ appendBinaryStringInfo(&buf, dq.data, dq.len);
+ appendStringInfoString(&buf, prosrc);
+ appendBinaryStringInfo(&buf, dq.data, dq.len);
}
appendStringInfoChar(&buf, '\n');
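To make the delimiter-extension loop concrete: for a body containing the literal $function$, the loop yields $functionx$, then $functionxx$, and so on until no collision remains. A standalone sketch (hypothetical helper, fixed "function" keyword):

    #include <stdio.h>
    #include <string.h>

    /* Build a dollar-quote delimiter that does not occur in src. */
    /* dqsize must be at least sizeof("$function$"), i.e. 11 bytes. */
    static void
    choose_delimiter(const char *src, char *dq, size_t dqsize)
    {
    	snprintf(dq, dqsize, "$function");
    	/* Append 'x' while the prefix still collides with the body. */
    	while (strstr(src, dq) != NULL && strlen(dq) + 2 < dqsize)
    		strcat(dq, "x");
    	strcat(dq, "$");
    }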
diff --git a/src/backend/utils/adt/selfuncs.c b/src/backend/utils/adt/selfuncs.c
index 3d4304cce7a..37ddda77240 100644
--- a/src/backend/utils/adt/selfuncs.c
+++ b/src/backend/utils/adt/selfuncs.c
@@ -3446,10 +3446,10 @@ estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows,
* XXX This has the consequence that if there's a statistics on the
* expression, we don't split it into individual Vars. This affects
* our selection of statistics in estimate_multivariate_ndistinct,
- * because it's probably better to use more accurate estimate for
- * each expression and treat them as independent, than to combine
- * estimates for the extracted variables when we don't know how that
- * relates to the expressions.
+		 * because it's probably better to use a more accurate estimate for
+		 * each expression and treat them as independent than to combine
+		 * estimates for the extracted variables when we don't know how those
+		 * relate to the expressions.
*/
examine_variable(root, groupexpr, 0, &vardata);
if (HeapTupleIsValid(vardata.statsTuple) || vardata.isunique)
@@ -4039,16 +4039,16 @@ estimate_multivariate_ndistinct(PlannerInfo *root, RelOptInfo *rel,
/*
* Process a simple Var expression, by matching it to keys
- * directly. If there's a matching expression, we'll try
- * matching it later.
+ * directly. If there's a matching expression, we'll try matching
+ * it later.
*/
if (IsA(varinfo->var, Var))
{
AttrNumber attnum = ((Var *) varinfo->var)->varattno;
/*
- * Ignore expressions on system attributes. Can't rely on
- * the bms check for negative values.
+ * Ignore expressions on system attributes. Can't rely on the
+ * bms check for negative values.
*/
if (!AttrNumberIsForUserDefinedAttr(attnum))
continue;
diff --git a/src/backend/utils/adt/timestamp.c b/src/backend/utils/adt/timestamp.c
index 3a93e92e403..79761f809c8 100644
--- a/src/backend/utils/adt/timestamp.c
+++ b/src/backend/utils/adt/timestamp.c
@@ -3847,8 +3847,8 @@ timestamp_bin(PG_FUNCTION_ARGS)
tm_delta = tm_diff - tm_diff % stride_usecs;
/*
- * Make sure the returned timestamp is at the start of the bin,
- * even if the origin is in the future.
+ * Make sure the returned timestamp is at the start of the bin, even if
+ * the origin is in the future.
*/
if (origin > timestamp && stride_usecs > 1)
tm_delta -= stride_usecs;
@@ -4025,8 +4025,8 @@ timestamptz_bin(PG_FUNCTION_ARGS)
tm_delta = tm_diff - tm_diff % stride_usecs;
/*
- * Make sure the returned timestamp is at the start of the bin,
- * even if the origin is in the future.
+ * Make sure the returned timestamp is at the start of the bin, even if
+ * the origin is in the future.
*/
if (origin > timestamp && stride_usecs > 1)
tm_delta -= stride_usecs;
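A worked instance of the binning arithmetic: with a 1-second stride (1000000 µs), origin 0, and timestamp -300000 µs, tm_diff % stride is -300000 and tm_delta comes out 0, so one stride must be stepped back to put the bin start at -1000000 µs. A minimal flooring sketch, written with an explicit negative-remainder test (the patch itself keys the adjustment off origin > timestamp):

    #include <stdint.h>

    /* Floor a timestamp (µs) to the start of its bin relative to origin. */
    static int64_t
    bin_start(int64_t timestamp, int64_t origin, int64_t stride_usecs)
    {
    	int64_t		tm_diff = timestamp - origin;
    	int64_t		tm_delta = tm_diff - tm_diff % stride_usecs;

    	/*
    	 * C division truncates toward zero, so step back one stride when
    	 * the offset is negative and not already on a bin boundary.
    	 */
    	if (tm_diff < 0 && tm_diff % stride_usecs != 0)
    		tm_delta -= stride_usecs;
    	return origin + tm_delta;
    }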
diff --git a/src/backend/utils/adt/varlena.c b/src/backend/utils/adt/varlena.c
index 0c6e5f24ba1..d2a11b1b5dd 100644
--- a/src/backend/utils/adt/varlena.c
+++ b/src/backend/utils/adt/varlena.c
@@ -307,7 +307,7 @@ byteain(PG_FUNCTION_ARGS)
size_t len = strlen(inputText);
uint64 dstlen = pg_hex_dec_len(len - 2);
- bc = dstlen + VARHDRSZ; /* maximum possible length */
+ bc = dstlen + VARHDRSZ; /* maximum possible length */
result = palloc(bc);
bc = pg_hex_decode(inputText + 2, len - 2, VARDATA(result), dstlen);
diff --git a/src/backend/utils/cache/inval.c b/src/backend/utils/cache/inval.c
index f54dc12b718..dcfd9e83893 100644
--- a/src/backend/utils/cache/inval.c
+++ b/src/backend/utils/cache/inval.c
@@ -181,7 +181,7 @@ static int numSharedInvalidMessagesArray;
static int maxSharedInvalidMessagesArray;
/* GUC storage */
-int debug_invalidate_system_caches_always = 0;
+int debug_invalidate_system_caches_always = 0;
/*
* Dynamically-registered callback functions. Current implementation
@@ -692,26 +692,27 @@ AcceptInvalidationMessages(void)
/*
* Test code to force cache flushes anytime a flush could happen.
*
- * This helps detect intermittent faults caused by code that reads a
- * cache entry and then performs an action that could invalidate the entry,
- * but rarely actually does so. This can spot issues that would otherwise
+ * This helps detect intermittent faults caused by code that reads a cache
+ * entry and then performs an action that could invalidate the entry, but
+ * rarely actually does so. This can spot issues that would otherwise
* only arise with badly timed concurrent DDL, for example.
*
- * The default debug_invalidate_system_caches_always = 0 does no forced cache flushes.
+ * The default debug_invalidate_system_caches_always = 0 does no forced
+ * cache flushes.
*
- * If used with CLOBBER_FREED_MEMORY, debug_invalidate_system_caches_always = 1
- * (CLOBBER_CACHE_ALWAYS) provides a fairly thorough test that the system
- * contains no cache-flush hazards. However, it also makes the system
- * unbelievably slow --- the regression tests take about 100 times longer
- * than normal.
+ * If used with CLOBBER_FREED_MEMORY,
+ * debug_invalidate_system_caches_always = 1 (CLOBBER_CACHE_ALWAYS)
+ * provides a fairly thorough test that the system contains no cache-flush
+ * hazards. However, it also makes the system unbelievably slow --- the
+ * regression tests take about 100 times longer than normal.
*
- * If you're a glutton for punishment, try debug_invalidate_system_caches_always = 3
- * (CLOBBER_CACHE_RECURSIVELY). This slows things by at least a factor
- * of 10000, so I wouldn't suggest trying to run the entire regression
- * tests that way. It's useful to try a few simple tests, to make sure
- * that cache reload isn't subject to internal cache-flush hazards, but
- * after you've done a few thousand recursive reloads it's unlikely
- * you'll learn more.
+	 * If you're a glutton for punishment, try
+	 * debug_invalidate_system_caches_always = 3 (CLOBBER_CACHE_RECURSIVELY).
+	 * This slows things by at least a factor of 10000, so I wouldn't suggest
+	 * trying to run the entire regression test suite that way. It's useful
+	 * to try a few simple tests, to make sure that cache reload isn't
+	 * subject to internal cache-flush hazards, but after you've done a few
+	 * thousand recursive reloads it's unlikely you'll learn more.
*/
#ifdef CLOBBER_CACHE_ENABLED
{
diff --git a/src/backend/utils/cache/plancache.c b/src/backend/utils/cache/plancache.c
index 1a0950489d7..07b01451327 100644
--- a/src/backend/utils/cache/plancache.c
+++ b/src/backend/utils/cache/plancache.c
@@ -897,8 +897,9 @@ BuildCachedPlan(CachedPlanSource *plansource, List *qlist,
* rejected a generic plan, it's possible to reach here with is_valid
* false due to an invalidation while making the generic plan. In theory
* the invalidation must be a false positive, perhaps a consequence of an
- * sinval reset event or the debug_invalidate_system_caches_always code. But for
- * safety, let's treat it as real and redo the RevalidateCachedQuery call.
+ * sinval reset event or the debug_invalidate_system_caches_always code.
+ * But for safety, let's treat it as real and redo the
+ * RevalidateCachedQuery call.
*/
if (!plansource->is_valid)
qlist = RevalidateCachedQuery(plansource, queryEnv);
diff --git a/src/backend/utils/cache/relcache.c b/src/backend/utils/cache/relcache.c
index bd88f6105ba..fd05615e769 100644
--- a/src/backend/utils/cache/relcache.c
+++ b/src/backend/utils/cache/relcache.c
@@ -1016,9 +1016,9 @@ RelationBuildDesc(Oid targetRelId, bool insertIt)
*
* When cache clobbering is enabled or when forced to by
* RECOVER_RELATION_BUILD_MEMORY=1, arrange to allocate the junk in a
- * temporary context that we'll free before returning. Make it a child
- * of caller's context so that it will get cleaned up appropriately if
- * we error out partway through.
+ * temporary context that we'll free before returning. Make it a child of
+ * caller's context so that it will get cleaned up appropriately if we
+ * error out partway through.
*/
#ifdef MAYBE_RECOVER_RELATION_BUILD_MEMORY
MemoryContext tmpcxt = NULL;
diff --git a/src/backend/utils/cache/typcache.c b/src/backend/utils/cache/typcache.c
index 4915ef59349..35c8cf7b244 100644
--- a/src/backend/utils/cache/typcache.c
+++ b/src/backend/utils/cache/typcache.c
@@ -696,7 +696,7 @@ lookup_type_cache(Oid type_id, int flags)
!record_fields_have_hashing(typentry))
hash_proc = InvalidOid;
else if (hash_proc == F_HASH_RANGE &&
- !range_element_has_hashing(typentry))
+ !range_element_has_hashing(typentry))
hash_proc = InvalidOid;
/*
@@ -742,10 +742,10 @@ lookup_type_cache(Oid type_id, int flags)
!array_element_has_extended_hashing(typentry))
hash_extended_proc = InvalidOid;
else if (hash_extended_proc == F_HASH_RECORD_EXTENDED &&
- !record_fields_have_extended_hashing(typentry))
+ !record_fields_have_extended_hashing(typentry))
hash_extended_proc = InvalidOid;
else if (hash_extended_proc == F_HASH_RANGE_EXTENDED &&
- !range_element_has_extended_hashing(typentry))
+ !range_element_has_extended_hashing(typentry))
hash_extended_proc = InvalidOid;
/*
diff --git a/src/backend/utils/error/elog.c b/src/backend/utils/error/elog.c
index 65019989cf6..a3e1c59a829 100644
--- a/src/backend/utils/error/elog.c
+++ b/src/backend/utils/error/elog.c
@@ -2717,10 +2717,10 @@ log_line_prefix(StringInfo buf, ErrorData *edata)
case 'Q':
if (padding != 0)
appendStringInfo(buf, "%*lld", padding,
- (long long) pgstat_get_my_query_id());
+ (long long) pgstat_get_my_query_id());
else
appendStringInfo(buf, "%lld",
- (long long) pgstat_get_my_query_id());
+ (long long) pgstat_get_my_query_id());
break;
default:
/* format error - ignore it */
diff --git a/src/backend/utils/mb/Unicode/convutils.pm b/src/backend/utils/mb/Unicode/convutils.pm
index adfe12b2c29..5ad38514bee 100644
--- a/src/backend/utils/mb/Unicode/convutils.pm
+++ b/src/backend/utils/mb/Unicode/convutils.pm
@@ -381,7 +381,7 @@ sub print_radix_table
header => "Dummy map, for invalid values",
min_idx => 0,
max_idx => $widest_range,
- label => "dummy map"
+ label => "dummy map"
};
###
diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c
index 0a180341c22..eb7f7181e43 100644
--- a/src/backend/utils/misc/guc.c
+++ b/src/backend/utils/misc/guc.c
@@ -2636,7 +2636,7 @@ static struct config_int ConfigureNamesInt[] =
NULL
},
&vacuum_defer_cleanup_age,
- 0, 0, 1000000, /* see ComputeXidHorizons */
+ 0, 0, 1000000, /* see ComputeXidHorizons */
NULL, NULL, NULL
},
{
@@ -3257,6 +3257,7 @@ static struct config_int ConfigureNamesInt[] =
NULL
},
&autovacuum_freeze_max_age,
+
/*
* see pg_resetwal and vacuum_failsafe_age if you change the
* upper-limit value.
@@ -3513,9 +3514,9 @@ static struct config_int ConfigureNamesInt[] =
0,
#endif
0, 5,
-#else /* not CLOBBER_CACHE_ENABLED */
+#else /* not CLOBBER_CACHE_ENABLED */
0, 0, 0,
-#endif /* not CLOBBER_CACHE_ENABLED */
+#endif /* not CLOBBER_CACHE_ENABLED */
NULL, NULL, NULL
},
diff --git a/src/backend/utils/misc/queryjumble.c b/src/backend/utils/misc/queryjumble.c
index 1bb9fe20ea8..f004a9ce8cd 100644
--- a/src/backend/utils/misc/queryjumble.c
+++ b/src/backend/utils/misc/queryjumble.c
@@ -55,8 +55,8 @@ static void RecordConstLocation(JumbleState *jstate, int location);
const char *
CleanQuerytext(const char *query, int *location, int *len)
{
- int query_location = *location;
- int query_len = *len;
+ int query_location = *location;
+ int query_len = *len;
/* First apply starting offset, unless it's -1 (unknown). */
if (query_location >= 0)
@@ -95,11 +95,12 @@ JumbleState *
JumbleQuery(Query *query, const char *querytext)
{
JumbleState *jstate = NULL;
+
if (query->utilityStmt)
{
query->queryId = compute_utility_query_id(querytext,
- query->stmt_location,
- query->stmt_len);
+ query->stmt_location,
+ query->stmt_len);
}
else
{
@@ -137,12 +138,12 @@ JumbleQuery(Query *query, const char *querytext)
static uint64
compute_utility_query_id(const char *query_text, int query_location, int query_len)
{
- uint64 queryId;
+ uint64 queryId;
const char *sql;
/*
- * Confine our attention to the relevant part of the string, if the
- * query is a portion of a multi-statement source string.
+ * Confine our attention to the relevant part of the string, if the query
+ * is a portion of a multi-statement source string.
*/
sql = CleanQuerytext(query_text, &query_location, &query_len);
@@ -150,9 +151,8 @@ compute_utility_query_id(const char *query_text, int query_location, int query_l
query_len, 0));
/*
- * If we are unlucky enough to get a hash of zero(invalid), use
- * queryID as 2 instead, queryID 1 is already in use for normal
- * statements.
+ * If we are unlucky enough to get a hash of zero (invalid), use 2 as the
+ * queryID instead; queryID 1 is already in use for normal statements.
*/
if (queryId == UINT64CONST(0))
queryId = UINT64CONST(2);
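(The reserved-value convention in that comment, distilled into a standalone sketch; the function name is invented, and the assumptions are taken from the comment itself: 0 means invalid, 1 is taken by normal statements, 2 is the utility-statement fallback.)

#include <stdint.h>

/* illustrative: remap a zero (invalid) hash for a utility statement */
static uint64_t
utility_query_id_from_hash(uint64_t hash)
{
    /* 0 = invalid, 1 = already used by normal statements, fall back to 2 */
    return (hash == UINT64_C(0)) ? UINT64_C(2) : hash;
}

int
main(void)
{
    return utility_query_id_from_hash(0) == 2 ? 0 : 1;
}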
diff --git a/src/backend/utils/sort/logtape.c b/src/backend/utils/sort/logtape.c
index 089ba2e1068..cafc0872549 100644
--- a/src/backend/utils/sort/logtape.c
+++ b/src/backend/utils/sort/logtape.c
@@ -1275,6 +1275,7 @@ LogicalTapeSetBlocks(LogicalTapeSet *lts)
for (int i = 0; i < lts->nTapes; i++)
{
LogicalTape *lt = &lts->tapes[i];
+
Assert(!lt->writing || lt->buffer == NULL);
}
#endif
diff --git a/src/backend/utils/time/snapmgr.c b/src/backend/utils/time/snapmgr.c
index 95704265b67..2968c7f7b7d 100644
--- a/src/backend/utils/time/snapmgr.c
+++ b/src/backend/utils/time/snapmgr.c
@@ -1808,8 +1808,8 @@ TransactionIdLimitedForOldSnapshots(TransactionId recentXmin,
if (ts == threshold_timestamp)
{
/*
- * Current timestamp is in same bucket as the last limit that
- * was applied. Reuse.
+ * Current timestamp is in same bucket as the last limit that was
+ * applied. Reuse.
*/
xlimit = threshold_xid;
}
@@ -1965,13 +1965,13 @@ MaintainOldSnapshotTimeMapping(TimestampTz whenTaken, TransactionId xmin)
* number of minutes of difference between ts and the current
* head_timestamp.
*
- * The distance from the current head to the current tail is one
- * less than the number of entries in the mapping, because the
- * entry at the head_offset is for 0 minutes after head_timestamp.
+ * The distance from the current head to the current tail is one less
+ * than the number of entries in the mapping, because the entry at the
+ * head_offset is for 0 minutes after head_timestamp.
*
- * The difference between these two values is the number of minutes
- * by which we need to advance the mapping, either adding new entries
- * or rotating old ones out.
+ * The difference between these two values is the number of minutes by
+ * which we need to advance the mapping, either adding new entries or
+ * rotating old ones out.
*/
distance_to_new_tail =
(ts - oldSnapshotControl->head_timestamp) / USECS_PER_MINUTE;
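(To make the arithmetic in the comment above concrete: a toy model, not snapmgr.c itself, with an invented struct. Entries are one minute apart and entry 0 sits at head_timestamp, so the head-to-tail distance is count - 1, and the shortfall against the new timestamp's distance is how far the mapping must advance.)

#include <stdint.h>

#define USECS_PER_MINUTE INT64_C(60000000)

typedef struct
{
    int64_t head_timestamp;     /* timestamp of the head entry, in usecs */
    int     count;              /* number of valid one-minute entries */
} TimeMapSketch;

/* how many one-minute entries to add or rotate out for timestamp ts */
static int
minutes_to_advance(const TimeMapSketch *map, int64_t ts)
{
    int distance_to_new_tail =
        (int) ((ts - map->head_timestamp) / USECS_PER_MINUTE);
    int distance_to_current_tail = map->count - 1;

    return distance_to_new_tail - distance_to_current_tail;
}

int
main(void)
{
    TimeMapSketch map = {0, 5}; /* head at t=0, entries for minutes 0-4 */

    /* a timestamp 7 minutes past head needs the map advanced by 3 */
    return minutes_to_advance(&map, 7 * USECS_PER_MINUTE) == 3 ? 0 : 1;
}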