Diffstat (limited to 'src/backend/access')
 src/backend/access/brin/brin.c                |  17
 src/backend/access/brin/brin_inclusion.c      |  50
 src/backend/access/brin/brin_minmax.c         |  14
 src/backend/access/brin/brin_revmap.c         |  18
 src/backend/access/brin/brin_tuple.c          |   2
 src/backend/access/gin/ginget.c               |   3
 src/backend/access/gin/ginutil.c              |   2
 src/backend/access/gist/gist.c                |   2
 src/backend/access/gist/gistscan.c            |  12
 src/backend/access/gist/gistutil.c            |   2
 src/backend/access/heap/heapam.c              | 102
 src/backend/access/heap/hio.c                 |   6
 src/backend/access/index/genam.c              |  20
 src/backend/access/nbtree/nbtinsert.c         |  13
 src/backend/access/nbtree/nbtpage.c           |  11
 src/backend/access/nbtree/nbtree.c            |   5
 src/backend/access/nbtree/nbtsearch.c         |   8
 src/backend/access/nbtree/nbtsort.c           |   2
 src/backend/access/nbtree/nbtutils.c          |   6
 src/backend/access/rmgrdesc/committsdesc.c    |   8
 src/backend/access/rmgrdesc/replorigindesc.c  |   6
 src/backend/access/rmgrdesc/xactdesc.c        |  14
 src/backend/access/spgist/spgscan.c           |   1
 src/backend/access/tablesample/bernoulli.c    |  69
 src/backend/access/tablesample/system.c       |  48
 src/backend/access/tablesample/tablesample.c  |  94
 src/backend/access/transam/commit_ts.c        |  74
 src/backend/access/transam/multixact.c        |  59
 src/backend/access/transam/parallel.c         | 188
 src/backend/access/transam/twophase.c         |  41
 src/backend/access/transam/xact.c             | 141
 src/backend/access/transam/xlog.c             | 242
 src/backend/access/transam/xloginsert.c       |  22
 src/backend/access/transam/xlogreader.c       |  27
 34 files changed, 676 insertions(+), 653 deletions(-)
diff --git a/src/backend/access/brin/brin.c b/src/backend/access/brin/brin.c
index 199512551e5..ff18b220c2b 100644
--- a/src/backend/access/brin/brin.c
+++ b/src/backend/access/brin/brin.c
@@ -387,7 +387,7 @@ bringetbitmap(PG_FUNCTION_ARGS)
*/
Assert((key->sk_flags & SK_ISNULL) ||
(key->sk_collation ==
- bdesc->bd_tupdesc->attrs[keyattno - 1]->attcollation));
+ bdesc->bd_tupdesc->attrs[keyattno - 1]->attcollation));
/* First time this column? look up consistent function */
if (consistentFn[keyattno - 1].fn_oid == InvalidOid)
@@ -523,10 +523,10 @@ brinbuildCallback(Relation index,
thisblock = ItemPointerGetBlockNumber(&htup->t_self);
/*
- * If we're in a block that belongs to a future range, summarize what we've
- * got and start afresh. Note the scan might have skipped many pages,
- * if they were devoid of live tuples; make sure to insert index tuples
- * for those too.
+ * If we're in a block that belongs to a future range, summarize what
+ * we've got and start afresh. Note the scan might have skipped many
+ * pages, if they were devoid of live tuples; make sure to insert index
+ * tuples for those too.
*/
while (thisblock > state->bs_currRangeStart + state->bs_pagesPerRange - 1)
{
@@ -660,7 +660,6 @@ brinbuild(PG_FUNCTION_ARGS)
Datum
brinbuildempty(PG_FUNCTION_ARGS)
{
-
Relation index = (Relation) PG_GETARG_POINTER(0);
Buffer metabuf;
@@ -696,7 +695,7 @@ brinbulkdelete(PG_FUNCTION_ARGS)
{
/* other arguments are not currently used */
IndexBulkDeleteResult *stats =
- (IndexBulkDeleteResult *) PG_GETARG_POINTER(1);
+ (IndexBulkDeleteResult *) PG_GETARG_POINTER(1);
/* allocate stats if first time through, else re-use existing struct */
if (stats == NULL)
@@ -714,7 +713,7 @@ brinvacuumcleanup(PG_FUNCTION_ARGS)
{
IndexVacuumInfo *info = (IndexVacuumInfo *) PG_GETARG_POINTER(0);
IndexBulkDeleteResult *stats =
- (IndexBulkDeleteResult *) PG_GETARG_POINTER(1);
+ (IndexBulkDeleteResult *) PG_GETARG_POINTER(1);
Relation heapRel;
/* No-op in ANALYZE ONLY mode */
@@ -900,7 +899,7 @@ terminate_brin_buildstate(BrinBuildState *state)
page = BufferGetPage(state->bs_currentInsertBuf);
RecordPageWithFreeSpace(state->bs_irel,
- BufferGetBlockNumber(state->bs_currentInsertBuf),
+ BufferGetBlockNumber(state->bs_currentInsertBuf),
PageGetFreeSpace(page));
ReleaseBuffer(state->bs_currentInsertBuf);
}
diff --git a/src/backend/access/brin/brin_inclusion.c b/src/backend/access/brin/brin_inclusion.c
index 1f0bc7fdb1f..803b07f10a9 100644
--- a/src/backend/access/brin/brin_inclusion.c
+++ b/src/backend/access/brin/brin_inclusion.c
@@ -61,11 +61,11 @@
* 0 - the union of the values in the block range
* 1 - whether an empty value is present in any tuple in the block range
* 2 - whether the values in the block range cannot be merged (e.g. an IPv6
- * address amidst IPv4 addresses).
+ * address amidst IPv4 addresses).
*/
-#define INCLUSION_UNION 0
-#define INCLUSION_UNMERGEABLE 1
-#define INCLUSION_CONTAINS_EMPTY 2
+#define INCLUSION_UNION 0
+#define INCLUSION_UNMERGEABLE 1
+#define INCLUSION_CONTAINS_EMPTY 2
typedef struct InclusionOpaque
@@ -294,22 +294,22 @@ brin_inclusion_consistent(PG_FUNCTION_ARGS)
unionval = column->bv_values[INCLUSION_UNION];
switch (key->sk_strategy)
{
- /*
- * Placement strategies
- *
- * These are implemented by logically negating the result of the
- * converse placement operator; for this to work, the converse operator
- * must be part of the opclass. An error will be thrown by
- * inclusion_get_strategy_procinfo() if the required strategy is not
- * part of the opclass.
- *
- * These all return false if either argument is empty, so there is
- * no need to check for empty elements.
- */
+ /*
+ * Placement strategies
+ *
+ * These are implemented by logically negating the result of the
+ * converse placement operator; for this to work, the converse
+ * operator must be part of the opclass. An error will be thrown
+ * by inclusion_get_strategy_procinfo() if the required strategy
+ * is not part of the opclass.
+ *
+ * These all return false if either argument is empty, so there is
+ * no need to check for empty elements.
+ */
case RTLeftStrategyNumber:
finfo = inclusion_get_strategy_procinfo(bdesc, attno, subtype,
- RTOverRightStrategyNumber);
+ RTOverRightStrategyNumber);
result = FunctionCall2Coll(finfo, colloid, unionval, query);
PG_RETURN_BOOL(!DatumGetBool(result));
@@ -333,7 +333,7 @@ brin_inclusion_consistent(PG_FUNCTION_ARGS)
case RTBelowStrategyNumber:
finfo = inclusion_get_strategy_procinfo(bdesc, attno, subtype,
- RTOverAboveStrategyNumber);
+ RTOverAboveStrategyNumber);
result = FunctionCall2Coll(finfo, colloid, unionval, query);
PG_RETURN_BOOL(!DatumGetBool(result));
@@ -351,7 +351,7 @@ brin_inclusion_consistent(PG_FUNCTION_ARGS)
case RTAboveStrategyNumber:
finfo = inclusion_get_strategy_procinfo(bdesc, attno, subtype,
- RTOverBelowStrategyNumber);
+ RTOverBelowStrategyNumber);
result = FunctionCall2Coll(finfo, colloid, unionval, query);
PG_RETURN_BOOL(!DatumGetBool(result));
@@ -381,8 +381,8 @@ brin_inclusion_consistent(PG_FUNCTION_ARGS)
* strategies because some elements can be contained even though
* the union is not; instead we use the overlap operator.
*
- * We check for empty elements separately as they are not merged to
- * the union but contained by everything.
+ * We check for empty elements separately as they are not merged
+ * to the union but contained by everything.
*/
case RTContainedByStrategyNumber:
@@ -400,8 +400,8 @@ brin_inclusion_consistent(PG_FUNCTION_ARGS)
/*
* Adjacent strategy
*
- * We test for overlap first but to be safe we need to call
- * the actual adjacent operator also.
+ * We test for overlap first but to be safe we need to call the
+ * actual adjacent operator also.
*
* An empty element cannot be adjacent to any other, so there is
* no need to check for it.
@@ -426,8 +426,8 @@ brin_inclusion_consistent(PG_FUNCTION_ARGS)
* the contains operator. Generally, inequality strategies do not
* make much sense for the types which will be used with the
* inclusion BRIN family of opclasses, but is is possible to
- * implement them with logical negation of the left-of and right-of
- * operators.
+ * implement them with logical negation of the left-of and
+ * right-of operators.
*
* NB: These strategies cannot be used with geometric datatypes
* that use comparison of areas! The only exception is the "same"
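The placement-strategy trick described in the brin_inclusion.c hunks above (answer "could anything in this block range be strictly left of the query?" by negating the converse operator applied to the stored union) can be seen in miniature with 1-D intervals. This is a standalone sketch, not the backend's code; the Interval type and function names are illustrative assumptions:

    #include <stdbool.h>
    #include <stdio.h>

    /* A 1-D interval standing in for any datatype an inclusion opclass
     * might summarize; the stored union is the bounding interval of
     * every value in the block range. */
    typedef struct { double lo, hi; } Interval;

    /* a &> b: "a does not extend to the left of b", the converse
     * placement operator used for the << (strictly left of) strategy. */
    static bool over_right(Interval a, Interval b) { return a.lo >= b.lo; }

    /* Consistent check for <<: any element strictly left of the query
     * would pull the union's lower bound below the query's, making
     * over_right(union, query) false.  So the range can only contain
     * matches when the negation holds. */
    static bool range_may_contain_left_of(Interval unionval, Interval query)
    {
        return !over_right(unionval, query);
    }

    int main(void)
    {
        Interval u = {10, 20};      /* union of the block range */
        Interval q = {5, 8};        /* query value */
        printf("%d\n", range_may_contain_left_of(u, q));  /* 0: skip range */
        return 0;
    }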
diff --git a/src/backend/access/brin/brin_minmax.c b/src/backend/access/brin/brin_minmax.c
index b105f980eca..7cd98887c0f 100644
--- a/src/backend/access/brin/brin_minmax.c
+++ b/src/backend/access/brin/brin_minmax.c
@@ -33,7 +33,7 @@ Datum brin_minmax_add_value(PG_FUNCTION_ARGS);
Datum brin_minmax_consistent(PG_FUNCTION_ARGS);
Datum brin_minmax_union(PG_FUNCTION_ARGS);
static FmgrInfo *minmax_get_strategy_procinfo(BrinDesc *bdesc, uint16 attno,
- Oid subtype, uint16 strategynum);
+ Oid subtype, uint16 strategynum);
Datum
@@ -209,7 +209,7 @@ brin_minmax_consistent(PG_FUNCTION_ARGS)
break;
/* max() >= scankey */
finfo = minmax_get_strategy_procinfo(bdesc, attno, subtype,
- BTGreaterEqualStrategyNumber);
+ BTGreaterEqualStrategyNumber);
matches = FunctionCall2Coll(finfo, colloid, column->bv_values[1],
value);
break;
@@ -260,10 +260,10 @@ brin_minmax_union(PG_FUNCTION_ARGS)
attr = bdesc->bd_tupdesc->attrs[attno - 1];
/*
- * Adjust "allnulls". If A doesn't have values, just copy the values
- * from B into A, and we're done. We cannot run the operators in this
- * case, because values in A might contain garbage. Note we already
- * established that B contains values.
+ * Adjust "allnulls". If A doesn't have values, just copy the values from
+ * B into A, and we're done. We cannot run the operators in this case,
+ * because values in A might contain garbage. Note we already established
+ * that B contains values.
*/
if (col_a->bv_allnulls)
{
@@ -355,7 +355,7 @@ minmax_get_strategy_procinfo(BrinDesc *bdesc, uint16 attno, Oid subtype,
strategynum, attr->atttypid, subtype, opfamily);
oprid = DatumGetObjectId(SysCacheGetAttr(AMOPSTRATEGY, tuple,
- Anum_pg_amop_amopopr, &isNull));
+ Anum_pg_amop_amopopr, &isNull));
ReleaseSysCache(tuple);
Assert(!isNull && RegProcedureIsValid(oprid));
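The "adjust allnulls" logic in the brin_minmax_union hunk above exists because a summary with allnulls set carries garbage in its value slots, so the operators must not be run against it. A minimal sketch of that rule, using a hypothetical fixed-type summary struct (names and layout are illustrative, not the backend's):

    #include <stdbool.h>

    /* A toy min/max summary for one block range.  When allnulls is set,
     * min and max are uninitialized garbage and must not be compared. */
    typedef struct { bool allnulls; double min, max; } MinMaxSummary;

    /* Merge summary B into A, following the brin_minmax_union rule. */
    static void minmax_union(MinMaxSummary *a, const MinMaxSummary *b)
    {
        if (b->allnulls)
            return;             /* B holds no values: nothing to merge */
        if (a->allnulls)
        {
            *a = *b;            /* A's min/max are garbage: copy B whole */
            return;
        }
        if (b->min < a->min)    /* both sides valid: widen A as needed */
            a->min = b->min;
        if (b->max > a->max)
            a->max = b->max;
    }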
diff --git a/src/backend/access/brin/brin_revmap.c b/src/backend/access/brin/brin_revmap.c
index 80795eca650..62d440f76b8 100644
--- a/src/backend/access/brin/brin_revmap.c
+++ b/src/backend/access/brin/brin_revmap.c
@@ -48,7 +48,7 @@ struct BrinRevmap
{
Relation rm_irel;
BlockNumber rm_pagesPerRange;
- BlockNumber rm_lastRevmapPage; /* cached from the metapage */
+ BlockNumber rm_lastRevmapPage; /* cached from the metapage */
Buffer rm_metaBuf;
Buffer rm_currBuf;
};
@@ -57,7 +57,7 @@ struct BrinRevmap
static BlockNumber revmap_get_blkno(BrinRevmap *revmap,
- BlockNumber heapBlk);
+ BlockNumber heapBlk);
static Buffer revmap_get_buffer(BrinRevmap *revmap, BlockNumber heapBlk);
static BlockNumber revmap_extend_and_get_blkno(BrinRevmap *revmap,
BlockNumber heapBlk);
@@ -110,7 +110,7 @@ brinRevmapTerminate(BrinRevmap *revmap)
void
brinRevmapExtend(BrinRevmap *revmap, BlockNumber heapBlk)
{
- BlockNumber mapBlk PG_USED_FOR_ASSERTS_ONLY;
+ BlockNumber mapBlk PG_USED_FOR_ASSERTS_ONLY;
mapBlk = revmap_extend_and_get_blkno(revmap, heapBlk);
@@ -245,7 +245,7 @@ brinGetTupleForHeapBlock(BrinRevmap *revmap, BlockNumber heapBlk,
if (ItemPointerIsValid(&previptr) && ItemPointerEquals(&previptr, iptr))
ereport(ERROR,
(errcode(ERRCODE_INDEX_CORRUPTED),
- errmsg_internal("corrupted BRIN index: inconsistent range map")));
+ errmsg_internal("corrupted BRIN index: inconsistent range map")));
previptr = *iptr;
blk = ItemPointerGetBlockNumber(iptr);
@@ -356,7 +356,7 @@ revmap_get_buffer(BrinRevmap *revmap, BlockNumber heapBlk)
static BlockNumber
revmap_extend_and_get_blkno(BrinRevmap *revmap, BlockNumber heapBlk)
{
- BlockNumber targetblk;
+ BlockNumber targetblk;
/* obtain revmap block number, skip 1 for metapage block */
targetblk = HEAPBLK_TO_REVMAP_BLK(revmap->rm_pagesPerRange, heapBlk) + 1;
@@ -445,10 +445,10 @@ revmap_physical_extend(BrinRevmap *revmap)
if (!PageIsNew(page) && !BRIN_IS_REGULAR_PAGE(page))
ereport(ERROR,
(errcode(ERRCODE_INDEX_CORRUPTED),
- errmsg("unexpected page type 0x%04X in BRIN index \"%s\" block %u",
- BrinPageType(page),
- RelationGetRelationName(irel),
- BufferGetBlockNumber(buf))));
+ errmsg("unexpected page type 0x%04X in BRIN index \"%s\" block %u",
+ BrinPageType(page),
+ RelationGetRelationName(irel),
+ BufferGetBlockNumber(buf))));
/* If the page is in use, evacuate it and restart */
if (brin_start_evacuating_page(irel, buf))
diff --git a/src/backend/access/brin/brin_tuple.c b/src/backend/access/brin/brin_tuple.c
index 22ce74a4f43..72356c066c7 100644
--- a/src/backend/access/brin/brin_tuple.c
+++ b/src/backend/access/brin/brin_tuple.c
@@ -68,7 +68,7 @@ brtuple_disk_tupdesc(BrinDesc *brdesc)
{
for (j = 0; j < brdesc->bd_info[i]->oi_nstored; j++)
TupleDescInitEntry(tupdesc, attno++, NULL,
- brdesc->bd_info[i]->oi_typcache[j]->type_id,
+ brdesc->bd_info[i]->oi_typcache[j]->type_id,
-1, 0);
}
diff --git a/src/backend/access/gin/ginget.c b/src/backend/access/gin/ginget.c
index 3e2b8b5fedf..54b2db88a68 100644
--- a/src/backend/access/gin/ginget.c
+++ b/src/backend/access/gin/ginget.c
@@ -1785,7 +1785,8 @@ gingetbitmap(PG_FUNCTION_ARGS)
/*
* Set up the scan keys, and check for unsatisfiable query.
*/
- ginFreeScanKeys(so); /* there should be no keys yet, but just to be sure */
+ ginFreeScanKeys(so); /* there should be no keys yet, but just to be
+ * sure */
ginNewScanKey(scan);
if (GinIsVoidRes(scan))
diff --git a/src/backend/access/gin/ginutil.c b/src/backend/access/gin/ginutil.c
index 445466b4477..cb4e32fe66b 100644
--- a/src/backend/access/gin/ginutil.c
+++ b/src/backend/access/gin/ginutil.c
@@ -527,7 +527,7 @@ ginoptions(PG_FUNCTION_ARGS)
static const relopt_parse_elt tab[] = {
{"fastupdate", RELOPT_TYPE_BOOL, offsetof(GinOptions, useFastUpdate)},
{"gin_pending_list_limit", RELOPT_TYPE_INT, offsetof(GinOptions,
- pendingListCleanupSize)}
+ pendingListCleanupSize)}
};
options = parseRelOptions(reloptions, validate, RELOPT_KIND_GIN,
diff --git a/src/backend/access/gist/gist.c b/src/backend/access/gist/gist.c
index 96b7701633f..0e499598a42 100644
--- a/src/backend/access/gist/gist.c
+++ b/src/backend/access/gist/gist.c
@@ -1407,7 +1407,7 @@ initGISTstate(Relation index)
/* opclasses are not required to provide a Fetch method */
if (OidIsValid(index_getprocid(index, i + 1, GIST_FETCH_PROC)))
fmgr_info_copy(&(giststate->fetchFn[i]),
- index_getprocinfo(index, i + 1, GIST_FETCH_PROC),
+ index_getprocinfo(index, i + 1, GIST_FETCH_PROC),
scanCxt);
else
giststate->fetchFn[i].fn_oid = InvalidOid;
diff --git a/src/backend/access/gist/gistscan.c b/src/backend/access/gist/gistscan.c
index beb402357c0..ad392948756 100644
--- a/src/backend/access/gist/gistscan.c
+++ b/src/backend/access/gist/gistscan.c
@@ -154,8 +154,8 @@ gistrescan(PG_FUNCTION_ARGS)
}
/*
- * If we're doing an index-only scan, on the first call, also initialize
- * a tuple descriptor to represent the returned index tuples and create a
+ * If we're doing an index-only scan, on the first call, also initialize a
+ * tuple descriptor to represent the returned index tuples and create a
* memory context to hold them during the scan.
*/
if (scan->xs_want_itup && !scan->xs_itupdesc)
@@ -169,7 +169,7 @@ gistrescan(PG_FUNCTION_ARGS)
* descriptor. Instead, construct a descriptor with the original data
* types.
*/
- natts = RelationGetNumberOfAttributes(scan->indexRelation);
+ natts = RelationGetNumberOfAttributes(scan->indexRelation);
so->giststate->fetchTupdesc = CreateTemplateTupleDesc(natts, false);
for (attno = 1; attno <= natts; attno++)
{
@@ -288,9 +288,9 @@ gistrescan(PG_FUNCTION_ARGS)
fmgr_info_copy(&(skey->sk_func), finfo, so->giststate->scanCxt);
/*
- * Look up the datatype returned by the original ordering operator.
- * GiST always uses a float8 for the distance function, but the
- * ordering operator could be anything else.
+ * Look up the datatype returned by the original ordering
+ * operator. GiST always uses a float8 for the distance function,
+ * but the ordering operator could be anything else.
*
* XXX: The distance function is only allowed to be lossy if the
* ordering operator's result type is float4 or float8. Otherwise
diff --git a/src/backend/access/gist/gistutil.c b/src/backend/access/gist/gistutil.c
index bf9fbf30a8b..7d596a3e2e6 100644
--- a/src/backend/access/gist/gistutil.c
+++ b/src/backend/access/gist/gistutil.c
@@ -583,7 +583,7 @@ gistFormTuple(GISTSTATE *giststate, Relation r,
isleaf);
cep = (GISTENTRY *)
DatumGetPointer(FunctionCall1Coll(&giststate->compressFn[i],
- giststate->supportCollation[i],
+ giststate->supportCollation[i],
PointerGetDatum(&centry)));
compatt[i] = cep->key;
}
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index cb86a4fa3e6..caacc105d25 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -80,7 +80,7 @@ bool synchronize_seqscans = true;
static HeapScanDesc heap_beginscan_internal(Relation relation,
Snapshot snapshot,
int nkeys, ScanKey key,
- bool allow_strat, bool allow_sync, bool allow_pagemode,
+ bool allow_strat, bool allow_sync, bool allow_pagemode,
bool is_bitmapscan, bool is_samplescan,
bool temp_snap);
static HeapTuple heap_prepare_insert(Relation relation, HeapTuple tup,
@@ -1366,8 +1366,8 @@ heap_beginscan_sampling(Relation relation, Snapshot snapshot,
static HeapScanDesc
heap_beginscan_internal(Relation relation, Snapshot snapshot,
int nkeys, ScanKey key,
- bool allow_strat, bool allow_sync, bool allow_pagemode,
- bool is_bitmapscan, bool is_samplescan, bool temp_snap)
+ bool allow_strat, bool allow_sync, bool allow_pagemode,
+ bool is_bitmapscan, bool is_samplescan, bool temp_snap)
{
HeapScanDesc scan;
@@ -2284,9 +2284,9 @@ heap_prepare_insert(Relation relation, HeapTuple tup, TransactionId xid,
{
/*
* For now, parallel operations are required to be strictly read-only.
- * Unlike heap_update() and heap_delete(), an insert should never create
- * a combo CID, so it might be possible to relax this restriction, but
- * not without more thought and testing.
+ * Unlike heap_update() and heap_delete(), an insert should never create a
+ * combo CID, so it might be possible to relax this restriction, but not
+ * without more thought and testing.
*/
if (IsInParallelMode())
ereport(ERROR,
@@ -2768,8 +2768,8 @@ l1:
infomask = tp.t_data->t_infomask;
/*
- * Sleep until concurrent transaction ends -- except when there's a single
- * locker and it's our own transaction. Note we don't care
+ * Sleep until concurrent transaction ends -- except when there's a
+ * single locker and it's our own transaction. Note we don't care
* which lock mode the locker has, because we need the strongest one.
*
* Before sleeping, we need to acquire tuple lock to establish our
@@ -2822,8 +2822,8 @@ l1:
else if (!TransactionIdIsCurrentTransactionId(xwait))
{
/*
- * Wait for regular transaction to end; but first, acquire
- * tuple lock.
+ * Wait for regular transaction to end; but first, acquire tuple
+ * lock.
*/
LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
heap_acquire_tuplock(relation, &(tp.t_self), LockTupleExclusive,
@@ -3336,8 +3336,8 @@ l2:
*
* Before sleeping, we need to acquire tuple lock to establish our
* priority for the tuple (see heap_lock_tuple). LockTuple will
- * release us when we are next-in-line for the tuple. Note we must not
- * acquire the tuple lock until we're sure we're going to sleep;
+ * release us when we are next-in-line for the tuple. Note we must
+ * not acquire the tuple lock until we're sure we're going to sleep;
* otherwise we're open for race conditions with other transactions
* holding the tuple lock which sleep on us.
*
@@ -3374,8 +3374,8 @@ l2:
*/
if (xmax_infomask_changed(oldtup.t_data->t_infomask,
infomask) ||
- !TransactionIdEquals(HeapTupleHeaderGetRawXmax(oldtup.t_data),
- xwait))
+ !TransactionIdEquals(HeapTupleHeaderGetRawXmax(oldtup.t_data),
+ xwait))
goto l2;
}
@@ -3425,9 +3425,9 @@ l2:
else if (HEAP_XMAX_IS_KEYSHR_LOCKED(infomask) && key_intact)
{
/*
- * If it's just a key-share locker, and we're not changing the
- * key columns, we don't need to wait for it to end; but we
- * need to preserve it as locker.
+ * If it's just a key-share locker, and we're not changing the key
+ * columns, we don't need to wait for it to end; but we need to
+ * preserve it as locker.
*/
checked_lockers = true;
locker_remains = true;
@@ -3436,8 +3436,8 @@ l2:
else
{
/*
- * Wait for regular transaction to end; but first, acquire
- * tuple lock.
+ * Wait for regular transaction to end; but first, acquire tuple
+ * lock.
*/
LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
heap_acquire_tuplock(relation, &(oldtup.t_self), *lockmode,
@@ -3454,7 +3454,7 @@ l2:
*/
if (xmax_infomask_changed(oldtup.t_data->t_infomask, infomask) ||
!TransactionIdEquals(xwait,
- HeapTupleHeaderGetRawXmax(oldtup.t_data)))
+ HeapTupleHeaderGetRawXmax(oldtup.t_data)))
goto l2;
/* Otherwise check if it committed or aborted */
@@ -3779,7 +3779,7 @@ l2:
HeapTupleClearHeapOnly(newtup);
}
- RelationPutHeapTuple(relation, newbuf, heaptup, false); /* insert new tuple */
+ RelationPutHeapTuple(relation, newbuf, heaptup, false); /* insert new tuple */
if (!already_marked)
{
@@ -4477,7 +4477,7 @@ l3:
if (require_sleep && !(infomask & HEAP_XMAX_IS_MULTI) &&
TransactionIdIsCurrentTransactionId(xwait))
{
- /* ... but if the xmax changed in the meantime, start over */
+ /* ... but if the xmax changed in the meantime, start over */
LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
!TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
@@ -4501,8 +4501,8 @@ l3:
* for the tuple. We must do this even if we are share-locking.
*
* If we are forced to "start over" below, we keep the tuple lock;
- * this arranges that we stay at the head of the line while rechecking
- * tuple state.
+ * this arranges that we stay at the head of the line while
+ * rechecking tuple state.
*/
if (!heap_acquire_tuplock(relation, tid, mode, wait_policy,
&have_tuple_lock))
@@ -4530,11 +4530,11 @@ l3:
{
case LockWaitBlock:
MultiXactIdWait((MultiXactId) xwait, status, infomask,
- relation, &tuple->t_self, XLTW_Lock, NULL);
+ relation, &tuple->t_self, XLTW_Lock, NULL);
break;
case LockWaitSkip:
if (!ConditionalMultiXactIdWait((MultiXactId) xwait,
- status, infomask, relation,
+ status, infomask, relation,
NULL))
{
result = HeapTupleWouldBlock;
@@ -4545,12 +4545,12 @@ l3:
break;
case LockWaitError:
if (!ConditionalMultiXactIdWait((MultiXactId) xwait,
- status, infomask, relation,
+ status, infomask, relation,
NULL))
ereport(ERROR,
(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
errmsg("could not obtain lock on row in relation \"%s\"",
- RelationGetRelationName(relation))));
+ RelationGetRelationName(relation))));
break;
}
@@ -4588,7 +4588,7 @@ l3:
ereport(ERROR,
(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
errmsg("could not obtain lock on row in relation \"%s\"",
- RelationGetRelationName(relation))));
+ RelationGetRelationName(relation))));
break;
}
}
@@ -4613,9 +4613,9 @@ l3:
LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
/*
- * xwait is done, but if xwait had just locked the tuple then
- * some other xact could update this tuple before we get to
- * this point. Check for xmax change, and start over if so.
+ * xwait is done, but if xwait had just locked the tuple then some
+ * other xact could update this tuple before we get to this point.
+ * Check for xmax change, and start over if so.
*/
if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
!TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
@@ -4628,9 +4628,9 @@ l3:
* Otherwise check if it committed or aborted. Note we cannot
* be here if the tuple was only locked by somebody who didn't
* conflict with us; that would have been handled above. So
- * that transaction must necessarily be gone by now. But don't
- * check for this in the multixact case, because some locker
- * transactions might still be running.
+ * that transaction must necessarily be gone by now. But
+ * don't check for this in the multixact case, because some
+ * locker transactions might still be running.
*/
UpdateXmaxHintBits(tuple->t_data, *buffer, xwait);
}
@@ -4810,8 +4810,8 @@ heap_acquire_tuplock(Relation relation, ItemPointer tid, LockTupleMode mode,
if (!ConditionalLockTupleTuplock(relation, tid, mode))
ereport(ERROR,
(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
- errmsg("could not obtain lock on row in relation \"%s\"",
- RelationGetRelationName(relation))));
+ errmsg("could not obtain lock on row in relation \"%s\"",
+ RelationGetRelationName(relation))));
break;
}
*have_tuple_lock = true;
@@ -5513,8 +5513,8 @@ heap_finish_speculative(Relation relation, HeapTuple tuple)
MarkBufferDirty(buffer);
/*
- * Replace the speculative insertion token with a real t_ctid,
- * pointing to itself like it does on regular tuples.
+ * Replace the speculative insertion token with a real t_ctid, pointing to
+ * itself like it does on regular tuples.
*/
htup->t_ctid = tuple->t_self;
@@ -6447,23 +6447,23 @@ static bool
DoesMultiXactIdConflict(MultiXactId multi, uint16 infomask,
LockTupleMode lockmode)
{
- bool allow_old;
- int nmembers;
+ bool allow_old;
+ int nmembers;
MultiXactMember *members;
- bool result = false;
- LOCKMODE wanted = tupleLockExtraInfo[lockmode].hwlock;
+ bool result = false;
+ LOCKMODE wanted = tupleLockExtraInfo[lockmode].hwlock;
allow_old = !(infomask & HEAP_LOCK_MASK) && HEAP_XMAX_IS_LOCKED_ONLY(infomask);
nmembers = GetMultiXactIdMembers(multi, &members, allow_old,
HEAP_XMAX_IS_LOCKED_ONLY(infomask));
if (nmembers >= 0)
{
- int i;
+ int i;
for (i = 0; i < nmembers; i++)
{
- TransactionId memxid;
- LOCKMODE memlockmode;
+ TransactionId memxid;
+ LOCKMODE memlockmode;
memlockmode = LOCKMODE_from_mxstatus(members[i].status);
@@ -7093,7 +7093,7 @@ log_heap_update(Relation reln, Buffer oldbuf,
{
XLogRegisterBufData(0,
((char *) newtup->t_data) + SizeofHeapTupleHeader,
- newtup->t_len - SizeofHeapTupleHeader - suffixlen);
+ newtup->t_len - SizeofHeapTupleHeader - suffixlen);
}
else
{
@@ -7105,8 +7105,8 @@ log_heap_update(Relation reln, Buffer oldbuf,
if (newtup->t_data->t_hoff - SizeofHeapTupleHeader > 0)
{
XLogRegisterBufData(0,
- ((char *) newtup->t_data) + SizeofHeapTupleHeader,
- newtup->t_data->t_hoff - SizeofHeapTupleHeader);
+ ((char *) newtup->t_data) + SizeofHeapTupleHeader,
+ newtup->t_data->t_hoff - SizeofHeapTupleHeader);
}
/* data after common prefix */
@@ -7289,8 +7289,8 @@ ExtractReplicaIdentity(Relation relation, HeapTuple tp, bool key_changed, bool *
{
/*
* The OID column can appear in an index definition, but that's
- * OK, because we always copy the OID if present (see below). Other
- * system columns may not.
+ * OK, because we always copy the OID if present (see below).
+ * Other system columns may not.
*/
if (attno == ObjectIdAttributeNumber)
continue;
diff --git a/src/backend/access/heap/hio.c b/src/backend/access/heap/hio.c
index a9f0ca35e49..6db73bf9d00 100644
--- a/src/backend/access/heap/hio.c
+++ b/src/backend/access/heap/hio.c
@@ -60,9 +60,9 @@ RelationPutHeapTuple(Relation relation,
ItemPointerSet(&(tuple->t_self), BufferGetBlockNumber(buffer), offnum);
/*
- * Insert the correct position into CTID of the stored tuple, too
- * (unless this is a speculative insertion, in which case the token is
- * held in CTID field instead)
+ * Insert the correct position into CTID of the stored tuple, too (unless
+ * this is a speculative insertion, in which case the token is held in
+ * CTID field instead)
*/
if (!token)
{
diff --git a/src/backend/access/index/genam.c b/src/backend/access/index/genam.c
index e6e4d28b74f..1043362f914 100644
--- a/src/backend/access/index/genam.c
+++ b/src/backend/access/index/genam.c
@@ -185,11 +185,11 @@ BuildIndexValueDescription(Relation indexRelation,
* Check permissions- if the user does not have access to view all of the
* key columns then return NULL to avoid leaking data.
*
- * First check if RLS is enabled for the relation. If so, return NULL
- * to avoid leaking data.
+ * First check if RLS is enabled for the relation. If so, return NULL to
+ * avoid leaking data.
*
- * Next we need to check table-level SELECT access and then, if
- * there is no access there, check column-level permissions.
+ * Next we need to check table-level SELECT access and then, if there is
+ * no access there, check column-level permissions.
*/
/*
@@ -215,18 +215,18 @@ BuildIndexValueDescription(Relation indexRelation,
if (aclresult != ACLCHECK_OK)
{
/*
- * No table-level access, so step through the columns in the
- * index and make sure the user has SELECT rights on all of them.
+ * No table-level access, so step through the columns in the index and
+ * make sure the user has SELECT rights on all of them.
*/
for (keyno = 0; keyno < idxrec->indnatts; keyno++)
{
AttrNumber attnum = idxrec->indkey.values[keyno];
/*
- * Note that if attnum == InvalidAttrNumber, then this is an
- * index based on an expression and we return no detail rather
- * than try to figure out what column(s) the expression includes
- * and if the user has SELECT rights on them.
+ * Note that if attnum == InvalidAttrNumber, then this is an index
+ * based on an expression and we return no detail rather than try
+ * to figure out what column(s) the expression includes and if the
+ * user has SELECT rights on them.
*/
if (attnum == InvalidAttrNumber ||
pg_attribute_aclcheck(indrelid, attnum, GetUserId(),
diff --git a/src/backend/access/nbtree/nbtinsert.c b/src/backend/access/nbtree/nbtinsert.c
index 4a60c5fa2c8..77c2fdf90b4 100644
--- a/src/backend/access/nbtree/nbtinsert.c
+++ b/src/backend/access/nbtree/nbtinsert.c
@@ -160,8 +160,8 @@ top:
*/
if (checkUnique != UNIQUE_CHECK_NO)
{
- TransactionId xwait;
- uint32 speculativeToken;
+ TransactionId xwait;
+ uint32 speculativeToken;
offset = _bt_binsrch(rel, buf, natts, itup_scankey, false);
xwait = _bt_check_unique(rel, itup, heapRel, buf, offset, itup_scankey,
@@ -171,9 +171,10 @@ top:
{
/* Have to wait for the other guy ... */
_bt_relbuf(rel, buf);
+
/*
- * If it's a speculative insertion, wait for it to finish (ie.
- * to go ahead with the insertion, or kill the tuple). Otherwise
+ * If it's a speculative insertion, wait for it to finish (ie. to
+ * go ahead with the insertion, or kill the tuple). Otherwise
* wait for the transaction to finish as usual.
*/
if (speculativeToken)
@@ -417,8 +418,8 @@ _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel,
(errcode(ERRCODE_UNIQUE_VIOLATION),
errmsg("duplicate key value violates unique constraint \"%s\"",
RelationGetRelationName(rel)),
- key_desc ? errdetail("Key %s already exists.",
- key_desc) : 0,
+ key_desc ? errdetail("Key %s already exists.",
+ key_desc) : 0,
errtableconstraint(heapRel,
RelationGetRelationName(rel))));
}
diff --git a/src/backend/access/nbtree/nbtpage.c b/src/backend/access/nbtree/nbtpage.c
index 0f4128253f4..6e65db91eb5 100644
--- a/src/backend/access/nbtree/nbtpage.c
+++ b/src/backend/access/nbtree/nbtpage.c
@@ -1233,6 +1233,7 @@ _bt_pagedel(Relation rel, Buffer buf)
lbuf = _bt_getbuf(rel, leftsib, BT_READ);
lpage = BufferGetPage(lbuf);
lopaque = (BTPageOpaque) PageGetSpecialPointer(lpage);
+
/*
* If the left sibling is split again by another backend,
* after we released the lock, we know that the first
@@ -1345,11 +1346,11 @@ _bt_mark_page_halfdead(Relation rel, Buffer leafbuf, BTStack stack)
leafrightsib = opaque->btpo_next;
/*
- * Before attempting to lock the parent page, check that the right
- * sibling is not in half-dead state. A half-dead right sibling would
- * have no downlink in the parent, which would be highly confusing later
- * when we delete the downlink that follows the current page's downlink.
- * (I believe the deletion would work correctly, but it would fail the
+ * Before attempting to lock the parent page, check that the right sibling
+ * is not in half-dead state. A half-dead right sibling would have no
+ * downlink in the parent, which would be highly confusing later when we
+ * delete the downlink that follows the current page's downlink. (I
+ * believe the deletion would work correctly, but it would fail the
* cross-check we make that the following downlink points to the right
* sibling of the delete page.)
*/
diff --git a/src/backend/access/nbtree/nbtree.c b/src/backend/access/nbtree/nbtree.c
index c2d52faa960..9431ab5d042 100644
--- a/src/backend/access/nbtree/nbtree.c
+++ b/src/backend/access/nbtree/nbtree.c
@@ -40,9 +40,8 @@ typedef struct
BTSpool *spool;
/*
- * spool2 is needed only when the index is a unique index. Dead tuples
- * are put into spool2 instead of spool in order to avoid uniqueness
- * check.
+ * spool2 is needed only when the index is a unique index. Dead tuples are
+ * put into spool2 instead of spool in order to avoid uniqueness check.
*/
BTSpool *spool2;
double indtuples;
diff --git a/src/backend/access/nbtree/nbtsearch.c b/src/backend/access/nbtree/nbtsearch.c
index cfb1d64f86a..d69a0577a87 100644
--- a/src/backend/access/nbtree/nbtsearch.c
+++ b/src/backend/access/nbtree/nbtsearch.c
@@ -1027,10 +1027,10 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
offnum = OffsetNumberPrev(offnum);
/*
- * By here the scan position is now set for the first key. If all
- * further tuples are expected to match we set the SK_BT_MATCHED flag
- * to avoid re-checking the scan key later. This is a big win for
- * slow key matches though is still significant even for fast datatypes.
+ * By here the scan position is now set for the first key. If all further
+ * tuples are expected to match we set the SK_BT_MATCHED flag to avoid
+ * re-checking the scan key later. This is a big win for slow key matches
+ * though is still significant even for fast datatypes.
*/
switch (startKeys[0]->sk_strategy)
{
diff --git a/src/backend/access/nbtree/nbtsort.c b/src/backend/access/nbtree/nbtsort.c
index 625f490af80..f95f67ad4b5 100644
--- a/src/backend/access/nbtree/nbtsort.c
+++ b/src/backend/access/nbtree/nbtsort.c
@@ -742,7 +742,7 @@ _bt_load(BTWriteState *wstate, BTSpool *btspool, BTSpool *btspool2)
{
for (i = 1; i <= keysz; i++)
{
- SortSupport entry;
+ SortSupport entry;
Datum attrDatum1,
attrDatum2;
bool isNull1,
diff --git a/src/backend/access/nbtree/nbtutils.c b/src/backend/access/nbtree/nbtutils.c
index d1589f05eff..91331bad651 100644
--- a/src/backend/access/nbtree/nbtutils.c
+++ b/src/backend/access/nbtree/nbtutils.c
@@ -1430,8 +1430,8 @@ _bt_checkkeys(IndexScanDesc scan,
Datum test;
/*
- * If the scan key has already matched we can skip this key, as
- * long as the index tuple does not contain NULL values.
+ * If the scan key has already matched we can skip this key, as long
+ * as the index tuple does not contain NULL values.
*/
if (key->sk_flags & SK_BT_MATCHED && !IndexTupleHasNulls(tuple))
continue;
@@ -1740,7 +1740,7 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, TupleDesc tupdesc,
* any items from the page, and so there is no need to search left from the
* recorded offset. (This observation also guarantees that the item is still
* the right one to delete, which might otherwise be questionable since heap
- * TIDs can get recycled.) This holds true even if the page has been modified
+ * TIDs can get recycled.) This holds true even if the page has been modified
* by inserts and page splits, so there is no need to consult the LSN.
*
* If the pin was released after reading the page, then we re-read it. If it
diff --git a/src/backend/access/rmgrdesc/committsdesc.c b/src/backend/access/rmgrdesc/committsdesc.c
index 088fd1bc8b6..59975eae9a6 100644
--- a/src/backend/access/rmgrdesc/committsdesc.c
+++ b/src/backend/access/rmgrdesc/committsdesc.c
@@ -1,14 +1,14 @@
/*-------------------------------------------------------------------------
*
* committsdesc.c
- * rmgr descriptor routines for access/transam/commit_ts.c
+ * rmgr descriptor routines for access/transam/commit_ts.c
*
* Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
- * src/backend/access/rmgrdesc/committsdesc.c
+ * src/backend/access/rmgrdesc/committsdesc.c
*
*-------------------------------------------------------------------------
*/
@@ -41,7 +41,7 @@ commit_ts_desc(StringInfo buf, XLogReaderState *record)
else if (info == COMMIT_TS_SETTS)
{
xl_commit_ts_set *xlrec = (xl_commit_ts_set *) rec;
- int nsubxids;
+ int nsubxids;
appendStringInfo(buf, "set %s/%d for: %u",
timestamptz_to_str(xlrec->timestamp),
@@ -51,7 +51,7 @@ commit_ts_desc(StringInfo buf, XLogReaderState *record)
sizeof(TransactionId));
if (nsubxids > 0)
{
- int i;
+ int i;
TransactionId *subxids;
subxids = palloc(sizeof(TransactionId) * nsubxids);
diff --git a/src/backend/access/rmgrdesc/replorigindesc.c b/src/backend/access/rmgrdesc/replorigindesc.c
index 19bae9a0f84..60cf0f679db 100644
--- a/src/backend/access/rmgrdesc/replorigindesc.c
+++ b/src/backend/access/rmgrdesc/replorigindesc.c
@@ -1,13 +1,13 @@
/*-------------------------------------------------------------------------
*
* replorigindesc.c
- * rmgr descriptor routines for replication/logical/replication_origin.c
+ * rmgr descriptor routines for replication/logical/replication_origin.c
*
* Portions Copyright (c) 2015, PostgreSQL Global Development Group
*
*
* IDENTIFICATION
- * src/backend/access/rmgrdesc/replorigindesc.c
+ * src/backend/access/rmgrdesc/replorigindesc.c
*
*-------------------------------------------------------------------------
*/
@@ -26,6 +26,7 @@ replorigin_desc(StringInfo buf, XLogReaderState *record)
case XLOG_REPLORIGIN_SET:
{
xl_replorigin_set *xlrec;
+
xlrec = (xl_replorigin_set *) rec;
appendStringInfo(buf, "set %u; lsn %X/%X; force: %d",
@@ -38,6 +39,7 @@ replorigin_desc(StringInfo buf, XLogReaderState *record)
case XLOG_REPLORIGIN_DROP:
{
xl_replorigin_drop *xlrec;
+
xlrec = (xl_replorigin_drop *) rec;
appendStringInfo(buf, "drop %u", xlrec->node_id);
diff --git a/src/backend/access/rmgrdesc/xactdesc.c b/src/backend/access/rmgrdesc/xactdesc.c
index 793f9bb51fa..7b5f9830507 100644
--- a/src/backend/access/rmgrdesc/xactdesc.c
+++ b/src/backend/access/rmgrdesc/xactdesc.c
@@ -37,7 +37,8 @@ ParseCommitRecord(uint8 info, xl_xact_commit *xlrec, xl_xact_parsed_commit *pars
memset(parsed, 0, sizeof(*parsed));
- parsed->xinfo = 0; /* default, if no XLOG_XACT_HAS_INFO is present */
+ parsed->xinfo = 0; /* default, if no XLOG_XACT_HAS_INFO is
+ * present */
parsed->xact_time = xlrec->xact_time;
@@ -62,7 +63,7 @@ ParseCommitRecord(uint8 info, xl_xact_commit *xlrec, xl_xact_parsed_commit *pars
if (parsed->xinfo & XACT_XINFO_HAS_SUBXACTS)
{
- xl_xact_subxacts *xl_subxacts = (xl_xact_subxacts *) data;
+ xl_xact_subxacts *xl_subxacts = (xl_xact_subxacts *) data;
parsed->nsubxacts = xl_subxacts->nsubxacts;
parsed->subxacts = xl_subxacts->subxacts;
@@ -123,7 +124,8 @@ ParseAbortRecord(uint8 info, xl_xact_abort *xlrec, xl_xact_parsed_abort *parsed)
memset(parsed, 0, sizeof(*parsed));
- parsed->xinfo = 0; /* default, if no XLOG_XACT_HAS_INFO is present */
+ parsed->xinfo = 0; /* default, if no XLOG_XACT_HAS_INFO is
+ * present */
parsed->xact_time = xlrec->xact_time;
@@ -138,7 +140,7 @@ ParseAbortRecord(uint8 info, xl_xact_abort *xlrec, xl_xact_parsed_abort *parsed)
if (parsed->xinfo & XACT_XINFO_HAS_SUBXACTS)
{
- xl_xact_subxacts *xl_subxacts = (xl_xact_subxacts *) data;
+ xl_xact_subxacts *xl_subxacts = (xl_xact_subxacts *) data;
parsed->nsubxacts = xl_subxacts->nsubxacts;
parsed->subxacts = xl_subxacts->subxacts;
@@ -236,8 +238,8 @@ xact_desc_commit(StringInfo buf, uint8 info, xl_xact_commit *xlrec, RepOriginId
{
appendStringInfo(buf, "; origin: node %u, lsn %X/%X, at %s",
origin_id,
- (uint32)(parsed.origin_lsn >> 32),
- (uint32)parsed.origin_lsn,
+ (uint32) (parsed.origin_lsn >> 32),
+ (uint32) parsed.origin_lsn,
timestamptz_to_str(parsed.origin_timestamp));
}
}
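The xactdesc.c hunk above only moves whitespace around the casts, but the casts themselves show the customary way a 64-bit WAL position is rendered as two 32-bit halves in "%X/%X" form. A self-contained sketch of that idiom (the helper name is illustrative):

    #include <inttypes.h>
    #include <stdio.h>

    /* Print a 64-bit LSN as high/low 32-bit halves, exactly the split
     * the (uint32) casts in the hunk above perform. */
    static void print_lsn(uint64_t lsn)
    {
        printf("%" PRIX32 "/%" PRIX32 "\n",
               (uint32_t) (lsn >> 32), (uint32_t) lsn);
    }

    int main(void)
    {
        print_lsn(0x16B374D48ULL);   /* prints 1/6B374D48 */
        return 0;
    }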
diff --git a/src/backend/access/spgist/spgscan.c b/src/backend/access/spgist/spgscan.c
index 06c6944fc72..8a0d9098c5e 100644
--- a/src/backend/access/spgist/spgscan.c
+++ b/src/backend/access/spgist/spgscan.c
@@ -658,6 +658,7 @@ Datum
spgcanreturn(PG_FUNCTION_ARGS)
{
Relation index = (Relation) PG_GETARG_POINTER(0);
+
/* int i = PG_GETARG_INT32(1); */
SpGistCache *cache;
diff --git a/src/backend/access/tablesample/bernoulli.c b/src/backend/access/tablesample/bernoulli.c
index c91f3f593e5..563a9168f0f 100644
--- a/src/backend/access/tablesample/bernoulli.c
+++ b/src/backend/access/tablesample/bernoulli.c
@@ -27,13 +27,15 @@
/* tsdesc */
typedef struct
{
- uint32 seed; /* random seed */
- BlockNumber startblock; /* starting block, we use ths for syncscan support */
+ uint32 seed; /* random seed */
+ BlockNumber startblock; /* starting block, we use ths for syncscan
+ * support */
BlockNumber nblocks; /* number of blocks */
BlockNumber blockno; /* current block */
- float4 probability; /* probabilty that tuple will be returned (0.0-1.0) */
+ float4 probability; /* probabilty that tuple will be returned
+ * (0.0-1.0) */
OffsetNumber lt; /* last tuple returned from current block */
- SamplerRandomState randstate; /* random generator tsdesc */
+ SamplerRandomState randstate; /* random generator tsdesc */
} BernoulliSamplerData;
/*
@@ -42,10 +44,10 @@ typedef struct
Datum
tsm_bernoulli_init(PG_FUNCTION_ARGS)
{
- TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
- uint32 seed = PG_GETARG_UINT32(1);
- float4 percent = PG_ARGISNULL(2) ? -1 : PG_GETARG_FLOAT4(2);
- HeapScanDesc scan = tsdesc->heapScan;
+ TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
+ uint32 seed = PG_GETARG_UINT32(1);
+ float4 percent = PG_ARGISNULL(2) ? -1 : PG_GETARG_FLOAT4(2);
+ HeapScanDesc scan = tsdesc->heapScan;
BernoulliSamplerData *sampler;
if (percent < 0 || percent > 100)
@@ -77,14 +79,13 @@ tsm_bernoulli_init(PG_FUNCTION_ARGS)
Datum
tsm_bernoulli_nextblock(PG_FUNCTION_ARGS)
{
- TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
- BernoulliSamplerData *sampler =
- (BernoulliSamplerData *) tsdesc->tsmdata;
+ TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
+ BernoulliSamplerData *sampler =
+ (BernoulliSamplerData *) tsdesc->tsmdata;
/*
- * Bernoulli sampling scans all blocks on the table and supports
- * syncscan so loop from startblock to startblock instead of
- * from 0 to nblocks.
+ * Bernoulli sampling scans all blocks on the table and supports syncscan
+ * so loop from startblock to startblock instead of from 0 to nblocks.
*/
if (sampler->blockno == InvalidBlockNumber)
sampler->blockno = sampler->startblock;
@@ -116,7 +117,7 @@ tsm_bernoulli_nextblock(PG_FUNCTION_ARGS)
* tuples have same probability of being returned the visible and invisible
* tuples will be returned in same ratio as they have in the actual table.
* This means that there is no skew towards either visible or invisible tuples
- * and the number returned visible tuples to from the executor node is the
+ * and the number returned visible tuples to from the executor node is the
* fraction of visible tuples which was specified in input.
*
* This is faster than doing the coinflip in the examinetuple because we don't
@@ -128,12 +129,12 @@ tsm_bernoulli_nextblock(PG_FUNCTION_ARGS)
Datum
tsm_bernoulli_nexttuple(PG_FUNCTION_ARGS)
{
- TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
- OffsetNumber maxoffset = PG_GETARG_UINT16(2);
- BernoulliSamplerData *sampler =
- (BernoulliSamplerData *) tsdesc->tsmdata;
- OffsetNumber tupoffset = sampler->lt;
- float4 probability = sampler->probability;
+ TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
+ OffsetNumber maxoffset = PG_GETARG_UINT16(2);
+ BernoulliSamplerData *sampler =
+ (BernoulliSamplerData *) tsdesc->tsmdata;
+ OffsetNumber tupoffset = sampler->lt;
+ float4 probability = sampler->probability;
if (tupoffset == InvalidOffsetNumber)
tupoffset = FirstOffsetNumber;
@@ -142,8 +143,8 @@ tsm_bernoulli_nexttuple(PG_FUNCTION_ARGS)
/*
* Loop over tuple offsets until the random generator returns value that
- * is within the probability of returning the tuple or until we reach
- * end of the block.
+ * is within the probability of returning the tuple or until we reach end
+ * of the block.
*
* (This is our implementation of bernoulli trial)
*/
@@ -183,9 +184,9 @@ tsm_bernoulli_end(PG_FUNCTION_ARGS)
Datum
tsm_bernoulli_reset(PG_FUNCTION_ARGS)
{
- TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
- BernoulliSamplerData *sampler =
- (BernoulliSamplerData *) tsdesc->tsmdata;
+ TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
+ BernoulliSamplerData *sampler =
+ (BernoulliSamplerData *) tsdesc->tsmdata;
sampler->blockno = InvalidBlockNumber;
sampler->lt = InvalidOffsetNumber;
@@ -200,14 +201,14 @@ tsm_bernoulli_reset(PG_FUNCTION_ARGS)
Datum
tsm_bernoulli_cost(PG_FUNCTION_ARGS)
{
- PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
- Path *path = (Path *) PG_GETARG_POINTER(1);
- RelOptInfo *baserel = (RelOptInfo *) PG_GETARG_POINTER(2);
- List *args = (List *) PG_GETARG_POINTER(3);
- BlockNumber *pages = (BlockNumber *) PG_GETARG_POINTER(4);
- double *tuples = (double *) PG_GETARG_POINTER(5);
- Node *pctnode;
- float4 samplesize;
+ PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
+ Path *path = (Path *) PG_GETARG_POINTER(1);
+ RelOptInfo *baserel = (RelOptInfo *) PG_GETARG_POINTER(2);
+ List *args = (List *) PG_GETARG_POINTER(3);
+ BlockNumber *pages = (BlockNumber *) PG_GETARG_POINTER(4);
+ double *tuples = (double *) PG_GETARG_POINTER(5);
+ Node *pctnode;
+ float4 samplesize;
*pages = baserel->pages;
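The nexttuple loop the bernoulli.c hunks describe (one coin flip per tuple offset, advancing until a flip lands under the acceptance probability or the block is exhausted) reduces to a few lines. A standalone sketch assuming POSIX rand_r(); offset 0 plays the role of InvalidOffsetNumber here, and the names are illustrative:

    #include <stdlib.h>

    /* One Bernoulli trial per offset: keep flipping until the generator
     * lands under the acceptance probability or we run off the block. */
    static unsigned next_sampled_offset(unsigned tupoffset, unsigned maxoffset,
                                        double probability, unsigned *seed)
    {
        while (tupoffset <= maxoffset)
        {
            double coin = (double) rand_r(seed) / (double) RAND_MAX;

            if (coin < probability)
                return tupoffset;   /* trial succeeded: sample this tuple */
            tupoffset++;            /* trial failed: on to the next offset */
        }
        return 0;                   /* block exhausted (InvalidOffsetNumber) */
    }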
diff --git a/src/backend/access/tablesample/system.c b/src/backend/access/tablesample/system.c
index 1412e511faf..1d834369a4b 100644
--- a/src/backend/access/tablesample/system.c
+++ b/src/backend/access/tablesample/system.c
@@ -31,9 +31,9 @@
typedef struct
{
BlockSamplerData bs;
- uint32 seed; /* random seed */
+ uint32 seed; /* random seed */
BlockNumber nblocks; /* number of block in relation */
- int samplesize; /* number of blocks to return */
+ int samplesize; /* number of blocks to return */
OffsetNumber lt; /* last tuple returned from current block */
} SystemSamplerData;
@@ -44,11 +44,11 @@ typedef struct
Datum
tsm_system_init(PG_FUNCTION_ARGS)
{
- TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
- uint32 seed = PG_GETARG_UINT32(1);
- float4 percent = PG_ARGISNULL(2) ? -1 : PG_GETARG_FLOAT4(2);
- HeapScanDesc scan = tsdesc->heapScan;
- SystemSamplerData *sampler;
+ TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
+ uint32 seed = PG_GETARG_UINT32(1);
+ float4 percent = PG_ARGISNULL(2) ? -1 : PG_GETARG_FLOAT4(2);
+ HeapScanDesc scan = tsdesc->heapScan;
+ SystemSamplerData *sampler;
if (percent < 0 || percent > 100)
ereport(ERROR,
@@ -80,9 +80,9 @@ tsm_system_init(PG_FUNCTION_ARGS)
Datum
tsm_system_nextblock(PG_FUNCTION_ARGS)
{
- TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
- SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
- BlockNumber blockno;
+ TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
+ SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
+ BlockNumber blockno;
if (!BlockSampler_HasMore(&sampler->bs))
PG_RETURN_UINT32(InvalidBlockNumber);
@@ -99,10 +99,10 @@ tsm_system_nextblock(PG_FUNCTION_ARGS)
Datum
tsm_system_nexttuple(PG_FUNCTION_ARGS)
{
- TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
- OffsetNumber maxoffset = PG_GETARG_UINT16(2);
- SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
- OffsetNumber tupoffset = sampler->lt;
+ TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
+ OffsetNumber maxoffset = PG_GETARG_UINT16(2);
+ SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
+ OffsetNumber tupoffset = sampler->lt;
if (tupoffset == InvalidOffsetNumber)
tupoffset = FirstOffsetNumber;
@@ -136,8 +136,8 @@ tsm_system_end(PG_FUNCTION_ARGS)
Datum
tsm_system_reset(PG_FUNCTION_ARGS)
{
- TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
- SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
+ TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
+ SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
sampler->lt = InvalidOffsetNumber;
BlockSampler_Init(&sampler->bs, sampler->nblocks, sampler->samplesize,
@@ -152,14 +152,14 @@ tsm_system_reset(PG_FUNCTION_ARGS)
Datum
tsm_system_cost(PG_FUNCTION_ARGS)
{
- PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
- Path *path = (Path *) PG_GETARG_POINTER(1);
- RelOptInfo *baserel = (RelOptInfo *) PG_GETARG_POINTER(2);
- List *args = (List *) PG_GETARG_POINTER(3);
- BlockNumber *pages = (BlockNumber *) PG_GETARG_POINTER(4);
- double *tuples = (double *) PG_GETARG_POINTER(5);
- Node *pctnode;
- float4 samplesize;
+ PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
+ Path *path = (Path *) PG_GETARG_POINTER(1);
+ RelOptInfo *baserel = (RelOptInfo *) PG_GETARG_POINTER(2);
+ List *args = (List *) PG_GETARG_POINTER(3);
+ BlockNumber *pages = (BlockNumber *) PG_GETARG_POINTER(4);
+ double *tuples = (double *) PG_GETARG_POINTER(5);
+ Node *pctnode;
+ float4 samplesize;
pctnode = linitial(args);
pctnode = estimate_expression_value(root, pctnode);
diff --git a/src/backend/access/tablesample/tablesample.c b/src/backend/access/tablesample/tablesample.c
index ef55d062e75..3398d02f854 100644
--- a/src/backend/access/tablesample/tablesample.c
+++ b/src/backend/access/tablesample/tablesample.c
@@ -1,14 +1,14 @@
/*-------------------------------------------------------------------------
*
* tablesample.c
- * TABLESAMPLE internal API
+ * TABLESAMPLE internal API
*
* Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
- * src/backend/access/tablesample/tablesample.c
+ * src/backend/access/tablesample/tablesample.c
*
* TABLESAMPLE is the SQL standard clause for sampling the relations.
*
@@ -53,7 +53,7 @@ tablesample_init(SampleScanState *scanstate, TableSampleClause *tablesample)
List *args = tablesample->args;
ListCell *arg;
ExprContext *econtext = scanstate->ss.ps.ps_ExprContext;
- TableSampleDesc *tsdesc = (TableSampleDesc *) palloc0(sizeof(TableSampleDesc));
+ TableSampleDesc *tsdesc = (TableSampleDesc *) palloc0(sizeof(TableSampleDesc));
/* Load functions */
fmgr_info(tablesample->tsminit, &(tsdesc->tsminit));
@@ -78,21 +78,21 @@ tablesample_init(SampleScanState *scanstate, TableSampleClause *tablesample)
fcinfo.argnull[0] = false;
/*
- * Second arg for init function is always REPEATABLE
- * When tablesample->repeatable is NULL then REPEATABLE clause was not
- * specified.
- * When specified, the expression cannot evaluate to NULL.
+ * Second arg for init function is always REPEATABLE When
+ * tablesample->repeatable is NULL then REPEATABLE clause was not
+ * specified. When specified, the expression cannot evaluate to NULL.
*/
if (tablesample->repeatable)
{
ExprState *argstate = ExecInitExpr((Expr *) tablesample->repeatable,
(PlanState *) scanstate);
+
fcinfo.arg[1] = ExecEvalExpr(argstate, econtext,
&fcinfo.argnull[1], NULL);
if (fcinfo.argnull[1])
ereport(ERROR,
(errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED),
- errmsg("REPEATABLE clause must be NOT NULL numeric value")));
+ errmsg("REPEATABLE clause must be NOT NULL numeric value")));
}
else
{
@@ -130,15 +130,15 @@ tablesample_init(SampleScanState *scanstate, TableSampleClause *tablesample)
HeapTuple
tablesample_getnext(TableSampleDesc *desc)
{
- HeapScanDesc scan = desc->heapScan;
- HeapTuple tuple = &(scan->rs_ctup);
- bool pagemode = scan->rs_pageatatime;
- BlockNumber blockno;
- Page page;
- bool page_all_visible;
- ItemId itemid;
- OffsetNumber tupoffset,
- maxoffset;
+ HeapScanDesc scan = desc->heapScan;
+ HeapTuple tuple = &(scan->rs_ctup);
+ bool pagemode = scan->rs_pageatatime;
+ BlockNumber blockno;
+ Page page;
+ bool page_all_visible;
+ ItemId itemid;
+ OffsetNumber tupoffset,
+ maxoffset;
if (!scan->rs_inited)
{
@@ -152,7 +152,7 @@ tablesample_getnext(TableSampleDesc *desc)
return NULL;
}
blockno = DatumGetInt32(FunctionCall1(&desc->tsmnextblock,
- PointerGetDatum(desc)));
+ PointerGetDatum(desc)));
if (!BlockNumberIsValid(blockno))
{
tuple->t_data = NULL;
@@ -184,14 +184,14 @@ tablesample_getnext(TableSampleDesc *desc)
CHECK_FOR_INTERRUPTS();
tupoffset = DatumGetUInt16(FunctionCall3(&desc->tsmnexttuple,
- PointerGetDatum(desc),
- UInt32GetDatum(blockno),
- UInt16GetDatum(maxoffset)));
+ PointerGetDatum(desc),
+ UInt32GetDatum(blockno),
+ UInt16GetDatum(maxoffset)));
if (OffsetNumberIsValid(tupoffset))
{
- bool visible;
- bool found;
+ bool visible;
+ bool found;
/* Skip invalid tuple pointers. */
itemid = PageGetItemId(page, tupoffset);
@@ -208,8 +208,8 @@ tablesample_getnext(TableSampleDesc *desc)
visible = SampleTupleVisible(tuple, tupoffset, scan);
/*
- * Let the sampling method examine the actual tuple and decide if we
- * should return it.
+ * Let the sampling method examine the actual tuple and decide if
+ * we should return it.
*
* Note that we let it examine even invisible tuples for
* statistical purposes, but not return them since user should
@@ -218,10 +218,10 @@ tablesample_getnext(TableSampleDesc *desc)
if (OidIsValid(desc->tsmexaminetuple.fn_oid))
{
found = DatumGetBool(FunctionCall4(&desc->tsmexaminetuple,
- PointerGetDatum(desc),
- UInt32GetDatum(blockno),
- PointerGetDatum(tuple),
- BoolGetDatum(visible)));
+ PointerGetDatum(desc),
+ UInt32GetDatum(blockno),
+ PointerGetDatum(tuple),
+ BoolGetDatum(visible)));
/* Should not happen if sampling method is well written. */
if (found && !visible)
elog(ERROR, "Sampling method wanted to return invisible tuple");
@@ -248,19 +248,19 @@ tablesample_getnext(TableSampleDesc *desc)
LockBuffer(scan->rs_cbuf, BUFFER_LOCK_UNLOCK);
blockno = DatumGetInt32(FunctionCall1(&desc->tsmnextblock,
- PointerGetDatum(desc)));
+ PointerGetDatum(desc)));
/*
- * Report our new scan position for synchronization purposes. We
- * don't do that when moving backwards, however. That would just
- * mess up any other forward-moving scanners.
+ * Report our new scan position for synchronization purposes. We don't
+ * do that when moving backwards, however. That would just mess up any
+ * other forward-moving scanners.
*
- * Note: we do this before checking for end of scan so that the
- * final state of the position hint is back at the start of the
- * rel. That's not strictly necessary, but otherwise when you run
- * the same query multiple times the starting position would shift
- * a little bit backwards on every invocation, which is confusing.
- * We don't guarantee any specific ordering in general, though.
+ * Note: we do this before checking for end of scan so that the final
+ * state of the position hint is back at the start of the rel. That's
+ * not strictly necessary, but otherwise when you run the same query
+ * multiple times the starting position would shift a little bit
+ * backwards on every invocation, which is confusing. We don't
+ * guarantee any specific ordering in general, though.
*/
if (scan->rs_syncscan)
ss_report_location(scan->rs_rd, BlockNumberIsValid(blockno) ?
@@ -321,25 +321,25 @@ SampleTupleVisible(HeapTuple tuple, OffsetNumber tupoffset, HeapScanDesc scan)
{
/*
* If this scan is reading whole pages at a time, there is already
- * visibility info present in rs_vistuples so we can just search it
- * for the tupoffset.
+ * visibility info present in rs_vistuples so we can just search it for
+ * the tupoffset.
*/
if (scan->rs_pageatatime)
{
- int start = 0,
- end = scan->rs_ntuples - 1;
+ int start = 0,
+ end = scan->rs_ntuples - 1;
/*
* Do the binary search over rs_vistuples, it's already sorted by
* OffsetNumber so we don't need to do any sorting ourselves here.
*
- * We could use bsearch() here but it's slower for integers because
- * of the function call overhead and because it needs boiler plate code
+ * We could use bsearch() here but it's slower for integers because of
+ * the function call overhead and because it needs boiler plate code
* it would not save us anything code-wise anyway.
*/
while (start <= end)
{
- int mid = start + (end - start) / 2;
+ int mid = start + (end - start) / 2;
OffsetNumber curoffset = scan->rs_vistuples[mid];
if (curoffset == tupoffset)
@@ -358,7 +358,7 @@ SampleTupleVisible(HeapTuple tuple, OffsetNumber tupoffset, HeapScanDesc scan)
Snapshot snapshot = scan->rs_snapshot;
Buffer buffer = scan->rs_cbuf;
- bool visible = HeapTupleSatisfiesVisibility(tuple, snapshot, buffer);
+ bool visible = HeapTupleSatisfiesVisibility(tuple, snapshot, buffer);
CheckForSerializableConflictOut(visible, scan->rs_rd, tuple, buffer,
snapshot);
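The page-at-a-time branch of SampleTupleVisible, per the hunk above, hand-rolls a binary search over the sorted rs_vistuples array instead of calling bsearch(), trading a callback per comparison for a few inline lines. The shape of that search as a standalone sketch (types simplified, helper name illustrative):

    #include <stdbool.h>

    typedef unsigned short OffsetNumber;

    /* Binary search over a sorted array of visible-tuple offsets.  The
     * midpoint is computed as start + (end - start) / 2, which cannot
     * overflow the way (start + end) / 2 can. */
    static bool offset_is_visible(const OffsetNumber *vistuples, int ntuples,
                                  OffsetNumber tupoffset)
    {
        int start = 0,
            end = ntuples - 1;

        while (start <= end)
        {
            int mid = start + (end - start) / 2;

            if (vistuples[mid] == tupoffset)
                return true;
            if (vistuples[mid] < tupoffset)
                start = mid + 1;    /* target is in the upper half */
            else
                end = mid - 1;      /* target is in the lower half */
        }
        return false;               /* not among the visible offsets */
    }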
diff --git a/src/backend/access/transam/commit_ts.c b/src/backend/access/transam/commit_ts.c
index 63344327e3d..5ad35c0d7f8 100644
--- a/src/backend/access/transam/commit_ts.c
+++ b/src/backend/access/transam/commit_ts.c
@@ -55,8 +55,8 @@
*/
typedef struct CommitTimestampEntry
{
- TimestampTz time;
- RepOriginId nodeid;
+ TimestampTz time;
+ RepOriginId nodeid;
} CommitTimestampEntry;
#define SizeOfCommitTimestampEntry (offsetof(CommitTimestampEntry, nodeid) + \
@@ -65,7 +65,7 @@ typedef struct CommitTimestampEntry
#define COMMIT_TS_XACTS_PER_PAGE \
(BLCKSZ / SizeOfCommitTimestampEntry)
-#define TransactionIdToCTsPage(xid) \
+#define TransactionIdToCTsPage(xid) \
((xid) / (TransactionId) COMMIT_TS_XACTS_PER_PAGE)
#define TransactionIdToCTsEntry(xid) \
((xid) % (TransactionId) COMMIT_TS_XACTS_PER_PAGE)
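
For concreteness: with the default BLCKSZ of 8192 and the entry layout above (an 8-byte TimestampTz followed by a 2-byte RepOriginId, so 10 bytes per entry on typical platforms), each SLRU page holds 819 entries. A minimal sketch of the arithmetic the two macros encode, assuming those sizes:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t TransactionId;

#define BLCKSZ 8192
#define SIZE_OF_COMMIT_TS_ENTRY 10  /* assumed: 8-byte timestamp + 2-byte node id */
#define COMMIT_TS_XACTS_PER_PAGE (BLCKSZ / SIZE_OF_COMMIT_TS_ENTRY)    /* 819 */

int
main(void)
{
    TransactionId xid = 1000000;

    /* Same arithmetic as TransactionIdToCTsPage/TransactionIdToCTsEntry. */
    printf("page %u, entry %u\n",
           (unsigned) (xid / COMMIT_TS_XACTS_PER_PAGE),     /* 1221 */
           (unsigned) (xid % COMMIT_TS_XACTS_PER_PAGE));    /* 1 */
    return 0;
}
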
@@ -83,21 +83,21 @@ static SlruCtlData CommitTsCtlData;
*/
typedef struct CommitTimestampShared
{
- TransactionId xidLastCommit;
+ TransactionId xidLastCommit;
CommitTimestampEntry dataLastCommit;
} CommitTimestampShared;
-CommitTimestampShared *commitTsShared;
+CommitTimestampShared *commitTsShared;
/* GUC variable */
-bool track_commit_timestamp;
+bool track_commit_timestamp;
static void SetXidCommitTsInPage(TransactionId xid, int nsubxids,
TransactionId *subxids, TimestampTz ts,
RepOriginId nodeid, int pageno);
static void TransactionIdSetCommitTs(TransactionId xid, TimestampTz ts,
- RepOriginId nodeid, int slotno);
+ RepOriginId nodeid, int slotno);
static int ZeroCommitTsPage(int pageno, bool writeXlog);
static bool CommitTsPagePrecedes(int page1, int page2);
static void WriteZeroPageXlogRec(int pageno);
@@ -141,8 +141,8 @@ TransactionTreeSetCommitTsData(TransactionId xid, int nsubxids,
return;
/*
- * Comply with the WAL-before-data rule: if caller specified it wants
- * this value to be recorded in WAL, do so before touching the data.
+ * Comply with the WAL-before-data rule: if the caller specified that it
+ * wants this value to be recorded in WAL, do so before touching the data.
*/
if (do_xlog)
WriteSetTimestampXlogRec(xid, nsubxids, subxids, timestamp, nodeid);
@@ -159,9 +159,9 @@ TransactionTreeSetCommitTsData(TransactionId xid, int nsubxids,
/*
* We split the xids to set the timestamp to in groups belonging to the
* same SLRU page; the first element in each such set is its head. The
- * first group has the main XID as the head; subsequent sets use the
- * first subxid not on the previous page as head. This way, we only have
- * to lock/modify each SLRU page once.
+ * first group has the main XID as the head; subsequent sets use the first
+ * subxid not on the previous page as head. This way, we only have to
+ * lock/modify each SLRU page once.
*/
for (i = 0, headxid = xid;;)
{
@@ -183,8 +183,8 @@ TransactionTreeSetCommitTsData(TransactionId xid, int nsubxids,
break;
/*
- * Set the new head and skip over it, as well as over the subxids
- * we just wrote.
+ * Set the new head and skip over it, as well as over the subxids we
+ * just wrote.
*/
headxid = subxids[j];
i += j - i + 1;
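
The grouping loop works as the comment describes: extend the current group while successive (sorted) xids map to the same SLRU page, process the whole group under one page lock, then promote the first xid on the next page to head. A simplified, self-contained sketch with a constant page divisor and printf standing in for the page update:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t TransactionId;

#define XACTS_PER_PAGE 819      /* stand-in for COMMIT_TS_XACTS_PER_PAGE */

int
main(void)
{
    /* Main xid followed by sorted subxids, as in the caller above. */
    TransactionId xids[] = {800, 810, 900, 1700, 1720, 2500};
    int nxids = 6;
    int i = 0;

    while (i < nxids)
    {
        uint32_t page = xids[i] / XACTS_PER_PAGE;
        int j = i;

        /* Extend the group while the next xid still lands on the same page. */
        while (j + 1 < nxids && xids[j + 1] / XACTS_PER_PAGE == page)
            j++;

        /* One SLRU page lock/update per group, head first. */
        printf("page %u: xids[%d..%d]\n", (unsigned) page, i, j);
        i = j + 1;      /* first xid on the next page becomes the new head */
    }
    return 0;
}
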
@@ -271,14 +271,14 @@ TransactionIdGetCommitTsData(TransactionId xid, TimestampTz *ts,
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("could not get commit timestamp data"),
- errhint("Make sure the configuration parameter \"%s\" is set.",
- "track_commit_timestamp")));
+ errhint("Make sure the configuration parameter \"%s\" is set.",
+ "track_commit_timestamp")));
/* error if the given Xid doesn't normally commit */
if (!TransactionIdIsNormal(xid))
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("cannot retrieve commit timestamp for transaction %u", xid)));
+ errmsg("cannot retrieve commit timestamp for transaction %u", xid)));
/*
* Return empty if the requested value is outside our valid range.
@@ -350,15 +350,15 @@ TransactionIdGetCommitTsData(TransactionId xid, TimestampTz *ts,
TransactionId
GetLatestCommitTsData(TimestampTz *ts, RepOriginId *nodeid)
{
- TransactionId xid;
+ TransactionId xid;
/* Error if module not enabled */
if (!track_commit_timestamp)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("could not get commit timestamp data"),
- errhint("Make sure the configuration parameter \"%s\" is set.",
- "track_commit_timestamp")));
+ errhint("Make sure the configuration parameter \"%s\" is set.",
+ "track_commit_timestamp")));
LWLockAcquire(CommitTsLock, LW_SHARED);
xid = commitTsShared->xidLastCommit;
@@ -377,9 +377,9 @@ GetLatestCommitTsData(TimestampTz *ts, RepOriginId *nodeid)
Datum
pg_xact_commit_timestamp(PG_FUNCTION_ARGS)
{
- TransactionId xid = PG_GETARG_UINT32(0);
- TimestampTz ts;
- bool found;
+ TransactionId xid = PG_GETARG_UINT32(0);
+ TimestampTz ts;
+ bool found;
found = TransactionIdGetCommitTsData(xid, &ts, NULL);
@@ -393,11 +393,11 @@ pg_xact_commit_timestamp(PG_FUNCTION_ARGS)
Datum
pg_last_committed_xact(PG_FUNCTION_ARGS)
{
- TransactionId xid;
- TimestampTz ts;
- Datum values[2];
- bool nulls[2];
- TupleDesc tupdesc;
+ TransactionId xid;
+ TimestampTz ts;
+ Datum values[2];
+ bool nulls[2];
+ TupleDesc tupdesc;
HeapTuple htup;
/* and construct a tuple with our data */
@@ -462,7 +462,7 @@ CommitTsShmemSize(void)
void
CommitTsShmemInit(void)
{
- bool found;
+ bool found;
CommitTsCtl->PagePrecedes = CommitTsPagePrecedes;
SimpleLruInit(CommitTsCtl, "CommitTs Ctl", CommitTsShmemBuffers(), 0,
@@ -495,8 +495,8 @@ BootStrapCommitTs(void)
{
/*
* Nothing to do here at present, unlike most other SLRU modules; segments
- * are created when the server is started with this module enabled.
- * See StartupCommitTs.
+ * are created when the server is started with this module enabled. See
+ * StartupCommitTs.
*/
}
@@ -561,9 +561,9 @@ CompleteCommitTsInitialization(void)
/*
* Activate this module whenever necessary.
- * This must happen during postmaster or standalong-backend startup,
- * or during WAL replay anytime the track_commit_timestamp setting is
- * changed in the master.
+ * This must happen during postmaster or standalone-backend startup,
+ * or during WAL replay anytime the track_commit_timestamp setting is
+ * changed in the master.
*
* The reason why this SLRU needs separate activation/deactivation functions is
* that it can be enabled/disabled during start and the activation/deactivation
@@ -612,7 +612,7 @@ ActivateCommitTs(void)
/* Finally, create the current segment file, if necessary */
if (!SimpleLruDoesPhysicalPageExist(CommitTsCtl, pageno))
{
- int slotno;
+ int slotno;
LWLockAcquire(CommitTsControlLock, LW_EXCLUSIVE);
slotno = ZeroCommitTsPage(pageno, false);
@@ -834,7 +834,7 @@ WriteSetTimestampXlogRec(TransactionId mainxid, int nsubxids,
TransactionId *subxids, TimestampTz timestamp,
RepOriginId nodeid)
{
- xl_commit_ts_set record;
+ xl_commit_ts_set record;
record.timestamp = timestamp;
record.nodeid = nodeid;
@@ -907,7 +907,7 @@ commit_ts_redo(XLogReaderState *record)
subxids = NULL;
TransactionTreeSetCommitTsData(setts->mainxid, nsubxids, subxids,
- setts->timestamp, setts->nodeid, false);
+ setts->timestamp, setts->nodeid, false);
if (subxids)
pfree(subxids);
}
diff --git a/src/backend/access/transam/multixact.c b/src/backend/access/transam/multixact.c
index 0218378ccb5..9568ff1ddb7 100644
--- a/src/backend/access/transam/multixact.c
+++ b/src/backend/access/transam/multixact.c
@@ -965,7 +965,7 @@ GetNewMultiXactId(int nmembers, MultiXactOffset *offset)
*/
if (!MultiXactIdPrecedes(result, MultiXactState->multiVacLimit) ||
(MultiXactState->nextOffset - MultiXactState->oldestOffset
- > MULTIXACT_MEMBER_SAFE_THRESHOLD))
+ > MULTIXACT_MEMBER_SAFE_THRESHOLD))
{
/*
* For safety's sake, we release MultiXactGenLock while sending
@@ -1190,9 +1190,9 @@ GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members,
MultiXactIdSetOldestVisible();
/*
- * If we know the multi is used only for locking and not for updates,
- * then we can skip checking if the value is older than our oldest
- * visible multi. It cannot possibly still be running.
+ * If we know the multi is used only for locking and not for updates, then
+ * we can skip checking if the value is older than our oldest visible
+ * multi. It cannot possibly still be running.
*/
if (onlyLock &&
MultiXactIdPrecedes(multi, OldestVisibleMXactId[MyBackendId]))
@@ -1207,14 +1207,14 @@ GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members,
*
* An ID older than MultiXactState->oldestMultiXactId cannot possibly be
* useful; it has already been removed, or will be removed shortly, by
- * truncation. Returning the wrong values could lead
- * to an incorrect visibility result. However, to support pg_upgrade we
- * need to allow an empty set to be returned regardless, if the caller is
- * willing to accept it; the caller is expected to check that it's an
- * allowed condition (such as ensuring that the infomask bits set on the
- * tuple are consistent with the pg_upgrade scenario). If the caller is
- * expecting this to be called only on recently created multis, then we
- * raise an error.
+ * truncation. Returning the wrong values could lead to an incorrect
+ * visibility result. However, to support pg_upgrade we need to allow an
+ * empty set to be returned regardless, if the caller is willing to accept
+ * it; the caller is expected to check that it's an allowed condition
+ * (such as ensuring that the infomask bits set on the tuple are
+ * consistent with the pg_upgrade scenario). If the caller is expecting
+ * this to be called only on recently created multis, then we raise an
+ * error.
*
* Conversely, an ID >= nextMXact shouldn't ever be seen here; if it is
* seen, it implies undetected ID wraparound has occurred. This raises a
@@ -2123,11 +2123,11 @@ MultiXactSetNextMXact(MultiXactId nextMulti,
* enough to contain the next value that would be created.
*
* We need to do this pretty early during the first startup in binary
- * upgrade mode: before StartupMultiXact() in fact, because this routine is
- * called even before that by StartupXLOG(). And we can't do it earlier
- * than at this point, because during that first call of this routine we
- * determine the MultiXactState->nextMXact value that MaybeExtendOffsetSlru
- * needs.
+ * upgrade mode: before StartupMultiXact() in fact, because this routine
+ * is called even before that by StartupXLOG(). And we can't do it
+ * earlier than at this point, because during that first call of this
+ * routine we determine the MultiXactState->nextMXact value that
+ * MaybeExtendOffsetSlru needs.
*/
if (IsBinaryUpgrade)
MaybeExtendOffsetSlru();
@@ -2202,11 +2202,11 @@ SetMultiXactIdLimit(MultiXactId oldest_datminmxid, Oid oldest_datoid)
/*
* Determine the offset of the oldest multixact that might still be
- * referenced. Normally, we can read the offset from the multixact itself,
- * but there's an important special case: if there are no multixacts in
- * existence at all, oldest_datminmxid obviously can't point to one. It
- * will instead point to the multixact ID that will be assigned the next
- * time one is needed.
+ * referenced. Normally, we can read the offset from the multixact
+ * itself, but there's an important special case: if there are no
+ * multixacts in existence at all, oldest_datminmxid obviously can't point
+ * to one. It will instead point to the multixact ID that will be
+ * assigned the next time one is needed.
*
* NB: oldest_datminmxid is the oldest multixact that might still be
* referenced from a table, unlike in DetermineSafeOldestOffset, where we
@@ -2520,10 +2520,9 @@ DetermineSafeOldestOffset(MultiXactId oldestMXact)
* obviously can't point to one. It will instead point to the multixact
* ID that will be assigned the next time one is needed.
*
- * NB: oldestMXact should be the oldest multixact that still exists in
- * the SLRU, unlike in SetMultiXactIdLimit, where we do this same
- * computation based on the oldest value that might be referenced in a
- * table.
+ * NB: oldestMXact should be the oldest multixact that still exists in the
+ * SLRU, unlike in SetMultiXactIdLimit, where we do this same computation
+ * based on the oldest value that might be referenced in a table.
*/
LWLockAcquire(MultiXactGenLock, LW_SHARED);
if (MultiXactState->nextMXact == oldestMXact)
@@ -2679,9 +2678,9 @@ int
MultiXactMemberFreezeThreshold(void)
{
MultiXactOffset members;
- uint32 multixacts;
- uint32 victim_multixacts;
- double fraction;
+ uint32 multixacts;
+ uint32 victim_multixacts;
+ double fraction;
ReadMultiXactCounts(&multixacts, &members);
@@ -2800,7 +2799,7 @@ SlruScanDirCbFindEarliest(SlruCtl ctl, char *filename, int segpage, void *data)
void
TruncateMultiXact(void)
{
- MultiXactId oldestMXact;
+ MultiXactId oldestMXact;
MultiXactOffset oldestOffset;
MultiXactOffset nextOffset;
mxtruncinfo trunc;
diff --git a/src/backend/access/transam/parallel.c b/src/backend/access/transam/parallel.c
index 8d6a3606794..f4ba8518b12 100644
--- a/src/backend/access/transam/parallel.c
+++ b/src/backend/access/transam/parallel.c
@@ -39,7 +39,7 @@
* without blocking. That way, a worker that errors out can write the whole
* message into the queue and terminate without waiting for the user backend.
*/
-#define PARALLEL_ERROR_QUEUE_SIZE 16384
+#define PARALLEL_ERROR_QUEUE_SIZE 16384
/* Magic number for parallel context TOC. */
#define PARALLEL_MAGIC 0x50477c7c
@@ -71,7 +71,7 @@ typedef struct FixedParallelState
BackendId parallel_master_backend_id;
/* Entrypoint for parallel workers. */
- parallel_worker_main_type entrypoint;
+ parallel_worker_main_type entrypoint;
/* Mutex protects remaining fields. */
slock_t mutex;
@@ -90,10 +90,10 @@ typedef struct FixedParallelState
* and < the number of workers before any user code is invoked; each parallel
* worker will get a different parallel worker number.
*/
-int ParallelWorkerNumber = -1;
+int ParallelWorkerNumber = -1;
/* Is there a parallel message pending which we need to receive? */
-bool ParallelMessagePending = false;
+bool ParallelMessagePending = false;
/* Pointer to our fixed parallel state. */
static FixedParallelState *MyFixedParallelState;
@@ -115,8 +115,8 @@ static void ParallelWorkerMain(Datum main_arg);
ParallelContext *
CreateParallelContext(parallel_worker_main_type entrypoint, int nworkers)
{
- MemoryContext oldcontext;
- ParallelContext *pcxt;
+ MemoryContext oldcontext;
+ ParallelContext *pcxt;
/* It is unsafe to create a parallel context if not in parallel mode. */
Assert(IsInParallelMode());
@@ -159,7 +159,7 @@ CreateParallelContextForExternalFunction(char *library_name,
char *function_name,
int nworkers)
{
- MemoryContext oldcontext;
+ MemoryContext oldcontext;
ParallelContext *pcxt;
/* We might be running in a very short-lived memory context. */
@@ -184,15 +184,15 @@ CreateParallelContextForExternalFunction(char *library_name,
void
InitializeParallelDSM(ParallelContext *pcxt)
{
- MemoryContext oldcontext;
- Size library_len = 0;
- Size guc_len = 0;
- Size combocidlen = 0;
- Size tsnaplen = 0;
- Size asnaplen = 0;
- Size tstatelen = 0;
- Size segsize = 0;
- int i;
+ MemoryContext oldcontext;
+ Size library_len = 0;
+ Size guc_len = 0;
+ Size combocidlen = 0;
+ Size tsnaplen = 0;
+ Size asnaplen = 0;
+ Size tstatelen = 0;
+ Size segsize = 0;
+ int i;
FixedParallelState *fps;
Snapshot transaction_snapshot = GetTransactionSnapshot();
Snapshot active_snapshot = GetActiveSnapshot();
@@ -205,8 +205,8 @@ InitializeParallelDSM(ParallelContext *pcxt)
shm_toc_estimate_keys(&pcxt->estimator, 1);
/*
- * Normally, the user will have requested at least one worker process,
- * but if by chance they have not, we can skip a bunch of things here.
+ * Normally, the user will have requested at least one worker process, but
+ * if by chance they have not, we can skip a bunch of things here.
*/
if (pcxt->nworkers > 0)
{
@@ -228,8 +228,8 @@ InitializeParallelDSM(ParallelContext *pcxt)
/* Estimate space need for error queues. */
StaticAssertStmt(BUFFERALIGN(PARALLEL_ERROR_QUEUE_SIZE) ==
- PARALLEL_ERROR_QUEUE_SIZE,
- "parallel error queue size not buffer-aligned");
+ PARALLEL_ERROR_QUEUE_SIZE,
+ "parallel error queue size not buffer-aligned");
shm_toc_estimate_chunk(&pcxt->estimator,
PARALLEL_ERROR_QUEUE_SIZE * pcxt->nworkers);
shm_toc_estimate_keys(&pcxt->estimator, 1);
@@ -251,9 +251,9 @@ InitializeParallelDSM(ParallelContext *pcxt)
* memory segment; instead, just use backend-private memory.
*
* Also, if we can't create a dynamic shared memory segment because the
- * maximum number of segments have already been created, then fall back
- * to backend-private memory, and plan not to use any workers. We hope
- * this won't happen very often, but it's better to abandon the use of
+ * maximum number of segments have already been created, then fall back to
+ * backend-private memory, and plan not to use any workers. We hope this
+ * won't happen very often, but it's better to abandon the use of
* parallelism than to fail outright.
*/
segsize = shm_toc_estimate(&pcxt->estimator);
@@ -290,13 +290,13 @@ InitializeParallelDSM(ParallelContext *pcxt)
/* We can skip the rest of this if we're not budgeting for any workers. */
if (pcxt->nworkers > 0)
{
- char *libraryspace;
- char *gucspace;
- char *combocidspace;
- char *tsnapspace;
- char *asnapspace;
- char *tstatespace;
- char *error_queue_space;
+ char *libraryspace;
+ char *gucspace;
+ char *combocidspace;
+ char *tsnapspace;
+ char *asnapspace;
+ char *tstatespace;
+ char *error_queue_space;
/* Serialize shared libraries we have loaded. */
libraryspace = shm_toc_allocate(pcxt->toc, library_len);
@@ -338,12 +338,12 @@ InitializeParallelDSM(ParallelContext *pcxt)
* should be transmitted via separate (possibly larger?) queues.
*/
error_queue_space =
- shm_toc_allocate(pcxt->toc,
- PARALLEL_ERROR_QUEUE_SIZE * pcxt->nworkers);
+ shm_toc_allocate(pcxt->toc,
+ PARALLEL_ERROR_QUEUE_SIZE * pcxt->nworkers);
for (i = 0; i < pcxt->nworkers; ++i)
{
- char *start;
- shm_mq *mq;
+ char *start;
+ shm_mq *mq;
start = error_queue_space + i * PARALLEL_ERROR_QUEUE_SIZE;
mq = shm_mq_create(start, PARALLEL_ERROR_QUEUE_SIZE);
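
Since the queues sit back to back in a single allocation, worker i's queue is plain pointer arithmetic off the base, and the earlier StaticAssertStmt guarantees the 16384-byte stride keeps each queue buffer-aligned. A sketch of the same carve-up over an ordinary malloc'd region rather than a DSM segment:

#include <stdio.h>
#include <stdlib.h>

#define QUEUE_SIZE 16384    /* stand-in for PARALLEL_ERROR_QUEUE_SIZE */

int
main(void)
{
    int nworkers = 4;
    /* One contiguous region, as shm_toc_allocate hands back in the real code. */
    char *space = malloc((size_t) QUEUE_SIZE * nworkers);

    for (int i = 0; i < nworkers; i++)
    {
        char *start = space + i * QUEUE_SIZE;   /* worker i's queue */

        printf("worker %d: offset %ld\n", i, (long) (start - space));
    }
    free(space);
    return 0;
}
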
@@ -355,8 +355,8 @@ InitializeParallelDSM(ParallelContext *pcxt)
/* Serialize extension entrypoint information. */
if (pcxt->library_name != NULL)
{
- Size lnamelen = strlen(pcxt->library_name);
- char *extensionstate;
+ Size lnamelen = strlen(pcxt->library_name);
+ char *extensionstate;
extensionstate = shm_toc_allocate(pcxt->toc, lnamelen
+ strlen(pcxt->function_name) + 2);
@@ -377,10 +377,10 @@ InitializeParallelDSM(ParallelContext *pcxt)
void
LaunchParallelWorkers(ParallelContext *pcxt)
{
- MemoryContext oldcontext;
- BackgroundWorker worker;
- int i;
- bool any_registrations_failed = false;
+ MemoryContext oldcontext;
+ BackgroundWorker worker;
+ int i;
+ bool any_registrations_failed = false;
/* Skip this if we have no workers. */
if (pcxt->nworkers == 0)
@@ -408,8 +408,8 @@ LaunchParallelWorkers(ParallelContext *pcxt)
*
* The caller must be able to tolerate ending up with fewer workers than
* expected, so there is no need to throw an error here if registration
- * fails. It wouldn't help much anyway, because registering the worker
- * in no way guarantees that it will start up and initialize successfully.
+ * fails. It wouldn't help much anyway, because registering the worker in
+ * no way guarantees that it will start up and initialize successfully.
*/
for (i = 0; i < pcxt->nworkers; ++i)
{
@@ -421,8 +421,8 @@ LaunchParallelWorkers(ParallelContext *pcxt)
else
{
/*
- * If we weren't able to register the worker, then we've bumped
- * up against the max_worker_processes limit, and future
+ * If we weren't able to register the worker, then we've bumped up
+ * against the max_worker_processes limit, and future
* registrations will probably fail too, so arrange to skip them.
* But we still have to execute this code for the remaining slots
* to make sure that we forget about the error queues we budgeted
@@ -455,13 +455,13 @@ WaitForParallelWorkersToFinish(ParallelContext *pcxt)
{
for (;;)
{
- bool anyone_alive = false;
- int i;
+ bool anyone_alive = false;
+ int i;
/*
- * This will process any parallel messages that are pending, which
- * may change the outcome of the loop that follows. It may also
- * throw an error propagated from a worker.
+ * This will process any parallel messages that are pending, which may
+ * change the outcome of the loop that follows. It may also throw an
+ * error propagated from a worker.
*/
CHECK_FOR_INTERRUPTS();
@@ -502,7 +502,7 @@ WaitForParallelWorkersToFinish(ParallelContext *pcxt)
void
DestroyParallelContext(ParallelContext *pcxt)
{
- int i;
+ int i;
/*
* Be careful about order of operations here! We remove the parallel
@@ -548,7 +548,7 @@ DestroyParallelContext(ParallelContext *pcxt)
/* Wait until the workers actually die. */
for (i = 0; i < pcxt->nworkers; ++i)
{
- BgwHandleStatus status;
+ BgwHandleStatus status;
if (pcxt->worker[i].bgwhandle == NULL)
continue;
@@ -626,9 +626,9 @@ HandleParallelMessages(void)
dlist_foreach(iter, &pcxt_list)
{
ParallelContext *pcxt;
- int i;
- Size nbytes;
- void *data;
+ int i;
+ Size nbytes;
+ void *data;
pcxt = dlist_container(ParallelContext, node, iter.cur);
if (pcxt->worker == NULL)
@@ -637,14 +637,14 @@ HandleParallelMessages(void)
for (i = 0; i < pcxt->nworkers; ++i)
{
/*
- * Read as many messages as we can from each worker, but stop
- * when either (1) the error queue goes away, which can happen if
- * we receive a Terminate message from the worker; or (2) no more
+ * Read as many messages as we can from each worker, but stop when
+ * either (1) the error queue goes away, which can happen if we
+ * receive a Terminate message from the worker; or (2) no more
* messages can be read from the worker without blocking.
*/
while (pcxt->worker[i].error_mqh != NULL)
{
- shm_mq_result res;
+ shm_mq_result res;
res = shm_mq_receive(pcxt->worker[i].error_mqh, &nbytes,
&data, true);
@@ -652,7 +652,7 @@ HandleParallelMessages(void)
break;
else if (res == SHM_MQ_SUCCESS)
{
- StringInfoData msg;
+ StringInfoData msg;
initStringInfo(&msg);
appendBinaryStringInfo(&msg, data, nbytes);
@@ -661,7 +661,7 @@ HandleParallelMessages(void)
}
else
ereport(ERROR,
- (errcode(ERRCODE_INTERNAL_ERROR), /* XXX: wrong errcode? */
+ (errcode(ERRCODE_INTERNAL_ERROR), /* XXX: wrong errcode? */
errmsg("lost connection to parallel worker")));
/* This might make the error queue go away. */
@@ -677,23 +677,24 @@ HandleParallelMessages(void)
static void
HandleParallelMessage(ParallelContext *pcxt, int i, StringInfo msg)
{
- char msgtype;
+ char msgtype;
msgtype = pq_getmsgbyte(msg);
switch (msgtype)
{
- case 'K': /* BackendKeyData */
+ case 'K': /* BackendKeyData */
{
- int32 pid = pq_getmsgint(msg, 4);
+ int32 pid = pq_getmsgint(msg, 4);
+
(void) pq_getmsgint(msg, 4); /* discard cancel key */
(void) pq_getmsgend(msg);
pcxt->worker[i].pid = pid;
break;
}
- case 'E': /* ErrorResponse */
- case 'N': /* NoticeResponse */
+ case 'E': /* ErrorResponse */
+ case 'N': /* NoticeResponse */
{
ErrorData edata;
ErrorContextCallback errctx;
@@ -725,14 +726,14 @@ HandleParallelMessage(ParallelContext *pcxt, int i, StringInfo msg)
break;
}
- case 'A': /* NotifyResponse */
+ case 'A': /* NotifyResponse */
{
/* Propagate NotifyResponse. */
pq_putmessage(msg->data[0], &msg->data[1], msg->len - 1);
break;
}
- case 'X': /* Terminate, indicating clean exit */
+ case 'X': /* Terminate, indicating clean exit */
{
pfree(pcxt->worker[i].bgwhandle);
pfree(pcxt->worker[i].error_mqh);
@@ -797,18 +798,18 @@ static void
ParallelWorkerMain(Datum main_arg)
{
dsm_segment *seg;
- shm_toc *toc;
+ shm_toc *toc;
FixedParallelState *fps;
- char *error_queue_space;
- shm_mq *mq;
+ char *error_queue_space;
+ shm_mq *mq;
shm_mq_handle *mqh;
- char *libraryspace;
- char *gucspace;
- char *combocidspace;
- char *tsnapspace;
- char *asnapspace;
- char *tstatespace;
- StringInfoData msgbuf;
+ char *libraryspace;
+ char *gucspace;
+ char *combocidspace;
+ char *tsnapspace;
+ char *asnapspace;
+ char *tstatespace;
+ StringInfoData msgbuf;
/* Establish signal handlers. */
pqsignal(SIGTERM, die);
@@ -824,8 +825,8 @@ ParallelWorkerMain(Datum main_arg)
ALLOCSET_DEFAULT_MAXSIZE);
/*
- * Now that we have a resource owner, we can attach to the dynamic
- * shared memory segment and read the table of contents.
+ * Now that we have a resource owner, we can attach to the dynamic shared
+ * memory segment and read the table of contents.
*/
seg = dsm_attach(DatumGetUInt32(main_arg));
if (seg == NULL)
@@ -836,7 +837,7 @@ ParallelWorkerMain(Datum main_arg)
if (toc == NULL)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("bad magic number in dynamic shared memory segment")));
+ errmsg("bad magic number in dynamic shared memory segment")));
/* Determine and set our worker number. */
fps = shm_toc_lookup(toc, PARALLEL_KEY_FIXED);
@@ -860,7 +861,7 @@ ParallelWorkerMain(Datum main_arg)
*/
error_queue_space = shm_toc_lookup(toc, PARALLEL_KEY_ERROR_QUEUE);
mq = (shm_mq *) (error_queue_space +
- ParallelWorkerNumber * PARALLEL_ERROR_QUEUE_SIZE);
+ ParallelWorkerNumber * PARALLEL_ERROR_QUEUE_SIZE);
shm_mq_set_sender(mq, MyProc);
mqh = shm_mq_attach(mq, seg, NULL);
pq_redirect_to_shm_mq(mq, mqh);
@@ -870,9 +871,9 @@ ParallelWorkerMain(Datum main_arg)
/*
* Send a BackendKeyData message to the process that initiated parallelism
* so that it has access to our PID before it receives any other messages
- * from us. Our cancel key is sent, too, since that's the way the protocol
- * message is defined, but it won't actually be used for anything in this
- * case.
+ * from us. Our cancel key is sent, too, since that's the way the
+ * protocol message is defined, but it won't actually be used for anything
+ * in this case.
*/
pq_beginmessage(&msgbuf, 'K');
pq_sendint(&msgbuf, (int32) MyProcPid, sizeof(int32));
@@ -880,13 +881,13 @@ ParallelWorkerMain(Datum main_arg)
pq_endmessage(&msgbuf);
/*
- * Hooray! Primary initialization is complete. Now, we need to set up
- * our backend-local state to match the original backend.
+ * Hooray! Primary initialization is complete. Now, we need to set up our
+ * backend-local state to match the original backend.
*/
/*
- * Load libraries that were loaded by original backend. We want to do this
- * before restoring GUCs, because the libraries might define custom
+ * Load libraries that were loaded by original backend. We want to do
+ * this before restoring GUCs, because the libraries might define custom
* variables.
*/
libraryspace = shm_toc_lookup(toc, PARALLEL_KEY_LIBRARY);
@@ -928,7 +929,8 @@ ParallelWorkerMain(Datum main_arg)
SetUserIdAndSecContext(fps->current_user_id, fps->sec_context);
/*
- * We've initialized all of our state now; nothing should change hereafter.
+ * We've initialized all of our state now; nothing should change
+ * hereafter.
*/
EnterParallelMode();
@@ -965,9 +967,9 @@ ParallelWorkerMain(Datum main_arg)
static void
ParallelExtensionTrampoline(dsm_segment *seg, shm_toc *toc)
{
- char *extensionstate;
- char *library_name;
- char *function_name;
+ char *extensionstate;
+ char *library_name;
+ char *function_name;
parallel_worker_main_type entrypt;
extensionstate = shm_toc_lookup(toc, PARALLEL_KEY_EXTENSION_TRAMPOLINE);
@@ -988,7 +990,7 @@ ParallelExtensionTrampoline(dsm_segment *seg, shm_toc *toc)
static void
ParallelErrorContext(void *arg)
{
- errcontext("parallel worker, pid %d", * (int32 *) arg);
+ errcontext("parallel worker, pid %d", *(int32 *) arg);
}
/*
diff --git a/src/backend/access/transam/twophase.c b/src/backend/access/transam/twophase.c
index 4743cacefe6..177d1e1432e 100644
--- a/src/backend/access/transam/twophase.c
+++ b/src/backend/access/transam/twophase.c
@@ -117,7 +117,7 @@ typedef struct GlobalTransactionData
TimestampTz prepared_at; /* time of preparation */
XLogRecPtr prepare_lsn; /* XLOG offset of prepare record */
Oid owner; /* ID of user that executed the xact */
- BackendId locking_backend; /* backend currently working on the xact */
+ BackendId locking_backend; /* backend currently working on the xact */
bool valid; /* TRUE if PGPROC entry is in proc array */
char gid[GIDSIZE]; /* The GID assigned to the prepared xact */
} GlobalTransactionData;
@@ -256,24 +256,24 @@ AtAbort_Twophase(void)
return;
/*
- * What to do with the locked global transaction entry? If we were in
- * the process of preparing the transaction, but haven't written the WAL
+ * What to do with the locked global transaction entry? If we were in the
+ * process of preparing the transaction, but haven't written the WAL
* record and state file yet, the transaction must not be considered as
* prepared. Likewise, if we are in the process of finishing an
- * already-prepared transaction, and fail after having already written
- * the 2nd phase commit or rollback record to the WAL, the transaction
- * should not be considered as prepared anymore. In those cases, just
- * remove the entry from shared memory.
+ * already-prepared transaction, and fail after having already written the
+ * 2nd phase commit or rollback record to the WAL, the transaction should
+ * not be considered as prepared anymore. In those cases, just remove the
+ * entry from shared memory.
*
- * Otherwise, the entry must be left in place so that the transaction
- * can be finished later, so just unlock it.
+ * Otherwise, the entry must be left in place so that the transaction can
+ * be finished later, so just unlock it.
*
* If we abort during prepare, after having written the WAL record, we
* might not have transferred all locks and other state to the prepared
* transaction yet. Likewise, if we abort during commit or rollback,
- * after having written the WAL record, we might not have released
- * all the resources held by the transaction yet. In those cases, the
- * in-memory state can be wrong, but it's too late to back out.
+ * after having written the WAL record, we might not have released all the
+ * resources held by the transaction yet. In those cases, the in-memory
+ * state can be wrong, but it's too late to back out.
*/
if (!MyLockedGxact->valid)
{
@@ -408,8 +408,8 @@ MarkAsPreparing(TransactionId xid, const char *gid,
TwoPhaseState->prepXacts[TwoPhaseState->numPrepXacts++] = gxact;
/*
- * Remember that we have this GlobalTransaction entry locked for us.
- * If we abort after this, we must release it.
+ * Remember that we have this GlobalTransaction entry locked for us. If we
+ * abort after this, we must release it.
*/
MyLockedGxact = gxact;
@@ -499,8 +499,8 @@ LockGXact(const char *gid, Oid user)
if (gxact->locking_backend != InvalidBackendId)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("prepared transaction with identifier \"%s\" is busy",
- gid)));
+ errmsg("prepared transaction with identifier \"%s\" is busy",
+ gid)));
if (user != gxact->owner && !superuser_arg(user))
ereport(ERROR,
@@ -1423,8 +1423,8 @@ FinishPreparedTransaction(const char *gid, bool isCommit)
/*
* In case we fail while running the callbacks, mark the gxact invalid so
- * no one else will try to commit/rollback, and so it will be recycled
- * if we fail after this point. It is still locked by our backend so it
+ * no one else will try to commit/rollback, and so it will be recycled if
+ * we fail after this point. It is still locked by our backend so it
* won't go away yet.
*
* (We assume it's safe to do this without taking TwoPhaseStateLock.)
@@ -2055,8 +2055,9 @@ RecoverPreparedTransactions(void)
StandbyReleaseLockTree(xid, hdr->nsubxacts, subxids);
/*
- * We're done with recovering this transaction. Clear MyLockedGxact,
- * like we do in PrepareTransaction() during normal operation.
+ * We're done with recovering this transaction. Clear
+ * MyLockedGxact, like we do in PrepareTransaction() during normal
+ * operation.
*/
PostPrepare_Twophase();
diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c
index 23401057e2c..b53d95faf86 100644
--- a/src/backend/access/transam/xact.c
+++ b/src/backend/access/transam/xact.c
@@ -102,9 +102,9 @@ int synchronous_commit = SYNCHRONOUS_COMMIT_ON;
* The XIDs are stored sorted in numerical order (not logical order) to make
* lookups as fast as possible.
*/
-TransactionId XactTopTransactionId = InvalidTransactionId;
-int nParallelCurrentXids = 0;
-TransactionId *ParallelCurrentXids;
+TransactionId XactTopTransactionId = InvalidTransactionId;
+int nParallelCurrentXids = 0;
+TransactionId *ParallelCurrentXids;
/*
* MyXactAccessedTempRel is set when a temporary relation is accessed.
@@ -142,7 +142,7 @@ typedef enum TBlockState
/* transaction block states */
TBLOCK_BEGIN, /* starting transaction block */
TBLOCK_INPROGRESS, /* live transaction */
- TBLOCK_PARALLEL_INPROGRESS, /* live transaction inside parallel worker */
+ TBLOCK_PARALLEL_INPROGRESS, /* live transaction inside parallel worker */
TBLOCK_END, /* COMMIT received */
TBLOCK_ABORT, /* failed xact, awaiting ROLLBACK */
TBLOCK_ABORT_END, /* failed xact, ROLLBACK received */
@@ -184,7 +184,7 @@ typedef struct TransactionStateData
bool prevXactReadOnly; /* entry-time xact r/o state */
bool startedInRecovery; /* did we start in recovery? */
bool didLogXid; /* has xid been included in WAL record? */
- int parallelModeLevel; /* Enter/ExitParallelMode counter */
+ int parallelModeLevel; /* Enter/ExitParallelMode counter */
struct TransactionStateData *parent; /* back link to parent */
} TransactionStateData;
@@ -494,8 +494,8 @@ AssignTransactionId(TransactionState s)
Assert(s->state == TRANS_INPROGRESS);
/*
- * Workers synchronize transaction state at the beginning of each
- * parallel operation, so we can't account for new XIDs at this point.
+ * Workers synchronize transaction state at the beginning of each parallel
+ * operation, so we can't account for new XIDs at this point.
*/
if (IsInParallelMode())
elog(ERROR, "cannot assign XIDs during a parallel operation");
@@ -788,10 +788,10 @@ TransactionIdIsCurrentTransactionId(TransactionId xid)
return false;
/*
- * In parallel workers, the XIDs we must consider as current are stored
- * in ParallelCurrentXids rather than the transaction-state stack. Note
- * that the XIDs in this array are sorted numerically rather than
- * according to transactionIdPrecedes order.
+ * In parallel workers, the XIDs we must consider as current are stored in
+ * ParallelCurrentXids rather than the transaction-state stack. Note that
+ * the XIDs in this array are sorted numerically rather than according to
+ * transactionIdPrecedes order.
*/
if (nParallelCurrentXids > 0)
{
@@ -1204,7 +1204,7 @@ RecordTransactionCommit(void)
nchildren, children, nrels, rels,
nmsgs, invalMessages,
RelcacheInitFileInval, forceSyncCommit,
- InvalidTransactionId /* plain commit */);
+ InvalidTransactionId /* plain commit */ );
/*
* Record plain commit ts if not replaying remote actions, or if no
@@ -1505,7 +1505,7 @@ RecordTransactionAbort(bool isSubXact)
RelFileNode *rels;
int nchildren;
TransactionId *children;
- TimestampTz xact_time;
+ TimestampTz xact_time;
/*
* If we haven't been assigned an XID, nobody will care whether we aborted
@@ -2316,8 +2316,8 @@ PrepareTransaction(void)
/*
* In normal commit-processing, this is all non-critical post-transaction
- * cleanup. When the transaction is prepared, however, it's important that
- * the locks and other per-backend resources are transferred to the
+ * cleanup. When the transaction is prepared, however, it's important
+ * that the locks and other per-backend resources are transferred to the
* prepared transaction's PGPROC entry. Note that if an error is raised
* here, it's too late to abort the transaction. XXX: This probably should
* be in a critical section, to force a PANIC if any of this fails, but
@@ -2358,9 +2358,8 @@ PrepareTransaction(void)
/*
* Allow another backend to finish the transaction. After
- * PostPrepare_Twophase(), the transaction is completely detached from
- * our backend. The rest is just non-critical cleanup of backend-local
- * state.
+ * PostPrepare_Twophase(), the transaction is completely detached from our
+ * backend. The rest is just non-critical cleanup of backend-local state.
*/
PostPrepare_Twophase();
@@ -2417,7 +2416,7 @@ AbortTransaction(void)
{
TransactionState s = CurrentTransactionState;
TransactionId latestXid;
- bool is_parallel_worker;
+ bool is_parallel_worker;
/* Prevent cancel/die interrupt while cleaning up */
HOLD_INTERRUPTS();
@@ -2520,9 +2519,9 @@ AbortTransaction(void)
latestXid = InvalidTransactionId;
/*
- * Since the parallel master won't get our value of XactLastRecEnd in this
- * case, we nudge WAL-writer ourselves in this case. See related comments in
- * RecordTransactionAbort for why this matters.
+ * Since the parallel master won't get our value of XactLastRecEnd in
+ * this case, we nudge the WAL writer ourselves. See related comments
+ * in RecordTransactionAbort for why this matters.
*/
XLogSetAsyncXactLSN(XactLastRecEnd);
}
@@ -3720,7 +3719,7 @@ DefineSavepoint(char *name)
if (IsInParallelMode())
ereport(ERROR,
(errcode(ERRCODE_INVALID_TRANSACTION_STATE),
- errmsg("cannot define savepoints during a parallel operation")));
+ errmsg("cannot define savepoints during a parallel operation")));
switch (s->blockState)
{
@@ -3787,7 +3786,7 @@ ReleaseSavepoint(List *options)
if (IsInParallelMode())
ereport(ERROR,
(errcode(ERRCODE_INVALID_TRANSACTION_STATE),
- errmsg("cannot release savepoints during a parallel operation")));
+ errmsg("cannot release savepoints during a parallel operation")));
switch (s->blockState)
{
@@ -3900,7 +3899,7 @@ RollbackToSavepoint(List *options)
if (IsInParallelMode())
ereport(ERROR,
(errcode(ERRCODE_INVALID_TRANSACTION_STATE),
- errmsg("cannot rollback to savepoints during a parallel operation")));
+ errmsg("cannot rollback to savepoints during a parallel operation")));
switch (s->blockState)
{
@@ -4017,17 +4016,18 @@ BeginInternalSubTransaction(char *name)
/*
* Workers synchronize transaction state at the beginning of each parallel
- * operation, so we can't account for new subtransactions after that point.
- * We might be able to make an exception for the type of subtransaction
- * established by this function, which is typically used in contexts where
- * we're going to release or roll back the subtransaction before proceeding
- * further, so that no enduring change to the transaction state occurs.
- * For now, however, we prohibit this case along with all the others.
+ * operation, so we can't account for new subtransactions after that
+ * point. We might be able to make an exception for the type of
+ * subtransaction established by this function, which is typically used in
+ * contexts where we're going to release or roll back the subtransaction
+ * before proceeding further, so that no enduring change to the
+ * transaction state occurs. For now, however, we prohibit this case along
+ * with all the others.
*/
if (IsInParallelMode())
ereport(ERROR,
(errcode(ERRCODE_INVALID_TRANSACTION_STATE),
- errmsg("cannot start subtransactions during a parallel operation")));
+ errmsg("cannot start subtransactions during a parallel operation")));
switch (s->blockState)
{
@@ -4094,7 +4094,7 @@ ReleaseCurrentSubTransaction(void)
if (IsInParallelMode())
ereport(ERROR,
(errcode(ERRCODE_INVALID_TRANSACTION_STATE),
- errmsg("cannot commit subtransactions during a parallel operation")));
+ errmsg("cannot commit subtransactions during a parallel operation")));
if (s->blockState != TBLOCK_SUBINPROGRESS)
elog(ERROR, "ReleaseCurrentSubTransaction: unexpected state %s",
@@ -4773,7 +4773,8 @@ Size
EstimateTransactionStateSpace(void)
{
TransactionState s;
- Size nxids = 5; /* iso level, deferrable, top & current XID, XID count */
+ Size nxids = 5; /* iso level, deferrable, top & current XID,
+ * XID count */
for (s = CurrentTransactionState; s != NULL; s = s->parent)
{
@@ -4804,8 +4805,8 @@ void
SerializeTransactionState(Size maxsize, char *start_address)
{
TransactionState s;
- Size nxids = 0;
- Size i = 0;
+ Size nxids = 0;
+ Size i = 0;
TransactionId *workspace;
TransactionId *result = (TransactionId *) start_address;
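
Per the comment in EstimateTransactionStateSpace, the serialized image is five TransactionId-sized header slots (isolation level, deferrable flag, top and current XID, XID count) followed by the sorted XID list. A rough sketch of the size estimate under that layout, with the per-level XID walk reduced to a single count:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t TransactionId;

int
main(void)
{
    /* iso level, deferrable, top XID, current XID, XID count */
    size_t header_words = 5;
    size_t nxids = 3;       /* e.g. one top-level XID plus two subxacts */

    size_t bytes = (header_words + nxids) * sizeof(TransactionId);

    printf("estimated serialized size: %zu bytes\n", bytes);    /* 32 */
    return 0;
}
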
@@ -4830,8 +4831,8 @@ SerializeTransactionState(Size maxsize, char *start_address)
}
/*
- * OK, we need to generate a sorted list of XIDs that our workers
- * should view as current. First, figure out how many there are.
+ * OK, we need to generate a sorted list of XIDs that our workers should
+ * view as current. First, figure out how many there are.
*/
for (s = CurrentTransactionState; s != NULL; s = s->parent)
{
@@ -5060,22 +5061,22 @@ xactGetCommittedChildren(TransactionId **ptr)
*/
XLogRecPtr
XactLogCommitRecord(TimestampTz commit_time,
- int nsubxacts, TransactionId *subxacts,
- int nrels, RelFileNode *rels,
- int nmsgs, SharedInvalidationMessage *msgs,
- bool relcacheInval, bool forceSync,
- TransactionId twophase_xid)
+ int nsubxacts, TransactionId *subxacts,
+ int nrels, RelFileNode *rels,
+ int nmsgs, SharedInvalidationMessage *msgs,
+ bool relcacheInval, bool forceSync,
+ TransactionId twophase_xid)
{
- xl_xact_commit xlrec;
- xl_xact_xinfo xl_xinfo;
- xl_xact_dbinfo xl_dbinfo;
- xl_xact_subxacts xl_subxacts;
+ xl_xact_commit xlrec;
+ xl_xact_xinfo xl_xinfo;
+ xl_xact_dbinfo xl_dbinfo;
+ xl_xact_subxacts xl_subxacts;
xl_xact_relfilenodes xl_relfilenodes;
- xl_xact_invals xl_invals;
- xl_xact_twophase xl_twophase;
- xl_xact_origin xl_origin;
+ xl_xact_invals xl_invals;
+ xl_xact_twophase xl_twophase;
+ xl_xact_origin xl_origin;
- uint8 info;
+ uint8 info;
Assert(CritSectionCount > 0);
@@ -5198,17 +5199,17 @@ XactLogCommitRecord(TimestampTz commit_time,
*/
XLogRecPtr
XactLogAbortRecord(TimestampTz abort_time,
- int nsubxacts, TransactionId *subxacts,
- int nrels, RelFileNode *rels,
- TransactionId twophase_xid)
+ int nsubxacts, TransactionId *subxacts,
+ int nrels, RelFileNode *rels,
+ TransactionId twophase_xid)
{
- xl_xact_abort xlrec;
- xl_xact_xinfo xl_xinfo;
- xl_xact_subxacts xl_subxacts;
+ xl_xact_abort xlrec;
+ xl_xact_xinfo xl_xinfo;
+ xl_xact_subxacts xl_subxacts;
xl_xact_relfilenodes xl_relfilenodes;
- xl_xact_twophase xl_twophase;
+ xl_xact_twophase xl_twophase;
- uint8 info;
+ uint8 info;
Assert(CritSectionCount > 0);
@@ -5289,7 +5290,7 @@ xact_redo_commit(xl_xact_parsed_commit *parsed,
{
TransactionId max_xid;
int i;
- TimestampTz commit_time;
+ TimestampTz commit_time;
max_xid = TransactionIdLatest(xid, parsed->nsubxacts, parsed->subxacts);
@@ -5351,13 +5352,13 @@ xact_redo_commit(xl_xact_parsed_commit *parsed,
* recovered. It's unlikely but it's good to be safe.
*/
TransactionIdAsyncCommitTree(
- xid, parsed->nsubxacts, parsed->subxacts, lsn);
+ xid, parsed->nsubxacts, parsed->subxacts, lsn);
/*
* We must mark clog before we update the ProcArray.
*/
ExpireTreeKnownAssignedTransactionIds(
- xid, parsed->nsubxacts, parsed->subxacts, max_xid);
+ xid, parsed->nsubxacts, parsed->subxacts, max_xid);
/*
* Send any cache invalidations attached to the commit. We must
@@ -5365,9 +5366,9 @@ xact_redo_commit(xl_xact_parsed_commit *parsed,
* occurs in CommitTransaction().
*/
ProcessCommittedInvalidationMessages(
- parsed->msgs, parsed->nmsgs,
- XactCompletionRelcacheInitFileInval(parsed->xinfo),
- parsed->dbId, parsed->tsId);
+ parsed->msgs, parsed->nmsgs,
+ XactCompletionRelcacheInitFileInval(parsed->xinfo),
+ parsed->dbId, parsed->tsId);
/*
* Release locks, if any. We do this for both two phase and normal one
@@ -5383,7 +5384,7 @@ xact_redo_commit(xl_xact_parsed_commit *parsed,
{
/* recover apply progress */
replorigin_advance(origin_id, parsed->origin_lsn, lsn,
- false /* backward */, false /* WAL */);
+ false /* backward */ , false /* WAL */ );
}
/* Make sure files supposed to be dropped are dropped */
@@ -5447,8 +5448,8 @@ xact_redo_commit(xl_xact_parsed_commit *parsed,
static void
xact_redo_abort(xl_xact_parsed_abort *parsed, TransactionId xid)
{
- int i;
- TransactionId max_xid;
+ int i;
+ TransactionId max_xid;
/*
* Make sure nextXid is beyond any XID mentioned in the record.
@@ -5495,7 +5496,7 @@ xact_redo_abort(xl_xact_parsed_abort *parsed, TransactionId xid)
* We must update the ProcArray after we have marked clog.
*/
ExpireTreeKnownAssignedTransactionIds(
- xid, parsed->nsubxacts, parsed->subxacts, max_xid);
+ xid, parsed->nsubxacts, parsed->subxacts, max_xid);
/*
* There are no flat files that need updating, nor invalidation
@@ -5557,7 +5558,7 @@ xact_redo(XLogReaderState *record)
xl_xact_parsed_abort parsed;
ParseAbortRecord(XLogRecGetInfo(record), xlrec,
- &parsed);
+ &parsed);
if (info == XLOG_XACT_ABORT)
{
diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c
index b913bf3ebcb..087b6be084d 100644
--- a/src/backend/access/transam/xlog.c
+++ b/src/backend/access/transam/xlog.c
@@ -81,8 +81,8 @@ extern uint32 bootstrap_data_checksum_version;
/* User-settable parameters */
-int max_wal_size = 64; /* 1 GB */
-int min_wal_size = 5; /* 80 MB */
+int max_wal_size = 64; /* 1 GB */
+int min_wal_size = 5; /* 80 MB */
int wal_keep_segments = 0;
int XLOGbuffers = -1;
int XLogArchiveTimeout = 0;
@@ -951,14 +951,14 @@ XLogInsertRecord(XLogRecData *rdata, XLogRecPtr fpw_lsn)
/*
* Check to see if my copy of RedoRecPtr or doPageWrites is out of date.
* If so, may have to go back and have the caller recompute everything.
- * This can only happen just after a checkpoint, so it's better to be
- * slow in this case and fast otherwise.
+ * This can only happen just after a checkpoint, so it's better to be slow
+ * in this case and fast otherwise.
*
* If we aren't doing full-page writes then RedoRecPtr doesn't actually
* affect the contents of the XLOG record, so we'll update our local copy
* but not force a recomputation. (If doPageWrites was just turned off,
- * we could recompute the record without full pages, but we choose not
- * to bother.)
+ * we could recompute the record without full pages, but we choose not to
+ * bother.)
*/
if (RedoRecPtr != Insert->RedoRecPtr)
{
@@ -970,8 +970,8 @@ XLogInsertRecord(XLogRecData *rdata, XLogRecPtr fpw_lsn)
if (fpw_lsn != InvalidXLogRecPtr && fpw_lsn <= RedoRecPtr && doPageWrites)
{
/*
- * Oops, some buffer now needs to be backed up that the caller
- * didn't back up. Start over.
+ * Oops, some buffer now needs to be backed up that the caller didn't
+ * back up. Start over.
*/
WALInsertLockRelease();
END_CRIT_SECTION();
@@ -1100,8 +1100,8 @@ XLogInsertRecord(XLogRecData *rdata, XLogRecPtr fpw_lsn)
{
appendStringInfo(&buf, "error decoding record: out of memory");
}
- else if (!DecodeXLogRecord(debug_reader, (XLogRecord *) recordBuf.data,
- &errormsg))
+ else if (!DecodeXLogRecord(debug_reader, (XLogRecord *) recordBuf.data,
+ &errormsg))
{
appendStringInfo(&buf, "error decoding record: %s",
errormsg ? errormsg : "no error message");
@@ -1932,11 +1932,11 @@ AdvanceXLInsertBuffer(XLogRecPtr upto, bool opportunistic)
/*
* Fill the new page's header
*/
- NewPage ->xlp_magic = XLOG_PAGE_MAGIC;
+ NewPage->xlp_magic = XLOG_PAGE_MAGIC;
/* NewPage->xlp_info = 0; */ /* done by memset */
- NewPage ->xlp_tli = ThisTimeLineID;
- NewPage ->xlp_pageaddr = NewPageBeginPtr;
+ NewPage->xlp_tli = ThisTimeLineID;
+ NewPage->xlp_pageaddr = NewPageBeginPtr;
/* NewPage->xlp_rem_len = 0; */ /* done by memset */
@@ -1954,7 +1954,7 @@ AdvanceXLInsertBuffer(XLogRecPtr upto, bool opportunistic)
* compress a few records.
*/
if (!Insert->forcePageWrites)
- NewPage ->xlp_info |= XLP_BKP_REMOVABLE;
+ NewPage->xlp_info |= XLP_BKP_REMOVABLE;
/*
* If first page of an XLOG segment file, make it a long header.
@@ -1966,7 +1966,7 @@ AdvanceXLInsertBuffer(XLogRecPtr upto, bool opportunistic)
NewLongPage->xlp_sysid = ControlFile->system_identifier;
NewLongPage->xlp_seg_size = XLogSegSize;
NewLongPage->xlp_xlog_blcksz = XLOG_BLCKSZ;
- NewPage ->xlp_info |= XLP_LONG_HEADER;
+ NewPage->xlp_info |= XLP_LONG_HEADER;
}
/*
@@ -2008,10 +2008,10 @@ CalculateCheckpointSegments(void)
*
* a) we keep WAL for two checkpoint cycles, back to the "prev" checkpoint.
* b) during checkpoint, we consume checkpoint_completion_target *
- * number of segments consumed between checkpoints.
+ * number of segments consumed between checkpoints.
*-------
*/
- target = (double ) max_wal_size / (2.0 + CheckPointCompletionTarget);
+ target = (double) max_wal_size / (2.0 + CheckPointCompletionTarget);
/* round down */
CheckPointSegments = (int) target;
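
Plugging defaults into the formula: with max_wal_size = 64 segments (1 GB at 16 MB per segment) and checkpoint_completion_target = 0.5, target = 64 / 2.5 = 25.6, which rounds down to CheckPointSegments = 25. The same computation as a standalone sketch:

#include <stdio.h>

int
main(void)
{
    int max_wal_size = 64;              /* segments; 1 GB at 16 MB each */
    double completion_target = 0.5;     /* checkpoint_completion_target */

    /* Keep WAL for two cycles plus the in-progress checkpoint's consumption. */
    double target = (double) max_wal_size / (2.0 + completion_target);
    int checkpoint_segments = (int) target;     /* round down */

    printf("CheckPointSegments = %d\n", checkpoint_segments);   /* 25 */
    return 0;
}
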
@@ -2052,15 +2052,15 @@ XLOGfileslop(XLogRecPtr PriorRedoPtr)
* remove enough segments to stay below the maximum.
*/
minSegNo = PriorRedoPtr / XLOG_SEG_SIZE + min_wal_size - 1;
- maxSegNo = PriorRedoPtr / XLOG_SEG_SIZE + max_wal_size - 1;
+ maxSegNo = PriorRedoPtr / XLOG_SEG_SIZE + max_wal_size - 1;
/*
* Between those limits, recycle enough segments to get us through to the
* estimated end of next checkpoint.
*
* To estimate where the next checkpoint will finish, assume that the
- * system runs steadily consuming CheckPointDistanceEstimate
- * bytes between every checkpoint.
+ * system runs steadily consuming CheckPointDistanceEstimate bytes between
+ * every checkpoint.
*
* The reason this calculation is done from the prior checkpoint, not the
* one that just finished, is that this behaves better if some checkpoint
@@ -3005,11 +3005,11 @@ XLogFileInit(XLogSegNo logsegno, bool *use_existent, bool use_lock)
/*
* XXX: What should we use as max_segno? We used to use XLOGfileslop when
* that was a constant, but that was always a bit dubious: normally, at a
- * checkpoint, XLOGfileslop was the offset from the checkpoint record,
- * but here, it was the offset from the insert location. We can't do the
+ * checkpoint, XLOGfileslop was the offset from the checkpoint record, but
+ * here, it was the offset from the insert location. We can't do the
* normal XLOGfileslop calculation here because we don't have access to
- * the prior checkpoint's redo location. So somewhat arbitrarily, just
- * use CheckPointSegments.
+ * the prior checkpoint's redo location. So somewhat arbitrarily, just use
+ * CheckPointSegments.
*/
max_segno = logsegno + CheckPointSegments;
if (!InstallXLogFileSegment(&installed_segno, tmppath,
@@ -3098,7 +3098,8 @@ XLogFileCopy(char *dstfname, char *srcfname, int upto)
nread = upto - nbytes;
/*
- * The part that is not read from the source file is filled with zeros.
+ * The part that is not read from the source file is filled with
+ * zeros.
*/
if (nread < sizeof(buffer))
memset(buffer, 0, sizeof(buffer));
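
The loop's contract is to emit exactly upto bytes, taking what the source file provides and padding anything past EOF with zeros. A self-contained sketch of that pattern using plain stdio (file names hypothetical):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Copy exactly upto bytes from src to dst, zero-filling anything past EOF. */
static void
copy_zero_filled(const char *src, const char *dst, long upto)
{
    char buffer[8192];
    FILE *in = fopen(src, "rb");
    FILE *out = fopen(dst, "wb");

    if (in == NULL || out == NULL)
    {
        perror("fopen");
        exit(1);
    }

    for (long nbytes = 0; nbytes < upto; nbytes += sizeof(buffer))
    {
        size_t chunk = sizeof(buffer);

        if (upto - nbytes < (long) chunk)
            chunk = (size_t) (upto - nbytes);

        /* Zero first; a short read near EOF then leaves a zero-filled tail. */
        memset(buffer, 0, sizeof(buffer));
        (void) fread(buffer, 1, chunk, in);

        if (fwrite(buffer, 1, chunk, out) != chunk)
        {
            perror("fwrite");
            exit(1);
        }
    }
    fclose(in);
    fclose(out);
}

int
main(void)
{
    /* Hypothetical file names, for illustration only. */
    copy_zero_filled("src.wal", "dst.wal", 16384);
    return 0;
}
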
@@ -3153,8 +3154,8 @@ XLogFileCopy(char *dstfname, char *srcfname, int upto)
/*
* Now move the segment into place with its final name. (Or just return
- * the path to the file we created, if the caller wants to handle the
- * rest on its own.)
+ * the path to the file we created, if the caller wants to handle the rest
+ * on its own.)
*/
if (dstfname)
{
@@ -3690,8 +3691,8 @@ RemoveNonParentXlogFiles(XLogRecPtr switchpoint, TimeLineID newTLI)
/*
* Remove files that are on a timeline older than the new one we're
- * switching to, but with a segment number >= the first segment on
- * the new timeline.
+ * switching to, but with a segment number >= the first segment on the
+ * new timeline.
*/
if (strncmp(xlde->d_name, switchseg, 8) < 0 &&
strcmp(xlde->d_name + 8, switchseg + 8) > 0)
@@ -3768,12 +3769,13 @@ RemoveXlogFile(const char *segname, XLogRecPtr PriorRedoPtr, XLogRecPtr endptr)
segname)));
#ifdef WIN32
+
/*
* On Windows, if another process (e.g another backend) holds the file
* open in FILE_SHARE_DELETE mode, unlink will succeed, but the file
* will still show up in directory listing until the last handle is
- * closed. To avoid confusing the lingering deleted file for a live WAL
- * file that needs to be archived, rename it before deleting it.
+ * closed. To avoid confusing the lingering deleted file for a live
+ * WAL file that needs to be archived, rename it before deleting it.
*
* If another process holds the file open without FILE_SHARE_DELETE
* flag, rename will fail. We'll try again at the next checkpoint.
@@ -3783,8 +3785,8 @@ RemoveXlogFile(const char *segname, XLogRecPtr PriorRedoPtr, XLogRecPtr endptr)
{
ereport(LOG,
(errcode_for_file_access(),
- errmsg("could not rename old transaction log file \"%s\": %m",
- path)));
+ errmsg("could not rename old transaction log file \"%s\": %m",
+ path)));
return;
}
rc = unlink(newpath);
@@ -3795,8 +3797,8 @@ RemoveXlogFile(const char *segname, XLogRecPtr PriorRedoPtr, XLogRecPtr endptr)
{
ereport(LOG,
(errcode_for_file_access(),
- errmsg("could not remove old transaction log file \"%s\": %m",
- path)));
+ errmsg("could not remove old transaction log file \"%s\": %m",
+ path)));
return;
}
CheckpointStats.ckpt_segs_removed++;
@@ -4609,11 +4611,11 @@ XLOGShmemInit(void)
int i;
#ifdef WAL_DEBUG
+
/*
- * Create a memory context for WAL debugging that's exempt from the
- * normal "no pallocs in critical section" rule. Yes, that can lead to a
- * PANIC if an allocation fails, but wal_debug is not for production use
- * anyway.
+ * Create a memory context for WAL debugging that's exempt from the normal
+ * "no pallocs in critical section" rule. Yes, that can lead to a PANIC if
+ * an allocation fails, but wal_debug is not for production use anyway.
*/
if (walDebugCxt == NULL)
{
@@ -5044,7 +5046,7 @@ readRecoveryCommandFile(void)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("invalid value for recovery parameter \"recovery_target\""),
- errhint("The only allowed value is \"immediate\".")));
+ errhint("The only allowed value is \"immediate\".")));
ereport(DEBUG2,
(errmsg_internal("recovery_target = '%s'",
item->value)));
@@ -5135,9 +5137,9 @@ readRecoveryCommandFile(void)
}
/*
- * Override any inconsistent requests. Not that this is a change
- * of behaviour in 9.5; prior to this we simply ignored a request
- * to pause if hot_standby = off, which was surprising behaviour.
+ * Override any inconsistent requests. Note that this is a change of
+ * behaviour in 9.5; prior to this we simply ignored a request to pause if
+ * hot_standby = off, which was surprising behaviour.
*/
if (recoveryTargetAction == RECOVERY_TARGET_ACTION_PAUSE &&
recoveryTargetActionSet &&
@@ -6043,7 +6045,7 @@ StartupXLOG(void)
if (read_backup_label(&checkPointLoc, &backupEndRequired,
&backupFromStandby))
{
- List *tablespaces = NIL;
+ List *tablespaces = NIL;
/*
* Archive recovery was requested, and thanks to the backup label
@@ -6099,7 +6101,7 @@ StartupXLOG(void)
foreach(lc, tablespaces)
{
tablespaceinfo *ti = lfirst(lc);
- char *linkloc;
+ char *linkloc;
linkloc = psprintf("pg_tblspc/%s", ti->oid);
@@ -6112,26 +6114,26 @@ StartupXLOG(void)
*/
if (lstat(linkloc, &st) == 0 && S_ISDIR(st.st_mode))
{
- if (!rmtree(linkloc,true))
+ if (!rmtree(linkloc, true))
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not remove directory \"%s\": %m",
- linkloc)));
+ errmsg("could not remove directory \"%s\": %m",
+ linkloc)));
}
else
{
if (unlink(linkloc) < 0 && errno != ENOENT)
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not remove symbolic link \"%s\": %m",
- linkloc)));
+ errmsg("could not remove symbolic link \"%s\": %m",
+ linkloc)));
}
if (symlink(ti->path, linkloc) < 0)
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not create symbolic link \"%s\": %m",
- linkloc)));
+ errmsg("could not create symbolic link \"%s\": %m",
+ linkloc)));
pfree(ti->oid);
pfree(ti->path);
@@ -6222,9 +6224,9 @@ StartupXLOG(void)
* in place if the database had been cleanly shut down, but it seems
* safest to just remove them always and let them be rebuilt during the
* first backend startup. These files need to be removed from all
- * directories including pg_tblspc, however the symlinks are created
- * only after reading tablesapce_map file in case of archive recovery
- * from backup, so needs to clear old relcache files here after creating
+ * directories including pg_tblspc; however, the symlinks are created only
+ * after reading the tablespace_map file in case of archive recovery from
+ * backup, so we need to clear the old relcache files here after creating
* symlinks.
*/
RelationCacheInitFileRemove();
@@ -6442,9 +6444,9 @@ StartupXLOG(void)
* Also set backupEndPoint and use minRecoveryPoint as the backup end
* location if we're starting recovery from a base backup which was
* taken from a standby. In this case, the database system status in
- * pg_control must indicate that the database was already in
- * recovery. Usually that will be DB_IN_ARCHIVE_RECOVERY but also can
- * be DB_SHUTDOWNED_IN_RECOVERY if recovery previously was interrupted
+ * pg_control must indicate that the database was already in recovery.
+ * Usually that will be DB_IN_ARCHIVE_RECOVERY but also can be
+ * DB_SHUTDOWNED_IN_RECOVERY if recovery previously was interrupted
* before reaching this point; e.g. because restore_command or
* primary_conninfo were faulty.
*
@@ -6500,10 +6502,10 @@ StartupXLOG(void)
/*
* If there was a tablespace_map file, it's done its job and the
- * symlinks have been created. We must get rid of the map file
- * so that if we crash during recovery, we don't create symlinks
- * again. It seems prudent though to just rename the file out of
- * the way rather than delete it completely.
+ * symlinks have been created. We must get rid of the map file so
+ * that if we crash during recovery, we don't create symlinks again.
+ * It seems prudent though to just rename the file out of the way
+ * rather than delete it completely.
*/
if (haveTblspcMap)
{
@@ -6859,7 +6861,8 @@ StartupXLOG(void)
{
/*
* Before we continue on the new timeline, clean up any
- * (possibly bogus) future WAL segments on the old timeline.
+ * (possibly bogus) future WAL segments on the old
+ * timeline.
*/
RemoveNonParentXlogFiles(EndRecPtr, ThisTimeLineID);
@@ -6890,32 +6893,33 @@ StartupXLOG(void)
{
if (!reachedConsistency)
ereport(FATAL,
- (errmsg("requested recovery stop point is before consistent recovery point")));
+ (errmsg("requested recovery stop point is before consistent recovery point")));
/*
* This is the last point where we can restart recovery with a
* new recovery target, if we shut down and begin again. After
- * this, Resource Managers may choose to do permanent corrective
- * actions at end of recovery.
+ * this, Resource Managers may choose to do permanent
+ * corrective actions at end of recovery.
*/
switch (recoveryTargetAction)
{
case RECOVERY_TARGET_ACTION_SHUTDOWN:
- /*
- * exit with special return code to request shutdown
- * of postmaster. Log messages issued from
- * postmaster.
- */
- proc_exit(3);
+
+ /*
+ * exit with special return code to request shutdown
+ * of postmaster. Log messages issued from
+ * postmaster.
+ */
+ proc_exit(3);
case RECOVERY_TARGET_ACTION_PAUSE:
- SetRecoveryPause(true);
- recoveryPausesHere();
+ SetRecoveryPause(true);
+ recoveryPausesHere();
- /* drop into promote */
+ /* drop into promote */
case RECOVERY_TARGET_ACTION_PROMOTE:
- break;
+ break;
}
}
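
The reindented switch above relies on deliberate fall-through: a pause, once resumed by the operator, continues exactly as a promote would. A standalone sketch of that control flow (names hypothetical, printf calls standing in for the real actions):

    #include <stdio.h>

    typedef enum { ACTION_SHUTDOWN, ACTION_PAUSE, ACTION_PROMOTE } recovery_action;

    static void
    act_on_recovery_target(recovery_action action)
    {
        switch (action)
        {
            case ACTION_SHUTDOWN:
                printf("exit with special code; postmaster logs and shuts down\n");
                return;
            case ACTION_PAUSE:
                printf("pause recovery until explicitly resumed\n");
                /* deliberate fall-through: resuming continues as a promote */
            case ACTION_PROMOTE:
                printf("finish recovery and promote\n");
                break;
        }
    }

    int
    main(void)
    {
        act_on_recovery_target(ACTION_PAUSE);   /* prints both messages */
        return 0;
    }
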
@@ -7259,8 +7263,8 @@ StartupXLOG(void)
* too.
*
* If a .done or .ready file already exists for the old timeline,
- * however, we had already determined that the segment is complete,
- * so we can let it be archived normally. (In particular, if it was
+ * however, we had already determined that the segment is complete, so
+ * we can let it be archived normally. (In particular, if it was
* restored from the archive to begin with, it's expected to have a
* .done file).
*/
@@ -7291,8 +7295,8 @@ StartupXLOG(void)
if (rename(origpath, partialpath) != 0)
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not rename file \"%s\" to \"%s\": %m",
- origpath, partialpath)));
+ errmsg("could not rename file \"%s\" to \"%s\": %m",
+ origpath, partialpath)));
XLogArchiveNotify(partialfname);
}
}
@@ -7366,8 +7370,8 @@ StartupXLOG(void)
XLogReportParameters();
/*
- * Local WAL inserts enabled, so it's time to finish initialization
- * of commit timestamp.
+ * Local WAL inserts enabled, so it's time to finish initialization of
+ * commit timestamp.
*/
CompleteCommitTsInitialization();
@@ -7961,7 +7965,7 @@ LogCheckpointStart(int flags, bool restartpoint)
(flags & CHECKPOINT_WAIT) ? " wait" : "",
(flags & CHECKPOINT_CAUSE_XLOG) ? " xlog" : "",
(flags & CHECKPOINT_CAUSE_TIME) ? " time" : "",
- (flags & CHECKPOINT_FLUSH_ALL) ? " flush-all" :"");
+ (flags & CHECKPOINT_FLUSH_ALL) ? " flush-all" : "");
}
/*
@@ -8056,8 +8060,8 @@ static void
UpdateCheckPointDistanceEstimate(uint64 nbytes)
{
/*
- * To estimate the number of segments consumed between checkpoints, keep
- * a moving average of the amount of WAL generated in previous checkpoint
+ * To estimate the number of segments consumed between checkpoints, keep a
+ * moving average of the amount of WAL generated in previous checkpoint
* cycles. However, if the load is bursty, with quiet periods and busy
* periods, we want to cater for the peak load. So instead of a plain
* moving average, let the average decline slowly if the previous cycle
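
The estimator the comment describes — track peaks immediately, decay slowly through quiet cycles — fits in a few lines. A sketch under that description; the 0.90/0.10 smoothing split is illustrative, not necessarily the server's exact constants:

    #include <stdint.h>
    #include <stdio.h>

    static double distance_estimate = 0.0;

    /* Jump straight to a new peak, but let the estimate decline slowly
     * when the current cycle generated less WAL than the average. */
    static void
    update_distance_estimate(uint64_t nbytes)
    {
        if ((double) nbytes > distance_estimate)
            distance_estimate = (double) nbytes;
        else
            distance_estimate = 0.90 * distance_estimate +
                                0.10 * (double) nbytes;
    }

    int
    main(void)
    {
        uint64_t wal_per_cycle[] = {100, 800, 200, 200};

        for (int i = 0; i < 4; i++)
        {
            update_distance_estimate(wal_per_cycle[i]);
            printf("cycle %d: estimate %.1f\n", i, distance_estimate);
        }
        return 0;   /* 100.0, 800.0, 740.0, 686.0: the burst peak decays slowly */
    }
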
@@ -9473,8 +9477,8 @@ xlog_redo(XLogReaderState *record)
}
/*
- * Update the commit timestamp tracking. If there was a change
- * it needs to be activated or deactivated accordingly.
+ * Update the commit timestamp tracking. If there was a change it
+ * needs to be activated or deactivated accordingly.
*/
if (track_commit_timestamp != xlrec.track_commit_timestamp)
{
@@ -9483,6 +9487,7 @@ xlog_redo(XLogReaderState *record)
if (track_commit_timestamp)
ActivateCommitTs();
else
+
/*
* We can't create a new WAL record here, but that's OK as
* master did the WAL logging already and we will replay the
@@ -9996,7 +10001,7 @@ do_pg_start_backup(const char *backupidstr, bool fast, TimeLineID *starttli_p,
char *relpath = NULL;
int rllen;
StringInfoData buflinkpath;
- char *s = linkpath;
+ char *s = linkpath;
/* Skip special stuff */
if (strcmp(de->d_name, ".") == 0 || strcmp(de->d_name, "..") == 0)
@@ -10023,10 +10028,10 @@ do_pg_start_backup(const char *backupidstr, bool fast, TimeLineID *starttli_p,
linkpath[rllen] = '\0';
/*
- * Add the escape character '\\' before newline in a string
- * to ensure that we can distinguish between the newline in
- * the tablespace path and end of line while reading
- * tablespace_map file during archive recovery.
+ * Add the escape character '\\' before newline in a string to
+ * ensure that we can distinguish between the newline in the
+ * tablespace path and end of line while reading tablespace_map
+ * file during archive recovery.
*/
initStringInfo(&buflinkpath);
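
The escaping rule stated in the comment can be demonstrated on its own — a sketch under that stated convention (function name hypothetical): every newline or carriage return embedded in the path gets a backslash in front of it before the path is written to tablespace_map.

    #include <stddef.h>
    #include <stdio.h>

    /* Prefix embedded newlines/CRs with a backslash so a later reader can
     * tell them apart from the end-of-line terminator of a map entry. */
    static void
    escape_link_path(const char *src, char *dst, size_t dstlen)
    {
        size_t j = 0;

        for (size_t i = 0; src[i] != '\0' && j + 2 < dstlen; i++)
        {
            if (src[i] == '\n' || src[i] == '\r')
                dst[j++] = '\\';
            dst[j++] = src[i];
        }
        dst[j] = '\0';
    }

    int
    main(void)
    {
        char escaped[64];

        escape_link_path("/mnt/odd\nname", escaped, sizeof(escaped));
        printf("%s\n", escaped);    /* a backslash now precedes the newline */
        return 0;
    }
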
@@ -10054,8 +10059,8 @@ do_pg_start_backup(const char *backupidstr, bool fast, TimeLineID *starttli_p,
ti->rpath = relpath ? pstrdup(relpath) : NULL;
ti->size = infotbssize ? sendTablespace(fullpath, true) : -1;
- if(tablespaces)
- *tablespaces = lappend(*tablespaces, ti);
+ if (tablespaces)
+ *tablespaces = lappend(*tablespaces, ti);
appendStringInfo(&tblspc_mapfbuf, "%s %s\n", ti->oid, ti->path);
@@ -10150,10 +10155,10 @@ do_pg_start_backup(const char *backupidstr, bool fast, TimeLineID *starttli_p,
}
else
ereport(ERROR,
- (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("a backup is already in progress"),
- errhint("If you're sure there is no backup in progress, remove file \"%s\" and try again.",
- TABLESPACE_MAP)));
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("a backup is already in progress"),
+ errhint("If you're sure there is no backup in progress, remove file \"%s\" and try again.",
+ TABLESPACE_MAP)));
fp = AllocateFile(TABLESPACE_MAP, "w");
@@ -10353,8 +10358,8 @@ do_pg_stop_backup(char *labelfile, bool waitforarchive, TimeLineID *stoptli_p)
BACKUP_LABEL_FILE)));
/*
- * Remove tablespace_map file if present, it is created
- * only if there are tablespaces.
+ * Remove tablespace_map file if present; it is created only if there
+ * are tablespaces.
*/
unlink(TABLESPACE_MAP);
}
@@ -10773,10 +10778,12 @@ read_tablespace_map(List **tablespaces)
tablespaceinfo *ti;
FILE *lfp;
char tbsoid[MAXPGPATH];
- char *tbslinkpath;
+ char *tbslinkpath;
char str[MAXPGPATH];
- int ch, prev_ch = -1,
- i = 0, n;
+ int ch,
+ prev_ch = -1,
+ i = 0,
+ n;
/*
* See if tablespace_map file is present
@@ -10794,9 +10801,9 @@ read_tablespace_map(List **tablespaces)
/*
* Read and parse the link name and path lines from tablespace_map file
- * (this code is pretty crude, but we are not expecting any variability
- * in the file format). While taking backup we embed escape character
- * '\\' before newline in tablespace path, so that during reading of
+ * (this code is pretty crude, but we are not expecting any variability in
+ * the file format). While taking backup we embed escape character '\\'
+ * before newline in tablespace path, so that during reading of
* tablespace_map file, we could distinguish newline in tablespace path
* and end of line. Now while reading tablespace_map file, remove the
* escape character that has been added in tablespace path during backup.
@@ -10808,8 +10815,8 @@ read_tablespace_map(List **tablespaces)
str[i] = '\0';
if (sscanf(str, "%s %n", tbsoid, &n) != 1)
ereport(FATAL,
- (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("invalid data in file \"%s\"", TABLESPACE_MAP)));
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("invalid data in file \"%s\"", TABLESPACE_MAP)));
tbslinkpath = str + n;
i = 0;
@@ -10821,7 +10828,7 @@ read_tablespace_map(List **tablespaces)
continue;
}
else if ((ch == '\n' || ch == '\r') && prev_ch == '\\')
- str[i-1] = ch;
+ str[i - 1] = ch;
else
str[i++] = ch;
prev_ch = ch;
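
Read back, the transformation inverts: an unescaped newline ends the entry, while a backslash-newline pair collapses to a bare newline inside the path. A standalone sketch mirroring the loop above (string input instead of a FILE; the helper name is hypothetical):

    #include <stdio.h>

    /* Copy one map entry out of src, collapsing backslash-newline to a bare
     * newline and stopping at the first unescaped newline; returns length. */
    static int
    unescape_map_entry(const char *src, char *dst)
    {
        int i = 0, prev_ch = -1;

        for (; *src != '\0'; src++)
        {
            if ((*src == '\n' || *src == '\r') && prev_ch != '\\')
                break;                      /* unescaped: end of entry */
            if ((*src == '\n' || *src == '\r') && prev_ch == '\\')
                dst[i - 1] = *src;          /* overwrite the escape char */
            else
                dst[i++] = *src;
            prev_ch = *src;
        }
        dst[i] = '\0';
        return i;
    }

    int
    main(void)
    {
        char path[64];

        unescape_map_entry("16384 /mnt/odd\\\nname\n", path);
        printf("%s\n", path);   /* embedded newline restored, terminator dropped */
        return 0;
    }
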
@@ -10868,7 +10875,7 @@ BackupInProgress(void)
/*
* CancelBackup: rename the "backup_label" and "tablespace_map"
- * files to cancel backup mode
+ * files to cancel backup mode
*
* If the "backup_label" file exists, it will be renamed to "backup_label.old".
* Similarly, if the "tablespace_map" file exists, it will be renamed to
@@ -11115,8 +11122,8 @@ static bool
WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess,
bool fetching_ckpt, XLogRecPtr tliRecPtr)
{
- static TimestampTz last_fail_time = 0;
- TimestampTz now;
+ static TimestampTz last_fail_time = 0;
+ TimestampTz now;
/*-------
* Standby mode is implemented by a state machine:
@@ -11270,9 +11277,10 @@ WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess,
*/
now = GetCurrentTimestamp();
if (!TimestampDifferenceExceeds(last_fail_time, now,
- wal_retrieve_retry_interval))
+ wal_retrieve_retry_interval))
{
- long secs, wait_time;
+ long secs,
+ wait_time;
int usecs;
TimestampDifference(last_fail_time, now, &secs, &usecs);
@@ -11280,7 +11288,7 @@ WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess,
(secs * 1000 + usecs / 1000);
WaitLatch(&XLogCtl->recoveryWakeupLatch,
- WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH,
+ WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH,
wait_time);
ResetLatch(&XLogCtl->recoveryWakeupLatch);
now = GetCurrentTimestamp();
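
The arithmetic in this hunk is worth spelling out: the standby sleeps only for the unexpired remainder of wal_retrieve_retry_interval, measured from the last failed attempt. A small sketch, milliseconds throughout:

    #include <stdio.h>

    /* Remaining wait = configured interval minus time already elapsed since
     * the last failure, converting the secs/usecs pair to milliseconds. */
    static long
    remaining_wait_ms(long retry_interval_ms, long secs, int usecs)
    {
        return retry_interval_ms - (secs * 1000 + usecs / 1000);
    }

    int
    main(void)
    {
        /* 5000 ms interval, 3.25 s since the last failure: sleep 1750 ms more */
        printf("%ld\n", remaining_wait_ms(5000, 3, 250000));
        return 0;
    }
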
@@ -11605,8 +11613,8 @@ fsync_pgdata(char *datadir)
return;
/*
- * If possible, hint to the kernel that we're soon going to fsync
- * the data directory and its contents.
+ * If possible, hint to the kernel that we're soon going to fsync the data
+ * directory and its contents.
*/
#if defined(HAVE_SYNC_FILE_RANGE) || \
(defined(USE_POSIX_FADVISE) && defined(POSIX_FADV_DONTNEED))
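
For a single file, the hinting that fsync_pgdata performs over the whole data directory looks roughly like the sketch below, reusing the configure symbols visible in the hunk above: on Linux, sync_file_range() starts writeback early so the following fsync() has less dirty data left to flush, with posix_fadvise(POSIX_FADV_DONTNEED) as the fallback hint. This is an illustration of the technique, not the backend's actual helper.

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <unistd.h>

    /* Ask the kernel to begin writeback now, then fsync. */
    static int
    hint_and_fsync(int fd)
    {
    #if defined(HAVE_SYNC_FILE_RANGE)
        (void) sync_file_range(fd, 0, 0, SYNC_FILE_RANGE_WRITE);
    #elif defined(USE_POSIX_FADVISE) && defined(POSIX_FADV_DONTNEED)
        (void) posix_fadvise(fd, 0, 0, POSIX_FADV_DONTNEED);
    #endif
        return fsync(fd);
    }

    int
    main(void)
    {
        int fd = open("example.dat", O_RDWR | O_CREAT, 0600);

        if (fd >= 0)
        {
            (void) hint_and_fsync(fd);
            close(fd);
        }
        return 0;
    }
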
diff --git a/src/backend/access/transam/xloginsert.c b/src/backend/access/transam/xloginsert.c
index 419736da310..b96c39ac657 100644
--- a/src/backend/access/transam/xloginsert.c
+++ b/src/backend/access/transam/xloginsert.c
@@ -33,7 +33,7 @@
#include "pg_trace.h"
/* Buffer size required to store a compressed version of backup block image */
-#define PGLZ_MAX_BLCKSZ PGLZ_MAX_OUTPUT(BLCKSZ)
+#define PGLZ_MAX_BLCKSZ PGLZ_MAX_OUTPUT(BLCKSZ)
/*
* For each block reference registered with XLogRegisterBuffer, we fill in
@@ -58,7 +58,7 @@ typedef struct
/* buffer to store a compressed version of backup block image */
char compressed_page[PGLZ_MAX_BLCKSZ];
-} registered_buffer;
+} registered_buffer;
static registered_buffer *registered_buffers;
static int max_registered_buffers; /* allocated size */
@@ -110,7 +110,7 @@ static XLogRecData *XLogRecordAssemble(RmgrId rmid, uint8 info,
XLogRecPtr RedoRecPtr, bool doPageWrites,
XLogRecPtr *fpw_lsn);
static bool XLogCompressBackupBlock(char *page, uint16 hole_offset,
- uint16 hole_length, char *dest, uint16 *dlen);
+ uint16 hole_length, char *dest, uint16 *dlen);
/*
* Begin constructing a WAL record. This must be called before the
@@ -602,7 +602,10 @@ XLogRecordAssemble(RmgrId rmid, uint8 info,
&compressed_len);
}
- /* Fill in the remaining fields in the XLogRecordBlockHeader struct */
+ /*
+ * Fill in the remaining fields in the XLogRecordBlockHeader
+ * struct
+ */
bkpb.fork_flags |= BKPBLOCK_HAS_IMAGE;
/*
@@ -762,7 +765,7 @@ XLogRecordAssemble(RmgrId rmid, uint8 info,
* the length of compressed block image.
*/
static bool
-XLogCompressBackupBlock(char * page, uint16 hole_offset, uint16 hole_length,
+XLogCompressBackupBlock(char *page, uint16 hole_offset, uint16 hole_length,
char *dest, uint16 *dlen)
{
int32 orig_len = BLCKSZ - hole_length;
@@ -790,16 +793,15 @@ XLogCompressBackupBlock(char * page, uint16 hole_offset, uint16 hole_length,
source = page;
/*
- * We recheck the actual size even if pglz_compress() reports success
- * and see if the number of bytes saved by compression is larger than
- * the length of extra data needed for the compressed version of block
- * image.
+ * We recheck the actual size even if pglz_compress() reports success and
+ * see if the number of bytes saved by compression is larger than the
+ * length of extra data needed for the compressed version of block image.
*/
len = pglz_compress(source, orig_len, dest, PGLZ_strategy_default);
if (len >= 0 &&
len + extra_bytes < orig_len)
{
- *dlen = (uint16) len; /* successful compression */
+ *dlen = (uint16) len; /* successful compression */
return true;
}
return false;
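
The acceptance test in the hunk above — compression must pay for its own header overhead — reduces to a single comparison. A sketch with the fields as plain parameters, extra_bytes being whatever the compressed on-disk format costs:

    #include <stdint.h>
    #include <stdio.h>

    /* Keep the compressed image only if the bytes saved exceed the extra
     * bytes the compressed format needs. */
    static int
    compression_worthwhile(int32_t compressed_len, int32_t orig_len,
                           int32_t extra_bytes)
    {
        return compressed_len >= 0 && compressed_len + extra_bytes < orig_len;
    }

    int
    main(void)
    {
        printf("%d\n", compression_worthwhile(8190, 8192, 2)); /* 0: saves nothing */
        printf("%d\n", compression_worthwhile(4000, 8192, 2)); /* 1: a clear win */
        return 0;
    }
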
diff --git a/src/backend/access/transam/xlogreader.c b/src/backend/access/transam/xlogreader.c
index 3661e7229aa..a9e926c5a28 100644
--- a/src/backend/access/transam/xlogreader.c
+++ b/src/backend/access/transam/xlogreader.c
@@ -1086,50 +1086,53 @@ DecodeXLogRecord(XLogReaderState *state, XLogRecord *record, char **errormsg)
blk->bimg_len == BLCKSZ))
{
report_invalid_record(state,
- "BKPIMAGE_HAS_HOLE set, but hole offset %u length %u block image length %u at %X/%X",
+ "BKPIMAGE_HAS_HOLE set, but hole offset %u length %u block image length %u at %X/%X",
(unsigned int) blk->hole_offset,
(unsigned int) blk->hole_length,
(unsigned int) blk->bimg_len,
(uint32) (state->ReadRecPtr >> 32), (uint32) state->ReadRecPtr);
goto err;
}
+
/*
- * cross-check that hole_offset == 0 and hole_length == 0
- * if the HAS_HOLE flag is not set.
+ * cross-check that hole_offset == 0 and hole_length == 0 if
+ * the HAS_HOLE flag is not set.
*/
if (!(blk->bimg_info & BKPIMAGE_HAS_HOLE) &&
(blk->hole_offset != 0 || blk->hole_length != 0))
{
report_invalid_record(state,
- "BKPIMAGE_HAS_HOLE not set, but hole offset %u length %u at %X/%X",
+ "BKPIMAGE_HAS_HOLE not set, but hole offset %u length %u at %X/%X",
(unsigned int) blk->hole_offset,
(unsigned int) blk->hole_length,
(uint32) (state->ReadRecPtr >> 32), (uint32) state->ReadRecPtr);
goto err;
}
+
/*
- * cross-check that bimg_len < BLCKSZ
- * if the IS_COMPRESSED flag is set.
+ * cross-check that bimg_len < BLCKSZ if the IS_COMPRESSED
+ * flag is set.
*/
if ((blk->bimg_info & BKPIMAGE_IS_COMPRESSED) &&
blk->bimg_len == BLCKSZ)
{
report_invalid_record(state,
- "BKPIMAGE_IS_COMPRESSED set, but block image length %u at %X/%X",
+ "BKPIMAGE_IS_COMPRESSED set, but block image length %u at %X/%X",
(unsigned int) blk->bimg_len,
(uint32) (state->ReadRecPtr >> 32), (uint32) state->ReadRecPtr);
goto err;
}
+
/*
- * cross-check that bimg_len = BLCKSZ if neither
- * HAS_HOLE nor IS_COMPRESSED flag is set.
+ * cross-check that bimg_len = BLCKSZ if neither HAS_HOLE nor
+ * IS_COMPRESSED flag is set.
*/
if (!(blk->bimg_info & BKPIMAGE_HAS_HOLE) &&
!(blk->bimg_info & BKPIMAGE_IS_COMPRESSED) &&
blk->bimg_len != BLCKSZ)
{
report_invalid_record(state,
- "neither BKPIMAGE_HAS_HOLE nor BKPIMAGE_IS_COMPRESSED set, but block image length is %u at %X/%X",
+ "neither BKPIMAGE_HAS_HOLE nor BKPIMAGE_IS_COMPRESSED set, but block image length is %u at %X/%X",
(unsigned int) blk->data_len,
(uint32) (state->ReadRecPtr >> 32), (uint32) state->ReadRecPtr);
goto err;
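
Taken together, the four cross-checks define one validity predicate over the block-image header fields. A standalone restatement (flag bit values are illustrative; the real definitions live in xlogrecord.h):

    #include <stdbool.h>
    #include <stdint.h>

    #define SKETCH_BLCKSZ   8192
    #define HAS_HOLE        0x01    /* illustrative flag bits */
    #define IS_COMPRESSED   0x02

    static bool
    block_image_fields_valid(uint8_t bimg_info, uint16_t hole_offset,
                             uint16_t hole_length, uint16_t bimg_len)
    {
        bool has_hole = (bimg_info & HAS_HOLE) != 0;
        bool compressed = (bimg_info & IS_COMPRESSED) != 0;

        /* a hole implies real hole bounds and a shortened image */
        if (has_hole && (hole_offset == 0 || hole_length == 0 ||
                         bimg_len == SKETCH_BLCKSZ))
            return false;
        /* no hole implies zeroed hole fields */
        if (!has_hole && (hole_offset != 0 || hole_length != 0))
            return false;
        /* compression implies a shortened image */
        if (compressed && bimg_len == SKETCH_BLCKSZ)
            return false;
        /* neither flag implies a full-size image */
        if (!has_hole && !compressed && bimg_len != SKETCH_BLCKSZ)
            return false;
        return true;
    }

    int
    main(void)
    {
        /* full-page image, no hole, no compression: valid */
        return block_image_fields_valid(0, 0, 0, SKETCH_BLCKSZ) ? 0 : 1;
    }
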
@@ -1294,8 +1297,8 @@ bool
RestoreBlockImage(XLogReaderState *record, uint8 block_id, char *page)
{
DecodedBkpBlock *bkpb;
- char *ptr;
- char tmp[BLCKSZ];
+ char *ptr;
+ char tmp[BLCKSZ];
if (!record->blocks[block_id].in_use)
return false;