-rw-r--r--  src/backend/access/common/heaptuple.c       | 14
-rw-r--r--  src/backend/access/heap/heapam.c            |  2
-rw-r--r--  src/backend/access/heap/heapam_visibility.c | 16
-rw-r--r--  src/backend/access/heap/visibilitymap.c     | 56
-rw-r--r--  src/backend/access/table/tableam.c          | 11
-rw-r--r--  src/backend/access/transam/multixact.c      |  6
-rw-r--r--  src/include/access/heapam.h                 | 26
-rw-r--r--  src/include/access/heapam_xlog.h            |  4
-rw-r--r--  src/include/access/htup_details.h           |  2
-rw-r--r--  src/include/access/multixact.h              |  7
-rw-r--r--  src/include/access/rewriteheap.h            | 12
-rw-r--r--  src/include/commands/cluster.h              |  2
-rw-r--r--  src/include/replication/snapbuild.h         | 13
14 files changed, 90 insertions(+), 89 deletions(-)
diff --git a/src/backend/access/common/heaptuple.c b/src/backend/access/common/heaptuple.c
index 503cda46eff..7e355585a01 100644
--- a/src/backend/access/common/heaptuple.c
+++ b/src/backend/access/common/heaptuple.c
@@ -420,13 +420,13 @@ heap_attisnull(HeapTuple tup, int attnum, TupleDesc tupleDesc)
* ----------------
*/
Datum
-nocachegetattr(HeapTuple tuple,
+nocachegetattr(HeapTuple tup,
int attnum,
TupleDesc tupleDesc)
{
- HeapTupleHeader tup = tuple->t_data;
+ HeapTupleHeader td = tup->t_data;
char *tp; /* ptr to data part of tuple */
- bits8 *bp = tup->t_bits; /* ptr to null bitmap in tuple */
+ bits8 *bp = td->t_bits; /* ptr to null bitmap in tuple */
bool slow = false; /* do we have to walk attrs? */
int off; /* current offset within data */
@@ -441,7 +441,7 @@ nocachegetattr(HeapTuple tuple,
attnum--;
- if (!HeapTupleNoNulls(tuple))
+ if (!HeapTupleNoNulls(tup))
{
/*
* there's a null somewhere in the tuple
@@ -470,7 +470,7 @@ nocachegetattr(HeapTuple tuple,
}
}
- tp = (char *) tup + tup->t_hoff;
+ tp = (char *) td + td->t_hoff;
if (!slow)
{
@@ -489,7 +489,7 @@ nocachegetattr(HeapTuple tuple,
* target. If there aren't any, it's safe to cheaply initialize the
* cached offsets for these attrs.
*/
- if (HeapTupleHasVarWidth(tuple))
+ if (HeapTupleHasVarWidth(tup))
{
int j;
@@ -565,7 +565,7 @@ nocachegetattr(HeapTuple tuple,
{
Form_pg_attribute att = TupleDescAttr(tupleDesc, i);
- if (HeapTupleHasNulls(tuple) && att_isnull(i, bp))
+ if (HeapTupleHasNulls(tup) && att_isnull(i, bp))
{
usecache = false;
continue; /* this cannot be the target att */
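[Editor's note, not part of the patch] The renamed nocachegetattr() is the slow path behind the heap_getattr() wrapper, which callers use directly; a minimal, hedged caller-side sketch:

#include "postgres.h"
#include "access/htup_details.h"

/*
 * Illustrative only: heap_getattr() falls back to nocachegetattr() when
 * cached attribute offsets cannot be used (nulls or earlier variable-width
 * columns in the tuple).
 */
static Datum
fetch_attr(HeapTuple tup, int attnum, TupleDesc tupleDesc, bool *isnull)
{
	return heap_getattr(tup, attnum, tupleDesc, isnull);
}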
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index 5887166061a..eb811d751e5 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -108,7 +108,7 @@ static bool ConditionalMultiXactIdWait(MultiXactId multi, MultiXactStatus status
static void index_delete_sort(TM_IndexDeleteOp *delstate);
static int bottomup_sort_and_shrink(TM_IndexDeleteOp *delstate);
static XLogRecPtr log_heap_new_cid(Relation relation, HeapTuple tup);
-static HeapTuple ExtractReplicaIdentity(Relation rel, HeapTuple tup, bool key_required,
+static HeapTuple ExtractReplicaIdentity(Relation relation, HeapTuple tp, bool key_required,
bool *copy);
diff --git a/src/backend/access/heap/heapam_visibility.c b/src/backend/access/heap/heapam_visibility.c
index ff0b8a688de..6e33d1c8812 100644
--- a/src/backend/access/heap/heapam_visibility.c
+++ b/src/backend/access/heap/heapam_visibility.c
@@ -1763,30 +1763,30 @@ HeapTupleSatisfiesHistoricMVCC(HeapTuple htup, Snapshot snapshot,
* if so, the indicated buffer is marked dirty.
*/
bool
-HeapTupleSatisfiesVisibility(HeapTuple tup, Snapshot snapshot, Buffer buffer)
+HeapTupleSatisfiesVisibility(HeapTuple htup, Snapshot snapshot, Buffer buffer)
{
switch (snapshot->snapshot_type)
{
case SNAPSHOT_MVCC:
- return HeapTupleSatisfiesMVCC(tup, snapshot, buffer);
+ return HeapTupleSatisfiesMVCC(htup, snapshot, buffer);
break;
case SNAPSHOT_SELF:
- return HeapTupleSatisfiesSelf(tup, snapshot, buffer);
+ return HeapTupleSatisfiesSelf(htup, snapshot, buffer);
break;
case SNAPSHOT_ANY:
- return HeapTupleSatisfiesAny(tup, snapshot, buffer);
+ return HeapTupleSatisfiesAny(htup, snapshot, buffer);
break;
case SNAPSHOT_TOAST:
- return HeapTupleSatisfiesToast(tup, snapshot, buffer);
+ return HeapTupleSatisfiesToast(htup, snapshot, buffer);
break;
case SNAPSHOT_DIRTY:
- return HeapTupleSatisfiesDirty(tup, snapshot, buffer);
+ return HeapTupleSatisfiesDirty(htup, snapshot, buffer);
break;
case SNAPSHOT_HISTORIC_MVCC:
- return HeapTupleSatisfiesHistoricMVCC(tup, snapshot, buffer);
+ return HeapTupleSatisfiesHistoricMVCC(htup, snapshot, buffer);
break;
case SNAPSHOT_NON_VACUUMABLE:
- return HeapTupleSatisfiesNonVacuumable(tup, snapshot, buffer);
+ return HeapTupleSatisfiesNonVacuumable(htup, snapshot, buffer);
break;
}
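[Editor's note, not part of the patch] HeapTupleSatisfiesVisibility() simply dispatches on the snapshot type; a hedged sketch of a typical call site, assuming the caller has pinned the buffer holding the tuple:

#include "postgres.h"
#include "access/heapam.h"
#include "storage/bufmgr.h"

static bool
tuple_is_visible(HeapTuple htup, Snapshot snapshot, Buffer buffer)
{
	bool		valid;

	/* the buffer content lock must be held while testing visibility */
	LockBuffer(buffer, BUFFER_LOCK_SHARE);
	valid = HeapTupleSatisfiesVisibility(htup, snapshot, buffer);
	LockBuffer(buffer, BUFFER_LOCK_UNLOCK);

	return valid;
}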
diff --git a/src/backend/access/heap/visibilitymap.c b/src/backend/access/heap/visibilitymap.c
index ed72eb7b631..d62761728b0 100644
--- a/src/backend/access/heap/visibilitymap.c
+++ b/src/backend/access/heap/visibilitymap.c
@@ -137,7 +137,7 @@ static void vm_extend(Relation rel, BlockNumber vm_nblocks);
* any I/O. Returns true if any bits have been cleared and false otherwise.
*/
bool
-visibilitymap_clear(Relation rel, BlockNumber heapBlk, Buffer buf, uint8 flags)
+visibilitymap_clear(Relation rel, BlockNumber heapBlk, Buffer vmbuf, uint8 flags)
{
BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk);
int mapByte = HEAPBLK_TO_MAPBYTE(heapBlk);
@@ -152,21 +152,21 @@ visibilitymap_clear(Relation rel, BlockNumber heapBlk, Buffer buf, uint8 flags)
elog(DEBUG1, "vm_clear %s %d", RelationGetRelationName(rel), heapBlk);
#endif
- if (!BufferIsValid(buf) || BufferGetBlockNumber(buf) != mapBlock)
+ if (!BufferIsValid(vmbuf) || BufferGetBlockNumber(vmbuf) != mapBlock)
elog(ERROR, "wrong buffer passed to visibilitymap_clear");
- LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
- map = PageGetContents(BufferGetPage(buf));
+ LockBuffer(vmbuf, BUFFER_LOCK_EXCLUSIVE);
+ map = PageGetContents(BufferGetPage(vmbuf));
if (map[mapByte] & mask)
{
map[mapByte] &= ~mask;
- MarkBufferDirty(buf);
+ MarkBufferDirty(vmbuf);
cleared = true;
}
- LockBuffer(buf, BUFFER_LOCK_UNLOCK);
+ LockBuffer(vmbuf, BUFFER_LOCK_UNLOCK);
return cleared;
}
@@ -180,43 +180,43 @@ visibilitymap_clear(Relation rel, BlockNumber heapBlk, Buffer buf, uint8 flags)
* shouldn't hold a lock on the heap page while doing that. Then, call
* visibilitymap_set to actually set the bit.
*
- * On entry, *buf should be InvalidBuffer or a valid buffer returned by
+ * On entry, *vmbuf should be InvalidBuffer or a valid buffer returned by
* an earlier call to visibilitymap_pin or visibilitymap_get_status on the same
- * relation. On return, *buf is a valid buffer with the map page containing
+ * relation. On return, *vmbuf is a valid buffer with the map page containing
* the bit for heapBlk.
*
* If the page doesn't exist in the map file yet, it is extended.
*/
void
-visibilitymap_pin(Relation rel, BlockNumber heapBlk, Buffer *buf)
+visibilitymap_pin(Relation rel, BlockNumber heapBlk, Buffer *vmbuf)
{
BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk);
/* Reuse the old pinned buffer if possible */
- if (BufferIsValid(*buf))
+ if (BufferIsValid(*vmbuf))
{
- if (BufferGetBlockNumber(*buf) == mapBlock)
+ if (BufferGetBlockNumber(*vmbuf) == mapBlock)
return;
- ReleaseBuffer(*buf);
+ ReleaseBuffer(*vmbuf);
}
- *buf = vm_readbuf(rel, mapBlock, true);
+ *vmbuf = vm_readbuf(rel, mapBlock, true);
}
/*
* visibilitymap_pin_ok - do we already have the correct page pinned?
*
- * On entry, buf should be InvalidBuffer or a valid buffer returned by
+ * On entry, vmbuf should be InvalidBuffer or a valid buffer returned by
* an earlier call to visibilitymap_pin or visibilitymap_get_status on the same
* relation. The return value indicates whether the buffer covers the
* given heapBlk.
*/
bool
-visibilitymap_pin_ok(BlockNumber heapBlk, Buffer buf)
+visibilitymap_pin_ok(BlockNumber heapBlk, Buffer vmbuf)
{
BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk);
- return BufferIsValid(buf) && BufferGetBlockNumber(buf) == mapBlock;
+ return BufferIsValid(vmbuf) && BufferGetBlockNumber(vmbuf) == mapBlock;
}
/*
@@ -314,11 +314,11 @@ visibilitymap_set(Relation rel, BlockNumber heapBlk, Buffer heapBuf,
* Are all tuples on heapBlk visible to all or are marked frozen, according
* to the visibility map?
*
- * On entry, *buf should be InvalidBuffer or a valid buffer returned by an
+ * On entry, *vmbuf should be InvalidBuffer or a valid buffer returned by an
* earlier call to visibilitymap_pin or visibilitymap_get_status on the same
- * relation. On return, *buf is a valid buffer with the map page containing
+ * relation. On return, *vmbuf is a valid buffer with the map page containing
* the bit for heapBlk, or InvalidBuffer. The caller is responsible for
- * releasing *buf after it's done testing and setting bits.
+ * releasing *vmbuf after it's done testing and setting bits.
*
* NOTE: This function is typically called without a lock on the heap page,
* so somebody else could change the bit just after we look at it. In fact,
@@ -328,7 +328,7 @@ visibilitymap_set(Relation rel, BlockNumber heapBlk, Buffer heapBuf,
* all concurrency issues!
*/
uint8
-visibilitymap_get_status(Relation rel, BlockNumber heapBlk, Buffer *buf)
+visibilitymap_get_status(Relation rel, BlockNumber heapBlk, Buffer *vmbuf)
{
BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk);
uint32 mapByte = HEAPBLK_TO_MAPBYTE(heapBlk);
@@ -341,23 +341,23 @@ visibilitymap_get_status(Relation rel, BlockNumber heapBlk, Buffer *buf)
#endif
/* Reuse the old pinned buffer if possible */
- if (BufferIsValid(*buf))
+ if (BufferIsValid(*vmbuf))
{
- if (BufferGetBlockNumber(*buf) != mapBlock)
+ if (BufferGetBlockNumber(*vmbuf) != mapBlock)
{
- ReleaseBuffer(*buf);
- *buf = InvalidBuffer;
+ ReleaseBuffer(*vmbuf);
+ *vmbuf = InvalidBuffer;
}
}
- if (!BufferIsValid(*buf))
+ if (!BufferIsValid(*vmbuf))
{
- *buf = vm_readbuf(rel, mapBlock, false);
- if (!BufferIsValid(*buf))
+ *vmbuf = vm_readbuf(rel, mapBlock, false);
+ if (!BufferIsValid(*vmbuf))
return false;
}
- map = PageGetContents(BufferGetPage(*buf));
+ map = PageGetContents(BufferGetPage(*vmbuf));
/*
* A single byte read is atomic. There could be memory-ordering effects
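[Editor's note, not part of the patch] A hedged sketch of the caller-side protocol the renamed comments describe: *vmbuf starts out as InvalidBuffer, visibilitymap_get_status() (re)pins the right map page into it, and the caller releases the pin afterwards.

#include "postgres.h"
#include "access/visibilitymap.h"
#include "storage/bufmgr.h"

static bool
heapblk_all_visible(Relation rel, BlockNumber heapBlk)
{
	Buffer		vmbuf = InvalidBuffer;
	uint8		status;

	/* pins the VM page covering heapBlk into vmbuf, if it exists */
	status = visibilitymap_get_status(rel, heapBlk, &vmbuf);

	/* the caller is responsible for releasing the pin */
	if (BufferIsValid(vmbuf))
		ReleaseBuffer(vmbuf);

	return (status & VISIBILITYMAP_ALL_VISIBLE) != 0;
}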
diff --git a/src/backend/access/table/tableam.c b/src/backend/access/table/tableam.c
index b3d1a6c3f8f..094b24c7c9c 100644
--- a/src/backend/access/table/tableam.c
+++ b/src/backend/access/table/tableam.c
@@ -172,19 +172,18 @@ table_parallelscan_initialize(Relation rel, ParallelTableScanDesc pscan,
}
TableScanDesc
-table_beginscan_parallel(Relation relation, ParallelTableScanDesc parallel_scan)
+table_beginscan_parallel(Relation relation, ParallelTableScanDesc pscan)
{
Snapshot snapshot;
uint32 flags = SO_TYPE_SEQSCAN |
SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE;
- Assert(RelationGetRelid(relation) == parallel_scan->phs_relid);
+ Assert(RelationGetRelid(relation) == pscan->phs_relid);
- if (!parallel_scan->phs_snapshot_any)
+ if (!pscan->phs_snapshot_any)
{
/* Snapshot was serialized -- restore it */
- snapshot = RestoreSnapshot((char *) parallel_scan +
- parallel_scan->phs_snapshot_off);
+ snapshot = RestoreSnapshot((char *) pscan + pscan->phs_snapshot_off);
RegisterSnapshot(snapshot);
flags |= SO_TEMP_SNAPSHOT;
}
@@ -195,7 +194,7 @@ table_beginscan_parallel(Relation relation, ParallelTableScanDesc parallel_scan)
}
return relation->rd_tableam->scan_begin(relation, snapshot, 0, NULL,
- parallel_scan, flags);
+ pscan, flags);
}
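[Editor's note, not part of the patch] For context, a hedged sketch of how table_beginscan_parallel() fits into a parallel scan: the leader fills in the shared ParallelTableScanDesc, each worker attaches to it. "shared_mem" is a hypothetical stand-in for a DSM area the caller sized with table_parallelscan_estimate().

#include "postgres.h"
#include "access/tableam.h"

/* Leader: initialize the shared scan state placed in shared_mem. */
static ParallelTableScanDesc
setup_parallel_scan(Relation rel, Snapshot snapshot, void *shared_mem)
{
	ParallelTableScanDesc pscan = (ParallelTableScanDesc) shared_mem;

	table_parallelscan_initialize(rel, pscan, snapshot);
	return pscan;
}

/* Worker: attach to the shared state and begin scanning. */
static TableScanDesc
attach_parallel_scan(Relation rel, ParallelTableScanDesc pscan)
{
	return table_beginscan_parallel(rel, pscan);
}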
diff --git a/src/backend/access/transam/multixact.c b/src/backend/access/transam/multixact.c
index ec57f56adf3..a7383f553b3 100644
--- a/src/backend/access/transam/multixact.c
+++ b/src/backend/access/transam/multixact.c
@@ -1214,14 +1214,14 @@ GetNewMultiXactId(int nmembers, MultiXactOffset *offset)
* range, that is, greater to or equal than oldestMultiXactId, and less than
* nextMXact. Otherwise, an error is raised.
*
- * onlyLock must be set to true if caller is certain that the given multi
+ * isLockOnly must be set to true if caller is certain that the given multi
* is used only to lock tuples; can be false without loss of correctness,
* but passing a true means we can return quickly without checking for
* old updates.
*/
int
GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members,
- bool from_pgupgrade, bool onlyLock)
+ bool from_pgupgrade, bool isLockOnly)
{
int pageno;
int prev_pageno;
@@ -1263,7 +1263,7 @@ GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members,
* we can skip checking if the value is older than our oldest visible
* multi. It cannot possibly still be running.
*/
- if (onlyLock &&
+ if (isLockOnly &&
MultiXactIdPrecedes(multi, OldestVisibleMXactId[MyBackendId]))
{
debug_elog2(DEBUG2, "GetMembers: a locker-only multi is too old");
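[Editor's note, not part of the patch] A hedged sketch of calling GetMultiXactIdMembers() with the renamed isLockOnly hint; passing false is always correct, true merely enables the fast path shown above for locker-only multis.

#include "postgres.h"
#include "access/multixact.h"

static void
log_multixact_members(MultiXactId multi, bool isLockOnly)
{
	MultiXactMember *members;
	int			nmembers;

	nmembers = GetMultiXactIdMembers(multi, &members, false, isLockOnly);
	if (nmembers <= 0)
		return;					/* no members to report */

	for (int i = 0; i < nmembers; i++)
		elog(DEBUG2, "multi %u member %u has status %d",
			 multi, members[i].xid, members[i].status);

	pfree(members);
}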
diff --git a/src/include/access/heapam.h b/src/include/access/heapam.h
index abf62d9df79..9dab35551e1 100644
--- a/src/include/access/heapam.h
+++ b/src/include/access/heapam.h
@@ -118,13 +118,13 @@ extern TableScanDesc heap_beginscan(Relation relation, Snapshot snapshot,
int nkeys, ScanKey key,
ParallelTableScanDesc parallel_scan,
uint32 flags);
-extern void heap_setscanlimits(TableScanDesc scan, BlockNumber startBlk,
+extern void heap_setscanlimits(TableScanDesc sscan, BlockNumber startBlk,
BlockNumber numBlks);
-extern void heapgetpage(TableScanDesc scan, BlockNumber page);
-extern void heap_rescan(TableScanDesc scan, ScanKey key, bool set_params,
+extern void heapgetpage(TableScanDesc sscan, BlockNumber page);
+extern void heap_rescan(TableScanDesc sscan, ScanKey key, bool set_params,
bool allow_strat, bool allow_sync, bool allow_pagemode);
-extern void heap_endscan(TableScanDesc scan);
-extern HeapTuple heap_getnext(TableScanDesc scan, ScanDirection direction);
+extern void heap_endscan(TableScanDesc sscan);
+extern HeapTuple heap_getnext(TableScanDesc sscan, ScanDirection direction);
extern bool heap_getnextslot(TableScanDesc sscan,
ScanDirection direction, struct TupleTableSlot *slot);
extern void heap_set_tidrange(TableScanDesc sscan, ItemPointer mintid,
@@ -138,7 +138,7 @@ extern bool heap_hot_search_buffer(ItemPointer tid, Relation relation,
Buffer buffer, Snapshot snapshot, HeapTuple heapTuple,
bool *all_dead, bool first_call);
-extern void heap_get_latest_tid(TableScanDesc scan, ItemPointer tid);
+extern void heap_get_latest_tid(TableScanDesc sscan, ItemPointer tid);
extern BulkInsertState GetBulkInsertState(void);
extern void FreeBulkInsertState(BulkInsertState);
@@ -160,7 +160,7 @@ extern TM_Result heap_update(Relation relation, ItemPointer otid,
struct TM_FailureData *tmfd, LockTupleMode *lockmode);
extern TM_Result heap_lock_tuple(Relation relation, HeapTuple tuple,
CommandId cid, LockTupleMode mode, LockWaitPolicy wait_policy,
- bool follow_update,
+ bool follow_updates,
Buffer *buffer, struct TM_FailureData *tmfd);
extern void heap_inplace_update(Relation relation, HeapTuple tuple);
@@ -187,7 +187,7 @@ extern void heap_page_prune_opt(Relation relation, Buffer buffer);
extern int heap_page_prune(Relation relation, Buffer buffer,
struct GlobalVisState *vistest,
TransactionId old_snap_xmin,
- TimestampTz old_snap_ts_ts,
+ TimestampTz old_snap_ts,
int *nnewlpdead,
OffsetNumber *off_loc);
extern void heap_page_prune_execute(Buffer buffer,
@@ -202,13 +202,13 @@ extern void heap_vacuum_rel(Relation rel,
struct VacuumParams *params, BufferAccessStrategy bstrategy);
/* in heap/heapam_visibility.c */
-extern bool HeapTupleSatisfiesVisibility(HeapTuple stup, Snapshot snapshot,
+extern bool HeapTupleSatisfiesVisibility(HeapTuple htup, Snapshot snapshot,
Buffer buffer);
-extern TM_Result HeapTupleSatisfiesUpdate(HeapTuple stup, CommandId curcid,
+extern TM_Result HeapTupleSatisfiesUpdate(HeapTuple htup, CommandId curcid,
Buffer buffer);
-extern HTSV_Result HeapTupleSatisfiesVacuum(HeapTuple stup, TransactionId OldestXmin,
+extern HTSV_Result HeapTupleSatisfiesVacuum(HeapTuple htup, TransactionId OldestXmin,
Buffer buffer);
-extern HTSV_Result HeapTupleSatisfiesVacuumHorizon(HeapTuple stup, Buffer buffer,
+extern HTSV_Result HeapTupleSatisfiesVacuumHorizon(HeapTuple htup, Buffer buffer,
TransactionId *dead_after);
extern void HeapTupleSetHintBits(HeapTupleHeader tuple, Buffer buffer,
uint16 infomask, TransactionId xid);
@@ -227,7 +227,7 @@ extern bool ResolveCminCmaxDuringDecoding(struct HTAB *tuplecid_data,
HeapTuple htup,
Buffer buffer,
CommandId *cmin, CommandId *cmax);
-extern void HeapCheckForSerializableConflictOut(bool valid, Relation relation, HeapTuple tuple,
+extern void HeapCheckForSerializableConflictOut(bool visible, Relation relation, HeapTuple tuple,
Buffer buffer, Snapshot snapshot);
#endif /* HEAPAM_H */
diff --git a/src/include/access/heapam_xlog.h b/src/include/access/heapam_xlog.h
index 1705e736be5..34220d93cff 100644
--- a/src/include/access/heapam_xlog.h
+++ b/src/include/access/heapam_xlog.h
@@ -414,8 +414,8 @@ extern bool heap_prepare_freeze_tuple(HeapTupleHeader tuple,
TransactionId *relfrozenxid_out,
MultiXactId *relminmxid_out);
extern void heap_execute_freeze_tuple(HeapTupleHeader tuple,
- xl_heap_freeze_tuple *xlrec_tp);
+ xl_heap_freeze_tuple *frz);
extern XLogRecPtr log_heap_visible(RelFileLocator rlocator, Buffer heap_buffer,
- Buffer vm_buffer, TransactionId cutoff_xid, uint8 flags);
+ Buffer vm_buffer, TransactionId cutoff_xid, uint8 vmflags);
#endif /* HEAPAM_XLOG_H */
diff --git a/src/include/access/htup_details.h b/src/include/access/htup_details.h
index 51a60eda088..9561c835f21 100644
--- a/src/include/access/htup_details.h
+++ b/src/include/access/htup_details.h
@@ -699,7 +699,7 @@ extern void heap_fill_tuple(TupleDesc tupleDesc,
uint16 *infomask, bits8 *bit);
extern bool heap_attisnull(HeapTuple tup, int attnum, TupleDesc tupleDesc);
extern Datum nocachegetattr(HeapTuple tup, int attnum,
- TupleDesc att);
+ TupleDesc tupleDesc);
extern Datum heap_getsysattr(HeapTuple tup, int attnum, TupleDesc tupleDesc,
bool *isnull);
extern Datum getmissingattr(TupleDesc tupleDesc,
diff --git a/src/include/access/multixact.h b/src/include/access/multixact.h
index a5600a320ae..4cbe17de7bd 100644
--- a/src/include/access/multixact.h
+++ b/src/include/access/multixact.h
@@ -112,8 +112,8 @@ extern MultiXactId ReadNextMultiXactId(void);
extern void ReadMultiXactIdRange(MultiXactId *oldest, MultiXactId *next);
extern bool MultiXactIdIsRunning(MultiXactId multi, bool isLockOnly);
extern void MultiXactIdSetOldestMember(void);
-extern int GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **xids,
- bool allow_old, bool isLockOnly);
+extern int GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members,
+ bool from_pgupgrade, bool isLockOnly);
extern bool MultiXactIdPrecedes(MultiXactId multi1, MultiXactId multi2);
extern bool MultiXactIdPrecedesOrEquals(MultiXactId multi1,
MultiXactId multi2);
@@ -140,7 +140,8 @@ extern void MultiXactGetCheckptMulti(bool is_shutdown,
Oid *oldestMultiDB);
extern void CheckPointMultiXact(void);
extern MultiXactId GetOldestMultiXactId(void);
-extern void TruncateMultiXact(MultiXactId oldestMulti, Oid oldestMultiDB);
+extern void TruncateMultiXact(MultiXactId newOldestMulti,
+ Oid newOldestMultiDB);
extern void MultiXactSetNextMXact(MultiXactId nextMulti,
MultiXactOffset nextMultiOffset);
extern void MultiXactAdvanceNextMXact(MultiXactId minMulti,
diff --git a/src/include/access/rewriteheap.h b/src/include/access/rewriteheap.h
index 353cbb2924c..5cc04756a5e 100644
--- a/src/include/access/rewriteheap.h
+++ b/src/include/access/rewriteheap.h
@@ -21,13 +21,13 @@
/* struct definition is private to rewriteheap.c */
typedef struct RewriteStateData *RewriteState;
-extern RewriteState begin_heap_rewrite(Relation OldHeap, Relation NewHeap,
- TransactionId OldestXmin, TransactionId FreezeXid,
- MultiXactId MultiXactCutoff);
+extern RewriteState begin_heap_rewrite(Relation old_heap, Relation new_heap,
+ TransactionId oldest_xmin, TransactionId freeze_xid,
+ MultiXactId cutoff_multi);
extern void end_heap_rewrite(RewriteState state);
-extern void rewrite_heap_tuple(RewriteState state, HeapTuple oldTuple,
- HeapTuple newTuple);
-extern bool rewrite_heap_dead_tuple(RewriteState state, HeapTuple oldTuple);
+extern void rewrite_heap_tuple(RewriteState state, HeapTuple old_tuple,
+ HeapTuple new_tuple);
+extern bool rewrite_heap_dead_tuple(RewriteState state, HeapTuple old_tuple);
/*
* On-Disk data format for an individual logical rewrite mapping.
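[Editor's note, not part of the patch] A hedged sketch of the rewrite life cycle these renamed declarations describe; the cutoff values are assumed to come from the caller's vacuum-style cutoff computation, and real callers loop over many tuples between begin and end.

#include "postgres.h"
#include "access/rewriteheap.h"
#include "access/htup.h"
#include "utils/rel.h"

static void
rewrite_one_tuple(Relation old_heap, Relation new_heap,
				  TransactionId oldest_xmin, TransactionId freeze_xid,
				  MultiXactId cutoff_multi,
				  HeapTuple old_tuple, HeapTuple new_tuple)
{
	RewriteState rwstate;

	rwstate = begin_heap_rewrite(old_heap, new_heap,
								 oldest_xmin, freeze_xid, cutoff_multi);

	/* live tuples go through rewrite_heap_tuple() ... */
	rewrite_heap_tuple(rwstate, old_tuple, new_tuple);

	/* ... and end_heap_rewrite() flushes and closes the state */
	end_heap_rewrite(rwstate);
}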
diff --git a/src/include/access/tableam.h b/src/include/access/tableam.h
index ffe265d2a15..e45d73eae3c 100644
--- a/src/include/access/tableam.h
+++ b/src/include/access/tableam.h
@@ -863,13 +863,13 @@ typedef struct TableAmRoutine
* for the relation. Works for tables, views, foreign tables and partitioned
* tables.
*/
-extern const TupleTableSlotOps *table_slot_callbacks(Relation rel);
+extern const TupleTableSlotOps *table_slot_callbacks(Relation relation);
/*
* Returns slot using the callbacks returned by table_slot_callbacks(), and
* registers it on *reglist.
*/
-extern TupleTableSlot *table_slot_create(Relation rel, List **reglist);
+extern TupleTableSlot *table_slot_create(Relation relation, List **reglist);
/* ----------------------------------------------------------------------------
@@ -895,7 +895,7 @@ table_beginscan(Relation rel, Snapshot snapshot,
* Like table_beginscan(), but for scanning catalog. It'll automatically use a
* snapshot appropriate for scanning catalog relations.
*/
-extern TableScanDesc table_beginscan_catalog(Relation rel, int nkeys,
+extern TableScanDesc table_beginscan_catalog(Relation relation, int nkeys,
struct ScanKeyData *key);
/*
@@ -1133,7 +1133,7 @@ extern void table_parallelscan_initialize(Relation rel,
*
* Caller must hold a suitable lock on the relation.
*/
-extern TableScanDesc table_beginscan_parallel(Relation rel,
+extern TableScanDesc table_beginscan_parallel(Relation relation,
ParallelTableScanDesc pscan);
/*
diff --git a/src/include/commands/cluster.h b/src/include/commands/cluster.h
index df8e73af409..de9040c4b7c 100644
--- a/src/include/commands/cluster.h
+++ b/src/include/commands/cluster.h
@@ -45,7 +45,7 @@ extern void finish_heap_swap(Oid OIDOldHeap, Oid OIDNewHeap,
bool check_constraints,
bool is_internal,
TransactionId frozenXid,
- MultiXactId minMulti,
+ MultiXactId cutoffMulti,
char newrelpersistence);
#endif /* CLUSTER_H */
diff --git a/src/include/replication/snapbuild.h b/src/include/replication/snapbuild.h
index e6adea24f22..f126ff2e085 100644
--- a/src/include/replication/snapbuild.h
+++ b/src/include/replication/snapbuild.h
@@ -59,24 +59,24 @@ struct xl_running_xacts;
extern void CheckPointSnapBuild(void);
-extern SnapBuild *AllocateSnapshotBuilder(struct ReorderBuffer *cache,
+extern SnapBuild *AllocateSnapshotBuilder(struct ReorderBuffer *reorder,
TransactionId xmin_horizon, XLogRecPtr start_lsn,
bool need_full_snapshot,
XLogRecPtr two_phase_at);
-extern void FreeSnapshotBuilder(SnapBuild *cache);
+extern void FreeSnapshotBuilder(SnapBuild *builder);
extern void SnapBuildSnapDecRefcount(Snapshot snap);
extern Snapshot SnapBuildInitialSnapshot(SnapBuild *builder);
-extern const char *SnapBuildExportSnapshot(SnapBuild *snapstate);
+extern const char *SnapBuildExportSnapshot(SnapBuild *builder);
extern void SnapBuildClearExportedSnapshot(void);
extern void SnapBuildResetExportedSnapshotState(void);
-extern SnapBuildState SnapBuildCurrentState(SnapBuild *snapstate);
+extern SnapBuildState SnapBuildCurrentState(SnapBuild *builder);
extern Snapshot SnapBuildGetOrBuildSnapshot(SnapBuild *builder,
TransactionId xid);
-extern bool SnapBuildXactNeedsSkip(SnapBuild *snapstate, XLogRecPtr ptr);
+extern bool SnapBuildXactNeedsSkip(SnapBuild *builder, XLogRecPtr ptr);
extern XLogRecPtr SnapBuildGetTwoPhaseAt(SnapBuild *builder);
extern void SnapBuildSetTwoPhaseAt(SnapBuild *builder, XLogRecPtr ptr);
@@ -86,7 +86,8 @@ extern void SnapBuildCommitTxn(SnapBuild *builder, XLogRecPtr lsn,
extern bool SnapBuildProcessChange(SnapBuild *builder, TransactionId xid,
XLogRecPtr lsn);
extern void SnapBuildProcessNewCid(SnapBuild *builder, TransactionId xid,
- XLogRecPtr lsn, struct xl_heap_new_cid *cid);
+ XLogRecPtr lsn,
+ struct xl_heap_new_cid *xlrec);
extern void SnapBuildProcessRunningXacts(SnapBuild *builder, XLogRecPtr lsn,
struct xl_running_xacts *running);
extern void SnapBuildSerializationPoint(SnapBuild *builder, XLogRecPtr lsn);