Diffstat (limited to 'src/backend/access/heap')
-rw-r--r--  src/backend/access/heap/heapam.c          | 15
-rw-r--r--  src/backend/access/heap/heapam_handler.c  |  2
-rw-r--r--  src/backend/access/heap/rewriteheap.c     |  2
3 files changed, 5 insertions, 14 deletions
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index bf82304b0ff..8ac0f8a5134 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -239,8 +239,8 @@ initscan(HeapScanDesc scan, ScanKey key, bool keep_startblock)
* behaviors, independently of the size of the table; also there is a GUC
* variable that can disable synchronized scanning.)
*
- * Note that heap_parallelscan_initialize has a very similar test; if you
- * change this, consider changing that one, too.
+ * Note that table_block_parallelscan_initialize has a very similar test;
+ * if you change this, consider changing that one, too.
*/
if (!RelationUsesLocalBuffers(scan->rs_base.rs_rd) &&
scan->rs_nblocks > NBuffers / 4)
@@ -1396,15 +1396,6 @@ heap_getnextslot(TableScanDesc sscan, ScanDirection direction, TupleTableSlot *s
* If the tuple is found but fails the time qual check, then false is returned
* but tuple->t_data is left pointing to the tuple.
*
- * keep_buf determines what is done with the buffer in the false-result cases.
- * When the caller specifies keep_buf = true, we retain the pin on the buffer
- * and return it in *userbuf (so the caller must eventually unpin it); when
- * keep_buf = false, the pin is released and *userbuf is set to InvalidBuffer.
- *
- * stats_relation is the relation to charge the heap_fetch operation against
- * for statistical purposes. (This could be the heap rel itself, an
- * associated index, or NULL to not count the fetch at all.)
- *
* heap_fetch does not follow HOT chains: only the exact TID requested will
* be fetched.
*
@@ -7085,7 +7076,7 @@ heap_compute_xid_horizon_for_tuples(Relation rel,
* Conjecture: if hitemid is dead then it had xids before the xids
* marked on LP_NORMAL items. So we just ignore this item and move
* onto the next, for the purposes of calculating
- * latestRemovedxids.
+ * latestRemovedXid.
*/
}
else
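
The two paragraphs deleted in the hunk above documented heap_fetch's keep_buf and stats_relation arguments. Purely for orientation, here is a minimal hypothetical sketch of a caller of the slimmed-down interface; it assumes a four-argument heap_fetch(relation, snapshot, tuple, userbuf) whose false result always releases the buffer pin, which is implied but not spelled out in this diff.

#include "postgres.h"

#include "access/heapam.h"
#include "storage/bufmgr.h"

/*
 * Hypothetical example (not part of this commit): fetch the tuple at an
 * exact TID and report whether it is visible under the given snapshot.
 * heap_fetch does not follow HOT chains, so only *tid itself is examined.
 */
static bool
fetch_tid_example(Relation relation, Snapshot snapshot, ItemPointer tid)
{
	HeapTupleData tuple;
	Buffer		buffer;

	tuple.t_self = *tid;

	if (heap_fetch(relation, snapshot, &tuple, &buffer))
	{
		/* visible: tuple.t_data is set and we hold a pin on buffer */
		ReleaseBuffer(buffer);
		return true;
	}

	/* not found, or found but failing the time qual: no pin is held here */
	return false;
}
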
diff --git a/src/backend/access/heap/heapam_handler.c b/src/backend/access/heap/heapam_handler.c
index 92ea1d163e6..b7d2ddbbdcf 100644
--- a/src/backend/access/heap/heapam_handler.c
+++ b/src/backend/access/heap/heapam_handler.c
@@ -286,7 +286,7 @@ heapam_tuple_insert_speculative(Relation relation, TupleTableSlot *slot,
static void
heapam_tuple_complete_speculative(Relation relation, TupleTableSlot *slot,
- uint32 spekToken, bool succeeded)
+ uint32 specToken, bool succeeded)
{
bool shouldFree = true;
HeapTuple tuple = ExecFetchSlotHeapTuple(slot, true, &shouldFree);
diff --git a/src/backend/access/heap/rewriteheap.c b/src/backend/access/heap/rewriteheap.c
index 131ec7b8d7f..369694fa2e9 100644
--- a/src/backend/access/heap/rewriteheap.c
+++ b/src/backend/access/heap/rewriteheap.c
@@ -350,7 +350,7 @@ end_heap_rewrite(RewriteState state)
*
* It's obvious that we must do this when not WAL-logging. It's less
* obvious that we have to do it even if we did WAL-log the pages. The
- * reason is the same as in tablecmds.c's copy_relation_data(): we're
+ * reason is the same as in storage.c's RelationCopyStorage(): we're
* writing data that's not in shared buffers, and so a CHECKPOINT
* occurring during the rewriteheap operation won't have fsync'd data we
* wrote before the checkpoint.
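
The comment above, like RelationCopyStorage() in storage.c that it now points to, concerns data written below the shared-buffers layer. As a hedged illustration only, and assuming the smgrextend()/smgrimmedsync() interfaces of this vintage, a copy loop of this general shape writes pages directly through smgr and then forces its own sync rather than relying on the next CHECKPOINT:

#include "postgres.h"

#include "storage/smgr.h"

/*
 * Hypothetical sketch (not code from this commit): write nblocks page
 * images to dstrel's main fork without going through shared buffers,
 * then fsync immediately.  A CHECKPOINT that happens while the loop runs
 * knows nothing about these writes, so the final sync is needed even if
 * every page was also WAL-logged.  (The same placeholder page image is
 * reused for every block just to keep the sketch short.)
 */
static void
copy_and_sync_example(SMgrRelation dstrel, char *page, BlockNumber nblocks)
{
	BlockNumber blkno;

	for (blkno = 0; blkno < nblocks; blkno++)
	{
		/* skipFsync = true: one immediate sync below covers all writes */
		smgrextend(dstrel, MAIN_FORKNUM, blkno, page, true);
	}

	/* same reasoning as RelationCopyStorage() in storage.c */
	smgrimmedsync(dstrel, MAIN_FORKNUM);
}

The design point the updated comment makes is that even when every page is also WAL-logged, a concurrent CHECKPOINT has no record of these direct writes, so the explicit immediate sync cannot be skipped.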