author      Peter Geoghegan <pg@bowt.ie>    2019-05-13 15:53:39 -0700
committer   Peter Geoghegan <pg@bowt.ie>    2019-05-13 15:53:39 -0700
commit      ae7291acbc5ae4c3751615177fc256f9f3399403 (patch)
tree        8a67a1936fe81aa9d5e5b1c4b023f7993d6b9437 /src/backend/access
parent      08ca9d7feca890e97f77ef1fde02d7542a54ac5e (diff)
Standardize ItemIdData terminology.
The term "item pointer" should not be used to refer to ItemIdData variables, since that is needlessly ambiguous. Only ItemPointerData/ItemPointer variables should be called item pointers. To fix, establish the convention that ItemIdData variables should always be referred to either as "item identifiers" or "line pointers". The term "item identifier" already predominates in docs and translatable messages, and so should be the preferred alternative there. Discussion: https://postgr.es/m/CAH2-Wz=c=MZQjUzde3o9+2PLAPuHTpVZPPdYxN=E4ndQ2--8ew@mail.gmail.com
Diffstat (limited to 'src/backend/access')
-rw-r--r--   src/backend/access/heap/README.HOT          10
-rw-r--r--   src/backend/access/heap/heapam.c              4
-rw-r--r--   src/backend/access/heap/heapam_handler.c      2
-rw-r--r--   src/backend/access/heap/pruneheap.c          10
-rw-r--r--   src/backend/access/heap/vacuumlazy.c          6
-rw-r--r--   src/backend/access/index/indexam.c           26
-rw-r--r--   src/backend/access/nbtree/nbtinsert.c         2
-rw-r--r--   src/backend/access/spgist/spgvacuum.c         2
8 files changed, 18 insertions, 44 deletions
diff --git a/src/backend/access/heap/README.HOT b/src/backend/access/heap/README.HOT
index 4cf3c3a0d4c..68c6709aa88 100644
--- a/src/backend/access/heap/README.HOT
+++ b/src/backend/access/heap/README.HOT
@@ -149,8 +149,8 @@ the descendant heap-only tuple. It is conceivable that someone prunes
the heap-only tuple before that, and even conceivable that the line pointer
is re-used for another purpose. Therefore, when following a HOT chain,
it is always necessary to be prepared for the possibility that the
-linked-to item pointer is unused, dead, or redirected; and if it is a
-normal item pointer, we still have to check that XMIN of the tuple matches
+linked-to line pointer is unused, dead, or redirected; and if it is a
+normal line pointer, we still have to check that XMIN of the tuple matches
the XMAX of the tuple we left. Otherwise we should assume that we have
come to the end of the HOT chain. Note that this sort of XMIN/XMAX
matching is required when following ordinary update chains anyway.
@@ -171,14 +171,14 @@ bit: there can be at most one visible tuple in the chain, so we can stop
when we find it. This rule does not work for non-MVCC snapshots, though.)
Sequential scans do not need to pay attention to the HOT links because
-they scan every item pointer on the page anyway. The same goes for a
+they scan every line pointer on the page anyway. The same goes for a
bitmap heap scan with a lossy bitmap.
Pruning
-------
-HOT pruning means updating item pointers so that HOT chains are
+HOT pruning means updating line pointers so that HOT chains are
reduced in length, by collapsing out line pointers for intermediate dead
tuples. Although this makes those line pointers available for re-use,
it does not immediately make the space occupied by their tuples available.
@@ -271,7 +271,7 @@ physical tuple by eliminating an intermediate heap-only tuple or
replacing a physical root tuple by a redirect pointer, a decrement in
the table's number of dead tuples is reported to pgstats, which may
postpone autovacuuming. Note that we do not count replacing a root tuple
-by a DEAD item pointer as decrementing n_dead_tuples; we still want
+by a DEAD line pointer as decrementing n_dead_tuples; we still want
autovacuum to run to clean up the index entries and DEAD item.
This area probably needs further work ...
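To make the first README.HOT hunk above concrete, here is a simplified sketch
of the check a HOT-chain follower performs on the linked-to line pointer. It is
loosely modelled on heap_hot_search_buffer(), not quoted from it, and the
function name is invented.

/*
 * Simplified sketch, not the actual implementation.
 */
#include "postgres.h"
#include "access/htup_details.h"
#include "access/transam.h"
#include "storage/bufpage.h"

static bool
hot_chain_member_is_valid(Page page, OffsetNumber offnum,
                          TransactionId prior_xmax)
{
    ItemId          lp = PageGetItemId(page, offnum);
    HeapTupleHeader htup;

    /*
     * The linked-to line pointer may be unused, dead, or redirected; per the
     * README, anything other than a normal line pointer means we have come
     * to the end of the HOT chain.
     */
    if (!ItemIdIsNormal(lp))
        return false;

    htup = (HeapTupleHeader) PageGetItem(page, lp);

    /*
     * Even a normal line pointer may have been recycled for an unrelated
     * tuple, so XMIN here must match the XMAX of the tuple we arrived from.
     */
    if (TransactionIdIsValid(prior_xmax) &&
        !TransactionIdEquals(HeapTupleHeaderGetXmin(htup), prior_xmax))
        return false;

    return true;
}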
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index 6f26ddac5f9..d97cb4c6426 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -7163,7 +7163,7 @@ log_heap_clean(Relation reln, Buffer buffer,
* arrays need not be stored too. Note that even if all three arrays are
* empty, we want to expose the buffer as a candidate for whole-page
* storage, since this record type implies a defragmentation operation
- * even if no item pointers changed state.
+ * even if no line pointers changed state.
*/
if (nredirected > 0)
XLogRegisterBufData(0, (char *) redirected,
@@ -7724,7 +7724,7 @@ heap_xlog_clean(XLogReaderState *record)
nunused = (end - nowunused);
Assert(nunused >= 0);
- /* Update all item pointers per the record, and repair fragmentation */
+ /* Update all line pointers per the record, and repair fragmentation */
heap_page_prune_execute(buffer,
redirected, nredirected,
nowdead, ndead,
diff --git a/src/backend/access/heap/heapam_handler.c b/src/backend/access/heap/heapam_handler.c
index 4d179881f27..bc47856ad53 100644
--- a/src/backend/access/heap/heapam_handler.c
+++ b/src/backend/access/heap/heapam_handler.c
@@ -2162,7 +2162,7 @@ heapam_scan_bitmap_next_block(TableScanDesc scan,
else
{
/*
- * Bitmap is lossy, so we must examine each item pointer on the page.
+ * Bitmap is lossy, so we must examine each line pointer on the page.
* But we can ignore HOT chains, since we'll check each tuple anyway.
*/
Page dp = (Page) BufferGetPage(buffer);
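The lossy-bitmap comment above boils down to a plain walk over the page's line
pointer array. A minimal sketch with an invented helper name, omitting the
per-tuple visibility checks that the real heapam_scan_bitmap_next_block() loop
performs:

/*
 * Sketch only; offsets[] must have room for every offset on the page
 * (MaxHeapTuplesPerPage entries).
 */
#include "postgres.h"
#include "storage/bufmgr.h"
#include "storage/bufpage.h"

static int
collect_offsets_lossy(Buffer buffer, OffsetNumber *offsets)
{
    Page         dp = BufferGetPage(buffer);
    OffsetNumber maxoff = PageGetMaxOffsetNumber(dp);
    OffsetNumber offnum;
    int          ntup = 0;

    for (offnum = FirstOffsetNumber; offnum <= maxoff;
         offnum = OffsetNumberNext(offnum))
    {
        ItemId      lp = PageGetItemId(dp, offnum);

        /* Only normal line pointers currently carry a tuple. */
        if (!ItemIdIsNormal(lp))
            continue;

        offsets[ntup++] = offnum;
    }

    return ntup;
}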
diff --git a/src/backend/access/heap/pruneheap.c b/src/backend/access/heap/pruneheap.c
index a3e51922d85..417a2bf8e6e 100644
--- a/src/backend/access/heap/pruneheap.c
+++ b/src/backend/access/heap/pruneheap.c
@@ -324,7 +324,7 @@ heap_page_prune(Relation relation, Buffer buffer, TransactionId OldestXmin,
/*
- * Prune specified item pointer or a HOT chain originating at that item.
+ * Prune specified line pointer or a HOT chain originating at line pointer.
*
* If the item is an index-referenced tuple (i.e. not a heap-only tuple),
* the HOT chain is pruned by removing all DEAD tuples at the start of the HOT
@@ -454,7 +454,7 @@ heap_prune_chain(Relation relation, Buffer buffer, OffsetNumber rootoffnum,
}
/*
- * Likewise, a dead item pointer can't be part of the chain. (We
+ * Likewise, a dead line pointer can't be part of the chain. (We
* already eliminated the case of dead root tuple outside this
* function.)
*/
@@ -630,7 +630,7 @@ heap_prune_record_prunable(PruneState *prstate, TransactionId xid)
prstate->new_prune_xid = xid;
}
-/* Record item pointer to be redirected */
+/* Record line pointer to be redirected */
static void
heap_prune_record_redirect(PruneState *prstate,
OffsetNumber offnum, OffsetNumber rdoffnum)
@@ -645,7 +645,7 @@ heap_prune_record_redirect(PruneState *prstate,
prstate->marked[rdoffnum] = true;
}
-/* Record item pointer to be marked dead */
+/* Record line pointer to be marked dead */
static void
heap_prune_record_dead(PruneState *prstate, OffsetNumber offnum)
{
@@ -656,7 +656,7 @@ heap_prune_record_dead(PruneState *prstate, OffsetNumber offnum)
prstate->marked[offnum] = true;
}
-/* Record item pointer to be marked unused */
+/* Record line pointer to be marked unused */
static void
heap_prune_record_unused(PruneState *prstate, OffsetNumber offnum)
{
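The heap_prune_record_* functions touched above only queue offsets; the line
pointer states are changed later. A rough sketch in the spirit of
heap_page_prune_execute() of the three state changes plus the defragmentation
step -- the function name and argument list are invented, and the real code
loops over arrays of offsets rather than taking one of each.

#include "postgres.h"
#include "storage/bufpage.h"

static void
apply_prune_effects(Page page,
                    OffsetNumber redirect_from, OffsetNumber redirect_to,
                    OffsetNumber dead_off, OffsetNumber unused_off)
{
    /* Root of a surviving HOT chain becomes a redirect to the live member. */
    ItemIdSetRedirect(PageGetItemId(page, redirect_from), redirect_to);

    /* A dead root keeps a placeholder, since index entries still reference it. */
    ItemIdSetDead(PageGetItemId(page, dead_off));

    /* Intermediate heap-only tuples can be marked unused right away. */
    ItemIdSetUnused(PageGetItemId(page, unused_off));

    /* Squeeze out the space formerly occupied by the removed tuples. */
    PageRepairFragmentation(page);
}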
diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c
index f1a79059cdb..9e17acc110e 100644
--- a/src/backend/access/heap/vacuumlazy.c
+++ b/src/backend/access/heap/vacuumlazy.c
@@ -509,7 +509,7 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
live_tuples, /* live tuples (reltuples estimate) */
tups_vacuumed, /* tuples cleaned up by vacuum */
nkeep, /* dead-but-not-removable tuples */
- nunused; /* unused item pointers */
+ nunused; /* unused line pointers */
IndexBulkDeleteResult **indstats;
int i;
PGRUsage ru0;
@@ -1017,7 +1017,7 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
ItemPointerSet(&(tuple.t_self), blkno, offnum);
/*
- * DEAD item pointers are to be vacuumed normally; but we don't
+ * DEAD line pointers are to be vacuumed normally; but we don't
* count them in tups_vacuumed, else we'd be double-counting (at
* least in the common case where heap_page_prune() just freed up
* a non-HOT tuple).
@@ -1483,7 +1483,7 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
appendStringInfo(&buf,
_("%.0f dead row versions cannot be removed yet, oldest xmin: %u\n"),
nkeep, OldestXmin);
- appendStringInfo(&buf, _("There were %.0f unused item pointers.\n"),
+ appendStringInfo(&buf, _("There were %.0f unused item identifiers.\n"),
nunused);
appendStringInfo(&buf, ngettext("Skipped %u page due to buffer pins, ",
"Skipped %u pages due to buffer pins, ",
diff --git a/src/backend/access/index/indexam.c b/src/backend/access/index/indexam.c
index ae1c87ebadd..0fc9139badb 100644
--- a/src/backend/access/index/indexam.c
+++ b/src/backend/access/index/indexam.c
@@ -38,32 +38,6 @@
* This file contains the index_ routines which used
* to be a scattered collection of stuff in access/genam.
*
- *
- * old comments
- * Scans are implemented as follows:
- *
- * `0' represents an invalid item pointer.
- * `-' represents an unknown item pointer.
- * `X' represents a known item pointers.
- * `+' represents known or invalid item pointers.
- * `*' represents any item pointers.
- *
- * State is represented by a triple of these symbols in the order of
- * previous, current, next. Note that the case of reverse scans works
- * identically.
- *
- * State Result
- * (1) + + - + 0 0 (if the next item pointer is invalid)
- * (2) + X - (otherwise)
- * (3) * 0 0 * 0 0 (no change)
- * (4) + X 0 X 0 0 (shift)
- * (5) * + X + X - (shift, add unknown)
- *
- * All other states cannot occur.
- *
- * Note: It would be possible to cache the status of the previous and
- * next item pointer using the flags.
- *
*-------------------------------------------------------------------------
*/
diff --git a/src/backend/access/nbtree/nbtinsert.c b/src/backend/access/nbtree/nbtinsert.c
index 8b3ce183507..47ae7ec8b85 100644
--- a/src/backend/access/nbtree/nbtinsert.c
+++ b/src/backend/access/nbtree/nbtinsert.c
@@ -1689,7 +1689,7 @@ _bt_split(Relation rel, BTScanInsert itup_key, Buffer buf, Buffer cbuf,
* Direct access to page is not good but faster - we should implement
* some new func in page API. Note we only store the tuples
* themselves, knowing that they were inserted in item-number order
- * and so the item pointers can be reconstructed. See comments for
+ * and so the line pointers can be reconstructed. See comments for
* _bt_restore_page().
*/
XLogRegisterBufData(1,
diff --git a/src/backend/access/spgist/spgvacuum.c b/src/backend/access/spgist/spgvacuum.c
index b9311ce5950..fc85c6f9407 100644
--- a/src/backend/access/spgist/spgvacuum.c
+++ b/src/backend/access/spgist/spgvacuum.c
@@ -337,7 +337,7 @@ vacuumLeafPage(spgBulkDeleteState *bds, Relation index, Buffer buffer,
InvalidBlockNumber, InvalidOffsetNumber);
/*
- * We implement the move step by swapping the item pointers of the source
+ * We implement the move step by swapping the line pointers of the source
* and target tuples, then replacing the newly-source tuples with
* placeholders. This is perhaps unduly friendly with the page data
* representation, but it's fast and doesn't risk page overflow when a