author    Kevin Grittner <kgrittn@postgresql.org>  2016-04-20 08:31:19 -0500
committer Kevin Grittner <kgrittn@postgresql.org>  2016-04-20 08:31:19 -0500
commit    a343e223a5c33a7283a6d8b255c9dbc48dbc5061 (patch)
tree      f02f3de305180170d8d5e51120861ae6770b31e8 /src/backend/commands
parent    4db0d2d2fe935e086dfd26c00f707dab298b443c (diff)
Revert no-op changes to BufferGetPage()
The reverted changes were intended to force a choice of whether any newly-added BufferGetPage() calls needed to be accompanied by a test of the snapshot age, to support the "snapshot too old" feature. Such an accompanying test is needed in about 7% of the cases, where the page is being used as part of a scan rather than positioning for other purposes (such as DML or vacuuming). The additional effort required for back-patching, and the doubt whether the intended benefit would really be there, have indicated it is best just to rely on developers to do the right thing based on comments and existing usage, as we do with many other conventions.

This change should have little or no effect on generated executable code.

Motivated by the back-patching pain of Tom Lane and Robert Haas.
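For readers unfamiliar with the convention this commit reverts to, the sketch below illustrates, in rough PostgreSQL-backend style, what the message describes: positioning-type callers use a bare BufferGetPage(), while scan-type callers follow it with the snapshot-age check added by the "snapshot too old" work (TestForOldSnapshot() in src/include/storage/bufmgr.h). The helper names scan_one_page and touch_one_page and the surrounding locking code are illustrative assumptions, not code from this commit.

/*
 * Illustrative sketch only; not part of this commit.  It shows the usage
 * convention the commit message relies on: a bare BufferGetPage() when the
 * page is only being positioned on (DML, vacuuming, sequences), and
 * BufferGetPage() plus a snapshot-age test when the page feeds a scan.
 */
#include "postgres.h"

#include "storage/bufmgr.h"
#include "utils/rel.h"
#include "utils/snapshot.h"

/* Hypothetical scan-side caller: page contents are returned to a query. */
static void
scan_one_page(Relation rel, Snapshot snapshot, BlockNumber blkno)
{
	Buffer		buf = ReadBuffer(rel, blkno);
	Page		page;

	LockBuffer(buf, BUFFER_LOCK_SHARE);
	page = BufferGetPage(buf);

	/*
	 * Scan case (the ~7% of call sites mentioned above): check whether the
	 * snapshot has exceeded old_snapshot_threshold before trusting what is
	 * on the page.
	 */
	TestForOldSnapshot(snapshot, rel, page);

	/* ... examine tuples on the page ... */

	UnlockReleaseBuffer(buf);
}

/* Hypothetical positioning-side caller: no snapshot-age test needed. */
static void
touch_one_page(Relation rel, BlockNumber blkno)
{
	Buffer		buf = ReadBuffer(rel, blkno);
	Page		page;

	LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
	page = BufferGetPage(buf);	/* plain call, as restored by this revert */

	/* ... modify or vacuum the page ... */

	UnlockReleaseBuffer(buf);
}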
Diffstat (limited to 'src/backend/commands')
-rw-r--r--  src/backend/commands/analyze.c      2
-rw-r--r--  src/backend/commands/sequence.c    12
-rw-r--r--  src/backend/commands/trigger.c      2
-rw-r--r--  src/backend/commands/vacuumlazy.c  12
4 files changed, 14 insertions(+), 14 deletions(-)
diff --git a/src/backend/commands/analyze.c b/src/backend/commands/analyze.c
index 8b41ab0828e..cf8c8164b7e 100644
--- a/src/backend/commands/analyze.c
+++ b/src/backend/commands/analyze.c
@@ -1025,7 +1025,7 @@ acquire_sample_rows(Relation onerel, int elevel,
targbuffer = ReadBufferExtended(onerel, MAIN_FORKNUM, targblock,
RBM_NORMAL, vac_strategy);
LockBuffer(targbuffer, BUFFER_LOCK_SHARE);
- targpage = BufferGetPage(targbuffer, NULL, NULL, BGP_NO_SNAPSHOT_TEST);
+ targpage = BufferGetPage(targbuffer);
maxoffset = PageGetMaxOffsetNumber(targpage);
/* Inner loop over all tuples on the selected page */
diff --git a/src/backend/commands/sequence.c b/src/backend/commands/sequence.c
index f38126f4f98..c98f9811119 100644
--- a/src/backend/commands/sequence.c
+++ b/src/backend/commands/sequence.c
@@ -337,7 +337,7 @@ fill_seq_with_data(Relation rel, HeapTuple tuple)
buf = ReadBuffer(rel, P_NEW);
Assert(BufferGetBlockNumber(buf) == 0);
- page = BufferGetPage(buf, NULL, NULL, BGP_NO_SNAPSHOT_TEST);
+ page = BufferGetPage(buf);
PageInit(page, BufferGetPageSize(buf), sizeof(sequence_magic));
sm = (sequence_magic *) PageGetSpecialPointer(page);
@@ -462,7 +462,7 @@ AlterSequence(AlterSeqStmt *stmt)
{
xl_seq_rec xlrec;
XLogRecPtr recptr;
- Page page = BufferGetPage(buf, NULL, NULL, BGP_NO_SNAPSHOT_TEST);
+ Page page = BufferGetPage(buf);
XLogBeginInsert();
XLogRegisterBuffer(0, buf, REGBUF_WILL_INIT);
@@ -584,7 +584,7 @@ nextval_internal(Oid relid)
/* lock page' buffer and read tuple */
seq = read_seq_tuple(elm, seqrel, &buf, &seqtuple);
- page = BufferGetPage(buf, NULL, NULL, BGP_NO_SNAPSHOT_TEST);
+ page = BufferGetPage(buf);
last = next = result = seq->last_value;
incby = seq->increment_by;
@@ -923,7 +923,7 @@ do_setval(Oid relid, int64 next, bool iscalled)
{
xl_seq_rec xlrec;
XLogRecPtr recptr;
- Page page = BufferGetPage(buf, NULL, NULL, BGP_NO_SNAPSHOT_TEST);
+ Page page = BufferGetPage(buf);
XLogBeginInsert();
XLogRegisterBuffer(0, buf, REGBUF_WILL_INIT);
@@ -1115,7 +1115,7 @@ read_seq_tuple(SeqTable elm, Relation rel, Buffer *buf, HeapTuple seqtuple)
*buf = ReadBuffer(rel, 0);
LockBuffer(*buf, BUFFER_LOCK_EXCLUSIVE);
- page = BufferGetPage(*buf, NULL, NULL, BGP_NO_SNAPSHOT_TEST);
+ page = BufferGetPage(*buf);
sm = (sequence_magic *) PageGetSpecialPointer(page);
if (sm->magic != SEQ_MAGIC)
@@ -1591,7 +1591,7 @@ seq_redo(XLogReaderState *record)
elog(PANIC, "seq_redo: unknown op code %u", info);
buffer = XLogInitBufferForRedo(record, 0);
- page = BufferGetPage(buffer, NULL, NULL, BGP_NO_SNAPSHOT_TEST);
+ page = (Page) BufferGetPage(buffer);
/*
* We always reinit the page. However, since this WAL record type is also
diff --git a/src/backend/commands/trigger.c b/src/backend/commands/trigger.c
index 33107e02a9d..6f728ff0fc9 100644
--- a/src/backend/commands/trigger.c
+++ b/src/backend/commands/trigger.c
@@ -2798,7 +2798,7 @@ ltrmark:;
*/
LockBuffer(buffer, BUFFER_LOCK_SHARE);
- page = BufferGetPage(buffer, NULL, NULL, BGP_NO_SNAPSHOT_TEST);
+ page = BufferGetPage(buffer);
lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
Assert(ItemIdIsNormal(lp));
diff --git a/src/backend/commands/vacuumlazy.c b/src/backend/commands/vacuumlazy.c
index d0e92b33658..426e7560930 100644
--- a/src/backend/commands/vacuumlazy.c
+++ b/src/backend/commands/vacuumlazy.c
@@ -803,7 +803,7 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
vacrelstats->scanned_pages++;
- page = BufferGetPage(buf, NULL, NULL, BGP_NO_SNAPSHOT_TEST);
+ page = BufferGetPage(buf);
if (PageIsNew(page))
{
@@ -1378,7 +1378,7 @@ lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats)
&vmbuffer);
/* Now that we've compacted the page, record its available space */
- page = BufferGetPage(buf, NULL, NULL, BGP_NO_SNAPSHOT_TEST);
+ page = BufferGetPage(buf);
freespace = PageGetHeapFreeSpace(page);
UnlockReleaseBuffer(buf);
@@ -1414,7 +1414,7 @@ static int
lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer,
int tupindex, LVRelStats *vacrelstats, Buffer *vmbuffer)
{
- Page page = BufferGetPage(buffer, NULL, NULL, BGP_NO_SNAPSHOT_TEST);
+ Page page = BufferGetPage(buffer);
OffsetNumber unused[MaxOffsetNumber];
int uncnt = 0;
TransactionId visibility_cutoff_xid;
@@ -1511,7 +1511,7 @@ lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer,
static bool
lazy_check_needs_freeze(Buffer buf, bool *hastup)
{
- Page page = BufferGetPage(buf, NULL, NULL, BGP_NO_SNAPSHOT_TEST);
+ Page page = BufferGetPage(buf);
OffsetNumber offnum,
maxoff;
HeapTupleHeader tupleheader;
@@ -1864,7 +1864,7 @@ count_nondeletable_pages(Relation onerel, LVRelStats *vacrelstats)
/* In this phase we only need shared access to the buffer */
LockBuffer(buf, BUFFER_LOCK_SHARE);
- page = BufferGetPage(buf, NULL, NULL, BGP_NO_SNAPSHOT_TEST);
+ page = BufferGetPage(buf);
if (PageIsNew(page) || PageIsEmpty(page))
{
@@ -2032,7 +2032,7 @@ heap_page_is_all_visible(Relation rel, Buffer buf,
TransactionId *visibility_cutoff_xid,
bool *all_frozen)
{
- Page page = BufferGetPage(buf, NULL, NULL, BGP_NO_SNAPSHOT_TEST);
+ Page page = BufferGetPage(buf);
BlockNumber blockno = BufferGetBlockNumber(buf);
OffsetNumber offnum,
maxoff;