-rw-r--r--   src/backend/access/heap/heapam.c    | 21
-rw-r--r--   src/backend/executor/nodeSeqscan.c  |  6
-rw-r--r--   src/include/access/relscan.h        |  1
3 files changed, 14 insertions(+), 14 deletions(-)
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index 85fd749b76a..c9b1d5fd04c 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -479,8 +479,6 @@ heapgettup(HeapScanDesc scan,
 
 			if (valid)
 			{
-				if (!scan->rs_relpredicatelocked)
-					PredicateLockTuple(scan->rs_rd, tuple, snapshot);
 				LockBuffer(scan->rs_cbuf, BUFFER_LOCK_UNLOCK);
 				return;
 			}
@@ -748,16 +746,12 @@ heapgettup_pagemode(HeapScanDesc scan,
 							nkeys, key, valid);
 				if (valid)
 				{
-					if (!scan->rs_relpredicatelocked)
-						PredicateLockTuple(scan->rs_rd, tuple, scan->rs_snapshot);
 					scan->rs_cindex = lineindex;
 					return;
 				}
 			}
 			else
 			{
-				if (!scan->rs_relpredicatelocked)
-					PredicateLockTuple(scan->rs_rd, tuple, scan->rs_snapshot);
 				scan->rs_cindex = lineindex;
 				return;
 			}
@@ -1228,13 +1222,26 @@ heap_beginscan_internal(Relation relation, Snapshot snapshot,
 	scan->rs_strategy = NULL;	/* set in initscan */
 	scan->rs_allow_strat = allow_strat;
 	scan->rs_allow_sync = allow_sync;
-	scan->rs_relpredicatelocked = false;
 
 	/*
 	 * we can use page-at-a-time mode if it's an MVCC-safe snapshot
 	 */
 	scan->rs_pageatatime = IsMVCCSnapshot(snapshot);
 
+	/*
+	 * For a seqscan in a serializable transaction, acquire a predicate lock
+	 * on the entire relation. This is required not only to lock all the
+	 * matching tuples, but also to conflict with new insertions into the
+	 * table. In an indexscan, we take page locks on the index pages covering
+	 * the range specified in the scan qual, but in a heap scan there is
+	 * nothing more fine-grained to lock. A bitmap scan is a different story,
+	 * there we have already scanned the index and locked the index pages
+	 * covering the predicate. But in that case we still have to lock any
+	 * matching heap tuples.
+	 */
+	if (!is_bitmapscan)
+		PredicateLockRelation(relation, snapshot);
+
 	/* we only need to set this up once */
 	scan->rs_ctup.t_tableOid = RelationGetRelid(relation);
 
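The comment added above carries the design decision of this patch: the SSI lock-granularity choice for a heap scan is made once, at scan setup, instead of per tuple in heapgettup()/heapgettup_pagemode() and per call in ExecSeqScan(). The sketch below restates that decision as a free-standing helper; it is illustrative only and not part of this commit — scan_acquire_predicate_locks is a hypothetical name, and only PredicateLockRelation(), the snapshot, and the is_bitmapscan flag come from the patch itself.

#include "postgres.h"

#include "storage/predicate.h"	/* PredicateLockRelation() */
#include "utils/rel.h"			/* Relation */
#include "utils/snapshot.h"		/* Snapshot */

/*
 * Hypothetical helper summarizing the policy heap_beginscan_internal()
 * now applies: a plain heap scan in a serializable transaction has nothing
 * finer-grained than the whole relation that covers both the tuples it
 * reads and the tuples a concurrent transaction might insert, so it
 * predicate-locks the relation once, up front.  A bitmap heap scan skips
 * this, because the index pages covering its qual were already
 * predicate-locked while the bitmap was built; it still has to lock the
 * matching heap tuples individually as they are fetched.
 */
static void
scan_acquire_predicate_locks(Relation relation, Snapshot snapshot,
							 bool is_bitmapscan)
{
	if (!is_bitmapscan)
		PredicateLockRelation(relation, snapshot);
}

Because the relation-level lock is now taken when the scan descriptor is created rather than on each executor call, the rs_relpredicatelocked flag (removed from relscan.h further down) has nothing left to track, and the per-tuple PredicateLockTuple() calls in heapgettup()/heapgettup_pagemode() become redundant, since every scan that reaches them now already holds the relation-level lock.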
diff --git a/src/backend/executor/nodeSeqscan.c b/src/backend/executor/nodeSeqscan.c
index f356874b441..0f3438d0639 100644
--- a/src/backend/executor/nodeSeqscan.c
+++ b/src/backend/executor/nodeSeqscan.c
@@ -28,7 +28,6 @@
 #include "access/relscan.h"
 #include "executor/execdebug.h"
 #include "executor/nodeSeqscan.h"
-#include "storage/predicate.h"
 
 static void InitScanRelation(SeqScanState *node, EState *estate);
 static TupleTableSlot *SeqNext(SeqScanState *node);
@@ -106,16 +105,11 @@ SeqRecheck(SeqScanState *node, TupleTableSlot *slot)
  *		tuple.
  *		We call the ExecScan() routine and pass it the appropriate
  *		access method functions.
- *		For serializable transactions, we first acquire a predicate
- *		lock on the entire relation.
  * ----------------------------------------------------------------
  */
 TupleTableSlot *
 ExecSeqScan(SeqScanState *node)
 {
-	PredicateLockRelation(node->ss_currentRelation,
-						  node->ss_currentScanDesc->rs_snapshot);
-	node->ss_currentScanDesc->rs_relpredicatelocked = true;
 	return ExecScan((ScanState *) node,
 					(ExecScanAccessMtd) SeqNext,
 					(ExecScanRecheckMtd) SeqRecheck);
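After this hunk, ExecSeqScan() is nothing but the ExecScan() dispatch in the surviving context lines: the node supplies an access-method callback (SeqNext) and a recheck callback (SeqRecheck), and all predicate-lock handling has moved into the heap access code. For readers unfamiliar with that dispatch style, here is a stand-alone sketch of the same callback pattern; it uses hypothetical demo_* names and plain C so it compiles on its own, and it deliberately ignores everything ExecScan() does beyond driving the two callbacks (quals, projection, and so on).

/*
 * Stand-alone illustration of the access-method / recheck callback style
 * used above; every name prefixed with demo_ is hypothetical and nothing
 * here is PostgreSQL code.
 */
#include <stdbool.h>
#include <stdio.h>

typedef struct DemoScanState
{
	int			next;			/* next value the "scan" will return */
	int			limit;			/* stop after this many values */
} DemoScanState;

typedef bool (*DemoAccessMtd) (DemoScanState *state, int *tuple_out);
typedef bool (*DemoRecheckMtd) (DemoScanState *state, int tuple);

/* Analogue of SeqNext: produce the next item, or report end of scan. */
static bool
demo_next(DemoScanState *state, int *tuple_out)
{
	if (state->next >= state->limit)
		return false;
	*tuple_out = state->next++;
	return true;
}

/* Analogue of SeqRecheck: a plain sequential scan has nothing to re-verify. */
static bool
demo_recheck(DemoScanState *state, int tuple)
{
	(void) state;
	(void) tuple;
	return true;
}

/* Analogue of the ExecScan() dispatch: drive the two callbacks to completion. */
static void
demo_exec_scan(DemoScanState *state, DemoAccessMtd next, DemoRecheckMtd recheck)
{
	int			tuple;

	while (next(state, &tuple))
	{
		if (recheck(state, tuple))
			printf("tuple %d\n", tuple);
	}
}

int
main(void)
{
	DemoScanState state = {0, 3};

	demo_exec_scan(&state, demo_next, demo_recheck);
	return 0;
}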
diff --git a/src/include/access/relscan.h b/src/include/access/relscan.h
index db1131e2600..57d08b9656e 100644
--- a/src/include/access/relscan.h
+++ b/src/include/access/relscan.h
@@ -35,7 +35,6 @@ typedef struct HeapScanDescData
 	BlockNumber rs_startblock;	/* block # to start at */
 	BufferAccessStrategy rs_strategy;	/* access strategy for reads */
 	bool		rs_syncscan;	/* report location to syncscan logic? */
-	bool		rs_relpredicatelocked;	/* predicate lock on relation exists */
 
 	/* scan current state */
 	bool		rs_inited;		/* false = scan not init'd yet */