author     Andres Freund <andres@anarazel.de>    2025-04-02 14:25:17 -0400
committer  Andres Freund <andres@anarazel.de>    2025-04-02 14:54:20 -0400
commit     459e7bf8e2f8ab894dc613fa8555b74c4eef6969 (patch)
tree       d89ead863ddc22c0615d244c97ce26d3cf9cda32 /src/backend/executor/nodeBitmapHeapscan.c
parent     0dca5d68d7bebf2c1036fd84875533afef6df992 (diff)
Remove HeapBitmapScan's skip_fetch optimization
The optimization does not take the removal of TIDs by a concurrent vacuum into account. The concurrent vacuum can remove dead TIDs and make pages ALL_VISIBLE while those dead TIDs are still referenced in the bitmap. This can lead to a skip_fetch scan returning too many tuples.

It likely would be possible to implement this optimization safely, but we don't have the necessary infrastructure in place. Nor is it clear that it's worth building that infrastructure, given how limited the skip_fetch optimization is.

In the backbranches we just disable the optimization by always passing need_tuples=true to table_beginscan_bm(). We can't make API/ABI changes in the backbranches, and we want to keep the change as minimal as possible.

Author: Matthias van de Meent <boekewurm+postgres@gmail.com>
Reported-By: Konstantin Knizhnik <knizhnik@garret.ru>
Discussion: https://postgr.es/m/CAEze2Wg3gXXZTr6_rwC+s4-o2ZVFB5F985uUSgJTsECx6AmGcQ@mail.gmail.com
Backpatch-through: 13
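As a rough sketch of the back-branch approach described above (not part of this commit), the call site would keep the five-argument table_beginscan_bm() form shown in the removed lines below, but hard-code need_tuples to true. Approximately:

	/*
	 * Back-branch sketch only (not this patch): always request heap
	 * tuples so the skip_fetch optimization is never used, without
	 * changing the table_beginscan_bm() signature.
	 */
	node->ss.ss_currentScanDesc =
		table_beginscan_bm(node->ss.ss_currentRelation,
						   node->ss.ps.state->es_snapshot,
						   0,
						   NULL,
						   true);	/* need_tuples: force heap fetches */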
Diffstat (limited to 'src/backend/executor/nodeBitmapHeapscan.c')
-rw-r--r--  src/backend/executor/nodeBitmapHeapscan.c  15
1 file changed, 1 insertion(+), 14 deletions(-)
diff --git a/src/backend/executor/nodeBitmapHeapscan.c b/src/backend/executor/nodeBitmapHeapscan.c
index 3e33360c0fc..bf24f3d7fe0 100644
--- a/src/backend/executor/nodeBitmapHeapscan.c
+++ b/src/backend/executor/nodeBitmapHeapscan.c
@@ -105,24 +105,11 @@ BitmapTableScanSetup(BitmapHeapScanState *node)
 	 */
 	if (!node->ss.ss_currentScanDesc)
 	{
-		bool		need_tuples = false;
-
-		/*
-		 * We can potentially skip fetching heap pages if we do not need any
-		 * columns of the table, either for checking non-indexable quals or
-		 * for returning data. This test is a bit simplistic, as it checks
-		 * the stronger condition that there's no qual or return tlist at all.
-		 * But in most cases it's probably not worth working harder than that.
-		 */
-		need_tuples = (node->ss.ps.plan->qual != NIL ||
-					   node->ss.ps.plan->targetlist != NIL);
-
 		node->ss.ss_currentScanDesc =
 			table_beginscan_bm(node->ss.ss_currentRelation,
 							   node->ss.ps.state->es_snapshot,
 							   0,
-							   NULL,
-							   need_tuples);
+							   NULL);
 	}
 
 	node->ss.ss_currentScanDesc->st.rs_tbmiterator = tbmiterator;
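For context, the need_tuples argument removed above fed a scan option that table_beginscan_bm() in src/include/access/tableam.h passed down to the table AM; dropping it changes that helper's signature, which is the kind of API change the commit message says cannot go to the back branches. A sketch of the simplified helper as it might look after this commit, inferred only from the four-argument call above; the parameter names and body are an approximation, not the actual header contents:

	/*
	 * Approximate sketch of the post-commit helper (inferred, not taken
	 * verbatim from tableam.h): the need_tuples parameter and the scan
	 * flag it used to set are gone.
	 */
	static inline TableScanDesc
	table_beginscan_bm(Relation rel, Snapshot snapshot,
					   int nkeys, struct ScanKeyData *key)
	{
		uint32		flags = SO_TYPE_BITMAPSCAN | SO_ALLOW_PAGEMODE;

		return rel->rd_tableam->scan_begin(rel, snapshot, nkeys, key,
										   NULL, flags);
	}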