Diffstat (limited to 'src')
-rw-r--r--  src/backend/access/heap/heapam_handler.c   372
-rw-r--r--  src/backend/access/table/tableamapi.c        3
-rw-r--r--  src/backend/executor/nodeBitmapHeapscan.c    73
-rw-r--r--  src/backend/optimizer/util/plancat.c          2
-rw-r--r--  src/include/access/tableam.h                 90
5 files changed, 244 insertions, 296 deletions
diff --git a/src/backend/access/heap/heapam_handler.c b/src/backend/access/heap/heapam_handler.c
index 3035adacade..4da4dc84580 100644
--- a/src/backend/access/heap/heapam_handler.c
+++ b/src/backend/access/heap/heapam_handler.c
@@ -56,6 +56,10 @@ static bool SampleHeapTupleVisible(TableScanDesc scan, Buffer buffer,
static BlockNumber heapam_scan_get_blocks_done(HeapScanDesc hscan);
+static bool BitmapHeapScanNextBlock(TableScanDesc scan,
+ bool *recheck,
+ uint64 *lossy_pages, uint64 *exact_pages);
+
/* ------------------------------------------------------------------------
* Slot related callbacks for heap AM
@@ -2116,198 +2120,44 @@ heapam_estimate_rel_size(Relation rel, int32 *attr_widths,
*/
static bool
-heapam_scan_bitmap_next_block(TableScanDesc scan,
+heapam_scan_bitmap_next_tuple(TableScanDesc scan,
+ TupleTableSlot *slot,
bool *recheck,
- uint64 *lossy_pages, uint64 *exact_pages)
+ uint64 *lossy_pages,
+ uint64 *exact_pages)
{
BitmapHeapScanDesc bscan = (BitmapHeapScanDesc) scan;
HeapScanDesc hscan = (HeapScanDesc) bscan;
- BlockNumber block;
- void *per_buffer_data;
- Buffer buffer;
- Snapshot snapshot;
- int ntup;
- TBMIterateResult *tbmres;
- OffsetNumber offsets[TBM_MAX_TUPLES_PER_PAGE];
- int noffsets = -1;
-
- Assert(scan->rs_flags & SO_TYPE_BITMAPSCAN);
- Assert(hscan->rs_read_stream);
-
- hscan->rs_cindex = 0;
- hscan->rs_ntuples = 0;
-
- /* Release buffer containing previous block. */
- if (BufferIsValid(hscan->rs_cbuf))
- {
- ReleaseBuffer(hscan->rs_cbuf);
- hscan->rs_cbuf = InvalidBuffer;
- }
-
- hscan->rs_cbuf = read_stream_next_buffer(hscan->rs_read_stream,
- &per_buffer_data);
-
- if (BufferIsInvalid(hscan->rs_cbuf))
- {
- if (BufferIsValid(bscan->rs_vmbuffer))
- {
- ReleaseBuffer(bscan->rs_vmbuffer);
- bscan->rs_vmbuffer = InvalidBuffer;
- }
-
- /*
- * Bitmap is exhausted. Time to emit empty tuples if relevant. We emit
- * all empty tuples at the end instead of emitting them per block we
- * skip fetching. This is necessary because the streaming read API
- * will only return TBMIterateResults for blocks actually fetched.
- * When we skip fetching a block, we keep track of how many empty
- * tuples to emit at the end of the BitmapHeapScan. We do not recheck
- * all NULL tuples.
- */
- *recheck = false;
- return bscan->rs_empty_tuples_pending > 0;
- }
-
- Assert(per_buffer_data);
-
- tbmres = per_buffer_data;
-
- Assert(BlockNumberIsValid(tbmres->blockno));
- Assert(BufferGetBlockNumber(hscan->rs_cbuf) == tbmres->blockno);
-
- /* Exact pages need their tuple offsets extracted. */
- if (!tbmres->lossy)
- noffsets = tbm_extract_page_tuple(tbmres, offsets,
- TBM_MAX_TUPLES_PER_PAGE);
-
- *recheck = tbmres->recheck;
-
- block = hscan->rs_cblock = tbmres->blockno;
- buffer = hscan->rs_cbuf;
- snapshot = scan->rs_snapshot;
-
- ntup = 0;
-
- /*
- * Prune and repair fragmentation for the whole page, if possible.
- */
- heap_page_prune_opt(scan->rs_rd, buffer);
-
- /*
- * We must hold share lock on the buffer content while examining tuple
- * visibility. Afterwards, however, the tuples we have found to be
- * visible are guaranteed good as long as we hold the buffer pin.
- */
- LockBuffer(buffer, BUFFER_LOCK_SHARE);
+ OffsetNumber targoffset;
+ Page page;
+ ItemId lp;
/*
- * We need two separate strategies for lossy and non-lossy cases.
+ * Out of range? If so, nothing more to look at on this page
*/
- if (!tbmres->lossy)
- {
- /*
- * Bitmap is non-lossy, so we just look through the offsets listed in
- * tbmres; but we have to follow any HOT chain starting at each such
- * offset.
- */
- int curslot;
-
- /* We must have extracted the tuple offsets by now */
- Assert(noffsets > -1);
-
- for (curslot = 0; curslot < noffsets; curslot++)
- {
- OffsetNumber offnum = offsets[curslot];
- ItemPointerData tid;
- HeapTupleData heapTuple;
-
- ItemPointerSet(&tid, block, offnum);
- if (heap_hot_search_buffer(&tid, scan->rs_rd, buffer, snapshot,
- &heapTuple, NULL, true))
- hscan->rs_vistuples[ntup++] = ItemPointerGetOffsetNumber(&tid);
- }
- }
- else
+ while (hscan->rs_cindex >= hscan->rs_ntuples)
{
/*
- * Bitmap is lossy, so we must examine each line pointer on the page.
- * But we can ignore HOT chains, since we'll check each tuple anyway.
+ * Emit empty tuples before advancing to the next block
*/
- Page page = BufferGetPage(buffer);
- OffsetNumber maxoff = PageGetMaxOffsetNumber(page);
- OffsetNumber offnum;
-
- for (offnum = FirstOffsetNumber; offnum <= maxoff; offnum = OffsetNumberNext(offnum))
+ if (bscan->rs_empty_tuples_pending > 0)
{
- ItemId lp;
- HeapTupleData loctup;
- bool valid;
-
- lp = PageGetItemId(page, offnum);
- if (!ItemIdIsNormal(lp))
- continue;
- loctup.t_data = (HeapTupleHeader) PageGetItem(page, lp);
- loctup.t_len = ItemIdGetLength(lp);
- loctup.t_tableOid = scan->rs_rd->rd_id;
- ItemPointerSet(&loctup.t_self, block, offnum);
- valid = HeapTupleSatisfiesVisibility(&loctup, snapshot, buffer);
- if (valid)
- {
- hscan->rs_vistuples[ntup++] = offnum;
- PredicateLockTID(scan->rs_rd, &loctup.t_self, snapshot,
- HeapTupleHeaderGetXmin(loctup.t_data));
- }
- HeapCheckForSerializableConflictOut(valid, scan->rs_rd, &loctup,
- buffer, snapshot);
+ /*
+ * If we don't have to fetch the tuple, just return nulls.
+ */
+ ExecStoreAllNullTuple(slot);
+ bscan->rs_empty_tuples_pending--;
+ return true;
}
- }
- LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
-
- Assert(ntup <= MaxHeapTuplesPerPage);
- hscan->rs_ntuples = ntup;
-
- if (tbmres->lossy)
- (*lossy_pages)++;
- else
- (*exact_pages)++;
-
- /*
- * Return true to indicate that a valid block was found and the bitmap is
- * not exhausted. If there are no visible tuples on this page,
- * hscan->rs_ntuples will be 0 and heapam_scan_bitmap_next_tuple() will
- * return false returning control to this function to advance to the next
- * block in the bitmap.
- */
- return true;
-}
-
-static bool
-heapam_scan_bitmap_next_tuple(TableScanDesc scan,
- TupleTableSlot *slot)
-{
- BitmapHeapScanDesc bscan = (BitmapHeapScanDesc) scan;
- HeapScanDesc hscan = (HeapScanDesc) bscan;
- OffsetNumber targoffset;
- Page page;
- ItemId lp;
-
- if (bscan->rs_empty_tuples_pending > 0)
- {
/*
- * If we don't have to fetch the tuple, just return nulls.
+ * Returns false if the bitmap is exhausted and there are no further
+ * blocks we need to scan.
*/
- ExecStoreAllNullTuple(slot);
- bscan->rs_empty_tuples_pending--;
- return true;
+ if (!BitmapHeapScanNextBlock(scan, recheck, lossy_pages, exact_pages))
+ return false;
}
- /*
- * Out of range? If so, nothing more to look at on this page
- */
- if (hscan->rs_cindex >= hscan->rs_ntuples)
- return false;
-
targoffset = hscan->rs_vistuples[hscan->rs_cindex];
page = BufferGetPage(hscan->rs_cbuf);
lp = PageGetItemId(page, targoffset);
@@ -2614,6 +2464,177 @@ SampleHeapTupleVisible(TableScanDesc scan, Buffer buffer,
}
}
+/*
+ * Helper function to get the next block of a bitmap heap scan. Returns true
+ * when it got the next block and saved it in the scan descriptor, and false
+ * when the bitmap and/or relation are exhausted.
+ */
+static bool
+BitmapHeapScanNextBlock(TableScanDesc scan,
+ bool *recheck,
+ uint64 *lossy_pages, uint64 *exact_pages)
+{
+ BitmapHeapScanDesc bscan = (BitmapHeapScanDesc) scan;
+ HeapScanDesc hscan = (HeapScanDesc) bscan;
+ BlockNumber block;
+ void *per_buffer_data;
+ Buffer buffer;
+ Snapshot snapshot;
+ int ntup;
+ TBMIterateResult *tbmres;
+ OffsetNumber offsets[TBM_MAX_TUPLES_PER_PAGE];
+ int noffsets = -1;
+
+ Assert(scan->rs_flags & SO_TYPE_BITMAPSCAN);
+ Assert(hscan->rs_read_stream);
+
+ hscan->rs_cindex = 0;
+ hscan->rs_ntuples = 0;
+
+ /* Release buffer containing previous block. */
+ if (BufferIsValid(hscan->rs_cbuf))
+ {
+ ReleaseBuffer(hscan->rs_cbuf);
+ hscan->rs_cbuf = InvalidBuffer;
+ }
+
+ hscan->rs_cbuf = read_stream_next_buffer(hscan->rs_read_stream,
+ &per_buffer_data);
+
+ if (BufferIsInvalid(hscan->rs_cbuf))
+ {
+ if (BufferIsValid(bscan->rs_vmbuffer))
+ {
+ ReleaseBuffer(bscan->rs_vmbuffer);
+ bscan->rs_vmbuffer = InvalidBuffer;
+ }
+
+ /*
+ * Bitmap is exhausted. Time to emit empty tuples if relevant. We emit
+ * all empty tuples at the end instead of emitting them per block we
+ * skip fetching. This is necessary because the streaming read API
+ * will only return TBMIterateResults for blocks actually fetched.
+ * When we skip fetching a block, we keep track of how many empty
+ * tuples to emit at the end of the BitmapHeapScan. We do not recheck
+ * all NULL tuples.
+ */
+ *recheck = false;
+ return bscan->rs_empty_tuples_pending > 0;
+ }
+
+ Assert(per_buffer_data);
+
+ tbmres = per_buffer_data;
+
+ Assert(BlockNumberIsValid(tbmres->blockno));
+ Assert(BufferGetBlockNumber(hscan->rs_cbuf) == tbmres->blockno);
+
+ /* Exact pages need their tuple offsets extracted. */
+ if (!tbmres->lossy)
+ noffsets = tbm_extract_page_tuple(tbmres, offsets,
+ TBM_MAX_TUPLES_PER_PAGE);
+
+ *recheck = tbmres->recheck;
+
+ block = hscan->rs_cblock = tbmres->blockno;
+ buffer = hscan->rs_cbuf;
+ snapshot = scan->rs_snapshot;
+
+ ntup = 0;
+
+ /*
+ * Prune and repair fragmentation for the whole page, if possible.
+ */
+ heap_page_prune_opt(scan->rs_rd, buffer);
+
+ /*
+ * We must hold share lock on the buffer content while examining tuple
+ * visibility. Afterwards, however, the tuples we have found to be
+ * visible are guaranteed good as long as we hold the buffer pin.
+ */
+ LockBuffer(buffer, BUFFER_LOCK_SHARE);
+
+ /*
+ * We need two separate strategies for lossy and non-lossy cases.
+ */
+ if (!tbmres->lossy)
+ {
+ /*
+ * Bitmap is non-lossy, so we just look through the offsets listed in
+ * tbmres; but we have to follow any HOT chain starting at each such
+ * offset.
+ */
+ int curslot;
+
+ /* We must have extracted the tuple offsets by now */
+ Assert(noffsets > -1);
+
+ for (curslot = 0; curslot < noffsets; curslot++)
+ {
+ OffsetNumber offnum = offsets[curslot];
+ ItemPointerData tid;
+ HeapTupleData heapTuple;
+
+ ItemPointerSet(&tid, block, offnum);
+ if (heap_hot_search_buffer(&tid, scan->rs_rd, buffer, snapshot,
+ &heapTuple, NULL, true))
+ hscan->rs_vistuples[ntup++] = ItemPointerGetOffsetNumber(&tid);
+ }
+ }
+ else
+ {
+ /*
+ * Bitmap is lossy, so we must examine each line pointer on the page.
+ * But we can ignore HOT chains, since we'll check each tuple anyway.
+ */
+ Page page = BufferGetPage(buffer);
+ OffsetNumber maxoff = PageGetMaxOffsetNumber(page);
+ OffsetNumber offnum;
+
+ for (offnum = FirstOffsetNumber; offnum <= maxoff; offnum = OffsetNumberNext(offnum))
+ {
+ ItemId lp;
+ HeapTupleData loctup;
+ bool valid;
+
+ lp = PageGetItemId(page, offnum);
+ if (!ItemIdIsNormal(lp))
+ continue;
+ loctup.t_data = (HeapTupleHeader) PageGetItem(page, lp);
+ loctup.t_len = ItemIdGetLength(lp);
+ loctup.t_tableOid = scan->rs_rd->rd_id;
+ ItemPointerSet(&loctup.t_self, block, offnum);
+ valid = HeapTupleSatisfiesVisibility(&loctup, snapshot, buffer);
+ if (valid)
+ {
+ hscan->rs_vistuples[ntup++] = offnum;
+ PredicateLockTID(scan->rs_rd, &loctup.t_self, snapshot,
+ HeapTupleHeaderGetXmin(loctup.t_data));
+ }
+ HeapCheckForSerializableConflictOut(valid, scan->rs_rd, &loctup,
+ buffer, snapshot);
+ }
+ }
+
+ LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
+
+ Assert(ntup <= MaxHeapTuplesPerPage);
+ hscan->rs_ntuples = ntup;
+
+ if (tbmres->lossy)
+ (*lossy_pages)++;
+ else
+ (*exact_pages)++;
+
+ /*
+ * Return true to indicate that a valid block was found and the bitmap is
+ * not exhausted. If there are no visible tuples on this page,
+ * hscan->rs_ntuples will be 0 and heapam_scan_bitmap_next_tuple() will
+ * return false returning control to this function to advance to the next
+ * block in the bitmap.
+ */
+ return true;
+}
/* ------------------------------------------------------------------------
* Definition of the heap table access method.
@@ -2673,7 +2694,6 @@ static const TableAmRoutine heapam_methods = {
.relation_estimate_size = heapam_estimate_rel_size,
- .scan_bitmap_next_block = heapam_scan_bitmap_next_block,
.scan_bitmap_next_tuple = heapam_scan_bitmap_next_tuple,
.scan_sample_next_block = heapam_scan_sample_next_block,
.scan_sample_next_tuple = heapam_scan_sample_next_tuple
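
The block-loading logic now folded into BitmapHeapScanNextBlock keeps the two
per-page strategies described in its comments: exact pages probe only the
offsets extracted from the bitmap (following HOT chains), while lossy pages
probe every line pointer and test visibility. A toy, self-contained C
illustration of that split follows; plain arrays stand in for the page,
offsets, and visibility test, and none of the names below are PostgreSQL
symbols.

#include <stdbool.h>
#include <stdio.h>

#define PAGE_ITEMS 8

/* Toy "visibility": pretend even-numbered items are visible to our snapshot. */
static bool
item_visible(int item)
{
    return item % 2 == 0;
}

/*
 * Collect visible items from one page.  For an exact page we only probe the
 * offsets the bitmap recorded; for a lossy page we probe every item.
 * Returns the number of visible items written to vis[].
 */
static int
collect_visible(const int *page, bool lossy,
                const int *offsets, int noffsets, int *vis)
{
    int ntup = 0;

    if (!lossy)
    {
        for (int i = 0; i < noffsets; i++)
            if (item_visible(page[offsets[i]]))
                vis[ntup++] = offsets[i];
    }
    else
    {
        for (int off = 0; off < PAGE_ITEMS; off++)
            if (item_visible(page[off]))
                vis[ntup++] = off;
    }
    return ntup;
}

int
main(void)
{
    int page[PAGE_ITEMS] = {10, 11, 12, 13, 14, 15, 16, 17};
    int exact_offsets[] = {1, 2, 5};
    int vis[PAGE_ITEMS];

    int n = collect_visible(page, false, exact_offsets, 3, vis);
    printf("exact page: %d visible\n", n);    /* only offset 2 passes */

    n = collect_visible(page, true, NULL, 0, vis);
    printf("lossy page: %d visible\n", n);    /* all even-valued items pass */
    return 0;
}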
diff --git a/src/backend/access/table/tableamapi.c b/src/backend/access/table/tableamapi.c
index 760a36fd2a1..476663b66aa 100644
--- a/src/backend/access/table/tableamapi.c
+++ b/src/backend/access/table/tableamapi.c
@@ -91,9 +91,6 @@ GetTableAmRoutine(Oid amhandler)
Assert(routine->relation_estimate_size != NULL);
- /* optional, but one callback implies presence of the other */
- Assert((routine->scan_bitmap_next_block == NULL) ==
- (routine->scan_bitmap_next_tuple == NULL));
Assert(routine->scan_sample_next_block != NULL);
Assert(routine->scan_sample_next_tuple != NULL);
diff --git a/src/backend/executor/nodeBitmapHeapscan.c b/src/backend/executor/nodeBitmapHeapscan.c
index 6df34094a13..3e33360c0fc 100644
--- a/src/backend/executor/nodeBitmapHeapscan.c
+++ b/src/backend/executor/nodeBitmapHeapscan.c
@@ -138,69 +138,44 @@ BitmapTableScanSetup(BitmapHeapScanState *node)
static TupleTableSlot *
BitmapHeapNext(BitmapHeapScanState *node)
{
- ExprContext *econtext;
- TableScanDesc scan;
- TupleTableSlot *slot;
-
- /*
- * extract necessary information from index scan node
- */
- econtext = node->ss.ps.ps_ExprContext;
- slot = node->ss.ss_ScanTupleSlot;
- scan = node->ss.ss_currentScanDesc;
+ ExprContext *econtext = node->ss.ps.ps_ExprContext;
+ TupleTableSlot *slot = node->ss.ss_ScanTupleSlot;
/*
* If we haven't yet performed the underlying index scan, do it, and begin
* the iteration over the bitmap.
*/
if (!node->initialized)
- {
BitmapTableScanSetup(node);
- scan = node->ss.ss_currentScanDesc;
- goto new_page;
- }
- for (;;)
+ while (table_scan_bitmap_next_tuple(node->ss.ss_currentScanDesc,
+ slot, &node->recheck,
+ &node->stats.lossy_pages,
+ &node->stats.exact_pages))
{
- while (table_scan_bitmap_next_tuple(scan, slot))
- {
- /*
- * Continuing in previously obtained page.
- */
-
- CHECK_FOR_INTERRUPTS();
+ /*
+ * Continuing in previously obtained page.
+ */
+ CHECK_FOR_INTERRUPTS();
- /*
- * If we are using lossy info, we have to recheck the qual
- * conditions at every tuple.
- */
- if (node->recheck)
+ /*
+ * If we are using lossy info, we have to recheck the qual conditions
+ * at every tuple.
+ */
+ if (node->recheck)
+ {
+ econtext->ecxt_scantuple = slot;
+ if (!ExecQualAndReset(node->bitmapqualorig, econtext))
{
- econtext->ecxt_scantuple = slot;
- if (!ExecQualAndReset(node->bitmapqualorig, econtext))
- {
- /* Fails recheck, so drop it and loop back for another */
- InstrCountFiltered2(node, 1);
- ExecClearTuple(slot);
- continue;
- }
+ /* Fails recheck, so drop it and loop back for another */
+ InstrCountFiltered2(node, 1);
+ ExecClearTuple(slot);
+ continue;
}
-
- /* OK to return this tuple */
- return slot;
}
-new_page:
-
- /*
- * Returns false if the bitmap is exhausted and there are no further
- * blocks we need to scan.
- */
- if (!table_scan_bitmap_next_block(scan,
- &node->recheck,
- &node->stats.lossy_pages,
- &node->stats.exact_pages))
- break;
+ /* OK to return this tuple */
+ return slot;
}
/*
diff --git a/src/backend/optimizer/util/plancat.c b/src/backend/optimizer/util/plancat.c
index 71abb01f655..0489ad36644 100644
--- a/src/backend/optimizer/util/plancat.c
+++ b/src/backend/optimizer/util/plancat.c
@@ -325,7 +325,7 @@ get_relation_info(PlannerInfo *root, Oid relationObjectId, bool inhparent,
info->amcanparallel = amroutine->amcanparallel;
info->amhasgettuple = (amroutine->amgettuple != NULL);
info->amhasgetbitmap = amroutine->amgetbitmap != NULL &&
- relation->rd_tableam->scan_bitmap_next_block != NULL;
+ relation->rd_tableam->scan_bitmap_next_tuple != NULL;
info->amcanmarkpos = (amroutine->ammarkpos != NULL &&
amroutine->amrestrpos != NULL);
info->amcostestimate = amroutine->amcostestimate;
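
With the next_block callback gone, the planner's only table-AM signal for
bitmap-scan support is whether scan_bitmap_next_tuple is provided. A minimal
sketch of that gating, using stand-in struct and function names rather than
the real planner or AM types:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Stand-in for the relevant slice of a table AM routine table. */
typedef struct
{
    bool (*scan_bitmap_next_tuple) (void);  /* NULL if the AM lacks bitmap scans */
} ToyTableAm;

/* Stand-in for the index AM side: does it produce bitmaps at all? */
typedef struct
{
    bool (*amgetbitmap) (void);
} ToyIndexAm;

/* Mirrors the plancat.c test: both the index and the table AM must cooperate. */
static bool
supports_bitmap_scan(const ToyIndexAm *indexam, const ToyTableAm *tableam)
{
    return indexam->amgetbitmap != NULL &&
           tableam->scan_bitmap_next_tuple != NULL;
}

static bool dummy(void) { return true; }

int
main(void)
{
    ToyIndexAm idx = {dummy};
    ToyTableAm with = {dummy};
    ToyTableAm without = {NULL};

    printf("%d %d\n",
           supports_bitmap_scan(&idx, &with),
           supports_bitmap_scan(&idx, &without));   /* prints "1 0" */
    return 0;
}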
diff --git a/src/include/access/tableam.h b/src/include/access/tableam.h
index 507d4ebe68f..b8cb1e744ad 100644
--- a/src/include/access/tableam.h
+++ b/src/include/access/tableam.h
@@ -780,44 +780,24 @@ typedef struct TableAmRoutine
*/
/*
- * Prepare to fetch / check / return tuples from `blockno` as part of a
- * bitmap table scan. `scan` was started via table_beginscan_bm(). Return
- * false if the bitmap is exhausted and true otherwise.
- *
- * This will typically read and pin the target block, and do the necessary
- * work to allow scan_bitmap_next_tuple() to return tuples (e.g. it might
- * make sense to perform tuple visibility checks at this time).
- *
- * `lossy_pages` and `exact_pages` are EXPLAIN counters that can be
- * incremented by the table AM to indicate whether or not the block's
- * representation in the bitmap is lossy.
+ * Fetch the next tuple of a bitmap table scan into `slot` and return true
+ * if a visible tuple was found, false otherwise.
*
- * `recheck` is set by the table AM to indicate whether or not the tuples
- * from this block should be rechecked. Tuples from lossy pages will
- * always need to be rechecked, but some non-lossy pages' tuples may also
- * require recheck.
+ * `lossy_pages` is incremented if the bitmap is lossy for the selected
+ * page; otherwise, `exact_pages` is incremented. These are tracked for
+ * display in EXPLAIN ANALYZE output.
*
* Prefetching additional data from the bitmap is left to the table AM.
*
- * Optional callback, but either both scan_bitmap_next_block and
- * scan_bitmap_next_tuple need to exist, or neither.
+ * This is an optional callback.
*/
- bool (*scan_bitmap_next_block) (TableScanDesc scan,
+ bool (*scan_bitmap_next_tuple) (TableScanDesc scan,
+ TupleTableSlot *slot,
bool *recheck,
uint64 *lossy_pages,
uint64 *exact_pages);
/*
- * Fetch the next tuple of a bitmap table scan into `slot` and return true
- * if a visible tuple was found, false otherwise.
- *
- * Optional callback, but either both scan_bitmap_next_block and
- * scan_bitmap_next_tuple need to exist, or neither.
- */
- bool (*scan_bitmap_next_tuple) (TableScanDesc scan,
- TupleTableSlot *slot);
-
- /*
* Prepare to fetch tuples from the next block in a sample scan. Return
* false if the sample scan is finished, true otherwise. `scan` was
* started via table_beginscan_sampling().
@@ -1939,53 +1919,26 @@ table_relation_estimate_size(Relation rel, int32 *attr_widths,
*/
/*
- * Prepare to fetch / check / return tuples as part of a bitmap table scan.
- * `scan` needs to have been started via table_beginscan_bm(). Returns false
- * if there are no more blocks in the bitmap, true otherwise.
- *
- * `lossy_pages` and `exact_pages` are EXPLAIN counters that can be
- * incremented by the table AM to indicate whether or not the block's
- * representation in the bitmap is lossy.
+ * Fetch / check / return tuples as part of a bitmap table scan. `scan` needs
+ * to have been started via table_beginscan_bm(). Fetch the next tuple of a
+ * bitmap table scan into `slot` and return true if a visible tuple was found,
+ * false otherwise.
*
- * `recheck` is set by the table AM to indicate whether or not the tuples
- * from this block should be rechecked.
+ * `recheck` is set by the table AM to indicate whether or not the tuple in
+ * `slot` should be rechecked. Tuples from lossy pages will always need to be
+ * rechecked, but some non-lossy pages' tuples may also require recheck.
*
- * Note, this is an optionally implemented function, therefore should only be
- * used after verifying the presence (at plan time or such).
+ * `lossy_pages` is incremented if the block's representation in the bitmap is
+ * lossy; otherwise, `exact_pages` is incremented.
*/
static inline bool
-table_scan_bitmap_next_block(TableScanDesc scan,
+table_scan_bitmap_next_tuple(TableScanDesc scan,
+ TupleTableSlot *slot,
bool *recheck,
uint64 *lossy_pages,
uint64 *exact_pages)
{
/*
- * We don't expect direct calls to table_scan_bitmap_next_block with valid
- * CheckXidAlive for catalog or regular tables. See detailed comments in
- * xact.c where these variables are declared.
- */
- if (unlikely(TransactionIdIsValid(CheckXidAlive) && !bsysscan))
- elog(ERROR, "unexpected table_scan_bitmap_next_block call during logical decoding");
-
- return scan->rs_rd->rd_tableam->scan_bitmap_next_block(scan,
- recheck,
- lossy_pages,
- exact_pages);
-}
-
-/*
- * Fetch the next tuple of a bitmap table scan into `slot` and return true if
- * a visible tuple was found, false otherwise.
- * table_scan_bitmap_next_block() needs to previously have selected a
- * block (i.e. returned true), and no previous
- * table_scan_bitmap_next_tuple() for the same block may have
- * returned false.
- */
-static inline bool
-table_scan_bitmap_next_tuple(TableScanDesc scan,
- TupleTableSlot *slot)
-{
- /*
* We don't expect direct calls to table_scan_bitmap_next_tuple with valid
* CheckXidAlive for catalog or regular tables. See detailed comments in
* xact.c where these variables are declared.
@@ -1994,7 +1947,10 @@ table_scan_bitmap_next_tuple(TableScanDesc scan,
elog(ERROR, "unexpected table_scan_bitmap_next_tuple call during logical decoding");
return scan->rs_rd->rd_tableam->scan_bitmap_next_tuple(scan,
- slot);
+ slot,
+ recheck,
+ lossy_pages,
+ exact_pages);
}
/*
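
For an out-of-core table AM, the practical consequence of the new contract in
tableam.h is that all per-block bookkeeping (the recheck flag and the
lossy/exact page counters) now has to happen inside the single tuple callback,
which advances to the next block on its own. A simplified, self-contained C
sketch of that shape; the Toy* types and functions below are stand-ins, not
the real TableScanDesc/TupleTableSlot API.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the executor-facing pieces the real callback receives. */
typedef struct { int value; bool isnull; } ToySlot;
typedef struct
{
    int     cur;            /* position within the current "page" */
    int     per_page;       /* tuples per page in this toy AM */
    int     pages_left;     /* pages remaining in the "bitmap" */
    bool    page_is_lossy;  /* whether the current page was lossy */
} ToyScan;

/* Load the next page; returns false when the bitmap is exhausted. */
static bool
toy_next_page(ToyScan *scan, bool *recheck,
              uint64_t *lossy_pages, uint64_t *exact_pages)
{
    if (scan->pages_left == 0)
        return false;
    scan->pages_left--;
    scan->cur = 0;
    scan->page_is_lossy = (scan->pages_left % 2 == 0);  /* arbitrary toy rule */
    *recheck = scan->page_is_lossy;                     /* lossy pages always recheck */
    if (scan->page_is_lossy)
        (*lossy_pages)++;
    else
        (*exact_pages)++;
    return true;
}

/* The consolidated callback shape: fetch next tuple, advancing pages as needed. */
static bool
toy_scan_bitmap_next_tuple(ToyScan *scan, ToySlot *slot, bool *recheck,
                           uint64_t *lossy_pages, uint64_t *exact_pages)
{
    while (scan->cur >= scan->per_page)
    {
        if (!toy_next_page(scan, recheck, lossy_pages, exact_pages))
            return false;       /* bitmap exhausted, scan is done */
    }
    slot->value = scan->cur++;
    slot->isnull = false;
    return true;
}

int
main(void)
{
    ToyScan  scan = {.cur = 1, .per_page = 1, .pages_left = 2};
    ToySlot  slot;
    bool     recheck = false;
    uint64_t lossy = 0, exact = 0;

    while (toy_scan_bitmap_next_tuple(&scan, &slot, &recheck, &lossy, &exact))
        printf("tuple %d (recheck=%d)\n", slot.value, recheck);
    printf("lossy=%llu exact=%llu\n",
           (unsigned long long) lossy, (unsigned long long) exact);
    return 0;
}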