about summary refs log tree commit diff
path: root/src
diff options
context:
space:
mode:
Diffstat (limited to 'src')
-rw-r--r--  src/backend/access/nbtree/nbtree.c    | 19
-rw-r--r--  src/backend/access/nbtree/nbtsearch.c |  2
2 files changed, 1 insertion, 20 deletions
diff --git a/src/backend/access/nbtree/nbtree.c b/src/backend/access/nbtree/nbtree.c
index af24f671309..3a7942997c2 100644
--- a/src/backend/access/nbtree/nbtree.c
+++ b/src/backend/access/nbtree/nbtree.c
@@ -608,25 +608,6 @@ btrestrpos(IndexScanDesc scan)
*/
so->currPos.itemIndex = so->markItemIndex;
}
- else if (so->currPos.currPage == so->markPos.currPage)
- {
- /*
- * so->markItemIndex < 0 but mark and current positions are on the
- * same page. This would be an unusual case, where the scan moved to
- * a new index page after the mark, restored, and later restored again
- * without moving off the marked page. It is not clear that this code
- * can currently be reached, but it seems better to make this function
- * robust for this case than to Assert() or elog() that it can't
- * happen.
- *
- * We neither want to set so->markItemIndex >= 0 (because that could
- * cause a later move to a new page to redo the memcpy() executions)
- * nor re-execute the memcpy() functions for a restore within the same
- * page. The previous restore to this page already set everything
- * except markPos as it should be.
- */
- so->currPos.itemIndex = so->markPos.itemIndex;
- }
else
{
/*
diff --git a/src/backend/access/nbtree/nbtsearch.c b/src/backend/access/nbtree/nbtsearch.c
index c392fd6cb31..ee46023c5a6 100644
--- a/src/backend/access/nbtree/nbtsearch.c
+++ b/src/backend/access/nbtree/nbtsearch.c
@@ -1011,7 +1011,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
so->currPos.moreRight = false;
}
so->numKilled = 0; /* just paranoia */
- so->markItemIndex = -1; /* ditto */
+ Assert(so->markItemIndex == -1);
/* position to the precise item on the page */
offnum = _bt_binsrch(rel, buf, keysCount, scankeys, nextkey);