about · summary · refs · log · tree · commit · diff
path: root/src
diff options
context:
space:
mode:
Diffstat (limited to 'src')
-rw-r--r--  src/backend/access/heap/vacuumlazy.c  17
1 file changed, 17 insertions(+), 0 deletions(-)
diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c
index d8f847b0e66..4b65205cd13 100644
--- a/src/backend/access/heap/vacuumlazy.c
+++ b/src/backend/access/heap/vacuumlazy.c
@@ -61,6 +61,7 @@
#include "access/visibilitymap.h"
#include "access/xact.h"
#include "access/xlog.h"
+#include "catalog/index.h"
#include "catalog/storage.h"
#include "commands/dbcommands.h"
#include "commands/progress.h"
@@ -2923,8 +2924,24 @@ static bool
lazy_tid_reaped(ItemPointer itemptr, void *state)
{
LVDeadTuples *dead_tuples = (LVDeadTuples *) state;
+ int64 litem,
+ ritem,
+ item;
ItemPointer res;
+ litem = itemptr_encode(&dead_tuples->itemptrs[0]);
+ ritem = itemptr_encode(&dead_tuples->itemptrs[dead_tuples->num_tuples - 1]);
+ item = itemptr_encode(itemptr);
+
+ /*
+ * Doing a simple bound check before bsearch() is useful to avoid the
+ * extra cost of bsearch(), especially if dead tuples on the heap are
+ * concentrated in a certain range. Since this function is called for
+ * every index tuple, it pays to be really fast.
+ */
+ if (item < litem || item > ritem)
+ return false;
+
res = (ItemPointer) bsearch((void *) itemptr,
(void *) dead_tuples->itemptrs,
dead_tuples->num_tuples,