Diffstat (limited to 'src/backend/access/table/tableam.c')
-rw-r--r--	src/backend/access/table/tableam.c	34
1 file changed, 34 insertions, 0 deletions
diff --git a/src/backend/access/table/tableam.c b/src/backend/access/table/tableam.c
index b1e31982918..8ad4c629432 100644
--- a/src/backend/access/table/tableam.c
+++ b/src/backend/access/table/tableam.c
@@ -177,6 +177,40 @@ table_beginscan_parallel(Relation relation, ParallelTableScanDesc parallel_scan)
/* ----------------------------------------------------------------------------
+ * Index scan related functions.
+ * ----------------------------------------------------------------------------
+ */
+
+/*
+ * Check whether any version of the row identified by "tid" is visible to
+ * "snapshot"; *all_dead is set if every version is dead, so the caller can
+ * have the index entry killed.
+ *
+ * To perform that check, simply start an index fetch, create the necessary
+ * slot, do the table lookup, and shut everything down again.  This could be
+ * optimized, but is unlikely to matter from a performance POV: even if live
+ * index pointers also matching a unique index key were frequent, the CPU
+ * overhead of this routine would be unlikely to matter.
+ */
+bool
+table_index_fetch_tuple_check(Relation rel,
+                              ItemPointer tid,
+                              Snapshot snapshot,
+                              bool *all_dead)
+{
+    IndexFetchTableData *scan;
+    TupleTableSlot *slot;
+    bool        call_again = false;
+    bool        found;
+
+    slot = table_slot_create(rel, NULL);
+    scan = table_index_fetch_begin(rel);
+    found = table_index_fetch_tuple(scan, tid, snapshot, slot, &call_again,
+                                    all_dead);
+    table_index_fetch_end(scan);
+    ExecDropSingleTupleTableSlot(slot);
+
+    return found;
+}
+
+
+/* ----------------------------------------------------------------------------
* Functions to make modifications a bit simpler.
* ----------------------------------------------------------------------------
*/
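
A minimal caller-side sketch of the new helper, not part of the commit itself: it shows how code that has obtained a TID from a unique-index probe might ask whether any version of that row is still visible, and whether the index entry is known to point only at dead versions. The function name check_unique_conflict, the use of GetActiveSnapshot(), and the surrounding context are assumptions for illustration; only table_index_fetch_tuple_check() and the included headers come from PostgreSQL.

#include "postgres.h"

#include "access/tableam.h"
#include "storage/itemptr.h"
#include "utils/rel.h"
#include "utils/snapmgr.h"

/*
 * Hypothetical caller (illustration only): report whether the row named by
 * "tid" conflicts, i.e. whether any of its versions is visible to the
 * active snapshot.
 */
static bool
check_unique_conflict(Relation heapRel, ItemPointer tid)
{
    Snapshot    snapshot = GetActiveSnapshot();  /* assumed caller context */
    bool        all_dead = false;
    bool        found;

    /*
     * The helper starts an index fetch, creates a slot, performs the table
     * lookup, and tears everything down again, so the caller only sees the
     * boolean result and the all_dead hint.
     */
    found = table_index_fetch_tuple_check(heapRel, tid, snapshot, &all_dead);

    if (!found && all_dead)
    {
        /*
         * No version reachable from this TID is visible to anyone; a real
         * caller could ask the index AM to kill its entry for this TID.
         */
    }

    return found;
}

If found is true, the row still has a live version matching the index entry; if it is false and all_dead is set, the entry points only at dead versions and can be treated as removable.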