about summary refs log tree commit diff
path: root/src/backend/executor
diff options
context:
space:
mode:
Diffstat (limited to 'src/backend/executor')
-rw-r--r--  src/backend/executor/execIndexing.c         2
-rw-r--r--  src/backend/executor/execParallel.c        59
-rw-r--r--  src/backend/executor/execReplication.c      2
-rw-r--r--  src/backend/executor/nodeBitmapIndexscan.c 116
-rw-r--r--  src/backend/executor/nodeIndexonlyscan.c   96
-rw-r--r--  src/backend/executor/nodeIndexscan.c       97
6 files changed, 348 insertions, 24 deletions
diff --git a/src/backend/executor/execIndexing.c b/src/backend/executor/execIndexing.c
index 742f3f8c08d..e3fe9b78bb5 100644
--- a/src/backend/executor/execIndexing.c
+++ b/src/backend/executor/execIndexing.c
@@ -816,7 +816,7 @@ check_exclusion_or_unique_constraint(Relation heap, Relation index,
retry:
conflict = false;
found_self = false;
- index_scan = index_beginscan(heap, index, &DirtySnapshot, indnkeyatts, 0);
+ index_scan = index_beginscan(heap, index, &DirtySnapshot, NULL, indnkeyatts, 0);
index_rescan(index_scan, scankeys, indnkeyatts, NULL, 0);
while (index_getnext_slot(index_scan, ForwardScanDirection, existing_slot))
diff --git a/src/backend/executor/execParallel.c b/src/backend/executor/execParallel.c
index 1bedb808368..e9337a97d17 100644
--- a/src/backend/executor/execParallel.c
+++ b/src/backend/executor/execParallel.c
@@ -28,6 +28,7 @@
#include "executor/nodeAgg.h"
#include "executor/nodeAppend.h"
#include "executor/nodeBitmapHeapscan.h"
+#include "executor/nodeBitmapIndexscan.h"
#include "executor/nodeCustom.h"
#include "executor/nodeForeignscan.h"
#include "executor/nodeHash.h"
@@ -244,14 +245,19 @@ ExecParallelEstimate(PlanState *planstate, ExecParallelEstimateContext *e)
e->pcxt);
break;
case T_IndexScanState:
- if (planstate->plan->parallel_aware)
- ExecIndexScanEstimate((IndexScanState *) planstate,
- e->pcxt);
+ /* even when not parallel-aware, for EXPLAIN ANALYZE */
+ ExecIndexScanEstimate((IndexScanState *) planstate,
+ e->pcxt);
break;
case T_IndexOnlyScanState:
- if (planstate->plan->parallel_aware)
- ExecIndexOnlyScanEstimate((IndexOnlyScanState *) planstate,
- e->pcxt);
+ /* even when not parallel-aware, for EXPLAIN ANALYZE */
+ ExecIndexOnlyScanEstimate((IndexOnlyScanState *) planstate,
+ e->pcxt);
+ break;
+ case T_BitmapIndexScanState:
+ /* even when not parallel-aware, for EXPLAIN ANALYZE */
+ ExecBitmapIndexScanEstimate((BitmapIndexScanState *) planstate,
+ e->pcxt);
break;
case T_ForeignScanState:
if (planstate->plan->parallel_aware)
@@ -468,14 +474,17 @@ ExecParallelInitializeDSM(PlanState *planstate,
d->pcxt);
break;
case T_IndexScanState:
- if (planstate->plan->parallel_aware)
- ExecIndexScanInitializeDSM((IndexScanState *) planstate,
- d->pcxt);
+ /* even when not parallel-aware, for EXPLAIN ANALYZE */
+ ExecIndexScanInitializeDSM((IndexScanState *) planstate, d->pcxt);
break;
case T_IndexOnlyScanState:
- if (planstate->plan->parallel_aware)
- ExecIndexOnlyScanInitializeDSM((IndexOnlyScanState *) planstate,
- d->pcxt);
+ /* even when not parallel-aware, for EXPLAIN ANALYZE */
+ ExecIndexOnlyScanInitializeDSM((IndexOnlyScanState *) planstate,
+ d->pcxt);
+ break;
+ case T_BitmapIndexScanState:
+ /* even when not parallel-aware, for EXPLAIN ANALYZE */
+ ExecBitmapIndexScanInitializeDSM((BitmapIndexScanState *) planstate, d->pcxt);
break;
case T_ForeignScanState:
if (planstate->plan->parallel_aware)
@@ -1002,6 +1011,7 @@ ExecParallelReInitializeDSM(PlanState *planstate,
ExecHashJoinReInitializeDSM((HashJoinState *) planstate,
pcxt);
break;
+ case T_BitmapIndexScanState:
case T_HashState:
case T_SortState:
case T_IncrementalSortState:
@@ -1063,6 +1073,15 @@ ExecParallelRetrieveInstrumentation(PlanState *planstate,
/* Perform any node-type-specific work that needs to be done. */
switch (nodeTag(planstate))
{
+ case T_IndexScanState:
+ ExecIndexScanRetrieveInstrumentation((IndexScanState *) planstate);
+ break;
+ case T_IndexOnlyScanState:
+ ExecIndexOnlyScanRetrieveInstrumentation((IndexOnlyScanState *) planstate);
+ break;
+ case T_BitmapIndexScanState:
+ ExecBitmapIndexScanRetrieveInstrumentation((BitmapIndexScanState *) planstate);
+ break;
case T_SortState:
ExecSortRetrieveInstrumentation((SortState *) planstate);
break;
@@ -1330,14 +1349,18 @@ ExecParallelInitializeWorker(PlanState *planstate, ParallelWorkerContext *pwcxt)
ExecSeqScanInitializeWorker((SeqScanState *) planstate, pwcxt);
break;
case T_IndexScanState:
- if (planstate->plan->parallel_aware)
- ExecIndexScanInitializeWorker((IndexScanState *) planstate,
- pwcxt);
+ /* even when not parallel-aware, for EXPLAIN ANALYZE */
+ ExecIndexScanInitializeWorker((IndexScanState *) planstate, pwcxt);
break;
case T_IndexOnlyScanState:
- if (planstate->plan->parallel_aware)
- ExecIndexOnlyScanInitializeWorker((IndexOnlyScanState *) planstate,
- pwcxt);
+ /* even when not parallel-aware, for EXPLAIN ANALYZE */
+ ExecIndexOnlyScanInitializeWorker((IndexOnlyScanState *) planstate,
+ pwcxt);
+ break;
+ case T_BitmapIndexScanState:
+ /* even when not parallel-aware, for EXPLAIN ANALYZE */
+ ExecBitmapIndexScanInitializeWorker((BitmapIndexScanState *) planstate,
+ pwcxt);
break;
case T_ForeignScanState:
if (planstate->plan->parallel_aware)
diff --git a/src/backend/executor/execReplication.c b/src/backend/executor/execReplication.c
index f59cb9098ff..0a9b880d250 100644
--- a/src/backend/executor/execReplication.c
+++ b/src/backend/executor/execReplication.c
@@ -202,7 +202,7 @@ RelationFindReplTupleByIndex(Relation rel, Oid idxoid,
skey_attoff = build_replindex_scan_key(skey, rel, idxrel, searchslot);
/* Start an index scan. */
- scan = index_beginscan(rel, idxrel, &snap, skey_attoff, 0);
+ scan = index_beginscan(rel, idxrel, &snap, NULL, skey_attoff, 0);
retry:
found = false;
diff --git a/src/backend/executor/nodeBitmapIndexscan.c b/src/backend/executor/nodeBitmapIndexscan.c
index 0b32c3a022f..abbb033881a 100644
--- a/src/backend/executor/nodeBitmapIndexscan.c
+++ b/src/backend/executor/nodeBitmapIndexscan.c
@@ -184,6 +184,27 @@ ExecEndBitmapIndexScan(BitmapIndexScanState *node)
indexScanDesc = node->biss_ScanDesc;
/*
+ * When ending a parallel worker, copy the statistics gathered by the
+ * worker back into shared memory so that it can be picked up by the main
+ * process to report in EXPLAIN ANALYZE
+ */
+ if (node->biss_SharedInfo != NULL && IsParallelWorker())
+ {
+ IndexScanInstrumentation *winstrument;
+
+ Assert(ParallelWorkerNumber <= node->biss_SharedInfo->num_workers);
+ winstrument = &node->biss_SharedInfo->winstrument[ParallelWorkerNumber];
+
+ /*
+ * We have to accumulate the stats rather than performing a memcpy.
+ * When a Gather/GatherMerge node finishes it will perform planner
+ * shutdown on the workers. On rescan it will spin up new workers
+ * which will have a new BitmapIndexScanState and zeroed stats.
+ */
+ winstrument->nsearches += node->biss_Instrument.nsearches;
+ }
+
+ /*
* close the index relation (no-op if we didn't open it)
*/
if (indexScanDesc)
@@ -302,6 +323,7 @@ ExecInitBitmapIndexScan(BitmapIndexScan *node, EState *estate, int eflags)
indexstate->biss_ScanDesc =
index_beginscan_bitmap(indexstate->biss_RelationDesc,
estate->es_snapshot,
+ &indexstate->biss_Instrument,
indexstate->biss_NumScanKeys);
/*
@@ -319,3 +341,97 @@ ExecInitBitmapIndexScan(BitmapIndexScan *node, EState *estate, int eflags)
*/
return indexstate;
}
+
+/* ----------------------------------------------------------------
+ * ExecBitmapIndexScanEstimate
+ *
+ * Compute the amount of space we'll need in the parallel
+ * query DSM, and inform pcxt->estimator about our needs.
+ * ----------------------------------------------------------------
+ */
+void
+ExecBitmapIndexScanEstimate(BitmapIndexScanState *node, ParallelContext *pcxt)
+{
+ Size size;
+
+ /*
+ * Parallel bitmap index scans are not supported, but we still need to
+ * store the scan's instrumentation in DSM during parallel query
+ */
+ if (!node->ss.ps.instrument || pcxt->nworkers == 0)
+ return;
+
+ size = offsetof(SharedIndexScanInstrumentation, winstrument) +
+ pcxt->nworkers * sizeof(IndexScanInstrumentation);
+ shm_toc_estimate_chunk(&pcxt->estimator, size);
+ shm_toc_estimate_keys(&pcxt->estimator, 1);
+}
+
+/* ----------------------------------------------------------------
+ * ExecBitmapIndexScanInitializeDSM
+ *
+ * Set up bitmap index scan shared instrumentation.
+ * ----------------------------------------------------------------
+ */
+void
+ExecBitmapIndexScanInitializeDSM(BitmapIndexScanState *node,
+ ParallelContext *pcxt)
+{
+ Size size;
+
+ /* don't need this if not instrumenting or no workers */
+ if (!node->ss.ps.instrument || pcxt->nworkers == 0)
+ return;
+
+ size = offsetof(SharedIndexScanInstrumentation, winstrument) +
+ pcxt->nworkers * sizeof(IndexScanInstrumentation);
+ node->biss_SharedInfo =
+ (SharedIndexScanInstrumentation *) shm_toc_allocate(pcxt->toc,
+ size);
+ shm_toc_insert(pcxt->toc, node->ss.ps.plan->plan_node_id,
+ node->biss_SharedInfo);
+
+ /* Each per-worker area must start out as zeroes */
+ memset(node->biss_SharedInfo, 0, size);
+ node->biss_SharedInfo->num_workers = pcxt->nworkers;
+}
+
+/* ----------------------------------------------------------------
+ * ExecBitmapIndexScanInitializeWorker
+ *
+ * Copy relevant information from TOC into planstate.
+ * ----------------------------------------------------------------
+ */
+void
+ExecBitmapIndexScanInitializeWorker(BitmapIndexScanState *node,
+ ParallelWorkerContext *pwcxt)
+{
+ /* don't need this if not instrumenting */
+ if (!node->ss.ps.instrument)
+ return;
+
+ node->biss_SharedInfo = (SharedIndexScanInstrumentation *)
+ shm_toc_lookup(pwcxt->toc, node->ss.ps.plan->plan_node_id, false);
+}
+
+/* ----------------------------------------------------------------
+ * ExecBitmapIndexScanRetrieveInstrumentation
+ *
+ * Transfer bitmap index scan statistics from DSM to private memory.
+ * ----------------------------------------------------------------
+ */
+void
+ExecBitmapIndexScanRetrieveInstrumentation(BitmapIndexScanState *node)
+{
+ SharedIndexScanInstrumentation *SharedInfo = node->biss_SharedInfo;
+ size_t size;
+
+ if (SharedInfo == NULL)
+ return;
+
+ /* Create a copy of SharedInfo in backend-local memory */
+ size = offsetof(SharedIndexScanInstrumentation, winstrument) +
+ SharedInfo->num_workers * sizeof(IndexScanInstrumentation);
+ node->biss_SharedInfo = palloc(size);
+ memcpy(node->biss_SharedInfo, SharedInfo, size);
+}
diff --git a/src/backend/executor/nodeIndexonlyscan.c b/src/backend/executor/nodeIndexonlyscan.c
index e6635233155..f464cca9507 100644
--- a/src/backend/executor/nodeIndexonlyscan.c
+++ b/src/backend/executor/nodeIndexonlyscan.c
@@ -92,6 +92,7 @@ IndexOnlyNext(IndexOnlyScanState *node)
scandesc = index_beginscan(node->ss.ss_currentRelation,
node->ioss_RelationDesc,
estate->es_snapshot,
+ &node->ioss_Instrument,
node->ioss_NumScanKeys,
node->ioss_NumOrderByKeys);
@@ -414,6 +415,27 @@ ExecEndIndexOnlyScan(IndexOnlyScanState *node)
}
/*
+ * When ending a parallel worker, copy the statistics gathered by the
+ * worker back into shared memory so that it can be picked up by the main
+ * process to report in EXPLAIN ANALYZE
+ */
+ if (node->ioss_SharedInfo != NULL && IsParallelWorker())
+ {
+ IndexScanInstrumentation *winstrument;
+
+ Assert(ParallelWorkerNumber <= node->ioss_SharedInfo->num_workers);
+ winstrument = &node->ioss_SharedInfo->winstrument[ParallelWorkerNumber];
+
+ /*
+ * We have to accumulate the stats rather than performing a memcpy.
+ * When a Gather/GatherMerge node finishes it will perform planner
+ * shutdown on the workers. On rescan it will spin up new workers
+ * which will have a new IndexOnlyScanState and zeroed stats.
+ */
+ winstrument->nsearches += node->ioss_Instrument.nsearches;
+ }
+
+ /*
* close the index relation (no-op if we didn't open it)
*/
if (indexScanDesc)
@@ -707,11 +729,21 @@ ExecIndexOnlyScanEstimate(IndexOnlyScanState *node,
ParallelContext *pcxt)
{
EState *estate = node->ss.ps.state;
+ bool instrument = (node->ss.ps.instrument != NULL);
+ bool parallel_aware = node->ss.ps.plan->parallel_aware;
+
+ if (!instrument && !parallel_aware)
+ {
+ /* No DSM required by the scan */
+ return;
+ }
node->ioss_PscanLen = index_parallelscan_estimate(node->ioss_RelationDesc,
node->ioss_NumScanKeys,
node->ioss_NumOrderByKeys,
- estate->es_snapshot);
+ estate->es_snapshot,
+ instrument, parallel_aware,
+ pcxt->nworkers);
shm_toc_estimate_chunk(&pcxt->estimator, node->ioss_PscanLen);
shm_toc_estimate_keys(&pcxt->estimator, 1);
}
@@ -728,16 +760,33 @@ ExecIndexOnlyScanInitializeDSM(IndexOnlyScanState *node,
{
EState *estate = node->ss.ps.state;
ParallelIndexScanDesc piscan;
+ bool instrument = node->ss.ps.instrument != NULL;
+ bool parallel_aware = node->ss.ps.plan->parallel_aware;
+
+ if (!instrument && !parallel_aware)
+ {
+ /* No DSM required by the scan */
+ return;
+ }
piscan = shm_toc_allocate(pcxt->toc, node->ioss_PscanLen);
index_parallelscan_initialize(node->ss.ss_currentRelation,
node->ioss_RelationDesc,
estate->es_snapshot,
- piscan);
+ instrument, parallel_aware, pcxt->nworkers,
+ &node->ioss_SharedInfo, piscan);
shm_toc_insert(pcxt->toc, node->ss.ps.plan->plan_node_id, piscan);
+
+ if (!parallel_aware)
+ {
+ /* Only here to initialize SharedInfo in DSM */
+ return;
+ }
+
node->ioss_ScanDesc =
index_beginscan_parallel(node->ss.ss_currentRelation,
node->ioss_RelationDesc,
+ &node->ioss_Instrument,
node->ioss_NumScanKeys,
node->ioss_NumOrderByKeys,
piscan);
@@ -764,6 +813,7 @@ void
ExecIndexOnlyScanReInitializeDSM(IndexOnlyScanState *node,
ParallelContext *pcxt)
{
+ Assert(node->ss.ps.plan->parallel_aware);
index_parallelrescan(node->ioss_ScanDesc);
}
@@ -778,11 +828,31 @@ ExecIndexOnlyScanInitializeWorker(IndexOnlyScanState *node,
ParallelWorkerContext *pwcxt)
{
ParallelIndexScanDesc piscan;
+ bool instrument = node->ss.ps.instrument != NULL;
+ bool parallel_aware = node->ss.ps.plan->parallel_aware;
+
+ if (!instrument && !parallel_aware)
+ {
+ /* No DSM required by the scan */
+ return;
+ }
piscan = shm_toc_lookup(pwcxt->toc, node->ss.ps.plan->plan_node_id, false);
+
+ if (instrument)
+ node->ioss_SharedInfo = (SharedIndexScanInstrumentation *)
+ OffsetToPointer(piscan, piscan->ps_offset_ins);
+
+ if (!parallel_aware)
+ {
+ /* Only here to set up worker node's SharedInfo */
+ return;
+ }
+
node->ioss_ScanDesc =
index_beginscan_parallel(node->ss.ss_currentRelation,
node->ioss_RelationDesc,
+ &node->ioss_Instrument,
node->ioss_NumScanKeys,
node->ioss_NumOrderByKeys,
piscan);
@@ -797,3 +867,25 @@ ExecIndexOnlyScanInitializeWorker(IndexOnlyScanState *node,
node->ioss_ScanKeys, node->ioss_NumScanKeys,
node->ioss_OrderByKeys, node->ioss_NumOrderByKeys);
}
+
+/* ----------------------------------------------------------------
+ * ExecIndexOnlyScanRetrieveInstrumentation
+ *
+ * Transfer index-only scan statistics from DSM to private memory.
+ * ----------------------------------------------------------------
+ */
+void
+ExecIndexOnlyScanRetrieveInstrumentation(IndexOnlyScanState *node)
+{
+ SharedIndexScanInstrumentation *SharedInfo = node->ioss_SharedInfo;
+ size_t size;
+
+ if (SharedInfo == NULL)
+ return;
+
+ /* Create a copy of SharedInfo in backend-local memory */
+ size = offsetof(SharedIndexScanInstrumentation, winstrument) +
+ SharedInfo->num_workers * sizeof(IndexScanInstrumentation);
+ node->ioss_SharedInfo = palloc(size);
+ memcpy(node->ioss_SharedInfo, SharedInfo, size);
+}
diff --git a/src/backend/executor/nodeIndexscan.c b/src/backend/executor/nodeIndexscan.c
index c30b9c2c197..7fcaa37fe62 100644
--- a/src/backend/executor/nodeIndexscan.c
+++ b/src/backend/executor/nodeIndexscan.c
@@ -109,6 +109,7 @@ IndexNext(IndexScanState *node)
scandesc = index_beginscan(node->ss.ss_currentRelation,
node->iss_RelationDesc,
estate->es_snapshot,
+ &node->iss_Instrument,
node->iss_NumScanKeys,
node->iss_NumOrderByKeys);
@@ -204,6 +205,7 @@ IndexNextWithReorder(IndexScanState *node)
scandesc = index_beginscan(node->ss.ss_currentRelation,
node->iss_RelationDesc,
estate->es_snapshot,
+ &node->iss_Instrument,
node->iss_NumScanKeys,
node->iss_NumOrderByKeys);
@@ -794,6 +796,27 @@ ExecEndIndexScan(IndexScanState *node)
indexScanDesc = node->iss_ScanDesc;
/*
+ * When ending a parallel worker, copy the statistics gathered by the
+ * worker back into shared memory so that it can be picked up by the main
+ * process to report in EXPLAIN ANALYZE
+ */
+ if (node->iss_SharedInfo != NULL && IsParallelWorker())
+ {
+ IndexScanInstrumentation *winstrument;
+
+ Assert(ParallelWorkerNumber <= node->iss_SharedInfo->num_workers);
+ winstrument = &node->iss_SharedInfo->winstrument[ParallelWorkerNumber];
+
+ /*
+ * We have to accumulate the stats rather than performing a memcpy.
+ * When a Gather/GatherMerge node finishes it will perform planner
+ * shutdown on the workers. On rescan it will spin up new workers
+ * which will have a new IndexScanState and zeroed stats.
+ */
+ winstrument->nsearches += node->iss_Instrument.nsearches;
+ }
+
+ /*
* close the index relation (no-op if we didn't open it)
*/
if (indexScanDesc)
@@ -1642,11 +1665,21 @@ ExecIndexScanEstimate(IndexScanState *node,
ParallelContext *pcxt)
{
EState *estate = node->ss.ps.state;
+ bool instrument = node->ss.ps.instrument != NULL;
+ bool parallel_aware = node->ss.ps.plan->parallel_aware;
+
+ if (!instrument && !parallel_aware)
+ {
+ /* No DSM required by the scan */
+ return;
+ }
node->iss_PscanLen = index_parallelscan_estimate(node->iss_RelationDesc,
node->iss_NumScanKeys,
node->iss_NumOrderByKeys,
- estate->es_snapshot);
+ estate->es_snapshot,
+ instrument, parallel_aware,
+ pcxt->nworkers);
shm_toc_estimate_chunk(&pcxt->estimator, node->iss_PscanLen);
shm_toc_estimate_keys(&pcxt->estimator, 1);
}
@@ -1663,16 +1696,33 @@ ExecIndexScanInitializeDSM(IndexScanState *node,
{
EState *estate = node->ss.ps.state;
ParallelIndexScanDesc piscan;
+ bool instrument = node->ss.ps.instrument != NULL;
+ bool parallel_aware = node->ss.ps.plan->parallel_aware;
+
+ if (!instrument && !parallel_aware)
+ {
+ /* No DSM required by the scan */
+ return;
+ }
piscan = shm_toc_allocate(pcxt->toc, node->iss_PscanLen);
index_parallelscan_initialize(node->ss.ss_currentRelation,
node->iss_RelationDesc,
estate->es_snapshot,
- piscan);
+ instrument, parallel_aware, pcxt->nworkers,
+ &node->iss_SharedInfo, piscan);
shm_toc_insert(pcxt->toc, node->ss.ps.plan->plan_node_id, piscan);
+
+ if (!parallel_aware)
+ {
+ /* Only here to initialize SharedInfo in DSM */
+ return;
+ }
+
node->iss_ScanDesc =
index_beginscan_parallel(node->ss.ss_currentRelation,
node->iss_RelationDesc,
+ &node->iss_Instrument,
node->iss_NumScanKeys,
node->iss_NumOrderByKeys,
piscan);
@@ -1697,6 +1747,7 @@ void
ExecIndexScanReInitializeDSM(IndexScanState *node,
ParallelContext *pcxt)
{
+ Assert(node->ss.ps.plan->parallel_aware);
index_parallelrescan(node->iss_ScanDesc);
}
@@ -1711,11 +1762,31 @@ ExecIndexScanInitializeWorker(IndexScanState *node,
ParallelWorkerContext *pwcxt)
{
ParallelIndexScanDesc piscan;
+ bool instrument = node->ss.ps.instrument != NULL;
+ bool parallel_aware = node->ss.ps.plan->parallel_aware;
+
+ if (!instrument && !parallel_aware)
+ {
+ /* No DSM required by the scan */
+ return;
+ }
piscan = shm_toc_lookup(pwcxt->toc, node->ss.ps.plan->plan_node_id, false);
+
+ if (instrument)
+ node->iss_SharedInfo = (SharedIndexScanInstrumentation *)
+ OffsetToPointer(piscan, piscan->ps_offset_ins);
+
+ if (!parallel_aware)
+ {
+ /* Only here to set up worker node's SharedInfo */
+ return;
+ }
+
node->iss_ScanDesc =
index_beginscan_parallel(node->ss.ss_currentRelation,
node->iss_RelationDesc,
+ &node->iss_Instrument,
node->iss_NumScanKeys,
node->iss_NumOrderByKeys,
piscan);
@@ -1729,3 +1800,25 @@ ExecIndexScanInitializeWorker(IndexScanState *node,
node->iss_ScanKeys, node->iss_NumScanKeys,
node->iss_OrderByKeys, node->iss_NumOrderByKeys);
}
+
+/* ----------------------------------------------------------------
+ * ExecIndexScanRetrieveInstrumentation
+ *
+ * Transfer index scan statistics from DSM to private memory.
+ * ----------------------------------------------------------------
+ */
+void
+ExecIndexScanRetrieveInstrumentation(IndexScanState *node)
+{
+ SharedIndexScanInstrumentation *SharedInfo = node->iss_SharedInfo;
+ size_t size;
+
+ if (SharedInfo == NULL)
+ return;
+
+ /* Create a copy of SharedInfo in backend-local memory */
+ size = offsetof(SharedIndexScanInstrumentation, winstrument) +
+ SharedInfo->num_workers * sizeof(IndexScanInstrumentation);
+ node->iss_SharedInfo = palloc(size);
+ memcpy(node->iss_SharedInfo, SharedInfo, size);
+}