Diffstat (limited to 'src/backend/executor/nodeGather.c')
-rw-r--r--	src/backend/executor/nodeGather.c	29
1 files changed, 15 insertions, 14 deletions
diff --git a/src/backend/executor/nodeGather.c b/src/backend/executor/nodeGather.c
index 58f88a5724d..f9cf1b2f875 100644
--- a/src/backend/executor/nodeGather.c
+++ b/src/backend/executor/nodeGather.c
@@ -153,12 +153,15 @@ ExecGather(PlanState *pstate)
 		{
 			ParallelContext *pcxt;
 
-			/* Initialize the workers required to execute Gather node. */
+			/* Initialize, or re-initialize, shared state needed by workers. */
 			if (!node->pei)
 				node->pei = ExecInitParallelPlan(node->ps.lefttree,
 												 estate,
 												 gather->num_workers,
 												 node->tuples_needed);
+			else
+				ExecParallelReinitialize(node->ps.lefttree,
+										 node->pei);
 
 			/*
 			 * Register backend workers.  We might not get as many as we
@@ -426,7 +429,7 @@ ExecShutdownGather(GatherState *node)
 /* ----------------------------------------------------------------
  *		ExecReScanGather
  *
- *		Re-initialize the workers and rescans a relation via them.
+ *		Prepare to re-scan the result of a Gather.
  * ----------------------------------------------------------------
  */
 void
@@ -435,19 +438,12 @@ ExecReScanGather(GatherState *node)
 	Gather	   *gather = (Gather *) node->ps.plan;
 	PlanState  *outerPlan = outerPlanState(node);
 
-	/*
-	 * Re-initialize the parallel workers to perform rescan of relation. We
-	 * want to gracefully shutdown all the workers so that they should be able
-	 * to propagate any error or other information to master backend before
-	 * dying. Parallel context will be reused for rescan.
-	 */
+	/* Make sure any existing workers are gracefully shut down */
 	ExecShutdownGatherWorkers(node);
 
+	/* Mark node so that shared state will be rebuilt at next call */
 	node->initialized = false;
 
-	if (node->pei)
-		ExecParallelReinitialize(node->pei);
-
 	/*
 	 * Set child node's chgParam to tell it that the next scan might deliver a
 	 * different set of rows within the leader process.  (The overall rowset
@@ -459,10 +455,15 @@ ExecReScanGather(GatherState *node)
 		outerPlan->chgParam = bms_add_member(outerPlan->chgParam,
 											 gather->rescan_param);
 
-
 	/*
-	 * if chgParam of subnode is not null then plan will be re-scanned by
-	 * first ExecProcNode.
+	 * If chgParam of subnode is not null then plan will be re-scanned by
+	 * first ExecProcNode.  Note: because this does nothing if we have a
+	 * rescan_param, it's currently guaranteed that parallel-aware child nodes
+	 * will not see a ReScan call until after they get a ReInitializeDSM call.
+	 * That ordering might not be something to rely on, though.  A good rule
+	 * of thumb is that ReInitializeDSM should reset only shared state, ReScan
+	 * should reset only local state, and anything that depends on both of
+	 * those steps being finished must wait until the first ExecProcNode call.
 	 */
 	if (outerPlan->chgParam == NULL)
 		ExecReScan(outerPlan);
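
The rule of thumb added at the bottom of ExecReScanGather describes a three-way division of labor during a rescan of a parallel plan: ReInitializeDSM resets shared state, ReScan resets local state, and anything that depends on both must wait for the first ExecProcNode call. Below is a minimal, self-contained sketch of that contract in plain C. All names (SharedScanState, scan_reinitialize_dsm, and so on) are invented for illustration, and ordinary memory stands in for dynamic shared memory; this is not PostgreSQL code.

/*
 * Sketch of the ReInitializeDSM / ReScan / ExecProcNode contract.
 * Hypothetical names throughout; not PostgreSQL code.
 */
#include <stdbool.h>
#include <stdio.h>

typedef struct SharedScanState	/* would live in DSM, visible to all workers */
{
	int			next_block;		/* next block of the table to hand out */
} SharedScanState;

typedef struct LocalScanState	/* private to one participating process */
{
	SharedScanState *shared;
	int			current_block;	/* block this process is scanning, or -1 */
	bool		initialized;	/* have we claimed a block yet? */
} LocalScanState;

/* ReInitializeDSM analogue: reset only shared state. */
static void
scan_reinitialize_dsm(SharedScanState *shared)
{
	shared->next_block = 0;
}

/* ReScan analogue: reset only local state. */
static void
scan_rescan(LocalScanState *node)
{
	node->current_block = -1;
	node->initialized = false;
}

/*
 * ExecProcNode analogue: the first call is the earliest point that may
 * depend on both resets having finished, so the block is claimed here.
 */
static int
scan_next(LocalScanState *node)
{
	if (!node->initialized)
	{
		node->current_block = node->shared->next_block++;
		node->initialized = true;
	}
	return node->current_block;
}

int
main(void)
{
	SharedScanState shared = {0};
	LocalScanState leader = {&shared, -1, false};
	LocalScanState worker = {&shared, -1, false};

	printf("leader claims block %d\n", scan_next(&leader));	/* block 0 */
	printf("worker claims block %d\n", scan_next(&worker));	/* block 1 */

	/* Rescan: shared state is reset once, local state once per process. */
	scan_reinitialize_dsm(&shared);
	scan_rescan(&leader);
	scan_rescan(&worker);

	printf("leader claims block %d\n", scan_next(&leader));	/* block 0 again */
	return 0;
}

Because scan_next claims its block lazily on first use, it does not matter whether the shared reset or the local reset runs first, which is exactly why the ordering guarantee noted in the committed comment is not something to rely on.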