author     Tom Lane <tgl@sss.pgh.pa.us>   2017-08-30 13:18:16 -0400
committer  Tom Lane <tgl@sss.pgh.pa.us>   2017-08-30 13:18:16 -0400
commit     41b0dd987d44089dc48e9c70024277e253b396b7 (patch)
tree       c42eeeb2f175764a9b1ad9c095f8a46057078eb3 /src/backend/executor/nodeGatherMerge.c
parent     6c2c5bea3cec4c874d1ee225bb6e222055c03d75 (diff)
Separate reinitialization of shared parallel-scan state from ExecReScan.
Previously, the parallel executor logic did reinitialization of shared state within the ExecReScan code for parallel-aware scan nodes. This is problematic, because it means that the ExecReScan call has to occur synchronously (ie, during the parent Gather node's ReScan call). That is swimming very much against the tide so far as the ExecReScan machinery is concerned; the fact that it works at all today depends on a lot of fragile assumptions, such as that no plan node between Gather and a parallel-aware scan node is parameterized. Another objection is that because ExecReScan might be called in workers as well as the leader, hacky extra tests are needed in some places to prevent unwanted shared-state resets.

Hence, let's separate this code into two functions, a ReInitializeDSM call and the ReScan call proper. ReInitializeDSM is called only in the leader and is guaranteed to run before we start new workers. ReScan is returned to its traditional function of resetting only local state, which means that ExecReScan's usual habits of delaying or eliminating child rescan calls are safe again.

As with the preceding commit 7df2c1f8d, it doesn't seem to be necessary to make these changes in 9.6, which is a good thing because the FDW and CustomScan APIs are impacted.

Discussion: https://postgr.es/m/CAA4eK1JkByysFJNh9M349u_nNjqETuEnY_y1VUc_kJiU0bxtaQ@mail.gmail.com
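
The split the message describes can be illustrated with a small standalone sketch (not PostgreSQL code; SharedScanState, LocalScanState, and the scan_* functions below are hypothetical names): the DSM-resident state is reset exactly once, by the leader, before any new workers are launched, while the ReScan-style step touches only the calling process's local state.

/*
 * Hypothetical, simplified model of the pattern this commit introduces;
 * the structs and functions are illustrative, not PostgreSQL's own.
 */
#include <stdbool.h>
#include <stdio.h>

/* State that lives in dynamic shared memory, visible to all workers. */
typedef struct SharedScanState
{
	int		next_block;		/* next block a participant should claim */
} SharedScanState;

/* State private to one process (leader or worker). */
typedef struct LocalScanState
{
	SharedScanState *shared;
	int		current_block;	/* block this process is scanning */
	bool	done;
} LocalScanState;

/*
 * ReInitializeDSM-style step: run only in the leader, and only before any
 * new workers are launched, so nothing can race against the reset.
 */
static void
scan_reinitialize_dsm(SharedScanState *shared)
{
	shared->next_block = 0;
}

/*
 * ReScan-style step: resets only process-local state, so it is safe for
 * the usual ExecReScan machinery to delay or skip it per process.
 */
static void
scan_rescan(LocalScanState *local)
{
	local->current_block = -1;
	local->done = false;
}

int
main(void)
{
	SharedScanState shared = {.next_block = 42};
	LocalScanState leader = {.shared = &shared, .current_block = 7, .done = true};

	/* Rescan sequence in the leader: shared reset first, local reset after. */
	scan_reinitialize_dsm(&shared);
	scan_rescan(&leader);

	printf("next_block=%d current_block=%d done=%d\n",
		   shared.next_block, leader.current_block, (int) leader.done);
	return 0;
}
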
Diffstat (limited to 'src/backend/executor/nodeGatherMerge.c')
-rw-r--r--   src/backend/executor/nodeGatherMerge.c   29
1 file changed, 15 insertions(+), 14 deletions(-)
diff --git a/src/backend/executor/nodeGatherMerge.c b/src/backend/executor/nodeGatherMerge.c
index f50841699c4..0bd5da38b4a 100644
--- a/src/backend/executor/nodeGatherMerge.c
+++ b/src/backend/executor/nodeGatherMerge.c
@@ -187,12 +187,15 @@ ExecGatherMerge(PlanState *pstate)
{
ParallelContext *pcxt;
- /* Initialize data structures for workers. */
+ /* Initialize, or re-initialize, shared state needed by workers. */
if (!node->pei)
node->pei = ExecInitParallelPlan(node->ps.lefttree,
estate,
gm->num_workers,
node->tuples_needed);
+ else
+ ExecParallelReinitialize(node->ps.lefttree,
+ node->pei);
/* Try to launch workers. */
pcxt = node->pei->pcxt;
@@ -321,7 +324,7 @@ ExecShutdownGatherMergeWorkers(GatherMergeState *node)
/* ----------------------------------------------------------------
* ExecReScanGatherMerge
*
- * Re-initialize the workers and rescans a relation via them.
+ * Prepare to re-scan the result of a GatherMerge.
* ----------------------------------------------------------------
*/
void
@@ -330,20 +333,13 @@ ExecReScanGatherMerge(GatherMergeState *node)
GatherMerge *gm = (GatherMerge *) node->ps.plan;
PlanState *outerPlan = outerPlanState(node);
- /*
- * Re-initialize the parallel workers to perform rescan of relation. We
- * want to gracefully shutdown all the workers so that they should be able
- * to propagate any error or other information to master backend before
- * dying. Parallel context will be reused for rescan.
- */
+ /* Make sure any existing workers are gracefully shut down */
ExecShutdownGatherMergeWorkers(node);
+ /* Mark node so that shared state will be rebuilt at next call */
node->initialized = false;
node->gm_initialized = false;
- if (node->pei)
- ExecParallelReinitialize(node->pei);
-
/*
* Set child node's chgParam to tell it that the next scan might deliver a
* different set of rows within the leader process. (The overall rowset
@@ -355,10 +351,15 @@ ExecReScanGatherMerge(GatherMergeState *node)
outerPlan->chgParam = bms_add_member(outerPlan->chgParam,
gm->rescan_param);
-
/*
- * if chgParam of subnode is not null then plan will be re-scanned by
- * first ExecProcNode.
+ * If chgParam of subnode is not null then plan will be re-scanned by
+ * first ExecProcNode. Note: because this does nothing if we have a
+ * rescan_param, it's currently guaranteed that parallel-aware child nodes
+ * will not see a ReScan call until after they get a ReInitializeDSM call.
+ * That ordering might not be something to rely on, though. A good rule
+ * of thumb is that ReInitializeDSM should reset only shared state, ReScan
+ * should reset only local state, and anything that depends on both of
+ * those steps being finished must wait until the first ExecProcNode call.
*/
if (outerPlan->chgParam == NULL)
ExecReScan(outerPlan);
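
The new comment's rule of thumb (ReInitializeDSM resets only shared state, ReScan resets only local state, and anything that needs both must wait for the first ExecProcNode call) can likewise be sketched with standalone code; DemoShared, DemoNode, and the demo_* functions below are hypothetical, not PostgreSQL APIs.

/*
 * Sketch of "defer work that depends on both resets to the first
 * ExecProcNode-style call", under the assumptions stated above.
 */
#include <stdbool.h>
#include <stdio.h>

typedef struct DemoShared
{
	int		generation;		/* bumped by the shared-state reset */
} DemoShared;

typedef struct DemoNode
{
	DemoShared *shared;
	bool	initialized;	/* combined shared+local setup done? */
	int		seen_generation;
} DemoNode;

static void
demo_reinitialize_dsm(DemoShared *shared)
{
	/* shared-state-only reset, leader only */
	shared->generation++;
}

static void
demo_rescan(DemoNode *node)
{
	/* local-state-only reset: just forget that we were initialized */
	node->initialized = false;
}

static int
demo_exec_proc_node(DemoNode *node)
{
	/*
	 * Work that depends on both resets having happened is done here, on the
	 * first call after a rescan, not inside demo_rescan() itself.
	 */
	if (!node->initialized)
	{
		node->seen_generation = node->shared->generation;
		node->initialized = true;
	}
	return node->seen_generation;
}

int
main(void)
{
	DemoShared shared = {.generation = 0};
	DemoNode node = {.shared = &shared};

	(void) demo_exec_proc_node(&node);	/* first scan */
	demo_reinitialize_dsm(&shared);		/* leader: reset shared state */
	demo_rescan(&node);					/* reset local state only */
	printf("generation after rescan: %d\n", demo_exec_proc_node(&node));
	return 0;
}
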