author | Robert Haas <rhaas@postgresql.org> | 2017-12-18 12:17:37 -0500 |
---|---|---|
committer | Robert Haas <rhaas@postgresql.org> | 2017-12-18 12:22:31 -0500 |
commit | fd7c0fa732d97a4b4ebb58730e6244ea30d0a618 | |
tree | 984e90d9e0d1d289a0de4dec67cb16445e5f0da3 /src/backend/executor/execParallel.c | |
parent | 7731c32087faf498db0562cc7e40d256ffc1750f | |
Fix crashes on plans with multiple Gather (Merge) nodes.
es_query_dsa turns out to be broken by design, because it supposes
that there is only one DSA for the whole query, whereas there is
actually one per Gather (Merge) node. For now, work around that
problem by setting and clearing the pointer around the sections of
code that might need it. It's probably a better idea to get rid of
es_query_dsa altogether in favor of having each node keep track
individually of which DSA is relevant, but that seems like more than
we would want to back-patch.
Thomas Munro, reviewed and tested by Andreas Seltenreich, Amit
Kapila, and by me.
Discussion: http://postgr.es/m/CAEepm=1U6as=brnVvMNixEV2tpi8NuyQoTmO8Qef0-VV+=7MDA@mail.gmail.com
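
To make the workaround concrete, here is a minimal sketch of the set-and-clear bracketing the patch applies around the DSM-initialization calls. It illustrates the pattern only and is not the committed code; the field and function names are taken from the diff below, and the surrounding declarations are elided.

```c
/*
 * Sketch of the workaround: each Gather (Merge) node owns its own DSA
 * area, so the leader installs a given node's area in es_query_dsa only
 * for the duration of the call that consults it, then clears the field
 * so a different Gather node's area cannot be picked up by mistake.
 */
estate->es_query_dsa = pei->area;           /* this Gather's DSA area */
ExecParallelInitializeDSM(planstate, &d);   /* children may read es_query_dsa */
estate->es_query_dsa = NULL;                /* don't leak it past this call */
```

The commit message's longer-term suggestion, dropping es_query_dsa in favor of each node tracking its own dsa_area, would remove the need for this bracketing entirely.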
Diffstat (limited to 'src/backend/executor/execParallel.c')
-rw-r--r-- | src/backend/executor/execParallel.c | 26 |
1 file changed, 14 insertions(+), 12 deletions(-)
diff --git a/src/backend/executor/execParallel.c b/src/backend/executor/execParallel.c
index d57cdbd4e15..6b6064637b8 100644
--- a/src/backend/executor/execParallel.c
+++ b/src/backend/executor/execParallel.c
@@ -330,7 +330,7 @@ EstimateParamExecSpace(EState *estate, Bitmapset *params)
  * parameter array) and then the datum as serialized by datumSerialize().
  */
 static dsa_pointer
-SerializeParamExecParams(EState *estate, Bitmapset *params)
+SerializeParamExecParams(EState *estate, Bitmapset *params, dsa_area *area)
 {
     Size        size;
     int         nparams;
@@ -341,8 +341,8 @@ SerializeParamExecParams(EState *estate, Bitmapset *params)
 
     /* Allocate enough space for the current parameter values. */
     size = EstimateParamExecSpace(estate, params);
-    handle = dsa_allocate(estate->es_query_dsa, size);
-    start_address = dsa_get_address(estate->es_query_dsa, handle);
+    handle = dsa_allocate(area, size);
+    start_address = dsa_get_address(area, handle);
 
     /* First write the number of parameters as a 4-byte integer. */
     nparams = bms_num_members(params);
@@ -737,12 +737,6 @@ ExecInitParallelPlan(PlanState *planstate, EState *estate,
                                                pcxt->seg);
 
         /*
-         * Make the area available to executor nodes running in the leader.
-         * See also ParallelQueryMain which makes it available to workers.
-         */
-        estate->es_query_dsa = pei->area;
-
-        /*
          * Serialize parameters, if any, using DSA storage.  We don't dare use
          * the main parallel query DSM for this because we might relaunch
          * workers after the values have changed (and thus the amount of
@@ -750,7 +744,8 @@ ExecInitParallelPlan(PlanState *planstate, EState *estate,
          */
         if (!bms_is_empty(sendParams))
         {
-            pei->param_exec = SerializeParamExecParams(estate, sendParams);
+            pei->param_exec = SerializeParamExecParams(estate, sendParams,
+                                                       pei->area);
             fpes->param_exec = pei->param_exec;
         }
     }
@@ -763,7 +758,11 @@ ExecInitParallelPlan(PlanState *planstate, EState *estate,
     d.pcxt = pcxt;
     d.instrumentation = instrumentation;
     d.nnodes = 0;
+
+    /* Install our DSA area while initializing the plan. */
+    estate->es_query_dsa = pei->area;
     ExecParallelInitializeDSM(planstate, &d);
+    estate->es_query_dsa = NULL;
 
     /*
      * Make sure that the world hasn't shifted under our feet.  This could
@@ -832,19 +831,22 @@ ExecParallelReinitialize(PlanState *planstate,
     /* Free any serialized parameters from the last round. */
     if (DsaPointerIsValid(fpes->param_exec))
     {
-        dsa_free(estate->es_query_dsa, fpes->param_exec);
+        dsa_free(pei->area, fpes->param_exec);
         fpes->param_exec = InvalidDsaPointer;
     }
 
     /* Serialize current parameter values if required. */
     if (!bms_is_empty(sendParams))
     {
-        pei->param_exec = SerializeParamExecParams(estate, sendParams);
+        pei->param_exec = SerializeParamExecParams(estate, sendParams,
+                                                   pei->area);
         fpes->param_exec = pei->param_exec;
     }
 
     /* Traverse plan tree and let each child node reset associated state. */
+    estate->es_query_dsa = pei->area;
     ExecParallelReInitializeDSM(planstate, pei->pcxt);
+    estate->es_query_dsa = NULL;
 }
 
 /*
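
For readers wondering why es_query_dsa must be valid during those two tree walks: a node's per-node DSM setup code may consult whatever DSA area it finds in estate->es_query_dsa. The callback below is a hypothetical consumer written for illustration; SomeNodeState, SomeNodeShared, and the shared_handle field are invented and are not PostgreSQL types, though real executor nodes fetch estate->es_query_dsa in the same way.

```c
/*
 * Hypothetical node callback showing why the bracketing above matters.
 * The dsa_allocate() call is only safe here because ExecInitParallelPlan
 * installed this Gather's DSA area in es_query_dsa just before walking
 * the plan tree, and cleared it again afterward.
 */
static void
ExecSomeNodeInitializeDSM(SomeNodeState *node, ParallelContext *pcxt)
{
    EState     *estate = node->ps.state;

    /* Allocate this node's shared state from the per-Gather DSA area. */
    node->shared_handle = dsa_allocate(estate->es_query_dsa,
                                       sizeof(SomeNodeShared));
}
```

With one DSA area per Gather (Merge) node, leaving es_query_dsa set between calls would let a second Gather node's children allocate from, or free into, the wrong area, which is the crash this commit fixes.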