From fd7c0fa732d97a4b4ebb58730e6244ea30d0a618 Mon Sep 17 00:00:00 2001
From: Robert Haas
Date: Mon, 18 Dec 2017 12:17:37 -0500
Subject: [PATCH] Fix crashes on plans with multiple Gather (Merge) nodes.

es_query_dsa turns out to be broken by design, because it supposes that
there is only one DSA for the whole query, whereas there is actually one
per Gather (Merge) node.  For now, work around that problem by setting
and clearing the pointer around the sections of code that might need it.

It's probably a better idea to get rid of es_query_dsa altogether in
favor of having each node keep track individually of which DSA is
relevant, but that seems like more than we would want to back-patch.

Thomas Munro, reviewed and tested by Andreas Seltenreich, Amit Kapila,
and by me.

Discussion: http://postgr.es/m/CAEepm=1U6as=brnVvMNixEV2tpi8NuyQoTmO8Qef0-VV+=7MDA@mail.gmail.com
---
 src/backend/executor/execParallel.c    | 26 ++++++++++++++------------
 src/backend/executor/nodeGather.c      |  6 ++++++
 src/backend/executor/nodeGatherMerge.c |  4 ++++
 3 files changed, 24 insertions(+), 12 deletions(-)

diff --git a/src/backend/executor/execParallel.c b/src/backend/executor/execParallel.c
index d57cdbd4e1..6b6064637b 100644
--- a/src/backend/executor/execParallel.c
+++ b/src/backend/executor/execParallel.c
@@ -330,7 +330,7 @@ EstimateParamExecSpace(EState *estate, Bitmapset *params)
  * parameter array) and then the datum as serialized by datumSerialize().
  */
 static dsa_pointer
-SerializeParamExecParams(EState *estate, Bitmapset *params)
+SerializeParamExecParams(EState *estate, Bitmapset *params, dsa_area *area)
 {
 	Size		size;
 	int			nparams;
@@ -341,8 +341,8 @@ SerializeParamExecParams(EState *estate, Bitmapset *params)
 
 	/* Allocate enough space for the current parameter values. */
 	size = EstimateParamExecSpace(estate, params);
-	handle = dsa_allocate(estate->es_query_dsa, size);
-	start_address = dsa_get_address(estate->es_query_dsa, handle);
+	handle = dsa_allocate(area, size);
+	start_address = dsa_get_address(area, handle);
 
 	/* First write the number of parameters as a 4-byte integer. */
 	nparams = bms_num_members(params);
@@ -736,12 +736,6 @@ ExecInitParallelPlan(PlanState *planstate, EState *estate,
 											   LWTRANCHE_PARALLEL_QUERY_DSA,
 											   pcxt->seg);
 
-		/*
-		 * Make the area available to executor nodes running in the leader.
-		 * See also ParallelQueryMain which makes it available to workers.
-		 */
-		estate->es_query_dsa = pei->area;
-
 		/*
 		 * Serialize parameters, if any, using DSA storage.  We don't dare use
 		 * the main parallel query DSM for this because we might relaunch
@@ -750,7 +744,8 @@ ExecInitParallelPlan(PlanState *planstate, EState *estate,
 		 */
 		if (!bms_is_empty(sendParams))
 		{
-			pei->param_exec = SerializeParamExecParams(estate, sendParams);
+			pei->param_exec = SerializeParamExecParams(estate, sendParams,
+													   pei->area);
 			fpes->param_exec = pei->param_exec;
 		}
 	}
@@ -763,7 +758,11 @@ ExecInitParallelPlan(PlanState *planstate, EState *estate,
 	d.pcxt = pcxt;
 	d.instrumentation = instrumentation;
 	d.nnodes = 0;
+
+	/* Install our DSA area while initializing the plan. */
+	estate->es_query_dsa = pei->area;
 	ExecParallelInitializeDSM(planstate, &d);
+	estate->es_query_dsa = NULL;
 
 	/*
 	 * Make sure that the world hasn't shifted under our feet.  This could
@@ -832,19 +831,22 @@ ExecParallelReinitialize(PlanState *planstate,
 	/* Free any serialized parameters from the last round. */
 	if (DsaPointerIsValid(fpes->param_exec))
 	{
-		dsa_free(estate->es_query_dsa, fpes->param_exec);
+		dsa_free(pei->area, fpes->param_exec);
 		fpes->param_exec = InvalidDsaPointer;
 	}
 
 	/* Serialize current parameter values if required. */
 	if (!bms_is_empty(sendParams))
 	{
-		pei->param_exec = SerializeParamExecParams(estate, sendParams);
+		pei->param_exec = SerializeParamExecParams(estate, sendParams,
+												   pei->area);
 		fpes->param_exec = pei->param_exec;
 	}
 
 	/* Traverse plan tree and let each child node reset associated state. */
+	estate->es_query_dsa = pei->area;
 	ExecParallelReInitializeDSM(planstate, pei->pcxt);
+	estate->es_query_dsa = NULL;
 }
 
 /*
diff --git a/src/backend/executor/nodeGather.c b/src/backend/executor/nodeGather.c
index a44cf8409a..1697ae650d 100644
--- a/src/backend/executor/nodeGather.c
+++ b/src/backend/executor/nodeGather.c
@@ -277,7 +277,13 @@ gather_getnext(GatherState *gatherstate)
 
 		if (gatherstate->need_to_scan_locally)
 		{
+			EState	   *estate = gatherstate->ps.state;
+
+			/* Install our DSA area while executing the plan. */
+			estate->es_query_dsa =
+				gatherstate->pei ? gatherstate->pei->area : NULL;
 			outerTupleSlot = ExecProcNode(outerPlan);
+			estate->es_query_dsa = NULL;
 
 			if (!TupIsNull(outerTupleSlot))
 				return outerTupleSlot;
diff --git a/src/backend/executor/nodeGatherMerge.c b/src/backend/executor/nodeGatherMerge.c
index 4a8a59eabf..a69777aa95 100644
--- a/src/backend/executor/nodeGatherMerge.c
+++ b/src/backend/executor/nodeGatherMerge.c
@@ -637,8 +637,12 @@ gather_merge_readnext(GatherMergeState *gm_state, int reader, bool nowait)
 	{
 		PlanState  *outerPlan = outerPlanState(gm_state);
 		TupleTableSlot *outerTupleSlot;
+		EState	   *estate = gm_state->ps.state;
 
+		/* Install our DSA area while executing the plan. */
+		estate->es_query_dsa = gm_state->pei ? gm_state->pei->area : NULL;
 		outerTupleSlot = ExecProcNode(outerPlan);
+		estate->es_query_dsa = NULL;
 
 		if (!TupIsNull(outerTupleSlot))
 		{
-- 
2.40.0