Fix crashes on plans with multiple Gather (Merge) nodes.
author     Robert Haas    Mon, 18 Dec 2017 17:17:37 +0000 (12:17 -0500)
committer  Robert Haas    Mon, 18 Dec 2017 17:31:10 +0000 (12:31 -0500)
es_query_dsa turns out to be broken by design, because it supposes
that there is only one DSA for the whole query, whereas there is
actually one per Gather (Merge) node.  For now, work around that
problem by setting and clearing the pointer around the sections of
code that might need it.  It's probably a better idea to get rid of
es_query_dsa altogether in favor of having each node keep track
individually of which DSA is relevant, but that seems like more than
we would want to back-patch.
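
To make the workaround concrete, here is a minimal sketch of the set-and-clear
pattern (not the committed code; the wrapper name ExecProcNodeWithDSA is
hypothetical, while es_query_dsa, pei->area, and ExecProcNode() are the real
fields and function touched in the hunks below, which inline this pattern at
each call site):

    #include "postgres.h"

    #include "executor/execParallel.h"
    #include "executor/executor.h"

    /*
     * Sketch: run one step of a child plan with this node's own DSA area
     * temporarily installed in the EState, so parallel-aware nodes below
     * us look up the correct per-Gather (Merge) area rather than whichever
     * area happened to be installed last.
     */
    static TupleTableSlot *
    ExecProcNodeWithDSA(PlanState *node, ParallelExecutorInfo *pei)
    {
        EState     *estate = node->state;
        TupleTableSlot *slot;

        /* Install our area (if parallel execution set one up) for this call only. */
        estate->es_query_dsa = pei ? pei->area : NULL;
        slot = ExecProcNode(node);
        estate->es_query_dsa = NULL;

        return slot;
    }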

Thomas Munro, reviewed and tested by Andreas Seltenreich, Amit
Kapila, and by me.

Discussion: http://postgr.es/m/CAEepm=1U6as=brnVvMNixEV2tpi8NuyQoTmO8Qef0-VV+=7MDA@mail.gmail.com

src/backend/executor/execParallel.c
src/backend/executor/nodeGather.c
src/backend/executor/nodeGatherMerge.c

diff --git a/src/backend/executor/execParallel.c b/src/backend/executor/execParallel.c
index 7dda399daf351fc7e5297b985ec2cc42f11c244d..989cf5b80b1aabecf738d1fb67d90ead128aa4af 100644
@@ -543,12 +543,6 @@ ExecInitParallelPlan(PlanState *planstate, EState *estate, int nworkers)
                                        pcxt->seg);
    }
 
-   /*
-    * Make the area available to executor nodes running in the leader.  See
-    * also ParallelQueryMain which makes it available to workers.
-    */
-   estate->es_query_dsa = pei->area;
-
    /*
     * Give parallel-aware nodes a chance to initialize their shared data.
     * This also initializes the elements of instrumentation->ps_instrument,
@@ -557,7 +551,11 @@ ExecInitParallelPlan(PlanState *planstate, EState *estate, int nworkers)
    d.pcxt = pcxt;
    d.instrumentation = instrumentation;
    d.nnodes = 0;
+
+   /* Install our DSA area while initializing the plan. */
+   estate->es_query_dsa = pei->area;
    ExecParallelInitializeDSM(planstate, &d);
+   estate->es_query_dsa = NULL;
 
    /*
     * Make sure that the world hasn't shifted under our feet.  This could
@@ -609,6 +607,8 @@ void
 ExecParallelReinitialize(PlanState *planstate,
                         ParallelExecutorInfo *pei)
 {
+   EState     *estate = planstate->state;
+
    /* Old workers must already be shut down */
    Assert(pei->finished);
 
@@ -618,7 +618,9 @@ ExecParallelReinitialize(PlanState *planstate,
    pei->finished = false;
 
    /* Traverse plan tree and let each child node reset associated state. */
+   estate->es_query_dsa = pei->area;
    ExecParallelReInitializeDSM(planstate, pei->pcxt);
+   estate->es_query_dsa = NULL;
 }
 
 /*
diff --git a/src/backend/executor/nodeGather.c b/src/backend/executor/nodeGather.c
index 89f592828c1a4bcfbe08ae5adb3caf6faa5ad09f..597cbfaa16d21dcee506e6d299d45ce5d2a43943 100644
@@ -278,7 +278,13 @@ gather_getnext(GatherState *gatherstate)
 
        if (gatherstate->need_to_scan_locally)
        {
+           EState *estate = gatherstate->ps.state;
+
+           /* Install our DSA area while executing the plan. */
+           estate->es_query_dsa =
+               gatherstate->pei ? gatherstate->pei->area : NULL;
            outerTupleSlot = ExecProcNode(outerPlan);
+           estate->es_query_dsa = NULL;
 
            if (!TupIsNull(outerTupleSlot))
                return outerTupleSlot;
diff --git a/src/backend/executor/nodeGatherMerge.c b/src/backend/executor/nodeGatherMerge.c
index 6b173543564188e368fe5b781c0fa7c8a12b3281..ee98f4cf30c4c0c2cb880cfa6183ce20f73ef33e 100644
@@ -627,8 +627,12 @@ gather_merge_readnext(GatherMergeState *gm_state, int reader, bool nowait)
        {
            PlanState  *outerPlan = outerPlanState(gm_state);
            TupleTableSlot *outerTupleSlot;
+           EState *estate = gm_state->ps.state;
 
+           /* Install our DSA area while executing the plan. */
+           estate->es_query_dsa = gm_state->pei ? gm_state->pei->area : NULL;
            outerTupleSlot = ExecProcNode(outerPlan);
+           estate->es_query_dsa = NULL;
 
            if (!TupIsNull(outerTupleSlot))
            {