/*-------------------------------------------------------------------------
 *
 * nodeGather.c
 *	  Support routines for scanning a plan via multiple workers.
 *
 * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * A Gather executor launches parallel workers to run multiple copies of a
 * plan.  It can also run the plan itself, if the workers are not available
 * or have not started up yet.  It then merges all of the results it produces
 * and the results from the workers into a single output stream.  Therefore,
 * it will normally be used with a plan where running multiple copies of the
 * same plan does not produce duplicate output, such as parallel-aware
 * SeqScan.
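 *
 * For example (an illustrative plan shape only, not output produced by this
 * file), the planner typically puts a Gather on top of a parallel-aware
 * scan:
 *
 *		Gather
 *		  Workers Planned: 2
 *		  ->  Parallel Seq Scan on some_table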
 *
 * Alternatively, a Gather node can be configured to use just one worker
 * with the single-copy flag set.  In that case, the Gather node runs the
 * plan in a single worker rather than executing it itself, and simply
 * returns whatever tuples that worker returns.  If a worker cannot be
 * obtained, the Gather node runs the plan itself and returns the results.
 * Therefore, a plan used with a single-copy Gather node need not be
 * parallel-aware.
 *
 * IDENTIFICATION
 *	  src/backend/executor/nodeGather.c
 *
 *-------------------------------------------------------------------------
 */

#include "postgres.h"

33 #include "access/relscan.h"
34 #include "access/xact.h"
35 #include "executor/execdebug.h"
36 #include "executor/execParallel.h"
37 #include "executor/nodeGather.h"
38 #include "executor/nodeSubplan.h"
39 #include "executor/tqueue.h"
40 #include "miscadmin.h"
41 #include "optimizer/planmain.h"
43 #include "utils/memutils.h"
44 #include "utils/rel.h"
static TupleTableSlot *ExecGather(PlanState *pstate);
static TupleTableSlot *gather_getnext(GatherState *gatherstate);
static HeapTuple gather_readnext(GatherState *gatherstate);
static void ExecShutdownGatherWorkers(GatherState *node);


/* ----------------------------------------------------------------
 *		ExecInitGather
 * ----------------------------------------------------------------
 */
GatherState *
ExecInitGather(Gather *node, EState *estate, int eflags)
{
	GatherState *gatherstate;
	Plan	   *outerNode;
	bool		hasoid;
	TupleDesc	tupDesc;

	/* Gather node doesn't have innerPlan node. */
	Assert(innerPlan(node) == NULL);

	/*
	 * create state structure
	 */
	gatherstate = makeNode(GatherState);
	gatherstate->ps.plan = (Plan *) node;
	gatherstate->ps.state = estate;
	gatherstate->ps.ExecProcNode = ExecGather;
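
	/*
	 * These fields are adjusted at execution time: "initialized" flips to
	 * true on the first ExecGather() call, need_to_scan_locally is
	 * recomputed once we know how many workers actually launched, and
	 * tuples_needed stays -1 ("fetch all tuples") unless ExecSetTupleBound()
	 * later installs a smaller bound.
	 */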
	gatherstate->initialized = false;
	gatherstate->need_to_scan_locally =
		!node->single_copy && parallel_leader_participation;
	gatherstate->tuples_needed = -1;

	/*
	 * Miscellaneous initialization
	 *
	 * create expression context for node
	 */
	ExecAssignExprContext(estate, &gatherstate->ps);

	/*
	 * Gather doesn't support checking a qual (it's always more efficient to
	 * do it in the child node).
	 */
	Assert(!node->plan.qual);

	/*
	 * tuple table initialization
	 */
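	/* The funnel slot receives tuples read from the workers' tuple queues. */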
	gatherstate->funnel_slot = ExecInitExtraTupleSlot(estate);
	ExecInitResultTupleSlot(estate, &gatherstate->ps);

	/*
	 * now initialize outer plan
	 */
	outerNode = outerPlan(node);
	outerPlanState(gatherstate) = ExecInitNode(outerNode, estate, eflags);

	/*
	 * Initialize funnel slot to same tuple descriptor as outer plan.
	 */
	if (!ExecContextForcesOids(outerPlanState(gatherstate), &hasoid))
		hasoid = false;
	tupDesc = ExecTypeFromTL(outerNode->targetlist, hasoid);
	ExecSetSlotDescriptor(gatherstate->funnel_slot, tupDesc);

	/*
	 * Initialize result tuple type and projection info.
	 */
	ExecAssignResultTypeFromTL(&gatherstate->ps);
	ExecConditionalAssignProjectionInfo(&gatherstate->ps, tupDesc, OUTER_VAR);

	return gatherstate;
}

/* ----------------------------------------------------------------
 *		ExecGather(node)
 *
 *		Scans the relation via multiple workers and returns
 *		the next qualifying tuple.
 * ----------------------------------------------------------------
 */
static TupleTableSlot *
ExecGather(PlanState *pstate)
{
	GatherState *node = castNode(GatherState, pstate);
	TupleTableSlot *slot;
	ExprContext *econtext;

	CHECK_FOR_INTERRUPTS();

	/*
	 * Initialize the parallel context and workers on first execution. We do
	 * this on first execution rather than during node initialization, as it
	 * needs to allocate a large dynamic segment, so it is better to do it
	 * only if it is really needed.
	 */
	if (!node->initialized)
	{
		EState	   *estate = node->ps.state;
		Gather	   *gather = (Gather *) node->ps.plan;

		/*
		 * Sometimes we might have to run without parallelism; but if parallel
		 * mode is active then we can try to fire up some workers.
		 */
		if (gather->num_workers > 0 && estate->es_use_parallel_mode)
		{
			ParallelContext *pcxt;

			/* Initialize, or re-initialize, shared state needed by workers. */
			if (!node->pei)
				node->pei = ExecInitParallelPlan(node->ps.lefttree,
												 estate,
												 gather->initParam,
												 gather->num_workers,
												 node->tuples_needed);
			else
				ExecParallelReinitialize(node->ps.lefttree,
										 node->pei,
										 gather->initParam);

			/*
			 * Register backend workers.  We might not get as many as we
			 * requested, or indeed any at all.
			 */
			pcxt = node->pei->pcxt;
			LaunchParallelWorkers(pcxt);
			/* We save # workers launched for the benefit of EXPLAIN */
			node->nworkers_launched = pcxt->nworkers_launched;

			/* Set up tuple queue readers to read the results. */
			if (pcxt->nworkers_launched > 0)
			{
				ExecParallelCreateReaders(node->pei);
				/* Make a working array showing the active readers */
				node->nreaders = pcxt->nworkers_launched;
				node->reader = (TupleQueueReader **)
					palloc(node->nreaders * sizeof(TupleQueueReader *));
				memcpy(node->reader, node->pei->reader,
					   node->nreaders * sizeof(TupleQueueReader *));
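
				/*
				 * gather_readnext() will compact this working array,
				 * removing entries as individual workers are exhausted;
				 * that's why we keep our own copy rather than scribbling on
				 * pei->reader.
				 */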
			}
			else
			{
				/* No workers?  Then never mind. */
				node->nreaders = 0;
				node->reader = NULL;
			}
			node->nextreader = 0;
		}

		/*
		 * Run the plan locally if there are no workers, or if leader
		 * participation is enabled and this isn't a single-copy Gather.
		 */
		node->need_to_scan_locally = (node->nreaders == 0)
			|| (!gather->single_copy && parallel_leader_participation);
		node->initialized = true;
	}

	/*
	 * Reset per-tuple memory context to free any expression evaluation
	 * storage allocated in the previous tuple cycle.
	 */
	econtext = node->ps.ps_ExprContext;
	ResetExprContext(econtext);

	/*
	 * Get next tuple, either from one of our workers, or by running the plan
	 * ourselves.
	 */
	slot = gather_getnext(node);
	if (TupIsNull(slot))
		return NULL;

	/* If no projection is required, we're done. */
	if (node->ps.ps_ProjInfo == NULL)
		return slot;

	/*
	 * Form the result tuple using ExecProject(), and return it.
	 */
	econtext->ecxt_outertuple = slot;
	return ExecProject(node->ps.ps_ProjInfo);
}

/* ----------------------------------------------------------------
 *		ExecEndGather
 *
 *		frees any storage allocated through C routines.
 * ----------------------------------------------------------------
 */
void
ExecEndGather(GatherState *node)
{
	ExecEndNode(outerPlanState(node));	/* let children clean up first */
	ExecShutdownGather(node);
	ExecFreeExprContext(&node->ps);
	ExecClearTuple(node->ps.ps_ResultTupleSlot);
}

/*
 * Read the next tuple.  We might fetch a tuple from one of the tuple queues
 * using gather_readnext, or if no tuple queue contains a tuple and the
 * single_copy flag is not set, we might generate one locally instead.
 */
static TupleTableSlot *
gather_getnext(GatherState *gatherstate)
{
	PlanState  *outerPlan = outerPlanState(gatherstate);
	TupleTableSlot *outerTupleSlot;
	TupleTableSlot *fslot = gatherstate->funnel_slot;
	HeapTuple	tup;

	while (gatherstate->nreaders > 0 || gatherstate->need_to_scan_locally)
	{
		CHECK_FOR_INTERRUPTS();

		if (gatherstate->nreaders > 0)
		{
			tup = gather_readnext(gatherstate);

			if (HeapTupleIsValid(tup))
			{
				ExecStoreTuple(tup,		/* tuple to store */
							   fslot,	/* slot in which to store the tuple */
							   InvalidBuffer,	/* buffer associated with this
												 * tuple */
							   true);	/* pfree tuple when done with it */
				return fslot;
			}
		}

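		/*
		 * No tuple was available from any worker just now.  If the leader is
		 * allowed to execute the plan itself, try to produce the next tuple
		 * locally.
		 */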
		if (gatherstate->need_to_scan_locally)
		{
			outerTupleSlot = ExecProcNode(outerPlan);

			if (!TupIsNull(outerTupleSlot))
				return outerTupleSlot;

			gatherstate->need_to_scan_locally = false;
		}
	}

	return ExecClearTuple(fslot);
}

/*
 * Attempt to read a tuple from one of our parallel workers.
 */
static HeapTuple
gather_readnext(GatherState *gatherstate)
{
	int			nvisited = 0;

	for (;;)
	{
		TupleQueueReader *reader;
		HeapTuple	tup;
		bool		readerdone;

		/* Check for async events, particularly messages from workers. */
		CHECK_FOR_INTERRUPTS();

		/* Attempt to read a tuple, but don't block if none is available. */
		Assert(gatherstate->nextreader < gatherstate->nreaders);
		reader = gatherstate->reader[gatherstate->nextreader];
		tup = TupleQueueReaderNext(reader, true, &readerdone);

		/*
		 * If this reader is done, remove it from our working array of active
		 * readers.  If all readers are done, we're outta here.
		 */
		if (readerdone)
		{
			Assert(!tup);
			--gatherstate->nreaders;
			if (gatherstate->nreaders == 0)
				return NULL;
			memmove(&gatherstate->reader[gatherstate->nextreader],
					&gatherstate->reader[gatherstate->nextreader + 1],
					sizeof(TupleQueueReader *)
					* (gatherstate->nreaders - gatherstate->nextreader));
			if (gatherstate->nextreader >= gatherstate->nreaders)
				gatherstate->nextreader = 0;
			continue;
		}

		/* If we got a tuple, return it. */
		if (tup)
			return tup;

		/*
		 * Advance nextreader pointer in round-robin fashion.  Note that we
		 * only reach this code if we weren't able to get a tuple from the
		 * current worker.  We used to advance the nextreader pointer after
		 * every tuple, but it turns out to be much more efficient to keep
		 * reading from the same queue until that would require blocking.
		 */
		gatherstate->nextreader++;
		if (gatherstate->nextreader >= gatherstate->nreaders)
			gatherstate->nextreader = 0;

		/* Have we visited every (surviving) TupleQueueReader? */
		nvisited++;
		if (nvisited >= gatherstate->nreaders)
		{
			/*
			 * If (still) running plan locally, return NULL so caller can
			 * generate another tuple from the local copy of the plan.
			 */
			if (gatherstate->need_to_scan_locally)
				return NULL;

			/* Nothing to do except wait for developments. */
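			/*
			 * Our process latch is set whenever a worker writes to its tuple
			 * queue or detaches from it, so waking from this sleep normally
			 * means there is something new to look at.
			 */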
			WaitLatch(MyLatch, WL_LATCH_SET, 0, WAIT_EVENT_EXECUTE_GATHER);
			ResetLatch(MyLatch);
			nvisited = 0;
		}
	}
}

/* ----------------------------------------------------------------
 *		ExecShutdownGatherWorkers
 *
 *		Stop all the parallel workers.
 * ----------------------------------------------------------------
 */
static void
ExecShutdownGatherWorkers(GatherState *node)
{
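	/* First, wait for any still-running workers to finish up. */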
	if (node->pei != NULL)
		ExecParallelFinish(node->pei);

	/* Flush local copy of reader array */
	if (node->reader)
		pfree(node->reader);
	node->reader = NULL;
}

/* ----------------------------------------------------------------
 *		ExecShutdownGather
 *
 *		Destroy the setup for parallel workers including parallel context.
 * ----------------------------------------------------------------
 */
void
ExecShutdownGather(GatherState *node)
{
	ExecShutdownGatherWorkers(node);

	/* Now destroy the parallel context. */
	if (node->pei != NULL)
	{
		ExecParallelCleanup(node->pei);
		node->pei = NULL;
	}
}

/* ----------------------------------------------------------------
 *						Join Support
 * ----------------------------------------------------------------
 */

/* ----------------------------------------------------------------
 *		ExecReScanGather
 *
 *		Prepare to re-scan the result of a Gather.
 * ----------------------------------------------------------------
 */
void
ExecReScanGather(GatherState *node)
{
	Gather	   *gather = (Gather *) node->ps.plan;
	PlanState  *outerPlan = outerPlanState(node);

	/* Make sure any existing workers are gracefully shut down */
	ExecShutdownGatherWorkers(node);

	/* Mark node so that shared state will be rebuilt at next call */
	node->initialized = false;

	/*
	 * Set child node's chgParam to tell it that the next scan might deliver a
	 * different set of rows within the leader process.  (The overall rowset
	 * shouldn't change, but the leader process's subset might; hence nodes
	 * between here and the parallel table scan node mustn't optimize on the
	 * assumption of an unchanging rowset.)
	 */
	if (gather->rescan_param >= 0)
		outerPlan->chgParam = bms_add_member(outerPlan->chgParam,
											 gather->rescan_param);

	/*
	 * If chgParam of subnode is not null then plan will be re-scanned by
	 * first ExecProcNode.  Note: because this does nothing if we have a
	 * rescan_param, it's currently guaranteed that parallel-aware child nodes
	 * will not see a ReScan call until after they get a ReInitializeDSM call.
	 * That ordering might not be something to rely on, though.  A good rule
	 * of thumb is that ReInitializeDSM should reset only shared state, ReScan
	 * should reset only local state, and anything that depends on both of
	 * those steps being finished must wait until the first ExecProcNode call.
	 */
	if (outerPlan->chgParam == NULL)
		ExecReScan(outerPlan);
}