/*-------------------------------------------------------------------------
 *
 * execParallel.c
 *	  Support routines for parallel execution.
 *
 * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * This file contains routines that are intended to support setting up,
 * using, and tearing down a ParallelContext from within the PostgreSQL
 * executor.  The ParallelContext machinery will handle starting the
 * workers and ensuring that their state generally matches that of the
 * leader; see src/backend/access/transam/README.parallel for details.
 * However, we must save and restore relevant executor state, such as
 * any ParamListInfo associated with the query, buffer usage info, and
 * the actual plan to be passed down to the worker.
 *
 * IDENTIFICATION
 *	  src/backend/executor/execParallel.c
 *
 *-------------------------------------------------------------------------
 */

#include "postgres.h"

#include "executor/execParallel.h"
#include "executor/executor.h"
#include "executor/nodeCustom.h"
#include "executor/nodeForeignscan.h"
#include "executor/nodeSeqscan.h"
#include "executor/tqueue.h"
#include "nodes/nodeFuncs.h"
#include "optimizer/planmain.h"
#include "optimizer/planner.h"
#include "storage/spin.h"
#include "tcop/tcopprot.h"
#include "utils/dsa.h"
#include "utils/memutils.h"
#include "utils/snapmgr.h"

/*
 * Magic numbers for parallel executor communication.  We use constants
 * greater than any 32-bit integer here so that values < 2^32 can be used
 * by individual parallel nodes to store their own state.
 */
#define PARALLEL_KEY_PLANNEDSTMT		UINT64CONST(0xE000000000000001)
#define PARALLEL_KEY_PARAMS				UINT64CONST(0xE000000000000002)
#define PARALLEL_KEY_BUFFER_USAGE		UINT64CONST(0xE000000000000003)
#define PARALLEL_KEY_TUPLE_QUEUE		UINT64CONST(0xE000000000000004)
#define PARALLEL_KEY_INSTRUMENTATION	UINT64CONST(0xE000000000000005)
#define PARALLEL_KEY_DSA				UINT64CONST(0xE000000000000006)

#define PARALLEL_TUPLE_QUEUE_SIZE		65536
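
/*
 * Illustrative note: a parallel-aware node typically keys its own shared
 * state by its plan_node_id, which is a small integer and so cannot collide
 * with the executor-level keys above.  A minimal sketch, assuming a
 * hypothetical shared-state struct MyScanShared:
 *
 *		MyScanShared *shared;
 *
 *		shared = shm_toc_allocate(pcxt->toc, sizeof(MyScanShared));
 *		shm_toc_insert(pcxt->toc, node->ss.ps.plan->plan_node_id, shared);
 *
 * and, in a worker:
 *
 *		shared = shm_toc_lookup(toc, node->ss.ps.plan->plan_node_id);
 */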

/*
 * DSM structure for accumulating per-PlanState instrumentation.
 *
 * instrument_options: Same meaning here as in instrument.c.
 *
 * instrument_offset: Offset, relative to the start of this structure,
 * of the first Instrumentation object.  This will depend on the length of
 * the plan_node_id array.
 *
 * num_workers: Number of workers.
 *
 * num_plan_nodes: Number of plan nodes.
 *
 * plan_node_id: Array of plan node IDs for which we are gathering
 * instrumentation from parallel workers.  The length of this array is given
 * by num_plan_nodes.
 */
struct SharedExecutorInstrumentation
{
	int			instrument_options;
	int			instrument_offset;
	int			num_workers;
	int			num_plan_nodes;
	int			plan_node_id[FLEXIBLE_ARRAY_MEMBER];
	/* array of num_plan_nodes * num_workers Instrumentation objects follows */
};
#define GetInstrumentationArray(sei) \
	(AssertVariableIsOfTypeMacro(sei, SharedExecutorInstrumentation *), \
	 (Instrumentation *) (((char *) sei) + sei->instrument_offset))
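
/*
 * For reference, the layout of the chunk this macro indexes into is:
 *
 *		SharedExecutorInstrumentation header fields
 *		int plan_node_id[num_plan_nodes]
 *		padding up to MAXALIGN (instrument_offset points past it)
 *		Instrumentation objects, num_plan_nodes rows of num_workers each
 *
 * so the entry for plan node i and worker n is found with:
 *
 *		GetInstrumentationArray(sei)[i * sei->num_workers + n]
 */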

/* Context object for ExecParallelEstimate. */
typedef struct ExecParallelEstimateContext
{
	ParallelContext *pcxt;
	int			nnodes;
} ExecParallelEstimateContext;

/* Context object for ExecParallelInitializeDSM. */
typedef struct ExecParallelInitializeDSMContext
{
	ParallelContext *pcxt;
	SharedExecutorInstrumentation *instrumentation;
	int			nnodes;
} ExecParallelInitializeDSMContext;

/* Helper functions that run in the parallel leader. */
static char *ExecSerializePlan(Plan *plan, EState *estate);
static bool ExecParallelEstimate(PlanState *node,
					 ExecParallelEstimateContext *e);
static bool ExecParallelInitializeDSM(PlanState *node,
						  ExecParallelInitializeDSMContext *d);
static shm_mq_handle **ExecParallelSetupTupleQueues(ParallelContext *pcxt,
							 bool reinitialize);
static bool ExecParallelRetrieveInstrumentation(PlanState *planstate,
						 SharedExecutorInstrumentation *instrumentation);

/* Helper functions that run in the parallel worker. */
static void ParallelQueryMain(dsm_segment *seg, shm_toc *toc);
static DestReceiver *ExecParallelGetReceiver(dsm_segment *seg, shm_toc *toc);

/*
 * Create a serialized representation of the plan to be sent to each worker.
 */
static char *
ExecSerializePlan(Plan *plan, EState *estate)
{
	PlannedStmt *pstmt;
	ListCell   *tlist;

	/* We can't scribble on the original plan, so make a copy. */
	plan = copyObject(plan);

	/*
	 * The worker will start its own copy of the executor, and that copy will
	 * insert a junk filter if the toplevel node has any resjunk entries.  We
	 * don't want that to happen, because while resjunk columns shouldn't be
	 * sent back to the user, here the tuples are coming back to another
	 * backend which may very well need them.  So mutate the target list
	 * accordingly.  This is sort of a hack; there might be better ways to do
	 * this...
	 */
	foreach(tlist, plan->targetlist)
	{
		TargetEntry *tle = (TargetEntry *) lfirst(tlist);

		tle->resjunk = false;
	}

	/*
	 * Create a dummy PlannedStmt.  Most of the fields don't need to be valid
	 * for our purposes, but the worker will need at least a minimal
	 * PlannedStmt to start the executor.
	 */
	pstmt = makeNode(PlannedStmt);
	pstmt->commandType = CMD_SELECT;
	pstmt->queryId = 0;
	pstmt->hasReturning = false;
	pstmt->hasModifyingCTE = false;
	pstmt->canSetTag = true;
	pstmt->transientPlan = false;
	pstmt->dependsOnRole = false;
	pstmt->parallelModeNeeded = false;
	pstmt->planTree = plan;
	pstmt->rtable = estate->es_range_table;
	pstmt->resultRelations = NIL;
	pstmt->utilityStmt = NULL;
	pstmt->subplans = NIL;
	pstmt->rewindPlanIDs = NULL;
	pstmt->rowMarks = NIL;
	pstmt->relationOids = NIL;
	pstmt->invalItems = NIL;	/* workers can't replan anyway... */
	pstmt->nParamExec = estate->es_plannedstmt->nParamExec;

	/* Return serialized copy of our dummy PlannedStmt. */
	return nodeToString(pstmt);
}

/*
 * Ordinary plan nodes won't do anything here, but parallel-aware plan nodes
 * may need some state which is shared across all parallel workers.  Before
 * we size the DSM, give them a chance to call shm_toc_estimate_chunk or
 * shm_toc_estimate_keys on &pcxt->estimator.
 *
 * While we're at it, count the number of PlanState nodes in the tree, so
 * we know how many per-node instrumentation slots the
 * SharedExecutorInstrumentation structure will need.
 */
static bool
ExecParallelEstimate(PlanState *planstate, ExecParallelEstimateContext *e)
{
	if (planstate == NULL)
		return false;

	/* Count this node. */
	e->nnodes++;

	/* Call estimators for parallel-aware nodes. */
	if (planstate->plan->parallel_aware)
	{
		switch (nodeTag(planstate))
		{
			case T_SeqScanState:
				ExecSeqScanEstimate((SeqScanState *) planstate,
									e->pcxt);
				break;
			case T_ForeignScanState:
				ExecForeignScanEstimate((ForeignScanState *) planstate,
										e->pcxt);
				break;
			case T_CustomScanState:
				ExecCustomScanEstimate((CustomScanState *) planstate,
									   e->pcxt);
				break;
			default:
				break;
		}
	}

	return planstate_tree_walker(planstate, ExecParallelEstimate, e);
}
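
/*
 * A node-level estimator is expected to reserve both the chunk and the key
 * it will consume later in ExecParallelInitializeDSM.  A minimal sketch,
 * using the hypothetical MyScanState/MyScanShared from above:
 *
 *		void
 *		ExecMyScanEstimate(MyScanState *node, ParallelContext *pcxt)
 *		{
 *			shm_toc_estimate_chunk(&pcxt->estimator, sizeof(MyScanShared));
 *			shm_toc_estimate_keys(&pcxt->estimator, 1);
 *		}
 */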

/*
 * Initialize the dynamic shared memory segment that will be used to control
 * parallel execution.
 */
static bool
ExecParallelInitializeDSM(PlanState *planstate,
						  ExecParallelInitializeDSMContext *d)
{
	if (planstate == NULL)
		return false;

	/* If instrumentation is enabled, initialize slot for this node. */
	if (d->instrumentation != NULL)
		d->instrumentation->plan_node_id[d->nnodes] =
			planstate->plan->plan_node_id;

	/* Count this node. */
	d->nnodes++;

	/*
	 * Call initializers for parallel-aware plan nodes.
	 *
	 * Ordinary plan nodes won't do anything here, but parallel-aware plan
	 * nodes may need to initialize shared state in the DSM before parallel
	 * workers are available.  They can allocate the space they previously
	 * estimated using shm_toc_allocate, and add the keys they previously
	 * estimated using shm_toc_insert, in each case targeting pcxt->toc.
	 */
	if (planstate->plan->parallel_aware)
	{
		switch (nodeTag(planstate))
		{
			case T_SeqScanState:
				ExecSeqScanInitializeDSM((SeqScanState *) planstate,
										 d->pcxt);
				break;
			case T_ForeignScanState:
				ExecForeignScanInitializeDSM((ForeignScanState *) planstate,
											 d->pcxt);
				break;
			case T_CustomScanState:
				ExecCustomScanInitializeDSM((CustomScanState *) planstate,
											d->pcxt);
				break;
			default:
				break;
		}
	}

	return planstate_tree_walker(planstate, ExecParallelInitializeDSM, d);
}
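
/*
 * The matching initializer allocates exactly what was estimated and
 * publishes it under the node's plan_node_id.  Sketch, continuing the
 * hypothetical example:
 *
 *		void
 *		ExecMyScanInitializeDSM(MyScanState *node, ParallelContext *pcxt)
 *		{
 *			MyScanShared *shared;
 *
 *			shared = shm_toc_allocate(pcxt->toc, sizeof(MyScanShared));
 *			memset(shared, 0, sizeof(MyScanShared));
 *			shm_toc_insert(pcxt->toc, node->ss.ps.plan->plan_node_id, shared);
 *			node->shared = shared;
 *		}
 */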

/*
 * Set up the response queues that workers will use to return tuples to
 * the leader backend, and attach to them as the receiver.
 */
static shm_mq_handle **
ExecParallelSetupTupleQueues(ParallelContext *pcxt, bool reinitialize)
{
	shm_mq_handle **responseq;
	char	   *tqueuespace;
	int			i;

	/* Skip this if no workers. */
	if (pcxt->nworkers == 0)
		return NULL;

	/* Allocate memory for shared memory queue handles. */
	responseq = (shm_mq_handle **)
		palloc(pcxt->nworkers * sizeof(shm_mq_handle *));

	/*
	 * If not reinitializing, allocate space from the DSM for the queues;
	 * otherwise, find the already allocated space.
	 */
	if (!reinitialize)
		tqueuespace =
			shm_toc_allocate(pcxt->toc,
							 mul_size(PARALLEL_TUPLE_QUEUE_SIZE,
									  pcxt->nworkers));
	else
		tqueuespace = shm_toc_lookup(pcxt->toc, PARALLEL_KEY_TUPLE_QUEUE);

	/* Create the queues, and become the receiver for each. */
	for (i = 0; i < pcxt->nworkers; ++i)
	{
		shm_mq	   *mq;

		mq = shm_mq_create(tqueuespace +
						   ((Size) i) * PARALLEL_TUPLE_QUEUE_SIZE,
						   (Size) PARALLEL_TUPLE_QUEUE_SIZE);

		shm_mq_set_receiver(mq, MyProc);
		responseq[i] = shm_mq_attach(mq, pcxt->seg, NULL);
	}

	/* Add array of queues to shm_toc, so others can find it. */
	if (!reinitialize)
		shm_toc_insert(pcxt->toc, PARALLEL_KEY_TUPLE_QUEUE, tqueuespace);

	/* Return array of handles. */
	return responseq;
}
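
/*
 * The handles returned above are consumed by the Gather node, which wraps
 * each one in a TupleQueueReader to read the tuples a worker sends back.
 * Roughly (see nodeGather.c for the real logic):
 *
 *		reader = CreateTupleQueueReader(pei->tqueue[i], tupDesc);
 *		tuple = TupleQueueReaderNext(reader, nowait, &done);
 */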

/*
 * Re-initialize the parallel executor info so that the parallel context
 * can be reused, for example when the plan is rescanned.
 */
void
ExecParallelReinitialize(ParallelExecutorInfo *pei)
{
	ReinitializeParallelDSM(pei->pcxt);
	pei->tqueue = ExecParallelSetupTupleQueues(pei->pcxt, true);
	pei->finished = false;
}

/*
 * Sets up the required infrastructure for backend workers to perform
 * execution and return results to the main backend.
 */
ParallelExecutorInfo *
ExecInitParallelPlan(PlanState *planstate, EState *estate, int nworkers)
{
	ParallelExecutorInfo *pei;
	ParallelContext *pcxt;
	ExecParallelEstimateContext e;
	ExecParallelInitializeDSMContext d;
	char	   *pstmt_data;
	char	   *pstmt_space;
	char	   *param_space;
	BufferUsage *bufusage_space;
	SharedExecutorInstrumentation *instrumentation = NULL;
	int			pstmt_len;
	int			param_len;
	int			instrumentation_len = 0;
	int			instrument_offset = 0;
	Size		dsa_minsize = dsa_minimum_size();

	/* Allocate object for return value. */
	pei = palloc0(sizeof(ParallelExecutorInfo));
	pei->finished = false;
	pei->planstate = planstate;

	/* Fix up and serialize plan to be sent to workers. */
	pstmt_data = ExecSerializePlan(planstate->plan, estate);

	/* Create a parallel context. */
	pcxt = CreateParallelContext(ParallelQueryMain, nworkers);
	pei->pcxt = pcxt;

	/*
	 * Before telling the parallel context to create a dynamic shared memory
	 * segment, we need to figure out how big it should be.  Estimate space
	 * for the various things we need to store.
	 */

	/* Estimate space for serialized PlannedStmt. */
	pstmt_len = strlen(pstmt_data) + 1;
	shm_toc_estimate_chunk(&pcxt->estimator, pstmt_len);
	shm_toc_estimate_keys(&pcxt->estimator, 1);

	/* Estimate space for serialized ParamListInfo. */
	param_len = EstimateParamListSpace(estate->es_param_list_info);
	shm_toc_estimate_chunk(&pcxt->estimator, param_len);
	shm_toc_estimate_keys(&pcxt->estimator, 1);

	/*
	 * Estimate space for BufferUsage.
	 *
	 * If EXPLAIN is not in use and there are no extensions loaded that care,
	 * we could skip this.  But we have no way of knowing whether anyone's
	 * looking at pgBufferUsage, so do it unconditionally.
	 */
	shm_toc_estimate_chunk(&pcxt->estimator,
						   mul_size(sizeof(BufferUsage), pcxt->nworkers));
	shm_toc_estimate_keys(&pcxt->estimator, 1);

	/* Estimate space for tuple queues. */
	shm_toc_estimate_chunk(&pcxt->estimator,
						mul_size(PARALLEL_TUPLE_QUEUE_SIZE, pcxt->nworkers));
	shm_toc_estimate_keys(&pcxt->estimator, 1);

	/*
	 * Give parallel-aware nodes a chance to add to the estimates, and get a
	 * count of how many PlanState nodes there are.
	 */
	e.pcxt = pcxt;
	e.nnodes = 0;
	ExecParallelEstimate(planstate, &e);

	/* Estimate space for instrumentation, if required. */
	if (estate->es_instrument)
	{
		instrumentation_len =
			offsetof(SharedExecutorInstrumentation, plan_node_id) +
			sizeof(int) * e.nnodes;
		instrumentation_len = MAXALIGN(instrumentation_len);
		instrument_offset = instrumentation_len;
		instrumentation_len +=
			mul_size(sizeof(Instrumentation),
					 mul_size(e.nnodes, nworkers));
		shm_toc_estimate_chunk(&pcxt->estimator, instrumentation_len);
		shm_toc_estimate_keys(&pcxt->estimator, 1);
	}

	/* Estimate space for DSA area. */
	shm_toc_estimate_chunk(&pcxt->estimator, dsa_minsize);
	shm_toc_estimate_keys(&pcxt->estimator, 1);

	/* Everyone's had a chance to ask for space, so now create the DSM. */
	InitializeParallelDSM(pcxt);

	/*
	 * OK, now we have a dynamic shared memory segment, and it should be big
	 * enough to store all of the data we estimated we would want to put into
	 * it, plus whatever general stuff (not specifically executor-related)
	 * the ParallelContext itself needs to store there.  None of the space we
	 * asked for has been allocated or initialized yet, though, so do that.
	 */

	/* Store serialized PlannedStmt. */
	pstmt_space = shm_toc_allocate(pcxt->toc, pstmt_len);
	memcpy(pstmt_space, pstmt_data, pstmt_len);
	shm_toc_insert(pcxt->toc, PARALLEL_KEY_PLANNEDSTMT, pstmt_space);

	/* Store serialized ParamListInfo. */
	param_space = shm_toc_allocate(pcxt->toc, param_len);
	shm_toc_insert(pcxt->toc, PARALLEL_KEY_PARAMS, param_space);
	SerializeParamList(estate->es_param_list_info, &param_space);

	/* Allocate space for each worker's BufferUsage; no need to initialize. */
	bufusage_space = shm_toc_allocate(pcxt->toc,
							  mul_size(sizeof(BufferUsage), pcxt->nworkers));
	shm_toc_insert(pcxt->toc, PARALLEL_KEY_BUFFER_USAGE, bufusage_space);
	pei->buffer_usage = bufusage_space;

	/* Set up tuple queues. */
	pei->tqueue = ExecParallelSetupTupleQueues(pcxt, false);

	/*
	 * If instrumentation options were supplied, allocate space for the data.
	 * It only gets partially initialized here; the rest happens during
	 * ExecParallelInitializeDSM.
	 */
	if (estate->es_instrument)
	{
		Instrumentation *instrument;
		int			i;

		instrumentation = shm_toc_allocate(pcxt->toc, instrumentation_len);
		instrumentation->instrument_options = estate->es_instrument;
		instrumentation->instrument_offset = instrument_offset;
		instrumentation->num_workers = nworkers;
		instrumentation->num_plan_nodes = e.nnodes;
		instrument = GetInstrumentationArray(instrumentation);
		for (i = 0; i < nworkers * e.nnodes; ++i)
			InstrInit(&instrument[i], estate->es_instrument);
		shm_toc_insert(pcxt->toc, PARALLEL_KEY_INSTRUMENTATION,
					   instrumentation);
		pei->instrumentation = instrumentation;
	}

	/*
	 * Create a DSA area that can be used by the leader and all workers.
	 * (However, if we failed to create a DSM and are using private memory
	 * instead, then skip this.)
	 */
	if (pcxt->seg != NULL)
	{
		char	   *area_space;

		area_space = shm_toc_allocate(pcxt->toc, dsa_minsize);
		shm_toc_insert(pcxt->toc, PARALLEL_KEY_DSA, area_space);
		pei->area = dsa_create_in_place(area_space, dsa_minsize,
										LWTRANCHE_PARALLEL_QUERY_DSA,
										"parallel_query_dsa",
										pcxt->seg);
	}

	/*
	 * Make the area available to executor nodes running in the leader.  See
	 * also ParallelQueryMain which makes it available to workers.
	 */
	estate->es_query_dsa = pei->area;

	/*
	 * Give parallel-aware nodes a chance to initialize their shared data.
	 * This also fills in the plan_node_id array in *instrumentation, if
	 * instrumentation is enabled.
	 */
	d.pcxt = pcxt;
	d.instrumentation = instrumentation;
	d.nnodes = 0;
	ExecParallelInitializeDSM(planstate, &d);

	/*
	 * Make sure that the world hasn't shifted out from under our feet.  This
	 * could probably just be an Assert(), but let's be conservative for now.
	 */
	if (e.nnodes != d.nnodes)
		elog(ERROR, "inconsistent count of PlanState nodes");

	/* OK, we're ready to rock and roll. */
	return pei;
}
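
/*
 * For illustration, the expected leader-side calling sequence, roughly what
 * nodeGather.c does, is:
 *
 *		pei = ExecInitParallelPlan(outerPlanState(node), estate, nworkers);
 *		LaunchParallelWorkers(pei->pcxt);
 *		... read tuples from pei->tqueue ...
 *		ExecParallelFinish(pei);	-- wait and accumulate statistics
 *		ExecParallelCleanup(pei);	-- tear down the DSM and context
 *
 * with ExecParallelReinitialize() in between if the plan is rescanned.
 */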

/*
 * Copy instrumentation information about this node and its descendants from
 * dynamic shared memory.
 */
static bool
ExecParallelRetrieveInstrumentation(PlanState *planstate,
						 SharedExecutorInstrumentation *instrumentation)
{
	Instrumentation *instrument;
	int			i;
	int			n;
	Size		ibytes;
	int			plan_node_id = planstate->plan->plan_node_id;
	MemoryContext oldcontext;

	/* Find the instrumentation for this node. */
	for (i = 0; i < instrumentation->num_plan_nodes; ++i)
		if (instrumentation->plan_node_id[i] == plan_node_id)
			break;
	if (i >= instrumentation->num_plan_nodes)
		elog(ERROR, "plan node %d not found", plan_node_id);

	/* Accumulate the statistics from all workers. */
	instrument = GetInstrumentationArray(instrumentation);
	instrument += i * instrumentation->num_workers;
	for (n = 0; n < instrumentation->num_workers; ++n)
		InstrAggNode(planstate->instrument, &instrument[n]);

	/*
	 * Also store the per-worker detail.
	 *
	 * Worker instrumentation should be allocated in the same context as the
	 * regular instrumentation information, which is the per-query context.
	 * Switch into per-query memory context.
	 */
	oldcontext = MemoryContextSwitchTo(planstate->state->es_query_cxt);
	ibytes = mul_size(instrumentation->num_workers, sizeof(Instrumentation));
	planstate->worker_instrument =
		palloc(ibytes + offsetof(WorkerInstrumentation, instrument));
	MemoryContextSwitchTo(oldcontext);

	planstate->worker_instrument->num_workers = instrumentation->num_workers;
	memcpy(&planstate->worker_instrument->instrument, instrument, ibytes);

	return planstate_tree_walker(planstate, ExecParallelRetrieveInstrumentation,
								 instrumentation);
}

/*
 * Finish parallel execution.  We wait for parallel workers to finish, and
 * accumulate their buffer usage and instrumentation.
 */
void
ExecParallelFinish(ParallelExecutorInfo *pei)
{
	int			i;

	if (pei->finished)
		return;

	/* First, wait for the workers to finish. */
	WaitForParallelWorkersToFinish(pei->pcxt);

	/* Next, accumulate buffer usage. */
	for (i = 0; i < pei->pcxt->nworkers_launched; ++i)
		InstrAccumParallelQuery(&pei->buffer_usage[i]);

	/* Finally, accumulate instrumentation, if any. */
	if (pei->instrumentation)
		ExecParallelRetrieveInstrumentation(pei->planstate,
											pei->instrumentation);

	pei->finished = true;
}

/*
 * Clean up whatever ParallelExecutorInfo resources still exist after
 * ExecParallelFinish.  We separate these routines because someone might
 * want to examine the contents of the DSM after ExecParallelFinish and
 * before calling this routine.
 */
void
ExecParallelCleanup(ParallelExecutorInfo *pei)
{
	if (pei->area != NULL)
	{
		dsa_detach(pei->area);
		pei->area = NULL;
	}
	if (pei->pcxt != NULL)
	{
		DestroyParallelContext(pei->pcxt);
		pei->pcxt = NULL;
	}
	pfree(pei);
}

/*
 * Create a DestReceiver to write tuples we produce to the shm_mq designated
 * for that purpose.
 */
static DestReceiver *
ExecParallelGetReceiver(dsm_segment *seg, shm_toc *toc)
{
	char	   *mqspace;
	shm_mq	   *mq;

	mqspace = shm_toc_lookup(toc, PARALLEL_KEY_TUPLE_QUEUE);
	mqspace += ParallelWorkerNumber * PARALLEL_TUPLE_QUEUE_SIZE;
	mq = (shm_mq *) mqspace;
	shm_mq_set_sender(mq, MyProc);
	return CreateTupleQueueDestReceiver(shm_mq_attach(mq, seg, NULL));
}

/*
 * Create a QueryDesc for the PlannedStmt we are to execute, and return it.
 */
static QueryDesc *
ExecParallelGetQueryDesc(shm_toc *toc, DestReceiver *receiver,
						 int instrument_options)
{
	char	   *pstmtspace;
	char	   *paramspace;
	PlannedStmt *pstmt;
	ParamListInfo paramLI;

	/* Reconstruct leader-supplied PlannedStmt. */
	pstmtspace = shm_toc_lookup(toc, PARALLEL_KEY_PLANNEDSTMT);
	pstmt = (PlannedStmt *) stringToNode(pstmtspace);

	/* Reconstruct ParamListInfo. */
	paramspace = shm_toc_lookup(toc, PARALLEL_KEY_PARAMS);
	paramLI = RestoreParamList(&paramspace);

	/*
	 * Create a QueryDesc for the query.
	 *
	 * It's not obvious how to obtain the query string from here; and even if
	 * we could, copying it would take more cycles than not copying it.  But
	 * it's a bit unsatisfying to just use a dummy string here, so consider
	 * revising this someday.
	 */
	return CreateQueryDesc(pstmt,
						   "<parallel query>",
						   GetActiveSnapshot(), InvalidSnapshot,
						   receiver, paramLI, instrument_options);
}

/*
 * Copy instrumentation information from this node and its descendants into
 * dynamic shared memory, so that the parallel leader can retrieve it.
 */
static bool
ExecParallelReportInstrumentation(PlanState *planstate,
						 SharedExecutorInstrumentation *instrumentation)
{
	int			i;
	int			plan_node_id = planstate->plan->plan_node_id;
	Instrumentation *instrument;

	InstrEndLoop(planstate->instrument);

	/*
	 * If we shuffled the plan_node_id array into sorted order, we could use
	 * binary search here.  This might matter someday if we're pushing down
	 * sufficiently large plan trees.  For now, do it the slow, dumb way.
	 */
	for (i = 0; i < instrumentation->num_plan_nodes; ++i)
		if (instrumentation->plan_node_id[i] == plan_node_id)
			break;
	if (i >= instrumentation->num_plan_nodes)
		elog(ERROR, "plan node %d not found", plan_node_id);

	/*
	 * Add our statistics to the per-node, per-worker totals.  It's possible
	 * that this could happen more than once if we relaunched workers.
	 */
	instrument = GetInstrumentationArray(instrumentation);
	instrument += i * instrumentation->num_workers;
	Assert(IsParallelWorker());
	Assert(ParallelWorkerNumber < instrumentation->num_workers);
	InstrAggNode(&instrument[ParallelWorkerNumber], planstate->instrument);

	return planstate_tree_walker(planstate, ExecParallelReportInstrumentation,
								 instrumentation);
}

/*
 * Initialize the PlanState and its descendants with the information
 * retrieved from shared memory.  This has to be done once the PlanState
 * is allocated and initialized by the executor; that is, after
 * ExecutorStart().
 */
static bool
ExecParallelInitializeWorker(PlanState *planstate, shm_toc *toc)
{
	if (planstate == NULL)
		return false;

	/* Call initializers for parallel-aware plan nodes. */
	if (planstate->plan->parallel_aware)
	{
		switch (nodeTag(planstate))
		{
			case T_SeqScanState:
				ExecSeqScanInitializeWorker((SeqScanState *) planstate, toc);
				break;
			case T_ForeignScanState:
				ExecForeignScanInitializeWorker((ForeignScanState *) planstate,
												toc);
				break;
			case T_CustomScanState:
				ExecCustomScanInitializeWorker((CustomScanState *) planstate,
											   toc);
				break;
			default:
				break;
		}
	}

	return planstate_tree_walker(planstate, ExecParallelInitializeWorker, toc);
}
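
/*
 * The worker-side hook of the hypothetical node sketched earlier simply
 * looks up the state its leader published:
 *
 *		void
 *		ExecMyScanInitializeWorker(MyScanState *node, shm_toc *toc)
 *		{
 *			node->shared = shm_toc_lookup(toc,
 *										  node->ss.ps.plan->plan_node_id);
 *		}
 */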

/*
 * Main entrypoint for parallel query worker processes.
 *
 * We reach this function from ParallelWorkerMain, so the setup necessary to
 * create a sensible parallel environment has already been done;
 * ParallelWorkerMain worries about stuff like the transaction state, combo
 * CID mappings, and GUC values, so we don't need to deal with any of that
 * here.
 *
 * Our job is to deal with concerns specific to the executor.  The parallel
 * group leader will have stored a serialized PlannedStmt, and it's our job
 * to execute that plan and write the resulting tuples to the appropriate
 * tuple queue.  Various bits of supporting information that we need in
 * order to do this are also stored in the dsm_segment and can be accessed
 * through the shm_toc.
 */
static void
ParallelQueryMain(dsm_segment *seg, shm_toc *toc)
{
	BufferUsage *buffer_usage;
	DestReceiver *receiver;
	QueryDesc  *queryDesc;
	SharedExecutorInstrumentation *instrumentation;
	int			instrument_options = 0;
	void	   *area_space;
	dsa_area   *area;

	/* Set up DestReceiver, SharedExecutorInstrumentation, and QueryDesc. */
	receiver = ExecParallelGetReceiver(seg, toc);
	instrumentation = shm_toc_lookup(toc, PARALLEL_KEY_INSTRUMENTATION);
	if (instrumentation != NULL)
		instrument_options = instrumentation->instrument_options;
	queryDesc = ExecParallelGetQueryDesc(toc, receiver, instrument_options);

	/* Prepare to track buffer usage during query execution. */
	InstrStartParallelQuery();

	/* Attach to the dynamic shared memory area. */
	area_space = shm_toc_lookup(toc, PARALLEL_KEY_DSA);
	area = dsa_attach_in_place(area_space, seg);

	/* Start up the executor */
	ExecutorStart(queryDesc, 0);

	/* Special executor initialization steps for parallel workers */
	queryDesc->planstate->state->es_query_dsa = area;
	ExecParallelInitializeWorker(queryDesc->planstate, toc);

	/* Run the plan */
	ExecutorRun(queryDesc, ForwardScanDirection, 0L);

	/* Shut down the executor */
	ExecutorFinish(queryDesc);

	/* Report buffer usage during parallel execution. */
	buffer_usage = shm_toc_lookup(toc, PARALLEL_KEY_BUFFER_USAGE);
	InstrEndParallelQuery(&buffer_usage[ParallelWorkerNumber]);

	/* Report instrumentation data if any instrumentation options are set. */
	if (instrumentation != NULL)
		ExecParallelReportInstrumentation(queryDesc->planstate,
										  instrumentation);

	/* Must do this after capturing instrumentation. */
	ExecutorEnd(queryDesc);

	/* Cleanup. */
	dsa_detach(area);
	FreeQueryDesc(queryDesc);
	(*receiver->rDestroy) (receiver);
}