/*-------------------------------------------------------------------------
 *
 * execParallel.c
 *	  Support routines for parallel execution.
 *
 * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * This file contains routines that are intended to support setting up,
 * using, and tearing down a ParallelContext from within the PostgreSQL
 * executor.  The ParallelContext machinery will handle starting the
 * workers and ensuring that their state generally matches that of the
 * leader; see src/backend/access/transam/README.parallel for details.
 * However, we must save and restore relevant executor state, such as
 * any ParamListInfo associated with the query, buffer usage info, and
 * the actual plan to be passed down to the worker.
 *
 * IDENTIFICATION
 *	  src/backend/executor/execParallel.c
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include "executor/execParallel.h"
#include "executor/executor.h"
#include "executor/nodeCustom.h"
#include "executor/nodeForeignscan.h"
#include "executor/nodeSeqscan.h"
#include "executor/tqueue.h"
#include "nodes/nodeFuncs.h"
#include "optimizer/planmain.h"
#include "optimizer/planner.h"
#include "storage/spin.h"
#include "tcop/tcopprot.h"
#include "utils/memutils.h"
#include "utils/snapmgr.h"
/*
 * Magic numbers for parallel executor communication.  We use constants
 * greater than any 32-bit integer here so that values < 2^32 can be used
 * by individual parallel nodes to store their own state.
 */
#define PARALLEL_KEY_PLANNEDSTMT		UINT64CONST(0xE000000000000001)
#define PARALLEL_KEY_PARAMS				UINT64CONST(0xE000000000000002)
#define PARALLEL_KEY_BUFFER_USAGE		UINT64CONST(0xE000000000000003)
#define PARALLEL_KEY_TUPLE_QUEUE		UINT64CONST(0xE000000000000004)
#define PARALLEL_KEY_INSTRUMENTATION	UINT64CONST(0xE000000000000005)
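
/* Size (in bytes) of each leader-worker tuple queue created below. */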
#define PARALLEL_TUPLE_QUEUE_SIZE		65536
/*
 * DSM structure for accumulating per-PlanState instrumentation.
 *
 * instrument_options: Same meaning here as in instrument.c.
 *
 * instrument_offset: Offset, relative to the start of this structure,
 * of the first Instrumentation object.  This will depend on the length of
 * the plan_node_id array.
 *
 * num_workers: Number of workers.
 *
 * num_plan_nodes: Number of plan nodes.
 *
 * plan_node_id: Array of IDs of the plan nodes for which we are gathering
 * instrumentation from parallel workers.  The length of this array is given
 * by num_plan_nodes.
 */
struct SharedExecutorInstrumentation
{
	int			instrument_options;
	int			instrument_offset;
	int			num_workers;
	int			num_plan_nodes;
	int			plan_node_id[FLEXIBLE_ARRAY_MEMBER];
	/* array of num_plan_nodes * num_workers Instrumentation objects follows */
};
#define GetInstrumentationArray(sei) \
	(AssertVariableIsOfTypeMacro(sei, SharedExecutorInstrumentation *), \
	 (Instrumentation *) (((char *) sei) + sei->instrument_offset))
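
/*
 * Layout note (illustration only): the Instrumentation objects form a
 * num_plan_nodes x num_workers matrix stored row-major, so the entry for
 * plan_node_id[i] and worker n is
 *		GetInstrumentationArray(sei)[i * sei->num_workers + n],
 * which is how the retrieval and reporting code below indexes it.
 */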
/* Context object for ExecParallelEstimate. */
typedef struct ExecParallelEstimateContext
{
	ParallelContext *pcxt;
	int			nnodes;
} ExecParallelEstimateContext;

/* Context object for ExecParallelInitializeDSM. */
typedef struct ExecParallelInitializeDSMContext
{
	ParallelContext *pcxt;
	SharedExecutorInstrumentation *instrumentation;
	int			nnodes;
} ExecParallelInitializeDSMContext;
/* Helper functions that run in the parallel leader. */
static char *ExecSerializePlan(Plan *plan, EState *estate);
static bool ExecParallelEstimate(PlanState *node,
					 ExecParallelEstimateContext *e);
static bool ExecParallelInitializeDSM(PlanState *node,
						  ExecParallelInitializeDSMContext *d);
static shm_mq_handle **ExecParallelSetupTupleQueues(ParallelContext *pcxt,
							 bool reinitialize);
static bool ExecParallelRetrieveInstrumentation(PlanState *planstate,
							  SharedExecutorInstrumentation *instrumentation);

/* Helper functions that run in the parallel worker. */
static void ParallelQueryMain(dsm_segment *seg, shm_toc *toc);
static DestReceiver *ExecParallelGetReceiver(dsm_segment *seg, shm_toc *toc);
/*
 * Create a serialized representation of the plan to be sent to each worker.
 */
static char *
ExecSerializePlan(Plan *plan, EState *estate)
{
	PlannedStmt *pstmt;
	ListCell   *tlist;

	/* We can't scribble on the original plan, so make a copy. */
	plan = copyObject(plan);

	/*
	 * The worker will start its own copy of the executor, and that copy will
	 * insert a junk filter if the toplevel node has any resjunk entries.  We
	 * don't want that to happen, because while resjunk columns shouldn't be
	 * sent back to the user, here the tuples are coming back to another
	 * backend which may very well need them.  So mutate the target list
	 * accordingly.  This is sort of a hack; there might be better ways to do
	 * this...
	 */
	foreach(tlist, plan->targetlist)
	{
		TargetEntry *tle = (TargetEntry *) lfirst(tlist);

		tle->resjunk = false;
	}

	/*
	 * Create a dummy PlannedStmt.  Most of the fields don't need to be valid
	 * for our purposes, but the worker will need at least a minimal
	 * PlannedStmt to start the executor.
	 */
	pstmt = makeNode(PlannedStmt);
	pstmt->commandType = CMD_SELECT;
	pstmt->hasReturning = false;
	pstmt->hasModifyingCTE = false;
	pstmt->canSetTag = true;
	pstmt->transientPlan = false;
	pstmt->dependsOnRole = false;
	pstmt->parallelModeNeeded = false;
	pstmt->planTree = plan;
	pstmt->rtable = estate->es_range_table;
	pstmt->resultRelations = NIL;
	pstmt->utilityStmt = NULL;
	pstmt->subplans = NIL;
	pstmt->rewindPlanIDs = NULL;
	pstmt->rowMarks = NIL;
	pstmt->relationOids = NIL;
	pstmt->invalItems = NIL;	/* workers can't replan anyway... */
	pstmt->nParamExec = estate->es_plannedstmt->nParamExec;

	/* Return serialized copy of our dummy PlannedStmt. */
	return nodeToString(pstmt);
}
/*
 * Ordinary plan nodes won't do anything here, but parallel-aware plan nodes
 * may need some state which is shared across all parallel workers.  Before
 * we size the DSM, give them a chance to call shm_toc_estimate_chunk or
 * shm_toc_estimate_keys on &pcxt->estimator.
 *
 * While we're at it, count the number of PlanState nodes in the tree, so
 * we know how many Instrumentation slots SharedExecutorInstrumentation needs.
 */
static bool
ExecParallelEstimate(PlanState *planstate, ExecParallelEstimateContext *e)
{
	if (planstate == NULL)
		return false;

	/* Count this node. */
	e->nnodes++;

	/* Call estimators for parallel-aware nodes. */
	if (planstate->plan->parallel_aware)
	{
		switch (nodeTag(planstate))
		{
			case T_SeqScanState:
				ExecSeqScanEstimate((SeqScanState *) planstate, e->pcxt);
				break;
			case T_ForeignScanState:
				ExecForeignScanEstimate((ForeignScanState *) planstate, e->pcxt);
				break;
			case T_CustomScanState:
				ExecCustomScanEstimate((CustomScanState *) planstate, e->pcxt);
				break;
			default:
				break;
		}
	}

	return planstate_tree_walker(planstate, ExecParallelEstimate, e);
}
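
/*
 * Illustration only (a sketch, not part of this file's API): a parallel-aware
 * node's estimate callback normally just reserves space and a key in the
 * estimator, along the lines of this hypothetical "FooScan" node:
 *
 *		void
 *		ExecFooScanEstimate(FooScanState *node, ParallelContext *pcxt)
 *		{
 *			node->pscan_len = sizeof(ParallelFooScanDescData);
 *			shm_toc_estimate_chunk(&pcxt->estimator, node->pscan_len);
 *			shm_toc_estimate_keys(&pcxt->estimator, 1);
 *		}
 */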
/*
 * Initialize the dynamic shared memory segment that will be used to control
 * parallel execution.
 */
static bool
ExecParallelInitializeDSM(PlanState *planstate,
						  ExecParallelInitializeDSMContext *d)
{
	if (planstate == NULL)
		return false;

	/* If instrumentation is enabled, initialize slot for this node. */
	if (d->instrumentation != NULL)
		d->instrumentation->plan_node_id[d->nnodes] =
			planstate->plan->plan_node_id;

	/* Count this node. */
	d->nnodes++;

	/*
	 * Call initializers for parallel-aware plan nodes.
	 *
	 * Ordinary plan nodes won't do anything here, but parallel-aware plan
	 * nodes may need to initialize shared state in the DSM before parallel
	 * workers are available.  They can allocate the space they previously
	 * estimated using shm_toc_allocate, and add the keys they previously
	 * estimated using shm_toc_insert, in each case targeting pcxt->toc.
	 */
	if (planstate->plan->parallel_aware)
	{
		switch (nodeTag(planstate))
		{
			case T_SeqScanState:
				ExecSeqScanInitializeDSM((SeqScanState *) planstate, d->pcxt);
				break;
			case T_ForeignScanState:
				ExecForeignScanInitializeDSM((ForeignScanState *) planstate, d->pcxt);
				break;
			case T_CustomScanState:
				ExecCustomScanInitializeDSM((CustomScanState *) planstate, d->pcxt);
				break;
			default:
				break;
		}
	}

	return planstate_tree_walker(planstate, ExecParallelInitializeDSM, d);
}
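
/*
 * Illustration only, continuing the hypothetical "FooScan" sketch above: the
 * matching DSM initializer allocates the space it estimated and publishes it
 * under a node-private key (a value below 2^32, per the comment at the top
 * of this file; plan_node_id is a convenient choice):
 *
 *		void
 *		ExecFooScanInitializeDSM(FooScanState *node, ParallelContext *pcxt)
 *		{
 *			ParallelFooScanDesc pfscan;
 *
 *			pfscan = shm_toc_allocate(pcxt->toc, node->pscan_len);
 *			shm_toc_insert(pcxt->toc, node->ss.ps.plan->plan_node_id, pfscan);
 *			node->pfscan = pfscan;
 *		}
 */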
/*
 * Set up the response queues through which parallel workers return tuples
 * to the main backend.
 */
static shm_mq_handle **
ExecParallelSetupTupleQueues(ParallelContext *pcxt, bool reinitialize)
{
	shm_mq_handle **responseq;
	char	   *tqueuespace;
	int			i;

	/* Skip this if no workers. */
	if (pcxt->nworkers == 0)
		return NULL;

	/* Allocate memory for shared memory queue handles. */
	responseq = (shm_mq_handle **)
		palloc(pcxt->nworkers * sizeof(shm_mq_handle *));

	/*
	 * If not reinitializing, allocate space from the DSM for the queues;
	 * otherwise, find the already allocated space.
	 */
	if (!reinitialize)
		tqueuespace =
			shm_toc_allocate(pcxt->toc,
							 mul_size(PARALLEL_TUPLE_QUEUE_SIZE,
									  pcxt->nworkers));
	else
		tqueuespace = shm_toc_lookup(pcxt->toc, PARALLEL_KEY_TUPLE_QUEUE);

	/* Create the queues, and become the receiver for each. */
	for (i = 0; i < pcxt->nworkers; ++i)
	{
		shm_mq	   *mq;

		mq = shm_mq_create(tqueuespace +
						   ((Size) i) * PARALLEL_TUPLE_QUEUE_SIZE,
						   (Size) PARALLEL_TUPLE_QUEUE_SIZE);

		shm_mq_set_receiver(mq, MyProc);
		responseq[i] = shm_mq_attach(mq, pcxt->seg, NULL);
	}

	/* Add array of queues to shm_toc, so others can find it. */
	if (!reinitialize)
		shm_toc_insert(pcxt->toc, PARALLEL_KEY_TUPLE_QUEUE, tqueuespace);

	/* Return array of handles. */
	return responseq;
}
/*
 * Re-initialize the parallel executor info so that it can be reused for a
 * subsequent execution of the same plan.
 */
void
ExecParallelReinitialize(ParallelExecutorInfo *pei)
{
	ReinitializeParallelDSM(pei->pcxt);
	pei->tqueue = ExecParallelSetupTupleQueues(pei->pcxt, true);
	pei->finished = false;
}
/*
 * Sets up the required infrastructure for backend workers to perform
 * execution and return results to the main backend.
 */
ParallelExecutorInfo *
ExecInitParallelPlan(PlanState *planstate, EState *estate, int nworkers)
{
	ParallelExecutorInfo *pei;
	ParallelContext *pcxt;
	ExecParallelEstimateContext e;
	ExecParallelInitializeDSMContext d;
	char	   *pstmt_data;
	char	   *pstmt_space;
	char	   *param_space;
	BufferUsage *bufusage_space;
	SharedExecutorInstrumentation *instrumentation = NULL;
	int			pstmt_len;
	int			param_len;
	int			instrumentation_len = 0;
	int			instrument_offset = 0;

	/* Allocate object for return value. */
	pei = palloc0(sizeof(ParallelExecutorInfo));
	pei->finished = false;
	pei->planstate = planstate;

	/* Fix up and serialize plan to be sent to workers. */
	pstmt_data = ExecSerializePlan(planstate->plan, estate);

	/* Create a parallel context. */
	pcxt = CreateParallelContext(ParallelQueryMain, nworkers);
	pei->pcxt = pcxt;
	/*
	 * Before telling the parallel context to create a dynamic shared memory
	 * segment, we need to figure out how big it should be.  Estimate space
	 * for the various things we need to store.
	 */

	/* Estimate space for serialized PlannedStmt. */
	pstmt_len = strlen(pstmt_data) + 1;
	shm_toc_estimate_chunk(&pcxt->estimator, pstmt_len);
	shm_toc_estimate_keys(&pcxt->estimator, 1);

	/* Estimate space for serialized ParamListInfo. */
	param_len = EstimateParamListSpace(estate->es_param_list_info);
	shm_toc_estimate_chunk(&pcxt->estimator, param_len);
	shm_toc_estimate_keys(&pcxt->estimator, 1);

	/*
	 * Estimate space for BufferUsage.
	 *
	 * If EXPLAIN is not in use and there are no extensions loaded that care,
	 * we could skip this.  But we have no way of knowing whether anyone's
	 * looking at pgBufferUsage, so do it unconditionally.
	 */
	shm_toc_estimate_chunk(&pcxt->estimator,
						   mul_size(sizeof(BufferUsage), pcxt->nworkers));
	shm_toc_estimate_keys(&pcxt->estimator, 1);

	/* Estimate space for tuple queues. */
	shm_toc_estimate_chunk(&pcxt->estimator,
						   mul_size(PARALLEL_TUPLE_QUEUE_SIZE, pcxt->nworkers));
	shm_toc_estimate_keys(&pcxt->estimator, 1);

	/*
	 * Give parallel-aware nodes a chance to add to the estimates, and get a
	 * count of how many PlanState nodes there are.
	 */
	e.pcxt = pcxt;
	e.nnodes = 0;
	ExecParallelEstimate(planstate, &e);
	/* Estimate space for instrumentation, if required. */
	if (estate->es_instrument)
	{
		instrumentation_len =
			offsetof(SharedExecutorInstrumentation, plan_node_id) +
			sizeof(int) * e.nnodes;
		instrumentation_len = MAXALIGN(instrumentation_len);
		instrument_offset = instrumentation_len;
		instrumentation_len +=
			mul_size(sizeof(Instrumentation),
					 mul_size(e.nnodes, nworkers));
		shm_toc_estimate_chunk(&pcxt->estimator, instrumentation_len);
		shm_toc_estimate_keys(&pcxt->estimator, 1);
	}
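
	/*
	 * To illustrate the layout computed above: with, say, e.nnodes = 3 and
	 * nworkers = 2, the chunk holds the SharedExecutorInstrumentation header
	 * and three plan_node_id slots, padded out to a MAXALIGN boundary (that
	 * padded size becomes instrument_offset), followed by 3 * 2 = 6
	 * Instrumentation structs.
	 */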
	/* Everyone's had a chance to ask for space, so now create the DSM. */
	InitializeParallelDSM(pcxt);

	/*
	 * OK, now we have a dynamic shared memory segment, and it should be big
	 * enough to store all of the data we estimated we would want to put into
	 * it, plus whatever general stuff (not specifically executor-related) the
	 * ParallelContext itself needs to store there.  None of the space we
	 * asked for has been allocated or initialized yet, though, so do that.
	 */

	/* Store serialized PlannedStmt. */
	pstmt_space = shm_toc_allocate(pcxt->toc, pstmt_len);
	memcpy(pstmt_space, pstmt_data, pstmt_len);
	shm_toc_insert(pcxt->toc, PARALLEL_KEY_PLANNEDSTMT, pstmt_space);

	/* Store serialized ParamListInfo. */
	param_space = shm_toc_allocate(pcxt->toc, param_len);
	shm_toc_insert(pcxt->toc, PARALLEL_KEY_PARAMS, param_space);
	SerializeParamList(estate->es_param_list_info, &param_space);

	/* Allocate space for each worker's BufferUsage; no need to initialize. */
	bufusage_space = shm_toc_allocate(pcxt->toc,
							  mul_size(sizeof(BufferUsage), pcxt->nworkers));
	shm_toc_insert(pcxt->toc, PARALLEL_KEY_BUFFER_USAGE, bufusage_space);
	pei->buffer_usage = bufusage_space;

	/* Set up tuple queues. */
	pei->tqueue = ExecParallelSetupTupleQueues(pcxt, false);
	/*
	 * If instrumentation options were supplied, allocate space for the data.
	 * It only gets partially initialized here; the rest happens during
	 * ExecParallelInitializeDSM.
	 */
	if (estate->es_instrument)
	{
		Instrumentation *instrument;
		int			i;

		instrumentation = shm_toc_allocate(pcxt->toc, instrumentation_len);
		instrumentation->instrument_options = estate->es_instrument;
		instrumentation->instrument_offset = instrument_offset;
		instrumentation->num_workers = nworkers;
		instrumentation->num_plan_nodes = e.nnodes;
		instrument = GetInstrumentationArray(instrumentation);
		for (i = 0; i < nworkers * e.nnodes; ++i)
			InstrInit(&instrument[i], estate->es_instrument);
		shm_toc_insert(pcxt->toc, PARALLEL_KEY_INSTRUMENTATION,
					   instrumentation);
		pei->instrumentation = instrumentation;
	}
	/*
	 * Give parallel-aware nodes a chance to initialize their shared data.
	 * This also fills in the plan_node_id slots of the instrumentation
	 * structure, if it exists.
	 */
	d.pcxt = pcxt;
	d.instrumentation = instrumentation;
	d.nnodes = 0;
	ExecParallelInitializeDSM(planstate, &d);

	/*
	 * Make sure that the world hasn't shifted under our feet.  This could
	 * probably just be an Assert(), but let's be conservative for now.
	 */
	if (e.nnodes != d.nnodes)
		elog(ERROR, "inconsistent count of PlanState nodes");

	/* OK, we're ready to rock and roll. */
	return pei;
}
/*
 * Copy instrumentation information about this node and its descendants from
 * dynamic shared memory.
 */
static bool
ExecParallelRetrieveInstrumentation(PlanState *planstate,
							  SharedExecutorInstrumentation *instrumentation)
{
	Instrumentation *instrument;
	int			i;
	int			n;
	int			ibytes;
	int			plan_node_id = planstate->plan->plan_node_id;
	MemoryContext oldcontext;

	/* Find the instrumentation for this node. */
	for (i = 0; i < instrumentation->num_plan_nodes; ++i)
		if (instrumentation->plan_node_id[i] == plan_node_id)
			break;
	if (i >= instrumentation->num_plan_nodes)
		elog(ERROR, "plan node %d not found", plan_node_id);

	/* Accumulate the statistics from all workers. */
	instrument = GetInstrumentationArray(instrumentation);
	instrument += i * instrumentation->num_workers;
	for (n = 0; n < instrumentation->num_workers; ++n)
		InstrAggNode(planstate->instrument, &instrument[n]);

	/*
	 * Also store the per-worker detail.
	 *
	 * Worker instrumentation should be allocated in the same context as the
	 * regular instrumentation information, which is the per-query context.
	 * Switch into the per-query memory context.
	 */
	oldcontext = MemoryContextSwitchTo(planstate->state->es_query_cxt);
	ibytes = mul_size(instrumentation->num_workers, sizeof(Instrumentation));
	planstate->worker_instrument =
		palloc(ibytes + offsetof(WorkerInstrumentation, instrument));
	MemoryContextSwitchTo(oldcontext);

	planstate->worker_instrument->num_workers = instrumentation->num_workers;
	memcpy(&planstate->worker_instrument->instrument, instrument, ibytes);

	return planstate_tree_walker(planstate, ExecParallelRetrieveInstrumentation,
								 instrumentation);
}
/*
 * Finish parallel execution.  We wait for parallel workers to finish, and
 * accumulate their buffer usage and instrumentation.
 */
void
ExecParallelFinish(ParallelExecutorInfo *pei)
{
	int			i;

	if (pei->finished)
		return;

	/* First, wait for the workers to finish. */
	WaitForParallelWorkersToFinish(pei->pcxt);

	/* Next, accumulate buffer usage. */
	for (i = 0; i < pei->pcxt->nworkers_launched; ++i)
		InstrAccumParallelQuery(&pei->buffer_usage[i]);

	/* Finally, accumulate instrumentation, if any. */
	if (pei->instrumentation)
		ExecParallelRetrieveInstrumentation(pei->planstate,
											pei->instrumentation);

	pei->finished = true;
}
/*
 * Clean up whatever ParallelExecutorInfo resources still exist after
 * ExecParallelFinish.  We separate these routines because someone might
 * want to examine the contents of the DSM after ExecParallelFinish and
 * before calling this routine.
 */
void
ExecParallelCleanup(ParallelExecutorInfo *pei)
{
	if (pei->pcxt != NULL)
	{
		DestroyParallelContext(pei->pcxt);
		pei->pcxt = NULL;
	}
	pfree(pei);
}
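
/*
 * For orientation (a simplified sketch; nodeGather.c is the real consumer):
 * the leader typically drives the functions above in this order:
 *
 *		pei = ExecInitParallelPlan(outerPlanState(node), estate, nworkers);
 *		LaunchParallelWorkers(pei->pcxt);
 *		... read tuples from the queues in pei->tqueue ...
 *		ExecParallelFinish(pei);
 *		ExecParallelCleanup(pei);
 *
 * ExecParallelReinitialize() is used between executions when the plan is
 * rescanned.
 */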
/*
 * Create a DestReceiver to write tuples we produce to the shm_mq designated
 * for that purpose.
 */
static DestReceiver *
ExecParallelGetReceiver(dsm_segment *seg, shm_toc *toc)
{
	char	   *mqspace;
	shm_mq	   *mq;

	mqspace = shm_toc_lookup(toc, PARALLEL_KEY_TUPLE_QUEUE);
	mqspace += ParallelWorkerNumber * PARALLEL_TUPLE_QUEUE_SIZE;
	mq = (shm_mq *) mqspace;
	shm_mq_set_sender(mq, MyProc);
	return CreateTupleQueueDestReceiver(shm_mq_attach(mq, seg, NULL));
}
/*
 * Create a QueryDesc for the PlannedStmt we are to execute, and return it.
 */
static QueryDesc *
ExecParallelGetQueryDesc(shm_toc *toc, DestReceiver *receiver,
						 int instrument_options)
{
	char	   *pstmtspace;
	char	   *paramspace;
	PlannedStmt *pstmt;
	ParamListInfo paramLI;

	/* Reconstruct leader-supplied PlannedStmt. */
	pstmtspace = shm_toc_lookup(toc, PARALLEL_KEY_PLANNEDSTMT);
	pstmt = (PlannedStmt *) stringToNode(pstmtspace);

	/* Reconstruct ParamListInfo. */
	paramspace = shm_toc_lookup(toc, PARALLEL_KEY_PARAMS);
	paramLI = RestoreParamList(&paramspace);

	/*
	 * Create a QueryDesc for the query.
	 *
	 * It's not obvious how to obtain the query string from here; and even if
	 * we could, copying it would take more cycles than not copying it.  But
	 * it's a bit unsatisfying to just use a dummy string here, so consider
	 * revising this someday.
	 */
	return CreateQueryDesc(pstmt,
						   "<parallel query>",
						   GetActiveSnapshot(), InvalidSnapshot,
						   receiver, paramLI, instrument_options);
}
/*
 * Copy instrumentation information from this node and its descendants into
 * dynamic shared memory, so that the parallel leader can retrieve it.
 */
static bool
ExecParallelReportInstrumentation(PlanState *planstate,
							  SharedExecutorInstrumentation *instrumentation)
{
	int			i;
	int			plan_node_id = planstate->plan->plan_node_id;
	Instrumentation *instrument;

	InstrEndLoop(planstate->instrument);

	/*
	 * If we shuffled the plan_node_id values in SharedExecutorInstrumentation
	 * into sorted order, we could use binary search here.  This might matter
	 * someday if we're pushing down sufficiently large plan trees.  For now,
	 * do it the slow, dumb way.
	 */
	for (i = 0; i < instrumentation->num_plan_nodes; ++i)
		if (instrumentation->plan_node_id[i] == plan_node_id)
			break;
	if (i >= instrumentation->num_plan_nodes)
		elog(ERROR, "plan node %d not found", plan_node_id);

	/*
	 * Add our statistics to the per-node, per-worker totals.  It's possible
	 * that this could happen more than once if we relaunched workers.
	 */
	instrument = GetInstrumentationArray(instrumentation);
	instrument += i * instrumentation->num_workers;
	Assert(IsParallelWorker());
	Assert(ParallelWorkerNumber < instrumentation->num_workers);
	InstrAggNode(&instrument[ParallelWorkerNumber], planstate->instrument);

	return planstate_tree_walker(planstate, ExecParallelReportInstrumentation,
								 instrumentation);
}
/*
 * Initialize the PlanState and its descendants with the information
 * retrieved from shared memory.  This has to be done once the PlanState is
 * allocated and initialized by the executor; that is, after ExecutorStart().
 */
static bool
ExecParallelInitializeWorker(PlanState *planstate, shm_toc *toc)
{
	if (planstate == NULL)
		return false;

	/* Call initializers for parallel-aware plan nodes. */
	if (planstate->plan->parallel_aware)
	{
		switch (nodeTag(planstate))
		{
			case T_SeqScanState:
				ExecSeqScanInitializeWorker((SeqScanState *) planstate, toc);
				break;
			case T_ForeignScanState:
				ExecForeignScanInitializeWorker((ForeignScanState *) planstate,
												toc);
				break;
			case T_CustomScanState:
				ExecCustomScanInitializeWorker((CustomScanState *) planstate,
											   toc);
				break;
			default:
				break;
		}
	}

	return planstate_tree_walker(planstate, ExecParallelInitializeWorker, toc);
}
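
/*
 * Illustration only, completing the hypothetical "FooScan" sketch: the
 * worker-side initializer simply looks up the leader's shared state by the
 * same node-private key and attaches to it:
 *
 *		void
 *		ExecFooScanInitializeWorker(FooScanState *node, shm_toc *toc)
 *		{
 *			node->pfscan = shm_toc_lookup(toc, node->ss.ps.plan->plan_node_id);
 *		}
 */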
/*
 * Main entrypoint for parallel query worker processes.
 *
 * We reach this function from ParallelWorkerMain, so the setup necessary to
 * create a sensible parallel environment has already been done;
 * ParallelWorkerMain worries about stuff like the transaction state, combo
 * CID mappings, and GUC values, so we don't need to deal with any of that
 * here.
 *
 * Our job is to deal with concerns specific to the executor.  The parallel
 * group leader will have stored a serialized PlannedStmt, and it's our job
 * to execute that plan and write the resulting tuples to the appropriate
 * tuple queue.  Various bits of supporting information that we need in order
 * to do this are also stored in the dsm_segment and can be accessed through
 * the shm_toc.
 */
static void
ParallelQueryMain(dsm_segment *seg, shm_toc *toc)
{
	BufferUsage *buffer_usage;
	DestReceiver *receiver;
	QueryDesc  *queryDesc;
	SharedExecutorInstrumentation *instrumentation;
	int			instrument_options = 0;

	/* Set up DestReceiver, SharedExecutorInstrumentation, and QueryDesc. */
	receiver = ExecParallelGetReceiver(seg, toc);
	instrumentation = shm_toc_lookup(toc, PARALLEL_KEY_INSTRUMENTATION);
	if (instrumentation != NULL)
		instrument_options = instrumentation->instrument_options;
	queryDesc = ExecParallelGetQueryDesc(toc, receiver, instrument_options);

	/* Prepare to track buffer usage during query execution. */
	InstrStartParallelQuery();

	/* Start up the executor, have it run the plan, and then shut it down. */
	ExecutorStart(queryDesc, 0);
	ExecParallelInitializeWorker(queryDesc->planstate, toc);
	ExecutorRun(queryDesc, ForwardScanDirection, 0L);
	ExecutorFinish(queryDesc);

	/* Report buffer usage during parallel execution. */
	buffer_usage = shm_toc_lookup(toc, PARALLEL_KEY_BUFFER_USAGE);
	InstrEndParallelQuery(&buffer_usage[ParallelWorkerNumber]);

	/* Report instrumentation data if any instrumentation options are set. */
	if (instrumentation != NULL)
		ExecParallelReportInstrumentation(queryDesc->planstate,
										  instrumentation);

	/* Must do this after capturing instrumentation. */
	ExecutorEnd(queryDesc);

	FreeQueryDesc(queryDesc);
	(*receiver->rDestroy) (receiver);
}