When a Gather or Gather Merge node is started and stopped multiple
times, accumulate instrumentation data only once, at the end, instead
of after each execution, to avoid recording inflated totals.
This is a back-port of commit 8526bcb2df76d5171b4f4d6dc7a97560a73a5eff
by Amit Kapila.
Discussion: http://postgr.es/m/20171127175631.GA405@depesz.com
Discussion: http://postgr.es/m/CAA4eK1KT3BYj50qWhK5qBF=LDzQCoUVSFZjcK3mHoJJeWA+fNA@mail.gmail.com
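For intuition, a small self-contained C sketch of the failure mode; it is
purely illustrative (the counter and the numbers are invented, not executor
code). A worker's instrumentation counter is cumulative across executions,
so folding it into the leader's totals after every execution re-counts the
earlier runs, while folding it in once at the end does not:

#include <stdio.h>

/* Illustrative stand-in for a worker's cumulative instrumentation. */
typedef struct FakeInstr { long tuples; } FakeInstr;

static void execute_once(FakeInstr *worker)
{
	worker->tuples += 100;		/* each execution processes 100 tuples */
}

int main(void)
{
	FakeInstr	worker = {0}, leader = {0};
	int			i;

	/* Buggy pattern: accumulate the cumulative counter after every
	 * execution, so earlier executions are counted again and again. */
	for (i = 0; i < 10; i++)
	{
		execute_once(&worker);
		leader.tuples += worker.tuples;
	}
	printf("per-execution accumulation: %ld\n", leader.tuples);	/* 5500 */

	/* Fixed pattern: accumulate once, at the end. */
	worker.tuples = 0;
	leader.tuples = 0;
	for (i = 0; i < 10; i++)
		execute_once(&worker);
	leader.tuples += worker.tuples;
	printf("one-shot accumulation: %ld\n", leader.tuples);		/* 1000 */
	return 0;
}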
--- a/src/backend/executor/execParallel.c
+++ b/src/backend/executor/execParallel.c
/*
 * Finish parallel execution. We wait for parallel workers to finish, and
- * accumulate their buffer usage and instrumentation.
+ * accumulate their buffer usage.
 */
void
ExecParallelFinish(ParallelExecutorInfo *pei)
{
	...
	for (i = 0; i < pei->pcxt->nworkers_launched; ++i)
		InstrAccumParallelQuery(&pei->buffer_usage[i]);

-	/* Finally, accumulate instrumentation, if any. */
-	if (pei->instrumentation)
-		ExecParallelRetrieveInstrumentation(pei->planstate,
-											pei->instrumentation);
-
	pei->finished = true;
}
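To show where the accumulation now happens in the node lifecycle, here is a
minimal sketch under assumed stub types (the Fake* names and the driver loop
are inventions for illustration, not executor code): the finish step may run
once per execution and touches only buffer usage, while the cleanup step runs
once per node and is now the single point where instrumentation is retrieved.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for ParallelExecutorInfo. */
typedef struct FakePEI
{
	bool	finished;
	long	buffer_usage;		/* safe to fold in once per execution */
	long	instrumentation;	/* cumulative in DSM; read once at cleanup */
} FakePEI;

static void fake_ExecParallelFinish(FakePEI *pei)
{
	if (pei->finished)
		return;
	pei->buffer_usage += 42;	/* plays the role of buffer-usage totals */
	pei->finished = true;
}

static void fake_ExecParallelCleanup(FakePEI *pei)
{
	/* Instrumentation is accumulated here, exactly once per node. */
	printf("instrumentation folded in once: %ld\n", pei->instrumentation);
}

int main(void)
{
	FakePEI		pei = {false, 0, 1000};
	int			i;

	for (i = 0; i < 10; i++)	/* a Gather node rescanned ten times */
	{
		pei.finished = false;	/* as ExecParallelReinitialize does */
		fake_ExecParallelFinish(&pei);
	}
	fake_ExecParallelCleanup(&pei);
	return 0;
}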
/*
- * Clean up whatever ParallelExecutorInfo resources still exist after
- * ExecParallelFinish. We separate these routines because someone might
- * want to examine the contents of the DSM after ExecParallelFinish and
- * before calling this routine.
+ * Accumulate instrumentation, and then clean up whatever ParallelExecutorInfo
+ * resources still exist after ExecParallelFinish. We separate these
+ * routines because someone might want to examine the contents of the DSM
+ * after ExecParallelFinish and before calling this routine.
*/
void
ExecParallelCleanup(ParallelExecutorInfo *pei)
{
+	/* Accumulate instrumentation, if any. */
+	if (pei->instrumentation)
+		ExecParallelRetrieveInstrumentation(pei->planstate,
+											pei->instrumentation);
+
	if (pei->pcxt != NULL)
	{
		DestroyParallelContext(pei->pcxt);

--- a/src/test/regress/expected/select_parallel.out
+++ b/src/test/regress/expected/select_parallel.out
 13.6538461538461538
(1 row)

+-- test accumulation of stats for parallel nodes
+set enable_indexscan to off;
+set enable_bitmapscan to off;
+set enable_material to off;
+alter table tenk2 set (parallel_workers = 0);
+create function explain_parallel_stats() returns setof text
+language plpgsql as
+$$
+declare ln text;
+begin
+  for ln in
+    explain (analyze, timing off, costs off)
+    select count(*) from tenk1, tenk2 where
+      tenk1.hundred > 1 and tenk2.thousand=0
+  loop
+    ln := regexp_replace(ln, 'Planning time: \S*', 'Planning time: xxx');
+    ln := regexp_replace(ln, 'Execution time: \S*', 'Execution time: xxx');
+    return next ln;
+  end loop;
+end;
+$$;
+select * from explain_parallel_stats();
+                          explain_parallel_stats
+--------------------------------------------------------------------------
+ Aggregate (actual rows=1 loops=1)
+   ->  Nested Loop (actual rows=98000 loops=1)
+         ->  Seq Scan on tenk2 (actual rows=10 loops=1)
+               Filter: (thousand = 0)
+               Rows Removed by Filter: 9990
+         ->  Gather (actual rows=9800 loops=10)
+               Workers Planned: 4
+               Workers Launched: 4
+               ->  Parallel Seq Scan on tenk1 (actual rows=1960 loops=50)
+                     Filter: (hundred > 1)
+                     Rows Removed by Filter: 40
+ Planning time: xxx ms
+ Execution time: xxx ms
+(13 rows)
+
+reset enable_indexscan;
+reset enable_bitmapscan;
+reset enable_material;
+alter table tenk2 reset (parallel_workers);
+drop function explain_parallel_stats();
-- test the sanity of parallel query after the active role is dropped.
set force_parallel_mode=1;
drop role if exists regress_parallel_worker;
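As a cross-check of the expected output above: the filter thousand = 0 leaves
10 rows in tenk2, so the Gather node is executed 10 times (loops=10); with 4
workers plus the leader, 10 x 5 = 50 Parallel Seq Scan instances run in total
(loops=50), and since EXPLAIN reports rows per loop, 1960 x 50 = 98000 matches
the Nested Loop's actual rows=98000. With the old per-execution accumulation,
these per-node totals would have come out inflated.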
--- a/src/test/regress/sql/select_parallel.sql
+++ b/src/test/regress/sql/select_parallel.sql
select avg(aa::int8) from a_star;

+-- test accumulation of stats for parallel nodes
+set enable_indexscan to off;
+set enable_bitmapscan to off;
+set enable_material to off;
+alter table tenk2 set (parallel_workers = 0);
+create function explain_parallel_stats() returns setof text
+language plpgsql as
+$$
+declare ln text;
+begin
+  for ln in
+    explain (analyze, timing off, costs off)
+    select count(*) from tenk1, tenk2 where
+      tenk1.hundred > 1 and tenk2.thousand=0
+  loop
+    ln := regexp_replace(ln, 'Planning time: \S*', 'Planning time: xxx');
+    ln := regexp_replace(ln, 'Execution time: \S*', 'Execution time: xxx');
+    return next ln;
+  end loop;
+end;
+$$;
+select * from explain_parallel_stats();
+reset enable_indexscan;
+reset enable_bitmapscan;
+reset enable_material;
+alter table tenk2 reset (parallel_workers);
+drop function explain_parallel_stats();
+
-- test the sanity of parallel query after the active role is dropped.
set force_parallel_mode=1;
drop role if exists regress_parallel_worker;