--- a/src/backend/optimizer/plan/planner.c
+++ b/src/backend/optimizer/plan/planner.c
static RelOptInfo *create_grouping_paths(PlannerInfo *root,
RelOptInfo *input_rel,
PathTarget *target,
+ const AggClauseCosts *agg_costs,
List *rollup_lists,
List *rollup_groupclauses);
static RelOptInfo *create_window_paths(PlannerInfo *root,
PathTarget *grouping_target;
PathTarget *scanjoin_target;
bool have_grouping;
+ AggClauseCosts agg_costs;
WindowFuncLists *wflists = NULL;
List *activeWindows = NIL;
List *rollup_lists = NIL;
*/
root->processed_tlist = tlist;
+ /*
+ * Collect statistics about aggregates for estimating costs, and mark
+ * all the aggregates with resolved aggtranstypes. We must do this
+ * before slicing and dicing the tlist into various pathtargets, else
+ * some copies of the Aggref nodes might escape being marked with the
+ * correct transtypes.
+ *
+ * Note: currently, we do not detect duplicate aggregates here. This
+ * may result in somewhat-overestimated cost, which is fine for our
+ * purposes since all Paths will get charged the same. But at some
+ * point we might wish to do that detection in the planner, rather
+ * than during executor startup.
+ */
+ MemSet(&agg_costs, 0, sizeof(AggClauseCosts));
+ if (parse->hasAggs)
+ {
+ get_agg_clause_costs(root, (Node *) tlist, AGGSPLIT_SIMPLE,
+ &agg_costs);
+ get_agg_clause_costs(root, parse->havingQual, AGGSPLIT_SIMPLE,
+ &agg_costs);
+ }
+
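For reference, the structure these calls fill in is AggClauseCosts; the sketch below is recalled from nodes/relation.h of this vintage (treat it as illustrative, not the authoritative declaration). Its fields are exactly the ones the call sites further down consult:

typedef struct AggClauseCosts
{
    int         numAggs;        /* total number of aggregate functions */
    int         numOrderedAggs; /* number that use DISTINCT or ORDER BY */
    bool        hasNonPartial;  /* does any agg not support partial mode? */
    bool        hasNonSerial;   /* is any partial agg non-serializable? */
    QualCost    transCost;      /* total per-input-row execution costs */
    Cost        finalCost;      /* total per-aggregated-row costs */
    Size        transitionSpace;    /* space for pass-by-ref transition data */
} AggClauseCosts;

Both get_agg_clause_costs calls accumulate into the same struct, so the totals cover aggregates appearing in the tlist and in HAVING together.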
/*
* Locate any window functions in the tlist. (We don't need to look
* anywhere else, since expressions used in ORDER BY will be in there
current_rel = create_grouping_paths(root,
current_rel,
grouping_target,
+ &agg_costs,
rollup_lists,
rollup_groupclauses);
}
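A note on the AGGSPLIT_SIMPLE mode used by the get_agg_clause_costs calls above (and documented in the header comment below): AggSplit tells an Agg node which pieces of the aggregate machinery to run. From memory of nodes/nodes.h at this vintage (a sketch):

/* Primitive options for controlling partial aggregation: */
#define AGGSPLITOP_COMBINE      0x01    /* substitute combinefn for transfn */
#define AGGSPLITOP_SKIPFINAL    0x02    /* skip finalfn, return state as-is */
#define AGGSPLITOP_SERIALIZE    0x04    /* apply serialfn to output */
#define AGGSPLITOP_DESERIALIZE  0x08    /* apply deserialfn to input */

typedef enum AggSplit
{
    /* Basic, non-split aggregation: */
    AGGSPLIT_SIMPLE = 0,
    /* Initial phase of partial aggregation, with serialization: */
    AGGSPLIT_INITIAL_SERIAL = AGGSPLITOP_SKIPFINAL | AGGSPLITOP_SERIALIZE,
    /* Final phase of partial aggregation, with deserialization: */
    AGGSPLIT_FINAL_DESERIAL = AGGSPLITOP_COMBINE | AGGSPLITOP_DESERIALIZE
} AggSplit;

Costing in AGGSPLIT_SIMPLE mode charges the full transfn-plus-finalfn price, which is the right baseline here since every non-partial Path gets the same charge.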
*
* input_rel: contains the source-data Paths
* target: the pathtarget for the result Paths to compute
+ * agg_costs: cost info about all aggregates in query (in AGGSPLIT_SIMPLE mode)
* rollup_lists: list of grouping sets, or NIL if not doing grouping sets
* rollup_groupclauses: list of grouping clauses for grouping sets,
* or NIL if not doing grouping sets
create_grouping_paths(PlannerInfo *root,
RelOptInfo *input_rel,
PathTarget *target,
+ const AggClauseCosts *agg_costs,
List *rollup_lists,
List *rollup_groupclauses)
{
Path *cheapest_path = input_rel->cheapest_total_path;
RelOptInfo *grouped_rel;
PathTarget *partial_grouping_target = NULL;
- AggClauseCosts agg_costs;
AggClauseCosts agg_partial_costs; /* parallel only */
AggClauseCosts agg_final_costs; /* parallel only */
Size hashaggtablesize;
return grouped_rel;
}
- /*
- * Collect statistics about aggregates for estimating costs. Note: we do
- * not detect duplicate aggregates here; a somewhat-overestimated cost is
- * okay for our purposes.
- */
- MemSet(&agg_costs, 0, sizeof(AggClauseCosts));
- if (parse->hasAggs)
- {
- get_agg_clause_costs(root, (Node *) target->exprs, AGGSPLIT_SIMPLE,
- &agg_costs);
- get_agg_clause_costs(root, parse->havingQual, AGGSPLIT_SIMPLE,
- &agg_costs);
- }
-
/*
* Estimate number of groups.
*/
*/
can_hash = (parse->groupClause != NIL &&
parse->groupingSets == NIL &&
- agg_costs.numOrderedAggs == 0 &&
+ agg_costs->numOrderedAggs == 0 &&
grouping_is_hashable(parse->groupClause));
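The last condition delegates to grouping_is_hashable. As we recall it from tlist.c (a sketch; the real function may differ cosmetically), it simply scans the hashable flags the parser already resolved:

bool
grouping_is_hashable(List *groupClause)
{
    ListCell   *glitem;

    foreach(glitem, groupClause)
    {
        SortGroupClause *groupcl = (SortGroupClause *) lfirst(glitem);

        /* every grouping column needs a hashable equality operator */
        if (!groupcl->hashable)
            return false;
    }
    return true;
}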
/*
/* We don't know how to do grouping sets in parallel. */
try_parallel_aggregation = false;
}
- else if (agg_costs.hasNonPartial || agg_costs.hasNonSerial)
+ else if (agg_costs->hasNonPartial || agg_costs->hasNonSerial)
{
/* Insufficient support for partial mode. */
try_parallel_aggregation = false;
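For context on the two flags tested here: they come out of get_agg_clause_costs. The helper below is a hypothetical condensation of the per-Aggref logic (field names follow pg_aggregate's columns; the DISTINCT/ORDER BY disqualification may be expressed differently in the real source):

static void
mark_partial_safety(Aggref *aggref, Form_pg_aggregate aggform,
                    AggClauseCosts *costs)
{
    if (aggref->aggdistinct != NIL || aggref->aggorder != NIL)
        costs->hasNonPartial = true;    /* DISTINCT/ORDER BY can't be combined */
    else if (!OidIsValid(aggform->aggcombinefn))
        costs->hasNonPartial = true;    /* no combine function at all */
    else if (aggform->aggtranstype == INTERNALOID &&
             (!OidIsValid(aggform->aggserialfn) ||
              !OidIsValid(aggform->aggdeserialfn)))
        costs->hasNonSerial = true;     /* internal state can't cross processes */
}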
(List *) parse->havingQual,
rollup_lists,
rollup_groupclauses,
- &agg_costs,
+ agg_costs,
dNumGroups));
}
else if (parse->hasAggs)
AGGSPLIT_SIMPLE,
parse->groupClause,
(List *) parse->havingQual,
- &agg_costs,
+ agg_costs,
dNumGroups));
}
else if (parse->groupClause)
if (can_hash)
{
hashaggtablesize = estimate_hashagg_tablesize(cheapest_path,
- &agg_costs,
+ agg_costs,
dNumGroups);
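For scale, the estimate consulted here is a per-entry footprint times dNumGroups; roughly, paraphrasing estimate_hashagg_tablesize from earlier in this file (a sketch from memory, not the exact source):

static Size
estimate_hashagg_tablesize(Path *path, const AggClauseCosts *agg_costs,
                           double dNumGroups)
{
    Size        hashentrysize;

    /* per-group output tuple, plus minimal-tuple header overhead */
    hashentrysize = MAXALIGN(path->pathtarget->width) +
        MAXALIGN(SizeofMinimalTupleHeader);

    /* plus space for pass-by-ref transition values */
    hashentrysize += agg_costs->transitionSpace;

    /* plus per-entry hash table overhead */
    hashentrysize += hash_agg_entry_size(agg_costs->numAggs);

    return hashentrysize * dNumGroups;
}

Only transitionSpace and numAggs feed in from the agg costs, so precomputing agg_costs once up in grouping_planner leaves this estimate identical to what the old in-function computation produced.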
/*
AGGSPLIT_SIMPLE,
parse->groupClause,
(List *) parse->havingQual,
- &agg_costs,
+ agg_costs,
dNumGroups));
}
--- a/src/test/regress/expected/aggregates.out
+++ b/src/test/regress/expected/aggregates.out
0 | 0
(3 rows)
+-- test for failure to set all aggregates' aggtranstype
+explain (verbose, costs off)
+select sum(tenthous) as s1, sum(tenthous) + random()*0 as s2
+ from tenk1 group by thousand order by thousand limit 3;
+ QUERY PLAN
+-------------------------------------------------------------------------------------------------------------------
+ Limit
+ Output: (sum(tenthous)), (((sum(tenthous))::double precision + (random() * '0'::double precision))), thousand
+ -> GroupAggregate
+ Output: sum(tenthous), ((sum(tenthous))::double precision + (random() * '0'::double precision)), thousand
+ Group Key: tenk1.thousand
+ -> Index Only Scan using tenk1_thous_tenthous on public.tenk1
+ Output: thousand, tenthous
+(7 rows)
+
+select sum(tenthous) as s1, sum(tenthous) + random()*0 as s2
+ from tenk1 group by thousand order by thousand limit 3;
+  s1   |  s2
+-------+-------
+ 45000 | 45000
+ 45010 | 45010
+ 45020 | 45020
+(3 rows)
+