1 /*-------------------------------------------------------------------------
4 * The query optimizer external interface.
6 * Portions Copyright (c) 1996-2008, PostgreSQL Global Development Group
7 * Portions Copyright (c) 1994, Regents of the University of California
11 * $PostgreSQL: pgsql/src/backend/optimizer/plan/planner.c,v 1.244 2008/10/04 21:56:53 tgl Exp $
13 *-------------------------------------------------------------------------
20 #include "catalog/pg_operator.h"
21 #include "executor/executor.h"
22 #include "executor/nodeAgg.h"
23 #include "miscadmin.h"
24 #include "nodes/makefuncs.h"
25 #include "optimizer/clauses.h"
26 #include "optimizer/cost.h"
27 #include "optimizer/pathnode.h"
28 #include "optimizer/paths.h"
29 #include "optimizer/planmain.h"
30 #include "optimizer/planner.h"
31 #include "optimizer/prep.h"
32 #include "optimizer/subselect.h"
33 #include "optimizer/tlist.h"
34 #include "optimizer/var.h"
35 #ifdef OPTIMIZER_DEBUG
36 #include "nodes/print.h"
38 #include "parser/parse_expr.h"
39 #include "parser/parse_oper.h"
40 #include "parser/parsetree.h"
41 #include "utils/lsyscache.h"
42 #include "utils/syscache.h"
46 double cursor_tuple_fraction = DEFAULT_CURSOR_TUPLE_FRACTION;
48 /* Hook for plugins to get control in planner() */
49 planner_hook_type planner_hook = NULL;
52 /* Expression kind codes for preprocess_expression */
53 #define EXPRKIND_QUAL 0
54 #define EXPRKIND_TARGET 1
55 #define EXPRKIND_RTFUNC 2
56 #define EXPRKIND_VALUES 3
57 #define EXPRKIND_LIMIT 4
58 #define EXPRKIND_APPINFO 5
61 static Node *preprocess_expression(PlannerInfo *root, Node *expr, int kind);
62 static void preprocess_qual_conditions(PlannerInfo *root, Node *jtnode);
63 static Plan *inheritance_planner(PlannerInfo *root);
64 static Plan *grouping_planner(PlannerInfo *root, double tuple_fraction);
65 static bool is_dummy_plan(Plan *plan);
66 static double preprocess_limit(PlannerInfo *root,
67 double tuple_fraction,
68 int64 *offset_est, int64 *count_est);
69 static void preprocess_groupclause(PlannerInfo *root);
70 static bool choose_hashed_grouping(PlannerInfo *root,
71 double tuple_fraction, double limit_tuples,
72 Path *cheapest_path, Path *sorted_path,
73 double dNumGroups, AggClauseCounts *agg_counts);
74 static bool choose_hashed_distinct(PlannerInfo *root,
75 Plan *input_plan, List *input_pathkeys,
76 double tuple_fraction, double limit_tuples,
77 double dNumDistinctRows);
78 static List *make_subplanTargetList(PlannerInfo *root, List *tlist,
79 AttrNumber **groupColIdx, bool *need_tlist_eval);
80 static void locate_grouping_columns(PlannerInfo *root,
83 AttrNumber *groupColIdx);
84 static List *postprocess_setop_tlist(List *new_tlist, List *orig_tlist);
87 /*****************************************************************************
89 * Query optimizer entry point
91 * To support loadable plugins that monitor or modify planner behavior,
92 * we provide a hook variable that lets a plugin get control before and
93 * after the standard planning process. The plugin would normally call
96 * Note to plugin authors: standard_planner() scribbles on its Query input,
97 * so you'd better copy that data structure if you want to plan more than once.
99 *****************************************************************************/
/*
 * planner
 *	  Query-optimizer entry point.  If a plugin has installed planner_hook,
 *	  it gets control first; otherwise we fall through to standard_planner.
 *	  NOTE(review): source is elided here — the return type, declarations,
 *	  braces, and final return are not visible in this extract.
 */
101 planner(Query *parse, int cursorOptions, ParamListInfo boundParams)
/* hook installed: let the plugin drive planning (it may call standard_planner itself) */
106 result = (*planner_hook) (parse, cursorOptions, boundParams);
/* no hook: use the built-in planner directly */
108 result = standard_planner(parse, cursorOptions, boundParams);
/*
 * standard_planner
 *	  The default planning path: set up PlannerGlobal state, pick a
 *	  tuple_fraction, run subquery_planner on the top Query, finish with
 *	  set_plan_references, and package everything into a PlannedStmt.
 *	  NOTE(review): interior lines are elided in this extract (original
 *	  numbering jumps); braces and some declarations are not visible.
 */
113 standard_planner(Query *parse, int cursorOptions, ParamListInfo boundParams)
117 double tuple_fraction;
123 /* Cursor options may come from caller or from DECLARE CURSOR stmt */
124 if (parse->utilityStmt &&
125 IsA(parse->utilityStmt, DeclareCursorStmt))
126 cursorOptions |= ((DeclareCursorStmt *) parse->utilityStmt)->options;
129 * Set up global state for this planner invocation. This data is needed
130 * across all levels of sub-Query that might exist in the given command,
131 * so we keep it in a separate struct that's linked to by each per-Query
/* Start from a clean PlannerGlobal; fields accumulate as subqueries are planned */
134 glob = makeNode(PlannerGlobal);
136 glob->boundParams = boundParams;
137 glob->paramlist = NIL;
138 glob->subplans = NIL;
139 glob->subrtables = NIL;
140 glob->rewindPlanIDs = NULL;
141 glob->finalrtable = NIL;
142 glob->relationOids = NIL;
143 glob->invalItems = NIL;
144 glob->transientPlan = false;
146 /* Determine what fraction of the plan is likely to be scanned */
147 if (cursorOptions & CURSOR_OPT_FAST_PLAN)
150 * We have no real idea how many tuples the user will ultimately FETCH
151 * from a cursor, but it is often the case that he doesn't want 'em
152 * all, or would prefer a fast-start plan anyway so that he can
153 * process some of the tuples sooner. Use a GUC parameter to decide
154 * what fraction to optimize for.
156 tuple_fraction = cursor_tuple_fraction;
159 * We document cursor_tuple_fraction as simply being a fraction,
160 * which means the edge cases 0 and 1 have to be treated specially
161 * here. We convert 1 to 0 ("all the tuples") and 0 to a very small
/* >= 1.0 means "fetch everything"; <= 0.0 becomes a tiny positive fraction */
164 if (tuple_fraction >= 1.0)
165 tuple_fraction = 0.0;
166 else if (tuple_fraction <= 0.0)
167 tuple_fraction = 1e-10;
171 /* Default assumption is we need all the tuples */
172 tuple_fraction = 0.0;
175 /* primary planning entry point (may recurse for subqueries) */
176 top_plan = subquery_planner(glob, parse, NULL,
177 false, tuple_fraction, &root);
180 * If creating a plan for a scrollable cursor, make sure it can run
181 * backwards on demand. Add a Material node at the top at need.
183 if (cursorOptions & CURSOR_OPT_SCROLL)
185 if (!ExecSupportsBackwardScan(top_plan))
186 top_plan = materialize_finished_plan(top_plan);
189 /* final cleanup of the plan */
190 Assert(glob->finalrtable == NIL);
/* flatten rangetables and fix Var references in the main plan tree */
191 top_plan = set_plan_references(glob, top_plan, root->parse->rtable);
192 /* ... and the subplans (both regular subplans and initplans) */
193 Assert(list_length(glob->subplans) == list_length(glob->subrtables));
194 forboth(lp, glob->subplans, lr, glob->subrtables)
196 Plan *subplan = (Plan *) lfirst(lp);
197 List *subrtable = (List *) lfirst(lr);
/* replace each stored subplan with its reference-fixed version in place */
199 lfirst(lp) = set_plan_references(glob, subplan, subrtable);
202 /* build the PlannedStmt result */
203 result = makeNode(PlannedStmt);
205 result->commandType = parse->commandType;
206 result->canSetTag = parse->canSetTag;
207 result->transientPlan = glob->transientPlan;
208 result->planTree = top_plan;
209 result->rtable = glob->finalrtable;
210 result->resultRelations = root->resultRelations;
211 result->utilityStmt = parse->utilityStmt;
212 result->intoClause = parse->intoClause;
213 result->subplans = glob->subplans;
214 result->rewindPlanIDs = glob->rewindPlanIDs;
215 result->returningLists = root->returningLists;
216 result->rowMarks = parse->rowMarks;
217 result->relationOids = glob->relationOids;
218 result->invalItems = glob->invalItems;
/* executor sizes its PARAM_EXEC array from this count */
219 result->nParamExec = list_length(glob->paramlist);
225 /*--------------------
227 * Invokes the planner on a subquery. We recurse to here for each
228 * sub-SELECT found in the query tree.
230 * glob is the global state for the current planner run.
231 * parse is the querytree produced by the parser & rewriter.
232 * parent_root is the immediate parent Query's info (NULL at the top level).
233 * hasRecursion is true if this is a recursive WITH query.
234 * tuple_fraction is the fraction of tuples we expect will be retrieved.
235 * tuple_fraction is interpreted as explained for grouping_planner, below.
237 * If subroot isn't NULL, we pass back the query's final PlannerInfo struct;
238 * among other things this tells the output sort ordering of the plan.
240 * Basically, this routine does the stuff that should only be done once
241 * per Query object. It then calls grouping_planner. At one time,
242 * grouping_planner could be invoked recursively on the same Query object;
243 * that's not currently true, but we keep the separation between the two
244 * routines anyway, in case we need it again someday.
246 * subquery_planner will be called recursively to handle sub-Query nodes
247 * found within the query's expressions and rangetable.
249 * Returns a query plan.
250 *--------------------
/*
 * subquery_planner
 *	  Per-Query planning driver: builds a PlannerInfo, runs the one-time
 *	  preprocessing passes (CTEs, sublink pullup, subquery pullup,
 *	  inheritance expansion, expression preprocessing, HAVING motion,
 *	  outer-join reduction), then hands off to inheritance_planner or
 *	  grouping_planner.  See the comment block above for the contract.
 *	  NOTE(review): interior lines are elided in this extract; braces and
 *	  some declarations are not visible.
 */
253 subquery_planner(PlannerGlobal *glob, Query *parse,
254 PlannerInfo *parent_root,
255 bool hasRecursion, double tuple_fraction,
256 PlannerInfo **subroot)
/* remember how many subplans existed on entry, to detect new ones below */
258 int num_old_subplans = list_length(glob->subplans);
265 /* Create a PlannerInfo data structure for this subquery */
266 root = makeNode(PlannerInfo);
269 root->query_level = parent_root ? parent_root->query_level + 1 : 1;
270 root->parent_root = parent_root;
271 root->planner_cxt = CurrentMemoryContext;
272 root->init_plans = NIL;
273 root->cte_plan_ids = NIL;
274 root->eq_classes = NIL;
275 root->append_rel_list = NIL;
277 root->hasRecursion = hasRecursion;
/* recursive WITH needs a worktable Param; otherwise mark it unused (-1) */
279 root->wt_param_id = SS_assign_worktable_param(root);
281 root->wt_param_id = -1;
282 root->non_recursive_plan = NULL;
285 * If there is a WITH list, process each WITH query and build an
286 * initplan SubPlan structure for it.
289 SS_process_ctes(root);
292 * Look for ANY and EXISTS SubLinks in WHERE and JOIN/ON clauses, and try
293 * to transform them into joins. Note that this step does not descend
294 * into subqueries; if we pull up any subqueries below, their SubLinks are
295 * processed just before pulling them up.
297 if (parse->hasSubLinks)
298 pull_up_sublinks(root);
301 * Scan the rangetable for set-returning functions, and inline them
302 * if possible (producing subqueries that might get pulled up next).
303 * Recursion issues here are handled in the same way as for SubLinks.
305 inline_set_returning_functions(root);
308 * Check to see if any subqueries in the rangetable can be merged into
311 parse->jointree = (FromExpr *)
312 pull_up_subqueries(root, (Node *) parse->jointree, false, false);
315 * Detect whether any rangetable entries are RTE_JOIN kind; if not, we can
316 * avoid the expense of doing flatten_join_alias_vars(). Also check for
317 * outer joins --- if none, we can skip reduce_outer_joins().
318 * This must be done after we have done pull_up_subqueries, of course.
320 root->hasJoinRTEs = false;
321 hasOuterJoins = false;
322 foreach(l, parse->rtable)
324 RangeTblEntry *rte = (RangeTblEntry *) lfirst(l);
326 if (rte->rtekind == RTE_JOIN)
328 root->hasJoinRTEs = true;
329 if (IS_OUTER_JOIN(rte->jointype))
331 hasOuterJoins = true;
332 /* Can quit scanning once we find an outer join */
339 * Expand any rangetable entries that are inheritance sets into "append
340 * relations". This can add entries to the rangetable, but they must be
341 * plain base relations not joins, so it's OK (and marginally more
342 * efficient) to do it after checking for join RTEs. We must do it after
343 * pulling up subqueries, else we'd fail to handle inherited tables in
346 expand_inherited_tables(root);
349 * Set hasHavingQual to remember if HAVING clause is present. Needed
350 * because preprocess_expression will reduce a constant-true condition to
351 * an empty qual list ... but "HAVING TRUE" is not a semantic no-op.
353 root->hasHavingQual = (parse->havingQual != NULL);
355 /* Clear this flag; might get set in distribute_qual_to_rels */
356 root->hasPseudoConstantQuals = false;
359 * Do expression preprocessing on targetlist and quals.
361 parse->targetList = (List *)
362 preprocess_expression(root, (Node *) parse->targetList,
365 parse->returningList = (List *)
366 preprocess_expression(root, (Node *) parse->returningList,
369 preprocess_qual_conditions(root, (Node *) parse->jointree);
371 parse->havingQual = preprocess_expression(root, parse->havingQual,
374 parse->limitOffset = preprocess_expression(root, parse->limitOffset,
376 parse->limitCount = preprocess_expression(root, parse->limitCount,
/* translated_vars inside AppendRelInfos need preprocessing too */
379 root->append_rel_list = (List *)
380 preprocess_expression(root, (Node *) root->append_rel_list,
383 /* Also need to preprocess expressions for function and values RTEs */
384 foreach(l, parse->rtable)
386 RangeTblEntry *rte = (RangeTblEntry *) lfirst(l);
388 if (rte->rtekind == RTE_FUNCTION)
389 rte->funcexpr = preprocess_expression(root, rte->funcexpr,
391 else if (rte->rtekind == RTE_VALUES)
392 rte->values_lists = (List *)
393 preprocess_expression(root, (Node *) rte->values_lists,
398 * In some cases we may want to transfer a HAVING clause into WHERE. We
399 * cannot do so if the HAVING clause contains aggregates (obviously) or
400 * volatile functions (since a HAVING clause is supposed to be executed
401 * only once per group). Also, it may be that the clause is so expensive
402 * to execute that we're better off doing it only once per group, despite
403 * the loss of selectivity. This is hard to estimate short of doing the
404 * entire planning process twice, so we use a heuristic: clauses
405 * containing subplans are left in HAVING. Otherwise, we move or copy the
406 * HAVING clause into WHERE, in hopes of eliminating tuples before
407 * aggregation instead of after.
409 * If the query has explicit grouping then we can simply move such a
410 * clause into WHERE; any group that fails the clause will not be in the
411 * output because none of its tuples will reach the grouping or
412 * aggregation stage. Otherwise we must have a degenerate (variable-free)
413 * HAVING clause, which we put in WHERE so that query_planner() can use it
414 * in a gating Result node, but also keep in HAVING to ensure that we
415 * don't emit a bogus aggregated row. (This could be done better, but it
416 * seems not worth optimizing.)
418 * Note that both havingQual and parse->jointree->quals are in
419 * implicitly-ANDed-list form at this point, even though they are declared
423 foreach(l, (List *) parse->havingQual)
425 Node *havingclause = (Node *) lfirst(l);
427 if (contain_agg_clause(havingclause) ||
428 contain_volatile_functions(havingclause) ||
429 contain_subplans(havingclause))
431 /* keep it in HAVING */
432 newHaving = lappend(newHaving, havingclause);
434 else if (parse->groupClause)
436 /* move it to WHERE */
437 parse->jointree->quals = (Node *)
438 lappend((List *) parse->jointree->quals, havingclause);
442 /* put a copy in WHERE, keep it in HAVING */
443 parse->jointree->quals = (Node *)
444 lappend((List *) parse->jointree->quals,
445 copyObject(havingclause));
446 newHaving = lappend(newHaving, havingclause);
/* install the (possibly reduced) HAVING list back into the Query */
449 parse->havingQual = (Node *) newHaving;
452 * If we have any outer joins, try to reduce them to plain inner joins.
453 * This step is most easily done after we've done expression
457 reduce_outer_joins(root);
460 * Do the main planning. If we have an inherited target relation, that
461 * needs special processing, else go straight to grouping_planner.
463 if (parse->resultRelation &&
464 rt_fetch(parse->resultRelation, parse->rtable)->inh)
465 plan = inheritance_planner(root);
467 plan = grouping_planner(root, tuple_fraction);
470 * If any subplans were generated, or if we're inside a subplan, build
471 * initPlan list and extParam/allParam sets for plan nodes, and attach the
472 * initPlans to the top plan node.
474 if (list_length(glob->subplans) != num_old_subplans ||
475 root->query_level > 1)
476 SS_finalize_plan(root, plan, true);
478 /* Return internal info if caller wants it */
486 * preprocess_expression
487 * Do subquery_planner's preprocessing work for an expression,
488 * which can be a targetlist, a WHERE clause (including JOIN/ON
489 * conditions), or a HAVING clause.
/*
 * preprocess_expression
 *	  One-stop expression preprocessing: flatten join aliases, simplify
 *	  constants, canonicalize quals, expand SubLinks, replace uplevel Vars
 *	  with Params, and convert quals to implicit-AND form.  The 'kind'
 *	  argument (EXPRKIND_*) selects which steps apply.
 *	  NOTE(review): interior lines are elided in this extract; braces and
 *	  the final return are not visible.
 */
492 preprocess_expression(PlannerInfo *root, Node *expr, int kind)
495 * Fall out quickly if expression is empty. This occurs often enough to
496 * be worth checking. Note that null->null is the correct conversion for
497 * implicit-AND result format, too.
503 * If the query has any join RTEs, replace join alias variables with
504 * base-relation variables. We must do this before sublink processing,
505 * else sublinks expanded out from join aliases wouldn't get processed. We
506 * can skip it in VALUES lists, however, since they can't contain any Vars
509 if (root->hasJoinRTEs && kind != EXPRKIND_VALUES)
510 expr = flatten_join_alias_vars(root, expr);
513 * Simplify constant expressions.
515 * Note: this also flattens nested AND and OR expressions into N-argument
516 * form. All processing of a qual expression after this point must be
517 * careful to maintain AND/OR flatness --- that is, do not generate a tree
518 * with AND directly under AND, nor OR directly under OR.
520 * Because this is a relatively expensive process, we skip it when the
521 * query is trivial, such as "SELECT 2+2;" or "INSERT ... VALUES()". The
522 * expression will only be evaluated once anyway, so no point in
523 * pre-simplifying; we can't execute it any faster than the executor can,
524 * and we will waste cycles copying the tree. Notice however that we
525 * still must do it for quals (to get AND/OR flatness); and if we are in a
526 * subquery we should not assume it will be done only once.
528 * For VALUES lists we never do this at all, again on the grounds that we
529 * should optimize for one-time evaluation.
531 if (kind != EXPRKIND_VALUES &&
532 (root->parse->jointree->fromlist != NIL ||
533 kind == EXPRKIND_QUAL ||
534 root->query_level > 1))
535 expr = eval_const_expressions(root, expr);
538 * If it's a qual or havingQual, canonicalize it.
540 if (kind == EXPRKIND_QUAL)
542 expr = (Node *) canonicalize_qual((Expr *) expr);
544 #ifdef OPTIMIZER_DEBUG
545 printf("After canonicalize_qual()\n");
550 /* Expand SubLinks to SubPlans */
551 if (root->parse->hasSubLinks)
/* isQual flag tells SS_process_sublinks whether top-level AND semantics apply */
552 expr = SS_process_sublinks(root, expr, (kind == EXPRKIND_QUAL));
555 * XXX do not insert anything here unless you have grokked the comments in
556 * SS_replace_correlation_vars ...
559 /* Replace uplevel vars with Param nodes (this IS possible in VALUES) */
560 if (root->query_level > 1)
561 expr = SS_replace_correlation_vars(root, expr);
564 * If it's a qual or havingQual, convert it to implicit-AND format. (We
565 * don't want to do this before eval_const_expressions, since the latter
566 * would be unable to simplify a top-level AND correctly. Also,
567 * SS_process_sublinks expects explicit-AND format.)
569 if (kind == EXPRKIND_QUAL)
570 expr = (Node *) make_ands_implicit((Expr *) expr);
576 * preprocess_qual_conditions
577 * Recursively scan the query's jointree and do subquery_planner's
578 * preprocessing work on each qual condition found therein.
/*
 * preprocess_qual_conditions
 *	  Recursively walk the jointree, running preprocess_expression with
 *	  EXPRKIND_QUAL on every qual condition (FromExpr and JoinExpr quals).
 *	  RangeTblRef leaves carry no quals, so they are skipped.
 *	  NOTE(review): braces are elided in this extract.
 */
581 preprocess_qual_conditions(PlannerInfo *root, Node *jtnode)
585 if (IsA(jtnode, RangeTblRef))
587 /* nothing to do here */
589 else if (IsA(jtnode, FromExpr))
591 FromExpr *f = (FromExpr *) jtnode;
/* recurse into each item of the FROM list before handling our own quals */
594 foreach(l, f->fromlist)
595 preprocess_qual_conditions(root, lfirst(l));
597 f->quals = preprocess_expression(root, f->quals, EXPRKIND_QUAL);
599 else if (IsA(jtnode, JoinExpr))
601 JoinExpr *j = (JoinExpr *) jtnode;
/* recurse into both join inputs, then preprocess the JOIN/ON qual */
603 preprocess_qual_conditions(root, j->larg);
604 preprocess_qual_conditions(root, j->rarg);
606 j->quals = preprocess_expression(root, j->quals, EXPRKIND_QUAL);
/* any other node type in a jointree indicates corrupted parse output */
609 elog(ERROR, "unrecognized node type: %d",
610 (int) nodeTag(jtnode));
614 * inheritance_planner
615 * Generate a plan in the case where the result relation is an
618 * We have to handle this case differently from cases where a source relation
619 * is an inheritance set. Source inheritance is expanded at the bottom of the
620 * plan tree (see allpaths.c), but target inheritance has to be expanded at
621 * the top. The reason is that for UPDATE, each target relation needs a
622 * different targetlist matching its own column set. Also, for both UPDATE
623 * and DELETE, the executor needs the Append plan node at the top, else it
624 * can't keep track of which table is the current target table. Fortunately,
625 * the UPDATE/DELETE target can never be the nullable side of an outer join,
626 * so it's OK to generate the plan this way.
628 * Returns a query plan.
/*
 * inheritance_planner
 *	  Plan an UPDATE/DELETE whose target is an inheritance set: run
 *	  grouping_planner once per child rel (with the Query translated via
 *	  adjust_appendrel_attrs), collect the per-child plans under an Append,
 *	  and record per-child result relations and RETURNING lists.
 *	  See the comment block above for why target inheritance must be
 *	  expanded at the top of the plan tree.
 *	  NOTE(review): interior lines are elided in this extract; braces,
 *	  some declarations, and some argument lines are not visible.
 */
631 inheritance_planner(PlannerInfo *root)
633 Query *parse = root->parse;
634 int parentRTindex = parse->resultRelation;
635 List *subplans = NIL;
636 List *resultRelations = NIL;
637 List *returningLists = NIL;
643 foreach(l, root->append_rel_list)
645 AppendRelInfo *appinfo = (AppendRelInfo *) lfirst(l);
648 /* append_rel_list contains all append rels; ignore others */
649 if (appinfo->parent_relid != parentRTindex)
653 * Generate modified query with this rel as target.
/* flat-copy the parent PlannerInfo so each child plans from a fresh state */
655 memcpy(&subroot, root, sizeof(PlannerInfo));
656 subroot.parse = (Query *)
657 adjust_appendrel_attrs((Node *) parse,
659 subroot.init_plans = NIL;
660 /* There shouldn't be any OJ info to translate, as yet */
661 Assert(subroot.join_info_list == NIL);
664 subplan = grouping_planner(&subroot, 0.0 /* retrieve all tuples */ );
667 * If this child rel was excluded by constraint exclusion, exclude it
670 if (is_dummy_plan(subplan))
673 /* Save rtable and tlist from first rel for use below */
676 rtable = subroot.parse->rtable;
677 tlist = subplan->targetlist;
680 subplans = lappend(subplans, subplan);
682 /* Make sure any initplans from this rel get into the outer list */
683 root->init_plans = list_concat(root->init_plans, subroot.init_plans);
685 /* Build target-relations list for the executor */
686 resultRelations = lappend_int(resultRelations, appinfo->child_relid);
688 /* Build list of per-relation RETURNING targetlists */
689 if (parse->returningList)
691 Assert(list_length(subroot.returningLists) == 1);
692 returningLists = list_concat(returningLists,
693 subroot.returningLists);
697 root->resultRelations = resultRelations;
698 root->returningLists = returningLists;
700 /* Mark result as unordered (probably unnecessary) */
701 root->query_pathkeys = NIL;
704 * If we managed to exclude every child rel, return a dummy plan
/* a gating Result with constant-false qual produces zero rows */
708 root->resultRelations = list_make1_int(parentRTindex);
709 /* although dummy, it must have a valid tlist for executor */
710 tlist = preprocess_targetlist(root, parse->targetList);
711 return (Plan *) make_result(root,
713 (Node *) list_make1(makeBoolConst(false,
719 * Planning might have modified the rangetable, due to changes of the
720 * Query structures inside subquery RTEs. We have to ensure that this
721 * gets propagated back to the master copy. But can't do this until we
722 * are done planning, because all the calls to grouping_planner need
723 * virgin sub-Queries to work from. (We are effectively assuming that
724 * sub-Queries will get planned identically each time, or at least that
725 * the impacts on their rangetables will be the same each time.)
727 * XXX should clean this up someday
729 parse->rtable = rtable;
731 /* Suppress Append if there's only one surviving child rel */
732 if (list_length(subplans) == 1)
733 return (Plan *) linitial(subplans);
735 return (Plan *) make_append(subplans, true, tlist);
738 /*--------------------
740 * Perform planning steps related to grouping, aggregation, etc.
741 * This primarily means adding top-level processing to the basic
742 * query plan produced by query_planner.
744 * tuple_fraction is the fraction of tuples we expect will be retrieved
746 * tuple_fraction is interpreted as follows:
747 * 0: expect all tuples to be retrieved (normal case)
748 * 0 < tuple_fraction < 1: expect the given fraction of tuples available
749 * from the plan to be retrieved
750 * tuple_fraction >= 1: tuple_fraction is the absolute number of tuples
751 * expected to be retrieved (ie, a LIMIT specification)
753 * Returns a query plan. Also, root->query_pathkeys is returned as the
754 * actual output ordering of the plan (in pathkey format).
755 *--------------------
758 grouping_planner(PlannerInfo *root, double tuple_fraction)
760 Query *parse = root->parse;
761 List *tlist = parse->targetList;
762 int64 offset_est = 0;
764 double limit_tuples = -1.0;
766 List *current_pathkeys;
767 double dNumGroups = 0;
769 /* Tweak caller-supplied tuple_fraction if have LIMIT/OFFSET */
770 if (parse->limitCount || parse->limitOffset)
772 tuple_fraction = preprocess_limit(root, tuple_fraction,
773 &offset_est, &count_est);
776 * If we have a known LIMIT, and don't have an unknown OFFSET, we can
777 * estimate the effects of using a bounded sort.
779 if (count_est > 0 && offset_est >= 0)
780 limit_tuples = (double) count_est + (double) offset_est;
783 if (parse->setOperations)
785 List *set_sortclauses;
788 * If there's a top-level ORDER BY, assume we have to fetch all the
789 * tuples. This might be too simplistic given all the hackery below
790 * to possibly avoid the sort; but the odds of accurate estimates
791 * here are pretty low anyway.
793 if (parse->sortClause)
794 tuple_fraction = 0.0;
797 * Construct the plan for set operations. The result will not need
798 * any work except perhaps a top-level sort and/or LIMIT. Note that
799 * any special work for recursive unions is the responsibility of
800 * plan_set_operations.
802 result_plan = plan_set_operations(root, tuple_fraction,
806 * Calculate pathkeys representing the sort order (if any) of the set
807 * operation's result. We have to do this before overwriting the sort
810 current_pathkeys = make_pathkeys_for_sortclauses(root,
812 result_plan->targetlist,
816 * We should not need to call preprocess_targetlist, since we must be
817 * in a SELECT query node. Instead, use the targetlist returned by
818 * plan_set_operations (since this tells whether it returned any
819 * resjunk columns!), and transfer any sort key information from the
822 Assert(parse->commandType == CMD_SELECT);
824 tlist = postprocess_setop_tlist(copyObject(result_plan->targetlist),
828 * Can't handle FOR UPDATE/SHARE here (parser should have checked
829 * already, but let's make sure).
833 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
834 errmsg("SELECT FOR UPDATE/SHARE is not allowed with UNION/INTERSECT/EXCEPT")));
837 * Calculate pathkeys that represent result ordering requirements
839 Assert(parse->distinctClause == NIL);
840 root->sort_pathkeys = make_pathkeys_for_sortclauses(root,
847 /* No set operations, do regular planning */
849 AttrNumber *groupColIdx = NULL;
850 bool need_tlist_eval = true;
856 AggClauseCounts agg_counts;
858 bool use_hashed_grouping = false;
860 MemSet(&agg_counts, 0, sizeof(AggClauseCounts));
862 /* A recursive query should always have setOperations */
863 Assert(!root->hasRecursion);
865 /* Preprocess GROUP BY clause, if any */
866 if (parse->groupClause)
867 preprocess_groupclause(root);
868 numGroupCols = list_length(parse->groupClause);
870 /* Preprocess targetlist */
871 tlist = preprocess_targetlist(root, tlist);
874 * Generate appropriate target list for subplan; may be different from
875 * tlist if grouping or aggregation is needed.
877 sub_tlist = make_subplanTargetList(root, tlist,
878 &groupColIdx, &need_tlist_eval);
881 * Calculate pathkeys that represent grouping/ordering requirements.
882 * Stash them in PlannerInfo so that query_planner can canonicalize
883 * them after EquivalenceClasses have been formed. The sortClause
884 * is certainly sort-able, but GROUP BY and DISTINCT might not be,
885 * in which case we just leave their pathkeys empty.
887 if (parse->groupClause &&
888 grouping_is_sortable(parse->groupClause))
889 root->group_pathkeys =
890 make_pathkeys_for_sortclauses(root,
895 root->group_pathkeys = NIL;
897 if (parse->distinctClause &&
898 grouping_is_sortable(parse->distinctClause))
899 root->distinct_pathkeys =
900 make_pathkeys_for_sortclauses(root,
901 parse->distinctClause,
905 root->distinct_pathkeys = NIL;
907 root->sort_pathkeys =
908 make_pathkeys_for_sortclauses(root,
914 * Will need actual number of aggregates for estimating costs.
916 * Note: we do not attempt to detect duplicate aggregates here; a
917 * somewhat-overestimated count is okay for our present purposes.
919 * Note: think not that we can turn off hasAggs if we find no aggs. It
920 * is possible for constant-expression simplification to remove all
921 * explicit references to aggs, but we still have to follow the
922 * aggregate semantics (eg, producing only one output row).
926 count_agg_clauses((Node *) tlist, &agg_counts);
927 count_agg_clauses(parse->havingQual, &agg_counts);
931 * Figure out whether we want a sorted result from query_planner.
933 * If we have a sortable GROUP BY clause, then we want a result sorted
934 * properly for grouping. Otherwise, if there's a sortable DISTINCT
935 * clause that's more rigorous than the ORDER BY clause, we try to
936 * produce output that's sufficiently well sorted for the DISTINCT.
937 * Otherwise, if there is an ORDER BY clause, we want to sort by the
940 * Note: if we have both ORDER BY and GROUP BY, and ORDER BY is a
941 * superset of GROUP BY, it would be tempting to request sort by ORDER
942 * BY --- but that might just leave us failing to exploit an available
943 * sort order at all. Needs more thought. The choice for DISTINCT
944 * versus ORDER BY is much easier, since we know that the parser
945 * ensured that one is a superset of the other.
947 if (root->group_pathkeys)
948 root->query_pathkeys = root->group_pathkeys;
949 else if (list_length(root->distinct_pathkeys) >
950 list_length(root->sort_pathkeys))
951 root->query_pathkeys = root->distinct_pathkeys;
952 else if (root->sort_pathkeys)
953 root->query_pathkeys = root->sort_pathkeys;
955 root->query_pathkeys = NIL;
958 * Generate the best unsorted and presorted paths for this Query (but
959 * note there may not be any presorted path). query_planner will also
960 * estimate the number of groups in the query, and canonicalize all
963 query_planner(root, sub_tlist, tuple_fraction, limit_tuples,
964 &cheapest_path, &sorted_path, &dNumGroups);
967 * If grouping, decide whether to use sorted or hashed grouping.
969 if (parse->groupClause)
975 * Executor doesn't support hashed aggregation with DISTINCT
976 * aggregates. (Doing so would imply storing *all* the input
977 * values in the hash table, which seems like a certain loser.)
979 can_hash = (agg_counts.numDistinctAggs == 0 &&
980 grouping_is_hashable(parse->groupClause));
981 can_sort = grouping_is_sortable(parse->groupClause);
982 if (can_hash && can_sort)
984 /* we have a meaningful choice to make ... */
985 use_hashed_grouping =
986 choose_hashed_grouping(root,
987 tuple_fraction, limit_tuples,
988 cheapest_path, sorted_path,
989 dNumGroups, &agg_counts);
992 use_hashed_grouping = true;
994 use_hashed_grouping = false;
997 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
998 errmsg("could not implement GROUP BY"),
999 errdetail("Some of the datatypes only support hashing, while others only support sorting.")));
1001 /* Also convert # groups to long int --- but 'ware overflow! */
1002 numGroups = (long) Min(dNumGroups, (double) LONG_MAX);
1006 * Select the best path. If we are doing hashed grouping, we will
1007 * always read all the input tuples, so use the cheapest-total path.
1008 * Otherwise, trust query_planner's decision about which to use.
1010 if (use_hashed_grouping || !sorted_path)
1011 best_path = cheapest_path;
1013 best_path = sorted_path;
1016 * Check to see if it's possible to optimize MIN/MAX aggregates. If
1017 * so, we will forget all the work we did so far to choose a "regular"
1018 * path ... but we had to do it anyway to be able to tell which way is
1021 result_plan = optimize_minmax_aggregates(root,
1024 if (result_plan != NULL)
1027 * optimize_minmax_aggregates generated the full plan, with the
1028 * right tlist, and it has no sort order.
1030 current_pathkeys = NIL;
1035 * Normal case --- create a plan according to query_planner's
1038 bool need_sort_for_grouping = false;
1040 result_plan = create_plan(root, best_path);
1041 current_pathkeys = best_path->pathkeys;
1043 /* Detect if we'll need an explicit sort for grouping */
1044 if (parse->groupClause && !use_hashed_grouping &&
1045 !pathkeys_contained_in(root->group_pathkeys, current_pathkeys))
1047 need_sort_for_grouping = true;
1049 * Always override query_planner's tlist, so that we don't
1050 * sort useless data from a "physical" tlist.
1052 need_tlist_eval = true;
1056 * create_plan() returns a plan with just a "flat" tlist of
1057 * required Vars. Usually we need to insert the sub_tlist as the
1058 * tlist of the top plan node. However, we can skip that if we
1059 * determined that whatever query_planner chose to return will be
1062 if (need_tlist_eval)
1065 * If the top-level plan node is one that cannot do expression
1066 * evaluation, we must insert a Result node to project the
1069 if (!is_projection_capable_plan(result_plan))
1071 result_plan = (Plan *) make_result(root,
1079 * Otherwise, just replace the subplan's flat tlist with
1080 * the desired tlist.
1082 result_plan->targetlist = sub_tlist;
1086 * Also, account for the cost of evaluation of the sub_tlist.
1088 * Up to now, we have only been dealing with "flat" tlists,
1089 * containing just Vars. So their evaluation cost is zero
1090 * according to the model used by cost_qual_eval() (or if you
1091 * prefer, the cost is factored into cpu_tuple_cost). Thus we
1092 * can avoid accounting for tlist cost throughout
1093 * query_planner() and subroutines. But now we've inserted a
1094 * tlist that might contain actual operators, sub-selects, etc
1095 * --- so we'd better account for its cost.
1097 * Below this point, any tlist eval cost for added-on nodes
1098 * should be accounted for as we create those nodes.
1099 * Presently, of the node types we can add on, only Agg and
1100 * Group project new tlists (the rest just copy their input
1101 * tuples) --- so make_agg() and make_group() are responsible
1102 * for computing the added cost.
1104 cost_qual_eval(&tlist_cost, sub_tlist, root);
1105 result_plan->startup_cost += tlist_cost.startup;
1106 result_plan->total_cost += tlist_cost.startup +
1107 tlist_cost.per_tuple * result_plan->plan_rows;
1112 * Since we're using query_planner's tlist and not the one
1113 * make_subplanTargetList calculated, we have to refigure any
1114 * grouping-column indexes make_subplanTargetList computed.
1116 locate_grouping_columns(root, tlist, result_plan->targetlist,
1121 * Insert AGG or GROUP node if needed, plus an explicit sort step
1124 * HAVING clause, if any, becomes qual of the Agg or Group node.
1126 if (use_hashed_grouping)
1128 /* Hashed aggregate plan --- no sort needed */
1129 result_plan = (Plan *) make_agg(root,
1131 (List *) parse->havingQual,
1135 extract_grouping_ops(parse->groupClause),
1139 /* Hashed aggregation produces randomly-ordered results */
1140 current_pathkeys = NIL;
1142 else if (parse->hasAggs)
1144 /* Plain aggregate plan --- sort if needed */
1145 AggStrategy aggstrategy;
1147 if (parse->groupClause)
1149 if (need_sort_for_grouping)
1151 result_plan = (Plan *)
1152 make_sort_from_groupcols(root,
1156 current_pathkeys = root->group_pathkeys;
1158 aggstrategy = AGG_SORTED;
1161 * The AGG node will not change the sort ordering of its
1162 * groups, so current_pathkeys describes the result too.
1167 aggstrategy = AGG_PLAIN;
1168 /* Result will be only one row anyway; no sort order */
1169 current_pathkeys = NIL;
1172 result_plan = (Plan *) make_agg(root,
1174 (List *) parse->havingQual,
1178 extract_grouping_ops(parse->groupClause),
1183 else if (parse->groupClause)
1186 * GROUP BY without aggregation, so insert a group node (plus
1187 * the appropriate sort node, if necessary).
1189 * Add an explicit sort if we couldn't make the path come out
1190 * the way the GROUP node needs it.
1192 if (need_sort_for_grouping)
1194 result_plan = (Plan *)
1195 make_sort_from_groupcols(root,
1199 current_pathkeys = root->group_pathkeys;
1202 result_plan = (Plan *) make_group(root,
1204 (List *) parse->havingQual,
1207 extract_grouping_ops(parse->groupClause),
1210 /* The Group node won't change sort ordering */
1212 else if (root->hasHavingQual)
1215 * No aggregates, and no GROUP BY, but we have a HAVING qual.
1216 * This is a degenerate case in which we are supposed to emit
1217 * either 0 or 1 row depending on whether HAVING succeeds.
1218 * Furthermore, there cannot be any variables in either HAVING
1219 * or the targetlist, so we actually do not need the FROM
1220 * table at all! We can just throw away the plan-so-far and
1221 * generate a Result node. This is a sufficiently unusual
1222 * corner case that it's not worth contorting the structure of
1223 * this routine to avoid having to generate the plan in the
1226 result_plan = (Plan *) make_result(root,
1231 } /* end of non-minmax-aggregate case */
1232 } /* end of if (setOperations) */
1235 * If there is a DISTINCT clause, add the necessary node(s).
1237 if (parse->distinctClause)
1239 double dNumDistinctRows;
1240 long numDistinctRows;
1241 bool use_hashed_distinct;
1246 * If there was grouping or aggregation, use the current number of
1247 * rows as the estimated number of DISTINCT rows (ie, assume the
1248 * result was already mostly unique). If not, use the number of
1249 * distinct-groups calculated by query_planner.
1251 if (parse->groupClause || root->hasHavingQual || parse->hasAggs)
1252 dNumDistinctRows = result_plan->plan_rows;
1254 dNumDistinctRows = dNumGroups;
1256 /* Also convert to long int --- but 'ware overflow! */
1257 numDistinctRows = (long) Min(dNumDistinctRows, (double) LONG_MAX);
1260 * If we have a sortable DISTINCT ON clause, we always use sorting.
1261 * This enforces the expected behavior of DISTINCT ON.
1263 can_sort = grouping_is_sortable(parse->distinctClause);
1264 if (can_sort && parse->hasDistinctOn)
1265 use_hashed_distinct = false;
1268 can_hash = grouping_is_hashable(parse->distinctClause);
1269 if (can_hash && can_sort)
1271 /* we have a meaningful choice to make ... */
1272 use_hashed_distinct =
1273 choose_hashed_distinct(root,
1274 result_plan, current_pathkeys,
1275 tuple_fraction, limit_tuples,
1279 use_hashed_distinct = true;
1281 use_hashed_distinct = false;
1285 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1286 errmsg("could not implement DISTINCT"),
1287 errdetail("Some of the datatypes only support hashing, while others only support sorting.")));
1288 use_hashed_distinct = false; /* keep compiler quiet */
1292 if (use_hashed_distinct)
1294 /* Hashed aggregate plan --- no sort needed */
1295 result_plan = (Plan *) make_agg(root,
1296 result_plan->targetlist,
1299 list_length(parse->distinctClause),
1300 extract_grouping_cols(parse->distinctClause,
1301 result_plan->targetlist),
1302 extract_grouping_ops(parse->distinctClause),
1306 /* Hashed aggregation produces randomly-ordered results */
1307 current_pathkeys = NIL;
1312 * Use a Unique node to implement DISTINCT. Add an explicit sort
1313 * if we couldn't make the path come out the way the Unique node
1314 * needs it. If we do have to sort, always sort by the more
1315 * rigorous of DISTINCT and ORDER BY, to avoid a second sort
1316 * below. However, for regular DISTINCT, don't sort now if we
1317 * don't have to --- sorting afterwards will likely be cheaper,
1318 * and also has the possibility of optimizing via LIMIT. But
1319 * for DISTINCT ON, we *must* force the final sort now, else
1320 * it won't have the desired behavior.
1322 List *needed_pathkeys;
1324 if (parse->hasDistinctOn &&
1325 list_length(root->distinct_pathkeys) <
1326 list_length(root->sort_pathkeys))
1327 needed_pathkeys = root->sort_pathkeys;
1329 needed_pathkeys = root->distinct_pathkeys;
1331 if (!pathkeys_contained_in(needed_pathkeys, current_pathkeys))
1333 if (list_length(root->distinct_pathkeys) >=
1334 list_length(root->sort_pathkeys))
1335 current_pathkeys = root->distinct_pathkeys;
1338 current_pathkeys = root->sort_pathkeys;
1339 /* Assert checks that parser didn't mess up... */
1340 Assert(pathkeys_contained_in(root->distinct_pathkeys,
1344 result_plan = (Plan *) make_sort_from_pathkeys(root,
1350 result_plan = (Plan *) make_unique(result_plan,
1351 parse->distinctClause);
1352 result_plan->plan_rows = dNumDistinctRows;
1353 /* The Unique node won't change sort ordering */
1358 * If ORDER BY was given and we were not able to make the plan come out in
1359 * the right order, add an explicit sort step.
1361 if (parse->sortClause)
1363 if (!pathkeys_contained_in(root->sort_pathkeys, current_pathkeys))
1365 result_plan = (Plan *) make_sort_from_pathkeys(root,
1367 root->sort_pathkeys,
1369 current_pathkeys = root->sort_pathkeys;
1374 * Finally, if there is a LIMIT/OFFSET clause, add the LIMIT node.
1376 if (parse->limitCount || parse->limitOffset)
1378 result_plan = (Plan *) make_limit(result_plan,
1386 * Deal with the RETURNING clause if any. It's convenient to pass the
1387 * returningList through setrefs.c now rather than at top level (if we
1388 * waited, handling inherited UPDATE/DELETE would be much harder).
1390 if (parse->returningList)
1394 Assert(parse->resultRelation);
1395 rlist = set_returning_clause_references(root->glob,
1396 parse->returningList,
1398 parse->resultRelation);
1399 root->returningLists = list_make1(rlist);
1402 root->returningLists = NIL;
1404 /* Compute result-relations list if needed */
1405 if (parse->resultRelation)
1406 root->resultRelations = list_make1_int(parse->resultRelation);
1408 root->resultRelations = NIL;
1411 * Return the actual output ordering in query_pathkeys for possible use by
1412 * an outer query level.
1414 root->query_pathkeys = current_pathkeys;
1420 * Detect whether a plan node is a "dummy" plan created when a relation
1421 * is deemed not to need scanning due to constraint exclusion.
1423 * Currently, such dummy plans are Result nodes with constant FALSE
1427 is_dummy_plan(Plan *plan)
1429 if (IsA(plan, Result))
1431 List *rcqual = (List *) ((Result *) plan)->resconstantqual;
1433 if (list_length(rcqual) == 1)
1435 Const *constqual = (Const *) linitial(rcqual);
1437 if (constqual && IsA(constqual, Const))
1439 if (!constqual->constisnull &&
1440 !DatumGetBool(constqual->constvalue))
1449 * preprocess_limit - do pre-estimation for LIMIT and/or OFFSET clauses
1451 * We try to estimate the values of the LIMIT/OFFSET clauses, and pass the
1452 * results back in *count_est and *offset_est. These variables are set to
1453 * 0 if the corresponding clause is not present, and -1 if it's present
1454 * but we couldn't estimate the value for it. (The "0" convention is OK
1455 * for OFFSET but a little bit bogus for LIMIT: effectively we estimate
1456 * LIMIT 0 as though it were LIMIT 1. But this is in line with the planner's
1457 * usual practice of never estimating less than one row.) These values will
1458 * be passed to make_limit, which see if you change this code.
1460 * The return value is the suitably adjusted tuple_fraction to use for
1461 * planning the query. This adjustment is not overridable, since it reflects
1462 * plan actions that grouping_planner() will certainly take, not assumptions
1466 preprocess_limit(PlannerInfo *root, double tuple_fraction,
1467 int64 *offset_est, int64 *count_est)
1469 Query *parse = root->parse;
1471 double limit_fraction;
1473 /* Should not be called unless LIMIT or OFFSET */
1474 Assert(parse->limitCount || parse->limitOffset);
1477 * Try to obtain the clause values. We use estimate_expression_value
1478 * primarily because it can sometimes do something useful with Params.
1480 if (parse->limitCount)
1482 est = estimate_expression_value(root, parse->limitCount);
1483 if (est && IsA(est, Const))
1485 if (((Const *) est)->constisnull)
1487 /* NULL indicates LIMIT ALL, ie, no limit */
1488 *count_est = 0; /* treat as not present */
1492 *count_est = DatumGetInt64(((Const *) est)->constvalue);
1493 if (*count_est <= 0)
1494 *count_est = 1; /* force to at least 1 */
1498 *count_est = -1; /* can't estimate */
1501 *count_est = 0; /* not present */
1503 if (parse->limitOffset)
1505 est = estimate_expression_value(root, parse->limitOffset);
1506 if (est && IsA(est, Const))
1508 if (((Const *) est)->constisnull)
1510 /* Treat NULL as no offset; the executor will too */
1511 *offset_est = 0; /* treat as not present */
1515 *offset_est = DatumGetInt64(((Const *) est)->constvalue);
1516 if (*offset_est < 0)
1517 *offset_est = 0; /* less than 0 is same as 0 */
1521 *offset_est = -1; /* can't estimate */
1524 *offset_est = 0; /* not present */
1526 if (*count_est != 0)
1529 * A LIMIT clause limits the absolute number of tuples returned.
1530 * However, if it's not a constant LIMIT then we have to guess; for
1531 * lack of a better idea, assume 10% of the plan's result is wanted.
1533 if (*count_est < 0 || *offset_est < 0)
1535 /* LIMIT or OFFSET is an expression ... punt ... */
1536 limit_fraction = 0.10;
1540 /* LIMIT (plus OFFSET, if any) is max number of tuples needed */
1541 limit_fraction = (double) *count_est + (double) *offset_est;
1545 * If we have absolute limits from both caller and LIMIT, use the
1546 * smaller value; likewise if they are both fractional. If one is
1547 * fractional and the other absolute, we can't easily determine which
1548 * is smaller, but we use the heuristic that the absolute will usually
1551 if (tuple_fraction >= 1.0)
1553 if (limit_fraction >= 1.0)
1556 tuple_fraction = Min(tuple_fraction, limit_fraction);
1560 /* caller absolute, limit fractional; use caller's value */
1563 else if (tuple_fraction > 0.0)
1565 if (limit_fraction >= 1.0)
1567 /* caller fractional, limit absolute; use limit */
1568 tuple_fraction = limit_fraction;
1572 /* both fractional */
1573 tuple_fraction = Min(tuple_fraction, limit_fraction);
1578 /* no info from caller, just use limit */
1579 tuple_fraction = limit_fraction;
1582 else if (*offset_est != 0 && tuple_fraction > 0.0)
1585 * We have an OFFSET but no LIMIT. This acts entirely differently
1586 * from the LIMIT case: here, we need to increase rather than decrease
1587 * the caller's tuple_fraction, because the OFFSET acts to cause more
1588 * tuples to be fetched instead of fewer. This only matters if we got
1589 * a tuple_fraction > 0, however.
1591 * As above, use 10% if OFFSET is present but unestimatable.
1593 if (*offset_est < 0)
1594 limit_fraction = 0.10;
1596 limit_fraction = (double) *offset_est;
1599 * If we have absolute counts from both caller and OFFSET, add them
1600 * together; likewise if they are both fractional. If one is
1601 * fractional and the other absolute, we want to take the larger, and
1602 * we heuristically assume that's the fractional one.
1604 if (tuple_fraction >= 1.0)
1606 if (limit_fraction >= 1.0)
1608 /* both absolute, so add them together */
1609 tuple_fraction += limit_fraction;
1613 /* caller absolute, limit fractional; use limit */
1614 tuple_fraction = limit_fraction;
1619 if (limit_fraction >= 1.0)
1621 /* caller fractional, limit absolute; use caller's value */
1625 /* both fractional, so add them together */
1626 tuple_fraction += limit_fraction;
1627 if (tuple_fraction >= 1.0)
1628 tuple_fraction = 0.0; /* assume fetch all */
1633 return tuple_fraction;
1638 * preprocess_groupclause - do preparatory work on GROUP BY clause
1640 * The idea here is to adjust the ordering of the GROUP BY elements
1641 * (which in itself is semantically insignificant) to match ORDER BY,
1642 * thereby allowing a single sort operation to both implement the ORDER BY
1643 * requirement and set up for a Unique step that implements GROUP BY.
1645 * In principle it might be interesting to consider other orderings of the
1646 * GROUP BY elements, which could match the sort ordering of other
1647 * possible plans (eg an indexscan) and thereby reduce cost. We don't
1648 * bother with that, though. Hashed grouping will frequently win anyway.
1650 * Note: we need no comparable processing of the distinctClause because
1651 * the parser already enforced that that matches ORDER BY.
1654 preprocess_groupclause(PlannerInfo *root)
1656 Query *parse = root->parse;
1657 List *new_groupclause;
1662 /* If no ORDER BY, nothing useful to do here */
1663 if (parse->sortClause == NIL)
1667 * Scan the ORDER BY clause and construct a list of matching GROUP BY
1668 * items, but only as far as we can make a matching prefix.
1670 * This code assumes that the sortClause contains no duplicate items.
1672 new_groupclause = NIL;
1673 foreach(sl, parse->sortClause)
1675 SortGroupClause *sc = (SortGroupClause *) lfirst(sl);
1677 foreach(gl, parse->groupClause)
1679 SortGroupClause *gc = (SortGroupClause *) lfirst(gl);
1683 new_groupclause = lappend(new_groupclause, gc);
1688 break; /* no match, so stop scanning */
1691 /* Did we match all of the ORDER BY list, or just some of it? */
1692 partial_match = (sl != NULL);
1694 /* If no match at all, no point in reordering GROUP BY */
1695 if (new_groupclause == NIL)
1699 * Add any remaining GROUP BY items to the new list, but only if we
1700 * were able to make a complete match. In other words, we only
1701 * rearrange the GROUP BY list if the result is that one list is a
1702 * prefix of the other --- otherwise there's no possibility of a
1703 * common sort. Also, give up if there are any non-sortable GROUP BY
1704 * items, since then there's no hope anyway.
1706 foreach(gl, parse->groupClause)
1708 SortGroupClause *gc = (SortGroupClause *) lfirst(gl);
1710 if (list_member_ptr(new_groupclause, gc))
1711 continue; /* it matched an ORDER BY item */
1713 return; /* give up, no common sort possible */
1714 if (!OidIsValid(gc->sortop))
1715 return; /* give up, GROUP BY can't be sorted */
1716 new_groupclause = lappend(new_groupclause, gc);
1719 /* Success --- install the rearranged GROUP BY list */
1720 Assert(list_length(parse->groupClause) == list_length(new_groupclause));
1721 parse->groupClause = new_groupclause;
1725 * choose_hashed_grouping - should we use hashed grouping?
1727 * Note: this is only applied when both alternatives are actually feasible.
1730 choose_hashed_grouping(PlannerInfo *root,
1731 double tuple_fraction, double limit_tuples,
1732 Path *cheapest_path, Path *sorted_path,
1733 double dNumGroups, AggClauseCounts *agg_counts)
1735 int numGroupCols = list_length(root->parse->groupClause);
1736 double cheapest_path_rows;
1737 int cheapest_path_width;
1739 List *target_pathkeys;
1740 List *current_pathkeys;
1744 /* Prefer sorting when enable_hashagg is off */
1745 if (!enable_hashagg)
1749 * Don't do it if it doesn't look like the hashtable will fit into
1752 * Beware here of the possibility that cheapest_path->parent is NULL. This
1753 * could happen if user does something silly like SELECT 'foo' GROUP BY 1;
1755 if (cheapest_path->parent)
1757 cheapest_path_rows = cheapest_path->parent->rows;
1758 cheapest_path_width = cheapest_path->parent->width;
1762 cheapest_path_rows = 1; /* assume non-set result */
1763 cheapest_path_width = 100; /* arbitrary */
1766 /* Estimate per-hash-entry space at tuple width... */
1767 hashentrysize = MAXALIGN(cheapest_path_width) + MAXALIGN(sizeof(MinimalTupleData));
1768 /* plus space for pass-by-ref transition values... */
1769 hashentrysize += agg_counts->transitionSpace;
1770 /* plus the per-hash-entry overhead */
1771 hashentrysize += hash_agg_entry_size(agg_counts->numAggs);
1773 if (hashentrysize * dNumGroups > work_mem * 1024L)
1777 * When we have both GROUP BY and DISTINCT, use the more-rigorous of
1778 * DISTINCT and ORDER BY as the assumed required output sort order.
1779 * This is an oversimplification because the DISTINCT might get
1780 * implemented via hashing, but it's not clear that the case is common
1781 * enough (or that our estimates are good enough) to justify trying to
1784 if (list_length(root->distinct_pathkeys) >
1785 list_length(root->sort_pathkeys))
1786 target_pathkeys = root->distinct_pathkeys;
1788 target_pathkeys = root->sort_pathkeys;
1791 * See if the estimated cost is no more than doing it the other way. While
1792 * avoiding the need for sorted input is usually a win, the fact that the
1793 * output won't be sorted may be a loss; so we need to do an actual cost
1796 * We need to consider cheapest_path + hashagg [+ final sort] versus
1797 * either cheapest_path [+ sort] + group or agg [+ final sort] or
1798 * presorted_path + group or agg [+ final sort] where brackets indicate a
1799 * step that may not be needed. We assume query_planner() will have
1800 * returned a presorted path only if it's a winner compared to
1801 * cheapest_path for this purpose.
1803 * These path variables are dummies that just hold cost fields; we don't
1804 * make actual Paths for these steps.
1806 cost_agg(&hashed_p, root, AGG_HASHED, agg_counts->numAggs,
1807 numGroupCols, dNumGroups,
1808 cheapest_path->startup_cost, cheapest_path->total_cost,
1809 cheapest_path_rows);
1810 /* Result of hashed agg is always unsorted */
1811 if (target_pathkeys)
1812 cost_sort(&hashed_p, root, target_pathkeys, hashed_p.total_cost,
1813 dNumGroups, cheapest_path_width, limit_tuples);
1817 sorted_p.startup_cost = sorted_path->startup_cost;
1818 sorted_p.total_cost = sorted_path->total_cost;
1819 current_pathkeys = sorted_path->pathkeys;
1823 sorted_p.startup_cost = cheapest_path->startup_cost;
1824 sorted_p.total_cost = cheapest_path->total_cost;
1825 current_pathkeys = cheapest_path->pathkeys;
1827 if (!pathkeys_contained_in(root->group_pathkeys, current_pathkeys))
1829 cost_sort(&sorted_p, root, root->group_pathkeys, sorted_p.total_cost,
1830 cheapest_path_rows, cheapest_path_width, -1.0);
1831 current_pathkeys = root->group_pathkeys;
1834 if (root->parse->hasAggs)
1835 cost_agg(&sorted_p, root, AGG_SORTED, agg_counts->numAggs,
1836 numGroupCols, dNumGroups,
1837 sorted_p.startup_cost, sorted_p.total_cost,
1838 cheapest_path_rows);
1840 cost_group(&sorted_p, root, numGroupCols, dNumGroups,
1841 sorted_p.startup_cost, sorted_p.total_cost,
1842 cheapest_path_rows);
1843 /* The Agg or Group node will preserve ordering */
1844 if (target_pathkeys &&
1845 !pathkeys_contained_in(target_pathkeys, current_pathkeys))
1846 cost_sort(&sorted_p, root, target_pathkeys, sorted_p.total_cost,
1847 dNumGroups, cheapest_path_width, limit_tuples);
1850 * Now make the decision using the top-level tuple fraction. First we
1851 * have to convert an absolute count (LIMIT) into fractional form.
1853 if (tuple_fraction >= 1.0)
1854 tuple_fraction /= dNumGroups;
1856 if (compare_fractional_path_costs(&hashed_p, &sorted_p,
1857 tuple_fraction) < 0)
1859 /* Hashed is cheaper, so use it */
1866 * choose_hashed_distinct - should we use hashing for DISTINCT?
1868 * This is fairly similar to choose_hashed_grouping, but there are enough
1869 * differences that it doesn't seem worth trying to unify the two functions.
1871 * But note that making the two choices independently is a bit bogus in
1872 * itself. If the two could be combined into a single choice operation
1873 * it'd probably be better, but that seems far too unwieldy to be practical,
1874 * especially considering that the combination of GROUP BY and DISTINCT
1875 * isn't very common in real queries. By separating them, we are giving
1876 * extra preference to using a sorting implementation when a common sort key
1877 * is available ... and that's not necessarily wrong anyway.
1879 * Note: this is only applied when both alternatives are actually feasible.
1882 choose_hashed_distinct(PlannerInfo *root,
1883 Plan *input_plan, List *input_pathkeys,
1884 double tuple_fraction, double limit_tuples,
1885 double dNumDistinctRows)
1887 int numDistinctCols = list_length(root->parse->distinctClause);
1889 List *current_pathkeys;
1890 List *needed_pathkeys;
1894 /* Prefer sorting when enable_hashagg is off */
1895 if (!enable_hashagg)
1899 * Don't do it if it doesn't look like the hashtable will fit into
1902 hashentrysize = MAXALIGN(input_plan->plan_width) + MAXALIGN(sizeof(MinimalTupleData));
1904 if (hashentrysize * dNumDistinctRows > work_mem * 1024L)
1908 * See if the estimated cost is no more than doing it the other way. While
1909 * avoiding the need for sorted input is usually a win, the fact that the
1910 * output won't be sorted may be a loss; so we need to do an actual cost
1913 * We need to consider input_plan + hashagg [+ final sort] versus
1914 * input_plan [+ sort] + group [+ final sort] where brackets indicate
1915 * a step that may not be needed.
1917 * These path variables are dummies that just hold cost fields; we don't
1918 * make actual Paths for these steps.
1920 cost_agg(&hashed_p, root, AGG_HASHED, 0,
1921 numDistinctCols, dNumDistinctRows,
1922 input_plan->startup_cost, input_plan->total_cost,
1923 input_plan->plan_rows);
1925 * Result of hashed agg is always unsorted, so if ORDER BY is present
1926 * we need to charge for the final sort.
1928 if (root->parse->sortClause)
1929 cost_sort(&hashed_p, root, root->sort_pathkeys, hashed_p.total_cost,
1930 dNumDistinctRows, input_plan->plan_width, limit_tuples);
1933 * Now for the GROUP case. See comments in grouping_planner about the
1934 * sorting choices here --- this code should match that code.
1936 sorted_p.startup_cost = input_plan->startup_cost;
1937 sorted_p.total_cost = input_plan->total_cost;
1938 current_pathkeys = input_pathkeys;
1939 if (root->parse->hasDistinctOn &&
1940 list_length(root->distinct_pathkeys) <
1941 list_length(root->sort_pathkeys))
1942 needed_pathkeys = root->sort_pathkeys;
1944 needed_pathkeys = root->distinct_pathkeys;
1945 if (!pathkeys_contained_in(needed_pathkeys, current_pathkeys))
1947 if (list_length(root->distinct_pathkeys) >=
1948 list_length(root->sort_pathkeys))
1949 current_pathkeys = root->distinct_pathkeys;
1951 current_pathkeys = root->sort_pathkeys;
1952 cost_sort(&sorted_p, root, current_pathkeys, sorted_p.total_cost,
1953 input_plan->plan_rows, input_plan->plan_width, -1.0);
1955 cost_group(&sorted_p, root, numDistinctCols, dNumDistinctRows,
1956 sorted_p.startup_cost, sorted_p.total_cost,
1957 input_plan->plan_rows);
1958 if (root->parse->sortClause &&
1959 !pathkeys_contained_in(root->sort_pathkeys, current_pathkeys))
1960 cost_sort(&sorted_p, root, root->sort_pathkeys, sorted_p.total_cost,
1961 dNumDistinctRows, input_plan->plan_width, limit_tuples);
1964 * Now make the decision using the top-level tuple fraction. First we
1965 * have to convert an absolute count (LIMIT) into fractional form.
1967 if (tuple_fraction >= 1.0)
1968 tuple_fraction /= dNumDistinctRows;
1970 if (compare_fractional_path_costs(&hashed_p, &sorted_p,
1971 tuple_fraction) < 0)
1973 /* Hashed is cheaper, so use it */
1980 * make_subplanTargetList
1981 * Generate appropriate target list when grouping is required.
1983 * When grouping_planner inserts Aggregate, Group, or Result plan nodes
1984 * above the result of query_planner, we typically want to pass a different
1985 * target list to query_planner than the outer plan nodes should have.
1986 * This routine generates the correct target list for the subplan.
1988 * The initial target list passed from the parser already contains entries
1989 * for all ORDER BY and GROUP BY expressions, but it will not have entries
1990 * for variables used only in HAVING clauses; so we need to add those
1991 * variables to the subplan target list. Also, we flatten all expressions
1992 * except GROUP BY items into their component variables; the other expressions
1993 * will be computed by the inserted nodes rather than by the subplan.
1994 * For example, given a query like
1995 * SELECT a+b,SUM(c+d) FROM table GROUP BY a+b;
1996 * we want to pass this targetlist to the subplan:
1998 * where the a+b target will be used by the Sort/Group steps, and the
1999 * other targets will be used for computing the final results. (In the
2000 * above example we could theoretically suppress the a and b targets and
2001 * pass down only c,d,a+b, but it's not really worth the trouble to
2002 * eliminate simple var references from the subplan. We will avoid doing
2003 * the extra computation to recompute a+b at the outer level; see
2004 * fix_upper_expr() in setrefs.c.)
2006 * If we are grouping or aggregating, *and* there are no non-Var grouping
2007 * expressions, then the returned tlist is effectively dummy; we do not
2008 * need to force it to be evaluated, because all the Vars it contains
2009 * should be present in the output of query_planner anyway.
2011 * 'tlist' is the query's target list.
2012 * 'groupColIdx' receives an array of column numbers for the GROUP BY
2013 * expressions (if there are any) in the subplan's target list.
2014 * 'need_tlist_eval' is set true if we really need to evaluate the
2017 * The result is the targetlist to be passed to the subplan.
2021 make_subplanTargetList(PlannerInfo *root,
2023 AttrNumber **groupColIdx,
2024 bool *need_tlist_eval)
2026 Query *parse = root->parse;
2031 *groupColIdx = NULL;
2034 * If we're not grouping or aggregating, there's nothing to do here;
2035 * query_planner should receive the unmodified target list.
2037 if (!parse->hasAggs && !parse->groupClause && !root->hasHavingQual)
2039 *need_tlist_eval = true;
2044 * Otherwise, start with a "flattened" tlist (having just the vars
2045 * mentioned in the targetlist and HAVING qual --- but not upper-level
2046 * Vars; they will be replaced by Params later on).
2048 sub_tlist = flatten_tlist(tlist);
2049 extravars = pull_var_clause(parse->havingQual, false);
2050 sub_tlist = add_to_flat_tlist(sub_tlist, extravars);
2051 list_free(extravars);
2052 *need_tlist_eval = false; /* only eval if not flat tlist */
2055 * If grouping, create sub_tlist entries for all GROUP BY expressions
2056 * (GROUP BY items that are simple Vars should be in the list already),
2057 * and make an array showing where the group columns are in the sub_tlist.
2059 numCols = list_length(parse->groupClause);
2063 AttrNumber *grpColIdx;
2066 grpColIdx = (AttrNumber *) palloc(sizeof(AttrNumber) * numCols);
2067 *groupColIdx = grpColIdx;
2069 foreach(gl, parse->groupClause)
2071 SortGroupClause *grpcl = (SortGroupClause *) lfirst(gl);
2072 Node *groupexpr = get_sortgroupclause_expr(grpcl, tlist);
2073 TargetEntry *te = NULL;
2076 * Find or make a matching sub_tlist entry. If the groupexpr
2077 * isn't a Var, no point in searching. (Note that the parser
2078 * won't make multiple groupClause entries for the same TLE.)
2080 if (groupexpr && IsA(groupexpr, Var))
2084 foreach(sl, sub_tlist)
2086 TargetEntry *lte = (TargetEntry *) lfirst(sl);
2088 if (equal(groupexpr, lte->expr))
2097 te = makeTargetEntry((Expr *) groupexpr,
2098 list_length(sub_tlist) + 1,
2101 sub_tlist = lappend(sub_tlist, te);
2102 *need_tlist_eval = true; /* it's not flat anymore */
2105 /* and save its resno */
2106 grpColIdx[keyno++] = te->resno;
2114 * locate_grouping_columns
2115 * Locate grouping columns in the tlist chosen by query_planner.
2117 * This is only needed if we don't use the sub_tlist chosen by
2118 * make_subplanTargetList. We have to forget the column indexes found
2119 * by that routine and re-locate the grouping vars in the real sub_tlist.
2122 locate_grouping_columns(PlannerInfo *root,
2125 AttrNumber *groupColIdx)
2131 * No work unless grouping.
2133 if (!root->parse->groupClause)
2135 Assert(groupColIdx == NULL);
2138 Assert(groupColIdx != NULL);
2140 foreach(gl, root->parse->groupClause)
2142 SortGroupClause *grpcl = (SortGroupClause *) lfirst(gl);
2143 Node *groupexpr = get_sortgroupclause_expr(grpcl, tlist);
2144 TargetEntry *te = NULL;
2147 foreach(sl, sub_tlist)
2149 te = (TargetEntry *) lfirst(sl);
2150 if (equal(groupexpr, te->expr))
2154 elog(ERROR, "failed to locate grouping columns");
2156 groupColIdx[keyno++] = te->resno;
2161 * postprocess_setop_tlist
2162 * Fix up targetlist returned by plan_set_operations().
2164 * We need to transpose sort key info from the orig_tlist into new_tlist.
2165 * NOTE: this would not be good enough if we supported resjunk sort keys
2166 * for results of set operations --- then, we'd need to project a whole
2167 * new tlist to evaluate the resjunk columns. For now, just ereport if we
2168 * find any resjunk columns in orig_tlist.
2171 postprocess_setop_tlist(List *new_tlist, List *orig_tlist)
2174 ListCell *orig_tlist_item = list_head(orig_tlist);
2176 foreach(l, new_tlist)
2178 TargetEntry *new_tle = (TargetEntry *) lfirst(l);
2179 TargetEntry *orig_tle;
2181 /* ignore resjunk columns in setop result */
2182 if (new_tle->resjunk)
2185 Assert(orig_tlist_item != NULL);
2186 orig_tle = (TargetEntry *) lfirst(orig_tlist_item);
2187 orig_tlist_item = lnext(orig_tlist_item);
2188 if (orig_tle->resjunk) /* should not happen */
2189 elog(ERROR, "resjunk output columns are not implemented");
2190 Assert(new_tle->resno == orig_tle->resno);
2191 new_tle->ressortgroupref = orig_tle->ressortgroupref;
2193 if (orig_tlist_item != NULL)
2194 elog(ERROR, "resjunk output columns are not implemented");