1 /*-------------------------------------------------------------------------
4 * The query optimizer external interface.
6 * Portions Copyright (c) 1996-2008, PostgreSQL Global Development Group
7 * Portions Copyright (c) 1994, Regents of the University of California
11 * $PostgreSQL: pgsql/src/backend/optimizer/plan/planner.c,v 1.247 2008/12/18 18:20:33 tgl Exp $
13 *-------------------------------------------------------------------------
20 #include "catalog/pg_operator.h"
21 #include "executor/executor.h"
22 #include "executor/nodeAgg.h"
23 #include "miscadmin.h"
24 #include "nodes/makefuncs.h"
25 #include "optimizer/clauses.h"
26 #include "optimizer/cost.h"
27 #include "optimizer/pathnode.h"
28 #include "optimizer/paths.h"
29 #include "optimizer/planmain.h"
30 #include "optimizer/planner.h"
31 #include "optimizer/prep.h"
32 #include "optimizer/subselect.h"
33 #include "optimizer/tlist.h"
34 #include "optimizer/var.h"
35 #ifdef OPTIMIZER_DEBUG
36 #include "nodes/print.h"
37 #endif
38 #include "parser/parse_expr.h"
39 #include "parser/parse_oper.h"
40 #include "parser/parsetree.h"
41 #include "utils/lsyscache.h"
42 #include "utils/syscache.h"
46 double cursor_tuple_fraction = DEFAULT_CURSOR_TUPLE_FRACTION;
48 /* Hook for plugins to get control in planner() */
49 planner_hook_type planner_hook = NULL;
52 /* Expression kind codes for preprocess_expression */
53 #define EXPRKIND_QUAL 0
54 #define EXPRKIND_TARGET 1
55 #define EXPRKIND_RTFUNC 2
56 #define EXPRKIND_VALUES 3
57 #define EXPRKIND_LIMIT 4
58 #define EXPRKIND_APPINFO 5
61 static Node *preprocess_expression(PlannerInfo *root, Node *expr, int kind);
62 static void preprocess_qual_conditions(PlannerInfo *root, Node *jtnode);
63 static Plan *inheritance_planner(PlannerInfo *root);
64 static Plan *grouping_planner(PlannerInfo *root, double tuple_fraction);
65 static bool is_dummy_plan(Plan *plan);
66 static double preprocess_limit(PlannerInfo *root,
67 double tuple_fraction,
68 int64 *offset_est, int64 *count_est);
69 static void preprocess_groupclause(PlannerInfo *root);
70 static bool choose_hashed_grouping(PlannerInfo *root,
71 double tuple_fraction, double limit_tuples,
72 Path *cheapest_path, Path *sorted_path,
73 double dNumGroups, AggClauseCounts *agg_counts);
74 static bool choose_hashed_distinct(PlannerInfo *root,
75 Plan *input_plan, List *input_pathkeys,
76 double tuple_fraction, double limit_tuples,
77 double dNumDistinctRows);
78 static List *make_subplanTargetList(PlannerInfo *root, List *tlist,
79 AttrNumber **groupColIdx, bool *need_tlist_eval);
80 static void locate_grouping_columns(PlannerInfo *root,
83 AttrNumber *groupColIdx);
84 static List *postprocess_setop_tlist(List *new_tlist, List *orig_tlist);
87 /*****************************************************************************
89 * Query optimizer entry point
91 * To support loadable plugins that monitor or modify planner behavior,
92 * we provide a hook variable that lets a plugin get control before and
93 * after the standard planning process. The plugin would normally call
94 * standard_planner().
96 * Note to plugin authors: standard_planner() scribbles on its Query input,
97 * so you'd better copy that data structure if you want to plan more than once.
99 *****************************************************************************/
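/*
 * Illustrative sketch of hook usage (hypothetical extension code, not the
 * only possible arrangement): a loadable module would typically install the
 * hook from its _PG_init() and delegate to standard_planner(), e.g.
 *
 *		static PlannedStmt *
 *		my_planner(Query *parse, int cursorOptions, ParamListInfo boundParams)
 *		{
 *			... inspect or adjust the Query here ...
 *			return standard_planner(parse, cursorOptions, boundParams);
 *		}
 *
 *		void
 *		_PG_init(void)
 *		{
 *			planner_hook = my_planner;
 *		}
 */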
101 planner(Query *parse, int cursorOptions, ParamListInfo boundParams)
106 result = (*planner_hook) (parse, cursorOptions, boundParams);
108 result = standard_planner(parse, cursorOptions, boundParams);
113 standard_planner(Query *parse, int cursorOptions, ParamListInfo boundParams)
117 double tuple_fraction;
123 /* Cursor options may come from caller or from DECLARE CURSOR stmt */
124 if (parse->utilityStmt &&
125 IsA(parse->utilityStmt, DeclareCursorStmt))
126 cursorOptions |= ((DeclareCursorStmt *) parse->utilityStmt)->options;
129 * Set up global state for this planner invocation. This data is needed
130 * across all levels of sub-Query that might exist in the given command,
131 * so we keep it in a separate struct that's linked to by each per-Query
132 * PlannerInfo node.
134 glob = makeNode(PlannerGlobal);
136 glob->boundParams = boundParams;
137 glob->paramlist = NIL;
138 glob->subplans = NIL;
139 glob->subrtables = NIL;
140 glob->rewindPlanIDs = NULL;
141 glob->finalrtable = NIL;
142 glob->relationOids = NIL;
143 glob->invalItems = NIL;
145 glob->transientPlan = false;
147 /* Determine what fraction of the plan is likely to be scanned */
148 if (cursorOptions & CURSOR_OPT_FAST_PLAN)
151 * We have no real idea how many tuples the user will ultimately FETCH
152 * from a cursor, but it is often the case that he doesn't want 'em
153 * all, or would prefer a fast-start plan anyway so that he can
154 * process some of the tuples sooner. Use a GUC parameter to decide
155 * what fraction to optimize for.
157 tuple_fraction = cursor_tuple_fraction;
160 * We document cursor_tuple_fraction as simply being a fraction,
161 * which means the edge cases 0 and 1 have to be treated specially
162 * here. We convert 1 to 0 ("all the tuples") and 0 to a very small
163 * fraction.
165 if (tuple_fraction >= 1.0)
166 tuple_fraction = 0.0;
167 else if (tuple_fraction <= 0.0)
168 tuple_fraction = 1e-10;
172 /* Default assumption is we need all the tuples */
173 tuple_fraction = 0.0;
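/*
 * Illustration: with the default cursor_tuple_fraction of 0.1, a cursor's
 * plan is chosen as though only the first 10% of its rows will be fetched,
 * which tends to favor fast-start plans; a plain query keeps 0.0 and is
 * judged on total cost.
 */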
176 /* primary planning entry point (may recurse for subqueries) */
177 top_plan = subquery_planner(glob, parse, NULL,
178 false, tuple_fraction, &root);
181 * If creating a plan for a scrollable cursor, make sure it can run
182 * backwards on demand. Add a Material node at the top at need.
184 if (cursorOptions & CURSOR_OPT_SCROLL)
186 if (!ExecSupportsBackwardScan(top_plan))
187 top_plan = materialize_finished_plan(top_plan);
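/*
 * For example, DECLARE c SCROLL CURSOR FOR SELECT ... might otherwise end
 * up with a top plan node (say, a hash join) that cannot fetch backwards;
 * the added Material node buffers the output so FETCH BACKWARD still works.
 */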
190 /* final cleanup of the plan */
191 Assert(glob->finalrtable == NIL);
192 top_plan = set_plan_references(glob, top_plan, root->parse->rtable);
193 /* ... and the subplans (both regular subplans and initplans) */
194 Assert(list_length(glob->subplans) == list_length(glob->subrtables));
195 forboth(lp, glob->subplans, lr, glob->subrtables)
197 Plan *subplan = (Plan *) lfirst(lp);
198 List *subrtable = (List *) lfirst(lr);
200 lfirst(lp) = set_plan_references(glob, subplan, subrtable);
203 /* build the PlannedStmt result */
204 result = makeNode(PlannedStmt);
206 result->commandType = parse->commandType;
207 result->canSetTag = parse->canSetTag;
208 result->transientPlan = glob->transientPlan;
209 result->planTree = top_plan;
210 result->rtable = glob->finalrtable;
211 result->resultRelations = root->resultRelations;
212 result->utilityStmt = parse->utilityStmt;
213 result->intoClause = parse->intoClause;
214 result->subplans = glob->subplans;
215 result->rewindPlanIDs = glob->rewindPlanIDs;
216 result->returningLists = root->returningLists;
217 result->rowMarks = parse->rowMarks;
218 result->relationOids = glob->relationOids;
219 result->invalItems = glob->invalItems;
220 result->nParamExec = list_length(glob->paramlist);
226 /*--------------------
228 * Invokes the planner on a subquery. We recurse to here for each
229 * sub-SELECT found in the query tree.
231 * glob is the global state for the current planner run.
232 * parse is the querytree produced by the parser & rewriter.
233 * parent_root is the immediate parent Query's info (NULL at the top level).
234 * hasRecursion is true if this is a recursive WITH query.
235 * tuple_fraction is the fraction of tuples we expect will be retrieved.
236 * tuple_fraction is interpreted as explained for grouping_planner, below.
238 * If subroot isn't NULL, we pass back the query's final PlannerInfo struct;
239 * among other things this tells the output sort ordering of the plan.
241 * Basically, this routine does the stuff that should only be done once
242 * per Query object. It then calls grouping_planner. At one time,
243 * grouping_planner could be invoked recursively on the same Query object;
244 * that's not currently true, but we keep the separation between the two
245 * routines anyway, in case we need it again someday.
247 * subquery_planner will be called recursively to handle sub-Query nodes
248 * found within the query's expressions and rangetable.
250 * Returns a query plan.
251 *--------------------
254 subquery_planner(PlannerGlobal *glob, Query *parse,
255 PlannerInfo *parent_root,
256 bool hasRecursion, double tuple_fraction,
257 PlannerInfo **subroot)
259 int num_old_subplans = list_length(glob->subplans);
266 /* Create a PlannerInfo data structure for this subquery */
267 root = makeNode(PlannerInfo);
270 root->query_level = parent_root ? parent_root->query_level + 1 : 1;
271 root->parent_root = parent_root;
272 root->planner_cxt = CurrentMemoryContext;
273 root->init_plans = NIL;
274 root->cte_plan_ids = NIL;
275 root->eq_classes = NIL;
276 root->append_rel_list = NIL;
278 root->hasRecursion = hasRecursion;
280 root->wt_param_id = SS_assign_worktable_param(root);
282 root->wt_param_id = -1;
283 root->non_recursive_plan = NULL;
286 * If there is a WITH list, process each WITH query and build an
287 * initplan SubPlan structure for it.
289 if (parse->cteList)
290 SS_process_ctes(root);
293 * Look for ANY and EXISTS SubLinks in WHERE and JOIN/ON clauses, and try
294 * to transform them into joins. Note that this step does not descend
295 * into subqueries; if we pull up any subqueries below, their SubLinks are
296 * processed just before pulling them up.
298 if (parse->hasSubLinks)
299 pull_up_sublinks(root);
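/*
 * Illustration (hypothetical tables foo, bar): quals such as
 *		WHERE foo.x IN (SELECT bar.y FROM bar)
 *		WHERE EXISTS (SELECT 1 FROM bar WHERE bar.y = foo.x)
 * can typically be converted here into semijoins between foo and bar, so
 * the join search can consider them along with ordinary joins.
 */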
302 * Scan the rangetable for set-returning functions, and inline them
303 * if possible (producing subqueries that might get pulled up next).
304 * Recursion issues here are handled in the same way as for SubLinks.
306 inline_set_returning_functions(root);
309 * Check to see if any subqueries in the rangetable can be merged into
310 * this query.
312 parse->jointree = (FromExpr *)
313 pull_up_subqueries(root, (Node *) parse->jointree, false, false);
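/*
 * For example, a simple subquery-in-FROM such as
 *		SELECT ... FROM (SELECT * FROM t WHERE t.a > 0) ss, u ...
 * can usually be flattened so that t joins u directly, provided the
 * subquery has no grouping, aggregation, LIMIT, or other features that
 * would change its semantics.  (t and u are hypothetical tables.)
 */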
316 * Detect whether any rangetable entries are RTE_JOIN kind; if not, we can
317 * avoid the expense of doing flatten_join_alias_vars(). Also check for
318 * outer joins --- if none, we can skip reduce_outer_joins().
319 * This must be done after we have done pull_up_subqueries, of course.
321 root->hasJoinRTEs = false;
322 hasOuterJoins = false;
323 foreach(l, parse->rtable)
325 RangeTblEntry *rte = (RangeTblEntry *) lfirst(l);
327 if (rte->rtekind == RTE_JOIN)
329 root->hasJoinRTEs = true;
330 if (IS_OUTER_JOIN(rte->jointype))
332 hasOuterJoins = true;
333 /* Can quit scanning once we find an outer join */
340 * Expand any rangetable entries that are inheritance sets into "append
341 * relations". This can add entries to the rangetable, but they must be
342 * plain base relations not joins, so it's OK (and marginally more
343 * efficient) to do it after checking for join RTEs. We must do it after
344 * pulling up subqueries, else we'd fail to handle inherited tables in
345 * subqueries.
347 expand_inherited_tables(root);
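/*
 * Illustration: if a hypothetical table "parent" has children c1 and c2, a
 * reference to parent (without ONLY) gains additional rangetable entries
 * for parent, c1, and c2, linked by AppendRelInfo nodes in append_rel_list;
 * the scan is later planned as an Append over those member relations.
 */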
350 * Set hasHavingQual to remember if HAVING clause is present. Needed
351 * because preprocess_expression will reduce a constant-true condition to
352 * an empty qual list ... but "HAVING TRUE" is not a semantic no-op.
354 root->hasHavingQual = (parse->havingQual != NULL);
356 /* Clear this flag; might get set in distribute_qual_to_rels */
357 root->hasPseudoConstantQuals = false;
360 * Do expression preprocessing on targetlist and quals.
362 parse->targetList = (List *)
363 preprocess_expression(root, (Node *) parse->targetList,
366 parse->returningList = (List *)
367 preprocess_expression(root, (Node *) parse->returningList,
370 preprocess_qual_conditions(root, (Node *) parse->jointree);
372 parse->havingQual = preprocess_expression(root, parse->havingQual,
375 parse->limitOffset = preprocess_expression(root, parse->limitOffset,
377 parse->limitCount = preprocess_expression(root, parse->limitCount,
380 root->append_rel_list = (List *)
381 preprocess_expression(root, (Node *) root->append_rel_list,
384 /* Also need to preprocess expressions for function and values RTEs */
385 foreach(l, parse->rtable)
387 RangeTblEntry *rte = (RangeTblEntry *) lfirst(l);
389 if (rte->rtekind == RTE_FUNCTION)
390 rte->funcexpr = preprocess_expression(root, rte->funcexpr,
392 else if (rte->rtekind == RTE_VALUES)
393 rte->values_lists = (List *)
394 preprocess_expression(root, (Node *) rte->values_lists,
399 * In some cases we may want to transfer a HAVING clause into WHERE. We
400 * cannot do so if the HAVING clause contains aggregates (obviously) or
401 * volatile functions (since a HAVING clause is supposed to be executed
402 * only once per group). Also, it may be that the clause is so expensive
403 * to execute that we're better off doing it only once per group, despite
404 * the loss of selectivity. This is hard to estimate short of doing the
405 * entire planning process twice, so we use a heuristic: clauses
406 * containing subplans are left in HAVING. Otherwise, we move or copy the
407 * HAVING clause into WHERE, in hopes of eliminating tuples before
408 * aggregation instead of after.
410 * If the query has explicit grouping then we can simply move such a
411 * clause into WHERE; any group that fails the clause will not be in the
412 * output because none of its tuples will reach the grouping or
413 * aggregation stage. Otherwise we must have a degenerate (variable-free)
414 * HAVING clause, which we put in WHERE so that query_planner() can use it
415 * in a gating Result node, but also keep in HAVING to ensure that we
416 * don't emit a bogus aggregated row. (This could be done better, but it
417 * seems not worth optimizing.)
419 * Note that both havingQual and parse->jointree->quals are in
420 * implicitly-ANDed-list form at this point, even though they are declared
424 foreach(l, (List *) parse->havingQual)
426 Node *havingclause = (Node *) lfirst(l);
428 if (contain_agg_clause(havingclause) ||
429 contain_volatile_functions(havingclause) ||
430 contain_subplans(havingclause))
432 /* keep it in HAVING */
433 newHaving = lappend(newHaving, havingclause);
435 else if (parse->groupClause)
437 /* move it to WHERE */
438 parse->jointree->quals = (Node *)
439 lappend((List *) parse->jointree->quals, havingclause);
443 /* put a copy in WHERE, keep it in HAVING */
444 parse->jointree->quals = (Node *)
445 lappend((List *) parse->jointree->quals,
446 copyObject(havingclause));
447 newHaving = lappend(newHaving, havingclause);
450 parse->havingQual = (Node *) newHaving;
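/*
 * Illustration (hypothetical table t):
 *		SELECT a, sum(b) FROM t GROUP BY a HAVING a = 1 AND sum(b) > 10
 * "a = 1" has no aggregate, volatile function, or subplan, so it moves into
 * WHERE and filters rows before grouping; "sum(b) > 10" stays in HAVING.
 * With no GROUP BY, a variable-free clause such as HAVING $1 > 0 is copied
 * into WHERE (so it can gate the scan) but also kept in HAVING, so we still
 * emit either zero rows or one as HAVING semantics require.
 */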
453 * If we have any outer joins, try to reduce them to plain inner joins.
454 * This step is most easily done after we've done expression
455 * preprocessing.
457 if (hasOuterJoins)
458 reduce_outer_joins(root);
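/*
 * For example, in
 *		SELECT * FROM a LEFT JOIN b ON (a.id = b.id) WHERE b.x = 42
 * the WHERE clause is strict on b, so null-extended rows could never
 * survive; the left join can then be treated as a plain inner join, which
 * opens up more join-order choices.  (a and b are hypothetical tables.)
 */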
461 * Do the main planning. If we have an inherited target relation, that
462 * needs special processing, else go straight to grouping_planner.
464 if (parse->resultRelation &&
465 rt_fetch(parse->resultRelation, parse->rtable)->inh)
466 plan = inheritance_planner(root);
468 plan = grouping_planner(root, tuple_fraction);
471 * If any subplans were generated, or if we're inside a subplan, build
472 * initPlan list and extParam/allParam sets for plan nodes, and attach the
473 * initPlans to the top plan node.
475 if (list_length(glob->subplans) != num_old_subplans ||
476 root->query_level > 1)
477 SS_finalize_plan(root, plan, true);
479 /* Return internal info if caller wants it */
487 * preprocess_expression
488 * Do subquery_planner's preprocessing work for an expression,
489 * which can be a targetlist, a WHERE clause (including JOIN/ON
490 * conditions), or a HAVING clause.
493 preprocess_expression(PlannerInfo *root, Node *expr, int kind)
496 * Fall out quickly if expression is empty. This occurs often enough to
497 * be worth checking. Note that null->null is the correct conversion for
498 * implicit-AND result format, too.
504 * If the query has any join RTEs, replace join alias variables with
505 * base-relation variables. We must do this before sublink processing,
506 * else sublinks expanded out from join aliases wouldn't get processed. We
507 * can skip it in VALUES lists, however, since they can't contain any Vars
510 if (root->hasJoinRTEs && kind != EXPRKIND_VALUES)
511 expr = flatten_join_alias_vars(root, expr);
514 * Simplify constant expressions.
516 * Note: one essential effect here is to insert the current actual values
517 * of any default arguments for functions. To ensure that happens, we
518 * *must* process all expressions here. Previous PG versions sometimes
519 * skipped const-simplification if it didn't seem worth the trouble, but
520 * we can't do that anymore.
522 * Note: this also flattens nested AND and OR expressions into N-argument
523 * form. All processing of a qual expression after this point must be
524 * careful to maintain AND/OR flatness --- that is, do not generate a tree
525 * with AND directly under AND, nor OR directly under OR.
527 expr = eval_const_expressions(root, expr);
530 * If it's a qual or havingQual, canonicalize it.
532 if (kind == EXPRKIND_QUAL)
534 expr = (Node *) canonicalize_qual((Expr *) expr);
536 #ifdef OPTIMIZER_DEBUG
537 printf("After canonicalize_qual()\n");
542 /* Expand SubLinks to SubPlans */
543 if (root->parse->hasSubLinks)
544 expr = SS_process_sublinks(root, expr, (kind == EXPRKIND_QUAL));
547 * XXX do not insert anything here unless you have grokked the comments in
548 * SS_replace_correlation_vars ...
551 /* Replace uplevel vars with Param nodes (this IS possible in VALUES) */
552 if (root->query_level > 1)
553 expr = SS_replace_correlation_vars(root, expr);
556 * If it's a qual or havingQual, convert it to implicit-AND format. (We
557 * don't want to do this before eval_const_expressions, since the latter
558 * would be unable to simplify a top-level AND correctly. Also,
559 * SS_process_sublinks expects explicit-AND format.)
561 if (kind == EXPRKIND_QUAL)
562 expr = (Node *) make_ands_implicit((Expr *) expr);
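/*
 * For instance, a qual that arrived as AND(AND(a, b), c) has already been
 * flattened to AND(a, b, c) by eval_const_expressions, and
 * make_ands_implicit() now turns it into the plain list (a, b, c) that the
 * rest of the planner expects for quals.
 */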
568 * preprocess_qual_conditions
569 * Recursively scan the query's jointree and do subquery_planner's
570 * preprocessing work on each qual condition found therein.
573 preprocess_qual_conditions(PlannerInfo *root, Node *jtnode)
577 if (IsA(jtnode, RangeTblRef))
579 /* nothing to do here */
581 else if (IsA(jtnode, FromExpr))
583 FromExpr *f = (FromExpr *) jtnode;
586 foreach(l, f->fromlist)
587 preprocess_qual_conditions(root, lfirst(l));
589 f->quals = preprocess_expression(root, f->quals, EXPRKIND_QUAL);
591 else if (IsA(jtnode, JoinExpr))
593 JoinExpr *j = (JoinExpr *) jtnode;
595 preprocess_qual_conditions(root, j->larg);
596 preprocess_qual_conditions(root, j->rarg);
598 j->quals = preprocess_expression(root, j->quals, EXPRKIND_QUAL);
601 elog(ERROR, "unrecognized node type: %d",
602 (int) nodeTag(jtnode));
606 * inheritance_planner
607 * Generate a plan in the case where the result relation is an
608 * inheritance set.
610 * We have to handle this case differently from cases where a source relation
611 * is an inheritance set. Source inheritance is expanded at the bottom of the
612 * plan tree (see allpaths.c), but target inheritance has to be expanded at
613 * the top. The reason is that for UPDATE, each target relation needs a
614 * different targetlist matching its own column set. Also, for both UPDATE
615 * and DELETE, the executor needs the Append plan node at the top, else it
616 * can't keep track of which table is the current target table. Fortunately,
617 * the UPDATE/DELETE target can never be the nullable side of an outer join,
618 * so it's OK to generate the plan this way.
620 * Returns a query plan.
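/*
 * Illustration: for
 *		UPDATE parent SET f = f + 1
 * where hypothetical table "parent" has children c1 and c2, this routine
 * runs grouping_planner once per target relation (parent, c1, c2), each
 * time with the query's Vars translated to that child's column numbers, and
 * puts the surviving per-child plans under a single Append node.
 */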
623 inheritance_planner(PlannerInfo *root)
625 Query *parse = root->parse;
626 int parentRTindex = parse->resultRelation;
627 List *subplans = NIL;
628 List *resultRelations = NIL;
629 List *returningLists = NIL;
635 foreach(l, root->append_rel_list)
637 AppendRelInfo *appinfo = (AppendRelInfo *) lfirst(l);
640 /* append_rel_list contains all append rels; ignore others */
641 if (appinfo->parent_relid != parentRTindex)
645 * Generate modified query with this rel as target.
647 memcpy(&subroot, root, sizeof(PlannerInfo));
648 subroot.parse = (Query *)
649 adjust_appendrel_attrs((Node *) parse,
651 subroot.returningLists = NIL;
652 subroot.init_plans = NIL;
653 /* We needn't modify the child's append_rel_list */
654 /* There shouldn't be any OJ info to translate, as yet */
655 Assert(subroot.join_info_list == NIL);
656 /* and we haven't created PlaceHolderInfos, either */
657 Assert(subroot.placeholder_list == NIL);
660 subplan = grouping_planner(&subroot, 0.0 /* retrieve all tuples */ );
663 * If this child rel was excluded by constraint exclusion, exclude it
664 * from the plan.
666 if (is_dummy_plan(subplan))
667 continue;
669 /* Save rtable and tlist from first rel for use below */
672 rtable = subroot.parse->rtable;
673 tlist = subplan->targetlist;
676 subplans = lappend(subplans, subplan);
678 /* Make sure any initplans from this rel get into the outer list */
679 root->init_plans = list_concat(root->init_plans, subroot.init_plans);
681 /* Build target-relations list for the executor */
682 resultRelations = lappend_int(resultRelations, appinfo->child_relid);
684 /* Build list of per-relation RETURNING targetlists */
685 if (parse->returningList)
687 Assert(list_length(subroot.returningLists) == 1);
688 returningLists = list_concat(returningLists,
689 subroot.returningLists);
693 root->resultRelations = resultRelations;
694 root->returningLists = returningLists;
696 /* Mark result as unordered (probably unnecessary) */
697 root->query_pathkeys = NIL;
700 * If we managed to exclude every child rel, return a dummy plan
704 root->resultRelations = list_make1_int(parentRTindex);
705 /* although dummy, it must have a valid tlist for executor */
706 tlist = preprocess_targetlist(root, parse->targetList);
707 return (Plan *) make_result(root,
709 (Node *) list_make1(makeBoolConst(false,
715 * Planning might have modified the rangetable, due to changes of the
716 * Query structures inside subquery RTEs. We have to ensure that this
717 * gets propagated back to the master copy. But can't do this until we
718 * are done planning, because all the calls to grouping_planner need
719 * virgin sub-Queries to work from. (We are effectively assuming that
720 * sub-Queries will get planned identically each time, or at least that
721 * the impacts on their rangetables will be the same each time.)
723 * XXX should clean this up someday
725 parse->rtable = rtable;
727 /* Suppress Append if there's only one surviving child rel */
728 if (list_length(subplans) == 1)
729 return (Plan *) linitial(subplans);
731 return (Plan *) make_append(subplans, true, tlist);
734 /*--------------------
736 * Perform planning steps related to grouping, aggregation, etc.
737 * This primarily means adding top-level processing to the basic
738 * query plan produced by query_planner.
740 * tuple_fraction is the fraction of tuples we expect will be retrieved
742 * tuple_fraction is interpreted as follows:
743 * 0: expect all tuples to be retrieved (normal case)
744 * 0 < tuple_fraction < 1: expect the given fraction of tuples available
745 * from the plan to be retrieved
746 * tuple_fraction >= 1: tuple_fraction is the absolute number of tuples
747 * expected to be retrieved (ie, a LIMIT specification)
749 * Returns a query plan. Also, root->query_pathkeys is returned as the
750 * actual output ordering of the plan (in pathkey format).
751 *--------------------
754 grouping_planner(PlannerInfo *root, double tuple_fraction)
756 Query *parse = root->parse;
757 List *tlist = parse->targetList;
758 int64 offset_est = 0;
760 double limit_tuples = -1.0;
762 List *current_pathkeys;
763 double dNumGroups = 0;
765 /* Tweak caller-supplied tuple_fraction if have LIMIT/OFFSET */
766 if (parse->limitCount || parse->limitOffset)
768 tuple_fraction = preprocess_limit(root, tuple_fraction,
769 &offset_est, &count_est);
772 * If we have a known LIMIT, and don't have an unknown OFFSET, we can
773 * estimate the effects of using a bounded sort.
775 if (count_est > 0 && offset_est >= 0)
776 limit_tuples = (double) count_est + (double) offset_est;
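/*
 * For example, LIMIT 10 OFFSET 5 yields limit_tuples = 15, which lets
 * cost_sort() assume a bounded (top-N) sort that only has to track the
 * first 15 tuples in sorted order.
 */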
779 if (parse->setOperations)
781 List *set_sortclauses;
784 * If there's a top-level ORDER BY, assume we have to fetch all the
785 * tuples. This might be too simplistic given all the hackery below
786 * to possibly avoid the sort; but the odds of accurate estimates
787 * here are pretty low anyway.
789 if (parse->sortClause)
790 tuple_fraction = 0.0;
793 * Construct the plan for set operations. The result will not need
794 * any work except perhaps a top-level sort and/or LIMIT. Note that
795 * any special work for recursive unions is the responsibility of
796 * plan_set_operations.
798 result_plan = plan_set_operations(root, tuple_fraction,
802 * Calculate pathkeys representing the sort order (if any) of the set
803 * operation's result. We have to do this before overwriting the sort
806 current_pathkeys = make_pathkeys_for_sortclauses(root,
808 result_plan->targetlist,
812 * We should not need to call preprocess_targetlist, since we must be
813 * in a SELECT query node. Instead, use the targetlist returned by
814 * plan_set_operations (since this tells whether it returned any
815 * resjunk columns!), and transfer any sort key information from the
816 * original tlist.
818 Assert(parse->commandType == CMD_SELECT);
820 tlist = postprocess_setop_tlist(copyObject(result_plan->targetlist),
824 * Can't handle FOR UPDATE/SHARE here (parser should have checked
825 * already, but let's make sure).
829 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
830 errmsg("SELECT FOR UPDATE/SHARE is not allowed with UNION/INTERSECT/EXCEPT")));
833 * Calculate pathkeys that represent result ordering requirements
835 Assert(parse->distinctClause == NIL);
836 root->sort_pathkeys = make_pathkeys_for_sortclauses(root,
843 /* No set operations, do regular planning */
845 AttrNumber *groupColIdx = NULL;
846 bool need_tlist_eval = true;
852 AggClauseCounts agg_counts;
854 bool use_hashed_grouping = false;
856 MemSet(&agg_counts, 0, sizeof(AggClauseCounts));
858 /* A recursive query should always have setOperations */
859 Assert(!root->hasRecursion);
861 /* Preprocess GROUP BY clause, if any */
862 if (parse->groupClause)
863 preprocess_groupclause(root);
864 numGroupCols = list_length(parse->groupClause);
866 /* Preprocess targetlist */
867 tlist = preprocess_targetlist(root, tlist);
870 * Generate appropriate target list for subplan; may be different from
871 * tlist if grouping or aggregation is needed.
873 sub_tlist = make_subplanTargetList(root, tlist,
874 &groupColIdx, &need_tlist_eval);
877 * Calculate pathkeys that represent grouping/ordering requirements.
878 * Stash them in PlannerInfo so that query_planner can canonicalize
879 * them after EquivalenceClasses have been formed. The sortClause
880 * is certainly sort-able, but GROUP BY and DISTINCT might not be,
881 * in which case we just leave their pathkeys empty.
883 if (parse->groupClause &&
884 grouping_is_sortable(parse->groupClause))
885 root->group_pathkeys =
886 make_pathkeys_for_sortclauses(root,
891 root->group_pathkeys = NIL;
893 if (parse->distinctClause &&
894 grouping_is_sortable(parse->distinctClause))
895 root->distinct_pathkeys =
896 make_pathkeys_for_sortclauses(root,
897 parse->distinctClause,
901 root->distinct_pathkeys = NIL;
903 root->sort_pathkeys =
904 make_pathkeys_for_sortclauses(root,
910 * Will need actual number of aggregates for estimating costs.
912 * Note: we do not attempt to detect duplicate aggregates here; a
913 * somewhat-overestimated count is okay for our present purposes.
915 * Note: think not that we can turn off hasAggs if we find no aggs. It
916 * is possible for constant-expression simplification to remove all
917 * explicit references to aggs, but we still have to follow the
918 * aggregate semantics (eg, producing only one output row).
922 count_agg_clauses((Node *) tlist, &agg_counts);
923 count_agg_clauses(parse->havingQual, &agg_counts);
927 * Figure out whether we want a sorted result from query_planner.
929 * If we have a sortable GROUP BY clause, then we want a result sorted
930 * properly for grouping. Otherwise, if there's a sortable DISTINCT
931 * clause that's more rigorous than the ORDER BY clause, we try to
932 * produce output that's sufficiently well sorted for the DISTINCT.
933 * Otherwise, if there is an ORDER BY clause, we want to sort by the
934 * ORDER BY clause.
936 * Note: if we have both ORDER BY and GROUP BY, and ORDER BY is a
937 * superset of GROUP BY, it would be tempting to request sort by ORDER
938 * BY --- but that might just leave us failing to exploit an available
939 * sort order at all. Needs more thought. The choice for DISTINCT
940 * versus ORDER BY is much easier, since we know that the parser
941 * ensured that one is a superset of the other.
943 if (root->group_pathkeys)
944 root->query_pathkeys = root->group_pathkeys;
945 else if (list_length(root->distinct_pathkeys) >
946 list_length(root->sort_pathkeys))
947 root->query_pathkeys = root->distinct_pathkeys;
948 else if (root->sort_pathkeys)
949 root->query_pathkeys = root->sort_pathkeys;
951 root->query_pathkeys = NIL;
954 * Generate the best unsorted and presorted paths for this Query (but
955 * note there may not be any presorted path). query_planner will also
956 * estimate the number of groups in the query, and canonicalize all
957 * the pathkeys.
959 query_planner(root, sub_tlist, tuple_fraction, limit_tuples,
960 &cheapest_path, &sorted_path, &dNumGroups);
963 * If grouping, decide whether to use sorted or hashed grouping.
965 if (parse->groupClause)
971 * Executor doesn't support hashed aggregation with DISTINCT
972 * aggregates. (Doing so would imply storing *all* the input
973 * values in the hash table, which seems like a certain loser.)
975 can_hash = (agg_counts.numDistinctAggs == 0 &&
976 grouping_is_hashable(parse->groupClause));
977 can_sort = grouping_is_sortable(parse->groupClause);
978 if (can_hash && can_sort)
980 /* we have a meaningful choice to make ... */
981 use_hashed_grouping =
982 choose_hashed_grouping(root,
983 tuple_fraction, limit_tuples,
984 cheapest_path, sorted_path,
985 dNumGroups, &agg_counts);
988 use_hashed_grouping = true;
990 use_hashed_grouping = false;
993 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
994 errmsg("could not implement GROUP BY"),
995 errdetail("Some of the datatypes only support hashing, while others only support sorting.")));
997 /* Also convert # groups to long int --- but 'ware overflow! */
998 numGroups = (long) Min(dNumGroups, (double) LONG_MAX);
1002 * Select the best path. If we are doing hashed grouping, we will
1003 * always read all the input tuples, so use the cheapest-total path.
1004 * Otherwise, trust query_planner's decision about which to use.
1006 if (use_hashed_grouping || !sorted_path)
1007 best_path = cheapest_path;
1009 best_path = sorted_path;
1012 * Check to see if it's possible to optimize MIN/MAX aggregates. If
1013 * so, we will forget all the work we did so far to choose a "regular"
1014 * path ... but we had to do it anyway to be able to tell which way is
1015 * cheaper.
1017 result_plan = optimize_minmax_aggregates(root,
1020 if (result_plan != NULL)
1023 * optimize_minmax_aggregates generated the full plan, with the
1024 * right tlist, and it has no sort order.
1026 current_pathkeys = NIL;
1031 * Normal case --- create a plan according to query_planner's
1032 * results.
1034 bool need_sort_for_grouping = false;
1036 result_plan = create_plan(root, best_path);
1037 current_pathkeys = best_path->pathkeys;
1039 /* Detect if we'll need an explicit sort for grouping */
1040 if (parse->groupClause && !use_hashed_grouping &&
1041 !pathkeys_contained_in(root->group_pathkeys, current_pathkeys))
1043 need_sort_for_grouping = true;
1045 * Always override query_planner's tlist, so that we don't
1046 * sort useless data from a "physical" tlist.
1048 need_tlist_eval = true;
1052 * create_plan() returns a plan with just a "flat" tlist of
1053 * required Vars. Usually we need to insert the sub_tlist as the
1054 * tlist of the top plan node. However, we can skip that if we
1055 * determined that whatever query_planner chose to return will be
1056 * good enough.
1058 if (need_tlist_eval)
1061 * If the top-level plan node is one that cannot do expression
1062 * evaluation, we must insert a Result node to project the
1063 * desired tlist.
1065 if (!is_projection_capable_plan(result_plan))
1067 result_plan = (Plan *) make_result(root,
1075 * Otherwise, just replace the subplan's flat tlist with
1076 * the desired tlist.
1078 result_plan->targetlist = sub_tlist;
1082 * Also, account for the cost of evaluation of the sub_tlist.
1084 * Up to now, we have only been dealing with "flat" tlists,
1085 * containing just Vars. So their evaluation cost is zero
1086 * according to the model used by cost_qual_eval() (or if you
1087 * prefer, the cost is factored into cpu_tuple_cost). Thus we
1088 * can avoid accounting for tlist cost throughout
1089 * query_planner() and subroutines. But now we've inserted a
1090 * tlist that might contain actual operators, sub-selects, etc
1091 * --- so we'd better account for its cost.
1093 * Below this point, any tlist eval cost for added-on nodes
1094 * should be accounted for as we create those nodes.
1095 * Presently, of the node types we can add on, only Agg and
1096 * Group project new tlists (the rest just copy their input
1097 * tuples) --- so make_agg() and make_group() are responsible
1098 * for computing the added cost.
1100 cost_qual_eval(&tlist_cost, sub_tlist, root);
1101 result_plan->startup_cost += tlist_cost.startup;
1102 result_plan->total_cost += tlist_cost.startup +
1103 tlist_cost.per_tuple * result_plan->plan_rows;
1108 * Since we're using query_planner's tlist and not the one
1109 * make_subplanTargetList calculated, we have to refigure any
1110 * grouping-column indexes make_subplanTargetList computed.
1112 locate_grouping_columns(root, tlist, result_plan->targetlist,
1117 * Insert AGG or GROUP node if needed, plus an explicit sort step
1118 * if necessary.
1120 * HAVING clause, if any, becomes qual of the Agg or Group node.
1122 if (use_hashed_grouping)
1124 /* Hashed aggregate plan --- no sort needed */
1125 result_plan = (Plan *) make_agg(root,
1127 (List *) parse->havingQual,
1131 extract_grouping_ops(parse->groupClause),
1135 /* Hashed aggregation produces randomly-ordered results */
1136 current_pathkeys = NIL;
1138 else if (parse->hasAggs)
1140 /* Plain aggregate plan --- sort if needed */
1141 AggStrategy aggstrategy;
1143 if (parse->groupClause)
1145 if (need_sort_for_grouping)
1147 result_plan = (Plan *)
1148 make_sort_from_groupcols(root,
1152 current_pathkeys = root->group_pathkeys;
1154 aggstrategy = AGG_SORTED;
1157 * The AGG node will not change the sort ordering of its
1158 * groups, so current_pathkeys describes the result too.
1163 aggstrategy = AGG_PLAIN;
1164 /* Result will be only one row anyway; no sort order */
1165 current_pathkeys = NIL;
1168 result_plan = (Plan *) make_agg(root,
1170 (List *) parse->havingQual,
1174 extract_grouping_ops(parse->groupClause),
1179 else if (parse->groupClause)
1182 * GROUP BY without aggregation, so insert a group node (plus
1183 * the appropriate sort node, if necessary).
1185 * Add an explicit sort if we couldn't make the path come out
1186 * the way the GROUP node needs it.
1188 if (need_sort_for_grouping)
1190 result_plan = (Plan *)
1191 make_sort_from_groupcols(root,
1195 current_pathkeys = root->group_pathkeys;
1198 result_plan = (Plan *) make_group(root,
1200 (List *) parse->havingQual,
1203 extract_grouping_ops(parse->groupClause),
1206 /* The Group node won't change sort ordering */
1208 else if (root->hasHavingQual)
1211 * No aggregates, and no GROUP BY, but we have a HAVING qual.
1212 * This is a degenerate case in which we are supposed to emit
1213 * either 0 or 1 row depending on whether HAVING succeeds.
1214 * Furthermore, there cannot be any variables in either HAVING
1215 * or the targetlist, so we actually do not need the FROM
1216 * table at all! We can just throw away the plan-so-far and
1217 * generate a Result node. This is a sufficiently unusual
1218 * corner case that it's not worth contorting the structure of
1219 * this routine to avoid having to generate the plan in the
1222 result_plan = (Plan *) make_result(root,
1227 } /* end of non-minmax-aggregate case */
1228 } /* end of if (setOperations) */
1231 * If there is a DISTINCT clause, add the necessary node(s).
1233 if (parse->distinctClause)
1235 double dNumDistinctRows;
1236 long numDistinctRows;
1237 bool use_hashed_distinct;
1242 * If there was grouping or aggregation, use the current number of
1243 * rows as the estimated number of DISTINCT rows (ie, assume the
1244 * result was already mostly unique). If not, use the number of
1245 * distinct-groups calculated by query_planner.
1247 if (parse->groupClause || root->hasHavingQual || parse->hasAggs)
1248 dNumDistinctRows = result_plan->plan_rows;
1250 dNumDistinctRows = dNumGroups;
1252 /* Also convert to long int --- but 'ware overflow! */
1253 numDistinctRows = (long) Min(dNumDistinctRows, (double) LONG_MAX);
1256 * If we have a sortable DISTINCT ON clause, we always use sorting.
1257 * This enforces the expected behavior of DISTINCT ON.
1259 can_sort = grouping_is_sortable(parse->distinctClause);
1260 if (can_sort && parse->hasDistinctOn)
1261 use_hashed_distinct = false;
1264 can_hash = grouping_is_hashable(parse->distinctClause);
1265 if (can_hash && can_sort)
1267 /* we have a meaningful choice to make ... */
1268 use_hashed_distinct =
1269 choose_hashed_distinct(root,
1270 result_plan, current_pathkeys,
1271 tuple_fraction, limit_tuples,
1275 use_hashed_distinct = true;
1277 use_hashed_distinct = false;
1281 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1282 errmsg("could not implement DISTINCT"),
1283 errdetail("Some of the datatypes only support hashing, while others only support sorting.")));
1284 use_hashed_distinct = false; /* keep compiler quiet */
1288 if (use_hashed_distinct)
1290 /* Hashed aggregate plan --- no sort needed */
1291 result_plan = (Plan *) make_agg(root,
1292 result_plan->targetlist,
1295 list_length(parse->distinctClause),
1296 extract_grouping_cols(parse->distinctClause,
1297 result_plan->targetlist),
1298 extract_grouping_ops(parse->distinctClause),
1302 /* Hashed aggregation produces randomly-ordered results */
1303 current_pathkeys = NIL;
1308 * Use a Unique node to implement DISTINCT. Add an explicit sort
1309 * if we couldn't make the path come out the way the Unique node
1310 * needs it. If we do have to sort, always sort by the more
1311 * rigorous of DISTINCT and ORDER BY, to avoid a second sort
1312 * below. However, for regular DISTINCT, don't sort now if we
1313 * don't have to --- sorting afterwards will likely be cheaper,
1314 * and also has the possibility of optimizing via LIMIT. But
1315 * for DISTINCT ON, we *must* force the final sort now, else
1316 * it won't have the desired behavior.
1318 List *needed_pathkeys;
1320 if (parse->hasDistinctOn &&
1321 list_length(root->distinct_pathkeys) <
1322 list_length(root->sort_pathkeys))
1323 needed_pathkeys = root->sort_pathkeys;
1325 needed_pathkeys = root->distinct_pathkeys;
1327 if (!pathkeys_contained_in(needed_pathkeys, current_pathkeys))
1329 if (list_length(root->distinct_pathkeys) >=
1330 list_length(root->sort_pathkeys))
1331 current_pathkeys = root->distinct_pathkeys;
1334 current_pathkeys = root->sort_pathkeys;
1335 /* Assert checks that parser didn't mess up... */
1336 Assert(pathkeys_contained_in(root->distinct_pathkeys,
1340 result_plan = (Plan *) make_sort_from_pathkeys(root,
1346 result_plan = (Plan *) make_unique(result_plan,
1347 parse->distinctClause);
1348 result_plan->plan_rows = dNumDistinctRows;
1349 /* The Unique node won't change sort ordering */
1354 * If ORDER BY was given and we were not able to make the plan come out in
1355 * the right order, add an explicit sort step.
1357 if (parse->sortClause)
1359 if (!pathkeys_contained_in(root->sort_pathkeys, current_pathkeys))
1361 result_plan = (Plan *) make_sort_from_pathkeys(root,
1363 root->sort_pathkeys,
1365 current_pathkeys = root->sort_pathkeys;
1370 * Finally, if there is a LIMIT/OFFSET clause, add the LIMIT node.
1372 if (parse->limitCount || parse->limitOffset)
1374 result_plan = (Plan *) make_limit(result_plan,
1382 * Deal with the RETURNING clause if any. It's convenient to pass the
1383 * returningList through setrefs.c now rather than at top level (if we
1384 * waited, handling inherited UPDATE/DELETE would be much harder).
1386 if (parse->returningList)
1390 Assert(parse->resultRelation);
1391 rlist = set_returning_clause_references(root->glob,
1392 parse->returningList,
1394 parse->resultRelation);
1395 root->returningLists = list_make1(rlist);
1398 root->returningLists = NIL;
1400 /* Compute result-relations list if needed */
1401 if (parse->resultRelation)
1402 root->resultRelations = list_make1_int(parse->resultRelation);
1404 root->resultRelations = NIL;
1407 * Return the actual output ordering in query_pathkeys for possible use by
1408 * an outer query level.
1410 root->query_pathkeys = current_pathkeys;
1416 * Detect whether a plan node is a "dummy" plan created when a relation
1417 * is deemed not to need scanning due to constraint exclusion.
1419 * Currently, such dummy plans are Result nodes with constant FALSE
1420 * gating quals.
1423 is_dummy_plan(Plan *plan)
1425 if (IsA(plan, Result))
1427 List *rcqual = (List *) ((Result *) plan)->resconstantqual;
1429 if (list_length(rcqual) == 1)
1431 Const *constqual = (Const *) linitial(rcqual);
1433 if (constqual && IsA(constqual, Const))
1435 if (!constqual->constisnull &&
1436 !DatumGetBool(constqual->constvalue))
1445 * preprocess_limit - do pre-estimation for LIMIT and/or OFFSET clauses
1447 * We try to estimate the values of the LIMIT/OFFSET clauses, and pass the
1448 * results back in *count_est and *offset_est. These variables are set to
1449 * 0 if the corresponding clause is not present, and -1 if it's present
1450 * but we couldn't estimate the value for it. (The "0" convention is OK
1451 * for OFFSET but a little bit bogus for LIMIT: effectively we estimate
1452 * LIMIT 0 as though it were LIMIT 1. But this is in line with the planner's
1453 * usual practice of never estimating less than one row.) These values will
1454 * be passed to make_limit, which see if you change this code.
1456 * The return value is the suitably adjusted tuple_fraction to use for
1457 * planning the query. This adjustment is not overridable, since it reflects
1458 * plan actions that grouping_planner() will certainly take, not assumptions
1459 * about context.
1462 preprocess_limit(PlannerInfo *root, double tuple_fraction,
1463 int64 *offset_est, int64 *count_est)
1465 Query *parse = root->parse;
1467 double limit_fraction;
1469 /* Should not be called unless LIMIT or OFFSET */
1470 Assert(parse->limitCount || parse->limitOffset);
1473 * Try to obtain the clause values. We use estimate_expression_value
1474 * primarily because it can sometimes do something useful with Params.
1476 if (parse->limitCount)
1478 est = estimate_expression_value(root, parse->limitCount);
1479 if (est && IsA(est, Const))
1481 if (((Const *) est)->constisnull)
1483 /* NULL indicates LIMIT ALL, ie, no limit */
1484 *count_est = 0; /* treat as not present */
1488 *count_est = DatumGetInt64(((Const *) est)->constvalue);
1489 if (*count_est <= 0)
1490 *count_est = 1; /* force to at least 1 */
1494 *count_est = -1; /* can't estimate */
1497 *count_est = 0; /* not present */
1499 if (parse->limitOffset)
1501 est = estimate_expression_value(root, parse->limitOffset);
1502 if (est && IsA(est, Const))
1504 if (((Const *) est)->constisnull)
1506 /* Treat NULL as no offset; the executor will too */
1507 *offset_est = 0; /* treat as not present */
1511 *offset_est = DatumGetInt64(((Const *) est)->constvalue);
1512 if (*offset_est < 0)
1513 *offset_est = 0; /* less than 0 is same as 0 */
1517 *offset_est = -1; /* can't estimate */
1520 *offset_est = 0; /* not present */
1522 if (*count_est != 0)
1525 * A LIMIT clause limits the absolute number of tuples returned.
1526 * However, if it's not a constant LIMIT then we have to guess; for
1527 * lack of a better idea, assume 10% of the plan's result is wanted.
1529 if (*count_est < 0 || *offset_est < 0)
1531 /* LIMIT or OFFSET is an expression ... punt ... */
1532 limit_fraction = 0.10;
1536 /* LIMIT (plus OFFSET, if any) is max number of tuples needed */
1537 limit_fraction = (double) *count_est + (double) *offset_est;
1541 * If we have absolute limits from both caller and LIMIT, use the
1542 * smaller value; likewise if they are both fractional. If one is
1543 * fractional and the other absolute, we can't easily determine which
1544 * is smaller, but we use the heuristic that the absolute will usually
1545 * be smaller.
1547 if (tuple_fraction >= 1.0)
1549 if (limit_fraction >= 1.0)
1552 tuple_fraction = Min(tuple_fraction, limit_fraction);
1556 /* caller absolute, limit fractional; use caller's value */
1559 else if (tuple_fraction > 0.0)
1561 if (limit_fraction >= 1.0)
1563 /* caller fractional, limit absolute; use limit */
1564 tuple_fraction = limit_fraction;
1568 /* both fractional */
1569 tuple_fraction = Min(tuple_fraction, limit_fraction);
1574 /* no info from caller, just use limit */
1575 tuple_fraction = limit_fraction;
1578 else if (*offset_est != 0 && tuple_fraction > 0.0)
1581 * We have an OFFSET but no LIMIT. This acts entirely differently
1582 * from the LIMIT case: here, we need to increase rather than decrease
1583 * the caller's tuple_fraction, because the OFFSET acts to cause more
1584 * tuples to be fetched instead of fewer. This only matters if we got
1585 * a tuple_fraction > 0, however.
1587 * As above, use 10% if OFFSET is present but unestimatable.
1589 if (*offset_est < 0)
1590 limit_fraction = 0.10;
1592 limit_fraction = (double) *offset_est;
1595 * If we have absolute counts from both caller and OFFSET, add them
1596 * together; likewise if they are both fractional. If one is
1597 * fractional and the other absolute, we want to take the larger, and
1598 * we heuristically assume that's the fractional one.
1600 if (tuple_fraction >= 1.0)
1602 if (limit_fraction >= 1.0)
1604 /* both absolute, so add them together */
1605 tuple_fraction += limit_fraction;
1609 /* caller absolute, limit fractional; use limit */
1610 tuple_fraction = limit_fraction;
1615 if (limit_fraction >= 1.0)
1617 /* caller fractional, limit absolute; use caller's value */
1621 /* both fractional, so add them together */
1622 tuple_fraction += limit_fraction;
1623 if (tuple_fraction >= 1.0)
1624 tuple_fraction = 0.0; /* assume fetch all */
1629 return tuple_fraction;
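/*
 * Worked example (assuming no caller-supplied fraction, tuple_fraction = 0):
 * LIMIT 20 OFFSET 10 gives *count_est = 20, *offset_est = 10, and we return
 * 30, i.e. an absolute row count.  If the LIMIT were an unestimatable
 * expression instead, *count_est would be -1 and we would fall back to the
 * 10% guess, returning 0.10.
 */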
1634 * preprocess_groupclause - do preparatory work on GROUP BY clause
1636 * The idea here is to adjust the ordering of the GROUP BY elements
1637 * (which in itself is semantically insignificant) to match ORDER BY,
1638 * thereby allowing a single sort operation to both implement the ORDER BY
1639 * requirement and set up for a Unique step that implements GROUP BY.
1641 * In principle it might be interesting to consider other orderings of the
1642 * GROUP BY elements, which could match the sort ordering of other
1643 * possible plans (eg an indexscan) and thereby reduce cost. We don't
1644 * bother with that, though. Hashed grouping will frequently win anyway.
1646 * Note: we need no comparable processing of the distinctClause because
1647 * the parser already enforced that that matches ORDER BY.
1650 preprocess_groupclause(PlannerInfo *root)
1652 Query *parse = root->parse;
1653 List *new_groupclause;
1658 /* If no ORDER BY, nothing useful to do here */
1659 if (parse->sortClause == NIL)
1663 * Scan the ORDER BY clause and construct a list of matching GROUP BY
1664 * items, but only as far as we can make a matching prefix.
1666 * This code assumes that the sortClause contains no duplicate items.
1668 new_groupclause = NIL;
1669 foreach(sl, parse->sortClause)
1671 SortGroupClause *sc = (SortGroupClause *) lfirst(sl);
1673 foreach(gl, parse->groupClause)
1675 SortGroupClause *gc = (SortGroupClause *) lfirst(gl);
1679 new_groupclause = lappend(new_groupclause, gc);
1684 break; /* no match, so stop scanning */
1687 /* Did we match all of the ORDER BY list, or just some of it? */
1688 partial_match = (sl != NULL);
1690 /* If no match at all, no point in reordering GROUP BY */
1691 if (new_groupclause == NIL)
1695 * Add any remaining GROUP BY items to the new list, but only if we
1696 * were able to make a complete match. In other words, we only
1697 * rearrange the GROUP BY list if the result is that one list is a
1698 * prefix of the other --- otherwise there's no possibility of a
1699 * common sort. Also, give up if there are any non-sortable GROUP BY
1700 * items, since then there's no hope anyway.
1702 foreach(gl, parse->groupClause)
1704 SortGroupClause *gc = (SortGroupClause *) lfirst(gl);
1706 if (list_member_ptr(new_groupclause, gc))
1707 continue; /* it matched an ORDER BY item */
1709 return; /* give up, no common sort possible */
1710 if (!OidIsValid(gc->sortop))
1711 return; /* give up, GROUP BY can't be sorted */
1712 new_groupclause = lappend(new_groupclause, gc);
1715 /* Success --- install the rearranged GROUP BY list */
1716 Assert(list_length(parse->groupClause) == list_length(new_groupclause));
1717 parse->groupClause = new_groupclause;
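/*
 * For example, given GROUP BY b, a with ORDER BY a, b, the GROUP BY list is
 * rearranged to (a, b) so that one sort on (a, b) can serve both the
 * grouping step and the final ORDER BY.  Given GROUP BY b, c with
 * ORDER BY a, b there is no matching prefix at all, so the list is left
 * alone.
 */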
1721 * choose_hashed_grouping - should we use hashed grouping?
1723 * Note: this is only applied when both alternatives are actually feasible.
1726 choose_hashed_grouping(PlannerInfo *root,
1727 double tuple_fraction, double limit_tuples,
1728 Path *cheapest_path, Path *sorted_path,
1729 double dNumGroups, AggClauseCounts *agg_counts)
1731 int numGroupCols = list_length(root->parse->groupClause);
1732 double cheapest_path_rows;
1733 int cheapest_path_width;
1735 List *target_pathkeys;
1736 List *current_pathkeys;
1740 /* Prefer sorting when enable_hashagg is off */
1741 if (!enable_hashagg)
1745 * Don't do it if it doesn't look like the hashtable will fit into
1746 * work_mem.
1748 * Beware here of the possibility that cheapest_path->parent is NULL. This
1749 * could happen if user does something silly like SELECT 'foo' GROUP BY 1;
1751 if (cheapest_path->parent)
1753 cheapest_path_rows = cheapest_path->parent->rows;
1754 cheapest_path_width = cheapest_path->parent->width;
1758 cheapest_path_rows = 1; /* assume non-set result */
1759 cheapest_path_width = 100; /* arbitrary */
1762 /* Estimate per-hash-entry space at tuple width... */
1763 hashentrysize = MAXALIGN(cheapest_path_width) + MAXALIGN(sizeof(MinimalTupleData));
1764 /* plus space for pass-by-ref transition values... */
1765 hashentrysize += agg_counts->transitionSpace;
1766 /* plus the per-hash-entry overhead */
1767 hashentrysize += hash_agg_entry_size(agg_counts->numAggs);
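/*
 * Rough illustration with made-up numbers: for rows of width 40, each hash
 * entry costs about MAXALIGN(40) + sizeof(MinimalTupleData) plus transition
 * space and per-entry overhead, perhaps ~100 bytes per group; at
 * dNumGroups = 100000 that is ~10MB, which would fail the work_mem test
 * below under the default 1MB setting and rule out hashed grouping.
 */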
1769 if (hashentrysize * dNumGroups > work_mem * 1024L)
1773 * When we have both GROUP BY and DISTINCT, use the more-rigorous of
1774 * DISTINCT and ORDER BY as the assumed required output sort order.
1775 * This is an oversimplification because the DISTINCT might get
1776 * implemented via hashing, but it's not clear that the case is common
1777 * enough (or that our estimates are good enough) to justify trying to
1780 if (list_length(root->distinct_pathkeys) >
1781 list_length(root->sort_pathkeys))
1782 target_pathkeys = root->distinct_pathkeys;
1784 target_pathkeys = root->sort_pathkeys;
1787 * See if the estimated cost is no more than doing it the other way. While
1788 * avoiding the need for sorted input is usually a win, the fact that the
1789 * output won't be sorted may be a loss; so we need to do an actual cost
1790 * comparison.
1792 * We need to consider cheapest_path + hashagg [+ final sort] versus
1793 * either cheapest_path [+ sort] + group or agg [+ final sort] or
1794 * presorted_path + group or agg [+ final sort] where brackets indicate a
1795 * step that may not be needed. We assume query_planner() will have
1796 * returned a presorted path only if it's a winner compared to
1797 * cheapest_path for this purpose.
1799 * These path variables are dummies that just hold cost fields; we don't
1800 * make actual Paths for these steps.
1802 cost_agg(&hashed_p, root, AGG_HASHED, agg_counts->numAggs,
1803 numGroupCols, dNumGroups,
1804 cheapest_path->startup_cost, cheapest_path->total_cost,
1805 cheapest_path_rows);
1806 /* Result of hashed agg is always unsorted */
1807 if (target_pathkeys)
1808 cost_sort(&hashed_p, root, target_pathkeys, hashed_p.total_cost,
1809 dNumGroups, cheapest_path_width, limit_tuples);
1813 sorted_p.startup_cost = sorted_path->startup_cost;
1814 sorted_p.total_cost = sorted_path->total_cost;
1815 current_pathkeys = sorted_path->pathkeys;
1819 sorted_p.startup_cost = cheapest_path->startup_cost;
1820 sorted_p.total_cost = cheapest_path->total_cost;
1821 current_pathkeys = cheapest_path->pathkeys;
1823 if (!pathkeys_contained_in(root->group_pathkeys, current_pathkeys))
1825 cost_sort(&sorted_p, root, root->group_pathkeys, sorted_p.total_cost,
1826 cheapest_path_rows, cheapest_path_width, -1.0);
1827 current_pathkeys = root->group_pathkeys;
1830 if (root->parse->hasAggs)
1831 cost_agg(&sorted_p, root, AGG_SORTED, agg_counts->numAggs,
1832 numGroupCols, dNumGroups,
1833 sorted_p.startup_cost, sorted_p.total_cost,
1834 cheapest_path_rows);
1836 cost_group(&sorted_p, root, numGroupCols, dNumGroups,
1837 sorted_p.startup_cost, sorted_p.total_cost,
1838 cheapest_path_rows);
1839 /* The Agg or Group node will preserve ordering */
1840 if (target_pathkeys &&
1841 !pathkeys_contained_in(target_pathkeys, current_pathkeys))
1842 cost_sort(&sorted_p, root, target_pathkeys, sorted_p.total_cost,
1843 dNumGroups, cheapest_path_width, limit_tuples);
1846 * Now make the decision using the top-level tuple fraction. First we
1847 * have to convert an absolute count (LIMIT) into fractional form.
1849 if (tuple_fraction >= 1.0)
1850 tuple_fraction /= dNumGroups;
1852 if (compare_fractional_path_costs(&hashed_p, &sorted_p,
1853 tuple_fraction) < 0)
1855 /* Hashed is cheaper, so use it */
1862 * choose_hashed_distinct - should we use hashing for DISTINCT?
1864 * This is fairly similar to choose_hashed_grouping, but there are enough
1865 * differences that it doesn't seem worth trying to unify the two functions.
1867 * But note that making the two choices independently is a bit bogus in
1868 * itself. If the two could be combined into a single choice operation
1869 * it'd probably be better, but that seems far too unwieldy to be practical,
1870 * especially considering that the combination of GROUP BY and DISTINCT
1871 * isn't very common in real queries. By separating them, we are giving
1872 * extra preference to using a sorting implementation when a common sort key
1873 * is available ... and that's not necessarily wrong anyway.
1875 * Note: this is only applied when both alternatives are actually feasible.
1878 choose_hashed_distinct(PlannerInfo *root,
1879 Plan *input_plan, List *input_pathkeys,
1880 double tuple_fraction, double limit_tuples,
1881 double dNumDistinctRows)
1883 int numDistinctCols = list_length(root->parse->distinctClause);
1885 List *current_pathkeys;
1886 List *needed_pathkeys;
1890 /* Prefer sorting when enable_hashagg is off */
1891 if (!enable_hashagg)
1895 * Don't do it if it doesn't look like the hashtable will fit into
1896 * work_mem.
1898 hashentrysize = MAXALIGN(input_plan->plan_width) + MAXALIGN(sizeof(MinimalTupleData));
1900 if (hashentrysize * dNumDistinctRows > work_mem * 1024L)
1904 * See if the estimated cost is no more than doing it the other way. While
1905 * avoiding the need for sorted input is usually a win, the fact that the
1906 * output won't be sorted may be a loss; so we need to do an actual cost
1907 * comparison.
1909 * We need to consider input_plan + hashagg [+ final sort] versus
1910 * input_plan [+ sort] + group [+ final sort] where brackets indicate
1911 * a step that may not be needed.
1913 * These path variables are dummies that just hold cost fields; we don't
1914 * make actual Paths for these steps.
1916 cost_agg(&hashed_p, root, AGG_HASHED, 0,
1917 numDistinctCols, dNumDistinctRows,
1918 input_plan->startup_cost, input_plan->total_cost,
1919 input_plan->plan_rows);
1921 * Result of hashed agg is always unsorted, so if ORDER BY is present
1922 * we need to charge for the final sort.
1924 if (root->parse->sortClause)
1925 cost_sort(&hashed_p, root, root->sort_pathkeys, hashed_p.total_cost,
1926 dNumDistinctRows, input_plan->plan_width, limit_tuples);
1929 * Now for the GROUP case. See comments in grouping_planner about the
1930 * sorting choices here --- this code should match that code.
1932 sorted_p.startup_cost = input_plan->startup_cost;
1933 sorted_p.total_cost = input_plan->total_cost;
1934 current_pathkeys = input_pathkeys;
    if (root->parse->hasDistinctOn &&
        list_length(root->distinct_pathkeys) <
        list_length(root->sort_pathkeys))
        needed_pathkeys = root->sort_pathkeys;
    else
        needed_pathkeys = root->distinct_pathkeys;
    if (!pathkeys_contained_in(needed_pathkeys, current_pathkeys))
    {
        if (list_length(root->distinct_pathkeys) >=
            list_length(root->sort_pathkeys))
            current_pathkeys = root->distinct_pathkeys;
        else
            current_pathkeys = root->sort_pathkeys;
        cost_sort(&sorted_p, root, current_pathkeys, sorted_p.total_cost,
                  input_plan->plan_rows, input_plan->plan_width, -1.0);
    }
    cost_group(&sorted_p, root, numDistinctCols, dNumDistinctRows,
               sorted_p.startup_cost, sorted_p.total_cost,
               input_plan->plan_rows);
    if (root->parse->sortClause &&
        !pathkeys_contained_in(root->sort_pathkeys, current_pathkeys))
        cost_sort(&sorted_p, root, root->sort_pathkeys, sorted_p.total_cost,
                  dNumDistinctRows, input_plan->plan_width, limit_tuples);

    /*
     * Now make the decision using the top-level tuple fraction.  First we
     * have to convert an absolute count (LIMIT) into fractional form.
     */
    if (tuple_fraction >= 1.0)
        tuple_fraction /= dNumDistinctRows;

    if (compare_fractional_path_costs(&hashed_p, &sorted_p,
                                      tuple_fraction) < 0)
    {
        /* Hashed is cheaper, so use it */
        return true;
    }
    return false;
}

/*
 * make_subplanTargetList
 *    Generate appropriate target list when grouping is required.
 *
 * When grouping_planner inserts Aggregate, Group, or Result plan nodes
 * above the result of query_planner, we typically want to pass a different
 * target list to query_planner than the outer plan nodes should have.
 * This routine generates the correct target list for the subplan.
 *
 * The initial target list passed from the parser already contains entries
 * for all ORDER BY and GROUP BY expressions, but it will not have entries
 * for variables used only in HAVING clauses; so we need to add those
 * variables to the subplan target list.  Also, we flatten all expressions
 * except GROUP BY items into their component variables; the other expressions
 * will be computed by the inserted nodes rather than by the subplan.
 * For example, given a query like
 *        SELECT a+b,SUM(c+d) FROM table GROUP BY a+b;
 * we want to pass this targetlist to the subplan:
 *        a,b,c,d,a+b
 * where the a+b target will be used by the Sort/Group steps, and the
 * other targets will be used for computing the final results.  (In the
 * above example we could theoretically suppress the a and b targets and
 * pass down only c,d,a+b, but it's not really worth the trouble to
 * eliminate simple var references from the subplan.  We will avoid doing
 * the extra computation to recompute a+b at the outer level; see
 * fix_upper_expr() in setrefs.c.)
 *
 * If we are grouping or aggregating, *and* there are no non-Var grouping
 * expressions, then the returned tlist is effectively dummy; we do not
 * need to force it to be evaluated, because all the Vars it contains
 * should be present in the output of query_planner anyway.
 *
 * 'tlist' is the query's target list.
 * 'groupColIdx' receives an array of column numbers for the GROUP BY
 *        expressions (if there are any) in the subplan's target list.
 * 'need_tlist_eval' is set true if we really need to evaluate the
 *        returned tlist as-is.
 *
 * The result is the targetlist to be passed to the subplan.
 */
static List *
make_subplanTargetList(PlannerInfo *root,
                       List *tlist,
                       AttrNumber **groupColIdx,
                       bool *need_tlist_eval)
{
    Query      *parse = root->parse;
    List       *sub_tlist;
    List       *extravars;
    int         numCols;

    *groupColIdx = NULL;

    /*
     * If we're not grouping or aggregating, there's nothing to do here;
     * query_planner should receive the unmodified target list.
     */
    if (!parse->hasAggs && !parse->groupClause && !root->hasHavingQual)
    {
        *need_tlist_eval = true;
        return tlist;
    }

    /*
     * Otherwise, start with a "flattened" tlist (having just the vars
     * mentioned in the targetlist and HAVING qual --- but not upper-level
     * Vars; they will be replaced by Params later on).
     */
    sub_tlist = flatten_tlist(tlist);
    extravars = pull_var_clause(parse->havingQual, true);
    sub_tlist = add_to_flat_tlist(sub_tlist, extravars);
    list_free(extravars);
    *need_tlist_eval = false;    /* only eval if not flat tlist */
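
    /*
     * At this point, for the example in the header comment
     *        SELECT a+b,SUM(c+d) FROM table GROUP BY a+b;
     * sub_tlist holds just the plain Vars a, b, c, d (plus any Vars used
     * only in HAVING); the a+b grouping entry is added by the loop below,
     * which also flips need_tlist_eval back to true.
     */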

    /*
     * If grouping, create sub_tlist entries for all GROUP BY expressions
     * (GROUP BY items that are simple Vars should be in the list already),
     * and make an array showing where the group columns are in the sub_tlist.
     */
    numCols = list_length(parse->groupClause);
    if (numCols > 0)
    {
        int         keyno = 0;
        AttrNumber *grpColIdx;
        ListCell   *gl;

        grpColIdx = (AttrNumber *) palloc(sizeof(AttrNumber) * numCols);
        *groupColIdx = grpColIdx;

        foreach(gl, parse->groupClause)
        {
            SortGroupClause *grpcl = (SortGroupClause *) lfirst(gl);
            Node       *groupexpr = get_sortgroupclause_expr(grpcl, tlist);
            TargetEntry *te = NULL;

            /*
             * Find or make a matching sub_tlist entry.  If the groupexpr
             * isn't a Var, no point in searching.  (Note that the parser
             * won't make multiple groupClause entries for the same TLE.)
             */
            if (groupexpr && IsA(groupexpr, Var))
            {
                ListCell   *sl;

                foreach(sl, sub_tlist)
                {
                    TargetEntry *lte = (TargetEntry *) lfirst(sl);

                    if (equal(groupexpr, lte->expr))
                    {
                        te = lte;
                        break;
                    }
                }
            }
            if (!te)
            {
                te = makeTargetEntry((Expr *) groupexpr,
                                     list_length(sub_tlist) + 1,
                                     NULL,
                                     false);
                sub_tlist = lappend(sub_tlist, te);
                *need_tlist_eval = true;    /* it's not flat anymore */
            }

            /* and save its resno */
            grpColIdx[keyno++] = te->resno;
        }
    }

    return sub_tlist;
}

/*
 * locate_grouping_columns
 *    Locate grouping columns in the tlist chosen by query_planner.
 *
 * This is only needed if we don't use the sub_tlist chosen by
 * make_subplanTargetList.  We have to forget the column indexes found
 * by that routine and re-locate the grouping vars in the real sub_tlist.
 */
static void
locate_grouping_columns(PlannerInfo *root,
                        List *tlist,
                        List *sub_tlist,
                        AttrNumber *groupColIdx)
{
    int         keyno = 0;
    ListCell   *gl;

    /*
     * No work unless grouping.
     */
    if (!root->parse->groupClause)
    {
        Assert(groupColIdx == NULL);
        return;
    }
    Assert(groupColIdx != NULL);

    foreach(gl, root->parse->groupClause)
    {
        SortGroupClause *grpcl = (SortGroupClause *) lfirst(gl);
        Node       *groupexpr = get_sortgroupclause_expr(grpcl, tlist);
        TargetEntry *te = NULL;
        ListCell   *sl;

        foreach(sl, sub_tlist)
        {
            te = (TargetEntry *) lfirst(sl);
            if (equal(groupexpr, te->expr))
                break;
        }
        if (!sl)
            elog(ERROR, "failed to locate grouping columns");

        groupColIdx[keyno++] = te->resno;
    }
}

/*
 * postprocess_setop_tlist
 *    Fix up targetlist returned by plan_set_operations().
 *
 * We need to transpose sort key info from the orig_tlist into new_tlist.
 * NOTE: this would not be good enough if we supported resjunk sort keys
 * for results of set operations --- then, we'd need to project a whole
 * new tlist to evaluate the resjunk columns.  For now, just ereport if we
 * find any resjunk columns in orig_tlist.
 */
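/*
 * For example, in
 *        SELECT a FROM t1 UNION SELECT b FROM t2 ORDER BY 1;
 * the ORDER BY column carries a ressortgroupref marking in the original
 * parse-time tlist; copying that marking onto the corresponding entry of
 * the set-operation plan's tlist is what lets the ORDER BY processing
 * find its sort key in the plan's output.
 */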
static List *
postprocess_setop_tlist(List *new_tlist, List *orig_tlist)
{
    ListCell   *l;
    ListCell   *orig_tlist_item = list_head(orig_tlist);

    foreach(l, new_tlist)
    {
        TargetEntry *new_tle = (TargetEntry *) lfirst(l);
        TargetEntry *orig_tle;

        /* ignore resjunk columns in setop result */
        if (new_tle->resjunk)
            continue;

        Assert(orig_tlist_item != NULL);
        orig_tle = (TargetEntry *) lfirst(orig_tlist_item);
        orig_tlist_item = lnext(orig_tlist_item);
        if (orig_tle->resjunk)    /* should not happen */
            elog(ERROR, "resjunk output columns are not implemented");
        Assert(new_tle->resno == orig_tle->resno);
        new_tle->ressortgroupref = orig_tle->ressortgroupref;
    }
    if (orig_tlist_item != NULL)
        elog(ERROR, "resjunk output columns are not implemented");
    return new_tlist;
}