1 /*-------------------------------------------------------------------------
4 * The query optimizer external interface.
6 * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
7 * Portions Copyright (c) 1994, Regents of the University of California
11 * $PostgreSQL: pgsql/src/backend/optimizer/plan/planner.c,v 1.264 2010/02/10 03:38:35 tgl Exp $
13 *-------------------------------------------------------------------------
20 #include "catalog/pg_operator.h"
21 #include "executor/executor.h"
22 #include "executor/nodeAgg.h"
23 #include "miscadmin.h"
24 #include "nodes/makefuncs.h"
25 #include "optimizer/clauses.h"
26 #include "optimizer/cost.h"
27 #include "optimizer/pathnode.h"
28 #include "optimizer/paths.h"
29 #include "optimizer/planmain.h"
30 #include "optimizer/planner.h"
31 #include "optimizer/prep.h"
32 #include "optimizer/subselect.h"
33 #include "optimizer/tlist.h"
34 #include "optimizer/var.h"
35 #ifdef OPTIMIZER_DEBUG
36 #include "nodes/print.h"
38 #include "parser/analyze.h"
39 #include "parser/parse_expr.h"
40 #include "parser/parse_oper.h"
41 #include "parser/parsetree.h"
42 #include "utils/lsyscache.h"
43 #include "utils/syscache.h"
47 double cursor_tuple_fraction = DEFAULT_CURSOR_TUPLE_FRACTION;
49 /* Hook for plugins to get control in planner() */
50 planner_hook_type planner_hook = NULL;
53 /* Expression kind codes for preprocess_expression */
54 #define EXPRKIND_QUAL 0
55 #define EXPRKIND_TARGET 1
56 #define EXPRKIND_RTFUNC 2
57 #define EXPRKIND_VALUES 3
58 #define EXPRKIND_LIMIT 4
59 #define EXPRKIND_APPINFO 5
62 static Node *preprocess_expression(PlannerInfo *root, Node *expr, int kind);
63 static void preprocess_qual_conditions(PlannerInfo *root, Node *jtnode);
64 static Plan *inheritance_planner(PlannerInfo *root);
65 static Plan *grouping_planner(PlannerInfo *root, double tuple_fraction);
66 static bool is_dummy_plan(Plan *plan);
67 static void preprocess_rowmarks(PlannerInfo *root);
68 static double preprocess_limit(PlannerInfo *root,
69 double tuple_fraction,
70 int64 *offset_est, int64 *count_est);
71 static void preprocess_groupclause(PlannerInfo *root);
72 static bool choose_hashed_grouping(PlannerInfo *root,
73 double tuple_fraction, double limit_tuples,
74 double path_rows, int path_width,
75 Path *cheapest_path, Path *sorted_path,
76 double dNumGroups, AggClauseCounts *agg_counts);
77 static bool choose_hashed_distinct(PlannerInfo *root,
78 double tuple_fraction, double limit_tuples,
79 double path_rows, int path_width,
80 Cost cheapest_startup_cost, Cost cheapest_total_cost,
81 Cost sorted_startup_cost, Cost sorted_total_cost,
82 List *sorted_pathkeys,
83 double dNumDistinctRows);
84 static List *make_subplanTargetList(PlannerInfo *root, List *tlist,
85 AttrNumber **groupColIdx, bool *need_tlist_eval);
86 static void locate_grouping_columns(PlannerInfo *root,
89 AttrNumber *groupColIdx);
90 static List *postprocess_setop_tlist(List *new_tlist, List *orig_tlist);
91 static List *select_active_windows(PlannerInfo *root, WindowFuncLists *wflists);
92 static List *add_volatile_sort_exprs(List *window_tlist, List *tlist,
94 static List *make_pathkeys_for_window(PlannerInfo *root, WindowClause *wc,
95 List *tlist, bool canonicalize);
96 static void get_column_info_for_window(PlannerInfo *root, WindowClause *wc,
98 int numSortCols, AttrNumber *sortColIdx,
100 AttrNumber **partColIdx,
103 AttrNumber **ordColIdx,
107 /*****************************************************************************
109 * Query optimizer entry point
111 * To support loadable plugins that monitor or modify planner behavior,
112 * we provide a hook variable that lets a plugin get control before and
113 * after the standard planning process. The plugin would normally call
114 * standard_planner().
116 * Note to plugin authors: standard_planner() scribbles on its Query input,
117 * so you'd better copy that data structure if you want to plan more than once.
119 *****************************************************************************/
/*
 * planner -- top-level optimizer entry point.
 *
 * NOTE(review): this excerpt elides lines; the hook test, braces, variable
 * declarations, and final return are not visible here.  What is visible:
 * control goes to planner_hook if a plugin installed one, else to
 * standard_planner().
 */
121 planner(Query *parse, int cursorOptions, ParamListInfo boundParams)
/* plugin hook takes over the whole planning run when set */
126 	result = (*planner_hook) (parse, cursorOptions, boundParams);
/* otherwise run the built-in planner */
128 	result = standard_planner(parse, cursorOptions, boundParams);
/*
 * standard_planner -- the built-in planning pipeline.
 *
 * Sets up per-invocation global state (PlannerGlobal), chooses a
 * tuple_fraction, plans the query via subquery_planner, applies final
 * cleanup (set_plan_references), and packages everything into a
 * PlannedStmt.  NOTE(review): interior lines are elided in this excerpt,
 * so some declarations/braces are not shown.
 */
133 standard_planner(Query *parse, int cursorOptions, ParamListInfo boundParams)
137 	double		tuple_fraction;
144 	/* Cursor options may come from caller or from DECLARE CURSOR stmt */
145 	if (parse->utilityStmt &&
146 		IsA(parse->utilityStmt, DeclareCursorStmt))
147 		cursorOptions |= ((DeclareCursorStmt *) parse->utilityStmt)->options;
150 	 * Set up global state for this planner invocation.  This data is needed
151 	 * across all levels of sub-Query that might exist in the given command,
152 	 * so we keep it in a separate struct that's linked to by each per-Query
155 	glob = makeNode(PlannerGlobal);
157 	glob->boundParams = boundParams;
158 	glob->paramlist = NIL;
159 	glob->subplans = NIL;
160 	glob->subrtables = NIL;
161 	glob->subrowmarks = NIL;
162 	glob->rewindPlanIDs = NULL;
163 	glob->finalrtable = NIL;
164 	glob->finalrowmarks = NIL;
165 	glob->relationOids = NIL;
166 	glob->invalItems = NIL;
168 	glob->transientPlan = false;
170 	/* Determine what fraction of the plan is likely to be scanned */
171 	if (cursorOptions & CURSOR_OPT_FAST_PLAN)
174 	 * We have no real idea how many tuples the user will ultimately FETCH
175 	 * from a cursor, but it is often the case that he doesn't want 'em
176 	 * all, or would prefer a fast-start plan anyway so that he can
177 	 * process some of the tuples sooner.  Use a GUC parameter to decide
178 	 * what fraction to optimize for.
180 	tuple_fraction = cursor_tuple_fraction;
183 	 * We document cursor_tuple_fraction as simply being a fraction, which
184 	 * means the edge cases 0 and 1 have to be treated specially here.  We
185 	 * convert 1 to 0 ("all the tuples") and 0 to a very small fraction.
187 	if (tuple_fraction >= 1.0)
188 	tuple_fraction = 0.0;
189 	else if (tuple_fraction <= 0.0)
190 	tuple_fraction = 1e-10;
194 	/* Default assumption is we need all the tuples */
195 	tuple_fraction = 0.0;
198 	/* primary planning entry point (may recurse for subqueries) */
199 	top_plan = subquery_planner(glob, parse, NULL,
200 	false, tuple_fraction, &root);
203 	 * If creating a plan for a scrollable cursor, make sure it can run
204 	 * backwards on demand.  Add a Material node at the top at need.
206 	if (cursorOptions & CURSOR_OPT_SCROLL)
208 	if (!ExecSupportsBackwardScan(top_plan))
209 	top_plan = materialize_finished_plan(top_plan);
212 	/* final cleanup of the plan */
/* finalrtable/finalrowmarks must be empty: set_plan_references fills them */
213 	Assert(glob->finalrtable == NIL);
214 	Assert(glob->finalrowmarks == NIL);
215 	top_plan = set_plan_references(glob, top_plan,
218 	/* ... and the subplans (both regular subplans and initplans) */
/* subplans, subrtables, subrowmarks are walked in lockstep, one entry each */
219 	Assert(list_length(glob->subplans) == list_length(glob->subrtables));
220 	Assert(list_length(glob->subplans) == list_length(glob->subrowmarks));
221 	lrt = list_head(glob->subrtables);
222 	lrm = list_head(glob->subrowmarks);
223 	foreach(lp, glob->subplans)
225 	Plan	   *subplan = (Plan *) lfirst(lp);
226 	List	   *subrtable = (List *) lfirst(lrt);
227 	List	   *subrowmark = (List *) lfirst(lrm);
/* replace each subplan in place with its reference-fixed version */
229 	lfirst(lp) = set_plan_references(glob, subplan,
230 	subrtable, subrowmark);
235 	/* build the PlannedStmt result */
236 	result = makeNode(PlannedStmt);
238 	result->commandType = parse->commandType;
239 	result->hasReturning = (parse->returningList != NIL);
240 	result->canSetTag = parse->canSetTag;
241 	result->transientPlan = glob->transientPlan;
242 	result->planTree = top_plan;
243 	result->rtable = glob->finalrtable;
244 	result->resultRelations = root->resultRelations;
245 	result->utilityStmt = parse->utilityStmt;
246 	result->intoClause = parse->intoClause;
247 	result->subplans = glob->subplans;
248 	result->rewindPlanIDs = glob->rewindPlanIDs;
249 	result->rowMarks = glob->finalrowmarks;
250 	result->relationOids = glob->relationOids;
251 	result->invalItems = glob->invalItems;
/* executor needs the count of PARAM_EXEC slots to allocate at runtime */
252 	result->nParamExec = list_length(glob->paramlist);
258 /*--------------------
260 * Invokes the planner on a subquery. We recurse to here for each
261 * sub-SELECT found in the query tree.
263 * glob is the global state for the current planner run.
264 * parse is the querytree produced by the parser & rewriter.
265 * parent_root is the immediate parent Query's info (NULL at the top level).
266 * hasRecursion is true if this is a recursive WITH query.
267 * tuple_fraction is the fraction of tuples we expect will be retrieved.
268 * tuple_fraction is interpreted as explained for grouping_planner, below.
270 * If subroot isn't NULL, we pass back the query's final PlannerInfo struct;
271 * among other things this tells the output sort ordering of the plan.
273 * Basically, this routine does the stuff that should only be done once
274 * per Query object. It then calls grouping_planner. At one time,
275 * grouping_planner could be invoked recursively on the same Query object;
276 * that's not currently true, but we keep the separation between the two
277 * routines anyway, in case we need it again someday.
279 * subquery_planner will be called recursively to handle sub-Query nodes
280 * found within the query's expressions and rangetable.
282 * Returns a query plan.
283 *--------------------
/*
 * subquery_planner -- once-per-Query preprocessing, then grouping_planner.
 *
 * Builds a PlannerInfo for this query level, runs the rewrite/preprocess
 * passes (CTEs, sublink pullup, subquery pullup, inheritance expansion,
 * expression preprocessing, HAVING-to-WHERE migration, outer-join
 * reduction), then hands off to inheritance_planner or grouping_planner.
 * NOTE(review): interior lines are elided in this excerpt.
 */
286 subquery_planner(PlannerGlobal *glob, Query *parse,
287 				 PlannerInfo *parent_root,
288 				 bool hasRecursion, double tuple_fraction,
289 				 PlannerInfo **subroot)
/* remembered so we can tell below whether this level added any subplans */
291 	int			num_old_subplans = list_length(glob->subplans);
298 	/* Create a PlannerInfo data structure for this subquery */
299 	root = makeNode(PlannerInfo);
302 	root->query_level = parent_root ? parent_root->query_level + 1 : 1;
303 	root->parent_root = parent_root;
304 	root->planner_cxt = CurrentMemoryContext;
305 	root->init_plans = NIL;
306 	root->cte_plan_ids = NIL;
307 	root->eq_classes = NIL;
308 	root->append_rel_list = NIL;
309 	root->rowMarks = NIL;
311 	root->hasRecursion = hasRecursion;
/* recursive WITH needs a working-table parameter; -1 means "none" */
313 	root->wt_param_id = SS_assign_special_param(root);
315 	root->wt_param_id = -1;
316 	root->non_recursive_plan = NULL;
319 	 * If there is a WITH list, process each WITH query and build an initplan
320 	 * SubPlan structure for it.
323 	SS_process_ctes(root);
326 	 * Look for ANY and EXISTS SubLinks in WHERE and JOIN/ON clauses, and try
327 	 * to transform them into joins.  Note that this step does not descend
328 	 * into subqueries; if we pull up any subqueries below, their SubLinks are
329 	 * processed just before pulling them up.
331 	if (parse->hasSubLinks)
332 	pull_up_sublinks(root);
335 	 * Scan the rangetable for set-returning functions, and inline them if
336 	 * possible (producing subqueries that might get pulled up next).
337 	 * Recursion issues here are handled in the same way as for SubLinks.
339 	inline_set_returning_functions(root);
342 	 * Check to see if any subqueries in the rangetable can be merged into
345 	parse->jointree = (FromExpr *)
346 	pull_up_subqueries(root, (Node *) parse->jointree, NULL, NULL);
349 	 * Detect whether any rangetable entries are RTE_JOIN kind; if not, we can
350 	 * avoid the expense of doing flatten_join_alias_vars().  Also check for
351 	 * outer joins --- if none, we can skip reduce_outer_joins().  This must be
352 	 * done after we have done pull_up_subqueries, of course.
354 	root->hasJoinRTEs = false;
355 	hasOuterJoins = false;
356 	foreach(l, parse->rtable)
358 	RangeTblEntry *rte = (RangeTblEntry *) lfirst(l);
360 	if (rte->rtekind == RTE_JOIN)
362 	root->hasJoinRTEs = true;
363 	if (IS_OUTER_JOIN(rte->jointype))
365 	hasOuterJoins = true;
366 	/* Can quit scanning once we find an outer join */
373 	 * Preprocess RowMark information.  We need to do this after subquery
374 	 * pullup (so that all non-inherited RTEs are present) and before
375 	 * inheritance expansion (so that the info is available for
376 	 * expand_inherited_tables to examine and modify).
378 	preprocess_rowmarks(root);
381 	 * Expand any rangetable entries that are inheritance sets into "append
382 	 * relations".  This can add entries to the rangetable, but they must be
383 	 * plain base relations not joins, so it's OK (and marginally more
384 	 * efficient) to do it after checking for join RTEs.  We must do it after
385 	 * pulling up subqueries, else we'd fail to handle inherited tables in
388 	expand_inherited_tables(root);
391 	 * Set hasHavingQual to remember if HAVING clause is present.  Needed
392 	 * because preprocess_expression will reduce a constant-true condition to
393 	 * an empty qual list ... but "HAVING TRUE" is not a semantic no-op.
395 	root->hasHavingQual = (parse->havingQual != NULL);
397 	/* Clear this flag; might get set in distribute_qual_to_rels */
398 	root->hasPseudoConstantQuals = false;
401 	 * Do expression preprocessing on targetlist and quals.
403 	parse->targetList = (List *)
404 	preprocess_expression(root, (Node *) parse->targetList,
407 	parse->returningList = (List *)
408 	preprocess_expression(root, (Node *) parse->returningList,
411 	preprocess_qual_conditions(root, (Node *) parse->jointree);
413 	parse->havingQual = preprocess_expression(root, parse->havingQual,
416 	parse->limitOffset = preprocess_expression(root, parse->limitOffset,
418 	parse->limitCount = preprocess_expression(root, parse->limitCount,
421 	root->append_rel_list = (List *)
422 	preprocess_expression(root, (Node *) root->append_rel_list,
425 	/* Also need to preprocess expressions for function and values RTEs */
426 	foreach(l, parse->rtable)
428 	RangeTblEntry *rte = (RangeTblEntry *) lfirst(l);
430 	if (rte->rtekind == RTE_FUNCTION)
431 	rte->funcexpr = preprocess_expression(root, rte->funcexpr,
433 	else if (rte->rtekind == RTE_VALUES)
434 	rte->values_lists = (List *)
435 	preprocess_expression(root, (Node *) rte->values_lists,
440 	 * In some cases we may want to transfer a HAVING clause into WHERE.  We
441 	 * cannot do so if the HAVING clause contains aggregates (obviously) or
442 	 * volatile functions (since a HAVING clause is supposed to be executed
443 	 * only once per group).  Also, it may be that the clause is so expensive
444 	 * to execute that we're better off doing it only once per group, despite
445 	 * the loss of selectivity.  This is hard to estimate short of doing the
446 	 * entire planning process twice, so we use a heuristic: clauses
447 	 * containing subplans are left in HAVING.  Otherwise, we move or copy the
448 	 * HAVING clause into WHERE, in hopes of eliminating tuples before
449 	 * aggregation instead of after.
451 	 * If the query has explicit grouping then we can simply move such a
452 	 * clause into WHERE; any group that fails the clause will not be in the
453 	 * output because none of its tuples will reach the grouping or
454 	 * aggregation stage.  Otherwise we must have a degenerate (variable-free)
455 	 * HAVING clause, which we put in WHERE so that query_planner() can use it
456 	 * in a gating Result node, but also keep in HAVING to ensure that we
457 	 * don't emit a bogus aggregated row.  (This could be done better, but it
458 	 * seems not worth optimizing.)
460 	 * Note that both havingQual and parse->jointree->quals are in
461 	 * implicitly-ANDed-list form at this point, even though they are declared
465 	foreach(l, (List *) parse->havingQual)
467 	Node	   *havingclause = (Node *) lfirst(l);
469 	if (contain_agg_clause(havingclause) ||
470 	contain_volatile_functions(havingclause) ||
471 	contain_subplans(havingclause))
473 	/* keep it in HAVING */
474 	newHaving = lappend(newHaving, havingclause);
476 	else if (parse->groupClause)
478 	/* move it to WHERE */
479 	parse->jointree->quals = (Node *)
480 	lappend((List *) parse->jointree->quals, havingclause);
484 	/* put a copy in WHERE, keep it in HAVING */
485 	parse->jointree->quals = (Node *)
486 	lappend((List *) parse->jointree->quals,
487 	copyObject(havingclause));
488 	newHaving = lappend(newHaving, havingclause);
/* replace havingQual with only the clauses we chose to keep */
491 	parse->havingQual = (Node *) newHaving;
494 	 * If we have any outer joins, try to reduce them to plain inner joins.
495 	 * This step is most easily done after we've done expression
499 	reduce_outer_joins(root);
502 	 * Do the main planning.  If we have an inherited target relation, that
503 	 * needs special processing, else go straight to grouping_planner.
505 	if (parse->resultRelation &&
506 	rt_fetch(parse->resultRelation, parse->rtable)->inh)
507 	plan = inheritance_planner(root);
510 	plan = grouping_planner(root, tuple_fraction);
511 	/* If it's not SELECT, we need a ModifyTable node */
512 	if (parse->commandType != CMD_SELECT)
514 	List	   *returningLists;
518 	 * Deal with the RETURNING clause if any.  It's convenient to pass
519 	 * the returningList through setrefs.c now rather than at top
520 	 * level (if we waited, handling inherited UPDATE/DELETE would be
523 	if (parse->returningList)
527 	Assert(parse->resultRelation);
528 	rlist = set_returning_clause_references(root->glob,
529 	parse->returningList,
531 	parse->resultRelation);
532 	returningLists = list_make1(rlist);
535 	returningLists = NIL;
538 	 * If there was a FOR UPDATE/SHARE clause, the LockRows node will
539 	 * have dealt with fetching non-locked marked rows, else we need
540 	 * to have ModifyTable do that.
545 	rowMarks = root->rowMarks;
547 	plan = (Plan *) make_modifytable(parse->commandType,
548 	copyObject(root->resultRelations),
552 	SS_assign_special_param(root));
557 	 * If any subplans were generated, or if there are any parameters to worry
558 	 * about, build initPlan list and extParam/allParam sets for plan nodes,
559 	 * and attach the initPlans to the top plan node.
561 	if (list_length(glob->subplans) != num_old_subplans ||
562 	root->glob->paramlist != NIL)
563 	SS_finalize_plan(root, plan, true);
565 	/* Return internal info if caller wants it */
573 * preprocess_expression
574 * Do subquery_planner's preprocessing work for an expression,
575 * which can be a targetlist, a WHERE clause (including JOIN/ON
576 * conditions), or a HAVING clause.
/*
 * preprocess_expression -- canonical expression cleanup for one expression.
 *
 * kind (EXPRKIND_*) selects minor behavioral variations: quals get
 * canonicalize_qual and implicit-AND conversion; VALUES lists skip join
 * alias flattening.  NOTE(review): some lines (return type, early-exit
 * test, braces, return) are elided in this excerpt.
 */
579 preprocess_expression(PlannerInfo *root, Node *expr, int kind)
582 	 * Fall out quickly if expression is empty.  This occurs often enough to
583 	 * be worth checking.  Note that null->null is the correct conversion for
584 	 * implicit-AND result format, too.
590 	 * If the query has any join RTEs, replace join alias variables with
591 	 * base-relation variables.  We must do this before sublink processing,
592 	 * else sublinks expanded out from join aliases wouldn't get processed. We
593 	 * can skip it in VALUES lists, however, since they can't contain any Vars
596 	if (root->hasJoinRTEs && kind != EXPRKIND_VALUES)
597 	expr = flatten_join_alias_vars(root, expr);
600 	 * Simplify constant expressions.
602 	 * Note: an essential effect of this is to convert named-argument function
603 	 * calls to positional notation and insert the current actual values
604 	 * of any default arguments for functions.  To ensure that happens, we
605 	 * *must* process all expressions here.  Previous PG versions sometimes
606 	 * skipped const-simplification if it didn't seem worth the trouble, but
607 	 * we can't do that anymore.
609 	 * Note: this also flattens nested AND and OR expressions into N-argument
610 	 * form.  All processing of a qual expression after this point must be
611 	 * careful to maintain AND/OR flatness --- that is, do not generate a tree
612 	 * with AND directly under AND, nor OR directly under OR.
614 	expr = eval_const_expressions(root, expr);
617 	 * If it's a qual or havingQual, canonicalize it.
619 	if (kind == EXPRKIND_QUAL)
621 	expr = (Node *) canonicalize_qual((Expr *) expr);
623 #ifdef OPTIMIZER_DEBUG
624 	printf("After canonicalize_qual()\n");
629 	/* Expand SubLinks to SubPlans */
630 	if (root->parse->hasSubLinks)
631 	expr = SS_process_sublinks(root, expr, (kind == EXPRKIND_QUAL));
634 	 * XXX do not insert anything here unless you have grokked the comments in
635 	 * SS_replace_correlation_vars ...
638 	/* Replace uplevel vars with Param nodes (this IS possible in VALUES) */
639 	if (root->query_level > 1)
640 	expr = SS_replace_correlation_vars(root, expr);
643 	 * If it's a qual or havingQual, convert it to implicit-AND format. (We
644 	 * don't want to do this before eval_const_expressions, since the latter
645 	 * would be unable to simplify a top-level AND correctly.  Also,
646 	 * SS_process_sublinks expects explicit-AND format.)
648 	if (kind == EXPRKIND_QUAL)
649 	expr = (Node *) make_ands_implicit((Expr *) expr);
655 * preprocess_qual_conditions
656 * Recursively scan the query's jointree and do subquery_planner's
657 * preprocessing work on each qual condition found therein.
/*
 * preprocess_qual_conditions -- walk the jointree, preprocessing each qual.
 *
 * Recurses depth-first: child jointree nodes are processed before this
 * node's own quals.  Quals are run through preprocess_expression with
 * EXPRKIND_QUAL.  NOTE(review): braces and some declarations are elided
 * in this excerpt.
 */
660 preprocess_qual_conditions(PlannerInfo *root, Node *jtnode)
664 	if (IsA(jtnode, RangeTblRef))
666 		/* nothing to do here */
668 	else if (IsA(jtnode, FromExpr))
670 	FromExpr   *f = (FromExpr *) jtnode;
/* recurse into each FROM item first, then handle this level's quals */
673 	foreach(l, f->fromlist)
674 	preprocess_qual_conditions(root, lfirst(l));
676 	f->quals = preprocess_expression(root, f->quals, EXPRKIND_QUAL);
678 	else if (IsA(jtnode, JoinExpr))
680 	JoinExpr   *j = (JoinExpr *) jtnode;
682 	preprocess_qual_conditions(root, j->larg);
683 	preprocess_qual_conditions(root, j->rarg);
685 	j->quals = preprocess_expression(root, j->quals, EXPRKIND_QUAL);
/* any other node type in a jointree indicates corruption: hard error */
688 	elog(ERROR, "unrecognized node type: %d",
689 	(int) nodeTag(jtnode));
693 * inheritance_planner
694 * Generate a plan in the case where the result relation is an
697 * We have to handle this case differently from cases where a source relation
698 * is an inheritance set. Source inheritance is expanded at the bottom of the
699 * plan tree (see allpaths.c), but target inheritance has to be expanded at
700 * the top. The reason is that for UPDATE, each target relation needs a
701 * different targetlist matching its own column set. Fortunately,
702 * the UPDATE/DELETE target can never be the nullable side of an outer join,
703 * so it's OK to generate the plan this way.
705 * Returns a query plan.
/*
 * inheritance_planner -- plan UPDATE/DELETE on an inheritance parent.
 *
 * Plans the query once per child relation (via grouping_planner on a
 * translated copy of the PlannerInfo), collects the per-child subplans,
 * result relations, and RETURNING lists, then wraps everything in a
 * ModifyTable node.  NOTE(review): interior lines are elided in this
 * excerpt, so some declarations, braces, and the dummy-plan test are not
 * shown.
 */
708 inheritance_planner(PlannerInfo *root)
710 	Query	   *parse = root->parse;
711 	int			parentRTindex = parse->resultRelation;
712 	List	   *subplans = NIL;
713 	List	   *resultRelations = NIL;
714 	List	   *returningLists = NIL;
721 	foreach(l, root->append_rel_list)
723 	AppendRelInfo *appinfo = (AppendRelInfo *) lfirst(l);
726 	/* append_rel_list contains all append rels; ignore others */
727 	if (appinfo->parent_relid != parentRTindex)
731 	 * Generate modified query with this rel as target.
/* shallow-copy the PlannerInfo so each child gets its own mutable copy */
733 	memcpy(&subroot, root, sizeof(PlannerInfo));
734 	subroot.parse = (Query *)
735 	adjust_appendrel_attrs((Node *) parse,
737 	subroot.init_plans = NIL;
738 	/* We needn't modify the child's append_rel_list */
739 	/* There shouldn't be any OJ info to translate, as yet */
740 	Assert(subroot.join_info_list == NIL);
741 	/* and we haven't created PlaceHolderInfos, either */
742 	Assert(subroot.placeholder_list == NIL);
745 	subplan = grouping_planner(&subroot, 0.0 /* retrieve all tuples */ );
748 	 * If this child rel was excluded by constraint exclusion, exclude it
751 	if (is_dummy_plan(subplan))
754 	/* Save rtable from first rel for use below */
756 	rtable = subroot.parse->rtable;
758 	subplans = lappend(subplans, subplan);
760 	/* Make sure any initplans from this rel get into the outer list */
761 	root->init_plans = list_concat(root->init_plans, subroot.init_plans);
763 	/* Build target-relations list for the executor */
764 	resultRelations = lappend_int(resultRelations, appinfo->child_relid);
766 	/* Build list of per-relation RETURNING targetlists */
767 	if (parse->returningList)
771 	rlist = set_returning_clause_references(root->glob,
772 	subroot.parse->returningList,
774 	appinfo->child_relid);
775 	returningLists = lappend(returningLists, rlist);
779 	root->resultRelations = resultRelations;
781 	/* Mark result as unordered (probably unnecessary) */
782 	root->query_pathkeys = NIL;
785 	 * If we managed to exclude every child rel, return a dummy plan;
786 	 * it doesn't even need a ModifyTable node.
790 	root->resultRelations = list_make1_int(parentRTindex);
791 	/* although dummy, it must have a valid tlist for executor */
792 	tlist = preprocess_targetlist(root, parse->targetList);
/* constant-false Result: provably returns no rows */
793 	return (Plan *) make_result(root,
795 	(Node *) list_make1(makeBoolConst(false,
801 	 * Planning might have modified the rangetable, due to changes of the
802 	 * Query structures inside subquery RTEs.  We have to ensure that this
803 	 * gets propagated back to the master copy.  But can't do this until we
804 	 * are done planning, because all the calls to grouping_planner need
805 	 * virgin sub-Queries to work from.  (We are effectively assuming that
806 	 * sub-Queries will get planned identically each time, or at least that
807 	 * the impacts on their rangetables will be the same each time.)
809 	 * XXX should clean this up someday
811 	parse->rtable = rtable;
814 	 * If there was a FOR UPDATE/SHARE clause, the LockRows node will
815 	 * have dealt with fetching non-locked marked rows, else we need
816 	 * to have ModifyTable do that.
821 	rowMarks = root->rowMarks;
823 	/* And last, tack on a ModifyTable node to do the UPDATE/DELETE work */
824 	return (Plan *) make_modifytable(parse->commandType,
825 	copyObject(root->resultRelations),
829 	SS_assign_special_param(root));
832 /*--------------------
834 * Perform planning steps related to grouping, aggregation, etc.
835 * This primarily means adding top-level processing to the basic
836 * query plan produced by query_planner.
838 * tuple_fraction is the fraction of tuples we expect will be retrieved
840 * tuple_fraction is interpreted as follows:
841 * 0: expect all tuples to be retrieved (normal case)
842 * 0 < tuple_fraction < 1: expect the given fraction of tuples available
843 * from the plan to be retrieved
844 * tuple_fraction >= 1: tuple_fraction is the absolute number of tuples
845 * expected to be retrieved (ie, a LIMIT specification)
847 * Returns a query plan. Also, root->query_pathkeys is returned as the
848 * actual output ordering of the plan (in pathkey format).
849 *--------------------
852 grouping_planner(PlannerInfo *root, double tuple_fraction)
854 Query *parse = root->parse;
855 List *tlist = parse->targetList;
856 int64 offset_est = 0;
858 double limit_tuples = -1.0;
860 List *current_pathkeys;
861 double dNumGroups = 0;
862 bool use_hashed_distinct = false;
863 bool tested_hashed_distinct = false;
865 /* Tweak caller-supplied tuple_fraction if have LIMIT/OFFSET */
866 if (parse->limitCount || parse->limitOffset)
868 tuple_fraction = preprocess_limit(root, tuple_fraction,
869 &offset_est, &count_est);
872 * If we have a known LIMIT, and don't have an unknown OFFSET, we can
873 * estimate the effects of using a bounded sort.
875 if (count_est > 0 && offset_est >= 0)
876 limit_tuples = (double) count_est + (double) offset_est;
879 if (parse->setOperations)
881 List *set_sortclauses;
884 * If there's a top-level ORDER BY, assume we have to fetch all the
885 * tuples. This might be too simplistic given all the hackery below
886 * to possibly avoid the sort; but the odds of accurate estimates here
887 * are pretty low anyway.
889 if (parse->sortClause)
890 tuple_fraction = 0.0;
893 * Construct the plan for set operations. The result will not need
894 * any work except perhaps a top-level sort and/or LIMIT. Note that
895 * any special work for recursive unions is the responsibility of
896 * plan_set_operations.
898 result_plan = plan_set_operations(root, tuple_fraction,
902 * Calculate pathkeys representing the sort order (if any) of the set
903 * operation's result. We have to do this before overwriting the sort
906 current_pathkeys = make_pathkeys_for_sortclauses(root,
908 result_plan->targetlist,
912 * We should not need to call preprocess_targetlist, since we must be
913 * in a SELECT query node. Instead, use the targetlist returned by
914 * plan_set_operations (since this tells whether it returned any
915 * resjunk columns!), and transfer any sort key information from the
918 Assert(parse->commandType == CMD_SELECT);
920 tlist = postprocess_setop_tlist(copyObject(result_plan->targetlist),
924 * Can't handle FOR UPDATE/SHARE here (parser should have checked
925 * already, but let's make sure).
929 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
930 errmsg("SELECT FOR UPDATE/SHARE is not allowed with UNION/INTERSECT/EXCEPT")));
933 * Calculate pathkeys that represent result ordering requirements
935 Assert(parse->distinctClause == NIL);
936 root->sort_pathkeys = make_pathkeys_for_sortclauses(root,
943 /* No set operations, do regular planning */
945 AttrNumber *groupColIdx = NULL;
946 bool need_tlist_eval = true;
952 AggClauseCounts agg_counts;
956 bool use_hashed_grouping = false;
957 WindowFuncLists *wflists = NULL;
958 List *activeWindows = NIL;
960 MemSet(&agg_counts, 0, sizeof(AggClauseCounts));
962 /* A recursive query should always have setOperations */
963 Assert(!root->hasRecursion);
965 /* Preprocess GROUP BY clause, if any */
966 if (parse->groupClause)
967 preprocess_groupclause(root);
968 numGroupCols = list_length(parse->groupClause);
970 /* Preprocess targetlist */
971 tlist = preprocess_targetlist(root, tlist);
974 * Locate any window functions in the tlist. (We don't need to look
975 * anywhere else, since expressions used in ORDER BY will be in there
976 * too.) Note that they could all have been eliminated by constant
977 * folding, in which case we don't need to do any more work.
979 if (parse->hasWindowFuncs)
981 wflists = find_window_functions((Node *) tlist,
982 list_length(parse->windowClause));
983 if (wflists->numWindowFuncs > 0)
984 activeWindows = select_active_windows(root, wflists);
986 parse->hasWindowFuncs = false;
990 * Generate appropriate target list for subplan; may be different from
991 * tlist if grouping or aggregation is needed.
993 sub_tlist = make_subplanTargetList(root, tlist,
994 &groupColIdx, &need_tlist_eval);
997 * Calculate pathkeys that represent grouping/ordering requirements.
998 * Stash them in PlannerInfo so that query_planner can canonicalize
999 * them after EquivalenceClasses have been formed. The sortClause is
1000 * certainly sort-able, but GROUP BY and DISTINCT might not be, in
1001 * which case we just leave their pathkeys empty.
1003 if (parse->groupClause &&
1004 grouping_is_sortable(parse->groupClause))
1005 root->group_pathkeys =
1006 make_pathkeys_for_sortclauses(root,
1011 root->group_pathkeys = NIL;
1013 /* We consider only the first (bottom) window in pathkeys logic */
1014 if (activeWindows != NIL)
1016 WindowClause *wc = (WindowClause *) linitial(activeWindows);
1018 root->window_pathkeys = make_pathkeys_for_window(root,
1024 root->window_pathkeys = NIL;
1026 if (parse->distinctClause &&
1027 grouping_is_sortable(parse->distinctClause))
1028 root->distinct_pathkeys =
1029 make_pathkeys_for_sortclauses(root,
1030 parse->distinctClause,
1034 root->distinct_pathkeys = NIL;
1036 root->sort_pathkeys =
1037 make_pathkeys_for_sortclauses(root,
1043 * Will need actual number of aggregates for estimating costs.
1045 * Note: we do not attempt to detect duplicate aggregates here; a
1046 * somewhat-overestimated count is okay for our present purposes.
1048 * Note: think not that we can turn off hasAggs if we find no aggs. It
1049 * is possible for constant-expression simplification to remove all
1050 * explicit references to aggs, but we still have to follow the
1051 * aggregate semantics (eg, producing only one output row).
1055 count_agg_clauses((Node *) tlist, &agg_counts);
1056 count_agg_clauses(parse->havingQual, &agg_counts);
1060 * Figure out whether we want a sorted result from query_planner.
1062 * If we have a sortable GROUP BY clause, then we want a result sorted
1063 * properly for grouping. Otherwise, if we have window functions to
1064 * evaluate, we try to sort for the first window. Otherwise, if
1065 * there's a sortable DISTINCT clause that's more rigorous than the
1066 * ORDER BY clause, we try to produce output that's sufficiently well
1067 * sorted for the DISTINCT. Otherwise, if there is an ORDER BY
1068 * clause, we want to sort by the ORDER BY clause.
1070 * Note: if we have both ORDER BY and GROUP BY, and ORDER BY is a
1071 * superset of GROUP BY, it would be tempting to request sort by ORDER
1072 * BY --- but that might just leave us failing to exploit an available
1073 * sort order at all. Needs more thought. The choice for DISTINCT
1074 * versus ORDER BY is much easier, since we know that the parser
1075 * ensured that one is a superset of the other.
1077 if (root->group_pathkeys)
1078 root->query_pathkeys = root->group_pathkeys;
1079 else if (root->window_pathkeys)
1080 root->query_pathkeys = root->window_pathkeys;
1081 else if (list_length(root->distinct_pathkeys) >
1082 list_length(root->sort_pathkeys))
1083 root->query_pathkeys = root->distinct_pathkeys;
1084 else if (root->sort_pathkeys)
1085 root->query_pathkeys = root->sort_pathkeys;
1087 root->query_pathkeys = NIL;
1090 * Generate the best unsorted and presorted paths for this Query (but
1091 * note there may not be any presorted path). query_planner will also
1092 * estimate the number of groups in the query, and canonicalize all
1095 query_planner(root, sub_tlist, tuple_fraction, limit_tuples,
1096 &cheapest_path, &sorted_path, &dNumGroups);
1099 * Extract rowcount and width estimates for possible use in grouping
1100 * decisions. Beware here of the possibility that
1101 * cheapest_path->parent is NULL (ie, there is no FROM clause).
1103 if (cheapest_path->parent)
1105 path_rows = cheapest_path->parent->rows;
1106 path_width = cheapest_path->parent->width;
1110 path_rows = 1; /* assume non-set result */
1111 path_width = 100; /* arbitrary */
1114 if (parse->groupClause)
1117 * If grouping, decide whether to use sorted or hashed grouping.
1119 use_hashed_grouping =
1120 choose_hashed_grouping(root,
1121 tuple_fraction, limit_tuples,
1122 path_rows, path_width,
1123 cheapest_path, sorted_path,
1124 dNumGroups, &agg_counts);
1125 /* Also convert # groups to long int --- but 'ware overflow! */
1126 numGroups = (long) Min(dNumGroups, (double) LONG_MAX);
1128 else if (parse->distinctClause && sorted_path &&
1129 !root->hasHavingQual && !parse->hasAggs && !activeWindows)
1132 * We'll reach the DISTINCT stage without any intermediate
1133 * processing, so figure out whether we will want to hash or not
1134 * so we can choose whether to use cheapest or sorted path.
1136 use_hashed_distinct =
1137 choose_hashed_distinct(root,
1138 tuple_fraction, limit_tuples,
1139 path_rows, path_width,
1140 cheapest_path->startup_cost,
1141 cheapest_path->total_cost,
1142 sorted_path->startup_cost,
1143 sorted_path->total_cost,
1144 sorted_path->pathkeys,
1146 tested_hashed_distinct = true;
1150 * Select the best path. If we are doing hashed grouping, we will
1151 * always read all the input tuples, so use the cheapest-total path.
1152 * Otherwise, trust query_planner's decision about which to use.
1154 if (use_hashed_grouping || use_hashed_distinct || !sorted_path)
1155 best_path = cheapest_path;
1157 best_path = sorted_path;
1160 * Check to see if it's possible to optimize MIN/MAX aggregates. If
1161 * so, we will forget all the work we did so far to choose a "regular"
1162 * path ... but we had to do it anyway to be able to tell which way is
1165 result_plan = optimize_minmax_aggregates(root,
1168 if (result_plan != NULL)
1171 * optimize_minmax_aggregates generated the full plan, with the
1172 * right tlist, and it has no sort order.
1174 current_pathkeys = NIL;
1179 * Normal case --- create a plan according to query_planner's
1182 bool need_sort_for_grouping = false;
1184 result_plan = create_plan(root, best_path);
1185 current_pathkeys = best_path->pathkeys;
1187 /* Detect if we'll need an explicit sort for grouping */
1188 if (parse->groupClause && !use_hashed_grouping &&
1189 !pathkeys_contained_in(root->group_pathkeys, current_pathkeys))
1191 need_sort_for_grouping = true;
1194 * Always override query_planner's tlist, so that we don't
1195 * sort useless data from a "physical" tlist.
1197 need_tlist_eval = true;
1201 * create_plan() returns a plan with just a "flat" tlist of
1202 * required Vars. Usually we need to insert the sub_tlist as the
1203 * tlist of the top plan node. However, we can skip that if we
1204 * determined that whatever query_planner chose to return will be
1207 if (need_tlist_eval)
1210 * If the top-level plan node is one that cannot do expression
1211 * evaluation, we must insert a Result node to project the
1214 if (!is_projection_capable_plan(result_plan))
1216 result_plan = (Plan *) make_result(root,
1224 * Otherwise, just replace the subplan's flat tlist with
1225 * the desired tlist.
1227 result_plan->targetlist = sub_tlist;
1231 * Also, account for the cost of evaluation of the sub_tlist.
1233 * Up to now, we have only been dealing with "flat" tlists,
1234 * containing just Vars. So their evaluation cost is zero
1235 * according to the model used by cost_qual_eval() (or if you
1236 * prefer, the cost is factored into cpu_tuple_cost). Thus we
1237 * can avoid accounting for tlist cost throughout
1238 * query_planner() and subroutines. But now we've inserted a
1239 * tlist that might contain actual operators, sub-selects, etc
1240 * --- so we'd better account for its cost.
1242 * Below this point, any tlist eval cost for added-on nodes
1243 * should be accounted for as we create those nodes.
1244 * Presently, of the node types we can add on, only Agg,
1245 * WindowAgg, and Group project new tlists (the rest just copy
1246 * their input tuples) --- so make_agg(), make_windowagg() and
1247 * make_group() are responsible for computing the added cost.
1249 cost_qual_eval(&tlist_cost, sub_tlist, root);
1250 result_plan->startup_cost += tlist_cost.startup;
1251 result_plan->total_cost += tlist_cost.startup +
1252 tlist_cost.per_tuple * result_plan->plan_rows;
1257 * Since we're using query_planner's tlist and not the one
1258 * make_subplanTargetList calculated, we have to refigure any
1259 * grouping-column indexes make_subplanTargetList computed.
1261 locate_grouping_columns(root, tlist, result_plan->targetlist,
1266 * Insert AGG or GROUP node if needed, plus an explicit sort step
1269 * HAVING clause, if any, becomes qual of the Agg or Group node.
1271 if (use_hashed_grouping)
1273 /* Hashed aggregate plan --- no sort needed */
1274 result_plan = (Plan *) make_agg(root,
1276 (List *) parse->havingQual,
1280 extract_grouping_ops(parse->groupClause),
1284 /* Hashed aggregation produces randomly-ordered results */
1285 current_pathkeys = NIL;
1287 else if (parse->hasAggs)
1289 /* Plain aggregate plan --- sort if needed */
1290 AggStrategy aggstrategy;
1292 if (parse->groupClause)
1294 if (need_sort_for_grouping)
1296 result_plan = (Plan *)
1297 make_sort_from_groupcols(root,
1301 current_pathkeys = root->group_pathkeys;
1303 aggstrategy = AGG_SORTED;
1306 * The AGG node will not change the sort ordering of its
1307 * groups, so current_pathkeys describes the result too.
1312 aggstrategy = AGG_PLAIN;
1313 /* Result will be only one row anyway; no sort order */
1314 current_pathkeys = NIL;
1317 result_plan = (Plan *) make_agg(root,
1319 (List *) parse->havingQual,
1323 extract_grouping_ops(parse->groupClause),
1328 else if (parse->groupClause)
1331 * GROUP BY without aggregation, so insert a group node (plus
1332 * the appropriate sort node, if necessary).
1334 * Add an explicit sort if we couldn't make the path come out
1335 * the way the GROUP node needs it.
1337 if (need_sort_for_grouping)
1339 result_plan = (Plan *)
1340 make_sort_from_groupcols(root,
1344 current_pathkeys = root->group_pathkeys;
1347 result_plan = (Plan *) make_group(root,
1349 (List *) parse->havingQual,
1352 extract_grouping_ops(parse->groupClause),
1355 /* The Group node won't change sort ordering */
1357 else if (root->hasHavingQual)
1360 * No aggregates, and no GROUP BY, but we have a HAVING qual.
1361 * This is a degenerate case in which we are supposed to emit
1362 * either 0 or 1 row depending on whether HAVING succeeds.
1363 * Furthermore, there cannot be any variables in either HAVING
1364 * or the targetlist, so we actually do not need the FROM
1365 * table at all! We can just throw away the plan-so-far and
1366 * generate a Result node. This is a sufficiently unusual
1367 * corner case that it's not worth contorting the structure of
1368 * this routine to avoid having to generate the plan in the
1371 result_plan = (Plan *) make_result(root,
1376 } /* end of non-minmax-aggregate case */
1379 * Since each window function could require a different sort order, we
1380 * stack up a WindowAgg node for each window, with sort steps between
1389 * If the top-level plan node is one that cannot do expression
1390 * evaluation, we must insert a Result node to project the desired
1391 * tlist. (In some cases this might not really be required, but
1392 * it's not worth trying to avoid it.) Note that on second and
1393 * subsequent passes through the following loop, the top-level
1394 * node will be a WindowAgg which we know can project; so we only
1395 * need to check once.
1397 if (!is_projection_capable_plan(result_plan))
1399 result_plan = (Plan *) make_result(root,
1406 * The "base" targetlist for all steps of the windowing process is
1407 * a flat tlist of all Vars and Aggs needed in the result. (In
1408 * some cases we wouldn't need to propagate all of these all the
1409 * way to the top, since they might only be needed as inputs to
1410 * WindowFuncs. It's probably not worth trying to optimize that
1411 * though.) We also need any volatile sort expressions, because
1412 * make_sort_from_pathkeys won't add those on its own, and anyway
1413 * we want them evaluated only once at the bottom of the stack.
1414 * As we climb up the stack, we add outputs for the WindowFuncs
1415 * computed at each level. Also, each input tlist has to present
1416 * all the columns needed to sort the data for the next WindowAgg
1417 * step. That's handled internally by make_sort_from_pathkeys,
1418 * but we need the copyObject steps here to ensure that each plan
1419 * node has a separately modifiable tlist.
1421 window_tlist = flatten_tlist(tlist);
1423 window_tlist = add_to_flat_tlist(window_tlist,
1424 pull_agg_clause((Node *) tlist));
1425 window_tlist = add_volatile_sort_exprs(window_tlist, tlist,
1427 result_plan->targetlist = (List *) copyObject(window_tlist);
1429 foreach(l, activeWindows)
1431 WindowClause *wc = (WindowClause *) lfirst(l);
1432 List *window_pathkeys;
1434 AttrNumber *partColIdx;
1437 AttrNumber *ordColIdx;
1440 window_pathkeys = make_pathkeys_for_window(root,
1446 * This is a bit tricky: we build a sort node even if we don't
1447 * really have to sort. Even when no explicit sort is needed,
1448 * we need to have suitable resjunk items added to the input
1449 * plan's tlist for any partitioning or ordering columns that
1450 * aren't plain Vars. Furthermore, this way we can use
1451 * existing infrastructure to identify which input columns are
1452 * the interesting ones.
1454 if (window_pathkeys)
1458 sort_plan = make_sort_from_pathkeys(root,
1462 if (!pathkeys_contained_in(window_pathkeys,
1465 /* we do indeed need to sort */
1466 result_plan = (Plan *) sort_plan;
1467 current_pathkeys = window_pathkeys;
1469 /* In either case, extract the per-column information */
1470 get_column_info_for_window(root, wc, tlist,
1472 sort_plan->sortColIdx,
1482 /* empty window specification, nothing to sort */
1485 partOperators = NULL;
1488 ordOperators = NULL;
1493 /* Add the current WindowFuncs to the running tlist */
1494 window_tlist = add_to_flat_tlist(window_tlist,
1495 wflists->windowFuncs[wc->winref]);
1499 /* Install the original tlist in the topmost WindowAgg */
1500 window_tlist = tlist;
1503 /* ... and make the WindowAgg plan node */
1504 result_plan = (Plan *)
1505 make_windowagg(root,
1506 (List *) copyObject(window_tlist),
1507 list_length(wflists->windowFuncs[wc->winref]),
1519 } /* end of if (setOperations) */
1522 * If there is a DISTINCT clause, add the necessary node(s).
1524 if (parse->distinctClause)
1526 double dNumDistinctRows;
1527 long numDistinctRows;
1530 * If there was grouping or aggregation, use the current number of
1531 * rows as the estimated number of DISTINCT rows (ie, assume the
1532 * result was already mostly unique). If not, use the number of
1533 * distinct-groups calculated by query_planner.
1535 if (parse->groupClause || root->hasHavingQual || parse->hasAggs)
1536 dNumDistinctRows = result_plan->plan_rows;
1538 dNumDistinctRows = dNumGroups;
1540 /* Also convert to long int --- but 'ware overflow! */
1541 numDistinctRows = (long) Min(dNumDistinctRows, (double) LONG_MAX);
1543 /* Choose implementation method if we didn't already */
1544 if (!tested_hashed_distinct)
1547 * At this point, either hashed or sorted grouping will have to
1548 * work from result_plan, so we pass that as both "cheapest" and
1551 use_hashed_distinct =
1552 choose_hashed_distinct(root,
1553 tuple_fraction, limit_tuples,
1554 result_plan->plan_rows,
1555 result_plan->plan_width,
1556 result_plan->startup_cost,
1557 result_plan->total_cost,
1558 result_plan->startup_cost,
1559 result_plan->total_cost,
1564 if (use_hashed_distinct)
1566 /* Hashed aggregate plan --- no sort needed */
1567 result_plan = (Plan *) make_agg(root,
1568 result_plan->targetlist,
1571 list_length(parse->distinctClause),
1572 extract_grouping_cols(parse->distinctClause,
1573 result_plan->targetlist),
1574 extract_grouping_ops(parse->distinctClause),
1578 /* Hashed aggregation produces randomly-ordered results */
1579 current_pathkeys = NIL;
1584 * Use a Unique node to implement DISTINCT. Add an explicit sort
1585 * if we couldn't make the path come out the way the Unique node
1586 * needs it. If we do have to sort, always sort by the more
1587 * rigorous of DISTINCT and ORDER BY, to avoid a second sort
1588 * below. However, for regular DISTINCT, don't sort now if we
1589 * don't have to --- sorting afterwards will likely be cheaper,
1590 * and also has the possibility of optimizing via LIMIT. But for
1591 * DISTINCT ON, we *must* force the final sort now, else it won't
1592 * have the desired behavior.
1594 List *needed_pathkeys;
1596 if (parse->hasDistinctOn &&
1597 list_length(root->distinct_pathkeys) <
1598 list_length(root->sort_pathkeys))
1599 needed_pathkeys = root->sort_pathkeys;
1601 needed_pathkeys = root->distinct_pathkeys;
1603 if (!pathkeys_contained_in(needed_pathkeys, current_pathkeys))
1605 if (list_length(root->distinct_pathkeys) >=
1606 list_length(root->sort_pathkeys))
1607 current_pathkeys = root->distinct_pathkeys;
1610 current_pathkeys = root->sort_pathkeys;
1611 /* Assert checks that parser didn't mess up... */
1612 Assert(pathkeys_contained_in(root->distinct_pathkeys,
1616 result_plan = (Plan *) make_sort_from_pathkeys(root,
1622 result_plan = (Plan *) make_unique(result_plan,
1623 parse->distinctClause);
1624 result_plan->plan_rows = dNumDistinctRows;
1625 /* The Unique node won't change sort ordering */
1630 * If ORDER BY was given and we were not able to make the plan come out in
1631 * the right order, add an explicit sort step.
1633 if (parse->sortClause)
1635 if (!pathkeys_contained_in(root->sort_pathkeys, current_pathkeys))
1637 result_plan = (Plan *) make_sort_from_pathkeys(root,
1639 root->sort_pathkeys,
1641 current_pathkeys = root->sort_pathkeys;
1646 * If there is a FOR UPDATE/SHARE clause, add the LockRows node.
1647 * (Note: we intentionally test parse->rowMarks not root->rowMarks here.
1648 * If there are only non-locking rowmarks, they should be handled by
1649 * the ModifyTable node instead.)
1651 if (parse->rowMarks)
1653 result_plan = (Plan *) make_lockrows(result_plan,
1655 SS_assign_special_param(root));
1657 * The result can no longer be assumed sorted, since locking might
1658 * cause the sort key columns to be replaced with new values.
1660 current_pathkeys = NIL;
1664 * Finally, if there is a LIMIT/OFFSET clause, add the LIMIT node.
1666 if (parse->limitCount || parse->limitOffset)
1668 result_plan = (Plan *) make_limit(result_plan,
1675 /* Compute result-relations list if needed */
1676 if (parse->resultRelation)
1677 root->resultRelations = list_make1_int(parse->resultRelation);
1679 root->resultRelations = NIL;
1682 * Return the actual output ordering in query_pathkeys for possible use by
1683 * an outer query level.
1685 root->query_pathkeys = current_pathkeys;
1691 * Detect whether a plan node is a "dummy" plan created when a relation
1692 * is deemed not to need scanning due to constraint exclusion.
1694 * Currently, such dummy plans are Result nodes with constant FALSE
1698 is_dummy_plan(Plan *plan)
1700 if (IsA(plan, Result))
1702 List *rcqual = (List *) ((Result *) plan)->resconstantqual;
/* A dummy plan's gating qual must be exactly one element ... */
1704 if (list_length(rcqual) == 1)
1706 Const *constqual = (Const *) linitial(rcqual);
/* ... and that element must be a Const node (guard against other quals) */
1708 if (constqual && IsA(constqual, Const))
/*
 * A non-null FALSE constant identifies the dummy: the Result can never
 * emit a row.  (The returns are elided in this excerpt; presumably
 * true here, false otherwise — TODO confirm against full source.)
 */
1710 if (!constqual->constisnull &&
1711 !DatumGetBool(constqual->constvalue))
1720 * Create a bitmapset of the RT indexes of live base relations
1722 * Helper for preprocess_rowmarks ... at this point in the proceedings,
1723 * the only good way to distinguish baserels from appendrel children
1724 * is to see what is in the join tree.
1727 get_base_rel_indexes(Node *jtnode)
/* Recursively walk the join tree, unioning RT indexes of all leaf rels */
1733 if (IsA(jtnode, RangeTblRef))
/* Leaf case: a single range-table reference contributes its rtindex */
1735 int varno = ((RangeTblRef *) jtnode)->rtindex;
1737 result = bms_make_singleton(varno);
1739 else if (IsA(jtnode, FromExpr))
1741 FromExpr *f = (FromExpr *) jtnode;
/* Union the indexes found in every FROM-list item */
1745 foreach(l, f->fromlist)
1746 result = bms_join(result,
1747 get_base_rel_indexes(lfirst(l)));
1749 else if (IsA(jtnode, JoinExpr))
1751 JoinExpr *j = (JoinExpr *) jtnode;
/* Explicit JOIN: union the indexes from both join inputs */
1753 result = bms_join(get_base_rel_indexes(j->larg),
1754 get_base_rel_indexes(j->rarg));
/* Any other node type in the join tree indicates planner corruption */
1758 elog(ERROR, "unrecognized node type: %d",
1759 (int) nodeTag(jtnode));
1760 result = NULL; /* keep compiler quiet */
1766 * preprocess_rowmarks - set up PlanRowMarks if needed
1769 preprocess_rowmarks(PlannerInfo *root)
1771 Query *parse = root->parse;
1777 if (parse->rowMarks)
1780 * We've got trouble if FOR UPDATE/SHARE appears inside grouping,
1781 * since grouping renders a reference to individual tuple CTIDs
1782 * invalid. This is also checked at parse time, but that's
1783 * insufficient because of rule substitution, query pullup, etc.
1785 CheckSelectLocking(parse);
1790 * We only need rowmarks for UPDATE, DELETE, or FOR UPDATE/SHARE.
1792 if (parse->commandType != CMD_UPDATE &&
1793 parse->commandType != CMD_DELETE)
1798 * We need to have rowmarks for all base relations except the target.
1799 * We make a bitmapset of all base rels and then remove the items we
1800 * don't need or have FOR UPDATE/SHARE marks for.
1802 rels = get_base_rel_indexes((Node *) parse->jointree);
1803 if (parse->resultRelation)
1804 rels = bms_del_member(rels, parse->resultRelation);
1807 * Convert RowMarkClauses to PlanRowMark representation.
1810 foreach(l, parse->rowMarks)
1812 RowMarkClause *rc = (RowMarkClause *) lfirst(l);
1813 RangeTblEntry *rte = rt_fetch(rc->rti, parse->rtable);
1817 * Currently, it is syntactically impossible to have FOR UPDATE
1818 * applied to an update/delete target rel. If that ever becomes
1819 * possible, we should drop the target from the PlanRowMark list.
1821 Assert(rc->rti != parse->resultRelation);
1824 * Ignore RowMarkClauses for subqueries; they aren't real tables
1825 * and can't support true locking. Subqueries that got flattened
1826 * into the main query should be ignored completely. Any that didn't
1827 * will get ROW_MARK_COPY items in the next loop.
1829 if (rte->rtekind != RTE_RELATION)
/* This rel is explicitly locked, so drop it from the "needs a mark" set */
1832 rels = bms_del_member(rels, rc->rti);
1834 newrc = makeNode(PlanRowMark);
1835 newrc->rti = newrc->prti = rc->rti;
/*
 * Lock strength: exclusive vs. share — presumably selected by the
 * clause's FOR UPDATE flag (condition elided in this excerpt; TODO
 * confirm against full source).
 */
1837 newrc->markType = ROW_MARK_EXCLUSIVE;
1839 newrc->markType = ROW_MARK_SHARE;
1840 newrc->noWait = rc->noWait;
1841 newrc->isParent = false;
1842 /* attnos will be assigned in preprocess_targetlist */
1843 newrc->ctidAttNo = InvalidAttrNumber;
1844 newrc->toidAttNo = InvalidAttrNumber;
1845 newrc->wholeAttNo = InvalidAttrNumber;
1847 prowmarks = lappend(prowmarks, newrc);
1851 * Now, add rowmarks for any non-target, non-locked base relations.
1854 foreach(l, parse->rtable)
1856 RangeTblEntry *rte = (RangeTblEntry *) lfirst(l);
/* Skip rels that are the target or already carry a locking mark */
1860 if (!bms_is_member(i, rels))
1863 newrc = makeNode(PlanRowMark);
1864 newrc->rti = newrc->prti = i;
1865 /* real tables support REFERENCE, anything else needs COPY */
1866 if (rte->rtekind == RTE_RELATION)
1867 newrc->markType = ROW_MARK_REFERENCE;
1869 newrc->markType = ROW_MARK_COPY;
1870 newrc->noWait = false; /* doesn't matter */
1871 newrc->isParent = false;
1872 /* attnos will be assigned in preprocess_targetlist */
1873 newrc->ctidAttNo = InvalidAttrNumber;
1874 newrc->toidAttNo = InvalidAttrNumber;
1875 newrc->wholeAttNo = InvalidAttrNumber;
1877 prowmarks = lappend(prowmarks, newrc);
/* Publish the finished list for the rest of the planner to consult */
1880 root->rowMarks = prowmarks;
1884 * preprocess_limit - do pre-estimation for LIMIT and/or OFFSET clauses
1886 * We try to estimate the values of the LIMIT/OFFSET clauses, and pass the
1887 * results back in *count_est and *offset_est. These variables are set to
1888 * 0 if the corresponding clause is not present, and -1 if it's present
1889 * but we couldn't estimate the value for it. (The "0" convention is OK
1890 * for OFFSET but a little bit bogus for LIMIT: effectively we estimate
1891 * LIMIT 0 as though it were LIMIT 1. But this is in line with the planner's
1892 * usual practice of never estimating less than one row.) These values will
1893 * be passed to make_limit, which see if you change this code.
1895 * The return value is the suitably adjusted tuple_fraction to use for
1896 * planning the query. This adjustment is not overridable, since it reflects
1897 * plan actions that grouping_planner() will certainly take, not assumptions
1901 preprocess_limit(PlannerInfo *root, double tuple_fraction,
1902 int64 *offset_est, int64 *count_est)
1904 Query *parse = root->parse;
1906 double limit_fraction;
1908 /* Should not be called unless LIMIT or OFFSET */
1909 Assert(parse->limitCount || parse->limitOffset);
1912 * Try to obtain the clause values. We use estimate_expression_value
1913 * primarily because it can sometimes do something useful with Params.
1915 if (parse->limitCount)
1917 est = estimate_expression_value(root, parse->limitCount);
1918 if (est && IsA(est, Const))
1920 if (((Const *) est)->constisnull)
1922 /* NULL indicates LIMIT ALL, ie, no limit */
1923 *count_est = 0; /* treat as not present */
1927 *count_est = DatumGetInt64(((Const *) est)->constvalue);
1928 if (*count_est <= 0)
1929 *count_est = 1; /* force to at least 1 */
/* Expression didn't fold to a Const; mark as unestimatable */
1933 *count_est = -1; /* can't estimate */
1936 *count_est = 0; /* not present */
1938 if (parse->limitOffset)
1940 est = estimate_expression_value(root, parse->limitOffset);
1941 if (est && IsA(est, Const))
1943 if (((Const *) est)->constisnull)
1945 /* Treat NULL as no offset; the executor will too */
1946 *offset_est = 0; /* treat as not present */
1950 *offset_est = DatumGetInt64(((Const *) est)->constvalue);
1951 if (*offset_est < 0)
1952 *offset_est = 0; /* less than 0 is same as 0 */
/* Expression didn't fold to a Const; mark as unestimatable */
1956 *offset_est = -1; /* can't estimate */
1959 *offset_est = 0; /* not present */
/* --- LIMIT present (known or unknown value): shrink tuple_fraction --- */
1961 if (*count_est != 0)
1964 * A LIMIT clause limits the absolute number of tuples returned.
1965 * However, if it's not a constant LIMIT then we have to guess; for
1966 * lack of a better idea, assume 10% of the plan's result is wanted.
1968 if (*count_est < 0 || *offset_est < 0)
1970 /* LIMIT or OFFSET is an expression ... punt ... */
1971 limit_fraction = 0.10;
1975 /* LIMIT (plus OFFSET, if any) is max number of tuples needed */
1976 limit_fraction = (double) *count_est + (double) *offset_est;
1980 * If we have absolute limits from both caller and LIMIT, use the
1981 * smaller value; likewise if they are both fractional. If one is
1982 * fractional and the other absolute, we can't easily determine which
1983 * is smaller, but we use the heuristic that the absolute will usually
1986 if (tuple_fraction >= 1.0)
1988 if (limit_fraction >= 1.0)
/* both absolute: take the tighter (smaller) bound */
1991 tuple_fraction = Min(tuple_fraction, limit_fraction);
1995 /* caller absolute, limit fractional; use caller's value */
1998 else if (tuple_fraction > 0.0)
2000 if (limit_fraction >= 1.0)
2002 /* caller fractional, limit absolute; use limit */
2003 tuple_fraction = limit_fraction;
2007 /* both fractional */
2008 tuple_fraction = Min(tuple_fraction, limit_fraction);
2013 /* no info from caller, just use limit */
2014 tuple_fraction = limit_fraction;
/* --- OFFSET without LIMIT: inflate tuple_fraction instead --- */
2017 else if (*offset_est != 0 && tuple_fraction > 0.0)
2020 * We have an OFFSET but no LIMIT. This acts entirely differently
2021 * from the LIMIT case: here, we need to increase rather than decrease
2022 * the caller's tuple_fraction, because the OFFSET acts to cause more
2023 * tuples to be fetched instead of fewer. This only matters if we got
2024 * a tuple_fraction > 0, however.
2026 * As above, use 10% if OFFSET is present but unestimatable.
2028 if (*offset_est < 0)
2029 limit_fraction = 0.10;
2031 limit_fraction = (double) *offset_est;
2034 * If we have absolute counts from both caller and OFFSET, add them
2035 * together; likewise if they are both fractional. If one is
2036 * fractional and the other absolute, we want to take the larger, and
2037 * we heuristically assume that's the fractional one.
2039 if (tuple_fraction >= 1.0)
2041 if (limit_fraction >= 1.0)
2043 /* both absolute, so add them together */
2044 tuple_fraction += limit_fraction;
2048 /* caller absolute, limit fractional; use limit */
2049 tuple_fraction = limit_fraction;
2054 if (limit_fraction >= 1.0)
2056 /* caller fractional, limit absolute; use caller's value */
2060 /* both fractional, so add them together */
2061 tuple_fraction += limit_fraction;
/* Fractions summing past 1.0 mean "probably need everything" */
2062 if (tuple_fraction >= 1.0)
2063 tuple_fraction = 0.0; /* assume fetch all */
2068 return tuple_fraction;
2073 * preprocess_groupclause - do preparatory work on GROUP BY clause
2075 * The idea here is to adjust the ordering of the GROUP BY elements
2076 * (which in itself is semantically insignificant) to match ORDER BY,
2077 * thereby allowing a single sort operation to both implement the ORDER BY
2078 * requirement and set up for a Unique step that implements GROUP BY.
2080 * In principle it might be interesting to consider other orderings of the
2081 * GROUP BY elements, which could match the sort ordering of other
2082 * possible plans (eg an indexscan) and thereby reduce cost. We don't
2083 * bother with that, though. Hashed grouping will frequently win anyway.
2085 * Note: we need no comparable processing of the distinctClause because
2086 * the parser already enforced that that matches ORDER BY.
2089 preprocess_groupclause(PlannerInfo *root)
2091 Query *parse = root->parse;
2092 List *new_groupclause;
2097 /* If no ORDER BY, nothing useful to do here */
2098 if (parse->sortClause == NIL)
2102 * Scan the ORDER BY clause and construct a list of matching GROUP BY
2103 * items, but only as far as we can make a matching prefix.
2105 * This code assumes that the sortClause contains no duplicate items.
2107 new_groupclause = NIL;
2108 foreach(sl, parse->sortClause)
2110 SortGroupClause *sc = (SortGroupClause *) lfirst(sl);
2112 foreach(gl, parse->groupClause)
2114 SortGroupClause *gc = (SortGroupClause *) lfirst(gl);
/* Matching GROUP BY item found for this ORDER BY item: keep it in order */
2118 new_groupclause = lappend(new_groupclause, gc);
2123 break; /* no match, so stop scanning */
2126 /* Did we match all of the ORDER BY list, or just some of it? */
2127 partial_match = (sl != NULL);
2129 /* If no match at all, no point in reordering GROUP BY */
2130 if (new_groupclause == NIL)
2134 * Add any remaining GROUP BY items to the new list, but only if we were
2135 * able to make a complete match. In other words, we only rearrange the
2136 * GROUP BY list if the result is that one list is a prefix of the other
2137 * --- otherwise there's no possibility of a common sort. Also, give up
2138 * if there are any non-sortable GROUP BY items, since then there's no
2141 foreach(gl, parse->groupClause)
2143 SortGroupClause *gc = (SortGroupClause *) lfirst(gl);
2145 if (list_member_ptr(new_groupclause, gc))
2146 continue; /* it matched an ORDER BY item */
/* A leftover item while only partially matched means no common sort */
2148 return; /* give up, no common sort possible */
2149 if (!OidIsValid(gc->sortop))
2150 return; /* give up, GROUP BY can't be sorted */
2151 new_groupclause = lappend(new_groupclause, gc);
2154 /* Success --- install the rearranged GROUP BY list */
2155 Assert(list_length(parse->groupClause) == list_length(new_groupclause));
2156 parse->groupClause = new_groupclause;
2160 * choose_hashed_grouping - should we use hashed grouping?
2162 * Returns TRUE to select hashing, FALSE to select sorting.
2165 choose_hashed_grouping(PlannerInfo *root,
2166 double tuple_fraction, double limit_tuples,
2167 double path_rows, int path_width,
2168 Path *cheapest_path, Path *sorted_path,
2169 double dNumGroups, AggClauseCounts *agg_counts)
2171 Query *parse = root->parse;
2172 int numGroupCols = list_length(parse->groupClause);
2176 List *target_pathkeys;
2177 List *current_pathkeys;
2182 * Executor doesn't support hashed aggregation with DISTINCT or ORDER BY
2183 * aggregates. (Doing so would imply storing *all* the input values in
2184 * the hash table, and/or running many sorts in parallel, either of which
2185 * seems like a certain loser.)
2187 can_hash = (agg_counts->numOrderedAggs == 0 &&
2188 grouping_is_hashable(parse->groupClause));
2189 can_sort = grouping_is_sortable(parse->groupClause);
2191 /* Quick out if only one choice is workable */
2192 if (!(can_hash && can_sort))
/* Neither strategy works: the query cannot be implemented at all */
2200 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
2201 errmsg("could not implement GROUP BY"),
2202 errdetail("Some of the datatypes only support hashing, while others only support sorting.")));
2205 /* Prefer sorting when enable_hashagg is off */
2206 if (!enable_hashagg)
2210 * Don't do it if it doesn't look like the hashtable will fit into
2214 /* Estimate per-hash-entry space at tuple width... */
2215 hashentrysize = MAXALIGN(path_width) + MAXALIGN(sizeof(MinimalTupleData));
2216 /* plus space for pass-by-ref transition values... */
2217 hashentrysize += agg_counts->transitionSpace;
2218 /* plus the per-hash-entry overhead */
2219 hashentrysize += hash_agg_entry_size(agg_counts->numAggs);
/* Reject hashing if the estimated table exceeds work_mem (in KB) */
2221 if (hashentrysize * dNumGroups > work_mem * 1024L)
2225 * When we have both GROUP BY and DISTINCT, use the more-rigorous of
2226 * DISTINCT and ORDER BY as the assumed required output sort order. This
2227 * is an oversimplification because the DISTINCT might get implemented via
2228 * hashing, but it's not clear that the case is common enough (or that our
2229 * estimates are good enough) to justify trying to solve it exactly.
2231 if (list_length(root->distinct_pathkeys) >
2232 list_length(root->sort_pathkeys))
2233 target_pathkeys = root->distinct_pathkeys;
2235 target_pathkeys = root->sort_pathkeys;
2238 * See if the estimated cost is no more than doing it the other way. While
2239 * avoiding the need for sorted input is usually a win, the fact that the
2240 * output won't be sorted may be a loss; so we need to do an actual cost
2243 * We need to consider cheapest_path + hashagg [+ final sort] versus
2244 * either cheapest_path [+ sort] + group or agg [+ final sort] or
2245 * presorted_path + group or agg [+ final sort] where brackets indicate a
2246 * step that may not be needed. We assume query_planner() will have
2247 * returned a presorted path only if it's a winner compared to
2248 * cheapest_path for this purpose.
2250 * These path variables are dummies that just hold cost fields; we don't
2251 * make actual Paths for these steps.
2253 cost_agg(&hashed_p, root, AGG_HASHED, agg_counts->numAggs,
2254 numGroupCols, dNumGroups,
2255 cheapest_path->startup_cost, cheapest_path->total_cost,
2257 /* Result of hashed agg is always unsorted */
2258 if (target_pathkeys)
2259 cost_sort(&hashed_p, root, target_pathkeys, hashed_p.total_cost,
2260 dNumGroups, path_width, limit_tuples)
/*
 * Build the cost of the sorted alternative: start from the presorted
 * path if query_planner produced one, else from the cheapest path.
 * (The branch condition is elided in this excerpt — presumably
 * "if (sorted_path)"; TODO confirm against full source.)
 */;
2264 sorted_p.startup_cost = sorted_path->startup_cost;
2265 sorted_p.total_cost = sorted_path->total_cost;
2266 current_pathkeys = sorted_path->pathkeys;
2270 sorted_p.startup_cost = cheapest_path->startup_cost;
2271 sorted_p.total_cost = cheapest_path->total_cost;
2272 current_pathkeys = cheapest_path->pathkeys;
2274 if (!pathkeys_contained_in(root->group_pathkeys, current_pathkeys))
/* Input isn't sorted for grouping yet: charge an explicit sort */
2276 cost_sort(&sorted_p, root, root->group_pathkeys, sorted_p.total_cost,
2277 path_rows, path_width, -1.0);
2278 current_pathkeys = root->group_pathkeys;
2282 cost_agg(&sorted_p, root, AGG_SORTED, agg_counts->numAggs,
2283 numGroupCols, dNumGroups,
2284 sorted_p.startup_cost, sorted_p.total_cost,
2287 cost_group(&sorted_p, root, numGroupCols, dNumGroups,
2288 sorted_p.startup_cost, sorted_p.total_cost,
2290 /* The Agg or Group node will preserve ordering */
2291 if (target_pathkeys &&
2292 !pathkeys_contained_in(target_pathkeys, current_pathkeys))
2293 cost_sort(&sorted_p, root, target_pathkeys, sorted_p.total_cost,
2294 dNumGroups, path_width, limit_tuples);
2297 * Now make the decision using the top-level tuple fraction. First we
2298 * have to convert an absolute count (LIMIT) into fractional form.
2300 if (tuple_fraction >= 1.0)
2301 tuple_fraction /= dNumGroups;
2303 if (compare_fractional_path_costs(&hashed_p, &sorted_p,
2304 tuple_fraction) < 0)
2306 /* Hashed is cheaper, so use it */
2313 * choose_hashed_distinct - should we use hashing for DISTINCT?
2315 * This is fairly similar to choose_hashed_grouping, but there are enough
2316 * differences that it doesn't seem worth trying to unify the two functions.
2317 * (One difference is that we sometimes apply this after forming a Plan,
2318 * so the input alternatives can't be represented as Paths --- instead we
2319 * pass in the costs as individual variables.)
2321 * But note that making the two choices independently is a bit bogus in
2322 * itself. If the two could be combined into a single choice operation
2323 * it'd probably be better, but that seems far too unwieldy to be practical,
2324 * especially considering that the combination of GROUP BY and DISTINCT
2325 * isn't very common in real queries. By separating them, we are giving
2326 * extra preference to using a sorting implementation when a common sort key
2327 * is available ... and that's not necessarily wrong anyway.
2329 * Returns TRUE to select hashing, FALSE to select sorting.
2332 choose_hashed_distinct(PlannerInfo *root,
2333 double tuple_fraction, double limit_tuples,
2334 double path_rows, int path_width,
2335 Cost cheapest_startup_cost, Cost cheapest_total_cost,
2336 Cost sorted_startup_cost, Cost sorted_total_cost,
2337 List *sorted_pathkeys,
2338 double dNumDistinctRows)
2340 Query *parse = root->parse;
2341 int numDistinctCols = list_length(parse->distinctClause);
2345 List *current_pathkeys;
2346 List *needed_pathkeys;
2351 * If we have a sortable DISTINCT ON clause, we always use sorting.
2352 * This enforces the expected behavior of DISTINCT ON.
2354 can_sort = grouping_is_sortable(parse->distinctClause);
2355 if (can_sort && parse->hasDistinctOn)
2358 can_hash = grouping_is_hashable(parse->distinctClause);
2360 /* Quick out if only one choice is workable */
2361 if (!(can_hash && can_sort))
2369 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
2370 errmsg("could not implement DISTINCT"),
2371 errdetail("Some of the datatypes only support hashing, while others only support sorting.")));
2374 /* Prefer sorting when enable_hashagg is off */
2375 if (!enable_hashagg)
2379 * Don't do it if it doesn't look like the hashtable will fit into
2382 hashentrysize = MAXALIGN(path_width) + MAXALIGN(sizeof(MinimalTupleData));
2384 if (hashentrysize * dNumDistinctRows > work_mem * 1024L)
2388 * See if the estimated cost is no more than doing it the other way. While
2389 * avoiding the need for sorted input is usually a win, the fact that the
2390 * output won't be sorted may be a loss; so we need to do an actual cost
2393 * We need to consider cheapest_path + hashagg [+ final sort] versus
2394 * sorted_path [+ sort] + group [+ final sort] where brackets indicate a
2395 * step that may not be needed.
2397 * These path variables are dummies that just hold cost fields; we don't
2398 * make actual Paths for these steps.
2400 cost_agg(&hashed_p, root, AGG_HASHED, 0,
2401 numDistinctCols, dNumDistinctRows,
2402 cheapest_startup_cost, cheapest_total_cost,
2406 * Result of hashed agg is always unsorted, so if ORDER BY is present we
2407 * need to charge for the final sort.
2409 if (parse->sortClause)
2410 cost_sort(&hashed_p, root, root->sort_pathkeys, hashed_p.total_cost,
2411 dNumDistinctRows, path_width, limit_tuples);
2414 * Now for the GROUP case. See comments in grouping_planner about the
2415 * sorting choices here --- this code should match that code.
2417 sorted_p.startup_cost = sorted_startup_cost;
2418 sorted_p.total_cost = sorted_total_cost;
2419 current_pathkeys = sorted_pathkeys;
2420 if (parse->hasDistinctOn &&
2421 list_length(root->distinct_pathkeys) <
2422 list_length(root->sort_pathkeys))
2423 needed_pathkeys = root->sort_pathkeys;
2425 needed_pathkeys = root->distinct_pathkeys;
2426 if (!pathkeys_contained_in(needed_pathkeys, current_pathkeys))
2428 if (list_length(root->distinct_pathkeys) >=
2429 list_length(root->sort_pathkeys))
2430 current_pathkeys = root->distinct_pathkeys;
2432 current_pathkeys = root->sort_pathkeys;
2433 cost_sort(&sorted_p, root, current_pathkeys, sorted_p.total_cost,
2434 path_rows, path_width, -1.0);
2436 cost_group(&sorted_p, root, numDistinctCols, dNumDistinctRows,
2437 sorted_p.startup_cost, sorted_p.total_cost,
2439 if (parse->sortClause &&
2440 !pathkeys_contained_in(root->sort_pathkeys, current_pathkeys))
2441 cost_sort(&sorted_p, root, root->sort_pathkeys, sorted_p.total_cost,
2442 dNumDistinctRows, path_width, limit_tuples);
2445 * Now make the decision using the top-level tuple fraction. First we
2446 * have to convert an absolute count (LIMIT) into fractional form.
2448 if (tuple_fraction >= 1.0)
2449 tuple_fraction /= dNumDistinctRows;
2451 if (compare_fractional_path_costs(&hashed_p, &sorted_p,
2452 tuple_fraction) < 0)
2454 /* Hashed is cheaper, so use it */
2461 * make_subplanTargetList
2462 * Generate appropriate target list when grouping is required.
2464 * When grouping_planner inserts Aggregate, Group, or Result plan nodes
2465 * above the result of query_planner, we typically want to pass a different
2466 * target list to query_planner than the outer plan nodes should have.
2467 * This routine generates the correct target list for the subplan.
2469 * The initial target list passed from the parser already contains entries
2470 * for all ORDER BY and GROUP BY expressions, but it will not have entries
2471 * for variables used only in HAVING clauses; so we need to add those
2472 * variables to the subplan target list. Also, we flatten all expressions
2473 * except GROUP BY items into their component variables; the other expressions
2474 * will be computed by the inserted nodes rather than by the subplan.
2475 * For example, given a query like
2476 * SELECT a+b,SUM(c+d) FROM table GROUP BY a+b;
2477 * we want to pass this targetlist to the subplan:
2479 * where the a+b target will be used by the Sort/Group steps, and the
2480 * other targets will be used for computing the final results. (In the
2481 * above example we could theoretically suppress the a and b targets and
2482 * pass down only c,d,a+b, but it's not really worth the trouble to
2483 * eliminate simple var references from the subplan. We will avoid doing
2484 * the extra computation to recompute a+b at the outer level; see
2485 * fix_upper_expr() in setrefs.c.)
2487 * If we are grouping or aggregating, *and* there are no non-Var grouping
2488 * expressions, then the returned tlist is effectively dummy; we do not
2489 * need to force it to be evaluated, because all the Vars it contains
2490 * should be present in the output of query_planner anyway.
2492 * 'tlist' is the query's target list.
2493 * 'groupColIdx' receives an array of column numbers for the GROUP BY
2494 * expressions (if there are any) in the subplan's target list.
2495 * 'need_tlist_eval' is set true if we really need to evaluate the
2498 * The result is the targetlist to be passed to the subplan.
2501 make_subplanTargetList(PlannerInfo *root,
2503 AttrNumber **groupColIdx,
2504 bool *need_tlist_eval)
2506 Query *parse = root->parse;
2511 *groupColIdx = NULL;
2514 * If we're not grouping or aggregating, there's nothing to do here;
2515 * query_planner should receive the unmodified target list.
2517 if (!parse->hasAggs && !parse->groupClause && !root->hasHavingQual &&
2518 !parse->hasWindowFuncs)
2520 *need_tlist_eval = true;
2525 * Otherwise, start with a "flattened" tlist (having just the vars
2526 * mentioned in the targetlist and HAVING qual --- but not upper-level
2527 * Vars; they will be replaced by Params later on). Note this includes
2528 * vars used in resjunk items, so we are covering the needs of ORDER BY
2529 * and window specifications.
2531 sub_tlist = flatten_tlist(tlist);
2532 extravars = pull_var_clause(parse->havingQual, PVC_INCLUDE_PLACEHOLDERS);
2533 sub_tlist = add_to_flat_tlist(sub_tlist, extravars);
2534 list_free(extravars);
2535 *need_tlist_eval = false; /* only eval if not flat tlist */
2538 * If grouping, create sub_tlist entries for all GROUP BY expressions
2539 * (GROUP BY items that are simple Vars should be in the list already),
2540 * and make an array showing where the group columns are in the sub_tlist.
2542 numCols = list_length(parse->groupClause);
2546 AttrNumber *grpColIdx;
2549 grpColIdx = (AttrNumber *) palloc(sizeof(AttrNumber) * numCols);
2550 *groupColIdx = grpColIdx;
2552 foreach(gl, parse->groupClause)
2554 SortGroupClause *grpcl = (SortGroupClause *) lfirst(gl);
2555 Node *groupexpr = get_sortgroupclause_expr(grpcl, tlist);
2559 * Find or make a matching sub_tlist entry. If the groupexpr
2560 * isn't a Var, no point in searching. (Note that the parser
2561 * won't make multiple groupClause entries for the same TLE.)
2563 if (groupexpr && IsA(groupexpr, Var))
2564 te = tlist_member(groupexpr, sub_tlist);
2570 te = makeTargetEntry((Expr *) groupexpr,
2571 list_length(sub_tlist) + 1,
2574 sub_tlist = lappend(sub_tlist, te);
2575 *need_tlist_eval = true; /* it's not flat anymore */
2578 /* and save its resno */
2579 grpColIdx[keyno++] = te->resno;
2587 * locate_grouping_columns
2588 * Locate grouping columns in the tlist chosen by query_planner.
2590 * This is only needed if we don't use the sub_tlist chosen by
2591 * make_subplanTargetList. We have to forget the column indexes found
2592 * by that routine and re-locate the grouping exprs in the real sub_tlist.
2595 locate_grouping_columns(PlannerInfo *root,
2598 AttrNumber *groupColIdx)
2604 * No work unless grouping.
2606 if (!root->parse->groupClause)
2608 Assert(groupColIdx == NULL);
2611 Assert(groupColIdx != NULL);
2613 foreach(gl, root->parse->groupClause)
2615 SortGroupClause *grpcl = (SortGroupClause *) lfirst(gl);
2616 Node *groupexpr = get_sortgroupclause_expr(grpcl, tlist);
2617 TargetEntry *te = tlist_member(groupexpr, sub_tlist);
2620 elog(ERROR, "failed to locate grouping columns");
2621 groupColIdx[keyno++] = te->resno;
2626 * postprocess_setop_tlist
2627 * Fix up targetlist returned by plan_set_operations().
2629 * We need to transpose sort key info from the orig_tlist into new_tlist.
2630 * NOTE: this would not be good enough if we supported resjunk sort keys
2631 * for results of set operations --- then, we'd need to project a whole
2632 * new tlist to evaluate the resjunk columns. For now, just ereport if we
2633 * find any resjunk columns in orig_tlist.
2636 postprocess_setop_tlist(List *new_tlist, List *orig_tlist)
2639 ListCell *orig_tlist_item = list_head(orig_tlist);
2641 foreach(l, new_tlist)
2643 TargetEntry *new_tle = (TargetEntry *) lfirst(l);
2644 TargetEntry *orig_tle;
2646 /* ignore resjunk columns in setop result */
2647 if (new_tle->resjunk)
2650 Assert(orig_tlist_item != NULL);
2651 orig_tle = (TargetEntry *) lfirst(orig_tlist_item);
2652 orig_tlist_item = lnext(orig_tlist_item);
2653 if (orig_tle->resjunk) /* should not happen */
2654 elog(ERROR, "resjunk output columns are not implemented");
2655 Assert(new_tle->resno == orig_tle->resno);
2656 new_tle->ressortgroupref = orig_tle->ressortgroupref;
2658 if (orig_tlist_item != NULL)
2659 elog(ERROR, "resjunk output columns are not implemented");
2664 * select_active_windows
2665 * Create a list of the "active" window clauses (ie, those referenced
2666 * by non-deleted WindowFuncs) in the order they are to be executed.
2669 select_active_windows(PlannerInfo *root, WindowFuncLists *wflists)
2675 /* First, make a list of the active windows */
2677 foreach(lc, root->parse->windowClause)
2679 WindowClause *wc = (WindowClause *) lfirst(lc);
2681 /* It's only active if wflists shows some related WindowFuncs */
2682 Assert(wc->winref <= wflists->maxWinRef);
2683 if (wflists->windowFuncs[wc->winref] != NIL)
2684 actives = lappend(actives, wc);
2688 * Now, ensure that windows with identical partitioning/ordering clauses
2689 * are adjacent in the list. This is required by the SQL standard, which
2690 * says that only one sort is to be used for such windows, even if they
2691 * are otherwise distinct (eg, different names or framing clauses).
2693 * There is room to be much smarter here, for example detecting whether
2694 * one window's sort keys are a prefix of another's (so that sorting for
2695 * the latter would do for the former), or putting windows first that
2696 * match a sort order available for the underlying query. For the moment
2697 * we are content with meeting the spec.
2700 while (actives != NIL)
2702 WindowClause *wc = (WindowClause *) linitial(actives);
2706 /* Move wc from actives to result */
2707 actives = list_delete_first(actives);
2708 result = lappend(result, wc);
2710 /* Now move any matching windows from actives to result */
2712 for (lc = list_head(actives); lc; lc = next)
2714 WindowClause *wc2 = (WindowClause *) lfirst(lc);
2717 /* framing options are NOT to be compared here! */
2718 if (equal(wc->partitionClause, wc2->partitionClause) &&
2719 equal(wc->orderClause, wc2->orderClause))
2721 actives = list_delete_cell(actives, lc, prev);
2722 result = lappend(result, wc2);
2733 * add_volatile_sort_exprs
2734 * Identify any volatile sort/group expressions used by the active
2735 * windows, and add them to window_tlist if not already present.
2736 * Return the modified window_tlist.
2739 add_volatile_sort_exprs(List *window_tlist, List *tlist, List *activeWindows)
2741 Bitmapset *sgrefs = NULL;
2744 /* First, collect the sortgrouprefs of the windows into a bitmapset */
2745 foreach(lc, activeWindows)
2747 WindowClause *wc = (WindowClause *) lfirst(lc);
2750 foreach(lc2, wc->partitionClause)
2752 SortGroupClause *sortcl = (SortGroupClause *) lfirst(lc2);
2754 sgrefs = bms_add_member(sgrefs, sortcl->tleSortGroupRef);
2756 foreach(lc2, wc->orderClause)
2758 SortGroupClause *sortcl = (SortGroupClause *) lfirst(lc2);
2760 sgrefs = bms_add_member(sgrefs, sortcl->tleSortGroupRef);
2765 * Now scan the original tlist to find the referenced expressions. Any
2766 * that are volatile must be added to window_tlist.
2768 * Note: we know that the input window_tlist contains no items marked with
2769 * ressortgrouprefs, so we don't have to worry about collisions of the
2770 * reference numbers.
2774 TargetEntry *tle = (TargetEntry *) lfirst(lc);
2776 if (tle->ressortgroupref != 0 &&
2777 bms_is_member(tle->ressortgroupref, sgrefs) &&
2778 contain_volatile_functions((Node *) tle->expr))
2780 TargetEntry *newtle;
2782 newtle = makeTargetEntry(tle->expr,
2783 list_length(window_tlist) + 1,
2786 newtle->ressortgroupref = tle->ressortgroupref;
2787 window_tlist = lappend(window_tlist, newtle);
2791 return window_tlist;
2795 * make_pathkeys_for_window
2796 * Create a pathkeys list describing the required input ordering
2797 * for the given WindowClause.
2799 * The required ordering is first the PARTITION keys, then the ORDER keys.
2800 * In the future we might try to implement windowing using hashing, in which
2801 * case the ordering could be relaxed, but for now we always sort.
2804 make_pathkeys_for_window(PlannerInfo *root, WindowClause *wc,
2805 List *tlist, bool canonicalize)
2807 List *window_pathkeys;
2808 List *window_sortclauses;
2810 /* Throw error if can't sort */
2811 if (!grouping_is_sortable(wc->partitionClause))
2813 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
2814 errmsg("could not implement window PARTITION BY"),
2815 errdetail("Window partitioning columns must be of sortable datatypes.")));
2816 if (!grouping_is_sortable(wc->orderClause))
2818 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
2819 errmsg("could not implement window ORDER BY"),
2820 errdetail("Window ordering columns must be of sortable datatypes.")));
2822 /* Okay, make the combined pathkeys */
2823 window_sortclauses = list_concat(list_copy(wc->partitionClause),
2824 list_copy(wc->orderClause));
2825 window_pathkeys = make_pathkeys_for_sortclauses(root,
2829 list_free(window_sortclauses);
2830 return window_pathkeys;
2834 * get_column_info_for_window
2835 * Get the partitioning/ordering column numbers and equality operators
2836 * for a WindowAgg node.
2838 * This depends on the behavior of make_pathkeys_for_window()!
2840 * We are given the target WindowClause and an array of the input column
2841 * numbers associated with the resulting pathkeys. In the easy case, there
2842 * are the same number of pathkey columns as partitioning + ordering columns
2843 * and we just have to copy some data around. However, it's possible that
2844 * some of the original partitioning + ordering columns were eliminated as
2845 * redundant during the transformation to pathkeys. (This can happen even
2846 * though the parser gets rid of obvious duplicates. A typical scenario is a
2847 * window specification "PARTITION BY x ORDER BY y" coupled with a clause
2848 * "WHERE x = y" that causes the two sort columns to be recognized as
2849 * redundant.) In that unusual case, we have to work a lot harder to
2850 * determine which keys are significant.
2852 * The method used here is a bit brute-force: add the sort columns to a list
2853 * one at a time and note when the resulting pathkey list gets longer. But
2854 * it's a sufficiently uncommon case that a faster way doesn't seem worth
2855 * the amount of code refactoring that'd be needed.
2859 get_column_info_for_window(PlannerInfo *root, WindowClause *wc, List *tlist,
2860 int numSortCols, AttrNumber *sortColIdx,
2862 AttrNumber **partColIdx,
2863 Oid **partOperators,
2865 AttrNumber **ordColIdx,
2868 int numPart = list_length(wc->partitionClause);
2869 int numOrder = list_length(wc->orderClause);
2871 if (numSortCols == numPart + numOrder)
2874 *partNumCols = numPart;
2875 *partColIdx = sortColIdx;
2876 *partOperators = extract_grouping_ops(wc->partitionClause);
2877 *ordNumCols = numOrder;
2878 *ordColIdx = sortColIdx + numPart;
2879 *ordOperators = extract_grouping_ops(wc->orderClause);
2888 /* first, allocate what's certainly enough space for the arrays */
2890 *partColIdx = (AttrNumber *) palloc(numPart * sizeof(AttrNumber));
2891 *partOperators = (Oid *) palloc(numPart * sizeof(Oid));
2893 *ordColIdx = (AttrNumber *) palloc(numOrder * sizeof(AttrNumber));
2894 *ordOperators = (Oid *) palloc(numOrder * sizeof(Oid));
2898 foreach(lc, wc->partitionClause)
2900 SortGroupClause *sgc = (SortGroupClause *) lfirst(lc);
2903 sortclauses = lappend(sortclauses, sgc);
2904 new_pathkeys = make_pathkeys_for_sortclauses(root,
2908 if (list_length(new_pathkeys) > list_length(pathkeys))
2910 /* this sort clause is actually significant */
2911 (*partColIdx)[*partNumCols] = sortColIdx[scidx++];
2912 (*partOperators)[*partNumCols] = sgc->eqop;
2914 pathkeys = new_pathkeys;
2917 foreach(lc, wc->orderClause)
2919 SortGroupClause *sgc = (SortGroupClause *) lfirst(lc);
2922 sortclauses = lappend(sortclauses, sgc);
2923 new_pathkeys = make_pathkeys_for_sortclauses(root,
2927 if (list_length(new_pathkeys) > list_length(pathkeys))
2929 /* this sort clause is actually significant */
2930 (*ordColIdx)[*ordNumCols] = sortColIdx[scidx++];
2931 (*ordOperators)[*ordNumCols] = sgc->eqop;
2933 pathkeys = new_pathkeys;
2936 /* complain if we didn't eat exactly the right number of sort cols */
2937 if (scidx != numSortCols)
2938 elog(ERROR, "failed to deconstruct sort operators into partitioning/ordering operators");
2944 * expression_planner
2945 * Perform planner's transformations on a standalone expression.
2947 * Various utility commands need to evaluate expressions that are not part
2948 * of a plannable query. They can do so using the executor's regular
2949 * expression-execution machinery, but first the expression has to be fed
2950 * through here to transform it from parser output to something executable.
2952 * Currently, we disallow sublinks in standalone expressions, so there's no
2953 * real "planning" involved here. (That might not always be true though.)
2954 * What we must do is run eval_const_expressions to ensure that any function
2955 * calls are converted to positional notation and function default arguments
2956 * get inserted. The fact that constant subexpressions get simplified is a
2957 * side-effect that is useful when the expression will get evaluated more than
2958 * once. Also, we must fix operator function IDs.
2960 * Note: this must not make any damaging changes to the passed-in expression
2961 * tree. (It would actually be okay to apply fix_opfuncids to it, but since
2962 * we first do an expression_tree_mutator-based walk, what is returned will
2963 * be a new node tree.)
2966 expression_planner(Expr *expr)
2971 * Convert named-argument function calls, insert default arguments and
2972 * simplify constant subexprs
2974 result = eval_const_expressions(NULL, (Node *) expr);
2976 /* Fill in opfuncid values if missing */
2977 fix_opfuncids(result);
2979 return (Expr *) result;