1 /*-------------------------------------------------------------------------
4 * The query optimizer external interface.
6 * Portions Copyright (c) 1996-2008, PostgreSQL Global Development Group
7 * Portions Copyright (c) 1994, Regents of the University of California
11 * $PostgreSQL: pgsql/src/backend/optimizer/plan/planner.c,v 1.248 2008/12/28 18:53:56 tgl Exp $
13 *-------------------------------------------------------------------------
20 #include "catalog/pg_operator.h"
21 #include "executor/executor.h"
22 #include "executor/nodeAgg.h"
23 #include "miscadmin.h"
24 #include "nodes/makefuncs.h"
25 #include "optimizer/clauses.h"
26 #include "optimizer/cost.h"
27 #include "optimizer/pathnode.h"
28 #include "optimizer/paths.h"
29 #include "optimizer/planmain.h"
30 #include "optimizer/planner.h"
31 #include "optimizer/prep.h"
32 #include "optimizer/subselect.h"
33 #include "optimizer/tlist.h"
34 #include "optimizer/var.h"
35 #ifdef OPTIMIZER_DEBUG
36 #include "nodes/print.h"
38 #include "parser/parse_expr.h"
39 #include "parser/parse_oper.h"
40 #include "parser/parsetree.h"
41 #include "utils/lsyscache.h"
42 #include "utils/syscache.h"
46 double cursor_tuple_fraction = DEFAULT_CURSOR_TUPLE_FRACTION;
48 /* Hook for plugins to get control in planner() */
49 planner_hook_type planner_hook = NULL;
52 /* Expression kind codes for preprocess_expression */
53 #define EXPRKIND_QUAL 0
54 #define EXPRKIND_TARGET 1
55 #define EXPRKIND_RTFUNC 2
56 #define EXPRKIND_VALUES 3
57 #define EXPRKIND_LIMIT 4
58 #define EXPRKIND_APPINFO 5
61 static Node *preprocess_expression(PlannerInfo *root, Node *expr, int kind);
62 static void preprocess_qual_conditions(PlannerInfo *root, Node *jtnode);
63 static Plan *inheritance_planner(PlannerInfo *root);
64 static Plan *grouping_planner(PlannerInfo *root, double tuple_fraction);
65 static bool is_dummy_plan(Plan *plan);
66 static double preprocess_limit(PlannerInfo *root,
67 double tuple_fraction,
68 int64 *offset_est, int64 *count_est);
69 static void preprocess_groupclause(PlannerInfo *root);
70 static bool choose_hashed_grouping(PlannerInfo *root,
71 double tuple_fraction, double limit_tuples,
72 Path *cheapest_path, Path *sorted_path,
73 double dNumGroups, AggClauseCounts *agg_counts);
74 static bool choose_hashed_distinct(PlannerInfo *root,
75 Plan *input_plan, List *input_pathkeys,
76 double tuple_fraction, double limit_tuples,
77 double dNumDistinctRows);
78 static List *make_subplanTargetList(PlannerInfo *root, List *tlist,
79 AttrNumber **groupColIdx, bool *need_tlist_eval);
80 static void locate_grouping_columns(PlannerInfo *root,
83 AttrNumber *groupColIdx);
84 static List *postprocess_setop_tlist(List *new_tlist, List *orig_tlist);
85 static List *select_active_windows(PlannerInfo *root, WindowFuncLists *wflists);
86 static List *make_pathkeys_for_window(PlannerInfo *root, WindowClause *wc,
87 List *tlist, bool canonicalize);
88 static void get_column_info_for_window(PlannerInfo *root, WindowClause *wc,
90 int numSortCols, AttrNumber *sortColIdx,
92 AttrNumber **partColIdx,
95 AttrNumber **ordColIdx,
99 /*****************************************************************************
101 * Query optimizer entry point
103 * To support loadable plugins that monitor or modify planner behavior,
104 * we provide a hook variable that lets a plugin get control before and
105 * after the standard planning process. The plugin would normally call
106 * standard_planner().
108 * Note to plugin authors: standard_planner() scribbles on its Query input,
109 * so you'd better copy that data structure if you want to plan more than once.
111 *****************************************************************************/
/*
 * planner
 *	  Query optimizer entry point.  Hands control to planner_hook when a
 *	  plugin has installed one; otherwise invokes standard_planner directly.
 *
 * NOTE(review): this excerpt elides several original lines (return type,
 * the hook NULL-test, braces, return statement); the surviving lines are
 * kept byte-identical below.
 */
113 planner(Query *parse, int cursorOptions, ParamListInfo boundParams)
118 result = (*planner_hook) (parse, cursorOptions, boundParams);
120 result = standard_planner(parse, cursorOptions, boundParams);
/*
 * standard_planner
 *	  The standard planning sequence: absorb cursor options, set up the
 *	  per-invocation PlannerGlobal, choose a tuple_fraction, plan the query
 *	  via subquery_planner, run set_plan_references cleanup on the top plan
 *	  and all subplans, and package everything into a PlannedStmt.
 *
 * NOTE(review): this excerpt elides a number of original lines (local
 * variable declarations, braces, the final return); code lines below are
 * byte-identical to the original.
 */
125 standard_planner(Query *parse, int cursorOptions, ParamListInfo boundParams)
129 double tuple_fraction;
135 /* Cursor options may come from caller or from DECLARE CURSOR stmt */
136 if (parse->utilityStmt &&
137 IsA(parse->utilityStmt, DeclareCursorStmt))
138 cursorOptions |= ((DeclareCursorStmt *) parse->utilityStmt)->options;
141 * Set up global state for this planner invocation. This data is needed
142 * across all levels of sub-Query that might exist in the given command,
143 * so we keep it in a separate struct that's linked to by each per-Query
146 glob = makeNode(PlannerGlobal);
/* Start from empty lists/flags; these fields accumulate during planning. */
148 glob->boundParams = boundParams;
149 glob->paramlist = NIL;
150 glob->subplans = NIL;
151 glob->subrtables = NIL;
152 glob->rewindPlanIDs = NULL;
153 glob->finalrtable = NIL;
154 glob->relationOids = NIL;
155 glob->invalItems = NIL;
157 glob->transientPlan = false;
159 /* Determine what fraction of the plan is likely to be scanned */
160 if (cursorOptions & CURSOR_OPT_FAST_PLAN)
163 * We have no real idea how many tuples the user will ultimately FETCH
164 * from a cursor, but it is often the case that he doesn't want 'em
165 * all, or would prefer a fast-start plan anyway so that he can
166 * process some of the tuples sooner. Use a GUC parameter to decide
167 * what fraction to optimize for.
169 tuple_fraction = cursor_tuple_fraction;
172 * We document cursor_tuple_fraction as simply being a fraction,
173 * which means the edge cases 0 and 1 have to be treated specially
174 * here. We convert 1 to 0 ("all the tuples") and 0 to a very small
177 if (tuple_fraction >= 1.0)
178 tuple_fraction = 0.0;
179 else if (tuple_fraction <= 0.0)
180 tuple_fraction = 1e-10;
184 /* Default assumption is we need all the tuples */
185 tuple_fraction = 0.0;
188 /* primary planning entry point (may recurse for subqueries) */
189 top_plan = subquery_planner(glob, parse, NULL,
190 false, tuple_fraction, &root);
193 * If creating a plan for a scrollable cursor, make sure it can run
194 * backwards on demand. Add a Material node at the top at need.
196 if (cursorOptions & CURSOR_OPT_SCROLL)
198 if (!ExecSupportsBackwardScan(top_plan))
199 top_plan = materialize_finished_plan(top_plan);
202 /* final cleanup of the plan */
203 Assert(glob->finalrtable == NIL);
204 top_plan = set_plan_references(glob, top_plan, root->parse->rtable);
205 /* ... and the subplans (both regular subplans and initplans) */
206 Assert(list_length(glob->subplans) == list_length(glob->subrtables));
207 forboth(lp, glob->subplans, lr, glob->subrtables)
209 Plan *subplan = (Plan *) lfirst(lp);
210 List *subrtable = (List *) lfirst(lr);
212 lfirst(lp) = set_plan_references(glob, subplan, subrtable);
215 /* build the PlannedStmt result */
216 result = makeNode(PlannedStmt);
/* Copy per-query and global planner results into the output statement. */
218 result->commandType = parse->commandType;
219 result->canSetTag = parse->canSetTag;
220 result->transientPlan = glob->transientPlan;
221 result->planTree = top_plan;
222 result->rtable = glob->finalrtable;
223 result->resultRelations = root->resultRelations;
224 result->utilityStmt = parse->utilityStmt;
225 result->intoClause = parse->intoClause;
226 result->subplans = glob->subplans;
227 result->rewindPlanIDs = glob->rewindPlanIDs;
228 result->returningLists = root->returningLists;
229 result->rowMarks = parse->rowMarks;
230 result->relationOids = glob->relationOids;
231 result->invalItems = glob->invalItems;
/* number of PARAM_EXEC slots = number of params registered during planning */
232 result->nParamExec = list_length(glob->paramlist);
238 /*--------------------
240 * Invokes the planner on a subquery. We recurse to here for each
241 * sub-SELECT found in the query tree.
243 * glob is the global state for the current planner run.
244 * parse is the querytree produced by the parser & rewriter.
245 * parent_root is the immediate parent Query's info (NULL at the top level).
246 * hasRecursion is true if this is a recursive WITH query.
247 * tuple_fraction is the fraction of tuples we expect will be retrieved.
248 * tuple_fraction is interpreted as explained for grouping_planner, below.
250 * If subroot isn't NULL, we pass back the query's final PlannerInfo struct;
251 * among other things this tells the output sort ordering of the plan.
253 * Basically, this routine does the stuff that should only be done once
254 * per Query object. It then calls grouping_planner. At one time,
255 * grouping_planner could be invoked recursively on the same Query object;
256 * that's not currently true, but we keep the separation between the two
257 * routines anyway, in case we need it again someday.
259 * subquery_planner will be called recursively to handle sub-Query nodes
260 * found within the query's expressions and rangetable.
262 * Returns a query plan.
263 *--------------------
/*
 * subquery_planner
 *	  Plan one Query level: build its PlannerInfo, run the once-per-Query
 *	  preprocessing steps (CTEs, sublink pullup, subquery pullup, inheritance
 *	  expansion, expression preprocessing, HAVING-to-WHERE migration, outer
 *	  join reduction), then invoke grouping_planner or inheritance_planner,
 *	  and finalize subplan parameter bookkeeping.
 *
 * NOTE(review): this excerpt elides many original lines (local declarations,
 * braces, some argument lines of multi-line calls); code below is
 * byte-identical to the original fragments.
 */
266 subquery_planner(PlannerGlobal *glob, Query *parse,
267 PlannerInfo *parent_root,
268 bool hasRecursion, double tuple_fraction,
269 PlannerInfo **subroot)
271 int num_old_subplans = list_length(glob->subplans);
278 /* Create a PlannerInfo data structure for this subquery */
279 root = makeNode(PlannerInfo);
282 root->query_level = parent_root ? parent_root->query_level + 1 : 1;
283 root->parent_root = parent_root;
284 root->planner_cxt = CurrentMemoryContext;
285 root->init_plans = NIL;
286 root->cte_plan_ids = NIL;
287 root->eq_classes = NIL;
288 root->append_rel_list = NIL;
290 root->hasRecursion = hasRecursion;
/* Recursive WITH queries get a worktable param; -1 otherwise (branch elided) */
292 root->wt_param_id = SS_assign_worktable_param(root);
294 root->wt_param_id = -1;
295 root->non_recursive_plan = NULL;
298 * If there is a WITH list, process each WITH query and build an
299 * initplan SubPlan structure for it.
302 SS_process_ctes(root);
305 * Look for ANY and EXISTS SubLinks in WHERE and JOIN/ON clauses, and try
306 * to transform them into joins. Note that this step does not descend
307 * into subqueries; if we pull up any subqueries below, their SubLinks are
308 * processed just before pulling them up.
310 if (parse->hasSubLinks)
311 pull_up_sublinks(root);
314 * Scan the rangetable for set-returning functions, and inline them
315 * if possible (producing subqueries that might get pulled up next).
316 * Recursion issues here are handled in the same way as for SubLinks.
318 inline_set_returning_functions(root);
321 * Check to see if any subqueries in the rangetable can be merged into
324 parse->jointree = (FromExpr *)
325 pull_up_subqueries(root, (Node *) parse->jointree, false, false);
328 * Detect whether any rangetable entries are RTE_JOIN kind; if not, we can
329 * avoid the expense of doing flatten_join_alias_vars(). Also check for
330 * outer joins --- if none, we can skip reduce_outer_joins().
331 * This must be done after we have done pull_up_subqueries, of course.
333 root->hasJoinRTEs = false;
334 hasOuterJoins = false;
335 foreach(l, parse->rtable)
337 RangeTblEntry *rte = (RangeTblEntry *) lfirst(l);
339 if (rte->rtekind == RTE_JOIN)
341 root->hasJoinRTEs = true;
342 if (IS_OUTER_JOIN(rte->jointype))
344 hasOuterJoins = true;
345 /* Can quit scanning once we find an outer join */
352 * Expand any rangetable entries that are inheritance sets into "append
353 * relations". This can add entries to the rangetable, but they must be
354 * plain base relations not joins, so it's OK (and marginally more
355 * efficient) to do it after checking for join RTEs. We must do it after
356 * pulling up subqueries, else we'd fail to handle inherited tables in
359 expand_inherited_tables(root);
362 * Set hasHavingQual to remember if HAVING clause is present. Needed
363 * because preprocess_expression will reduce a constant-true condition to
364 * an empty qual list ... but "HAVING TRUE" is not a semantic no-op.
366 root->hasHavingQual = (parse->havingQual != NULL);
368 /* Clear this flag; might get set in distribute_qual_to_rels */
369 root->hasPseudoConstantQuals = false;
372 * Do expression preprocessing on targetlist and quals.
374 parse->targetList = (List *)
375 preprocess_expression(root, (Node *) parse->targetList,
378 parse->returningList = (List *)
379 preprocess_expression(root, (Node *) parse->returningList,
382 preprocess_qual_conditions(root, (Node *) parse->jointree);
384 parse->havingQual = preprocess_expression(root, parse->havingQual,
387 parse->limitOffset = preprocess_expression(root, parse->limitOffset,
389 parse->limitCount = preprocess_expression(root, parse->limitCount,
392 root->append_rel_list = (List *)
393 preprocess_expression(root, (Node *) root->append_rel_list,
396 /* Also need to preprocess expressions for function and values RTEs */
397 foreach(l, parse->rtable)
399 RangeTblEntry *rte = (RangeTblEntry *) lfirst(l);
401 if (rte->rtekind == RTE_FUNCTION)
402 rte->funcexpr = preprocess_expression(root, rte->funcexpr,
404 else if (rte->rtekind == RTE_VALUES)
405 rte->values_lists = (List *)
406 preprocess_expression(root, (Node *) rte->values_lists,
411 * In some cases we may want to transfer a HAVING clause into WHERE. We
412 * cannot do so if the HAVING clause contains aggregates (obviously) or
413 * volatile functions (since a HAVING clause is supposed to be executed
414 * only once per group). Also, it may be that the clause is so expensive
415 * to execute that we're better off doing it only once per group, despite
416 * the loss of selectivity. This is hard to estimate short of doing the
417 * entire planning process twice, so we use a heuristic: clauses
418 * containing subplans are left in HAVING. Otherwise, we move or copy the
419 * HAVING clause into WHERE, in hopes of eliminating tuples before
420 * aggregation instead of after.
422 * If the query has explicit grouping then we can simply move such a
423 * clause into WHERE; any group that fails the clause will not be in the
424 * output because none of its tuples will reach the grouping or
425 * aggregation stage. Otherwise we must have a degenerate (variable-free)
426 * HAVING clause, which we put in WHERE so that query_planner() can use it
427 * in a gating Result node, but also keep in HAVING to ensure that we
428 * don't emit a bogus aggregated row. (This could be done better, but it
429 * seems not worth optimizing.)
431 * Note that both havingQual and parse->jointree->quals are in
432 * implicitly-ANDed-list form at this point, even though they are declared
436 foreach(l, (List *) parse->havingQual)
438 Node *havingclause = (Node *) lfirst(l);
440 if (contain_agg_clause(havingclause) ||
441 contain_volatile_functions(havingclause) ||
442 contain_subplans(havingclause))
444 /* keep it in HAVING */
445 newHaving = lappend(newHaving, havingclause);
447 else if (parse->groupClause)
449 /* move it to WHERE */
450 parse->jointree->quals = (Node *)
451 lappend((List *) parse->jointree->quals, havingclause);
455 /* put a copy in WHERE, keep it in HAVING */
456 parse->jointree->quals = (Node *)
457 lappend((List *) parse->jointree->quals,
458 copyObject(havingclause));
459 newHaving = lappend(newHaving, havingclause);
462 parse->havingQual = (Node *) newHaving;
465 * If we have any outer joins, try to reduce them to plain inner joins.
466 * This step is most easily done after we've done expression
470 reduce_outer_joins(root);
473 * Do the main planning. If we have an inherited target relation, that
474 * needs special processing, else go straight to grouping_planner.
476 if (parse->resultRelation &&
477 rt_fetch(parse->resultRelation, parse->rtable)->inh)
478 plan = inheritance_planner(root);
480 plan = grouping_planner(root, tuple_fraction);
483 * If any subplans were generated, or if we're inside a subplan, build
484 * initPlan list and extParam/allParam sets for plan nodes, and attach the
485 * initPlans to the top plan node.
487 if (list_length(glob->subplans) != num_old_subplans ||
488 root->query_level > 1)
489 SS_finalize_plan(root, plan, true);
491 /* Return internal info if caller wants it */
499 * preprocess_expression
500 * Do subquery_planner's preprocessing work for an expression,
501 * which can be a targetlist, a WHERE clause (including JOIN/ON
502 * conditions), or a HAVING clause.
/*
 * preprocess_expression
 *	  Shared expression preprocessing used by subquery_planner: flatten join
 *	  alias Vars, const-simplify, canonicalize quals, expand SubLinks into
 *	  SubPlans, replace uplevel Vars with Params, and convert quals to
 *	  implicit-AND form.  `kind` is one of the EXPRKIND_* codes and steers
 *	  which steps apply.
 *
 * NOTE(review): this excerpt elides some original lines (return type, the
 * empty-expression early exit, braces, final return); code below is
 * byte-identical to the original fragments.
 */
505 preprocess_expression(PlannerInfo *root, Node *expr, int kind)
508 * Fall out quickly if expression is empty. This occurs often enough to
509 * be worth checking. Note that null->null is the correct conversion for
510 * implicit-AND result format, too.
516 * If the query has any join RTEs, replace join alias variables with
517 * base-relation variables. We must do this before sublink processing,
518 * else sublinks expanded out from join aliases wouldn't get processed. We
519 * can skip it in VALUES lists, however, since they can't contain any Vars
522 if (root->hasJoinRTEs && kind != EXPRKIND_VALUES)
523 expr = flatten_join_alias_vars(root, expr);
526 * Simplify constant expressions.
528 * Note: one essential effect here is to insert the current actual values
529 * of any default arguments for functions. To ensure that happens, we
530 * *must* process all expressions here. Previous PG versions sometimes
531 * skipped const-simplification if it didn't seem worth the trouble, but
532 * we can't do that anymore.
534 * Note: this also flattens nested AND and OR expressions into N-argument
535 * form. All processing of a qual expression after this point must be
536 * careful to maintain AND/OR flatness --- that is, do not generate a tree
537 * with AND directly under AND, nor OR directly under OR.
539 expr = eval_const_expressions(root, expr);
542 * If it's a qual or havingQual, canonicalize it.
544 if (kind == EXPRKIND_QUAL)
546 expr = (Node *) canonicalize_qual((Expr *) expr);
548 #ifdef OPTIMIZER_DEBUG
549 printf("After canonicalize_qual()\n");
554 /* Expand SubLinks to SubPlans */
555 if (root->parse->hasSubLinks)
556 expr = SS_process_sublinks(root, expr, (kind == EXPRKIND_QUAL));
559 * XXX do not insert anything here unless you have grokked the comments in
560 * SS_replace_correlation_vars ...
563 /* Replace uplevel vars with Param nodes (this IS possible in VALUES) */
564 if (root->query_level > 1)
565 expr = SS_replace_correlation_vars(root, expr);
568 * If it's a qual or havingQual, convert it to implicit-AND format. (We
569 * don't want to do this before eval_const_expressions, since the latter
570 * would be unable to simplify a top-level AND correctly. Also,
571 * SS_process_sublinks expects explicit-AND format.)
573 if (kind == EXPRKIND_QUAL)
574 expr = (Node *) make_ands_implicit((Expr *) expr);
580 * preprocess_qual_conditions
581 * Recursively scan the query's jointree and do subquery_planner's
582 * preprocessing work on each qual condition found therein.
/*
 * preprocess_qual_conditions
 *	  Recursively walk a jointree (RangeTblRef / FromExpr / JoinExpr nodes)
 *	  and run preprocess_expression with EXPRKIND_QUAL on each qual found,
 *	  rewriting the quals in place.  Unknown node types are a hard error.
 *
 * NOTE(review): braces and a few lines are elided in this excerpt; code
 * below is byte-identical to the original fragments.
 */
585 preprocess_qual_conditions(PlannerInfo *root, Node *jtnode)
589 if (IsA(jtnode, RangeTblRef))
591 /* nothing to do here */
593 else if (IsA(jtnode, FromExpr))
595 FromExpr *f = (FromExpr *) jtnode;
/* Recurse into each FROM item before processing this level's quals */
598 foreach(l, f->fromlist)
599 preprocess_qual_conditions(root, lfirst(l));
601 f->quals = preprocess_expression(root, f->quals, EXPRKIND_QUAL);
603 else if (IsA(jtnode, JoinExpr))
605 JoinExpr *j = (JoinExpr *) jtnode;
/* Recurse into both join inputs, then handle the JOIN/ON qual itself */
607 preprocess_qual_conditions(root, j->larg);
608 preprocess_qual_conditions(root, j->rarg);
610 j->quals = preprocess_expression(root, j->quals, EXPRKIND_QUAL);
613 elog(ERROR, "unrecognized node type: %d",
614 (int) nodeTag(jtnode));
618 * inheritance_planner
619 * Generate a plan in the case where the result relation is an
622 * We have to handle this case differently from cases where a source relation
623 * is an inheritance set. Source inheritance is expanded at the bottom of the
624 * plan tree (see allpaths.c), but target inheritance has to be expanded at
625 * the top. The reason is that for UPDATE, each target relation needs a
626 * different targetlist matching its own column set. Also, for both UPDATE
627 * and DELETE, the executor needs the Append plan node at the top, else it
628 * can't keep track of which table is the current target table. Fortunately,
629 * the UPDATE/DELETE target can never be the nullable side of an outer join,
630 * so it's OK to generate the plan this way.
632 * Returns a query plan.
/*
 * inheritance_planner
 *	  Plan an UPDATE/DELETE whose target relation is an inheritance set:
 *	  plan each child relation separately with a per-child copy of the
 *	  PlannerInfo (attributes remapped via adjust_appendrel_attrs), skip
 *	  children excluded by constraint exclusion, and combine the surviving
 *	  subplans under an Append node (or return the single subplan, or a
 *	  dummy Result if every child was excluded).
 *
 * NOTE(review): this excerpt elides some original lines (local
 * declarations, braces, several call arguments); code below is
 * byte-identical to the original fragments.
 */
635 inheritance_planner(PlannerInfo *root)
637 Query *parse = root->parse;
638 int parentRTindex = parse->resultRelation;
639 List *subplans = NIL;
640 List *resultRelations = NIL;
641 List *returningLists = NIL;
647 foreach(l, root->append_rel_list)
649 AppendRelInfo *appinfo = (AppendRelInfo *) lfirst(l);
652 /* append_rel_list contains all append rels; ignore others */
653 if (appinfo->parent_relid != parentRTindex)
657 * Generate modified query with this rel as target.
/* Shallow-copy the parent PlannerInfo; only per-child fields are reset */
659 memcpy(&subroot, root, sizeof(PlannerInfo));
660 subroot.parse = (Query *)
661 adjust_appendrel_attrs((Node *) parse,
663 subroot.returningLists = NIL;
664 subroot.init_plans = NIL;
665 /* We needn't modify the child's append_rel_list */
666 /* There shouldn't be any OJ info to translate, as yet */
667 Assert(subroot.join_info_list == NIL);
668 /* and we haven't created PlaceHolderInfos, either */
669 Assert(subroot.placeholder_list == NIL);
672 subplan = grouping_planner(&subroot, 0.0 /* retrieve all tuples */ );
675 * If this child rel was excluded by constraint exclusion, exclude it
678 if (is_dummy_plan(subplan))
681 /* Save rtable and tlist from first rel for use below */
684 rtable = subroot.parse->rtable;
685 tlist = subplan->targetlist;
688 subplans = lappend(subplans, subplan);
690 /* Make sure any initplans from this rel get into the outer list */
691 root->init_plans = list_concat(root->init_plans, subroot.init_plans);
693 /* Build target-relations list for the executor */
694 resultRelations = lappend_int(resultRelations, appinfo->child_relid);
696 /* Build list of per-relation RETURNING targetlists */
697 if (parse->returningList)
699 Assert(list_length(subroot.returningLists) == 1);
700 returningLists = list_concat(returningLists,
701 subroot.returningLists);
705 root->resultRelations = resultRelations;
706 root->returningLists = returningLists;
708 /* Mark result as unordered (probably unnecessary) */
709 root->query_pathkeys = NIL;
712 * If we managed to exclude every child rel, return a dummy plan
716 root->resultRelations = list_make1_int(parentRTindex);
717 /* although dummy, it must have a valid tlist for executor */
718 tlist = preprocess_targetlist(root, parse->targetList);
/* Gating Result with constant-false qual emits no rows */
719 return (Plan *) make_result(root,
721 (Node *) list_make1(makeBoolConst(false,
727 * Planning might have modified the rangetable, due to changes of the
728 * Query structures inside subquery RTEs. We have to ensure that this
729 * gets propagated back to the master copy. But can't do this until we
730 * are done planning, because all the calls to grouping_planner need
731 * virgin sub-Queries to work from. (We are effectively assuming that
732 * sub-Queries will get planned identically each time, or at least that
733 * the impacts on their rangetables will be the same each time.)
735 * XXX should clean this up someday
737 parse->rtable = rtable;
739 /* Suppress Append if there's only one surviving child rel */
740 if (list_length(subplans) == 1)
741 return (Plan *) linitial(subplans);
743 return (Plan *) make_append(subplans, true, tlist);
746 /*--------------------
748 * Perform planning steps related to grouping, aggregation, etc.
749 * This primarily means adding top-level processing to the basic
750 * query plan produced by query_planner.
752 * tuple_fraction is the fraction of tuples we expect will be retrieved
754 * tuple_fraction is interpreted as follows:
755 * 0: expect all tuples to be retrieved (normal case)
756 * 0 < tuple_fraction < 1: expect the given fraction of tuples available
757 * from the plan to be retrieved
758 * tuple_fraction >= 1: tuple_fraction is the absolute number of tuples
759 * expected to be retrieved (ie, a LIMIT specification)
761 * Returns a query plan. Also, root->query_pathkeys is returned as the
762 * actual output ordering of the plan (in pathkey format).
763 *--------------------
766 grouping_planner(PlannerInfo *root, double tuple_fraction)
768 Query *parse = root->parse;
769 List *tlist = parse->targetList;
770 int64 offset_est = 0;
772 double limit_tuples = -1.0;
774 List *current_pathkeys;
775 double dNumGroups = 0;
777 /* Tweak caller-supplied tuple_fraction if have LIMIT/OFFSET */
778 if (parse->limitCount || parse->limitOffset)
780 tuple_fraction = preprocess_limit(root, tuple_fraction,
781 &offset_est, &count_est);
784 * If we have a known LIMIT, and don't have an unknown OFFSET, we can
785 * estimate the effects of using a bounded sort.
787 if (count_est > 0 && offset_est >= 0)
788 limit_tuples = (double) count_est + (double) offset_est;
791 if (parse->setOperations)
793 List *set_sortclauses;
796 * If there's a top-level ORDER BY, assume we have to fetch all the
797 * tuples. This might be too simplistic given all the hackery below
798 * to possibly avoid the sort; but the odds of accurate estimates
799 * here are pretty low anyway.
801 if (parse->sortClause)
802 tuple_fraction = 0.0;
805 * Construct the plan for set operations. The result will not need
806 * any work except perhaps a top-level sort and/or LIMIT. Note that
807 * any special work for recursive unions is the responsibility of
808 * plan_set_operations.
810 result_plan = plan_set_operations(root, tuple_fraction,
814 * Calculate pathkeys representing the sort order (if any) of the set
815 * operation's result. We have to do this before overwriting the sort
818 current_pathkeys = make_pathkeys_for_sortclauses(root,
820 result_plan->targetlist,
824 * We should not need to call preprocess_targetlist, since we must be
825 * in a SELECT query node. Instead, use the targetlist returned by
826 * plan_set_operations (since this tells whether it returned any
827 * resjunk columns!), and transfer any sort key information from the
830 Assert(parse->commandType == CMD_SELECT);
832 tlist = postprocess_setop_tlist(copyObject(result_plan->targetlist),
836 * Can't handle FOR UPDATE/SHARE here (parser should have checked
837 * already, but let's make sure).
841 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
842 errmsg("SELECT FOR UPDATE/SHARE is not allowed with UNION/INTERSECT/EXCEPT")));
845 * Calculate pathkeys that represent result ordering requirements
847 Assert(parse->distinctClause == NIL);
848 root->sort_pathkeys = make_pathkeys_for_sortclauses(root,
855 /* No set operations, do regular planning */
857 AttrNumber *groupColIdx = NULL;
858 bool need_tlist_eval = true;
864 AggClauseCounts agg_counts;
866 bool use_hashed_grouping = false;
867 WindowFuncLists *wflists = NULL;
868 List *activeWindows = NIL;
870 MemSet(&agg_counts, 0, sizeof(AggClauseCounts));
872 /* A recursive query should always have setOperations */
873 Assert(!root->hasRecursion);
875 /* Preprocess GROUP BY clause, if any */
876 if (parse->groupClause)
877 preprocess_groupclause(root);
878 numGroupCols = list_length(parse->groupClause);
880 /* Preprocess targetlist */
881 tlist = preprocess_targetlist(root, tlist);
884 * Locate any window functions in the tlist. (We don't need to look
885 * anywhere else, since expressions used in ORDER BY will be in there
886 * too.) Note that they could all have been eliminated by constant
887 * folding, in which case we don't need to do any more work.
889 if (parse->hasWindowFuncs)
891 wflists = find_window_functions((Node *) tlist,
892 list_length(parse->windowClause));
893 if (wflists->numWindowFuncs > 0)
894 activeWindows = select_active_windows(root, wflists);
896 parse->hasWindowFuncs = false;
900 * Generate appropriate target list for subplan; may be different from
901 * tlist if grouping or aggregation is needed.
903 sub_tlist = make_subplanTargetList(root, tlist,
904 &groupColIdx, &need_tlist_eval);
907 * Calculate pathkeys that represent grouping/ordering requirements.
908 * Stash them in PlannerInfo so that query_planner can canonicalize
909 * them after EquivalenceClasses have been formed. The sortClause
910 * is certainly sort-able, but GROUP BY and DISTINCT might not be,
911 * in which case we just leave their pathkeys empty.
913 if (parse->groupClause &&
914 grouping_is_sortable(parse->groupClause))
915 root->group_pathkeys =
916 make_pathkeys_for_sortclauses(root,
921 root->group_pathkeys = NIL;
923 /* We consider only the first (bottom) window in pathkeys logic */
924 if (activeWindows != NIL)
926 WindowClause *wc = (WindowClause *) linitial(activeWindows);
928 root->window_pathkeys = make_pathkeys_for_window(root,
934 root->window_pathkeys = NIL;
936 if (parse->distinctClause &&
937 grouping_is_sortable(parse->distinctClause))
938 root->distinct_pathkeys =
939 make_pathkeys_for_sortclauses(root,
940 parse->distinctClause,
944 root->distinct_pathkeys = NIL;
946 root->sort_pathkeys =
947 make_pathkeys_for_sortclauses(root,
953 * Will need actual number of aggregates for estimating costs.
955 * Note: we do not attempt to detect duplicate aggregates here; a
956 * somewhat-overestimated count is okay for our present purposes.
958 * Note: think not that we can turn off hasAggs if we find no aggs. It
959 * is possible for constant-expression simplification to remove all
960 * explicit references to aggs, but we still have to follow the
961 * aggregate semantics (eg, producing only one output row).
965 count_agg_clauses((Node *) tlist, &agg_counts);
966 count_agg_clauses(parse->havingQual, &agg_counts);
970 * Figure out whether we want a sorted result from query_planner.
972 * If we have a sortable GROUP BY clause, then we want a result sorted
973 * properly for grouping. Otherwise, if we have window functions to
974 * evaluate, we try to sort for the first window. Otherwise, if
975 * there's a sortable DISTINCT clause that's more rigorous than the
976 * ORDER BY clause, we try to produce output that's sufficiently well
977 * sorted for the DISTINCT. Otherwise, if there is an ORDER BY
978 * clause, we want to sort by the ORDER BY clause.
980 * Note: if we have both ORDER BY and GROUP BY, and ORDER BY is a
981 * superset of GROUP BY, it would be tempting to request sort by ORDER
982 * BY --- but that might just leave us failing to exploit an available
983 * sort order at all. Needs more thought. The choice for DISTINCT
984 * versus ORDER BY is much easier, since we know that the parser
985 * ensured that one is a superset of the other.
987 if (root->group_pathkeys)
988 root->query_pathkeys = root->group_pathkeys;
989 else if (root->window_pathkeys)
990 root->query_pathkeys = root->window_pathkeys;
991 else if (list_length(root->distinct_pathkeys) >
992 list_length(root->sort_pathkeys))
993 root->query_pathkeys = root->distinct_pathkeys;
994 else if (root->sort_pathkeys)
995 root->query_pathkeys = root->sort_pathkeys;
997 root->query_pathkeys = NIL;
1000 * Generate the best unsorted and presorted paths for this Query (but
1001 * note there may not be any presorted path). query_planner will also
1002 * estimate the number of groups in the query, and canonicalize all
1005 query_planner(root, sub_tlist, tuple_fraction, limit_tuples,
1006 &cheapest_path, &sorted_path, &dNumGroups);
1009 * If grouping, decide whether to use sorted or hashed grouping.
1011 if (parse->groupClause)
1017 * Executor doesn't support hashed aggregation with DISTINCT
1018 * aggregates. (Doing so would imply storing *all* the input
1019 * values in the hash table, which seems like a certain loser.)
1021 can_hash = (agg_counts.numDistinctAggs == 0 &&
1022 grouping_is_hashable(parse->groupClause));
1023 can_sort = grouping_is_sortable(parse->groupClause);
1024 if (can_hash && can_sort)
1026 /* we have a meaningful choice to make ... */
1027 use_hashed_grouping =
1028 choose_hashed_grouping(root,
1029 tuple_fraction, limit_tuples,
1030 cheapest_path, sorted_path,
1031 dNumGroups, &agg_counts);
1034 use_hashed_grouping = true;
1036 use_hashed_grouping = false;
1039 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1040 errmsg("could not implement GROUP BY"),
1041 errdetail("Some of the datatypes only support hashing, while others only support sorting.")));
1043 /* Also convert # groups to long int --- but 'ware overflow! */
1044 numGroups = (long) Min(dNumGroups, (double) LONG_MAX);
1048 * Select the best path. If we are doing hashed grouping, we will
1049 * always read all the input tuples, so use the cheapest-total path.
1050 * Otherwise, trust query_planner's decision about which to use.
1052 if (use_hashed_grouping || !sorted_path)
1053 best_path = cheapest_path;
1055 best_path = sorted_path;
1058 * Check to see if it's possible to optimize MIN/MAX aggregates. If
1059 * so, we will forget all the work we did so far to choose a "regular"
1060 * path ... but we had to do it anyway to be able to tell which way is
1063 result_plan = optimize_minmax_aggregates(root,
1066 if (result_plan != NULL)
1069 * optimize_minmax_aggregates generated the full plan, with the
1070 * right tlist, and it has no sort order.
1072 current_pathkeys = NIL;
1077 * Normal case --- create a plan according to query_planner's
1080 bool need_sort_for_grouping = false;
1082 result_plan = create_plan(root, best_path);
1083 current_pathkeys = best_path->pathkeys;
1085 /* Detect if we'll need an explicit sort for grouping */
1086 if (parse->groupClause && !use_hashed_grouping &&
1087 !pathkeys_contained_in(root->group_pathkeys, current_pathkeys))
1089 need_sort_for_grouping = true;
1091 * Always override query_planner's tlist, so that we don't
1092 * sort useless data from a "physical" tlist.
1094 need_tlist_eval = true;
1098 * create_plan() returns a plan with just a "flat" tlist of
1099 * required Vars. Usually we need to insert the sub_tlist as the
1100 * tlist of the top plan node. However, we can skip that if we
1101 * determined that whatever query_planner chose to return will be
1104 if (need_tlist_eval)
1107 * If the top-level plan node is one that cannot do expression
1108 * evaluation, we must insert a Result node to project the
1111 if (!is_projection_capable_plan(result_plan))
1113 result_plan = (Plan *) make_result(root,
1121 * Otherwise, just replace the subplan's flat tlist with
1122 * the desired tlist.
1124 result_plan->targetlist = sub_tlist;
1128 * Also, account for the cost of evaluation of the sub_tlist.
1130 * Up to now, we have only been dealing with "flat" tlists,
1131 * containing just Vars. So their evaluation cost is zero
1132 * according to the model used by cost_qual_eval() (or if you
1133 * prefer, the cost is factored into cpu_tuple_cost). Thus we
1134 * can avoid accounting for tlist cost throughout
1135 * query_planner() and subroutines. But now we've inserted a
1136 * tlist that might contain actual operators, sub-selects, etc
1137 * --- so we'd better account for its cost.
1139 * Below this point, any tlist eval cost for added-on nodes
1140 * should be accounted for as we create those nodes.
1141 * Presently, of the node types we can add on, only Agg,
1142 * WindowAgg, and Group project new tlists (the rest just copy
1143 * their input tuples) --- so make_agg(), make_windowagg() and
1144 * make_group() are responsible for computing the added cost.
1146 cost_qual_eval(&tlist_cost, sub_tlist, root);
1147 result_plan->startup_cost += tlist_cost.startup;
1148 result_plan->total_cost += tlist_cost.startup +
1149 tlist_cost.per_tuple * result_plan->plan_rows;
1154 * Since we're using query_planner's tlist and not the one
1155 * make_subplanTargetList calculated, we have to refigure any
1156 * grouping-column indexes make_subplanTargetList computed.
1158 locate_grouping_columns(root, tlist, result_plan->targetlist,
1163 * Insert AGG or GROUP node if needed, plus an explicit sort step
1166 * HAVING clause, if any, becomes qual of the Agg or Group node.
1168 if (use_hashed_grouping)
1170 /* Hashed aggregate plan --- no sort needed */
1171 result_plan = (Plan *) make_agg(root,
1173 (List *) parse->havingQual,
1177 extract_grouping_ops(parse->groupClause),
1181 /* Hashed aggregation produces randomly-ordered results */
1182 current_pathkeys = NIL;
1184 else if (parse->hasAggs)
1186 /* Plain aggregate plan --- sort if needed */
1187 AggStrategy aggstrategy;
1189 if (parse->groupClause)
1191 if (need_sort_for_grouping)
1193 result_plan = (Plan *)
1194 make_sort_from_groupcols(root,
1198 current_pathkeys = root->group_pathkeys;
1200 aggstrategy = AGG_SORTED;
1203 * The AGG node will not change the sort ordering of its
1204 * groups, so current_pathkeys describes the result too.
1209 aggstrategy = AGG_PLAIN;
1210 /* Result will be only one row anyway; no sort order */
1211 current_pathkeys = NIL;
1214 result_plan = (Plan *) make_agg(root,
1216 (List *) parse->havingQual,
1220 extract_grouping_ops(parse->groupClause),
1225 else if (parse->groupClause)
1228 * GROUP BY without aggregation, so insert a group node (plus
1229 * the appropriate sort node, if necessary).
1231 * Add an explicit sort if we couldn't make the path come out
1232 * the way the GROUP node needs it.
1234 if (need_sort_for_grouping)
1236 result_plan = (Plan *)
1237 make_sort_from_groupcols(root,
1241 current_pathkeys = root->group_pathkeys;
1244 result_plan = (Plan *) make_group(root,
1246 (List *) parse->havingQual,
1249 extract_grouping_ops(parse->groupClause),
1252 /* The Group node won't change sort ordering */
1254 else if (root->hasHavingQual)
1257 * No aggregates, and no GROUP BY, but we have a HAVING qual.
1258 * This is a degenerate case in which we are supposed to emit
1259 * either 0 or 1 row depending on whether HAVING succeeds.
1260 * Furthermore, there cannot be any variables in either HAVING
1261 * or the targetlist, so we actually do not need the FROM
1262 * table at all! We can just throw away the plan-so-far and
1263 * generate a Result node. This is a sufficiently unusual
1264 * corner case that it's not worth contorting the structure of
1265 * this routine to avoid having to generate the plan in the
1268 result_plan = (Plan *) make_result(root,
1273 } /* end of non-minmax-aggregate case */
1276 * Since each window function could require a different sort order,
1277 * we stack up a WindowAgg node for each window, with sort steps
1278 * between them as needed.
1286 * If the top-level plan node is one that cannot do expression
1287 * evaluation, we must insert a Result node to project the
1288 * desired tlist. (In some cases this might not really be
1289 * required, but it's not worth trying to avoid it.) Note that
1290 * on second and subsequent passes through the following loop,
1291 * the top-level node will be a WindowAgg which we know can
1292 * project; so we only need to check once.
1294 if (!is_projection_capable_plan(result_plan))
1296 result_plan = (Plan *) make_result(root,
1303 * The "base" targetlist for all steps of the windowing process
1304 * is a flat tlist of all Vars and Aggs needed in the result.
1305 * (In some cases we wouldn't need to propagate all of these
1306 * all the way to the top, since they might only be needed as
1307 * inputs to WindowFuncs. It's probably not worth trying to
1308 * optimize that though.) As we climb up the stack, we add
1309 * outputs for the WindowFuncs computed at each level. Also,
1310 * each input tlist has to present all the columns needed to
1311 * sort the data for the next WindowAgg step. That's handled
1312 * internally by make_sort_from_pathkeys, but we need the
1313 * copyObject steps here to ensure that each plan node has
1314 * a separately modifiable tlist.
1316 window_tlist = flatten_tlist(tlist);
1318 window_tlist = add_to_flat_tlist(window_tlist,
1319 pull_agg_clause((Node *) tlist));
1320 result_plan->targetlist = (List *) copyObject(window_tlist);
1322 foreach(l, activeWindows)
1324 WindowClause *wc = (WindowClause *) lfirst(l);
1325 List *window_pathkeys;
1327 AttrNumber *partColIdx;
1330 AttrNumber *ordColIdx;
1333 window_pathkeys = make_pathkeys_for_window(root,
1339 * This is a bit tricky: we build a sort node even if we don't
1340 * really have to sort. Even when no explicit sort is needed,
1341 * we need to have suitable resjunk items added to the input
1342 * plan's tlist for any partitioning or ordering columns that
1343 * aren't plain Vars. Furthermore, this way we can use
1344 * existing infrastructure to identify which input columns are
1345 * the interesting ones.
1347 if (window_pathkeys)
1351 sort_plan = make_sort_from_pathkeys(root,
1355 if (!pathkeys_contained_in(window_pathkeys,
1358 /* we do indeed need to sort */
1359 result_plan = (Plan *) sort_plan;
1360 current_pathkeys = window_pathkeys;
1362 /* In either case, extract the per-column information */
1363 get_column_info_for_window(root, wc, tlist,
1365 sort_plan->sortColIdx,
1375 /* empty window specification, nothing to sort */
1378 partOperators = NULL;
1381 ordOperators = NULL;
1386 /* Add the current WindowFuncs to the running tlist */
1387 window_tlist = add_to_flat_tlist(window_tlist,
1388 wflists->windowFuncs[wc->winref]);
1392 /* Install the original tlist in the topmost WindowAgg */
1393 window_tlist = tlist;
1396 /* ... and make the WindowAgg plan node */
1397 result_plan = (Plan *)
1398 make_windowagg(root,
1399 (List *) copyObject(window_tlist),
1400 list_length(wflists->windowFuncs[wc->winref]),
1410 } /* end of if (setOperations) */
1413 * If there is a DISTINCT clause, add the necessary node(s).
1415 if (parse->distinctClause)
1417 double dNumDistinctRows;
1418 long numDistinctRows;
1419 bool use_hashed_distinct;
1424 * If there was grouping or aggregation, use the current number of
1425 * rows as the estimated number of DISTINCT rows (ie, assume the
1426 * result was already mostly unique). If not, use the number of
1427 * distinct-groups calculated by query_planner.
1429 if (parse->groupClause || root->hasHavingQual || parse->hasAggs)
1430 dNumDistinctRows = result_plan->plan_rows;
1432 dNumDistinctRows = dNumGroups;
1434 /* Also convert to long int --- but 'ware overflow! */
1435 numDistinctRows = (long) Min(dNumDistinctRows, (double) LONG_MAX);
1438 * If we have a sortable DISTINCT ON clause, we always use sorting.
1439 * This enforces the expected behavior of DISTINCT ON.
1441 can_sort = grouping_is_sortable(parse->distinctClause);
1442 if (can_sort && parse->hasDistinctOn)
1443 use_hashed_distinct = false;
1446 can_hash = grouping_is_hashable(parse->distinctClause);
1447 if (can_hash && can_sort)
1449 /* we have a meaningful choice to make ... */
1450 use_hashed_distinct =
1451 choose_hashed_distinct(root,
1452 result_plan, current_pathkeys,
1453 tuple_fraction, limit_tuples,
1457 use_hashed_distinct = true;
1459 use_hashed_distinct = false;
1463 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1464 errmsg("could not implement DISTINCT"),
1465 errdetail("Some of the datatypes only support hashing, while others only support sorting.")));
1466 use_hashed_distinct = false; /* keep compiler quiet */
1470 if (use_hashed_distinct)
1472 /* Hashed aggregate plan --- no sort needed */
1473 result_plan = (Plan *) make_agg(root,
1474 result_plan->targetlist,
1477 list_length(parse->distinctClause),
1478 extract_grouping_cols(parse->distinctClause,
1479 result_plan->targetlist),
1480 extract_grouping_ops(parse->distinctClause),
1484 /* Hashed aggregation produces randomly-ordered results */
1485 current_pathkeys = NIL;
1490 * Use a Unique node to implement DISTINCT. Add an explicit sort
1491 * if we couldn't make the path come out the way the Unique node
1492 * needs it. If we do have to sort, always sort by the more
1493 * rigorous of DISTINCT and ORDER BY, to avoid a second sort
1494 * below. However, for regular DISTINCT, don't sort now if we
1495 * don't have to --- sorting afterwards will likely be cheaper,
1496 * and also has the possibility of optimizing via LIMIT. But
1497 * for DISTINCT ON, we *must* force the final sort now, else
1498 * it won't have the desired behavior.
1500 List *needed_pathkeys;
1502 if (parse->hasDistinctOn &&
1503 list_length(root->distinct_pathkeys) <
1504 list_length(root->sort_pathkeys))
1505 needed_pathkeys = root->sort_pathkeys;
1507 needed_pathkeys = root->distinct_pathkeys;
1509 if (!pathkeys_contained_in(needed_pathkeys, current_pathkeys))
1511 if (list_length(root->distinct_pathkeys) >=
1512 list_length(root->sort_pathkeys))
1513 current_pathkeys = root->distinct_pathkeys;
1516 current_pathkeys = root->sort_pathkeys;
1517 /* Assert checks that parser didn't mess up... */
1518 Assert(pathkeys_contained_in(root->distinct_pathkeys,
1522 result_plan = (Plan *) make_sort_from_pathkeys(root,
1528 result_plan = (Plan *) make_unique(result_plan,
1529 parse->distinctClause);
1530 result_plan->plan_rows = dNumDistinctRows;
1531 /* The Unique node won't change sort ordering */
1536 * If ORDER BY was given and we were not able to make the plan come out in
1537 * the right order, add an explicit sort step.
1539 if (parse->sortClause)
1541 if (!pathkeys_contained_in(root->sort_pathkeys, current_pathkeys))
1543 result_plan = (Plan *) make_sort_from_pathkeys(root,
1545 root->sort_pathkeys,
1547 current_pathkeys = root->sort_pathkeys;
1552 * Finally, if there is a LIMIT/OFFSET clause, add the LIMIT node.
1554 if (parse->limitCount || parse->limitOffset)
1556 result_plan = (Plan *) make_limit(result_plan,
1564 * Deal with the RETURNING clause if any. It's convenient to pass the
1565 * returningList through setrefs.c now rather than at top level (if we
1566 * waited, handling inherited UPDATE/DELETE would be much harder).
1568 if (parse->returningList)
1572 Assert(parse->resultRelation);
1573 rlist = set_returning_clause_references(root->glob,
1574 parse->returningList,
1576 parse->resultRelation);
1577 root->returningLists = list_make1(rlist);
1580 root->returningLists = NIL;
1582 /* Compute result-relations list if needed */
1583 if (parse->resultRelation)
1584 root->resultRelations = list_make1_int(parse->resultRelation);
1586 root->resultRelations = NIL;
1589 * Return the actual output ordering in query_pathkeys for possible use by
1590 * an outer query level.
1592 root->query_pathkeys = current_pathkeys;
1598 * Detect whether a plan node is a "dummy" plan created when a relation
1599 * is deemed not to need scanning due to constraint exclusion.
1601 * Currently, such dummy plans are Result nodes with constant FALSE
1605 is_dummy_plan(Plan *plan)
1607 if (IsA(plan, Result))
1609 List *rcqual = (List *) ((Result *) plan)->resconstantqual;
1611 if (list_length(rcqual) == 1)
1613 Const *constqual = (Const *) linitial(rcqual);
1615 if (constqual && IsA(constqual, Const))
1617 if (!constqual->constisnull &&
1618 !DatumGetBool(constqual->constvalue))
1627 * preprocess_limit - do pre-estimation for LIMIT and/or OFFSET clauses
1629 * We try to estimate the values of the LIMIT/OFFSET clauses, and pass the
1630 * results back in *count_est and *offset_est. These variables are set to
1631 * 0 if the corresponding clause is not present, and -1 if it's present
1632 * but we couldn't estimate the value for it. (The "0" convention is OK
1633 * for OFFSET but a little bit bogus for LIMIT: effectively we estimate
1634 * LIMIT 0 as though it were LIMIT 1. But this is in line with the planner's
1635 * usual practice of never estimating less than one row.) These values will
1636 * be passed to make_limit, which see if you change this code.
1638 * The return value is the suitably adjusted tuple_fraction to use for
1639 * planning the query. This adjustment is not overridable, since it reflects
1640 * plan actions that grouping_planner() will certainly take, not assumptions
1644 preprocess_limit(PlannerInfo *root, double tuple_fraction,
1645 int64 *offset_est, int64 *count_est)
1647 Query *parse = root->parse;
1649 double limit_fraction;
1651 /* Should not be called unless LIMIT or OFFSET */
1652 Assert(parse->limitCount || parse->limitOffset);
1655 * Try to obtain the clause values. We use estimate_expression_value
1656 * primarily because it can sometimes do something useful with Params.
1658 if (parse->limitCount)
1660 est = estimate_expression_value(root, parse->limitCount);
1661 if (est && IsA(est, Const))
1663 if (((Const *) est)->constisnull)
1665 /* NULL indicates LIMIT ALL, ie, no limit */
1666 *count_est = 0; /* treat as not present */
1670 *count_est = DatumGetInt64(((Const *) est)->constvalue);
1671 if (*count_est <= 0)
1672 *count_est = 1; /* force to at least 1 */
1676 *count_est = -1; /* can't estimate */
1679 *count_est = 0; /* not present */
1681 if (parse->limitOffset)
1683 est = estimate_expression_value(root, parse->limitOffset);
1684 if (est && IsA(est, Const))
1686 if (((Const *) est)->constisnull)
1688 /* Treat NULL as no offset; the executor will too */
1689 *offset_est = 0; /* treat as not present */
1693 *offset_est = DatumGetInt64(((Const *) est)->constvalue);
1694 if (*offset_est < 0)
1695 *offset_est = 0; /* less than 0 is same as 0 */
1699 *offset_est = -1; /* can't estimate */
1702 *offset_est = 0; /* not present */
1704 if (*count_est != 0)
1707 * A LIMIT clause limits the absolute number of tuples returned.
1708 * However, if it's not a constant LIMIT then we have to guess; for
1709 * lack of a better idea, assume 10% of the plan's result is wanted.
1711 if (*count_est < 0 || *offset_est < 0)
1713 /* LIMIT or OFFSET is an expression ... punt ... */
1714 limit_fraction = 0.10;
1718 /* LIMIT (plus OFFSET, if any) is max number of tuples needed */
1719 limit_fraction = (double) *count_est + (double) *offset_est;
1723 * If we have absolute limits from both caller and LIMIT, use the
1724 * smaller value; likewise if they are both fractional. If one is
1725 * fractional and the other absolute, we can't easily determine which
1726 * is smaller, but we use the heuristic that the absolute will usually
1729 if (tuple_fraction >= 1.0)
1731 if (limit_fraction >= 1.0)
1734 tuple_fraction = Min(tuple_fraction, limit_fraction);
1738 /* caller absolute, limit fractional; use caller's value */
1741 else if (tuple_fraction > 0.0)
1743 if (limit_fraction >= 1.0)
1745 /* caller fractional, limit absolute; use limit */
1746 tuple_fraction = limit_fraction;
1750 /* both fractional */
1751 tuple_fraction = Min(tuple_fraction, limit_fraction);
1756 /* no info from caller, just use limit */
1757 tuple_fraction = limit_fraction;
1760 else if (*offset_est != 0 && tuple_fraction > 0.0)
1763 * We have an OFFSET but no LIMIT. This acts entirely differently
1764 * from the LIMIT case: here, we need to increase rather than decrease
1765 * the caller's tuple_fraction, because the OFFSET acts to cause more
1766 * tuples to be fetched instead of fewer. This only matters if we got
1767 * a tuple_fraction > 0, however.
1769 * As above, use 10% if OFFSET is present but unestimatable.
1771 if (*offset_est < 0)
1772 limit_fraction = 0.10;
1774 limit_fraction = (double) *offset_est;
1777 * If we have absolute counts from both caller and OFFSET, add them
1778 * together; likewise if they are both fractional. If one is
1779 * fractional and the other absolute, we want to take the larger, and
1780 * we heuristically assume that's the fractional one.
1782 if (tuple_fraction >= 1.0)
1784 if (limit_fraction >= 1.0)
1786 /* both absolute, so add them together */
1787 tuple_fraction += limit_fraction;
1791 /* caller absolute, limit fractional; use limit */
1792 tuple_fraction = limit_fraction;
1797 if (limit_fraction >= 1.0)
1799 /* caller fractional, limit absolute; use caller's value */
1803 /* both fractional, so add them together */
1804 tuple_fraction += limit_fraction;
1805 if (tuple_fraction >= 1.0)
1806 tuple_fraction = 0.0; /* assume fetch all */
1811 return tuple_fraction;
1816 * preprocess_groupclause - do preparatory work on GROUP BY clause
1818 * The idea here is to adjust the ordering of the GROUP BY elements
1819 * (which in itself is semantically insignificant) to match ORDER BY,
1820 * thereby allowing a single sort operation to both implement the ORDER BY
1821 * requirement and set up for a Unique step that implements GROUP BY.
1823 * In principle it might be interesting to consider other orderings of the
1824 * GROUP BY elements, which could match the sort ordering of other
1825 * possible plans (eg an indexscan) and thereby reduce cost. We don't
1826 * bother with that, though. Hashed grouping will frequently win anyway.
1828 * Note: we need no comparable processing of the distinctClause because
1829 * the parser already enforced that that matches ORDER BY.
1832 preprocess_groupclause(PlannerInfo *root)
1834 Query *parse = root->parse;
1835 List *new_groupclause;
1840 /* If no ORDER BY, nothing useful to do here */
1841 if (parse->sortClause == NIL)
1845 * Scan the ORDER BY clause and construct a list of matching GROUP BY
1846 * items, but only as far as we can make a matching prefix.
1848 * This code assumes that the sortClause contains no duplicate items.
1850 new_groupclause = NIL;
1851 foreach(sl, parse->sortClause)
1853 SortGroupClause *sc = (SortGroupClause *) lfirst(sl);
1855 foreach(gl, parse->groupClause)
1857 SortGroupClause *gc = (SortGroupClause *) lfirst(gl);
1861 new_groupclause = lappend(new_groupclause, gc);
1866 break; /* no match, so stop scanning */
1869 /* Did we match all of the ORDER BY list, or just some of it? */
1870 partial_match = (sl != NULL);
1872 /* If no match at all, no point in reordering GROUP BY */
1873 if (new_groupclause == NIL)
1877 * Add any remaining GROUP BY items to the new list, but only if we
1878 * were able to make a complete match. In other words, we only
1879 * rearrange the GROUP BY list if the result is that one list is a
1880 * prefix of the other --- otherwise there's no possibility of a
1881 * common sort. Also, give up if there are any non-sortable GROUP BY
1882 * items, since then there's no hope anyway.
1884 foreach(gl, parse->groupClause)
1886 SortGroupClause *gc = (SortGroupClause *) lfirst(gl);
1888 if (list_member_ptr(new_groupclause, gc))
1889 continue; /* it matched an ORDER BY item */
1891 return; /* give up, no common sort possible */
1892 if (!OidIsValid(gc->sortop))
1893 return; /* give up, GROUP BY can't be sorted */
1894 new_groupclause = lappend(new_groupclause, gc);
1897 /* Success --- install the rearranged GROUP BY list */
1898 Assert(list_length(parse->groupClause) == list_length(new_groupclause));
1899 parse->groupClause = new_groupclause;
1903 * choose_hashed_grouping - should we use hashed grouping?
1905 * Note: this is only applied when both alternatives are actually feasible.
1908 choose_hashed_grouping(PlannerInfo *root,
1909 double tuple_fraction, double limit_tuples,
1910 Path *cheapest_path, Path *sorted_path,
1911 double dNumGroups, AggClauseCounts *agg_counts)
1913 int numGroupCols = list_length(root->parse->groupClause);
1914 double cheapest_path_rows;
1915 int cheapest_path_width;
1917 List *target_pathkeys;
1918 List *current_pathkeys;
1922 /* Prefer sorting when enable_hashagg is off */
1923 if (!enable_hashagg)
1927 * Don't do it if it doesn't look like the hashtable will fit into
1930 * Beware here of the possibility that cheapest_path->parent is NULL. This
1931 * could happen if user does something silly like SELECT 'foo' GROUP BY 1;
1933 if (cheapest_path->parent)
1935 cheapest_path_rows = cheapest_path->parent->rows;
1936 cheapest_path_width = cheapest_path->parent->width;
1940 cheapest_path_rows = 1; /* assume non-set result */
1941 cheapest_path_width = 100; /* arbitrary */
1944 /* Estimate per-hash-entry space at tuple width... */
1945 hashentrysize = MAXALIGN(cheapest_path_width) + MAXALIGN(sizeof(MinimalTupleData));
1946 /* plus space for pass-by-ref transition values... */
1947 hashentrysize += agg_counts->transitionSpace;
1948 /* plus the per-hash-entry overhead */
1949 hashentrysize += hash_agg_entry_size(agg_counts->numAggs);
1951 if (hashentrysize * dNumGroups > work_mem * 1024L)
1955 * When we have both GROUP BY and DISTINCT, use the more-rigorous of
1956 * DISTINCT and ORDER BY as the assumed required output sort order.
1957 * This is an oversimplification because the DISTINCT might get
1958 * implemented via hashing, but it's not clear that the case is common
1959 * enough (or that our estimates are good enough) to justify trying to
1962 if (list_length(root->distinct_pathkeys) >
1963 list_length(root->sort_pathkeys))
1964 target_pathkeys = root->distinct_pathkeys;
1966 target_pathkeys = root->sort_pathkeys;
1969 * See if the estimated cost is no more than doing it the other way. While
1970 * avoiding the need for sorted input is usually a win, the fact that the
1971 * output won't be sorted may be a loss; so we need to do an actual cost
1974 * We need to consider cheapest_path + hashagg [+ final sort] versus
1975 * either cheapest_path [+ sort] + group or agg [+ final sort] or
1976 * presorted_path + group or agg [+ final sort] where brackets indicate a
1977 * step that may not be needed. We assume query_planner() will have
1978 * returned a presorted path only if it's a winner compared to
1979 * cheapest_path for this purpose.
1981 * These path variables are dummies that just hold cost fields; we don't
1982 * make actual Paths for these steps.
1984 cost_agg(&hashed_p, root, AGG_HASHED, agg_counts->numAggs,
1985 numGroupCols, dNumGroups,
1986 cheapest_path->startup_cost, cheapest_path->total_cost,
1987 cheapest_path_rows);
1988 /* Result of hashed agg is always unsorted */
1989 if (target_pathkeys)
1990 cost_sort(&hashed_p, root, target_pathkeys, hashed_p.total_cost,
1991 dNumGroups, cheapest_path_width, limit_tuples);
1995 sorted_p.startup_cost = sorted_path->startup_cost;
1996 sorted_p.total_cost = sorted_path->total_cost;
1997 current_pathkeys = sorted_path->pathkeys;
2001 sorted_p.startup_cost = cheapest_path->startup_cost;
2002 sorted_p.total_cost = cheapest_path->total_cost;
2003 current_pathkeys = cheapest_path->pathkeys;
2005 if (!pathkeys_contained_in(root->group_pathkeys, current_pathkeys))
2007 cost_sort(&sorted_p, root, root->group_pathkeys, sorted_p.total_cost,
2008 cheapest_path_rows, cheapest_path_width, -1.0);
2009 current_pathkeys = root->group_pathkeys;
2012 if (root->parse->hasAggs)
2013 cost_agg(&sorted_p, root, AGG_SORTED, agg_counts->numAggs,
2014 numGroupCols, dNumGroups,
2015 sorted_p.startup_cost, sorted_p.total_cost,
2016 cheapest_path_rows);
2018 cost_group(&sorted_p, root, numGroupCols, dNumGroups,
2019 sorted_p.startup_cost, sorted_p.total_cost,
2020 cheapest_path_rows);
2021 /* The Agg or Group node will preserve ordering */
2022 if (target_pathkeys &&
2023 !pathkeys_contained_in(target_pathkeys, current_pathkeys))
2024 cost_sort(&sorted_p, root, target_pathkeys, sorted_p.total_cost,
2025 dNumGroups, cheapest_path_width, limit_tuples);
2028 * Now make the decision using the top-level tuple fraction. First we
2029 * have to convert an absolute count (LIMIT) into fractional form.
2031 if (tuple_fraction >= 1.0)
2032 tuple_fraction /= dNumGroups;
2034 if (compare_fractional_path_costs(&hashed_p, &sorted_p,
2035 tuple_fraction) < 0)
2037 /* Hashed is cheaper, so use it */
2044 * choose_hashed_distinct - should we use hashing for DISTINCT?
2046 * This is fairly similar to choose_hashed_grouping, but there are enough
2047 * differences that it doesn't seem worth trying to unify the two functions.
2049 * But note that making the two choices independently is a bit bogus in
2050 * itself. If the two could be combined into a single choice operation
2051 * it'd probably be better, but that seems far too unwieldy to be practical,
2052 * especially considering that the combination of GROUP BY and DISTINCT
2053 * isn't very common in real queries. By separating them, we are giving
2054 * extra preference to using a sorting implementation when a common sort key
2055 * is available ... and that's not necessarily wrong anyway.
2057 * Note: this is only applied when both alternatives are actually feasible.
2060 choose_hashed_distinct(PlannerInfo *root,
2061 Plan *input_plan, List *input_pathkeys,
2062 double tuple_fraction, double limit_tuples,
2063 double dNumDistinctRows)
2065 int numDistinctCols = list_length(root->parse->distinctClause);
2067 List *current_pathkeys;
2068 List *needed_pathkeys;
2072 /* Prefer sorting when enable_hashagg is off */
2073 if (!enable_hashagg)
2077 * Don't do it if it doesn't look like the hashtable will fit into
2080 hashentrysize = MAXALIGN(input_plan->plan_width) + MAXALIGN(sizeof(MinimalTupleData));
2082 if (hashentrysize * dNumDistinctRows > work_mem * 1024L)
2086 * See if the estimated cost is no more than doing it the other way. While
2087 * avoiding the need for sorted input is usually a win, the fact that the
2088 * output won't be sorted may be a loss; so we need to do an actual cost
2091 * We need to consider input_plan + hashagg [+ final sort] versus
2092 * input_plan [+ sort] + group [+ final sort] where brackets indicate
2093 * a step that may not be needed.
2095 * These path variables are dummies that just hold cost fields; we don't
2096 * make actual Paths for these steps.
2098 cost_agg(&hashed_p, root, AGG_HASHED, 0,
2099 numDistinctCols, dNumDistinctRows,
2100 input_plan->startup_cost, input_plan->total_cost,
2101 input_plan->plan_rows);
2103 * Result of hashed agg is always unsorted, so if ORDER BY is present
2104 * we need to charge for the final sort.
2106 if (root->parse->sortClause)
2107 cost_sort(&hashed_p, root, root->sort_pathkeys, hashed_p.total_cost,
2108 dNumDistinctRows, input_plan->plan_width, limit_tuples);
2111 * Now for the GROUP case. See comments in grouping_planner about the
2112 * sorting choices here --- this code should match that code.
2114 sorted_p.startup_cost = input_plan->startup_cost;
2115 sorted_p.total_cost = input_plan->total_cost;
2116 current_pathkeys = input_pathkeys;
2117 if (root->parse->hasDistinctOn &&
2118 list_length(root->distinct_pathkeys) <
2119 list_length(root->sort_pathkeys))
2120 needed_pathkeys = root->sort_pathkeys;
2122 needed_pathkeys = root->distinct_pathkeys;
2123 if (!pathkeys_contained_in(needed_pathkeys, current_pathkeys))
2125 if (list_length(root->distinct_pathkeys) >=
2126 list_length(root->sort_pathkeys))
2127 current_pathkeys = root->distinct_pathkeys;
2129 current_pathkeys = root->sort_pathkeys;
2130 cost_sort(&sorted_p, root, current_pathkeys, sorted_p.total_cost,
2131 input_plan->plan_rows, input_plan->plan_width, -1.0);
2133 cost_group(&sorted_p, root, numDistinctCols, dNumDistinctRows,
2134 sorted_p.startup_cost, sorted_p.total_cost,
2135 input_plan->plan_rows);
2136 if (root->parse->sortClause &&
2137 !pathkeys_contained_in(root->sort_pathkeys, current_pathkeys))
2138 cost_sort(&sorted_p, root, root->sort_pathkeys, sorted_p.total_cost,
2139 dNumDistinctRows, input_plan->plan_width, limit_tuples);
2142 * Now make the decision using the top-level tuple fraction. First we
2143 * have to convert an absolute count (LIMIT) into fractional form.
2145 if (tuple_fraction >= 1.0)
2146 tuple_fraction /= dNumDistinctRows;
2148 if (compare_fractional_path_costs(&hashed_p, &sorted_p,
2149 tuple_fraction) < 0)
2151 /* Hashed is cheaper, so use it */
2158 * make_subplanTargetList
2159 * Generate appropriate target list when grouping is required.
2161 * When grouping_planner inserts Aggregate, Group, or Result plan nodes
2162 * above the result of query_planner, we typically want to pass a different
2163 * target list to query_planner than the outer plan nodes should have.
2164 * This routine generates the correct target list for the subplan.
2166 * The initial target list passed from the parser already contains entries
2167 * for all ORDER BY and GROUP BY expressions, but it will not have entries
2168 * for variables used only in HAVING clauses; so we need to add those
2169 * variables to the subplan target list. Also, we flatten all expressions
2170 * except GROUP BY items into their component variables; the other expressions
2171 * will be computed by the inserted nodes rather than by the subplan.
2172 * For example, given a query like
2173 * SELECT a+b,SUM(c+d) FROM table GROUP BY a+b;
2174 * we want to pass this targetlist to the subplan:
2176 * where the a+b target will be used by the Sort/Group steps, and the
2177 * other targets will be used for computing the final results. (In the
2178 * above example we could theoretically suppress the a and b targets and
2179 * pass down only c,d,a+b, but it's not really worth the trouble to
2180 * eliminate simple var references from the subplan. We will avoid doing
2181 * the extra computation to recompute a+b at the outer level; see
2182 * fix_upper_expr() in setrefs.c.)
2184 * If we are grouping or aggregating, *and* there are no non-Var grouping
2185 * expressions, then the returned tlist is effectively dummy; we do not
2186 * need to force it to be evaluated, because all the Vars it contains
2187 * should be present in the output of query_planner anyway.
2189 * 'tlist' is the query's target list.
2190 * 'groupColIdx' receives an array of column numbers for the GROUP BY
2191 * expressions (if there are any) in the subplan's target list.
2192 * 'need_tlist_eval' is set true if we really need to evaluate the
2195 * The result is the targetlist to be passed to the subplan.
/*
 * make_subplanTargetList — build the targetlist to hand to query_planner
 * when Aggregate/Group/Result/WindowAgg nodes will be stacked above its
 * result (i.e. when aggregation, GROUP BY, HAVING, or window funcs exist).
 *
 * NOTE(review): this extract elides lines (the embedded original line
 * numbers jump), so braces, else-arms and return statements are not all
 * visible; comments below describe only what the visible code shows.
 */
2199 make_subplanTargetList(PlannerInfo *root,
2201 AttrNumber **groupColIdx,
2202 bool *need_tlist_eval)
2204 Query *parse = root->parse;
/* default: caller receives no grouping-column array unless GROUP BY exists */
2209 *groupColIdx = NULL;
2212 * If we're not grouping or aggregating, there's nothing to do here;
2213 * query_planner should receive the unmodified target list.
2215 if (!parse->hasAggs && !parse->groupClause && !root->hasHavingQual &&
2216 !parse->hasWindowFuncs)
2218 *need_tlist_eval = true;
2223 * Otherwise, start with a "flattened" tlist (having just the vars
2224 * mentioned in the targetlist and HAVING qual --- but not upper-level
2225 * Vars; they will be replaced by Params later on). Note this includes
2226 * vars used in resjunk items, so we are covering the needs of ORDER BY
2227 * and window specifications.
/* flatten the tlist, then fold in any Vars referenced only by HAVING */
2229 sub_tlist = flatten_tlist(tlist);
2230 extravars = pull_var_clause(parse->havingQual, true);
2231 sub_tlist = add_to_flat_tlist(sub_tlist, extravars);
2232 list_free(extravars);
2233 *need_tlist_eval = false; /* only eval if not flat tlist */
2236 * If grouping, create sub_tlist entries for all GROUP BY expressions
2237 * (GROUP BY items that are simple Vars should be in the list already),
2238 * and make an array showing where the group columns are in the sub_tlist.
2240 numCols = list_length(parse->groupClause);
2244 AttrNumber *grpColIdx;
/* one slot per GROUP BY item; each filled with the matching TLE's resno */
2247 grpColIdx = (AttrNumber *) palloc(sizeof(AttrNumber) * numCols);
2248 *groupColIdx = grpColIdx;
2250 foreach(gl, parse->groupClause)
2252 SortGroupClause *grpcl = (SortGroupClause *) lfirst(gl);
2253 Node *groupexpr = get_sortgroupclause_expr(grpcl, tlist);
2257 * Find or make a matching sub_tlist entry. If the groupexpr
2258 * isn't a Var, no point in searching. (Note that the parser
2259 * won't make multiple groupClause entries for the same TLE.)
2261 if (groupexpr && IsA(groupexpr, Var))
2262 te = tlist_member(groupexpr, sub_tlist);
/* no existing entry: append the grouping expression as a new TLE */
2268 te = makeTargetEntry((Expr *) groupexpr,
2269 list_length(sub_tlist) + 1,
2272 sub_tlist = lappend(sub_tlist, te);
2273 *need_tlist_eval = true; /* it's not flat anymore */
2276 /* and save its resno */
2277 grpColIdx[keyno++] = te->resno;
2285 * locate_grouping_columns
2286 * Locate grouping columns in the tlist chosen by query_planner.
2288 * This is only needed if we don't use the sub_tlist chosen by
2289 * make_subplanTargetList. We have to forget the column indexes found
2290 * by that routine and re-locate the grouping exprs in the real sub_tlist.
/*
 * locate_grouping_columns — re-find each GROUP BY expression in the tlist
 * actually chosen by query_planner and record its resno into groupColIdx,
 * replacing the indexes computed earlier by make_subplanTargetList.
 * (Lines are elided in this extract; see the embedded line numbers.)
 */
2293 locate_grouping_columns(PlannerInfo *root,
2296 AttrNumber *groupColIdx)
2302 * No work unless grouping.
2304 if (!root->parse->groupClause)
2306 Assert(groupColIdx == NULL);
/* grouping is present: caller must have supplied an output array */
2309 Assert(groupColIdx != NULL);
2311 foreach(gl, root->parse->groupClause)
2313 SortGroupClause *grpcl = (SortGroupClause *) lfirst(gl);
2314 Node *groupexpr = get_sortgroupclause_expr(grpcl, tlist);
2315 TargetEntry *te = tlist_member(groupexpr, sub_tlist);
/* every grouping expression must already exist in sub_tlist */
2318 elog(ERROR, "failed to locate grouping columns");
2319 groupColIdx[keyno++] = te->resno;
2324 * postprocess_setop_tlist
2325 * Fix up targetlist returned by plan_set_operations().
2327 * We need to transpose sort key info from the orig_tlist into new_tlist.
2328 * NOTE: this would not be good enough if we supported resjunk sort keys
2329 * for results of set operations --- then, we'd need to project a whole
2330 * new tlist to evaluate the resjunk columns. For now, just ereport if we
2331 * find any resjunk columns in orig_tlist.
/*
 * postprocess_setop_tlist — copy ressortgroupref markings from orig_tlist
 * onto the corresponding entries of the tlist returned by
 * plan_set_operations(), pairing non-resjunk entries positionally.
 * Errors out if resjunk columns appear in orig_tlist (unsupported).
 */
2334 postprocess_setop_tlist(List *new_tlist, List *orig_tlist)
2337 ListCell *orig_tlist_item = list_head(orig_tlist);
2339 foreach(l, new_tlist)
2341 TargetEntry *new_tle = (TargetEntry *) lfirst(l);
2342 TargetEntry *orig_tle;
2344 /* ignore resjunk columns in setop result */
2345 if (new_tle->resjunk)
/* advance through orig_tlist in lockstep with the non-junk new entries */
2348 Assert(orig_tlist_item != NULL);
2349 orig_tle = (TargetEntry *) lfirst(orig_tlist_item);
2350 orig_tlist_item = lnext(orig_tlist_item);
2351 if (orig_tle->resjunk) /* should not happen */
2352 elog(ERROR, "resjunk output columns are not implemented");
2353 Assert(new_tle->resno == orig_tle->resno);
2354 new_tle->ressortgroupref = orig_tle->ressortgroupref;
/* any leftover orig entries would imply resjunk sort keys we can't handle */
2356 if (orig_tlist_item != NULL)
2357 elog(ERROR, "resjunk output columns are not implemented");
2362 * select_active_windows
2363 * Create a list of the "active" window clauses (ie, those referenced
2364 * by non-deleted WindowFuncs) in the order they are to be executed.
/*
 * select_active_windows — collect the window clauses that still have live
 * WindowFuncs and order them so clauses with identical PARTITION BY /
 * ORDER BY lists are adjacent, letting a single sort serve each group.
 */
2367 select_active_windows(PlannerInfo *root, WindowFuncLists *wflists)
2373 /* First, make a list of the active windows */
2375 foreach(lc, root->parse->windowClause)
2377 WindowClause *wc = (WindowClause *) lfirst(lc);
2379 /* It's only active if wflists shows some related WindowFuncs */
2380 Assert(wc->winref <= wflists->maxWinRef);
2381 if (wflists->windowFuncs[wc->winref] != NIL)
2382 actives = lappend(actives, wc);
2386 * Now, ensure that windows with identical partitioning/ordering clauses
2387 * are adjacent in the list. This is required by the SQL standard, which
2388 * says that only one sort is to be used for such windows, even if they
2389 * are otherwise distinct (eg, different names or framing clauses).
2391 * There is room to be much smarter here, for example detecting whether
2392 * one window's sort keys are a prefix of another's (so that sorting
2393 * for the latter would do for the former), or putting windows first
2394 * that match a sort order available for the underlying query. For the
2395 * moment we are content with meeting the spec.
/* selection-style pass: pop the head clause, then sweep for its clones */
2398 while (actives != NIL)
2400 WindowClause *wc = (WindowClause *) linitial(actives);
2404 /* Move wc from actives to result */
2405 actives = list_delete_first(actives);
2406 result = lappend(result, wc);
2408 /* Now move any matching windows from actives to result */
2410 for (lc = list_head(actives); lc; lc = next)
2412 WindowClause *wc2 = (WindowClause *) lfirst(lc);
/* equal() on both clause lists means the same required sort ordering */
2415 if (equal(wc->partitionClause, wc2->partitionClause) &&
2416 equal(wc->orderClause, wc2->orderClause))
2418 actives = list_delete_cell(actives, lc, prev);
2419 result = lappend(result, wc2);
2430 * make_pathkeys_for_window
2431 * Create a pathkeys list describing the required input ordering
2432 * for the given WindowClause.
2434 * The required ordering is first the PARTITION keys, then the ORDER keys.
2435 * In the future we might try to implement windowing using hashing, in which
2436 * case the ordering could be relaxed, but for now we always sort.
/*
 * make_pathkeys_for_window — build the pathkeys describing the required
 * input ordering for a WindowAgg: PARTITION BY keys first, then ORDER BY
 * keys.  Raises a FEATURE_NOT_SUPPORTED error if either list is not
 * sortable (the ereport openers appear to be elided in this extract).
 */
2439 make_pathkeys_for_window(PlannerInfo *root, WindowClause *wc,
2440 List *tlist, bool canonicalize)
2442 List *window_pathkeys;
2443 List *window_sortclauses;
2445 /* Throw error if can't sort */
2446 if (!grouping_is_sortable(wc->partitionClause))
2448 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
2449 errmsg("could not implement window PARTITION BY"),
2450 errdetail("Window partitioning columns must be of sortable datatypes.")));
2451 if (!grouping_is_sortable(wc->orderClause))
2453 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
2454 errmsg("could not implement window ORDER BY"),
2455 errdetail("Window ordering columns must be of sortable datatypes.")));
2457 /* Okay, make the combined pathkeys */
/* list_copy keeps wc's own clause lists unmodified by the concat */
2458 window_sortclauses = list_concat(list_copy(wc->partitionClause),
2459 list_copy(wc->orderClause));
2460 window_pathkeys = make_pathkeys_for_sortclauses(root,
/* the temporary combined list is freed; pathkeys don't reference it */
2464 list_free(window_sortclauses);
2465 return window_pathkeys;
2469 * get_column_info_for_window
2470 * Get the partitioning/ordering column numbers and equality operators
2471 * for a WindowAgg node.
2473 * This depends on the behavior of make_pathkeys_for_window()!
2475 * We are given the target WindowClause and an array of the input column
2476 * numbers associated with the resulting pathkeys. In the easy case, there
2477 * are the same number of pathkey columns as partitioning + ordering columns
2478 * and we just have to copy some data around. However, it's possible that
2479 * some of the original partitioning + ordering columns were eliminated as
2480 * redundant during the transformation to pathkeys. (This can happen even
2481 * though the parser gets rid of obvious duplicates. A typical scenario is a
2482 * window specification "PARTITION BY x ORDER BY y" coupled with a clause
2483 * "WHERE x = y" that causes the two sort columns to be recognized as
2484 * redundant.) In that unusual case, we have to work a lot harder to
2485 * determine which keys are significant.
2487 * The method used here is a bit brute-force: add the sort columns to a list
2488 * one at a time and note when the resulting pathkey list gets longer. But
2489 * it's a sufficiently uncommon case that a faster way doesn't seem worth
2490 * the amount of code refactoring that'd be needed.
/*
 * get_column_info_for_window — split the sort-column index/operator arrays
 * built for a window's pathkeys back into partitioning vs. ordering
 * columns for a WindowAgg node.  In the easy case the counts line up and
 * the arrays are handed back directly; otherwise (redundant sort keys were
 * eliminated) each clause is re-added one at a time and counts as
 * significant only if it lengthens the pathkey list.
 */
2494 get_column_info_for_window(PlannerInfo *root, WindowClause *wc, List *tlist,
2495 int numSortCols, AttrNumber *sortColIdx,
2497 AttrNumber **partColIdx,
2498 Oid **partOperators,
2500 AttrNumber **ordColIdx,
2503 int numPart = list_length(wc->partitionClause);
2504 int numOrder = list_length(wc->orderClause);
/* easy case: nothing was eliminated, so just pass the arrays through */
2506 if (numSortCols == numPart + numOrder)
2509 *partNumCols = numPart;
2510 *partColIdx = sortColIdx;
2511 *partOperators = extract_grouping_ops(wc->partitionClause);
2512 *ordNumCols = numOrder;
/* ordering columns follow the partitioning columns within sortColIdx */
2513 *ordColIdx = sortColIdx + numPart;
2514 *ordOperators = extract_grouping_ops(wc->orderClause);
2523 /* first, allocate what's certainly enough space for the arrays */
2525 *partColIdx = (AttrNumber *) palloc(numPart * sizeof(AttrNumber));
2526 *partOperators = (Oid *) palloc(numPart * sizeof(Oid));
2528 *ordColIdx = (AttrNumber *) palloc(numOrder * sizeof(AttrNumber));
2529 *ordOperators = (Oid *) palloc(numOrder * sizeof(Oid));
2533 foreach(lc, wc->partitionClause)
2535 SortGroupClause *sgc = (SortGroupClause *) lfirst(lc);
2538 sortclauses = lappend(sortclauses, sgc);
2539 new_pathkeys = make_pathkeys_for_sortclauses(root,
2543 if (list_length(new_pathkeys) > list_length(pathkeys))
2545 /* this sort clause is actually significant */
/*
 * FIXME(review): "*partColIdx[*partNumCols]" parses as
 * *(partColIdx[*partNumCols]) because postfix [] binds tighter than
 * unary *, so this indexes the AttrNumber** pointer itself (undefined
 * behavior for any index > 0) rather than the array palloc'd above.
 * It should read (*partColIdx)[*partNumCols]; likewise for the
 * partOperators line below and the two ord* lines further down.
 */
2546 *partColIdx[*partNumCols] = sortColIdx[scidx++];
2547 *partOperators[*partNumCols] = sgc->eqop;
2549 pathkeys = new_pathkeys;
2552 foreach(lc, wc->orderClause)
2554 SortGroupClause *sgc = (SortGroupClause *) lfirst(lc);
2557 sortclauses = lappend(sortclauses, sgc);
2558 new_pathkeys = make_pathkeys_for_sortclauses(root,
2562 if (list_length(new_pathkeys) > list_length(pathkeys))
2564 /* this sort clause is actually significant */
/* FIXME(review): same precedence bug — needs (*ordColIdx)[...] and (*ordOperators)[...] */
2565 *ordColIdx[*ordNumCols] = sortColIdx[scidx++];
2566 *ordOperators[*ordNumCols] = sgc->eqop;
2568 pathkeys = new_pathkeys;
2571 /* complain if we didn't eat exactly the right number of sort cols */
2572 if (scidx != numSortCols)
2573 elog(ERROR, "failed to deconstruct sort operators into partitioning/ordering operators");