1 /*-------------------------------------------------------------------------
4 * Routines to create the desired plan for processing a query.
5 * Planning is complete, we just need to convert the selected
8 * Portions Copyright (c) 1996-2008, PostgreSQL Global Development Group
9 * Portions Copyright (c) 1994, Regents of the University of California
13 * $PostgreSQL: pgsql/src/backend/optimizer/plan/createplan.c,v 1.240 2008/04/17 21:22:14 tgl Exp $
15 *-------------------------------------------------------------------------
21 #include "access/skey.h"
22 #include "nodes/makefuncs.h"
23 #include "optimizer/clauses.h"
24 #include "optimizer/cost.h"
25 #include "optimizer/plancat.h"
26 #include "optimizer/planmain.h"
27 #include "optimizer/predtest.h"
28 #include "optimizer/restrictinfo.h"
29 #include "optimizer/tlist.h"
30 #include "optimizer/var.h"
31 #include "parser/parse_clause.h"
32 #include "parser/parse_expr.h"
33 #include "parser/parsetree.h"
34 #include "utils/lsyscache.h"
/*
 * NOTE(review): decimated line-numbered listing — some prototype lines are
 * missing (e.g. trailing parameter lines of make_bitmap_indexscan,
 * make_bitmap_heapscan, make_tidscan, make_nestloop, make_hashjoin,
 * make_mergejoin); the declarations below are an incomplete excerpt.
 * Forward declarations for the plan-creation routines in this file.
 */
37 static Plan *create_scan_plan(PlannerInfo *root, Path *best_path);
38 static List *build_relation_tlist(RelOptInfo *rel);
39 static bool use_physical_tlist(RelOptInfo *rel);
40 static void disuse_physical_tlist(Plan *plan, Path *path);
41 static Plan *create_gating_plan(PlannerInfo *root, Plan *plan, List *quals);
42 static Plan *create_join_plan(PlannerInfo *root, JoinPath *best_path);
43 static Plan *create_append_plan(PlannerInfo *root, AppendPath *best_path);
44 static Result *create_result_plan(PlannerInfo *root, ResultPath *best_path);
45 static Material *create_material_plan(PlannerInfo *root, MaterialPath *best_path);
46 static Plan *create_unique_plan(PlannerInfo *root, UniquePath *best_path);
47 static SeqScan *create_seqscan_plan(PlannerInfo *root, Path *best_path,
48 List *tlist, List *scan_clauses);
49 static IndexScan *create_indexscan_plan(PlannerInfo *root, IndexPath *best_path,
50 List *tlist, List *scan_clauses);
51 static BitmapHeapScan *create_bitmap_scan_plan(PlannerInfo *root,
52 BitmapHeapPath *best_path,
53 List *tlist, List *scan_clauses);
54 static Plan *create_bitmap_subplan(PlannerInfo *root, Path *bitmapqual,
56 static TidScan *create_tidscan_plan(PlannerInfo *root, TidPath *best_path,
57 List *tlist, List *scan_clauses);
58 static SubqueryScan *create_subqueryscan_plan(PlannerInfo *root, Path *best_path,
59 List *tlist, List *scan_clauses);
60 static FunctionScan *create_functionscan_plan(PlannerInfo *root, Path *best_path,
61 List *tlist, List *scan_clauses);
62 static ValuesScan *create_valuesscan_plan(PlannerInfo *root, Path *best_path,
63 List *tlist, List *scan_clauses);
64 static NestLoop *create_nestloop_plan(PlannerInfo *root, NestPath *best_path,
65 Plan *outer_plan, Plan *inner_plan);
66 static MergeJoin *create_mergejoin_plan(PlannerInfo *root, MergePath *best_path,
67 Plan *outer_plan, Plan *inner_plan);
68 static HashJoin *create_hashjoin_plan(PlannerInfo *root, HashPath *best_path,
69 Plan *outer_plan, Plan *inner_plan);
70 static List *fix_indexqual_references(List *indexquals, IndexPath *index_path);
71 static Node *fix_indexqual_operand(Node *node, IndexOptInfo *index);
72 static List *get_switched_clauses(List *clauses, Relids outerrelids);
73 static List *order_qual_clauses(PlannerInfo *root, List *clauses);
74 static void copy_path_costsize(Plan *dest, Path *src);
75 static void copy_plan_costsize(Plan *dest, Plan *src);
76 static SeqScan *make_seqscan(List *qptlist, List *qpqual, Index scanrelid);
77 static IndexScan *make_indexscan(List *qptlist, List *qpqual, Index scanrelid,
78 Oid indexid, List *indexqual, List *indexqualorig,
79 ScanDirection indexscandir);
80 static BitmapIndexScan *make_bitmap_indexscan(Index scanrelid, Oid indexid,
83 static BitmapHeapScan *make_bitmap_heapscan(List *qptlist,
88 static TidScan *make_tidscan(List *qptlist, List *qpqual, Index scanrelid,
90 static FunctionScan *make_functionscan(List *qptlist, List *qpqual,
91 Index scanrelid, Node *funcexpr, List *funccolnames,
92 List *funccoltypes, List *funccoltypmods);
93 static ValuesScan *make_valuesscan(List *qptlist, List *qpqual,
94 Index scanrelid, List *values_lists);
95 static BitmapAnd *make_bitmap_and(List *bitmapplans);
96 static BitmapOr *make_bitmap_or(List *bitmapplans);
97 static NestLoop *make_nestloop(List *tlist,
98 List *joinclauses, List *otherclauses,
99 Plan *lefttree, Plan *righttree,
101 static HashJoin *make_hashjoin(List *tlist,
102 List *joinclauses, List *otherclauses,
104 Plan *lefttree, Plan *righttree,
106 static Hash *make_hash(Plan *lefttree);
107 static MergeJoin *make_mergejoin(List *tlist,
108 List *joinclauses, List *otherclauses,
111 int *mergestrategies,
112 bool *mergenullsfirst,
113 Plan *lefttree, Plan *righttree,
115 static Sort *make_sort(PlannerInfo *root, Plan *lefttree, int numCols,
116 AttrNumber *sortColIdx, Oid *sortOperators, bool *nullsFirst,
117 double limit_tuples);
118 static Material *make_material(Plan *lefttree);
/*
 * NOTE(review): decimated listing — the function signature's return type,
 * braces, local declarations, and most case labels are missing here; only
 * T_BitmapHeapScan and the default case labels are visible. Not compilable
 * as shown.
 *
 * create_plan: top-level entry that converts the chosen Path tree into a
 * Plan tree by dispatching on best_path->pathtype (scans go through
 * create_scan_plan; join/append/result/material/unique paths go to their
 * respective creators; anything else raises elog(ERROR)).
 */
123 * Creates the access plan for a query by tracing backwards through the
124 * desired chain of pathnodes, starting at the node 'best_path'. For
125 * every pathnode found, we create a corresponding plan node containing
126 * appropriate id, target list, and qualification information.
128 * The tlists and quals in the plan tree are still in planner format,
129 * ie, Vars still correspond to the parser's numbering. This will be
130 * fixed later by setrefs.c.
132 * best_path is the best access path
134 * Returns a Plan tree.
137 create_plan(PlannerInfo *root, Path *best_path)
141 switch (best_path->pathtype)
145 case T_BitmapHeapScan:
150 plan = create_scan_plan(root, best_path);
155 plan = create_join_plan(root,
156 (JoinPath *) best_path);
159 plan = create_append_plan(root,
160 (AppendPath *) best_path);
163 plan = (Plan *) create_result_plan(root,
164 (ResultPath *) best_path);
167 plan = (Plan *) create_material_plan(root,
168 (MaterialPath *) best_path);
171 plan = create_unique_plan(root,
172 (UniquePath *) best_path);
175 elog(ERROR, "unrecognized node type: %d",
176 (int) best_path->pathtype);
177 plan = NULL; /* keep compiler quiet */
/*
 * NOTE(review): decimated listing — return type, braces, local declarations,
 * and several case labels are missing. Not compilable as shown.
 *
 * create_scan_plan: builds the scan tlist (physical tlist when
 * use_physical_tlist() allows, else build_relation_tlist), takes
 * scan_clauses from rel->baserestrictinfo, dispatches on pathtype to the
 * per-scan-type creator, and finally wraps the plan in a gating Result if
 * root->hasPseudoConstantQuals is set.
 */
186 * Create a scan plan for the parent relation of 'best_path'.
189 create_scan_plan(PlannerInfo *root, Path *best_path)
191 RelOptInfo *rel = best_path->parent;
197 * For table scans, rather than using the relation targetlist (which is
198 * only those Vars actually needed by the query), we prefer to generate a
199 * tlist containing all Vars in order. This will allow the executor to
200 * optimize away projection of the table tuples, if possible. (Note that
201 * planner.c may replace the tlist we generate here, forcing projection to
204 if (use_physical_tlist(rel))
206 tlist = build_physical_tlist(root, rel);
207 /* if fail because of dropped cols, use regular method */
209 tlist = build_relation_tlist(rel);
212 tlist = build_relation_tlist(rel);
215 * Extract the relevant restriction clauses from the parent relation. The
216 * executor must apply all these restrictions during the scan, except for
217 * pseudoconstants which we'll take care of below.
219 scan_clauses = rel->baserestrictinfo;
221 switch (best_path->pathtype)
224 plan = (Plan *) create_seqscan_plan(root,
231 plan = (Plan *) create_indexscan_plan(root,
232 (IndexPath *) best_path,
237 case T_BitmapHeapScan:
238 plan = (Plan *) create_bitmap_scan_plan(root,
239 (BitmapHeapPath *) best_path,
245 plan = (Plan *) create_tidscan_plan(root,
246 (TidPath *) best_path,
252 plan = (Plan *) create_subqueryscan_plan(root,
259 plan = (Plan *) create_functionscan_plan(root,
266 plan = (Plan *) create_valuesscan_plan(root,
273 elog(ERROR, "unrecognized node type: %d",
274 (int) best_path->pathtype);
275 plan = NULL; /* keep compiler quiet */
280 * If there are any pseudoconstant clauses attached to this node, insert a
281 * gating Result node that evaluates the pseudoconstants as one-time
284 if (root->hasPseudoConstantQuals)
285 plan = create_gating_plan(root, plan, scan_clauses);
/*
 * NOTE(review): decimated listing — return type, braces, and the tail of
 * the makeTargetEntry() call are missing. Builds a TargetEntry list by
 * copying each Var in rel->reltargetlist.
 */
291 * Build a target list (ie, a list of TargetEntry) for a relation.
294 build_relation_tlist(RelOptInfo *rel)
300 foreach(v, rel->reltargetlist)
302 /* Do we really need to copy here? Not sure */
303 Var *var = (Var *) copyObject(lfirst(v));
305 tlist = lappend(tlist, makeTargetEntry((Expr *) var,
/*
 * NOTE(review): decimated listing — braces and return statements are
 * missing. Returns false for non-base RTE kinds, inheritance cases, and
 * when any system column or whole-row Var (attno <= 0) is needed;
 * presumably returns true otherwise (tail of function not visible).
 */
316 * Decide whether to use a tlist matching relation structure,
317 * rather than only those Vars actually referenced.
320 use_physical_tlist(RelOptInfo *rel)
325 * We can do this for real relation scans, subquery scans, function scans,
326 * and values scans (but not for, eg, joins).
328 if (rel->rtekind != RTE_RELATION &&
329 rel->rtekind != RTE_SUBQUERY &&
330 rel->rtekind != RTE_FUNCTION &&
331 rel->rtekind != RTE_VALUES)
335 * Can't do it with inheritance cases either (mainly because Append
338 if (rel->reloptkind != RELOPT_BASEREL)
342 * Can't do it if any system columns or whole-row Vars are requested,
343 * either. (This could possibly be fixed but would take some fragile
344 * assumptions in setrefs.c, I think.)
346 for (i = rel->min_attr; i <= 0; i++)
348 if (!bms_is_empty(rel->attr_needed[i - rel->min_attr]))
/*
 * NOTE(review): decimated listing — braces and most case labels of the
 * switch are missing; only T_BitmapHeapScan is visible. Replaces the plan's
 * targetlist with build_relation_tlist(path->parent) for scan path types.
 */
356 * disuse_physical_tlist
357 * Switch a plan node back to emitting only Vars actually referenced.
359 * If the plan node immediately above a scan would prefer to get only
360 * needed Vars and not a physical tlist, it must call this routine to
361 * undo the decision made by use_physical_tlist(). Currently, Hash, Sort,
362 * and Material nodes want this, so they don't have to store useless columns.
365 disuse_physical_tlist(Plan *plan, Path *path)
367 /* Only need to undo it for path types handled by create_scan_plan() */
368 switch (path->pathtype)
372 case T_BitmapHeapScan:
377 plan->targetlist = build_relation_tlist(path->parent);
/*
 * NOTE(review): decimated listing — the early "return plan" for the
 * no-pseudoconstants case and the trailing make_result() arguments are
 * missing. Orders the quals, pulls out pseudoconstants, and (when any
 * exist) wraps 'plan' in a gating Result node evaluating them one time.
 */
386 * Deal with pseudoconstant qual clauses
388 * If the node's quals list includes any pseudoconstant quals, put them
389 * into a gating Result node atop the already-built plan. Otherwise,
390 * return the plan as-is.
392 * Note that we don't change cost or size estimates when doing gating.
393 * The costs of qual eval were already folded into the plan's startup cost.
394 * Leaving the size alone amounts to assuming that the gating qual will
395 * succeed, which is the conservative estimate for planning upper queries.
396 * We certainly don't want to assume the output size is zero (unless the
397 * gating qual is actually constant FALSE, and that case is dealt with in
398 * clausesel.c). Interpolating between the two cases is silly, because
399 * it doesn't reflect what will really happen at runtime, and besides which
400 * in most cases we have only a very bad idea of the probability of the gating
404 create_gating_plan(PlannerInfo *root, Plan *plan, List *quals)
406 List *pseudoconstants;
408 /* Sort into desirable execution order while still in RestrictInfo form */
409 quals = order_qual_clauses(root, quals);
411 /* Pull out any pseudoconstant quals from the RestrictInfo list */
412 pseudoconstants = extract_actual_clauses(quals, true);
414 if (!pseudoconstants)
417 return (Plan *) make_result(root,
419 (Node *) pseudoconstants,
/*
 * NOTE(review): decimated listing — return type, braces, case labels, and
 * the closing of the #ifdef'd expensive-function block are missing. Note
 * the "(Plan)" casts near the end are inside historical commented/ifdef'd
 * code in upstream PostgreSQL, not live code — do not read them as a bug
 * without the full file. TODO confirm against the complete source.
 *
 * Recursively builds outer/inner subplans, dispatches on path.pathtype to
 * merge/hash/nestloop creators, then adds a gating Result if
 * root->hasPseudoConstantQuals.
 */
425 * Create a join plan for 'best_path' and (recursively) plans for its
426 * inner and outer paths.
429 create_join_plan(PlannerInfo *root, JoinPath *best_path)
435 outer_plan = create_plan(root, best_path->outerjoinpath);
436 inner_plan = create_plan(root, best_path->innerjoinpath);
438 switch (best_path->path.pathtype)
441 plan = (Plan *) create_mergejoin_plan(root,
442 (MergePath *) best_path,
447 plan = (Plan *) create_hashjoin_plan(root,
448 (HashPath *) best_path,
453 plan = (Plan *) create_nestloop_plan(root,
454 (NestPath *) best_path,
459 elog(ERROR, "unrecognized node type: %d",
460 (int) best_path->path.pathtype);
461 plan = NULL; /* keep compiler quiet */
466 * If there are any pseudoconstant clauses attached to this node, insert a
467 * gating Result node that evaluates the pseudoconstants as one-time
470 if (root->hasPseudoConstantQuals)
471 plan = create_gating_plan(root, plan, best_path->joinrestrictinfo);
476 * * Expensive function pullups may have pulled local predicates * into
477 * this path node. Put them in the qpqual of the plan node. * JMH,
480 if (get_loc_restrictinfo(best_path) != NIL)
481 set_qpqual((Plan) plan,
482 list_concat(get_qpqual((Plan) plan),
483 get_actual_clauses(get_loc_restrictinfo(best_path))));
/*
 * NOTE(review): decimated listing — return type, braces, and part of the
 * empty-subpaths make_result() argument list are missing. Handles the
 * zero-subpath case with a constant-FALSE Result, else builds one subplan
 * per subpath and wraps them in an Append via make_append().
 */
491 * Create an Append plan for 'best_path' and (recursively) plans
494 * Returns a Plan node.
497 create_append_plan(PlannerInfo *root, AppendPath *best_path)
500 List *tlist = build_relation_tlist(best_path->path.parent);
501 List *subplans = NIL;
505 * It is possible for the subplans list to contain only one entry, or even
506 * no entries. Handle these cases specially.
508 * XXX ideally, if there's just one entry, we'd not bother to generate an
509 * Append node but just return the single child. At the moment this does
510 * not work because the varno of the child scan plan won't match the
511 * parent-rel Vars it'll be asked to emit.
513 if (best_path->subpaths == NIL)
515 /* Generate a Result plan with constant-FALSE gating qual */
516 return (Plan *) make_result(root,
518 (Node *) list_make1(makeBoolConst(false,
523 /* Normal case with multiple subpaths */
524 foreach(subpaths, best_path->subpaths)
526 Path *subpath = (Path *) lfirst(subpaths);
528 subplans = lappend(subplans, create_plan(root, subpath));
531 plan = make_append(subplans, false, tlist);
533 return (Plan *) plan;
/*
 * NOTE(review): decimated listing — return type, braces, and local
 * declarations (tlist, quals) are missing. For an empty-jointree query:
 * asserts no parent RelOptInfo, orders best_path->quals (bare clauses,
 * not RestrictInfos), and returns a Result node with a NIL-initialized
 * tlist to be filled in later.
 */
538 * Create a Result plan for 'best_path'.
539 * This is only used for the case of a query with an empty jointree.
541 * Returns a Plan node.
544 create_result_plan(PlannerInfo *root, ResultPath *best_path)
549 /* The tlist will be installed later, since we have no RelOptInfo */
550 Assert(best_path->path.parent == NULL);
553 /* best_path->quals is just bare clauses */
555 quals = order_qual_clauses(root, best_path->quals);
557 return make_result(root, tlist, (Node *) quals, NULL);
/*
 * NOTE(review): decimated listing — return type, braces, and the final
 * "return plan;" are missing. Builds the subplan, strips any physical
 * tlist so materialized tuples carry no excess columns, wraps in a
 * Material node, and copies cost/size from the path.
 */
561 * create_material_plan
562 * Create a Material plan for 'best_path' and (recursively) plans
565 * Returns a Plan node.
568 create_material_plan(PlannerInfo *root, MaterialPath *best_path)
573 subplan = create_plan(root, best_path->subpath);
575 /* We don't want any excess columns in the materialized tuples */
576 disuse_physical_tlist(subplan, best_path->subpath);
578 plan = make_material(subplan);
580 copy_path_costsize(&plan->plan, (Path *) best_path);
/*
 * NOTE(review): decimated listing — return type, braces, many local
 * declarations, and several argument lists (make_agg, elog format args)
 * are missing. Not compilable as shown.
 *
 * create_unique_plan: unique-ifies the subplan output for an IN-join.
 * Looks up the matching InClauseInfo by relids to find the expressions
 * and IN operators to unique-ify on, extends the subplan tlist with any
 * missing expressions, records the grouping column indexes, then either
 * hashes (Agg node with compatible hash equality operators) or sorts and
 * applies a Unique node, depending on best_path->umethod.
 */
587 * Create a Unique plan for 'best_path' and (recursively) plans
590 * Returns a Plan node.
593 create_unique_plan(PlannerInfo *root, UniquePath *best_path)
603 AttrNumber *groupColIdx;
607 subplan = create_plan(root, best_path->subpath);
609 /* Done if we don't need to do any actual unique-ifying */
610 if (best_path->umethod == UNIQUE_PATH_NOOP)
614 * As constructed, the subplan has a "flat" tlist containing just the
615 * Vars needed here and at upper levels. The values we are supposed
616 * to unique-ify may be expressions in these variables. We have to
617 * add any such expressions to the subplan's tlist.
619 * The subplan may have a "physical" tlist if it is a simple scan plan.
620 * If we're going to sort, this should be reduced to the regular tlist,
621 * so that we don't sort more data than we need to. For hashing, the
622 * tlist should be left as-is if we don't need to add any expressions;
623 * but if we do have to add expressions, then a projection step will be
624 * needed at runtime anyway, so we may as well remove unneeded items.
625 * Therefore newtlist starts from build_relation_tlist() not just a
626 * copy of the subplan's tlist; and we don't install it into the subplan
627 * unless we are sorting or stuff has to be added.
629 * To find the correct list of values to unique-ify, we look in the
630 * information saved for IN expressions. If this code is ever used in
631 * other scenarios, some other way of finding what to unique-ify will
632 * be needed. The IN clause's operators are needed too, since they
633 * determine what the meaning of "unique" is in this context.
636 uniq_exprs = NIL; /* just to keep compiler quiet */
638 foreach(l, root->in_info_list)
640 InClauseInfo *ininfo = (InClauseInfo *) lfirst(l);
642 if (bms_equal(ininfo->righthand, best_path->path.parent->relids))
644 uniq_exprs = ininfo->sub_targetlist;
645 in_operators = ininfo->in_operators;
649 if (l == NULL) /* fell out of loop? */
650 elog(ERROR, "could not find UniquePath in in_info_list");
652 /* initialize modified subplan tlist as just the "required" vars */
653 newtlist = build_relation_tlist(best_path->path.parent);
654 nextresno = list_length(newtlist) + 1;
657 foreach(l, uniq_exprs)
659 Node *uniqexpr = lfirst(l);
662 tle = tlist_member(uniqexpr, newtlist);
665 tle = makeTargetEntry((Expr *) uniqexpr,
669 newtlist = lappend(newtlist, tle);
675 if (newitems || best_path->umethod == UNIQUE_PATH_SORT)
678 * If the top plan node can't do projections, we need to add a Result
679 * node to help it along.
681 if (!is_projection_capable_plan(subplan))
682 subplan = (Plan *) make_result(root, newtlist, NULL, subplan);
684 subplan->targetlist = newtlist;
688 * Build control information showing which subplan output columns are to
689 * be examined by the grouping step. Unfortunately we can't merge this
690 * with the previous loop, since we didn't then know which version of the
691 * subplan tlist we'd end up using.
693 newtlist = subplan->targetlist;
694 numGroupCols = list_length(uniq_exprs);
695 groupColIdx = (AttrNumber *) palloc(numGroupCols * sizeof(AttrNumber));
698 foreach(l, uniq_exprs)
700 Node *uniqexpr = lfirst(l);
703 tle = tlist_member(uniqexpr, newtlist);
704 if (!tle) /* shouldn't happen */
705 elog(ERROR, "failed to find unique expression in subplan tlist");
706 groupColIdx[groupColPos++] = tle->resno;
709 if (best_path->umethod == UNIQUE_PATH_HASH)
714 numGroups = (long) Min(best_path->rows, (double) LONG_MAX);
717 * Get the hashable equality operators for the Agg node to use.
718 * Normally these are the same as the IN clause operators, but if
719 * those are cross-type operators then the equality operators are the
720 * ones for the IN clause operators' RHS datatype.
722 groupOperators = (Oid *) palloc(numGroupCols * sizeof(Oid));
724 foreach(l, in_operators)
726 Oid in_oper = lfirst_oid(l);
729 if (!get_compatible_hash_operators(in_oper, NULL, &eq_oper))
730 elog(ERROR, "could not find compatible hash operator for operator %u",
732 groupOperators[groupColPos++] = eq_oper;
736 * Since the Agg node is going to project anyway, we can give it the
737 * minimum output tlist, without any stuff we might have added to the
740 plan = (Plan *) make_agg(root,
741 build_relation_tlist(best_path->path.parent),
753 List *sortList = NIL;
755 /* Create an ORDER BY list to sort the input compatibly */
757 foreach(l, in_operators)
759 Oid in_oper = lfirst_oid(l);
764 sortop = get_ordering_op_for_equality_op(in_oper, false);
765 if (!OidIsValid(sortop)) /* shouldn't happen */
766 elog(ERROR, "could not find ordering operator for equality operator %u",
768 tle = get_tle_by_resno(subplan->targetlist,
769 groupColIdx[groupColPos]);
771 sortcl = makeNode(SortClause);
772 sortcl->tleSortGroupRef = assignSortGroupRef(tle,
773 subplan->targetlist);
774 sortcl->sortop = sortop;
775 sortcl->nulls_first = false;
776 sortList = lappend(sortList, sortcl);
779 plan = (Plan *) make_sort_from_sortclauses(root, sortList, subplan);
780 plan = (Plan *) make_unique(plan, sortList);
783 /* Adjust output size estimate (other fields should be OK already) */
784 plan->plan_rows = best_path->rows;
790 /*****************************************************************************
792 * BASE-RELATION SCAN METHODS
794 *****************************************************************************/
/*
 * NOTE(review): decimated listing — return type, braces, the tail of the
 * make_seqscan() call, and the final return are missing. Orders and strips
 * the restriction clauses, builds the SeqScan node, and copies cost/size
 * from the path.
 */
798 * create_seqscan_plan
799 * Returns a seqscan plan for the base relation scanned by 'best_path'
800 * with restriction clauses 'scan_clauses' and targetlist 'tlist'.
803 create_seqscan_plan(PlannerInfo *root, Path *best_path,
804 List *tlist, List *scan_clauses)
807 Index scan_relid = best_path->parent->relid;
809 /* it should be a base rel... */
810 Assert(scan_relid > 0);
811 Assert(best_path->parent->rtekind == RTE_RELATION);
813 /* Sort clauses into best execution order */
814 scan_clauses = order_qual_clauses(root, scan_clauses);
816 /* Reduce RestrictInfo list to bare expressions; ignore pseudoconstants */
817 scan_clauses = extract_actual_clauses(scan_clauses, false);
819 scan_plan = make_seqscan(tlist,
823 copy_path_costsize(&scan_plan->plan, best_path);
/*
 * NOTE(review): decimated listing — braces, several local declarations,
 * loop-control statements (the continue statements after the
 * list_member_ptr and predicate_implied_by tests appear to be missing),
 * and part of the make_indexscan() argument list are absent. Not
 * compilable as shown.
 *
 * create_indexscan_plan: strips indexquals for indexqualorig, fixes them
 * up for the executor (indexkey on the left, index attnos), unions in
 * join clauses for innerjoin scans, filters scan_clauses down to the
 * qpqual (dropping those implied by the indexquals or, for plain SELECT,
 * by a partial index predicate), and builds the IndexScan node with the
 * path's own row estimate.
 */
829 * create_indexscan_plan
830 * Returns an indexscan plan for the base relation scanned by 'best_path'
831 * with restriction clauses 'scan_clauses' and targetlist 'tlist'.
833 * The indexquals list of the path contains implicitly-ANDed qual conditions.
834 * The list can be empty --- then no index restrictions will be applied during
838 create_indexscan_plan(PlannerInfo *root,
839 IndexPath *best_path,
843 List *indexquals = best_path->indexquals;
844 Index baserelid = best_path->path.parent->relid;
845 Oid indexoid = best_path->indexinfo->indexoid;
847 List *stripped_indexquals;
848 List *fixed_indexquals;
850 IndexScan *scan_plan;
852 /* it should be a base rel... */
853 Assert(baserelid > 0);
854 Assert(best_path->path.parent->rtekind == RTE_RELATION);
857 * Build "stripped" indexquals structure (no RestrictInfos) to pass to
858 * executor as indexqualorig
860 stripped_indexquals = get_actual_clauses(indexquals);
863 * The executor needs a copy with the indexkey on the left of each clause
864 * and with index attr numbers substituted for table ones.
866 fixed_indexquals = fix_indexqual_references(indexquals, best_path);
869 * If this is an innerjoin scan, the indexclauses will contain join
870 * clauses that are not present in scan_clauses (since the passed-in value
871 * is just the rel's baserestrictinfo list). We must add these clauses to
872 * scan_clauses to ensure they get checked. In most cases we will remove
873 * the join clauses again below, but if a join clause contains a special
874 * operator, we need to make sure it gets into the scan_clauses.
876 * Note: pointer comparison should be enough to determine RestrictInfo
879 if (best_path->isjoininner)
880 scan_clauses = list_union_ptr(scan_clauses, best_path->indexclauses);
883 * The qpqual list must contain all restrictions not automatically handled
884 * by the index. All the predicates in the indexquals will be checked
885 * (either by the index itself, or by nodeIndexscan.c), but if there are
886 * any "special" operators involved then they must be included in qpqual.
887 * The upshot is that qpqual must contain scan_clauses minus whatever
888 * appears in indexquals.
890 * In normal cases simple pointer equality checks will be enough to spot
891 * duplicate RestrictInfos, so we try that first. In some situations
892 * (particularly with OR'd index conditions) we may have scan_clauses that
893 * are not equal to, but are logically implied by, the index quals; so we
894 * also try a predicate_implied_by() check to see if we can discard quals
895 * that way. (predicate_implied_by assumes its first input contains only
896 * immutable functions, so we have to check that.)
898 * We can also discard quals that are implied by a partial index's
899 * predicate, but only in a plain SELECT; when scanning a target relation
900 * of UPDATE/DELETE/SELECT FOR UPDATE, we must leave such quals in the
901 * plan so that they'll be properly rechecked by EvalPlanQual testing.
904 foreach(l, scan_clauses)
906 RestrictInfo *rinfo = (RestrictInfo *) lfirst(l);
908 Assert(IsA(rinfo, RestrictInfo));
909 if (rinfo->pseudoconstant)
910 continue; /* we may drop pseudoconstants here */
911 if (list_member_ptr(indexquals, rinfo))
913 if (!contain_mutable_functions((Node *) rinfo->clause))
915 List *clausel = list_make1(rinfo->clause);
917 if (predicate_implied_by(clausel, indexquals))
919 if (best_path->indexinfo->indpred)
921 if (baserelid != root->parse->resultRelation &&
922 get_rowmark(root->parse, baserelid) == NULL)
923 if (predicate_implied_by(clausel,
924 best_path->indexinfo->indpred))
928 qpqual = lappend(qpqual, rinfo);
931 /* Sort clauses into best execution order */
932 qpqual = order_qual_clauses(root, qpqual);
934 /* Reduce RestrictInfo list to bare expressions; ignore pseudoconstants */
935 qpqual = extract_actual_clauses(qpqual, false);
937 /* Finally ready to build the plan node */
938 scan_plan = make_indexscan(tlist,
944 best_path->indexscandir);
946 copy_path_costsize(&scan_plan->scan.plan, &best_path->path);
947 /* use the indexscan-specific rows estimate, not the parent rel's */
948 scan_plan->scan.plan.plan_rows = best_path->rows;
954 * create_bitmap_scan_plan
955 * Returns a bitmap scan plan for the base relation scanned by 'best_path'
956 * with restriction clauses 'scan_clauses' and targetlist 'tlist'.
958 static BitmapHeapScan *
959 create_bitmap_scan_plan(PlannerInfo *root,
960 BitmapHeapPath *best_path,
964 Index baserelid = best_path->path.parent->relid;
965 Plan *bitmapqualplan;
966 List *bitmapqualorig;
969 BitmapHeapScan *scan_plan;
971 /* it should be a base rel... */
972 Assert(baserelid > 0);
973 Assert(best_path->path.parent->rtekind == RTE_RELATION);
975 /* Process the bitmapqual tree into a Plan tree and qual list */
976 bitmapqualplan = create_bitmap_subplan(root, best_path->bitmapqual,
979 /* Reduce RestrictInfo list to bare expressions; ignore pseudoconstants */
980 scan_clauses = extract_actual_clauses(scan_clauses, false);
983 * If this is a innerjoin scan, the indexclauses will contain join clauses
984 * that are not present in scan_clauses (since the passed-in value is just
985 * the rel's baserestrictinfo list). We must add these clauses to
986 * scan_clauses to ensure they get checked. In most cases we will remove
987 * the join clauses again below, but if a join clause contains a special
988 * operator, we need to make sure it gets into the scan_clauses.
990 if (best_path->isjoininner)
992 scan_clauses = list_concat_unique(scan_clauses, bitmapqualorig);
996 * The qpqual list must contain all restrictions not automatically handled
997 * by the index. All the predicates in the indexquals will be checked
998 * (either by the index itself, or by nodeBitmapHeapscan.c), but if there
999 * are any "special" operators involved then they must be added to qpqual.
1000 * The upshot is that qpqual must contain scan_clauses minus whatever
1001 * appears in bitmapqualorig.
1003 * In normal cases simple equal() checks will be enough to spot duplicate
1004 * clauses, so we try that first. In some situations (particularly with
1005 * OR'd index conditions) we may have scan_clauses that are not equal to,
1006 * but are logically implied by, the index quals; so we also try a
1007 * predicate_implied_by() check to see if we can discard quals that way.
1008 * (predicate_implied_by assumes its first input contains only immutable
1009 * functions, so we have to check that.)
1011 * Unlike create_indexscan_plan(), we need take no special thought here
1012 * for partial index predicates; this is because the predicate conditions
1013 * are already listed in bitmapqualorig. Bitmap scans have to do it that
1014 * way because predicate conditions need to be rechecked if the scan's
1015 * bitmap becomes lossy.
1018 foreach(l, scan_clauses)
1020 Node *clause = (Node *) lfirst(l);
1022 if (list_member(bitmapqualorig, clause))
1024 if (!contain_mutable_functions(clause))
1026 List *clausel = list_make1(clause);
1028 if (predicate_implied_by(clausel, bitmapqualorig))
1031 qpqual = lappend(qpqual, clause);
1034 /* Sort clauses into best execution order */
1035 qpqual = order_qual_clauses(root, qpqual);
1038 * When dealing with special operators, we will at this point
1039 * have duplicate clauses in qpqual and bitmapqualorig. We may as well
1040 * drop 'em from bitmapqualorig, since there's no point in making the
1043 bitmapqualorig = list_difference_ptr(bitmapqualorig, qpqual);
1045 /* Finally ready to build the plan node */
1046 scan_plan = make_bitmap_heapscan(tlist,
1052 copy_path_costsize(&scan_plan->scan.plan, &best_path->path);
1053 /* use the indexscan-specific rows estimate, not the parent rel's */
1054 scan_plan->scan.plan.plan_rows = best_path->rows;
/*
 * NOTE(review): decimated listing — braces, recursion-output declarations
 * (subplan/subqual locals), the *qual assignments for the AND case, and
 * several condition lines are missing. Not compilable as shown.
 *
 * create_bitmap_subplan: recursively translates a bitmapqual Path tree
 * (BitmapAndPath / BitmapOrPath / IndexPath) into a BitmapAnd / BitmapOr /
 * BitmapIndexScan plan tree, accumulating the implicit-AND recheck qual
 * list as a byproduct (including non-redundant partial-index predicates
 * in the IndexPath case). Costs are transplanted from the paths and
 * plan_width is set to 0 since a bitmap has no tuple width.
 */
1060 * Given a bitmapqual tree, generate the Plan tree that implements it
1062 * As a byproduct, we also return in *qual a qual list (in implicit-AND
1063 * form, without RestrictInfos) describing the generated indexqual
1064 * conditions, as needed for rechecking heap tuples in lossy cases.
1065 * This list also includes partial-index predicates, because we have to
1066 * recheck predicates as well as index conditions if the scan's bitmap
1069 * Note: if you find yourself changing this, you probably need to change
1070 * make_restrictinfo_from_bitmapqual too.
1073 create_bitmap_subplan(PlannerInfo *root, Path *bitmapqual,
1078 if (IsA(bitmapqual, BitmapAndPath))
1080 BitmapAndPath *apath = (BitmapAndPath *) bitmapqual;
1081 List *subplans = NIL;
1082 List *subquals = NIL;
1086 * There may well be redundant quals among the subplans, since a
1087 * top-level WHERE qual might have gotten used to form several
1088 * different index quals. We don't try exceedingly hard to eliminate
1089 * redundancies, but we do eliminate obvious duplicates by using
1090 * list_concat_unique.
1092 foreach(l, apath->bitmapquals)
1097 subplan = create_bitmap_subplan(root, (Path *) lfirst(l),
1099 subplans = lappend(subplans, subplan);
1100 subquals = list_concat_unique(subquals, subqual);
1102 plan = (Plan *) make_bitmap_and(subplans);
1103 plan->startup_cost = apath->path.startup_cost;
1104 plan->total_cost = apath->path.total_cost;
1106 clamp_row_est(apath->bitmapselectivity * apath->path.parent->tuples);
1107 plan->plan_width = 0; /* meaningless */
1110 else if (IsA(bitmapqual, BitmapOrPath))
1112 BitmapOrPath *opath = (BitmapOrPath *) bitmapqual;
1113 List *subplans = NIL;
1114 List *subquals = NIL;
1115 bool const_true_subqual = false;
1119 * Here, we only detect qual-free subplans. A qual-free subplan would
1120 * cause us to generate "... OR true ..." which we may as well reduce
1121 * to just "true". We do not try to eliminate redundant subclauses
1122 * because (a) it's not as likely as in the AND case, and (b) we might
1123 * well be working with hundreds or even thousands of OR conditions,
1124 * perhaps from a long IN list. The performance of list_append_unique
1125 * would be unacceptable.
1127 foreach(l, opath->bitmapquals)
1132 subplan = create_bitmap_subplan(root, (Path *) lfirst(l),
1134 subplans = lappend(subplans, subplan);
1136 const_true_subqual = true;
1137 else if (!const_true_subqual)
1138 subquals = lappend(subquals,
1139 make_ands_explicit(subqual));
1143 * In the presence of ScalarArrayOpExpr quals, we might have built
1144 * BitmapOrPaths with just one subpath; don't add an OR step.
1146 if (list_length(subplans) == 1)
1148 plan = (Plan *) linitial(subplans);
1152 plan = (Plan *) make_bitmap_or(subplans);
1153 plan->startup_cost = opath->path.startup_cost;
1154 plan->total_cost = opath->path.total_cost;
1156 clamp_row_est(opath->bitmapselectivity * opath->path.parent->tuples);
1157 plan->plan_width = 0; /* meaningless */
1161 * If there were constant-TRUE subquals, the OR reduces to constant
1162 * TRUE. Also, avoid generating one-element ORs, which could happen
1163 * due to redundancy elimination or ScalarArrayOpExpr quals.
1165 if (const_true_subqual)
1167 else if (list_length(subquals) <= 1)
1170 *qual = list_make1(make_orclause(subquals));
1172 else if (IsA(bitmapqual, IndexPath))
1174 IndexPath *ipath = (IndexPath *) bitmapqual;
1178 /* Use the regular indexscan plan build machinery... */
1179 iscan = create_indexscan_plan(root, ipath, NIL, NIL);
1180 /* then convert to a bitmap indexscan */
1181 plan = (Plan *) make_bitmap_indexscan(iscan->scan.scanrelid,
1184 iscan->indexqualorig);
1185 plan->startup_cost = 0.0;
1186 plan->total_cost = ipath->indextotalcost;
1188 clamp_row_est(ipath->indexselectivity * ipath->path.parent->tuples);
1189 plan->plan_width = 0; /* meaningless */
1190 *qual = get_actual_clauses(ipath->indexclauses);
1191 foreach(l, ipath->indexinfo->indpred)
1193 Expr *pred = (Expr *) lfirst(l);
1196 * We know that the index predicate must have been implied by the
1197 * query condition as a whole, but it may or may not be implied by
1198 * the conditions that got pushed into the bitmapqual. Avoid
1199 * generating redundant conditions.
1201 if (!predicate_implied_by(list_make1(pred), ipath->indexclauses))
1202 *qual = lappend(*qual, pred);
1207 elog(ERROR, "unrecognized node type: %d", nodeTag(bitmapqual));
1208 plan = NULL; /* keep compiler quiet */
1215 * create_tidscan_plan
1216 * Returns a tidscan plan for the base relation scanned by 'best_path'
1217 * with restriction clauses 'scan_clauses' and targetlist 'tlist'.
1220 create_tidscan_plan(PlannerInfo *root, TidPath *best_path,
1221 List *tlist, List *scan_clauses)
1224 Index scan_relid = best_path->path.parent->relid;
1227 /* it should be a base rel... */
1228 Assert(scan_relid > 0);
1229 Assert(best_path->path.parent->rtekind == RTE_RELATION);
1231 /* Sort clauses into best execution order */
1232 scan_clauses = order_qual_clauses(root, scan_clauses);
1234 /* Reduce RestrictInfo list to bare expressions; ignore pseudoconstants */
1235 scan_clauses = extract_actual_clauses(scan_clauses, false);
1238 * Remove any clauses that are TID quals. This is a bit tricky since the
1239 * tidquals list has implicit OR semantics.
/*
 * A multi-element tidquals list means "tid = a OR tid = b ...", so we must
 * build an explicit OR clause before list_difference() can recognize a
 * matching scan clause and drop it as redundant.
 */
1241 ortidquals = best_path->tidquals;
1242 if (list_length(ortidquals) > 1)
1243 ortidquals = list_make1(make_orclause(ortidquals));
1244 scan_clauses = list_difference(scan_clauses, ortidquals);
1246 scan_plan = make_tidscan(tlist,
1249 best_path->tidquals);
/* Propagate the path's cost/rowcount estimates to the plan for EXPLAIN */
1251 copy_path_costsize(&scan_plan->scan.plan, &best_path->path);
1257 * create_subqueryscan_plan
1258 * Returns a subqueryscan plan for the base relation scanned by 'best_path'
1259 * with restriction clauses 'scan_clauses' and targetlist 'tlist'.
1261 static SubqueryScan *
1262 create_subqueryscan_plan(PlannerInfo *root, Path *best_path,
1263 List *tlist, List *scan_clauses)
1265 SubqueryScan *scan_plan;
1266 Index scan_relid = best_path->parent->relid;
1268 /* it should be a subquery base rel... */
1269 Assert(scan_relid > 0);
1270 Assert(best_path->parent->rtekind == RTE_SUBQUERY);
1272 /* Sort clauses into best execution order */
1273 scan_clauses = order_qual_clauses(root, scan_clauses);
1275 /* Reduce RestrictInfo list to bare expressions; ignore pseudoconstants */
1276 scan_clauses = extract_actual_clauses(scan_clauses, false);
/*
 * The subquery was planned separately; its plan tree and range table are
 * stashed on the RelOptInfo (subplan/subrtable) and just attached here.
 */
1278 scan_plan = make_subqueryscan(tlist,
1281 best_path->parent->subplan,
1282 best_path->parent->subrtable);
1284 copy_path_costsize(&scan_plan->scan.plan, best_path);
1290 * create_functionscan_plan
1291 * Returns a functionscan plan for the base relation scanned by 'best_path'
1292 * with restriction clauses 'scan_clauses' and targetlist 'tlist'.
1294 static FunctionScan *
1295 create_functionscan_plan(PlannerInfo *root, Path *best_path,
1296 List *tlist, List *scan_clauses)
1298 FunctionScan *scan_plan;
1299 Index scan_relid = best_path->parent->relid;
1302 /* it should be a function base rel... */
1303 Assert(scan_relid > 0);
/* Fetch the RTE to get the function expression and result column info */
1304 rte = planner_rt_fetch(scan_relid, root);
1305 Assert(rte->rtekind == RTE_FUNCTION);
1307 /* Sort clauses into best execution order */
1308 scan_clauses = order_qual_clauses(root, scan_clauses);
1310 /* Reduce RestrictInfo list to bare expressions; ignore pseudoconstants */
1311 scan_clauses = extract_actual_clauses(scan_clauses, false);
1313 scan_plan = make_functionscan(tlist, scan_clauses, scan_relid,
1315 rte->eref->colnames,
1317 rte->funccoltypmods);
1319 copy_path_costsize(&scan_plan->scan.plan, best_path);
1325 * create_valuesscan_plan
1326 * Returns a valuesscan plan for the base relation scanned by 'best_path'
1327 * with restriction clauses 'scan_clauses' and targetlist 'tlist'.
1330 create_valuesscan_plan(PlannerInfo *root, Path *best_path,
1331 List *tlist, List *scan_clauses)
1333 ValuesScan *scan_plan;
1334 Index scan_relid = best_path->parent->relid;
1337 /* it should be a values base rel... */
1338 Assert(scan_relid > 0);
/* Fetch the RTE to obtain the VALUES lists for the scan node */
1339 rte = planner_rt_fetch(scan_relid, root);
1340 Assert(rte->rtekind == RTE_VALUES);
1342 /* Sort clauses into best execution order */
1343 scan_clauses = order_qual_clauses(root, scan_clauses);
1345 /* Reduce RestrictInfo list to bare expressions; ignore pseudoconstants */
1346 scan_clauses = extract_actual_clauses(scan_clauses, false);
1348 scan_plan = make_valuesscan(tlist, scan_clauses, scan_relid,
1351 copy_path_costsize(&scan_plan->scan.plan, best_path);
1356 /*****************************************************************************
1360 *****************************************************************************/
/*
 * create_nestloop_plan
 *	  Build a NestLoop join plan from the selected NestPath.  When the inner
 *	  side is a parameterized index or bitmap scan, join clauses already
 *	  enforced by that scan are stripped from the qpquals to avoid
 *	  re-checking them at the join node.
 */
1363 create_nestloop_plan(PlannerInfo *root,
1364 NestPath *best_path,
1368 List *tlist = build_relation_tlist(best_path->path.parent);
1369 List *joinrestrictclauses = best_path->joinrestrictinfo;
1372 NestLoop *join_plan;
1374 if (IsA(best_path->innerjoinpath, IndexPath))
1377 * An index is being used to reduce the number of tuples scanned in
1378 * the inner relation. If there are join clauses being used with the
1379 * index, we may remove those join clauses from the list of clauses
1380 * that have to be checked as qpquals at the join node.
1382 * We can also remove any join clauses that are redundant with those
1383 * being used in the index scan; this check is needed because
1384 * find_eclass_clauses_for_index_join() may emit different clauses
1385 * than generate_join_implied_equalities() did.
1387 * We can skip this if the index path is an ordinary indexpath and not
1388 * a special innerjoin path, since it then wouldn't be using any join
1391 IndexPath *innerpath = (IndexPath *) best_path->innerjoinpath;
1393 if (innerpath->isjoininner)
1394 joinrestrictclauses =
1395 select_nonredundant_join_clauses(root,
1396 joinrestrictclauses,
1397 innerpath->indexclauses);
1399 else if (IsA(best_path->innerjoinpath, BitmapHeapPath))
1402 * Same deal for bitmapped index scans.
1404 * Note: both here and above, we ignore any implicit index
1405 * restrictions associated with the use of partial indexes. This is
1406 * OK because we're only trying to prove we can dispense with some
1407 * join quals; failing to prove that doesn't result in an incorrect
1408 * plan. It is the right way to proceed because adding more quals to
1409 * the stuff we got from the original query would just make it harder
1410 * to detect duplication. (Also, to change this we'd have to be wary
1411 * of UPDATE/DELETE/SELECT FOR UPDATE target relations; see notes
1412 * above about EvalPlanQual.)
1414 BitmapHeapPath *innerpath = (BitmapHeapPath *) best_path->innerjoinpath;
1416 if (innerpath->isjoininner)
1418 List *bitmapclauses;
/* Reconstruct RestrictInfos from the bitmap qual tree for comparison */
1421 make_restrictinfo_from_bitmapqual(innerpath->bitmapqual,
1424 joinrestrictclauses =
1425 select_nonredundant_join_clauses(root,
1426 joinrestrictclauses,
1431 /* Sort join qual clauses into best execution order */
1432 joinrestrictclauses = order_qual_clauses(root, joinrestrictclauses);
1434 /* Get the join qual clauses (in plain expression form) */
1435 /* Any pseudoconstant clauses are ignored here */
1436 if (IS_OUTER_JOIN(best_path->jointype))
/* Outer joins must distinguish joinquals from other (filter) quals */
1438 extract_actual_join_clauses(joinrestrictclauses,
1439 &joinclauses, &otherclauses);
1443 /* We can treat all clauses alike for an inner join */
1444 joinclauses = extract_actual_clauses(joinrestrictclauses, false);
1448 join_plan = make_nestloop(tlist,
1453 best_path->jointype);
1455 copy_path_costsize(&join_plan->join.plan, &best_path->path);
/*
 * create_mergejoin_plan
 *	  Build a MergeJoin plan from the selected MergePath: extract and order
 *	  the join quals, add explicit Sort (and possibly Material) nodes on the
 *	  inputs if needed, and compute the opfamily/strategy/nullsfirst arrays
 *	  the executor requires for each mergeclause.
 */
1461 create_mergejoin_plan(PlannerInfo *root,
1462 MergePath *best_path,
1466 List *tlist = build_relation_tlist(best_path->jpath.path.parent);
1470 List *outerpathkeys;
1471 List *innerpathkeys;
1474 int *mergestrategies;
1475 bool *mergenullsfirst;
1476 MergeJoin *join_plan;
1478 EquivalenceClass *lastoeclass;
1479 EquivalenceClass *lastieclass;
1486 /* Sort join qual clauses into best execution order */
1487 /* NB: do NOT reorder the mergeclauses */
1488 joinclauses = order_qual_clauses(root, best_path->jpath.joinrestrictinfo);
1490 /* Get the join qual clauses (in plain expression form) */
1491 /* Any pseudoconstant clauses are ignored here */
1492 if (IS_OUTER_JOIN(best_path->jpath.jointype))
1494 extract_actual_join_clauses(joinclauses,
1495 &joinclauses, &otherclauses);
1499 /* We can treat all clauses alike for an inner join */
1500 joinclauses = extract_actual_clauses(joinclauses, false);
1505 * Remove the mergeclauses from the list of join qual clauses, leaving the
1506 * list of quals that must be checked as qpquals.
1508 mergeclauses = get_actual_clauses(best_path->path_mergeclauses);
1509 joinclauses = list_difference(joinclauses, mergeclauses);
1512 * Rearrange mergeclauses, if needed, so that the outer variable is always
1513 * on the left; mark the mergeclause restrictinfos with correct
1514 * outer_is_left status.
1516 mergeclauses = get_switched_clauses(best_path->path_mergeclauses,
1517 best_path->jpath.outerjoinpath->parent->relids);
1520 * Create explicit sort nodes for the outer and inner join paths if
1521 * necessary. The sort cost was already accounted for in the path. Make
1522 * sure there are no excess columns in the inputs if sorting.
1524 if (best_path->outersortkeys)
1526 disuse_physical_tlist(outer_plan, best_path->jpath.outerjoinpath);
1527 outer_plan = (Plan *)
1528 make_sort_from_pathkeys(root,
1530 best_path->outersortkeys,
1532 outerpathkeys = best_path->outersortkeys;
1535 outerpathkeys = best_path->jpath.outerjoinpath->pathkeys;
1537 if (best_path->innersortkeys)
1539 disuse_physical_tlist(inner_plan, best_path->jpath.innerjoinpath);
1540 inner_plan = (Plan *)
1541 make_sort_from_pathkeys(root,
1543 best_path->innersortkeys,
1545 innerpathkeys = best_path->innersortkeys;
1548 innerpathkeys = best_path->jpath.innerjoinpath->pathkeys;
1551 * If inner plan is a sort that is expected to spill to disk, add a
1552 * materialize node to shield it from the need to handle mark/restore.
1553 * This will allow it to perform the last merge pass on-the-fly, while in
1554 * most cases not requiring the materialize to spill to disk.
1556 * XXX really, Sort oughta do this for itself, probably, to avoid the
1557 * overhead of a separate plan node.
1559 if (IsA(inner_plan, Sort) &&
1560 sort_exceeds_work_mem((Sort *) inner_plan))
1562 Plan *matplan = (Plan *) make_material(inner_plan);
1565 * We assume the materialize will not spill to disk, and therefore
1566 * charge just cpu_tuple_cost per tuple.
1568 copy_plan_costsize(matplan, inner_plan);
1569 matplan->total_cost += cpu_tuple_cost * matplan->plan_rows;
1571 inner_plan = matplan;
1575 * Compute the opfamily/strategy/nullsfirst arrays needed by the executor.
1576 * The information is in the pathkeys for the two inputs, but we need to
1577 * be careful about the possibility of mergeclauses sharing a pathkey
1578 * (compare find_mergeclauses_for_pathkeys()).
1580 nClauses = list_length(mergeclauses);
1581 Assert(nClauses == list_length(best_path->path_mergeclauses));
1582 mergefamilies = (Oid *) palloc(nClauses * sizeof(Oid));
1583 mergestrategies = (int *) palloc(nClauses * sizeof(int));
1584 mergenullsfirst = (bool *) palloc(nClauses * sizeof(bool));
1590 lop = list_head(outerpathkeys);
1591 lip = list_head(innerpathkeys);
/*
 * Walk the mergeclauses in parallel with the outer/inner pathkey lists.
 * Because consecutive mergeclauses may share an equivalence class (and
 * hence a pathkey), we advance each pathkey list only when the clause's
 * eclass differs from the last one seen on that side.
 */
1593 foreach(lc, best_path->path_mergeclauses)
1595 RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
1596 EquivalenceClass *oeclass;
1597 EquivalenceClass *ieclass;
1599 /* fetch outer/inner eclass from mergeclause */
1600 Assert(IsA(rinfo, RestrictInfo));
1601 if (rinfo->outer_is_left)
1603 oeclass = rinfo->left_ec;
1604 ieclass = rinfo->right_ec;
1608 oeclass = rinfo->right_ec;
1609 ieclass = rinfo->left_ec;
1611 Assert(oeclass != NULL);
1612 Assert(ieclass != NULL);
1614 /* should match current or next pathkeys */
1615 /* we check this carefully for debugging reasons */
1616 if (oeclass != lastoeclass)
1619 elog(ERROR, "too few pathkeys for mergeclauses");
1620 opathkey = (PathKey *) lfirst(lop);
1622 lastoeclass = opathkey->pk_eclass;
1623 if (oeclass != lastoeclass)
1624 elog(ERROR, "outer pathkeys do not match mergeclause");
1626 if (ieclass != lastieclass)
1629 elog(ERROR, "too few pathkeys for mergeclauses");
1630 ipathkey = (PathKey *) lfirst(lip);
1632 lastieclass = ipathkey->pk_eclass;
1633 if (ieclass != lastieclass)
1634 elog(ERROR, "inner pathkeys do not match mergeclause");
1636 /* pathkeys should match each other too (more debugging) */
1637 if (opathkey->pk_opfamily != ipathkey->pk_opfamily ||
1638 opathkey->pk_strategy != ipathkey->pk_strategy ||
1639 opathkey->pk_nulls_first != ipathkey->pk_nulls_first)
1640 elog(ERROR, "left and right pathkeys do not match in mergejoin");
1642 /* OK, save info for executor */
1643 mergefamilies[i] = opathkey->pk_opfamily;
1644 mergestrategies[i] = opathkey->pk_strategy;
1645 mergenullsfirst[i] = opathkey->pk_nulls_first;
1651 * Now we can build the mergejoin node.
1653 join_plan = make_mergejoin(tlist,
1662 best_path->jpath.jointype);
1664 copy_path_costsize(&join_plan->join.plan, &best_path->jpath.path);
/*
 * create_hashjoin_plan
 *	  Build a HashJoin plan (a Hash node over the inner input, plus the join
 *	  node itself) from the selected HashPath.  Hash clauses are separated
 *	  from the remaining qpquals and commuted so the outer var is on the left.
 */
1670 create_hashjoin_plan(PlannerInfo *root,
1671 HashPath *best_path,
1675 List *tlist = build_relation_tlist(best_path->jpath.path.parent);
1679 HashJoin *join_plan;
1682 /* Sort join qual clauses into best execution order */
1683 joinclauses = order_qual_clauses(root, best_path->jpath.joinrestrictinfo);
1684 /* There's no point in sorting the hash clauses ... */
1686 /* Get the join qual clauses (in plain expression form) */
1687 /* Any pseudoconstant clauses are ignored here */
1688 if (IS_OUTER_JOIN(best_path->jpath.jointype))
1690 extract_actual_join_clauses(joinclauses,
1691 &joinclauses, &otherclauses);
1695 /* We can treat all clauses alike for an inner join */
1696 joinclauses = extract_actual_clauses(joinclauses, false);
1701 * Remove the hashclauses from the list of join qual clauses, leaving the
1702 * list of quals that must be checked as qpquals.
1704 hashclauses = get_actual_clauses(best_path->path_hashclauses);
1705 joinclauses = list_difference(joinclauses, hashclauses);
1708 * Rearrange hashclauses, if needed, so that the outer variable is always
1711 hashclauses = get_switched_clauses(best_path->path_hashclauses,
1712 best_path->jpath.outerjoinpath->parent->relids);
1714 /* We don't want any excess columns in the hashed tuples */
1715 disuse_physical_tlist(inner_plan, best_path->jpath.innerjoinpath);
1718 * Build the hash node and hash join node.
1720 hash_plan = make_hash(inner_plan);
1721 join_plan = make_hashjoin(tlist,
1727 best_path->jpath.jointype);
1729 copy_path_costsize(&join_plan->join.plan, &best_path->jpath.path);
1735 /*****************************************************************************
1737 * SUPPORTING ROUTINES
1739 *****************************************************************************/
1742 * fix_indexqual_references
1743 * Adjust indexqual clauses to the form the executor's indexqual
1746 * We have three tasks here:
1747 * * Remove RestrictInfo nodes from the input clauses.
1748 * * Index keys must be represented by Var nodes with varattno set to the
1749 * index's attribute number, not the attribute number in the original rel.
1750 * * If the index key is on the right, commute the clause to put it on the
1753 * The result is a modified copy of the indexquals list --- the
1754 * original is not changed. Note also that the copy shares no substructure
1755 * with the original; this is needed in case there is a subplan in it (we need
1756 * two separate copies of the subplan tree, or things will go awry).
1759 fix_indexqual_references(List *indexquals, IndexPath *index_path)
1761 IndexOptInfo *index = index_path->indexinfo;
1762 List *fixed_indexquals;
1765 fixed_indexquals = NIL;
1768 * For each qual clause, commute if needed to put the indexkey operand on
1769 * the left, and then fix its varattno. (We do not need to change the
1770 * other side of the clause.)
1772 foreach(l, indexquals)
1774 RestrictInfo *rinfo = (RestrictInfo *) lfirst(l);
1777 Assert(IsA(rinfo, RestrictInfo));
1780 * Make a copy that will become the fixed clause.
1782 * We used to try to do a shallow copy here, but that fails if there
1783 * is a subplan in the arguments of the opclause. So just do a full
1786 clause = (Expr *) copyObject((Node *) rinfo->clause);
/* Four supported qual shapes: OpExpr, RowCompareExpr, ScalarArrayOpExpr, NullTest */
1788 if (IsA(clause, OpExpr))
1790 OpExpr *op = (OpExpr *) clause;
1792 if (list_length(op->args) != 2)
1793 elog(ERROR, "indexqual clause is not binary opclause");
1796 * Check to see if the indexkey is on the right; if so, commute
1797 * the clause. The indexkey should be the side that refers to
1798 * (only) the base relation.
1800 if (!bms_equal(rinfo->left_relids, index->rel->relids))
1804 * Now, determine which index attribute this is and change the
1805 * indexkey operand as needed.
1807 linitial(op->args) = fix_indexqual_operand(linitial(op->args),
1810 else if (IsA(clause, RowCompareExpr))
1812 RowCompareExpr *rc = (RowCompareExpr *) clause;
1816 * Check to see if the indexkey is on the right; if so, commute
1817 * the clause. The indexkey should be the side that refers to
1818 * (only) the base relation.
1820 if (!bms_overlap(pull_varnos(linitial(rc->largs)),
1821 index->rel->relids))
1822 CommuteRowCompareExpr(rc);
1825 * For each column in the row comparison, determine which index
1826 * attribute this is and change the indexkey operand as needed.
1828 foreach(lc, rc->largs)
1830 lfirst(lc) = fix_indexqual_operand(lfirst(lc),
1834 else if (IsA(clause, ScalarArrayOpExpr))
1836 ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) clause;
1838 /* Never need to commute... */
1841 * Determine which index attribute this is and change the
1842 * indexkey operand as needed.
1844 linitial(saop->args) = fix_indexqual_operand(linitial(saop->args),
1847 else if (IsA(clause, NullTest))
1849 NullTest *nt = (NullTest *) clause;
1851 Assert(nt->nulltesttype == IS_NULL);
1852 nt->arg = (Expr *) fix_indexqual_operand((Node *) nt->arg,
1856 elog(ERROR, "unsupported indexqual type: %d",
1857 (int) nodeTag(clause));
1859 fixed_indexquals = lappend(fixed_indexquals, clause);
1862 return fixed_indexquals;
/*
 * fix_indexqual_operand
 *	  Convert an indexkey operand (a Var of the base rel, or an index
 *	  expression) into a Var whose varattno is the index column position.
 *	  Errors out if the operand matches no index column.
 */
1866 fix_indexqual_operand(Node *node, IndexOptInfo *index)
1869 * We represent index keys by Var nodes having the varno of the base table
1870 * but varattno equal to the index's attribute number (index column
1871 * position). This is a bit hokey ... would be cleaner to use a
1872 * special-purpose node type that could not be mistaken for a regular Var.
1873 * But it will do for now.
1877 ListCell *indexpr_item;
1880 * Remove any binary-compatible relabeling of the indexkey
1882 if (IsA(node, RelabelType))
1883 node = (Node *) ((RelabelType *) node)->arg;
1885 if (IsA(node, Var) &&
1886 ((Var *) node)->varno == index->rel->relid)
1888 /* Try to match against simple index columns */
1889 int varatt = ((Var *) node)->varattno;
1893 for (pos = 0; pos < index->ncolumns; pos++)
1895 if (index->indexkeys[pos] == varatt)
1897 result = (Var *) copyObject(node);
/* Index columns are numbered from 1, hence pos + 1 */
1898 result->varattno = pos + 1;
1899 return (Node *) result;
1905 /* Try to match against index expressions */
1906 indexpr_item = list_head(index->indexprs);
1907 for (pos = 0; pos < index->ncolumns; pos++)
1909 if (index->indexkeys[pos] == 0)
/* indexkeys[pos] == 0 marks an expression column; consume next indexpr */
1913 if (indexpr_item == NULL)
1914 elog(ERROR, "too few entries in indexprs list");
1915 indexkey = (Node *) lfirst(indexpr_item);
1916 if (indexkey && IsA(indexkey, RelabelType))
1917 indexkey = (Node *) ((RelabelType *) indexkey)->arg;
1918 if (equal(node, indexkey))
1921 result = makeVar(index->rel->relid, pos + 1,
1922 exprType(lfirst(indexpr_item)), -1,
1924 return (Node *) result;
1926 indexpr_item = lnext(indexpr_item);
1931 elog(ERROR, "node is not an index attribute");
1932 return NULL; /* keep compiler quiet */
1936 * get_switched_clauses
1937 * Given a list of merge or hash joinclauses (as RestrictInfo nodes),
1938 * extract the bare clauses, and rearrange the elements within the
1939 * clauses, if needed, so the outer join variable is on the left and
1940 * the inner is on the right. The original clause data structure is not
1941 * touched; a modified list is returned. We do, however, set the transient
1942 * outer_is_left field in each RestrictInfo to show which side was which.
1945 get_switched_clauses(List *clauses, Relids outerrelids)
1952 RestrictInfo *restrictinfo = (RestrictInfo *) lfirst(l);
1953 OpExpr *clause = (OpExpr *) restrictinfo->clause;
1955 Assert(is_opclause(clause));
1956 if (bms_is_subset(restrictinfo->right_relids, outerrelids))
1959 * Duplicate just enough of the structure to allow commuting the
1960 * clause without changing the original list. Could use
1961 * copyObject, but a complete deep copy is overkill.
1963 OpExpr *temp = makeNode(OpExpr);
1965 temp->opno = clause->opno;
/* opfuncid is reset; it will be re-looked-up from opno when needed */
1966 temp->opfuncid = InvalidOid;
1967 temp->opresulttype = clause->opresulttype;
1968 temp->opretset = clause->opretset;
1969 temp->args = list_copy(clause->args);
1970 /* Commute it --- note this modifies the temp node in-place. */
1971 CommuteOpExpr(temp);
1972 t_list = lappend(t_list, temp);
1973 restrictinfo->outer_is_left = false;
1977 Assert(bms_is_subset(restrictinfo->left_relids, outerrelids));
1978 t_list = lappend(t_list, clause);
1979 restrictinfo->outer_is_left = true;
1986 * order_qual_clauses
1987 * Given a list of qual clauses that will all be evaluated at the same
1988 * plan node, sort the list into the order we want to check the quals
1991 * Ideally the order should be driven by a combination of execution cost and
1992 * selectivity, but it's not immediately clear how to account for both,
1993 * and given the uncertainty of the estimates the reliability of the decisions
1994 * would be doubtful anyway. So we just order by estimated per-tuple cost,
1995 * being careful not to change the order when (as is often the case) the
1996 * estimates are identical.
1998 * Although this will work on either bare clauses or RestrictInfos, it's
1999 * much faster to apply it to RestrictInfos, since it can re-use cost
2000 * information that is cached in RestrictInfos.
2002 * Note: some callers pass lists that contain entries that will later be
2003 * removed; this is the easiest way to let this routine see RestrictInfos
2004 * instead of bare clauses. It's OK because we only sort by cost, but
2005 * a cost/selectivity combination would likely do the wrong thing.
2008 order_qual_clauses(PlannerInfo *root, List *clauses)
2015 int nitems = list_length(clauses);
2021 /* No need to work hard for 0 or 1 clause */
2026 * Collect the items and costs into an array. This is to avoid repeated
2027 * cost_qual_eval work if the inputs aren't RestrictInfos.
2029 items = (QualItem *) palloc(nitems * sizeof(QualItem));
2031 foreach(lc, clauses)
2033 Node *clause = (Node *) lfirst(lc);
2036 cost_qual_eval_node(&qcost, clause, root);
2037 items[i].clause = clause;
2038 items[i].cost = qcost.per_tuple;
2043 * Sort. We don't use qsort() because it's not guaranteed stable for
2044 * equal keys. The expected number of entries is small enough that a
2045 * simple insertion sort should be good enough.
2047 for (i = 1; i < nitems; i++)
2049 QualItem newitem = items[i];
2052 /* insert newitem into the already-sorted subarray */
2053 for (j = i; j > 0; j--)
/* >= (not >) keeps the sort stable: equal-cost items retain input order */
2055 if (newitem.cost >= items[j - 1].cost)
2057 items[j] = items[j - 1];
2062 /* Convert back to a list */
2064 for (i = 0; i < nitems; i++)
2065 result = lappend(result, items[i].clause);
2071 * Copy cost and size info from a Path node to the Plan node created from it.
2072 * The executor won't use this info, but it's needed by EXPLAIN.
2075 copy_path_costsize(Plan *dest, Path *src)
/* When src is non-NULL copy its estimates; otherwise zero the fields */
2079 dest->startup_cost = src->startup_cost;
2080 dest->total_cost = src->total_cost;
2081 dest->plan_rows = src->parent->rows;
2082 dest->plan_width = src->parent->width;
2086 dest->startup_cost = 0;
2087 dest->total_cost = 0;
2088 dest->plan_rows = 0;
2089 dest->plan_width = 0;
2094 * Copy cost and size info from a lower plan node to an inserted node.
2095 * This is not critical, since the decisions have already been made,
2096 * but it helps produce more reasonable-looking EXPLAIN output.
2097 * (Some callers alter the info after copying it.)
2100 copy_plan_costsize(Plan *dest, Plan *src)
/* When src is non-NULL copy its estimates; otherwise zero the fields */
2104 dest->startup_cost = src->startup_cost;
2105 dest->total_cost = src->total_cost;
2106 dest->plan_rows = src->plan_rows;
2107 dest->plan_width = src->plan_width;
2111 dest->startup_cost = 0;
2112 dest->total_cost = 0;
2113 dest->plan_rows = 0;
2114 dest->plan_width = 0;
2119 /*****************************************************************************
2121 * PLAN NODE BUILDING ROUTINES
2123 * Some of these are exported because they are called to build plan nodes
2124 * in contexts where we're not deriving the plan node from a path node.
2126 *****************************************************************************/
/* make_seqscan: build a SeqScan node over base rel 'scanrelid'. */
2129 make_seqscan(List *qptlist,
2133 SeqScan *node = makeNode(SeqScan);
2134 Plan *plan = &node->plan;
2136 /* cost should be inserted by caller */
2137 plan->targetlist = qptlist;
2138 plan->qual = qpqual;
2139 plan->lefttree = NULL;
2140 plan->righttree = NULL;
2141 node->scanrelid = scanrelid;
/* make_indexscan: build an IndexScan node; indexqual is the fixed
 * (executor-ready) form, indexqualorig the original form for rechecks. */
2147 make_indexscan(List *qptlist,
2152 List *indexqualorig,
2153 ScanDirection indexscandir)
2155 IndexScan *node = makeNode(IndexScan);
2156 Plan *plan = &node->scan.plan;
2158 /* cost should be inserted by caller */
2159 plan->targetlist = qptlist;
2160 plan->qual = qpqual;
2161 plan->lefttree = NULL;
2162 plan->righttree = NULL;
2163 node->scan.scanrelid = scanrelid;
2164 node->indexid = indexid;
2165 node->indexqual = indexqual;
2166 node->indexqualorig = indexqualorig;
2167 node->indexorderdir = indexscandir;
/* make_bitmap_indexscan: build a BitmapIndexScan node (produces a bitmap,
 * so targetlist/qual are unused). */
2172 static BitmapIndexScan *
2173 make_bitmap_indexscan(Index scanrelid,
2176 List *indexqualorig)
2178 BitmapIndexScan *node = makeNode(BitmapIndexScan);
2179 Plan *plan = &node->scan.plan;
2181 /* cost should be inserted by caller */
2182 plan->targetlist = NIL; /* not used */
2183 plan->qual = NIL; /* not used */
2184 plan->lefttree = NULL;
2185 plan->righttree = NULL;
2186 node->scan.scanrelid = scanrelid;
2187 node->indexid = indexid;
2188 node->indexqual = indexqual;
2189 node->indexqualorig = indexqualorig;
/* make_bitmap_heapscan: build a BitmapHeapScan whose lefttree is the
 * bitmap-producing subplan; bitmapqualorig is kept for recheck. */
2194 static BitmapHeapScan *
2195 make_bitmap_heapscan(List *qptlist,
2198 List *bitmapqualorig,
2201 BitmapHeapScan *node = makeNode(BitmapHeapScan);
2202 Plan *plan = &node->scan.plan;
2204 /* cost should be inserted by caller */
2205 plan->targetlist = qptlist;
2206 plan->qual = qpqual;
2207 plan->lefttree = lefttree;
2208 plan->righttree = NULL;
2209 node->scan.scanrelid = scanrelid;
2210 node->bitmapqualorig = bitmapqualorig;
/* make_tidscan: build a TidScan node; tidquals is an implicitly-OR'ed list. */
2216 make_tidscan(List *qptlist,
2221 TidScan *node = makeNode(TidScan);
2222 Plan *plan = &node->scan.plan;
2224 /* cost should be inserted by caller */
2225 plan->targetlist = qptlist;
2226 plan->qual = qpqual;
2227 plan->lefttree = NULL;
2228 plan->righttree = NULL;
2229 node->scan.scanrelid = scanrelid;
2230 node->tidquals = tidquals;
/* make_subqueryscan: build a SubqueryScan wrapping an already-planned
 * subplan; unlike the other constructors it fills in cost itself. */
2236 make_subqueryscan(List *qptlist,
2242 SubqueryScan *node = makeNode(SubqueryScan);
2243 Plan *plan = &node->scan.plan;
2246 * Cost is figured here for the convenience of prepunion.c. Note this is
2247 * only correct for the case where qpqual is empty; otherwise caller
2248 * should overwrite cost with a better estimate.
2250 copy_plan_costsize(plan, subplan);
/* charge one cpu_tuple_cost per row for passing tuples up through the node */
2251 plan->total_cost += cpu_tuple_cost * subplan->plan_rows;
2253 plan->targetlist = qptlist;
2254 plan->qual = qpqual;
2255 plan->lefttree = NULL;
2256 plan->righttree = NULL;
2257 node->scan.scanrelid = scanrelid;
2258 node->subplan = subplan;
2259 node->subrtable = subrtable;
/* make_functionscan: build a FunctionScan node with the function expression
 * and its result-column metadata. */
2264 static FunctionScan *
2265 make_functionscan(List *qptlist,
2271 List *funccoltypmods)
2273 FunctionScan *node = makeNode(FunctionScan);
2274 Plan *plan = &node->scan.plan;
2276 /* cost should be inserted by caller */
2277 plan->targetlist = qptlist;
2278 plan->qual = qpqual;
2279 plan->lefttree = NULL;
2280 plan->righttree = NULL;
2281 node->scan.scanrelid = scanrelid;
2282 node->funcexpr = funcexpr;
2283 node->funccolnames = funccolnames;
2284 node->funccoltypes = funccoltypes;
2285 node->funccoltypmods = funccoltypmods;
/* make_valuesscan: build a ValuesScan node over a list of VALUES rows. */
2291 make_valuesscan(List *qptlist,
2296 ValuesScan *node = makeNode(ValuesScan);
2297 Plan *plan = &node->scan.plan;
2299 /* cost should be inserted by caller */
2300 plan->targetlist = qptlist;
2301 plan->qual = qpqual;
2302 plan->lefttree = NULL;
2303 plan->righttree = NULL;
2304 node->scan.scanrelid = scanrelid;
2305 node->values_lists = values_lists;
/* make_append: build an Append node; cost/rows are aggregated from the
 * subplans (startup = first child's, total/rows summed, width = max). */
2311 make_append(List *appendplans, bool isTarget, List *tlist)
2313 Append *node = makeNode(Append);
2314 Plan *plan = &node->plan;
2318 * Compute cost as sum of subplan costs. We charge nothing extra for the
2319 * Append itself, which perhaps is too optimistic, but since it doesn't do
2320 * any selection or projection, it is a pretty cheap node.
2322 plan->startup_cost = 0;
2323 plan->total_cost = 0;
2324 plan->plan_rows = 0;
2325 plan->plan_width = 0;
2326 foreach(subnode, appendplans)
2328 Plan *subplan = (Plan *) lfirst(subnode);
2330 if (subnode == list_head(appendplans)) /* first node? */
2331 plan->startup_cost = subplan->startup_cost;
2332 plan->total_cost += subplan->total_cost;
2333 plan->plan_rows += subplan->plan_rows;
2334 if (plan->plan_width < subplan->plan_width)
2335 plan->plan_width = subplan->plan_width;
2338 plan->targetlist = tlist;
2340 plan->lefttree = NULL;
2341 plan->righttree = NULL;
2342 node->appendplans = appendplans;
2343 node->isTarget = isTarget;
/* make_bitmap_and: build a BitmapAnd node ANDing the child bitmaps. */
2349 make_bitmap_and(List *bitmapplans)
2351 BitmapAnd *node = makeNode(BitmapAnd);
2352 Plan *plan = &node->plan;
2354 /* cost should be inserted by caller */
2355 plan->targetlist = NIL;
2357 plan->lefttree = NULL;
2358 plan->righttree = NULL;
2359 node->bitmapplans = bitmapplans;
/* make_bitmap_or: build a BitmapOr node ORing the child bitmaps. */
2365 make_bitmap_or(List *bitmapplans)
2367 BitmapOr *node = makeNode(BitmapOr);
2368 Plan *plan = &node->plan;
2370 /* cost should be inserted by caller */
2371 plan->targetlist = NIL;
2373 plan->lefttree = NULL;
2374 plan->righttree = NULL;
2375 node->bitmapplans = bitmapplans;
/* make_nestloop: build a NestLoop join node; joinclauses become joinqual,
 * otherclauses become the plan-level qual. */
2381 make_nestloop(List *tlist,
2388 NestLoop *node = makeNode(NestLoop);
2389 Plan *plan = &node->join.plan;
2391 /* cost should be inserted by caller */
2392 plan->targetlist = tlist;
2393 plan->qual = otherclauses;
2394 plan->lefttree = lefttree;
2395 plan->righttree = righttree;
2396 node->join.jointype = jointype;
2397 node->join.joinqual = joinclauses;
/* make_hashjoin: build a HashJoin node; righttree is expected to be the
 * Hash node over the inner input (see create_hashjoin_plan). */
2403 make_hashjoin(List *tlist,
2411 HashJoin *node = makeNode(HashJoin);
2412 Plan *plan = &node->join.plan;
2414 /* cost should be inserted by caller */
2415 plan->targetlist = tlist;
2416 plan->qual = otherclauses;
2417 plan->lefttree = lefttree;
2418 plan->righttree = righttree;
2419 node->hashclauses = hashclauses;
2420 node->join.jointype = jointype;
2421 node->join.joinqual = joinclauses;
/* make_hash: build a Hash node over 'lefttree', inheriting its cost/size. */
2427 make_hash(Plan *lefttree)
2429 Hash *node = makeNode(Hash);
2430 Plan *plan = &node->plan;
2432 copy_plan_costsize(plan, lefttree);
2435 * For plausibility, make startup & total costs equal total cost of input
2436 * plan; this only affects EXPLAIN display not decisions.
2438 plan->startup_cost = plan->total_cost;
2439 plan->targetlist = lefttree->targetlist;
2441 plan->lefttree = lefttree;
2442 plan->righttree = NULL;
/* make_mergejoin: build a MergeJoin node; the mergeFamilies/Strategies/
 * NullsFirst arrays are parallel to mergeclauses (one entry per clause). */
2448 make_mergejoin(List *tlist,
2453 int *mergestrategies,
2454 bool *mergenullsfirst,
2459 MergeJoin *node = makeNode(MergeJoin);
2460 Plan *plan = &node->join.plan;
2462 /* cost should be inserted by caller */
2463 plan->targetlist = tlist;
2464 plan->qual = otherclauses;
2465 plan->lefttree = lefttree;
2466 plan->righttree = righttree;
2467 node->mergeclauses = mergeclauses;
2468 node->mergeFamilies = mergefamilies;
2469 node->mergeStrategies = mergestrategies;
2470 node->mergeNullsFirst = mergenullsfirst;
2471 node->join.jointype = jointype;
2472 node->join.joinqual = joinclauses;
2478 * make_sort --- basic routine to build a Sort plan node
2480 * Caller must have built the sortColIdx, sortOperators, and nullsFirst
2481 * arrays already. limit_tuples is as for cost_sort (in particular, pass
2485 make_sort(PlannerInfo *root, Plan *lefttree, int numCols,
2486 AttrNumber *sortColIdx, Oid *sortOperators, bool *nullsFirst,
2487 double limit_tuples)
2489 Sort *node = makeNode(Sort);
2490 Plan *plan = &node->plan;
2491 Path sort_path; /* dummy for result of cost_sort */
2493 copy_plan_costsize(plan, lefttree); /* only care about copying size */
2494 cost_sort(&sort_path, root, NIL,
2495 lefttree->total_cost,
2496 lefttree->plan_rows,
2497 lefttree->plan_width,
2499 plan->startup_cost = sort_path.startup_cost;
2500 plan->total_cost = sort_path.total_cost;
2501 plan->targetlist = lefttree->targetlist;
2503 plan->lefttree = lefttree;
2504 plan->righttree = NULL;
2505 node->numCols = numCols;
2506 node->sortColIdx = sortColIdx;
2507 node->sortOperators = sortOperators;
2508 node->nullsFirst = nullsFirst;
2514 * add_sort_column --- utility subroutine for building sort info arrays
2516 * We need this routine because the same column might be selected more than
2517 * once as a sort key column; if so, the extra mentions are redundant.
2519 * Caller is assumed to have allocated the arrays large enough for the
2520 * max possible number of columns. Return value is the new column count.
2523 add_sort_column(AttrNumber colIdx, Oid sortOp, bool nulls_first,
2524 int numCols, AttrNumber *sortColIdx,
2525 Oid *sortOperators, bool *nullsFirst)
2529 for (i = 0; i < numCols; i++)
2532 * Note: we check sortOp because it's conceivable that "ORDER BY foo
2533 * USING <, foo USING <<<" is not redundant, if <<< distinguishes
2534 * values that < considers equal. We need not check nulls_first
2535 * however because a lower-order column with the same sortop but
2536 * opposite nulls direction is redundant.
2538 if (sortColIdx[i] == colIdx &&
2539 sortOperators[numCols] == sortOp)
2541 /* Already sorting by this col, so extra sort key is useless */
2546 /* Add the column */
2547 sortColIdx[numCols] = colIdx;
2548 sortOperators[numCols] = sortOp;
2549 nullsFirst[numCols] = nulls_first;
2554 * make_sort_from_pathkeys
2555 * Create sort plan to sort according to given pathkeys
2557 * 'lefttree' is the node which yields input tuples
2558 * 'pathkeys' is the list of pathkeys by which the result is to be sorted
2559 * 'limit_tuples' is the bound on the number of output tuples;
2562 * We must convert the pathkey information into arrays of sort key column
2563 * numbers and sort operator OIDs.
2565 * If the pathkeys include expressions that aren't simple Vars, we will
2566 * usually need to add resjunk items to the input plan's targetlist to
2567 * compute these expressions (since the Sort node itself won't do it).
2568 * If the input plan type isn't one that can do projections, this means
2569 * adding a Result node just to do the projection.
2572 make_sort_from_pathkeys(PlannerInfo *root, Plan *lefttree, List *pathkeys,
2573 double limit_tuples)
2575 List *tlist = lefttree->targetlist;
2578 AttrNumber *sortColIdx;
2583 * We will need at most list_length(pathkeys) sort columns; possibly less
2585 numsortkeys = list_length(pathkeys);
2586 sortColIdx = (AttrNumber *) palloc(numsortkeys * sizeof(AttrNumber));
2587 sortOperators = (Oid *) palloc(numsortkeys * sizeof(Oid));
2588 nullsFirst = (bool *) palloc(numsortkeys * sizeof(bool));
2592 foreach(i, pathkeys)
2594 PathKey *pathkey = (PathKey *) lfirst(i);
2595 EquivalenceClass *ec = pathkey->pk_eclass;
2596 TargetEntry *tle = NULL;
2597 Oid pk_datatype = InvalidOid;
2601 if (ec->ec_has_volatile)
2604 * If the pathkey's EquivalenceClass is volatile, then it must
2605 * have come from an ORDER BY clause, and we have to match it to
2606 * that same targetlist entry.
2608 if (ec->ec_sortref == 0) /* can't happen */
2609 elog(ERROR, "volatile EquivalenceClass has no sortref");
2610 tle = get_sortgroupref_tle(ec->ec_sortref, tlist);
2612 Assert(list_length(ec->ec_members) == 1);
2613 pk_datatype = ((EquivalenceMember *) linitial(ec->ec_members))->em_datatype;
2618 * Otherwise, we can sort by any non-constant expression listed in
2619 * the pathkey's EquivalenceClass. For now, we take the first one
2620 * that corresponds to an available item in the tlist. If there
2621 * isn't any, use the first one that is an expression in the
2622 * input's vars. (The non-const restriction only matters if the
2623 * EC is below_outer_join; but if it isn't, it won't contain
2624 * consts anyway, else we'd have discarded the pathkey as
2627 * XXX if we have a choice, is there any way of figuring out which
2628 * might be cheapest to execute? (For example, int4lt is likely
2629 * much cheaper to execute than numericlt, but both might appear
2630 * in the same equivalence class...) Not clear that we ever will
2631 * have an interesting choice in practice, so it may not matter.
2633 foreach(j, ec->ec_members)
2635 EquivalenceMember *em = (EquivalenceMember *) lfirst(j);
2637 if (em->em_is_const || em->em_is_child)
2640 tle = tlist_member((Node *) em->em_expr, tlist);
2643 pk_datatype = em->em_datatype;
2644 break; /* found expr already in tlist */
2648 * We can also use it if the pathkey expression is a relabel
2649 * of the tlist entry, or vice versa. This is needed for
2650 * binary-compatible cases (cf. make_pathkey_from_sortinfo).
2651 * We prefer an exact match, though, so we do the basic search
2654 tle = tlist_member_ignore_relabel((Node *) em->em_expr, tlist);
2657 pk_datatype = em->em_datatype;
2658 break; /* found expr already in tlist */
2664 /* No matching tlist item; look for a computable expression */
2665 Expr *sortexpr = NULL;
2667 foreach(j, ec->ec_members)
2669 EquivalenceMember *em = (EquivalenceMember *) lfirst(j);
2673 if (em->em_is_const || em->em_is_child)
2675 sortexpr = em->em_expr;
2676 exprvars = pull_var_clause((Node *) sortexpr, false);
2677 foreach(k, exprvars)
2679 if (!tlist_member_ignore_relabel(lfirst(k), tlist))
2682 list_free(exprvars);
2685 pk_datatype = em->em_datatype;
2686 break; /* found usable expression */
2690 elog(ERROR, "could not find pathkey item to sort");
2693 * Do we need to insert a Result node?
2695 if (!is_projection_capable_plan(lefttree))
2697 /* copy needed so we don't modify input's tlist below */
2698 tlist = copyObject(tlist);
2699 lefttree = (Plan *) make_result(root, tlist, NULL,
2704 * Add resjunk entry to input's tlist
2706 tle = makeTargetEntry(sortexpr,
2707 list_length(tlist) + 1,
2710 tlist = lappend(tlist, tle);
2711 lefttree->targetlist = tlist; /* just in case NIL before */
2716 * Look up the correct sort operator from the PathKey's slightly
2717 * abstracted representation.
2719 sortop = get_opfamily_member(pathkey->pk_opfamily,
2722 pathkey->pk_strategy);
2723 if (!OidIsValid(sortop)) /* should not happen */
2724 elog(ERROR, "could not find member %d(%u,%u) of opfamily %u",
2725 pathkey->pk_strategy, pk_datatype, pk_datatype,
2726 pathkey->pk_opfamily);
2729 * The column might already be selected as a sort key, if the pathkeys
2730 * contain duplicate entries. (This can happen in scenarios where
2731 * multiple mergejoinable clauses mention the same var, for example.)
2732 * So enter it only once in the sort arrays.
2734 numsortkeys = add_sort_column(tle->resno,
2736 pathkey->pk_nulls_first,
2738 sortColIdx, sortOperators, nullsFirst);
2741 Assert(numsortkeys > 0);
2743 return make_sort(root, lefttree, numsortkeys,
2744 sortColIdx, sortOperators, nullsFirst, limit_tuples);
2748 * make_sort_from_sortclauses
2749 * Create sort plan to sort according to given sortclauses
2751 * 'sortcls' is a list of SortClauses
2752 * 'lefttree' is the node which yields input tuples
2755 make_sort_from_sortclauses(PlannerInfo *root, List *sortcls, Plan *lefttree)
2757 List *sub_tlist = lefttree->targetlist;
2760 AttrNumber *sortColIdx;
2765 * We will need at most list_length(sortcls) sort columns; possibly less
2767 numsortkeys = list_length(sortcls);
2768 sortColIdx = (AttrNumber *) palloc(numsortkeys * sizeof(AttrNumber));
2769 sortOperators = (Oid *) palloc(numsortkeys * sizeof(Oid));
2770 nullsFirst = (bool *) palloc(numsortkeys * sizeof(bool));
2776 SortClause *sortcl = (SortClause *) lfirst(l);
2777 TargetEntry *tle = get_sortgroupclause_tle(sortcl, sub_tlist);
2780 * Check for the possibility of duplicate order-by clauses --- the
2781 * parser should have removed 'em, but no point in sorting
2784 numsortkeys = add_sort_column(tle->resno, sortcl->sortop,
2785 sortcl->nulls_first,
2787 sortColIdx, sortOperators, nullsFirst);
2790 Assert(numsortkeys > 0);
2792 return make_sort(root, lefttree, numsortkeys,
2793 sortColIdx, sortOperators, nullsFirst, -1.0);
2797 * make_sort_from_groupcols
2798 * Create sort plan to sort based on grouping columns
2800 * 'groupcls' is the list of GroupClauses
2801 * 'grpColIdx' gives the column numbers to use
2803 * This might look like it could be merged with make_sort_from_sortclauses,
2804 * but presently we *must* use the grpColIdx[] array to locate sort columns,
2805 * because the child plan's tlist is not marked with ressortgroupref info
2806 * appropriate to the grouping node. So, only the sort ordering info
2807 * is used from the GroupClause entries.
2810 make_sort_from_groupcols(PlannerInfo *root,
2812 AttrNumber *grpColIdx,
2815 List *sub_tlist = lefttree->targetlist;
2819 AttrNumber *sortColIdx;
2824 * We will need at most list_length(groupcls) sort columns; possibly less
2826 numsortkeys = list_length(groupcls);
2827 sortColIdx = (AttrNumber *) palloc(numsortkeys * sizeof(AttrNumber));
2828 sortOperators = (Oid *) palloc(numsortkeys * sizeof(Oid));
2829 nullsFirst = (bool *) palloc(numsortkeys * sizeof(bool));
2833 foreach(l, groupcls)
2835 GroupClause *grpcl = (GroupClause *) lfirst(l);
2836 TargetEntry *tle = get_tle_by_resno(sub_tlist, grpColIdx[grpno]);
2839 * Check for the possibility of duplicate group-by clauses --- the
2840 * parser should have removed 'em, but no point in sorting
2843 numsortkeys = add_sort_column(tle->resno, grpcl->sortop,
2846 sortColIdx, sortOperators, nullsFirst);
2850 Assert(numsortkeys > 0);
2852 return make_sort(root, lefttree, numsortkeys,
2853 sortColIdx, sortOperators, nullsFirst, -1.0);
2857 make_material(Plan *lefttree)
2859 Material *node = makeNode(Material);
2860 Plan *plan = &node->plan;
2862 /* cost should be inserted by caller */
2863 plan->targetlist = lefttree->targetlist;
2865 plan->lefttree = lefttree;
2866 plan->righttree = NULL;
2872 * materialize_finished_plan: stick a Material node atop a completed plan
2874 * There are a couple of places where we want to attach a Material node
2875 * after completion of subquery_planner(). This currently requires hackery.
2876 * Since subquery_planner has already run SS_finalize_plan on the subplan
2877 * tree, we have to kluge up parameter lists for the Material node.
2878 * Possibly this could be fixed by postponing SS_finalize_plan processing
2879 * until setrefs.c is run?
2882 materialize_finished_plan(Plan *subplan)
2885 Path matpath; /* dummy for result of cost_material */
2887 matplan = (Plan *) make_material(subplan);
2890 cost_material(&matpath,
2891 subplan->total_cost,
2893 subplan->plan_width);
2894 matplan->startup_cost = matpath.startup_cost;
2895 matplan->total_cost = matpath.total_cost;
2896 matplan->plan_rows = subplan->plan_rows;
2897 matplan->plan_width = subplan->plan_width;
2899 /* parameter kluge --- see comments above */
2900 matplan->extParam = bms_copy(subplan->extParam);
2901 matplan->allParam = bms_copy(subplan->allParam);
2907 make_agg(PlannerInfo *root, List *tlist, List *qual,
2908 AggStrategy aggstrategy,
2909 int numGroupCols, AttrNumber *grpColIdx, Oid *grpOperators,
2910 long numGroups, int numAggs,
2913 Agg *node = makeNode(Agg);
2914 Plan *plan = &node->plan;
2915 Path agg_path; /* dummy for result of cost_agg */
2918 node->aggstrategy = aggstrategy;
2919 node->numCols = numGroupCols;
2920 node->grpColIdx = grpColIdx;
2921 node->grpOperators = grpOperators;
2922 node->numGroups = numGroups;
2924 copy_plan_costsize(plan, lefttree); /* only care about copying size */
2925 cost_agg(&agg_path, root,
2926 aggstrategy, numAggs,
2927 numGroupCols, numGroups,
2928 lefttree->startup_cost,
2929 lefttree->total_cost,
2930 lefttree->plan_rows);
2931 plan->startup_cost = agg_path.startup_cost;
2932 plan->total_cost = agg_path.total_cost;
2935 * We will produce a single output tuple if not grouping, and a tuple per
2938 if (aggstrategy == AGG_PLAIN)
2939 plan->plan_rows = 1;
2941 plan->plan_rows = numGroups;
2944 * We also need to account for the cost of evaluation of the qual (ie, the
2945 * HAVING clause) and the tlist. Note that cost_qual_eval doesn't charge
2946 * anything for Aggref nodes; this is okay since they are really
2947 * comparable to Vars.
2949 * See notes in grouping_planner about why this routine and make_group are
2950 * the only ones in this file that worry about tlist eval cost.
2954 cost_qual_eval(&qual_cost, qual, root);
2955 plan->startup_cost += qual_cost.startup;
2956 plan->total_cost += qual_cost.startup;
2957 plan->total_cost += qual_cost.per_tuple * plan->plan_rows;
2959 cost_qual_eval(&qual_cost, tlist, root);
2960 plan->startup_cost += qual_cost.startup;
2961 plan->total_cost += qual_cost.startup;
2962 plan->total_cost += qual_cost.per_tuple * plan->plan_rows;
2965 plan->targetlist = tlist;
2966 plan->lefttree = lefttree;
2967 plan->righttree = NULL;
2973 make_group(PlannerInfo *root,
2977 AttrNumber *grpColIdx,
2982 Group *node = makeNode(Group);
2983 Plan *plan = &node->plan;
2984 Path group_path; /* dummy for result of cost_group */
2987 node->numCols = numGroupCols;
2988 node->grpColIdx = grpColIdx;
2989 node->grpOperators = grpOperators;
2991 copy_plan_costsize(plan, lefttree); /* only care about copying size */
2992 cost_group(&group_path, root,
2993 numGroupCols, numGroups,
2994 lefttree->startup_cost,
2995 lefttree->total_cost,
2996 lefttree->plan_rows);
2997 plan->startup_cost = group_path.startup_cost;
2998 plan->total_cost = group_path.total_cost;
3000 /* One output tuple per estimated result group */
3001 plan->plan_rows = numGroups;
3004 * We also need to account for the cost of evaluation of the qual (ie, the
3005 * HAVING clause) and the tlist.
3007 * XXX this double-counts the cost of evaluation of any expressions used
3008 * for grouping, since in reality those will have been evaluated at a
3009 * lower plan level and will only be copied by the Group node. Worth
3012 * See notes in grouping_planner about why this routine and make_agg are
3013 * the only ones in this file that worry about tlist eval cost.
3017 cost_qual_eval(&qual_cost, qual, root);
3018 plan->startup_cost += qual_cost.startup;
3019 plan->total_cost += qual_cost.startup;
3020 plan->total_cost += qual_cost.per_tuple * plan->plan_rows;
3022 cost_qual_eval(&qual_cost, tlist, root);
3023 plan->startup_cost += qual_cost.startup;
3024 plan->total_cost += qual_cost.startup;
3025 plan->total_cost += qual_cost.per_tuple * plan->plan_rows;
3028 plan->targetlist = tlist;
3029 plan->lefttree = lefttree;
3030 plan->righttree = NULL;
3036 * distinctList is a list of SortClauses, identifying the targetlist items
3037 * that should be considered by the Unique filter. The input path must
3038 * already be sorted accordingly.
3041 make_unique(Plan *lefttree, List *distinctList)
3043 Unique *node = makeNode(Unique);
3044 Plan *plan = &node->plan;
3045 int numCols = list_length(distinctList);
3047 AttrNumber *uniqColIdx;
3051 copy_plan_costsize(plan, lefttree);
3054 * Charge one cpu_operator_cost per comparison per input tuple. We assume
3055 * all columns get compared at most of the tuples. (XXX probably this is
3058 plan->total_cost += cpu_operator_cost * plan->plan_rows * numCols;
3061 * plan->plan_rows is left as a copy of the input subplan's plan_rows; ie,
3062 * we assume the filter removes nothing. The caller must alter this if he
3063 * has a better idea.
3066 plan->targetlist = lefttree->targetlist;
3068 plan->lefttree = lefttree;
3069 plan->righttree = NULL;
3072 * convert SortClause list into arrays of attr indexes and equality
3073 * operators, as wanted by executor
3075 Assert(numCols > 0);
3076 uniqColIdx = (AttrNumber *) palloc(sizeof(AttrNumber) * numCols);
3077 uniqOperators = (Oid *) palloc(sizeof(Oid) * numCols);
3079 foreach(slitem, distinctList)
3081 SortClause *sortcl = (SortClause *) lfirst(slitem);
3082 TargetEntry *tle = get_sortgroupclause_tle(sortcl, plan->targetlist);
3084 uniqColIdx[keyno] = tle->resno;
3085 uniqOperators[keyno] = get_equality_op_for_ordering_op(sortcl->sortop);
3086 if (!OidIsValid(uniqOperators[keyno])) /* shouldn't happen */
3087 elog(ERROR, "could not find equality operator for ordering operator %u",
3092 node->numCols = numCols;
3093 node->uniqColIdx = uniqColIdx;
3094 node->uniqOperators = uniqOperators;
3100 * distinctList is a list of SortClauses, identifying the targetlist items
3101 * that should be considered by the SetOp filter. The input path must
3102 * already be sorted accordingly.
3105 make_setop(SetOpCmd cmd, Plan *lefttree,
3106 List *distinctList, AttrNumber flagColIdx)
3108 SetOp *node = makeNode(SetOp);
3109 Plan *plan = &node->plan;
3110 int numCols = list_length(distinctList);
3112 AttrNumber *dupColIdx;
3116 copy_plan_costsize(plan, lefttree);
3119 * Charge one cpu_operator_cost per comparison per input tuple. We assume
3120 * all columns get compared at most of the tuples.
3122 plan->total_cost += cpu_operator_cost * plan->plan_rows * numCols;
3125 * We make the unsupported assumption that there will be 10% as many
3126 * tuples out as in. Any way to do better?
3128 plan->plan_rows *= 0.1;
3129 if (plan->plan_rows < 1)
3130 plan->plan_rows = 1;
3132 plan->targetlist = lefttree->targetlist;
3134 plan->lefttree = lefttree;
3135 plan->righttree = NULL;
3138 * convert SortClause list into arrays of attr indexes and equality
3139 * operators, as wanted by executor
3141 Assert(numCols > 0);
3142 dupColIdx = (AttrNumber *) palloc(sizeof(AttrNumber) * numCols);
3143 dupOperators = (Oid *) palloc(sizeof(Oid) * numCols);
3145 foreach(slitem, distinctList)
3147 SortClause *sortcl = (SortClause *) lfirst(slitem);
3148 TargetEntry *tle = get_sortgroupclause_tle(sortcl, plan->targetlist);
3150 dupColIdx[keyno] = tle->resno;
3151 dupOperators[keyno] = get_equality_op_for_ordering_op(sortcl->sortop);
3152 if (!OidIsValid(dupOperators[keyno])) /* shouldn't happen */
3153 elog(ERROR, "could not find equality operator for ordering operator %u",
3159 node->numCols = numCols;
3160 node->dupColIdx = dupColIdx;
3161 node->dupOperators = dupOperators;
3162 node->flagColIdx = flagColIdx;
3168 * Note: offset_est and count_est are passed in to save having to repeat
3169 * work already done to estimate the values of the limitOffset and limitCount
3170 * expressions. Their values are as returned by preprocess_limit (0 means
3171 * "not relevant", -1 means "couldn't estimate"). Keep the code below in sync
3172 * with that function!
3175 make_limit(Plan *lefttree, Node *limitOffset, Node *limitCount,
3176 int64 offset_est, int64 count_est)
3178 Limit *node = makeNode(Limit);
3179 Plan *plan = &node->plan;
3181 copy_plan_costsize(plan, lefttree);
3184 * Adjust the output rows count and costs according to the offset/limit.
3185 * This is only a cosmetic issue if we are at top level, but if we are
3186 * building a subquery then it's important to report correct info to the
3189 * When the offset or count couldn't be estimated, use 10% of the
3190 * estimated number of rows emitted from the subplan.
3192 if (offset_est != 0)
3197 offset_rows = (double) offset_est;
3199 offset_rows = clamp_row_est(lefttree->plan_rows * 0.10);
3200 if (offset_rows > plan->plan_rows)
3201 offset_rows = plan->plan_rows;
3202 if (plan->plan_rows > 0)
3203 plan->startup_cost +=
3204 (plan->total_cost - plan->startup_cost)
3205 * offset_rows / plan->plan_rows;
3206 plan->plan_rows -= offset_rows;
3207 if (plan->plan_rows < 1)
3208 plan->plan_rows = 1;
3216 count_rows = (double) count_est;
3218 count_rows = clamp_row_est(lefttree->plan_rows * 0.10);
3219 if (count_rows > plan->plan_rows)
3220 count_rows = plan->plan_rows;
3221 if (plan->plan_rows > 0)
3222 plan->total_cost = plan->startup_cost +
3223 (plan->total_cost - plan->startup_cost)
3224 * count_rows / plan->plan_rows;
3225 plan->plan_rows = count_rows;
3226 if (plan->plan_rows < 1)
3227 plan->plan_rows = 1;
3230 plan->targetlist = lefttree->targetlist;
3232 plan->lefttree = lefttree;
3233 plan->righttree = NULL;
3235 node->limitOffset = limitOffset;
3236 node->limitCount = limitCount;
3243 * Build a Result plan node
3245 * If we have a subplan, assume that any evaluation costs for the gating qual
3246 * were already factored into the subplan's startup cost, and just copy the
3247 * subplan cost. If there's no subplan, we should include the qual eval
3248 * cost. In either case, tlist eval cost is not to be included here.
3251 make_result(PlannerInfo *root,
3253 Node *resconstantqual,
3256 Result *node = makeNode(Result);
3257 Plan *plan = &node->plan;
3260 copy_plan_costsize(plan, subplan);
3263 plan->startup_cost = 0;
3264 plan->total_cost = cpu_tuple_cost;
3265 plan->plan_rows = 1; /* wrong if we have a set-valued function? */
3266 plan->plan_width = 0; /* XXX is it worth being smarter? */
3267 if (resconstantqual)
3271 cost_qual_eval(&qual_cost, (List *) resconstantqual, root);
3272 /* resconstantqual is evaluated once at startup */
3273 plan->startup_cost += qual_cost.startup + qual_cost.per_tuple;
3274 plan->total_cost += qual_cost.startup + qual_cost.per_tuple;
3278 plan->targetlist = tlist;
3280 plan->lefttree = subplan;
3281 plan->righttree = NULL;
3282 node->resconstantqual = resconstantqual;
3288 * is_projection_capable_plan
3289 * Check whether a given Plan node is able to do projection.
3292 is_projection_capable_plan(Plan *plan)
3294 /* Most plan types can project, so just list the ones that can't */
3295 switch (nodeTag(plan))