/*-------------------------------------------------------------------------
 *
 * pathnode.c
 *	  Routines to manipulate pathlists and create path nodes
 *
 * Portions Copyright (c) 1996-2011, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *	  src/backend/optimizer/util/pathnode.c
 *
 *-------------------------------------------------------------------------
 */
19 #include "foreign/fdwapi.h"
20 #include "miscadmin.h"
21 #include "nodes/nodeFuncs.h"
22 #include "optimizer/clauses.h"
23 #include "optimizer/cost.h"
24 #include "optimizer/pathnode.h"
25 #include "optimizer/paths.h"
26 #include "optimizer/tlist.h"
27 #include "parser/parsetree.h"
28 #include "utils/lsyscache.h"
29 #include "utils/selfuncs.h"
32 static List *translate_sub_tlist(List *tlist, int relid);
33 static bool query_is_distinct_for(Query *query, List *colnos, List *opids);
34 static Oid distinct_col_search(int colno, List *colnos, List *opids);
37 /*****************************************************************************
38 * MISC. PATH UTILITIES
39 *****************************************************************************/
43 * Return -1, 0, or +1 according as path1 is cheaper, the same cost,
44 * or more expensive than path2 for the specified criterion.
47 compare_path_costs(Path *path1, Path *path2, CostSelector criterion)
49 if (criterion == STARTUP_COST)
51 if (path1->startup_cost < path2->startup_cost)
53 if (path1->startup_cost > path2->startup_cost)
57 * If paths have the same startup cost (not at all unlikely), order
60 if (path1->total_cost < path2->total_cost)
62 if (path1->total_cost > path2->total_cost)
67 if (path1->total_cost < path2->total_cost)
69 if (path1->total_cost > path2->total_cost)
73 * If paths have the same total cost, order them by startup cost.
75 if (path1->startup_cost < path2->startup_cost)
77 if (path1->startup_cost > path2->startup_cost)
84 * compare_fuzzy_path_costs
85 * Return -1, 0, or +1 according as path1 is cheaper, the same cost,
86 * or more expensive than path2 for the specified criterion.
88 * This differs from compare_path_costs in that we consider the costs the
89 * same if they agree to within a "fuzz factor". This is used by add_path
90 * to avoid keeping both of a pair of paths that really have insignificantly
94 compare_fuzzy_path_costs(Path *path1, Path *path2, CostSelector criterion)
97 * We use a fuzz factor of 1% of the smaller cost.
99 * XXX does this percentage need to be user-configurable?
101 if (criterion == STARTUP_COST)
103 if (path1->startup_cost > path2->startup_cost * 1.01)
105 if (path2->startup_cost > path1->startup_cost * 1.01)
109 * If paths have the same startup cost (not at all unlikely), order
110 * them by total cost.
112 if (path1->total_cost > path2->total_cost * 1.01)
114 if (path2->total_cost > path1->total_cost * 1.01)
119 if (path1->total_cost > path2->total_cost * 1.01)
121 if (path2->total_cost > path1->total_cost * 1.01)
125 * If paths have the same total cost, order them by startup cost.
127 if (path1->startup_cost > path2->startup_cost * 1.01)
129 if (path2->startup_cost > path1->startup_cost * 1.01)
136 * compare_path_fractional_costs
137 * Return -1, 0, or +1 according as path1 is cheaper, the same cost,
138 * or more expensive than path2 for fetching the specified fraction
139 * of the total tuples.
141 * If fraction is <= 0 or > 1, we interpret it as 1, ie, we select the
142 * path with the cheaper total_cost.
145 compare_fractional_path_costs(Path *path1, Path *path2,
151 if (fraction <= 0.0 || fraction >= 1.0)
152 return compare_path_costs(path1, path2, TOTAL_COST);
153 cost1 = path1->startup_cost +
154 fraction * (path1->total_cost - path1->startup_cost);
155 cost2 = path2->startup_cost +
156 fraction * (path2->total_cost - path2->startup_cost);
166 * Find the minimum-cost paths from among a relation's paths,
167 * and save them in the rel's cheapest-path fields.
169 * This is normally called only after we've finished constructing the path
170 * list for the rel node.
172 * If we find two paths of identical costs, try to keep the better-sorted one.
173 * The paths might have unrelated sort orderings, in which case we can only
174 * guess which might be better to keep, but if one is superior then we
175 * definitely should keep it.
178 set_cheapest(RelOptInfo *parent_rel)
180 List *pathlist = parent_rel->pathlist;
182 Path *cheapest_startup_path;
183 Path *cheapest_total_path;
185 Assert(IsA(parent_rel, RelOptInfo));
188 elog(ERROR, "could not devise a query plan for the given query");
190 cheapest_startup_path = cheapest_total_path = (Path *) linitial(pathlist);
192 for_each_cell(p, lnext(list_head(pathlist)))
194 Path *path = (Path *) lfirst(p);
197 cmp = compare_path_costs(cheapest_startup_path, path, STARTUP_COST);
200 compare_pathkeys(cheapest_startup_path->pathkeys,
201 path->pathkeys) == PATHKEYS_BETTER2))
202 cheapest_startup_path = path;
204 cmp = compare_path_costs(cheapest_total_path, path, TOTAL_COST);
207 compare_pathkeys(cheapest_total_path->pathkeys,
208 path->pathkeys) == PATHKEYS_BETTER2))
209 cheapest_total_path = path;
212 parent_rel->cheapest_startup_path = cheapest_startup_path;
213 parent_rel->cheapest_total_path = cheapest_total_path;
214 parent_rel->cheapest_unique_path = NULL; /* computed only if needed */
219 * Consider a potential implementation path for the specified parent rel,
220 * and add it to the rel's pathlist if it is worthy of consideration.
221 * A path is worthy if it has either a better sort order (better pathkeys)
222 * or cheaper cost (on either dimension) than any of the existing old paths.
224 * We also remove from the rel's pathlist any old paths that are dominated
225 * by new_path --- that is, new_path is both cheaper and at least as well
228 * The pathlist is kept sorted by TOTAL_COST metric, with cheaper paths
229 * at the front. No code depends on that for correctness; it's simply
230 * a speed hack within this routine. Doing it that way makes it more
231 * likely that we will reject an inferior path after a few comparisons,
232 * rather than many comparisons.
234 * NOTE: discarded Path objects are immediately pfree'd to reduce planner
235 * memory consumption. We dare not try to free the substructure of a Path,
236 * since much of it may be shared with other Paths or the query tree itself;
237 * but just recycling discarded Path nodes is a very useful savings in
238 * a large join tree. We can recycle the List nodes of pathlist, too.
240 * BUT: we do not pfree IndexPath objects, since they may be referenced as
241 * children of BitmapHeapPaths as well as being paths in their own right.
243 * 'parent_rel' is the relation entry to which the path corresponds.
244 * 'new_path' is a potential path for parent_rel.
246 * Returns nothing, but modifies parent_rel->pathlist.
249 add_path(RelOptInfo *parent_rel, Path *new_path)
251 bool accept_new = true; /* unless we find a superior old path */
252 ListCell *insert_after = NULL; /* where to insert new item */
258 * This is a convenient place to check for query cancel --- no part of the
259 * planner goes very long without calling add_path().
261 CHECK_FOR_INTERRUPTS();
264 * Loop to check proposed new path against old paths. Note it is possible
265 * for more than one old path to be tossed out because new_path dominates
268 * We can't use foreach here because the loop body may delete the current
272 for (p1 = list_head(parent_rel->pathlist); p1 != NULL; p1 = p1_next)
274 Path *old_path = (Path *) lfirst(p1);
275 bool remove_old = false; /* unless new proves superior */
281 * As of Postgres 8.0, we use fuzzy cost comparison to avoid wasting
282 * cycles keeping paths that are really not significantly different in
285 costcmp = compare_fuzzy_path_costs(new_path, old_path, TOTAL_COST);
288 * If the two paths compare differently for startup and total cost,
289 * then we want to keep both, and we can skip the (much slower)
290 * comparison of pathkeys. If they compare the same, proceed with the
291 * pathkeys comparison. Note: this test relies on the fact that
292 * compare_fuzzy_path_costs will only return 0 if both costs are
293 * effectively equal (and, therefore, there's no need to call it twice
297 costcmp == compare_fuzzy_path_costs(new_path, old_path,
300 switch (compare_pathkeys(new_path->pathkeys, old_path->pathkeys))
304 remove_old = true; /* new dominates old */
305 else if (costcmp > 0)
306 accept_new = false; /* old dominates new */
310 * Same pathkeys, and fuzzily the same cost, so keep
311 * just one --- but we'll do an exact cost comparison
314 if (compare_path_costs(new_path, old_path,
316 remove_old = true; /* new dominates old */
318 accept_new = false; /* old equals or dominates new */
321 case PATHKEYS_BETTER1:
323 remove_old = true; /* new dominates old */
325 case PATHKEYS_BETTER2:
327 accept_new = false; /* old dominates new */
329 case PATHKEYS_DIFFERENT:
330 /* keep both paths, since they have different ordering */
336 * Remove current element from pathlist if dominated by new.
340 parent_rel->pathlist = list_delete_cell(parent_rel->pathlist,
344 * Delete the data pointed-to by the deleted cell, if possible
346 if (!IsA(old_path, IndexPath))
348 /* p1_prev does not advance */
352 /* new belongs after this old path if it has cost >= old's */
355 /* p1_prev advances */
360 * If we found an old path that dominates new_path, we can quit
361 * scanning the pathlist; we will not add new_path, and we assume
362 * new_path cannot dominate any other elements of the pathlist.
370 /* Accept the new path: insert it at proper place in pathlist */
372 lappend_cell(parent_rel->pathlist, insert_after, new_path);
374 parent_rel->pathlist = lcons(new_path, parent_rel->pathlist);
378 /* Reject and recycle the new path */
379 if (!IsA(new_path, IndexPath))
385 /*****************************************************************************
386 * PATH NODE CREATION ROUTINES
387 *****************************************************************************/
390 * create_seqscan_path
391 * Creates a path corresponding to a sequential scan, returning the
395 create_seqscan_path(PlannerInfo *root, RelOptInfo *rel)
397 Path *pathnode = makeNode(Path);
399 pathnode->pathtype = T_SeqScan;
400 pathnode->parent = rel;
401 pathnode->pathkeys = NIL; /* seqscan has unordered result */
403 cost_seqscan(pathnode, root, rel);
410 * Creates a path node for an index scan.
412 * 'index' is a usable index.
413 * 'clause_groups' is a list of lists of RestrictInfo nodes
414 * to be used as index qual conditions in the scan.
415 * 'indexorderbys' is a list of bare expressions (no RestrictInfos)
416 * to be used as index ordering operators in the scan.
417 * 'pathkeys' describes the ordering of the path.
418 * 'indexscandir' is ForwardScanDirection or BackwardScanDirection
419 * for an ordered index, or NoMovementScanDirection for
420 * an unordered index.
421 * 'indexonly' is true if an index-only scan is wanted.
422 * 'outer_rel' is the outer relation if this is a join inner indexscan path.
423 * (pathkeys and indexscandir are ignored if so.) NULL if not.
425 * Returns the new path node.
428 create_index_path(PlannerInfo *root,
433 ScanDirection indexscandir,
435 RelOptInfo *outer_rel)
437 IndexPath *pathnode = makeNode(IndexPath);
438 RelOptInfo *rel = index->rel;
443 * For a join inner scan, there's no point in marking the path with any
444 * pathkeys, since it will only ever be used as the inner path of a
445 * nestloop, and so its ordering does not matter. For the same reason we
446 * don't really care what order it's scanned in. (We could expect the
447 * caller to supply the correct values, but it's easier to force it here.)
449 if (outer_rel != NULL)
452 indexscandir = NoMovementScanDirection;
455 pathnode->path.pathtype = indexonly ? T_IndexOnlyScan : T_IndexScan;
456 pathnode->path.parent = rel;
457 pathnode->path.pathkeys = pathkeys;
459 /* Convert clauses to indexquals the executor can handle */
460 indexquals = expand_indexqual_conditions(index, clause_groups);
462 /* Flatten the clause-groups list to produce indexclauses list */
463 allclauses = flatten_clausegroups_list(clause_groups);
465 /* Fill in the pathnode */
466 pathnode->indexinfo = index;
467 pathnode->indexclauses = allclauses;
468 pathnode->indexquals = indexquals;
469 pathnode->indexorderbys = indexorderbys;
471 pathnode->isjoininner = (outer_rel != NULL);
472 pathnode->indexscandir = indexscandir;
474 if (outer_rel != NULL)
477 * We must compute the estimated number of output rows for the
478 * indexscan. This is less than rel->rows because of the additional
479 * selectivity of the join clauses. Since clause_groups may contain
480 * both restriction and join clauses, we have to do a set union to get
481 * the full set of clauses that must be considered to compute the
482 * correct selectivity. (Without the union operation, we might have
483 * some restriction clauses appearing twice, which'd mislead
484 * clauselist_selectivity into double-counting their selectivity.
485 * However, since RestrictInfo nodes aren't copied when linking them
486 * into different lists, it should be sufficient to use pointer
487 * comparison to remove duplicates.)
489 * Note that we force the clauses to be treated as non-join clauses
490 * during selectivity estimation.
492 allclauses = list_union_ptr(rel->baserestrictinfo, allclauses);
493 pathnode->rows = rel->tuples *
494 clauselist_selectivity(root,
496 rel->relid, /* do not use 0! */
499 /* Like costsize.c, force estimate to be at least one row */
500 pathnode->rows = clamp_row_est(pathnode->rows);
505 * The number of rows is the same as the parent rel's estimate, since
506 * this isn't a join inner indexscan.
508 pathnode->rows = rel->rows;
511 cost_index(pathnode, root, index, indexquals, indexorderbys,
512 indexonly, outer_rel);
518 * create_bitmap_heap_path
519 * Creates a path node for a bitmap scan.
521 * 'bitmapqual' is a tree of IndexPath, BitmapAndPath, and BitmapOrPath nodes.
523 * If this is a join inner indexscan path, 'outer_rel' is the outer relation,
524 * and all the component IndexPaths should have been costed accordingly.
527 create_bitmap_heap_path(PlannerInfo *root,
530 RelOptInfo *outer_rel)
532 BitmapHeapPath *pathnode = makeNode(BitmapHeapPath);
534 pathnode->path.pathtype = T_BitmapHeapScan;
535 pathnode->path.parent = rel;
536 pathnode->path.pathkeys = NIL; /* always unordered */
538 pathnode->bitmapqual = bitmapqual;
539 pathnode->isjoininner = (outer_rel != NULL);
541 if (pathnode->isjoininner)
544 * We must compute the estimated number of output rows for the
545 * indexscan. This is less than rel->rows because of the additional
546 * selectivity of the join clauses. We make use of the selectivity
547 * estimated for the bitmap to do this; this isn't really quite right
548 * since there may be restriction conditions not included in the
552 Selectivity indexSelectivity;
554 cost_bitmap_tree_node(bitmapqual, &indexTotalCost, &indexSelectivity);
555 pathnode->rows = rel->tuples * indexSelectivity;
556 if (pathnode->rows > rel->rows)
557 pathnode->rows = rel->rows;
558 /* Like costsize.c, force estimate to be at least one row */
559 pathnode->rows = clamp_row_est(pathnode->rows);
564 * The number of rows is the same as the parent rel's estimate, since
565 * this isn't a join inner indexscan.
567 pathnode->rows = rel->rows;
570 cost_bitmap_heap_scan(&pathnode->path, root, rel, bitmapqual, outer_rel);
576 * create_bitmap_and_path
577 * Creates a path node representing a BitmapAnd.
580 create_bitmap_and_path(PlannerInfo *root,
584 BitmapAndPath *pathnode = makeNode(BitmapAndPath);
586 pathnode->path.pathtype = T_BitmapAnd;
587 pathnode->path.parent = rel;
588 pathnode->path.pathkeys = NIL; /* always unordered */
590 pathnode->bitmapquals = bitmapquals;
592 /* this sets bitmapselectivity as well as the regular cost fields: */
593 cost_bitmap_and_node(pathnode, root);
599 * create_bitmap_or_path
600 * Creates a path node representing a BitmapOr.
603 create_bitmap_or_path(PlannerInfo *root,
607 BitmapOrPath *pathnode = makeNode(BitmapOrPath);
609 pathnode->path.pathtype = T_BitmapOr;
610 pathnode->path.parent = rel;
611 pathnode->path.pathkeys = NIL; /* always unordered */
613 pathnode->bitmapquals = bitmapquals;
615 /* this sets bitmapselectivity as well as the regular cost fields: */
616 cost_bitmap_or_node(pathnode, root);
622 * create_tidscan_path
623 * Creates a path corresponding to a scan by TID, returning the pathnode.
626 create_tidscan_path(PlannerInfo *root, RelOptInfo *rel, List *tidquals)
628 TidPath *pathnode = makeNode(TidPath);
630 pathnode->path.pathtype = T_TidScan;
631 pathnode->path.parent = rel;
632 pathnode->path.pathkeys = NIL;
634 pathnode->tidquals = tidquals;
636 cost_tidscan(&pathnode->path, root, rel, tidquals);
643 * Creates a path corresponding to an Append plan, returning the
646 * Note that we must handle subpaths = NIL, representing a dummy access path.
649 create_append_path(RelOptInfo *rel, List *subpaths)
651 AppendPath *pathnode = makeNode(AppendPath);
654 pathnode->path.pathtype = T_Append;
655 pathnode->path.parent = rel;
656 pathnode->path.pathkeys = NIL; /* result is always considered
658 pathnode->subpaths = subpaths;
661 * Compute cost as sum of subplan costs. We charge nothing extra for the
662 * Append itself, which perhaps is too optimistic, but since it doesn't do
663 * any selection or projection, it is a pretty cheap node.
665 * If you change this, see also make_append().
667 pathnode->path.startup_cost = 0;
668 pathnode->path.total_cost = 0;
671 Path *subpath = (Path *) lfirst(l);
673 if (l == list_head(subpaths)) /* first node? */
674 pathnode->path.startup_cost = subpath->startup_cost;
675 pathnode->path.total_cost += subpath->total_cost;
682 * create_merge_append_path
683 * Creates a path corresponding to a MergeAppend plan, returning the
687 create_merge_append_path(PlannerInfo *root,
692 MergeAppendPath *pathnode = makeNode(MergeAppendPath);
693 Cost input_startup_cost;
694 Cost input_total_cost;
697 pathnode->path.pathtype = T_MergeAppend;
698 pathnode->path.parent = rel;
699 pathnode->path.pathkeys = pathkeys;
700 pathnode->subpaths = subpaths;
703 * Apply query-wide LIMIT if known and path is for sole base relation.
704 * Finding out the latter at this low level is a bit klugy.
706 pathnode->limit_tuples = root->limit_tuples;
707 if (pathnode->limit_tuples >= 0)
711 for (rti = 1; rti < root->simple_rel_array_size; rti++)
713 RelOptInfo *brel = root->simple_rel_array[rti];
718 /* ignore RTEs that are "other rels" */
719 if (brel->reloptkind != RELOPT_BASEREL)
724 /* Oops, it's a join query */
725 pathnode->limit_tuples = -1.0;
731 /* Add up all the costs of the input paths */
732 input_startup_cost = 0;
733 input_total_cost = 0;
736 Path *subpath = (Path *) lfirst(l);
738 if (pathkeys_contained_in(pathkeys, subpath->pathkeys))
740 /* Subpath is adequately ordered, we won't need to sort it */
741 input_startup_cost += subpath->startup_cost;
742 input_total_cost += subpath->total_cost;
746 /* We'll need to insert a Sort node, so include cost for that */
747 Path sort_path; /* dummy for result of cost_sort */
749 cost_sort(&sort_path,
753 subpath->parent->tuples,
754 subpath->parent->width,
757 pathnode->limit_tuples);
758 input_startup_cost += sort_path.startup_cost;
759 input_total_cost += sort_path.total_cost;
763 /* Now we can compute total costs of the MergeAppend */
764 cost_merge_append(&pathnode->path, root,
765 pathkeys, list_length(subpaths),
766 input_startup_cost, input_total_cost,
774 * Creates a path representing a Result-and-nothing-else plan.
775 * This is only used for the case of a query with an empty jointree.
778 create_result_path(List *quals)
780 ResultPath *pathnode = makeNode(ResultPath);
782 pathnode->path.pathtype = T_Result;
783 pathnode->path.parent = NULL;
784 pathnode->path.pathkeys = NIL;
785 pathnode->quals = quals;
787 /* Ideally should define cost_result(), but I'm too lazy */
788 pathnode->path.startup_cost = 0;
789 pathnode->path.total_cost = cpu_tuple_cost;
792 * In theory we should include the qual eval cost as well, but at present
793 * that doesn't accomplish much except duplicate work that will be done
794 * again in make_result; since this is only used for degenerate cases,
795 * nothing interesting will be done with the path cost values...
802 * create_material_path
803 * Creates a path corresponding to a Material plan, returning the
807 create_material_path(RelOptInfo *rel, Path *subpath)
809 MaterialPath *pathnode = makeNode(MaterialPath);
811 pathnode->path.pathtype = T_Material;
812 pathnode->path.parent = rel;
814 pathnode->path.pathkeys = subpath->pathkeys;
816 pathnode->subpath = subpath;
818 cost_material(&pathnode->path,
819 subpath->startup_cost,
829 * Creates a path representing elimination of distinct rows from the
830 * input data. Distinct-ness is defined according to the needs of the
831 * semijoin represented by sjinfo. If it is not possible to identify
832 * how to make the data unique, NULL is returned.
834 * If used at all, this is likely to be called repeatedly on the same rel;
835 * and the input subpath should always be the same (the cheapest_total path
836 * for the rel). So we cache the result.
839 create_unique_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath,
840 SpecialJoinInfo *sjinfo)
842 UniquePath *pathnode;
843 Path sort_path; /* dummy for result of cost_sort */
844 Path agg_path; /* dummy for result of cost_agg */
845 MemoryContext oldcontext;
853 /* Caller made a mistake if subpath isn't cheapest_total ... */
854 Assert(subpath == rel->cheapest_total_path);
855 /* ... or if SpecialJoinInfo is the wrong one */
856 Assert(sjinfo->jointype == JOIN_SEMI);
857 Assert(bms_equal(rel->relids, sjinfo->syn_righthand));
859 /* If result already cached, return it */
860 if (rel->cheapest_unique_path)
861 return (UniquePath *) rel->cheapest_unique_path;
863 /* If we previously failed, return NULL quickly */
864 if (sjinfo->join_quals == NIL)
868 * We must ensure path struct and subsidiary data are allocated in main
869 * planning context; otherwise GEQO memory management causes trouble.
870 * (Compare best_inner_indexscan().)
872 oldcontext = MemoryContextSwitchTo(root->planner_cxt);
875 * Look to see whether the semijoin's join quals consist of AND'ed
876 * equality operators, with (only) RHS variables on only one side of
877 * each one. If so, we can figure out how to enforce uniqueness for
880 * Note that the input join_quals list is the list of quals that are
881 * *syntactically* associated with the semijoin, which in practice means
882 * the synthesized comparison list for an IN or the WHERE of an EXISTS.
883 * Particularly in the latter case, it might contain clauses that aren't
884 * *semantically* associated with the join, but refer to just one side or
885 * the other. We can ignore such clauses here, as they will just drop
886 * down to be processed within one side or the other. (It is okay to
887 * consider only the syntactically-associated clauses here because for a
888 * semijoin, no higher-level quals could refer to the RHS, and so there
889 * can be no other quals that are semantically associated with this join.
890 * We do things this way because it is useful to be able to run this test
891 * before we have extracted the list of quals that are actually
892 * semantically associated with the particular join.)
894 * Note that the in_operators list consists of the joinqual operators
895 * themselves (but commuted if needed to put the RHS value on the right).
896 * These could be cross-type operators, in which case the operator
897 * actually needed for uniqueness is a related single-type operator.
898 * We assume here that that operator will be available from the btree
899 * or hash opclass when the time comes ... if not, create_unique_plan()
906 all_hash = enable_hashagg; /* don't consider hash if not enabled */
907 foreach(lc, sjinfo->join_quals)
909 OpExpr *op = (OpExpr *) lfirst(lc);
918 /* Is it a binary opclause? */
919 if (!IsA(op, OpExpr) ||
920 list_length(op->args) != 2)
922 /* No, but does it reference both sides? */
923 all_varnos = pull_varnos((Node *) op);
924 if (!bms_overlap(all_varnos, sjinfo->syn_righthand) ||
925 bms_is_subset(all_varnos, sjinfo->syn_righthand))
928 * Clause refers to only one rel, so ignore it --- unless it
929 * contains volatile functions, in which case we'd better
932 if (contain_volatile_functions((Node *) op))
936 /* Non-operator clause referencing both sides, must punt */
940 /* Extract data from binary opclause */
942 left_expr = linitial(op->args);
943 right_expr = lsecond(op->args);
944 left_varnos = pull_varnos(left_expr);
945 right_varnos = pull_varnos(right_expr);
946 all_varnos = bms_union(left_varnos, right_varnos);
947 opinputtype = exprType(left_expr);
949 /* Does it reference both sides? */
950 if (!bms_overlap(all_varnos, sjinfo->syn_righthand) ||
951 bms_is_subset(all_varnos, sjinfo->syn_righthand))
954 * Clause refers to only one rel, so ignore it --- unless it
955 * contains volatile functions, in which case we'd better punt.
957 if (contain_volatile_functions((Node *) op))
962 /* check rel membership of arguments */
963 if (!bms_is_empty(right_varnos) &&
964 bms_is_subset(right_varnos, sjinfo->syn_righthand) &&
965 !bms_overlap(left_varnos, sjinfo->syn_righthand))
967 /* typical case, right_expr is RHS variable */
969 else if (!bms_is_empty(left_varnos) &&
970 bms_is_subset(left_varnos, sjinfo->syn_righthand) &&
971 !bms_overlap(right_varnos, sjinfo->syn_righthand))
973 /* flipped case, left_expr is RHS variable */
974 opno = get_commutator(opno);
975 if (!OidIsValid(opno))
977 right_expr = left_expr;
982 /* all operators must be btree equality or hash equality */
985 /* oprcanmerge is considered a hint... */
986 if (!op_mergejoinable(opno, opinputtype) ||
987 get_mergejoin_opfamilies(opno) == NIL)
992 /* ... but oprcanhash had better be correct */
993 if (!op_hashjoinable(opno, opinputtype))
996 if (!(all_btree || all_hash))
999 /* so far so good, keep building lists */
1000 in_operators = lappend_oid(in_operators, opno);
1001 uniq_exprs = lappend(uniq_exprs, copyObject(right_expr));
1004 /* Punt if we didn't find at least one column to unique-ify */
1005 if (uniq_exprs == NIL)
1006 goto no_unique_path;
1009 * The expressions we'd need to unique-ify mustn't be volatile.
1011 if (contain_volatile_functions((Node *) uniq_exprs))
1012 goto no_unique_path;
1015 * If we get here, we can unique-ify using at least one of sorting and
1016 * hashing. Start building the result Path object.
1018 pathnode = makeNode(UniquePath);
1020 pathnode->path.pathtype = T_Unique;
1021 pathnode->path.parent = rel;
1024 * Treat the output as always unsorted, since we don't necessarily have
1025 * pathkeys to represent it.
1027 pathnode->path.pathkeys = NIL;
1029 pathnode->subpath = subpath;
1030 pathnode->in_operators = in_operators;
1031 pathnode->uniq_exprs = uniq_exprs;
1034 * If the input is a subquery whose output must be unique already, then we
1035 * don't need to do anything. The test for uniqueness has to consider
1036 * exactly which columns we are extracting; for example "SELECT DISTINCT
1037 * x,y" doesn't guarantee that x alone is distinct. So we cannot check for
1038 * this optimization unless uniq_exprs consists only of simple Vars
1039 * referencing subquery outputs. (Possibly we could do something with
1040 * expressions in the subquery outputs, too, but for now keep it simple.)
1042 if (rel->rtekind == RTE_SUBQUERY)
1044 RangeTblEntry *rte = planner_rt_fetch(rel->relid, root);
1045 List *sub_tlist_colnos;
1047 sub_tlist_colnos = translate_sub_tlist(uniq_exprs, rel->relid);
1049 if (sub_tlist_colnos &&
1050 query_is_distinct_for(rte->subquery,
1051 sub_tlist_colnos, in_operators))
1053 pathnode->umethod = UNIQUE_PATH_NOOP;
1054 pathnode->rows = rel->rows;
1055 pathnode->path.startup_cost = subpath->startup_cost;
1056 pathnode->path.total_cost = subpath->total_cost;
1057 pathnode->path.pathkeys = subpath->pathkeys;
1059 rel->cheapest_unique_path = (Path *) pathnode;
1061 MemoryContextSwitchTo(oldcontext);
1067 /* Estimate number of output rows */
1068 pathnode->rows = estimate_num_groups(root, uniq_exprs, rel->rows);
1069 numCols = list_length(uniq_exprs);
1074 * Estimate cost for sort+unique implementation
1076 cost_sort(&sort_path, root, NIL,
1077 subpath->total_cost,
1085 * Charge one cpu_operator_cost per comparison per input tuple. We
1086 * assume all columns get compared at most of the tuples. (XXX
1087 * probably this is an overestimate.) This should agree with
1090 sort_path.total_cost += cpu_operator_cost * rel->rows * numCols;
1096 * Estimate the overhead per hashtable entry at 64 bytes (same as in
1099 int hashentrysize = rel->width + 64;
1101 if (hashentrysize * pathnode->rows > work_mem * 1024L)
1102 all_hash = false; /* don't try to hash */
1104 cost_agg(&agg_path, root,
1106 numCols, pathnode->rows,
1107 subpath->startup_cost,
1108 subpath->total_cost,
1112 if (all_btree && all_hash)
1114 if (agg_path.total_cost < sort_path.total_cost)
1115 pathnode->umethod = UNIQUE_PATH_HASH;
1117 pathnode->umethod = UNIQUE_PATH_SORT;
1120 pathnode->umethod = UNIQUE_PATH_SORT;
1122 pathnode->umethod = UNIQUE_PATH_HASH;
1124 goto no_unique_path;
1126 if (pathnode->umethod == UNIQUE_PATH_HASH)
1128 pathnode->path.startup_cost = agg_path.startup_cost;
1129 pathnode->path.total_cost = agg_path.total_cost;
1133 pathnode->path.startup_cost = sort_path.startup_cost;
1134 pathnode->path.total_cost = sort_path.total_cost;
1137 rel->cheapest_unique_path = (Path *) pathnode;
1139 MemoryContextSwitchTo(oldcontext);
1143 no_unique_path: /* failure exit */
1145 /* Mark the SpecialJoinInfo as not unique-able */
1146 sjinfo->join_quals = NIL;
1148 MemoryContextSwitchTo(oldcontext);
1154 * translate_sub_tlist - get subquery column numbers represented by tlist
1156 * The given targetlist usually contains only Vars referencing the given relid.
1157 * Extract their varattnos (ie, the column numbers of the subquery) and return
1158 * as an integer List.
1160 * If any of the tlist items is not a simple Var, we cannot determine whether
1161 * the subquery's uniqueness condition (if any) matches ours, so punt and
1165 translate_sub_tlist(List *tlist, int relid)
1172 Var *var = (Var *) lfirst(l);
1174 if (!var || !IsA(var, Var) ||
1175 var->varno != relid)
1176 return NIL; /* punt */
1178 result = lappend_int(result, var->varattno);
1184 * query_is_distinct_for - does query never return duplicates of the
1185 * specified columns?
1187 * colnos is an integer list of output column numbers (resno's). We are
1188 * interested in whether rows consisting of just these columns are certain
1189 * to be distinct. "Distinctness" is defined according to whether the
1190 * corresponding upper-level equality operators listed in opids would think
1191 * the values are distinct. (Note: the opids entries could be cross-type
1192 * operators, and thus not exactly the equality operators that the subquery
1193 * would use itself. We use equality_ops_are_compatible() to check
1194 * compatibility. That looks at btree or hash opfamily membership, and so
1195 * should give trustworthy answers for all operators that we might need
1196 * to deal with here.)
1199 query_is_distinct_for(Query *query, List *colnos, List *opids)
/* colnos and opids are parallel lists: one equality operator per column. */
1204 Assert(list_length(colnos) == list_length(opids));
1207 * DISTINCT (including DISTINCT ON) guarantees uniqueness if all the
1208 * columns in the DISTINCT clause appear in colnos and operator semantics
1211 if (query->distinctClause)
1213 foreach(l, query->distinctClause)
1215 SortGroupClause *sgc = (SortGroupClause *) lfirst(l);
1216 TargetEntry *tle = get_sortgroupclause_tle(sgc,
/* Is this DISTINCT column among the requested colnos, with a compatible operator? */
1219 opid = distinct_col_search(tle->resno, colnos, opids);
1220 if (!OidIsValid(opid) ||
1221 !equality_ops_are_compatible(opid, sgc->eqop))
1222 break; /* exit early if no match */
/* foreach leaves l NULL only when the loop ran to completion (no break). */
1224 if (l == NULL) /* had matches for all? */
1229 * Similarly, GROUP BY guarantees uniqueness if all the grouped columns
1230 * appear in colnos and operator semantics match.
1232 if (query->groupClause)
1234 foreach(l, query->groupClause)
1236 SortGroupClause *sgc = (SortGroupClause *) lfirst(l);
1237 TargetEntry *tle = get_sortgroupclause_tle(sgc,
1240 opid = distinct_col_search(tle->resno, colnos, opids);
1241 if (!OidIsValid(opid) ||
1242 !equality_ops_are_compatible(opid, sgc->eqop))
1243 break; /* exit early if no match */
1245 if (l == NULL) /* had matches for all? */
1251 * If we have no GROUP BY, but do have aggregates or HAVING, then the
1252 * result is at most one row so it's surely unique, for any operators.
1254 if (query->hasAggs || query->havingQual)
1259 * UNION, INTERSECT, EXCEPT guarantee uniqueness of the whole output row,
1262 if (query->setOperations)
1264 SetOperationStmt *topop = (SetOperationStmt *) query->setOperations;
1266 Assert(IsA(topop, SetOperationStmt));
1267 Assert(topop->op != SETOP_NONE);
1273 /* We're good if all the nonjunk output columns are in colnos */
/* Walk the tlist and topop->groupClauses in parallel (lg advanced per nonjunk column). */
1274 lg = list_head(topop->groupClauses);
1275 foreach(l, query->targetList)
1277 TargetEntry *tle = (TargetEntry *) lfirst(l);
1278 SortGroupClause *sgc;
1281 continue; /* ignore resjunk columns */
1283 /* non-resjunk columns should have grouping clauses */
1285 sgc = (SortGroupClause *) lfirst(lg);
1288 opid = distinct_col_search(tle->resno, colnos, opids);
1289 if (!OidIsValid(opid) ||
1290 !equality_ops_are_compatible(opid, sgc->eqop))
1291 break; /* exit early if no match */
1293 if (l == NULL) /* had matches for all? */
1299 * XXX Are there any other cases in which we can easily see the result
1307 * distinct_col_search - subroutine for query_is_distinct_for
1309 * If colno is in colnos, return the corresponding element of opids,
1310 * else return InvalidOid. (We expect colnos does not contain duplicates,
1311 * so the result is well-defined.)
1314 distinct_col_search(int colno, List *colnos, List *opids)
/* Scan the two parallel lists in lockstep; first match wins. */
1319 forboth(lc1, colnos, lc2, opids)
1321 if (colno == lfirst_int(lc1))
1322 return lfirst_oid(lc2);
1328 * create_subqueryscan_path
1329 * Creates a path corresponding to a sequential scan of a subquery,
1330 * returning the pathnode.
1333 create_subqueryscan_path(RelOptInfo *rel, List *pathkeys)
1335 Path *pathnode = makeNode(Path);
1337 pathnode->pathtype = T_SubqueryScan;
1338 pathnode->parent = rel;
/* Subquery output may be ordered, so the caller supplies the pathkeys. */
1339 pathnode->pathkeys = pathkeys;
/* cost_subqueryscan fills in startup_cost and total_cost. */
1341 cost_subqueryscan(pathnode, rel);
1347 * create_functionscan_path
1348 * Creates a path corresponding to a sequential scan of a function,
1349 * returning the pathnode.
1352 create_functionscan_path(PlannerInfo *root, RelOptInfo *rel)
1354 Path *pathnode = makeNode(Path);
1356 pathnode->pathtype = T_FunctionScan;
1357 pathnode->parent = rel;
1358 pathnode->pathkeys = NIL; /* for now, assume unordered result */
/* cost_functionscan fills in startup_cost and total_cost. */
1360 cost_functionscan(pathnode, root, rel);
1366 * create_valuesscan_path
1367 * Creates a path corresponding to a scan of a VALUES list,
1368 * returning the pathnode.
1371 create_valuesscan_path(PlannerInfo *root, RelOptInfo *rel)
1373 Path *pathnode = makeNode(Path);
1375 pathnode->pathtype = T_ValuesScan;
1376 pathnode->parent = rel;
1377 pathnode->pathkeys = NIL; /* result is always unordered */
/* cost_valuesscan fills in startup_cost and total_cost. */
1379 cost_valuesscan(pathnode, root, rel);
1385 * create_ctescan_path
1386 * Creates a path corresponding to a scan of a non-self-reference CTE,
1387 * returning the pathnode.
1390 create_ctescan_path(PlannerInfo *root, RelOptInfo *rel)
1392 Path *pathnode = makeNode(Path);
1394 pathnode->pathtype = T_CteScan;
1395 pathnode->parent = rel;
1396 pathnode->pathkeys = NIL; /* XXX for now, result is always unordered */
/* cost_ctescan fills in startup_cost and total_cost. */
1398 cost_ctescan(pathnode, root, rel);
1404 * create_worktablescan_path
1405 * Creates a path corresponding to a scan of a self-reference CTE,
1406 * returning the pathnode.
1409 create_worktablescan_path(PlannerInfo *root, RelOptInfo *rel)
1411 Path *pathnode = makeNode(Path);
1413 pathnode->pathtype = T_WorkTableScan;
1414 pathnode->parent = rel;
1415 pathnode->pathkeys = NIL; /* result is always unordered */
1417 /* Cost is the same as for a regular CTE scan */
1418 cost_ctescan(pathnode, root, rel);
1424 * create_foreignscan_path
1425 * Creates a path corresponding to a scan of a foreign table,
1426 * returning the pathnode.
1429 create_foreignscan_path(PlannerInfo *root, RelOptInfo *rel)
1431 ForeignPath *pathnode = makeNode(ForeignPath);
1433 FdwRoutine *fdwroutine;
1436 pathnode->path.pathtype = T_ForeignScan;
1437 pathnode->path.parent = rel;
1438 pathnode->path.pathkeys = NIL; /* result is always unordered */
1440 /* Get FDW's callback info */
1441 rte = planner_rt_fetch(rel->relid, root);
1442 fdwroutine = GetFdwRoutineByRelId(rte->relid);
1444 /* Let the FDW do its planning */
1445 fdwplan = fdwroutine->PlanForeignScan(rte->relid, root, rel);
/* Defend against a broken FDW handing back something other than an FdwPlan. */
1446 if (fdwplan == NULL || !IsA(fdwplan, FdwPlan))
1447 elog(ERROR, "foreign-data wrapper PlanForeignScan function for relation %u did not return an FdwPlan struct",
1449 pathnode->fdwplan = fdwplan;
1451 /* use costs estimated by FDW */
1452 pathnode->path.startup_cost = fdwplan->startup_cost;
1453 pathnode->path.total_cost = fdwplan->total_cost;
1459 * create_nestloop_path
1460 * Creates a pathnode corresponding to a nestloop join between two
1463 * 'joinrel' is the join relation.
1464 * 'jointype' is the type of join required
1465 * 'sjinfo' is extra info about the join for selectivity estimation
1466 * 'outer_path' is the outer path
1467 * 'inner_path' is the inner path
1468 * 'restrict_clauses' are the RestrictInfo nodes to apply at the join
1469 * 'pathkeys' are the path keys of the new join path
1471 * Returns the resulting path node.
1474 create_nestloop_path(PlannerInfo *root,
1475 RelOptInfo *joinrel,
1477 SpecialJoinInfo *sjinfo,
1480 List *restrict_clauses,
1483 NestPath *pathnode = makeNode(NestPath);
1485 pathnode->path.pathtype = T_NestLoop;
1486 pathnode->path.parent = joinrel;
1487 pathnode->jointype = jointype;
1488 pathnode->outerjoinpath = outer_path;
1489 pathnode->innerjoinpath = inner_path;
1490 pathnode->joinrestrictinfo = restrict_clauses;
1491 pathnode->path.pathkeys = pathkeys;
/* cost_nestloop fills in the path's startup_cost and total_cost. */
1493 cost_nestloop(pathnode, root, sjinfo);
1499 * create_mergejoin_path
1500 * Creates a pathnode corresponding to a mergejoin join between
1503 * 'joinrel' is the join relation
1504 * 'jointype' is the type of join required
1505 * 'sjinfo' is extra info about the join for selectivity estimation
1506 * 'outer_path' is the outer path
1507 * 'inner_path' is the inner path
1508 * 'restrict_clauses' are the RestrictInfo nodes to apply at the join
1509 * 'pathkeys' are the path keys of the new join path
1510 * 'mergeclauses' are the RestrictInfo nodes to use as merge clauses
1511 * (this should be a subset of the restrict_clauses list)
1512 * 'outersortkeys' are the sort varkeys for the outer relation
1513 * 'innersortkeys' are the sort varkeys for the inner relation
1516 create_mergejoin_path(PlannerInfo *root,
1517 RelOptInfo *joinrel,
1519 SpecialJoinInfo *sjinfo,
1522 List *restrict_clauses,
1525 List *outersortkeys,
1526 List *innersortkeys)
1528 MergePath *pathnode = makeNode(MergePath);
1531 * If the given paths are already well enough ordered, we can skip doing
1534 if (outersortkeys &&
1535 pathkeys_contained_in(outersortkeys, outer_path->pathkeys))
/* NIL sort keys signal "no explicit sort needed" to cost_mergejoin and createplan. */
1536 outersortkeys = NIL;
1537 if (innersortkeys &&
1538 pathkeys_contained_in(innersortkeys, inner_path->pathkeys))
1539 innersortkeys = NIL;
1541 pathnode->jpath.path.pathtype = T_MergeJoin;
1542 pathnode->jpath.path.parent = joinrel;
1543 pathnode->jpath.jointype = jointype;
1544 pathnode->jpath.outerjoinpath = outer_path;
1545 pathnode->jpath.innerjoinpath = inner_path;
1546 pathnode->jpath.joinrestrictinfo = restrict_clauses;
1547 pathnode->jpath.path.pathkeys = pathkeys;
1548 pathnode->path_mergeclauses = mergeclauses;
1549 pathnode->outersortkeys = outersortkeys;
1550 pathnode->innersortkeys = innersortkeys;
1551 /* pathnode->materialize_inner will be set by cost_mergejoin */
1553 cost_mergejoin(pathnode, root, sjinfo);
1559 * create_hashjoin_path
1560 * Creates a pathnode corresponding to a hash join between two relations.
1562 * 'joinrel' is the join relation
1563 * 'jointype' is the type of join required
1564 * 'sjinfo' is extra info about the join for selectivity estimation
1565 * 'outer_path' is the cheapest outer path
1566 * 'inner_path' is the cheapest inner path
1567 * 'restrict_clauses' are the RestrictInfo nodes to apply at the join
1568 * 'hashclauses' are the RestrictInfo nodes to use as hash clauses
1569 * (this should be a subset of the restrict_clauses list)
1572 create_hashjoin_path(PlannerInfo *root,
1573 RelOptInfo *joinrel,
1575 SpecialJoinInfo *sjinfo,
1578 List *restrict_clauses,
1581 HashPath *pathnode = makeNode(HashPath);
1583 pathnode->jpath.path.pathtype = T_HashJoin;
1584 pathnode->jpath.path.parent = joinrel;
1585 pathnode->jpath.jointype = jointype;
1586 pathnode->jpath.outerjoinpath = outer_path;
1587 pathnode->jpath.innerjoinpath = inner_path;
1588 pathnode->jpath.joinrestrictinfo = restrict_clauses;
1591 * A hashjoin never has pathkeys, since its output ordering is
1592 * unpredictable due to possible batching. XXX If the inner relation is
1593 * small enough, we could instruct the executor that it must not batch,
1594 * and then we could assume that the output inherits the outer relation's
1595 * ordering, which might save a sort step. However there is considerable
1596 * downside if our estimate of the inner relation size is badly off. For
1597 * the moment we don't risk it. (Note also that if we wanted to take this
1598 * seriously, joinpath.c would have to consider many more paths for the
1599 * outer rel than it does now.)
1601 pathnode->jpath.path.pathkeys = NIL;
1602 pathnode->path_hashclauses = hashclauses;
1603 /* cost_hashjoin will fill in pathnode->num_batches */
/* cost_hashjoin also fills in the path's startup_cost and total_cost. */
1605 cost_hashjoin(pathnode, root, sjinfo);