1 /*-------------------------------------------------------------------------
4 * Routines to manipulate pathlists and create path nodes
6 * Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group
7 * Portions Copyright (c) 1994, Regents of the University of California
11 * src/backend/optimizer/util/pathnode.c
13 *-------------------------------------------------------------------------
19 #include "miscadmin.h"
20 #include "nodes/nodeFuncs.h"
21 #include "optimizer/clauses.h"
22 #include "optimizer/cost.h"
23 #include "optimizer/pathnode.h"
24 #include "optimizer/paths.h"
25 #include "optimizer/planmain.h"
26 #include "optimizer/restrictinfo.h"
27 #include "optimizer/var.h"
28 #include "parser/parsetree.h"
29 #include "utils/lsyscache.h"
30 #include "utils/memutils.h"
31 #include "utils/selfuncs.h"
36 COSTS_EQUAL, /* path costs are fuzzily equal */
37 COSTS_BETTER1, /* first path is cheaper than second */
38 COSTS_BETTER2, /* second path is cheaper than first */
39 COSTS_DIFFERENT /* neither path dominates the other on cost */
42 static List *translate_sub_tlist(List *tlist, int relid);
45 /*****************************************************************************
46 * MISC. PATH UTILITIES
47 *****************************************************************************/
51 * Return -1, 0, or +1 according as path1 is cheaper, the same cost,
52 * or more expensive than path2 for the specified criterion.
55 compare_path_costs(Path *path1, Path *path2, CostSelector criterion)
57 if (criterion == STARTUP_COST)
59 if (path1->startup_cost < path2->startup_cost)
61 if (path1->startup_cost > path2->startup_cost)
65 * If paths have the same startup cost (not at all unlikely), order
68 if (path1->total_cost < path2->total_cost)
70 if (path1->total_cost > path2->total_cost)
75 if (path1->total_cost < path2->total_cost)
77 if (path1->total_cost > path2->total_cost)
81 * If paths have the same total cost, order them by startup cost.
83 if (path1->startup_cost < path2->startup_cost)
85 if (path1->startup_cost > path2->startup_cost)
92 * compare_path_fractional_costs
93 * Return -1, 0, or +1 according as path1 is cheaper, the same cost,
94 * or more expensive than path2 for fetching the specified fraction
95 * of the total tuples.
97 * If fraction is <= 0 or > 1, we interpret it as 1, ie, we select the
98 * path with the cheaper total_cost.
101 compare_fractional_path_costs(Path *path1, Path *path2,
107 if (fraction <= 0.0 || fraction >= 1.0)
108 return compare_path_costs(path1, path2, TOTAL_COST);
109 cost1 = path1->startup_cost +
110 fraction * (path1->total_cost - path1->startup_cost);
111 cost2 = path2->startup_cost +
112 fraction * (path2->total_cost - path2->startup_cost);
121 * compare_path_costs_fuzzily
122 * Compare the costs of two paths to see if either can be said to
123 * dominate the other.
125 * We use fuzzy comparisons so that add_path() can avoid keeping both of
126 * a pair of paths that really have insignificantly different cost.
128 * The fuzz_factor argument must be 1.0 plus delta, where delta is the
129 * fraction of the smaller cost that is considered to be a significant
130 * difference. For example, fuzz_factor = 1.01 makes the fuzziness limit
131 * be 1% of the smaller cost.
133 * The two paths are said to have "equal" costs if both startup and total
134 * costs are fuzzily the same. Path1 is said to be better than path2 if
135 * it has fuzzily better startup cost and fuzzily no worse total cost,
136 * or if it has fuzzily better total cost and fuzzily no worse startup cost.
137 * Path2 is better than path1 if the reverse holds. Finally, if one path
138 * is fuzzily better than the other on startup cost and fuzzily worse on
139 * total cost, we just say that their costs are "different", since neither
140 * dominates the other across the whole performance spectrum.
142 * If consider_startup is false, then we don't care about keeping paths with
143 * good startup cost, so we'll never return COSTS_DIFFERENT.
145 * This function also includes special hacks to support a policy enforced
146 * by its sole caller, add_path(): paths that have any parameterization
147 * cannot win comparisons on the grounds of having cheaper startup cost,
148 * since we deem only total cost to be of interest for a parameterized path.
149 * (Unparameterized paths are more common, so we check for this case last.)
151 static PathCostComparison
152 compare_path_costs_fuzzily(Path *path1, Path *path2, double fuzz_factor,
153 bool consider_startup)
156 * Check total cost first since it's more likely to be different; many
157 * paths have zero startup cost.
159 if (path1->total_cost > path2->total_cost * fuzz_factor)
161 /* path1 fuzzily worse on total cost */
162 if (consider_startup &&
163 path2->startup_cost > path1->startup_cost * fuzz_factor &&
164 path1->param_info == NULL)
166 /* ... but path2 fuzzily worse on startup, so DIFFERENT */
167 return COSTS_DIFFERENT;
169 /* else path2 dominates */
170 return COSTS_BETTER2;
172 if (path2->total_cost > path1->total_cost * fuzz_factor)
174 /* path2 fuzzily worse on total cost */
175 if (consider_startup &&
176 path1->startup_cost > path2->startup_cost * fuzz_factor &&
177 path2->param_info == NULL)
179 /* ... but path1 fuzzily worse on startup, so DIFFERENT */
180 return COSTS_DIFFERENT;
182 /* else path1 dominates */
183 return COSTS_BETTER1;
185 /* fuzzily the same on total cost */
186 /* (so we may as well compare startup cost, even if !consider_startup) */
187 if (path1->startup_cost > path2->startup_cost * fuzz_factor &&
188 path2->param_info == NULL)
190 /* ... but path1 fuzzily worse on startup, so path2 wins */
191 return COSTS_BETTER2;
193 if (path2->startup_cost > path1->startup_cost * fuzz_factor &&
194 path1->param_info == NULL)
196 /* ... but path2 fuzzily worse on startup, so path1 wins */
197 return COSTS_BETTER1;
199 /* fuzzily the same on both costs */
205 * Find the minimum-cost paths from among a relation's paths,
206 * and save them in the rel's cheapest-path fields.
208 * cheapest_total_path is normally the cheapest-total-cost unparameterized
209 * path; but if there are no unparameterized paths, we assign it to be the
210 * best (cheapest least-parameterized) parameterized path. However, only
211 * unparameterized paths are considered candidates for cheapest_startup_path,
212 * so that will be NULL if there are no unparameterized paths.
214 * The cheapest_parameterized_paths list collects all parameterized paths
215 * that have survived the add_path() tournament for this relation. (Since
216 * add_path ignores pathkeys and startup cost for a parameterized path,
217 * these will be paths that have best total cost or best row count for their
218 * parameterization.) cheapest_parameterized_paths always includes the
219 * cheapest-total unparameterized path, too, if there is one; the users of
220 * that list find it more convenient if that's included.
222 * This is normally called only after we've finished constructing the path
223 * list for the rel node.
226 set_cheapest(RelOptInfo *parent_rel)
228 Path *cheapest_startup_path;
229 Path *cheapest_total_path;
230 Path *best_param_path;
231 List *parameterized_paths;
234 Assert(IsA(parent_rel, RelOptInfo));
236 if (parent_rel->pathlist == NIL)
237 elog(ERROR, "could not devise a query plan for the given query");
239 cheapest_startup_path = cheapest_total_path = best_param_path = NULL;
240 parameterized_paths = NIL;
242 foreach(p, parent_rel->pathlist)
244 Path *path = (Path *) lfirst(p);
247 if (path->param_info)
249 /* Parameterized path, so add it to parameterized_paths */
250 parameterized_paths = lappend(parameterized_paths, path);
253 * If we have an unparameterized cheapest-total, we no longer care
254 * about finding the best parameterized path, so move on.
256 if (cheapest_total_path)
260 * Otherwise, track the best parameterized path, which is the one
261 * with least total cost among those of the minimum
264 if (best_param_path == NULL)
265 best_param_path = path;
268 switch (bms_subset_compare(PATH_REQ_OUTER(path),
269 PATH_REQ_OUTER(best_param_path)))
272 /* keep the cheaper one */
273 if (compare_path_costs(path, best_param_path,
275 best_param_path = path;
278 /* new path is less-parameterized */
279 best_param_path = path;
282 /* old path is less-parameterized, keep it */
287 * This means that neither path has the least possible
288 * parameterization for the rel. We'll sit on the old
289 * path until something better comes along.
297 /* Unparameterized path, so consider it for cheapest slots */
298 if (cheapest_total_path == NULL)
300 cheapest_startup_path = cheapest_total_path = path;
305 * If we find two paths of identical costs, try to keep the
306 * better-sorted one. The paths might have unrelated sort
307 * orderings, in which case we can only guess which might be
308 * better to keep, but if one is superior then we definitely
309 * should keep that one.
311 cmp = compare_path_costs(cheapest_startup_path, path, STARTUP_COST);
314 compare_pathkeys(cheapest_startup_path->pathkeys,
315 path->pathkeys) == PATHKEYS_BETTER2))
316 cheapest_startup_path = path;
318 cmp = compare_path_costs(cheapest_total_path, path, TOTAL_COST);
321 compare_pathkeys(cheapest_total_path->pathkeys,
322 path->pathkeys) == PATHKEYS_BETTER2))
323 cheapest_total_path = path;
327 /* Add cheapest unparameterized path, if any, to parameterized_paths */
328 if (cheapest_total_path)
329 parameterized_paths = lcons(cheapest_total_path, parameterized_paths);
332 * If there is no unparameterized path, use the best parameterized path as
333 * cheapest_total_path (but not as cheapest_startup_path).
335 if (cheapest_total_path == NULL)
336 cheapest_total_path = best_param_path;
337 Assert(cheapest_total_path != NULL);
339 parent_rel->cheapest_startup_path = cheapest_startup_path;
340 parent_rel->cheapest_total_path = cheapest_total_path;
341 parent_rel->cheapest_unique_path = NULL; /* computed only if needed */
342 parent_rel->cheapest_parameterized_paths = parameterized_paths;
347 * Consider a potential implementation path for the specified parent rel,
348 * and add it to the rel's pathlist if it is worthy of consideration.
349 * A path is worthy if it has a better sort order (better pathkeys) or
350 * cheaper cost (on either dimension), or generates fewer rows, than any
351 * existing path that has the same or superset parameterization rels.
353 * We also remove from the rel's pathlist any old paths that are dominated
354 * by new_path --- that is, new_path is cheaper, at least as well ordered,
355 * generates no more rows, and requires no outer rels not required by the
358 * In most cases, a path with a superset parameterization will generate
359 * fewer rows (since it has more join clauses to apply), so that those two
360 * figures of merit move in opposite directions; this means that a path of
361 * one parameterization can seldom dominate a path of another. But such
362 * cases do arise, so we make the full set of checks anyway.
364 * There are two policy decisions embedded in this function, along with
365 * its sibling add_path_precheck: we treat all parameterized paths as
366 * having NIL pathkeys, and we ignore their startup costs, so that they
367 * compete only on parameterization, total cost and rowcount. This is to
368 * reduce the number of parameterized paths that are kept. See discussion
369 * in src/backend/optimizer/README.
371 * Another policy that is enforced here is that we only consider cheap
372 * startup cost to be interesting if parent_rel->consider_startup is true.
374 * The pathlist is kept sorted by total_cost, with cheaper paths
375 * at the front. Within this routine, that's simply a speed hack:
376 * doing it that way makes it more likely that we will reject an inferior
377 * path after a few comparisons, rather than many comparisons.
378 * However, add_path_precheck relies on this ordering to exit early
381 * NOTE: discarded Path objects are immediately pfree'd to reduce planner
382 * memory consumption. We dare not try to free the substructure of a Path,
383 * since much of it may be shared with other Paths or the query tree itself;
384 * but just recycling discarded Path nodes is a very useful savings in
385 * a large join tree. We can recycle the List nodes of pathlist, too.
387 * BUT: we do not pfree IndexPath objects, since they may be referenced as
388 * children of BitmapHeapPaths as well as being paths in their own right.
390 * 'parent_rel' is the relation entry to which the path corresponds.
391 * 'new_path' is a potential path for parent_rel.
393 * Returns nothing, but modifies parent_rel->pathlist.
396 add_path(RelOptInfo *parent_rel, Path *new_path)
398 bool accept_new = true; /* unless we find a superior old path */
399 ListCell *insert_after = NULL; /* where to insert new item */
400 List *new_path_pathkeys;
406 * This is a convenient place to check for query cancel --- no part of the
407 * planner goes very long without calling add_path().
409 CHECK_FOR_INTERRUPTS();
411 /* Pretend parameterized paths have no pathkeys, per comment above */
412 new_path_pathkeys = new_path->param_info ? NIL : new_path->pathkeys;
415 * Loop to check proposed new path against old paths. Note it is possible
416 * for more than one old path to be tossed out because new_path dominates
419 * We can't use foreach here because the loop body may delete the current
423 for (p1 = list_head(parent_rel->pathlist); p1 != NULL; p1 = p1_next)
425 Path *old_path = (Path *) lfirst(p1);
426 bool remove_old = false; /* unless new proves superior */
427 PathCostComparison costcmp;
428 PathKeysComparison keyscmp;
429 BMS_Comparison outercmp;
434 * Do a fuzzy cost comparison with 1% fuzziness limit. (XXX does this
435 * percentage need to be user-configurable?)
437 costcmp = compare_path_costs_fuzzily(new_path, old_path, 1.01,
438 parent_rel->consider_startup);
441 * If the two paths compare differently for startup and total cost,
442 * then we want to keep both, and we can skip comparing pathkeys and
443 * required_outer rels. If they compare the same, proceed with the
444 * other comparisons. Row count is checked last. (We make the tests
445 * in this order because the cost comparison is most likely to turn
446 * out "different", and the pathkeys comparison next most likely. As
447 * explained above, row count very seldom makes a difference, so even
448 * though it's cheap to compare there's not much point in checking it
451 if (costcmp != COSTS_DIFFERENT)
453 /* Similarly check to see if either dominates on pathkeys */
454 List *old_path_pathkeys;
456 old_path_pathkeys = old_path->param_info ? NIL : old_path->pathkeys;
457 keyscmp = compare_pathkeys(new_path_pathkeys,
459 if (keyscmp != PATHKEYS_DIFFERENT)
464 outercmp = bms_subset_compare(PATH_REQ_OUTER(new_path),
465 PATH_REQ_OUTER(old_path));
466 if (keyscmp == PATHKEYS_BETTER1)
468 if ((outercmp == BMS_EQUAL ||
469 outercmp == BMS_SUBSET1) &&
470 new_path->rows <= old_path->rows)
471 remove_old = true; /* new dominates old */
473 else if (keyscmp == PATHKEYS_BETTER2)
475 if ((outercmp == BMS_EQUAL ||
476 outercmp == BMS_SUBSET2) &&
477 new_path->rows >= old_path->rows)
478 accept_new = false; /* old dominates new */
480 else /* keyscmp == PATHKEYS_EQUAL */
482 if (outercmp == BMS_EQUAL)
485 * Same pathkeys and outer rels, and fuzzily
486 * the same cost, so keep just one; to decide
487 * which, first check rows and then do a fuzzy
488 * cost comparison with very small fuzz limit.
489 * (We used to do an exact cost comparison,
490 * but that results in annoying
491 * platform-specific plan variations due to
492 * roundoff in the cost estimates.) If things
493 * are still tied, arbitrarily keep only the
494 * old path. Notice that we will keep only
495 * the old path even if the less-fuzzy
496 * comparison decides the startup and total
497 * costs compare differently.
499 if (new_path->rows < old_path->rows)
500 remove_old = true; /* new dominates old */
501 else if (new_path->rows > old_path->rows)
502 accept_new = false; /* old dominates new */
503 else if (compare_path_costs_fuzzily(new_path,
506 parent_rel->consider_startup) == COSTS_BETTER1)
507 remove_old = true; /* new dominates old */
509 accept_new = false; /* old equals or
512 else if (outercmp == BMS_SUBSET1 &&
513 new_path->rows <= old_path->rows)
514 remove_old = true; /* new dominates old */
515 else if (outercmp == BMS_SUBSET2 &&
516 new_path->rows >= old_path->rows)
517 accept_new = false; /* old dominates new */
518 /* else different parameterizations, keep both */
522 if (keyscmp != PATHKEYS_BETTER2)
524 outercmp = bms_subset_compare(PATH_REQ_OUTER(new_path),
525 PATH_REQ_OUTER(old_path));
526 if ((outercmp == BMS_EQUAL ||
527 outercmp == BMS_SUBSET1) &&
528 new_path->rows <= old_path->rows)
529 remove_old = true; /* new dominates old */
533 if (keyscmp != PATHKEYS_BETTER1)
535 outercmp = bms_subset_compare(PATH_REQ_OUTER(new_path),
536 PATH_REQ_OUTER(old_path));
537 if ((outercmp == BMS_EQUAL ||
538 outercmp == BMS_SUBSET2) &&
539 new_path->rows >= old_path->rows)
540 accept_new = false; /* old dominates new */
543 case COSTS_DIFFERENT:
546 * can't get here, but keep this case to keep compiler
555 * Remove current element from pathlist if dominated by new.
559 parent_rel->pathlist = list_delete_cell(parent_rel->pathlist,
563 * Delete the data pointed-to by the deleted cell, if possible
565 if (!IsA(old_path, IndexPath))
567 /* p1_prev does not advance */
571 /* new belongs after this old path if it has cost >= old's */
572 if (new_path->total_cost >= old_path->total_cost)
574 /* p1_prev advances */
579 * If we found an old path that dominates new_path, we can quit
580 * scanning the pathlist; we will not add new_path, and we assume
581 * new_path cannot dominate any other elements of the pathlist.
589 /* Accept the new path: insert it at proper place in pathlist */
591 lappend_cell(parent_rel->pathlist, insert_after, new_path);
593 parent_rel->pathlist = lcons(new_path, parent_rel->pathlist);
597 /* Reject and recycle the new path */
598 if (!IsA(new_path, IndexPath))
605 * Check whether a proposed new path could possibly get accepted.
606 * We assume we know the path's pathkeys and parameterization accurately,
607 * and have lower bounds for its costs.
609 * Note that we do not know the path's rowcount, since getting an estimate for
610 * that is too expensive to do before prechecking. We assume here that paths
611 * of a superset parameterization will generate fewer rows; if that holds,
612 * then paths with different parameterizations cannot dominate each other
613 * and so we can simply ignore existing paths of another parameterization.
614 * (In the infrequent cases where that rule of thumb fails, add_path will
615 * get rid of the inferior path.)
617 * At the time this is called, we haven't actually built a Path structure,
618 * so the required information has to be passed piecemeal.
621 add_path_precheck(RelOptInfo *parent_rel,
622 Cost startup_cost, Cost total_cost,
623 List *pathkeys, Relids required_outer)
625 List *new_path_pathkeys;
628 /* Pretend parameterized paths have no pathkeys, per add_path policy */
629 new_path_pathkeys = required_outer ? NIL : pathkeys;
631 foreach(p1, parent_rel->pathlist)
633 Path *old_path = (Path *) lfirst(p1);
634 PathKeysComparison keyscmp;
637 * We are looking for an old_path with the same parameterization (and
638 * by assumption the same rowcount) that dominates the new path on
639 * pathkeys as well as both cost metrics. If we find one, we can
640 * reject the new path.
642 * For speed, we make exact rather than fuzzy cost comparisons. If an
643 * old path dominates the new path exactly on both costs, it will
644 * surely do so fuzzily.
646 if (total_cost >= old_path->total_cost)
648 /* can win on startup cost only if unparameterized */
649 if (startup_cost >= old_path->startup_cost || required_outer)
651 /* new path does not win on cost, so check pathkeys... */
652 List *old_path_pathkeys;
654 old_path_pathkeys = old_path->param_info ? NIL : old_path->pathkeys;
655 keyscmp = compare_pathkeys(new_path_pathkeys,
657 if (keyscmp == PATHKEYS_EQUAL ||
658 keyscmp == PATHKEYS_BETTER2)
660 /* new path does not win on pathkeys... */
661 if (bms_equal(required_outer, PATH_REQ_OUTER(old_path)))
663 /* Found an old path that dominates the new one */
672 * Since the pathlist is sorted by total_cost, we can stop looking
673 * once we reach a path with a total_cost larger than the new
684 /*****************************************************************************
685 * PATH NODE CREATION ROUTINES
686 *****************************************************************************/
689 * create_seqscan_path
690 * Creates a path corresponding to a sequential scan, returning the
694 create_seqscan_path(PlannerInfo *root, RelOptInfo *rel, Relids required_outer)
696 Path *pathnode = makeNode(Path);
698 pathnode->pathtype = T_SeqScan;
699 pathnode->parent = rel;
700 pathnode->param_info = get_baserel_parampathinfo(root, rel,
702 pathnode->pathkeys = NIL; /* seqscan has unordered result */
704 cost_seqscan(pathnode, root, rel, pathnode->param_info);
711 * Creates a path node for an index scan.
713 * 'index' is a usable index.
714 * 'indexclauses' is a list of RestrictInfo nodes representing clauses
715 * to be used as index qual conditions in the scan.
716 * 'indexclausecols' is an integer list of index column numbers (zero based)
717 * the indexclauses can be used with.
718 * 'indexorderbys' is a list of bare expressions (no RestrictInfos)
719 * to be used as index ordering operators in the scan.
720 * 'indexorderbycols' is an integer list of index column numbers (zero based)
721 * the ordering operators can be used with.
722 * 'pathkeys' describes the ordering of the path.
723 * 'indexscandir' is ForwardScanDirection or BackwardScanDirection
724 * for an ordered index, or NoMovementScanDirection for
725 * an unordered index.
726 * 'indexonly' is true if an index-only scan is wanted.
727 * 'required_outer' is the set of outer relids for a parameterized path.
728 * 'loop_count' is the number of repetitions of the indexscan to factor into
729 * estimates of caching behavior.
731 * Returns the new path node.
734 create_index_path(PlannerInfo *root,
737 List *indexclausecols,
739 List *indexorderbycols,
741 ScanDirection indexscandir,
743 Relids required_outer,
746 IndexPath *pathnode = makeNode(IndexPath);
747 RelOptInfo *rel = index->rel;
751 pathnode->path.pathtype = indexonly ? T_IndexOnlyScan : T_IndexScan;
752 pathnode->path.parent = rel;
753 pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
755 pathnode->path.pathkeys = pathkeys;
757 /* Convert clauses to indexquals the executor can handle */
758 expand_indexqual_conditions(index, indexclauses, indexclausecols,
759 &indexquals, &indexqualcols);
761 /* Fill in the pathnode */
762 pathnode->indexinfo = index;
763 pathnode->indexclauses = indexclauses;
764 pathnode->indexquals = indexquals;
765 pathnode->indexqualcols = indexqualcols;
766 pathnode->indexorderbys = indexorderbys;
767 pathnode->indexorderbycols = indexorderbycols;
768 pathnode->indexscandir = indexscandir;
770 cost_index(pathnode, root, loop_count);
776 * create_bitmap_heap_path
777 * Creates a path node for a bitmap scan.
779 * 'bitmapqual' is a tree of IndexPath, BitmapAndPath, and BitmapOrPath nodes.
780 * 'required_outer' is the set of outer relids for a parameterized path.
781 * 'loop_count' is the number of repetitions of the indexscan to factor into
782 * estimates of caching behavior.
784 * loop_count should match the value used when creating the component
788 create_bitmap_heap_path(PlannerInfo *root,
791 Relids required_outer,
794 BitmapHeapPath *pathnode = makeNode(BitmapHeapPath);
796 pathnode->path.pathtype = T_BitmapHeapScan;
797 pathnode->path.parent = rel;
798 pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
800 pathnode->path.pathkeys = NIL; /* always unordered */
802 pathnode->bitmapqual = bitmapqual;
804 cost_bitmap_heap_scan(&pathnode->path, root, rel,
805 pathnode->path.param_info,
806 bitmapqual, loop_count);
812 * create_bitmap_and_path
813 * Creates a path node representing a BitmapAnd.
816 create_bitmap_and_path(PlannerInfo *root,
820 BitmapAndPath *pathnode = makeNode(BitmapAndPath);
822 pathnode->path.pathtype = T_BitmapAnd;
823 pathnode->path.parent = rel;
824 pathnode->path.param_info = NULL; /* not used in bitmap trees */
825 pathnode->path.pathkeys = NIL; /* always unordered */
827 pathnode->bitmapquals = bitmapquals;
829 /* this sets bitmapselectivity as well as the regular cost fields: */
830 cost_bitmap_and_node(pathnode, root);
836 * create_bitmap_or_path
837 * Creates a path node representing a BitmapOr.
840 create_bitmap_or_path(PlannerInfo *root,
844 BitmapOrPath *pathnode = makeNode(BitmapOrPath);
846 pathnode->path.pathtype = T_BitmapOr;
847 pathnode->path.parent = rel;
848 pathnode->path.param_info = NULL; /* not used in bitmap trees */
849 pathnode->path.pathkeys = NIL; /* always unordered */
851 pathnode->bitmapquals = bitmapquals;
853 /* this sets bitmapselectivity as well as the regular cost fields: */
854 cost_bitmap_or_node(pathnode, root);
860 * create_tidscan_path
861 * Creates a path corresponding to a scan by TID, returning the pathnode.
864 create_tidscan_path(PlannerInfo *root, RelOptInfo *rel, List *tidquals,
865 Relids required_outer)
867 TidPath *pathnode = makeNode(TidPath);
869 pathnode->path.pathtype = T_TidScan;
870 pathnode->path.parent = rel;
871 pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
873 pathnode->path.pathkeys = NIL; /* always unordered */
875 pathnode->tidquals = tidquals;
877 cost_tidscan(&pathnode->path, root, rel, tidquals,
878 pathnode->path.param_info);
885 * Creates a path corresponding to an Append plan, returning the
888 * Note that we must handle subpaths = NIL, representing a dummy access path.
891 create_append_path(RelOptInfo *rel, List *subpaths, Relids required_outer)
893 AppendPath *pathnode = makeNode(AppendPath);
896 pathnode->path.pathtype = T_Append;
897 pathnode->path.parent = rel;
898 pathnode->path.param_info = get_appendrel_parampathinfo(rel,
900 pathnode->path.pathkeys = NIL; /* result is always considered
902 pathnode->subpaths = subpaths;
905 * We don't bother with inventing a cost_append(), but just do it here.
907 * Compute rows and costs as sums of subplan rows and costs. We charge
908 * nothing extra for the Append itself, which perhaps is too optimistic,
909 * but since it doesn't do any selection or projection, it is a pretty
910 * cheap node. If you change this, see also make_append().
912 pathnode->path.rows = 0;
913 pathnode->path.startup_cost = 0;
914 pathnode->path.total_cost = 0;
917 Path *subpath = (Path *) lfirst(l);
919 pathnode->path.rows += subpath->rows;
921 if (l == list_head(subpaths)) /* first node? */
922 pathnode->path.startup_cost = subpath->startup_cost;
923 pathnode->path.total_cost += subpath->total_cost;
925 /* All child paths must have same parameterization */
926 Assert(bms_equal(PATH_REQ_OUTER(subpath), required_outer));
933 * create_merge_append_path
934 * Creates a path corresponding to a MergeAppend plan, returning the
938 create_merge_append_path(PlannerInfo *root,
942 Relids required_outer)
944 MergeAppendPath *pathnode = makeNode(MergeAppendPath);
945 Cost input_startup_cost;
946 Cost input_total_cost;
949 pathnode->path.pathtype = T_MergeAppend;
950 pathnode->path.parent = rel;
951 pathnode->path.param_info = get_appendrel_parampathinfo(rel,
953 pathnode->path.pathkeys = pathkeys;
954 pathnode->subpaths = subpaths;
957 * Apply query-wide LIMIT if known and path is for sole base relation.
958 * (Handling this at this low level is a bit klugy.)
960 if (bms_equal(rel->relids, root->all_baserels))
961 pathnode->limit_tuples = root->limit_tuples;
963 pathnode->limit_tuples = -1.0;
966 * Add up the sizes and costs of the input paths.
968 pathnode->path.rows = 0;
969 input_startup_cost = 0;
970 input_total_cost = 0;
973 Path *subpath = (Path *) lfirst(l);
975 pathnode->path.rows += subpath->rows;
977 if (pathkeys_contained_in(pathkeys, subpath->pathkeys))
979 /* Subpath is adequately ordered, we won't need to sort it */
980 input_startup_cost += subpath->startup_cost;
981 input_total_cost += subpath->total_cost;
985 /* We'll need to insert a Sort node, so include cost for that */
986 Path sort_path; /* dummy for result of cost_sort */
988 cost_sort(&sort_path,
992 subpath->parent->tuples,
993 subpath->parent->width,
996 pathnode->limit_tuples);
997 input_startup_cost += sort_path.startup_cost;
998 input_total_cost += sort_path.total_cost;
1001 /* All child paths must have same parameterization */
1002 Assert(bms_equal(PATH_REQ_OUTER(subpath), required_outer));
1005 /* Now we can compute total costs of the MergeAppend */
1006 cost_merge_append(&pathnode->path, root,
1007 pathkeys, list_length(subpaths),
1008 input_startup_cost, input_total_cost,
1015 * create_result_path
1016 * Creates a path representing a Result-and-nothing-else plan.
1017 * This is only used for the case of a query with an empty jointree.
1020 create_result_path(List *quals)
1022 ResultPath *pathnode = makeNode(ResultPath);
1024 pathnode->path.pathtype = T_Result;
1025 pathnode->path.parent = NULL;
1026 pathnode->path.param_info = NULL; /* there are no other rels... */
1027 pathnode->path.pathkeys = NIL;
1028 pathnode->quals = quals;
1030 /* Hardly worth defining a cost_result() function ... just do it */
1031 pathnode->path.rows = 1;
1032 pathnode->path.startup_cost = 0;
1033 pathnode->path.total_cost = cpu_tuple_cost;
1036 * In theory we should include the qual eval cost as well, but at present
1037 * that doesn't accomplish much except duplicate work that will be done
1038 * again in make_result; since this is only used for degenerate cases,
1039 * nothing interesting will be done with the path cost values...
1046 * create_material_path
1047 * Creates a path corresponding to a Material plan, returning the
1051 create_material_path(RelOptInfo *rel, Path *subpath)
1053 MaterialPath *pathnode = makeNode(MaterialPath);
1055 Assert(subpath->parent == rel);
1057 pathnode->path.pathtype = T_Material;
1058 pathnode->path.parent = rel;
1059 pathnode->path.param_info = subpath->param_info;
1060 pathnode->path.pathkeys = subpath->pathkeys;
1062 pathnode->subpath = subpath;
1064 cost_material(&pathnode->path,
1065 subpath->startup_cost,
1066 subpath->total_cost,
1074 * create_unique_path
1075 * Creates a path representing elimination of distinct rows from the
1076 * input data. Distinct-ness is defined according to the needs of the
1077 * semijoin represented by sjinfo. If it is not possible to identify
1078 * how to make the data unique, NULL is returned.
1080 * If used at all, this is likely to be called repeatedly on the same rel;
1081 * and the input subpath should always be the same (the cheapest_total path
1082 * for the rel). So we cache the result.
1085 create_unique_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath,
1086 SpecialJoinInfo *sjinfo)
1088 UniquePath *pathnode;
1089 Path sort_path; /* dummy for result of cost_sort */
1090 Path agg_path; /* dummy for result of cost_agg */
1091 MemoryContext oldcontext;
/* NOTE(review): several local declarations (the qual-loop variables such
 * as opno/left_expr/right_expr, the in_operators/uniq_exprs lists, the
 * all_btree/all_hash flags, numCols) plus braces are missing from this
 * extraction -- the code below still references them. */
1099 /* Caller made a mistake if subpath isn't cheapest_total ... */
1100 Assert(subpath == rel->cheapest_total_path);
1101 Assert(subpath->parent == rel);
1102 /* ... or if SpecialJoinInfo is the wrong one */
1103 Assert(sjinfo->jointype == JOIN_SEMI);
1104 Assert(bms_equal(rel->relids, sjinfo->syn_righthand));
1106 /* If result already cached, return it */
1107 if (rel->cheapest_unique_path)
1108 return (UniquePath *) rel->cheapest_unique_path;
/* join_quals == NIL doubles as a "we already failed here" marker; it is
 * set at the no_unique_path exit below. */
1110 /* If we previously failed, return NULL quickly */
1111 if (sjinfo->join_quals == NIL)
1115 * We must ensure path struct and subsidiary data are allocated in main
1116 * planning context; otherwise GEQO memory management causes trouble.
1118 oldcontext = MemoryContextSwitchTo(root->planner_cxt);
1121 * Look to see whether the semijoin's join quals consist of AND'ed
1122 * equality operators, with (only) RHS variables on only one side of
1123 * each one. If so, we can figure out how to enforce uniqueness for
1126 * Note that the input join_quals list is the list of quals that are
1127 * *syntactically* associated with the semijoin, which in practice means
1128 * the synthesized comparison list for an IN or the WHERE of an EXISTS.
1129 * Particularly in the latter case, it might contain clauses that aren't
1130 * *semantically* associated with the join, but refer to just one side or
1131 * the other. We can ignore such clauses here, as they will just drop
1132 * down to be processed within one side or the other. (It is okay to
1133 * consider only the syntactically-associated clauses here because for a
1134 * semijoin, no higher-level quals could refer to the RHS, and so there
1135 * can be no other quals that are semantically associated with this join.
1136 * We do things this way because it is useful to be able to run this test
1137 * before we have extracted the list of quals that are actually
1138 * semantically associated with the particular join.)
1140 * Note that the in_operators list consists of the joinqual operators
1141 * themselves (but commuted if needed to put the RHS value on the right).
1142 * These could be cross-type operators, in which case the operator
1143 * actually needed for uniqueness is a related single-type operator.
1144 * We assume here that that operator will be available from the btree
1145 * or hash opclass when the time comes ... if not, create_unique_plan()
1152 all_hash = enable_hashagg; /* don't consider hash if not enabled */
/* Scan every joinqual; any qual that can't be handled sends us to the
 * failure exit via goto no_unique_path. */
1153 foreach(lc, sjinfo->join_quals)
1155 OpExpr *op = (OpExpr *) lfirst(lc);
1160 Relids right_varnos;
1164 /* Is it a binary opclause? */
1165 if (!IsA(op, OpExpr) ||
1166 list_length(op->args) != 2)
1168 /* No, but does it reference both sides? */
1169 all_varnos = pull_varnos((Node *) op);
1170 if (!bms_overlap(all_varnos, sjinfo->syn_righthand) ||
1171 bms_is_subset(all_varnos, sjinfo->syn_righthand))
1174 * Clause refers to only one rel, so ignore it --- unless it
1175 * contains volatile functions, in which case we'd better
1178 if (contain_volatile_functions((Node *) op))
1179 goto no_unique_path;
1182 /* Non-operator clause referencing both sides, must punt */
1183 goto no_unique_path;
1186 /* Extract data from binary opclause */
1188 left_expr = linitial(op->args);
1189 right_expr = lsecond(op->args);
1190 left_varnos = pull_varnos(left_expr);
1191 right_varnos = pull_varnos(right_expr);
1192 all_varnos = bms_union(left_varnos, right_varnos);
1193 opinputtype = exprType(left_expr);
1195 /* Does it reference both sides? */
1196 if (!bms_overlap(all_varnos, sjinfo->syn_righthand) ||
1197 bms_is_subset(all_varnos, sjinfo->syn_righthand))
1200 * Clause refers to only one rel, so ignore it --- unless it
1201 * contains volatile functions, in which case we'd better punt.
1203 if (contain_volatile_functions((Node *) op))
1204 goto no_unique_path;
/* Decide which operand is the RHS (inner-side) expression; commute the
 * operator when the RHS value is on the left. */
1208 /* check rel membership of arguments */
1209 if (!bms_is_empty(right_varnos) &&
1210 bms_is_subset(right_varnos, sjinfo->syn_righthand) &&
1211 !bms_overlap(left_varnos, sjinfo->syn_righthand))
1213 /* typical case, right_expr is RHS variable */
1215 else if (!bms_is_empty(left_varnos) &&
1216 bms_is_subset(left_varnos, sjinfo->syn_righthand) &&
1217 !bms_overlap(right_varnos, sjinfo->syn_righthand))
1219 /* flipped case, left_expr is RHS variable */
1220 opno = get_commutator(opno);
1221 if (!OidIsValid(opno))
1222 goto no_unique_path;
1223 right_expr = left_expr;
1226 goto no_unique_path;
1228 /* all operators must be btree equality or hash equality */
1231 /* oprcanmerge is considered a hint... */
1232 if (!op_mergejoinable(opno, opinputtype) ||
1233 get_mergejoin_opfamilies(opno) == NIL)
1238 /* ... but oprcanhash had better be correct */
1239 if (!op_hashjoinable(opno, opinputtype))
/* If neither sort-based nor hash-based uniqueness is still viable, give
 * up entirely. */
1242 if (!(all_btree || all_hash))
1243 goto no_unique_path;
1245 /* so far so good, keep building lists */
1246 in_operators = lappend_oid(in_operators, opno);
1247 uniq_exprs = lappend(uniq_exprs, copyObject(right_expr));
1250 /* Punt if we didn't find at least one column to unique-ify */
1251 if (uniq_exprs == NIL)
1252 goto no_unique_path;
1255 * The expressions we'd need to unique-ify mustn't be volatile.
1257 if (contain_volatile_functions((Node *) uniq_exprs))
1258 goto no_unique_path;
1261 * If we get here, we can unique-ify using at least one of sorting and
1262 * hashing. Start building the result Path object.
1264 pathnode = makeNode(UniquePath);
1266 pathnode->path.pathtype = T_Unique;
1267 pathnode->path.parent = rel;
1268 pathnode->path.param_info = subpath->param_info;
1271 * Assume the output is unsorted, since we don't necessarily have pathkeys
1272 * to represent it. (This might get overridden below.)
1274 pathnode->path.pathkeys = NIL;
1276 pathnode->subpath = subpath;
1277 pathnode->in_operators = in_operators;
1278 pathnode->uniq_exprs = uniq_exprs;
1281 * If the input is a relation and it has a unique index that proves the
1282 * uniq_exprs are unique, then we don't need to do anything. Note that
1283 * relation_has_unique_index_for automatically considers restriction
1284 * clauses for the rel, as well.
1286 if (rel->rtekind == RTE_RELATION && all_btree &&
1287 relation_has_unique_index_for(root, rel, NIL,
1288 uniq_exprs, in_operators))
/* NOOP case: the input is already unique, so the Unique path simply
 * mirrors the subpath's rows, costs and ordering. */
1290 pathnode->umethod = UNIQUE_PATH_NOOP;
1291 pathnode->path.rows = rel->rows;
1292 pathnode->path.startup_cost = subpath->startup_cost;
1293 pathnode->path.total_cost = subpath->total_cost;
1294 pathnode->path.pathkeys = subpath->pathkeys;
1296 rel->cheapest_unique_path = (Path *) pathnode;
1298 MemoryContextSwitchTo(oldcontext);
1304 * If the input is a subquery whose output must be unique already, then we
1305 * don't need to do anything. The test for uniqueness has to consider
1306 * exactly which columns we are extracting; for example "SELECT DISTINCT
1307 * x,y" doesn't guarantee that x alone is distinct. So we cannot check for
1308 * this optimization unless uniq_exprs consists only of simple Vars
1309 * referencing subquery outputs. (Possibly we could do something with
1310 * expressions in the subquery outputs, too, but for now keep it simple.)
1312 if (rel->rtekind == RTE_SUBQUERY)
1314 RangeTblEntry *rte = planner_rt_fetch(rel->relid, root);
1316 if (query_supports_distinctness(rte->subquery))
1318 List *sub_tlist_colnos;
/* translate_sub_tlist returns NIL unless every uniq_expr is a simple
 * Var on this rel -- see that helper below. */
1320 sub_tlist_colnos = translate_sub_tlist(uniq_exprs, rel->relid);
1322 if (sub_tlist_colnos &&
1323 query_is_distinct_for(rte->subquery,
1324 sub_tlist_colnos, in_operators))
1326 pathnode->umethod = UNIQUE_PATH_NOOP;
1327 pathnode->path.rows = rel->rows;
1328 pathnode->path.startup_cost = subpath->startup_cost;
1329 pathnode->path.total_cost = subpath->total_cost;
1330 pathnode->path.pathkeys = subpath->pathkeys;
1332 rel->cheapest_unique_path = (Path *) pathnode;
1334 MemoryContextSwitchTo(oldcontext);
1341 /* Estimate number of output rows */
1342 pathnode->path.rows = estimate_num_groups(root, uniq_exprs, rel->rows);
1343 numCols = list_length(uniq_exprs);
1348 * Estimate cost for sort+unique implementation
1350 cost_sort(&sort_path, root, NIL,
1351 subpath->total_cost,
1359 * Charge one cpu_operator_cost per comparison per input tuple. We
1360 * assume all columns get compared at most of the tuples. (XXX
1361 * probably this is an overestimate.) This should agree with
1364 sort_path.total_cost += cpu_operator_cost * rel->rows * numCols;
1370 * Estimate the overhead per hashtable entry at 64 bytes (same as in
/* Disable the hash strategy when the estimated hash table would not fit
 * in work_mem (compared in kilobytes, hence the 1024L factor). */
1373 int hashentrysize = rel->width + 64;
1375 if (hashentrysize * pathnode->path.rows > work_mem * 1024L)
1376 all_hash = false; /* don't try to hash */
1378 cost_agg(&agg_path, root,
1380 numCols, pathnode->path.rows,
1381 subpath->startup_cost,
1382 subpath->total_cost,
/* Pick the cheaper of hash vs. sort when both are legal; otherwise take
 * whichever single strategy survived. */
1386 if (all_btree && all_hash)
1388 if (agg_path.total_cost < sort_path.total_cost)
1389 pathnode->umethod = UNIQUE_PATH_HASH;
1391 pathnode->umethod = UNIQUE_PATH_SORT;
1394 pathnode->umethod = UNIQUE_PATH_SORT;
1396 pathnode->umethod = UNIQUE_PATH_HASH;
1398 goto no_unique_path;
1400 if (pathnode->umethod == UNIQUE_PATH_HASH)
1402 pathnode->path.startup_cost = agg_path.startup_cost;
1403 pathnode->path.total_cost = agg_path.total_cost;
1407 pathnode->path.startup_cost = sort_path.startup_cost;
1408 pathnode->path.total_cost = sort_path.total_cost;
1411 rel->cheapest_unique_path = (Path *) pathnode;
1413 MemoryContextSwitchTo(oldcontext);
/* Failure exit: cache the negative result by clearing join_quals, so the
 * quick "previously failed" test near the top short-circuits next time. */
1417 no_unique_path: /* failure exit */
1419 /* Mark the SpecialJoinInfo as not unique-able */
1420 sjinfo->join_quals = NIL;
1422 MemoryContextSwitchTo(oldcontext);
1428 * translate_sub_tlist - get subquery column numbers represented by tlist
1430 * The given targetlist usually contains only Vars referencing the given relid.
1431 * Extract their varattnos (ie, the column numbers of the subquery) and return
1432 * as an integer List.
1434 * If any of the tlist items is not a simple Var, we cannot determine whether
1435 * the subquery's uniqueness condition (if any) matches ours, so punt and
1439 translate_sub_tlist(List *tlist, int relid)
/* NOTE(review): the result-list declaration and the foreach(l, tlist)
 * header are missing from this extraction; the loop body below iterates
 * over tlist entries. */
1446 Var *var = (Var *) lfirst(l);
/* Any non-Var entry, or a Var from some other rel, makes the whole
 * translation unusable -- return NIL immediately. */
1448 if (!var || !IsA(var, Var) ||
1449 var->varno != relid)
1450 return NIL; /* punt */
1452 result = lappend_int(result, var->varattno);
1458 * create_subqueryscan_path
1459 * Creates a path corresponding to a sequential scan of a subquery,
1460 * returning the pathnode.
1463 create_subqueryscan_path(PlannerInfo *root, RelOptInfo *rel,
1464 List *pathkeys, Relids required_outer)
1466 Path *pathnode = makeNode(Path);
1468 pathnode->pathtype = T_SubqueryScan;
1469 pathnode->parent = rel;
/* Parameterization info is computed from required_outer; the trailing
 * argument line of this call is missing from the extraction. */
1470 pathnode->param_info = get_baserel_parampathinfo(root, rel,
/* Caller supplies any pathkeys the subquery's output ordering provides. */
1472 pathnode->pathkeys = pathkeys;
1474 cost_subqueryscan(pathnode, root, rel, pathnode->param_info);
1480 * create_functionscan_path
1481 * Creates a path corresponding to a sequential scan of a function,
1482 * returning the pathnode.
1485 create_functionscan_path(PlannerInfo *root, RelOptInfo *rel,
1486 List *pathkeys, Relids required_outer)
1488 Path *pathnode = makeNode(Path);
1490 pathnode->pathtype = T_FunctionScan;
1491 pathnode->parent = rel;
/* Same construction pattern as the other simple scan-path builders:
 * fill type/parent/param_info/pathkeys, then delegate costing. */
1492 pathnode->param_info = get_baserel_parampathinfo(root, rel,
1494 pathnode->pathkeys = pathkeys;
1496 cost_functionscan(pathnode, root, rel, pathnode->param_info);
1502 * create_valuesscan_path
1503 * Creates a path corresponding to a scan of a VALUES list,
1504 * returning the pathnode.
1507 create_valuesscan_path(PlannerInfo *root, RelOptInfo *rel,
1508 Relids required_outer)
1510 Path *pathnode = makeNode(Path);
1512 pathnode->pathtype = T_ValuesScan;
1513 pathnode->parent = rel;
1514 pathnode->param_info = get_baserel_parampathinfo(root, rel,
/* Unlike subquery/function scans, no caller-supplied pathkeys here. */
1516 pathnode->pathkeys = NIL; /* result is always unordered */
1518 cost_valuesscan(pathnode, root, rel, pathnode->param_info);
1524 * create_ctescan_path
1525 * Creates a path corresponding to a scan of a non-self-reference CTE,
1526 * returning the pathnode.
1529 create_ctescan_path(PlannerInfo *root, RelOptInfo *rel, Relids required_outer)
1531 Path *pathnode = makeNode(Path);
1533 pathnode->pathtype = T_CteScan;
1534 pathnode->parent = rel;
1535 pathnode->param_info = get_baserel_parampathinfo(root, rel,
/* Ordering of the CTE's materialized output is not tracked (yet). */
1537 pathnode->pathkeys = NIL; /* XXX for now, result is always unordered */
1539 cost_ctescan(pathnode, root, rel, pathnode->param_info);
1545 * create_worktablescan_path
1546 * Creates a path corresponding to a scan of a self-reference CTE,
1547 * returning the pathnode.
1550 create_worktablescan_path(PlannerInfo *root, RelOptInfo *rel,
1551 Relids required_outer)
1553 Path *pathnode = makeNode(Path);
1555 pathnode->pathtype = T_WorkTableScan;
1556 pathnode->parent = rel;
1557 pathnode->param_info = get_baserel_parampathinfo(root, rel,
1559 pathnode->pathkeys = NIL; /* result is always unordered */
/* No dedicated cost function: a worktable scan is costed exactly like a
 * non-recursive CTE scan. */
1561 /* Cost is the same as for a regular CTE scan */
1562 cost_ctescan(pathnode, root, rel, pathnode->param_info);
1568 * create_foreignscan_path
1569 * Creates a path corresponding to a scan of a foreign table,
1570 * returning the pathnode.
1572 * This function is never called from core Postgres; rather, it's expected
1573 * to be called by the GetForeignPaths function of a foreign data wrapper.
1574 * We make the FDW supply all fields of the path, since we do not have any
1575 * way to calculate them in core.
1578 create_foreignscan_path(PlannerInfo *root, RelOptInfo *rel,
1579 double rows, Cost startup_cost, Cost total_cost,
/* NOTE(review): at least one parameter line (presumably the pathkeys
 * and fdw_private parameters referenced below) is missing from this
 * extraction of the signature. */
1581 Relids required_outer,
1584 ForeignPath *pathnode = makeNode(ForeignPath);
1586 pathnode->path.pathtype = T_ForeignScan;
1587 pathnode->path.parent = rel;
1588 pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
/* All estimates are taken on faith from the FDW -- no core costing. */
1590 pathnode->path.rows = rows;
1591 pathnode->path.startup_cost = startup_cost;
1592 pathnode->path.total_cost = total_cost;
1593 pathnode->path.pathkeys = pathkeys;
1595 pathnode->fdw_private = fdw_private;
1601 * calc_nestloop_required_outer
1602 * Compute the required_outer set for a nestloop join path
1604 * Note: result must not share storage with either input
1607 calc_nestloop_required_outer(Path *outer_path, Path *inner_path)
1609 Relids outer_paramrels = PATH_REQ_OUTER(outer_path);
1610 Relids inner_paramrels = PATH_REQ_OUTER(inner_path);
1611 Relids required_outer;
/* Nestloop is asymmetric: the inner side may be parameterized by the
 * outer side's rels (those get satisfied by the join itself). */
1613 /* inner_path can require rels from outer path, but not vice versa */
1614 Assert(!bms_overlap(outer_paramrels, inner_path->parent->relids));
/* bms_copy ensures the no-shared-storage contract even in this case. */
1615 /* easy case if inner path is not parameterized */
1616 if (!inner_paramrels)
1617 return bms_copy(outer_paramrels);
1618 /* else, form the union ... */
1619 required_outer = bms_union(outer_paramrels, inner_paramrels);
1620 /* ... and remove any mention of now-satisfied outer rels */
1621 required_outer = bms_del_members(required_outer,
1622 outer_path->parent->relids);
1623 /* maintain invariant that required_outer is exactly NULL if empty */
1624 if (bms_is_empty(required_outer))
1626 bms_free(required_outer);
1627 required_outer = NULL;
1629 return required_outer;
1633 * calc_non_nestloop_required_outer
1634 * Compute the required_outer set for a merge or hash join path
1636 * Note: result must not share storage with either input
1639 calc_non_nestloop_required_outer(Path *outer_path, Path *inner_path)
1641 Relids outer_paramrels = PATH_REQ_OUTER(outer_path);
1642 Relids inner_paramrels = PATH_REQ_OUTER(inner_path);
1643 Relids required_outer;
/* Unlike the nestloop variant, merge/hash joins cannot satisfy either
 * side's parameterization, so both directions are asserted. */
1645 /* neither path can require rels from the other */
1646 Assert(!bms_overlap(outer_paramrels, inner_path->parent->relids))
1647 Assert(!bms_overlap(inner_paramrels, outer_path->parent->relids));
1648 /* form the union ... */
1649 required_outer = bms_union(outer_paramrels, inner_paramrels);
1650 /* we do not need an explicit test for empty; bms_union gets it right */
1651 return required_outer;
1655 * create_nestloop_path
1656 * Creates a pathnode corresponding to a nestloop join between two
1659 * 'joinrel' is the join relation.
1660 * 'jointype' is the type of join required
1661 * 'workspace' is the result from initial_cost_nestloop
1662 * 'sjinfo' is extra info about the join for selectivity estimation
1663 * 'semifactors' contains valid data if jointype is SEMI or ANTI
1664 * 'outer_path' is the outer path
1665 * 'inner_path' is the inner path
1666 * 'restrict_clauses' are the RestrictInfo nodes to apply at the join
1667 * 'pathkeys' are the path keys of the new join path
1668 * 'required_outer' is the set of required outer rels
1670 * Returns the resulting path node.
1673 create_nestloop_path(PlannerInfo *root,
1674 RelOptInfo *joinrel,
1676 JoinCostWorkspace *workspace,
1677 SpecialJoinInfo *sjinfo,
1678 SemiAntiJoinFactors *semifactors,
/* NOTE(review): the jointype, outer_path, inner_path and pathkeys
 * parameter lines documented above are missing from this extraction
 * of the signature. */
1681 List *restrict_clauses,
1683 Relids required_outer)
1685 NestPath *pathnode = makeNode(NestPath);
1686 Relids inner_req_outer = PATH_REQ_OUTER(inner_path);
1689 * If the inner path is parameterized by the outer, we must drop any
1690 * restrict_clauses that are due to be moved into the inner path. We have
1691 * to do this now, rather than postpone the work till createplan time,
1692 * because the restrict_clauses list can affect the size and cost
1693 * estimates for this path.
1695 if (bms_overlap(inner_req_outer, outer_path->parent->relids))
1697 Relids inner_and_outer = bms_union(inner_path->parent->relids,
1699 List *jclauses = NIL;
/* Keep only the clauses that can NOT be pushed into the inner path;
 * movable ones will be enforced there instead. */
1702 foreach(lc, restrict_clauses)
1704 RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
1706 if (!join_clause_is_movable_into(rinfo,
1707 inner_path->parent->relids,
1709 jclauses = lappend(jclauses, rinfo);
1711 restrict_clauses = jclauses;
1714 pathnode->path.pathtype = T_NestLoop;
1715 pathnode->path.parent = joinrel;
1716 pathnode->path.param_info =
1717 get_joinrel_parampathinfo(root,
1724 pathnode->path.pathkeys = pathkeys;
1725 pathnode->jointype = jointype;
1726 pathnode->outerjoinpath = outer_path;
1727 pathnode->innerjoinpath = inner_path;
1728 pathnode->joinrestrictinfo = restrict_clauses;
/* Final costing uses the preliminary estimates from the workspace. */
1730 final_cost_nestloop(root, pathnode, workspace, sjinfo, semifactors);
1736 * create_mergejoin_path
1737 * Creates a pathnode corresponding to a mergejoin join between
1740 * 'joinrel' is the join relation
1741 * 'jointype' is the type of join required
1742 * 'workspace' is the result from initial_cost_mergejoin
1743 * 'sjinfo' is extra info about the join for selectivity estimation
1744 * 'outer_path' is the outer path
1745 * 'inner_path' is the inner path
1746 * 'restrict_clauses' are the RestrictInfo nodes to apply at the join
1747 * 'pathkeys' are the path keys of the new join path
1748 * 'required_outer' is the set of required outer rels
1749 * 'mergeclauses' are the RestrictInfo nodes to use as merge clauses
1750 * (this should be a subset of the restrict_clauses list)
1751 * 'outersortkeys' are the sort varkeys for the outer relation
1752 * 'innersortkeys' are the sort varkeys for the inner relation
1755 create_mergejoin_path(PlannerInfo *root,
1756 RelOptInfo *joinrel,
1758 JoinCostWorkspace *workspace,
1759 SpecialJoinInfo *sjinfo,
/* NOTE(review): the jointype, outer_path, inner_path, pathkeys and
 * mergeclauses parameter lines documented above are missing from this
 * extraction of the signature. */
1762 List *restrict_clauses,
1764 Relids required_outer,
1766 List *outersortkeys,
1767 List *innersortkeys)
1769 MergePath *pathnode = makeNode(MergePath);
1771 pathnode->jpath.path.pathtype = T_MergeJoin;
1772 pathnode->jpath.path.parent = joinrel;
1773 pathnode->jpath.path.param_info =
1774 get_joinrel_parampathinfo(root,
1781 pathnode->jpath.path.pathkeys = pathkeys;
1782 pathnode->jpath.jointype = jointype;
1783 pathnode->jpath.outerjoinpath = outer_path;
1784 pathnode->jpath.innerjoinpath = inner_path;
1785 pathnode->jpath.joinrestrictinfo = restrict_clauses;
1786 pathnode->path_mergeclauses = mergeclauses;
1787 pathnode->outersortkeys = outersortkeys;
1788 pathnode->innersortkeys = innersortkeys;
1789 /* pathnode->materialize_inner will be set by final_cost_mergejoin */
1791 final_cost_mergejoin(root, pathnode, workspace, sjinfo);
1797 * create_hashjoin_path
1798 * Creates a pathnode corresponding to a hash join between two relations.
1800 * 'joinrel' is the join relation
1801 * 'jointype' is the type of join required
1802 * 'workspace' is the result from initial_cost_hashjoin
1803 * 'sjinfo' is extra info about the join for selectivity estimation
1804 * 'semifactors' contains valid data if jointype is SEMI or ANTI
1805 * 'outer_path' is the cheapest outer path
1806 * 'inner_path' is the cheapest inner path
1807 * 'restrict_clauses' are the RestrictInfo nodes to apply at the join
1808 * 'required_outer' is the set of required outer rels
1809 * 'hashclauses' are the RestrictInfo nodes to use as hash clauses
1810 * (this should be a subset of the restrict_clauses list)
1813 create_hashjoin_path(PlannerInfo *root,
1814 RelOptInfo *joinrel,
1816 JoinCostWorkspace *workspace,
1817 SpecialJoinInfo *sjinfo,
1818 SemiAntiJoinFactors *semifactors,
/* NOTE(review): the jointype, outer_path, inner_path and hashclauses
 * parameter lines documented above are missing from this extraction of
 * the signature. */
1821 List *restrict_clauses,
1822 Relids required_outer,
1825 HashPath *pathnode = makeNode(HashPath);
1827 pathnode->jpath.path.pathtype = T_HashJoin;
1828 pathnode->jpath.path.parent = joinrel;
1829 pathnode->jpath.path.param_info =
1830 get_joinrel_parampathinfo(root,
1839 * A hashjoin never has pathkeys, since its output ordering is
1840 * unpredictable due to possible batching. XXX If the inner relation is
1841 * small enough, we could instruct the executor that it must not batch,
1842 * and then we could assume that the output inherits the outer relation's
1843 * ordering, which might save a sort step. However there is considerable
1844 * downside if our estimate of the inner relation size is badly off. For
1845 * the moment we don't risk it. (Note also that if we wanted to take this
1846 * seriously, joinpath.c would have to consider many more paths for the
1847 * outer rel than it does now.)
1849 pathnode->jpath.path.pathkeys = NIL;
1850 pathnode->jpath.jointype = jointype;
1851 pathnode->jpath.outerjoinpath = outer_path;
1852 pathnode->jpath.innerjoinpath = inner_path;
1853 pathnode->jpath.joinrestrictinfo = restrict_clauses;
1854 pathnode->path_hashclauses = hashclauses;
1855 /* final_cost_hashjoin will fill in pathnode->num_batches */
1857 final_cost_hashjoin(root, pathnode, workspace, sjinfo, semifactors);
1863 * reparameterize_path
1864 * Attempt to modify a Path to have greater parameterization
1866 * We use this to attempt to bring all child paths of an appendrel to the
1867 * same parameterization level, ensuring that they all enforce the same set
1868 * of join quals (and thus that that parameterization can be attributed to
1869 * an append path built from such paths). Currently, only a few path types
1870 * are supported here, though more could be added at need. We return NULL
1871 * if we can't reparameterize the given path.
1873 * Note: we intentionally do not pass created paths to add_path(); it would
1874 * possibly try to delete them on the grounds of being cost-inferior to the
1875 * paths they were made from, and we don't want that. Paths made here are
1876 * not necessarily of general-purpose usefulness, but they can be useful
1877 * as members of an append path.
1880 reparameterize_path(PlannerInfo *root, Path *path,
1881 Relids required_outer,
1884 RelOptInfo *rel = path->parent;
/* Bail out unless the new parameterization is a superset of the old. */
1886 /* Can only increase, not decrease, path's parameterization */
1887 if (!bms_is_subset(PATH_REQ_OUTER(path), required_outer))
/* NOTE(review): several case labels (e.g. the seqscan and plain
 * index-scan cases, and the default returning NULL) appear to be
 * missing from this extraction of the switch. */
1889 switch (path->pathtype)
1892 return create_seqscan_path(root, rel, required_outer);
1894 case T_IndexOnlyScan:
1896 IndexPath *ipath = (IndexPath *) path;
1897 IndexPath *newpath = makeNode(IndexPath);
1900 * We can't use create_index_path directly, and would not want
1901 * to because it would re-compute the indexqual conditions
1902 * which is wasted effort. Instead we hack things a bit:
1903 * flat-copy the path node, revise its param_info, and redo
1904 * the cost estimate.
1906 memcpy(newpath, ipath, sizeof(IndexPath));
1907 newpath->path.param_info =
1908 get_baserel_parampathinfo(root, rel, required_outer);
1909 cost_index(newpath, root, loop_count);
1910 return (Path *) newpath;
1912 case T_BitmapHeapScan:
1914 BitmapHeapPath *bpath = (BitmapHeapPath *) path;
1916 return (Path *) create_bitmap_heap_path(root,
1922 case T_SubqueryScan:
1923 return create_subqueryscan_path(root, rel, path->pathkeys,
1931 /*****************************************************************************
1932 * creation of custom-plan paths
1933 *****************************************************************************/
/* Process-lifetime list of registered CustomPathMethods tables; consulted
 * by create_customscan_paths() below. */
1935 static List *custom_path_providers = NIL;
1938 * register_custom_path_provider
1940 * Register a table of callback functions which implements a custom-path
1941 * provider. This allows extension to provide additional (hopefully faster)
1942 * methods of scanning a relation.
1945 register_custom_path_provider(CustomPathMethods *cpp_methods)
1947 MemoryContext oldcxt;
/* The list must outlive any single planning cycle, so its cells are
 * allocated in TopMemoryContext rather than the current context. */
1949 oldcxt = MemoryContextSwitchTo(TopMemoryContext);
1950 custom_path_providers = lappend(custom_path_providers, cpp_methods);
1951 MemoryContextSwitchTo(oldcxt);
1955 * create_customscan_paths
1957 * Invoke custom path provider callbacks. If the callback determines that
1958 * the custom-path provider can handle this relation, it can add one or more
1959 * paths using add_path().
1962 create_customscan_paths(PlannerInfo *root,
1963 RelOptInfo *baserel,
/* NOTE(review): a RangeTblEntry *rte parameter line appears to be
 * missing from this extraction (rte is used below). */
1968 foreach (cell, custom_path_providers)
1970 const CustomPathMethods *cpp_methods = lfirst(cell);
/* The callback pointer may be NULL; only invoke providers that set it. */
1972 if (cpp_methods->CreateCustomScanPath)
1973 cpp_methods->CreateCustomScanPath(root, baserel, rte);