1 /*-------------------------------------------------------------------------
2  *
3  * pathnode.c
4  *        Routines to manipulate pathlists and create path nodes
5  *
6  * Portions Copyright (c) 1996-2011, PostgreSQL Global Development Group
7  * Portions Copyright (c) 1994, Regents of the University of California
8  *
9  *
10  * IDENTIFICATION
11  *        src/backend/optimizer/util/pathnode.c
12  *
13  *-------------------------------------------------------------------------
14  */
15 #include "postgres.h"
16
17 #include <math.h>
18
19 #include "catalog/pg_operator.h"
20 #include "foreign/fdwapi.h"
21 #include "miscadmin.h"
22 #include "nodes/nodeFuncs.h"
23 #include "optimizer/clauses.h"
24 #include "optimizer/cost.h"
25 #include "optimizer/pathnode.h"
26 #include "optimizer/paths.h"
27 #include "optimizer/tlist.h"
28 #include "optimizer/var.h"
29 #include "parser/parse_expr.h"
30 #include "parser/parsetree.h"
31 #include "utils/selfuncs.h"
32 #include "utils/lsyscache.h"
33 #include "utils/syscache.h"
34
35
36 static List *translate_sub_tlist(List *tlist, int relid);
37 static bool query_is_distinct_for(Query *query, List *colnos, List *opids);
38 static Oid      distinct_col_search(int colno, List *colnos, List *opids);
39
40
41 /*****************************************************************************
42  *              MISC. PATH UTILITIES
43  *****************************************************************************/
44
45 /*
46  * compare_path_costs
47  *        Return -1, 0, or +1 according as path1 is cheaper, the same cost,
48  *        or more expensive than path2 for the specified criterion.
49  */
50 int
51 compare_path_costs(Path *path1, Path *path2, CostSelector criterion)
52 {
53         if (criterion == STARTUP_COST)
54         {
55                 if (path1->startup_cost < path2->startup_cost)
56                         return -1;
57                 if (path1->startup_cost > path2->startup_cost)
58                         return +1;
59
60                 /*
61                  * If paths have the same startup cost (not at all unlikely), order
62                  * them by total cost.
63                  */
64                 if (path1->total_cost < path2->total_cost)
65                         return -1;
66                 if (path1->total_cost > path2->total_cost)
67                         return +1;
68         }
69         else
70         {
71                 if (path1->total_cost < path2->total_cost)
72                         return -1;
73                 if (path1->total_cost > path2->total_cost)
74                         return +1;
75
76                 /*
77                  * If paths have the same total cost, order them by startup cost.
78                  */
79                 if (path1->startup_cost < path2->startup_cost)
80                         return -1;
81                 if (path1->startup_cost > path2->startup_cost)
82                         return +1;
83         }
84         return 0;
85 }
86
87 /*
88  * compare_fuzzy_path_costs
89  *        Return -1, 0, or +1 according as path1 is cheaper, the same cost,
90  *        or more expensive than path2 for the specified criterion.
91  *
92  * This differs from compare_path_costs in that we consider the costs the
93  * same if they agree to within a "fuzz factor".  This is used by add_path
94  * to avoid keeping both of a pair of paths that really have insignificantly
95  * different cost.
96  */
97 static int
98 compare_fuzzy_path_costs(Path *path1, Path *path2, CostSelector criterion)
99 {
100         /*
101          * We use a fuzz factor of 1% of the smaller cost.
102          *
103          * XXX does this percentage need to be user-configurable?
104          */
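        /*
         * For example, startup costs of 100.0 and 100.5 are treated as fuzzily
         * equal here (neither exceeds the other by more than 1%), whereas
         * 100.0 versus 102.0 compares as a real cost difference.
         */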
105         if (criterion == STARTUP_COST)
106         {
107                 if (path1->startup_cost > path2->startup_cost * 1.01)
108                         return +1;
109                 if (path2->startup_cost > path1->startup_cost * 1.01)
110                         return -1;
111
112                 /*
113                  * If paths have the same startup cost (not at all unlikely), order
114                  * them by total cost.
115                  */
116                 if (path1->total_cost > path2->total_cost * 1.01)
117                         return +1;
118                 if (path2->total_cost > path1->total_cost * 1.01)
119                         return -1;
120         }
121         else
122         {
123                 if (path1->total_cost > path2->total_cost * 1.01)
124                         return +1;
125                 if (path2->total_cost > path1->total_cost * 1.01)
126                         return -1;
127
128                 /*
129                  * If paths have the same total cost, order them by startup cost.
130                  */
131                 if (path1->startup_cost > path2->startup_cost * 1.01)
132                         return +1;
133                 if (path2->startup_cost > path1->startup_cost * 1.01)
134                         return -1;
135         }
136         return 0;
137 }
138
139 /*
140  * compare_fractional_path_costs
141  *        Return -1, 0, or +1 according as path1 is cheaper, the same cost,
142  *        or more expensive than path2 for fetching the specified fraction
143  *        of the total tuples.
144  *
145  * If fraction is <= 0 or >= 1, we interpret it as 1, ie, we select the
146  * path with the cheaper total_cost.
147  */
148 int
149 compare_fractional_path_costs(Path *path1, Path *path2,
150                                                           double fraction)
151 {
152         Cost            cost1,
153                                 cost2;
154
155         if (fraction <= 0.0 || fraction >= 1.0)
156                 return compare_path_costs(path1, path2, TOTAL_COST);
157         cost1 = path1->startup_cost +
158                 fraction * (path1->total_cost - path1->startup_cost);
159         cost2 = path2->startup_cost +
160                 fraction * (path2->total_cost - path2->startup_cost);
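        /*
         * For example, with startup_cost 10, total_cost 110, and fraction 0.5,
         * the interpolated cost is 10 + 0.5 * (110 - 10) = 60.
         */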
161         if (cost1 < cost2)
162                 return -1;
163         if (cost1 > cost2)
164                 return +1;
165         return 0;
166 }
167
168 /*
169  * set_cheapest
170  *        Find the minimum-cost paths from among a relation's paths,
171  *        and save them in the rel's cheapest-path fields.
172  *
173  * This is normally called only after we've finished constructing the path
174  * list for the rel node.
175  *
176  * If we find two paths of identical costs, try to keep the better-sorted one.
177  * The paths might have unrelated sort orderings, in which case we can only
178  * guess which might be better to keep, but if one is superior then we
179  * definitely should keep it.
180  */
181 void
182 set_cheapest(RelOptInfo *parent_rel)
183 {
184         List       *pathlist = parent_rel->pathlist;
185         ListCell   *p;
186         Path       *cheapest_startup_path;
187         Path       *cheapest_total_path;
188
189         Assert(IsA(parent_rel, RelOptInfo));
190
191         if (pathlist == NIL)
192                 elog(ERROR, "could not devise a query plan for the given query");
193
194         cheapest_startup_path = cheapest_total_path = (Path *) linitial(pathlist);
195
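        /* Skip the first path; it already initialized both pointers above */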
196         for_each_cell(p, lnext(list_head(pathlist)))
197         {
198                 Path       *path = (Path *) lfirst(p);
199                 int                     cmp;
200
201                 cmp = compare_path_costs(cheapest_startup_path, path, STARTUP_COST);
202                 if (cmp > 0 ||
203                         (cmp == 0 &&
204                          compare_pathkeys(cheapest_startup_path->pathkeys,
205                                                           path->pathkeys) == PATHKEYS_BETTER2))
206                         cheapest_startup_path = path;
207
208                 cmp = compare_path_costs(cheapest_total_path, path, TOTAL_COST);
209                 if (cmp > 0 ||
210                         (cmp == 0 &&
211                          compare_pathkeys(cheapest_total_path->pathkeys,
212                                                           path->pathkeys) == PATHKEYS_BETTER2))
213                         cheapest_total_path = path;
214         }
215
216         parent_rel->cheapest_startup_path = cheapest_startup_path;
217         parent_rel->cheapest_total_path = cheapest_total_path;
218         parent_rel->cheapest_unique_path = NULL;        /* computed only if needed */
219 }
220
221 /*
222  * add_path
223  *        Consider a potential implementation path for the specified parent rel,
224  *        and add it to the rel's pathlist if it is worthy of consideration.
225  *        A path is worthy if it has either a better sort order (better pathkeys)
226  *        or cheaper cost (on either dimension) than any of the existing old paths.
227  *
228  *        We also remove from the rel's pathlist any old paths that are dominated
229  *        by new_path --- that is, new_path is both cheaper and at least as well
230  *        ordered.
231  *
232  *        The pathlist is kept sorted by TOTAL_COST metric, with cheaper paths
233  *        at the front.  No code depends on that for correctness; it's simply
234  *        a speed hack within this routine.  Doing it that way makes it more
235  *        likely that we will reject an inferior path after a few comparisons,
236  *        rather than many comparisons.
237  *
238  *        NOTE: discarded Path objects are immediately pfree'd to reduce planner
239  *        memory consumption.  We dare not try to free the substructure of a Path,
240  *        since much of it may be shared with other Paths or the query tree itself;
241  *        but just recycling discarded Path nodes is a very useful savings in
242  *        a large join tree.  We can recycle the List nodes of pathlist, too.
243  *
244  *        BUT: we do not pfree IndexPath objects, since they may be referenced as
245  *        children of BitmapHeapPaths as well as being paths in their own right.
246  *
247  * 'parent_rel' is the relation entry to which the path corresponds.
248  * 'new_path' is a potential path for parent_rel.
249  *
250  * Returns nothing, but modifies parent_rel->pathlist.
251  */
252 void
253 add_path(RelOptInfo *parent_rel, Path *new_path)
254 {
255         bool            accept_new = true;              /* unless we find a superior old path */
256         ListCell   *insert_after = NULL;        /* where to insert new item */
257         ListCell   *p1_prev = NULL;
258         ListCell   *p1;
259
260         /*
261          * This is a convenient place to check for query cancel --- no part of the
262          * planner goes very long without calling add_path().
263          */
264         CHECK_FOR_INTERRUPTS();
265
266         /*
267          * Loop to check proposed new path against old paths.  Note it is possible
268          * for more than one old path to be tossed out because new_path dominates
269          * it.
270          */
271         p1 = list_head(parent_rel->pathlist);           /* cannot use foreach here */
272         while (p1 != NULL)
273         {
274                 Path       *old_path = (Path *) lfirst(p1);
275                 bool            remove_old = false; /* unless new proves superior */
276                 int                     costcmp;
277
278                 /*
279                  * As of Postgres 8.0, we use fuzzy cost comparison to avoid wasting
280                  * cycles keeping paths that are really not significantly different in
281                  * cost.
282                  */
283                 costcmp = compare_fuzzy_path_costs(new_path, old_path, TOTAL_COST);
284
285                 /*
286                  * If the two paths compare differently for startup and total cost,
287                  * then we want to keep both, and we can skip the (much slower)
288                  * comparison of pathkeys.      If they compare the same, proceed with the
289                  * pathkeys comparison.  Note: this test relies on the fact that
290                  * compare_fuzzy_path_costs will only return 0 if both costs are
291                  * effectively equal (and, therefore, there's no need to call it twice
292                  * in that case).
293                  */
294                 if (costcmp == 0 ||
295                         costcmp == compare_fuzzy_path_costs(new_path, old_path,
296                                                                                                 STARTUP_COST))
297                 {
298                         switch (compare_pathkeys(new_path->pathkeys, old_path->pathkeys))
299                         {
300                                 case PATHKEYS_EQUAL:
301                                         if (costcmp < 0)
302                                                 remove_old = true;              /* new dominates old */
303                                         else if (costcmp > 0)
304                                                 accept_new = false;             /* old dominates new */
305                                         else
306                                         {
307                                                 /*
308                                                  * Same pathkeys, and fuzzily the same cost, so keep
309                                                  * just one --- but we'll do an exact cost comparison
310                                                  * to decide which.
311                                                  */
312                                                 if (compare_path_costs(new_path, old_path,
313                                                                                            TOTAL_COST) < 0)
314                                                         remove_old = true;      /* new dominates old */
315                                                 else
316                                                         accept_new = false; /* old equals or dominates new */
317                                         }
318                                         break;
319                                 case PATHKEYS_BETTER1:
320                                         if (costcmp <= 0)
321                                                 remove_old = true;              /* new dominates old */
322                                         break;
323                                 case PATHKEYS_BETTER2:
324                                         if (costcmp >= 0)
325                                                 accept_new = false;             /* old dominates new */
326                                         break;
327                                 case PATHKEYS_DIFFERENT:
328                                         /* keep both paths, since they have different ordering */
329                                         break;
330                         }
331                 }
332
333                 /*
334                  * Remove current element from pathlist if dominated by new.
335                  */
336                 if (remove_old)
337                 {
338                         parent_rel->pathlist = list_delete_cell(parent_rel->pathlist,
339                                                                                                         p1, p1_prev);
340
341                         /*
342                          * Delete the data pointed-to by the deleted cell, if possible
343                          */
344                         if (!IsA(old_path, IndexPath))
345                                 pfree(old_path);
346                         /* Advance list pointer */
347                         if (p1_prev)
348                                 p1 = lnext(p1_prev);
349                         else
350                                 p1 = list_head(parent_rel->pathlist);
351                 }
352                 else
353                 {
354                         /* new belongs after this old path if it has cost >= old's */
355                         if (costcmp >= 0)
356                                 insert_after = p1;
357                         /* Advance list pointers */
358                         p1_prev = p1;
359                         p1 = lnext(p1);
360                 }
361
362                 /*
363                  * If we found an old path that dominates new_path, we can quit
364                  * scanning the pathlist; we will not add new_path, and we assume
365                  * new_path cannot dominate any other elements of the pathlist.
366                  */
367                 if (!accept_new)
368                         break;
369         }
370
371         if (accept_new)
372         {
373                 /* Accept the new path: insert it at proper place in pathlist */
374                 if (insert_after)
375                         lappend_cell(parent_rel->pathlist, insert_after, new_path);
376                 else
377                         parent_rel->pathlist = lcons(new_path, parent_rel->pathlist);
378         }
379         else
380         {
381                 /* Reject and recycle the new path */
382                 if (!IsA(new_path, IndexPath))
383                         pfree(new_path);
384         }
385 }
386
387
388 /*****************************************************************************
389  *              PATH NODE CREATION ROUTINES
390  *****************************************************************************/
391
392 /*
393  * create_seqscan_path
394  *        Creates a path corresponding to a sequential scan, returning the
395  *        pathnode.
396  */
397 Path *
398 create_seqscan_path(PlannerInfo *root, RelOptInfo *rel)
399 {
400         Path       *pathnode = makeNode(Path);
401
402         pathnode->pathtype = T_SeqScan;
403         pathnode->parent = rel;
404         pathnode->pathkeys = NIL;       /* seqscan has unordered result */
405
406         cost_seqscan(pathnode, root, rel);
407
408         return pathnode;
409 }
410
411 /*
412  * create_index_path
413  *        Creates a path node for an index scan.
414  *
415  * 'index' is a usable index.
416  * 'clause_groups' is a list of lists of RestrictInfo nodes
417  *                      to be used as index qual conditions in the scan.
418  * 'indexorderbys' is a list of bare expressions (no RestrictInfos)
419  *                      to be used as index ordering operators in the scan.
420  * 'pathkeys' describes the ordering of the path.
421  * 'indexscandir' is ForwardScanDirection or BackwardScanDirection
422  *                      for an ordered index, or NoMovementScanDirection for
423  *                      an unordered index.
424  * 'outer_rel' is the outer relation if this is a join inner indexscan path.
425  *                      (pathkeys and indexscandir are ignored if so.)  NULL if not.
426  *
427  * Returns the new path node.
428  */
429 IndexPath *
430 create_index_path(PlannerInfo *root,
431                                   IndexOptInfo *index,
432                                   List *clause_groups,
433                                   List *indexorderbys,
434                                   List *pathkeys,
435                                   ScanDirection indexscandir,
436                                   RelOptInfo *outer_rel)
437 {
438         IndexPath  *pathnode = makeNode(IndexPath);
439         RelOptInfo *rel = index->rel;
440         List       *indexquals,
441                            *allclauses;
442
443         /*
444          * For a join inner scan, there's no point in marking the path with any
445          * pathkeys, since it will only ever be used as the inner path of a
446          * nestloop, and so its ordering does not matter.  For the same reason we
447          * don't really care what order it's scanned in.  (We could expect the
448          * caller to supply the correct values, but it's easier to force it here.)
449          */
450         if (outer_rel != NULL)
451         {
452                 pathkeys = NIL;
453                 indexscandir = NoMovementScanDirection;
454         }
455
456         pathnode->path.pathtype = T_IndexScan;
457         pathnode->path.parent = rel;
458         pathnode->path.pathkeys = pathkeys;
459
460         /* Convert clauses to indexquals the executor can handle */
461         indexquals = expand_indexqual_conditions(index, clause_groups);
462
463         /* Flatten the clause-groups list to produce indexclauses list */
464         allclauses = flatten_clausegroups_list(clause_groups);
465
466         /* Fill in the pathnode */
467         pathnode->indexinfo = index;
468         pathnode->indexclauses = allclauses;
469         pathnode->indexquals = indexquals;
470         pathnode->indexorderbys = indexorderbys;
471
472         pathnode->isjoininner = (outer_rel != NULL);
473         pathnode->indexscandir = indexscandir;
474
475         if (outer_rel != NULL)
476         {
477                 /*
478                  * We must compute the estimated number of output rows for the
479                  * indexscan.  This is less than rel->rows because of the additional
480                  * selectivity of the join clauses.  Since clause_groups may contain
481                  * both restriction and join clauses, we have to do a set union to get
482                  * the full set of clauses that must be considered to compute the
483                  * correct selectivity.  (Without the union operation, we might have
484                  * some restriction clauses appearing twice, which'd mislead
485                  * clauselist_selectivity into double-counting their selectivity.
486                  * However, since RestrictInfo nodes aren't copied when linking them
487                  * into different lists, it should be sufficient to use pointer
488                  * comparison to remove duplicates.)
489                  *
490                  * Note that we force the clauses to be treated as non-join clauses
491                  * during selectivity estimation.
492                  */
493                 allclauses = list_union_ptr(rel->baserestrictinfo, allclauses);
494                 pathnode->rows = rel->tuples *
495                         clauselist_selectivity(root,
496                                                                    allclauses,
497                                                                    rel->relid,  /* do not use 0! */
498                                                                    JOIN_INNER,
499                                                                    NULL);
500                 /* Like costsize.c, force estimate to be at least one row */
501                 pathnode->rows = clamp_row_est(pathnode->rows);
502         }
503         else
504         {
505                 /*
506                  * The number of rows is the same as the parent rel's estimate, since
507                  * this isn't a join inner indexscan.
508                  */
509                 pathnode->rows = rel->rows;
510         }
511
512         cost_index(pathnode, root, index, indexquals, indexorderbys, outer_rel);
513
514         return pathnode;
515 }
516
517 /*
518  * create_bitmap_heap_path
519  *        Creates a path node for a bitmap scan.
520  *
521  * 'bitmapqual' is a tree of IndexPath, BitmapAndPath, and BitmapOrPath nodes.
522  *
523  * If this is a join inner indexscan path, 'outer_rel' is the outer relation,
524  * and all the component IndexPaths should have been costed accordingly.
525  */
526 BitmapHeapPath *
527 create_bitmap_heap_path(PlannerInfo *root,
528                                                 RelOptInfo *rel,
529                                                 Path *bitmapqual,
530                                                 RelOptInfo *outer_rel)
531 {
532         BitmapHeapPath *pathnode = makeNode(BitmapHeapPath);
533
534         pathnode->path.pathtype = T_BitmapHeapScan;
535         pathnode->path.parent = rel;
536         pathnode->path.pathkeys = NIL;          /* always unordered */
537
538         pathnode->bitmapqual = bitmapqual;
539         pathnode->isjoininner = (outer_rel != NULL);
540
541         if (pathnode->isjoininner)
542         {
543                 /*
544                  * We must compute the estimated number of output rows for the
545                  * indexscan.  This is less than rel->rows because of the additional
546                  * selectivity of the join clauses.  We make use of the selectivity
547                  * estimated for the bitmap to do this; this isn't really quite right
548                  * since there may be restriction conditions not included in the
549                  * bitmap ...
550                  */
551                 Cost            indexTotalCost;
552                 Selectivity indexSelectivity;
553
554                 cost_bitmap_tree_node(bitmapqual, &indexTotalCost, &indexSelectivity);
555                 pathnode->rows = rel->tuples * indexSelectivity;
556                 if (pathnode->rows > rel->rows)
557                         pathnode->rows = rel->rows;
558                 /* Like costsize.c, force estimate to be at least one row */
559                 pathnode->rows = clamp_row_est(pathnode->rows);
560         }
561         else
562         {
563                 /*
564                  * The number of rows is the same as the parent rel's estimate, since
565                  * this isn't a join inner indexscan.
566                  */
567                 pathnode->rows = rel->rows;
568         }
569
570         cost_bitmap_heap_scan(&pathnode->path, root, rel, bitmapqual, outer_rel);
571
572         return pathnode;
573 }
574
575 /*
576  * create_bitmap_and_path
577  *        Creates a path node representing a BitmapAnd.
578  */
579 BitmapAndPath *
580 create_bitmap_and_path(PlannerInfo *root,
581                                            RelOptInfo *rel,
582                                            List *bitmapquals)
583 {
584         BitmapAndPath *pathnode = makeNode(BitmapAndPath);
585
586         pathnode->path.pathtype = T_BitmapAnd;
587         pathnode->path.parent = rel;
588         pathnode->path.pathkeys = NIL;          /* always unordered */
589
590         pathnode->bitmapquals = bitmapquals;
591
592         /* this sets bitmapselectivity as well as the regular cost fields: */
593         cost_bitmap_and_node(pathnode, root);
594
595         return pathnode;
596 }
597
598 /*
599  * create_bitmap_or_path
600  *        Creates a path node representing a BitmapOr.
601  */
602 BitmapOrPath *
603 create_bitmap_or_path(PlannerInfo *root,
604                                           RelOptInfo *rel,
605                                           List *bitmapquals)
606 {
607         BitmapOrPath *pathnode = makeNode(BitmapOrPath);
608
609         pathnode->path.pathtype = T_BitmapOr;
610         pathnode->path.parent = rel;
611         pathnode->path.pathkeys = NIL;          /* always unordered */
612
613         pathnode->bitmapquals = bitmapquals;
614
615         /* this sets bitmapselectivity as well as the regular cost fields: */
616         cost_bitmap_or_node(pathnode, root);
617
618         return pathnode;
619 }
620
621 /*
622  * create_tidscan_path
623  *        Creates a path corresponding to a scan by TID, returning the pathnode.
624  */
625 TidPath *
626 create_tidscan_path(PlannerInfo *root, RelOptInfo *rel, List *tidquals)
627 {
628         TidPath    *pathnode = makeNode(TidPath);
629
630         pathnode->path.pathtype = T_TidScan;
631         pathnode->path.parent = rel;
632         pathnode->path.pathkeys = NIL;
633
634         pathnode->tidquals = tidquals;
635
636         cost_tidscan(&pathnode->path, root, rel, tidquals);
637
638         return pathnode;
639 }
640
641 /*
642  * create_append_path
643  *        Creates a path corresponding to an Append plan, returning the
644  *        pathnode.
645  *
646  * Note that we must handle subpaths = NIL, representing a dummy access path.
647  */
648 AppendPath *
649 create_append_path(RelOptInfo *rel, List *subpaths)
650 {
651         AppendPath *pathnode = makeNode(AppendPath);
652         ListCell   *l;
653
654         pathnode->path.pathtype = T_Append;
655         pathnode->path.parent = rel;
656         pathnode->path.pathkeys = NIL;          /* result is always considered
657                                                                                  * unsorted */
658         pathnode->subpaths = subpaths;
659
660         /*
661          * Compute cost as sum of subplan costs.  We charge nothing extra for the
662          * Append itself, which perhaps is too optimistic, but since it doesn't do
663          * any selection or projection, it is a pretty cheap node.
664          *
665          * If you change this, see also make_append().
666          */
667         pathnode->path.startup_cost = 0;
668         pathnode->path.total_cost = 0;
669         foreach(l, subpaths)
670         {
671                 Path       *subpath = (Path *) lfirst(l);
672
673                 if (l == list_head(subpaths))   /* first node? */
674                         pathnode->path.startup_cost = subpath->startup_cost;
675                 pathnode->path.total_cost += subpath->total_cost;
676         }
677
678         return pathnode;
679 }
680
681 /*
682  * create_merge_append_path
683  *        Creates a path corresponding to a MergeAppend plan, returning the
684  *        pathnode.
685  */
686 MergeAppendPath *
687 create_merge_append_path(PlannerInfo *root,
688                                                  RelOptInfo *rel,
689                                                  List *subpaths,
690                                                  List *pathkeys)
691 {
692         MergeAppendPath *pathnode = makeNode(MergeAppendPath);
693         Cost            input_startup_cost;
694         Cost            input_total_cost;
695         ListCell   *l;
696
697         pathnode->path.pathtype = T_MergeAppend;
698         pathnode->path.parent = rel;
699         pathnode->path.pathkeys = pathkeys;
700         pathnode->subpaths = subpaths;
701
702         /*
703          * Apply query-wide LIMIT if known and path is for sole base relation.
704          * Finding out the latter at this low level is a bit kludgy.
705          */
706         pathnode->limit_tuples = root->limit_tuples;
707         if (pathnode->limit_tuples >= 0)
708         {
709                 Index           rti;
710
711                 for (rti = 1; rti < root->simple_rel_array_size; rti++)
712                 {
713                         RelOptInfo *brel = root->simple_rel_array[rti];
714
715                         if (brel == NULL)
716                                 continue;
717
718                         /* ignore RTEs that are "other rels" */
719                         if (brel->reloptkind != RELOPT_BASEREL)
720                                 continue;
721
722                         if (brel != rel)
723                         {
724                                 /* Oops, it's a join query */
725                                 pathnode->limit_tuples = -1.0;
726                                 break;
727                         }
728                 }
729         }
730
731         /* Add up all the costs of the input paths */
732         input_startup_cost = 0;
733         input_total_cost = 0;
734         foreach(l, subpaths)
735         {
736                 Path       *subpath = (Path *) lfirst(l);
737
738                 if (pathkeys_contained_in(pathkeys, subpath->pathkeys))
739                 {
740                         /* Subpath is adequately ordered, we won't need to sort it */
741                         input_startup_cost += subpath->startup_cost;
742                         input_total_cost += subpath->total_cost;
743                 }
744                 else
745                 {
746                         /* We'll need to insert a Sort node, so include cost for that */
747                         Path    sort_path;              /* dummy for result of cost_sort */
748
749                         cost_sort(&sort_path,
750                                           root,
751                                           pathkeys,
752                                           subpath->total_cost,
753                                           subpath->parent->tuples,
754                                           subpath->parent->width,
755                                           0.0,
756                                           work_mem,
757                                           pathnode->limit_tuples);
758                         input_startup_cost += sort_path.startup_cost;
759                         input_total_cost += sort_path.total_cost;
760                 }
761         }
762
763         /* Now we can compute total costs of the MergeAppend */
764         cost_merge_append(&pathnode->path, root,
765                                           pathkeys, list_length(subpaths),
766                                           input_startup_cost, input_total_cost,
767                                           rel->tuples);
768
769         return pathnode;
770 }
771
772 /*
773  * create_result_path
774  *        Creates a path representing a Result-and-nothing-else plan.
775  *        This is only used for the case of a query with an empty jointree.
776  */
777 ResultPath *
778 create_result_path(List *quals)
779 {
780         ResultPath *pathnode = makeNode(ResultPath);
781
782         pathnode->path.pathtype = T_Result;
783         pathnode->path.parent = NULL;
784         pathnode->path.pathkeys = NIL;
785         pathnode->quals = quals;
786
787         /* Ideally should define cost_result(), but I'm too lazy */
788         pathnode->path.startup_cost = 0;
789         pathnode->path.total_cost = cpu_tuple_cost;
790
791         /*
792          * In theory we should include the qual eval cost as well, but at present
793          * that doesn't accomplish much except duplicate work that will be done
794          * again in make_result; since this is only used for degenerate cases,
795          * nothing interesting will be done with the path cost values...
796          */
797
798         return pathnode;
799 }
800
801 /*
802  * create_material_path
803  *        Creates a path corresponding to a Material plan, returning the
804  *        pathnode.
805  */
806 MaterialPath *
807 create_material_path(RelOptInfo *rel, Path *subpath)
808 {
809         MaterialPath *pathnode = makeNode(MaterialPath);
810
811         pathnode->path.pathtype = T_Material;
812         pathnode->path.parent = rel;
813
814         pathnode->path.pathkeys = subpath->pathkeys;
815
816         pathnode->subpath = subpath;
817
818         cost_material(&pathnode->path,
819                                   subpath->startup_cost,
820                                   subpath->total_cost,
821                                   rel->rows,
822                                   rel->width);
823
824         return pathnode;
825 }
826
827 /*
828  * create_unique_path
829  *        Creates a path representing elimination of distinct rows from the
830  *        input data.  Distinct-ness is defined according to the needs of the
831  *        semijoin represented by sjinfo.  If it is not possible to identify
832  *        how to make the data unique, NULL is returned.
833  *
834  * If used at all, this is likely to be called repeatedly on the same rel;
835  * and the input subpath should always be the same (the cheapest_total path
836  * for the rel).  So we cache the result.
837  */
838 UniquePath *
839 create_unique_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath,
840                                    SpecialJoinInfo *sjinfo)
841 {
842         UniquePath *pathnode;
843         Path            sort_path;              /* dummy for result of cost_sort */
844         Path            agg_path;               /* dummy for result of cost_agg */
845         MemoryContext oldcontext;
846         List       *in_operators;
847         List       *uniq_exprs;
848         bool            all_btree;
849         bool            all_hash;
850         int                     numCols;
851         ListCell   *lc;
852
853         /* Caller made a mistake if subpath isn't cheapest_total ... */
854         Assert(subpath == rel->cheapest_total_path);
855         /* ... or if SpecialJoinInfo is the wrong one */
856         Assert(sjinfo->jointype == JOIN_SEMI);
857         Assert(bms_equal(rel->relids, sjinfo->syn_righthand));
858
859         /* If result already cached, return it */
860         if (rel->cheapest_unique_path)
861                 return (UniquePath *) rel->cheapest_unique_path;
862
863         /* If we previously failed, return NULL quickly */
864         if (sjinfo->join_quals == NIL)
865                 return NULL;
866
867         /*
868          * We must ensure path struct and subsidiary data are allocated in main
869          * planning context; otherwise GEQO memory management causes trouble.
870          * (Compare best_inner_indexscan().)
871          */
872         oldcontext = MemoryContextSwitchTo(root->planner_cxt);
873
874         /*----------
875          * Look to see whether the semijoin's join quals consist of AND'ed
876          * equality operators, with (only) RHS variables on only one side of
877          * each one.  If so, we can figure out how to enforce uniqueness for
878          * the RHS.
879          *
880          * Note that the input join_quals list is the list of quals that are
881          * *syntactically* associated with the semijoin, which in practice means
882          * the synthesized comparison list for an IN or the WHERE of an EXISTS.
883          * Particularly in the latter case, it might contain clauses that aren't
884          * *semantically* associated with the join, but refer to just one side or
885          * the other.  We can ignore such clauses here, as they will just drop
886          * down to be processed within one side or the other.  (It is okay to
887          * consider only the syntactically-associated clauses here because for a
888          * semijoin, no higher-level quals could refer to the RHS, and so there
889          * can be no other quals that are semantically associated with this join.
890          * We do things this way because it is useful to be able to run this test
891          * before we have extracted the list of quals that are actually
892          * semantically associated with the particular join.)
893          *
894          * Note that the in_operators list consists of the joinqual operators
895          * themselves (but commuted if needed to put the RHS value on the right).
896          * These could be cross-type operators, in which case the operator
897          * actually needed for uniqueness is a related single-type operator.
898          * We assume here that that operator will be available from the btree
899          * or hash opclass when the time comes ... if not, create_unique_plan()
900          * will fail.
901          *----------
902          */
903         in_operators = NIL;
904         uniq_exprs = NIL;
905         all_btree = true;
906         all_hash = enable_hashagg;      /* don't consider hash if not enabled */
907         foreach(lc, sjinfo->join_quals)
908         {
909                 OpExpr     *op = (OpExpr *) lfirst(lc);
910                 Oid                     opno;
911                 Node       *left_expr;
912                 Node       *right_expr;
913                 Relids          left_varnos;
914                 Relids          right_varnos;
915                 Relids          all_varnos;
916                 Oid                     opinputtype;
917
918                 /* Is it a binary opclause? */
919                 if (!IsA(op, OpExpr) ||
920                         list_length(op->args) != 2)
921                 {
922                         /* No, but does it reference both sides? */
923                         all_varnos = pull_varnos((Node *) op);
924                         if (!bms_overlap(all_varnos, sjinfo->syn_righthand) ||
925                                 bms_is_subset(all_varnos, sjinfo->syn_righthand))
926                         {
927                                 /*
928                                  * Clause refers to only one rel, so ignore it --- unless it
929                                  * contains volatile functions, in which case we'd better
930                                  * punt.
931                                  */
932                                 if (contain_volatile_functions((Node *) op))
933                                         goto no_unique_path;
934                                 continue;
935                         }
936                         /* Non-operator clause referencing both sides, must punt */
937                         goto no_unique_path;
938                 }
939
940                 /* Extract data from binary opclause */
941                 opno = op->opno;
942                 left_expr = linitial(op->args);
943                 right_expr = lsecond(op->args);
944                 left_varnos = pull_varnos(left_expr);
945                 right_varnos = pull_varnos(right_expr);
946                 all_varnos = bms_union(left_varnos, right_varnos);
947                 opinputtype = exprType(left_expr);
948
949                 /* Does it reference both sides? */
950                 if (!bms_overlap(all_varnos, sjinfo->syn_righthand) ||
951                         bms_is_subset(all_varnos, sjinfo->syn_righthand))
952                 {
953                         /*
954                          * Clause refers to only one rel, so ignore it --- unless it
955                          * contains volatile functions, in which case we'd better punt.
956                          */
957                         if (contain_volatile_functions((Node *) op))
958                                 goto no_unique_path;
959                         continue;
960                 }
961
962                 /* check rel membership of arguments */
963                 if (!bms_is_empty(right_varnos) &&
964                         bms_is_subset(right_varnos, sjinfo->syn_righthand) &&
965                         !bms_overlap(left_varnos, sjinfo->syn_righthand))
966                 {
967                         /* typical case, right_expr is RHS variable */
968                 }
969                 else if (!bms_is_empty(left_varnos) &&
970                                  bms_is_subset(left_varnos, sjinfo->syn_righthand) &&
971                                  !bms_overlap(right_varnos, sjinfo->syn_righthand))
972                 {
973                         /* flipped case, left_expr is RHS variable */
974                         opno = get_commutator(opno);
975                         if (!OidIsValid(opno))
976                                 goto no_unique_path;
977                         right_expr = left_expr;
978                 }
979                 else
980                         goto no_unique_path;
981
982                 /* all operators must be btree equality or hash equality */
983                 if (all_btree)
984                 {
985                         /* oprcanmerge is considered a hint... */
986                         if (!op_mergejoinable(opno, opinputtype) ||
987                                 get_mergejoin_opfamilies(opno) == NIL)
988                                 all_btree = false;
989                 }
990                 if (all_hash)
991                 {
992                         /* ... but oprcanhash had better be correct */
993                         if (!op_hashjoinable(opno, opinputtype))
994                                 all_hash = false;
995                 }
996                 if (!(all_btree || all_hash))
997                         goto no_unique_path;
998
999                 /* so far so good, keep building lists */
1000                 in_operators = lappend_oid(in_operators, opno);
1001                 uniq_exprs = lappend(uniq_exprs, copyObject(right_expr));
1002         }
1003
1004         /* Punt if we didn't find at least one column to unique-ify */
1005         if (uniq_exprs == NIL)
1006                 goto no_unique_path;
1007
1008         /*
1009          * The expressions we'd need to unique-ify mustn't be volatile.
1010          */
1011         if (contain_volatile_functions((Node *) uniq_exprs))
1012                 goto no_unique_path;
1013
1014         /*
1015          * If we get here, we can unique-ify using at least one of sorting and
1016          * hashing.  Start building the result Path object.
1017          */
1018         pathnode = makeNode(UniquePath);
1019
1020         pathnode->path.pathtype = T_Unique;
1021         pathnode->path.parent = rel;
1022
1023         /*
1024          * Treat the output as always unsorted, since we don't necessarily have
1025          * pathkeys to represent it.
1026          */
1027         pathnode->path.pathkeys = NIL;
1028
1029         pathnode->subpath = subpath;
1030         pathnode->in_operators = in_operators;
1031         pathnode->uniq_exprs = uniq_exprs;
1032
1033         /*
1034          * If the input is a subquery whose output must be unique already, then we
1035          * don't need to do anything.  The test for uniqueness has to consider
1036          * exactly which columns we are extracting; for example "SELECT DISTINCT
1037          * x,y" doesn't guarantee that x alone is distinct. So we cannot check for
1038          * this optimization unless uniq_exprs consists only of simple Vars
1039          * referencing subquery outputs.  (Possibly we could do something with
1040          * expressions in the subquery outputs, too, but for now keep it simple.)
1041          */
1042         if (rel->rtekind == RTE_SUBQUERY)
1043         {
1044                 RangeTblEntry *rte = planner_rt_fetch(rel->relid, root);
1045                 List       *sub_tlist_colnos;
1046
1047                 sub_tlist_colnos = translate_sub_tlist(uniq_exprs, rel->relid);
1048
1049                 if (sub_tlist_colnos &&
1050                         query_is_distinct_for(rte->subquery,
1051                                                                   sub_tlist_colnos, in_operators))
1052                 {
1053                         pathnode->umethod = UNIQUE_PATH_NOOP;
1054                         pathnode->rows = rel->rows;
1055                         pathnode->path.startup_cost = subpath->startup_cost;
1056                         pathnode->path.total_cost = subpath->total_cost;
1057                         pathnode->path.pathkeys = subpath->pathkeys;
1058
1059                         rel->cheapest_unique_path = (Path *) pathnode;
1060
1061                         MemoryContextSwitchTo(oldcontext);
1062
1063                         return pathnode;
1064                 }
1065         }
1066
1067         /* Estimate number of output rows */
1068         pathnode->rows = estimate_num_groups(root, uniq_exprs, rel->rows);
1069         numCols = list_length(uniq_exprs);
1070
1071         if (all_btree)
1072         {
1073                 /*
1074                  * Estimate cost for sort+unique implementation
1075                  */
1076                 cost_sort(&sort_path, root, NIL,
1077                                   subpath->total_cost,
1078                                   rel->rows,
1079                                   rel->width,
1080                                   0.0,
1081                                   work_mem,
1082                                   -1.0);
1083
1084                 /*
1085                  * Charge one cpu_operator_cost per comparison per input tuple. We
1086                  * assume all columns get compared for most of the tuples. (XXX
1087                  * probably this is an overestimate.)  This should agree with
1088                  * make_unique.
1089                  */
1090                 sort_path.total_cost += cpu_operator_cost * rel->rows * numCols;
1091         }
1092
1093         if (all_hash)
1094         {
1095                 /*
1096                  * Estimate the overhead per hashtable entry at 64 bytes (same as in
1097                  * planner.c).
1098                  */
1099                 int                     hashentrysize = rel->width + 64;
1100
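                /* work_mem is specified in kB, so scale by 1024 to compare bytes */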
1101                 if (hashentrysize * pathnode->rows > work_mem * 1024L)
1102                         all_hash = false;       /* don't try to hash */
1103                 else
1104                         cost_agg(&agg_path, root,
1105                                          AGG_HASHED, 0,
1106                                          numCols, pathnode->rows,
1107                                          subpath->startup_cost,
1108                                          subpath->total_cost,
1109                                          rel->rows);
1110         }
1111
1112         if (all_btree && all_hash)
1113         {
1114                 if (agg_path.total_cost < sort_path.total_cost)
1115                         pathnode->umethod = UNIQUE_PATH_HASH;
1116                 else
1117                         pathnode->umethod = UNIQUE_PATH_SORT;
1118         }
1119         else if (all_btree)
1120                 pathnode->umethod = UNIQUE_PATH_SORT;
1121         else if (all_hash)
1122                 pathnode->umethod = UNIQUE_PATH_HASH;
1123         else
1124                 goto no_unique_path;
1125
1126         if (pathnode->umethod == UNIQUE_PATH_HASH)
1127         {
1128                 pathnode->path.startup_cost = agg_path.startup_cost;
1129                 pathnode->path.total_cost = agg_path.total_cost;
1130         }
1131         else
1132         {
1133                 pathnode->path.startup_cost = sort_path.startup_cost;
1134                 pathnode->path.total_cost = sort_path.total_cost;
1135         }
1136
1137         rel->cheapest_unique_path = (Path *) pathnode;
1138
1139         MemoryContextSwitchTo(oldcontext);
1140
1141         return pathnode;
1142
1143 no_unique_path:                 /* failure exit */
1144
1145         /* Mark the SpecialJoinInfo as not unique-able */
1146         sjinfo->join_quals = NIL;
1147
1148         MemoryContextSwitchTo(oldcontext);
1149
1150         return NULL;
1151 }
1152
1153 /*
1154  * translate_sub_tlist - get subquery column numbers represented by tlist
1155  *
1156  * The given targetlist usually contains only Vars referencing the given relid.
1157  * Extract their varattnos (ie, the column numbers of the subquery) and return
1158  * as an integer List.
1159  *
1160  * If any of the tlist items is not a simple Var, we cannot determine whether
1161  * the subquery's uniqueness condition (if any) matches ours, so punt and
1162  * return NIL.
1163  */
1164 static List *
1165 translate_sub_tlist(List *tlist, int relid)
1166 {
1167         List       *result = NIL;
1168         ListCell   *l;
1169
1170         foreach(l, tlist)
1171         {
1172                 Var                *var = (Var *) lfirst(l);
1173
1174                 if (!var || !IsA(var, Var) ||
1175                         var->varno != relid)
1176                         return NIL;                     /* punt */
1177
1178                 result = lappend_int(result, var->varattno);
1179         }
1180         return result;
1181 }
1182
1183 /*
1184  * query_is_distinct_for - does query never return duplicates of the
1185  *              specified columns?
1186  *
1187  * colnos is an integer list of output column numbers (resno's).  We are
1188  * interested in whether rows consisting of just these columns are certain
1189  * to be distinct.      "Distinctness" is defined according to whether the
1190  * corresponding upper-level equality operators listed in opids would think
1191  * the values are distinct.  (Note: the opids entries could be cross-type
1192  * operators, and thus not exactly the equality operators that the subquery
1193  * would use itself.  We use equality_ops_are_compatible() to check
1194  * compatibility.  That looks at btree or hash opfamily membership, and so
1195  * should give trustworthy answers for all operators that we might need
1196  * to deal with here.)
1197  */
1198 static bool
1199 query_is_distinct_for(Query *query, List *colnos, List *opids)
1200 {
1201         ListCell   *l;
1202         Oid                     opid;
1203
1204         Assert(list_length(colnos) == list_length(opids));
1205
1206         /*
1207          * DISTINCT (including DISTINCT ON) guarantees uniqueness if all the
1208          * columns in the DISTINCT clause appear in colnos and operator semantics
1209          * match.
1210          */
1211         if (query->distinctClause)
1212         {
1213                 foreach(l, query->distinctClause)
1214                 {
1215                         SortGroupClause *sgc = (SortGroupClause *) lfirst(l);
1216                         TargetEntry *tle = get_sortgroupclause_tle(sgc,
1217                                                                                                            query->targetList);
1218
1219                         opid = distinct_col_search(tle->resno, colnos, opids);
1220                         if (!OidIsValid(opid) ||
1221                                 !equality_ops_are_compatible(opid, sgc->eqop))
1222                                 break;                  /* exit early if no match */
1223                 }
1224                 if (l == NULL)                  /* had matches for all? */
1225                         return true;
1226         }
1227
1228         /*
1229          * Similarly, GROUP BY guarantees uniqueness if all the grouped columns
1230          * appear in colnos and operator semantics match.
1231          */
1232         if (query->groupClause)
1233         {
1234                 foreach(l, query->groupClause)
1235                 {
1236                         SortGroupClause *sgc = (SortGroupClause *) lfirst(l);
1237                         TargetEntry *tle = get_sortgroupclause_tle(sgc,
1238                                                                                                            query->targetList);
1239
1240                         opid = distinct_col_search(tle->resno, colnos, opids);
1241                         if (!OidIsValid(opid) ||
1242                                 !equality_ops_are_compatible(opid, sgc->eqop))
1243                                 break;                  /* exit early if no match */
1244                 }
1245                 if (l == NULL)                  /* had matches for all? */
1246                         return true;
1247         }
1248         else
1249         {
1250                 /*
1251                  * If we have no GROUP BY, but do have aggregates or HAVING, then the
1252                  * result is at most one row so it's surely unique, for any operators.
1253                  */
1254                 if (query->hasAggs || query->havingQual)
1255                         return true;
1256         }
1257
1258         /*
1259          * UNION, INTERSECT, EXCEPT guarantee uniqueness of the whole output row,
1260          * except with ALL.
1261          */
1262         if (query->setOperations)
1263         {
1264                 SetOperationStmt *topop = (SetOperationStmt *) query->setOperations;
1265
1266                 Assert(IsA(topop, SetOperationStmt));
1267                 Assert(topop->op != SETOP_NONE);
1268
1269                 if (!topop->all)
1270                 {
1271                         ListCell   *lg;
1272
1273                         /* We're good if all the nonjunk output columns are in colnos */
1274                         lg = list_head(topop->groupClauses);
1275                         foreach(l, query->targetList)
1276                         {
1277                                 TargetEntry *tle = (TargetEntry *) lfirst(l);
1278                                 SortGroupClause *sgc;
1279
1280                                 if (tle->resjunk)
1281                                         continue;       /* ignore resjunk columns */
1282
1283                                 /* non-resjunk columns should have grouping clauses */
1284                                 Assert(lg != NULL);
1285                                 sgc = (SortGroupClause *) lfirst(lg);
1286                                 lg = lnext(lg);
1287
1288                                 opid = distinct_col_search(tle->resno, colnos, opids);
1289                                 if (!OidIsValid(opid) ||
1290                                         !equality_ops_are_compatible(opid, sgc->eqop))
1291                                         break;          /* exit early if no match */
1292                         }
1293                         if (l == NULL)          /* had matches for all? */
1294                                 return true;
1295                 }
1296         }
1297
1298         /*
1299          * XXX Are there any other cases in which we can easily see the result
1300          * must be distinct?
1301          */
1302
1303         return false;
1304 }
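
/*
 * Illustrative examples of the tests above (assuming the opids passed in
 * are compatible with the default equality operators):
 *
 *      SELECT a, b FROM t GROUP BY a, b
 *
 * is provably distinct for colnos = (1, 2), since every grouped column is
 * covered, but not for colnos = (1) alone, because several groups can
 * share the same value of a.
 *
 *      SELECT a, b FROM t1 UNION SELECT a, b FROM t2
 *
 * guarantees uniqueness of the whole output row, so it passes for
 * colnos = (1, 2) but not, by this test, for any proper subset of the
 * nonjunk output columns.
 */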
1305
1306 /*
1307  * distinct_col_search - subroutine for query_is_distinct_for
1308  *
1309  * If colno is in colnos, return the corresponding element of opids,
1310  * else return InvalidOid.      (We expect colnos does not contain duplicates,
1311  * so the result is well-defined.)
1312  */
1313 static Oid
1314 distinct_col_search(int colno, List *colnos, List *opids)
1315 {
1316         ListCell   *lc1,
1317                            *lc2;
1318
1319         forboth(lc1, colnos, lc2, opids)
1320         {
1321                 if (colno == lfirst_int(lc1))
1322                         return lfirst_oid(lc2);
1323         }
1324         return InvalidOid;
1325 }
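
/*
 * For example (illustrative values): with colnos = (2, 5) and a parallel
 * opids list, distinct_col_search(5, colnos, opids) returns the second
 * element of opids, while distinct_col_search(3, colnos, opids) returns
 * InvalidOid.
 */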
1326
1327 /*
1328  * create_subqueryscan_path
1329  *        Creates a path corresponding to a sequential scan of a subquery,
1330  *        returning the pathnode.
1331  */
1332 Path *
1333 create_subqueryscan_path(RelOptInfo *rel, List *pathkeys)
1334 {
1335         Path       *pathnode = makeNode(Path);
1336
1337         pathnode->pathtype = T_SubqueryScan;
1338         pathnode->parent = rel;
1339         pathnode->pathkeys = pathkeys;
1340
1341         cost_subqueryscan(pathnode, rel);
1342
1343         return pathnode;
1344 }
1345
1346 /*
1347  * create_functionscan_path
1348  *        Creates a path corresponding to a sequential scan of a function,
1349  *        returning the pathnode.
1350  */
1351 Path *
1352 create_functionscan_path(PlannerInfo *root, RelOptInfo *rel)
1353 {
1354         Path       *pathnode = makeNode(Path);
1355
1356         pathnode->pathtype = T_FunctionScan;
1357         pathnode->parent = rel;
1358         pathnode->pathkeys = NIL;       /* for now, assume unordered result */
1359
1360         cost_functionscan(pathnode, root, rel);
1361
1362         return pathnode;
1363 }
1364
1365 /*
1366  * create_valuesscan_path
1367  *        Creates a path corresponding to a scan of a VALUES list,
1368  *        returning the pathnode.
1369  */
1370 Path *
1371 create_valuesscan_path(PlannerInfo *root, RelOptInfo *rel)
1372 {
1373         Path       *pathnode = makeNode(Path);
1374
1375         pathnode->pathtype = T_ValuesScan;
1376         pathnode->parent = rel;
1377         pathnode->pathkeys = NIL;       /* result is always unordered */
1378
1379         cost_valuesscan(pathnode, root, rel);
1380
1381         return pathnode;
1382 }
1383
1384 /*
1385  * create_ctescan_path
1386  *        Creates a path corresponding to a scan of a non-self-reference CTE,
1387  *        returning the pathnode.
1388  */
1389 Path *
1390 create_ctescan_path(PlannerInfo *root, RelOptInfo *rel)
1391 {
1392         Path       *pathnode = makeNode(Path);
1393
1394         pathnode->pathtype = T_CteScan;
1395         pathnode->parent = rel;
1396         pathnode->pathkeys = NIL;       /* XXX for now, result is always unordered */
1397
1398         cost_ctescan(pathnode, root, rel);
1399
1400         return pathnode;
1401 }
1402
1403 /*
1404  * create_worktablescan_path
1405  *        Creates a path corresponding to a scan of a self-reference CTE,
1406  *        returning the pathnode.
1407  */
1408 Path *
1409 create_worktablescan_path(PlannerInfo *root, RelOptInfo *rel)
1410 {
1411         Path       *pathnode = makeNode(Path);
1412
1413         pathnode->pathtype = T_WorkTableScan;
1414         pathnode->parent = rel;
1415         pathnode->pathkeys = NIL;       /* result is always unordered */
1416
1417         /* Cost is the same as for a regular CTE scan */
1418         cost_ctescan(pathnode, root, rel);
1419
1420         return pathnode;
1421 }
1422
1423 /*
1424  * create_foreignscan_path
1425  *        Creates a path corresponding to a scan of a foreign table,
1426  *        returning the pathnode.
1427  */
1428 ForeignPath *
1429 create_foreignscan_path(PlannerInfo *root, RelOptInfo *rel)
1430 {
1431         ForeignPath *pathnode = makeNode(ForeignPath);
1432         RangeTblEntry *rte;
1433         FdwRoutine *fdwroutine;
1434         FdwPlan    *fdwplan;
1435
1436         pathnode->path.pathtype = T_ForeignScan;
1437         pathnode->path.parent = rel;
1438         pathnode->path.pathkeys = NIL;  /* result is always unordered */
1439
1440         /* Get FDW's callback info */
1441         rte = planner_rt_fetch(rel->relid, root);
1442         fdwroutine = GetFdwRoutineByRelId(rte->relid);
1443
1444         /* Let the FDW do its planning */
1445         fdwplan = fdwroutine->PlanForeignScan(rte->relid, root, rel);
1446         if (fdwplan == NULL || !IsA(fdwplan, FdwPlan))
1447                 elog(ERROR, "foreign-data wrapper PlanForeignScan function for relation %u did not return an FdwPlan struct",
1448                          rte->relid);
1449         pathnode->fdwplan = fdwplan;
1450
1451         /* use costs estimated by FDW */
1452         pathnode->path.startup_cost = fdwplan->startup_cost;
1453         pathnode->path.total_cost = fdwplan->total_cost;
1454
1455         return pathnode;
1456 }
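
/*
 * A minimal sketch of the FDW side of this contract, for a hypothetical
 * wrapper; the names and cost figures below are illustrative only, not
 * taken from any shipped FDW:
 *
 *      static FdwPlan *
 *      examplePlanForeignScan(Oid foreigntableid,
 *                             PlannerInfo *root,
 *                             RelOptInfo *baserel)
 *      {
 *              FdwPlan    *fdwplan = makeNode(FdwPlan);
 *
 *              fdwplan->startup_cost = 10.0;
 *              fdwplan->total_cost = fdwplan->startup_cost +
 *                      baserel->rows * cpu_tuple_cost;
 *              return fdwplan;
 *      }
 *
 * The startup_cost and total_cost the callback fills in are copied into
 * the path above, so the FDW's own estimates are what the rest of the
 * planner sees for this scan.
 */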
1457
1458 /*
1459  * create_nestloop_path
1460  *        Creates a pathnode corresponding to a nestloop join between two
1461  *        relations.
1462  *
1463  * 'joinrel' is the join relation.
1464  * 'jointype' is the type of join required
1465  * 'sjinfo' is extra info about the join for selectivity estimation
1466  * 'outer_path' is the outer path
1467  * 'inner_path' is the inner path
1468  * 'restrict_clauses' are the RestrictInfo nodes to apply at the join
1469  * 'pathkeys' are the path keys of the new join path
1470  *
1471  * Returns the resulting path node.
1472  */
1473 NestPath *
1474 create_nestloop_path(PlannerInfo *root,
1475                                          RelOptInfo *joinrel,
1476                                          JoinType jointype,
1477                                          SpecialJoinInfo *sjinfo,
1478                                          Path *outer_path,
1479                                          Path *inner_path,
1480                                          List *restrict_clauses,
1481                                          List *pathkeys)
1482 {
1483         NestPath   *pathnode = makeNode(NestPath);
1484
1485         pathnode->path.pathtype = T_NestLoop;
1486         pathnode->path.parent = joinrel;
1487         pathnode->jointype = jointype;
1488         pathnode->outerjoinpath = outer_path;
1489         pathnode->innerjoinpath = inner_path;
1490         pathnode->joinrestrictinfo = restrict_clauses;
1491         pathnode->path.pathkeys = pathkeys;
1492
1493         cost_nestloop(pathnode, root, sjinfo);
1494
1495         return pathnode;
1496 }
1497
1498 /*
1499  * create_mergejoin_path
1500  *        Creates a pathnode corresponding to a mergejoin between
1501  *        two relations
1502  *
1503  * 'joinrel' is the join relation
1504  * 'jointype' is the type of join required
1505  * 'sjinfo' is extra info about the join for selectivity estimation
1506  * 'outer_path' is the outer path
1507  * 'inner_path' is the inner path
1508  * 'restrict_clauses' are the RestrictInfo nodes to apply at the join
1509  * 'pathkeys' are the path keys of the new join path
1510  * 'mergeclauses' are the RestrictInfo nodes to use as merge clauses
1511  *              (this should be a subset of the restrict_clauses list)
1512  * 'outersortkeys' are the sort varkeys for the outer relation
1513  * 'innersortkeys' are the sort varkeys for the inner relation
1514  */
1515 MergePath *
1516 create_mergejoin_path(PlannerInfo *root,
1517                                           RelOptInfo *joinrel,
1518                                           JoinType jointype,
1519                                           SpecialJoinInfo *sjinfo,
1520                                           Path *outer_path,
1521                                           Path *inner_path,
1522                                           List *restrict_clauses,
1523                                           List *pathkeys,
1524                                           List *mergeclauses,
1525                                           List *outersortkeys,
1526                                           List *innersortkeys)
1527 {
1528         MergePath  *pathnode = makeNode(MergePath);
1529
1530         /*
1531          * If the given paths are already well enough ordered, we can skip doing
1532          * an explicit sort.
1533          */
1534         if (outersortkeys &&
1535                 pathkeys_contained_in(outersortkeys, outer_path->pathkeys))
1536                 outersortkeys = NIL;
1537         if (innersortkeys &&
1538                 pathkeys_contained_in(innersortkeys, inner_path->pathkeys))
1539                 innersortkeys = NIL;
1540
1541         pathnode->jpath.path.pathtype = T_MergeJoin;
1542         pathnode->jpath.path.parent = joinrel;
1543         pathnode->jpath.jointype = jointype;
1544         pathnode->jpath.outerjoinpath = outer_path;
1545         pathnode->jpath.innerjoinpath = inner_path;
1546         pathnode->jpath.joinrestrictinfo = restrict_clauses;
1547         pathnode->jpath.path.pathkeys = pathkeys;
1548         pathnode->path_mergeclauses = mergeclauses;
1549         pathnode->outersortkeys = outersortkeys;
1550         pathnode->innersortkeys = innersortkeys;
1551         /* pathnode->materialize_inner will be set by cost_mergejoin */
1552
1553         cost_mergejoin(pathnode, root, sjinfo);
1554
1555         return pathnode;
1556 }
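
/*
 * For example (illustrative): if the outer path is an index scan whose
 * pathkeys already satisfy outersortkeys, the code above resets
 * outersortkeys to NIL, so cost_mergejoin charges no explicit sort on
 * that side; an unsorted inner path keeps its innersortkeys and is
 * costed as needing a sort.
 */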
1557
1558 /*
1559  * create_hashjoin_path
1560  *        Creates a pathnode corresponding to a hash join between two relations.
1561  *
1562  * 'joinrel' is the join relation
1563  * 'jointype' is the type of join required
1564  * 'sjinfo' is extra info about the join for selectivity estimation
1565  * 'outer_path' is the cheapest outer path
1566  * 'inner_path' is the cheapest inner path
1567  * 'restrict_clauses' are the RestrictInfo nodes to apply at the join
1568  * 'hashclauses' are the RestrictInfo nodes to use as hash clauses
1569  *              (this should be a subset of the restrict_clauses list)
1570  */
1571 HashPath *
1572 create_hashjoin_path(PlannerInfo *root,
1573                                          RelOptInfo *joinrel,
1574                                          JoinType jointype,
1575                                          SpecialJoinInfo *sjinfo,
1576                                          Path *outer_path,
1577                                          Path *inner_path,
1578                                          List *restrict_clauses,
1579                                          List *hashclauses)
1580 {
1581         HashPath   *pathnode = makeNode(HashPath);
1582
1583         pathnode->jpath.path.pathtype = T_HashJoin;
1584         pathnode->jpath.path.parent = joinrel;
1585         pathnode->jpath.jointype = jointype;
1586         pathnode->jpath.outerjoinpath = outer_path;
1587         pathnode->jpath.innerjoinpath = inner_path;
1588         pathnode->jpath.joinrestrictinfo = restrict_clauses;
1589
1590         /*
1591          * A hashjoin never has pathkeys, since its output ordering is
1592          * unpredictable due to possible batching.      XXX If the inner relation is
1593          * small enough, we could instruct the executor that it must not batch,
1594          * and then we could assume that the output inherits the outer relation's
1595          * ordering, which might save a sort step.      However there is considerable
1596          * downside if our estimate of the inner relation size is badly off. For
1597          * the moment we don't risk it.  (Note also that if we wanted to take this
1598          * seriously, joinpath.c would have to consider many more paths for the
1599          * outer rel than it does now.)
1600          */
1601         pathnode->jpath.path.pathkeys = NIL;
1602         pathnode->path_hashclauses = hashclauses;
1603         /* cost_hashjoin will fill in pathnode->num_batches */
1604
1605         cost_hashjoin(pathnode, root, sjinfo);
1606
1607         return pathnode;
1608 }