1 /*-------------------------------------------------------------------------
2  *
3  * allpaths.c
4  *        Routines to find possible search paths for processing a query
5  *
6  * Portions Copyright (c) 1996-2011, PostgreSQL Global Development Group
7  * Portions Copyright (c) 1994, Regents of the University of California
8  *
9  *
10  * IDENTIFICATION
11  *        src/backend/optimizer/path/allpaths.c
12  *
13  *-------------------------------------------------------------------------
14  */
15
16 #include "postgres.h"
17
18 #include <math.h>
19
20 #include "catalog/pg_class.h"
21 #include "nodes/nodeFuncs.h"
22 #ifdef OPTIMIZER_DEBUG
23 #include "nodes/print.h"
24 #endif
25 #include "optimizer/clauses.h"
26 #include "optimizer/cost.h"
27 #include "optimizer/geqo.h"
28 #include "optimizer/pathnode.h"
29 #include "optimizer/paths.h"
30 #include "optimizer/plancat.h"
31 #include "optimizer/planner.h"
32 #include "optimizer/prep.h"
33 #include "optimizer/restrictinfo.h"
34 #include "optimizer/var.h"
35 #include "parser/parse_clause.h"
36 #include "parser/parsetree.h"
37 #include "rewrite/rewriteManip.h"
38 #include "utils/lsyscache.h"
39
40
41 /* These parameters are set by GUC */
42 bool            enable_geqo = false;    /* just in case GUC doesn't set it */
43 int                     geqo_threshold;
44
45 /* Hook for plugins to replace standard_join_search() */
46 join_search_hook_type join_search_hook = NULL;
47
48
49 static void set_base_rel_pathlists(PlannerInfo *root);
50 static void set_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
51                                  Index rti, RangeTblEntry *rte);
52 static void set_plain_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
53                                            RangeTblEntry *rte);
54 static void set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
55                                                 Index rti, RangeTblEntry *rte);
56 static List *accumulate_append_subpath(List *subpaths, Path *path);
57 static void set_dummy_rel_pathlist(RelOptInfo *rel);
58 static void set_subquery_pathlist(PlannerInfo *root, RelOptInfo *rel,
59                                           Index rti, RangeTblEntry *rte);
60 static void set_function_pathlist(PlannerInfo *root, RelOptInfo *rel,
61                                           RangeTblEntry *rte);
62 static void set_values_pathlist(PlannerInfo *root, RelOptInfo *rel,
63                                         RangeTblEntry *rte);
64 static void set_cte_pathlist(PlannerInfo *root, RelOptInfo *rel,
65                                  RangeTblEntry *rte);
66 static void set_worktable_pathlist(PlannerInfo *root, RelOptInfo *rel,
67                                            RangeTblEntry *rte);
68 static void set_foreign_pathlist(PlannerInfo *root, RelOptInfo *rel,
69                                          RangeTblEntry *rte);
70 static RelOptInfo *make_rel_from_joinlist(PlannerInfo *root, List *joinlist);
71 static bool subquery_is_pushdown_safe(Query *subquery, Query *topquery,
72                                                   bool *differentTypes);
73 static bool recurse_pushdown_safe(Node *setOp, Query *topquery,
74                                           bool *differentTypes);
75 static void compare_tlist_datatypes(List *tlist, List *colTypes,
76                                                 bool *differentTypes);
77 static bool qual_is_pushdown_safe(Query *subquery, Index rti, Node *qual,
78                                           bool *differentTypes);
79 static void subquery_push_qual(Query *subquery,
80                                    RangeTblEntry *rte, Index rti, Node *qual);
81 static void recurse_push_qual(Node *setOp, Query *topquery,
82                                   RangeTblEntry *rte, Index rti, Node *qual);
83
84
85 /*
86  * make_one_rel
87  *        Finds all possible access paths for executing a query, returning a
88  *        single rel that represents the join of all base rels in the query.
89  */
90 RelOptInfo *
91 make_one_rel(PlannerInfo *root, List *joinlist)
92 {
93         RelOptInfo *rel;
94
95         /*
96          * Generate access paths for the base rels.
97          */
98         set_base_rel_pathlists(root);
99
100         /*
101          * Generate access paths for the entire join tree.
102          */
103         rel = make_rel_from_joinlist(root, joinlist);
104
105         /*
106          * The result should join all and only the query's base rels.
107          */
108 #ifdef USE_ASSERT_CHECKING
109         {
110                 int                     num_base_rels = 0;
111                 Index           rti;
112
113                 for (rti = 1; rti < root->simple_rel_array_size; rti++)
114                 {
115                         RelOptInfo *brel = root->simple_rel_array[rti];
116
117                         if (brel == NULL)
118                                 continue;
119
120                         Assert(brel->relid == rti); /* sanity check on array */
121
122                         /* ignore RTEs that are "other rels" */
123                         if (brel->reloptkind != RELOPT_BASEREL)
124                                 continue;
125
126                         Assert(bms_is_member(rti, rel->relids));
127                         num_base_rels++;
128                 }
129
130                 Assert(bms_num_members(rel->relids) == num_base_rels);
131         }
132 #endif
133
134         return rel;
135 }
136
137 /*
138  * set_base_rel_pathlists
139  *        Finds all paths available for scanning each base-relation entry.
140  *        Sequential scan and any available indices are considered.
141  *        Each useful path is attached to its relation's 'pathlist' field.
142  */
143 static void
144 set_base_rel_pathlists(PlannerInfo *root)
145 {
146         Index           rti;
147
148         for (rti = 1; rti < root->simple_rel_array_size; rti++)
149         {
150                 RelOptInfo *rel = root->simple_rel_array[rti];
151
152                 /* there may be empty slots corresponding to non-baserel RTEs */
153                 if (rel == NULL)
154                         continue;
155
156                 Assert(rel->relid == rti);              /* sanity check on array */
157
158                 /* ignore RTEs that are "other rels" */
159                 if (rel->reloptkind != RELOPT_BASEREL)
160                         continue;
161
162                 set_rel_pathlist(root, rel, rti, root->simple_rte_array[rti]);
163         }
164 }
165
166 /*
167  * set_rel_pathlist
168  *        Build access paths for a base relation
169  */
170 static void
171 set_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
172                                  Index rti, RangeTblEntry *rte)
173 {
174         if (rel->reloptkind == RELOPT_BASEREL &&
175                 relation_excluded_by_constraints(root, rel, rte))
176         {
177                 /*
178                  * We proved we don't need to scan the rel via constraint exclusion,
179                  * so set up a single dummy path for it.  Here we only check this for
180                  * regular baserels; if it's an otherrel, CE was already checked in
181                  * set_append_rel_pathlist().
182                  */
183                 set_dummy_rel_pathlist(rel);
184         }
185         else if (rte->inh)
186         {
187                 /* It's an "append relation", process accordingly */
188                 set_append_rel_pathlist(root, rel, rti, rte);
189         }
190         else
191         {
192                 switch (rel->rtekind)
193                 {
194                         case RTE_RELATION:
195                                 if (rte->relkind == RELKIND_FOREIGN_TABLE)
196                                 {
197                                         /* Foreign table */
198                                         set_foreign_pathlist(root, rel, rte);
199                                 }
200                                 else
201                                 {
202                                         /* Plain relation */
203                                         set_plain_rel_pathlist(root, rel, rte);
204                                 }
205                                 break;
206                         case RTE_SUBQUERY:
207                                 /* Subquery --- generate a separate plan for it */
208                                 set_subquery_pathlist(root, rel, rti, rte);
209                                 break;
210                         case RTE_FUNCTION:
211                                 /* RangeFunction --- generate a suitable path for it */
212                                 set_function_pathlist(root, rel, rte);
213                                 break;
214                         case RTE_VALUES:
215                                 /* Values list --- generate a suitable path for it */
216                                 set_values_pathlist(root, rel, rte);
217                                 break;
218                         case RTE_CTE:
219                                 /* CTE reference --- generate a suitable path for it */
220                                 if (rte->self_reference)
221                                         set_worktable_pathlist(root, rel, rte);
222                                 else
223                                         set_cte_pathlist(root, rel, rte);
224                                 break;
225                         default:
226                                 elog(ERROR, "unexpected rtekind: %d", (int) rel->rtekind);
227                                 break;
228                 }
229         }
230
231 #ifdef OPTIMIZER_DEBUG
232         debug_print_rel(root, rel);
233 #endif
234 }
235
236 /*
237  * set_plain_rel_pathlist
238  *        Build access paths for a plain relation (no subquery, no inheritance)
239  */
240 static void
241 set_plain_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
242 {
243         /*
244          * Test any partial indexes of rel for applicability.  We must do this
245          * first since partial unique indexes can affect size estimates.
246          */
247         check_partial_indexes(root, rel);
248
249         /* Mark rel with estimated output rows, width, etc */
250         set_baserel_size_estimates(root, rel);
251
252         /*
253          * Check to see if we can extract any restriction conditions from join
254          * quals that are OR-of-AND structures.  If so, add them to the rel's
255          * restriction list, and redo the above steps.
256          */
257         if (create_or_index_quals(root, rel))
258         {
259                 check_partial_indexes(root, rel);
260                 set_baserel_size_estimates(root, rel);
261         }
262
263         /*
264          * Generate paths and add them to the rel's pathlist.
265          *
266          * Note: add_path() will discard any paths that are dominated by another
267          * available path, keeping only those paths that are superior along at
268          * least one dimension of cost or sortedness.
269          */
270
271         /* Consider sequential scan */
272         add_path(rel, create_seqscan_path(root, rel));
273
274         /* Consider index scans */
275         create_index_paths(root, rel);
276
277         /* Consider TID scans */
278         create_tidscan_paths(root, rel);
279
280         /* Now find the cheapest of the paths for this rel */
281         set_cheapest(rel);
282 }
283
284 /*
285  * set_append_rel_pathlist
286  *        Build access paths for an "append relation"
287  *
288  * The passed-in rel and RTE represent the entire append relation.      The
289  * relation's contents are computed by appending together the output of
290  * the individual member relations.  Note that in the inheritance case,
291  * the first member relation is actually the same table as is mentioned in
292  * the parent RTE ... but it has a different RTE and RelOptInfo.  This is
293  * a good thing because their outputs are not the same size.
294  */
295 static void
296 set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
297                                                 Index rti, RangeTblEntry *rte)
298 {
299         int                     parentRTindex = rti;
300         List       *live_childrels = NIL;
301         List       *subpaths = NIL;
302         List       *all_child_pathkeys = NIL;
303         double          parent_rows;
304         double          parent_size;
305         double     *parent_attrsizes;
306         int                     nattrs;
307         ListCell   *l;
308
309         /*
310          * Initialize to compute size estimates for whole append relation.
311          *
312          * We handle width estimates by weighting the widths of different child
313          * rels proportionally to their number of rows.  This is sensible because
314          * the use of width estimates is mainly to compute the total relation
315          * "footprint" if we have to sort or hash it.  To do this, we sum the
316          * total equivalent size (in "double" arithmetic) and then divide by the
317          * total rowcount estimate.  This is done separately for the total rel
318          * width and each attribute.
319          *
320          * Note: if you consider changing this logic, beware that child rels could
321          * have zero rows and/or width, if they were excluded by constraints.
322          */
323         parent_rows = 0;
324         parent_size = 0;
325         nattrs = rel->max_attr - rel->min_attr + 1;
326         parent_attrsizes = (double *) palloc0(nattrs * sizeof(double));
327
328         /*
329          * Generate access paths for each member relation, and pick the cheapest
330          * path for each one.
331          */
332         foreach(l, root->append_rel_list)
333         {
334                 AppendRelInfo *appinfo = (AppendRelInfo *) lfirst(l);
335                 int                     childRTindex;
336                 RangeTblEntry *childRTE;
337                 RelOptInfo *childrel;
338                 List       *childquals;
339                 Node       *childqual;
340                 ListCell   *lcp;
341                 ListCell   *parentvars;
342                 ListCell   *childvars;
343
344                 /* append_rel_list contains all append rels; ignore others */
345                 if (appinfo->parent_relid != parentRTindex)
346                         continue;
347
348                 childRTindex = appinfo->child_relid;
349                 childRTE = root->simple_rte_array[childRTindex];
350
351                 /*
352                  * The child rel's RelOptInfo was already created during
353                  * add_base_rels_to_query.
354                  */
355                 childrel = find_base_rel(root, childRTindex);
356                 Assert(childrel->reloptkind == RELOPT_OTHER_MEMBER_REL);
357
358                 /*
359                  * We have to copy the parent's targetlist and quals to the child,
360                  * with appropriate substitution of variables.  However, only the
361                  * baserestrictinfo quals are needed before we can check for
362                  * constraint exclusion; so do that first and then check to see if we
363                  * can disregard this child.
364                  *
365                  * As of 8.4, the child rel's targetlist might contain non-Var
366                  * expressions, which means that substitution into the quals could
367                  * produce opportunities for const-simplification, and perhaps even
368                  * pseudoconstant quals.  To deal with this, we strip the RestrictInfo
369                  * nodes, do the substitution, do const-simplification, and then
370                  * reconstitute the RestrictInfo layer.
371                  */
372                 childquals = get_all_actual_clauses(rel->baserestrictinfo);
373                 childquals = (List *) adjust_appendrel_attrs((Node *) childquals,
374                                                                                                          appinfo);
375                 childqual = eval_const_expressions(root, (Node *)
376                                                                                    make_ands_explicit(childquals));
377                 if (childqual && IsA(childqual, Const) &&
378                         (((Const *) childqual)->constisnull ||
379                          !DatumGetBool(((Const *) childqual)->constvalue)))
380                 {
381                         /*
382                          * Restriction reduces to constant FALSE or constant NULL after
383                          * substitution, so this child need not be scanned.
384                          */
385                         set_dummy_rel_pathlist(childrel);
386                         continue;
387                 }
388                 childquals = make_ands_implicit((Expr *) childqual);
389                 childquals = make_restrictinfos_from_actual_clauses(root,
390                                                                                                                         childquals);
391                 childrel->baserestrictinfo = childquals;
392
393                 if (relation_excluded_by_constraints(root, childrel, childRTE))
394                 {
395                         /*
396                          * This child need not be scanned, so we can omit it from the
397                          * appendrel.  Mark it with a dummy cheapest-path though, in case
398                          * best_appendrel_indexscan() looks at it later.
399                          */
400                         set_dummy_rel_pathlist(childrel);
401                         continue;
402                 }
403
404                 /*
405                  * CE failed, so finish copying/modifying targetlist and join quals.
406                  *
407                  * Note: the resulting childrel->reltargetlist may contain arbitrary
408                  * expressions, which normally would not occur in a reltargetlist.
409                  * That is okay because nothing outside of this routine will look at
410                  * the child rel's reltargetlist.  We do have to cope with the case
411                  * while constructing attr_widths estimates below, though.
412                  */
413                 childrel->joininfo = (List *)
414                         adjust_appendrel_attrs((Node *) rel->joininfo,
415                                                                    appinfo);
416                 childrel->reltargetlist = (List *)
417                         adjust_appendrel_attrs((Node *) rel->reltargetlist,
418                                                                    appinfo);
419
420                 /*
421                  * We have to make child entries in the EquivalenceClass data
422                  * structures as well.  This is needed either if the parent
423                  * participates in some eclass joins (because we will want to consider
424                  * inner-indexscan joins on the individual children) or if the parent
425                  * has useful pathkeys (because we should try to build MergeAppend
426                  * paths that produce those sort orderings).
427                  */
428                 if (rel->has_eclass_joins || has_useful_pathkeys(root, rel))
429                         add_child_rel_equivalences(root, appinfo, rel, childrel);
430                 childrel->has_eclass_joins = rel->has_eclass_joins;
431
432                 /*
433                  * Note: we could compute appropriate attr_needed data for the child's
434                  * variables, by transforming the parent's attr_needed through the
435                  * translated_vars mapping.  However, currently there's no need
436                  * because attr_needed is only examined for base relations not
437                  * otherrels.  So we just leave the child's attr_needed empty.
438                  */
439
440                 /*
441                  * Compute the child's access paths.
442                  */
443                 set_rel_pathlist(root, childrel, childRTindex, childRTE);
444
445                 /*
446                  * It is possible that constraint exclusion detected a contradiction
447                  * within a child subquery, even though we didn't prove one above.
448                  * If what we got back was a dummy path, we can skip this child.
449                  */
450                 if (IS_DUMMY_PATH(childrel->cheapest_total_path))
451                         continue;
452
453                 /*
454                  * Child is live, so add its cheapest access path to the Append path
455                  * we are constructing for the parent.
456                  */
457                 subpaths = accumulate_append_subpath(subpaths,
458                                                                                          childrel->cheapest_total_path);
459
460                 /* Remember which childrels are live, for MergeAppend logic below */
461                 live_childrels = lappend(live_childrels, childrel);
462
463                 /*
464                  * Collect a list of all the available path orderings for all the
465                  * children.  We use this as a heuristic to indicate which sort
466                  * orderings we should build MergeAppend paths for.
467                  */
468                 foreach(lcp, childrel->pathlist)
469                 {
470                         Path       *childpath = (Path *) lfirst(lcp);
471                         List       *childkeys = childpath->pathkeys;
472                         ListCell   *lpk;
473                         bool            found = false;
474
475                         /* Ignore unsorted paths */
476                         if (childkeys == NIL)
477                                 continue;
478
479                         /* Have we already seen this ordering? */
480                         foreach(lpk, all_child_pathkeys)
481                         {
482                                 List       *existing_pathkeys = (List *) lfirst(lpk);
483
484                                 if (compare_pathkeys(existing_pathkeys,
485                                                                          childkeys) == PATHKEYS_EQUAL)
486                                 {
487                                         found = true;
488                                         break;
489                                 }
490                         }
491                         if (!found)
492                         {
493                                 /* No, so add it to all_child_pathkeys */
494                                 all_child_pathkeys = lappend(all_child_pathkeys, childkeys);
495                         }
496                 }
497
498                 /*
499                  * Accumulate size information from each child.
500                  */
501                 if (childrel->rows > 0)
502                 {
503                         parent_rows += childrel->rows;
504                         parent_size += childrel->width * childrel->rows;
505
506                         /*
507                          * Accumulate per-column estimates too.  We need not do anything
508                          * for PlaceHolderVars in the parent list.  If child expression
509                          * isn't a Var, or we didn't record a width estimate for it, we
510                          * have to fall back on a datatype-based estimate.
511                          *
512                          * By construction, child's reltargetlist is 1-to-1 with parent's.
513                          */
514                         forboth(parentvars, rel->reltargetlist,
515                                         childvars, childrel->reltargetlist)
516                         {
517                                 Var                *parentvar = (Var *) lfirst(parentvars);
518                                 Node       *childvar = (Node *) lfirst(childvars);
519
520                                 if (IsA(parentvar, Var))
521                                 {
522                                         int                     pndx = parentvar->varattno - rel->min_attr;
523                                         int32           child_width = 0;
524
525                                         if (IsA(childvar, Var))
526                                         {
527                                                 int             cndx = ((Var *) childvar)->varattno - childrel->min_attr;
528
529                                                 child_width = childrel->attr_widths[cndx];
530                                         }
531                                         if (child_width <= 0)
532                                                 child_width = get_typavgwidth(exprType(childvar),
533                                                                                                           exprTypmod(childvar));
534                                         Assert(child_width > 0);
535                                         parent_attrsizes[pndx] += child_width * childrel->rows;
536                                 }
537                         }
538                 }
539         }
540
541         /*
542          * Save the finished size estimates.
543          */
544         rel->rows = parent_rows;
545         if (parent_rows > 0)
546         {
547                 int                     i;
548
549                 rel->width = rint(parent_size / parent_rows);
550                 for (i = 0; i < nattrs; i++)
551                         rel->attr_widths[i] = rint(parent_attrsizes[i] / parent_rows);
552         }
553         else
554                 rel->width = 0;                 /* attr_widths should be zero already */
555
556         /*
557          * Set "raw tuples" count equal to "rows" for the appendrel; needed
558          * because some places assume rel->tuples is valid for any baserel.
559          */
560         rel->tuples = parent_rows;
561
562         pfree(parent_attrsizes);
563
564         /*
565          * Next, build an unordered Append path for the rel.  (Note: this is
566          * correct even if we have zero or one live subpath due to constraint
567          * exclusion.)
568          */
569         add_path(rel, (Path *) create_append_path(rel, subpaths));
570
571         /*
572          * Next, build MergeAppend paths based on the collected list of child
573          * pathkeys.  We consider both cheapest-startup and cheapest-total cases,
574          * ie, for each interesting ordering, collect all the cheapest startup
575          * subpaths and all the cheapest total paths, and build a MergeAppend path
576          * for each list.
577          */
578         foreach(l, all_child_pathkeys)
579         {
580                 List       *pathkeys = (List *) lfirst(l);
581                 List       *startup_subpaths = NIL;
582                 List       *total_subpaths = NIL;
583                 bool            startup_neq_total = false;
584                 ListCell   *lcr;
585
586                 /* Select the child paths for this ordering... */
587                 foreach(lcr, live_childrels)
588                 {
589                         RelOptInfo *childrel = (RelOptInfo *) lfirst(lcr);
590                         Path       *cheapest_startup,
591                                            *cheapest_total;
592
593                         /* Locate the right paths, if they are available. */
594                         cheapest_startup =
595                                 get_cheapest_path_for_pathkeys(childrel->pathlist,
596                                                                                            pathkeys,
597                                                                                            STARTUP_COST);
598                         cheapest_total =
599                                 get_cheapest_path_for_pathkeys(childrel->pathlist,
600                                                                                            pathkeys,
601                                                                                            TOTAL_COST);
602
603                         /*
604                          * If we can't find any paths with the right order just add the
605                          * cheapest-total path; we'll have to sort it.
606                          */
607                         if (cheapest_startup == NULL)
608                                 cheapest_startup = childrel->cheapest_total_path;
609                         if (cheapest_total == NULL)
610                                 cheapest_total = childrel->cheapest_total_path;
611
612                         /*
613                          * Notice whether we actually have different paths for the
614                          * "cheapest" and "total" cases; frequently there will be no point
615                          * in two create_merge_append_path() calls.
616                          */
617                         if (cheapest_startup != cheapest_total)
618                                 startup_neq_total = true;
619
620                         startup_subpaths =
621                                 accumulate_append_subpath(startup_subpaths, cheapest_startup);
622                         total_subpaths =
623                                 accumulate_append_subpath(total_subpaths, cheapest_total);
624                 }
625
626                 /* ... and build the MergeAppend paths */
627                 add_path(rel, (Path *) create_merge_append_path(root,
628                                                                                                                 rel,
629                                                                                                                 startup_subpaths,
630                                                                                                                 pathkeys));
631                 if (startup_neq_total)
632                         add_path(rel, (Path *) create_merge_append_path(root,
633                                                                                                                         rel,
634                                                                                                                         total_subpaths,
635                                                                                                                         pathkeys));
636         }
637
638         /* Select cheapest path */
639         set_cheapest(rel);
640 }
641
642 /*
643  * accumulate_append_subpath
644  *              Add a subpath to the list being built for an Append or MergeAppend
645  *
646  * It's possible that the child is itself an Append path, in which case
647  * we can "cut out the middleman" and just add its child paths to our
648  * own list.  (We don't try to do this earlier because we need to
649  * apply both levels of transformation to the quals.)
650  */
651 static List *
652 accumulate_append_subpath(List *subpaths, Path *path)
653 {
654         if (IsA(path, AppendPath))
655         {
656                 AppendPath *apath = (AppendPath *) path;
657
658                 /* list_copy is important here to avoid sharing list substructure */
659                 return list_concat(subpaths, list_copy(apath->subpaths));
660         }
661         else
662                 return lappend(subpaths, path);
663 }
664
665 /*
666  * set_dummy_rel_pathlist
667  *        Build a dummy path for a relation that's been excluded by constraints
668  *
669  * Rather than inventing a special "dummy" path type, we represent this as an
670  * AppendPath with no members (see also IS_DUMMY_PATH macro).
671  */
672 static void
673 set_dummy_rel_pathlist(RelOptInfo *rel)
674 {
675         /* Set dummy size estimates --- we leave attr_widths[] as zeroes */
676         rel->rows = 0;
677         rel->width = 0;
678
679         add_path(rel, (Path *) create_append_path(rel, NIL));
680
681         /* Select cheapest path (pretty easy in this case...) */
682         set_cheapest(rel);
683 }
684
685 /* quick-and-dirty test to see if any joining is needed */
686 static bool
687 has_multiple_baserels(PlannerInfo *root)
688 {
689         int                     num_base_rels = 0;
690         Index           rti;
691
692         for (rti = 1; rti < root->simple_rel_array_size; rti++)
693         {
694                 RelOptInfo *brel = root->simple_rel_array[rti];
695
696                 if (brel == NULL)
697                         continue;
698
699                 /* ignore RTEs that are "other rels" */
700                 if (brel->reloptkind == RELOPT_BASEREL)
701                         if (++num_base_rels > 1)
702                                 return true;
703         }
704         return false;
705 }
706
707 /*
708  * set_subquery_pathlist
709  *              Build the (single) access path for a subquery RTE
710  */
711 static void
712 set_subquery_pathlist(PlannerInfo *root, RelOptInfo *rel,
713                                           Index rti, RangeTblEntry *rte)
714 {
715         Query      *parse = root->parse;
716         Query      *subquery = rte->subquery;
717         bool       *differentTypes;
718         double          tuple_fraction;
719         PlannerInfo *subroot;
720         List       *pathkeys;
721
722         /*
723          * Must copy the Query so that planning doesn't mess up the RTE contents
724          * (really really need to fix the planner to not scribble on its input,
725          * someday).
726          */
727         subquery = copyObject(subquery);
728
729         /* We need a workspace for keeping track of set-op type coercions */
730         differentTypes = (bool *)
731                 palloc0((list_length(subquery->targetList) + 1) * sizeof(bool));
732
733         /*
734          * If there are any restriction clauses that have been attached to the
735          * subquery relation, consider pushing them down to become WHERE or HAVING
736          * quals of the subquery itself.  This transformation is useful because it
737          * may allow us to generate a better plan for the subquery than evaluating
738          * all the subquery output rows and then filtering them.
739          *
740          * There are several cases where we cannot push down clauses. Restrictions
741          * involving the subquery are checked by subquery_is_pushdown_safe().
742          * Restrictions on individual clauses are checked by
743          * qual_is_pushdown_safe().  Also, we don't want to push down
744          * pseudoconstant clauses; better to have the gating node above the
745          * subquery.
746          *
747          * Also, if the sub-query has "security_barrier" flag, it means the
748          * sub-query originated from a view that must enforce row-level security.
749          * We must not push down quals in order to avoid information leaks, either
750          * via side-effects or error output.
751          *
752          * Non-pushed-down clauses will get evaluated as qpquals of the
753          * SubqueryScan node.
754          *
755          * XXX Are there any cases where we want to make a policy decision not to
756          * push down a pushable qual, because it'd result in a worse plan?
757          */
758         if (rel->baserestrictinfo != NIL &&
759                 subquery_is_pushdown_safe(subquery, subquery, differentTypes))
760         {
761                 /* OK to consider pushing down individual quals */
762                 List       *upperrestrictlist = NIL;
763                 ListCell   *l;
764
765                 foreach(l, rel->baserestrictinfo)
766                 {
767                         RestrictInfo *rinfo = (RestrictInfo *) lfirst(l);
768                         Node       *clause = (Node *) rinfo->clause;
769
770                         /*
771                          * XXX.  You might wonder why we're testing rte->security_barrier
772                          * qual-by-qual here rather than hoisting the test up into the
773                          * surrounding if statement; after all, the answer will be the
774                          * same for all quals.  The answer is that we expect to shortly
775                          * change this logic to allow pushing down some quals that use only
776                          * "leakproof" operators even through a security barrier.
777                          */
778                         if (!rinfo->pseudoconstant &&
779                                 !rte->security_barrier &&
780                                 qual_is_pushdown_safe(subquery, rti, clause, differentTypes))
781                         {
782                                 /* Push it down */
783                                 subquery_push_qual(subquery, rte, rti, clause);
784                         }
785                         else
786                         {
787                                 /* Keep it in the upper query */
788                                 upperrestrictlist = lappend(upperrestrictlist, rinfo);
789                         }
790                 }
791                 rel->baserestrictinfo = upperrestrictlist;
792         }
793
794         pfree(differentTypes);
795
796         /*
797          * We can safely pass the outer tuple_fraction down to the subquery if the
798          * outer level has no joining, aggregation, or sorting to do. Otherwise
799          * we'd better tell the subquery to plan for full retrieval. (XXX This
800          * could probably be made more intelligent ...)
801          */
802         if (parse->hasAggs ||
803                 parse->groupClause ||
804                 parse->havingQual ||
805                 parse->distinctClause ||
806                 parse->sortClause ||
807                 has_multiple_baserels(root))
808                 tuple_fraction = 0.0;   /* default case */
809         else
810                 tuple_fraction = root->tuple_fraction;
811
812         /* Generate the plan for the subquery */
813         rel->subplan = subquery_planner(root->glob, subquery,
814                                                                         root,
815                                                                         false, tuple_fraction,
816                                                                         &subroot);
817         rel->subroot = subroot;
818
819         /*
820          * It's possible that constraint exclusion proved the subquery empty.
821          * If so, it's convenient to turn it back into a dummy path so that we
822          * will recognize appropriate optimizations at this level.
823          */
824         if (is_dummy_plan(rel->subplan))
825         {
826                 set_dummy_rel_pathlist(rel);
827                 return;
828         }
829
830         /* Mark rel with estimated output rows, width, etc */
831         set_subquery_size_estimates(root, rel);
832
833         /* Convert subquery pathkeys to outer representation */
834         pathkeys = convert_subquery_pathkeys(root, rel, subroot->query_pathkeys);
835
836         /* Generate appropriate path */
837         add_path(rel, create_subqueryscan_path(rel, pathkeys));
838
839         /* Select cheapest path (pretty easy in this case...) */
840         set_cheapest(rel);
841 }
842
843 /*
844  * set_function_pathlist
845  *              Build the (single) access path for a function RTE
846  */
847 static void
848 set_function_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
849 {
850         /* Mark rel with estimated output rows, width, etc */
851         set_function_size_estimates(root, rel);
852
853         /* Generate appropriate path */
854         add_path(rel, create_functionscan_path(root, rel));
855
856         /* Select cheapest path (pretty easy in this case...) */
857         set_cheapest(rel);
858 }
859
860 /*
861  * set_values_pathlist
862  *              Build the (single) access path for a VALUES RTE
863  */
864 static void
865 set_values_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
866 {
867         /* Mark rel with estimated output rows, width, etc */
868         set_values_size_estimates(root, rel);
869
870         /* Generate appropriate path */
871         add_path(rel, create_valuesscan_path(root, rel));
872
873         /* Select cheapest path (pretty easy in this case...) */
874         set_cheapest(rel);
875 }
876
877 /*
878  * set_cte_pathlist
879  *              Build the (single) access path for a non-self-reference CTE RTE
880  */
881 static void
882 set_cte_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
883 {
884         Plan       *cteplan;
885         PlannerInfo *cteroot;
886         Index           levelsup;
887         int                     ndx;
888         ListCell   *lc;
889         int                     plan_id;
890
891         /*
892          * Find the referenced CTE, and locate the plan previously made for it.
893          */
894         levelsup = rte->ctelevelsup;
895         cteroot = root;
896         while (levelsup-- > 0)
897         {
898                 cteroot = cteroot->parent_root;
899                 if (!cteroot)                   /* shouldn't happen */
900                         elog(ERROR, "bad levelsup for CTE \"%s\"", rte->ctename);
901         }
902
903         /*
904          * Note: cte_plan_ids can be shorter than cteList, if we are still working
905          * on planning the CTEs (ie, this is a side-reference from another CTE).
906          * So we mustn't use forboth here.
907          */
908         ndx = 0;
909         foreach(lc, cteroot->parse->cteList)
910         {
911                 CommonTableExpr *cte = (CommonTableExpr *) lfirst(lc);
912
913                 if (strcmp(cte->ctename, rte->ctename) == 0)
914                         break;
915                 ndx++;
916         }
917         if (lc == NULL)                         /* shouldn't happen */
918                 elog(ERROR, "could not find CTE \"%s\"", rte->ctename);
919         if (ndx >= list_length(cteroot->cte_plan_ids))
920                 elog(ERROR, "could not find plan for CTE \"%s\"", rte->ctename);
921         plan_id = list_nth_int(cteroot->cte_plan_ids, ndx);
922         Assert(plan_id > 0);
923         cteplan = (Plan *) list_nth(root->glob->subplans, plan_id - 1);
924
925         /* Mark rel with estimated output rows, width, etc */
926         set_cte_size_estimates(root, rel, cteplan);
927
928         /* Generate appropriate path */
929         add_path(rel, create_ctescan_path(root, rel));
930
931         /* Select cheapest path (pretty easy in this case...) */
932         set_cheapest(rel);
933 }
934
935 /*
936  * set_worktable_pathlist
937  *              Build the (single) access path for a self-reference CTE RTE
938  */
939 static void
940 set_worktable_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
941 {
942         Plan       *cteplan;
943         PlannerInfo *cteroot;
944         Index           levelsup;
945
946         /*
947          * We need to find the non-recursive term's plan, which is in the plan
948          * level that's processing the recursive UNION, which is one level *below*
949          * where the CTE comes from.
950          */
951         levelsup = rte->ctelevelsup;
952         if (levelsup == 0)                      /* shouldn't happen */
953                 elog(ERROR, "bad levelsup for CTE \"%s\"", rte->ctename);
954         levelsup--;
955         cteroot = root;
956         while (levelsup-- > 0)
957         {
958                 cteroot = cteroot->parent_root;
959                 if (!cteroot)                   /* shouldn't happen */
960                         elog(ERROR, "bad levelsup for CTE \"%s\"", rte->ctename);
961         }
962         cteplan = cteroot->non_recursive_plan;
963         if (!cteplan)                           /* shouldn't happen */
964                 elog(ERROR, "could not find plan for CTE \"%s\"", rte->ctename);
965
966         /* Mark rel with estimated output rows, width, etc */
967         set_cte_size_estimates(root, rel, cteplan);
968
969         /* Generate appropriate path */
970         add_path(rel, create_worktablescan_path(root, rel));
971
972         /* Select cheapest path (pretty easy in this case...) */
973         set_cheapest(rel);
974 }
975
976 /*
977  * set_foreign_pathlist
978  *              Build the (single) access path for a foreign table RTE
979  */
980 static void
981 set_foreign_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
982 {
983         /* Mark rel with estimated output rows, width, etc */
984         set_foreign_size_estimates(root, rel);
985
986         /* Generate appropriate path */
987         add_path(rel, (Path *) create_foreignscan_path(root, rel));
988
989         /* Select cheapest path (pretty easy in this case...) */
990         set_cheapest(rel);
991 }
992
993 /*
994  * make_rel_from_joinlist
995  *        Build access paths using a "joinlist" to guide the join path search.
996  *
997  * See comments for deconstruct_jointree() for definition of the joinlist
998  * data structure.
999  */
1000 static RelOptInfo *
1001 make_rel_from_joinlist(PlannerInfo *root, List *joinlist)
1002 {
1003         int                     levels_needed;
1004         List       *initial_rels;
1005         ListCell   *jl;
1006
1007         /*
1008          * Count the number of child joinlist nodes.  This is the depth of the
1009          * dynamic-programming algorithm we must employ to consider all ways of
1010          * joining the child nodes.
1011          */
1012         levels_needed = list_length(joinlist);
1013
1014         if (levels_needed <= 0)
1015                 return NULL;                    /* nothing to do? */
1016
1017         /*
1018          * Construct a list of rels corresponding to the child joinlist nodes.
1019          * This may contain both base rels and rels constructed according to
1020          * sub-joinlists.
1021          */
1022         initial_rels = NIL;
1023         foreach(jl, joinlist)
1024         {
1025                 Node       *jlnode = (Node *) lfirst(jl);
1026                 RelOptInfo *thisrel;
1027
1028                 if (IsA(jlnode, RangeTblRef))
1029                 {
1030                         int                     varno = ((RangeTblRef *) jlnode)->rtindex;
1031
1032                         thisrel = find_base_rel(root, varno);
1033                 }
1034                 else if (IsA(jlnode, List))
1035                 {
1036                         /* Recurse to handle subproblem */
1037                         thisrel = make_rel_from_joinlist(root, (List *) jlnode);
1038                 }
1039                 else
1040                 {
1041                         elog(ERROR, "unrecognized joinlist node type: %d",
1042                                  (int) nodeTag(jlnode));
1043                         thisrel = NULL;         /* keep compiler quiet */
1044                 }
1045
1046                 initial_rels = lappend(initial_rels, thisrel);
1047         }
1048
1049         if (levels_needed == 1)
1050         {
1051                 /*
1052                  * Single joinlist node, so we're done.
1053                  */
1054                 return (RelOptInfo *) linitial(initial_rels);
1055         }
1056         else
1057         {
1058                 /*
1059                  * Consider the different orders in which we could join the rels,
1060                  * using a plugin, GEQO, or the regular join search code.
1061                  *
1062                  * We put the initial_rels list into a PlannerInfo field because
1063                  * has_legal_joinclause() needs to look at it (ugly :-().
1064                  */
1065                 root->initial_rels = initial_rels;
1066
1067                 if (join_search_hook)
1068                         return (*join_search_hook) (root, levels_needed, initial_rels);
1069                 else if (enable_geqo && levels_needed >= geqo_threshold)
1070                         return geqo(root, levels_needed, initial_rels);
1071                 else
1072                         return standard_join_search(root, levels_needed, initial_rels);
1073         }
1074 }
1075
1076 /*
1077  * standard_join_search
1078  *        Find possible joinpaths for a query by successively finding ways
1079  *        to join component relations into join relations.
1080  *
1081  * 'levels_needed' is the number of iterations needed, ie, the number of
1082  *              independent jointree items in the query.  This is > 1.
1083  *
1084  * 'initial_rels' is a list of RelOptInfo nodes for each independent
1085  *              jointree item.  These are the components to be joined together.
1086  *              Note that levels_needed == list_length(initial_rels).
1087  *
1088  * Returns the final level of join relations, i.e., the relation that is
1089  * the result of joining all the original relations together.
1090  * At least one implementation path must be provided for this relation and
1091  * all required sub-relations.
1092  *
1093  * To support loadable plugins that modify planner behavior by changing the
1094  * join searching algorithm, we provide a hook variable that lets a plugin
1095  * replace or supplement this function.  Any such hook must return the same
1096  * final join relation as the standard code would, but it might have a
1097  * different set of implementation paths attached, and only the sub-joinrels
1098  * needed for these paths need have been instantiated.
1099  *
1100  * Note to plugin authors: the functions invoked during standard_join_search()
1101  * modify root->join_rel_list and root->join_rel_hash.  If you want to do more
1102  * than one join-order search, you'll probably need to save and restore the
1103  * original states of those data structures.  See geqo_eval() for an example.
1104  */
1105 RelOptInfo *
1106 standard_join_search(PlannerInfo *root, int levels_needed, List *initial_rels)
1107 {
1108         int                     lev;
1109         RelOptInfo *rel;
1110
1111         /*
1112          * This function cannot be invoked recursively within any one planning
1113          * problem, so join_rel_level[] can't be in use already.
1114          */
1115         Assert(root->join_rel_level == NULL);
1116
1117         /*
1118          * We employ a simple "dynamic programming" algorithm: we first find all
1119          * ways to build joins of two jointree items, then all ways to build joins
1120          * of three items (from two-item joins and single items), then four-item
1121          * joins, and so on until we have considered all ways to join all the
1122          * items into one rel.
1123          *
1124          * root->join_rel_level[j] is a list of all the j-item rels.  Initially we
1125          * set root->join_rel_level[1] to represent all the single-jointree-item
1126          * relations.
1127          */
1128         root->join_rel_level = (List **) palloc0((levels_needed + 1) * sizeof(List *));
1129
1130         root->join_rel_level[1] = initial_rels;
1131
1132         for (lev = 2; lev <= levels_needed; lev++)
1133         {
1134                 ListCell   *lc;
1135
1136                 /*
1137                  * Determine all possible pairs of relations to be joined at this
1138                  * level, and build paths for making each one from every available
1139                  * pair of lower-level relations.
1140                  */
1141                 join_search_one_level(root, lev);
1142
1143                 /*
1144                  * Do cleanup work on each just-processed rel.
1145                  */
1146                 foreach(lc, root->join_rel_level[lev])
1147                 {
1148                         rel = (RelOptInfo *) lfirst(lc);
1149
1150                         /* Find and save the cheapest paths for this rel */
1151                         set_cheapest(rel);
1152
1153 #ifdef OPTIMIZER_DEBUG
1154                         debug_print_rel(root, rel);
1155 #endif
1156                 }
1157         }
1158
1159         /*
1160          * We should have a single rel at the final level.
1161          */
1162         if (root->join_rel_level[levels_needed] == NIL)
1163                 elog(ERROR, "failed to build any %d-way joins", levels_needed);
1164         Assert(list_length(root->join_rel_level[levels_needed]) == 1);
1165
1166         rel = (RelOptInfo *) linitial(root->join_rel_level[levels_needed]);
1167
1168         root->join_rel_level = NULL;
1169
1170         return rel;
1171 }
1172
1173 /*****************************************************************************
1174  *                      PUSHING QUALS DOWN INTO SUBQUERIES
1175  *****************************************************************************/
1176
1177 /*
1178  * subquery_is_pushdown_safe - is a subquery safe for pushing down quals?
1179  *
1180  * subquery is the particular component query being checked.  topquery
1181  * is the top component of a set-operations tree (the same Query if no
1182  * set-op is involved).
1183  *
1184  * Conditions checked here:
1185  *
1186  * 1. If the subquery has a LIMIT clause, we must not push down any quals,
1187  * since that could change the set of rows returned.
1188  *
1189  * 2. If the subquery contains any window functions, we can't push quals
1190  * into it, because that could change the results.
1191  *
1192  * 3. If the subquery contains EXCEPT or EXCEPT ALL set ops we cannot push
1193  * quals into it, because that could change the results.
1194  *
1195  * 4. For subqueries using UNION/UNION ALL/INTERSECT/INTERSECT ALL, we can
1196  * push quals into each component query, but the quals can only reference
1197  * subquery columns that suffer no type coercions in the set operation.
1198  * Otherwise there are possible semantic gotchas.  So, we check the
1199  * component queries to see if any of them have different output types;
1200  * differentTypes[k] is set true if column k has different type in any
1201  * component.
1202  */
1203 static bool
1204 subquery_is_pushdown_safe(Query *subquery, Query *topquery,
1205                                                   bool *differentTypes)
1206 {
1207         SetOperationStmt *topop;
1208
1209         /* Check point 1 */
1210         if (subquery->limitOffset != NULL || subquery->limitCount != NULL)
1211                 return false;
1212
1213         /* Check point 2 */
1214         if (subquery->hasWindowFuncs)
1215                 return false;
1216
1217         /* Are we at top level, or looking at a setop component? */
1218         if (subquery == topquery)
1219         {
1220                 /* Top level, so check any component queries */
1221                 if (subquery->setOperations != NULL)
1222                         if (!recurse_pushdown_safe(subquery->setOperations, topquery,
1223                                                                            differentTypes))
1224                                 return false;
1225         }
1226         else
1227         {
1228                 /* Setop component must not have more components (too weird) */
1229                 if (subquery->setOperations != NULL)
1230                         return false;
1231                 /* Check whether setop component output types match top level */
1232                 topop = (SetOperationStmt *) topquery->setOperations;
1233                 Assert(topop && IsA(topop, SetOperationStmt));
1234                 compare_tlist_datatypes(subquery->targetList,
1235                                                                 topop->colTypes,
1236                                                                 differentTypes);
1237         }
1238         return true;
1239 }
1240
1241 /*
1242  * Helper routine to recurse through setOperations tree
1243  */
1244 static bool
1245 recurse_pushdown_safe(Node *setOp, Query *topquery,
1246                                           bool *differentTypes)
1247 {
1248         if (IsA(setOp, RangeTblRef))
1249         {
1250                 RangeTblRef *rtr = (RangeTblRef *) setOp;
1251                 RangeTblEntry *rte = rt_fetch(rtr->rtindex, topquery->rtable);
1252                 Query      *subquery = rte->subquery;
1253
1254                 Assert(subquery != NULL);
1255                 return subquery_is_pushdown_safe(subquery, topquery, differentTypes);
1256         }
1257         else if (IsA(setOp, SetOperationStmt))
1258         {
1259                 SetOperationStmt *op = (SetOperationStmt *) setOp;
1260
1261                 /* EXCEPT is no good */
1262                 if (op->op == SETOP_EXCEPT)
1263                         return false;
1264                 /* Else recurse */
1265                 if (!recurse_pushdown_safe(op->larg, topquery, differentTypes))
1266                         return false;
1267                 if (!recurse_pushdown_safe(op->rarg, topquery, differentTypes))
1268                         return false;
1269         }
1270         else
1271         {
1272                 elog(ERROR, "unrecognized node type: %d",
1273                          (int) nodeTag(setOp));
1274         }
1275         return true;
1276 }
1277
1278 /*
1279  * Compare tlist's datatypes against the list of set-operation result types.
1280  * For any items that are different, mark the appropriate element of
1281  * differentTypes[] to show that this column will have type conversions.
1282  *
1283  * We don't have to care about typmods here: the only allowed difference
1284  * between set-op input and output typmods is that the input has a specific
1285  * typmod while the output is -1, and that does not require a coercion.
1286  */
1287 static void
1288 compare_tlist_datatypes(List *tlist, List *colTypes,
1289                                                 bool *differentTypes)
1290 {
1291         ListCell   *l;
1292         ListCell   *colType = list_head(colTypes);
1293
1294         foreach(l, tlist)
1295         {
1296                 TargetEntry *tle = (TargetEntry *) lfirst(l);
1297
1298                 if (tle->resjunk)
1299                         continue;                       /* ignore resjunk columns */
1300                 if (colType == NULL)
1301                         elog(ERROR, "wrong number of tlist entries");
1302                 if (exprType((Node *) tle->expr) != lfirst_oid(colType))
1303                         differentTypes[tle->resno] = true;
1304                 colType = lnext(colType);
1305         }
1306         if (colType != NULL)
1307                 elog(ERROR, "wrong number of tlist entries");
1308 }
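
/*
 * Illustrative example (informal, hypothetical tables t1 and t2): for
 *
 *		SELECT a, b FROM t1 UNION ALL SELECT a::numeric, b FROM t2
 *
 * where t1.a is int4, the set-op result type of column 1 is numeric, so the
 * first arm's tlist entry differs from colTypes and differentTypes[1] is set
 * true; column 2 matches in both arms and stays false.  Note that the array
 * is indexed by 1-based resno, matching varattno in qual_is_pushdown_safe().
 */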
1309
1310 /*
1311  * qual_is_pushdown_safe - is a particular qual safe to push down?
1312  *
1313  * qual is a restriction clause applying to the given subquery (whose RTE
1314  * has index rti in the parent query).
1315  *
1316  * Conditions checked here:
1317  *
1318  * 1. The qual must not contain any subselects (mainly because I'm not sure
1319  * it will work correctly: sublinks will already have been transformed into
1320  * subplans in the qual, but not in the subquery).
1321  *
1322  * 2. The qual must not refer to the whole-row output of the subquery
1323  * (since there is no easy way to name that within the subquery itself).
1324  *
1325  * 3. The qual must not refer to any subquery output columns that were
1326  * found to have inconsistent types across a set operation tree by
1327  * subquery_is_pushdown_safe().
1328  *
1329  * 4. If the subquery uses DISTINCT ON, we must not push down any quals that
1330  * refer to non-DISTINCT output columns, because that could change the set
1331  * of rows returned.  (This condition is vacuous for DISTINCT, because then
1332  * there are no non-DISTINCT output columns, so we needn't check.  But note
1333  * we are assuming that the qual can't distinguish values that the DISTINCT
1334  * operator sees as equal.  This is a bit shaky but we have no way to test
1335  * for the case, and it's unlikely enough that we shouldn't refuse the
1336  * optimization just because it could theoretically happen.)
1337  *
1338  * 5. We must not push down any quals that refer to subselect outputs that
1339  * return sets, else we'd introduce functions-returning-sets into the
1340  * subquery's WHERE/HAVING quals.
1341  *
1342  * 6. We must not push down any quals that refer to subselect outputs that
1343  * contain volatile functions, for fear of introducing strange results due
1344  * to multiple evaluation of a volatile function.
1345  */
1346 static bool
1347 qual_is_pushdown_safe(Query *subquery, Index rti, Node *qual,
1348                                           bool *differentTypes)
1349 {
1350         bool            safe = true;
1351         List       *vars;
1352         ListCell   *vl;
1353         Bitmapset  *tested = NULL;
1354
1355         /* Refuse subselects (point 1) */
1356         if (contain_subplans(qual))
1357                 return false;
1358
1359         /*
1360          * It would be unsafe to push down window function calls, but at least for
1361          * the moment we could never see any in a qual anyhow.  (The same applies
1362          * to aggregates, which we check for in pull_var_clause below.)
1363          */
1364         Assert(!contain_window_function(qual));
1365
1366         /*
1367          * Examine all Vars used in clause; since it's a restriction clause, all
1368          * such Vars must refer to subselect output columns.
1369          */
1370         vars = pull_var_clause(qual,
1371                                                    PVC_REJECT_AGGREGATES,
1372                                                    PVC_INCLUDE_PLACEHOLDERS);
1373         foreach(vl, vars)
1374         {
1375                 Var                *var = (Var *) lfirst(vl);
1376                 TargetEntry *tle;
1377
1378                 /*
1379                  * XXX Punt if we find any PlaceHolderVars in the restriction clause.
1380                  * It's not clear whether a PHV could safely be pushed down, and even
1381                  * less clear whether such a situation could arise in any cases of
1382                  * practical interest anyway.  So for the moment, just refuse to push
1383                  * down.
1384                  */
1385                 if (!IsA(var, Var))
1386                 {
1387                         safe = false;
1388                         break;
1389                 }
1390
1391                 Assert(var->varno == rti);
1392
1393                 /* Check point 2 */
1394                 if (var->varattno == 0)
1395                 {
1396                         safe = false;
1397                         break;
1398                 }
1399
1400                 /*
1401                  * We use a bitmapset to avoid testing the same attno more than once.
1402                  * (NB: this only works because subquery outputs can't have negative
1403                  * attnos.)
1404                  */
1405                 if (bms_is_member(var->varattno, tested))
1406                         continue;
1407                 tested = bms_add_member(tested, var->varattno);
1408
1409                 /* Check point 3 */
1410                 if (differentTypes[var->varattno])
1411                 {
1412                         safe = false;
1413                         break;
1414                 }
1415
1416                 /* Must find the tlist element referenced by the Var */
1417                 tle = get_tle_by_resno(subquery->targetList, var->varattno);
1418                 Assert(tle != NULL);
1419                 Assert(!tle->resjunk);
1420
1421                 /* If subquery uses DISTINCT ON, check point 4 */
1422                 if (subquery->hasDistinctOn &&
1423                         !targetIsInSortList(tle, InvalidOid, subquery->distinctClause))
1424                 {
1425                         /* non-DISTINCT column, so fail */
1426                         safe = false;
1427                         break;
1428                 }
1429
1430                 /* Refuse functions returning sets (point 5) */
1431                 if (expression_returns_set((Node *) tle->expr))
1432                 {
1433                         safe = false;
1434                         break;
1435                 }
1436
1437                 /* Refuse volatile functions (point 6) */
1438                 if (contain_volatile_functions((Node *) tle->expr))
1439                 {
1440                         safe = false;
1441                         break;
1442                 }
1443         }
1444
1445         list_free(vars);
1446         bms_free(tested);
1447
1448         return safe;
1449 }
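
/*
 * Illustrative examples (informal, hypothetical table t): for
 *
 *		SELECT * FROM (SELECT DISTINCT ON (a) a, b FROM t ORDER BY a, b) ss
 *		WHERE b = 0
 *
 * the qual on b must stay outside (point 4): filtering on b before the
 * DISTINCT ON step could let a different row win for some value of a.  And
 * if an output column were defined as random() (point 6), pushing a qual on
 * it down would add a second evaluation of random() that need not agree with
 * the value actually returned in the column.
 */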
1450
1451 /*
1452  * subquery_push_qual - push down a qual that we have determined is safe
1453  */
1454 static void
1455 subquery_push_qual(Query *subquery, RangeTblEntry *rte, Index rti, Node *qual)
1456 {
1457         if (subquery->setOperations != NULL)
1458         {
1459                 /* Recurse to push it separately to each component query */
1460                 recurse_push_qual(subquery->setOperations, subquery,
1461                                                   rte, rti, qual);
1462         }
1463         else
1464         {
1465                 /*
1466                  * We need to replace Vars in the qual (which must refer to outputs of
1467                  * the subquery) with copies of the subquery's targetlist expressions.
1468                  * Note that at this point, any uplevel Vars in the qual should have
1469                  * been replaced with Params, so they need no work.
1470                  *
1471                  * This step also ensures that when we are pushing into a setop tree,
1472                  * each component query gets its own copy of the qual.
1473                  */
1474                 qual = ResolveNew(qual, rti, 0, rte,
1475                                                   subquery->targetList,
1476                                                   CMD_SELECT, 0,
1477                                                   &subquery->hasSubLinks);
1478
1479                 /*
1480                  * Now attach the qual to the proper place: normally WHERE, but if the
1481                  * subquery uses grouping or aggregation, put it in HAVING (since the
1482                  * qual really refers to the group-result rows).
1483                  */
1484                 if (subquery->hasAggs || subquery->groupClause || subquery->havingQual)
1485                         subquery->havingQual = make_and_qual(subquery->havingQual, qual);
1486                 else
1487                         subquery->jointree->quals =
1488                                 make_and_qual(subquery->jointree->quals, qual);
1489
1490                 /*
1491          * We need not change the subquery's hasAggs or hasSubLinks flags,
1492                  * since we can't be pushing down any aggregates that weren't there
1493                  * before, and we don't push down subselects at all.
1494                  */
1495         }
1496 }
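
/*
 * Illustrative example (informal, hypothetical table t): for
 *
 *		SELECT * FROM (SELECT g, count(*) AS c FROM t GROUP BY g) ss
 *		WHERE c > 10
 *
 * the pushed-down qual's Var "c" is replaced by a copy of the targetlist
 * expression count(*), and because the subquery aggregates, the result is
 * attached as HAVING count(*) > 10 rather than to the WHERE clause.
 */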
1497
1498 /*
1499  * Helper routine to recurse through setOperations tree
1500  */
1501 static void
1502 recurse_push_qual(Node *setOp, Query *topquery,
1503                                   RangeTblEntry *rte, Index rti, Node *qual)
1504 {
1505         if (IsA(setOp, RangeTblRef))
1506         {
1507                 RangeTblRef *rtr = (RangeTblRef *) setOp;
1508                 RangeTblEntry *subrte = rt_fetch(rtr->rtindex, topquery->rtable);
1509                 Query      *subquery = subrte->subquery;
1510
1511                 Assert(subquery != NULL);
1512                 subquery_push_qual(subquery, rte, rti, qual);
1513         }
1514         else if (IsA(setOp, SetOperationStmt))
1515         {
1516                 SetOperationStmt *op = (SetOperationStmt *) setOp;
1517
1518                 recurse_push_qual(op->larg, topquery, rte, rti, qual);
1519                 recurse_push_qual(op->rarg, topquery, rte, rti, qual);
1520         }
1521         else
1522         {
1523                 elog(ERROR, "unrecognized node type: %d",
1524                          (int) nodeTag(setOp));
1525         }
1526 }
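
/*
 * Informal sketch of how the pushdown routines fit together.  This is
 * hypothetical, simplified caller code shown only for illustration; the
 * variable names are placeholders:
 *
 *		bool	   *differentTypes = (bool *)
 *			palloc0((list_length(subquery->targetList) + 1) * sizeof(bool));
 *
 *		if (subquery_is_pushdown_safe(subquery, subquery, differentTypes) &&
 *			qual_is_pushdown_safe(subquery, rti, qual, differentTypes))
 *			subquery_push_qual(subquery, rte, rti, qual);
 *
 *		pfree(differentTypes);
 *
 * The extra array slot allows indexing by 1-based resno/varattno.
 */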
1527
1528 /*****************************************************************************
1529  *                      DEBUG SUPPORT
1530  *****************************************************************************/
1531
1532 #ifdef OPTIMIZER_DEBUG
1533
1534 static void
1535 print_relids(Relids relids)
1536 {
1537         Relids          tmprelids;
1538         int                     x;
1539         bool            first = true;
1540
1541         tmprelids = bms_copy(relids);
1542         while ((x = bms_first_member(tmprelids)) >= 0)
1543         {
1544                 if (!first)
1545                         printf(" ");
1546                 printf("%d", x);
1547                 first = false;
1548         }
1549         bms_free(tmprelids);
1550 }
1551
1552 static void
1553 print_restrictclauses(PlannerInfo *root, List *clauses)
1554 {
1555         ListCell   *l;
1556
1557         foreach(l, clauses)
1558         {
1559                 RestrictInfo *c = lfirst(l);
1560
1561                 print_expr((Node *) c->clause, root->parse->rtable);
1562                 if (lnext(l))
1563                         printf(", ");
1564         }
1565 }
1566
1567 static void
1568 print_path(PlannerInfo *root, Path *path, int indent)
1569 {
1570         const char *ptype;
1571         bool            join = false;
1572         Path       *subpath = NULL;
1573         int                     i;
1574
1575         switch (nodeTag(path))
1576         {
1577                 case T_Path:
1578                         ptype = "SeqScan";
1579                         break;
1580                 case T_IndexPath:
1581                         ptype = "IdxScan";
1582                         break;
1583                 case T_BitmapHeapPath:
1584                         ptype = "BitmapHeapScan";
1585                         break;
1586                 case T_BitmapAndPath:
1587                         ptype = "BitmapAndPath";
1588                         break;
1589                 case T_BitmapOrPath:
1590                         ptype = "BitmapOrPath";
1591                         break;
1592                 case T_TidPath:
1593                         ptype = "TidScan";
1594                         break;
1595                 case T_ForeignPath:
1596                         ptype = "ForeignScan";
1597                         break;
1598                 case T_AppendPath:
1599                         ptype = "Append";
1600                         break;
1601                 case T_MergeAppendPath:
1602                         ptype = "MergeAppend";
1603                         break;
1604                 case T_ResultPath:
1605                         ptype = "Result";
1606                         break;
1607                 case T_MaterialPath:
1608                         ptype = "Material";
1609                         subpath = ((MaterialPath *) path)->subpath;
1610                         break;
1611                 case T_UniquePath:
1612                         ptype = "Unique";
1613                         subpath = ((UniquePath *) path)->subpath;
1614                         break;
1615                 case T_NestPath:
1616                         ptype = "NestLoop";
1617                         join = true;
1618                         break;
1619                 case T_MergePath:
1620                         ptype = "MergeJoin";
1621                         join = true;
1622                         break;
1623                 case T_HashPath:
1624                         ptype = "HashJoin";
1625                         join = true;
1626                         break;
1627                 default:
1628                         ptype = "???Path";
1629                         break;
1630         }
1631
1632         for (i = 0; i < indent; i++)
1633                 printf("\t");
1634         printf("%s", ptype);
1635
1636         if (path->parent)
1637         {
1638                 printf("(");
1639                 print_relids(path->parent->relids);
1640                 printf(") rows=%.0f", path->parent->rows);
1641         }
1642         printf(" cost=%.2f..%.2f\n", path->startup_cost, path->total_cost);
1643
1644         if (path->pathkeys)
1645         {
1646                 for (i = 0; i < indent; i++)
1647                         printf("\t");
1648                 printf("  pathkeys: ");
1649                 print_pathkeys(path->pathkeys, root->parse->rtable);
1650         }
1651
1652         if (join)
1653         {
1654                 JoinPath   *jp = (JoinPath *) path;
1655
1656                 for (i = 0; i < indent; i++)
1657                         printf("\t");
1658                 printf("  clauses: ");
1659                 print_restrictclauses(root, jp->joinrestrictinfo);
1660                 printf("\n");
1661
1662                 if (IsA(path, MergePath))
1663                 {
1664                         MergePath  *mp = (MergePath *) path;
1665
1666                         for (i = 0; i < indent; i++)
1667                                 printf("\t");
1668                         printf("  sortouter=%d sortinner=%d materializeinner=%d\n",
1669                                    ((mp->outersortkeys) ? 1 : 0),
1670                                    ((mp->innersortkeys) ? 1 : 0),
1671                                    ((mp->materialize_inner) ? 1 : 0));
1672                 }
1673
1674                 print_path(root, jp->outerjoinpath, indent + 1);
1675                 print_path(root, jp->innerjoinpath, indent + 1);
1676         }
1677
1678         if (subpath)
1679                 print_path(root, subpath, indent + 1);
1680 }
1681
1682 void
1683 debug_print_rel(PlannerInfo *root, RelOptInfo *rel)
1684 {
1685         ListCell   *l;
1686
1687         printf("RELOPTINFO (");
1688         print_relids(rel->relids);
1689         printf("): rows=%.0f width=%d\n", rel->rows, rel->width);
1690
1691         if (rel->baserestrictinfo)
1692         {
1693                 printf("\tbaserestrictinfo: ");
1694                 print_restrictclauses(root, rel->baserestrictinfo);
1695                 printf("\n");
1696         }
1697
1698         if (rel->joininfo)
1699         {
1700                 printf("\tjoininfo: ");
1701                 print_restrictclauses(root, rel->joininfo);
1702                 printf("\n");
1703         }
1704
1705         printf("\tpath list:\n");
1706         foreach(l, rel->pathlist)
1707                 print_path(root, lfirst(l), 1);
1708         printf("\n\tcheapest startup path:\n");
1709         print_path(root, rel->cheapest_startup_path, 1);
1710         printf("\n\tcheapest total path:\n");
1711         print_path(root, rel->cheapest_total_path, 1);
1712         printf("\n");
1713         fflush(stdout);
1714 }
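
/*
 * Informal sketch of the output shape (illustrative values only; the actual
 * clause and path rendering depends on the query):
 *
 *		RELOPTINFO (1): rows=1000 width=12
 *			baserestrictinfo: t.a > 1
 *			path list:
 *			SeqScan(1) rows=333 cost=0.00..22.50
 *
 *			cheapest startup path:
 *			SeqScan(1) rows=333 cost=0.00..22.50
 *
 *			cheapest total path:
 *			SeqScan(1) rows=333 cost=0.00..22.50
 */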
1715
1716 #endif   /* OPTIMIZER_DEBUG */