1 /*-------------------------------------------------------------------------
2  *
3  * costsize.c
4  *        Routines to compute (and set) relation sizes and path costs
5  *
6  * Path costs are measured in arbitrary units established by these basic
7  * parameters:
8  *
9  *      seq_page_cost           Cost of a sequential page fetch
10  *      random_page_cost        Cost of a non-sequential page fetch
11  *      cpu_tuple_cost          Cost of typical CPU time to process a tuple
12  *      cpu_index_tuple_cost  Cost of typical CPU time to process an index tuple
13  *      cpu_operator_cost       Cost of CPU time to execute an operator or function
14  *
15  * We expect that the kernel will typically do some amount of read-ahead
16  * optimization; this in conjunction with seek costs means that seq_page_cost
17  * is normally considerably less than random_page_cost.  (However, if the
18  * database is fully cached in RAM, it is reasonable to set them equal.)
19  *
20  * We also use a rough estimate "effective_cache_size" of the number of
21  * disk pages in Postgres + OS-level disk cache.  (We can't simply use
22  * NBuffers for this purpose because that would ignore the effects of
23  * the kernel's disk cache.)
24  *
25  * Obviously, taking constants for these values is an oversimplification,
26  * but it's tough enough to get any useful estimates even at this level of
27  * detail.      Note that all of these parameters are user-settable, in case
28  * the default values are drastically off for a particular platform.
29  *
30  * We compute two separate costs for each path:
31  *              total_cost: total estimated cost to fetch all tuples
32  *              startup_cost: cost that is expended before first tuple is fetched
33  * In some scenarios, such as when there is a LIMIT or we are implementing
34  * an EXISTS(...) sub-select, it is not necessary to fetch all tuples of the
35  * path's result.  A caller can estimate the cost of fetching a partial
36  * result by interpolating between startup_cost and total_cost.  In detail:
37  *              actual_cost = startup_cost +
38  *                      (total_cost - startup_cost) * tuples_to_fetch / path->parent->rows;
39  * Note that a base relation's rows count (and, by extension, plan_rows for
40  * plan nodes below the LIMIT node) are set without regard to any LIMIT, so
41  * that this equation works properly.  (Also, these routines guarantee not to
42  * set the rows count to zero, so there will be no zero divide.)  The LIMIT is
43  * applied as a top-level plan node.
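 *
 * As an illustrative example with made-up numbers: given startup_cost = 10,
 * total_cost = 110 and path->parent->rows = 1000, a caller that needs only
 * 100 tuples would estimate
 *              actual_cost = 10 + (110 - 10) * 100 / 1000 = 20
 * so most of the run cost is avoided when only a small fraction of the
 * result is actually fetched.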
44  *
45  * For largely historical reasons, most of the routines in this module use
46  * the passed result Path only to store their startup_cost and total_cost
47  * results into.  All the input data they need is passed as separate
48  * parameters, even though much of it could be extracted from the Path.
49  * An exception is made for the cost_XXXjoin() routines, which expect all
50  * the non-cost fields of the passed XXXPath to be filled in.
51  *
52  *
53  * Portions Copyright (c) 1996-2009, PostgreSQL Global Development Group
54  * Portions Copyright (c) 1994, Regents of the University of California
55  *
56  * IDENTIFICATION
57  *        $PostgreSQL: pgsql/src/backend/optimizer/path/costsize.c,v 1.208 2009/05/09 22:51:41 tgl Exp $
58  *
59  *-------------------------------------------------------------------------
60  */
61
62 #include "postgres.h"
63
64 #include <math.h>
65
66 #include "executor/nodeHash.h"
67 #include "miscadmin.h"
68 #include "nodes/nodeFuncs.h"
69 #include "optimizer/clauses.h"
70 #include "optimizer/cost.h"
71 #include "optimizer/pathnode.h"
72 #include "optimizer/placeholder.h"
73 #include "optimizer/planmain.h"
74 #include "optimizer/restrictinfo.h"
75 #include "parser/parsetree.h"
76 #include "utils/lsyscache.h"
77 #include "utils/selfuncs.h"
78 #include "utils/tuplesort.h"
79
80
81 #define LOG2(x)  (log(x) / 0.693147180559945)
82
83 /*
84  * Some Paths return fewer than the nominal number of rows of their parent
85  * relations; join nodes need to do this to get the correct input count:
86  */
87 #define PATH_ROWS(path) \
88         (IsA(path, UniquePath) ? \
89          ((UniquePath *) (path))->rows : \
90          (path)->parent->rows)
91
92
93 double          seq_page_cost = DEFAULT_SEQ_PAGE_COST;
94 double          random_page_cost = DEFAULT_RANDOM_PAGE_COST;
95 double          cpu_tuple_cost = DEFAULT_CPU_TUPLE_COST;
96 double          cpu_index_tuple_cost = DEFAULT_CPU_INDEX_TUPLE_COST;
97 double          cpu_operator_cost = DEFAULT_CPU_OPERATOR_COST;
98
99 int                     effective_cache_size = DEFAULT_EFFECTIVE_CACHE_SIZE;
100
101 Cost            disable_cost = 1.0e10;
102
103 bool            enable_seqscan = true;
104 bool            enable_indexscan = true;
105 bool            enable_bitmapscan = true;
106 bool            enable_tidscan = true;
107 bool            enable_sort = true;
108 bool            enable_hashagg = true;
109 bool            enable_nestloop = true;
110 bool            enable_mergejoin = true;
111 bool            enable_hashjoin = true;
112
113 typedef struct
114 {
115         PlannerInfo *root;
116         QualCost        total;
117 } cost_qual_eval_context;
118
119 static MergeScanSelCache *cached_scansel(PlannerInfo *root,
120                            RestrictInfo *rinfo,
121                            PathKey *pathkey);
122 static bool cost_qual_eval_walker(Node *node, cost_qual_eval_context *context);
123 static bool adjust_semi_join(PlannerInfo *root, JoinPath *path,
124                                  SpecialJoinInfo *sjinfo,
125                                  Selectivity *outer_match_frac,
126                                  Selectivity *match_count,
127                                  bool *indexed_join_quals);
128 static double approx_tuple_count(PlannerInfo *root, JoinPath *path,
129                                                                  List *quals);
130 static void set_rel_width(PlannerInfo *root, RelOptInfo *rel);
131 static double relation_byte_size(double tuples, int width);
132 static double page_size(double tuples, int width);
133
134
135 /*
136  * clamp_row_est
137  *              Force a row-count estimate to a sane value.
138  */
139 double
140 clamp_row_est(double nrows)
141 {
142         /*
143          * Force estimate to be at least one row, to make explain output look
144          * better and to avoid possible divide-by-zero when interpolating costs.
145          * Make it an integer, too.
146          */
147         if (nrows <= 1.0)
148                 nrows = 1.0;
149         else
150                 nrows = rint(nrows);
151
152         return nrows;
153 }
154
155
156 /*
157  * cost_seqscan
158  *        Determines and returns the cost of scanning a relation sequentially.
159  */
160 void
161 cost_seqscan(Path *path, PlannerInfo *root,
162                          RelOptInfo *baserel)
163 {
164         Cost            startup_cost = 0;
165         Cost            run_cost = 0;
166         Cost            cpu_per_tuple;
167
168         /* Should only be applied to base relations */
169         Assert(baserel->relid > 0);
170         Assert(baserel->rtekind == RTE_RELATION);
171
172         if (!enable_seqscan)
173                 startup_cost += disable_cost;
174
175         /*
176          * disk costs
177          */
178         run_cost += seq_page_cost * baserel->pages;
179
180         /* CPU costs */
181         startup_cost += baserel->baserestrictcost.startup;
182         cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
183         run_cost += cpu_per_tuple * baserel->tuples;
184
185         path->startup_cost = startup_cost;
186         path->total_cost = startup_cost + run_cost;
187 }
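
/*
 * A minimal worked sketch of the formula above, assuming the default cost
 * parameters (seq_page_cost = 1.0, cpu_tuple_cost = 0.01) and a hypothetical
 * 100-page, 10000-tuple table with no restriction clauses; the disabled
 * block below just spells the arithmetic out, and its names are purely
 * illustrative.
 */
#if 0
static void
example_seqscan_cost(void)
{
	double		pages = 100.0;		/* baserel->pages */
	double		tuples = 10000.0;	/* baserel->tuples */
	double		run_cost;

	/* disk cost plus per-tuple CPU cost, as in cost_seqscan() above */
	run_cost = 1.0 /* seq_page_cost */ * pages +
		(0.01 /* cpu_tuple_cost */ + 0.0 /* restriction cost */) * tuples;
	/* run_cost is now 200.0; startup_cost stays 0 with no restriction quals */
	(void) run_cost;
}
#endif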
188
189 /*
190  * cost_index
191  *        Determines and returns the cost of scanning a relation using an index.
192  *
193  * 'index' is the index to be used
194  * 'indexQuals' is the list of applicable qual clauses (implicit AND semantics)
195  * 'outer_rel' is the outer relation when we are considering using the index
196  *              scan as the inside of a nestloop join (hence, some of the indexQuals
197  *              are join clauses, and we should expect repeated scans of the index);
198  *              NULL for a plain index scan
199  *
200  * cost_index() takes an IndexPath not just a Path, because it sets a few
201  * additional fields of the IndexPath besides startup_cost and total_cost.
202  * These fields are needed if the IndexPath is used in a BitmapIndexScan.
203  *
204  * NOTE: 'indexQuals' must contain only clauses usable as index restrictions.
205  * Any additional quals evaluated as qpquals may reduce the number of returned
206  * tuples, but they won't reduce the number of tuples we have to fetch from
207  * the table, so they don't reduce the scan cost.
208  *
209  * NOTE: as of 8.0, indexQuals is a list of RestrictInfo nodes, where formerly
210  * it was a list of bare clause expressions.
211  */
212 void
213 cost_index(IndexPath *path, PlannerInfo *root,
214                    IndexOptInfo *index,
215                    List *indexQuals,
216                    RelOptInfo *outer_rel)
217 {
218         RelOptInfo *baserel = index->rel;
219         Cost            startup_cost = 0;
220         Cost            run_cost = 0;
221         Cost            indexStartupCost;
222         Cost            indexTotalCost;
223         Selectivity indexSelectivity;
224         double          indexCorrelation,
225                                 csquared;
226         Cost            min_IO_cost,
227                                 max_IO_cost;
228         Cost            cpu_per_tuple;
229         double          tuples_fetched;
230         double          pages_fetched;
231
232         /* Should only be applied to base relations */
233         Assert(IsA(baserel, RelOptInfo) &&
234                    IsA(index, IndexOptInfo));
235         Assert(baserel->relid > 0);
236         Assert(baserel->rtekind == RTE_RELATION);
237
238         if (!enable_indexscan)
239                 startup_cost += disable_cost;
240
241         /*
242          * Call index-access-method-specific code to estimate the processing cost
243          * for scanning the index, as well as the selectivity of the index (ie,
244          * the fraction of main-table tuples we will have to retrieve) and its
245          * correlation to the main-table tuple order.
246          */
247         OidFunctionCall8(index->amcostestimate,
248                                          PointerGetDatum(root),
249                                          PointerGetDatum(index),
250                                          PointerGetDatum(indexQuals),
251                                          PointerGetDatum(outer_rel),
252                                          PointerGetDatum(&indexStartupCost),
253                                          PointerGetDatum(&indexTotalCost),
254                                          PointerGetDatum(&indexSelectivity),
255                                          PointerGetDatum(&indexCorrelation));
256
257         /*
258          * Save amcostestimate's results for possible use in bitmap scan planning.
259          * We don't bother to save indexStartupCost or indexCorrelation, because a
260          * bitmap scan doesn't care about either.
261          */
262         path->indextotalcost = indexTotalCost;
263         path->indexselectivity = indexSelectivity;
264
265         /* all costs for touching index itself included here */
266         startup_cost += indexStartupCost;
267         run_cost += indexTotalCost - indexStartupCost;
268
269         /* estimate number of main-table tuples fetched */
270         tuples_fetched = clamp_row_est(indexSelectivity * baserel->tuples);
271
272         /*----------
273          * Estimate number of main-table pages fetched, and compute I/O cost.
274          *
275          * When the index ordering is uncorrelated with the table ordering,
276          * we use an approximation proposed by Mackert and Lohman (see
277          * index_pages_fetched() for details) to compute the number of pages
278          * fetched, and then charge random_page_cost per page fetched.
279          *
280          * When the index ordering is exactly correlated with the table ordering
281          * (just after a CLUSTER, for example), the number of pages fetched should
282          * be exactly selectivity * table_size.  What's more, all but the first
283          * will be sequential fetches, not the random fetches that occur in the
284          * uncorrelated case.  So if the number of pages is more than 1, we
285          * ought to charge
286          *              random_page_cost + (pages_fetched - 1) * seq_page_cost
287          * For partially-correlated indexes, we ought to charge somewhere between
288          * these two estimates.  We currently interpolate linearly between the
289          * estimates based on the correlation squared (XXX is that appropriate?).
290          *----------
291          */
292         if (outer_rel != NULL && outer_rel->rows > 1)
293         {
294                 /*
295                  * For repeated indexscans, the appropriate estimate for the
296                  * uncorrelated case is to scale up the number of tuples fetched in
297                  * the Mackert and Lohman formula by the number of scans, so that we
298                  * estimate the number of pages fetched by all the scans; then
299                  * pro-rate the costs for one scan.  In this case we assume all the
300                  * fetches are random accesses.
301                  */
302                 double          num_scans = outer_rel->rows;
303
304                 pages_fetched = index_pages_fetched(tuples_fetched * num_scans,
305                                                                                         baserel->pages,
306                                                                                         (double) index->pages,
307                                                                                         root);
308
309                 max_IO_cost = (pages_fetched * random_page_cost) / num_scans;
310
311                 /*
312                  * In the perfectly correlated case, the number of pages touched by
313                  * each scan is selectivity * table_size, and we can use the Mackert
314                  * and Lohman formula at the page level to estimate how much work is
315                  * saved by caching across scans.  We still assume all the fetches are
316                  * random, though, which is an overestimate that's hard to correct for
317                  * without double-counting the cache effects.  (But in most cases
318                  * where such a plan is actually interesting, only one page would get
319                  * fetched per scan anyway, so it shouldn't matter much.)
320                  */
321                 pages_fetched = ceil(indexSelectivity * (double) baserel->pages);
322
323                 pages_fetched = index_pages_fetched(pages_fetched * num_scans,
324                                                                                         baserel->pages,
325                                                                                         (double) index->pages,
326                                                                                         root);
327
328                 min_IO_cost = (pages_fetched * random_page_cost) / num_scans;
329         }
330         else
331         {
332                 /*
333                  * Normal case: apply the Mackert and Lohman formula, and then
334                  * interpolate between that and the correlation-derived result.
335                  */
336                 pages_fetched = index_pages_fetched(tuples_fetched,
337                                                                                         baserel->pages,
338                                                                                         (double) index->pages,
339                                                                                         root);
340
341                 /* max_IO_cost is for the perfectly uncorrelated case (csquared=0) */
342                 max_IO_cost = pages_fetched * random_page_cost;
343
344                 /* min_IO_cost is for the perfectly correlated case (csquared=1) */
345                 pages_fetched = ceil(indexSelectivity * (double) baserel->pages);
346                 min_IO_cost = random_page_cost;
347                 if (pages_fetched > 1)
348                         min_IO_cost += (pages_fetched - 1) * seq_page_cost;
349         }
350
351         /*
352          * Now interpolate based on estimated index order correlation to get total
353          * disk I/O cost for main table accesses.
354          */
355         csquared = indexCorrelation * indexCorrelation;
356
357         run_cost += max_IO_cost + csquared * (min_IO_cost - max_IO_cost);
358
359         /*
360          * Estimate CPU costs per tuple.
361          *
362          * Normally the indexquals will be removed from the list of restriction
363          * clauses that we have to evaluate as qpquals, so we should subtract
364          * their costs from baserestrictcost.  But if we are doing a join then
365          * some of the indexquals are join clauses and shouldn't be subtracted.
366          * Rather than work out exactly how much to subtract, we don't subtract
367          * anything.
368          */
369         startup_cost += baserel->baserestrictcost.startup;
370         cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
371
372         if (outer_rel == NULL)
373         {
374                 QualCost        index_qual_cost;
375
376                 cost_qual_eval(&index_qual_cost, indexQuals, root);
377                 /* any startup cost still has to be paid ... */
378                 cpu_per_tuple -= index_qual_cost.per_tuple;
379         }
380
381         run_cost += cpu_per_tuple * tuples_fetched;
382
383         path->path.startup_cost = startup_cost;
384         path->path.total_cost = startup_cost + run_cost;
385 }
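
/*
 * Illustrative example of the correlation interpolation above, with invented
 * numbers: if max_IO_cost = 400, min_IO_cost = 100 and indexCorrelation = 0.5
 * (so csquared = 0.25), the I/O charge is 400 + 0.25 * (100 - 400) = 325,
 * i.e. a quarter of the way from the fully-random estimate toward the
 * fully-correlated one.
 */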
386
387 /*
388  * index_pages_fetched
389  *        Estimate the number of pages actually fetched after accounting for
390  *        cache effects.
391  *
392  * We use an approximation proposed by Mackert and Lohman, "Index Scans
393  * Using a Finite LRU Buffer: A Validated I/O Model", ACM Transactions
394  * on Database Systems, Vol. 14, No. 3, September 1989, Pages 401-424.
395  * The Mackert and Lohman approximation is that the number of pages
396  * fetched is
397  *      PF =
398  *              min(2TNs/(2T+Ns), T)                    when T <= b
399  *              2TNs/(2T+Ns)                                    when T > b and Ns <= 2Tb/(2T-b)
400  *              b + (Ns - 2Tb/(2T-b))*(T-b)/T   when T > b and Ns > 2Tb/(2T-b)
401  * where
402  *              T = # pages in table
403  *              N = # tuples in table
404  *              s = selectivity = fraction of table to be scanned
405  *              b = # buffer pages available (we include kernel space here)
406  *
407  * We assume that effective_cache_size is the total number of buffer pages
408  * available for the whole query, and pro-rate that space across all the
409  * tables in the query and the index currently under consideration.  (This
410  * ignores space needed for other indexes used by the query, but since we
411  * don't know which indexes will get used, we can't estimate that very well;
412  * and in any case counting all the tables may well be an overestimate, since
413  * depending on the join plan not all the tables may be scanned concurrently.)
414  *
415  * The product Ns is the number of tuples fetched; we pass in that
416  * product rather than calculating it here.  "pages" is the number of pages
417  * in the object under consideration (either an index or a table).
418  * "index_pages" is the amount to add to the total table space, which was
419  * computed for us by query_planner.
420  *
421  * Caller is expected to have ensured that tuples_fetched is greater than zero
422  * and rounded to integer (see clamp_row_est).  The result will likewise be
423  * greater than zero and integral.
424  */
425 double
426 index_pages_fetched(double tuples_fetched, BlockNumber pages,
427                                         double index_pages, PlannerInfo *root)
428 {
429         double          pages_fetched;
430         double          total_pages;
431         double          T,
432                                 b;
433
434         /* T is # pages in table, but don't allow it to be zero */
435         T = (pages > 1) ? (double) pages : 1.0;
436
437         /* Compute number of pages assumed to be competing for cache space */
438         total_pages = root->total_table_pages + index_pages;
439         total_pages = Max(total_pages, 1.0);
440         Assert(T <= total_pages);
441
442         /* b is pro-rated share of effective_cache_size */
443         b = (double) effective_cache_size * T / total_pages;
444
445         /* force it positive and integral */
446         if (b <= 1.0)
447                 b = 1.0;
448         else
449                 b = ceil(b);
450
451         /* This part is the Mackert and Lohman formula */
452         if (T <= b)
453         {
454                 pages_fetched =
455                         (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
456                 if (pages_fetched >= T)
457                         pages_fetched = T;
458                 else
459                         pages_fetched = ceil(pages_fetched);
460         }
461         else
462         {
463                 double          lim;
464
465                 lim = (2.0 * T * b) / (2.0 * T - b);
466                 if (tuples_fetched <= lim)
467                 {
468                         pages_fetched =
469                                 (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
470                 }
471                 else
472                 {
473                         pages_fetched =
474                                 b + (tuples_fetched - lim) * (T - b) / T;
475                 }
476                 pages_fetched = ceil(pages_fetched);
477         }
478         return pages_fetched;
479 }
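
/*
 * Illustrative worked example of the formula above, with made-up numbers:
 * with T = 100 table pages, b = 200 buffer pages (so T <= b) and Ns = 50
 * tuples fetched,
 *              PF = min(2*100*50 / (2*100 + 50), 100) = min(40, 100) = 40
 * i.e. only about 40 distinct pages are fetched because several of the 50
 * tuples share heap pages.  The T > b branches additionally charge for pages
 * that must be re-read after being evicted from the cache.
 */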
480
481 /*
482  * get_indexpath_pages
483  *              Determine the total size of the indexes used in a bitmap index path.
484  *
485  * Note: if the same index is used more than once in a bitmap tree, we will
486  * count it multiple times, which perhaps is the wrong thing ... but it's
487  * not completely clear, and detecting duplicates is difficult, so ignore it
488  * for now.
489  */
490 static double
491 get_indexpath_pages(Path *bitmapqual)
492 {
493         double          result = 0;
494         ListCell   *l;
495
496         if (IsA(bitmapqual, BitmapAndPath))
497         {
498                 BitmapAndPath *apath = (BitmapAndPath *) bitmapqual;
499
500                 foreach(l, apath->bitmapquals)
501                 {
502                         result += get_indexpath_pages((Path *) lfirst(l));
503                 }
504         }
505         else if (IsA(bitmapqual, BitmapOrPath))
506         {
507                 BitmapOrPath *opath = (BitmapOrPath *) bitmapqual;
508
509                 foreach(l, opath->bitmapquals)
510                 {
511                         result += get_indexpath_pages((Path *) lfirst(l));
512                 }
513         }
514         else if (IsA(bitmapqual, IndexPath))
515         {
516                 IndexPath  *ipath = (IndexPath *) bitmapqual;
517
518                 result = (double) ipath->indexinfo->pages;
519         }
520         else
521                 elog(ERROR, "unrecognized node type: %d", nodeTag(bitmapqual));
522
523         return result;
524 }
525
526 /*
527  * cost_bitmap_heap_scan
528  *        Determines and returns the cost of scanning a relation using a bitmap
529  *        index-then-heap plan.
530  *
531  * 'baserel' is the relation to be scanned
532  * 'bitmapqual' is a tree of IndexPaths, BitmapAndPaths, and BitmapOrPaths
533  * 'outer_rel' is the outer relation when we are considering using the bitmap
534  *              scan as the inside of a nestloop join (hence, some of the indexQuals
535  *              are join clauses, and we should expect repeated scans of the table);
536  *              NULL for a plain bitmap scan
537  *
538  * Note: if this is a join inner path, the component IndexPaths in bitmapqual
539  * should have been costed accordingly.
540  */
541 void
542 cost_bitmap_heap_scan(Path *path, PlannerInfo *root, RelOptInfo *baserel,
543                                           Path *bitmapqual, RelOptInfo *outer_rel)
544 {
545         Cost            startup_cost = 0;
546         Cost            run_cost = 0;
547         Cost            indexTotalCost;
548         Selectivity indexSelectivity;
549         Cost            cpu_per_tuple;
550         Cost            cost_per_page;
551         double          tuples_fetched;
552         double          pages_fetched;
553         double          T;
554
555         /* Should only be applied to base relations */
556         Assert(IsA(baserel, RelOptInfo));
557         Assert(baserel->relid > 0);
558         Assert(baserel->rtekind == RTE_RELATION);
559
560         if (!enable_bitmapscan)
561                 startup_cost += disable_cost;
562
563         /*
564          * Fetch total cost of obtaining the bitmap, as well as its total
565          * selectivity.
566          */
567         cost_bitmap_tree_node(bitmapqual, &indexTotalCost, &indexSelectivity);
568
569         startup_cost += indexTotalCost;
570
571         /*
572          * Estimate number of main-table pages fetched.
573          */
574         tuples_fetched = clamp_row_est(indexSelectivity * baserel->tuples);
575
576         T = (baserel->pages > 1) ? (double) baserel->pages : 1.0;
577
578         if (outer_rel != NULL && outer_rel->rows > 1)
579         {
580                 /*
581                  * For repeated bitmap scans, scale up the number of tuples fetched in
582                  * the Mackert and Lohman formula by the number of scans, so that we
583                  * estimate the number of pages fetched by all the scans. Then
584                  * pro-rate for one scan.
585                  */
586                 double          num_scans = outer_rel->rows;
587
588                 pages_fetched = index_pages_fetched(tuples_fetched * num_scans,
589                                                                                         baserel->pages,
590                                                                                         get_indexpath_pages(bitmapqual),
591                                                                                         root);
592                 pages_fetched /= num_scans;
593         }
594         else
595         {
596                 /*
597                  * For a single scan, the number of heap pages that need to be fetched
598                  * is the same as the Mackert and Lohman formula for the case T <= b
599                  * (ie, no re-reads needed).
600                  */
601                 pages_fetched = (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
602         }
603         if (pages_fetched >= T)
604                 pages_fetched = T;
605         else
606                 pages_fetched = ceil(pages_fetched);
607
608         /*
609          * For small numbers of pages we should charge random_page_cost apiece,
610          * while if nearly all the table's pages are being read, it's more
611          * appropriate to charge seq_page_cost apiece.  The effect is nonlinear,
612          * too. For lack of a better idea, interpolate like this to determine the
613          * cost per page.
614          */
615         if (pages_fetched >= 2.0)
616                 cost_per_page = random_page_cost -
617                         (random_page_cost - seq_page_cost) * sqrt(pages_fetched / T);
618         else
619                 cost_per_page = random_page_cost;
620
621         run_cost += pages_fetched * cost_per_page;
622
623         /*
624          * Estimate CPU costs per tuple.
625          *
626          * Often the indexquals don't need to be rechecked at each tuple ... but
627          * not always, especially not if there are enough tuples involved that the
628          * bitmaps become lossy.  For the moment, just assume they will be
629          * rechecked always.
630          */
631         startup_cost += baserel->baserestrictcost.startup;
632         cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
633
634         run_cost += cpu_per_tuple * tuples_fetched;
635
636         path->startup_cost = startup_cost;
637         path->total_cost = startup_cost + run_cost;
638 }
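
/*
 * Illustrative example of the per-page interpolation above, assuming the
 * default parameters (random_page_cost = 4.0, seq_page_cost = 1.0): fetching
 * one quarter of the table's pages gives
 *              cost_per_page = 4.0 - (4.0 - 1.0) * sqrt(0.25) = 2.5
 * while fetching nearly all pages drives cost_per_page toward seq_page_cost,
 * and fetching only a handful leaves it near random_page_cost.
 */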
639
640 /*
641  * cost_bitmap_tree_node
642  *              Extract cost and selectivity from a bitmap tree node (index/and/or)
643  */
644 void
645 cost_bitmap_tree_node(Path *path, Cost *cost, Selectivity *selec)
646 {
647         if (IsA(path, IndexPath))
648         {
649                 *cost = ((IndexPath *) path)->indextotalcost;
650                 *selec = ((IndexPath *) path)->indexselectivity;
651
652                 /*
653                  * Charge a small amount per retrieved tuple to reflect the costs of
654                  * manipulating the bitmap.  This is mostly to make sure that a bitmap
655                  * scan doesn't look to be the same cost as an indexscan to retrieve a
656                  * single tuple.
657                  */
658                 *cost += 0.1 * cpu_operator_cost * ((IndexPath *) path)->rows;
659         }
660         else if (IsA(path, BitmapAndPath))
661         {
662                 *cost = path->total_cost;
663                 *selec = ((BitmapAndPath *) path)->bitmapselectivity;
664         }
665         else if (IsA(path, BitmapOrPath))
666         {
667                 *cost = path->total_cost;
668                 *selec = ((BitmapOrPath *) path)->bitmapselectivity;
669         }
670         else
671         {
672                 elog(ERROR, "unrecognized node type: %d", nodeTag(path));
673                 *cost = *selec = 0;             /* keep compiler quiet */
674         }
675 }
676
677 /*
678  * cost_bitmap_and_node
679  *              Estimate the cost of a BitmapAnd node
680  *
681  * Note that this considers only the costs of index scanning and bitmap
682  * creation, not the eventual heap access.      In that sense the object isn't
683  * truly a Path, but it has enough path-like properties (costs in particular)
684  * to warrant treating it as one.
685  */
686 void
687 cost_bitmap_and_node(BitmapAndPath *path, PlannerInfo *root)
688 {
689         Cost            totalCost;
690         Selectivity selec;
691         ListCell   *l;
692
693         /*
694          * We estimate AND selectivity on the assumption that the inputs are
695          * independent.  This is probably often wrong, but we don't have the info
696          * to do better.
697          *
698          * The runtime cost of the BitmapAnd itself is estimated at 100x
699          * cpu_operator_cost for each tbm_intersect needed.  Probably too small,
700          * definitely too simplistic?
701          */
702         totalCost = 0.0;
703         selec = 1.0;
704         foreach(l, path->bitmapquals)
705         {
706                 Path       *subpath = (Path *) lfirst(l);
707                 Cost            subCost;
708                 Selectivity subselec;
709
710                 cost_bitmap_tree_node(subpath, &subCost, &subselec);
711
712                 selec *= subselec;
713
714                 totalCost += subCost;
715                 if (l != list_head(path->bitmapquals))
716                         totalCost += 100.0 * cpu_operator_cost;
717         }
718         path->bitmapselectivity = selec;
719         path->path.startup_cost = totalCost;
720         path->path.total_cost = totalCost;
721 }
722
723 /*
724  * cost_bitmap_or_node
725  *              Estimate the cost of a BitmapOr node
726  *
727  * See comments for cost_bitmap_and_node.
728  */
729 void
730 cost_bitmap_or_node(BitmapOrPath *path, PlannerInfo *root)
731 {
732         Cost            totalCost;
733         Selectivity selec;
734         ListCell   *l;
735
736         /*
737          * We estimate OR selectivity on the assumption that the inputs are
738          * non-overlapping, since that's often the case in "x IN (list)" type
739          * situations.  Of course, we clamp to 1.0 at the end.
740          *
741          * The runtime cost of the BitmapOr itself is estimated at 100x
742          * cpu_operator_cost for each tbm_union needed.  Probably too small,
743          * definitely too simplistic?  We are aware that the tbm_unions are
744          * optimized out when the inputs are BitmapIndexScans.
745          */
746         totalCost = 0.0;
747         selec = 0.0;
748         foreach(l, path->bitmapquals)
749         {
750                 Path       *subpath = (Path *) lfirst(l);
751                 Cost            subCost;
752                 Selectivity subselec;
753
754                 cost_bitmap_tree_node(subpath, &subCost, &subselec);
755
756                 selec += subselec;
757
758                 totalCost += subCost;
759                 if (l != list_head(path->bitmapquals) &&
760                         !IsA(subpath, IndexPath))
761                         totalCost += 100.0 * cpu_operator_cost;
762         }
763         path->bitmapselectivity = Min(selec, 1.0);
764         path->path.startup_cost = totalCost;
765         path->path.total_cost = totalCost;
766 }
767
768 /*
769  * cost_tidscan
770  *        Determines and returns the cost of scanning a relation using TIDs.
771  */
772 void
773 cost_tidscan(Path *path, PlannerInfo *root,
774                          RelOptInfo *baserel, List *tidquals)
775 {
776         Cost            startup_cost = 0;
777         Cost            run_cost = 0;
778         bool            isCurrentOf = false;
779         Cost            cpu_per_tuple;
780         QualCost        tid_qual_cost;
781         int                     ntuples;
782         ListCell   *l;
783
784         /* Should only be applied to base relations */
785         Assert(baserel->relid > 0);
786         Assert(baserel->rtekind == RTE_RELATION);
787
788         /* Count how many tuples we expect to retrieve */
789         ntuples = 0;
790         foreach(l, tidquals)
791         {
792                 if (IsA(lfirst(l), ScalarArrayOpExpr))
793                 {
794                         /* Each element of the array yields 1 tuple */
795                         ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) lfirst(l);
796                         Node       *arraynode = (Node *) lsecond(saop->args);
797
798                         ntuples += estimate_array_length(arraynode);
799                 }
800                 else if (IsA(lfirst(l), CurrentOfExpr))
801                 {
802                         /* CURRENT OF yields 1 tuple */
803                         isCurrentOf = true;
804                         ntuples++;
805                 }
806                 else
807                 {
808                         /* It's just CTID = something, count 1 tuple */
809                         ntuples++;
810                 }
811         }
812
813         /*
814          * We must force TID scan for WHERE CURRENT OF, because only nodeTidscan.c
815          * understands how to do it correctly.  Therefore, honor enable_tidscan
816          * only when CURRENT OF isn't present.  Also note that cost_qual_eval
817          * counts a CurrentOfExpr as having startup cost disable_cost, which we
818          * subtract off here; that's to prevent other plan types such as seqscan
819          * from winning.
820          */
821         if (isCurrentOf)
822         {
823                 Assert(baserel->baserestrictcost.startup >= disable_cost);
824                 startup_cost -= disable_cost;
825         }
826         else if (!enable_tidscan)
827                 startup_cost += disable_cost;
828
829         /*
830          * The TID qual expressions will be computed once, any other baserestrict
831  * quals once per retrieved tuple.
832          */
833         cost_qual_eval(&tid_qual_cost, tidquals, root);
834
835         /* disk costs --- assume each tuple on a different page */
836         run_cost += random_page_cost * ntuples;
837
838         /* CPU costs */
839         startup_cost += baserel->baserestrictcost.startup +
840                 tid_qual_cost.per_tuple;
841         cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple -
842                 tid_qual_cost.per_tuple;
843         run_cost += cpu_per_tuple * ntuples;
844
845         path->startup_cost = startup_cost;
846         path->total_cost = startup_cost + run_cost;
847 }
848
849 /*
850  * cost_subqueryscan
851  *        Determines and returns the cost of scanning a subquery RTE.
852  */
853 void
854 cost_subqueryscan(Path *path, RelOptInfo *baserel)
855 {
856         Cost            startup_cost;
857         Cost            run_cost;
858         Cost            cpu_per_tuple;
859
860         /* Should only be applied to base relations that are subqueries */
861         Assert(baserel->relid > 0);
862         Assert(baserel->rtekind == RTE_SUBQUERY);
863
864         /*
865          * Cost of path is cost of evaluating the subplan, plus cost of evaluating
866          * any restriction clauses that will be attached to the SubqueryScan node,
867          * plus cpu_tuple_cost to account for selection and projection overhead.
868          */
869         path->startup_cost = baserel->subplan->startup_cost;
870         path->total_cost = baserel->subplan->total_cost;
871
872         startup_cost = baserel->baserestrictcost.startup;
873         cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
874         run_cost = cpu_per_tuple * baserel->tuples;
875
876         path->startup_cost += startup_cost;
877         path->total_cost += startup_cost + run_cost;
878 }
879
880 /*
881  * cost_functionscan
882  *        Determines and returns the cost of scanning a function RTE.
883  */
884 void
885 cost_functionscan(Path *path, PlannerInfo *root, RelOptInfo *baserel)
886 {
887         Cost            startup_cost = 0;
888         Cost            run_cost = 0;
889         Cost            cpu_per_tuple;
890         RangeTblEntry *rte;
891         QualCost        exprcost;
892
893         /* Should only be applied to base relations that are functions */
894         Assert(baserel->relid > 0);
895         rte = planner_rt_fetch(baserel->relid, root);
896         Assert(rte->rtekind == RTE_FUNCTION);
897
898         /* Estimate costs of executing the function expression */
899         cost_qual_eval_node(&exprcost, rte->funcexpr, root);
900
901         startup_cost += exprcost.startup;
902         cpu_per_tuple = exprcost.per_tuple;
903
904         /* Add scanning CPU costs */
905         startup_cost += baserel->baserestrictcost.startup;
906         cpu_per_tuple += cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
907         run_cost += cpu_per_tuple * baserel->tuples;
908
909         path->startup_cost = startup_cost;
910         path->total_cost = startup_cost + run_cost;
911 }
912
913 /*
914  * cost_valuesscan
915  *        Determines and returns the cost of scanning a VALUES RTE.
916  */
917 void
918 cost_valuesscan(Path *path, PlannerInfo *root, RelOptInfo *baserel)
919 {
920         Cost            startup_cost = 0;
921         Cost            run_cost = 0;
922         Cost            cpu_per_tuple;
923
924         /* Should only be applied to base relations that are values lists */
925         Assert(baserel->relid > 0);
926         Assert(baserel->rtekind == RTE_VALUES);
927
928         /*
929          * For now, estimate list evaluation cost at one operator eval per list
930          * (probably pretty bogus, but is it worth being smarter?)
931          */
932         cpu_per_tuple = cpu_operator_cost;
933
934         /* Add scanning CPU costs */
935         startup_cost += baserel->baserestrictcost.startup;
936         cpu_per_tuple += cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
937         run_cost += cpu_per_tuple * baserel->tuples;
938
939         path->startup_cost = startup_cost;
940         path->total_cost = startup_cost + run_cost;
941 }
942
943 /*
944  * cost_ctescan
945  *        Determines and returns the cost of scanning a CTE RTE.
946  *
947  * Note: this is used for both self-reference and regular CTEs; the
948  * possible cost differences are below the threshold of what we could
949  * estimate accurately anyway.  Note that the costs of evaluating the
950  * referenced CTE query are added into the final plan as initplan costs,
951  * and should NOT be counted here.
952  */
953 void
954 cost_ctescan(Path *path, PlannerInfo *root, RelOptInfo *baserel)
955 {
956         Cost            startup_cost = 0;
957         Cost            run_cost = 0;
958         Cost            cpu_per_tuple;
959
960         /* Should only be applied to base relations that are CTEs */
961         Assert(baserel->relid > 0);
962         Assert(baserel->rtekind == RTE_CTE);
963
964         /* Charge one CPU tuple cost per row for tuplestore manipulation */
965         cpu_per_tuple = cpu_tuple_cost;
966
967         /* Add scanning CPU costs */
968         startup_cost += baserel->baserestrictcost.startup;
969         cpu_per_tuple += cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
970         run_cost += cpu_per_tuple * baserel->tuples;
971
972         path->startup_cost = startup_cost;
973         path->total_cost = startup_cost + run_cost;
974 }
975
976 /*
977  * cost_recursive_union
978  *        Determines and returns the cost of performing a recursive union,
979  *        and also the estimated output size.
980  *
981  * We are given Plans for the nonrecursive and recursive terms.
982  *
983  * Note that the arguments and output are Plans, not Paths as in most of
984  * the rest of this module.  That's because we don't bother setting up a
985  * Path representation for recursive union --- we have only one way to do it.
986  */
987 void
988 cost_recursive_union(Plan *runion, Plan *nrterm, Plan *rterm)
989 {
990         Cost            startup_cost;
991         Cost            total_cost;
992         double          total_rows;
993
994         /* We probably have decent estimates for the non-recursive term */
995         startup_cost = nrterm->startup_cost;
996         total_cost = nrterm->total_cost;
997         total_rows = nrterm->plan_rows;
998
999         /*
1000          * We arbitrarily assume that about 10 recursive iterations will be
1001          * needed, and that we've managed to get a good fix on the cost and
1002          * output size of each one of them.  These are mighty shaky assumptions
1003          * but it's hard to see how to do better.
1004          */
1005         total_cost += 10 * rterm->total_cost;
1006         total_rows += 10 * rterm->plan_rows;
1007
1008         /*
1009          * Also charge cpu_tuple_cost per row to account for the costs of
1010          * manipulating the tuplestores.  (We don't worry about possible
1011          * spill-to-disk costs.)
1012          */
1013         total_cost += cpu_tuple_cost * total_rows;
1014
1015         runion->startup_cost = startup_cost;
1016         runion->total_cost = total_cost;
1017         runion->plan_rows = total_rows;
1018         runion->plan_width = Max(nrterm->plan_width, rterm->plan_width);
1019 }
1020
1021 /*
1022  * cost_sort
1023  *        Determines and returns the cost of sorting a relation, including
1024  *        the cost of reading the input data.
1025  *
1026  * If the total volume of data to sort is less than work_mem, we will do
1027  * an in-memory sort, which requires no I/O and about t*log2(t) tuple
1028  * comparisons for t tuples.
1029  *
1030  * If the total volume exceeds work_mem, we switch to a tape-style merge
1031  * algorithm.  There will still be about t*log2(t) tuple comparisons in
1032  * total, but we will also need to write and read each tuple once per
1033  * merge pass.  We expect about ceil(logM(r)) merge passes where r is the
1034  * number of initial runs formed and M is the merge order used by tuplesort.c.
1035  * Since the average initial run should be about twice work_mem, we have
1036  *              disk traffic = 2 * relsize * ceil(logM(p / (2*work_mem)))
1037  *              cpu = comparison_cost * t * log2(t)
1038  *
1039  * If the sort is bounded (i.e., only the first k result tuples are needed)
1040  * and k tuples can fit into work_mem, we use a heap method that keeps only
1041  * k tuples in the heap; this will require about t*log2(k) tuple comparisons.
1042  *
1043  * The disk traffic is assumed to be 3/4ths sequential and 1/4th random
1044  * accesses (XXX can't we refine that guess?)
1045  *
1046  * We charge two operator evals per tuple comparison, which should be in
1047  * the right ballpark in most cases.
1048  *
1049  * 'pathkeys' is a list of sort keys
1050  * 'input_cost' is the total cost for reading the input data
1051  * 'tuples' is the number of tuples in the relation
1052  * 'width' is the average tuple width in bytes
1053  * 'limit_tuples' is the bound on the number of output tuples; -1 if no bound
1054  *
1055  * NOTE: some callers currently pass NIL for pathkeys because they
1056  * can't conveniently supply the sort keys.  Since this routine doesn't
1057  * currently do anything with pathkeys anyway, that doesn't matter...
1058  * but if it ever does, it should react gracefully to lack of key data.
1059  * (Actually, the thing we'd most likely be interested in is just the number
1060  * of sort keys, which all callers *could* supply.)
1061  */
1062 void
1063 cost_sort(Path *path, PlannerInfo *root,
1064                   List *pathkeys, Cost input_cost, double tuples, int width,
1065                   double limit_tuples)
1066 {
1067         Cost            startup_cost = input_cost;
1068         Cost            run_cost = 0;
1069         double          input_bytes = relation_byte_size(tuples, width);
1070         double          output_bytes;
1071         double          output_tuples;
1072         long            work_mem_bytes = work_mem * 1024L;
1073
1074         if (!enable_sort)
1075                 startup_cost += disable_cost;
1076
1077         /*
1078          * We want to be sure the cost of a sort is never estimated as zero, even
1079          * if passed-in tuple count is zero.  Besides, mustn't do log(0)...
1080          */
1081         if (tuples < 2.0)
1082                 tuples = 2.0;
1083
1084         /* Do we have a useful LIMIT? */
1085         if (limit_tuples > 0 && limit_tuples < tuples)
1086         {
1087                 output_tuples = limit_tuples;
1088                 output_bytes = relation_byte_size(output_tuples, width);
1089         }
1090         else
1091         {
1092                 output_tuples = tuples;
1093                 output_bytes = input_bytes;
1094         }
1095
1096         if (output_bytes > work_mem_bytes)
1097         {
1098                 /*
1099                  * We'll have to use a disk-based sort of all the tuples
1100                  */
1101                 double          npages = ceil(input_bytes / BLCKSZ);
1102                 double          nruns = (input_bytes / work_mem_bytes) * 0.5;
1103                 double          mergeorder = tuplesort_merge_order(work_mem_bytes);
1104                 double          log_runs;
1105                 double          npageaccesses;
1106
1107                 /*
1108                  * CPU costs
1109                  *
1110                  * Assume about two operator evals per tuple comparison and N log2 N
1111                  * comparisons
1112                  */
1113                 startup_cost += 2.0 * cpu_operator_cost * tuples * LOG2(tuples);
1114
1115                 /* Disk costs */
1116
1117                 /* Compute logM(r) as log(r) / log(M) */
1118                 if (nruns > mergeorder)
1119                         log_runs = ceil(log(nruns) / log(mergeorder));
1120                 else
1121                         log_runs = 1.0;
1122                 npageaccesses = 2.0 * npages * log_runs;
1123                 /* Assume 3/4ths of accesses are sequential, 1/4th are not */
1124                 startup_cost += npageaccesses *
1125                         (seq_page_cost * 0.75 + random_page_cost * 0.25);
1126         }
1127         else if (tuples > 2 * output_tuples || input_bytes > work_mem_bytes)
1128         {
1129                 /*
1130                  * We'll use a bounded heap-sort keeping just K tuples in memory, for
1131                  * a total number of tuple comparisons of N log2 K; but the constant
1132                  * factor is a bit higher than for quicksort.  Tweak it so that the
1133                  * cost curve is continuous at the crossover point.
1134                  */
1135                 startup_cost += 2.0 * cpu_operator_cost * tuples * LOG2(2.0 * output_tuples);
1136         }
1137         else
1138         {
1139                 /* We'll use plain quicksort on all the input tuples */
1140                 startup_cost += 2.0 * cpu_operator_cost * tuples * LOG2(tuples);
1141         }
1142
1143         /*
1144          * Also charge a small amount (arbitrarily set equal to operator cost) per
1145          * extracted tuple.  Note it's correct to use tuples not output_tuples
1146          * here --- the upper LIMIT will pro-rate the run cost so we'd be double
1147          * counting the LIMIT otherwise.
1148          */
1149         run_cost += cpu_operator_cost * tuples;
1150
1151         path->startup_cost = startup_cost;
1152         path->total_cost = startup_cost + run_cost;
1153 }
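
/*
 * Illustrative example of the external-sort disk charge above, with made-up
 * sizes: sorting 80 MB of input with work_mem = 4 MB and BLCKSZ = 8 kB gives
 * npages = 10240 and nruns = (80 / 4) * 0.5 = 10.  Assuming the merge order
 * exceeds 10 (as it normally does at this work_mem), log_runs = 1, so
 * npageaccesses = 2 * 10240 and, with the default page costs, the disk
 * component is about
 *              20480 * (1.0 * 0.75 + 4.0 * 0.25) = 35840
 * on top of the comparison CPU cost.
 */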
1154
1155 /*
1156  * sort_exceeds_work_mem
1157  *        Given a finished Sort plan node, detect whether it is expected to
1158  *        spill to disk (ie, will need more than work_mem workspace)
1159  *
1160  * This assumes there will be no available LIMIT.
1161  */
1162 bool
1163 sort_exceeds_work_mem(Sort *sort)
1164 {
1165         double          input_bytes = relation_byte_size(sort->plan.plan_rows,
1166                                                                                                  sort->plan.plan_width);
1167         long            work_mem_bytes = work_mem * 1024L;
1168
1169         return (input_bytes > work_mem_bytes);
1170 }
1171
1172 /*
1173  * cost_material
1174  *        Determines and returns the cost of materializing a relation, including
1175  *        the cost of reading the input data.
1176  *
1177  * If the total volume of data to materialize exceeds work_mem, we will need
1178  * to write it to disk, so the cost is much higher in that case.
1179  */
1180 void
1181 cost_material(Path *path,
1182                           Cost input_cost, double tuples, int width)
1183 {
1184         Cost            startup_cost = input_cost;
1185         Cost            run_cost = 0;
1186         double          nbytes = relation_byte_size(tuples, width);
1187         long            work_mem_bytes = work_mem * 1024L;
1188
1189         /* disk costs */
1190         if (nbytes > work_mem_bytes)
1191         {
1192                 double          npages = ceil(nbytes / BLCKSZ);
1193
1194                 /* We'll write during startup and read during retrieval */
1195                 startup_cost += seq_page_cost * npages;
1196                 run_cost += seq_page_cost * npages;
1197         }
1198
1199         /*
1200          * Charge a very small amount per inserted tuple, to reflect bookkeeping
1201          * costs.  We use cpu_tuple_cost/10 for this.  This is needed to break the
1202          * tie that would otherwise exist between nestloop with A outer,
1203          * materialized B inner and nestloop with B outer, materialized A inner.
1204          * The extra cost ensures we'll prefer materializing the smaller rel.
1205          */
1206         startup_cost += cpu_tuple_cost * 0.1 * tuples;
1207
1208         /*
1209          * Also charge a small amount per extracted tuple.      We use cpu_tuple_cost
1210          * so that it doesn't appear worthwhile to materialize a bare seqscan.
1211          */
1212         run_cost += cpu_tuple_cost * tuples;
1213
1214         path->startup_cost = startup_cost;
1215         path->total_cost = startup_cost + run_cost;
1216 }
1217
1218 /*
1219  * cost_agg
1220  *              Determines and returns the cost of performing an Agg plan node,
1221  *              including the cost of its input.
1222  *
1223  * Note: when aggstrategy == AGG_SORTED, caller must ensure that input costs
1224  * are for appropriately-sorted input.
1225  */
1226 void
1227 cost_agg(Path *path, PlannerInfo *root,
1228                  AggStrategy aggstrategy, int numAggs,
1229                  int numGroupCols, double numGroups,
1230                  Cost input_startup_cost, Cost input_total_cost,
1231                  double input_tuples)
1232 {
1233         Cost            startup_cost;
1234         Cost            total_cost;
1235
1236         /*
1237          * We charge one cpu_operator_cost per aggregate function per input tuple,
1238          * and another one per output tuple (corresponding to transfn and finalfn
1239          * calls respectively).  If we are grouping, we charge an additional
1240          * cpu_operator_cost per grouping column per input tuple for grouping
1241          * comparisons.
1242          *
1243          * We will produce a single output tuple if not grouping, and a tuple per
1244          * group otherwise.  We charge cpu_tuple_cost for each output tuple.
1245          *
1246          * Note: in this cost model, AGG_SORTED and AGG_HASHED have exactly the
1247          * same total CPU cost, but AGG_SORTED has lower startup cost.  If the
1248          * input path is already sorted appropriately, AGG_SORTED should be
1249          * preferred (since it has no risk of memory overflow).  This will happen
1250          * as long as the computed total costs are indeed exactly equal --- but if
1251          * there's roundoff error we might do the wrong thing.  So be sure that
1252          * the computations below form the same intermediate values in the same
1253          * order.
1254          *
1255          * Note: ideally we should use the pg_proc.procost costs of each
1256          * aggregate's component functions, but for now that seems like an
1257          * excessive amount of work.
1258          */
1259         if (aggstrategy == AGG_PLAIN)
1260         {
1261                 startup_cost = input_total_cost;
1262                 startup_cost += cpu_operator_cost * (input_tuples + 1) * numAggs;
1263                 /* we aren't grouping */
1264                 total_cost = startup_cost + cpu_tuple_cost;
1265         }
1266         else if (aggstrategy == AGG_SORTED)
1267         {
1268                 /* Here we are able to deliver output on-the-fly */
1269                 startup_cost = input_startup_cost;
1270                 total_cost = input_total_cost;
1271                 /* calcs phrased this way to match HASHED case, see note above */
1272                 total_cost += cpu_operator_cost * input_tuples * numGroupCols;
1273                 total_cost += cpu_operator_cost * input_tuples * numAggs;
1274                 total_cost += cpu_operator_cost * numGroups * numAggs;
1275                 total_cost += cpu_tuple_cost * numGroups;
1276         }
1277         else
1278         {
1279                 /* must be AGG_HASHED */
1280                 startup_cost = input_total_cost;
1281                 startup_cost += cpu_operator_cost * input_tuples * numGroupCols;
1282                 startup_cost += cpu_operator_cost * input_tuples * numAggs;
1283                 total_cost = startup_cost;
1284                 total_cost += cpu_operator_cost * numGroups * numAggs;
1285                 total_cost += cpu_tuple_cost * numGroups;
1286         }
1287
1288         path->startup_cost = startup_cost;
1289         path->total_cost = total_cost;
1290 }
1291
1292 /*
1293  * cost_windowagg
1294  *              Determines and returns the cost of performing a WindowAgg plan node,
1295  *              including the cost of its input.
1296  *
1297  * Input is assumed already properly sorted.
1298  */
1299 void
1300 cost_windowagg(Path *path, PlannerInfo *root,
1301                            int numWindowFuncs, int numPartCols, int numOrderCols,
1302                            Cost input_startup_cost, Cost input_total_cost,
1303                            double input_tuples)
1304 {
1305         Cost            startup_cost;
1306         Cost            total_cost;
1307
1308         startup_cost = input_startup_cost;
1309         total_cost = input_total_cost;
1310
1311         /*
1312          * We charge one cpu_operator_cost per window function per tuple (often a
1313          * drastic underestimate, but without a way to gauge how many tuples the
1314          * window function will fetch, it's hard to do better).  We also charge
1315          * cpu_operator_cost per grouping column per tuple for grouping
1316          * comparisons, plus cpu_tuple_cost per tuple for general overhead.
1317          */
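             /*
              * Worked example (editorial illustration, not part of the original
              * comment): with the default cpu_operator_cost = 0.0025 and
              * cpu_tuple_cost = 0.01, a WindowAgg over input_tuples = 1000 with
              * numWindowFuncs = 1, numPartCols = 1 and numOrderCols = 1 adds
              * 1000 * 0.0025 + 1000 * 2 * 0.0025 + 1000 * 0.01 = 17.5 to run cost.
              */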
1318         total_cost += cpu_operator_cost * input_tuples * numWindowFuncs;
1319         total_cost += cpu_operator_cost * input_tuples * (numPartCols + numOrderCols);
1320         total_cost += cpu_tuple_cost * input_tuples;
1321
1322         path->startup_cost = startup_cost;
1323         path->total_cost = total_cost;
1324 }
1325
1326 /*
1327  * cost_group
1328  *              Determines and returns the cost of performing a Group plan node,
1329  *              including the cost of its input.
1330  *
1331  * Note: caller must ensure that input costs are for appropriately-sorted
1332  * input.
1333  */
1334 void
1335 cost_group(Path *path, PlannerInfo *root,
1336                    int numGroupCols, double numGroups,
1337                    Cost input_startup_cost, Cost input_total_cost,
1338                    double input_tuples)
1339 {
1340         Cost            startup_cost;
1341         Cost            total_cost;
1342
1343         startup_cost = input_startup_cost;
1344         total_cost = input_total_cost;
1345
1346         /*
1347          * Charge one cpu_operator_cost per comparison per input tuple.  We
1348          * assume all the grouping columns are compared for most of the tuples.
1349          */
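             /*
              * Editorial illustration (not part of the original comment): grouping
              * 10000 input tuples on numGroupCols = 2 adds 10000 * 2 * 0.0025 = 50
              * to the run cost, assuming the default cpu_operator_cost of 0.0025.
              */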
1350         total_cost += cpu_operator_cost * input_tuples * numGroupCols;
1351
1352         path->startup_cost = startup_cost;
1353         path->total_cost = total_cost;
1354 }
1355
1356 /*
1357  * If a nestloop's inner path is an indexscan, be sure to use its estimated
1358  * output row count, which may be lower than the restriction-clause-only row
1359  * count of its parent.  (We don't include this case in the PATH_ROWS macro
1360  * because it applies *only* to a nestloop's inner relation.)  We have to
1361  * be prepared to recurse through Append nodes in case of an appendrel.
1362  */
1363 static double
1364 nestloop_inner_path_rows(Path *path)
1365 {
1366         double          result;
1367
1368         if (IsA(path, IndexPath))
1369                 result = ((IndexPath *) path)->rows;
1370         else if (IsA(path, BitmapHeapPath))
1371                 result = ((BitmapHeapPath *) path)->rows;
1372         else if (IsA(path, AppendPath))
1373         {
1374                 ListCell   *l;
1375
1376                 result = 0;
1377                 foreach(l, ((AppendPath *) path)->subpaths)
1378                 {
1379                         result += nestloop_inner_path_rows((Path *) lfirst(l));
1380                 }
1381         }
1382         else
1383                 result = PATH_ROWS(path);
1384
1385         return result;
1386 }
1387
1388 /*
1389  * cost_nestloop
1390  *        Determines and returns the cost of joining two relations using the
1391  *        nested loop algorithm.
1392  *
1393  * 'path' is already filled in except for the cost fields
1394  * 'sjinfo' is extra info about the join for selectivity estimation
1395  */
1396 void
1397 cost_nestloop(NestPath *path, PlannerInfo *root, SpecialJoinInfo *sjinfo)
1398 {
1399         Path       *outer_path = path->outerjoinpath;
1400         Path       *inner_path = path->innerjoinpath;
1401         Cost            startup_cost = 0;
1402         Cost            run_cost = 0;
1403         Cost            inner_run_cost;
1404         Cost            cpu_per_tuple;
1405         QualCost        restrict_qual_cost;
1406         double          outer_path_rows = PATH_ROWS(outer_path);
1407         double          inner_path_rows = nestloop_inner_path_rows(inner_path);
1408         double          ntuples;
1409         Selectivity     outer_match_frac;
1410         Selectivity     match_count;
1411         bool            indexed_join_quals;
1412
1413         if (!enable_nestloop)
1414                 startup_cost += disable_cost;
1415
1416         /* cost of source data */
1417
1418         /*
1419          * NOTE: clearly, we must pay both outer and inner paths' startup_cost
1420          * before we can start returning tuples, so the join's startup cost is
1421          * their sum.  What's not so clear is whether the inner path's
1422          * startup_cost must be paid again on each rescan of the inner path. This
1423          * is not true if the inner path is materialized or is a hashjoin, but
1424          * probably is true otherwise.
1425          */
1426         startup_cost += outer_path->startup_cost + inner_path->startup_cost;
1427         run_cost += outer_path->total_cost - outer_path->startup_cost;
1428         if (IsA(inner_path, MaterialPath) ||
1429                 IsA(inner_path, HashPath))
1430         {
1431                 /* charge only run cost for each iteration of inner path */
1432         }
1433         else
1434         {
1435                 /*
1436                  * charge startup cost for each iteration of inner path, except we
1437                  * already charged the first startup_cost in our own startup
1438                  */
1439                 run_cost += (outer_path_rows - 1) * inner_path->startup_cost;
1440         }
1441         inner_run_cost = inner_path->total_cost - inner_path->startup_cost;
1442
1443         if (adjust_semi_join(root, path, sjinfo,
1444                                                  &outer_match_frac,
1445                                                  &match_count,
1446                                                  &indexed_join_quals))
1447         {
1448                 double          outer_matched_rows;
1449                 Selectivity     inner_scan_frac;
1450
1451                 /*
1452                  * SEMI or ANTI join: executor will stop after first match.
1453                  *
1454                  * For an outer-rel row that has at least one match, we can expect the
1455                  * inner scan to stop after a fraction 1/(match_count+1) of the inner
1456                  * rows, if the matches are evenly distributed.  Since they probably
1457                  * aren't quite evenly distributed, we apply a fuzz factor of 2.0 to
1458                  * that fraction.  (If we used a larger fuzz factor, we'd have to
1459                  * clamp inner_scan_frac to at most 1.0; but since match_count is at
1460                  * least 1, no such clamp is needed now.)
1461                  */
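                     /*
                      * Editorial illustration (hypothetical numbers, not from the
                      * original comment): if outer_path_rows = 1000 and
                      * outer_match_frac = 0.4, then outer_matched_rows = 400; with
                      * match_count = 3 the fuzzed stop fraction is inner_scan_frac =
                      * 2.0 / (3 + 1) = 0.5, so each matched outer row is charged half
                      * of inner_run_cost and is assumed to examine half of the inner
                      * rows.
                      */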
1462                 outer_matched_rows = rint(outer_path_rows * outer_match_frac);
1463                 inner_scan_frac = 2.0 / (match_count + 1.0);
1464
1465                 /* Add inner run cost for outer tuples having matches */
1466                 run_cost += outer_matched_rows * inner_run_cost * inner_scan_frac;
1467
1468                 /* Compute number of tuples processed (not number emitted!) */
1469                 ntuples = outer_matched_rows * inner_path_rows * inner_scan_frac;
1470
1471                 /*
1472                  * For unmatched outer-rel rows, there are two cases.  If the inner
1473                  * path is an indexscan using all the joinquals as indexquals, then
1474                  * an unmatched row results in an indexscan returning no rows, which
1475                  * is probably quite cheap.  We estimate this case as costing the same
1476                  * as returning the first tuple of a nonempty scan.  Otherwise, the
1477                  * executor will have to scan the whole inner rel; not so cheap.
1478                  */
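                     /*
                      * Editorial illustration (hypothetical numbers): continuing the
                      * example above with 600 unmatched outer rows, inner_run_cost =
                      * 50 and inner_path_rows = 1000, the indexed case adds only
                      * 600 * 50 / 1000 = 30 to run_cost, while the whole-inner-scan
                      * case adds 600 * 50 = 30000 plus 600 * 1000 additional qual
                      * evaluations via ntuples.
                      */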
1479                 if (indexed_join_quals)
1480                 {
1481                         run_cost += (outer_path_rows - outer_matched_rows) *
1482                                 inner_run_cost / inner_path_rows;
1483                         /* We won't be evaluating any quals at all for these rows */
1484                 }
1485                 else
1486                 {
1487                         run_cost += (outer_path_rows - outer_matched_rows) *
1488                                 inner_run_cost;
1489                         ntuples += (outer_path_rows - outer_matched_rows) *
1490                                 inner_path_rows;
1491                 }
1492         }
1493         else
1494         {
1495                 /* Normal case; we'll scan whole input rel for each outer row */
1496                 run_cost += outer_path_rows * inner_run_cost;
1497
1498                 /* Compute number of tuples processed (not number emitted!) */
1499                 ntuples = outer_path_rows * inner_path_rows;
1500         }
1501
1502         /* CPU costs */
1503         cost_qual_eval(&restrict_qual_cost, path->joinrestrictinfo, root);
1504         startup_cost += restrict_qual_cost.startup;
1505         cpu_per_tuple = cpu_tuple_cost + restrict_qual_cost.per_tuple;
1506         run_cost += cpu_per_tuple * ntuples;
1507
1508         path->path.startup_cost = startup_cost;
1509         path->path.total_cost = startup_cost + run_cost;
1510 }
1511
1512 /*
1513  * cost_mergejoin
1514  *        Determines and returns the cost of joining two relations using the
1515  *        merge join algorithm.
1516  *
1517  * 'path' is already filled in except for the cost fields
1518  * 'sjinfo' is extra info about the join for selectivity estimation
1519  *
1520  * Notes: path's mergeclauses should be a subset of the joinrestrictinfo list;
1521  * outersortkeys and innersortkeys are lists of the keys to be used
1522  * to sort the outer and inner relations, or NIL if no explicit
1523  * sort is needed because the source path is already ordered.
1524  */
1525 void
1526 cost_mergejoin(MergePath *path, PlannerInfo *root, SpecialJoinInfo *sjinfo)
1527 {
1528         Path       *outer_path = path->jpath.outerjoinpath;
1529         Path       *inner_path = path->jpath.innerjoinpath;
1530         List       *mergeclauses = path->path_mergeclauses;
1531         List       *outersortkeys = path->outersortkeys;
1532         List       *innersortkeys = path->innersortkeys;
1533         Cost            startup_cost = 0;
1534         Cost            run_cost = 0;
1535         Cost            cpu_per_tuple;
1536         QualCost        merge_qual_cost;
1537         QualCost        qp_qual_cost;
1538         double          outer_path_rows = PATH_ROWS(outer_path);
1539         double          inner_path_rows = PATH_ROWS(inner_path);
1540         double          outer_rows,
1541                                 inner_rows,
1542                                 outer_skip_rows,
1543                                 inner_skip_rows;
1544         double          mergejointuples,
1545                                 rescannedtuples;
1546         double          rescanratio;
1547         Selectivity outerstartsel,
1548                                 outerendsel,
1549                                 innerstartsel,
1550                                 innerendsel;
1551         Path            sort_path;              /* dummy for result of cost_sort */
1552
1553         /* Protect some assumptions below that rowcounts aren't zero */
1554         if (outer_path_rows <= 0)
1555                 outer_path_rows = 1;
1556         if (inner_path_rows <= 0)
1557                 inner_path_rows = 1;
1558
1559         if (!enable_mergejoin)
1560                 startup_cost += disable_cost;
1561
1562         /*
1563          * Compute cost of the mergequals and qpquals (other restriction clauses)
1564          * separately.
1565          */
1566         cost_qual_eval(&merge_qual_cost, mergeclauses, root);
1567         cost_qual_eval(&qp_qual_cost, path->jpath.joinrestrictinfo, root);
1568         qp_qual_cost.startup -= merge_qual_cost.startup;
1569         qp_qual_cost.per_tuple -= merge_qual_cost.per_tuple;
1570
1571         /*
1572          * Get approx # tuples passing the mergequals.  We use approx_tuple_count
1573          * here because we need an estimate done with JOIN_INNER semantics.
1574          */
1575         mergejointuples = approx_tuple_count(root, &path->jpath, mergeclauses);
1576
1577         /*
1578          * When there are equal merge keys in the outer relation, the mergejoin
1579          * must rescan any matching tuples in the inner relation. This means
1580          * re-fetching inner tuples.  Our cost model for this is that a re-fetch
1581          * costs the same as an original fetch, which is probably an overestimate;
1582          * but on the other hand we ignore the bookkeeping costs of mark/restore.
1583          * Not clear if it's worth developing a more refined model.
1584          *
1585          * For regular inner and outer joins, the number of re-fetches can be
1586          * estimated approximately as size of merge join output minus size of
1587          * inner relation. Assume that the distinct key values are 1, 2, ..., and
1588          * denote the number of values of each key in the outer relation as m1,
1589          * m2, ...; in the inner relation, n1, n2, ...  Then we have
1590          *
1591          * size of join = m1 * n1 + m2 * n2 + ...
1592          *
1593          * number of rescanned tuples = (m1 - 1) * n1 + (m2 - 1) * n2 + ...
1594          *                            = m1 * n1 + m2 * n2 + ... - (n1 + n2 + ...)
1595          *                            = size of join - size of inner relation
1596          *
1597          * This equation works correctly for outer tuples having no inner match
1598          * (nk = 0), but not for inner tuples having no outer match (mk = 0); we
1599          * are effectively subtracting those from the number of rescanned tuples,
1600          * when we should not.  Can we do better without expensive selectivity
1601          * computations?
1602          *
1603          * The whole issue is moot if we are working from a unique-ified outer
1604          * input.
1605          */
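             /*
              * Editorial illustration (not part of the original comment): with two
              * distinct key values whose outer-side counts are m1 = 2, m2 = 3 and
              * inner-side counts are n1 = 4, n2 = 1, the join produces
              * 2*4 + 3*1 = 11 rows, the inner relation has 4 + 1 = 5 rows, and the
              * rescanned-tuple count is 11 - 5 = 6, matching (2-1)*4 + (3-1)*1 = 6.
              */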
1606         if (IsA(outer_path, UniquePath))
1607                 rescannedtuples = 0;
1608         else
1609         {
1610                 rescannedtuples = mergejointuples - inner_path_rows;
1611                 /* Must clamp because of possible underestimate */
1612                 if (rescannedtuples < 0)
1613                         rescannedtuples = 0;
1614         }
1615         /* We'll inflate inner run cost this much to account for rescanning */
1616         rescanratio = 1.0 + (rescannedtuples / inner_path_rows);
1617
1618         /*
1619          * A merge join will stop as soon as it exhausts either input stream
1620          * (unless it's an outer join, in which case the outer side has to be
1621          * scanned all the way anyway).  Estimate fraction of the left and right
1622          * inputs that will actually need to be scanned.  Likewise, we can
1623          * estimate the number of rows that will be skipped before the first
1624          * join pair is found, which should be factored into startup cost.
1625          * We use only the first (most significant) merge clause for this purpose.
1626          * Since mergejoinscansel() is a fairly expensive computation, we cache
1627          * the results in the merge clause RestrictInfo.
1628          */
1629         if (mergeclauses && path->jpath.jointype != JOIN_FULL)
1630         {
1631                 RestrictInfo *firstclause = (RestrictInfo *) linitial(mergeclauses);
1632                 List       *opathkeys;
1633                 List       *ipathkeys;
1634                 PathKey    *opathkey;
1635                 PathKey    *ipathkey;
1636                 MergeScanSelCache *cache;
1637
1638                 /* Get the input pathkeys to determine the sort-order details */
1639                 opathkeys = outersortkeys ? outersortkeys : outer_path->pathkeys;
1640                 ipathkeys = innersortkeys ? innersortkeys : inner_path->pathkeys;
1641                 Assert(opathkeys);
1642                 Assert(ipathkeys);
1643                 opathkey = (PathKey *) linitial(opathkeys);
1644                 ipathkey = (PathKey *) linitial(ipathkeys);
1645                 /* debugging check */
1646                 if (opathkey->pk_opfamily != ipathkey->pk_opfamily ||
1647                         opathkey->pk_strategy != ipathkey->pk_strategy ||
1648                         opathkey->pk_nulls_first != ipathkey->pk_nulls_first)
1649                         elog(ERROR, "left and right pathkeys do not match in mergejoin");
1650
1651                 /* Get the selectivity with caching */
1652                 cache = cached_scansel(root, firstclause, opathkey);
1653
1654                 if (bms_is_subset(firstclause->left_relids,
1655                                                   outer_path->parent->relids))
1656                 {
1657                         /* left side of clause is outer */
1658                         outerstartsel = cache->leftstartsel;
1659                         outerendsel = cache->leftendsel;
1660                         innerstartsel = cache->rightstartsel;
1661                         innerendsel = cache->rightendsel;
1662                 }
1663                 else
1664                 {
1665                         /* left side of clause is inner */
1666                         outerstartsel = cache->rightstartsel;
1667                         outerendsel = cache->rightendsel;
1668                         innerstartsel = cache->leftstartsel;
1669                         innerendsel = cache->leftendsel;
1670                 }
1671                 if (path->jpath.jointype == JOIN_LEFT ||
1672                         path->jpath.jointype == JOIN_ANTI)
1673                 {
1674                         outerstartsel = 0.0;
1675                         outerendsel = 1.0;
1676                 }
1677                 else if (path->jpath.jointype == JOIN_RIGHT)
1678                 {
1679                         innerstartsel = 0.0;
1680                         innerendsel = 1.0;
1681                 }
1682         }
1683         else
1684         {
1685                 /* cope with clauseless or full mergejoin */
1686                 outerstartsel = innerstartsel = 0.0;
1687                 outerendsel = innerendsel = 1.0;
1688         }
1689
1690         /*
1691          * Convert selectivities to row counts.  We force outer_rows and
1692          * inner_rows to be at least 1, but the skip_rows estimates can be zero.
1693          */
1694         outer_skip_rows = rint(outer_path_rows * outerstartsel);
1695         inner_skip_rows = rint(inner_path_rows * innerstartsel);
1696         outer_rows = clamp_row_est(outer_path_rows * outerendsel);
1697         inner_rows = clamp_row_est(inner_path_rows * innerendsel);
1698
1699         Assert(outer_skip_rows <= outer_rows);
1700         Assert(inner_skip_rows <= inner_rows);
1701
1702         /*
1703          * Readjust scan selectivities to account for above rounding.  This is
1704          * normally an insignificant effect, but when there are only a few rows in
1705          * the inputs, failing to do this makes for a large percentage error.
1706          */
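             /*
              * Editorial illustration (hypothetical numbers): with only
              * outer_path_rows = 3 and outerstartsel = 0.4, we get outer_skip_rows =
              * rint(1.2) = 1, so the recomputed outerstartsel is 1/3 = 0.33 rather
              * than 0.4 --- a sizable relative change that the startup/run split
              * below should reflect.
              */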
1707         outerstartsel = outer_skip_rows / outer_path_rows;
1708         innerstartsel = inner_skip_rows / inner_path_rows;
1709         outerendsel = outer_rows / outer_path_rows;
1710         innerendsel = inner_rows / inner_path_rows;
1711
1712         Assert(outerstartsel <= outerendsel);
1713         Assert(innerstartsel <= innerendsel);
1714
1715         /* cost of source data */
1716
1717         if (outersortkeys)                      /* do we need to sort outer? */
1718         {
1719                 cost_sort(&sort_path,
1720                                   root,
1721                                   outersortkeys,
1722                                   outer_path->total_cost,
1723                                   outer_path_rows,
1724                                   outer_path->parent->width,
1725                                   -1.0);
1726                 startup_cost += sort_path.startup_cost;
1727                 startup_cost += (sort_path.total_cost - sort_path.startup_cost)
1728                         * outerstartsel;
1729                 run_cost += (sort_path.total_cost - sort_path.startup_cost)
1730                         * (outerendsel - outerstartsel);
1731         }
1732         else
1733         {
1734                 startup_cost += outer_path->startup_cost;
1735                 startup_cost += (outer_path->total_cost - outer_path->startup_cost)
1736                         * outerstartsel;
1737                 run_cost += (outer_path->total_cost - outer_path->startup_cost)
1738                         * (outerendsel - outerstartsel);
1739         }
1740
1741         if (innersortkeys)                      /* do we need to sort inner? */
1742         {
1743                 cost_sort(&sort_path,
1744                                   root,
1745                                   innersortkeys,
1746                                   inner_path->total_cost,
1747                                   inner_path_rows,
1748                                   inner_path->parent->width,
1749                                   -1.0);
1750                 startup_cost += sort_path.startup_cost;
1751                 startup_cost += (sort_path.total_cost - sort_path.startup_cost)
1752                         * innerstartsel * rescanratio;
1753                 run_cost += (sort_path.total_cost - sort_path.startup_cost)
1754                         * (innerendsel - innerstartsel) * rescanratio;
1755
1756                 /*
1757                  * If the inner sort is expected to spill to disk, we want to add a
1758                  * materialize node to shield it from the need to handle mark/restore.
1759                  * This will allow it to perform the last merge pass on-the-fly, while
1760                  * in most cases not requiring the materialize to spill to disk.
1761                  * Charge an extra cpu_tuple_cost per tuple to account for the
1762                  * materialize node.  (Keep this estimate in sync with similar ones in
1763                  * create_mergejoin_path and create_mergejoin_plan.)
1764                  */
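                     /*
                      * Editorial illustration (hypothetical numbers): if the inner
                      * input is about 100MB of sort data and work_mem is, say, 1MB,
                      * the test below succeeds and we add cpu_tuple_cost per inner
                      * row --- e.g. 1,000,000 rows * 0.01 = 10000 --- to run_cost
                      * for the expected materialize node.
                      */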
1765                 if (relation_byte_size(inner_path_rows, inner_path->parent->width) >
1766                         (work_mem * 1024L))
1767                         run_cost += cpu_tuple_cost * inner_path_rows;
1768         }
1769         else
1770         {
1771                 startup_cost += inner_path->startup_cost;
1772                 startup_cost += (inner_path->total_cost - inner_path->startup_cost)
1773                         * innerstartsel * rescanratio;
1774                 run_cost += (inner_path->total_cost - inner_path->startup_cost)
1775                         * (innerendsel - innerstartsel) * rescanratio;
1776         }
1777
1778         /* CPU costs */
1779
1780         /*
1781          * The number of tuple comparisons needed is approximately the number of outer
1782          * rows plus the number of inner rows plus the number of rescanned tuples (can
1783          * we refine this?).  At each one, we need to evaluate the mergejoin quals.
1784          */
1785         startup_cost += merge_qual_cost.startup;
1786         startup_cost += merge_qual_cost.per_tuple *
1787                 (outer_skip_rows + inner_skip_rows * rescanratio);
1788         run_cost += merge_qual_cost.per_tuple *
1789                 ((outer_rows - outer_skip_rows) +
1790                  (inner_rows - inner_skip_rows) * rescanratio);
1791
1792         /*
1793          * For each tuple that gets through the mergejoin proper, we charge
1794          * cpu_tuple_cost plus the cost of evaluating additional restriction
1795          * clauses that are to be applied at the join.  (This is pessimistic since
1796          * not all of the quals may get evaluated at each tuple.)
1797          *
1798          * Note: we could adjust for SEMI/ANTI joins skipping some qual evaluations
1799          * here, but it's probably not worth the trouble.
1800          */
1801         startup_cost += qp_qual_cost.startup;
1802         cpu_per_tuple = cpu_tuple_cost + qp_qual_cost.per_tuple;
1803         run_cost += cpu_per_tuple * mergejointuples;
1804
1805         path->jpath.path.startup_cost = startup_cost;
1806         path->jpath.path.total_cost = startup_cost + run_cost;
1807 }
1808
1809 /*
1810  * run mergejoinscansel() with caching
1811  */
1812 static MergeScanSelCache *
1813 cached_scansel(PlannerInfo *root, RestrictInfo *rinfo, PathKey *pathkey)
1814 {
1815         MergeScanSelCache *cache;
1816         ListCell   *lc;
1817         Selectivity leftstartsel,
1818                                 leftendsel,
1819                                 rightstartsel,
1820                                 rightendsel;
1821         MemoryContext oldcontext;
1822
1823         /* Do we have this result already? */
1824         foreach(lc, rinfo->scansel_cache)
1825         {
1826                 cache = (MergeScanSelCache *) lfirst(lc);
1827                 if (cache->opfamily == pathkey->pk_opfamily &&
1828                         cache->strategy == pathkey->pk_strategy &&
1829                         cache->nulls_first == pathkey->pk_nulls_first)
1830                         return cache;
1831         }
1832
1833         /* Nope, do the computation */
1834         mergejoinscansel(root,
1835                                          (Node *) rinfo->clause,
1836                                          pathkey->pk_opfamily,
1837                                          pathkey->pk_strategy,
1838                                          pathkey->pk_nulls_first,
1839                                          &leftstartsel,
1840                                          &leftendsel,
1841                                          &rightstartsel,
1842                                          &rightendsel);
1843
1844         /* Cache the result in suitably long-lived workspace */
1845         oldcontext = MemoryContextSwitchTo(root->planner_cxt);
1846
1847         cache = (MergeScanSelCache *) palloc(sizeof(MergeScanSelCache));
1848         cache->opfamily = pathkey->pk_opfamily;
1849         cache->strategy = pathkey->pk_strategy;
1850         cache->nulls_first = pathkey->pk_nulls_first;
1851         cache->leftstartsel = leftstartsel;
1852         cache->leftendsel = leftendsel;
1853         cache->rightstartsel = rightstartsel;
1854         cache->rightendsel = rightendsel;
1855
1856         rinfo->scansel_cache = lappend(rinfo->scansel_cache, cache);
1857
1858         MemoryContextSwitchTo(oldcontext);
1859
1860         return cache;
1861 }
1862
1863 /*
1864  * cost_hashjoin
1865  *        Determines and returns the cost of joining two relations using the
1866  *        hash join algorithm.
1867  *
1868  * 'path' is already filled in except for the cost fields
1869  * 'sjinfo' is extra info about the join for selectivity estimation
1870  *
1871  * Note: path's hashclauses should be a subset of the joinrestrictinfo list
1872  */
1873 void
1874 cost_hashjoin(HashPath *path, PlannerInfo *root, SpecialJoinInfo *sjinfo)
1875 {
1876         Path       *outer_path = path->jpath.outerjoinpath;
1877         Path       *inner_path = path->jpath.innerjoinpath;
1878         List       *hashclauses = path->path_hashclauses;
1879         Cost            startup_cost = 0;
1880         Cost            run_cost = 0;
1881         Cost            cpu_per_tuple;
1882         QualCost        hash_qual_cost;
1883         QualCost        qp_qual_cost;
1884         double          hashjointuples;
1885         double          outer_path_rows = PATH_ROWS(outer_path);
1886         double          inner_path_rows = PATH_ROWS(inner_path);
1887         int                     num_hashclauses = list_length(hashclauses);
1888         int                     numbuckets;
1889         int                     numbatches;
1890         int                     num_skew_mcvs;
1891         double          virtualbuckets;
1892         Selectivity innerbucketsize;
1893         Selectivity     outer_match_frac;
1894         Selectivity     match_count;
1895         ListCell   *hcl;
1896
1897         if (!enable_hashjoin)
1898                 startup_cost += disable_cost;
1899
1900         /*
1901          * Compute cost of the hashquals and qpquals (other restriction clauses)
1902          * separately.
1903          */
1904         cost_qual_eval(&hash_qual_cost, hashclauses, root);
1905         cost_qual_eval(&qp_qual_cost, path->jpath.joinrestrictinfo, root);
1906         qp_qual_cost.startup -= hash_qual_cost.startup;
1907         qp_qual_cost.per_tuple -= hash_qual_cost.per_tuple;
1908
1909         /* cost of source data */
1910         startup_cost += outer_path->startup_cost;
1911         run_cost += outer_path->total_cost - outer_path->startup_cost;
1912         startup_cost += inner_path->total_cost;
1913
1914         /*
1915          * Cost of computing hash function: must do it once per input tuple. We
1916          * charge one cpu_operator_cost for each column's hash function.  Also,
1917          * tack on one cpu_tuple_cost per inner row, to model the costs of
1918          * inserting the row into the hashtable.
1919          *
1920          * XXX when a hashclause is more complex than a single operator, we really
1921          * should charge the extra eval costs of the left or right side, as
1922          * appropriate, here.  This seems more work than it's worth at the moment.
1923          */
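             /*
              * Editorial illustration (not part of the original comment): with one
              * hashclause, inner_path_rows = 10000 and outer_path_rows = 100000,
              * the defaults give (0.0025 * 1 + 0.01) * 10000 = 125 of startup cost
              * for building the hash table and 0.0025 * 1 * 100000 = 250 of run
              * cost for hashing the outer input.
              */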
1924         startup_cost += (cpu_operator_cost * num_hashclauses + cpu_tuple_cost)
1925                 * inner_path_rows;
1926         run_cost += cpu_operator_cost * num_hashclauses * outer_path_rows;
1927
1928         /*
1929          * Get hash table size that executor would use for inner relation.
1930          *
1931          * XXX for the moment, always assume that skew optimization will be
1932          * performed.  As long as SKEW_WORK_MEM_PERCENT is small, it's not worth
1933          * trying to determine that for sure.
1934          *
1935          * XXX at some point it might be interesting to try to account for skew
1936          * optimization in the cost estimate, but for now, we don't.
1937          */
1938         ExecChooseHashTableSize(inner_path_rows,
1939                                                         inner_path->parent->width,
1940                                                         true,   /* useskew */
1941                                                         &numbuckets,
1942                                                         &numbatches,
1943                                                         &num_skew_mcvs);
1944         virtualbuckets = (double) numbuckets * (double) numbatches;
1945         /* mark the path with estimated # of batches */
1946         path->num_batches = numbatches;
1947
1948         /*
1949          * Determine bucketsize fraction for inner relation.  We use the smallest
1950          * bucketsize estimated for any individual hashclause; this is undoubtedly
1951          * conservative.
1952          *
1953          * BUT: if inner relation has been unique-ified, we can assume it's good
1954          * for hashing.  This is important both because it's the right answer, and
1955          * because we avoid contaminating the cache with a value that's wrong for
1956          * non-unique-ified paths.
1957          */
1958         if (IsA(inner_path, UniquePath))
1959                 innerbucketsize = 1.0 / virtualbuckets;
1960         else
1961         {
1962                 innerbucketsize = 1.0;
1963                 foreach(hcl, hashclauses)
1964                 {
1965                         RestrictInfo *restrictinfo = (RestrictInfo *) lfirst(hcl);
1966                         Selectivity thisbucketsize;
1967
1968                         Assert(IsA(restrictinfo, RestrictInfo));
1969
1970                         /*
1971                          * First we have to figure out which side of the hashjoin clause
1972                          * is the inner side.
1973                          *
1974                          * Since we tend to visit the same clauses over and over when
1975                          * planning a large query, we cache the bucketsize estimate in the
1976                          * RestrictInfo node to avoid repeated lookups of statistics.
1977                          */
1978                         if (bms_is_subset(restrictinfo->right_relids,
1979                                                           inner_path->parent->relids))
1980                         {
1981                                 /* righthand side is inner */
1982                                 thisbucketsize = restrictinfo->right_bucketsize;
1983                                 if (thisbucketsize < 0)
1984                                 {
1985                                         /* not cached yet */
1986                                         thisbucketsize =
1987                                                 estimate_hash_bucketsize(root,
1988                                                                                    get_rightop(restrictinfo->clause),
1989                                                                                                  virtualbuckets);
1990                                         restrictinfo->right_bucketsize = thisbucketsize;
1991                                 }
1992                         }
1993                         else
1994                         {
1995                                 Assert(bms_is_subset(restrictinfo->left_relids,
1996                                                                          inner_path->parent->relids));
1997                                 /* lefthand side is inner */
1998                                 thisbucketsize = restrictinfo->left_bucketsize;
1999                                 if (thisbucketsize < 0)
2000                                 {
2001                                         /* not cached yet */
2002                                         thisbucketsize =
2003                                                 estimate_hash_bucketsize(root,
2004                                                                                         get_leftop(restrictinfo->clause),
2005                                                                                                  virtualbuckets);
2006                                         restrictinfo->left_bucketsize = thisbucketsize;
2007                                 }
2008                         }
2009
2010                         if (innerbucketsize > thisbucketsize)
2011                                 innerbucketsize = thisbucketsize;
2012                 }
2013         }
2014
2015         /*
2016          * If inner relation is too big then we will need to "batch" the join,
2017          * which implies writing and reading most of the tuples to disk an extra
2018          * time.  Charge seq_page_cost per page, since the I/O should be nice and
2019          * sequential.  Writing the inner rel counts as startup cost, all the rest
2020          * as run cost.
2021          */
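             /*
              * Editorial illustration (hypothetical numbers): with numbatches > 1,
              * innerpages = 1000 and outerpages = 5000, the default seq_page_cost
              * of 1.0 adds 1000 to startup_cost (writing the inner rel out) and
              * 1000 + 2 * 5000 = 11000 to run_cost (re-reading the inner rel plus
              * writing and re-reading the outer rel).
              */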
2022         if (numbatches > 1)
2023         {
2024                 double          outerpages = page_size(outer_path_rows,
2025                                                                                    outer_path->parent->width);
2026                 double          innerpages = page_size(inner_path_rows,
2027                                                                                    inner_path->parent->width);
2028
2029                 startup_cost += seq_page_cost * innerpages;
2030                 run_cost += seq_page_cost * (innerpages + 2 * outerpages);
2031         }
2032
2033         /* CPU costs */
2034
2035         if (adjust_semi_join(root, &path->jpath, sjinfo,
2036                                                  &outer_match_frac,
2037                                                  &match_count,
2038                                                  NULL))
2039         {
2040                 double          outer_matched_rows;
2041                 Selectivity     inner_scan_frac;
2042
2043                 /*
2044                  * SEMI or ANTI join: executor will stop after first match.
2045                  *
2046                  * For an outer-rel row that has at least one match, we can expect the
2047                  * bucket scan to stop after a fraction 1/(match_count+1) of the
2048                  * bucket's rows, if the matches are evenly distributed.  Since they
2049                  * probably aren't quite evenly distributed, we apply a fuzz factor of
2050                  * 2.0 to that fraction.  (If we used a larger fuzz factor, we'd have
2051                  * to clamp inner_scan_frac to at most 1.0; but since match_count is
2052                  * at least 1, no such clamp is needed now.)
2053                  */
2054                 outer_matched_rows = rint(outer_path_rows * outer_match_frac);
2055                 inner_scan_frac = 2.0 / (match_count + 1.0);
2056
2057                 startup_cost += hash_qual_cost.startup;
2058                 run_cost += hash_qual_cost.per_tuple * outer_matched_rows *
2059                         clamp_row_est(inner_path_rows * innerbucketsize * inner_scan_frac) * 0.5;
2060
2061                 /*
2062                  * For unmatched outer-rel rows, the picture is quite a lot different.
2063                  * In the first place, there is no reason to assume that these rows
2064                  * preferentially hit heavily-populated buckets; instead assume they
2065                  * are uncorrelated with the inner distribution and so they see an
2066                  * average bucket size of inner_path_rows / virtualbuckets.  In the
2067                  * second place, it seems likely that they will have few if any
2068                  * exact hash-code matches and so very few of the tuples in the
2069                  * bucket will actually require eval of the hash quals.  We don't
2070                  * have any good way to estimate how many will, but for the moment
2071                  * assume that the effective cost per bucket entry is one-tenth what
2072                  * it is for matchable tuples.
2073                  */
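                     /*
                      * Editorial note (not part of the original comment): matchable
                      * outer rows get the 0.5 discount applied above, so one-tenth
                      * of that is the 0.05 factor used below for unmatched rows.
                      */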
2074                 run_cost += hash_qual_cost.per_tuple *
2075                         (outer_path_rows - outer_matched_rows) *
2076                         clamp_row_est(inner_path_rows / virtualbuckets) * 0.05;
2077
2078                 /* Get # of tuples that will pass the basic join */
2079                 if (path->jpath.jointype == JOIN_SEMI)
2080                         hashjointuples = outer_matched_rows;
2081                 else
2082                         hashjointuples = outer_path_rows - outer_matched_rows;
2083         }
2084         else
2085         {
2086                 /*
2087                  * The number of tuple comparisons needed is the number of outer
2088                  * tuples times the typical number of tuples in a hash bucket, which
2089                  * is the inner relation size times its bucketsize fraction.  At each
2090                  * one, we need to evaluate the hashjoin quals.  But actually,
2091                  * charging the full qual eval cost at each tuple is pessimistic,
2092                  * since we don't evaluate the quals unless the hash values match
2093                  * exactly.  For lack of a better idea, halve the cost estimate to
2094                  * allow for that.
2095                  */
2096                 startup_cost += hash_qual_cost.startup;
2097                 run_cost += hash_qual_cost.per_tuple * outer_path_rows *
2098                         clamp_row_est(inner_path_rows * innerbucketsize) * 0.5;
2099
2100                 /*
2101                  * Get approx # tuples passing the hashquals.  We use
2102                  * approx_tuple_count here because we need an estimate done with
2103                  * JOIN_INNER semantics.
2104                  */
2105                 hashjointuples = approx_tuple_count(root, &path->jpath, hashclauses);
2106         }
2107
2108         /*
2109          * For each tuple that gets through the hashjoin proper, we charge
2110          * cpu_tuple_cost plus the cost of evaluating additional restriction
2111          * clauses that are to be applied at the join.  (This is pessimistic since
2112          * not all of the quals may get evaluated at each tuple.)
2113          */
2114         startup_cost += qp_qual_cost.startup;
2115         cpu_per_tuple = cpu_tuple_cost + qp_qual_cost.per_tuple;
2116         run_cost += cpu_per_tuple * hashjointuples;
2117
2118         path->jpath.path.startup_cost = startup_cost;
2119         path->jpath.path.total_cost = startup_cost + run_cost;
2120 }
2121
2122
2123 /*
2124  * cost_subplan
2125  *              Figure the costs for a SubPlan (or initplan).
2126  *
2127  * Note: we could dig the subplan's Plan out of the root list, but in practice
2128  * all callers have it handy already, so we make them pass it.
2129  */
2130 void
2131 cost_subplan(PlannerInfo *root, SubPlan *subplan, Plan *plan)
2132 {
2133         QualCost        sp_cost;
2134
2135         /* Figure any cost for evaluating the testexpr */
2136         cost_qual_eval(&sp_cost,
2137                                    make_ands_implicit((Expr *) subplan->testexpr),
2138                                    root);
2139
2140         if (subplan->useHashTable)
2141         {
2142                 /*
2143                  * If we are using a hash table for the subquery outputs, then the
2144                  * cost of evaluating the query is a one-time cost.  We charge one
2145                  * cpu_operator_cost per tuple for the work of loading the hashtable,
2146                  * too.
2147                  */
2148                 sp_cost.startup += plan->total_cost +
2149                         cpu_operator_cost * plan->plan_rows;
2150
2151                 /*
2152                  * The per-tuple costs include the cost of evaluating the lefthand
2153                  * expressions, plus the cost of probing the hashtable.  We already
2154                  * accounted for the lefthand expressions as part of the testexpr,
2155                  * and will also have counted one cpu_operator_cost for each
2156                  * comparison operator.  That is probably too low for the probing
2157                  * cost, but it's hard to make a better estimate, so live with it for
2158                  * now.
2159                  */
2160         }
2161         else
2162         {
2163                 /*
2164                  * Otherwise we will be rescanning the subplan output on each
2165                  * evaluation.  We need to estimate how much of the output we will
2166                  * actually need to scan.  NOTE: this logic should agree with the
2167                  * tuple_fraction estimates used by make_subplan() in
2168                  * plan/subselect.c.
2169                  */
2170                 Cost            plan_run_cost = plan->total_cost - plan->startup_cost;
2171
2172                 if (subplan->subLinkType == EXISTS_SUBLINK)
2173                 {
2174                         /* we only need to fetch 1 tuple */
2175                         sp_cost.per_tuple += plan_run_cost / plan->plan_rows;
2176                 }
2177                 else if (subplan->subLinkType == ALL_SUBLINK ||
2178                                  subplan->subLinkType == ANY_SUBLINK)
2179                 {
2180                         /* assume we need 50% of the tuples */
2181                         sp_cost.per_tuple += 0.50 * plan_run_cost;
2182                         /* also charge a cpu_operator_cost per row examined */
2183                         sp_cost.per_tuple += 0.50 * plan->plan_rows * cpu_operator_cost;
2184                 }
2185                 else
2186                 {
2187                         /* assume we need all tuples */
2188                         sp_cost.per_tuple += plan_run_cost;
2189                 }
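                     /*
                      * Editorial illustration (hypothetical numbers): for a subplan
                      * with plan_run_cost = 100 and plan_rows = 1000, EXISTS adds
                      * 100 / 1000 = 0.1 per call, ANY/ALL add 0.5 * 100 +
                      * 0.5 * 1000 * 0.0025 = 51.25, and other sublink types add the
                      * full 100.
                      */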
2190
2191                 /*
2192                  * Also account for subplan's startup cost. If the subplan is
2193                  * uncorrelated or undirect correlated, AND its topmost node is a Sort
2194                  * or Material node, assume that we'll only need to pay its startup
2195                  * cost once; otherwise assume we pay the startup cost every time.
2196                  */
2197                 if (subplan->parParam == NIL &&
2198                         (IsA(plan, Sort) ||
2199                          IsA(plan, Material)))
2200                         sp_cost.startup += plan->startup_cost;
2201                 else
2202                         sp_cost.per_tuple += plan->startup_cost;
2203         }
2204
2205         subplan->startup_cost = sp_cost.startup;
2206         subplan->per_call_cost = sp_cost.per_tuple;
2207 }
2208
2209
2210 /*
2211  * cost_qual_eval
2212  *              Estimate the CPU costs of evaluating a WHERE clause.
2213  *              The input can be either an implicitly-ANDed list of boolean
2214  *              expressions, or a list of RestrictInfo nodes.  (The latter is
2215  *              preferred since it allows caching of the results.)
2216  *              The result includes both a one-time (startup) component,
2217  *              and a per-evaluation component.
2218  */
2219 void
2220 cost_qual_eval(QualCost *cost, List *quals, PlannerInfo *root)
2221 {
2222         cost_qual_eval_context context;
2223         ListCell   *l;
2224
2225         context.root = root;
2226         context.total.startup = 0;
2227         context.total.per_tuple = 0;
2228
2229         /* We don't charge any cost for the implicit ANDing at top level ... */
2230
2231         foreach(l, quals)
2232         {
2233                 Node       *qual = (Node *) lfirst(l);
2234
2235                 cost_qual_eval_walker(qual, &context);
2236         }
2237
2238         *cost = context.total;
2239 }
2240
2241 /*
2242  * cost_qual_eval_node
2243  *              As above, for a single RestrictInfo or expression.
2244  */
2245 void
2246 cost_qual_eval_node(QualCost *cost, Node *qual, PlannerInfo *root)
2247 {
2248         cost_qual_eval_context context;
2249
2250         context.root = root;
2251         context.total.startup = 0;
2252         context.total.per_tuple = 0;
2253
2254         cost_qual_eval_walker(qual, &context);
2255
2256         *cost = context.total;
2257 }
2258
2259 static bool
2260 cost_qual_eval_walker(Node *node, cost_qual_eval_context *context)
2261 {
2262         if (node == NULL)
2263                 return false;
2264
2265         /*
2266          * RestrictInfo nodes contain an eval_cost field reserved for this
2267          * routine's use, so that it's not necessary to evaluate the qual clause's
2268          * cost more than once.  If the clause's cost hasn't been computed yet,
2269          * the field's startup value will contain -1.
2270          */
2271         if (IsA(node, RestrictInfo))
2272         {
2273                 RestrictInfo *rinfo = (RestrictInfo *) node;
2274
2275                 if (rinfo->eval_cost.startup < 0)
2276                 {
2277                         cost_qual_eval_context locContext;
2278
2279                         locContext.root = context->root;
2280                         locContext.total.startup = 0;
2281                         locContext.total.per_tuple = 0;
2282
2283                         /*
2284                          * For an OR clause, recurse into the marked-up tree so that we
2285                          * set the eval_cost for contained RestrictInfos too.
2286                          */
2287                         if (rinfo->orclause)
2288                                 cost_qual_eval_walker((Node *) rinfo->orclause, &locContext);
2289                         else
2290                                 cost_qual_eval_walker((Node *) rinfo->clause, &locContext);
2291
2292                         /*
2293                          * If the RestrictInfo is marked pseudoconstant, it will be tested
2294                          * only once, so treat its cost as all startup cost.
2295                          */
2296                         if (rinfo->pseudoconstant)
2297                         {
2298                                 /* count one execution during startup */
2299                                 locContext.total.startup += locContext.total.per_tuple;
2300                                 locContext.total.per_tuple = 0;
2301                         }
2302                         rinfo->eval_cost = locContext.total;
2303                 }
2304                 context->total.startup += rinfo->eval_cost.startup;
2305                 context->total.per_tuple += rinfo->eval_cost.per_tuple;
2306                 /* do NOT recurse into children */
2307                 return false;
2308         }
2309
2310         /*
2311          * For each operator or function node in the given tree, we charge the
2312          * estimated execution cost given by pg_proc.procost (remember to multiply
2313          * this by cpu_operator_cost).
2314          *
2315          * Vars and Consts are charged zero, and so are boolean operators (AND,
2316          * OR, NOT). Simplistic, but a lot better than no model at all.
2317          *
2318          * Note that Aggref and WindowFunc nodes are (and should be) treated
2319          * like Vars --- whatever execution cost they have is absorbed into
2320          * plan-node-specific costing.  As far as expression evaluation is
2321          * concerned they're just like Vars.
2322          *
2323          * Should we try to account for the possibility of short-circuit
2324          * evaluation of AND/OR?  Probably *not*, because that would make the
2325          * results depend on the clause ordering, and we are not in any position
2326          * to expect that the current ordering of the clauses is the one that's
2327          * going to end up being used.  (Is it worth applying order_qual_clauses
2328          * much earlier in the planning process to fix this?)
2329          */
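             /*
              * Editorial illustration (not part of the original comment): an
              * operator whose underlying function has pg_proc.procost = 1 (the
              * usual value for built-ins) is charged 1 * cpu_operator_cost =
              * 0.0025 per evaluation with the defaults, while a function marked
              * with procost = 100 would be charged 0.25.
              */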
2330         if (IsA(node, FuncExpr))
2331         {
2332                 context->total.per_tuple +=
2333                         get_func_cost(((FuncExpr *) node)->funcid) * cpu_operator_cost;
2334         }
2335         else if (IsA(node, OpExpr) ||
2336                          IsA(node, DistinctExpr) ||
2337                          IsA(node, NullIfExpr))
2338         {
2339                 /* rely on struct equivalence to treat these all alike */
2340                 set_opfuncid((OpExpr *) node);
2341                 context->total.per_tuple +=
2342                         get_func_cost(((OpExpr *) node)->opfuncid) * cpu_operator_cost;
2343         }
2344         else if (IsA(node, ScalarArrayOpExpr))
2345         {
2346                 /*
2347                  * Estimate that the operator will be applied to about half of the
2348                  * array elements before the answer is determined.
2349                  */
2350                 ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) node;
2351                 Node       *arraynode = (Node *) lsecond(saop->args);
2352
2353                 set_sa_opfuncid(saop);
2354                 context->total.per_tuple += get_func_cost(saop->opfuncid) *
2355                         cpu_operator_cost * estimate_array_length(arraynode) * 0.5;
2356         }
2357         else if (IsA(node, CoerceViaIO))
2358         {
2359                 CoerceViaIO *iocoerce = (CoerceViaIO *) node;
2360                 Oid                     iofunc;
2361                 Oid                     typioparam;
2362                 bool            typisvarlena;
2363
2364                 /* check the result type's input function */
2365                 getTypeInputInfo(iocoerce->resulttype,
2366                                                  &iofunc, &typioparam);
2367                 context->total.per_tuple += get_func_cost(iofunc) * cpu_operator_cost;
2368                 /* check the input type's output function */
2369                 getTypeOutputInfo(exprType((Node *) iocoerce->arg),
2370                                                   &iofunc, &typisvarlena);
2371                 context->total.per_tuple += get_func_cost(iofunc) * cpu_operator_cost;
2372         }
2373         else if (IsA(node, ArrayCoerceExpr))
2374         {
2375                 ArrayCoerceExpr *acoerce = (ArrayCoerceExpr *) node;
2376                 Node       *arraynode = (Node *) acoerce->arg;
2377
2378                 if (OidIsValid(acoerce->elemfuncid))
2379                         context->total.per_tuple += get_func_cost(acoerce->elemfuncid) *
2380                                 cpu_operator_cost * estimate_array_length(arraynode);
2381         }
2382         else if (IsA(node, RowCompareExpr))
2383         {
2384                 /* Conservatively assume we will check all the columns */
2385                 RowCompareExpr *rcexpr = (RowCompareExpr *) node;
2386                 ListCell   *lc;
2387
2388                 foreach(lc, rcexpr->opnos)
2389                 {
2390                         Oid                     opid = lfirst_oid(lc);
2391
2392                         context->total.per_tuple += get_func_cost(get_opcode(opid)) *
2393                                 cpu_operator_cost;
2394                 }
2395         }
2396         else if (IsA(node, CurrentOfExpr))
2397         {
2398                 /* Report high cost to prevent selection of anything but TID scan */
2399                 context->total.startup += disable_cost;
2400         }
2401         else if (IsA(node, SubLink))
2402         {
2403                 /* This routine should not be applied to un-planned expressions */
2404                 elog(ERROR, "cannot handle unplanned sub-select");
2405         }
2406         else if (IsA(node, SubPlan))
2407         {
2408                 /*
2409                  * A subplan node in an expression typically indicates that the
2410                  * subplan will be executed on each evaluation, so charge accordingly.
2411                  * (Sub-selects that can be executed as InitPlans have already been
2412                  * removed from the expression.)
2413                  */
2414                 SubPlan    *subplan = (SubPlan *) node;
2415
2416                 context->total.startup += subplan->startup_cost;
2417                 context->total.per_tuple += subplan->per_call_cost;
2418
2419                 /*
2420                  * We don't want to recurse into the testexpr, because it was already
2421                  * counted in the SubPlan node's costs.  So we're done.
2422                  */
2423                 return false;
2424         }
2425         else if (IsA(node, AlternativeSubPlan))
2426         {
2427                 /*
2428                  * Arbitrarily use the first alternative plan for costing.  (We should
2429                  * certainly only include one alternative, and we don't yet have
2430                  * enough information to know which one the executor is most likely
2431                  * to use.)
2432                  */
2433                 AlternativeSubPlan *asplan = (AlternativeSubPlan *) node;
2434
2435                 return cost_qual_eval_walker((Node *) linitial(asplan->subplans),
2436                                                                          context);
2437         }
2438
2439         /* recurse into children */
2440         return expression_tree_walker(node, cost_qual_eval_walker,
2441                                                                   (void *) context);
2442 }
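
The walker above folds every operator call, coercion, and SubPlan it encounters into a running pair of numbers: a one-time startup charge and a per-tuple charge. Below is a minimal standalone sketch of how those two numbers combine into a total qual-evaluation cost; the constant and the subplan figures are invented for illustration, and the struct merely mirrors the shape of QualCost rather than being the planner's own type.

#include <stdio.h>

/* mirrors the shape of QualCost: a one-time cost plus a cost per tuple */
typedef struct
{
	double		startup;
	double		per_tuple;
} DemoQualCost;

#define DEMO_CPU_OPERATOR_COST 0.0025	/* assumed value of cpu_operator_cost */

int
main(void)
{
	DemoQualCost cost = {0.0, 0.0};
	double		subplan_startup = 25.0;	/* invented SubPlan startup cost */
	double		subplan_per_call = 0.05;	/* invented SubPlan per-call cost */
	double		ntuples = 1000.0;		/* tuples the qual is applied to */

	/* two ordinary operator calls in the qual: charged on every tuple */
	cost.per_tuple += 2 * DEMO_CPU_OPERATOR_COST;

	/* a SubPlan: startup paid once, per-call cost paid on each evaluation */
	cost.startup += subplan_startup;
	cost.per_tuple += subplan_per_call;

	printf("total qual cost = %g\n", cost.startup + cost.per_tuple * ntuples);
	return 0;
}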
2443
2444
2445 /*
2446  * adjust_semi_join
2447  *        Estimate how much of the inner input a SEMI or ANTI join
2448  *        can be expected to scan.
2449  *
2450  * In a hash or nestloop SEMI/ANTI join, the executor will stop scanning
2451  * inner rows as soon as it finds a match to the current outer row.
2452  * We should therefore adjust some of the cost components for this effect.
2453  * This function computes some estimates needed for these adjustments.
2454  *
2455  * 'path' is already filled in except for the cost fields
2456  * 'sjinfo' is extra info about the join for selectivity estimation
2457  *
2458  * Returns TRUE if this is a SEMI or ANTI join, FALSE if not.
2459  *
2460  * Output parameters (set only in TRUE-result case):
2461  * *outer_match_frac is set to the fraction of the outer tuples that are
2462  *              expected to have at least one match.
2463  * *match_count is set to the average number of matches expected for
2464  *              outer tuples that have at least one match.
2465  * *indexed_join_quals is set to TRUE if all the joinquals are used as
2466  *              inner index quals, FALSE if not.
2467  *
2468  * indexed_join_quals can be passed as NULL if that information is not
2469  * relevant (it is only useful for the nestloop case).
2470  */
2471 static bool
2472 adjust_semi_join(PlannerInfo *root, JoinPath *path, SpecialJoinInfo *sjinfo,
2473                                  Selectivity *outer_match_frac,
2474                                  Selectivity *match_count,
2475                                  bool *indexed_join_quals)
2476 {
2477         JoinType        jointype = path->jointype;
2478         Selectivity jselec;
2479         Selectivity nselec;
2480         Selectivity avgmatch;
2481         SpecialJoinInfo norm_sjinfo;
2482         List       *joinquals;
2483         ListCell   *l;
2484
2485         /* Fall out if it's not JOIN_SEMI or JOIN_ANTI */
2486         if (jointype != JOIN_SEMI && jointype != JOIN_ANTI)
2487                 return false;
2488
2489         /*
2490          * Note: it's annoying to repeat this selectivity estimation on each call,
2491          * when the joinclause list will be the same for all path pairs
2492          * implementing a given join.  clausesel.c will save us from the worst
2493          * effects of this by caching at the RestrictInfo level; but perhaps it'd
2494          * be worth finding a way to cache the results at a higher level.
2495          */
2496
2497         /*
2498          * In an ANTI join, we must ignore clauses that are "pushed down",
2499          * since those won't affect the match logic.  In a SEMI join, we do not
2500          * distinguish joinquals from "pushed down" quals, so just use the whole
2501          * restrictinfo list.
2502          */
2503         if (jointype == JOIN_ANTI)
2504         {
2505                 joinquals = NIL;
2506                 foreach(l, path->joinrestrictinfo)
2507                 {
2508                         RestrictInfo *rinfo = (RestrictInfo *) lfirst(l);
2509
2510                         Assert(IsA(rinfo, RestrictInfo));
2511                         if (!rinfo->is_pushed_down)
2512                                 joinquals = lappend(joinquals, rinfo);
2513                 }
2514         }
2515         else
2516                 joinquals = path->joinrestrictinfo;
2517
2518         /*
2519          * Get the JOIN_SEMI or JOIN_ANTI selectivity of the join clauses.
2520          */
2521         jselec = clauselist_selectivity(root,
2522                                                                         joinquals,
2523                                                                         0,
2524                                                                         jointype,
2525                                                                         sjinfo);
2526
2527         /*
2528          * Also get the normal inner-join selectivity of the join clauses.
2529          */
2530         norm_sjinfo.type = T_SpecialJoinInfo;
2531         norm_sjinfo.min_lefthand = path->outerjoinpath->parent->relids;
2532         norm_sjinfo.min_righthand = path->innerjoinpath->parent->relids;
2533         norm_sjinfo.syn_lefthand = path->outerjoinpath->parent->relids;
2534         norm_sjinfo.syn_righthand = path->innerjoinpath->parent->relids;
2535         norm_sjinfo.jointype = JOIN_INNER;
2536         /* we don't bother trying to make the remaining fields valid */
2537         norm_sjinfo.lhs_strict = false;
2538         norm_sjinfo.delay_upper_joins = false;
2539         norm_sjinfo.join_quals = NIL;
2540
2541         nselec = clauselist_selectivity(root,
2542                                                                         joinquals,
2543                                                                         0,
2544                                                                         JOIN_INNER,
2545                                                                         &norm_sjinfo);
2546
2547         /* Avoid leaking a lot of ListCells */
2548         if (jointype == JOIN_ANTI)
2549                 list_free(joinquals);
2550
2551         /*
2552          * jselec can be interpreted as the fraction of outer-rel rows that have
2553          * any matches (this is true for both SEMI and ANTI cases).  And nselec
2554          * is the fraction of the Cartesian product that matches.  So, the
2555          * average number of matches for each outer-rel row that has at least
2556          * one match is nselec * inner_rows / jselec.
2557          *
2558          * Note: it is correct to use the inner rel's "rows" count here, not
2559          * PATH_ROWS(), even if the inner path under consideration is an inner
2560          * indexscan.  This is because we have included all the join clauses
2561          * in the selectivity estimate, even ones used in an inner indexscan.
2562          */
2563         if (jselec > 0)                         /* protect against zero divide */
2564         {
2565                 avgmatch = nselec * path->innerjoinpath->parent->rows / jselec;
2566                 /* Clamp to sane range */
2567                 avgmatch = Max(1.0, avgmatch);
2568         }
2569         else
2570                 avgmatch = 1.0;
2571
2572         *outer_match_frac = jselec;
2573         *match_count = avgmatch;
2574
2575         /*
2576          * If requested, check whether the inner path uses all the joinquals
2577          * as indexquals.  (If that's true, we can assume that an unmatched
2578          * outer tuple is cheap to process, whereas otherwise it's probably
2579          * expensive.)
2580          */
2581         if (indexed_join_quals)
2582         {
2583                 List       *nrclauses;
2584
2585                 nrclauses = select_nonredundant_join_clauses(root,
2586                                                                                                          path->joinrestrictinfo,
2587                                                                                                          path->innerjoinpath);
2588                 *indexed_join_quals = (nrclauses == NIL);
2589         }
2590
2591         return true;
2592 }
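
To make the arithmetic described in the comment block above concrete, here is a minimal standalone sketch of the match-count computation. The jselec, nselec, and inner-row values are invented; in adjust_semi_join they come from clauselist_selectivity and the inner relation's row estimate.

#include <stdio.h>

#define Max(x, y)  ((x) > (y) ? (x) : (y))

int
main(void)
{
	double		jselec = 0.2;		/* assumed: fraction of outer rows with at least one match */
	double		nselec = 0.001;		/* assumed: plain inner-join selectivity of the same quals */
	double		inner_rows = 5000;	/* assumed: inner relation row count */
	double		avgmatch;

	if (jselec > 0)					/* protect against zero divide */
	{
		/* average matches per outer row that has any match at all */
		avgmatch = nselec * inner_rows / jselec;
		/* clamp to sane range */
		avgmatch = Max(1.0, avgmatch);
	}
	else
		avgmatch = 1.0;

	printf("outer_match_frac = %g, match_count = %g\n", jselec, avgmatch);
	return 0;
}

With these inputs, 20% of the outer rows are expected to find a match, and an outer row that matches at all is expected to have about 25 matching inner rows; cost_nestloop and cost_hashjoin use these two outputs to scale down the expected inner-scan work for SEMI and ANTI joins.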
2593
2594
2595 /*
2596  * approx_tuple_count
2597  *              Quick-and-dirty estimation of the number of join rows passing
2598  *              a set of qual conditions.
2599  *
2600  * The quals can be either an implicitly-ANDed list of boolean expressions,
2601  * or a list of RestrictInfo nodes (typically the latter).
2602  *
2603  * We intentionally compute the selectivity under JOIN_INNER rules, even
2604  * if it's some type of outer join.  This is appropriate because we are
2605  * trying to figure out how many tuples pass the initial merge or hash
2606  * join step.
2607  *
2608  * This is quick-and-dirty because we bypass clauselist_selectivity, and
2609  * simply multiply the independent clause selectivities together.  Now
2610  * clauselist_selectivity often can't do any better than that anyhow, but
2611  * for some situations (such as range constraints) it is smarter.  However,
2612  * we can't effectively cache the results of clauselist_selectivity, whereas
2613  * the individual clause selectivities can be and are cached.
2614  *
2615  * Since we are only using the results to estimate how many potential
2616  * output tuples are generated and passed through qpqual checking, it
2617  * seems OK to live with the approximation.
2618  */
2619 static double
2620 approx_tuple_count(PlannerInfo *root, JoinPath *path, List *quals)
2621 {
2622         double          tuples;
2623         double          outer_tuples = path->outerjoinpath->parent->rows;
2624         double          inner_tuples = path->innerjoinpath->parent->rows;
2625         SpecialJoinInfo sjinfo;
2626         Selectivity selec = 1.0;
2627         ListCell   *l;
2628
2629         /*
2630          * Make up a SpecialJoinInfo for JOIN_INNER semantics.
2631          */
2632         sjinfo.type = T_SpecialJoinInfo;
2633         sjinfo.min_lefthand = path->outerjoinpath->parent->relids;
2634         sjinfo.min_righthand = path->innerjoinpath->parent->relids;
2635         sjinfo.syn_lefthand = path->outerjoinpath->parent->relids;
2636         sjinfo.syn_righthand = path->innerjoinpath->parent->relids;
2637         sjinfo.jointype = JOIN_INNER;
2638         /* we don't bother trying to make the remaining fields valid */
2639         sjinfo.lhs_strict = false;
2640         sjinfo.delay_upper_joins = false;
2641         sjinfo.join_quals = NIL;
2642
2643         /* Get the approximate selectivity */
2644         foreach(l, quals)
2645         {
2646                 Node       *qual = (Node *) lfirst(l);
2647
2648                 /* Note that clause_selectivity will be able to cache its result */
2649                 selec *= clause_selectivity(root, qual, 0, JOIN_INNER, &sjinfo);
2650         }
2651
2652         /* Apply it to the input relation sizes */
2653         tuples = selec * outer_tuples * inner_tuples;
2654
2655         return clamp_row_est(tuples);
2656 }
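
A worked illustration of the shortcut taken here: the clause selectivities are treated as independent, multiplied together, applied to the Cartesian product, and then clamped the way clamp_row_est clamps (at least one row, rounded to an integer). All the numeric inputs below are invented.

#include <stdio.h>
#include <math.h>

/* same clamping rule as clamp_row_est: at least one row, rounded */
static double
demo_clamp_row_est(double nrows)
{
	return (nrows <= 1.0) ? 1.0 : rint(nrows);
}

int
main(void)
{
	double		outer_tuples = 10000;	/* assumed outer relation size */
	double		inner_tuples = 2000;	/* assumed inner relation size */
	double		clause_selec[] = {0.01, 0.5};	/* assumed per-clause selectivities */
	double		selec = 1.0;
	int			i;

	for (i = 0; i < 2; i++)
		selec *= clause_selec[i];	/* treat the clauses as independent */

	/* 0.005 * 10000 * 2000 = 100000 rows out of the raw join */
	printf("approx tuples = %g\n",
		   demo_clamp_row_est(selec * outer_tuples * inner_tuples));
	return 0;
}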
2657
2658
2659 /*
2660  * set_baserel_size_estimates
2661  *              Set the size estimates for the given base relation.
2662  *
2663  * The rel's targetlist and restrictinfo list must have been constructed
2664  * already.
2665  *
2666  * We set the following fields of the rel node:
2667  *      rows: the estimated number of output tuples (after applying
2668  *                restriction clauses).
2669  *      width: the estimated average output tuple width in bytes.
2670  *      baserestrictcost: estimated cost of evaluating baserestrictinfo clauses.
2671  */
2672 void
2673 set_baserel_size_estimates(PlannerInfo *root, RelOptInfo *rel)
2674 {
2675         double          nrows;
2676
2677         /* Should only be applied to base relations */
2678         Assert(rel->relid > 0);
2679
2680         nrows = rel->tuples *
2681                 clauselist_selectivity(root,
2682                                                            rel->baserestrictinfo,
2683                                                            0,
2684                                                            JOIN_INNER,
2685                                                            NULL);
2686
2687         rel->rows = clamp_row_est(nrows);
2688
2689         cost_qual_eval(&rel->baserestrictcost, rel->baserestrictinfo, root);
2690
2691         set_rel_width(root, rel);
2692 }
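
In equation form this is simply rows = clamp_row_est(tuples * selectivity(baserestrictinfo)). A tiny standalone sketch with invented numbers:

#include <stdio.h>
#include <math.h>

int
main(void)
{
	double		tuples = 1000000;		/* assumed raw table row count */
	double		selectivity = 0.003;	/* assumed combined restriction selectivity */
	double		nrows = tuples * selectivity;

	/* clamp as clamp_row_est does: at least one row, rounded to an integer */
	nrows = (nrows <= 1.0) ? 1.0 : rint(nrows);
	printf("estimated rows = %g\n", nrows);		/* 3000 */
	return 0;
}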
2693
2694 /*
2695  * set_joinrel_size_estimates
2696  *              Set the size estimates for the given join relation.
2697  *
2698  * The rel's targetlist must have been constructed already, and a
2699  * restriction clause list that matches the given component rels must
2700  * be provided.
2701  *
2702  * Since there is more than one way to make a joinrel for more than two
2703  * base relations, the results we get here could depend on which component
2704  * rel pair is provided.  In theory we should get the same answers no matter
2705  * which pair is provided; in practice, since the selectivity estimation
2706  * routines don't handle all cases equally well, we might not.  But there's
2707  * not much to be done about it.  (Would it make sense to repeat the
2708  * calculations for each pair of input rels that's encountered, and somehow
2709  * average the results?  Probably way more trouble than it's worth.)
2710  *
2711  * We set only the rows field here.  The width field was already set by
2712  * build_joinrel_tlist, and baserestrictcost is not used for join rels.
2713  */
2714 void
2715 set_joinrel_size_estimates(PlannerInfo *root, RelOptInfo *rel,
2716                                                    RelOptInfo *outer_rel,
2717                                                    RelOptInfo *inner_rel,
2718                                                    SpecialJoinInfo *sjinfo,
2719                                                    List *restrictlist)
2720 {
2721         JoinType        jointype = sjinfo->jointype;
2722         Selectivity jselec;
2723         Selectivity pselec;
2724         double          nrows;
2725
2726         /*
2727          * Compute joinclause selectivity.  Note that we are only considering
2728          * clauses that become restriction clauses at this join level; we are not
2729          * double-counting them because they were not considered in estimating the
2730          * sizes of the component rels.
2731          *
2732          * For an outer join, we have to distinguish the selectivity of the join's
2733          * own clauses (JOIN/ON conditions) from any clauses that were "pushed
2734          * down".  For inner joins we just count them all as joinclauses.
2735          */
2736         if (IS_OUTER_JOIN(jointype))
2737         {
2738                 List       *joinquals = NIL;
2739                 List       *pushedquals = NIL;
2740                 ListCell   *l;
2741
2742                 /* Grovel through the clauses to separate into two lists */
2743                 foreach(l, restrictlist)
2744                 {
2745                         RestrictInfo *rinfo = (RestrictInfo *) lfirst(l);
2746
2747                         Assert(IsA(rinfo, RestrictInfo));
2748                         if (rinfo->is_pushed_down)
2749                                 pushedquals = lappend(pushedquals, rinfo);
2750                         else
2751                                 joinquals = lappend(joinquals, rinfo);
2752                 }
2753
2754                 /* Get the separate selectivities */
2755                 jselec = clauselist_selectivity(root,
2756                                                                                 joinquals,
2757                                                                                 0,
2758                                                                                 jointype,
2759                                                                                 sjinfo);
2760                 pselec = clauselist_selectivity(root,
2761                                                                                 pushedquals,
2762                                                                                 0,
2763                                                                                 jointype,
2764                                                                                 sjinfo);
2765
2766                 /* Avoid leaking a lot of ListCells */
2767                 list_free(joinquals);
2768                 list_free(pushedquals);
2769         }
2770         else
2771         {
2772                 jselec = clauselist_selectivity(root,
2773                                                                                 restrictlist,
2774                                                                                 0,
2775                                                                                 jointype,
2776                                                                                 sjinfo);
2777                 pselec = 0.0;                   /* not used, keep compiler quiet */
2778         }
2779
2780         /*
2781          * Basically, we multiply size of Cartesian product by selectivity.
2782          *
2783          * If we are doing an outer join, take that into account: the joinqual
2784          * selectivity has to be clamped using the knowledge that the output must
2785          * be at least as large as the non-nullable input.  However, any
2786          * pushed-down quals are applied after the outer join, so their
2787          * selectivity applies fully.
2788          *
2789          * For JOIN_SEMI and JOIN_ANTI, the selectivity is defined as the fraction
2790          * of LHS rows that have matches, and we apply that straightforwardly.
2791          */
2792         switch (jointype)
2793         {
2794                 case JOIN_INNER:
2795                         nrows = outer_rel->rows * inner_rel->rows * jselec;
2796                         break;
2797                 case JOIN_LEFT:
2798                         nrows = outer_rel->rows * inner_rel->rows * jselec;
2799                         if (nrows < outer_rel->rows)
2800                                 nrows = outer_rel->rows;
2801                         nrows *= pselec;
2802                         break;
2803                 case JOIN_FULL:
2804                         nrows = outer_rel->rows * inner_rel->rows * jselec;
2805                         if (nrows < outer_rel->rows)
2806                                 nrows = outer_rel->rows;
2807                         if (nrows < inner_rel->rows)
2808                                 nrows = inner_rel->rows;
2809                         nrows *= pselec;
2810                         break;
2811                 case JOIN_SEMI:
2812                         nrows = outer_rel->rows * jselec;
2813                         /* pselec not used */
2814                         break;
2815                 case JOIN_ANTI:
2816                         nrows = outer_rel->rows * (1.0 - jselec);
2817                         nrows *= pselec;
2818                         break;
2819                 default:
2820                         /* other values not expected here */
2821                         elog(ERROR, "unrecognized join type: %d", (int) jointype);
2822                         nrows = 0;                      /* keep compiler quiet */
2823                         break;
2824         }
2825
2826         rel->rows = clamp_row_est(nrows);
2827 }
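
The switch above boils down to a handful of formulas. The standalone sketch below evaluates the SEMI, ANTI, and LEFT cases for one set of invented inputs; in the real function jselec and pselec come from clauselist_selectivity and the row counts from the component RelOptInfos.

#include <stdio.h>

int
main(void)
{
	double		outer_rows = 1000;	/* assumed outer component size */
	double		inner_rows = 500;	/* assumed inner component size */
	double		jselec = 0.2;		/* assumed joinqual selectivity (match fraction for SEMI/ANTI) */
	double		pselec = 0.5;		/* assumed pushed-down qual selectivity */
	double		nrows;

	/* JOIN_SEMI: fraction of outer rows that have at least one match */
	nrows = outer_rows * jselec;
	printf("SEMI: %g rows\n", nrows);		/* 200 */

	/* JOIN_ANTI: the unmatched fraction, then the pushed-down quals */
	nrows = outer_rows * (1.0 - jselec) * pselec;
	printf("ANTI: %g rows\n", nrows);		/* 400 */

	/* JOIN_LEFT: joinqual estimate clamped to the outer size, then pushed-down quals */
	nrows = outer_rows * inner_rows * jselec;
	if (nrows < outer_rows)
		nrows = outer_rows;
	nrows *= pselec;
	printf("LEFT: %g rows\n", nrows);		/* 50000 */
	return 0;
}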
2828
2829 /*
2830  * set_function_size_estimates
2831  *              Set the size estimates for a base relation that is a function call.
2832  *
2833  * The rel's targetlist and restrictinfo list must have been constructed
2834  * already.
2835  *
2836  * We set the same fields as set_baserel_size_estimates.
2837  */
2838 void
2839 set_function_size_estimates(PlannerInfo *root, RelOptInfo *rel)
2840 {
2841         RangeTblEntry *rte;
2842
2843         /* Should only be applied to base relations that are functions */
2844         Assert(rel->relid > 0);
2845         rte = planner_rt_fetch(rel->relid, root);
2846         Assert(rte->rtekind == RTE_FUNCTION);
2847
2848         /* Estimate number of rows the function itself will return */
2849         rel->tuples = clamp_row_est(expression_returns_set_rows(rte->funcexpr));
2850
2851         /* Now estimate number of output rows, etc */
2852         set_baserel_size_estimates(root, rel);
2853 }
2854
2855 /*
2856  * set_values_size_estimates
2857  *              Set the size estimates for a base relation that is a values list.
2858  *
2859  * The rel's targetlist and restrictinfo list must have been constructed
2860  * already.
2861  *
2862  * We set the same fields as set_baserel_size_estimates.
2863  */
2864 void
2865 set_values_size_estimates(PlannerInfo *root, RelOptInfo *rel)
2866 {
2867         RangeTblEntry *rte;
2868
2869         /* Should only be applied to base relations that are values lists */
2870         Assert(rel->relid > 0);
2871         rte = planner_rt_fetch(rel->relid, root);
2872         Assert(rte->rtekind == RTE_VALUES);
2873
2874         /*
2875          * Estimate number of rows the values list will return. We know this
2876          * precisely based on the list length (well, barring set-returning
2877          * functions in list items, but that's a refinement not catered for
2878          * anywhere else either).
2879          */
2880         rel->tuples = list_length(rte->values_lists);
2881
2882         /* Now estimate number of output rows, etc */
2883         set_baserel_size_estimates(root, rel);
2884 }
2885
2886 /*
2887  * set_cte_size_estimates
2888  *              Set the size estimates for a base relation that is a CTE reference.
2889  *
2890  * The rel's targetlist and restrictinfo list must have been constructed
2891  * already, and we need the completed plan for the CTE (if a regular CTE)
2892  * or the non-recursive term (if a self-reference).
2893  *
2894  * We set the same fields as set_baserel_size_estimates.
2895  */
2896 void
2897 set_cte_size_estimates(PlannerInfo *root, RelOptInfo *rel, Plan *cteplan)
2898 {
2899         RangeTblEntry *rte;
2900
2901         /* Should only be applied to base relations that are CTE references */
2902         Assert(rel->relid > 0);
2903         rte = planner_rt_fetch(rel->relid, root);
2904         Assert(rte->rtekind == RTE_CTE);
2905
2906         if (rte->self_reference)
2907         {
2908                 /*
2909                  * In a self-reference, arbitrarily assume the average worktable
2910                  * size is about 10 times the nonrecursive term's size.
2911                  */
2912                 rel->tuples = 10 * cteplan->plan_rows;
2913         }
2914         else
2915         {
2916                 /* Otherwise just believe the CTE plan's output estimate */
2917                 rel->tuples = cteplan->plan_rows;
2918         }
2919
2920         /* Now estimate number of output rows, etc */
2921         set_baserel_size_estimates(root, rel);
2922 }
2923
2924
2925 /*
2926  * set_rel_width
2927  *              Set the estimated output width of a base relation.
2928  *
2929  * NB: this works best on plain relations because it prefers to look at
2930  * real Vars.  It will fail to make use of pg_statistic info when applied
2931  * to a subquery relation, even if the subquery outputs are simple vars
2932  * that we could have gotten info for.  Is it worth trying to be smarter
2933  * about subqueries?
2934  *
2935  * The per-attribute width estimates are cached for possible re-use while
2936  * building join relations.
2937  */
2938 static void
2939 set_rel_width(PlannerInfo *root, RelOptInfo *rel)
2940 {
2941         Oid                     reloid = planner_rt_fetch(rel->relid, root)->relid;
2942         int32           tuple_width = 0;
2943         ListCell   *lc;
2944
2945         foreach(lc, rel->reltargetlist)
2946         {
2947                 Node       *node = (Node *) lfirst(lc);
2948
2949                 if (IsA(node, Var))
2950                 {
2951                         Var                *var = (Var *) node;
2952                         int                     ndx;
2953                         int32           item_width;
2954
2955                         Assert(var->varno == rel->relid);
2956                         Assert(var->varattno >= rel->min_attr);
2957                         Assert(var->varattno <= rel->max_attr);
2958
2959                         ndx = var->varattno - rel->min_attr;
2960
2961                         /*
2962                          * The width probably hasn't been cached yet, but may as well check
2963                          */
2964                         if (rel->attr_widths[ndx] > 0)
2965                         {
2966                                 tuple_width += rel->attr_widths[ndx];
2967                                 continue;
2968                         }
2969
2970                         /* Try to get column width from statistics */
2971                         if (reloid != InvalidOid)
2972                         {
2973                                 item_width = get_attavgwidth(reloid, var->varattno);
2974                                 if (item_width > 0)
2975                                 {
2976                                         rel->attr_widths[ndx] = item_width;
2977                                         tuple_width += item_width;
2978                                         continue;
2979                                 }
2980                         }
2981
2982                         /*
2983                          * Not a plain relation, or can't find statistics for it. Estimate
2984                          * using just the type info.
2985                          */
2986                         item_width = get_typavgwidth(var->vartype, var->vartypmod);
2987                         Assert(item_width > 0);
2988                         rel->attr_widths[ndx] = item_width;
2989                         tuple_width += item_width;
2990                 }
2991                 else if (IsA(node, PlaceHolderVar))
2992                 {
2993                         PlaceHolderVar *phv = (PlaceHolderVar *) node;
2994                         PlaceHolderInfo *phinfo = find_placeholder_info(root, phv);
2995
2996                         tuple_width += phinfo->ph_width;
2997                 }
2998                 else
2999                 {
3000                         /* For now, punt on whole-row child Vars */
3001                         tuple_width += 32;      /* arbitrary */
3002                 }
3003         }
3004         Assert(tuple_width >= 0);
3005         rel->width = tuple_width;
3006 }
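
The per-column logic above is a three-step fallback: a width already cached in attr_widths, else the average width recorded in pg_statistic, else a default derived from the column's type. The sketch below walks that chain with stand-in lookups; demo_get_attavgwidth and demo_get_typavgwidth are invented placeholders for get_attavgwidth and get_typavgwidth, and the cached widths are made up.

#include <stdio.h>

/* stand-ins for the catalog lookups; 0 means "no information available" */
static int
demo_get_attavgwidth(int attno)
{
	return (attno == 1) ? 8 : 0;	/* pretend only column 1 has statistics */
}

static int
demo_get_typavgwidth(int attno)
{
	return 32;						/* pretend the type-based default is 32 bytes */
}

int
main(void)
{
	int			cached_width[3] = {0, 0, 4};	/* assumed attr_widths cache: only column 3 cached */
	int			tuple_width = 0;
	int			attno;

	for (attno = 1; attno <= 3; attno++)
	{
		int			width = cached_width[attno - 1];	/* 1: cached estimate */

		if (width <= 0)
			width = demo_get_attavgwidth(attno);		/* 2: pg_statistic average */
		if (width <= 0)
			width = demo_get_typavgwidth(attno);		/* 3: type-based default */
		cached_width[attno - 1] = width;				/* cache for later join planning */
		tuple_width += width;
	}
	printf("estimated tuple width = %d bytes\n", tuple_width);	/* 8 + 32 + 4 = 44 */
	return 0;
}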
3007
3008 /*
3009  * relation_byte_size
3010  *        Estimate the storage space in bytes for a given number of tuples
3011  *        of a given width (size in bytes).
3012  */
3013 static double
3014 relation_byte_size(double tuples, int width)
3015 {
3016         return tuples * (MAXALIGN(width) + MAXALIGN(sizeof(HeapTupleHeaderData)));
3017 }
3018
3019 /*
3020  * page_size
3021  *        Returns an estimate of the number of pages covered by a given
3022  *        number of tuples of a given width (size in bytes).
3023  */
3024 static double
3025 page_size(double tuples, int width)
3026 {
3027         return ceil(relation_byte_size(tuples, width) / BLCKSZ);
3028 }
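
For a sense of scale for these two helpers, the sketch below plugs in an assumed 8 kB block size, 8-byte alignment, and a 23-byte heap tuple header; the real code uses BLCKSZ, MAXALIGN, and sizeof(HeapTupleHeaderData), whose values depend on the build.

#include <stdio.h>
#include <math.h>

#define DEMO_BLCKSZ			8192	/* assumed block size */
#define DEMO_TUPLE_HDR		23		/* assumed sizeof(HeapTupleHeaderData) */
#define DEMO_MAXALIGN(x)	(((x) + 7) & ~7)	/* assume 8-byte alignment */

static double
demo_relation_byte_size(double tuples, int width)
{
	return tuples * (DEMO_MAXALIGN(width) + DEMO_MAXALIGN(DEMO_TUPLE_HDR));
}

static double
demo_page_size(double tuples, int width)
{
	return ceil(demo_relation_byte_size(tuples, width) / DEMO_BLCKSZ);
}

int
main(void)
{
	/* 10000 tuples of width 100: (104 + 24) * 10000 = 1280000 bytes -> 157 pages */
	printf("bytes = %.0f, pages = %.0f\n",
		   demo_relation_byte_size(10000, 100),
		   demo_page_size(10000, 100));
	return 0;
}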