]> granicus.if.org Git - postgresql/blob - src/backend/optimizer/path/costsize.c
Change the planner-to-executor API so that the planner tells the executor
[postgresql] / src / backend / optimizer / path / costsize.c
1 /*-------------------------------------------------------------------------
2  *
3  * costsize.c
4  *        Routines to compute (and set) relation sizes and path costs
5  *
6  * Path costs are measured in arbitrary units established by these basic
7  * parameters:
8  *
9  *      seq_page_cost           Cost of a sequential page fetch
10  *      random_page_cost        Cost of a non-sequential page fetch
11  *      cpu_tuple_cost          Cost of typical CPU time to process a tuple
12  *      cpu_index_tuple_cost  Cost of typical CPU time to process an index tuple
13  *      cpu_operator_cost       Cost of CPU time to execute an operator or function
14  *
15  * We expect that the kernel will typically do some amount of read-ahead
16  * optimization; this in conjunction with seek costs means that seq_page_cost
17  * is normally considerably less than random_page_cost.  (However, if the
18  * database is fully cached in RAM, it is reasonable to set them equal.)
19  *
20  * We also use a rough estimate "effective_cache_size" of the number of
21  * disk pages in Postgres + OS-level disk cache.  (We can't simply use
22  * NBuffers for this purpose because that would ignore the effects of
23  * the kernel's disk cache.)
24  *
25  * Obviously, taking constants for these values is an oversimplification,
26  * but it's tough enough to get any useful estimates even at this level of
27  * detail.      Note that all of these parameters are user-settable, in case
28  * the default values are drastically off for a particular platform.
29  *
30  * We compute two separate costs for each path:
31  *              total_cost: total estimated cost to fetch all tuples
32  *              startup_cost: cost that is expended before first tuple is fetched
33  * In some scenarios, such as when there is a LIMIT or we are implementing
34  * an EXISTS(...) sub-select, it is not necessary to fetch all tuples of the
35  * path's result.  A caller can estimate the cost of fetching a partial
36  * result by interpolating between startup_cost and total_cost.  In detail:
37  *              actual_cost = startup_cost +
38  *                      (total_cost - startup_cost) * tuples_to_fetch / path->parent->rows;
39  * Note that a base relation's rows count (and, by extension, plan_rows for
40  * plan nodes below the LIMIT node) are set without regard to any LIMIT, so
41  * that this equation works properly.  (Also, these routines guarantee not to
42  * set the rows count to zero, so there will be no zero divide.)  The LIMIT is
43  * applied as a top-level plan node.
44  *
45  * For largely historical reasons, most of the routines in this module use
46  * the passed result Path only to store their startup_cost and total_cost
47  * results into.  All the input data they need is passed as separate
48  * parameters, even though much of it could be extracted from the Path.
49  * An exception is made for the cost_XXXjoin() routines, which expect all
50  * the non-cost fields of the passed XXXPath to be filled in.
51  *
52  *
53  * Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
54  * Portions Copyright (c) 1994, Regents of the University of California
55  *
56  * IDENTIFICATION
57  *        $PostgreSQL: pgsql/src/backend/optimizer/path/costsize.c,v 1.174 2007/01/10 18:06:03 tgl Exp $
58  *
59  *-------------------------------------------------------------------------
60  */
61
62 #include "postgres.h"
63
64 #include <math.h>
65
66 #include "executor/nodeHash.h"
67 #include "miscadmin.h"
68 #include "optimizer/clauses.h"
69 #include "optimizer/cost.h"
70 #include "optimizer/pathnode.h"
71 #include "parser/parsetree.h"
72 #include "utils/lsyscache.h"
73 #include "utils/selfuncs.h"
74 #include "utils/tuplesort.h"
75
76
77 #define LOG2(x)  (log(x) / 0.693147180559945)
78
79 /*
80  * Some Paths return less than the nominal number of rows of their parent
81  * relations; join nodes need to do this to get the correct input count:
82  */
83 #define PATH_ROWS(path) \
84         (IsA(path, UniquePath) ? \
85          ((UniquePath *) (path))->rows : \
86          (path)->parent->rows)
87
88
89 double          seq_page_cost = DEFAULT_SEQ_PAGE_COST;
90 double          random_page_cost = DEFAULT_RANDOM_PAGE_COST;
91 double          cpu_tuple_cost = DEFAULT_CPU_TUPLE_COST;
92 double          cpu_index_tuple_cost = DEFAULT_CPU_INDEX_TUPLE_COST;
93 double          cpu_operator_cost = DEFAULT_CPU_OPERATOR_COST;
94
95 int                     effective_cache_size = DEFAULT_EFFECTIVE_CACHE_SIZE;
96
97 Cost            disable_cost = 100000000.0;
98
99 bool            enable_seqscan = true;
100 bool            enable_indexscan = true;
101 bool            enable_bitmapscan = true;
102 bool            enable_tidscan = true;
103 bool            enable_sort = true;
104 bool            enable_hashagg = true;
105 bool            enable_nestloop = true;
106 bool            enable_mergejoin = true;
107 bool            enable_hashjoin = true;
108
109
110 static bool cost_qual_eval_walker(Node *node, QualCost *total);
111 static Selectivity approx_selectivity(PlannerInfo *root, List *quals,
112                                    JoinType jointype);
113 static Selectivity join_in_selectivity(JoinPath *path, PlannerInfo *root);
114 static void set_rel_width(PlannerInfo *root, RelOptInfo *rel);
115 static double relation_byte_size(double tuples, int width);
116 static double page_size(double tuples, int width);
117
118
119 /*
120  * clamp_row_est
121  *              Force a row-count estimate to a sane value.
122  */
123 double
124 clamp_row_est(double nrows)
125 {
126         /*
127          * Force estimate to be at least one row, to make explain output look
128          * better and to avoid possible divide-by-zero when interpolating costs.
129          * Make it an integer, too.
130          */
131         if (nrows <= 1.0)
132                 nrows = 1.0;
133         else
134                 nrows = rint(nrows);
135
136         return nrows;
137 }
138
139
140 /*
141  * cost_seqscan
142  *        Determines and returns the cost of scanning a relation sequentially.
143  */
144 void
145 cost_seqscan(Path *path, PlannerInfo *root,
146                          RelOptInfo *baserel)
147 {
148         Cost            startup_cost = 0;
149         Cost            run_cost = 0;
150         Cost            cpu_per_tuple;
151
152         /* Should only be applied to base relations */
153         Assert(baserel->relid > 0);
154         Assert(baserel->rtekind == RTE_RELATION);
155
156         if (!enable_seqscan)
157                 startup_cost += disable_cost;
158
159         /*
160          * disk costs
161          */
162         run_cost += seq_page_cost * baserel->pages;
163
164         /* CPU costs */
165         startup_cost += baserel->baserestrictcost.startup;
166         cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
167         run_cost += cpu_per_tuple * baserel->tuples;
168
169         path->startup_cost = startup_cost;
170         path->total_cost = startup_cost + run_cost;
171 }
172
173 /*
174  * cost_index
175  *        Determines and returns the cost of scanning a relation using an index.
176  *
177  * 'index' is the index to be used
178  * 'indexQuals' is the list of applicable qual clauses (implicit AND semantics)
179  * 'outer_rel' is the outer relation when we are considering using the index
180  *              scan as the inside of a nestloop join (hence, some of the indexQuals
181  *              are join clauses, and we should expect repeated scans of the index);
182  *              NULL for a plain index scan
183  *
184  * cost_index() takes an IndexPath not just a Path, because it sets a few
185  * additional fields of the IndexPath besides startup_cost and total_cost.
186  * These fields are needed if the IndexPath is used in a BitmapIndexScan.
187  *
188  * NOTE: 'indexQuals' must contain only clauses usable as index restrictions.
189  * Any additional quals evaluated as qpquals may reduce the number of returned
190  * tuples, but they won't reduce the number of tuples we have to fetch from
191  * the table, so they don't reduce the scan cost.
192  *
193  * NOTE: as of 8.0, indexQuals is a list of RestrictInfo nodes, where formerly
194  * it was a list of bare clause expressions.
195  */
196 void
197 cost_index(IndexPath *path, PlannerInfo *root,
198                    IndexOptInfo *index,
199                    List *indexQuals,
200                    RelOptInfo *outer_rel)
201 {
202         RelOptInfo *baserel = index->rel;
203         Cost            startup_cost = 0;
204         Cost            run_cost = 0;
205         Cost            indexStartupCost;
206         Cost            indexTotalCost;
207         Selectivity indexSelectivity;
208         double          indexCorrelation,
209                                 csquared;
210         Cost            min_IO_cost,
211                                 max_IO_cost;
212         Cost            cpu_per_tuple;
213         double          tuples_fetched;
214         double          pages_fetched;
215
216         /* Should only be applied to base relations */
217         Assert(IsA(baserel, RelOptInfo) &&
218                    IsA(index, IndexOptInfo));
219         Assert(baserel->relid > 0);
220         Assert(baserel->rtekind == RTE_RELATION);
221
222         if (!enable_indexscan)
223                 startup_cost += disable_cost;
224
225         /*
226          * Call index-access-method-specific code to estimate the processing cost
227          * for scanning the index, as well as the selectivity of the index (ie,
228          * the fraction of main-table tuples we will have to retrieve) and its
229          * correlation to the main-table tuple order.
230          */
231         OidFunctionCall8(index->amcostestimate,
232                                          PointerGetDatum(root),
233                                          PointerGetDatum(index),
234                                          PointerGetDatum(indexQuals),
235                                          PointerGetDatum(outer_rel),
236                                          PointerGetDatum(&indexStartupCost),
237                                          PointerGetDatum(&indexTotalCost),
238                                          PointerGetDatum(&indexSelectivity),
239                                          PointerGetDatum(&indexCorrelation));
240
241         /*
242          * Save amcostestimate's results for possible use in bitmap scan planning.
243          * We don't bother to save indexStartupCost or indexCorrelation, because a
244          * bitmap scan doesn't care about either.
245          */
246         path->indextotalcost = indexTotalCost;
247         path->indexselectivity = indexSelectivity;
248
249         /* all costs for touching index itself included here */
250         startup_cost += indexStartupCost;
251         run_cost += indexTotalCost - indexStartupCost;
252
253         /* estimate number of main-table tuples fetched */
254         tuples_fetched = clamp_row_est(indexSelectivity * baserel->tuples);
255
256         /*----------
257          * Estimate number of main-table pages fetched, and compute I/O cost.
258          *
259          * When the index ordering is uncorrelated with the table ordering,
260          * we use an approximation proposed by Mackert and Lohman (see
261          * index_pages_fetched() for details) to compute the number of pages
262          * fetched, and then charge random_page_cost per page fetched.
263          *
264          * When the index ordering is exactly correlated with the table ordering
265          * (just after a CLUSTER, for example), the number of pages fetched should
266          * be exactly selectivity * table_size.  What's more, all but the first
267          * will be sequential fetches, not the random fetches that occur in the
268          * uncorrelated case.  So if the number of pages is more than 1, we
269          * ought to charge
270          *              random_page_cost + (pages_fetched - 1) * seq_page_cost
271          * For partially-correlated indexes, we ought to charge somewhere between
272          * these two estimates.  We currently interpolate linearly between the
273          * estimates based on the correlation squared (XXX is that appropriate?).
274          *----------
275          */
276         if (outer_rel != NULL && outer_rel->rows > 1)
277         {
278                 /*
279                  * For repeated indexscans, the appropriate estimate for the
280                  * uncorrelated case is to scale up the number of tuples fetched in
281                  * the Mackert and Lohman formula by the number of scans, so that we
282                  * estimate the number of pages fetched by all the scans; then
283                  * pro-rate the costs for one scan.  In this case we assume all the
284                  * fetches are random accesses.
285                  */
286                 double          num_scans = outer_rel->rows;
287
288                 pages_fetched = index_pages_fetched(tuples_fetched * num_scans,
289                                                                                         baserel->pages,
290                                                                                         (double) index->pages,
291                                                                                         root);
292
293                 max_IO_cost = (pages_fetched * random_page_cost) / num_scans;
294
295                 /*
296                  * In the perfectly correlated case, the number of pages touched
297                  * by each scan is selectivity * table_size, and we can use the
298                  * Mackert and Lohman formula at the page level to estimate how
299                  * much work is saved by caching across scans.  We still assume
300                  * all the fetches are random, though, which is an overestimate
301                  * that's hard to correct for without double-counting the cache
302                  * effects.  (But in most cases where such a plan is actually
303                  * interesting, only one page would get fetched per scan anyway,
304                  * so it shouldn't matter much.)
305                  */
306                 pages_fetched = ceil(indexSelectivity * (double) baserel->pages);
307
308                 pages_fetched = index_pages_fetched(pages_fetched * num_scans,
309                                                                                         baserel->pages,
310                                                                                         (double) index->pages,
311                                                                                         root);
312
313                 min_IO_cost = (pages_fetched * random_page_cost) / num_scans;
314         }
315         else
316         {
317                 /*
318                  * Normal case: apply the Mackert and Lohman formula, and then
319                  * interpolate between that and the correlation-derived result.
320                  */
321                 pages_fetched = index_pages_fetched(tuples_fetched,
322                                                                                         baserel->pages,
323                                                                                         (double) index->pages,
324                                                                                         root);
325
326                 /* max_IO_cost is for the perfectly uncorrelated case (csquared=0) */
327                 max_IO_cost = pages_fetched * random_page_cost;
328
329                 /* min_IO_cost is for the perfectly correlated case (csquared=1) */
330                 pages_fetched = ceil(indexSelectivity * (double) baserel->pages);
331                 min_IO_cost = random_page_cost;
332                 if (pages_fetched > 1)
333                         min_IO_cost += (pages_fetched - 1) * seq_page_cost;
334         }
335
336         /*
337          * Now interpolate based on estimated index order correlation to get
338          * total disk I/O cost for main table accesses.
339          */
340         csquared = indexCorrelation * indexCorrelation;
341
342         run_cost += max_IO_cost + csquared * (min_IO_cost - max_IO_cost);
343
344         /*
345          * Estimate CPU costs per tuple.
346          *
347          * Normally the indexquals will be removed from the list of restriction
348          * clauses that we have to evaluate as qpquals, so we should subtract
349          * their costs from baserestrictcost.  But if we are doing a join then
350          * some of the indexquals are join clauses and shouldn't be subtracted.
351          * Rather than work out exactly how much to subtract, we don't subtract
352          * anything.
353          */
354         startup_cost += baserel->baserestrictcost.startup;
355         cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
356
357         if (outer_rel == NULL)
358         {
359                 QualCost        index_qual_cost;
360
361                 cost_qual_eval(&index_qual_cost, indexQuals);
362                 /* any startup cost still has to be paid ... */
363                 cpu_per_tuple -= index_qual_cost.per_tuple;
364         }
365
366         run_cost += cpu_per_tuple * tuples_fetched;
367
368         path->path.startup_cost = startup_cost;
369         path->path.total_cost = startup_cost + run_cost;
370 }
371
372 /*
373  * index_pages_fetched
374  *        Estimate the number of pages actually fetched after accounting for
375  *        cache effects.
376  *
377  * We use an approximation proposed by Mackert and Lohman, "Index Scans
378  * Using a Finite LRU Buffer: A Validated I/O Model", ACM Transactions
379  * on Database Systems, Vol. 14, No. 3, September 1989, Pages 401-424.
380  * The Mackert and Lohman approximation is that the number of pages
381  * fetched is
382  *      PF =
383  *              min(2TNs/(2T+Ns), T)                    when T <= b
384  *              2TNs/(2T+Ns)                                    when T > b and Ns <= 2Tb/(2T-b)
385  *              b + (Ns - 2Tb/(2T-b))*(T-b)/T   when T > b and Ns > 2Tb/(2T-b)
386  * where
387  *              T = # pages in table
388  *              N = # tuples in table
389  *              s = selectivity = fraction of table to be scanned
390  *              b = # buffer pages available (we include kernel space here)
391  *
392  * We assume that effective_cache_size is the total number of buffer pages
393  * available for the whole query, and pro-rate that space across all the
394  * tables in the query and the index currently under consideration.  (This
395  * ignores space needed for other indexes used by the query, but since we
396  * don't know which indexes will get used, we can't estimate that very well;
397  * and in any case counting all the tables may well be an overestimate, since
398  * depending on the join plan not all the tables may be scanned concurrently.)
399  *
400  * The product Ns is the number of tuples fetched; we pass in that
401  * product rather than calculating it here.  "pages" is the number of pages
402  * in the object under consideration (either an index or a table).
403  * "index_pages" is the amount to add to the total table space, which was
404  * computed for us by query_planner.
405  *
406  * Caller is expected to have ensured that tuples_fetched is greater than zero
407  * and rounded to integer (see clamp_row_est).  The result will likewise be
408  * greater than zero and integral.
409  */
410 double
411 index_pages_fetched(double tuples_fetched, BlockNumber pages,
412                                         double index_pages, PlannerInfo *root)
413 {
414         double          pages_fetched;
415         double          total_pages;
416         double          T,
417                                 b;
418
419         /* T is # pages in table, but don't allow it to be zero */
420         T = (pages > 1) ? (double) pages : 1.0;
421
422         /* Compute number of pages assumed to be competing for cache space */
423         total_pages = root->total_table_pages + index_pages;
424         total_pages = Max(total_pages, 1.0);
425         Assert(T <= total_pages);
426
427         /* b is pro-rated share of effective_cache_size */
428         b = (double) effective_cache_size *T / total_pages;
429
430         /* force it positive and integral */
431         if (b <= 1.0)
432                 b = 1.0;
433         else
434                 b = ceil(b);
435
436         /* This part is the Mackert and Lohman formula */
437         if (T <= b)
438         {
439                 pages_fetched =
440                         (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
441                 if (pages_fetched >= T)
442                         pages_fetched = T;
443                 else
444                         pages_fetched = ceil(pages_fetched);
445         }
446         else
447         {
448                 double          lim;
449
450                 lim = (2.0 * T * b) / (2.0 * T - b);
451                 if (tuples_fetched <= lim)
452                 {
453                         pages_fetched =
454                                 (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
455                 }
456                 else
457                 {
458                         pages_fetched =
459                                 b + (tuples_fetched - lim) * (T - b) / T;
460                 }
461                 pages_fetched = ceil(pages_fetched);
462         }
463         return pages_fetched;
464 }
465
466 /*
467  * get_indexpath_pages
468  *              Determine the total size of the indexes used in a bitmap index path.
469  *
470  * Note: if the same index is used more than once in a bitmap tree, we will
471  * count it multiple times, which perhaps is the wrong thing ... but it's
472  * not completely clear, and detecting duplicates is difficult, so ignore it
473  * for now.
474  */
475 static double
476 get_indexpath_pages(Path *bitmapqual)
477 {
478         double          result = 0;
479         ListCell   *l;
480
481         if (IsA(bitmapqual, BitmapAndPath))
482         {
483                 BitmapAndPath *apath = (BitmapAndPath *) bitmapqual;
484
485                 foreach(l, apath->bitmapquals)
486                 {
487                         result += get_indexpath_pages((Path *) lfirst(l));
488                 }
489         }
490         else if (IsA(bitmapqual, BitmapOrPath))
491         {
492                 BitmapOrPath *opath = (BitmapOrPath *) bitmapqual;
493
494                 foreach(l, opath->bitmapquals)
495                 {
496                         result += get_indexpath_pages((Path *) lfirst(l));
497                 }
498         }
499         else if (IsA(bitmapqual, IndexPath))
500         {
501                 IndexPath  *ipath = (IndexPath *) bitmapqual;
502
503                 result = (double) ipath->indexinfo->pages;
504         }
505         else
506                 elog(ERROR, "unrecognized node type: %d", nodeTag(bitmapqual));
507
508         return result;
509 }
510
511 /*
512  * cost_bitmap_heap_scan
513  *        Determines and returns the cost of scanning a relation using a bitmap
514  *        index-then-heap plan.
515  *
516  * 'baserel' is the relation to be scanned
517  * 'bitmapqual' is a tree of IndexPaths, BitmapAndPaths, and BitmapOrPaths
518  * 'outer_rel' is the outer relation when we are considering using the bitmap
519  *              scan as the inside of a nestloop join (hence, some of the indexQuals
520  *              are join clauses, and we should expect repeated scans of the table);
521  *              NULL for a plain bitmap scan
522  *
523  * Note: if this is a join inner path, the component IndexPaths in bitmapqual
524  * should have been costed accordingly.
525  */
526 void
527 cost_bitmap_heap_scan(Path *path, PlannerInfo *root, RelOptInfo *baserel,
528                                           Path *bitmapqual, RelOptInfo *outer_rel)
529 {
530         Cost            startup_cost = 0;
531         Cost            run_cost = 0;
532         Cost            indexTotalCost;
533         Selectivity indexSelectivity;
534         Cost            cpu_per_tuple;
535         Cost            cost_per_page;
536         double          tuples_fetched;
537         double          pages_fetched;
538         double          T;
539
540         /* Should only be applied to base relations */
541         Assert(IsA(baserel, RelOptInfo));
542         Assert(baserel->relid > 0);
543         Assert(baserel->rtekind == RTE_RELATION);
544
545         if (!enable_bitmapscan)
546                 startup_cost += disable_cost;
547
548         /*
549          * Fetch total cost of obtaining the bitmap, as well as its total
550          * selectivity.
551          */
552         cost_bitmap_tree_node(bitmapqual, &indexTotalCost, &indexSelectivity);
553
554         startup_cost += indexTotalCost;
555
556         /*
557          * Estimate number of main-table pages fetched.
558          */
559         tuples_fetched = clamp_row_est(indexSelectivity * baserel->tuples);
560
561         T = (baserel->pages > 1) ? (double) baserel->pages : 1.0;
562
563         if (outer_rel != NULL && outer_rel->rows > 1)
564         {
565                 /*
566                  * For repeated bitmap scans, scale up the number of tuples fetched in
567                  * the Mackert and Lohman formula by the number of scans, so that we
568                  * estimate the number of pages fetched by all the scans. Then
569                  * pro-rate for one scan.
570                  */
571                 double          num_scans = outer_rel->rows;
572
573                 pages_fetched = index_pages_fetched(tuples_fetched * num_scans,
574                                                                                         baserel->pages,
575                                                                                         get_indexpath_pages(bitmapqual),
576                                                                                         root);
577                 pages_fetched /= num_scans;
578         }
579         else
580         {
581                 /*
582                  * For a single scan, the number of heap pages that need to be fetched
583                  * is the same as the Mackert and Lohman formula for the case T <= b
584                  * (ie, no re-reads needed).
585                  */
586                 pages_fetched = (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
587         }
588         if (pages_fetched >= T)
589                 pages_fetched = T;
590         else
591                 pages_fetched = ceil(pages_fetched);
592
593         /*
594          * For small numbers of pages we should charge random_page_cost apiece,
595          * while if nearly all the table's pages are being read, it's more
596          * appropriate to charge seq_page_cost apiece.  The effect is nonlinear,
597          * too. For lack of a better idea, interpolate like this to determine the
598          * cost per page.
599          */
600         if (pages_fetched >= 2.0)
601                 cost_per_page = random_page_cost -
602                         (random_page_cost - seq_page_cost) * sqrt(pages_fetched / T);
603         else
604                 cost_per_page = random_page_cost;
605
606         run_cost += pages_fetched * cost_per_page;
607
608         /*
609          * Estimate CPU costs per tuple.
610          *
611          * Often the indexquals don't need to be rechecked at each tuple ... but
612          * not always, especially not if there are enough tuples involved that the
613          * bitmaps become lossy.  For the moment, just assume they will be
614          * rechecked always.
615          */
616         startup_cost += baserel->baserestrictcost.startup;
617         cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
618
619         run_cost += cpu_per_tuple * tuples_fetched;
620
621         path->startup_cost = startup_cost;
622         path->total_cost = startup_cost + run_cost;
623 }
624
625 /*
626  * cost_bitmap_tree_node
627  *              Extract cost and selectivity from a bitmap tree node (index/and/or)
628  */
629 void
630 cost_bitmap_tree_node(Path *path, Cost *cost, Selectivity *selec)
631 {
632         if (IsA(path, IndexPath))
633         {
634                 *cost = ((IndexPath *) path)->indextotalcost;
635                 *selec = ((IndexPath *) path)->indexselectivity;
636                 /*
637                  * Charge a small amount per retrieved tuple to reflect the costs of
638                  * manipulating the bitmap.  This is mostly to make sure that a bitmap
639                  * scan doesn't look to be the same cost as an indexscan to retrieve
640                  * a single tuple.
641                  */
642                 *cost += 0.1 * cpu_operator_cost * ((IndexPath *) path)->rows;
643         }
644         else if (IsA(path, BitmapAndPath))
645         {
646                 *cost = path->total_cost;
647                 *selec = ((BitmapAndPath *) path)->bitmapselectivity;
648         }
649         else if (IsA(path, BitmapOrPath))
650         {
651                 *cost = path->total_cost;
652                 *selec = ((BitmapOrPath *) path)->bitmapselectivity;
653         }
654         else
655         {
656                 elog(ERROR, "unrecognized node type: %d", nodeTag(path));
657                 *cost = *selec = 0;             /* keep compiler quiet */
658         }
659 }
660
661 /*
662  * cost_bitmap_and_node
663  *              Estimate the cost of a BitmapAnd node
664  *
665  * Note that this considers only the costs of index scanning and bitmap
666  * creation, not the eventual heap access.      In that sense the object isn't
667  * truly a Path, but it has enough path-like properties (costs in particular)
668  * to warrant treating it as one.
669  */
670 void
671 cost_bitmap_and_node(BitmapAndPath *path, PlannerInfo *root)
672 {
673         Cost            totalCost;
674         Selectivity selec;
675         ListCell   *l;
676
677         /*
678          * We estimate AND selectivity on the assumption that the inputs are
679          * independent.  This is probably often wrong, but we don't have the info
680          * to do better.
681          *
682          * The runtime cost of the BitmapAnd itself is estimated at 100x
683          * cpu_operator_cost for each tbm_intersect needed.  Probably too small,
684          * definitely too simplistic?
685          */
686         totalCost = 0.0;
687         selec = 1.0;
688         foreach(l, path->bitmapquals)
689         {
690                 Path       *subpath = (Path *) lfirst(l);
691                 Cost            subCost;
692                 Selectivity subselec;
693
694                 cost_bitmap_tree_node(subpath, &subCost, &subselec);
695
696                 selec *= subselec;
697
698                 totalCost += subCost;
699                 if (l != list_head(path->bitmapquals))
700                         totalCost += 100.0 * cpu_operator_cost;
701         }
702         path->bitmapselectivity = selec;
703         path->path.startup_cost = totalCost;
704         path->path.total_cost = totalCost;
705 }
706
707 /*
708  * cost_bitmap_or_node
709  *              Estimate the cost of a BitmapOr node
710  *
711  * See comments for cost_bitmap_and_node.
712  */
713 void
714 cost_bitmap_or_node(BitmapOrPath *path, PlannerInfo *root)
715 {
716         Cost            totalCost;
717         Selectivity selec;
718         ListCell   *l;
719
720         /*
721          * We estimate OR selectivity on the assumption that the inputs are
722          * non-overlapping, since that's often the case in "x IN (list)" type
723          * situations.  Of course, we clamp to 1.0 at the end.
724          *
725          * The runtime cost of the BitmapOr itself is estimated at 100x
726          * cpu_operator_cost for each tbm_union needed.  Probably too small,
727          * definitely too simplistic?  We are aware that the tbm_unions are
728          * optimized out when the inputs are BitmapIndexScans.
729          */
730         totalCost = 0.0;
731         selec = 0.0;
732         foreach(l, path->bitmapquals)
733         {
734                 Path       *subpath = (Path *) lfirst(l);
735                 Cost            subCost;
736                 Selectivity subselec;
737
738                 cost_bitmap_tree_node(subpath, &subCost, &subselec);
739
740                 selec += subselec;
741
742                 totalCost += subCost;
743                 if (l != list_head(path->bitmapquals) &&
744                         !IsA(subpath, IndexPath))
745                         totalCost += 100.0 * cpu_operator_cost;
746         }
747         path->bitmapselectivity = Min(selec, 1.0);
748         path->path.startup_cost = totalCost;
749         path->path.total_cost = totalCost;
750 }
751
752 /*
753  * cost_tidscan
754  *        Determines and returns the cost of scanning a relation using TIDs.
755  */
756 void
757 cost_tidscan(Path *path, PlannerInfo *root,
758                          RelOptInfo *baserel, List *tidquals)
759 {
760         Cost            startup_cost = 0;
761         Cost            run_cost = 0;
762         Cost            cpu_per_tuple;
763         int                     ntuples;
764         ListCell   *l;
765
766         /* Should only be applied to base relations */
767         Assert(baserel->relid > 0);
768         Assert(baserel->rtekind == RTE_RELATION);
769
770         if (!enable_tidscan)
771                 startup_cost += disable_cost;
772
773         /* Count how many tuples we expect to retrieve */
774         ntuples = 0;
775         foreach(l, tidquals)
776         {
777                 if (IsA(lfirst(l), ScalarArrayOpExpr))
778                 {
779                         /* Each element of the array yields 1 tuple */
780                         ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) lfirst(l);
781                         Node       *arraynode = (Node *) lsecond(saop->args);
782
783                         ntuples += estimate_array_length(arraynode);
784                 }
785                 else
786                 {
787                         /* It's just CTID = something, count 1 tuple */
788                         ntuples++;
789                 }
790         }
791
792         /* disk costs --- assume each tuple on a different page */
793         run_cost += random_page_cost * ntuples;
794
795         /* CPU costs */
796         startup_cost += baserel->baserestrictcost.startup;
797         cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
798         run_cost += cpu_per_tuple * ntuples;
799
800         path->startup_cost = startup_cost;
801         path->total_cost = startup_cost + run_cost;
802 }
803
804 /*
805  * cost_subqueryscan
806  *        Determines and returns the cost of scanning a subquery RTE.
807  */
808 void
809 cost_subqueryscan(Path *path, RelOptInfo *baserel)
810 {
811         Cost            startup_cost;
812         Cost            run_cost;
813         Cost            cpu_per_tuple;
814
815         /* Should only be applied to base relations that are subqueries */
816         Assert(baserel->relid > 0);
817         Assert(baserel->rtekind == RTE_SUBQUERY);
818
819         /*
820          * Cost of path is cost of evaluating the subplan, plus cost of evaluating
821          * any restriction clauses that will be attached to the SubqueryScan node,
822          * plus cpu_tuple_cost to account for selection and projection overhead.
823          */
824         path->startup_cost = baserel->subplan->startup_cost;
825         path->total_cost = baserel->subplan->total_cost;
826
827         startup_cost = baserel->baserestrictcost.startup;
828         cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
829         run_cost = cpu_per_tuple * baserel->tuples;
830
831         path->startup_cost += startup_cost;
832         path->total_cost += startup_cost + run_cost;
833 }
834
835 /*
836  * cost_functionscan
837  *        Determines and returns the cost of scanning a function RTE.
838  */
839 void
840 cost_functionscan(Path *path, PlannerInfo *root, RelOptInfo *baserel)
841 {
842         Cost            startup_cost = 0;
843         Cost            run_cost = 0;
844         Cost            cpu_per_tuple;
845
846         /* Should only be applied to base relations that are functions */
847         Assert(baserel->relid > 0);
848         Assert(baserel->rtekind == RTE_FUNCTION);
849
850         /*
851          * For now, estimate function's cost at one operator eval per function
852          * call.  Someday we should revive the function cost estimate columns in
853          * pg_proc...
854          */
855         cpu_per_tuple = cpu_operator_cost;
856
857         /* Add scanning CPU costs */
858         startup_cost += baserel->baserestrictcost.startup;
859         cpu_per_tuple += cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
860         run_cost += cpu_per_tuple * baserel->tuples;
861
862         path->startup_cost = startup_cost;
863         path->total_cost = startup_cost + run_cost;
864 }
865
866 /*
867  * cost_valuesscan
868  *        Determines and returns the cost of scanning a VALUES RTE.
869  */
870 void
871 cost_valuesscan(Path *path, PlannerInfo *root, RelOptInfo *baserel)
872 {
873         Cost            startup_cost = 0;
874         Cost            run_cost = 0;
875         Cost            cpu_per_tuple;
876
877         /* Should only be applied to base relations that are values lists */
878         Assert(baserel->relid > 0);
879         Assert(baserel->rtekind == RTE_VALUES);
880
881         /*
882          * For now, estimate list evaluation cost at one operator eval per list
883          * (probably pretty bogus, but is it worth being smarter?)
884          */
885         cpu_per_tuple = cpu_operator_cost;
886
887         /* Add scanning CPU costs */
888         startup_cost += baserel->baserestrictcost.startup;
889         cpu_per_tuple += cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
890         run_cost += cpu_per_tuple * baserel->tuples;
891
892         path->startup_cost = startup_cost;
893         path->total_cost = startup_cost + run_cost;
894 }
895
896 /*
897  * cost_sort
898  *        Determines and returns the cost of sorting a relation, including
899  *        the cost of reading the input data.
900  *
901  * If the total volume of data to sort is less than work_mem, we will do
902  * an in-memory sort, which requires no I/O and about t*log2(t) tuple
903  * comparisons for t tuples.
904  *
905  * If the total volume exceeds work_mem, we switch to a tape-style merge
906  * algorithm.  There will still be about t*log2(t) tuple comparisons in
907  * total, but we will also need to write and read each tuple once per
908  * merge pass.  We expect about ceil(logM(r)) merge passes where r is the
909  * number of initial runs formed and M is the merge order used by tuplesort.c.
910  * Since the average initial run should be about twice work_mem, we have
911  *              disk traffic = 2 * relsize * ceil(logM(p / (2*work_mem)))
912  *              cpu = comparison_cost * t * log2(t)
913  *
914  * The disk traffic is assumed to be 3/4ths sequential and 1/4th random
915  * accesses (XXX can't we refine that guess?)
916  *
917  * We charge two operator evals per tuple comparison, which should be in
918  * the right ballpark in most cases.
919  *
920  * 'pathkeys' is a list of sort keys
921  * 'input_cost' is the total cost for reading the input data
922  * 'tuples' is the number of tuples in the relation
923  * 'width' is the average tuple width in bytes
924  *
925  * NOTE: some callers currently pass NIL for pathkeys because they
926  * can't conveniently supply the sort keys.  Since this routine doesn't
927  * currently do anything with pathkeys anyway, that doesn't matter...
928  * but if it ever does, it should react gracefully to lack of key data.
929  * (Actually, the thing we'd most likely be interested in is just the number
930  * of sort keys, which all callers *could* supply.)
931  */
932 void
933 cost_sort(Path *path, PlannerInfo *root,
934                   List *pathkeys, Cost input_cost, double tuples, int width)
935 {
936         Cost            startup_cost = input_cost;
937         Cost            run_cost = 0;
938         double          nbytes = relation_byte_size(tuples, width);
939         long            work_mem_bytes = work_mem * 1024L;
940
941         if (!enable_sort)
942                 startup_cost += disable_cost;
943
944         /*
945          * We want to be sure the cost of a sort is never estimated as zero, even
946          * if passed-in tuple count is zero.  Besides, mustn't do log(0)...
947          */
948         if (tuples < 2.0)
949                 tuples = 2.0;
950
951         /*
952          * CPU costs
953          *
954          * Assume about two operator evals per tuple comparison and N log2 N
955          * comparisons
956          */
957         startup_cost += 2.0 * cpu_operator_cost * tuples * LOG2(tuples);
958
959         /* disk costs */
960         if (nbytes > work_mem_bytes)
961         {
962                 double          npages = ceil(nbytes / BLCKSZ);
963                 double          nruns = (nbytes / work_mem_bytes) * 0.5;
964                 double          mergeorder = tuplesort_merge_order(work_mem_bytes);
965                 double          log_runs;
966                 double          npageaccesses;
967
968                 /* Compute logM(r) as log(r) / log(M) */
969                 if (nruns > mergeorder)
970                         log_runs = ceil(log(nruns) / log(mergeorder));
971                 else
972                         log_runs = 1.0;
973                 npageaccesses = 2.0 * npages * log_runs;
974                 /* Assume 3/4ths of accesses are sequential, 1/4th are not */
975                 startup_cost += npageaccesses *
976                         (seq_page_cost * 0.75 + random_page_cost * 0.25);
977         }
978
979         /*
980          * Also charge a small amount (arbitrarily set equal to operator cost) per
981          * extracted tuple.
982          */
983         run_cost += cpu_operator_cost * tuples;
984
985         path->startup_cost = startup_cost;
986         path->total_cost = startup_cost + run_cost;
987 }
988
989 /*
990  * cost_material
991  *        Determines and returns the cost of materializing a relation, including
992  *        the cost of reading the input data.
993  *
994  * If the total volume of data to materialize exceeds work_mem, we will need
995  * to write it to disk, so the cost is much higher in that case.
996  */
997 void
998 cost_material(Path *path,
999                           Cost input_cost, double tuples, int width)
1000 {
1001         Cost            startup_cost = input_cost;
1002         Cost            run_cost = 0;
1003         double          nbytes = relation_byte_size(tuples, width);
1004         long            work_mem_bytes = work_mem * 1024L;
1005
1006         /* disk costs */
1007         if (nbytes > work_mem_bytes)
1008         {
1009                 double          npages = ceil(nbytes / BLCKSZ);
1010
1011                 /* We'll write during startup and read during retrieval */
1012                 startup_cost += seq_page_cost * npages;
1013                 run_cost += seq_page_cost * npages;
1014         }
1015
1016         /*
1017          * Charge a very small amount per inserted tuple, to reflect bookkeeping
1018          * costs.  We use cpu_tuple_cost/10 for this.  This is needed to break the
1019          * tie that would otherwise exist between nestloop with A outer,
1020          * materialized B inner and nestloop with B outer, materialized A inner.
1021          * The extra cost ensures we'll prefer materializing the smaller rel.
1022          */
1023         startup_cost += cpu_tuple_cost * 0.1 * tuples;
1024
1025         /*
1026          * Also charge a small amount per extracted tuple.      We use cpu_tuple_cost
1027          * so that it doesn't appear worthwhile to materialize a bare seqscan.
1028          */
1029         run_cost += cpu_tuple_cost * tuples;
1030
1031         path->startup_cost = startup_cost;
1032         path->total_cost = startup_cost + run_cost;
1033 }
1034
1035 /*
1036  * cost_agg
1037  *              Determines and returns the cost of performing an Agg plan node,
1038  *              including the cost of its input.
1039  *
1040  * Note: when aggstrategy == AGG_SORTED, caller must ensure that input costs
1041  * are for appropriately-sorted input.
1042  */
1043 void
1044 cost_agg(Path *path, PlannerInfo *root,
1045                  AggStrategy aggstrategy, int numAggs,
1046                  int numGroupCols, double numGroups,
1047                  Cost input_startup_cost, Cost input_total_cost,
1048                  double input_tuples)
1049 {
1050         Cost            startup_cost;
1051         Cost            total_cost;
1052
1053         /*
1054          * We charge one cpu_operator_cost per aggregate function per input tuple,
1055          * and another one per output tuple (corresponding to transfn and finalfn
1056          * calls respectively).  If we are grouping, we charge an additional
1057          * cpu_operator_cost per grouping column per input tuple for grouping
1058          * comparisons.
1059          *
1060          * We will produce a single output tuple if not grouping, and a tuple per
1061          * group otherwise.  We charge cpu_tuple_cost for each output tuple.
1062          *
1063          * Note: in this cost model, AGG_SORTED and AGG_HASHED have exactly the
1064          * same total CPU cost, but AGG_SORTED has lower startup cost.  If the
1065          * input path is already sorted appropriately, AGG_SORTED should be
1066          * preferred (since it has no risk of memory overflow).  This will happen
1067          * as long as the computed total costs are indeed exactly equal --- but if
1068          * there's roundoff error we might do the wrong thing.  So be sure that
1069          * the computations below form the same intermediate values in the same
1070          * order.
1071          */
1072         if (aggstrategy == AGG_PLAIN)
1073         {
1074                 startup_cost = input_total_cost;
1075                 startup_cost += cpu_operator_cost * (input_tuples + 1) * numAggs;
1076                 /* we aren't grouping */
1077                 total_cost = startup_cost + cpu_tuple_cost;
1078         }
1079         else if (aggstrategy == AGG_SORTED)
1080         {
1081                 /* Here we are able to deliver output on-the-fly */
1082                 startup_cost = input_startup_cost;
1083                 total_cost = input_total_cost;
1084                 /* calcs phrased this way to match HASHED case, see note above */
1085                 total_cost += cpu_operator_cost * input_tuples * numGroupCols;
1086                 total_cost += cpu_operator_cost * input_tuples * numAggs;
1087                 total_cost += cpu_operator_cost * numGroups * numAggs;
1088                 total_cost += cpu_tuple_cost * numGroups;
1089         }
1090         else
1091         {
1092                 /* must be AGG_HASHED */
1093                 startup_cost = input_total_cost;
1094                 startup_cost += cpu_operator_cost * input_tuples * numGroupCols;
1095                 startup_cost += cpu_operator_cost * input_tuples * numAggs;
1096                 total_cost = startup_cost;
1097                 total_cost += cpu_operator_cost * numGroups * numAggs;
1098                 total_cost += cpu_tuple_cost * numGroups;
1099         }
1100
1101         path->startup_cost = startup_cost;
1102         path->total_cost = total_cost;
1103 }
1104
1105 /*
1106  * cost_group
1107  *              Determines and returns the cost of performing a Group plan node,
1108  *              including the cost of its input.
1109  *
1110  * Note: caller must ensure that input costs are for appropriately-sorted
1111  * input.
1112  */
1113 void
1114 cost_group(Path *path, PlannerInfo *root,
1115                    int numGroupCols, double numGroups,
1116                    Cost input_startup_cost, Cost input_total_cost,
1117                    double input_tuples)
1118 {
1119         Cost            startup_cost;
1120         Cost            total_cost;
1121
1122         startup_cost = input_startup_cost;
1123         total_cost = input_total_cost;
1124
1125         /*
1126          * Charge one cpu_operator_cost per comparison per input tuple. We assume
1127          * all columns get compared at most of the tuples.
1128          */
1129         total_cost += cpu_operator_cost * input_tuples * numGroupCols;
1130
1131         path->startup_cost = startup_cost;
1132         path->total_cost = total_cost;
1133 }
1134
1135 /*
1136  * If a nestloop's inner path is an indexscan, be sure to use its estimated
1137  * output row count, which may be lower than the restriction-clause-only row
1138  * count of its parent.  (We don't include this case in the PATH_ROWS macro
1139  * because it applies *only* to a nestloop's inner relation.)  We have to
1140  * be prepared to recurse through Append nodes in case of an appendrel.
1141  */
1142 static double
1143 nestloop_inner_path_rows(Path *path)
1144 {
1145         double          result;
1146
1147         if (IsA(path, IndexPath))
1148                 result = ((IndexPath *) path)->rows;
1149         else if (IsA(path, BitmapHeapPath))
1150                 result = ((BitmapHeapPath *) path)->rows;
1151         else if (IsA(path, AppendPath))
1152         {
1153                 ListCell   *l;
1154
1155                 result = 0;
1156                 foreach(l, ((AppendPath *) path)->subpaths)
1157                 {
1158                         result += nestloop_inner_path_rows((Path *) lfirst(l));
1159                 }
1160         }
1161         else
1162                 result = PATH_ROWS(path);
1163
1164         return result;
1165 }
1166
1167 /*
1168  * cost_nestloop
1169  *        Determines and returns the cost of joining two relations using the
1170  *        nested loop algorithm.
1171  *
1172  * 'path' is already filled in except for the cost fields
1173  */
1174 void
1175 cost_nestloop(NestPath *path, PlannerInfo *root)
1176 {
1177         Path       *outer_path = path->outerjoinpath;
1178         Path       *inner_path = path->innerjoinpath;
1179         Cost            startup_cost = 0;
1180         Cost            run_cost = 0;
1181         Cost            cpu_per_tuple;
1182         QualCost        restrict_qual_cost;
1183         double          outer_path_rows = PATH_ROWS(outer_path);
1184         double          inner_path_rows = nestloop_inner_path_rows(inner_path);
1185         double          ntuples;
1186         Selectivity joininfactor;
1187
1188         if (!enable_nestloop)
1189                 startup_cost += disable_cost;
1190
1191         /*
1192          * If we're doing JOIN_IN then we will stop scanning inner tuples for an
1193          * outer tuple as soon as we have one match.  Account for the effects of
1194          * this by scaling down the cost estimates in proportion to the JOIN_IN
1195          * selectivity.  (This assumes that all the quals attached to the join are
1196          * IN quals, which should be true.)
1197          */
1198         joininfactor = join_in_selectivity(path, root);
1199
1200         /* cost of source data */
1201
1202         /*
1203          * NOTE: clearly, we must pay both outer and inner paths' startup_cost
1204          * before we can start returning tuples, so the join's startup cost is
1205          * their sum.  What's not so clear is whether the inner path's
1206          * startup_cost must be paid again on each rescan of the inner path. This
1207          * is not true if the inner path is materialized or is a hashjoin, but
1208          * probably is true otherwise.
1209          */
1210         startup_cost += outer_path->startup_cost + inner_path->startup_cost;
1211         run_cost += outer_path->total_cost - outer_path->startup_cost;
1212         if (IsA(inner_path, MaterialPath) ||
1213                 IsA(inner_path, HashPath))
1214         {
1215                 /* charge only run cost for each iteration of inner path */
1216         }
1217         else
1218         {
1219                 /*
1220                  * charge startup cost for each iteration of inner path, except we
1221                  * already charged the first startup_cost in our own startup
1222                  */
1223                 run_cost += (outer_path_rows - 1) * inner_path->startup_cost;
1224         }
1225         run_cost += outer_path_rows *
1226                 (inner_path->total_cost - inner_path->startup_cost) * joininfactor;
1227
1228         /*
1229          * Compute number of tuples processed (not number emitted!)
1230          */
1231         ntuples = outer_path_rows * inner_path_rows * joininfactor;
1232
1233         /* CPU costs */
1234         cost_qual_eval(&restrict_qual_cost, path->joinrestrictinfo);
1235         startup_cost += restrict_qual_cost.startup;
1236         cpu_per_tuple = cpu_tuple_cost + restrict_qual_cost.per_tuple;
1237         run_cost += cpu_per_tuple * ntuples;
1238
1239         path->path.startup_cost = startup_cost;
1240         path->path.total_cost = startup_cost + run_cost;
1241 }
1242
1243 /*
1244  * cost_mergejoin
1245  *        Determines and returns the cost of joining two relations using the
1246  *        merge join algorithm.
1247  *
1248  * 'path' is already filled in except for the cost fields
1249  *
1250  * Notes: path's mergeclauses should be a subset of the joinrestrictinfo list;
1251  * outersortkeys and innersortkeys are lists of the keys to be used
1252  * to sort the outer and inner relations, or NIL if no explicit
1253  * sort is needed because the source path is already ordered.
1254  */
1255 void
1256 cost_mergejoin(MergePath *path, PlannerInfo *root)
1257 {
1258         Path       *outer_path = path->jpath.outerjoinpath;
1259         Path       *inner_path = path->jpath.innerjoinpath;
1260         List       *mergeclauses = path->path_mergeclauses;
1261         Oid                *mergeFamilies = path->path_mergeFamilies;
1262         int                *mergeStrategies = path->path_mergeStrategies;
1263         List       *outersortkeys = path->outersortkeys;
1264         List       *innersortkeys = path->innersortkeys;
1265         Cost            startup_cost = 0;
1266         Cost            run_cost = 0;
1267         Cost            cpu_per_tuple;
1268         Selectivity merge_selec;
1269         QualCost        merge_qual_cost;
1270         QualCost        qp_qual_cost;
1271         RestrictInfo *firstclause;
1272         double          outer_path_rows = PATH_ROWS(outer_path);
1273         double          inner_path_rows = PATH_ROWS(inner_path);
1274         double          outer_rows,
1275                                 inner_rows;
1276         double          mergejointuples,
1277                                 rescannedtuples;
1278         double          rescanratio;
1279         Selectivity outerscansel,
1280                                 innerscansel;
1281         Selectivity joininfactor;
1282         Path            sort_path;              /* dummy for result of cost_sort */
1283
1284         if (!enable_mergejoin)
1285                 startup_cost += disable_cost;
1286
1287         /*
1288          * Compute cost and selectivity of the mergequals and qpquals (other
1289          * restriction clauses) separately.  We use approx_selectivity here for
1290          * speed --- in most cases, any errors won't affect the result much.
1291          *
1292          * Note: it's probably bogus to use the normal selectivity calculation
1293          * here when either the outer or inner path is a UniquePath.
1294          */
1295         merge_selec = approx_selectivity(root, mergeclauses,
1296                                                                          path->jpath.jointype);
1297         cost_qual_eval(&merge_qual_cost, mergeclauses);
1298         cost_qual_eval(&qp_qual_cost, path->jpath.joinrestrictinfo);
1299         qp_qual_cost.startup -= merge_qual_cost.startup;
1300         qp_qual_cost.per_tuple -= merge_qual_cost.per_tuple;
1301
1302         /* approx # tuples passing the merge quals */
1303         mergejointuples = clamp_row_est(merge_selec * outer_path_rows * inner_path_rows);
1304
1305         /*
1306          * When there are equal merge keys in the outer relation, the mergejoin
1307          * must rescan any matching tuples in the inner relation. This means
1308          * re-fetching inner tuples.  Our cost model for this is that a re-fetch
1309          * costs the same as an original fetch, which is probably an overestimate;
1310          * but on the other hand we ignore the bookkeeping costs of mark/restore.
1311          * Not clear if it's worth developing a more refined model.
1312          *
1313          * The number of re-fetches can be estimated approximately as size of
1314          * merge join output minus size of inner relation.      Assume that the
1315          * distinct key values are 1, 2, ..., and denote the number of values of
1316          * each key in the outer relation as m1, m2, ...; in the inner relation,
1317          * n1, n2, ... Then we have
1318          *
1319          * size of join = m1 * n1 + m2 * n2 + ...
1320          *
1321          * number of rescanned tuples = (m1 - 1) * n1 + (m2 - 1) * n2 + ... = m1 *
1322          * n1 + m2 * n2 + ... - (n1 + n2 + ...) = size of join - size of inner
1323          * relation
1324          *
1325          * This equation works correctly for outer tuples having no inner match
1326          * (nk = 0), but not for inner tuples having no outer match (mk = 0); we
1327          * are effectively subtracting those from the number of rescanned tuples,
1328          * when we should not.  Can we do better without expensive selectivity
1329          * computations?
1330          */
1331         if (IsA(outer_path, UniquePath))
1332                 rescannedtuples = 0;
1333         else
1334         {
1335                 rescannedtuples = mergejointuples - inner_path_rows;
1336                 /* Must clamp because of possible underestimate */
1337                 if (rescannedtuples < 0)
1338                         rescannedtuples = 0;
1339         }
1340         /* We'll inflate inner run cost this much to account for rescanning */
1341         rescanratio = 1.0 + (rescannedtuples / inner_path_rows);
1342
1343         /*
1344          * A merge join will stop as soon as it exhausts either input stream
1345          * (unless it's an outer join, in which case the outer side has to be
1346          * scanned all the way anyway).  Estimate fraction of the left and right
1347          * inputs that will actually need to be scanned. We use only the first
1348          * (most significant) merge clause for this purpose.
1349          *
1350          * Since this calculation is somewhat expensive, and will be the same for
1351          * all mergejoin paths associated with the merge clause, we cache the
1352          * results in the RestrictInfo node.  XXX that won't work anymore once
1353          * we support multiple possible orderings!
1354          */
1355         if (mergeclauses && path->jpath.jointype != JOIN_FULL)
1356         {
1357                 firstclause = (RestrictInfo *) linitial(mergeclauses);
1358                 if (firstclause->left_mergescansel < 0) /* not computed yet? */
1359                         mergejoinscansel(root, (Node *) firstclause->clause,
1360                                                          mergeFamilies[0],
1361                                                          mergeStrategies[0],
1362                                                          &firstclause->left_mergescansel,
1363                                                          &firstclause->right_mergescansel);
1364
1365                 if (bms_is_subset(firstclause->left_relids, outer_path->parent->relids))
1366                 {
1367                         /* left side of clause is outer */
1368                         outerscansel = firstclause->left_mergescansel;
1369                         innerscansel = firstclause->right_mergescansel;
1370                 }
1371                 else
1372                 {
1373                         /* left side of clause is inner */
1374                         outerscansel = firstclause->right_mergescansel;
1375                         innerscansel = firstclause->left_mergescansel;
1376                 }
1377                 if (path->jpath.jointype == JOIN_LEFT)
1378                         outerscansel = 1.0;
1379                 else if (path->jpath.jointype == JOIN_RIGHT)
1380                         innerscansel = 1.0;
1381         }
1382         else
1383         {
1384                 /* cope with clauseless or full mergejoin */
1385                 outerscansel = innerscansel = 1.0;
1386         }
1387
1388         /* convert selectivity to row count; must scan at least one row */
1389         outer_rows = clamp_row_est(outer_path_rows * outerscansel);
1390         inner_rows = clamp_row_est(inner_path_rows * innerscansel);
1391
1392         /*
1393          * Readjust scan selectivities to account for above rounding.  This is
1394          * normally an insignificant effect, but when there are only a few rows in
1395          * the inputs, failing to do this makes for a large percentage error.
1396          */
1397         outerscansel = outer_rows / outer_path_rows;
1398         innerscansel = inner_rows / inner_path_rows;
1399
1400         /* cost of source data */
1401
1402         if (outersortkeys)                      /* do we need to sort outer? */
1403         {
1404                 cost_sort(&sort_path,
1405                                   root,
1406                                   outersortkeys,
1407                                   outer_path->total_cost,
1408                                   outer_path_rows,
1409                                   outer_path->parent->width);
1410                 startup_cost += sort_path.startup_cost;
1411                 run_cost += (sort_path.total_cost - sort_path.startup_cost)
1412                         * outerscansel;
1413         }
1414         else
1415         {
1416                 startup_cost += outer_path->startup_cost;
1417                 run_cost += (outer_path->total_cost - outer_path->startup_cost)
1418                         * outerscansel;
1419         }
1420
1421         if (innersortkeys)                      /* do we need to sort inner? */
1422         {
1423                 cost_sort(&sort_path,
1424                                   root,
1425                                   innersortkeys,
1426                                   inner_path->total_cost,
1427                                   inner_path_rows,
1428                                   inner_path->parent->width);
1429                 startup_cost += sort_path.startup_cost;
1430                 run_cost += (sort_path.total_cost - sort_path.startup_cost)
1431                         * innerscansel * rescanratio;
1432         }
1433         else
1434         {
1435                 startup_cost += inner_path->startup_cost;
1436                 run_cost += (inner_path->total_cost - inner_path->startup_cost)
1437                         * innerscansel * rescanratio;
1438         }
1439
1440         /* CPU costs */
1441
1442         /*
1443          * If we're doing JOIN_IN then we will stop outputting inner tuples for an
1444          * outer tuple as soon as we have one match.  Account for the effects of
1445          * this by scaling down the cost estimates in proportion to the expected
1446          * output size.  (This assumes that all the quals attached to the join are
1447          * IN quals, which should be true.)
1448          */
1449         joininfactor = join_in_selectivity(&path->jpath, root);
1450
1451         /*
1452          * The number of tuple comparisons needed is approximately number of outer
1453          * rows plus number of inner rows plus number of rescanned tuples (can we
1454          * refine this?).  At each one, we need to evaluate the mergejoin quals.
1455          * NOTE: JOIN_IN mode does not save any work here, so do NOT include
1456          * joininfactor.
1457          */
1458         startup_cost += merge_qual_cost.startup;
1459         run_cost += merge_qual_cost.per_tuple *
1460                 (outer_rows + inner_rows * rescanratio);
1461
1462         /*
1463          * For each tuple that gets through the mergejoin proper, we charge
1464          * cpu_tuple_cost plus the cost of evaluating additional restriction
1465          * clauses that are to be applied at the join.  (This is pessimistic since
1466          * not all of the quals may get evaluated at each tuple.)  This work is
1467          * skipped in JOIN_IN mode, so apply the factor.
1468          */
1469         startup_cost += qp_qual_cost.startup;
1470         cpu_per_tuple = cpu_tuple_cost + qp_qual_cost.per_tuple;
1471         run_cost += cpu_per_tuple * mergejointuples * joininfactor;
1472
1473         path->jpath.path.startup_cost = startup_cost;
1474         path->jpath.path.total_cost = startup_cost + run_cost;
1475 }
1476
1477 /*
1478  * cost_hashjoin
1479  *        Determines and returns the cost of joining two relations using the
1480  *        hash join algorithm.
1481  *
1482  * 'path' is already filled in except for the cost fields
1483  *
1484  * Note: path's hashclauses should be a subset of the joinrestrictinfo list
1485  */
1486 void
1487 cost_hashjoin(HashPath *path, PlannerInfo *root)
1488 {
1489         Path       *outer_path = path->jpath.outerjoinpath;
1490         Path       *inner_path = path->jpath.innerjoinpath;
1491         List       *hashclauses = path->path_hashclauses;
1492         Cost            startup_cost = 0;
1493         Cost            run_cost = 0;
1494         Cost            cpu_per_tuple;
1495         Selectivity hash_selec;
1496         QualCost        hash_qual_cost;
1497         QualCost        qp_qual_cost;
1498         double          hashjointuples;
1499         double          outer_path_rows = PATH_ROWS(outer_path);
1500         double          inner_path_rows = PATH_ROWS(inner_path);
1501         int                     num_hashclauses = list_length(hashclauses);
1502         int                     numbuckets;
1503         int                     numbatches;
1504         double          virtualbuckets;
1505         Selectivity innerbucketsize;
1506         Selectivity joininfactor;
1507         ListCell   *hcl;
1508
1509         if (!enable_hashjoin)
1510                 startup_cost += disable_cost;
1511
1512         /*
1513          * Compute cost and selectivity of the hashquals and qpquals (other
1514          * restriction clauses) separately.  We use approx_selectivity here for
1515          * speed --- in most cases, any errors won't affect the result much.
1516          *
1517          * Note: it's probably bogus to use the normal selectivity calculation
1518          * here when either the outer or inner path is a UniquePath.
1519          */
1520         hash_selec = approx_selectivity(root, hashclauses,
1521                                                                         path->jpath.jointype);
1522         cost_qual_eval(&hash_qual_cost, hashclauses);
1523         cost_qual_eval(&qp_qual_cost, path->jpath.joinrestrictinfo);
1524         qp_qual_cost.startup -= hash_qual_cost.startup;
1525         qp_qual_cost.per_tuple -= hash_qual_cost.per_tuple;
1526
1527         /* approx # tuples passing the hash quals */
1528         hashjointuples = clamp_row_est(hash_selec * outer_path_rows * inner_path_rows);
1529
1530         /* cost of source data */
1531         startup_cost += outer_path->startup_cost;
1532         run_cost += outer_path->total_cost - outer_path->startup_cost;
1533         startup_cost += inner_path->total_cost;
1534
1535         /*
1536          * Cost of computing hash function: must do it once per input tuple. We
1537          * charge one cpu_operator_cost for each column's hash function.  Also,
1538          * tack on one cpu_tuple_cost per inner row, to model the costs of
1539          * inserting the row into the hashtable.
1540          *
1541          * XXX when a hashclause is more complex than a single operator, we really
1542          * should charge the extra eval costs of the left or right side, as
1543          * appropriate, here.  This seems more work than it's worth at the moment.
1544          */
1545         startup_cost += (cpu_operator_cost * num_hashclauses + cpu_tuple_cost)
1546                 * inner_path_rows;
1547         run_cost += cpu_operator_cost * num_hashclauses * outer_path_rows;
1548
1549         /* Get hash table size that executor would use for inner relation */
1550         ExecChooseHashTableSize(inner_path_rows,
1551                                                         inner_path->parent->width,
1552                                                         &numbuckets,
1553                                                         &numbatches);
1554         virtualbuckets = (double) numbuckets *(double) numbatches;
1555
1556         /*
1557          * Determine bucketsize fraction for inner relation.  We use the smallest
1558          * bucketsize estimated for any individual hashclause; this is undoubtedly
1559          * conservative.
1560          *
1561          * BUT: if inner relation has been unique-ified, we can assume it's good
1562          * for hashing.  This is important both because it's the right answer, and
1563          * because we avoid contaminating the cache with a value that's wrong for
1564          * non-unique-ified paths.
1565          */
1566         if (IsA(inner_path, UniquePath))
1567                 innerbucketsize = 1.0 / virtualbuckets;
1568         else
1569         {
1570                 innerbucketsize = 1.0;
1571                 foreach(hcl, hashclauses)
1572                 {
1573                         RestrictInfo *restrictinfo = (RestrictInfo *) lfirst(hcl);
1574                         Selectivity thisbucketsize;
1575
1576                         Assert(IsA(restrictinfo, RestrictInfo));
1577
1578                         /*
1579                          * First we have to figure out which side of the hashjoin clause
1580                          * is the inner side.
1581                          *
1582                          * Since we tend to visit the same clauses over and over when
1583                          * planning a large query, we cache the bucketsize estimate in the
1584                          * RestrictInfo node to avoid repeated lookups of statistics.
1585                          */
1586                         if (bms_is_subset(restrictinfo->right_relids,
1587                                                           inner_path->parent->relids))
1588                         {
1589                                 /* righthand side is inner */
1590                                 thisbucketsize = restrictinfo->right_bucketsize;
1591                                 if (thisbucketsize < 0)
1592                                 {
1593                                         /* not cached yet */
1594                                         thisbucketsize =
1595                                                 estimate_hash_bucketsize(root,
1596                                                                                    get_rightop(restrictinfo->clause),
1597                                                                                                  virtualbuckets);
1598                                         restrictinfo->right_bucketsize = thisbucketsize;
1599                                 }
1600                         }
1601                         else
1602                         {
1603                                 Assert(bms_is_subset(restrictinfo->left_relids,
1604                                                                          inner_path->parent->relids));
1605                                 /* lefthand side is inner */
1606                                 thisbucketsize = restrictinfo->left_bucketsize;
1607                                 if (thisbucketsize < 0)
1608                                 {
1609                                         /* not cached yet */
1610                                         thisbucketsize =
1611                                                 estimate_hash_bucketsize(root,
1612                                                                                         get_leftop(restrictinfo->clause),
1613                                                                                                  virtualbuckets);
1614                                         restrictinfo->left_bucketsize = thisbucketsize;
1615                                 }
1616                         }
1617
1618                         if (innerbucketsize > thisbucketsize)
1619                                 innerbucketsize = thisbucketsize;
1620                 }
1621         }
1622
1623         /*
1624          * If inner relation is too big then we will need to "batch" the join,
1625          * which implies writing and reading most of the tuples to disk an extra
1626          * time.  Charge seq_page_cost per page, since the I/O should be nice and
1627          * sequential.  Writing the inner rel counts as startup cost,
1628          * all the rest as run cost.
1629          */
1630         if (numbatches > 1)
1631         {
1632                 double          outerpages = page_size(outer_path_rows,
1633                                                                                    outer_path->parent->width);
1634                 double          innerpages = page_size(inner_path_rows,
1635                                                                                    inner_path->parent->width);
1636
1637                 startup_cost += seq_page_cost * innerpages;
1638                 run_cost += seq_page_cost * (innerpages + 2 * outerpages);
1639         }
1640
1641         /* CPU costs */
1642
1643         /*
1644          * If we're doing JOIN_IN then we will stop comparing inner tuples to an
1645          * outer tuple as soon as we have one match.  Account for the effects of
1646          * this by scaling down the cost estimates in proportion to the expected
1647          * output size.  (This assumes that all the quals attached to the join are
1648          * IN quals, which should be true.)
1649          */
1650         joininfactor = join_in_selectivity(&path->jpath, root);
1651
1652         /*
1653          * The number of tuple comparisons needed is the number of outer tuples
1654          * times the typical number of tuples in a hash bucket, which is the inner
1655          * relation size times its bucketsize fraction.  At each one, we need to
1656          * evaluate the hashjoin quals.  But actually, charging the full qual eval
1657          * cost at each tuple is pessimistic, since we don't evaluate the quals
1658          * unless the hash values match exactly.  For lack of a better idea, halve
1659          * the cost estimate to allow for that.
1660          */
1661         startup_cost += hash_qual_cost.startup;
1662         run_cost += hash_qual_cost.per_tuple *
1663                 outer_path_rows * clamp_row_est(inner_path_rows * innerbucketsize) *
1664                 joininfactor * 0.5;
1665
1666         /*
1667          * For each tuple that gets through the hashjoin proper, we charge
1668          * cpu_tuple_cost plus the cost of evaluating additional restriction
1669          * clauses that are to be applied at the join.  (This is pessimistic since
1670          * not all of the quals may get evaluated at each tuple.)
1671          */
1672         startup_cost += qp_qual_cost.startup;
1673         cpu_per_tuple = cpu_tuple_cost + qp_qual_cost.per_tuple;
1674         run_cost += cpu_per_tuple * hashjointuples * joininfactor;
1675
1676         path->jpath.path.startup_cost = startup_cost;
1677         path->jpath.path.total_cost = startup_cost + run_cost;
1678 }
1679
1680
1681 /*
1682  * cost_qual_eval
1683  *              Estimate the CPU costs of evaluating a WHERE clause.
1684  *              The input can be either an implicitly-ANDed list of boolean
1685  *              expressions, or a list of RestrictInfo nodes.
1686  *              The result includes both a one-time (startup) component,
1687  *              and a per-evaluation component.
1688  */
1689 void
1690 cost_qual_eval(QualCost *cost, List *quals)
1691 {
1692         ListCell   *l;
1693
1694         cost->startup = 0;
1695         cost->per_tuple = 0;
1696
1697         /* We don't charge any cost for the implicit ANDing at top level ... */
1698
1699         foreach(l, quals)
1700         {
1701                 Node       *qual = (Node *) lfirst(l);
1702
1703                 /*
1704                  * RestrictInfo nodes contain an eval_cost field reserved for this
1705                  * routine's use, so that it's not necessary to evaluate the qual
1706                  * clause's cost more than once.  If the clause's cost hasn't been
1707                  * computed yet, the field's startup value will contain -1.
1708                  *
1709                  * If the RestrictInfo is marked pseudoconstant, it will be tested
1710                  * only once, so treat its cost as all startup cost.
1711                  */
1712                 if (qual && IsA(qual, RestrictInfo))
1713                 {
1714                         RestrictInfo *rinfo = (RestrictInfo *) qual;
1715
1716                         if (rinfo->eval_cost.startup < 0)
1717                         {
1718                                 rinfo->eval_cost.startup = 0;
1719                                 rinfo->eval_cost.per_tuple = 0;
1720                                 cost_qual_eval_walker((Node *) rinfo->clause,
1721                                                                           &rinfo->eval_cost);
1722                                 if (rinfo->pseudoconstant)
1723                                 {
1724                                         /* count one execution during startup */
1725                                         rinfo->eval_cost.startup += rinfo->eval_cost.per_tuple;
1726                                         rinfo->eval_cost.per_tuple = 0;
1727                                 }
1728                         }
1729                         cost->startup += rinfo->eval_cost.startup;
1730                         cost->per_tuple += rinfo->eval_cost.per_tuple;
1731                 }
1732                 else
1733                 {
1734                         /* If it's a bare expression, must always do it the hard way */
1735                         cost_qual_eval_walker(qual, cost);
1736                 }
1737         }
1738 }
1739
1740 static bool
1741 cost_qual_eval_walker(Node *node, QualCost *total)
1742 {
1743         if (node == NULL)
1744                 return false;
1745
1746         /*
1747          * Our basic strategy is to charge one cpu_operator_cost for each operator
1748          * or function node in the given tree.  Vars and Consts are charged zero,
1749          * and so are boolean operators (AND, OR, NOT). Simplistic, but a lot
1750          * better than no model at all.
1751          *
1752          * Should we try to account for the possibility of short-circuit
1753          * evaluation of AND/OR?
1754          */
1755         if (IsA(node, FuncExpr) ||
1756                 IsA(node, OpExpr) ||
1757                 IsA(node, DistinctExpr) ||
1758                 IsA(node, NullIfExpr))
1759                 total->per_tuple += cpu_operator_cost;
1760         else if (IsA(node, ScalarArrayOpExpr))
1761         {
1762                 /*
1763                  * Estimate that the operator will be applied to about half of the
1764                  * array elements before the answer is determined.
1765                  */
1766                 ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) node;
1767                 Node       *arraynode = (Node *) lsecond(saop->args);
1768
1769                 total->per_tuple +=
1770                         cpu_operator_cost * estimate_array_length(arraynode) * 0.5;
1771         }
1772         else if (IsA(node, RowCompareExpr))
1773         {
1774                 /* Conservatively assume we will check all the columns */
1775                 RowCompareExpr *rcexpr = (RowCompareExpr *) node;
1776
1777                 total->per_tuple += cpu_operator_cost * list_length(rcexpr->opnos);
1778         }
1779         else if (IsA(node, SubLink))
1780         {
1781                 /* This routine should not be applied to un-planned expressions */
1782                 elog(ERROR, "cannot handle unplanned sub-select");
1783         }
1784         else if (IsA(node, SubPlan))
1785         {
1786                 /*
1787                  * A subplan node in an expression typically indicates that the
1788                  * subplan will be executed on each evaluation, so charge accordingly.
1789                  * (Sub-selects that can be executed as InitPlans have already been
1790                  * removed from the expression.)
1791                  *
1792                  * An exception occurs when we have decided we can implement the
1793                  * subplan by hashing.
1794                  */
1795                 SubPlan    *subplan = (SubPlan *) node;
1796                 Plan       *plan = subplan->plan;
1797
1798                 if (subplan->useHashTable)
1799                 {
1800                         /*
1801                          * If we are using a hash table for the subquery outputs, then the
1802                          * cost of evaluating the query is a one-time cost. We charge one
1803                          * cpu_operator_cost per tuple for the work of loading the
1804                          * hashtable, too.
1805                          */
1806                         total->startup += plan->total_cost +
1807                                 cpu_operator_cost * plan->plan_rows;
1808
1809                         /*
1810                          * The per-tuple costs include the cost of evaluating the lefthand
1811                          * expressions, plus the cost of probing the hashtable. Recursion
1812                          * into the testexpr will handle the lefthand expressions
1813                          * properly, and will count one cpu_operator_cost for each
1814                          * comparison operator.  That is probably too low for the probing
1815                          * cost, but it's hard to make a better estimate, so live with it
1816                          * for now.
1817                          */
1818                 }
1819                 else
1820                 {
1821                         /*
1822                          * Otherwise we will be rescanning the subplan output on each
1823                          * evaluation.  We need to estimate how much of the output we will
1824                          * actually need to scan.  NOTE: this logic should agree with the
1825                          * estimates used by make_subplan() in plan/subselect.c.
1826                          */
1827                         Cost            plan_run_cost = plan->total_cost - plan->startup_cost;
1828
1829                         if (subplan->subLinkType == EXISTS_SUBLINK)
1830                         {
1831                                 /* we only need to fetch 1 tuple */
1832                                 total->per_tuple += plan_run_cost / plan->plan_rows;
1833                         }
1834                         else if (subplan->subLinkType == ALL_SUBLINK ||
1835                                          subplan->subLinkType == ANY_SUBLINK)
1836                         {
1837                                 /* assume we need 50% of the tuples */
1838                                 total->per_tuple += 0.50 * plan_run_cost;
1839                                 /* also charge a cpu_operator_cost per row examined */
1840                                 total->per_tuple += 0.50 * plan->plan_rows * cpu_operator_cost;
1841                         }
1842                         else
1843                         {
1844                                 /* assume we need all tuples */
1845                                 total->per_tuple += plan_run_cost;
1846                         }
1847
1848                         /*
1849                          * Also account for subplan's startup cost. If the subplan is
1850                          * uncorrelated or undirect correlated, AND its topmost node is a
1851                          * Sort or Material node, assume that we'll only need to pay its
1852                          * startup cost once; otherwise assume we pay the startup cost
1853                          * every time.
1854                          */
1855                         if (subplan->parParam == NIL &&
1856                                 (IsA(plan, Sort) ||
1857                                  IsA(plan, Material)))
1858                                 total->startup += plan->startup_cost;
1859                         else
1860                                 total->per_tuple += plan->startup_cost;
1861                 }
1862         }
1863
1864         return expression_tree_walker(node, cost_qual_eval_walker,
1865                                                                   (void *) total);
1866 }
1867
1868
1869 /*
1870  * approx_selectivity
1871  *              Quick-and-dirty estimation of clause selectivities.
1872  *              The input can be either an implicitly-ANDed list of boolean
1873  *              expressions, or a list of RestrictInfo nodes (typically the latter).
1874  *
1875  * This is quick-and-dirty because we bypass clauselist_selectivity, and
1876  * simply multiply the independent clause selectivities together.  Now
1877  * clauselist_selectivity often can't do any better than that anyhow, but
1878  * for some situations (such as range constraints) it is smarter.  However,
1879  * we can't effectively cache the results of clauselist_selectivity, whereas
1880  * the individual clause selectivities can be and are cached.
1881  *
1882  * Since we are only using the results to estimate how many potential
1883  * output tuples are generated and passed through qpqual checking, it
1884  * seems OK to live with the approximation.
1885  */
1886 static Selectivity
1887 approx_selectivity(PlannerInfo *root, List *quals, JoinType jointype)
1888 {
1889         Selectivity total = 1.0;
1890         ListCell   *l;
1891
1892         foreach(l, quals)
1893         {
1894                 Node       *qual = (Node *) lfirst(l);
1895
1896                 /* Note that clause_selectivity will be able to cache its result */
1897                 total *= clause_selectivity(root, qual, 0, jointype);
1898         }
1899         return total;
1900 }
1901
1902
1903 /*
1904  * set_baserel_size_estimates
1905  *              Set the size estimates for the given base relation.
1906  *
1907  * The rel's targetlist and restrictinfo list must have been constructed
1908  * already.
1909  *
1910  * We set the following fields of the rel node:
1911  *      rows: the estimated number of output tuples (after applying
1912  *                restriction clauses).
1913  *      width: the estimated average output tuple width in bytes.
1914  *      baserestrictcost: estimated cost of evaluating baserestrictinfo clauses.
1915  */
1916 void
1917 set_baserel_size_estimates(PlannerInfo *root, RelOptInfo *rel)
1918 {
1919         double          nrows;
1920
1921         /* Should only be applied to base relations */
1922         Assert(rel->relid > 0);
1923
1924         nrows = rel->tuples *
1925                 clauselist_selectivity(root,
1926                                                            rel->baserestrictinfo,
1927                                                            0,
1928                                                            JOIN_INNER);
1929
1930         rel->rows = clamp_row_est(nrows);
1931
1932         cost_qual_eval(&rel->baserestrictcost, rel->baserestrictinfo);
1933
1934         set_rel_width(root, rel);
1935 }
1936
1937 /*
1938  * set_joinrel_size_estimates
1939  *              Set the size estimates for the given join relation.
1940  *
1941  * The rel's targetlist must have been constructed already, and a
1942  * restriction clause list that matches the given component rels must
1943  * be provided.
1944  *
1945  * Since there is more than one way to make a joinrel for more than two
1946  * base relations, the results we get here could depend on which component
1947  * rel pair is provided.  In theory we should get the same answers no matter
1948  * which pair is provided; in practice, since the selectivity estimation
1949  * routines don't handle all cases equally well, we might not.  But there's
1950  * not much to be done about it.  (Would it make sense to repeat the
1951  * calculations for each pair of input rels that's encountered, and somehow
1952  * average the results?  Probably way more trouble than it's worth.)
1953  *
1954  * It's important that the results for symmetric JoinTypes be symmetric,
1955  * eg, (rel1, rel2, JOIN_LEFT) should produce the same result as (rel2,
1956  * rel1, JOIN_RIGHT).  Also, JOIN_IN should produce the same result as
1957  * JOIN_UNIQUE_INNER, likewise JOIN_REVERSE_IN == JOIN_UNIQUE_OUTER.
1958  *
1959  * We set only the rows field here.  The width field was already set by
1960  * build_joinrel_tlist, and baserestrictcost is not used for join rels.
1961  */
1962 void
1963 set_joinrel_size_estimates(PlannerInfo *root, RelOptInfo *rel,
1964                                                    RelOptInfo *outer_rel,
1965                                                    RelOptInfo *inner_rel,
1966                                                    JoinType jointype,
1967                                                    List *restrictlist)
1968 {
1969         Selectivity jselec;
1970         Selectivity pselec;
1971         double          nrows;
1972         UniquePath *upath;
1973
1974         /*
1975          * Compute joinclause selectivity.      Note that we are only considering
1976          * clauses that become restriction clauses at this join level; we are not
1977          * double-counting them because they were not considered in estimating the
1978          * sizes of the component rels.
1979          *
1980          * For an outer join, we have to distinguish the selectivity of the
1981          * join's own clauses (JOIN/ON conditions) from any clauses that were
1982          * "pushed down".  For inner joins we just count them all as joinclauses.
1983          */
1984         if (IS_OUTER_JOIN(jointype))
1985         {
1986                 List       *joinquals = NIL;
1987                 List       *pushedquals = NIL;
1988                 ListCell   *l;
1989
1990                 /* Grovel through the clauses to separate into two lists */
1991                 foreach(l, restrictlist)
1992                 {
1993                         RestrictInfo *rinfo = (RestrictInfo *) lfirst(l);
1994
1995                         Assert(IsA(rinfo, RestrictInfo));
1996                         if (rinfo->is_pushed_down)
1997                                 pushedquals = lappend(pushedquals, rinfo);
1998                         else
1999                                 joinquals = lappend(joinquals, rinfo);
2000                 }
2001
2002                 /* Get the separate selectivities */
2003                 jselec = clauselist_selectivity(root,
2004                                                                                 joinquals,
2005                                                                                 0,
2006                                                                                 jointype);
2007                 pselec = clauselist_selectivity(root,
2008                                                                                 pushedquals,
2009                                                                                 0,
2010                                                                                 jointype);
2011
2012                 /* Avoid leaking a lot of ListCells */
2013                 list_free(joinquals);
2014                 list_free(pushedquals);
2015         }
2016         else
2017         {
2018                 jselec = clauselist_selectivity(root,
2019                                                                                 restrictlist,
2020                                                                                 0,
2021                                                                                 jointype);
2022                 pselec = 0.0;                   /* not used, keep compiler quiet */
2023         }
2024
2025         /*
2026          * Basically, we multiply size of Cartesian product by selectivity.
2027          *
2028          * If we are doing an outer join, take that into account: the joinqual
2029          * selectivity has to be clamped using the knowledge that the output must
2030          * be at least as large as the non-nullable input.  However, any
2031          * pushed-down quals are applied after the outer join, so their
2032          * selectivity applies fully.
2033          *
2034          * For JOIN_IN and variants, the Cartesian product is figured with respect
2035          * to a unique-ified input, and then we can clamp to the size of the other
2036          * input.
2037          */
2038         switch (jointype)
2039         {
2040                 case JOIN_INNER:
2041                         nrows = outer_rel->rows * inner_rel->rows * jselec;
2042                         break;
2043                 case JOIN_LEFT:
2044                         nrows = outer_rel->rows * inner_rel->rows * jselec;
2045                         if (nrows < outer_rel->rows)
2046                                 nrows = outer_rel->rows;
2047                         nrows *= pselec;
2048                         break;
2049                 case JOIN_RIGHT:
2050                         nrows = outer_rel->rows * inner_rel->rows * jselec;
2051                         if (nrows < inner_rel->rows)
2052                                 nrows = inner_rel->rows;
2053                         nrows *= pselec;
2054                         break;
2055                 case JOIN_FULL:
2056                         nrows = outer_rel->rows * inner_rel->rows * jselec;
2057                         if (nrows < outer_rel->rows)
2058                                 nrows = outer_rel->rows;
2059                         if (nrows < inner_rel->rows)
2060                                 nrows = inner_rel->rows;
2061                         nrows *= pselec;
2062                         break;
2063                 case JOIN_IN:
2064                 case JOIN_UNIQUE_INNER:
2065                         upath = create_unique_path(root, inner_rel,
2066                                                                            inner_rel->cheapest_total_path);
2067                         nrows = outer_rel->rows * upath->rows * jselec;
2068                         if (nrows > outer_rel->rows)
2069                                 nrows = outer_rel->rows;
2070                         break;
2071                 case JOIN_REVERSE_IN:
2072                 case JOIN_UNIQUE_OUTER:
2073                         upath = create_unique_path(root, outer_rel,
2074                                                                            outer_rel->cheapest_total_path);
2075                         nrows = upath->rows * inner_rel->rows * jselec;
2076                         if (nrows > inner_rel->rows)
2077                                 nrows = inner_rel->rows;
2078                         break;
2079                 default:
2080                         elog(ERROR, "unrecognized join type: %d", (int) jointype);
2081                         nrows = 0;                      /* keep compiler quiet */
2082                         break;
2083         }
2084
2085         rel->rows = clamp_row_est(nrows);
2086 }
2087
2088 /*
2089  * join_in_selectivity
2090  *        Determines the factor by which a JOIN_IN join's result is expected
2091  *        to be smaller than an ordinary inner join.
2092  *
2093  * 'path' is already filled in except for the cost fields
2094  */
2095 static Selectivity
2096 join_in_selectivity(JoinPath *path, PlannerInfo *root)
2097 {
2098         RelOptInfo *innerrel;
2099         UniquePath *innerunique;
2100         Selectivity selec;
2101         double          nrows;
2102
2103         /* Return 1.0 whenever it's not JOIN_IN */
2104         if (path->jointype != JOIN_IN)
2105                 return 1.0;
2106
2107         /*
2108          * Return 1.0 if the inner side is already known unique.  The case where
2109          * the inner path is already a UniquePath probably cannot happen in
2110          * current usage, but check it anyway for completeness.  The interesting
2111          * case is where we've determined the inner relation itself is unique,
2112          * which we can check by looking at the rows estimate for its UniquePath.
2113          */
2114         if (IsA(path->innerjoinpath, UniquePath))
2115                 return 1.0;
2116         innerrel = path->innerjoinpath->parent;
2117         innerunique = create_unique_path(root,
2118                                                                          innerrel,
2119                                                                          innerrel->cheapest_total_path);
2120         if (innerunique->rows >= innerrel->rows)
2121                 return 1.0;
2122
2123         /*
2124          * Compute same result set_joinrel_size_estimates would compute for
2125          * JOIN_INNER.  Note that we use the input rels' absolute size estimates,
2126          * not PATH_ROWS() which might be less; if we used PATH_ROWS() we'd be
2127          * double-counting the effects of any join clauses used in input scans.
2128          */
2129         selec = clauselist_selectivity(root,
2130                                                                    path->joinrestrictinfo,
2131                                                                    0,
2132                                                                    JOIN_INNER);
2133         nrows = path->outerjoinpath->parent->rows * innerrel->rows * selec;
2134
2135         nrows = clamp_row_est(nrows);
2136
2137         /* See if it's larger than the actual JOIN_IN size estimate */
2138         if (nrows > path->path.parent->rows)
2139                 return path->path.parent->rows / nrows;
2140         else
2141                 return 1.0;
2142 }
2143
2144 /*
2145  * set_function_size_estimates
2146  *              Set the size estimates for a base relation that is a function call.
2147  *
2148  * The rel's targetlist and restrictinfo list must have been constructed
2149  * already.
2150  *
2151  * We set the same fields as set_baserel_size_estimates.
2152  */
2153 void
2154 set_function_size_estimates(PlannerInfo *root, RelOptInfo *rel)
2155 {
2156         RangeTblEntry *rte;
2157
2158         /* Should only be applied to base relations that are functions */
2159         Assert(rel->relid > 0);
2160         rte = rt_fetch(rel->relid, root->parse->rtable);
2161         Assert(rte->rtekind == RTE_FUNCTION);
2162
2163         /*
2164          * Estimate number of rows the function itself will return.
2165          *
2166          * XXX no idea how to do this yet; but we can at least check whether
2167          * function returns set or not...
2168          */
2169         if (expression_returns_set(rte->funcexpr))
2170                 rel->tuples = 1000;
2171         else
2172                 rel->tuples = 1;
2173
2174         /* Now estimate number of output rows, etc */
2175         set_baserel_size_estimates(root, rel);
2176 }
2177
2178 /*
2179  * set_values_size_estimates
2180  *              Set the size estimates for a base relation that is a values list.
2181  *
2182  * The rel's targetlist and restrictinfo list must have been constructed
2183  * already.
2184  *
2185  * We set the same fields as set_baserel_size_estimates.
2186  */
2187 void
2188 set_values_size_estimates(PlannerInfo *root, RelOptInfo *rel)
2189 {
2190         RangeTblEntry *rte;
2191
2192         /* Should only be applied to base relations that are values lists */
2193         Assert(rel->relid > 0);
2194         rte = rt_fetch(rel->relid, root->parse->rtable);
2195         Assert(rte->rtekind == RTE_VALUES);
2196
2197         /*
2198          * Estimate number of rows the values list will return. We know this
2199          * precisely based on the list length (well, barring set-returning
2200          * functions in list items, but that's a refinement not catered for
2201          * anywhere else either).
2202          */
2203         rel->tuples = list_length(rte->values_lists);
2204
2205         /* Now estimate number of output rows, etc */
2206         set_baserel_size_estimates(root, rel);
2207 }
2208
2209
2210 /*
2211  * set_rel_width
2212  *              Set the estimated output width of a base relation.
2213  *
2214  * NB: this works best on plain relations because it prefers to look at
2215  * real Vars.  It will fail to make use of pg_statistic info when applied
2216  * to a subquery relation, even if the subquery outputs are simple vars
2217  * that we could have gotten info for.  Is it worth trying to be smarter
2218  * about subqueries?
2219  *
2220  * The per-attribute width estimates are cached for possible re-use while
2221  * building join relations.
2222  */
2223 static void
2224 set_rel_width(PlannerInfo *root, RelOptInfo *rel)
2225 {
2226         int32           tuple_width = 0;
2227         ListCell   *tllist;
2228
2229         foreach(tllist, rel->reltargetlist)
2230         {
2231                 Var                *var = (Var *) lfirst(tllist);
2232                 int                     ndx;
2233                 Oid                     relid;
2234                 int32           item_width;
2235
2236                 /* For now, punt on whole-row child Vars */
2237                 if (!IsA(var, Var))
2238                 {
2239                         tuple_width += 32;      /* arbitrary */
2240                         continue;
2241                 }
2242
2243                 ndx = var->varattno - rel->min_attr;
2244
2245                 /*
2246                  * The width probably hasn't been cached yet, but may as well check
2247                  */
2248                 if (rel->attr_widths[ndx] > 0)
2249                 {
2250                         tuple_width += rel->attr_widths[ndx];
2251                         continue;
2252                 }
2253
2254                 relid = getrelid(var->varno, root->parse->rtable);
2255                 if (relid != InvalidOid)
2256                 {
2257                         item_width = get_attavgwidth(relid, var->varattno);
2258                         if (item_width > 0)
2259                         {
2260                                 rel->attr_widths[ndx] = item_width;
2261                                 tuple_width += item_width;
2262                                 continue;
2263                         }
2264                 }
2265
2266                 /*
2267                  * Not a plain relation, or can't find statistics for it. Estimate
2268                  * using just the type info.
2269                  */
2270                 item_width = get_typavgwidth(var->vartype, var->vartypmod);
2271                 Assert(item_width > 0);
2272                 rel->attr_widths[ndx] = item_width;
2273                 tuple_width += item_width;
2274         }
2275         Assert(tuple_width >= 0);
2276         rel->width = tuple_width;
2277 }
2278
2279 /*
2280  * relation_byte_size
2281  *        Estimate the storage space in bytes for a given number of tuples
2282  *        of a given width (size in bytes).
2283  */
2284 static double
2285 relation_byte_size(double tuples, int width)
2286 {
2287         return tuples * (MAXALIGN(width) + MAXALIGN(sizeof(HeapTupleHeaderData)));
2288 }
2289
2290 /*
2291  * page_size
2292  *        Returns an estimate of the number of pages covered by a given
2293  *        number of tuples of a given width (size in bytes).
2294  */
2295 static double
2296 page_size(double tuples, int width)
2297 {
2298         return ceil(relation_byte_size(tuples, width) / BLCKSZ);
2299 }