/*-------------------------------------------------------------------------
 *
 * costsize.c
 *        Routines to compute (and set) relation sizes and path costs
 *
 * Path costs are measured in arbitrary units established by these basic
 * parameters:
 *
 *      seq_page_cost           Cost of a sequential page fetch
 *      random_page_cost        Cost of a non-sequential page fetch
 *      cpu_tuple_cost          Cost of typical CPU time to process a tuple
 *      cpu_index_tuple_cost  Cost of typical CPU time to process an index tuple
 *      cpu_operator_cost       Cost of CPU time to execute an operator or function
 *
 * We expect that the kernel will typically do some amount of read-ahead
 * optimization; this in conjunction with seek costs means that seq_page_cost
 * is normally considerably less than random_page_cost.  (However, if the
 * database is fully cached in RAM, it is reasonable to set them equal.)
 *
 * We also use a rough estimate "effective_cache_size" of the number of
 * disk pages in Postgres + OS-level disk cache.  (We can't simply use
 * NBuffers for this purpose because that would ignore the effects of
 * the kernel's disk cache.)
 *
 * Obviously, taking constants for these values is an oversimplification,
 * but it's tough enough to get any useful estimates even at this level of
 * detail.  Note that all of these parameters are user-settable, in case
 * the default values are drastically off for a particular platform.
 *
 * We compute two separate costs for each path:
 *              total_cost: total estimated cost to fetch all tuples
 *              startup_cost: cost that is expended before first tuple is fetched
 * In some scenarios, such as when there is a LIMIT or we are implementing
 * an EXISTS(...) sub-select, it is not necessary to fetch all tuples of the
 * path's result.  A caller can estimate the cost of fetching a partial
 * result by interpolating between startup_cost and total_cost.  In detail:
 *              actual_cost = startup_cost +
 *                      (total_cost - startup_cost) * tuples_to_fetch / path->parent->rows;
 * Note that a base relation's rows count (and, by extension, plan_rows for
 * plan nodes below the LIMIT node) is set without regard to any LIMIT, so
 * that this equation works properly.  (Also, these routines guarantee not to
 * set the rows count to zero, so there will be no zero divide.)  The LIMIT is
 * applied as a top-level plan node.
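 *
 * As a hypothetical illustration of that interpolation: if startup_cost = 10,
 * total_cost = 1010, and the path is expected to return 1000 rows, then
 * fetching only the first 100 rows is estimated to cost about
 * 10 + (1010 - 10) * 100/1000 = 110.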
 *
 * For largely historical reasons, most of the routines in this module use
 * the passed result Path only to store their startup_cost and total_cost
 * results into.  All the input data they need is passed as separate
 * parameters, even though much of it could be extracted from the Path.
 * An exception is made for the cost_XXXjoin() routines, which expect all
 * the non-cost fields of the passed XXXPath to be filled in.
 *
 *
 * Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
 *        $PostgreSQL: pgsql/src/backend/optimizer/path/costsize.c,v 1.176 2007/01/22 01:35:20 tgl Exp $
 *
 *-------------------------------------------------------------------------
 */

#include "postgres.h"

#include <math.h>

#include "executor/nodeHash.h"
#include "miscadmin.h"
#include "optimizer/clauses.h"
#include "optimizer/cost.h"
#include "optimizer/pathnode.h"
#include "optimizer/planmain.h"
#include "parser/parsetree.h"
#include "utils/lsyscache.h"
#include "utils/selfuncs.h"
#include "utils/tuplesort.h"


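/*
 * Note: the divisor 0.693147180559945 is ln(2), so LOG2(x) is the base-2
 * logarithm of x; it is used below to estimate tuple-comparison counts.
 */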
#define LOG2(x)  (log(x) / 0.693147180559945)

/*
 * Some Paths return less than the nominal number of rows of their parent
 * relations; join nodes need to do this to get the correct input count:
 */
#define PATH_ROWS(path) \
        (IsA(path, UniquePath) ? \
         ((UniquePath *) (path))->rows : \
         (path)->parent->rows)


double          seq_page_cost = DEFAULT_SEQ_PAGE_COST;
double          random_page_cost = DEFAULT_RANDOM_PAGE_COST;
double          cpu_tuple_cost = DEFAULT_CPU_TUPLE_COST;
double          cpu_index_tuple_cost = DEFAULT_CPU_INDEX_TUPLE_COST;
double          cpu_operator_cost = DEFAULT_CPU_OPERATOR_COST;

int                     effective_cache_size = DEFAULT_EFFECTIVE_CACHE_SIZE;

Cost            disable_cost = 100000000.0;

bool            enable_seqscan = true;
bool            enable_indexscan = true;
bool            enable_bitmapscan = true;
bool            enable_tidscan = true;
bool            enable_sort = true;
bool            enable_hashagg = true;
bool            enable_nestloop = true;
bool            enable_mergejoin = true;
bool            enable_hashjoin = true;


static bool cost_qual_eval_walker(Node *node, QualCost *total);
static Selectivity approx_selectivity(PlannerInfo *root, List *quals,
                                   JoinType jointype);
static Selectivity join_in_selectivity(JoinPath *path, PlannerInfo *root);
static void set_rel_width(PlannerInfo *root, RelOptInfo *rel);
static double relation_byte_size(double tuples, int width);
static double page_size(double tuples, int width);


/*
 * clamp_row_est
 *              Force a row-count estimate to a sane value.
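 *
 *              For illustration: clamp_row_est(0.3) returns 1.0, and
 *              clamp_row_est(123.4) returns 123.0 (rounded to the nearest integer).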
 */
double
clamp_row_est(double nrows)
{
        /*
         * Force estimate to be at least one row, to make explain output look
         * better and to avoid possible divide-by-zero when interpolating costs.
         * Make it an integer, too.
         */
        if (nrows <= 1.0)
                nrows = 1.0;
        else
                nrows = rint(nrows);

        return nrows;
}


/*
 * cost_seqscan
 *        Determines and returns the cost of scanning a relation sequentially.
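 *
 *        For illustration, with the default cost parameters (seq_page_cost = 1.0,
 *        cpu_tuple_cost = 0.01) and no restriction clauses, a 1000-page table
 *        holding 100000 tuples costs 1000 * 1.0 + 100000 * 0.01 = 2000 to scan.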
 */
void
cost_seqscan(Path *path, PlannerInfo *root,
                         RelOptInfo *baserel)
{
        Cost            startup_cost = 0;
        Cost            run_cost = 0;
        Cost            cpu_per_tuple;

        /* Should only be applied to base relations */
        Assert(baserel->relid > 0);
        Assert(baserel->rtekind == RTE_RELATION);

        if (!enable_seqscan)
                startup_cost += disable_cost;

        /*
         * disk costs
         */
        run_cost += seq_page_cost * baserel->pages;

        /* CPU costs */
        startup_cost += baserel->baserestrictcost.startup;
        cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
        run_cost += cpu_per_tuple * baserel->tuples;

        path->startup_cost = startup_cost;
        path->total_cost = startup_cost + run_cost;
}

/*
 * cost_index
 *        Determines and returns the cost of scanning a relation using an index.
 *
 * 'index' is the index to be used
 * 'indexQuals' is the list of applicable qual clauses (implicit AND semantics)
 * 'outer_rel' is the outer relation when we are considering using the index
 *              scan as the inside of a nestloop join (hence, some of the indexQuals
 *              are join clauses, and we should expect repeated scans of the index);
 *              NULL for a plain index scan
 *
 * cost_index() takes an IndexPath not just a Path, because it sets a few
 * additional fields of the IndexPath besides startup_cost and total_cost.
 * These fields are needed if the IndexPath is used in a BitmapIndexScan.
 *
 * NOTE: 'indexQuals' must contain only clauses usable as index restrictions.
 * Any additional quals evaluated as qpquals may reduce the number of returned
 * tuples, but they won't reduce the number of tuples we have to fetch from
 * the table, so they don't reduce the scan cost.
 *
 * NOTE: as of 8.0, indexQuals is a list of RestrictInfo nodes, where formerly
 * it was a list of bare clause expressions.
 */
void
cost_index(IndexPath *path, PlannerInfo *root,
                   IndexOptInfo *index,
                   List *indexQuals,
                   RelOptInfo *outer_rel)
{
        RelOptInfo *baserel = index->rel;
        Cost            startup_cost = 0;
        Cost            run_cost = 0;
        Cost            indexStartupCost;
        Cost            indexTotalCost;
        Selectivity indexSelectivity;
        double          indexCorrelation,
                                csquared;
        Cost            min_IO_cost,
                                max_IO_cost;
        Cost            cpu_per_tuple;
        double          tuples_fetched;
        double          pages_fetched;

        /* Should only be applied to base relations */
        Assert(IsA(baserel, RelOptInfo) &&
                   IsA(index, IndexOptInfo));
        Assert(baserel->relid > 0);
        Assert(baserel->rtekind == RTE_RELATION);

        if (!enable_indexscan)
                startup_cost += disable_cost;

        /*
         * Call index-access-method-specific code to estimate the processing cost
         * for scanning the index, as well as the selectivity of the index (ie,
         * the fraction of main-table tuples we will have to retrieve) and its
         * correlation to the main-table tuple order.
         */
        OidFunctionCall8(index->amcostestimate,
                                         PointerGetDatum(root),
                                         PointerGetDatum(index),
                                         PointerGetDatum(indexQuals),
                                         PointerGetDatum(outer_rel),
                                         PointerGetDatum(&indexStartupCost),
                                         PointerGetDatum(&indexTotalCost),
                                         PointerGetDatum(&indexSelectivity),
                                         PointerGetDatum(&indexCorrelation));

        /*
         * Save amcostestimate's results for possible use in bitmap scan planning.
         * We don't bother to save indexStartupCost or indexCorrelation, because a
         * bitmap scan doesn't care about either.
         */
        path->indextotalcost = indexTotalCost;
        path->indexselectivity = indexSelectivity;

        /* all costs for touching index itself included here */
        startup_cost += indexStartupCost;
        run_cost += indexTotalCost - indexStartupCost;

        /* estimate number of main-table tuples fetched */
        tuples_fetched = clamp_row_est(indexSelectivity * baserel->tuples);

        /*----------
         * Estimate number of main-table pages fetched, and compute I/O cost.
         *
         * When the index ordering is uncorrelated with the table ordering,
         * we use an approximation proposed by Mackert and Lohman (see
         * index_pages_fetched() for details) to compute the number of pages
         * fetched, and then charge random_page_cost per page fetched.
         *
         * When the index ordering is exactly correlated with the table ordering
         * (just after a CLUSTER, for example), the number of pages fetched should
         * be exactly selectivity * table_size.  What's more, all but the first
         * will be sequential fetches, not the random fetches that occur in the
         * uncorrelated case.  So if the number of pages is more than 1, we
         * ought to charge
         *              random_page_cost + (pages_fetched - 1) * seq_page_cost
         * For partially-correlated indexes, we ought to charge somewhere between
         * these two estimates.  We currently interpolate linearly between the
         * estimates based on the correlation squared (XXX is that appropriate?).
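         *
         * As a hypothetical illustration with the default page costs: if the
         * uncorrelated estimate is 400 pages (max_IO_cost = 400 * 4.0 = 1600),
         * the fully-correlated estimate is 100 pages (min_IO_cost = 4.0 +
         * 99 * 1.0 = 103), and indexCorrelation = 0.5, then csquared = 0.25
         * and the interpolated I/O charge is 1600 + 0.25 * (103 - 1600) =
         * 1225.75.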
         *----------
         */
        if (outer_rel != NULL && outer_rel->rows > 1)
        {
                /*
                 * For repeated indexscans, the appropriate estimate for the
                 * uncorrelated case is to scale up the number of tuples fetched in
                 * the Mackert and Lohman formula by the number of scans, so that we
                 * estimate the number of pages fetched by all the scans; then
                 * pro-rate the costs for one scan.  In this case we assume all the
                 * fetches are random accesses.
                 */
                double          num_scans = outer_rel->rows;

                pages_fetched = index_pages_fetched(tuples_fetched * num_scans,
                                                                                        baserel->pages,
                                                                                        (double) index->pages,
                                                                                        root);

                max_IO_cost = (pages_fetched * random_page_cost) / num_scans;

                /*
                 * In the perfectly correlated case, the number of pages touched
                 * by each scan is selectivity * table_size, and we can use the
                 * Mackert and Lohman formula at the page level to estimate how
                 * much work is saved by caching across scans.  We still assume
                 * all the fetches are random, though, which is an overestimate
                 * that's hard to correct for without double-counting the cache
                 * effects.  (But in most cases where such a plan is actually
                 * interesting, only one page would get fetched per scan anyway,
                 * so it shouldn't matter much.)
                 */
                pages_fetched = ceil(indexSelectivity * (double) baserel->pages);

                pages_fetched = index_pages_fetched(pages_fetched * num_scans,
                                                                                        baserel->pages,
                                                                                        (double) index->pages,
                                                                                        root);

                min_IO_cost = (pages_fetched * random_page_cost) / num_scans;
        }
        else
        {
                /*
                 * Normal case: apply the Mackert and Lohman formula, and then
                 * interpolate between that and the correlation-derived result.
                 */
                pages_fetched = index_pages_fetched(tuples_fetched,
                                                                                        baserel->pages,
                                                                                        (double) index->pages,
                                                                                        root);

                /* max_IO_cost is for the perfectly uncorrelated case (csquared=0) */
                max_IO_cost = pages_fetched * random_page_cost;

                /* min_IO_cost is for the perfectly correlated case (csquared=1) */
                pages_fetched = ceil(indexSelectivity * (double) baserel->pages);
                min_IO_cost = random_page_cost;
                if (pages_fetched > 1)
                        min_IO_cost += (pages_fetched - 1) * seq_page_cost;
        }

        /*
         * Now interpolate based on estimated index order correlation to get
         * total disk I/O cost for main table accesses.
         */
        csquared = indexCorrelation * indexCorrelation;

        run_cost += max_IO_cost + csquared * (min_IO_cost - max_IO_cost);

        /*
         * Estimate CPU costs per tuple.
         *
         * Normally the indexquals will be removed from the list of restriction
         * clauses that we have to evaluate as qpquals, so we should subtract
         * their costs from baserestrictcost.  But if we are doing a join then
         * some of the indexquals are join clauses and shouldn't be subtracted.
         * Rather than work out exactly how much to subtract, we don't subtract
         * anything.
         */
        startup_cost += baserel->baserestrictcost.startup;
        cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;

        if (outer_rel == NULL)
        {
                QualCost        index_qual_cost;

                cost_qual_eval(&index_qual_cost, indexQuals);
                /* any startup cost still has to be paid ... */
                cpu_per_tuple -= index_qual_cost.per_tuple;
        }

        run_cost += cpu_per_tuple * tuples_fetched;

        path->path.startup_cost = startup_cost;
        path->path.total_cost = startup_cost + run_cost;
}

/*
 * index_pages_fetched
 *        Estimate the number of pages actually fetched after accounting for
 *        cache effects.
 *
 * We use an approximation proposed by Mackert and Lohman, "Index Scans
 * Using a Finite LRU Buffer: A Validated I/O Model", ACM Transactions
 * on Database Systems, Vol. 14, No. 3, September 1989, Pages 401-424.
 * The Mackert and Lohman approximation is that the number of pages
 * fetched is
 *      PF =
 *              min(2TNs/(2T+Ns), T)                    when T <= b
 *              2TNs/(2T+Ns)                                    when T > b and Ns <= 2Tb/(2T-b)
 *              b + (Ns - 2Tb/(2T-b))*(T-b)/T   when T > b and Ns > 2Tb/(2T-b)
 * where
 *              T = # pages in table
 *              N = # tuples in table
 *              s = selectivity = fraction of table to be scanned
 *              b = # buffer pages available (we include kernel space here)
 *
 * We assume that effective_cache_size is the total number of buffer pages
 * available for the whole query, and pro-rate that space across all the
 * tables in the query and the index currently under consideration.  (This
 * ignores space needed for other indexes used by the query, but since we
 * don't know which indexes will get used, we can't estimate that very well;
 * and in any case counting all the tables may well be an overestimate, since
 * depending on the join plan not all the tables may be scanned concurrently.)
 *
 * The product Ns is the number of tuples fetched; we pass in that
 * product rather than calculating it here.  "pages" is the number of pages
 * in the object under consideration (either an index or a table).
 * "index_pages" is the amount to add to the total table space, which was
 * computed for us by query_planner.
 *
 * Caller is expected to have ensured that tuples_fetched is greater than zero
 * and rounded to integer (see clamp_row_est).  The result will likewise be
 * greater than zero and integral.
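 *
 * Hypothetical worked example: for a table of T = 1000 pages with b >= T
 * (the pro-rated cache covers the whole table), fetching Ns = 500 tuples
 * gives PF = min(2*1000*500 / (2*1000 + 500), 1000) = 400 pages, reflecting
 * that some of the fetched tuples are expected to share heap pages.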
 */
double
index_pages_fetched(double tuples_fetched, BlockNumber pages,
                                        double index_pages, PlannerInfo *root)
{
        double          pages_fetched;
        double          total_pages;
        double          T,
                                b;

        /* T is # pages in table, but don't allow it to be zero */
        T = (pages > 1) ? (double) pages : 1.0;

        /* Compute number of pages assumed to be competing for cache space */
        total_pages = root->total_table_pages + index_pages;
        total_pages = Max(total_pages, 1.0);
        Assert(T <= total_pages);

        /* b is pro-rated share of effective_cache_size */
        b = (double) effective_cache_size * T / total_pages;

        /* force it positive and integral */
        if (b <= 1.0)
                b = 1.0;
        else
                b = ceil(b);

        /* This part is the Mackert and Lohman formula */
        if (T <= b)
        {
                pages_fetched =
                        (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
                if (pages_fetched >= T)
                        pages_fetched = T;
                else
                        pages_fetched = ceil(pages_fetched);
        }
        else
        {
                double          lim;

                lim = (2.0 * T * b) / (2.0 * T - b);
                if (tuples_fetched <= lim)
                {
                        pages_fetched =
                                (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
                }
                else
                {
                        pages_fetched =
                                b + (tuples_fetched - lim) * (T - b) / T;
                }
                pages_fetched = ceil(pages_fetched);
        }
        return pages_fetched;
}

/*
 * get_indexpath_pages
 *              Determine the total size of the indexes used in a bitmap index path.
 *
 * Note: if the same index is used more than once in a bitmap tree, we will
 * count it multiple times, which perhaps is the wrong thing ... but it's
 * not completely clear, and detecting duplicates is difficult, so ignore it
 * for now.
 */
static double
get_indexpath_pages(Path *bitmapqual)
{
        double          result = 0;
        ListCell   *l;

        if (IsA(bitmapqual, BitmapAndPath))
        {
                BitmapAndPath *apath = (BitmapAndPath *) bitmapqual;

                foreach(l, apath->bitmapquals)
                {
                        result += get_indexpath_pages((Path *) lfirst(l));
                }
        }
        else if (IsA(bitmapqual, BitmapOrPath))
        {
                BitmapOrPath *opath = (BitmapOrPath *) bitmapqual;

                foreach(l, opath->bitmapquals)
                {
                        result += get_indexpath_pages((Path *) lfirst(l));
                }
        }
        else if (IsA(bitmapqual, IndexPath))
        {
                IndexPath  *ipath = (IndexPath *) bitmapqual;

                result = (double) ipath->indexinfo->pages;
        }
        else
                elog(ERROR, "unrecognized node type: %d", nodeTag(bitmapqual));

        return result;
}

/*
 * cost_bitmap_heap_scan
 *        Determines and returns the cost of scanning a relation using a bitmap
 *        index-then-heap plan.
 *
 * 'baserel' is the relation to be scanned
 * 'bitmapqual' is a tree of IndexPaths, BitmapAndPaths, and BitmapOrPaths
 * 'outer_rel' is the outer relation when we are considering using the bitmap
 *              scan as the inside of a nestloop join (hence, some of the indexQuals
 *              are join clauses, and we should expect repeated scans of the table);
 *              NULL for a plain bitmap scan
 *
 * Note: if this is a join inner path, the component IndexPaths in bitmapqual
 * should have been costed accordingly.
 */
void
cost_bitmap_heap_scan(Path *path, PlannerInfo *root, RelOptInfo *baserel,
                                          Path *bitmapqual, RelOptInfo *outer_rel)
{
        Cost            startup_cost = 0;
        Cost            run_cost = 0;
        Cost            indexTotalCost;
        Selectivity indexSelectivity;
        Cost            cpu_per_tuple;
        Cost            cost_per_page;
        double          tuples_fetched;
        double          pages_fetched;
        double          T;

        /* Should only be applied to base relations */
        Assert(IsA(baserel, RelOptInfo));
        Assert(baserel->relid > 0);
        Assert(baserel->rtekind == RTE_RELATION);

        if (!enable_bitmapscan)
                startup_cost += disable_cost;

        /*
         * Fetch total cost of obtaining the bitmap, as well as its total
         * selectivity.
         */
        cost_bitmap_tree_node(bitmapqual, &indexTotalCost, &indexSelectivity);

        startup_cost += indexTotalCost;

        /*
         * Estimate number of main-table pages fetched.
         */
        tuples_fetched = clamp_row_est(indexSelectivity * baserel->tuples);

        T = (baserel->pages > 1) ? (double) baserel->pages : 1.0;

        if (outer_rel != NULL && outer_rel->rows > 1)
        {
                /*
                 * For repeated bitmap scans, scale up the number of tuples fetched in
                 * the Mackert and Lohman formula by the number of scans, so that we
                 * estimate the number of pages fetched by all the scans. Then
                 * pro-rate for one scan.
                 */
                double          num_scans = outer_rel->rows;

                pages_fetched = index_pages_fetched(tuples_fetched * num_scans,
                                                                                        baserel->pages,
                                                                                        get_indexpath_pages(bitmapqual),
                                                                                        root);
                pages_fetched /= num_scans;
        }
        else
        {
                /*
                 * For a single scan, the number of heap pages that need to be fetched
                 * is the same as the Mackert and Lohman formula for the case T <= b
                 * (ie, no re-reads needed).
                 */
                pages_fetched = (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
        }
        if (pages_fetched >= T)
                pages_fetched = T;
        else
                pages_fetched = ceil(pages_fetched);

        /*
         * For small numbers of pages we should charge random_page_cost apiece,
         * while if nearly all the table's pages are being read, it's more
         * appropriate to charge seq_page_cost apiece.  The effect is nonlinear,
         * too. For lack of a better idea, interpolate like this to determine the
         * cost per page.
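         *
         * For illustration with the default page costs: if a quarter of a
         * 1000-page table must be fetched (pages_fetched = 250, T = 1000),
         * then cost_per_page = 4.0 - (4.0 - 1.0) * sqrt(0.25) = 2.5.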
         */
        if (pages_fetched >= 2.0)
                cost_per_page = random_page_cost -
                        (random_page_cost - seq_page_cost) * sqrt(pages_fetched / T);
        else
                cost_per_page = random_page_cost;

        run_cost += pages_fetched * cost_per_page;

        /*
         * Estimate CPU costs per tuple.
         *
         * Often the indexquals don't need to be rechecked at each tuple ... but
         * not always, especially not if there are enough tuples involved that the
         * bitmaps become lossy.  For the moment, just assume they will be
         * rechecked always.
         */
        startup_cost += baserel->baserestrictcost.startup;
        cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;

        run_cost += cpu_per_tuple * tuples_fetched;

        path->startup_cost = startup_cost;
        path->total_cost = startup_cost + run_cost;
}

/*
 * cost_bitmap_tree_node
 *              Extract cost and selectivity from a bitmap tree node (index/and/or)
 */
void
cost_bitmap_tree_node(Path *path, Cost *cost, Selectivity *selec)
{
        if (IsA(path, IndexPath))
        {
                *cost = ((IndexPath *) path)->indextotalcost;
                *selec = ((IndexPath *) path)->indexselectivity;
                /*
                 * Charge a small amount per retrieved tuple to reflect the costs of
                 * manipulating the bitmap.  This is mostly to make sure that a bitmap
                 * scan doesn't look to be the same cost as an indexscan to retrieve
                 * a single tuple.
                 */
                *cost += 0.1 * cpu_operator_cost * ((IndexPath *) path)->rows;
        }
        else if (IsA(path, BitmapAndPath))
        {
                *cost = path->total_cost;
                *selec = ((BitmapAndPath *) path)->bitmapselectivity;
        }
        else if (IsA(path, BitmapOrPath))
        {
                *cost = path->total_cost;
                *selec = ((BitmapOrPath *) path)->bitmapselectivity;
        }
        else
        {
                elog(ERROR, "unrecognized node type: %d", nodeTag(path));
                *cost = *selec = 0;             /* keep compiler quiet */
        }
}

/*
 * cost_bitmap_and_node
 *              Estimate the cost of a BitmapAnd node
 *
 * Note that this considers only the costs of index scanning and bitmap
 * creation, not the eventual heap access.  In that sense the object isn't
 * truly a Path, but it has enough path-like properties (costs in particular)
 * to warrant treating it as one.
 */
void
cost_bitmap_and_node(BitmapAndPath *path, PlannerInfo *root)
{
        Cost            totalCost;
        Selectivity selec;
        ListCell   *l;

        /*
         * We estimate AND selectivity on the assumption that the inputs are
         * independent.  This is probably often wrong, but we don't have the info
         * to do better.
         *
         * The runtime cost of the BitmapAnd itself is estimated at 100x
         * cpu_operator_cost for each tbm_intersect needed.  Probably too small,
         * definitely too simplistic?
         */
        totalCost = 0.0;
        selec = 1.0;
        foreach(l, path->bitmapquals)
        {
                Path       *subpath = (Path *) lfirst(l);
                Cost            subCost;
                Selectivity subselec;

                cost_bitmap_tree_node(subpath, &subCost, &subselec);

                selec *= subselec;

                totalCost += subCost;
                if (l != list_head(path->bitmapquals))
                        totalCost += 100.0 * cpu_operator_cost;
        }
        path->bitmapselectivity = selec;
        path->path.startup_cost = totalCost;
        path->path.total_cost = totalCost;
}

/*
 * cost_bitmap_or_node
 *              Estimate the cost of a BitmapOr node
 *
 * See comments for cost_bitmap_and_node.
 */
void
cost_bitmap_or_node(BitmapOrPath *path, PlannerInfo *root)
{
        Cost            totalCost;
        Selectivity selec;
        ListCell   *l;

        /*
         * We estimate OR selectivity on the assumption that the inputs are
         * non-overlapping, since that's often the case in "x IN (list)" type
         * situations.  Of course, we clamp to 1.0 at the end.
         *
         * The runtime cost of the BitmapOr itself is estimated at 100x
         * cpu_operator_cost for each tbm_union needed.  Probably too small,
         * definitely too simplistic?  We are aware that the tbm_unions are
         * optimized out when the inputs are BitmapIndexScans.
         */
        totalCost = 0.0;
        selec = 0.0;
        foreach(l, path->bitmapquals)
        {
                Path       *subpath = (Path *) lfirst(l);
                Cost            subCost;
                Selectivity subselec;

                cost_bitmap_tree_node(subpath, &subCost, &subselec);

                selec += subselec;

                totalCost += subCost;
                if (l != list_head(path->bitmapquals) &&
                        !IsA(subpath, IndexPath))
                        totalCost += 100.0 * cpu_operator_cost;
        }
        path->bitmapselectivity = Min(selec, 1.0);
        path->path.startup_cost = totalCost;
        path->path.total_cost = totalCost;
}

/*
 * cost_tidscan
 *        Determines and returns the cost of scanning a relation using TIDs.
 */
void
cost_tidscan(Path *path, PlannerInfo *root,
                         RelOptInfo *baserel, List *tidquals)
{
        Cost            startup_cost = 0;
        Cost            run_cost = 0;
        Cost            cpu_per_tuple;
        int                     ntuples;
        ListCell   *l;

        /* Should only be applied to base relations */
        Assert(baserel->relid > 0);
        Assert(baserel->rtekind == RTE_RELATION);

        if (!enable_tidscan)
                startup_cost += disable_cost;

        /* Count how many tuples we expect to retrieve */
        ntuples = 0;
        foreach(l, tidquals)
        {
                if (IsA(lfirst(l), ScalarArrayOpExpr))
                {
                        /* Each element of the array yields 1 tuple */
                        ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) lfirst(l);
                        Node       *arraynode = (Node *) lsecond(saop->args);

                        ntuples += estimate_array_length(arraynode);
                }
                else
                {
                        /* It's just CTID = something, count 1 tuple */
                        ntuples++;
                }
        }

        /* disk costs --- assume each tuple on a different page */
        run_cost += random_page_cost * ntuples;

        /* CPU costs */
        startup_cost += baserel->baserestrictcost.startup;
        cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
        run_cost += cpu_per_tuple * ntuples;

        path->startup_cost = startup_cost;
        path->total_cost = startup_cost + run_cost;
}

/*
 * cost_subqueryscan
 *        Determines and returns the cost of scanning a subquery RTE.
 */
void
cost_subqueryscan(Path *path, RelOptInfo *baserel)
{
        Cost            startup_cost;
        Cost            run_cost;
        Cost            cpu_per_tuple;

        /* Should only be applied to base relations that are subqueries */
        Assert(baserel->relid > 0);
        Assert(baserel->rtekind == RTE_SUBQUERY);

        /*
         * Cost of path is cost of evaluating the subplan, plus cost of evaluating
         * any restriction clauses that will be attached to the SubqueryScan node,
         * plus cpu_tuple_cost to account for selection and projection overhead.
         */
        path->startup_cost = baserel->subplan->startup_cost;
        path->total_cost = baserel->subplan->total_cost;

        startup_cost = baserel->baserestrictcost.startup;
        cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
        run_cost = cpu_per_tuple * baserel->tuples;

        path->startup_cost += startup_cost;
        path->total_cost += startup_cost + run_cost;
}

/*
 * cost_functionscan
 *        Determines and returns the cost of scanning a function RTE.
 */
void
cost_functionscan(Path *path, PlannerInfo *root, RelOptInfo *baserel)
{
        Cost            startup_cost = 0;
        Cost            run_cost = 0;
        Cost            cpu_per_tuple;
        RangeTblEntry *rte;
        QualCost        exprcost;

        /* Should only be applied to base relations that are functions */
        Assert(baserel->relid > 0);
        rte = rt_fetch(baserel->relid, root->parse->rtable);
        Assert(rte->rtekind == RTE_FUNCTION);

        /* Estimate costs of executing the function expression */
        cost_qual_eval_node(&exprcost, rte->funcexpr);

        startup_cost += exprcost.startup;
        cpu_per_tuple = exprcost.per_tuple;

        /* Add scanning CPU costs */
        startup_cost += baserel->baserestrictcost.startup;
        cpu_per_tuple += cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
        run_cost += cpu_per_tuple * baserel->tuples;

        path->startup_cost = startup_cost;
        path->total_cost = startup_cost + run_cost;
}

/*
 * cost_valuesscan
 *        Determines and returns the cost of scanning a VALUES RTE.
 */
void
cost_valuesscan(Path *path, PlannerInfo *root, RelOptInfo *baserel)
{
        Cost            startup_cost = 0;
        Cost            run_cost = 0;
        Cost            cpu_per_tuple;

        /* Should only be applied to base relations that are values lists */
        Assert(baserel->relid > 0);
        Assert(baserel->rtekind == RTE_VALUES);

        /*
         * For now, estimate list evaluation cost at one operator eval per list
         * (probably pretty bogus, but is it worth being smarter?)
         */
        cpu_per_tuple = cpu_operator_cost;

        /* Add scanning CPU costs */
        startup_cost += baserel->baserestrictcost.startup;
        cpu_per_tuple += cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
        run_cost += cpu_per_tuple * baserel->tuples;

        path->startup_cost = startup_cost;
        path->total_cost = startup_cost + run_cost;
}

/*
 * cost_sort
 *        Determines and returns the cost of sorting a relation, including
 *        the cost of reading the input data.
 *
 * If the total volume of data to sort is less than work_mem, we will do
 * an in-memory sort, which requires no I/O and about t*log2(t) tuple
 * comparisons for t tuples.
 *
 * If the total volume exceeds work_mem, we switch to a tape-style merge
 * algorithm.  There will still be about t*log2(t) tuple comparisons in
 * total, but we will also need to write and read each tuple once per
 * merge pass.  We expect about ceil(logM(r)) merge passes where r is the
 * number of initial runs formed and M is the merge order used by tuplesort.c.
 * Since the average initial run should be about twice work_mem, we have
 *              disk traffic = 2 * relsize * ceil(logM(relsize / (2*work_mem)))
 *              cpu = comparison_cost * t * log2(t)
 *
 * The disk traffic is assumed to be 3/4ths sequential and 1/4th random
 * accesses (XXX can't we refine that guess?)
 *
 * We charge two operator evals per tuple comparison, which should be in
 * the right ballpark in most cases.
 *
 * 'pathkeys' is a list of sort keys
 * 'input_cost' is the total cost for reading the input data
 * 'tuples' is the number of tuples in the relation
 * 'width' is the average tuple width in bytes
 *
 * NOTE: some callers currently pass NIL for pathkeys because they
 * can't conveniently supply the sort keys.  Since this routine doesn't
 * currently do anything with pathkeys anyway, that doesn't matter...
 * but if it ever does, it should react gracefully to lack of key data.
 * (Actually, the thing we'd most likely be interested in is just the number
 * of sort keys, which all callers *could* supply.)
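 *
 * Rough illustration of the CPU term, assuming the default cpu_operator_cost
 * of 0.0025: sorting one million tuples is charged about
 * 2 * 0.0025 * 1e6 * log2(1e6) = 0.005 * 1e6 * 19.93, i.e. roughly 100000
 * cost units, before any disk charges.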
 */
void
cost_sort(Path *path, PlannerInfo *root,
                  List *pathkeys, Cost input_cost, double tuples, int width)
{
        Cost            startup_cost = input_cost;
        Cost            run_cost = 0;
        double          nbytes = relation_byte_size(tuples, width);
        long            work_mem_bytes = work_mem * 1024L;

        if (!enable_sort)
                startup_cost += disable_cost;

        /*
         * We want to be sure the cost of a sort is never estimated as zero, even
         * if passed-in tuple count is zero.  Besides, mustn't do log(0)...
         */
        if (tuples < 2.0)
                tuples = 2.0;

        /*
         * CPU costs
         *
         * Assume about two operator evals per tuple comparison and N log2 N
         * comparisons
         */
        startup_cost += 2.0 * cpu_operator_cost * tuples * LOG2(tuples);

        /* disk costs */
        if (nbytes > work_mem_bytes)
        {
                double          npages = ceil(nbytes / BLCKSZ);
                double          nruns = (nbytes / work_mem_bytes) * 0.5;
                double          mergeorder = tuplesort_merge_order(work_mem_bytes);
                double          log_runs;
                double          npageaccesses;

                /* Compute logM(r) as log(r) / log(M) */
                if (nruns > mergeorder)
                        log_runs = ceil(log(nruns) / log(mergeorder));
                else
                        log_runs = 1.0;
                npageaccesses = 2.0 * npages * log_runs;
                /* Assume 3/4ths of accesses are sequential, 1/4th are not */
                startup_cost += npageaccesses *
                        (seq_page_cost * 0.75 + random_page_cost * 0.25);
        }

        /*
         * Also charge a small amount (arbitrarily set equal to operator cost) per
         * extracted tuple.
         */
        run_cost += cpu_operator_cost * tuples;

        path->startup_cost = startup_cost;
        path->total_cost = startup_cost + run_cost;
}

/*
 * cost_material
 *        Determines and returns the cost of materializing a relation, including
 *        the cost of reading the input data.
 *
 * If the total volume of data to materialize exceeds work_mem, we will need
 * to write it to disk, so the cost is much higher in that case.
 */
void
cost_material(Path *path,
                          Cost input_cost, double tuples, int width)
{
        Cost            startup_cost = input_cost;
        Cost            run_cost = 0;
        double          nbytes = relation_byte_size(tuples, width);
        long            work_mem_bytes = work_mem * 1024L;

        /* disk costs */
        if (nbytes > work_mem_bytes)
        {
                double          npages = ceil(nbytes / BLCKSZ);

                /* We'll write during startup and read during retrieval */
                startup_cost += seq_page_cost * npages;
                run_cost += seq_page_cost * npages;
        }

        /*
         * Charge a very small amount per inserted tuple, to reflect bookkeeping
         * costs.  We use cpu_tuple_cost/10 for this.  This is needed to break the
         * tie that would otherwise exist between nestloop with A outer,
         * materialized B inner and nestloop with B outer, materialized A inner.
         * The extra cost ensures we'll prefer materializing the smaller rel.
         */
        startup_cost += cpu_tuple_cost * 0.1 * tuples;

        /*
         * Also charge a small amount per extracted tuple.  We use cpu_tuple_cost
         * so that it doesn't appear worthwhile to materialize a bare seqscan.
         */
        run_cost += cpu_tuple_cost * tuples;

        path->startup_cost = startup_cost;
        path->total_cost = startup_cost + run_cost;
}

/*
 * cost_agg
 *              Determines and returns the cost of performing an Agg plan node,
 *              including the cost of its input.
 *
 * Note: when aggstrategy == AGG_SORTED, caller must ensure that input costs
 * are for appropriately-sorted input.
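 *
 * Illustrative AGG_HASHED example with the default per-operator (0.0025) and
 * per-tuple (0.01) CPU costs: 10000 input tuples, 2 grouping columns, one
 * aggregate, and 100 groups add 0.0025*10000*2 + 0.0025*10000 = 75 to the
 * startup cost, plus a further 0.0025*100 + 0.01*100 = 1.25 in the total
 * cost, on top of the input cost.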
 */
void
cost_agg(Path *path, PlannerInfo *root,
                 AggStrategy aggstrategy, int numAggs,
                 int numGroupCols, double numGroups,
                 Cost input_startup_cost, Cost input_total_cost,
                 double input_tuples)
{
        Cost            startup_cost;
        Cost            total_cost;

        /*
         * We charge one cpu_operator_cost per aggregate function per input tuple,
         * and another one per output tuple (corresponding to transfn and finalfn
         * calls respectively).  If we are grouping, we charge an additional
         * cpu_operator_cost per grouping column per input tuple for grouping
         * comparisons.
         *
         * We will produce a single output tuple if not grouping, and a tuple per
         * group otherwise.  We charge cpu_tuple_cost for each output tuple.
         *
         * Note: in this cost model, AGG_SORTED and AGG_HASHED have exactly the
         * same total CPU cost, but AGG_SORTED has lower startup cost.  If the
         * input path is already sorted appropriately, AGG_SORTED should be
         * preferred (since it has no risk of memory overflow).  This will happen
         * as long as the computed total costs are indeed exactly equal --- but if
         * there's roundoff error we might do the wrong thing.  So be sure that
         * the computations below form the same intermediate values in the same
         * order.
         *
         * Note: ideally we should use the pg_proc.procost costs of each
         * aggregate's component functions, but for now that seems like an
         * excessive amount of work.
         */
        if (aggstrategy == AGG_PLAIN)
        {
                startup_cost = input_total_cost;
                startup_cost += cpu_operator_cost * (input_tuples + 1) * numAggs;
                /* we aren't grouping */
                total_cost = startup_cost + cpu_tuple_cost;
        }
        else if (aggstrategy == AGG_SORTED)
        {
                /* Here we are able to deliver output on-the-fly */
                startup_cost = input_startup_cost;
                total_cost = input_total_cost;
                /* calcs phrased this way to match HASHED case, see note above */
                total_cost += cpu_operator_cost * input_tuples * numGroupCols;
                total_cost += cpu_operator_cost * input_tuples * numAggs;
                total_cost += cpu_operator_cost * numGroups * numAggs;
                total_cost += cpu_tuple_cost * numGroups;
        }
        else
        {
                /* must be AGG_HASHED */
                startup_cost = input_total_cost;
                startup_cost += cpu_operator_cost * input_tuples * numGroupCols;
                startup_cost += cpu_operator_cost * input_tuples * numAggs;
                total_cost = startup_cost;
                total_cost += cpu_operator_cost * numGroups * numAggs;
                total_cost += cpu_tuple_cost * numGroups;
        }

        path->startup_cost = startup_cost;
        path->total_cost = total_cost;
}

/*
 * cost_group
 *              Determines and returns the cost of performing a Group plan node,
 *              including the cost of its input.
 *
 * Note: caller must ensure that input costs are for appropriately-sorted
 * input.
 */
void
cost_group(Path *path, PlannerInfo *root,
                   int numGroupCols, double numGroups,
                   Cost input_startup_cost, Cost input_total_cost,
                   double input_tuples)
{
        Cost            startup_cost;
        Cost            total_cost;

        startup_cost = input_startup_cost;
        total_cost = input_total_cost;

        /*
         * Charge one cpu_operator_cost per comparison per input tuple.  We
         * assume all columns get compared for most of the tuples.
         */
        total_cost += cpu_operator_cost * input_tuples * numGroupCols;

        path->startup_cost = startup_cost;
        path->total_cost = total_cost;
}

/*
 * If a nestloop's inner path is an indexscan, be sure to use its estimated
 * output row count, which may be lower than the restriction-clause-only row
 * count of its parent.  (We don't include this case in the PATH_ROWS macro
 * because it applies *only* to a nestloop's inner relation.)  We have to
 * be prepared to recurse through Append nodes in case of an appendrel.
 */
static double
nestloop_inner_path_rows(Path *path)
{
        double          result;

        if (IsA(path, IndexPath))
                result = ((IndexPath *) path)->rows;
        else if (IsA(path, BitmapHeapPath))
                result = ((BitmapHeapPath *) path)->rows;
        else if (IsA(path, AppendPath))
        {
                ListCell   *l;

                result = 0;
                foreach(l, ((AppendPath *) path)->subpaths)
                {
                        result += nestloop_inner_path_rows((Path *) lfirst(l));
                }
        }
        else
                result = PATH_ROWS(path);

        return result;
}

/*
 * cost_nestloop
 *        Determines and returns the cost of joining two relations using the
 *        nested loop algorithm.
 *
 * 'path' is already filled in except for the cost fields
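 *
 * Sketch of the arithmetic with made-up numbers: for 1000 outer rows, an
 * inner path with startup_cost 0 and total_cost 5, and joininfactor 1.0,
 * the run cost includes 1000 * 5 = 5000 for repeatedly running the inner
 * path, plus per-tuple CPU charges on the 1000 * inner_path_rows tuples
 * processed.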
1180  */
1181 void
1182 cost_nestloop(NestPath *path, PlannerInfo *root)
1183 {
1184         Path       *outer_path = path->outerjoinpath;
1185         Path       *inner_path = path->innerjoinpath;
1186         Cost            startup_cost = 0;
1187         Cost            run_cost = 0;
1188         Cost            cpu_per_tuple;
1189         QualCost        restrict_qual_cost;
1190         double          outer_path_rows = PATH_ROWS(outer_path);
1191         double          inner_path_rows = nestloop_inner_path_rows(inner_path);
1192         double          ntuples;
1193         Selectivity joininfactor;
1194
1195         if (!enable_nestloop)
1196                 startup_cost += disable_cost;
1197
1198         /*
1199          * If we're doing JOIN_IN then we will stop scanning inner tuples for an
1200          * outer tuple as soon as we have one match.  Account for the effects of
1201          * this by scaling down the cost estimates in proportion to the JOIN_IN
1202          * selectivity.  (This assumes that all the quals attached to the join are
1203          * IN quals, which should be true.)
1204          */
1205         joininfactor = join_in_selectivity(path, root);
1206
1207         /* cost of source data */
1208
1209         /*
1210          * NOTE: clearly, we must pay both outer and inner paths' startup_cost
1211          * before we can start returning tuples, so the join's startup cost is
1212          * their sum.  What's not so clear is whether the inner path's
1213          * startup_cost must be paid again on each rescan of the inner path. This
1214          * is not true if the inner path is materialized or is a hashjoin, but
1215          * probably is true otherwise.
1216          */
1217         startup_cost += outer_path->startup_cost + inner_path->startup_cost;
1218         run_cost += outer_path->total_cost - outer_path->startup_cost;
1219         if (IsA(inner_path, MaterialPath) ||
1220                 IsA(inner_path, HashPath))
1221         {
1222                 /* charge only run cost for each iteration of inner path */
1223         }
1224         else
1225         {
1226                 /*
1227                  * charge startup cost for each iteration of inner path, except we
1228                  * already charged the first startup_cost in our own startup
1229                  */
1230                 run_cost += (outer_path_rows - 1) * inner_path->startup_cost;
1231         }
1232         run_cost += outer_path_rows *
1233                 (inner_path->total_cost - inner_path->startup_cost) * joininfactor;
1234
1235         /*
1236          * Compute number of tuples processed (not number emitted!)
1237          */
1238         ntuples = outer_path_rows * inner_path_rows * joininfactor;
1239
1240         /* CPU costs */
1241         cost_qual_eval(&restrict_qual_cost, path->joinrestrictinfo);
1242         startup_cost += restrict_qual_cost.startup;
1243         cpu_per_tuple = cpu_tuple_cost + restrict_qual_cost.per_tuple;
1244         run_cost += cpu_per_tuple * ntuples;
1245
1246         path->path.startup_cost = startup_cost;
1247         path->path.total_cost = startup_cost + run_cost;
1248 }
1249
1250 /*
1251  * cost_mergejoin
1252  *        Determines and returns the cost of joining two relations using the
1253  *        merge join algorithm.
1254  *
1255  * 'path' is already filled in except for the cost fields
1256  *
1257  * Notes: path's mergeclauses should be a subset of the joinrestrictinfo list;
1258  * outersortkeys and innersortkeys are lists of the keys to be used
1259  * to sort the outer and inner relations, or NIL if no explicit
1260  * sort is needed because the source path is already ordered.
1261  */
1262 void
1263 cost_mergejoin(MergePath *path, PlannerInfo *root)
1264 {
1265         Path       *outer_path = path->jpath.outerjoinpath;
1266         Path       *inner_path = path->jpath.innerjoinpath;
1267         List       *mergeclauses = path->path_mergeclauses;
1268         List       *outersortkeys = path->outersortkeys;
1269         List       *innersortkeys = path->innersortkeys;
1270         Cost            startup_cost = 0;
1271         Cost            run_cost = 0;
1272         Cost            cpu_per_tuple;
1273         Selectivity merge_selec;
1274         QualCost        merge_qual_cost;
1275         QualCost        qp_qual_cost;
1276         double          outer_path_rows = PATH_ROWS(outer_path);
1277         double          inner_path_rows = PATH_ROWS(inner_path);
1278         double          outer_rows,
1279                                 inner_rows;
1280         double          mergejointuples,
1281                                 rescannedtuples;
1282         double          rescanratio;
1283         Selectivity outerscansel,
1284                                 innerscansel;
1285         Selectivity joininfactor;
1286         Path            sort_path;              /* dummy for result of cost_sort */
1287
1288         if (!enable_mergejoin)
1289                 startup_cost += disable_cost;
1290
1291         /*
1292          * Compute cost and selectivity of the mergequals and qpquals (other
1293          * restriction clauses) separately.  We use approx_selectivity here for
1294          * speed --- in most cases, any errors won't affect the result much.
1295          *
1296          * Note: it's probably bogus to use the normal selectivity calculation
1297          * here when either the outer or inner path is a UniquePath.
1298          */
1299         merge_selec = approx_selectivity(root, mergeclauses,
1300                                                                          path->jpath.jointype);
1301         cost_qual_eval(&merge_qual_cost, mergeclauses);
1302         cost_qual_eval(&qp_qual_cost, path->jpath.joinrestrictinfo);
1303         qp_qual_cost.startup -= merge_qual_cost.startup;
1304         qp_qual_cost.per_tuple -= merge_qual_cost.per_tuple;
1305
1306         /* approx # tuples passing the merge quals */
1307         mergejointuples = clamp_row_est(merge_selec * outer_path_rows * inner_path_rows);
1308
1309         /*
1310          * When there are equal merge keys in the outer relation, the mergejoin
1311          * must rescan any matching tuples in the inner relation. This means
1312          * re-fetching inner tuples.  Our cost model for this is that a re-fetch
1313          * costs the same as an original fetch, which is probably an overestimate;
1314          * but on the other hand we ignore the bookkeeping costs of mark/restore.
1315          * Not clear if it's worth developing a more refined model.
1316          *
1317          * The number of re-fetches can be estimated approximately as size of
1318          * merge join output minus size of inner relation.  Assume that the
1319          * distinct key values are 1, 2, ..., and denote the number of values of
1320          * each key in the outer relation as m1, m2, ...; in the inner relation,
1321          * n1, n2, ... Then we have
1322          *
1323          * size of join = m1 * n1 + m2 * n2 + ...
1324          *
1325          * number of rescanned tuples = (m1 - 1) * n1 + (m2 - 1) * n2 + ...
1326          *    = m1 * n1 + m2 * n2 + ... - (n1 + n2 + ...)
1327          *    = size of join - size of inner relation
1328          *
1329          * This equation works correctly for outer tuples having no inner match
1330          * (nk = 0), but not for inner tuples having no outer match (mk = 0); we
1331          * are effectively subtracting those from the number of rescanned tuples,
1332          * when we should not.  Can we do better without expensive selectivity
1333          * computations?
1334          */
1335         if (IsA(outer_path, UniquePath))
1336                 rescannedtuples = 0;
1337         else
1338         {
1339                 rescannedtuples = mergejointuples - inner_path_rows;
1340                 /* Must clamp because of possible underestimate */
1341                 if (rescannedtuples < 0)
1342                         rescannedtuples = 0;
1343         }
1344         /* We'll inflate inner run cost this much to account for rescanning */
1345         rescanratio = 1.0 + (rescannedtuples / inner_path_rows);
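        /*
         * Worked example (hypothetical numbers): if mergejointuples = 1500 and
         * inner_path_rows = 1000, then rescannedtuples = 500 and the inner run
         * cost below is inflated by rescanratio = 1.0 + 500/1000 = 1.5.
         */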
1346
1347         /*
1348          * A merge join will stop as soon as it exhausts either input stream
1349          * (unless it's an outer join, in which case the outer side has to be
1350          * scanned all the way anyway).  Estimate fraction of the left and right
1351          * inputs that will actually need to be scanned. We use only the first
1352          * (most significant) merge clause for this purpose.
1353          *
1354          * XXX mergejoinscansel is a bit expensive, can we cache its results?
1355          */
1356         if (mergeclauses && path->jpath.jointype != JOIN_FULL)
1357         {
1358                 RestrictInfo *firstclause = (RestrictInfo *) linitial(mergeclauses);
1359                 List       *opathkeys;
1360                 List       *ipathkeys;
1361                 PathKey    *opathkey;
1362                 PathKey    *ipathkey;
1363                 Selectivity leftscansel,
1364                                         rightscansel;
1365
1366                 /* Get the input pathkeys to determine the sort-order details */
1367                 opathkeys = outersortkeys ? outersortkeys : outer_path->pathkeys;
1368                 ipathkeys = innersortkeys ? innersortkeys : inner_path->pathkeys;
1369                 Assert(opathkeys);
1370                 Assert(ipathkeys);
1371                 opathkey = (PathKey *) linitial(opathkeys);
1372                 ipathkey = (PathKey *) linitial(ipathkeys);
1373                 /* debugging check */
1374                 if (opathkey->pk_opfamily != ipathkey->pk_opfamily ||
1375                         opathkey->pk_strategy != ipathkey->pk_strategy ||
1376                         opathkey->pk_nulls_first != ipathkey->pk_nulls_first)
1377                         elog(ERROR, "left and right pathkeys do not match in mergejoin");
1378
1379                 mergejoinscansel(root, (Node *) firstclause->clause,
1380                                                  opathkey->pk_opfamily, opathkey->pk_strategy,
1381                                                  &leftscansel, &rightscansel);
1382
1383                 if (bms_is_subset(firstclause->left_relids,
1384                                                   outer_path->parent->relids))
1385                 {
1386                         /* left side of clause is outer */
1387                         outerscansel = leftscansel;
1388                         innerscansel = rightscansel;
1389                 }
1390                 else
1391                 {
1392                         /* left side of clause is inner */
1393                         outerscansel = rightscansel;
1394                         innerscansel = leftscansel;
1395                 }
1396                 if (path->jpath.jointype == JOIN_LEFT)
1397                         outerscansel = 1.0;
1398                 else if (path->jpath.jointype == JOIN_RIGHT)
1399                         innerscansel = 1.0;
1400         }
1401         else
1402         {
1403                 /* cope with clauseless or full mergejoin */
1404                 outerscansel = innerscansel = 1.0;
1405         }
1406
1407         /* convert selectivity to row count; must scan at least one row */
1408         outer_rows = clamp_row_est(outer_path_rows * outerscansel);
1409         inner_rows = clamp_row_est(inner_path_rows * innerscansel);
1410
1411         /*
1412          * Readjust scan selectivities to account for above rounding.  This is
1413          * normally an insignificant effect, but when there are only a few rows in
1414          * the inputs, failing to do this makes for a large percentage error.
1415          */
1416         outerscansel = outer_rows / outer_path_rows;
1417         innerscansel = inner_rows / inner_path_rows;
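        /*
         * E.g. (hypothetical numbers): with outer_path_rows = 3 and an initial
         * outerscansel of 0.1, the row estimate 0.3 is clamped up to
         * outer_rows = 1, so outerscansel becomes 1/3 rather than 0.1.  With
         * large inputs the correction is negligible.
         */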
1418
1419         /* cost of source data */
1420
1421         if (outersortkeys)                      /* do we need to sort outer? */
1422         {
1423                 cost_sort(&sort_path,
1424                                   root,
1425                                   outersortkeys,
1426                                   outer_path->total_cost,
1427                                   outer_path_rows,
1428                                   outer_path->parent->width);
1429                 startup_cost += sort_path.startup_cost;
1430                 run_cost += (sort_path.total_cost - sort_path.startup_cost)
1431                         * outerscansel;
1432         }
1433         else
1434         {
1435                 startup_cost += outer_path->startup_cost;
1436                 run_cost += (outer_path->total_cost - outer_path->startup_cost)
1437                         * outerscansel;
1438         }
1439
1440         if (innersortkeys)                      /* do we need to sort inner? */
1441         {
1442                 cost_sort(&sort_path,
1443                                   root,
1444                                   innersortkeys,
1445                                   inner_path->total_cost,
1446                                   inner_path_rows,
1447                                   inner_path->parent->width);
1448                 startup_cost += sort_path.startup_cost;
1449                 run_cost += (sort_path.total_cost - sort_path.startup_cost)
1450                         * innerscansel * rescanratio;
1451         }
1452         else
1453         {
1454                 startup_cost += inner_path->startup_cost;
1455                 run_cost += (inner_path->total_cost - inner_path->startup_cost)
1456                         * innerscansel * rescanratio;
1457         }
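        /*
         * For instance (hypothetical numbers), an unsorted inner path with
         * startup_cost = 10, total_cost = 110, innerscansel = 0.5 and
         * rescanratio = 1.5 contributes 10 to startup_cost and
         * (110 - 10) * 0.5 * 1.5 = 75 to run_cost.
         */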
1458
1459         /* CPU costs */
1460
1461         /*
1462          * If we're doing JOIN_IN then we will stop outputting inner tuples for an
1463          * outer tuple as soon as we have one match.  Account for the effects of
1464          * this by scaling down the cost estimates in proportion to the expected
1465          * output size.  (This assumes that all the quals attached to the join are
1466          * IN quals, which should be true.)
1467          */
1468         joininfactor = join_in_selectivity(&path->jpath, root);
1469
1470         /*
1471          * The number of tuple comparisons needed is approximately number of outer
1472          * rows plus number of inner rows plus number of rescanned tuples (can we
1473          * refine this?).  At each one, we need to evaluate the mergejoin quals.
1474          * NOTE: JOIN_IN mode does not save any work here, so do NOT include
1475          * joininfactor.
1476          */
1477         startup_cost += merge_qual_cost.startup;
1478         run_cost += merge_qual_cost.per_tuple *
1479                 (outer_rows + inner_rows * rescanratio);
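        /*
         * E.g. (hypothetical numbers): with outer_rows = 1000, inner_rows = 800,
         * rescanratio = 1.25 and merge_qual_cost.per_tuple = 0.005, this step
         * adds 0.005 * (1000 + 800 * 1.25) = 10 to run_cost.
         */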
1480
1481         /*
1482          * For each tuple that gets through the mergejoin proper, we charge
1483          * cpu_tuple_cost plus the cost of evaluating additional restriction
1484          * clauses that are to be applied at the join.  (This is pessimistic since
1485          * not all of the quals may get evaluated at each tuple.)  This work is
1486          * skipped in JOIN_IN mode, so apply the factor.
1487          */
1488         startup_cost += qp_qual_cost.startup;
1489         cpu_per_tuple = cpu_tuple_cost + qp_qual_cost.per_tuple;
1490         run_cost += cpu_per_tuple * mergejointuples * joininfactor;
1491
1492         path->jpath.path.startup_cost = startup_cost;
1493         path->jpath.path.total_cost = startup_cost + run_cost;
1494 }
1495
1496 /*
1497  * cost_hashjoin
1498  *        Determines and returns the cost of joining two relations using the
1499  *        hash join algorithm.
1500  *
1501  * 'path' is already filled in except for the cost fields
1502  *
1503  * Note: path's hashclauses should be a subset of the joinrestrictinfo list
1504  */
1505 void
1506 cost_hashjoin(HashPath *path, PlannerInfo *root)
1507 {
1508         Path       *outer_path = path->jpath.outerjoinpath;
1509         Path       *inner_path = path->jpath.innerjoinpath;
1510         List       *hashclauses = path->path_hashclauses;
1511         Cost            startup_cost = 0;
1512         Cost            run_cost = 0;
1513         Cost            cpu_per_tuple;
1514         Selectivity hash_selec;
1515         QualCost        hash_qual_cost;
1516         QualCost        qp_qual_cost;
1517         double          hashjointuples;
1518         double          outer_path_rows = PATH_ROWS(outer_path);
1519         double          inner_path_rows = PATH_ROWS(inner_path);
1520         int                     num_hashclauses = list_length(hashclauses);
1521         int                     numbuckets;
1522         int                     numbatches;
1523         double          virtualbuckets;
1524         Selectivity innerbucketsize;
1525         Selectivity joininfactor;
1526         ListCell   *hcl;
1527
1528         if (!enable_hashjoin)
1529                 startup_cost += disable_cost;
1530
1531         /*
1532          * Compute cost and selectivity of the hashquals and qpquals (other
1533          * restriction clauses) separately.  We use approx_selectivity here for
1534          * speed --- in most cases, any errors won't affect the result much.
1535          *
1536          * Note: it's probably bogus to use the normal selectivity calculation
1537          * here when either the outer or inner path is a UniquePath.
1538          */
1539         hash_selec = approx_selectivity(root, hashclauses,
1540                                                                         path->jpath.jointype);
1541         cost_qual_eval(&hash_qual_cost, hashclauses);
1542         cost_qual_eval(&qp_qual_cost, path->jpath.joinrestrictinfo);
1543         qp_qual_cost.startup -= hash_qual_cost.startup;
1544         qp_qual_cost.per_tuple -= hash_qual_cost.per_tuple;
1545
1546         /* approx # tuples passing the hash quals */
1547         hashjointuples = clamp_row_est(hash_selec * outer_path_rows * inner_path_rows);
1548
1549         /* cost of source data */
1550         startup_cost += outer_path->startup_cost;
1551         run_cost += outer_path->total_cost - outer_path->startup_cost;
1552         startup_cost += inner_path->total_cost;
1553
1554         /*
1555          * Cost of computing hash function: must do it once per input tuple. We
1556          * charge one cpu_operator_cost for each column's hash function.  Also,
1557          * tack on one cpu_tuple_cost per inner row, to model the costs of
1558          * inserting the row into the hashtable.
1559          *
1560          * XXX when a hashclause is more complex than a single operator, we really
1561          * should charge the extra eval costs of the left or right side, as
1562          * appropriate, here.  This seems more work than it's worth at the moment.
1563          */
1564         startup_cost += (cpu_operator_cost * num_hashclauses + cpu_tuple_cost)
1565                 * inner_path_rows;
1566         run_cost += cpu_operator_cost * num_hashclauses * outer_path_rows;
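        /*
         * Worked example (hypothetical numbers): with two hashclauses,
         * cpu_operator_cost = 0.0025, cpu_tuple_cost = 0.01, 10000 inner rows
         * and 50000 outer rows, building the hash table adds
         * (0.0025 * 2 + 0.01) * 10000 = 150 to startup_cost and hashing the
         * outer rows adds 0.0025 * 2 * 50000 = 250 to run_cost.
         */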
1567
1568         /* Get hash table size that executor would use for inner relation */
1569         ExecChooseHashTableSize(inner_path_rows,
1570                                                         inner_path->parent->width,
1571                                                         &numbuckets,
1572                                                         &numbatches);
1573         virtualbuckets = (double) numbuckets * (double) numbatches;
1574
1575         /*
1576          * Determine bucketsize fraction for inner relation.  We use the smallest
1577          * bucketsize estimated for any individual hashclause; this is undoubtedly
1578          * conservative.
1579          *
1580          * BUT: if inner relation has been unique-ified, we can assume it's good
1581          * for hashing.  This is important both because it's the right answer, and
1582          * because we avoid contaminating the cache with a value that's wrong for
1583          * non-unique-ified paths.
1584          */
1585         if (IsA(inner_path, UniquePath))
1586                 innerbucketsize = 1.0 / virtualbuckets;
1587         else
1588         {
1589                 innerbucketsize = 1.0;
1590                 foreach(hcl, hashclauses)
1591                 {
1592                         RestrictInfo *restrictinfo = (RestrictInfo *) lfirst(hcl);
1593                         Selectivity thisbucketsize;
1594
1595                         Assert(IsA(restrictinfo, RestrictInfo));
1596
1597                         /*
1598                          * First we have to figure out which side of the hashjoin clause
1599                          * is the inner side.
1600                          *
1601                          * Since we tend to visit the same clauses over and over when
1602                          * planning a large query, we cache the bucketsize estimate in the
1603                          * RestrictInfo node to avoid repeated lookups of statistics.
1604                          */
1605                         if (bms_is_subset(restrictinfo->right_relids,
1606                                                           inner_path->parent->relids))
1607                         {
1608                                 /* righthand side is inner */
1609                                 thisbucketsize = restrictinfo->right_bucketsize;
1610                                 if (thisbucketsize < 0)
1611                                 {
1612                                         /* not cached yet */
1613                                         thisbucketsize =
1614                                                 estimate_hash_bucketsize(root,
1615                                                                                    get_rightop(restrictinfo->clause),
1616                                                                                                  virtualbuckets);
1617                                         restrictinfo->right_bucketsize = thisbucketsize;
1618                                 }
1619                         }
1620                         else
1621                         {
1622                                 Assert(bms_is_subset(restrictinfo->left_relids,
1623                                                                          inner_path->parent->relids));
1624                                 /* lefthand side is inner */
1625                                 thisbucketsize = restrictinfo->left_bucketsize;
1626                                 if (thisbucketsize < 0)
1627                                 {
1628                                         /* not cached yet */
1629                                         thisbucketsize =
1630                                                 estimate_hash_bucketsize(root,
1631                                                                                         get_leftop(restrictinfo->clause),
1632                                                                                                  virtualbuckets);
1633                                         restrictinfo->left_bucketsize = thisbucketsize;
1634                                 }
1635                         }
1636
1637                         if (innerbucketsize > thisbucketsize)
1638                                 innerbucketsize = thisbucketsize;
1639                 }
1640         }
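        /*
         * For example (hypothetical numbers): with numbuckets = 1024 and
         * numbatches = 4, virtualbuckets = 4096; a unique-ified inner relation
         * gets innerbucketsize = 1/4096, while otherwise we keep the smallest
         * per-clause estimate, e.g. min(0.01, 0.002) = 0.002.
         */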
1641
1642         /*
1643          * If inner relation is too big then we will need to "batch" the join,
1644          * which implies writing and reading most of the tuples to disk an extra
1645          * time.  Charge seq_page_cost per page, since the I/O should be nice and
1646          * sequential.  Writing the inner rel counts as startup cost,
1647          * all the rest as run cost.
1648          */
1649         if (numbatches > 1)
1650         {
1651                 double          outerpages = page_size(outer_path_rows,
1652                                                                                    outer_path->parent->width);
1653                 double          innerpages = page_size(inner_path_rows,
1654                                                                                    inner_path->parent->width);
1655
1656                 startup_cost += seq_page_cost * innerpages;
1657                 run_cost += seq_page_cost * (innerpages + 2 * outerpages);
1658         }
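        /*
         * Batching example (hypothetical numbers): with numbatches = 4,
         * innerpages = 100, outerpages = 300 and seq_page_cost = 1.0, spilling
         * adds 100 to startup_cost and 100 + 2 * 300 = 700 to run_cost.
         */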
1659
1660         /* CPU costs */
1661
1662         /*
1663          * If we're doing JOIN_IN then we will stop comparing inner tuples to an
1664          * outer tuple as soon as we have one match.  Account for the effects of
1665          * this by scaling down the cost estimates in proportion to the expected
1666          * output size.  (This assumes that all the quals attached to the join are
1667          * IN quals, which should be true.)
1668          */
1669         joininfactor = join_in_selectivity(&path->jpath, root);
1670
1671         /*
1672          * The number of tuple comparisons needed is the number of outer tuples
1673          * times the typical number of tuples in a hash bucket, which is the inner
1674          * relation size times its bucketsize fraction.  At each one, we need to
1675          * evaluate the hashjoin quals.  But actually, charging the full qual eval
1676          * cost at each tuple is pessimistic, since we don't evaluate the quals
1677          * unless the hash values match exactly.  For lack of a better idea, halve
1678          * the cost estimate to allow for that.
1679          */
1680         startup_cost += hash_qual_cost.startup;
1681         run_cost += hash_qual_cost.per_tuple *
1682                 outer_path_rows * clamp_row_est(inner_path_rows * innerbucketsize) *
1683                 joininfactor * 0.5;
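        /*
         * E.g. (hypothetical numbers): with 50000 outer rows, 10000 inner rows,
         * innerbucketsize = 0.002, joininfactor = 1.0 and
         * hash_qual_cost.per_tuple = 0.005, this adds
         * 0.005 * 50000 * 20 * 1.0 * 0.5 = 2500 to run_cost.
         */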
1684
1685         /*
1686          * For each tuple that gets through the hashjoin proper, we charge
1687          * cpu_tuple_cost plus the cost of evaluating additional restriction
1688          * clauses that are to be applied at the join.  (This is pessimistic since
1689          * not all of the quals may get evaluated at each tuple.)
1690          */
1691         startup_cost += qp_qual_cost.startup;
1692         cpu_per_tuple = cpu_tuple_cost + qp_qual_cost.per_tuple;
1693         run_cost += cpu_per_tuple * hashjointuples * joininfactor;
1694
1695         path->jpath.path.startup_cost = startup_cost;
1696         path->jpath.path.total_cost = startup_cost + run_cost;
1697 }
1698
1699
1700 /*
1701  * cost_qual_eval
1702  *              Estimate the CPU costs of evaluating a WHERE clause.
1703  *              The input can be either an implicitly-ANDed list of boolean
1704  *              expressions, or a list of RestrictInfo nodes.  (The latter is
1705  *              preferred since it allows caching of the results.)
1706  *              The result includes both a one-time (startup) component,
1707  *              and a per-evaluation component.
1708  */
1709 void
1710 cost_qual_eval(QualCost *cost, List *quals)
1711 {
1712         ListCell   *l;
1713
1714         cost->startup = 0;
1715         cost->per_tuple = 0;
1716
1717         /* We don't charge any cost for the implicit ANDing at top level ... */
1718
1719         foreach(l, quals)
1720         {
1721                 Node       *qual = (Node *) lfirst(l);
1722
1723                 cost_qual_eval_walker(qual, cost);
1724         }
1725 }
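/*
 * Typical usage pattern (a minimal sketch; "qual_cost" and "path_rows" are
 * illustrative names only):
 *
 *              QualCost        qual_cost;
 *
 *              cost_qual_eval(&qual_cost, rel->baserestrictinfo);
 *              startup_cost += qual_cost.startup;
 *              run_cost += (cpu_tuple_cost + qual_cost.per_tuple) * path_rows;
 *
 * This is the same shape as the CPU-cost blocks in the join costers above.
 */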
1726
1727 /*
1728  * cost_qual_eval_node
1729  *              As above, for a single RestrictInfo or expression.
1730  */
1731 void
1732 cost_qual_eval_node(QualCost *cost, Node *qual)
1733 {
1734         cost->startup = 0;
1735         cost->per_tuple = 0;
1736         cost_qual_eval_walker(qual, cost);
1737 }
1738
1739 static bool
1740 cost_qual_eval_walker(Node *node, QualCost *total)
1741 {
1742         if (node == NULL)
1743                 return false;
1744
1745         /*
1746          * RestrictInfo nodes contain an eval_cost field reserved for this
1747          * routine's use, so that it's not necessary to evaluate the qual
1748          * clause's cost more than once.  If the clause's cost hasn't been
1749          * computed yet, the field's startup value will contain -1.
1750          */
1751         if (IsA(node, RestrictInfo))
1752         {
1753                 RestrictInfo *rinfo = (RestrictInfo *) node;
1754
1755                 if (rinfo->eval_cost.startup < 0)
1756                 {
1757                         rinfo->eval_cost.startup = 0;
1758                         rinfo->eval_cost.per_tuple = 0;
1759                         /*
1760                          * For an OR clause, recurse into the marked-up tree so that
1761                          * we set the eval_cost for contained RestrictInfos too.
1762                          */
1763                         if (rinfo->orclause)
1764                                 cost_qual_eval_walker((Node *) rinfo->orclause,
1765                                                                           &rinfo->eval_cost);
1766                         else
1767                                 cost_qual_eval_walker((Node *) rinfo->clause,
1768                                                                           &rinfo->eval_cost);
1769                         /*
1770                          * If the RestrictInfo is marked pseudoconstant, it will be tested
1771                          * only once, so treat its cost as all startup cost.
1772                          */
1773                         if (rinfo->pseudoconstant)
1774                         {
1775                                 /* count one execution during startup */
1776                                 rinfo->eval_cost.startup += rinfo->eval_cost.per_tuple;
1777                                 rinfo->eval_cost.per_tuple = 0;
1778                         }
1779                 }
1780                 total->startup += rinfo->eval_cost.startup;
1781                 total->per_tuple += rinfo->eval_cost.per_tuple;
1782                 /* do NOT recurse into children */
1783                 return false;
1784         }
1785
1786         /*
1787          * For each operator or function node in the given tree, we charge the
1788          * estimated execution cost given by pg_proc.procost (remember to
1789          * multiply this by cpu_operator_cost).
1790          *
1791          * Vars and Consts are charged zero, and so are boolean operators (AND,
1792          * OR, NOT). Simplistic, but a lot better than no model at all.
1793          *
1794          * Should we try to account for the possibility of short-circuit
1795          * evaluation of AND/OR?  Probably *not*, because that would make the
1796          * results depend on the clause ordering, and we are not in any position
1797          * to expect that the current ordering of the clauses is the one that's
1798          * going to end up being used.  (Is it worth applying order_qual_clauses
1799          * much earlier in the planning process to fix this?)
1800          */
1801         if (IsA(node, FuncExpr))
1802         {
1803                 total->per_tuple += get_func_cost(((FuncExpr *) node)->funcid) *
1804                         cpu_operator_cost;
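                /*
                 * Worked example (hypothetical numbers): a function declared with
                 * COST 100 and the default cpu_operator_cost of 0.0025 adds
                 * 100 * 0.0025 = 0.25 to the per-tuple estimate.
                 */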
1805         }
1806         else if (IsA(node, OpExpr) ||
1807                          IsA(node, DistinctExpr) ||
1808                          IsA(node, NullIfExpr))
1809         {
1810                 /* rely on struct equivalence to treat these all alike */
1811                 set_opfuncid((OpExpr *) node);
1812                 total->per_tuple += get_func_cost(((OpExpr *) node)->opfuncid) *
1813                         cpu_operator_cost;
1814         }
1815         else if (IsA(node, ScalarArrayOpExpr))
1816         {
1817                 /*
1818                  * Estimate that the operator will be applied to about half of the
1819                  * array elements before the answer is determined.
1820                  */
1821                 ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) node;
1822                 Node       *arraynode = (Node *) lsecond(saop->args);
1823
1824                 set_sa_opfuncid(saop);
1825                 total->per_tuple += get_func_cost(saop->opfuncid) *
1826                         cpu_operator_cost * estimate_array_length(arraynode) * 0.5;
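                /*
                 * E.g. (hypothetical numbers): for a 10-element array constant and
                 * an operator whose underlying function has the default procost of
                 * 1, we charge 1 * cpu_operator_cost * 10 * 0.5, i.e. five operator
                 * evaluations per tuple.
                 */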
1827         }
1828         else if (IsA(node, RowCompareExpr))
1829         {
1830                 /* Conservatively assume we will check all the columns */
1831                 RowCompareExpr *rcexpr = (RowCompareExpr *) node;
1832                 ListCell   *lc;
1833
1834                 foreach(lc, rcexpr->opnos)
1835                 {
1836                         Oid             opid = lfirst_oid(lc);
1837
1838                         total->per_tuple += get_func_cost(get_opcode(opid)) *
1839                                 cpu_operator_cost;
1840                 }
1841         }
1842         else if (IsA(node, SubLink))
1843         {
1844                 /* This routine should not be applied to un-planned expressions */
1845                 elog(ERROR, "cannot handle unplanned sub-select");
1846         }
1847         else if (IsA(node, SubPlan))
1848         {
1849                 /*
1850                  * A subplan node in an expression typically indicates that the
1851                  * subplan will be executed on each evaluation, so charge accordingly.
1852                  * (Sub-selects that can be executed as InitPlans have already been
1853                  * removed from the expression.)
1854                  *
1855                  * An exception occurs when we have decided we can implement the
1856                  * subplan by hashing.
1857                  */
1858                 SubPlan    *subplan = (SubPlan *) node;
1859                 Plan       *plan = subplan->plan;
1860
1861                 if (subplan->useHashTable)
1862                 {
1863                         /*
1864                          * If we are using a hash table for the subquery outputs, then the
1865                          * cost of evaluating the query is a one-time cost. We charge one
1866                          * cpu_operator_cost per tuple for the work of loading the
1867                          * hashtable, too.
1868                          */
1869                         total->startup += plan->total_cost +
1870                                 cpu_operator_cost * plan->plan_rows;
1871
1872                         /*
1873                          * The per-tuple costs include the cost of evaluating the lefthand
1874                          * expressions, plus the cost of probing the hashtable. Recursion
1875                          * into the testexpr will handle the lefthand expressions
1876                          * properly, and will count one cpu_operator_cost for each
1877                          * comparison operator.  That is probably too low for the probing
1878                          * cost, but it's hard to make a better estimate, so live with it
1879                          * for now.
1880                          */
1881                 }
1882                 else
1883                 {
1884                         /*
1885                          * Otherwise we will be rescanning the subplan output on each
1886                          * evaluation.  We need to estimate how much of the output we will
1887                          * actually need to scan.  NOTE: this logic should agree with the
1888                          * estimates used by make_subplan() in plan/subselect.c.
1889                          */
1890                         Cost            plan_run_cost = plan->total_cost - plan->startup_cost;
1891
1892                         if (subplan->subLinkType == EXISTS_SUBLINK)
1893                         {
1894                                 /* we only need to fetch 1 tuple */
1895                                 total->per_tuple += plan_run_cost / plan->plan_rows;
1896                         }
1897                         else if (subplan->subLinkType == ALL_SUBLINK ||
1898                                          subplan->subLinkType == ANY_SUBLINK)
1899                         {
1900                                 /* assume we need 50% of the tuples */
1901                                 total->per_tuple += 0.50 * plan_run_cost;
1902                                 /* also charge a cpu_operator_cost per row examined */
1903                                 total->per_tuple += 0.50 * plan->plan_rows * cpu_operator_cost;
1904                         }
1905                         else
1906                         {
1907                                 /* assume we need all tuples */
1908                                 total->per_tuple += plan_run_cost;
1909                         }
1910
1911                         /*
1912                          * Also account for subplan's startup cost. If the subplan is
1913                          * uncorrelated or undirect correlated, AND its topmost node is a
1914                          * Sort or Material node, assume that we'll only need to pay its
1915                          * startup cost once; otherwise assume we pay the startup cost
1916                          * every time.
1917                          */
1918                         if (subplan->parParam == NIL &&
1919                                 (IsA(plan, Sort) ||
1920                                  IsA(plan, Material)))
1921                                 total->startup += plan->startup_cost;
1922                         else
1923                                 total->per_tuple += plan->startup_cost;
1924                 }
1925         }
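        /*
         * Illustration of the SubPlan arithmetic above (hypothetical numbers):
         * an uncorrelated EXISTS subplan with startup_cost = 2, total_cost = 102
         * and plan_rows = 100 whose top node is neither a Sort nor a Material
         * charges (102 - 2) / 100 = 1 per evaluation for the scan, plus its
         * startup cost of 2 on every evaluation, for 3 per tuple.
         */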
1926
1927         /* recurse into children */
1928         return expression_tree_walker(node, cost_qual_eval_walker,
1929                                                                   (void *) total);
1930 }
1931
1932
1933 /*
1934  * approx_selectivity
1935  *              Quick-and-dirty estimation of clause selectivities.
1936  *              The input can be either an implicitly-ANDed list of boolean
1937  *              expressions, or a list of RestrictInfo nodes (typically the latter).
1938  *
1939  * This is quick-and-dirty because we bypass clauselist_selectivity, and
1940  * simply multiply the independent clause selectivities together.  Now
1941  * clauselist_selectivity often can't do any better than that anyhow, but
1942  * for some situations (such as range constraints) it is smarter.  However,
1943  * we can't effectively cache the results of clauselist_selectivity, whereas
1944  * the individual clause selectivities can be and are cached.
1945  *
1946  * Since we are only using the results to estimate how many potential
1947  * output tuples are generated and passed through qpqual checking, it
1948  * seems OK to live with the approximation.
1949  */
1950 static Selectivity
1951 approx_selectivity(PlannerInfo *root, List *quals, JoinType jointype)
1952 {
1953         Selectivity total = 1.0;
1954         ListCell   *l;
1955
1956         foreach(l, quals)
1957         {
1958                 Node       *qual = (Node *) lfirst(l);
1959
1960                 /* Note that clause_selectivity will be able to cache its result */
1961                 total *= clause_selectivity(root, qual, 0, jointype);
1962         }
1963         return total;
1964 }
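/*
 * Worked example (hypothetical numbers): two quals with cached selectivities
 * 0.1 and 0.5 yield an approximate combined selectivity of 0.05 here, whereas
 * clauselist_selectivity might recognize them as a range pair and return a
 * larger estimate.
 */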
1965
1966
1967 /*
1968  * set_baserel_size_estimates
1969  *              Set the size estimates for the given base relation.
1970  *
1971  * The rel's targetlist and restrictinfo list must have been constructed
1972  * already.
1973  *
1974  * We set the following fields of the rel node:
1975  *      rows: the estimated number of output tuples (after applying
1976  *                restriction clauses).
1977  *      width: the estimated average output tuple width in bytes.
1978  *      baserestrictcost: estimated cost of evaluating baserestrictinfo clauses.
1979  */
1980 void
1981 set_baserel_size_estimates(PlannerInfo *root, RelOptInfo *rel)
1982 {
1983         double          nrows;
1984
1985         /* Should only be applied to base relations */
1986         Assert(rel->relid > 0);
1987
1988         nrows = rel->tuples *
1989                 clauselist_selectivity(root,
1990                                                            rel->baserestrictinfo,
1991                                                            0,
1992                                                            JOIN_INNER);
1993
1994         rel->rows = clamp_row_est(nrows);
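        /*
         * Worked example (hypothetical numbers): with rel->tuples = 10000 and a
         * baserestrictinfo list whose combined selectivity is 0.015, we get
         * rel->rows = clamp_row_est(150) = 150.
         */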
1995
1996         cost_qual_eval(&rel->baserestrictcost, rel->baserestrictinfo);
1997
1998         set_rel_width(root, rel);
1999 }
2000
2001 /*
2002  * set_joinrel_size_estimates
2003  *              Set the size estimates for the given join relation.
2004  *
2005  * The rel's targetlist must have been constructed already, and a
2006  * restriction clause list that matches the given component rels must
2007  * be provided.
2008  *
2009  * Since there is more than one way to make a joinrel for more than two
2010  * base relations, the results we get here could depend on which component
2011  * rel pair is provided.  In theory we should get the same answers no matter
2012  * which pair is provided; in practice, since the selectivity estimation
2013  * routines don't handle all cases equally well, we might not.  But there's
2014  * not much to be done about it.  (Would it make sense to repeat the
2015  * calculations for each pair of input rels that's encountered, and somehow
2016  * average the results?  Probably way more trouble than it's worth.)
2017  *
2018  * It's important that the results for symmetric JoinTypes be symmetric,
2019  * eg, (rel1, rel2, JOIN_LEFT) should produce the same result as (rel2,
2020  * rel1, JOIN_RIGHT).  Also, JOIN_IN should produce the same result as
2021  * JOIN_UNIQUE_INNER, likewise JOIN_REVERSE_IN == JOIN_UNIQUE_OUTER.
2022  *
2023  * We set only the rows field here.  The width field was already set by
2024  * build_joinrel_tlist, and baserestrictcost is not used for join rels.
2025  */
2026 void
2027 set_joinrel_size_estimates(PlannerInfo *root, RelOptInfo *rel,
2028                                                    RelOptInfo *outer_rel,
2029                                                    RelOptInfo *inner_rel,
2030                                                    JoinType jointype,
2031                                                    List *restrictlist)
2032 {
2033         Selectivity jselec;
2034         Selectivity pselec;
2035         double          nrows;
2036         UniquePath *upath;
2037
2038         /*
2039          * Compute joinclause selectivity.  Note that we are only considering
2040          * clauses that become restriction clauses at this join level; we are not
2041          * double-counting them because they were not considered in estimating the
2042          * sizes of the component rels.
2043          *
2044          * For an outer join, we have to distinguish the selectivity of the
2045          * join's own clauses (JOIN/ON conditions) from any clauses that were
2046          * "pushed down".  For inner joins we just count them all as joinclauses.
2047          */
2048         if (IS_OUTER_JOIN(jointype))
2049         {
2050                 List       *joinquals = NIL;
2051                 List       *pushedquals = NIL;
2052                 ListCell   *l;
2053
2054                 /* Grovel through the clauses to separate into two lists */
2055                 foreach(l, restrictlist)
2056                 {
2057                         RestrictInfo *rinfo = (RestrictInfo *) lfirst(l);
2058
2059                         Assert(IsA(rinfo, RestrictInfo));
2060                         if (rinfo->is_pushed_down)
2061                                 pushedquals = lappend(pushedquals, rinfo);
2062                         else
2063                                 joinquals = lappend(joinquals, rinfo);
2064                 }
2065
2066                 /* Get the separate selectivities */
2067                 jselec = clauselist_selectivity(root,
2068                                                                                 joinquals,
2069                                                                                 0,
2070                                                                                 jointype);
2071                 pselec = clauselist_selectivity(root,
2072                                                                                 pushedquals,
2073                                                                                 0,
2074                                                                                 jointype);
2075
2076                 /* Avoid leaking a lot of ListCells */
2077                 list_free(joinquals);
2078                 list_free(pushedquals);
2079         }
2080         else
2081         {
2082                 jselec = clauselist_selectivity(root,
2083                                                                                 restrictlist,
2084                                                                                 0,
2085                                                                                 jointype);
2086                 pselec = 0.0;                   /* not used, keep compiler quiet */
2087         }
2088
2089         /*
2090          * Basically, we multiply size of Cartesian product by selectivity.
2091          *
2092          * If we are doing an outer join, take that into account: the joinqual
2093          * selectivity has to be clamped using the knowledge that the output must
2094          * be at least as large as the non-nullable input.  However, any
2095          * pushed-down quals are applied after the outer join, so their
2096          * selectivity applies fully.
2097          *
2098          * For JOIN_IN and variants, the Cartesian product is figured with respect
2099          * to a unique-ified input, and then we can clamp to the size of the other
2100          * input.
2101          */
2102         switch (jointype)
2103         {
2104                 case JOIN_INNER:
2105                         nrows = outer_rel->rows * inner_rel->rows * jselec;
2106                         break;
2107                 case JOIN_LEFT:
2108                         nrows = outer_rel->rows * inner_rel->rows * jselec;
2109                         if (nrows < outer_rel->rows)
2110                                 nrows = outer_rel->rows;
2111                         nrows *= pselec;
2112                         break;
2113                 case JOIN_RIGHT:
2114                         nrows = outer_rel->rows * inner_rel->rows * jselec;
2115                         if (nrows < inner_rel->rows)
2116                                 nrows = inner_rel->rows;
2117                         nrows *= pselec;
2118                         break;
2119                 case JOIN_FULL:
2120                         nrows = outer_rel->rows * inner_rel->rows * jselec;
2121                         if (nrows < outer_rel->rows)
2122                                 nrows = outer_rel->rows;
2123                         if (nrows < inner_rel->rows)
2124                                 nrows = inner_rel->rows;
2125                         nrows *= pselec;
2126                         break;
2127                 case JOIN_IN:
2128                 case JOIN_UNIQUE_INNER:
2129                         upath = create_unique_path(root, inner_rel,
2130                                                                            inner_rel->cheapest_total_path);
2131                         nrows = outer_rel->rows * upath->rows * jselec;
2132                         if (nrows > outer_rel->rows)
2133                                 nrows = outer_rel->rows;
2134                         break;
2135                 case JOIN_REVERSE_IN:
2136                 case JOIN_UNIQUE_OUTER:
2137                         upath = create_unique_path(root, outer_rel,
2138                                                                            outer_rel->cheapest_total_path);
2139                         nrows = upath->rows * inner_rel->rows * jselec;
2140                         if (nrows > inner_rel->rows)
2141                                 nrows = inner_rel->rows;
2142                         break;
2143                 default:
2144                         elog(ERROR, "unrecognized join type: %d", (int) jointype);
2145                         nrows = 0;                      /* keep compiler quiet */
2146                         break;
2147         }
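        /*
         * Clamping example (hypothetical numbers): a JOIN_LEFT with
         * outer_rel->rows = 1000, inner_rel->rows = 10, jselec = 0.0005 and
         * pselec = 0.2 first computes 1000 * 10 * 0.0005 = 5, clamps that up to
         * the outer size of 1000, and then applies the pushed-down selectivity
         * to arrive at nrows = 200.
         */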
2148
2149         rel->rows = clamp_row_est(nrows);
2150 }
2151
2152 /*
2153  * join_in_selectivity
2154  *        Determines the factor by which a JOIN_IN join's result is expected
2155  *        to be smaller than an ordinary inner join.
2156  *
2157  * 'path' is already filled in except for the cost fields
2158  */
2159 static Selectivity
2160 join_in_selectivity(JoinPath *path, PlannerInfo *root)
2161 {
2162         RelOptInfo *innerrel;
2163         UniquePath *innerunique;
2164         Selectivity selec;
2165         double          nrows;
2166
2167         /* Return 1.0 whenever it's not JOIN_IN */
2168         if (path->jointype != JOIN_IN)
2169                 return 1.0;
2170
2171         /*
2172          * Return 1.0 if the inner side is already known unique.  The case where
2173          * the inner path is already a UniquePath probably cannot happen in
2174          * current usage, but check it anyway for completeness.  The interesting
2175          * case is where we've determined the inner relation itself is unique,
2176          * which we can check by looking at the rows estimate for its UniquePath.
2177          */
2178         if (IsA(path->innerjoinpath, UniquePath))
2179                 return 1.0;
2180         innerrel = path->innerjoinpath->parent;
2181         innerunique = create_unique_path(root,
2182                                                                          innerrel,
2183                                                                          innerrel->cheapest_total_path);
2184         if (innerunique->rows >= innerrel->rows)
2185                 return 1.0;
2186
2187         /*
2188          * Compute same result set_joinrel_size_estimates would compute for
2189          * JOIN_INNER.  Note that we use the input rels' absolute size estimates,
2190          * not PATH_ROWS() which might be less; if we used PATH_ROWS() we'd be
2191          * double-counting the effects of any join clauses used in input scans.
2192          */
2193         selec = clauselist_selectivity(root,
2194                                                                    path->joinrestrictinfo,
2195                                                                    0,
2196                                                                    JOIN_INNER);
2197         nrows = path->outerjoinpath->parent->rows * innerrel->rows * selec;
2198
2199         nrows = clamp_row_est(nrows);
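        /*
         * E.g. (hypothetical numbers): if this JOIN_INNER-style estimate comes
         * out at nrows = 400 but the JOIN_IN path's parent rel is expected to
         * emit only 100 rows, the join costers scale their per-tuple work by
         * 100 / 400 = 0.25.
         */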
2200
2201         /* See if it's larger than the actual JOIN_IN size estimate */
2202         if (nrows > path->path.parent->rows)
2203                 return path->path.parent->rows / nrows;
2204         else
2205                 return 1.0;
2206 }
2207
2208 /*
2209  * set_function_size_estimates
2210  *              Set the size estimates for a base relation that is a function call.
2211  *
2212  * The rel's targetlist and restrictinfo list must have been constructed
2213  * already.
2214  *
2215  * We set the same fields as set_baserel_size_estimates.
2216  */
2217 void
2218 set_function_size_estimates(PlannerInfo *root, RelOptInfo *rel)
2219 {
2220         RangeTblEntry *rte;
2221
2222         /* Should only be applied to base relations that are functions */
2223         Assert(rel->relid > 0);
2224         rte = rt_fetch(rel->relid, root->parse->rtable);
2225         Assert(rte->rtekind == RTE_FUNCTION);
2226
2227         /* Estimate number of rows the function itself will return */
2228         rel->tuples = clamp_row_est(expression_returns_set_rows(rte->funcexpr));
2229
2230         /* Now estimate number of output rows, etc */
2231         set_baserel_size_estimates(root, rel);
2232 }
2233
2234 /*
2235  * set_values_size_estimates
2236  *              Set the size estimates for a base relation that is a values list.
2237  *
2238  * The rel's targetlist and restrictinfo list must have been constructed
2239  * already.
2240  *
2241  * We set the same fields as set_baserel_size_estimates.
2242  */
2243 void
2244 set_values_size_estimates(PlannerInfo *root, RelOptInfo *rel)
2245 {
2246         RangeTblEntry *rte;
2247
2248         /* Should only be applied to base relations that are values lists */
2249         Assert(rel->relid > 0);
2250         rte = rt_fetch(rel->relid, root->parse->rtable);
2251         Assert(rte->rtekind == RTE_VALUES);
2252
2253         /*
2254          * Estimate number of rows the values list will return. We know this
2255          * precisely based on the list length (well, barring set-returning
2256          * functions in list items, but that's a refinement not catered for
2257          * anywhere else either).
2258          */
2259         rel->tuples = list_length(rte->values_lists);
2260
2261         /* Now estimate number of output rows, etc */
2262         set_baserel_size_estimates(root, rel);
2263 }
2264
2265
2266 /*
2267  * set_rel_width
2268  *              Set the estimated output width of a base relation.
2269  *
2270  * NB: this works best on plain relations because it prefers to look at
2271  * real Vars.  It will fail to make use of pg_statistic info when applied
2272  * to a subquery relation, even if the subquery outputs are simple vars
2273  * that we could have gotten info for.  Is it worth trying to be smarter
2274  * about subqueries?
2275  *
2276  * The per-attribute width estimates are cached for possible re-use while
2277  * building join relations.
2278  */
2279 static void
2280 set_rel_width(PlannerInfo *root, RelOptInfo *rel)
2281 {
2282         int32           tuple_width = 0;
2283         ListCell   *tllist;
2284
2285         foreach(tllist, rel->reltargetlist)
2286         {
2287                 Var                *var = (Var *) lfirst(tllist);
2288                 int                     ndx;
2289                 Oid                     relid;
2290                 int32           item_width;
2291
2292                 /* For now, punt on whole-row child Vars */
2293                 if (!IsA(var, Var))
2294                 {
2295                         tuple_width += 32;      /* arbitrary */
2296                         continue;
2297                 }
2298
2299                 ndx = var->varattno - rel->min_attr;
2300
2301                 /*
2302                  * The width probably hasn't been cached yet, but may as well check
2303                  */
2304                 if (rel->attr_widths[ndx] > 0)
2305                 {
2306                         tuple_width += rel->attr_widths[ndx];
2307                         continue;
2308                 }
2309
2310                 relid = getrelid(var->varno, root->parse->rtable);
2311                 if (relid != InvalidOid)
2312                 {
2313                         item_width = get_attavgwidth(relid, var->varattno);
2314                         if (item_width > 0)
2315                         {
2316                                 rel->attr_widths[ndx] = item_width;
2317                                 tuple_width += item_width;
2318                                 continue;
2319                         }
2320                 }
2321
2322                 /*
2323                  * Not a plain relation, or can't find statistics for it. Estimate
2324                  * using just the type info.
2325                  */
2326                 item_width = get_typavgwidth(var->vartype, var->vartypmod);
2327                 Assert(item_width > 0);
2328                 rel->attr_widths[ndx] = item_width;
2329                 tuple_width += item_width;
2330         }
2331         Assert(tuple_width >= 0);
2332         rel->width = tuple_width;
2333 }
2334
2335 /*
2336  * relation_byte_size
2337  *        Estimate the storage space in bytes for a given number of tuples
2338  *        of a given width (size in bytes).
2339  */
2340 static double
2341 relation_byte_size(double tuples, int width)
2342 {
2343         return tuples * (MAXALIGN(width) + MAXALIGN(sizeof(HeapTupleHeaderData)));
2344 }
2345
2346 /*
2347  * page_size
2348  *        Returns an estimate of the number of pages covered by a given
2349  *        number of tuples of a given width (size in bytes).
2350  */
2351 static double
2352 page_size(double tuples, int width)
2353 {
2354         return ceil(relation_byte_size(tuples, width) / BLCKSZ);
2355 }
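/*
 * Worked example for the two helpers above (hypothetical numbers, assuming
 * MAXALIGN rounds up to a multiple of 8 and an aligned HeapTupleHeaderData of
 * 24 bytes): relation_byte_size(50000, 100) = 50000 * (104 + 24) = 6400000
 * bytes, and with BLCKSZ = 8192 that gives page_size(50000, 100) =
 * ceil(6400000 / 8192) = 782 pages.
 */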