1 /*-------------------------------------------------------------------------
2  *
3  * costsize.c
4  *        Routines to compute (and set) relation sizes and path costs
5  *
6  * Path costs are measured in arbitrary units established by these basic
7  * parameters:
8  *
9  *      seq_page_cost           Cost of a sequential page fetch
10  *      random_page_cost        Cost of a non-sequential page fetch
11  *      cpu_tuple_cost          Cost of typical CPU time to process a tuple
12  *      cpu_index_tuple_cost  Cost of typical CPU time to process an index tuple
13  *      cpu_operator_cost       Cost of CPU time to execute an operator or function
14  *
15  * We expect that the kernel will typically do some amount of read-ahead
16  * optimization; this in conjunction with seek costs means that seq_page_cost
17  * is normally considerably less than random_page_cost.  (However, if the
18  * database is fully cached in RAM, it is reasonable to set them equal.)
19  *
20  * We also use a rough estimate "effective_cache_size" of the number of
21  * disk pages in Postgres + OS-level disk cache.  (We can't simply use
22  * NBuffers for this purpose because that would ignore the effects of
23  * the kernel's disk cache.)
24  *
25  * Obviously, taking constants for these values is an oversimplification,
26  * but it's tough enough to get any useful estimates even at this level of
27  * detail.      Note that all of these parameters are user-settable, in case
28  * the default values are drastically off for a particular platform.
29  *
30  * We compute two separate costs for each path:
31  *              total_cost: total estimated cost to fetch all tuples
32  *              startup_cost: cost that is expended before first tuple is fetched
33  * In some scenarios, such as when there is a LIMIT or we are implementing
34  * an EXISTS(...) sub-select, it is not necessary to fetch all tuples of the
35  * path's result.  A caller can estimate the cost of fetching a partial
36  * result by interpolating between startup_cost and total_cost.  In detail:
37  *              actual_cost = startup_cost +
38  *                      (total_cost - startup_cost) * tuples_to_fetch / path->parent->rows;
39  * Note that a base relation's rows count (and, by extension, plan_rows for
40  * plan nodes below the LIMIT node) are set without regard to any LIMIT, so
41  * that this equation works properly.  (Also, these routines guarantee not to
42  * set the rows count to zero, so there will be no zero divide.)  The LIMIT is
43  * applied as a top-level plan node.
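 *
 * For illustration (made-up numbers): with startup_cost = 10, total_cost =
 * 1010, and path->parent->rows = 1000, fetching just the first 100 tuples
 * would be estimated to cost 10 + (1010 - 10) * 100 / 1000 = 110.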
44  *
45  * For largely historical reasons, most of the routines in this module use
46  * the passed result Path only to store their startup_cost and total_cost
47  * results into.  All the input data they need is passed as separate
48  * parameters, even though much of it could be extracted from the Path.
49  * An exception is made for the cost_XXXjoin() routines, which expect all
50  * the non-cost fields of the passed XXXPath to be filled in.
51  *
52  *
53  * Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
54  * Portions Copyright (c) 1994, Regents of the University of California
55  *
56  * IDENTIFICATION
57  *        $PostgreSQL: pgsql/src/backend/optimizer/path/costsize.c,v 1.169 2006/11/11 01:14:19 tgl Exp $
58  *
59  *-------------------------------------------------------------------------
60  */
61
62 #include "postgres.h"
63
64 #include <math.h>
65
66 #include "executor/nodeHash.h"
67 #include "miscadmin.h"
68 #include "optimizer/clauses.h"
69 #include "optimizer/cost.h"
70 #include "optimizer/pathnode.h"
71 #include "parser/parsetree.h"
72 #include "utils/lsyscache.h"
73 #include "utils/selfuncs.h"
74 #include "utils/tuplesort.h"
75
76
77 #define LOG2(x)  (log(x) / 0.693147180559945)
78
79 /*
80  * Some Paths return less than the nominal number of rows of their parent
81  * relations; join nodes need to do this to get the correct input count:
82  */
83 #define PATH_ROWS(path) \
84         (IsA(path, UniquePath) ? \
85          ((UniquePath *) (path))->rows : \
86          (path)->parent->rows)
87
88
89 double          seq_page_cost = DEFAULT_SEQ_PAGE_COST;
90 double          random_page_cost = DEFAULT_RANDOM_PAGE_COST;
91 double          cpu_tuple_cost = DEFAULT_CPU_TUPLE_COST;
92 double          cpu_index_tuple_cost = DEFAULT_CPU_INDEX_TUPLE_COST;
93 double          cpu_operator_cost = DEFAULT_CPU_OPERATOR_COST;
94
95 int                     effective_cache_size = DEFAULT_EFFECTIVE_CACHE_SIZE;
96
97 Cost            disable_cost = 100000000.0;
98
99 bool            enable_seqscan = true;
100 bool            enable_indexscan = true;
101 bool            enable_bitmapscan = true;
102 bool            enable_tidscan = true;
103 bool            enable_sort = true;
104 bool            enable_hashagg = true;
105 bool            enable_nestloop = true;
106 bool            enable_mergejoin = true;
107 bool            enable_hashjoin = true;
108
109
110 static bool cost_qual_eval_walker(Node *node, QualCost *total);
111 static Selectivity approx_selectivity(PlannerInfo *root, List *quals,
112                                    JoinType jointype);
113 static Selectivity join_in_selectivity(JoinPath *path, PlannerInfo *root);
114 static void set_rel_width(PlannerInfo *root, RelOptInfo *rel);
115 static double relation_byte_size(double tuples, int width);
116 static double page_size(double tuples, int width);
117
118
119 /*
120  * clamp_row_est
121  *              Force a row-count estimate to a sane value.
122  */
123 double
124 clamp_row_est(double nrows)
125 {
126         /*
127          * Force estimate to be at least one row, to make explain output look
128          * better and to avoid possible divide-by-zero when interpolating costs.
129          * Make it an integer, too.
130          */
131         if (nrows <= 1.0)
132                 nrows = 1.0;
133         else
134                 nrows = rint(nrows);
135
136         return nrows;
137 }
138
139
140 /*
141  * cost_seqscan
142  *        Determines and returns the cost of scanning a relation sequentially.
143  */
144 void
145 cost_seqscan(Path *path, PlannerInfo *root,
146                          RelOptInfo *baserel)
147 {
148         Cost            startup_cost = 0;
149         Cost            run_cost = 0;
150         Cost            cpu_per_tuple;
151
152         /* Should only be applied to base relations */
153         Assert(baserel->relid > 0);
154         Assert(baserel->rtekind == RTE_RELATION);
155
156         if (!enable_seqscan)
157                 startup_cost += disable_cost;
158
159         /*
160          * disk costs
161          */
162         run_cost += seq_page_cost * baserel->pages;
163
164         /* CPU costs */
165         startup_cost += baserel->baserestrictcost.startup;
166         cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
167         run_cost += cpu_per_tuple * baserel->tuples;
168
169         path->startup_cost = startup_cost;
170         path->total_cost = startup_cost + run_cost;
171 }
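
/*
 * Worked example for cost_seqscan, using made-up table statistics and the
 * usual default parameters (seq_page_cost = 1.0, cpu_tuple_cost = 0.01):
 * a table of 1000 pages and 100000 tuples with no restriction clauses costs
 * about 1.0 * 1000 + 0.01 * 100000 = 2000, all of it run cost.
 */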
172
173 /*
174  * cost_index
175  *        Determines and returns the cost of scanning a relation using an index.
176  *
177  * 'index' is the index to be used
178  * 'indexQuals' is the list of applicable qual clauses (implicit AND semantics)
179  * 'outer_rel' is the outer relation when we are considering using the index
180  *              scan as the inside of a nestloop join (hence, some of the indexQuals
181  *              are join clauses, and we should expect repeated scans of the index);
182  *              NULL for a plain index scan
183  *
184  * cost_index() takes an IndexPath not just a Path, because it sets a few
185  * additional fields of the IndexPath besides startup_cost and total_cost.
186  * These fields are needed if the IndexPath is used in a BitmapIndexScan.
187  *
188  * NOTE: 'indexQuals' must contain only clauses usable as index restrictions.
189  * Any additional quals evaluated as qpquals may reduce the number of returned
190  * tuples, but they won't reduce the number of tuples we have to fetch from
191  * the table, so they don't reduce the scan cost.
192  *
193  * NOTE: as of 8.0, indexQuals is a list of RestrictInfo nodes, where formerly
194  * it was a list of bare clause expressions.
195  */
196 void
197 cost_index(IndexPath *path, PlannerInfo *root,
198                    IndexOptInfo *index,
199                    List *indexQuals,
200                    RelOptInfo *outer_rel)
201 {
202         RelOptInfo *baserel = index->rel;
203         Cost            startup_cost = 0;
204         Cost            run_cost = 0;
205         Cost            indexStartupCost;
206         Cost            indexTotalCost;
207         Selectivity indexSelectivity;
208         double          indexCorrelation,
209                                 csquared;
210         Cost            min_IO_cost,
211                                 max_IO_cost;
212         Cost            cpu_per_tuple;
213         double          tuples_fetched;
214         double          pages_fetched;
215
216         /* Should only be applied to base relations */
217         Assert(IsA(baserel, RelOptInfo) &&
218                    IsA(index, IndexOptInfo));
219         Assert(baserel->relid > 0);
220         Assert(baserel->rtekind == RTE_RELATION);
221
222         if (!enable_indexscan)
223                 startup_cost += disable_cost;
224
225         /*
226          * Call index-access-method-specific code to estimate the processing cost
227          * for scanning the index, as well as the selectivity of the index (ie,
228          * the fraction of main-table tuples we will have to retrieve) and its
229          * correlation to the main-table tuple order.
230          */
231         OidFunctionCall8(index->amcostestimate,
232                                          PointerGetDatum(root),
233                                          PointerGetDatum(index),
234                                          PointerGetDatum(indexQuals),
235                                          PointerGetDatum(outer_rel),
236                                          PointerGetDatum(&indexStartupCost),
237                                          PointerGetDatum(&indexTotalCost),
238                                          PointerGetDatum(&indexSelectivity),
239                                          PointerGetDatum(&indexCorrelation));
240
241         /*
242          * Save amcostestimate's results for possible use in bitmap scan planning.
243          * We don't bother to save indexStartupCost or indexCorrelation, because a
244          * bitmap scan doesn't care about either.
245          */
246         path->indextotalcost = indexTotalCost;
247         path->indexselectivity = indexSelectivity;
248
249         /* all costs for touching index itself included here */
250         startup_cost += indexStartupCost;
251         run_cost += indexTotalCost - indexStartupCost;
252
253         /* estimate number of main-table tuples fetched */
254         tuples_fetched = clamp_row_est(indexSelectivity * baserel->tuples);
255
256         /*----------
257          * Estimate number of main-table pages fetched, and compute I/O cost.
258          *
259          * When the index ordering is uncorrelated with the table ordering,
260          * we use an approximation proposed by Mackert and Lohman (see
261          * index_pages_fetched() for details) to compute the number of pages
262          * fetched, and then charge random_page_cost per page fetched.
263          *
264          * When the index ordering is exactly correlated with the table ordering
265          * (just after a CLUSTER, for example), the number of pages fetched should
266          * be exactly selectivity * table_size.  What's more, all but the first
267          * will be sequential fetches, not the random fetches that occur in the
268          * uncorrelated case.  So if the number of pages is more than 1, we
269          * ought to charge
270          *              random_page_cost + (pages_fetched - 1) * seq_page_cost
271          * For partially-correlated indexes, we ought to charge somewhere between
272          * these two estimates.  We currently interpolate linearly between the
273          * estimates based on the correlation squared (XXX is that appropriate?).
274          *----------
275          */
276         if (outer_rel != NULL && outer_rel->rows > 1)
277         {
278                 /*
279                  * For repeated indexscans, scale up the number of tuples fetched in
280                  * the Mackert and Lohman formula by the number of scans, so that we
281                  * estimate the number of pages fetched by all the scans. Then
282                  * pro-rate the costs for one scan.  In this case we assume all the
283                  * fetches are random accesses.  XXX it'd be good to include
284                  * correlation in this model, but it's not clear how to do that
285                  * without double-counting cache effects.
286                  */
287                 double          num_scans = outer_rel->rows;
288
289                 pages_fetched = index_pages_fetched(tuples_fetched * num_scans,
290                                                                                         baserel->pages,
291                                                                                         (double) index->pages,
292                                                                                         root);
293
294                 run_cost += (pages_fetched * random_page_cost) / num_scans;
295         }
296         else
297         {
298                 /*
299                  * Normal case: apply the Mackert and Lohman formula, and then
300                  * interpolate between that and the correlation-derived result.
301                  */
302                 pages_fetched = index_pages_fetched(tuples_fetched,
303                                                                                         baserel->pages,
304                                                                                         (double) index->pages,
305                                                                                         root);
306
307                 /* max_IO_cost is for the perfectly uncorrelated case (csquared=0) */
308                 max_IO_cost = pages_fetched * random_page_cost;
309
310                 /* min_IO_cost is for the perfectly correlated case (csquared=1) */
311                 pages_fetched = ceil(indexSelectivity * (double) baserel->pages);
312                 min_IO_cost = random_page_cost;
313                 if (pages_fetched > 1)
314                         min_IO_cost += (pages_fetched - 1) * seq_page_cost;
315
316                 /*
317                  * Now interpolate based on estimated index order correlation to get
318                  * total disk I/O cost for main table accesses.
319                  */
320                 csquared = indexCorrelation * indexCorrelation;
321
322                 run_cost += max_IO_cost + csquared * (min_IO_cost - max_IO_cost);
323         }
324
325         /*
326          * Estimate CPU costs per tuple.
327          *
328          * Normally the indexquals will be removed from the list of restriction
329          * clauses that we have to evaluate as qpquals, so we should subtract
330          * their costs from baserestrictcost.  But if we are doing a join then
331          * some of the indexquals are join clauses and shouldn't be subtracted.
332          * Rather than work out exactly how much to subtract, we don't subtract
333          * anything.
334          */
335         startup_cost += baserel->baserestrictcost.startup;
336         cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
337
338         if (outer_rel == NULL)
339         {
340                 QualCost        index_qual_cost;
341
342                 cost_qual_eval(&index_qual_cost, indexQuals);
343                 /* any startup cost still has to be paid ... */
344                 cpu_per_tuple -= index_qual_cost.per_tuple;
345         }
346
347         run_cost += cpu_per_tuple * tuples_fetched;
348
349         path->path.startup_cost = startup_cost;
350         path->path.total_cost = startup_cost + run_cost;
351 }
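
/*
 * Worked example of the I/O interpolation in cost_index, with made-up numbers
 * and the default page costs (seq_page_cost = 1.0, random_page_cost = 4.0):
 * suppose the Mackert-Lohman estimate is 1000 pages fetched, while the
 * perfectly-correlated estimate is 100 pages.  Then max_IO_cost = 4000,
 * min_IO_cost = 4.0 + 99 * 1.0 = 103, and with indexCorrelation = 0.5
 * (csquared = 0.25) the charged I/O cost is 4000 + 0.25 * (103 - 4000),
 * i.e. about 3026.
 */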
352
353 /*
354  * index_pages_fetched
355  *        Estimate the number of pages actually fetched after accounting for
356  *        cache effects.
357  *
358  * We use an approximation proposed by Mackert and Lohman, "Index Scans
359  * Using a Finite LRU Buffer: A Validated I/O Model", ACM Transactions
360  * on Database Systems, Vol. 14, No. 3, September 1989, Pages 401-424.
361  * The Mackert and Lohman approximation is that the number of pages
362  * fetched is
363  *      PF =
364  *              min(2TNs/(2T+Ns), T)                    when T <= b
365  *              2TNs/(2T+Ns)                                    when T > b and Ns <= 2Tb/(2T-b)
366  *              b + (Ns - 2Tb/(2T-b))*(T-b)/T   when T > b and Ns > 2Tb/(2T-b)
367  * where
368  *              T = # pages in table
369  *              N = # tuples in table
370  *              s = selectivity = fraction of table to be scanned
371  *              b = # buffer pages available (we include kernel space here)
372  *
373  * We assume that effective_cache_size is the total number of buffer pages
374  * available for the whole query, and pro-rate that space across all the
375  * tables in the query and the index currently under consideration.  (This
376  * ignores space needed for other indexes used by the query, but since we
377  * don't know which indexes will get used, we can't estimate that very well;
378  * and in any case counting all the tables may well be an overestimate, since
379  * depending on the join plan not all the tables may be scanned concurrently.)
380  *
381  * The product Ns is the number of tuples fetched; we pass in that
382  * product rather than calculating it here.  "pages" is the number of pages
383  * in the object under consideration (either an index or a table).
384  * "index_pages" is the amount to add to the total table space, which was
385  * computed for us by query_planner.
386  *
387  * Caller is expected to have ensured that tuples_fetched is greater than zero
388  * and rounded to integer (see clamp_row_est).  The result will likewise be
389  * greater than zero and integral.
390  */
391 double
392 index_pages_fetched(double tuples_fetched, BlockNumber pages,
393                                         double index_pages, PlannerInfo *root)
394 {
395         double          pages_fetched;
396         double          total_pages;
397         double          T,
398                                 b;
399
400         /* T is # pages in table, but don't allow it to be zero */
401         T = (pages > 1) ? (double) pages : 1.0;
402
403         /* Compute number of pages assumed to be competing for cache space */
404         total_pages = root->total_table_pages + index_pages;
405         total_pages = Max(total_pages, 1.0);
406         Assert(T <= total_pages);
407
408         /* b is pro-rated share of effective_cache_size */
409         b = (double) effective_cache_size * T / total_pages;
410
411         /* force it positive and integral */
412         if (b <= 1.0)
413                 b = 1.0;
414         else
415                 b = ceil(b);
416
417         /* This part is the Mackert and Lohman formula */
418         if (T <= b)
419         {
420                 pages_fetched =
421                         (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
422                 if (pages_fetched >= T)
423                         pages_fetched = T;
424                 else
425                         pages_fetched = ceil(pages_fetched);
426         }
427         else
428         {
429                 double          lim;
430
431                 lim = (2.0 * T * b) / (2.0 * T - b);
432                 if (tuples_fetched <= lim)
433                 {
434                         pages_fetched =
435                                 (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
436                 }
437                 else
438                 {
439                         pages_fetched =
440                                 b + (tuples_fetched - lim) * (T - b) / T;
441                 }
442                 pages_fetched = ceil(pages_fetched);
443         }
444         return pages_fetched;
445 }
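
/*
 * Worked example of the formula above (made-up numbers): with T = 100 pages
 * and b = 1000 buffer pages (T <= b), fetching Ns = 500 tuples gives
 * 2*100*500 / (2*100 + 500) = 142.9, which is clamped to T = 100 pages.
 * With T = 1000 and b = 500 (T > b), the threshold 2Tb/(2T-b) is about 667;
 * fetching 2000 tuples then yields 500 + (2000 - 667) * (1000 - 500) / 1000,
 * i.e. about 1167 page fetches, since evicted pages must be re-read.
 */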
446
447 /*
448  * get_indexpath_pages
449  *              Determine the total size of the indexes used in a bitmap index path.
450  *
451  * Note: if the same index is used more than once in a bitmap tree, we will
452  * count it multiple times, which perhaps is the wrong thing ... but it's
453  * not completely clear, and detecting duplicates is difficult, so ignore it
454  * for now.
455  */
456 static double
457 get_indexpath_pages(Path *bitmapqual)
458 {
459         double          result = 0;
460         ListCell   *l;
461
462         if (IsA(bitmapqual, BitmapAndPath))
463         {
464                 BitmapAndPath *apath = (BitmapAndPath *) bitmapqual;
465
466                 foreach(l, apath->bitmapquals)
467                 {
468                         result += get_indexpath_pages((Path *) lfirst(l));
469                 }
470         }
471         else if (IsA(bitmapqual, BitmapOrPath))
472         {
473                 BitmapOrPath *opath = (BitmapOrPath *) bitmapqual;
474
475                 foreach(l, opath->bitmapquals)
476                 {
477                         result += get_indexpath_pages((Path *) lfirst(l));
478                 }
479         }
480         else if (IsA(bitmapqual, IndexPath))
481         {
482                 IndexPath  *ipath = (IndexPath *) bitmapqual;
483
484                 result = (double) ipath->indexinfo->pages;
485         }
486         else
487                 elog(ERROR, "unrecognized node type: %d", nodeTag(bitmapqual));
488
489         return result;
490 }
491
492 /*
493  * cost_bitmap_heap_scan
494  *        Determines and returns the cost of scanning a relation using a bitmap
495  *        index-then-heap plan.
496  *
497  * 'baserel' is the relation to be scanned
498  * 'bitmapqual' is a tree of IndexPaths, BitmapAndPaths, and BitmapOrPaths
499  * 'outer_rel' is the outer relation when we are considering using the bitmap
500  *              scan as the inside of a nestloop join (hence, some of the indexQuals
501  *              are join clauses, and we should expect repeated scans of the table);
502  *              NULL for a plain bitmap scan
503  *
504  * Note: if this is a join inner path, the component IndexPaths in bitmapqual
505  * should have been costed accordingly.
506  */
507 void
508 cost_bitmap_heap_scan(Path *path, PlannerInfo *root, RelOptInfo *baserel,
509                                           Path *bitmapqual, RelOptInfo *outer_rel)
510 {
511         Cost            startup_cost = 0;
512         Cost            run_cost = 0;
513         Cost            indexTotalCost;
514         Selectivity indexSelectivity;
515         Cost            cpu_per_tuple;
516         Cost            cost_per_page;
517         double          tuples_fetched;
518         double          pages_fetched;
519         double          T;
520
521         /* Should only be applied to base relations */
522         Assert(IsA(baserel, RelOptInfo));
523         Assert(baserel->relid > 0);
524         Assert(baserel->rtekind == RTE_RELATION);
525
526         if (!enable_bitmapscan)
527                 startup_cost += disable_cost;
528
529         /*
530          * Fetch total cost of obtaining the bitmap, as well as its total
531          * selectivity.
532          */
533         cost_bitmap_tree_node(bitmapqual, &indexTotalCost, &indexSelectivity);
534
535         startup_cost += indexTotalCost;
536
537         /*
538          * Estimate number of main-table pages fetched.
539          */
540         tuples_fetched = clamp_row_est(indexSelectivity * baserel->tuples);
541
542         T = (baserel->pages > 1) ? (double) baserel->pages : 1.0;
543
544         if (outer_rel != NULL && outer_rel->rows > 1)
545         {
546                 /*
547                  * For repeated bitmap scans, scale up the number of tuples fetched in
548                  * the Mackert and Lohman formula by the number of scans, so that we
549                  * estimate the number of pages fetched by all the scans. Then
550                  * pro-rate for one scan.
551                  */
552                 double          num_scans = outer_rel->rows;
553
554                 pages_fetched = index_pages_fetched(tuples_fetched * num_scans,
555                                                                                         baserel->pages,
556                                                                                         get_indexpath_pages(bitmapqual),
557                                                                                         root);
558                 pages_fetched /= num_scans;
559         }
560         else
561         {
562                 /*
563                  * For a single scan, the number of heap pages that need to be fetched
564                  * is the same as the Mackert and Lohman formula for the case T <= b
565                  * (ie, no re-reads needed).
566                  */
567                 pages_fetched = (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
568         }
569         if (pages_fetched >= T)
570                 pages_fetched = T;
571         else
572                 pages_fetched = ceil(pages_fetched);
573
574         /*
575          * For small numbers of pages we should charge random_page_cost apiece,
576          * while if nearly all the table's pages are being read, it's more
577          * appropriate to charge seq_page_cost apiece.  The effect is nonlinear,
578          * too. For lack of a better idea, interpolate like this to determine the
579          * cost per page.
580          */
581         if (pages_fetched >= 2.0)
582                 cost_per_page = random_page_cost -
583                         (random_page_cost - seq_page_cost) * sqrt(pages_fetched / T);
584         else
585                 cost_per_page = random_page_cost;
586
587         run_cost += pages_fetched * cost_per_page;
588
589         /*
590          * Estimate CPU costs per tuple.
591          *
592          * Often the indexquals don't need to be rechecked at each tuple ... but
593          * not always, especially not if there are enough tuples involved that the
594          * bitmaps become lossy.  For the moment, just assume they will be
595          * rechecked always.
596          */
597         startup_cost += baserel->baserestrictcost.startup;
598         cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
599
600         run_cost += cpu_per_tuple * tuples_fetched;
601
602         path->startup_cost = startup_cost;
603         path->total_cost = startup_cost + run_cost;
604 }
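
/*
 * Worked example of the per-page interpolation above, with made-up numbers
 * and the default page costs (seq_page_cost = 1.0, random_page_cost = 4.0):
 * if the table has T = 1000 pages and we expect to fetch 250 of them, then
 * cost_per_page = 4.0 - (4.0 - 1.0) * sqrt(250/1000) = 4.0 - 3.0 * 0.5 = 2.5,
 * partway between a fully random and a fully sequential read.
 */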
605
606 /*
607  * cost_bitmap_tree_node
608  *              Extract cost and selectivity from a bitmap tree node (index/and/or)
609  */
610 void
611 cost_bitmap_tree_node(Path *path, Cost *cost, Selectivity *selec)
612 {
613         if (IsA(path, IndexPath))
614         {
615                 *cost = ((IndexPath *) path)->indextotalcost;
616                 *selec = ((IndexPath *) path)->indexselectivity;
617         }
618         else if (IsA(path, BitmapAndPath))
619         {
620                 *cost = path->total_cost;
621                 *selec = ((BitmapAndPath *) path)->bitmapselectivity;
622         }
623         else if (IsA(path, BitmapOrPath))
624         {
625                 *cost = path->total_cost;
626                 *selec = ((BitmapOrPath *) path)->bitmapselectivity;
627         }
628         else
629         {
630                 elog(ERROR, "unrecognized node type: %d", nodeTag(path));
631                 *cost = *selec = 0;             /* keep compiler quiet */
632         }
633 }
634
635 /*
636  * cost_bitmap_and_node
637  *              Estimate the cost of a BitmapAnd node
638  *
639  * Note that this considers only the costs of index scanning and bitmap
640  * creation, not the eventual heap access.      In that sense the object isn't
641  * truly a Path, but it has enough path-like properties (costs in particular)
642  * to warrant treating it as one.
643  */
644 void
645 cost_bitmap_and_node(BitmapAndPath *path, PlannerInfo *root)
646 {
647         Cost            totalCost;
648         Selectivity selec;
649         ListCell   *l;
650
651         /*
652          * We estimate AND selectivity on the assumption that the inputs are
653          * independent.  This is probably often wrong, but we don't have the info
654          * to do better.
655          *
656          * The runtime cost of the BitmapAnd itself is estimated at 100x
657          * cpu_operator_cost for each tbm_intersect needed.  Probably too small,
658          * definitely too simplistic?
659          */
660         totalCost = 0.0;
661         selec = 1.0;
662         foreach(l, path->bitmapquals)
663         {
664                 Path       *subpath = (Path *) lfirst(l);
665                 Cost            subCost;
666                 Selectivity subselec;
667
668                 cost_bitmap_tree_node(subpath, &subCost, &subselec);
669
670                 selec *= subselec;
671
672                 totalCost += subCost;
673                 if (l != list_head(path->bitmapquals))
674                         totalCost += 100.0 * cpu_operator_cost;
675         }
676         path->bitmapselectivity = selec;
677         path->path.startup_cost = totalCost;
678         path->path.total_cost = totalCost;
679 }
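
/*
 * Worked example (made-up numbers, default cpu_operator_cost = 0.0025):
 * ANDing two inputs with selectivities 0.1 and 0.2 and costs 50 and 80
 * gives bitmapselectivity = 0.1 * 0.2 = 0.02 and a path cost of
 * 50 + 80 + 100 * 0.0025 = 130.25 (one tbm_intersect charge, for the
 * second input).
 */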
680
681 /*
682  * cost_bitmap_or_node
683  *              Estimate the cost of a BitmapOr node
684  *
685  * See comments for cost_bitmap_and_node.
686  */
687 void
688 cost_bitmap_or_node(BitmapOrPath *path, PlannerInfo *root)
689 {
690         Cost            totalCost;
691         Selectivity selec;
692         ListCell   *l;
693
694         /*
695          * We estimate OR selectivity on the assumption that the inputs are
696          * non-overlapping, since that's often the case in "x IN (list)" type
697          * situations.  Of course, we clamp to 1.0 at the end.
698          *
699          * The runtime cost of the BitmapOr itself is estimated at 100x
700          * cpu_operator_cost for each tbm_union needed.  Probably too small,
701          * definitely too simplistic?  We are aware that the tbm_unions are
702          * optimized out when the inputs are BitmapIndexScans.
703          */
704         totalCost = 0.0;
705         selec = 0.0;
706         foreach(l, path->bitmapquals)
707         {
708                 Path       *subpath = (Path *) lfirst(l);
709                 Cost            subCost;
710                 Selectivity subselec;
711
712                 cost_bitmap_tree_node(subpath, &subCost, &subselec);
713
714                 selec += subselec;
715
716                 totalCost += subCost;
717                 if (l != list_head(path->bitmapquals) &&
718                         !IsA(subpath, IndexPath))
719                         totalCost += 100.0 * cpu_operator_cost;
720         }
721         path->bitmapselectivity = Min(selec, 1.0);
722         path->path.startup_cost = totalCost;
723         path->path.total_cost = totalCost;
724 }
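
/*
 * Worked example (made-up numbers): ORing three index scans with
 * selectivities 0.3, 0.5, and 0.4 gives Min(0.3 + 0.5 + 0.4, 1.0) = 1.0;
 * because the inputs are plain IndexPaths, no tbm_union charge is added,
 * so the path cost is just the sum of the input costs.
 */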
725
726 /*
727  * cost_tidscan
728  *        Determines and returns the cost of scanning a relation using TIDs.
729  */
730 void
731 cost_tidscan(Path *path, PlannerInfo *root,
732                          RelOptInfo *baserel, List *tidquals)
733 {
734         Cost            startup_cost = 0;
735         Cost            run_cost = 0;
736         Cost            cpu_per_tuple;
737         int                     ntuples;
738         ListCell   *l;
739
740         /* Should only be applied to base relations */
741         Assert(baserel->relid > 0);
742         Assert(baserel->rtekind == RTE_RELATION);
743
744         if (!enable_tidscan)
745                 startup_cost += disable_cost;
746
747         /* Count how many tuples we expect to retrieve */
748         ntuples = 0;
749         foreach(l, tidquals)
750         {
751                 if (IsA(lfirst(l), ScalarArrayOpExpr))
752                 {
753                         /* Each element of the array yields 1 tuple */
754                         ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) lfirst(l);
755                         Node       *arraynode = (Node *) lsecond(saop->args);
756
757                         ntuples += estimate_array_length(arraynode);
758                 }
759                 else
760                 {
761                         /* It's just CTID = something, count 1 tuple */
762                         ntuples++;
763                 }
764         }
765
766         /* disk costs --- assume each tuple on a different page */
767         run_cost += random_page_cost * ntuples;
768
769         /* CPU costs */
770         startup_cost += baserel->baserestrictcost.startup;
771         cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
772         run_cost += cpu_per_tuple * ntuples;
773
774         path->startup_cost = startup_cost;
775         path->total_cost = startup_cost + run_cost;
776 }
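
/*
 * Worked example (made-up numbers, default random_page_cost = 4.0,
 * cpu_tuple_cost = 0.01): for WHERE ctid = ANY(<constant array of 10 TIDs>)
 * we count ntuples = 10, so the disk cost is 10 * 4.0 = 40, plus roughly
 * 10 * 0.01 = 0.1 of CPU cost (ignoring restriction-clause costs).
 */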
777
778 /*
779  * cost_subqueryscan
780  *        Determines and returns the cost of scanning a subquery RTE.
781  */
782 void
783 cost_subqueryscan(Path *path, RelOptInfo *baserel)
784 {
785         Cost            startup_cost;
786         Cost            run_cost;
787         Cost            cpu_per_tuple;
788
789         /* Should only be applied to base relations that are subqueries */
790         Assert(baserel->relid > 0);
791         Assert(baserel->rtekind == RTE_SUBQUERY);
792
793         /*
794          * Cost of path is cost of evaluating the subplan, plus cost of evaluating
795          * any restriction clauses that will be attached to the SubqueryScan node,
796          * plus cpu_tuple_cost to account for selection and projection overhead.
797          */
798         path->startup_cost = baserel->subplan->startup_cost;
799         path->total_cost = baserel->subplan->total_cost;
800
801         startup_cost = baserel->baserestrictcost.startup;
802         cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
803         run_cost = cpu_per_tuple * baserel->tuples;
804
805         path->startup_cost += startup_cost;
806         path->total_cost += startup_cost + run_cost;
807 }
808
809 /*
810  * cost_functionscan
811  *        Determines and returns the cost of scanning a function RTE.
812  */
813 void
814 cost_functionscan(Path *path, PlannerInfo *root, RelOptInfo *baserel)
815 {
816         Cost            startup_cost = 0;
817         Cost            run_cost = 0;
818         Cost            cpu_per_tuple;
819
820         /* Should only be applied to base relations that are functions */
821         Assert(baserel->relid > 0);
822         Assert(baserel->rtekind == RTE_FUNCTION);
823
824         /*
825          * For now, estimate function's cost at one operator eval per function
826          * call.  Someday we should revive the function cost estimate columns in
827          * pg_proc...
828          */
829         cpu_per_tuple = cpu_operator_cost;
830
831         /* Add scanning CPU costs */
832         startup_cost += baserel->baserestrictcost.startup;
833         cpu_per_tuple += cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
834         run_cost += cpu_per_tuple * baserel->tuples;
835
836         path->startup_cost = startup_cost;
837         path->total_cost = startup_cost + run_cost;
838 }
839
840 /*
841  * cost_valuesscan
842  *        Determines and returns the cost of scanning a VALUES RTE.
843  */
844 void
845 cost_valuesscan(Path *path, PlannerInfo *root, RelOptInfo *baserel)
846 {
847         Cost            startup_cost = 0;
848         Cost            run_cost = 0;
849         Cost            cpu_per_tuple;
850
851         /* Should only be applied to base relations that are values lists */
852         Assert(baserel->relid > 0);
853         Assert(baserel->rtekind == RTE_VALUES);
854
855         /*
856          * For now, estimate list evaluation cost at one operator eval per list
857          * (probably pretty bogus, but is it worth being smarter?)
858          */
859         cpu_per_tuple = cpu_operator_cost;
860
861         /* Add scanning CPU costs */
862         startup_cost += baserel->baserestrictcost.startup;
863         cpu_per_tuple += cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
864         run_cost += cpu_per_tuple * baserel->tuples;
865
866         path->startup_cost = startup_cost;
867         path->total_cost = startup_cost + run_cost;
868 }
869
870 /*
871  * cost_sort
872  *        Determines and returns the cost of sorting a relation, including
873  *        the cost of reading the input data.
874  *
875  * If the total volume of data to sort is less than work_mem, we will do
876  * an in-memory sort, which requires no I/O and about t*log2(t) tuple
877  * comparisons for t tuples.
878  *
879  * If the total volume exceeds work_mem, we switch to a tape-style merge
880  * algorithm.  There will still be about t*log2(t) tuple comparisons in
881  * total, but we will also need to write and read each tuple once per
882  * merge pass.  We expect about ceil(logM(r)) merge passes where r is the
883  * number of initial runs formed and M is the merge order used by tuplesort.c.
884  * Since the average initial run should be about twice work_mem, we have
885  *              disk traffic = 2 * relsize * ceil(logM(relsize / (2*work_mem)))
886  *              cpu = comparison_cost * t * log2(t)
887  *
888  * The disk traffic is assumed to be 3/4ths sequential and 1/4th random
889  * accesses (XXX can't we refine that guess?)
890  *
891  * We charge two operator evals per tuple comparison, which should be in
892  * the right ballpark in most cases.
893  *
894  * 'pathkeys' is a list of sort keys
895  * 'input_cost' is the total cost for reading the input data
896  * 'tuples' is the number of tuples in the relation
897  * 'width' is the average tuple width in bytes
898  *
899  * NOTE: some callers currently pass NIL for pathkeys because they
900  * can't conveniently supply the sort keys.  Since this routine doesn't
901  * currently do anything with pathkeys anyway, that doesn't matter...
902  * but if it ever does, it should react gracefully to lack of key data.
903  * (Actually, the thing we'd most likely be interested in is just the number
904  * of sort keys, which all callers *could* supply.)
905  */
906 void
907 cost_sort(Path *path, PlannerInfo *root,
908                   List *pathkeys, Cost input_cost, double tuples, int width)
909 {
910         Cost            startup_cost = input_cost;
911         Cost            run_cost = 0;
912         double          nbytes = relation_byte_size(tuples, width);
913         long            work_mem_bytes = work_mem * 1024L;
914
915         if (!enable_sort)
916                 startup_cost += disable_cost;
917
918         /*
919          * We want to be sure the cost of a sort is never estimated as zero, even
920          * if passed-in tuple count is zero.  Besides, mustn't do log(0)...
921          */
922         if (tuples < 2.0)
923                 tuples = 2.0;
924
925         /*
926          * CPU costs
927          *
928          * Assume about two operator evals per tuple comparison and N log2 N
929          * comparisons
930          */
931         startup_cost += 2.0 * cpu_operator_cost * tuples * LOG2(tuples);
932
933         /* disk costs */
934         if (nbytes > work_mem_bytes)
935         {
936                 double          npages = ceil(nbytes / BLCKSZ);
937                 double          nruns = (nbytes / work_mem_bytes) * 0.5;
938                 double          mergeorder = tuplesort_merge_order(work_mem_bytes);
939                 double          log_runs;
940                 double          npageaccesses;
941
942                 /* Compute logM(r) as log(r) / log(M) */
943                 if (nruns > mergeorder)
944                         log_runs = ceil(log(nruns) / log(mergeorder));
945                 else
946                         log_runs = 1.0;
947                 npageaccesses = 2.0 * npages * log_runs;
948                 /* Assume 3/4ths of accesses are sequential, 1/4th are not */
949                 startup_cost += npageaccesses *
950                         (seq_page_cost * 0.75 + random_page_cost * 0.25);
951         }
952
953         /*
954          * Also charge a small amount (arbitrarily set equal to operator cost) per
955          * extracted tuple.
956          */
957         run_cost += cpu_operator_cost * tuples;
958
959         path->startup_cost = startup_cost;
960         path->total_cost = startup_cost + run_cost;
961 }
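
/*
 * Worked example of the charges above (made-up numbers, default
 * cpu_operator_cost = 0.0025): sorting one million tuples adds about
 * 2 * 0.0025 * 1e6 * log2(1e6) = 2 * 0.0025 * 1e6 * 19.93, i.e. roughly
 * 99700, to startup_cost, plus 0.0025 * 1e6 = 2500 of run cost for
 * extracting the tuples.  If the data spills to disk and, say, 8 initial
 * runs fit within the merge order, log_runs = 1 and each of the
 * 2 * npages page accesses is charged 1.0 * 0.75 + 4.0 * 0.25 = 1.75
 * with the default page costs.
 */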
962
963 /*
964  * cost_material
965  *        Determines and returns the cost of materializing a relation, including
966  *        the cost of reading the input data.
967  *
968  * If the total volume of data to materialize exceeds work_mem, we will need
969  * to write it to disk, so the cost is much higher in that case.
970  */
971 void
972 cost_material(Path *path,
973                           Cost input_cost, double tuples, int width)
974 {
975         Cost            startup_cost = input_cost;
976         Cost            run_cost = 0;
977         double          nbytes = relation_byte_size(tuples, width);
978         long            work_mem_bytes = work_mem * 1024L;
979
980         /* disk costs */
981         if (nbytes > work_mem_bytes)
982         {
983                 double          npages = ceil(nbytes / BLCKSZ);
984
985                 /* We'll write during startup and read during retrieval */
986                 startup_cost += seq_page_cost * npages;
987                 run_cost += seq_page_cost * npages;
988         }
989
990         /*
991          * Charge a very small amount per inserted tuple, to reflect bookkeeping
992          * costs.  We use cpu_tuple_cost/10 for this.  This is needed to break the
993          * tie that would otherwise exist between nestloop with A outer,
994          * materialized B inner and nestloop with B outer, materialized A inner.
995          * The extra cost ensures we'll prefer materializing the smaller rel.
996          */
997         startup_cost += cpu_tuple_cost * 0.1 * tuples;
998
999         /*
1000          * Also charge a small amount per extracted tuple.      We use cpu_tuple_cost
1001          * so that it doesn't appear worthwhile to materialize a bare seqscan.
1002          */
1003         run_cost += cpu_tuple_cost * tuples;
1004
1005         path->startup_cost = startup_cost;
1006         path->total_cost = startup_cost + run_cost;
1007 }
1008
1009 /*
1010  * cost_agg
1011  *              Determines and returns the cost of performing an Agg plan node,
1012  *              including the cost of its input.
1013  *
1014  * Note: when aggstrategy == AGG_SORTED, caller must ensure that input costs
1015  * are for appropriately-sorted input.
1016  */
1017 void
1018 cost_agg(Path *path, PlannerInfo *root,
1019                  AggStrategy aggstrategy, int numAggs,
1020                  int numGroupCols, double numGroups,
1021                  Cost input_startup_cost, Cost input_total_cost,
1022                  double input_tuples)
1023 {
1024         Cost            startup_cost;
1025         Cost            total_cost;
1026
1027         /*
1028          * We charge one cpu_operator_cost per aggregate function per input tuple,
1029          * and another one per output tuple (corresponding to transfn and finalfn
1030          * calls respectively).  If we are grouping, we charge an additional
1031          * cpu_operator_cost per grouping column per input tuple for grouping
1032          * comparisons.
1033          *
1034          * We will produce a single output tuple if not grouping, and a tuple per
1035          * group otherwise.  We charge cpu_tuple_cost for each output tuple.
1036          *
1037          * Note: in this cost model, AGG_SORTED and AGG_HASHED have exactly the
1038          * same total CPU cost, but AGG_SORTED has lower startup cost.  If the
1039          * input path is already sorted appropriately, AGG_SORTED should be
1040          * preferred (since it has no risk of memory overflow).  This will happen
1041          * as long as the computed total costs are indeed exactly equal --- but if
1042          * there's roundoff error we might do the wrong thing.  So be sure that
1043          * the computations below form the same intermediate values in the same
1044          * order.
1045          */
1046         if (aggstrategy == AGG_PLAIN)
1047         {
1048                 startup_cost = input_total_cost;
1049                 startup_cost += cpu_operator_cost * (input_tuples + 1) * numAggs;
1050                 /* we aren't grouping */
1051                 total_cost = startup_cost + cpu_tuple_cost;
1052         }
1053         else if (aggstrategy == AGG_SORTED)
1054         {
1055                 /* Here we are able to deliver output on-the-fly */
1056                 startup_cost = input_startup_cost;
1057                 total_cost = input_total_cost;
1058                 /* calcs phrased this way to match HASHED case, see note above */
1059                 total_cost += cpu_operator_cost * input_tuples * numGroupCols;
1060                 total_cost += cpu_operator_cost * input_tuples * numAggs;
1061                 total_cost += cpu_operator_cost * numGroups * numAggs;
1062                 total_cost += cpu_tuple_cost * numGroups;
1063         }
1064         else
1065         {
1066                 /* must be AGG_HASHED */
1067                 startup_cost = input_total_cost;
1068                 startup_cost += cpu_operator_cost * input_tuples * numGroupCols;
1069                 startup_cost += cpu_operator_cost * input_tuples * numAggs;
1070                 total_cost = startup_cost;
1071                 total_cost += cpu_operator_cost * numGroups * numAggs;
1072                 total_cost += cpu_tuple_cost * numGroups;
1073         }
1074
1075         path->startup_cost = startup_cost;
1076         path->total_cost = total_cost;
1077 }
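
/*
 * Worked example for the AGG_HASHED branch (made-up numbers, default
 * cpu_operator_cost = 0.0025 and cpu_tuple_cost = 0.01): 100000 input
 * tuples, 2 grouping columns, 1 aggregate, and 100 expected groups add
 * 0.0025 * 100000 * 2 + 0.0025 * 100000 * 1 = 750 to the startup cost,
 * and 0.0025 * 100 * 1 + 0.01 * 100 = 1.25 more to the total cost, on top
 * of the input path's total cost.
 */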
1078
1079 /*
1080  * cost_group
1081  *              Determines and returns the cost of performing a Group plan node,
1082  *              including the cost of its input.
1083  *
1084  * Note: caller must ensure that input costs are for appropriately-sorted
1085  * input.
1086  */
1087 void
1088 cost_group(Path *path, PlannerInfo *root,
1089                    int numGroupCols, double numGroups,
1090                    Cost input_startup_cost, Cost input_total_cost,
1091                    double input_tuples)
1092 {
1093         Cost            startup_cost;
1094         Cost            total_cost;
1095
1096         startup_cost = input_startup_cost;
1097         total_cost = input_total_cost;
1098
1099         /*
1100          * Charge one cpu_operator_cost per comparison per input tuple. We assume
1101          * all columns get compared for most of the tuples.
1102          */
1103         total_cost += cpu_operator_cost * input_tuples * numGroupCols;
1104
1105         path->startup_cost = startup_cost;
1106         path->total_cost = total_cost;
1107 }
1108
1109 /*
1110  * If a nestloop's inner path is an indexscan, be sure to use its estimated
1111  * output row count, which may be lower than the restriction-clause-only row
1112  * count of its parent.  (We don't include this case in the PATH_ROWS macro
1113  * because it applies *only* to a nestloop's inner relation.)  We have to
1114  * be prepared to recurse through Append nodes in case of an appendrel.
1115  */
1116 static double
1117 nestloop_inner_path_rows(Path *path)
1118 {
1119         double          result;
1120
1121         if (IsA(path, IndexPath))
1122                 result = ((IndexPath *) path)->rows;
1123         else if (IsA(path, BitmapHeapPath))
1124                 result = ((BitmapHeapPath *) path)->rows;
1125         else if (IsA(path, AppendPath))
1126         {
1127                 ListCell   *l;
1128
1129                 result = 0;
1130                 foreach(l, ((AppendPath *) path)->subpaths)
1131                 {
1132                         result += nestloop_inner_path_rows((Path *) lfirst(l));
1133                 }
1134         }
1135         else
1136                 result = PATH_ROWS(path);
1137
1138         return result;
1139 }
1140
1141 /*
1142  * cost_nestloop
1143  *        Determines and returns the cost of joining two relations using the
1144  *        nested loop algorithm.
1145  *
1146  * 'path' is already filled in except for the cost fields
1147  */
1148 void
1149 cost_nestloop(NestPath *path, PlannerInfo *root)
1150 {
1151         Path       *outer_path = path->outerjoinpath;
1152         Path       *inner_path = path->innerjoinpath;
1153         Cost            startup_cost = 0;
1154         Cost            run_cost = 0;
1155         Cost            cpu_per_tuple;
1156         QualCost        restrict_qual_cost;
1157         double          outer_path_rows = PATH_ROWS(outer_path);
1158         double          inner_path_rows = nestloop_inner_path_rows(inner_path);
1159         double          ntuples;
1160         Selectivity joininfactor;
1161
1162         if (!enable_nestloop)
1163                 startup_cost += disable_cost;
1164
1165         /*
1166          * If we're doing JOIN_IN then we will stop scanning inner tuples for an
1167          * outer tuple as soon as we have one match.  Account for the effects of
1168          * this by scaling down the cost estimates in proportion to the JOIN_IN
1169          * selectivity.  (This assumes that all the quals attached to the join are
1170          * IN quals, which should be true.)
1171          */
1172         joininfactor = join_in_selectivity(path, root);
1173
1174         /* cost of source data */
1175
1176         /*
1177          * NOTE: clearly, we must pay both outer and inner paths' startup_cost
1178          * before we can start returning tuples, so the join's startup cost is
1179          * their sum.  What's not so clear is whether the inner path's
1180          * startup_cost must be paid again on each rescan of the inner path. This
1181          * is not true if the inner path is materialized or is a hashjoin, but
1182          * probably is true otherwise.
1183          */
1184         startup_cost += outer_path->startup_cost + inner_path->startup_cost;
1185         run_cost += outer_path->total_cost - outer_path->startup_cost;
1186         if (IsA(inner_path, MaterialPath) ||
1187                 IsA(inner_path, HashPath))
1188         {
1189                 /* charge only run cost for each iteration of inner path */
1190         }
1191         else
1192         {
1193                 /*
1194                  * charge startup cost for each iteration of inner path, except we
1195                  * already charged the first startup_cost in our own startup
1196                  */
1197                 run_cost += (outer_path_rows - 1) * inner_path->startup_cost;
1198         }
1199         run_cost += outer_path_rows *
1200                 (inner_path->total_cost - inner_path->startup_cost) * joininfactor;
1201
1202         /*
1203          * Compute number of tuples processed (not number emitted!)
1204          */
1205         ntuples = outer_path_rows * inner_path_rows * joininfactor;
1206
1207         /* CPU costs */
1208         cost_qual_eval(&restrict_qual_cost, path->joinrestrictinfo);
1209         startup_cost += restrict_qual_cost.startup;
1210         cpu_per_tuple = cpu_tuple_cost + restrict_qual_cost.per_tuple;
1211         run_cost += cpu_per_tuple * ntuples;
1212
1213         path->path.startup_cost = startup_cost;
1214         path->path.total_cost = startup_cost + run_cost;
1215 }
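
/*
 * Worked example (made-up numbers, default cpu_tuple_cost = 0.01): joining
 * 1000 outer rows against an inner indexscan expected to return 50 rows per
 * probe, with joininfactor = 1, gives ntuples = 1000 * 50 = 50000 and hence
 * 0.01 * 50000 = 500 of per-tuple CPU cost (ignoring join-qual costs), in
 * addition to 1000 repetitions of the inner path's run cost and 999 extra
 * repetitions of its startup cost (an indexscan is neither materialized nor
 * a hashjoin).
 */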
1216
1217 /*
1218  * cost_mergejoin
1219  *        Determines and returns the cost of joining two relations using the
1220  *        merge join algorithm.
1221  *
1222  * 'path' is already filled in except for the cost fields
1223  *
1224  * Notes: path's mergeclauses should be a subset of the joinrestrictinfo list;
1225  * outersortkeys and innersortkeys are lists of the keys to be used
1226  * to sort the outer and inner relations, or NIL if no explicit
1227  * sort is needed because the source path is already ordered.
1228  */
1229 void
1230 cost_mergejoin(MergePath *path, PlannerInfo *root)
1231 {
1232         Path       *outer_path = path->jpath.outerjoinpath;
1233         Path       *inner_path = path->jpath.innerjoinpath;
1234         List       *mergeclauses = path->path_mergeclauses;
1235         List       *outersortkeys = path->outersortkeys;
1236         List       *innersortkeys = path->innersortkeys;
1237         Cost            startup_cost = 0;
1238         Cost            run_cost = 0;
1239         Cost            cpu_per_tuple;
1240         Selectivity merge_selec;
1241         QualCost        merge_qual_cost;
1242         QualCost        qp_qual_cost;
1243         RestrictInfo *firstclause;
1244         double          outer_path_rows = PATH_ROWS(outer_path);
1245         double          inner_path_rows = PATH_ROWS(inner_path);
1246         double          outer_rows,
1247                                 inner_rows;
1248         double          mergejointuples,
1249                                 rescannedtuples;
1250         double          rescanratio;
1251         Selectivity outerscansel,
1252                                 innerscansel;
1253         Selectivity joininfactor;
1254         Path            sort_path;              /* dummy for result of cost_sort */
1255
1256         if (!enable_mergejoin)
1257                 startup_cost += disable_cost;
1258
1259         /*
1260          * Compute cost and selectivity of the mergequals and qpquals (other
1261          * restriction clauses) separately.  We use approx_selectivity here for
1262          * speed --- in most cases, any errors won't affect the result much.
1263          *
1264          * Note: it's probably bogus to use the normal selectivity calculation
1265          * here when either the outer or inner path is a UniquePath.
1266          */
1267         merge_selec = approx_selectivity(root, mergeclauses,
1268                                                                          path->jpath.jointype);
1269         cost_qual_eval(&merge_qual_cost, mergeclauses);
1270         cost_qual_eval(&qp_qual_cost, path->jpath.joinrestrictinfo);
1271         qp_qual_cost.startup -= merge_qual_cost.startup;
1272         qp_qual_cost.per_tuple -= merge_qual_cost.per_tuple;
1273
1274         /* approx # tuples passing the merge quals */
1275         mergejointuples = clamp_row_est(merge_selec * outer_path_rows * inner_path_rows);
1276
1277         /*
1278          * When there are equal merge keys in the outer relation, the mergejoin
1279          * must rescan any matching tuples in the inner relation. This means
1280          * re-fetching inner tuples.  Our cost model for this is that a re-fetch
1281          * costs the same as an original fetch, which is probably an overestimate;
1282          * but on the other hand we ignore the bookkeeping costs of mark/restore.
1283          * Not clear if it's worth developing a more refined model.
1284          *
1285          * The number of re-fetches can be estimated approximately as size of
1286          * merge join output minus size of inner relation.      Assume that the
1287          * distinct key values are 1, 2, ..., and denote the number of values of
1288          * each key in the outer relation as m1, m2, ...; in the inner relation,
1289          * n1, n2, ... Then we have
1290          *
1291          * size of join = m1 * n1 + m2 * n2 + ...
1292          *
1293          * number of rescanned tuples = (m1 - 1) * n1 + (m2 - 1) * n2 + ... = m1 *
1294          * n1 + m2 * n2 + ... - (n1 + n2 + ...) = size of join - size of inner
1295          * relation
1296          *
1297          * This equation works correctly for outer tuples having no inner match
1298          * (nk = 0), but not for inner tuples having no outer match (mk = 0); we
1299          * are effectively subtracting those from the number of rescanned tuples,
1300          * when we should not.  Can we do better without expensive selectivity
1301          * computations?
1302          */
1303         if (IsA(outer_path, UniquePath))
1304                 rescannedtuples = 0;
1305         else
1306         {
1307                 rescannedtuples = mergejointuples - inner_path_rows;
1308                 /* Must clamp because of possible underestimate */
1309                 if (rescannedtuples < 0)
1310                         rescannedtuples = 0;
1311         }
1312         /* We'll inflate inner run cost this much to account for rescanning */
1313         rescanratio = 1.0 + (rescannedtuples / inner_path_rows);
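
        /*
         * Illustrative example (made-up numbers): suppose the outer side has key
         * multiplicities m = (2, 1) and the inner side has n = (3, 4).  Then
         * size of join = 2*3 + 1*4 = 10 and size of inner relation = 7, so
         * rescannedtuples = 10 - 7 = 3 and rescanratio = 1 + 3/7 ~= 1.43,
         * i.e. the inner-side run cost below gets inflated by about 43%.
         */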
1314
1315         /*
1316          * A merge join will stop as soon as it exhausts either input stream
1317          * (unless it's an outer join, in which case the outer side has to be
1318          * scanned all the way anyway).  Estimate fraction of the left and right
1319          * inputs that will actually need to be scanned. We use only the first
1320          * (most significant) merge clause for this purpose.
1321          *
1322          * Since this calculation is somewhat expensive, and will be the same for
1323          * all mergejoin paths associated with the merge clause, we cache the
1324          * results in the RestrictInfo node.
1325          */
1326         if (mergeclauses && path->jpath.jointype != JOIN_FULL)
1327         {
1328                 firstclause = (RestrictInfo *) linitial(mergeclauses);
1329                 if (firstclause->left_mergescansel < 0) /* not computed yet? */
1330                         mergejoinscansel(root, (Node *) firstclause->clause,
1331                                                          &firstclause->left_mergescansel,
1332                                                          &firstclause->right_mergescansel);
1333
1334                 if (bms_is_subset(firstclause->left_relids, outer_path->parent->relids))
1335                 {
1336                         /* left side of clause is outer */
1337                         outerscansel = firstclause->left_mergescansel;
1338                         innerscansel = firstclause->right_mergescansel;
1339                 }
1340                 else
1341                 {
1342                         /* left side of clause is inner */
1343                         outerscansel = firstclause->right_mergescansel;
1344                         innerscansel = firstclause->left_mergescansel;
1345                 }
1346                 if (path->jpath.jointype == JOIN_LEFT)
1347                         outerscansel = 1.0;
1348                 else if (path->jpath.jointype == JOIN_RIGHT)
1349                         innerscansel = 1.0;
1350         }
1351         else
1352         {
1353                 /* cope with clauseless or full mergejoin */
1354                 outerscansel = innerscansel = 1.0;
1355         }
1356
1357         /* convert selectivity to row count; must scan at least one row */
1358         outer_rows = clamp_row_est(outer_path_rows * outerscansel);
1359         inner_rows = clamp_row_est(inner_path_rows * innerscansel);
1360
1361         /*
1362          * Readjust scan selectivities to account for above rounding.  This is
1363          * normally an insignificant effect, but when there are only a few rows in
1364          * the inputs, failing to do this makes for a large percentage error.
1365          */
1366         outerscansel = outer_rows / outer_path_rows;
1367         innerscansel = inner_rows / inner_path_rows;
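
        /*
         * Worked example of the rounding adjustment (numbers purely
         * illustrative): if inner_path_rows = 3 and the raw innerscansel = 0.1,
         * then clamp_row_est(3 * 0.1) = 1, so inner_rows = 1 and the readjusted
         * innerscansel becomes 1/3 ~= 0.33 rather than 0.1.  For large inputs
         * the readjustment is negligible.
         */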
1368
1369         /* cost of source data */
1370
1371         if (outersortkeys)                      /* do we need to sort outer? */
1372         {
1373                 cost_sort(&sort_path,
1374                                   root,
1375                                   outersortkeys,
1376                                   outer_path->total_cost,
1377                                   outer_path_rows,
1378                                   outer_path->parent->width);
1379                 startup_cost += sort_path.startup_cost;
1380                 run_cost += (sort_path.total_cost - sort_path.startup_cost)
1381                         * outerscansel;
1382         }
1383         else
1384         {
1385                 startup_cost += outer_path->startup_cost;
1386                 run_cost += (outer_path->total_cost - outer_path->startup_cost)
1387                         * outerscansel;
1388         }
1389
1390         if (innersortkeys)                      /* do we need to sort inner? */
1391         {
1392                 cost_sort(&sort_path,
1393                                   root,
1394                                   innersortkeys,
1395                                   inner_path->total_cost,
1396                                   inner_path_rows,
1397                                   inner_path->parent->width);
1398                 startup_cost += sort_path.startup_cost;
1399                 run_cost += (sort_path.total_cost - sort_path.startup_cost)
1400                         * innerscansel * rescanratio;
1401         }
1402         else
1403         {
1404                 startup_cost += inner_path->startup_cost;
1405                 run_cost += (inner_path->total_cost - inner_path->startup_cost)
1406                         * innerscansel * rescanratio;
1407         }
1408
1409         /* CPU costs */
1410
1411         /*
1412          * If we're doing JOIN_IN then we will stop outputting inner tuples for an
1413          * outer tuple as soon as we have one match.  Account for the effects of
1414          * this by scaling down the cost estimates in proportion to the expected
1415          * output size.  (This assumes that all the quals attached to the join are
1416          * IN quals, which should be true.)
1417          */
1418         joininfactor = join_in_selectivity(&path->jpath, root);
1419
1420         /*
1421          * The number of tuple comparisons needed is approximately number of outer
1422          * rows plus number of inner rows plus number of rescanned tuples (can we
1423          * refine this?).  At each one, we need to evaluate the mergejoin quals.
1424          * NOTE: JOIN_IN mode does not save any work here, so do NOT include
1425          * joininfactor.
1426          */
1427         startup_cost += merge_qual_cost.startup;
1428         run_cost += merge_qual_cost.per_tuple *
1429                 (outer_rows + inner_rows * rescanratio);
1430
1431         /*
1432          * For each tuple that gets through the mergejoin proper, we charge
1433          * cpu_tuple_cost plus the cost of evaluating additional restriction
1434          * clauses that are to be applied at the join.  (This is pessimistic since
1435          * not all of the quals may get evaluated at each tuple.)  This work is
1436          * skipped in JOIN_IN mode, so apply the factor.
1437          */
1438         startup_cost += qp_qual_cost.startup;
1439         cpu_per_tuple = cpu_tuple_cost + qp_qual_cost.per_tuple;
1440         run_cost += cpu_per_tuple * mergejointuples * joininfactor;
1441
1442         path->jpath.path.startup_cost = startup_cost;
1443         path->jpath.path.total_cost = startup_cost + run_cost;
1444 }
1445
1446 /*
1447  * cost_hashjoin
1448  *        Determines the cost of joining two relations using the hash join
1449  *        algorithm, and stores it in the path's cost fields.
1450  *
1451  * 'path' is already filled in except for the cost fields
1452  *
1453  * Note: path's hashclauses should be a subset of the joinrestrictinfo list
1454  */
1455 void
1456 cost_hashjoin(HashPath *path, PlannerInfo *root)
1457 {
1458         Path       *outer_path = path->jpath.outerjoinpath;
1459         Path       *inner_path = path->jpath.innerjoinpath;
1460         List       *hashclauses = path->path_hashclauses;
1461         Cost            startup_cost = 0;
1462         Cost            run_cost = 0;
1463         Cost            cpu_per_tuple;
1464         Selectivity hash_selec;
1465         QualCost        hash_qual_cost;
1466         QualCost        qp_qual_cost;
1467         double          hashjointuples;
1468         double          outer_path_rows = PATH_ROWS(outer_path);
1469         double          inner_path_rows = PATH_ROWS(inner_path);
1470         double          outerbytes = relation_byte_size(outer_path_rows,
1471                                                                                                 outer_path->parent->width);
1472         double          innerbytes = relation_byte_size(inner_path_rows,
1473                                                                                                 inner_path->parent->width);
1474         int                     num_hashclauses = list_length(hashclauses);
1475         int                     numbuckets;
1476         int                     numbatches;
1477         double          virtualbuckets;
1478         Selectivity innerbucketsize;
1479         Selectivity joininfactor;
1480         ListCell   *hcl;
1481
1482         if (!enable_hashjoin)
1483                 startup_cost += disable_cost;
1484
1485         /*
1486          * Compute cost and selectivity of the hashquals and qpquals (other
1487          * restriction clauses) separately.  We use approx_selectivity here for
1488          * speed --- in most cases, any errors won't affect the result much.
1489          *
1490          * Note: it's probably bogus to use the normal selectivity calculation
1491          * here when either the outer or inner path is a UniquePath.
1492          */
1493         hash_selec = approx_selectivity(root, hashclauses,
1494                                                                         path->jpath.jointype);
1495         cost_qual_eval(&hash_qual_cost, hashclauses);
1496         cost_qual_eval(&qp_qual_cost, path->jpath.joinrestrictinfo);
1497         qp_qual_cost.startup -= hash_qual_cost.startup;
1498         qp_qual_cost.per_tuple -= hash_qual_cost.per_tuple;
1499
1500         /* approx # tuples passing the hash quals */
1501         hashjointuples = clamp_row_est(hash_selec * outer_path_rows * inner_path_rows);
1502
1503         /* cost of source data */
1504         startup_cost += outer_path->startup_cost;
1505         run_cost += outer_path->total_cost - outer_path->startup_cost;
1506         startup_cost += inner_path->total_cost;
1507
1508         /*
1509          * Cost of computing hash function: must do it once per input tuple. We
1510          * charge one cpu_operator_cost for each column's hash function.
1511          *
1512          * XXX when a hashclause is more complex than a single operator, we really
1513          * should charge the extra eval costs of the left or right side, as
1514          * appropriate, here.  This seems more work than it's worth at the moment.
1515          */
1516         startup_cost += cpu_operator_cost * num_hashclauses * inner_path_rows;
1517         run_cost += cpu_operator_cost * num_hashclauses * outer_path_rows;
1518
1519         /* Get hash table size that executor would use for inner relation */
1520         ExecChooseHashTableSize(inner_path_rows,
1521                                                         inner_path->parent->width,
1522                                                         &numbuckets,
1523                                                         &numbatches);
1524         virtualbuckets = (double) numbuckets * (double) numbatches;
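
        /*
         * For illustration only: if ExecChooseHashTableSize were to report
         * numbuckets = 1024 and numbatches = 4, then virtualbuckets = 4096 and
         * a unique-ified inner relation would get innerbucketsize = 1/4096
         * ~= 0.000244 below.
         */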
1525
1526         /*
1527          * Determine bucketsize fraction for inner relation.  We use the smallest
1528          * bucketsize estimated for any individual hashclause; this is undoubtedly
1529          * conservative.
1530          *
1531          * BUT: if inner relation has been unique-ified, we can assume it's good
1532          * for hashing.  This is important both because it's the right answer, and
1533          * because we avoid contaminating the cache with a value that's wrong for
1534          * non-unique-ified paths.
1535          */
1536         if (IsA(inner_path, UniquePath))
1537                 innerbucketsize = 1.0 / virtualbuckets;
1538         else
1539         {
1540                 innerbucketsize = 1.0;
1541                 foreach(hcl, hashclauses)
1542                 {
1543                         RestrictInfo *restrictinfo = (RestrictInfo *) lfirst(hcl);
1544                         Selectivity thisbucketsize;
1545
1546                         Assert(IsA(restrictinfo, RestrictInfo));
1547
1548                         /*
1549                          * First we have to figure out which side of the hashjoin clause
1550                          * is the inner side.
1551                          *
1552                          * Since we tend to visit the same clauses over and over when
1553                          * planning a large query, we cache the bucketsize estimate in the
1554                          * RestrictInfo node to avoid repeated lookups of statistics.
1555                          */
1556                         if (bms_is_subset(restrictinfo->right_relids,
1557                                                           inner_path->parent->relids))
1558                         {
1559                                 /* righthand side is inner */
1560                                 thisbucketsize = restrictinfo->right_bucketsize;
1561                                 if (thisbucketsize < 0)
1562                                 {
1563                                         /* not cached yet */
1564                                         thisbucketsize =
1565                                                 estimate_hash_bucketsize(root,
1566                                                                                    get_rightop(restrictinfo->clause),
1567                                                                                                  virtualbuckets);
1568                                         restrictinfo->right_bucketsize = thisbucketsize;
1569                                 }
1570                         }
1571                         else
1572                         {
1573                                 Assert(bms_is_subset(restrictinfo->left_relids,
1574                                                                          inner_path->parent->relids));
1575                                 /* lefthand side is inner */
1576                                 thisbucketsize = restrictinfo->left_bucketsize;
1577                                 if (thisbucketsize < 0)
1578                                 {
1579                                         /* not cached yet */
1580                                         thisbucketsize =
1581                                                 estimate_hash_bucketsize(root,
1582                                                                                         get_leftop(restrictinfo->clause),
1583                                                                                                  virtualbuckets);
1584                                         restrictinfo->left_bucketsize = thisbucketsize;
1585                                 }
1586                         }
1587
1588                         if (innerbucketsize > thisbucketsize)
1589                                 innerbucketsize = thisbucketsize;
1590                 }
1591         }
1592
1593         /*
1594          * If inner relation is too big then we will need to "batch" the join,
1595          * which implies writing and reading most of the tuples to disk an extra
1596          * time.  Charge one cost unit per page of I/O (correct since it should be
1597          * nice and sequential...).  Writing the inner rel counts as startup cost,
1598          * all the rest as run cost.
1599          */
1600         if (numbatches > 1)
1601         {
1602                 double          outerpages = page_size(outer_path_rows,
1603                                                                                    outer_path->parent->width);
1604                 double          innerpages = page_size(inner_path_rows,
1605                                                                                    inner_path->parent->width);
1606
1607                 startup_cost += innerpages;
1608                 run_cost += innerpages + 2 * outerpages;
1609         }
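
        /*
         * Rough illustration with made-up page counts: if innerpages = 1000 and
         * outerpages = 5000, batching adds 1000 cost units of startup (writing
         * out the inner rel) and 1000 + 2 * 5000 = 11000 units of run cost
         * (reading the inner rel back, plus writing and re-reading the outer).
         */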
1610
1611         /* CPU costs */
1612
1613         /*
1614          * If we're doing JOIN_IN then we will stop comparing inner tuples to an
1615          * outer tuple as soon as we have one match.  Account for the effects of
1616          * this by scaling down the cost estimates in proportion to the expected
1617          * output size.  (This assumes that all the quals attached to the join are
1618          * IN quals, which should be true.)
1619          */
1620         joininfactor = join_in_selectivity(&path->jpath, root);
1621
1622         /*
1623          * The number of tuple comparisons needed is the number of outer tuples
1624          * times the typical number of tuples in a hash bucket, which is the inner
1625          * relation size times its bucketsize fraction.  At each one, we need to
1626          * evaluate the hashjoin quals.  (Note: charging the full qual eval cost
1627          * at each tuple is pessimistic, since we don't evaluate the quals unless
1628          * the hash values match exactly.)
1629          */
1630         startup_cost += hash_qual_cost.startup;
1631         run_cost += hash_qual_cost.per_tuple *
1632                 outer_path_rows * clamp_row_est(inner_path_rows * innerbucketsize) *
1633                 joininfactor;
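
        /*
         * Illustrative numbers only: with outer_path_rows = 10000,
         * inner_path_rows = 100000 and innerbucketsize = 1/4096, each outer
         * tuple meets clamp_row_est(100000/4096) ~= 24 bucket mates, so the
         * charge is hash_qual_cost.per_tuple times roughly 10000 * 24 = 240000
         * comparisons, further scaled by joininfactor.
         */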
1634
1635         /*
1636          * For each tuple that gets through the hashjoin proper, we charge
1637          * cpu_tuple_cost plus the cost of evaluating additional restriction
1638          * clauses that are to be applied at the join.  (This is pessimistic since
1639          * not all of the quals may get evaluated at each tuple.)
1640          */
1641         startup_cost += qp_qual_cost.startup;
1642         cpu_per_tuple = cpu_tuple_cost + qp_qual_cost.per_tuple;
1643         run_cost += cpu_per_tuple * hashjointuples * joininfactor;
1644
1645         /*
1646          * Bias against putting larger relation on inside.  We don't want an
1647          * absolute prohibition, though, since larger relation might have better
1648          * bucketsize --- and we can't trust the size estimates unreservedly,
1649          * anyway.  Instead, inflate the run cost by the square root of the size
1650          * ratio.  (Why square root?  No real good reason, but it seems
1651          * reasonable...)
1652          *
1653          * Note: before 7.4 we implemented this by inflating startup cost; but if
1654          * there's a disable_cost component in the input paths' startup cost, that
1655          * unfairly penalizes the hash.  Probably it'd be better to keep track of
1656          * disable penalty separately from cost.
1657          */
1658         if (innerbytes > outerbytes && outerbytes > 0)
1659                 run_cost *= sqrt(innerbytes / outerbytes);
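
        /*
         * For example (numbers invented): if the inner relation is estimated at
         * four times the byte size of the outer, run_cost is multiplied by
         * sqrt(4) = 2, nudging the planner toward hashing the smaller relation
         * without forbidding the reverse choice.
         */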
1660
1661         path->jpath.path.startup_cost = startup_cost;
1662         path->jpath.path.total_cost = startup_cost + run_cost;
1663 }
1664
1665
1666 /*
1667  * cost_qual_eval
1668  *              Estimate the CPU costs of evaluating a WHERE clause.
1669  *              The input can be either an implicitly-ANDed list of boolean
1670  *              expressions, or a list of RestrictInfo nodes.
1671  *              The result includes both a one-time (startup) component,
1672  *              and a per-evaluation component.
1673  */
1674 void
1675 cost_qual_eval(QualCost *cost, List *quals)
1676 {
1677         ListCell   *l;
1678
1679         cost->startup = 0;
1680         cost->per_tuple = 0;
1681
1682         /* We don't charge any cost for the implicit ANDing at top level ... */
1683
1684         foreach(l, quals)
1685         {
1686                 Node       *qual = (Node *) lfirst(l);
1687
1688                 /*
1689                  * RestrictInfo nodes contain an eval_cost field reserved for this
1690                  * routine's use, so that it's not necessary to evaluate the qual
1691                  * clause's cost more than once.  If the clause's cost hasn't been
1692                  * computed yet, the field's startup value will contain -1.
1693                  *
1694                  * If the RestrictInfo is marked pseudoconstant, it will be tested
1695                  * only once, so treat its cost as all startup cost.
1696                  */
1697                 if (qual && IsA(qual, RestrictInfo))
1698                 {
1699                         RestrictInfo *rinfo = (RestrictInfo *) qual;
1700
1701                         if (rinfo->eval_cost.startup < 0)
1702                         {
1703                                 rinfo->eval_cost.startup = 0;
1704                                 rinfo->eval_cost.per_tuple = 0;
1705                                 cost_qual_eval_walker((Node *) rinfo->clause,
1706                                                                           &rinfo->eval_cost);
1707                                 if (rinfo->pseudoconstant)
1708                                 {
1709                                         /* count one execution during startup */
1710                                         rinfo->eval_cost.startup += rinfo->eval_cost.per_tuple;
1711                                         rinfo->eval_cost.per_tuple = 0;
1712                                 }
1713                         }
1714                         cost->startup += rinfo->eval_cost.startup;
1715                         cost->per_tuple += rinfo->eval_cost.per_tuple;
1716                 }
1717                 else
1718                 {
1719                         /* If it's a bare expression, must always do it the hard way */
1720                         cost_qual_eval_walker(qual, cost);
1721                 }
1722         }
1723 }
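
/*
 * Minimal usage sketch for cost_qual_eval(), for illustration only (the real
 * call sites are the costing routines in this file and elsewhere in the
 * planner); "rel", "startup_cost" and "cpu_per_tuple" are assumed to be the
 * caller's own variables:
 *
 *              QualCost        qcost;
 *
 *              cost_qual_eval(&qcost, rel->baserestrictinfo);
 *              startup_cost += qcost.startup;
 *              cpu_per_tuple = cpu_tuple_cost + qcost.per_tuple;
 */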
1724
1725 static bool
1726 cost_qual_eval_walker(Node *node, QualCost *total)
1727 {
1728         if (node == NULL)
1729                 return false;
1730
1731         /*
1732          * Our basic strategy is to charge one cpu_operator_cost for each operator
1733          * or function node in the given tree.  Vars and Consts are charged zero,
1734          * and so are boolean operators (AND, OR, NOT). Simplistic, but a lot
1735          * better than no model at all.
1736          *
1737          * Should we try to account for the possibility of short-circuit
1738          * evaluation of AND/OR?
1739          */
1740         if (IsA(node, FuncExpr) ||
1741                 IsA(node, OpExpr) ||
1742                 IsA(node, DistinctExpr) ||
1743                 IsA(node, NullIfExpr))
1744                 total->per_tuple += cpu_operator_cost;
1745         else if (IsA(node, ScalarArrayOpExpr))
1746         {
1747                 /*
1748                  * Estimate that the operator will be applied to about half of the
1749                  * array elements before the answer is determined.
1750                  */
1751                 ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) node;
1752                 Node       *arraynode = (Node *) lsecond(saop->args);
1753
1754                 total->per_tuple +=
1755                         cpu_operator_cost * estimate_array_length(arraynode) * 0.5;
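
                /*
                 * E.g. (illustrative only): a 10-element array constant yields a
                 * charge of 10 * 0.5 = 5 cpu_operator_cost per evaluation.
                 */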
1756         }
1757         else if (IsA(node, RowCompareExpr))
1758         {
1759                 /* Conservatively assume we will check all the columns */
1760                 RowCompareExpr *rcexpr = (RowCompareExpr *) node;
1761
1762                 total->per_tuple += cpu_operator_cost * list_length(rcexpr->opnos);
1763         }
1764         else if (IsA(node, SubLink))
1765         {
1766                 /* This routine should not be applied to un-planned expressions */
1767                 elog(ERROR, "cannot handle unplanned sub-select");
1768         }
1769         else if (IsA(node, SubPlan))
1770         {
1771                 /*
1772                  * A subplan node in an expression typically indicates that the
1773                  * subplan will be executed on each evaluation, so charge accordingly.
1774                  * (Sub-selects that can be executed as InitPlans have already been
1775                  * removed from the expression.)
1776                  *
1777                  * An exception occurs when we have decided we can implement the
1778                  * subplan by hashing.
1779                  */
1780                 SubPlan    *subplan = (SubPlan *) node;
1781                 Plan       *plan = subplan->plan;
1782
1783                 if (subplan->useHashTable)
1784                 {
1785                         /*
1786                          * If we are using a hash table for the subquery outputs, then the
1787                          * cost of evaluating the query is a one-time cost. We charge one
1788                          * cpu_operator_cost per tuple for the work of loading the
1789                          * hashtable, too.
1790                          */
1791                         total->startup += plan->total_cost +
1792                                 cpu_operator_cost * plan->plan_rows;
1793
1794                         /*
1795                          * The per-tuple costs include the cost of evaluating the lefthand
1796                          * expressions, plus the cost of probing the hashtable. Recursion
1797                          * into the testexpr will handle the lefthand expressions
1798                          * properly, and will count one cpu_operator_cost for each
1799                          * comparison operator.  That is probably too low for the probing
1800                          * cost, but it's hard to make a better estimate, so live with it
1801                          * for now.
1802                          */
1803                 }
1804                 else
1805                 {
1806                         /*
1807                          * Otherwise we will be rescanning the subplan output on each
1808                          * evaluation.  We need to estimate how much of the output we will
1809                          * actually need to scan.  NOTE: this logic should agree with the
1810                          * estimates used by make_subplan() in plan/subselect.c.
1811                          */
1812                         Cost            plan_run_cost = plan->total_cost - plan->startup_cost;
1813
1814                         if (subplan->subLinkType == EXISTS_SUBLINK)
1815                         {
1816                                 /* we only need to fetch 1 tuple */
1817                                 total->per_tuple += plan_run_cost / plan->plan_rows;
1818                         }
1819                         else if (subplan->subLinkType == ALL_SUBLINK ||
1820                                          subplan->subLinkType == ANY_SUBLINK)
1821                         {
1822                                 /* assume we need 50% of the tuples */
1823                                 total->per_tuple += 0.50 * plan_run_cost;
1824                                 /* also charge a cpu_operator_cost per row examined */
1825                                 total->per_tuple += 0.50 * plan->plan_rows * cpu_operator_cost;
1826                         }
1827                         else
1828                         {
1829                                 /* assume we need all tuples */
1830                                 total->per_tuple += plan_run_cost;
1831                         }
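
                        /*
                         * Rough illustration (invented numbers): with
                         * plan_run_cost = 100 and plan_rows = 50, an EXISTS
                         * sublink adds 100/50 = 2 to per_tuple, an ANY/ALL
                         * sublink adds 0.5 * 100 = 50 plus 0.5 * 50 = 25
                         * cpu_operator_cost, and any other sublink type adds
                         * the full 100.
                         */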
1832
1833                         /*
1834                          * Also account for subplan's startup cost. If the subplan is
1835                          * uncorrelated or undirect correlated, AND its topmost node is a
1836                          * Sort or Material node, assume that we'll only need to pay its
1837                          * startup cost once; otherwise assume we pay the startup cost
1838                          * every time.
1839                          */
1840                         if (subplan->parParam == NIL &&
1841                                 (IsA(plan, Sort) ||
1842                                  IsA(plan, Material)))
1843                                 total->startup += plan->startup_cost;
1844                         else
1845                                 total->per_tuple += plan->startup_cost;
1846                 }
1847         }
1848
1849         return expression_tree_walker(node, cost_qual_eval_walker,
1850                                                                   (void *) total);
1851 }
1852
1853
1854 /*
1855  * approx_selectivity
1856  *              Quick-and-dirty estimation of clause selectivities.
1857  *              The input can be either an implicitly-ANDed list of boolean
1858  *              expressions, or a list of RestrictInfo nodes (typically the latter).
1859  *
1860  * This is quick-and-dirty because we bypass clauselist_selectivity, and
1861  * simply multiply the independent clause selectivities together.  Now
1862  * clauselist_selectivity often can't do any better than that anyhow, but
1863  * for some situations (such as range constraints) it is smarter.  However,
1864  * we can't effectively cache the results of clauselist_selectivity, whereas
1865  * the individual clause selectivities can be and are cached.
1866  *
1867  * Since we are only using the results to estimate how many potential
1868  * output tuples are generated and passed through qpqual checking, it
1869  * seems OK to live with the approximation.
1870  */
1871 static Selectivity
1872 approx_selectivity(PlannerInfo *root, List *quals, JoinType jointype)
1873 {
1874         Selectivity total = 1.0;
1875         ListCell   *l;
1876
1877         foreach(l, quals)
1878         {
1879                 Node       *qual = (Node *) lfirst(l);
1880
1881                 /* Note that clause_selectivity will be able to cache its result */
1882                 total *= clause_selectivity(root, qual, 0, jointype);
1883         }
1884         return total;
1885 }
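
/*
 * For instance (illustrative selectivities): two quals estimated at 0.1 and
 * 0.2 yield an approx_selectivity of 0.1 * 0.2 = 0.02, whereas
 * clauselist_selectivity might recognize them as a range pair and do better.
 */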
1886
1887
1888 /*
1889  * set_baserel_size_estimates
1890  *              Set the size estimates for the given base relation.
1891  *
1892  * The rel's targetlist and restrictinfo list must have been constructed
1893  * already.
1894  *
1895  * We set the following fields of the rel node:
1896  *      rows: the estimated number of output tuples (after applying
1897  *                restriction clauses).
1898  *      width: the estimated average output tuple width in bytes.
1899  *      baserestrictcost: estimated cost of evaluating baserestrictinfo clauses.
1900  */
1901 void
1902 set_baserel_size_estimates(PlannerInfo *root, RelOptInfo *rel)
1903 {
1904         double          nrows;
1905
1906         /* Should only be applied to base relations */
1907         Assert(rel->relid > 0);
1908
1909         nrows = rel->tuples *
1910                 clauselist_selectivity(root,
1911                                                            rel->baserestrictinfo,
1912                                                            0,
1913                                                            JOIN_INNER);
1914
1915         rel->rows = clamp_row_est(nrows);
1916
1917         cost_qual_eval(&rel->baserestrictcost, rel->baserestrictinfo);
1918
1919         set_rel_width(root, rel);
1920 }
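
/*
 * E.g. (invented numbers): a relation with rel->tuples = 100000 and a combined
 * baserestrictinfo selectivity of 0.015 gets rel->rows = clamp_row_est(1500)
 * = 1500, plus a width estimate from set_rel_width().
 */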
1921
1922 /*
1923  * set_joinrel_size_estimates
1924  *              Set the size estimates for the given join relation.
1925  *
1926  * The rel's targetlist must have been constructed already, and a
1927  * restriction clause list that matches the given component rels must
1928  * be provided.
1929  *
1930  * Since there is more than one way to make a joinrel for more than two
1931  * base relations, the results we get here could depend on which component
1932  * rel pair is provided.  In theory we should get the same answers no matter
1933  * which pair is provided; in practice, since the selectivity estimation
1934  * routines don't handle all cases equally well, we might not.  But there's
1935  * not much to be done about it.  (Would it make sense to repeat the
1936  * calculations for each pair of input rels that's encountered, and somehow
1937  * average the results?  Probably way more trouble than it's worth.)
1938  *
1939  * It's important that the results for symmetric JoinTypes be symmetric,
1940  * eg, (rel1, rel2, JOIN_LEFT) should produce the same result as (rel2,
1941  * rel1, JOIN_RIGHT).  Also, JOIN_IN should produce the same result as
1942  * JOIN_UNIQUE_INNER, likewise JOIN_REVERSE_IN == JOIN_UNIQUE_OUTER.
1943  *
1944  * We set only the rows field here.  The width field was already set by
1945  * build_joinrel_tlist, and baserestrictcost is not used for join rels.
1946  */
1947 void
1948 set_joinrel_size_estimates(PlannerInfo *root, RelOptInfo *rel,
1949                                                    RelOptInfo *outer_rel,
1950                                                    RelOptInfo *inner_rel,
1951                                                    JoinType jointype,
1952                                                    List *restrictlist)
1953 {
1954         Selectivity jselec;
1955         Selectivity pselec;
1956         double          nrows;
1957         UniquePath *upath;
1958
1959         /*
1960          * Compute joinclause selectivity.  Note that we are only considering
1961          * clauses that become restriction clauses at this join level; we are not
1962          * double-counting them because they were not considered in estimating the
1963          * sizes of the component rels.
1964          *
1965          * For an outer join, we have to distinguish the selectivity of the
1966          * join's own clauses (JOIN/ON conditions) from any clauses that were
1967          * "pushed down".  For inner joins we just count them all as joinclauses.
1968          */
1969         if (IS_OUTER_JOIN(jointype))
1970         {
1971                 List       *joinquals = NIL;
1972                 List       *pushedquals = NIL;
1973                 ListCell   *l;
1974
1975                 /* Grovel through the clauses to separate into two lists */
1976                 foreach(l, restrictlist)
1977                 {
1978                         RestrictInfo *rinfo = (RestrictInfo *) lfirst(l);
1979
1980                         Assert(IsA(rinfo, RestrictInfo));
1981                         if (rinfo->is_pushed_down)
1982                                 pushedquals = lappend(pushedquals, rinfo);
1983                         else
1984                                 joinquals = lappend(joinquals, rinfo);
1985                 }
1986
1987                 /* Get the separate selectivities */
1988                 jselec = clauselist_selectivity(root,
1989                                                                                 joinquals,
1990                                                                                 0,
1991                                                                                 jointype);
1992                 pselec = clauselist_selectivity(root,
1993                                                                                 pushedquals,
1994                                                                                 0,
1995                                                                                 jointype);
1996
1997                 /* Avoid leaking a lot of ListCells */
1998                 list_free(joinquals);
1999                 list_free(pushedquals);
2000         }
2001         else
2002         {
2003                 jselec = clauselist_selectivity(root,
2004                                                                                 restrictlist,
2005                                                                                 0,
2006                                                                                 jointype);
2007                 pselec = 0.0;                   /* not used, keep compiler quiet */
2008         }
2009
2010         /*
2011          * Basically, we multiply size of Cartesian product by selectivity.
2012          *
2013          * If we are doing an outer join, take that into account: the joinqual
2014          * selectivity has to be clamped using the knowledge that the output must
2015          * be at least as large as the non-nullable input.  However, any
2016          * pushed-down quals are applied after the outer join, so their
2017          * selectivity applies fully.
2018          *
2019          * For JOIN_IN and variants, the Cartesian product is figured with respect
2020          * to a unique-ified input, and then we can clamp to the size of the other
2021          * input.
2022          */
2023         switch (jointype)
2024         {
2025                 case JOIN_INNER:
2026                         nrows = outer_rel->rows * inner_rel->rows * jselec;
2027                         break;
2028                 case JOIN_LEFT:
2029                         nrows = outer_rel->rows * inner_rel->rows * jselec;
2030                         if (nrows < outer_rel->rows)
2031                                 nrows = outer_rel->rows;
2032                         nrows *= pselec;
2033                         break;
2034                 case JOIN_RIGHT:
2035                         nrows = outer_rel->rows * inner_rel->rows * jselec;
2036                         if (nrows < inner_rel->rows)
2037                                 nrows = inner_rel->rows;
2038                         nrows *= pselec;
2039                         break;
2040                 case JOIN_FULL:
2041                         nrows = outer_rel->rows * inner_rel->rows * jselec;
2042                         if (nrows < outer_rel->rows)
2043                                 nrows = outer_rel->rows;
2044                         if (nrows < inner_rel->rows)
2045                                 nrows = inner_rel->rows;
2046                         nrows *= pselec;
2047                         break;
2048                 case JOIN_IN:
2049                 case JOIN_UNIQUE_INNER:
2050                         upath = create_unique_path(root, inner_rel,
2051                                                                            inner_rel->cheapest_total_path);
2052                         nrows = outer_rel->rows * upath->rows * jselec;
2053                         if (nrows > outer_rel->rows)
2054                                 nrows = outer_rel->rows;
2055                         break;
2056                 case JOIN_REVERSE_IN:
2057                 case JOIN_UNIQUE_OUTER:
2058                         upath = create_unique_path(root, outer_rel,
2059                                                                            outer_rel->cheapest_total_path);
2060                         nrows = upath->rows * inner_rel->rows * jselec;
2061                         if (nrows > inner_rel->rows)
2062                                 nrows = inner_rel->rows;
2063                         break;
2064                 default:
2065                         elog(ERROR, "unrecognized join type: %d", (int) jointype);
2066                         nrows = 0;                      /* keep compiler quiet */
2067                         break;
2068         }
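
        /*
         * Worked example of the JOIN_LEFT clamp (made-up numbers): with
         * outer_rel->rows = 1000, inner_rel->rows = 100, jselec = 0.005 and
         * pselec = 0.4, the raw product is 1000 * 100 * 0.005 = 500, which is
         * clamped up to the outer size 1000 and then scaled by the pushed-down
         * quals to 1000 * 0.4 = 400 rows.
         */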
2069
2070         rel->rows = clamp_row_est(nrows);
2071 }
2072
2073 /*
2074  * join_in_selectivity
2075  *        Determines the factor by which a JOIN_IN join's result is expected
2076  *        to be smaller than an ordinary inner join.
2077  *
2078  * 'path' is already filled in except for the cost fields
2079  */
2080 static Selectivity
2081 join_in_selectivity(JoinPath *path, PlannerInfo *root)
2082 {
2083         RelOptInfo *innerrel;
2084         UniquePath *innerunique;
2085         Selectivity selec;
2086         double          nrows;
2087
2088         /* Return 1.0 whenever it's not JOIN_IN */
2089         if (path->jointype != JOIN_IN)
2090                 return 1.0;
2091
2092         /*
2093          * Return 1.0 if the inner side is already known unique.  The case where
2094          * the inner path is already a UniquePath probably cannot happen in
2095          * current usage, but check it anyway for completeness.  The interesting
2096          * case is where we've determined the inner relation itself is unique,
2097          * which we can check by looking at the rows estimate for its UniquePath.
2098          */
2099         if (IsA(path->innerjoinpath, UniquePath))
2100                 return 1.0;
2101         innerrel = path->innerjoinpath->parent;
2102         innerunique = create_unique_path(root,
2103                                                                          innerrel,
2104                                                                          innerrel->cheapest_total_path);
2105         if (innerunique->rows >= innerrel->rows)
2106                 return 1.0;
2107
2108         /*
2109          * Compute same result set_joinrel_size_estimates would compute for
2110          * JOIN_INNER.  Note that we use the input rels' absolute size estimates,
2111          * not PATH_ROWS() which might be less; if we used PATH_ROWS() we'd be
2112          * double-counting the effects of any join clauses used in input scans.
2113          */
2114         selec = clauselist_selectivity(root,
2115                                                                    path->joinrestrictinfo,
2116                                                                    0,
2117                                                                    JOIN_INNER);
2118         nrows = path->outerjoinpath->parent->rows * innerrel->rows * selec;
2119
2120         nrows = clamp_row_est(nrows);
2121
2122         /* See if it's larger than the actual JOIN_IN size estimate */
2123         if (nrows > path->path.parent->rows)
2124                 return path->path.parent->rows / nrows;
2125         else
2126                 return 1.0;
2127 }
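
/*
 * Example with invented estimates: if the hypothetical JOIN_INNER size works
 * out to 600 rows but the JOIN_IN relation is estimated at 150 rows,
 * join_in_selectivity returns 150/600 = 0.25, and the join's per-tuple CPU
 * charges are scaled down to a quarter.
 */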
2128
2129 /*
2130  * set_function_size_estimates
2131  *              Set the size estimates for a base relation that is a function call.
2132  *
2133  * The rel's targetlist and restrictinfo list must have been constructed
2134  * already.
2135  *
2136  * We set the same fields as set_baserel_size_estimates.
2137  */
2138 void
2139 set_function_size_estimates(PlannerInfo *root, RelOptInfo *rel)
2140 {
2141         RangeTblEntry *rte;
2142
2143         /* Should only be applied to base relations that are functions */
2144         Assert(rel->relid > 0);
2145         rte = rt_fetch(rel->relid, root->parse->rtable);
2146         Assert(rte->rtekind == RTE_FUNCTION);
2147
2148         /*
2149          * Estimate number of rows the function itself will return.
2150          *
2151          * XXX no idea how to do this yet; but we can at least check whether
2152          * function returns set or not...
2153          */
2154         if (expression_returns_set(rte->funcexpr))
2155                 rel->tuples = 1000;
2156         else
2157                 rel->tuples = 1;
2158
2159         /* Now estimate number of output rows, etc */
2160         set_baserel_size_estimates(root, rel);
2161 }
2162
2163 /*
2164  * set_values_size_estimates
2165  *              Set the size estimates for a base relation that is a values list.
2166  *
2167  * The rel's targetlist and restrictinfo list must have been constructed
2168  * already.
2169  *
2170  * We set the same fields as set_baserel_size_estimates.
2171  */
2172 void
2173 set_values_size_estimates(PlannerInfo *root, RelOptInfo *rel)
2174 {
2175         RangeTblEntry *rte;
2176
2177         /* Should only be applied to base relations that are values lists */
2178         Assert(rel->relid > 0);
2179         rte = rt_fetch(rel->relid, root->parse->rtable);
2180         Assert(rte->rtekind == RTE_VALUES);
2181
2182         /*
2183          * Estimate number of rows the values list will return. We know this
2184          * precisely based on the list length (well, barring set-returning
2185          * functions in list items, but that's a refinement not catered for
2186          * anywhere else either).
2187          */
2188         rel->tuples = list_length(rte->values_lists);
2189
2190         /* Now estimate number of output rows, etc */
2191         set_baserel_size_estimates(root, rel);
2192 }
2193
2194
2195 /*
2196  * set_rel_width
2197  *              Set the estimated output width of a base relation.
2198  *
2199  * NB: this works best on plain relations because it prefers to look at
2200  * real Vars.  It will fail to make use of pg_statistic info when applied
2201  * to a subquery relation, even if the subquery outputs are simple vars
2202  * that we could have gotten info for.  Is it worth trying to be smarter
2203  * about subqueries?
2204  *
2205  * The per-attribute width estimates are cached for possible re-use while
2206  * building join relations.
2207  */
2208 static void
2209 set_rel_width(PlannerInfo *root, RelOptInfo *rel)
2210 {
2211         int32           tuple_width = 0;
2212         ListCell   *tllist;
2213
2214         foreach(tllist, rel->reltargetlist)
2215         {
2216                 Var                *var = (Var *) lfirst(tllist);
2217                 int                     ndx;
2218                 Oid                     relid;
2219                 int32           item_width;
2220
2221                 /* For now, punt on whole-row child Vars */
2222                 if (!IsA(var, Var))
2223                 {
2224                         tuple_width += 32;      /* arbitrary */
2225                         continue;
2226                 }
2227
2228                 ndx = var->varattno - rel->min_attr;
2229
2230                 /*
2231                  * The width probably hasn't been cached yet, but may as well check
2232                  */
2233                 if (rel->attr_widths[ndx] > 0)
2234                 {
2235                         tuple_width += rel->attr_widths[ndx];
2236                         continue;
2237                 }
2238
2239                 relid = getrelid(var->varno, root->parse->rtable);
2240                 if (relid != InvalidOid)
2241                 {
2242                         item_width = get_attavgwidth(relid, var->varattno);
2243                         if (item_width > 0)
2244                         {
2245                                 rel->attr_widths[ndx] = item_width;
2246                                 tuple_width += item_width;
2247                                 continue;
2248                         }
2249                 }
2250
2251                 /*
2252                  * Not a plain relation, or can't find statistics for it. Estimate
2253                  * using just the type info.
2254                  */
2255                 item_width = get_typavgwidth(var->vartype, var->vartypmod);
2256                 Assert(item_width > 0);
2257                 rel->attr_widths[ndx] = item_width;
2258                 tuple_width += item_width;
2259         }
2260         Assert(tuple_width >= 0);
2261         rel->width = tuple_width;
2262 }
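
/*
 * For instance (hypothetical statistics): an int4 column with a cached width
 * of 4 and a text column whose pg_statistic average width is 28 give the rel
 * an estimated output width of 4 + 28 = 32 bytes.
 */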
2263
2264 /*
2265  * relation_byte_size
2266  *        Estimate the storage space in bytes for a given number of tuples
2267  *        of a given width (size in bytes).
2268  */
2269 static double
2270 relation_byte_size(double tuples, int width)
2271 {
2272         return tuples * (MAXALIGN(width) + MAXALIGN(sizeof(HeapTupleHeaderData)));
2273 }
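
/*
 * Illustration only, assuming MAXALIGN(width) = 104 for a 100-byte tuple and
 * an aligned tuple header of 24 bytes: 10000 tuples occupy about
 * 10000 * (104 + 24) = 1280000 bytes, which page_size() below turns into
 * ceil(1280000 / 8192) = 157 pages at the default BLCKSZ.
 */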
2274
2275 /*
2276  * page_size
2277  *        Returns an estimate of the number of pages covered by a given
2278  *        number of tuples of a given width (size in bytes).
2279  */
2280 static double
2281 page_size(double tuples, int width)
2282 {
2283         return ceil(relation_byte_size(tuples, width) / BLCKSZ);
2284 }