]> granicus.if.org Git - postgresql/blob - src/backend/optimizer/path/costsize.c
Improve usage of effective_cache_size parameter by assuming that all the
[postgresql] / src / backend / optimizer / path / costsize.c
1 /*-------------------------------------------------------------------------
2  *
3  * costsize.c
4  *        Routines to compute (and set) relation sizes and path costs
5  *
6  * Path costs are measured in arbitrary units established by these basic
7  * parameters:
8  *
9  *      seq_page_cost           Cost of a sequential page fetch
10  *      random_page_cost        Cost of a non-sequential page fetch
11  *      cpu_tuple_cost          Cost of typical CPU time to process a tuple
12  *      cpu_index_tuple_cost  Cost of typical CPU time to process an index tuple
13  *      cpu_operator_cost       Cost of CPU time to execute an operator or function
14  *
15  * We expect that the kernel will typically do some amount of read-ahead
16  * optimization; this in conjunction with seek costs means that seq_page_cost
17  * is normally considerably less than random_page_cost.  (However, if the
18  * database is fully cached in RAM, it is reasonable to set them equal.)
19  *
20  * We also use a rough estimate "effective_cache_size" of the number of
21  * disk pages in Postgres + OS-level disk cache.  (We can't simply use
22  * NBuffers for this purpose because that would ignore the effects of
23  * the kernel's disk cache.)
24  *
25  * Obviously, taking constants for these values is an oversimplification,
26  * but it's tough enough to get any useful estimates even at this level of
27  * detail.      Note that all of these parameters are user-settable, in case
28  * the default values are drastically off for a particular platform.
29  *
30  * We compute two separate costs for each path:
31  *              total_cost: total estimated cost to fetch all tuples
32  *              startup_cost: cost that is expended before first tuple is fetched
33  * In some scenarios, such as when there is a LIMIT or we are implementing
34  * an EXISTS(...) sub-select, it is not necessary to fetch all tuples of the
35  * path's result.  A caller can estimate the cost of fetching a partial
36  * result by interpolating between startup_cost and total_cost.  In detail:
37  *              actual_cost = startup_cost +
38  *                      (total_cost - startup_cost) * tuples_to_fetch / path->parent->rows;
39  * Note that a base relation's rows count (and, by extension, plan_rows for
40  * plan nodes below the LIMIT node) are set without regard to any LIMIT, so
41  * that this equation works properly.  (Also, these routines guarantee not to
42  * set the rows count to zero, so there will be no zero divide.)  The LIMIT is
43  * applied as a top-level plan node.
44  *
45  * For largely historical reasons, most of the routines in this module use
46  * the passed result Path only to store their startup_cost and total_cost
47  * results into.  All the input data they need is passed as separate
48  * parameters, even though much of it could be extracted from the Path.
49  * An exception is made for the cost_XXXjoin() routines, which expect all
50  * the non-cost fields of the passed XXXPath to be filled in.
51  *
52  *
53  * Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
54  * Portions Copyright (c) 1994, Regents of the University of California
55  *
56  * IDENTIFICATION
57  *        $PostgreSQL: pgsql/src/backend/optimizer/path/costsize.c,v 1.166 2006/09/19 22:49:52 tgl Exp $
58  *
59  *-------------------------------------------------------------------------
60  */
61
62 #include "postgres.h"
63
64 #include <math.h>
65
66 #include "executor/nodeHash.h"
67 #include "miscadmin.h"
68 #include "optimizer/clauses.h"
69 #include "optimizer/cost.h"
70 #include "optimizer/pathnode.h"
71 #include "parser/parsetree.h"
72 #include "utils/lsyscache.h"
73 #include "utils/selfuncs.h"
74 #include "utils/tuplesort.h"
75
76
77 #define LOG2(x)  (log(x) / 0.693147180559945)
78
79 /*
80  * Some Paths return less than the nominal number of rows of their parent
81  * relations; join nodes need to do this to get the correct input count:
82  */
83 #define PATH_ROWS(path) \
84         (IsA(path, UniquePath) ? \
85          ((UniquePath *) (path))->rows : \
86          (path)->parent->rows)
87
88
89 double          seq_page_cost = DEFAULT_SEQ_PAGE_COST;
90 double          random_page_cost = DEFAULT_RANDOM_PAGE_COST;
91 double          cpu_tuple_cost = DEFAULT_CPU_TUPLE_COST;
92 double          cpu_index_tuple_cost = DEFAULT_CPU_INDEX_TUPLE_COST;
93 double          cpu_operator_cost = DEFAULT_CPU_OPERATOR_COST;
94
95 int             effective_cache_size = DEFAULT_EFFECTIVE_CACHE_SIZE;
96
97 Cost            disable_cost = 100000000.0;
98
99 bool            enable_seqscan = true;
100 bool            enable_indexscan = true;
101 bool            enable_bitmapscan = true;
102 bool            enable_tidscan = true;
103 bool            enable_sort = true;
104 bool            enable_hashagg = true;
105 bool            enable_nestloop = true;
106 bool            enable_mergejoin = true;
107 bool            enable_hashjoin = true;
108
109
110 static bool cost_qual_eval_walker(Node *node, QualCost *total);
111 static Selectivity approx_selectivity(PlannerInfo *root, List *quals,
112                                    JoinType jointype);
113 static Selectivity join_in_selectivity(JoinPath *path, PlannerInfo *root);
114 static void set_rel_width(PlannerInfo *root, RelOptInfo *rel);
115 static double relation_byte_size(double tuples, int width);
116 static double page_size(double tuples, int width);
117
118
119 /*
120  * clamp_row_est
121  *              Force a row-count estimate to a sane value.
122  */
123 double
124 clamp_row_est(double nrows)
125 {
126         /*
127          * Force estimate to be at least one row, to make explain output look
128          * better and to avoid possible divide-by-zero when interpolating costs.
129          * Make it an integer, too.
130          */
131         if (nrows <= 1.0)
132                 nrows = 1.0;
133         else
134                 nrows = rint(nrows);
135
136         return nrows;
137 }
138
139
140 /*
141  * cost_seqscan
142  *        Determines and returns the cost of scanning a relation sequentially.
143  */
144 void
145 cost_seqscan(Path *path, PlannerInfo *root,
146                          RelOptInfo *baserel)
147 {
148         Cost            startup_cost = 0;
149         Cost            run_cost = 0;
150         Cost            cpu_per_tuple;
151
152         /* Should only be applied to base relations */
153         Assert(baserel->relid > 0);
154         Assert(baserel->rtekind == RTE_RELATION);
155
156         if (!enable_seqscan)
157                 startup_cost += disable_cost;
158
159         /*
160          * disk costs
161          */
162         run_cost += seq_page_cost * baserel->pages;
163
164         /* CPU costs */
165         startup_cost += baserel->baserestrictcost.startup;
166         cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
167         run_cost += cpu_per_tuple * baserel->tuples;
168
169         path->startup_cost = startup_cost;
170         path->total_cost = startup_cost + run_cost;
171 }
172
173 /*
174  * cost_index
175  *        Determines and returns the cost of scanning a relation using an index.
176  *
177  * 'index' is the index to be used
178  * 'indexQuals' is the list of applicable qual clauses (implicit AND semantics)
179  * 'outer_rel' is the outer relation when we are considering using the index
180  *              scan as the inside of a nestloop join (hence, some of the indexQuals
181  *              are join clauses, and we should expect repeated scans of the index);
182  *              NULL for a plain index scan
183  *
184  * cost_index() takes an IndexPath not just a Path, because it sets a few
185  * additional fields of the IndexPath besides startup_cost and total_cost.
186  * These fields are needed if the IndexPath is used in a BitmapIndexScan.
187  *
188  * NOTE: 'indexQuals' must contain only clauses usable as index restrictions.
189  * Any additional quals evaluated as qpquals may reduce the number of returned
190  * tuples, but they won't reduce the number of tuples we have to fetch from
191  * the table, so they don't reduce the scan cost.
192  *
193  * NOTE: as of 8.0, indexQuals is a list of RestrictInfo nodes, where formerly
194  * it was a list of bare clause expressions.
195  */
196 void
197 cost_index(IndexPath *path, PlannerInfo *root,
198                    IndexOptInfo *index,
199                    List *indexQuals,
200                    RelOptInfo *outer_rel)
201 {
202         RelOptInfo *baserel = index->rel;
203         Cost            startup_cost = 0;
204         Cost            run_cost = 0;
205         Cost            indexStartupCost;
206         Cost            indexTotalCost;
207         Selectivity indexSelectivity;
208         double          indexCorrelation,
209                                 csquared;
210         Cost            min_IO_cost,
211                                 max_IO_cost;
212         Cost            cpu_per_tuple;
213         double          tuples_fetched;
214         double          pages_fetched;
215
216         /* Should only be applied to base relations */
217         Assert(IsA(baserel, RelOptInfo) &&
218                    IsA(index, IndexOptInfo));
219         Assert(baserel->relid > 0);
220         Assert(baserel->rtekind == RTE_RELATION);
221
222         if (!enable_indexscan)
223                 startup_cost += disable_cost;
224
225         /*
226          * Call index-access-method-specific code to estimate the processing cost
227          * for scanning the index, as well as the selectivity of the index (ie,
228          * the fraction of main-table tuples we will have to retrieve) and its
229          * correlation to the main-table tuple order.
230          */
231         OidFunctionCall8(index->amcostestimate,
232                                          PointerGetDatum(root),
233                                          PointerGetDatum(index),
234                                          PointerGetDatum(indexQuals),
235                                          PointerGetDatum(outer_rel),
236                                          PointerGetDatum(&indexStartupCost),
237                                          PointerGetDatum(&indexTotalCost),
238                                          PointerGetDatum(&indexSelectivity),
239                                          PointerGetDatum(&indexCorrelation));
240
241         /*
242          * Save amcostestimate's results for possible use in bitmap scan planning.
243          * We don't bother to save indexStartupCost or indexCorrelation, because a
244          * bitmap scan doesn't care about either.
245          */
246         path->indextotalcost = indexTotalCost;
247         path->indexselectivity = indexSelectivity;
248
249         /* all costs for touching index itself included here */
250         startup_cost += indexStartupCost;
251         run_cost += indexTotalCost - indexStartupCost;
252
253         /* estimate number of main-table tuples fetched */
254         tuples_fetched = clamp_row_est(indexSelectivity * baserel->tuples);
255
256         /*----------
257          * Estimate number of main-table pages fetched, and compute I/O cost.
258          *
259          * When the index ordering is uncorrelated with the table ordering,
260          * we use an approximation proposed by Mackert and Lohman (see
261          * index_pages_fetched() for details) to compute the number of pages
262          * fetched, and then charge random_page_cost per page fetched.
263          *
264          * When the index ordering is exactly correlated with the table ordering
265          * (just after a CLUSTER, for example), the number of pages fetched should
266          * be exactly selectivity * table_size.  What's more, all but the first
267          * will be sequential fetches, not the random fetches that occur in the
268          * uncorrelated case.  So if the number of pages is more than 1, we
269          * ought to charge
270          *              random_page_cost + (pages_fetched - 1) * seq_page_cost
271          * For partially-correlated indexes, we ought to charge somewhere between
272          * these two estimates.  We currently interpolate linearly between the
273          * estimates based on the correlation squared (XXX is that appropriate?).
274          *----------
275          */
276         if (outer_rel != NULL && outer_rel->rows > 1)
277         {
278                 /*
279                  * For repeated indexscans, scale up the number of tuples fetched
280                  * in the Mackert and Lohman formula by the number of scans, so
281                  * that we estimate the number of pages fetched by all the scans.
282                  * Then pro-rate the costs for one scan.  In this case we assume
283                  * all the fetches are random accesses.  XXX it'd be good to
284                  * include correlation in this model, but it's not clear how to do
285                  * that without double-counting cache effects.
286                  */
287                 double          num_scans = outer_rel->rows;
288
289                 pages_fetched = index_pages_fetched(tuples_fetched * num_scans,
290                                                                                         baserel->pages,
291                                                                                         (double) index->pages,
292                                                                                         root);
293
294                 run_cost += (pages_fetched * random_page_cost) / num_scans;
295         }
296         else
297         {
298                 /*
299                  * Normal case: apply the Mackert and Lohman formula, and then
300                  * interpolate between that and the correlation-derived result.
301                  */
302                 pages_fetched = index_pages_fetched(tuples_fetched,
303                                                                                         baserel->pages,
304                                                                                         (double) index->pages,
305                                                                                         root);
306
307                 /* max_IO_cost is for the perfectly uncorrelated case (csquared=0) */
308                 max_IO_cost = pages_fetched * random_page_cost;
309
310                 /* min_IO_cost is for the perfectly correlated case (csquared=1) */
311                 pages_fetched = ceil(indexSelectivity * (double) baserel->pages);
312                 min_IO_cost = random_page_cost;
313                 if (pages_fetched > 1)
314                         min_IO_cost += (pages_fetched - 1) * seq_page_cost;
315
316                 /*
317                  * Now interpolate based on estimated index order correlation to get
318                  * total disk I/O cost for main table accesses.
319                  */
320                 csquared = indexCorrelation * indexCorrelation;
321
322                 run_cost += max_IO_cost + csquared * (min_IO_cost - max_IO_cost);
323         }
324
325         /*
326          * Estimate CPU costs per tuple.
327          *
328          * Normally the indexquals will be removed from the list of restriction
329          * clauses that we have to evaluate as qpquals, so we should subtract
330          * their costs from baserestrictcost.  But if we are doing a join then
331          * some of the indexquals are join clauses and shouldn't be subtracted.
332          * Rather than work out exactly how much to subtract, we don't subtract
333          * anything.
334          */
335         startup_cost += baserel->baserestrictcost.startup;
336         cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
337
338         if (outer_rel == NULL)
339         {
340                 QualCost        index_qual_cost;
341
342                 cost_qual_eval(&index_qual_cost, indexQuals);
343                 /* any startup cost still has to be paid ... */
344                 cpu_per_tuple -= index_qual_cost.per_tuple;
345         }
346
347         run_cost += cpu_per_tuple * tuples_fetched;
348
349         path->path.startup_cost = startup_cost;
350         path->path.total_cost = startup_cost + run_cost;
351 }
352
353 /*
354  * index_pages_fetched
355  *        Estimate the number of pages actually fetched after accounting for
356  *        cache effects.
357  *
358  * We use an approximation proposed by Mackert and Lohman, "Index Scans
359  * Using a Finite LRU Buffer: A Validated I/O Model", ACM Transactions
360  * on Database Systems, Vol. 14, No. 3, September 1989, Pages 401-424.
361  * The Mackert and Lohman approximation is that the number of pages
362  * fetched is
363  *      PF =
364  *              min(2TNs/(2T+Ns), T)                    when T <= b
365  *              2TNs/(2T+Ns)                                    when T > b and Ns <= 2Tb/(2T-b)
366  *              b + (Ns - 2Tb/(2T-b))*(T-b)/T   when T > b and Ns > 2Tb/(2T-b)
367  * where
368  *              T = # pages in table
369  *              N = # tuples in table
370  *              s = selectivity = fraction of table to be scanned
371  *              b = # buffer pages available (we include kernel space here)
372  *
373  * We assume that effective_cache_size is the total number of buffer pages
374  * available for the whole query, and pro-rate that space across all the
375  * tables in the query and the index currently under consideration.  (This
376  * ignores space needed for other indexes used by the query, but since we
377  * don't know which indexes will get used, we can't estimate that very well;
378  * and in any case counting all the tables may well be an overestimate, since
379  * depending on the join plan not all the tables may be scanned concurrently.)
380  *
381  * The product Ns is the number of tuples fetched; we pass in that
382  * product rather than calculating it here.  "pages" is the number of pages
383  * in the object under consideration (either an index or a table).
384  * "index_pages" is the amount to add to the total table space, which was
385  * computed for us by query_planner.
386  *
387  * Caller is expected to have ensured that tuples_fetched is greater than zero
388  * and rounded to integer (see clamp_row_est).  The result will likewise be
389  * greater than zero and integral.
390  */
391 double
392 index_pages_fetched(double tuples_fetched, BlockNumber pages,
393                                         double index_pages, PlannerInfo *root)
394 {
395         double          pages_fetched;
396         double          total_pages;
397         double          T,
398                                 b;
399
400         /* T is # pages in table, but don't allow it to be zero */
401         T = (pages > 1) ? (double) pages : 1.0;
402
403         /* Compute number of pages assumed to be competing for cache space */
404         total_pages = root->total_table_pages + index_pages;
405         total_pages = Max(total_pages, 1.0);
406         Assert(T <= total_pages);
407
408         /* b is pro-rated share of effective_cache_size */
409         b = (double) effective_cache_size * T / total_pages;
410         /* force it positive and integral */
411         if (b <= 1.0)
412                 b = 1.0;
413         else
414                 b = ceil(b);
415
416         /* This part is the Mackert and Lohman formula */
417         if (T <= b)
418         {
419                 pages_fetched =
420                         (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
421                 if (pages_fetched >= T)
422                         pages_fetched = T;
423                 else
424                         pages_fetched = ceil(pages_fetched);
425         }
426         else
427         {
428                 double          lim;
429
430                 lim = (2.0 * T * b) / (2.0 * T - b);
431                 if (tuples_fetched <= lim)
432                 {
433                         pages_fetched =
434                                 (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
435                 }
436                 else
437                 {
438                         pages_fetched =
439                                 b + (tuples_fetched - lim) * (T - b) / T;
440                 }
441                 pages_fetched = ceil(pages_fetched);
442         }
443         return pages_fetched;
444 }
445
446 /*
447  * get_indexpath_pages
448  *              Determine the total size of the indexes used in a bitmap index path.
449  *
450  * Note: if the same index is used more than once in a bitmap tree, we will
451  * count it multiple times, which perhaps is the wrong thing ... but it's
452  * not completely clear, and detecting duplicates is difficult, so ignore it
453  * for now.
454  */
455 static double
456 get_indexpath_pages(Path *bitmapqual)
457 {
458         double          result = 0;
459         ListCell   *l;
460
461         if (IsA(bitmapqual, BitmapAndPath))
462         {
463                 BitmapAndPath *apath = (BitmapAndPath *) bitmapqual;
464
465                 foreach(l, apath->bitmapquals)
466                 {
467                         result += get_indexpath_pages((Path *) lfirst(l));
468                 }
469         }
470         else if (IsA(bitmapqual, BitmapOrPath))
471         {
472                 BitmapOrPath *opath = (BitmapOrPath *) bitmapqual;
473
474                 foreach(l, opath->bitmapquals)
475                 {
476                         result += get_indexpath_pages((Path *) lfirst(l));
477                 }
478         }
479         else if (IsA(bitmapqual, IndexPath))
480         {
481                 IndexPath  *ipath = (IndexPath *) bitmapqual;
482
483                 result = (double) ipath->indexinfo->pages;
484         }
485         else
486                 elog(ERROR, "unrecognized node type: %d", nodeTag(bitmapqual));
487
488         return result;
489 }
490
491 /*
492  * cost_bitmap_heap_scan
493  *        Determines and returns the cost of scanning a relation using a bitmap
494  *        index-then-heap plan.
495  *
496  * 'baserel' is the relation to be scanned
497  * 'bitmapqual' is a tree of IndexPaths, BitmapAndPaths, and BitmapOrPaths
498  * 'outer_rel' is the outer relation when we are considering using the bitmap
499  *              scan as the inside of a nestloop join (hence, some of the indexQuals
500  *              are join clauses, and we should expect repeated scans of the table);
501  *              NULL for a plain bitmap scan
502  *
503  * Note: if this is a join inner path, the component IndexPaths in bitmapqual
504  * should have been costed accordingly.
505  */
506 void
507 cost_bitmap_heap_scan(Path *path, PlannerInfo *root, RelOptInfo *baserel,
508                                           Path *bitmapqual, RelOptInfo *outer_rel)
509 {
510         Cost            startup_cost = 0;
511         Cost            run_cost = 0;
512         Cost            indexTotalCost;
513         Selectivity indexSelectivity;
514         Cost            cpu_per_tuple;
515         Cost            cost_per_page;
516         double          tuples_fetched;
517         double          pages_fetched;
518         double          T;
519
520         /* Should only be applied to base relations */
521         Assert(IsA(baserel, RelOptInfo));
522         Assert(baserel->relid > 0);
523         Assert(baserel->rtekind == RTE_RELATION);
524
525         if (!enable_bitmapscan)
526                 startup_cost += disable_cost;
527
528         /*
529          * Fetch total cost of obtaining the bitmap, as well as its total
530          * selectivity.
531          */
532         cost_bitmap_tree_node(bitmapqual, &indexTotalCost, &indexSelectivity);
533
534         startup_cost += indexTotalCost;
535
536         /*
537          * Estimate number of main-table pages fetched.
538          */
539         tuples_fetched = clamp_row_est(indexSelectivity * baserel->tuples);
540
541         T = (baserel->pages > 1) ? (double) baserel->pages : 1.0;
542
543         if (outer_rel != NULL && outer_rel->rows > 1)
544         {
545                 /*
546                  * For repeated bitmap scans, scale up the number of tuples fetched
547                  * in the Mackert and Lohman formula by the number of scans, so
548                  * that we estimate the number of pages fetched by all the scans.
549                  * Then pro-rate for one scan.
550                  */
551                 double          num_scans = outer_rel->rows;
552
553                 pages_fetched = index_pages_fetched(tuples_fetched * num_scans,
554                                                                                         baserel->pages,
555                                                                                         get_indexpath_pages(bitmapqual),
556                                                                                         root);
557                 pages_fetched /= num_scans;
558         }
559         else
560         {
561                 /*
562                  * For a single scan, the number of heap pages that need to be fetched
563                  * is the same as the Mackert and Lohman formula for the case T <= b
564                  * (ie, no re-reads needed).
565                  */
566                 pages_fetched = (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
567         }
568         if (pages_fetched >= T)
569                 pages_fetched = T;
570         else
571                 pages_fetched = ceil(pages_fetched);
572
573         /*
574          * For small numbers of pages we should charge random_page_cost apiece,
575          * while if nearly all the table's pages are being read, it's more
576          * appropriate to charge seq_page_cost apiece.  The effect is nonlinear,
577          * too. For lack of a better idea, interpolate like this to determine the
578          * cost per page.
579          */
580         if (pages_fetched >= 2.0)
581                 cost_per_page = random_page_cost -
582                         (random_page_cost - seq_page_cost) * sqrt(pages_fetched / T);
583         else
584                 cost_per_page = random_page_cost;
585
586         run_cost += pages_fetched * cost_per_page;
587
588         /*
589          * Estimate CPU costs per tuple.
590          *
591          * Often the indexquals don't need to be rechecked at each tuple ... but
592          * not always, especially not if there are enough tuples involved that the
593          * bitmaps become lossy.  For the moment, just assume they will be
594          * rechecked always.
595          */
596         startup_cost += baserel->baserestrictcost.startup;
597         cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
598
599         run_cost += cpu_per_tuple * tuples_fetched;
600
601         path->startup_cost = startup_cost;
602         path->total_cost = startup_cost + run_cost;
603 }
604
605 /*
606  * cost_bitmap_tree_node
607  *              Extract cost and selectivity from a bitmap tree node (index/and/or)
608  */
609 void
610 cost_bitmap_tree_node(Path *path, Cost *cost, Selectivity *selec)
611 {
612         if (IsA(path, IndexPath))
613         {
614                 *cost = ((IndexPath *) path)->indextotalcost;
615                 *selec = ((IndexPath *) path)->indexselectivity;
616         }
617         else if (IsA(path, BitmapAndPath))
618         {
619                 *cost = path->total_cost;
620                 *selec = ((BitmapAndPath *) path)->bitmapselectivity;
621         }
622         else if (IsA(path, BitmapOrPath))
623         {
624                 *cost = path->total_cost;
625                 *selec = ((BitmapOrPath *) path)->bitmapselectivity;
626         }
627         else
628                 elog(ERROR, "unrecognized node type: %d", nodeTag(path));
629 }
630
631 /*
632  * cost_bitmap_and_node
633  *              Estimate the cost of a BitmapAnd node
634  *
635  * Note that this considers only the costs of index scanning and bitmap
636  * creation, not the eventual heap access.      In that sense the object isn't
637  * truly a Path, but it has enough path-like properties (costs in particular)
638  * to warrant treating it as one.
639  */
640 void
641 cost_bitmap_and_node(BitmapAndPath *path, PlannerInfo *root)
642 {
643         Cost            totalCost;
644         Selectivity selec;
645         ListCell   *l;
646
647         /*
648          * We estimate AND selectivity on the assumption that the inputs are
649          * independent.  This is probably often wrong, but we don't have the info
650          * to do better.
651          *
652          * The runtime cost of the BitmapAnd itself is estimated at 100x
653          * cpu_operator_cost for each tbm_intersect needed.  Probably too small,
654          * definitely too simplistic?
655          */
656         totalCost = 0.0;
657         selec = 1.0;
658         foreach(l, path->bitmapquals)
659         {
660                 Path       *subpath = (Path *) lfirst(l);
661                 Cost            subCost;
662                 Selectivity subselec;
663
664                 cost_bitmap_tree_node(subpath, &subCost, &subselec);
665
666                 selec *= subselec;
667
668                 totalCost += subCost;
669                 if (l != list_head(path->bitmapquals))
670                         totalCost += 100.0 * cpu_operator_cost;
671         }
672         path->bitmapselectivity = selec;
673         path->path.startup_cost = totalCost;
674         path->path.total_cost = totalCost;
675 }
676
677 /*
678  * cost_bitmap_or_node
679  *              Estimate the cost of a BitmapOr node
680  *
681  * See comments for cost_bitmap_and_node.
682  */
683 void
684 cost_bitmap_or_node(BitmapOrPath *path, PlannerInfo *root)
685 {
686         Cost            totalCost;
687         Selectivity selec;
688         ListCell   *l;
689
690         /*
691          * We estimate OR selectivity on the assumption that the inputs are
692          * non-overlapping, since that's often the case in "x IN (list)" type
693          * situations.  Of course, we clamp to 1.0 at the end.
694          *
695          * The runtime cost of the BitmapOr itself is estimated at 100x
696          * cpu_operator_cost for each tbm_union needed.  Probably too small,
697          * definitely too simplistic?  We are aware that the tbm_unions are
698          * optimized out when the inputs are BitmapIndexScans.
699          */
700         totalCost = 0.0;
701         selec = 0.0;
702         foreach(l, path->bitmapquals)
703         {
704                 Path       *subpath = (Path *) lfirst(l);
705                 Cost            subCost;
706                 Selectivity subselec;
707
708                 cost_bitmap_tree_node(subpath, &subCost, &subselec);
709
710                 selec += subselec;
711
712                 totalCost += subCost;
713                 if (l != list_head(path->bitmapquals) &&
714                         !IsA(subpath, IndexPath))
715                         totalCost += 100.0 * cpu_operator_cost;
716         }
717         path->bitmapselectivity = Min(selec, 1.0);
718         path->path.startup_cost = totalCost;
719         path->path.total_cost = totalCost;
720 }
721
722 /*
723  * cost_tidscan
724  *        Determines and returns the cost of scanning a relation using TIDs.
725  */
726 void
727 cost_tidscan(Path *path, PlannerInfo *root,
728                          RelOptInfo *baserel, List *tidquals)
729 {
730         Cost            startup_cost = 0;
731         Cost            run_cost = 0;
732         Cost            cpu_per_tuple;
733         int                     ntuples;
734         ListCell   *l;
735
736         /* Should only be applied to base relations */
737         Assert(baserel->relid > 0);
738         Assert(baserel->rtekind == RTE_RELATION);
739
740         if (!enable_tidscan)
741                 startup_cost += disable_cost;
742
743         /* Count how many tuples we expect to retrieve */
744         ntuples = 0;
745         foreach(l, tidquals)
746         {
747                 if (IsA(lfirst(l), ScalarArrayOpExpr))
748                 {
749                         /* Each element of the array yields 1 tuple */
750                         ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) lfirst(l);
751                         Node *arraynode = (Node *) lsecond(saop->args);
752
753                         ntuples += estimate_array_length(arraynode);
754                 }
755                 else
756                 {
757                         /* It's just CTID = something, count 1 tuple */
758                         ntuples++;
759                 }
760         }
761
762         /* disk costs --- assume each tuple on a different page */
763         run_cost += random_page_cost * ntuples;
764
765         /* CPU costs */
766         startup_cost += baserel->baserestrictcost.startup;
767         cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
768         run_cost += cpu_per_tuple * ntuples;
769
770         path->startup_cost = startup_cost;
771         path->total_cost = startup_cost + run_cost;
772 }
773
774 /*
775  * cost_subqueryscan
776  *        Determines and returns the cost of scanning a subquery RTE.
777  */
778 void
779 cost_subqueryscan(Path *path, RelOptInfo *baserel)
780 {
781         Cost            startup_cost;
782         Cost            run_cost;
783         Cost            cpu_per_tuple;
784
785         /* Should only be applied to base relations that are subqueries */
786         Assert(baserel->relid > 0);
787         Assert(baserel->rtekind == RTE_SUBQUERY);
788
789         /*
790          * Cost of path is cost of evaluating the subplan, plus cost of evaluating
791          * any restriction clauses that will be attached to the SubqueryScan node,
792          * plus cpu_tuple_cost to account for selection and projection overhead.
793          */
794         path->startup_cost = baserel->subplan->startup_cost;
795         path->total_cost = baserel->subplan->total_cost;
796
797         startup_cost = baserel->baserestrictcost.startup;
798         cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
799         run_cost = cpu_per_tuple * baserel->tuples;
800
801         path->startup_cost += startup_cost;
802         path->total_cost += startup_cost + run_cost;
803 }
804
805 /*
806  * cost_functionscan
807  *        Determines and returns the cost of scanning a function RTE.
808  */
809 void
810 cost_functionscan(Path *path, PlannerInfo *root, RelOptInfo *baserel)
811 {
812         Cost            startup_cost = 0;
813         Cost            run_cost = 0;
814         Cost            cpu_per_tuple;
815
816         /* Should only be applied to base relations that are functions */
817         Assert(baserel->relid > 0);
818         Assert(baserel->rtekind == RTE_FUNCTION);
819
820         /*
821          * For now, estimate function's cost at one operator eval per function
822          * call.  Someday we should revive the function cost estimate columns in
823          * pg_proc...
824          */
825         cpu_per_tuple = cpu_operator_cost;
826
827         /* Add scanning CPU costs */
828         startup_cost += baserel->baserestrictcost.startup;
829         cpu_per_tuple += cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
830         run_cost += cpu_per_tuple * baserel->tuples;
831
832         path->startup_cost = startup_cost;
833         path->total_cost = startup_cost + run_cost;
834 }
835
836 /*
837  * cost_valuesscan
838  *        Determines and returns the cost of scanning a VALUES RTE.
839  */
840 void
841 cost_valuesscan(Path *path, PlannerInfo *root, RelOptInfo *baserel)
842 {
843         Cost            startup_cost = 0;
844         Cost            run_cost = 0;
845         Cost            cpu_per_tuple;
846
847         /* Should only be applied to base relations that are values lists */
848         Assert(baserel->relid > 0);
849         Assert(baserel->rtekind == RTE_VALUES);
850
851         /*
852          * For now, estimate list evaluation cost at one operator eval per
853          * list (probably pretty bogus, but is it worth being smarter?)
854          */
855         cpu_per_tuple = cpu_operator_cost;
856
857         /* Add scanning CPU costs */
858         startup_cost += baserel->baserestrictcost.startup;
859         cpu_per_tuple += cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
860         run_cost += cpu_per_tuple * baserel->tuples;
861
862         path->startup_cost = startup_cost;
863         path->total_cost = startup_cost + run_cost;
864 }
865
866 /*
867  * cost_sort
868  *        Determines and returns the cost of sorting a relation, including
869  *        the cost of reading the input data.
870  *
871  * If the total volume of data to sort is less than work_mem, we will do
872  * an in-memory sort, which requires no I/O and about t*log2(t) tuple
873  * comparisons for t tuples.
874  *
875  * If the total volume exceeds work_mem, we switch to a tape-style merge
876  * algorithm.  There will still be about t*log2(t) tuple comparisons in
877  * total, but we will also need to write and read each tuple once per
878  * merge pass.  We expect about ceil(logM(r)) merge passes where r is the
879  * number of initial runs formed and M is the merge order used by tuplesort.c.
880  * Since the average initial run should be about twice work_mem, we have
881  *              disk traffic = 2 * relsize * ceil(logM(p / (2*work_mem)))
882  *              cpu = comparison_cost * t * log2(t)
883  *
884  * The disk traffic is assumed to be 3/4ths sequential and 1/4th random
885  * accesses (XXX can't we refine that guess?)
886  *
887  * We charge two operator evals per tuple comparison, which should be in
888  * the right ballpark in most cases.
889  *
890  * 'pathkeys' is a list of sort keys
891  * 'input_cost' is the total cost for reading the input data
892  * 'tuples' is the number of tuples in the relation
893  * 'width' is the average tuple width in bytes
894  *
895  * NOTE: some callers currently pass NIL for pathkeys because they
896  * can't conveniently supply the sort keys.  Since this routine doesn't
897  * currently do anything with pathkeys anyway, that doesn't matter...
898  * but if it ever does, it should react gracefully to lack of key data.
899  * (Actually, the thing we'd most likely be interested in is just the number
900  * of sort keys, which all callers *could* supply.)
901  */
902 void
903 cost_sort(Path *path, PlannerInfo *root,
904                   List *pathkeys, Cost input_cost, double tuples, int width)
905 {
906         Cost            startup_cost = input_cost;
907         Cost            run_cost = 0;
908         double          nbytes = relation_byte_size(tuples, width);
909         long            work_mem_bytes = work_mem * 1024L;
910
911         if (!enable_sort)
912                 startup_cost += disable_cost;
913
914         /*
915          * We want to be sure the cost of a sort is never estimated as zero, even
916          * if passed-in tuple count is zero.  Besides, mustn't do log(0)...
917          */
918         if (tuples < 2.0)
919                 tuples = 2.0;
920
921         /*
922          * CPU costs
923          *
924          * Assume about two operator evals per tuple comparison and N log2 N
925          * comparisons
926          */
927         startup_cost += 2.0 * cpu_operator_cost * tuples * LOG2(tuples);
928
929         /* disk costs */
930         if (nbytes > work_mem_bytes)
931         {
932                 double          npages = ceil(nbytes / BLCKSZ);
933                 double          nruns = (nbytes / work_mem_bytes) * 0.5;
934                 double          mergeorder = tuplesort_merge_order(work_mem_bytes);
935                 double          log_runs;
936                 double          npageaccesses;
937
938                 /* Compute logM(r) as log(r) / log(M) */
939                 if (nruns > mergeorder)
940                         log_runs = ceil(log(nruns) / log(mergeorder));
941                 else
942                         log_runs = 1.0;
943                 npageaccesses = 2.0 * npages * log_runs;
944                 /* Assume 3/4ths of accesses are sequential, 1/4th are not */
945                 startup_cost += npageaccesses *
946                         (seq_page_cost * 0.75 + random_page_cost * 0.25);
947         }
948
949         /*
950          * Also charge a small amount (arbitrarily set equal to operator cost) per
951          * extracted tuple.
952          */
953         run_cost += cpu_operator_cost * tuples;
954
955         path->startup_cost = startup_cost;
956         path->total_cost = startup_cost + run_cost;
957 }
958
959 /*
960  * cost_material
961  *        Determines and returns the cost of materializing a relation, including
962  *        the cost of reading the input data.
963  *
964  * If the total volume of data to materialize exceeds work_mem, we will need
965  * to write it to disk, so the cost is much higher in that case.
966  */
967 void
968 cost_material(Path *path,
969                           Cost input_cost, double tuples, int width)
970 {
971         Cost            startup_cost = input_cost;
972         Cost            run_cost = 0;
973         double          nbytes = relation_byte_size(tuples, width);
974         long            work_mem_bytes = work_mem * 1024L;
975
976         /* disk costs */
977         if (nbytes > work_mem_bytes)
978         {
979                 double          npages = ceil(nbytes / BLCKSZ);
980
981                 /* We'll write during startup and read during retrieval */
982                 startup_cost += seq_page_cost * npages;
983                 run_cost += seq_page_cost * npages;
984         }
985
986         /*
987          * Charge a very small amount per inserted tuple, to reflect bookkeeping
988          * costs.  We use cpu_tuple_cost/10 for this.  This is needed to break the
989          * tie that would otherwise exist between nestloop with A outer,
990          * materialized B inner and nestloop with B outer, materialized A inner.
991          * The extra cost ensures we'll prefer materializing the smaller rel.
992          */
993         startup_cost += cpu_tuple_cost * 0.1 * tuples;
994
995         /*
996          * Also charge a small amount per extracted tuple.      We use cpu_tuple_cost
997          * so that it doesn't appear worthwhile to materialize a bare seqscan.
998          */
999         run_cost += cpu_tuple_cost * tuples;
1000
1001         path->startup_cost = startup_cost;
1002         path->total_cost = startup_cost + run_cost;
1003 }
1004
1005 /*
1006  * cost_agg
1007  *              Determines and returns the cost of performing an Agg plan node,
1008  *              including the cost of its input.
1009  *
1010  * Note: when aggstrategy == AGG_SORTED, caller must ensure that input costs
1011  * are for appropriately-sorted input.
1012  */
1013 void
1014 cost_agg(Path *path, PlannerInfo *root,
1015                  AggStrategy aggstrategy, int numAggs,
1016                  int numGroupCols, double numGroups,
1017                  Cost input_startup_cost, Cost input_total_cost,
1018                  double input_tuples)
1019 {
1020         Cost            startup_cost;
1021         Cost            total_cost;
1022
1023         /*
1024          * We charge one cpu_operator_cost per aggregate function per input tuple,
1025          * and another one per output tuple (corresponding to transfn and finalfn
1026          * calls respectively).  If we are grouping, we charge an additional
1027          * cpu_operator_cost per grouping column per input tuple for grouping
1028          * comparisons.
1029          *
1030          * We will produce a single output tuple if not grouping, and a tuple per
1031          * group otherwise.  We charge cpu_tuple_cost for each output tuple.
1032          *
1033          * Note: in this cost model, AGG_SORTED and AGG_HASHED have exactly the
1034          * same total CPU cost, but AGG_SORTED has lower startup cost.  If the
1035          * input path is already sorted appropriately, AGG_SORTED should be
1036          * preferred (since it has no risk of memory overflow).  This will happen
1037          * as long as the computed total costs are indeed exactly equal --- but if
1038          * there's roundoff error we might do the wrong thing.  So be sure that
1039          * the computations below form the same intermediate values in the same
1040          * order.
1041          */
1042         if (aggstrategy == AGG_PLAIN)
1043         {
1044                 startup_cost = input_total_cost;
1045                 startup_cost += cpu_operator_cost * (input_tuples + 1) * numAggs;
1046                 /* we aren't grouping */
1047                 total_cost = startup_cost + cpu_tuple_cost;
1048         }
1049         else if (aggstrategy == AGG_SORTED)
1050         {
1051                 /* Here we are able to deliver output on-the-fly */
1052                 startup_cost = input_startup_cost;
1053                 total_cost = input_total_cost;
1054                 /* calcs phrased this way to match HASHED case, see note above */
1055                 total_cost += cpu_operator_cost * input_tuples * numGroupCols;
1056                 total_cost += cpu_operator_cost * input_tuples * numAggs;
1057                 total_cost += cpu_operator_cost * numGroups * numAggs;
1058                 total_cost += cpu_tuple_cost * numGroups;
1059         }
1060         else
1061         {
1062                 /* must be AGG_HASHED */
1063                 startup_cost = input_total_cost;
1064                 startup_cost += cpu_operator_cost * input_tuples * numGroupCols;
1065                 startup_cost += cpu_operator_cost * input_tuples * numAggs;
1066                 total_cost = startup_cost;
1067                 total_cost += cpu_operator_cost * numGroups * numAggs;
1068                 total_cost += cpu_tuple_cost * numGroups;
1069         }
1070
1071         path->startup_cost = startup_cost;
1072         path->total_cost = total_cost;
1073 }
1074
1075 /*
1076  * cost_group
1077  *              Determines and returns the cost of performing a Group plan node,
1078  *              including the cost of its input.
1079  *
1080  * Note: caller must ensure that input costs are for appropriately-sorted
1081  * input.
1082  */
1083 void
1084 cost_group(Path *path, PlannerInfo *root,
1085                    int numGroupCols, double numGroups,
1086                    Cost input_startup_cost, Cost input_total_cost,
1087                    double input_tuples)
1088 {
1089         Cost            startup_cost;
1090         Cost            total_cost;
1091
1092         startup_cost = input_startup_cost;
1093         total_cost = input_total_cost;
1094
1095         /*
1096          * Charge one cpu_operator_cost per comparison per input tuple. We assume
1097          * all columns get compared at most of the tuples.
1098          */
1099         total_cost += cpu_operator_cost * input_tuples * numGroupCols;
1100
1101         path->startup_cost = startup_cost;
1102         path->total_cost = total_cost;
1103 }
1104
1105 /*
1106  * If a nestloop's inner path is an indexscan, be sure to use its estimated
1107  * output row count, which may be lower than the restriction-clause-only row
1108  * count of its parent.  (We don't include this case in the PATH_ROWS macro
1109  * because it applies *only* to a nestloop's inner relation.)  We have to
1110  * be prepared to recurse through Append nodes in case of an appendrel.
1111  */
1112 static double
1113 nestloop_inner_path_rows(Path *path)
1114 {
1115         double          result;
1116
1117         if (IsA(path, IndexPath))
1118                 result = ((IndexPath *) path)->rows;
1119         else if (IsA(path, BitmapHeapPath))
1120                 result = ((BitmapHeapPath *) path)->rows;
1121         else if (IsA(path, AppendPath))
1122         {
1123                 ListCell   *l;
1124
1125                 result = 0;
1126                 foreach(l, ((AppendPath *) path)->subpaths)
1127                 {
1128                         result += nestloop_inner_path_rows((Path *) lfirst(l));
1129                 }
1130         }
1131         else
1132                 result = PATH_ROWS(path);
1133
1134         return result;
1135 }
1136
1137 /*
1138  * cost_nestloop
1139  *        Determines and returns the cost of joining two relations using the
1140  *        nested loop algorithm.
1141  *
1142  * 'path' is already filled in except for the cost fields
1143  */
1144 void
1145 cost_nestloop(NestPath *path, PlannerInfo *root)
1146 {
1147         Path       *outer_path = path->outerjoinpath;
1148         Path       *inner_path = path->innerjoinpath;
1149         Cost            startup_cost = 0;
1150         Cost            run_cost = 0;
1151         Cost            cpu_per_tuple;
1152         QualCost        restrict_qual_cost;
1153         double          outer_path_rows = PATH_ROWS(outer_path);
1154         double          inner_path_rows = nestloop_inner_path_rows(inner_path);
1155         double          ntuples;
1156         Selectivity joininfactor;
1157
1158         if (!enable_nestloop)
1159                 startup_cost += disable_cost;
1160
1161         /*
1162          * If we're doing JOIN_IN then we will stop scanning inner tuples for an
1163          * outer tuple as soon as we have one match.  Account for the effects of
1164          * this by scaling down the cost estimates in proportion to the JOIN_IN
1165          * selectivity.  (This assumes that all the quals attached to the join are
1166          * IN quals, which should be true.)
1167          */
1168         joininfactor = join_in_selectivity(path, root);
1169
1170         /* cost of source data */
1171
1172         /*
1173          * NOTE: clearly, we must pay both outer and inner paths' startup_cost
1174          * before we can start returning tuples, so the join's startup cost is
1175          * their sum.  What's not so clear is whether the inner path's
1176          * startup_cost must be paid again on each rescan of the inner path. This
1177          * is not true if the inner path is materialized or is a hashjoin, but
1178          * probably is true otherwise.
1179          */
1180         startup_cost += outer_path->startup_cost + inner_path->startup_cost;
1181         run_cost += outer_path->total_cost - outer_path->startup_cost;
1182         if (IsA(inner_path, MaterialPath) ||
1183                 IsA(inner_path, HashPath))
1184         {
1185                 /* charge only run cost for each iteration of inner path */
1186         }
1187         else
1188         {
1189                 /*
1190                  * charge startup cost for each iteration of inner path, except we
1191                  * already charged the first startup_cost in our own startup
1192                  */
1193                 run_cost += (outer_path_rows - 1) * inner_path->startup_cost;
1194         }
1195         run_cost += outer_path_rows *
1196                 (inner_path->total_cost - inner_path->startup_cost) * joininfactor;
1197
1198         /*
1199          * Compute number of tuples processed (not number emitted!)
1200          */
1201         ntuples = outer_path_rows * inner_path_rows * joininfactor;
1202
1203         /* CPU costs */
1204         cost_qual_eval(&restrict_qual_cost, path->joinrestrictinfo);
1205         startup_cost += restrict_qual_cost.startup;
1206         cpu_per_tuple = cpu_tuple_cost + restrict_qual_cost.per_tuple;
1207         run_cost += cpu_per_tuple * ntuples;
1208
1209         path->path.startup_cost = startup_cost;
1210         path->path.total_cost = startup_cost + run_cost;
1211 }
1212
1213 /*
1214  * cost_mergejoin
1215  *        Determines and returns the cost of joining two relations using the
1216  *        merge join algorithm.
1217  *
1218  * 'path' is already filled in except for the cost fields
1219  *
1220  * Notes: path's mergeclauses should be a subset of the joinrestrictinfo list;
1221  * outersortkeys and innersortkeys are lists of the keys to be used
1222  * to sort the outer and inner relations, or NIL if no explicit
1223  * sort is needed because the source path is already ordered.
1224  */
1225 void
1226 cost_mergejoin(MergePath *path, PlannerInfo *root)
1227 {
1228         Path       *outer_path = path->jpath.outerjoinpath;
1229         Path       *inner_path = path->jpath.innerjoinpath;
1230         List       *mergeclauses = path->path_mergeclauses;
1231         List       *outersortkeys = path->outersortkeys;
1232         List       *innersortkeys = path->innersortkeys;
1233         Cost            startup_cost = 0;
1234         Cost            run_cost = 0;
1235         Cost            cpu_per_tuple;
1236         Selectivity merge_selec;
1237         QualCost        merge_qual_cost;
1238         QualCost        qp_qual_cost;
1239         RestrictInfo *firstclause;
1240         double          outer_path_rows = PATH_ROWS(outer_path);
1241         double          inner_path_rows = PATH_ROWS(inner_path);
1242         double          outer_rows,
1243                                 inner_rows;
1244         double          mergejointuples,
1245                                 rescannedtuples;
1246         double          rescanratio;
1247         Selectivity outerscansel,
1248                                 innerscansel;
1249         Selectivity joininfactor;
1250         Path            sort_path;              /* dummy for result of cost_sort */
1251
1252         if (!enable_mergejoin)
1253                 startup_cost += disable_cost;
1254
1255         /*
1256          * Compute cost and selectivity of the mergequals and qpquals (other
1257          * restriction clauses) separately.  We use approx_selectivity here for
1258          * speed --- in most cases, any errors won't affect the result much.
1259          *
1260          * Note: it's probably bogus to use the normal selectivity calculation
1261          * here when either the outer or inner path is a UniquePath.
1262          */
1263         merge_selec = approx_selectivity(root, mergeclauses,
1264                                                                          path->jpath.jointype);
1265         cost_qual_eval(&merge_qual_cost, mergeclauses);
1266         cost_qual_eval(&qp_qual_cost, path->jpath.joinrestrictinfo);
1267         qp_qual_cost.startup -= merge_qual_cost.startup;
1268         qp_qual_cost.per_tuple -= merge_qual_cost.per_tuple;
1269
1270         /* approx # tuples passing the merge quals */
1271         mergejointuples = clamp_row_est(merge_selec * outer_path_rows * inner_path_rows);
1272
1273         /*
1274          * When there are equal merge keys in the outer relation, the mergejoin
1275          * must rescan any matching tuples in the inner relation. This means
1276          * re-fetching inner tuples.  Our cost model for this is that a re-fetch
1277          * costs the same as an original fetch, which is probably an overestimate;
1278          * but on the other hand we ignore the bookkeeping costs of mark/restore.
1279          * Not clear if it's worth developing a more refined model.
1280          *
1281          * The number of re-fetches can be estimated approximately as size of
1282          * merge join output minus size of inner relation.      Assume that the
1283          * distinct key values are 1, 2, ..., and denote the number of values of
1284          * each key in the outer relation as m1, m2, ...; in the inner relation,
1285          * n1, n2, ... Then we have
1286          *
1287          * size of join = m1 * n1 + m2 * n2 + ...
1288          *
1289          * number of rescanned tuples = (m1 - 1) * n1 + (m2 - 1) * n2 + ... = m1 *
1290          * n1 + m2 * n2 + ... - (n1 + n2 + ...) = size of join - size of inner
1291          * relation
1292          *
1293          * This equation works correctly for outer tuples having no inner match
1294          * (nk = 0), but not for inner tuples having no outer match (mk = 0); we
1295          * are effectively subtracting those from the number of rescanned tuples,
1296          * when we should not.  Can we do better without expensive selectivity
1297          * computations?
1298          */
1299         if (IsA(outer_path, UniquePath))
1300                 rescannedtuples = 0;
1301         else
1302         {
1303                 rescannedtuples = mergejointuples - inner_path_rows;
1304                 /* Must clamp because of possible underestimate */
1305                 if (rescannedtuples < 0)
1306                         rescannedtuples = 0;
1307         }
1308         /* We'll inflate inner run cost this much to account for rescanning */
1309         rescanratio = 1.0 + (rescannedtuples / inner_path_rows);
1310
1311         /*
1312          * A merge join will stop as soon as it exhausts either input stream
1313          * (unless it's an outer join, in which case the outer side has to be
1314          * scanned all the way anyway).  Estimate fraction of the left and right
1315          * inputs that will actually need to be scanned. We use only the first
1316          * (most significant) merge clause for this purpose.
1317          *
1318          * Since this calculation is somewhat expensive, and will be the same for
1319          * all mergejoin paths associated with the merge clause, we cache the
1320          * results in the RestrictInfo node.
1321          */
1322         if (mergeclauses && path->jpath.jointype != JOIN_FULL)
1323         {
1324                 firstclause = (RestrictInfo *) linitial(mergeclauses);
1325                 if (firstclause->left_mergescansel < 0) /* not computed yet? */
1326                         mergejoinscansel(root, (Node *) firstclause->clause,
1327                                                          &firstclause->left_mergescansel,
1328                                                          &firstclause->right_mergescansel);
1329
1330                 if (bms_is_subset(firstclause->left_relids, outer_path->parent->relids))
1331                 {
1332                         /* left side of clause is outer */
1333                         outerscansel = firstclause->left_mergescansel;
1334                         innerscansel = firstclause->right_mergescansel;
1335                 }
1336                 else
1337                 {
1338                         /* left side of clause is inner */
1339                         outerscansel = firstclause->right_mergescansel;
1340                         innerscansel = firstclause->left_mergescansel;
1341                 }
1342                 if (path->jpath.jointype == JOIN_LEFT)
1343                         outerscansel = 1.0;
1344                 else if (path->jpath.jointype == JOIN_RIGHT)
1345                         innerscansel = 1.0;
1346         }
1347         else
1348         {
1349                 /* cope with clauseless or full mergejoin */
1350                 outerscansel = innerscansel = 1.0;
1351         }
1352
1353         /* convert selectivity to row count; must scan at least one row */
1354         outer_rows = clamp_row_est(outer_path_rows * outerscansel);
1355         inner_rows = clamp_row_est(inner_path_rows * innerscansel);
1356
1357         /*
1358          * Readjust scan selectivities to account for above rounding.  This is
1359          * normally an insignificant effect, but when there are only a few rows in
1360          * the inputs, failing to do this makes for a large percentage error.
1361          */
1362         outerscansel = outer_rows / outer_path_rows;
1363         innerscansel = inner_rows / inner_path_rows;
1364
1365         /* cost of source data */
1366
1367         if (outersortkeys)                      /* do we need to sort outer? */
1368         {
1369                 cost_sort(&sort_path,
1370                                   root,
1371                                   outersortkeys,
1372                                   outer_path->total_cost,
1373                                   outer_path_rows,
1374                                   outer_path->parent->width);
1375                 startup_cost += sort_path.startup_cost;
1376                 run_cost += (sort_path.total_cost - sort_path.startup_cost)
1377                         * outerscansel;
1378         }
1379         else
1380         {
1381                 startup_cost += outer_path->startup_cost;
1382                 run_cost += (outer_path->total_cost - outer_path->startup_cost)
1383                         * outerscansel;
1384         }
1385
1386         if (innersortkeys)                      /* do we need to sort inner? */
1387         {
1388                 cost_sort(&sort_path,
1389                                   root,
1390                                   innersortkeys,
1391                                   inner_path->total_cost,
1392                                   inner_path_rows,
1393                                   inner_path->parent->width);
1394                 startup_cost += sort_path.startup_cost;
1395                 run_cost += (sort_path.total_cost - sort_path.startup_cost)
1396                         * innerscansel * rescanratio;
1397         }
1398         else
1399         {
1400                 startup_cost += inner_path->startup_cost;
1401                 run_cost += (inner_path->total_cost - inner_path->startup_cost)
1402                         * innerscansel * rescanratio;
1403         }
1404
1405         /* CPU costs */
1406
1407         /*
1408          * If we're doing JOIN_IN then we will stop outputting inner tuples for an
1409          * outer tuple as soon as we have one match.  Account for the effects of
1410          * this by scaling down the cost estimates in proportion to the expected
1411          * output size.  (This assumes that all the quals attached to the join are
1412          * IN quals, which should be true.)
1413          */
1414         joininfactor = join_in_selectivity(&path->jpath, root);
1415
1416         /*
1417          * The number of tuple comparisons needed is approximately number of outer
1418          * rows plus number of inner rows plus number of rescanned tuples (can we
1419          * refine this?).  At each one, we need to evaluate the mergejoin quals.
1420          * NOTE: JOIN_IN mode does not save any work here, so do NOT include
1421          * joininfactor.
1422          */
1423         startup_cost += merge_qual_cost.startup;
1424         run_cost += merge_qual_cost.per_tuple *
1425                 (outer_rows + inner_rows * rescanratio);
1426
1427         /*
1428          * For each tuple that gets through the mergejoin proper, we charge
1429          * cpu_tuple_cost plus the cost of evaluating additional restriction
1430          * clauses that are to be applied at the join.  (This is pessimistic since
1431          * not all of the quals may get evaluated at each tuple.)  This work is
1432          * skipped in JOIN_IN mode, so apply the factor.
1433          */
1434         startup_cost += qp_qual_cost.startup;
1435         cpu_per_tuple = cpu_tuple_cost + qp_qual_cost.per_tuple;
1436         run_cost += cpu_per_tuple * mergejointuples * joininfactor;
1437
1438         path->jpath.path.startup_cost = startup_cost;
1439         path->jpath.path.total_cost = startup_cost + run_cost;
1440 }
1441
1442 /*
1443  * cost_hashjoin
1444  *        Determines and returns the cost of joining two relations using the
1445  *        hash join algorithm.
1446  *
1447  * 'path' is already filled in except for the cost fields
1448  *
1449  * Note: path's hashclauses should be a subset of the joinrestrictinfo list
1450  */
1451 void
1452 cost_hashjoin(HashPath *path, PlannerInfo *root)
1453 {
1454         Path       *outer_path = path->jpath.outerjoinpath;
1455         Path       *inner_path = path->jpath.innerjoinpath;
1456         List       *hashclauses = path->path_hashclauses;
1457         Cost            startup_cost = 0;
1458         Cost            run_cost = 0;
1459         Cost            cpu_per_tuple;
1460         Selectivity hash_selec;
1461         QualCost        hash_qual_cost;
1462         QualCost        qp_qual_cost;
1463         double          hashjointuples;
1464         double          outer_path_rows = PATH_ROWS(outer_path);
1465         double          inner_path_rows = PATH_ROWS(inner_path);
1466         double          outerbytes = relation_byte_size(outer_path_rows,
1467                                                                                                 outer_path->parent->width);
1468         double          innerbytes = relation_byte_size(inner_path_rows,
1469                                                                                                 inner_path->parent->width);
1470         int                     num_hashclauses = list_length(hashclauses);
1471         int                     numbuckets;
1472         int                     numbatches;
1473         double          virtualbuckets;
1474         Selectivity innerbucketsize;
1475         Selectivity joininfactor;
1476         ListCell   *hcl;
1477
1478         if (!enable_hashjoin)
1479                 startup_cost += disable_cost;
1480
1481         /*
1482          * Compute cost and selectivity of the hashquals and qpquals (other
1483          * restriction clauses) separately.  We use approx_selectivity here for
1484          * speed --- in most cases, any errors won't affect the result much.
1485          *
1486          * Note: it's probably bogus to use the normal selectivity calculation
1487          * here when either the outer or inner path is a UniquePath.
1488          */
1489         hash_selec = approx_selectivity(root, hashclauses,
1490                                                                         path->jpath.jointype);
1491         cost_qual_eval(&hash_qual_cost, hashclauses);
1492         cost_qual_eval(&qp_qual_cost, path->jpath.joinrestrictinfo);
1493         qp_qual_cost.startup -= hash_qual_cost.startup;
1494         qp_qual_cost.per_tuple -= hash_qual_cost.per_tuple;
1495
1496         /* approx # tuples passing the hash quals */
1497         hashjointuples = clamp_row_est(hash_selec * outer_path_rows * inner_path_rows);
1498
1499         /* cost of source data */
1500         startup_cost += outer_path->startup_cost;
1501         run_cost += outer_path->total_cost - outer_path->startup_cost;
1502         startup_cost += inner_path->total_cost;
1503
1504         /*
1505          * Cost of computing hash function: must do it once per input tuple. We
1506          * charge one cpu_operator_cost for each column's hash function.
1507          *
1508          * XXX when a hashclause is more complex than a single operator, we really
1509          * should charge the extra eval costs of the left or right side, as
1510          * appropriate, here.  This seems more work than it's worth at the moment.
1511          */
1512         startup_cost += cpu_operator_cost * num_hashclauses * inner_path_rows;
1513         run_cost += cpu_operator_cost * num_hashclauses * outer_path_rows;
1514
1515         /* Get hash table size that executor would use for inner relation */
1516         ExecChooseHashTableSize(inner_path_rows,
1517                                                         inner_path->parent->width,
1518                                                         &numbuckets,
1519                                                         &numbatches);
1520         virtualbuckets = (double) numbuckets *(double) numbatches;
1521
1522         /*
1523          * Determine bucketsize fraction for inner relation.  We use the smallest
1524          * bucketsize estimated for any individual hashclause; this is undoubtedly
1525          * conservative.
1526          *
1527          * BUT: if inner relation has been unique-ified, we can assume it's good
1528          * for hashing.  This is important both because it's the right answer, and
1529          * because we avoid contaminating the cache with a value that's wrong for
1530          * non-unique-ified paths.
1531          */
1532         if (IsA(inner_path, UniquePath))
1533                 innerbucketsize = 1.0 / virtualbuckets;
1534         else
1535         {
1536                 innerbucketsize = 1.0;
1537                 foreach(hcl, hashclauses)
1538                 {
1539                         RestrictInfo *restrictinfo = (RestrictInfo *) lfirst(hcl);
1540                         Selectivity thisbucketsize;
1541
1542                         Assert(IsA(restrictinfo, RestrictInfo));
1543
1544                         /*
1545                          * First we have to figure out which side of the hashjoin clause
1546                          * is the inner side.
1547                          *
1548                          * Since we tend to visit the same clauses over and over when
1549                          * planning a large query, we cache the bucketsize estimate in the
1550                          * RestrictInfo node to avoid repeated lookups of statistics.
1551                          */
1552                         if (bms_is_subset(restrictinfo->right_relids,
1553                                                           inner_path->parent->relids))
1554                         {
1555                                 /* righthand side is inner */
1556                                 thisbucketsize = restrictinfo->right_bucketsize;
1557                                 if (thisbucketsize < 0)
1558                                 {
1559                                         /* not cached yet */
1560                                         thisbucketsize =
1561                                                 estimate_hash_bucketsize(root,
1562                                                                                    get_rightop(restrictinfo->clause),
1563                                                                                                  virtualbuckets);
1564                                         restrictinfo->right_bucketsize = thisbucketsize;
1565                                 }
1566                         }
1567                         else
1568                         {
1569                                 Assert(bms_is_subset(restrictinfo->left_relids,
1570                                                                          inner_path->parent->relids));
1571                                 /* lefthand side is inner */
1572                                 thisbucketsize = restrictinfo->left_bucketsize;
1573                                 if (thisbucketsize < 0)
1574                                 {
1575                                         /* not cached yet */
1576                                         thisbucketsize =
1577                                                 estimate_hash_bucketsize(root,
1578                                                                                         get_leftop(restrictinfo->clause),
1579                                                                                                  virtualbuckets);
1580                                         restrictinfo->left_bucketsize = thisbucketsize;
1581                                 }
1582                         }
1583
1584                         if (innerbucketsize > thisbucketsize)
1585                                 innerbucketsize = thisbucketsize;
1586                 }
1587         }
1588
1589         /*
1590          * If inner relation is too big then we will need to "batch" the join,
1591          * which implies writing and reading most of the tuples to disk an extra
1592          * time.  Charge one cost unit per page of I/O (correct since it should be
1593          * nice and sequential...).  Writing the inner rel counts as startup cost,
1594          * all the rest as run cost.
1595          */
1596         if (numbatches > 1)
1597         {
1598                 double          outerpages = page_size(outer_path_rows,
1599                                                                                    outer_path->parent->width);
1600                 double          innerpages = page_size(inner_path_rows,
1601                                                                                    inner_path->parent->width);
1602
1603                 startup_cost += innerpages;
1604                 run_cost += innerpages + 2 * outerpages;
1605         }
1606
1607         /* CPU costs */
1608
1609         /*
1610          * If we're doing JOIN_IN then we will stop comparing inner tuples to an
1611          * outer tuple as soon as we have one match.  Account for the effects of
1612          * this by scaling down the cost estimates in proportion to the expected
1613          * output size.  (This assumes that all the quals attached to the join are
1614          * IN quals, which should be true.)
1615          */
1616         joininfactor = join_in_selectivity(&path->jpath, root);
1617
1618         /*
1619          * The number of tuple comparisons needed is the number of outer tuples
1620          * times the typical number of tuples in a hash bucket, which is the inner
1621          * relation size times its bucketsize fraction.  At each one, we need to
1622          * evaluate the hashjoin quals.  (Note: charging the full qual eval cost
1623          * at each tuple is pessimistic, since we don't evaluate the quals unless
1624          * the hash values match exactly.)
1625          */
1626         startup_cost += hash_qual_cost.startup;
1627         run_cost += hash_qual_cost.per_tuple *
1628                 outer_path_rows * clamp_row_est(inner_path_rows * innerbucketsize) *
1629                 joininfactor;
1630
1631         /*
1632          * For each tuple that gets through the hashjoin proper, we charge
1633          * cpu_tuple_cost plus the cost of evaluating additional restriction
1634          * clauses that are to be applied at the join.  (This is pessimistic since
1635          * not all of the quals may get evaluated at each tuple.)
1636          */
1637         startup_cost += qp_qual_cost.startup;
1638         cpu_per_tuple = cpu_tuple_cost + qp_qual_cost.per_tuple;
1639         run_cost += cpu_per_tuple * hashjointuples * joininfactor;
1640
1641         /*
1642          * Bias against putting larger relation on inside.      We don't want an
1643          * absolute prohibition, though, since larger relation might have better
1644          * bucketsize --- and we can't trust the size estimates unreservedly,
1645          * anyway.      Instead, inflate the run cost by the square root of the size
1646          * ratio.  (Why square root?  No real good reason, but it seems
1647          * reasonable...)
1648          *
1649          * Note: before 7.4 we implemented this by inflating startup cost; but if
1650          * there's a disable_cost component in the input paths' startup cost, that
1651          * unfairly penalizes the hash.  Probably it'd be better to keep track of
1652          * disable penalty separately from cost.
1653          */
1654         if (innerbytes > outerbytes && outerbytes > 0)
1655                 run_cost *= sqrt(innerbytes / outerbytes);
1656
1657         path->jpath.path.startup_cost = startup_cost;
1658         path->jpath.path.total_cost = startup_cost + run_cost;
1659 }
1660
1661
1662 /*
1663  * cost_qual_eval
1664  *              Estimate the CPU costs of evaluating a WHERE clause.
1665  *              The input can be either an implicitly-ANDed list of boolean
1666  *              expressions, or a list of RestrictInfo nodes.
1667  *              The result includes both a one-time (startup) component,
1668  *              and a per-evaluation component.
1669  */
1670 void
1671 cost_qual_eval(QualCost *cost, List *quals)
1672 {
1673         ListCell   *l;
1674
1675         cost->startup = 0;
1676         cost->per_tuple = 0;
1677
1678         /* We don't charge any cost for the implicit ANDing at top level ... */
1679
1680         foreach(l, quals)
1681         {
1682                 Node       *qual = (Node *) lfirst(l);
1683
1684                 /*
1685                  * RestrictInfo nodes contain an eval_cost field reserved for this
1686                  * routine's use, so that it's not necessary to evaluate the qual
1687                  * clause's cost more than once.  If the clause's cost hasn't been
1688                  * computed yet, the field's startup value will contain -1.
1689                  *
1690                  * If the RestrictInfo is marked pseudoconstant, it will be tested
1691                  * only once, so treat its cost as all startup cost.
1692                  */
1693                 if (qual && IsA(qual, RestrictInfo))
1694                 {
1695                         RestrictInfo *rinfo = (RestrictInfo *) qual;
1696
1697                         if (rinfo->eval_cost.startup < 0)
1698                         {
1699                                 rinfo->eval_cost.startup = 0;
1700                                 rinfo->eval_cost.per_tuple = 0;
1701                                 cost_qual_eval_walker((Node *) rinfo->clause,
1702                                                                           &rinfo->eval_cost);
1703                                 if (rinfo->pseudoconstant)
1704                                 {
1705                                         /* count one execution during startup */
1706                                         rinfo->eval_cost.startup += rinfo->eval_cost.per_tuple;
1707                                         rinfo->eval_cost.per_tuple = 0;
1708                                 }
1709                         }
1710                         cost->startup += rinfo->eval_cost.startup;
1711                         cost->per_tuple += rinfo->eval_cost.per_tuple;
1712                 }
1713                 else
1714                 {
1715                         /* If it's a bare expression, must always do it the hard way */
1716                         cost_qual_eval_walker(qual, cost);
1717                 }
1718         }
1719 }
1720
1721 static bool
1722 cost_qual_eval_walker(Node *node, QualCost *total)
1723 {
1724         if (node == NULL)
1725                 return false;
1726
1727         /*
1728          * Our basic strategy is to charge one cpu_operator_cost for each operator
1729          * or function node in the given tree.  Vars and Consts are charged zero,
1730          * and so are boolean operators (AND, OR, NOT). Simplistic, but a lot
1731          * better than no model at all.
1732          *
1733          * Should we try to account for the possibility of short-circuit
1734          * evaluation of AND/OR?
1735          */
1736         if (IsA(node, FuncExpr) ||
1737                 IsA(node, OpExpr) ||
1738                 IsA(node, DistinctExpr) ||
1739                 IsA(node, NullIfExpr))
1740                 total->per_tuple += cpu_operator_cost;
1741         else if (IsA(node, ScalarArrayOpExpr))
1742         {
1743                 /*
1744                  * Estimate that the operator will be applied to about half of the
1745                  * array elements before the answer is determined.
1746                  */
1747                 ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) node;
1748                 Node *arraynode = (Node *) lsecond(saop->args);
1749
1750                 total->per_tuple +=
1751                         cpu_operator_cost * estimate_array_length(arraynode) * 0.5;
1752         }
1753         else if (IsA(node, RowCompareExpr))
1754         {
1755                 /* Conservatively assume we will check all the columns */
1756                 RowCompareExpr *rcexpr = (RowCompareExpr *) node;
1757
1758                 total->per_tuple += cpu_operator_cost * list_length(rcexpr->opnos);
1759         }
1760         else if (IsA(node, SubLink))
1761         {
1762                 /* This routine should not be applied to un-planned expressions */
1763                 elog(ERROR, "cannot handle unplanned sub-select");
1764         }
1765         else if (IsA(node, SubPlan))
1766         {
1767                 /*
1768                  * A subplan node in an expression typically indicates that the
1769                  * subplan will be executed on each evaluation, so charge accordingly.
1770                  * (Sub-selects that can be executed as InitPlans have already been
1771                  * removed from the expression.)
1772                  *
1773                  * An exception occurs when we have decided we can implement the
1774                  * subplan by hashing.
1775                  */
1776                 SubPlan    *subplan = (SubPlan *) node;
1777                 Plan       *plan = subplan->plan;
1778
1779                 if (subplan->useHashTable)
1780                 {
1781                         /*
1782                          * If we are using a hash table for the subquery outputs, then the
1783                          * cost of evaluating the query is a one-time cost. We charge one
1784                          * cpu_operator_cost per tuple for the work of loading the
1785                          * hashtable, too.
1786                          */
1787                         total->startup += plan->total_cost +
1788                                 cpu_operator_cost * plan->plan_rows;
1789
1790                         /*
1791                          * The per-tuple costs include the cost of evaluating the lefthand
1792                          * expressions, plus the cost of probing the hashtable. Recursion
1793                          * into the testexpr will handle the lefthand expressions
1794                          * properly, and will count one cpu_operator_cost for each
1795                          * comparison operator.  That is probably too low for the probing
1796                          * cost, but it's hard to make a better estimate, so live with it
1797                          * for now.
1798                          */
1799                 }
1800                 else
1801                 {
1802                         /*
1803                          * Otherwise we will be rescanning the subplan output on each
1804                          * evaluation.  We need to estimate how much of the output we will
1805                          * actually need to scan.  NOTE: this logic should agree with the
1806                          * estimates used by make_subplan() in plan/subselect.c.
1807                          */
1808                         Cost            plan_run_cost = plan->total_cost - plan->startup_cost;
1809
1810                         if (subplan->subLinkType == EXISTS_SUBLINK)
1811                         {
1812                                 /* we only need to fetch 1 tuple */
1813                                 total->per_tuple += plan_run_cost / plan->plan_rows;
1814                         }
1815                         else if (subplan->subLinkType == ALL_SUBLINK ||
1816                                          subplan->subLinkType == ANY_SUBLINK)
1817                         {
1818                                 /* assume we need 50% of the tuples */
1819                                 total->per_tuple += 0.50 * plan_run_cost;
1820                                 /* also charge a cpu_operator_cost per row examined */
1821                                 total->per_tuple += 0.50 * plan->plan_rows * cpu_operator_cost;
1822                         }
1823                         else
1824                         {
1825                                 /* assume we need all tuples */
1826                                 total->per_tuple += plan_run_cost;
1827                         }
1828
1829                         /*
1830                          * Also account for subplan's startup cost. If the subplan is
1831                          * uncorrelated or undirect correlated, AND its topmost node is a
1832                          * Sort or Material node, assume that we'll only need to pay its
1833                          * startup cost once; otherwise assume we pay the startup cost
1834                          * every time.
1835                          */
1836                         if (subplan->parParam == NIL &&
1837                                 (IsA(plan, Sort) ||
1838                                  IsA(plan, Material)))
1839                                 total->startup += plan->startup_cost;
1840                         else
1841                                 total->per_tuple += plan->startup_cost;
1842                 }
1843         }
1844
1845         return expression_tree_walker(node, cost_qual_eval_walker,
1846                                                                   (void *) total);
1847 }
1848
1849
1850 /*
1851  * approx_selectivity
1852  *              Quick-and-dirty estimation of clause selectivities.
1853  *              The input can be either an implicitly-ANDed list of boolean
1854  *              expressions, or a list of RestrictInfo nodes (typically the latter).
1855  *
1856  * This is quick-and-dirty because we bypass clauselist_selectivity, and
1857  * simply multiply the independent clause selectivities together.  Now
1858  * clauselist_selectivity often can't do any better than that anyhow, but
1859  * for some situations (such as range constraints) it is smarter.  However,
1860  * we can't effectively cache the results of clauselist_selectivity, whereas
1861  * the individual clause selectivities can be and are cached.
1862  *
1863  * Since we are only using the results to estimate how many potential
1864  * output tuples are generated and passed through qpqual checking, it
1865  * seems OK to live with the approximation.
1866  */
1867 static Selectivity
1868 approx_selectivity(PlannerInfo *root, List *quals, JoinType jointype)
1869 {
1870         Selectivity total = 1.0;
1871         ListCell   *l;
1872
1873         foreach(l, quals)
1874         {
1875                 Node       *qual = (Node *) lfirst(l);
1876
1877                 /* Note that clause_selectivity will be able to cache its result */
1878                 total *= clause_selectivity(root, qual, 0, jointype);
1879         }
1880         return total;
1881 }
1882
1883
1884 /*
1885  * set_baserel_size_estimates
1886  *              Set the size estimates for the given base relation.
1887  *
1888  * The rel's targetlist and restrictinfo list must have been constructed
1889  * already.
1890  *
1891  * We set the following fields of the rel node:
1892  *      rows: the estimated number of output tuples (after applying
1893  *                restriction clauses).
1894  *      width: the estimated average output tuple width in bytes.
1895  *      baserestrictcost: estimated cost of evaluating baserestrictinfo clauses.
1896  */
1897 void
1898 set_baserel_size_estimates(PlannerInfo *root, RelOptInfo *rel)
1899 {
1900         double          nrows;
1901
1902         /* Should only be applied to base relations */
1903         Assert(rel->relid > 0);
1904
1905         nrows = rel->tuples *
1906                 clauselist_selectivity(root,
1907                                                            rel->baserestrictinfo,
1908                                                            0,
1909                                                            JOIN_INNER);
1910
1911         rel->rows = clamp_row_est(nrows);
1912
1913         cost_qual_eval(&rel->baserestrictcost, rel->baserestrictinfo);
1914
1915         set_rel_width(root, rel);
1916 }
1917
1918 /*
1919  * set_joinrel_size_estimates
1920  *              Set the size estimates for the given join relation.
1921  *
1922  * The rel's targetlist must have been constructed already, and a
1923  * restriction clause list that matches the given component rels must
1924  * be provided.
1925  *
1926  * Since there is more than one way to make a joinrel for more than two
1927  * base relations, the results we get here could depend on which component
1928  * rel pair is provided.  In theory we should get the same answers no matter
1929  * which pair is provided; in practice, since the selectivity estimation
1930  * routines don't handle all cases equally well, we might not.  But there's
1931  * not much to be done about it.  (Would it make sense to repeat the
1932  * calculations for each pair of input rels that's encountered, and somehow
1933  * average the results?  Probably way more trouble than it's worth.)
1934  *
1935  * It's important that the results for symmetric JoinTypes be symmetric,
1936  * eg, (rel1, rel2, JOIN_LEFT) should produce the same result as (rel2,
1937  * rel1, JOIN_RIGHT).  Also, JOIN_IN should produce the same result as
1938  * JOIN_UNIQUE_INNER, likewise JOIN_REVERSE_IN == JOIN_UNIQUE_OUTER.
1939  *
1940  * We set only the rows field here.  The width field was already set by
1941  * build_joinrel_tlist, and baserestrictcost is not used for join rels.
1942  */
1943 void
1944 set_joinrel_size_estimates(PlannerInfo *root, RelOptInfo *rel,
1945                                                    RelOptInfo *outer_rel,
1946                                                    RelOptInfo *inner_rel,
1947                                                    JoinType jointype,
1948                                                    List *restrictlist)
1949 {
1950         Selectivity selec;
1951         double          nrows;
1952         UniquePath *upath;
1953
1954         /*
1955          * Compute joinclause selectivity.      Note that we are only considering
1956          * clauses that become restriction clauses at this join level; we are not
1957          * double-counting them because they were not considered in estimating the
1958          * sizes of the component rels.
1959          */
1960         selec = clauselist_selectivity(root,
1961                                                                    restrictlist,
1962                                                                    0,
1963                                                                    jointype);
1964
1965         /*
1966          * Basically, we multiply size of Cartesian product by selectivity.
1967          *
1968          * If we are doing an outer join, take that into account: the output must
1969          * be at least as large as the non-nullable input.      (Is there any chance
1970          * of being even smarter?)  (XXX this is not really right, because it
1971          * assumes all the restriction clauses are join clauses; we should figure
1972          * pushed-down clauses separately.)
1973          *
1974          * For JOIN_IN and variants, the Cartesian product is figured with respect
1975          * to a unique-ified input, and then we can clamp to the size of the other
1976          * input.
1977          */
1978         switch (jointype)
1979         {
1980                 case JOIN_INNER:
1981                         nrows = outer_rel->rows * inner_rel->rows * selec;
1982                         break;
1983                 case JOIN_LEFT:
1984                         nrows = outer_rel->rows * inner_rel->rows * selec;
1985                         if (nrows < outer_rel->rows)
1986                                 nrows = outer_rel->rows;
1987                         break;
1988                 case JOIN_RIGHT:
1989                         nrows = outer_rel->rows * inner_rel->rows * selec;
1990                         if (nrows < inner_rel->rows)
1991                                 nrows = inner_rel->rows;
1992                         break;
1993                 case JOIN_FULL:
1994                         nrows = outer_rel->rows * inner_rel->rows * selec;
1995                         if (nrows < outer_rel->rows)
1996                                 nrows = outer_rel->rows;
1997                         if (nrows < inner_rel->rows)
1998                                 nrows = inner_rel->rows;
1999                         break;
2000                 case JOIN_IN:
2001                 case JOIN_UNIQUE_INNER:
2002                         upath = create_unique_path(root, inner_rel,
2003                                                                            inner_rel->cheapest_total_path);
2004                         nrows = outer_rel->rows * upath->rows * selec;
2005                         if (nrows > outer_rel->rows)
2006                                 nrows = outer_rel->rows;
2007                         break;
2008                 case JOIN_REVERSE_IN:
2009                 case JOIN_UNIQUE_OUTER:
2010                         upath = create_unique_path(root, outer_rel,
2011                                                                            outer_rel->cheapest_total_path);
2012                         nrows = upath->rows * inner_rel->rows * selec;
2013                         if (nrows > inner_rel->rows)
2014                                 nrows = inner_rel->rows;
2015                         break;
2016                 default:
2017                         elog(ERROR, "unrecognized join type: %d", (int) jointype);
2018                         nrows = 0;                      /* keep compiler quiet */
2019                         break;
2020         }
2021
2022         rel->rows = clamp_row_est(nrows);
2023 }
2024
2025 /*
2026  * join_in_selectivity
2027  *        Determines the factor by which a JOIN_IN join's result is expected
2028  *        to be smaller than an ordinary inner join.
2029  *
2030  * 'path' is already filled in except for the cost fields
2031  */
2032 static Selectivity
2033 join_in_selectivity(JoinPath *path, PlannerInfo *root)
2034 {
2035         RelOptInfo *innerrel;
2036         UniquePath *innerunique;
2037         Selectivity selec;
2038         double          nrows;
2039
2040         /* Return 1.0 whenever it's not JOIN_IN */
2041         if (path->jointype != JOIN_IN)
2042                 return 1.0;
2043
2044         /*
2045          * Return 1.0 if the inner side is already known unique.  The case where
2046          * the inner path is already a UniquePath probably cannot happen in
2047          * current usage, but check it anyway for completeness.  The interesting
2048          * case is where we've determined the inner relation itself is unique,
2049          * which we can check by looking at the rows estimate for its UniquePath.
2050          */
2051         if (IsA(path->innerjoinpath, UniquePath))
2052                 return 1.0;
2053         innerrel = path->innerjoinpath->parent;
2054         innerunique = create_unique_path(root,
2055                                                                          innerrel,
2056                                                                          innerrel->cheapest_total_path);
2057         if (innerunique->rows >= innerrel->rows)
2058                 return 1.0;
2059
2060         /*
2061          * Compute same result set_joinrel_size_estimates would compute for
2062          * JOIN_INNER.  Note that we use the input rels' absolute size estimates,
2063          * not PATH_ROWS() which might be less; if we used PATH_ROWS() we'd be
2064          * double-counting the effects of any join clauses used in input scans.
2065          */
2066         selec = clauselist_selectivity(root,
2067                                                                    path->joinrestrictinfo,
2068                                                                    0,
2069                                                                    JOIN_INNER);
2070         nrows = path->outerjoinpath->parent->rows * innerrel->rows * selec;
2071
2072         nrows = clamp_row_est(nrows);
2073
2074         /* See if it's larger than the actual JOIN_IN size estimate */
2075         if (nrows > path->path.parent->rows)
2076                 return path->path.parent->rows / nrows;
2077         else
2078                 return 1.0;
2079 }
2080
2081 /*
2082  * set_function_size_estimates
2083  *              Set the size estimates for a base relation that is a function call.
2084  *
2085  * The rel's targetlist and restrictinfo list must have been constructed
2086  * already.
2087  *
2088  * We set the same fields as set_baserel_size_estimates.
2089  */
2090 void
2091 set_function_size_estimates(PlannerInfo *root, RelOptInfo *rel)
2092 {
2093         RangeTblEntry *rte;
2094
2095         /* Should only be applied to base relations that are functions */
2096         Assert(rel->relid > 0);
2097         rte = rt_fetch(rel->relid, root->parse->rtable);
2098         Assert(rte->rtekind == RTE_FUNCTION);
2099
2100         /*
2101          * Estimate number of rows the function itself will return.
2102          *
2103          * XXX no idea how to do this yet; but we can at least check whether
2104          * function returns set or not...
2105          */
2106         if (expression_returns_set(rte->funcexpr))
2107                 rel->tuples = 1000;
2108         else
2109                 rel->tuples = 1;
2110
2111         /* Now estimate number of output rows, etc */
2112         set_baserel_size_estimates(root, rel);
2113 }
2114
2115 /*
2116  * set_values_size_estimates
2117  *              Set the size estimates for a base relation that is a values list.
2118  *
2119  * The rel's targetlist and restrictinfo list must have been constructed
2120  * already.
2121  *
2122  * We set the same fields as set_baserel_size_estimates.
2123  */
2124 void
2125 set_values_size_estimates(PlannerInfo *root, RelOptInfo *rel)
2126 {
2127         RangeTblEntry *rte;
2128
2129         /* Should only be applied to base relations that are values lists */
2130         Assert(rel->relid > 0);
2131         rte = rt_fetch(rel->relid, root->parse->rtable);
2132         Assert(rte->rtekind == RTE_VALUES);
2133
2134         /*
2135          * Estimate number of rows the values list will return.
2136          * We know this precisely based on the list length (well,
2137          * barring set-returning functions in list items, but that's
2138          * a refinement not catered for anywhere else either).
2139          */
2140         rel->tuples = list_length(rte->values_lists);
2141
2142         /* Now estimate number of output rows, etc */
2143         set_baserel_size_estimates(root, rel);
2144 }
2145
2146
2147 /*
2148  * set_rel_width
2149  *              Set the estimated output width of a base relation.
2150  *
2151  * NB: this works best on plain relations because it prefers to look at
2152  * real Vars.  It will fail to make use of pg_statistic info when applied
2153  * to a subquery relation, even if the subquery outputs are simple vars
2154  * that we could have gotten info for.  Is it worth trying to be smarter
2155  * about subqueries?
2156  *
2157  * The per-attribute width estimates are cached for possible re-use while
2158  * building join relations.
2159  */
2160 static void
2161 set_rel_width(PlannerInfo *root, RelOptInfo *rel)
2162 {
2163         int32           tuple_width = 0;
2164         ListCell   *tllist;
2165
2166         foreach(tllist, rel->reltargetlist)
2167         {
2168                 Var                *var = (Var *) lfirst(tllist);
2169                 int                     ndx;
2170                 Oid                     relid;
2171                 int32           item_width;
2172
2173                 /* For now, punt on whole-row child Vars */
2174                 if (!IsA(var, Var))
2175                 {
2176                         tuple_width += 32;      /* arbitrary */
2177                         continue;
2178                 }
2179
2180                 ndx = var->varattno - rel->min_attr;
2181
2182                 /*
2183                  * The width probably hasn't been cached yet, but may as well check
2184                  */
2185                 if (rel->attr_widths[ndx] > 0)
2186                 {
2187                         tuple_width += rel->attr_widths[ndx];
2188                         continue;
2189                 }
2190
2191                 relid = getrelid(var->varno, root->parse->rtable);
2192                 if (relid != InvalidOid)
2193                 {
2194                         item_width = get_attavgwidth(relid, var->varattno);
2195                         if (item_width > 0)
2196                         {
2197                                 rel->attr_widths[ndx] = item_width;
2198                                 tuple_width += item_width;
2199                                 continue;
2200                         }
2201                 }
2202
2203                 /*
2204                  * Not a plain relation, or can't find statistics for it. Estimate
2205                  * using just the type info.
2206                  */
2207                 item_width = get_typavgwidth(var->vartype, var->vartypmod);
2208                 Assert(item_width > 0);
2209                 rel->attr_widths[ndx] = item_width;
2210                 tuple_width += item_width;
2211         }
2212         Assert(tuple_width >= 0);
2213         rel->width = tuple_width;
2214 }
2215
2216 /*
2217  * relation_byte_size
2218  *        Estimate the storage space in bytes for a given number of tuples
2219  *        of a given width (size in bytes).
2220  */
2221 static double
2222 relation_byte_size(double tuples, int width)
2223 {
2224         return tuples * (MAXALIGN(width) + MAXALIGN(sizeof(HeapTupleHeaderData)));
2225 }
2226
2227 /*
2228  * page_size
2229  *        Returns an estimate of the number of pages covered by a given
2230  *        number of tuples of a given width (size in bytes).
2231  */
2232 static double
2233 page_size(double tuples, int width)
2234 {
2235         return ceil(relation_byte_size(tuples, width) / BLCKSZ);
2236 }