1 /*-------------------------------------------------------------------------
2  *
3  * costsize.c
4  *        Routines to compute (and set) relation sizes and path costs
5  *
6  * Path costs are measured in arbitrary units established by these basic
7  * parameters:
8  *
9  *      seq_page_cost           Cost of a sequential page fetch
10  *      random_page_cost        Cost of a non-sequential page fetch
11  *      cpu_tuple_cost          Cost of typical CPU time to process a tuple
12  *      cpu_index_tuple_cost  Cost of typical CPU time to process an index tuple
13  *      cpu_operator_cost       Cost of CPU time to execute an operator or function
14  *
15  * We expect that the kernel will typically do some amount of read-ahead
16  * optimization; this in conjunction with seek costs means that seq_page_cost
17  * is normally considerably less than random_page_cost.  (However, if the
18  * database is fully cached in RAM, it is reasonable to set them equal.)
19  *
20  * We also use a rough estimate "effective_cache_size" of the number of
21  * disk pages in Postgres + OS-level disk cache.  (We can't simply use
22  * NBuffers for this purpose because that would ignore the effects of
23  * the kernel's disk cache.)
24  *
25  * Obviously, taking constants for these values is an oversimplification,
26  * but it's tough enough to get any useful estimates even at this level of
27  * detail.      Note that all of these parameters are user-settable, in case
28  * the default values are drastically off for a particular platform.
29  *
30  * We compute two separate costs for each path:
31  *              total_cost: total estimated cost to fetch all tuples
32  *              startup_cost: cost that is expended before first tuple is fetched
33  * In some scenarios, such as when there is a LIMIT or we are implementing
34  * an EXISTS(...) sub-select, it is not necessary to fetch all tuples of the
35  * path's result.  A caller can estimate the cost of fetching a partial
36  * result by interpolating between startup_cost and total_cost.  In detail:
37  *              actual_cost = startup_cost +
38  *                      (total_cost - startup_cost) * tuples_to_fetch / path->parent->rows;
39  * Note that a base relation's rows count (and, by extension, plan_rows for
40  * plan nodes below the LIMIT node) are set without regard to any LIMIT, so
41  * that this equation works properly.  (Also, these routines guarantee not to
42  * set the rows count to zero, so there will be no zero divide.)  The LIMIT is
43  * applied as a top-level plan node.
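 *
 * A worked example with purely hypothetical numbers: for a path with
 * startup_cost = 10.0, total_cost = 110.0 and path->parent->rows = 1000,
 * fetching only the first 100 tuples (say, under a LIMIT 100) would be
 * estimated at
 *              10.0 + (110.0 - 10.0) * 100 / 1000 = 20.0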
44  *
45  * For largely historical reasons, most of the routines in this module use
46  * the passed result Path only to store their startup_cost and total_cost
47  * results into.  All the input data they need is passed as separate
48  * parameters, even though much of it could be extracted from the Path.
49  * An exception is made for the cost_XXXjoin() routines, which expect all
50  * the non-cost fields of the passed XXXPath to be filled in.
51  *
52  *
53  * Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
54  * Portions Copyright (c) 1994, Regents of the University of California
55  *
56  * IDENTIFICATION
57  *        $PostgreSQL: pgsql/src/backend/optimizer/path/costsize.c,v 1.161 2006/07/11 16:35:31 momjian Exp $
58  *
59  *-------------------------------------------------------------------------
60  */
61
62 #include "postgres.h"
63
64 #include <math.h>
65
66 #include "catalog/pg_statistic.h"
67 #include "executor/nodeHash.h"
68 #include "miscadmin.h"
69 #include "optimizer/clauses.h"
70 #include "optimizer/cost.h"
71 #include "optimizer/pathnode.h"
72 #include "optimizer/plancat.h"
73 #include "parser/parsetree.h"
74 #include "utils/array.h"
75 #include "utils/lsyscache.h"
76 #include "utils/selfuncs.h"
77 #include "utils/syscache.h"
78 #include "utils/tuplesort.h"
79
80
81 #define LOG2(x)  (log(x) / 0.693147180559945)
82
83 /*
84  * Some Paths return less than the nominal number of rows of their parent
85  * relations; join nodes need to do this to get the correct input count:
86  */
87 #define PATH_ROWS(path) \
88         (IsA(path, UniquePath) ? \
89          ((UniquePath *) (path))->rows : \
90          (path)->parent->rows)
91
92
93 double          seq_page_cost = DEFAULT_SEQ_PAGE_COST;
94 double          random_page_cost = DEFAULT_RANDOM_PAGE_COST;
95 double          cpu_tuple_cost = DEFAULT_CPU_TUPLE_COST;
96 double          cpu_index_tuple_cost = DEFAULT_CPU_INDEX_TUPLE_COST;
97 double          cpu_operator_cost = DEFAULT_CPU_OPERATOR_COST;
98
99 double          effective_cache_size = DEFAULT_EFFECTIVE_CACHE_SIZE;
100
101 Cost            disable_cost = 100000000.0;
102
103 bool            enable_seqscan = true;
104 bool            enable_indexscan = true;
105 bool            enable_bitmapscan = true;
106 bool            enable_tidscan = true;
107 bool            enable_sort = true;
108 bool            enable_hashagg = true;
109 bool            enable_nestloop = true;
110 bool            enable_mergejoin = true;
111 bool            enable_hashjoin = true;
112
113
114 static bool cost_qual_eval_walker(Node *node, QualCost *total);
115 static Selectivity approx_selectivity(PlannerInfo *root, List *quals,
116                                    JoinType jointype);
117 static Selectivity join_in_selectivity(JoinPath *path, PlannerInfo *root);
118 static void set_rel_width(PlannerInfo *root, RelOptInfo *rel);
119 static double relation_byte_size(double tuples, int width);
120 static double page_size(double tuples, int width);
121
122
123 /*
124  * clamp_row_est
125  *              Force a row-count estimate to a sane value.
126  */
127 double
128 clamp_row_est(double nrows)
129 {
130         /*
131          * Force estimate to be at least one row, to make explain output look
132          * better and to avoid possible divide-by-zero when interpolating costs.
133          * Make it an integer, too.
134          */
135         if (nrows <= 1.0)
136                 nrows = 1.0;
137         else
138                 nrows = rint(nrows);
139
140         return nrows;
141 }
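
/*
 * Usage sketch with made-up inputs: clamp_row_est(0.003) returns 1.0,
 * while clamp_row_est(123.4) is rounded to 123.0.
 */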
142
143
144 /*
145  * cost_seqscan
146  *        Determines and returns the cost of scanning a relation sequentially.
147  */
148 void
149 cost_seqscan(Path *path, PlannerInfo *root,
150                          RelOptInfo *baserel)
151 {
152         Cost            startup_cost = 0;
153         Cost            run_cost = 0;
154         Cost            cpu_per_tuple;
155
156         /* Should only be applied to base relations */
157         Assert(baserel->relid > 0);
158         Assert(baserel->rtekind == RTE_RELATION);
159
160         if (!enable_seqscan)
161                 startup_cost += disable_cost;
162
163         /*
164          * disk costs
165          */
166         run_cost += seq_page_cost * baserel->pages;
167
168         /* CPU costs */
169         startup_cost += baserel->baserestrictcost.startup;
170         cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
171         run_cost += cpu_per_tuple * baserel->tuples;
172
173         path->startup_cost = startup_cost;
174         path->total_cost = startup_cost + run_cost;
175 }
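
/*
 * Worked example (hypothetical table, assuming the default seq_page_cost =
 * 1.0 and cpu_tuple_cost = 0.01): a 100-page, 10000-tuple relation with no
 * restriction clauses costs
 *              run_cost = 1.0 * 100 + 0.01 * 10000 = 200.0
 * so the path gets startup_cost = 0 and total_cost = 200.
 */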
176
177 /*
178  * cost_index
179  *        Determines and returns the cost of scanning a relation using an index.
180  *
181  * 'index' is the index to be used
182  * 'indexQuals' is the list of applicable qual clauses (implicit AND semantics)
183  * 'outer_rel' is the outer relation when we are considering using the index
184  *              scan as the inside of a nestloop join (hence, some of the indexQuals
185  *              are join clauses, and we should expect repeated scans of the index);
186  *              NULL for a plain index scan
187  *
188  * cost_index() takes an IndexPath not just a Path, because it sets a few
189  * additional fields of the IndexPath besides startup_cost and total_cost.
190  * These fields are needed if the IndexPath is used in a BitmapIndexScan.
191  *
192  * NOTE: 'indexQuals' must contain only clauses usable as index restrictions.
193  * Any additional quals evaluated as qpquals may reduce the number of returned
194  * tuples, but they won't reduce the number of tuples we have to fetch from
195  * the table, so they don't reduce the scan cost.
196  *
197  * NOTE: as of 8.0, indexQuals is a list of RestrictInfo nodes, where formerly
198  * it was a list of bare clause expressions.
199  */
200 void
201 cost_index(IndexPath *path, PlannerInfo *root,
202                    IndexOptInfo *index,
203                    List *indexQuals,
204                    RelOptInfo *outer_rel)
205 {
206         RelOptInfo *baserel = index->rel;
207         Cost            startup_cost = 0;
208         Cost            run_cost = 0;
209         Cost            indexStartupCost;
210         Cost            indexTotalCost;
211         Selectivity indexSelectivity;
212         double          indexCorrelation,
213                                 csquared;
214         Cost            min_IO_cost,
215                                 max_IO_cost;
216         Cost            cpu_per_tuple;
217         double          tuples_fetched;
218         double          pages_fetched;
219
220         /* Should only be applied to base relations */
221         Assert(IsA(baserel, RelOptInfo) &&
222                    IsA(index, IndexOptInfo));
223         Assert(baserel->relid > 0);
224         Assert(baserel->rtekind == RTE_RELATION);
225
226         if (!enable_indexscan)
227                 startup_cost += disable_cost;
228
229         /*
230          * Call index-access-method-specific code to estimate the processing cost
231          * for scanning the index, as well as the selectivity of the index (ie,
232          * the fraction of main-table tuples we will have to retrieve) and its
233          * correlation to the main-table tuple order.
234          */
235         OidFunctionCall8(index->amcostestimate,
236                                          PointerGetDatum(root),
237                                          PointerGetDatum(index),
238                                          PointerGetDatum(indexQuals),
239                                          PointerGetDatum(outer_rel),
240                                          PointerGetDatum(&indexStartupCost),
241                                          PointerGetDatum(&indexTotalCost),
242                                          PointerGetDatum(&indexSelectivity),
243                                          PointerGetDatum(&indexCorrelation));
244
245         /*
246          * Save amcostestimate's results for possible use in bitmap scan planning.
247          * We don't bother to save indexStartupCost or indexCorrelation, because a
248          * bitmap scan doesn't care about either.
249          */
250         path->indextotalcost = indexTotalCost;
251         path->indexselectivity = indexSelectivity;
252
253         /* all costs for touching index itself included here */
254         startup_cost += indexStartupCost;
255         run_cost += indexTotalCost - indexStartupCost;
256
257         /* estimate number of main-table tuples fetched */
258         tuples_fetched = clamp_row_est(indexSelectivity * baserel->tuples);
259
260         /*----------
261          * Estimate number of main-table pages fetched, and compute I/O cost.
262          *
263          * When the index ordering is uncorrelated with the table ordering,
264          * we use an approximation proposed by Mackert and Lohman (see
265          * index_pages_fetched() for details) to compute the number of pages
266          * fetched, and then charge random_page_cost per page fetched.
267          *
268          * When the index ordering is exactly correlated with the table ordering
269          * (just after a CLUSTER, for example), the number of pages fetched should
270          * be exactly selectivity * table_size.  What's more, all but the first
271          * will be sequential fetches, not the random fetches that occur in the
272          * uncorrelated case.  So if the number of pages is more than 1, we
273          * ought to charge
274          *              random_page_cost + (pages_fetched - 1) * seq_page_cost
275          * For partially-correlated indexes, we ought to charge somewhere between
276          * these two estimates.  We currently interpolate linearly between the
277          * estimates based on the correlation squared (XXX is that appropriate?).
278          *----------
279          */
280         if (outer_rel != NULL && outer_rel->rows > 1)
281         {
282                 /*
283                  * For repeated indexscans, scale up the number of tuples fetched
284                  * in the Mackert and Lohman formula by the number of scans, so
285                  * that we estimate the number of pages fetched by all the scans.
286                  * Then pro-rate the costs for one scan.  In this case we assume
287                  * all the fetches are random accesses.  XXX it'd be good to
288                  * include correlation in this model, but it's not clear how to do
289                  * that without double-counting cache effects.
290                  */
291                 double          num_scans = outer_rel->rows;
292
293                 pages_fetched = index_pages_fetched(tuples_fetched * num_scans,
294                                                                                         baserel->pages,
295                                                                                         index->pages);
296
297                 run_cost += (pages_fetched * random_page_cost) / num_scans;
298         }
299         else
300         {
301                 /*
302                  * Normal case: apply the Mackert and Lohman formula, and then
303                  * interpolate between that and the correlation-derived result.
304                  */
305                 pages_fetched = index_pages_fetched(tuples_fetched,
306                                                                                         baserel->pages,
307                                                                                         index->pages);
308
309                 /* max_IO_cost is for the perfectly uncorrelated case (csquared=0) */
310                 max_IO_cost = pages_fetched * random_page_cost;
311
312                 /* min_IO_cost is for the perfectly correlated case (csquared=1) */
313                 pages_fetched = ceil(indexSelectivity * (double) baserel->pages);
314                 min_IO_cost = random_page_cost;
315                 if (pages_fetched > 1)
316                         min_IO_cost += (pages_fetched - 1) * seq_page_cost;
317
318                 /*
319                  * Now interpolate based on estimated index order correlation to get
320                  * total disk I/O cost for main table accesses.
321                  */
322                 csquared = indexCorrelation * indexCorrelation;
323
324                 run_cost += max_IO_cost + csquared * (min_IO_cost - max_IO_cost);
325         }
326
327         /*
328          * Estimate CPU costs per tuple.
329          *
330          * Normally the indexquals will be removed from the list of restriction
331          * clauses that we have to evaluate as qpquals, so we should subtract
332          * their costs from baserestrictcost.  But if we are doing a join then
333          * some of the indexquals are join clauses and shouldn't be subtracted.
334          * Rather than work out exactly how much to subtract, we don't subtract
335          * anything.
336          */
337         startup_cost += baserel->baserestrictcost.startup;
338         cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
339
340         if (outer_rel == NULL)
341         {
342                 QualCost        index_qual_cost;
343
344                 cost_qual_eval(&index_qual_cost, indexQuals);
345                 /* any startup cost still has to be paid ... */
346                 cpu_per_tuple -= index_qual_cost.per_tuple;
347         }
348
349         run_cost += cpu_per_tuple * tuples_fetched;
350
351         path->path.startup_cost = startup_cost;
352         path->path.total_cost = startup_cost + run_cost;
353 }
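
/*
 * Worked example of the I/O interpolation above, with made-up numbers and
 * the default random_page_cost = 4.0, seq_page_cost = 1.0: suppose
 * baserel->pages = 1000, the Mackert-Lohman estimate is 300 pages,
 * indexSelectivity = 0.1 and indexCorrelation = 0.5.  Then
 *              max_IO_cost = 300 * 4.0 = 1200.0
 *              min_IO_cost = 4.0 + (100 - 1) * 1.0 = 103.0
 *              csquared = 0.5 * 0.5 = 0.25
 *              I/O cost  = 1200.0 + 0.25 * (103.0 - 1200.0) = 925.75
 * (This covers only the main-table I/O component; index access and CPU
 * costs are added separately, as in the code above.)
 */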
354
355 /*
356  * index_pages_fetched
357  *        Estimate the number of pages actually fetched after accounting for
358  *        cache effects.
359  *
360  * We use an approximation proposed by Mackert and Lohman, "Index Scans
361  * Using a Finite LRU Buffer: A Validated I/O Model", ACM Transactions
362  * on Database Systems, Vol. 14, No. 3, September 1989, Pages 401-424.
363  * The Mackert and Lohman approximation is that the number of pages
364  * fetched is
365  *      PF =
366  *              min(2TNs/(2T+Ns), T)                    when T <= b
367  *              2TNs/(2T+Ns)                                    when T > b and Ns <= 2Tb/(2T-b)
368  *              b + (Ns - 2Tb/(2T-b))*(T-b)/T   when T > b and Ns > 2Tb/(2T-b)
369  * where
370  *              T = # pages in table
371  *              N = # tuples in table
372  *              s = selectivity = fraction of table to be scanned
373  *              b = # buffer pages available (we include kernel space here)
374  *
375  * We assume that effective_cache_size is the total number of buffer pages
376  * available for both table and index, and pro-rate that space between the
377  * table and index.  (Ideally other_pages should include all the other
378  * tables and indexes used by the query too; but we don't have a good way
379  * to get that number here.)
380  *
381  * The product Ns is the number of tuples fetched; we pass in that
382  * product rather than calculating it here.
383  *
384  * Caller is expected to have ensured that tuples_fetched is greater than zero
385  * and rounded to integer (see clamp_row_est).  The result will likewise be
386  * greater than zero and integral.
387  */
388 double
389 index_pages_fetched(double tuples_fetched, BlockNumber pages,
390                                         BlockNumber other_pages)
391 {
392         double          pages_fetched;
393         double          T,
394                                 b;
395
396         /* T is # pages in table, but don't allow it to be zero */
397         T = (pages > 1) ? (double) pages : 1.0;
398
399         /* b is pro-rated share of effective_cache_size */
400         b = effective_cache_size * T / (T + (double) other_pages);
401         /* force it positive and integral */
402         if (b <= 1.0)
403                 b = 1.0;
404         else
405                 b = ceil(b);
406
407         /* This part is the Mackert and Lohman formula */
408         if (T <= b)
409         {
410                 pages_fetched =
411                         (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
412                 if (pages_fetched >= T)
413                         pages_fetched = T;
414                 else
415                         pages_fetched = ceil(pages_fetched);
416         }
417         else
418         {
419                 double          lim;
420
421                 lim = (2.0 * T * b) / (2.0 * T - b);
422                 if (tuples_fetched <= lim)
423                 {
424                         pages_fetched =
425                                 (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
426                 }
427                 else
428                 {
429                         pages_fetched =
430                                 b + (tuples_fetched - lim) * (T - b) / T;
431                 }
432                 pages_fetched = ceil(pages_fetched);
433         }
434         return pages_fetched;
435 }
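
/*
 * Worked example with made-up inputs: for T = 1000 pages and tuples_fetched
 * (Ns) = 500, assuming the pro-rated cache share b is at least T,
 *              PF = 2*1000*500 / (2*1000 + 500) = 400 pages
 * If instead b = 100 (a much smaller cache), lim = 2*1000*100 / (2*1000 - 100)
 * = 105.26, so Ns > lim and
 *              PF = 100 + (500 - 105.26) * (1000 - 100) / 1000 = 455.26
 * rounded up to 456 pages -- the smaller cache forces some re-reads.
 */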
436
437 /*
438  * cost_bitmap_heap_scan
439  *        Determines and returns the cost of scanning a relation using a bitmap
440  *        index-then-heap plan.
441  *
442  * 'baserel' is the relation to be scanned
443  * 'bitmapqual' is a tree of IndexPaths, BitmapAndPaths, and BitmapOrPaths
444  *
445  * Note: we take no explicit notice here of whether this is a join inner path.
446  * If it is, the component IndexPaths should have been costed accordingly.
447  */
448 void
449 cost_bitmap_heap_scan(Path *path, PlannerInfo *root, RelOptInfo *baserel,
450                                           Path *bitmapqual)
451 {
452         Cost            startup_cost = 0;
453         Cost            run_cost = 0;
454         Cost            indexTotalCost;
455         Selectivity indexSelectivity;
456         Cost            cpu_per_tuple;
457         Cost            cost_per_page;
458         double          tuples_fetched;
459         double          pages_fetched;
460         double          T;
461
462         /* Should only be applied to base relations */
463         Assert(IsA(baserel, RelOptInfo));
464         Assert(baserel->relid > 0);
465         Assert(baserel->rtekind == RTE_RELATION);
466
467         if (!enable_bitmapscan)
468                 startup_cost += disable_cost;
469
470         /*
471          * Fetch total cost of obtaining the bitmap, as well as its total
472          * selectivity.
473          */
474         cost_bitmap_tree_node(bitmapqual, &indexTotalCost, &indexSelectivity);
475
476         startup_cost += indexTotalCost;
477
478         /*
479          * The number of heap pages that need to be fetched is the same as the
480          * Mackert and Lohman formula for the case T <= b (ie, no re-reads
481          * needed).
482          */
483         tuples_fetched = clamp_row_est(indexSelectivity * baserel->tuples);
484
485         T = (baserel->pages > 1) ? (double) baserel->pages : 1.0;
486         pages_fetched = (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
487         if (pages_fetched >= T)
488                 pages_fetched = T;
489         else
490                 pages_fetched = ceil(pages_fetched);
491
492         /*
493          * For small numbers of pages we should charge random_page_cost apiece,
494          * while if nearly all the table's pages are being read, it's more
495          * appropriate to charge seq_page_cost apiece.  The effect is nonlinear,
496          * too. For lack of a better idea, interpolate like this to determine the
497          * cost per page.
498          */
499         if (pages_fetched >= 2.0)
500                 cost_per_page = random_page_cost -
501                         (random_page_cost - seq_page_cost) * sqrt(pages_fetched / T);
502         else
503                 cost_per_page = random_page_cost;
504
505         run_cost += pages_fetched * cost_per_page;
506
507         /*
508          * Estimate CPU costs per tuple.
509          *
510          * Often the indexquals don't need to be rechecked at each tuple ... but
511          * not always, especially not if there are enough tuples involved that the
512          * bitmaps become lossy.  For the moment, just assume they will be
513          * rechecked always.
514          */
515         startup_cost += baserel->baserestrictcost.startup;
516         cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
517
518         run_cost += cpu_per_tuple * tuples_fetched;
519
520         path->startup_cost = startup_cost;
521         path->total_cost = startup_cost + run_cost;
522 }
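
/*
 * Worked example of the per-page interpolation, with made-up numbers and
 * the default page costs (random 4.0, sequential 1.0): for pages_fetched =
 * 400 out of T = 1000,
 *              cost_per_page = 4.0 - (4.0 - 1.0) * sqrt(400/1000) = 2.10
 * giving roughly 841 for the heap fetches, versus 1600 if every page were
 * charged at random_page_cost and 400 if all were sequential.
 */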
523
524 /*
525  * cost_bitmap_tree_node
526  *              Extract cost and selectivity from a bitmap tree node (index/and/or)
527  */
528 void
529 cost_bitmap_tree_node(Path *path, Cost *cost, Selectivity *selec)
530 {
531         if (IsA(path, IndexPath))
532         {
533                 *cost = ((IndexPath *) path)->indextotalcost;
534                 *selec = ((IndexPath *) path)->indexselectivity;
535         }
536         else if (IsA(path, BitmapAndPath))
537         {
538                 *cost = path->total_cost;
539                 *selec = ((BitmapAndPath *) path)->bitmapselectivity;
540         }
541         else if (IsA(path, BitmapOrPath))
542         {
543                 *cost = path->total_cost;
544                 *selec = ((BitmapOrPath *) path)->bitmapselectivity;
545         }
546         else
547                 elog(ERROR, "unrecognized node type: %d", nodeTag(path));
548 }
549
550 /*
551  * cost_bitmap_and_node
552  *              Estimate the cost of a BitmapAnd node
553  *
554  * Note that this considers only the costs of index scanning and bitmap
555  * creation, not the eventual heap access.      In that sense the object isn't
556  * truly a Path, but it has enough path-like properties (costs in particular)
557  * to warrant treating it as one.
558  */
559 void
560 cost_bitmap_and_node(BitmapAndPath *path, PlannerInfo *root)
561 {
562         Cost            totalCost;
563         Selectivity selec;
564         ListCell   *l;
565
566         /*
567          * We estimate AND selectivity on the assumption that the inputs are
568          * independent.  This is probably often wrong, but we don't have the info
569          * to do better.
570          *
571          * The runtime cost of the BitmapAnd itself is estimated at 100x
572          * cpu_operator_cost for each tbm_intersect needed.  Probably too small,
573          * definitely too simplistic?
574          */
575         totalCost = 0.0;
576         selec = 1.0;
577         foreach(l, path->bitmapquals)
578         {
579                 Path       *subpath = (Path *) lfirst(l);
580                 Cost            subCost;
581                 Selectivity subselec;
582
583                 cost_bitmap_tree_node(subpath, &subCost, &subselec);
584
585                 selec *= subselec;
586
587                 totalCost += subCost;
588                 if (l != list_head(path->bitmapquals))
589                         totalCost += 100.0 * cpu_operator_cost;
590         }
591         path->bitmapselectivity = selec;
592         path->path.startup_cost = totalCost;
593         path->path.total_cost = totalCost;
594 }
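
/*
 * Worked example with made-up inputs: two bitmap index scans with costs
 * 25.0 and 40.0 and selectivities 0.1 and 0.2 give
 *              bitmapselectivity = 0.1 * 0.2 = 0.02
 *              totalCost = 25.0 + 40.0 + 100 * cpu_operator_cost
 * which is 65.25 with the default cpu_operator_cost of 0.0025 (only one
 * tbm_intersect is charged, since the first input needs none).
 */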
595
596 /*
597  * cost_bitmap_or_node
598  *              Estimate the cost of a BitmapOr node
599  *
600  * See comments for cost_bitmap_and_node.
601  */
602 void
603 cost_bitmap_or_node(BitmapOrPath *path, PlannerInfo *root)
604 {
605         Cost            totalCost;
606         Selectivity selec;
607         ListCell   *l;
608
609         /*
610          * We estimate OR selectivity on the assumption that the inputs are
611          * non-overlapping, since that's often the case in "x IN (list)" type
612          * situations.  Of course, we clamp to 1.0 at the end.
613          *
614          * The runtime cost of the BitmapOr itself is estimated at 100x
615          * cpu_operator_cost for each tbm_union needed.  Probably too small,
616          * definitely too simplistic?  We are aware that the tbm_unions are
617          * optimized out when the inputs are BitmapIndexScans.
618          */
619         totalCost = 0.0;
620         selec = 0.0;
621         foreach(l, path->bitmapquals)
622         {
623                 Path       *subpath = (Path *) lfirst(l);
624                 Cost            subCost;
625                 Selectivity subselec;
626
627                 cost_bitmap_tree_node(subpath, &subCost, &subselec);
628
629                 selec += subselec;
630
631                 totalCost += subCost;
632                 if (l != list_head(path->bitmapquals) &&
633                         !IsA(subpath, IndexPath))
634                         totalCost += 100.0 * cpu_operator_cost;
635         }
636         path->bitmapselectivity = Min(selec, 1.0);
637         path->path.startup_cost = totalCost;
638         path->path.total_cost = totalCost;
639 }
640
641 /*
642  * cost_tidscan
643  *        Determines and returns the cost of scanning a relation using TIDs.
644  */
645 void
646 cost_tidscan(Path *path, PlannerInfo *root,
647                          RelOptInfo *baserel, List *tidquals)
648 {
649         Cost            startup_cost = 0;
650         Cost            run_cost = 0;
651         Cost            cpu_per_tuple;
652         int                     ntuples;
653         ListCell   *l;
654
655         /* Should only be applied to base relations */
656         Assert(baserel->relid > 0);
657         Assert(baserel->rtekind == RTE_RELATION);
658
659         if (!enable_tidscan)
660                 startup_cost += disable_cost;
661
662         /* Count how many tuples we expect to retrieve */
663         ntuples = 0;
664         foreach(l, tidquals)
665         {
666                 if (IsA(lfirst(l), ScalarArrayOpExpr))
667                 {
668                         /* Each element of the array yields 1 tuple */
669                         ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) lfirst(l);
670                         Node *arraynode = (Node *) lsecond(saop->args);
671
672                         ntuples += estimate_array_length(arraynode);
673                 }
674                 else
675                 {
676                         /* It's just CTID = something, count 1 tuple */
677                         ntuples++;
678                 }
679         }
680
681         /* disk costs --- assume each tuple on a different page */
682         run_cost += random_page_cost * ntuples;
683
684         /* CPU costs */
685         startup_cost += baserel->baserestrictcost.startup;
686         cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
687         run_cost += cpu_per_tuple * ntuples;
688
689         path->startup_cost = startup_cost;
690         path->total_cost = startup_cost + run_cost;
691 }
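
/*
 * Worked example with made-up quals: for "ctid = '(0,1)' OR ctid = ANY(arr)"
 * where the array is estimated at 3 elements, ntuples = 1 + 3 = 4.  With the
 * default random_page_cost = 4.0 and cpu_tuple_cost = 0.01 and no extra
 * restriction clauses, total_cost = 4 * 4.0 + 4 * 0.01 = 16.04.
 */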
692
693 /*
694  * cost_subqueryscan
695  *        Determines and returns the cost of scanning a subquery RTE.
696  */
697 void
698 cost_subqueryscan(Path *path, RelOptInfo *baserel)
699 {
700         Cost            startup_cost;
701         Cost            run_cost;
702         Cost            cpu_per_tuple;
703
704         /* Should only be applied to base relations that are subqueries */
705         Assert(baserel->relid > 0);
706         Assert(baserel->rtekind == RTE_SUBQUERY);
707
708         /*
709          * Cost of path is cost of evaluating the subplan, plus cost of evaluating
710          * any restriction clauses that will be attached to the SubqueryScan node,
711          * plus cpu_tuple_cost to account for selection and projection overhead.
712          */
713         path->startup_cost = baserel->subplan->startup_cost;
714         path->total_cost = baserel->subplan->total_cost;
715
716         startup_cost = baserel->baserestrictcost.startup;
717         cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
718         run_cost = cpu_per_tuple * baserel->tuples;
719
720         path->startup_cost += startup_cost;
721         path->total_cost += startup_cost + run_cost;
722 }
723
724 /*
725  * cost_functionscan
726  *        Determines and returns the cost of scanning a function RTE.
727  */
728 void
729 cost_functionscan(Path *path, PlannerInfo *root, RelOptInfo *baserel)
730 {
731         Cost            startup_cost = 0;
732         Cost            run_cost = 0;
733         Cost            cpu_per_tuple;
734
735         /* Should only be applied to base relations that are functions */
736         Assert(baserel->relid > 0);
737         Assert(baserel->rtekind == RTE_FUNCTION);
738
739         /*
740          * For now, estimate function's cost at one operator eval per function
741          * call.  Someday we should revive the function cost estimate columns in
742          * pg_proc...
743          */
744         cpu_per_tuple = cpu_operator_cost;
745
746         /* Add scanning CPU costs */
747         startup_cost += baserel->baserestrictcost.startup;
748         cpu_per_tuple += cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
749         run_cost += cpu_per_tuple * baserel->tuples;
750
751         path->startup_cost = startup_cost;
752         path->total_cost = startup_cost + run_cost;
753 }
754
755 /*
756  * cost_sort
757  *        Determines and returns the cost of sorting a relation, including
758  *        the cost of reading the input data.
759  *
760  * If the total volume of data to sort is less than work_mem, we will do
761  * an in-memory sort, which requires no I/O and about t*log2(t) tuple
762  * comparisons for t tuples.
763  *
764  * If the total volume exceeds work_mem, we switch to a tape-style merge
765  * algorithm.  There will still be about t*log2(t) tuple comparisons in
766  * total, but we will also need to write and read each tuple once per
767  * merge pass.  We expect about ceil(logM(r)) merge passes where r is the
768  * number of initial runs formed and M is the merge order used by tuplesort.c.
769  * Since the average initial run should be about twice work_mem, we have
770  *              disk traffic = 2 * relsize * ceil(logM(relsize / (2*work_mem)))
771  *              cpu = comparison_cost * t * log2(t)
772  *
773  * The disk traffic is assumed to be 3/4ths sequential and 1/4th random
774  * accesses (XXX can't we refine that guess?)
775  *
776  * We charge two operator evals per tuple comparison, which should be in
777  * the right ballpark in most cases.
778  *
779  * 'pathkeys' is a list of sort keys
780  * 'input_cost' is the total cost for reading the input data
781  * 'tuples' is the number of tuples in the relation
782  * 'width' is the average tuple width in bytes
783  *
784  * NOTE: some callers currently pass NIL for pathkeys because they
785  * can't conveniently supply the sort keys.  Since this routine doesn't
786  * currently do anything with pathkeys anyway, that doesn't matter...
787  * but if it ever does, it should react gracefully to lack of key data.
788  * (Actually, the thing we'd most likely be interested in is just the number
789  * of sort keys, which all callers *could* supply.)
790  */
791 void
792 cost_sort(Path *path, PlannerInfo *root,
793                   List *pathkeys, Cost input_cost, double tuples, int width)
794 {
795         Cost            startup_cost = input_cost;
796         Cost            run_cost = 0;
797         double          nbytes = relation_byte_size(tuples, width);
798         long            work_mem_bytes = work_mem * 1024L;
799
800         if (!enable_sort)
801                 startup_cost += disable_cost;
802
803         /*
804          * We want to be sure the cost of a sort is never estimated as zero, even
805          * if passed-in tuple count is zero.  Besides, mustn't do log(0)...
806          */
807         if (tuples < 2.0)
808                 tuples = 2.0;
809
810         /*
811          * CPU costs
812          *
813          * Assume about two operator evals per tuple comparison and N log2 N
814          * comparisons
815          */
816         startup_cost += 2.0 * cpu_operator_cost * tuples * LOG2(tuples);
817
818         /* disk costs */
819         if (nbytes > work_mem_bytes)
820         {
821                 double          npages = ceil(nbytes / BLCKSZ);
822                 double          nruns = (nbytes / work_mem_bytes) * 0.5;
823                 double          mergeorder = tuplesort_merge_order(work_mem_bytes);
824                 double          log_runs;
825                 double          npageaccesses;
826
827                 /* Compute logM(r) as log(r) / log(M) */
828                 if (nruns > mergeorder)
829                         log_runs = ceil(log(nruns) / log(mergeorder));
830                 else
831                         log_runs = 1.0;
832                 npageaccesses = 2.0 * npages * log_runs;
833                 /* Assume 3/4ths of accesses are sequential, 1/4th are not */
834                 startup_cost += npageaccesses *
835                         (seq_page_cost * 0.75 + random_page_cost * 0.25);
836         }
837
838         /*
839          * Also charge a small amount (arbitrarily set equal to operator cost) per
840          * extracted tuple.
841          */
842         run_cost += cpu_operator_cost * tuples;
843
844         path->startup_cost = startup_cost;
845         path->total_cost = startup_cost + run_cost;
846 }
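
/*
 * Worked example of the external-sort disk charge, with made-up numbers:
 * suppose nbytes is about 128 MB and work_mem is 16 MB, so npages = 16384
 * and nruns = (128/16) * 0.5 = 4.  Assuming tuplesort_merge_order() returns
 * more than 4 at that work_mem setting, log_runs = 1 and
 *              npageaccesses = 2 * 16384 * 1 = 32768
 *              disk cost = 32768 * (1.0 * 0.75 + 4.0 * 0.25) = 57344
 * with the default page costs.  The CPU term for, say, a million input
 * tuples adds about 2 * 0.0025 * 1e6 * log2(1e6) = 99658 to startup_cost.
 */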
847
848 /*
849  * cost_material
850  *        Determines and returns the cost of materializing a relation, including
851  *        the cost of reading the input data.
852  *
853  * If the total volume of data to materialize exceeds work_mem, we will need
854  * to write it to disk, so the cost is much higher in that case.
855  */
856 void
857 cost_material(Path *path,
858                           Cost input_cost, double tuples, int width)
859 {
860         Cost            startup_cost = input_cost;
861         Cost            run_cost = 0;
862         double          nbytes = relation_byte_size(tuples, width);
863         long            work_mem_bytes = work_mem * 1024L;
864
865         /* disk costs */
866         if (nbytes > work_mem_bytes)
867         {
868                 double          npages = ceil(nbytes / BLCKSZ);
869
870                 /* We'll write during startup and read during retrieval */
871                 startup_cost += seq_page_cost * npages;
872                 run_cost += seq_page_cost * npages;
873         }
874
875         /*
876          * Charge a very small amount per inserted tuple, to reflect bookkeeping
877          * costs.  We use cpu_tuple_cost/10 for this.  This is needed to break the
878          * tie that would otherwise exist between nestloop with A outer,
879          * materialized B inner and nestloop with B outer, materialized A inner.
880          * The extra cost ensures we'll prefer materializing the smaller rel.
881          */
882         startup_cost += cpu_tuple_cost * 0.1 * tuples;
883
884         /*
885          * Also charge a small amount per extracted tuple.      We use cpu_tuple_cost
886          * so that it doesn't appear worthwhile to materialize a bare seqscan.
887          */
888         run_cost += cpu_tuple_cost * tuples;
889
890         path->startup_cost = startup_cost;
891         path->total_cost = startup_cost + run_cost;
892 }
893
894 /*
895  * cost_agg
896  *              Determines and returns the cost of performing an Agg plan node,
897  *              including the cost of its input.
898  *
899  * Note: when aggstrategy == AGG_SORTED, caller must ensure that input costs
900  * are for appropriately-sorted input.
901  */
902 void
903 cost_agg(Path *path, PlannerInfo *root,
904                  AggStrategy aggstrategy, int numAggs,
905                  int numGroupCols, double numGroups,
906                  Cost input_startup_cost, Cost input_total_cost,
907                  double input_tuples)
908 {
909         Cost            startup_cost;
910         Cost            total_cost;
911
912         /*
913          * We charge one cpu_operator_cost per aggregate function per input tuple,
914          * and another one per output tuple (corresponding to transfn and finalfn
915          * calls respectively).  If we are grouping, we charge an additional
916          * cpu_operator_cost per grouping column per input tuple for grouping
917          * comparisons.
918          *
919          * We will produce a single output tuple if not grouping, and a tuple per
920          * group otherwise.  We charge cpu_tuple_cost for each output tuple.
921          *
922          * Note: in this cost model, AGG_SORTED and AGG_HASHED have exactly the
923          * same total CPU cost, but AGG_SORTED has lower startup cost.  If the
924          * input path is already sorted appropriately, AGG_SORTED should be
925          * preferred (since it has no risk of memory overflow).  This will happen
926          * as long as the computed total costs are indeed exactly equal --- but if
927          * there's roundoff error we might do the wrong thing.  So be sure that
928          * the computations below form the same intermediate values in the same
929          * order.
930          */
931         if (aggstrategy == AGG_PLAIN)
932         {
933                 startup_cost = input_total_cost;
934                 startup_cost += cpu_operator_cost * (input_tuples + 1) * numAggs;
935                 /* we aren't grouping */
936                 total_cost = startup_cost + cpu_tuple_cost;
937         }
938         else if (aggstrategy == AGG_SORTED)
939         {
940                 /* Here we are able to deliver output on-the-fly */
941                 startup_cost = input_startup_cost;
942                 total_cost = input_total_cost;
943                 /* calcs phrased this way to match HASHED case, see note above */
944                 total_cost += cpu_operator_cost * input_tuples * numGroupCols;
945                 total_cost += cpu_operator_cost * input_tuples * numAggs;
946                 total_cost += cpu_operator_cost * numGroups * numAggs;
947                 total_cost += cpu_tuple_cost * numGroups;
948         }
949         else
950         {
951                 /* must be AGG_HASHED */
952                 startup_cost = input_total_cost;
953                 startup_cost += cpu_operator_cost * input_tuples * numGroupCols;
954                 startup_cost += cpu_operator_cost * input_tuples * numAggs;
955                 total_cost = startup_cost;
956                 total_cost += cpu_operator_cost * numGroups * numAggs;
957                 total_cost += cpu_tuple_cost * numGroups;
958         }
959
960         path->startup_cost = startup_cost;
961         path->total_cost = total_cost;
962 }
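
/*
 * Worked example with made-up numbers (default cpu_operator_cost = 0.0025,
 * cpu_tuple_cost = 0.01): input_tuples = 10000, numGroupCols = 1,
 * numAggs = 1, numGroups = 100, input costs 0 (startup) and 500 (total).
 *              AGG_SORTED: startup = 0,   total = 500 + 25 + 25 + 0.25 + 1 = 551.25
 *              AGG_HASHED: startup = 550, total = 550 + 0.25 + 1 = 551.25
 * illustrating the note above: identical totals, but AGG_SORTED can start
 * returning groups immediately.
 */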
963
964 /*
965  * cost_group
966  *              Determines and returns the cost of performing a Group plan node,
967  *              including the cost of its input.
968  *
969  * Note: caller must ensure that input costs are for appropriately-sorted
970  * input.
971  */
972 void
973 cost_group(Path *path, PlannerInfo *root,
974                    int numGroupCols, double numGroups,
975                    Cost input_startup_cost, Cost input_total_cost,
976                    double input_tuples)
977 {
978         Cost            startup_cost;
979         Cost            total_cost;
980
981         startup_cost = input_startup_cost;
982         total_cost = input_total_cost;
983
984         /*
985          * Charge one cpu_operator_cost per comparison per input tuple. We assume
986          * all columns get compared for most of the tuples.
987          */
988         total_cost += cpu_operator_cost * input_tuples * numGroupCols;
989
990         path->startup_cost = startup_cost;
991         path->total_cost = total_cost;
992 }
993
994 /*
995  * If a nestloop's inner path is an indexscan, be sure to use its estimated
996  * output row count, which may be lower than the restriction-clause-only row
997  * count of its parent.  (We don't include this case in the PATH_ROWS macro
998  * because it applies *only* to a nestloop's inner relation.)  We have to
999  * be prepared to recurse through Append nodes in case of an appendrel.
1000  */
1001 static double
1002 nestloop_inner_path_rows(Path *path)
1003 {
1004         double          result;
1005
1006         if (IsA(path, IndexPath))
1007                 result = ((IndexPath *) path)->rows;
1008         else if (IsA(path, BitmapHeapPath))
1009                 result = ((BitmapHeapPath *) path)->rows;
1010         else if (IsA(path, AppendPath))
1011         {
1012                 ListCell   *l;
1013
1014                 result = 0;
1015                 foreach(l, ((AppendPath *) path)->subpaths)
1016                 {
1017                         result += nestloop_inner_path_rows((Path *) lfirst(l));
1018                 }
1019         }
1020         else
1021                 result = PATH_ROWS(path);
1022
1023         return result;
1024 }
1025
1026 /*
1027  * cost_nestloop
1028  *        Determines and returns the cost of joining two relations using the
1029  *        nested loop algorithm.
1030  *
1031  * 'path' is already filled in except for the cost fields
1032  */
1033 void
1034 cost_nestloop(NestPath *path, PlannerInfo *root)
1035 {
1036         Path       *outer_path = path->outerjoinpath;
1037         Path       *inner_path = path->innerjoinpath;
1038         Cost            startup_cost = 0;
1039         Cost            run_cost = 0;
1040         Cost            cpu_per_tuple;
1041         QualCost        restrict_qual_cost;
1042         double          outer_path_rows = PATH_ROWS(outer_path);
1043         double          inner_path_rows = nestloop_inner_path_rows(inner_path);
1044         double          ntuples;
1045         Selectivity joininfactor;
1046
1047         if (!enable_nestloop)
1048                 startup_cost += disable_cost;
1049
1050         /*
1051          * If we're doing JOIN_IN then we will stop scanning inner tuples for an
1052          * outer tuple as soon as we have one match.  Account for the effects of
1053          * this by scaling down the cost estimates in proportion to the JOIN_IN
1054          * selectivity.  (This assumes that all the quals attached to the join are
1055          * IN quals, which should be true.)
1056          */
1057         joininfactor = join_in_selectivity(path, root);
1058
1059         /* cost of source data */
1060
1061         /*
1062          * NOTE: clearly, we must pay both outer and inner paths' startup_cost
1063          * before we can start returning tuples, so the join's startup cost is
1064          * their sum.  What's not so clear is whether the inner path's
1065          * startup_cost must be paid again on each rescan of the inner path. This
1066          * is not true if the inner path is materialized or is a hashjoin, but
1067          * probably is true otherwise.
1068          */
1069         startup_cost += outer_path->startup_cost + inner_path->startup_cost;
1070         run_cost += outer_path->total_cost - outer_path->startup_cost;
1071         if (IsA(inner_path, MaterialPath) ||
1072                 IsA(inner_path, HashPath))
1073         {
1074                 /* charge only run cost for each iteration of inner path */
1075         }
1076         else
1077         {
1078                 /*
1079                  * charge startup cost for each iteration of inner path, except we
1080                  * already charged the first startup_cost in our own startup
1081                  */
1082                 run_cost += (outer_path_rows - 1) * inner_path->startup_cost;
1083         }
1084         run_cost += outer_path_rows *
1085                 (inner_path->total_cost - inner_path->startup_cost) * joininfactor;
1086
1087         /*
1088          * Compute number of tuples processed (not number emitted!)
1089          */
1090         ntuples = outer_path_rows * inner_path_rows * joininfactor;
1091
1092         /* CPU costs */
1093         cost_qual_eval(&restrict_qual_cost, path->joinrestrictinfo);
1094         startup_cost += restrict_qual_cost.startup;
1095         cpu_per_tuple = cpu_tuple_cost + restrict_qual_cost.per_tuple;
1096         run_cost += cpu_per_tuple * ntuples;
1097
1098         path->path.startup_cost = startup_cost;
1099         path->path.total_cost = startup_cost + run_cost;
1100 }
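
/*
 * Worked example with made-up inputs: outer path (startup 0, total 100,
 * 100 rows), non-materialized inner path (startup 0.25, total 5.25, 10 rows),
 * joininfactor = 1.0 and restriction quals with negligible eval cost:
 *              startup_cost = 0 + 0.25 = 0.25
 *              run_cost = 100 + 99 * 0.25 + 100 * 5.0 + 0.01 * 100 * 10 = 634.75
 * for a total_cost of 635.0.  Note the 99 * 0.25 term: the inner path's
 * startup cost is paid again on every rescan.
 */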
1101
1102 /*
1103  * cost_mergejoin
1104  *        Determines and returns the cost of joining two relations using the
1105  *        merge join algorithm.
1106  *
1107  * 'path' is already filled in except for the cost fields
1108  *
1109  * Notes: path's mergeclauses should be a subset of the joinrestrictinfo list;
1110  * outersortkeys and innersortkeys are lists of the keys to be used
1111  * to sort the outer and inner relations, or NIL if no explicit
1112  * sort is needed because the source path is already ordered.
1113  */
1114 void
1115 cost_mergejoin(MergePath *path, PlannerInfo *root)
1116 {
1117         Path       *outer_path = path->jpath.outerjoinpath;
1118         Path       *inner_path = path->jpath.innerjoinpath;
1119         List       *mergeclauses = path->path_mergeclauses;
1120         List       *outersortkeys = path->outersortkeys;
1121         List       *innersortkeys = path->innersortkeys;
1122         Cost            startup_cost = 0;
1123         Cost            run_cost = 0;
1124         Cost            cpu_per_tuple;
1125         Selectivity merge_selec;
1126         QualCost        merge_qual_cost;
1127         QualCost        qp_qual_cost;
1128         RestrictInfo *firstclause;
1129         double          outer_path_rows = PATH_ROWS(outer_path);
1130         double          inner_path_rows = PATH_ROWS(inner_path);
1131         double          outer_rows,
1132                                 inner_rows;
1133         double          mergejointuples,
1134                                 rescannedtuples;
1135         double          rescanratio;
1136         Selectivity outerscansel,
1137                                 innerscansel;
1138         Selectivity joininfactor;
1139         Path            sort_path;              /* dummy for result of cost_sort */
1140
1141         if (!enable_mergejoin)
1142                 startup_cost += disable_cost;
1143
1144         /*
1145          * Compute cost and selectivity of the mergequals and qpquals (other
1146          * restriction clauses) separately.  We use approx_selectivity here for
1147          * speed --- in most cases, any errors won't affect the result much.
1148          *
1149          * Note: it's probably bogus to use the normal selectivity calculation
1150          * here when either the outer or inner path is a UniquePath.
1151          */
1152         merge_selec = approx_selectivity(root, mergeclauses,
1153                                                                          path->jpath.jointype);
1154         cost_qual_eval(&merge_qual_cost, mergeclauses);
1155         cost_qual_eval(&qp_qual_cost, path->jpath.joinrestrictinfo);
1156         qp_qual_cost.startup -= merge_qual_cost.startup;
1157         qp_qual_cost.per_tuple -= merge_qual_cost.per_tuple;
1158
1159         /* approx # tuples passing the merge quals */
1160         mergejointuples = clamp_row_est(merge_selec * outer_path_rows * inner_path_rows);
1161
1162         /*
1163          * When there are equal merge keys in the outer relation, the mergejoin
1164          * must rescan any matching tuples in the inner relation. This means
1165          * re-fetching inner tuples.  Our cost model for this is that a re-fetch
1166          * costs the same as an original fetch, which is probably an overestimate;
1167          * but on the other hand we ignore the bookkeeping costs of mark/restore.
1168          * Not clear if it's worth developing a more refined model.
1169          *
1170          * The number of re-fetches can be estimated approximately as size of
1171          * merge join output minus size of inner relation.      Assume that the
1172          * distinct key values are 1, 2, ..., and denote the number of values of
1173          * each key in the outer relation as m1, m2, ...; in the inner relation,
1174          * n1, n2, ... Then we have
1175          *
1176          * size of join = m1 * n1 + m2 * n2 + ...
1177          *
1178          * number of rescanned tuples = (m1 - 1) * n1 + (m2 - 1) * n2 + ... = m1 *
1179          * n1 + m2 * n2 + ... - (n1 + n2 + ...) = size of join - size of inner
1180          * relation
1181          *
1182          * This equation works correctly for outer tuples having no inner match
1183          * (nk = 0), but not for inner tuples having no outer match (mk = 0); we
1184          * are effectively subtracting those from the number of rescanned tuples,
1185          * when we should not.  Can we do better without expensive selectivity
1186          * computations?
1187          */
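        /*
         * Worked example with made-up key counts: if the outer relation has
         * m = {2, 3} rows per key value and the inner has n = {4, 1}, then
         * size of join = 2*4 + 3*1 = 11, size of inner = 5, and
         * rescannedtuples = 11 - 5 = 6 = (2-1)*4 + (3-1)*1.
         */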
1188         if (IsA(outer_path, UniquePath))
1189                 rescannedtuples = 0;
1190         else
1191         {
1192                 rescannedtuples = mergejointuples - inner_path_rows;
1193                 /* Must clamp because of possible underestimate */
1194                 if (rescannedtuples < 0)
1195                         rescannedtuples = 0;
1196         }
1197         /* We'll inflate inner run cost this much to account for rescanning */
1198         rescanratio = 1.0 + (rescannedtuples / inner_path_rows);
1199
1200         /*
1201          * A merge join will stop as soon as it exhausts either input stream
1202          * (unless it's an outer join, in which case the outer side has to be
1203          * scanned all the way anyway).  Estimate fraction of the left and right
1204          * inputs that will actually need to be scanned. We use only the first
1205          * (most significant) merge clause for this purpose.
1206          *
1207          * Since this calculation is somewhat expensive, and will be the same for
1208          * all mergejoin paths associated with the merge clause, we cache the
1209          * results in the RestrictInfo node.
1210          */
1211         if (mergeclauses && path->jpath.jointype != JOIN_FULL)
1212         {
1213                 firstclause = (RestrictInfo *) linitial(mergeclauses);
1214                 if (firstclause->left_mergescansel < 0) /* not computed yet? */
1215                         mergejoinscansel(root, (Node *) firstclause->clause,
1216                                                          &firstclause->left_mergescansel,
1217                                                          &firstclause->right_mergescansel);
1218
1219                 if (bms_is_subset(firstclause->left_relids, outer_path->parent->relids))
1220                 {
1221                         /* left side of clause is outer */
1222                         outerscansel = firstclause->left_mergescansel;
1223                         innerscansel = firstclause->right_mergescansel;
1224                 }
1225                 else
1226                 {
1227                         /* left side of clause is inner */
1228                         outerscansel = firstclause->right_mergescansel;
1229                         innerscansel = firstclause->left_mergescansel;
1230                 }
1231                 if (path->jpath.jointype == JOIN_LEFT)
1232                         outerscansel = 1.0;
1233                 else if (path->jpath.jointype == JOIN_RIGHT)
1234                         innerscansel = 1.0;
1235         }
1236         else
1237         {
1238                 /* cope with clauseless or full mergejoin */
1239                 outerscansel = innerscansel = 1.0;
1240         }
1241
1242         /* convert selectivity to row count; must scan at least one row */
1243         outer_rows = clamp_row_est(outer_path_rows * outerscansel);
1244         inner_rows = clamp_row_est(inner_path_rows * innerscansel);
1245
1246         /*
1247          * Readjust scan selectivities to account for above rounding.  This is
1248          * normally an insignificant effect, but when there are only a few rows in
1249          * the inputs, failing to do this makes for a large percentage error.
1250          */
1251         outerscansel = outer_rows / outer_path_rows;
1252         innerscansel = inner_rows / inner_path_rows;
1253
1254         /* cost of source data */
1255
1256         if (outersortkeys)                      /* do we need to sort outer? */
1257         {
1258                 cost_sort(&sort_path,
1259                                   root,
1260                                   outersortkeys,
1261                                   outer_path->total_cost,
1262                                   outer_path_rows,
1263                                   outer_path->parent->width);
1264                 startup_cost += sort_path.startup_cost;
1265                 run_cost += (sort_path.total_cost - sort_path.startup_cost)
1266                         * outerscansel;
1267         }
1268         else
1269         {
1270                 startup_cost += outer_path->startup_cost;
1271                 run_cost += (outer_path->total_cost - outer_path->startup_cost)
1272                         * outerscansel;
1273         }
1274
1275         if (innersortkeys)                      /* do we need to sort inner? */
1276         {
1277                 cost_sort(&sort_path,
1278                                   root,
1279                                   innersortkeys,
1280                                   inner_path->total_cost,
1281                                   inner_path_rows,
1282                                   inner_path->parent->width);
1283                 startup_cost += sort_path.startup_cost;
1284                 run_cost += (sort_path.total_cost - sort_path.startup_cost)
1285                         * innerscansel * rescanratio;
1286         }
1287         else
1288         {
1289                 startup_cost += inner_path->startup_cost;
1290                 run_cost += (inner_path->total_cost - inner_path->startup_cost)
1291                         * innerscansel * rescanratio;
1292         }
1293
1294         /* CPU costs */
1295
1296         /*
1297          * If we're doing JOIN_IN then we will stop outputting inner tuples for an
1298          * outer tuple as soon as we have one match.  Account for the effects of
1299          * this by scaling down the cost estimates in proportion to the expected
1300          * output size.  (This assumes that all the quals attached to the join are
1301          * IN quals, which should be true.)
1302          */
1303         joininfactor = join_in_selectivity(&path->jpath, root);
1304
1305         /*
1306          * The number of tuple comparisons needed is approximately number of outer
1307          * rows plus number of inner rows plus number of rescanned tuples (can we
1308          * refine this?).  At each one, we need to evaluate the mergejoin quals.
1309          * NOTE: JOIN_IN mode does not save any work here, so do NOT include
1310          * joininfactor.
1311          */
1312         startup_cost += merge_qual_cost.startup;
1313         run_cost += merge_qual_cost.per_tuple *
1314                 (outer_rows + inner_rows * rescanratio);
1315
1316         /*
1317          * For each tuple that gets through the mergejoin proper, we charge
1318          * cpu_tuple_cost plus the cost of evaluating additional restriction
1319          * clauses that are to be applied at the join.  (This is pessimistic since
1320          * not all of the quals may get evaluated at each tuple.)  This work is
1321          * skipped in JOIN_IN mode, so apply the factor.
1322          */
1323         startup_cost += qp_qual_cost.startup;
1324         cpu_per_tuple = cpu_tuple_cost + qp_qual_cost.per_tuple;
1325         run_cost += cpu_per_tuple * mergejointuples * joininfactor;
1326
1327         path->jpath.path.startup_cost = startup_cost;
1328         path->jpath.path.total_cost = startup_cost + run_cost;
1329 }
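
/*
 * A minimal standalone sketch of the row-count clamping and selectivity
 * readjustment performed in cost_mergejoin() above.  The function name and
 * the literal inputs are hypothetical, and the block is guarded by the
 * equally hypothetical COSTSIZE_EXAMPLES symbol so it never compiles into
 * the planner.
 */
#ifdef COSTSIZE_EXAMPLES
static void
mergejoin_scansel_example(void)
{
	double		outer_path_rows = 3.0;	/* a very small outer input */
	Selectivity outerscansel = 0.10;	/* raw estimate from mergejoinscansel */
	double		outer_rows;

	/* convert to a row count; clamp_row_est forces at least one whole row */
	outer_rows = clamp_row_est(outer_path_rows * outerscansel);	/* 0.3 -> 1 */

	/*
	 * Readjust the selectivity so that the fraction of the input cost we
	 * charge matches the rows we actually expect to scan; without this,
	 * tiny inputs suffer a large percentage error.
	 */
	outerscansel = outer_rows / outer_path_rows;	/* 0.10 -> 0.333... */

	(void) outerscansel;
}
#endif   /* COSTSIZE_EXAMPLES */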
1330
1331 /*
1332  * cost_hashjoin
1333  *        Determines and returns the cost of joining two relations using the
1334  *        hash join algorithm.
1335  *
1336  * 'path' is already filled in except for the cost fields
1337  *
1338  * Note: path's hashclauses should be a subset of the joinrestrictinfo list
1339  */
1340 void
1341 cost_hashjoin(HashPath *path, PlannerInfo *root)
1342 {
1343         Path       *outer_path = path->jpath.outerjoinpath;
1344         Path       *inner_path = path->jpath.innerjoinpath;
1345         List       *hashclauses = path->path_hashclauses;
1346         Cost            startup_cost = 0;
1347         Cost            run_cost = 0;
1348         Cost            cpu_per_tuple;
1349         Selectivity hash_selec;
1350         QualCost        hash_qual_cost;
1351         QualCost        qp_qual_cost;
1352         double          hashjointuples;
1353         double          outer_path_rows = PATH_ROWS(outer_path);
1354         double          inner_path_rows = PATH_ROWS(inner_path);
1355         double          outerbytes = relation_byte_size(outer_path_rows,
1356                                                                                                 outer_path->parent->width);
1357         double          innerbytes = relation_byte_size(inner_path_rows,
1358                                                                                                 inner_path->parent->width);
1359         int                     num_hashclauses = list_length(hashclauses);
1360         int                     numbuckets;
1361         int                     numbatches;
1362         double          virtualbuckets;
1363         Selectivity innerbucketsize;
1364         Selectivity joininfactor;
1365         ListCell   *hcl;
1366
1367         if (!enable_hashjoin)
1368                 startup_cost += disable_cost;
1369
1370         /*
1371          * Compute cost and selectivity of the hashquals and qpquals (other
1372          * restriction clauses) separately.  We use approx_selectivity here for
1373          * speed --- in most cases, any errors won't affect the result much.
1374          *
1375          * Note: it's probably bogus to use the normal selectivity calculation
1376          * here when either the outer or inner path is a UniquePath.
1377          */
1378         hash_selec = approx_selectivity(root, hashclauses,
1379                                                                         path->jpath.jointype);
1380         cost_qual_eval(&hash_qual_cost, hashclauses);
1381         cost_qual_eval(&qp_qual_cost, path->jpath.joinrestrictinfo);
1382         qp_qual_cost.startup -= hash_qual_cost.startup;
1383         qp_qual_cost.per_tuple -= hash_qual_cost.per_tuple;
1384
1385         /* approx # tuples passing the hash quals */
1386         hashjointuples = clamp_row_est(hash_selec * outer_path_rows * inner_path_rows);
1387
1388         /* cost of source data */
1389         startup_cost += outer_path->startup_cost;
1390         run_cost += outer_path->total_cost - outer_path->startup_cost;
1391         startup_cost += inner_path->total_cost;
1392
1393         /*
1394          * Cost of computing hash function: must do it once per input tuple. We
1395          * charge one cpu_operator_cost for each column's hash function.
1396          *
1397          * XXX when a hashclause is more complex than a single operator, we really
1398          * should charge the extra eval costs of the left or right side, as
1399          * appropriate, here.  This seems more work than it's worth at the moment.
1400          */
1401         startup_cost += cpu_operator_cost * num_hashclauses * inner_path_rows;
1402         run_cost += cpu_operator_cost * num_hashclauses * outer_path_rows;
1403
1404         /* Get hash table size that executor would use for inner relation */
1405         ExecChooseHashTableSize(inner_path_rows,
1406                                                         inner_path->parent->width,
1407                                                         &numbuckets,
1408                                                         &numbatches);
1409         virtualbuckets = (double) numbuckets * (double) numbatches;
1410
1411         /*
1412          * Determine bucketsize fraction for inner relation.  We use the smallest
1413          * bucketsize estimated for any individual hashclause; this is undoubtedly
1414          * conservative.
1415          *
1416          * BUT: if inner relation has been unique-ified, we can assume it's good
1417          * for hashing.  This is important both because it's the right answer, and
1418          * because we avoid contaminating the cache with a value that's wrong for
1419          * non-unique-ified paths.
1420          */
1421         if (IsA(inner_path, UniquePath))
1422                 innerbucketsize = 1.0 / virtualbuckets;
1423         else
1424         {
1425                 innerbucketsize = 1.0;
1426                 foreach(hcl, hashclauses)
1427                 {
1428                         RestrictInfo *restrictinfo = (RestrictInfo *) lfirst(hcl);
1429                         Selectivity thisbucketsize;
1430
1431                         Assert(IsA(restrictinfo, RestrictInfo));
1432
1433                         /*
1434                          * First we have to figure out which side of the hashjoin clause
1435                          * is the inner side.
1436                          *
1437                          * Since we tend to visit the same clauses over and over when
1438                          * planning a large query, we cache the bucketsize estimate in the
1439                          * RestrictInfo node to avoid repeated lookups of statistics.
1440                          */
1441                         if (bms_is_subset(restrictinfo->right_relids,
1442                                                           inner_path->parent->relids))
1443                         {
1444                                 /* righthand side is inner */
1445                                 thisbucketsize = restrictinfo->right_bucketsize;
1446                                 if (thisbucketsize < 0)
1447                                 {
1448                                         /* not cached yet */
1449                                         thisbucketsize =
1450                                                 estimate_hash_bucketsize(root,
1451                                                                                    get_rightop(restrictinfo->clause),
1452                                                                                                  virtualbuckets);
1453                                         restrictinfo->right_bucketsize = thisbucketsize;
1454                                 }
1455                         }
1456                         else
1457                         {
1458                                 Assert(bms_is_subset(restrictinfo->left_relids,
1459                                                                          inner_path->parent->relids));
1460                                 /* lefthand side is inner */
1461                                 thisbucketsize = restrictinfo->left_bucketsize;
1462                                 if (thisbucketsize < 0)
1463                                 {
1464                                         /* not cached yet */
1465                                         thisbucketsize =
1466                                                 estimate_hash_bucketsize(root,
1467                                                                                         get_leftop(restrictinfo->clause),
1468                                                                                                  virtualbuckets);
1469                                         restrictinfo->left_bucketsize = thisbucketsize;
1470                                 }
1471                         }
1472
1473                         if (innerbucketsize > thisbucketsize)
1474                                 innerbucketsize = thisbucketsize;
1475                 }
1476         }
1477
1478         /*
1479          * If inner relation is too big then we will need to "batch" the join,
1480          * which implies writing and reading most of the tuples to disk an extra
1481          * time.  Charge one cost unit per page of I/O (correct since it should be
1482          * nice and sequential...).  Writing the inner rel counts as startup cost,
1483          * all the rest as run cost.
1484          */
1485         if (numbatches > 1)
1486         {
1487                 double          outerpages = page_size(outer_path_rows,
1488                                                                                    outer_path->parent->width);
1489                 double          innerpages = page_size(inner_path_rows,
1490                                                                                    inner_path->parent->width);
1491
1492                 startup_cost += innerpages;
1493                 run_cost += innerpages + 2 * outerpages;
1494         }
1495
1496         /* CPU costs */
1497
1498         /*
1499          * If we're doing JOIN_IN then we will stop comparing inner tuples to an
1500          * outer tuple as soon as we have one match.  Account for the effects of
1501          * this by scaling down the cost estimates in proportion to the expected
1502          * output size.  (This assumes that all the quals attached to the join are
1503          * IN quals, which should be true.)
1504          */
1505         joininfactor = join_in_selectivity(&path->jpath, root);
1506
1507         /*
1508          * The number of tuple comparisons needed is the number of outer tuples
1509          * times the typical number of tuples in a hash bucket, which is the inner
1510          * relation size times its bucketsize fraction.  At each one, we need to
1511          * evaluate the hashjoin quals.  (Note: charging the full qual eval cost
1512          * at each tuple is pessimistic, since we don't evaluate the quals unless
1513          * the hash values match exactly.)
1514          */
1515         startup_cost += hash_qual_cost.startup;
1516         run_cost += hash_qual_cost.per_tuple *
1517                 outer_path_rows * clamp_row_est(inner_path_rows * innerbucketsize) *
1518                 joininfactor;
1519
1520         /*
1521          * For each tuple that gets through the hashjoin proper, we charge
1522          * cpu_tuple_cost plus the cost of evaluating additional restriction
1523          * clauses that are to be applied at the join.  (This is pessimistic since
1524          * not all of the quals may get evaluated at each tuple.)
1525          */
1526         startup_cost += qp_qual_cost.startup;
1527         cpu_per_tuple = cpu_tuple_cost + qp_qual_cost.per_tuple;
1528         run_cost += cpu_per_tuple * hashjointuples * joininfactor;
1529
1530         /*
1531          * Bias against putting the larger relation on the inside.  We don't want
1532          * an absolute prohibition, though, since the larger relation might have a
1533          * better bucketsize --- and we can't trust the size estimates unreservedly,
1534          * anyway.  Instead, inflate the run cost by the square root of the size
1535          * ratio.  (Why square root?  No real good reason, but it seems
1536          * reasonable...)
1537          *
1538          * Note: before 7.4 we implemented this by inflating startup cost; but if
1539          * there's a disable_cost component in the input paths' startup cost, that
1540          * unfairly penalizes the hash.  Probably it'd be better to keep track of
1541          * disable penalty separately from cost.
1542          */
1543         if (innerbytes > outerbytes && outerbytes > 0)
1544                 run_cost *= sqrt(innerbytes / outerbytes);
1545
1546         path->jpath.path.startup_cost = startup_cost;
1547         path->jpath.path.total_cost = startup_cost + run_cost;
1548 }
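
/*
 * A minimal standalone sketch of two adjustments made in cost_hashjoin()
 * above: the extra page I/O charged when the join must be split into
 * batches, and the square-root bias against putting the larger relation on
 * the inside.  All names and input values are hypothetical, and the block
 * is guarded by the hypothetical COSTSIZE_EXAMPLES symbol.
 */
#ifdef COSTSIZE_EXAMPLES
static void
hashjoin_penalty_example(void)
{
	double		outerbytes = 8.0e6;		/* hypothetical relation sizes */
	double		innerbytes = 32.0e6;
	double		outerpages = ceil(outerbytes / BLCKSZ);
	double		innerpages = ceil(innerbytes / BLCKSZ);
	int			numbatches = 4;			/* inner rel won't fit in memory */
	Cost		startup_cost = 0;
	Cost		run_cost = 1000.0;		/* stand-in for the CPU costs */

	if (numbatches > 1)
	{
		/* write the inner rel out once (startup cost), then reread it ... */
		startup_cost += innerpages;
		/* ... and write + reread the outer rel during the run */
		run_cost += innerpages + 2 * outerpages;
	}

	/* inner is larger than outer: inflate run cost by sqrt(size ratio) = 2 */
	if (innerbytes > outerbytes && outerbytes > 0)
		run_cost *= sqrt(innerbytes / outerbytes);

	(void) startup_cost;
	(void) run_cost;
}
#endif   /* COSTSIZE_EXAMPLES */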
1549
1550
1551 /*
1552  * cost_qual_eval
1553  *              Estimate the CPU costs of evaluating a WHERE clause.
1554  *              The input can be either an implicitly-ANDed list of boolean
1555  *              expressions, or a list of RestrictInfo nodes.
1556  *              The result includes both a one-time (startup) component,
1557  *              and a per-evaluation component.
1558  */
1559 void
1560 cost_qual_eval(QualCost *cost, List *quals)
1561 {
1562         ListCell   *l;
1563
1564         cost->startup = 0;
1565         cost->per_tuple = 0;
1566
1567         /* We don't charge any cost for the implicit ANDing at top level ... */
1568
1569         foreach(l, quals)
1570         {
1571                 Node       *qual = (Node *) lfirst(l);
1572
1573                 /*
1574                  * RestrictInfo nodes contain an eval_cost field reserved for this
1575                  * routine's use, so that it's not necessary to evaluate the qual
1576                  * clause's cost more than once.  If the clause's cost hasn't been
1577                  * computed yet, the field's startup value will contain -1.
1578                  *
1579                  * If the RestrictInfo is marked pseudoconstant, it will be tested
1580                  * only once, so treat its cost as all startup cost.
1581                  */
1582                 if (qual && IsA(qual, RestrictInfo))
1583                 {
1584                         RestrictInfo *rinfo = (RestrictInfo *) qual;
1585
1586                         if (rinfo->eval_cost.startup < 0)
1587                         {
1588                                 rinfo->eval_cost.startup = 0;
1589                                 rinfo->eval_cost.per_tuple = 0;
1590                                 cost_qual_eval_walker((Node *) rinfo->clause,
1591                                                                           &rinfo->eval_cost);
1592                                 if (rinfo->pseudoconstant)
1593                                 {
1594                                         /* count one execution during startup */
1595                                         rinfo->eval_cost.startup += rinfo->eval_cost.per_tuple;
1596                                         rinfo->eval_cost.per_tuple = 0;
1597                                 }
1598                         }
1599                         cost->startup += rinfo->eval_cost.startup;
1600                         cost->per_tuple += rinfo->eval_cost.per_tuple;
1601                 }
1602                 else
1603                 {
1604                         /* If it's a bare expression, must always do it the hard way */
1605                         cost_qual_eval_walker(qual, cost);
1606                 }
1607         }
1608 }
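
/*
 * A minimal sketch of how the scan and join costers above typically consume
 * the QualCost filled in by cost_qual_eval(): the startup component is paid
 * once, while the per-tuple component (plus cpu_tuple_cost) is multiplied by
 * the number of tuples the quals are evaluated against.  The wrapper name
 * and its parameters are hypothetical; guarded by the hypothetical
 * COSTSIZE_EXAMPLES symbol.
 */
#ifdef COSTSIZE_EXAMPLES
static void
qual_cost_usage_example(List *quals, double tuples,
						Cost *startup_cost, Cost *run_cost)
{
	QualCost	qual_cost;

	cost_qual_eval(&qual_cost, quals);
	*startup_cost += qual_cost.startup;
	*run_cost += (cpu_tuple_cost + qual_cost.per_tuple) * tuples;
}
#endif   /* COSTSIZE_EXAMPLES */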
1609
1610 static bool
1611 cost_qual_eval_walker(Node *node, QualCost *total)
1612 {
1613         if (node == NULL)
1614                 return false;
1615
1616         /*
1617          * Our basic strategy is to charge one cpu_operator_cost for each operator
1618          * or function node in the given tree.  Vars and Consts are charged zero,
1619          * and so are boolean operators (AND, OR, NOT). Simplistic, but a lot
1620          * better than no model at all.
1621          *
1622          * Should we try to account for the possibility of short-circuit
1623          * evaluation of AND/OR?
1624          */
1625         if (IsA(node, FuncExpr) ||
1626                 IsA(node, OpExpr) ||
1627                 IsA(node, DistinctExpr) ||
1628                 IsA(node, NullIfExpr))
1629                 total->per_tuple += cpu_operator_cost;
1630         else if (IsA(node, ScalarArrayOpExpr))
1631         {
1632                 /*
1633                  * Estimate that the operator will be applied to about half of the
1634                  * array elements before the answer is determined.
1635                  */
1636                 ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) node;
1637                 Node       *arraynode = (Node *) lsecond(saop->args);
1638
1639                 total->per_tuple +=
1640                         cpu_operator_cost * estimate_array_length(arraynode) * 0.5;
1641         }
1642         else if (IsA(node, RowCompareExpr))
1643         {
1644                 /* Conservatively assume we will check all the columns */
1645                 RowCompareExpr *rcexpr = (RowCompareExpr *) node;
1646
1647                 total->per_tuple += cpu_operator_cost * list_length(rcexpr->opnos);
1648         }
1649         else if (IsA(node, SubLink))
1650         {
1651                 /* This routine should not be applied to un-planned expressions */
1652                 elog(ERROR, "cannot handle unplanned sub-select");
1653         }
1654         else if (IsA(node, SubPlan))
1655         {
1656                 /*
1657                  * A subplan node in an expression typically indicates that the
1658                  * subplan will be executed on each evaluation, so charge accordingly.
1659                  * (Sub-selects that can be executed as InitPlans have already been
1660                  * removed from the expression.)
1661                  *
1662                  * An exception occurs when we have decided we can implement the
1663                  * subplan by hashing.
1664                  */
1665                 SubPlan    *subplan = (SubPlan *) node;
1666                 Plan       *plan = subplan->plan;
1667
1668                 if (subplan->useHashTable)
1669                 {
1670                         /*
1671                          * If we are using a hash table for the subquery outputs, then the
1672                          * cost of evaluating the query is a one-time cost. We charge one
1673                          * cpu_operator_cost per tuple for the work of loading the
1674                          * hashtable, too.
1675                          */
1676                         total->startup += plan->total_cost +
1677                                 cpu_operator_cost * plan->plan_rows;
1678
1679                         /*
1680                          * The per-tuple costs include the cost of evaluating the lefthand
1681                          * expressions, plus the cost of probing the hashtable. Recursion
1682                          * into the testexpr will handle the lefthand expressions
1683                          * properly, and will count one cpu_operator_cost for each
1684                          * comparison operator.  That is probably too low for the probing
1685                          * cost, but it's hard to make a better estimate, so live with it
1686                          * for now.
1687                          */
1688                 }
1689                 else
1690                 {
1691                         /*
1692                          * Otherwise we will be rescanning the subplan output on each
1693                          * evaluation.  We need to estimate how much of the output we will
1694                          * actually need to scan.  NOTE: this logic should agree with the
1695                          * estimates used by make_subplan() in plan/subselect.c.
1696                          */
1697                         Cost            plan_run_cost = plan->total_cost - plan->startup_cost;
1698
1699                         if (subplan->subLinkType == EXISTS_SUBLINK)
1700                         {
1701                                 /* we only need to fetch 1 tuple */
1702                                 total->per_tuple += plan_run_cost / plan->plan_rows;
1703                         }
1704                         else if (subplan->subLinkType == ALL_SUBLINK ||
1705                                          subplan->subLinkType == ANY_SUBLINK)
1706                         {
1707                                 /* assume we need 50% of the tuples */
1708                                 total->per_tuple += 0.50 * plan_run_cost;
1709                                 /* also charge a cpu_operator_cost per row examined */
1710                                 total->per_tuple += 0.50 * plan->plan_rows * cpu_operator_cost;
1711                         }
1712                         else
1713                         {
1714                                 /* assume we need all tuples */
1715                                 total->per_tuple += plan_run_cost;
1716                         }
1717
1718                         /*
1719                          * Also account for subplan's startup cost. If the subplan is
1720                          * uncorrelated or undirect correlated, AND its topmost node is a
1721                          * Sort or Material node, assume that we'll only need to pay its
1722                          * startup cost once; otherwise assume we pay the startup cost
1723                          * every time.
1724                          */
1725                         if (subplan->parParam == NIL &&
1726                                 (IsA(plan, Sort) ||
1727                                  IsA(plan, Material)))
1728                                 total->startup += plan->startup_cost;
1729                         else
1730                                 total->per_tuple += plan->startup_cost;
1731                 }
1732         }
1733
1734         return expression_tree_walker(node, cost_qual_eval_walker,
1735                                                                   (void *) total);
1736 }
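
/*
 * A minimal standalone sketch of the charging rule used above for a
 * rescanned, non-hashed SubPlan.  The plan numbers are hypothetical; the
 * fractions (one tuple for EXISTS, 50% for ANY/ALL, everything otherwise)
 * follow the code above.  Guarded by the hypothetical COSTSIZE_EXAMPLES
 * symbol.
 */
#ifdef COSTSIZE_EXAMPLES
static void
subplan_charge_example(void)
{
	double		plan_rows = 1000.0;
	Cost		plan_startup_cost = 5.0;
	Cost		plan_total_cost = 105.0;
	Cost		plan_run_cost = plan_total_cost - plan_startup_cost;
	Cost		per_tuple = 0;

	/* ANY/ALL sublink: assume we scan half of the subplan output ... */
	per_tuple += 0.50 * plan_run_cost;	/* 50.0 */
	/* ... and charge one operator evaluation per row examined */
	per_tuple += 0.50 * plan_rows * cpu_operator_cost;	/* 1.25 at the default 0.0025 */

	/* (an EXISTS sublink would instead charge only plan_run_cost / plan_rows) */

	(void) per_tuple;
}
#endif   /* COSTSIZE_EXAMPLES */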
1737
1738
1739 /*
1740  * approx_selectivity
1741  *              Quick-and-dirty estimation of clause selectivities.
1742  *              The input can be either an implicitly-ANDed list of boolean
1743  *              expressions, or a list of RestrictInfo nodes (typically the latter).
1744  *
1745  * This is quick-and-dirty because we bypass clauselist_selectivity, and
1746  * simply multiply the independent clause selectivities together.  Now
1747  * clauselist_selectivity often can't do any better than that anyhow, but
1748  * for some situations (such as range constraints) it is smarter.  However,
1749  * we can't effectively cache the results of clauselist_selectivity, whereas
1750  * the individual clause selectivities can be and are cached.
1751  *
1752  * Since we are only using the results to estimate how many potential
1753  * output tuples are generated and passed through qpqual checking, it
1754  * seems OK to live with the approximation.
1755  */
1756 static Selectivity
1757 approx_selectivity(PlannerInfo *root, List *quals, JoinType jointype)
1758 {
1759         Selectivity total = 1.0;
1760         ListCell   *l;
1761
1762         foreach(l, quals)
1763         {
1764                 Node       *qual = (Node *) lfirst(l);
1765
1766                 /* Note that clause_selectivity will be able to cache its result */
1767                 total *= clause_selectivity(root, qual, 0, jointype);
1768         }
1769         return total;
1770 }
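
/*
 * A minimal worked sketch of the independence assumption made above: two
 * clauses with selectivities 0.1 and 0.2 are treated as filtering
 * independently, for a combined selectivity of 0.02.  The values are
 * hypothetical; clauselist_selectivity() can do better for correlated
 * clauses such as a pair of range constraints on the same column.  Guarded
 * by the hypothetical COSTSIZE_EXAMPLES symbol.
 */
#ifdef COSTSIZE_EXAMPLES
static void
approx_selectivity_example(void)
{
	Selectivity s1 = 0.1;			/* e.g. a.x = b.x */
	Selectivity s2 = 0.2;			/* e.g. a.y = b.y */
	Selectivity total = s1 * s2;	/* 0.02 under independence */

	(void) total;
}
#endif   /* COSTSIZE_EXAMPLES */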
1771
1772
1773 /*
1774  * set_baserel_size_estimates
1775  *              Set the size estimates for the given base relation.
1776  *
1777  * The rel's targetlist and restrictinfo list must have been constructed
1778  * already.
1779  *
1780  * We set the following fields of the rel node:
1781  *      rows: the estimated number of output tuples (after applying
1782  *                restriction clauses).
1783  *      width: the estimated average output tuple width in bytes.
1784  *      baserestrictcost: estimated cost of evaluating baserestrictinfo clauses.
1785  */
1786 void
1787 set_baserel_size_estimates(PlannerInfo *root, RelOptInfo *rel)
1788 {
1789         double          nrows;
1790
1791         /* Should only be applied to base relations */
1792         Assert(rel->relid > 0);
1793
1794         nrows = rel->tuples *
1795                 clauselist_selectivity(root,
1796                                                            rel->baserestrictinfo,
1797                                                            0,
1798                                                            JOIN_INNER);
1799
1800         rel->rows = clamp_row_est(nrows);
1801
1802         cost_qual_eval(&rel->baserestrictcost, rel->baserestrictinfo);
1803
1804         set_rel_width(root, rel);
1805 }
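
/*
 * A minimal worked sketch of the estimate above, with hypothetical numbers:
 * a relation of 10000 tuples whose baserestrictinfo clauses have a combined
 * selectivity of 0.005 is expected to emit 50 rows.  Guarded by the
 * hypothetical COSTSIZE_EXAMPLES symbol.
 */
#ifdef COSTSIZE_EXAMPLES
static void
baserel_size_example(void)
{
	double		tuples = 10000.0;	/* rel->tuples */
	Selectivity selec = 0.005;		/* clauselist_selectivity result */
	double		rows = clamp_row_est(tuples * selec);	/* 50 */

	(void) rows;
}
#endif   /* COSTSIZE_EXAMPLES */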
1806
1807 /*
1808  * set_joinrel_size_estimates
1809  *              Set the size estimates for the given join relation.
1810  *
1811  * The rel's targetlist must have been constructed already, and a
1812  * restriction clause list that matches the given component rels must
1813  * be provided.
1814  *
1815  * Since there is more than one way to make a joinrel for more than two
1816  * base relations, the results we get here could depend on which component
1817  * rel pair is provided.  In theory we should get the same answers no matter
1818  * which pair is provided; in practice, since the selectivity estimation
1819  * routines don't handle all cases equally well, we might not.  But there's
1820  * not much to be done about it.  (Would it make sense to repeat the
1821  * calculations for each pair of input rels that's encountered, and somehow
1822  * average the results?  Probably way more trouble than it's worth.)
1823  *
1824  * It's important that the results for symmetric JoinTypes be symmetric,
1825  * eg, (rel1, rel2, JOIN_LEFT) should produce the same result as (rel2,
1826  * rel1, JOIN_RIGHT).  Also, JOIN_IN should produce the same result as
1827  * JOIN_UNIQUE_INNER, likewise JOIN_REVERSE_IN == JOIN_UNIQUE_OUTER.
1828  *
1829  * We set only the rows field here.  The width field was already set by
1830  * build_joinrel_tlist, and baserestrictcost is not used for join rels.
1831  */
1832 void
1833 set_joinrel_size_estimates(PlannerInfo *root, RelOptInfo *rel,
1834                                                    RelOptInfo *outer_rel,
1835                                                    RelOptInfo *inner_rel,
1836                                                    JoinType jointype,
1837                                                    List *restrictlist)
1838 {
1839         Selectivity selec;
1840         double          nrows;
1841         UniquePath *upath;
1842
1843         /*
1844          * Compute joinclause selectivity.  Note that we are only considering
1845          * clauses that become restriction clauses at this join level; we are not
1846          * double-counting them because they were not considered in estimating the
1847          * sizes of the component rels.
1848          */
1849         selec = clauselist_selectivity(root,
1850                                                                    restrictlist,
1851                                                                    0,
1852                                                                    jointype);
1853
1854         /*
1855          * Basically, we multiply size of Cartesian product by selectivity.
1856          *
1857          * If we are doing an outer join, take that into account: the output must
1858          * be at least as large as the non-nullable input.      (Is there any chance
1859          * be at least as large as the non-nullable input.  (Is there any chance
1860          * assumes all the restriction clauses are join clauses; we should figure
1861          * pushed-down clauses separately.)
1862          *
1863          * For JOIN_IN and variants, the Cartesian product is figured with respect
1864          * to a unique-ified input, and then we can clamp to the size of the other
1865          * input.
1866          */
1867         switch (jointype)
1868         {
1869                 case JOIN_INNER:
1870                         nrows = outer_rel->rows * inner_rel->rows * selec;
1871                         break;
1872                 case JOIN_LEFT:
1873                         nrows = outer_rel->rows * inner_rel->rows * selec;
1874                         if (nrows < outer_rel->rows)
1875                                 nrows = outer_rel->rows;
1876                         break;
1877                 case JOIN_RIGHT:
1878                         nrows = outer_rel->rows * inner_rel->rows * selec;
1879                         if (nrows < inner_rel->rows)
1880                                 nrows = inner_rel->rows;
1881                         break;
1882                 case JOIN_FULL:
1883                         nrows = outer_rel->rows * inner_rel->rows * selec;
1884                         if (nrows < outer_rel->rows)
1885                                 nrows = outer_rel->rows;
1886                         if (nrows < inner_rel->rows)
1887                                 nrows = inner_rel->rows;
1888                         break;
1889                 case JOIN_IN:
1890                 case JOIN_UNIQUE_INNER:
1891                         upath = create_unique_path(root, inner_rel,
1892                                                                            inner_rel->cheapest_total_path);
1893                         nrows = outer_rel->rows * upath->rows * selec;
1894                         if (nrows > outer_rel->rows)
1895                                 nrows = outer_rel->rows;
1896                         break;
1897                 case JOIN_REVERSE_IN:
1898                 case JOIN_UNIQUE_OUTER:
1899                         upath = create_unique_path(root, outer_rel,
1900                                                                            outer_rel->cheapest_total_path);
1901                         nrows = upath->rows * inner_rel->rows * selec;
1902                         if (nrows > inner_rel->rows)
1903                                 nrows = inner_rel->rows;
1904                         break;
1905                 default:
1906                         elog(ERROR, "unrecognized join type: %d", (int) jointype);
1907                         nrows = 0;                      /* keep compiler quiet */
1908                         break;
1909         }
1910
1911         rel->rows = clamp_row_est(nrows);
1912 }
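
/*
 * A minimal standalone sketch of the estimate above for the inner and
 * left-join cases: multiply the Cartesian product by the joinclause
 * selectivity, then clamp a left join to at least the size of its outer
 * (non-nullable) input.  The function name is hypothetical and only two
 * join types are shown; guarded by the hypothetical COSTSIZE_EXAMPLES
 * symbol.
 */
#ifdef COSTSIZE_EXAMPLES
static double
joinrel_size_example(double outer_rows, double inner_rows,
					 Selectivity selec, JoinType jointype)
{
	double		nrows = outer_rows * inner_rows * selec;

	/* an outer join emits at least every row of its non-nullable side */
	if (jointype == JOIN_LEFT && nrows < outer_rows)
		nrows = outer_rows;

	return clamp_row_est(nrows);
}
#endif   /* COSTSIZE_EXAMPLES */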
1913
1914 /*
1915  * join_in_selectivity
1916  *        Determines the factor by which a JOIN_IN join's result is expected
1917  *        to be smaller than an ordinary inner join.
1918  *
1919  * 'path' is already filled in except for the cost fields
1920  */
1921 static Selectivity
1922 join_in_selectivity(JoinPath *path, PlannerInfo *root)
1923 {
1924         RelOptInfo *innerrel;
1925         UniquePath *innerunique;
1926         Selectivity selec;
1927         double          nrows;
1928
1929         /* Return 1.0 whenever it's not JOIN_IN */
1930         if (path->jointype != JOIN_IN)
1931                 return 1.0;
1932
1933         /*
1934          * Return 1.0 if the inner side is already known unique.  The case where
1935          * the inner path is already a UniquePath probably cannot happen in
1936          * current usage, but check it anyway for completeness.  The interesting
1937          * case is where we've determined the inner relation itself is unique,
1938          * which we can check by looking at the rows estimate for its UniquePath.
1939          */
1940         if (IsA(path->innerjoinpath, UniquePath))
1941                 return 1.0;
1942         innerrel = path->innerjoinpath->parent;
1943         innerunique = create_unique_path(root,
1944                                                                          innerrel,
1945                                                                          innerrel->cheapest_total_path);
1946         if (innerunique->rows >= innerrel->rows)
1947                 return 1.0;
1948
1949         /*
1950          * Compute same result set_joinrel_size_estimates would compute for
1951          * JOIN_INNER.  Note that we use the input rels' absolute size estimates,
1952          * not PATH_ROWS() which might be less; if we used PATH_ROWS() we'd be
1953          * double-counting the effects of any join clauses used in input scans.
1954          */
1955         selec = clauselist_selectivity(root,
1956                                                                    path->joinrestrictinfo,
1957                                                                    0,
1958                                                                    JOIN_INNER);
1959         nrows = path->outerjoinpath->parent->rows * innerrel->rows * selec;
1960
1961         nrows = clamp_row_est(nrows);
1962
1963         /* See if it's larger than the actual JOIN_IN size estimate */
1964         if (nrows > path->path.parent->rows)
1965                 return path->path.parent->rows / nrows;
1966         else
1967                 return 1.0;
1968 }
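
/*
 * A minimal worked sketch of the factor computed above, with hypothetical
 * numbers: if a plain inner join of the same inputs would produce 500 rows
 * but the JOIN_IN size estimate is only 100 rows, the per-tuple join work
 * is scaled down by 100/500 = 0.2.  Guarded by the hypothetical
 * COSTSIZE_EXAMPLES symbol.
 */
#ifdef COSTSIZE_EXAMPLES
static void
join_in_selectivity_example(void)
{
	double		inner_join_rows = 500.0;	/* what JOIN_INNER would emit */
	double		join_in_rows = 100.0;		/* the JOIN_IN size estimate */
	Selectivity joininfactor;

	joininfactor = (inner_join_rows > join_in_rows) ?
		join_in_rows / inner_join_rows : 1.0;	/* 0.2 */

	(void) joininfactor;
}
#endif   /* COSTSIZE_EXAMPLES */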
1969
1970 /*
1971  * set_function_size_estimates
1972  *              Set the size estimates for a base relation that is a function call.
1973  *
1974  * The rel's targetlist and restrictinfo list must have been constructed
1975  * already.
1976  *
1977  * We set the same fields as set_baserel_size_estimates.
1978  */
1979 void
1980 set_function_size_estimates(PlannerInfo *root, RelOptInfo *rel)
1981 {
1982         RangeTblEntry *rte;
1983
1984         /* Should only be applied to base relations that are functions */
1985         Assert(rel->relid > 0);
1986         rte = rt_fetch(rel->relid, root->parse->rtable);
1987         Assert(rte->rtekind == RTE_FUNCTION);
1988
1989         /*
1990          * Estimate number of rows the function itself will return.
1991          *
1992          * XXX no idea how to do this yet; but we can at least check whether
1993          * function returns set or not...
1994          */
1995         if (expression_returns_set(rte->funcexpr))
1996                 rel->tuples = 1000;
1997         else
1998                 rel->tuples = 1;
1999
2000         /* Now estimate number of output rows, etc */
2001         set_baserel_size_estimates(root, rel);
2002 }
2003
2004
2005 /*
2006  * set_rel_width
2007  *              Set the estimated output width of a base relation.
2008  *
2009  * NB: this works best on plain relations because it prefers to look at
2010  * real Vars.  It will fail to make use of pg_statistic info when applied
2011  * to a subquery relation, even if the subquery outputs are simple vars
2012  * that we could have gotten info for.  Is it worth trying to be smarter
2013  * about subqueries?
2014  *
2015  * The per-attribute width estimates are cached for possible re-use while
2016  * building join relations.
2017  */
2018 static void
2019 set_rel_width(PlannerInfo *root, RelOptInfo *rel)
2020 {
2021         int32           tuple_width = 0;
2022         ListCell   *tllist;
2023
2024         foreach(tllist, rel->reltargetlist)
2025         {
2026                 Var                *var = (Var *) lfirst(tllist);
2027                 int                     ndx;
2028                 Oid                     relid;
2029                 int32           item_width;
2030
2031                 /* For now, punt on whole-row child Vars */
2032                 if (!IsA(var, Var))
2033                 {
2034                         tuple_width += 32;      /* arbitrary */
2035                         continue;
2036                 }
2037
2038                 ndx = var->varattno - rel->min_attr;
2039
2040                 /*
2041                  * The width probably hasn't been cached yet, but may as well check
2042                  */
2043                 if (rel->attr_widths[ndx] > 0)
2044                 {
2045                         tuple_width += rel->attr_widths[ndx];
2046                         continue;
2047                 }
2048
2049                 relid = getrelid(var->varno, root->parse->rtable);
2050                 if (relid != InvalidOid)
2051                 {
2052                         item_width = get_attavgwidth(relid, var->varattno);
2053                         if (item_width > 0)
2054                         {
2055                                 rel->attr_widths[ndx] = item_width;
2056                                 tuple_width += item_width;
2057                                 continue;
2058                         }
2059                 }
2060
2061                 /*
2062                  * Not a plain relation, or can't find statistics for it. Estimate
2063                  * using just the type info.
2064                  */
2065                 item_width = get_typavgwidth(var->vartype, var->vartypmod);
2066                 Assert(item_width > 0);
2067                 rel->attr_widths[ndx] = item_width;
2068                 tuple_width += item_width;
2069         }
2070         Assert(tuple_width >= 0);
2071         rel->width = tuple_width;
2072 }
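
/*
 * A minimal sketch of the per-attribute width lookup chain used above:
 * reuse a cached width if we have one, else the per-column average width
 * gathered by ANALYZE, else the average width implied by the data type.
 * The function name is hypothetical and error handling is omitted; guarded
 * by the hypothetical COSTSIZE_EXAMPLES symbol.
 */
#ifdef COSTSIZE_EXAMPLES
static int32
attr_width_example(RelOptInfo *rel, Var *var, Oid relid)
{
	int			ndx = var->varattno - rel->min_attr;
	int32		item_width;

	if (rel->attr_widths[ndx] > 0)
		return rel->attr_widths[ndx];			/* cached earlier */

	item_width = get_attavgwidth(relid, var->varattno);
	if (item_width > 0)
		return item_width;						/* from pg_statistic */

	return get_typavgwidth(var->vartype, var->vartypmod);	/* type default */
}
#endif   /* COSTSIZE_EXAMPLES */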
2073
2074 /*
2075  * relation_byte_size
2076  *        Estimate the storage space in bytes for a given number of tuples
2077  *        of a given width (size in bytes).
2078  */
2079 static double
2080 relation_byte_size(double tuples, int width)
2081 {
2082         return tuples * (MAXALIGN(width) + MAXALIGN(sizeof(HeapTupleHeaderData)));
2083 }
2084
2085 /*
2086  * page_size
2087  *        Returns an estimate of the number of pages covered by a given
2088  *        number of tuples of a given width (size in bytes).
2089  */
2090 static double
2091 page_size(double tuples, int width)
2092 {
2093         return ceil(relation_byte_size(tuples, width) / BLCKSZ);
2094 }
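
/*
 * A minimal worked sketch of the two helpers above, with hypothetical
 * numbers: 10000 tuples of width 100 cost MAXALIGN(100) plus an aligned
 * tuple header apiece (roughly 124-128 bytes depending on alignment), or
 * about 1.25 MB in total, which page_size() turns into roughly 150-160
 * pages at the default 8K BLCKSZ.  Guarded by the hypothetical
 * COSTSIZE_EXAMPLES symbol.
 */
#ifdef COSTSIZE_EXAMPLES
static void
page_size_example(void)
{
	double		bytes = relation_byte_size(10000.0, 100);
	double		pages = page_size(10000.0, 100);

	(void) bytes;
	(void) pages;
}
#endif   /* COSTSIZE_EXAMPLES */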