1 /*-------------------------------------------------------------------------
2  *
3  * costsize.c
4  *        Routines to compute (and set) relation sizes and path costs
5  *
6  * Path costs are measured in arbitrary units established by these basic
7  * parameters:
8  *
9  *      seq_page_cost           Cost of a sequential page fetch
10  *      random_page_cost        Cost of a non-sequential page fetch
11  *      cpu_tuple_cost          Cost of typical CPU time to process a tuple
12  *      cpu_index_tuple_cost  Cost of typical CPU time to process an index tuple
13  *      cpu_operator_cost       Cost of CPU time to execute an operator or function
14  *
15  * We expect that the kernel will typically do some amount of read-ahead
16  * optimization; this in conjunction with seek costs means that seq_page_cost
17  * is normally considerably less than random_page_cost.  (However, if the
18  * database is fully cached in RAM, it is reasonable to set them equal.)
19  *
20  * We also use a rough estimate "effective_cache_size" of the number of
21  * disk pages in Postgres + OS-level disk cache.  (We can't simply use
22  * NBuffers for this purpose because that would ignore the effects of
23  * the kernel's disk cache.)
24  *
25  * Obviously, taking constants for these values is an oversimplification,
26  * but it's tough enough to get any useful estimates even at this level of
27  * detail.  Note that all of these parameters are user-settable, in case
28  * the default values are drastically off for a particular platform.
29  *
30  * We compute two separate costs for each path:
31  *              total_cost: total estimated cost to fetch all tuples
32  *              startup_cost: cost that is expended before first tuple is fetched
33  * In some scenarios, such as when there is a LIMIT or we are implementing
34  * an EXISTS(...) sub-select, it is not necessary to fetch all tuples of the
35  * path's result.  A caller can estimate the cost of fetching a partial
36  * result by interpolating between startup_cost and total_cost.  In detail:
37  *              actual_cost = startup_cost +
38  *                      (total_cost - startup_cost) * tuples_to_fetch / path->parent->rows;
39  * Note that a base relation's rows count (and, by extension, plan_rows for
40  * plan nodes below the LIMIT node) is set without regard to any LIMIT, so
41  * that this equation works properly.  (Also, these routines guarantee not to
42  * set the rows count to zero, so there will be no zero divide.)  The LIMIT is
43  * applied as a top-level plan node.
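 *
 * For example, with startup_cost = 10, total_cost = 110 and an estimated
 * 1000 rows, fetching the first 100 tuples is estimated to cost
 *              10 + (110 - 10) * 100/1000 = 20.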
44  *
45  * For largely historical reasons, most of the routines in this module use
46  * the passed result Path only to store their startup_cost and total_cost
47  * results into.  All the input data they need is passed as separate
48  * parameters, even though much of it could be extracted from the Path.
49  * An exception is made for the cost_XXXjoin() routines, which expect all
50  * the non-cost fields of the passed XXXPath to be filled in.
51  *
52  *
53  * Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
54  * Portions Copyright (c) 1994, Regents of the University of California
55  *
56  * IDENTIFICATION
57  *        $PostgreSQL: pgsql/src/backend/optimizer/path/costsize.c,v 1.178 2007/02/22 22:00:24 tgl Exp $
58  *
59  *-------------------------------------------------------------------------
60  */
61
62 #include "postgres.h"
63
64 #include <math.h>
65
66 #include "executor/nodeHash.h"
67 #include "miscadmin.h"
68 #include "optimizer/clauses.h"
69 #include "optimizer/cost.h"
70 #include "optimizer/pathnode.h"
71 #include "optimizer/planmain.h"
72 #include "parser/parsetree.h"
73 #include "utils/lsyscache.h"
74 #include "utils/selfuncs.h"
75 #include "utils/tuplesort.h"
76
77
78 #define LOG2(x)  (log(x) / 0.693147180559945)
79
80 /*
81  * Some Paths return less than the nominal number of rows of their parent
82  * relations; join nodes need to do this to get the correct input count:
83  */
84 #define PATH_ROWS(path) \
85         (IsA(path, UniquePath) ? \
86          ((UniquePath *) (path))->rows : \
87          (path)->parent->rows)
88
89
90 double          seq_page_cost = DEFAULT_SEQ_PAGE_COST;
91 double          random_page_cost = DEFAULT_RANDOM_PAGE_COST;
92 double          cpu_tuple_cost = DEFAULT_CPU_TUPLE_COST;
93 double          cpu_index_tuple_cost = DEFAULT_CPU_INDEX_TUPLE_COST;
94 double          cpu_operator_cost = DEFAULT_CPU_OPERATOR_COST;
95
96 int                     effective_cache_size = DEFAULT_EFFECTIVE_CACHE_SIZE;
97
98 Cost            disable_cost = 100000000.0;
99
100 bool            enable_seqscan = true;
101 bool            enable_indexscan = true;
102 bool            enable_bitmapscan = true;
103 bool            enable_tidscan = true;
104 bool            enable_sort = true;
105 bool            enable_hashagg = true;
106 bool            enable_nestloop = true;
107 bool            enable_mergejoin = true;
108 bool            enable_hashjoin = true;
109
110 typedef struct
111 {
112         PlannerInfo *root;
113         QualCost        total;
114 } cost_qual_eval_context;
115
116 static MergeScanSelCache *cached_scansel(PlannerInfo *root,
117                                                                                  RestrictInfo *rinfo,
118                                                                                  PathKey *pathkey);
119 static bool cost_qual_eval_walker(Node *node, cost_qual_eval_context *context);
120 static Selectivity approx_selectivity(PlannerInfo *root, List *quals,
121                                    JoinType jointype);
122 static Selectivity join_in_selectivity(JoinPath *path, PlannerInfo *root);
123 static void set_rel_width(PlannerInfo *root, RelOptInfo *rel);
124 static double relation_byte_size(double tuples, int width);
125 static double page_size(double tuples, int width);
126
127
128 /*
129  * clamp_row_est
130  *              Force a row-count estimate to a sane value.
131  */
132 double
133 clamp_row_est(double nrows)
134 {
135         /*
136          * Force estimate to be at least one row, to make explain output look
137          * better and to avoid possible divide-by-zero when interpolating costs.
138          * Make it an integer, too.
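         * For example, an estimate of 0.3 becomes 1.0, while 12.7 rounds to 13.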
139          */
140         if (nrows <= 1.0)
141                 nrows = 1.0;
142         else
143                 nrows = rint(nrows);
144
145         return nrows;
146 }
147
148
149 /*
150  * cost_seqscan
151  *        Determines and returns the cost of scanning a relation sequentially.
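 *
 * For example, with the default seq_page_cost = 1.0 and cpu_tuple_cost = 0.01,
 * a 100-page, 10000-tuple table with no restriction clauses is costed at
 * about 100 * 1.0 + 10000 * 0.01 = 200 total, with zero startup cost.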
152  */
153 void
154 cost_seqscan(Path *path, PlannerInfo *root,
155                          RelOptInfo *baserel)
156 {
157         Cost            startup_cost = 0;
158         Cost            run_cost = 0;
159         Cost            cpu_per_tuple;
160
161         /* Should only be applied to base relations */
162         Assert(baserel->relid > 0);
163         Assert(baserel->rtekind == RTE_RELATION);
164
165         if (!enable_seqscan)
166                 startup_cost += disable_cost;
167
168         /*
169          * disk costs
170          */
171         run_cost += seq_page_cost * baserel->pages;
172
173         /* CPU costs */
174         startup_cost += baserel->baserestrictcost.startup;
175         cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
176         run_cost += cpu_per_tuple * baserel->tuples;
177
178         path->startup_cost = startup_cost;
179         path->total_cost = startup_cost + run_cost;
180 }
181
182 /*
183  * cost_index
184  *        Determines and returns the cost of scanning a relation using an index.
185  *
186  * 'index' is the index to be used
187  * 'indexQuals' is the list of applicable qual clauses (implicit AND semantics)
188  * 'outer_rel' is the outer relation when we are considering using the index
189  *              scan as the inside of a nestloop join (hence, some of the indexQuals
190  *              are join clauses, and we should expect repeated scans of the index);
191  *              NULL for a plain index scan
192  *
193  * cost_index() takes an IndexPath not just a Path, because it sets a few
194  * additional fields of the IndexPath besides startup_cost and total_cost.
195  * These fields are needed if the IndexPath is used in a BitmapIndexScan.
196  *
197  * NOTE: 'indexQuals' must contain only clauses usable as index restrictions.
198  * Any additional quals evaluated as qpquals may reduce the number of returned
199  * tuples, but they won't reduce the number of tuples we have to fetch from
200  * the table, so they don't reduce the scan cost.
201  *
202  * NOTE: as of 8.0, indexQuals is a list of RestrictInfo nodes, where formerly
203  * it was a list of bare clause expressions.
204  */
205 void
206 cost_index(IndexPath *path, PlannerInfo *root,
207                    IndexOptInfo *index,
208                    List *indexQuals,
209                    RelOptInfo *outer_rel)
210 {
211         RelOptInfo *baserel = index->rel;
212         Cost            startup_cost = 0;
213         Cost            run_cost = 0;
214         Cost            indexStartupCost;
215         Cost            indexTotalCost;
216         Selectivity indexSelectivity;
217         double          indexCorrelation,
218                                 csquared;
219         Cost            min_IO_cost,
220                                 max_IO_cost;
221         Cost            cpu_per_tuple;
222         double          tuples_fetched;
223         double          pages_fetched;
224
225         /* Should only be applied to base relations */
226         Assert(IsA(baserel, RelOptInfo) &&
227                    IsA(index, IndexOptInfo));
228         Assert(baserel->relid > 0);
229         Assert(baserel->rtekind == RTE_RELATION);
230
231         if (!enable_indexscan)
232                 startup_cost += disable_cost;
233
234         /*
235          * Call index-access-method-specific code to estimate the processing cost
236          * for scanning the index, as well as the selectivity of the index (ie,
237          * the fraction of main-table tuples we will have to retrieve) and its
238          * correlation to the main-table tuple order.
239          */
240         OidFunctionCall8(index->amcostestimate,
241                                          PointerGetDatum(root),
242                                          PointerGetDatum(index),
243                                          PointerGetDatum(indexQuals),
244                                          PointerGetDatum(outer_rel),
245                                          PointerGetDatum(&indexStartupCost),
246                                          PointerGetDatum(&indexTotalCost),
247                                          PointerGetDatum(&indexSelectivity),
248                                          PointerGetDatum(&indexCorrelation));
249
250         /*
251          * Save amcostestimate's results for possible use in bitmap scan planning.
252          * We don't bother to save indexStartupCost or indexCorrelation, because a
253          * bitmap scan doesn't care about either.
254          */
255         path->indextotalcost = indexTotalCost;
256         path->indexselectivity = indexSelectivity;
257
258         /* all costs for touching index itself included here */
259         startup_cost += indexStartupCost;
260         run_cost += indexTotalCost - indexStartupCost;
261
262         /* estimate number of main-table tuples fetched */
263         tuples_fetched = clamp_row_est(indexSelectivity * baserel->tuples);
264
265         /*----------
266          * Estimate number of main-table pages fetched, and compute I/O cost.
267          *
268          * When the index ordering is uncorrelated with the table ordering,
269          * we use an approximation proposed by Mackert and Lohman (see
270          * index_pages_fetched() for details) to compute the number of pages
271          * fetched, and then charge random_page_cost per page fetched.
272          *
273          * When the index ordering is exactly correlated with the table ordering
274          * (just after a CLUSTER, for example), the number of pages fetched should
275          * be exactly selectivity * table_size.  What's more, all but the first
276          * will be sequential fetches, not the random fetches that occur in the
277          * uncorrelated case.  So if the number of pages is more than 1, we
278          * ought to charge
279          *              random_page_cost + (pages_fetched - 1) * seq_page_cost
280          * For partially-correlated indexes, we ought to charge somewhere between
281          * these two estimates.  We currently interpolate linearly between the
282          * estimates based on the correlation squared (XXX is that appropriate?).
283          *----------
284          */
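        /*
         * Rough illustration with the default random_page_cost = 4.0 and
         * seq_page_cost = 1.0: if both page estimates came to 100, the
         * uncorrelated charge would be 100 * 4.0 = 400, the perfectly
         * correlated charge 4.0 + 99 * 1.0 = 103, and an index with
         * correlation 0.5 (csquared = 0.25) would be charged
         * 400 + 0.25 * (103 - 400) = 325.75.
         */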
285         if (outer_rel != NULL && outer_rel->rows > 1)
286         {
287                 /*
288                  * For repeated indexscans, the appropriate estimate for the
289                  * uncorrelated case is to scale up the number of tuples fetched in
290                  * the Mackert and Lohman formula by the number of scans, so that we
291                  * estimate the number of pages fetched by all the scans; then
292                  * pro-rate the costs for one scan.  In this case we assume all the
293                  * fetches are random accesses.
294                  */
295                 double          num_scans = outer_rel->rows;
296
297                 pages_fetched = index_pages_fetched(tuples_fetched * num_scans,
298                                                                                         baserel->pages,
299                                                                                         (double) index->pages,
300                                                                                         root);
301
302                 max_IO_cost = (pages_fetched * random_page_cost) / num_scans;
303
304                 /*
305                  * In the perfectly correlated case, the number of pages touched
306                  * by each scan is selectivity * table_size, and we can use the
307                  * Mackert and Lohman formula at the page level to estimate how
308                  * much work is saved by caching across scans.  We still assume
309                  * all the fetches are random, though, which is an overestimate
310                  * that's hard to correct for without double-counting the cache
311                  * effects.  (But in most cases where such a plan is actually
312                  * interesting, only one page would get fetched per scan anyway,
313                  * so it shouldn't matter much.)
314                  */
315                 pages_fetched = ceil(indexSelectivity * (double) baserel->pages);
316
317                 pages_fetched = index_pages_fetched(pages_fetched * num_scans,
318                                                                                         baserel->pages,
319                                                                                         (double) index->pages,
320                                                                                         root);
321
322                 min_IO_cost = (pages_fetched * random_page_cost) / num_scans;
323         }
324         else
325         {
326                 /*
327                  * Normal case: apply the Mackert and Lohman formula, and then
328                  * interpolate between that and the correlation-derived result.
329                  */
330                 pages_fetched = index_pages_fetched(tuples_fetched,
331                                                                                         baserel->pages,
332                                                                                         (double) index->pages,
333                                                                                         root);
334
335                 /* max_IO_cost is for the perfectly uncorrelated case (csquared=0) */
336                 max_IO_cost = pages_fetched * random_page_cost;
337
338                 /* min_IO_cost is for the perfectly correlated case (csquared=1) */
339                 pages_fetched = ceil(indexSelectivity * (double) baserel->pages);
340                 min_IO_cost = random_page_cost;
341                 if (pages_fetched > 1)
342                         min_IO_cost += (pages_fetched - 1) * seq_page_cost;
343         }
344
345         /*
346          * Now interpolate based on estimated index order correlation to get
347          * total disk I/O cost for main table accesses.
348          */
349         csquared = indexCorrelation * indexCorrelation;
350
351         run_cost += max_IO_cost + csquared * (min_IO_cost - max_IO_cost);
352
353         /*
354          * Estimate CPU costs per tuple.
355          *
356          * Normally the indexquals will be removed from the list of restriction
357          * clauses that we have to evaluate as qpquals, so we should subtract
358          * their costs from baserestrictcost.  But if we are doing a join then
359          * some of the indexquals are join clauses and shouldn't be subtracted.
360          * Rather than work out exactly how much to subtract, we don't subtract
361          * anything.
362          */
363         startup_cost += baserel->baserestrictcost.startup;
364         cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
365
366         if (outer_rel == NULL)
367         {
368                 QualCost        index_qual_cost;
369
370                 cost_qual_eval(&index_qual_cost, indexQuals, root);
371                 /* any startup cost still has to be paid ... */
372                 cpu_per_tuple -= index_qual_cost.per_tuple;
373         }
374
375         run_cost += cpu_per_tuple * tuples_fetched;
376
377         path->path.startup_cost = startup_cost;
378         path->path.total_cost = startup_cost + run_cost;
379 }
380
381 /*
382  * index_pages_fetched
383  *        Estimate the number of pages actually fetched after accounting for
384  *        cache effects.
385  *
386  * We use an approximation proposed by Mackert and Lohman, "Index Scans
387  * Using a Finite LRU Buffer: A Validated I/O Model", ACM Transactions
388  * on Database Systems, Vol. 14, No. 3, September 1989, Pages 401-424.
389  * The Mackert and Lohman approximation is that the number of pages
390  * fetched is
391  *      PF =
392  *              min(2TNs/(2T+Ns), T)                    when T <= b
393  *              2TNs/(2T+Ns)                                    when T > b and Ns <= 2Tb/(2T-b)
394  *              b + (Ns - 2Tb/(2T-b))*(T-b)/T   when T > b and Ns > 2Tb/(2T-b)
395  * where
396  *              T = # pages in table
397  *              N = # tuples in table
398  *              s = selectivity = fraction of table to be scanned
399  *              b = # buffer pages available (we include kernel space here)
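 *
 * For instance, with T = 1000 pages, Ns = 500 tuples fetched, and a cache
 * share b >= T, the first branch gives PF = 2*1000*500 / (2*1000 + 500) = 400
 * pages, rather than the 500 a naive page-per-tuple estimate would give.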
400  *
401  * We assume that effective_cache_size is the total number of buffer pages
402  * available for the whole query, and pro-rate that space across all the
403  * tables in the query and the index currently under consideration.  (This
404  * ignores space needed for other indexes used by the query, but since we
405  * don't know which indexes will get used, we can't estimate that very well;
406  * and in any case counting all the tables may well be an overestimate, since
407  * depending on the join plan not all the tables may be scanned concurrently.)
408  *
409  * The product Ns is the number of tuples fetched; we pass in that
410  * product rather than calculating it here.  "pages" is the number of pages
411  * in the object under consideration (either an index or a table).
412  * "index_pages" is the amount to add to the total table space, which was
413  * computed for us by query_planner.
414  *
415  * Caller is expected to have ensured that tuples_fetched is greater than zero
416  * and rounded to integer (see clamp_row_est).  The result will likewise be
417  * greater than zero and integral.
418  */
419 double
420 index_pages_fetched(double tuples_fetched, BlockNumber pages,
421                                         double index_pages, PlannerInfo *root)
422 {
423         double          pages_fetched;
424         double          total_pages;
425         double          T,
426                                 b;
427
428         /* T is # pages in table, but don't allow it to be zero */
429         T = (pages > 1) ? (double) pages : 1.0;
430
431         /* Compute number of pages assumed to be competing for cache space */
432         total_pages = root->total_table_pages + index_pages;
433         total_pages = Max(total_pages, 1.0);
434         Assert(T <= total_pages);
435
436         /* b is pro-rated share of effective_cache_size */
437         b = (double) effective_cache_size * T / total_pages;
438
439         /* force it positive and integral */
440         if (b <= 1.0)
441                 b = 1.0;
442         else
443                 b = ceil(b);
444
445         /* This part is the Mackert and Lohman formula */
446         if (T <= b)
447         {
448                 pages_fetched =
449                         (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
450                 if (pages_fetched >= T)
451                         pages_fetched = T;
452                 else
453                         pages_fetched = ceil(pages_fetched);
454         }
455         else
456         {
457                 double          lim;
458
459                 lim = (2.0 * T * b) / (2.0 * T - b);
460                 if (tuples_fetched <= lim)
461                 {
462                         pages_fetched =
463                                 (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
464                 }
465                 else
466                 {
467                         pages_fetched =
468                                 b + (tuples_fetched - lim) * (T - b) / T;
469                 }
470                 pages_fetched = ceil(pages_fetched);
471         }
472         return pages_fetched;
473 }
474
475 /*
476  * get_indexpath_pages
477  *              Determine the total size of the indexes used in a bitmap index path.
478  *
479  * Note: if the same index is used more than once in a bitmap tree, we will
480  * count it multiple times, which perhaps is the wrong thing ... but it's
481  * not completely clear, and detecting duplicates is difficult, so ignore it
482  * for now.
483  */
484 static double
485 get_indexpath_pages(Path *bitmapqual)
486 {
487         double          result = 0;
488         ListCell   *l;
489
490         if (IsA(bitmapqual, BitmapAndPath))
491         {
492                 BitmapAndPath *apath = (BitmapAndPath *) bitmapqual;
493
494                 foreach(l, apath->bitmapquals)
495                 {
496                         result += get_indexpath_pages((Path *) lfirst(l));
497                 }
498         }
499         else if (IsA(bitmapqual, BitmapOrPath))
500         {
501                 BitmapOrPath *opath = (BitmapOrPath *) bitmapqual;
502
503                 foreach(l, opath->bitmapquals)
504                 {
505                         result += get_indexpath_pages((Path *) lfirst(l));
506                 }
507         }
508         else if (IsA(bitmapqual, IndexPath))
509         {
510                 IndexPath  *ipath = (IndexPath *) bitmapqual;
511
512                 result = (double) ipath->indexinfo->pages;
513         }
514         else
515                 elog(ERROR, "unrecognized node type: %d", nodeTag(bitmapqual));
516
517         return result;
518 }
519
520 /*
521  * cost_bitmap_heap_scan
522  *        Determines and returns the cost of scanning a relation using a bitmap
523  *        index-then-heap plan.
524  *
525  * 'baserel' is the relation to be scanned
526  * 'bitmapqual' is a tree of IndexPaths, BitmapAndPaths, and BitmapOrPaths
527  * 'outer_rel' is the outer relation when we are considering using the bitmap
528  *              scan as the inside of a nestloop join (hence, some of the indexQuals
529  *              are join clauses, and we should expect repeated scans of the table);
530  *              NULL for a plain bitmap scan
531  *
532  * Note: if this is a join inner path, the component IndexPaths in bitmapqual
533  * should have been costed accordingly.
534  */
535 void
536 cost_bitmap_heap_scan(Path *path, PlannerInfo *root, RelOptInfo *baserel,
537                                           Path *bitmapqual, RelOptInfo *outer_rel)
538 {
539         Cost            startup_cost = 0;
540         Cost            run_cost = 0;
541         Cost            indexTotalCost;
542         Selectivity indexSelectivity;
543         Cost            cpu_per_tuple;
544         Cost            cost_per_page;
545         double          tuples_fetched;
546         double          pages_fetched;
547         double          T;
548
549         /* Should only be applied to base relations */
550         Assert(IsA(baserel, RelOptInfo));
551         Assert(baserel->relid > 0);
552         Assert(baserel->rtekind == RTE_RELATION);
553
554         if (!enable_bitmapscan)
555                 startup_cost += disable_cost;
556
557         /*
558          * Fetch total cost of obtaining the bitmap, as well as its total
559          * selectivity.
560          */
561         cost_bitmap_tree_node(bitmapqual, &indexTotalCost, &indexSelectivity);
562
563         startup_cost += indexTotalCost;
564
565         /*
566          * Estimate number of main-table pages fetched.
567          */
568         tuples_fetched = clamp_row_est(indexSelectivity * baserel->tuples);
569
570         T = (baserel->pages > 1) ? (double) baserel->pages : 1.0;
571
572         if (outer_rel != NULL && outer_rel->rows > 1)
573         {
574                 /*
575                  * For repeated bitmap scans, scale up the number of tuples fetched in
576                  * the Mackert and Lohman formula by the number of scans, so that we
577                  * estimate the number of pages fetched by all the scans. Then
578                  * pro-rate for one scan.
579                  */
580                 double          num_scans = outer_rel->rows;
581
582                 pages_fetched = index_pages_fetched(tuples_fetched * num_scans,
583                                                                                         baserel->pages,
584                                                                                         get_indexpath_pages(bitmapqual),
585                                                                                         root);
586                 pages_fetched /= num_scans;
587         }
588         else
589         {
590                 /*
591                  * For a single scan, the number of heap pages that need to be fetched
592                  * is the same as the Mackert and Lohman formula for the case T <= b
593                  * (ie, no re-reads needed).
594                  */
595                 pages_fetched = (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
596         }
597         if (pages_fetched >= T)
598                 pages_fetched = T;
599         else
600                 pages_fetched = ceil(pages_fetched);
601
602         /*
603          * For small numbers of pages we should charge random_page_cost apiece,
604          * while if nearly all the table's pages are being read, it's more
605          * appropriate to charge seq_page_cost apiece.  The effect is nonlinear,
606          * too. For lack of a better idea, interpolate like this to determine the
607          * cost per page.
608          */
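        /*
         * For example, with the default random_page_cost = 4.0 and
         * seq_page_cost = 1.0, fetching 250 of T = 1000 pages gives
         * cost_per_page = 4.0 - 3.0 * sqrt(250/1000) = 2.5.
         */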
609         if (pages_fetched >= 2.0)
610                 cost_per_page = random_page_cost -
611                         (random_page_cost - seq_page_cost) * sqrt(pages_fetched / T);
612         else
613                 cost_per_page = random_page_cost;
614
615         run_cost += pages_fetched * cost_per_page;
616
617         /*
618          * Estimate CPU costs per tuple.
619          *
620          * Often the indexquals don't need to be rechecked at each tuple ... but
621          * not always, especially not if there are enough tuples involved that the
622          * bitmaps become lossy.  For the moment, just assume they will be
623          * rechecked always.
624          */
625         startup_cost += baserel->baserestrictcost.startup;
626         cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
627
628         run_cost += cpu_per_tuple * tuples_fetched;
629
630         path->startup_cost = startup_cost;
631         path->total_cost = startup_cost + run_cost;
632 }
633
634 /*
635  * cost_bitmap_tree_node
636  *              Extract cost and selectivity from a bitmap tree node (index/and/or)
637  */
638 void
639 cost_bitmap_tree_node(Path *path, Cost *cost, Selectivity *selec)
640 {
641         if (IsA(path, IndexPath))
642         {
643                 *cost = ((IndexPath *) path)->indextotalcost;
644                 *selec = ((IndexPath *) path)->indexselectivity;
645                 /*
646                  * Charge a small amount per retrieved tuple to reflect the costs of
647                  * manipulating the bitmap.  This is mostly to make sure that a bitmap
648                  * scan doesn't look to be the same cost as an indexscan to retrieve
649                  * a single tuple.
650                  */
651                 *cost += 0.1 * cpu_operator_cost * ((IndexPath *) path)->rows;
652         }
653         else if (IsA(path, BitmapAndPath))
654         {
655                 *cost = path->total_cost;
656                 *selec = ((BitmapAndPath *) path)->bitmapselectivity;
657         }
658         else if (IsA(path, BitmapOrPath))
659         {
660                 *cost = path->total_cost;
661                 *selec = ((BitmapOrPath *) path)->bitmapselectivity;
662         }
663         else
664         {
665                 elog(ERROR, "unrecognized node type: %d", nodeTag(path));
666                 *cost = *selec = 0;             /* keep compiler quiet */
667         }
668 }
669
670 /*
671  * cost_bitmap_and_node
672  *              Estimate the cost of a BitmapAnd node
673  *
674  * Note that this considers only the costs of index scanning and bitmap
675  * creation, not the eventual heap access.  In that sense the object isn't
676  * truly a Path, but it has enough path-like properties (costs in particular)
677  * to warrant treating it as one.
678  */
679 void
680 cost_bitmap_and_node(BitmapAndPath *path, PlannerInfo *root)
681 {
682         Cost            totalCost;
683         Selectivity selec;
684         ListCell   *l;
685
686         /*
687          * We estimate AND selectivity on the assumption that the inputs are
688          * independent.  This is probably often wrong, but we don't have the info
689          * to do better.
690          *
691          * The runtime cost of the BitmapAnd itself is estimated at 100x
692          * cpu_operator_cost for each tbm_intersect needed.  Probably too small,
693          * definitely too simplistic?
694          */
695         totalCost = 0.0;
696         selec = 1.0;
697         foreach(l, path->bitmapquals)
698         {
699                 Path       *subpath = (Path *) lfirst(l);
700                 Cost            subCost;
701                 Selectivity subselec;
702
703                 cost_bitmap_tree_node(subpath, &subCost, &subselec);
704
705                 selec *= subselec;
706
707                 totalCost += subCost;
708                 if (l != list_head(path->bitmapquals))
709                         totalCost += 100.0 * cpu_operator_cost;
710         }
711         path->bitmapselectivity = selec;
712         path->path.startup_cost = totalCost;
713         path->path.total_cost = totalCost;
714 }
715
716 /*
717  * cost_bitmap_or_node
718  *              Estimate the cost of a BitmapOr node
719  *
720  * See comments for cost_bitmap_and_node.
721  */
722 void
723 cost_bitmap_or_node(BitmapOrPath *path, PlannerInfo *root)
724 {
725         Cost            totalCost;
726         Selectivity selec;
727         ListCell   *l;
728
729         /*
730          * We estimate OR selectivity on the assumption that the inputs are
731          * non-overlapping, since that's often the case in "x IN (list)" type
732          * situations.  Of course, we clamp to 1.0 at the end.
733          *
734          * The runtime cost of the BitmapOr itself is estimated at 100x
735          * cpu_operator_cost for each tbm_union needed.  Probably too small,
736          * definitely too simplistic?  We are aware that the tbm_unions are
737          * optimized out when the inputs are BitmapIndexScans.
738          */
739         totalCost = 0.0;
740         selec = 0.0;
741         foreach(l, path->bitmapquals)
742         {
743                 Path       *subpath = (Path *) lfirst(l);
744                 Cost            subCost;
745                 Selectivity subselec;
746
747                 cost_bitmap_tree_node(subpath, &subCost, &subselec);
748
749                 selec += subselec;
750
751                 totalCost += subCost;
752                 if (l != list_head(path->bitmapquals) &&
753                         !IsA(subpath, IndexPath))
754                         totalCost += 100.0 * cpu_operator_cost;
755         }
756         path->bitmapselectivity = Min(selec, 1.0);
757         path->path.startup_cost = totalCost;
758         path->path.total_cost = totalCost;
759 }
760
761 /*
762  * cost_tidscan
763  *        Determines and returns the cost of scanning a relation using TIDs.
764  */
765 void
766 cost_tidscan(Path *path, PlannerInfo *root,
767                          RelOptInfo *baserel, List *tidquals)
768 {
769         Cost            startup_cost = 0;
770         Cost            run_cost = 0;
771         Cost            cpu_per_tuple;
772         int                     ntuples;
773         ListCell   *l;
774
775         /* Should only be applied to base relations */
776         Assert(baserel->relid > 0);
777         Assert(baserel->rtekind == RTE_RELATION);
778
779         if (!enable_tidscan)
780                 startup_cost += disable_cost;
781
782         /* Count how many tuples we expect to retrieve */
783         ntuples = 0;
784         foreach(l, tidquals)
785         {
786                 if (IsA(lfirst(l), ScalarArrayOpExpr))
787                 {
788                         /* Each element of the array yields 1 tuple */
789                         ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) lfirst(l);
790                         Node       *arraynode = (Node *) lsecond(saop->args);
791
792                         ntuples += estimate_array_length(arraynode);
793                 }
794                 else
795                 {
796                         /* It's just CTID = something, count 1 tuple */
797                         ntuples++;
798                 }
799         }
800
801         /* disk costs --- assume each tuple on a different page */
802         run_cost += random_page_cost * ntuples;
803
804         /* CPU costs */
805         startup_cost += baserel->baserestrictcost.startup;
806         cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
807         run_cost += cpu_per_tuple * ntuples;
808
809         path->startup_cost = startup_cost;
810         path->total_cost = startup_cost + run_cost;
811 }
812
813 /*
814  * cost_subqueryscan
815  *        Determines and returns the cost of scanning a subquery RTE.
816  */
817 void
818 cost_subqueryscan(Path *path, RelOptInfo *baserel)
819 {
820         Cost            startup_cost;
821         Cost            run_cost;
822         Cost            cpu_per_tuple;
823
824         /* Should only be applied to base relations that are subqueries */
825         Assert(baserel->relid > 0);
826         Assert(baserel->rtekind == RTE_SUBQUERY);
827
828         /*
829          * Cost of path is cost of evaluating the subplan, plus cost of evaluating
830          * any restriction clauses that will be attached to the SubqueryScan node,
831          * plus cpu_tuple_cost to account for selection and projection overhead.
832          */
833         path->startup_cost = baserel->subplan->startup_cost;
834         path->total_cost = baserel->subplan->total_cost;
835
836         startup_cost = baserel->baserestrictcost.startup;
837         cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
838         run_cost = cpu_per_tuple * baserel->tuples;
839
840         path->startup_cost += startup_cost;
841         path->total_cost += startup_cost + run_cost;
842 }
843
844 /*
845  * cost_functionscan
846  *        Determines and returns the cost of scanning a function RTE.
847  */
848 void
849 cost_functionscan(Path *path, PlannerInfo *root, RelOptInfo *baserel)
850 {
851         Cost            startup_cost = 0;
852         Cost            run_cost = 0;
853         Cost            cpu_per_tuple;
854         RangeTblEntry *rte;
855         QualCost        exprcost;
856
857         /* Should only be applied to base relations that are functions */
858         Assert(baserel->relid > 0);
859         rte = rt_fetch(baserel->relid, root->parse->rtable);
860         Assert(rte->rtekind == RTE_FUNCTION);
861
862         /* Estimate costs of executing the function expression */
863         cost_qual_eval_node(&exprcost, rte->funcexpr, root);
864
865         startup_cost += exprcost.startup;
866         cpu_per_tuple = exprcost.per_tuple;
867
868         /* Add scanning CPU costs */
869         startup_cost += baserel->baserestrictcost.startup;
870         cpu_per_tuple += cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
871         run_cost += cpu_per_tuple * baserel->tuples;
872
873         path->startup_cost = startup_cost;
874         path->total_cost = startup_cost + run_cost;
875 }
876
877 /*
878  * cost_valuesscan
879  *        Determines and returns the cost of scanning a VALUES RTE.
880  */
881 void
882 cost_valuesscan(Path *path, PlannerInfo *root, RelOptInfo *baserel)
883 {
884         Cost            startup_cost = 0;
885         Cost            run_cost = 0;
886         Cost            cpu_per_tuple;
887
888         /* Should only be applied to base relations that are values lists */
889         Assert(baserel->relid > 0);
890         Assert(baserel->rtekind == RTE_VALUES);
891
892         /*
893          * For now, estimate list evaluation cost at one operator eval per list
894          * (probably pretty bogus, but is it worth being smarter?)
895          */
896         cpu_per_tuple = cpu_operator_cost;
897
898         /* Add scanning CPU costs */
899         startup_cost += baserel->baserestrictcost.startup;
900         cpu_per_tuple += cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
901         run_cost += cpu_per_tuple * baserel->tuples;
902
903         path->startup_cost = startup_cost;
904         path->total_cost = startup_cost + run_cost;
905 }
906
907 /*
908  * cost_sort
909  *        Determines and returns the cost of sorting a relation, including
910  *        the cost of reading the input data.
911  *
912  * If the total volume of data to sort is less than work_mem, we will do
913  * an in-memory sort, which requires no I/O and about t*log2(t) tuple
914  * comparisons for t tuples.
915  *
916  * If the total volume exceeds work_mem, we switch to a tape-style merge
917  * algorithm.  There will still be about t*log2(t) tuple comparisons in
918  * total, but we will also need to write and read each tuple once per
919  * merge pass.  We expect about ceil(logM(r)) merge passes where r is the
920  * number of initial runs formed and M is the merge order used by tuplesort.c.
921  * Since the average initial run should be about twice work_mem, we have
922  *              disk traffic = 2 * relsize * ceil(logM(relsize / (2*work_mem)))
923  *              cpu = comparison_cost * t * log2(t)
924  *
925  * The disk traffic is assumed to be 3/4ths sequential and 1/4th random
926  * accesses (XXX can't we refine that guess?)
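 *
 * For example, an input of 100 * work_mem yields an initial-run estimate of
 * r = 100/2 = 50; with a hypothetical merge order of M = 10 that means
 * ceil(log10(50)) = 2 merge passes, i.e. each page is written and read
 * about twice.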
927  *
928  * We charge two operator evals per tuple comparison, which should be in
929  * the right ballpark in most cases.
930  *
931  * 'pathkeys' is a list of sort keys
932  * 'input_cost' is the total cost for reading the input data
933  * 'tuples' is the number of tuples in the relation
934  * 'width' is the average tuple width in bytes
935  *
936  * NOTE: some callers currently pass NIL for pathkeys because they
937  * can't conveniently supply the sort keys.  Since this routine doesn't
938  * currently do anything with pathkeys anyway, that doesn't matter...
939  * but if it ever does, it should react gracefully to lack of key data.
940  * (Actually, the thing we'd most likely be interested in is just the number
941  * of sort keys, which all callers *could* supply.)
942  */
943 void
944 cost_sort(Path *path, PlannerInfo *root,
945                   List *pathkeys, Cost input_cost, double tuples, int width)
946 {
947         Cost            startup_cost = input_cost;
948         Cost            run_cost = 0;
949         double          nbytes = relation_byte_size(tuples, width);
950         long            work_mem_bytes = work_mem * 1024L;
951
952         if (!enable_sort)
953                 startup_cost += disable_cost;
954
955         /*
956          * We want to be sure the cost of a sort is never estimated as zero, even
957          * if passed-in tuple count is zero.  Besides, mustn't do log(0)...
958          */
959         if (tuples < 2.0)
960                 tuples = 2.0;
961
962         /*
963          * CPU costs
964          *
965          * Assume about two operator evals per tuple comparison and N log2 N
966          * comparisons
967          */
968         startup_cost += 2.0 * cpu_operator_cost * tuples * LOG2(tuples);
969
970         /* disk costs */
971         if (nbytes > work_mem_bytes)
972         {
973                 double          npages = ceil(nbytes / BLCKSZ);
974                 double          nruns = (nbytes / work_mem_bytes) * 0.5;
975                 double          mergeorder = tuplesort_merge_order(work_mem_bytes);
976                 double          log_runs;
977                 double          npageaccesses;
978
979                 /* Compute logM(r) as log(r) / log(M) */
980                 if (nruns > mergeorder)
981                         log_runs = ceil(log(nruns) / log(mergeorder));
982                 else
983                         log_runs = 1.0;
984                 npageaccesses = 2.0 * npages * log_runs;
985                 /* Assume 3/4ths of accesses are sequential, 1/4th are not */
986                 startup_cost += npageaccesses *
987                         (seq_page_cost * 0.75 + random_page_cost * 0.25);
988         }
989
990         /*
991          * Also charge a small amount (arbitrarily set equal to operator cost) per
992          * extracted tuple.
993          */
994         run_cost += cpu_operator_cost * tuples;
995
996         path->startup_cost = startup_cost;
997         path->total_cost = startup_cost + run_cost;
998 }
999
1000 /*
1001  * cost_material
1002  *        Determines and returns the cost of materializing a relation, including
1003  *        the cost of reading the input data.
1004  *
1005  * If the total volume of data to materialize exceeds work_mem, we will need
1006  * to write it to disk, so the cost is much higher in that case.
1007  */
1008 void
1009 cost_material(Path *path,
1010                           Cost input_cost, double tuples, int width)
1011 {
1012         Cost            startup_cost = input_cost;
1013         Cost            run_cost = 0;
1014         double          nbytes = relation_byte_size(tuples, width);
1015         long            work_mem_bytes = work_mem * 1024L;
1016
1017         /* disk costs */
1018         if (nbytes > work_mem_bytes)
1019         {
1020                 double          npages = ceil(nbytes / BLCKSZ);
1021
1022                 /* We'll write during startup and read during retrieval */
1023                 startup_cost += seq_page_cost * npages;
1024                 run_cost += seq_page_cost * npages;
1025         }
1026
1027         /*
1028          * Charge a very small amount per inserted tuple, to reflect bookkeeping
1029          * costs.  We use cpu_tuple_cost/10 for this.  This is needed to break the
1030          * tie that would otherwise exist between nestloop with A outer,
1031          * materialized B inner and nestloop with B outer, materialized A inner.
1032          * The extra cost ensures we'll prefer materializing the smaller rel.
1033          */
1034         startup_cost += cpu_tuple_cost * 0.1 * tuples;
1035
1036         /*
1037          * Also charge a small amount per extracted tuple.  We use cpu_tuple_cost
1038          * so that it doesn't appear worthwhile to materialize a bare seqscan.
1039          */
1040         run_cost += cpu_tuple_cost * tuples;
1041
1042         path->startup_cost = startup_cost;
1043         path->total_cost = startup_cost + run_cost;
1044 }
1045
1046 /*
1047  * cost_agg
1048  *              Determines and returns the cost of performing an Agg plan node,
1049  *              including the cost of its input.
1050  *
1051  * Note: when aggstrategy == AGG_SORTED, caller must ensure that input costs
1052  * are for appropriately-sorted input.
1053  */
1054 void
1055 cost_agg(Path *path, PlannerInfo *root,
1056                  AggStrategy aggstrategy, int numAggs,
1057                  int numGroupCols, double numGroups,
1058                  Cost input_startup_cost, Cost input_total_cost,
1059                  double input_tuples)
1060 {
1061         Cost            startup_cost;
1062         Cost            total_cost;
1063
1064         /*
1065          * We charge one cpu_operator_cost per aggregate function per input tuple,
1066          * and another one per output tuple (corresponding to transfn and finalfn
1067          * calls respectively).  If we are grouping, we charge an additional
1068          * cpu_operator_cost per grouping column per input tuple for grouping
1069          * comparisons.
1070          *
1071          * We will produce a single output tuple if not grouping, and a tuple per
1072          * group otherwise.  We charge cpu_tuple_cost for each output tuple.
1073          *
1074          * Note: in this cost model, AGG_SORTED and AGG_HASHED have exactly the
1075          * same total CPU cost, but AGG_SORTED has lower startup cost.  If the
1076          * input path is already sorted appropriately, AGG_SORTED should be
1077          * preferred (since it has no risk of memory overflow).  This will happen
1078          * as long as the computed total costs are indeed exactly equal --- but if
1079          * there's roundoff error we might do the wrong thing.  So be sure that
1080          * the computations below form the same intermediate values in the same
1081          * order.
1082          *
1083          * Note: ideally we should use the pg_proc.procost costs of each
1084          * aggregate's component functions, but for now that seems like an
1085          * excessive amount of work.
1086          */
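        /*
         * For instance, hashing 10000 input tuples into 100 groups with one
         * grouping column and two aggregates, at the default
         * cpu_operator_cost = 0.0025 and cpu_tuple_cost = 0.01, adds
         * 0.0025 * 10000 * (1 + 2) = 75 to startup cost and a further
         * 0.0025 * 100 * 2 + 0.01 * 100 = 1.5 to total cost.
         */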
1087         if (aggstrategy == AGG_PLAIN)
1088         {
1089                 startup_cost = input_total_cost;
1090                 startup_cost += cpu_operator_cost * (input_tuples + 1) * numAggs;
1091                 /* we aren't grouping */
1092                 total_cost = startup_cost + cpu_tuple_cost;
1093         }
1094         else if (aggstrategy == AGG_SORTED)
1095         {
1096                 /* Here we are able to deliver output on-the-fly */
1097                 startup_cost = input_startup_cost;
1098                 total_cost = input_total_cost;
1099                 /* calcs phrased this way to match HASHED case, see note above */
1100                 total_cost += cpu_operator_cost * input_tuples * numGroupCols;
1101                 total_cost += cpu_operator_cost * input_tuples * numAggs;
1102                 total_cost += cpu_operator_cost * numGroups * numAggs;
1103                 total_cost += cpu_tuple_cost * numGroups;
1104         }
1105         else
1106         {
1107                 /* must be AGG_HASHED */
1108                 startup_cost = input_total_cost;
1109                 startup_cost += cpu_operator_cost * input_tuples * numGroupCols;
1110                 startup_cost += cpu_operator_cost * input_tuples * numAggs;
1111                 total_cost = startup_cost;
1112                 total_cost += cpu_operator_cost * numGroups * numAggs;
1113                 total_cost += cpu_tuple_cost * numGroups;
1114         }
1115
1116         path->startup_cost = startup_cost;
1117         path->total_cost = total_cost;
1118 }
1119
1120 /*
1121  * cost_group
1122  *              Determines and returns the cost of performing a Group plan node,
1123  *              including the cost of its input.
1124  *
1125  * Note: caller must ensure that input costs are for appropriately-sorted
1126  * input.
1127  */
1128 void
1129 cost_group(Path *path, PlannerInfo *root,
1130                    int numGroupCols, double numGroups,
1131                    Cost input_startup_cost, Cost input_total_cost,
1132                    double input_tuples)
1133 {
1134         Cost            startup_cost;
1135         Cost            total_cost;
1136
1137         startup_cost = input_startup_cost;
1138         total_cost = input_total_cost;
1139
1140         /*
1141          * Charge one cpu_operator_cost per comparison per input tuple. We assume
1142          * all columns get compared for most of the tuples.
1143          */
1144         total_cost += cpu_operator_cost * input_tuples * numGroupCols;
1145
1146         path->startup_cost = startup_cost;
1147         path->total_cost = total_cost;
1148 }
1149
1150 /*
1151  * If a nestloop's inner path is an indexscan, be sure to use its estimated
1152  * output row count, which may be lower than the restriction-clause-only row
1153  * count of its parent.  (We don't include this case in the PATH_ROWS macro
1154  * because it applies *only* to a nestloop's inner relation.)  We have to
1155  * be prepared to recurse through Append nodes in case of an appendrel.
1156  */
1157 static double
1158 nestloop_inner_path_rows(Path *path)
1159 {
1160         double          result;
1161
1162         if (IsA(path, IndexPath))
1163                 result = ((IndexPath *) path)->rows;
1164         else if (IsA(path, BitmapHeapPath))
1165                 result = ((BitmapHeapPath *) path)->rows;
1166         else if (IsA(path, AppendPath))
1167         {
1168                 ListCell   *l;
1169
1170                 result = 0;
1171                 foreach(l, ((AppendPath *) path)->subpaths)
1172                 {
1173                         result += nestloop_inner_path_rows((Path *) lfirst(l));
1174                 }
1175         }
1176         else
1177                 result = PATH_ROWS(path);
1178
1179         return result;
1180 }
1181
1182 /*
1183  * cost_nestloop
1184  *        Determines and returns the cost of joining two relations using the
1185  *        nested loop algorithm.
1186  *
1187  * 'path' is already filled in except for the cost fields
1188  */
1189 void
1190 cost_nestloop(NestPath *path, PlannerInfo *root)
1191 {
1192         Path       *outer_path = path->outerjoinpath;
1193         Path       *inner_path = path->innerjoinpath;
1194         Cost            startup_cost = 0;
1195         Cost            run_cost = 0;
1196         Cost            cpu_per_tuple;
1197         QualCost        restrict_qual_cost;
1198         double          outer_path_rows = PATH_ROWS(outer_path);
1199         double          inner_path_rows = nestloop_inner_path_rows(inner_path);
1200         double          ntuples;
1201         Selectivity joininfactor;
1202
1203         if (!enable_nestloop)
1204                 startup_cost += disable_cost;
1205
1206         /*
1207          * If we're doing JOIN_IN then we will stop scanning inner tuples for an
1208          * outer tuple as soon as we have one match.  Account for the effects of
1209          * this by scaling down the cost estimates in proportion to the JOIN_IN
1210          * selectivity.  (This assumes that all the quals attached to the join are
1211          * IN quals, which should be true.)
1212          */
1213         joininfactor = join_in_selectivity(path, root);
1214
1215         /* cost of source data */
1216
1217         /*
1218          * NOTE: clearly, we must pay both outer and inner paths' startup_cost
1219          * before we can start returning tuples, so the join's startup cost is
1220          * their sum.  What's not so clear is whether the inner path's
1221          * startup_cost must be paid again on each rescan of the inner path. This
1222          * is not true if the inner path is materialized or is a hashjoin, but
1223          * probably is true otherwise.
1224          */
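        /*
         * For example, with 100 outer rows and an unmaterialized inner path
         * having startup_cost = 1 and total_cost = 5, we pay the inner
         * startup once here, (100 - 1) * 1 = 99 below for the repeated
         * startups, and 100 * (5 - 1) = 400 (times joininfactor) for the
         * per-iteration run cost.
         */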
1225         startup_cost += outer_path->startup_cost + inner_path->startup_cost;
1226         run_cost += outer_path->total_cost - outer_path->startup_cost;
1227         if (IsA(inner_path, MaterialPath) ||
1228                 IsA(inner_path, HashPath))
1229         {
1230                 /* charge only run cost for each iteration of inner path */
1231         }
1232         else
1233         {
1234                 /*
1235                  * charge startup cost for each iteration of inner path, except we
1236                  * already charged the first startup_cost in our own startup
1237                  */
1238                 run_cost += (outer_path_rows - 1) * inner_path->startup_cost;
1239         }
1240         run_cost += outer_path_rows *
1241                 (inner_path->total_cost - inner_path->startup_cost) * joininfactor;
1242
1243         /*
1244          * Compute number of tuples processed (not number emitted!)
1245          */
1246         ntuples = outer_path_rows * inner_path_rows * joininfactor;
1247
1248         /* CPU costs */
1249         cost_qual_eval(&restrict_qual_cost, path->joinrestrictinfo, root);
1250         startup_cost += restrict_qual_cost.startup;
1251         cpu_per_tuple = cpu_tuple_cost + restrict_qual_cost.per_tuple;
1252         run_cost += cpu_per_tuple * ntuples;
1253
1254         path->path.startup_cost = startup_cost;
1255         path->path.total_cost = startup_cost + run_cost;
1256 }
1257
1258 /*
1259  * cost_mergejoin
1260  *        Determines and returns the cost of joining two relations using the
1261  *        merge join algorithm.
1262  *
1263  * 'path' is already filled in except for the cost fields
1264  *
1265  * Notes: path's mergeclauses should be a subset of the joinrestrictinfo list;
1266  * outersortkeys and innersortkeys are lists of the keys to be used
1267  * to sort the outer and inner relations, or NIL if no explicit
1268  * sort is needed because the source path is already ordered.
1269  */
1270 void
1271 cost_mergejoin(MergePath *path, PlannerInfo *root)
1272 {
1273         Path       *outer_path = path->jpath.outerjoinpath;
1274         Path       *inner_path = path->jpath.innerjoinpath;
1275         List       *mergeclauses = path->path_mergeclauses;
1276         List       *outersortkeys = path->outersortkeys;
1277         List       *innersortkeys = path->innersortkeys;
1278         Cost            startup_cost = 0;
1279         Cost            run_cost = 0;
1280         Cost            cpu_per_tuple;
1281         Selectivity merge_selec;
1282         QualCost        merge_qual_cost;
1283         QualCost        qp_qual_cost;
1284         double          outer_path_rows = PATH_ROWS(outer_path);
1285         double          inner_path_rows = PATH_ROWS(inner_path);
1286         double          outer_rows,
1287                                 inner_rows;
1288         double          mergejointuples,
1289                                 rescannedtuples;
1290         double          rescanratio;
1291         Selectivity outerscansel,
1292                                 innerscansel;
1293         Selectivity joininfactor;
1294         Path            sort_path;              /* dummy for result of cost_sort */
1295
1296         if (!enable_mergejoin)
1297                 startup_cost += disable_cost;
1298
1299         /*
1300          * Compute cost and selectivity of the mergequals and qpquals (other
1301          * restriction clauses) separately.  We use approx_selectivity here for
1302          * speed --- in most cases, any errors won't affect the result much.
1303          *
1304          * Note: it's probably bogus to use the normal selectivity calculation
1305          * here when either the outer or inner path is a UniquePath.
1306          */
1307         merge_selec = approx_selectivity(root, mergeclauses,
1308                                                                          path->jpath.jointype);
1309         cost_qual_eval(&merge_qual_cost, mergeclauses, root);
1310         cost_qual_eval(&qp_qual_cost, path->jpath.joinrestrictinfo, root);
1311         qp_qual_cost.startup -= merge_qual_cost.startup;
1312         qp_qual_cost.per_tuple -= merge_qual_cost.per_tuple;
1313
1314         /* approx # tuples passing the merge quals */
1315         mergejointuples = clamp_row_est(merge_selec * outer_path_rows * inner_path_rows);
1316
1317         /*
1318          * When there are equal merge keys in the outer relation, the mergejoin
1319          * must rescan any matching tuples in the inner relation. This means
1320          * re-fetching inner tuples.  Our cost model for this is that a re-fetch
1321          * costs the same as an original fetch, which is probably an overestimate;
1322          * but on the other hand we ignore the bookkeeping costs of mark/restore.
1323          * Not clear if it's worth developing a more refined model.
1324          *
1325          * The number of re-fetches can be estimated approximately as size of
1326          * merge join output minus size of inner relation.      Assume that the
1327          * distinct key values are 1, 2, ..., and denote the number of values of
1328          * each key in the outer relation as m1, m2, ...; in the inner relation,
1329          * n1, n2, ... Then we have
1330          *
1331          * size of join = m1 * n1 + m2 * n2 + ...
1332          *
1333          * number of rescanned tuples = (m1 - 1) * n1 + (m2 - 1) * n2 + ...
1334          *              = m1 * n1 + m2 * n2 + ... - (n1 + n2 + ...)
1335          *              = size of join - size of inner relation
1336          *
1337          * This equation works correctly for outer tuples having no inner match
1338          * (nk = 0), but not for inner tuples having no outer match (mk = 0); we
1339          * are effectively subtracting those from the number of rescanned tuples,
1340          * when we should not.  Can we do better without expensive selectivity
1341          * computations?
1342          */
1343         if (IsA(outer_path, UniquePath))
1344                 rescannedtuples = 0;
1345         else
1346         {
1347                 rescannedtuples = mergejointuples - inner_path_rows;
1348                 /* Must clamp because of possible underestimate */
1349                 if (rescannedtuples < 0)
1350                         rescannedtuples = 0;
1351         }
1352         /* We'll inflate inner run cost this much to account for rescanning */
1353         rescanratio = 1.0 + (rescannedtuples / inner_path_rows);
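        /*
         * For illustration, with made-up numbers: suppose the outer side has two
         * distinct key values appearing 2 and 3 times (m1 = 2, m2 = 3) and the
         * inner side has them 4 and 1 times (n1 = 4, n2 = 1).  Then the join
         * produces 2*4 + 3*1 = 11 tuples, of which (2-1)*4 + (3-1)*1 = 6 are
         * re-fetches of inner tuples, matching 11 - 5 = size of join - size of
         * inner relation; the resulting rescanratio is 1 + 6/5 = 2.2.
         */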
1354
1355         /*
1356          * A merge join will stop as soon as it exhausts either input stream
1357          * (unless it's an outer join, in which case the outer side has to be
1358          * scanned all the way anyway).  Estimate fraction of the left and right
1359          * inputs that will actually need to be scanned. We use only the first
1360          * (most significant) merge clause for this purpose.  Since
1361          * mergejoinscansel() is a fairly expensive computation, we cache the
1362          * results in the merge clause RestrictInfo.
1363          */
1364         if (mergeclauses && path->jpath.jointype != JOIN_FULL)
1365         {
1366                 RestrictInfo *firstclause = (RestrictInfo *) linitial(mergeclauses);
1367                 List       *opathkeys;
1368                 List       *ipathkeys;
1369                 PathKey    *opathkey;
1370                 PathKey    *ipathkey;
1371                 MergeScanSelCache *cache;
1372
1373                 /* Get the input pathkeys to determine the sort-order details */
1374                 opathkeys = outersortkeys ? outersortkeys : outer_path->pathkeys;
1375                 ipathkeys = innersortkeys ? innersortkeys : inner_path->pathkeys;
1376                 Assert(opathkeys);
1377                 Assert(ipathkeys);
1378                 opathkey = (PathKey *) linitial(opathkeys);
1379                 ipathkey = (PathKey *) linitial(ipathkeys);
1380                 /* debugging check */
1381                 if (opathkey->pk_opfamily != ipathkey->pk_opfamily ||
1382                         opathkey->pk_strategy != ipathkey->pk_strategy ||
1383                         opathkey->pk_nulls_first != ipathkey->pk_nulls_first)
1384                         elog(ERROR, "left and right pathkeys do not match in mergejoin");
1385
1386                 /* Get the selectivity with caching */
1387                 cache = cached_scansel(root, firstclause, opathkey);
1388
1389                 if (bms_is_subset(firstclause->left_relids,
1390                                                   outer_path->parent->relids))
1391                 {
1392                         /* left side of clause is outer */
1393                         outerscansel = cache->leftscansel;
1394                         innerscansel = cache->rightscansel;
1395                 }
1396                 else
1397                 {
1398                         /* left side of clause is inner */
1399                         outerscansel = cache->rightscansel;
1400                         innerscansel = cache->leftscansel;
1401                 }
1402                 if (path->jpath.jointype == JOIN_LEFT)
1403                         outerscansel = 1.0;
1404                 else if (path->jpath.jointype == JOIN_RIGHT)
1405                         innerscansel = 1.0;
1406         }
1407         else
1408         {
1409                 /* cope with clauseless or full mergejoin */
1410                 outerscansel = innerscansel = 1.0;
1411         }
1412
1413         /* convert selectivity to row count; must scan at least one row */
1414         outer_rows = clamp_row_est(outer_path_rows * outerscansel);
1415         inner_rows = clamp_row_est(inner_path_rows * innerscansel);
1416
1417         /*
1418          * Readjust scan selectivities to account for above rounding.  This is
1419          * normally an insignificant effect, but when there are only a few rows in
1420          * the inputs, failing to do this makes for a large percentage error.
1421          */
1422         outerscansel = outer_rows / outer_path_rows;
1423         innerscansel = inner_rows / inner_path_rows;
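        /*
         * For example (hypothetical values): with outer_path_rows = 4 and an
         * initial outerscansel of 0.1, clamp_row_est(0.4) rounds outer_rows up
         * to 1, so outerscansel is readjusted to 1/4 = 0.25; without the
         * readjustment the cost of scanning the outer input would be
         * understated by a factor of 2.5.
         */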
1424
1425         /* cost of source data */
1426
1427         if (outersortkeys)                      /* do we need to sort outer? */
1428         {
1429                 cost_sort(&sort_path,
1430                                   root,
1431                                   outersortkeys,
1432                                   outer_path->total_cost,
1433                                   outer_path_rows,
1434                                   outer_path->parent->width);
1435                 startup_cost += sort_path.startup_cost;
1436                 run_cost += (sort_path.total_cost - sort_path.startup_cost)
1437                         * outerscansel;
1438         }
1439         else
1440         {
1441                 startup_cost += outer_path->startup_cost;
1442                 run_cost += (outer_path->total_cost - outer_path->startup_cost)
1443                         * outerscansel;
1444         }
1445
1446         if (innersortkeys)                      /* do we need to sort inner? */
1447         {
1448                 cost_sort(&sort_path,
1449                                   root,
1450                                   innersortkeys,
1451                                   inner_path->total_cost,
1452                                   inner_path_rows,
1453                                   inner_path->parent->width);
1454                 startup_cost += sort_path.startup_cost;
1455                 run_cost += (sort_path.total_cost - sort_path.startup_cost)
1456                         * innerscansel * rescanratio;
1457         }
1458         else
1459         {
1460                 startup_cost += inner_path->startup_cost;
1461                 run_cost += (inner_path->total_cost - inner_path->startup_cost)
1462                         * innerscansel * rescanratio;
1463         }
1464
1465         /* CPU costs */
1466
1467         /*
1468          * If we're doing JOIN_IN then we will stop outputting inner tuples for an
1469          * outer tuple as soon as we have one match.  Account for the effects of
1470          * this by scaling down the cost estimates in proportion to the expected
1471          * output size.  (This assumes that all the quals attached to the join are
1472          * IN quals, which should be true.)
1473          */
1474         joininfactor = join_in_selectivity(&path->jpath, root);
1475
1476         /*
1477          * The number of tuple comparisons needed is approximately the number of outer
1478          * rows plus the number of inner rows plus the number of rescanned tuples (can
1479          * we refine this?).  At each one, we need to evaluate the mergejoin quals.
1480          * NOTE: JOIN_IN mode does not save any work here, so do NOT include
1481          * joininfactor.
1482          */
1483         startup_cost += merge_qual_cost.startup;
1484         run_cost += merge_qual_cost.per_tuple *
1485                 (outer_rows + inner_rows * rescanratio);
1486
1487         /*
1488          * For each tuple that gets through the mergejoin proper, we charge
1489          * cpu_tuple_cost plus the cost of evaluating additional restriction
1490          * clauses that are to be applied at the join.  (This is pessimistic since
1491          * not all of the quals may get evaluated at each tuple.)  This work is
1492          * skipped in JOIN_IN mode, so apply the factor.
1493          */
1494         startup_cost += qp_qual_cost.startup;
1495         cpu_per_tuple = cpu_tuple_cost + qp_qual_cost.per_tuple;
1496         run_cost += cpu_per_tuple * mergejointuples * joininfactor;
1497
1498         path->jpath.path.startup_cost = startup_cost;
1499         path->jpath.path.total_cost = startup_cost + run_cost;
1500 }
1501
1502 /*
1503  * run mergejoinscansel() with caching
1504  */
1505 static MergeScanSelCache *
1506 cached_scansel(PlannerInfo *root, RestrictInfo *rinfo, PathKey *pathkey)
1507 {
1508         MergeScanSelCache *cache;
1509         ListCell   *lc;
1510         Selectivity leftscansel,
1511                                 rightscansel;
1512         MemoryContext oldcontext;
1513
1514         /* Do we have this result already? */
1515         foreach(lc, rinfo->scansel_cache)
1516         {
1517                 cache = (MergeScanSelCache *) lfirst(lc);
1518                 if (cache->opfamily == pathkey->pk_opfamily &&
1519                         cache->strategy == pathkey->pk_strategy &&
1520                         cache->nulls_first == pathkey->pk_nulls_first)
1521                         return cache;
1522         }
1523
1524         /* Nope, do the computation */
1525         mergejoinscansel(root,
1526                                          (Node *) rinfo->clause,
1527                                          pathkey->pk_opfamily,
1528                                          pathkey->pk_strategy,
1529                                          pathkey->pk_nulls_first,
1530                                          &leftscansel,
1531                                          &rightscansel);
1532
1533         /* Cache the result in suitably long-lived workspace */
1534         oldcontext = MemoryContextSwitchTo(root->planner_cxt);
1535
1536         cache = (MergeScanSelCache *) palloc(sizeof(MergeScanSelCache));
1537         cache->opfamily = pathkey->pk_opfamily;
1538         cache->strategy = pathkey->pk_strategy;
1539         cache->nulls_first = pathkey->pk_nulls_first;
1540         cache->leftscansel = leftscansel;
1541         cache->rightscansel = rightscansel;
1542
1543         rinfo->scansel_cache = lappend(rinfo->scansel_cache, cache);
1544
1545         MemoryContextSwitchTo(oldcontext);
1546
1547         return cache;
1548 }
1549
1550 /*
1551  * cost_hashjoin
1552  *        Determines and returns the cost of joining two relations using the
1553  *        hash join algorithm.
1554  *
1555  * 'path' is already filled in except for the cost fields
1556  *
1557  * Note: path's hashclauses should be a subset of the joinrestrictinfo list
1558  */
1559 void
1560 cost_hashjoin(HashPath *path, PlannerInfo *root)
1561 {
1562         Path       *outer_path = path->jpath.outerjoinpath;
1563         Path       *inner_path = path->jpath.innerjoinpath;
1564         List       *hashclauses = path->path_hashclauses;
1565         Cost            startup_cost = 0;
1566         Cost            run_cost = 0;
1567         Cost            cpu_per_tuple;
1568         Selectivity hash_selec;
1569         QualCost        hash_qual_cost;
1570         QualCost        qp_qual_cost;
1571         double          hashjointuples;
1572         double          outer_path_rows = PATH_ROWS(outer_path);
1573         double          inner_path_rows = PATH_ROWS(inner_path);
1574         int                     num_hashclauses = list_length(hashclauses);
1575         int                     numbuckets;
1576         int                     numbatches;
1577         double          virtualbuckets;
1578         Selectivity innerbucketsize;
1579         Selectivity joininfactor;
1580         ListCell   *hcl;
1581
1582         if (!enable_hashjoin)
1583                 startup_cost += disable_cost;
1584
1585         /*
1586          * Compute cost and selectivity of the hashquals and qpquals (other
1587          * restriction clauses) separately.  We use approx_selectivity here for
1588          * speed --- in most cases, any errors won't affect the result much.
1589          *
1590          * Note: it's probably bogus to use the normal selectivity calculation
1591          * here when either the outer or inner path is a UniquePath.
1592          */
1593         hash_selec = approx_selectivity(root, hashclauses,
1594                                                                         path->jpath.jointype);
1595         cost_qual_eval(&hash_qual_cost, hashclauses, root);
1596         cost_qual_eval(&qp_qual_cost, path->jpath.joinrestrictinfo, root);
1597         qp_qual_cost.startup -= hash_qual_cost.startup;
1598         qp_qual_cost.per_tuple -= hash_qual_cost.per_tuple;
1599
1600         /* approx # tuples passing the hash quals */
1601         hashjointuples = clamp_row_est(hash_selec * outer_path_rows * inner_path_rows);
1602
1603         /* cost of source data */
1604         startup_cost += outer_path->startup_cost;
1605         run_cost += outer_path->total_cost - outer_path->startup_cost;
1606         startup_cost += inner_path->total_cost;
1607
1608         /*
1609          * Cost of computing hash function: must do it once per input tuple. We
1610          * charge one cpu_operator_cost for each column's hash function.  Also,
1611          * tack on one cpu_tuple_cost per inner row, to model the costs of
1612          * inserting the row into the hashtable.
1613          *
1614          * XXX when a hashclause is more complex than a single operator, we really
1615          * should charge the extra eval costs of the left or right side, as
1616          * appropriate, here.  This seems more work than it's worth at the moment.
1617          */
1618         startup_cost += (cpu_operator_cost * num_hashclauses + cpu_tuple_cost)
1619                 * inner_path_rows;
1620         run_cost += cpu_operator_cost * num_hashclauses * outer_path_rows;
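        /*
         * For example, at the default parameter settings (cpu_operator_cost =
         * 0.0025, cpu_tuple_cost = 0.01), two hashclauses, 1000 inner rows and
         * 10000 outer rows (all hypothetical) would add (0.0025*2 + 0.01) * 1000
         * = 15.0 to startup cost and 0.0025 * 2 * 10000 = 50.0 to run cost.
         */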
1621
1622         /* Get hash table size that executor would use for inner relation */
1623         ExecChooseHashTableSize(inner_path_rows,
1624                                                         inner_path->parent->width,
1625                                                         &numbuckets,
1626                                                         &numbatches);
1627         virtualbuckets = (double) numbuckets * (double) numbatches;
1628
1629         /*
1630          * Determine bucketsize fraction for inner relation.  We use the smallest
1631          * bucketsize estimated for any individual hashclause; this is undoubtedly
1632          * conservative.
1633          *
1634          * BUT: if inner relation has been unique-ified, we can assume it's good
1635          * for hashing.  This is important both because it's the right answer, and
1636          * because we avoid contaminating the cache with a value that's wrong for
1637          * non-unique-ified paths.
1638          */
1639         if (IsA(inner_path, UniquePath))
1640                 innerbucketsize = 1.0 / virtualbuckets;
1641         else
1642         {
1643                 innerbucketsize = 1.0;
1644                 foreach(hcl, hashclauses)
1645                 {
1646                         RestrictInfo *restrictinfo = (RestrictInfo *) lfirst(hcl);
1647                         Selectivity thisbucketsize;
1648
1649                         Assert(IsA(restrictinfo, RestrictInfo));
1650
1651                         /*
1652                          * First we have to figure out which side of the hashjoin clause
1653                          * is the inner side.
1654                          *
1655                          * Since we tend to visit the same clauses over and over when
1656                          * planning a large query, we cache the bucketsize estimate in the
1657                          * RestrictInfo node to avoid repeated lookups of statistics.
1658                          */
1659                         if (bms_is_subset(restrictinfo->right_relids,
1660                                                           inner_path->parent->relids))
1661                         {
1662                                 /* righthand side is inner */
1663                                 thisbucketsize = restrictinfo->right_bucketsize;
1664                                 if (thisbucketsize < 0)
1665                                 {
1666                                         /* not cached yet */
1667                                         thisbucketsize =
1668                                                 estimate_hash_bucketsize(root,
1669                                                                                    get_rightop(restrictinfo->clause),
1670                                                                                                  virtualbuckets);
1671                                         restrictinfo->right_bucketsize = thisbucketsize;
1672                                 }
1673                         }
1674                         else
1675                         {
1676                                 Assert(bms_is_subset(restrictinfo->left_relids,
1677                                                                          inner_path->parent->relids));
1678                                 /* lefthand side is inner */
1679                                 thisbucketsize = restrictinfo->left_bucketsize;
1680                                 if (thisbucketsize < 0)
1681                                 {
1682                                         /* not cached yet */
1683                                         thisbucketsize =
1684                                                 estimate_hash_bucketsize(root,
1685                                                                                         get_leftop(restrictinfo->clause),
1686                                                                                                  virtualbuckets);
1687                                         restrictinfo->left_bucketsize = thisbucketsize;
1688                                 }
1689                         }
1690
1691                         if (innerbucketsize > thisbucketsize)
1692                                 innerbucketsize = thisbucketsize;
1693                 }
1694         }
1695
1696         /*
1697          * If inner relation is too big then we will need to "batch" the join,
1698          * which implies writing and reading most of the tuples to disk an extra
1699          * time.  Charge seq_page_cost per page, since the I/O should be nice and
1700          * sequential.  Writing the inner rel counts as startup cost,
1701          * all the rest as run cost.
1702          */
1703         if (numbatches > 1)
1704         {
1705                 double          outerpages = page_size(outer_path_rows,
1706                                                                                    outer_path->parent->width);
1707                 double          innerpages = page_size(inner_path_rows,
1708                                                                                    inner_path->parent->width);
1709
1710                 startup_cost += seq_page_cost * innerpages;
1711                 run_cost += seq_page_cost * (innerpages + 2 * outerpages);
1712         }
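        /*
         * For illustration, with hypothetical sizes: if batching is needed and
         * the inner and outer relations occupy 1000 and 5000 pages respectively,
         * then at the default seq_page_cost of 1.0 we add 1000 to startup cost
         * (writing out the inner batches) and 1000 + 2 * 5000 = 11000 to run
         * cost (re-reading the inner batches plus writing and re-reading the
         * outer ones).
         */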
1713
1714         /* CPU costs */
1715
1716         /*
1717          * If we're doing JOIN_IN then we will stop comparing inner tuples to an
1718          * outer tuple as soon as we have one match.  Account for the effects of
1719          * this by scaling down the cost estimates in proportion to the expected
1720          * output size.  (This assumes that all the quals attached to the join are
1721          * IN quals, which should be true.)
1722          */
1723         joininfactor = join_in_selectivity(&path->jpath, root);
1724
1725         /*
1726          * The number of tuple comparisons needed is the number of outer tuples
1727          * times the typical number of tuples in a hash bucket, which is the inner
1728          * relation size times its bucketsize fraction.  At each one, we need to
1729          * evaluate the hashjoin quals.  But actually, charging the full qual eval
1730          * cost at each tuple is pessimistic, since we don't evaluate the quals
1731          * unless the hash values match exactly.  For lack of a better idea, halve
1732          * the cost estimate to allow for that.
1733          */
1734         startup_cost += hash_qual_cost.startup;
1735         run_cost += hash_qual_cost.per_tuple *
1736                 outer_path_rows * clamp_row_est(inner_path_rows * innerbucketsize) *
1737                 joininfactor * 0.5;
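        /*
         * For example (made-up numbers): with one simple hashclause
         * (hash_qual_cost.per_tuple = 0.0025), 10000 outer rows, 1000 inner rows
         * and innerbucketsize = 0.002, each outer tuple is compared against
         * about 2 bucket entries, so with joininfactor = 1.0 this adds
         * 0.0025 * 10000 * 2 * 0.5 = 25.0 to run cost.
         */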
1738
1739         /*
1740          * For each tuple that gets through the hashjoin proper, we charge
1741          * cpu_tuple_cost plus the cost of evaluating additional restriction
1742          * clauses that are to be applied at the join.  (This is pessimistic since
1743          * not all of the quals may get evaluated at each tuple.)
1744          */
1745         startup_cost += qp_qual_cost.startup;
1746         cpu_per_tuple = cpu_tuple_cost + qp_qual_cost.per_tuple;
1747         run_cost += cpu_per_tuple * hashjointuples * joininfactor;
1748
1749         path->jpath.path.startup_cost = startup_cost;
1750         path->jpath.path.total_cost = startup_cost + run_cost;
1751 }
1752
1753
1754 /*
1755  * cost_qual_eval
1756  *              Estimate the CPU costs of evaluating a WHERE clause.
1757  *              The input can be either an implicitly-ANDed list of boolean
1758  *              expressions, or a list of RestrictInfo nodes.  (The latter is
1759  *              preferred since it allows caching of the results.)
1760  *              The result includes both a one-time (startup) component,
1761  *              and a per-evaluation component.
1762  */
1763 void
1764 cost_qual_eval(QualCost *cost, List *quals, PlannerInfo *root)
1765 {
1766         cost_qual_eval_context context;
1767         ListCell   *l;
1768
1769         context.root = root;
1770         context.total.startup = 0;
1771         context.total.per_tuple = 0;
1772
1773         /* We don't charge any cost for the implicit ANDing at top level ... */
1774
1775         foreach(l, quals)
1776         {
1777                 Node       *qual = (Node *) lfirst(l);
1778
1779                 cost_qual_eval_walker(qual, &context);
1780         }
1781
1782         *cost = context.total;
1783 }
1784
1785 /*
1786  * cost_qual_eval_node
1787  *              As above, for a single RestrictInfo or expression.
1788  */
1789 void
1790 cost_qual_eval_node(QualCost *cost, Node *qual, PlannerInfo *root)
1791 {
1792         cost_qual_eval_context context;
1793
1794         context.root = root;
1795         context.total.startup = 0;
1796         context.total.per_tuple = 0;
1797
1798         cost_qual_eval_walker(qual, &context);
1799
1800         *cost = context.total;
1801 }
1802
1803 static bool
1804 cost_qual_eval_walker(Node *node, cost_qual_eval_context *context)
1805 {
1806         if (node == NULL)
1807                 return false;
1808
1809         /*
1810          * RestrictInfo nodes contain an eval_cost field reserved for this
1811          * routine's use, so that it's not necessary to evaluate the qual
1812          * clause's cost more than once.  If the clause's cost hasn't been
1813          * computed yet, the field's startup value will contain -1.
1814          */
1815         if (IsA(node, RestrictInfo))
1816         {
1817                 RestrictInfo *rinfo = (RestrictInfo *) node;
1818
1819                 if (rinfo->eval_cost.startup < 0)
1820                 {
1821                         cost_qual_eval_context locContext;
1822
1823                         locContext.root = context->root;
1824                         locContext.total.startup = 0;
1825                         locContext.total.per_tuple = 0;
1826                         /*
1827                          * For an OR clause, recurse into the marked-up tree so that
1828                          * we set the eval_cost for contained RestrictInfos too.
1829                          */
1830                         if (rinfo->orclause)
1831                                 cost_qual_eval_walker((Node *) rinfo->orclause, &locContext);
1832                         else
1833                                 cost_qual_eval_walker((Node *) rinfo->clause, &locContext);
1834                         /*
1835                          * If the RestrictInfo is marked pseudoconstant, it will be tested
1836                          * only once, so treat its cost as all startup cost.
1837                          */
1838                         if (rinfo->pseudoconstant)
1839                         {
1840                                 /* count one execution during startup */
1841                                 locContext.total.startup += locContext.total.per_tuple;
1842                                 locContext.total.per_tuple = 0;
1843                         }
1844                         rinfo->eval_cost = locContext.total;
1845                 }
1846                 context->total.startup += rinfo->eval_cost.startup;
1847                 context->total.per_tuple += rinfo->eval_cost.per_tuple;
1848                 /* do NOT recurse into children */
1849                 return false;
1850         }
1851
1852         /*
1853          * For each operator or function node in the given tree, we charge the
1854          * estimated execution cost given by pg_proc.procost (remember to
1855          * multiply this by cpu_operator_cost).
1856          *
1857          * Vars and Consts are charged zero, and so are boolean operators (AND,
1858          * OR, NOT). Simplistic, but a lot better than no model at all.
1859          *
1860          * Should we try to account for the possibility of short-circuit
1861          * evaluation of AND/OR?  Probably *not*, because that would make the
1862          * results depend on the clause ordering, and we are not in any position
1863          * to expect that the current ordering of the clauses is the one that's
1864          * going to end up being used.  (Is it worth applying order_qual_clauses
1865          * much earlier in the planning process to fix this?)
1866          */
1867         if (IsA(node, FuncExpr))
1868         {
1869                 context->total.per_tuple +=
1870                         get_func_cost(((FuncExpr *) node)->funcid) * cpu_operator_cost;
1871         }
1872         else if (IsA(node, OpExpr) ||
1873                          IsA(node, DistinctExpr) ||
1874                          IsA(node, NullIfExpr))
1875         {
1876                 /* rely on struct equivalence to treat these all alike */
1877                 set_opfuncid((OpExpr *) node);
1878                 context->total.per_tuple +=
1879                         get_func_cost(((OpExpr *) node)->opfuncid) * cpu_operator_cost;
1880         }
1881         else if (IsA(node, ScalarArrayOpExpr))
1882         {
1883                 /*
1884                  * Estimate that the operator will be applied to about half of the
1885                  * array elements before the answer is determined.
1886                  */
1887                 ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) node;
1888                 Node       *arraynode = (Node *) lsecond(saop->args);
1889
1890                 set_sa_opfuncid(saop);
1891                 context->total.per_tuple += get_func_cost(saop->opfuncid) *
1892                         cpu_operator_cost * estimate_array_length(arraynode) * 0.5;
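                /*
                 * For example (hypothetical): "x = ANY(arr)" over a 10-element
                 * array with a procost-1 comparison operator charges
                 * 1 * cpu_operator_cost * 10 * 0.5, i.e. 0.0125 per tuple at
                 * the default cpu_operator_cost of 0.0025.
                 */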
1893         }
1894         else if (IsA(node, RowCompareExpr))
1895         {
1896                 /* Conservatively assume we will check all the columns */
1897                 RowCompareExpr *rcexpr = (RowCompareExpr *) node;
1898                 ListCell   *lc;
1899
1900                 foreach(lc, rcexpr->opnos)
1901                 {
1902                         Oid             opid = lfirst_oid(lc);
1903
1904                         context->total.per_tuple += get_func_cost(get_opcode(opid)) *
1905                                 cpu_operator_cost;
1906                 }
1907         }
1908         else if (IsA(node, SubLink))
1909         {
1910                 /* This routine should not be applied to un-planned expressions */
1911                 elog(ERROR, "cannot handle unplanned sub-select");
1912         }
1913         else if (IsA(node, SubPlan))
1914         {
1915                 /*
1916                  * A subplan node in an expression typically indicates that the
1917                  * subplan will be executed on each evaluation, so charge accordingly.
1918                  * (Sub-selects that can be executed as InitPlans have already been
1919                  * removed from the expression.)
1920                  *
1921                  * An exception occurs when we have decided we can implement the
1922                  * subplan by hashing.
1923                  */
1924                 SubPlan    *subplan = (SubPlan *) node;
1925                 Plan       *plan = planner_subplan_get_plan(context->root, subplan);
1926
1927                 if (subplan->useHashTable)
1928                 {
1929                         /*
1930                          * If we are using a hash table for the subquery outputs, then the
1931                          * cost of evaluating the query is a one-time cost. We charge one
1932                          * cpu_operator_cost per tuple for the work of loading the
1933                          * hashtable, too.
1934                          */
1935                         context->total.startup += plan->total_cost +
1936                                 cpu_operator_cost * plan->plan_rows;
1937
1938                         /*
1939                          * The per-tuple costs include the cost of evaluating the lefthand
1940                          * expressions, plus the cost of probing the hashtable. Recursion
1941                          * into the testexpr will handle the lefthand expressions
1942                          * properly, and will count one cpu_operator_cost for each
1943                          * comparison operator.  That is probably too low for the probing
1944                          * cost, but it's hard to make a better estimate, so live with it
1945                          * for now.
1946                          */
1947                 }
1948                 else
1949                 {
1950                         /*
1951                          * Otherwise we will be rescanning the subplan output on each
1952                          * evaluation.  We need to estimate how much of the output we will
1953                          * actually need to scan.  NOTE: this logic should agree with the
1954                          * estimates used by make_subplan() in plan/subselect.c.
1955                          */
1956                         Cost            plan_run_cost = plan->total_cost - plan->startup_cost;
1957
1958                         if (subplan->subLinkType == EXISTS_SUBLINK)
1959                         {
1960                                 /* we only need to fetch 1 tuple */
1961                                 context->total.per_tuple += plan_run_cost / plan->plan_rows;
1962                         }
1963                         else if (subplan->subLinkType == ALL_SUBLINK ||
1964                                          subplan->subLinkType == ANY_SUBLINK)
1965                         {
1966                                 /* assume we need 50% of the tuples */
1967                                 context->total.per_tuple += 0.50 * plan_run_cost;
1968                                 /* also charge a cpu_operator_cost per row examined */
1969                                 context->total.per_tuple +=
1970                                         0.50 * plan->plan_rows * cpu_operator_cost;
1971                         }
1972                         else
1973                         {
1974                                 /* assume we need all tuples */
1975                                 context->total.per_tuple += plan_run_cost;
1976                         }
1977
1978                         /*
1979                          * Also account for subplan's startup cost. If the subplan is
1980                          * uncorrelated or undirect correlated, AND its topmost node is a
1981                          * Sort or Material node, assume that we'll only need to pay its
1982                          * startup cost once; otherwise assume we pay the startup cost
1983                          * every time.
1984                          */
1985                         if (subplan->parParam == NIL &&
1986                                 (IsA(plan, Sort) ||
1987                                  IsA(plan, Material)))
1988                                 context->total.startup += plan->startup_cost;
1989                         else
1990                                 context->total.per_tuple += plan->startup_cost;
1991                 }
1992         }
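        /*
         * For illustration, with hypothetical numbers: if a non-hashed subplan
         * has plan_rows = 1000 and plan_run_cost = 100.0, an EXISTS sublink is
         * charged 100.0 / 1000 = 0.1 per evaluation, while an ANY sublink is
         * charged 0.5 * 100.0 plus 0.5 * 1000 * cpu_operator_cost, i.e. 51.25
         * at the default cpu_operator_cost of 0.0025.
         */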
1993
1994         /* recurse into children */
1995         return expression_tree_walker(node, cost_qual_eval_walker,
1996                                                                   (void *) context);
1997 }
1998
1999
2000 /*
2001  * approx_selectivity
2002  *              Quick-and-dirty estimation of clause selectivities.
2003  *              The input can be either an implicitly-ANDed list of boolean
2004  *              expressions, or a list of RestrictInfo nodes (typically the latter).
2005  *
2006  * This is quick-and-dirty because we bypass clauselist_selectivity, and
2007  * simply multiply the independent clause selectivities together.  Now
2008  * clauselist_selectivity often can't do any better than that anyhow, but
2009  * for some situations (such as range constraints) it is smarter.  However,
2010  * we can't effectively cache the results of clauselist_selectivity, whereas
2011  * the individual clause selectivities can be and are cached.
2012  *
2013  * Since we are only using the results to estimate how many potential
2014  * output tuples are generated and passed through qpqual checking, it
2015  * seems OK to live with the approximation.
2016  */
2017 static Selectivity
2018 approx_selectivity(PlannerInfo *root, List *quals, JoinType jointype)
2019 {
2020         Selectivity total = 1.0;
2021         ListCell   *l;
2022
2023         foreach(l, quals)
2024         {
2025                 Node       *qual = (Node *) lfirst(l);
2026
2027                 /* Note that clause_selectivity will be able to cache its result */
2028                 total *= clause_selectivity(root, qual, 0, jointype);
2029         }
2030         return total;
2031 }
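/*
 * For example (hypothetical numbers): given two independent join quals with
 * cached selectivities 0.1 and 0.5, approx_selectivity returns 0.1 * 0.5 =
 * 0.05, whereas clauselist_selectivity might do better (but uncacheably so)
 * if the clauses happened to form a range constraint.
 */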
2032
2033
2034 /*
2035  * set_baserel_size_estimates
2036  *              Set the size estimates for the given base relation.
2037  *
2038  * The rel's targetlist and restrictinfo list must have been constructed
2039  * already.
2040  *
2041  * We set the following fields of the rel node:
2042  *      rows: the estimated number of output tuples (after applying
2043  *                restriction clauses).
2044  *      width: the estimated average output tuple width in bytes.
2045  *      baserestrictcost: estimated cost of evaluating baserestrictinfo clauses.
2046  */
2047 void
2048 set_baserel_size_estimates(PlannerInfo *root, RelOptInfo *rel)
2049 {
2050         double          nrows;
2051
2052         /* Should only be applied to base relations */
2053         Assert(rel->relid > 0);
2054
2055         nrows = rel->tuples *
2056                 clauselist_selectivity(root,
2057                                                            rel->baserestrictinfo,
2058                                                            0,
2059                                                            JOIN_INNER);
2060
2061         rel->rows = clamp_row_est(nrows);
2062
2063         cost_qual_eval(&rel->baserestrictcost, rel->baserestrictinfo, root);
2064
2065         set_rel_width(root, rel);
2066 }
2067
2068 /*
2069  * set_joinrel_size_estimates
2070  *              Set the size estimates for the given join relation.
2071  *
2072  * The rel's targetlist must have been constructed already, and a
2073  * restriction clause list that matches the given component rels must
2074  * be provided.
2075  *
2076  * Since there is more than one way to make a joinrel for more than two
2077  * base relations, the results we get here could depend on which component
2078  * rel pair is provided.  In theory we should get the same answers no matter
2079  * which pair is provided; in practice, since the selectivity estimation
2080  * routines don't handle all cases equally well, we might not.  But there's
2081  * not much to be done about it.  (Would it make sense to repeat the
2082  * calculations for each pair of input rels that's encountered, and somehow
2083  * average the results?  Probably way more trouble than it's worth.)
2084  *
2085  * It's important that the results for symmetric JoinTypes be symmetric,
2086  * eg, (rel1, rel2, JOIN_LEFT) should produce the same result as (rel2,
2087  * rel1, JOIN_RIGHT).  Also, JOIN_IN should produce the same result as
2088  * JOIN_UNIQUE_INNER, likewise JOIN_REVERSE_IN == JOIN_UNIQUE_OUTER.
2089  *
2090  * We set only the rows field here.  The width field was already set by
2091  * build_joinrel_tlist, and baserestrictcost is not used for join rels.
2092  */
2093 void
2094 set_joinrel_size_estimates(PlannerInfo *root, RelOptInfo *rel,
2095                                                    RelOptInfo *outer_rel,
2096                                                    RelOptInfo *inner_rel,
2097                                                    JoinType jointype,
2098                                                    List *restrictlist)
2099 {
2100         Selectivity jselec;
2101         Selectivity pselec;
2102         double          nrows;
2103         UniquePath *upath;
2104
2105         /*
2106          * Compute joinclause selectivity.      Note that we are only considering
2107          * clauses that become restriction clauses at this join level; we are not
2108          * double-counting them because they were not considered in estimating the
2109          * sizes of the component rels.
2110          *
2111          * For an outer join, we have to distinguish the selectivity of the
2112          * join's own clauses (JOIN/ON conditions) from any clauses that were
2113          * "pushed down".  For inner joins we just count them all as joinclauses.
2114          */
2115         if (IS_OUTER_JOIN(jointype))
2116         {
2117                 List       *joinquals = NIL;
2118                 List       *pushedquals = NIL;
2119                 ListCell   *l;
2120
2121                 /* Grovel through the clauses to separate into two lists */
2122                 foreach(l, restrictlist)
2123                 {
2124                         RestrictInfo *rinfo = (RestrictInfo *) lfirst(l);
2125
2126                         Assert(IsA(rinfo, RestrictInfo));
2127                         if (rinfo->is_pushed_down)
2128                                 pushedquals = lappend(pushedquals, rinfo);
2129                         else
2130                                 joinquals = lappend(joinquals, rinfo);
2131                 }
2132
2133                 /* Get the separate selectivities */
2134                 jselec = clauselist_selectivity(root,
2135                                                                                 joinquals,
2136                                                                                 0,
2137                                                                                 jointype);
2138                 pselec = clauselist_selectivity(root,
2139                                                                                 pushedquals,
2140                                                                                 0,
2141                                                                                 jointype);
2142
2143                 /* Avoid leaking a lot of ListCells */
2144                 list_free(joinquals);
2145                 list_free(pushedquals);
2146         }
2147         else
2148         {
2149                 jselec = clauselist_selectivity(root,
2150                                                                                 restrictlist,
2151                                                                                 0,
2152                                                                                 jointype);
2153                 pselec = 0.0;                   /* not used, keep compiler quiet */
2154         }
2155
2156         /*
2157          * Basically, we multiply size of Cartesian product by selectivity.
2158          *
2159          * If we are doing an outer join, take that into account: the joinqual
2160          * selectivity has to be clamped using the knowledge that the output must
2161          * be at least as large as the non-nullable input.  However, any
2162          * pushed-down quals are applied after the outer join, so their
2163          * selectivity applies fully.
2164          *
2165          * For JOIN_IN and variants, the Cartesian product is figured with respect
2166          * to a unique-ified input, and then we can clamp to the size of the other
2167          * input.
2168          */
2169         switch (jointype)
2170         {
2171                 case JOIN_INNER:
2172                         nrows = outer_rel->rows * inner_rel->rows * jselec;
2173                         break;
2174                 case JOIN_LEFT:
2175                         nrows = outer_rel->rows * inner_rel->rows * jselec;
2176                         if (nrows < outer_rel->rows)
2177                                 nrows = outer_rel->rows;
2178                         nrows *= pselec;
2179                         break;
2180                 case JOIN_RIGHT:
2181                         nrows = outer_rel->rows * inner_rel->rows * jselec;
2182                         if (nrows < inner_rel->rows)
2183                                 nrows = inner_rel->rows;
2184                         nrows *= pselec;
2185                         break;
2186                 case JOIN_FULL:
2187                         nrows = outer_rel->rows * inner_rel->rows * jselec;
2188                         if (nrows < outer_rel->rows)
2189                                 nrows = outer_rel->rows;
2190                         if (nrows < inner_rel->rows)
2191                                 nrows = inner_rel->rows;
2192                         nrows *= pselec;
2193                         break;
2194                 case JOIN_IN:
2195                 case JOIN_UNIQUE_INNER:
2196                         upath = create_unique_path(root, inner_rel,
2197                                                                            inner_rel->cheapest_total_path);
2198                         nrows = outer_rel->rows * upath->rows * jselec;
2199                         if (nrows > outer_rel->rows)
2200                                 nrows = outer_rel->rows;
2201                         break;
2202                 case JOIN_REVERSE_IN:
2203                 case JOIN_UNIQUE_OUTER:
2204                         upath = create_unique_path(root, outer_rel,
2205                                                                            outer_rel->cheapest_total_path);
2206                         nrows = upath->rows * inner_rel->rows * jselec;
2207                         if (nrows > inner_rel->rows)
2208                                 nrows = inner_rel->rows;
2209                         break;
2210                 default:
2211                         elog(ERROR, "unrecognized join type: %d", (int) jointype);
2212                         nrows = 0;                      /* keep compiler quiet */
2213                         break;
2214         }
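        /*
         * For illustration, using made-up numbers for a JOIN_LEFT case: with
         * outer_rel->rows = 1000, inner_rel->rows = 200 and jselec = 0.0005, the
         * raw product is 100, which is clamped up to the 1000 rows that the
         * outer side must deliver; a pushed-down qual selectivity pselec = 0.3
         * then brings the estimate down to 300 rows.
         */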
2215
2216         rel->rows = clamp_row_est(nrows);
2217 }
2218
2219 /*
2220  * join_in_selectivity
2221  *        Determines the factor by which a JOIN_IN join's result is expected
2222  *        to be smaller than an ordinary inner join.
2223  *
2224  * 'path' is already filled in except for the cost fields
2225  */
2226 static Selectivity
2227 join_in_selectivity(JoinPath *path, PlannerInfo *root)
2228 {
2229         RelOptInfo *innerrel;
2230         UniquePath *innerunique;
2231         Selectivity selec;
2232         double          nrows;
2233
2234         /* Return 1.0 whenever it's not JOIN_IN */
2235         if (path->jointype != JOIN_IN)
2236                 return 1.0;
2237
2238         /*
2239          * Return 1.0 if the inner side is already known unique.  The case where
2240          * the inner path is already a UniquePath probably cannot happen in
2241          * current usage, but check it anyway for completeness.  The interesting
2242          * case is where we've determined the inner relation itself is unique,
2243          * which we can check by looking at the rows estimate for its UniquePath.
2244          */
2245         if (IsA(path->innerjoinpath, UniquePath))
2246                 return 1.0;
2247         innerrel = path->innerjoinpath->parent;
2248         innerunique = create_unique_path(root,
2249                                                                          innerrel,
2250                                                                          innerrel->cheapest_total_path);
2251         if (innerunique->rows >= innerrel->rows)
2252                 return 1.0;
2253
2254         /*
2255          * Compute same result set_joinrel_size_estimates would compute for
2256          * JOIN_INNER.  Note that we use the input rels' absolute size estimates,
2257          * not PATH_ROWS() which might be less; if we used PATH_ROWS() we'd be
2258          * double-counting the effects of any join clauses used in input scans.
2259          */
2260         selec = clauselist_selectivity(root,
2261                                                                    path->joinrestrictinfo,
2262                                                                    0,
2263                                                                    JOIN_INNER);
2264         nrows = path->outerjoinpath->parent->rows * innerrel->rows * selec;
2265
2266         nrows = clamp_row_est(nrows);
2267
2268         /* See if it's larger than the actual JOIN_IN size estimate */
2269         if (nrows > path->path.parent->rows)
2270                 return path->path.parent->rows / nrows;
2271         else
2272                 return 1.0;
2273 }
2274
2275 /*
2276  * set_function_size_estimates
2277  *              Set the size estimates for a base relation that is a function call.
2278  *
2279  * The rel's targetlist and restrictinfo list must have been constructed
2280  * already.
2281  *
2282  * We set the same fields as set_baserel_size_estimates.
2283  */
2284 void
2285 set_function_size_estimates(PlannerInfo *root, RelOptInfo *rel)
2286 {
2287         RangeTblEntry *rte;
2288
2289         /* Should only be applied to base relations that are functions */
2290         Assert(rel->relid > 0);
2291         rte = rt_fetch(rel->relid, root->parse->rtable);
2292         Assert(rte->rtekind == RTE_FUNCTION);
2293
2294         /* Estimate number of rows the function itself will return */
2295         rel->tuples = clamp_row_est(expression_returns_set_rows(rte->funcexpr));
2296
2297         /* Now estimate number of output rows, etc */
2298         set_baserel_size_estimates(root, rel);
2299 }
2300
2301 /*
2302  * set_values_size_estimates
2303  *              Set the size estimates for a base relation that is a values list.
2304  *
2305  * The rel's targetlist and restrictinfo list must have been constructed
2306  * already.
2307  *
2308  * We set the same fields as set_baserel_size_estimates.
2309  */
2310 void
2311 set_values_size_estimates(PlannerInfo *root, RelOptInfo *rel)
2312 {
2313         RangeTblEntry *rte;
2314
2315         /* Should only be applied to base relations that are values lists */
2316         Assert(rel->relid > 0);
2317         rte = rt_fetch(rel->relid, root->parse->rtable);
2318         Assert(rte->rtekind == RTE_VALUES);
2319
2320         /*
2321          * Estimate number of rows the values list will return. We know this
2322          * precisely based on the list length (well, barring set-returning
2323          * functions in list items, but that's a refinement not catered for
2324          * anywhere else either).
2325          */
2326         rel->tuples = list_length(rte->values_lists);
2327
2328         /* Now estimate number of output rows, etc */
2329         set_baserel_size_estimates(root, rel);
2330 }
2331
2332
2333 /*
2334  * set_rel_width
2335  *              Set the estimated output width of a base relation.
2336  *
2337  * NB: this works best on plain relations because it prefers to look at
2338  * real Vars.  It will fail to make use of pg_statistic info when applied
2339  * to a subquery relation, even if the subquery outputs are simple vars
2340  * that we could have gotten info for.  Is it worth trying to be smarter
2341  * about subqueries?
2342  *
2343  * The per-attribute width estimates are cached for possible re-use while
2344  * building join relations.
2345  */
2346 static void
2347 set_rel_width(PlannerInfo *root, RelOptInfo *rel)
2348 {
2349         int32           tuple_width = 0;
2350         ListCell   *tllist;
2351
2352         foreach(tllist, rel->reltargetlist)
2353         {
2354                 Var                *var = (Var *) lfirst(tllist);
2355                 int                     ndx;
2356                 Oid                     relid;
2357                 int32           item_width;
2358
2359                 /* For now, punt on whole-row child Vars */
2360                 if (!IsA(var, Var))
2361                 {
2362                         tuple_width += 32;      /* arbitrary */
2363                         continue;
2364                 }
2365
2366                 ndx = var->varattno - rel->min_attr;
2367
2368                 /*
2369                  * The width probably hasn't been cached yet, but may as well check
2370                  */
2371                 if (rel->attr_widths[ndx] > 0)
2372                 {
2373                         tuple_width += rel->attr_widths[ndx];
2374                         continue;
2375                 }
2376
2377                 relid = getrelid(var->varno, root->parse->rtable);
2378                 if (relid != InvalidOid)
2379                 {
2380                         item_width = get_attavgwidth(relid, var->varattno);
2381                         if (item_width > 0)
2382                         {
2383                                 rel->attr_widths[ndx] = item_width;
2384                                 tuple_width += item_width;
2385                                 continue;
2386                         }
2387                 }
2388
2389                 /*
2390                  * Not a plain relation, or can't find statistics for it. Estimate
2391                  * using just the type info.
2392                  */
2393                 item_width = get_typavgwidth(var->vartype, var->vartypmod);
2394                 Assert(item_width > 0);
2395                 rel->attr_widths[ndx] = item_width;
2396                 tuple_width += item_width;
2397         }
2398         Assert(tuple_width >= 0);
2399         rel->width = tuple_width;
2400 }
2401
2402 /*
2403  * relation_byte_size
2404  *        Estimate the storage space in bytes for a given number of tuples
2405  *        of a given width (size in bytes).
2406  */
2407 static double
2408 relation_byte_size(double tuples, int width)
2409 {
2410         return tuples * (MAXALIGN(width) + MAXALIGN(sizeof(HeapTupleHeaderData)));
2411 }
2412
2413 /*
2414  * page_size
2415  *        Returns an estimate of the number of pages covered by a given
2416  *        number of tuples of a given width (size in bytes).
2417  */
2418 static double
2419 page_size(double tuples, int width)
2420 {
2421         return ceil(relation_byte_size(tuples, width) / BLCKSZ);
2422 }
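/*
 * For example (hypothetical figures): 1000 tuples of width 40 occupy about
 * 1000 * (MAXALIGN(40) + MAXALIGN(sizeof(HeapTupleHeaderData))) bytes, i.e.
 * roughly 1000 * (40 + 24) = 64000 bytes on a typical build with 8-byte
 * alignment, which page_size() turns into ceil(64000 / 8192) = 8 pages at
 * the default BLCKSZ of 8192.
 */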