1 /*-------------------------------------------------------------------------
2  *
3  * costsize.c
4  *        Routines to compute (and set) relation sizes and path costs
5  *
6  * Path costs are measured in arbitrary units established by these basic
7  * parameters:
8  *
9  *      seq_page_cost           Cost of a sequential page fetch
10  *      random_page_cost        Cost of a non-sequential page fetch
11  *      cpu_tuple_cost          Cost of typical CPU time to process a tuple
12  *      cpu_index_tuple_cost  Cost of typical CPU time to process an index tuple
13  *      cpu_operator_cost       Cost of CPU time to execute an operator or function
14  *
15  * We expect that the kernel will typically do some amount of read-ahead
16  * optimization; this in conjunction with seek costs means that seq_page_cost
17  * is normally considerably less than random_page_cost.  (However, if the
18  * database is fully cached in RAM, it is reasonable to set them equal.)
19  *
20  * We also use a rough estimate "effective_cache_size" of the number of
21  * disk pages in Postgres + OS-level disk cache.  (We can't simply use
22  * NBuffers for this purpose because that would ignore the effects of
23  * the kernel's disk cache.)
24  *
25  * Obviously, taking constants for these values is an oversimplification,
26  * but it's tough enough to get any useful estimates even at this level of
27  * detail.      Note that all of these parameters are user-settable, in case
28  * the default values are drastically off for a particular platform.
29  *
30  * We compute two separate costs for each path:
31  *              total_cost: total estimated cost to fetch all tuples
32  *              startup_cost: cost that is expended before first tuple is fetched
33  * In some scenarios, such as when there is a LIMIT or we are implementing
34  * an EXISTS(...) sub-select, it is not necessary to fetch all tuples of the
35  * path's result.  A caller can estimate the cost of fetching a partial
36  * result by interpolating between startup_cost and total_cost.  In detail:
37  *              actual_cost = startup_cost +
38  *                      (total_cost - startup_cost) * tuples_to_fetch / path->parent->rows;
39  * Note that a base relation's rows count (and, by extension, plan_rows for
40  * plan nodes below the LIMIT node) are set without regard to any LIMIT, so
41  * that this equation works properly.  (Also, these routines guarantee not to
42  * set the rows count to zero, so there will be no zero divide.)  The LIMIT is
43  * applied as a top-level plan node.
44  *
45  * For largely historical reasons, most of the routines in this module use
46  * the passed result Path only to store their startup_cost and total_cost
47  * results into.  All the input data they need is passed as separate
48  * parameters, even though much of it could be extracted from the Path.
49  * An exception is made for the cost_XXXjoin() routines, which expect all
50  * the non-cost fields of the passed XXXPath to be filled in.
51  *
52  *
53  * Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
54  * Portions Copyright (c) 1994, Regents of the University of California
55  *
56  * IDENTIFICATION
57  *        $PostgreSQL: pgsql/src/backend/optimizer/path/costsize.c,v 1.189 2007/11/15 22:25:15 momjian Exp $
58  *
59  *-------------------------------------------------------------------------
60  */
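/*
 * Worked example of the interpolation rule above (illustrative only; the
 * numbers are made up, not taken from any real plan): with
 * startup_cost = 10, total_cost = 110, and path->parent->rows = 1000,
 * fetching only 100 tuples would be costed as
 *              actual_cost = 10 + (110 - 10) * 100 / 1000 = 20,
 * i.e. the startup cost plus one tenth of the run cost.
 */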
61
62 #include "postgres.h"
63
64 #include <math.h>
65
66 #include "executor/nodeHash.h"
67 #include "miscadmin.h"
68 #include "optimizer/clauses.h"
69 #include "optimizer/cost.h"
70 #include "optimizer/pathnode.h"
71 #include "optimizer/planmain.h"
72 #include "parser/parsetree.h"
73 #include "parser/parse_expr.h"
74 #include "utils/lsyscache.h"
75 #include "utils/selfuncs.h"
76 #include "utils/tuplesort.h"
77
78
79 #define LOG2(x)  (log(x) / 0.693147180559945)
80
81 /*
82  * Some Paths return less than the nominal number of rows of their parent
83  * relations; join nodes need to do this to get the correct input count:
84  */
85 #define PATH_ROWS(path) \
86         (IsA(path, UniquePath) ? \
87          ((UniquePath *) (path))->rows : \
88          (path)->parent->rows)
89
90
91 double          seq_page_cost = DEFAULT_SEQ_PAGE_COST;
92 double          random_page_cost = DEFAULT_RANDOM_PAGE_COST;
93 double          cpu_tuple_cost = DEFAULT_CPU_TUPLE_COST;
94 double          cpu_index_tuple_cost = DEFAULT_CPU_INDEX_TUPLE_COST;
95 double          cpu_operator_cost = DEFAULT_CPU_OPERATOR_COST;
96
97 int                     effective_cache_size = DEFAULT_EFFECTIVE_CACHE_SIZE;
98
99 Cost            disable_cost = 100000000.0;
100
101 bool            enable_seqscan = true;
102 bool            enable_indexscan = true;
103 bool            enable_bitmapscan = true;
104 bool            enable_tidscan = true;
105 bool            enable_sort = true;
106 bool            enable_hashagg = true;
107 bool            enable_nestloop = true;
108 bool            enable_mergejoin = true;
109 bool            enable_hashjoin = true;
110
111 typedef struct
112 {
113         PlannerInfo *root;
114         QualCost        total;
115 } cost_qual_eval_context;
116
117 static MergeScanSelCache *cached_scansel(PlannerInfo *root,
118                            RestrictInfo *rinfo,
119                            PathKey *pathkey);
120 static bool cost_qual_eval_walker(Node *node, cost_qual_eval_context *context);
121 static Selectivity approx_selectivity(PlannerInfo *root, List *quals,
122                                    JoinType jointype);
123 static Selectivity join_in_selectivity(JoinPath *path, PlannerInfo *root);
124 static void set_rel_width(PlannerInfo *root, RelOptInfo *rel);
125 static double relation_byte_size(double tuples, int width);
126 static double page_size(double tuples, int width);
127
128
129 /*
130  * clamp_row_est
131  *              Force a row-count estimate to a sane value.
132  */
133 double
134 clamp_row_est(double nrows)
135 {
136         /*
137          * Force estimate to be at least one row, to make explain output look
138          * better and to avoid possible divide-by-zero when interpolating costs.
139          * Make it an integer, too.
140          */
141         if (nrows <= 1.0)
142                 nrows = 1.0;
143         else
144                 nrows = rint(nrows);
145
146         return nrows;
147 }
148
149
150 /*
151  * cost_seqscan
152  *        Determines and returns the cost of scanning a relation sequentially.
153  */
154 void
155 cost_seqscan(Path *path, PlannerInfo *root,
156                          RelOptInfo *baserel)
157 {
158         Cost            startup_cost = 0;
159         Cost            run_cost = 0;
160         Cost            cpu_per_tuple;
161
162         /* Should only be applied to base relations */
163         Assert(baserel->relid > 0);
164         Assert(baserel->rtekind == RTE_RELATION);
165
166         if (!enable_seqscan)
167                 startup_cost += disable_cost;
168
169         /*
170          * disk costs
171          */
172         run_cost += seq_page_cost * baserel->pages;
173
174         /* CPU costs */
175         startup_cost += baserel->baserestrictcost.startup;
176         cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
177         run_cost += cpu_per_tuple * baserel->tuples;
178
179         path->startup_cost = startup_cost;
180         path->total_cost = startup_cost + run_cost;
181 }
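/*
 * Worked example for cost_seqscan above (illustrative only, assuming the
 * usual default settings seq_page_cost = 1.0 and cpu_tuple_cost = 0.01,
 * and no restriction quals): for a table of 1000 pages and 100000 tuples,
 *              run_cost = 1.0 * 1000 + 0.01 * 100000 = 2000
 * with startup_cost = 0, so total_cost = 2000.
 */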
182
183 /*
184  * cost_index
185  *        Determines and returns the cost of scanning a relation using an index.
186  *
187  * 'index' is the index to be used
188  * 'indexQuals' is the list of applicable qual clauses (implicit AND semantics)
189  * 'outer_rel' is the outer relation when we are considering using the index
190  *              scan as the inside of a nestloop join (hence, some of the indexQuals
191  *              are join clauses, and we should expect repeated scans of the index);
192  *              NULL for a plain index scan
193  *
194  * cost_index() takes an IndexPath not just a Path, because it sets a few
195  * additional fields of the IndexPath besides startup_cost and total_cost.
196  * These fields are needed if the IndexPath is used in a BitmapIndexScan.
197  *
198  * NOTE: 'indexQuals' must contain only clauses usable as index restrictions.
199  * Any additional quals evaluated as qpquals may reduce the number of returned
200  * tuples, but they won't reduce the number of tuples we have to fetch from
201  * the table, so they don't reduce the scan cost.
202  *
203  * NOTE: as of 8.0, indexQuals is a list of RestrictInfo nodes, where formerly
204  * it was a list of bare clause expressions.
205  */
206 void
207 cost_index(IndexPath *path, PlannerInfo *root,
208                    IndexOptInfo *index,
209                    List *indexQuals,
210                    RelOptInfo *outer_rel)
211 {
212         RelOptInfo *baserel = index->rel;
213         Cost            startup_cost = 0;
214         Cost            run_cost = 0;
215         Cost            indexStartupCost;
216         Cost            indexTotalCost;
217         Selectivity indexSelectivity;
218         double          indexCorrelation,
219                                 csquared;
220         Cost            min_IO_cost,
221                                 max_IO_cost;
222         Cost            cpu_per_tuple;
223         double          tuples_fetched;
224         double          pages_fetched;
225
226         /* Should only be applied to base relations */
227         Assert(IsA(baserel, RelOptInfo) &&
228                    IsA(index, IndexOptInfo));
229         Assert(baserel->relid > 0);
230         Assert(baserel->rtekind == RTE_RELATION);
231
232         if (!enable_indexscan)
233                 startup_cost += disable_cost;
234
235         /*
236          * Call index-access-method-specific code to estimate the processing cost
237          * for scanning the index, as well as the selectivity of the index (ie,
238          * the fraction of main-table tuples we will have to retrieve) and its
239          * correlation to the main-table tuple order.
240          */
241         OidFunctionCall8(index->amcostestimate,
242                                          PointerGetDatum(root),
243                                          PointerGetDatum(index),
244                                          PointerGetDatum(indexQuals),
245                                          PointerGetDatum(outer_rel),
246                                          PointerGetDatum(&indexStartupCost),
247                                          PointerGetDatum(&indexTotalCost),
248                                          PointerGetDatum(&indexSelectivity),
249                                          PointerGetDatum(&indexCorrelation));
250
251         /*
252          * Save amcostestimate's results for possible use in bitmap scan planning.
253          * We don't bother to save indexStartupCost or indexCorrelation, because a
254          * bitmap scan doesn't care about either.
255          */
256         path->indextotalcost = indexTotalCost;
257         path->indexselectivity = indexSelectivity;
258
259         /* all costs for touching index itself included here */
260         startup_cost += indexStartupCost;
261         run_cost += indexTotalCost - indexStartupCost;
262
263         /* estimate number of main-table tuples fetched */
264         tuples_fetched = clamp_row_est(indexSelectivity * baserel->tuples);
265
266         /*----------
267          * Estimate number of main-table pages fetched, and compute I/O cost.
268          *
269          * When the index ordering is uncorrelated with the table ordering,
270          * we use an approximation proposed by Mackert and Lohman (see
271          * index_pages_fetched() for details) to compute the number of pages
272          * fetched, and then charge random_page_cost per page fetched.
273          *
274          * When the index ordering is exactly correlated with the table ordering
275          * (just after a CLUSTER, for example), the number of pages fetched should
276          * be exactly selectivity * table_size.  What's more, all but the first
277          * will be sequential fetches, not the random fetches that occur in the
278          * uncorrelated case.  So if the number of pages is more than 1, we
279          * ought to charge
280          *              random_page_cost + (pages_fetched - 1) * seq_page_cost
281          * For partially-correlated indexes, we ought to charge somewhere between
282          * these two estimates.  We currently interpolate linearly between the
283          * estimates based on the correlation squared (XXX is that appropriate?).
284          *----------
285          */
286         if (outer_rel != NULL && outer_rel->rows > 1)
287         {
288                 /*
289                  * For repeated indexscans, the appropriate estimate for the
290                  * uncorrelated case is to scale up the number of tuples fetched in
291                  * the Mackert and Lohman formula by the number of scans, so that we
292                  * estimate the number of pages fetched by all the scans; then
293                  * pro-rate the costs for one scan.  In this case we assume all the
294                  * fetches are random accesses.
295                  */
296                 double          num_scans = outer_rel->rows;
297
298                 pages_fetched = index_pages_fetched(tuples_fetched * num_scans,
299                                                                                         baserel->pages,
300                                                                                         (double) index->pages,
301                                                                                         root);
302
303                 max_IO_cost = (pages_fetched * random_page_cost) / num_scans;
304
305                 /*
306                  * In the perfectly correlated case, the number of pages touched by
307                  * each scan is selectivity * table_size, and we can use the Mackert
308                  * and Lohman formula at the page level to estimate how much work is
309                  * saved by caching across scans.  We still assume all the fetches are
310                  * random, though, which is an overestimate that's hard to correct for
311                  * without double-counting the cache effects.  (But in most cases
312                  * where such a plan is actually interesting, only one page would get
313                  * fetched per scan anyway, so it shouldn't matter much.)
314                  */
315                 pages_fetched = ceil(indexSelectivity * (double) baserel->pages);
316
317                 pages_fetched = index_pages_fetched(pages_fetched * num_scans,
318                                                                                         baserel->pages,
319                                                                                         (double) index->pages,
320                                                                                         root);
321
322                 min_IO_cost = (pages_fetched * random_page_cost) / num_scans;
323         }
324         else
325         {
326                 /*
327                  * Normal case: apply the Mackert and Lohman formula, and then
328                  * interpolate between that and the correlation-derived result.
329                  */
330                 pages_fetched = index_pages_fetched(tuples_fetched,
331                                                                                         baserel->pages,
332                                                                                         (double) index->pages,
333                                                                                         root);
334
335                 /* max_IO_cost is for the perfectly uncorrelated case (csquared=0) */
336                 max_IO_cost = pages_fetched * random_page_cost;
337
338                 /* min_IO_cost is for the perfectly correlated case (csquared=1) */
339                 pages_fetched = ceil(indexSelectivity * (double) baserel->pages);
340                 min_IO_cost = random_page_cost;
341                 if (pages_fetched > 1)
342                         min_IO_cost += (pages_fetched - 1) * seq_page_cost;
343         }
344
345         /*
346          * Now interpolate based on estimated index order correlation to get total
347          * disk I/O cost for main table accesses.
348          */
349         csquared = indexCorrelation * indexCorrelation;
350
351         run_cost += max_IO_cost + csquared * (min_IO_cost - max_IO_cost);
352
353         /*
354          * Estimate CPU costs per tuple.
355          *
356          * Normally the indexquals will be removed from the list of restriction
357          * clauses that we have to evaluate as qpquals, so we should subtract
358          * their costs from baserestrictcost.  But if we are doing a join then
359          * some of the indexquals are join clauses and shouldn't be subtracted.
360          * Rather than work out exactly how much to subtract, we don't subtract
361          * anything.
362          */
363         startup_cost += baserel->baserestrictcost.startup;
364         cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
365
366         if (outer_rel == NULL)
367         {
368                 QualCost        index_qual_cost;
369
370                 cost_qual_eval(&index_qual_cost, indexQuals, root);
371                 /* any startup cost still has to be paid ... */
372                 cpu_per_tuple -= index_qual_cost.per_tuple;
373         }
374
375         run_cost += cpu_per_tuple * tuples_fetched;
376
377         path->path.startup_cost = startup_cost;
378         path->path.total_cost = startup_cost + run_cost;
379 }
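/*
 * Worked example of the correlation interpolation in cost_index above
 * (illustrative only, assuming default random_page_cost = 4.0 and
 * seq_page_cost = 1.0): if both page estimates come out to 100 pages, then
 *              max_IO_cost = 100 * 4.0 = 400              (uncorrelated)
 *              min_IO_cost = 4.0 + 99 * 1.0 = 103         (perfectly correlated)
 * and with indexCorrelation = 0.5 (csquared = 0.25) the charged I/O cost is
 *              400 + 0.25 * (103 - 400) = 325.75
 */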
380
381 /*
382  * index_pages_fetched
383  *        Estimate the number of pages actually fetched after accounting for
384  *        cache effects.
385  *
386  * We use an approximation proposed by Mackert and Lohman, "Index Scans
387  * Using a Finite LRU Buffer: A Validated I/O Model", ACM Transactions
388  * on Database Systems, Vol. 14, No. 3, September 1989, Pages 401-424.
389  * The Mackert and Lohman approximation is that the number of pages
390  * fetched is
391  *      PF =
392  *              min(2TNs/(2T+Ns), T)                    when T <= b
393  *              2TNs/(2T+Ns)                                    when T > b and Ns <= 2Tb/(2T-b)
394  *              b + (Ns - 2Tb/(2T-b))*(T-b)/T   when T > b and Ns > 2Tb/(2T-b)
395  * where
396  *              T = # pages in table
397  *              N = # tuples in table
398  *              s = selectivity = fraction of table to be scanned
399  *              b = # buffer pages available (we include kernel space here)
400  *
401  * We assume that effective_cache_size is the total number of buffer pages
402  * available for the whole query, and pro-rate that space across all the
403  * tables in the query and the index currently under consideration.  (This
404  * ignores space needed for other indexes used by the query, but since we
405  * don't know which indexes will get used, we can't estimate that very well;
406  * and in any case counting all the tables may well be an overestimate, since
407  * depending on the join plan not all the tables may be scanned concurrently.)
408  *
409  * The product Ns is the number of tuples fetched; we pass in that
410  * product rather than calculating it here.  "pages" is the number of pages
411  * in the object under consideration (either an index or a table).
412  * "index_pages" is the amount to add to the total table space, which was
413  * computed for us by query_planner.
414  *
415  * Caller is expected to have ensured that tuples_fetched is greater than zero
416  * and rounded to integer (see clamp_row_est).  The result will likewise be
417  * greater than zero and integral.
418  */
419 double
420 index_pages_fetched(double tuples_fetched, BlockNumber pages,
421                                         double index_pages, PlannerInfo *root)
422 {
423         double          pages_fetched;
424         double          total_pages;
425         double          T,
426                                 b;
427
428         /* T is # pages in table, but don't allow it to be zero */
429         T = (pages > 1) ? (double) pages : 1.0;
430
431         /* Compute number of pages assumed to be competing for cache space */
432         total_pages = root->total_table_pages + index_pages;
433         total_pages = Max(total_pages, 1.0);
434         Assert(T <= total_pages);
435
436         /* b is pro-rated share of effective_cache_size */
437         b = (double) effective_cache_size * T / total_pages;
438
439         /* force it positive and integral */
440         if (b <= 1.0)
441                 b = 1.0;
442         else
443                 b = ceil(b);
444
445         /* This part is the Mackert and Lohman formula */
446         if (T <= b)
447         {
448                 pages_fetched =
449                         (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
450                 if (pages_fetched >= T)
451                         pages_fetched = T;
452                 else
453                         pages_fetched = ceil(pages_fetched);
454         }
455         else
456         {
457                 double          lim;
458
459                 lim = (2.0 * T * b) / (2.0 * T - b);
460                 if (tuples_fetched <= lim)
461                 {
462                         pages_fetched =
463                                 (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
464                 }
465                 else
466                 {
467                         pages_fetched =
468                                 b + (tuples_fetched - lim) * (T - b) / T;
469                 }
470                 pages_fetched = ceil(pages_fetched);
471         }
472         return pages_fetched;
473 }
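/*
 * Worked example of the Mackert and Lohman formula as coded above
 * (illustrative only): with T = 1000 table pages, a pro-rated cache share
 * of b = 200 pages, and Ns = tuples_fetched = 100, we are in the T > b
 * branch; lim = 2*1000*200 / (2*1000 - 200) = 222.2, and since 100 <= lim,
 *              pages_fetched = ceil(2*1000*100 / (2*1000 + 100)) = ceil(95.2) = 96.
 * If instead Ns = 1000 (> lim), the third branch applies:
 *              pages_fetched = ceil(200 + (1000 - 222.2) * (1000 - 200)/1000) = 823.
 */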
474
475 /*
476  * get_indexpath_pages
477  *              Determine the total size of the indexes used in a bitmap index path.
478  *
479  * Note: if the same index is used more than once in a bitmap tree, we will
480  * count it multiple times, which perhaps is the wrong thing ... but it's
481  * not completely clear, and detecting duplicates is difficult, so ignore it
482  * for now.
483  */
484 static double
485 get_indexpath_pages(Path *bitmapqual)
486 {
487         double          result = 0;
488         ListCell   *l;
489
490         if (IsA(bitmapqual, BitmapAndPath))
491         {
492                 BitmapAndPath *apath = (BitmapAndPath *) bitmapqual;
493
494                 foreach(l, apath->bitmapquals)
495                 {
496                         result += get_indexpath_pages((Path *) lfirst(l));
497                 }
498         }
499         else if (IsA(bitmapqual, BitmapOrPath))
500         {
501                 BitmapOrPath *opath = (BitmapOrPath *) bitmapqual;
502
503                 foreach(l, opath->bitmapquals)
504                 {
505                         result += get_indexpath_pages((Path *) lfirst(l));
506                 }
507         }
508         else if (IsA(bitmapqual, IndexPath))
509         {
510                 IndexPath  *ipath = (IndexPath *) bitmapqual;
511
512                 result = (double) ipath->indexinfo->pages;
513         }
514         else
515                 elog(ERROR, "unrecognized node type: %d", nodeTag(bitmapqual));
516
517         return result;
518 }
519
520 /*
521  * cost_bitmap_heap_scan
522  *        Determines and returns the cost of scanning a relation using a bitmap
523  *        index-then-heap plan.
524  *
525  * 'baserel' is the relation to be scanned
526  * 'bitmapqual' is a tree of IndexPaths, BitmapAndPaths, and BitmapOrPaths
527  * 'outer_rel' is the outer relation when we are considering using the bitmap
528  *              scan as the inside of a nestloop join (hence, some of the indexQuals
529  *              are join clauses, and we should expect repeated scans of the table);
530  *              NULL for a plain bitmap scan
531  *
532  * Note: if this is a join inner path, the component IndexPaths in bitmapqual
533  * should have been costed accordingly.
534  */
535 void
536 cost_bitmap_heap_scan(Path *path, PlannerInfo *root, RelOptInfo *baserel,
537                                           Path *bitmapqual, RelOptInfo *outer_rel)
538 {
539         Cost            startup_cost = 0;
540         Cost            run_cost = 0;
541         Cost            indexTotalCost;
542         Selectivity indexSelectivity;
543         Cost            cpu_per_tuple;
544         Cost            cost_per_page;
545         double          tuples_fetched;
546         double          pages_fetched;
547         double          T;
548
549         /* Should only be applied to base relations */
550         Assert(IsA(baserel, RelOptInfo));
551         Assert(baserel->relid > 0);
552         Assert(baserel->rtekind == RTE_RELATION);
553
554         if (!enable_bitmapscan)
555                 startup_cost += disable_cost;
556
557         /*
558          * Fetch total cost of obtaining the bitmap, as well as its total
559          * selectivity.
560          */
561         cost_bitmap_tree_node(bitmapqual, &indexTotalCost, &indexSelectivity);
562
563         startup_cost += indexTotalCost;
564
565         /*
566          * Estimate number of main-table pages fetched.
567          */
568         tuples_fetched = clamp_row_est(indexSelectivity * baserel->tuples);
569
570         T = (baserel->pages > 1) ? (double) baserel->pages : 1.0;
571
572         if (outer_rel != NULL && outer_rel->rows > 1)
573         {
574                 /*
575                  * For repeated bitmap scans, scale up the number of tuples fetched in
576                  * the Mackert and Lohman formula by the number of scans, so that we
577                  * estimate the number of pages fetched by all the scans. Then
578                  * pro-rate for one scan.
579                  */
580                 double          num_scans = outer_rel->rows;
581
582                 pages_fetched = index_pages_fetched(tuples_fetched * num_scans,
583                                                                                         baserel->pages,
584                                                                                         get_indexpath_pages(bitmapqual),
585                                                                                         root);
586                 pages_fetched /= num_scans;
587         }
588         else
589         {
590                 /*
591                  * For a single scan, the number of heap pages that need to be fetched
592                  * is the same as the Mackert and Lohman formula for the case T <= b
593                  * (ie, no re-reads needed).
594                  */
595                 pages_fetched = (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
596         }
597         if (pages_fetched >= T)
598                 pages_fetched = T;
599         else
600                 pages_fetched = ceil(pages_fetched);
601
602         /*
603          * For small numbers of pages we should charge random_page_cost apiece,
604          * while if nearly all the table's pages are being read, it's more
605          * appropriate to charge seq_page_cost apiece.  The effect is nonlinear,
606          * too. For lack of a better idea, interpolate like this to determine the
607          * cost per page.
608          */
609         if (pages_fetched >= 2.0)
610                 cost_per_page = random_page_cost -
611                         (random_page_cost - seq_page_cost) * sqrt(pages_fetched / T);
612         else
613                 cost_per_page = random_page_cost;
614
615         run_cost += pages_fetched * cost_per_page;
616
617         /*
618          * Estimate CPU costs per tuple.
619          *
620          * Often the indexquals don't need to be rechecked at each tuple ... but
621          * not always, especially not if there are enough tuples involved that the
622          * bitmaps become lossy.  For the moment, just assume they will be
623          * rechecked always.
624          */
625         startup_cost += baserel->baserestrictcost.startup;
626         cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
627
628         run_cost += cpu_per_tuple * tuples_fetched;
629
630         path->startup_cost = startup_cost;
631         path->total_cost = startup_cost + run_cost;
632 }
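/*
 * Worked example of the per-page cost interpolation in
 * cost_bitmap_heap_scan above (illustrative only, assuming default
 * random_page_cost = 4.0 and seq_page_cost = 1.0): if the table has
 * T = 100 pages and we expect to fetch 25 of them,
 *              cost_per_page = 4.0 - (4.0 - 1.0) * sqrt(25/100) = 2.5
 * so the heap I/O charge is 25 * 2.5 = 62.5, between the all-random
 * estimate of 100 and the all-sequential estimate of 25.
 */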
633
634 /*
635  * cost_bitmap_tree_node
636  *              Extract cost and selectivity from a bitmap tree node (index/and/or)
637  */
638 void
639 cost_bitmap_tree_node(Path *path, Cost *cost, Selectivity *selec)
640 {
641         if (IsA(path, IndexPath))
642         {
643                 *cost = ((IndexPath *) path)->indextotalcost;
644                 *selec = ((IndexPath *) path)->indexselectivity;
645
646                 /*
647                  * Charge a small amount per retrieved tuple to reflect the costs of
648                  * manipulating the bitmap.  This is mostly to make sure that a bitmap
649                  * scan doesn't look to be the same cost as an indexscan to retrieve a
650                  * single tuple.
651                  */
652                 *cost += 0.1 * cpu_operator_cost * ((IndexPath *) path)->rows;
653         }
654         else if (IsA(path, BitmapAndPath))
655         {
656                 *cost = path->total_cost;
657                 *selec = ((BitmapAndPath *) path)->bitmapselectivity;
658         }
659         else if (IsA(path, BitmapOrPath))
660         {
661                 *cost = path->total_cost;
662                 *selec = ((BitmapOrPath *) path)->bitmapselectivity;
663         }
664         else
665         {
666                 elog(ERROR, "unrecognized node type: %d", nodeTag(path));
667                 *cost = *selec = 0;             /* keep compiler quiet */
668         }
669 }
670
671 /*
672  * cost_bitmap_and_node
673  *              Estimate the cost of a BitmapAnd node
674  *
675  * Note that this considers only the costs of index scanning and bitmap
676  * creation, not the eventual heap access.      In that sense the object isn't
677  * truly a Path, but it has enough path-like properties (costs in particular)
678  * to warrant treating it as one.
679  */
680 void
681 cost_bitmap_and_node(BitmapAndPath *path, PlannerInfo *root)
682 {
683         Cost            totalCost;
684         Selectivity selec;
685         ListCell   *l;
686
687         /*
688          * We estimate AND selectivity on the assumption that the inputs are
689          * independent.  This is probably often wrong, but we don't have the info
690          * to do better.
691          *
692          * The runtime cost of the BitmapAnd itself is estimated at 100x
693          * cpu_operator_cost for each tbm_intersect needed.  Probably too small,
694          * definitely too simplistic?
695          */
696         totalCost = 0.0;
697         selec = 1.0;
698         foreach(l, path->bitmapquals)
699         {
700                 Path       *subpath = (Path *) lfirst(l);
701                 Cost            subCost;
702                 Selectivity subselec;
703
704                 cost_bitmap_tree_node(subpath, &subCost, &subselec);
705
706                 selec *= subselec;
707
708                 totalCost += subCost;
709                 if (l != list_head(path->bitmapquals))
710                         totalCost += 100.0 * cpu_operator_cost;
711         }
712         path->bitmapselectivity = selec;
713         path->path.startup_cost = totalCost;
714         path->path.total_cost = totalCost;
715 }
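/*
 * Worked example for cost_bitmap_and_node above (illustrative only,
 * assuming the default cpu_operator_cost = 0.0025): for two inputs with
 * selectivities 0.1 and 0.05 and costs c1 and c2, the result has
 *              bitmapselectivity = 0.1 * 0.05 = 0.005
 *              totalCost = c1 + c2 + 100 * 0.0025 = c1 + c2 + 0.25
 * (only one tbm_intersect is charged, since the first input adds none).
 */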
716
717 /*
718  * cost_bitmap_or_node
719  *              Estimate the cost of a BitmapOr node
720  *
721  * See comments for cost_bitmap_and_node.
722  */
723 void
724 cost_bitmap_or_node(BitmapOrPath *path, PlannerInfo *root)
725 {
726         Cost            totalCost;
727         Selectivity selec;
728         ListCell   *l;
729
730         /*
731          * We estimate OR selectivity on the assumption that the inputs are
732          * non-overlapping, since that's often the case in "x IN (list)" type
733          * situations.  Of course, we clamp to 1.0 at the end.
734          *
735          * The runtime cost of the BitmapOr itself is estimated at 100x
736          * cpu_operator_cost for each tbm_union needed.  Probably too small,
737          * definitely too simplistic?  We are aware that the tbm_unions are
738          * optimized out when the inputs are BitmapIndexScans.
739          */
740         totalCost = 0.0;
741         selec = 0.0;
742         foreach(l, path->bitmapquals)
743         {
744                 Path       *subpath = (Path *) lfirst(l);
745                 Cost            subCost;
746                 Selectivity subselec;
747
748                 cost_bitmap_tree_node(subpath, &subCost, &subselec);
749
750                 selec += subselec;
751
752                 totalCost += subCost;
753                 if (l != list_head(path->bitmapquals) &&
754                         !IsA(subpath, IndexPath))
755                         totalCost += 100.0 * cpu_operator_cost;
756         }
757         path->bitmapselectivity = Min(selec, 1.0);
758         path->path.startup_cost = totalCost;
759         path->path.total_cost = totalCost;
760 }
761
762 /*
763  * cost_tidscan
764  *        Determines and returns the cost of scanning a relation using TIDs.
765  */
766 void
767 cost_tidscan(Path *path, PlannerInfo *root,
768                          RelOptInfo *baserel, List *tidquals)
769 {
770         Cost            startup_cost = 0;
771         Cost            run_cost = 0;
772         bool            isCurrentOf = false;
773         Cost            cpu_per_tuple;
774         QualCost        tid_qual_cost;
775         int                     ntuples;
776         ListCell   *l;
777
778         /* Should only be applied to base relations */
779         Assert(baserel->relid > 0);
780         Assert(baserel->rtekind == RTE_RELATION);
781
782         /* Count how many tuples we expect to retrieve */
783         ntuples = 0;
784         foreach(l, tidquals)
785         {
786                 if (IsA(lfirst(l), ScalarArrayOpExpr))
787                 {
788                         /* Each element of the array yields 1 tuple */
789                         ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) lfirst(l);
790                         Node       *arraynode = (Node *) lsecond(saop->args);
791
792                         ntuples += estimate_array_length(arraynode);
793                 }
794                 else if (IsA(lfirst(l), CurrentOfExpr))
795                 {
796                         /* CURRENT OF yields 1 tuple */
797                         isCurrentOf = true;
798                         ntuples++;
799                 }
800                 else
801                 {
802                         /* It's just CTID = something, count 1 tuple */
803                         ntuples++;
804                 }
805         }
806
807         /*
808          * We must force TID scan for WHERE CURRENT OF, because only nodeTidscan.c
809          * understands how to do it correctly.  Therefore, honor enable_tidscan
810          * only when CURRENT OF isn't present.  Also note that cost_qual_eval
811          * counts a CurrentOfExpr as having startup cost disable_cost, which we
812          * subtract off here; that's to prevent other plan types such as seqscan
813          * from winning.
814          */
815         if (isCurrentOf)
816         {
817                 Assert(baserel->baserestrictcost.startup >= disable_cost);
818                 startup_cost -= disable_cost;
819         }
820         else if (!enable_tidscan)
821                 startup_cost += disable_cost;
822
823         /*
824          * The TID qual expressions will be computed once, any other baserestrict
825          * quals once per retrieved tuple.
826          */
827         cost_qual_eval(&tid_qual_cost, tidquals, root);
828
829         /* disk costs --- assume each tuple on a different page */
830         run_cost += random_page_cost * ntuples;
831
832         /* CPU costs */
833         startup_cost += baserel->baserestrictcost.startup +
834                 tid_qual_cost.per_tuple;
835         cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple -
836                 tid_qual_cost.per_tuple;
837         run_cost += cpu_per_tuple * ntuples;
838
839         path->startup_cost = startup_cost;
840         path->total_cost = startup_cost + run_cost;
841 }
842
843 /*
844  * cost_subqueryscan
845  *        Determines and returns the cost of scanning a subquery RTE.
846  */
847 void
848 cost_subqueryscan(Path *path, RelOptInfo *baserel)
849 {
850         Cost            startup_cost;
851         Cost            run_cost;
852         Cost            cpu_per_tuple;
853
854         /* Should only be applied to base relations that are subqueries */
855         Assert(baserel->relid > 0);
856         Assert(baserel->rtekind == RTE_SUBQUERY);
857
858         /*
859          * Cost of path is cost of evaluating the subplan, plus cost of evaluating
860          * any restriction clauses that will be attached to the SubqueryScan node,
861          * plus cpu_tuple_cost to account for selection and projection overhead.
862          */
863         path->startup_cost = baserel->subplan->startup_cost;
864         path->total_cost = baserel->subplan->total_cost;
865
866         startup_cost = baserel->baserestrictcost.startup;
867         cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
868         run_cost = cpu_per_tuple * baserel->tuples;
869
870         path->startup_cost += startup_cost;
871         path->total_cost += startup_cost + run_cost;
872 }
873
874 /*
875  * cost_functionscan
876  *        Determines and returns the cost of scanning a function RTE.
877  */
878 void
879 cost_functionscan(Path *path, PlannerInfo *root, RelOptInfo *baserel)
880 {
881         Cost            startup_cost = 0;
882         Cost            run_cost = 0;
883         Cost            cpu_per_tuple;
884         RangeTblEntry *rte;
885         QualCost        exprcost;
886
887         /* Should only be applied to base relations that are functions */
888         Assert(baserel->relid > 0);
889         rte = planner_rt_fetch(baserel->relid, root);
890         Assert(rte->rtekind == RTE_FUNCTION);
891
892         /* Estimate costs of executing the function expression */
893         cost_qual_eval_node(&exprcost, rte->funcexpr, root);
894
895         startup_cost += exprcost.startup;
896         cpu_per_tuple = exprcost.per_tuple;
897
898         /* Add scanning CPU costs */
899         startup_cost += baserel->baserestrictcost.startup;
900         cpu_per_tuple += cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
901         run_cost += cpu_per_tuple * baserel->tuples;
902
903         path->startup_cost = startup_cost;
904         path->total_cost = startup_cost + run_cost;
905 }
906
907 /*
908  * cost_valuesscan
909  *        Determines and returns the cost of scanning a VALUES RTE.
910  */
911 void
912 cost_valuesscan(Path *path, PlannerInfo *root, RelOptInfo *baserel)
913 {
914         Cost            startup_cost = 0;
915         Cost            run_cost = 0;
916         Cost            cpu_per_tuple;
917
918         /* Should only be applied to base relations that are values lists */
919         Assert(baserel->relid > 0);
920         Assert(baserel->rtekind == RTE_VALUES);
921
922         /*
923          * For now, estimate list evaluation cost at one operator eval per list
924          * (probably pretty bogus, but is it worth being smarter?)
925          */
926         cpu_per_tuple = cpu_operator_cost;
927
928         /* Add scanning CPU costs */
929         startup_cost += baserel->baserestrictcost.startup;
930         cpu_per_tuple += cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
931         run_cost += cpu_per_tuple * baserel->tuples;
932
933         path->startup_cost = startup_cost;
934         path->total_cost = startup_cost + run_cost;
935 }
936
937 /*
938  * cost_sort
939  *        Determines and returns the cost of sorting a relation, including
940  *        the cost of reading the input data.
941  *
942  * If the total volume of data to sort is less than work_mem, we will do
943  * an in-memory sort, which requires no I/O and about t*log2(t) tuple
944  * comparisons for t tuples.
945  *
946  * If the total volume exceeds work_mem, we switch to a tape-style merge
947  * algorithm.  There will still be about t*log2(t) tuple comparisons in
948  * total, but we will also need to write and read each tuple once per
949  * merge pass.  We expect about ceil(logM(r)) merge passes where r is the
950  * number of initial runs formed and M is the merge order used by tuplesort.c.
951  * Since the average initial run should be about twice work_mem, we have
952  *              disk traffic = 2 * relsize * ceil(logM(relsize / (2*work_mem)))
953  *              cpu = comparison_cost * t * log2(t)
954  *
955  * If the sort is bounded (i.e., only the first k result tuples are needed)
956  * and k tuples can fit into work_mem, we use a heap method that keeps only
957  * k tuples in the heap; this will require about t*log2(k) tuple comparisons.
958  *
959  * The disk traffic is assumed to be 3/4ths sequential and 1/4th random
960  * accesses (XXX can't we refine that guess?)
961  *
962  * We charge two operator evals per tuple comparison, which should be in
963  * the right ballpark in most cases.
964  *
965  * 'pathkeys' is a list of sort keys
966  * 'input_cost' is the total cost for reading the input data
967  * 'tuples' is the number of tuples in the relation
968  * 'width' is the average tuple width in bytes
969  * 'limit_tuples' is the bound on the number of output tuples; -1 if no bound
970  *
971  * NOTE: some callers currently pass NIL for pathkeys because they
972  * can't conveniently supply the sort keys.  Since this routine doesn't
973  * currently do anything with pathkeys anyway, that doesn't matter...
974  * but if it ever does, it should react gracefully to lack of key data.
975  * (Actually, the thing we'd most likely be interested in is just the number
976  * of sort keys, which all callers *could* supply.)
977  */
978 void
979 cost_sort(Path *path, PlannerInfo *root,
980                   List *pathkeys, Cost input_cost, double tuples, int width,
981                   double limit_tuples)
982 {
983         Cost            startup_cost = input_cost;
984         Cost            run_cost = 0;
985         double          input_bytes = relation_byte_size(tuples, width);
986         double          output_bytes;
987         double          output_tuples;
988         long            work_mem_bytes = work_mem * 1024L;
989
990         if (!enable_sort)
991                 startup_cost += disable_cost;
992
993         /*
994          * We want to be sure the cost of a sort is never estimated as zero, even
995          * if passed-in tuple count is zero.  Besides, mustn't do log(0)...
996          */
997         if (tuples < 2.0)
998                 tuples = 2.0;
999
1000         /* Do we have a useful LIMIT? */
1001         if (limit_tuples > 0 && limit_tuples < tuples)
1002         {
1003                 output_tuples = limit_tuples;
1004                 output_bytes = relation_byte_size(output_tuples, width);
1005         }
1006         else
1007         {
1008                 output_tuples = tuples;
1009                 output_bytes = input_bytes;
1010         }
1011
1012         if (output_bytes > work_mem_bytes)
1013         {
1014                 /*
1015                  * We'll have to use a disk-based sort of all the tuples
1016                  */
1017                 double          npages = ceil(input_bytes / BLCKSZ);
1018                 double          nruns = (input_bytes / work_mem_bytes) * 0.5;
1019                 double          mergeorder = tuplesort_merge_order(work_mem_bytes);
1020                 double          log_runs;
1021                 double          npageaccesses;
1022
1023                 /*
1024                  * CPU costs
1025                  *
1026                  * Assume about two operator evals per tuple comparison and N log2 N
1027                  * comparisons
1028                  */
1029                 startup_cost += 2.0 * cpu_operator_cost * tuples * LOG2(tuples);
1030
1031                 /* Disk costs */
1032
1033                 /* Compute logM(r) as log(r) / log(M) */
1034                 if (nruns > mergeorder)
1035                         log_runs = ceil(log(nruns) / log(mergeorder));
1036                 else
1037                         log_runs = 1.0;
1038                 npageaccesses = 2.0 * npages * log_runs;
1039                 /* Assume 3/4ths of accesses are sequential, 1/4th are not */
1040                 startup_cost += npageaccesses *
1041                         (seq_page_cost * 0.75 + random_page_cost * 0.25);
1042         }
1043         else if (tuples > 2 * output_tuples || input_bytes > work_mem_bytes)
1044         {
1045                 /*
1046                  * We'll use a bounded heap-sort keeping just K tuples in memory, for
1047                  * a total number of tuple comparisons of N log2 K; but the constant
1048                  * factor is a bit higher than for quicksort.  Tweak it so that the
1049                  * cost curve is continuous at the crossover point.
1050                  */
1051                 startup_cost += 2.0 * cpu_operator_cost * tuples * LOG2(2.0 * output_tuples);
1052         }
1053         else
1054         {
1055                 /* We'll use plain quicksort on all the input tuples */
1056                 startup_cost += 2.0 * cpu_operator_cost * tuples * LOG2(tuples);
1057         }
1058
1059         /*
1060          * Also charge a small amount (arbitrarily set equal to operator cost) per
1061          * extracted tuple.  Note it's correct to use tuples not output_tuples
1062          * here --- the upper LIMIT will pro-rate the run cost so we'd be double
1063          * counting the LIMIT otherwise.
1064          */
1065         run_cost += cpu_operator_cost * tuples;
1066
1067         path->startup_cost = startup_cost;
1068         path->total_cost = startup_cost + run_cost;
1069 }
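/*
 * Worked example of the external-sort branch in cost_sort above
 * (illustrative only; it ignores the per-tuple overhead that
 * relation_byte_size adds, assumes BLCKSZ = 8192 with the default
 * seq_page_cost = 1.0 and random_page_cost = 4.0, and hypothetically
 * assumes tuplesort_merge_order() returns at least 6 here): with
 * input_bytes = 80 MB and work_mem = 8 MB,
 *              npages = 83886080 / 8192 = 10240
 *              nruns = (80 MB / 8 MB) * 0.5 = 5      (<= merge order, so log_runs = 1)
 *              npageaccesses = 2 * 10240 * 1 = 20480
 * giving a disk cost of 20480 * (0.75 * 1.0 + 0.25 * 4.0) = 35840, on top
 * of the t*log2(t) comparison cost.
 */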
1070
1071 /*
1072  * sort_exceeds_work_mem
1073  *        Given a finished Sort plan node, detect whether it is expected to
1074  *        spill to disk (ie, will need more than work_mem workspace)
1075  *
1076  * This assumes there will be no available LIMIT.
1077  */
1078 bool
1079 sort_exceeds_work_mem(Sort *sort)
1080 {
1081         double          input_bytes = relation_byte_size(sort->plan.plan_rows,
1082                                                                                                  sort->plan.plan_width);
1083         long            work_mem_bytes = work_mem * 1024L;
1084
1085         return (input_bytes > work_mem_bytes);
1086 }
1087
1088 /*
1089  * cost_material
1090  *        Determines and returns the cost of materializing a relation, including
1091  *        the cost of reading the input data.
1092  *
1093  * If the total volume of data to materialize exceeds work_mem, we will need
1094  * to write it to disk, so the cost is much higher in that case.
1095  */
1096 void
1097 cost_material(Path *path,
1098                           Cost input_cost, double tuples, int width)
1099 {
1100         Cost            startup_cost = input_cost;
1101         Cost            run_cost = 0;
1102         double          nbytes = relation_byte_size(tuples, width);
1103         long            work_mem_bytes = work_mem * 1024L;
1104
1105         /* disk costs */
1106         if (nbytes > work_mem_bytes)
1107         {
1108                 double          npages = ceil(nbytes / BLCKSZ);
1109
1110                 /* We'll write during startup and read during retrieval */
1111                 startup_cost += seq_page_cost * npages;
1112                 run_cost += seq_page_cost * npages;
1113         }
1114
1115         /*
1116          * Charge a very small amount per inserted tuple, to reflect bookkeeping
1117          * costs.  We use cpu_tuple_cost/10 for this.  This is needed to break the
1118          * tie that would otherwise exist between nestloop with A outer,
1119          * materialized B inner and nestloop with B outer, materialized A inner.
1120          * The extra cost ensures we'll prefer materializing the smaller rel.
1121          */
1122         startup_cost += cpu_tuple_cost * 0.1 * tuples;
1123
1124         /*
1125          * Also charge a small amount per extracted tuple.      We use cpu_tuple_cost
1126          * so that it doesn't appear worthwhile to materialize a bare seqscan.
1127          */
1128         run_cost += cpu_tuple_cost * tuples;
1129
1130         path->startup_cost = startup_cost;
1131         path->total_cost = startup_cost + run_cost;
1132 }
1133
1134 /*
1135  * cost_agg
1136  *              Determines and returns the cost of performing an Agg plan node,
1137  *              including the cost of its input.
1138  *
1139  * Note: when aggstrategy == AGG_SORTED, caller must ensure that input costs
1140  * are for appropriately-sorted input.
1141  */
1142 void
1143 cost_agg(Path *path, PlannerInfo *root,
1144                  AggStrategy aggstrategy, int numAggs,
1145                  int numGroupCols, double numGroups,
1146                  Cost input_startup_cost, Cost input_total_cost,
1147                  double input_tuples)
1148 {
1149         Cost            startup_cost;
1150         Cost            total_cost;
1151
1152         /*
1153          * We charge one cpu_operator_cost per aggregate function per input tuple,
1154          * and another one per output tuple (corresponding to transfn and finalfn
1155          * calls respectively).  If we are grouping, we charge an additional
1156          * cpu_operator_cost per grouping column per input tuple for grouping
1157          * comparisons.
1158          *
1159          * We will produce a single output tuple if not grouping, and a tuple per
1160          * group otherwise.  We charge cpu_tuple_cost for each output tuple.
1161          *
1162          * Note: in this cost model, AGG_SORTED and AGG_HASHED have exactly the
1163          * same total CPU cost, but AGG_SORTED has lower startup cost.  If the
1164          * input path is already sorted appropriately, AGG_SORTED should be
1165          * preferred (since it has no risk of memory overflow).  This will happen
1166          * as long as the computed total costs are indeed exactly equal --- but if
1167          * there's roundoff error we might do the wrong thing.  So be sure that
1168          * the computations below form the same intermediate values in the same
1169          * order.
1170          *
1171          * Note: ideally we should use the pg_proc.procost costs of each
1172          * aggregate's component functions, but for now that seems like an
1173          * excessive amount of work.
1174          */
1175         if (aggstrategy == AGG_PLAIN)
1176         {
1177                 startup_cost = input_total_cost;
1178                 startup_cost += cpu_operator_cost * (input_tuples + 1) * numAggs;
1179                 /* we aren't grouping */
1180                 total_cost = startup_cost + cpu_tuple_cost;
1181         }
1182         else if (aggstrategy == AGG_SORTED)
1183         {
1184                 /* Here we are able to deliver output on-the-fly */
1185                 startup_cost = input_startup_cost;
1186                 total_cost = input_total_cost;
1187                 /* calcs phrased this way to match HASHED case, see note above */
1188                 total_cost += cpu_operator_cost * input_tuples * numGroupCols;
1189                 total_cost += cpu_operator_cost * input_tuples * numAggs;
1190                 total_cost += cpu_operator_cost * numGroups * numAggs;
1191                 total_cost += cpu_tuple_cost * numGroups;
1192         }
1193         else
1194         {
1195                 /* must be AGG_HASHED */
1196                 startup_cost = input_total_cost;
1197                 startup_cost += cpu_operator_cost * input_tuples * numGroupCols;
1198                 startup_cost += cpu_operator_cost * input_tuples * numAggs;
1199                 total_cost = startup_cost;
1200                 total_cost += cpu_operator_cost * numGroups * numAggs;
1201                 total_cost += cpu_tuple_cost * numGroups;
1202         }
1203
1204         path->startup_cost = startup_cost;
1205         path->total_cost = total_cost;
1206 }
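/*
 * Worked example for cost_agg above (illustrative only, assuming default
 * cpu_operator_cost = 0.0025 and cpu_tuple_cost = 0.01): with
 * input_tuples = 10000, numGroupCols = 1, numAggs = 2 and numGroups = 100,
 * AGG_HASHED gets
 *              startup_cost = input_total_cost + 0.0025*10000*1 + 0.0025*10000*2
 *                           = input_total_cost + 75
 *              total_cost   = startup_cost + 0.0025*100*2 + 0.01*100
 *                           = input_total_cost + 76.5
 * AGG_SORTED accumulates exactly the same increments into total_cost but
 * keeps startup_cost at input_startup_cost, illustrating the equal-total,
 * lower-startup property described in the comment above.
 */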
1207
1208 /*
1209  * cost_group
1210  *              Determines and returns the cost of performing a Group plan node,
1211  *              including the cost of its input.
1212  *
1213  * Note: caller must ensure that input costs are for appropriately-sorted
1214  * input.
1215  */
1216 void
1217 cost_group(Path *path, PlannerInfo *root,
1218                    int numGroupCols, double numGroups,
1219                    Cost input_startup_cost, Cost input_total_cost,
1220                    double input_tuples)
1221 {
1222         Cost            startup_cost;
1223         Cost            total_cost;
1224
1225         startup_cost = input_startup_cost;
1226         total_cost = input_total_cost;
1227
1228         /*
1229          * Charge one cpu_operator_cost per comparison per input tuple. We assume
1230          * all columns get compared for most of the tuples.
1231          */
1232         total_cost += cpu_operator_cost * input_tuples * numGroupCols;
1233
1234         path->startup_cost = startup_cost;
1235         path->total_cost = total_cost;
1236 }
1237
1238 /*
1239  * If a nestloop's inner path is an indexscan, be sure to use its estimated
1240  * output row count, which may be lower than the restriction-clause-only row
1241  * count of its parent.  (We don't include this case in the PATH_ROWS macro
1242  * because it applies *only* to a nestloop's inner relation.)  We have to
1243  * be prepared to recurse through Append nodes in case of an appendrel.
1244  */
1245 static double
1246 nestloop_inner_path_rows(Path *path)
1247 {
1248         double          result;
1249
1250         if (IsA(path, IndexPath))
1251                 result = ((IndexPath *) path)->rows;
1252         else if (IsA(path, BitmapHeapPath))
1253                 result = ((BitmapHeapPath *) path)->rows;
1254         else if (IsA(path, AppendPath))
1255         {
1256                 ListCell   *l;
1257
1258                 result = 0;
1259                 foreach(l, ((AppendPath *) path)->subpaths)
1260                 {
1261                         result += nestloop_inner_path_rows((Path *) lfirst(l));
1262                 }
1263         }
1264         else
1265                 result = PATH_ROWS(path);
1266
1267         return result;
1268 }
1269
1270 /*
1271  * cost_nestloop
1272  *        Determines and returns the cost of joining two relations using the
1273  *        nested loop algorithm.
1274  *
1275  * 'path' is already filled in except for the cost fields
1276  */
1277 void
1278 cost_nestloop(NestPath *path, PlannerInfo *root)
1279 {
1280         Path       *outer_path = path->outerjoinpath;
1281         Path       *inner_path = path->innerjoinpath;
1282         Cost            startup_cost = 0;
1283         Cost            run_cost = 0;
1284         Cost            cpu_per_tuple;
1285         QualCost        restrict_qual_cost;
1286         double          outer_path_rows = PATH_ROWS(outer_path);
1287         double          inner_path_rows = nestloop_inner_path_rows(inner_path);
1288         double          ntuples;
1289         Selectivity joininfactor;
1290
1291         if (!enable_nestloop)
1292                 startup_cost += disable_cost;
1293
1294         /*
1295          * If we're doing JOIN_IN then we will stop scanning inner tuples for an
1296          * outer tuple as soon as we have one match.  Account for the effects of
1297          * this by scaling down the cost estimates in proportion to the JOIN_IN
1298          * selectivity.  (This assumes that all the quals attached to the join are
1299          * IN quals, which should be true.)
1300          */
1301         joininfactor = join_in_selectivity(path, root);
1302
1303         /* cost of source data */
1304
1305         /*
1306          * NOTE: clearly, we must pay both outer and inner paths' startup_cost
1307          * before we can start returning tuples, so the join's startup cost is
1308          * their sum.  What's not so clear is whether the inner path's
1309          * startup_cost must be paid again on each rescan of the inner path. This
1310          * is not true if the inner path is materialized or is a hashjoin, but
1311          * probably is true otherwise.
1312          */
1313         startup_cost += outer_path->startup_cost + inner_path->startup_cost;
1314         run_cost += outer_path->total_cost - outer_path->startup_cost;
1315         if (IsA(inner_path, MaterialPath) ||
1316                 IsA(inner_path, HashPath))
1317         {
1318                 /* charge only run cost for each iteration of inner path */
1319         }
1320         else
1321         {
1322                 /*
1323                  * charge startup cost for each iteration of inner path, except we
1324                  * already charged the first startup_cost in our own startup
1325                  */
1326                 run_cost += (outer_path_rows - 1) * inner_path->startup_cost;
1327         }
1328         run_cost += outer_path_rows *
1329                 (inner_path->total_cost - inner_path->startup_cost) * joininfactor;
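        /*
         * Illustrative example (hypothetical numbers): suppose outer_path_rows
         * = 100 and the inner path has startup_cost = 2.0 and total_cost = 12.0,
         * with joininfactor = 1.0.  One inner startup (2.0) was charged into
         * startup_cost above; if the inner path is neither a MaterialPath nor a
         * HashPath, the remaining 99 rescans add 99 * 2.0 = 198.0 to run_cost,
         * and the per-scan run cost adds 100 * (12.0 - 2.0) = 1000.0.
         */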
1330
1331         /*
1332          * Compute number of tuples processed (not number emitted!)
1333          */
1334         ntuples = outer_path_rows * inner_path_rows * joininfactor;
1335
1336         /* CPU costs */
1337         cost_qual_eval(&restrict_qual_cost, path->joinrestrictinfo, root);
1338         startup_cost += restrict_qual_cost.startup;
1339         cpu_per_tuple = cpu_tuple_cost + restrict_qual_cost.per_tuple;
1340         run_cost += cpu_per_tuple * ntuples;
1341
1342         path->path.startup_cost = startup_cost;
1343         path->path.total_cost = startup_cost + run_cost;
1344 }
1345
1346 /*
1347  * cost_mergejoin
1348  *        Determines and returns the cost of joining two relations using the
1349  *        merge join algorithm.
1350  *
1351  * 'path' is already filled in except for the cost fields
1352  *
1353  * Notes: path's mergeclauses should be a subset of the joinrestrictinfo list;
1354  * outersortkeys and innersortkeys are lists of the keys to be used
1355  * to sort the outer and inner relations, or NIL if no explicit
1356  * sort is needed because the source path is already ordered.
1357  */
1358 void
1359 cost_mergejoin(MergePath *path, PlannerInfo *root)
1360 {
1361         Path       *outer_path = path->jpath.outerjoinpath;
1362         Path       *inner_path = path->jpath.innerjoinpath;
1363         List       *mergeclauses = path->path_mergeclauses;
1364         List       *outersortkeys = path->outersortkeys;
1365         List       *innersortkeys = path->innersortkeys;
1366         Cost            startup_cost = 0;
1367         Cost            run_cost = 0;
1368         Cost            cpu_per_tuple;
1369         Selectivity merge_selec;
1370         QualCost        merge_qual_cost;
1371         QualCost        qp_qual_cost;
1372         double          outer_path_rows = PATH_ROWS(outer_path);
1373         double          inner_path_rows = PATH_ROWS(inner_path);
1374         double          outer_rows,
1375                                 inner_rows;
1376         double          mergejointuples,
1377                                 rescannedtuples;
1378         double          rescanratio;
1379         Selectivity outerscansel,
1380                                 innerscansel;
1381         Selectivity joininfactor;
1382         Path            sort_path;              /* dummy for result of cost_sort */
1383
1384         if (!enable_mergejoin)
1385                 startup_cost += disable_cost;
1386
1387         /*
1388          * Compute cost and selectivity of the mergequals and qpquals (other
1389          * restriction clauses) separately.  We use approx_selectivity here for
1390          * speed --- in most cases, any errors won't affect the result much.
1391          *
1392          * Note: it's probably bogus to use the normal selectivity calculation
1393          * here when either the outer or inner path is a UniquePath.
1394          */
1395         merge_selec = approx_selectivity(root, mergeclauses,
1396                                                                          path->jpath.jointype);
1397         cost_qual_eval(&merge_qual_cost, mergeclauses, root);
1398         cost_qual_eval(&qp_qual_cost, path->jpath.joinrestrictinfo, root);
1399         qp_qual_cost.startup -= merge_qual_cost.startup;
1400         qp_qual_cost.per_tuple -= merge_qual_cost.per_tuple;
1401
1402         /* approx # tuples passing the merge quals */
1403         mergejointuples = clamp_row_est(merge_selec * outer_path_rows * inner_path_rows);
1404
1405         /*
1406          * When there are equal merge keys in the outer relation, the mergejoin
1407          * must rescan any matching tuples in the inner relation. This means
1408          * re-fetching inner tuples.  Our cost model for this is that a re-fetch
1409          * costs the same as an original fetch, which is probably an overestimate;
1410          * but on the other hand we ignore the bookkeeping costs of mark/restore.
1411          * Not clear if it's worth developing a more refined model.
1412          *
1413          * The number of re-fetches can be estimated approximately as size of
1414          * merge join output minus size of inner relation.      Assume that the
1415          * distinct key values are 1, 2, ..., and denote the number of values of
1416          * each key in the outer relation as m1, m2, ...; in the inner relation,
1417          * n1, n2, ... Then we have
1418          *
1419          * size of join = m1 * n1 + m2 * n2 + ...
1420          *
1421          * number of rescanned tuples = (m1 - 1) * n1 + (m2 - 1) * n2 + ...
1422          *              = m1 * n1 + m2 * n2 + ... - (n1 + n2 + ...)
1423          *              = size of join - size of inner relation
1424          *
1425          * This equation works correctly for outer tuples having no inner match
1426          * (nk = 0), but not for inner tuples having no outer match (mk = 0); we
1427          * are effectively subtracting those from the number of rescanned tuples,
1428          * when we should not.  Can we do better without expensive selectivity
1429          * computations?
1430          */
1431         if (IsA(outer_path, UniquePath))
1432                 rescannedtuples = 0;
1433         else
1434         {
1435                 rescannedtuples = mergejointuples - inner_path_rows;
1436                 /* Must clamp because of possible underestimate */
1437                 if (rescannedtuples < 0)
1438                         rescannedtuples = 0;
1439         }
1440         /* We'll inflate inner run cost this much to account for rescanning */
1441         rescanratio = 1.0 + (rescannedtuples / inner_path_rows);
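        /*
         * Illustrative example (hypothetical numbers): with outer key
         * multiplicities m = {2, 3} and inner multiplicities n = {4, 5},
         * size of join = 2*4 + 3*5 = 23 and size of inner = 4 + 5 = 9, so
         * rescannedtuples = 23 - 9 = 14 (the same as (2-1)*4 + (3-1)*5) and
         * rescanratio = 1.0 + 14/9, roughly 2.56.
         */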
1442
1443         /*
1444          * A merge join will stop as soon as it exhausts either input stream
1445          * (unless it's an outer join, in which case the outer side has to be
1446          * scanned all the way anyway).  Estimate fraction of the left and right
1447          * inputs that will actually need to be scanned. We use only the first
1448          * (most significant) merge clause for this purpose.  Since
1449          * mergejoinscansel() is a fairly expensive computation, we cache the
1450          * results in the merge clause RestrictInfo.
1451          */
1452         if (mergeclauses && path->jpath.jointype != JOIN_FULL)
1453         {
1454                 RestrictInfo *firstclause = (RestrictInfo *) linitial(mergeclauses);
1455                 List       *opathkeys;
1456                 List       *ipathkeys;
1457                 PathKey    *opathkey;
1458                 PathKey    *ipathkey;
1459                 MergeScanSelCache *cache;
1460
1461                 /* Get the input pathkeys to determine the sort-order details */
1462                 opathkeys = outersortkeys ? outersortkeys : outer_path->pathkeys;
1463                 ipathkeys = innersortkeys ? innersortkeys : inner_path->pathkeys;
1464                 Assert(opathkeys);
1465                 Assert(ipathkeys);
1466                 opathkey = (PathKey *) linitial(opathkeys);
1467                 ipathkey = (PathKey *) linitial(ipathkeys);
1468                 /* debugging check */
1469                 if (opathkey->pk_opfamily != ipathkey->pk_opfamily ||
1470                         opathkey->pk_strategy != ipathkey->pk_strategy ||
1471                         opathkey->pk_nulls_first != ipathkey->pk_nulls_first)
1472                         elog(ERROR, "left and right pathkeys do not match in mergejoin");
1473
1474                 /* Get the selectivity with caching */
1475                 cache = cached_scansel(root, firstclause, opathkey);
1476
1477                 if (bms_is_subset(firstclause->left_relids,
1478                                                   outer_path->parent->relids))
1479                 {
1480                         /* left side of clause is outer */
1481                         outerscansel = cache->leftscansel;
1482                         innerscansel = cache->rightscansel;
1483                 }
1484                 else
1485                 {
1486                         /* left side of clause is inner */
1487                         outerscansel = cache->rightscansel;
1488                         innerscansel = cache->leftscansel;
1489                 }
1490                 if (path->jpath.jointype == JOIN_LEFT)
1491                         outerscansel = 1.0;
1492                 else if (path->jpath.jointype == JOIN_RIGHT)
1493                         innerscansel = 1.0;
1494         }
1495         else
1496         {
1497                 /* cope with clauseless or full mergejoin */
1498                 outerscansel = innerscansel = 1.0;
1499         }
1500
1501         /* convert selectivity to row count; must scan at least one row */
1502         outer_rows = clamp_row_est(outer_path_rows * outerscansel);
1503         inner_rows = clamp_row_est(inner_path_rows * innerscansel);
1504
1505         /*
1506          * Readjust scan selectivities to account for above rounding.  This is
1507          * normally an insignificant effect, but when there are only a few rows in
1508          * the inputs, failing to do this makes for a large percentage error.
1509          */
1510         outerscansel = outer_rows / outer_path_rows;
1511         innerscansel = inner_rows / inner_path_rows;
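        /*
         * Illustrative example (hypothetical numbers): if outer_path_rows = 3
         * and the cached outerscansel is 0.1, then clamp_row_est(3 * 0.1) = 1,
         * so outerscansel is readjusted to 1/3 (about 0.33) rather than 0.1 --
         * a large relative change that only matters because the input is so
         * small.
         */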
1512
1513         /* cost of source data */
1514
1515         if (outersortkeys)                      /* do we need to sort outer? */
1516         {
1517                 cost_sort(&sort_path,
1518                                   root,
1519                                   outersortkeys,
1520                                   outer_path->total_cost,
1521                                   outer_path_rows,
1522                                   outer_path->parent->width,
1523                                   -1.0);
1524                 startup_cost += sort_path.startup_cost;
1525                 run_cost += (sort_path.total_cost - sort_path.startup_cost)
1526                         * outerscansel;
1527         }
1528         else
1529         {
1530                 startup_cost += outer_path->startup_cost;
1531                 run_cost += (outer_path->total_cost - outer_path->startup_cost)
1532                         * outerscansel;
1533         }
1534
1535         if (innersortkeys)                      /* do we need to sort inner? */
1536         {
1537                 cost_sort(&sort_path,
1538                                   root,
1539                                   innersortkeys,
1540                                   inner_path->total_cost,
1541                                   inner_path_rows,
1542                                   inner_path->parent->width,
1543                                   -1.0);
1544                 startup_cost += sort_path.startup_cost;
1545                 run_cost += (sort_path.total_cost - sort_path.startup_cost)
1546                         * innerscansel * rescanratio;
1547         }
1548         else
1549         {
1550                 startup_cost += inner_path->startup_cost;
1551                 run_cost += (inner_path->total_cost - inner_path->startup_cost)
1552                         * innerscansel * rescanratio;
1553         }
1554
1555         /* CPU costs */
1556
1557         /*
1558          * If we're doing JOIN_IN then we will stop outputting inner tuples for an
1559          * outer tuple as soon as we have one match.  Account for the effects of
1560          * this by scaling down the cost estimates in proportion to the expected
1561          * output size.  (This assumes that all the quals attached to the join are
1562          * IN quals, which should be true.)
1563          */
1564         joininfactor = join_in_selectivity(&path->jpath, root);
1565
1566         /*
1567          * The number of tuple comparisons needed is approximately number of outer
1568          * rows plus number of inner rows plus number of rescanned tuples (can we
1569          * refine this?).  At each one, we need to evaluate the mergejoin quals.
1570          * NOTE: JOIN_IN mode does not save any work here, so do NOT include
1571          * joininfactor.
1572          */
1573         startup_cost += merge_qual_cost.startup;
1574         run_cost += merge_qual_cost.per_tuple *
1575                 (outer_rows + inner_rows * rescanratio);
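        /*
         * Illustrative example (hypothetical numbers): with outer_rows = 1000,
         * inner_rows = 500, rescanratio = 1.2, and a merge qual costing one
         * cpu_operator_cost (0.0025 by default) per evaluation, this charges
         * (1000 + 500 * 1.2) * 0.0025 = 1600 * 0.0025 = 4.0 to run_cost.
         */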
1576
1577         /*
1578          * For each tuple that gets through the mergejoin proper, we charge
1579          * cpu_tuple_cost plus the cost of evaluating additional restriction
1580          * clauses that are to be applied at the join.  (This is pessimistic since
1581          * not all of the quals may get evaluated at each tuple.)  This work is
1582          * skipped in JOIN_IN mode, so apply the factor.
1583          */
1584         startup_cost += qp_qual_cost.startup;
1585         cpu_per_tuple = cpu_tuple_cost + qp_qual_cost.per_tuple;
1586         run_cost += cpu_per_tuple * mergejointuples * joininfactor;
1587
1588         path->jpath.path.startup_cost = startup_cost;
1589         path->jpath.path.total_cost = startup_cost + run_cost;
1590 }
1591
1592 /*
1593  * run mergejoinscansel() with caching
1594  */
1595 static MergeScanSelCache *
1596 cached_scansel(PlannerInfo *root, RestrictInfo *rinfo, PathKey *pathkey)
1597 {
1598         MergeScanSelCache *cache;
1599         ListCell   *lc;
1600         Selectivity leftscansel,
1601                                 rightscansel;
1602         MemoryContext oldcontext;
1603
1604         /* Do we have this result already? */
1605         foreach(lc, rinfo->scansel_cache)
1606         {
1607                 cache = (MergeScanSelCache *) lfirst(lc);
1608                 if (cache->opfamily == pathkey->pk_opfamily &&
1609                         cache->strategy == pathkey->pk_strategy &&
1610                         cache->nulls_first == pathkey->pk_nulls_first)
1611                         return cache;
1612         }
1613
1614         /* Nope, do the computation */
1615         mergejoinscansel(root,
1616                                          (Node *) rinfo->clause,
1617                                          pathkey->pk_opfamily,
1618                                          pathkey->pk_strategy,
1619                                          pathkey->pk_nulls_first,
1620                                          &leftscansel,
1621                                          &rightscansel);
1622
1623         /* Cache the result in suitably long-lived workspace */
1624         oldcontext = MemoryContextSwitchTo(root->planner_cxt);
1625
1626         cache = (MergeScanSelCache *) palloc(sizeof(MergeScanSelCache));
1627         cache->opfamily = pathkey->pk_opfamily;
1628         cache->strategy = pathkey->pk_strategy;
1629         cache->nulls_first = pathkey->pk_nulls_first;
1630         cache->leftscansel = leftscansel;
1631         cache->rightscansel = rightscansel;
1632
1633         rinfo->scansel_cache = lappend(rinfo->scansel_cache, cache);
1634
1635         MemoryContextSwitchTo(oldcontext);
1636
1637         return cache;
1638 }
1639
1640 /*
1641  * cost_hashjoin
1642  *        Determines and returns the cost of joining two relations using the
1643  *        hash join algorithm.
1644  *
1645  * 'path' is already filled in except for the cost fields
1646  *
1647  * Note: path's hashclauses should be a subset of the joinrestrictinfo list
1648  */
1649 void
1650 cost_hashjoin(HashPath *path, PlannerInfo *root)
1651 {
1652         Path       *outer_path = path->jpath.outerjoinpath;
1653         Path       *inner_path = path->jpath.innerjoinpath;
1654         List       *hashclauses = path->path_hashclauses;
1655         Cost            startup_cost = 0;
1656         Cost            run_cost = 0;
1657         Cost            cpu_per_tuple;
1658         Selectivity hash_selec;
1659         QualCost        hash_qual_cost;
1660         QualCost        qp_qual_cost;
1661         double          hashjointuples;
1662         double          outer_path_rows = PATH_ROWS(outer_path);
1663         double          inner_path_rows = PATH_ROWS(inner_path);
1664         int                     num_hashclauses = list_length(hashclauses);
1665         int                     numbuckets;
1666         int                     numbatches;
1667         double          virtualbuckets;
1668         Selectivity innerbucketsize;
1669         Selectivity joininfactor;
1670         ListCell   *hcl;
1671
1672         if (!enable_hashjoin)
1673                 startup_cost += disable_cost;
1674
1675         /*
1676          * Compute cost and selectivity of the hashquals and qpquals (other
1677          * restriction clauses) separately.  We use approx_selectivity here for
1678          * speed --- in most cases, any errors won't affect the result much.
1679          *
1680          * Note: it's probably bogus to use the normal selectivity calculation
1681          * here when either the outer or inner path is a UniquePath.
1682          */
1683         hash_selec = approx_selectivity(root, hashclauses,
1684                                                                         path->jpath.jointype);
1685         cost_qual_eval(&hash_qual_cost, hashclauses, root);
1686         cost_qual_eval(&qp_qual_cost, path->jpath.joinrestrictinfo, root);
1687         qp_qual_cost.startup -= hash_qual_cost.startup;
1688         qp_qual_cost.per_tuple -= hash_qual_cost.per_tuple;
1689
1690         /* approx # tuples passing the hash quals */
1691         hashjointuples = clamp_row_est(hash_selec * outer_path_rows * inner_path_rows);
1692
1693         /* cost of source data */
1694         startup_cost += outer_path->startup_cost;
1695         run_cost += outer_path->total_cost - outer_path->startup_cost;
1696         startup_cost += inner_path->total_cost;
1697
1698         /*
1699          * Cost of computing hash function: must do it once per input tuple. We
1700          * charge one cpu_operator_cost for each column's hash function.  Also,
1701          * tack on one cpu_tuple_cost per inner row, to model the costs of
1702          * inserting the row into the hashtable.
1703          *
1704          * XXX when a hashclause is more complex than a single operator, we really
1705          * should charge the extra eval costs of the left or right side, as
1706          * appropriate, here.  This seems more work than it's worth at the moment.
1707          */
1708         startup_cost += (cpu_operator_cost * num_hashclauses + cpu_tuple_cost)
1709                 * inner_path_rows;
1710         run_cost += cpu_operator_cost * num_hashclauses * outer_path_rows;
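        /*
         * Illustrative example (hypothetical numbers, default settings
         * cpu_operator_cost = 0.0025 and cpu_tuple_cost = 0.01 assumed): with
         * one hashclause, inner_path_rows = 10000 and outer_path_rows = 100000,
         * startup_cost gains (0.0025 + 0.01) * 10000 = 125.0 and run_cost gains
         * 0.0025 * 100000 = 250.0.
         */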
1711
1712         /* Get hash table size that executor would use for inner relation */
1713         ExecChooseHashTableSize(inner_path_rows,
1714                                                         inner_path->parent->width,
1715                                                         &numbuckets,
1716                                                         &numbatches);
1717         virtualbuckets = (double) numbuckets * (double) numbatches;
1718
1719         /*
1720          * Determine bucketsize fraction for inner relation.  We use the smallest
1721          * bucketsize estimated for any individual hashclause; this is undoubtedly
1722          * conservative.
1723          *
1724          * BUT: if inner relation has been unique-ified, we can assume it's good
1725          * for hashing.  This is important both because it's the right answer, and
1726          * because we avoid contaminating the cache with a value that's wrong for
1727          * non-unique-ified paths.
1728          */
1729         if (IsA(inner_path, UniquePath))
1730                 innerbucketsize = 1.0 / virtualbuckets;
1731         else
1732         {
1733                 innerbucketsize = 1.0;
1734                 foreach(hcl, hashclauses)
1735                 {
1736                         RestrictInfo *restrictinfo = (RestrictInfo *) lfirst(hcl);
1737                         Selectivity thisbucketsize;
1738
1739                         Assert(IsA(restrictinfo, RestrictInfo));
1740
1741                         /*
1742                          * First we have to figure out which side of the hashjoin clause
1743                          * is the inner side.
1744                          *
1745                          * Since we tend to visit the same clauses over and over when
1746                          * planning a large query, we cache the bucketsize estimate in the
1747                          * RestrictInfo node to avoid repeated lookups of statistics.
1748                          */
1749                         if (bms_is_subset(restrictinfo->right_relids,
1750                                                           inner_path->parent->relids))
1751                         {
1752                                 /* righthand side is inner */
1753                                 thisbucketsize = restrictinfo->right_bucketsize;
1754                                 if (thisbucketsize < 0)
1755                                 {
1756                                         /* not cached yet */
1757                                         thisbucketsize =
1758                                                 estimate_hash_bucketsize(root,
1759                                                                                    get_rightop(restrictinfo->clause),
1760                                                                                                  virtualbuckets);
1761                                         restrictinfo->right_bucketsize = thisbucketsize;
1762                                 }
1763                         }
1764                         else
1765                         {
1766                                 Assert(bms_is_subset(restrictinfo->left_relids,
1767                                                                          inner_path->parent->relids));
1768                                 /* lefthand side is inner */
1769                                 thisbucketsize = restrictinfo->left_bucketsize;
1770                                 if (thisbucketsize < 0)
1771                                 {
1772                                         /* not cached yet */
1773                                         thisbucketsize =
1774                                                 estimate_hash_bucketsize(root,
1775                                                                                         get_leftop(restrictinfo->clause),
1776                                                                                                  virtualbuckets);
1777                                         restrictinfo->left_bucketsize = thisbucketsize;
1778                                 }
1779                         }
1780
1781                         if (innerbucketsize > thisbucketsize)
1782                                 innerbucketsize = thisbucketsize;
1783                 }
1784         }
1785
1786         /*
1787          * If inner relation is too big then we will need to "batch" the join,
1788          * which implies writing and reading most of the tuples to disk an extra
1789          * time.  Charge seq_page_cost per page, since the I/O should be nice and
1790          * sequential.  Writing the inner rel counts as startup cost, all the rest
1791          * as run cost.
1792          */
1793         if (numbatches > 1)
1794         {
1795                 double          outerpages = page_size(outer_path_rows,
1796                                                                                    outer_path->parent->width);
1797                 double          innerpages = page_size(inner_path_rows,
1798                                                                                    inner_path->parent->width);
1799
1800                 startup_cost += seq_page_cost * innerpages;
1801                 run_cost += seq_page_cost * (innerpages + 2 * outerpages);
1802         }
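        /*
         * Illustrative example (hypothetical numbers): with innerpages = 1000,
         * outerpages = 5000, and the default seq_page_cost = 1.0, batching adds
         * 1000 to startup_cost (writing the inner rel) and 1000 + 2 * 5000 =
         * 11000 to run_cost (rereading the inner rel, writing and rereading the
         * outer rel).
         */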
1803
1804         /* CPU costs */
1805
1806         /*
1807          * If we're doing JOIN_IN then we will stop comparing inner tuples to an
1808          * outer tuple as soon as we have one match.  Account for the effects of
1809          * this by scaling down the cost estimates in proportion to the expected
1810          * output size.  (This assumes that all the quals attached to the join are
1811          * IN quals, which should be true.)
1812          */
1813         joininfactor = join_in_selectivity(&path->jpath, root);
1814
1815         /*
1816          * The number of tuple comparisons needed is the number of outer tuples
1817          * times the typical number of tuples in a hash bucket, which is the inner
1818          * relation size times its bucketsize fraction.  At each one, we need to
1819          * evaluate the hashjoin quals.  But actually, charging the full qual eval
1820          * cost at each tuple is pessimistic, since we don't evaluate the quals
1821          * unless the hash values match exactly.  For lack of a better idea, halve
1822          * the cost estimate to allow for that.
1823          */
1824         startup_cost += hash_qual_cost.startup;
1825         run_cost += hash_qual_cost.per_tuple *
1826                 outer_path_rows * clamp_row_est(inner_path_rows * innerbucketsize) *
1827                 joininfactor * 0.5;
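        /*
         * Illustrative example (hypothetical numbers): with outer_path_rows =
         * 100000, inner_path_rows = 10000, and innerbucketsize = 0.001, the
         * expected bucket holds clamp_row_est(10000 * 0.001) = 10 tuples, so we
         * charge 100000 * 10 * 0.5 = 500000 hash-qual evaluations (taking
         * joininfactor = 1.0), or 1250.0 at hash_qual_cost.per_tuple = 0.0025.
         */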
1828
1829         /*
1830          * For each tuple that gets through the hashjoin proper, we charge
1831          * cpu_tuple_cost plus the cost of evaluating additional restriction
1832          * clauses that are to be applied at the join.  (This is pessimistic since
1833          * not all of the quals may get evaluated at each tuple.)
1834          */
1835         startup_cost += qp_qual_cost.startup;
1836         cpu_per_tuple = cpu_tuple_cost + qp_qual_cost.per_tuple;
1837         run_cost += cpu_per_tuple * hashjointuples * joininfactor;
1838
1839         path->jpath.path.startup_cost = startup_cost;
1840         path->jpath.path.total_cost = startup_cost + run_cost;
1841 }
1842
1843
1844 /*
1845  * cost_qual_eval
1846  *              Estimate the CPU costs of evaluating a WHERE clause.
1847  *              The input can be either an implicitly-ANDed list of boolean
1848  *              expressions, or a list of RestrictInfo nodes.  (The latter is
1849  *              preferred since it allows caching of the results.)
1850  *              The result includes both a one-time (startup) component,
1851  *              and a per-evaluation component.
1852  */
1853 void
1854 cost_qual_eval(QualCost *cost, List *quals, PlannerInfo *root)
1855 {
1856         cost_qual_eval_context context;
1857         ListCell   *l;
1858
1859         context.root = root;
1860         context.total.startup = 0;
1861         context.total.per_tuple = 0;
1862
1863         /* We don't charge any cost for the implicit ANDing at top level ... */
1864
1865         foreach(l, quals)
1866         {
1867                 Node       *qual = (Node *) lfirst(l);
1868
1869                 cost_qual_eval_walker(qual, &context);
1870         }
1871
1872         *cost = context.total;
1873 }
1874
1875 /*
1876  * cost_qual_eval_node
1877  *              As above, for a single RestrictInfo or expression.
1878  */
1879 void
1880 cost_qual_eval_node(QualCost *cost, Node *qual, PlannerInfo *root)
1881 {
1882         cost_qual_eval_context context;
1883
1884         context.root = root;
1885         context.total.startup = 0;
1886         context.total.per_tuple = 0;
1887
1888         cost_qual_eval_walker(qual, &context);
1889
1890         *cost = context.total;
1891 }
1892
1893 static bool
1894 cost_qual_eval_walker(Node *node, cost_qual_eval_context *context)
1895 {
1896         if (node == NULL)
1897                 return false;
1898
1899         /*
1900          * RestrictInfo nodes contain an eval_cost field reserved for this
1901          * routine's use, so that it's not necessary to evaluate the qual clause's
1902          * cost more than once.  If the clause's cost hasn't been computed yet,
1903          * the field's startup value will contain -1.
1904          */
1905         if (IsA(node, RestrictInfo))
1906         {
1907                 RestrictInfo *rinfo = (RestrictInfo *) node;
1908
1909                 if (rinfo->eval_cost.startup < 0)
1910                 {
1911                         cost_qual_eval_context locContext;
1912
1913                         locContext.root = context->root;
1914                         locContext.total.startup = 0;
1915                         locContext.total.per_tuple = 0;
1916
1917                         /*
1918                          * For an OR clause, recurse into the marked-up tree so that we
1919                          * set the eval_cost for contained RestrictInfos too.
1920                          */
1921                         if (rinfo->orclause)
1922                                 cost_qual_eval_walker((Node *) rinfo->orclause, &locContext);
1923                         else
1924                                 cost_qual_eval_walker((Node *) rinfo->clause, &locContext);
1925
1926                         /*
1927                          * If the RestrictInfo is marked pseudoconstant, it will be tested
1928                          * only once, so treat its cost as all startup cost.
1929                          */
1930                         if (rinfo->pseudoconstant)
1931                         {
1932                                 /* count one execution during startup */
1933                                 locContext.total.startup += locContext.total.per_tuple;
1934                                 locContext.total.per_tuple = 0;
1935                         }
1936                         rinfo->eval_cost = locContext.total;
1937                 }
1938                 context->total.startup += rinfo->eval_cost.startup;
1939                 context->total.per_tuple += rinfo->eval_cost.per_tuple;
1940                 /* do NOT recurse into children */
1941                 return false;
1942         }
1943
1944         /*
1945          * For each operator or function node in the given tree, we charge the
1946          * estimated execution cost given by pg_proc.procost (remember to multiply
1947          * this by cpu_operator_cost).
1948          *
1949          * Vars and Consts are charged zero, and so are boolean operators (AND,
1950          * OR, NOT). Simplistic, but a lot better than no model at all.
1951          *
1952          * Should we try to account for the possibility of short-circuit
1953          * evaluation of AND/OR?  Probably *not*, because that would make the
1954          * results depend on the clause ordering, and we are not in any position
1955          * to expect that the current ordering of the clauses is the one that's
1956          * going to end up being used.  (Is it worth applying order_qual_clauses
1957          * much earlier in the planning process to fix this?)
1958          */
1959         if (IsA(node, FuncExpr))
1960         {
1961                 context->total.per_tuple +=
1962                         get_func_cost(((FuncExpr *) node)->funcid) * cpu_operator_cost;
1963         }
1964         else if (IsA(node, OpExpr) ||
1965                          IsA(node, DistinctExpr) ||
1966                          IsA(node, NullIfExpr))
1967         {
1968                 /* rely on struct equivalence to treat these all alike */
1969                 set_opfuncid((OpExpr *) node);
1970                 context->total.per_tuple +=
1971                         get_func_cost(((OpExpr *) node)->opfuncid) * cpu_operator_cost;
1972         }
1973         else if (IsA(node, ScalarArrayOpExpr))
1974         {
1975                 /*
1976                  * Estimate that the operator will be applied to about half of the
1977                  * array elements before the answer is determined.
1978                  */
1979                 ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) node;
1980                 Node       *arraynode = (Node *) lsecond(saop->args);
1981
1982                 set_sa_opfuncid(saop);
1983                 context->total.per_tuple += get_func_cost(saop->opfuncid) *
1984                         cpu_operator_cost * estimate_array_length(arraynode) * 0.5;
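                /*
                 * Illustrative example (hypothetical case): for "x = ANY (arr)"
                 * where arr is estimated to hold 10 elements and the comparison
                 * operator's function has the default procost of 1, this charges
                 * 10 * 0.5 = 5 operator evaluations, i.e. 5 * cpu_operator_cost
                 * per tuple.
                 */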
1985         }
1986         else if (IsA(node, CoerceViaIO))
1987         {
1988                 CoerceViaIO *iocoerce = (CoerceViaIO *) node;
1989                 Oid                     iofunc;
1990                 Oid                     typioparam;
1991                 bool            typisvarlena;
1992
1993                 /* check the result type's input function */
1994                 getTypeInputInfo(iocoerce->resulttype,
1995                                                  &iofunc, &typioparam);
1996                 context->total.per_tuple += get_func_cost(iofunc) * cpu_operator_cost;
1997                 /* check the input type's output function */
1998                 getTypeOutputInfo(exprType((Node *) iocoerce->arg),
1999                                                   &iofunc, &typisvarlena);
2000                 context->total.per_tuple += get_func_cost(iofunc) * cpu_operator_cost;
2001         }
2002         else if (IsA(node, ArrayCoerceExpr))
2003         {
2004                 ArrayCoerceExpr *acoerce = (ArrayCoerceExpr *) node;
2005                 Node       *arraynode = (Node *) acoerce->arg;
2006
2007                 if (OidIsValid(acoerce->elemfuncid))
2008                         context->total.per_tuple += get_func_cost(acoerce->elemfuncid) *
2009                                 cpu_operator_cost * estimate_array_length(arraynode);
2010         }
2011         else if (IsA(node, RowCompareExpr))
2012         {
2013                 /* Conservatively assume we will check all the columns */
2014                 RowCompareExpr *rcexpr = (RowCompareExpr *) node;
2015                 ListCell   *lc;
2016
2017                 foreach(lc, rcexpr->opnos)
2018                 {
2019                         Oid                     opid = lfirst_oid(lc);
2020
2021                         context->total.per_tuple += get_func_cost(get_opcode(opid)) *
2022                                 cpu_operator_cost;
2023                 }
2024         }
2025         else if (IsA(node, CurrentOfExpr))
2026         {
2027                 /* Report high cost to prevent selection of anything but TID scan */
2028                 context->total.startup += disable_cost;
2029         }
2030         else if (IsA(node, SubLink))
2031         {
2032                 /* This routine should not be applied to un-planned expressions */
2033                 elog(ERROR, "cannot handle unplanned sub-select");
2034         }
2035         else if (IsA(node, SubPlan))
2036         {
2037                 /*
2038                  * A subplan node in an expression typically indicates that the
2039                  * subplan will be executed on each evaluation, so charge accordingly.
2040                  * (Sub-selects that can be executed as InitPlans have already been
2041                  * removed from the expression.)
2042                  *
2043                  * An exception occurs when we have decided we can implement the
2044                  * subplan by hashing.
2045                  */
2046                 SubPlan    *subplan = (SubPlan *) node;
2047                 Plan       *plan = planner_subplan_get_plan(context->root, subplan);
2048
2049                 if (subplan->useHashTable)
2050                 {
2051                         /*
2052                          * If we are using a hash table for the subquery outputs, then the
2053                          * cost of evaluating the query is a one-time cost. We charge one
2054                          * cpu_operator_cost per tuple for the work of loading the
2055                          * hashtable, too.
2056                          */
2057                         context->total.startup += plan->total_cost +
2058                                 cpu_operator_cost * plan->plan_rows;
2059
2060                         /*
2061                          * The per-tuple costs include the cost of evaluating the lefthand
2062                          * expressions, plus the cost of probing the hashtable. Recursion
2063                          * into the testexpr will handle the lefthand expressions
2064                          * properly, and will count one cpu_operator_cost for each
2065                          * comparison operator.  That is probably too low for the probing
2066                          * cost, but it's hard to make a better estimate, so live with it
2067                          * for now.
2068                          */
2069                 }
2070                 else
2071                 {
2072                         /*
2073                          * Otherwise we will be rescanning the subplan output on each
2074                          * evaluation.  We need to estimate how much of the output we will
2075                          * actually need to scan.  NOTE: this logic should agree with
2076                          * get_initplan_cost, below, and with the estimates used by
2077                          * make_subplan() in plan/subselect.c.
2078                          */
2079                         Cost            plan_run_cost = plan->total_cost - plan->startup_cost;
2080
2081                         if (subplan->subLinkType == EXISTS_SUBLINK)
2082                         {
2083                                 /* we only need to fetch 1 tuple */
2084                                 context->total.per_tuple += plan_run_cost / plan->plan_rows;
2085                         }
2086                         else if (subplan->subLinkType == ALL_SUBLINK ||
2087                                          subplan->subLinkType == ANY_SUBLINK)
2088                         {
2089                                 /* assume we need 50% of the tuples */
2090                                 context->total.per_tuple += 0.50 * plan_run_cost;
2091                                 /* also charge a cpu_operator_cost per row examined */
2092                                 context->total.per_tuple +=
2093                                         0.50 * plan->plan_rows * cpu_operator_cost;
2094                         }
2095                         else
2096                         {
2097                                 /* assume we need all tuples */
2098                                 context->total.per_tuple += plan_run_cost;
2099                         }
2100
2101                         /*
2102                          * Also account for subplan's startup cost. If the subplan is
2103                          * uncorrelated or undirect correlated, AND its topmost node is a
2104                          * Sort or Material node, assume that we'll only need to pay its
2105                          * startup cost once; otherwise assume we pay the startup cost
2106                          * every time.
2107                          */
2108                         if (subplan->parParam == NIL &&
2109                                 (IsA(plan, Sort) ||
2110                                  IsA(plan, Material)))
2111                                 context->total.startup += plan->startup_cost;
2112                         else
2113                                 context->total.per_tuple += plan->startup_cost;
2114                 }
2115         }
2116
2117         /* recurse into children */
2118         return expression_tree_walker(node, cost_qual_eval_walker,
2119                                                                   (void *) context);
2120 }
2121
2122
2123 /*
2124  * get_initplan_cost
2125  *              Get the expected cost of evaluating an initPlan.
2126  *
2127  * Keep this in sync with cost_qual_eval_walker's handling of subplans, above,
2128  * and with the estimates used by make_subplan() in plan/subselect.c.
2129  */
2130 Cost
2131 get_initplan_cost(PlannerInfo *root, SubPlan *subplan)
2132 {
2133         Cost            result;
2134         Plan       *plan = planner_subplan_get_plan(root, subplan);
2135
2136         /* initPlans never use hashtables */
2137         Assert(!subplan->useHashTable);
2138         /* they are never ALL or ANY, either */
2139         Assert(!(subplan->subLinkType == ALL_SUBLINK ||
2140                          subplan->subLinkType == ANY_SUBLINK));
2141
2142         if (subplan->subLinkType == EXISTS_SUBLINK)
2143         {
2144                 /* we only need to fetch 1 tuple */
2145                 Cost            plan_run_cost = plan->total_cost - plan->startup_cost;
2146
2147                 result = plan->startup_cost;
2148                 result += plan_run_cost / plan->plan_rows;
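                /*
                 * Illustrative example (hypothetical numbers): if the subplan
                 * has startup_cost = 5.0, total_cost = 105.0, and plan_rows =
                 * 100, then plan_run_cost = 100.0 and the result is
                 * 5.0 + 100.0 / 100 = 6.0 -- we pay full startup but expect to
                 * fetch only one row.
                 */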
2149         }
2150         else
2151         {
2152                 /* assume we need all tuples */
2153                 result = plan->total_cost;
2154         }
2155
2156         return result;
2157 }
2158
2159
2160 /*
2161  * approx_selectivity
2162  *              Quick-and-dirty estimation of clause selectivities.
2163  *              The input can be either an implicitly-ANDed list of boolean
2164  *              expressions, or a list of RestrictInfo nodes (typically the latter).
2165  *
2166  * This is quick-and-dirty because we bypass clauselist_selectivity, and
2167  * simply multiply the independent clause selectivities together.  Now
2168  * clauselist_selectivity often can't do any better than that anyhow, but
2169  * for some situations (such as range constraints) it is smarter.  However,
2170  * we can't effectively cache the results of clauselist_selectivity, whereas
2171  * the individual clause selectivities can be and are cached.
2172  *
2173  * Since we are only using the results to estimate how many potential
2174  * output tuples are generated and passed through qpqual checking, it
2175  * seems OK to live with the approximation.
2176  */
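/*
 * Illustrative example (hypothetical numbers): given two cached clause
 * selectivities of 0.1 and 0.5, approx_selectivity returns 0.1 * 0.5 = 0.05,
 * whereas clauselist_selectivity could recognize, say, a range pair on the
 * same column and produce a better combined estimate.
 */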
2177 static Selectivity
2178 approx_selectivity(PlannerInfo *root, List *quals, JoinType jointype)
2179 {
2180         Selectivity total = 1.0;
2181         ListCell   *l;
2182
2183         foreach(l, quals)
2184         {
2185                 Node       *qual = (Node *) lfirst(l);
2186
2187                 /* Note that clause_selectivity will be able to cache its result */
2188                 total *= clause_selectivity(root, qual, 0, jointype);
2189         }
2190         return total;
2191 }
2192
2193
2194 /*
2195  * set_baserel_size_estimates
2196  *              Set the size estimates for the given base relation.
2197  *
2198  * The rel's targetlist and restrictinfo list must have been constructed
2199  * already.
2200  *
2201  * We set the following fields of the rel node:
2202  *      rows: the estimated number of output tuples (after applying
2203  *                restriction clauses).
2204  *      width: the estimated average output tuple width in bytes.
2205  *      baserestrictcost: estimated cost of evaluating baserestrictinfo clauses.
2206  */
2207 void
2208 set_baserel_size_estimates(PlannerInfo *root, RelOptInfo *rel)
2209 {
2210         double          nrows;
2211
2212         /* Should only be applied to base relations */
2213         Assert(rel->relid > 0);
2214
2215         nrows = rel->tuples *
2216                 clauselist_selectivity(root,
2217                                                            rel->baserestrictinfo,
2218                                                            0,
2219                                                            JOIN_INNER);
2220
2221         rel->rows = clamp_row_est(nrows);
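        /*
         * Illustrative example (hypothetical numbers): a relation with
         * rel->tuples = 1000000 and a combined restriction selectivity of
         * 0.0005 gets rel->rows = clamp_row_est(1000000 * 0.0005) = 500.
         */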
2222
2223         cost_qual_eval(&rel->baserestrictcost, rel->baserestrictinfo, root);
2224
2225         set_rel_width(root, rel);
2226 }
2227
2228 /*
2229  * set_joinrel_size_estimates
2230  *              Set the size estimates for the given join relation.
2231  *
2232  * The rel's targetlist must have been constructed already, and a
2233  * restriction clause list that matches the given component rels must
2234  * be provided.
2235  *
2236  * Since there is more than one way to make a joinrel for more than two
2237  * base relations, the results we get here could depend on which component
2238  * rel pair is provided.  In theory we should get the same answers no matter
2239  * which pair is provided; in practice, since the selectivity estimation
2240  * routines don't handle all cases equally well, we might not.  But there's
2241  * not much to be done about it.  (Would it make sense to repeat the
2242  * calculations for each pair of input rels that's encountered, and somehow
2243  * average the results?  Probably way more trouble than it's worth.)
2244  *
2245  * It's important that the results for symmetric JoinTypes be symmetric,
2246  * eg, (rel1, rel2, JOIN_LEFT) should produce the same result as (rel2,
2247  * rel1, JOIN_RIGHT).  Also, JOIN_IN should produce the same result as
2248  * JOIN_UNIQUE_INNER, likewise JOIN_REVERSE_IN == JOIN_UNIQUE_OUTER.
2249  *
2250  * We set only the rows field here.  The width field was already set by
2251  * build_joinrel_tlist, and baserestrictcost is not used for join rels.
2252  */
2253 void
2254 set_joinrel_size_estimates(PlannerInfo *root, RelOptInfo *rel,
2255                                                    RelOptInfo *outer_rel,
2256                                                    RelOptInfo *inner_rel,
2257                                                    JoinType jointype,
2258                                                    List *restrictlist)
2259 {
2260         Selectivity jselec;
2261         Selectivity pselec;
2262         double          nrows;
2263         UniquePath *upath;
2264
2265         /*
2266          * Compute joinclause selectivity.      Note that we are only considering
2267          * clauses that become restriction clauses at this join level; we are not
2268          * double-counting them because they were not considered in estimating the
2269          * sizes of the component rels.
2270          *
2271          * For an outer join, we have to distinguish the selectivity of the join's
2272          * own clauses (JOIN/ON conditions) from any clauses that were "pushed
2273          * down".  For inner joins we just count them all as joinclauses.
2274          */
2275         if (IS_OUTER_JOIN(jointype))
2276         {
2277                 List       *joinquals = NIL;
2278                 List       *pushedquals = NIL;
2279                 ListCell   *l;
2280
2281                 /* Grovel through the clauses to separate into two lists */
2282                 foreach(l, restrictlist)
2283                 {
2284                         RestrictInfo *rinfo = (RestrictInfo *) lfirst(l);
2285
2286                         Assert(IsA(rinfo, RestrictInfo));
2287                         if (rinfo->is_pushed_down)
2288                                 pushedquals = lappend(pushedquals, rinfo);
2289                         else
2290                                 joinquals = lappend(joinquals, rinfo);
2291                 }
2292
2293                 /* Get the separate selectivities */
2294                 jselec = clauselist_selectivity(root,
2295                                                                                 joinquals,
2296                                                                                 0,
2297                                                                                 jointype);
2298                 pselec = clauselist_selectivity(root,
2299                                                                                 pushedquals,
2300                                                                                 0,
2301                                                                                 jointype);
2302
2303                 /* Avoid leaking a lot of ListCells */
2304                 list_free(joinquals);
2305                 list_free(pushedquals);
2306         }
2307         else
2308         {
2309                 jselec = clauselist_selectivity(root,
2310                                                                                 restrictlist,
2311                                                                                 0,
2312                                                                                 jointype);
2313                 pselec = 0.0;                   /* not used, keep compiler quiet */
2314         }
2315
2316         /*
2317          * Basically, we multiply size of Cartesian product by selectivity.
2318          *
2319          * If we are doing an outer join, take that into account: the joinqual
2320          * selectivity has to be clamped using the knowledge that the output must
2321          * be at least as large as the non-nullable input.      However, any
2322          * pushed-down quals are applied after the outer join, so their
2323          * selectivity applies fully.
2324          *
2325          * For JOIN_IN and variants, the Cartesian product is figured with respect
2326          * to a unique-ified input, and then we can clamp to the size of the other
2327          * input.
2328          */
2329         switch (jointype)
2330         {
2331                 case JOIN_INNER:
2332                         nrows = outer_rel->rows * inner_rel->rows * jselec;
2333                         break;
2334                 case JOIN_LEFT:
2335                         nrows = outer_rel->rows * inner_rel->rows * jselec;
2336                         if (nrows < outer_rel->rows)
2337                                 nrows = outer_rel->rows;
2338                         nrows *= pselec;
2339                         break;
2340                 case JOIN_RIGHT:
2341                         nrows = outer_rel->rows * inner_rel->rows * jselec;
2342                         if (nrows < inner_rel->rows)
2343                                 nrows = inner_rel->rows;
2344                         nrows *= pselec;
2345                         break;
2346                 case JOIN_FULL:
2347                         nrows = outer_rel->rows * inner_rel->rows * jselec;
2348                         if (nrows < outer_rel->rows)
2349                                 nrows = outer_rel->rows;
2350                         if (nrows < inner_rel->rows)
2351                                 nrows = inner_rel->rows;
2352                         nrows *= pselec;
2353                         break;
2354                 case JOIN_IN:
2355                 case JOIN_UNIQUE_INNER:
2356                         upath = create_unique_path(root, inner_rel,
2357                                                                            inner_rel->cheapest_total_path);
2358                         nrows = outer_rel->rows * upath->rows * jselec;
2359                         if (nrows > outer_rel->rows)
2360                                 nrows = outer_rel->rows;
2361                         break;
2362                 case JOIN_REVERSE_IN:
2363                 case JOIN_UNIQUE_OUTER:
2364                         upath = create_unique_path(root, outer_rel,
2365                                                                            outer_rel->cheapest_total_path);
2366                         nrows = upath->rows * inner_rel->rows * jselec;
2367                         if (nrows > inner_rel->rows)
2368                                 nrows = inner_rel->rows;
2369                         break;
2370                 default:
2371                         elog(ERROR, "unrecognized join type: %d", (int) jointype);
2372                         nrows = 0;                      /* keep compiler quiet */
2373                         break;
2374         }
2375
2376         rel->rows = clamp_row_est(nrows);
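        /*
         * Illustrative example (hypothetical numbers, JOIN_LEFT case): with
         * outer_rel->rows = 1000, inner_rel->rows = 100, jselec = 0.001, and
         * pselec = 0.5, the raw product is 1000 * 100 * 0.001 = 100, which is
         * clamped up to the outer size (1000) before the pushed-down quals are
         * applied, giving rel->rows = clamp_row_est(1000 * 0.5) = 500.
         */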
2377 }
2378
2379 /*
2380  * join_in_selectivity
2381  *        Determines the factor by which a JOIN_IN join's result is expected
2382  *        to be smaller than an ordinary inner join.
2383  *
2384  * 'path' is already filled in except for the cost fields
2385  */
2386 static Selectivity
2387 join_in_selectivity(JoinPath *path, PlannerInfo *root)
2388 {
2389         RelOptInfo *innerrel;
2390         UniquePath *innerunique;
2391         Selectivity selec;
2392         double          nrows;
2393
2394         /* Return 1.0 whenever it's not JOIN_IN */
2395         if (path->jointype != JOIN_IN)
2396                 return 1.0;
2397
2398         /*
2399          * Return 1.0 if the inner side is already known unique.  The case where
2400          * the inner path is already a UniquePath probably cannot happen in
2401          * current usage, but check it anyway for completeness.  The interesting
2402          * case is where we've determined the inner relation itself is unique,
2403          * which we can check by looking at the rows estimate for its UniquePath.
2404          */
2405         if (IsA(path->innerjoinpath, UniquePath))
2406                 return 1.0;
2407         innerrel = path->innerjoinpath->parent;
2408         innerunique = create_unique_path(root,
2409                                                                          innerrel,
2410                                                                          innerrel->cheapest_total_path);
2411         if (innerunique->rows >= innerrel->rows)
2412                 return 1.0;
2413
2414         /*
2415          * Compute same result set_joinrel_size_estimates would compute for
2416          * JOIN_INNER.  Note that we use the input rels' absolute size estimates,
2417          * not PATH_ROWS() which might be less; if we used PATH_ROWS() we'd be
2418          * double-counting the effects of any join clauses used in input scans.
2419          */
2420         selec = clauselist_selectivity(root,
2421                                                                    path->joinrestrictinfo,
2422                                                                    0,
2423                                                                    JOIN_INNER);
2424         nrows = path->outerjoinpath->parent->rows * innerrel->rows * selec;
2425
2426         nrows = clamp_row_est(nrows);
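        /*
         * Illustrative example (hypothetical numbers): if the plain inner-join
         * estimate nrows comes out to 5000 while the JOIN_IN estimate
         * path->path.parent->rows is 1000, we return 1000 / 5000 = 0.2, and the
         * cost routines above scale their per-tuple join costs down to 20%.
         */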
2427
2428         /* See if it's larger than the actual JOIN_IN size estimate */
2429         if (nrows > path->path.parent->rows)
2430                 return path->path.parent->rows / nrows;
2431         else
2432                 return 1.0;
2433 }
2434
2435 /*
2436  * set_function_size_estimates
2437  *              Set the size estimates for a base relation that is a function call.
2438  *
2439  * The rel's targetlist and restrictinfo list must have been constructed
2440  * already.
2441  *
2442  * We set the same fields as set_baserel_size_estimates.
2443  */
2444 void
2445 set_function_size_estimates(PlannerInfo *root, RelOptInfo *rel)
2446 {
2447         RangeTblEntry *rte;
2448
2449         /* Should only be applied to base relations that are functions */
2450         Assert(rel->relid > 0);
2451         rte = planner_rt_fetch(rel->relid, root);
2452         Assert(rte->rtekind == RTE_FUNCTION);
2453
2454         /* Estimate number of rows the function itself will return */
2455         rel->tuples = clamp_row_est(expression_returns_set_rows(rte->funcexpr));
2456
2457         /* Now estimate number of output rows, etc */
2458         set_baserel_size_estimates(root, rel);
2459 }
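
/*
 * Illustrative sketch (editorial addition): for a scan such as
 *              SELECT * FROM generate_series(1, 100) AS g;
 * expression_returns_set_rows() is expected to report the set-returning
 * function's declared row estimate (pg_proc.prorows, overridable with
 * CREATE FUNCTION ... ROWS n), so rel->tuples here is driven by that
 * declaration rather than by the actual argument values.
 */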
2460
2461 /*
2462  * set_values_size_estimates
2463  *              Set the size estimates for a base relation that is a values list.
2464  *
2465  * The rel's targetlist and restrictinfo list must have been constructed
2466  * already.
2467  *
2468  * We set the same fields as set_baserel_size_estimates.
2469  */
2470 void
2471 set_values_size_estimates(PlannerInfo *root, RelOptInfo *rel)
2472 {
2473         RangeTblEntry *rte;
2474
2475         /* Should only be applied to base relations that are values lists */
2476         Assert(rel->relid > 0);
2477         rte = planner_rt_fetch(rel->relid, root);
2478         Assert(rte->rtekind == RTE_VALUES);
2479
2480         /*
2481          * Estimate number of rows the values list will return. We know this
2482          * precisely based on the list length (well, barring set-returning
2483          * functions in list items, but that's a refinement not catered for
2484          * anywhere else either).
2485          */
2486         rel->tuples = list_length(rte->values_lists);
2487
2488         /* Now estimate number of output rows, etc */
2489         set_baserel_size_estimates(root, rel);
2490 }
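
/*
 * Illustrative sketch (editorial addition): for
 *              SELECT * FROM (VALUES (1, 'a'), (2, 'b'), (3, 'c')) AS v(x, y);
 * rte->values_lists contains three sublists, so rel->tuples is set to
 * exactly 3, and any quals applied to v are then accounted for by
 * set_baserel_size_estimates().
 */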
2491
2492
2493 /*
2494  * set_rel_width
2495  *              Set the estimated output width of a base relation.
2496  *
2497  * NB: this works best on plain relations because it prefers to look at
2498  * real Vars.  It will fail to make use of pg_statistic info when applied
2499  * to a subquery relation, even if the subquery outputs are simple vars
2500  * that we could have gotten info for.  Is it worth trying to be smarter
2501  * about subqueries?
2502  *
2503  * The per-attribute width estimates are cached for possible re-use while
2504  * building join relations.
2505  */
2506 static void
2507 set_rel_width(PlannerInfo *root, RelOptInfo *rel)
2508 {
2509         int32           tuple_width = 0;
2510         ListCell   *tllist;
2511         Oid                     rel_reloid;
2512
2513         /*
2514          * Usually (perhaps always), all the Vars have the same reloid, so we can
2515          * save some redundant list-searching by doing getrelid just once.
2516          */
2517         if (rel->relid > 0)
2518                 rel_reloid = getrelid(rel->relid, root->parse->rtable);
2519         else
2520                 rel_reloid = InvalidOid;        /* probably can't happen */
2521
2522         foreach(tllist, rel->reltargetlist)
2523         {
2524                 Var                *var = (Var *) lfirst(tllist);
2525                 int                     ndx;
2526                 Oid                     var_reloid;
2527                 int32           item_width;
2528
2529                 /* For now, punt on whole-row child Vars */
2530                 if (!IsA(var, Var))
2531                 {
2532                         tuple_width += 32;      /* arbitrary */
2533                         continue;
2534                 }
2535
2536                 ndx = var->varattno - rel->min_attr;
2537
2538                 /*
2539                  * The width probably hasn't been cached yet, but may as well check
2540                  */
2541                 if (rel->attr_widths[ndx] > 0)
2542                 {
2543                         tuple_width += rel->attr_widths[ndx];
2544                         continue;
2545                 }
2546
2547                 if (var->varno == rel->relid)
2548                         var_reloid = rel_reloid;
2549                 else
2550                         var_reloid = getrelid(var->varno, root->parse->rtable);
2551
2552                 if (var_reloid != InvalidOid)
2553                 {
2554                         item_width = get_attavgwidth(var_reloid, var->varattno);
2555                         if (item_width > 0)
2556                         {
2557                                 rel->attr_widths[ndx] = item_width;
2558                                 tuple_width += item_width;
2559                                 continue;
2560                         }
2561                 }
2562
2563                 /*
2564                  * Not a plain relation, or can't find statistics for it. Estimate
2565                  * using just the type info.
2566                  */
2567                 item_width = get_typavgwidth(var->vartype, var->vartypmod);
2568                 Assert(item_width > 0);
2569                 rel->attr_widths[ndx] = item_width;
2570                 tuple_width += item_width;
2571         }
2572         Assert(tuple_width >= 0);
2573         rel->width = tuple_width;
2574 }
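
/*
 * Worked example (editorial sketch with made-up statistics): for a rel
 * whose reltargetlist is (id int4, note text), with pg_statistic
 * reporting average widths of 4 and 20 respectively, the loop caches
 * attr_widths[] entries of 4 and 20 and sets rel->width = 24.  Had
 * "note" never been analyzed, its contribution would instead come from
 * get_typavgwidth(), a purely type-based default.
 */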
2575
2576 /*
2577  * relation_byte_size
2578  *        Estimate the storage space in bytes for a given number of tuples
2579  *        of a given width (size in bytes).
2580  */
2581 static double
2582 relation_byte_size(double tuples, int width)
2583 {
2584         return tuples * (MAXALIGN(width) + MAXALIGN(sizeof(HeapTupleHeaderData)));
2585 }
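
/*
 * Worked example (editorial sketch, assuming 8-byte MAXALIGN and a
 * tuple header that MAXALIGNs to 24 bytes): 1000 tuples of width 100
 * would be costed as
 *              1000 * (MAXALIGN(100) + 24) = 1000 * (104 + 24) = 128000 bytes.
 * The per-tuple header charge means narrow tuples occupy noticeably
 * more space than their data width alone suggests.
 */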
2586
2587 /*
2588  * page_size
2589  *        Returns an estimate of the number of pages covered by a given
2590  *        number of tuples of a given width (size in bytes).
2591  */
2592 static double
2593 page_size(double tuples, int width)
2594 {
2595         return ceil(relation_byte_size(tuples, width) / BLCKSZ);
2596 }
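
/*
 * Continuing the sketch above, with the default BLCKSZ of 8192:
 * 128000 bytes / 8192 = 15.625, which ceil() rounds up to 16 pages.
 * (Both numbers are illustrative; the actual values depend on the
 * platform's alignment rules and the configured block size.)
 */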