1 /*-------------------------------------------------------------------------
2  *
3  * costsize.c
4  *        Routines to compute (and set) relation sizes and path costs
5  *
6  * Path costs are measured in arbitrary units established by these basic
7  * parameters:
8  *
9  *      seq_page_cost           Cost of a sequential page fetch
10  *      random_page_cost        Cost of a non-sequential page fetch
11  *      cpu_tuple_cost          Cost of typical CPU time to process a tuple
12  *      cpu_index_tuple_cost  Cost of typical CPU time to process an index tuple
13  *      cpu_operator_cost       Cost of CPU time to execute an operator or function
14  *
15  * We expect that the kernel will typically do some amount of read-ahead
16  * optimization; this in conjunction with seek costs means that seq_page_cost
17  * is normally considerably less than random_page_cost.  (However, if the
18  * database is fully cached in RAM, it is reasonable to set them equal.)
19  *
20  * We also use a rough estimate "effective_cache_size" of the number of
21  * disk pages in Postgres + OS-level disk cache.  (We can't simply use
22  * NBuffers for this purpose because that would ignore the effects of
23  * the kernel's disk cache.)
24  *
25  * Obviously, taking constants for these values is an oversimplification,
26  * but it's tough enough to get any useful estimates even at this level of
27  * detail.      Note that all of these parameters are user-settable, in case
28  * the default values are drastically off for a particular platform.
29  *
30  * We compute two separate costs for each path:
31  *              total_cost: total estimated cost to fetch all tuples
32  *              startup_cost: cost that is expended before first tuple is fetched
33  * In some scenarios, such as when there is a LIMIT or we are implementing
34  * an EXISTS(...) sub-select, it is not necessary to fetch all tuples of the
35  * path's result.  A caller can estimate the cost of fetching a partial
36  * result by interpolating between startup_cost and total_cost.  In detail:
37  *              actual_cost = startup_cost +
38  *                      (total_cost - startup_cost) * tuples_to_fetch / path->parent->rows;
39  * Note that a base relation's rows count (and, by extension, plan_rows for
40  * plan nodes below the LIMIT node) are set without regard to any LIMIT, so
41  * that this equation works properly.  (Also, these routines guarantee not to
42  * set the rows count to zero, so there will be no zero divide.)  The LIMIT is
43  * applied as a top-level plan node.
44  *
45  * For largely historical reasons, most of the routines in this module use
46  * the passed result Path only to store their startup_cost and total_cost
47  * results into.  All the input data they need is passed as separate
48  * parameters, even though much of it could be extracted from the Path.
49  * An exception is made for the cost_XXXjoin() routines, which expect all
50  * the non-cost fields of the passed XXXPath to be filled in.
51  *
52  *
53  * Portions Copyright (c) 1996-2008, PostgreSQL Global Development Group
54  * Portions Copyright (c) 1994, Regents of the University of California
55  *
56  * IDENTIFICATION
57  *        $PostgreSQL: pgsql/src/backend/optimizer/path/costsize.c,v 1.196 2008/08/25 22:42:32 tgl Exp $
58  *
59  *-------------------------------------------------------------------------
60  */
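/*
 * Illustrative sketch of the interpolation rule above: how a caller might
 * estimate the cost of fetching only part of a path's result.  The function
 * name and the clamping of the fraction are choices made for this sketch
 * only; it is not planner code.
 */
static double
example_partial_fetch_cost(double startup_cost, double total_cost,
                           double tuples_to_fetch, double path_rows)
{
    double      fraction = tuples_to_fetch / path_rows;

    /* a LIMIT larger than the row estimate cannot cost more than a full fetch */
    if (fraction > 1.0)
        fraction = 1.0;

    return startup_cost + (total_cost - startup_cost) * fraction;
}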
61
62 #include "postgres.h"
63
64 #include <math.h>
65
66 #include "executor/nodeHash.h"
67 #include "miscadmin.h"
68 #include "nodes/nodeFuncs.h"
69 #include "optimizer/clauses.h"
70 #include "optimizer/cost.h"
71 #include "optimizer/pathnode.h"
72 #include "optimizer/planmain.h"
73 #include "parser/parsetree.h"
74 #include "utils/lsyscache.h"
75 #include "utils/selfuncs.h"
76 #include "utils/tuplesort.h"
77
78
79 #define LOG2(x)  (log(x) / 0.693147180559945)
80
81 /*
82  * Some Paths return less than the nominal number of rows of their parent
83  * relations; join nodes need to do this to get the correct input count:
84  */
85 #define PATH_ROWS(path) \
86         (IsA(path, UniquePath) ? \
87          ((UniquePath *) (path))->rows : \
88          (path)->parent->rows)
89
90
91 double          seq_page_cost = DEFAULT_SEQ_PAGE_COST;
92 double          random_page_cost = DEFAULT_RANDOM_PAGE_COST;
93 double          cpu_tuple_cost = DEFAULT_CPU_TUPLE_COST;
94 double          cpu_index_tuple_cost = DEFAULT_CPU_INDEX_TUPLE_COST;
95 double          cpu_operator_cost = DEFAULT_CPU_OPERATOR_COST;
96
97 int                     effective_cache_size = DEFAULT_EFFECTIVE_CACHE_SIZE;
98
99 Cost            disable_cost = 100000000.0;
100
101 bool            enable_seqscan = true;
102 bool            enable_indexscan = true;
103 bool            enable_bitmapscan = true;
104 bool            enable_tidscan = true;
105 bool            enable_sort = true;
106 bool            enable_hashagg = true;
107 bool            enable_nestloop = true;
108 bool            enable_mergejoin = true;
109 bool            enable_hashjoin = true;
110
111 typedef struct
112 {
113         PlannerInfo *root;
114         QualCost        total;
115 } cost_qual_eval_context;
116
117 static MergeScanSelCache *cached_scansel(PlannerInfo *root,
118                            RestrictInfo *rinfo,
119                            PathKey *pathkey);
120 static bool cost_qual_eval_walker(Node *node, cost_qual_eval_context *context);
121 static double approx_tuple_count(PlannerInfo *root, JoinPath *path,
122                                                                  List *quals, SpecialJoinInfo *sjinfo);
123 static void set_rel_width(PlannerInfo *root, RelOptInfo *rel);
124 static double relation_byte_size(double tuples, int width);
125 static double page_size(double tuples, int width);
126
127
128 /*
129  * clamp_row_est
130  *              Force a row-count estimate to a sane value.
131  */
132 double
133 clamp_row_est(double nrows)
134 {
135         /*
136          * Force estimate to be at least one row, to make explain output look
137          * better and to avoid possible divide-by-zero when interpolating costs.
138          * Make it an integer, too.
139          */
140         if (nrows <= 1.0)
141                 nrows = 1.0;
142         else
143                 nrows = rint(nrows);
144
145         return nrows;
146 }
147
148
149 /*
150  * cost_seqscan
151  *        Determines and returns the cost of scanning a relation sequentially.
152  */
153 void
154 cost_seqscan(Path *path, PlannerInfo *root,
155                          RelOptInfo *baserel)
156 {
157         Cost            startup_cost = 0;
158         Cost            run_cost = 0;
159         Cost            cpu_per_tuple;
160
161         /* Should only be applied to base relations */
162         Assert(baserel->relid > 0);
163         Assert(baserel->rtekind == RTE_RELATION);
164
165         if (!enable_seqscan)
166                 startup_cost += disable_cost;
167
168         /*
169          * disk costs
170          */
171         run_cost += seq_page_cost * baserel->pages;
172
173         /* CPU costs */
174         startup_cost += baserel->baserestrictcost.startup;
175         cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
176         run_cost += cpu_per_tuple * baserel->tuples;
177
178         path->startup_cost = startup_cost;
179         path->total_cost = startup_cost + run_cost;
180 }
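/*
 * Worked example of the seqscan formula above, assuming the default cost
 * parameters (seq_page_cost = 1.0, cpu_tuple_cost = 0.01) and no restriction
 * quals: a 358-page, 10000-tuple heap costs 1.0*358 + 0.01*10000 = 458.
 * The helper below is only a sketch of that arithmetic, not planner code.
 */
static double
example_seqscan_total_cost(double pages, double tuples,
                           double per_tuple_qual_cost)
{
    double      run_cost = 0.0;

    run_cost += 1.0 * pages;                            /* seq_page_cost */
    run_cost += (0.01 + per_tuple_qual_cost) * tuples;  /* cpu_tuple_cost + quals */
    return run_cost;                                    /* startup cost is zero */
}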
181
182 /*
183  * cost_index
184  *        Determines and returns the cost of scanning a relation using an index.
185  *
186  * 'index' is the index to be used
187  * 'indexQuals' is the list of applicable qual clauses (implicit AND semantics)
188  * 'outer_rel' is the outer relation when we are considering using the index
189  *              scan as the inside of a nestloop join (hence, some of the indexQuals
190  *              are join clauses, and we should expect repeated scans of the index);
191  *              NULL for a plain index scan
192  *
193  * cost_index() takes an IndexPath not just a Path, because it sets a few
194  * additional fields of the IndexPath besides startup_cost and total_cost.
195  * These fields are needed if the IndexPath is used in a BitmapIndexScan.
196  *
197  * NOTE: 'indexQuals' must contain only clauses usable as index restrictions.
198  * Any additional quals evaluated as qpquals may reduce the number of returned
199  * tuples, but they won't reduce the number of tuples we have to fetch from
200  * the table, so they don't reduce the scan cost.
201  *
202  * NOTE: as of 8.0, indexQuals is a list of RestrictInfo nodes, where formerly
203  * it was a list of bare clause expressions.
204  */
205 void
206 cost_index(IndexPath *path, PlannerInfo *root,
207                    IndexOptInfo *index,
208                    List *indexQuals,
209                    RelOptInfo *outer_rel)
210 {
211         RelOptInfo *baserel = index->rel;
212         Cost            startup_cost = 0;
213         Cost            run_cost = 0;
214         Cost            indexStartupCost;
215         Cost            indexTotalCost;
216         Selectivity indexSelectivity;
217         double          indexCorrelation,
218                                 csquared;
219         Cost            min_IO_cost,
220                                 max_IO_cost;
221         Cost            cpu_per_tuple;
222         double          tuples_fetched;
223         double          pages_fetched;
224
225         /* Should only be applied to base relations */
226         Assert(IsA(baserel, RelOptInfo) &&
227                    IsA(index, IndexOptInfo));
228         Assert(baserel->relid > 0);
229         Assert(baserel->rtekind == RTE_RELATION);
230
231         if (!enable_indexscan)
232                 startup_cost += disable_cost;
233
234         /*
235          * Call index-access-method-specific code to estimate the processing cost
236          * for scanning the index, as well as the selectivity of the index (ie,
237          * the fraction of main-table tuples we will have to retrieve) and its
238          * correlation to the main-table tuple order.
239          */
240         OidFunctionCall8(index->amcostestimate,
241                                          PointerGetDatum(root),
242                                          PointerGetDatum(index),
243                                          PointerGetDatum(indexQuals),
244                                          PointerGetDatum(outer_rel),
245                                          PointerGetDatum(&indexStartupCost),
246                                          PointerGetDatum(&indexTotalCost),
247                                          PointerGetDatum(&indexSelectivity),
248                                          PointerGetDatum(&indexCorrelation));
249
250         /*
251          * Save amcostestimate's results for possible use in bitmap scan planning.
252          * We don't bother to save indexStartupCost or indexCorrelation, because a
253          * bitmap scan doesn't care about either.
254          */
255         path->indextotalcost = indexTotalCost;
256         path->indexselectivity = indexSelectivity;
257
258         /* all costs for touching index itself included here */
259         startup_cost += indexStartupCost;
260         run_cost += indexTotalCost - indexStartupCost;
261
262         /* estimate number of main-table tuples fetched */
263         tuples_fetched = clamp_row_est(indexSelectivity * baserel->tuples);
264
265         /*----------
266          * Estimate number of main-table pages fetched, and compute I/O cost.
267          *
268          * When the index ordering is uncorrelated with the table ordering,
269          * we use an approximation proposed by Mackert and Lohman (see
270          * index_pages_fetched() for details) to compute the number of pages
271          * fetched, and then charge random_page_cost per page fetched.
272          *
273          * When the index ordering is exactly correlated with the table ordering
274          * (just after a CLUSTER, for example), the number of pages fetched should
275          * be exactly selectivity * table_size.  What's more, all but the first
276          * will be sequential fetches, not the random fetches that occur in the
277          * uncorrelated case.  So if the number of pages is more than 1, we
278          * ought to charge
279          *              random_page_cost + (pages_fetched - 1) * seq_page_cost
280          * For partially-correlated indexes, we ought to charge somewhere between
281          * these two estimates.  We currently interpolate linearly between the
282          * estimates based on the correlation squared (XXX is that appropriate?).
283          *----------
284          */
285         if (outer_rel != NULL && outer_rel->rows > 1)
286         {
287                 /*
288                  * For repeated indexscans, the appropriate estimate for the
289                  * uncorrelated case is to scale up the number of tuples fetched in
290                  * the Mackert and Lohman formula by the number of scans, so that we
291                  * estimate the number of pages fetched by all the scans; then
292                  * pro-rate the costs for one scan.  In this case we assume all the
293                  * fetches are random accesses.
294                  */
295                 double          num_scans = outer_rel->rows;
296
297                 pages_fetched = index_pages_fetched(tuples_fetched * num_scans,
298                                                                                         baserel->pages,
299                                                                                         (double) index->pages,
300                                                                                         root);
301
302                 max_IO_cost = (pages_fetched * random_page_cost) / num_scans;
303
304                 /*
305                  * In the perfectly correlated case, the number of pages touched by
306                  * each scan is selectivity * table_size, and we can use the Mackert
307                  * and Lohman formula at the page level to estimate how much work is
308                  * saved by caching across scans.  We still assume all the fetches are
309                  * random, though, which is an overestimate that's hard to correct for
310                  * without double-counting the cache effects.  (But in most cases
311                  * where such a plan is actually interesting, only one page would get
312                  * fetched per scan anyway, so it shouldn't matter much.)
313                  */
314                 pages_fetched = ceil(indexSelectivity * (double) baserel->pages);
315
316                 pages_fetched = index_pages_fetched(pages_fetched * num_scans,
317                                                                                         baserel->pages,
318                                                                                         (double) index->pages,
319                                                                                         root);
320
321                 min_IO_cost = (pages_fetched * random_page_cost) / num_scans;
322         }
323         else
324         {
325                 /*
326                  * Normal case: apply the Mackert and Lohman formula, and then
327                  * interpolate between that and the correlation-derived result.
328                  */
329                 pages_fetched = index_pages_fetched(tuples_fetched,
330                                                                                         baserel->pages,
331                                                                                         (double) index->pages,
332                                                                                         root);
333
334                 /* max_IO_cost is for the perfectly uncorrelated case (csquared=0) */
335                 max_IO_cost = pages_fetched * random_page_cost;
336
337                 /* min_IO_cost is for the perfectly correlated case (csquared=1) */
338                 pages_fetched = ceil(indexSelectivity * (double) baserel->pages);
339                 min_IO_cost = random_page_cost;
340                 if (pages_fetched > 1)
341                         min_IO_cost += (pages_fetched - 1) * seq_page_cost;
342         }
343
344         /*
345          * Now interpolate based on estimated index order correlation to get total
346          * disk I/O cost for main table accesses.
347          */
348         csquared = indexCorrelation * indexCorrelation;
349
350         run_cost += max_IO_cost + csquared * (min_IO_cost - max_IO_cost);
351
352         /*
353          * Estimate CPU costs per tuple.
354          *
355          * Normally the indexquals will be removed from the list of restriction
356          * clauses that we have to evaluate as qpquals, so we should subtract
357          * their costs from baserestrictcost.  But if we are doing a join then
358          * some of the indexquals are join clauses and shouldn't be subtracted.
359          * Rather than work out exactly how much to subtract, we don't subtract
360          * anything.
361          */
362         startup_cost += baserel->baserestrictcost.startup;
363         cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
364
365         if (outer_rel == NULL)
366         {
367                 QualCost        index_qual_cost;
368
369                 cost_qual_eval(&index_qual_cost, indexQuals, root);
370                 /* any startup cost still has to be paid ... */
371                 cpu_per_tuple -= index_qual_cost.per_tuple;
372         }
373
374         run_cost += cpu_per_tuple * tuples_fetched;
375
376         path->path.startup_cost = startup_cost;
377         path->path.total_cost = startup_cost + run_cost;
378 }
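/*
 * Worked example of the correlation interpolation above: with
 * indexCorrelation = 0.5, csquared = 0.25, so the I/O charge is
 * max_IO_cost + 0.25 * (min_IO_cost - max_IO_cost), i.e. only a quarter of
 * the way from the fully-random estimate toward the fully-sequential one.
 * The helper below is a sketch of that arithmetic, not planner code.
 */
static double
example_interpolated_io_cost(double min_io_cost, double max_io_cost,
                             double correlation)
{
    double      csquared = correlation * correlation;

    return max_io_cost + csquared * (min_io_cost - max_io_cost);
}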
379
380 /*
381  * index_pages_fetched
382  *        Estimate the number of pages actually fetched after accounting for
383  *        cache effects.
384  *
385  * We use an approximation proposed by Mackert and Lohman, "Index Scans
386  * Using a Finite LRU Buffer: A Validated I/O Model", ACM Transactions
387  * on Database Systems, Vol. 14, No. 3, September 1989, Pages 401-424.
388  * The Mackert and Lohman approximation is that the number of pages
389  * fetched is
390  *      PF =
391  *              min(2TNs/(2T+Ns), T)                    when T <= b
392  *              2TNs/(2T+Ns)                                    when T > b and Ns <= 2Tb/(2T-b)
393  *              b + (Ns - 2Tb/(2T-b))*(T-b)/T   when T > b and Ns > 2Tb/(2T-b)
394  * where
395  *              T = # pages in table
396  *              N = # tuples in table
397  *              s = selectivity = fraction of table to be scanned
398  *              b = # buffer pages available (we include kernel space here)
399  *
400  * We assume that effective_cache_size is the total number of buffer pages
401  * available for the whole query, and pro-rate that space across all the
402  * tables in the query and the index currently under consideration.  (This
403  * ignores space needed for other indexes used by the query, but since we
404  * don't know which indexes will get used, we can't estimate that very well;
405  * and in any case counting all the tables may well be an overestimate, since
406  * depending on the join plan not all the tables may be scanned concurrently.)
407  *
408  * The product Ns is the number of tuples fetched; we pass in that
409  * product rather than calculating it here.  "pages" is the number of pages
410  * in the object under consideration (either an index or a table).
411  * "index_pages" is the amount to add to the total table space, which was
412  * computed for us by query_planner.
413  *
414  * Caller is expected to have ensured that tuples_fetched is greater than zero
415  * and rounded to integer (see clamp_row_est).  The result will likewise be
416  * greater than zero and integral.
417  */
418 double
419 index_pages_fetched(double tuples_fetched, BlockNumber pages,
420                                         double index_pages, PlannerInfo *root)
421 {
422         double          pages_fetched;
423         double          total_pages;
424         double          T,
425                                 b;
426
427         /* T is # pages in table, but don't allow it to be zero */
428         T = (pages > 1) ? (double) pages : 1.0;
429
430         /* Compute number of pages assumed to be competing for cache space */
431         total_pages = root->total_table_pages + index_pages;
432         total_pages = Max(total_pages, 1.0);
433         Assert(T <= total_pages);
434
435         /* b is pro-rated share of effective_cache_size */
436         b = (double) effective_cache_size * T / total_pages;
437
438         /* force it positive and integral */
439         if (b <= 1.0)
440                 b = 1.0;
441         else
442                 b = ceil(b);
443
444         /* This part is the Mackert and Lohman formula */
445         if (T <= b)
446         {
447                 pages_fetched =
448                         (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
449                 if (pages_fetched >= T)
450                         pages_fetched = T;
451                 else
452                         pages_fetched = ceil(pages_fetched);
453         }
454         else
455         {
456                 double          lim;
457
458                 lim = (2.0 * T * b) / (2.0 * T - b);
459                 if (tuples_fetched <= lim)
460                 {
461                         pages_fetched =
462                                 (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
463                 }
464                 else
465                 {
466                         pages_fetched =
467                                 b + (tuples_fetched - lim) * (T - b) / T;
468                 }
469                 pages_fetched = ceil(pages_fetched);
470         }
471         return pages_fetched;
472 }
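/*
 * Worked example of the Mackert and Lohman formula above, for the T <= b
 * branch: a 1000-page table with 500 tuples fetched gives
 * 2*1000*500 / (2*1000 + 500) = 400 pages, i.e. many of the 500 probes are
 * expected to land on already-fetched pages.  The helper below is a sketch
 * with those two numbers assumed, not planner code.
 */
static double
example_mackert_lohman_cached(void)
{
    double      T = 1000.0;     /* pages in table, assumed <= b */
    double      Ns = 500.0;     /* tuples fetched */

    return (2.0 * T * Ns) / (2.0 * T + Ns);     /* = 400 pages */
}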
473
474 /*
475  * get_indexpath_pages
476  *              Determine the total size of the indexes used in a bitmap index path.
477  *
478  * Note: if the same index is used more than once in a bitmap tree, we will
479  * count it multiple times, which perhaps is the wrong thing ... but it's
480  * not completely clear, and detecting duplicates is difficult, so ignore it
481  * for now.
482  */
483 static double
484 get_indexpath_pages(Path *bitmapqual)
485 {
486         double          result = 0;
487         ListCell   *l;
488
489         if (IsA(bitmapqual, BitmapAndPath))
490         {
491                 BitmapAndPath *apath = (BitmapAndPath *) bitmapqual;
492
493                 foreach(l, apath->bitmapquals)
494                 {
495                         result += get_indexpath_pages((Path *) lfirst(l));
496                 }
497         }
498         else if (IsA(bitmapqual, BitmapOrPath))
499         {
500                 BitmapOrPath *opath = (BitmapOrPath *) bitmapqual;
501
502                 foreach(l, opath->bitmapquals)
503                 {
504                         result += get_indexpath_pages((Path *) lfirst(l));
505                 }
506         }
507         else if (IsA(bitmapqual, IndexPath))
508         {
509                 IndexPath  *ipath = (IndexPath *) bitmapqual;
510
511                 result = (double) ipath->indexinfo->pages;
512         }
513         else
514                 elog(ERROR, "unrecognized node type: %d", nodeTag(bitmapqual));
515
516         return result;
517 }
518
519 /*
520  * cost_bitmap_heap_scan
521  *        Determines and returns the cost of scanning a relation using a bitmap
522  *        index-then-heap plan.
523  *
524  * 'baserel' is the relation to be scanned
525  * 'bitmapqual' is a tree of IndexPaths, BitmapAndPaths, and BitmapOrPaths
526  * 'outer_rel' is the outer relation when we are considering using the bitmap
527  *              scan as the inside of a nestloop join (hence, some of the indexQuals
528  *              are join clauses, and we should expect repeated scans of the table);
529  *              NULL for a plain bitmap scan
530  *
531  * Note: if this is a join inner path, the component IndexPaths in bitmapqual
532  * should have been costed accordingly.
533  */
534 void
535 cost_bitmap_heap_scan(Path *path, PlannerInfo *root, RelOptInfo *baserel,
536                                           Path *bitmapqual, RelOptInfo *outer_rel)
537 {
538         Cost            startup_cost = 0;
539         Cost            run_cost = 0;
540         Cost            indexTotalCost;
541         Selectivity indexSelectivity;
542         Cost            cpu_per_tuple;
543         Cost            cost_per_page;
544         double          tuples_fetched;
545         double          pages_fetched;
546         double          T;
547
548         /* Should only be applied to base relations */
549         Assert(IsA(baserel, RelOptInfo));
550         Assert(baserel->relid > 0);
551         Assert(baserel->rtekind == RTE_RELATION);
552
553         if (!enable_bitmapscan)
554                 startup_cost += disable_cost;
555
556         /*
557          * Fetch total cost of obtaining the bitmap, as well as its total
558          * selectivity.
559          */
560         cost_bitmap_tree_node(bitmapqual, &indexTotalCost, &indexSelectivity);
561
562         startup_cost += indexTotalCost;
563
564         /*
565          * Estimate number of main-table pages fetched.
566          */
567         tuples_fetched = clamp_row_est(indexSelectivity * baserel->tuples);
568
569         T = (baserel->pages > 1) ? (double) baserel->pages : 1.0;
570
571         if (outer_rel != NULL && outer_rel->rows > 1)
572         {
573                 /*
574                  * For repeated bitmap scans, scale up the number of tuples fetched in
575                  * the Mackert and Lohman formula by the number of scans, so that we
576                  * estimate the number of pages fetched by all the scans. Then
577                  * pro-rate for one scan.
578                  */
579                 double          num_scans = outer_rel->rows;
580
581                 pages_fetched = index_pages_fetched(tuples_fetched * num_scans,
582                                                                                         baserel->pages,
583                                                                                         get_indexpath_pages(bitmapqual),
584                                                                                         root);
585                 pages_fetched /= num_scans;
586         }
587         else
588         {
589                 /*
590                  * For a single scan, the number of heap pages that need to be fetched
591                  * is the same as the Mackert and Lohman formula for the case T <= b
592                  * (ie, no re-reads needed).
593                  */
594                 pages_fetched = (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
595         }
596         if (pages_fetched >= T)
597                 pages_fetched = T;
598         else
599                 pages_fetched = ceil(pages_fetched);
600
601         /*
602          * For small numbers of pages we should charge random_page_cost apiece,
603          * while if nearly all the table's pages are being read, it's more
604          * appropriate to charge seq_page_cost apiece.  The effect is nonlinear,
605          * too. For lack of a better idea, interpolate like this to determine the
606          * cost per page.
607          */
608         if (pages_fetched >= 2.0)
609                 cost_per_page = random_page_cost -
610                         (random_page_cost - seq_page_cost) * sqrt(pages_fetched / T);
611         else
612                 cost_per_page = random_page_cost;
613
614         run_cost += pages_fetched * cost_per_page;
615
616         /*
617          * Estimate CPU costs per tuple.
618          *
619          * Often the indexquals don't need to be rechecked at each tuple ... but
620          * not always, especially not if there are enough tuples involved that the
621          * bitmaps become lossy.  For the moment, just assume they will be
622          * rechecked always.
623          */
624         startup_cost += baserel->baserestrictcost.startup;
625         cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
626
627         run_cost += cpu_per_tuple * tuples_fetched;
628
629         path->startup_cost = startup_cost;
630         path->total_cost = startup_cost + run_cost;
631 }
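/*
 * Worked example of the per-page cost interpolation above, assuming the
 * default random_page_cost = 4.0 and seq_page_cost = 1.0: fetching every
 * page (pages_fetched == T) charges 4.0 - 3.0*sqrt(1.0) = 1.0 per page,
 * while fetching a quarter of the pages charges 4.0 - 3.0*sqrt(0.25) = 2.5
 * per page.  The helper below is a sketch with those defaults hard-coded,
 * not planner code.
 */
static double
example_bitmap_cost_per_page(double pages_fetched, double T)
{
    if (pages_fetched < 2.0)
        return 4.0;             /* random_page_cost */
    return 4.0 - (4.0 - 1.0) * sqrt(pages_fetched / T);
}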
632
633 /*
634  * cost_bitmap_tree_node
635  *              Extract cost and selectivity from a bitmap tree node (index/and/or)
636  */
637 void
638 cost_bitmap_tree_node(Path *path, Cost *cost, Selectivity *selec)
639 {
640         if (IsA(path, IndexPath))
641         {
642                 *cost = ((IndexPath *) path)->indextotalcost;
643                 *selec = ((IndexPath *) path)->indexselectivity;
644
645                 /*
646                  * Charge a small amount per retrieved tuple to reflect the costs of
647                  * manipulating the bitmap.  This is mostly to make sure that a bitmap
648                  * scan doesn't look to be the same cost as an indexscan to retrieve a
649                  * single tuple.
650                  */
651                 *cost += 0.1 * cpu_operator_cost * ((IndexPath *) path)->rows;
652         }
653         else if (IsA(path, BitmapAndPath))
654         {
655                 *cost = path->total_cost;
656                 *selec = ((BitmapAndPath *) path)->bitmapselectivity;
657         }
658         else if (IsA(path, BitmapOrPath))
659         {
660                 *cost = path->total_cost;
661                 *selec = ((BitmapOrPath *) path)->bitmapselectivity;
662         }
663         else
664         {
665                 elog(ERROR, "unrecognized node type: %d", nodeTag(path));
666                 *cost = *selec = 0;             /* keep compiler quiet */
667         }
668 }
669
670 /*
671  * cost_bitmap_and_node
672  *              Estimate the cost of a BitmapAnd node
673  *
674  * Note that this considers only the costs of index scanning and bitmap
675  * creation, not the eventual heap access.      In that sense the object isn't
676  * truly a Path, but it has enough path-like properties (costs in particular)
677  * to warrant treating it as one.
678  */
679 void
680 cost_bitmap_and_node(BitmapAndPath *path, PlannerInfo *root)
681 {
682         Cost            totalCost;
683         Selectivity selec;
684         ListCell   *l;
685
686         /*
687          * We estimate AND selectivity on the assumption that the inputs are
688          * independent.  This is probably often wrong, but we don't have the info
689          * to do better.
690          *
691          * The runtime cost of the BitmapAnd itself is estimated at 100x
692          * cpu_operator_cost for each tbm_intersect needed.  Probably too small,
693          * definitely too simplistic?
694          */
695         totalCost = 0.0;
696         selec = 1.0;
697         foreach(l, path->bitmapquals)
698         {
699                 Path       *subpath = (Path *) lfirst(l);
700                 Cost            subCost;
701                 Selectivity subselec;
702
703                 cost_bitmap_tree_node(subpath, &subCost, &subselec);
704
705                 selec *= subselec;
706
707                 totalCost += subCost;
708                 if (l != list_head(path->bitmapquals))
709                         totalCost += 100.0 * cpu_operator_cost;
710         }
711         path->bitmapselectivity = selec;
712         path->path.startup_cost = totalCost;
713         path->path.total_cost = totalCost;
714 }
715
716 /*
717  * cost_bitmap_or_node
718  *              Estimate the cost of a BitmapOr node
719  *
720  * See comments for cost_bitmap_and_node.
721  */
722 void
723 cost_bitmap_or_node(BitmapOrPath *path, PlannerInfo *root)
724 {
725         Cost            totalCost;
726         Selectivity selec;
727         ListCell   *l;
728
729         /*
730          * We estimate OR selectivity on the assumption that the inputs are
731          * non-overlapping, since that's often the case in "x IN (list)" type
732          * situations.  Of course, we clamp to 1.0 at the end.
733          *
734          * The runtime cost of the BitmapOr itself is estimated at 100x
735          * cpu_operator_cost for each tbm_union needed.  Probably too small,
736          * definitely too simplistic?  We are aware that the tbm_unions are
737          * optimized out when the inputs are BitmapIndexScans.
738          */
739         totalCost = 0.0;
740         selec = 0.0;
741         foreach(l, path->bitmapquals)
742         {
743                 Path       *subpath = (Path *) lfirst(l);
744                 Cost            subCost;
745                 Selectivity subselec;
746
747                 cost_bitmap_tree_node(subpath, &subCost, &subselec);
748
749                 selec += subselec;
750
751                 totalCost += subCost;
752                 if (l != list_head(path->bitmapquals) &&
753                         !IsA(subpath, IndexPath))
754                         totalCost += 100.0 * cpu_operator_cost;
755         }
756         path->bitmapselectivity = Min(selec, 1.0);
757         path->path.startup_cost = totalCost;
758         path->path.total_cost = totalCost;
759 }
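/*
 * Worked example of the two selectivity rules above: under the independence
 * assumption, ANDing bitmaps of selectivity 0.10 and 0.20 yields 0.02, while
 * under the non-overlap assumption ORing them yields 0.30 (clamped to 1.0 if
 * the sum exceeds it).  The helper below is a sketch, not planner code.
 */
static void
example_bitmap_selectivities(double s1, double s2,
                             double *and_selec, double *or_selec)
{
    *and_selec = s1 * s2;           /* independence assumption */
    *or_selec = s1 + s2;            /* non-overlap assumption */
    if (*or_selec > 1.0)
        *or_selec = 1.0;
}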
760
761 /*
762  * cost_tidscan
763  *        Determines and returns the cost of scanning a relation using TIDs.
764  */
765 void
766 cost_tidscan(Path *path, PlannerInfo *root,
767                          RelOptInfo *baserel, List *tidquals)
768 {
769         Cost            startup_cost = 0;
770         Cost            run_cost = 0;
771         bool            isCurrentOf = false;
772         Cost            cpu_per_tuple;
773         QualCost        tid_qual_cost;
774         int                     ntuples;
775         ListCell   *l;
776
777         /* Should only be applied to base relations */
778         Assert(baserel->relid > 0);
779         Assert(baserel->rtekind == RTE_RELATION);
780
781         /* Count how many tuples we expect to retrieve */
782         ntuples = 0;
783         foreach(l, tidquals)
784         {
785                 if (IsA(lfirst(l), ScalarArrayOpExpr))
786                 {
787                         /* Each element of the array yields 1 tuple */
788                         ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) lfirst(l);
789                         Node       *arraynode = (Node *) lsecond(saop->args);
790
791                         ntuples += estimate_array_length(arraynode);
792                 }
793                 else if (IsA(lfirst(l), CurrentOfExpr))
794                 {
795                         /* CURRENT OF yields 1 tuple */
796                         isCurrentOf = true;
797                         ntuples++;
798                 }
799                 else
800                 {
801                         /* It's just CTID = something, count 1 tuple */
802                         ntuples++;
803                 }
804         }
805
806         /*
807          * We must force TID scan for WHERE CURRENT OF, because only nodeTidscan.c
808          * understands how to do it correctly.  Therefore, honor enable_tidscan
809          * only when CURRENT OF isn't present.  Also note that cost_qual_eval
810          * counts a CurrentOfExpr as having startup cost disable_cost, which we
811          * subtract off here; that's to prevent other plan types such as seqscan
812          * from winning.
813          */
814         if (isCurrentOf)
815         {
816                 Assert(baserel->baserestrictcost.startup >= disable_cost);
817                 startup_cost -= disable_cost;
818         }
819         else if (!enable_tidscan)
820                 startup_cost += disable_cost;
821
822         /*
823          * The TID qual expressions will be computed once, any other baserestrict
824          * quals once per retrieved tuple.
825          */
826         cost_qual_eval(&tid_qual_cost, tidquals, root);
827
828         /* disk costs --- assume each tuple on a different page */
829         run_cost += random_page_cost * ntuples;
830
831         /* CPU costs */
832         startup_cost += baserel->baserestrictcost.startup +
833                 tid_qual_cost.per_tuple;
834         cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple -
835                 tid_qual_cost.per_tuple;
836         run_cost += cpu_per_tuple * ntuples;
837
838         path->startup_cost = startup_cost;
839         path->total_cost = startup_cost + run_cost;
840 }
841
842 /*
843  * cost_subqueryscan
844  *        Determines and returns the cost of scanning a subquery RTE.
845  */
846 void
847 cost_subqueryscan(Path *path, RelOptInfo *baserel)
848 {
849         Cost            startup_cost;
850         Cost            run_cost;
851         Cost            cpu_per_tuple;
852
853         /* Should only be applied to base relations that are subqueries */
854         Assert(baserel->relid > 0);
855         Assert(baserel->rtekind == RTE_SUBQUERY);
856
857         /*
858          * Cost of path is cost of evaluating the subplan, plus cost of evaluating
859          * any restriction clauses that will be attached to the SubqueryScan node,
860          * plus cpu_tuple_cost to account for selection and projection overhead.
861          */
862         path->startup_cost = baserel->subplan->startup_cost;
863         path->total_cost = baserel->subplan->total_cost;
864
865         startup_cost = baserel->baserestrictcost.startup;
866         cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
867         run_cost = cpu_per_tuple * baserel->tuples;
868
869         path->startup_cost += startup_cost;
870         path->total_cost += startup_cost + run_cost;
871 }
872
873 /*
874  * cost_functionscan
875  *        Determines and returns the cost of scanning a function RTE.
876  */
877 void
878 cost_functionscan(Path *path, PlannerInfo *root, RelOptInfo *baserel)
879 {
880         Cost            startup_cost = 0;
881         Cost            run_cost = 0;
882         Cost            cpu_per_tuple;
883         RangeTblEntry *rte;
884         QualCost        exprcost;
885
886         /* Should only be applied to base relations that are functions */
887         Assert(baserel->relid > 0);
888         rte = planner_rt_fetch(baserel->relid, root);
889         Assert(rte->rtekind == RTE_FUNCTION);
890
891         /* Estimate costs of executing the function expression */
892         cost_qual_eval_node(&exprcost, rte->funcexpr, root);
893
894         startup_cost += exprcost.startup;
895         cpu_per_tuple = exprcost.per_tuple;
896
897         /* Add scanning CPU costs */
898         startup_cost += baserel->baserestrictcost.startup;
899         cpu_per_tuple += cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
900         run_cost += cpu_per_tuple * baserel->tuples;
901
902         path->startup_cost = startup_cost;
903         path->total_cost = startup_cost + run_cost;
904 }
905
906 /*
907  * cost_valuesscan
908  *        Determines and returns the cost of scanning a VALUES RTE.
909  */
910 void
911 cost_valuesscan(Path *path, PlannerInfo *root, RelOptInfo *baserel)
912 {
913         Cost            startup_cost = 0;
914         Cost            run_cost = 0;
915         Cost            cpu_per_tuple;
916
917         /* Should only be applied to base relations that are values lists */
918         Assert(baserel->relid > 0);
919         Assert(baserel->rtekind == RTE_VALUES);
920
921         /*
922          * For now, estimate list evaluation cost at one operator eval per list
923          * (probably pretty bogus, but is it worth being smarter?)
924          */
925         cpu_per_tuple = cpu_operator_cost;
926
927         /* Add scanning CPU costs */
928         startup_cost += baserel->baserestrictcost.startup;
929         cpu_per_tuple += cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
930         run_cost += cpu_per_tuple * baserel->tuples;
931
932         path->startup_cost = startup_cost;
933         path->total_cost = startup_cost + run_cost;
934 }
935
936 /*
937  * cost_sort
938  *        Determines and returns the cost of sorting a relation, including
939  *        the cost of reading the input data.
940  *
941  * If the total volume of data to sort is less than work_mem, we will do
942  * an in-memory sort, which requires no I/O and about t*log2(t) tuple
943  * comparisons for t tuples.
944  *
945  * If the total volume exceeds work_mem, we switch to a tape-style merge
946  * algorithm.  There will still be about t*log2(t) tuple comparisons in
947  * total, but we will also need to write and read each tuple once per
948  * merge pass.  We expect about ceil(logM(r)) merge passes where r is the
949  * number of initial runs formed and M is the merge order used by tuplesort.c.
950  * Since the average initial run should be about twice work_mem, we have
951  *              disk traffic = 2 * relsize * ceil(logM(relsize / (2*work_mem)))
952  *              cpu = comparison_cost * t * log2(t)
953  *
954  * If the sort is bounded (i.e., only the first k result tuples are needed)
955  * and k tuples can fit into work_mem, we use a heap method that keeps only
956  * k tuples in the heap; this will require about t*log2(k) tuple comparisons.
957  *
958  * The disk traffic is assumed to be 3/4ths sequential and 1/4th random
959  * accesses (XXX can't we refine that guess?)
960  *
961  * We charge two operator evals per tuple comparison, which should be in
962  * the right ballpark in most cases.
963  *
964  * 'pathkeys' is a list of sort keys
965  * 'input_cost' is the total cost for reading the input data
966  * 'tuples' is the number of tuples in the relation
967  * 'width' is the average tuple width in bytes
968  * 'limit_tuples' is the bound on the number of output tuples; -1 if no bound
969  *
970  * NOTE: some callers currently pass NIL for pathkeys because they
971  * can't conveniently supply the sort keys.  Since this routine doesn't
972  * currently do anything with pathkeys anyway, that doesn't matter...
973  * but if it ever does, it should react gracefully to lack of key data.
974  * (Actually, the thing we'd most likely be interested in is just the number
975  * of sort keys, which all callers *could* supply.)
976  */
977 void
978 cost_sort(Path *path, PlannerInfo *root,
979                   List *pathkeys, Cost input_cost, double tuples, int width,
980                   double limit_tuples)
981 {
982         Cost            startup_cost = input_cost;
983         Cost            run_cost = 0;
984         double          input_bytes = relation_byte_size(tuples, width);
985         double          output_bytes;
986         double          output_tuples;
987         long            work_mem_bytes = work_mem * 1024L;
988
989         if (!enable_sort)
990                 startup_cost += disable_cost;
991
992         /*
993          * We want to be sure the cost of a sort is never estimated as zero, even
994          * if passed-in tuple count is zero.  Besides, mustn't do log(0)...
995          */
996         if (tuples < 2.0)
997                 tuples = 2.0;
998
999         /* Do we have a useful LIMIT? */
1000         if (limit_tuples > 0 && limit_tuples < tuples)
1001         {
1002                 output_tuples = limit_tuples;
1003                 output_bytes = relation_byte_size(output_tuples, width);
1004         }
1005         else
1006         {
1007                 output_tuples = tuples;
1008                 output_bytes = input_bytes;
1009         }
1010
1011         if (output_bytes > work_mem_bytes)
1012         {
1013                 /*
1014                  * We'll have to use a disk-based sort of all the tuples
1015                  */
1016                 double          npages = ceil(input_bytes / BLCKSZ);
1017                 double          nruns = (input_bytes / work_mem_bytes) * 0.5;
1018                 double          mergeorder = tuplesort_merge_order(work_mem_bytes);
1019                 double          log_runs;
1020                 double          npageaccesses;
1021
1022                 /*
1023                  * CPU costs
1024                  *
1025                  * Assume about two operator evals per tuple comparison and N log2 N
1026                  * comparisons
1027                  */
1028                 startup_cost += 2.0 * cpu_operator_cost * tuples * LOG2(tuples);
1029
1030                 /* Disk costs */
1031
1032                 /* Compute logM(r) as log(r) / log(M) */
1033                 if (nruns > mergeorder)
1034                         log_runs = ceil(log(nruns) / log(mergeorder));
1035                 else
1036                         log_runs = 1.0;
1037                 npageaccesses = 2.0 * npages * log_runs;
1038                 /* Assume 3/4ths of accesses are sequential, 1/4th are not */
1039                 startup_cost += npageaccesses *
1040                         (seq_page_cost * 0.75 + random_page_cost * 0.25);
1041         }
1042         else if (tuples > 2 * output_tuples || input_bytes > work_mem_bytes)
1043         {
1044                 /*
1045                  * We'll use a bounded heap-sort keeping just K tuples in memory, for
1046                  * a total number of tuple comparisons of N log2 K; but the constant
1047                  * factor is a bit higher than for quicksort.  Tweak it so that the
1048                  * cost curve is continuous at the crossover point.
1049                  */
1050                 startup_cost += 2.0 * cpu_operator_cost * tuples * LOG2(2.0 * output_tuples);
1051         }
1052         else
1053         {
1054                 /* We'll use plain quicksort on all the input tuples */
1055                 startup_cost += 2.0 * cpu_operator_cost * tuples * LOG2(tuples);
1056         }
1057
1058         /*
1059          * Also charge a small amount (arbitrarily set equal to operator cost) per
1060          * extracted tuple.  Note it's correct to use tuples not output_tuples
1061          * here --- the upper LIMIT will pro-rate the run cost so we'd be double
1062          * counting the LIMIT otherwise.
1063          */
1064         run_cost += cpu_operator_cost * tuples;
1065
1066         path->startup_cost = startup_cost;
1067         path->total_cost = startup_cost + run_cost;
1068 }
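/*
 * Worked example of the external-sort disk charge above, with assumed
 * inputs: 1 GB of input, 8 kB pages and 64 MB of work_mem give
 * npages = 131072 and nruns = (1 GB / 64 MB) * 0.5 = 8.  Since tuplesort's
 * merge order at that work_mem is far above 8, log_runs = 1, so
 * npageaccesses = 2 * 131072 = 262144; at the default page costs the disk
 * component is 262144 * (0.75*1.0 + 0.25*4.0) = 458752 cost units.  The
 * helper below is a sketch with the page size and default costs hard-coded,
 * not planner code.
 */
static double
example_external_sort_disk_cost(double input_bytes, double work_mem_bytes,
                                double merge_order)
{
    double      npages = ceil(input_bytes / 8192.0);
    double      nruns = (input_bytes / work_mem_bytes) * 0.5;
    double      log_runs;

    if (nruns > merge_order)
        log_runs = ceil(log(nruns) / log(merge_order));
    else
        log_runs = 1.0;
    return 2.0 * npages * log_runs * (1.0 * 0.75 + 4.0 * 0.25);
}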
1069
1070 /*
1071  * sort_exceeds_work_mem
1072  *        Given a finished Sort plan node, detect whether it is expected to
1073  *        spill to disk (ie, will need more than work_mem workspace)
1074  *
1075  * This assumes there will be no available LIMIT.
1076  */
1077 bool
1078 sort_exceeds_work_mem(Sort *sort)
1079 {
1080         double          input_bytes = relation_byte_size(sort->plan.plan_rows,
1081                                                                                                  sort->plan.plan_width);
1082         long            work_mem_bytes = work_mem * 1024L;
1083
1084         return (input_bytes > work_mem_bytes);
1085 }
1086
1087 /*
1088  * cost_material
1089  *        Determines and returns the cost of materializing a relation, including
1090  *        the cost of reading the input data.
1091  *
1092  * If the total volume of data to materialize exceeds work_mem, we will need
1093  * to write it to disk, so the cost is much higher in that case.
1094  */
1095 void
1096 cost_material(Path *path,
1097                           Cost input_cost, double tuples, int width)
1098 {
1099         Cost            startup_cost = input_cost;
1100         Cost            run_cost = 0;
1101         double          nbytes = relation_byte_size(tuples, width);
1102         long            work_mem_bytes = work_mem * 1024L;
1103
1104         /* disk costs */
1105         if (nbytes > work_mem_bytes)
1106         {
1107                 double          npages = ceil(nbytes / BLCKSZ);
1108
1109                 /* We'll write during startup and read during retrieval */
1110                 startup_cost += seq_page_cost * npages;
1111                 run_cost += seq_page_cost * npages;
1112         }
1113
1114         /*
1115          * Charge a very small amount per inserted tuple, to reflect bookkeeping
1116          * costs.  We use cpu_tuple_cost/10 for this.  This is needed to break the
1117          * tie that would otherwise exist between nestloop with A outer,
1118          * materialized B inner and nestloop with B outer, materialized A inner.
1119          * The extra cost ensures we'll prefer materializing the smaller rel.
1120          */
1121         startup_cost += cpu_tuple_cost * 0.1 * tuples;
1122
1123         /*
1124          * Also charge a small amount per extracted tuple.      We use cpu_tuple_cost
1125          * so that it doesn't appear worthwhile to materialize a bare seqscan.
1126          */
1127         run_cost += cpu_tuple_cost * tuples;
1128
1129         path->startup_cost = startup_cost;
1130         path->total_cost = startup_cost + run_cost;
1131 }
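/*
 * Worked example of the spill rule above, with assumed numbers: 100 MB of
 * tuples and work_mem = 64 MB exceed the memory limit, so with 8 kB pages
 * both startup_cost and run_cost gain seq_page_cost * 12800; had the data
 * fit in work_mem, only the per-tuple bookkeeping and extraction charges
 * would apply.
 */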
1132
1133 /*
1134  * cost_agg
1135  *              Determines and returns the cost of performing an Agg plan node,
1136  *              including the cost of its input.
1137  *
1138  * Note: when aggstrategy == AGG_SORTED, caller must ensure that input costs
1139  * are for appropriately-sorted input.
1140  */
1141 void
1142 cost_agg(Path *path, PlannerInfo *root,
1143                  AggStrategy aggstrategy, int numAggs,
1144                  int numGroupCols, double numGroups,
1145                  Cost input_startup_cost, Cost input_total_cost,
1146                  double input_tuples)
1147 {
1148         Cost            startup_cost;
1149         Cost            total_cost;
1150
1151         /*
1152          * We charge one cpu_operator_cost per aggregate function per input tuple,
1153          * and another one per output tuple (corresponding to transfn and finalfn
1154          * calls respectively).  If we are grouping, we charge an additional
1155          * cpu_operator_cost per grouping column per input tuple for grouping
1156          * comparisons.
1157          *
1158          * We will produce a single output tuple if not grouping, and a tuple per
1159          * group otherwise.  We charge cpu_tuple_cost for each output tuple.
1160          *
1161          * Note: in this cost model, AGG_SORTED and AGG_HASHED have exactly the
1162          * same total CPU cost, but AGG_SORTED has lower startup cost.  If the
1163          * input path is already sorted appropriately, AGG_SORTED should be
1164          * preferred (since it has no risk of memory overflow).  This will happen
1165          * as long as the computed total costs are indeed exactly equal --- but if
1166          * there's roundoff error we might do the wrong thing.  So be sure that
1167          * the computations below form the same intermediate values in the same
1168          * order.
1169          *
1170          * Note: ideally we should use the pg_proc.procost costs of each
1171          * aggregate's component functions, but for now that seems like an
1172          * excessive amount of work.
1173          */
1174         if (aggstrategy == AGG_PLAIN)
1175         {
1176                 startup_cost = input_total_cost;
1177                 startup_cost += cpu_operator_cost * (input_tuples + 1) * numAggs;
1178                 /* we aren't grouping */
1179                 total_cost = startup_cost + cpu_tuple_cost;
1180         }
1181         else if (aggstrategy == AGG_SORTED)
1182         {
1183                 /* Here we are able to deliver output on-the-fly */
1184                 startup_cost = input_startup_cost;
1185                 total_cost = input_total_cost;
1186                 /* calcs phrased this way to match HASHED case, see note above */
1187                 total_cost += cpu_operator_cost * input_tuples * numGroupCols;
1188                 total_cost += cpu_operator_cost * input_tuples * numAggs;
1189                 total_cost += cpu_operator_cost * numGroups * numAggs;
1190                 total_cost += cpu_tuple_cost * numGroups;
1191         }
1192         else
1193         {
1194                 /* must be AGG_HASHED */
1195                 startup_cost = input_total_cost;
1196                 startup_cost += cpu_operator_cost * input_tuples * numGroupCols;
1197                 startup_cost += cpu_operator_cost * input_tuples * numAggs;
1198                 total_cost = startup_cost;
1199                 total_cost += cpu_operator_cost * numGroups * numAggs;
1200                 total_cost += cpu_tuple_cost * numGroups;
1201         }
1202
1203         path->startup_cost = startup_cost;
1204         path->total_cost = total_cost;
1205 }
1206
1207 /*
1208  * cost_group
1209  *              Determines and returns the cost of performing a Group plan node,
1210  *              including the cost of its input.
1211  *
1212  * Note: caller must ensure that input costs are for appropriately-sorted
1213  * input.
1214  */
1215 void
1216 cost_group(Path *path, PlannerInfo *root,
1217                    int numGroupCols, double numGroups,
1218                    Cost input_startup_cost, Cost input_total_cost,
1219                    double input_tuples)
1220 {
1221         Cost            startup_cost;
1222         Cost            total_cost;
1223
1224         startup_cost = input_startup_cost;
1225         total_cost = input_total_cost;
1226
1227         /*
1228          * Charge one cpu_operator_cost per comparison per input tuple. We assume
1229          * all columns get compared for most of the tuples.
1230          */
1231         total_cost += cpu_operator_cost * input_tuples * numGroupCols;
1232
1233         path->startup_cost = startup_cost;
1234         path->total_cost = total_cost;
1235 }
1236
1237 /*
1238  * If a nestloop's inner path is an indexscan, be sure to use its estimated
1239  * output row count, which may be lower than the restriction-clause-only row
1240  * count of its parent.  (We don't include this case in the PATH_ROWS macro
1241  * because it applies *only* to a nestloop's inner relation.)  We have to
1242  * be prepared to recurse through Append nodes in case of an appendrel.
1243  */
1244 static double
1245 nestloop_inner_path_rows(Path *path)
1246 {
1247         double          result;
1248
1249         if (IsA(path, IndexPath))
1250                 result = ((IndexPath *) path)->rows;
1251         else if (IsA(path, BitmapHeapPath))
1252                 result = ((BitmapHeapPath *) path)->rows;
1253         else if (IsA(path, AppendPath))
1254         {
1255                 ListCell   *l;
1256
1257                 result = 0;
1258                 foreach(l, ((AppendPath *) path)->subpaths)
1259                 {
1260                         result += nestloop_inner_path_rows((Path *) lfirst(l));
1261                 }
1262         }
1263         else
1264                 result = PATH_ROWS(path);
1265
1266         return result;
1267 }
1268
1269 /*
1270  * cost_nestloop
1271  *        Determines and returns the cost of joining two relations using the
1272  *        nested loop algorithm.
1273  *
1274  * 'path' is already filled in except for the cost fields
1275  * 'sjinfo' is extra info about the join for selectivity estimation
1276  */
1277 void
1278 cost_nestloop(NestPath *path, PlannerInfo *root, SpecialJoinInfo *sjinfo)
1279 {
1280         Path       *outer_path = path->outerjoinpath;
1281         Path       *inner_path = path->innerjoinpath;
1282         Cost            startup_cost = 0;
1283         Cost            run_cost = 0;
1284         Cost            cpu_per_tuple;
1285         QualCost        restrict_qual_cost;
1286         double          outer_path_rows = PATH_ROWS(outer_path);
1287         double          inner_path_rows = nestloop_inner_path_rows(inner_path);
1288         double          ntuples;
1289
1290         if (!enable_nestloop)
1291                 startup_cost += disable_cost;
1292
1293         /* cost of source data */
1294
1295         /*
1296          * NOTE: clearly, we must pay both outer and inner paths' startup_cost
1297          * before we can start returning tuples, so the join's startup cost is
1298          * their sum.  What's not so clear is whether the inner path's
1299          * startup_cost must be paid again on each rescan of the inner path. This
1300          * is not true if the inner path is materialized or is a hashjoin, but
1301          * probably is true otherwise.
1302          */
1303         startup_cost += outer_path->startup_cost + inner_path->startup_cost;
1304         run_cost += outer_path->total_cost - outer_path->startup_cost;
1305         if (IsA(inner_path, MaterialPath) ||
1306                 IsA(inner_path, HashPath))
1307         {
1308                 /* charge only run cost for each iteration of inner path */
1309         }
1310         else
1311         {
1312                 /*
1313                  * charge startup cost for each iteration of inner path, except we
1314                  * already charged the first startup_cost in our own startup
1315                  */
1316                 run_cost += (outer_path_rows - 1) * inner_path->startup_cost;
1317         }
1318         run_cost += outer_path_rows *
1319                 (inner_path->total_cost - inner_path->startup_cost);
1320
1321         /*
1322          * Compute number of tuples processed (not number emitted!)
1323          */
1324         ntuples = outer_path_rows * inner_path_rows;
1325
1326         /* CPU costs */
1327         cost_qual_eval(&restrict_qual_cost, path->joinrestrictinfo, root);
1328         startup_cost += restrict_qual_cost.startup;
1329         cpu_per_tuple = cpu_tuple_cost + restrict_qual_cost.per_tuple;
1330         run_cost += cpu_per_tuple * ntuples;
1331
1332         path->path.startup_cost = startup_cost;
1333         path->path.total_cost = startup_cost + run_cost;
1334 }
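/*
 * A minimal standalone sketch of the computation above, with hypothetical
 * inputs (a 1000-row outer path and a non-materialized inner indexscan
 * returning about 5 rows per outer row) and the default cpu_tuple_cost and
 * cpu_operator_cost of 0.01 and 0.0025.  The function name and all numbers
 * are illustrative only; the arithmetic simply mirrors the formula used by
 * cost_nestloop().
 */
static double
example_nestloop_cost(void)
{
	double		outer_startup = 0.0, outer_total = 100.0, outer_rows = 1000.0;
	double		inner_startup = 0.29, inner_total = 8.5, inner_rows = 5.0;
	double		qual_per_tuple = 0.0025;	/* one cheap join qual */
	double		startup,
				run;

	/* both inputs' startup costs are paid before the first result row */
	startup = outer_startup + inner_startup;
	/* scan the outer side once ... */
	run = outer_total - outer_startup;
	/* ... and the inner side once per outer row, re-paying its startup */
	run += (outer_rows - 1) * inner_startup;
	run += outer_rows * (inner_total - inner_startup);
	/* per-tuple CPU: qual evaluation plus cpu_tuple_cost for each pair */
	run += (0.01 + qual_per_tuple) * (outer_rows * inner_rows);

	return startup + run;		/* about 8662.5 with these numbers */
}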
1335
1336 /*
1337  * cost_mergejoin
1338  *        Determines and returns the cost of joining two relations using the
1339  *        merge join algorithm.
1340  *
1341  * 'path' is already filled in except for the cost fields
1342  * 'sjinfo' is extra info about the join for selectivity estimation
1343  *
1344  * Notes: path's mergeclauses should be a subset of the joinrestrictinfo list;
1345  * outersortkeys and innersortkeys are lists of the keys to be used
1346  * to sort the outer and inner relations, or NIL if no explicit
1347  * sort is needed because the source path is already ordered.
1348  */
1349 void
1350 cost_mergejoin(MergePath *path, PlannerInfo *root, SpecialJoinInfo *sjinfo)
1351 {
1352         Path       *outer_path = path->jpath.outerjoinpath;
1353         Path       *inner_path = path->jpath.innerjoinpath;
1354         List       *mergeclauses = path->path_mergeclauses;
1355         List       *outersortkeys = path->outersortkeys;
1356         List       *innersortkeys = path->innersortkeys;
1357         Cost            startup_cost = 0;
1358         Cost            run_cost = 0;
1359         Cost            cpu_per_tuple;
1360         QualCost        merge_qual_cost;
1361         QualCost        qp_qual_cost;
1362         double          outer_path_rows = PATH_ROWS(outer_path);
1363         double          inner_path_rows = PATH_ROWS(inner_path);
1364         double          outer_rows,
1365                                 inner_rows,
1366                                 outer_skip_rows,
1367                                 inner_skip_rows;
1368         double          mergejointuples,
1369                                 rescannedtuples;
1370         double          rescanratio;
1371         Selectivity outerstartsel,
1372                                 outerendsel,
1373                                 innerstartsel,
1374                                 innerendsel;
1375         Path            sort_path;              /* dummy for result of cost_sort */
1376
1377         /* Protect some assumptions below that rowcounts aren't zero */
1378         if (outer_path_rows <= 0)
1379                 outer_path_rows = 1;
1380         if (inner_path_rows <= 0)
1381                 inner_path_rows = 1;
1382
1383         if (!enable_mergejoin)
1384                 startup_cost += disable_cost;
1385
1386         /*
1387          * Compute cost of the mergequals and qpquals (other restriction clauses)
1388          * separately.
1389          */
1390         cost_qual_eval(&merge_qual_cost, mergeclauses, root);
1391         cost_qual_eval(&qp_qual_cost, path->jpath.joinrestrictinfo, root);
1392         qp_qual_cost.startup -= merge_qual_cost.startup;
1393         qp_qual_cost.per_tuple -= merge_qual_cost.per_tuple;
1394
1395         /*
1396          * Get approx # tuples passing the mergequals.  We use approx_tuple_count
1397          * here for speed --- in most cases, any errors won't affect the result
1398          * much.
1399          */
1400         mergejointuples = approx_tuple_count(root, &path->jpath,
1401                                                                                  mergeclauses, sjinfo);
1402
1403         /*
1404          * When there are equal merge keys in the outer relation, the mergejoin
1405          * must rescan any matching tuples in the inner relation. This means
1406          * re-fetching inner tuples.  Our cost model for this is that a re-fetch
1407          * costs the same as an original fetch, which is probably an overestimate;
1408          * but on the other hand we ignore the bookkeeping costs of mark/restore.
1409          * Not clear if it's worth developing a more refined model.
1410          *
1411          * For regular inner and outer joins, the number of re-fetches can be
1412          * estimated approximately as size of merge join output minus size of
1413          * inner relation. Assume that the distinct key values are 1, 2, ..., and
1414          * denote the number of values of each key in the outer relation as m1,
1415          * m2, ...; in the inner relation, n1, n2, ...  Then we have
1416          *
1417          * size of join = m1 * n1 + m2 * n2 + ...
1418          *
1419          * number of rescanned tuples = (m1 - 1) * n1 + (m2 - 1) * n2 + ... = m1 *
1420          * number of rescanned tuples = (m1 - 1) * n1 + (m2 - 1) * n2 + ...
1421          *                  = m1 * n1 + m2 * n2 + ... - (n1 + n2 + ...)
1422          *                  = size of join - size of inner relation
1423          * This equation works correctly for outer tuples having no inner match
1424          * (nk = 0), but not for inner tuples having no outer match (mk = 0); we
1425          * are effectively subtracting those from the number of rescanned tuples,
1426          * when we should not.  Can we do better without expensive selectivity
1427          * computations?
1428          *
1429          * For SEMI and ANTI joins, only one inner tuple need be rescanned for
1430          * each group of same-keyed outer tuples (assuming that all joinquals
1431          * are merge quals).  This makes the effect small enough to ignore,
1432          * so we just set rescannedtuples = 0.  Likewise, the whole issue is
1433          * moot if we are working from a unique-ified outer input.
1434          */
1435         if (sjinfo->jointype == JOIN_SEMI ||
1436                 sjinfo->jointype == JOIN_ANTI)
1437                 rescannedtuples = 0;
1438         else if (IsA(outer_path, UniquePath))
1439                 rescannedtuples = 0;
1440         else
1441         {
1442                 rescannedtuples = mergejointuples - inner_path_rows;
1443                 /* Must clamp because of possible underestimate */
1444                 if (rescannedtuples < 0)
1445                         rescannedtuples = 0;
1446         }
1447         /* We'll inflate inner run cost this much to account for rescanning */
1448         rescanratio = 1.0 + (rescannedtuples / inner_path_rows);
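        /*
         * Hypothetical illustration of the rescan adjustment (for a plain inner
         * or outer join): if the mergequals are expected to pass
         * mergejointuples = 12000 rows while the inner path has
         * inner_path_rows = 10000, then
         *
         *		rescannedtuples = 12000 - 10000 = 2000
         *		rescanratio = 1.0 + 2000/10000 = 1.2
         *
         * and the inner-side costs below are inflated by 20% to cover
         * re-fetching matching inner tuples.
         */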
1449
1450         /*
1451          * A merge join will stop as soon as it exhausts either input stream
1452          * (unless it's an outer join, in which case the outer side has to be
1453          * scanned all the way anyway).  Estimate fraction of the left and right
1454          * inputs that will actually need to be scanned.  Likewise, we can
1455          * estimate the number of rows that will be skipped before the first
1456          * join pair is found, which should be factored into startup cost.
1457          * We use only the first (most significant) merge clause for this purpose.
1458          * Since mergejoinscansel() is a fairly expensive computation, we cache
1459          * the results in the merge clause RestrictInfo.
1460          */
1461         if (mergeclauses && path->jpath.jointype != JOIN_FULL)
1462         {
1463                 RestrictInfo *firstclause = (RestrictInfo *) linitial(mergeclauses);
1464                 List       *opathkeys;
1465                 List       *ipathkeys;
1466                 PathKey    *opathkey;
1467                 PathKey    *ipathkey;
1468                 MergeScanSelCache *cache;
1469
1470                 /* Get the input pathkeys to determine the sort-order details */
1471                 opathkeys = outersortkeys ? outersortkeys : outer_path->pathkeys;
1472                 ipathkeys = innersortkeys ? innersortkeys : inner_path->pathkeys;
1473                 Assert(opathkeys);
1474                 Assert(ipathkeys);
1475                 opathkey = (PathKey *) linitial(opathkeys);
1476                 ipathkey = (PathKey *) linitial(ipathkeys);
1477                 /* debugging check */
1478                 if (opathkey->pk_opfamily != ipathkey->pk_opfamily ||
1479                         opathkey->pk_strategy != ipathkey->pk_strategy ||
1480                         opathkey->pk_nulls_first != ipathkey->pk_nulls_first)
1481                         elog(ERROR, "left and right pathkeys do not match in mergejoin");
1482
1483                 /* Get the selectivity with caching */
1484                 cache = cached_scansel(root, firstclause, opathkey);
1485
1486                 if (bms_is_subset(firstclause->left_relids,
1487                                                   outer_path->parent->relids))
1488                 {
1489                         /* left side of clause is outer */
1490                         outerstartsel = cache->leftstartsel;
1491                         outerendsel = cache->leftendsel;
1492                         innerstartsel = cache->rightstartsel;
1493                         innerendsel = cache->rightendsel;
1494                 }
1495                 else
1496                 {
1497                         /* left side of clause is inner */
1498                         outerstartsel = cache->rightstartsel;
1499                         outerendsel = cache->rightendsel;
1500                         innerstartsel = cache->leftstartsel;
1501                         innerendsel = cache->leftendsel;
1502                 }
1503                 if (path->jpath.jointype == JOIN_LEFT ||
1504                         path->jpath.jointype == JOIN_ANTI)
1505                 {
1506                         outerstartsel = 0.0;
1507                         outerendsel = 1.0;
1508                 }
1509                 else if (path->jpath.jointype == JOIN_RIGHT)
1510                 {
1511                         innerstartsel = 0.0;
1512                         innerendsel = 1.0;
1513                 }
1514         }
1515         else
1516         {
1517                 /* cope with clauseless or full mergejoin */
1518                 outerstartsel = innerstartsel = 0.0;
1519                 outerendsel = innerendsel = 1.0;
1520         }
1521
1522         /*
1523          * Convert selectivities to row counts.  We force outer_rows and
1524          * inner_rows to be at least 1, but the skip_rows estimates can be zero.
1525          */
1526         outer_skip_rows = rint(outer_path_rows * outerstartsel);
1527         inner_skip_rows = rint(inner_path_rows * innerstartsel);
1528         outer_rows = clamp_row_est(outer_path_rows * outerendsel);
1529         inner_rows = clamp_row_est(inner_path_rows * innerendsel);
1530
1531         Assert(outer_skip_rows <= outer_rows);
1532         Assert(inner_skip_rows <= inner_rows);
1533
1534         /*
1535          * Readjust scan selectivities to account for above rounding.  This is
1536          * normally an insignificant effect, but when there are only a few rows in
1537          * the inputs, failing to do this makes for a large percentage error.
1538          */
1539         outerstartsel = outer_skip_rows / outer_path_rows;
1540         innerstartsel = inner_skip_rows / inner_path_rows;
1541         outerendsel = outer_rows / outer_path_rows;
1542         innerendsel = inner_rows / inner_path_rows;
1543
1544         Assert(outerstartsel <= outerendsel);
1545         Assert(innerstartsel <= innerendsel);
1546
1547         /* cost of source data */
1548
1549         if (outersortkeys)                      /* do we need to sort outer? */
1550         {
1551                 cost_sort(&sort_path,
1552                                   root,
1553                                   outersortkeys,
1554                                   outer_path->total_cost,
1555                                   outer_path_rows,
1556                                   outer_path->parent->width,
1557                                   -1.0);
1558                 startup_cost += sort_path.startup_cost;
1559                 startup_cost += (sort_path.total_cost - sort_path.startup_cost)
1560                         * outerstartsel;
1561                 run_cost += (sort_path.total_cost - sort_path.startup_cost)
1562                         * (outerendsel - outerstartsel);
1563         }
1564         else
1565         {
1566                 startup_cost += outer_path->startup_cost;
1567                 startup_cost += (outer_path->total_cost - outer_path->startup_cost)
1568                         * outerstartsel;
1569                 run_cost += (outer_path->total_cost - outer_path->startup_cost)
1570                         * (outerendsel - outerstartsel);
1571         }
1572
1573         if (innersortkeys)                      /* do we need to sort inner? */
1574         {
1575                 cost_sort(&sort_path,
1576                                   root,
1577                                   innersortkeys,
1578                                   inner_path->total_cost,
1579                                   inner_path_rows,
1580                                   inner_path->parent->width,
1581                                   -1.0);
1582                 startup_cost += sort_path.startup_cost;
1583                 startup_cost += (sort_path.total_cost - sort_path.startup_cost)
1584                         * innerstartsel * rescanratio;
1585                 run_cost += (sort_path.total_cost - sort_path.startup_cost)
1586                         * (innerendsel - innerstartsel) * rescanratio;
1587         }
1588         else
1589         {
1590                 startup_cost += inner_path->startup_cost;
1591                 startup_cost += (inner_path->total_cost - inner_path->startup_cost)
1592                         * innerstartsel * rescanratio;
1593                 run_cost += (inner_path->total_cost - inner_path->startup_cost)
1594                         * (innerendsel - innerstartsel) * rescanratio;
1595         }
1596
1597         /* CPU costs */
1598
1599         /*
1600          * The number of tuple comparisons needed is approximately number of outer
1601          * rows plus number of inner rows plus number of rescanned tuples (can we
1602          * refine this?).  At each one, we need to evaluate the mergejoin quals.
1603          */
1604         startup_cost += merge_qual_cost.startup;
1605         startup_cost += merge_qual_cost.per_tuple *
1606                 (outer_skip_rows + inner_skip_rows * rescanratio);
1607         run_cost += merge_qual_cost.per_tuple *
1608                 ((outer_rows - outer_skip_rows) +
1609                  (inner_rows - inner_skip_rows) * rescanratio);
1610
1611         /*
1612          * For each tuple that gets through the mergejoin proper, we charge
1613          * cpu_tuple_cost plus the cost of evaluating additional restriction
1614          * clauses that are to be applied at the join.  (This is pessimistic since
1615          * not all of the quals may get evaluated at each tuple.)
1616          */
1617         startup_cost += qp_qual_cost.startup;
1618         cpu_per_tuple = cpu_tuple_cost + qp_qual_cost.per_tuple;
1619         run_cost += cpu_per_tuple * mergejointuples;
1620
1621         path->jpath.path.startup_cost = startup_cost;
1622         path->jpath.path.total_cost = startup_cost + run_cost;
1623 }
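/*
 * Worked sketch of the scan-fraction interpolation above, with hypothetical
 * numbers: suppose the outer input needs no explicit sort, has
 * startup_cost = 0 and total_cost = 1000, and the cached selectivities give
 * outerstartsel = 0.1 and outerendsel = 0.6.  Then
 *
 *		startup_cost += 0 + (1000 - 0) * 0.1 = 100
 *		run_cost += (1000 - 0) * (0.6 - 0.1) = 500
 *
 * i.e. only 60% of the outer path's run cost is charged at all, and the
 * first 10% (rows skipped before the first join pair) counts as startup
 * rather than run cost.  The inner side is treated the same way, further
 * scaled by rescanratio.
 */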
1624
1625 /*
1626  * run mergejoinscansel() with caching
1627  */
1628 static MergeScanSelCache *
1629 cached_scansel(PlannerInfo *root, RestrictInfo *rinfo, PathKey *pathkey)
1630 {
1631         MergeScanSelCache *cache;
1632         ListCell   *lc;
1633         Selectivity leftstartsel,
1634                                 leftendsel,
1635                                 rightstartsel,
1636                                 rightendsel;
1637         MemoryContext oldcontext;
1638
1639         /* Do we have this result already? */
1640         foreach(lc, rinfo->scansel_cache)
1641         {
1642                 cache = (MergeScanSelCache *) lfirst(lc);
1643                 if (cache->opfamily == pathkey->pk_opfamily &&
1644                         cache->strategy == pathkey->pk_strategy &&
1645                         cache->nulls_first == pathkey->pk_nulls_first)
1646                         return cache;
1647         }
1648
1649         /* Nope, do the computation */
1650         mergejoinscansel(root,
1651                                          (Node *) rinfo->clause,
1652                                          pathkey->pk_opfamily,
1653                                          pathkey->pk_strategy,
1654                                          pathkey->pk_nulls_first,
1655                                          &leftstartsel,
1656                                          &leftendsel,
1657                                          &rightstartsel,
1658                                          &rightendsel);
1659
1660         /* Cache the result in suitably long-lived workspace */
1661         oldcontext = MemoryContextSwitchTo(root->planner_cxt);
1662
1663         cache = (MergeScanSelCache *) palloc(sizeof(MergeScanSelCache));
1664         cache->opfamily = pathkey->pk_opfamily;
1665         cache->strategy = pathkey->pk_strategy;
1666         cache->nulls_first = pathkey->pk_nulls_first;
1667         cache->leftstartsel = leftstartsel;
1668         cache->leftendsel = leftendsel;
1669         cache->rightstartsel = rightstartsel;
1670         cache->rightendsel = rightendsel;
1671
1672         rinfo->scansel_cache = lappend(rinfo->scansel_cache, cache);
1673
1674         MemoryContextSwitchTo(oldcontext);
1675
1676         return cache;
1677 }
1678
1679 /*
1680  * cost_hashjoin
1681  *        Determines and returns the cost of joining two relations using the
1682  *        hash join algorithm.
1683  *
1684  * 'path' is already filled in except for the cost fields
1685  * 'sjinfo' is extra info about the join for selectivity estimation
1686  *
1687  * Note: path's hashclauses should be a subset of the joinrestrictinfo list
1688  */
1689 void
1690 cost_hashjoin(HashPath *path, PlannerInfo *root, SpecialJoinInfo *sjinfo)
1691 {
1692         Path       *outer_path = path->jpath.outerjoinpath;
1693         Path       *inner_path = path->jpath.innerjoinpath;
1694         List       *hashclauses = path->path_hashclauses;
1695         Cost            startup_cost = 0;
1696         Cost            run_cost = 0;
1697         Cost            cpu_per_tuple;
1698         QualCost        hash_qual_cost;
1699         QualCost        qp_qual_cost;
1700         double          hashjointuples;
1701         double          outer_path_rows = PATH_ROWS(outer_path);
1702         double          inner_path_rows = PATH_ROWS(inner_path);
1703         int                     num_hashclauses = list_length(hashclauses);
1704         int                     numbuckets;
1705         int                     numbatches;
1706         double          virtualbuckets;
1707         Selectivity innerbucketsize;
1708         ListCell   *hcl;
1709
1710         if (!enable_hashjoin)
1711                 startup_cost += disable_cost;
1712
1713         /*
1714          * Compute cost of the hashquals and qpquals (other restriction clauses)
1715          * separately.
1716          */
1717         cost_qual_eval(&hash_qual_cost, hashclauses, root);
1718         cost_qual_eval(&qp_qual_cost, path->jpath.joinrestrictinfo, root);
1719         qp_qual_cost.startup -= hash_qual_cost.startup;
1720         qp_qual_cost.per_tuple -= hash_qual_cost.per_tuple;
1721
1722         /*
1723          * Get approx # tuples passing the hashquals.  We use approx_tuple_count
1724          * here for speed --- in most cases, any errors won't affect the result
1725          * much.
1726          */
1727         hashjointuples = approx_tuple_count(root, &path->jpath,
1728                                                                                 hashclauses, sjinfo);
1729
1730         /* cost of source data */
1731         startup_cost += outer_path->startup_cost;
1732         run_cost += outer_path->total_cost - outer_path->startup_cost;
1733         startup_cost += inner_path->total_cost;
1734
1735         /*
1736          * Cost of computing hash function: must do it once per input tuple. We
1737          * charge one cpu_operator_cost for each column's hash function.  Also,
1738          * tack on one cpu_tuple_cost per inner row, to model the costs of
1739          * inserting the row into the hashtable.
1740          *
1741          * XXX when a hashclause is more complex than a single operator, we really
1742          * should charge the extra eval costs of the left or right side, as
1743          * appropriate, here.  This seems more work than it's worth at the moment.
1744          */
1745         startup_cost += (cpu_operator_cost * num_hashclauses + cpu_tuple_cost)
1746                 * inner_path_rows;
1747         run_cost += cpu_operator_cost * num_hashclauses * outer_path_rows;
1748
1749         /* Get hash table size that executor would use for inner relation */
1750         ExecChooseHashTableSize(inner_path_rows,
1751                                                         inner_path->parent->width,
1752                                                         &numbuckets,
1753                                                         &numbatches);
1754         virtualbuckets = (double) numbuckets * (double) numbatches;
1755
1756         /*
1757          * Determine bucketsize fraction for inner relation.  We use the smallest
1758          * bucketsize estimated for any individual hashclause; this is undoubtedly
1759          * conservative.
1760          *
1761          * BUT: if inner relation has been unique-ified, we can assume it's good
1762          * for hashing.  This is important both because it's the right answer, and
1763          * because we avoid contaminating the cache with a value that's wrong for
1764          * non-unique-ified paths.
1765          */
1766         if (IsA(inner_path, UniquePath))
1767                 innerbucketsize = 1.0 / virtualbuckets;
1768         else
1769         {
1770                 innerbucketsize = 1.0;
1771                 foreach(hcl, hashclauses)
1772                 {
1773                         RestrictInfo *restrictinfo = (RestrictInfo *) lfirst(hcl);
1774                         Selectivity thisbucketsize;
1775
1776                         Assert(IsA(restrictinfo, RestrictInfo));
1777
1778                         /*
1779                          * First we have to figure out which side of the hashjoin clause
1780                          * is the inner side.
1781                          *
1782                          * Since we tend to visit the same clauses over and over when
1783                          * planning a large query, we cache the bucketsize estimate in the
1784                          * RestrictInfo node to avoid repeated lookups of statistics.
1785                          */
1786                         if (bms_is_subset(restrictinfo->right_relids,
1787                                                           inner_path->parent->relids))
1788                         {
1789                                 /* righthand side is inner */
1790                                 thisbucketsize = restrictinfo->right_bucketsize;
1791                                 if (thisbucketsize < 0)
1792                                 {
1793                                         /* not cached yet */
1794                                         thisbucketsize =
1795                                                 estimate_hash_bucketsize(root,
1796                                                                                    get_rightop(restrictinfo->clause),
1797                                                                                                  virtualbuckets);
1798                                         restrictinfo->right_bucketsize = thisbucketsize;
1799                                 }
1800                         }
1801                         else
1802                         {
1803                                 Assert(bms_is_subset(restrictinfo->left_relids,
1804                                                                          inner_path->parent->relids));
1805                                 /* lefthand side is inner */
1806                                 thisbucketsize = restrictinfo->left_bucketsize;
1807                                 if (thisbucketsize < 0)
1808                                 {
1809                                         /* not cached yet */
1810                                         thisbucketsize =
1811                                                 estimate_hash_bucketsize(root,
1812                                                                                         get_leftop(restrictinfo->clause),
1813                                                                                                  virtualbuckets);
1814                                         restrictinfo->left_bucketsize = thisbucketsize;
1815                                 }
1816                         }
1817
1818                         if (innerbucketsize > thisbucketsize)
1819                                 innerbucketsize = thisbucketsize;
1820                 }
1821         }
1822
1823         /*
1824          * If inner relation is too big then we will need to "batch" the join,
1825          * which implies writing and reading most of the tuples to disk an extra
1826          * time.  Charge seq_page_cost per page, since the I/O should be nice and
1827          * sequential.  Writing the inner rel counts as startup cost, all the rest
1828          * as run cost.
1829          */
1830         if (numbatches > 1)
1831         {
1832                 double          outerpages = page_size(outer_path_rows,
1833                                                                                    outer_path->parent->width);
1834                 double          innerpages = page_size(inner_path_rows,
1835                                                                                    inner_path->parent->width);
1836
1837                 startup_cost += seq_page_cost * innerpages;
1838                 run_cost += seq_page_cost * (innerpages + 2 * outerpages);
1839         }
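        /*
         * Hypothetical illustration of the batching charge: with numbatches > 1,
         * innerpages = 1000, outerpages = 5000 and the default seq_page_cost of
         * 1.0, the block above adds
         *
         *		startup_cost += 1.0 * 1000 = 1000
         *		run_cost += 1.0 * (1000 + 2 * 5000) = 11000
         *
         * the startup part covering writing the inner rel once, the run part
         * covering re-reading the inner rel plus writing and re-reading the
         * outer rel, all on top of the ordinary scan costs of the two inputs.
         */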
1840
1841         /* CPU costs */
1842
1843         /*
1844          * The number of tuple comparisons needed is the number of outer tuples
1845          * times the typical number of tuples in a hash bucket, which is the inner
1846          * relation size times its bucketsize fraction.  At each one, we need to
1847          * evaluate the hashjoin quals.  But actually, charging the full qual eval
1848          * cost at each tuple is pessimistic, since we don't evaluate the quals
1849          * unless the hash values match exactly.  For lack of a better idea, halve
1850          * the cost estimate to allow for that.
1851          */
1852         startup_cost += hash_qual_cost.startup;
1853         run_cost += hash_qual_cost.per_tuple *
1854                 outer_path_rows * clamp_row_est(inner_path_rows * innerbucketsize) * 0.5;
1855
1856         /*
1857          * For each tuple that gets through the hashjoin proper, we charge
1858          * cpu_tuple_cost plus the cost of evaluating additional restriction
1859          * clauses that are to be applied at the join.  (This is pessimistic since
1860          * not all of the quals may get evaluated at each tuple.)
1861          */
1862         startup_cost += qp_qual_cost.startup;
1863         cpu_per_tuple = cpu_tuple_cost + qp_qual_cost.per_tuple;
1864         run_cost += cpu_per_tuple * hashjointuples;
1865
1866         path->jpath.path.startup_cost = startup_cost;
1867         path->jpath.path.total_cost = startup_cost + run_cost;
1868 }
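/*
 * A minimal standalone sketch of the bucket-comparison charge above, with
 * hypothetical numbers and the default cpu_operator_cost of 0.0025 as the
 * per-tuple cost of a single hashclause.  The function name and inputs are
 * illustrative only; the formula mirrors the run_cost line in cost_hashjoin()
 * (the clamp_row_est() call is omitted since it is a no-op at these sizes).
 */
static double
example_hash_bucket_charge(void)
{
	double		outer_path_rows = 10000.0;
	double		inner_path_rows = 20000.0;
	double		innerbucketsize = 0.001;	/* smallest per-clause estimate */
	double		hash_qual_per_tuple = 0.0025;

	/* expected tuples per probed bucket: 20000 * 0.001 = 20 */
	/* charge: 0.0025 * 10000 * 20 * 0.5 = 250 cost units */
	return hash_qual_per_tuple * outer_path_rows *
		(inner_path_rows * innerbucketsize) * 0.5;
}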
1869
1870
1871 /*
1872  * cost_subplan
1873  *              Figure the costs for a SubPlan (or initplan).
1874  *
1875  * Note: we could dig the subplan's Plan out of the root list, but in practice
1876  * all callers have it handy already, so we make them pass it.
1877  */
1878 void
1879 cost_subplan(PlannerInfo *root, SubPlan *subplan, Plan *plan)
1880 {
1881         QualCost        sp_cost;
1882
1883         /* Figure any cost for evaluating the testexpr */
1884         cost_qual_eval(&sp_cost,
1885                                    make_ands_implicit((Expr *) subplan->testexpr),
1886                                    root);
1887
1888         if (subplan->useHashTable)
1889         {
1890                 /*
1891                  * If we are using a hash table for the subquery outputs, then the
1892                  * cost of evaluating the query is a one-time cost.  We charge one
1893                  * cpu_operator_cost per tuple for the work of loading the hashtable,
1894                  * too.
1895                  */
1896                 sp_cost.startup += plan->total_cost +
1897                         cpu_operator_cost * plan->plan_rows;
1898
1899                 /*
1900                  * The per-tuple costs include the cost of evaluating the lefthand
1901                  * expressions, plus the cost of probing the hashtable.  We already
1902                  * accounted for the lefthand expressions as part of the testexpr,
1903                  * and will also have counted one cpu_operator_cost for each
1904                  * comparison operator.  That is probably too low for the probing
1905                  * cost, but it's hard to make a better estimate, so live with it for
1906                  * now.
1907                  */
1908         }
1909         else
1910         {
1911                 /*
1912                  * Otherwise we will be rescanning the subplan output on each
1913                  * evaluation.  We need to estimate how much of the output we will
1914                  * actually need to scan.  NOTE: this logic should agree with the
1915                  * tuple_fraction estimates used by make_subplan() in
1916                  * plan/subselect.c.
1917                  */
1918                 Cost            plan_run_cost = plan->total_cost - plan->startup_cost;
1919
1920                 if (subplan->subLinkType == EXISTS_SUBLINK)
1921                 {
1922                         /* we only need to fetch 1 tuple */
1923                         sp_cost.per_tuple += plan_run_cost / plan->plan_rows;
1924                 }
1925                 else if (subplan->subLinkType == ALL_SUBLINK ||
1926                                  subplan->subLinkType == ANY_SUBLINK)
1927                 {
1928                         /* assume we need 50% of the tuples */
1929                         sp_cost.per_tuple += 0.50 * plan_run_cost;
1930                         /* also charge a cpu_operator_cost per row examined */
1931                         sp_cost.per_tuple += 0.50 * plan->plan_rows * cpu_operator_cost;
1932                 }
1933                 else
1934                 {
1935                         /* assume we need all tuples */
1936                         sp_cost.per_tuple += plan_run_cost;
1937                 }
1938
1939                 /*
1940                  * Also account for subplan's startup cost. If the subplan is
1941                  * uncorrelated or undirect correlated, AND its topmost node is a Sort
1942                  * or Material node, assume that we'll only need to pay its startup
1943                  * cost once; otherwise assume we pay the startup cost every time.
1944                  */
1945                 if (subplan->parParam == NIL &&
1946                         (IsA(plan, Sort) ||
1947                          IsA(plan, Material)))
1948                         sp_cost.startup += plan->startup_cost;
1949                 else
1950                         sp_cost.per_tuple += plan->startup_cost;
1951         }
1952
1953         subplan->startup_cost = sp_cost.startup;
1954         subplan->per_call_cost = sp_cost.per_tuple;
1955 }
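/*
 * Worked sketch of the per-call charges in the non-hashed branch, using a
 * hypothetical subplan with startup_cost = 5, total_cost = 105 and
 * plan_rows = 1000 (so plan_run_cost = 100), and the default
 * cpu_operator_cost of 0.0025:
 *
 *		EXISTS_SUBLINK:     per_tuple += 100 / 1000 = 0.1
 *		ANY/ALL_SUBLINK:    per_tuple += 0.5 * 100 + 0.5 * 1000 * 0.0025 = 51.25
 *		other types:        per_tuple += 100
 *
 * plus, unless the startup cost can be paid just once (uncorrelated input
 * with a Sort or Material top node), per_tuple += 5 for the subplan's
 * startup cost as well.
 */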
1956
1957
1958 /*
1959  * cost_qual_eval
1960  *              Estimate the CPU costs of evaluating a WHERE clause.
1961  *              The input can be either an implicitly-ANDed list of boolean
1962  *              expressions, or a list of RestrictInfo nodes.  (The latter is
1963  *              preferred since it allows caching of the results.)
1964  *              The result includes both a one-time (startup) component,
1965  *              and a per-evaluation component.
1966  */
1967 void
1968 cost_qual_eval(QualCost *cost, List *quals, PlannerInfo *root)
1969 {
1970         cost_qual_eval_context context;
1971         ListCell   *l;
1972
1973         context.root = root;
1974         context.total.startup = 0;
1975         context.total.per_tuple = 0;
1976
1977         /* We don't charge any cost for the implicit ANDing at top level ... */
1978
1979         foreach(l, quals)
1980         {
1981                 Node       *qual = (Node *) lfirst(l);
1982
1983                 cost_qual_eval_walker(qual, &context);
1984         }
1985
1986         *cost = context.total;
1987 }
1988
1989 /*
1990  * cost_qual_eval_node
1991  *              As above, for a single RestrictInfo or expression.
1992  */
1993 void
1994 cost_qual_eval_node(QualCost *cost, Node *qual, PlannerInfo *root)
1995 {
1996         cost_qual_eval_context context;
1997
1998         context.root = root;
1999         context.total.startup = 0;
2000         context.total.per_tuple = 0;
2001
2002         cost_qual_eval_walker(qual, &context);
2003
2004         *cost = context.total;
2005 }
2006
2007 static bool
2008 cost_qual_eval_walker(Node *node, cost_qual_eval_context *context)
2009 {
2010         if (node == NULL)
2011                 return false;
2012
2013         /*
2014          * RestrictInfo nodes contain an eval_cost field reserved for this
2015          * routine's use, so that it's not necessary to evaluate the qual clause's
2016          * cost more than once.  If the clause's cost hasn't been computed yet,
2017          * the field's startup value will contain -1.
2018          */
2019         if (IsA(node, RestrictInfo))
2020         {
2021                 RestrictInfo *rinfo = (RestrictInfo *) node;
2022
2023                 if (rinfo->eval_cost.startup < 0)
2024                 {
2025                         cost_qual_eval_context locContext;
2026
2027                         locContext.root = context->root;
2028                         locContext.total.startup = 0;
2029                         locContext.total.per_tuple = 0;
2030
2031                         /*
2032                          * For an OR clause, recurse into the marked-up tree so that we
2033                          * set the eval_cost for contained RestrictInfos too.
2034                          */
2035                         if (rinfo->orclause)
2036                                 cost_qual_eval_walker((Node *) rinfo->orclause, &locContext);
2037                         else
2038                                 cost_qual_eval_walker((Node *) rinfo->clause, &locContext);
2039
2040                         /*
2041                          * If the RestrictInfo is marked pseudoconstant, it will be tested
2042                          * only once, so treat its cost as all startup cost.
2043                          */
2044                         if (rinfo->pseudoconstant)
2045                         {
2046                                 /* count one execution during startup */
2047                                 locContext.total.startup += locContext.total.per_tuple;
2048                                 locContext.total.per_tuple = 0;
2049                         }
2050                         rinfo->eval_cost = locContext.total;
2051                 }
2052                 context->total.startup += rinfo->eval_cost.startup;
2053                 context->total.per_tuple += rinfo->eval_cost.per_tuple;
2054                 /* do NOT recurse into children */
2055                 return false;
2056         }
2057
2058         /*
2059          * For each operator or function node in the given tree, we charge the
2060          * estimated execution cost given by pg_proc.procost (remember to multiply
2061          * this by cpu_operator_cost).
2062          *
2063          * Vars and Consts are charged zero, and so are boolean operators (AND,
2064          * OR, NOT). Simplistic, but a lot better than no model at all.
2065          *
2066          * Should we try to account for the possibility of short-circuit
2067          * evaluation of AND/OR?  Probably *not*, because that would make the
2068          * results depend on the clause ordering, and we are not in any position
2069          * to expect that the current ordering of the clauses is the one that's
2070          * going to end up being used.  (Is it worth applying order_qual_clauses
2071          * much earlier in the planning process to fix this?)
2072          */
2073         if (IsA(node, FuncExpr))
2074         {
2075                 context->total.per_tuple +=
2076                         get_func_cost(((FuncExpr *) node)->funcid) * cpu_operator_cost;
2077         }
2078         else if (IsA(node, OpExpr) ||
2079                          IsA(node, DistinctExpr) ||
2080                          IsA(node, NullIfExpr))
2081         {
2082                 /* rely on struct equivalence to treat these all alike */
2083                 set_opfuncid((OpExpr *) node);
2084                 context->total.per_tuple +=
2085                         get_func_cost(((OpExpr *) node)->opfuncid) * cpu_operator_cost;
2086         }
2087         else if (IsA(node, ScalarArrayOpExpr))
2088         {
2089                 /*
2090                  * Estimate that the operator will be applied to about half of the
2091                  * array elements before the answer is determined.
2092                  */
2093                 ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) node;
2094                 Node       *arraynode = (Node *) lsecond(saop->args);
2095
2096                 set_sa_opfuncid(saop);
2097                 context->total.per_tuple += get_func_cost(saop->opfuncid) *
2098                         cpu_operator_cost * estimate_array_length(arraynode) * 0.5;
2099         }
2100         else if (IsA(node, CoerceViaIO))
2101         {
2102                 CoerceViaIO *iocoerce = (CoerceViaIO *) node;
2103                 Oid                     iofunc;
2104                 Oid                     typioparam;
2105                 bool            typisvarlena;
2106
2107                 /* check the result type's input function */
2108                 getTypeInputInfo(iocoerce->resulttype,
2109                                                  &iofunc, &typioparam);
2110                 context->total.per_tuple += get_func_cost(iofunc) * cpu_operator_cost;
2111                 /* check the input type's output function */
2112                 getTypeOutputInfo(exprType((Node *) iocoerce->arg),
2113                                                   &iofunc, &typisvarlena);
2114                 context->total.per_tuple += get_func_cost(iofunc) * cpu_operator_cost;
2115         }
2116         else if (IsA(node, ArrayCoerceExpr))
2117         {
2118                 ArrayCoerceExpr *acoerce = (ArrayCoerceExpr *) node;
2119                 Node       *arraynode = (Node *) acoerce->arg;
2120
2121                 if (OidIsValid(acoerce->elemfuncid))
2122                         context->total.per_tuple += get_func_cost(acoerce->elemfuncid) *
2123                                 cpu_operator_cost * estimate_array_length(arraynode);
2124         }
2125         else if (IsA(node, RowCompareExpr))
2126         {
2127                 /* Conservatively assume we will check all the columns */
2128                 RowCompareExpr *rcexpr = (RowCompareExpr *) node;
2129                 ListCell   *lc;
2130
2131                 foreach(lc, rcexpr->opnos)
2132                 {
2133                         Oid                     opid = lfirst_oid(lc);
2134
2135                         context->total.per_tuple += get_func_cost(get_opcode(opid)) *
2136                                 cpu_operator_cost;
2137                 }
2138         }
2139         else if (IsA(node, CurrentOfExpr))
2140         {
2141                 /* Report high cost to prevent selection of anything but TID scan */
2142                 context->total.startup += disable_cost;
2143         }
2144         else if (IsA(node, SubLink))
2145         {
2146                 /* This routine should not be applied to un-planned expressions */
2147                 elog(ERROR, "cannot handle unplanned sub-select");
2148         }
2149         else if (IsA(node, SubPlan))
2150         {
2151                 /*
2152                  * A subplan node in an expression typically indicates that the
2153                  * subplan will be executed on each evaluation, so charge accordingly.
2154                  * (Sub-selects that can be executed as InitPlans have already been
2155                  * removed from the expression.)
2156                  */
2157                 SubPlan    *subplan = (SubPlan *) node;
2158
2159                 context->total.startup += subplan->startup_cost;
2160                 context->total.per_tuple += subplan->per_call_cost;
2161
2162                 /*
2163                  * We don't want to recurse into the testexpr, because it was already
2164                  * counted in the SubPlan node's costs.  So we're done.
2165                  */
2166                 return false;
2167         }
2168         else if (IsA(node, AlternativeSubPlan))
2169         {
2170                 /*
2171                  * Arbitrarily use the first alternative plan for costing.  (We should
2172                  * certainly only include one alternative, and we don't yet have
2173                  * enough information to know which one the executor is most likely
2174                  * to use.)
2175                  */
2176                 AlternativeSubPlan *asplan = (AlternativeSubPlan *) node;
2177
2178                 return cost_qual_eval_walker((Node *) linitial(asplan->subplans),
2179                                                                          context);
2180         }
2181
2182         /* recurse into children */
2183         return expression_tree_walker(node, cost_qual_eval_walker,
2184                                                                   (void *) context);
2185 }
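/*
 * Hypothetical illustration of the per-node charging above: for a qual such
 * as "x > 10 AND lower(y) = 'foo'", the AND, the Vars and the Consts are
 * free; the two comparison operators and lower() each charge their
 * pg_proc.procost (1 by default) times cpu_operator_cost, so the whole qual
 * costs 3 * 0.0025 = 0.0075 per tuple and nothing at startup.
 */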
2186
2187
2188 /*
2189  * approx_tuple_count
2190  *              Quick-and-dirty estimation of the number of join rows passing
2191  *              a set of qual conditions.
2192  *
2193  * The quals can be either an implicitly-ANDed list of boolean expressions,
2194  * or a list of RestrictInfo nodes (typically the latter).
2195  *
2196  * This is quick-and-dirty because we bypass clauselist_selectivity, and
2197  * simply multiply the independent clause selectivities together.  Now
2198  * clauselist_selectivity often can't do any better than that anyhow, but
2199  * for some situations (such as range constraints) it is smarter.  However,
2200  * we can't effectively cache the results of clauselist_selectivity, whereas
2201  * the individual clause selectivities can be and are cached.
2202  *
2203  * Since we are only using the results to estimate how many potential
2204  * output tuples are generated and passed through qpqual checking, it
2205  * seems OK to live with the approximation.
2206  */
2207 static double
2208 approx_tuple_count(PlannerInfo *root, JoinPath *path,
2209                                    List *quals, SpecialJoinInfo *sjinfo)
2210 {
2211         double          tuples;
2212         double          outer_tuples = path->outerjoinpath->parent->rows;
2213         double          inner_tuples = path->innerjoinpath->parent->rows;
2214         Selectivity selec = 1.0;
2215         ListCell   *l;
2216
2217         /* Get the approximate selectivity */
2218         foreach(l, quals)
2219         {
2220                 Node       *qual = (Node *) lfirst(l);
2221
2222                 /* Note that clause_selectivity will be able to cache its result */
2223                 selec *= clause_selectivity(root, qual, 0, sjinfo->jointype, sjinfo);
2224         }
2225
2226         /* Apply it correctly using the input relation sizes */
2227         if (sjinfo->jointype == JOIN_SEMI)
2228                 tuples = selec * outer_tuples;
2229         else if (sjinfo->jointype == JOIN_ANTI)
2230                 tuples = (1.0 - selec) * outer_tuples;
2231         else
2232                 tuples = selec * outer_tuples * inner_tuples;
2233
2234         return clamp_row_est(tuples);
2235 }
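/*
 * Hypothetical illustration: with outer_tuples = 1000, inner_tuples = 500
 * and two quals of selectivity 0.01 and 0.5, selec = 0.005, giving
 *
 *		plain join:   0.005 * 1000 * 500 = 2500 tuples
 *		JOIN_SEMI:    0.005 * 1000 = 5
 *		JOIN_ANTI:    (1 - 0.005) * 1000 = 995
 *
 * each result being clamped to at least one row by clamp_row_est().
 */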
2236
2237
2238 /*
2239  * set_baserel_size_estimates
2240  *              Set the size estimates for the given base relation.
2241  *
2242  * The rel's targetlist and restrictinfo list must have been constructed
2243  * already.
2244  *
2245  * We set the following fields of the rel node:
2246  *      rows: the estimated number of output tuples (after applying
2247  *                restriction clauses).
2248  *      width: the estimated average output tuple width in bytes.
2249  *      baserestrictcost: estimated cost of evaluating baserestrictinfo clauses.
2250  */
2251 void
2252 set_baserel_size_estimates(PlannerInfo *root, RelOptInfo *rel)
2253 {
2254         double          nrows;
2255
2256         /* Should only be applied to base relations */
2257         Assert(rel->relid > 0);
2258
2259         nrows = rel->tuples *
2260                 clauselist_selectivity(root,
2261                                                            rel->baserestrictinfo,
2262                                                            0,
2263                                                            JOIN_INNER,
2264                                                            NULL);
2265
2266         rel->rows = clamp_row_est(nrows);
2267
2268         cost_qual_eval(&rel->baserestrictcost, rel->baserestrictinfo, root);
2269
2270         set_rel_width(root, rel);
2271 }
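/*
 * Worked sketch with hypothetical numbers: a relation with rel->tuples =
 * 1000000 whose baserestrictinfo clauses have a combined selectivity of
 * 0.002 gets rel->rows = clamp_row_est(1000000 * 0.002) = 2000, while a
 * combined selectivity of 0.0000001 would be clamped up to 1 row rather
 * than the "0.1 rows" the raw product suggests.
 */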
2272
2273 /*
2274  * set_joinrel_size_estimates
2275  *              Set the size estimates for the given join relation.
2276  *
2277  * The rel's targetlist must have been constructed already, and a
2278  * restriction clause list that matches the given component rels must
2279  * be provided.
2280  *
2281  * Since there is more than one way to make a joinrel for more than two
2282  * base relations, the results we get here could depend on which component
2283  * rel pair is provided.  In theory we should get the same answers no matter
2284  * which pair is provided; in practice, since the selectivity estimation
2285  * routines don't handle all cases equally well, we might not.  But there's
2286  * not much to be done about it.  (Would it make sense to repeat the
2287  * calculations for each pair of input rels that's encountered, and somehow
2288  * average the results?  Probably way more trouble than it's worth.)
2289  *
2290  * We set only the rows field here.  The width field was already set by
2291  * build_joinrel_tlist, and baserestrictcost is not used for join rels.
2292  */
2293 void
2294 set_joinrel_size_estimates(PlannerInfo *root, RelOptInfo *rel,
2295                                                    RelOptInfo *outer_rel,
2296                                                    RelOptInfo *inner_rel,
2297                                                    SpecialJoinInfo *sjinfo,
2298                                                    List *restrictlist)
2299 {
2300         JoinType        jointype = sjinfo->jointype;
2301         Selectivity jselec;
2302         Selectivity pselec;
2303         double          nrows;
2304
2305         /*
2306          * Compute joinclause selectivity.      Note that we are only considering
2307          * clauses that become restriction clauses at this join level; we are not
2308          * double-counting them because they were not considered in estimating the
2309          * sizes of the component rels.
2310          *
2311          * For an outer join, we have to distinguish the selectivity of the join's
2312          * own clauses (JOIN/ON conditions) from any clauses that were "pushed
2313          * down".  For inner joins we just count them all as joinclauses.
2314          */
2315         if (IS_OUTER_JOIN(jointype))
2316         {
2317                 List       *joinquals = NIL;
2318                 List       *pushedquals = NIL;
2319                 ListCell   *l;
2320
2321                 /* Grovel through the clauses to separate into two lists */
2322                 foreach(l, restrictlist)
2323                 {
2324                         RestrictInfo *rinfo = (RestrictInfo *) lfirst(l);
2325
2326                         Assert(IsA(rinfo, RestrictInfo));
2327                         if (rinfo->is_pushed_down)
2328                                 pushedquals = lappend(pushedquals, rinfo);
2329                         else
2330                                 joinquals = lappend(joinquals, rinfo);
2331                 }
2332
2333                 /* Get the separate selectivities */
2334                 jselec = clauselist_selectivity(root,
2335                                                                                 joinquals,
2336                                                                                 0,
2337                                                                                 jointype,
2338                                                                                 sjinfo);
2339                 pselec = clauselist_selectivity(root,
2340                                                                                 pushedquals,
2341                                                                                 0,
2342                                                                                 jointype,
2343                                                                                 sjinfo);
2344
2345                 /* Avoid leaking a lot of ListCells */
2346                 list_free(joinquals);
2347                 list_free(pushedquals);
2348         }
2349         else
2350         {
2351                 jselec = clauselist_selectivity(root,
2352                                                                                 restrictlist,
2353                                                                                 0,
2354                                                                                 jointype,
2355                                                                                 sjinfo);
2356                 pselec = 0.0;                   /* not used, keep compiler quiet */
2357         }
2358
2359         /*
2360          * Basically, we multiply size of Cartesian product by selectivity.
2361          *
2362          * If we are doing an outer join, take that into account: the joinqual
2363          * selectivity has to be clamped using the knowledge that the output must
2364          * be at least as large as the non-nullable input.      However, any
2365          * pushed-down quals are applied after the outer join, so their
2366          * selectivity applies fully.
2367          *
2368          * For JOIN_SEMI and JOIN_ANTI, the selectivity is defined as the fraction
2369          * of LHS rows that have matches, and we apply that straightforwardly.
2370          */
2371         switch (jointype)
2372         {
2373                 case JOIN_INNER:
2374                         nrows = outer_rel->rows * inner_rel->rows * jselec;
2375                         break;
2376                 case JOIN_LEFT:
2377                         nrows = outer_rel->rows * inner_rel->rows * jselec;
2378                         if (nrows < outer_rel->rows)
2379                                 nrows = outer_rel->rows;
2380                         nrows *= pselec;
2381                         break;
2382                 case JOIN_FULL:
2383                         nrows = outer_rel->rows * inner_rel->rows * jselec;
2384                         if (nrows < outer_rel->rows)
2385                                 nrows = outer_rel->rows;
2386                         if (nrows < inner_rel->rows)
2387                                 nrows = inner_rel->rows;
2388                         nrows *= pselec;
2389                         break;
2390                 case JOIN_SEMI:
2391                         nrows = outer_rel->rows * jselec;
2392                         nrows *= pselec;
2393                         break;
2394                 case JOIN_ANTI:
2395                         nrows = outer_rel->rows * (1.0 - jselec);
2396                         nrows *= pselec;
2397                         break;
2398                 default:
2399                         /* other values not expected here */
2400                         elog(ERROR, "unrecognized join type: %d", (int) jointype);
2401                         nrows = 0;                      /* keep compiler quiet */
2402                         break;
2403         }
2404
2405         rel->rows = clamp_row_est(nrows);
2406 }
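/*
 * Hypothetical illustration of the outer-join clamping: with
 * outer_rel->rows = 1000, inner_rel->rows = 10, jselec = 0.05 and a
 * pushed-down qual with pselec = 0.5, JOIN_LEFT first computes
 * 1000 * 10 * 0.05 = 500, clamps that up to the outer size of 1000 (every
 * outer row must appear at least once), and only then applies the
 * pushed-down selectivity, giving 1000 * 0.5 = 500 rows.  For JOIN_INNER
 * all the clauses are folded into jselec and no clamping step is applied.
 */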
2407
2408 /*
2409  * set_function_size_estimates
2410  *              Set the size estimates for a base relation that is a function call.
2411  *
2412  * The rel's targetlist and restrictinfo list must have been constructed
2413  * already.
2414  *
2415  * We set the same fields as set_baserel_size_estimates.
2416  */
2417 void
2418 set_function_size_estimates(PlannerInfo *root, RelOptInfo *rel)
2419 {
2420         RangeTblEntry *rte;
2421
2422         /* Should only be applied to base relations that are functions */
2423         Assert(rel->relid > 0);
2424         rte = planner_rt_fetch(rel->relid, root);
2425         Assert(rte->rtekind == RTE_FUNCTION);
2426
2427         /* Estimate number of rows the function itself will return */
2428         rel->tuples = clamp_row_est(expression_returns_set_rows(rte->funcexpr));
2429
2430         /* Now estimate number of output rows, etc */
2431         set_baserel_size_estimates(root, rel);
2432 }
2433
2434 /*
2435  * set_values_size_estimates
2436  *              Set the size estimates for a base relation that is a values list.
2437  *
2438  * The rel's targetlist and restrictinfo list must have been constructed
2439  * already.
2440  *
2441  * We set the same fields as set_baserel_size_estimates.
2442  */
2443 void
2444 set_values_size_estimates(PlannerInfo *root, RelOptInfo *rel)
2445 {
2446         RangeTblEntry *rte;
2447
2448         /* Should only be applied to base relations that are values lists */
2449         Assert(rel->relid > 0);
2450         rte = planner_rt_fetch(rel->relid, root);
2451         Assert(rte->rtekind == RTE_VALUES);
2452
2453         /*
2454          * Estimate number of rows the values list will return. We know this
2455          * precisely based on the list length (well, barring set-returning
2456          * functions in list items, but that's a refinement not catered for
2457          * anywhere else either).
2458          */
2459         rel->tuples = list_length(rte->values_lists);
2460
2461         /* Now estimate number of output rows, etc */
2462         set_baserel_size_estimates(root, rel);
2463 }
2464
2465
2466 /*
2467  * set_rel_width
2468  *              Set the estimated output width of a base relation.
2469  *
2470  * NB: this works best on plain relations because it prefers to look at
2471  * real Vars.  It will fail to make use of pg_statistic info when applied
2472  * to a subquery relation, even if the subquery outputs are simple vars
2473  * that we could have gotten info for.  Is it worth trying to be smarter
2474  * about subqueries?
2475  *
2476  * The per-attribute width estimates are cached for possible re-use while
2477  * building join relations.
2478  */
2479 static void
2480 set_rel_width(PlannerInfo *root, RelOptInfo *rel)
2481 {
2482         int32           tuple_width = 0;
2483         ListCell   *tllist;
2484         Oid                     rel_reloid;
2485
2486         /*
2487          * Usually (perhaps always), all the Vars have the same reloid, so we can
2488          * save some redundant list-searching by doing getrelid just once.
2489          */
2490         if (rel->relid > 0)
2491                 rel_reloid = getrelid(rel->relid, root->parse->rtable);
2492         else
2493                 rel_reloid = InvalidOid;        /* probably can't happen */
2494
2495         foreach(tllist, rel->reltargetlist)
2496         {
2497                 Var                *var = (Var *) lfirst(tllist);
2498                 int                     ndx;
2499                 Oid                     var_reloid;
2500                 int32           item_width;
2501
2502                 /* For now, punt on whole-row child Vars */
2503                 if (!IsA(var, Var))
2504                 {
2505                         tuple_width += 32;      /* arbitrary */
2506                         continue;
2507                 }
2508
2509                 ndx = var->varattno - rel->min_attr;
2510
2511                 /*
2512                  * The width probably hasn't been cached yet, but may as well check
2513                  */
2514                 if (rel->attr_widths[ndx] > 0)
2515                 {
2516                         tuple_width += rel->attr_widths[ndx];
2517                         continue;
2518                 }
2519
2520                 if (var->varno == rel->relid)
2521                         var_reloid = rel_reloid;
2522                 else
2523                         var_reloid = getrelid(var->varno, root->parse->rtable);
2524
2525                 if (var_reloid != InvalidOid)
2526                 {
2527                         item_width = get_attavgwidth(var_reloid, var->varattno);
2528                         if (item_width > 0)
2529                         {
2530                                 rel->attr_widths[ndx] = item_width;
2531                                 tuple_width += item_width;
2532                                 continue;
2533                         }
2534                 }
2535
2536                 /*
2537                  * Not a plain relation, or can't find statistics for it. Estimate
2538                  * using just the type info.
2539                  */
2540                 item_width = get_typavgwidth(var->vartype, var->vartypmod);
2541                 Assert(item_width > 0);
2542                 rel->attr_widths[ndx] = item_width;
2543                 tuple_width += item_width;
2544         }
2545         Assert(tuple_width >= 0);
2546         rel->width = tuple_width;
2547 }
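
/*
 * Illustrative sketch with hypothetical statistics: for a reltargetlist
 * holding an int4 Var and a text Var whose pg_statistic average width is
 * 20 bytes, the loop above accumulates 4 + 20 and sets rel->width = 24.
 * A non-Var item (the whole-row child case punted on above) would instead
 * contribute the arbitrary 32-byte placeholder.
 */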
2548
2549 /*
2550  * relation_byte_size
2551  *        Estimate the storage space in bytes for a given number of tuples
2552  *        of a given width (size in bytes).
2553  */
2554 static double
2555 relation_byte_size(double tuples, int width)
2556 {
2557         return tuples * (MAXALIGN(width) + MAXALIGN(sizeof(HeapTupleHeaderData)));
2558 }
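
/*
 * Worked example with hypothetical inputs: on a typical build where both
 * the tuple width and sizeof(HeapTupleHeaderData) round up to 24 bytes
 * under MAXALIGN, 1000 tuples of width 24 are estimated at
 * 1000 * (24 + 24) = 48000 bytes.
 */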
2559
2560 /*
2561  * page_size
2562  *        Returns an estimate of the number of pages covered by a given
2563  *        number of tuples of a given width (size in bytes).
2564  */
2565 static double
2566 page_size(double tuples, int width)
2567 {
2568         return ceil(relation_byte_size(tuples, width) / BLCKSZ);
2569 }
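
/*
 * Continuing the hypothetical example above, with the default BLCKSZ of
 * 8192: page_size(1000, 24) = ceil(48000 / 8192) = 6 pages.
 */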