1 /*-------------------------------------------------------------------------
2  *
3  * costsize.c
4  *        Routines to compute (and set) relation sizes and path costs
5  *
6  * Path costs are measured in units of disk accesses: one sequential page
7  * fetch has cost 1.  All else is scaled relative to a page fetch, using
8  * the scaling parameters
9  *
10  *      random_page_cost        Cost of a non-sequential page fetch
11  *      cpu_tuple_cost          Cost of typical CPU time to process a tuple
12  *      cpu_index_tuple_cost  Cost of typical CPU time to process an index tuple
13  *      cpu_operator_cost       Cost of CPU time to process a typical WHERE operator
14  *
15  * We also use a rough estimate "effective_cache_size" of the number of
16  * disk pages in Postgres + OS-level disk cache.  (We can't simply use
17  * NBuffers for this purpose because that would ignore the effects of
18  * the kernel's disk cache.)
19  *
20  * Obviously, taking constants for these values is an oversimplification,
21  * but it's tough enough to get any useful estimates even at this level of
22  * detail.      Note that all of these parameters are user-settable, in case
23  * the default values are drastically off for a particular platform.
24  *
25  * We compute two separate costs for each path:
26  *              total_cost: total estimated cost to fetch all tuples
27  *              startup_cost: cost that is expended before first tuple is fetched
28  * In some scenarios, such as when there is a LIMIT or we are implementing
29  * an EXISTS(...) sub-select, it is not necessary to fetch all tuples of the
30  * path's result.  A caller can estimate the cost of fetching a partial
31  * result by interpolating between startup_cost and total_cost.  In detail:
32  *              actual_cost = startup_cost +
33  *                      (total_cost - startup_cost) * tuples_to_fetch / path->parent->rows;
34  * Note that a base relation's rows count (and, by extension, plan_rows for
35  * plan nodes below the LIMIT node) are set without regard to any LIMIT, so
36  * that this equation works properly.  (Also, these routines guarantee not to
37  * set the rows count to zero, so there will be no zero divide.)  The LIMIT is
38  * applied as a top-level plan node.
39  *
40  * For largely historical reasons, most of the routines in this module use
41  * the passed result Path only to store their startup_cost and total_cost
42  * results into.  All the input data they need is passed as separate
43  * parameters, even though much of it could be extracted from the Path.
44  * An exception is made for the cost_XXXjoin() routines, which expect all
45  * the non-cost fields of the passed XXXPath to be filled in.
46  *
47  *
48  * Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
49  * Portions Copyright (c) 1994, Regents of the University of California
50  *
51  * IDENTIFICATION
52  *        $Header: /cvsroot/pgsql/src/backend/optimizer/path/costsize.c,v 1.105 2003/02/08 20:20:54 tgl Exp $
53  *
54  *-------------------------------------------------------------------------
55  */
56
57 #include "postgres.h"
58
59 #include <math.h>
60
61 #include "catalog/pg_statistic.h"
62 #include "executor/nodeHash.h"
63 #include "miscadmin.h"
64 #include "optimizer/clauses.h"
65 #include "optimizer/cost.h"
66 #include "optimizer/pathnode.h"
67 #include "parser/parsetree.h"
68 #include "utils/selfuncs.h"
69 #include "utils/lsyscache.h"
70 #include "utils/syscache.h"
71
72
73 #define LOG2(x)  (log(x) / 0.693147180559945)
74 #define LOG6(x)  (log(x) / 1.79175946922805)
75
76 /*
77  * Some Paths return less than the nominal number of rows of their parent
78  * relations; join nodes need to do this to get the correct input count:
79  */
80 #define PATH_ROWS(path) \
81         (IsA(path, UniquePath) ? \
82          ((UniquePath *) (path))->rows : \
83          (path)->parent->rows)
84
85
86 double          effective_cache_size = DEFAULT_EFFECTIVE_CACHE_SIZE;
87 double          random_page_cost = DEFAULT_RANDOM_PAGE_COST;
88 double          cpu_tuple_cost = DEFAULT_CPU_TUPLE_COST;
89 double          cpu_index_tuple_cost = DEFAULT_CPU_INDEX_TUPLE_COST;
90 double          cpu_operator_cost = DEFAULT_CPU_OPERATOR_COST;
91
92 Cost            disable_cost = 100000000.0;
93
94 bool            enable_seqscan = true;
95 bool            enable_indexscan = true;
96 bool            enable_tidscan = true;
97 bool            enable_sort = true;
98 bool            enable_hashagg = true;
99 bool            enable_nestloop = true;
100 bool            enable_mergejoin = true;
101 bool            enable_hashjoin = true;
102
103
104 static Selectivity estimate_hash_bucketsize(Query *root, Var *var,
105                                                                                         int nbuckets);
106 static bool cost_qual_eval_walker(Node *node, QualCost *total);
107 static Selectivity approx_selectivity(Query *root, List *quals,
108                                                                           JoinType jointype);
109 static void set_rel_width(Query *root, RelOptInfo *rel);
110 static double relation_byte_size(double tuples, int width);
111 static double page_size(double tuples, int width);
112
113
114 /*
115  * cost_seqscan
116  *        Determines and returns the cost of scanning a relation sequentially.
117  */
118 void
119 cost_seqscan(Path *path, Query *root,
120                          RelOptInfo *baserel)
121 {
122         Cost            startup_cost = 0;
123         Cost            run_cost = 0;
124         Cost            cpu_per_tuple;
125
126         /* Should only be applied to base relations */
127         Assert(baserel->relid > 0);
128         Assert(baserel->rtekind == RTE_RELATION);
129
130         if (!enable_seqscan)
131                 startup_cost += disable_cost;
132
133         /*
134          * disk costs
135          *
136          * The cost of reading a page sequentially is 1.0, by definition. Note
137          * that the Unix kernel will typically do some amount of read-ahead
138          * optimization, so that this cost is less than the true cost of
139          * reading a page from disk.  We ignore that issue here, but must take
140          * it into account when estimating the cost of non-sequential
141          * accesses!
142          */
143         run_cost += baserel->pages; /* sequential fetches with cost 1.0 */
144
145         /* CPU costs */
146         startup_cost += baserel->baserestrictcost.startup;
147         cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
148         run_cost += cpu_per_tuple * baserel->tuples;
149
150         path->startup_cost = startup_cost;
151         path->total_cost = startup_cost + run_cost;
152 }
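/*
 * Worked example (illustrative only): for a table of 10000 pages and
 * 1000000 tuples whose restriction quals cost one cpu_operator_cost per
 * tuple, and assuming the usual defaults cpu_tuple_cost = 0.01 and
 * cpu_operator_cost = 0.0025, the formula above gives roughly
 *              disk:  10000 sequential fetches * 1.0           = 10000
 *              cpu:   (0.01 + 0.0025) * 1000000 tuples         = 12500
 * for a total_cost of about 22500; the real numbers depend entirely on
 * the user-settable cost parameters.
 */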
153
154 /*
155  * cost_nonsequential_access
156  *        Estimate the cost of accessing one page at random from a relation
157  *        (or sort temp file) of the given size in pages.
158  *
159  * The simplistic model that the cost is random_page_cost is what we want
160  * to use for large relations; but for small ones that is a serious
161  * overestimate because of the effects of caching.      This routine tries to
162  * account for that.
163  *
164  * Unfortunately we don't have any good way of estimating the effective cache
165  * size we are working with --- we know that Postgres itself has NBuffers
166  * internal buffers, but the size of the kernel's disk cache is uncertain,
167  * and how much of it we get to use is even less certain.  We punt the problem
168  * for now by assuming we are given an effective_cache_size parameter.
169  *
170  * Given a guesstimated cache size, we estimate the actual I/O cost per page
171  * with the entirely ad-hoc equations:
172  *      if relpages >= effective_cache_size:
173  *              random_page_cost * (1 - (effective_cache_size/relpages)/2)
174  *      if relpages < effective_cache_size:
175  *              1 + (random_page_cost/2-1) * (relpages/effective_cache_size) ** 2
176  * These give the right asymptotic behavior (=> 1.0 as relpages becomes
177  * small, => random_page_cost as it becomes large) and meet in the middle
178  * with the estimate that the cache is about 50% effective for a relation
179  * of the same size as effective_cache_size.  (XXX this is probably all
180  * wrong, but I haven't been able to find any theory about how effective
181  * a disk cache should be presumed to be.)
182  */
183 static Cost
184 cost_nonsequential_access(double relpages)
185 {
186         double          relsize;
187
188         /* don't crash on bad input data */
189         if (relpages <= 0.0 || effective_cache_size <= 0.0)
190                 return random_page_cost;
191
192         relsize = relpages / effective_cache_size;
193
194         if (relsize >= 1.0)
195                 return random_page_cost * (1.0 - 0.5 / relsize);
196         else
197                 return 1.0 + (random_page_cost * 0.5 - 1.0) * relsize * relsize;
198 }
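/*
 * Worked example (illustrative only): assuming random_page_cost = 4.0 and
 * effective_cache_size = 1000 pages, the equations above give
 *              relpages =  500:   1.0 + (4.0/2 - 1.0) * 0.5^2   = 1.25
 *              relpages = 1000:   4.0 * (1.0 - 0.5/1.0)         = 2.00
 *              relpages = 2000:   4.0 * (1.0 - 0.5/2.0)         = 3.00
 * i.e. the per-page cost climbs smoothly from ~1.0 for tiny relations
 * toward random_page_cost for relations much larger than the assumed
 * cache, passing through half of random_page_cost when relpages equals
 * effective_cache_size.
 */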
199
200 /*
201  * cost_index
202  *        Determines and returns the cost of scanning a relation using an index.
203  *
204  *        NOTE: an indexscan plan node can actually represent several passes,
205  *        but here we consider the cost of just one pass.
206  *
207  * 'root' is the query root
208  * 'baserel' is the base relation the index is for
209  * 'index' is the index to be used
210  * 'indexQuals' is the list of applicable qual clauses (implicit AND semantics)
211  * 'is_injoin' is T if we are considering using the index scan as the inside
212  *              of a nestloop join (hence, some of the indexQuals are join clauses)
213  *
214  * NOTE: 'indexQuals' must contain only clauses usable as index restrictions.
215  * Any additional quals evaluated as qpquals may reduce the number of returned
216  * tuples, but they won't reduce the number of tuples we have to fetch from
217  * the table, so they don't reduce the scan cost.
218  */
219 void
220 cost_index(Path *path, Query *root,
221                    RelOptInfo *baserel,
222                    IndexOptInfo *index,
223                    List *indexQuals,
224                    bool is_injoin)
225 {
226         Cost            startup_cost = 0;
227         Cost            run_cost = 0;
228         Cost            indexStartupCost;
229         Cost            indexTotalCost;
230         Selectivity indexSelectivity;
231         double          indexCorrelation,
232                                 csquared;
233         Cost            min_IO_cost,
234                                 max_IO_cost;
235         Cost            cpu_per_tuple;
236         double          tuples_fetched;
237         double          pages_fetched;
238         double          T,
239                                 b;
240
241         /* Should only be applied to base relations */
242         Assert(IsA(baserel, RelOptInfo) &&
243                    IsA(index, IndexOptInfo));
244         Assert(baserel->relid > 0);
245         Assert(baserel->rtekind == RTE_RELATION);
246
247         if (!enable_indexscan)
248                 startup_cost += disable_cost;
249
250         /*
251          * Call index-access-method-specific code to estimate the processing
252          * cost for scanning the index, as well as the selectivity of the
253          * index (ie, the fraction of main-table tuples we will have to
254          * retrieve) and its correlation to the main-table tuple order.
255          */
256         OidFunctionCall8(index->amcostestimate,
257                                          PointerGetDatum(root),
258                                          PointerGetDatum(baserel),
259                                          PointerGetDatum(index),
260                                          PointerGetDatum(indexQuals),
261                                          PointerGetDatum(&indexStartupCost),
262                                          PointerGetDatum(&indexTotalCost),
263                                          PointerGetDatum(&indexSelectivity),
264                                          PointerGetDatum(&indexCorrelation));
265
266         /* all costs for touching index itself included here */
267         startup_cost += indexStartupCost;
268         run_cost += indexTotalCost - indexStartupCost;
269
270         /*----------
271          * Estimate number of main-table tuples and pages fetched.
272          *
273          * When the index ordering is uncorrelated with the table ordering,
274          * we use an approximation proposed by Mackert and Lohman, "Index Scans
275          * Using a Finite LRU Buffer: A Validated I/O Model", ACM Transactions
276          * on Database Systems, Vol. 14, No. 3, September 1989, Pages 401-424.
277          * The Mackert and Lohman approximation is that the number of pages
278          * fetched is
279          *      PF =
280          *              min(2TNs/(2T+Ns), T)                    when T <= b
281          *              2TNs/(2T+Ns)                                    when T > b and Ns <= 2Tb/(2T-b)
282          *              b + (Ns - 2Tb/(2T-b))*(T-b)/T   when T > b and Ns > 2Tb/(2T-b)
283          * where
284          *              T = # pages in table
285          *              N = # tuples in table
286          *              s = selectivity = fraction of table to be scanned
287          *              b = # buffer pages available (we include kernel space here)
288          *
289          * When the index ordering is exactly correlated with the table ordering
290          * (just after a CLUSTER, for example), the number of pages fetched should
291          * be just sT.  What's more, these will be sequential fetches, not the
292          * random fetches that occur in the uncorrelated case.  So, depending on
293          * the extent of correlation, we should estimate the actual I/O cost
294          * somewhere between s * T * 1.0 and PF * random_page_cost.  We currently
295          * interpolate linearly between these two endpoints based on the
296          * correlation squared (XXX is that appropriate?).
297          *
298          * In any case the number of tuples fetched is Ns.
299          *----------
300          */
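        /*
         * Worked example (illustrative only): with T = 10000 pages, b = 1000
         * buffer pages, and Ns = 5000 tuples fetched, T > b and
         *              2Tb/(2T-b) = 20000000/19000 ~ 1053 < Ns,
         * so the third branch applies:
         *              PF = 1000 + (5000 - 1053) * (10000 - 1000)/10000 ~ 4553 pages.
         * Had the same 5000 tuples come from a 1000-page table (T <= b), the
         * first branch would give PF = min(2*1000*5000/(2*1000+5000), 1000),
         * which clamps to 1000 pages.
         */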
301
302         tuples_fetched = indexSelectivity * baserel->tuples;
303         /* Don't believe estimates less than 1... */
304         if (tuples_fetched < 1.0)
305                 tuples_fetched = 1.0;
306
307         /* This part is the Mackert and Lohman formula */
308
309         T = (baserel->pages > 1) ? (double) baserel->pages : 1.0;
310         b = (effective_cache_size > 1) ? effective_cache_size : 1.0;
311
312         if (T <= b)
313         {
314                 pages_fetched =
315                         (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
316                 if (pages_fetched > T)
317                         pages_fetched = T;
318         }
319         else
320         {
321                 double          lim;
322
323                 lim = (2.0 * T * b) / (2.0 * T - b);
324                 if (tuples_fetched <= lim)
325                 {
326                         pages_fetched =
327                                 (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
328                 }
329                 else
330                 {
331                         pages_fetched =
332                                 b + (tuples_fetched - lim) * (T - b) / T;
333                 }
334         }
335
336         /*
337          * min_IO_cost corresponds to the perfectly correlated case
338          * (csquared=1), max_IO_cost to the perfectly uncorrelated case
339          * (csquared=0).  Note that we just charge random_page_cost per page
340          * in the uncorrelated case, rather than using
341          * cost_nonsequential_access, since we've already accounted for
342          * caching effects by using the Mackert model.
343          */
344         min_IO_cost = ceil(indexSelectivity * T);
345         max_IO_cost = pages_fetched * random_page_cost;
346
347         /*
348          * Now interpolate based on estimated index order correlation to get
349          * total disk I/O cost for main table accesses.
350          */
351         csquared = indexCorrelation * indexCorrelation;
352
353         run_cost += max_IO_cost + csquared * (min_IO_cost - max_IO_cost);
354
355         /*
356          * Estimate CPU costs per tuple.
357          *
358          * Normally the indexquals will be removed from the list of restriction
359          * clauses that we have to evaluate as qpquals, so we should subtract
360          * their costs from baserestrictcost.  But if we are doing a join then
361          * some of the indexquals are join clauses and shouldn't be subtracted.
362          * Rather than work out exactly how much to subtract, we don't subtract
363          * anything in the join case.
364          *
365          * XXX For a lossy index, not all the quals will be removed and so we
366          * really shouldn't subtract their costs; but detecting that seems more
367          * expensive than it's worth.
368          */
369         startup_cost += baserel->baserestrictcost.startup;
370         cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
371
372         if (!is_injoin)
373         {
374                 QualCost        index_qual_cost;
375
376                 cost_qual_eval(&index_qual_cost, indexQuals);
377                 cpu_per_tuple -= index_qual_cost.per_tuple;
378         }
379
380         run_cost += cpu_per_tuple * tuples_fetched;
381
382         path->startup_cost = startup_cost;
383         path->total_cost = startup_cost + run_cost;
384 }
385
386 /*
387  * cost_tidscan
388  *        Determines and returns the cost of scanning a relation using TIDs.
389  */
390 void
391 cost_tidscan(Path *path, Query *root,
392                          RelOptInfo *baserel, List *tideval)
393 {
394         Cost            startup_cost = 0;
395         Cost            run_cost = 0;
396         Cost            cpu_per_tuple;
397         int                     ntuples = length(tideval);
398
399         /* Should only be applied to base relations */
400         Assert(baserel->relid > 0);
401         Assert(baserel->rtekind == RTE_RELATION);
402
403         if (!enable_tidscan)
404                 startup_cost += disable_cost;
405
406         /* disk costs --- assume each tuple on a different page */
407         run_cost += random_page_cost * ntuples;
408
409         /* CPU costs */
410         startup_cost += baserel->baserestrictcost.startup;
411         cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
412         run_cost += cpu_per_tuple * ntuples;
413
414         path->startup_cost = startup_cost;
415         path->total_cost = startup_cost + run_cost;
416 }
417
418 /*
419  * cost_functionscan
420  *        Determines and returns the cost of scanning a function RTE.
421  */
422 void
423 cost_functionscan(Path *path, Query *root, RelOptInfo *baserel)
424 {
425         Cost            startup_cost = 0;
426         Cost            run_cost = 0;
427         Cost            cpu_per_tuple;
428
429         /* Should only be applied to base relations that are functions */
430         Assert(baserel->relid > 0);
431         Assert(baserel->rtekind == RTE_FUNCTION);
432
433         /*
434          * For now, estimate function's cost at one operator eval per function
435          * call.  Someday we should revive the function cost estimate columns
436          * in pg_proc...
437          */
438         cpu_per_tuple = cpu_operator_cost;
439
440         /* Add scanning CPU costs */
441         startup_cost += baserel->baserestrictcost.startup;
442         cpu_per_tuple += cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
443         run_cost += cpu_per_tuple * baserel->tuples;
444
445         path->startup_cost = startup_cost;
446         path->total_cost = startup_cost + run_cost;
447 }
448
449 /*
450  * cost_sort
451  *        Determines and returns the cost of sorting a relation, including
452  *        the cost of reading the input data.
453  *
454  * If the total volume of data to sort is less than SortMem, we will do
455  * an in-memory sort, which requires no I/O and about t*log2(t) tuple
456  * comparisons for t tuples.
457  *
458  * If the total volume exceeds SortMem, we switch to a tape-style merge
459  * algorithm.  There will still be about t*log2(t) tuple comparisons in
460  * total, but we will also need to write and read each tuple once per
461  * merge pass.  We expect about ceil(log6(r)) merge passes where r is the
462  * number of initial runs formed (log6 because tuplesort.c uses six-tape
463  * merging).  Since the average initial run should be about twice SortMem,
464  * we have
465  *              disk traffic = 2 * relsize * ceil(log6(relsize / (2*SortMem)))
466  *              cpu = comparison_cost * t * log2(t)
467  *
468  * The disk traffic is assumed to be half sequential and half random
469  * accesses (XXX can't we refine that guess?)
470  *
471  * We charge two operator evals per tuple comparison, which should be in
472  * the right ballpark in most cases.
473  *
474  * 'pathkeys' is a list of sort keys
475  * 'input_cost' is the total cost for reading the input data
476  * 'tuples' is the number of tuples in the relation
477  * 'width' is the average tuple width in bytes
478  *
479  * NOTE: some callers currently pass NIL for pathkeys because they
480  * can't conveniently supply the sort keys.  Since this routine doesn't
481  * currently do anything with pathkeys anyway, that doesn't matter...
482  * but if it ever does, it should react gracefully to lack of key data.
483  * (Actually, the thing we'd most likely be interested in is just the number
484  * of sort keys, which all callers *could* supply.)
485  */
486 void
487 cost_sort(Path *path, Query *root,
488                   List *pathkeys, Cost input_cost, double tuples, int width)
489 {
490         Cost            startup_cost = input_cost;
491         Cost            run_cost = 0;
492         double          nbytes = relation_byte_size(tuples, width);
493         long            sortmembytes = SortMem * 1024L;
494
495         if (!enable_sort)
496                 startup_cost += disable_cost;
497
498         /*
499          * We want to be sure the cost of a sort is never estimated as zero,
500          * even if passed-in tuple count is zero.  Besides, mustn't do
501          * log(0)...
502          */
503         if (tuples < 2.0)
504                 tuples = 2.0;
505
506         /*
507          * CPU costs
508          *
509          * Assume about two operator evals per tuple comparison and N log2 N
510          * comparisons
511          */
512         startup_cost += 2.0 * cpu_operator_cost * tuples * LOG2(tuples);
513
514         /* disk costs */
515         if (nbytes > sortmembytes)
516         {
517                 double          npages = ceil(nbytes / BLCKSZ);
518                 double          nruns = nbytes / (sortmembytes * 2);
519                 double          log_runs = ceil(LOG6(nruns));
520                 double          npageaccesses;
521
522                 if (log_runs < 1.0)
523                         log_runs = 1.0;
524                 npageaccesses = 2.0 * npages * log_runs;
525                 /* Assume half are sequential (cost 1), half are not */
526                 startup_cost += npageaccesses *
527                         (1.0 + cost_nonsequential_access(npages)) * 0.5;
528         }
529
530         /*
531          * Also charge a small amount (arbitrarily set equal to operator cost)
532          * per extracted tuple.
533          */
534         run_cost += cpu_operator_cost * tuples;
535
536         path->startup_cost = startup_cost;
537         path->total_cost = startup_cost + run_cost;
538 }
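/*
 * Worked example (illustrative only): sorting t = 100000 tuples costs about
 *              2 * 0.0025 * 100000 * log2(100000) ~ 8300
 * in comparison CPU, assuming the default cpu_operator_cost of 0.0025.  If
 * the data volume were, say, 24 MB with SortMem = 1024 kB and the default
 * 8 kB block size, the external merge would add disk costs for
 * npages ~ 3072, nruns = 12, ceil(log6(12)) = 2 merge passes, hence
 * 2 * 3072 * 2 = 12288 page accesses, each charged at the average of a
 * sequential and a nonsequential fetch as in the code above.
 */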
539
540 /*
541  * cost_material
542  *        Determines and returns the cost of materializing a relation, including
543  *        the cost of reading the input data.
544  *
545  * If the total volume of data to materialize exceeds SortMem, we will need
546  * to write it to disk, so the cost is much higher in that case.
547  */
548 void
549 cost_material(Path *path,
550                           Cost input_cost, double tuples, int width)
551 {
552         Cost            startup_cost = input_cost;
553         Cost            run_cost = 0;
554         double          nbytes = relation_byte_size(tuples, width);
555         long            sortmembytes = SortMem * 1024L;
556
557         /* disk costs */
558         if (nbytes > sortmembytes)
559         {
560                 double          npages = ceil(nbytes / BLCKSZ);
561
562                 /* We'll write during startup and read during retrieval */
563                 startup_cost += npages;
564                 run_cost += npages;
565         }
566
567         /*
568          * Also charge a small amount per extracted tuple.  We use cpu_tuple_cost
569          * so that it doesn't appear worthwhile to materialize a bare seqscan.
570          */
571         run_cost += cpu_tuple_cost * tuples;
572
573         path->startup_cost = startup_cost;
574         path->total_cost = startup_cost + run_cost;
575 }
576
577 /*
578  * cost_agg
579  *              Determines and returns the cost of performing an Agg plan node,
580  *              including the cost of its input.
581  *
582  * Note: when aggstrategy == AGG_SORTED, caller must ensure that input costs
583  * are for appropriately-sorted input.
584  */
585 void
586 cost_agg(Path *path, Query *root,
587                  AggStrategy aggstrategy, int numAggs,
588                  int numGroupCols, double numGroups,
589                  Cost input_startup_cost, Cost input_total_cost,
590                  double input_tuples)
591 {
592         Cost            startup_cost;
593         Cost            total_cost;
594
595         /*
596          * We charge one cpu_operator_cost per aggregate function per input
597          * tuple, and another one per output tuple (corresponding to transfn
598          * and finalfn calls respectively).  If we are grouping, we charge an
599          * additional cpu_operator_cost per grouping column per input tuple
600          * for grouping comparisons.
601          *
602          * We will produce a single output tuple if not grouping,
603          * and a tuple per group otherwise.
604          */
605         if (aggstrategy == AGG_PLAIN)
606         {
607                 startup_cost = input_total_cost;
608                 startup_cost += cpu_operator_cost * (input_tuples + 1) * numAggs;
609                 /* we aren't grouping */
610                 total_cost = startup_cost;
611         }
612         else if (aggstrategy == AGG_SORTED)
613         {
614                 /* Here we are able to deliver output on-the-fly */
615                 startup_cost = input_startup_cost;
616                 total_cost = input_total_cost;
617                 total_cost += cpu_operator_cost * (input_tuples + numGroups) * numAggs;
618                 total_cost += cpu_operator_cost * input_tuples * numGroupCols;
619         }
620         else
621         {
622                 /* must be AGG_HASHED */
623                 startup_cost = input_total_cost;
624                 startup_cost += cpu_operator_cost * input_tuples * numAggs;
625                 startup_cost += cpu_operator_cost * input_tuples * numGroupCols;
626                 total_cost = startup_cost;
627                 total_cost += cpu_operator_cost * numGroups * numAggs;
628         }
629
630         path->startup_cost = startup_cost;
631         path->total_cost = total_cost;
632 }
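/*
 * Worked example (illustrative only): with 100000 input tuples, 1000 groups,
 * 2 aggregates, 1 grouping column, and cpu_operator_cost = 0.0025,
 * AGG_SORTED adds about
 *              0.0025 * (100000 + 1000) * 2 + 0.0025 * 100000 * 1  ~ 755
 * to total_cost while leaving startup_cost at the input's startup cost;
 * AGG_HASHED does roughly the same total work (~750 + 5), but nearly all
 * of it lands in startup_cost, since nothing can be emitted until the
 * whole input has been hashed.
 */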
633
634 /*
635  * cost_group
636  *              Determines and returns the cost of performing a Group plan node,
637  *              including the cost of its input.
638  *
639  * Note: caller must ensure that input costs are for appropriately-sorted
640  * input.
641  */
642 void
643 cost_group(Path *path, Query *root,
644                    int numGroupCols, double numGroups,
645                    Cost input_startup_cost, Cost input_total_cost,
646                    double input_tuples)
647 {
648         Cost            startup_cost;
649         Cost            total_cost;
650
651         startup_cost = input_startup_cost;
652         total_cost = input_total_cost;
653
654         /*
655          * Charge one cpu_operator_cost per comparison per input tuple. We
656          * assume all columns get compared for most of the tuples.
657          */
658         total_cost += cpu_operator_cost * input_tuples * numGroupCols;
659
660         path->startup_cost = startup_cost;
661         path->total_cost = total_cost;
662 }
663
664 /*
665  * cost_nestloop
666  *        Determines and returns the cost of joining two relations using the
667  *        nested loop algorithm.
668  *
669  * 'path' is already filled in except for the cost fields
670  */
671 void
672 cost_nestloop(NestPath *path, Query *root)
673 {
674         Path       *outer_path = path->outerjoinpath;
675         Path       *inner_path = path->innerjoinpath;
676         List       *restrictlist = path->joinrestrictinfo;
677         Cost            startup_cost = 0;
678         Cost            run_cost = 0;
679         Cost            cpu_per_tuple;
680         QualCost        restrict_qual_cost;
681         double          outer_path_rows = PATH_ROWS(outer_path);
682         double          inner_path_rows = PATH_ROWS(inner_path);
683         double          ntuples;
684         Selectivity     joininfactor;
685
686         if (!enable_nestloop)
687                 startup_cost += disable_cost;
688
689         /*
690          * If we're doing JOIN_IN then we will stop scanning inner tuples for an
691          * outer tuple as soon as we have one match.  Account for the effects of
692          * this by scaling down the cost estimates in proportion to the expected
693          * output size.  (This assumes that all the quals attached to the join are
694          * IN quals, which should be true.)
695          *
696          * Note: it's probably bogus to use the normal selectivity calculation
697          * here when either the outer or inner path is a UniquePath.
698          */
699         if (path->jointype == JOIN_IN)
700         {
701                 Selectivity     qual_selec = approx_selectivity(root, restrictlist,
702                                                                                                         path->jointype);
703                 double  qptuples;
704
705                 qptuples = ceil(qual_selec * outer_path_rows * inner_path_rows);
706                 if (qptuples > path->path.parent->rows)
707                         joininfactor = path->path.parent->rows / qptuples;
708                 else
709                         joininfactor = 1.0;
710         }
711         else
712                 joininfactor = 1.0;
713
714         /* cost of source data */
715
716         /*
717          * NOTE: clearly, we must pay both outer and inner paths' startup_cost
718          * before we can start returning tuples, so the join's startup cost is
719          * their sum.  What's not so clear is whether the inner path's
720          * startup_cost must be paid again on each rescan of the inner path.
721          * This is not true if the inner path is materialized or is a hashjoin,
722          * but probably is true otherwise.
723          */
724         startup_cost += outer_path->startup_cost + inner_path->startup_cost;
725         run_cost += outer_path->total_cost - outer_path->startup_cost;
726         if (IsA(inner_path, MaterialPath) ||
727                 IsA(inner_path, HashPath))
728         {
729                 /* charge only run cost for each iteration of inner path */
730         }
731         else
732         {
733                 /*
734                  * charge startup cost for each iteration of inner path, except we
735                  * already charged the first startup_cost in our own startup
736                  */
737                 run_cost += (outer_path_rows - 1) * inner_path->startup_cost;
738         }
739         run_cost += outer_path_rows *
740                 (inner_path->total_cost - inner_path->startup_cost) * joininfactor;
741
742         /*
743          * Compute number of tuples processed (not number emitted!).
744          * If inner path is an indexscan, be sure to use its estimated output row
745          * count, which may be lower than the restriction-clause-only row count of
746          * its parent.  (We don't include this case in the PATH_ROWS macro because
747          * it applies *only* to a nestloop's inner relation.)  Note: it is correct
748          * to use the unadjusted inner_path_rows in the above calculation for
749          * joininfactor, since otherwise we'd be double-counting the selectivity
750          * of the join clause being used for the index.
751          */
752         if (IsA(inner_path, IndexPath))
753                 inner_path_rows = ((IndexPath *) inner_path)->rows;
754
755         ntuples = inner_path_rows * outer_path_rows;
756
757         /* CPU costs */
758         cost_qual_eval(&restrict_qual_cost, restrictlist);
759         startup_cost += restrict_qual_cost.startup;
760         cpu_per_tuple = cpu_tuple_cost + restrict_qual_cost.per_tuple;
761         run_cost += cpu_per_tuple * ntuples;
762
763         path->path.startup_cost = startup_cost;
764         path->path.total_cost = startup_cost + run_cost;
765 }
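/*
 * Worked example (illustrative only): with 1000 outer rows and an inner
 * path whose startup_cost is 5.0 and whose run cost is 2.0 per scan
 * (ignoring the JOIN_IN adjustment), an ordinary inner path contributes
 *              5.0 (charged once in startup) + 999 * 5.0 + 1000 * 2.0  ~ 7000
 * whereas a MaterialPath inner would avoid the 999 repeated startup
 * charges, which is why materializing the inner side can win even though
 * cost_material adds its own per-tuple charge.
 */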
766
767 /*
768  * cost_mergejoin
769  *        Determines and returns the cost of joining two relations using the
770  *        merge join algorithm.
771  *
772  * 'path' is already filled in except for the cost fields
773  *
774  * Notes: path's mergeclauses should be a subset of the joinrestrictinfo list;
775  * outersortkeys and innersortkeys are lists of the keys to be used
776  * to sort the outer and inner relations, or NIL if no explicit
777  * sort is needed because the source path is already ordered.
778  */
779 void
780 cost_mergejoin(MergePath *path, Query *root)
781 {
782         Path       *outer_path = path->jpath.outerjoinpath;
783         Path       *inner_path = path->jpath.innerjoinpath;
784         List       *restrictlist = path->jpath.joinrestrictinfo;
785         List       *mergeclauses = path->path_mergeclauses;
786         List       *outersortkeys = path->outersortkeys;
787         List       *innersortkeys = path->innersortkeys;
788         Cost            startup_cost = 0;
789         Cost            run_cost = 0;
790         Cost            cpu_per_tuple;
791         Selectivity     merge_selec;
792         Selectivity     qp_selec;
793         QualCost        merge_qual_cost;
794         QualCost        qp_qual_cost;
795         RestrictInfo *firstclause;
796         List       *qpquals;
797         double          outer_path_rows = PATH_ROWS(outer_path);
798         double          inner_path_rows = PATH_ROWS(inner_path);
799         double          outer_rows,
800                                 inner_rows;
801         double          mergejointuples,
802                                 rescannedtuples;
803         double          qptuples;
804         double          rescanratio;
805         Selectivity outerscansel,
806                                 innerscansel;
807         Selectivity     joininfactor;
808         Path            sort_path;              /* dummy for result of cost_sort */
809
810         if (!enable_mergejoin)
811                 startup_cost += disable_cost;
812
813         /*
814          * Compute cost and selectivity of the mergequals and qpquals (other
815          * restriction clauses) separately.  We use approx_selectivity here
816          * for speed --- in most cases, any errors won't affect the result much.
817          *
818          * Note: it's probably bogus to use the normal selectivity calculation
819          * here when either the outer or inner path is a UniquePath.
820          */
821         merge_selec = approx_selectivity(root, mergeclauses,
822                                                                          path->jpath.jointype);
823         cost_qual_eval(&merge_qual_cost, mergeclauses);
824         qpquals = set_ptrDifference(restrictlist, mergeclauses);
825         qp_selec = approx_selectivity(root, qpquals,
826                                                                   path->jpath.jointype);
827         cost_qual_eval(&qp_qual_cost, qpquals);
828         freeList(qpquals);
829
830         /* approx # tuples passing the merge quals */
831         mergejointuples = ceil(merge_selec * outer_path_rows * inner_path_rows);
832         /* approx # tuples passing qpquals as well */
833         qptuples = ceil(mergejointuples * qp_selec);
834
835         /*
836          * When there are equal merge keys in the outer relation, the mergejoin
837          * must rescan any matching tuples in the inner relation.  This means
838          * re-fetching inner tuples.  Our cost model for this is that a re-fetch
839          * costs the same as an original fetch, which is probably an overestimate;
840          * but on the other hand we ignore the bookkeeping costs of mark/restore.
841          * Not clear if it's worth developing a more refined model.
842          *
843          * The number of re-fetches can be estimated approximately as size of
844          * merge join output minus size of inner relation.  Assume that the
845          * distinct key values are 1, 2, ..., and denote the number of values of
846          * each key in the outer relation as m1, m2, ...; in the inner relation,
847          * n1, n2, ...  Then we have
848          *
849          *      size of join = m1 * n1 + m2 * n2 + ...
850          *
851          *      number of rescanned tuples = (m1 - 1) * n1 + (m2 - 1) * n2 + ...
852          *              = m1 * n1 + m2 * n2 + ... - (n1 + n2 + ...)
853          *              = size of join - size of inner relation
854          *
855          * This equation works correctly for outer tuples having no inner match
856          * (nk = 0), but not for inner tuples having no outer match (mk = 0);
857          * we are effectively subtracting those from the number of rescanned
858          * tuples, when we should not.  Can we do better without expensive
859          * selectivity computations?
860          */
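        /*
         * Worked example (illustrative only): if the outer keys are {1,1,2} and
         * the inner keys are {1,1,2,3}, then size of join = 2*2 + 1*1 = 5 and
         * size of inner = 4, so we estimate 1 rescanned tuple; the true number
         * is 2 (both inner "1" tuples are re-fetched for the second outer "1").
         * The shortfall comes from the unmatched inner key 3; this is exactly
         * the inaccuracy described above.
         */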
861         if (IsA(outer_path, UniquePath))
862                 rescannedtuples = 0;
863         else
864         {
865                 rescannedtuples = mergejointuples - inner_path_rows;
866                 /* Must clamp because of possible underestimate */
867                 if (rescannedtuples < 0)
868                         rescannedtuples = 0;
869         }
870         /* We'll inflate inner run cost this much to account for rescanning */
871         rescanratio = 1.0 + (rescannedtuples / inner_path_rows);
872
873         /*
874          * A merge join will stop as soon as it exhausts either input stream.
875          * Estimate fraction of the left and right inputs that will actually
876          * need to be scanned.  We use only the first (most significant) merge
877          * clause for this purpose.
878          *
879          * Since this calculation is somewhat expensive, and will be the same for
880          * all mergejoin paths associated with the merge clause, we cache the
881          * results in the RestrictInfo node.
882          */
883         firstclause = (RestrictInfo *) lfirst(mergeclauses);
884         if (firstclause->left_mergescansel < 0)         /* not computed yet? */
885                 mergejoinscansel(root, (Node *) firstclause->clause,
886                                                  &firstclause->left_mergescansel,
887                                                  &firstclause->right_mergescansel);
888
889         if (bms_is_subset(firstclause->left_relids, outer_path->parent->relids))
890         {
891                 /* left side of clause is outer */
892                 outerscansel = firstclause->left_mergescansel;
893                 innerscansel = firstclause->right_mergescansel;
894         }
895         else
896         {
897                 /* left side of clause is inner */
898                 outerscansel = firstclause->right_mergescansel;
899                 innerscansel = firstclause->left_mergescansel;
900         }
901
902         /* convert selectivity to row count; must scan at least one row */
903
904         outer_rows = ceil(outer_path_rows * outerscansel);
905         if (outer_rows < 1)
906                 outer_rows = 1;
907         inner_rows = ceil(inner_path_rows * innerscansel);
908         if (inner_rows < 1)
909                 inner_rows = 1;
910
911         /*
912          * Readjust scan selectivities to account for above rounding.  This is
913          * normally an insignificant effect, but when there are only a few rows
914          * in the inputs, failing to do this makes for a large percentage error.
915          */
916         outerscansel = outer_rows / outer_path_rows;
917         innerscansel = inner_rows / inner_path_rows;
918
919         /* cost of source data */
920
921         if (outersortkeys)                      /* do we need to sort outer? */
922         {
923                 cost_sort(&sort_path,
924                                   root,
925                                   outersortkeys,
926                                   outer_path->total_cost,
927                                   outer_path_rows,
928                                   outer_path->parent->width);
929                 startup_cost += sort_path.startup_cost;
930                 run_cost += (sort_path.total_cost - sort_path.startup_cost)
931                         * outerscansel;
932         }
933         else
934         {
935                 startup_cost += outer_path->startup_cost;
936                 run_cost += (outer_path->total_cost - outer_path->startup_cost)
937                         * outerscansel;
938         }
939
940         if (innersortkeys)                      /* do we need to sort inner? */
941         {
942                 cost_sort(&sort_path,
943                                   root,
944                                   innersortkeys,
945                                   inner_path->total_cost,
946                                   inner_path_rows,
947                                   inner_path->parent->width);
948                 startup_cost += sort_path.startup_cost;
949                 run_cost += (sort_path.total_cost - sort_path.startup_cost)
950                         * innerscansel * rescanratio;
951         }
952         else
953         {
954                 startup_cost += inner_path->startup_cost;
955                 run_cost += (inner_path->total_cost - inner_path->startup_cost)
956                         * innerscansel * rescanratio;
957         }
958
959         /* CPU costs */
960
961         /*
962          * If we're doing JOIN_IN then we will stop outputting inner
963          * tuples for an outer tuple as soon as we have one match.  Account for
964          * the effects of this by scaling down the cost estimates in proportion
965          * to the expected output size.  (This assumes that all the quals attached
966          * to the join are IN quals, which should be true.)
967          */
968         if (path->jpath.jointype == JOIN_IN &&
969                 qptuples > path->jpath.path.parent->rows)
970                 joininfactor = path->jpath.path.parent->rows / qptuples;
971         else
972                 joininfactor = 1.0;
973
974         /*
975          * The number of tuple comparisons needed is approximately number of
976          * outer rows plus number of inner rows plus number of rescanned
977          * tuples (can we refine this?).  At each one, we need to evaluate
978          * the mergejoin quals.  NOTE: JOIN_IN mode does not save any work
979          * here, so do NOT include joininfactor.
980          */
981         startup_cost += merge_qual_cost.startup;
982         run_cost += merge_qual_cost.per_tuple *
983                 (outer_rows + inner_rows * rescanratio);
984
985         /*
986          * For each tuple that gets through the mergejoin proper, we charge
987          * cpu_tuple_cost plus the cost of evaluating additional restriction
988          * clauses that are to be applied at the join.  (This is pessimistic
989          * since not all of the quals may get evaluated at each tuple.)  This
990          * work is skipped in JOIN_IN mode, so apply the factor.
991          */
992         startup_cost += qp_qual_cost.startup;
993         cpu_per_tuple = cpu_tuple_cost + qp_qual_cost.per_tuple;
994         run_cost += cpu_per_tuple * mergejointuples * joininfactor;
995
996         path->jpath.path.startup_cost = startup_cost;
997         path->jpath.path.total_cost = startup_cost + run_cost;
998 }
999
1000 /*
1001  * cost_hashjoin
1002  *        Determines and returns the cost of joining two relations using the
1003  *        hash join algorithm.
1004  *
1005  * 'path' is already filled in except for the cost fields
1006  *
1007  * Note: path's hashclauses should be a subset of the joinrestrictinfo list
1008  */
1009 void
1010 cost_hashjoin(HashPath *path, Query *root)
1011 {
1012         Path       *outer_path = path->jpath.outerjoinpath;
1013         Path       *inner_path = path->jpath.innerjoinpath;
1014         List       *restrictlist = path->jpath.joinrestrictinfo;
1015         List       *hashclauses = path->path_hashclauses;
1016         Cost            startup_cost = 0;
1017         Cost            run_cost = 0;
1018         Cost            cpu_per_tuple;
1019         Selectivity     hash_selec;
1020         Selectivity     qp_selec;
1021         QualCost        hash_qual_cost;
1022         QualCost        qp_qual_cost;
1023         double          hashjointuples;
1024         double          qptuples;
1025         double          outer_path_rows = PATH_ROWS(outer_path);
1026         double          inner_path_rows = PATH_ROWS(inner_path);
1027         double          outerbytes = relation_byte_size(outer_path_rows,
1028                                                                                           outer_path->parent->width);
1029         double          innerbytes = relation_byte_size(inner_path_rows,
1030                                                                                           inner_path->parent->width);
1031         int                     num_hashclauses = length(hashclauses);
1032         int                     virtualbuckets;
1033         int                     physicalbuckets;
1034         int                     numbatches;
1035         Selectivity innerbucketsize;
1036         Selectivity     joininfactor;
1037         List       *hcl;
1038         List       *qpquals;
1039
1040         if (!enable_hashjoin)
1041                 startup_cost += disable_cost;
1042
1043         /*
1044          * Compute cost and selectivity of the hashquals and qpquals (other
1045          * restriction clauses) separately.  We use approx_selectivity here
1046          * for speed --- in most cases, any errors won't affect the result much.
1047          *
1048          * Note: it's probably bogus to use the normal selectivity calculation
1049          * here when either the outer or inner path is a UniquePath.
1050          */
1051         hash_selec = approx_selectivity(root, hashclauses,
1052                                                                         path->jpath.jointype);
1053         cost_qual_eval(&hash_qual_cost, hashclauses);
1054         qpquals = set_ptrDifference(restrictlist, hashclauses);
1055         qp_selec = approx_selectivity(root, qpquals,
1056                                                                   path->jpath.jointype);
1057         cost_qual_eval(&qp_qual_cost, qpquals);
1058         freeList(qpquals);
1059
1060         /* approx # tuples passing the hash quals */
1061         hashjointuples = ceil(hash_selec * outer_path_rows * inner_path_rows);
1062         /* approx # tuples passing qpquals as well */
1063         qptuples = ceil(hashjointuples * qp_selec);
1064
1065         /* cost of source data */
1066         startup_cost += outer_path->startup_cost;
1067         run_cost += outer_path->total_cost - outer_path->startup_cost;
1068         startup_cost += inner_path->total_cost;
1069
1070         /*
1071          * Cost of computing hash function: must do it once per input tuple.
1072          * We charge one cpu_operator_cost for each column's hash function.
1073          *
1074          * XXX when a hashclause is more complex than a single operator,
1075          * we really should charge the extra eval costs of the left or right
1076          * side, as appropriate, here.  This seems more work than it's worth
1077          * at the moment.
1078          */
1079         startup_cost += cpu_operator_cost * num_hashclauses * inner_path_rows;
1080         run_cost += cpu_operator_cost * num_hashclauses * outer_path_rows;
1081
1082         /* Get hash table size that executor would use for inner relation */
1083         ExecChooseHashTableSize(inner_path_rows,
1084                                                         inner_path->parent->width,
1085                                                         &virtualbuckets,
1086                                                         &physicalbuckets,
1087                                                         &numbatches);
1088
1089         /*
1090          * Determine bucketsize fraction for inner relation.  We use the
1091          * smallest bucketsize estimated for any individual hashclause;
1092          * this is undoubtedly conservative.
1093          *
1094          * BUT: if inner relation has been unique-ified, we can assume it's
1095          * good for hashing.  This is important both because it's the right
1096          * answer, and because we avoid contaminating the cache with a value
1097          * that's wrong for non-unique-ified paths.
1098          */
1099         if (IsA(inner_path, UniquePath))
1100                 innerbucketsize = 1.0 / virtualbuckets;
1101         else
1102         {
1103                 innerbucketsize = 1.0;
1104                 foreach(hcl, hashclauses)
1105                 {
1106                         RestrictInfo *restrictinfo = (RestrictInfo *) lfirst(hcl);
1107                         Selectivity thisbucketsize;
1108
1109                         Assert(IsA(restrictinfo, RestrictInfo));
1110
1111                         /*
1112                          * First we have to figure out which side of the hashjoin clause
1113                          * is the inner side.
1114                          *
1115                          * Since we tend to visit the same clauses over and over when
1116                          * planning a large query, we cache the bucketsize estimate in the
1117                          * RestrictInfo node to avoid repeated lookups of statistics.
1118                          */
1119                         if (bms_is_subset(restrictinfo->right_relids,
1120                                                           inner_path->parent->relids))
1121                         {
1122                                 /* righthand side is inner */
1123                                 thisbucketsize = restrictinfo->right_bucketsize;
1124                                 if (thisbucketsize < 0)
1125                                 {
1126                                         /* not cached yet */
1127                                         thisbucketsize =
1128                                                 estimate_hash_bucketsize(root,
1129                                                                         (Var *) get_rightop(restrictinfo->clause),
1130                                                                                                  virtualbuckets);
1131                                         restrictinfo->right_bucketsize = thisbucketsize;
1132                                 }
1133                         }
1134                         else
1135                         {
1136                                 Assert(bms_is_subset(restrictinfo->left_relids,
1137                                                                          inner_path->parent->relids));
1138                                 /* lefthand side is inner */
1139                                 thisbucketsize = restrictinfo->left_bucketsize;
1140                                 if (thisbucketsize < 0)
1141                                 {
1142                                         /* not cached yet */
1143                                         thisbucketsize =
1144                                                 estimate_hash_bucketsize(root,
1145                                                                         (Var *) get_leftop(restrictinfo->clause),
1146                                                                                                  virtualbuckets);
1147                                         restrictinfo->left_bucketsize = thisbucketsize;
1148                                 }
1149                         }
1150
1151                         if (innerbucketsize > thisbucketsize)
1152                                 innerbucketsize = thisbucketsize;
1153                 }
1154         }
1155
1156         /*
1157          * if inner relation is too big then we will need to "batch" the join,
1158          * which implies writing and reading most of the tuples to disk an
1159          * extra time.  Charge one cost unit per page of I/O (correct since it
1160          * should be nice and sequential...).  Writing the inner rel counts as
1161          * startup cost, all the rest as run cost.
1162          */
1163         if (numbatches)
1164         {
1165                 double          outerpages = page_size(outer_path_rows,
1166                                                                                    outer_path->parent->width);
1167                 double          innerpages = page_size(inner_path_rows,
1168                                                                                    inner_path->parent->width);
1169
1170                 startup_cost += innerpages;
1171                 run_cost += innerpages + 2 * outerpages;
1172         }
1173
1174         /* CPU costs */
1175
1176         /*
1177          * If we're doing JOIN_IN then we will stop comparing inner
1178          * tuples to an outer tuple as soon as we have one match.  Account for
1179          * the effects of this by scaling down the cost estimates in proportion
1180          * to the expected output size.  (This assumes that all the quals attached
1181          * to the join are IN quals, which should be true.)
1182          */
1183         if (path->jpath.jointype == JOIN_IN &&
1184                 qptuples > path->jpath.path.parent->rows)
1185                 joininfactor = path->jpath.path.parent->rows / qptuples;
1186         else
1187                 joininfactor = 1.0;
1188
1189         /*
1190          * The number of tuple comparisons needed is the number of outer
1191          * tuples times the typical number of tuples in a hash bucket, which
1192          * is the inner relation size times its bucketsize fraction.  At each
1193          * one, we need to evaluate the hashjoin quals.
1194          */
1195         startup_cost += hash_qual_cost.startup;
1196         run_cost += hash_qual_cost.per_tuple *
1197                 outer_path_rows * ceil(inner_path_rows * innerbucketsize) *
1198                 joininfactor;
1199
1200         /*
1201          * For each tuple that gets through the hashjoin proper, we charge
1202          * cpu_tuple_cost plus the cost of evaluating additional restriction
1203          * clauses that are to be applied at the join.  (This is pessimistic
1204          * since not all of the quals may get evaluated at each tuple.)
1205          */
1206         startup_cost += qp_qual_cost.startup;
1207         cpu_per_tuple = cpu_tuple_cost + qp_qual_cost.per_tuple;
1208         run_cost += cpu_per_tuple * hashjointuples * joininfactor;
1209
1210         /*
1211          * Bias against putting the larger relation on the inside.  We don't want
1212          * an absolute prohibition, though, since the larger relation might have
1213          * a better bucketsize --- and we can't trust the size estimates
1214          * unreservedly, anyway.  Instead, inflate the run cost by the
1215          * square root of the size ratio.  (Why square root?  No real good
1216          * reason, but it seems reasonable...)
1217          *
1218          * Note: before 7.4 we implemented this by inflating startup cost;
1219          * but if there's a disable_cost component in the input paths'
1220          * startup cost, that unfairly penalizes the hash.  Probably it'd
1221          * be better to keep track of disable penalty separately from cost.
1222          */
1223         if (innerbytes > outerbytes && outerbytes > 0)
1224                 run_cost *= sqrt(innerbytes / outerbytes);
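        /*
         * E.g., an inner relation estimated at four times the outer's byte size
         * inflates run_cost by sqrt(4) = 2 (illustrative figures).
         */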
1225
1226         path->jpath.path.startup_cost = startup_cost;
1227         path->jpath.path.total_cost = startup_cost + run_cost;
1228 }
1229
1230 /*
1231  * Estimate hash bucketsize fraction (ie, number of entries in a bucket
1232  * divided by total tuples in relation) if the specified Var is used
1233  * as a hash key.
1234  *
1235  * XXX This is really pretty bogus since we're effectively assuming that the
1236  * distribution of hash keys will be the same after applying restriction
1237  * clauses as it was in the underlying relation.  However, we are not nearly
1238  * smart enough to figure out how the restrict clauses might change the
1239  * distribution, so this will have to do for now.
1240  *
1241  * We are passed the number of buckets the executor will use for the given
1242  * input relation.      If the data were perfectly distributed, with the same
1243  * number of tuples going into each available bucket, then the bucketsize
1244  * fraction would be 1/nbuckets.  But this happy state of affairs will occur
1245  * only if (a) there are at least nbuckets distinct data values, and (b)
1246  * we have a not-too-skewed data distribution.  Otherwise the buckets will
1247  * be nonuniformly occupied.  If the other relation in the join has a key
1248  * distribution similar to this one's, then the most-loaded buckets are
1249  * exactly those that will be probed most often.  Therefore, the "average"
1250  * bucket size for costing purposes should really be taken as something close
1251  * to the "worst case" bucket size.  We try to estimate this by adjusting the
1252  * fraction if there are too few distinct data values, and then scaling up
1253  * by the ratio of the most common value's frequency to the average frequency.
1254  *
1255  * If no statistics are available, use a default estimate of 0.1.  This will
1256  * discourage use of a hash rather strongly if the inner relation is large,
1257  * which is what we want.  We do not want to hash unless we know that the
1258  * inner rel is well-dispersed (or the alternatives seem much worse).
1259  */
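/*
 * Worked example of the computation below (all figures hypothetical): with
 * nbuckets = 1024, no restriction clauses, stanullfrac = 0, and ndistinct =
 * 10000, the initial estimate is 1/1024; if the most common value has
 * frequency 0.02 while the average frequency is 1/10000 = 0.0001, the skew
 * factor is 200, giving estfract = 200/1024, about 0.195, well within the
 * [1.0e-6, 1.0] clamp.
 */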
1260 static Selectivity
1261 estimate_hash_bucketsize(Query *root, Var *var, int nbuckets)
1262 {
1263         Oid                     relid;
1264         RelOptInfo *rel;
1265         HeapTuple       tuple;
1266         Form_pg_statistic stats;
1267         double          estfract,
1268                                 ndistinct,
1269                                 mcvfreq,
1270                                 avgfreq;
1271         float4     *numbers;
1272         int                     nnumbers;
1273
1274         /*
1275          * Lookup info about var's relation and attribute; if none available,
1276          * return default estimate.
1277          */
1278         if (var == NULL || !IsA(var, Var))
1279                 return 0.1;
1280
1281         relid = getrelid(var->varno, root->rtable);
1282         if (relid == InvalidOid)
1283                 return 0.1;
1284
1285         rel = find_base_rel(root, var->varno);
1286
1287         if (rel->tuples <= 0.0 || rel->rows <= 0.0)
1288                 return 0.1;                             /* ensure we can divide below */
1289
1290         tuple = SearchSysCache(STATRELATT,
1291                                                    ObjectIdGetDatum(relid),
1292                                                    Int16GetDatum(var->varattno),
1293                                                    0, 0);
1294         if (!HeapTupleIsValid(tuple))
1295         {
1296                 /*
1297                  * Perhaps the Var is a system attribute; if so, it will have no
1298                  * entry in pg_statistic, but we may be able to guess something
1299                  * about its distribution anyway.
1300                  */
1301                 switch (var->varattno)
1302                 {
1303                         case ObjectIdAttributeNumber:
1304                         case SelfItemPointerAttributeNumber:
1305                                 /* these are unique, so buckets should be well-distributed */
1306                                 return 1.0 / (double) nbuckets;
1307                         case TableOidAttributeNumber:
1308                                 /* hashing this is a terrible idea... */
1309                                 return 1.0;
1310                 }
1311                 return 0.1;
1312         }
1313         stats = (Form_pg_statistic) GETSTRUCT(tuple);
1314
1315         /*
1316          * Obtain number of distinct data values in raw relation.
1317          */
1318         ndistinct = stats->stadistinct;
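        /* negative stadistinct represents minus a fraction of the row count */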
1319         if (ndistinct < 0.0)
1320                 ndistinct = -ndistinct * rel->tuples;
1321
1322         if (ndistinct <= 0.0)           /* ensure we can divide */
1323         {
1324                 ReleaseSysCache(tuple);
1325                 return 0.1;
1326         }
1327
1328         /* Also compute avg freq of all distinct data values in raw relation */
1329         avgfreq = (1.0 - stats->stanullfrac) / ndistinct;
1330
1331         /*
1332          * Adjust ndistinct to account for restriction clauses.  Observe we
1333          * are assuming that the data distribution is affected uniformly by
1334          * the restriction clauses!
1335          *
1336          * XXX Possibly better way, but much more expensive: multiply by
1337          * selectivity of rel's restriction clauses that mention the target
1338          * Var.
1339          */
1340         ndistinct *= rel->rows / rel->tuples;
1341
1342         /*
1343          * Initial estimate of bucketsize fraction is 1/nbuckets as long as
1344          * the number of buckets is less than the expected number of distinct
1345          * values; otherwise it is 1/ndistinct.
1346          */
1347         if (ndistinct > (double) nbuckets)
1348                 estfract = 1.0 / (double) nbuckets;
1349         else
1350                 estfract = 1.0 / ndistinct;
1351
1352         /*
1353          * Look up the frequency of the most common value, if available.
1354          */
1355         mcvfreq = 0.0;
1356
1357         if (get_attstatsslot(tuple, var->vartype, var->vartypmod,
1358                                                  STATISTIC_KIND_MCV, InvalidOid,
1359                                                  NULL, NULL, &numbers, &nnumbers))
1360         {
1361                 /*
1362                  * The first MCV stat is for the most common value.
1363                  */
1364                 if (nnumbers > 0)
1365                         mcvfreq = numbers[0];
1366                 free_attstatsslot(var->vartype, NULL, 0,
1367                                                   numbers, nnumbers);
1368         }
1369
1370         /*
1371          * Adjust estimated bucketsize upward to account for skewed
1372          * distribution.
1373          */
1374         if (avgfreq > 0.0 && mcvfreq > avgfreq)
1375                 estfract *= mcvfreq / avgfreq;
1376
1377         /*
1378          * Clamp bucketsize to sane range (the above adjustment could easily
1379          * produce an out-of-range result).  We set the lower bound a little
1380          * above zero, since zero isn't a very sane result.
1381          */
1382         if (estfract < 1.0e-6)
1383                 estfract = 1.0e-6;
1384         else if (estfract > 1.0)
1385                 estfract = 1.0;
1386
1387         ReleaseSysCache(tuple);
1388
1389         return (Selectivity) estfract;
1390 }
1391
1392
1393 /*
1394  * cost_qual_eval
1395  *              Estimate the CPU costs of evaluating a WHERE clause.
1396  *              The input can be either an implicitly-ANDed list of boolean
1397  *              expressions, or a list of RestrictInfo nodes.
1398  *              The result includes both a one-time (startup) component,
1399  *              and a per-evaluation component.
1400  */
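/*
 * Typical caller pattern (a sketch only; "quals" and "output_tuples" are
 * placeholders, the real callers appear in the costing routines above):
 *
 *              QualCost        qcost;
 *
 *              cost_qual_eval(&qcost, quals);
 *              startup_cost += qcost.startup;
 *              run_cost += (cpu_tuple_cost + qcost.per_tuple) * output_tuples;
 */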
1401 void
1402 cost_qual_eval(QualCost *cost, List *quals)
1403 {
1404         List       *l;
1405
1406         cost->startup = 0;
1407         cost->per_tuple = 0;
1408
1409         /* We don't charge any cost for the implicit ANDing at top level ... */
1410
1411         foreach(l, quals)
1412         {
1413                 Node       *qual = (Node *) lfirst(l);
1414
1415                 /*
1416                  * RestrictInfo nodes contain an eval_cost field reserved for this
1417                  * routine's use, so that it's not necessary to evaluate the qual
1418                  * clause's cost more than once.  If the clause's cost hasn't been
1419                  * computed yet, the field's startup value will contain -1.
1420                  */
1421                 if (qual && IsA(qual, RestrictInfo))
1422                 {
1423                         RestrictInfo *restrictinfo = (RestrictInfo *) qual;
1424
1425                         if (restrictinfo->eval_cost.startup < 0)
1426                         {
1427                                 restrictinfo->eval_cost.startup = 0;
1428                                 restrictinfo->eval_cost.per_tuple = 0;
1429                                 cost_qual_eval_walker((Node *) restrictinfo->clause,
1430                                                                           &restrictinfo->eval_cost);
1431                         }
1432                         cost->startup += restrictinfo->eval_cost.startup;
1433                         cost->per_tuple += restrictinfo->eval_cost.per_tuple;
1434                 }
1435                 else
1436                 {
1437                         /* If it's a bare expression, must always do it the hard way */
1438                         cost_qual_eval_walker(qual, cost);
1439                 }
1440         }
1441 }
1442
1443 static bool
1444 cost_qual_eval_walker(Node *node, QualCost *total)
1445 {
1446         if (node == NULL)
1447                 return false;
1448
1449         /*
1450          * Our basic strategy is to charge one cpu_operator_cost for each
1451          * operator or function node in the given tree.  Vars and Consts are
1452          * charged zero, and so are boolean operators (AND, OR, NOT).
1453          * Simplistic, but a lot better than no model at all.
1454          *
1455          * Should we try to account for the possibility of short-circuit
1456          * evaluation of AND/OR?
1457          */
1458         if (IsA(node, FuncExpr) ||
1459                 IsA(node, OpExpr) ||
1460                 IsA(node, DistinctExpr))
1461         {
1462                 total->per_tuple += cpu_operator_cost;
1463         }
1464         else if (IsA(node, SubLink))
1465         {
1466                 /* This routine should not be applied to unplanned expressions */
1467                 elog(ERROR, "cost_qual_eval: can't handle unplanned sub-select");
1468         }
1469         else if (IsA(node, SubPlan))
1470         {
1471                 /*
1472                  * A subplan node in an expression typically indicates that the
1473                  * subplan will be executed on each evaluation, so charge accordingly.
1474                  * (Sub-selects that can be executed as InitPlans have already been
1475                  * removed from the expression.)
1476                  *
1477                  * An exception occurs when we have decided we can implement the
1478                  * subplan by hashing.
1479                  *
1480                  */
1481                 SubPlan    *subplan = (SubPlan *) node;
1482                 Plan       *plan = subplan->plan;
1483
1484                 if (subplan->useHashTable)
1485                 {
1486                         /*
1487                          * If we are using a hash table for the subquery outputs, then
1488                          * the cost of evaluating the query is a one-time cost.
1489                          * We charge one cpu_operator_cost per tuple for the work of
1490                          * loading the hashtable, too.
1491                          */
1492                         total->startup += plan->total_cost +
1493                                 cpu_operator_cost * plan->plan_rows;
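                        /*
                         * For example (illustrative numbers): a subplan with
                         * total_cost = 100.0 and plan_rows = 1000 adds
                         * 100.0 + 1000 * cpu_operator_cost (2.5 at the default
                         * setting of 0.0025) = 102.5 to the startup cost.
                         */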
1494                         /*
1495                          * The per-tuple costs include the cost of evaluating the
1496                          * lefthand expressions, plus the cost of probing the hashtable.
1497                          * Recursion into the exprs list will handle the lefthand
1498                          * expressions properly, and will count one cpu_operator_cost
1499                          * for each comparison operator.  That is probably too low for
1500                          * the probing cost, but it's hard to make a better estimate,
1501                          * so live with it for now.
1502                          */
1503                 }
1504                 else
1505                 {
1506                         /*
1507                          * Otherwise we will be rescanning the subplan output on each
1508                          * evaluation.  We need to estimate how much of the output
1509                          * we will actually need to scan.  NOTE: this logic should
1510                          * agree with the estimates used by make_subplan() in
1511                          * plan/subselect.c.
1512                          */
1513                         Cost    plan_run_cost = plan->total_cost - plan->startup_cost;
1514
1515                         if (subplan->subLinkType == EXISTS_SUBLINK)
1516                         {
1517                                 /* we only need to fetch 1 tuple */
1518                                 total->per_tuple += plan_run_cost / plan->plan_rows;
1519                         }
1520                         else if (subplan->subLinkType == ALL_SUBLINK ||
1521                                          subplan->subLinkType == ANY_SUBLINK)
1522                         {
1523                                 /* assume we need 50% of the tuples */
1524                                 total->per_tuple += 0.50 * plan_run_cost;
1525                                 /* also charge a cpu_operator_cost per row examined */
1526                                 total->per_tuple += 0.50 * plan->plan_rows * cpu_operator_cost;
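                                /*
                                 * Illustration (hypothetical figures): with
                                 * plan_run_cost = 80.0 and plan_rows = 1000, this
                                 * branch charges 40.0 + 1.25 = 41.25 per evaluation
                                 * at the default cpu_operator_cost of 0.0025.
                                 */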
1527                         }
1528                         else
1529                         {
1530                                 /* assume we need all tuples */
1531                                 total->per_tuple += plan_run_cost;
1532                         }
1533                         /*
1534                          * Also account for subplan's startup cost.
1535                          * If the subplan is uncorrelated or undirect correlated,
1536                          * AND its topmost node is a Sort or Material node, assume
1537                          * that we'll only need to pay its startup cost once;
1538                          * otherwise assume we pay the startup cost every time.
1539                          */
1540                         if (subplan->parParam == NIL &&
1541                                 (IsA(plan, Sort) ||
1542                                  IsA(plan, Material)))
1543                         {
1544                                 total->startup += plan->startup_cost;
1545                         }
1546                         else
1547                         {
1548                                 total->per_tuple += plan->startup_cost;
1549                         }
1550                 }
1551         }
1552
1553         return expression_tree_walker(node, cost_qual_eval_walker,
1554                                                                   (void *) total);
1555 }
1556
1557
1558 /*
1559  * approx_selectivity
1560  *              Quick-and-dirty estimation of clause selectivities.
1561  *              The input can be either an implicitly-ANDed list of boolean
1562  *              expressions, or a list of RestrictInfo nodes (typically the latter).
1563  *
1564  * The "quick" part comes from caching the selectivity estimates so we can
1565  * avoid recomputing them later.  (Since the same clauses are typically
1566  * examined over and over in different possible join trees, this makes a
1567  * big difference.)
1568  *
1569  * The "dirty" part comes from the fact that the selectivities of multiple
1570  * clauses are estimated independently and multiplied together.  Now
1571  * clauselist_selectivity often can't do any better than that anyhow, but
1572  * for some situations (such as range constraints) it is smarter.
1573  *
1574  * Since we are only using the results to estimate how many potential
1575  * output tuples are generated and passed through qpqual checking, it
1576  * seems OK to live with the approximation.
1577  */
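/*
 * For example, two cached clause selectivities of 0.1 and 0.05 yield a
 * combined estimate of 0.005, whether or not the clauses are actually
 * independent; that is the "dirty" part.
 */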
1578 static Selectivity
1579 approx_selectivity(Query *root, List *quals, JoinType jointype)
1580 {
1581         Selectivity total = 1.0;
1582         List       *l;
1583
1584         foreach(l, quals)
1585         {
1586                 Node       *qual = (Node *) lfirst(l);
1587                 Selectivity selec;
1588
1589                 /*
1590                  * RestrictInfo nodes contain a this_selec field reserved for this
1591                  * routine's use, so that it's not necessary to evaluate the qual
1592                  * clause's selectivity more than once.  If the clause's
1593                  * selectivity hasn't been computed yet, the field will contain
1594                  * -1.
1595                  */
1596                 if (qual && IsA(qual, RestrictInfo))
1597                 {
1598                         RestrictInfo *restrictinfo = (RestrictInfo *) qual;
1599
1600                         if (restrictinfo->this_selec < 0)
1601                                 restrictinfo->this_selec =
1602                                         clause_selectivity(root,
1603                                                                            (Node *) restrictinfo->clause,
1604                                                                            0,
1605                                                                            jointype);
1606                         selec = restrictinfo->this_selec;
1607                 }
1608                 else
1609                 {
1610                         /* If it's a bare expression, must always do it the hard way */
1611                         selec = clause_selectivity(root, qual, 0, jointype);
1612                 }
1613                 total *= selec;
1614         }
1615         return total;
1616 }
1617
1618
1619 /*
1620  * set_baserel_size_estimates
1621  *              Set the size estimates for the given base relation.
1622  *
1623  * The rel's targetlist and restrictinfo list must have been constructed
1624  * already.
1625  *
1626  * We set the following fields of the rel node:
1627  *      rows: the estimated number of output tuples (after applying
1628  *                restriction clauses).
1629  *      width: the estimated average output tuple width in bytes.
1630  *      baserestrictcost: estimated cost of evaluating baserestrictinfo clauses.
1631  */
1632 void
1633 set_baserel_size_estimates(Query *root, RelOptInfo *rel)
1634 {
1635         double          temp;
1636
1637         /* Should only be applied to base relations */
1638         Assert(rel->relid > 0);
1639
1640         temp = rel->tuples *
1641                 restrictlist_selectivity(root,
1642                                                                  rel->baserestrictinfo,
1643                                                                  rel->relid,
1644                                                                  JOIN_INNER);
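        /*
         * For example (illustrative figures): a relation with 100000 tuples and
         * a combined restriction selectivity of 0.0003 gets an estimate of 30
         * rows, which the code below then clamps to at least 1 and rounds up to
         * an integer.
         */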
1645
1646         /*
1647          * Force estimate to be at least one row, to make explain output look
1648          * better and to avoid possible divide-by-zero when interpolating
1649          * cost.  Make it an integer, too.
1650          */
1651         if (temp < 1.0)
1652                 temp = 1.0;
1653         else
1654                 temp = ceil(temp);
1655
1656         rel->rows = temp;
1657
1658         cost_qual_eval(&rel->baserestrictcost, rel->baserestrictinfo);
1659
1660         set_rel_width(root, rel);
1661 }
1662
1663 /*
1664  * set_joinrel_size_estimates
1665  *              Set the size estimates for the given join relation.
1666  *
1667  * The rel's targetlist must have been constructed already, and a
1668  * restriction clause list that matches the given component rels must
1669  * be provided.
1670  *
1671  * Since there is more than one way to make a joinrel for more than two
1672  * base relations, the results we get here could depend on which component
1673  * rel pair is provided.  In theory we should get the same answers no matter
1674  * which pair is provided; in practice, since the selectivity estimation
1675  * routines don't handle all cases equally well, we might not.  But there's
1676  * not much to be done about it.  (Would it make sense to repeat the
1677  * calculations for each pair of input rels that's encountered, and somehow
1678  * average the results?  Probably way more trouble than it's worth.)
1679  *
1680  * It's important that the results for symmetric JoinTypes be symmetric,
1681  * eg, (rel1, rel2, JOIN_LEFT) should produce the same result as (rel2,
1682  * rel1, JOIN_RIGHT).  Also, JOIN_IN should produce the same result as
1683  * JOIN_UNIQUE_INNER, likewise JOIN_REVERSE_IN == JOIN_UNIQUE_OUTER.
1684  *
1685  * We set the same relnode fields as set_baserel_size_estimates() does.
1686  */
1687 void
1688 set_joinrel_size_estimates(Query *root, RelOptInfo *rel,
1689                                                    RelOptInfo *outer_rel,
1690                                                    RelOptInfo *inner_rel,
1691                                                    JoinType jointype,
1692                                                    List *restrictlist)
1693 {
1694         Selectivity selec;
1695         double          temp;
1696         UniquePath *upath;
1697
1698         /*
1699          * Compute joinclause selectivity.  Note that we are only considering
1700          * clauses that become restriction clauses at this join level; we are
1701          * not double-counting them because they were not considered in
1702          * estimating the sizes of the component rels.
1703          */
1704         selec = restrictlist_selectivity(root,
1705                                                                          restrictlist,
1706                                                                          0,
1707                                                                          jointype);
1708
1709         /*
1710          * Basically, we multiply the size of the Cartesian product by the selectivity.
1711          *
1712          * If we are doing an outer join, take that into account: the output
1713          * must be at least as large as the non-nullable input.  (Is there any
1714          * chance of being even smarter?)
1715          *
1716          * For JOIN_IN and variants, the Cartesian product is figured with
1717          * respect to a unique-ified input, and then we can clamp to the size
1718          * of the other input.
1719          */
1720         switch (jointype)
1721         {
1722                 case JOIN_INNER:
1723                         temp = outer_rel->rows * inner_rel->rows * selec;
1724                         break;
1725                 case JOIN_LEFT:
1726                         temp = outer_rel->rows * inner_rel->rows * selec;
1727                         if (temp < outer_rel->rows)
1728                                 temp = outer_rel->rows;
1729                         break;
1730                 case JOIN_RIGHT:
1731                         temp = outer_rel->rows * inner_rel->rows * selec;
1732                         if (temp < inner_rel->rows)
1733                                 temp = inner_rel->rows;
1734                         break;
1735                 case JOIN_FULL:
1736                         temp = outer_rel->rows * inner_rel->rows * selec;
1737                         if (temp < outer_rel->rows)
1738                                 temp = outer_rel->rows;
1739                         if (temp < inner_rel->rows)
1740                                 temp = inner_rel->rows;
1741                         break;
1742                 case JOIN_IN:
1743                 case JOIN_UNIQUE_INNER:
1744                         upath = create_unique_path(root, inner_rel,
1745                                                                            inner_rel->cheapest_total_path);
1746                         temp = outer_rel->rows * upath->rows * selec;
1747                         if (temp > outer_rel->rows)
1748                                 temp = outer_rel->rows;
1749                         break;
1750                 case JOIN_REVERSE_IN:
1751                 case JOIN_UNIQUE_OUTER:
1752                         upath = create_unique_path(root, outer_rel,
1753                                                                            outer_rel->cheapest_total_path);
1754                         temp = upath->rows * inner_rel->rows * selec;
1755                         if (temp > inner_rel->rows)
1756                                 temp = inner_rel->rows;
1757                         break;
1758                 default:
1759                         elog(ERROR, "set_joinrel_size_estimates: unsupported join type %d",
1760                                  (int) jointype);
1761                         temp = 0;                       /* keep compiler quiet */
1762                         break;
1763         }
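        /*
         * Example of the outer-join clamp above (hypothetical figures): for
         * JOIN_LEFT with 1000 outer rows, 10 inner rows, and selec = 0.005, the
         * raw product is 50, but every outer row must appear at least once, so
         * the estimate is raised to 1000.
         */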
1764
1765         /*
1766          * Force estimate to be at least one row, to make explain output look
1767          * better and to avoid possible divide-by-zero when interpolating
1768          * cost.  Make it an integer, too.
1769          */
1770         if (temp < 1.0)
1771                 temp = 1.0;
1772         else
1773                 temp = ceil(temp);
1774
1775         rel->rows = temp;
1776
1777         /*
1778          * We could apply set_rel_width() to compute the output tuple width
1779          * from scratch, but at present it's always just the sum of the input
1780          * widths, so why work harder than necessary?  If relnode.c is ever
1781          * taught to remove unneeded columns from join targetlists, go back to
1782          * using set_rel_width here.
1783          */
1784         rel->width = outer_rel->width + inner_rel->width;
1785 }
1786
1787 /*
1788  * set_function_size_estimates
1789  *              Set the size estimates for a base relation that is a function call.
1790  *
1791  * The rel's targetlist and restrictinfo list must have been constructed
1792  * already.
1793  *
1794  * We set the following fields of the rel node:
1795  *      rows: the estimated number of output tuples (after applying
1796  *                restriction clauses).
1797  *      width: the estimated average output tuple width in bytes.
1798  *      baserestrictcost: estimated cost of evaluating baserestrictinfo clauses.
1799  */
1800 void
1801 set_function_size_estimates(Query *root, RelOptInfo *rel)
1802 {
1803         double          temp;
1804
1805         /* Should only be applied to base relations that are functions */
1806         Assert(rel->relid > 0);
1807         Assert(rel->rtekind == RTE_FUNCTION);
1808
1809         /*
1810          * Estimate number of rows the function itself will return.
1811          *
1812          * XXX no idea how to do this yet, but we should at least check whether
1813          * the function returns a set or not...
1814          */
1815         rel->tuples = 1000;
1816
1817         /* Now estimate number of output rows */
1818         temp = rel->tuples *
1819                 restrictlist_selectivity(root,
1820                                                                  rel->baserestrictinfo,
1821                                                                  rel->relid,
1822                                                                  JOIN_INNER);
1823
1824         /*
1825          * Force estimate to be at least one row, to make explain output look
1826          * better and to avoid possible divide-by-zero when interpolating
1827          * cost.  Make it an integer, too.
1828          */
1829         if (temp < 1.0)
1830                 temp = 1.0;
1831         else
1832                 temp = ceil(temp);
1833
1834         rel->rows = temp;
1835
1836         cost_qual_eval(&rel->baserestrictcost, rel->baserestrictinfo);
1837
1838         set_rel_width(root, rel);
1839 }
1840
1841
1842 /*
1843  * set_rel_width
1844  *              Set the estimated output width of the relation.
1845  *
1846  * NB: this works best on base relations because it prefers to look at
1847  * real Vars.  It will fail to make use of pg_statistic info when applied
1848  * to a subquery relation, even if the subquery outputs are simple vars
1849  * that we could have gotten info for.  Is it worth trying to be smarter
1850  * about subqueries?
1851  */
1852 static void
1853 set_rel_width(Query *root, RelOptInfo *rel)
1854 {
1855         int32           tuple_width = 0;
1856         List       *tllist;
1857
1858         foreach(tllist, rel->targetlist)
1859         {
1860                 TargetEntry *tle = (TargetEntry *) lfirst(tllist);
1861                 int32           item_width;
1862
1863                 /*
1864                  * If it's a Var, try to get statistical info from pg_statistic.
1865                  */
1866                 if (tle->expr && IsA(tle->expr, Var))
1867                 {
1868                         Var                *var = (Var *) tle->expr;
1869                         Oid                     relid;
1870
1871                         relid = getrelid(var->varno, root->rtable);
1872                         if (relid != InvalidOid)
1873                         {
1874                                 item_width = get_attavgwidth(relid, var->varattno);
1875                                 if (item_width > 0)
1876                                 {
1877                                         tuple_width += item_width;
1878                                         continue;
1879                                 }
1880                         }
1881                 }
1882
1883                 /*
1884                  * Not a Var, or can't find statistics for it.  Estimate using
1885                  * just the type info.
1886                  */
1887                 item_width = get_typavgwidth(tle->resdom->restype,
1888                                                                          tle->resdom->restypmod);
1889                 Assert(item_width > 0);
1890                 tuple_width += item_width;
1891         }
1892         Assert(tuple_width >= 0);
1893         rel->width = tuple_width;
1894 }
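/*
 * For instance, a targetlist containing an int4 Var (pg_statistic average
 * width 4) and a text Var with an average width of, say, 32 bytes yields
 * rel->width = 36; columns lacking statistics fall back to get_typavgwidth's
 * per-type default.  (The 32 is an illustrative figure.)
 */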
1895
1896 /*
1897  * relation_byte_size
1898  *        Estimate the storage space in bytes for a given number of tuples
1899  *        of a given width (size in bytes).
1900  */
1901 static double
1902 relation_byte_size(double tuples, int width)
1903 {
1904         return tuples * (MAXALIGN(width) + MAXALIGN(sizeof(HeapTupleData)));
1905 }
1906
1907 /*
1908  * page_size
1909  *        Returns an estimate of the number of pages covered by a given
1910  *        number of tuples of a given width (size in bytes).
1911  */
1912 static double
1913 page_size(double tuples, int width)
1914 {
1915         return ceil(relation_byte_size(tuples, width) / BLCKSZ);
1916 }
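/*
 * Rough illustration (assuming 8-byte MAXALIGN and, hypothetically, 32 bytes
 * of aligned per-tuple header overhead): 10000 tuples of width 44 occupy
 * about 10000 * (48 + 32) = 800000 bytes, which page_size() converts to
 * ceil(800000 / BLCKSZ) = 98 pages at the default BLCKSZ of 8192.
 */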