/*-------------------------------------------------------------------------
 *
 * costsize.c
 *	  Routines to compute (and set) relation sizes and path costs
 *
 * Path costs are measured in units of disk accesses: one sequential page
 * fetch has cost 1.  All else is scaled relative to a page fetch, using
 * the scaling parameters
 *
 *	random_page_cost	Cost of a non-sequential page fetch
 *	cpu_tuple_cost		Cost of typical CPU time to process a tuple
 *	cpu_index_tuple_cost	Cost of typical CPU time to process an index tuple
 *	cpu_operator_cost	Cost of CPU time to process a typical WHERE operator
 *
 * We also use a rough estimate "effective_cache_size" of the number of
 * disk pages in Postgres + OS-level disk cache.  (We can't simply use
 * NBuffers for this purpose because that would ignore the effects of
 * the kernel's disk cache.)
 *
 * Obviously, taking constants for these values is an oversimplification,
 * but it's tough enough to get any useful estimates even at this level of
 * detail.  Note that all of these parameters are user-settable, in case
 * the default values are drastically off for a particular platform.
 *
 * We compute two separate costs for each path:
 *		total_cost: total estimated cost to fetch all tuples
 *		startup_cost: cost that is expended before first tuple is fetched
 * In some scenarios, such as when there is a LIMIT or we are implementing
 * an EXISTS(...) sub-select, it is not necessary to fetch all tuples of the
 * path's result.  A caller can estimate the cost of fetching a partial
 * result by interpolating between startup_cost and total_cost.  In detail:
 *		actual_cost = startup_cost +
 *			(total_cost - startup_cost) * tuples_to_fetch / path->parent->rows;
 * Note that a base relation's rows count (and, by extension, plan_rows for
 * plan nodes below the LIMIT node) is set without regard to any LIMIT, so
 * that this equation works properly.  (Also, these routines guarantee not to
 * set the rows count to zero, so there will be no zero divide.)  The LIMIT is
 * applied as a top-level plan node.
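 *
 * As a worked example of that interpolation (numbers invented for
 * illustration): if startup_cost = 10, total_cost = 110, the path is
 * expected to return 1000 rows in total, and the caller needs only the
 * first 50, then actual_cost = 10 + (110 - 10) * 50 / 1000 = 15.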
 *
 * For largely historical reasons, most of the routines in this module use
 * the passed result Path only to store their startup_cost and total_cost
 * results into.  All the input data they need is passed as separate
 * parameters, even though much of it could be extracted from the Path.
 * An exception is made for the cost_XXXjoin() routines, which expect all
 * the non-cost fields of the passed XXXPath to be filled in.
 *
 *
 * Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
 *	  $Header: /cvsroot/pgsql/src/backend/optimizer/path/costsize.c,v 1.105 2003/02/08 20:20:54 tgl Exp $
 *
 *-------------------------------------------------------------------------
 */

#include "postgres.h"

#include <math.h>

#include "catalog/pg_statistic.h"
#include "executor/nodeHash.h"
#include "miscadmin.h"
#include "optimizer/clauses.h"
#include "optimizer/cost.h"
#include "optimizer/pathnode.h"
#include "parser/parsetree.h"
#include "utils/selfuncs.h"
#include "utils/lsyscache.h"
#include "utils/syscache.h"


#define LOG2(x)  (log(x) / 0.693147180559945)
#define LOG6(x)  (log(x) / 1.79175946922805)

/*
 * Some Paths return less than the nominal number of rows of their parent
 * relations; join nodes need to do this to get the correct input count:
 */
#define PATH_ROWS(path) \
	(IsA(path, UniquePath) ? \
	 ((UniquePath *) (path))->rows : \
	 (path)->parent->rows)


double		effective_cache_size = DEFAULT_EFFECTIVE_CACHE_SIZE;
double		random_page_cost = DEFAULT_RANDOM_PAGE_COST;
double		cpu_tuple_cost = DEFAULT_CPU_TUPLE_COST;
double		cpu_index_tuple_cost = DEFAULT_CPU_INDEX_TUPLE_COST;
double		cpu_operator_cost = DEFAULT_CPU_OPERATOR_COST;

Cost		disable_cost = 100000000.0;

bool		enable_seqscan = true;
bool		enable_indexscan = true;
bool		enable_tidscan = true;
bool		enable_sort = true;
bool		enable_hashagg = true;
bool		enable_nestloop = true;
bool		enable_mergejoin = true;
bool		enable_hashjoin = true;


static Selectivity estimate_hash_bucketsize(Query *root, Var *var,
						 int nbuckets);
static bool cost_qual_eval_walker(Node *node, QualCost *total);
static Selectivity approx_selectivity(Query *root, List *quals,
				   JoinType jointype);
static void set_rel_width(Query *root, RelOptInfo *rel);
static double relation_byte_size(double tuples, int width);
static double page_size(double tuples, int width);


/*
 * cost_seqscan
 *	  Determines and returns the cost of scanning a relation sequentially.
 */
void
cost_seqscan(Path *path, Query *root,
			 RelOptInfo *baserel)
{
	Cost		startup_cost = 0;
	Cost		run_cost = 0;
	Cost		cpu_per_tuple;

	/* Should only be applied to base relations */
	Assert(baserel->relid > 0);
	Assert(baserel->rtekind == RTE_RELATION);

	if (!enable_seqscan)
		startup_cost += disable_cost;

	/*
	 * disk costs
	 *
	 * The cost of reading a page sequentially is 1.0, by definition.  Note
	 * that the Unix kernel will typically do some amount of read-ahead
	 * optimization, so that this cost is less than the true cost of
	 * reading a page from disk.  We ignore that issue here, but must take
	 * it into account when estimating the cost of non-sequential accesses!
	 */
	run_cost += baserel->pages; /* sequential fetches with cost 1.0 */

	/* CPU costs */
	startup_cost += baserel->baserestrictcost.startup;
	cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
	run_cost += cpu_per_tuple * baserel->tuples;

	path->startup_cost = startup_cost;
	path->total_cost = startup_cost + run_cost;
}

/*
 * cost_nonsequential_access
 *	  Estimate the cost of accessing one page at random from a relation
 *	  (or sort temp file) of the given size in pages.
 *
 * The simplistic model that the cost is random_page_cost is what we want
 * to use for large relations; but for small ones that is a serious
 * overestimate because of the effects of caching.  This routine tries to
 * account for that.
 *
 * Unfortunately we don't have any good way of estimating the effective cache
 * size we are working with --- we know that Postgres itself has NBuffers
 * internal buffers, but the size of the kernel's disk cache is uncertain,
 * and how much of it we get to use is even less certain.  We punt the problem
 * for now by assuming we are given an effective_cache_size parameter.
 *
 * Given a guesstimated cache size, we estimate the actual I/O cost per page
 * with the entirely ad-hoc equations:
 *	if relpages >= effective_cache_size:
 *		random_page_cost * (1 - (effective_cache_size/relpages)/2)
 *	if relpages < effective_cache_size:
 *		1 + (random_page_cost/2-1) * (relpages/effective_cache_size) ** 2
 * These give the right asymptotic behavior (=> 1.0 as relpages becomes
 * small, => random_page_cost as it becomes large) and meet in the middle
 * with the estimate that the cache is about 50% effective for a relation
 * of the same size as effective_cache_size.  (XXX this is probably all
 * wrong, but I haven't been able to find any theory about how effective
 * a disk cache should be presumed to be.)
 */
static Cost
cost_nonsequential_access(double relpages)
{
	double		relsize;

	/* don't crash on bad input data */
	if (relpages <= 0.0 || effective_cache_size <= 0.0)
		return random_page_cost;

	relsize = relpages / effective_cache_size;

	if (relsize >= 1.0)
		return random_page_cost * (1.0 - 0.5 / relsize);
	else
		return 1.0 + (random_page_cost * 0.5 - 1.0) * relsize * relsize;
}
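
/*
 * Illustrative check of the equations above: the two branches meet at
 * relpages == effective_cache_size, where both reduce to
 * random_page_cost / 2 --- the "50% effective cache" estimate.  With the
 * default random_page_cost of 4.0, a relation exactly the size of the
 * cache is therefore charged 2.0 per nonsequential page fetch.
 */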

/*
 * cost_index
 *	  Determines and returns the cost of scanning a relation using an index.
 *
 * NOTE: an indexscan plan node can actually represent several passes,
 * but here we consider the cost of just one pass.
 *
 * 'root' is the query root
 * 'baserel' is the base relation the index is for
 * 'index' is the index to be used
 * 'indexQuals' is the list of applicable qual clauses (implicit AND semantics)
 * 'is_injoin' is T if we are considering using the index scan as the inside
 *		of a nestloop join (hence, some of the indexQuals are join clauses)
 *
 * NOTE: 'indexQuals' must contain only clauses usable as index restrictions.
 * Any additional quals evaluated as qpquals may reduce the number of returned
 * tuples, but they won't reduce the number of tuples we have to fetch from
 * the table, so they don't reduce the scan cost.
 */
void
cost_index(Path *path, Query *root,
		   RelOptInfo *baserel,
		   IndexOptInfo *index,
		   List *indexQuals,
		   bool is_injoin)
{
	Cost		startup_cost = 0;
	Cost		run_cost = 0;
	Cost		indexStartupCost;
	Cost		indexTotalCost;
	Selectivity indexSelectivity;
	double		indexCorrelation,
				csquared;
	Cost		min_IO_cost,
				max_IO_cost;
	Cost		cpu_per_tuple;
	double		tuples_fetched;
	double		pages_fetched;
	double		T,
				b;

	/* Should only be applied to base relations */
	Assert(IsA(baserel, RelOptInfo) &&
		   IsA(index, IndexOptInfo));
	Assert(baserel->relid > 0);
	Assert(baserel->rtekind == RTE_RELATION);

	if (!enable_indexscan)
		startup_cost += disable_cost;

	/*
	 * Call index-access-method-specific code to estimate the processing
	 * cost for scanning the index, as well as the selectivity of the
	 * index (ie, the fraction of main-table tuples we will have to
	 * retrieve) and its correlation to the main-table tuple order.
	 */
	OidFunctionCall8(index->amcostestimate,
					 PointerGetDatum(root),
					 PointerGetDatum(baserel),
					 PointerGetDatum(index),
					 PointerGetDatum(indexQuals),
					 PointerGetDatum(&indexStartupCost),
					 PointerGetDatum(&indexTotalCost),
					 PointerGetDatum(&indexSelectivity),
					 PointerGetDatum(&indexCorrelation));

	/* all costs for touching index itself included here */
	startup_cost += indexStartupCost;
	run_cost += indexTotalCost - indexStartupCost;

	/*
	 * Estimate number of main-table tuples and pages fetched.
	 *
	 * When the index ordering is uncorrelated with the table ordering,
	 * we use an approximation proposed by Mackert and Lohman, "Index Scans
	 * Using a Finite LRU Buffer: A Validated I/O Model", ACM Transactions
	 * on Database Systems, Vol. 14, No. 3, September 1989, Pages 401-424.
	 * The Mackert and Lohman approximation is that the number of pages
	 * fetched is
	 *	PF =
	 *		min(2TNs/(2T+Ns), T)			when T <= b
	 *		2TNs/(2T+Ns)					when T > b and Ns <= 2Tb/(2T-b)
	 *		b + (Ns - 2Tb/(2T-b))*(T-b)/T	when T > b and Ns > 2Tb/(2T-b)
	 * where
	 *		T = # pages in table
	 *		N = # tuples in table
	 *		s = selectivity = fraction of table to be scanned
	 *		b = # buffer pages available (we include kernel space here)
	 *
	 * When the index ordering is exactly correlated with the table ordering
	 * (just after a CLUSTER, for example), the number of pages fetched should
	 * be just sT.  What's more, these will be sequential fetches, not the
	 * random fetches that occur in the uncorrelated case.  So, depending on
	 * the extent of correlation, we should estimate the actual I/O cost
	 * somewhere between s * T * 1.0 and PF * random_cost.  We currently
	 * interpolate linearly between these two endpoints based on the
	 * correlation squared (XXX is that appropriate?).
	 *
	 * In any case the number of tuples fetched is Ns.
	 */
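
	/*
	 * A worked instance of the formula, with invented numbers: for
	 * T = 10000 pages, N = 1,000,000 tuples, s = 0.01 and b = 1000, we
	 * have Ns = 10000 and T > b.  Then lim = 2Tb/(2T-b) = 20000000/19000
	 * =~ 1053, and since Ns > lim, PF = b + (Ns - lim)*(T-b)/T
	 * =~ 1000 + 8947 * 0.9 =~ 9053 pages fetched.
	 */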

	tuples_fetched = indexSelectivity * baserel->tuples;
	/* Don't believe estimates less than 1... */
	if (tuples_fetched < 1.0)
		tuples_fetched = 1.0;

	/* This part is the Mackert and Lohman formula */

	T = (baserel->pages > 1) ? (double) baserel->pages : 1.0;
	b = (effective_cache_size > 1) ? effective_cache_size : 1.0;

	if (T <= b)
	{
		pages_fetched =
			(2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
		if (pages_fetched > T)
			pages_fetched = T;
	}
	else
	{
		double		lim;

		lim = (2.0 * T * b) / (2.0 * T - b);
		if (tuples_fetched <= lim)
		{
			pages_fetched =
				(2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
		}
		else
		{
			pages_fetched =
				b + (tuples_fetched - lim) * (T - b) / T;
		}
	}

	/*
	 * min_IO_cost corresponds to the perfectly correlated case
	 * (csquared=1), max_IO_cost to the perfectly uncorrelated case
	 * (csquared=0).  Note that we just charge random_page_cost per page
	 * in the uncorrelated case, rather than using
	 * cost_nonsequential_access, since we've already accounted for
	 * caching effects by using the Mackert model.
	 */
	min_IO_cost = ceil(indexSelectivity * T);
	max_IO_cost = pages_fetched * random_page_cost;

	/*
	 * Now interpolate based on estimated index order correlation to get
	 * total disk I/O cost for main table accesses.
	 */
	csquared = indexCorrelation * indexCorrelation;

	run_cost += max_IO_cost + csquared * (min_IO_cost - max_IO_cost);
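
	/*
	 * For example (invented numbers): with indexCorrelation = 0.5 we get
	 * csquared = 0.25, so the I/O charge is max_IO_cost minus one quarter
	 * of the gap down to min_IO_cost.
	 */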

	/*
	 * Estimate CPU costs per tuple.
	 *
	 * Normally the indexquals will be removed from the list of restriction
	 * clauses that we have to evaluate as qpquals, so we should subtract
	 * their costs from baserestrictcost.  But if we are doing a join then
	 * some of the indexquals are join clauses and shouldn't be subtracted.
	 * Rather than work out exactly how much to subtract, we don't subtract
	 * anything in that case.
	 *
	 * XXX For a lossy index, not all the quals will be removed and so we
	 * really shouldn't subtract their costs; but detecting that seems more
	 * expensive than it's worth.
	 */
	startup_cost += baserel->baserestrictcost.startup;
	cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;

	if (!is_injoin)
	{
		QualCost	index_qual_cost;

		cost_qual_eval(&index_qual_cost, indexQuals);
		cpu_per_tuple -= index_qual_cost.per_tuple;
	}

	run_cost += cpu_per_tuple * tuples_fetched;

	path->startup_cost = startup_cost;
	path->total_cost = startup_cost + run_cost;
}

/*
 * cost_tidscan
 *	  Determines and returns the cost of scanning a relation using TIDs.
 */
void
cost_tidscan(Path *path, Query *root,
			 RelOptInfo *baserel, List *tideval)
{
	Cost		startup_cost = 0;
	Cost		run_cost = 0;
	Cost		cpu_per_tuple;
	int			ntuples = length(tideval);

	/* Should only be applied to base relations */
	Assert(baserel->relid > 0);
	Assert(baserel->rtekind == RTE_RELATION);

	if (!enable_tidscan)
		startup_cost += disable_cost;

	/* disk costs --- assume each tuple on a different page */
	run_cost += random_page_cost * ntuples;

	/* CPU costs */
	startup_cost += baserel->baserestrictcost.startup;
	cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
	run_cost += cpu_per_tuple * ntuples;

	path->startup_cost = startup_cost;
	path->total_cost = startup_cost + run_cost;
}

/*
 * cost_functionscan
 *	  Determines and returns the cost of scanning a function RTE.
 */
void
cost_functionscan(Path *path, Query *root, RelOptInfo *baserel)
{
	Cost		startup_cost = 0;
	Cost		run_cost = 0;
	Cost		cpu_per_tuple;

	/* Should only be applied to base relations that are functions */
	Assert(baserel->relid > 0);
	Assert(baserel->rtekind == RTE_FUNCTION);

	/*
	 * For now, estimate function's cost at one operator eval per function
	 * call.  Someday we should revive the function cost estimate columns
	 * in pg_proc...
	 */
	cpu_per_tuple = cpu_operator_cost;

	/* Add scanning CPU costs */
	startup_cost += baserel->baserestrictcost.startup;
	cpu_per_tuple += cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
	run_cost += cpu_per_tuple * baserel->tuples;

	path->startup_cost = startup_cost;
	path->total_cost = startup_cost + run_cost;
}

/*
 * cost_sort
 *	  Determines and returns the cost of sorting a relation, including
 *	  the cost of reading the input data.
 *
 * If the total volume of data to sort is less than SortMem, we will do
 * an in-memory sort, which requires no I/O and about t*log2(t) tuple
 * comparisons for t tuples.
 *
 * If the total volume exceeds SortMem, we switch to a tape-style merge
 * algorithm.  There will still be about t*log2(t) tuple comparisons in
 * total, but we will also need to write and read each tuple once per
 * merge pass.  We expect about ceil(log6(r)) merge passes where r is the
 * number of initial runs formed (log6 because tuplesort.c uses six-tape
 * merging).  Since the average initial run should be about twice SortMem,
 * we have
 *		disk traffic = 2 * relsize * ceil(log6(p / (2*SortMem)))
 *		cpu = comparison_cost * t * log2(t)
 *
 * The disk traffic is assumed to be half sequential and half random
 * accesses (XXX can't we refine that guess?)
 *
 * We charge two operator evals per tuple comparison, which should be in
 * the right ballpark in most cases.
 *
 * 'pathkeys' is a list of sort keys
 * 'input_cost' is the total cost for reading the input data
 * 'tuples' is the number of tuples in the relation
 * 'width' is the average tuple width in bytes
 *
 * NOTE: some callers currently pass NIL for pathkeys because they
 * can't conveniently supply the sort keys.  Since this routine doesn't
 * currently do anything with pathkeys anyway, that doesn't matter...
 * but if it ever does, it should react gracefully to lack of key data.
 * (Actually, the thing we'd most likely be interested in is just the number
 * of sort keys, which all callers *could* supply.)
 */
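
/*
 * Worked example of the merge-pass arithmetic above (invented numbers):
 * sorting 1 GB of data with SortMem = 1024 (i.e. 1 MB) forms about
 * r = 1 GB / 2 MB = 512 initial runs, so we expect ceil(log6(512)) = 4
 * merge passes, and hence 2 * npages * 4 page writes and reads on top of
 * the t*log2(t) comparison cost.
 */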
void
cost_sort(Path *path, Query *root,
		  List *pathkeys, Cost input_cost, double tuples, int width)
{
	Cost		startup_cost = input_cost;
	Cost		run_cost = 0;
	double		nbytes = relation_byte_size(tuples, width);
	long		sortmembytes = SortMem * 1024L;

	if (!enable_sort)
		startup_cost += disable_cost;

	/*
	 * We want to be sure the cost of a sort is never estimated as zero,
	 * even if passed-in tuple count is zero.  Besides, mustn't do
	 * log(0)...
	 */
	if (tuples < 2.0)
		tuples = 2.0;

	/*
	 * CPU costs
	 *
	 * Assume about two operator evals per tuple comparison and N log2 N
	 * comparisons
	 */
	startup_cost += 2.0 * cpu_operator_cost * tuples * LOG2(tuples);

	/* disk costs */
	if (nbytes > sortmembytes)
	{
		double		npages = ceil(nbytes / BLCKSZ);
		double		nruns = nbytes / (sortmembytes * 2);
		double		log_runs = ceil(LOG6(nruns));
		double		npageaccesses;

		if (log_runs < 1.0)
			log_runs = 1.0;
		npageaccesses = 2.0 * npages * log_runs;
		/* Assume half are sequential (cost 1), half are not */
		startup_cost += npageaccesses *
			(1.0 + cost_nonsequential_access(npages)) * 0.5;
	}

	/*
	 * Also charge a small amount (arbitrarily set equal to operator cost)
	 * per extracted tuple.
	 */
	run_cost += cpu_operator_cost * tuples;

	path->startup_cost = startup_cost;
	path->total_cost = startup_cost + run_cost;
}

/*
 * cost_material
 *	  Determines and returns the cost of materializing a relation, including
 *	  the cost of reading the input data.
 *
 * If the total volume of data to materialize exceeds SortMem, we will need
 * to write it to disk, so the cost is much higher in that case.
 */
void
cost_material(Path *path,
			  Cost input_cost, double tuples, int width)
{
	Cost		startup_cost = input_cost;
	Cost		run_cost = 0;
	double		nbytes = relation_byte_size(tuples, width);
	long		sortmembytes = SortMem * 1024L;

	/* disk costs */
	if (nbytes > sortmembytes)
	{
		double		npages = ceil(nbytes / BLCKSZ);

		/* We'll write during startup and read during retrieval */
		startup_cost += npages;
		run_cost += npages;
	}

	/*
	 * Also charge a small amount per extracted tuple.  We use cpu_tuple_cost
	 * so that it doesn't appear worthwhile to materialize a bare seqscan.
	 */
	run_cost += cpu_tuple_cost * tuples;

	path->startup_cost = startup_cost;
	path->total_cost = startup_cost + run_cost;
}

/*
 * cost_agg
 *		Determines and returns the cost of performing an Agg plan node,
 *		including the cost of its input.
 *
 * Note: when aggstrategy == AGG_SORTED, caller must ensure that input costs
 * are for appropriately-sorted input.
 */
void
cost_agg(Path *path, Query *root,
		 AggStrategy aggstrategy, int numAggs,
		 int numGroupCols, double numGroups,
		 Cost input_startup_cost, Cost input_total_cost,
		 double input_tuples)
{
	Cost		startup_cost;
	Cost		total_cost;

	/*
	 * We charge one cpu_operator_cost per aggregate function per input
	 * tuple, and another one per output tuple (corresponding to transfn
	 * and finalfn calls respectively).  If we are grouping, we charge an
	 * additional cpu_operator_cost per grouping column per input tuple
	 * for grouping comparisons.
	 *
	 * We will produce a single output tuple if not grouping,
	 * and a tuple per group otherwise.
	 */
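
	/*
	 * For instance (invented numbers): with 1000 input tuples, 10 groups,
	 * two aggregates and one grouping column, AGG_SORTED and AGG_HASHED
	 * both charge cpu_operator_cost * ((1000 + 10) * 2 + 1000 * 1) in
	 * total; they differ only in how much of that lands in startup_cost.
	 */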
	if (aggstrategy == AGG_PLAIN)
	{
		startup_cost = input_total_cost;
		startup_cost += cpu_operator_cost * (input_tuples + 1) * numAggs;
		/* we aren't grouping */
		total_cost = startup_cost;
	}
	else if (aggstrategy == AGG_SORTED)
	{
		/* Here we are able to deliver output on-the-fly */
		startup_cost = input_startup_cost;
		total_cost = input_total_cost;
		total_cost += cpu_operator_cost * (input_tuples + numGroups) * numAggs;
		total_cost += cpu_operator_cost * input_tuples * numGroupCols;
	}
	else
	{
		/* must be AGG_HASHED */
		startup_cost = input_total_cost;
		startup_cost += cpu_operator_cost * input_tuples * numAggs;
		startup_cost += cpu_operator_cost * input_tuples * numGroupCols;
		total_cost = startup_cost;
		total_cost += cpu_operator_cost * numGroups * numAggs;
	}

	path->startup_cost = startup_cost;
	path->total_cost = total_cost;
}

/*
 * cost_group
 *		Determines and returns the cost of performing a Group plan node,
 *		including the cost of its input.
 *
 * Note: caller must ensure that input costs are for appropriately-sorted
 * input.
 */
void
cost_group(Path *path, Query *root,
		   int numGroupCols, double numGroups,
		   Cost input_startup_cost, Cost input_total_cost,
		   double input_tuples)
{
	Cost		startup_cost;
	Cost		total_cost;

	startup_cost = input_startup_cost;
	total_cost = input_total_cost;

	/*
	 * Charge one cpu_operator_cost per comparison per input tuple.  We
	 * assume all columns get compared at most of the tuples.
	 */
	total_cost += cpu_operator_cost * input_tuples * numGroupCols;

	path->startup_cost = startup_cost;
	path->total_cost = total_cost;
}

/*
 * cost_nestloop
 *	  Determines and returns the cost of joining two relations using the
 *	  nested loop algorithm.
 *
 * 'path' is already filled in except for the cost fields
 */
void
cost_nestloop(NestPath *path, Query *root)
{
	Path	   *outer_path = path->outerjoinpath;
	Path	   *inner_path = path->innerjoinpath;
	List	   *restrictlist = path->joinrestrictinfo;
	Cost		startup_cost = 0;
	Cost		run_cost = 0;
	Cost		cpu_per_tuple;
	QualCost	restrict_qual_cost;
	double		outer_path_rows = PATH_ROWS(outer_path);
	double		inner_path_rows = PATH_ROWS(inner_path);
	double		ntuples;
	Selectivity joininfactor;

	if (!enable_nestloop)
		startup_cost += disable_cost;

	/*
	 * If we're doing JOIN_IN then we will stop scanning inner tuples for an
	 * outer tuple as soon as we have one match.  Account for the effects of
	 * this by scaling down the cost estimates in proportion to the expected
	 * output size.  (This assumes that all the quals attached to the join are
	 * IN quals, which should be true.)
	 *
	 * Note: it's probably bogus to use the normal selectivity calculation
	 * here when either the outer or inner path is a UniquePath.
	 */
	if (path->jointype == JOIN_IN)
	{
		Selectivity qual_selec = approx_selectivity(root, restrictlist,
													path->jointype);
		double		qptuples;

		qptuples = ceil(qual_selec * outer_path_rows * inner_path_rows);
		if (qptuples > path->path.parent->rows)
			joininfactor = path->path.parent->rows / qptuples;
		else
			joininfactor = 1.0;
	}
	else
		joininfactor = 1.0;

	/* cost of source data */

	/*
	 * NOTE: clearly, we must pay both outer and inner paths' startup_cost
	 * before we can start returning tuples, so the join's startup cost is
	 * their sum.  What's not so clear is whether the inner path's
	 * startup_cost must be paid again on each rescan of the inner path.
	 * This is not true if the inner path is materialized or is a hashjoin,
	 * but probably is true otherwise.
	 */
	startup_cost += outer_path->startup_cost + inner_path->startup_cost;
	run_cost += outer_path->total_cost - outer_path->startup_cost;
	if (IsA(inner_path, MaterialPath) ||
		IsA(inner_path, HashPath))
	{
		/* charge only run cost for each iteration of inner path */
	}
	else
	{
		/*
		 * charge startup cost for each iteration of inner path, except we
		 * already charged the first startup_cost in our own startup
		 */
		run_cost += (outer_path_rows - 1) * inner_path->startup_cost;
	}
	run_cost += outer_path_rows *
		(inner_path->total_cost - inner_path->startup_cost) * joininfactor;

	/*
	 * Compute number of tuples processed (not number emitted!).
	 * If inner path is an indexscan, be sure to use its estimated output row
	 * count, which may be lower than the restriction-clause-only row count of
	 * its parent.  (We don't include this case in the PATH_ROWS macro because
	 * it applies *only* to a nestloop's inner relation.)  Note: it is correct
	 * to use the unadjusted inner_path_rows in the above calculation for
	 * joininfactor, since otherwise we'd be double-counting the selectivity
	 * of the join clause being used for the index.
	 */
	if (IsA(inner_path, IndexPath))
		inner_path_rows = ((IndexPath *) inner_path)->rows;

	ntuples = inner_path_rows * outer_path_rows;

	/* CPU costs */
	cost_qual_eval(&restrict_qual_cost, restrictlist);
	startup_cost += restrict_qual_cost.startup;
	cpu_per_tuple = cpu_tuple_cost + restrict_qual_cost.per_tuple;
	run_cost += cpu_per_tuple * ntuples;

	path->path.startup_cost = startup_cost;
	path->path.total_cost = startup_cost + run_cost;
}

/*
 * cost_mergejoin
 *	  Determines and returns the cost of joining two relations using the
 *	  merge join algorithm.
 *
 * 'path' is already filled in except for the cost fields
 *
 * Notes: path's mergeclauses should be a subset of the joinrestrictinfo list;
 * outersortkeys and innersortkeys are lists of the keys to be used
 * to sort the outer and inner relations, or NIL if no explicit
 * sort is needed because the source path is already ordered.
 */
void
cost_mergejoin(MergePath *path, Query *root)
{
	Path	   *outer_path = path->jpath.outerjoinpath;
	Path	   *inner_path = path->jpath.innerjoinpath;
	List	   *restrictlist = path->jpath.joinrestrictinfo;
	List	   *mergeclauses = path->path_mergeclauses;
	List	   *outersortkeys = path->outersortkeys;
	List	   *innersortkeys = path->innersortkeys;
	Cost		startup_cost = 0;
	Cost		run_cost = 0;
	Cost		cpu_per_tuple;
	Selectivity merge_selec;
	Selectivity qp_selec;
	QualCost	merge_qual_cost;
	QualCost	qp_qual_cost;
	RestrictInfo *firstclause;
	List	   *qpquals;
	double		outer_path_rows = PATH_ROWS(outer_path);
	double		inner_path_rows = PATH_ROWS(inner_path);
	double		outer_rows,
				inner_rows;
	double		mergejointuples,
				rescannedtuples;
	double		qptuples;
	double		rescanratio;
	Selectivity outerscansel,
				innerscansel;
	Selectivity joininfactor;
	Path		sort_path;		/* dummy for result of cost_sort */

	if (!enable_mergejoin)
		startup_cost += disable_cost;

	/*
	 * Compute cost and selectivity of the mergequals and qpquals (other
	 * restriction clauses) separately.  We use approx_selectivity here
	 * for speed --- in most cases, any errors won't affect the result much.
	 *
	 * Note: it's probably bogus to use the normal selectivity calculation
	 * here when either the outer or inner path is a UniquePath.
	 */
	merge_selec = approx_selectivity(root, mergeclauses,
									 path->jpath.jointype);
	cost_qual_eval(&merge_qual_cost, mergeclauses);
	qpquals = set_ptrDifference(restrictlist, mergeclauses);
	qp_selec = approx_selectivity(root, qpquals,
								  path->jpath.jointype);
	cost_qual_eval(&qp_qual_cost, qpquals);

	/* approx # tuples passing the merge quals */
	mergejointuples = ceil(merge_selec * outer_path_rows * inner_path_rows);
	/* approx # tuples passing qpquals as well */
	qptuples = ceil(mergejointuples * qp_selec);

	/*
	 * When there are equal merge keys in the outer relation, the mergejoin
	 * must rescan any matching tuples in the inner relation.  This means
	 * re-fetching inner tuples.  Our cost model for this is that a re-fetch
	 * costs the same as an original fetch, which is probably an overestimate;
	 * but on the other hand we ignore the bookkeeping costs of mark/restore.
	 * Not clear if it's worth developing a more refined model.
	 *
	 * The number of re-fetches can be estimated approximately as size of
	 * merge join output minus size of inner relation.  Assume that the
	 * distinct key values are 1, 2, ..., and denote the number of values of
	 * each key in the outer relation as m1, m2, ...; in the inner relation,
	 * n1, n2, ...  Then we have
	 *
	 * size of join = m1 * n1 + m2 * n2 + ...
	 *
	 * number of rescanned tuples = (m1 - 1) * n1 + (m2 - 1) * n2 + ...
	 * = m1 * n1 + m2 * n2 + ... - (n1 + n2 + ...)
	 * = size of join - size of inner relation
	 *
	 * This equation works correctly for outer tuples having no inner match
	 * (nk = 0), but not for inner tuples having no outer match (mk = 0);
	 * we are effectively subtracting those from the number of rescanned
	 * tuples, when we should not.  Can we do better without expensive
	 * selectivity computations?
	 */
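
	/*
	 * As a quick sanity check of that identity: take an outer relation
	 * with key counts m = (2, 2) and an inner with n = (1, 3).  The join
	 * produces 2*1 + 2*3 = 8 tuples, the inner relation has 4, and indeed
	 * (2-1)*1 + (2-1)*3 = 4 = 8 - 4 tuples are re-fetched.
	 */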
	if (IsA(outer_path, UniquePath))
		rescannedtuples = 0;
	else
	{
		rescannedtuples = mergejointuples - inner_path_rows;
		/* Must clamp because of possible underestimate */
		if (rescannedtuples < 0)
			rescannedtuples = 0;
	}
	/* We'll inflate inner run cost this much to account for rescanning */
	rescanratio = 1.0 + (rescannedtuples / inner_path_rows);

	/*
	 * A merge join will stop as soon as it exhausts either input stream.
	 * Estimate fraction of the left and right inputs that will actually
	 * need to be scanned.  We use only the first (most significant) merge
	 * clause for this purpose.
	 *
	 * Since this calculation is somewhat expensive, and will be the same for
	 * all mergejoin paths associated with the merge clause, we cache the
	 * results in the RestrictInfo node.
	 */
	firstclause = (RestrictInfo *) lfirst(mergeclauses);
	if (firstclause->left_mergescansel < 0) /* not computed yet? */
		mergejoinscansel(root, (Node *) firstclause->clause,
						 &firstclause->left_mergescansel,
						 &firstclause->right_mergescansel);

	if (bms_is_subset(firstclause->left_relids, outer_path->parent->relids))
	{
		/* left side of clause is outer */
		outerscansel = firstclause->left_mergescansel;
		innerscansel = firstclause->right_mergescansel;
	}
	else
	{
		/* left side of clause is inner */
		outerscansel = firstclause->right_mergescansel;
		innerscansel = firstclause->left_mergescansel;
	}

	/* convert selectivity to row count; must scan at least one row */

	outer_rows = ceil(outer_path_rows * outerscansel);
	if (outer_rows < 1)
		outer_rows = 1;
	inner_rows = ceil(inner_path_rows * innerscansel);
	if (inner_rows < 1)
		inner_rows = 1;

	/*
	 * Readjust scan selectivities to account for above rounding.  This is
	 * normally an insignificant effect, but when there are only a few rows
	 * in the inputs, failing to do this makes for a large percentage error.
	 */
	outerscansel = outer_rows / outer_path_rows;
	innerscansel = inner_rows / inner_path_rows;

	/* cost of source data */

	if (outersortkeys)			/* do we need to sort outer? */
	{
		cost_sort(&sort_path,
				  root,
				  outersortkeys,
				  outer_path->total_cost,
				  outer_path_rows,
				  outer_path->parent->width);
		startup_cost += sort_path.startup_cost;
		run_cost += (sort_path.total_cost - sort_path.startup_cost)
			* outerscansel;
	}
	else
	{
		startup_cost += outer_path->startup_cost;
		run_cost += (outer_path->total_cost - outer_path->startup_cost)
			* outerscansel;
	}

	if (innersortkeys)			/* do we need to sort inner? */
	{
		cost_sort(&sort_path,
				  root,
				  innersortkeys,
				  inner_path->total_cost,
				  inner_path_rows,
				  inner_path->parent->width);
		startup_cost += sort_path.startup_cost;
		run_cost += (sort_path.total_cost - sort_path.startup_cost)
			* innerscansel * rescanratio;
	}
	else
	{
		startup_cost += inner_path->startup_cost;
		run_cost += (inner_path->total_cost - inner_path->startup_cost)
			* innerscansel * rescanratio;
	}

	/* CPU costs */

	/*
	 * If we're doing JOIN_IN then we will stop outputting inner
	 * tuples for an outer tuple as soon as we have one match.  Account for
	 * the effects of this by scaling down the cost estimates in proportion
	 * to the expected output size.  (This assumes that all the quals attached
	 * to the join are IN quals, which should be true.)
	 */
	if (path->jpath.jointype == JOIN_IN &&
		qptuples > path->jpath.path.parent->rows)
		joininfactor = path->jpath.path.parent->rows / qptuples;
	else
		joininfactor = 1.0;

	/*
	 * The number of tuple comparisons needed is approximately number of
	 * outer rows plus number of inner rows plus number of rescanned
	 * tuples (can we refine this?).  At each one, we need to evaluate
	 * the mergejoin quals.  NOTE: JOIN_IN mode does not save any work
	 * here, so do NOT include joininfactor.
	 */
	startup_cost += merge_qual_cost.startup;
	run_cost += merge_qual_cost.per_tuple *
		(outer_rows + inner_rows * rescanratio);

	/*
	 * For each tuple that gets through the mergejoin proper, we charge
	 * cpu_tuple_cost plus the cost of evaluating additional restriction
	 * clauses that are to be applied at the join.  (This is pessimistic
	 * since not all of the quals may get evaluated at each tuple.)  This
	 * work is skipped in JOIN_IN mode, so apply the factor.
	 */
	startup_cost += qp_qual_cost.startup;
	cpu_per_tuple = cpu_tuple_cost + qp_qual_cost.per_tuple;
	run_cost += cpu_per_tuple * mergejointuples * joininfactor;

	path->jpath.path.startup_cost = startup_cost;
	path->jpath.path.total_cost = startup_cost + run_cost;
}

/*
 * cost_hashjoin
 *	  Determines and returns the cost of joining two relations using the
 *	  hash join algorithm.
 *
 * 'path' is already filled in except for the cost fields
 *
 * Note: path's hashclauses should be a subset of the joinrestrictinfo list
 */
void
cost_hashjoin(HashPath *path, Query *root)
{
	Path	   *outer_path = path->jpath.outerjoinpath;
	Path	   *inner_path = path->jpath.innerjoinpath;
	List	   *restrictlist = path->jpath.joinrestrictinfo;
	List	   *hashclauses = path->path_hashclauses;
	Cost		startup_cost = 0;
	Cost		run_cost = 0;
	Cost		cpu_per_tuple;
	Selectivity hash_selec;
	Selectivity qp_selec;
	QualCost	hash_qual_cost;
	QualCost	qp_qual_cost;
	double		hashjointuples;
	double		qptuples;
	double		outer_path_rows = PATH_ROWS(outer_path);
	double		inner_path_rows = PATH_ROWS(inner_path);
	double		outerbytes = relation_byte_size(outer_path_rows,
												outer_path->parent->width);
	double		innerbytes = relation_byte_size(inner_path_rows,
												inner_path->parent->width);
	int			num_hashclauses = length(hashclauses);
	int			virtualbuckets;
	int			physicalbuckets;
	int			numbatches;
	Selectivity innerbucketsize;
	Selectivity joininfactor;
	List	   *qpquals;
	List	   *hcl;

	if (!enable_hashjoin)
		startup_cost += disable_cost;

	/*
	 * Compute cost and selectivity of the hashquals and qpquals (other
	 * restriction clauses) separately.  We use approx_selectivity here
	 * for speed --- in most cases, any errors won't affect the result much.
	 *
	 * Note: it's probably bogus to use the normal selectivity calculation
	 * here when either the outer or inner path is a UniquePath.
	 */
	hash_selec = approx_selectivity(root, hashclauses,
									path->jpath.jointype);
	cost_qual_eval(&hash_qual_cost, hashclauses);
	qpquals = set_ptrDifference(restrictlist, hashclauses);
	qp_selec = approx_selectivity(root, qpquals,
								  path->jpath.jointype);
	cost_qual_eval(&qp_qual_cost, qpquals);

	/* approx # tuples passing the hash quals */
	hashjointuples = ceil(hash_selec * outer_path_rows * inner_path_rows);
	/* approx # tuples passing qpquals as well */
	qptuples = ceil(hashjointuples * qp_selec);

	/* cost of source data */
	startup_cost += outer_path->startup_cost;
	run_cost += outer_path->total_cost - outer_path->startup_cost;
	startup_cost += inner_path->total_cost;

	/*
	 * Cost of computing hash function: must do it once per input tuple.
	 * We charge one cpu_operator_cost for each column's hash function.
	 *
	 * XXX when a hashclause is more complex than a single operator,
	 * we really should charge the extra eval costs of the left or right
	 * side, as appropriate, here.  This seems more work than it's worth
	 * at the moment.
	 */
	startup_cost += cpu_operator_cost * num_hashclauses * inner_path_rows;
	run_cost += cpu_operator_cost * num_hashclauses * outer_path_rows;

	/* Get hash table size that executor would use for inner relation */
	ExecChooseHashTableSize(inner_path_rows,
							inner_path->parent->width,
							&virtualbuckets,
							&physicalbuckets,
							&numbatches);

	/*
	 * Determine bucketsize fraction for inner relation.  We use the
	 * smallest bucketsize estimated for any individual hashclause;
	 * this is undoubtedly conservative.
	 *
	 * BUT: if inner relation has been unique-ified, we can assume it's
	 * good for hashing.  This is important both because it's the right
	 * answer, and because we avoid contaminating the cache with a value
	 * that's wrong for non-unique-ified paths.
	 */
	if (IsA(inner_path, UniquePath))
		innerbucketsize = 1.0 / virtualbuckets;
	else
	{
		innerbucketsize = 1.0;
		foreach(hcl, hashclauses)
		{
			RestrictInfo *restrictinfo = (RestrictInfo *) lfirst(hcl);
			Selectivity thisbucketsize;

			Assert(IsA(restrictinfo, RestrictInfo));

			/*
			 * First we have to figure out which side of the hashjoin clause
			 * is the inner side.
			 *
			 * Since we tend to visit the same clauses over and over when
			 * planning a large query, we cache the bucketsize estimate in the
			 * RestrictInfo node to avoid repeated lookups of statistics.
			 */
			if (bms_is_subset(restrictinfo->right_relids,
							  inner_path->parent->relids))
			{
				/* righthand side is inner */
				thisbucketsize = restrictinfo->right_bucketsize;
				if (thisbucketsize < 0)
				{
					/* not cached yet */
					thisbucketsize =
						estimate_hash_bucketsize(root,
								(Var *) get_rightop(restrictinfo->clause),
												 virtualbuckets);
					restrictinfo->right_bucketsize = thisbucketsize;
				}
			}
			else
			{
				Assert(bms_is_subset(restrictinfo->left_relids,
									 inner_path->parent->relids));
				/* lefthand side is inner */
				thisbucketsize = restrictinfo->left_bucketsize;
				if (thisbucketsize < 0)
				{
					/* not cached yet */
					thisbucketsize =
						estimate_hash_bucketsize(root,
								 (Var *) get_leftop(restrictinfo->clause),
												 virtualbuckets);
					restrictinfo->left_bucketsize = thisbucketsize;
				}
			}

			if (innerbucketsize > thisbucketsize)
				innerbucketsize = thisbucketsize;
		}
	}

	/*
	 * If inner relation is too big then we will need to "batch" the join,
	 * which implies writing and reading most of the tuples to disk an
	 * extra time.  Charge one cost unit per page of I/O (correct since it
	 * should be nice and sequential...).  Writing the inner rel counts as
	 * startup cost, all the rest as run cost.
	 */
	if (numbatches > 1)
	{
		double		outerpages = page_size(outer_path_rows,
										   outer_path->parent->width);
		double		innerpages = page_size(inner_path_rows,
										   inner_path->parent->width);

		startup_cost += innerpages;
		run_cost += innerpages + 2 * outerpages;
	}

	/* CPU costs */

	/*
	 * If we're doing JOIN_IN then we will stop comparing inner
	 * tuples to an outer tuple as soon as we have one match.  Account for
	 * the effects of this by scaling down the cost estimates in proportion
	 * to the expected output size.  (This assumes that all the quals attached
	 * to the join are IN quals, which should be true.)
	 */
	if (path->jpath.jointype == JOIN_IN &&
		qptuples > path->jpath.path.parent->rows)
		joininfactor = path->jpath.path.parent->rows / qptuples;
	else
		joininfactor = 1.0;

	/*
	 * The number of tuple comparisons needed is the number of outer
	 * tuples times the typical number of tuples in a hash bucket, which
	 * is the inner relation size times its bucketsize fraction.  At each
	 * one, we need to evaluate the hashjoin quals.
	 */
	startup_cost += hash_qual_cost.startup;
	run_cost += hash_qual_cost.per_tuple *
		outer_path_rows * ceil(inner_path_rows * innerbucketsize) *
		joininfactor;

	/*
	 * For each tuple that gets through the hashjoin proper, we charge
	 * cpu_tuple_cost plus the cost of evaluating additional restriction
	 * clauses that are to be applied at the join.  (This is pessimistic
	 * since not all of the quals may get evaluated at each tuple.)
	 */
	startup_cost += qp_qual_cost.startup;
	cpu_per_tuple = cpu_tuple_cost + qp_qual_cost.per_tuple;
	run_cost += cpu_per_tuple * hashjointuples * joininfactor;

	/*
	 * Bias against putting larger relation on inside.  We don't want an
	 * absolute prohibition, though, since larger relation might have
	 * better bucketsize --- and we can't trust the size estimates
	 * unreservedly, anyway.  Instead, inflate the run cost by the
	 * square root of the size ratio.  (Why square root?  No real good
	 * reason, but it seems reasonable...)
	 *
	 * Note: before 7.4 we implemented this by inflating startup cost;
	 * but if there's a disable_cost component in the input paths'
	 * startup cost, that unfairly penalizes the hash.  Probably it'd
	 * be better to keep track of disable penalty separately from cost.
	 */
	if (innerbytes > outerbytes && outerbytes > 0)
		run_cost *= sqrt(innerbytes / outerbytes);
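
	/*
	 * For example (invented sizes): an inner relation four times larger
	 * than the outer inflates the run cost estimate by sqrt(4) = 2.
	 */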

	path->jpath.path.startup_cost = startup_cost;
	path->jpath.path.total_cost = startup_cost + run_cost;
}

/*
 * Estimate hash bucketsize fraction (ie, number of entries in a bucket
 * divided by total tuples in relation) if the specified Var is used
 * as a hash key.
 *
 * XXX This is really pretty bogus since we're effectively assuming that the
 * distribution of hash keys will be the same after applying restriction
 * clauses as it was in the underlying relation.  However, we are not nearly
 * smart enough to figure out how the restrict clauses might change the
 * distribution, so this will have to do for now.
 *
 * We are passed the number of buckets the executor will use for the given
 * input relation.  If the data were perfectly distributed, with the same
 * number of tuples going into each available bucket, then the bucketsize
 * fraction would be 1/nbuckets.  But this happy state of affairs will occur
 * only if (a) there are at least nbuckets distinct data values, and (b)
 * we have a not-too-skewed data distribution.  Otherwise the buckets will
 * be nonuniformly occupied.  If the other relation in the join has a key
 * distribution similar to this one's, then the most-loaded buckets are
 * exactly those that will be probed most often.  Therefore, the "average"
 * bucket size for costing purposes should really be taken as something close
 * to the "worst case" bucket size.  We try to estimate this by adjusting the
 * fraction if there are too few distinct data values, and then scaling up
 * by the ratio of the most common value's frequency to the average frequency.
 *
 * If no statistics are available, use a default estimate of 0.1.  This will
 * discourage use of a hash rather strongly if the inner relation is large,
 * which is what we want.  We do not want to hash unless we know that the
 * inner rel is well-dispersed (or the alternatives seem much worse).
 */
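
/*
 * Worked example of the adjustments above (invented statistics): with
 * nbuckets = 1024 but only ndistinct = 50 values, the initial estimate
 * is 1/50 = 0.02; if the most common value then has frequency 0.10
 * against an average frequency of 0.02, the estimate is scaled up by 5
 * to a final bucketsize fraction of 0.10.
 */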
static Selectivity
estimate_hash_bucketsize(Query *root, Var *var, int nbuckets)
{
	Oid			relid;
	RelOptInfo *rel;
	HeapTuple	tuple;
	Form_pg_statistic stats;
	double		estfract,
				ndistinct,
				mcvfreq,
				avgfreq;
	float4	   *numbers;
	int			nnumbers;

	/*
	 * Lookup info about var's relation and attribute; if none available,
	 * return default estimate.
	 */
	if (var == NULL || !IsA(var, Var))
		return 0.1;

	relid = getrelid(var->varno, root->rtable);
	if (relid == InvalidOid)
		return 0.1;

	rel = find_base_rel(root, var->varno);

	if (rel->tuples <= 0.0 || rel->rows <= 0.0)
		return 0.1;				/* ensure we can divide below */

	tuple = SearchSysCache(STATRELATT,
						   ObjectIdGetDatum(relid),
						   Int16GetDatum(var->varattno),
						   0, 0);
	if (!HeapTupleIsValid(tuple))
	{
		/*
		 * Perhaps the Var is a system attribute; if so, it will have no
		 * entry in pg_statistic, but we may be able to guess something
		 * about its distribution anyway.
		 */
		switch (var->varattno)
		{
			case ObjectIdAttributeNumber:
			case SelfItemPointerAttributeNumber:
				/* these are unique, so buckets should be well-distributed */
				return 1.0 / (double) nbuckets;
			case TableOidAttributeNumber:
				/* hashing this is a terrible idea... */
				return 0.1;
		}
		return 0.1;
	}

	stats = (Form_pg_statistic) GETSTRUCT(tuple);

	/*
	 * Obtain number of distinct data values in raw relation.
	 */
	ndistinct = stats->stadistinct;
	if (ndistinct < 0.0)
		ndistinct = -ndistinct * rel->tuples;

	if (ndistinct <= 0.0)		/* ensure we can divide */
	{
		ReleaseSysCache(tuple);
		return 0.1;
	}

	/* Also compute avg freq of all distinct data values in raw relation */
	avgfreq = (1.0 - stats->stanullfrac) / ndistinct;

	/*
	 * Adjust ndistinct to account for restriction clauses.  Observe we
	 * are assuming that the data distribution is affected uniformly by
	 * the restriction clauses!
	 *
	 * XXX Possibly better way, but much more expensive: multiply by
	 * selectivity of rel's restriction clauses that mention the target
	 * Var.
	 */
	ndistinct *= rel->rows / rel->tuples;

	/*
	 * Initial estimate of bucketsize fraction is 1/nbuckets as long as
	 * the number of buckets is less than the expected number of distinct
	 * values; otherwise it is 1/ndistinct.
	 */
	if (ndistinct > (double) nbuckets)
		estfract = 1.0 / (double) nbuckets;
	else
		estfract = 1.0 / ndistinct;

	/*
	 * Look up the frequency of the most common value, if available.
	 */
	mcvfreq = 0.0;

	if (get_attstatsslot(tuple, var->vartype, var->vartypmod,
						 STATISTIC_KIND_MCV, InvalidOid,
						 NULL, NULL, &numbers, &nnumbers))
	{
		/*
		 * The first MCV stat is for the most common value.
		 */
		if (nnumbers > 0)
			mcvfreq = numbers[0];
		free_attstatsslot(var->vartype, NULL, 0,
						  numbers, nnumbers);
	}

	/*
	 * Adjust estimated bucketsize upward to account for skewed
	 * distribution.
	 */
	if (avgfreq > 0.0 && mcvfreq > avgfreq)
		estfract *= mcvfreq / avgfreq;

	/*
	 * Clamp bucketsize to sane range (the above adjustment could easily
	 * produce an out-of-range result).  We set the lower bound a little
	 * above zero, since zero isn't a very sane result.
	 */
	if (estfract < 1.0e-6)
		estfract = 1.0e-6;
	else if (estfract > 1.0)
		estfract = 1.0;

	ReleaseSysCache(tuple);

	return (Selectivity) estfract;
}

/*
 * cost_qual_eval
 *		Estimate the CPU costs of evaluating a WHERE clause.
 *		The input can be either an implicitly-ANDed list of boolean
 *		expressions, or a list of RestrictInfo nodes.
 *		The result includes both a one-time (startup) component,
 *		and a per-evaluation component.
 */
void
cost_qual_eval(QualCost *cost, List *quals)
{
	List	   *l;

	cost->startup = 0;
	cost->per_tuple = 0;

	/* We don't charge any cost for the implicit ANDing at top level ... */

	foreach(l, quals)
	{
		Node	   *qual = (Node *) lfirst(l);

		/*
		 * RestrictInfo nodes contain an eval_cost field reserved for this
		 * routine's use, so that it's not necessary to evaluate the qual
		 * clause's cost more than once.  If the clause's cost hasn't been
		 * computed yet, the field's startup value will contain -1.
		 */
		if (qual && IsA(qual, RestrictInfo))
		{
			RestrictInfo *restrictinfo = (RestrictInfo *) qual;

			if (restrictinfo->eval_cost.startup < 0)
			{
				restrictinfo->eval_cost.startup = 0;
				restrictinfo->eval_cost.per_tuple = 0;
				cost_qual_eval_walker((Node *) restrictinfo->clause,
									  &restrictinfo->eval_cost);
			}
			cost->startup += restrictinfo->eval_cost.startup;
			cost->per_tuple += restrictinfo->eval_cost.per_tuple;
		}
		else
		{
			/* If it's a bare expression, must always do it the hard way */
			cost_qual_eval_walker(qual, cost);
		}
	}
}

static bool
cost_qual_eval_walker(Node *node, QualCost *total)
{
	if (node == NULL)
		return false;

	/*
	 * Our basic strategy is to charge one cpu_operator_cost for each
	 * operator or function node in the given tree.  Vars and Consts are
	 * charged zero, and so are boolean operators (AND, OR, NOT).
	 * Simplistic, but a lot better than no model at all.
	 *
	 * Should we try to account for the possibility of short-circuit
	 * evaluation of AND/OR?
	 */
	if (IsA(node, FuncExpr) ||
		IsA(node, OpExpr) ||
		IsA(node, DistinctExpr))
	{
		total->per_tuple += cpu_operator_cost;
	}
	else if (IsA(node, SubLink))
	{
		/* This routine should not be applied to un-planned expressions */
		elog(ERROR, "cost_qual_eval: can't handle unplanned sub-select");
	}
	else if (IsA(node, SubPlan))
	{
		/*
		 * A subplan node in an expression typically indicates that the
		 * subplan will be executed on each evaluation, so charge accordingly.
		 * (Sub-selects that can be executed as InitPlans have already been
		 * removed from the expression.)
		 *
		 * An exception occurs when we have decided we can implement the
		 * subplan by hashing.
		 */
		SubPlan    *subplan = (SubPlan *) node;
		Plan	   *plan = subplan->plan;

		if (subplan->useHashTable)
		{
			/*
			 * If we are using a hash table for the subquery outputs, then
			 * the cost of evaluating the query is a one-time cost.
			 * We charge one cpu_operator_cost per tuple for the work of
			 * loading the hashtable, too.
			 */
			total->startup += plan->total_cost +
				cpu_operator_cost * plan->plan_rows;

			/*
			 * The per-tuple costs include the cost of evaluating the
			 * lefthand expressions, plus the cost of probing the hashtable.
			 * Recursion into the exprs list will handle the lefthand
			 * expressions properly, and will count one cpu_operator_cost
			 * for each comparison operator.  That is probably too low for
			 * the probing cost, but it's hard to make a better estimate,
			 * so live with it for now.
			 */
		}
		else
		{
			/*
			 * Otherwise we will be rescanning the subplan output on each
			 * evaluation.  We need to estimate how much of the output
			 * we will actually need to scan.  NOTE: this logic should
			 * agree with the estimates used by make_subplan() in
			 * plan/subselect.c.
			 */
			Cost		plan_run_cost = plan->total_cost - plan->startup_cost;

			if (subplan->subLinkType == EXISTS_SUBLINK)
			{
				/* we only need to fetch 1 tuple */
				total->per_tuple += plan_run_cost / plan->plan_rows;
			}
			else if (subplan->subLinkType == ALL_SUBLINK ||
					 subplan->subLinkType == ANY_SUBLINK)
			{
				/* assume we need 50% of the tuples */
				total->per_tuple += 0.50 * plan_run_cost;
				/* also charge a cpu_operator_cost per row examined */
				total->per_tuple += 0.50 * plan->plan_rows * cpu_operator_cost;
			}
			else
			{
				/* assume we need all tuples */
				total->per_tuple += plan_run_cost;
			}

			/*
			 * Also account for subplan's startup cost.
			 * If the subplan is uncorrelated or undirect correlated,
			 * AND its topmost node is a Sort or Material node, assume
			 * that we'll only need to pay its startup cost once;
			 * otherwise assume we pay the startup cost every time.
			 */
			if (subplan->parParam == NIL &&
				(IsA(plan, Sort) ||
				 IsA(plan, Material)))
				total->startup += plan->startup_cost;
			else
				total->per_tuple += plan->startup_cost;
		}
	}

	return expression_tree_walker(node, cost_qual_eval_walker,
								  (void *) total);
}

/*
 * approx_selectivity
 *		Quick-and-dirty estimation of clause selectivities.
 *		The input can be either an implicitly-ANDed list of boolean
 *		expressions, or a list of RestrictInfo nodes (typically the latter).
 *
 * The "quick" part comes from caching the selectivity estimates so we can
 * avoid recomputing them later.  (Since the same clauses are typically
 * examined over and over in different possible join trees, this makes a
 * big difference.)
 *
 * The "dirty" part comes from the fact that the selectivities of multiple
 * clauses are estimated independently and multiplied together.  Now
 * clauselist_selectivity often can't do any better than that anyhow, but
 * for some situations (such as range constraints) it is smarter.
 *
 * Since we are only using the results to estimate how many potential
 * output tuples are generated and passed through qpqual checking, it
 * seems OK to live with the approximation.
 */
static Selectivity
approx_selectivity(Query *root, List *quals, JoinType jointype)
{
	Selectivity total = 1.0;
	List	   *l;

	foreach(l, quals)
	{
		Node	   *qual = (Node *) lfirst(l);
		Selectivity selec;

		/*
		 * RestrictInfo nodes contain a this_selec field reserved for this
		 * routine's use, so that it's not necessary to evaluate the qual
		 * clause's selectivity more than once.  If the clause's
		 * selectivity hasn't been computed yet, the field will contain
		 * -1.
		 */
		if (qual && IsA(qual, RestrictInfo))
		{
			RestrictInfo *restrictinfo = (RestrictInfo *) qual;

			if (restrictinfo->this_selec < 0)
				restrictinfo->this_selec =
					clause_selectivity(root,
									   (Node *) restrictinfo->clause,
									   0,
									   jointype);
			selec = restrictinfo->this_selec;
		}
		else
		{
			/* If it's a bare expression, must always do it the hard way */
			selec = clause_selectivity(root, qual, 0, jointype);
		}
		total *= selec;
	}
	return total;
}

/*
 * set_baserel_size_estimates
 *		Set the size estimates for the given base relation.
 *
 * The rel's targetlist and restrictinfo list must have been constructed
 * already.
 *
 * We set the following fields of the rel node:
 *	rows: the estimated number of output tuples (after applying
 *		  restriction clauses).
 *	width: the estimated average output tuple width in bytes.
 *	baserestrictcost: estimated cost of evaluating baserestrictinfo clauses.
 */
void
set_baserel_size_estimates(Query *root, RelOptInfo *rel)
{
	double		temp;

	/* Should only be applied to base relations */
	Assert(rel->relid > 0);

	temp = rel->tuples *
		restrictlist_selectivity(root,
								 rel->baserestrictinfo,
								 0,
								 JOIN_INNER);

	/*
	 * Force estimate to be at least one row, to make explain output look
	 * better and to avoid possible divide-by-zero when interpolating
	 * cost.  Make it an integer, too.
	 */
	if (temp < 1.0)
		temp = 1.0;
	else
		temp = ceil(temp);

	rel->rows = temp;

	cost_qual_eval(&rel->baserestrictcost, rel->baserestrictinfo);

	set_rel_width(root, rel);
}

/*
 * set_joinrel_size_estimates
 *		Set the size estimates for the given join relation.
 *
 * The rel's targetlist must have been constructed already, and a
 * restriction clause list that matches the given component rels must
 * be provided.
 *
 * Since there is more than one way to make a joinrel for more than two
 * base relations, the results we get here could depend on which component
 * rel pair is provided.  In theory we should get the same answers no matter
 * which pair is provided; in practice, since the selectivity estimation
 * routines don't handle all cases equally well, we might not.  But there's
 * not much to be done about it.  (Would it make sense to repeat the
 * calculations for each pair of input rels that's encountered, and somehow
 * average the results?  Probably way more trouble than it's worth.)
 *
 * It's important that the results for symmetric JoinTypes be symmetric,
 * eg, (rel1, rel2, JOIN_LEFT) should produce the same result as (rel2,
 * rel1, JOIN_RIGHT).  Also, JOIN_IN should produce the same result as
 * JOIN_UNIQUE_INNER, likewise JOIN_REVERSE_IN == JOIN_UNIQUE_OUTER.
 *
 * We set the same relnode fields as set_baserel_size_estimates() does.
 */
void
set_joinrel_size_estimates(Query *root, RelOptInfo *rel,
						   RelOptInfo *outer_rel,
						   RelOptInfo *inner_rel,
						   JoinType jointype,
						   List *restrictlist)
{
	Selectivity selec;
	double		temp;
	UniquePath *upath;

	/*
	 * Compute joinclause selectivity.  Note that we are only considering
	 * clauses that become restriction clauses at this join level; we are
	 * not double-counting them because they were not considered in
	 * estimating the sizes of the component rels.
	 */
	selec = restrictlist_selectivity(root,
									 restrictlist,
									 0,
									 jointype);

	/*
	 * Basically, we multiply size of Cartesian product by selectivity.
	 *
	 * If we are doing an outer join, take that into account: the output
	 * must be at least as large as the non-nullable input.  (Is there any
	 * chance of being even smarter?)
	 *
	 * For JOIN_IN and variants, the Cartesian product is figured with
	 * respect to a unique-ified input, and then we can clamp to the size
	 * of the other input.
	 */
	switch (jointype)
	{
		case JOIN_INNER:
			temp = outer_rel->rows * inner_rel->rows * selec;
			break;
		case JOIN_LEFT:
			temp = outer_rel->rows * inner_rel->rows * selec;
			if (temp < outer_rel->rows)
				temp = outer_rel->rows;
			break;
		case JOIN_RIGHT:
			temp = outer_rel->rows * inner_rel->rows * selec;
			if (temp < inner_rel->rows)
				temp = inner_rel->rows;
			break;
		case JOIN_FULL:
			temp = outer_rel->rows * inner_rel->rows * selec;
			if (temp < outer_rel->rows)
				temp = outer_rel->rows;
			if (temp < inner_rel->rows)
				temp = inner_rel->rows;
			break;
		case JOIN_IN:
		case JOIN_UNIQUE_INNER:
			upath = create_unique_path(root, inner_rel,
									   inner_rel->cheapest_total_path);
			temp = outer_rel->rows * upath->rows * selec;
			if (temp > outer_rel->rows)
				temp = outer_rel->rows;
			break;
		case JOIN_REVERSE_IN:
		case JOIN_UNIQUE_OUTER:
			upath = create_unique_path(root, outer_rel,
									   outer_rel->cheapest_total_path);
			temp = upath->rows * inner_rel->rows * selec;
			if (temp > inner_rel->rows)
				temp = inner_rel->rows;
			break;
		default:
			elog(ERROR, "set_joinrel_size_estimates: unsupported join type %d",
				 (int) jointype);
			temp = 0;			/* keep compiler quiet */
			break;
	}

	/*
	 * Force estimate to be at least one row, to make explain output look
	 * better and to avoid possible divide-by-zero when interpolating
	 * cost.  Make it an integer, too.
	 */
	if (temp < 1.0)
		temp = 1.0;
	else
		temp = ceil(temp);

	rel->rows = temp;

	/*
	 * We could apply set_rel_width() to compute the output tuple width
	 * from scratch, but at present it's always just the sum of the input
	 * widths, so why work harder than necessary?  If relnode.c is ever
	 * taught to remove unneeded columns from join targetlists, go back to
	 * using set_rel_width here.
	 */
	rel->width = outer_rel->width + inner_rel->width;
}

/*
 * set_function_size_estimates
 *		Set the size estimates for a base relation that is a function call.
 *
 * The rel's targetlist and restrictinfo list must have been constructed
 * already.
 *
 * We set the following fields of the rel node:
 *	rows: the estimated number of output tuples (after applying
 *		  restriction clauses).
 *	width: the estimated average output tuple width in bytes.
 *	baserestrictcost: estimated cost of evaluating baserestrictinfo clauses.
 */
void
set_function_size_estimates(Query *root, RelOptInfo *rel)
{
	double		temp;

	/* Should only be applied to base relations that are functions */
	Assert(rel->relid > 0);
	Assert(rel->rtekind == RTE_FUNCTION);

	/*
	 * Estimate number of rows the function itself will return.
	 *
	 * XXX no idea how to do this yet; but should at least check whether
	 * function returns set or not...
	 */
	rel->tuples = 1000;

	/* Now estimate number of output rows */
	temp = rel->tuples *
		restrictlist_selectivity(root,
								 rel->baserestrictinfo,
								 0,
								 JOIN_INNER);

	/*
	 * Force estimate to be at least one row, to make explain output look
	 * better and to avoid possible divide-by-zero when interpolating
	 * cost.  Make it an integer, too.
	 */
	if (temp < 1.0)
		temp = 1.0;
	else
		temp = ceil(temp);

	rel->rows = temp;

	cost_qual_eval(&rel->baserestrictcost, rel->baserestrictinfo);

	set_rel_width(root, rel);
}

/*
 * set_rel_width
 *		Set the estimated output width of the relation.
 *
 * NB: this works best on base relations because it prefers to look at
 * real Vars.  It will fail to make use of pg_statistic info when applied
 * to a subquery relation, even if the subquery outputs are simple vars
 * that we could have gotten info for.  Is it worth trying to be smarter
 * about subqueries?
 */
static void
set_rel_width(Query *root, RelOptInfo *rel)
{
	int32		tuple_width = 0;
	List	   *tllist;

	foreach(tllist, rel->targetlist)
	{
		TargetEntry *tle = (TargetEntry *) lfirst(tllist);
		int32		item_width;

		/*
		 * If it's a Var, try to get statistical info from pg_statistic.
		 */
		if (tle->expr && IsA(tle->expr, Var))
		{
			Var		   *var = (Var *) tle->expr;
			Oid			relid;

			relid = getrelid(var->varno, root->rtable);
			if (relid != InvalidOid)
			{
				item_width = get_attavgwidth(relid, var->varattno);
				if (item_width > 0)
				{
					tuple_width += item_width;
					continue;
				}
			}
		}

		/*
		 * Not a Var, or can't find statistics for it.  Estimate using
		 * just the type info.
		 */
		item_width = get_typavgwidth(tle->resdom->restype,
									 tle->resdom->restypmod);
		Assert(item_width > 0);
		tuple_width += item_width;
	}
	Assert(tuple_width >= 0);
	rel->width = tuple_width;
}

/*
 * relation_byte_size
 *	  Estimate the storage space in bytes for a given number of tuples
 *	  of a given width (size in bytes).
 */
static double
relation_byte_size(double tuples, int width)
{
	return tuples * (MAXALIGN(width) + MAXALIGN(sizeof(HeapTupleData)));
}
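
/*
 * For example (assuming 8-byte MAXALIGN and a 24-byte HeapTupleData,
 * both of which are platform-dependent): 1000 tuples of width 36 are
 * estimated at 1000 * (MAXALIGN(36) + 24) = 1000 * (40 + 24) = 64000
 * bytes.
 */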

/*
 * page_size
 *	  Returns an estimate of the number of pages covered by a given
 *	  number of tuples of a given width (size in bytes).
 */
static double
page_size(double tuples, int width)
{
	return ceil(relation_byte_size(tuples, width) / BLCKSZ);
}