1 /*-------------------------------------------------------------------------
2 *
3 * costsize.c
4 * Routines to compute (and set) relation sizes and path costs
6 * Path costs are measured in units of disk accesses: one sequential page
7 * fetch has cost 1. All else is scaled relative to a page fetch, using
8 * the scaling parameters
10 * random_page_cost Cost of a non-sequential page fetch
11 * cpu_tuple_cost Cost of typical CPU time to process a tuple
12 * cpu_index_tuple_cost Cost of typical CPU time to process an index tuple
13 * cpu_operator_cost Cost of CPU time to process a typical WHERE operator
15 * We also use a rough estimate "effective_cache_size" of the number of
16 * disk pages in Postgres + OS-level disk cache. (We can't simply use
17 * NBuffers for this purpose because that would ignore the effects of
18 * the kernel's disk cache.)
20 * Obviously, taking constants for these values is an oversimplification,
21 * but it's tough enough to get any useful estimates even at this level of
22 * detail. Note that all of these parameters are user-settable, in case
23 * the default values are drastically off for a particular platform.
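* For illustration: with random_page_cost = 4.0 and cpu_tuple_cost = 0.01
* (typical default values), a sequential scan of a 1000-page, 100000-tuple
* table would be costed at about 1000 + 100000 * 0.01 = 2000, while fetching
* the same 1000 pages in random order would cost about 1000 * 4.0 = 4000 in
* I/O charges alone.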
25 * We compute two separate costs for each path:
26 * total_cost: total estimated cost to fetch all tuples
27 * startup_cost: cost that is expended before first tuple is fetched
28 * In some scenarios, such as when there is a LIMIT or we are implementing
29 * an EXISTS(...) sub-select, it is not necessary to fetch all tuples of the
30 * path's result. A caller can estimate the cost of fetching a partial
31 * result by interpolating between startup_cost and total_cost. In detail:
32 * actual_cost = startup_cost +
33 * (total_cost - startup_cost) * tuples_to_fetch / path->parent->rows;
34 * Note that a base relation's rows count (and, by extension, plan_rows for
35 * plan nodes below the LIMIT node) are set without regard to any LIMIT, so
36 * that this equation works properly. (Also, these routines guarantee not to
37 * set the rows count to zero, so there will be no zero divide.) The LIMIT is
38 * applied as a top-level plan node.
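* For example, a path with startup_cost = 0, total_cost = 2000 and an
* estimated 100000 rows would be charged 0 + 2000 * 1000 / 100000 = 20
* to fetch its first 1000 tuples.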
40 * For largely historical reasons, most of the routines in this module use
41 * the passed result Path only to store their startup_cost and total_cost
42 * results into. All the input data they need is passed as separate
43 * parameters, even though much of it could be extracted from the Path.
44 * An exception is made for the cost_XXXjoin() routines, which expect all
45 * the non-cost fields of the passed XXXPath to be filled in.
48 * Portions Copyright (c) 1996-2003, PostgreSQL Global Development Group
49 * Portions Copyright (c) 1994, Regents of the University of California
52 * $PostgreSQL: pgsql/src/backend/optimizer/path/costsize.c,v 1.127 2004/05/26 04:41:21 neilc Exp $
54 *-------------------------------------------------------------------------
55 */
57 #include "postgres.h"
59 #include <math.h>
61 #include "catalog/pg_statistic.h"
62 #include "executor/nodeHash.h"
63 #include "miscadmin.h"
64 #include "optimizer/clauses.h"
65 #include "optimizer/cost.h"
66 #include "optimizer/pathnode.h"
67 #include "optimizer/plancat.h"
68 #include "parser/parsetree.h"
69 #include "utils/selfuncs.h"
70 #include "utils/lsyscache.h"
71 #include "utils/syscache.h"
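/* The divisors below are ln(2) and ln(6), so LOG2() and LOG6() compute base-2 and base-6 logarithms */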
74 #define LOG2(x) (log(x) / 0.693147180559945)
75 #define LOG6(x) (log(x) / 1.79175946922805)
78 * Some Paths return less than the nominal number of rows of their parent
79 * relations; join nodes need to do this to get the correct input count:
81 #define PATH_ROWS(path) \
82 (IsA(path, UniquePath) ? \
83 ((UniquePath *) (path))->rows : \
84 (path)->parent->rows)
87 double effective_cache_size = DEFAULT_EFFECTIVE_CACHE_SIZE;
88 double random_page_cost = DEFAULT_RANDOM_PAGE_COST;
89 double cpu_tuple_cost = DEFAULT_CPU_TUPLE_COST;
90 double cpu_index_tuple_cost = DEFAULT_CPU_INDEX_TUPLE_COST;
91 double cpu_operator_cost = DEFAULT_CPU_OPERATOR_COST;
93 Cost disable_cost = 100000000.0;
95 bool enable_seqscan = true;
96 bool enable_indexscan = true;
97 bool enable_tidscan = true;
98 bool enable_sort = true;
99 bool enable_hashagg = true;
100 bool enable_nestloop = true;
101 bool enable_mergejoin = true;
102 bool enable_hashjoin = true;
105 static bool cost_qual_eval_walker(Node *node, QualCost *total);
106 static Selectivity approx_selectivity(Query *root, List *quals,
107 JoinType jointype);
108 static Selectivity join_in_selectivity(JoinPath *path, Query *root);
109 static void set_rel_width(Query *root, RelOptInfo *rel);
110 static double relation_byte_size(double tuples, int width);
111 static double page_size(double tuples, int width);
116 * Force a row-count estimate to a sane value.
119 clamp_row_est(double nrows)
122 * Force estimate to be at least one row, to make explain output look
123 * better and to avoid possible divide-by-zero when interpolating
124 * costs. Make it an integer, too.
125 */
126 if (nrows < 1.0)
127 nrows = 1.0;
128 else
129 nrows = ceil(nrows);
131 return nrows;
137 * Determines and returns the cost of scanning a relation sequentially.
140 cost_seqscan(Path *path, Query *root,
141 RelOptInfo *baserel)
143 Cost startup_cost = 0;
144 Cost run_cost = 0;
145 Cost cpu_per_tuple;
147 /* Should only be applied to base relations */
148 Assert(baserel->relid > 0);
149 Assert(baserel->rtekind == RTE_RELATION);
151 if (!enable_seqscan)
152 startup_cost += disable_cost;
157 * The cost of reading a page sequentially is 1.0, by definition. Note
158 * that the Unix kernel will typically do some amount of read-ahead
159 * optimization, so that this cost is less than the true cost of
160 * reading a page from disk. We ignore that issue here, but must take
161 * it into account when estimating the cost of non-sequential
162 * accesses!
164 run_cost += baserel->pages; /* sequential fetches with cost 1.0 */
167 startup_cost += baserel->baserestrictcost.startup;
168 cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
169 run_cost += cpu_per_tuple * baserel->tuples;
171 path->startup_cost = startup_cost;
172 path->total_cost = startup_cost + run_cost;
176 * cost_nonsequential_access
177 * Estimate the cost of accessing one page at random from a relation
178 * (or sort temp file) of the given size in pages.
180 * The simplistic model that the cost is random_page_cost is what we want
181 * to use for large relations; but for small ones that is a serious
182 * overestimate because of the effects of caching. This routine tries to
183 * account for that.
185 * Unfortunately we don't have any good way of estimating the effective cache
186 * size we are working with --- we know that Postgres itself has NBuffers
187 * internal buffers, but the size of the kernel's disk cache is uncertain,
188 * and how much of it we get to use is even less certain. We punt the problem
189 * for now by assuming we are given an effective_cache_size parameter.
191 * Given a guesstimated cache size, we estimate the actual I/O cost per page
192 * with the entirely ad-hoc equations:
193 * if relpages >= effective_cache_size:
194 * random_page_cost * (1 - (effective_cache_size/relpages)/2)
195 * if relpages < effective_cache_size:
196 * 1 + (random_page_cost/2-1) * (relpages/effective_cache_size) ** 2
197 * These give the right asymptotic behavior (=> 1.0 as relpages becomes
198 * small, => random_page_cost as it becomes large) and meet in the middle
199 * with the estimate that the cache is about 50% effective for a relation
200 * of the same size as effective_cache_size. (XXX this is probably all
201 * wrong, but I haven't been able to find any theory about how effective
202 * a disk cache should be presumed to be.)
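* As a sanity check on these equations: at relpages == effective_cache_size
* the two branches meet, both reducing to random_page_cost / 2, so the
* estimate is continuous at the crossover point.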
205 cost_nonsequential_access(double relpages)
207 double relsize;
209 /* don't crash on bad input data */
210 if (relpages <= 0.0 || effective_cache_size <= 0.0)
211 return random_page_cost;
213 relsize = relpages / effective_cache_size;
215 if (relsize >= 1.0)
216 return random_page_cost * (1.0 - 0.5 / relsize);
217 else
218 return 1.0 + (random_page_cost * 0.5 - 1.0) * relsize * relsize;
223 * Determines and returns the cost of scanning a relation using an index.
225 * NOTE: an indexscan plan node can actually represent several passes,
226 * but here we consider the cost of just one pass.
228 * 'root' is the query root
229 * 'baserel' is the base relation the index is for
230 * 'index' is the index to be used
231 * 'indexQuals' is the list of applicable qual clauses (implicit AND semantics)
232 * 'is_injoin' is T if we are considering using the index scan as the inside
233 * of a nestloop join (hence, some of the indexQuals are join clauses)
235 * NOTE: 'indexQuals' must contain only clauses usable as index restrictions.
236 * Any additional quals evaluated as qpquals may reduce the number of returned
237 * tuples, but they won't reduce the number of tuples we have to fetch from
238 * the table, so they don't reduce the scan cost.
240 * NOTE: as of 7.5, indexQuals is a list of RestrictInfo nodes, where formerly
241 * it was a list of bare clause expressions.
244 cost_index(Path *path, Query *root,
245 RelOptInfo *baserel,
246 IndexOptInfo *index,
247 List *indexQuals,
248 bool is_injoin)
250 Cost startup_cost = 0;
251 Cost run_cost = 0;
252 Cost indexStartupCost;
253 Cost indexTotalCost;
254 Selectivity indexSelectivity;
255 double indexCorrelation,
256 csquared;
257 Cost min_IO_cost,
258 max_IO_cost;
259 Cost cpu_per_tuple;
260 double tuples_fetched;
261 double pages_fetched;
262 double T,
263 b;
265 /* Should only be applied to base relations */
266 Assert(IsA(baserel, RelOptInfo) &&
267 IsA(index, IndexOptInfo));
268 Assert(baserel->relid > 0);
269 Assert(baserel->rtekind == RTE_RELATION);
271 if (!enable_indexscan)
272 startup_cost += disable_cost;
275 * Call index-access-method-specific code to estimate the processing
276 * cost for scanning the index, as well as the selectivity of the
277 * index (ie, the fraction of main-table tuples we will have to
278 * retrieve) and its correlation to the main-table tuple order.
280 OidFunctionCall8(index->amcostestimate,
281 PointerGetDatum(root),
282 PointerGetDatum(baserel),
283 PointerGetDatum(index),
284 PointerGetDatum(indexQuals),
285 PointerGetDatum(&indexStartupCost),
286 PointerGetDatum(&indexTotalCost),
287 PointerGetDatum(&indexSelectivity),
288 PointerGetDatum(&indexCorrelation));
290 /* all costs for touching index itself included here */
291 startup_cost += indexStartupCost;
292 run_cost += indexTotalCost - indexStartupCost;
295 * Estimate number of main-table tuples and pages fetched.
297 * When the index ordering is uncorrelated with the table ordering,
298 * we use an approximation proposed by Mackert and Lohman, "Index Scans
299 * Using a Finite LRU Buffer: A Validated I/O Model", ACM Transactions
300 * on Database Systems, Vol. 14, No. 3, September 1989, Pages 401-424.
301 * The Mackert and Lohman approximation is that the number of pages
302 * fetched is
304 * min(2TNs/(2T+Ns), T) when T <= b
305 * 2TNs/(2T+Ns) when T > b and Ns <= 2Tb/(2T-b)
306 * b + (Ns - 2Tb/(2T-b))*(T-b)/T when T > b and Ns > 2Tb/(2T-b)
307 * where
308 * T = # pages in table
309 * N = # tuples in table
310 * s = selectivity = fraction of table to be scanned
311 * b = # buffer pages available (we include kernel space here)
313 * When the index ordering is exactly correlated with the table ordering
314 * (just after a CLUSTER, for example), the number of pages fetched should
315 * be just sT. What's more, these will be sequential fetches, not the
316 * random fetches that occur in the uncorrelated case. So, depending on
317 * the extent of correlation, we should estimate the actual I/O cost
318 * somewhere between s * T * 1.0 and PF * random_cost. We currently
319 * interpolate linearly between these two endpoints based on the
320 * correlation squared (XXX is that appropriate?).
322 * In any case the number of tuples fetched is Ns.
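* To make the approximation concrete (illustrative numbers): with T = 1000
* pages, N = 100000 tuples, s = 0.01 and b >= T, we have Ns = 1000 and the
* first case applies: min(2*1000*1000 / (2*1000 + 1000), 1000) = about 667
* pages fetched, i.e. the model credits us with re-finding some pages in
* cache rather than charging one page per tuple.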
326 tuples_fetched = clamp_row_est(indexSelectivity * baserel->tuples);
328 /* This part is the Mackert and Lohman formula */
330 T = (baserel->pages > 1) ? (double) baserel->pages : 1.0;
331 b = (effective_cache_size > 1) ? effective_cache_size : 1.0;
333 if (T <= b)
334 {
335 pages_fetched =
336 (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
337 if (pages_fetched > T)
338 pages_fetched = T;
339 }
340 else
341 {
342 double lim;
344 lim = (2.0 * T * b) / (2.0 * T - b);
345 if (tuples_fetched <= lim)
347 pages_fetched =
348 (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
349 else
352 pages_fetched =
353 b + (tuples_fetched - lim) * (T - b) / T;
354 }
358 * min_IO_cost corresponds to the perfectly correlated case
359 * (csquared=1), max_IO_cost to the perfectly uncorrelated case
360 * (csquared=0). Note that we just charge random_page_cost per page
361 * in the uncorrelated case, rather than using
362 * cost_nonsequential_access, since we've already accounted for
363 * caching effects by using the Mackert model.
365 min_IO_cost = ceil(indexSelectivity * T);
366 max_IO_cost = pages_fetched * random_page_cost;
369 * Now interpolate based on estimated index order correlation to get
370 * total disk I/O cost for main table accesses.
372 csquared = indexCorrelation * indexCorrelation;
374 run_cost += max_IO_cost + csquared * (min_IO_cost - max_IO_cost);
377 * Estimate CPU costs per tuple.
379 * Normally the indexquals will be removed from the list of restriction
380 * clauses that we have to evaluate as qpquals, so we should subtract
381 * their costs from baserestrictcost. But if we are doing a join then
382 * some of the indexquals are join clauses and shouldn't be
383 * subtracted. Rather than work out exactly how much to subtract, we
384 * don't subtract anything.
386 startup_cost += baserel->baserestrictcost.startup;
387 cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
388 if (!is_injoin)
389 {
391 QualCost index_qual_cost;
393 cost_qual_eval(&index_qual_cost, indexQuals);
394 /* any startup cost still has to be paid ... */
395 cpu_per_tuple -= index_qual_cost.per_tuple;
396 }
398 run_cost += cpu_per_tuple * tuples_fetched;
400 path->startup_cost = startup_cost;
401 path->total_cost = startup_cost + run_cost;
406 * Determines and returns the cost of scanning a relation using TIDs.
409 cost_tidscan(Path *path, Query *root,
410 RelOptInfo *baserel, List *tideval)
412 Cost startup_cost = 0;
413 Cost run_cost = 0;
414 Cost cpu_per_tuple;
415 int ntuples = length(tideval);
417 /* Should only be applied to base relations */
418 Assert(baserel->relid > 0);
419 Assert(baserel->rtekind == RTE_RELATION);
421 if (!enable_tidscan)
422 startup_cost += disable_cost;
424 /* disk costs --- assume each tuple on a different page */
425 run_cost += random_page_cost * ntuples;
428 startup_cost += baserel->baserestrictcost.startup;
429 cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
430 run_cost += cpu_per_tuple * ntuples;
432 path->startup_cost = startup_cost;
433 path->total_cost = startup_cost + run_cost;
438 * Determines and returns the cost of scanning a subquery RTE.
441 cost_subqueryscan(Path *path, RelOptInfo *baserel)
443 Cost startup_cost;
444 Cost run_cost;
445 Cost cpu_per_tuple;
447 /* Should only be applied to base relations that are subqueries */
448 Assert(baserel->relid > 0);
449 Assert(baserel->rtekind == RTE_SUBQUERY);
452 * Cost of path is cost of evaluating the subplan, plus cost of
453 * evaluating any restriction clauses that will be attached to the
454 * SubqueryScan node, plus cpu_tuple_cost to account for selection and
455 * projection overhead.
457 path->startup_cost = baserel->subplan->startup_cost;
458 path->total_cost = baserel->subplan->total_cost;
460 startup_cost = baserel->baserestrictcost.startup;
461 cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
462 run_cost = cpu_per_tuple * baserel->tuples;
464 path->startup_cost += startup_cost;
465 path->total_cost += startup_cost + run_cost;
470 * Determines and returns the cost of scanning a function RTE.
473 cost_functionscan(Path *path, Query *root, RelOptInfo *baserel)
475 Cost startup_cost = 0;
476 Cost run_cost = 0;
477 Cost cpu_per_tuple;
479 /* Should only be applied to base relations that are functions */
480 Assert(baserel->relid > 0);
481 Assert(baserel->rtekind == RTE_FUNCTION);
484 * For now, estimate function's cost at one operator eval per function
485 * call. Someday we should revive the function cost estimate columns
486 * in pg_proc...
487 */
488 cpu_per_tuple = cpu_operator_cost;
490 /* Add scanning CPU costs */
491 startup_cost += baserel->baserestrictcost.startup;
492 cpu_per_tuple += cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
493 run_cost += cpu_per_tuple * baserel->tuples;
495 path->startup_cost = startup_cost;
496 path->total_cost = startup_cost + run_cost;
501 * Determines and returns the cost of sorting a relation, including
502 * the cost of reading the input data.
504 * If the total volume of data to sort is less than work_mem, we will do
505 * an in-memory sort, which requires no I/O and about t*log2(t) tuple
506 * comparisons for t tuples.
508 * If the total volume exceeds work_mem, we switch to a tape-style merge
509 * algorithm. There will still be about t*log2(t) tuple comparisons in
510 * total, but we will also need to write and read each tuple once per
511 * merge pass. We expect about ceil(log6(r)) merge passes where r is the
512 * number of initial runs formed (log6 because tuplesort.c uses six-tape
513 * merging). Since the average initial run should be about twice work_mem,
514 * we have
515 * disk traffic = 2 * relsize * ceil(log6(p / (2*work_mem)))
516 * cpu = comparison_cost * t * log2(t)
518 * The disk traffic is assumed to be half sequential and half random
519 * accesses (XXX can't we refine that guess?)
521 * We charge two operator evals per tuple comparison, which should be in
522 * the right ballpark in most cases.
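* A worked example with made-up numbers: sorting 1000000 tuples totalling
* 96MB with work_mem = 1MB gives nruns = 96 / 2 = 48 initial runs,
* ceil(log6(48)) = 3 merge passes, and 2 * 12288 * 3 = 73728 page accesses,
* while the comparison term contributes roughly 2 * cpu_operator_cost *
* 1000000 * log2(1000000), i.e. about 40 million operator evals' worth.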
524 * 'pathkeys' is a list of sort keys
525 * 'input_cost' is the total cost for reading the input data
526 * 'tuples' is the number of tuples in the relation
527 * 'width' is the average tuple width in bytes
529 * NOTE: some callers currently pass NIL for pathkeys because they
530 * can't conveniently supply the sort keys. Since this routine doesn't
531 * currently do anything with pathkeys anyway, that doesn't matter...
532 * but if it ever does, it should react gracefully to lack of key data.
533 * (Actually, the thing we'd most likely be interested in is just the number
534 * of sort keys, which all callers *could* supply.)
537 cost_sort(Path *path, Query *root,
538 List *pathkeys, Cost input_cost, double tuples, int width)
540 Cost startup_cost = input_cost;
541 Cost run_cost = 0;
542 double nbytes = relation_byte_size(tuples, width);
543 long work_mem_bytes = work_mem * 1024L;
545 if (!enable_sort)
546 startup_cost += disable_cost;
549 * We want to be sure the cost of a sort is never estimated as zero,
550 * even if passed-in tuple count is zero. Besides, mustn't do
551 * log(0)...
552 */
553 if (tuples < 2.0)
554 tuples = 2.0;
559 * Assume about two operator evals per tuple comparison and N log2 N
560 * comparisons
562 startup_cost += 2.0 * cpu_operator_cost * tuples * LOG2(tuples);
565 if (nbytes > work_mem_bytes)
566 {
567 double npages = ceil(nbytes / BLCKSZ);
568 double nruns = nbytes / (work_mem_bytes * 2);
569 double log_runs = ceil(LOG6(nruns));
570 double npageaccesses;
572 if (log_runs < 1.0)
573 log_runs = 1.0;
574 npageaccesses = 2.0 * npages * log_runs;
575 /* Assume half are sequential (cost 1), half are not */
576 startup_cost += npageaccesses *
577 (1.0 + cost_nonsequential_access(npages)) * 0.5;
578 }
581 * Also charge a small amount (arbitrarily set equal to operator cost)
582 * per extracted tuple.
584 run_cost += cpu_operator_cost * tuples;
586 path->startup_cost = startup_cost;
587 path->total_cost = startup_cost + run_cost;
592 * Determines and returns the cost of materializing a relation, including
593 * the cost of reading the input data.
595 * If the total volume of data to materialize exceeds work_mem, we will need
596 * to write it to disk, so the cost is much higher in that case.
599 cost_material(Path *path,
600 Cost input_cost, double tuples, int width)
602 Cost startup_cost = input_cost;
603 Cost run_cost = 0;
604 double nbytes = relation_byte_size(tuples, width);
605 long work_mem_bytes = work_mem * 1024L;
608 if (nbytes > work_mem_bytes)
609 {
610 double npages = ceil(nbytes / BLCKSZ);
612 /* We'll write during startup and read during retrieval */
613 startup_cost += npages;
614 run_cost += npages;
615 }
618 * Also charge a small amount per extracted tuple. We use
619 * cpu_tuple_cost so that it doesn't appear worthwhile to materialize
620 * a bare seqscan.
622 run_cost += cpu_tuple_cost * tuples;
624 path->startup_cost = startup_cost;
625 path->total_cost = startup_cost + run_cost;
630 * Determines and returns the cost of performing an Agg plan node,
631 * including the cost of its input.
633 * Note: when aggstrategy == AGG_SORTED, caller must ensure that input costs
634 * are for appropriately-sorted input.
637 cost_agg(Path *path, Query *root,
638 AggStrategy aggstrategy, int numAggs,
639 int numGroupCols, double numGroups,
640 Cost input_startup_cost, Cost input_total_cost,
641 double input_tuples)
643 Cost startup_cost;
644 Cost total_cost;
647 * We charge one cpu_operator_cost per aggregate function per input
648 * tuple, and another one per output tuple (corresponding to transfn
649 * and finalfn calls respectively). If we are grouping, we charge an
650 * additional cpu_operator_cost per grouping column per input tuple
651 * for grouping comparisons.
653 * We will produce a single output tuple if not grouping, and a tuple per
654 * group otherwise.
656 * Note: in this cost model, AGG_SORTED and AGG_HASHED have exactly the
657 * same total CPU cost, but AGG_SORTED has lower startup cost. If the
658 * input path is already sorted appropriately, AGG_SORTED should be
659 * preferred (since it has no risk of memory overflow). This will
660 * happen as long as the computed total costs are indeed exactly equal
661 * --- but if there's roundoff error we might do the wrong thing. So
662 * be sure that the computations below form the same intermediate
663 * values in the same order.
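* To illustrate with made-up numbers: for numAggs = 2, numGroupCols = 1,
* input_tuples = 1000 and numGroups = 10, both strategies charge
* cpu_operator_cost * (1000*1 + 1000*2 + 10*2) = 3020 * cpu_operator_cost
* on top of the input cost; they differ only in how much of that total
* counts as startup cost.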
665 if (aggstrategy == AGG_PLAIN)
666 {
667 startup_cost = input_total_cost;
668 startup_cost += cpu_operator_cost * (input_tuples + 1) * numAggs;
669 /* we aren't grouping */
670 total_cost = startup_cost;
671 }
672 else if (aggstrategy == AGG_SORTED)
673 {
674 /* Here we are able to deliver output on-the-fly */
675 startup_cost = input_startup_cost;
676 total_cost = input_total_cost;
677 /* calcs phrased this way to match HASHED case, see note above */
678 total_cost += cpu_operator_cost * input_tuples * numGroupCols;
679 total_cost += cpu_operator_cost * input_tuples * numAggs;
680 total_cost += cpu_operator_cost * numGroups * numAggs;
681 }
682 else
683 {
684 /* must be AGG_HASHED */
685 startup_cost = input_total_cost;
686 startup_cost += cpu_operator_cost * input_tuples * numGroupCols;
687 startup_cost += cpu_operator_cost * input_tuples * numAggs;
688 total_cost = startup_cost;
689 total_cost += cpu_operator_cost * numGroups * numAggs;
690 }
692 path->startup_cost = startup_cost;
693 path->total_cost = total_cost;
698 * Determines and returns the cost of performing a Group plan node,
699 * including the cost of its input.
701 * Note: caller must ensure that input costs are for appropriately-sorted
702 * input.
705 cost_group(Path *path, Query *root,
706 int numGroupCols, double numGroups,
707 Cost input_startup_cost, Cost input_total_cost,
708 double input_tuples)
710 Cost startup_cost;
711 Cost total_cost;
713 startup_cost = input_startup_cost;
714 total_cost = input_total_cost;
717 * Charge one cpu_operator_cost per comparison per input tuple. We
718 * assume all columns get compared for most of the tuples.
720 total_cost += cpu_operator_cost * input_tuples * numGroupCols;
722 path->startup_cost = startup_cost;
723 path->total_cost = total_cost;
728 * Determines and returns the cost of joining two relations using the
729 * nested loop algorithm.
731 * 'path' is already filled in except for the cost fields
734 cost_nestloop(NestPath *path, Query *root)
736 Path *outer_path = path->outerjoinpath;
737 Path *inner_path = path->innerjoinpath;
738 Cost startup_cost = 0;
739 Cost run_cost = 0;
740 Cost cpu_per_tuple;
741 QualCost restrict_qual_cost;
742 double outer_path_rows = PATH_ROWS(outer_path);
743 double inner_path_rows = PATH_ROWS(inner_path);
744 double ntuples;
745 Selectivity joininfactor;
748 * If inner path is an indexscan, be sure to use its estimated output row
749 * count, which may be lower than the restriction-clause-only row count of
750 * its parent. (We don't include this case in the PATH_ROWS macro because
751 * it applies *only* to a nestloop's inner relation.)
753 if (IsA(inner_path, IndexPath))
754 inner_path_rows = ((IndexPath *) inner_path)->rows;
756 if (!enable_nestloop)
757 startup_cost += disable_cost;
760 * If we're doing JOIN_IN then we will stop scanning inner tuples for
761 * an outer tuple as soon as we have one match. Account for the
762 * effects of this by scaling down the cost estimates in proportion to
763 * the JOIN_IN selectivity. (This assumes that all the quals
764 * attached to the join are IN quals, which should be true.)
766 joininfactor = join_in_selectivity(path, root);
768 /* cost of source data */
771 * NOTE: clearly, we must pay both outer and inner paths' startup_cost
772 * before we can start returning tuples, so the join's startup cost is
773 * their sum. What's not so clear is whether the inner path's
774 * startup_cost must be paid again on each rescan of the inner path.
775 * This is not true if the inner path is materialized or is a
776 * hashjoin, but probably is true otherwise.
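* For instance, with 100 outer rows and an inner path whose startup_cost
* is 10, a plain inner scan is charged 99 * 10 = 990 extra run cost below,
* while a materialized or hashed inner path avoids that charge.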
778 startup_cost += outer_path->startup_cost + inner_path->startup_cost;
779 run_cost += outer_path->total_cost - outer_path->startup_cost;
780 if (IsA(inner_path, MaterialPath) ||
781 IsA(inner_path, HashPath))
782 {
783 /* charge only run cost for each iteration of inner path */
784 }
785 else
786 {
787 /*
788 * charge startup cost for each iteration of inner path, except we
789 * already charged the first startup_cost in our own startup
790 */
791 run_cost += (outer_path_rows - 1) * inner_path->startup_cost;
792 }
793 run_cost += outer_path_rows *
794 (inner_path->total_cost - inner_path->startup_cost) * joininfactor;
797 * Compute number of tuples processed (not number emitted!)
799 ntuples = outer_path_rows * inner_path_rows * joininfactor;
802 cost_qual_eval(&restrict_qual_cost, path->joinrestrictinfo);
803 startup_cost += restrict_qual_cost.startup;
804 cpu_per_tuple = cpu_tuple_cost + restrict_qual_cost.per_tuple;
805 run_cost += cpu_per_tuple * ntuples;
807 path->path.startup_cost = startup_cost;
808 path->path.total_cost = startup_cost + run_cost;
813 * Determines and returns the cost of joining two relations using the
814 * merge join algorithm.
816 * 'path' is already filled in except for the cost fields
818 * Notes: path's mergeclauses should be a subset of the joinrestrictinfo list;
819 * outersortkeys and innersortkeys are lists of the keys to be used
820 * to sort the outer and inner relations, or NIL if no explicit
821 * sort is needed because the source path is already ordered.
824 cost_mergejoin(MergePath *path, Query *root)
826 Path *outer_path = path->jpath.outerjoinpath;
827 Path *inner_path = path->jpath.innerjoinpath;
828 List *mergeclauses = path->path_mergeclauses;
829 List *outersortkeys = path->outersortkeys;
830 List *innersortkeys = path->innersortkeys;
831 Cost startup_cost = 0;
832 Cost run_cost = 0;
833 Cost cpu_per_tuple;
834 Selectivity merge_selec;
835 QualCost merge_qual_cost;
836 QualCost qp_qual_cost;
837 RestrictInfo *firstclause;
838 double outer_path_rows = PATH_ROWS(outer_path);
839 double inner_path_rows = PATH_ROWS(inner_path);
840 double outer_rows,
841 inner_rows;
842 double mergejointuples,
843 rescannedtuples;
844 double rescanratio;
845 Selectivity outerscansel,
846 innerscansel;
847 Selectivity joininfactor;
848 Path sort_path; /* dummy for result of cost_sort */
850 if (!enable_mergejoin)
851 startup_cost += disable_cost;
854 * Compute cost and selectivity of the mergequals and qpquals (other
855 * restriction clauses) separately. We use approx_selectivity here
856 * for speed --- in most cases, any errors won't affect the result
857 * much.
859 * Note: it's probably bogus to use the normal selectivity calculation
860 * here when either the outer or inner path is a UniquePath.
862 merge_selec = approx_selectivity(root, mergeclauses,
863 path->jpath.jointype);
864 cost_qual_eval(&merge_qual_cost, mergeclauses);
865 cost_qual_eval(&qp_qual_cost, path->jpath.joinrestrictinfo);
866 qp_qual_cost.startup -= merge_qual_cost.startup;
867 qp_qual_cost.per_tuple -= merge_qual_cost.per_tuple;
869 /* approx # tuples passing the merge quals */
870 mergejointuples = clamp_row_est(merge_selec * outer_path_rows * inner_path_rows);
873 * When there are equal merge keys in the outer relation, the
874 * mergejoin must rescan any matching tuples in the inner relation.
875 * This means re-fetching inner tuples. Our cost model for this is
876 * that a re-fetch costs the same as an original fetch, which is
877 * probably an overestimate; but on the other hand we ignore the
878 * bookkeeping costs of mark/restore. Not clear if it's worth
879 * developing a more refined model.
881 * The number of re-fetches can be estimated approximately as size of
882 * merge join output minus size of inner relation. Assume that the
883 * distinct key values are 1, 2, ..., and denote the number of values
884 * of each key in the outer relation as m1, m2, ...; in the inner
885 * relation, n1, n2, ... Then we have
887 * size of join = m1 * n1 + m2 * n2 + ...
889 * number of rescanned tuples = (m1 - 1) * n1 + (m2 - 1) * n2 + ...
890 * = m1 * n1 + m2 * n2 + ... - (n1 + n2 + ...)
891 * = size of join - size of inner relation
893 * This equation works correctly for outer tuples having no inner match
894 * (nk = 0), but not for inner tuples having no outer match (mk = 0);
895 * we are effectively subtracting those from the number of rescanned
896 * tuples, when we should not. Can we do better without expensive
897 * selectivity computations?
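* A small example: if the outer keys are {1, 1, 2} and the inner keys are
* also {1, 1, 2}, then size of join = 2*2 + 1*1 = 5, size of inner = 3,
* so we estimate 5 - 3 = 2 rescanned tuples, matching
* (2-1)*2 + (1-1)*1 = 2.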
899 if (IsA(outer_path, UniquePath))
900 rescannedtuples = 0;
901 else
902 {
903 rescannedtuples = mergejointuples - inner_path_rows;
904 /* Must clamp because of possible underestimate */
905 if (rescannedtuples < 0)
906 rescannedtuples = 0;
907 }
908 /* We'll inflate inner run cost this much to account for rescanning */
909 rescanratio = 1.0 + (rescannedtuples / inner_path_rows);
912 * A merge join will stop as soon as it exhausts either input stream.
913 * Estimate fraction of the left and right inputs that will actually
914 * need to be scanned. We use only the first (most significant) merge
915 * clause for this purpose.
917 * Since this calculation is somewhat expensive, and will be the same for
918 * all mergejoin paths associated with the merge clause, we cache the
919 * results in the RestrictInfo node.
921 if (mergeclauses)
922 {
923 firstclause = (RestrictInfo *) linitial(mergeclauses);
924 if (firstclause->left_mergescansel < 0) /* not computed yet? */
925 mergejoinscansel(root, (Node *) firstclause->clause,
926 &firstclause->left_mergescansel,
927 &firstclause->right_mergescansel);
929 if (bms_is_subset(firstclause->left_relids, outer_path->parent->relids))
930 {
931 /* left side of clause is outer */
932 outerscansel = firstclause->left_mergescansel;
933 innerscansel = firstclause->right_mergescansel;
934 }
935 else
936 {
937 /* left side of clause is inner */
938 outerscansel = firstclause->right_mergescansel;
939 innerscansel = firstclause->left_mergescansel;
940 }
941 }
942 else
943 {
944 /* cope with clauseless mergejoin */
945 outerscansel = innerscansel = 1.0;
946 }
948 /* convert selectivity to row count; must scan at least one row */
949 outer_rows = clamp_row_est(outer_path_rows * outerscansel);
950 inner_rows = clamp_row_est(inner_path_rows * innerscansel);
953 * Readjust scan selectivities to account for above rounding. This is
954 * normally an insignificant effect, but when there are only a few
955 * rows in the inputs, failing to do this makes for a large percentage
956 * error.
957 */
958 outerscansel = outer_rows / outer_path_rows;
959 innerscansel = inner_rows / inner_path_rows;
961 /* cost of source data */
963 if (outersortkeys) /* do we need to sort outer? */
964 {
965 cost_sort(&sort_path,
966 root,
967 outersortkeys,
968 outer_path->total_cost,
969 outer_path->parent->rows,
970 outer_path->parent->width);
971 startup_cost += sort_path.startup_cost;
972 run_cost += (sort_path.total_cost - sort_path.startup_cost)
973 * outerscansel;
974 }
975 else
976 {
977 startup_cost += outer_path->startup_cost;
978 run_cost += (outer_path->total_cost - outer_path->startup_cost)
979 * outerscansel;
980 }
982 if (innersortkeys) /* do we need to sort inner? */
983 {
984 cost_sort(&sort_path,
985 root,
986 innersortkeys,
987 inner_path->total_cost,
988 inner_path->parent->rows,
989 inner_path->parent->width);
990 startup_cost += sort_path.startup_cost;
991 run_cost += (sort_path.total_cost - sort_path.startup_cost)
992 * innerscansel * rescanratio;
993 }
994 else
995 {
996 startup_cost += inner_path->startup_cost;
997 run_cost += (inner_path->total_cost - inner_path->startup_cost)
998 * innerscansel * rescanratio;
999 }
1004 * If we're doing JOIN_IN then we will stop outputting inner tuples
1005 * for an outer tuple as soon as we have one match. Account for the
1006 * effects of this by scaling down the cost estimates in proportion to
1007 * the expected output size. (This assumes that all the quals
1008 * attached to the join are IN quals, which should be true.)
1010 joininfactor = join_in_selectivity(&path->jpath, root);
1013 * The number of tuple comparisons needed is approximately number of
1014 * outer rows plus number of inner rows plus number of rescanned
1015 * tuples (can we refine this?). At each one, we need to evaluate the
1016 * mergejoin quals. NOTE: JOIN_IN mode does not save any work here,
1017 * so do NOT include joininfactor.
1019 startup_cost += merge_qual_cost.startup;
1020 run_cost += merge_qual_cost.per_tuple *
1021 (outer_rows + inner_rows * rescanratio);
1024 * For each tuple that gets through the mergejoin proper, we charge
1025 * cpu_tuple_cost plus the cost of evaluating additional restriction
1026 * clauses that are to be applied at the join. (This is pessimistic
1027 * since not all of the quals may get evaluated at each tuple.) This
1028 * work is skipped in JOIN_IN mode, so apply the factor.
1030 startup_cost += qp_qual_cost.startup;
1031 cpu_per_tuple = cpu_tuple_cost + qp_qual_cost.per_tuple;
1032 run_cost += cpu_per_tuple * mergejointuples * joininfactor;
1034 path->jpath.path.startup_cost = startup_cost;
1035 path->jpath.path.total_cost = startup_cost + run_cost;
1040 * Determines and returns the cost of joining two relations using the
1041 * hash join algorithm.
1043 * 'path' is already filled in except for the cost fields
1045 * Note: path's hashclauses should be a subset of the joinrestrictinfo list
1048 cost_hashjoin(HashPath *path, Query *root)
1050 Path *outer_path = path->jpath.outerjoinpath;
1051 Path *inner_path = path->jpath.innerjoinpath;
1052 List *hashclauses = path->path_hashclauses;
1053 Cost startup_cost = 0;
1054 Cost run_cost = 0;
1055 Cost cpu_per_tuple;
1056 Selectivity hash_selec;
1057 QualCost hash_qual_cost;
1058 QualCost qp_qual_cost;
1059 double hashjointuples;
1060 double outer_path_rows = PATH_ROWS(outer_path);
1061 double inner_path_rows = PATH_ROWS(inner_path);
1062 double outerbytes = relation_byte_size(outer_path_rows,
1063 outer_path->parent->width);
1064 double innerbytes = relation_byte_size(inner_path_rows,
1065 inner_path->parent->width);
1066 int num_hashclauses = length(hashclauses);
1067 int virtualbuckets;
1068 int physicalbuckets;
1069 int numbatches;
1070 Selectivity innerbucketsize;
1071 Selectivity joininfactor;
1072 ListCell *hcl;
1074 if (!enable_hashjoin)
1075 startup_cost += disable_cost;
1078 * Compute cost and selectivity of the hashquals and qpquals (other
1079 * restriction clauses) separately. We use approx_selectivity here
1080 * for speed --- in most cases, any errors won't affect the result
1081 * much.
1083 * Note: it's probably bogus to use the normal selectivity calculation
1084 * here when either the outer or inner path is a UniquePath.
1086 hash_selec = approx_selectivity(root, hashclauses,
1087 path->jpath.jointype);
1088 cost_qual_eval(&hash_qual_cost, hashclauses);
1089 cost_qual_eval(&qp_qual_cost, path->jpath.joinrestrictinfo);
1090 qp_qual_cost.startup -= hash_qual_cost.startup;
1091 qp_qual_cost.per_tuple -= hash_qual_cost.per_tuple;
1093 /* approx # tuples passing the hash quals */
1094 hashjointuples = clamp_row_est(hash_selec * outer_path_rows * inner_path_rows);
1096 /* cost of source data */
1097 startup_cost += outer_path->startup_cost;
1098 run_cost += outer_path->total_cost - outer_path->startup_cost;
1099 startup_cost += inner_path->total_cost;
1102 * Cost of computing hash function: must do it once per input tuple.
1103 * We charge one cpu_operator_cost for each column's hash function.
1105 * XXX when a hashclause is more complex than a single operator, we
1106 * really should charge the extra eval costs of the left or right
1107 * side, as appropriate, here. This seems more work than it's worth
1108 * at the moment.
1109 */
1110 startup_cost += cpu_operator_cost * num_hashclauses * inner_path_rows;
1111 run_cost += cpu_operator_cost * num_hashclauses * outer_path_rows;
1113 /* Get hash table size that executor would use for inner relation */
1114 ExecChooseHashTableSize(inner_path_rows,
1115 inner_path->parent->width,
1116 &virtualbuckets,
1117 &physicalbuckets,
1118 &numbatches);
1121 * Determine bucketsize fraction for inner relation. We use the
1122 * smallest bucketsize estimated for any individual hashclause; this
1123 * is undoubtedly conservative.
1125 * BUT: if inner relation has been unique-ified, we can assume it's good
1126 * for hashing. This is important both because it's the right answer,
1127 * and because we avoid contaminating the cache with a value that's
1128 * wrong for non-unique-ified paths.
1130 if (IsA(inner_path, UniquePath))
1131 innerbucketsize = 1.0 / virtualbuckets;
1132 else
1133 {
1134 innerbucketsize = 1.0;
1135 foreach(hcl, hashclauses)
1136 {
1137 RestrictInfo *restrictinfo = (RestrictInfo *) lfirst(hcl);
1138 Selectivity thisbucketsize;
1140 Assert(IsA(restrictinfo, RestrictInfo));
1143 * First we have to figure out which side of the hashjoin
1144 * clause is the inner side.
1146 * Since we tend to visit the same clauses over and over when
1147 * planning a large query, we cache the bucketsize estimate in
1148 * the RestrictInfo node to avoid repeated lookups of
1149 * statistics.
1150 */
1151 if (bms_is_subset(restrictinfo->right_relids,
1152 inner_path->parent->relids))
1153 {
1154 /* righthand side is inner */
1155 thisbucketsize = restrictinfo->right_bucketsize;
1156 if (thisbucketsize < 0)
1157 {
1158 /* not cached yet */
1159 thisbucketsize =
1160 estimate_hash_bucketsize(root,
1161 get_rightop(restrictinfo->clause),
1162 virtualbuckets);
1163 restrictinfo->right_bucketsize = thisbucketsize;
1164 }
1165 }
1166 else
1167 {
1168 Assert(bms_is_subset(restrictinfo->left_relids,
1169 inner_path->parent->relids));
1170 /* lefthand side is inner */
1171 thisbucketsize = restrictinfo->left_bucketsize;
1172 if (thisbucketsize < 0)
1173 {
1174 /* not cached yet */
1175 thisbucketsize =
1176 estimate_hash_bucketsize(root,
1177 get_leftop(restrictinfo->clause),
1178 virtualbuckets);
1179 restrictinfo->left_bucketsize = thisbucketsize;
1180 }
1181 }
1183 if (innerbucketsize > thisbucketsize)
1184 innerbucketsize = thisbucketsize;
1185 }
1186 }
1189 * if inner relation is too big then we will need to "batch" the join,
1190 * which implies writing and reading most of the tuples to disk an
1191 * extra time. Charge one cost unit per page of I/O (correct since it
1192 * should be nice and sequential...). Writing the inner rel counts as
1193 * startup cost, all the rest as run cost.
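* For example (made-up sizes): with an inner relation of 1000 pages and an
* outer of 5000 pages, batching adds 1000 to startup cost and
* 1000 + 2 * 5000 = 11000 to run cost in the computation below.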
1194 */
1195 if (numbatches > 1)
1196 {
1197 double outerpages = page_size(outer_path_rows,
1198 outer_path->parent->width);
1199 double innerpages = page_size(inner_path_rows,
1200 inner_path->parent->width);
1202 startup_cost += innerpages;
1203 run_cost += innerpages + 2 * outerpages;
1204 }
1209 * If we're doing JOIN_IN then we will stop comparing inner tuples to
1210 * an outer tuple as soon as we have one match. Account for the
1211 * effects of this by scaling down the cost estimates in proportion to
1212 * the expected output size. (This assumes that all the quals
1213 * attached to the join are IN quals, which should be true.)
1215 joininfactor = join_in_selectivity(&path->jpath, root);
1218 * The number of tuple comparisons needed is the number of outer
1219 * tuples times the typical number of tuples in a hash bucket, which
1220 * is the inner relation size times its bucketsize fraction. At each
1221 * one, we need to evaluate the hashjoin quals.
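* For example, 1000 outer rows probed against a 100000-row inner relation
* with innerbucketsize = 0.01 implies about 1000 * 1000 = 1000000
* comparisons, each charged at hash_qual_cost.per_tuple.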
1223 startup_cost += hash_qual_cost.startup;
1224 run_cost += hash_qual_cost.per_tuple *
1225 outer_path_rows * clamp_row_est(inner_path_rows * innerbucketsize) *
1226 joininfactor;
1229 * For each tuple that gets through the hashjoin proper, we charge
1230 * cpu_tuple_cost plus the cost of evaluating additional restriction
1231 * clauses that are to be applied at the join. (This is pessimistic
1232 * since not all of the quals may get evaluated at each tuple.)
1234 startup_cost += qp_qual_cost.startup;
1235 cpu_per_tuple = cpu_tuple_cost + qp_qual_cost.per_tuple;
1236 run_cost += cpu_per_tuple * hashjointuples * joininfactor;
1239 * Bias against putting larger relation on inside. We don't want an
1240 * absolute prohibition, though, since larger relation might have
1241 * better bucketsize --- and we can't trust the size estimates
1242 * unreservedly, anyway. Instead, inflate the run cost by the square
1243 * root of the size ratio. (Why square root? No real good reason,
1244 * but it seems reasonable...)
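* (For instance, an inner relation estimated at four times the outer's
* size inflates run_cost by sqrt(4) = 2.)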
1246 * Note: before 7.4 we implemented this by inflating startup cost; but if
1247 * there's a disable_cost component in the input paths' startup cost,
1248 * that unfairly penalizes the hash. Probably it'd be better to keep
1249 * track of disable penalty separately from cost.
1251 if (innerbytes > outerbytes && outerbytes > 0)
1252 run_cost *= sqrt(innerbytes / outerbytes);
1254 path->jpath.path.startup_cost = startup_cost;
1255 path->jpath.path.total_cost = startup_cost + run_cost;
1261 * Estimate the CPU costs of evaluating a WHERE clause.
1262 * The input can be either an implicitly-ANDed list of boolean
1263 * expressions, or a list of RestrictInfo nodes.
1264 * The result includes both a one-time (startup) component,
1265 * and a per-evaluation component.
1268 cost_qual_eval(QualCost *cost, List *quals)
1270 ListCell *l;
1272 cost->startup = 0;
1273 cost->per_tuple = 0;
1275 /* We don't charge any cost for the implicit ANDing at top level ... */
1277 foreach(l, quals)
1278 {
1279 Node *qual = (Node *) lfirst(l);
1282 * RestrictInfo nodes contain an eval_cost field reserved for this
1283 * routine's use, so that it's not necessary to evaluate the qual
1284 * clause's cost more than once. If the clause's cost hasn't been
1285 * computed yet, the field's startup value will contain -1.
1287 if (qual && IsA(qual, RestrictInfo))
1289 RestrictInfo *restrictinfo = (RestrictInfo *) qual;
1291 if (restrictinfo->eval_cost.startup < 0)
1292 {
1293 restrictinfo->eval_cost.startup = 0;
1294 restrictinfo->eval_cost.per_tuple = 0;
1295 cost_qual_eval_walker((Node *) restrictinfo->clause,
1296 &restrictinfo->eval_cost);
1297 }
1298 cost->startup += restrictinfo->eval_cost.startup;
1299 cost->per_tuple += restrictinfo->eval_cost.per_tuple;
1300 }
1301 else
1302 {
1303 /* If it's a bare expression, must always do it the hard way */
1304 cost_qual_eval_walker(qual, cost);
1305 }
1306 }
1310 cost_qual_eval_walker(Node *node, QualCost *total)
1312 if (node == NULL)
1313 return false;
1316 * Our basic strategy is to charge one cpu_operator_cost for each
1317 * operator or function node in the given tree. Vars and Consts are
1318 * charged zero, and so are boolean operators (AND, OR, NOT).
1319 * Simplistic, but a lot better than no model at all.
1321 * Should we try to account for the possibility of short-circuit
1322 * evaluation of AND/OR?
1324 if (IsA(node, FuncExpr) ||
1325 IsA(node, OpExpr) ||
1326 IsA(node, DistinctExpr) ||
1327 IsA(node, NullIfExpr))
1328 total->per_tuple += cpu_operator_cost;
1329 else if (IsA(node, ScalarArrayOpExpr))
1331 /* should charge more than 1 op cost, but how many? */
1332 total->per_tuple += cpu_operator_cost * 10;
1334 else if (IsA(node, SubLink))
1336 /* This routine should not be applied to un-planned expressions */
1337 elog(ERROR, "cannot handle unplanned sub-select");
1339 else if (IsA(node, SubPlan))
1340 {
1341 /*
1342 * A subplan node in an expression typically indicates that the
1343 * subplan will be executed on each evaluation, so charge
1344 * accordingly. (Sub-selects that can be executed as InitPlans
1345 * have already been removed from the expression.)
1347 * An exception occurs when we have decided we can implement the
1348 * subplan by hashing.
1351 SubPlan *subplan = (SubPlan *) node;
1352 Plan *plan = subplan->plan;
1354 if (subplan->useHashTable)
1355 {
1356 /*
1357 * If we are using a hash table for the subquery outputs, then
1358 * the cost of evaluating the query is a one-time cost. We
1359 * charge one cpu_operator_cost per tuple for the work of
1360 * loading the hashtable, too.
1362 total->startup += plan->total_cost +
1363 cpu_operator_cost * plan->plan_rows;
1366 * The per-tuple costs include the cost of evaluating the
1367 * lefthand expressions, plus the cost of probing the
1368 * hashtable. Recursion into the exprs list will handle the
1369 * lefthand expressions properly, and will count one
1370 * cpu_operator_cost for each comparison operator. That is
1371 * probably too low for the probing cost, but it's hard to
1372 * make a better estimate, so live with it for now.
1373 */
1374 }
1375 else
1376 {
1377 /*
1378 * Otherwise we will be rescanning the subplan output on each
1379 * evaluation. We need to estimate how much of the output we
1380 * will actually need to scan. NOTE: this logic should agree
1381 * with the estimates used by make_subplan() in
1382 * plan/subselect.c.
1383 */
1384 Cost plan_run_cost = plan->total_cost - plan->startup_cost;
1386 if (subplan->subLinkType == EXISTS_SUBLINK)
1387 {
1388 /* we only need to fetch 1 tuple */
1389 total->per_tuple += plan_run_cost / plan->plan_rows;
1390 }
1391 else if (subplan->subLinkType == ALL_SUBLINK ||
1392 subplan->subLinkType == ANY_SUBLINK)
1393 {
1394 /* assume we need 50% of the tuples */
1395 total->per_tuple += 0.50 * plan_run_cost;
1396 /* also charge a cpu_operator_cost per row examined */
1397 total->per_tuple += 0.50 * plan->plan_rows * cpu_operator_cost;
1398 }
1399 else
1400 {
1401 /* assume we need all tuples */
1402 total->per_tuple += plan_run_cost;
1403 }
1406 * Also account for subplan's startup cost. If the subplan is
1407 * uncorrelated or undirect correlated, AND its topmost node
1408 * is a Sort or Material node, assume that we'll only need to
1409 * pay its startup cost once; otherwise assume we pay the
1410 * startup cost every time.
1412 if (subplan->parParam == NIL &&
1413 (IsA(plan, Sort) ||
1414 IsA(plan, Material)))
1415 total->startup += plan->startup_cost;
1416 else
1417 total->per_tuple += plan->startup_cost;
1418 }
1421 return expression_tree_walker(node, cost_qual_eval_walker,
1422 (void *) total);
1427 * approx_selectivity
1428 * Quick-and-dirty estimation of clause selectivities.
1429 * The input can be either an implicitly-ANDed list of boolean
1430 * expressions, or a list of RestrictInfo nodes (typically the latter).
1432 * This is quick-and-dirty because we bypass clauselist_selectivity, and
1433 * simply multiply the independent clause selectivities together. Now
1434 * clauselist_selectivity often can't do any better than that anyhow, but
1435 * for some situations (such as range constraints) it is smarter. However,
1436 * we can't effectively cache the results of clauselist_selectivity, whereas
1437 * the individual clause selectivities can be and are cached.
1439 * Since we are only using the results to estimate how many potential
1440 * output tuples are generated and passed through qpqual checking, it
1441 * seems OK to live with the approximation.
1444 approx_selectivity(Query *root, List *quals, JoinType jointype)
1446 Selectivity total = 1.0;
1447 ListCell *l;
1449 foreach(l, quals)
1450 {
1451 Node *qual = (Node *) lfirst(l);
1453 /* Note that clause_selectivity will be able to cache its result */
1454 total *= clause_selectivity(root, qual, 0, jointype);
1455 }
1457 return total;
1461 * set_baserel_size_estimates
1462 * Set the size estimates for the given base relation.
1464 * The rel's targetlist and restrictinfo list must have been constructed
1465 * already.
1467 * We set the following fields of the rel node:
1468 * rows: the estimated number of output tuples (after applying
1469 * restriction clauses).
1470 * width: the estimated average output tuple width in bytes.
1471 * baserestrictcost: estimated cost of evaluating baserestrictinfo clauses.
1474 set_baserel_size_estimates(Query *root, RelOptInfo *rel)
1476 double nrows;
1478 /* Should only be applied to base relations */
1479 Assert(rel->relid > 0);
1481 nrows = rel->tuples *
1482 clauselist_selectivity(root,
1483 rel->baserestrictinfo,
1484 0,
1485 JOIN_INNER);
1487 rel->rows = clamp_row_est(nrows);
1489 cost_qual_eval(&rel->baserestrictcost, rel->baserestrictinfo);
1491 set_rel_width(root, rel);
1495 * set_joinrel_size_estimates
1496 * Set the size estimates for the given join relation.
1498 * The rel's targetlist must have been constructed already, and a
1499 * restriction clause list that matches the given component rels must
1500 * be provided.
1502 * Since there is more than one way to make a joinrel for more than two
1503 * base relations, the results we get here could depend on which component
1504 * rel pair is provided. In theory we should get the same answers no matter
1505 * which pair is provided; in practice, since the selectivity estimation
1506 * routines don't handle all cases equally well, we might not. But there's
1507 * not much to be done about it. (Would it make sense to repeat the
1508 * calculations for each pair of input rels that's encountered, and somehow
1509 * average the results? Probably way more trouble than it's worth.)
1511 * It's important that the results for symmetric JoinTypes be symmetric,
1512 * eg, (rel1, rel2, JOIN_LEFT) should produce the same result as (rel2,
1513 * rel1, JOIN_RIGHT). Also, JOIN_IN should produce the same result as
1514 * JOIN_UNIQUE_INNER, likewise JOIN_REVERSE_IN == JOIN_UNIQUE_OUTER.
1516 * We set only the rows field here. The width field was already set by
1517 * build_joinrel_tlist, and baserestrictcost is not used for join rels.
1520 set_joinrel_size_estimates(Query *root, RelOptInfo *rel,
1521 RelOptInfo *outer_rel,
1522 RelOptInfo *inner_rel,
1523 JoinType jointype,
1524 List *restrictlist)
1526 Selectivity selec;
1527 double nrows;
1528 UniquePath *upath;
1531 * Compute joinclause selectivity. Note that we are only considering
1532 * clauses that become restriction clauses at this join level; we are
1533 * not double-counting them because they were not considered in
1534 * estimating the sizes of the component rels.
1536 selec = clauselist_selectivity(root,
1537 restrictlist,
1538 0,
1539 jointype);
1542 * Basically, we multiply size of Cartesian product by selectivity.
1544 * If we are doing an outer join, take that into account: the output must
1545 * be at least as large as the non-nullable input. (Is there any
1546 * chance of being even smarter?)
1548 * For JOIN_IN and variants, the Cartesian product is figured with
1549 * respect to a unique-ified input, and then we can clamp to the size
1550 * of the other input.
1552 switch (jointype)
1553 {
1554 case JOIN_INNER:
1555 nrows = outer_rel->rows * inner_rel->rows * selec;
1556 break;
1557 case JOIN_LEFT:
1558 nrows = outer_rel->rows * inner_rel->rows * selec;
1559 if (nrows < outer_rel->rows)
1560 nrows = outer_rel->rows;
1561 break;
1562 case JOIN_RIGHT:
1563 nrows = outer_rel->rows * inner_rel->rows * selec;
1564 if (nrows < inner_rel->rows)
1565 nrows = inner_rel->rows;
1566 break;
1567 case JOIN_FULL:
1568 nrows = outer_rel->rows * inner_rel->rows * selec;
1569 if (nrows < outer_rel->rows)
1570 nrows = outer_rel->rows;
1571 if (nrows < inner_rel->rows)
1572 nrows = inner_rel->rows;
1573 break;
1574 case JOIN_IN:
1575 case JOIN_UNIQUE_INNER:
1576 upath = create_unique_path(root, inner_rel,
1577 inner_rel->cheapest_total_path);
1578 nrows = outer_rel->rows * upath->rows * selec;
1579 if (nrows > outer_rel->rows)
1580 nrows = outer_rel->rows;
1581 break;
1582 case JOIN_REVERSE_IN:
1583 case JOIN_UNIQUE_OUTER:
1584 upath = create_unique_path(root, outer_rel,
1585 outer_rel->cheapest_total_path);
1586 nrows = upath->rows * inner_rel->rows * selec;
1587 if (nrows > inner_rel->rows)
1588 nrows = inner_rel->rows;
1589 break;
1590 default:
1591 elog(ERROR, "unrecognized join type: %d", (int) jointype);
1592 nrows = 0; /* keep compiler quiet */
1593 break;
1594 }
1596 rel->rows = clamp_row_est(nrows);
1600 * join_in_selectivity
1601 * Determines the factor by which a JOIN_IN join's result is expected
1602 * to be smaller than an ordinary inner join.
1604 * 'path' is already filled in except for the cost fields
1607 join_in_selectivity(JoinPath *path, Query *root)
1609 RelOptInfo *innerrel;
1610 UniquePath *innerunique;
1611 Selectivity selec;
1612 double nrows;
1614 /* Return 1.0 whenever it's not JOIN_IN */
1615 if (path->jointype != JOIN_IN)
1616 return 1.0;
1619 * Return 1.0 if the inner side is already known unique. The case where
1620 * the inner path is already a UniquePath probably cannot happen in
1621 * current usage, but check it anyway for completeness. The interesting
1622 * case is where we've determined the inner relation itself is unique,
1623 * which we can check by looking at the rows estimate for its UniquePath.
1625 if (IsA(path->innerjoinpath, UniquePath))
1626 return 1.0;
1627 innerrel = path->innerjoinpath->parent;
1628 innerunique = create_unique_path(root,
1629 innerrel,
1630 innerrel->cheapest_total_path);
1631 if (innerunique->rows >= innerrel->rows)
1632 return 1.0;
1635 * Compute same result set_joinrel_size_estimates would compute
1636 * for JOIN_INNER. Note that we use the input rels' absolute size
1637 * estimates, not PATH_ROWS() which might be less; if we used PATH_ROWS()
1638 * we'd be double-counting the effects of any join clauses used in
1639 * the input scans.
1641 selec = clauselist_selectivity(root,
1642 path->joinrestrictinfo,
1643 0,
1644 JOIN_INNER);
1645 nrows = path->outerjoinpath->parent->rows * innerrel->rows * selec;
1647 nrows = clamp_row_est(nrows);
1649 /* See if it's larger than the actual JOIN_IN size estimate */
1650 if (nrows > path->path.parent->rows)
1651 return path->path.parent->rows / nrows;
1653 return 1.0;
1657 * set_function_size_estimates
1658 * Set the size estimates for a base relation that is a function call.
1660 * The rel's targetlist and restrictinfo list must have been constructed
1661 * already.
1663 * We set the same fields as set_baserel_size_estimates.
1666 set_function_size_estimates(Query *root, RelOptInfo *rel)
1668 /* Should only be applied to base relations that are functions */
1669 Assert(rel->relid > 0);
1670 Assert(rel->rtekind == RTE_FUNCTION);
1673 * Estimate number of rows the function itself will return.
1675 * XXX no idea how to do this yet; but should at least check whether
1676 * function returns set or not...
1677 */
1678 rel->tuples = 1000;
1680 /* Now estimate number of output rows, etc */
1681 set_baserel_size_estimates(root, rel);
1687 * Set the estimated output width of a base relation.
1689 * NB: this works best on plain relations because it prefers to look at
1690 * real Vars. It will fail to make use of pg_statistic info when applied
1691 * to a subquery relation, even if the subquery outputs are simple vars
1692 * that we could have gotten info for. Is it worth trying to be smarter
1695 * The per-attribute width estimates are cached for possible re-use while
1696 * building join relations.
1699 set_rel_width(Query *root, RelOptInfo *rel)
1701 int32 tuple_width = 0;
1702 ListCell *tllist;
1704 foreach(tllist, FastListValue(&rel->reltargetlist))
1706 Var *var = (Var *) lfirst(tllist);
1707 int ndx = var->varattno - rel->min_attr;
1708 Oid relid;
1709 int32 item_width;
1711 Assert(IsA(var, Var));
1714 * The width probably hasn't been cached yet, but may as well
1715 * check.
1716 */
1717 if (rel->attr_widths[ndx] > 0)
1718 {
1719 tuple_width += rel->attr_widths[ndx];
1720 continue;
1721 }
1723 relid = getrelid(var->varno, root->rtable);
1724 if (relid != InvalidOid)
1725 {
1726 item_width = get_attavgwidth(relid, var->varattno);
1727 if (item_width > 0)
1728 {
1729 rel->attr_widths[ndx] = item_width;
1730 tuple_width += item_width;
1731 continue;
1732 }
1733 }
1736 * Not a plain relation, or can't find statistics for it. Estimate
1737 * using just the type info.
1739 item_width = get_typavgwidth(var->vartype, var->vartypmod);
1740 Assert(item_width > 0);
1741 rel->attr_widths[ndx] = item_width;
1742 tuple_width += item_width;
1744 Assert(tuple_width >= 0);
1745 rel->width = tuple_width;
1749 * relation_byte_size
1750 * Estimate the storage space in bytes for a given number of tuples
1751 * of a given width (size in bytes).
1754 relation_byte_size(double tuples, int width)
1756 return tuples * (MAXALIGN(width) + MAXALIGN(sizeof(HeapTupleHeaderData)));
1761 * Returns an estimate of the number of pages covered by a given
1762 * number of tuples of a given width (size in bytes).
1765 page_size(double tuples, int width)
1767 return ceil(relation_byte_size(tuples, width) / BLCKSZ);