/*-------------------------------------------------------------------------
 *
 * costsize.c
 *	  Routines to compute (and set) relation sizes and path costs
 *
 * Path costs are measured in units of disk accesses: one sequential page
 * fetch has cost 1.  All else is scaled relative to a page fetch, using
 * the scaling parameters
 *
 *	random_page_cost		Cost of a non-sequential page fetch
 *	cpu_tuple_cost			Cost of typical CPU time to process a tuple
 *	cpu_index_tuple_cost	Cost of typical CPU time to process an index tuple
 *	cpu_operator_cost		Cost of CPU time to process a typical WHERE operator
 *
 * We also use a rough estimate "effective_cache_size" of the number of
 * disk pages in Postgres + OS-level disk cache.  (We can't simply use
 * NBuffers for this purpose because that would ignore the effects of
 * the kernel's disk cache.)
 *
 * Obviously, taking constants for these values is an oversimplification,
 * but it's tough enough to get any useful estimates even at this level of
 * detail.  Note that all of these parameters are user-settable, in case
 * the default values are drastically off for a particular platform.
 *
 * We compute two separate costs for each path:
 *		total_cost: total estimated cost to fetch all tuples
 *		startup_cost: cost that is expended before first tuple is fetched
 * In some scenarios, such as when there is a LIMIT or we are implementing
 * an EXISTS(...) sub-select, it is not necessary to fetch all tuples of the
 * path's result.  A caller can estimate the cost of fetching a partial
 * result by interpolating between startup_cost and total_cost.  In detail:
 *		actual_cost = startup_cost +
 *			(total_cost - startup_cost) * tuples_to_fetch / path->parent->rows;
 * Note that a relation's rows count (and, by extension, a Plan's plan_rows)
 * is set without regard to any LIMIT, so that this equation works properly.
 * (Also, these routines guarantee not to set the rows count to zero, so there
 * will be no zero divide.)  RelOptInfos, Paths, and Plans themselves never
 * have zero rows.
 *
 *
 * Portions Copyright (c) 1996-2000, PostgreSQL, Inc
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
 *	  $Header: /cvsroot/pgsql/src/backend/optimizer/path/costsize.c,v 1.63 2000/09/29 18:21:32 tgl Exp $
 *
 *-------------------------------------------------------------------------
 */
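/*
 * Worked example of the interpolation rule above (numbers invented for
 * illustration): a path with startup_cost = 10, total_cost = 110 and
 * parent->rows = 1000, asked for 100 tuples, is charged
 *		10 + (110 - 10) * 100 / 1000 = 20
 * i.e. the startup cost plus one-tenth of the run cost.
 */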
#include "postgres.h"

#include <math.h>

#include "executor/nodeHash.h"
#include "miscadmin.h"
#include "optimizer/clauses.h"
#include "optimizer/cost.h"
#include "utils/lsyscache.h"
/*
 * The length of a variable-length field in bytes (stupid estimate...)
 */
#define _DEFAULT_ATTRIBUTE_WIDTH_ 12

/* log(x) taken to fixed bases: base 2 for comparison counts, base 6 for merge passes */
#define LOG2(x)  (log(x) / 0.693147180559945)	/* ln(2) */
#define LOG6(x)  (log(x) / 1.79175946922805)	/* ln(6) */
double		effective_cache_size = DEFAULT_EFFECTIVE_CACHE_SIZE;
double		random_page_cost = DEFAULT_RANDOM_PAGE_COST;
double		cpu_tuple_cost = DEFAULT_CPU_TUPLE_COST;
double		cpu_index_tuple_cost = DEFAULT_CPU_INDEX_TUPLE_COST;
double		cpu_operator_cost = DEFAULT_CPU_OPERATOR_COST;

Cost		disable_cost = 100000000.0;

bool		enable_seqscan = true;
bool		enable_indexscan = true;
bool		enable_tidscan = true;
bool		enable_sort = true;
bool		enable_nestloop = true;
bool		enable_mergejoin = true;
bool		enable_hashjoin = true;
static bool cost_qual_eval_walker(Node *node, Cost *total);
static void set_rel_width(Query *root, RelOptInfo *rel);
static int	compute_attribute_width(TargetEntry *tlistentry);
static double relation_byte_size(double tuples, int width);
static double page_size(double tuples, int width);
/*
 * cost_seqscan
 *	  Determines and returns the cost of scanning a relation sequentially.
 *
 * If the relation is a temporary to be materialized from a query
 * embedded within a data field (determined by 'relid' containing an
 * attribute reference), then a predetermined constant is returned (we
 * have NO IDEA how big the result of a POSTQUEL procedure is going to be).
 *
 * Note: for historical reasons, this routine and the others in this module
 * use the passed result Path only to store their startup_cost and total_cost
 * results into.  All the input data they need is passed as separate
 * parameters, even though much of it could be extracted from the result Path.
 */
void
cost_seqscan(Path *path, RelOptInfo *baserel)
{
	Cost		startup_cost = 0;
	Cost		run_cost = 0;
	Cost		cpu_per_tuple;

	/* Should only be applied to base relations */
	Assert(length(baserel->relids) == 1);
	Assert(!baserel->issubquery);

	if (!enable_seqscan)
		startup_cost += disable_cost;

	/*
	 * disk costs
	 *
	 * The cost of reading a page sequentially is 1.0, by definition.
	 * Note that the Unix kernel will typically do some amount of
	 * read-ahead optimization, so that this cost is less than the
	 * true cost of reading a page from disk.  We ignore that issue
	 * here, but must take it into account when estimating the cost of
	 * non-sequential accesses!
	 */
	run_cost += baserel->pages; /* sequential fetches with cost 1.0 */

	/* CPU costs */
	cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost;
	run_cost += cpu_per_tuple * baserel->tuples;

	path->startup_cost = startup_cost;
	path->total_cost = startup_cost + run_cost;
}
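#ifdef COSTSIZE_EXAMPLES
/*
 * Illustrative sketch only --- COSTSIZE_EXAMPLES is a hypothetical guard,
 * not part of the build.  The formula above collapses to
 *		pages + (cpu_tuple_cost + baserestrictcost) * tuples
 * so a 1000-page, 100000-tuple relation with no restriction clauses costs
 * 1000 + 0.01 * 100000 = 2000 with the default cpu_tuple_cost of 0.01.
 */
static Cost
example_seqscan_cost(double pages, double tuples, Cost restrictcost)
{
	return pages + (cpu_tuple_cost + restrictcost) * tuples;
}
#endif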
/*
 * cost_nonsequential_access
 *	  Estimate the cost of accessing one page at random from a relation
 *	  (or sort temp file) of the given size in pages.
 *
 * The simplistic model that the cost is random_page_cost is what we want
 * to use for large relations; but for small ones that is a serious
 * overestimate because of the effects of caching.  This routine tries to
 * account for that.
 *
 * Unfortunately we don't have any good way of estimating the effective cache
 * size we are working with --- we know that Postgres itself has NBuffers
 * internal buffers, but the size of the kernel's disk cache is uncertain,
 * and how much of it we get to use is even less certain.  We punt the problem
 * for now by assuming we are given an effective_cache_size parameter.
 *
 * Given a guesstimated cache size, we estimate the actual I/O cost per page
 * with the entirely ad-hoc equations:
 *	for rel_size <= effective_cache_size:
 *		1 + (random_page_cost/2-1) * (rel_size/effective_cache_size) ** 2
 *	for rel_size >= effective_cache_size:
 *		random_page_cost * (1 - (effective_cache_size/rel_size)/2)
 * These give the right asymptotic behavior (=> 1.0 as rel_size becomes
 * small, => random_page_cost as it becomes large) and meet in the middle
 * with the estimate that the cache is about 50% effective for a relation
 * of the same size as effective_cache_size.  (XXX this is probably all
 * wrong, but I haven't been able to find any theory about how effective
 * a disk cache should be presumed to be.)
 */
Cost
cost_nonsequential_access(double relpages)
{
	double		relsize;

	/* don't crash on bad input data */
	if (relpages <= 0.0 || effective_cache_size <= 0.0)
		return random_page_cost;

	relsize = relpages / effective_cache_size;

	if (relsize >= 1.0)
		return random_page_cost * (1.0 - 0.5 / relsize);
	else
		return 1.0 + (random_page_cost * 0.5 - 1.0) * relsize * relsize;
}
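/*
 * Sanity check on the boundary (illustrative arithmetic, not code from
 * the original): at relpages == effective_cache_size, relsize is 1.0 and
 * both branches above yield random_page_cost / 2 --- e.g. 2.0 with the
 * default random_page_cost of 4.0 --- which is exactly the "cache is
 * about 50% effective" meeting point described in the header comment.
 */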
/*
 * cost_index
 *	  Determines and returns the cost of scanning a relation using an index.
 *
 * NOTE: an indexscan plan node can actually represent several passes,
 * but here we consider the cost of just one pass.
 *
 * 'root' is the query root
 * 'baserel' is the base relation the index is for
 * 'index' is the index to be used
 * 'indexQuals' is the list of applicable qual clauses (implicit AND semantics)
 * 'is_injoin' is T if we are considering using the index scan as the inside
 *		of a nestloop join (hence, some of the indexQuals are join clauses)
 *
 * NOTE: 'indexQuals' must contain only clauses usable as index restrictions.
 * Any additional quals evaluated as qpquals may reduce the number of returned
 * tuples, but they won't reduce the number of tuples we have to fetch from
 * the table, so they don't reduce the scan cost.
 */
void
cost_index(Path *path, Query *root,
		   RelOptInfo *baserel,
		   IndexOptInfo *index,
		   List *indexQuals,
		   bool is_injoin)
{
	Cost		startup_cost = 0;
	Cost		run_cost = 0;
	Cost		cpu_per_tuple;
	Cost		indexStartupCost;
	Cost		indexTotalCost;
	Selectivity indexSelectivity;
	double		tuples_fetched;
	double		pages_fetched;

	/* Should only be applied to base relations */
	Assert(IsA(baserel, RelOptInfo) && IsA(index, IndexOptInfo));
	Assert(length(baserel->relids) == 1);
	Assert(!baserel->issubquery);

	if (!enable_indexscan && !is_injoin)
		startup_cost += disable_cost;

	/*
	 * Call index-access-method-specific code to estimate the processing
	 * cost for scanning the index, as well as the selectivity of the
	 * index (ie, the fraction of main-table tuples we will have to
	 * retrieve).
	 */
	OidFunctionCall7(index->amcostestimate,
					 PointerGetDatum(root),
					 PointerGetDatum(baserel),
					 PointerGetDatum(index),
					 PointerGetDatum(indexQuals),
					 PointerGetDatum(&indexStartupCost),
					 PointerGetDatum(&indexTotalCost),
					 PointerGetDatum(&indexSelectivity));

	/* all costs for touching index itself included here */
	startup_cost += indexStartupCost;
	run_cost += indexTotalCost - indexStartupCost;

	/*
	 * Estimate number of main-table tuples and pages fetched.
	 *
	 * If the number of tuples is much smaller than the number of pages in
	 * the relation, each tuple will cost a separate nonsequential fetch.
	 * If it is comparable or larger, then probably we will be able to
	 * avoid some fetches.  We use a growth rate of log(#tuples/#pages +
	 * 1) --- probably totally bogus, but intuitively it gives the right
	 * shape of curve at least.
	 *
	 * XXX if the relation has recently been "clustered" using this index,
	 * then in fact the target tuples will be highly nonuniformly
	 * distributed, and we will be seriously overestimating the scan cost!
	 * Currently we have no way to know whether the relation has been
	 * clustered, nor how much it's been modified since the last
	 * clustering, so we ignore this effect.  Would be nice to do better
	 * someday.
	 */
	tuples_fetched = indexSelectivity * baserel->tuples;
	/* Don't believe estimates less than 1... */
	if (tuples_fetched < 1.0)
		tuples_fetched = 1.0;

	if (baserel->pages > 0)
		pages_fetched = ceil(baserel->pages *
							 log(tuples_fetched / baserel->pages + 1.0));
	else
		pages_fetched = tuples_fetched;

	/*
	 * Now estimate one nonsequential access per page fetched, plus
	 * appropriate CPU costs per tuple.
	 */

	/* disk costs for main table */
	run_cost += pages_fetched * cost_nonsequential_access(baserel->pages);

	/* CPU costs */
	cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost;

	/*
	 * Normally the indexquals will be removed from the list of
	 * restriction clauses that we have to evaluate as qpquals, so we
	 * should subtract their costs from baserestrictcost.  For a lossy
	 * index, however, we will have to recheck all the quals and so
	 * mustn't subtract anything.  Also, if we are doing a join then some
	 * of the indexquals are join clauses and shouldn't be subtracted.
	 * Rather than work out exactly how much to subtract, we don't
	 * subtract anything in that case either.
	 */
	if (!index->lossy && !is_injoin)
		cpu_per_tuple -= cost_qual_eval(indexQuals);

	run_cost += cpu_per_tuple * tuples_fetched;

	path->startup_cost = startup_cost;
	path->total_cost = startup_cost + run_cost;
}
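#ifdef COSTSIZE_EXAMPLES
/*
 * Illustrative sketch only (COSTSIZE_EXAMPLES is a hypothetical guard):
 * the page-fetch damping used above.  Fetching 1000 tuples from a
 * 1000-page table gives ceil(1000 * log(2.0)) = 694 page fetches rather
 * than 1000 separate ones, reflecting the chance that several target
 * tuples share a page.
 */
static double
example_index_pages_fetched(double tuples_fetched, double pages)
{
	if (pages <= 0)
		return tuples_fetched;
	return ceil(pages * log(tuples_fetched / pages + 1.0));
}
#endif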
/*
 * cost_tidscan
 *	  Determines and returns the cost of scanning a relation using TIDs.
 */
void
cost_tidscan(Path *path, RelOptInfo *baserel, List *tideval)
{
	Cost		startup_cost = 0;
	Cost		run_cost = 0;
	Cost		cpu_per_tuple;
	int			ntuples = length(tideval);

	if (!enable_tidscan)
		startup_cost += disable_cost;

	/* disk costs --- assume each tuple on a different page */
	run_cost += random_page_cost * ntuples;

	/* CPU costs */
	cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost;
	run_cost += cpu_per_tuple * ntuples;

	path->startup_cost = startup_cost;
	path->total_cost = startup_cost + run_cost;
}
/*
 * cost_sort
 *	  Determines and returns the cost of sorting a relation.
 *
 * The cost of supplying the input data is NOT included; the caller should
 * add that cost to both startup and total costs returned from this routine!
 *
 * If the total volume of data to sort is less than SortMem, we will do
 * an in-memory sort, which requires no I/O and about t*log2(t) tuple
 * comparisons for t tuples.
 *
 * If the total volume exceeds SortMem, we switch to a tape-style merge
 * algorithm.  There will still be about t*log2(t) tuple comparisons in
 * total, but we will also need to write and read each tuple once per
 * merge pass.  We expect about ceil(log6(r)) merge passes where r is the
 * number of initial runs formed (log6 because tuplesort.c uses six-tape
 * merging).  Since the average initial run should be about twice SortMem,
 * we have
 *		disk traffic = 2 * relsize * ceil(log6(p / (2*SortMem)))
 *		cpu = comparison_cost * t * log2(t)
 *
 * The disk traffic is assumed to be half sequential and half random
 * accesses (XXX can't we refine that guess?)
 *
 * We charge two operator evals per tuple comparison, which should be in
 * the right ballpark in most cases.
 *
 * 'pathkeys' is a list of sort keys
 * 'tuples' is the number of tuples in the relation
 * 'width' is the average tuple width in bytes
 *
 * NOTE: some callers currently pass NIL for pathkeys because they
 * can't conveniently supply the sort keys.  Since this routine doesn't
 * currently do anything with pathkeys anyway, that doesn't matter...
 * but if it ever does, it should react gracefully to lack of key data.
 */
void
cost_sort(Path *path, List *pathkeys, double tuples, int width)
{
	Cost		startup_cost = 0;
	Cost		run_cost = 0;
	double		nbytes = relation_byte_size(tuples, width);
	long		sortmembytes = SortMem * 1024L;

	if (!enable_sort)
		startup_cost += disable_cost;

	/*
	 * We want to be sure the cost of a sort is never estimated as zero,
	 * even if passed-in tuple count is zero.  Besides, mustn't do
	 * log(0)...
	 */
	if (tuples < 2.0)
		tuples = 2.0;

	/*
	 * CPU costs
	 *
	 * Assume about two operator evals per tuple comparison and N log2 N
	 * comparisons
	 */
	startup_cost += 2.0 * cpu_operator_cost * tuples * LOG2(tuples);

	/* disk costs */
	if (nbytes > sortmembytes)
	{
		double		npages = ceil(nbytes / BLCKSZ);
		double		nruns = nbytes / (sortmembytes * 2);
		double		log_runs = ceil(LOG6(nruns));
		double		npageaccesses;

		if (log_runs < 1.0)
			log_runs = 1.0;
		npageaccesses = 2.0 * npages * log_runs;
		/* Assume half are sequential (cost 1), half are not */
		startup_cost += npageaccesses *
			(1.0 + cost_nonsequential_access(npages)) * 0.5;
	}

	/*
	 * Note: should we bother to assign a nonzero run_cost to reflect the
	 * overhead of extracting tuples from the sort result?  Probably not
	 * worth worrying about.
	 */
	path->startup_cost = startup_cost;
	path->total_cost = startup_cost + run_cost;
}
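/*
 * Worked example of the external-sort branch (invented numbers): sorting
 * nbytes = 80 MB with SortMem = 1024 (SortMem is in kilobytes, so 1 MB of
 * sort memory) gives
 *		npages   = 80 MB / 8 KB BLCKSZ = 10240
 *		nruns    = 80 MB / 2 MB = 40
 *		log_runs = ceil(log6(40)) = 3 merge passes
 * so npageaccesses = 2 * 10240 * 3 = 61440 page reads and writes, each
 * charged at the average of the sequential and nonsequential page costs.
 */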
/*
 * cost_nestloop
 *	  Determines and returns the cost of joining two relations using the
 *	  nested loop algorithm.
 *
 * 'outer_path' is the path for the outer relation
 * 'inner_path' is the path for the inner relation
 * 'restrictlist' are the RestrictInfo nodes to be applied at the join
 */
void
cost_nestloop(Path *path,
			  Path *outer_path,
			  Path *inner_path,
			  List *restrictlist)
{
	Cost		startup_cost = 0;
	Cost		run_cost = 0;
	Cost		cpu_per_tuple;
	double		ntuples;

	if (!enable_nestloop)
		startup_cost += disable_cost;

	/* cost of source data */

	/*
	 * NOTE: we assume that the inner path's startup_cost is paid once,
	 * not over again on each restart.  This is certainly correct if the
	 * inner path is materialized.  Are there any cases where it is wrong?
	 */
	startup_cost += outer_path->startup_cost + inner_path->startup_cost;
	run_cost += outer_path->total_cost - outer_path->startup_cost;
	run_cost += outer_path->parent->rows *
		(inner_path->total_cost - inner_path->startup_cost);

	/*
	 * Number of tuples processed (not number emitted!).  If inner path is
	 * an indexscan, be sure to use its estimated output row count, which
	 * may be lower than the restriction-clause-only row count of its
	 * parent relation.
	 */
	if (IsA(inner_path, IndexPath))
		ntuples = ((IndexPath *) inner_path)->rows;
	else
		ntuples = inner_path->parent->rows;
	ntuples *= outer_path->parent->rows;

	/* CPU costs */
	cpu_per_tuple = cpu_tuple_cost + cost_qual_eval(restrictlist);
	run_cost += cpu_per_tuple * ntuples;

	path->startup_cost = startup_cost;
	path->total_cost = startup_cost + run_cost;
}
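#ifdef COSTSIZE_EXAMPLES
/*
 * Illustrative sketch only --- COSTSIZE_EXAMPLES is a hypothetical guard
 * and this helper is not part of the planner.  It restates the nestloop
 * arithmetic above (ignoring disable_cost and the indexscan row-count
 * refinement): the inner path's startup cost is paid once, its run cost
 * once per outer row.
 */
static Cost
example_nestloop_total(Path *outer, Path *inner, Cost qual_cost,
					   double ntuples)
{
	return outer->total_cost +
		inner->startup_cost +
		outer->parent->rows * (inner->total_cost - inner->startup_cost) +
		(cpu_tuple_cost + qual_cost) * ntuples;
}
#endif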
/*
 * cost_mergejoin
 *	  Determines and returns the cost of joining two relations using the
 *	  merge join algorithm.
 *
 * 'outer_path' is the path for the outer relation
 * 'inner_path' is the path for the inner relation
 * 'restrictlist' are the RestrictInfo nodes to be applied at the join
 * 'outersortkeys' and 'innersortkeys' are lists of the keys to be used
 *		to sort the outer and inner relations, or NIL if no explicit
 *		sort is needed because the source path is already ordered
 */
void
cost_mergejoin(Path *path,
			   Path *outer_path,
			   Path *inner_path,
			   List *restrictlist,
			   List *outersortkeys,
			   List *innersortkeys)
{
	Cost		startup_cost = 0;
	Cost		run_cost = 0;
	Cost		cpu_per_tuple;
	double		ntuples;
	Path		sort_path;		/* dummy for result of cost_sort */

	if (!enable_mergejoin)
		startup_cost += disable_cost;

	/* cost of source data */

	/*
	 * Note we are assuming that each source tuple is fetched just once,
	 * which is not right in the presence of equal keys.  If we had a way
	 * of estimating the proportion of equal keys, we could apply a
	 * correction factor...
	 */
	if (outersortkeys)			/* do we need to sort outer? */
	{
		startup_cost += outer_path->total_cost;
		cost_sort(&sort_path,
				  outersortkeys,
				  outer_path->parent->rows,
				  outer_path->parent->width);
		startup_cost += sort_path.startup_cost;
		run_cost += sort_path.total_cost - sort_path.startup_cost;
	}
	else
	{
		startup_cost += outer_path->startup_cost;
		run_cost += outer_path->total_cost - outer_path->startup_cost;
	}

	if (innersortkeys)			/* do we need to sort inner? */
	{
		startup_cost += inner_path->total_cost;
		cost_sort(&sort_path,
				  innersortkeys,
				  inner_path->parent->rows,
				  inner_path->parent->width);
		startup_cost += sort_path.startup_cost;
		run_cost += sort_path.total_cost - sort_path.startup_cost;
	}
	else
	{
		startup_cost += inner_path->startup_cost;
		run_cost += inner_path->total_cost - inner_path->startup_cost;
	}

	/*
	 * Estimate the number of tuples to be processed in the mergejoin
	 * itself as one per tuple in the two source relations.  This could be
	 * a drastic underestimate if there are many equal-keyed tuples in
	 * either relation, but we have no good way of estimating that...
	 */
	ntuples = outer_path->parent->rows + inner_path->parent->rows;

	/* CPU costs */
	cpu_per_tuple = cpu_tuple_cost + cost_qual_eval(restrictlist);
	run_cost += cpu_per_tuple * ntuples;

	path->startup_cost = startup_cost;
	path->total_cost = startup_cost + run_cost;
}
/*
 * cost_hashjoin
 *	  Determines and returns the cost of joining two relations using the
 *	  hash join algorithm.
 *
 * 'outer_path' is the path for the outer relation
 * 'inner_path' is the path for the inner relation
 * 'restrictlist' are the RestrictInfo nodes to be applied at the join
 * 'innerdisbursion' is an estimate of the disbursion statistic
 *		for the inner hash key.
 */
void
cost_hashjoin(Path *path,
			  Path *outer_path,
			  Path *inner_path,
			  List *restrictlist,
			  Selectivity innerdisbursion)
{
	Cost		startup_cost = 0;
	Cost		run_cost = 0;
	Cost		cpu_per_tuple;
	double		ntuples;
	double		outerbytes = relation_byte_size(outer_path->parent->rows,
												outer_path->parent->width);
	double		innerbytes = relation_byte_size(inner_path->parent->rows,
												inner_path->parent->width);
	long		hashtablebytes = SortMem * 1024L;

	if (!enable_hashjoin)
		startup_cost += disable_cost;

	/* cost of source data */
	startup_cost += outer_path->startup_cost;
	run_cost += outer_path->total_cost - outer_path->startup_cost;
	startup_cost += inner_path->total_cost;

	/* cost of computing hash function: must do it once per input tuple */
	startup_cost += cpu_operator_cost * inner_path->parent->rows;
	run_cost += cpu_operator_cost * outer_path->parent->rows;

	/*
	 * The number of tuple comparisons needed is the number of outer
	 * tuples times the typical hash bucket size.  nodeHash.c tries for
	 * average bucket loading of NTUP_PER_BUCKET, but that goal will
	 * be reached only if data values are uniformly distributed among
	 * the buckets.  To be conservative, we scale up the target bucket
	 * size by the number of inner rows times inner disbursion, giving
	 * an estimate of the typical number of duplicates of each value.
	 * We then charge one cpu_operator_cost per tuple comparison.
	 */
	run_cost += cpu_operator_cost * outer_path->parent->rows *
		NTUP_PER_BUCKET * ceil(inner_path->parent->rows * innerdisbursion);

	/*
	 * Estimate the number of tuples that get through the hashing filter
	 * as one per tuple in the two source relations.  This could be a
	 * drastic underestimate if there are many equal-keyed tuples in
	 * either relation, but we have no good way of estimating that...
	 */
	ntuples = outer_path->parent->rows + inner_path->parent->rows;

	/* CPU costs */
	cpu_per_tuple = cpu_tuple_cost + cost_qual_eval(restrictlist);
	run_cost += cpu_per_tuple * ntuples;

	/*
	 * If the inner relation is too big then we will need to "batch" the
	 * join, which implies writing and reading most of the tuples to disk
	 * an extra time.  Charge one cost unit per page of I/O (correct since
	 * it should be nice and sequential...).  Writing the inner rel counts
	 * as startup cost, all the rest as run cost.
	 */
	if (innerbytes > hashtablebytes)
	{
		double		outerpages = page_size(outer_path->parent->rows,
										   outer_path->parent->width);
		double		innerpages = page_size(inner_path->parent->rows,
										   inner_path->parent->width);

		startup_cost += innerpages;
		run_cost += innerpages + 2 * outerpages;
	}

	/*
	 * Bias against putting larger relation on inside.  We don't want an
	 * absolute prohibition, though, since larger relation might have
	 * better disbursion --- and we can't trust the size estimates
	 * unreservedly, anyway.  Instead, inflate the startup cost by the
	 * square root of the size ratio.  (Why square root?  No real good
	 * reason, but it seems reasonable...)
	 */
	if (innerbytes > outerbytes && outerbytes > 0)
		startup_cost *= sqrt(innerbytes / outerbytes);

	path->startup_cost = startup_cost;
	path->total_cost = startup_cost + run_cost;
}
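/*
 * Worked example of the bucket-comparison charge above (invented
 * numbers): with 10000 outer rows, 1000 inner rows, and an inner
 * disbursion of 0.01, the typical inner value is estimated to have
 * ceil(1000 * 0.01) = 10 duplicates, so we charge
 *		cpu_operator_cost * 10000 * NTUP_PER_BUCKET * 10
 * tuple comparisons on top of the hashing and qual-evaluation costs.
 */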
/*
 * cost_qual_eval
 *	  Estimate the CPU cost of evaluating a WHERE clause (once).
 *	  The input can be either an implicitly-ANDed list of boolean
 *	  expressions, or a list of RestrictInfo nodes.
 */
Cost
cost_qual_eval(List *quals)
{
	Cost		total = 0;

	cost_qual_eval_walker((Node *) quals, &total);
	return total;
}
static bool
cost_qual_eval_walker(Node *node, Cost *total)
{
	if (node == NULL)
		return false;

	/*
	 * Our basic strategy is to charge one cpu_operator_cost for each
	 * operator or function node in the given tree.  Vars and Consts are
	 * charged zero, and so are boolean operators (AND, OR, NOT).
	 * Simplistic, but a lot better than no model at all.
	 *
	 * Should we try to account for the possibility of short-circuit
	 * evaluation of AND/OR?
	 */
	if (IsA(node, Expr))
	{
		Expr	   *expr = (Expr *) node;

		switch (expr->opType)
		{
			case OP_EXPR:
			case FUNC_EXPR:
				*total += cpu_operator_cost;
				break;
			case OR_EXPR:
			case AND_EXPR:
			case NOT_EXPR:
				break;
			case SUBPLAN_EXPR:

				/*
				 * A subplan node in an expression indicates that the
				 * subplan will be executed on each evaluation, so charge
				 * accordingly.  (We assume that sub-selects that can be
				 * executed as InitPlans have already been removed from
				 * the expression.)
				 *
				 * NOTE: this logic should agree with the estimates used by
				 * make_subplan() in plan/subselect.c.
				 */
				{
					SubPlan    *subplan = (SubPlan *) expr->oper;
					Plan	   *plan = subplan->plan;
					Cost		subcost;

					if (subplan->sublink->subLinkType == EXISTS_SUBLINK)
					{
						/* we only need to fetch 1 tuple */
						subcost = plan->startup_cost +
							(plan->total_cost - plan->startup_cost) / plan->plan_rows;
					}
					else if (subplan->sublink->subLinkType == ALL_SUBLINK ||
							 subplan->sublink->subLinkType == ANY_SUBLINK)
					{
						/* assume we need 50% of the tuples */
						subcost = plan->startup_cost +
							0.50 * (plan->total_cost - plan->startup_cost);
						/* XXX what if subplan has been materialized? */
					}
					else
					{
						/* assume we need all tuples */
						subcost = plan->total_cost;
					}
					*total += subcost;
				}
				break;
		}
		/* fall through to examine args of Expr node */
	}

	/*
	 * expression_tree_walker doesn't know what to do with RestrictInfo
	 * nodes, but we just want to recurse through them.
	 */
	if (IsA(node, RestrictInfo))
	{
		RestrictInfo *restrictinfo = (RestrictInfo *) node;

		return cost_qual_eval_walker((Node *) restrictinfo->clause, total);
	}

	/* Otherwise, recurse. */
	return expression_tree_walker(node, cost_qual_eval_walker,
								  (void *) total);
}
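/*
 * Worked example of the EXISTS charge above (invented numbers): a subplan
 * with startup_cost = 5, total_cost = 105 and plan_rows = 100 is charged
 *		5 + (105 - 5) / 100 = 6
 * per evaluation, since only the first tuple need be fetched.
 */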
/*
 * set_baserel_size_estimates
 *	  Set the size estimates for the given base relation.
 *
 * The rel's targetlist and restrictinfo list must have been constructed
 * already.
 *
 * We set the following fields of the rel node:
 *	rows: the estimated number of output tuples (after applying
 *		  restriction clauses).
 *	width: the estimated average output tuple width in bytes.
 *	baserestrictcost: estimated cost of evaluating baserestrictinfo clauses.
 */
void
set_baserel_size_estimates(Query *root, RelOptInfo *rel)
{
	/* Should only be applied to base relations */
	Assert(length(rel->relids) == 1);

	rel->rows = rel->tuples *
		restrictlist_selectivity(root,
								 rel->baserestrictinfo,
								 lfirsti(rel->relids));

	/*
	 * Force estimate to be at least one row, to make explain output look
	 * better and to avoid possible divide-by-zero when interpolating
	 * cost.
	 */
	if (rel->rows < 1.0)
		rel->rows = 1.0;

	rel->baserestrictcost = cost_qual_eval(rel->baserestrictinfo);

	set_rel_width(root, rel);
}
/*
 * set_joinrel_size_estimates
 *	  Set the size estimates for the given join relation.
 *
 * The rel's targetlist must have been constructed already, and a
 * restriction clause list that matches the given component rels must
 * have been provided.
 *
 * Since there is more than one way to make a joinrel for more than two
 * base relations, the results we get here could depend on which component
 * rel pair is provided.  In theory we should get the same answers no matter
 * which pair is provided; in practice, since the selectivity estimation
 * routines don't handle all cases equally well, we might not.  But there's
 * not much to be done about it.  (Would it make sense to repeat the
 * calculations for each pair of input rels that's encountered, and somehow
 * average the results?  Probably way more trouble than it's worth.)
 *
 * We set the same relnode fields as set_baserel_size_estimates() does.
 */
void
set_joinrel_size_estimates(Query *root, RelOptInfo *rel,
						   RelOptInfo *outer_rel,
						   RelOptInfo *inner_rel,
						   List *restrictlist)
{
	double		temp;

	/* cartesian product */
	temp = outer_rel->rows * inner_rel->rows;

	/*
	 * Apply join restrictivity.  Note that we are only considering
	 * clauses that become restriction clauses at this join level; we are
	 * not double-counting them because they were not considered in
	 * estimating the sizes of the component rels.
	 */
	temp *= restrictlist_selectivity(root,
									 restrictlist,
									 0);

	/*
	 * Force estimate to be at least one row, to make explain output look
	 * better and to avoid possible divide-by-zero when interpolating
	 * cost.
	 */
	if (temp < 1.0)
		temp = 1.0;
	rel->rows = temp;

	/*
	 * We could apply set_rel_width() to compute the output tuple width
	 * from scratch, but at present it's always just the sum of the input
	 * widths, so why work harder than necessary?  If relnode.c is ever
	 * taught to remove unneeded columns from join targetlists, go back to
	 * using set_rel_width here.
	 */
	rel->width = outer_rel->width + inner_rel->width;
}
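/*
 * Worked example (invented numbers): joining a 1000-row outer rel to a
 * 1000-row inner rel with join-clause selectivity 0.001 yields
 *		1000 * 1000 * 0.001 = 1000
 * estimated output rows, clamped below at 1.0 as noted above.
 */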
/*
 * set_rel_width
 *	  Set the estimated output width of the relation.
 */
static void
set_rel_width(Query *root, RelOptInfo *rel)
{
	int			tuple_width = 0;
	List	   *tle;

	foreach(tle, rel->targetlist)
		tuple_width += compute_attribute_width((TargetEntry *) lfirst(tle));
	Assert(tuple_width >= 0);
	rel->width = tuple_width;
}
/*
 * compute_attribute_width
 *	  Given a target list entry, find the size in bytes of the attribute.
 *
 * If a field is variable-length, we make a default assumption.  Would be
 * better if VACUUM recorded some stats about the average field width...
 * also, we have access to the atttypmod, but fail to use it...
 */
static int
compute_attribute_width(TargetEntry *tlistentry)
{
	int			width = get_typlen(tlistentry->resdom->restype);

	if (width < 0)				/* variable-length type? */
		return _DEFAULT_ATTRIBUTE_WIDTH_;
	else
		return width;
}
/*
 * relation_byte_size
 *	  Estimate the storage space in bytes for a given number of tuples
 *	  of a given width (size in bytes).
 */
static double
relation_byte_size(double tuples, int width)
{
	return tuples * ((double) (width + sizeof(HeapTupleData)));
}
/*
 * page_size
 *	  Returns an estimate of the number of pages covered by a given
 *	  number of tuples of a given width (size in bytes).
 */
static double
page_size(double tuples, int width)
{
	return ceil(relation_byte_size(tuples, width) / BLCKSZ);
}
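/*
 * Worked example (invented numbers, and assuming for illustration that
 * sizeof(HeapTupleData) is 32 on this platform): 10000 tuples of width
 * 100 occupy about 10000 * 132 = 1320000 bytes, i.e.
 * ceil(1320000 / 8192) = 162 pages with the default 8 KB BLCKSZ.
 */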