/*-------------------------------------------------------------------------
 *
 * costsize.c
 *	  Routines to compute (and set) relation sizes and path costs
 *
 * Path costs are measured in units of disk accesses: one sequential page
 * fetch has cost 1.  All else is scaled relative to a page fetch, using
 * the scaling parameters
 *
 *	random_page_cost		Cost of a non-sequential page fetch
 *	cpu_tuple_cost			Cost of typical CPU time to process a tuple
 *	cpu_index_tuple_cost	Cost of typical CPU time to process an index tuple
 *	cpu_operator_cost		Cost of CPU time to process a typical WHERE operator
 *
 * We also use a rough estimate "effective_cache_size" of the number of
 * disk pages in Postgres + OS-level disk cache.  (We can't simply use
 * NBuffers for this purpose because that would ignore the effects of
 * the kernel's disk cache.)
 *
 * Obviously, taking constants for these values is an oversimplification,
 * but it's tough enough to get any useful estimates even at this level of
 * detail.  Note that all of these parameters are user-settable, in case
 * the default values are drastically off for a particular platform.
 *
 * We compute two separate costs for each path:
 *		total_cost: total estimated cost to fetch all tuples
 *		startup_cost: cost that is expended before first tuple is fetched
 * In some scenarios, such as when there is a LIMIT or we are implementing
 * an EXISTS(...) sub-select, it is not necessary to fetch all tuples of the
 * path's result.  A caller can estimate the cost of fetching a partial
 * result by interpolating between startup_cost and total_cost.  In detail:
 *		actual_cost = startup_cost +
 *			(total_cost - startup_cost) * tuples_to_fetch / path->parent->rows;
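 * A worked example with illustrative numbers (not taken from this file):
 * for a path with startup_cost = 10, total_cost = 110, and an estimated
 * 1000 result rows, fetching just the first 100 tuples should cost about
 *		10 + (110 - 10) * 100 / 1000 = 20
 * i.e. the startup cost plus one tenth of the run cost.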
 * Note that a relation's rows count (and, by extension, a Plan's plan_rows)
 * are set without regard to any LIMIT, so that this equation works properly.
 * (Also, these routines guarantee not to set the rows count to zero, so there
 * will be no zero divide.)  RelOptInfos, Paths, and Plans themselves never
 * account for LIMIT.
 *
 *
 * Portions Copyright (c) 1996-2000, PostgreSQL, Inc
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
 *	  $Header: /cvsroot/pgsql/src/backend/optimizer/path/costsize.c,v 1.60 2000/05/30 04:24:47 tgl Exp $
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include <math.h>

#include "executor/nodeHash.h"
#include "miscadmin.h"
#include "optimizer/clauses.h"
#include "optimizer/cost.h"
#include "optimizer/internal.h"
#include "utils/lsyscache.h"
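
/*
 * log() is the natural log; dividing by ln(2) = 0.693147... and
 * ln(6) = 1.791759... converts it to logarithms base 2 and base 6.
 * For example, LOG2(8.0) evaluates to 3.0.
 */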
#define LOG2(x)  (log(x) / 0.693147180559945)
#define LOG6(x)  (log(x) / 1.79175946922805)

double		effective_cache_size = DEFAULT_EFFECTIVE_CACHE_SIZE;
Cost		random_page_cost = DEFAULT_RANDOM_PAGE_COST;
Cost		cpu_tuple_cost = DEFAULT_CPU_TUPLE_COST;
Cost		cpu_index_tuple_cost = DEFAULT_CPU_INDEX_TUPLE_COST;
Cost		cpu_operator_cost = DEFAULT_CPU_OPERATOR_COST;

Cost		disable_cost = 100000000.0;

bool		enable_seqscan = true;
bool		enable_indexscan = true;
bool		enable_tidscan = true;
bool		enable_sort = true;
bool		enable_nestloop = true;
bool		enable_mergejoin = true;
bool		enable_hashjoin = true;

static bool cost_qual_eval_walker(Node *node, Cost *total);
static void set_rel_width(Query *root, RelOptInfo *rel);
static int	compute_attribute_width(TargetEntry *tlistentry);
static double relation_byte_size(double tuples, int width);
static double page_size(double tuples, int width);

/*
 * cost_seqscan
 *	  Determines and returns the cost of scanning a relation sequentially.
 *
 * If the relation is a temporary to be materialized from a query
 * embedded within a data field (determined by 'relid' containing an
 * attribute reference), then a predetermined constant is returned (we
 * have NO IDEA how big the result of a POSTQUEL procedure is going to be).
 *
 * Note: for historical reasons, this routine and the others in this module
 * use the passed result Path only to store their startup_cost and total_cost
 * results into.  All the input data they need is passed as separate
 * parameters, even though much of it could be extracted from the result Path.
 */
void
cost_seqscan(Path *path, RelOptInfo *baserel)
{
	Cost		startup_cost = 0;
	Cost		run_cost = 0;
	Cost		cpu_per_tuple;

	/* Should only be applied to base relations */
	Assert(length(baserel->relids) == 1);

	if (!enable_seqscan)
		startup_cost += disable_cost;

	if (lfirsti(baserel->relids) < 0)
	{
		/*
		 * cost of sequentially scanning a materialized temporary relation
		 */
		run_cost += _NONAME_SCAN_COST_;
	}
	else
	{
		/*
		 * disk costs
		 *
		 * The cost of reading a page sequentially is 1.0, by definition.
		 * Note that the Unix kernel will typically do some amount of
		 * read-ahead optimization, so that this cost is less than the
		 * true cost of reading a page from disk.  We ignore that issue
		 * here, but must take it into account when estimating the cost of
		 * non-sequential accesses!
		 */
		run_cost += baserel->pages;		/* sequential fetches with cost 1.0 */
	}

	/* CPU costs */
	cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost;
	run_cost += cpu_per_tuple * baserel->tuples;

	path->startup_cost = startup_cost;
	path->total_cost = startup_cost + run_cost;
}
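
/*
 * Worked example with illustrative numbers (not drawn from any real table):
 * a relation of 10,000 pages and 1,000,000 tuples, scanned with the default
 * cpu_tuple_cost of 0.01 and no restriction clauses, costs about
 *		10000 + 0.01 * 1000000 = 20000
 * total, with zero startup cost --- a seqscan can return its first tuple
 * without reading the whole relation.
 */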

/*
 * cost_nonsequential_access
 *	  Estimate the cost of accessing one page at random from a relation
 *	  (or sort temp file) of the given size in pages.
 *
 * The simplistic model that the cost is random_page_cost is what we want
 * to use for large relations; but for small ones that is a serious
 * overestimate because of the effects of caching.  This routine tries to
 * account for that.
 *
 * Unfortunately we don't have any good way of estimating the effective cache
 * size we are working with --- we know that Postgres itself has NBuffers
 * internal buffers, but the size of the kernel's disk cache is uncertain,
 * and how much of it we get to use is even less certain.  We punt the problem
 * for now by assuming we are given an effective_cache_size parameter.
 *
 * Given a guesstimated cache size, we estimate the actual I/O cost per page
 * with the entirely ad-hoc equations:
 *	for rel_size <= effective_cache_size:
 *		1 + (random_page_cost/2-1) * (rel_size/effective_cache_size) ** 2
 *	for rel_size >= effective_cache_size:
 *		random_page_cost * (1 - (effective_cache_size/rel_size)/2)
 * These give the right asymptotic behavior (=> 1.0 as rel_size becomes
 * small, => random_page_cost as it becomes large) and meet in the middle
 * with the estimate that the cache is about 50% effective for a relation
 * of the same size as effective_cache_size.  (XXX this is probably all
 * wrong, but I haven't been able to find any theory about how effective
 * a disk cache should be presumed to be.)
 */
Cost
cost_nonsequential_access(double relpages)
{
	double		relsize;

	/* don't crash on bad input data */
	if (relpages <= 0.0 || effective_cache_size <= 0.0)
		return random_page_cost;

	relsize = relpages / effective_cache_size;

	if (relsize >= 1.0)
		return random_page_cost * (1.0 - 0.5 / relsize);
	else
		return 1.0 + (random_page_cost * 0.5 - 1.0) * relsize * relsize;
}
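
/*
 * Sanity check of the two branches above, using the default
 * random_page_cost of 4.0 (illustrative numbers only): for a relation
 * exactly the size of the cache (relsize = 1.0) both formulas give
 *		1 + (4/2 - 1) * 1^2 = 2.0	and		4 * (1 - 0.5/1) = 2.0
 * so the estimate is continuous at the crossover point, where the cache
 * is treated as roughly 50% effective (cost = random_page_cost/2).
 */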

/*
 * cost_index
 *	  Determines and returns the cost of scanning a relation using an index.
 *
 * NOTE: an indexscan plan node can actually represent several passes,
 * but here we consider the cost of just one pass.
 *
 * 'root' is the query root
 * 'baserel' is the base relation the index is for
 * 'index' is the index to be used
 * 'indexQuals' is the list of applicable qual clauses (implicit AND semantics)
 * 'is_injoin' is T if we are considering using the index scan as the inside
 *		of a nestloop join (hence, some of the indexQuals are join clauses)
 *
 * NOTE: 'indexQuals' must contain only clauses usable as index restrictions.
 * Any additional quals evaluated as qpquals may reduce the number of returned
 * tuples, but they won't reduce the number of tuples we have to fetch from
 * the table, so they don't reduce the scan cost.
 */
void
cost_index(Path *path, Query *root,
		   RelOptInfo *baserel,
		   IndexOptInfo *index,
		   List *indexQuals,
		   bool is_injoin)
{
	Cost		startup_cost = 0;
	Cost		run_cost = 0;
	Cost		cpu_per_tuple;
	Cost		indexStartupCost;
	Cost		indexTotalCost;
	Selectivity indexSelectivity;
	double		tuples_fetched;
	double		pages_fetched;

	/* Should only be applied to base relations */
	Assert(IsA(baserel, RelOptInfo) && IsA(index, IndexOptInfo));
	Assert(length(baserel->relids) == 1);

	if (!enable_indexscan && !is_injoin)
		startup_cost += disable_cost;

	/*
	 * Call index-access-method-specific code to estimate the processing
	 * cost for scanning the index, as well as the selectivity of the
	 * index (ie, the fraction of main-table tuples we will have to
	 * retrieve).
	 */
	OidFunctionCall7(index->amcostestimate,
					 PointerGetDatum(root),
					 PointerGetDatum(baserel),
					 PointerGetDatum(index),
					 PointerGetDatum(indexQuals),
					 PointerGetDatum(&indexStartupCost),
					 PointerGetDatum(&indexTotalCost),
					 PointerGetDatum(&indexSelectivity));

	/* all costs for touching index itself included here */
	startup_cost += indexStartupCost;
	run_cost += indexTotalCost - indexStartupCost;

	/*
	 * Estimate number of main-table tuples and pages fetched.
	 *
	 * If the number of tuples is much smaller than the number of pages in
	 * the relation, each tuple will cost a separate nonsequential fetch.
	 * If it is comparable or larger, then probably we will be able to
	 * avoid some fetches.  We use a growth rate of log(#tuples/#pages +
	 * 1) --- probably totally bogus, but intuitively it gives the right
	 * shape of curve at least.
	 *
	 * XXX if the relation has recently been "clustered" using this index,
	 * then in fact the target tuples will be highly nonuniformly
	 * distributed, and we will be seriously overestimating the scan cost!
	 * Currently we have no way to know whether the relation has been
	 * clustered, nor how much it's been modified since the last
	 * clustering, so we ignore this effect.  Would be nice to do better
	 * someday.
	 */
	tuples_fetched = indexSelectivity * baserel->tuples;
	/* Don't believe estimates less than 1... */
	if (tuples_fetched < 1.0)
		tuples_fetched = 1.0;

	if (baserel->pages > 0)
		pages_fetched = ceil(baserel->pages *
							 log(tuples_fetched / baserel->pages + 1.0));
	else
		pages_fetched = tuples_fetched;
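
	/*
	 * Illustrative example of the curve (numbers invented for clarity):
	 * fetching 100 tuples scattered through a 1000-page relation gives
	 *		pages_fetched = ceil(1000 * log(100/1000 + 1.0)) = 96
	 * i.e. nearly one page per tuple, while fetching 10,000 tuples gives
	 * ceil(1000 * log(11.0)) = 2398 page fetches --- the same page may be
	 * fetched more than once, but the cost per tuple falls off as more
	 * tuples share each page.
	 */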

	/*
	 * Now estimate one nonsequential access per page fetched, plus
	 * appropriate CPU costs per tuple.
	 */

	/* disk costs for main table */
	run_cost += pages_fetched * cost_nonsequential_access(baserel->pages);

	/* CPU costs */
	cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost;

	/*
	 * Normally the indexquals will be removed from the list of
	 * restriction clauses that we have to evaluate as qpquals, so we
	 * should subtract their costs from baserestrictcost.  For a lossy
	 * index, however, we will have to recheck all the quals and so
	 * mustn't subtract anything.  Also, if we are doing a join then some
	 * of the indexquals are join clauses and shouldn't be subtracted.
	 * Rather than work out exactly how much to subtract, we don't
	 * subtract anything in that case either.
	 */
	if (!index->lossy && !is_injoin)
		cpu_per_tuple -= cost_qual_eval(indexQuals);

	run_cost += cpu_per_tuple * tuples_fetched;

	path->startup_cost = startup_cost;
	path->total_cost = startup_cost + run_cost;
}

/*
 * cost_tidscan
 *	  Determines and returns the cost of scanning a relation using tid-s.
 */
void
cost_tidscan(Path *path, RelOptInfo *baserel, List *tideval)
{
	Cost		startup_cost = 0;
	Cost		run_cost = 0;
	Cost		cpu_per_tuple;
	int			ntuples = length(tideval);

	if (!enable_tidscan)
		startup_cost += disable_cost;

	/* disk costs --- assume each tuple on a different page */
	run_cost += random_page_cost * ntuples;

	/* CPU costs */
	cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost;
	run_cost += cpu_per_tuple * ntuples;

	path->startup_cost = startup_cost;
	path->total_cost = startup_cost + run_cost;
}
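
/*
 * For instance (illustrative only): a TID scan fetching 5 specific tuples
 * with the default random_page_cost of 4.0 is charged 5 * 4.0 = 20.0 in
 * disk costs, regardless of the size of the underlying relation, since
 * each TID points directly at a heap page.
 */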

/*
 * cost_sort
 *	  Determines and returns the cost of sorting a relation.
 *
 * The cost of supplying the input data is NOT included; the caller should
 * add that cost to both startup and total costs returned from this routine!
 *
 * If the total volume of data to sort is less than SortMem, we will do
 * an in-memory sort, which requires no I/O and about t*log2(t) tuple
 * comparisons for t tuples.
 *
 * If the total volume exceeds SortMem, we switch to a tape-style merge
 * algorithm.  There will still be about t*log2(t) tuple comparisons in
 * total, but we will also need to write and read each tuple once per
 * merge pass.  We expect about ceil(log6(r)) merge passes where r is the
 * number of initial runs formed (log6 because tuplesort.c uses six-tape
 * merging).  Since the average initial run should be about twice SortMem,
 * we have
 *		disk traffic = 2 * relsize * ceil(log6(p / (2*SortMem)))
 *		cpu = comparison_cost * t * log2(t)
 *
 * The disk traffic is assumed to be half sequential and half random
 * accesses (XXX can't we refine that guess?)
 *
 * We charge two operator evals per tuple comparison, which should be in
 * the right ballpark in most cases.
 *
 * 'pathkeys' is a list of sort keys
 * 'tuples' is the number of tuples in the relation
 * 'width' is the average tuple width in bytes
 *
 * NOTE: some callers currently pass NIL for pathkeys because they
 * can't conveniently supply the sort keys.  Since this routine doesn't
 * currently do anything with pathkeys anyway, that doesn't matter...
 * but if it ever does, it should react gracefully to lack of key data.
 */
void
cost_sort(Path *path, List *pathkeys, double tuples, int width)
{
	Cost		startup_cost = 0;
	Cost		run_cost = 0;
	double		nbytes = relation_byte_size(tuples, width);
	long		sortmembytes = SortMem * 1024L;

	if (!enable_sort)
		startup_cost += disable_cost;

	/*
	 * We want to be sure the cost of a sort is never estimated as zero,
	 * even if passed-in tuple count is zero.  Besides, mustn't do
	 * log(0)...
	 */
	if (tuples < 2.0)
		tuples = 2.0;

	/*
	 * CPU costs
	 *
	 * Assume about two operator evals per tuple comparison and N log2 N
	 * comparisons
	 */
	startup_cost += 2.0 * cpu_operator_cost * tuples * LOG2(tuples);

	/* disk costs */
	if (nbytes > sortmembytes)
	{
		double		npages = ceil(nbytes / BLCKSZ);
		double		nruns = nbytes / (sortmembytes * 2);
		double		log_runs = ceil(LOG6(nruns));
		double		npageaccesses;

		if (log_runs < 1.0)
			log_runs = 1.0;
		npageaccesses = 2.0 * npages * log_runs;
		/* Assume half are sequential (cost 1), half are not */
		startup_cost += npageaccesses *
			(1.0 + cost_nonsequential_access(npages)) * 0.5;
	}

	/*
	 * Note: should we bother to assign a nonzero run_cost to reflect the
	 * overhead of extracting tuples from the sort result?  Probably not
	 * worth worrying about.
	 */
	path->startup_cost = startup_cost;
	path->total_cost = startup_cost + run_cost;
}
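
/*
 * Ballpark example with illustrative numbers: sorting 1,000,000 tuples
 * with the default cpu_operator_cost of 0.0025 charges
 *		2.0 * 0.0025 * 1000000 * log2(1000000) ~= 99657
 * as startup cost, since no tuple can be returned until the sort
 * completes; disk costs are added on top if the data exceeds SortMem.
 */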

/*
 * cost_nestloop
 *	  Determines and returns the cost of joining two relations using the
 *	  nested loop algorithm.
 *
 * 'outer_path' is the path for the outer relation
 * 'inner_path' is the path for the inner relation
 * 'restrictlist' are the RestrictInfo nodes to be applied at the join
 */
void
cost_nestloop(Path *path,
			  Path *outer_path,
			  Path *inner_path,
			  List *restrictlist)
{
	Cost		startup_cost = 0;
	Cost		run_cost = 0;
	Cost		cpu_per_tuple;
	double		ntuples;

	if (!enable_nestloop)
		startup_cost += disable_cost;

	/* cost of source data */

	/*
	 * NOTE: we assume that the inner path's startup_cost is paid once,
	 * not over again on each restart.  This is certainly correct if the
	 * inner path is materialized.  Are there any cases where it is wrong?
	 */
	startup_cost += outer_path->startup_cost + inner_path->startup_cost;
	run_cost += outer_path->total_cost - outer_path->startup_cost;
	run_cost += outer_path->parent->rows *
		(inner_path->total_cost - inner_path->startup_cost);

	/*
	 * Number of tuples processed (not number emitted!).  If inner path is
	 * an indexscan, be sure to use its estimated output row count, which
	 * may be lower than the restriction-clause-only row count of its
	 * parent.
	 */
	if (IsA(inner_path, IndexPath))
		ntuples = ((IndexPath *) inner_path)->rows;
	else
		ntuples = inner_path->parent->rows;
	ntuples *= outer_path->parent->rows;

	/* CPU costs */
	cpu_per_tuple = cpu_tuple_cost + cost_qual_eval(restrictlist);
	run_cost += cpu_per_tuple * ntuples;

	path->startup_cost = startup_cost;
	path->total_cost = startup_cost + run_cost;
}
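
/*
 * Reading the charges above with illustrative numbers: if the outer path
 * yields 1000 rows and the inner path costs 0.5 per (re)scan after its
 * one-time startup cost, the join's run cost includes 1000 * 0.5 = 500
 * for rescanning the inner relation, plus per-tuple CPU for every
 * outer-by-inner tuple pairing considered.
 */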

/*
 * cost_mergejoin
 *	  Determines and returns the cost of joining two relations using the
 *	  merge join algorithm.
 *
 * 'outer_path' is the path for the outer relation
 * 'inner_path' is the path for the inner relation
 * 'restrictlist' are the RestrictInfo nodes to be applied at the join
 * 'outersortkeys' and 'innersortkeys' are lists of the keys to be used
 *		to sort the outer and inner relations, or NIL if no explicit
 *		sort is needed because the source path is already ordered
 */
void
cost_mergejoin(Path *path,
			   Path *outer_path,
			   Path *inner_path,
			   List *restrictlist,
			   List *outersortkeys,
			   List *innersortkeys)
{
	Cost		startup_cost = 0;
	Cost		run_cost = 0;
	Cost		cpu_per_tuple;
	double		ntuples;
	Path		sort_path;		/* dummy for result of cost_sort */

	if (!enable_mergejoin)
		startup_cost += disable_cost;

	/* cost of source data */

	/*
	 * Note we are assuming that each source tuple is fetched just once,
	 * which is not right in the presence of equal keys.  If we had a way
	 * of estimating the proportion of equal keys, we could apply a
	 * correction factor...
	 */
	if (outersortkeys)			/* do we need to sort outer? */
	{
		startup_cost += outer_path->total_cost;
		cost_sort(&sort_path,
				  outersortkeys,
				  outer_path->parent->rows,
				  outer_path->parent->width);
		startup_cost += sort_path.startup_cost;
		run_cost += sort_path.total_cost - sort_path.startup_cost;
	}
	else
	{
		startup_cost += outer_path->startup_cost;
		run_cost += outer_path->total_cost - outer_path->startup_cost;
	}

	if (innersortkeys)			/* do we need to sort inner? */
	{
		startup_cost += inner_path->total_cost;
		cost_sort(&sort_path,
				  innersortkeys,
				  inner_path->parent->rows,
				  inner_path->parent->width);
		startup_cost += sort_path.startup_cost;
		run_cost += sort_path.total_cost - sort_path.startup_cost;
	}
	else
	{
		startup_cost += inner_path->startup_cost;
		run_cost += inner_path->total_cost - inner_path->startup_cost;
	}

	/*
	 * Estimate the number of tuples to be processed in the mergejoin
	 * itself as one per tuple in the two source relations.  This could be
	 * a drastic underestimate if there are many equal-keyed tuples in
	 * either relation, but we have no good way of estimating that...
	 */
	ntuples = outer_path->parent->rows + inner_path->parent->rows;

	/* CPU costs */
	cpu_per_tuple = cpu_tuple_cost + cost_qual_eval(restrictlist);
	run_cost += cpu_per_tuple * ntuples;

	path->startup_cost = startup_cost;
	path->total_cost = startup_cost + run_cost;
}

/*
 * cost_hashjoin
 *	  Determines and returns the cost of joining two relations using the
 *	  hash join algorithm.
 *
 * 'outer_path' is the path for the outer relation
 * 'inner_path' is the path for the inner relation
 * 'restrictlist' are the RestrictInfo nodes to be applied at the join
 * 'innerdisbursion' is an estimate of the disbursion statistic
 *		for the inner hash key.
 */
void
cost_hashjoin(Path *path,
			  Path *outer_path,
			  Path *inner_path,
			  List *restrictlist,
			  Selectivity innerdisbursion)
{
	Cost		startup_cost = 0;
	Cost		run_cost = 0;
	Cost		cpu_per_tuple;
	double		ntuples;
	double		outerbytes = relation_byte_size(outer_path->parent->rows,
												outer_path->parent->width);
	double		innerbytes = relation_byte_size(inner_path->parent->rows,
												inner_path->parent->width);
	long		hashtablebytes = SortMem * 1024L;

	if (!enable_hashjoin)
		startup_cost += disable_cost;

	/* cost of source data */
	startup_cost += outer_path->startup_cost;
	run_cost += outer_path->total_cost - outer_path->startup_cost;
	startup_cost += inner_path->total_cost;

	/* cost of computing hash function: must do it once per input tuple */
	startup_cost += cpu_operator_cost * inner_path->parent->rows;
	run_cost += cpu_operator_cost * outer_path->parent->rows;

	/*
	 * The number of tuple comparisons needed is the number of outer
	 * tuples times the typical hash bucket size.  nodeHash.c tries for
	 * average bucket loading of NTUP_PER_BUCKET, but that goal will
	 * be reached only if data values are uniformly distributed among
	 * the buckets.  To be conservative, we scale up the target bucket
	 * size by the number of inner rows times inner disbursion, giving
	 * an estimate of the typical number of duplicates of each value.
	 * We then charge one cpu_operator_cost per tuple comparison.
	 */
	run_cost += cpu_operator_cost * outer_path->parent->rows *
		NTUP_PER_BUCKET * ceil(inner_path->parent->rows * innerdisbursion);
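
	/*
	 * Plugging in illustrative numbers (assuming NTUP_PER_BUCKET is 10 and
	 * the default cpu_operator_cost of 0.0025): with 10,000 outer rows,
	 * 100,000 inner rows, and a disbursion of 0.00001, this charge is
	 *		0.0025 * 10000 * 10 * ceil(100000 * 0.00001) = 250
	 * since ceil(1.0) = 1, i.e. roughly one duplicate per hash value.
	 */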

	/*
	 * Estimate the number of tuples that get through the hashing filter
	 * as one per tuple in the two source relations.  This could be a
	 * drastic underestimate if there are many equal-keyed tuples in
	 * either relation, but we have no good way of estimating that...
	 */
	ntuples = outer_path->parent->rows + inner_path->parent->rows;

	/* CPU costs */
	cpu_per_tuple = cpu_tuple_cost + cost_qual_eval(restrictlist);
	run_cost += cpu_per_tuple * ntuples;

	/*
	 * if inner relation is too big then we will need to "batch" the join,
	 * which implies writing and reading most of the tuples to disk an
	 * extra time.  Charge one cost unit per page of I/O (correct since it
	 * should be nice and sequential...).  Writing the inner rel counts as
	 * startup cost, all the rest as run cost.
	 */
	if (innerbytes > hashtablebytes)
	{
		double		outerpages = page_size(outer_path->parent->rows,
										   outer_path->parent->width);
		double		innerpages = page_size(inner_path->parent->rows,
										   inner_path->parent->width);

		startup_cost += innerpages;
		run_cost += innerpages + 2 * outerpages;
	}

	/*
	 * Bias against putting larger relation on inside.  We don't want an
	 * absolute prohibition, though, since larger relation might have
	 * better disbursion --- and we can't trust the size estimates
	 * unreservedly, anyway.  Instead, inflate the startup cost by the
	 * square root of the size ratio.  (Why square root?  No real good
	 * reason, but it seems reasonable...)
	 */
	if (innerbytes > outerbytes && outerbytes > 0)
		startup_cost *= sqrt(innerbytes / outerbytes);

	path->startup_cost = startup_cost;
	path->total_cost = startup_cost + run_cost;
}

/*
 * cost_qual_eval
 *		Estimate the CPU cost of evaluating a WHERE clause (once).
 *		The input can be either an implicitly-ANDed list of boolean
 *		expressions, or a list of RestrictInfo nodes.
 */
Cost
cost_qual_eval(List *quals)
{
	Cost		total = 0;

	cost_qual_eval_walker((Node *) quals, &total);
	return total;
}

static bool
cost_qual_eval_walker(Node *node, Cost *total)
{
	if (node == NULL)
		return false;

	/*
	 * Our basic strategy is to charge one cpu_operator_cost for each
	 * operator or function node in the given tree.  Vars and Consts are
	 * charged zero, and so are boolean operators (AND, OR, NOT).
	 * Simplistic, but a lot better than no model at all.
	 *
	 * Should we try to account for the possibility of short-circuit
	 * evaluation of AND/OR?
	 */
	if (IsA(node, Expr))
	{
		Expr	   *expr = (Expr *) node;

		switch (expr->opType)
		{
			case OP_EXPR:
			case FUNC_EXPR:
				*total += cpu_operator_cost;
				break;
			case OR_EXPR:
			case AND_EXPR:
			case NOT_EXPR:
				break;
			case SUBPLAN_EXPR:

				/*
				 * A subplan node in an expression indicates that the
				 * subplan will be executed on each evaluation, so charge
				 * accordingly.  (We assume that sub-selects that can be
				 * executed as InitPlans have already been removed from
				 * the expression.)
				 *
				 * NOTE: this logic should agree with the estimates used by
				 * make_subplan() in plan/subselect.c.
				 */
				{
					SubPlan    *subplan = (SubPlan *) expr->oper;
					Plan	   *plan = subplan->plan;
					Cost		subcost;

					if (subplan->sublink->subLinkType == EXISTS_SUBLINK)
					{
						/* we only need to fetch 1 tuple */
						subcost = plan->startup_cost +
							(plan->total_cost - plan->startup_cost) / plan->plan_rows;
					}
					else if (subplan->sublink->subLinkType == ALL_SUBLINK ||
							 subplan->sublink->subLinkType == ANY_SUBLINK)
					{
						/* assume we need 50% of the tuples */
						subcost = plan->startup_cost +
							0.50 * (plan->total_cost - plan->startup_cost);
						/* XXX what if subplan has been materialized? */
					}
					else
					{
						/* assume we need all tuples */
						subcost = plan->total_cost;
					}
					*total += subcost;
				}
				break;
		}
		/* fall through to examine args of Expr node */
	}

	/*
	 * expression_tree_walker doesn't know what to do with RestrictInfo
	 * nodes, but we just want to recurse through them.
	 */
	if (IsA(node, RestrictInfo))
	{
		RestrictInfo *restrictinfo = (RestrictInfo *) node;

		return cost_qual_eval_walker((Node *) restrictinfo->clause, total);
	}
	/* Otherwise, recurse. */
	return expression_tree_walker(node, cost_qual_eval_walker,
								  (void *) total);
}
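
/*
 * As a quick illustration (invented clause, default cpu_operator_cost of
 * 0.0025): the qual "a = 1 AND b < 2" contains two operator nodes and one
 * AND node, so cost_qual_eval() returns 2 * 0.0025 = 0.005 --- the Vars,
 * Consts, and the AND itself are charged nothing.
 */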

/*
 * set_baserel_size_estimates
 *		Set the size estimates for the given base relation.
 *
 * The rel's targetlist and restrictinfo list must have been constructed
 * already.
 *
 * We set the following fields of the rel node:
 *	rows: the estimated number of output tuples (after applying
 *		  restriction clauses).
 *	width: the estimated average output tuple width in bytes.
 *	baserestrictcost: estimated cost of evaluating baserestrictinfo clauses.
 */
void
set_baserel_size_estimates(Query *root, RelOptInfo *rel)
{
	/* Should only be applied to base relations */
	Assert(length(rel->relids) == 1);

	rel->rows = rel->tuples *
		restrictlist_selectivity(root,
								 rel->baserestrictinfo,
								 lfirsti(rel->relids));

	/*
	 * Force estimate to be at least one row, to make explain output look
	 * better and to avoid possible divide-by-zero when interpolating
	 * cost.
	 */
	if (rel->rows < 1.0)
		rel->rows = 1.0;

	rel->baserestrictcost = cost_qual_eval(rel->baserestrictinfo);

	set_rel_width(root, rel);
}
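
/*
 * For example (illustrative numbers): a relation with rel->tuples = 10000
 * whose restriction clauses have a combined selectivity of 0.01 gets
 * rel->rows = 100; had the selectivity estimate come out at zero, the
 * clamp above would still report one row.
 */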

/*
 * set_joinrel_size_estimates
 *		Set the size estimates for the given join relation.
 *
 * The rel's targetlist must have been constructed already, and a
 * restriction clause list that matches the given component rels must
 * be provided.
 *
 * Since there is more than one way to make a joinrel for more than two
 * base relations, the results we get here could depend on which component
 * rel pair is provided.  In theory we should get the same answers no matter
 * which pair is provided; in practice, since the selectivity estimation
 * routines don't handle all cases equally well, we might not.  But there's
 * not much to be done about it.  (Would it make sense to repeat the
 * calculations for each pair of input rels that's encountered, and somehow
 * average the results?  Probably way more trouble than it's worth.)
 *
 * We set the same relnode fields as set_baserel_size_estimates() does.
 */
void
set_joinrel_size_estimates(Query *root, RelOptInfo *rel,
						   RelOptInfo *outer_rel,
						   RelOptInfo *inner_rel,
						   List *restrictlist)
{
	double		temp;

	/* cartesian product */
	temp = outer_rel->rows * inner_rel->rows;

	/*
	 * Apply join restrictivity.  Note that we are only considering
	 * clauses that become restriction clauses at this join level; we are
	 * not double-counting them because they were not considered in
	 * estimating the sizes of the component rels.
	 */
	temp *= restrictlist_selectivity(root,
									 restrictlist,
									 0);

	/*
	 * Force estimate to be at least one row, to make explain output look
	 * better and to avoid possible divide-by-zero when interpolating
	 * cost.
	 */
	if (temp < 1.0)
		temp = 1.0;
	rel->rows = temp;

	/*
	 * We could apply set_rel_width() to compute the output tuple width
	 * from scratch, but at present it's always just the sum of the input
	 * widths, so why work harder than necessary?  If relnode.c is ever
	 * taught to remove unneeded columns from join targetlists, go back to
	 * using set_rel_width here.
	 */
	rel->width = outer_rel->width + inner_rel->width;
}

/*
 * set_rel_width
 *		Set the estimated output width of the relation.
 */
static void
set_rel_width(Query *root, RelOptInfo *rel)
{
	int			tuple_width = 0;
	List	   *tle;

	foreach(tle, rel->targetlist)
		tuple_width += compute_attribute_width((TargetEntry *) lfirst(tle));
	Assert(tuple_width >= 0);
	rel->width = tuple_width;
}

/*
 * compute_attribute_width
 *	  Given a target list entry, find the size in bytes of the attribute.
 *
 * If a field is variable-length, we make a default assumption.  Would be
 * better if VACUUM recorded some stats about the average field width...
 * also, we have access to the atttypmod, but fail to use it...
 */
static int
compute_attribute_width(TargetEntry *tlistentry)
{
	int			width = get_typlen(tlistentry->resdom->restype);

	if (width < 0)
		return _DEFAULT_ATTRIBUTE_WIDTH_;
	else
		return width;
}

/*
 * relation_byte_size
 *	  Estimate the storage space in bytes for a given number of tuples
 *	  of a given width (size in bytes).
 */
static double
relation_byte_size(double tuples, int width)
{
	return tuples * ((double) (width + sizeof(HeapTupleData)));
}

/*
 * page_size
 *	  Returns an estimate of the number of pages covered by a given
 *	  number of tuples of a given width (size in bytes).
 */
static double
page_size(double tuples, int width)
{
	return ceil(relation_byte_size(tuples, width) / BLCKSZ);
}
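
/*
 * Worked example (illustrative; assumes an 8K BLCKSZ and, say, a 40-byte
 * sizeof(HeapTupleData), which varies by platform): 1000 tuples of width
 * 100 occupy about 1000 * (100 + 40) = 140000 bytes, so
 *		page_size(1000, 100) = ceil(140000 / 8192) = 18 pages.
 */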