/*-------------------------------------------------------------------------
 *
 * costsize.c
 *	  Routines to compute (and set) relation sizes and path costs
 *
 * Path costs are measured in units of disk accesses: one sequential page
 * fetch has cost 1.  All else is scaled relative to a page fetch, using
 * the scaling parameters
 *
 *	random_page_cost		Cost of a non-sequential page fetch
 *	cpu_tuple_cost			Cost of typical CPU time to process a tuple
 *	cpu_index_tuple_cost	Cost of typical CPU time to process an index tuple
 *	cpu_operator_cost		Cost of CPU time to process a typical WHERE operator
 *
 * We also use a rough estimate "effective_cache_size" of the number of
 * disk pages in Postgres + OS-level disk cache.  (We can't simply use
 * NBuffers for this purpose because that would ignore the effects of
 * the kernel's disk cache.)
 *
 * Obviously, taking constants for these values is an oversimplification,
 * but it's tough enough to get any useful estimates even at this level of
 * detail.  Note that all of these parameters are user-settable, in case
 * the default values are drastically off for a particular platform.
 *
 * We compute two separate costs for each path:
 *		total_cost: total estimated cost to fetch all tuples
 *		startup_cost: cost that is expended before first tuple is fetched
 * In some scenarios, such as when there is a LIMIT or we are implementing
 * an EXISTS(...) sub-select, it is not necessary to fetch all tuples of the
 * path's result.  A caller can estimate the cost of fetching a partial
 * result by interpolating between startup_cost and total_cost.  In detail:
 *		actual_cost = startup_cost +
 *			(total_cost - startup_cost) * tuples_to_fetch / path->parent->rows;
 * Note that a relation's rows count (and, by extension, a Plan's plan_rows)
 * is set without regard to any LIMIT, so that this equation works properly.
 * (Also, these routines guarantee not to set the rows count to zero, so there
 * will be no zero divide.)  RelOptInfos, Paths, and Plans themselves never
 * account for LIMIT.
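 *
 * For example (figures purely illustrative): a path with startup_cost = 10,
 * total_cost = 110, and 1000 output rows would be charged an estimated
 * 10 + (110 - 10) * 100 / 1000 = 20 to fetch its first 100 tuples.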
 *
 * Portions Copyright (c) 1996-2000, PostgreSQL, Inc
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
 *	  $Header: /cvsroot/pgsql/src/backend/optimizer/path/costsize.c,v 1.58 2000/04/18 05:43:02 tgl Exp $
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include <math.h>

#include "executor/nodeHash.h"
#include "miscadmin.h"
#include "nodes/plannodes.h"
#include "optimizer/clauses.h"
#include "optimizer/cost.h"
#include "optimizer/internal.h"
#include "optimizer/tlist.h"
#include "utils/lsyscache.h"

/* logarithms to bases 2 and 6, computed via natural log */
#define LOG2(x)  (log(x) / 0.693147180559945)
#define LOG6(x)  (log(x) / 1.79175946922805)

double		effective_cache_size = DEFAULT_EFFECTIVE_CACHE_SIZE;
Cost		random_page_cost = DEFAULT_RANDOM_PAGE_COST;
Cost		cpu_tuple_cost = DEFAULT_CPU_TUPLE_COST;
Cost		cpu_index_tuple_cost = DEFAULT_CPU_INDEX_TUPLE_COST;
Cost		cpu_operator_cost = DEFAULT_CPU_OPERATOR_COST;

Cost		disable_cost = 100000000.0;

bool		enable_seqscan = true;
bool		enable_indexscan = true;
bool		enable_tidscan = true;
bool		enable_sort = true;
bool		enable_nestloop = true;
bool		enable_mergejoin = true;
bool		enable_hashjoin = true;

static bool cost_qual_eval_walker(Node *node, Cost *total);
static void set_rel_width(Query *root, RelOptInfo *rel);
static int	compute_attribute_width(TargetEntry *tlistentry);
static double relation_byte_size(double tuples, int width);
static double page_size(double tuples, int width);

/*
 * cost_seqscan
 *	  Determines and returns the cost of scanning a relation sequentially.
 *
 * If the relation is a temporary to be materialized from a query
 * embedded within a data field (determined by 'relid' containing an
 * attribute reference), then a predetermined constant is returned (we
 * have NO IDEA how big the result of a POSTQUEL procedure is going to be).
 *
 * Note: for historical reasons, this routine and the others in this module
 * use the passed result Path only to store their startup_cost and total_cost
 * results into.  All the input data they need is passed as separate
 * parameters, even though much of it could be extracted from the result Path.
 */
void
cost_seqscan(Path *path, RelOptInfo *baserel)
{
	Cost		startup_cost = 0;
	Cost		run_cost = 0;
	Cost		cpu_per_tuple;

	/* Should only be applied to base relations */
	Assert(length(baserel->relids) == 1);

	if (!enable_seqscan)
		startup_cost += disable_cost;

	/* disk costs */
	if (lfirsti(baserel->relids) < 0)
	{
		/*
		 * cost of sequentially scanning a materialized temporary relation
		 */
		run_cost += _NONAME_SCAN_COST_;
	}
	else
	{
		/*
		 * The cost of reading a page sequentially is 1.0, by definition.
		 * Note that the Unix kernel will typically do some amount of
		 * read-ahead optimization, so that this cost is less than the
		 * true cost of reading a page from disk.  We ignore that issue
		 * here, but must take it into account when estimating the cost of
		 * non-sequential accesses!
		 */
		run_cost += baserel->pages;		/* sequential fetches with cost 1.0 */
	}

	/* CPU costs */
	cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost;
	run_cost += cpu_per_tuple * baserel->tuples;

	path->startup_cost = startup_cost;
	path->total_cost = startup_cost + run_cost;
}

/*
 * cost_nonsequential_access
 *	  Estimate the cost of accessing one page at random from a relation
 *	  (or sort temp file) of the given size in pages.
 *
 * The simplistic model that the cost is random_page_cost is what we want
 * to use for large relations; but for small ones that is a serious
 * overestimate because of the effects of caching.  This routine tries to
 * account for that.
 *
 * Unfortunately we don't have any good way of estimating the effective cache
 * size we are working with --- we know that Postgres itself has NBuffers
 * internal buffers, but the size of the kernel's disk cache is uncertain,
 * and how much of it we get to use is even less certain.  We punt the problem
 * for now by assuming we are given an effective_cache_size parameter.
 *
 * Given a guesstimated cache size, we estimate the actual I/O cost per page
 * with the entirely ad-hoc equations:
 *	for rel_size <= effective_cache_size:
 *		1 + (random_page_cost/2-1) * (rel_size/effective_cache_size) ** 2
 *	for rel_size >= effective_cache_size:
 *		random_page_cost * (1 - (effective_cache_size/rel_size)/2)
 * These give the right asymptotic behavior (=> 1.0 as rel_size becomes
 * small, => random_page_cost as it becomes large) and meet in the middle
 * with the estimate that the cache is about 50% effective for a relation
 * of the same size as effective_cache_size.  (XXX this is probably all
 * wrong, but I haven't been able to find any theory about how effective
 * a disk cache should be presumed to be.)
 */
Cost
cost_nonsequential_access(double relpages)
{
	double		relsize;

	/* don't crash on bad input data */
	if (relpages <= 0.0 || effective_cache_size <= 0.0)
		return random_page_cost;

	/* relsize is the relation's size as a multiple of effective_cache_size */
	relsize = relpages / effective_cache_size;

	if (relsize >= 1.0)
		return random_page_cost * (1.0 - 0.5 / relsize);
	else
		return 1.0 + (random_page_cost * 0.5 - 1.0) * relsize * relsize;
}
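
/*
 * Worked example (illustrative only; assumes for concreteness the default
 * parameter values random_page_cost = 4.0 and effective_cache_size = 1000
 * pages): a 100-page relation gives relsize = 0.1, so the cost per page is
 * 1 + (4/2 - 1) * 0.1^2 = 1.01, nearly sequential; at exactly 1000 pages
 * both equations yield random_page_cost/2 = 2.0; a 10000-page relation
 * gives 4 * (1 - 0.05) = 3.8, approaching the full random_page_cost.
 */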

/*
 * cost_index
 *	  Determines and returns the cost of scanning a relation using an index.
 *
 * NOTE: an indexscan plan node can actually represent several passes,
 * but here we consider the cost of just one pass.
 *
 * 'root' is the query root
 * 'baserel' is the base relation the index is for
 * 'index' is the index to be used
 * 'indexQuals' is the list of applicable qual clauses (implicit AND semantics)
 * 'is_injoin' is T if we are considering using the index scan as the inside
 *		of a nestloop join (hence, some of the indexQuals are join clauses)
 *
 * NOTE: 'indexQuals' must contain only clauses usable as index restrictions.
 * Any additional quals evaluated as qpquals may reduce the number of returned
 * tuples, but they won't reduce the number of tuples we have to fetch from
 * the table, so they don't reduce the scan cost.
 */
void
cost_index(Path *path, Query *root,
		   RelOptInfo *baserel,
		   IndexOptInfo *index,
		   List *indexQuals,
		   bool is_injoin)
{
	Cost		startup_cost = 0;
	Cost		run_cost = 0;
	Cost		cpu_per_tuple;
	Cost		indexStartupCost;
	Cost		indexTotalCost;
	Selectivity indexSelectivity;
	double		tuples_fetched;
	double		pages_fetched;

	/* Should only be applied to base relations */
	Assert(IsA(baserel, RelOptInfo) && IsA(index, IndexOptInfo));
	Assert(length(baserel->relids) == 1);

	if (!enable_indexscan && !is_injoin)
		startup_cost += disable_cost;

	/*
	 * Call index-access-method-specific code to estimate the processing
	 * cost for scanning the index, as well as the selectivity of the
	 * index (ie, the fraction of main-table tuples we will have to
	 * fetch).
	 */
	fmgr(index->amcostestimate, root, baserel, index, indexQuals,
		 &indexStartupCost, &indexTotalCost, &indexSelectivity);

	/* all costs for touching index itself included here */
	startup_cost += indexStartupCost;
	run_cost += indexTotalCost - indexStartupCost;

	/*
	 * Estimate number of main-table tuples and pages fetched.
	 *
	 * If the number of tuples is much smaller than the number of pages in
	 * the relation, each tuple will cost a separate nonsequential fetch.
	 * If it is comparable or larger, then probably we will be able to
	 * avoid some fetches.  We use a growth rate of log(#tuples/#pages +
	 * 1) --- probably totally bogus, but intuitively it gives the right
	 * shape of curve at least.
	 *
	 * XXX if the relation has recently been "clustered" using this index,
	 * then in fact the target tuples will be highly nonuniformly
	 * distributed, and we will be seriously overestimating the scan cost!
	 * Currently we have no way to know whether the relation has been
	 * clustered, nor how much it's been modified since the last
	 * clustering, so we ignore this effect.  Would be nice to do better
	 * someday.
	 */
	tuples_fetched = indexSelectivity * baserel->tuples;
	/* Don't believe estimates less than 1... */
	if (tuples_fetched < 1.0)
		tuples_fetched = 1.0;

	if (baserel->pages > 0)
		pages_fetched = ceil(baserel->pages *
							 log(tuples_fetched / baserel->pages + 1.0));
	else
		pages_fetched = tuples_fetched;
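
	/*
	 * Example (illustrative): fetching 100 tuples from a 1000-page table
	 * costs ceil(1000 * log(100/1000 + 1)) = ceil(95.3) = 96 pages, about
	 * one page per tuple; fetching 1000 tuples costs ceil(1000 * log(2)) =
	 * 694 pages, since some tuples are then assumed to share pages.
	 */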

	/*
	 * Now estimate one nonsequential access per page fetched, plus
	 * appropriate CPU costs per tuple.
	 */

	/* disk costs for main table */
	run_cost += pages_fetched * cost_nonsequential_access(baserel->pages);

	/* CPU costs */
	cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost;

	/*
	 * Normally the indexquals will be removed from the list of
	 * restriction clauses that we have to evaluate as qpquals, so we
	 * should subtract their costs from baserestrictcost.  For a lossy
	 * index, however, we will have to recheck all the quals and so
	 * mustn't subtract anything.  Also, if we are doing a join then some
	 * of the indexquals are join clauses and shouldn't be subtracted.
	 * Rather than work out exactly how much to subtract, we don't
	 * subtract anything in that case either.
	 */
	if (!index->lossy && !is_injoin)
		cpu_per_tuple -= cost_qual_eval(indexQuals);

	run_cost += cpu_per_tuple * tuples_fetched;

	path->startup_cost = startup_cost;
	path->total_cost = startup_cost + run_cost;
}

/*
 * cost_tidscan
 *	  Determines and returns the cost of scanning a relation using TIDs.
 */
void
cost_tidscan(Path *path, RelOptInfo *baserel, List *tideval)
{
	Cost		startup_cost = 0;
	Cost		run_cost = 0;
	Cost		cpu_per_tuple;
	int			ntuples = length(tideval);

	if (!enable_tidscan)
		startup_cost += disable_cost;

	/* disk costs --- assume each tuple on a different page */
	run_cost += random_page_cost * ntuples;

	/* CPU costs */
	cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost;
	run_cost += cpu_per_tuple * ntuples;

	path->startup_cost = startup_cost;
	path->total_cost = startup_cost + run_cost;
}

/*
 * cost_sort
 *	  Determines and returns the cost of sorting a relation.
 *
 * The cost of supplying the input data is NOT included; the caller should
 * add that cost to both startup and total costs returned from this routine!
 *
 * If the total volume of data to sort is less than SortMem, we will do
 * an in-memory sort, which requires no I/O and about t*log2(t) tuple
 * comparisons for t tuples.
 *
 * If the total volume exceeds SortMem, we switch to a tape-style merge
 * algorithm.  There will still be about t*log2(t) tuple comparisons in
 * total, but we will also need to write and read each tuple once per
 * merge pass.  We expect about ceil(log6(r)) merge passes where r is the
 * number of initial runs formed (log6 because tuplesort.c uses six-tape
 * merging).  Since the average initial run should be about twice SortMem,
 * we have
 *		disk traffic = 2 * relsize * ceil(log6(relsize / (2*SortMem)))
 *		cpu = comparison_cost * t * log2(t)
 *
 * The disk traffic is assumed to be half sequential and half random
 * accesses (XXX can't we refine that guess?)
 *
 * We charge two operator evals per tuple comparison, which should be in
 * the right ballpark in most cases.
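 *
 * For example (illustrative, assuming for concreteness SortMem = 512, ie
 * 512KB): sorting 40MB of data forms roughly 40MB / 1MB = 40 initial runs,
 * needing ceil(log6(40)) = 3 merge passes, so the disk-traffic term charges
 * 2 * npages * 3 page writes-plus-reads on top of the t*log2(t) comparison
 * cost.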
 *
 * 'pathkeys' is a list of sort keys
 * 'tuples' is the number of tuples in the relation
 * 'width' is the average tuple width in bytes
 *
 * NOTE: some callers currently pass NIL for pathkeys because they
 * can't conveniently supply the sort keys.  Since this routine doesn't
 * currently do anything with pathkeys anyway, that doesn't matter...
 * but if it ever does, it should react gracefully to lack of key data.
 */
void
cost_sort(Path *path, List *pathkeys, double tuples, int width)
{
	Cost		startup_cost = 0;
	Cost		run_cost = 0;
	double		nbytes = relation_byte_size(tuples, width);
	long		sortmembytes = SortMem * 1024L;

	if (!enable_sort)
		startup_cost += disable_cost;

	/*
	 * We want to be sure the cost of a sort is never estimated as zero,
	 * even if passed-in tuple count is zero.  Besides, mustn't do
	 * log(0)...
	 */
	if (tuples < 2.0)
		tuples = 2.0;

	/*
	 * Assume about two operator evals per tuple comparison and N log2 N
	 * comparisons
	 */
	startup_cost += 2.0 * cpu_operator_cost * tuples * LOG2(tuples);

	/* disk costs */
	if (nbytes > sortmembytes)
	{
		double		npages = ceil(nbytes / BLCKSZ);
		double		nruns = nbytes / (sortmembytes * 2);
		double		log_runs = ceil(LOG6(nruns));
		double		npageaccesses;

		/* expect at least one merge pass, even for few or tiny runs */
		if (log_runs < 1.0)
			log_runs = 1.0;
		npageaccesses = 2.0 * npages * log_runs;
		/* Assume half are sequential (cost 1), half are not */
		startup_cost += npageaccesses *
			(1.0 + cost_nonsequential_access(npages)) * 0.5;
	}

	/*
	 * Note: should we bother to assign a nonzero run_cost to reflect the
	 * overhead of extracting tuples from the sort result?  Probably not
	 * worth worrying about.
	 */
	path->startup_cost = startup_cost;
	path->total_cost = startup_cost + run_cost;
}

/*
 * cost_nestloop
 *	  Determines and returns the cost of joining two relations using the
 *	  nested loop algorithm.
 *
 * 'outer_path' is the path for the outer relation
 * 'inner_path' is the path for the inner relation
 * 'restrictlist' are the RestrictInfo nodes to be applied at the join
 */
void
cost_nestloop(Path *path,
			  Path *outer_path,
			  Path *inner_path,
			  List *restrictlist)
{
	Cost		startup_cost = 0;
	Cost		run_cost = 0;
	Cost		cpu_per_tuple;
	double		ntuples;

	if (!enable_nestloop)
		startup_cost += disable_cost;

	/* cost of source data */

	/*
	 * NOTE: we assume that the inner path's startup_cost is paid once,
	 * not over again on each restart.  This is certainly correct if the
	 * inner path is materialized.  Are there any cases where it is wrong?
	 */
	startup_cost += outer_path->startup_cost + inner_path->startup_cost;
	run_cost += outer_path->total_cost - outer_path->startup_cost;
	run_cost += outer_path->parent->rows *
		(inner_path->total_cost - inner_path->startup_cost);
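
	/*
	 * That is, the inner path's startup cost is charged once but its
	 * per-run cost is paid once per outer row: e.g. (illustrative), with
	 * 100 outer rows, an inner path costing 1 to start and 5 per run
	 * contributes 1 to startup cost and 100 * 5 = 500 to run cost.
	 */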

	/*
	 * Number of tuples processed (not number emitted!).  If inner path is
	 * an indexscan, be sure to use its estimated output row count, which
	 * may be lower than the restriction-clause-only row count of its
	 * parent.
	 */
	if (IsA(inner_path, IndexPath))
		ntuples = ((IndexPath *) inner_path)->rows;
	else
		ntuples = inner_path->parent->rows;
	ntuples *= outer_path->parent->rows;

	/* CPU costs */
	cpu_per_tuple = cpu_tuple_cost + cost_qual_eval(restrictlist);
	run_cost += cpu_per_tuple * ntuples;

	path->startup_cost = startup_cost;
	path->total_cost = startup_cost + run_cost;
}

/*
 * cost_mergejoin
 *	  Determines and returns the cost of joining two relations using the
 *	  merge join algorithm.
 *
 * 'outer_path' is the path for the outer relation
 * 'inner_path' is the path for the inner relation
 * 'restrictlist' are the RestrictInfo nodes to be applied at the join
 * 'outersortkeys' and 'innersortkeys' are lists of the keys to be used
 *		to sort the outer and inner relations, or NIL if no explicit
 *		sort is needed because the source path is already ordered
 */
void
cost_mergejoin(Path *path,
			   Path *outer_path,
			   Path *inner_path,
			   List *restrictlist,
			   List *outersortkeys,
			   List *innersortkeys)
{
	Cost		startup_cost = 0;
	Cost		run_cost = 0;
	Cost		cpu_per_tuple;
	double		ntuples;
	Path		sort_path;		/* dummy for result of cost_sort */

	if (!enable_mergejoin)
		startup_cost += disable_cost;

	/* cost of source data */

	/*
	 * Note we are assuming that each source tuple is fetched just once,
	 * which is not right in the presence of equal keys.  If we had a way
	 * of estimating the proportion of equal keys, we could apply a
	 * correction factor...
	 */
	if (outersortkeys)			/* do we need to sort outer? */
	{
		startup_cost += outer_path->total_cost;
		cost_sort(&sort_path,
				  outersortkeys,
				  outer_path->parent->rows,
				  outer_path->parent->width);
		startup_cost += sort_path.startup_cost;
		run_cost += sort_path.total_cost - sort_path.startup_cost;
	}
	else
	{
		startup_cost += outer_path->startup_cost;
		run_cost += outer_path->total_cost - outer_path->startup_cost;
	}

	if (innersortkeys)			/* do we need to sort inner? */
	{
		startup_cost += inner_path->total_cost;
		cost_sort(&sort_path,
				  innersortkeys,
				  inner_path->parent->rows,
				  inner_path->parent->width);
		startup_cost += sort_path.startup_cost;
		run_cost += sort_path.total_cost - sort_path.startup_cost;
	}
	else
	{
		startup_cost += inner_path->startup_cost;
		run_cost += inner_path->total_cost - inner_path->startup_cost;
	}

	/*
	 * Estimate the number of tuples to be processed in the mergejoin
	 * itself as one per tuple in the two source relations.  This could be
	 * a drastic underestimate if there are many equal-keyed tuples in
	 * either relation, but we have no good way of estimating that...
	 */
	ntuples = outer_path->parent->rows + inner_path->parent->rows;

	/* CPU costs */
	cpu_per_tuple = cpu_tuple_cost + cost_qual_eval(restrictlist);
	run_cost += cpu_per_tuple * ntuples;

	path->startup_cost = startup_cost;
	path->total_cost = startup_cost + run_cost;
}

/*
 * cost_hashjoin
 *	  Determines and returns the cost of joining two relations using the
 *	  hash join algorithm.
 *
 * 'outer_path' is the path for the outer relation
 * 'inner_path' is the path for the inner relation
 * 'restrictlist' are the RestrictInfo nodes to be applied at the join
 * 'innerdisbursion' is an estimate of the disbursion statistic
 *		for the inner hash key.
 */
void
cost_hashjoin(Path *path,
			  Path *outer_path,
			  Path *inner_path,
			  List *restrictlist,
			  Selectivity innerdisbursion)
{
	Cost		startup_cost = 0;
	Cost		run_cost = 0;
	Cost		cpu_per_tuple;
	double		ntuples;
	double		outerbytes = relation_byte_size(outer_path->parent->rows,
												outer_path->parent->width);
	double		innerbytes = relation_byte_size(inner_path->parent->rows,
												inner_path->parent->width);
	long		hashtablebytes = SortMem * 1024L;

	if (!enable_hashjoin)
		startup_cost += disable_cost;

	/* cost of source data */
	startup_cost += outer_path->startup_cost;
	run_cost += outer_path->total_cost - outer_path->startup_cost;
	startup_cost += inner_path->total_cost;

	/* cost of computing hash function: must do it once per input tuple */
	startup_cost += cpu_operator_cost * inner_path->parent->rows;
	run_cost += cpu_operator_cost * outer_path->parent->rows;

	/*
	 * The number of tuple comparisons needed is the number of outer
	 * tuples times the typical hash bucket size.  nodeHash.c tries for
	 * average bucket loading of NTUP_PER_BUCKET, but that goal will
	 * be reached only if data values are uniformly distributed among
	 * the buckets.  To be conservative, we scale up the target bucket
	 * size by the number of inner rows times inner disbursion, giving
	 * an estimate of the typical number of duplicates of each value.
	 * We then charge one cpu_operator_cost per tuple comparison.
	 */
	run_cost += cpu_operator_cost * outer_path->parent->rows *
		NTUP_PER_BUCKET * ceil(inner_path->parent->rows * innerdisbursion);
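
	/*
	 * Example (illustrative): with 10000 inner rows and innerdisbursion =
	 * 0.0005, ceil(10000 * 0.0005) = 5, so each outer tuple is charged
	 * NTUP_PER_BUCKET * 5 comparisons at cpu_operator_cost apiece.
	 */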

	/*
	 * Estimate the number of tuples that get through the hashing filter
	 * as one per tuple in the two source relations.  This could be a
	 * drastic underestimate if there are many equal-keyed tuples in
	 * either relation, but we have no good way of estimating that...
	 */
	ntuples = outer_path->parent->rows + inner_path->parent->rows;

	/* CPU costs */
	cpu_per_tuple = cpu_tuple_cost + cost_qual_eval(restrictlist);
	run_cost += cpu_per_tuple * ntuples;

	/*
	 * If the inner relation is too big then we will need to "batch" the
	 * join, which implies writing and reading most of the tuples to disk
	 * an extra time.  Charge one cost unit per page of I/O (correct since
	 * it should be nice and sequential...).  Writing the inner rel counts
	 * as startup cost, all the rest as run cost.
	 */
	if (innerbytes > hashtablebytes)
	{
		double		outerpages = page_size(outer_path->parent->rows,
										   outer_path->parent->width);
		double		innerpages = page_size(inner_path->parent->rows,
										   inner_path->parent->width);

		startup_cost += innerpages;
		run_cost += innerpages + 2 * outerpages;
	}
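
	/*
	 * (In the batched case the inner rel is written out once while the
	 * hash table is built, hence the innerpages of startup cost; during
	 * the run the inner batches are read back once and the outer rel is
	 * written and reread once, hence innerpages + 2 * outerpages.)
	 */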

	/*
	 * Bias against putting larger relation on inside.  We don't want an
	 * absolute prohibition, though, since larger relation might have
	 * better disbursion --- and we can't trust the size estimates
	 * unreservedly, anyway.  Instead, inflate the startup cost by the
	 * square root of the size ratio.  (Why square root?  No real good
	 * reason, but it seems reasonable...)
	 */
	if (innerbytes > outerbytes && outerbytes > 0)
		startup_cost *= sqrt(innerbytes / outerbytes);

	path->startup_cost = startup_cost;
	path->total_cost = startup_cost + run_cost;
}

/*
 * cost_qual_eval
 *	  Estimate the CPU cost of evaluating a WHERE clause (once).
 *	  The input can be either an implicitly-ANDed list of boolean
 *	  expressions, or a list of RestrictInfo nodes.
 */
Cost
cost_qual_eval(List *quals)
{
	Cost		total = 0;

	cost_qual_eval_walker((Node *) quals, &total);
	return total;
}

static bool
cost_qual_eval_walker(Node *node, Cost *total)
{
	if (node == NULL)
		return false;

	/*
	 * Our basic strategy is to charge one cpu_operator_cost for each
	 * operator or function node in the given tree.  Vars and Consts are
	 * charged zero, and so are boolean operators (AND, OR, NOT).
	 * Simplistic, but a lot better than no model at all.
	 *
	 * Should we try to account for the possibility of short-circuit
	 * evaluation of AND/OR?
	 */
	if (IsA(node, Expr))
	{
		Expr	   *expr = (Expr *) node;

		switch (expr->opType)
		{
			case OP_EXPR:
			case FUNC_EXPR:
				*total += cpu_operator_cost;
				break;
			case OR_EXPR:
			case AND_EXPR:
			case NOT_EXPR:
				break;
			case SUBPLAN_EXPR:

				/*
				 * A subplan node in an expression indicates that the
				 * subplan will be executed on each evaluation, so charge
				 * accordingly.  (We assume that sub-selects that can be
				 * executed as InitPlans have already been removed from
				 * the expression.)
				 *
				 * NOTE: this logic should agree with the estimates used by
				 * make_subplan() in plan/subselect.c.
				 */
				{
					SubPlan    *subplan = (SubPlan *) expr->oper;
					Plan	   *plan = subplan->plan;
					Cost		subcost;

					if (subplan->sublink->subLinkType == EXISTS_SUBLINK)
					{
						/* we only need to fetch 1 tuple */
						subcost = plan->startup_cost +
							(plan->total_cost - plan->startup_cost) /
							plan->plan_rows;
					}
					else if (subplan->sublink->subLinkType == ALL_SUBLINK ||
							 subplan->sublink->subLinkType == ANY_SUBLINK)
					{
						/* assume we need 50% of the tuples */
						subcost = plan->startup_cost +
							0.50 * (plan->total_cost - plan->startup_cost);
						/* XXX what if subplan has been materialized? */
					}
					else
					{
						/* assume we need all tuples */
						subcost = plan->total_cost;
					}
					*total += subcost;
				}
				break;
		}
		/* fall through to examine args of Expr node */
	}

	/*
	 * expression_tree_walker doesn't know what to do with RestrictInfo
	 * nodes, but we just want to recurse through them.
	 */
	if (IsA(node, RestrictInfo))
	{
		RestrictInfo *restrictinfo = (RestrictInfo *) node;

		return cost_qual_eval_walker((Node *) restrictinfo->clause, total);
	}

	/* Otherwise, recurse. */
	return expression_tree_walker(node, cost_qual_eval_walker,
								  (void *) total);
}

/*
 * set_baserel_size_estimates
 *	  Set the size estimates for the given base relation.
 *
 * The rel's targetlist and restrictinfo list must have been constructed
 * already.
 *
 * We set the following fields of the rel node:
 *	rows: the estimated number of output tuples (after applying
 *		  restriction clauses).
 *	width: the estimated average output tuple width in bytes.
 *	baserestrictcost: estimated cost of evaluating baserestrictinfo clauses.
 */
void
set_baserel_size_estimates(Query *root, RelOptInfo *rel)
{
	/* Should only be applied to base relations */
	Assert(length(rel->relids) == 1);

	rel->rows = rel->tuples *
		restrictlist_selectivity(root,
								 rel->baserestrictinfo,
								 lfirsti(rel->relids));

	/*
	 * Force estimate to be at least one row, to make explain output look
	 * better and to avoid possible divide-by-zero when interpolating
	 * costs.
	 */
	if (rel->rows < 1.0)
		rel->rows = 1.0;

	rel->baserestrictcost = cost_qual_eval(rel->baserestrictinfo);

	set_rel_width(root, rel);
}

/*
 * set_joinrel_size_estimates
 *	  Set the size estimates for the given join relation.
 *
 * The rel's targetlist must have been constructed already, and a
 * restriction clause list that matches the given component rels must
 * be provided.
 *
 * Since there is more than one way to make a joinrel for more than two
 * base relations, the results we get here could depend on which component
 * rel pair is provided.  In theory we should get the same answers no matter
 * which pair is provided; in practice, since the selectivity estimation
 * routines don't handle all cases equally well, we might not.  But there's
 * not much to be done about it.  (Would it make sense to repeat the
 * calculations for each pair of input rels that's encountered, and somehow
 * average the results?  Probably way more trouble than it's worth.)
 *
 * We set the same relnode fields as set_baserel_size_estimates() does.
 */
void
set_joinrel_size_estimates(Query *root, RelOptInfo *rel,
						   RelOptInfo *outer_rel,
						   RelOptInfo *inner_rel,
						   List *restrictlist)
{
	double		temp;

	/* cartesian product */
	temp = outer_rel->rows * inner_rel->rows;

	/*
	 * Apply join restrictivity.  Note that we are only considering
	 * clauses that become restriction clauses at this join level; we are
	 * not double-counting them because they were not considered in
	 * estimating the sizes of the component rels.
	 */
	temp *= restrictlist_selectivity(root,
									 restrictlist,
									 0);

	/*
	 * Force estimate to be at least one row, to make explain output look
	 * better and to avoid possible divide-by-zero when interpolating
	 * costs.
	 */
	if (temp < 1.0)
		temp = 1.0;

	rel->rows = temp;

	/*
	 * We could apply set_rel_width() to compute the output tuple width
	 * from scratch, but at present it's always just the sum of the input
	 * widths, so why work harder than necessary?  If relnode.c is ever
	 * taught to remove unneeded columns from join targetlists, go back to
	 * using set_rel_width here.
	 */
	rel->width = outer_rel->width + inner_rel->width;
}

/*
 * set_rel_width
 *	  Set the estimated output width of the relation.
 */
static void
set_rel_width(Query *root, RelOptInfo *rel)
{
	int			tuple_width = 0;
	List	   *tle;

	foreach(tle, rel->targetlist)
		tuple_width += compute_attribute_width((TargetEntry *) lfirst(tle));
	Assert(tuple_width >= 0);
	rel->width = tuple_width;
}

/*
 * compute_attribute_width
 *	  Given a target list entry, find the size in bytes of the attribute.
 *
 * If a field is variable-length, we make a default assumption.  Would be
 * better if VACUUM recorded some stats about the average field width...
 * also, we have access to the atttypmod, but fail to use it...
 */
static int
compute_attribute_width(TargetEntry *tlistentry)
{
	int			width = get_typlen(tlistentry->resdom->restype);

	if (width < 0)
		return _DEFAULT_ATTRIBUTE_WIDTH_;
	else
		return width;
}

/*
 * relation_byte_size
 *	  Estimate the storage space in bytes for a given number of tuples
 *	  of a given width (size in bytes).
 */
static double
relation_byte_size(double tuples, int width)
{
	return tuples * ((double) (width + sizeof(HeapTupleData)));
}

/*
 * page_size
 *	  Returns an estimate of the number of pages covered by a given
 *	  number of tuples of a given width (size in bytes).
 */
static double
page_size(double tuples, int width)
{
	return ceil(relation_byte_size(tuples, width) / BLCKSZ);
}