/*-------------------------------------------------------------------------
 *
 * costsize.c
 *	  Routines to compute (and set) relation sizes and path costs
 *
 * Path costs are measured in arbitrary units established by these basic
 * parameters:
 *
 *	seq_page_cost		Cost of a sequential page fetch
 *	random_page_cost	Cost of a non-sequential page fetch
 *	cpu_tuple_cost		Cost of typical CPU time to process a tuple
 *	cpu_index_tuple_cost  Cost of typical CPU time to process an index tuple
 *	cpu_operator_cost	Cost of CPU time to execute an operator or function
 *
 * We expect that the kernel will typically do some amount of read-ahead
 * optimization; this in conjunction with seek costs means that seq_page_cost
 * is normally considerably less than random_page_cost.  (However, if the
 * database is fully cached in RAM, it is reasonable to set them equal.)
 *
 * We also use a rough estimate "effective_cache_size" of the number of
 * disk pages in Postgres + OS-level disk cache.  (We can't simply use
 * NBuffers for this purpose because that would ignore the effects of
 * the kernel's disk cache.)
 *
 * Obviously, taking constants for these values is an oversimplification,
 * but it's tough enough to get any useful estimates even at this level of
 * detail.  Note that all of these parameters are user-settable, in case
 * the default values are drastically off for a particular platform.
 *
 * We compute two separate costs for each path:
 *		total_cost: total estimated cost to fetch all tuples
 *		startup_cost: cost that is expended before first tuple is fetched
 * In some scenarios, such as when there is a LIMIT or we are implementing
 * an EXISTS(...) sub-select, it is not necessary to fetch all tuples of the
 * path's result.  A caller can estimate the cost of fetching a partial
 * result by interpolating between startup_cost and total_cost.  In detail:
 *		actual_cost = startup_cost +
 *			(total_cost - startup_cost) * tuples_to_fetch / path->parent->rows;
 * Note that a base relation's rows count (and, by extension, plan_rows for
 * plan nodes below the LIMIT node) is set without regard to any LIMIT, so
 * that this equation works properly.  (Also, these routines guarantee not to
 * set the rows count to zero, so there will be no zero divide.)  The LIMIT is
 * applied as a top-level plan node.
 *
 * For largely historical reasons, most of the routines in this module use
 * the passed result Path only to store their startup_cost and total_cost
 * results into.  All the input data they need is passed as separate
 * parameters, even though much of it could be extracted from the Path.
 * An exception is made for the cost_XXXjoin() routines, which expect all
 * the non-cost fields of the passed XXXPath to be filled in.
 *
 *
 * Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
 *	  $PostgreSQL: pgsql/src/backend/optimizer/path/costsize.c,v 1.189 2007/11/15 22:25:15 momjian Exp $
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include <math.h>

#include "executor/nodeHash.h"
#include "miscadmin.h"
#include "optimizer/clauses.h"
#include "optimizer/cost.h"
#include "optimizer/pathnode.h"
#include "optimizer/planmain.h"
#include "parser/parsetree.h"
#include "parser/parse_expr.h"
#include "utils/lsyscache.h"
#include "utils/selfuncs.h"
#include "utils/tuplesort.h"
#define LOG2(x)  (log(x) / 0.693147180559945)

/*
 * Some Paths return less than the nominal number of rows of their parent
 * relations; join nodes need to do this to get the correct input count:
 */
#define PATH_ROWS(path) \
	(IsA(path, UniquePath) ? \
	 ((UniquePath *) (path))->rows : \
	 (path)->parent->rows)
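
/*
 * Illustrative sketch of the partial-fetch interpolation described in the
 * header comment.  This helper is hypothetical (guarded out from
 * compilation); callers such as LIMIT planning do the equivalent inline.
 */
#ifdef NOT_USED
static Cost
example_partial_fetch_cost(Path *path, double tuples_to_fetch)
{
	/* actual_cost = startup_cost + (total - startup) * fraction fetched */
	return path->startup_cost +
		(path->total_cost - path->startup_cost) *
		tuples_to_fetch / path->parent->rows;
}
#endif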
double		seq_page_cost = DEFAULT_SEQ_PAGE_COST;
double		random_page_cost = DEFAULT_RANDOM_PAGE_COST;
double		cpu_tuple_cost = DEFAULT_CPU_TUPLE_COST;
double		cpu_index_tuple_cost = DEFAULT_CPU_INDEX_TUPLE_COST;
double		cpu_operator_cost = DEFAULT_CPU_OPERATOR_COST;

int			effective_cache_size = DEFAULT_EFFECTIVE_CACHE_SIZE;

Cost		disable_cost = 100000000.0;

bool		enable_seqscan = true;
bool		enable_indexscan = true;
bool		enable_bitmapscan = true;
bool		enable_tidscan = true;
bool		enable_sort = true;
bool		enable_hashagg = true;
bool		enable_nestloop = true;
bool		enable_mergejoin = true;
bool		enable_hashjoin = true;
typedef struct
{
	PlannerInfo *root;
	QualCost	total;
} cost_qual_eval_context;

static MergeScanSelCache *cached_scansel(PlannerInfo *root,
			   RestrictInfo *rinfo,
			   PathKey *pathkey);
static bool cost_qual_eval_walker(Node *node, cost_qual_eval_context *context);
static Selectivity approx_selectivity(PlannerInfo *root, List *quals,
				   JoinType jointype);
static Selectivity join_in_selectivity(JoinPath *path, PlannerInfo *root);
static void set_rel_width(PlannerInfo *root, RelOptInfo *rel);
static double relation_byte_size(double tuples, int width);
static double page_size(double tuples, int width);
/*
 * clamp_row_est
 *		Force a row-count estimate to a sane value.
 */
double
clamp_row_est(double nrows)
{
	/*
	 * Force estimate to be at least one row, to make explain output look
	 * better and to avoid possible divide-by-zero when interpolating costs.
	 * Make it an integer, too.
	 */
	if (nrows <= 1.0)
		nrows = 1.0;
	else
		nrows = rint(nrows);

	return nrows;
}
/*
 * cost_seqscan
 *	  Determines and returns the cost of scanning a relation sequentially.
 */
void
cost_seqscan(Path *path, PlannerInfo *root,
			 RelOptInfo *baserel)
{
	Cost		startup_cost = 0;
	Cost		run_cost = 0;
	Cost		cpu_per_tuple;

	/* Should only be applied to base relations */
	Assert(baserel->relid > 0);
	Assert(baserel->rtekind == RTE_RELATION);

	if (!enable_seqscan)
		startup_cost += disable_cost;

	/* disk costs */
	run_cost += seq_page_cost * baserel->pages;

	/* CPU costs */
	startup_cost += baserel->baserestrictcost.startup;
	cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
	run_cost += cpu_per_tuple * baserel->tuples;

	path->startup_cost = startup_cost;
	path->total_cost = startup_cost + run_cost;
}
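
/*
 * Worked example (illustrative numbers, default cost parameters
 * seq_page_cost = 1.0 and cpu_tuple_cost = 0.01, no restriction quals):
 * a 10000-page table holding 1,000,000 tuples costs 10000 * 1.0 in disk
 * fetches plus 1,000,000 * 0.01 in per-tuple CPU, so total_cost = 20000
 * with startup_cost = 0.
 */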
/*
 * cost_index
 *	  Determines and returns the cost of scanning a relation using an index.
 *
 * 'index' is the index to be used
 * 'indexQuals' is the list of applicable qual clauses (implicit AND semantics)
 * 'outer_rel' is the outer relation when we are considering using the index
 *		scan as the inside of a nestloop join (hence, some of the indexQuals
 *		are join clauses, and we should expect repeated scans of the index);
 *		NULL for a plain index scan
 *
 * cost_index() takes an IndexPath not just a Path, because it sets a few
 * additional fields of the IndexPath besides startup_cost and total_cost.
 * These fields are needed if the IndexPath is used in a BitmapIndexScan.
 *
 * NOTE: 'indexQuals' must contain only clauses usable as index restrictions.
 * Any additional quals evaluated as qpquals may reduce the number of returned
 * tuples, but they won't reduce the number of tuples we have to fetch from
 * the table, so they don't reduce the scan cost.
 *
 * NOTE: as of 8.0, indexQuals is a list of RestrictInfo nodes, where formerly
 * it was a list of bare clause expressions.
 */
void
cost_index(IndexPath *path, PlannerInfo *root,
		   IndexOptInfo *index,
		   List *indexQuals,
		   RelOptInfo *outer_rel)
{
	RelOptInfo *baserel = index->rel;
	Cost		startup_cost = 0;
	Cost		run_cost = 0;
	Cost		indexStartupCost;
	Cost		indexTotalCost;
	Selectivity indexSelectivity;
	double		indexCorrelation,
				csquared;
	Cost		min_IO_cost,
				max_IO_cost;
	Cost		cpu_per_tuple;
	double		tuples_fetched;
	double		pages_fetched;

	/* Should only be applied to base relations */
	Assert(IsA(baserel, RelOptInfo) &&
		   IsA(index, IndexOptInfo));
	Assert(baserel->relid > 0);
	Assert(baserel->rtekind == RTE_RELATION);

	if (!enable_indexscan)
		startup_cost += disable_cost;
	/*
	 * Call index-access-method-specific code to estimate the processing cost
	 * for scanning the index, as well as the selectivity of the index (ie,
	 * the fraction of main-table tuples we will have to retrieve) and its
	 * correlation to the main-table tuple order.
	 */
	OidFunctionCall8(index->amcostestimate,
					 PointerGetDatum(root),
					 PointerGetDatum(index),
					 PointerGetDatum(indexQuals),
					 PointerGetDatum(outer_rel),
					 PointerGetDatum(&indexStartupCost),
					 PointerGetDatum(&indexTotalCost),
					 PointerGetDatum(&indexSelectivity),
					 PointerGetDatum(&indexCorrelation));
	/*
	 * Save amcostestimate's results for possible use in bitmap scan planning.
	 * We don't bother to save indexStartupCost or indexCorrelation, because a
	 * bitmap scan doesn't care about either.
	 */
	path->indextotalcost = indexTotalCost;
	path->indexselectivity = indexSelectivity;

	/* all costs for touching index itself included here */
	startup_cost += indexStartupCost;
	run_cost += indexTotalCost - indexStartupCost;

	/* estimate number of main-table tuples fetched */
	tuples_fetched = clamp_row_est(indexSelectivity * baserel->tuples);
	/*
	 * Estimate number of main-table pages fetched, and compute I/O cost.
	 *
	 * When the index ordering is uncorrelated with the table ordering,
	 * we use an approximation proposed by Mackert and Lohman (see
	 * index_pages_fetched() for details) to compute the number of pages
	 * fetched, and then charge random_page_cost per page fetched.
	 *
	 * When the index ordering is exactly correlated with the table ordering
	 * (just after a CLUSTER, for example), the number of pages fetched should
	 * be exactly selectivity * table_size.  What's more, all but the first
	 * will be sequential fetches, not the random fetches that occur in the
	 * uncorrelated case.  So if the number of pages is more than 1, we
	 * ought to charge
	 *		random_page_cost + (pages_fetched - 1) * seq_page_cost
	 * For partially-correlated indexes, we ought to charge somewhere between
	 * these two estimates.  We currently interpolate linearly between the
	 * estimates based on the correlation squared (XXX is that appropriate?).
	 */
	if (outer_rel != NULL && outer_rel->rows > 1)
	{
		/*
		 * For repeated indexscans, the appropriate estimate for the
		 * uncorrelated case is to scale up the number of tuples fetched in
		 * the Mackert and Lohman formula by the number of scans, so that we
		 * estimate the number of pages fetched by all the scans; then
		 * pro-rate the costs for one scan.  In this case we assume all the
		 * fetches are random accesses.
		 */
		double		num_scans = outer_rel->rows;

		pages_fetched = index_pages_fetched(tuples_fetched * num_scans,
											baserel->pages,
											(double) index->pages,
											root);

		max_IO_cost = (pages_fetched * random_page_cost) / num_scans;

		/*
		 * In the perfectly correlated case, the number of pages touched by
		 * each scan is selectivity * table_size, and we can use the Mackert
		 * and Lohman formula at the page level to estimate how much work is
		 * saved by caching across scans.  We still assume all the fetches are
		 * random, though, which is an overestimate that's hard to correct for
		 * without double-counting the cache effects.  (But in most cases
		 * where such a plan is actually interesting, only one page would get
		 * fetched per scan anyway, so it shouldn't matter much.)
		 */
		pages_fetched = ceil(indexSelectivity * (double) baserel->pages);

		pages_fetched = index_pages_fetched(pages_fetched * num_scans,
											baserel->pages,
											(double) index->pages,
											root);

		min_IO_cost = (pages_fetched * random_page_cost) / num_scans;
	}
	else
	{
		/*
		 * Normal case: apply the Mackert and Lohman formula, and then
		 * interpolate between that and the correlation-derived result.
		 */
		pages_fetched = index_pages_fetched(tuples_fetched,
											baserel->pages,
											(double) index->pages,
											root);

		/* max_IO_cost is for the perfectly uncorrelated case (csquared=0) */
		max_IO_cost = pages_fetched * random_page_cost;

		/* min_IO_cost is for the perfectly correlated case (csquared=1) */
		pages_fetched = ceil(indexSelectivity * (double) baserel->pages);
		min_IO_cost = random_page_cost;
		if (pages_fetched > 1)
			min_IO_cost += (pages_fetched - 1) * seq_page_cost;
	}

	/*
	 * Now interpolate based on estimated index order correlation to get total
	 * disk I/O cost for main table accesses.
	 */
	csquared = indexCorrelation * indexCorrelation;

	run_cost += max_IO_cost + csquared * (min_IO_cost - max_IO_cost);
	/*
	 * Estimate CPU costs per tuple.
	 *
	 * Normally the indexquals will be removed from the list of restriction
	 * clauses that we have to evaluate as qpquals, so we should subtract
	 * their costs from baserestrictcost.  But if we are doing a join then
	 * some of the indexquals are join clauses and shouldn't be subtracted.
	 * Rather than work out exactly how much to subtract, we don't subtract
	 * anything.
	 */
	startup_cost += baserel->baserestrictcost.startup;
	cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;

	if (outer_rel == NULL)
	{
		QualCost	index_qual_cost;

		cost_qual_eval(&index_qual_cost, indexQuals, root);
		/* any startup cost still has to be paid ... */
		cpu_per_tuple -= index_qual_cost.per_tuple;
	}

	run_cost += cpu_per_tuple * tuples_fetched;

	path->path.startup_cost = startup_cost;
	path->path.total_cost = startup_cost + run_cost;
}
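
/*
 * Worked example of the correlation interpolation (illustrative numbers,
 * default random_page_cost = 4.0 and seq_page_cost = 1.0): suppose the
 * Mackert-Lohman estimate is 100 pages (max_IO_cost = 400) while the
 * perfectly-correlated estimate is 10 pages (min_IO_cost = 4 + 9 = 13).
 * With indexCorrelation = 0.5, csquared = 0.25 and the charged I/O cost
 * is 400 + 0.25 * (13 - 400) = 303.25.
 */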
/*
 * index_pages_fetched
 *	  Estimate the number of pages actually fetched after accounting for
 *	  cache effects.
 *
 * We use an approximation proposed by Mackert and Lohman, "Index Scans
 * Using a Finite LRU Buffer: A Validated I/O Model", ACM Transactions
 * on Database Systems, Vol. 14, No. 3, September 1989, Pages 401-424.
 * The Mackert and Lohman approximation is that the number of pages
 * fetched is
 *	PF =
 *		min(2TNs/(2T+Ns), T) when T <= b
 *		2TNs/(2T+Ns) when T > b and Ns <= 2Tb/(2T-b)
 *		b + (Ns - 2Tb/(2T-b))*(T-b)/T when T > b and Ns > 2Tb/(2T-b)
 * where
 *		T = # pages in table
 *		N = # tuples in table
 *		s = selectivity = fraction of table to be scanned
 *		b = # buffer pages available (we include kernel space here)
 *
 * We assume that effective_cache_size is the total number of buffer pages
 * available for the whole query, and pro-rate that space across all the
 * tables in the query and the index currently under consideration.  (This
 * ignores space needed for other indexes used by the query, but since we
 * don't know which indexes will get used, we can't estimate that very well;
 * and in any case counting all the tables may well be an overestimate, since
 * depending on the join plan not all the tables may be scanned concurrently.)
 *
 * The product Ns is the number of tuples fetched; we pass in that
 * product rather than calculating it here.  "pages" is the number of pages
 * in the object under consideration (either an index or a table).
 * "index_pages" is the amount to add to the total table space, which was
 * computed for us by query_planner.
 *
 * Caller is expected to have ensured that tuples_fetched is greater than zero
 * and rounded to integer (see clamp_row_est).  The result will likewise be
 * greater than zero and integral.
 */
double
index_pages_fetched(double tuples_fetched, BlockNumber pages,
					double index_pages, PlannerInfo *root)
{
	double		pages_fetched;
	double		total_pages;
	double		T,
				b;

	/* T is # pages in table, but don't allow it to be zero */
	T = (pages > 1) ? (double) pages : 1.0;

	/* Compute number of pages assumed to be competing for cache space */
	total_pages = root->total_table_pages + index_pages;
	total_pages = Max(total_pages, 1.0);
	Assert(T <= total_pages);

	/* b is pro-rated share of effective_cache_size */
	b = (double) effective_cache_size * T / total_pages;

	/* force it positive and integral */
	if (b <= 1.0)
		b = 1.0;
	else
		b = ceil(b);

	/* This part is the Mackert and Lohman formula */
	if (T <= b)
	{
		pages_fetched =
			(2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
		if (pages_fetched >= T)
			pages_fetched = T;
		else
			pages_fetched = ceil(pages_fetched);
	}
	else
	{
		double		lim;

		lim = (2.0 * T * b) / (2.0 * T - b);
		if (tuples_fetched <= lim)
		{
			pages_fetched =
				(2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
		}
		else
		{
			pages_fetched =
				b + (tuples_fetched - lim) * (T - b) / T;
		}
		pages_fetched = ceil(pages_fetched);
	}
	return pages_fetched;
}
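
/*
 * Worked example (illustrative numbers): with T = 1000 table pages,
 * b = 2000 buffer pages (so T <= b), and Ns = 500 tuples fetched,
 * PF = 2*1000*500 / (2*1000 + 500) = 400 pages --- fewer than 500
 * because some fetches are expected to hit already-visited pages.
 */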
/*
 * get_indexpath_pages
 *		Determine the total size of the indexes used in a bitmap index path.
 *
 * Note: if the same index is used more than once in a bitmap tree, we will
 * count it multiple times, which perhaps is the wrong thing ... but it's
 * not completely clear, and detecting duplicates is difficult, so ignore it
 * for now.
 */
static double
get_indexpath_pages(Path *bitmapqual)
{
	double		result = 0;
	ListCell   *l;

	if (IsA(bitmapqual, BitmapAndPath))
	{
		BitmapAndPath *apath = (BitmapAndPath *) bitmapqual;

		foreach(l, apath->bitmapquals)
		{
			result += get_indexpath_pages((Path *) lfirst(l));
		}
	}
	else if (IsA(bitmapqual, BitmapOrPath))
	{
		BitmapOrPath *opath = (BitmapOrPath *) bitmapqual;

		foreach(l, opath->bitmapquals)
		{
			result += get_indexpath_pages((Path *) lfirst(l));
		}
	}
	else if (IsA(bitmapqual, IndexPath))
	{
		IndexPath  *ipath = (IndexPath *) bitmapqual;

		result = (double) ipath->indexinfo->pages;
	}
	else
		elog(ERROR, "unrecognized node type: %d", nodeTag(bitmapqual));

	return result;
}
/*
 * cost_bitmap_heap_scan
 *	  Determines and returns the cost of scanning a relation using a bitmap
 *	  index-then-heap plan.
 *
 * 'baserel' is the relation to be scanned
 * 'bitmapqual' is a tree of IndexPaths, BitmapAndPaths, and BitmapOrPaths
 * 'outer_rel' is the outer relation when we are considering using the bitmap
 *		scan as the inside of a nestloop join (hence, some of the indexQuals
 *		are join clauses, and we should expect repeated scans of the table);
 *		NULL for a plain bitmap scan
 *
 * Note: if this is a join inner path, the component IndexPaths in bitmapqual
 * should have been costed accordingly.
 */
void
cost_bitmap_heap_scan(Path *path, PlannerInfo *root, RelOptInfo *baserel,
					  Path *bitmapqual, RelOptInfo *outer_rel)
{
	Cost		startup_cost = 0;
	Cost		run_cost = 0;
	Cost		indexTotalCost;
	Selectivity indexSelectivity;
	Cost		cpu_per_tuple;
	Cost		cost_per_page;
	double		tuples_fetched;
	double		pages_fetched;
	double		T;

	/* Should only be applied to base relations */
	Assert(IsA(baserel, RelOptInfo));
	Assert(baserel->relid > 0);
	Assert(baserel->rtekind == RTE_RELATION);

	if (!enable_bitmapscan)
		startup_cost += disable_cost;

	/*
	 * Fetch total cost of obtaining the bitmap, as well as its total
	 * selectivity.
	 */
	cost_bitmap_tree_node(bitmapqual, &indexTotalCost, &indexSelectivity);

	startup_cost += indexTotalCost;

	/*
	 * Estimate number of main-table pages fetched.
	 */
	tuples_fetched = clamp_row_est(indexSelectivity * baserel->tuples);

	T = (baserel->pages > 1) ? (double) baserel->pages : 1.0;

	if (outer_rel != NULL && outer_rel->rows > 1)
	{
		/*
		 * For repeated bitmap scans, scale up the number of tuples fetched in
		 * the Mackert and Lohman formula by the number of scans, so that we
		 * estimate the number of pages fetched by all the scans.  Then
		 * pro-rate for one scan.
		 */
		double		num_scans = outer_rel->rows;

		pages_fetched = index_pages_fetched(tuples_fetched * num_scans,
											baserel->pages,
											get_indexpath_pages(bitmapqual),
											root);
		pages_fetched /= num_scans;
	}
	else
	{
		/*
		 * For a single scan, the number of heap pages that need to be fetched
		 * is the same as the Mackert and Lohman formula for the case T <= b
		 * (ie, no re-reads needed).
		 */
		pages_fetched = (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
	}
	if (pages_fetched >= T)
		pages_fetched = T;
	else
		pages_fetched = ceil(pages_fetched);

	/*
	 * For small numbers of pages we should charge random_page_cost apiece,
	 * while if nearly all the table's pages are being read, it's more
	 * appropriate to charge seq_page_cost apiece.  The effect is nonlinear,
	 * too. For lack of a better idea, interpolate like this to determine the
	 * cost per page.
	 */
	if (pages_fetched >= 2.0)
		cost_per_page = random_page_cost -
			(random_page_cost - seq_page_cost) * sqrt(pages_fetched / T);
	else
		cost_per_page = random_page_cost;

	run_cost += pages_fetched * cost_per_page;

	/*
	 * Estimate CPU costs per tuple.
	 *
	 * Often the indexquals don't need to be rechecked at each tuple ... but
	 * not always, especially not if there are enough tuples involved that the
	 * bitmaps become lossy.  For the moment, just assume they will be
	 * rechecked always.
	 */
	startup_cost += baserel->baserestrictcost.startup;
	cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;

	run_cost += cpu_per_tuple * tuples_fetched;

	path->startup_cost = startup_cost;
	path->total_cost = startup_cost + run_cost;
}
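
/*
 * Worked example of the cost-per-page interpolation (illustrative
 * numbers, default random_page_cost = 4.0 and seq_page_cost = 1.0): if a
 * bitmap scan touches one quarter of the table, sqrt(0.25) = 0.5 and
 * cost_per_page = 4.0 - (4.0 - 1.0) * 0.5 = 2.5, halfway between the
 * random and sequential charges.
 */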
/*
 * cost_bitmap_tree_node
 *		Extract cost and selectivity from a bitmap tree node (index/and/or)
 */
void
cost_bitmap_tree_node(Path *path, Cost *cost, Selectivity *selec)
{
	if (IsA(path, IndexPath))
	{
		*cost = ((IndexPath *) path)->indextotalcost;
		*selec = ((IndexPath *) path)->indexselectivity;

		/*
		 * Charge a small amount per retrieved tuple to reflect the costs of
		 * manipulating the bitmap.  This is mostly to make sure that a bitmap
		 * scan doesn't look to be the same cost as an indexscan to retrieve a
		 * single tuple.
		 */
		*cost += 0.1 * cpu_operator_cost * ((IndexPath *) path)->rows;
	}
	else if (IsA(path, BitmapAndPath))
	{
		*cost = path->total_cost;
		*selec = ((BitmapAndPath *) path)->bitmapselectivity;
	}
	else if (IsA(path, BitmapOrPath))
	{
		*cost = path->total_cost;
		*selec = ((BitmapOrPath *) path)->bitmapselectivity;
	}
	else
	{
		elog(ERROR, "unrecognized node type: %d", nodeTag(path));
		*cost = *selec = 0;		/* keep compiler quiet */
	}
}
/*
 * cost_bitmap_and_node
 *		Estimate the cost of a BitmapAnd node
 *
 * Note that this considers only the costs of index scanning and bitmap
 * creation, not the eventual heap access.  In that sense the object isn't
 * truly a Path, but it has enough path-like properties (costs in particular)
 * to warrant treating it as one.
 */
void
cost_bitmap_and_node(BitmapAndPath *path, PlannerInfo *root)
{
	Cost		totalCost;
	Selectivity selec;
	ListCell   *l;

	/*
	 * We estimate AND selectivity on the assumption that the inputs are
	 * independent.  This is probably often wrong, but we don't have the info
	 * to do better.
	 *
	 * The runtime cost of the BitmapAnd itself is estimated at 100x
	 * cpu_operator_cost for each tbm_intersect needed.  Probably too small,
	 * definitely too simplistic?
	 */
	totalCost = 0.0;
	selec = 1.0;
	foreach(l, path->bitmapquals)
	{
		Path	   *subpath = (Path *) lfirst(l);
		Cost		subCost;
		Selectivity subselec;

		cost_bitmap_tree_node(subpath, &subCost, &subselec);

		selec *= subselec;

		totalCost += subCost;
		if (l != list_head(path->bitmapquals))
			totalCost += 100.0 * cpu_operator_cost;
	}
	path->bitmapselectivity = selec;
	path->path.startup_cost = totalCost;
	path->path.total_cost = totalCost;
}
/*
 * cost_bitmap_or_node
 *		Estimate the cost of a BitmapOr node
 *
 * See comments for cost_bitmap_and_node.
 */
void
cost_bitmap_or_node(BitmapOrPath *path, PlannerInfo *root)
{
	Cost		totalCost;
	Selectivity selec;
	ListCell   *l;

	/*
	 * We estimate OR selectivity on the assumption that the inputs are
	 * non-overlapping, since that's often the case in "x IN (list)" type
	 * situations.  Of course, we clamp to 1.0 at the end.
	 *
	 * The runtime cost of the BitmapOr itself is estimated at 100x
	 * cpu_operator_cost for each tbm_union needed.  Probably too small,
	 * definitely too simplistic?  We are aware that the tbm_unions are
	 * optimized out when the inputs are BitmapIndexScans.
	 */
	totalCost = 0.0;
	selec = 0.0;
	foreach(l, path->bitmapquals)
	{
		Path	   *subpath = (Path *) lfirst(l);
		Cost		subCost;
		Selectivity subselec;

		cost_bitmap_tree_node(subpath, &subCost, &subselec);

		selec += subselec;

		totalCost += subCost;
		if (l != list_head(path->bitmapquals) &&
			!IsA(subpath, IndexPath))
			totalCost += 100.0 * cpu_operator_cost;
	}
	path->bitmapselectivity = Min(selec, 1.0);
	path->path.startup_cost = totalCost;
	path->path.total_cost = totalCost;
}
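
/*
 * Worked example of the two selectivity rules above (illustrative
 * numbers): ANDing inputs of selectivity 0.1 and 0.2 under the
 * independence assumption gives 0.1 * 0.2 = 0.02, while ORing them
 * under the non-overlap assumption gives 0.1 + 0.2 = 0.3 (clamped to
 * 1.0 if the sum exceeds it).
 */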
/*
 * cost_tidscan
 *	  Determines and returns the cost of scanning a relation using TIDs.
 */
void
cost_tidscan(Path *path, PlannerInfo *root,
			 RelOptInfo *baserel, List *tidquals)
{
	Cost		startup_cost = 0;
	Cost		run_cost = 0;
	bool		isCurrentOf = false;
	Cost		cpu_per_tuple;
	QualCost	tid_qual_cost;
	int			ntuples;
	ListCell   *l;

	/* Should only be applied to base relations */
	Assert(baserel->relid > 0);
	Assert(baserel->rtekind == RTE_RELATION);

	/* Count how many tuples we expect to retrieve */
	ntuples = 0;
	foreach(l, tidquals)
	{
		if (IsA(lfirst(l), ScalarArrayOpExpr))
		{
			/* Each element of the array yields 1 tuple */
			ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) lfirst(l);
			Node	   *arraynode = (Node *) lsecond(saop->args);

			ntuples += estimate_array_length(arraynode);
		}
		else if (IsA(lfirst(l), CurrentOfExpr))
		{
			/* CURRENT OF yields 1 tuple */
			isCurrentOf = true;
			ntuples++;
		}
		else
		{
			/* It's just CTID = something, count 1 tuple */
			ntuples++;
		}
	}

	/*
	 * We must force TID scan for WHERE CURRENT OF, because only nodeTidscan.c
	 * understands how to do it correctly.  Therefore, honor enable_tidscan
	 * only when CURRENT OF isn't present.  Also note that cost_qual_eval
	 * counts a CurrentOfExpr as having startup cost disable_cost, which we
	 * subtract off here; that's to prevent other plan types such as seqscan
	 * from winning.
	 */
	if (isCurrentOf)
	{
		Assert(baserel->baserestrictcost.startup >= disable_cost);
		startup_cost -= disable_cost;
	}
	else if (!enable_tidscan)
		startup_cost += disable_cost;

	/*
	 * The TID qual expressions will be computed once, any other baserestrict
	 * quals once per retrieved tuple.
	 */
	cost_qual_eval(&tid_qual_cost, tidquals, root);

	/* disk costs --- assume each tuple on a different page */
	run_cost += random_page_cost * ntuples;

	/* CPU costs */
	startup_cost += baserel->baserestrictcost.startup +
		tid_qual_cost.per_tuple;
	cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple -
		tid_qual_cost.per_tuple;
	run_cost += cpu_per_tuple * ntuples;

	path->startup_cost = startup_cost;
	path->total_cost = startup_cost + run_cost;
}
/*
 * cost_subqueryscan
 *	  Determines and returns the cost of scanning a subquery RTE.
 */
void
cost_subqueryscan(Path *path, RelOptInfo *baserel)
{
	Cost		startup_cost;
	Cost		run_cost;
	Cost		cpu_per_tuple;

	/* Should only be applied to base relations that are subqueries */
	Assert(baserel->relid > 0);
	Assert(baserel->rtekind == RTE_SUBQUERY);

	/*
	 * Cost of path is cost of evaluating the subplan, plus cost of evaluating
	 * any restriction clauses that will be attached to the SubqueryScan node,
	 * plus cpu_tuple_cost to account for selection and projection overhead.
	 */
	path->startup_cost = baserel->subplan->startup_cost;
	path->total_cost = baserel->subplan->total_cost;

	startup_cost = baserel->baserestrictcost.startup;
	cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
	run_cost = cpu_per_tuple * baserel->tuples;

	path->startup_cost += startup_cost;
	path->total_cost += startup_cost + run_cost;
}
/*
 * cost_functionscan
 *	  Determines and returns the cost of scanning a function RTE.
 */
void
cost_functionscan(Path *path, PlannerInfo *root, RelOptInfo *baserel)
{
	Cost		startup_cost = 0;
	Cost		run_cost = 0;
	Cost		cpu_per_tuple;
	RangeTblEntry *rte;
	QualCost	exprcost;

	/* Should only be applied to base relations that are functions */
	Assert(baserel->relid > 0);
	rte = planner_rt_fetch(baserel->relid, root);
	Assert(rte->rtekind == RTE_FUNCTION);

	/* Estimate costs of executing the function expression */
	cost_qual_eval_node(&exprcost, rte->funcexpr, root);

	startup_cost += exprcost.startup;
	cpu_per_tuple = exprcost.per_tuple;

	/* Add scanning CPU costs */
	startup_cost += baserel->baserestrictcost.startup;
	cpu_per_tuple += cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
	run_cost += cpu_per_tuple * baserel->tuples;

	path->startup_cost = startup_cost;
	path->total_cost = startup_cost + run_cost;
}
/*
 * cost_valuesscan
 *	  Determines and returns the cost of scanning a VALUES RTE.
 */
void
cost_valuesscan(Path *path, PlannerInfo *root, RelOptInfo *baserel)
{
	Cost		startup_cost = 0;
	Cost		run_cost = 0;
	Cost		cpu_per_tuple;

	/* Should only be applied to base relations that are values lists */
	Assert(baserel->relid > 0);
	Assert(baserel->rtekind == RTE_VALUES);

	/*
	 * For now, estimate list evaluation cost at one operator eval per list
	 * (probably pretty bogus, but is it worth being smarter?)
	 */
	cpu_per_tuple = cpu_operator_cost;

	/* Add scanning CPU costs */
	startup_cost += baserel->baserestrictcost.startup;
	cpu_per_tuple += cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
	run_cost += cpu_per_tuple * baserel->tuples;

	path->startup_cost = startup_cost;
	path->total_cost = startup_cost + run_cost;
}
/*
 * cost_sort
 *	  Determines and returns the cost of sorting a relation, including
 *	  the cost of reading the input data.
 *
 * If the total volume of data to sort is less than work_mem, we will do
 * an in-memory sort, which requires no I/O and about t*log2(t) tuple
 * comparisons for t tuples.
 *
 * If the total volume exceeds work_mem, we switch to a tape-style merge
 * algorithm.  There will still be about t*log2(t) tuple comparisons in
 * total, but we will also need to write and read each tuple once per
 * merge pass.  We expect about ceil(logM(r)) merge passes where r is the
 * number of initial runs formed and M is the merge order used by tuplesort.c.
 * Since the average initial run should be about twice work_mem, we have
 *		disk traffic = 2 * relsize * ceil(logM(p / (2*work_mem)))
 *		cpu = comparison_cost * t * log2(t)
 *
 * If the sort is bounded (i.e., only the first k result tuples are needed)
 * and k tuples can fit into work_mem, we use a heap method that keeps only
 * k tuples in the heap; this will require about t*log2(k) tuple comparisons.
 *
 * The disk traffic is assumed to be 3/4ths sequential and 1/4th random
 * accesses (XXX can't we refine that guess?)
 *
 * We charge two operator evals per tuple comparison, which should be in
 * the right ballpark in most cases.
 *
 * 'pathkeys' is a list of sort keys
 * 'input_cost' is the total cost for reading the input data
 * 'tuples' is the number of tuples in the relation
 * 'width' is the average tuple width in bytes
 * 'limit_tuples' is the bound on the number of output tuples; -1 if no bound
 *
 * NOTE: some callers currently pass NIL for pathkeys because they
 * can't conveniently supply the sort keys.  Since this routine doesn't
 * currently do anything with pathkeys anyway, that doesn't matter...
 * but if it ever does, it should react gracefully to lack of key data.
 * (Actually, the thing we'd most likely be interested in is just the number
 * of sort keys, which all callers *could* supply.)
 */
void
cost_sort(Path *path, PlannerInfo *root,
		  List *pathkeys, Cost input_cost, double tuples, int width,
		  double limit_tuples)
{
	Cost		startup_cost = input_cost;
	Cost		run_cost = 0;
	double		input_bytes = relation_byte_size(tuples, width);
	double		output_bytes;
	double		output_tuples;
	long		work_mem_bytes = work_mem * 1024L;

	if (!enable_sort)
		startup_cost += disable_cost;

	/*
	 * We want to be sure the cost of a sort is never estimated as zero, even
	 * if passed-in tuple count is zero.  Besides, mustn't do log(0)...
	 */
	if (tuples < 2.0)
		tuples = 2.0;

	/* Do we have a useful LIMIT? */
	if (limit_tuples > 0 && limit_tuples < tuples)
	{
		output_tuples = limit_tuples;
		output_bytes = relation_byte_size(output_tuples, width);
	}
	else
	{
		output_tuples = tuples;
		output_bytes = input_bytes;
	}

	if (output_bytes > work_mem_bytes)
	{
		/*
		 * We'll have to use a disk-based sort of all the tuples
		 */
		double		npages = ceil(input_bytes / BLCKSZ);
		double		nruns = (input_bytes / work_mem_bytes) * 0.5;
		double		mergeorder = tuplesort_merge_order(work_mem_bytes);
		double		log_runs;
		double		npageaccesses;

		/*
		 * CPU costs
		 *
		 * Assume about two operator evals per tuple comparison and N log2 N
		 * comparisons
		 */
		startup_cost += 2.0 * cpu_operator_cost * tuples * LOG2(tuples);

		/* Disk costs */

		/* Compute logM(r) as log(r) / log(M) */
		if (nruns > mergeorder)
			log_runs = ceil(log(nruns) / log(mergeorder));
		else
			log_runs = 1.0;
		npageaccesses = 2.0 * npages * log_runs;
		/* Assume 3/4ths of accesses are sequential, 1/4th are not */
		startup_cost += npageaccesses *
			(seq_page_cost * 0.75 + random_page_cost * 0.25);
	}
	else if (tuples > 2 * output_tuples || input_bytes > work_mem_bytes)
	{
		/*
		 * We'll use a bounded heap-sort keeping just K tuples in memory, for
		 * a total number of tuple comparisons of N log2 K; but the constant
		 * factor is a bit higher than for quicksort.  Tweak it so that the
		 * cost curve is continuous at the crossover point.
		 */
		startup_cost += 2.0 * cpu_operator_cost * tuples * LOG2(2.0 * output_tuples);
	}
	else
	{
		/* We'll use plain quicksort on all the input tuples */
		startup_cost += 2.0 * cpu_operator_cost * tuples * LOG2(tuples);
	}

	/*
	 * Also charge a small amount (arbitrarily set equal to operator cost) per
	 * extracted tuple.  Note it's correct to use tuples not output_tuples
	 * here --- the upper LIMIT will pro-rate the run cost so we'd be double
	 * counting the LIMIT otherwise.
	 */
	run_cost += cpu_operator_cost * tuples;

	path->startup_cost = startup_cost;
	path->total_cost = startup_cost + run_cost;
}
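
/*
 * Worked example of the external-sort disk model (illustrative numbers):
 * sorting 1 GB with work_mem = 16 MB yields nruns = (1024 / 16) * 0.5 =
 * 32 initial runs.  If tuplesort's merge order were 6, that needs
 * ceil(log(32) / log(6)) = 2 merge passes, so npageaccesses =
 * 2 * npages * 2, i.e. every page is written and read twice.
 */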
/*
 * sort_exceeds_work_mem
 *	  Given a finished Sort plan node, detect whether it is expected to
 *	  spill to disk (ie, will need more than work_mem workspace)
 *
 * This assumes there will be no available LIMIT.
 */
bool
sort_exceeds_work_mem(Sort *sort)
{
	double		input_bytes = relation_byte_size(sort->plan.plan_rows,
												 sort->plan.plan_width);
	long		work_mem_bytes = work_mem * 1024L;

	return (input_bytes > work_mem_bytes);
}
/*
 * cost_material
 *	  Determines and returns the cost of materializing a relation, including
 *	  the cost of reading the input data.
 *
 * If the total volume of data to materialize exceeds work_mem, we will need
 * to write it to disk, so the cost is much higher in that case.
 */
void
cost_material(Path *path,
			  Cost input_cost, double tuples, int width)
{
	Cost		startup_cost = input_cost;
	Cost		run_cost = 0;
	double		nbytes = relation_byte_size(tuples, width);
	long		work_mem_bytes = work_mem * 1024L;

	/* disk costs */
	if (nbytes > work_mem_bytes)
	{
		double		npages = ceil(nbytes / BLCKSZ);

		/* We'll write during startup and read during retrieval */
		startup_cost += seq_page_cost * npages;
		run_cost += seq_page_cost * npages;
	}

	/*
	 * Charge a very small amount per inserted tuple, to reflect bookkeeping
	 * costs.  We use cpu_tuple_cost/10 for this.  This is needed to break the
	 * tie that would otherwise exist between nestloop with A outer,
	 * materialized B inner and nestloop with B outer, materialized A inner.
	 * The extra cost ensures we'll prefer materializing the smaller rel.
	 */
	startup_cost += cpu_tuple_cost * 0.1 * tuples;

	/*
	 * Also charge a small amount per extracted tuple.  We use cpu_tuple_cost
	 * so that it doesn't appear worthwhile to materialize a bare seqscan.
	 */
	run_cost += cpu_tuple_cost * tuples;

	path->startup_cost = startup_cost;
	path->total_cost = startup_cost + run_cost;
}
/*
 * cost_agg
 *	  Determines and returns the cost of performing an Agg plan node,
 *	  including the cost of its input.
 *
 * Note: when aggstrategy == AGG_SORTED, caller must ensure that input costs
 * are for appropriately-sorted input.
 */
void
cost_agg(Path *path, PlannerInfo *root,
		 AggStrategy aggstrategy, int numAggs,
		 int numGroupCols, double numGroups,
		 Cost input_startup_cost, Cost input_total_cost,
		 double input_tuples)
{
	Cost		startup_cost;
	Cost		total_cost;

	/*
	 * We charge one cpu_operator_cost per aggregate function per input tuple,
	 * and another one per output tuple (corresponding to transfn and finalfn
	 * calls respectively).  If we are grouping, we charge an additional
	 * cpu_operator_cost per grouping column per input tuple for grouping
	 * comparisons.
	 *
	 * We will produce a single output tuple if not grouping, and a tuple per
	 * group otherwise.  We charge cpu_tuple_cost for each output tuple.
	 *
	 * Note: in this cost model, AGG_SORTED and AGG_HASHED have exactly the
	 * same total CPU cost, but AGG_SORTED has lower startup cost.  If the
	 * input path is already sorted appropriately, AGG_SORTED should be
	 * preferred (since it has no risk of memory overflow).  This will happen
	 * as long as the computed total costs are indeed exactly equal --- but if
	 * there's roundoff error we might do the wrong thing.  So be sure that
	 * the computations below form the same intermediate values in the same
	 * order.
	 *
	 * Note: ideally we should use the pg_proc.procost costs of each
	 * aggregate's component functions, but for now that seems like an
	 * excessive amount of work.
	 */
	if (aggstrategy == AGG_PLAIN)
	{
		startup_cost = input_total_cost;
		startup_cost += cpu_operator_cost * (input_tuples + 1) * numAggs;
		/* we aren't grouping */
		total_cost = startup_cost + cpu_tuple_cost;
	}
	else if (aggstrategy == AGG_SORTED)
	{
		/* Here we are able to deliver output on-the-fly */
		startup_cost = input_startup_cost;
		total_cost = input_total_cost;
		/* calcs phrased this way to match HASHED case, see note above */
		total_cost += cpu_operator_cost * input_tuples * numGroupCols;
		total_cost += cpu_operator_cost * input_tuples * numAggs;
		total_cost += cpu_operator_cost * numGroups * numAggs;
		total_cost += cpu_tuple_cost * numGroups;
	}
	else
	{
		/* must be AGG_HASHED */
		startup_cost = input_total_cost;
		startup_cost += cpu_operator_cost * input_tuples * numGroupCols;
		startup_cost += cpu_operator_cost * input_tuples * numAggs;
		total_cost = startup_cost;
		total_cost += cpu_operator_cost * numGroups * numAggs;
		total_cost += cpu_tuple_cost * numGroups;
	}

	path->startup_cost = startup_cost;
	path->total_cost = total_cost;
}
/*
 * cost_group
 *	  Determines and returns the cost of performing a Group plan node,
 *	  including the cost of its input.
 *
 * Note: caller must ensure that input costs are for appropriately-sorted
 * input.
 */
void
cost_group(Path *path, PlannerInfo *root,
		   int numGroupCols, double numGroups,
		   Cost input_startup_cost, Cost input_total_cost,
		   double input_tuples)
{
	Cost		startup_cost;
	Cost		total_cost;

	startup_cost = input_startup_cost;
	total_cost = input_total_cost;

	/*
	 * Charge one cpu_operator_cost per comparison per input tuple.  We assume
	 * all columns get compared at most of the tuples.
	 */
	total_cost += cpu_operator_cost * input_tuples * numGroupCols;

	path->startup_cost = startup_cost;
	path->total_cost = total_cost;
}
/*
 * If a nestloop's inner path is an indexscan, be sure to use its estimated
 * output row count, which may be lower than the restriction-clause-only row
 * count of its parent.  (We don't include this case in the PATH_ROWS macro
 * because it applies *only* to a nestloop's inner relation.)  We have to
 * be prepared to recurse through Append nodes in case of an appendrel.
 */
static double
nestloop_inner_path_rows(Path *path)
{
	double		result;

	if (IsA(path, IndexPath))
		result = ((IndexPath *) path)->rows;
	else if (IsA(path, BitmapHeapPath))
		result = ((BitmapHeapPath *) path)->rows;
	else if (IsA(path, AppendPath))
	{
		ListCell   *l;

		result = 0;
		foreach(l, ((AppendPath *) path)->subpaths)
		{
			result += nestloop_inner_path_rows((Path *) lfirst(l));
		}
	}
	else
		result = PATH_ROWS(path);

	return result;
}
/*
 * cost_nestloop
 *	  Determines and returns the cost of joining two relations using the
 *	  nested loop algorithm.
 *
 * 'path' is already filled in except for the cost fields
 */
void
cost_nestloop(NestPath *path, PlannerInfo *root)
{
	Path	   *outer_path = path->outerjoinpath;
	Path	   *inner_path = path->innerjoinpath;
	Cost		startup_cost = 0;
	Cost		run_cost = 0;
	Cost		cpu_per_tuple;
	QualCost	restrict_qual_cost;
	double		outer_path_rows = PATH_ROWS(outer_path);
	double		inner_path_rows = nestloop_inner_path_rows(inner_path);
	double		ntuples;
	Selectivity joininfactor;

	if (!enable_nestloop)
		startup_cost += disable_cost;

	/*
	 * If we're doing JOIN_IN then we will stop scanning inner tuples for an
	 * outer tuple as soon as we have one match.  Account for the effects of
	 * this by scaling down the cost estimates in proportion to the JOIN_IN
	 * selectivity.  (This assumes that all the quals attached to the join are
	 * IN quals, which should be true.)
	 */
	joininfactor = join_in_selectivity(path, root);

	/* cost of source data */

	/*
	 * NOTE: clearly, we must pay both outer and inner paths' startup_cost
	 * before we can start returning tuples, so the join's startup cost is
	 * their sum.  What's not so clear is whether the inner path's
	 * startup_cost must be paid again on each rescan of the inner path. This
	 * is not true if the inner path is materialized or is a hashjoin, but
	 * probably is true otherwise.
	 */
	startup_cost += outer_path->startup_cost + inner_path->startup_cost;
	run_cost += outer_path->total_cost - outer_path->startup_cost;
	if (IsA(inner_path, MaterialPath) ||
		IsA(inner_path, HashPath))
	{
		/* charge only run cost for each iteration of inner path */
	}
	else
	{
		/*
		 * charge startup cost for each iteration of inner path, except we
		 * already charged the first startup_cost in our own startup
		 */
		run_cost += (outer_path_rows - 1) * inner_path->startup_cost;
	}
	run_cost += outer_path_rows *
		(inner_path->total_cost - inner_path->startup_cost) * joininfactor;

	/*
	 * Compute number of tuples processed (not number emitted!)
	 */
	ntuples = outer_path_rows * inner_path_rows * joininfactor;

	/* CPU costs */
	cost_qual_eval(&restrict_qual_cost, path->joinrestrictinfo, root);
	startup_cost += restrict_qual_cost.startup;
	cpu_per_tuple = cpu_tuple_cost + restrict_qual_cost.per_tuple;
	run_cost += cpu_per_tuple * ntuples;

	path->path.startup_cost = startup_cost;
	path->path.total_cost = startup_cost + run_cost;
}
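
/*
 * Worked example of the rescan accounting above (illustrative numbers):
 * with 1000 outer rows and an inner path that is neither materialized
 * nor a hashjoin, costing 2.0 startup and 8.0 run per scan, we pay the
 * inner startup_cost once in our own startup_cost and 999 more times in
 * run_cost, plus 1000 * 8.0 of inner run cost (scaled by joininfactor).
 */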
/*
 * cost_mergejoin
 *	  Determines and returns the cost of joining two relations using the
 *	  merge join algorithm.
 *
 * 'path' is already filled in except for the cost fields
 *
 * Notes: path's mergeclauses should be a subset of the joinrestrictinfo list;
 * outersortkeys and innersortkeys are lists of the keys to be used
 * to sort the outer and inner relations, or NIL if no explicit
 * sort is needed because the source path is already ordered.
 */
void
cost_mergejoin(MergePath *path, PlannerInfo *root)
{
	Path	   *outer_path = path->jpath.outerjoinpath;
	Path	   *inner_path = path->jpath.innerjoinpath;
	List	   *mergeclauses = path->path_mergeclauses;
	List	   *outersortkeys = path->outersortkeys;
	List	   *innersortkeys = path->innersortkeys;
	Cost		startup_cost = 0;
	Cost		run_cost = 0;
	Cost		cpu_per_tuple;
	Selectivity merge_selec;
	QualCost	merge_qual_cost;
	QualCost	qp_qual_cost;
	double		outer_path_rows = PATH_ROWS(outer_path);
	double		inner_path_rows = PATH_ROWS(inner_path);
	double		outer_rows,
				inner_rows;
	double		mergejointuples,
				rescannedtuples;
	double		rescanratio;
	Selectivity outerscansel,
				innerscansel;
	Selectivity joininfactor;
	Path		sort_path;		/* dummy for result of cost_sort */

	if (!enable_mergejoin)
		startup_cost += disable_cost;
	/*
	 * Compute cost and selectivity of the mergequals and qpquals (other
	 * restriction clauses) separately.  We use approx_selectivity here for
	 * speed --- in most cases, any errors won't affect the result much.
	 *
	 * Note: it's probably bogus to use the normal selectivity calculation
	 * here when either the outer or inner path is a UniquePath.
	 */
	merge_selec = approx_selectivity(root, mergeclauses,
									 path->jpath.jointype);
	cost_qual_eval(&merge_qual_cost, mergeclauses, root);
	cost_qual_eval(&qp_qual_cost, path->jpath.joinrestrictinfo, root);
	qp_qual_cost.startup -= merge_qual_cost.startup;
	qp_qual_cost.per_tuple -= merge_qual_cost.per_tuple;

	/* approx # tuples passing the merge quals */
	mergejointuples = clamp_row_est(merge_selec * outer_path_rows * inner_path_rows);

	/*
	 * When there are equal merge keys in the outer relation, the mergejoin
	 * must rescan any matching tuples in the inner relation. This means
	 * re-fetching inner tuples.  Our cost model for this is that a re-fetch
	 * costs the same as an original fetch, which is probably an overestimate;
	 * but on the other hand we ignore the bookkeeping costs of mark/restore.
	 * Not clear if it's worth developing a more refined model.
	 *
	 * The number of re-fetches can be estimated approximately as size of
	 * merge join output minus size of inner relation.  Assume that the
	 * distinct key values are 1, 2, ..., and denote the number of values of
	 * each key in the outer relation as m1, m2, ...; in the inner relation,
	 * n1, n2, ...  Then we have
	 *
	 *		size of join = m1 * n1 + m2 * n2 + ...
	 *
	 *		number of rescanned tuples = (m1 - 1) * n1 + (m2 - 1) * n2 + ...
	 *			= m1 * n1 + m2 * n2 + ... - (n1 + n2 + ...)
	 *			= size of join - size of inner relation
	 *
	 * This equation works correctly for outer tuples having no inner match
	 * (nk = 0), but not for inner tuples having no outer match (mk = 0); we
	 * are effectively subtracting those from the number of rescanned tuples,
	 * when we should not.  Can we do better without expensive selectivity
	 * computations?
	 */
	if (IsA(outer_path, UniquePath))
		rescannedtuples = 0;
	else
	{
		rescannedtuples = mergejointuples - inner_path_rows;
		/* Must clamp because of possible underestimate */
		if (rescannedtuples < 0)
			rescannedtuples = 0;
	}
	/* We'll inflate inner run cost this much to account for rescanning */
	rescanratio = 1.0 + (rescannedtuples / inner_path_rows);
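
	/*
	 * Worked example of the rescan estimate above (illustrative numbers):
	 * if the outer side holds m1 = 2, m2 = 1 copies of two key values and
	 * the inner side holds n1 = 3, n2 = 2, the join emits 2*3 + 1*2 = 8
	 * tuples and rescannedtuples = 8 - (3 + 2) = 3, so rescanratio =
	 * 1.0 + 3/5 = 1.6.
	 */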
	/*
	 * A merge join will stop as soon as it exhausts either input stream
	 * (unless it's an outer join, in which case the outer side has to be
	 * scanned all the way anyway).  Estimate fraction of the left and right
	 * inputs that will actually need to be scanned.  We use only the first
	 * (most significant) merge clause for this purpose.  Since
	 * mergejoinscansel() is a fairly expensive computation, we cache the
	 * results in the merge clause RestrictInfo.
	 */
	if (mergeclauses && path->jpath.jointype != JOIN_FULL)
	{
		RestrictInfo *firstclause = (RestrictInfo *) linitial(mergeclauses);
		List	   *opathkeys;
		List	   *ipathkeys;
		PathKey    *opathkey;
		PathKey    *ipathkey;
		MergeScanSelCache *cache;

		/* Get the input pathkeys to determine the sort-order details */
		opathkeys = outersortkeys ? outersortkeys : outer_path->pathkeys;
		ipathkeys = innersortkeys ? innersortkeys : inner_path->pathkeys;
		Assert(opathkeys);
		Assert(ipathkeys);
		opathkey = (PathKey *) linitial(opathkeys);
		ipathkey = (PathKey *) linitial(ipathkeys);
		/* debugging check */
		if (opathkey->pk_opfamily != ipathkey->pk_opfamily ||
			opathkey->pk_strategy != ipathkey->pk_strategy ||
			opathkey->pk_nulls_first != ipathkey->pk_nulls_first)
			elog(ERROR, "left and right pathkeys do not match in mergejoin");

		/* Get the selectivity with caching */
		cache = cached_scansel(root, firstclause, opathkey);

		if (bms_is_subset(firstclause->left_relids,
						  outer_path->parent->relids))
		{
			/* left side of clause is outer */
			outerscansel = cache->leftscansel;
			innerscansel = cache->rightscansel;
		}
		else
		{
			/* left side of clause is inner */
			outerscansel = cache->rightscansel;
			innerscansel = cache->leftscansel;
		}
		if (path->jpath.jointype == JOIN_LEFT)
			outerscansel = 1.0;
		else if (path->jpath.jointype == JOIN_RIGHT)
			innerscansel = 1.0;
	}
	else
	{
		/* cope with clauseless or full mergejoin */
		outerscansel = innerscansel = 1.0;
	}

	/* convert selectivity to row count; must scan at least one row */
	outer_rows = clamp_row_est(outer_path_rows * outerscansel);
	inner_rows = clamp_row_est(inner_path_rows * innerscansel);

	/*
	 * Readjust scan selectivities to account for above rounding.  This is
	 * normally an insignificant effect, but when there are only a few rows in
	 * the inputs, failing to do this makes for a large percentage error.
	 */
	outerscansel = outer_rows / outer_path_rows;
	innerscansel = inner_rows / inner_path_rows;
	/* cost of source data */

	if (outersortkeys)			/* do we need to sort outer? */
	{
		cost_sort(&sort_path,
				  root,
				  outersortkeys,
				  outer_path->total_cost,
				  outer_path_rows,
				  outer_path->parent->width,
				  -1.0);
		startup_cost += sort_path.startup_cost;
		run_cost += (sort_path.total_cost - sort_path.startup_cost)
			* outerscansel;
	}
	else
	{
		startup_cost += outer_path->startup_cost;
		run_cost += (outer_path->total_cost - outer_path->startup_cost)
			* outerscansel;
	}

	if (innersortkeys)			/* do we need to sort inner? */
	{
		cost_sort(&sort_path,
				  root,
				  innersortkeys,
				  inner_path->total_cost,
				  inner_path_rows,
				  inner_path->parent->width,
				  -1.0);
		startup_cost += sort_path.startup_cost;
		run_cost += (sort_path.total_cost - sort_path.startup_cost)
			* innerscansel * rescanratio;
	}
	else
	{
		startup_cost += inner_path->startup_cost;
		run_cost += (inner_path->total_cost - inner_path->startup_cost)
			* innerscansel * rescanratio;
	}

	/* CPU costs */

	/*
	 * If we're doing JOIN_IN then we will stop outputting inner tuples for an
	 * outer tuple as soon as we have one match.  Account for the effects of
	 * this by scaling down the cost estimates in proportion to the expected
	 * output size.  (This assumes that all the quals attached to the join are
	 * IN quals, which should be true.)
	 */
	joininfactor = join_in_selectivity(&path->jpath, root);

	/*
	 * The number of tuple comparisons needed is approximately number of outer
	 * rows plus number of inner rows plus number of rescanned tuples (can we
	 * refine this?).  At each one, we need to evaluate the mergejoin quals.
	 * NOTE: JOIN_IN mode does not save any work here, so do NOT include
	 * joininfactor.
	 */
	startup_cost += merge_qual_cost.startup;
	run_cost += merge_qual_cost.per_tuple *
		(outer_rows + inner_rows * rescanratio);

	/*
	 * For each tuple that gets through the mergejoin proper, we charge
	 * cpu_tuple_cost plus the cost of evaluating additional restriction
	 * clauses that are to be applied at the join.  (This is pessimistic since
	 * not all of the quals may get evaluated at each tuple.)  This work is
	 * skipped in JOIN_IN mode, so apply the factor.
	 */
	startup_cost += qp_qual_cost.startup;
	cpu_per_tuple = cpu_tuple_cost + qp_qual_cost.per_tuple;
	run_cost += cpu_per_tuple * mergejointuples * joininfactor;

	path->jpath.path.startup_cost = startup_cost;
	path->jpath.path.total_cost = startup_cost + run_cost;
}
/*
 * run mergejoinscansel() with caching
 */
static MergeScanSelCache *
cached_scansel(PlannerInfo *root, RestrictInfo *rinfo, PathKey *pathkey)
{
	MergeScanSelCache *cache;
	ListCell   *lc;
	Selectivity leftscansel,
				rightscansel;
	MemoryContext oldcontext;

	/* Do we have this result already? */
	foreach(lc, rinfo->scansel_cache)
	{
		cache = (MergeScanSelCache *) lfirst(lc);
		if (cache->opfamily == pathkey->pk_opfamily &&
			cache->strategy == pathkey->pk_strategy &&
			cache->nulls_first == pathkey->pk_nulls_first)
			return cache;
	}

	/* Nope, do the computation */
	mergejoinscansel(root,
					 (Node *) rinfo->clause,
					 pathkey->pk_opfamily,
					 pathkey->pk_strategy,
					 pathkey->pk_nulls_first,
					 &leftscansel,
					 &rightscansel);

	/* Cache the result in suitably long-lived workspace */
	oldcontext = MemoryContextSwitchTo(root->planner_cxt);

	cache = (MergeScanSelCache *) palloc(sizeof(MergeScanSelCache));
	cache->opfamily = pathkey->pk_opfamily;
	cache->strategy = pathkey->pk_strategy;
	cache->nulls_first = pathkey->pk_nulls_first;
	cache->leftscansel = leftscansel;
	cache->rightscansel = rightscansel;

	rinfo->scansel_cache = lappend(rinfo->scansel_cache, cache);

	MemoryContextSwitchTo(oldcontext);

	return cache;
}
/*
 * cost_hashjoin
 *	  Determines and returns the cost of joining two relations using the
 *	  hash join algorithm.
 *
 * 'path' is already filled in except for the cost fields
 *
 * Note: path's hashclauses should be a subset of the joinrestrictinfo list
 */
void
cost_hashjoin(HashPath *path, PlannerInfo *root)
{
	Path	   *outer_path = path->jpath.outerjoinpath;
	Path	   *inner_path = path->jpath.innerjoinpath;
	List	   *hashclauses = path->path_hashclauses;
	Cost		startup_cost = 0;
	Cost		run_cost = 0;
	Cost		cpu_per_tuple;
	Selectivity hash_selec;
	QualCost	hash_qual_cost;
	QualCost	qp_qual_cost;
	double		hashjointuples;
	double		outer_path_rows = PATH_ROWS(outer_path);
	double		inner_path_rows = PATH_ROWS(inner_path);
	int			num_hashclauses = list_length(hashclauses);
	int			numbuckets;
	int			numbatches;
	double		virtualbuckets;
	Selectivity innerbucketsize;
	Selectivity joininfactor;
	ListCell   *hcl;

	if (!enable_hashjoin)
		startup_cost += disable_cost;
	/*
	 * Compute cost and selectivity of the hashquals and qpquals (other
	 * restriction clauses) separately.  We use approx_selectivity here for
	 * speed --- in most cases, any errors won't affect the result much.
	 *
	 * Note: it's probably bogus to use the normal selectivity calculation
	 * here when either the outer or inner path is a UniquePath.
	 */
	hash_selec = approx_selectivity(root, hashclauses,
									path->jpath.jointype);
	cost_qual_eval(&hash_qual_cost, hashclauses, root);
	cost_qual_eval(&qp_qual_cost, path->jpath.joinrestrictinfo, root);
	qp_qual_cost.startup -= hash_qual_cost.startup;
	qp_qual_cost.per_tuple -= hash_qual_cost.per_tuple;

	/* approx # tuples passing the hash quals */
	hashjointuples = clamp_row_est(hash_selec * outer_path_rows * inner_path_rows);

	/* cost of source data */
	startup_cost += outer_path->startup_cost;
	run_cost += outer_path->total_cost - outer_path->startup_cost;
	startup_cost += inner_path->total_cost;

	/*
	 * Cost of computing hash function: must do it once per input tuple. We
	 * charge one cpu_operator_cost for each column's hash function.  Also,
	 * tack on one cpu_tuple_cost per inner row, to model the costs of
	 * inserting the row into the hashtable.
	 *
	 * XXX when a hashclause is more complex than a single operator, we really
	 * should charge the extra eval costs of the left or right side, as
	 * appropriate, here.  This seems more work than it's worth at the moment.
	 */
	startup_cost += (cpu_operator_cost * num_hashclauses + cpu_tuple_cost)
		* inner_path_rows;
	run_cost += cpu_operator_cost * num_hashclauses * outer_path_rows;

	/* Get hash table size that executor would use for inner relation */
	ExecChooseHashTableSize(inner_path_rows,
							inner_path->parent->width,
							&numbuckets,
							&numbatches);
	virtualbuckets = (double) numbuckets * (double) numbatches;
	/*
	 * Determine bucketsize fraction for inner relation.  We use the smallest
	 * bucketsize estimated for any individual hashclause; this is undoubtedly
	 * conservative.
	 *
	 * BUT: if inner relation has been unique-ified, we can assume it's good
	 * for hashing.  This is important both because it's the right answer, and
	 * because we avoid contaminating the cache with a value that's wrong for
	 * non-unique-ified paths.
	 */
	if (IsA(inner_path, UniquePath))
		innerbucketsize = 1.0 / virtualbuckets;
	else
	{
		innerbucketsize = 1.0;
		foreach(hcl, hashclauses)
		{
			RestrictInfo *restrictinfo = (RestrictInfo *) lfirst(hcl);
			Selectivity thisbucketsize;

			Assert(IsA(restrictinfo, RestrictInfo));

			/*
			 * First we have to figure out which side of the hashjoin clause
			 * is the inner side.
			 *
			 * Since we tend to visit the same clauses over and over when
			 * planning a large query, we cache the bucketsize estimate in the
			 * RestrictInfo node to avoid repeated lookups of statistics.
			 */
			if (bms_is_subset(restrictinfo->right_relids,
							  inner_path->parent->relids))
			{
				/* righthand side is inner */
				thisbucketsize = restrictinfo->right_bucketsize;
				if (thisbucketsize < 0)
				{
					/* not cached yet */
					thisbucketsize =
						estimate_hash_bucketsize(root,
										   get_rightop(restrictinfo->clause),
												 virtualbuckets);
					restrictinfo->right_bucketsize = thisbucketsize;
				}
			}
			else
			{
				Assert(bms_is_subset(restrictinfo->left_relids,
									 inner_path->parent->relids));
				/* lefthand side is inner */
				thisbucketsize = restrictinfo->left_bucketsize;
				if (thisbucketsize < 0)
				{
					/* not cached yet */
					thisbucketsize =
						estimate_hash_bucketsize(root,
											get_leftop(restrictinfo->clause),
												 virtualbuckets);
					restrictinfo->left_bucketsize = thisbucketsize;
				}
			}

			if (innerbucketsize > thisbucketsize)
				innerbucketsize = thisbucketsize;
		}
	}
1787 * If inner relation is too big then we will need to "batch" the join,
1788 * which implies writing and reading most of the tuples to disk an extra
1789 * time. Charge seq_page_cost per page, since the I/O should be nice and
1790 * sequential. Writing the inner rel counts as startup cost, all the rest
1795 double outerpages = page_size(outer_path_rows,
1796 outer_path->parent->width);
1797 double innerpages = page_size(inner_path_rows,
1798 inner_path->parent->width);
1800 startup_cost += seq_page_cost * innerpages;
1801 run_cost += seq_page_cost * (innerpages + 2 * outerpages);
    /* CPU costs */

    /*
     * If we're doing JOIN_IN then we will stop comparing inner tuples to an
     * outer tuple as soon as we have one match.  Account for the effects of
     * this by scaling down the cost estimates in proportion to the expected
     * output size.  (This assumes that all the quals attached to the join
     * are IN quals, which should be true.)
     */
    joininfactor = join_in_selectivity(&path->jpath, root);

    /*
     * The number of tuple comparisons needed is the number of outer tuples
     * times the typical number of tuples in a hash bucket, which is the
     * inner relation size times its bucketsize fraction.  At each one, we
     * need to evaluate the hashjoin quals.  But actually, charging the full
     * qual eval cost at each tuple is pessimistic, since we don't evaluate
     * the quals unless the hash values match exactly.  For lack of a better
     * idea, halve the cost estimate to allow for that.
     */
    startup_cost += hash_qual_cost.startup;
    run_cost += hash_qual_cost.per_tuple *
        outer_path_rows * clamp_row_est(inner_path_rows * innerbucketsize) *
        joininfactor * 0.5;

    /*
     * For each tuple that gets through the hashjoin proper, we charge
     * cpu_tuple_cost plus the cost of evaluating additional restriction
     * clauses that are to be applied at the join.  (This is pessimistic
     * since not all of the quals may get evaluated at each tuple.)
     */
    startup_cost += qp_qual_cost.startup;
    cpu_per_tuple = cpu_tuple_cost + qp_qual_cost.per_tuple;
    run_cost += cpu_per_tuple * hashjointuples * joininfactor;

    path->jpath.path.startup_cost = startup_cost;
    path->jpath.path.total_cost = startup_cost + run_cost;
}
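/*
 * A worked example of cost_hashjoin's CPU charges above (inputs are
 * hypothetical): with 1000 outer rows, 100000 inner rows, and
 * innerbucketsize = 0.0001, each outer tuple is compared against about
 * clamp_row_est(100000 * 0.0001) = 10 bucket mates; if
 * hash_qual_cost.per_tuple equals the default cpu_operator_cost of 0.0025,
 * the hash-qual charge is 0.0025 * 1000 * 10 * 0.5 = 12.5 cost units,
 * scaled down further by joininfactor for JOIN_IN.
 */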
/*
 * cost_qual_eval
 *      Estimate the CPU costs of evaluating a WHERE clause.
 *      The input can be either an implicitly-ANDed list of boolean
 *      expressions, or a list of RestrictInfo nodes.  (The latter is
 *      preferred since it allows caching of the results.)
 *      The result includes both a one-time (startup) component,
 *      and a per-evaluation component.
 */
void
cost_qual_eval(QualCost *cost, List *quals, PlannerInfo *root)
{
    cost_qual_eval_context context;
    ListCell   *l;

    context.root = root;
    context.total.startup = 0;
    context.total.per_tuple = 0;

    /* We don't charge any cost for the implicit ANDing at top level ... */
    foreach(l, quals)
    {
        Node       *qual = (Node *) lfirst(l);

        cost_qual_eval_walker(qual, &context);
    }

    *cost = context.total;
}
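/*
 * Typical usage, sketched after the pattern of the scan-cost routines
 * earlier in this file (cost_seqscan and friends): the caller folds the
 * QualCost into its own startup and per-tuple charges, e.g.
 *
 *      QualCost    qpqual_cost;
 *
 *      cost_qual_eval(&qpqual_cost, rel->baserestrictinfo, root);
 *      startup_cost += qpqual_cost.startup;
 *      cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
 *      run_cost += cpu_per_tuple * rel->tuples;
 */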
/*
 * cost_qual_eval_node
 *      As above, for a single RestrictInfo or expression.
 */
void
cost_qual_eval_node(QualCost *cost, Node *qual, PlannerInfo *root)
{
    cost_qual_eval_context context;

    context.root = root;
    context.total.startup = 0;
    context.total.per_tuple = 0;

    cost_qual_eval_walker(qual, &context);

    *cost = context.total;
}
static bool
cost_qual_eval_walker(Node *node, cost_qual_eval_context *context)
{
    if (node == NULL)
        return false;

    /*
     * RestrictInfo nodes contain an eval_cost field reserved for this
     * routine's use, so that it's not necessary to evaluate the qual
     * clause's cost more than once.  If the clause's cost hasn't been
     * computed yet, the field's startup value will contain -1.
     */
    if (IsA(node, RestrictInfo))
    {
        RestrictInfo *rinfo = (RestrictInfo *) node;

        if (rinfo->eval_cost.startup < 0)
        {
            cost_qual_eval_context locContext;

            locContext.root = context->root;
            locContext.total.startup = 0;
            locContext.total.per_tuple = 0;

            /*
             * For an OR clause, recurse into the marked-up tree so that we
             * set the eval_cost for contained RestrictInfos too.
             */
            if (rinfo->orclause)
                cost_qual_eval_walker((Node *) rinfo->orclause, &locContext);
            else
                cost_qual_eval_walker((Node *) rinfo->clause, &locContext);

            /*
             * If the RestrictInfo is marked pseudoconstant, it will be
             * tested only once, so treat its cost as all startup cost.
             */
            if (rinfo->pseudoconstant)
            {
                /* count one execution during startup */
                locContext.total.startup += locContext.total.per_tuple;
                locContext.total.per_tuple = 0;
            }
            rinfo->eval_cost = locContext.total;
        }
        context->total.startup += rinfo->eval_cost.startup;
        context->total.per_tuple += rinfo->eval_cost.per_tuple;
        /* do NOT recurse into children */
        return false;
    }
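    /*
     * Note: the -1 sentinel tested above is established when the
     * RestrictInfo is created (the constructors in
     * optimizer/util/restrictinfo.c initialize eval_cost.startup to -1),
     * so the first walk over a clause pays for the estimation and every
     * later walk reuses the cached QualCost.
     */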
    /*
     * For each operator or function node in the given tree, we charge the
     * estimated execution cost given by pg_proc.procost (remember to
     * multiply this by cpu_operator_cost).
     *
     * Vars and Consts are charged zero, and so are boolean operators (AND,
     * OR, NOT).  Simplistic, but a lot better than no model at all.
     *
     * Should we try to account for the possibility of short-circuit
     * evaluation of AND/OR?  Probably *not*, because that would make the
     * results depend on the clause ordering, and we are not in any position
     * to expect that the current ordering of the clauses is the one that's
     * going to end up being used.  (Is it worth applying order_qual_clauses
     * much earlier in the planning process to fix this?)
     */
    if (IsA(node, FuncExpr))
    {
        context->total.per_tuple +=
            get_func_cost(((FuncExpr *) node)->funcid) * cpu_operator_cost;
    }
    else if (IsA(node, OpExpr) ||
             IsA(node, DistinctExpr) ||
             IsA(node, NullIfExpr))
    {
        /* rely on struct equivalence to treat these all alike */
        set_opfuncid((OpExpr *) node);
        context->total.per_tuple +=
            get_func_cost(((OpExpr *) node)->opfuncid) * cpu_operator_cost;
    }
    else if (IsA(node, ScalarArrayOpExpr))
    {
        /*
         * Estimate that the operator will be applied to about half of the
         * array elements before the answer is determined.
         */
        ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) node;
        Node       *arraynode = (Node *) lsecond(saop->args);

        set_sa_opfuncid(saop);
        context->total.per_tuple += get_func_cost(saop->opfuncid) *
            cpu_operator_cost * estimate_array_length(arraynode) * 0.5;
    }
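    /*
     * Example with assumed values: for "x = ANY(arr)" where arr has an
     * estimated 10 elements and the comparison operator has procost = 1,
     * the charge is 1 * cpu_operator_cost * 10 * 0.5, i.e. five operator
     * evaluations, reflecting the expectation of stopping halfway through
     * the array on average.
     */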
    else if (IsA(node, CoerceViaIO))
    {
        CoerceViaIO *iocoerce = (CoerceViaIO *) node;
        Oid         iofunc;
        Oid         typioparam;
        bool        typisvarlena;

        /* check the result type's input function */
        getTypeInputInfo(iocoerce->resulttype,
                         &iofunc, &typioparam);
        context->total.per_tuple += get_func_cost(iofunc) * cpu_operator_cost;
        /* check the input type's output function */
        getTypeOutputInfo(exprType((Node *) iocoerce->arg),
                          &iofunc, &typisvarlena);
        context->total.per_tuple += get_func_cost(iofunc) * cpu_operator_cost;
    }
    else if (IsA(node, ArrayCoerceExpr))
    {
        ArrayCoerceExpr *acoerce = (ArrayCoerceExpr *) node;
        Node       *arraynode = (Node *) acoerce->arg;

        if (OidIsValid(acoerce->elemfuncid))
            context->total.per_tuple += get_func_cost(acoerce->elemfuncid) *
                cpu_operator_cost * estimate_array_length(arraynode);
    }
    else if (IsA(node, RowCompareExpr))
    {
        /* Conservatively assume we will check all the columns */
        RowCompareExpr *rcexpr = (RowCompareExpr *) node;
        ListCell   *lc;

        foreach(lc, rcexpr->opnos)
        {
            Oid         opid = lfirst_oid(lc);

            context->total.per_tuple += get_func_cost(get_opcode(opid)) *
                cpu_operator_cost;
        }
    }
    else if (IsA(node, CurrentOfExpr))
    {
        /* Report high cost to prevent selection of anything but TID scan */
        context->total.startup += disable_cost;
    }
    else if (IsA(node, SubLink))
    {
        /* This routine should not be applied to un-planned expressions */
        elog(ERROR, "cannot handle unplanned sub-select");
    }
    else if (IsA(node, SubPlan))
    {
        /*
         * A subplan node in an expression typically indicates that the
         * subplan will be executed on each evaluation, so charge
         * accordingly.  (Sub-selects that can be executed as InitPlans have
         * already been removed from the expression.)
         *
         * An exception occurs when we have decided we can implement the
         * subplan by hashing.
         */
        SubPlan    *subplan = (SubPlan *) node;
        Plan       *plan = planner_subplan_get_plan(context->root, subplan);

        if (subplan->useHashTable)
        {
            /*
             * If we are using a hash table for the subquery outputs, then
             * the cost of evaluating the query is a one-time cost.  We
             * charge one cpu_operator_cost per tuple for the work of loading
             * the hashtable, too.
             */
            context->total.startup += plan->total_cost +
                cpu_operator_cost * plan->plan_rows;

            /*
             * The per-tuple costs include the cost of evaluating the
             * lefthand expressions, plus the cost of probing the hashtable.
             * Recursion into the testexpr will handle the lefthand
             * expressions properly, and will count one cpu_operator_cost
             * for each comparison operator.  That is probably too low for
             * the probing cost, but it's hard to make a better estimate, so
             * live with it for now.
             */
        }
        else
        {
            /*
             * Otherwise we will be rescanning the subplan output on each
             * evaluation.  We need to estimate how much of the output we
             * will actually need to scan.  NOTE: this logic should agree
             * with get_initplan_cost, below, and with the estimates used by
             * make_subplan() in plan/subselect.c.
             */
            Cost        plan_run_cost = plan->total_cost - plan->startup_cost;

            if (subplan->subLinkType == EXISTS_SUBLINK)
            {
                /* we only need to fetch 1 tuple */
                context->total.per_tuple += plan_run_cost / plan->plan_rows;
            }
            else if (subplan->subLinkType == ALL_SUBLINK ||
                     subplan->subLinkType == ANY_SUBLINK)
            {
                /* assume we need 50% of the tuples */
                context->total.per_tuple += 0.50 * plan_run_cost;
                /* also charge a cpu_operator_cost per row examined */
                context->total.per_tuple +=
                    0.50 * plan->plan_rows * cpu_operator_cost;
            }
            else
            {
                /* assume we need all tuples */
                context->total.per_tuple += plan_run_cost;
            }

            /*
             * Also account for subplan's startup cost.  If the subplan is
             * uncorrelated or undirect correlated, AND its topmost node is a
             * Sort or Material node, assume that we'll only need to pay its
             * startup cost once; otherwise assume we pay the startup cost
             * every time.
             */
            if (subplan->parParam == NIL &&
                (IsA(plan, Sort) ||
                 IsA(plan, Material)))
                context->total.startup += plan->startup_cost;
            else
                context->total.per_tuple += plan->startup_cost;
        }
    }

    /* recurse into children */
    return expression_tree_walker(node, cost_qual_eval_walker,
                                  (void *) context);
}
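/*
 * To make the SubPlan charges above concrete (all numbers hypothetical):
 * an EXISTS sub-select whose plan has startup_cost = 10, total_cost = 110,
 * and plan_rows = 1000 adds (110 - 10) / 1000 = 0.1 per evaluation for
 * fetching one tuple, plus the plan's startup cost of 10 per evaluation,
 * unless the subplan is uncorrelated and topped by a Sort or Material
 * node, in which case the 10 is paid only once at startup.
 */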
/*
 * get_initplan_cost
 *      Get the expected cost of evaluating an initPlan.
 *
 * Keep this in sync with cost_qual_eval_walker's handling of subplans,
 * above, and with the estimates used by make_subplan() in plan/subselect.c.
 */
Cost
get_initplan_cost(PlannerInfo *root, SubPlan *subplan)
{
    Cost        result;
    Plan       *plan = planner_subplan_get_plan(root, subplan);

    /* initPlans never use hashtables */
    Assert(!subplan->useHashTable);
    /* they are never ALL or ANY, either */
    Assert(!(subplan->subLinkType == ALL_SUBLINK ||
             subplan->subLinkType == ANY_SUBLINK));

    if (subplan->subLinkType == EXISTS_SUBLINK)
    {
        /* we only need to fetch 1 tuple */
        Cost        plan_run_cost = plan->total_cost - plan->startup_cost;

        result = plan->startup_cost;
        result += plan_run_cost / plan->plan_rows;
    }
    else
    {
        /* assume we need all tuples */
        result = plan->total_cost;
    }

    return result;
}
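/*
 * For example (hypothetical numbers): an EXISTS initPlan whose plan has
 * startup_cost = 5, total_cost = 55, and plan_rows = 100 is costed at
 * 5 + (55 - 5) / 100 = 5.5, while any other initPlan is simply charged
 * its total_cost of 55, on the assumption that the whole output is needed.
 */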
/*
 * approx_selectivity
 *      Quick-and-dirty estimation of clause selectivities.
 *      The input can be either an implicitly-ANDed list of boolean
 *      expressions, or a list of RestrictInfo nodes (typically the latter).
 *
 * This is quick-and-dirty because we bypass clauselist_selectivity, and
 * simply multiply the independent clause selectivities together.  Now
 * clauselist_selectivity often can't do any better than that anyhow, but
 * for some situations (such as range constraints) it is smarter.  However,
 * we can't effectively cache the results of clauselist_selectivity, whereas
 * the individual clause selectivities can be and are cached.
 *
 * Since we are only using the results to estimate how many potential
 * output tuples are generated and passed through qpqual checking, it
 * seems OK to live with the approximation.
 */
static Selectivity
approx_selectivity(PlannerInfo *root, List *quals, JoinType jointype)
{
    Selectivity total = 1.0;
    ListCell   *l;

    foreach(l, quals)
    {
        Node       *qual = (Node *) lfirst(l);

        /* Note that clause_selectivity will be able to cache its result */
        total *= clause_selectivity(root, qual, 0, jointype);
    }
    return total;
}
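/*
 * For instance, given two cached clause selectivities of 0.1 and 0.25
 * (made-up values), approx_selectivity returns 0.1 * 0.25 = 0.025;
 * clauselist_selectivity might instead recognize the pair as a range
 * constraint on one column and produce a smarter combined estimate.
 */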
/*
 * set_baserel_size_estimates
 *      Set the size estimates for the given base relation.
 *
 * The rel's targetlist and restrictinfo list must have been constructed
 * already.
 *
 * We set the following fields of the rel node:
 *  rows: the estimated number of output tuples (after applying
 *        restriction clauses).
 *  width: the estimated average output tuple width in bytes.
 *  baserestrictcost: estimated cost of evaluating baserestrictinfo clauses.
 */
void
set_baserel_size_estimates(PlannerInfo *root, RelOptInfo *rel)
{
    double      nrows;

    /* Should only be applied to base relations */
    Assert(rel->relid > 0);

    nrows = rel->tuples *
        clauselist_selectivity(root,
                               rel->baserestrictinfo,
                               0,
                               JOIN_INNER);

    rel->rows = clamp_row_est(nrows);

    cost_qual_eval(&rel->baserestrictcost, rel->baserestrictinfo, root);

    set_rel_width(root, rel);
}
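/*
 * A quick numeric illustration (invented values): with rel->tuples =
 * 100000 and a combined restriction selectivity of 0.003, nrows is 300
 * and rel->rows becomes 300; with a selectivity of 1e-9, nrows is 0.0001
 * and clamp_row_est rounds it up to 1, honoring the no-zero-rows
 * guarantee described in the header comment.
 */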
/*
 * set_joinrel_size_estimates
 *      Set the size estimates for the given join relation.
 *
 * The rel's targetlist must have been constructed already, and a
 * restriction clause list that matches the given component rels must
 * be provided.
 *
 * Since there is more than one way to make a joinrel for more than two
 * base relations, the results we get here could depend on which component
 * rel pair is provided.  In theory we should get the same answers no matter
 * which pair is provided; in practice, since the selectivity estimation
 * routines don't handle all cases equally well, we might not.  But there's
 * not much to be done about it.  (Would it make sense to repeat the
 * calculations for each pair of input rels that's encountered, and somehow
 * average the results?  Probably way more trouble than it's worth.)
 *
 * It's important that the results for symmetric JoinTypes be symmetric,
 * eg, (rel1, rel2, JOIN_LEFT) should produce the same result as (rel2,
 * rel1, JOIN_RIGHT).  Also, JOIN_IN should produce the same result as
 * JOIN_UNIQUE_INNER, likewise JOIN_REVERSE_IN == JOIN_UNIQUE_OUTER.
 *
 * We set only the rows field here.  The width field was already set by
 * build_joinrel_tlist, and baserestrictcost is not used for join rels.
 */
void
set_joinrel_size_estimates(PlannerInfo *root, RelOptInfo *rel,
                           RelOptInfo *outer_rel,
                           RelOptInfo *inner_rel,
                           JoinType jointype,
                           List *restrictlist)
{
    Selectivity jselec;
    Selectivity pselec;
    double      nrows;
    UniquePath *upath;

    /*
     * Compute joinclause selectivity.  Note that we are only considering
     * clauses that become restriction clauses at this join level; we are
     * not double-counting them because they were not considered in
     * estimating the sizes of the component rels.
     *
     * For an outer join, we have to distinguish the selectivity of the
     * join's own clauses (JOIN/ON conditions) from any clauses that were
     * "pushed down".  For inner joins we just count them all as
     * joinclauses.
     */
    if (IS_OUTER_JOIN(jointype))
    {
        List       *joinquals = NIL;
        List       *pushedquals = NIL;
        ListCell   *l;

        /* Grovel through the clauses to separate into two lists */
        foreach(l, restrictlist)
        {
            RestrictInfo *rinfo = (RestrictInfo *) lfirst(l);

            Assert(IsA(rinfo, RestrictInfo));
            if (rinfo->is_pushed_down)
                pushedquals = lappend(pushedquals, rinfo);
            else
                joinquals = lappend(joinquals, rinfo);
        }

        /* Get the separate selectivities */
        jselec = clauselist_selectivity(root,
                                        joinquals,
                                        0,
                                        jointype);
        pselec = clauselist_selectivity(root,
                                        pushedquals,
                                        0,
                                        jointype);

        /* Avoid leaking a lot of ListCells */
        list_free(joinquals);
        list_free(pushedquals);
    }
    else
    {
        jselec = clauselist_selectivity(root,
                                        restrictlist,
                                        0,
                                        jointype);
        pselec = 0.0;           /* not used, keep compiler quiet */
    }

    /*
     * Basically, we multiply size of Cartesian product by selectivity.
     *
     * If we are doing an outer join, take that into account: the joinqual
     * selectivity has to be clamped using the knowledge that the output
     * must be at least as large as the non-nullable input.  However, any
     * pushed-down quals are applied after the outer join, so their
     * selectivity applies fully.
     *
     * For JOIN_IN and variants, the Cartesian product is figured with
     * respect to a unique-ified input, and then we can clamp to the size of
     * the other input.
     */
    switch (jointype)
    {
        case JOIN_INNER:
            nrows = outer_rel->rows * inner_rel->rows * jselec;
            break;
        case JOIN_LEFT:
            nrows = outer_rel->rows * inner_rel->rows * jselec;
            if (nrows < outer_rel->rows)
                nrows = outer_rel->rows;
            nrows *= pselec;
            break;
        case JOIN_RIGHT:
            nrows = outer_rel->rows * inner_rel->rows * jselec;
            if (nrows < inner_rel->rows)
                nrows = inner_rel->rows;
            nrows *= pselec;
            break;
        case JOIN_FULL:
            nrows = outer_rel->rows * inner_rel->rows * jselec;
            if (nrows < outer_rel->rows)
                nrows = outer_rel->rows;
            if (nrows < inner_rel->rows)
                nrows = inner_rel->rows;
            nrows *= pselec;
            break;
        case JOIN_IN:
        case JOIN_UNIQUE_INNER:
            upath = create_unique_path(root, inner_rel,
                                       inner_rel->cheapest_total_path);
            nrows = outer_rel->rows * upath->rows * jselec;
            if (nrows > outer_rel->rows)
                nrows = outer_rel->rows;
            break;
        case JOIN_REVERSE_IN:
        case JOIN_UNIQUE_OUTER:
            upath = create_unique_path(root, outer_rel,
                                       outer_rel->cheapest_total_path);
            nrows = upath->rows * inner_rel->rows * jselec;
            if (nrows > inner_rel->rows)
                nrows = inner_rel->rows;
            break;
        default:
            elog(ERROR, "unrecognized join type: %d", (int) jointype);
            nrows = 0;          /* keep compiler quiet */
            break;
    }

    rel->rows = clamp_row_est(nrows);
}
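/*
 * Example of the outer-join clamp above (sizes invented): for a LEFT join
 * of a 1000-row outer rel to a 10-row inner rel with jselec = 0.001, the
 * raw product 1000 * 10 * 0.001 = 10 is clamped up to the outer size of
 * 1000, since every outer row must appear at least once; a pushed-down
 * qual with pselec = 0.5 is then applied on top, giving 500 rows.
 */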
/*
 * join_in_selectivity
 *      Determines the factor by which a JOIN_IN join's result is expected
 *      to be smaller than an ordinary inner join.
 *
 * 'path' is already filled in except for the cost fields
 */
static Selectivity
join_in_selectivity(JoinPath *path, PlannerInfo *root)
{
    RelOptInfo *innerrel;
    UniquePath *innerunique;
    Selectivity selec;
    double      nrows;

    /* Return 1.0 whenever it's not JOIN_IN */
    if (path->jointype != JOIN_IN)
        return 1.0;

    /*
     * Return 1.0 if the inner side is already known unique.  The case where
     * the inner path is already a UniquePath probably cannot happen in
     * current usage, but check it anyway for completeness.  The interesting
     * case is where we've determined the inner relation itself is unique,
     * which we can check by looking at the rows estimate for its UniquePath.
     */
    if (IsA(path->innerjoinpath, UniquePath))
        return 1.0;
    innerrel = path->innerjoinpath->parent;
    innerunique = create_unique_path(root,
                                     innerrel,
                                     innerrel->cheapest_total_path);
    if (innerunique->rows >= innerrel->rows)
        return 1.0;

    /*
     * Compute same result set_joinrel_size_estimates would compute for
     * JOIN_INNER.  Note that we use the input rels' absolute size estimates,
     * not PATH_ROWS() which might be less; if we used PATH_ROWS() we'd be
     * double-counting the effects of any join clauses used in input scans.
     */
    selec = clauselist_selectivity(root,
                                   path->joinrestrictinfo,
                                   0,
                                   JOIN_INNER);
    nrows = path->outerjoinpath->parent->rows * innerrel->rows * selec;

    nrows = clamp_row_est(nrows);

    /* See if it's larger than the actual JOIN_IN size estimate */
    if (nrows > path->path.parent->rows)
        return path->path.parent->rows / nrows;
    else
        return 1.0;
}
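/*
 * Illustration (hypothetical sizes): if the equivalent inner join would
 * yield nrows = 5000 but the JOIN_IN estimate stored in
 * path->path.parent->rows is 1000, the factor returned is 1000 / 5000 =
 * 0.2, and the join cost routines scale their per-tuple charges down by
 * that amount.
 */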
/*
 * set_function_size_estimates
 *      Set the size estimates for a base relation that is a function call.
 *
 * The rel's targetlist and restrictinfo list must have been constructed
 * already.
 *
 * We set the same fields as set_baserel_size_estimates.
 */
void
set_function_size_estimates(PlannerInfo *root, RelOptInfo *rel)
{
    RangeTblEntry *rte;

    /* Should only be applied to base relations that are functions */
    Assert(rel->relid > 0);
    rte = planner_rt_fetch(rel->relid, root);
    Assert(rte->rtekind == RTE_FUNCTION);

    /* Estimate number of rows the function itself will return */
    rel->tuples = clamp_row_est(expression_returns_set_rows(rte->funcexpr));

    /* Now estimate number of output rows, etc */
    set_baserel_size_estimates(root, rel);
}
/*
 * set_values_size_estimates
 *      Set the size estimates for a base relation that is a values list.
 *
 * The rel's targetlist and restrictinfo list must have been constructed
 * already.
 *
 * We set the same fields as set_baserel_size_estimates.
 */
void
set_values_size_estimates(PlannerInfo *root, RelOptInfo *rel)
{
    RangeTblEntry *rte;

    /* Should only be applied to base relations that are values lists */
    Assert(rel->relid > 0);
    rte = planner_rt_fetch(rel->relid, root);
    Assert(rte->rtekind == RTE_VALUES);

    /*
     * Estimate number of rows the values list will return.  We know this
     * precisely based on the list length (well, barring set-returning
     * functions in list items, but that's a refinement not catered for
     * anywhere else either).
     */
    rel->tuples = list_length(rte->values_lists);

    /* Now estimate number of output rows, etc */
    set_baserel_size_estimates(root, rel);
}
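/*
 * For example, FROM (VALUES (1,'a'), (2,'b'), (3,'c')) has a three-item
 * values_lists, so rel->tuples is exactly 3 before any restriction
 * clauses are accounted for by set_baserel_size_estimates.
 */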
/*
 * set_rel_width
 *      Set the estimated output width of a base relation.
 *
 * NB: this works best on plain relations because it prefers to look at
 * real Vars.  It will fail to make use of pg_statistic info when applied
 * to a subquery relation, even if the subquery outputs are simple vars
 * that we could have gotten info for.  Is it worth trying to be smarter
 * about subqueries?
 *
 * The per-attribute width estimates are cached for possible re-use while
 * building join relations.
 */
static void
set_rel_width(PlannerInfo *root, RelOptInfo *rel)
{
    int32       tuple_width = 0;
    ListCell   *tllist;
    Oid         rel_reloid;

    /*
     * Usually (perhaps always), all the Vars have the same reloid, so we
     * can save some redundant list-searching by doing getrelid just once.
     */
    if (rel->relid > 0)
        rel_reloid = getrelid(rel->relid, root->parse->rtable);
    else
        rel_reloid = InvalidOid;    /* probably can't happen */

    foreach(tllist, rel->reltargetlist)
    {
        Var        *var = (Var *) lfirst(tllist);
        int         ndx;
        Oid         var_reloid;
        int32       item_width;

        /* For now, punt on whole-row child Vars */
        if (!IsA(var, Var))
        {
            tuple_width += 32;  /* arbitrary */
            continue;
        }

        ndx = var->varattno - rel->min_attr;

        /* The width probably hasn't been cached yet, but may as well check */
        if (rel->attr_widths[ndx] > 0)
        {
            tuple_width += rel->attr_widths[ndx];
            continue;
        }

        if (var->varno == rel->relid)
            var_reloid = rel_reloid;
        else
            var_reloid = getrelid(var->varno, root->parse->rtable);

        if (var_reloid != InvalidOid)
        {
            item_width = get_attavgwidth(var_reloid, var->varattno);
            if (item_width > 0)
            {
                rel->attr_widths[ndx] = item_width;
                tuple_width += item_width;
                continue;
            }
        }

        /*
         * Not a plain relation, or can't find statistics for it.  Estimate
         * using just the type info.
         */
        item_width = get_typavgwidth(var->vartype, var->vartypmod);
        Assert(item_width > 0);
        rel->attr_widths[ndx] = item_width;
        tuple_width += item_width;
    }
    Assert(tuple_width >= 0);
    rel->width = tuple_width;
}
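/*
 * Sketch of the resulting arithmetic (widths invented): a reltargetlist
 * holding an int4 Var (pg_statistic width 4), a Var with a cached
 * attr_widths entry of 16, and a text Var with no statistics for which
 * get_typavgwidth returns 32 yields rel->width = 4 + 16 + 32 = 52 bytes.
 */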
/*
 * relation_byte_size
 *      Estimate the storage space in bytes for a given number of tuples
 *      of a given width (size in bytes).
 */
static double
relation_byte_size(double tuples, int width)
{
    return tuples * (MAXALIGN(width) + MAXALIGN(sizeof(HeapTupleHeaderData)));
}

/*
 * page_size
 *      Returns an estimate of the number of pages covered by a given
 *      number of tuples of a given width (size in bytes).
 */
static double
page_size(double tuples, int width)
{
    return ceil(relation_byte_size(tuples, width) / BLCKSZ);
}
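/*
 * Worked example for the two estimates above, assuming 8-byte MAXALIGN
 * and the default 8K BLCKSZ: 1000 tuples of width 45 give
 * relation_byte_size = 1000 * (MAXALIGN(45) + MAXALIGN(23)) =
 * 1000 * (48 + 24) = 72000 bytes (sizeof(HeapTupleHeaderData) being 23
 * on most platforms), and page_size = ceil(72000 / 8192) = 9 pages.
 */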