1 /*-------------------------------------------------------------------------
4 * Routines to compute (and set) relation sizes and path costs
6 * Path costs are measured in arbitrary units established by these basic
7 * parameters:
9 * seq_page_cost Cost of a sequential page fetch
10 * random_page_cost Cost of a non-sequential page fetch
11 * cpu_tuple_cost Cost of typical CPU time to process a tuple
12 * cpu_index_tuple_cost Cost of typical CPU time to process an index tuple
13 * cpu_operator_cost Cost of CPU time to execute an operator or function
15 * We expect that the kernel will typically do some amount of read-ahead
16 * optimization; this in conjunction with seek costs means that seq_page_cost
17 * is normally considerably less than random_page_cost. (However, if the
18 * database is fully cached in RAM, it is reasonable to set them equal.)
20 * We also use a rough estimate "effective_cache_size" of the number of
21 * disk pages in Postgres + OS-level disk cache. (We can't simply use
22 * NBuffers for this purpose because that would ignore the effects of
23 * the kernel's disk cache.)
25 * Obviously, taking constants for these values is an oversimplification,
26 * but it's tough enough to get any useful estimates even at this level of
27 * detail. Note that all of these parameters are user-settable, in case
28 * the default values are drastically off for a particular platform.
30 * We compute two separate costs for each path:
31 * total_cost: total estimated cost to fetch all tuples
32 * startup_cost: cost that is expended before first tuple is fetched
33 * In some scenarios, such as when there is a LIMIT or we are implementing
34 * an EXISTS(...) sub-select, it is not necessary to fetch all tuples of the
35 * path's result. A caller can estimate the cost of fetching a partial
36 * result by interpolating between startup_cost and total_cost. In detail:
37 * actual_cost = startup_cost +
38 * (total_cost - startup_cost) * tuples_to_fetch / path->parent->rows;
39 * Note that a base relation's rows count (and, by extension, plan_rows for
40 * plan nodes below the LIMIT node) are set without regard to any LIMIT, so
41 * that this equation works properly. (Also, these routines guarantee not to
42 * set the rows count to zero, so there will be no zero divide.) The LIMIT is
43 * applied as a top-level plan node.
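* As a purely illustrative worked example (numbers assumed, not from this
* file): with startup_cost = 10, total_cost = 110, and a parent rows
* estimate of 1000, fetching only 100 tuples would be costed at about
* 10 + (110 - 10) * 100 / 1000 = 20 under the interpolation above.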
45 * For largely historical reasons, most of the routines in this module use
46 * the passed result Path only to store their startup_cost and total_cost
47 * results into. All the input data they need is passed as separate
48 * parameters, even though much of it could be extracted from the Path.
49 * An exception is made for the cost_XXXjoin() routines, which expect all
50 * the non-cost fields of the passed XXXPath to be filled in.
53 * Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
54 * Portions Copyright (c) 1994, Regents of the University of California
57 * $PostgreSQL: pgsql/src/backend/optimizer/path/costsize.c,v 1.174 2007/01/10 18:06:03 tgl Exp $
59 *-------------------------------------------------------------------------
66 #include "executor/nodeHash.h"
67 #include "miscadmin.h"
68 #include "optimizer/clauses.h"
69 #include "optimizer/cost.h"
70 #include "optimizer/pathnode.h"
71 #include "parser/parsetree.h"
72 #include "utils/lsyscache.h"
73 #include "utils/selfuncs.h"
74 #include "utils/tuplesort.h"
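/* Added note: 0.693147180559945 is ln(2), so the macro below computes a base-2 logarithm */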
77 #define LOG2(x) (log(x) / 0.693147180559945)
80 * Some Paths return fewer than the nominal number of rows of their parent
81 * relations; join nodes need to do this to get the correct input count:
83 #define PATH_ROWS(path) \
84 (IsA(path, UniquePath) ? \
85 ((UniquePath *) (path))->rows : \
86 (path)->parent->rows)
89 double seq_page_cost = DEFAULT_SEQ_PAGE_COST;
90 double random_page_cost = DEFAULT_RANDOM_PAGE_COST;
91 double cpu_tuple_cost = DEFAULT_CPU_TUPLE_COST;
92 double cpu_index_tuple_cost = DEFAULT_CPU_INDEX_TUPLE_COST;
93 double cpu_operator_cost = DEFAULT_CPU_OPERATOR_COST;
95 int effective_cache_size = DEFAULT_EFFECTIVE_CACHE_SIZE;
97 Cost disable_cost = 100000000.0;
99 bool enable_seqscan = true;
100 bool enable_indexscan = true;
101 bool enable_bitmapscan = true;
102 bool enable_tidscan = true;
103 bool enable_sort = true;
104 bool enable_hashagg = true;
105 bool enable_nestloop = true;
106 bool enable_mergejoin = true;
107 bool enable_hashjoin = true;
110 static bool cost_qual_eval_walker(Node *node, QualCost *total);
111 static Selectivity approx_selectivity(PlannerInfo *root, List *quals,
112 JoinType jointype);
113 static Selectivity join_in_selectivity(JoinPath *path, PlannerInfo *root);
114 static void set_rel_width(PlannerInfo *root, RelOptInfo *rel);
115 static double relation_byte_size(double tuples, int width);
116 static double page_size(double tuples, int width);
121 * Force a row-count estimate to a sane value.
124 clamp_row_est(double nrows)
127 * Force estimate to be at least one row, to make explain output look
128 * better and to avoid possible divide-by-zero when interpolating costs.
129 * Make it an integer, too.
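* (Illustrative example, not in the original comment: an estimate of 0.3
* rows is forced up to 1, while a fractional estimate such as 123.4 is
* rounded to a whole number of rows.)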
142 * Determines and returns the cost of scanning a relation sequentially.
145 cost_seqscan(Path *path, PlannerInfo *root,
148 Cost startup_cost = 0;
152 /* Should only be applied to base relations */
153 Assert(baserel->relid > 0);
154 Assert(baserel->rtekind == RTE_RELATION);
157 startup_cost += disable_cost;
162 run_cost += seq_page_cost * baserel->pages;
165 startup_cost += baserel->baserestrictcost.startup;
166 cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
167 run_cost += cpu_per_tuple * baserel->tuples;
169 path->startup_cost = startup_cost;
170 path->total_cost = startup_cost + run_cost;
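/*
 * Illustrative example (assumed numbers, not part of the original code):
 * with the default seq_page_cost = 1.0 and cpu_tuple_cost = 0.01, a table
 * of 10,000 pages and 1,000,000 tuples with no restriction clauses costs
 * about 1.0 * 10000 + 0.01 * 1000000 = 20000 units, all of it run cost.
 */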
175 * Determines and returns the cost of scanning a relation using an index.
177 * 'index' is the index to be used
178 * 'indexQuals' is the list of applicable qual clauses (implicit AND semantics)
179 * 'outer_rel' is the outer relation when we are considering using the index
180 * scan as the inside of a nestloop join (hence, some of the indexQuals
181 * are join clauses, and we should expect repeated scans of the index);
182 * NULL for a plain index scan
184 * cost_index() takes an IndexPath not just a Path, because it sets a few
185 * additional fields of the IndexPath besides startup_cost and total_cost.
186 * These fields are needed if the IndexPath is used in a BitmapIndexScan.
188 * NOTE: 'indexQuals' must contain only clauses usable as index restrictions.
189 * Any additional quals evaluated as qpquals may reduce the number of returned
190 * tuples, but they won't reduce the number of tuples we have to fetch from
191 * the table, so they don't reduce the scan cost.
193 * NOTE: as of 8.0, indexQuals is a list of RestrictInfo nodes, where formerly
194 * it was a list of bare clause expressions.
197 cost_index(IndexPath *path, PlannerInfo *root,
200 RelOptInfo *outer_rel)
202 RelOptInfo *baserel = index->rel;
203 Cost startup_cost = 0;
205 Cost indexStartupCost;
207 Selectivity indexSelectivity;
208 double indexCorrelation,
213 double tuples_fetched;
214 double pages_fetched;
216 /* Should only be applied to base relations */
217 Assert(IsA(baserel, RelOptInfo) &&
218 IsA(index, IndexOptInfo));
219 Assert(baserel->relid > 0);
220 Assert(baserel->rtekind == RTE_RELATION);
222 if (!enable_indexscan)
223 startup_cost += disable_cost;
226 * Call index-access-method-specific code to estimate the processing cost
227 * for scanning the index, as well as the selectivity of the index (ie,
228 * the fraction of main-table tuples we will have to retrieve) and its
229 * correlation to the main-table tuple order.
231 OidFunctionCall8(index->amcostestimate,
232 PointerGetDatum(root),
233 PointerGetDatum(index),
234 PointerGetDatum(indexQuals),
235 PointerGetDatum(outer_rel),
236 PointerGetDatum(&indexStartupCost),
237 PointerGetDatum(&indexTotalCost),
238 PointerGetDatum(&indexSelectivity),
239 PointerGetDatum(&indexCorrelation));
242 * Save amcostestimate's results for possible use in bitmap scan planning.
243 * We don't bother to save indexStartupCost or indexCorrelation, because a
244 * bitmap scan doesn't care about either.
246 path->indextotalcost = indexTotalCost;
247 path->indexselectivity = indexSelectivity;
249 /* all costs for touching index itself included here */
250 startup_cost += indexStartupCost;
251 run_cost += indexTotalCost - indexStartupCost;
253 /* estimate number of main-table tuples fetched */
254 tuples_fetched = clamp_row_est(indexSelectivity * baserel->tuples);
257 * Estimate number of main-table pages fetched, and compute I/O cost.
259 * When the index ordering is uncorrelated with the table ordering,
260 * we use an approximation proposed by Mackert and Lohman (see
261 * index_pages_fetched() for details) to compute the number of pages
262 * fetched, and then charge random_page_cost per page fetched.
264 * When the index ordering is exactly correlated with the table ordering
265 * (just after a CLUSTER, for example), the number of pages fetched should
266 * be exactly selectivity * table_size. What's more, all but the first
267 * will be sequential fetches, not the random fetches that occur in the
268 * uncorrelated case. So if the number of pages is more than 1, we
269 * ought to charge
270 * random_page_cost + (pages_fetched - 1) * seq_page_cost
271 * For partially-correlated indexes, we ought to charge somewhere between
272 * these two estimates. We currently interpolate linearly between the
273 * estimates based on the correlation squared (XXX is that appropriate?).
276 if (outer_rel != NULL && outer_rel->rows > 1)
279 * For repeated indexscans, the appropriate estimate for the
280 * uncorrelated case is to scale up the number of tuples fetched in
281 * the Mackert and Lohman formula by the number of scans, so that we
282 * estimate the number of pages fetched by all the scans; then
283 * pro-rate the costs for one scan. In this case we assume all the
284 * fetches are random accesses.
286 double num_scans = outer_rel->rows;
288 pages_fetched = index_pages_fetched(tuples_fetched * num_scans,
290 (double) index->pages,
293 max_IO_cost = (pages_fetched * random_page_cost) / num_scans;
296 * In the perfectly correlated case, the number of pages touched
297 * by each scan is selectivity * table_size, and we can use the
298 * Mackert and Lohman formula at the page level to estimate how
299 * much work is saved by caching across scans. We still assume
300 * all the fetches are random, though, which is an overestimate
301 * that's hard to correct for without double-counting the cache
302 * effects. (But in most cases where such a plan is actually
303 * interesting, only one page would get fetched per scan anyway,
304 * so it shouldn't matter much.)
306 pages_fetched = ceil(indexSelectivity * (double) baserel->pages);
308 pages_fetched = index_pages_fetched(pages_fetched * num_scans,
310 (double) index->pages,
313 min_IO_cost = (pages_fetched * random_page_cost) / num_scans;
318 * Normal case: apply the Mackert and Lohman formula, and then
319 * interpolate between that and the correlation-derived result.
321 pages_fetched = index_pages_fetched(tuples_fetched,
323 (double) index->pages,
326 /* max_IO_cost is for the perfectly uncorrelated case (csquared=0) */
327 max_IO_cost = pages_fetched * random_page_cost;
329 /* min_IO_cost is for the perfectly correlated case (csquared=1) */
330 pages_fetched = ceil(indexSelectivity * (double) baserel->pages);
331 min_IO_cost = random_page_cost;
332 if (pages_fetched > 1)
333 min_IO_cost += (pages_fetched - 1) * seq_page_cost;
337 * Now interpolate based on estimated index order correlation to get
338 * total disk I/O cost for main table accesses.
340 csquared = indexCorrelation * indexCorrelation;
342 run_cost += max_IO_cost + csquared * (min_IO_cost - max_IO_cost);
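/*
 * Illustrative numbers only: if max_IO_cost = 400, min_IO_cost = 100, and
 * indexCorrelation = 0.5, then csquared = 0.25 and the I/O charge is
 * 400 + 0.25 * (100 - 400) = 325, i.e. three-quarters of the way from the
 * perfectly-correlated estimate toward the uncorrelated one.
 */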
345 * Estimate CPU costs per tuple.
347 * Normally the indexquals will be removed from the list of restriction
348 * clauses that we have to evaluate as qpquals, so we should subtract
349 * their costs from baserestrictcost. But if we are doing a join then
350 * some of the indexquals are join clauses and shouldn't be subtracted.
351 * Rather than work out exactly how much to subtract, we don't subtract
352 * anything.
354 startup_cost += baserel->baserestrictcost.startup;
355 cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
357 if (outer_rel == NULL)
359 QualCost index_qual_cost;
361 cost_qual_eval(&index_qual_cost, indexQuals);
362 /* any startup cost still has to be paid ... */
363 cpu_per_tuple -= index_qual_cost.per_tuple;
366 run_cost += cpu_per_tuple * tuples_fetched;
368 path->path.startup_cost = startup_cost;
369 path->path.total_cost = startup_cost + run_cost;
373 * index_pages_fetched
374 * Estimate the number of pages actually fetched after accounting for
377 * We use an approximation proposed by Mackert and Lohman, "Index Scans
378 * Using a Finite LRU Buffer: A Validated I/O Model", ACM Transactions
379 * on Database Systems, Vol. 14, No. 3, September 1989, Pages 401-424.
380 * The Mackert and Lohman approximation is that the number of pages
383 * min(2TNs/(2T+Ns), T) when T <= b
384 * 2TNs/(2T+Ns) when T > b and Ns <= 2Tb/(2T-b)
385 * b + (Ns - 2Tb/(2T-b))*(T-b)/T when T > b and Ns > 2Tb/(2T-b)
387 * T = # pages in table
388 * N = # tuples in table
389 * s = selectivity = fraction of table to be scanned
390 * b = # buffer pages available (we include kernel space here)
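* As a hypothetical worked example: with T = 1000 pages, b = 800 buffer
* pages, and Ns = 1000 tuples fetched, T > b and 2Tb/(2T-b) =
* 1,600,000/1200 = ~1333; since Ns is below that limit, the estimate is
* 2TNs/(2T+Ns) = 2,000,000/3000 = ~667 pages fetched.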
392 * We assume that effective_cache_size is the total number of buffer pages
393 * available for the whole query, and pro-rate that space across all the
394 * tables in the query and the index currently under consideration. (This
395 * ignores space needed for other indexes used by the query, but since we
396 * don't know which indexes will get used, we can't estimate that very well;
397 * and in any case counting all the tables may well be an overestimate, since
398 * depending on the join plan not all the tables may be scanned concurrently.)
400 * The product Ns is the number of tuples fetched; we pass in that
401 * product rather than calculating it here. "pages" is the number of pages
402 * in the object under consideration (either an index or a table).
403 * "index_pages" is the amount to add to the total table space, which was
404 * computed for us by query_planner.
406 * Caller is expected to have ensured that tuples_fetched is greater than zero
407 * and rounded to integer (see clamp_row_est). The result will likewise be
408 * greater than zero and integral.
411 index_pages_fetched(double tuples_fetched, BlockNumber pages,
412 double index_pages, PlannerInfo *root)
414 double pages_fetched;
419 /* T is # pages in table, but don't allow it to be zero */
420 T = (pages > 1) ? (double) pages : 1.0;
422 /* Compute number of pages assumed to be competing for cache space */
423 total_pages = root->total_table_pages + index_pages;
424 total_pages = Max(total_pages, 1.0);
425 Assert(T <= total_pages);
427 /* b is pro-rated share of effective_cache_size */
428 b = (double) effective_cache_size * T / total_pages;
430 /* force it positive and integral */
436 /* This part is the Mackert and Lohman formula */
440 (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
441 if (pages_fetched >= T)
444 pages_fetched = ceil(pages_fetched);
450 lim = (2.0 * T * b) / (2.0 * T - b);
451 if (tuples_fetched <= lim)
454 (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
459 b + (tuples_fetched - lim) * (T - b) / T;
461 pages_fetched = ceil(pages_fetched);
463 return pages_fetched;
467 * get_indexpath_pages
468 * Determine the total size of the indexes used in a bitmap index path.
470 * Note: if the same index is used more than once in a bitmap tree, we will
471 * count it multiple times, which perhaps is the wrong thing ... but it's
472 * not completely clear, and detecting duplicates is difficult, so ignore it
473 * for now.
476 get_indexpath_pages(Path *bitmapqual)
481 if (IsA(bitmapqual, BitmapAndPath))
483 BitmapAndPath *apath = (BitmapAndPath *) bitmapqual;
485 foreach(l, apath->bitmapquals)
487 result += get_indexpath_pages((Path *) lfirst(l));
490 else if (IsA(bitmapqual, BitmapOrPath))
492 BitmapOrPath *opath = (BitmapOrPath *) bitmapqual;
494 foreach(l, opath->bitmapquals)
496 result += get_indexpath_pages((Path *) lfirst(l));
499 else if (IsA(bitmapqual, IndexPath))
501 IndexPath *ipath = (IndexPath *) bitmapqual;
503 result = (double) ipath->indexinfo->pages;
506 elog(ERROR, "unrecognized node type: %d", nodeTag(bitmapqual));
512 * cost_bitmap_heap_scan
513 * Determines and returns the cost of scanning a relation using a bitmap
514 * index-then-heap plan.
516 * 'baserel' is the relation to be scanned
517 * 'bitmapqual' is a tree of IndexPaths, BitmapAndPaths, and BitmapOrPaths
518 * 'outer_rel' is the outer relation when we are considering using the bitmap
519 * scan as the inside of a nestloop join (hence, some of the indexQuals
520 * are join clauses, and we should expect repeated scans of the table);
521 * NULL for a plain bitmap scan
523 * Note: if this is a join inner path, the component IndexPaths in bitmapqual
524 * should have been costed accordingly.
527 cost_bitmap_heap_scan(Path *path, PlannerInfo *root, RelOptInfo *baserel,
528 Path *bitmapqual, RelOptInfo *outer_rel)
530 Cost startup_cost = 0;
533 Selectivity indexSelectivity;
536 double tuples_fetched;
537 double pages_fetched;
540 /* Should only be applied to base relations */
541 Assert(IsA(baserel, RelOptInfo));
542 Assert(baserel->relid > 0);
543 Assert(baserel->rtekind == RTE_RELATION);
545 if (!enable_bitmapscan)
546 startup_cost += disable_cost;
549 * Fetch total cost of obtaining the bitmap, as well as its total
550 * selectivity.
552 cost_bitmap_tree_node(bitmapqual, &indexTotalCost, &indexSelectivity);
554 startup_cost += indexTotalCost;
557 * Estimate number of main-table pages fetched.
559 tuples_fetched = clamp_row_est(indexSelectivity * baserel->tuples);
561 T = (baserel->pages > 1) ? (double) baserel->pages : 1.0;
563 if (outer_rel != NULL && outer_rel->rows > 1)
566 * For repeated bitmap scans, scale up the number of tuples fetched in
567 * the Mackert and Lohman formula by the number of scans, so that we
568 * estimate the number of pages fetched by all the scans. Then
569 * pro-rate for one scan.
571 double num_scans = outer_rel->rows;
573 pages_fetched = index_pages_fetched(tuples_fetched * num_scans,
575 get_indexpath_pages(bitmapqual),
577 pages_fetched /= num_scans;
582 * For a single scan, the number of heap pages that need to be fetched
583 * is the same as the Mackert and Lohman formula for the case T <= b
584 * (ie, no re-reads needed).
586 pages_fetched = (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
588 if (pages_fetched >= T)
591 pages_fetched = ceil(pages_fetched);
594 * For small numbers of pages we should charge random_page_cost apiece,
595 * while if nearly all the table's pages are being read, it's more
596 * appropriate to charge seq_page_cost apiece. The effect is nonlinear,
597 * too. For lack of a better idea, interpolate like this to determine the
598 * cost per page.
600 if (pages_fetched >= 2.0)
601 cost_per_page = random_page_cost -
602 (random_page_cost - seq_page_cost) * sqrt(pages_fetched / T);
604 cost_per_page = random_page_cost;
606 run_cost += pages_fetched * cost_per_page;
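/*
 * Illustrative numbers only: with random_page_cost = 4.0, seq_page_cost =
 * 1.0, T = 100 pages and pages_fetched = 25, sqrt(25/100) = 0.5 and the
 * charge is 4.0 - (4.0 - 1.0) * 0.5 = 2.5 per page, partway between the
 * random and sequential extremes.
 */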
609 * Estimate CPU costs per tuple.
611 * Often the indexquals don't need to be rechecked at each tuple ... but
612 * not always, especially not if there are enough tuples involved that the
613 * bitmaps become lossy. For the moment, just assume they will be
614 * rechecked always.
616 startup_cost += baserel->baserestrictcost.startup;
617 cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
619 run_cost += cpu_per_tuple * tuples_fetched;
621 path->startup_cost = startup_cost;
622 path->total_cost = startup_cost + run_cost;
626 * cost_bitmap_tree_node
627 * Extract cost and selectivity from a bitmap tree node (index/and/or)
630 cost_bitmap_tree_node(Path *path, Cost *cost, Selectivity *selec)
632 if (IsA(path, IndexPath))
634 *cost = ((IndexPath *) path)->indextotalcost;
635 *selec = ((IndexPath *) path)->indexselectivity;
637 * Charge a small amount per retrieved tuple to reflect the costs of
638 * manipulating the bitmap. This is mostly to make sure that a bitmap
639 * scan doesn't look to be the same cost as an indexscan to retrieve
640 * a single tuple.
642 *cost += 0.1 * cpu_operator_cost * ((IndexPath *) path)->rows;
644 else if (IsA(path, BitmapAndPath))
646 *cost = path->total_cost;
647 *selec = ((BitmapAndPath *) path)->bitmapselectivity;
649 else if (IsA(path, BitmapOrPath))
651 *cost = path->total_cost;
652 *selec = ((BitmapOrPath *) path)->bitmapselectivity;
656 elog(ERROR, "unrecognized node type: %d", nodeTag(path));
657 *cost = *selec = 0; /* keep compiler quiet */
662 * cost_bitmap_and_node
663 * Estimate the cost of a BitmapAnd node
665 * Note that this considers only the costs of index scanning and bitmap
666 * creation, not the eventual heap access. In that sense the object isn't
667 * truly a Path, but it has enough path-like properties (costs in particular)
668 * to warrant treating it as one.
671 cost_bitmap_and_node(BitmapAndPath *path, PlannerInfo *root)
678 * We estimate AND selectivity on the assumption that the inputs are
679 * independent. This is probably often wrong, but we don't have the info
680 * to do better.
682 * The runtime cost of the BitmapAnd itself is estimated at 100x
683 * cpu_operator_cost for each tbm_intersect needed. Probably too small,
684 * definitely too simplistic?
688 foreach(l, path->bitmapquals)
690 Path *subpath = (Path *) lfirst(l);
692 Selectivity subselec;
694 cost_bitmap_tree_node(subpath, &subCost, &subselec);
698 totalCost += subCost;
699 if (l != list_head(path->bitmapquals))
700 totalCost += 100.0 * cpu_operator_cost;
702 path->bitmapselectivity = selec;
703 path->path.startup_cost = totalCost;
704 path->path.total_cost = totalCost;
708 * cost_bitmap_or_node
709 * Estimate the cost of a BitmapOr node
711 * See comments for cost_bitmap_and_node.
714 cost_bitmap_or_node(BitmapOrPath *path, PlannerInfo *root)
721 * We estimate OR selectivity on the assumption that the inputs are
722 * non-overlapping, since that's often the case in "x IN (list)" type
723 * situations. Of course, we clamp to 1.0 at the end.
725 * The runtime cost of the BitmapOr itself is estimated at 100x
726 * cpu_operator_cost for each tbm_union needed. Probably too small,
727 * definitely too simplistic? We are aware that the tbm_unions are
728 * optimized out when the inputs are BitmapIndexScans.
732 foreach(l, path->bitmapquals)
734 Path *subpath = (Path *) lfirst(l);
736 Selectivity subselec;
738 cost_bitmap_tree_node(subpath, &subCost, &subselec);
742 totalCost += subCost;
743 if (l != list_head(path->bitmapquals) &&
744 !IsA(subpath, IndexPath))
745 totalCost += 100.0 * cpu_operator_cost;
747 path->bitmapselectivity = Min(selec, 1.0);
748 path->path.startup_cost = totalCost;
749 path->path.total_cost = totalCost;
754 * Determines and returns the cost of scanning a relation using TIDs.
757 cost_tidscan(Path *path, PlannerInfo *root,
758 RelOptInfo *baserel, List *tidquals)
760 Cost startup_cost = 0;
766 /* Should only be applied to base relations */
767 Assert(baserel->relid > 0);
768 Assert(baserel->rtekind == RTE_RELATION);
771 startup_cost += disable_cost;
773 /* Count how many tuples we expect to retrieve */
777 if (IsA(lfirst(l), ScalarArrayOpExpr))
779 /* Each element of the array yields 1 tuple */
780 ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) lfirst(l);
781 Node *arraynode = (Node *) lsecond(saop->args);
783 ntuples += estimate_array_length(arraynode);
787 /* It's just CTID = something, count 1 tuple */
792 /* disk costs --- assume each tuple on a different page */
793 run_cost += random_page_cost * ntuples;
796 startup_cost += baserel->baserestrictcost.startup;
797 cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
798 run_cost += cpu_per_tuple * ntuples;
800 path->startup_cost = startup_cost;
801 path->total_cost = startup_cost + run_cost;
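/*
 * Illustrative example (hypothetical qual): a TID qual such as
 * "ctid = ANY (...)" with a 10-element constant array is costed as 10
 * random page fetches plus the per-tuple CPU charges above.
 */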
806 * Determines and returns the cost of scanning a subquery RTE.
809 cost_subqueryscan(Path *path, RelOptInfo *baserel)
815 /* Should only be applied to base relations that are subqueries */
816 Assert(baserel->relid > 0);
817 Assert(baserel->rtekind == RTE_SUBQUERY);
820 * Cost of path is cost of evaluating the subplan, plus cost of evaluating
821 * any restriction clauses that will be attached to the SubqueryScan node,
822 * plus cpu_tuple_cost to account for selection and projection overhead.
824 path->startup_cost = baserel->subplan->startup_cost;
825 path->total_cost = baserel->subplan->total_cost;
827 startup_cost = baserel->baserestrictcost.startup;
828 cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
829 run_cost = cpu_per_tuple * baserel->tuples;
831 path->startup_cost += startup_cost;
832 path->total_cost += startup_cost + run_cost;
837 * Determines and returns the cost of scanning a function RTE.
840 cost_functionscan(Path *path, PlannerInfo *root, RelOptInfo *baserel)
842 Cost startup_cost = 0;
846 /* Should only be applied to base relations that are functions */
847 Assert(baserel->relid > 0);
848 Assert(baserel->rtekind == RTE_FUNCTION);
851 * For now, estimate function's cost at one operator eval per function
852 * call. Someday we should revive the function cost estimate columns in
853 * pg_proc...
855 cpu_per_tuple = cpu_operator_cost;
857 /* Add scanning CPU costs */
858 startup_cost += baserel->baserestrictcost.startup;
859 cpu_per_tuple += cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
860 run_cost += cpu_per_tuple * baserel->tuples;
862 path->startup_cost = startup_cost;
863 path->total_cost = startup_cost + run_cost;
868 * Determines and returns the cost of scanning a VALUES RTE.
871 cost_valuesscan(Path *path, PlannerInfo *root, RelOptInfo *baserel)
873 Cost startup_cost = 0;
877 /* Should only be applied to base relations that are values lists */
878 Assert(baserel->relid > 0);
879 Assert(baserel->rtekind == RTE_VALUES);
882 * For now, estimate list evaluation cost at one operator eval per list
883 * (probably pretty bogus, but is it worth being smarter?)
885 cpu_per_tuple = cpu_operator_cost;
887 /* Add scanning CPU costs */
888 startup_cost += baserel->baserestrictcost.startup;
889 cpu_per_tuple += cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
890 run_cost += cpu_per_tuple * baserel->tuples;
892 path->startup_cost = startup_cost;
893 path->total_cost = startup_cost + run_cost;
898 * Determines and returns the cost of sorting a relation, including
899 * the cost of reading the input data.
901 * If the total volume of data to sort is less than work_mem, we will do
902 * an in-memory sort, which requires no I/O and about t*log2(t) tuple
903 * comparisons for t tuples.
905 * If the total volume exceeds work_mem, we switch to a tape-style merge
906 * algorithm. There will still be about t*log2(t) tuple comparisons in
907 * total, but we will also need to write and read each tuple once per
908 * merge pass. We expect about ceil(logM(r)) merge passes where r is the
909 * number of initial runs formed and M is the merge order used by tuplesort.c.
910 * Since the average initial run should be about twice work_mem, we have
911 * disk traffic = 2 * relsize * ceil(logM(relsize / (2*work_mem)))
912 * cpu = comparison_cost * t * log2(t)
914 * The disk traffic is assumed to be 3/4ths sequential and 1/4th random
915 * accesses (XXX can't we refine that guess?)
917 * We charge two operator evals per tuple comparison, which should be in
918 * the right ballpark in most cases.
920 * 'pathkeys' is a list of sort keys
921 * 'input_cost' is the total cost for reading the input data
922 * 'tuples' is the number of tuples in the relation
923 * 'width' is the average tuple width in bytes
925 * NOTE: some callers currently pass NIL for pathkeys because they
926 * can't conveniently supply the sort keys. Since this routine doesn't
927 * currently do anything with pathkeys anyway, that doesn't matter...
928 * but if it ever does, it should react gracefully to lack of key data.
929 * (Actually, the thing we'd most likely be interested in is just the number
930 * of sort keys, which all callers *could* supply.)
933 cost_sort(Path *path, PlannerInfo *root,
934 List *pathkeys, Cost input_cost, double tuples, int width)
936 Cost startup_cost = input_cost;
938 double nbytes = relation_byte_size(tuples, width);
939 long work_mem_bytes = work_mem * 1024L;
942 startup_cost += disable_cost;
945 * We want to be sure the cost of a sort is never estimated as zero, even
946 * if passed-in tuple count is zero. Besides, mustn't do log(0)...
954 * Assume about two operator evals per tuple comparison and N log2 N
955 * comparisons
957 startup_cost += 2.0 * cpu_operator_cost * tuples * LOG2(tuples);
960 if (nbytes > work_mem_bytes)
962 double npages = ceil(nbytes / BLCKSZ);
963 double nruns = (nbytes / work_mem_bytes) * 0.5;
964 double mergeorder = tuplesort_merge_order(work_mem_bytes);
966 double npageaccesses;
968 /* Compute logM(r) as log(r) / log(M) */
969 if (nruns > mergeorder)
970 log_runs = ceil(log(nruns) / log(mergeorder));
973 npageaccesses = 2.0 * npages * log_runs;
974 /* Assume 3/4ths of accesses are sequential, 1/4th are not */
975 startup_cost += npageaccesses *
976 (seq_page_cost * 0.75 + random_page_cost * 0.25);
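/*
 * Rough illustration with assumed numbers: sorting 512MB with work_mem =
 * 16MB forms about (512/16) * 0.5 = 16 initial runs; with a hypothetical
 * merge order of 8 that is ceil(log(16)/log(8)) = 2 merge passes, so each
 * page is written and read about twice beyond initial run formation.
 */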
980 * Also charge a small amount (arbitrarily set equal to operator cost) per
981 * extracted tuple.
983 run_cost += cpu_operator_cost * tuples;
985 path->startup_cost = startup_cost;
986 path->total_cost = startup_cost + run_cost;
991 * Determines and returns the cost of materializing a relation, including
992 * the cost of reading the input data.
994 * If the total volume of data to materialize exceeds work_mem, we will need
995 * to write it to disk, so the cost is much higher in that case.
998 cost_material(Path *path,
999 Cost input_cost, double tuples, int width)
1001 Cost startup_cost = input_cost;
1003 double nbytes = relation_byte_size(tuples, width);
1004 long work_mem_bytes = work_mem * 1024L;
1007 if (nbytes > work_mem_bytes)
1009 double npages = ceil(nbytes / BLCKSZ);
1011 /* We'll write during startup and read during retrieval */
1012 startup_cost += seq_page_cost * npages;
1013 run_cost += seq_page_cost * npages;
1017 * Charge a very small amount per inserted tuple, to reflect bookkeeping
1018 * costs. We use cpu_tuple_cost/10 for this. This is needed to break the
1019 * tie that would otherwise exist between nestloop with A outer,
1020 * materialized B inner and nestloop with B outer, materialized A inner.
1021 * The extra cost ensures we'll prefer materializing the smaller rel.
1023 startup_cost += cpu_tuple_cost * 0.1 * tuples;
1026 * Also charge a small amount per extracted tuple. We use cpu_tuple_cost
1027 * so that it doesn't appear worthwhile to materialize a bare seqscan.
1029 run_cost += cpu_tuple_cost * tuples;
1031 path->startup_cost = startup_cost;
1032 path->total_cost = startup_cost + run_cost;
1037 * Determines and returns the cost of performing an Agg plan node,
1038 * including the cost of its input.
1040 * Note: when aggstrategy == AGG_SORTED, caller must ensure that input costs
1041 * are for appropriately-sorted input.
1044 cost_agg(Path *path, PlannerInfo *root,
1045 AggStrategy aggstrategy, int numAggs,
1046 int numGroupCols, double numGroups,
1047 Cost input_startup_cost, Cost input_total_cost,
1048 double input_tuples)
1054 * We charge one cpu_operator_cost per aggregate function per input tuple,
1055 * and another one per output tuple (corresponding to transfn and finalfn
1056 * calls respectively). If we are grouping, we charge an additional
1057 * cpu_operator_cost per grouping column per input tuple for grouping
1058 * comparisons.
1060 * We will produce a single output tuple if not grouping, and a tuple per
1061 * group otherwise. We charge cpu_tuple_cost for each output tuple.
1063 * Note: in this cost model, AGG_SORTED and AGG_HASHED have exactly the
1064 * same total CPU cost, but AGG_SORTED has lower startup cost. If the
1065 * input path is already sorted appropriately, AGG_SORTED should be
1066 * preferred (since it has no risk of memory overflow). This will happen
1067 * as long as the computed total costs are indeed exactly equal --- but if
1068 * there's roundoff error we might do the wrong thing. So be sure that
1069 * the computations below form the same intermediate values in the same
1070 * order.
1072 if (aggstrategy == AGG_PLAIN)
1074 startup_cost = input_total_cost;
1075 startup_cost += cpu_operator_cost * (input_tuples + 1) * numAggs;
1076 /* we aren't grouping */
1077 total_cost = startup_cost + cpu_tuple_cost;
1079 else if (aggstrategy == AGG_SORTED)
1081 /* Here we are able to deliver output on-the-fly */
1082 startup_cost = input_startup_cost;
1083 total_cost = input_total_cost;
1084 /* calcs phrased this way to match HASHED case, see note above */
1085 total_cost += cpu_operator_cost * input_tuples * numGroupCols;
1086 total_cost += cpu_operator_cost * input_tuples * numAggs;
1087 total_cost += cpu_operator_cost * numGroups * numAggs;
1088 total_cost += cpu_tuple_cost * numGroups;
1092 /* must be AGG_HASHED */
1093 startup_cost = input_total_cost;
1094 startup_cost += cpu_operator_cost * input_tuples * numGroupCols;
1095 startup_cost += cpu_operator_cost * input_tuples * numAggs;
1096 total_cost = startup_cost;
1097 total_cost += cpu_operator_cost * numGroups * numAggs;
1098 total_cost += cpu_tuple_cost * numGroups;
1101 path->startup_cost = startup_cost;
1102 path->total_cost = total_cost;
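/*
 * Illustrative example (assumed numbers, default cost settings): hashed
 * aggregation of 10,000 input tuples into 100 groups with 2 grouping
 * columns and 1 aggregate adds 0.0025 * 10000 * (2 + 1) = 75 units of
 * startup cost on top of the input's total cost, plus only about 1.25
 * further units of per-group work at output time.
 */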
1107 * Determines and returns the cost of performing a Group plan node,
1108 * including the cost of its input.
1110 * Note: caller must ensure that input costs are for appropriately-sorted
1111 * input.
1114 cost_group(Path *path, PlannerInfo *root,
1115 int numGroupCols, double numGroups,
1116 Cost input_startup_cost, Cost input_total_cost,
1117 double input_tuples)
1122 startup_cost = input_startup_cost;
1123 total_cost = input_total_cost;
1126 * Charge one cpu_operator_cost per comparison per input tuple. We assume
1127 * all columns get compared for most of the tuples.
1129 total_cost += cpu_operator_cost * input_tuples * numGroupCols;
1131 path->startup_cost = startup_cost;
1132 path->total_cost = total_cost;
1136 * If a nestloop's inner path is an indexscan, be sure to use its estimated
1137 * output row count, which may be lower than the restriction-clause-only row
1138 * count of its parent. (We don't include this case in the PATH_ROWS macro
1139 * because it applies *only* to a nestloop's inner relation.) We have to
1140 * be prepared to recurse through Append nodes in case of an appendrel.
1143 nestloop_inner_path_rows(Path *path)
1147 if (IsA(path, IndexPath))
1148 result = ((IndexPath *) path)->rows;
1149 else if (IsA(path, BitmapHeapPath))
1150 result = ((BitmapHeapPath *) path)->rows;
1151 else if (IsA(path, AppendPath))
1156 foreach(l, ((AppendPath *) path)->subpaths)
1158 result += nestloop_inner_path_rows((Path *) lfirst(l));
1162 result = PATH_ROWS(path);
1169 * Determines and returns the cost of joining two relations using the
1170 * nested loop algorithm.
1172 * 'path' is already filled in except for the cost fields
1175 cost_nestloop(NestPath *path, PlannerInfo *root)
1177 Path *outer_path = path->outerjoinpath;
1178 Path *inner_path = path->innerjoinpath;
1179 Cost startup_cost = 0;
1182 QualCost restrict_qual_cost;
1183 double outer_path_rows = PATH_ROWS(outer_path);
1184 double inner_path_rows = nestloop_inner_path_rows(inner_path);
1186 Selectivity joininfactor;
1188 if (!enable_nestloop)
1189 startup_cost += disable_cost;
1192 * If we're doing JOIN_IN then we will stop scanning inner tuples for an
1193 * outer tuple as soon as we have one match. Account for the effects of
1194 * this by scaling down the cost estimates in proportion to the JOIN_IN
1195 * selectivity. (This assumes that all the quals attached to the join are
1196 * IN quals, which should be true.)
1198 joininfactor = join_in_selectivity(path, root);
1200 /* cost of source data */
1203 * NOTE: clearly, we must pay both outer and inner paths' startup_cost
1204 * before we can start returning tuples, so the join's startup cost is
1205 * their sum. What's not so clear is whether the inner path's
1206 * startup_cost must be paid again on each rescan of the inner path. This
1207 * is not true if the inner path is materialized or is a hashjoin, but
1208 * probably is true otherwise.
1210 startup_cost += outer_path->startup_cost + inner_path->startup_cost;
1211 run_cost += outer_path->total_cost - outer_path->startup_cost;
1212 if (IsA(inner_path, MaterialPath) ||
1213 IsA(inner_path, HashPath))
1215 /* charge only run cost for each iteration of inner path */
1220 * charge startup cost for each iteration of inner path, except we
1221 * already charged the first startup_cost in our own startup
1223 run_cost += (outer_path_rows - 1) * inner_path->startup_cost;
1225 run_cost += outer_path_rows *
1226 (inner_path->total_cost - inner_path->startup_cost) * joininfactor;
1229 * Compute number of tuples processed (not number emitted!)
1231 ntuples = outer_path_rows * inner_path_rows * joininfactor;
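/*
 * For illustration only: 1,000 outer rows against an inner path returning
 * 50 rows per scan means roughly 50,000 qual evaluations here (before any
 * JOIN_IN scaling).
 */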
1234 cost_qual_eval(&restrict_qual_cost, path->joinrestrictinfo);
1235 startup_cost += restrict_qual_cost.startup;
1236 cpu_per_tuple = cpu_tuple_cost + restrict_qual_cost.per_tuple;
1237 run_cost += cpu_per_tuple * ntuples;
1239 path->path.startup_cost = startup_cost;
1240 path->path.total_cost = startup_cost + run_cost;
1245 * Determines and returns the cost of joining two relations using the
1246 * merge join algorithm.
1248 * 'path' is already filled in except for the cost fields
1250 * Notes: path's mergeclauses should be a subset of the joinrestrictinfo list;
1251 * outersortkeys and innersortkeys are lists of the keys to be used
1252 * to sort the outer and inner relations, or NIL if no explicit
1253 * sort is needed because the source path is already ordered.
1256 cost_mergejoin(MergePath *path, PlannerInfo *root)
1258 Path *outer_path = path->jpath.outerjoinpath;
1259 Path *inner_path = path->jpath.innerjoinpath;
1260 List *mergeclauses = path->path_mergeclauses;
1261 Oid *mergeFamilies = path->path_mergeFamilies;
1262 int *mergeStrategies = path->path_mergeStrategies;
1263 List *outersortkeys = path->outersortkeys;
1264 List *innersortkeys = path->innersortkeys;
1265 Cost startup_cost = 0;
1268 Selectivity merge_selec;
1269 QualCost merge_qual_cost;
1270 QualCost qp_qual_cost;
1271 RestrictInfo *firstclause;
1272 double outer_path_rows = PATH_ROWS(outer_path);
1273 double inner_path_rows = PATH_ROWS(inner_path);
1276 double mergejointuples,
1279 Selectivity outerscansel,
1281 Selectivity joininfactor;
1282 Path sort_path; /* dummy for result of cost_sort */
1284 if (!enable_mergejoin)
1285 startup_cost += disable_cost;
1288 * Compute cost and selectivity of the mergequals and qpquals (other
1289 * restriction clauses) separately. We use approx_selectivity here for
1290 * speed --- in most cases, any errors won't affect the result much.
1292 * Note: it's probably bogus to use the normal selectivity calculation
1293 * here when either the outer or inner path is a UniquePath.
1295 merge_selec = approx_selectivity(root, mergeclauses,
1296 path->jpath.jointype);
1297 cost_qual_eval(&merge_qual_cost, mergeclauses);
1298 cost_qual_eval(&qp_qual_cost, path->jpath.joinrestrictinfo);
1299 qp_qual_cost.startup -= merge_qual_cost.startup;
1300 qp_qual_cost.per_tuple -= merge_qual_cost.per_tuple;
1302 /* approx # tuples passing the merge quals */
1303 mergejointuples = clamp_row_est(merge_selec * outer_path_rows * inner_path_rows);
1306 * When there are equal merge keys in the outer relation, the mergejoin
1307 * must rescan any matching tuples in the inner relation. This means
1308 * re-fetching inner tuples. Our cost model for this is that a re-fetch
1309 * costs the same as an original fetch, which is probably an overestimate;
1310 * but on the other hand we ignore the bookkeeping costs of mark/restore.
1311 * Not clear if it's worth developing a more refined model.
1313 * The number of re-fetches can be estimated approximately as size of
1314 * merge join output minus size of inner relation. Assume that the
1315 * distinct key values are 1, 2, ..., and denote the number of values of
1316 * each key in the outer relation as m1, m2, ...; in the inner relation,
1317 * n1, n2, ... Then we have
1319 * size of join = m1 * n1 + m2 * n2 + ...
1321 * number of rescanned tuples = (m1 - 1) * n1 + (m2 - 1) * n2 + ...
1322 * = m1 * n1 + m2 * n2 + ... - (n1 + n2 + ...)
1323 * = size of join - size of inner relation
1325 * This equation works correctly for outer tuples having no inner match
1326 * (nk = 0), but not for inner tuples having no outer match (mk = 0); we
1327 * are effectively subtracting those from the number of rescanned tuples,
1328 * when we should not. Can we do better without expensive selectivity
1329 * computations?
1331 if (IsA(outer_path, UniquePath))
1332 rescannedtuples = 0;
1335 rescannedtuples = mergejointuples - inner_path_rows;
1336 /* Must clamp because of possible underestimate */
1337 if (rescannedtuples < 0)
1338 rescannedtuples = 0;
1340 /* We'll inflate inner run cost this much to account for rescanning */
1341 rescanratio = 1.0 + (rescannedtuples / inner_path_rows);
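/*
 * Illustrative numbers: if the join is expected to emit 1500 tuples and the
 * inner path has 1000 rows, about 500 inner tuples are re-fetched and
 * rescanratio becomes 1.5.
 */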
1344 * A merge join will stop as soon as it exhausts either input stream
1345 * (unless it's an outer join, in which case the outer side has to be
1346 * scanned all the way anyway). Estimate fraction of the left and right
1347 * inputs that will actually need to be scanned. We use only the first
1348 * (most significant) merge clause for this purpose.
1350 * Since this calculation is somewhat expensive, and will be the same for
1351 * all mergejoin paths associated with the merge clause, we cache the
1352 * results in the RestrictInfo node. XXX that won't work anymore once
1353 * we support multiple possible orderings!
1355 if (mergeclauses && path->jpath.jointype != JOIN_FULL)
1357 firstclause = (RestrictInfo *) linitial(mergeclauses);
1358 if (firstclause->left_mergescansel < 0) /* not computed yet? */
1359 mergejoinscansel(root, (Node *) firstclause->clause,
1362 &firstclause->left_mergescansel,
1363 &firstclause->right_mergescansel);
1365 if (bms_is_subset(firstclause->left_relids, outer_path->parent->relids))
1367 /* left side of clause is outer */
1368 outerscansel = firstclause->left_mergescansel;
1369 innerscansel = firstclause->right_mergescansel;
1373 /* left side of clause is inner */
1374 outerscansel = firstclause->right_mergescansel;
1375 innerscansel = firstclause->left_mergescansel;
1377 if (path->jpath.jointype == JOIN_LEFT)
1378 outerscansel = 1.0;
1379 else if (path->jpath.jointype == JOIN_RIGHT)
1380 innerscansel = 1.0;
1384 /* cope with clauseless or full mergejoin */
1385 outerscansel = innerscansel = 1.0;
1388 /* convert selectivity to row count; must scan at least one row */
1389 outer_rows = clamp_row_est(outer_path_rows * outerscansel);
1390 inner_rows = clamp_row_est(inner_path_rows * innerscansel);
1393 * Readjust scan selectivities to account for above rounding. This is
1394 * normally an insignificant effect, but when there are only a few rows in
1395 * the inputs, failing to do this makes for a large percentage error.
1397 outerscansel = outer_rows / outer_path_rows;
1398 innerscansel = inner_rows / inner_path_rows;
1400 /* cost of source data */
1402 if (outersortkeys) /* do we need to sort outer? */
1404 cost_sort(&sort_path,
1407 outer_path->total_cost,
1409 outer_path->parent->width);
1410 startup_cost += sort_path.startup_cost;
1411 run_cost += (sort_path.total_cost - sort_path.startup_cost)
1416 startup_cost += outer_path->startup_cost;
1417 run_cost += (outer_path->total_cost - outer_path->startup_cost)
1421 if (innersortkeys) /* do we need to sort inner? */
1423 cost_sort(&sort_path,
1426 inner_path->total_cost,
1428 inner_path->parent->width);
1429 startup_cost += sort_path.startup_cost;
1430 run_cost += (sort_path.total_cost - sort_path.startup_cost)
1431 * innerscansel * rescanratio;
1435 startup_cost += inner_path->startup_cost;
1436 run_cost += (inner_path->total_cost - inner_path->startup_cost)
1437 * innerscansel * rescanratio;
1443 * If we're doing JOIN_IN then we will stop outputting inner tuples for an
1444 * outer tuple as soon as we have one match. Account for the effects of
1445 * this by scaling down the cost estimates in proportion to the expected
1446 * output size. (This assumes that all the quals attached to the join are
1447 * IN quals, which should be true.)
1449 joininfactor = join_in_selectivity(&path->jpath, root);
1452 * The number of tuple comparisons needed is approximately number of outer
1453 * rows plus number of inner rows plus number of rescanned tuples (can we
1454 * refine this?). At each one, we need to evaluate the mergejoin quals.
1455 * NOTE: JOIN_IN mode does not save any work here, so do NOT include
1456 * joininfactor.
1458 startup_cost += merge_qual_cost.startup;
1459 run_cost += merge_qual_cost.per_tuple *
1460 (outer_rows + inner_rows * rescanratio);
1463 * For each tuple that gets through the mergejoin proper, we charge
1464 * cpu_tuple_cost plus the cost of evaluating additional restriction
1465 * clauses that are to be applied at the join. (This is pessimistic since
1466 * not all of the quals may get evaluated at each tuple.) This work is
1467 * skipped in JOIN_IN mode, so apply the factor.
1469 startup_cost += qp_qual_cost.startup;
1470 cpu_per_tuple = cpu_tuple_cost + qp_qual_cost.per_tuple;
1471 run_cost += cpu_per_tuple * mergejointuples * joininfactor;
1473 path->jpath.path.startup_cost = startup_cost;
1474 path->jpath.path.total_cost = startup_cost + run_cost;
1479 * Determines and returns the cost of joining two relations using the
1480 * hash join algorithm.
1482 * 'path' is already filled in except for the cost fields
1484 * Note: path's hashclauses should be a subset of the joinrestrictinfo list
1487 cost_hashjoin(HashPath *path, PlannerInfo *root)
1489 Path *outer_path = path->jpath.outerjoinpath;
1490 Path *inner_path = path->jpath.innerjoinpath;
1491 List *hashclauses = path->path_hashclauses;
1492 Cost startup_cost = 0;
1495 Selectivity hash_selec;
1496 QualCost hash_qual_cost;
1497 QualCost qp_qual_cost;
1498 double hashjointuples;
1499 double outer_path_rows = PATH_ROWS(outer_path);
1500 double inner_path_rows = PATH_ROWS(inner_path);
1501 int num_hashclauses = list_length(hashclauses);
1504 double virtualbuckets;
1505 Selectivity innerbucketsize;
1506 Selectivity joininfactor;
1509 if (!enable_hashjoin)
1510 startup_cost += disable_cost;
1513 * Compute cost and selectivity of the hashquals and qpquals (other
1514 * restriction clauses) separately. We use approx_selectivity here for
1515 * speed --- in most cases, any errors won't affect the result much.
1517 * Note: it's probably bogus to use the normal selectivity calculation
1518 * here when either the outer or inner path is a UniquePath.
1520 hash_selec = approx_selectivity(root, hashclauses,
1521 path->jpath.jointype);
1522 cost_qual_eval(&hash_qual_cost, hashclauses);
1523 cost_qual_eval(&qp_qual_cost, path->jpath.joinrestrictinfo);
1524 qp_qual_cost.startup -= hash_qual_cost.startup;
1525 qp_qual_cost.per_tuple -= hash_qual_cost.per_tuple;
1527 /* approx # tuples passing the hash quals */
1528 hashjointuples = clamp_row_est(hash_selec * outer_path_rows * inner_path_rows);
1530 /* cost of source data */
1531 startup_cost += outer_path->startup_cost;
1532 run_cost += outer_path->total_cost - outer_path->startup_cost;
1533 startup_cost += inner_path->total_cost;
1536 * Cost of computing hash function: must do it once per input tuple. We
1537 * charge one cpu_operator_cost for each column's hash function. Also,
1538 * tack on one cpu_tuple_cost per inner row, to model the costs of
1539 * inserting the row into the hashtable.
1541 * XXX when a hashclause is more complex than a single operator, we really
1542 * should charge the extra eval costs of the left or right side, as
1543 * appropriate, here. This seems more work than it's worth at the moment.
1545 startup_cost += (cpu_operator_cost * num_hashclauses + cpu_tuple_cost)
1546 * inner_path_rows;
1547 run_cost += cpu_operator_cost * num_hashclauses * outer_path_rows;
1549 /* Get hash table size that executor would use for inner relation */
1550 ExecChooseHashTableSize(inner_path_rows,
1551 inner_path->parent->width,
1554 virtualbuckets = (double) numbuckets * (double) numbatches;
1557 * Determine bucketsize fraction for inner relation. We use the smallest
1558 * bucketsize estimated for any individual hashclause; this is undoubtedly
1559 * conservative.
1561 * BUT: if inner relation has been unique-ified, we can assume it's good
1562 * for hashing. This is important both because it's the right answer, and
1563 * because we avoid contaminating the cache with a value that's wrong for
1564 * non-unique-ified paths.
1566 if (IsA(inner_path, UniquePath))
1567 innerbucketsize = 1.0 / virtualbuckets;
1570 innerbucketsize = 1.0;
1571 foreach(hcl, hashclauses)
1573 RestrictInfo *restrictinfo = (RestrictInfo *) lfirst(hcl);
1574 Selectivity thisbucketsize;
1576 Assert(IsA(restrictinfo, RestrictInfo));
1579 * First we have to figure out which side of the hashjoin clause
1580 * is the inner side.
1582 * Since we tend to visit the same clauses over and over when
1583 * planning a large query, we cache the bucketsize estimate in the
1584 * RestrictInfo node to avoid repeated lookups of statistics.
1586 if (bms_is_subset(restrictinfo->right_relids,
1587 inner_path->parent->relids))
1589 /* righthand side is inner */
1590 thisbucketsize = restrictinfo->right_bucketsize;
1591 if (thisbucketsize < 0)
1593 /* not cached yet */
1595 estimate_hash_bucketsize(root,
1596 get_rightop(restrictinfo->clause),
1598 restrictinfo->right_bucketsize = thisbucketsize;
1603 Assert(bms_is_subset(restrictinfo->left_relids,
1604 inner_path->parent->relids));
1605 /* lefthand side is inner */
1606 thisbucketsize = restrictinfo->left_bucketsize;
1607 if (thisbucketsize < 0)
1609 /* not cached yet */
1611 estimate_hash_bucketsize(root,
1612 get_leftop(restrictinfo->clause),
1614 restrictinfo->left_bucketsize = thisbucketsize;
1618 if (innerbucketsize > thisbucketsize)
1619 innerbucketsize = thisbucketsize;
1624 * If inner relation is too big then we will need to "batch" the join,
1625 * which implies writing and reading most of the tuples to disk an extra
1626 * time. Charge seq_page_cost per page, since the I/O should be nice and
1627 * sequential. Writing the inner rel counts as startup cost,
1628 * all the rest as run cost.
1632 double outerpages = page_size(outer_path_rows,
1633 outer_path->parent->width);
1634 double innerpages = page_size(inner_path_rows,
1635 inner_path->parent->width);
1637 startup_cost += seq_page_cost * innerpages;
1638 run_cost += seq_page_cost * (innerpages + 2 * outerpages);
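/*
 * Added note on the charges above (an interpretation, not original text):
 * the inner rel's pages are written once at startup and read back once at
 * run time, while the outer rel's pages are both written and read during
 * the run phase, hence innerpages + 2 * outerpages in the run cost.
 */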
1644 * If we're doing JOIN_IN then we will stop comparing inner tuples to an
1645 * outer tuple as soon as we have one match. Account for the effects of
1646 * this by scaling down the cost estimates in proportion to the expected
1647 * output size. (This assumes that all the quals attached to the join are
1648 * IN quals, which should be true.)
1650 joininfactor = join_in_selectivity(&path->jpath, root);
1653 * The number of tuple comparisons needed is the number of outer tuples
1654 * times the typical number of tuples in a hash bucket, which is the inner
1655 * relation size times its bucketsize fraction. At each one, we need to
1656 * evaluate the hashjoin quals. But actually, charging the full qual eval
1657 * cost at each tuple is pessimistic, since we don't evaluate the quals
1658 * unless the hash values match exactly. For lack of a better idea, halve
1659 * the cost estimate to allow for that.
1661 startup_cost += hash_qual_cost.startup;
1662 run_cost += hash_qual_cost.per_tuple *
1663 outer_path_rows * clamp_row_est(inner_path_rows * innerbucketsize) *
1664 joininfactor * 0.5;
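/*
 * Illustrative example: with 10,000 outer rows, 100,000 inner rows, and an
 * innerbucketsize of 0.001, each outer tuple is compared against about 100
 * bucket entries, i.e. roughly 1,000,000 comparisons before the halving
 * adjustment.
 */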
1667 * For each tuple that gets through the hashjoin proper, we charge
1668 * cpu_tuple_cost plus the cost of evaluating additional restriction
1669 * clauses that are to be applied at the join. (This is pessimistic since
1670 * not all of the quals may get evaluated at each tuple.)
1672 startup_cost += qp_qual_cost.startup;
1673 cpu_per_tuple = cpu_tuple_cost + qp_qual_cost.per_tuple;
1674 run_cost += cpu_per_tuple * hashjointuples * joininfactor;
1676 path->jpath.path.startup_cost = startup_cost;
1677 path->jpath.path.total_cost = startup_cost + run_cost;
1683 * Estimate the CPU costs of evaluating a WHERE clause.
1684 * The input can be either an implicitly-ANDed list of boolean
1685 * expressions, or a list of RestrictInfo nodes.
1686 * The result includes both a one-time (startup) component,
1687 * and a per-evaluation component.
1690 cost_qual_eval(QualCost *cost, List *quals)
1695 cost->per_tuple = 0;
1697 /* We don't charge any cost for the implicit ANDing at top level ... */
1701 Node *qual = (Node *) lfirst(l);
1704 * RestrictInfo nodes contain an eval_cost field reserved for this
1705 * routine's use, so that it's not necessary to evaluate the qual
1706 * clause's cost more than once. If the clause's cost hasn't been
1707 * computed yet, the field's startup value will contain -1.
1709 * If the RestrictInfo is marked pseudoconstant, it will be tested
1710 * only once, so treat its cost as all startup cost.
1712 if (qual && IsA(qual, RestrictInfo))
1714 RestrictInfo *rinfo = (RestrictInfo *) qual;
1716 if (rinfo->eval_cost.startup < 0)
1718 rinfo->eval_cost.startup = 0;
1719 rinfo->eval_cost.per_tuple = 0;
1720 cost_qual_eval_walker((Node *) rinfo->clause,
1722 if (rinfo->pseudoconstant)
1724 /* count one execution during startup */
1725 rinfo->eval_cost.startup += rinfo->eval_cost.per_tuple;
1726 rinfo->eval_cost.per_tuple = 0;
1729 cost->startup += rinfo->eval_cost.startup;
1730 cost->per_tuple += rinfo->eval_cost.per_tuple;
1734 /* If it's a bare expression, must always do it the hard way */
1735 cost_qual_eval_walker(qual, cost);
1741 cost_qual_eval_walker(Node *node, QualCost *total)
1747 * Our basic strategy is to charge one cpu_operator_cost for each operator
1748 * or function node in the given tree. Vars and Consts are charged zero,
1749 * and so are boolean operators (AND, OR, NOT). Simplistic, but a lot
1750 * better than no model at all.
1752 * Should we try to account for the possibility of short-circuit
1753 * evaluation of AND/OR?
1755 if (IsA(node, FuncExpr) ||
1756 IsA(node, OpExpr) ||
1757 IsA(node, DistinctExpr) ||
1758 IsA(node, NullIfExpr))
1759 total->per_tuple += cpu_operator_cost;
1760 else if (IsA(node, ScalarArrayOpExpr))
1763 * Estimate that the operator will be applied to about half of the
1764 * array elements before the answer is determined.
1766 ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) node;
1767 Node *arraynode = (Node *) lsecond(saop->args);
1769 total->per_tuple +=
1770 cpu_operator_cost * estimate_array_length(arraynode) * 0.5;
1772 else if (IsA(node, RowCompareExpr))
1774 /* Conservatively assume we will check all the columns */
1775 RowCompareExpr *rcexpr = (RowCompareExpr *) node;
1777 total->per_tuple += cpu_operator_cost * list_length(rcexpr->opnos);
1779 else if (IsA(node, SubLink))
1781 /* This routine should not be applied to un-planned expressions */
1782 elog(ERROR, "cannot handle unplanned sub-select");
1784 else if (IsA(node, SubPlan))
1787 * A subplan node in an expression typically indicates that the
1788 * subplan will be executed on each evaluation, so charge accordingly.
1789 * (Sub-selects that can be executed as InitPlans have already been
1790 * removed from the expression.)
1792 * An exception occurs when we have decided we can implement the
1793 * subplan by hashing.
1795 SubPlan *subplan = (SubPlan *) node;
1796 Plan *plan = subplan->plan;
1798 if (subplan->useHashTable)
1801 * If we are using a hash table for the subquery outputs, then the
1802 * cost of evaluating the query is a one-time cost. We charge one
1803 * cpu_operator_cost per tuple for the work of loading the
1804 * hashtable, too.
1806 total->startup += plan->total_cost +
1807 cpu_operator_cost * plan->plan_rows;
1810 * The per-tuple costs include the cost of evaluating the lefthand
1811 * expressions, plus the cost of probing the hashtable. Recursion
1812 * into the testexpr will handle the lefthand expressions
1813 * properly, and will count one cpu_operator_cost for each
1814 * comparison operator. That is probably too low for the probing
1815 * cost, but it's hard to make a better estimate, so live with it
1816 * for now.
1822 * Otherwise we will be rescanning the subplan output on each
1823 * evaluation. We need to estimate how much of the output we will
1824 * actually need to scan. NOTE: this logic should agree with the
1825 * estimates used by make_subplan() in plan/subselect.c.
1827 Cost plan_run_cost = plan->total_cost - plan->startup_cost;
1829 if (subplan->subLinkType == EXISTS_SUBLINK)
1831 /* we only need to fetch 1 tuple */
1832 total->per_tuple += plan_run_cost / plan->plan_rows;
1834 else if (subplan->subLinkType == ALL_SUBLINK ||
1835 subplan->subLinkType == ANY_SUBLINK)
1837 /* assume we need 50% of the tuples */
1838 total->per_tuple += 0.50 * plan_run_cost;
1839 /* also charge a cpu_operator_cost per row examined */
1840 total->per_tuple += 0.50 * plan->plan_rows * cpu_operator_cost;
1844 /* assume we need all tuples */
1845 total->per_tuple += plan_run_cost;
1849 * Also account for subplan's startup cost. If the subplan is
1850 * uncorrelated or undirect correlated, AND its topmost node is a
1851 * Sort or Material node, assume that we'll only need to pay its
1852 * startup cost once; otherwise assume we pay the startup cost
1855 if (subplan->parParam == NIL &&
1857 IsA(plan, Material)))
1858 total->startup += plan->startup_cost;
1860 total->per_tuple += plan->startup_cost;
1864 return expression_tree_walker(node, cost_qual_eval_walker,
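
/*
 * For illustration, with the default cpu_operator_cost of 0.0025: the qual
 * "a + b > 10 AND f(c)" contains two operator nodes (+ and >) and one
 * function node, so the walker charges 3 * 0.0025 = 0.0075 per tuple; the
 * AND itself and the Vars a, b, c are charged nothing.  For "x = ANY(arr)"
 * where arr is estimated to hold 10 elements, the ScalarArrayOpExpr branch
 * charges 10 * 0.5 = 5 operator evaluations, i.e. 0.0125 per tuple.
 */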

/*
 * approx_selectivity
 *      Quick-and-dirty estimation of clause selectivities.
 *      The input can be either an implicitly-ANDed list of boolean
 *      expressions, or a list of RestrictInfo nodes (typically the latter).
 *
 * This is quick-and-dirty because we bypass clauselist_selectivity, and
 * simply multiply the independent clause selectivities together.  Now
 * clauselist_selectivity often can't do any better than that anyhow, but
 * for some situations (such as range constraints) it is smarter.  However,
 * we can't effectively cache the results of clauselist_selectivity, whereas
 * the individual clause selectivities can be and are cached.
 *
 * Since we are only using the results to estimate how many potential
 * output tuples are generated and passed through qpqual checking, it
 * seems OK to live with the approximation.
 */
Selectivity
approx_selectivity(PlannerInfo *root, List *quals, JoinType jointype)
{
    Selectivity total = 1.0;
    ListCell   *l;

    foreach(l, quals)
    {
        Node       *qual = (Node *) lfirst(l);

        /* Note that clause_selectivity will be able to cache its result */
        total *= clause_selectivity(root, qual, 0, jointype);
    }
    return total;
}
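
/*
 * For illustration: given clauses with individually-cached selectivities of
 * 0.1 and 0.2, this returns 0.1 * 0.2 = 0.02.  For a correlated pair such as
 * "x > 10 AND x < 20", clauselist_selectivity could recognize the range
 * constraint and produce a better estimate, but the product of the cached
 * per-clause selectivities is close enough for the row-count purposes
 * described above.
 */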

/*
 * set_baserel_size_estimates
 *      Set the size estimates for the given base relation.
 *
 * The rel's targetlist and restrictinfo list must have been constructed
 * already.
 *
 * We set the following fields of the rel node:
 *  rows: the estimated number of output tuples (after applying
 *        restriction clauses).
 *  width: the estimated average output tuple width in bytes.
 *  baserestrictcost: estimated cost of evaluating baserestrictinfo clauses.
 */
void
set_baserel_size_estimates(PlannerInfo *root, RelOptInfo *rel)
{
    double      nrows;

    /* Should only be applied to base relations */
    Assert(rel->relid > 0);

    nrows = rel->tuples *
        clauselist_selectivity(root,
                               rel->baserestrictinfo,
                               0,
                               JOIN_INNER);

    rel->rows = clamp_row_est(nrows);

    cost_qual_eval(&rel->baserestrictcost, rel->baserestrictinfo);

    set_rel_width(root, rel);
}
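
/*
 * For illustration: a relation with rel->tuples = 100000 whose restriction
 * clauses have a combined selectivity of 0.005 gets rel->rows =
 * clamp_row_est(100000 * 0.005) = 500.  clamp_row_est also rounds the result
 * and never lets it fall below one row, which keeps later divisions by
 * rel->rows safe.
 */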

/*
 * set_joinrel_size_estimates
 *      Set the size estimates for the given join relation.
 *
 * The rel's targetlist must have been constructed already, and a
 * restriction clause list that matches the given component rels must
 * be provided.
 *
 * Since there is more than one way to make a joinrel for more than two
 * base relations, the results we get here could depend on which component
 * rel pair is provided.  In theory we should get the same answers no matter
 * which pair is provided; in practice, since the selectivity estimation
 * routines don't handle all cases equally well, we might not.  But there's
 * not much to be done about it.  (Would it make sense to repeat the
 * calculations for each pair of input rels that's encountered, and somehow
 * average the results?  Probably way more trouble than it's worth.)
 *
 * It's important that the results for symmetric JoinTypes be symmetric,
 * eg, (rel1, rel2, JOIN_LEFT) should produce the same result as (rel2,
 * rel1, JOIN_RIGHT).  Also, JOIN_IN should produce the same result as
 * JOIN_UNIQUE_INNER, likewise JOIN_REVERSE_IN == JOIN_UNIQUE_OUTER.
 *
 * We set only the rows field here.  The width field was already set by
 * build_joinrel_tlist, and baserestrictcost is not used for join rels.
 */
void
set_joinrel_size_estimates(PlannerInfo *root, RelOptInfo *rel,
                           RelOptInfo *outer_rel,
                           RelOptInfo *inner_rel,
                           JoinType jointype,
                           List *restrictlist)
{
    Selectivity jselec;
    Selectivity pselec;
    double      nrows;
    UniquePath *upath;

    /*
     * Compute joinclause selectivity.  Note that we are only considering
     * clauses that become restriction clauses at this join level; we are not
     * double-counting them because they were not considered in estimating the
     * sizes of the component rels.
     *
     * For an outer join, we have to distinguish the selectivity of the
     * join's own clauses (JOIN/ON conditions) from any clauses that were
     * "pushed down".  For inner joins we just count them all as joinclauses.
     */
    if (IS_OUTER_JOIN(jointype))
    {
        List       *joinquals = NIL;
        List       *pushedquals = NIL;
        ListCell   *l;

        /* Grovel through the clauses to separate into two lists */
        foreach(l, restrictlist)
        {
            RestrictInfo *rinfo = (RestrictInfo *) lfirst(l);

            Assert(IsA(rinfo, RestrictInfo));
            if (rinfo->is_pushed_down)
                pushedquals = lappend(pushedquals, rinfo);
            else
                joinquals = lappend(joinquals, rinfo);
        }

        /* Get the separate selectivities */
        jselec = clauselist_selectivity(root,
                                        joinquals,
                                        0,
                                        jointype);
        pselec = clauselist_selectivity(root,
                                        pushedquals,
                                        0,
                                        jointype);

        /* Avoid leaking a lot of ListCells */
        list_free(joinquals);
        list_free(pushedquals);
    }
    else
    {
        jselec = clauselist_selectivity(root,
                                        restrictlist,
                                        0,
                                        jointype);
        pselec = 0.0;           /* not used, keep compiler quiet */
    }

    /*
     * Basically, we multiply size of Cartesian product by selectivity.
     *
     * If we are doing an outer join, take that into account: the joinqual
     * selectivity has to be clamped using the knowledge that the output must
     * be at least as large as the non-nullable input.  However, any
     * pushed-down quals are applied after the outer join, so their
     * selectivity applies fully.
     *
     * For JOIN_IN and variants, the Cartesian product is figured with respect
     * to a unique-ified input, and then we can clamp to the size of the other
     * input.
     */
    switch (jointype)
    {
        case JOIN_INNER:
            nrows = outer_rel->rows * inner_rel->rows * jselec;
            break;
        case JOIN_LEFT:
            nrows = outer_rel->rows * inner_rel->rows * jselec;
            if (nrows < outer_rel->rows)
                nrows = outer_rel->rows;
            nrows *= pselec;
            break;
        case JOIN_RIGHT:
            nrows = outer_rel->rows * inner_rel->rows * jselec;
            if (nrows < inner_rel->rows)
                nrows = inner_rel->rows;
            nrows *= pselec;
            break;
        case JOIN_FULL:
            nrows = outer_rel->rows * inner_rel->rows * jselec;
            if (nrows < outer_rel->rows)
                nrows = outer_rel->rows;
            if (nrows < inner_rel->rows)
                nrows = inner_rel->rows;
            nrows *= pselec;
            break;
        case JOIN_IN:
        case JOIN_UNIQUE_INNER:
            upath = create_unique_path(root, inner_rel,
                                       inner_rel->cheapest_total_path);
            nrows = outer_rel->rows * upath->rows * jselec;
            if (nrows > outer_rel->rows)
                nrows = outer_rel->rows;
            break;
        case JOIN_REVERSE_IN:
        case JOIN_UNIQUE_OUTER:
            upath = create_unique_path(root, outer_rel,
                                       outer_rel->cheapest_total_path);
            nrows = upath->rows * inner_rel->rows * jselec;
            if (nrows > inner_rel->rows)
                nrows = inner_rel->rows;
            break;
        default:
            elog(ERROR, "unrecognized join type: %d", (int) jointype);
            nrows = 0;          /* keep compiler quiet */
            break;
    }

    rel->rows = clamp_row_est(nrows);
}
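
/*
 * Worked example for the JOIN_LEFT case, with made-up numbers: outer_rel has
 * 1000 rows, inner_rel has 100 rows, and the JOIN/ON clauses have jselec =
 * 0.0005.  The raw product 1000 * 100 * 0.0005 = 50 is smaller than the
 * outer side, so it is clamped up to 1000 rows (every outer row appears at
 * least once, null-extended if necessary).  A pushed-down qual with pselec =
 * 0.1 is then applied on top of that, giving a final estimate of 100 rows.
 */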

/*
 * join_in_selectivity
 *    Determines the factor by which a JOIN_IN join's result is expected
 *    to be smaller than an ordinary inner join.
 *
 * 'path' is already filled in except for the cost fields
 */
static Selectivity
join_in_selectivity(JoinPath *path, PlannerInfo *root)
{
    RelOptInfo *innerrel;
    UniquePath *innerunique;
    Selectivity selec;
    double      nrows;

    /* Return 1.0 whenever it's not JOIN_IN */
    if (path->jointype != JOIN_IN)
        return 1.0;

    /*
     * Return 1.0 if the inner side is already known unique.  The case where
     * the inner path is already a UniquePath probably cannot happen in
     * current usage, but check it anyway for completeness.  The interesting
     * case is where we've determined the inner relation itself is unique,
     * which we can check by looking at the rows estimate for its UniquePath.
     */
    if (IsA(path->innerjoinpath, UniquePath))
        return 1.0;
    innerrel = path->innerjoinpath->parent;
    innerunique = create_unique_path(root,
                                     innerrel,
                                     innerrel->cheapest_total_path);
    if (innerunique->rows >= innerrel->rows)
        return 1.0;

    /*
     * Compute same result set_joinrel_size_estimates would compute for
     * JOIN_INNER.  Note that we use the input rels' absolute size estimates,
     * not PATH_ROWS() which might be less; if we used PATH_ROWS() we'd be
     * double-counting the effects of any join clauses used in input scans.
     */
    selec = clauselist_selectivity(root,
                                   path->joinrestrictinfo,
                                   0,
                                   JOIN_INNER);
    nrows = path->outerjoinpath->parent->rows * innerrel->rows * selec;

    nrows = clamp_row_est(nrows);

    /* See if it's larger than the actual JOIN_IN size estimate */
    if (nrows > path->path.parent->rows)
        return path->path.parent->rows / nrows;
    else
        return 1.0;
}
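
/*
 * For illustration: if the plain inner-join estimate computed here comes out
 * to 5000 rows while the JOIN_IN relation's size estimate (computed by
 * set_joinrel_size_estimates against a unique-ified inner side) is 1000
 * rows, this returns 1000.0 / 5000 = 0.2, i.e. the IN semantics are expected
 * to discard about 80% of the raw join output.
 */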

/*
 * set_function_size_estimates
 *      Set the size estimates for a base relation that is a function call.
 *
 * The rel's targetlist and restrictinfo list must have been constructed
 * already.
 *
 * We set the same fields as set_baserel_size_estimates.
 */
void
set_function_size_estimates(PlannerInfo *root, RelOptInfo *rel)
{
    RangeTblEntry *rte;

    /* Should only be applied to base relations that are functions */
    Assert(rel->relid > 0);
    rte = rt_fetch(rel->relid, root->parse->rtable);
    Assert(rte->rtekind == RTE_FUNCTION);

    /*
     * Estimate number of rows the function itself will return.
     *
     * XXX no idea how to do this yet; but we can at least check whether
     * function returns set or not...
     */
    if (expression_returns_set(rte->funcexpr))
        rel->tuples = 1000;
    else
        rel->tuples = 1;

    /* Now estimate number of output rows, etc */
    set_baserel_size_estimates(root, rel);
}
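
/*
 * For illustration: a FROM-clause call such as generate_series(1, 10) is
 * estimated at 1000 rows here, because all we know at this point is that the
 * function returns a set; a non-set-returning function in FROM is estimated
 * at a single row.
 */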

/*
 * set_values_size_estimates
 *      Set the size estimates for a base relation that is a values list.
 *
 * The rel's targetlist and restrictinfo list must have been constructed
 * already.
 *
 * We set the same fields as set_baserel_size_estimates.
 */
void
set_values_size_estimates(PlannerInfo *root, RelOptInfo *rel)
{
    RangeTblEntry *rte;

    /* Should only be applied to base relations that are values lists */
    Assert(rel->relid > 0);
    rte = rt_fetch(rel->relid, root->parse->rtable);
    Assert(rte->rtekind == RTE_VALUES);

    /*
     * Estimate number of rows the values list will return.  We know this
     * precisely based on the list length (well, barring set-returning
     * functions in list items, but that's a refinement not catered for
     * anywhere else either).
     */
    rel->tuples = list_length(rte->values_lists);

    /* Now estimate number of output rows, etc */
    set_baserel_size_estimates(root, rel);
}
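
/*
 * For illustration: VALUES (1, 'a'), (2, 'b'), (3, 'c') has a three-entry
 * values_lists, so rel->tuples is set to exactly 3 before any restriction
 * clauses are applied by set_baserel_size_estimates.
 */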

/*
 * set_rel_width
 *      Set the estimated output width of a base relation.
 *
 * NB: this works best on plain relations because it prefers to look at
 * real Vars.  It will fail to make use of pg_statistic info when applied
 * to a subquery relation, even if the subquery outputs are simple vars
 * that we could have gotten info for.  Is it worth trying to be smarter
 * about subqueries?
 *
 * The per-attribute width estimates are cached for possible re-use while
 * building join relations.
 */
static void
set_rel_width(PlannerInfo *root, RelOptInfo *rel)
{
    int32       tuple_width = 0;
    ListCell   *tllist;

    foreach(tllist, rel->reltargetlist)
    {
        Var        *var = (Var *) lfirst(tllist);
        int         ndx;
        Oid         relid;
        int32       item_width;

        /* For now, punt on whole-row child Vars */
        if (!IsA(var, Var))
        {
            tuple_width += 32;  /* arbitrary */
            continue;
        }
        ndx = var->varattno - rel->min_attr;
        /* The width probably hasn't been cached yet, but may as well check */
        if (rel->attr_widths[ndx] > 0)
        {
            tuple_width += rel->attr_widths[ndx];
            continue;
        }
        relid = getrelid(var->varno, root->parse->rtable);
        if (relid != InvalidOid)
        {
            item_width = get_attavgwidth(relid, var->varattno);
            if (item_width > 0)
            {
                rel->attr_widths[ndx] = item_width;
                tuple_width += item_width;
                continue;
            }
        }
        /*
         * Not a plain relation, or can't find statistics for it.  Estimate
         * using just the type info.
         */
        item_width = get_typavgwidth(var->vartype, var->vartypmod);
        Assert(item_width > 0);
        rel->attr_widths[ndx] = item_width;
        tuple_width += item_width;
    }
    Assert(tuple_width >= 0);
    rel->width = tuple_width;
}
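
/*
 * For illustration: a targetlist of three int4 Vars whose pg_statistic
 * entries report an average width of 4 bytes each yields rel->width = 12.
 * A column with no statistics (say, on a freshly created table) falls back
 * to get_typavgwidth, which for a fixed-length type is simply the type's
 * length.
 */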

/*
 * relation_byte_size
 *    Estimate the storage space in bytes for a given number of tuples
 *    of a given width (size in bytes).
 */
static double
relation_byte_size(double tuples, int width)
{
    return tuples * (MAXALIGN(width) + MAXALIGN(sizeof(HeapTupleHeaderData)));
}

/*
 * page_size
 *    Returns an estimate of the number of pages covered by a given
 *    number of tuples of a given width (size in bytes).
 */
static double
page_size(double tuples, int width)
{
    return ceil(relation_byte_size(tuples, width) / BLCKSZ);
}
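
/*
 * Worked example, assuming the common case of 8K BLCKSZ, 8-byte MAXALIGN,
 * and a 23-byte heap tuple header (which MAXALIGNs to 24): for 1000 tuples
 * of width 100, relation_byte_size gives 1000 * (104 + 24) = 128000 bytes,
 * and page_size gives ceil(128000 / 8192) = 16 pages.  Neither routine
 * accounts for per-page overhead such as line pointers, so these are
 * deliberately rough figures.
 */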