1 /*-------------------------------------------------------------------------
4 * Routines to compute (and set) relation sizes and path costs
6 * Path costs are measured in arbitrary units established by these basic parameters:
9 * seq_page_cost Cost of a sequential page fetch
10 * random_page_cost Cost of a non-sequential page fetch
11 * cpu_tuple_cost Cost of typical CPU time to process a tuple
12 * cpu_index_tuple_cost Cost of typical CPU time to process an index tuple
13 * cpu_operator_cost Cost of CPU time to execute an operator or function
15 * We expect that the kernel will typically do some amount of read-ahead
16 * optimization; this in conjunction with seek costs means that seq_page_cost
17 * is normally considerably less than random_page_cost. (However, if the
18 * database is fully cached in RAM, it is reasonable to set them equal.)
20 * We also use a rough estimate "effective_cache_size" of the number of
21 * disk pages in Postgres + OS-level disk cache. (We can't simply use
22 * NBuffers for this purpose because that would ignore the effects of
23 * the kernel's disk cache.)
25 * Obviously, taking constants for these values is an oversimplification,
26 * but it's tough enough to get any useful estimates even at this level of
27 * detail. Note that all of these parameters are user-settable, in case
28 * the default values are drastically off for a particular platform.
30 * seq_page_cost and random_page_cost can also be overridden for an individual
31 * tablespace, in case some data is on a fast disk and other data is on a slow
32 * disk. Per-tablespace overrides never apply to temporary work files such as
33 * an external sort or a materialize node that overflows work_mem.
35 * We compute two separate costs for each path:
36 * total_cost: total estimated cost to fetch all tuples
37 * startup_cost: cost that is expended before first tuple is fetched
38 * In some scenarios, such as when there is a LIMIT or we are implementing
39 * an EXISTS(...) sub-select, it is not necessary to fetch all tuples of the
40 * path's result. A caller can estimate the cost of fetching a partial
41 * result by interpolating between startup_cost and total_cost. In detail:
42 * actual_cost = startup_cost +
43 * (total_cost - startup_cost) * tuples_to_fetch / path->rows;
44 * Note that a base relation's rows count (and, by extension, plan_rows for
45 * plan nodes below the LIMIT node) is set without regard to any LIMIT, so
46 * that this equation works properly. (Also, these routines guarantee not to
47 * set the rows count to zero, so there will be no zero divide.) The LIMIT is
48 * applied as a top-level plan node.
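 *
 * As a purely illustrative example (the numbers are invented for this
 * comment): with startup_cost = 10, total_cost = 110 and path->rows = 1000,
 * fetching only 100 tuples would be estimated at
 *		10 + (110 - 10) * 100 / 1000 = 20
 * which is roughly what a LIMIT 100 sitting above this path would charge.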
50 * For largely historical reasons, most of the routines in this module use
51 * the passed result Path only to store their results (rows, startup_cost and
52 * total_cost) into. All the input data they need is passed as separate
53 * parameters, even though much of it could be extracted from the Path.
54 * An exception is made for the cost_XXXjoin() routines, which expect all
55 * the other fields of the passed XXXPath to be filled in, and similarly
56 * cost_index() assumes the passed IndexPath is valid except for its output values.
60 * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
61 * Portions Copyright (c) 1994, Regents of the University of California
64 * src/backend/optimizer/path/costsize.c
66 *-------------------------------------------------------------------------
72 #include <float.h> /* for _isnan */
76 #include "access/htup_details.h"
77 #include "access/tsmapi.h"
78 #include "executor/executor.h"
79 #include "executor/nodeHash.h"
80 #include "miscadmin.h"
81 #include "nodes/nodeFuncs.h"
82 #include "optimizer/clauses.h"
83 #include "optimizer/cost.h"
84 #include "optimizer/pathnode.h"
85 #include "optimizer/paths.h"
86 #include "optimizer/placeholder.h"
87 #include "optimizer/plancat.h"
88 #include "optimizer/planmain.h"
89 #include "optimizer/restrictinfo.h"
90 #include "parser/parsetree.h"
91 #include "utils/lsyscache.h"
92 #include "utils/selfuncs.h"
93 #include "utils/spccache.h"
94 #include "utils/tuplesort.h"
97 #define LOG2(x) (log(x) / 0.693147180559945)
100 double seq_page_cost = DEFAULT_SEQ_PAGE_COST;
101 double random_page_cost = DEFAULT_RANDOM_PAGE_COST;
102 double cpu_tuple_cost = DEFAULT_CPU_TUPLE_COST;
103 double cpu_index_tuple_cost = DEFAULT_CPU_INDEX_TUPLE_COST;
104 double cpu_operator_cost = DEFAULT_CPU_OPERATOR_COST;
106 int effective_cache_size = DEFAULT_EFFECTIVE_CACHE_SIZE;
108 Cost disable_cost = 1.0e10;
110 bool enable_seqscan = true;
111 bool enable_indexscan = true;
112 bool enable_indexonlyscan = true;
113 bool enable_bitmapscan = true;
114 bool enable_tidscan = true;
115 bool enable_sort = true;
116 bool enable_hashagg = true;
117 bool enable_nestloop = true;
118 bool enable_material = true;
119 bool enable_mergejoin = true;
120 bool enable_hashjoin = true;
126 } cost_qual_eval_context;
128 static List *extract_nonindex_conditions(List *qual_clauses, List *indexquals);
129 static MergeScanSelCache *cached_scansel(PlannerInfo *root,
132 static void cost_rescan(PlannerInfo *root, Path *path,
133 Cost *rescan_startup_cost, Cost *rescan_total_cost);
134 static bool cost_qual_eval_walker(Node *node, cost_qual_eval_context *context);
135 static void get_restriction_qual_cost(PlannerInfo *root, RelOptInfo *baserel,
136 ParamPathInfo *param_info,
137 QualCost *qpqual_cost);
138 static bool has_indexed_join_quals(NestPath *joinpath);
139 static double approx_tuple_count(PlannerInfo *root, JoinPath *path,
141 static double calc_joinrel_size_estimate(PlannerInfo *root,
144 SpecialJoinInfo *sjinfo,
146 static void set_rel_width(PlannerInfo *root, RelOptInfo *rel);
147 static double relation_byte_size(double tuples, int width);
148 static double page_size(double tuples, int width);
153 * Force a row-count estimate to a sane value.
156 clamp_row_est(double nrows)
159 * Force estimate to be at least one row, to make explain output look
160 * better and to avoid possible divide-by-zero when interpolating costs.
161 * Make it an integer, too.
174 * Determines and returns the cost of scanning a relation sequentially.
176 * 'baserel' is the relation to be scanned
177 * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
180 cost_seqscan(Path *path, PlannerInfo *root,
181 RelOptInfo *baserel, ParamPathInfo *param_info)
183 Cost startup_cost = 0;
185 double spc_seq_page_cost;
186 QualCost qpqual_cost;
189 /* Should only be applied to base relations */
190 Assert(baserel->relid > 0);
191 Assert(baserel->rtekind == RTE_RELATION);
193 /* Mark the path with the correct row estimate */
195 path->rows = param_info->ppi_rows;
197 path->rows = baserel->rows;
200 startup_cost += disable_cost;
202 /* fetch estimated page cost for tablespace containing table */
203 get_tablespace_page_costs(baserel->reltablespace,
210 run_cost += spc_seq_page_cost * baserel->pages;
213 get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
215 startup_cost += qpqual_cost.startup;
216 cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
217 run_cost += cpu_per_tuple * baserel->tuples;
219 path->startup_cost = startup_cost;
220 path->total_cost = startup_cost + run_cost;
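
/*
 * Illustrative sketch (not part of the original file): the essence of the
 * cost_seqscan() arithmetic above, reduced to plain numbers.  The parameter
 * names are hypothetical; the real code obtains them from the RelOptInfo,
 * the tablespace (spc_seq_page_cost), and get_restriction_qual_cost().
 */
static double
seqscan_run_cost_sketch(double pages, double tuples,
						double spc_seq_page_cost,
						double qual_cost_per_tuple)
{
	double		run_cost = 0.0;

	/* disk costs: one sequential page fetch per page */
	run_cost += spc_seq_page_cost * pages;
	/* CPU costs: per-tuple overhead plus qual evaluation for every tuple */
	run_cost += (cpu_tuple_cost + qual_cost_per_tuple) * tuples;
	/* startup cost is essentially just the quals' one-time startup cost */
	return run_cost;
}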
225 * Determines and returns the cost of scanning a relation using sampling.
227 * 'baserel' is the relation to be scanned
228 * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
231 cost_samplescan(Path *path, PlannerInfo *root,
232 RelOptInfo *baserel, ParamPathInfo *param_info)
234 Cost startup_cost = 0;
237 TableSampleClause *tsc;
239 double spc_seq_page_cost,
240 spc_random_page_cost,
242 QualCost qpqual_cost;
245 /* Should only be applied to base relations with tablesample clauses */
246 Assert(baserel->relid > 0);
247 rte = planner_rt_fetch(baserel->relid, root);
248 Assert(rte->rtekind == RTE_RELATION);
249 tsc = rte->tablesample;
251 tsm = GetTsmRoutine(tsc->tsmhandler);
253 /* Mark the path with the correct row estimate */
255 path->rows = param_info->ppi_rows;
257 path->rows = baserel->rows;
259 /* fetch estimated page cost for tablespace containing table */
260 get_tablespace_page_costs(baserel->reltablespace,
261 &spc_random_page_cost,
264 /* if NextSampleBlock is used, assume random access, else sequential */
265 spc_page_cost = (tsm->NextSampleBlock != NULL) ?
266 spc_random_page_cost : spc_seq_page_cost;
269 * disk costs (recall that baserel->pages has already been set to the
270 * number of pages the sampling method will visit)
272 run_cost += spc_page_cost * baserel->pages;
275 * CPU costs (recall that baserel->tuples has already been set to the
276 * number of tuples the sampling method will select). Note that we ignore
277 * execution cost of the TABLESAMPLE parameter expressions; they will be
278 * evaluated only once per scan, and in most usages they'll likely be
279 * simple constants anyway. We also don't charge anything for the
280 * calculations the sampling method might do internally.
282 get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
284 startup_cost += qpqual_cost.startup;
285 cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
286 run_cost += cpu_per_tuple * baserel->tuples;
288 path->startup_cost = startup_cost;
289 path->total_cost = startup_cost + run_cost;
294 * Determines and returns the cost of scanning a relation using an index.
296 * 'path' describes the indexscan under consideration, and is complete
297 * except for the fields to be set by this routine
298 * 'loop_count' is the number of repetitions of the indexscan to factor into
299 * estimates of caching behavior
301 * In addition to rows, startup_cost and total_cost, cost_index() sets the
302 * path's indextotalcost and indexselectivity fields. These values will be
303 * needed if the IndexPath is used in a BitmapIndexScan.
305 * NOTE: path->indexquals must contain only clauses usable as index
306 * restrictions. Any additional quals evaluated as qpquals may reduce the
307 * number of returned tuples, but they won't reduce the number of tuples
308 * we have to fetch from the table, so they don't reduce the scan cost.
311 cost_index(IndexPath *path, PlannerInfo *root, double loop_count)
313 IndexOptInfo *index = path->indexinfo;
314 RelOptInfo *baserel = index->rel;
315 bool indexonly = (path->path.pathtype == T_IndexOnlyScan);
317 Cost startup_cost = 0;
319 Cost indexStartupCost;
321 Selectivity indexSelectivity;
322 double indexCorrelation,
324 double spc_seq_page_cost,
325 spc_random_page_cost;
328 QualCost qpqual_cost;
330 double tuples_fetched;
331 double pages_fetched;
333 /* Should only be applied to base relations */
334 Assert(IsA(baserel, RelOptInfo) &&
335 IsA(index, IndexOptInfo));
336 Assert(baserel->relid > 0);
337 Assert(baserel->rtekind == RTE_RELATION);
340 * Mark the path with the correct row estimate, and identify which quals
341 * will need to be enforced as qpquals.
343 if (path->path.param_info)
345 path->path.rows = path->path.param_info->ppi_rows;
346 /* qpquals come from the rel's restriction clauses and ppi_clauses */
347 qpquals = list_concat(
348 extract_nonindex_conditions(baserel->baserestrictinfo,
350 extract_nonindex_conditions(path->path.param_info->ppi_clauses,
355 path->path.rows = baserel->rows;
356 /* qpquals come from just the rel's restriction clauses */
357 qpquals = extract_nonindex_conditions(baserel->baserestrictinfo,
361 if (!enable_indexscan)
362 startup_cost += disable_cost;
363 /* we don't need to check enable_indexonlyscan; indxpath.c does that */
366 * Call index-access-method-specific code to estimate the processing cost
367 * for scanning the index, as well as the selectivity of the index (ie,
368 * the fraction of main-table tuples we will have to retrieve) and its
369 * correlation to the main-table tuple order.
371 OidFunctionCall7(index->amcostestimate,
372 PointerGetDatum(root),
373 PointerGetDatum(path),
374 Float8GetDatum(loop_count),
375 PointerGetDatum(&indexStartupCost),
376 PointerGetDatum(&indexTotalCost),
377 PointerGetDatum(&indexSelectivity),
378 PointerGetDatum(&indexCorrelation));
381 * Save amcostestimate's results for possible use in bitmap scan planning.
382 * We don't bother to save indexStartupCost or indexCorrelation, because a
383 * bitmap scan doesn't care about either.
385 path->indextotalcost = indexTotalCost;
386 path->indexselectivity = indexSelectivity;
388 /* all costs for touching index itself included here */
389 startup_cost += indexStartupCost;
390 run_cost += indexTotalCost - indexStartupCost;
392 /* estimate number of main-table tuples fetched */
393 tuples_fetched = clamp_row_est(indexSelectivity * baserel->tuples);
395 /* fetch estimated page costs for tablespace containing table */
396 get_tablespace_page_costs(baserel->reltablespace,
397 &spc_random_page_cost,
401 * Estimate number of main-table pages fetched, and compute I/O cost.
403 * When the index ordering is uncorrelated with the table ordering,
404 * we use an approximation proposed by Mackert and Lohman (see
405 * index_pages_fetched() for details) to compute the number of pages
406 * fetched, and then charge spc_random_page_cost per page fetched.
408 * When the index ordering is exactly correlated with the table ordering
409 * (just after a CLUSTER, for example), the number of pages fetched should
410 * be exactly selectivity * table_size. What's more, all but the first
411 * will be sequential fetches, not the random fetches that occur in the
412 * uncorrelated case. So if the number of pages is more than 1, we
414 * spc_random_page_cost + (pages_fetched - 1) * spc_seq_page_cost
415 * For partially-correlated indexes, we ought to charge somewhere between
416 * these two estimates. We currently interpolate linearly between the
417 * estimates based on the correlation squared (XXX is that appropriate?).
419 * If it's an index-only scan, then we will not need to fetch any heap
420 * pages for which the visibility map shows all tuples are visible.
421 * Hence, reduce the estimated number of heap fetches accordingly.
422 * We use the measured fraction of the entire heap that is all-visible,
423 * which might not be particularly relevant to the subset of the heap
424 * that this query will fetch; but it's not clear how to do better.
430 * For repeated indexscans, the appropriate estimate for the
431 * uncorrelated case is to scale up the number of tuples fetched in
432 * the Mackert and Lohman formula by the number of scans, so that we
433 * estimate the number of pages fetched by all the scans; then
434 * pro-rate the costs for one scan. In this case we assume all the
435 * fetches are random accesses.
437 pages_fetched = index_pages_fetched(tuples_fetched * loop_count,
439 (double) index->pages,
443 pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));
445 max_IO_cost = (pages_fetched * spc_random_page_cost) / loop_count;
448 * In the perfectly correlated case, the number of pages touched by
449 * each scan is selectivity * table_size, and we can use the Mackert
450 * and Lohman formula at the page level to estimate how much work is
451 * saved by caching across scans. We still assume all the fetches are
452 * random, though, which is an overestimate that's hard to correct for
453 * without double-counting the cache effects. (But in most cases
454 * where such a plan is actually interesting, only one page would get
455 * fetched per scan anyway, so it shouldn't matter much.)
457 pages_fetched = ceil(indexSelectivity * (double) baserel->pages);
459 pages_fetched = index_pages_fetched(pages_fetched * loop_count,
461 (double) index->pages,
465 pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));
467 min_IO_cost = (pages_fetched * spc_random_page_cost) / loop_count;
472 * Normal case: apply the Mackert and Lohman formula, and then
473 * interpolate between that and the correlation-derived result.
475 pages_fetched = index_pages_fetched(tuples_fetched,
477 (double) index->pages,
481 pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));
483 /* max_IO_cost is for the perfectly uncorrelated case (csquared=0) */
484 max_IO_cost = pages_fetched * spc_random_page_cost;
486 /* min_IO_cost is for the perfectly correlated case (csquared=1) */
487 pages_fetched = ceil(indexSelectivity * (double) baserel->pages);
490 pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));
492 if (pages_fetched > 0)
494 min_IO_cost = spc_random_page_cost;
495 if (pages_fetched > 1)
496 min_IO_cost += (pages_fetched - 1) * spc_seq_page_cost;
503 * Now interpolate based on estimated index order correlation to get total
504 * disk I/O cost for main table accesses.
506 csquared = indexCorrelation * indexCorrelation;
508 run_cost += max_IO_cost + csquared * (min_IO_cost - max_IO_cost);
511 * Estimate CPU costs per tuple.
513 * What we want here is cpu_tuple_cost plus the evaluation costs of any
514 * qual clauses that we have to evaluate as qpquals.
516 cost_qual_eval(&qpqual_cost, qpquals, root);
518 startup_cost += qpqual_cost.startup;
519 cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
521 run_cost += cpu_per_tuple * tuples_fetched;
523 path->path.startup_cost = startup_cost;
524 path->path.total_cost = startup_cost + run_cost;
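
/*
 * Illustrative sketch (not part of the original file): how cost_index()
 * above blends the two I/O estimates.  max_IO_cost corresponds to a
 * completely uncorrelated index (csquared = 0), min_IO_cost to a perfectly
 * correlated one (csquared = 1); the inputs are stand-ins for the values
 * computed in cost_index().
 */
static double
index_io_cost_sketch(double max_IO_cost, double min_IO_cost,
					 double indexCorrelation)
{
	double		csquared = indexCorrelation * indexCorrelation;

	/* linear interpolation on the squared correlation, as in cost_index() */
	return max_IO_cost + csquared * (min_IO_cost - max_IO_cost);
}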
528 * extract_nonindex_conditions
530 * Given a list of quals to be enforced in an indexscan, extract the ones that
531 * will have to be applied as qpquals (ie, the index machinery won't handle
532 * them). The actual rules for this appear in create_indexscan_plan() in
533 * createplan.c, but the full rules are fairly expensive and we don't want to
534 * go to that much effort for index paths that don't get selected for the
535 * final plan. So we approximate it as quals that don't appear directly in
536 * indexquals and also are not redundant children of the same EquivalenceClass
537 * as some indexqual. This method neglects some infrequently-relevant
538 * considerations such as clauses that needn't be checked because they are
539 * implied by a partial index's predicate. It does not seem worth the cycles
540 * to try to factor those things in at this stage, even though createplan.c
541 * will take pains to remove such unnecessary clauses from the qpquals list if
542 * this path is selected for use.
545 extract_nonindex_conditions(List *qual_clauses, List *indexquals)
550 foreach(lc, qual_clauses)
552 RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
554 Assert(IsA(rinfo, RestrictInfo));
555 if (rinfo->pseudoconstant)
556 continue; /* we may drop pseudoconstants here */
557 if (list_member_ptr(indexquals, rinfo))
558 continue; /* simple duplicate */
559 if (is_redundant_derived_clause(rinfo, indexquals))
560 continue; /* derived from same EquivalenceClass */
561 /* ... skip the predicate proof attempts createplan.c will try ... */
562 result = lappend(result, rinfo);
568 * index_pages_fetched
569 * Estimate the number of pages actually fetched after accounting for
572 * We use an approximation proposed by Mackert and Lohman, "Index Scans
573 * Using a Finite LRU Buffer: A Validated I/O Model", ACM Transactions
574 * on Database Systems, Vol. 14, No. 3, September 1989, Pages 401-424.
575 * The Mackert and Lohman approximation is that the number of pages fetched is
578 * min(2TNs/(2T+Ns), T) when T <= b
579 * 2TNs/(2T+Ns) when T > b and Ns <= 2Tb/(2T-b)
580 * b + (Ns - 2Tb/(2T-b))*(T-b)/T when T > b and Ns > 2Tb/(2T-b)
582 * where T = # pages in table
583 * N = # tuples in table
584 * s = selectivity = fraction of table to be scanned
585 * b = # buffer pages available (we include kernel space here)
587 * We assume that effective_cache_size is the total number of buffer pages
588 * available for the whole query, and pro-rate that space across all the
589 * tables in the query and the index currently under consideration. (This
590 * ignores space needed for other indexes used by the query, but since we
591 * don't know which indexes will get used, we can't estimate that very well;
592 * and in any case counting all the tables may well be an overestimate, since
593 * depending on the join plan not all the tables may be scanned concurrently.)
595 * The product Ns is the number of tuples fetched; we pass in that
596 * product rather than calculating it here. "pages" is the number of pages
597 * in the object under consideration (either an index or a table).
598 * "index_pages" is the amount to add to the total table space, which was
599 * computed for us by query_planner.
601 * Caller is expected to have ensured that tuples_fetched is greater than zero
602 * and rounded to integer (see clamp_row_est). The result will likewise be
603 * greater than zero and integral.
606 index_pages_fetched(double tuples_fetched, BlockNumber pages,
607 double index_pages, PlannerInfo *root)
609 double pages_fetched;
614 /* T is # pages in table, but don't allow it to be zero */
615 T = (pages > 1) ? (double) pages : 1.0;
617 /* Compute number of pages assumed to be competing for cache space */
618 total_pages = root->total_table_pages + index_pages;
619 total_pages = Max(total_pages, 1.0);
620 Assert(T <= total_pages);
622 /* b is pro-rated share of effective_cache_size */
623 b = (double) effective_cache_size * T / total_pages;
625 /* force it positive and integral */
631 /* This part is the Mackert and Lohman formula */
635 (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
636 if (pages_fetched >= T)
639 pages_fetched = ceil(pages_fetched);
645 lim = (2.0 * T * b) / (2.0 * T - b);
646 if (tuples_fetched <= lim)
649 (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
654 b + (tuples_fetched - lim) * (T - b) / T;
656 pages_fetched = ceil(pages_fetched);
658 return pages_fetched;
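
/*
 * Illustrative sketch (not part of the original file): the Mackert and
 * Lohman approximation from the comment above, written out with the
 * comment's own variable names (T = pages in table, Ns = tuples fetched,
 * b = buffer pages available).  index_pages_fetched() is the real
 * implementation; this mirrors its three cases.  Relies on ceil() from
 * <math.h>, which this file already uses.
 */
static double
mackert_lohman_sketch(double T, double Ns, double b)
{
	double		pages;

	if (T <= b)
	{
		/* min(2TNs/(2T+Ns), T) */
		pages = (2.0 * T * Ns) / (2.0 * T + Ns);
		if (pages > T)
			pages = T;
	}
	else
	{
		double		lim = (2.0 * T * b) / (2.0 * T - b);

		if (Ns <= lim)
			pages = (2.0 * T * Ns) / (2.0 * T + Ns);
		else
			pages = b + (Ns - lim) * (T - b) / T;
	}
	return ceil(pages);
}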
662 * get_indexpath_pages
663 * Determine the total size of the indexes used in a bitmap index path.
665 * Note: if the same index is used more than once in a bitmap tree, we will
666 * count it multiple times, which perhaps is the wrong thing ... but it's
667 * not completely clear, and detecting duplicates is difficult, so ignore it for now.
671 get_indexpath_pages(Path *bitmapqual)
676 if (IsA(bitmapqual, BitmapAndPath))
678 BitmapAndPath *apath = (BitmapAndPath *) bitmapqual;
680 foreach(l, apath->bitmapquals)
682 result += get_indexpath_pages((Path *) lfirst(l));
685 else if (IsA(bitmapqual, BitmapOrPath))
687 BitmapOrPath *opath = (BitmapOrPath *) bitmapqual;
689 foreach(l, opath->bitmapquals)
691 result += get_indexpath_pages((Path *) lfirst(l));
694 else if (IsA(bitmapqual, IndexPath))
696 IndexPath *ipath = (IndexPath *) bitmapqual;
698 result = (double) ipath->indexinfo->pages;
701 elog(ERROR, "unrecognized node type: %d", nodeTag(bitmapqual));
707 * cost_bitmap_heap_scan
708 * Determines and returns the cost of scanning a relation using a bitmap
709 * index-then-heap plan.
711 * 'baserel' is the relation to be scanned
712 * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
713 * 'bitmapqual' is a tree of IndexPaths, BitmapAndPaths, and BitmapOrPaths
714 * 'loop_count' is the number of repetitions of the indexscan to factor into
715 * estimates of caching behavior
717 * Note: the component IndexPaths in bitmapqual should have been costed
718 * using the same loop_count.
721 cost_bitmap_heap_scan(Path *path, PlannerInfo *root, RelOptInfo *baserel,
722 ParamPathInfo *param_info,
723 Path *bitmapqual, double loop_count)
725 Cost startup_cost = 0;
728 Selectivity indexSelectivity;
729 QualCost qpqual_cost;
732 double tuples_fetched;
733 double pages_fetched;
734 double spc_seq_page_cost,
735 spc_random_page_cost;
738 /* Should only be applied to base relations */
739 Assert(IsA(baserel, RelOptInfo));
740 Assert(baserel->relid > 0);
741 Assert(baserel->rtekind == RTE_RELATION);
743 /* Mark the path with the correct row estimate */
745 path->rows = param_info->ppi_rows;
747 path->rows = baserel->rows;
749 if (!enable_bitmapscan)
750 startup_cost += disable_cost;
753 * Fetch total cost of obtaining the bitmap, as well as its total
756 cost_bitmap_tree_node(bitmapqual, &indexTotalCost, &indexSelectivity);
758 startup_cost += indexTotalCost;
760 /* Fetch estimated page costs for tablespace containing table. */
761 get_tablespace_page_costs(baserel->reltablespace,
762 &spc_random_page_cost,
766 * Estimate number of main-table pages fetched.
768 tuples_fetched = clamp_row_est(indexSelectivity * baserel->tuples);
770 T = (baserel->pages > 1) ? (double) baserel->pages : 1.0;
775 * For repeated bitmap scans, scale up the number of tuples fetched in
776 * the Mackert and Lohman formula by the number of scans, so that we
777 * estimate the number of pages fetched by all the scans. Then
778 * pro-rate for one scan.
780 pages_fetched = index_pages_fetched(tuples_fetched * loop_count,
782 get_indexpath_pages(bitmapqual),
784 pages_fetched /= loop_count;
789 * For a single scan, the number of heap pages that need to be fetched
790 * is the same as the Mackert and Lohman formula for the case T <= b
791 * (ie, no re-reads needed).
793 pages_fetched = (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
795 if (pages_fetched >= T)
798 pages_fetched = ceil(pages_fetched);
801 * For small numbers of pages we should charge spc_random_page_cost
802 * apiece, while if nearly all the table's pages are being read, it's more
803 * appropriate to charge spc_seq_page_cost apiece. The effect is
804 * nonlinear, too. For lack of a better idea, interpolate like this to
805 * determine the cost per page.
807 if (pages_fetched >= 2.0)
808 cost_per_page = spc_random_page_cost -
809 (spc_random_page_cost - spc_seq_page_cost)
810 * sqrt(pages_fetched / T);
812 cost_per_page = spc_random_page_cost;
814 run_cost += pages_fetched * cost_per_page;
817 * Estimate CPU costs per tuple.
819 * Often the indexquals don't need to be rechecked at each tuple ... but
820 * not always, especially not if there are enough tuples involved that the
821 * bitmaps become lossy. For the moment, just assume they will be
822 * rechecked always. This means we charge the full freight for all the scan clauses.
825 get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
827 startup_cost += qpqual_cost.startup;
828 cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
830 run_cost += cpu_per_tuple * tuples_fetched;
832 path->startup_cost = startup_cost;
833 path->total_cost = startup_cost + run_cost;
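
/*
 * Illustrative sketch (not part of the original file): the nonlinear
 * per-page charge used by cost_bitmap_heap_scan() above.  T is the number
 * of pages in the table (assumed >= 1); the other inputs are stand-ins for
 * the tablespace page costs.  Relies on sqrt() from <math.h>.
 */
static double
bitmap_cost_per_page_sketch(double pages_fetched, double T,
							double spc_random_page_cost,
							double spc_seq_page_cost)
{
	/* slide from random-page cost toward seq-page cost as coverage grows */
	if (pages_fetched >= 2.0)
		return spc_random_page_cost -
			(spc_random_page_cost - spc_seq_page_cost) *
			sqrt(pages_fetched / T);
	return spc_random_page_cost;
}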
837 * cost_bitmap_tree_node
838 * Extract cost and selectivity from a bitmap tree node (index/and/or)
841 cost_bitmap_tree_node(Path *path, Cost *cost, Selectivity *selec)
843 if (IsA(path, IndexPath))
845 *cost = ((IndexPath *) path)->indextotalcost;
846 *selec = ((IndexPath *) path)->indexselectivity;
849 * Charge a small amount per retrieved tuple to reflect the costs of
850 * manipulating the bitmap. This is mostly to make sure that a bitmap
851 * scan doesn't look to be the same cost as an indexscan to retrieve a single tuple.
854 *cost += 0.1 * cpu_operator_cost * path->rows;
856 else if (IsA(path, BitmapAndPath))
858 *cost = path->total_cost;
859 *selec = ((BitmapAndPath *) path)->bitmapselectivity;
861 else if (IsA(path, BitmapOrPath))
863 *cost = path->total_cost;
864 *selec = ((BitmapOrPath *) path)->bitmapselectivity;
868 elog(ERROR, "unrecognized node type: %d", nodeTag(path));
869 *cost = *selec = 0; /* keep compiler quiet */
874 * cost_bitmap_and_node
875 * Estimate the cost of a BitmapAnd node
877 * Note that this considers only the costs of index scanning and bitmap
878 * creation, not the eventual heap access. In that sense the object isn't
879 * truly a Path, but it has enough path-like properties (costs in particular)
880 * to warrant treating it as one. We don't bother to set the path rows field, however.
884 cost_bitmap_and_node(BitmapAndPath *path, PlannerInfo *root)
891 * We estimate AND selectivity on the assumption that the inputs are
892 * independent. This is probably often wrong, but we don't have the info to do better.
895 * The runtime cost of the BitmapAnd itself is estimated at 100x
896 * cpu_operator_cost for each tbm_intersect needed. Probably too small,
897 * definitely too simplistic?
901 foreach(l, path->bitmapquals)
903 Path *subpath = (Path *) lfirst(l);
905 Selectivity subselec;
907 cost_bitmap_tree_node(subpath, &subCost, &subselec);
911 totalCost += subCost;
912 if (l != list_head(path->bitmapquals))
913 totalCost += 100.0 * cpu_operator_cost;
915 path->bitmapselectivity = selec;
916 path->path.rows = 0; /* per above, not used */
917 path->path.startup_cost = totalCost;
918 path->path.total_cost = totalCost;
922 * cost_bitmap_or_node
923 * Estimate the cost of a BitmapOr node
925 * See comments for cost_bitmap_and_node.
928 cost_bitmap_or_node(BitmapOrPath *path, PlannerInfo *root)
935 * We estimate OR selectivity on the assumption that the inputs are
936 * non-overlapping, since that's often the case in "x IN (list)" type
937 * situations. Of course, we clamp to 1.0 at the end.
939 * The runtime cost of the BitmapOr itself is estimated at 100x
940 * cpu_operator_cost for each tbm_union needed. Probably too small,
941 * definitely too simplistic? We are aware that the tbm_unions are
942 * optimized out when the inputs are BitmapIndexScans.
946 foreach(l, path->bitmapquals)
948 Path *subpath = (Path *) lfirst(l);
950 Selectivity subselec;
952 cost_bitmap_tree_node(subpath, &subCost, &subselec);
956 totalCost += subCost;
957 if (l != list_head(path->bitmapquals) &&
958 !IsA(subpath, IndexPath))
959 totalCost += 100.0 * cpu_operator_cost;
961 path->bitmapselectivity = Min(selec, 1.0);
962 path->path.rows = 0; /* per above, not used */
963 path->path.startup_cost = totalCost;
964 path->path.total_cost = totalCost;
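
/*
 * Illustrative sketch (not part of the original file): the selectivity
 * combination rules used by cost_bitmap_and_node() and cost_bitmap_or_node()
 * above, applied to two hypothetical sub-bitmap selectivities s1 and s2.
 */
static void
bitmap_selec_sketch(Selectivity s1, Selectivity s2,
					Selectivity *and_selec, Selectivity *or_selec)
{
	/* AND: assume the inputs are independent, so multiply */
	*and_selec = s1 * s2;
	/* OR: assume the inputs don't overlap, so add, clamping to 1.0 */
	*or_selec = Min(s1 + s2, 1.0);
}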
969 * Determines and returns the cost of scanning a relation using TIDs.
971 * 'baserel' is the relation to be scanned
972 * 'tidquals' is the list of TID-checkable quals
973 * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
976 cost_tidscan(Path *path, PlannerInfo *root,
977 RelOptInfo *baserel, List *tidquals, ParamPathInfo *param_info)
979 Cost startup_cost = 0;
981 bool isCurrentOf = false;
982 QualCost qpqual_cost;
984 QualCost tid_qual_cost;
987 double spc_random_page_cost;
989 /* Should only be applied to base relations */
990 Assert(baserel->relid > 0);
991 Assert(baserel->rtekind == RTE_RELATION);
993 /* Mark the path with the correct row estimate */
995 path->rows = param_info->ppi_rows;
997 path->rows = baserel->rows;
999 /* Count how many tuples we expect to retrieve */
1001 foreach(l, tidquals)
1003 if (IsA(lfirst(l), ScalarArrayOpExpr))
1005 /* Each element of the array yields 1 tuple */
1006 ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) lfirst(l);
1007 Node *arraynode = (Node *) lsecond(saop->args);
1009 ntuples += estimate_array_length(arraynode);
1011 else if (IsA(lfirst(l), CurrentOfExpr))
1013 /* CURRENT OF yields 1 tuple */
1019 /* It's just CTID = something, count 1 tuple */
1025 * We must force TID scan for WHERE CURRENT OF, because only nodeTidscan.c
1026 * understands how to do it correctly. Therefore, honor enable_tidscan
1027 * only when CURRENT OF isn't present. Also note that cost_qual_eval
1028 * counts a CurrentOfExpr as having startup cost disable_cost, which we
1029 * subtract off here; that's to prevent other plan types such as seqscan from winning out in that case.
1034 Assert(baserel->baserestrictcost.startup >= disable_cost);
1035 startup_cost -= disable_cost;
1037 else if (!enable_tidscan)
1038 startup_cost += disable_cost;
1041 * The TID qual expressions will be computed once, any other baserestrict
1042 * quals once per retrieved tuple.
1044 cost_qual_eval(&tid_qual_cost, tidquals, root);
1046 /* fetch estimated page cost for tablespace containing table */
1047 get_tablespace_page_costs(baserel->reltablespace,
1048 &spc_random_page_cost,
1051 /* disk costs --- assume each tuple on a different page */
1052 run_cost += spc_random_page_cost * ntuples;
1054 /* Add scanning CPU costs */
1055 get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1057 /* XXX currently we assume TID quals are a subset of qpquals */
1058 startup_cost += qpqual_cost.startup + tid_qual_cost.per_tuple;
1059 cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple -
1060 tid_qual_cost.per_tuple;
1061 run_cost += cpu_per_tuple * ntuples;
1063 path->startup_cost = startup_cost;
1064 path->total_cost = startup_cost + run_cost;
1069 * Determines and returns the cost of scanning a subquery RTE.
1071 * 'baserel' is the relation to be scanned
1072 * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
1075 cost_subqueryscan(Path *path, PlannerInfo *root,
1076 RelOptInfo *baserel, ParamPathInfo *param_info)
1080 QualCost qpqual_cost;
1083 /* Should only be applied to base relations that are subqueries */
1084 Assert(baserel->relid > 0);
1085 Assert(baserel->rtekind == RTE_SUBQUERY);
1087 /* Mark the path with the correct row estimate */
1089 path->rows = param_info->ppi_rows;
1091 path->rows = baserel->rows;
1094 * Cost of path is cost of evaluating the subplan, plus cost of evaluating
1095 * any restriction clauses that will be attached to the SubqueryScan node,
1096 * plus cpu_tuple_cost to account for selection and projection overhead.
1098 path->startup_cost = baserel->subplan->startup_cost;
1099 path->total_cost = baserel->subplan->total_cost;
1101 get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1103 startup_cost = qpqual_cost.startup;
1104 cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
1105 run_cost = cpu_per_tuple * baserel->tuples;
1107 path->startup_cost += startup_cost;
1108 path->total_cost += startup_cost + run_cost;
1113 * Determines and returns the cost of scanning a function RTE.
1115 * 'baserel' is the relation to be scanned
1116 * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
1119 cost_functionscan(Path *path, PlannerInfo *root,
1120 RelOptInfo *baserel, ParamPathInfo *param_info)
1122 Cost startup_cost = 0;
1124 QualCost qpqual_cost;
1129 /* Should only be applied to base relations that are functions */
1130 Assert(baserel->relid > 0);
1131 rte = planner_rt_fetch(baserel->relid, root);
1132 Assert(rte->rtekind == RTE_FUNCTION);
1134 /* Mark the path with the correct row estimate */
1136 path->rows = param_info->ppi_rows;
1138 path->rows = baserel->rows;
1141 * Estimate costs of executing the function expression(s).
1143 * Currently, nodeFunctionscan.c always executes the functions to
1144 * completion before returning any rows, and caches the results in a
1145 * tuplestore. So the function eval cost is all startup cost, and per-row
1146 * costs are minimal.
1148 * XXX in principle we ought to charge tuplestore spill costs if the
1149 * number of rows is large. However, given how phony our rowcount
1150 * estimates for functions tend to be, there's not a lot of point in that
1151 * refinement right now.
1153 cost_qual_eval_node(&exprcost, (Node *) rte->functions, root);
1155 startup_cost += exprcost.startup + exprcost.per_tuple;
1157 /* Add scanning CPU costs */
1158 get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1160 startup_cost += qpqual_cost.startup;
1161 cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
1162 run_cost += cpu_per_tuple * baserel->tuples;
1164 path->startup_cost = startup_cost;
1165 path->total_cost = startup_cost + run_cost;
1170 * Determines and returns the cost of scanning a VALUES RTE.
1172 * 'baserel' is the relation to be scanned
1173 * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
1176 cost_valuesscan(Path *path, PlannerInfo *root,
1177 RelOptInfo *baserel, ParamPathInfo *param_info)
1179 Cost startup_cost = 0;
1181 QualCost qpqual_cost;
1184 /* Should only be applied to base relations that are values lists */
1185 Assert(baserel->relid > 0);
1186 Assert(baserel->rtekind == RTE_VALUES);
1188 /* Mark the path with the correct row estimate */
1190 path->rows = param_info->ppi_rows;
1192 path->rows = baserel->rows;
1195 * For now, estimate list evaluation cost at one operator eval per list
1196 * (probably pretty bogus, but is it worth being smarter?)
1198 cpu_per_tuple = cpu_operator_cost;
1200 /* Add scanning CPU costs */
1201 get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1203 startup_cost += qpqual_cost.startup;
1204 cpu_per_tuple += cpu_tuple_cost + qpqual_cost.per_tuple;
1205 run_cost += cpu_per_tuple * baserel->tuples;
1207 path->startup_cost = startup_cost;
1208 path->total_cost = startup_cost + run_cost;
1213 * Determines and returns the cost of scanning a CTE RTE.
1215 * Note: this is used for both self-reference and regular CTEs; the
1216 * possible cost differences are below the threshold of what we could
1217 * estimate accurately anyway. Note that the costs of evaluating the
1218 * referenced CTE query are added into the final plan as initplan costs,
1219 * and should NOT be counted here.
1222 cost_ctescan(Path *path, PlannerInfo *root,
1223 RelOptInfo *baserel, ParamPathInfo *param_info)
1225 Cost startup_cost = 0;
1227 QualCost qpqual_cost;
1230 /* Should only be applied to base relations that are CTEs */
1231 Assert(baserel->relid > 0);
1232 Assert(baserel->rtekind == RTE_CTE);
1234 /* Mark the path with the correct row estimate */
1236 path->rows = param_info->ppi_rows;
1238 path->rows = baserel->rows;
1240 /* Charge one CPU tuple cost per row for tuplestore manipulation */
1241 cpu_per_tuple = cpu_tuple_cost;
1243 /* Add scanning CPU costs */
1244 get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1246 startup_cost += qpqual_cost.startup;
1247 cpu_per_tuple += cpu_tuple_cost + qpqual_cost.per_tuple;
1248 run_cost += cpu_per_tuple * baserel->tuples;
1250 path->startup_cost = startup_cost;
1251 path->total_cost = startup_cost + run_cost;
1255 * cost_recursive_union
1256 * Determines and returns the cost of performing a recursive union,
1257 * and also the estimated output size.
1259 * We are given Plans for the nonrecursive and recursive terms.
1261 * Note that the arguments and output are Plans, not Paths as in most of
1262 * the rest of this module. That's because we don't bother setting up a
1263 * Path representation for recursive union --- we have only one way to do it.
1266 cost_recursive_union(Plan *runion, Plan *nrterm, Plan *rterm)
1272 /* We probably have decent estimates for the non-recursive term */
1273 startup_cost = nrterm->startup_cost;
1274 total_cost = nrterm->total_cost;
1275 total_rows = nrterm->plan_rows;
1278 * We arbitrarily assume that about 10 recursive iterations will be
1279 * needed, and that we've managed to get a good fix on the cost and output
1280 * size of each one of them. These are mighty shaky assumptions but it's
1281 * hard to see how to do better.
1283 total_cost += 10 * rterm->total_cost;
1284 total_rows += 10 * rterm->plan_rows;
1287 * Also charge cpu_tuple_cost per row to account for the costs of
1288 * manipulating the tuplestores. (We don't worry about possible
1289 * spill-to-disk costs.)
1291 total_cost += cpu_tuple_cost * total_rows;
1293 runion->startup_cost = startup_cost;
1294 runion->total_cost = total_cost;
1295 runion->plan_rows = total_rows;
1296 runion->plan_width = Max(nrterm->plan_width, rterm->plan_width);
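
/*
 * Illustrative sketch (not part of the original file, numbers invented):
 * applying the cost_recursive_union() arithmetic above.  If the
 * nonrecursive term costs 100 and yields 1000 rows, and the recursive term
 * costs 50 per iteration and yields 200 rows per iteration, then
 *		total_rows = 1000 + 10 * 200 = 3000
 *		total_cost = 100 + 10 * 50 + cpu_tuple_cost * 3000
 * reflecting the arbitrary assumption of about 10 recursive iterations.
 */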
1301 * Determines and returns the cost of sorting a relation, including
1302 * the cost of reading the input data.
1304 * If the total volume of data to sort is less than sort_mem, we will do
1305 * an in-memory sort, which requires no I/O and about t*log2(t) tuple
1306 * comparisons for t tuples.
1308 * If the total volume exceeds sort_mem, we switch to a tape-style merge
1309 * algorithm. There will still be about t*log2(t) tuple comparisons in
1310 * total, but we will also need to write and read each tuple once per
1311 * merge pass. We expect about ceil(logM(r)) merge passes where r is the
1312 * number of initial runs formed and M is the merge order used by tuplesort.c.
1313 * Since the average initial run should be about twice sort_mem, we have
1314 * disk traffic = 2 * relsize * ceil(logM(relsize / (2*sort_mem)))
1315 * cpu = comparison_cost * t * log2(t)
1317 * If the sort is bounded (i.e., only the first k result tuples are needed)
1318 * and k tuples can fit into sort_mem, we use a heap method that keeps only
1319 * k tuples in the heap; this will require about t*log2(k) tuple comparisons.
1321 * The disk traffic is assumed to be 3/4ths sequential and 1/4th random
1322 * accesses (XXX can't we refine that guess?)
1324 * By default, we charge two operator evals per tuple comparison, which should
1325 * be in the right ballpark in most cases. The caller can tweak this by
1326 * specifying nonzero comparison_cost; typically that's used for any extra
1327 * work that has to be done to prepare the inputs to the comparison operators.
1329 * 'pathkeys' is a list of sort keys
1330 * 'input_cost' is the total cost for reading the input data
1331 * 'tuples' is the number of tuples in the relation
1332 * 'width' is the average tuple width in bytes
1333 * 'comparison_cost' is the extra cost per comparison, if any
1334 * 'sort_mem' is the number of kilobytes of work memory allowed for the sort
1335 * 'limit_tuples' is the bound on the number of output tuples; -1 if no bound
1337 * NOTE: some callers currently pass NIL for pathkeys because they
1338 * can't conveniently supply the sort keys. Since this routine doesn't
1339 * currently do anything with pathkeys anyway, that doesn't matter...
1340 * but if it ever does, it should react gracefully to lack of key data.
1341 * (Actually, the thing we'd most likely be interested in is just the number
1342 * of sort keys, which all callers *could* supply.)
1345 cost_sort(Path *path, PlannerInfo *root,
1346 List *pathkeys, Cost input_cost, double tuples, int width,
1347 Cost comparison_cost, int sort_mem,
1348 double limit_tuples)
1350 Cost startup_cost = input_cost;
1352 double input_bytes = relation_byte_size(tuples, width);
1353 double output_bytes;
1354 double output_tuples;
1355 long sort_mem_bytes = sort_mem * 1024L;
1358 startup_cost += disable_cost;
1360 path->rows = tuples;
1363 * We want to be sure the cost of a sort is never estimated as zero, even
1364 * if passed-in tuple count is zero. Besides, mustn't do log(0)...
1369 /* Include the default cost-per-comparison */
1370 comparison_cost += 2.0 * cpu_operator_cost;
1372 /* Do we have a useful LIMIT? */
1373 if (limit_tuples > 0 && limit_tuples < tuples)
1375 output_tuples = limit_tuples;
1376 output_bytes = relation_byte_size(output_tuples, width);
1380 output_tuples = tuples;
1381 output_bytes = input_bytes;
1384 if (output_bytes > sort_mem_bytes)
1387 * We'll have to use a disk-based sort of all the tuples
1389 double npages = ceil(input_bytes / BLCKSZ);
1390 double nruns = (input_bytes / sort_mem_bytes) * 0.5;
1391 double mergeorder = tuplesort_merge_order(sort_mem_bytes);
1393 double npageaccesses;
1398 * Assume about N log2 N comparisons
1400 startup_cost += comparison_cost * tuples * LOG2(tuples);
1404 /* Compute logM(r) as log(r) / log(M) */
1405 if (nruns > mergeorder)
1406 log_runs = ceil(log(nruns) / log(mergeorder));
1409 npageaccesses = 2.0 * npages * log_runs;
1410 /* Assume 3/4ths of accesses are sequential, 1/4th are not */
1411 startup_cost += npageaccesses *
1412 (seq_page_cost * 0.75 + random_page_cost * 0.25);
1414 else if (tuples > 2 * output_tuples || input_bytes > sort_mem_bytes)
1417 * We'll use a bounded heap-sort keeping just K tuples in memory, for
1418 * a total number of tuple comparisons of N log2 K; but the constant
1419 * factor is a bit higher than for quicksort. Tweak it so that the
1420 * cost curve is continuous at the crossover point.
1422 startup_cost += comparison_cost * tuples * LOG2(2.0 * output_tuples);
1426 /* We'll use plain quicksort on all the input tuples */
1427 startup_cost += comparison_cost * tuples * LOG2(tuples);
1431 * Also charge a small amount (arbitrarily set equal to operator cost) per
1432 * extracted tuple. We don't charge cpu_tuple_cost because a Sort node
1433 * doesn't do qual-checking or projection, so it has less overhead than
1434 * most plan nodes. Note it's correct to use tuples not output_tuples
1435 * here --- the upper LIMIT will pro-rate the run cost so we'd be double
1436 * counting the LIMIT otherwise.
1438 run_cost += cpu_operator_cost * tuples;
1440 path->startup_cost = startup_cost;
1441 path->total_cost = startup_cost + run_cost;
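
/*
 * Illustrative sketch (not part of the original file): the disk-sort branch
 * of cost_sort() above as a standalone calculation.  merge_order stands in
 * for tuplesort_merge_order(sort_mem_bytes), and comparison_cost is assumed
 * to already include the default 2 * cpu_operator_cost per comparison.
 * Uses the LOG2 macro defined near the top of this file and log()/ceil()
 * from <math.h>.
 */
static double
external_sort_cost_sketch(double tuples, double input_bytes,
						  double sort_mem_bytes, double merge_order,
						  double comparison_cost)
{
	double		npages = ceil(input_bytes / BLCKSZ);
	double		nruns = (input_bytes / sort_mem_bytes) * 0.5;
	double		log_runs;
	double		npageaccesses;
	double		cost;

	/* CPU: about N log2 N comparisons in total */
	cost = comparison_cost * tuples * LOG2(tuples);

	/* number of merge passes: ceil(logM(r)), but at least one pass */
	if (nruns > merge_order)
		log_runs = ceil(log(nruns) / log(merge_order));
	else
		log_runs = 1.0;

	/* every page is written and read once per merge pass */
	npageaccesses = 2.0 * npages * log_runs;
	/* assume 3/4ths of accesses are sequential, 1/4th are random */
	cost += npageaccesses * (seq_page_cost * 0.75 + random_page_cost * 0.25);
	return cost;
}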
1446 * Determines and returns the cost of a MergeAppend node.
1448 * MergeAppend merges several pre-sorted input streams, using a heap that
1449 * at any given instant holds the next tuple from each stream. If there
1450 * are N streams, we need about N*log2(N) tuple comparisons to construct
1451 * the heap at startup, and then for each output tuple, about log2(N)
1452 * comparisons to delete the top heap entry and another log2(N) comparisons
1453 * to insert its successor from the same stream.
1455 * (The effective value of N will drop once some of the input streams are
1456 * exhausted, but it seems unlikely to be worth trying to account for that.)
1458 * The heap is never spilled to disk, since we assume N is not very large.
1459 * So this is much simpler than cost_sort.
1461 * As in cost_sort, we charge two operator evals per tuple comparison.
1463 * 'pathkeys' is a list of sort keys
1464 * 'n_streams' is the number of input streams
1465 * 'input_startup_cost' is the sum of the input streams' startup costs
1466 * 'input_total_cost' is the sum of the input streams' total costs
1467 * 'tuples' is the number of tuples in all the streams
1470 cost_merge_append(Path *path, PlannerInfo *root,
1471 List *pathkeys, int n_streams,
1472 Cost input_startup_cost, Cost input_total_cost,
1475 Cost startup_cost = 0;
1477 Cost comparison_cost;
1484 N = (n_streams < 2) ? 2.0 : (double) n_streams;
1487 /* Assumed cost per tuple comparison */
1488 comparison_cost = 2.0 * cpu_operator_cost;
1490 /* Heap creation cost */
1491 startup_cost += comparison_cost * N * logN;
1493 /* Per-tuple heap maintenance cost */
1494 run_cost += tuples * comparison_cost * 2.0 * logN;
1497 * Also charge a small amount (arbitrarily set equal to operator cost) per
1498 * extracted tuple. We don't charge cpu_tuple_cost because a MergeAppend
1499 * node doesn't do qual-checking or projection, so it has less overhead
1500 * than most plan nodes.
1502 run_cost += cpu_operator_cost * tuples;
1504 path->startup_cost = startup_cost + input_startup_cost;
1505 path->total_cost = startup_cost + run_cost + input_total_cost;
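
/*
 * Illustrative sketch (not part of the original file): the heap-maintenance
 * arithmetic of cost_merge_append() above, for n_streams pre-sorted inputs
 * producing 'tuples' output tuples in total.  Uses the LOG2 macro defined
 * near the top of this file.
 */
static double
merge_append_cpu_cost_sketch(int n_streams, double tuples)
{
	double		N = (n_streams < 2) ? 2.0 : (double) n_streams;
	double		logN = LOG2(N);
	double		comparison_cost = 2.0 * cpu_operator_cost;
	double		cost;

	/* heap creation at startup: about N log2 N comparisons */
	cost = comparison_cost * N * logN;
	/* per output tuple: delete the top entry and insert its successor */
	cost += tuples * comparison_cost * 2.0 * logN;
	/* small per-extracted-tuple overhead */
	cost += cpu_operator_cost * tuples;
	return cost;
}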
1510 * Determines and returns the cost of materializing a relation, including
1511 * the cost of reading the input data.
1513 * If the total volume of data to materialize exceeds work_mem, we will need
1514 * to write it to disk, so the cost is much higher in that case.
1516 * Note that here we are estimating the costs for the first scan of the
1517 * relation, so the materialization is all overhead --- any savings will
1518 * occur only on rescan, which is estimated in cost_rescan.
1521 cost_material(Path *path,
1522 Cost input_startup_cost, Cost input_total_cost,
1523 double tuples, int width)
1525 Cost startup_cost = input_startup_cost;
1526 Cost run_cost = input_total_cost - input_startup_cost;
1527 double nbytes = relation_byte_size(tuples, width);
1528 long work_mem_bytes = work_mem * 1024L;
1530 path->rows = tuples;
1533 * Whether spilling or not, charge 2x cpu_operator_cost per tuple to
1534 * reflect bookkeeping overhead. (This rate must be more than what
1535 * cost_rescan charges for materialize, ie, cpu_operator_cost per tuple;
1536 * if it is exactly the same then there will be a cost tie between
1537 * nestloop with A outer, materialized B inner and nestloop with B outer,
1538 * materialized A inner. The extra cost ensures we'll prefer
1539 * materializing the smaller rel.) Note that this is normally a good deal
1540 * less than cpu_tuple_cost, which is OK because a Material plan node
1541 * doesn't do qual-checking or projection, so it's got less overhead than most plan nodes.
1544 run_cost += 2 * cpu_operator_cost * tuples;
1547 * If we will spill to disk, charge at the rate of seq_page_cost per page.
1548 * This cost is assumed to be evenly spread through the plan run phase,
1549 * which isn't exactly accurate but our cost model doesn't allow for
1550 * nonuniform costs within the run phase.
1552 if (nbytes > work_mem_bytes)
1554 double npages = ceil(nbytes / BLCKSZ);
1556 run_cost += seq_page_cost * npages;
1559 path->startup_cost = startup_cost;
1560 path->total_cost = startup_cost + run_cost;
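
/*
 * Illustrative sketch (not part of the original file): the cost_material()
 * run-cost arithmetic above.  nbytes is computed as a crude stand-in for
 * relation_byte_size(), ignoring per-tuple header overhead; work_mem_kb
 * stands in for the work_mem GUC.  The 2x cpu_operator_cost bookkeeping
 * charge is what makes the planner prefer materializing the smaller input
 * of a nestloop when the alternatives would otherwise tie.
 */
static double
material_run_cost_sketch(double input_run_cost, double tuples, int width,
						 long work_mem_kb)
{
	double		nbytes = tuples * width;
	long		work_mem_bytes = work_mem_kb * 1024L;
	double		run_cost = input_run_cost;

	/* per-tuple bookkeeping overhead */
	run_cost += 2 * cpu_operator_cost * tuples;
	/* if the tuplestore spills, charge seq_page_cost per page written */
	if (nbytes > work_mem_bytes)
		run_cost += seq_page_cost * ceil(nbytes / BLCKSZ);
	return run_cost;
}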
1565 * Determines and returns the cost of performing an Agg plan node,
1566 * including the cost of its input.
1568 * aggcosts can be NULL when there are no actual aggregate functions (i.e.,
1569 * we are using a hashed Agg node just to do grouping).
1571 * Note: when aggstrategy == AGG_SORTED, caller must ensure that input costs
1572 * are for appropriately-sorted input.
1575 cost_agg(Path *path, PlannerInfo *root,
1576 AggStrategy aggstrategy, const AggClauseCosts *aggcosts,
1577 int numGroupCols, double numGroups,
1578 Cost input_startup_cost, Cost input_total_cost,
1579 double input_tuples)
1581 double output_tuples;
1584 AggClauseCosts dummy_aggcosts;
1586 /* Use all-zero per-aggregate costs if NULL is passed */
1587 if (aggcosts == NULL)
1589 Assert(aggstrategy == AGG_HASHED);
1590 MemSet(&dummy_aggcosts, 0, sizeof(AggClauseCosts));
1591 aggcosts = &dummy_aggcosts;
1595 * The transCost.per_tuple component of aggcosts should be charged once
1596 * per input tuple, corresponding to the costs of evaluating the aggregate
1597 * transfns and their input expressions (with any startup cost of course
1598 * charged but once). The finalCost component is charged once per output
1599 * tuple, corresponding to the costs of evaluating the finalfns.
1601 * If we are grouping, we charge an additional cpu_operator_cost per
1602 * grouping column per input tuple for grouping comparisons.
1604 * We will produce a single output tuple if not grouping, and a tuple per
1605 * group otherwise. We charge cpu_tuple_cost for each output tuple.
1607 * Note: in this cost model, AGG_SORTED and AGG_HASHED have exactly the
1608 * same total CPU cost, but AGG_SORTED has lower startup cost. If the
1609 * input path is already sorted appropriately, AGG_SORTED should be
1610 * preferred (since it has no risk of memory overflow). This will happen
1611 * as long as the computed total costs are indeed exactly equal --- but if
1612 * there's roundoff error we might do the wrong thing. So be sure that
1613 * the computations below form the same intermediate values in the same order.
1616 if (aggstrategy == AGG_PLAIN)
1618 startup_cost = input_total_cost;
1619 startup_cost += aggcosts->transCost.startup;
1620 startup_cost += aggcosts->transCost.per_tuple * input_tuples;
1621 startup_cost += aggcosts->finalCost;
1622 /* we aren't grouping */
1623 total_cost = startup_cost + cpu_tuple_cost;
1626 else if (aggstrategy == AGG_SORTED)
1628 /* Here we are able to deliver output on-the-fly */
1629 startup_cost = input_startup_cost;
1630 total_cost = input_total_cost;
1631 /* calcs phrased this way to match HASHED case, see note above */
1632 total_cost += aggcosts->transCost.startup;
1633 total_cost += aggcosts->transCost.per_tuple * input_tuples;
1634 total_cost += (cpu_operator_cost * numGroupCols) * input_tuples;
1635 total_cost += aggcosts->finalCost * numGroups;
1636 total_cost += cpu_tuple_cost * numGroups;
1637 output_tuples = numGroups;
1641 /* must be AGG_HASHED */
1642 startup_cost = input_total_cost;
1643 startup_cost += aggcosts->transCost.startup;
1644 startup_cost += aggcosts->transCost.per_tuple * input_tuples;
1645 startup_cost += (cpu_operator_cost * numGroupCols) * input_tuples;
1646 total_cost = startup_cost;
1647 total_cost += aggcosts->finalCost * numGroups;
1648 total_cost += cpu_tuple_cost * numGroups;
1649 output_tuples = numGroups;
1652 path->rows = output_tuples;
1653 path->startup_cost = startup_cost;
1654 path->total_cost = total_cost;
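
/*
 * Illustrative sketch (not part of the original file): the CPU cost that the
 * AGG_SORTED and AGG_HASHED branches above both add on top of the input
 * cost (the one-time transCost.startup is omitted here for brevity).  The
 * two strategies share this total; they differ only in how much of it is
 * counted as startup cost (all of it for HASHED, none of it for SORTED).
 */
static double
grouped_agg_cpu_cost_sketch(double input_tuples, double numGroups,
							int numGroupCols,
							double trans_cost_per_tuple,
							double final_cost_per_group)
{
	double		cost = 0.0;

	cost += trans_cost_per_tuple * input_tuples;
	cost += (cpu_operator_cost * numGroupCols) * input_tuples;
	cost += final_cost_per_group * numGroups;
	cost += cpu_tuple_cost * numGroups;
	return cost;
}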
1659 * Determines and returns the cost of performing a WindowAgg plan node,
1660 * including the cost of its input.
1662 * Input is assumed already properly sorted.
1665 cost_windowagg(Path *path, PlannerInfo *root,
1666 List *windowFuncs, int numPartCols, int numOrderCols,
1667 Cost input_startup_cost, Cost input_total_cost,
1668 double input_tuples)
1674 startup_cost = input_startup_cost;
1675 total_cost = input_total_cost;
1678 * Window functions are assumed to cost their stated execution cost, plus
1679 * the cost of evaluating their input expressions, per tuple. Since they
1680 * may in fact evaluate their inputs at multiple rows during each cycle,
1681 * this could be a drastic underestimate; but without a way to know how
1682 * many rows the window function will fetch, it's hard to do better. In
1683 * any case, it's a good estimate for all the built-in window functions,
1684 * so we'll just do this for now.
1686 foreach(lc, windowFuncs)
1688 WindowFunc *wfunc = (WindowFunc *) lfirst(lc);
1692 Assert(IsA(wfunc, WindowFunc));
1694 wfunccost = get_func_cost(wfunc->winfnoid) * cpu_operator_cost;
1696 /* also add the input expressions' cost to per-input-row costs */
1697 cost_qual_eval_node(&argcosts, (Node *) wfunc->args, root);
1698 startup_cost += argcosts.startup;
1699 wfunccost += argcosts.per_tuple;
1702 * Add the filter's cost to per-input-row costs. XXX We should reduce
1703 * input expression costs according to filter selectivity.
1705 cost_qual_eval_node(&argcosts, (Node *) wfunc->aggfilter, root);
1706 startup_cost += argcosts.startup;
1707 wfunccost += argcosts.per_tuple;
1709 total_cost += wfunccost * input_tuples;
1713 * We also charge cpu_operator_cost per grouping column per tuple for
1714 * grouping comparisons, plus cpu_tuple_cost per tuple for general overhead.
1717 * XXX this neglects costs of spooling the data to disk when it overflows
1718 * work_mem. Sooner or later that should get accounted for.
1720 total_cost += cpu_operator_cost * (numPartCols + numOrderCols) * input_tuples;
1721 total_cost += cpu_tuple_cost * input_tuples;
1723 path->rows = input_tuples;
1724 path->startup_cost = startup_cost;
1725 path->total_cost = total_cost;
1730 * Determines and returns the cost of performing a Group plan node,
1731 * including the cost of its input.
1733 * Note: caller must ensure that input costs are for appropriately-sorted
1737 cost_group(Path *path, PlannerInfo *root,
1738 int numGroupCols, double numGroups,
1739 Cost input_startup_cost, Cost input_total_cost,
1740 double input_tuples)
1745 startup_cost = input_startup_cost;
1746 total_cost = input_total_cost;
1749 * Charge one cpu_operator_cost per comparison per input tuple. We assume
1750 * all columns get compared for most of the tuples.
1752 total_cost += cpu_operator_cost * input_tuples * numGroupCols;
1754 path->rows = numGroups;
1755 path->startup_cost = startup_cost;
1756 path->total_cost = total_cost;
1760 * initial_cost_nestloop
1761 * Preliminary estimate of the cost of a nestloop join path.
1763 * This must quickly produce lower-bound estimates of the path's startup and
1764 * total costs. If we are unable to eliminate the proposed path from
1765 * consideration using the lower bounds, final_cost_nestloop will be called
1766 * to obtain the final estimates.
1768 * The exact division of labor between this function and final_cost_nestloop
1769 * is private to them, and represents a tradeoff between speed of the initial
1770 * estimate and getting a tight lower bound. We choose to not examine the
1771 * join quals here, since that's by far the most expensive part of the
1772 * calculations. The end result is that CPU-cost considerations must be
1773 * left for the second phase; and for SEMI/ANTI joins, we must also postpone
1774 * incorporation of the inner path's run cost.
1776 * 'workspace' is to be filled with startup_cost, total_cost, and perhaps
1777 * other data to be used by final_cost_nestloop
1778 * 'jointype' is the type of join to be performed
1779 * 'outer_path' is the outer input to the join
1780 * 'inner_path' is the inner input to the join
1781 * 'sjinfo' is extra info about the join for selectivity estimation
1782 * 'semifactors' contains valid data if jointype is SEMI or ANTI
1785 initial_cost_nestloop(PlannerInfo *root, JoinCostWorkspace *workspace,
1787 Path *outer_path, Path *inner_path,
1788 SpecialJoinInfo *sjinfo,
1789 SemiAntiJoinFactors *semifactors)
1791 Cost startup_cost = 0;
1793 double outer_path_rows = outer_path->rows;
1794 Cost inner_rescan_start_cost;
1795 Cost inner_rescan_total_cost;
1796 Cost inner_run_cost;
1797 Cost inner_rescan_run_cost;
1799 /* estimate costs to rescan the inner relation */
1800 cost_rescan(root, inner_path,
1801 &inner_rescan_start_cost,
1802 &inner_rescan_total_cost);
1804 /* cost of source data */
1807 * NOTE: clearly, we must pay both outer and inner paths' startup_cost
1808 * before we can start returning tuples, so the join's startup cost is
1809 * their sum. We'll also pay the inner path's rescan startup cost multiple times.
1812 startup_cost += outer_path->startup_cost + inner_path->startup_cost;
1813 run_cost += outer_path->total_cost - outer_path->startup_cost;
1814 if (outer_path_rows > 1)
1815 run_cost += (outer_path_rows - 1) * inner_rescan_start_cost;
1817 inner_run_cost = inner_path->total_cost - inner_path->startup_cost;
1818 inner_rescan_run_cost = inner_rescan_total_cost - inner_rescan_start_cost;
1820 if (jointype == JOIN_SEMI || jointype == JOIN_ANTI)
1823 * SEMI or ANTI join: executor will stop after first match.
1825 * Getting decent estimates requires inspection of the join quals,
1826 * which we choose to postpone to final_cost_nestloop.
1829 /* Save private data for final_cost_nestloop */
1830 workspace->inner_run_cost = inner_run_cost;
1831 workspace->inner_rescan_run_cost = inner_rescan_run_cost;
1835 /* Normal case; we'll scan whole input rel for each outer row */
1836 run_cost += inner_run_cost;
1837 if (outer_path_rows > 1)
1838 run_cost += (outer_path_rows - 1) * inner_rescan_run_cost;
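/*
 * Worked example (illustrative numbers only): with outer_path_rows = 1000,
 * inner_run_cost = 20 and inner_rescan_run_cost = 5, the inner side
 * contributes 20 + 999 * 5 = 5015 to run_cost; the outer side's own run
 * cost and the 999 rescan startup charges added above come on top of that.
 * Join-qual and per-tuple CPU charges are deferred to final_cost_nestloop.
 */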
1841 /* CPU costs left for later */
1843 /* Public result fields */
1844 workspace->startup_cost = startup_cost;
1845 workspace->total_cost = startup_cost + run_cost;
1846 /* Save private data for final_cost_nestloop */
1847 workspace->run_cost = run_cost;
1851 * final_cost_nestloop
1852 * Final estimate of the cost and result size of a nestloop join path.
1854 * 'path' is already filled in except for the rows and cost fields
1855 * 'workspace' is the result from initial_cost_nestloop
1856 * 'sjinfo' is extra info about the join for selectivity estimation
1857 * 'semifactors' contains valid data if path->jointype is SEMI or ANTI
1860 final_cost_nestloop(PlannerInfo *root, NestPath *path,
1861 JoinCostWorkspace *workspace,
1862 SpecialJoinInfo *sjinfo,
1863 SemiAntiJoinFactors *semifactors)
1865 Path *outer_path = path->outerjoinpath;
1866 Path *inner_path = path->innerjoinpath;
1867 double outer_path_rows = outer_path->rows;
1868 double inner_path_rows = inner_path->rows;
1869 Cost startup_cost = workspace->startup_cost;
1870 Cost run_cost = workspace->run_cost;
1872 QualCost restrict_qual_cost;
1875 /* Mark the path with the correct row estimate */
1876 if (path->path.param_info)
1877 path->path.rows = path->path.param_info->ppi_rows;
1879 path->path.rows = path->path.parent->rows;
1882 * We could include disable_cost in the preliminary estimate, but that
1883 * would amount to optimizing for the case where the join method is
1884 * disabled, which doesn't seem like the way to bet.
1886 if (!enable_nestloop)
1887 startup_cost += disable_cost;
1889 /* cost of inner-relation source data (we already dealt with outer rel) */
1891 if (path->jointype == JOIN_SEMI || path->jointype == JOIN_ANTI)
1894 * SEMI or ANTI join: executor will stop after first match.
1896 Cost inner_run_cost = workspace->inner_run_cost;
1897 Cost inner_rescan_run_cost = workspace->inner_rescan_run_cost;
1898 double outer_matched_rows;
1899 Selectivity inner_scan_frac;
1902 * For an outer-rel row that has at least one match, we can expect the
1903 * inner scan to stop after a fraction 1/(match_count+1) of the inner
1904 * rows, if the matches are evenly distributed. Since they probably
1905 * aren't quite evenly distributed, we apply a fuzz factor of 2.0 to
1906 * that fraction. (If we used a larger fuzz factor, we'd have to
1907 * clamp inner_scan_frac to at most 1.0; but since match_count is at
1908 * least 1, no such clamp is needed now.)
1910 outer_matched_rows = rint(outer_path_rows * semifactors->outer_match_frac);
1911 inner_scan_frac = 2.0 / (semifactors->match_count + 1.0);
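/*
 * For example (illustrative numbers only): match_count = 1 gives
 * inner_scan_frac = 2.0 / 2.0 = 1.0 (expect to scan the whole inner input
 * for a matched outer row), while match_count = 4 gives 2.0 / 5.0 = 0.4.
 * Because match_count is at least 1, the fraction never exceeds 1.0, as
 * noted above.
 */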
1914 * Compute number of tuples processed (not number emitted!). First,
1915 * account for successfully-matched outer rows.
1917 ntuples = outer_matched_rows * inner_path_rows * inner_scan_frac;
1920 * Now we need to estimate the actual costs of scanning the inner
1921 * relation, which may be quite a bit less than N times inner_run_cost
1922 * due to early scan stops. We consider two cases. If the inner path
1923 * is an indexscan using all the joinquals as indexquals, then an
1924 * unmatched outer row results in an indexscan returning no rows,
1925 * which is probably quite cheap. Otherwise, the executor will have
1926 * to scan the whole inner rel for an unmatched row; not so cheap.
1928 if (has_indexed_join_quals(path))
1931 * Successfully-matched outer rows will only require scanning
1932 * inner_scan_frac of the inner relation. In this case, we don't
1933 * need to charge the full inner_run_cost even when that's more
1934 * than inner_rescan_run_cost, because we can assume that none of
1935 * the inner scans ever scan the whole inner relation. So it's
1936 * okay to assume that all the inner scan executions can be
1937 * fractions of the full cost, even if materialization is reducing
1938 * the rescan cost. At this writing, it's impossible to get here
1939 * for a materialized inner scan, so inner_run_cost and
1940 * inner_rescan_run_cost will be the same anyway; but just in
1941 * case, use inner_run_cost for the first matched tuple and
1942 * inner_rescan_run_cost for additional ones.
1944 run_cost += inner_run_cost * inner_scan_frac;
1945 if (outer_matched_rows > 1)
1946 run_cost += (outer_matched_rows - 1) * inner_rescan_run_cost * inner_scan_frac;
1949 * Add the cost of inner-scan executions for unmatched outer rows.
1950 * We estimate this as the same cost as returning the first tuple
1951 * of a nonempty scan. We consider that these are all rescans,
1952 * since we used inner_run_cost once already.
1954 run_cost += (outer_path_rows - outer_matched_rows) *
1955 inner_rescan_run_cost / inner_path_rows;
1958 * We won't be evaluating any quals at all for unmatched rows, so
1959 * don't add them to ntuples.
1965 * Here, a complicating factor is that rescans may be cheaper than
1966 * first scans. If we never scan all the way to the end of the
1967 * inner rel, it might be (depending on the plan type) that we'd
1968 * never pay the whole inner first-scan run cost. However it is
1969 * difficult to estimate whether that will happen (and it could
1970 * not happen if there are any unmatched outer rows!), so be
1971 * conservative and always charge the whole first-scan cost once.
1973 run_cost += inner_run_cost;
1975 /* Add inner run cost for additional outer tuples having matches */
1976 if (outer_matched_rows > 1)
1977 run_cost += (outer_matched_rows - 1) * inner_rescan_run_cost * inner_scan_frac;
1979 /* Add inner run cost for unmatched outer tuples */
1980 run_cost += (outer_path_rows - outer_matched_rows) *
1981 inner_rescan_run_cost;
1983 /* And count the unmatched join tuples as being processed */
1984 ntuples += (outer_path_rows - outer_matched_rows) * inner_path_rows;
1990 /* Normal-case source costs were included in preliminary estimate */
1992 /* Compute number of tuples processed (not number emitted!) */
1993 ntuples = outer_path_rows * inner_path_rows;
1997 cost_qual_eval(&restrict_qual_cost, path->joinrestrictinfo, root);
1998 startup_cost += restrict_qual_cost.startup;
1999 cpu_per_tuple = cpu_tuple_cost + restrict_qual_cost.per_tuple;
2000 run_cost += cpu_per_tuple * ntuples;
2002 path->path.startup_cost = startup_cost;
2003 path->path.total_cost = startup_cost + run_cost;
2007 * initial_cost_mergejoin
2008 * Preliminary estimate of the cost of a mergejoin path.
2010 * This must quickly produce lower-bound estimates of the path's startup and
2011 * total costs. If we are unable to eliminate the proposed path from
2012 * consideration using the lower bounds, final_cost_mergejoin will be called
2013 * to obtain the final estimates.
2015 * The exact division of labor between this function and final_cost_mergejoin
2016 * is private to them, and represents a tradeoff between speed of the initial
2017 * estimate and getting a tight lower bound. We choose to not examine the
2018 * join quals here, except for obtaining the scan selectivity estimate which
2019 * is really essential (but fortunately, use of caching keeps the cost of
2020 * getting that down to something reasonable).
2021 * We also assume that cost_sort is cheap enough to use here.
2023 * 'workspace' is to be filled with startup_cost, total_cost, and perhaps
2024 * other data to be used by final_cost_mergejoin
2025 * 'jointype' is the type of join to be performed
2026 * 'mergeclauses' is the list of joinclauses to be used as merge clauses
2027 * 'outer_path' is the outer input to the join
2028 * 'inner_path' is the inner input to the join
2029 * 'outersortkeys' is the list of sort keys for the outer path
2030 * 'innersortkeys' is the list of sort keys for the inner path
2031 * 'sjinfo' is extra info about the join for selectivity estimation
2033 * Note: outersortkeys and innersortkeys should be NIL if no explicit
2034 * sort is needed because the respective source path is already ordered.
2037 initial_cost_mergejoin(PlannerInfo *root, JoinCostWorkspace *workspace, JoinType jointype, List *mergeclauses,
2040 Path *outer_path, Path *inner_path,
2041 List *outersortkeys, List *innersortkeys,
2042 SpecialJoinInfo *sjinfo)
2044 Cost startup_cost = 0;
2046 double outer_path_rows = outer_path->rows;
2047 double inner_path_rows = inner_path->rows;
2048 Cost inner_run_cost;
2053 Selectivity outerstartsel, outerendsel, innerstartsel, innerendsel;
2057 Path sort_path; /* dummy for result of cost_sort */
2059 /* Protect some assumptions below that rowcounts aren't zero or NaN */
2060 if (outer_path_rows <= 0 || isnan(outer_path_rows))
2061 outer_path_rows = 1;
2062 if (inner_path_rows <= 0 || isnan(inner_path_rows))
2063 inner_path_rows = 1;
2066 * A merge join will stop as soon as it exhausts either input stream
2067 * (unless it's an outer join, in which case the outer side has to be
2068 * scanned all the way anyway). Estimate fraction of the left and right
2069 * inputs that will actually need to be scanned. Likewise, we can
2070 * estimate the number of rows that will be skipped before the first join
2071 * pair is found, which should be factored into startup cost. We use only
2072 * the first (most significant) merge clause for this purpose. Since
2073 * mergejoinscansel() is a fairly expensive computation, we cache the
2074 * results in the merge clause RestrictInfo.
2076 if (mergeclauses && jointype != JOIN_FULL)
2078 RestrictInfo *firstclause = (RestrictInfo *) linitial(mergeclauses);
2083 MergeScanSelCache *cache;
2085 /* Get the input pathkeys to determine the sort-order details */
2086 opathkeys = outersortkeys ? outersortkeys : outer_path->pathkeys;
2087 ipathkeys = innersortkeys ? innersortkeys : inner_path->pathkeys;
2090 opathkey = (PathKey *) linitial(opathkeys);
2091 ipathkey = (PathKey *) linitial(ipathkeys);
2092 /* debugging check */
2093 if (opathkey->pk_opfamily != ipathkey->pk_opfamily ||
2094 opathkey->pk_eclass->ec_collation != ipathkey->pk_eclass->ec_collation ||
2095 opathkey->pk_strategy != ipathkey->pk_strategy ||
2096 opathkey->pk_nulls_first != ipathkey->pk_nulls_first)
2097 elog(ERROR, "left and right pathkeys do not match in mergejoin");
2099 /* Get the selectivity with caching */
2100 cache = cached_scansel(root, firstclause, opathkey);
2102 if (bms_is_subset(firstclause->left_relids,
2103 outer_path->parent->relids))
2105 /* left side of clause is outer */
2106 outerstartsel = cache->leftstartsel;
2107 outerendsel = cache->leftendsel;
2108 innerstartsel = cache->rightstartsel;
2109 innerendsel = cache->rightendsel;
2113 /* left side of clause is inner */
2114 outerstartsel = cache->rightstartsel;
2115 outerendsel = cache->rightendsel;
2116 innerstartsel = cache->leftstartsel;
2117 innerendsel = cache->leftendsel;
2119 if (jointype == JOIN_LEFT ||
2120 jointype == JOIN_ANTI)
2122 outerstartsel = 0.0;
2125 else if (jointype == JOIN_RIGHT)
2127 innerstartsel = 0.0;
2133 /* cope with clauseless or full mergejoin */
2134 outerstartsel = innerstartsel = 0.0;
2135 outerendsel = innerendsel = 1.0;
2139 * Convert selectivities to row counts. We force outer_rows and
2140 * inner_rows to be at least 1, but the skip_rows estimates can be zero.
2142 outer_skip_rows = rint(outer_path_rows * outerstartsel);
2143 inner_skip_rows = rint(inner_path_rows * innerstartsel);
2144 outer_rows = clamp_row_est(outer_path_rows * outerendsel);
2145 inner_rows = clamp_row_est(inner_path_rows * innerendsel);
2147 Assert(outer_skip_rows <= outer_rows);
2148 Assert(inner_skip_rows <= inner_rows);
2151 * Readjust scan selectivities to account for above rounding. This is
2152 * normally an insignificant effect, but when there are only a few rows in
2153 * the inputs, failing to do this makes for a large percentage error.
2155 outerstartsel = outer_skip_rows / outer_path_rows;
2156 innerstartsel = inner_skip_rows / inner_path_rows;
2157 outerendsel = outer_rows / outer_path_rows;
2158 innerendsel = inner_rows / inner_path_rows;
2160 Assert(outerstartsel <= outerendsel);
2161 Assert(innerstartsel <= innerendsel);
2163 /* cost of source data */
2165 if (outersortkeys) /* do we need to sort outer? */
2167 cost_sort(&sort_path, root, outersortkeys,
2170 outer_path->total_cost, outer_path_rows,
2172 outer_path->parent->width, 0.0, work_mem, -1.0);
2176 startup_cost += sort_path.startup_cost;
2177 startup_cost += (sort_path.total_cost - sort_path.startup_cost) * outerstartsel;
2179 run_cost += (sort_path.total_cost - sort_path.startup_cost)
2180 * (outerendsel - outerstartsel);
2184 startup_cost += outer_path->startup_cost;
2185 startup_cost += (outer_path->total_cost - outer_path->startup_cost) * outerstartsel;
2187 run_cost += (outer_path->total_cost - outer_path->startup_cost)
2188 * (outerendsel - outerstartsel);
2191 if (innersortkeys) /* do we need to sort inner? */
2193 cost_sort(&sort_path, root, innersortkeys,
2196 inner_path->total_cost, inner_path_rows,
2198 inner_path->parent->width, 0.0, work_mem, -1.0);
2202 startup_cost += sort_path.startup_cost;
2203 startup_cost += (sort_path.total_cost - sort_path.startup_cost) * innerstartsel;
2205 inner_run_cost = (sort_path.total_cost - sort_path.startup_cost)
2206 * (innerendsel - innerstartsel);
2210 startup_cost += inner_path->startup_cost;
2211 startup_cost += (inner_path->total_cost - inner_path->startup_cost) * innerstartsel;
2213 inner_run_cost = (inner_path->total_cost - inner_path->startup_cost)
2214 * (innerendsel - innerstartsel);
2218 * We can't yet determine whether rescanning occurs, or whether
2219 * materialization of the inner input should be done. The minimum
2220 * possible inner input cost, regardless of rescan and materialization
2221 * considerations, is inner_run_cost. We include that in
2222 * workspace->total_cost, but not yet in run_cost.
2225 /* CPU costs left for later */
2227 /* Public result fields */
2228 workspace->startup_cost = startup_cost;
2229 workspace->total_cost = startup_cost + run_cost + inner_run_cost;
2230 /* Save private data for final_cost_mergejoin */
2231 workspace->run_cost = run_cost;
2232 workspace->inner_run_cost = inner_run_cost;
2233 workspace->outer_rows = outer_rows;
2234 workspace->inner_rows = inner_rows;
2235 workspace->outer_skip_rows = outer_skip_rows;
2236 workspace->inner_skip_rows = inner_skip_rows;
2240 * final_cost_mergejoin
2241 * Final estimate of the cost and result size of a mergejoin path.
2243 * Unlike other costsize functions, this routine makes one actual decision:
2244 * whether we should materialize the inner path. We do that either because
2245 * the inner path can't support mark/restore, or because it's cheaper to
2246 * use an interposed Material node to handle mark/restore. When the decision
2247 * is cost-based it would be logically cleaner to build and cost two separate
2248 * paths with and without that flag set; but that would require repeating most
2249 * of the cost calculations, which are not all that cheap. Since the choice
2250 * will not affect output pathkeys or startup cost, only total cost, there is
2251 * no possibility of wanting to keep both paths. So it seems best to make
2252 * the decision here and record it in the path's materialize_inner field.
2254 * 'path' is already filled in except for the rows and cost fields and materialize_inner.
2256 * 'workspace' is the result from initial_cost_mergejoin
2257 * 'sjinfo' is extra info about the join for selectivity estimation
2260 final_cost_mergejoin(PlannerInfo *root, MergePath *path,
2261 JoinCostWorkspace *workspace,
2262 SpecialJoinInfo *sjinfo)
2264 Path *outer_path = path->jpath.outerjoinpath;
2265 Path *inner_path = path->jpath.innerjoinpath;
2266 double inner_path_rows = inner_path->rows;
2267 List *mergeclauses = path->path_mergeclauses;
2268 List *innersortkeys = path->innersortkeys;
2269 Cost startup_cost = workspace->startup_cost;
2270 Cost run_cost = workspace->run_cost;
2271 Cost inner_run_cost = workspace->inner_run_cost;
2272 double outer_rows = workspace->outer_rows;
2273 double inner_rows = workspace->inner_rows;
2274 double outer_skip_rows = workspace->outer_skip_rows;
2275 double inner_skip_rows = workspace->inner_skip_rows;
2279 QualCost merge_qual_cost;
2280 QualCost qp_qual_cost;
2281 double mergejointuples, rescannedtuples, rescanratio;
2285 /* Protect some assumptions below that rowcounts aren't zero or NaN */
2286 if (inner_path_rows <= 0 || isnan(inner_path_rows))
2287 inner_path_rows = 1;
2289 /* Mark the path with the correct row estimate */
2290 if (path->jpath.path.param_info)
2291 path->jpath.path.rows = path->jpath.path.param_info->ppi_rows;
2293 path->jpath.path.rows = path->jpath.path.parent->rows;
2296 * We could include disable_cost in the preliminary estimate, but that
2297 * would amount to optimizing for the case where the join method is
2298 * disabled, which doesn't seem like the way to bet.
2300 if (!enable_mergejoin)
2301 startup_cost += disable_cost;
2304 * Compute cost of the mergequals and qpquals (other restriction clauses)
2307 cost_qual_eval(&merge_qual_cost, mergeclauses, root);
2308 cost_qual_eval(&qp_qual_cost, path->jpath.joinrestrictinfo, root);
2309 qp_qual_cost.startup -= merge_qual_cost.startup;
2310 qp_qual_cost.per_tuple -= merge_qual_cost.per_tuple;
2313 * Get approx # tuples passing the mergequals. We use approx_tuple_count
2314 * here because we need an estimate done with JOIN_INNER semantics.
2316 mergejointuples = approx_tuple_count(root, &path->jpath, mergeclauses);
2319 * When there are equal merge keys in the outer relation, the mergejoin
2320 * must rescan any matching tuples in the inner relation. This means
2321 * re-fetching inner tuples; we have to estimate how often that happens.
2323 * For regular inner and outer joins, the number of re-fetches can be
2324 * estimated approximately as size of merge join output minus size of
2325 * inner relation. Assume that the distinct key values are 1, 2, ..., and
2326 * denote the number of values of each key in the outer relation as m1,
2327 * m2, ...; in the inner relation, n1, n2, ... Then we have
2329 * size of join = m1 * n1 + m2 * n2 + ...
2331 * number of rescanned tuples = (m1 - 1) * n1 + (m2 - 1) * n2 + ... = m1 *
2332 * n1 + m2 * n2 + ... - (n1 + n2 + ...) = size of join - size of inner relation.
2335 * This equation works correctly for outer tuples having no inner match
2336 * (nk = 0), but not for inner tuples having no outer match (mk = 0); we
2337 * are effectively subtracting those from the number of rescanned tuples,
2338 * when we should not. Can we do better without expensive selectivity computations?
2341 * The whole issue is moot if we are working from a unique-ified outer input.
2344 if (IsA(outer_path, UniquePath))
2345 rescannedtuples = 0;
2348 rescannedtuples = mergejointuples - inner_path_rows;
2349 /* Must clamp because of possible underestimate */
2350 if (rescannedtuples < 0)
2351 rescannedtuples = 0;
2353 /* We'll inflate various costs this much to account for rescanning */
2354 rescanratio = 1.0 + (rescannedtuples / inner_path_rows);
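/*
 * Worked instance of the identity above (illustrative numbers only): with
 * outer key multiplicities m1 = 2, m2 = 1 and inner multiplicities
 * n1 = 3, n2 = 4, the join produces 2*3 + 1*4 = 10 tuples, the inner
 * relation holds 3 + 4 = 7, so rescannedtuples = 10 - 7 = 3, matching
 * (2-1)*3 + (1-1)*4 = 3 directly.  The resulting rescanratio is
 * 1.0 + 3/7, i.e. inner-side charges are inflated by roughly 43%.
 */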
2357 * Decide whether we want to materialize the inner input to shield it from
2358 * mark/restore and performing re-fetches. Our cost model for regular
2359 * re-fetches is that a re-fetch costs the same as an original fetch,
2360 * which is probably an overestimate; but on the other hand we ignore the
2361 * bookkeeping costs of mark/restore. Not clear if it's worth developing
2362 * a more refined model. So we just need to inflate the inner run cost by rescanratio.
2365 bare_inner_cost = inner_run_cost * rescanratio;
2368 * When we interpose a Material node the re-fetch cost is assumed to be
2369 * just cpu_operator_cost per tuple, independently of the underlying
2370 * plan's cost; and we charge an extra cpu_operator_cost per original
2371 * fetch as well. Note that we're assuming the materialize node will
2372 * never spill to disk, since it only has to remember tuples back to the
2373 * last mark. (If there are a huge number of duplicates, our other cost
2374 * factors will make the path so expensive that it probably won't get
2375 * chosen anyway.) So we don't use cost_rescan here.
2377 * Note: keep this estimate in sync with create_mergejoin_plan's labeling
2378 * of the generated Material node.
2380 mat_inner_cost = inner_run_cost +
2381 cpu_operator_cost * inner_path_rows * rescanratio;
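/*
 * Illustrative comparison (made-up numbers, assuming the default
 * cpu_operator_cost of 0.0025): with inner_run_cost = 1000,
 * inner_path_rows = 100000 and rescanratio = 1.5, bare_inner_cost is
 * 1500 while mat_inner_cost is 1000 + 0.0025 * 100000 * 1.5 = 1375, so
 * materializing the inner side would look cheaper here.
 */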
2384 * Prefer materializing if it looks cheaper, unless the user has asked to
2385 * suppress materialization.
2387 if (enable_material && mat_inner_cost < bare_inner_cost)
2388 path->materialize_inner = true;
2391 * Even if materializing doesn't look cheaper, we *must* do it if the
2392 * inner path is to be used directly (without sorting) and it doesn't
2393 * support mark/restore.
2395 * Since the inner side must be ordered, and only Sorts and IndexScans can
2396 * create order to begin with, and they both support mark/restore, you
2397 * might think there's no problem --- but you'd be wrong. Nestloop and
2398 * merge joins can *preserve* the order of their inputs, so they can be
2399 * selected as the input of a mergejoin, and they don't support
2400 * mark/restore at present.
2402 * We don't test the value of enable_material here, because
2403 * materialization is required for correctness in this case, and turning
2404 * it off does not entitle us to deliver an invalid plan.
2406 else if (innersortkeys == NIL &&
2407 !ExecSupportsMarkRestore(inner_path))
2408 path->materialize_inner = true;
2411 * Also, force materializing if the inner path is to be sorted and the
2412 * sort is expected to spill to disk. This is because the final merge
2413 * pass can be done on-the-fly if it doesn't have to support mark/restore.
2414 * We don't try to adjust the cost estimates for this consideration, though.
2417 * Since materialization is a performance optimization in this case,
2418 * rather than necessary for correctness, we skip it if enable_material is off.
2421 else if (enable_material && innersortkeys != NIL &&
2422 relation_byte_size(inner_path_rows, inner_path->parent->width) > (work_mem * 1024L))
2424 path->materialize_inner = true;
2426 path->materialize_inner = false;
2428 /* Charge the right incremental cost for the chosen case */
2429 if (path->materialize_inner)
2430 run_cost += mat_inner_cost;
2432 run_cost += bare_inner_cost;
2437 * The number of tuple comparisons needed is approximately number of outer
2438 * rows plus number of inner rows plus number of rescanned tuples (can we
2439 * refine this?). At each one, we need to evaluate the mergejoin quals.
2441 startup_cost += merge_qual_cost.startup;
2442 startup_cost += merge_qual_cost.per_tuple *
2443 (outer_skip_rows + inner_skip_rows * rescanratio);
2444 run_cost += merge_qual_cost.per_tuple *
2445 ((outer_rows - outer_skip_rows) +
2446 (inner_rows - inner_skip_rows) * rescanratio);
2449 * For each tuple that gets through the mergejoin proper, we charge
2450 * cpu_tuple_cost plus the cost of evaluating additional restriction
2451 * clauses that are to be applied at the join. (This is pessimistic since
2452 * not all of the quals may get evaluated at each tuple.)
2454 * Note: we could adjust for SEMI/ANTI joins skipping some qual
2455 * evaluations here, but it's probably not worth the trouble.
2457 startup_cost += qp_qual_cost.startup;
2458 cpu_per_tuple = cpu_tuple_cost + qp_qual_cost.per_tuple;
2459 run_cost += cpu_per_tuple * mergejointuples;
2461 path->jpath.path.startup_cost = startup_cost;
2462 path->jpath.path.total_cost = startup_cost + run_cost;
2466 * run mergejoinscansel() with caching
2468 static MergeScanSelCache *
2469 cached_scansel(PlannerInfo *root, RestrictInfo *rinfo, PathKey *pathkey)
2471 MergeScanSelCache *cache;
2473 Selectivity leftstartsel, leftendsel, rightstartsel, rightendsel;
2477 MemoryContext oldcontext;
2479 /* Do we have this result already? */
2480 foreach(lc, rinfo->scansel_cache)
2482 cache = (MergeScanSelCache *) lfirst(lc);
2483 if (cache->opfamily == pathkey->pk_opfamily &&
2484 cache->collation == pathkey->pk_eclass->ec_collation &&
2485 cache->strategy == pathkey->pk_strategy &&
2486 cache->nulls_first == pathkey->pk_nulls_first)
2490 /* Nope, do the computation */
2491 mergejoinscansel(root,
2492 (Node *) rinfo->clause,
2493 pathkey->pk_opfamily,
2494 pathkey->pk_strategy,
2495 pathkey->pk_nulls_first, &leftstartsel, &leftendsel, &rightstartsel, &rightendsel);
2501 /* Cache the result in suitably long-lived workspace */
2502 oldcontext = MemoryContextSwitchTo(root->planner_cxt);
2504 cache = (MergeScanSelCache *) palloc(sizeof(MergeScanSelCache));
2505 cache->opfamily = pathkey->pk_opfamily;
2506 cache->collation = pathkey->pk_eclass->ec_collation;
2507 cache->strategy = pathkey->pk_strategy;
2508 cache->nulls_first = pathkey->pk_nulls_first;
2509 cache->leftstartsel = leftstartsel;
2510 cache->leftendsel = leftendsel;
2511 cache->rightstartsel = rightstartsel;
2512 cache->rightendsel = rightendsel;
2514 rinfo->scansel_cache = lappend(rinfo->scansel_cache, cache);
2516 MemoryContextSwitchTo(oldcontext);
2522 * initial_cost_hashjoin
2523 * Preliminary estimate of the cost of a hashjoin path.
2525 * This must quickly produce lower-bound estimates of the path's startup and
2526 * total costs. If we are unable to eliminate the proposed path from
2527 * consideration using the lower bounds, final_cost_hashjoin will be called
2528 * to obtain the final estimates.
2530 * The exact division of labor between this function and final_cost_hashjoin
2531 * is private to them, and represents a tradeoff between speed of the initial
2532 * estimate and getting a tight lower bound. We choose to not examine the
2533 * join quals here (other than by counting the number of hash clauses),
2534 * so we can't do much with CPU costs. We do assume that
2535 * ExecChooseHashTableSize is cheap enough to use here.
2537 * 'workspace' is to be filled with startup_cost, total_cost, and perhaps
2538 * other data to be used by final_cost_hashjoin
2539 * 'jointype' is the type of join to be performed
2540 * 'hashclauses' is the list of joinclauses to be used as hash clauses
2541 * 'outer_path' is the outer input to the join
2542 * 'inner_path' is the inner input to the join
2543 * 'sjinfo' is extra info about the join for selectivity estimation
2544 * 'semifactors' contains valid data if jointype is SEMI or ANTI
2547 initial_cost_hashjoin(PlannerInfo *root, JoinCostWorkspace *workspace, JoinType jointype, List *hashclauses,
2550 Path *outer_path, Path *inner_path,
2551 SpecialJoinInfo *sjinfo,
2552 SemiAntiJoinFactors *semifactors)
2554 Cost startup_cost = 0;
2556 double outer_path_rows = outer_path->rows;
2557 double inner_path_rows = inner_path->rows;
2558 int num_hashclauses = list_length(hashclauses);
2563 /* cost of source data */
2564 startup_cost += outer_path->startup_cost;
2565 run_cost += outer_path->total_cost - outer_path->startup_cost;
2566 startup_cost += inner_path->total_cost;
2569 * Cost of computing hash function: must do it once per input tuple. We
2570 * charge one cpu_operator_cost for each column's hash function. Also,
2571 * tack on one cpu_tuple_cost per inner row, to model the costs of
2572 * inserting the row into the hashtable.
2574 * XXX when a hashclause is more complex than a single operator, we really
2575 * should charge the extra eval costs of the left or right side, as
2576 * appropriate, here. This seems more work than it's worth at the moment.
2578 startup_cost += (cpu_operator_cost * num_hashclauses + cpu_tuple_cost) * inner_path_rows;
2580 run_cost += cpu_operator_cost * num_hashclauses * outer_path_rows;
2583 * Get hash table size that executor would use for inner relation.
2585 * XXX for the moment, always assume that skew optimization will be
2586 * performed. As long as SKEW_WORK_MEM_PERCENT is small, it's not worth
2587 * trying to determine that for sure.
2589 * XXX at some point it might be interesting to try to account for skew
2590 * optimization in the cost estimate, but for now, we don't.
2592 ExecChooseHashTableSize(inner_path_rows,
2593 inner_path->parent->width, true /* useskew */, &numbuckets, &numbatches, &num_skew_mcvs);
2600 * If inner relation is too big then we will need to "batch" the join,
2601 * which implies writing and reading most of the tuples to disk an extra
2602 * time. Charge seq_page_cost per page, since the I/O should be nice and
2603 * sequential. Writing the inner rel counts as startup cost, all the rest is run cost.
2608 double outerpages = page_size(outer_path_rows,
2609 outer_path->parent->width);
2610 double innerpages = page_size(inner_path_rows,
2611 inner_path->parent->width);
2613 startup_cost += seq_page_cost * innerpages;
2614 run_cost += seq_page_cost * (innerpages + 2 * outerpages);
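/*
 * Worked example (illustrative numbers only, assuming the default
 * seq_page_cost of 1.0): if the inner relation occupies 1000 pages and
 * the outer 4000, batching adds 1000 to startup_cost (writing the inner
 * rel out) and 1000 + 2 * 4000 = 9000 to run_cost (re-reading the inner
 * rel plus writing and re-reading the outer rel).
 */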
2617 /* CPU costs left for later */
2619 /* Public result fields */
2620 workspace->startup_cost = startup_cost;
2621 workspace->total_cost = startup_cost + run_cost;
2622 /* Save private data for final_cost_hashjoin */
2623 workspace->run_cost = run_cost;
2624 workspace->numbuckets = numbuckets;
2625 workspace->numbatches = numbatches;
2629 * final_cost_hashjoin
2630 * Final estimate of the cost and result size of a hashjoin path.
2632 * Note: the numbatches estimate is also saved into 'path' for use later
2634 * 'path' is already filled in except for the rows and cost fields and num_batches.
2636 * 'workspace' is the result from initial_cost_hashjoin
2637 * 'sjinfo' is extra info about the join for selectivity estimation
2638 * 'semifactors' contains valid data if path->jointype is SEMI or ANTI
2641 final_cost_hashjoin(PlannerInfo *root, HashPath *path,
2642 JoinCostWorkspace *workspace,
2643 SpecialJoinInfo *sjinfo,
2644 SemiAntiJoinFactors *semifactors)
2646 Path *outer_path = path->jpath.outerjoinpath;
2647 Path *inner_path = path->jpath.innerjoinpath;
2648 double outer_path_rows = outer_path->rows;
2649 double inner_path_rows = inner_path->rows;
2650 List *hashclauses = path->path_hashclauses;
2651 Cost startup_cost = workspace->startup_cost;
2652 Cost run_cost = workspace->run_cost;
2653 int numbuckets = workspace->numbuckets;
2654 int numbatches = workspace->numbatches;
2656 QualCost hash_qual_cost;
2657 QualCost qp_qual_cost;
2658 double hashjointuples;
2659 double virtualbuckets;
2660 Selectivity innerbucketsize;
2663 /* Mark the path with the correct row estimate */
2664 if (path->jpath.path.param_info)
2665 path->jpath.path.rows = path->jpath.path.param_info->ppi_rows;
2667 path->jpath.path.rows = path->jpath.path.parent->rows;
2670 * We could include disable_cost in the preliminary estimate, but that
2671 * would amount to optimizing for the case where the join method is
2672 * disabled, which doesn't seem like the way to bet.
2674 if (!enable_hashjoin)
2675 startup_cost += disable_cost;
2677 /* mark the path with estimated # of batches */
2678 path->num_batches = numbatches;
2680 /* and compute the number of "virtual" buckets in the whole join */
2681 virtualbuckets = (double) numbuckets * (double) numbatches;
2684 * Determine bucketsize fraction for inner relation. We use the smallest
2685 * bucketsize estimated for any individual hashclause; this is undoubtedly conservative.
2688 * BUT: if inner relation has been unique-ified, we can assume it's good
2689 * for hashing. This is important both because it's the right answer, and
2690 * because we avoid contaminating the cache with a value that's wrong for
2691 * non-unique-ified paths.
2693 if (IsA(inner_path, UniquePath))
2694 innerbucketsize = 1.0 / virtualbuckets;
2697 innerbucketsize = 1.0;
2698 foreach(hcl, hashclauses)
2700 RestrictInfo *restrictinfo = (RestrictInfo *) lfirst(hcl);
2701 Selectivity thisbucketsize;
2703 Assert(IsA(restrictinfo, RestrictInfo));
2706 * First we have to figure out which side of the hashjoin clause
2707 * is the inner side.
2709 * Since we tend to visit the same clauses over and over when
2710 * planning a large query, we cache the bucketsize estimate in the
2711 * RestrictInfo node to avoid repeated lookups of statistics.
2713 if (bms_is_subset(restrictinfo->right_relids,
2714 inner_path->parent->relids))
2716 /* righthand side is inner */
2717 thisbucketsize = restrictinfo->right_bucketsize;
2718 if (thisbucketsize < 0)
2720 /* not cached yet */
2722 thisbucketsize = estimate_hash_bucketsize(root,
2723 get_rightop(restrictinfo->clause), virtualbuckets);
2725 restrictinfo->right_bucketsize = thisbucketsize;
2730 Assert(bms_is_subset(restrictinfo->left_relids,
2731 inner_path->parent->relids));
2732 /* lefthand side is inner */
2733 thisbucketsize = restrictinfo->left_bucketsize;
2734 if (thisbucketsize < 0)
2736 /* not cached yet */
2738 thisbucketsize = estimate_hash_bucketsize(root,
2739 get_leftop(restrictinfo->clause), virtualbuckets);
2741 restrictinfo->left_bucketsize = thisbucketsize;
2745 if (innerbucketsize > thisbucketsize)
2746 innerbucketsize = thisbucketsize;
2751 * Compute cost of the hashquals and qpquals (other restriction clauses)
2754 cost_qual_eval(&hash_qual_cost, hashclauses, root);
2755 cost_qual_eval(&qp_qual_cost, path->jpath.joinrestrictinfo, root);
2756 qp_qual_cost.startup -= hash_qual_cost.startup;
2757 qp_qual_cost.per_tuple -= hash_qual_cost.per_tuple;
2761 if (path->jpath.jointype == JOIN_SEMI || path->jpath.jointype == JOIN_ANTI)
2763 double outer_matched_rows;
2764 Selectivity inner_scan_frac;
2767 * SEMI or ANTI join: executor will stop after first match.
2769 * For an outer-rel row that has at least one match, we can expect the
2770 * bucket scan to stop after a fraction 1/(match_count+1) of the
2771 * bucket's rows, if the matches are evenly distributed. Since they
2772 * probably aren't quite evenly distributed, we apply a fuzz factor of
2773 * 2.0 to that fraction. (If we used a larger fuzz factor, we'd have
2774 * to clamp inner_scan_frac to at most 1.0; but since match_count is
2775 * at least 1, no such clamp is needed now.)
2777 outer_matched_rows = rint(outer_path_rows * semifactors->outer_match_frac);
2778 inner_scan_frac = 2.0 / (semifactors->match_count + 1.0);
2780 startup_cost += hash_qual_cost.startup;
2781 run_cost += hash_qual_cost.per_tuple * outer_matched_rows *
2782 clamp_row_est(inner_path_rows * innerbucketsize * inner_scan_frac) * 0.5;
2785 * For unmatched outer-rel rows, the picture is quite a lot different.
2786 * In the first place, there is no reason to assume that these rows
2787 * preferentially hit heavily-populated buckets; instead assume they
2788 * are uncorrelated with the inner distribution and so they see an
2789 * average bucket size of inner_path_rows / virtualbuckets. In the
2790 * second place, it seems likely that they will have few if any exact
2791 * hash-code matches and so very few of the tuples in the bucket will
2792 * actually require eval of the hash quals. We don't have any good
2793 * way to estimate how many will, but for the moment assume that the
2794 * effective cost per bucket entry is one-tenth what it is for matchable tuples.
2797 run_cost += hash_qual_cost.per_tuple *
2798 (outer_path_rows - outer_matched_rows) *
2799 clamp_row_est(inner_path_rows / virtualbuckets) * 0.05;
2801 /* Get # of tuples that will pass the basic join */
2802 if (path->jpath.jointype == JOIN_SEMI)
2803 hashjointuples = outer_matched_rows;
2805 hashjointuples = outer_path_rows - outer_matched_rows;
2810 * The number of tuple comparisons needed is the number of outer
2811 * tuples times the typical number of tuples in a hash bucket, which
2812 * is the inner relation size times its bucketsize fraction. At each
2813 * one, we need to evaluate the hashjoin quals. But actually,
2814 * charging the full qual eval cost at each tuple is pessimistic,
2815 * since we don't evaluate the quals unless the hash values match
2816 * exactly. For lack of a better idea, halve the cost estimate to allow for that.
2819 startup_cost += hash_qual_cost.startup;
2820 run_cost += hash_qual_cost.per_tuple * outer_path_rows *
2821 clamp_row_est(inner_path_rows * innerbucketsize) * 0.5;
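/*
 * Worked example (illustrative numbers only): with 100000 outer rows,
 * 50000 inner rows and innerbucketsize = 0.0001, a typical bucket holds
 * about 50000 * 0.0001 = 5 inner tuples, so we charge the hash quals for
 * roughly 100000 * 5 * 0.5 = 250000 evaluations.
 */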
2824 * Get approx # tuples passing the hashquals. We use
2825 * approx_tuple_count here because we need an estimate done with
2826 * JOIN_INNER semantics.
2828 hashjointuples = approx_tuple_count(root, &path->jpath, hashclauses);
2832 * For each tuple that gets through the hashjoin proper, we charge
2833 * cpu_tuple_cost plus the cost of evaluating additional restriction
2834 * clauses that are to be applied at the join. (This is pessimistic since
2835 * not all of the quals may get evaluated at each tuple.)
2837 startup_cost += qp_qual_cost.startup;
2838 cpu_per_tuple = cpu_tuple_cost + qp_qual_cost.per_tuple;
2839 run_cost += cpu_per_tuple * hashjointuples;
2841 path->jpath.path.startup_cost = startup_cost;
2842 path->jpath.path.total_cost = startup_cost + run_cost;
2848 * Figure the costs for a SubPlan (or initplan).
2850 * Note: we could dig the subplan's Plan out of the root list, but in practice
2851 * all callers have it handy already, so we make them pass it.
2854 cost_subplan(PlannerInfo *root, SubPlan *subplan, Plan *plan)
2858 /* Figure any cost for evaluating the testexpr */
2859 cost_qual_eval(&sp_cost,
2860 make_ands_implicit((Expr *) subplan->testexpr),
2863 if (subplan->useHashTable)
2866 * If we are using a hash table for the subquery outputs, then the
2867 * cost of evaluating the query is a one-time cost. We charge one
2868 * cpu_operator_cost per tuple for the work of loading the hashtable, too.
2871 sp_cost.startup += plan->total_cost +
2872 cpu_operator_cost * plan->plan_rows;
2875 * The per-tuple costs include the cost of evaluating the lefthand
2876 * expressions, plus the cost of probing the hashtable. We already
2877 * accounted for the lefthand expressions as part of the testexpr, and
2878 * will also have counted one cpu_operator_cost for each comparison
2879 * operator. That is probably too low for the probing cost, but it's
2880 * hard to make a better estimate, so live with it for now.
2886 * Otherwise we will be rescanning the subplan output on each
2887 * evaluation. We need to estimate how much of the output we will
2888 * actually need to scan. NOTE: this logic should agree with the
2889 * tuple_fraction estimates used by make_subplan() in plan/subselect.c.
2892 Cost plan_run_cost = plan->total_cost - plan->startup_cost;
2894 if (subplan->subLinkType == EXISTS_SUBLINK)
2896 /* we only need to fetch 1 tuple */
2897 sp_cost.per_tuple += plan_run_cost / plan->plan_rows;
2899 else if (subplan->subLinkType == ALL_SUBLINK ||
2900 subplan->subLinkType == ANY_SUBLINK)
2902 /* assume we need 50% of the tuples */
2903 sp_cost.per_tuple += 0.50 * plan_run_cost;
2904 /* also charge a cpu_operator_cost per row examined */
2905 sp_cost.per_tuple += 0.50 * plan->plan_rows * cpu_operator_cost;
2909 /* assume we need all tuples */
2910 sp_cost.per_tuple += plan_run_cost;
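/*
 * For example (illustrative numbers only): with plan_run_cost = 1000 and
 * plan_rows = 10000, an EXISTS sublink is charged 1000 / 10000 = 0.1 per
 * call, an ANY/ALL sublink 0.5 * 1000 + 0.5 * 10000 * cpu_operator_cost
 * (= 512.5 with the default 0.0025), and any other sublink type the full
 * 1000 per call.
 */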
2914 * Also account for subplan's startup cost. If the subplan is
2915 * uncorrelated or undirect correlated, AND its topmost node is one
2916 * that materializes its output, assume that we'll only need to pay
2917 * its startup cost once; otherwise assume we pay the startup cost every time.
2920 if (subplan->parParam == NIL &&
2921 ExecMaterializesOutput(nodeTag(plan)))
2922 sp_cost.startup += plan->startup_cost;
2924 sp_cost.per_tuple += plan->startup_cost;
2927 subplan->startup_cost = sp_cost.startup;
2928 subplan->per_call_cost = sp_cost.per_tuple;
2934 * Given a finished Path, estimate the costs of rescanning it after
2935 * having done so the first time. For some Path types a rescan is
2936 * cheaper than an original scan (if no parameters change), and this
2937 * function embodies knowledge about that. The default is to return
2938 * the same costs stored in the Path. (Note that the cost estimates
2939 * actually stored in Paths are always for first scans.)
2941 * This function is not currently intended to model effects such as rescans
2942 * being cheaper due to disk block caching; what we are concerned with is
2943 * plan types wherein the executor caches results explicitly, or doesn't
2944 * redo startup calculations, etc.
2947 cost_rescan(PlannerInfo *root, Path *path,
2948 Cost *rescan_startup_cost, /* output parameters */
2949 Cost *rescan_total_cost)
2951 switch (path->pathtype)
2953 case T_FunctionScan:
2956 * Currently, nodeFunctionscan.c always executes the function to
2957 * completion before returning any rows, and caches the results in
2958 * a tuplestore. So the function eval cost is all startup cost
2959 * and isn't paid over again on rescans. However, all run costs
2960 * will be paid over again.
2962 *rescan_startup_cost = 0;
2963 *rescan_total_cost = path->total_cost - path->startup_cost;
2968 * Assume that all of the startup cost represents hash table
2969 * building, which we won't have to do over.
2971 *rescan_startup_cost = 0;
2972 *rescan_total_cost = path->total_cost - path->startup_cost;
2975 case T_WorkTableScan:
2978 * These plan types materialize their final result in a
2979 * tuplestore or tuplesort object. So the rescan cost is only
2980 * cpu_tuple_cost per tuple, unless the result is large enough to spill to disk.
2983 Cost run_cost = cpu_tuple_cost * path->rows;
2984 double nbytes = relation_byte_size(path->rows,
2985 path->parent->width);
2986 long work_mem_bytes = work_mem * 1024L;
2988 if (nbytes > work_mem_bytes)
2990 /* It will spill, so account for re-read cost */
2991 double npages = ceil(nbytes / BLCKSZ);
2993 run_cost += seq_page_cost * npages;
2995 *rescan_startup_cost = 0;
2996 *rescan_total_cost = run_cost;
3003 * These plan types not only materialize their results, but do
3004 * not implement qual filtering or projection. So they are
3005 * even cheaper to rescan than the ones above. We charge only
3006 * cpu_operator_cost per tuple. (Note: keep that in sync with
3007 * the run_cost charge in cost_sort, and also see comments in
3008 * cost_material before you change it.)
3010 Cost run_cost = cpu_operator_cost * path->rows;
3011 double nbytes = relation_byte_size(path->rows,
3012 path->parent->width);
3013 long work_mem_bytes = work_mem * 1024L;
3015 if (nbytes > work_mem_bytes)
3017 /* It will spill, so account for re-read cost */
3018 double npages = ceil(nbytes / BLCKSZ);
3020 run_cost += seq_page_cost * npages;
3022 *rescan_startup_cost = 0;
3023 *rescan_total_cost = run_cost;
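/*
 * Rough example (illustrative numbers only, assuming the default work_mem
 * of 4MB, seq_page_cost of 1.0 and 8kB pages): a materialized result of
 * one million 100-byte rows far exceeds work_mem, so besides
 * cpu_operator_cost per tuple (2500 with the default 0.0025) we also
 * charge one seq_page_cost per page of the spill file; with per-tuple
 * storage overhead that is on the order of another 15000-16000 cost units.
 */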
3027 *rescan_startup_cost = path->startup_cost;
3028 *rescan_total_cost = path->total_cost;
3036 * Estimate the CPU costs of evaluating a WHERE clause.
3037 * The input can be either an implicitly-ANDed list of boolean
3038 * expressions, or a list of RestrictInfo nodes. (The latter is
3039 * preferred since it allows caching of the results.)
3040 * The result includes both a one-time (startup) component,
3041 * and a per-evaluation component.
3044 cost_qual_eval(QualCost *cost, List *quals, PlannerInfo *root)
3046 cost_qual_eval_context context;
3049 context.root = root;
3050 context.total.startup = 0;
3051 context.total.per_tuple = 0;
3053 /* We don't charge any cost for the implicit ANDing at top level ... */
3057 Node *qual = (Node *) lfirst(l);
3059 cost_qual_eval_walker(qual, &context);
3062 *cost = context.total;
3066 * cost_qual_eval_node
3067 * As above, for a single RestrictInfo or expression.
3070 cost_qual_eval_node(QualCost *cost, Node *qual, PlannerInfo *root)
3072 cost_qual_eval_context context;
3074 context.root = root;
3075 context.total.startup = 0;
3076 context.total.per_tuple = 0;
3078 cost_qual_eval_walker(qual, &context);
3080 *cost = context.total;
3084 cost_qual_eval_walker(Node *node, cost_qual_eval_context *context)
3090 * RestrictInfo nodes contain an eval_cost field reserved for this
3091 * routine's use, so that it's not necessary to evaluate the qual clause's
3092 * cost more than once. If the clause's cost hasn't been computed yet,
3093 * the field's startup value will contain -1.
3095 if (IsA(node, RestrictInfo))
3097 RestrictInfo *rinfo = (RestrictInfo *) node;
3099 if (rinfo->eval_cost.startup < 0)
3101 cost_qual_eval_context locContext;
3103 locContext.root = context->root;
3104 locContext.total.startup = 0;
3105 locContext.total.per_tuple = 0;
3108 * For an OR clause, recurse into the marked-up tree so that we
3109 * set the eval_cost for contained RestrictInfos too.
3111 if (rinfo->orclause)
3112 cost_qual_eval_walker((Node *) rinfo->orclause, &locContext);
3114 cost_qual_eval_walker((Node *) rinfo->clause, &locContext);
3117 * If the RestrictInfo is marked pseudoconstant, it will be tested
3118 * only once, so treat its cost as all startup cost.
3120 if (rinfo->pseudoconstant)
3122 /* count one execution during startup */
3123 locContext.total.startup += locContext.total.per_tuple;
3124 locContext.total.per_tuple = 0;
3126 rinfo->eval_cost = locContext.total;
3128 context->total.startup += rinfo->eval_cost.startup;
3129 context->total.per_tuple += rinfo->eval_cost.per_tuple;
3130 /* do NOT recurse into children */
3135 * For each operator or function node in the given tree, we charge the
3136 * estimated execution cost given by pg_proc.procost (remember to multiply
3137 * this by cpu_operator_cost).
3139 * Vars and Consts are charged zero, and so are boolean operators (AND,
3140 * OR, NOT). Simplistic, but a lot better than no model at all.
3142 * Should we try to account for the possibility of short-circuit
3143 * evaluation of AND/OR? Probably *not*, because that would make the
3144 * results depend on the clause ordering, and we are not in any position
3145 * to expect that the current ordering of the clauses is the one that's
3146 * going to end up being used. The above per-RestrictInfo caching would
3147 * not mix well with trying to re-order clauses anyway.
3149 * Another issue that is entirely ignored here is that if a set-returning
3150 * function is below top level in the tree, the functions/operators above
3151 * it will need to be evaluated multiple times. In practical use, such
3152 * cases arise so seldom as to not be worth the added complexity needed;
3153 * moreover, since our rowcount estimates for functions tend to be pretty
3154 * phony, the results would also be pretty phony.
3156 if (IsA(node, FuncExpr))
3158 context->total.per_tuple +=
3159 get_func_cost(((FuncExpr *) node)->funcid) * cpu_operator_cost;
3161 else if (IsA(node, OpExpr) ||
3162 IsA(node, DistinctExpr) ||
3163 IsA(node, NullIfExpr))
3165 /* rely on struct equivalence to treat these all alike */
3166 set_opfuncid((OpExpr *) node);
3167 context->total.per_tuple +=
3168 get_func_cost(((OpExpr *) node)->opfuncid) * cpu_operator_cost;
3170 else if (IsA(node, ScalarArrayOpExpr))
3173 * Estimate that the operator will be applied to about half of the
3174 * array elements before the answer is determined.
3176 ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) node;
3177 Node *arraynode = (Node *) lsecond(saop->args);
3179 set_sa_opfuncid(saop);
3180 context->total.per_tuple += get_func_cost(saop->opfuncid) *
3181 cpu_operator_cost * estimate_array_length(arraynode) * 0.5;
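/*
 * For example (illustrative numbers only): "x = ANY(arr)" where arr is
 * estimated to hold 20 elements is charged as 20 * 0.5 = 10 applications
 * of the comparison operator per tuple, i.e. 10 * cpu_operator_cost for a
 * default-cost (procost = 1) operator.
 */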
3183 else if (IsA(node, Aggref) ||
3184 IsA(node, WindowFunc))
3187 * Aggref and WindowFunc nodes are (and should be) treated like Vars,
3188 * ie, zero execution cost in the current model, because they behave
3189 * essentially like Vars in execQual.c. We disregard the costs of
3190 * their input expressions for the same reason. The actual execution
3191 * costs of the aggregate/window functions and their arguments have to
3192 * be factored into plan-node-specific costing of the Agg or WindowAgg plan node.
3195 return false; /* don't recurse into children */
3197 else if (IsA(node, CoerceViaIO))
3199 CoerceViaIO *iocoerce = (CoerceViaIO *) node;
3204 /* check the result type's input function */
3205 getTypeInputInfo(iocoerce->resulttype,
3206 &iofunc, &typioparam);
3207 context->total.per_tuple += get_func_cost(iofunc) * cpu_operator_cost;
3208 /* check the input type's output function */
3209 getTypeOutputInfo(exprType((Node *) iocoerce->arg),
3210 &iofunc, &typisvarlena);
3211 context->total.per_tuple += get_func_cost(iofunc) * cpu_operator_cost;
3213 else if (IsA(node, ArrayCoerceExpr))
3215 ArrayCoerceExpr *acoerce = (ArrayCoerceExpr *) node;
3216 Node *arraynode = (Node *) acoerce->arg;
3218 if (OidIsValid(acoerce->elemfuncid))
3219 context->total.per_tuple += get_func_cost(acoerce->elemfuncid) *
3220 cpu_operator_cost * estimate_array_length(arraynode);
3222 else if (IsA(node, RowCompareExpr))
3224 /* Conservatively assume we will check all the columns */
3225 RowCompareExpr *rcexpr = (RowCompareExpr *) node;
3228 foreach(lc, rcexpr->opnos)
3230 Oid opid = lfirst_oid(lc);
3232 context->total.per_tuple += get_func_cost(get_opcode(opid)) * cpu_operator_cost;
3236 else if (IsA(node, CurrentOfExpr))
3238 /* Report high cost to prevent selection of anything but TID scan */
3239 context->total.startup += disable_cost;
3241 else if (IsA(node, SubLink))
3243 /* This routine should not be applied to un-planned expressions */
3244 elog(ERROR, "cannot handle unplanned sub-select");
3246 else if (IsA(node, SubPlan))
3249 * A subplan node in an expression typically indicates that the
3250 * subplan will be executed on each evaluation, so charge accordingly.
3251 * (Sub-selects that can be executed as InitPlans have already been
3252 * removed from the expression.)
3254 SubPlan *subplan = (SubPlan *) node;
3256 context->total.startup += subplan->startup_cost;
3257 context->total.per_tuple += subplan->per_call_cost;
3260 * We don't want to recurse into the testexpr, because it was already
3261 * counted in the SubPlan node's costs. So we're done.
3265 else if (IsA(node, AlternativeSubPlan))
3268 * Arbitrarily use the first alternative plan for costing. (We should
3269 * certainly only include one alternative, and we don't yet have
3270 * enough information to know which one the executor is most likely to use.)
3273 AlternativeSubPlan *asplan = (AlternativeSubPlan *) node;
3275 return cost_qual_eval_walker((Node *) linitial(asplan->subplans), context);
3279 /* recurse into children */
3280 return expression_tree_walker(node, cost_qual_eval_walker, (void *) context);
3285 * get_restriction_qual_cost
3286 * Compute evaluation costs of a baserel's restriction quals, plus any
3287 * movable join quals that have been pushed down to the scan.
3288 * Results are returned into *qpqual_cost.
3290 * This is a convenience subroutine that works for seqscans and other cases
3291 * where all the given quals will be evaluated the hard way. It's not useful
3292 * for cost_index(), for example, where the index machinery takes care of
3293 * some of the quals. We assume baserestrictcost was previously set by
3294 * set_baserel_size_estimates().
3297 get_restriction_qual_cost(PlannerInfo *root, RelOptInfo *baserel,
3298 ParamPathInfo *param_info,
3299 QualCost *qpqual_cost)
3303 /* Include costs of pushed-down clauses */
3304 cost_qual_eval(qpqual_cost, param_info->ppi_clauses, root);
3306 qpqual_cost->startup += baserel->baserestrictcost.startup;
3307 qpqual_cost->per_tuple += baserel->baserestrictcost.per_tuple;
3310 *qpqual_cost = baserel->baserestrictcost;
3315 * compute_semi_anti_join_factors
3316 * Estimate how much of the inner input a SEMI or ANTI join
3317 * can be expected to scan.
3319 * In a hash or nestloop SEMI/ANTI join, the executor will stop scanning
3320 * inner rows as soon as it finds a match to the current outer row.
3321 * We should therefore adjust some of the cost components for this effect.
3322 * This function computes some estimates needed for these adjustments.
3323 * These estimates will be the same regardless of the particular paths used
3324 * for the outer and inner relation, so we compute these once and then pass
3325 * them to all the join cost estimation functions.
3328 * outerrel: outer relation under consideration
3329 * innerrel: inner relation under consideration
3330 * jointype: must be JOIN_SEMI or JOIN_ANTI
3331 * sjinfo: SpecialJoinInfo relevant to this join
3332 * restrictlist: join quals
3333 * Output parameters:
3334 * *semifactors is filled in (see relation.h for field definitions)
3337 compute_semi_anti_join_factors(PlannerInfo *root,
3338 RelOptInfo *outerrel,
3339 RelOptInfo *innerrel, JoinType jointype,
3341 SpecialJoinInfo *sjinfo, List *restrictlist,
3343 SemiAntiJoinFactors *semifactors)
3347 Selectivity avgmatch;
3348 SpecialJoinInfo norm_sjinfo;
3352 /* Should only be called in these cases */
3353 Assert(jointype == JOIN_SEMI || jointype == JOIN_ANTI);
3356 * In an ANTI join, we must ignore clauses that are "pushed down", since
3357 * those won't affect the match logic. In a SEMI join, we do not
3358 * distinguish joinquals from "pushed down" quals, so just use the whole
3359 * restrictinfo list.
3361 if (jointype == JOIN_ANTI)
3364 foreach(l, restrictlist)
3366 RestrictInfo *rinfo = (RestrictInfo *) lfirst(l);
3368 Assert(IsA(rinfo, RestrictInfo));
3369 if (!rinfo->is_pushed_down)
3370 joinquals = lappend(joinquals, rinfo);
3374 joinquals = restrictlist;
3377 * Get the JOIN_SEMI or JOIN_ANTI selectivity of the join clauses.
3379 jselec = clauselist_selectivity(root, joinquals, 0, jointype, sjinfo);
3386 * Also get the normal inner-join selectivity of the join clauses.
3388 norm_sjinfo.type = T_SpecialJoinInfo;
3389 norm_sjinfo.min_lefthand = outerrel->relids;
3390 norm_sjinfo.min_righthand = innerrel->relids;
3391 norm_sjinfo.syn_lefthand = outerrel->relids;
3392 norm_sjinfo.syn_righthand = innerrel->relids;
3393 norm_sjinfo.jointype = JOIN_INNER;
3394 /* we don't bother trying to make the remaining fields valid */
3395 norm_sjinfo.lhs_strict = false;
3396 norm_sjinfo.delay_upper_joins = false;
3397 norm_sjinfo.semi_can_btree = false;
3398 norm_sjinfo.semi_can_hash = false;
3399 norm_sjinfo.semi_operators = NIL;
3400 norm_sjinfo.semi_rhs_exprs = NIL;
3402 nselec = clauselist_selectivity(root, joinquals, 0, JOIN_INNER, &norm_sjinfo);
3408 /* Avoid leaking a lot of ListCells */
3409 if (jointype == JOIN_ANTI)
3410 list_free(joinquals);
3413 * jselec can be interpreted as the fraction of outer-rel rows that have
3414 * any matches (this is true for both SEMI and ANTI cases). And nselec is
3415 * the fraction of the Cartesian product that matches. So, the average
3416 * number of matches for each outer-rel row that has at least one match is
3417 * nselec * inner_rows / jselec.
3419 * Note: it is correct to use the inner rel's "rows" count here, even
3420 * though we might later be considering a parameterized inner path with
3421 * fewer rows. This is because we have included all the join clauses in
3422 * the selectivity estimate.
3424 if (jselec > 0) /* protect against zero divide */
3426 avgmatch = nselec * innerrel->rows / jselec;
3427 /* Clamp to sane range */
3428 avgmatch = Max(1.0, avgmatch);
3433 semifactors->outer_match_frac = jselec;
3434 semifactors->match_count = avgmatch;
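/*
 * Worked example (illustrative numbers only): if jselec = 0.2 (20% of
 * outer rows have at least one match), nselec = 0.001 and the inner rel
 * has 5000 rows, then avgmatch = 0.001 * 5000 / 0.2 = 25.  The join cost
 * functions above then expect a matched outer row's inner scan to stop
 * after about 2 / (25 + 1), i.e. under 8%, of the candidate inner tuples.
 */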
3438 * has_indexed_join_quals
3439 * Check whether all the joinquals of a nestloop join are used as
3440 * inner index quals.
3442 * If the inner path of a SEMI/ANTI join is an indexscan (including bitmap
3443 * indexscan) that uses all the joinquals as indexquals, we can assume that an
3444 * unmatched outer tuple is cheap to process, whereas otherwise it's probably quite expensive.
3448 has_indexed_join_quals(NestPath *joinpath)
3450 Relids joinrelids = joinpath->path.parent->relids;
3451 Path *innerpath = joinpath->innerjoinpath;
3456 /* If join still has quals to evaluate, it's not fast */
3457 if (joinpath->joinrestrictinfo != NIL)
3459 /* Nor if the inner path isn't parameterized at all */
3460 if (innerpath->param_info == NULL)
3463 /* Find the indexclauses list for the inner scan */
3464 switch (innerpath->pathtype)
3467 case T_IndexOnlyScan:
3468 indexclauses = ((IndexPath *) innerpath)->indexclauses;
3470 case T_BitmapHeapScan:
3472 /* Accept only a simple bitmap scan, not AND/OR cases */
3473 Path *bmqual = ((BitmapHeapPath *) innerpath)->bitmapqual;
3475 if (IsA(bmqual, IndexPath))
3476 indexclauses = ((IndexPath *) bmqual)->indexclauses;
3484 * If it's not a simple indexscan, it probably doesn't run quickly
3485 * for zero rows out, even if it's a parameterized path using all the joinquals.
3492 * Examine the inner path's param clauses. Any that are from the outer
3493 * path must be found in the indexclauses list, either exactly or in an
3494 * equivalent form generated by equivclass.c. Also, we must find at least
3495 * one such clause, else it's a clauseless join which isn't fast.
3498 foreach(lc, innerpath->param_info->ppi_clauses)
3500 RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
3502 if (join_clause_is_movable_into(rinfo,
3503 innerpath->parent->relids, joinrelids))
3506 if (!(list_member_ptr(indexclauses, rinfo) ||
3507 is_redundant_derived_clause(rinfo, indexclauses)))
3517 * approx_tuple_count
3518 * Quick-and-dirty estimation of the number of join rows passing
3519 * a set of qual conditions.
3521 * The quals can be either an implicitly-ANDed list of boolean expressions,
3522 * or a list of RestrictInfo nodes (typically the latter).
3524 * We intentionally compute the selectivity under JOIN_INNER rules, even
3525 * if it's some type of outer join. This is appropriate because we are
3526 * trying to figure out how many tuples pass the initial merge or hash join condition(s).
3529 * This is quick-and-dirty because we bypass clauselist_selectivity, and
3530 * simply multiply the independent clause selectivities together. Now
3531 * clauselist_selectivity often can't do any better than that anyhow, but
3532 * for some situations (such as range constraints) it is smarter. However,
3533 * we can't effectively cache the results of clauselist_selectivity, whereas
3534 * the individual clause selectivities can be and are cached.
3536 * Since we are only using the results to estimate how many potential
3537 * output tuples are generated and passed through qpqual checking, it
3538 * seems OK to live with the approximation.
3541 approx_tuple_count(PlannerInfo *root, JoinPath *path, List *quals)
3544 double outer_tuples = path->outerjoinpath->rows;
3545 double inner_tuples = path->innerjoinpath->rows;
3546 SpecialJoinInfo sjinfo;
3547 Selectivity selec = 1.0;
3551 * Make up a SpecialJoinInfo for JOIN_INNER semantics.
3553 sjinfo.type = T_SpecialJoinInfo;
3554 sjinfo.min_lefthand = path->outerjoinpath->parent->relids;
3555 sjinfo.min_righthand = path->innerjoinpath->parent->relids;
3556 sjinfo.syn_lefthand = path->outerjoinpath->parent->relids;
3557 sjinfo.syn_righthand = path->innerjoinpath->parent->relids;
3558 sjinfo.jointype = JOIN_INNER;
3559 /* we don't bother trying to make the remaining fields valid */
3560 sjinfo.lhs_strict = false;
3561 sjinfo.delay_upper_joins = false;
3562 sjinfo.semi_can_btree = false;
3563 sjinfo.semi_can_hash = false;
3564 sjinfo.semi_operators = NIL;
3565 sjinfo.semi_rhs_exprs = NIL;
3567 /* Get the approximate selectivity */
3570 Node *qual = (Node *) lfirst(l);
3572 /* Note that clause_selectivity will be able to cache its result */
3573 selec *= clause_selectivity(root, qual, 0, JOIN_INNER, &sjinfo);
3576 /* Apply it to the input relation sizes */
3577 tuples = selec * outer_tuples * inner_tuples;
3579 return clamp_row_est(tuples);
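/*
 * Worked example (illustrative numbers): two quals with cached
 * selectivities 0.05 and 0.2 give selec = 0.01; with outer_tuples = 1000
 * and inner_tuples = 500 that yields 0.01 * 1000 * 500 = 5000 tuples
 * before clamping.
 */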
3584 * set_baserel_size_estimates
3585 * Set the size estimates for the given base relation.
3587 * The rel's targetlist and restrictinfo list must have been constructed
3588 * already, and rel->tuples must be set.
3590 * We set the following fields of the rel node:
3591 * rows: the estimated number of output tuples (after applying
3592 * restriction clauses).
3593 * width: the estimated average output tuple width in bytes.
3594 * baserestrictcost: estimated cost of evaluating baserestrictinfo clauses.
3597 set_baserel_size_estimates(PlannerInfo *root, RelOptInfo *rel)
3601 /* Should only be applied to base relations */
3602 Assert(rel->relid > 0);
3604 nrows = rel->tuples *
3605 clauselist_selectivity(root,
3606 rel->baserestrictinfo,
3611 rel->rows = clamp_row_est(nrows);
3613 cost_qual_eval(&rel->baserestrictcost, rel->baserestrictinfo, root);
3615 set_rel_width(root, rel);
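/*
 * Worked example (illustrative numbers): with rel->tuples = 1,000,000 and
 * restriction clauses whose combined selectivity is 0.005, nrows is 5000
 * and rel->rows becomes clamp_row_est(5000) = 5000.
 */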
3619 * get_parameterized_baserel_size
3620 * Make a size estimate for a parameterized scan of a base relation.
3622 * 'param_clauses' lists the additional join clauses to be used.
3624 * set_baserel_size_estimates must have been applied already.
3627 get_parameterized_baserel_size(PlannerInfo *root, RelOptInfo *rel,
3628 List *param_clauses)
3634 * Estimate the number of rows returned by the parameterized scan, knowing
3635 * that it will apply all the extra join clauses as well as the rel's own
3636 * restriction clauses. Note that we force the clauses to be treated as
3637 * non-join clauses during selectivity estimation.
3639 allclauses = list_concat(list_copy(param_clauses),
3640 rel->baserestrictinfo);
3641 nrows = rel->tuples *
3642 clauselist_selectivity(root,
3644 rel->relid, /* do not use 0! */
3647 nrows = clamp_row_est(nrows);
3648 /* For safety, make sure result is not more than the base estimate */
3649 if (nrows > rel->rows)
nrows = rel->rows;
return nrows;
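/*
 * Worked example (illustrative numbers): with rel->tuples = 1,000,000,
 * baserestrictinfo selectivity 0.01 and one parameterizing join clause of
 * selectivity 0.001, the parameterized estimate is about
 * 1,000,000 * 0.01 * 0.001 = 10 rows, versus rel->rows = 10,000 for the
 * unparameterized scan; the check above keeps the result from ever
 * exceeding rel->rows.
 */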
3655 * set_joinrel_size_estimates
3656 * Set the size estimates for the given join relation.
3658 * The rel's targetlist must have been constructed already, and a
3659 * restriction clause list that matches the given component rels must be provided.
3662 * Since there is more than one way to make a joinrel for more than two
3663 * base relations, the results we get here could depend on which component
3664 * rel pair is provided. In theory we should get the same answers no matter
3665 * which pair is provided; in practice, since the selectivity estimation
3666 * routines don't handle all cases equally well, we might not. But there's
3667 * not much to be done about it. (Would it make sense to repeat the
3668 * calculations for each pair of input rels that's encountered, and somehow
3669 * average the results? Probably way more trouble than it's worth, and
3670 * anyway we must keep the rowcount estimate the same for all paths for the joinrel.)
3673 * We set only the rows field here. The width field was already set by
3674 * build_joinrel_tlist, and baserestrictcost is not used for join rels.
3677 set_joinrel_size_estimates(PlannerInfo *root, RelOptInfo *rel,
3678 RelOptInfo *outer_rel,
3679 RelOptInfo *inner_rel,
3680 SpecialJoinInfo *sjinfo,
3683 rel->rows = calc_joinrel_size_estimate(root,
3691 * get_parameterized_joinrel_size
3692 * Make a size estimate for a parameterized scan of a join relation.
3694 * 'rel' is the joinrel under consideration.
3695 * 'outer_rows', 'inner_rows' are the sizes of the (probably also
3696 * parameterized) join inputs under consideration.
3697 * 'sjinfo' is any SpecialJoinInfo relevant to this join.
3698 * 'restrict_clauses' lists the join clauses that need to be applied at the
3699 * join node (including any movable clauses that were moved down to this join,
3700 * and not including any movable clauses that were pushed down into the child paths).
3703 * set_joinrel_size_estimates must have been applied already.
3706 get_parameterized_joinrel_size(PlannerInfo *root, RelOptInfo *rel,
3709 SpecialJoinInfo *sjinfo,
3710 List *restrict_clauses)
3715 * Estimate the number of rows returned by the parameterized join as the
3716 * sizes of the input paths times the selectivity of the clauses that have
3717 * ended up at this join node.
3719 * As with set_joinrel_size_estimates, the rowcount estimate could depend
3720 * on the pair of input paths provided, though ideally we'd get the same
3721 * estimate for any pair with the same parameterization.
3723 nrows = calc_joinrel_size_estimate(root,
3728 /* For safety, make sure result is not more than the base estimate */
3729 if (nrows > rel->rows)
nrows = rel->rows;
return nrows;
3735 * calc_joinrel_size_estimate
3736 * Workhorse for set_joinrel_size_estimates and
3737 * get_parameterized_joinrel_size.
3740 calc_joinrel_size_estimate(PlannerInfo *root,
3743 SpecialJoinInfo *sjinfo,
3746 JoinType jointype = sjinfo->jointype;
3752 * Compute joinclause selectivity. Note that we are only considering
3753 * clauses that become restriction clauses at this join level; we are not
3754 * double-counting them because they were not considered in estimating the
3755 * sizes of the component rels.
3757 * For an outer join, we have to distinguish the selectivity of the join's
3758 * own clauses (JOIN/ON conditions) from any clauses that were "pushed
3759 * down". For inner joins we just count them all as joinclauses.
3761 if (IS_OUTER_JOIN(jointype))
3763 List *joinquals = NIL;
3764 List *pushedquals = NIL;
3767 /* Grovel through the clauses to separate into two lists */
3768 foreach(l, restrictlist)
3770 RestrictInfo *rinfo = (RestrictInfo *) lfirst(l);
3772 Assert(IsA(rinfo, RestrictInfo));
3773 if (rinfo->is_pushed_down)
3774 pushedquals = lappend(pushedquals, rinfo);
3776 joinquals = lappend(joinquals, rinfo);
3779 /* Get the separate selectivities */
3780 jselec = clauselist_selectivity(root,
3785 pselec = clauselist_selectivity(root,
3791 /* Avoid leaking a lot of ListCells */
3792 list_free(joinquals);
3793 list_free(pushedquals);
3797 jselec = clauselist_selectivity(root,
3802 pselec = 0.0; /* not used, keep compiler quiet */
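/*
 * Hypothetical illustration of the split above: in
 * "SELECT ... FROM a LEFT JOIN b ON a.id = b.id WHERE b.flag IS NULL",
 * the ON clause is one of this join's own joinquals, while the WHERE
 * clause, which can only be checked after the outer join has produced (or
 * null-extended) a b row, reaches the join's restrictlist with
 * is_pushed_down set and is therefore counted in pselec rather than
 * jselec.
 */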
3806 * Basically, we multiply size of Cartesian product by selectivity.
3808 * If we are doing an outer join, take that into account: the joinqual
3809 * selectivity has to be clamped using the knowledge that the output must
3810 * be at least as large as the non-nullable input. However, any
3811 * pushed-down quals are applied after the outer join, so their
3812 * selectivity applies fully.
3814 * For JOIN_SEMI and JOIN_ANTI, the selectivity is defined as the fraction
3815 * of LHS rows that have matches, and we apply that straightforwardly.
3820 nrows = outer_rows * inner_rows * jselec;
3823 nrows = outer_rows * inner_rows * jselec;
3824 if (nrows < outer_rows)
3829 nrows = outer_rows * inner_rows * jselec;
3830 if (nrows < outer_rows)
3832 if (nrows < inner_rows)
3837 nrows = outer_rows * jselec;
3838 /* pselec not used */
3841 nrows = outer_rows * (1.0 - jselec);
3845 /* other values not expected here */
3846 elog(ERROR, "unrecognized join type: %d", (int) jointype);
3847 nrows = 0; /* keep compiler quiet */
3851 return clamp_row_est(nrows);
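/*
 * Worked example of the outer-join clamp described above (illustrative
 * numbers): for a LEFT join with outer_rows = 100, inner_rows = 50 and
 * jselec = 0.001, the raw estimate 100 * 50 * 0.001 = 5 is clamped up to
 * the 100 non-nullable outer rows, and the pushed-down selectivity pselec
 * is then applied on top of that. For SEMI and ANTI joins the inner size
 * drops out entirely: 100 outer rows with jselec = 0.25 give 25 and 75
 * rows respectively.
 */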
3855 * set_subquery_size_estimates
3856 * Set the size estimates for a base relation that is a subquery.
3858 * The rel's targetlist and restrictinfo list must have been constructed
3859 * already, and the plan for the subquery must have been completed.
3860 * We look at the subquery's plan and PlannerInfo to extract data.
3862 * We set the same fields as set_baserel_size_estimates.
3865 set_subquery_size_estimates(PlannerInfo *root, RelOptInfo *rel)
3867 PlannerInfo *subroot = rel->subroot;
3868 RangeTblEntry *rte PG_USED_FOR_ASSERTS_ONLY;
3871 /* Should only be applied to base relations that are subqueries */
3872 Assert(rel->relid > 0);
3873 rte = planner_rt_fetch(rel->relid, root);
3874 Assert(rte->rtekind == RTE_SUBQUERY);
3876 /* Copy raw number of output rows from subplan */
3877 rel->tuples = rel->subplan->plan_rows;
3880 * Compute per-output-column width estimates by examining the subquery's
3881 * targetlist. For any output that is a plain Var, get the width estimate
3882 * that was made while planning the subquery. Otherwise, we leave it to
3883 * set_rel_width to fill in a datatype-based default estimate.
3885 foreach(lc, subroot->parse->targetList)
3887 TargetEntry *te = (TargetEntry *) lfirst(lc);
3888 Node *texpr = (Node *) te->expr;
3889 int32 item_width = 0;
3891 Assert(IsA(te, TargetEntry));
3892 /* junk columns aren't visible to upper query */
if (te->resjunk)
continue;
3897 * The subquery could be an expansion of a view that's had columns
3898 * added to it since the current query was parsed, so that there are
3899 * non-junk tlist columns in it that don't correspond to any column
3900 * visible at our query level. Ignore such columns.
3902 if (te->resno < rel->min_attr || te->resno > rel->max_attr)
continue;
3906 * XXX This currently doesn't work for subqueries containing set
3907 * operations, because the Vars in their tlists are bogus references
3908 * to the first leaf subquery, which wouldn't give the right answer
3909 * even if we could still get to its PlannerInfo.
3911 * Also, the subquery could be an appendrel for which all branches are
3912 * known empty due to constraint exclusion, in which case
3913 * set_append_rel_pathlist will have left the attr_widths set to zero.
3915 * In either case, we just leave the width estimate zero until
3916 * set_rel_width fixes it.
3918 if (IsA(texpr, Var) &&
3919 subroot->parse->setOperations == NULL)
3921 Var *var = (Var *) texpr;
3922 RelOptInfo *subrel = find_base_rel(subroot, var->varno);
3924 item_width = subrel->attr_widths[var->varattno - subrel->min_attr];
3926 rel->attr_widths[te->resno - rel->min_attr] = item_width;
3929 /* Now estimate number of output rows, etc */
3930 set_baserel_size_estimates(root, rel);
3934 * set_function_size_estimates
3935 * Set the size estimates for a base relation that is a function call.
3937 * The rel's targetlist and restrictinfo list must have been constructed already.
3940 * We set the same fields as set_baserel_size_estimates.
3943 set_function_size_estimates(PlannerInfo *root, RelOptInfo *rel)
3948 /* Should only be applied to base relations that are functions */
3949 Assert(rel->relid > 0);
3950 rte = planner_rt_fetch(rel->relid, root);
3951 Assert(rte->rtekind == RTE_FUNCTION);
3954 * Estimate number of rows the functions will return. The rowcount of the
3955 * node is that of the largest function result.
3958 foreach(lc, rte->functions)
3960 RangeTblFunction *rtfunc = (RangeTblFunction *) lfirst(lc);
3961 double ntup = expression_returns_set_rows(rtfunc->funcexpr);
3963 if (ntup > rel->tuples)
rel->tuples = ntup;
3967 /* Now estimate number of output rows, etc */
3968 set_baserel_size_estimates(root, rel);
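/*
 * Illustrative example: if one function in the RTE is estimated by
 * expression_returns_set_rows() to return 1000 rows and another 100,
 * rel->tuples ends up as 1000, the larger of the two.
 */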
3972 * set_values_size_estimates
3973 * Set the size estimates for a base relation that is a values list.
3975 * The rel's targetlist and restrictinfo list must have been constructed already.
3978 * We set the same fields as set_baserel_size_estimates.
3981 set_values_size_estimates(PlannerInfo *root, RelOptInfo *rel)
3985 /* Should only be applied to base relations that are values lists */
3986 Assert(rel->relid > 0);
3987 rte = planner_rt_fetch(rel->relid, root);
3988 Assert(rte->rtekind == RTE_VALUES);
3991 * Estimate number of rows the values list will return. We know this
3992 * precisely based on the list length (well, barring set-returning
3993 * functions in list items, but that's a refinement not catered for
3994 * anywhere else either).
3996 rel->tuples = list_length(rte->values_lists);
3998 /* Now estimate number of output rows, etc */
3999 set_baserel_size_estimates(root, rel);
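/*
 * Illustrative example: a scan of "VALUES (1, 'a'), (2, 'b'), (3, 'c')"
 * has three sublists, so rel->tuples is set to 3.
 */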
4003 * set_cte_size_estimates
4004 * Set the size estimates for a base relation that is a CTE reference.
4006 * The rel's targetlist and restrictinfo list must have been constructed
4007 * already, and we need the completed plan for the CTE (if a regular CTE)
4008 * or the non-recursive term (if a self-reference).
4010 * We set the same fields as set_baserel_size_estimates.
4013 set_cte_size_estimates(PlannerInfo *root, RelOptInfo *rel, Plan *cteplan)
4017 /* Should only be applied to base relations that are CTE references */
4018 Assert(rel->relid > 0);
4019 rte = planner_rt_fetch(rel->relid, root);
4020 Assert(rte->rtekind == RTE_CTE);
4022 if (rte->self_reference)
4025 * In a self-reference, arbitrarily assume the average worktable size
4026 * is about 10 times the nonrecursive term's size.
4028 rel->tuples = 10 * cteplan->plan_rows;
4032 /* Otherwise just believe the CTE plan's output estimate */
4033 rel->tuples = cteplan->plan_rows;
4036 /* Now estimate number of output rows, etc */
4037 set_baserel_size_estimates(root, rel);
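/*
 * Illustrative example: if the non-recursive term of a recursive CTE is
 * estimated at 20 rows, the self-reference's rel->tuples is set to
 * 10 * 20 = 200 by the rule above.
 */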
4041 * set_foreign_size_estimates
4042 * Set the size estimates for a base relation that is a foreign table.
4044 * There is not a whole lot that we can do here; the foreign-data wrapper
4045 * is responsible for producing useful estimates. We can do a decent job
4046 * of estimating baserestrictcost, so we set that, and we also set up width
4047 * using what will be purely datatype-driven estimates from the targetlist.
4048 * There is no way to do anything sane with the rows value, so we just put
4049 * a default estimate and hope that the wrapper can improve on it. The
4050 * wrapper's GetForeignRelSize function will be called momentarily.
4052 * The rel's targetlist and restrictinfo list must have been constructed already.
4056 set_foreign_size_estimates(PlannerInfo *root, RelOptInfo *rel)
4058 /* Should only be applied to base relations */
4059 Assert(rel->relid > 0);
4061 rel->rows = 1000; /* entirely bogus default estimate */
4063 cost_qual_eval(&rel->baserestrictcost, rel->baserestrictinfo, root);
4065 set_rel_width(root, rel);
4071 * Set the estimated output width of a base relation.
4073 * The estimated output width is the sum of the per-attribute width estimates
4074 * for the actually-referenced columns, plus any PHVs or other expressions
4075 * that have to be calculated at this relation. This is the amount of data
4076 * we'd need to pass upwards in case of a sort, hash, etc.
4078 * NB: this works best on plain relations because it prefers to look at
4079 * real Vars. For subqueries, set_subquery_size_estimates will already have
4080 * copied up whatever per-column estimates were made within the subquery,
4081 * and for other types of rels there isn't much we can do anyway. We fall
4082 * back on (fairly stupid) datatype-based width estimates if we can't get
4083 * any better number.
4085 * The per-attribute width estimates are cached for possible re-use while
4086 * building join relations.
4089 set_rel_width(PlannerInfo *root, RelOptInfo *rel)
4091 Oid reloid = planner_rt_fetch(rel->relid, root)->relid;
4092 int32 tuple_width = 0;
4093 bool have_wholerow_var = false;
4096 foreach(lc, rel->reltargetlist)
4098 Node *node = (Node *) lfirst(lc);
4101 * Ordinarily, a Var in a rel's reltargetlist must belong to that rel;
4102 * but there are corner cases involving LATERAL references where that
4103 * isn't so. If the Var has the wrong varno, fall through to the
4104 * generic case (it doesn't seem worth the trouble to be any smarter).
4106 if (IsA(node, Var) &&
4107 ((Var *) node)->varno == rel->relid)
4109 Var *var = (Var *) node;
4113 Assert(var->varattno >= rel->min_attr);
4114 Assert(var->varattno <= rel->max_attr);
4116 ndx = var->varattno - rel->min_attr;
4119 * If it's a whole-row Var, we'll deal with it below after we have
4120 * already cached as many attr widths as possible.
4122 if (var->varattno == 0)
4124 have_wholerow_var = true;
4129 * The width may have been cached already (especially if it's a
4130 * subquery), so don't duplicate effort.
4132 if (rel->attr_widths[ndx] > 0)
4134 tuple_width += rel->attr_widths[ndx];
4138 /* Try to get column width from statistics */
4139 if (reloid != InvalidOid && var->varattno > 0)
4141 item_width = get_attavgwidth(reloid, var->varattno);
4144 rel->attr_widths[ndx] = item_width;
4145 tuple_width += item_width;
4151 * Not a plain relation, or can't find statistics for it. Estimate
4152 * using just the type info.
4154 item_width = get_typavgwidth(var->vartype, var->vartypmod);
4155 Assert(item_width > 0);
4156 rel->attr_widths[ndx] = item_width;
4157 tuple_width += item_width;
4159 else if (IsA(node, PlaceHolderVar))
4161 PlaceHolderVar *phv = (PlaceHolderVar *) node;
4162 PlaceHolderInfo *phinfo = find_placeholder_info(root, phv, false);
4164 tuple_width += phinfo->ph_width;
4169 * We could be looking at an expression pulled up from a subquery,
4170 * or a ROW() representing a whole-row child Var, etc. Do what we
4171 * can using the expression type information.
4175 item_width = get_typavgwidth(exprType(node), exprTypmod(node));
4176 Assert(item_width > 0);
4177 tuple_width += item_width;
4182 * If we have a whole-row reference, estimate its width as the sum of
4183 * per-column widths plus heap tuple header overhead.
4185 if (have_wholerow_var)
4187 int32 wholerow_width = MAXALIGN(SizeofHeapTupleHeader);
4189 if (reloid != InvalidOid)
4191 /* Real relation, so estimate true tuple width */
4192 wholerow_width += get_relation_data_width(reloid,
4193 rel->attr_widths - rel->min_attr);
4197 /* Do what we can with info for a phony rel */
4200 for (i = 1; i <= rel->max_attr; i++)
4201 wholerow_width += rel->attr_widths[i - rel->min_attr];
4204 rel->attr_widths[0 - rel->min_attr] = wholerow_width;
4207 * Include the whole-row Var as part of the output tuple. Yes, that
4208 * really is what happens at runtime.
4210 tuple_width += wholerow_width;
4213 Assert(tuple_width >= 0);
4214 rel->width = tuple_width;
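/*
 * Worked example (hypothetical statistics): a reltargetlist containing an
 * int4 Var (width 4) and a text Var whose average width from pg_statistic
 * is 32 gives tuple_width = 4 + 32 = 36; a whole-row Var would add a
 * further MAXALIGN(SizeofHeapTupleHeader) plus the per-column widths, as
 * above.
 */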
4218 * relation_byte_size
4219 * Estimate the storage space in bytes for a given number of tuples
4220 * of a given width (size in bytes).
4223 relation_byte_size(double tuples, int width)
4225 return tuples * (MAXALIGN(width) + MAXALIGN(SizeofHeapTupleHeader));
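/*
 * Worked example (assuming 8-byte MAXALIGN and the usual 23-byte heap
 * tuple header): 1000 tuples of width 36 are costed as
 * 1000 * (MAXALIGN(36) + MAXALIGN(23)) = 1000 * (40 + 24) = 64000 bytes.
 */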
4230 * Returns an estimate of the number of pages covered by a given
4231 * number of tuples of a given width (size in bytes).
4234 page_size(double tuples, int width)
4236 return ceil(relation_byte_size(tuples, width) / BLCKSZ);
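/*
 * Continuing the example above with the default BLCKSZ of 8192:
 * ceil(64000 / 8192) = ceil(7.8125) = 8 pages.
 */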