1 /*-------------------------------------------------------------------------
4 * Routines to compute (and set) relation sizes and path costs
6 * Path costs are measured in arbitrary units established by these basic
9 * seq_page_cost Cost of a sequential page fetch
10 * random_page_cost Cost of a non-sequential page fetch
11 * cpu_tuple_cost Cost of typical CPU time to process a tuple
12 * cpu_index_tuple_cost Cost of typical CPU time to process an index tuple
13 * cpu_operator_cost Cost of CPU time to execute an operator or function
14 * parallel_tuple_cost Cost of CPU time to pass a tuple from worker to master backend
15 * parallel_setup_cost Cost of setting up shared memory for parallelism
17 * We expect that the kernel will typically do some amount of read-ahead
18 * optimization; this in conjunction with seek costs means that seq_page_cost
19 * is normally considerably less than random_page_cost. (However, if the
20 * database is fully cached in RAM, it is reasonable to set them equal.)
22 * We also use a rough estimate "effective_cache_size" of the number of
23 * disk pages in Postgres + OS-level disk cache. (We can't simply use
24 * NBuffers for this purpose because that would ignore the effects of
25 * the kernel's disk cache.)
27 * Obviously, taking constants for these values is an oversimplification,
28 * but it's tough enough to get any useful estimates even at this level of
29 * detail. Note that all of these parameters are user-settable, in case
30 * the default values are drastically off for a particular platform.
32 * seq_page_cost and random_page_cost can also be overridden for an individual
33 * tablespace, in case some data is on a fast disk and other data is on a slow
34 * disk. Per-tablespace overrides never apply to temporary work files such as
35 * an external sort or a materialize node that overflows work_mem.
37 * We compute two separate costs for each path:
38 * total_cost: total estimated cost to fetch all tuples
39 * startup_cost: cost that is expended before first tuple is fetched
40 * In some scenarios, such as when there is a LIMIT or we are implementing
41 * an EXISTS(...) sub-select, it is not necessary to fetch all tuples of the
42 * path's result. A caller can estimate the cost of fetching a partial
43 * result by interpolating between startup_cost and total_cost. In detail:
44 * actual_cost = startup_cost +
45 * (total_cost - startup_cost) * tuples_to_fetch / path->rows;
46 * Note that a base relation's rows count (and, by extension, plan_rows for
47 * plan nodes below the LIMIT node) is set without regard to any LIMIT, so
48 * that this equation works properly. (Note: while path->rows is never zero
49 * for ordinary relations, it is zero for paths for provably-empty relations,
50 * so beware of division-by-zero.) The LIMIT is applied as a top-level plan node.
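 *
 * As an illustration only (hypothetical numbers, not produced by the planner):
 * with startup_cost = 10, total_cost = 110 and path->rows = 1000, fetching the
 * first 50 tuples is estimated at 10 + (110 - 10) * 50 / 1000 = 15.
 *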
53 * For largely historical reasons, most of the routines in this module use
54 * the passed result Path only to store their results (rows, startup_cost and
55 * total_cost) into. All the input data they need is passed as separate
56 * parameters, even though much of it could be extracted from the Path.
57 * An exception is made for the cost_XXXjoin() routines, which expect all
58 * the other fields of the passed XXXPath to be filled in, and similarly
59 * cost_index() assumes the passed IndexPath is valid except for its output
63 * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
64 * Portions Copyright (c) 1994, Regents of the University of California
67 * src/backend/optimizer/path/costsize.c
69 *-------------------------------------------------------------------------
75 #include <float.h> /* for _isnan */
79 #include "access/amapi.h"
80 #include "access/htup_details.h"
81 #include "access/tsmapi.h"
82 #include "executor/executor.h"
83 #include "executor/nodeHash.h"
84 #include "miscadmin.h"
85 #include "nodes/nodeFuncs.h"
86 #include "optimizer/clauses.h"
87 #include "optimizer/cost.h"
88 #include "optimizer/pathnode.h"
89 #include "optimizer/paths.h"
90 #include "optimizer/placeholder.h"
91 #include "optimizer/plancat.h"
92 #include "optimizer/planmain.h"
93 #include "optimizer/restrictinfo.h"
94 #include "parser/parsetree.h"
95 #include "utils/lsyscache.h"
96 #include "utils/selfuncs.h"
97 #include "utils/spccache.h"
98 #include "utils/tuplesort.h"
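
/* LOG2(x) computes the base-2 logarithm; 0.693147180559945 is ln(2) */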
101 #define LOG2(x) (log(x) / 0.693147180559945)
104 double seq_page_cost = DEFAULT_SEQ_PAGE_COST;
105 double random_page_cost = DEFAULT_RANDOM_PAGE_COST;
106 double cpu_tuple_cost = DEFAULT_CPU_TUPLE_COST;
107 double cpu_index_tuple_cost = DEFAULT_CPU_INDEX_TUPLE_COST;
108 double cpu_operator_cost = DEFAULT_CPU_OPERATOR_COST;
109 double parallel_tuple_cost = DEFAULT_PARALLEL_TUPLE_COST;
110 double parallel_setup_cost = DEFAULT_PARALLEL_SETUP_COST;
112 int effective_cache_size = DEFAULT_EFFECTIVE_CACHE_SIZE;
114 Cost disable_cost = 1.0e10;
116 int max_parallel_degree = 2;
118 bool enable_seqscan = true;
119 bool enable_indexscan = true;
120 bool enable_indexonlyscan = true;
121 bool enable_bitmapscan = true;
122 bool enable_tidscan = true;
123 bool enable_sort = true;
124 bool enable_hashagg = true;
125 bool enable_nestloop = true;
126 bool enable_material = true;
127 bool enable_mergejoin = true;
128 bool enable_hashjoin = true;
129 bool enable_fkey_estimates = true;
135 } cost_qual_eval_context;
137 static List *extract_nonindex_conditions(List *qual_clauses, List *indexquals);
138 static MergeScanSelCache *cached_scansel(PlannerInfo *root,
141 static void cost_rescan(PlannerInfo *root, Path *path,
142 Cost *rescan_startup_cost, Cost *rescan_total_cost);
143 static bool cost_qual_eval_walker(Node *node, cost_qual_eval_context *context);
144 static void get_restriction_qual_cost(PlannerInfo *root, RelOptInfo *baserel,
145 ParamPathInfo *param_info,
146 QualCost *qpqual_cost);
147 static bool has_indexed_join_quals(NestPath *joinpath);
148 static double approx_tuple_count(PlannerInfo *root, JoinPath *path,
150 static double calc_joinrel_size_estimate(PlannerInfo *root,
153 SpecialJoinInfo *sjinfo,
155 static void set_rel_width(PlannerInfo *root, RelOptInfo *rel);
156 static double relation_byte_size(double tuples, int width);
157 static double page_size(double tuples, int width);
162 * Force a row-count estimate to a sane value.
165 clamp_row_est(double nrows)
168 * Force estimate to be at least one row, to make explain output look
169 * better and to avoid possible divide-by-zero when interpolating costs.
170 * Make it an integer, too.
183 * Determines and returns the cost of scanning a relation sequentially.
185 * 'baserel' is the relation to be scanned
186 * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
189 cost_seqscan(Path *path, PlannerInfo *root,
190 RelOptInfo *baserel, ParamPathInfo *param_info)
192 Cost startup_cost = 0;
195 double spc_seq_page_cost;
196 QualCost qpqual_cost;
199 /* Should only be applied to base relations */
200 Assert(baserel->relid > 0);
201 Assert(baserel->rtekind == RTE_RELATION);
203 /* Mark the path with the correct row estimate */
205 path->rows = param_info->ppi_rows;
207 path->rows = baserel->rows;
210 startup_cost += disable_cost;
212 /* fetch estimated page cost for tablespace containing table */
213 get_tablespace_page_costs(baserel->reltablespace,
220 disk_run_cost = spc_seq_page_cost * baserel->pages;
223 get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
225 startup_cost += qpqual_cost.startup;
226 cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
227 cpu_run_cost = cpu_per_tuple * baserel->tuples;
228 /* tlist eval costs are paid per output row, not per tuple scanned */
229 startup_cost += path->pathtarget->cost.startup;
230 cpu_run_cost += path->pathtarget->cost.per_tuple * path->rows;
232 /* Adjust costing for parallelism, if used. */
233 if (path->parallel_degree > 0)
235 double parallel_divisor = path->parallel_degree;
236 double leader_contribution;
239 * Early experience with parallel query suggests that when there is
240 * only one worker, the leader often makes a very substantial
241 * contribution to executing the parallel portion of the plan, but as
242 * more workers are added, it does less and less, because it's busy
243 * reading tuples from the workers and doing whatever non-parallel
244 * post-processing is needed. By the time we reach 4 workers, the
245 * leader no longer makes a meaningful contribution. Thus, for now,
246 * estimate that the leader spends 30% of its time servicing each
247 * worker, and the remainder executing the parallel plan.
249 leader_contribution = 1.0 - (0.3 * path->parallel_degree);
250 if (leader_contribution > 0)
251 parallel_divisor += leader_contribution;
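
/*
 * Worked example with hypothetical numbers: with parallel_degree = 2,
 * leader_contribution = 1.0 - 0.3 * 2 = 0.4, so parallel_divisor becomes
 * 2 + 0.4 = 2.4; with 4 or more workers the leader contribution term
 * drops out entirely.
 */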
254 * In the case of a parallel plan, the row count needs to represent
255 * the number of tuples processed per worker. Otherwise, higher-level
256 * plan nodes that appear below the gather will be costed incorrectly,
257 * because they'll anticipate receiving more rows than any given copy will actually get.
260 path->rows /= parallel_divisor;
262 /* The CPU cost is divided among all the workers. */
263 cpu_run_cost /= parallel_divisor;
266 * It may be possible to amortize some of the I/O cost, but probably
267 * not very much, because most operating systems already do aggressive
268 * prefetching. For now, we assume that the disk run cost can't be
273 path->startup_cost = startup_cost;
274 path->total_cost = startup_cost + cpu_run_cost + disk_run_cost;
279 * Determines and returns the cost of scanning a relation using sampling.
281 * 'baserel' is the relation to be scanned
282 * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
285 cost_samplescan(Path *path, PlannerInfo *root,
286 RelOptInfo *baserel, ParamPathInfo *param_info)
288 Cost startup_cost = 0;
291 TableSampleClause *tsc;
293 double spc_seq_page_cost,
294 spc_random_page_cost,
296 QualCost qpqual_cost;
299 /* Should only be applied to base relations with tablesample clauses */
300 Assert(baserel->relid > 0);
301 rte = planner_rt_fetch(baserel->relid, root);
302 Assert(rte->rtekind == RTE_RELATION);
303 tsc = rte->tablesample;
305 tsm = GetTsmRoutine(tsc->tsmhandler);
307 /* Mark the path with the correct row estimate */
309 path->rows = param_info->ppi_rows;
311 path->rows = baserel->rows;
313 /* fetch estimated page cost for tablespace containing table */
314 get_tablespace_page_costs(baserel->reltablespace,
315 &spc_random_page_cost,
318 /* if NextSampleBlock is used, assume random access, else sequential */
319 spc_page_cost = (tsm->NextSampleBlock != NULL) ?
320 spc_random_page_cost : spc_seq_page_cost;
323 * disk costs (recall that baserel->pages has already been set to the
324 * number of pages the sampling method will visit)
326 run_cost += spc_page_cost * baserel->pages;
329 * CPU costs (recall that baserel->tuples has already been set to the
330 * number of tuples the sampling method will select). Note that we ignore
331 * execution cost of the TABLESAMPLE parameter expressions; they will be
332 * evaluated only once per scan, and in most usages they'll likely be
333 * simple constants anyway. We also don't charge anything for the
334 * calculations the sampling method might do internally.
336 get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
338 startup_cost += qpqual_cost.startup;
339 cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
340 run_cost += cpu_per_tuple * baserel->tuples;
341 /* tlist eval costs are paid per output row, not per tuple scanned */
342 startup_cost += path->pathtarget->cost.startup;
343 run_cost += path->pathtarget->cost.per_tuple * path->rows;
345 path->startup_cost = startup_cost;
346 path->total_cost = startup_cost + run_cost;
351 * Determines and returns the cost of a Gather path.
353 * 'rel' is the relation to be operated upon
354 * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
355 * 'rows' may be used to point to a row estimate; if non-NULL, it overrides
356 * both 'rel' and 'param_info'. This is useful when the path doesn't exactly
357 * correspond to any particular RelOptInfo.
360 cost_gather(GatherPath *path, PlannerInfo *root,
361 RelOptInfo *rel, ParamPathInfo *param_info,
364 Cost startup_cost = 0;
367 /* Mark the path with the correct row estimate */
369 path->path.rows = *rows;
371 path->path.rows = param_info->ppi_rows;
373 path->path.rows = rel->rows;
375 startup_cost = path->subpath->startup_cost;
377 run_cost = path->subpath->total_cost - path->subpath->startup_cost;
379 /* Parallel setup and communication cost. */
380 startup_cost += parallel_setup_cost;
381 run_cost += parallel_tuple_cost * path->path.rows;
383 path->path.startup_cost = startup_cost;
384 path->path.total_cost = (startup_cost + run_cost);
389 * Determines and returns the cost of scanning a relation using an index.
391 * 'path' describes the indexscan under consideration, and is complete
392 * except for the fields to be set by this routine
393 * 'loop_count' is the number of repetitions of the indexscan to factor into
394 * estimates of caching behavior
396 * In addition to rows, startup_cost and total_cost, cost_index() sets the
397 * path's indextotalcost and indexselectivity fields. These values will be
398 * needed if the IndexPath is used in a BitmapIndexScan.
400 * NOTE: path->indexquals must contain only clauses usable as index
401 * restrictions. Any additional quals evaluated as qpquals may reduce the
402 * number of returned tuples, but they won't reduce the number of tuples
403 * we have to fetch from the table, so they don't reduce the scan cost.
406 cost_index(IndexPath *path, PlannerInfo *root, double loop_count)
408 IndexOptInfo *index = path->indexinfo;
409 RelOptInfo *baserel = index->rel;
410 bool indexonly = (path->path.pathtype == T_IndexOnlyScan);
411 amcostestimate_function amcostestimate;
413 Cost startup_cost = 0;
415 Cost indexStartupCost;
417 Selectivity indexSelectivity;
418 double indexCorrelation,
420 double spc_seq_page_cost,
421 spc_random_page_cost;
424 QualCost qpqual_cost;
426 double tuples_fetched;
427 double pages_fetched;
429 /* Should only be applied to base relations */
430 Assert(IsA(baserel, RelOptInfo) &&
431 IsA(index, IndexOptInfo));
432 Assert(baserel->relid > 0);
433 Assert(baserel->rtekind == RTE_RELATION);
436 * Mark the path with the correct row estimate, and identify which quals
437 * will need to be enforced as qpquals. We need not check any quals that
438 * are implied by the index's predicate, so we can use indrestrictinfo not
439 * baserestrictinfo as the list of relevant restriction clauses for the
442 if (path->path.param_info)
444 path->path.rows = path->path.param_info->ppi_rows;
445 /* qpquals come from the rel's restriction clauses and ppi_clauses */
446 qpquals = list_concat(
447 extract_nonindex_conditions(path->indexinfo->indrestrictinfo,
449 extract_nonindex_conditions(path->path.param_info->ppi_clauses,
454 path->path.rows = baserel->rows;
455 /* qpquals come from just the rel's restriction clauses */
456 qpquals = extract_nonindex_conditions(path->indexinfo->indrestrictinfo,
460 if (!enable_indexscan)
461 startup_cost += disable_cost;
462 /* we don't need to check enable_indexonlyscan; indxpath.c does that */
465 * Call index-access-method-specific code to estimate the processing cost
466 * for scanning the index, as well as the selectivity of the index (ie,
467 * the fraction of main-table tuples we will have to retrieve) and its
468 * correlation to the main-table tuple order. We need a cast here because
469 * relation.h uses a weak function type to avoid including amapi.h.
471 amcostestimate = (amcostestimate_function) index->amcostestimate;
472 amcostestimate(root, path, loop_count,
473 &indexStartupCost, &indexTotalCost,
474 &indexSelectivity, &indexCorrelation);
477 * Save amcostestimate's results for possible use in bitmap scan planning.
478 * We don't bother to save indexStartupCost or indexCorrelation, because a
479 * bitmap scan doesn't care about either.
481 path->indextotalcost = indexTotalCost;
482 path->indexselectivity = indexSelectivity;
484 /* all costs for touching index itself included here */
485 startup_cost += indexStartupCost;
486 run_cost += indexTotalCost - indexStartupCost;
488 /* estimate number of main-table tuples fetched */
489 tuples_fetched = clamp_row_est(indexSelectivity * baserel->tuples);
491 /* fetch estimated page costs for tablespace containing table */
492 get_tablespace_page_costs(baserel->reltablespace,
493 &spc_random_page_cost,
497 * Estimate number of main-table pages fetched, and compute I/O cost.
499 * When the index ordering is uncorrelated with the table ordering,
500 * we use an approximation proposed by Mackert and Lohman (see
501 * index_pages_fetched() for details) to compute the number of pages
502 * fetched, and then charge spc_random_page_cost per page fetched.
504 * When the index ordering is exactly correlated with the table ordering
505 * (just after a CLUSTER, for example), the number of pages fetched should
506 * be exactly selectivity * table_size. What's more, all but the first
507 * will be sequential fetches, not the random fetches that occur in the
508 * uncorrelated case. So if the number of pages is more than 1, we
510 * spc_random_page_cost + (pages_fetched - 1) * spc_seq_page_cost
511 * For partially-correlated indexes, we ought to charge somewhere between
512 * these two estimates. We currently interpolate linearly between the
513 * estimates based on the correlation squared (XXX is that appropriate?).
515 * If it's an index-only scan, then we will not need to fetch any heap
516 * pages for which the visibility map shows all tuples are visible.
517 * Hence, reduce the estimated number of heap fetches accordingly.
518 * We use the measured fraction of the entire heap that is all-visible,
519 * which might not be particularly relevant to the subset of the heap
520 * that this query will fetch; but it's not clear how to do better.
526 * For repeated indexscans, the appropriate estimate for the
527 * uncorrelated case is to scale up the number of tuples fetched in
528 * the Mackert and Lohman formula by the number of scans, so that we
529 * estimate the number of pages fetched by all the scans; then
530 * pro-rate the costs for one scan. In this case we assume all the
531 * fetches are random accesses.
533 pages_fetched = index_pages_fetched(tuples_fetched * loop_count,
535 (double) index->pages,
539 pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));
541 max_IO_cost = (pages_fetched * spc_random_page_cost) / loop_count;
544 * In the perfectly correlated case, the number of pages touched by
545 * each scan is selectivity * table_size, and we can use the Mackert
546 * and Lohman formula at the page level to estimate how much work is
547 * saved by caching across scans. We still assume all the fetches are
548 * random, though, which is an overestimate that's hard to correct for
549 * without double-counting the cache effects. (But in most cases
550 * where such a plan is actually interesting, only one page would get
551 * fetched per scan anyway, so it shouldn't matter much.)
553 pages_fetched = ceil(indexSelectivity * (double) baserel->pages);
555 pages_fetched = index_pages_fetched(pages_fetched * loop_count,
557 (double) index->pages,
561 pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));
563 min_IO_cost = (pages_fetched * spc_random_page_cost) / loop_count;
568 * Normal case: apply the Mackert and Lohman formula, and then
569 * interpolate between that and the correlation-derived result.
571 pages_fetched = index_pages_fetched(tuples_fetched,
573 (double) index->pages,
577 pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));
579 /* max_IO_cost is for the perfectly uncorrelated case (csquared=0) */
580 max_IO_cost = pages_fetched * spc_random_page_cost;
582 /* min_IO_cost is for the perfectly correlated case (csquared=1) */
583 pages_fetched = ceil(indexSelectivity * (double) baserel->pages);
586 pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));
588 if (pages_fetched > 0)
590 min_IO_cost = spc_random_page_cost;
591 if (pages_fetched > 1)
592 min_IO_cost += (pages_fetched - 1) * spc_seq_page_cost;
599 * Now interpolate based on estimated index order correlation to get total
600 * disk I/O cost for main table accesses.
602 csquared = indexCorrelation * indexCorrelation;
604 run_cost += max_IO_cost + csquared * (min_IO_cost - max_IO_cost);
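
/*
 * Illustration with hypothetical numbers: an indexCorrelation of 0.5 gives
 * csquared = 0.25, so the charge works out to 0.75 * max_IO_cost +
 * 0.25 * min_IO_cost; only a perfectly correlated index (csquared = 1) is
 * charged the pure min_IO_cost.
 */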
607 * Estimate CPU costs per tuple.
609 * What we want here is cpu_tuple_cost plus the evaluation costs of any
610 * qual clauses that we have to evaluate as qpquals.
612 cost_qual_eval(&qpqual_cost, qpquals, root);
614 startup_cost += qpqual_cost.startup;
615 cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
617 run_cost += cpu_per_tuple * tuples_fetched;
619 /* tlist eval costs are paid per output row, not per tuple scanned */
620 startup_cost += path->path.pathtarget->cost.startup;
621 run_cost += path->path.pathtarget->cost.per_tuple * path->path.rows;
623 path->path.startup_cost = startup_cost;
624 path->path.total_cost = startup_cost + run_cost;
628 * extract_nonindex_conditions
630 * Given a list of quals to be enforced in an indexscan, extract the ones that
631 * will have to be applied as qpquals (ie, the index machinery won't handle
632 * them). The actual rules for this appear in create_indexscan_plan() in
633 * createplan.c, but the full rules are fairly expensive and we don't want to
634 * go to that much effort for index paths that don't get selected for the
635 * final plan. So we approximate it as quals that don't appear directly in
636 * indexquals and also are not redundant children of the same EquivalenceClass
637 * as some indexqual. This method neglects some infrequently-relevant
638 * considerations, specifically clauses that needn't be checked because they
639 * are implied by an indexqual. It does not seem worth the cycles to try to
640 * factor that in at this stage, even though createplan.c will take pains to
641 * remove such unnecessary clauses from the qpquals list if this path is
645 extract_nonindex_conditions(List *qual_clauses, List *indexquals)
650 foreach(lc, qual_clauses)
652 RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
654 Assert(IsA(rinfo, RestrictInfo));
655 if (rinfo->pseudoconstant)
656 continue; /* we may drop pseudoconstants here */
657 if (list_member_ptr(indexquals, rinfo))
658 continue; /* simple duplicate */
659 if (is_redundant_derived_clause(rinfo, indexquals))
660 continue; /* derived from same EquivalenceClass */
661 /* ... skip the predicate proof attempt createplan.c will try ... */
662 result = lappend(result, rinfo);
668 * index_pages_fetched
669 * Estimate the number of pages actually fetched after accounting for
672 * We use an approximation proposed by Mackert and Lohman, "Index Scans
673 * Using a Finite LRU Buffer: A Validated I/O Model", ACM Transactions
674 * on Database Systems, Vol. 14, No. 3, September 1989, Pages 401-424.
675 * The Mackert and Lohman approximation is that the number of pages
678 * min(2TNs/(2T+Ns), T) when T <= b
679 * 2TNs/(2T+Ns) when T > b and Ns <= 2Tb/(2T-b)
680 * b + (Ns - 2Tb/(2T-b))*(T-b)/T when T > b and Ns > 2Tb/(2T-b)
682 * T = # pages in table
683 * N = # tuples in table
684 * s = selectivity = fraction of table to be scanned
685 * b = # buffer pages available (we include kernel space here)
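 *
 * Worked example (hypothetical numbers): with T = 10000 pages, Ns = 10000
 * tuples fetched, and assuming T <= b, the formula gives
 * 2*10000*10000 / (2*10000 + 10000) = 6666.7, i.e. about 6667 pages fetched,
 * rather than the 10000 a naive one-page-per-tuple estimate would assume.
 *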
687 * We assume that effective_cache_size is the total number of buffer pages
688 * available for the whole query, and pro-rate that space across all the
689 * tables in the query and the index currently under consideration. (This
690 * ignores space needed for other indexes used by the query, but since we
691 * don't know which indexes will get used, we can't estimate that very well;
692 * and in any case counting all the tables may well be an overestimate, since
693 * depending on the join plan not all the tables may be scanned concurrently.)
695 * The product Ns is the number of tuples fetched; we pass in that
696 * product rather than calculating it here. "pages" is the number of pages
697 * in the object under consideration (either an index or a table).
698 * "index_pages" is the amount to add to the total table space, which was
699 * computed for us by query_planner.
701 * Caller is expected to have ensured that tuples_fetched is greater than zero
702 * and rounded to integer (see clamp_row_est). The result will likewise be
703 * greater than zero and integral.
706 index_pages_fetched(double tuples_fetched, BlockNumber pages,
707 double index_pages, PlannerInfo *root)
709 double pages_fetched;
714 /* T is # pages in table, but don't allow it to be zero */
715 T = (pages > 1) ? (double) pages : 1.0;
717 /* Compute number of pages assumed to be competing for cache space */
718 total_pages = root->total_table_pages + index_pages;
719 total_pages = Max(total_pages, 1.0);
720 Assert(T <= total_pages);
722 /* b is pro-rated share of effective_cache_size */
723 b = (double) effective_cache_size * T / total_pages;
725 /* force it positive and integral */
731 /* This part is the Mackert and Lohman formula */
735 (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
736 if (pages_fetched >= T)
739 pages_fetched = ceil(pages_fetched);
745 lim = (2.0 * T * b) / (2.0 * T - b);
746 if (tuples_fetched <= lim)
749 (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
754 b + (tuples_fetched - lim) * (T - b) / T;
756 pages_fetched = ceil(pages_fetched);
758 return pages_fetched;
762 * get_indexpath_pages
763 * Determine the total size of the indexes used in a bitmap index path.
765 * Note: if the same index is used more than once in a bitmap tree, we will
766 * count it multiple times, which perhaps is the wrong thing ... but it's
767 * not completely clear, and detecting duplicates is difficult, so ignore it
771 get_indexpath_pages(Path *bitmapqual)
776 if (IsA(bitmapqual, BitmapAndPath))
778 BitmapAndPath *apath = (BitmapAndPath *) bitmapqual;
780 foreach(l, apath->bitmapquals)
782 result += get_indexpath_pages((Path *) lfirst(l));
785 else if (IsA(bitmapqual, BitmapOrPath))
787 BitmapOrPath *opath = (BitmapOrPath *) bitmapqual;
789 foreach(l, opath->bitmapquals)
791 result += get_indexpath_pages((Path *) lfirst(l));
794 else if (IsA(bitmapqual, IndexPath))
796 IndexPath *ipath = (IndexPath *) bitmapqual;
798 result = (double) ipath->indexinfo->pages;
801 elog(ERROR, "unrecognized node type: %d", nodeTag(bitmapqual));
807 * cost_bitmap_heap_scan
808 * Determines and returns the cost of scanning a relation using a bitmap
809 * index-then-heap plan.
811 * 'baserel' is the relation to be scanned
812 * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
813 * 'bitmapqual' is a tree of IndexPaths, BitmapAndPaths, and BitmapOrPaths
814 * 'loop_count' is the number of repetitions of the indexscan to factor into
815 * estimates of caching behavior
817 * Note: the component IndexPaths in bitmapqual should have been costed
818 * using the same loop_count.
821 cost_bitmap_heap_scan(Path *path, PlannerInfo *root, RelOptInfo *baserel,
822 ParamPathInfo *param_info,
823 Path *bitmapqual, double loop_count)
825 Cost startup_cost = 0;
828 Selectivity indexSelectivity;
829 QualCost qpqual_cost;
832 double tuples_fetched;
833 double pages_fetched;
834 double spc_seq_page_cost,
835 spc_random_page_cost;
838 /* Should only be applied to base relations */
839 Assert(IsA(baserel, RelOptInfo));
840 Assert(baserel->relid > 0);
841 Assert(baserel->rtekind == RTE_RELATION);
843 /* Mark the path with the correct row estimate */
845 path->rows = param_info->ppi_rows;
847 path->rows = baserel->rows;
849 if (!enable_bitmapscan)
850 startup_cost += disable_cost;
853 * Fetch total cost of obtaining the bitmap, as well as its total
856 cost_bitmap_tree_node(bitmapqual, &indexTotalCost, &indexSelectivity);
858 startup_cost += indexTotalCost;
860 /* Fetch estimated page costs for tablespace containing table. */
861 get_tablespace_page_costs(baserel->reltablespace,
862 &spc_random_page_cost,
866 * Estimate number of main-table pages fetched.
868 tuples_fetched = clamp_row_est(indexSelectivity * baserel->tuples);
870 T = (baserel->pages > 1) ? (double) baserel->pages : 1.0;
875 * For repeated bitmap scans, scale up the number of tuples fetched in
876 * the Mackert and Lohman formula by the number of scans, so that we
877 * estimate the number of pages fetched by all the scans. Then
878 * pro-rate for one scan.
880 pages_fetched = index_pages_fetched(tuples_fetched * loop_count,
882 get_indexpath_pages(bitmapqual),
884 pages_fetched /= loop_count;
889 * For a single scan, the number of heap pages that need to be fetched
890 * is the same as the Mackert and Lohman formula for the case T <= b
891 * (ie, no re-reads needed).
893 pages_fetched = (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
895 if (pages_fetched >= T)
898 pages_fetched = ceil(pages_fetched);
901 * For small numbers of pages we should charge spc_random_page_cost
902 * apiece, while if nearly all the table's pages are being read, it's more
903 * appropriate to charge spc_seq_page_cost apiece. The effect is
904 * nonlinear, too. For lack of a better idea, interpolate like this to
905 * determine the cost per page.
907 if (pages_fetched >= 2.0)
908 cost_per_page = spc_random_page_cost -
909 (spc_random_page_cost - spc_seq_page_cost)
910 * sqrt(pages_fetched / T);
912 cost_per_page = spc_random_page_cost;
914 run_cost += pages_fetched * cost_per_page;
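
/*
 * Illustration with hypothetical numbers and the default page costs
 * (spc_random_page_cost = 4.0, spc_seq_page_cost = 1.0): fetching 250 out of
 * T = 1000 pages gives cost_per_page = 4.0 - 3.0 * sqrt(0.25) = 2.5, partway
 * between the random and sequential costs, as intended.
 */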
917 * Estimate CPU costs per tuple.
919 * Often the indexquals don't need to be rechecked at each tuple ... but
920 * not always, especially not if there are enough tuples involved that the
921 * bitmaps become lossy. For the moment, just assume they will be
922 * rechecked always. This means we charge the full freight for all the
925 get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
927 startup_cost += qpqual_cost.startup;
928 cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
930 run_cost += cpu_per_tuple * tuples_fetched;
932 /* tlist eval costs are paid per output row, not per tuple scanned */
933 startup_cost += path->pathtarget->cost.startup;
934 run_cost += path->pathtarget->cost.per_tuple * path->rows;
936 path->startup_cost = startup_cost;
937 path->total_cost = startup_cost + run_cost;
941 * cost_bitmap_tree_node
942 * Extract cost and selectivity from a bitmap tree node (index/and/or)
945 cost_bitmap_tree_node(Path *path, Cost *cost, Selectivity *selec)
947 if (IsA(path, IndexPath))
949 *cost = ((IndexPath *) path)->indextotalcost;
950 *selec = ((IndexPath *) path)->indexselectivity;
953 * Charge a small amount per retrieved tuple to reflect the costs of
954 * manipulating the bitmap. This is mostly to make sure that a bitmap
955 * scan doesn't look to be the same cost as an indexscan to retrieve a
958 *cost += 0.1 * cpu_operator_cost * path->rows;
960 else if (IsA(path, BitmapAndPath))
962 *cost = path->total_cost;
963 *selec = ((BitmapAndPath *) path)->bitmapselectivity;
965 else if (IsA(path, BitmapOrPath))
967 *cost = path->total_cost;
968 *selec = ((BitmapOrPath *) path)->bitmapselectivity;
972 elog(ERROR, "unrecognized node type: %d", nodeTag(path));
973 *cost = *selec = 0; /* keep compiler quiet */
978 * cost_bitmap_and_node
979 * Estimate the cost of a BitmapAnd node
981 * Note that this considers only the costs of index scanning and bitmap
982 * creation, not the eventual heap access. In that sense the object isn't
983 * truly a Path, but it has enough path-like properties (costs in particular)
984 * to warrant treating it as one. We don't bother to set the path rows field,
988 cost_bitmap_and_node(BitmapAndPath *path, PlannerInfo *root)
995 * We estimate AND selectivity on the assumption that the inputs are
996 * independent. This is probably often wrong, but we don't have the info
999 * The runtime cost of the BitmapAnd itself is estimated at 100x
1000 * cpu_operator_cost for each tbm_intersect needed. Probably too small,
1001 * definitely too simplistic?
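 *
 * For example (hypothetical numbers): two inputs with selectivities 0.1 and
 * 0.05 combine, under the independence assumption, to 0.1 * 0.05 = 0.005 of
 * the table.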
1005 foreach(l, path->bitmapquals)
1007 Path *subpath = (Path *) lfirst(l);
1009 Selectivity subselec;
1011 cost_bitmap_tree_node(subpath, &subCost, &subselec);
1015 totalCost += subCost;
1016 if (l != list_head(path->bitmapquals))
1017 totalCost += 100.0 * cpu_operator_cost;
1019 path->bitmapselectivity = selec;
1020 path->path.rows = 0; /* per above, not used */
1021 path->path.startup_cost = totalCost;
1022 path->path.total_cost = totalCost;
1026 * cost_bitmap_or_node
1027 * Estimate the cost of a BitmapOr node
1029 * See comments for cost_bitmap_and_node.
1032 cost_bitmap_or_node(BitmapOrPath *path, PlannerInfo *root)
1039 * We estimate OR selectivity on the assumption that the inputs are
1040 * non-overlapping, since that's often the case in "x IN (list)" type
1041 * situations. Of course, we clamp to 1.0 at the end.
1043 * The runtime cost of the BitmapOr itself is estimated at 100x
1044 * cpu_operator_cost for each tbm_union needed. Probably too small,
1045 * definitely too simplistic? We are aware that the tbm_unions are
1046 * optimized out when the inputs are BitmapIndexScans.
1050 foreach(l, path->bitmapquals)
1052 Path *subpath = (Path *) lfirst(l);
1054 Selectivity subselec;
1056 cost_bitmap_tree_node(subpath, &subCost, &subselec);
1060 totalCost += subCost;
1061 if (l != list_head(path->bitmapquals) &&
1062 !IsA(subpath, IndexPath))
1063 totalCost += 100.0 * cpu_operator_cost;
1065 path->bitmapselectivity = Min(selec, 1.0);
1066 path->path.rows = 0; /* per above, not used */
1067 path->path.startup_cost = totalCost;
1068 path->path.total_cost = totalCost;
1073 * Determines and returns the cost of scanning a relation using TIDs.
1075 * 'baserel' is the relation to be scanned
1076 * 'tidquals' is the list of TID-checkable quals
1077 * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
1080 cost_tidscan(Path *path, PlannerInfo *root,
1081 RelOptInfo *baserel, List *tidquals, ParamPathInfo *param_info)
1083 Cost startup_cost = 0;
1085 bool isCurrentOf = false;
1086 QualCost qpqual_cost;
1088 QualCost tid_qual_cost;
1091 double spc_random_page_cost;
1093 /* Should only be applied to base relations */
1094 Assert(baserel->relid > 0);
1095 Assert(baserel->rtekind == RTE_RELATION);
1097 /* Mark the path with the correct row estimate */
1099 path->rows = param_info->ppi_rows;
1101 path->rows = baserel->rows;
1103 /* Count how many tuples we expect to retrieve */
1105 foreach(l, tidquals)
1107 if (IsA(lfirst(l), ScalarArrayOpExpr))
1109 /* Each element of the array yields 1 tuple */
1110 ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) lfirst(l);
1111 Node *arraynode = (Node *) lsecond(saop->args);
1113 ntuples += estimate_array_length(arraynode);
1115 else if (IsA(lfirst(l), CurrentOfExpr))
1117 /* CURRENT OF yields 1 tuple */
1123 /* It's just CTID = something, count 1 tuple */
1129 * We must force TID scan for WHERE CURRENT OF, because only nodeTidscan.c
1130 * understands how to do it correctly. Therefore, honor enable_tidscan
1131 * only when CURRENT OF isn't present. Also note that cost_qual_eval
1132 * counts a CurrentOfExpr as having startup cost disable_cost, which we
1133 * subtract off here; that's to prevent other plan types such as seqscan
1138 Assert(baserel->baserestrictcost.startup >= disable_cost);
1139 startup_cost -= disable_cost;
1141 else if (!enable_tidscan)
1142 startup_cost += disable_cost;
1145 * The TID qual expressions will be computed once, any other baserestrict
1146 * quals once per retrieved tuple.
1148 cost_qual_eval(&tid_qual_cost, tidquals, root);
1150 /* fetch estimated page cost for tablespace containing table */
1151 get_tablespace_page_costs(baserel->reltablespace,
1152 &spc_random_page_cost,
1155 /* disk costs --- assume each tuple on a different page */
1156 run_cost += spc_random_page_cost * ntuples;
1158 /* Add scanning CPU costs */
1159 get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1161 /* XXX currently we assume TID quals are a subset of qpquals */
1162 startup_cost += qpqual_cost.startup + tid_qual_cost.per_tuple;
1163 cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple -
1164 tid_qual_cost.per_tuple;
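
/*
 * Note that, because the TID quals are assumed to be included in
 * qpqual_cost (per the XXX comment above), their per-tuple evaluation cost
 * is charged once in startup_cost and subtracted back out of the per-tuple
 * charge, rather than being paid again for every retrieved tuple.
 */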
1165 run_cost += cpu_per_tuple * ntuples;
1167 /* tlist eval costs are paid per output row, not per tuple scanned */
1168 startup_cost += path->pathtarget->cost.startup;
1169 run_cost += path->pathtarget->cost.per_tuple * path->rows;
1171 path->startup_cost = startup_cost;
1172 path->total_cost = startup_cost + run_cost;
1177 * Determines and returns the cost of scanning a subquery RTE.
1179 * 'baserel' is the relation to be scanned
1180 * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
1183 cost_subqueryscan(SubqueryScanPath *path, PlannerInfo *root,
1184 RelOptInfo *baserel, ParamPathInfo *param_info)
1188 QualCost qpqual_cost;
1191 /* Should only be applied to base relations that are subqueries */
1192 Assert(baserel->relid > 0);
1193 Assert(baserel->rtekind == RTE_SUBQUERY);
1195 /* Mark the path with the correct row estimate */
1197 path->path.rows = param_info->ppi_rows;
1199 path->path.rows = baserel->rows;
1202 * Cost of path is cost of evaluating the subplan, plus cost of evaluating
1203 * any restriction clauses and tlist that will be attached to the
1204 * SubqueryScan node, plus cpu_tuple_cost to account for selection and
1205 * projection overhead.
1207 path->path.startup_cost = path->subpath->startup_cost;
1208 path->path.total_cost = path->subpath->total_cost;
1210 get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1212 startup_cost = qpqual_cost.startup;
1213 cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
1214 run_cost = cpu_per_tuple * baserel->tuples;
1216 /* tlist eval costs are paid per output row, not per tuple scanned */
1217 startup_cost += path->path.pathtarget->cost.startup;
1218 run_cost += path->path.pathtarget->cost.per_tuple * path->path.rows;
1220 path->path.startup_cost += startup_cost;
1221 path->path.total_cost += startup_cost + run_cost;
1226 * Determines and returns the cost of scanning a function RTE.
1228 * 'baserel' is the relation to be scanned
1229 * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
1232 cost_functionscan(Path *path, PlannerInfo *root,
1233 RelOptInfo *baserel, ParamPathInfo *param_info)
1235 Cost startup_cost = 0;
1237 QualCost qpqual_cost;
1242 /* Should only be applied to base relations that are functions */
1243 Assert(baserel->relid > 0);
1244 rte = planner_rt_fetch(baserel->relid, root);
1245 Assert(rte->rtekind == RTE_FUNCTION);
1247 /* Mark the path with the correct row estimate */
1249 path->rows = param_info->ppi_rows;
1251 path->rows = baserel->rows;
1254 * Estimate costs of executing the function expression(s).
1256 * Currently, nodeFunctionscan.c always executes the functions to
1257 * completion before returning any rows, and caches the results in a
1258 * tuplestore. So the function eval cost is all startup cost, and per-row
1259 * costs are minimal.
1261 * XXX in principle we ought to charge tuplestore spill costs if the
1262 * number of rows is large. However, given how phony our rowcount
1263 * estimates for functions tend to be, there's not a lot of point in that
1264 * refinement right now.
1266 cost_qual_eval_node(&exprcost, (Node *) rte->functions, root);
1268 startup_cost += exprcost.startup + exprcost.per_tuple;
1270 /* Add scanning CPU costs */
1271 get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1273 startup_cost += qpqual_cost.startup;
1274 cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
1275 run_cost += cpu_per_tuple * baserel->tuples;
1277 /* tlist eval costs are paid per output row, not per tuple scanned */
1278 startup_cost += path->pathtarget->cost.startup;
1279 run_cost += path->pathtarget->cost.per_tuple * path->rows;
1281 path->startup_cost = startup_cost;
1282 path->total_cost = startup_cost + run_cost;
1287 * Determines and returns the cost of scanning a VALUES RTE.
1289 * 'baserel' is the relation to be scanned
1290 * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
1293 cost_valuesscan(Path *path, PlannerInfo *root,
1294 RelOptInfo *baserel, ParamPathInfo *param_info)
1296 Cost startup_cost = 0;
1298 QualCost qpqual_cost;
1301 /* Should only be applied to base relations that are values lists */
1302 Assert(baserel->relid > 0);
1303 Assert(baserel->rtekind == RTE_VALUES);
1305 /* Mark the path with the correct row estimate */
1307 path->rows = param_info->ppi_rows;
1309 path->rows = baserel->rows;
1312 * For now, estimate list evaluation cost at one operator eval per list
1313 * (probably pretty bogus, but is it worth being smarter?)
1315 cpu_per_tuple = cpu_operator_cost;
1317 /* Add scanning CPU costs */
1318 get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1320 startup_cost += qpqual_cost.startup;
1321 cpu_per_tuple += cpu_tuple_cost + qpqual_cost.per_tuple;
1322 run_cost += cpu_per_tuple * baserel->tuples;
1324 /* tlist eval costs are paid per output row, not per tuple scanned */
1325 startup_cost += path->pathtarget->cost.startup;
1326 run_cost += path->pathtarget->cost.per_tuple * path->rows;
1328 path->startup_cost = startup_cost;
1329 path->total_cost = startup_cost + run_cost;
1334 * Determines and returns the cost of scanning a CTE RTE.
1336 * Note: this is used for both self-reference and regular CTEs; the
1337 * possible cost differences are below the threshold of what we could
1338 * estimate accurately anyway. Note that the costs of evaluating the
1339 * referenced CTE query are added into the final plan as initplan costs,
1340 * and should NOT be counted here.
1343 cost_ctescan(Path *path, PlannerInfo *root,
1344 RelOptInfo *baserel, ParamPathInfo *param_info)
1346 Cost startup_cost = 0;
1348 QualCost qpqual_cost;
1351 /* Should only be applied to base relations that are CTEs */
1352 Assert(baserel->relid > 0);
1353 Assert(baserel->rtekind == RTE_CTE);
1355 /* Mark the path with the correct row estimate */
1357 path->rows = param_info->ppi_rows;
1359 path->rows = baserel->rows;
1361 /* Charge one CPU tuple cost per row for tuplestore manipulation */
1362 cpu_per_tuple = cpu_tuple_cost;
1364 /* Add scanning CPU costs */
1365 get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1367 startup_cost += qpqual_cost.startup;
1368 cpu_per_tuple += cpu_tuple_cost + qpqual_cost.per_tuple;
1369 run_cost += cpu_per_tuple * baserel->tuples;
1371 /* tlist eval costs are paid per output row, not per tuple scanned */
1372 startup_cost += path->pathtarget->cost.startup;
1373 run_cost += path->pathtarget->cost.per_tuple * path->rows;
1375 path->startup_cost = startup_cost;
1376 path->total_cost = startup_cost + run_cost;
1380 * cost_recursive_union
1381 * Determines and returns the cost of performing a recursive union,
1382 * and also the estimated output size.
1384 * We are given Paths for the nonrecursive and recursive terms.
1387 cost_recursive_union(Path *runion, Path *nrterm, Path *rterm)
1393 /* We probably have decent estimates for the non-recursive term */
1394 startup_cost = nrterm->startup_cost;
1395 total_cost = nrterm->total_cost;
1396 total_rows = nrterm->rows;
1399 * We arbitrarily assume that about 10 recursive iterations will be
1400 * needed, and that we've managed to get a good fix on the cost and output
1401 * size of each one of them. These are mighty shaky assumptions but it's
1402 * hard to see how to do better.
1404 total_cost += 10 * rterm->total_cost;
1405 total_rows += 10 * rterm->rows;
1408 * Also charge cpu_tuple_cost per row to account for the costs of
1409 * manipulating the tuplestores. (We don't worry about possible
1410 * spill-to-disk costs.)
1412 total_cost += cpu_tuple_cost * total_rows;
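
/*
 * Illustration with hypothetical numbers: if the non-recursive term costs 10
 * and returns 100 rows, and the recursive term costs 50 per iteration and
 * returns 20 rows, then total_cost is roughly 10 + 10 * 50 + 0.01 * 300 = 513
 * (with the default cpu_tuple_cost) and total_rows = 100 + 10 * 20 = 300.
 */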
1414 runion->startup_cost = startup_cost;
1415 runion->total_cost = total_cost;
1416 runion->rows = total_rows;
1417 runion->pathtarget->width = Max(nrterm->pathtarget->width,
1418 rterm->pathtarget->width);
1423 * Determines and returns the cost of sorting a relation, including
1424 * the cost of reading the input data.
1426 * If the total volume of data to sort is less than sort_mem, we will do
1427 * an in-memory sort, which requires no I/O and about t*log2(t) tuple
1428 * comparisons for t tuples.
1430 * If the total volume exceeds sort_mem, we switch to a tape-style merge
1431 * algorithm. There will still be about t*log2(t) tuple comparisons in
1432 * total, but we will also need to write and read each tuple once per
1433 * merge pass. We expect about ceil(logM(r)) merge passes where r is the
1434 * number of initial runs formed and M is the merge order used by tuplesort.c.
1435 * Since the average initial run should be about sort_mem, we have
1436 * disk traffic = 2 * relsize * ceil(logM(relsize / sort_mem))
1437 * cpu = comparison_cost * t * log2(t)
1439 * If the sort is bounded (i.e., only the first k result tuples are needed)
1440 * and k tuples can fit into sort_mem, we use a heap method that keeps only
1441 * k tuples in the heap; this will require about t*log2(k) tuple comparisons.
1443 * The disk traffic is assumed to be 3/4ths sequential and 1/4th random
1444 * accesses (XXX can't we refine that guess?)
1446 * By default, we charge two operator evals per tuple comparison, which should
1447 * be in the right ballpark in most cases. The caller can tweak this by
1448 * specifying nonzero comparison_cost; typically that's used for any extra
1449 * work that has to be done to prepare the inputs to the comparison operators.
1451 * 'pathkeys' is a list of sort keys
1452 * 'input_cost' is the total cost for reading the input data
1453 * 'tuples' is the number of tuples in the relation
1454 * 'width' is the average tuple width in bytes
1455 * 'comparison_cost' is the extra cost per comparison, if any
1456 * 'sort_mem' is the number of kilobytes of work memory allowed for the sort
1457 * 'limit_tuples' is the bound on the number of output tuples; -1 if no bound
1459 * NOTE: some callers currently pass NIL for pathkeys because they
1460 * can't conveniently supply the sort keys. Since this routine doesn't
1461 * currently do anything with pathkeys anyway, that doesn't matter...
1462 * but if it ever does, it should react gracefully to lack of key data.
1463 * (Actually, the thing we'd most likely be interested in is just the number
1464 * of sort keys, which all callers *could* supply.)
1467 cost_sort(Path *path, PlannerInfo *root,
1468 List *pathkeys, Cost input_cost, double tuples, int width,
1469 Cost comparison_cost, int sort_mem,
1470 double limit_tuples)
1472 Cost startup_cost = input_cost;
1474 double input_bytes = relation_byte_size(tuples, width);
1475 double output_bytes;
1476 double output_tuples;
1477 long sort_mem_bytes = sort_mem * 1024L;
1480 startup_cost += disable_cost;
1482 path->rows = tuples;
1485 * We want to be sure the cost of a sort is never estimated as zero, even
1486 * if passed-in tuple count is zero. Besides, mustn't do log(0)...
1491 /* Include the default cost-per-comparison */
1492 comparison_cost += 2.0 * cpu_operator_cost;
1494 /* Do we have a useful LIMIT? */
1495 if (limit_tuples > 0 && limit_tuples < tuples)
1497 output_tuples = limit_tuples;
1498 output_bytes = relation_byte_size(output_tuples, width);
1502 output_tuples = tuples;
1503 output_bytes = input_bytes;
1506 if (output_bytes > sort_mem_bytes)
1509 * We'll have to use a disk-based sort of all the tuples
1511 double npages = ceil(input_bytes / BLCKSZ);
1512 double nruns = input_bytes / sort_mem_bytes;
1513 double mergeorder = tuplesort_merge_order(sort_mem_bytes);
1515 double npageaccesses;
1520 * Assume about N log2 N comparisons
1522 startup_cost += comparison_cost * tuples * LOG2(tuples);
1526 /* Compute logM(r) as log(r) / log(M) */
1527 if (nruns > mergeorder)
1528 log_runs = ceil(log(nruns) / log(mergeorder));
1531 npageaccesses = 2.0 * npages * log_runs;
1532 /* Assume 3/4ths of accesses are sequential, 1/4th are not */
1533 startup_cost += npageaccesses *
1534 (seq_page_cost * 0.75 + random_page_cost * 0.25);
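
/*
 * Illustration with hypothetical numbers: sorting 1 GB of input with
 * sort_mem = 64 MB and the default 8 kB block size gives npages = 131072
 * and nruns = 16; assuming the merge order exceeds 16, one merge pass
 * suffices (log_runs = 1), so npageaccesses = 2 * 131072 = 262144, i.e.
 * one full write plus one full read of the data.
 */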
1536 else if (tuples > 2 * output_tuples || input_bytes > sort_mem_bytes)
1539 * We'll use a bounded heap-sort keeping just K tuples in memory, for
1540 * a total number of tuple comparisons of N log2 K; but the constant
1541 * factor is a bit higher than for quicksort. Tweak it so that the
1542 * cost curve is continuous at the crossover point.
1544 startup_cost += comparison_cost * tuples * LOG2(2.0 * output_tuples);
1548 /* We'll use plain quicksort on all the input tuples */
1549 startup_cost += comparison_cost * tuples * LOG2(tuples);
1553 * Also charge a small amount (arbitrarily set equal to operator cost) per
1554 * extracted tuple. We don't charge cpu_tuple_cost because a Sort node
1555 * doesn't do qual-checking or projection, so it has less overhead than
1556 * most plan nodes. Note it's correct to use tuples not output_tuples
1557 * here --- the upper LIMIT will pro-rate the run cost so we'd be double
1558 * counting the LIMIT otherwise.
1560 run_cost += cpu_operator_cost * tuples;
1562 path->startup_cost = startup_cost;
1563 path->total_cost = startup_cost + run_cost;
1568 * Determines and returns the cost of a MergeAppend node.
1570 * MergeAppend merges several pre-sorted input streams, using a heap that
1571 * at any given instant holds the next tuple from each stream. If there
1572 * are N streams, we need about N*log2(N) tuple comparisons to construct
1573 * the heap at startup, and then for each output tuple, about log2(N)
1574 * comparisons to delete the top heap entry and another log2(N) comparisons
1575 * to insert its successor from the same stream.
1577 * (The effective value of N will drop once some of the input streams are
1578 * exhausted, but it seems unlikely to be worth trying to account for that.)
1580 * The heap is never spilled to disk, since we assume N is not very large.
1581 * So this is much simpler than cost_sort.
1583 * As in cost_sort, we charge two operator evals per tuple comparison.
1585 * 'pathkeys' is a list of sort keys
1586 * 'n_streams' is the number of input streams
1587 * 'input_startup_cost' is the sum of the input streams' startup costs
1588 * 'input_total_cost' is the sum of the input streams' total costs
1589 * 'tuples' is the number of tuples in all the streams
1592 cost_merge_append(Path *path, PlannerInfo *root,
1593 List *pathkeys, int n_streams,
1594 Cost input_startup_cost, Cost input_total_cost,
1597 Cost startup_cost = 0;
1599 Cost comparison_cost;
1606 N = (n_streams < 2) ? 2.0 : (double) n_streams;
1609 /* Assumed cost per tuple comparison */
1610 comparison_cost = 2.0 * cpu_operator_cost;
1612 /* Heap creation cost */
1613 startup_cost += comparison_cost * N * logN;
1615 /* Per-tuple heap maintenance cost */
1616 run_cost += tuples * comparison_cost * 2.0 * logN;
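
/*
 * Illustration with hypothetical numbers: with N = 8 input streams,
 * logN = 3, so building the heap costs about 8 * 3 = 24 comparisons and
 * each output tuple costs about 2 * 3 = 6 comparisons.
 */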
1619 * Also charge a small amount (arbitrarily set equal to operator cost) per
1620 * extracted tuple. We don't charge cpu_tuple_cost because a MergeAppend
1621 * node doesn't do qual-checking or projection, so it has less overhead
1622 * than most plan nodes.
1624 run_cost += cpu_operator_cost * tuples;
1626 path->startup_cost = startup_cost + input_startup_cost;
1627 path->total_cost = startup_cost + run_cost + input_total_cost;
1632 * Determines and returns the cost of materializing a relation, including
1633 * the cost of reading the input data.
1635 * If the total volume of data to materialize exceeds work_mem, we will need
1636 * to write it to disk, so the cost is much higher in that case.
1638 * Note that here we are estimating the costs for the first scan of the
1639 * relation, so the materialization is all overhead --- any savings will
1640 * occur only on rescan, which is estimated in cost_rescan.
1643 cost_material(Path *path,
1644 Cost input_startup_cost, Cost input_total_cost,
1645 double tuples, int width)
1647 Cost startup_cost = input_startup_cost;
1648 Cost run_cost = input_total_cost - input_startup_cost;
1649 double nbytes = relation_byte_size(tuples, width);
1650 long work_mem_bytes = work_mem * 1024L;
1652 path->rows = tuples;
1655 * Whether spilling or not, charge 2x cpu_operator_cost per tuple to
1656 * reflect bookkeeping overhead. (This rate must be more than what
1657 * cost_rescan charges for materialize, ie, cpu_operator_cost per tuple;
1658 * if it is exactly the same then there will be a cost tie between
1659 * nestloop with A outer, materialized B inner and nestloop with B outer,
1660 * materialized A inner. The extra cost ensures we'll prefer
1661 * materializing the smaller rel.) Note that this is normally a good deal
1662 * less than cpu_tuple_cost, which is OK because a Material plan node
1663 * doesn't do qual-checking or projection, so it's got less overhead than
1666 run_cost += 2 * cpu_operator_cost * tuples;
1669 * If we will spill to disk, charge at the rate of seq_page_cost per page.
1670 * This cost is assumed to be evenly spread through the plan run phase,
1671 * which isn't exactly accurate but our cost model doesn't allow for
1672 * nonuniform costs within the run phase.
1674 if (nbytes > work_mem_bytes)
1676 double npages = ceil(nbytes / BLCKSZ);
1678 run_cost += seq_page_cost * npages;
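
/*
 * Illustration with hypothetical numbers: materializing 100 MB with the
 * default 8 kB block size spills npages = 12800 pages, adding
 * 12800 * seq_page_cost (12800 with the default cost of 1.0) to run_cost.
 */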
1681 path->startup_cost = startup_cost;
1682 path->total_cost = startup_cost + run_cost;
1687 * Determines and returns the cost of performing an Agg plan node,
1688 * including the cost of its input.
1690 * aggcosts can be NULL when there are no actual aggregate functions (i.e.,
1691 * we are using a hashed Agg node just to do grouping).
1693 * Note: when aggstrategy == AGG_SORTED, caller must ensure that input costs
1694 * are for appropriately-sorted input.
1697 cost_agg(Path *path, PlannerInfo *root,
1698 AggStrategy aggstrategy, const AggClauseCosts *aggcosts,
1699 int numGroupCols, double numGroups,
1700 Cost input_startup_cost, Cost input_total_cost,
1701 double input_tuples)
1703 double output_tuples;
1706 AggClauseCosts dummy_aggcosts;
1708 /* Use all-zero per-aggregate costs if NULL is passed */
1709 if (aggcosts == NULL)
1711 Assert(aggstrategy == AGG_HASHED);
1712 MemSet(&dummy_aggcosts, 0, sizeof(AggClauseCosts));
1713 aggcosts = &dummy_aggcosts;
1717 * The transCost.per_tuple component of aggcosts should be charged once
1718 * per input tuple, corresponding to the costs of evaluating the aggregate
1719 * transfns and their input expressions (with any startup cost of course
1720 * charged but once). The finalCost component is charged once per output
1721 * tuple, corresponding to the costs of evaluating the finalfns.
1723 * If we are grouping, we charge an additional cpu_operator_cost per
1724 * grouping column per input tuple for grouping comparisons.
1726 * We will produce a single output tuple if not grouping, and a tuple per
1727 * group otherwise. We charge cpu_tuple_cost for each output tuple.
1729 * Note: in this cost model, AGG_SORTED and AGG_HASHED have exactly the
1730 * same total CPU cost, but AGG_SORTED has lower startup cost. If the
1731 * input path is already sorted appropriately, AGG_SORTED should be
1732 * preferred (since it has no risk of memory overflow). This will happen
1733 * as long as the computed total costs are indeed exactly equal --- but if
1734 * there's roundoff error we might do the wrong thing. So be sure that
1735 * the computations below form the same intermediate values in the same
1738 if (aggstrategy == AGG_PLAIN)
1740 startup_cost = input_total_cost;
1741 startup_cost += aggcosts->transCost.startup;
1742 startup_cost += aggcosts->transCost.per_tuple * input_tuples;
1743 startup_cost += aggcosts->finalCost;
1744 /* we aren't grouping */
1745 total_cost = startup_cost + cpu_tuple_cost;
1748 else if (aggstrategy == AGG_SORTED)
1750 /* Here we are able to deliver output on-the-fly */
1751 startup_cost = input_startup_cost;
1752 total_cost = input_total_cost;
1753 /* calcs phrased this way to match HASHED case, see note above */
1754 total_cost += aggcosts->transCost.startup;
1755 total_cost += aggcosts->transCost.per_tuple * input_tuples;
1756 total_cost += (cpu_operator_cost * numGroupCols) * input_tuples;
1757 total_cost += aggcosts->finalCost * numGroups;
1758 total_cost += cpu_tuple_cost * numGroups;
1759 output_tuples = numGroups;
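
/*
 * Illustration with hypothetical numbers and the default cost settings:
 * 100000 input tuples, 2 grouping columns and 1000 groups give a
 * grouping-comparison charge of 0.0025 * 2 * 100000 = 500 and a per-group
 * output charge of 0.01 * 1000 = 10.
 */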
1763 /* must be AGG_HASHED */
1764 startup_cost = input_total_cost;
1765 if (!enable_hashagg)
1766 startup_cost += disable_cost;
1767 startup_cost += aggcosts->transCost.startup;
1768 startup_cost += aggcosts->transCost.per_tuple * input_tuples;
1769 startup_cost += (cpu_operator_cost * numGroupCols) * input_tuples;
1770 total_cost = startup_cost;
1771 total_cost += aggcosts->finalCost * numGroups;
1772 total_cost += cpu_tuple_cost * numGroups;
1773 output_tuples = numGroups;
1776 path->rows = output_tuples;
1777 path->startup_cost = startup_cost;
1778 path->total_cost = total_cost;
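/*
 * Added illustrative note: assuming the default cpu_operator_cost (0.0025)
 * and cpu_tuple_cost (0.01), grouping 100,000 input tuples on 2 columns into
 * 100 groups charges 0.0025 * 2 * 100,000 = 500 for the grouping comparisons
 * and 0.01 * 100 = 1.0 for emitting the output tuples, on top of the
 * per-tuple transition and per-group final function costs.  The numbers are
 * hypothetical; only the shape of the formulas above matters.
 */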
1783 * Determines and returns the cost of performing a WindowAgg plan node,
1784 * including the cost of its input.
1786 * Input is assumed already properly sorted.
1789 cost_windowagg(Path *path, PlannerInfo *root,
1790 List *windowFuncs, int numPartCols, int numOrderCols,
1791 Cost input_startup_cost, Cost input_total_cost,
1792 double input_tuples)
1798 startup_cost = input_startup_cost;
1799 total_cost = input_total_cost;
1802 * Window functions are assumed to cost their stated execution cost, plus
1803 * the cost of evaluating their input expressions, per tuple. Since they
1804 * may in fact evaluate their inputs at multiple rows during each cycle,
1805 * this could be a drastic underestimate; but without a way to know how
1806 * many rows the window function will fetch, it's hard to do better. In
1807 * any case, it's a good estimate for all the built-in window functions,
1808 * so we'll just do this for now.
1810 foreach(lc, windowFuncs)
1812 WindowFunc *wfunc = (WindowFunc *) lfirst(lc);
1816 Assert(IsA(wfunc, WindowFunc));
1818 wfunccost = get_func_cost(wfunc->winfnoid) * cpu_operator_cost;
1820 /* also add the input expressions' cost to per-input-row costs */
1821 cost_qual_eval_node(&argcosts, (Node *) wfunc->args, root);
1822 startup_cost += argcosts.startup;
1823 wfunccost += argcosts.per_tuple;
1826 * Add the filter's cost to per-input-row costs. XXX We should reduce
1827 * input expression costs according to filter selectivity.
1829 cost_qual_eval_node(&argcosts, (Node *) wfunc->aggfilter, root);
1830 startup_cost += argcosts.startup;
1831 wfunccost += argcosts.per_tuple;
1833 total_cost += wfunccost * input_tuples;
1837 * We also charge cpu_operator_cost per grouping column per tuple for
 * grouping comparisons, plus cpu_tuple_cost per tuple for general overhead.
1841 * XXX this neglects costs of spooling the data to disk when it overflows
1842 * work_mem. Sooner or later that should get accounted for.
1844 total_cost += cpu_operator_cost * (numPartCols + numOrderCols) * input_tuples;
1845 total_cost += cpu_tuple_cost * input_tuples;
1847 path->rows = input_tuples;
1848 path->startup_cost = startup_cost;
1849 path->total_cost = total_cost;
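/*
 * Added illustrative note: with the defaults (cpu_operator_cost = 0.0025,
 * cpu_tuple_cost = 0.01), one partition column and one order column cost
 * 0.0025 * 2 + 0.01 = 0.015 per input tuple of general overhead, i.e. about
 * 15,000 for a million-tuple input, in addition to the per-tuple window
 * function costs accumulated above.  Hypothetical figures for scale only.
 */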
1854 * Determines and returns the cost of performing a Group plan node,
1855 * including the cost of its input.
 * Note: caller must ensure that input costs are for appropriately-sorted input.
1861 cost_group(Path *path, PlannerInfo *root,
1862 int numGroupCols, double numGroups,
1863 Cost input_startup_cost, Cost input_total_cost,
1864 double input_tuples)
1869 startup_cost = input_startup_cost;
1870 total_cost = input_total_cost;
1873 * Charge one cpu_operator_cost per comparison per input tuple. We assume
1874 * all columns get compared at most of the tuples.
1876 total_cost += cpu_operator_cost * input_tuples * numGroupCols;
1878 path->rows = numGroups;
1879 path->startup_cost = startup_cost;
1880 path->total_cost = total_cost;
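/*
 * Added illustrative note: with the default cpu_operator_cost of 0.0025,
 * grouping 100,000 input tuples on 3 columns adds 0.0025 * 100,000 * 3 = 750
 * to total_cost.  Hypothetical figures, shown only to illustrate the
 * comparison charge above.
 */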
1884 * initial_cost_nestloop
1885 * Preliminary estimate of the cost of a nestloop join path.
1887 * This must quickly produce lower-bound estimates of the path's startup and
1888 * total costs. If we are unable to eliminate the proposed path from
1889 * consideration using the lower bounds, final_cost_nestloop will be called
1890 * to obtain the final estimates.
1892 * The exact division of labor between this function and final_cost_nestloop
1893 * is private to them, and represents a tradeoff between speed of the initial
1894 * estimate and getting a tight lower bound. We choose to not examine the
1895 * join quals here, since that's by far the most expensive part of the
1896 * calculations. The end result is that CPU-cost considerations must be
1897 * left for the second phase; and for SEMI/ANTI joins, we must also postpone
1898 * incorporation of the inner path's run cost.
1900 * 'workspace' is to be filled with startup_cost, total_cost, and perhaps
1901 * other data to be used by final_cost_nestloop
1902 * 'jointype' is the type of join to be performed
1903 * 'outer_path' is the outer input to the join
1904 * 'inner_path' is the inner input to the join
1905 * 'sjinfo' is extra info about the join for selectivity estimation
1906 * 'semifactors' contains valid data if jointype is SEMI or ANTI
1909 initial_cost_nestloop(PlannerInfo *root, JoinCostWorkspace *workspace,
1911 Path *outer_path, Path *inner_path,
1912 SpecialJoinInfo *sjinfo,
1913 SemiAntiJoinFactors *semifactors)
Cost startup_cost = 0;
Cost run_cost = 0;
1917 double outer_path_rows = outer_path->rows;
1918 Cost inner_rescan_start_cost;
1919 Cost inner_rescan_total_cost;
1920 Cost inner_run_cost;
1921 Cost inner_rescan_run_cost;
1923 /* estimate costs to rescan the inner relation */
1924 cost_rescan(root, inner_path,
1925 &inner_rescan_start_cost,
1926 &inner_rescan_total_cost);
1928 /* cost of source data */
1931 * NOTE: clearly, we must pay both outer and inner paths' startup_cost
1932 * before we can start returning tuples, so the join's startup cost is
 * their sum.  We'll also pay the inner path's rescan startup cost multiple times.
1936 startup_cost += outer_path->startup_cost + inner_path->startup_cost;
1937 run_cost += outer_path->total_cost - outer_path->startup_cost;
1938 if (outer_path_rows > 1)
1939 run_cost += (outer_path_rows - 1) * inner_rescan_start_cost;
1941 inner_run_cost = inner_path->total_cost - inner_path->startup_cost;
1942 inner_rescan_run_cost = inner_rescan_total_cost - inner_rescan_start_cost;
1944 if (jointype == JOIN_SEMI || jointype == JOIN_ANTI)
1947 * SEMI or ANTI join: executor will stop after first match.
1949 * Getting decent estimates requires inspection of the join quals,
1950 * which we choose to postpone to final_cost_nestloop.
1953 /* Save private data for final_cost_nestloop */
1954 workspace->inner_run_cost = inner_run_cost;
1955 workspace->inner_rescan_run_cost = inner_rescan_run_cost;
1959 /* Normal case; we'll scan whole input rel for each outer row */
1960 run_cost += inner_run_cost;
1961 if (outer_path_rows > 1)
1962 run_cost += (outer_path_rows - 1) * inner_rescan_run_cost;
1965 /* CPU costs left for later */
1967 /* Public result fields */
1968 workspace->startup_cost = startup_cost;
1969 workspace->total_cost = startup_cost + run_cost;
1970 /* Save private data for final_cost_nestloop */
1971 workspace->run_cost = run_cost;
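/*
 * Added illustrative note: in the normal (non-SEMI/ANTI) case the lower
 * bound computed above is outer run cost + one full inner run +
 * (outer_path_rows - 1) inner rescans, with all join-qual CPU deferred to
 * final_cost_nestloop.  For example (hypothetical numbers, ignoring any
 * rescan startup cost): outer run 100, inner run 20, inner rescan run 5 and
 * 50 outer rows give run_cost >= 100 + 20 + 49 * 5 = 365.
 */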
1975 * final_cost_nestloop
1976 * Final estimate of the cost and result size of a nestloop join path.
1978 * 'path' is already filled in except for the rows and cost fields
1979 * 'workspace' is the result from initial_cost_nestloop
1980 * 'sjinfo' is extra info about the join for selectivity estimation
1981 * 'semifactors' contains valid data if path->jointype is SEMI or ANTI
1984 final_cost_nestloop(PlannerInfo *root, NestPath *path,
1985 JoinCostWorkspace *workspace,
1986 SpecialJoinInfo *sjinfo,
1987 SemiAntiJoinFactors *semifactors)
1989 Path *outer_path = path->outerjoinpath;
1990 Path *inner_path = path->innerjoinpath;
1991 double outer_path_rows = outer_path->rows;
1992 double inner_path_rows = inner_path->rows;
1993 Cost startup_cost = workspace->startup_cost;
1994 Cost run_cost = workspace->run_cost;
1996 QualCost restrict_qual_cost;
1999 /* Protect some assumptions below that rowcounts aren't zero or NaN */
2000 if (outer_path_rows <= 0 || isnan(outer_path_rows))
2001 outer_path_rows = 1;
2002 if (inner_path_rows <= 0 || isnan(inner_path_rows))
2003 inner_path_rows = 1;
2005 /* Mark the path with the correct row estimate */
2006 if (path->path.param_info)
2007 path->path.rows = path->path.param_info->ppi_rows;
2009 path->path.rows = path->path.parent->rows;
2012 * We could include disable_cost in the preliminary estimate, but that
2013 * would amount to optimizing for the case where the join method is
2014 * disabled, which doesn't seem like the way to bet.
2016 if (!enable_nestloop)
2017 startup_cost += disable_cost;
2019 /* cost of inner-relation source data (we already dealt with outer rel) */
2021 if (path->jointype == JOIN_SEMI || path->jointype == JOIN_ANTI)
2024 * SEMI or ANTI join: executor will stop after first match.
2026 Cost inner_run_cost = workspace->inner_run_cost;
2027 Cost inner_rescan_run_cost = workspace->inner_rescan_run_cost;
2028 double outer_matched_rows;
2029 Selectivity inner_scan_frac;
2032 * For an outer-rel row that has at least one match, we can expect the
2033 * inner scan to stop after a fraction 1/(match_count+1) of the inner
2034 * rows, if the matches are evenly distributed. Since they probably
2035 * aren't quite evenly distributed, we apply a fuzz factor of 2.0 to
2036 * that fraction. (If we used a larger fuzz factor, we'd have to
2037 * clamp inner_scan_frac to at most 1.0; but since match_count is at
2038 * least 1, no such clamp is needed now.)
2040 outer_matched_rows = rint(outer_path_rows * semifactors->outer_match_frac);
2041 inner_scan_frac = 2.0 / (semifactors->match_count + 1.0);
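/*
 * Added illustrative note: if match_count is 4, the naive stop fraction
 * would be 1/(4+1) = 0.2; the 2.0 fuzz factor above turns that into
 * inner_scan_frac = 2/(4+1) = 0.4.  Because match_count is at least 1, the
 * result never exceeds 1.0.
 */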
2044 * Compute number of tuples processed (not number emitted!). First,
2045 * account for successfully-matched outer rows.
2047 ntuples = outer_matched_rows * inner_path_rows * inner_scan_frac;
2050 * Now we need to estimate the actual costs of scanning the inner
2051 * relation, which may be quite a bit less than N times inner_run_cost
2052 * due to early scan stops. We consider two cases. If the inner path
2053 * is an indexscan using all the joinquals as indexquals, then an
2054 * unmatched outer row results in an indexscan returning no rows,
2055 * which is probably quite cheap. Otherwise, the executor will have
2056 * to scan the whole inner rel for an unmatched row; not so cheap.
2058 if (has_indexed_join_quals(path))
2061 * Successfully-matched outer rows will only require scanning
2062 * inner_scan_frac of the inner relation. In this case, we don't
2063 * need to charge the full inner_run_cost even when that's more
2064 * than inner_rescan_run_cost, because we can assume that none of
2065 * the inner scans ever scan the whole inner relation. So it's
2066 * okay to assume that all the inner scan executions can be
2067 * fractions of the full cost, even if materialization is reducing
2068 * the rescan cost. At this writing, it's impossible to get here
2069 * for a materialized inner scan, so inner_run_cost and
2070 * inner_rescan_run_cost will be the same anyway; but just in
2071 * case, use inner_run_cost for the first matched tuple and
2072 * inner_rescan_run_cost for additional ones.
2074 run_cost += inner_run_cost * inner_scan_frac;
2075 if (outer_matched_rows > 1)
2076 run_cost += (outer_matched_rows - 1) * inner_rescan_run_cost * inner_scan_frac;
2079 * Add the cost of inner-scan executions for unmatched outer rows.
2080 * We estimate this as the same cost as returning the first tuple
2081 * of a nonempty scan. We consider that these are all rescans,
2082 * since we used inner_run_cost once already.
2084 run_cost += (outer_path_rows - outer_matched_rows) *
2085 inner_rescan_run_cost / inner_path_rows;
2088 * We won't be evaluating any quals at all for unmatched rows, so
2089 * don't add them to ntuples.
2095 * Here, a complicating factor is that rescans may be cheaper than
2096 * first scans. If we never scan all the way to the end of the
2097 * inner rel, it might be (depending on the plan type) that we'd
2098 * never pay the whole inner first-scan run cost. However it is
2099 * difficult to estimate whether that will happen (and it could
2100 * not happen if there are any unmatched outer rows!), so be
2101 * conservative and always charge the whole first-scan cost once.
2103 run_cost += inner_run_cost;
2105 /* Add inner run cost for additional outer tuples having matches */
2106 if (outer_matched_rows > 1)
2107 run_cost += (outer_matched_rows - 1) * inner_rescan_run_cost * inner_scan_frac;
2109 /* Add inner run cost for unmatched outer tuples */
2110 run_cost += (outer_path_rows - outer_matched_rows) *
2111 inner_rescan_run_cost;
2113 /* And count the unmatched join tuples as being processed */
ntuples += (outer_path_rows - outer_matched_rows) * inner_path_rows;
2120 /* Normal-case source costs were included in preliminary estimate */
2122 /* Compute number of tuples processed (not number emitted!) */
2123 ntuples = outer_path_rows * inner_path_rows;
2127 cost_qual_eval(&restrict_qual_cost, path->joinrestrictinfo, root);
2128 startup_cost += restrict_qual_cost.startup;
2129 cpu_per_tuple = cpu_tuple_cost + restrict_qual_cost.per_tuple;
2130 run_cost += cpu_per_tuple * ntuples;
2132 /* tlist eval costs are paid per output row, not per tuple scanned */
2133 startup_cost += path->path.pathtarget->cost.startup;
2134 run_cost += path->path.pathtarget->cost.per_tuple * path->path.rows;
2136 path->path.startup_cost = startup_cost;
2137 path->path.total_cost = startup_cost + run_cost;
2141 * initial_cost_mergejoin
2142 * Preliminary estimate of the cost of a mergejoin path.
2144 * This must quickly produce lower-bound estimates of the path's startup and
2145 * total costs. If we are unable to eliminate the proposed path from
2146 * consideration using the lower bounds, final_cost_mergejoin will be called
2147 * to obtain the final estimates.
2149 * The exact division of labor between this function and final_cost_mergejoin
2150 * is private to them, and represents a tradeoff between speed of the initial
2151 * estimate and getting a tight lower bound. We choose to not examine the
2152 * join quals here, except for obtaining the scan selectivity estimate which
2153 * is really essential (but fortunately, use of caching keeps the cost of
2154 * getting that down to something reasonable).
2155 * We also assume that cost_sort is cheap enough to use here.
2157 * 'workspace' is to be filled with startup_cost, total_cost, and perhaps
2158 * other data to be used by final_cost_mergejoin
2159 * 'jointype' is the type of join to be performed
2160 * 'mergeclauses' is the list of joinclauses to be used as merge clauses
2161 * 'outer_path' is the outer input to the join
2162 * 'inner_path' is the inner input to the join
2163 * 'outersortkeys' is the list of sort keys for the outer path
2164 * 'innersortkeys' is the list of sort keys for the inner path
2165 * 'sjinfo' is extra info about the join for selectivity estimation
2167 * Note: outersortkeys and innersortkeys should be NIL if no explicit
2168 * sort is needed because the respective source path is already ordered.
2171 initial_cost_mergejoin(PlannerInfo *root, JoinCostWorkspace *workspace,
2174 Path *outer_path, Path *inner_path,
2175 List *outersortkeys, List *innersortkeys,
2176 SpecialJoinInfo *sjinfo)
Cost startup_cost = 0;
Cost run_cost = 0;
2180 double outer_path_rows = outer_path->rows;
2181 double inner_path_rows = inner_path->rows;
2182 Cost inner_run_cost;
Selectivity outerstartsel, outerendsel, innerstartsel, innerendsel;
2191 Path sort_path; /* dummy for result of cost_sort */
2193 /* Protect some assumptions below that rowcounts aren't zero or NaN */
2194 if (outer_path_rows <= 0 || isnan(outer_path_rows))
2195 outer_path_rows = 1;
2196 if (inner_path_rows <= 0 || isnan(inner_path_rows))
2197 inner_path_rows = 1;
2200 * A merge join will stop as soon as it exhausts either input stream
2201 * (unless it's an outer join, in which case the outer side has to be
2202 * scanned all the way anyway). Estimate fraction of the left and right
2203 * inputs that will actually need to be scanned. Likewise, we can
2204 * estimate the number of rows that will be skipped before the first join
2205 * pair is found, which should be factored into startup cost. We use only
2206 * the first (most significant) merge clause for this purpose. Since
2207 * mergejoinscansel() is a fairly expensive computation, we cache the
2208 * results in the merge clause RestrictInfo.
2210 if (mergeclauses && jointype != JOIN_FULL)
2212 RestrictInfo *firstclause = (RestrictInfo *) linitial(mergeclauses);
2217 MergeScanSelCache *cache;
2219 /* Get the input pathkeys to determine the sort-order details */
2220 opathkeys = outersortkeys ? outersortkeys : outer_path->pathkeys;
2221 ipathkeys = innersortkeys ? innersortkeys : inner_path->pathkeys;
2224 opathkey = (PathKey *) linitial(opathkeys);
2225 ipathkey = (PathKey *) linitial(ipathkeys);
2226 /* debugging check */
2227 if (opathkey->pk_opfamily != ipathkey->pk_opfamily ||
2228 opathkey->pk_eclass->ec_collation != ipathkey->pk_eclass->ec_collation ||
2229 opathkey->pk_strategy != ipathkey->pk_strategy ||
2230 opathkey->pk_nulls_first != ipathkey->pk_nulls_first)
2231 elog(ERROR, "left and right pathkeys do not match in mergejoin");
2233 /* Get the selectivity with caching */
2234 cache = cached_scansel(root, firstclause, opathkey);
2236 if (bms_is_subset(firstclause->left_relids,
2237 outer_path->parent->relids))
2239 /* left side of clause is outer */
2240 outerstartsel = cache->leftstartsel;
2241 outerendsel = cache->leftendsel;
2242 innerstartsel = cache->rightstartsel;
2243 innerendsel = cache->rightendsel;
2247 /* left side of clause is inner */
2248 outerstartsel = cache->rightstartsel;
2249 outerendsel = cache->rightendsel;
2250 innerstartsel = cache->leftstartsel;
2251 innerendsel = cache->leftendsel;
if (jointype == JOIN_LEFT ||
    jointype == JOIN_ANTI)
{
    outerstartsel = 0.0;
    outerendsel = 1.0;
}
else if (jointype == JOIN_RIGHT)
{
    innerstartsel = 0.0;
    innerendsel = 1.0;
}
2267 /* cope with clauseless or full mergejoin */
2268 outerstartsel = innerstartsel = 0.0;
2269 outerendsel = innerendsel = 1.0;
2273 * Convert selectivities to row counts. We force outer_rows and
2274 * inner_rows to be at least 1, but the skip_rows estimates can be zero.
2276 outer_skip_rows = rint(outer_path_rows * outerstartsel);
2277 inner_skip_rows = rint(inner_path_rows * innerstartsel);
2278 outer_rows = clamp_row_est(outer_path_rows * outerendsel);
2279 inner_rows = clamp_row_est(inner_path_rows * innerendsel);
2281 Assert(outer_skip_rows <= outer_rows);
2282 Assert(inner_skip_rows <= inner_rows);
2285 * Readjust scan selectivities to account for above rounding. This is
2286 * normally an insignificant effect, but when there are only a few rows in
2287 * the inputs, failing to do this makes for a large percentage error.
2289 outerstartsel = outer_skip_rows / outer_path_rows;
2290 innerstartsel = inner_skip_rows / inner_path_rows;
2291 outerendsel = outer_rows / outer_path_rows;
2292 innerendsel = inner_rows / inner_path_rows;
2294 Assert(outerstartsel <= outerendsel);
2295 Assert(innerstartsel <= innerendsel);
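/*
 * Added illustrative note: the readjustment matters mostly for tiny inputs.
 * With 5 outer rows and outerstartsel = 0.33, outer_skip_rows becomes
 * rint(1.65) = 2, so the effective start selectivity is corrected to
 * 2/5 = 0.4; with 100,000 rows the correction would be negligible.
 * Hypothetical numbers for illustration only.
 */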
2297 /* cost of source data */
2299 if (outersortkeys) /* do we need to sort outer? */
cost_sort(&sort_path, root, outersortkeys,
          outer_path->total_cost, outer_path_rows,
          outer_path->pathtarget->width,
          0.0, work_mem, -1.0);
2310 startup_cost += sort_path.startup_cost;
startup_cost += (sort_path.total_cost - sort_path.startup_cost) * outerstartsel;
2313 run_cost += (sort_path.total_cost - sort_path.startup_cost)
2314 * (outerendsel - outerstartsel);
2318 startup_cost += outer_path->startup_cost;
startup_cost += (outer_path->total_cost - outer_path->startup_cost) * outerstartsel;
2321 run_cost += (outer_path->total_cost - outer_path->startup_cost)
2322 * (outerendsel - outerstartsel);
2325 if (innersortkeys) /* do we need to sort inner? */
cost_sort(&sort_path, root, innersortkeys,
          inner_path->total_cost, inner_path_rows,
          inner_path->pathtarget->width,
          0.0, work_mem, -1.0);
2336 startup_cost += sort_path.startup_cost;
startup_cost += (sort_path.total_cost - sort_path.startup_cost) * innerstartsel;
2339 inner_run_cost = (sort_path.total_cost - sort_path.startup_cost)
2340 * (innerendsel - innerstartsel);
2344 startup_cost += inner_path->startup_cost;
startup_cost += (inner_path->total_cost - inner_path->startup_cost) * innerstartsel;
2347 inner_run_cost = (inner_path->total_cost - inner_path->startup_cost)
2348 * (innerendsel - innerstartsel);
2352 * We can't yet determine whether rescanning occurs, or whether
2353 * materialization of the inner input should be done. The minimum
2354 * possible inner input cost, regardless of rescan and materialization
2355 * considerations, is inner_run_cost. We include that in
2356 * workspace->total_cost, but not yet in run_cost.
2359 /* CPU costs left for later */
2361 /* Public result fields */
2362 workspace->startup_cost = startup_cost;
2363 workspace->total_cost = startup_cost + run_cost + inner_run_cost;
2364 /* Save private data for final_cost_mergejoin */
2365 workspace->run_cost = run_cost;
2366 workspace->inner_run_cost = inner_run_cost;
2367 workspace->outer_rows = outer_rows;
2368 workspace->inner_rows = inner_rows;
2369 workspace->outer_skip_rows = outer_skip_rows;
2370 workspace->inner_skip_rows = inner_skip_rows;
2374 * final_cost_mergejoin
2375 * Final estimate of the cost and result size of a mergejoin path.
2377 * Unlike other costsize functions, this routine makes one actual decision:
2378 * whether we should materialize the inner path. We do that either because
2379 * the inner path can't support mark/restore, or because it's cheaper to
2380 * use an interposed Material node to handle mark/restore. When the decision
2381 * is cost-based it would be logically cleaner to build and cost two separate
2382 * paths with and without that flag set; but that would require repeating most
2383 * of the cost calculations, which are not all that cheap. Since the choice
2384 * will not affect output pathkeys or startup cost, only total cost, there is
2385 * no possibility of wanting to keep both paths. So it seems best to make
2386 * the decision here and record it in the path's materialize_inner field.
 * 'path' is already filled in except for the rows and cost fields and materialize_inner
2390 * 'workspace' is the result from initial_cost_mergejoin
2391 * 'sjinfo' is extra info about the join for selectivity estimation
2394 final_cost_mergejoin(PlannerInfo *root, MergePath *path,
2395 JoinCostWorkspace *workspace,
2396 SpecialJoinInfo *sjinfo)
2398 Path *outer_path = path->jpath.outerjoinpath;
2399 Path *inner_path = path->jpath.innerjoinpath;
2400 double inner_path_rows = inner_path->rows;
2401 List *mergeclauses = path->path_mergeclauses;
2402 List *innersortkeys = path->innersortkeys;
2403 Cost startup_cost = workspace->startup_cost;
2404 Cost run_cost = workspace->run_cost;
2405 Cost inner_run_cost = workspace->inner_run_cost;
2406 double outer_rows = workspace->outer_rows;
2407 double inner_rows = workspace->inner_rows;
2408 double outer_skip_rows = workspace->outer_skip_rows;
2409 double inner_skip_rows = workspace->inner_skip_rows;
2413 QualCost merge_qual_cost;
2414 QualCost qp_qual_cost;
double mergejointuples, rescannedtuples, rescanratio;
2419 /* Protect some assumptions below that rowcounts aren't zero or NaN */
2420 if (inner_path_rows <= 0 || isnan(inner_path_rows))
2421 inner_path_rows = 1;
2423 /* Mark the path with the correct row estimate */
2424 if (path->jpath.path.param_info)
2425 path->jpath.path.rows = path->jpath.path.param_info->ppi_rows;
2427 path->jpath.path.rows = path->jpath.path.parent->rows;
2430 * We could include disable_cost in the preliminary estimate, but that
2431 * would amount to optimizing for the case where the join method is
2432 * disabled, which doesn't seem like the way to bet.
2434 if (!enable_mergejoin)
2435 startup_cost += disable_cost;
 * Compute cost of the mergequals and qpquals (other restriction clauses) separately.
2441 cost_qual_eval(&merge_qual_cost, mergeclauses, root);
2442 cost_qual_eval(&qp_qual_cost, path->jpath.joinrestrictinfo, root);
2443 qp_qual_cost.startup -= merge_qual_cost.startup;
2444 qp_qual_cost.per_tuple -= merge_qual_cost.per_tuple;
2447 * Get approx # tuples passing the mergequals. We use approx_tuple_count
2448 * here because we need an estimate done with JOIN_INNER semantics.
2450 mergejointuples = approx_tuple_count(root, &path->jpath, mergeclauses);
2453 * When there are equal merge keys in the outer relation, the mergejoin
2454 * must rescan any matching tuples in the inner relation. This means
2455 * re-fetching inner tuples; we have to estimate how often that happens.
2457 * For regular inner and outer joins, the number of re-fetches can be
2458 * estimated approximately as size of merge join output minus size of
2459 * inner relation. Assume that the distinct key values are 1, 2, ..., and
2460 * denote the number of values of each key in the outer relation as m1,
2461 * m2, ...; in the inner relation, n1, n2, ... Then we have
2463 * size of join = m1 * n1 + m2 * n2 + ...
2465 * number of rescanned tuples = (m1 - 1) * n1 + (m2 - 1) * n2 + ... = m1 *
 * n1 + m2 * n2 + ... - (n1 + n2 + ...) = size of join - size of inner relation
2469 * This equation works correctly for outer tuples having no inner match
2470 * (nk = 0), but not for inner tuples having no outer match (mk = 0); we
2471 * are effectively subtracting those from the number of rescanned tuples,
 * when we should not.  Can we do better without expensive selectivity
 * recomputation?
 *
 * The whole issue is moot if we are working from a unique-ified outer input.
2478 if (IsA(outer_path, UniquePath))
2479 rescannedtuples = 0;
2482 rescannedtuples = mergejointuples - inner_path_rows;
2483 /* Must clamp because of possible underestimate */
2484 if (rescannedtuples < 0)
2485 rescannedtuples = 0;
2487 /* We'll inflate various costs this much to account for rescanning */
2488 rescanratio = 1.0 + (rescannedtuples / inner_path_rows);
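/*
 * Added illustrative note: if the join is expected to emit 15,000 tuples
 * while the inner relation has 10,000 rows, rescannedtuples is 5,000 and
 * rescanratio is 1.5, i.e. the inner per-tuple charges below are inflated
 * by 50%.  Hypothetical numbers for illustration only.
 */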
2491 * Decide whether we want to materialize the inner input to shield it from
2492 * mark/restore and performing re-fetches. Our cost model for regular
2493 * re-fetches is that a re-fetch costs the same as an original fetch,
2494 * which is probably an overestimate; but on the other hand we ignore the
2495 * bookkeeping costs of mark/restore. Not clear if it's worth developing
 * a more refined model.  So we just need to inflate the inner run cost by rescanratio.
2499 bare_inner_cost = inner_run_cost * rescanratio;
2502 * When we interpose a Material node the re-fetch cost is assumed to be
2503 * just cpu_operator_cost per tuple, independently of the underlying
2504 * plan's cost; and we charge an extra cpu_operator_cost per original
2505 * fetch as well. Note that we're assuming the materialize node will
2506 * never spill to disk, since it only has to remember tuples back to the
2507 * last mark. (If there are a huge number of duplicates, our other cost
2508 * factors will make the path so expensive that it probably won't get
2509 * chosen anyway.) So we don't use cost_rescan here.
2511 * Note: keep this estimate in sync with create_mergejoin_plan's labeling
2512 * of the generated Material node.
2514 mat_inner_cost = inner_run_cost +
2515 cpu_operator_cost * inner_path_rows * rescanratio;
2518 * Prefer materializing if it looks cheaper, unless the user has asked to
2519 * suppress materialization.
2521 if (enable_material && mat_inner_cost < bare_inner_cost)
2522 path->materialize_inner = true;
2525 * Even if materializing doesn't look cheaper, we *must* do it if the
2526 * inner path is to be used directly (without sorting) and it doesn't
2527 * support mark/restore.
2529 * Since the inner side must be ordered, and only Sorts and IndexScans can
2530 * create order to begin with, and they both support mark/restore, you
2531 * might think there's no problem --- but you'd be wrong. Nestloop and
2532 * merge joins can *preserve* the order of their inputs, so they can be
2533 * selected as the input of a mergejoin, and they don't support
2534 * mark/restore at present.
2536 * We don't test the value of enable_material here, because
2537 * materialization is required for correctness in this case, and turning
2538 * it off does not entitle us to deliver an invalid plan.
2540 else if (innersortkeys == NIL &&
2541 !ExecSupportsMarkRestore(inner_path))
2542 path->materialize_inner = true;
2545 * Also, force materializing if the inner path is to be sorted and the
2546 * sort is expected to spill to disk. This is because the final merge
2547 * pass can be done on-the-fly if it doesn't have to support mark/restore.
 * We don't try to adjust the cost estimates for this consideration, though.
2551 * Since materialization is a performance optimization in this case,
 * rather than necessary for correctness, we skip it if enable_material is off.
2555 else if (enable_material && innersortkeys != NIL &&
2556 relation_byte_size(inner_path_rows,
2557 inner_path->pathtarget->width) >
2559 path->materialize_inner = true;
2561 path->materialize_inner = false;
2563 /* Charge the right incremental cost for the chosen case */
2564 if (path->materialize_inner)
2565 run_cost += mat_inner_cost;
2567 run_cost += bare_inner_cost;
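/*
 * Added illustrative note: for a hypothetical inner_run_cost of 100,
 * inner_path_rows = 10,000 and rescanratio = 1.5, and the default
 * cpu_operator_cost of 0.0025, bare_inner_cost is 150 while mat_inner_cost
 * is 100 + 0.0025 * 10,000 * 1.5 = 137.5, so materializing would be chosen
 * when enable_material is on.  Figures for illustration only.
 */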
2572 * The number of tuple comparisons needed is approximately number of outer
2573 * rows plus number of inner rows plus number of rescanned tuples (can we
2574 * refine this?). At each one, we need to evaluate the mergejoin quals.
2576 startup_cost += merge_qual_cost.startup;
2577 startup_cost += merge_qual_cost.per_tuple *
2578 (outer_skip_rows + inner_skip_rows * rescanratio);
2579 run_cost += merge_qual_cost.per_tuple *
2580 ((outer_rows - outer_skip_rows) +
2581 (inner_rows - inner_skip_rows) * rescanratio);
2584 * For each tuple that gets through the mergejoin proper, we charge
2585 * cpu_tuple_cost plus the cost of evaluating additional restriction
2586 * clauses that are to be applied at the join. (This is pessimistic since
2587 * not all of the quals may get evaluated at each tuple.)
2589 * Note: we could adjust for SEMI/ANTI joins skipping some qual
2590 * evaluations here, but it's probably not worth the trouble.
2592 startup_cost += qp_qual_cost.startup;
2593 cpu_per_tuple = cpu_tuple_cost + qp_qual_cost.per_tuple;
2594 run_cost += cpu_per_tuple * mergejointuples;
2596 /* tlist eval costs are paid per output row, not per tuple scanned */
2597 startup_cost += path->jpath.path.pathtarget->cost.startup;
2598 run_cost += path->jpath.path.pathtarget->cost.per_tuple * path->jpath.path.rows;
2600 path->jpath.path.startup_cost = startup_cost;
2601 path->jpath.path.total_cost = startup_cost + run_cost;
2605 * run mergejoinscansel() with caching
2607 static MergeScanSelCache *
2608 cached_scansel(PlannerInfo *root, RestrictInfo *rinfo, PathKey *pathkey)
2610 MergeScanSelCache *cache;
Selectivity leftstartsel, leftendsel, rightstartsel, rightendsel;
2616 MemoryContext oldcontext;
2618 /* Do we have this result already? */
2619 foreach(lc, rinfo->scansel_cache)
2621 cache = (MergeScanSelCache *) lfirst(lc);
2622 if (cache->opfamily == pathkey->pk_opfamily &&
2623 cache->collation == pathkey->pk_eclass->ec_collation &&
2624 cache->strategy == pathkey->pk_strategy &&
2625 cache->nulls_first == pathkey->pk_nulls_first)
2629 /* Nope, do the computation */
2630 mergejoinscansel(root,
2631 (Node *) rinfo->clause,
2632 pathkey->pk_opfamily,
2633 pathkey->pk_strategy,
pathkey->pk_nulls_first,
                 &leftstartsel, &leftendsel, &rightstartsel, &rightendsel);
2640 /* Cache the result in suitably long-lived workspace */
2641 oldcontext = MemoryContextSwitchTo(root->planner_cxt);
2643 cache = (MergeScanSelCache *) palloc(sizeof(MergeScanSelCache));
2644 cache->opfamily = pathkey->pk_opfamily;
2645 cache->collation = pathkey->pk_eclass->ec_collation;
2646 cache->strategy = pathkey->pk_strategy;
2647 cache->nulls_first = pathkey->pk_nulls_first;
2648 cache->leftstartsel = leftstartsel;
2649 cache->leftendsel = leftendsel;
2650 cache->rightstartsel = rightstartsel;
2651 cache->rightendsel = rightendsel;
2653 rinfo->scansel_cache = lappend(rinfo->scansel_cache, cache);
2655 MemoryContextSwitchTo(oldcontext);
2661 * initial_cost_hashjoin
2662 * Preliminary estimate of the cost of a hashjoin path.
2664 * This must quickly produce lower-bound estimates of the path's startup and
2665 * total costs. If we are unable to eliminate the proposed path from
2666 * consideration using the lower bounds, final_cost_hashjoin will be called
2667 * to obtain the final estimates.
2669 * The exact division of labor between this function and final_cost_hashjoin
2670 * is private to them, and represents a tradeoff between speed of the initial
2671 * estimate and getting a tight lower bound. We choose to not examine the
2672 * join quals here (other than by counting the number of hash clauses),
2673 * so we can't do much with CPU costs. We do assume that
2674 * ExecChooseHashTableSize is cheap enough to use here.
2676 * 'workspace' is to be filled with startup_cost, total_cost, and perhaps
2677 * other data to be used by final_cost_hashjoin
2678 * 'jointype' is the type of join to be performed
2679 * 'hashclauses' is the list of joinclauses to be used as hash clauses
2680 * 'outer_path' is the outer input to the join
2681 * 'inner_path' is the inner input to the join
2682 * 'sjinfo' is extra info about the join for selectivity estimation
2683 * 'semifactors' contains valid data if jointype is SEMI or ANTI
2686 initial_cost_hashjoin(PlannerInfo *root, JoinCostWorkspace *workspace,
2689 Path *outer_path, Path *inner_path,
2690 SpecialJoinInfo *sjinfo,
2691 SemiAntiJoinFactors *semifactors)
Cost startup_cost = 0;
Cost run_cost = 0;
2695 double outer_path_rows = outer_path->rows;
2696 double inner_path_rows = inner_path->rows;
2697 int num_hashclauses = list_length(hashclauses);
2702 /* cost of source data */
2703 startup_cost += outer_path->startup_cost;
2704 run_cost += outer_path->total_cost - outer_path->startup_cost;
2705 startup_cost += inner_path->total_cost;
2708 * Cost of computing hash function: must do it once per input tuple. We
2709 * charge one cpu_operator_cost for each column's hash function. Also,
2710 * tack on one cpu_tuple_cost per inner row, to model the costs of
2711 * inserting the row into the hashtable.
2713 * XXX when a hashclause is more complex than a single operator, we really
2714 * should charge the extra eval costs of the left or right side, as
2715 * appropriate, here. This seems more work than it's worth at the moment.
startup_cost += (cpu_operator_cost * num_hashclauses + cpu_tuple_cost) * inner_path_rows;
2719 run_cost += cpu_operator_cost * num_hashclauses * outer_path_rows;
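/*
 * Added illustrative note: with two hash clauses and the default costs
 * (cpu_operator_cost = 0.0025, cpu_tuple_cost = 0.01), hashing and loading
 * 1,000,000 inner rows adds (2 * 0.0025 + 0.01) * 1,000,000 = 15,000 to
 * startup_cost, and hashing 10,000,000 outer rows adds
 * 2 * 0.0025 * 10,000,000 = 50,000 to run_cost.  Hypothetical figures for
 * scale only.
 */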
2722 * Get hash table size that executor would use for inner relation.
2724 * XXX for the moment, always assume that skew optimization will be
2725 * performed. As long as SKEW_WORK_MEM_PERCENT is small, it's not worth
2726 * trying to determine that for sure.
2728 * XXX at some point it might be interesting to try to account for skew
2729 * optimization in the cost estimate, but for now, we don't.
ExecChooseHashTableSize(inner_path_rows,
                        inner_path->pathtarget->width,
                        true,   /* useskew */
                        &numbuckets, &numbatches, &num_skew_mcvs);
2739 * If inner relation is too big then we will need to "batch" the join,
2740 * which implies writing and reading most of the tuples to disk an extra
2741 * time. Charge seq_page_cost per page, since the I/O should be nice and
 * sequential.  Writing the inner rel counts as startup cost, all the rest as run cost.
2747 double outerpages = page_size(outer_path_rows,
2748 outer_path->pathtarget->width);
2749 double innerpages = page_size(inner_path_rows,
2750 inner_path->pathtarget->width);
2752 startup_cost += seq_page_cost * innerpages;
2753 run_cost += seq_page_cost * (innerpages + 2 * outerpages);
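/*
 * Added illustrative note: with the default seq_page_cost of 1.0, a batched
 * join whose inner side occupies 5,000 pages and whose outer side occupies
 * 20,000 pages charges 5,000 to startup_cost (writing the inner rel) and
 * 5,000 + 2 * 20,000 = 45,000 to run_cost (re-reading the inner rel plus
 * writing and re-reading the outer rel).  Hypothetical figures only.
 */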
2756 /* CPU costs left for later */
2758 /* Public result fields */
2759 workspace->startup_cost = startup_cost;
2760 workspace->total_cost = startup_cost + run_cost;
2761 /* Save private data for final_cost_hashjoin */
2762 workspace->run_cost = run_cost;
2763 workspace->numbuckets = numbuckets;
2764 workspace->numbatches = numbatches;
2768 * final_cost_hashjoin
2769 * Final estimate of the cost and result size of a hashjoin path.
2771 * Note: the numbatches estimate is also saved into 'path' for use later
 * 'path' is already filled in except for the rows and cost fields and num_batches
2775 * 'workspace' is the result from initial_cost_hashjoin
2776 * 'sjinfo' is extra info about the join for selectivity estimation
2777 * 'semifactors' contains valid data if path->jointype is SEMI or ANTI
2780 final_cost_hashjoin(PlannerInfo *root, HashPath *path,
2781 JoinCostWorkspace *workspace,
2782 SpecialJoinInfo *sjinfo,
2783 SemiAntiJoinFactors *semifactors)
2785 Path *outer_path = path->jpath.outerjoinpath;
2786 Path *inner_path = path->jpath.innerjoinpath;
2787 double outer_path_rows = outer_path->rows;
2788 double inner_path_rows = inner_path->rows;
2789 List *hashclauses = path->path_hashclauses;
2790 Cost startup_cost = workspace->startup_cost;
2791 Cost run_cost = workspace->run_cost;
2792 int numbuckets = workspace->numbuckets;
2793 int numbatches = workspace->numbatches;
2795 QualCost hash_qual_cost;
2796 QualCost qp_qual_cost;
2797 double hashjointuples;
2798 double virtualbuckets;
2799 Selectivity innerbucketsize;
2802 /* Mark the path with the correct row estimate */
2803 if (path->jpath.path.param_info)
2804 path->jpath.path.rows = path->jpath.path.param_info->ppi_rows;
2806 path->jpath.path.rows = path->jpath.path.parent->rows;
2809 * We could include disable_cost in the preliminary estimate, but that
2810 * would amount to optimizing for the case where the join method is
2811 * disabled, which doesn't seem like the way to bet.
2813 if (!enable_hashjoin)
2814 startup_cost += disable_cost;
2816 /* mark the path with estimated # of batches */
2817 path->num_batches = numbatches;
2819 /* and compute the number of "virtual" buckets in the whole join */
2820 virtualbuckets = (double) numbuckets *(double) numbatches;
2823 * Determine bucketsize fraction for inner relation. We use the smallest
 * bucketsize estimated for any individual hashclause; this is undoubtedly conservative.
2827 * BUT: if inner relation has been unique-ified, we can assume it's good
2828 * for hashing. This is important both because it's the right answer, and
2829 * because we avoid contaminating the cache with a value that's wrong for
2830 * non-unique-ified paths.
2832 if (IsA(inner_path, UniquePath))
2833 innerbucketsize = 1.0 / virtualbuckets;
2836 innerbucketsize = 1.0;
2837 foreach(hcl, hashclauses)
2839 RestrictInfo *restrictinfo = (RestrictInfo *) lfirst(hcl);
2840 Selectivity thisbucketsize;
2842 Assert(IsA(restrictinfo, RestrictInfo));
2845 * First we have to figure out which side of the hashjoin clause
2846 * is the inner side.
2848 * Since we tend to visit the same clauses over and over when
2849 * planning a large query, we cache the bucketsize estimate in the
2850 * RestrictInfo node to avoid repeated lookups of statistics.
2852 if (bms_is_subset(restrictinfo->right_relids,
2853 inner_path->parent->relids))
2855 /* righthand side is inner */
2856 thisbucketsize = restrictinfo->right_bucketsize;
2857 if (thisbucketsize < 0)
2859 /* not cached yet */
thisbucketsize =
    estimate_hash_bucketsize(root,
                             get_rightop(restrictinfo->clause),
                             virtualbuckets);
2864 restrictinfo->right_bucketsize = thisbucketsize;
2869 Assert(bms_is_subset(restrictinfo->left_relids,
2870 inner_path->parent->relids));
2871 /* lefthand side is inner */
2872 thisbucketsize = restrictinfo->left_bucketsize;
2873 if (thisbucketsize < 0)
2875 /* not cached yet */
thisbucketsize =
    estimate_hash_bucketsize(root,
                             get_leftop(restrictinfo->clause),
                             virtualbuckets);
2880 restrictinfo->left_bucketsize = thisbucketsize;
2884 if (innerbucketsize > thisbucketsize)
2885 innerbucketsize = thisbucketsize;
 * Compute cost of the hashquals and qpquals (other restriction clauses) separately.
2893 cost_qual_eval(&hash_qual_cost, hashclauses, root);
2894 cost_qual_eval(&qp_qual_cost, path->jpath.joinrestrictinfo, root);
2895 qp_qual_cost.startup -= hash_qual_cost.startup;
2896 qp_qual_cost.per_tuple -= hash_qual_cost.per_tuple;
2900 if (path->jpath.jointype == JOIN_SEMI || path->jpath.jointype == JOIN_ANTI)
2902 double outer_matched_rows;
2903 Selectivity inner_scan_frac;
2906 * SEMI or ANTI join: executor will stop after first match.
2908 * For an outer-rel row that has at least one match, we can expect the
2909 * bucket scan to stop after a fraction 1/(match_count+1) of the
2910 * bucket's rows, if the matches are evenly distributed. Since they
2911 * probably aren't quite evenly distributed, we apply a fuzz factor of
2912 * 2.0 to that fraction. (If we used a larger fuzz factor, we'd have
2913 * to clamp inner_scan_frac to at most 1.0; but since match_count is
2914 * at least 1, no such clamp is needed now.)
2916 outer_matched_rows = rint(outer_path_rows * semifactors->outer_match_frac);
2917 inner_scan_frac = 2.0 / (semifactors->match_count + 1.0);
2919 startup_cost += hash_qual_cost.startup;
2920 run_cost += hash_qual_cost.per_tuple * outer_matched_rows *
2921 clamp_row_est(inner_path_rows * innerbucketsize * inner_scan_frac) * 0.5;
2924 * For unmatched outer-rel rows, the picture is quite a lot different.
2925 * In the first place, there is no reason to assume that these rows
2926 * preferentially hit heavily-populated buckets; instead assume they
2927 * are uncorrelated with the inner distribution and so they see an
2928 * average bucket size of inner_path_rows / virtualbuckets. In the
2929 * second place, it seems likely that they will have few if any exact
2930 * hash-code matches and so very few of the tuples in the bucket will
2931 * actually require eval of the hash quals. We don't have any good
2932 * way to estimate how many will, but for the moment assume that the
 * effective cost per bucket entry is one-tenth what it is for matched tuples.
2936 run_cost += hash_qual_cost.per_tuple *
2937 (outer_path_rows - outer_matched_rows) *
2938 clamp_row_est(inner_path_rows / virtualbuckets) * 0.05;
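/*
 * Added clarifying note: the 0.05 multiplier above is the product of the
 * same 0.5 halving applied to matched rows and the one-tenth factor
 * described in the comment, i.e. unmatched outer rows are charged a
 * twentieth of the nominal per-bucket qual-evaluation cost.
 */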
2940 /* Get # of tuples that will pass the basic join */
2941 if (path->jpath.jointype == JOIN_SEMI)
2942 hashjointuples = outer_matched_rows;
2944 hashjointuples = outer_path_rows - outer_matched_rows;
2949 * The number of tuple comparisons needed is the number of outer
2950 * tuples times the typical number of tuples in a hash bucket, which
2951 * is the inner relation size times its bucketsize fraction. At each
2952 * one, we need to evaluate the hashjoin quals. But actually,
2953 * charging the full qual eval cost at each tuple is pessimistic,
2954 * since we don't evaluate the quals unless the hash values match
 * exactly.  For lack of a better idea, halve the cost estimate to allow for that.
2958 startup_cost += hash_qual_cost.startup;
2959 run_cost += hash_qual_cost.per_tuple * outer_path_rows *
2960 clamp_row_est(inner_path_rows * innerbucketsize) * 0.5;
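/*
 * Added illustrative note: if the inner relation has 100,000 rows and
 * innerbucketsize is 0.001, a typical bucket holds about 100 tuples, so
 * after halving we charge roughly 50 hash-qual evaluations per outer tuple;
 * with a per-tuple qual cost of 0.0025 that is about 0.125 per outer row.
 * Hypothetical figures for illustration only.
 */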
2963 * Get approx # tuples passing the hashquals. We use
2964 * approx_tuple_count here because we need an estimate done with
2965 * JOIN_INNER semantics.
2967 hashjointuples = approx_tuple_count(root, &path->jpath, hashclauses);
2971 * For each tuple that gets through the hashjoin proper, we charge
2972 * cpu_tuple_cost plus the cost of evaluating additional restriction
2973 * clauses that are to be applied at the join. (This is pessimistic since
2974 * not all of the quals may get evaluated at each tuple.)
2976 startup_cost += qp_qual_cost.startup;
2977 cpu_per_tuple = cpu_tuple_cost + qp_qual_cost.per_tuple;
2978 run_cost += cpu_per_tuple * hashjointuples;
2980 /* tlist eval costs are paid per output row, not per tuple scanned */
2981 startup_cost += path->jpath.path.pathtarget->cost.startup;
2982 run_cost += path->jpath.path.pathtarget->cost.per_tuple * path->jpath.path.rows;
2984 path->jpath.path.startup_cost = startup_cost;
2985 path->jpath.path.total_cost = startup_cost + run_cost;
2991 * Figure the costs for a SubPlan (or initplan).
2993 * Note: we could dig the subplan's Plan out of the root list, but in practice
2994 * all callers have it handy already, so we make them pass it.
2997 cost_subplan(PlannerInfo *root, SubPlan *subplan, Plan *plan)
3001 /* Figure any cost for evaluating the testexpr */
3002 cost_qual_eval(&sp_cost,
make_ands_implicit((Expr *) subplan->testexpr),
               root);
3006 if (subplan->useHashTable)
3009 * If we are using a hash table for the subquery outputs, then the
3010 * cost of evaluating the query is a one-time cost. We charge one
 * cpu_operator_cost per tuple for the work of loading the hashtable, too.
3014 sp_cost.startup += plan->total_cost +
3015 cpu_operator_cost * plan->plan_rows;
3018 * The per-tuple costs include the cost of evaluating the lefthand
3019 * expressions, plus the cost of probing the hashtable. We already
3020 * accounted for the lefthand expressions as part of the testexpr, and
3021 * will also have counted one cpu_operator_cost for each comparison
3022 * operator. That is probably too low for the probing cost, but it's
3023 * hard to make a better estimate, so live with it for now.
3029 * Otherwise we will be rescanning the subplan output on each
3030 * evaluation. We need to estimate how much of the output we will
3031 * actually need to scan. NOTE: this logic should agree with the
 * tuple_fraction estimates used by make_subplan() in plan/subselect.c.
3035 Cost plan_run_cost = plan->total_cost - plan->startup_cost;
3037 if (subplan->subLinkType == EXISTS_SUBLINK)
3039 /* we only need to fetch 1 tuple; clamp to avoid zero divide */
3040 sp_cost.per_tuple += plan_run_cost / clamp_row_est(plan->plan_rows);
3042 else if (subplan->subLinkType == ALL_SUBLINK ||
3043 subplan->subLinkType == ANY_SUBLINK)
3045 /* assume we need 50% of the tuples */
3046 sp_cost.per_tuple += 0.50 * plan_run_cost;
3047 /* also charge a cpu_operator_cost per row examined */
3048 sp_cost.per_tuple += 0.50 * plan->plan_rows * cpu_operator_cost;
3052 /* assume we need all tuples */
3053 sp_cost.per_tuple += plan_run_cost;
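/*
 * Added illustrative note: for a subplan with plan_run_cost = 200 and
 * plan_rows = 1,000 (hypothetical numbers, default cpu_operator_cost of
 * 0.0025), an EXISTS sublink adds 200 / 1,000 = 0.2 per call, an ANY/ALL
 * sublink adds 0.5 * 200 + 0.5 * 1,000 * 0.0025 = 101.25 per call, and
 * other sublink types add the full 200 per call.
 */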
3057 * Also account for subplan's startup cost. If the subplan is
3058 * uncorrelated or undirect correlated, AND its topmost node is one
3059 * that materializes its output, assume that we'll only need to pay
 * its startup cost once; otherwise assume we pay the startup cost every time.
3063 if (subplan->parParam == NIL &&
3064 ExecMaterializesOutput(nodeTag(plan)))
3065 sp_cost.startup += plan->startup_cost;
3067 sp_cost.per_tuple += plan->startup_cost;
3070 subplan->startup_cost = sp_cost.startup;
3071 subplan->per_call_cost = sp_cost.per_tuple;
3077 * Given a finished Path, estimate the costs of rescanning it after
3078 * having done so the first time. For some Path types a rescan is
3079 * cheaper than an original scan (if no parameters change), and this
3080 * function embodies knowledge about that. The default is to return
3081 * the same costs stored in the Path. (Note that the cost estimates
3082 * actually stored in Paths are always for first scans.)
3084 * This function is not currently intended to model effects such as rescans
3085 * being cheaper due to disk block caching; what we are concerned with is
3086 * plan types wherein the executor caches results explicitly, or doesn't
3087 * redo startup calculations, etc.
3090 cost_rescan(PlannerInfo *root, Path *path,
3091 Cost *rescan_startup_cost, /* output parameters */
3092 Cost *rescan_total_cost)
3094 switch (path->pathtype)
3096 case T_FunctionScan:
3099 * Currently, nodeFunctionscan.c always executes the function to
3100 * completion before returning any rows, and caches the results in
3101 * a tuplestore. So the function eval cost is all startup cost
3102 * and isn't paid over again on rescans. However, all run costs
3103 * will be paid over again.
3105 *rescan_startup_cost = 0;
3106 *rescan_total_cost = path->total_cost - path->startup_cost;
3111 * Assume that all of the startup cost represents hash table
3112 * building, which we won't have to do over.
3114 *rescan_startup_cost = 0;
3115 *rescan_total_cost = path->total_cost - path->startup_cost;
3118 case T_WorkTableScan:
3121 * These plan types materialize their final result in a
3122 * tuplestore or tuplesort object. So the rescan cost is only
 * cpu_tuple_cost per tuple, unless the result is large enough to spill to disk.
3126 Cost run_cost = cpu_tuple_cost * path->rows;
3127 double nbytes = relation_byte_size(path->rows,
3128 path->pathtarget->width);
3129 long work_mem_bytes = work_mem * 1024L;
3131 if (nbytes > work_mem_bytes)
3133 /* It will spill, so account for re-read cost */
3134 double npages = ceil(nbytes / BLCKSZ);
3136 run_cost += seq_page_cost * npages;
3138 *rescan_startup_cost = 0;
3139 *rescan_total_cost = run_cost;
3146 * These plan types not only materialize their results, but do
3147 * not implement qual filtering or projection. So they are
3148 * even cheaper to rescan than the ones above. We charge only
3149 * cpu_operator_cost per tuple. (Note: keep that in sync with
3150 * the run_cost charge in cost_sort, and also see comments in
 * the run_cost charge in cost_sort, and also see comments in
 * cost_material before you change it.)
3153 Cost run_cost = cpu_operator_cost * path->rows;
3154 double nbytes = relation_byte_size(path->rows,
3155 path->pathtarget->width);
3156 long work_mem_bytes = work_mem * 1024L;
3158 if (nbytes > work_mem_bytes)
3160 /* It will spill, so account for re-read cost */
3161 double npages = ceil(nbytes / BLCKSZ);
3163 run_cost += seq_page_cost * npages;
3165 *rescan_startup_cost = 0;
3166 *rescan_total_cost = run_cost;
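/*
 * Added illustrative note: rescanning a materialized result of 10,000 rows
 * costs about 0.0025 * 10,000 = 25 with the default cpu_operator_cost,
 * versus 0.01 * 10,000 = 100 for the tuplestore-based cases above, plus
 * seq_page_cost per page only if the result exceeds work_mem.
 * Hypothetical figures for illustration only.
 */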
3170 *rescan_startup_cost = path->startup_cost;
3171 *rescan_total_cost = path->total_cost;
3179 * Estimate the CPU costs of evaluating a WHERE clause.
3180 * The input can be either an implicitly-ANDed list of boolean
3181 * expressions, or a list of RestrictInfo nodes. (The latter is
3182 * preferred since it allows caching of the results.)
3183 * The result includes both a one-time (startup) component,
3184 * and a per-evaluation component.
3187 cost_qual_eval(QualCost *cost, List *quals, PlannerInfo *root)
3189 cost_qual_eval_context context;
3192 context.root = root;
3193 context.total.startup = 0;
3194 context.total.per_tuple = 0;
3196 /* We don't charge any cost for the implicit ANDing at top level ... */
3200 Node *qual = (Node *) lfirst(l);
3202 cost_qual_eval_walker(qual, &context);
3205 *cost = context.total;
3209 * cost_qual_eval_node
3210 * As above, for a single RestrictInfo or expression.
3213 cost_qual_eval_node(QualCost *cost, Node *qual, PlannerInfo *root)
3215 cost_qual_eval_context context;
3217 context.root = root;
3218 context.total.startup = 0;
3219 context.total.per_tuple = 0;
3221 cost_qual_eval_walker(qual, &context);
3223 *cost = context.total;
3227 cost_qual_eval_walker(Node *node, cost_qual_eval_context *context)
3233 * RestrictInfo nodes contain an eval_cost field reserved for this
3234 * routine's use, so that it's not necessary to evaluate the qual clause's
3235 * cost more than once. If the clause's cost hasn't been computed yet,
3236 * the field's startup value will contain -1.
3238 if (IsA(node, RestrictInfo))
3240 RestrictInfo *rinfo = (RestrictInfo *) node;
3242 if (rinfo->eval_cost.startup < 0)
3244 cost_qual_eval_context locContext;
3246 locContext.root = context->root;
3247 locContext.total.startup = 0;
3248 locContext.total.per_tuple = 0;
3251 * For an OR clause, recurse into the marked-up tree so that we
3252 * set the eval_cost for contained RestrictInfos too.
3254 if (rinfo->orclause)
3255 cost_qual_eval_walker((Node *) rinfo->orclause, &locContext);
3257 cost_qual_eval_walker((Node *) rinfo->clause, &locContext);
3260 * If the RestrictInfo is marked pseudoconstant, it will be tested
3261 * only once, so treat its cost as all startup cost.
3263 if (rinfo->pseudoconstant)
3265 /* count one execution during startup */
3266 locContext.total.startup += locContext.total.per_tuple;
3267 locContext.total.per_tuple = 0;
3269 rinfo->eval_cost = locContext.total;
3271 context->total.startup += rinfo->eval_cost.startup;
3272 context->total.per_tuple += rinfo->eval_cost.per_tuple;
3273 /* do NOT recurse into children */
3278 * For each operator or function node in the given tree, we charge the
3279 * estimated execution cost given by pg_proc.procost (remember to multiply
3280 * this by cpu_operator_cost).
3282 * Vars and Consts are charged zero, and so are boolean operators (AND,
3283 * OR, NOT). Simplistic, but a lot better than no model at all.
3285 * Should we try to account for the possibility of short-circuit
3286 * evaluation of AND/OR? Probably *not*, because that would make the
3287 * results depend on the clause ordering, and we are not in any position
3288 * to expect that the current ordering of the clauses is the one that's
3289 * going to end up being used. The above per-RestrictInfo caching would
3290 * not mix well with trying to re-order clauses anyway.
3292 * Another issue that is entirely ignored here is that if a set-returning
3293 * function is below top level in the tree, the functions/operators above
3294 * it will need to be evaluated multiple times. In practical use, such
3295 * cases arise so seldom as to not be worth the added complexity needed;
3296 * moreover, since our rowcount estimates for functions tend to be pretty
3297 * phony, the results would also be pretty phony.
3299 if (IsA(node, FuncExpr))
3301 context->total.per_tuple +=
3302 get_func_cost(((FuncExpr *) node)->funcid) * cpu_operator_cost;
3304 else if (IsA(node, OpExpr) ||
3305 IsA(node, DistinctExpr) ||
3306 IsA(node, NullIfExpr))
3308 /* rely on struct equivalence to treat these all alike */
3309 set_opfuncid((OpExpr *) node);
3310 context->total.per_tuple +=
3311 get_func_cost(((OpExpr *) node)->opfuncid) * cpu_operator_cost;
3313 else if (IsA(node, ScalarArrayOpExpr))
3316 * Estimate that the operator will be applied to about half of the
3317 * array elements before the answer is determined.
3319 ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) node;
3320 Node *arraynode = (Node *) lsecond(saop->args);
3322 set_sa_opfuncid(saop);
3323 context->total.per_tuple += get_func_cost(saop->opfuncid) *
3324 cpu_operator_cost * estimate_array_length(arraynode) * 0.5;
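/*
 * Added illustrative note: for a 10-element array and an operator with the
 * default procost of 1, this charges 0.0025 * 10 * 0.5 = 0.0125 per tuple
 * under the default cpu_operator_cost.  Hypothetical figures for
 * illustration only.
 */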
3326 else if (IsA(node, Aggref) ||
3327 IsA(node, WindowFunc))
3330 * Aggref and WindowFunc nodes are (and should be) treated like Vars,
3331 * ie, zero execution cost in the current model, because they behave
3332 * essentially like Vars in execQual.c. We disregard the costs of
3333 * their input expressions for the same reason. The actual execution
3334 * costs of the aggregate/window functions and their arguments have to
3335 * be factored into plan-node-specific costing of the Agg or WindowAgg
3338 return false; /* don't recurse into children */
3340 else if (IsA(node, CoerceViaIO))
3342 CoerceViaIO *iocoerce = (CoerceViaIO *) node;
3347 /* check the result type's input function */
3348 getTypeInputInfo(iocoerce->resulttype,
3349 &iofunc, &typioparam);
3350 context->total.per_tuple += get_func_cost(iofunc) * cpu_operator_cost;
3351 /* check the input type's output function */
3352 getTypeOutputInfo(exprType((Node *) iocoerce->arg),
3353 &iofunc, &typisvarlena);
3354 context->total.per_tuple += get_func_cost(iofunc) * cpu_operator_cost;
3356 else if (IsA(node, ArrayCoerceExpr))
3358 ArrayCoerceExpr *acoerce = (ArrayCoerceExpr *) node;
3359 Node *arraynode = (Node *) acoerce->arg;
3361 if (OidIsValid(acoerce->elemfuncid))
3362 context->total.per_tuple += get_func_cost(acoerce->elemfuncid) *
3363 cpu_operator_cost * estimate_array_length(arraynode);
3365 else if (IsA(node, RowCompareExpr))
3367 /* Conservatively assume we will check all the columns */
3368 RowCompareExpr *rcexpr = (RowCompareExpr *) node;
3371 foreach(lc, rcexpr->opnos)
3373 Oid opid = lfirst_oid(lc);
context->total.per_tuple += get_func_cost(get_opcode(opid)) * cpu_operator_cost;
3379 else if (IsA(node, CurrentOfExpr))
3381 /* Report high cost to prevent selection of anything but TID scan */
3382 context->total.startup += disable_cost;
3384 else if (IsA(node, SubLink))
3386 /* This routine should not be applied to un-planned expressions */
3387 elog(ERROR, "cannot handle unplanned sub-select");
3389 else if (IsA(node, SubPlan))
3392 * A subplan node in an expression typically indicates that the
3393 * subplan will be executed on each evaluation, so charge accordingly.
3394 * (Sub-selects that can be executed as InitPlans have already been
3395 * removed from the expression.)
3397 SubPlan *subplan = (SubPlan *) node;
3399 context->total.startup += subplan->startup_cost;
3400 context->total.per_tuple += subplan->per_call_cost;
3403 * We don't want to recurse into the testexpr, because it was already
3404 * counted in the SubPlan node's costs. So we're done.
3408 else if (IsA(node, AlternativeSubPlan))
3411 * Arbitrarily use the first alternative plan for costing. (We should
3412 * certainly only include one alternative, and we don't yet have
 * enough information to know which one the executor is most likely to use.)
3416 AlternativeSubPlan *asplan = (AlternativeSubPlan *) node;
3418 return cost_qual_eval_walker((Node *) linitial(asplan->subplans),
3421 else if (IsA(node, PlaceHolderVar))
3424 * A PlaceHolderVar should be given cost zero when considering general
3425 * expression evaluation costs. The expense of doing the contained
3426 * expression is charged as part of the tlist eval costs of the scan
3427 * or join where the PHV is first computed (see set_rel_width and
3428 * add_placeholders_to_joinrel). If we charged it again here, we'd be
3429 * double-counting the cost for each level of plan that the PHV
 * bubbles up through.  Hence, return without recursing into the arg.
3436 /* recurse into children */
3437 return expression_tree_walker(node, cost_qual_eval_walker,
3442 * get_restriction_qual_cost
3443 * Compute evaluation costs of a baserel's restriction quals, plus any
3444 * movable join quals that have been pushed down to the scan.
3445 * Results are returned into *qpqual_cost.
3447 * This is a convenience subroutine that works for seqscans and other cases
3448 * where all the given quals will be evaluated the hard way. It's not useful
3449 * for cost_index(), for example, where the index machinery takes care of
3450 * some of the quals. We assume baserestrictcost was previously set by
3451 * set_baserel_size_estimates().
3454 get_restriction_qual_cost(PlannerInfo *root, RelOptInfo *baserel,
3455 ParamPathInfo *param_info,
3456 QualCost *qpqual_cost)
3460 /* Include costs of pushed-down clauses */
3461 cost_qual_eval(qpqual_cost, param_info->ppi_clauses, root);
3463 qpqual_cost->startup += baserel->baserestrictcost.startup;
3464 qpqual_cost->per_tuple += baserel->baserestrictcost.per_tuple;
3467 *qpqual_cost = baserel->baserestrictcost;
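/*
 * Minimal usage sketch (an assumed caller, mirroring what the seqscan-style
 * costing routines in this file do): the returned QualCost is typically
 * folded into a path's costs like this:
 *
 *     QualCost    qpqual_cost;
 *     Cost        cpu_per_tuple;
 *
 *     get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
 *     startup_cost += qpqual_cost.startup;
 *     cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
 *     run_cost += cpu_per_tuple * baserel->tuples;
 *
 * When param_info is non-NULL the result includes the pushed-down clauses'
 * eval costs on top of baserestrictcost; otherwise it is just a copy of
 * baserestrictcost.
 */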
3472 * compute_semi_anti_join_factors
3473 * Estimate how much of the inner input a SEMI or ANTI join
3474 * can be expected to scan.
3476 * In a hash or nestloop SEMI/ANTI join, the executor will stop scanning
3477 * inner rows as soon as it finds a match to the current outer row.
3478 * We should therefore adjust some of the cost components for this effect.
3479 * This function computes some estimates needed for these adjustments.
3480 * These estimates will be the same regardless of the particular paths used
3481 * for the outer and inner relation, so we compute these once and then pass
3482 * them to all the join cost estimation functions.
3485 * outerrel: outer relation under consideration
3486 * innerrel: inner relation under consideration
3487 * jointype: must be JOIN_SEMI or JOIN_ANTI
3488 * sjinfo: SpecialJoinInfo relevant to this join
3489 * restrictlist: join quals
3490 * Output parameters:
3491 * *semifactors is filled in (see relation.h for field definitions)
3494 compute_semi_anti_join_factors(PlannerInfo *root,
3495 RelOptInfo *outerrel,
3496 RelOptInfo *innerrel,
3498 SpecialJoinInfo *sjinfo,
3500 SemiAntiJoinFactors *semifactors)
3504 Selectivity avgmatch;
3505 SpecialJoinInfo norm_sjinfo;
3509 /* Should only be called in these cases */
3510 Assert(jointype == JOIN_SEMI || jointype == JOIN_ANTI);
3513 * In an ANTI join, we must ignore clauses that are "pushed down", since
3514 * those won't affect the match logic. In a SEMI join, we do not
3515 * distinguish joinquals from "pushed down" quals, so just use the whole
3516 * restrictinfo list.
3518 if (jointype == JOIN_ANTI)
3521 foreach(l, restrictlist)
3523 RestrictInfo *rinfo = (RestrictInfo *) lfirst(l);
3525 Assert(IsA(rinfo, RestrictInfo));
3526 if (!rinfo->is_pushed_down)
3527 joinquals = lappend(joinquals, rinfo);
3531 joinquals = restrictlist;
3534 * Get the JOIN_SEMI or JOIN_ANTI selectivity of the join clauses.
3536 jselec = clauselist_selectivity(root,
3543 * Also get the normal inner-join selectivity of the join clauses.
3545 norm_sjinfo.type = T_SpecialJoinInfo;
3546 norm_sjinfo.min_lefthand = outerrel->relids;
3547 norm_sjinfo.min_righthand = innerrel->relids;
3548 norm_sjinfo.syn_lefthand = outerrel->relids;
3549 norm_sjinfo.syn_righthand = innerrel->relids;
3550 norm_sjinfo.jointype = JOIN_INNER;
3551 /* we don't bother trying to make the remaining fields valid */
3552 norm_sjinfo.lhs_strict = false;
3553 norm_sjinfo.delay_upper_joins = false;
3554 norm_sjinfo.semi_can_btree = false;
3555 norm_sjinfo.semi_can_hash = false;
3556 norm_sjinfo.semi_operators = NIL;
3557 norm_sjinfo.semi_rhs_exprs = NIL;
3559 nselec = clauselist_selectivity(root,
3565 /* Avoid leaking a lot of ListCells */
3566 if (jointype == JOIN_ANTI)
3567 list_free(joinquals);
3570 * jselec can be interpreted as the fraction of outer-rel rows that have
3571 * any matches (this is true for both SEMI and ANTI cases). And nselec is
3572 * the fraction of the Cartesian product that matches. So, the average
3573 * number of matches for each outer-rel row that has at least one match is
3574 * nselec * inner_rows / jselec.
3576 * Note: it is correct to use the inner rel's "rows" count here, even
3577 * though we might later be considering a parameterized inner path with
3578 * fewer rows. This is because we have included all the join clauses in
3579 * the selectivity estimate.
3581 if (jselec > 0) /* protect against zero divide */
3583 avgmatch = nselec * innerrel->rows / jselec;
3584 /* Clamp to sane range */
3585 avgmatch = Max(1.0, avgmatch);
3590 semifactors->outer_match_frac = jselec;
3591 semifactors->match_count = avgmatch;
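/*
 * Worked example (made-up numbers): suppose the inner rel has 10000 rows,
 * jselec = 0.2 (20% of outer rows have at least one match) and
 * nselec = 0.0001 (fraction of the Cartesian product that matches).  Then
 *
 *     avgmatch = nselec * inner_rows / jselec
 *              = 0.0001 * 10000 / 0.2 = 5.0
 *
 * so semifactors->outer_match_frac = 0.2 and semifactors->match_count = 5.0:
 * each outer row that has any match is expected to match about 5 inner rows,
 * and the join costing routines use these numbers to estimate how early the
 * inner scan can stop.
 */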
3595 * has_indexed_join_quals
3596 * Check whether all the joinquals of a nestloop join are used as
3597 * inner index quals.
3599 * If the inner path of a SEMI/ANTI join is an indexscan (including bitmap
3600 * indexscan) that uses all the joinquals as indexquals, we can assume that an
3601 * unmatched outer tuple is cheap to process, whereas otherwise it's probably
3605 has_indexed_join_quals(NestPath *joinpath)
3607 Relids joinrelids = joinpath->path.parent->relids;
3608 Path *innerpath = joinpath->innerjoinpath;
3613 /* If join still has quals to evaluate, it's not fast */
3614 if (joinpath->joinrestrictinfo != NIL)
3616 /* Nor if the inner path isn't parameterized at all */
3617 if (innerpath->param_info == NULL)
3620 /* Find the indexclauses list for the inner scan */
3621 switch (innerpath->pathtype)
3624 case T_IndexOnlyScan:
3625 indexclauses = ((IndexPath *) innerpath)->indexclauses;
3627 case T_BitmapHeapScan:
3629 /* Accept only a simple bitmap scan, not AND/OR cases */
3630 Path *bmqual = ((BitmapHeapPath *) innerpath)->bitmapqual;
3632 if (IsA(bmqual, IndexPath))
3633 indexclauses = ((IndexPath *) bmqual)->indexclauses;
3641 * If it's not a simple indexscan, it probably doesn't run quickly
3642 * for zero rows out, even if it's a parameterized path using all
3649 * Examine the inner path's param clauses. Any that are from the outer
3650 * path must be found in the indexclauses list, either exactly or in an
3651 * equivalent form generated by equivclass.c. Also, we must find at least
3652 * one such clause, else it's a clauseless join which isn't fast.
3655 foreach(lc, innerpath->param_info->ppi_clauses)
3657 RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
3659 if (join_clause_is_movable_into(rinfo,
3660 innerpath->parent->relids,
3663 if (!(list_member_ptr(indexclauses, rinfo) ||
3664 is_redundant_derived_clause(rinfo, indexclauses)))
3674 * approx_tuple_count
3675 * Quick-and-dirty estimation of the number of join rows passing
3676 * a set of qual conditions.
3678 * The quals can be either an implicitly-ANDed list of boolean expressions,
3679 * or a list of RestrictInfo nodes (typically the latter).
3681 * We intentionally compute the selectivity under JOIN_INNER rules, even
3682 * if it's some type of outer join. This is appropriate because we are
3683 * trying to figure out how many tuples pass the initial merge or hash
3686 * This is quick-and-dirty because we bypass clauselist_selectivity, and
3687 * simply multiply the independent clause selectivities together. Now
3688 * clauselist_selectivity often can't do any better than that anyhow, but
3689 * for some situations (such as range constraints) it is smarter. However,
3690 * we can't effectively cache the results of clauselist_selectivity, whereas
3691 * the individual clause selectivities can be and are cached.
3693 * Since we are only using the results to estimate how many potential
3694 * output tuples are generated and passed through qpqual checking, it
3695 * seems OK to live with the approximation.
3698 approx_tuple_count(PlannerInfo *root, JoinPath *path, List *quals)
3701 double outer_tuples = path->outerjoinpath->rows;
3702 double inner_tuples = path->innerjoinpath->rows;
3703 SpecialJoinInfo sjinfo;
3704 Selectivity selec = 1.0;
3708 * Make up a SpecialJoinInfo for JOIN_INNER semantics.
3710 sjinfo.type = T_SpecialJoinInfo;
3711 sjinfo.min_lefthand = path->outerjoinpath->parent->relids;
3712 sjinfo.min_righthand = path->innerjoinpath->parent->relids;
3713 sjinfo.syn_lefthand = path->outerjoinpath->parent->relids;
3714 sjinfo.syn_righthand = path->innerjoinpath->parent->relids;
3715 sjinfo.jointype = JOIN_INNER;
3716 /* we don't bother trying to make the remaining fields valid */
3717 sjinfo.lhs_strict = false;
3718 sjinfo.delay_upper_joins = false;
3719 sjinfo.semi_can_btree = false;
3720 sjinfo.semi_can_hash = false;
3721 sjinfo.semi_operators = NIL;
3722 sjinfo.semi_rhs_exprs = NIL;
3724 /* Get the approximate selectivity */
3727 Node *qual = (Node *) lfirst(l);
3729 /* Note that clause_selectivity will be able to cache its result */
3730 selec *= clause_selectivity(root, qual, 0, JOIN_INNER, &sjinfo);
3733 /* Apply it to the input relation sizes */
3734 tuples = selec * outer_tuples * inner_tuples;
3736 return clamp_row_est(tuples);
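/*
 * Worked example (made-up numbers): with outer rows = 1000, inner rows = 500
 * and two quals whose cached selectivities are 0.01 and 0.1,
 *
 *     selec  = 0.01 * 0.1 = 0.001
 *     tuples = 0.001 * 1000 * 500 = 500
 *
 * clauselist_selectivity() might have recognized the two quals as, say, a
 * range pair and produced a larger combined selectivity, but as explained
 * above we accept the cruder product here so that the cached per-clause
 * selectivities can be reused.
 */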
3741 * set_baserel_size_estimates
3742 * Set the size estimates for the given base relation.
3744 * The rel's targetlist and restrictinfo list must have been constructed
3745 * already, and rel->tuples must be set.
3747 * We set the following fields of the rel node:
3748 * rows: the estimated number of output tuples (after applying
3749 * restriction clauses).
3750 * width: the estimated average output tuple width in bytes.
3751 * baserestrictcost: estimated cost of evaluating baserestrictinfo clauses.
3754 set_baserel_size_estimates(PlannerInfo *root, RelOptInfo *rel)
3758 /* Should only be applied to base relations */
3759 Assert(rel->relid > 0);
3761 nrows = rel->tuples *
3762 clauselist_selectivity(root,
3763 rel->baserestrictinfo,
3768 rel->rows = clamp_row_est(nrows);
3770 cost_qual_eval(&rel->baserestrictcost, rel->baserestrictinfo, root);
3772 set_rel_width(root, rel);
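/*
 * Worked example (made-up numbers): for a relation with rel->tuples = 100000
 * whose baserestrictinfo clauses have a combined selectivity of 0.0035,
 *
 *     nrows     = 100000 * 0.0035 = 350
 *     rel->rows = clamp_row_est(350) = 350
 *
 * clamp_row_est() only matters at the extremes: it rounds to an integral
 * value and forces estimates below 1 up to 1, protecting later arithmetic
 * against zero row counts.
 */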
3776 * get_parameterized_baserel_size
3777 * Make a size estimate for a parameterized scan of a base relation.
3779 * 'param_clauses' lists the additional join clauses to be used.
3781 * set_baserel_size_estimates must have been applied already.
3784 get_parameterized_baserel_size(PlannerInfo *root, RelOptInfo *rel,
3785 List *param_clauses)
3791 * Estimate the number of rows returned by the parameterized scan, knowing
3792 * that it will apply all the extra join clauses as well as the rel's own
3793 * restriction clauses. Note that we force the clauses to be treated as
3794 * non-join clauses during selectivity estimation.
3796 allclauses = list_concat(list_copy(param_clauses),
3797 rel->baserestrictinfo);
3798 nrows = rel->tuples *
3799 clauselist_selectivity(root,
3801 rel->relid, /* do not use 0! */
3804 nrows = clamp_row_est(nrows);
3805 /* For safety, make sure result is not more than the base estimate */
3806 if (nrows > rel->rows)
3812 * set_joinrel_size_estimates
3813 * Set the size estimates for the given join relation.
3815 * The rel's targetlist must have been constructed already, and a
3816 * restriction clause list that matches the given component rels must
3819 * Since there is more than one way to make a joinrel for more than two
3820 * base relations, the results we get here could depend on which component
3821 * rel pair is provided. In theory we should get the same answers no matter
3822 * which pair is provided; in practice, since the selectivity estimation
3823 * routines don't handle all cases equally well, we might not. But there's
3824 * not much to be done about it. (Would it make sense to repeat the
3825 * calculations for each pair of input rels that's encountered, and somehow
3826 * average the results? Probably way more trouble than it's worth, and
3827 * anyway we must keep the rowcount estimate the same for all paths for the
3830 * We set only the rows field here. The reltarget field was already set by
3831 * build_joinrel_tlist, and baserestrictcost is not used for join rels.
3834 set_joinrel_size_estimates(PlannerInfo *root, RelOptInfo *rel,
3835 RelOptInfo *outer_rel,
3836 RelOptInfo *inner_rel,
3837 SpecialJoinInfo *sjinfo,
3840 rel->rows = calc_joinrel_size_estimate(root,
3848 * get_parameterized_joinrel_size
3849 * Make a size estimate for a parameterized scan of a join relation.
3851 * 'rel' is the joinrel under consideration.
3852 * 'outer_rows', 'inner_rows' are the sizes of the (probably also
3853 * parameterized) join inputs under consideration.
3854 * 'sjinfo' is any SpecialJoinInfo relevant to this join.
3855 * 'restrict_clauses' lists the join clauses that need to be applied at the
3856 * join node (including any movable clauses that were moved down to this join,
3857 * and not including any movable clauses that were pushed down into the
3860 * set_joinrel_size_estimates must have been applied already.
3863 get_parameterized_joinrel_size(PlannerInfo *root, RelOptInfo *rel,
3866 SpecialJoinInfo *sjinfo,
3867 List *restrict_clauses)
3872 * Estimate the number of rows returned by the parameterized join as the
3873 * sizes of the input paths times the selectivity of the clauses that have
3874 * ended up at this join node.
3876 * As with set_joinrel_size_estimates, the rowcount estimate could depend
3877 * on the pair of input paths provided, though ideally we'd get the same
3878 * estimate for any pair with the same parameterization.
3880 nrows = calc_joinrel_size_estimate(root,
3885 /* For safety, make sure result is not more than the base estimate */
3886 if (nrows > rel->rows)
3892 * quals_match_foreign_key
3893 * Determines if the foreign key is matched by joinquals.
3895 * Checks that there are conditions on all columns of the foreign key, matching
3896 * the operators used by the foreign key etc. If such a complete match is found,
3897 * the function returns a bitmapset identifying the matching quals (0-based).
3899 * Otherwise (no match at all, or an incomplete match), NULL is returned.
3901 * XXX It seems possible in the future to do something useful when a
3902 * partial match occurs between join and FK, but that is less common
3903 * and that part isn't worked out yet.
3906 quals_match_foreign_key(PlannerInfo *root, ForeignKeyOptInfo *fkinfo,
3907 RelOptInfo *fkrel, RelOptInfo *foreignrel,
3911 int nkeys = fkinfo->nkeys;
3912 Bitmapset *qualmatches = NULL;
3913 Bitmapset *fkmatches = NULL;
3916 * Loop over each column of the foreign key and build a bitmapset
3917 * of the joinquals that match. Note that we don't stop at the
3918 * first match, as the expression could be duplicated in the
3919 * joinquals, and we want to generate a bitmapset which has bits set for
3920 * every matching join qual.
3922 for (i = 0; i < nkeys; i++)
3925 int quallstidx = -1;
3927 foreach(lc, joinquals)
3929 RestrictInfo *rinfo;
3937 * Skip this qual if we've already matched it to an earlier column
3938 * of the foreign key. Strictly speaking this isn't necessary, but
3939 * it's a useful optimization when the quals appear in the same
3940 * order as the foreign key's columns. It can only help when the
3941 * foreign key has more than one column and we're not testing the
3942 * first column.
3944 if (i > 0 && bms_is_member(quallstidx, qualmatches))
3948 * The clause is expected to be a binary OpExpr of the form "var op var";
3949 * the checks below reject anything that doesn't match the foreign key.
3951 rinfo = (RestrictInfo *) lfirst(lc);
3952 clause = (OpExpr *) rinfo->clause;
3955 * If the operator does not match then there's little point in
3956 * checking the operands.
3958 if (clause->opno != fkinfo->conpfeqop[i])
3961 leftvar = (Var *) get_leftop((Expr *) clause);
3962 rightvar = (Var *) get_rightop((Expr *) clause);
3964 /* Foreign keys only support Vars, so ignore anything more complex */
3965 if (!IsA(leftvar, Var) || !IsA(rightvar, Var))
3969 * For RestrictInfos built from an eclass we must consider each
3970 * member of the eclass, since rinfo's own operands may not belong
3971 * to the foreign key. For efficient tracking of which Vars we've
3972 * found (we only need to track 2), we use a bitmask. We can
3973 * safely stop searching once both of the least significant bits
3976 if (rinfo->parent_ec)
3978 EquivalenceClass *ec = rinfo->parent_ec;
3980 int foundvarmask = 0;
3982 foreach(lc2, ec->ec_members)
3984 EquivalenceMember *em = (EquivalenceMember *) lfirst(lc2);
3985 Var *var = (Var *) em->em_expr;
3990 if (foreignrel->relid == var->varno &&
3991 fkinfo->confkeys[i] == var->varattno)
3994 else if (fkrel->relid == var->varno &&
3995 fkinfo->conkeys[i] == var->varattno)
3999 * Check if we've found both matches. If found we add
4000 * this qual to the matched list and mark this key as
4003 if (foundvarmask == 3)
4005 qualmatches = bms_add_member(qualmatches, quallstidx);
4006 fkmatches = bms_add_member(fkmatches, i);
4014 * In the non-eclass RestrictInfo case, we check whether the left
4015 * and right Vars match this column of the foreign key.
4016 * Remember that the clause could be written with the Vars in either
4017 * order, so we test both permutations of the expression.
4019 if ((foreignrel->relid == leftvar->varno) &&
4020 (fkrel->relid == rightvar->varno) &&
4021 (fkinfo->confkeys[i] == leftvar->varattno) &&
4022 (fkinfo->conkeys[i] == rightvar->varattno))
4024 qualmatches = bms_add_member(qualmatches, quallstidx);
4025 fkmatches = bms_add_member(fkmatches, i);
4027 else if ((foreignrel->relid == rightvar->varno) &&
4028 (fkrel->relid == leftvar->varno) &&
4029 (fkinfo->confkeys[i] == rightvar->varattno) &&
4030 (fkinfo->conkeys[i] == leftvar->varattno))
4032 qualmatches = bms_add_member(qualmatches, quallstidx);
4033 fkmatches = bms_add_member(fkmatches, i);
4039 /* can't find more matches than columns in the foreign key */
4040 Assert(bms_num_members(fkmatches) <= nkeys);
4042 /* Only return the matches if the foreign key is matched fully. */
4043 if (bms_num_members(fkmatches) == nkeys)
4045 bms_free(fkmatches);
4049 bms_free(fkmatches);
4050 bms_free(qualmatches);
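/*
 * Illustrative example (hypothetical schema): given a two-column foreign key
 * on orders (product_id, warehouse_id) referencing stock, and the joinquals
 *
 *     orders.product_id = stock.product_id       -- joinquals index 0
 *     orders.warehouse_id = stock.warehouse_id   -- joinquals index 1
 *     orders.entered_by = stock.checked_by       -- index 2, not part of the FK
 *
 * the function returns a bitmapset containing {0, 1}: every column of the
 * foreign key is matched by some joinqual, and the unrelated clause is simply
 * left out.  If only the product_id clause were present, the match would be
 * incomplete and NULL would be returned.
 */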
4056 * find_best_foreign_key_quals
4057 * Finds the foreign key best matching the joinquals.
4059 * Analyzes joinquals to determine whether any quals match foreign keys defined
4060 * between the two relations (fkrel referencing foreignrel). When multiple
4061 * foreign keys match, we choose the one with the most keys as the best one,
4062 * because of the way estimation occurs in clauselist_join_selectivity(). We
4063 * could instead choose the FK matching the most quals, but quals may be duplicated.
4065 * We also track which joinquals match the chosen foreign key, so that we can
4066 * easily skip them when computing the selectivity.
4068 * When no matching foreign key is found we return 0; otherwise we return the
4069 * number of keys in the foreign key.
4071 * Foreign keys matched only partially are currently ignored.
4074 find_best_foreign_key_quals(PlannerInfo *root, RelOptInfo *fkrel,
4075 RelOptInfo *foreignrel, List *joinquals,
4076 Bitmapset **joinqualsbitmap)
4078 Bitmapset *qualbestmatch;
4083 * Fast path out when there are no foreign keys on fkrel, or when the use of
4084 * foreign keys for estimation is disabled by the enable_fkey_estimates GUC.
4086 if ((fkrel->fkeylist == NIL) || (!enable_fkey_estimates))
4088 *joinqualsbitmap = NULL;
4092 qualbestmatch = NULL;
4095 /* now check the matches for each foreign key defined on the fkrel */
4096 foreach(lc, fkrel->fkeylist)
4098 ForeignKeyOptInfo *fkinfo = (ForeignKeyOptInfo *) lfirst(lc);
4099 Bitmapset *qualsmatched;
4102 * We make no attempt to check that this foreign key actually
4103 * references 'foreignrel'; the reasoning is that we may be able
4104 * to match the foreign key to an eclass-member Var of a RestrictInfo
4105 * in the joinquals, and that Var may belong to some other relation.
4107 * XXX Is this assumption safe in all cases? Maybe not, but it seems
4108 * unlikely to produce a worse estimate than the previous approach.
4110 qualsmatched = quals_match_foreign_key(root, fkinfo, fkrel, foreignrel,
4113 /* Did we get a match? And is that match better than a previous one? */
4114 if (qualsmatched != NULL && fkinfo->nkeys > bestmatchnkeys)
4116 /* save the new best match */
4117 bms_free(qualbestmatch);
4118 qualbestmatch = qualsmatched;
4119 bestmatchnkeys = fkinfo->nkeys;
4123 *joinqualsbitmap = qualbestmatch;
4124 return bestmatchnkeys;
4128 * clauselist_join_selectivity
4129 * Estimate selectivity of join clauses either by using foreign key info
4130 * or by using the regular clauselist_selectivity().
4132 * Since the selectivity estimates for individual joinquals are multiplied
4133 * together, this can significantly underestimate the number of join tuples
4134 * when the join condition contains more than one clause. To ease that, we
4135 * make use of foreign keys: we assume that exactly 1 row will match when
4136 * *all* of the foreign key's columns are present in the join condition. Any
4137 * additional clauses are estimated using clauselist_selectivity().
4139 * Note this ignores whether the FK is invalid or currently deferred; we don't
4140 * rely on the FK for correctness of query results, so it is a reasonable and
4141 * safe assumption for planning purposes.
4144 clauselist_join_selectivity(PlannerInfo *root, List *joinquals,
4145 JoinType jointype, SpecialJoinInfo *sjinfo)
4149 Selectivity sel = 1.0;
4150 Bitmapset *foundfkquals = NULL;
4153 while ((innerid = bms_next_member(sjinfo->min_righthand, innerid)) >= 0)
4155 RelOptInfo *innerrel = find_base_rel(root, innerid);
4158 while ((outerid = bms_next_member(sjinfo->min_lefthand, outerid)) >= 0)
4160 RelOptInfo *outerrel = find_base_rel(root, outerid);
4161 Bitmapset *outer2inner;
4162 Bitmapset *inner2outer;
4167 * Check which quals are matched by a foreign key referencing the inner rel.
4170 outermatches = find_best_foreign_key_quals(root, outerrel,
4171 innerrel, joinquals, &outer2inner);
4173 /* do the same, but with relations swapped */
4174 innermatches = find_best_foreign_key_quals(root, innerrel,
4175 outerrel, joinquals, &inner2outer);
4178 * did we find any matches at all? If so we need to see which one is
4179 * the best/longest match
4181 if (outermatches != 0 || innermatches != 0)
4183 double referenced_tuples;
4186 /* either could be zero, but not both. */
4187 if (outermatches < innermatches)
4189 overlap = bms_overlap(foundfkquals, inner2outer);
4191 foundfkquals = bms_add_members(foundfkquals, inner2outer);
4192 referenced_tuples = Max(outerrel->tuples, 1.0);
4196 overlap = bms_overlap(foundfkquals, outer2inner);
4198 foundfkquals = bms_add_members(foundfkquals, outer2inner);
4199 referenced_tuples = Max(innerrel->tuples, 1.0);
4203 * XXX should we ignore these overlapping matches?
4204 * Or perhaps take the Max() or Min()?
4208 if (jointype == JOIN_SEMI || jointype == JOIN_ANTI)
4209 sel = Min(sel,Min(1.0 / (outerrel->tuples / Max(innerrel->tuples, 1.0)), 1.0));
4211 sel = Min(sel, 1.0 / referenced_tuples);
4215 if (jointype == JOIN_SEMI || jointype == JOIN_ANTI)
4216 sel *= Min(1.0 / (outerrel->tuples / Max(innerrel->tuples, 1.0)), 1.0);
4218 sel *= 1.0 / referenced_tuples;
4225 * If any unmatched quals remain, build a list of them and use
4226 * clauselist_selectivity() to estimate their combined selectivity.
4228 if (bms_num_members(foundfkquals) < list_length(joinquals))
4232 List *nonfkeyclauses = NIL;
4234 foreach (lc, joinquals)
4236 if (!bms_is_member(lstidx, foundfkquals))
4237 nonfkeyclauses = lappend(nonfkeyclauses, lfirst(lc));
4240 sel *= clauselist_selectivity(root, nonfkeyclauses, 0, jointype, sjinfo);
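/*
 * Worked example (made-up numbers): joining orders (outer) to products
 * (inner) on a single-column foreign key, with products->tuples = 10000 and
 * one additional non-FK join clause of selectivity 0.1:
 *
 *     sel  = 1.0 / 10000    FK: each orders row matches exactly one product
 *     sel *= 0.1            remaining clause, via clauselist_selectivity()
 *
 * The benefit is larger for multi-column foreign keys: without the FK, the
 * per-column selectivities would simply be multiplied together, typically
 * underestimating the join size badly.
 */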
4247 * calc_joinrel_size_estimate
4248 * Workhorse for set_joinrel_size_estimates and
4249 * get_parameterized_joinrel_size.
4252 calc_joinrel_size_estimate(PlannerInfo *root,
4255 SpecialJoinInfo *sjinfo,
4258 JoinType jointype = sjinfo->jointype;
4264 * Compute joinclause selectivity. Note that we are only considering
4265 * clauses that become restriction clauses at this join level; we are not
4266 * double-counting them because they were not considered in estimating the
4267 * sizes of the component rels.
4269 * For an outer join, we have to distinguish the selectivity of the join's
4270 * own clauses (JOIN/ON conditions) from any clauses that were "pushed
4271 * down". For inner joins we just count them all as joinclauses.
4273 if (IS_OUTER_JOIN(jointype))
4275 List *joinquals = NIL;
4276 List *pushedquals = NIL;
4279 /* Grovel through the clauses to separate into two lists */
4280 foreach(l, restrictlist)
4282 RestrictInfo *rinfo = (RestrictInfo *) lfirst(l);
4284 Assert(IsA(rinfo, RestrictInfo));
4285 if (rinfo->is_pushed_down)
4286 pushedquals = lappend(pushedquals, rinfo);
4288 joinquals = lappend(joinquals, rinfo);
4291 /* Get the separate selectivities */
4292 jselec = clauselist_join_selectivity(root,
4297 pselec = clauselist_selectivity(root,
4303 /* Avoid leaking a lot of ListCells */
4304 list_free(joinquals);
4305 list_free(pushedquals);
4309 jselec = clauselist_join_selectivity(root,
4313 pselec = 0.0; /* not used, keep compiler quiet */
4317 * Basically, we multiply size of Cartesian product by selectivity.
4319 * If we are doing an outer join, take that into account: the joinqual
4320 * selectivity has to be clamped using the knowledge that the output must
4321 * be at least as large as the non-nullable input. However, any
4322 * pushed-down quals are applied after the outer join, so their
4323 * selectivity applies fully.
4325 * For JOIN_SEMI and JOIN_ANTI, the selectivity is defined as the fraction
4326 * of LHS rows that have matches, and we apply that straightforwardly.
4331 nrows = outer_rows * inner_rows * jselec;
4334 nrows = outer_rows * inner_rows * jselec;
4335 if (nrows < outer_rows)
4340 nrows = outer_rows * inner_rows * jselec;
4341 if (nrows < outer_rows)
4343 if (nrows < inner_rows)
4348 nrows = outer_rows * jselec;
4349 /* pselec not used */
4352 nrows = outer_rows * (1.0 - jselec);
4356 /* other values not expected here */
4357 elog(ERROR, "unrecognized join type: %d", (int) jointype);
4358 nrows = 0; /* keep compiler quiet */
4362 return clamp_row_est(nrows);
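/*
 * Worked example (made-up numbers): a LEFT join with outer_rows = 1000,
 * inner_rows = 10, jselec = 0.05 and a pushed-down qual with pselec = 0.5:
 *
 *     nrows = 1000 * 10 * 0.05 = 500    raw joinclause estimate
 *     nrows = Max(nrows, outer_rows)    -> 1000, every outer row survives
 *     nrows *= pselec                   -> 500
 *
 * The clamp to the non-nullable side is applied before the pushed-down
 * quals' selectivity, which is why the two selectivities are computed
 * separately above.
 */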
4366 * set_subquery_size_estimates
4367 * Set the size estimates for a base relation that is a subquery.
4369 * The rel's targetlist and restrictinfo list must have been constructed
4370 * already, and the Paths for the subquery must have been completed.
4371 * We look at the subquery's PlannerInfo to extract data.
4373 * We set the same fields as set_baserel_size_estimates.
4376 set_subquery_size_estimates(PlannerInfo *root, RelOptInfo *rel)
4378 PlannerInfo *subroot = rel->subroot;
4379 RelOptInfo *sub_final_rel;
4380 RangeTblEntry *rte PG_USED_FOR_ASSERTS_ONLY;
4383 /* Should only be applied to base relations that are subqueries */
4384 Assert(rel->relid > 0);
4385 rte = planner_rt_fetch(rel->relid, root);
4386 Assert(rte->rtekind == RTE_SUBQUERY);
4389 * Copy raw number of output rows from subquery. All of its paths should
4390 * have the same output rowcount, so just look at cheapest-total.
4392 sub_final_rel = fetch_upper_rel(subroot, UPPERREL_FINAL, NULL);
4393 rel->tuples = sub_final_rel->cheapest_total_path->rows;
4396 * Compute per-output-column width estimates by examining the subquery's
4397 * targetlist. For any output that is a plain Var, get the width estimate
4398 * that was made while planning the subquery. Otherwise, we leave it to
4399 * set_rel_width to fill in a datatype-based default estimate.
4401 foreach(lc, subroot->parse->targetList)
4403 TargetEntry *te = (TargetEntry *) lfirst(lc);
4404 Node *texpr = (Node *) te->expr;
4405 int32 item_width = 0;
4407 Assert(IsA(te, TargetEntry));
4408 /* junk columns aren't visible to upper query */
4413 * The subquery could be an expansion of a view that's had columns
4414 * added to it since the current query was parsed, so that there are
4415 * non-junk tlist columns in it that don't correspond to any column
4416 * visible at our query level. Ignore such columns.
4418 if (te->resno < rel->min_attr || te->resno > rel->max_attr)
4422 * XXX This currently doesn't work for subqueries containing set
4423 * operations, because the Vars in their tlists are bogus references
4424 * to the first leaf subquery, which wouldn't give the right answer
4425 * even if we could still get to its PlannerInfo.
4427 * Also, the subquery could be an appendrel for which all branches are
4428 * known empty due to constraint exclusion, in which case
4429 * set_append_rel_pathlist will have left the attr_widths set to zero.
4431 * In either case, we just leave the width estimate zero until
4432 * set_rel_width fixes it.
4434 if (IsA(texpr, Var) &&
4435 subroot->parse->setOperations == NULL)
4437 Var *var = (Var *) texpr;
4438 RelOptInfo *subrel = find_base_rel(subroot, var->varno);
4440 item_width = subrel->attr_widths[var->varattno - subrel->min_attr];
4442 rel->attr_widths[te->resno - rel->min_attr] = item_width;
4445 /* Now estimate number of output rows, etc */
4446 set_baserel_size_estimates(root, rel);
4450 * set_function_size_estimates
4451 * Set the size estimates for a base relation that is a function call.
4453 * The rel's targetlist and restrictinfo list must have been constructed
4456 * We set the same fields as set_baserel_size_estimates.
4459 set_function_size_estimates(PlannerInfo *root, RelOptInfo *rel)
4464 /* Should only be applied to base relations that are functions */
4465 Assert(rel->relid > 0);
4466 rte = planner_rt_fetch(rel->relid, root);
4467 Assert(rte->rtekind == RTE_FUNCTION);
4470 * Estimate number of rows the functions will return. The rowcount of the
4471 * node is that of the largest function result.
4474 foreach(lc, rte->functions)
4476 RangeTblFunction *rtfunc = (RangeTblFunction *) lfirst(lc);
4477 double ntup = expression_returns_set_rows(rtfunc->funcexpr);
4479 if (ntup > rel->tuples)
4483 /* Now estimate number of output rows, etc */
4484 set_baserel_size_estimates(root, rel);
4488 * set_values_size_estimates
4489 * Set the size estimates for a base relation that is a values list.
4491 * The rel's targetlist and restrictinfo list must have been constructed
4494 * We set the same fields as set_baserel_size_estimates.
4497 set_values_size_estimates(PlannerInfo *root, RelOptInfo *rel)
4501 /* Should only be applied to base relations that are values lists */
4502 Assert(rel->relid > 0);
4503 rte = planner_rt_fetch(rel->relid, root);
4504 Assert(rte->rtekind == RTE_VALUES);
4507 * Estimate number of rows the values list will return. We know this
4508 * precisely based on the list length (well, barring set-returning
4509 * functions in list items, but that's a refinement not catered for
4510 * anywhere else either).
4512 rel->tuples = list_length(rte->values_lists);
4514 /* Now estimate number of output rows, etc */
4515 set_baserel_size_estimates(root, rel);
4519 * set_cte_size_estimates
4520 * Set the size estimates for a base relation that is a CTE reference.
4522 * The rel's targetlist and restrictinfo list must have been constructed
4523 * already, and we need an estimate of the number of rows returned by the CTE
4524 * (if a regular CTE) or the non-recursive term (if a self-reference).
4526 * We set the same fields as set_baserel_size_estimates.
4529 set_cte_size_estimates(PlannerInfo *root, RelOptInfo *rel, double cte_rows)
4533 /* Should only be applied to base relations that are CTE references */
4534 Assert(rel->relid > 0);
4535 rte = planner_rt_fetch(rel->relid, root);
4536 Assert(rte->rtekind == RTE_CTE);
4538 if (rte->self_reference)
4541 * In a self-reference, arbitrarily assume the average worktable size
4542 * is about 10 times the nonrecursive term's size.
4544 rel->tuples = 10 * cte_rows;
4548 /* Otherwise just believe the CTE's rowcount estimate */
4549 rel->tuples = cte_rows;
4552 /* Now estimate number of output rows, etc */
4553 set_baserel_size_estimates(root, rel);
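/*
 * Worked example: for a recursive CTE whose non-recursive term is estimated
 * at cte_rows = 42, a self-reference scan of the CTE gets
 * rel->tuples = 10 * 42 = 420, while a reference to an ordinary
 * (non-recursive) CTE would simply use 42.  The factor of 10 is an arbitrary
 * guess at the average worktable size, as noted above.
 */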
4557 * set_foreign_size_estimates
4558 * Set the size estimates for a base relation that is a foreign table.
4560 * There is not a whole lot that we can do here; the foreign-data wrapper
4561 * is responsible for producing useful estimates. We can do a decent job
4562 * of estimating baserestrictcost, so we set that, and we also set up width
4563 * using what will be purely datatype-driven estimates from the targetlist.
4564 * There is no way to do anything sane with the rows value, so we just put
4565 * a default estimate and hope that the wrapper can improve on it. The
4566 * wrapper's GetForeignRelSize function will be called momentarily.
4568 * The rel's targetlist and restrictinfo list must have been constructed
4572 set_foreign_size_estimates(PlannerInfo *root, RelOptInfo *rel)
4574 /* Should only be applied to base relations */
4575 Assert(rel->relid > 0);
4577 rel->rows = 1000; /* entirely bogus default estimate */
4579 cost_qual_eval(&rel->baserestrictcost, rel->baserestrictinfo, root);
4581 set_rel_width(root, rel);
4587 * Set the estimated output width of a base relation.
4589 * The estimated output width is the sum of the per-attribute width estimates
4590 * for the actually-referenced columns, plus any PHVs or other expressions
4591 * that have to be calculated at this relation. This is the amount of data
4592 * we'd need to pass upwards in case of a sort, hash, etc.
4594 * This function also sets reltarget->cost, so it's a bit misnamed now.
4596 * NB: this works best on plain relations because it prefers to look at
4597 * real Vars. For subqueries, set_subquery_size_estimates will already have
4598 * copied up whatever per-column estimates were made within the subquery,
4599 * and for other types of rels there isn't much we can do anyway. We fall
4600 * back on (fairly stupid) datatype-based width estimates if we can't get
4601 * any better number.
4603 * The per-attribute width estimates are cached for possible re-use while
4604 * building join relations or post-scan/join pathtargets.
4607 set_rel_width(PlannerInfo *root, RelOptInfo *rel)
4609 Oid reloid = planner_rt_fetch(rel->relid, root)->relid;
4610 int32 tuple_width = 0;
4611 bool have_wholerow_var = false;
4614 /* Vars are assumed to have cost zero, but other exprs do not */
4615 rel->reltarget->cost.startup = 0;
4616 rel->reltarget->cost.per_tuple = 0;
4618 foreach(lc, rel->reltarget->exprs)
4620 Node *node = (Node *) lfirst(lc);
4623 * Ordinarily, a Var in a rel's targetlist must belong to that rel;
4624 * but there are corner cases involving LATERAL references where that
4625 * isn't so. If the Var has the wrong varno, fall through to the
4626 * generic case (it doesn't seem worth the trouble to be any smarter).
4628 if (IsA(node, Var) &&
4629 ((Var *) node)->varno == rel->relid)
4631 Var *var = (Var *) node;
4635 Assert(var->varattno >= rel->min_attr);
4636 Assert(var->varattno <= rel->max_attr);
4638 ndx = var->varattno - rel->min_attr;
4641 * If it's a whole-row Var, we'll deal with it below after we have
4642 * already cached as many attr widths as possible.
4644 if (var->varattno == 0)
4646 have_wholerow_var = true;
4651 * The width may have been cached already (especially if it's a
4652 * subquery), so don't duplicate effort.
4654 if (rel->attr_widths[ndx] > 0)
4656 tuple_width += rel->attr_widths[ndx];
4660 /* Try to get column width from statistics */
4661 if (reloid != InvalidOid && var->varattno > 0)
4663 item_width = get_attavgwidth(reloid, var->varattno);
4666 rel->attr_widths[ndx] = item_width;
4667 tuple_width += item_width;
4673 * Not a plain relation, or can't find statistics for it. Estimate
4674 * using just the type info.
4676 item_width = get_typavgwidth(var->vartype, var->vartypmod);
4677 Assert(item_width > 0);
4678 rel->attr_widths[ndx] = item_width;
4679 tuple_width += item_width;
4681 else if (IsA(node, PlaceHolderVar))
4684 * We will need to evaluate the PHV's contained expression while
4685 * scanning this rel, so be sure to include it in reltarget->cost.
4687 PlaceHolderVar *phv = (PlaceHolderVar *) node;
4688 PlaceHolderInfo *phinfo = find_placeholder_info(root, phv, false);
4691 tuple_width += phinfo->ph_width;
4692 cost_qual_eval_node(&cost, (Node *) phv->phexpr, root);
4693 rel->reltarget->cost.startup += cost.startup;
4694 rel->reltarget->cost.per_tuple += cost.per_tuple;
4699 * We could be looking at an expression pulled up from a subquery,
4700 * or a ROW() representing a whole-row child Var, etc. Do what we
4701 * can using the expression type information.
4706 item_width = get_typavgwidth(exprType(node), exprTypmod(node));
4707 Assert(item_width > 0);
4708 tuple_width += item_width;
4709 /* Not entirely clear if we need to account for cost, but do so */
4710 cost_qual_eval_node(&cost, node, root);
4711 rel->reltarget->cost.startup += cost.startup;
4712 rel->reltarget->cost.per_tuple += cost.per_tuple;
4717 * If we have a whole-row reference, estimate its width as the sum of
4718 * per-column widths plus heap tuple header overhead.
4720 if (have_wholerow_var)
4722 int32 wholerow_width = MAXALIGN(SizeofHeapTupleHeader);
4724 if (reloid != InvalidOid)
4726 /* Real relation, so estimate true tuple width */
4727 wholerow_width += get_relation_data_width(reloid,
4728 rel->attr_widths - rel->min_attr);
4732 /* Do what we can with info for a phony rel */
4735 for (i = 1; i <= rel->max_attr; i++)
4736 wholerow_width += rel->attr_widths[i - rel->min_attr];
4739 rel->attr_widths[0 - rel->min_attr] = wholerow_width;
4742 * Include the whole-row Var as part of the output tuple. Yes, that
4743 * really is what happens at runtime.
4745 tuple_width += wholerow_width;
4748 Assert(tuple_width >= 0);
4749 rel->reltarget->width = tuple_width;
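/*
 * Rough worked example (made-up numbers): if the reltarget references an
 * int4 column (cached width 4) and a text column whose statistics give an
 * average width of 18, the plain-column contribution is 4 + 18 = 22 bytes.
 * If a whole-row Var is referenced as well, approximately
 * MAXALIGN(SizeofHeapTupleHeader) plus the estimated data width of all the
 * relation's columns is added on top of that, since the whole-row value
 * really is carried as an extra copy of the tuple at runtime.
 */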
4753 * set_pathtarget_cost_width
4754 * Set the estimated eval cost and output width of a PathTarget tlist.
4756 * As a notational convenience, returns the same PathTarget pointer passed in.
4758 * Most, though not quite all, uses of this function occur after we've run
4759 * set_rel_width() for base relations; so we can usually obtain cached width
4760 * estimates for Vars. If we can't, fall back on datatype-based width
4761 * estimates. Present early-planning uses of PathTargets don't need accurate
4762 * widths badly enough to justify going to the catalogs for better data.
4765 set_pathtarget_cost_width(PlannerInfo *root, PathTarget *target)
4767 int32 tuple_width = 0;
4770 /* Vars are assumed to have cost zero, but other exprs do not */
4771 target->cost.startup = 0;
4772 target->cost.per_tuple = 0;
4774 foreach(lc, target->exprs)
4776 Node *node = (Node *) lfirst(lc);
4780 Var *var = (Var *) node;
4783 /* We should not see any upper-level Vars here */
4784 Assert(var->varlevelsup == 0);
4786 /* Try to get data from RelOptInfo cache */
4787 if (var->varno < root->simple_rel_array_size)
4789 RelOptInfo *rel = root->simple_rel_array[var->varno];
4792 var->varattno >= rel->min_attr &&
4793 var->varattno <= rel->max_attr)
4795 int ndx = var->varattno - rel->min_attr;
4797 if (rel->attr_widths[ndx] > 0)
4799 tuple_width += rel->attr_widths[ndx];
4806 * No cached data available, so estimate using just the type info.
4808 item_width = get_typavgwidth(var->vartype, var->vartypmod);
4809 Assert(item_width > 0);
4810 tuple_width += item_width;
4815 * Handle general expressions using type info.
4820 item_width = get_typavgwidth(exprType(node), exprTypmod(node));
4821 Assert(item_width > 0);
4822 tuple_width += item_width;
4824 /* Account for cost, too */
4825 cost_qual_eval_node(&cost, node, root);
4826 target->cost.startup += cost.startup;
4827 target->cost.per_tuple += cost.per_tuple;
4831 Assert(tuple_width >= 0);
4832 target->width = tuple_width;
4838 * relation_byte_size
4839 * Estimate the storage space in bytes for a given number of tuples
4840 * of a given width (size in bytes).
4843 relation_byte_size(double tuples, int width)
4845 return tuples * (MAXALIGN(width) + MAXALIGN(SizeofHeapTupleHeader));
4850 * Returns an estimate of the number of pages covered by a given
4851 * number of tuples of a given width (size in bytes).
4854 page_size(double tuples, int width)
4856 return ceil(relation_byte_size(tuples, width) / BLCKSZ);
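/*
 * Worked example (assuming the default BLCKSZ of 8192, 8-byte MAXALIGN and a
 * 23-byte heap tuple header): 10000 tuples of width 100 give
 *
 *     relation_byte_size = 10000 * (MAXALIGN(100) + MAXALIGN(23))
 *                        = 10000 * (104 + 24) = 1280000 bytes
 *     page_size          = ceil(1280000 / 8192) = 157 pages
 */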