1 /*-------------------------------------------------------------------------
4 * Routines to compute (and set) relation sizes and path costs
6 * Path costs are measured in arbitrary units established by these basic
9 * seq_page_cost Cost of a sequential page fetch
10 * random_page_cost Cost of a non-sequential page fetch
11 * cpu_tuple_cost Cost of typical CPU time to process a tuple
12 * cpu_index_tuple_cost Cost of typical CPU time to process an index tuple
13 * cpu_operator_cost Cost of CPU time to execute an operator or function
15 * We expect that the kernel will typically do some amount of read-ahead
16 * optimization; this in conjunction with seek costs means that seq_page_cost
17 * is normally considerably less than random_page_cost. (However, if the
18 * database is fully cached in RAM, it is reasonable to set them equal.)
20 * We also use a rough estimate "effective_cache_size" of the number of
21 * disk pages in Postgres + OS-level disk cache. (We can't simply use
22 * NBuffers for this purpose because that would ignore the effects of
23 * the kernel's disk cache.)
25 * Obviously, taking constants for these values is an oversimplification,
26 * but it's tough enough to get any useful estimates even at this level of
27 * detail. Note that all of these parameters are user-settable, in case
28 * the default values are drastically off for a particular platform.
30 * seq_page_cost and random_page_cost can also be overridden for an individual
31 * tablespace, in case some data is on a fast disk and other data is on a slow
32 * disk. Per-tablespace overrides never apply to temporary work files such as
33 * an external sort or a materialize node that overflows work_mem.
35 * We compute two separate costs for each path:
36 * total_cost: total estimated cost to fetch all tuples
37 * startup_cost: cost that is expended before first tuple is fetched
38 * In some scenarios, such as when there is a LIMIT or we are implementing
39 * an EXISTS(...) sub-select, it is not necessary to fetch all tuples of the
40 * path's result. A caller can estimate the cost of fetching a partial
41 * result by interpolating between startup_cost and total_cost. In detail:
42 * actual_cost = startup_cost +
43 * (total_cost - startup_cost) * tuples_to_fetch / path->parent->rows;
44 * Note that a base relation's rows count (and, by extension, plan_rows for
45 * plan nodes below the LIMIT node) are set without regard to any LIMIT, so
46 * that this equation works properly. (Also, these routines guarantee not to
47 * set the rows count to zero, so there will be no zero divide.) The LIMIT is
48 * applied as a top-level plan node.
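 *
 * Worked example (illustrative numbers, not taken from this file): a path
 * with startup_cost = 10, total_cost = 1010 and parent->rows = 1000 would be
 * charged 10 + (1010 - 10) * 100 / 1000 = 110 for fetching its first 100
 * tuples.
 *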
50 * For largely historical reasons, most of the routines in this module use
51 * the passed result Path only to store their startup_cost and total_cost
52 * results into. All the input data they need is passed as separate
53 * parameters, even though much of it could be extracted from the Path.
54 * An exception is made for the cost_XXXjoin() routines, which expect all
55 * the non-cost fields of the passed XXXPath to be filled in.
58 * Portions Copyright (c) 1996-2011, PostgreSQL Global Development Group
59 * Portions Copyright (c) 1994, Regents of the University of California
62 * src/backend/optimizer/path/costsize.c
64 *-------------------------------------------------------------------------
71 #include "executor/executor.h"
72 #include "executor/nodeHash.h"
73 #include "miscadmin.h"
74 #include "nodes/nodeFuncs.h"
75 #include "optimizer/clauses.h"
76 #include "optimizer/cost.h"
77 #include "optimizer/pathnode.h"
78 #include "optimizer/placeholder.h"
79 #include "optimizer/plancat.h"
80 #include "optimizer/planmain.h"
81 #include "optimizer/restrictinfo.h"
82 #include "parser/parsetree.h"
83 #include "utils/lsyscache.h"
84 #include "utils/selfuncs.h"
85 #include "utils/spccache.h"
86 #include "utils/tuplesort.h"
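/*
 * LOG2(x) computes the base-2 logarithm of x as log(x) / ln(2);
 * 0.693147180559945 is ln(2).
 */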
89 #define LOG2(x) (log(x) / 0.693147180559945)
92 * Some Paths return less than the nominal number of rows of their parent
93 * relations; join nodes need to do this to get the correct input count:
95 #define PATH_ROWS(path) \
96 (IsA(path, UniquePath) ? \
	 ((UniquePath *) (path))->rows : \
	 (path)->parent->rows)
101 double seq_page_cost = DEFAULT_SEQ_PAGE_COST;
102 double random_page_cost = DEFAULT_RANDOM_PAGE_COST;
103 double cpu_tuple_cost = DEFAULT_CPU_TUPLE_COST;
104 double cpu_index_tuple_cost = DEFAULT_CPU_INDEX_TUPLE_COST;
105 double cpu_operator_cost = DEFAULT_CPU_OPERATOR_COST;
107 int effective_cache_size = DEFAULT_EFFECTIVE_CACHE_SIZE;
109 Cost disable_cost = 1.0e10;
111 bool enable_seqscan = true;
112 bool enable_indexscan = true;
113 bool enable_indexonlyscan = true;
114 bool enable_bitmapscan = true;
115 bool enable_tidscan = true;
116 bool enable_sort = true;
117 bool enable_hashagg = true;
118 bool enable_nestloop = true;
119 bool enable_material = true;
120 bool enable_mergejoin = true;
121 bool enable_hashjoin = true;
123 /* Possibly this should become a GUC too */
124 static double visibility_fraction = 0.9;
130 } cost_qual_eval_context;
132 static MergeScanSelCache *cached_scansel(PlannerInfo *root,
135 static void cost_rescan(PlannerInfo *root, Path *path,
136 Cost *rescan_startup_cost, Cost *rescan_total_cost);
137 static bool cost_qual_eval_walker(Node *node, cost_qual_eval_context *context);
138 static bool adjust_semi_join(PlannerInfo *root, JoinPath *path,
139 SpecialJoinInfo *sjinfo,
140 Selectivity *outer_match_frac,
141 Selectivity *match_count,
142 bool *indexed_join_quals);
143 static double approx_tuple_count(PlannerInfo *root, JoinPath *path,
145 static void set_rel_width(PlannerInfo *root, RelOptInfo *rel);
146 static double relation_byte_size(double tuples, int width);
147 static double page_size(double tuples, int width);
152 * Force a row-count estimate to a sane value.
155 clamp_row_est(double nrows)
158 * Force estimate to be at least one row, to make explain output look
159 * better and to avoid possible divide-by-zero when interpolating costs.
160 * Make it an integer, too.
173 * Determines and returns the cost of scanning a relation sequentially.
176 cost_seqscan(Path *path, PlannerInfo *root,
179 double spc_seq_page_cost;
180 Cost startup_cost = 0;
184 /* Should only be applied to base relations */
185 Assert(baserel->relid > 0);
186 Assert(baserel->rtekind == RTE_RELATION);
	if (!enable_seqscan)
		startup_cost += disable_cost;
191 /* fetch estimated page cost for tablespace containing table */
192 get_tablespace_page_costs(baserel->reltablespace,
199 run_cost += spc_seq_page_cost * baserel->pages;
202 startup_cost += baserel->baserestrictcost.startup;
203 cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
204 run_cost += cpu_per_tuple * baserel->tuples;
206 path->startup_cost = startup_cost;
207 path->total_cost = startup_cost + run_cost;
212 * Determines and returns the cost of scanning a relation using an index.
214 * 'index' is the index to be used
215 * 'indexQuals' is the list of applicable qual clauses (implicit AND semantics)
216 * 'indexOrderBys' is the list of ORDER BY operators for amcanorderbyop indexes
217 * 'indexonly' is true if it's an index-only scan
218 * 'outer_rel' is the outer relation when we are considering using the index
219 * scan as the inside of a nestloop join (hence, some of the indexQuals
220 * are join clauses, and we should expect repeated scans of the index);
221 * NULL for a plain index scan
223 * cost_index() takes an IndexPath not just a Path, because it sets a few
224 * additional fields of the IndexPath besides startup_cost and total_cost.
225 * These fields are needed if the IndexPath is used in a BitmapIndexScan.
 * indexQuals is a list of RestrictInfo nodes, but indexOrderBys is a list of
 * bare expressions.
230 * NOTE: 'indexQuals' must contain only clauses usable as index restrictions.
231 * Any additional quals evaluated as qpquals may reduce the number of returned
232 * tuples, but they won't reduce the number of tuples we have to fetch from
233 * the table, so they don't reduce the scan cost.
236 cost_index(IndexPath *path, PlannerInfo *root,
241 RelOptInfo *outer_rel)
243 RelOptInfo *baserel = index->rel;
244 Cost startup_cost = 0;
246 Cost indexStartupCost;
248 Selectivity indexSelectivity;
249 double indexCorrelation,
251 double spc_seq_page_cost,
252 spc_random_page_cost;
256 double tuples_fetched;
257 double pages_fetched;
259 /* Should only be applied to base relations */
260 Assert(IsA(baserel, RelOptInfo) &&
261 IsA(index, IndexOptInfo));
262 Assert(baserel->relid > 0);
263 Assert(baserel->rtekind == RTE_RELATION);
265 if (!enable_indexscan)
266 startup_cost += disable_cost;
267 /* we don't need to check enable_indexonlyscan; indxpath.c does that */
270 * Call index-access-method-specific code to estimate the processing cost
271 * for scanning the index, as well as the selectivity of the index (ie,
272 * the fraction of main-table tuples we will have to retrieve) and its
273 * correlation to the main-table tuple order.
275 OidFunctionCall9(index->amcostestimate,
276 PointerGetDatum(root),
277 PointerGetDatum(index),
278 PointerGetDatum(indexQuals),
279 PointerGetDatum(indexOrderBys),
280 PointerGetDatum(outer_rel),
281 PointerGetDatum(&indexStartupCost),
282 PointerGetDatum(&indexTotalCost),
283 PointerGetDatum(&indexSelectivity),
284 PointerGetDatum(&indexCorrelation));
287 * Save amcostestimate's results for possible use in bitmap scan planning.
288 * We don't bother to save indexStartupCost or indexCorrelation, because a
289 * bitmap scan doesn't care about either.
291 path->indextotalcost = indexTotalCost;
292 path->indexselectivity = indexSelectivity;
294 /* all costs for touching index itself included here */
295 startup_cost += indexStartupCost;
296 run_cost += indexTotalCost - indexStartupCost;
298 /* estimate number of main-table tuples fetched */
299 tuples_fetched = clamp_row_est(indexSelectivity * baserel->tuples);
301 /* fetch estimated page costs for tablespace containing table */
302 get_tablespace_page_costs(baserel->reltablespace,
303 &spc_random_page_cost,
307 * Estimate number of main-table pages fetched, and compute I/O cost.
309 * When the index ordering is uncorrelated with the table ordering,
310 * we use an approximation proposed by Mackert and Lohman (see
311 * index_pages_fetched() for details) to compute the number of pages
312 * fetched, and then charge spc_random_page_cost per page fetched.
314 * When the index ordering is exactly correlated with the table ordering
315 * (just after a CLUSTER, for example), the number of pages fetched should
316 * be exactly selectivity * table_size. What's more, all but the first
317 * will be sequential fetches, not the random fetches that occur in the
 * uncorrelated case. So if the number of pages is more than 1, we ought to
 * charge
320 * spc_random_page_cost + (pages_fetched - 1) * spc_seq_page_cost
321 * For partially-correlated indexes, we ought to charge somewhere between
322 * these two estimates. We currently interpolate linearly between the
323 * estimates based on the correlation squared (XXX is that appropriate?).
325 * If it's an index-only scan, then we will not need to fetch any heap
326 * pages for which the visibility map shows all tuples are visible.
327 * Unfortunately, we have no stats as to how much of the heap is
328 * all-visible, and that's likely to be a rather unstable number anyway.
329 * We use an arbitrary constant visibility_fraction to estimate this.
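 *
 * Illustrative example (hypothetical numbers, not part of the original
 * comments): with spc_random_page_cost = 4.0, spc_seq_page_cost = 1.0,
 * 100 pages fetched in the uncorrelated case and 60 pages in the correlated
 * case, max_IO_cost = 400 and min_IO_cost = 4.0 + 59 * 1.0 = 63; a
 * correlation of 0.5 (csquared = 0.25) then yields an interpolated I/O
 * charge of 400 + 0.25 * (63 - 400) = 315.75.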
332 if (outer_rel != NULL && outer_rel->rows > 1)
335 * For repeated indexscans, the appropriate estimate for the
336 * uncorrelated case is to scale up the number of tuples fetched in
337 * the Mackert and Lohman formula by the number of scans, so that we
338 * estimate the number of pages fetched by all the scans; then
339 * pro-rate the costs for one scan. In this case we assume all the
340 * fetches are random accesses.
342 double num_scans = outer_rel->rows;
344 pages_fetched = index_pages_fetched(tuples_fetched * num_scans,
346 (double) index->pages,
350 pages_fetched = ceil(pages_fetched * visibility_fraction);
352 max_IO_cost = (pages_fetched * spc_random_page_cost) / num_scans;
355 * In the perfectly correlated case, the number of pages touched by
356 * each scan is selectivity * table_size, and we can use the Mackert
357 * and Lohman formula at the page level to estimate how much work is
358 * saved by caching across scans. We still assume all the fetches are
359 * random, though, which is an overestimate that's hard to correct for
360 * without double-counting the cache effects. (But in most cases
361 * where such a plan is actually interesting, only one page would get
362 * fetched per scan anyway, so it shouldn't matter much.)
364 pages_fetched = ceil(indexSelectivity * (double) baserel->pages);
366 pages_fetched = index_pages_fetched(pages_fetched * num_scans,
368 (double) index->pages,
372 pages_fetched = ceil(pages_fetched * visibility_fraction);
374 min_IO_cost = (pages_fetched * spc_random_page_cost) / num_scans;
379 * Normal case: apply the Mackert and Lohman formula, and then
380 * interpolate between that and the correlation-derived result.
382 pages_fetched = index_pages_fetched(tuples_fetched,
384 (double) index->pages,
388 pages_fetched = ceil(pages_fetched * visibility_fraction);
390 /* max_IO_cost is for the perfectly uncorrelated case (csquared=0) */
391 max_IO_cost = pages_fetched * spc_random_page_cost;
393 /* min_IO_cost is for the perfectly correlated case (csquared=1) */
394 pages_fetched = ceil(indexSelectivity * (double) baserel->pages);
397 pages_fetched = ceil(pages_fetched * visibility_fraction);
399 min_IO_cost = spc_random_page_cost;
400 if (pages_fetched > 1)
401 min_IO_cost += (pages_fetched - 1) * spc_seq_page_cost;
405 * Now interpolate based on estimated index order correlation to get total
406 * disk I/O cost for main table accesses.
408 csquared = indexCorrelation * indexCorrelation;
410 run_cost += max_IO_cost + csquared * (min_IO_cost - max_IO_cost);
413 * Estimate CPU costs per tuple.
415 * Normally the indexquals will be removed from the list of restriction
416 * clauses that we have to evaluate as qpquals, so we should subtract
417 * their costs from baserestrictcost. But if we are doing a join then
418 * some of the indexquals are join clauses and shouldn't be subtracted.
 * Rather than work out exactly how much to subtract, we don't subtract
 * anything.
422 startup_cost += baserel->baserestrictcost.startup;
423 cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
425 if (outer_rel == NULL)
427 QualCost index_qual_cost;
429 cost_qual_eval(&index_qual_cost, indexQuals, root);
430 /* any startup cost still has to be paid ... */
431 cpu_per_tuple -= index_qual_cost.per_tuple;
434 run_cost += cpu_per_tuple * tuples_fetched;
436 path->path.startup_cost = startup_cost;
437 path->path.total_cost = startup_cost + run_cost;
441 * index_pages_fetched
442 * Estimate the number of pages actually fetched after accounting for
445 * We use an approximation proposed by Mackert and Lohman, "Index Scans
446 * Using a Finite LRU Buffer: A Validated I/O Model", ACM Transactions
447 * on Database Systems, Vol. 14, No. 3, September 1989, Pages 401-424.
 * The Mackert and Lohman approximation is that the number of pages fetched is
451 * min(2TNs/(2T+Ns), T) when T <= b
452 * 2TNs/(2T+Ns) when T > b and Ns <= 2Tb/(2T-b)
453 * b + (Ns - 2Tb/(2T-b))*(T-b)/T when T > b and Ns > 2Tb/(2T-b)
 * where
 *		T = # pages in table
456 * N = # tuples in table
457 * s = selectivity = fraction of table to be scanned
458 * b = # buffer pages available (we include kernel space here)
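 *
 * Worked example (made-up numbers): with T = 1000 pages, b = 2000 buffer
 * pages (so T <= b) and Ns = 500 tuples fetched, the formula gives
 * 2TNs/(2T+Ns) = 1,000,000 / 2,500 = 400, and min(400, T) = 400 pages.
 *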
460 * We assume that effective_cache_size is the total number of buffer pages
461 * available for the whole query, and pro-rate that space across all the
462 * tables in the query and the index currently under consideration. (This
463 * ignores space needed for other indexes used by the query, but since we
464 * don't know which indexes will get used, we can't estimate that very well;
465 * and in any case counting all the tables may well be an overestimate, since
466 * depending on the join plan not all the tables may be scanned concurrently.)
468 * The product Ns is the number of tuples fetched; we pass in that
469 * product rather than calculating it here. "pages" is the number of pages
470 * in the object under consideration (either an index or a table).
471 * "index_pages" is the amount to add to the total table space, which was
472 * computed for us by query_planner.
474 * Caller is expected to have ensured that tuples_fetched is greater than zero
475 * and rounded to integer (see clamp_row_est). The result will likewise be
476 * greater than zero and integral.
479 index_pages_fetched(double tuples_fetched, BlockNumber pages,
480 double index_pages, PlannerInfo *root)
482 double pages_fetched;
487 /* T is # pages in table, but don't allow it to be zero */
488 T = (pages > 1) ? (double) pages : 1.0;
490 /* Compute number of pages assumed to be competing for cache space */
491 total_pages = root->total_table_pages + index_pages;
492 total_pages = Max(total_pages, 1.0);
493 Assert(T <= total_pages);
495 /* b is pro-rated share of effective_cache_size */
	b = (double) effective_cache_size * T / total_pages;
498 /* force it positive and integral */
504 /* This part is the Mackert and Lohman formula */
508 (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
	if (pages_fetched >= T)
		pages_fetched = T;
	else
		pages_fetched = ceil(pages_fetched);
518 lim = (2.0 * T * b) / (2.0 * T - b);
	if (tuples_fetched <= lim)
		pages_fetched =
			(2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
	else
		pages_fetched =
			b + (tuples_fetched - lim) * (T - b) / T;
529 pages_fetched = ceil(pages_fetched);
531 return pages_fetched;
535 * get_indexpath_pages
536 * Determine the total size of the indexes used in a bitmap index path.
538 * Note: if the same index is used more than once in a bitmap tree, we will
539 * count it multiple times, which perhaps is the wrong thing ... but it's
 * not completely clear, and detecting duplicates is difficult, so ignore it
 * for now.
544 get_indexpath_pages(Path *bitmapqual)
549 if (IsA(bitmapqual, BitmapAndPath))
551 BitmapAndPath *apath = (BitmapAndPath *) bitmapqual;
553 foreach(l, apath->bitmapquals)
555 result += get_indexpath_pages((Path *) lfirst(l));
558 else if (IsA(bitmapqual, BitmapOrPath))
560 BitmapOrPath *opath = (BitmapOrPath *) bitmapqual;
562 foreach(l, opath->bitmapquals)
564 result += get_indexpath_pages((Path *) lfirst(l));
567 else if (IsA(bitmapqual, IndexPath))
569 IndexPath *ipath = (IndexPath *) bitmapqual;
571 result = (double) ipath->indexinfo->pages;
574 elog(ERROR, "unrecognized node type: %d", nodeTag(bitmapqual));
580 * cost_bitmap_heap_scan
581 * Determines and returns the cost of scanning a relation using a bitmap
582 * index-then-heap plan.
584 * 'baserel' is the relation to be scanned
585 * 'bitmapqual' is a tree of IndexPaths, BitmapAndPaths, and BitmapOrPaths
586 * 'outer_rel' is the outer relation when we are considering using the bitmap
587 * scan as the inside of a nestloop join (hence, some of the indexQuals
588 * are join clauses, and we should expect repeated scans of the table);
589 * NULL for a plain bitmap scan
591 * Note: if this is a join inner path, the component IndexPaths in bitmapqual
592 * should have been costed accordingly.
595 cost_bitmap_heap_scan(Path *path, PlannerInfo *root, RelOptInfo *baserel,
596 Path *bitmapqual, RelOptInfo *outer_rel)
598 Cost startup_cost = 0;
601 Selectivity indexSelectivity;
604 double tuples_fetched;
605 double pages_fetched;
606 double spc_seq_page_cost,
607 spc_random_page_cost;
610 /* Should only be applied to base relations */
611 Assert(IsA(baserel, RelOptInfo));
612 Assert(baserel->relid > 0);
613 Assert(baserel->rtekind == RTE_RELATION);
615 if (!enable_bitmapscan)
616 startup_cost += disable_cost;
 * Fetch total cost of obtaining the bitmap, as well as its total
 * selectivity.
622 cost_bitmap_tree_node(bitmapqual, &indexTotalCost, &indexSelectivity);
624 startup_cost += indexTotalCost;
626 /* Fetch estimated page costs for tablespace containing table. */
627 get_tablespace_page_costs(baserel->reltablespace,
628 &spc_random_page_cost,
632 * Estimate number of main-table pages fetched.
634 tuples_fetched = clamp_row_est(indexSelectivity * baserel->tuples);
636 T = (baserel->pages > 1) ? (double) baserel->pages : 1.0;
638 if (outer_rel != NULL && outer_rel->rows > 1)
641 * For repeated bitmap scans, scale up the number of tuples fetched in
642 * the Mackert and Lohman formula by the number of scans, so that we
643 * estimate the number of pages fetched by all the scans. Then
644 * pro-rate for one scan.
646 double num_scans = outer_rel->rows;
648 pages_fetched = index_pages_fetched(tuples_fetched * num_scans,
650 get_indexpath_pages(bitmapqual),
652 pages_fetched /= num_scans;
657 * For a single scan, the number of heap pages that need to be fetched
658 * is the same as the Mackert and Lohman formula for the case T <= b
659 * (ie, no re-reads needed).
661 pages_fetched = (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
	if (pages_fetched >= T)
		pages_fetched = T;
	else
		pages_fetched = ceil(pages_fetched);
669 * For small numbers of pages we should charge spc_random_page_cost
670 * apiece, while if nearly all the table's pages are being read, it's more
671 * appropriate to charge spc_seq_page_cost apiece. The effect is
672 * nonlinear, too. For lack of a better idea, interpolate like this to
673 * determine the cost per page.
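 *
 * For instance (illustrative numbers only): with spc_random_page_cost = 4.0,
 * spc_seq_page_cost = 1.0, pages_fetched = 25 and T = 100, we get
 * cost_per_page = 4.0 - 3.0 * sqrt(0.25) = 2.5.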
675 if (pages_fetched >= 2.0)
676 cost_per_page = spc_random_page_cost -
677 (spc_random_page_cost - spc_seq_page_cost)
678 * sqrt(pages_fetched / T);
680 cost_per_page = spc_random_page_cost;
682 run_cost += pages_fetched * cost_per_page;
685 * Estimate CPU costs per tuple.
687 * Often the indexquals don't need to be rechecked at each tuple ... but
688 * not always, especially not if there are enough tuples involved that the
 * bitmaps become lossy. For the moment, just assume they will be rechecked
 * always.
692 startup_cost += baserel->baserestrictcost.startup;
693 cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
695 run_cost += cpu_per_tuple * tuples_fetched;
697 path->startup_cost = startup_cost;
698 path->total_cost = startup_cost + run_cost;
702 * cost_bitmap_tree_node
703 * Extract cost and selectivity from a bitmap tree node (index/and/or)
706 cost_bitmap_tree_node(Path *path, Cost *cost, Selectivity *selec)
708 if (IsA(path, IndexPath))
710 *cost = ((IndexPath *) path)->indextotalcost;
711 *selec = ((IndexPath *) path)->indexselectivity;
714 * Charge a small amount per retrieved tuple to reflect the costs of
715 * manipulating the bitmap. This is mostly to make sure that a bitmap
 * scan doesn't look to be the same cost as an indexscan to retrieve a
 * single tuple.
719 *cost += 0.1 * cpu_operator_cost * ((IndexPath *) path)->rows;
721 else if (IsA(path, BitmapAndPath))
723 *cost = path->total_cost;
724 *selec = ((BitmapAndPath *) path)->bitmapselectivity;
726 else if (IsA(path, BitmapOrPath))
728 *cost = path->total_cost;
729 *selec = ((BitmapOrPath *) path)->bitmapselectivity;
733 elog(ERROR, "unrecognized node type: %d", nodeTag(path));
734 *cost = *selec = 0; /* keep compiler quiet */
739 * cost_bitmap_and_node
740 * Estimate the cost of a BitmapAnd node
742 * Note that this considers only the costs of index scanning and bitmap
743 * creation, not the eventual heap access. In that sense the object isn't
744 * truly a Path, but it has enough path-like properties (costs in particular)
745 * to warrant treating it as one.
748 cost_bitmap_and_node(BitmapAndPath *path, PlannerInfo *root)
755 * We estimate AND selectivity on the assumption that the inputs are
 * independent. This is probably often wrong, but we don't have the info to
 * do better.
759 * The runtime cost of the BitmapAnd itself is estimated at 100x
760 * cpu_operator_cost for each tbm_intersect needed. Probably too small,
761 * definitely too simplistic?
765 foreach(l, path->bitmapquals)
767 Path *subpath = (Path *) lfirst(l);
769 Selectivity subselec;
771 cost_bitmap_tree_node(subpath, &subCost, &subselec);
775 totalCost += subCost;
776 if (l != list_head(path->bitmapquals))
777 totalCost += 100.0 * cpu_operator_cost;
779 path->bitmapselectivity = selec;
780 path->path.startup_cost = totalCost;
781 path->path.total_cost = totalCost;
785 * cost_bitmap_or_node
786 * Estimate the cost of a BitmapOr node
788 * See comments for cost_bitmap_and_node.
791 cost_bitmap_or_node(BitmapOrPath *path, PlannerInfo *root)
798 * We estimate OR selectivity on the assumption that the inputs are
799 * non-overlapping, since that's often the case in "x IN (list)" type
800 * situations. Of course, we clamp to 1.0 at the end.
802 * The runtime cost of the BitmapOr itself is estimated at 100x
803 * cpu_operator_cost for each tbm_union needed. Probably too small,
804 * definitely too simplistic? We are aware that the tbm_unions are
805 * optimized out when the inputs are BitmapIndexScans.
809 foreach(l, path->bitmapquals)
811 Path *subpath = (Path *) lfirst(l);
813 Selectivity subselec;
815 cost_bitmap_tree_node(subpath, &subCost, &subselec);
819 totalCost += subCost;
820 if (l != list_head(path->bitmapquals) &&
821 !IsA(subpath, IndexPath))
822 totalCost += 100.0 * cpu_operator_cost;
824 path->bitmapselectivity = Min(selec, 1.0);
825 path->path.startup_cost = totalCost;
826 path->path.total_cost = totalCost;
831 * Determines and returns the cost of scanning a relation using TIDs.
834 cost_tidscan(Path *path, PlannerInfo *root,
835 RelOptInfo *baserel, List *tidquals)
837 Cost startup_cost = 0;
839 bool isCurrentOf = false;
841 QualCost tid_qual_cost;
844 double spc_random_page_cost;
846 /* Should only be applied to base relations */
847 Assert(baserel->relid > 0);
848 Assert(baserel->rtekind == RTE_RELATION);
850 /* Count how many tuples we expect to retrieve */
854 if (IsA(lfirst(l), ScalarArrayOpExpr))
856 /* Each element of the array yields 1 tuple */
857 ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) lfirst(l);
858 Node *arraynode = (Node *) lsecond(saop->args);
860 ntuples += estimate_array_length(arraynode);
862 else if (IsA(lfirst(l), CurrentOfExpr))
864 /* CURRENT OF yields 1 tuple */
870 /* It's just CTID = something, count 1 tuple */
876 * We must force TID scan for WHERE CURRENT OF, because only nodeTidscan.c
877 * understands how to do it correctly. Therefore, honor enable_tidscan
878 * only when CURRENT OF isn't present. Also note that cost_qual_eval
879 * counts a CurrentOfExpr as having startup cost disable_cost, which we
 * subtract off here; that's to prevent other plan types such as seqscan
 * from being considered.
	if (isCurrentOf)
	{
		Assert(baserel->baserestrictcost.startup >= disable_cost);
		startup_cost -= disable_cost;
	}
888 else if (!enable_tidscan)
889 startup_cost += disable_cost;
 * The TID qual expressions will be computed once, any other baserestrict
 * quals once per retrieved tuple.
895 cost_qual_eval(&tid_qual_cost, tidquals, root);
897 /* fetch estimated page cost for tablespace containing table */
898 get_tablespace_page_costs(baserel->reltablespace,
899 &spc_random_page_cost,
902 /* disk costs --- assume each tuple on a different page */
903 run_cost += spc_random_page_cost * ntuples;
906 startup_cost += baserel->baserestrictcost.startup +
907 tid_qual_cost.per_tuple;
908 cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple -
909 tid_qual_cost.per_tuple;
910 run_cost += cpu_per_tuple * ntuples;
912 path->startup_cost = startup_cost;
913 path->total_cost = startup_cost + run_cost;
918 * Determines and returns the cost of scanning a subquery RTE.
921 cost_subqueryscan(Path *path, RelOptInfo *baserel)
927 /* Should only be applied to base relations that are subqueries */
928 Assert(baserel->relid > 0);
929 Assert(baserel->rtekind == RTE_SUBQUERY);
932 * Cost of path is cost of evaluating the subplan, plus cost of evaluating
933 * any restriction clauses that will be attached to the SubqueryScan node,
934 * plus cpu_tuple_cost to account for selection and projection overhead.
936 path->startup_cost = baserel->subplan->startup_cost;
937 path->total_cost = baserel->subplan->total_cost;
939 startup_cost = baserel->baserestrictcost.startup;
940 cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
941 run_cost = cpu_per_tuple * baserel->tuples;
943 path->startup_cost += startup_cost;
944 path->total_cost += startup_cost + run_cost;
949 * Determines and returns the cost of scanning a function RTE.
952 cost_functionscan(Path *path, PlannerInfo *root, RelOptInfo *baserel)
954 Cost startup_cost = 0;
960 /* Should only be applied to base relations that are functions */
961 Assert(baserel->relid > 0);
962 rte = planner_rt_fetch(baserel->relid, root);
963 Assert(rte->rtekind == RTE_FUNCTION);
966 * Estimate costs of executing the function expression.
968 * Currently, nodeFunctionscan.c always executes the function to
969 * completion before returning any rows, and caches the results in a
 * tuplestore. So the function eval cost is all startup cost, and per-row
 * costs are minimal.
973 * XXX in principle we ought to charge tuplestore spill costs if the
974 * number of rows is large. However, given how phony our rowcount
975 * estimates for functions tend to be, there's not a lot of point in that
976 * refinement right now.
978 cost_qual_eval_node(&exprcost, rte->funcexpr, root);
980 startup_cost += exprcost.startup + exprcost.per_tuple;
982 /* Add scanning CPU costs */
983 startup_cost += baserel->baserestrictcost.startup;
984 cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
985 run_cost += cpu_per_tuple * baserel->tuples;
987 path->startup_cost = startup_cost;
988 path->total_cost = startup_cost + run_cost;
993 * Determines and returns the cost of scanning a VALUES RTE.
996 cost_valuesscan(Path *path, PlannerInfo *root, RelOptInfo *baserel)
998 Cost startup_cost = 0;
1002 /* Should only be applied to base relations that are values lists */
1003 Assert(baserel->relid > 0);
1004 Assert(baserel->rtekind == RTE_VALUES);
1007 * For now, estimate list evaluation cost at one operator eval per list
1008 * (probably pretty bogus, but is it worth being smarter?)
1010 cpu_per_tuple = cpu_operator_cost;
1012 /* Add scanning CPU costs */
1013 startup_cost += baserel->baserestrictcost.startup;
1014 cpu_per_tuple += cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
1015 run_cost += cpu_per_tuple * baserel->tuples;
1017 path->startup_cost = startup_cost;
1018 path->total_cost = startup_cost + run_cost;
1023 * Determines and returns the cost of scanning a CTE RTE.
1025 * Note: this is used for both self-reference and regular CTEs; the
1026 * possible cost differences are below the threshold of what we could
1027 * estimate accurately anyway. Note that the costs of evaluating the
1028 * referenced CTE query are added into the final plan as initplan costs,
1029 * and should NOT be counted here.
1032 cost_ctescan(Path *path, PlannerInfo *root, RelOptInfo *baserel)
1034 Cost startup_cost = 0;
1038 /* Should only be applied to base relations that are CTEs */
1039 Assert(baserel->relid > 0);
1040 Assert(baserel->rtekind == RTE_CTE);
1042 /* Charge one CPU tuple cost per row for tuplestore manipulation */
1043 cpu_per_tuple = cpu_tuple_cost;
1045 /* Add scanning CPU costs */
1046 startup_cost += baserel->baserestrictcost.startup;
1047 cpu_per_tuple += cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
1048 run_cost += cpu_per_tuple * baserel->tuples;
1050 path->startup_cost = startup_cost;
1051 path->total_cost = startup_cost + run_cost;
1055 * cost_recursive_union
1056 * Determines and returns the cost of performing a recursive union,
1057 * and also the estimated output size.
1059 * We are given Plans for the nonrecursive and recursive terms.
1061 * Note that the arguments and output are Plans, not Paths as in most of
1062 * the rest of this module. That's because we don't bother setting up a
1063 * Path representation for recursive union --- we have only one way to do it.
1066 cost_recursive_union(Plan *runion, Plan *nrterm, Plan *rterm)
1072 /* We probably have decent estimates for the non-recursive term */
1073 startup_cost = nrterm->startup_cost;
1074 total_cost = nrterm->total_cost;
1075 total_rows = nrterm->plan_rows;
1078 * We arbitrarily assume that about 10 recursive iterations will be
1079 * needed, and that we've managed to get a good fix on the cost and output
1080 * size of each one of them. These are mighty shaky assumptions but it's
1081 * hard to see how to do better.
1083 total_cost += 10 * rterm->total_cost;
1084 total_rows += 10 * rterm->plan_rows;
1087 * Also charge cpu_tuple_cost per row to account for the costs of
1088 * manipulating the tuplestores. (We don't worry about possible
1089 * spill-to-disk costs.)
1091 total_cost += cpu_tuple_cost * total_rows;
1093 runion->startup_cost = startup_cost;
1094 runion->total_cost = total_cost;
1095 runion->plan_rows = total_rows;
1096 runion->plan_width = Max(nrterm->plan_width, rterm->plan_width);
1101 * Determines and returns the cost of sorting a relation, including
1102 * the cost of reading the input data.
1104 * If the total volume of data to sort is less than sort_mem, we will do
1105 * an in-memory sort, which requires no I/O and about t*log2(t) tuple
1106 * comparisons for t tuples.
1108 * If the total volume exceeds sort_mem, we switch to a tape-style merge
1109 * algorithm. There will still be about t*log2(t) tuple comparisons in
1110 * total, but we will also need to write and read each tuple once per
1111 * merge pass. We expect about ceil(logM(r)) merge passes where r is the
1112 * number of initial runs formed and M is the merge order used by tuplesort.c.
1113 * Since the average initial run should be about twice sort_mem, we have
1114 * disk traffic = 2 * relsize * ceil(logM(p / (2*sort_mem)))
1115 * cpu = comparison_cost * t * log2(t)
1117 * If the sort is bounded (i.e., only the first k result tuples are needed)
1118 * and k tuples can fit into sort_mem, we use a heap method that keeps only
1119 * k tuples in the heap; this will require about t*log2(k) tuple comparisons.
1121 * The disk traffic is assumed to be 3/4ths sequential and 1/4th random
1122 * accesses (XXX can't we refine that guess?)
1124 * By default, we charge two operator evals per tuple comparison, which should
1125 * be in the right ballpark in most cases. The caller can tweak this by
1126 * specifying nonzero comparison_cost; typically that's used for any extra
1127 * work that has to be done to prepare the inputs to the comparison operators.
1129 * 'pathkeys' is a list of sort keys
1130 * 'input_cost' is the total cost for reading the input data
1131 * 'tuples' is the number of tuples in the relation
1132 * 'width' is the average tuple width in bytes
1133 * 'comparison_cost' is the extra cost per comparison, if any
1134 * 'sort_mem' is the number of kilobytes of work memory allowed for the sort
1135 * 'limit_tuples' is the bound on the number of output tuples; -1 if no bound
1137 * NOTE: some callers currently pass NIL for pathkeys because they
1138 * can't conveniently supply the sort keys. Since this routine doesn't
1139 * currently do anything with pathkeys anyway, that doesn't matter...
1140 * but if it ever does, it should react gracefully to lack of key data.
1141 * (Actually, the thing we'd most likely be interested in is just the number
1142 * of sort keys, which all callers *could* supply.)
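 *
 * Worked example of the external-sort I/O charge (hypothetical numbers,
 * assuming the default 8 KB block size): for about 100 MB of input
 * (npages = 12800) and a merge order large enough that a single merge pass
 * suffices (log_runs = 1), we charge npageaccesses = 2.0 * 12800 = 25600
 * page accesses, each costed at 0.75 * seq_page_cost +
 * 0.25 * random_page_cost.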
1145 cost_sort(Path *path, PlannerInfo *root,
1146 List *pathkeys, Cost input_cost, double tuples, int width,
1147 Cost comparison_cost, int sort_mem,
1148 double limit_tuples)
1150 Cost startup_cost = input_cost;
1152 double input_bytes = relation_byte_size(tuples, width);
1153 double output_bytes;
1154 double output_tuples;
1155 long sort_mem_bytes = sort_mem * 1024L;
	if (!enable_sort)
		startup_cost += disable_cost;
1161 * We want to be sure the cost of a sort is never estimated as zero, even
1162 * if passed-in tuple count is zero. Besides, mustn't do log(0)...
1167 /* Include the default cost-per-comparison */
1168 comparison_cost += 2.0 * cpu_operator_cost;
1170 /* Do we have a useful LIMIT? */
1171 if (limit_tuples > 0 && limit_tuples < tuples)
1173 output_tuples = limit_tuples;
1174 output_bytes = relation_byte_size(output_tuples, width);
1178 output_tuples = tuples;
1179 output_bytes = input_bytes;
1182 if (output_bytes > sort_mem_bytes)
1185 * We'll have to use a disk-based sort of all the tuples
1187 double npages = ceil(input_bytes / BLCKSZ);
1188 double nruns = (input_bytes / sort_mem_bytes) * 0.5;
1189 double mergeorder = tuplesort_merge_order(sort_mem_bytes);
1191 double npageaccesses;
1196 * Assume about N log2 N comparisons
1198 startup_cost += comparison_cost * tuples * LOG2(tuples);
1202 /* Compute logM(r) as log(r) / log(M) */
		if (nruns > mergeorder)
			log_runs = ceil(log(nruns) / log(mergeorder));
		else
			log_runs = 1.0;
1207 npageaccesses = 2.0 * npages * log_runs;
1208 /* Assume 3/4ths of accesses are sequential, 1/4th are not */
1209 startup_cost += npageaccesses *
1210 (seq_page_cost * 0.75 + random_page_cost * 0.25);
1212 else if (tuples > 2 * output_tuples || input_bytes > sort_mem_bytes)
1215 * We'll use a bounded heap-sort keeping just K tuples in memory, for
1216 * a total number of tuple comparisons of N log2 K; but the constant
1217 * factor is a bit higher than for quicksort. Tweak it so that the
1218 * cost curve is continuous at the crossover point.
1220 startup_cost += comparison_cost * tuples * LOG2(2.0 * output_tuples);
1224 /* We'll use plain quicksort on all the input tuples */
1225 startup_cost += comparison_cost * tuples * LOG2(tuples);
1229 * Also charge a small amount (arbitrarily set equal to operator cost) per
1230 * extracted tuple. We don't charge cpu_tuple_cost because a Sort node
1231 * doesn't do qual-checking or projection, so it has less overhead than
1232 * most plan nodes. Note it's correct to use tuples not output_tuples
1233 * here --- the upper LIMIT will pro-rate the run cost so we'd be double
1234 * counting the LIMIT otherwise.
1236 run_cost += cpu_operator_cost * tuples;
1238 path->startup_cost = startup_cost;
1239 path->total_cost = startup_cost + run_cost;
1244 * Determines and returns the cost of a MergeAppend node.
1246 * MergeAppend merges several pre-sorted input streams, using a heap that
1247 * at any given instant holds the next tuple from each stream. If there
1248 * are N streams, we need about N*log2(N) tuple comparisons to construct
1249 * the heap at startup, and then for each output tuple, about log2(N)
1250 * comparisons to delete the top heap entry and another log2(N) comparisons
1251 * to insert its successor from the same stream.
1253 * (The effective value of N will drop once some of the input streams are
1254 * exhausted, but it seems unlikely to be worth trying to account for that.)
1256 * The heap is never spilled to disk, since we assume N is not very large.
1257 * So this is much simpler than cost_sort.
1259 * As in cost_sort, we charge two operator evals per tuple comparison.
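 *
 * Example (illustrative only): merging N = 8 presorted streams costs about
 * 8 * log2(8) = 24 comparisons to build the heap, and then roughly
 * 2 * log2(8) = 6 comparisons per output tuple, each comparison charged at
 * 2 * cpu_operator_cost.
 *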
1261 * 'pathkeys' is a list of sort keys
1262 * 'n_streams' is the number of input streams
1263 * 'input_startup_cost' is the sum of the input streams' startup costs
1264 * 'input_total_cost' is the sum of the input streams' total costs
1265 * 'tuples' is the number of tuples in all the streams
1268 cost_merge_append(Path *path, PlannerInfo *root,
1269 List *pathkeys, int n_streams,
1270 Cost input_startup_cost, Cost input_total_cost,
1273 Cost startup_cost = 0;
1275 Cost comparison_cost;
	N = (n_streams < 2) ? 2.0 : (double) n_streams;
	logN = LOG2(N);
1285 /* Assumed cost per tuple comparison */
1286 comparison_cost = 2.0 * cpu_operator_cost;
1288 /* Heap creation cost */
1289 startup_cost += comparison_cost * N * logN;
1291 /* Per-tuple heap maintenance cost */
1292 run_cost += tuples * comparison_cost * 2.0 * logN;
1295 * Also charge a small amount (arbitrarily set equal to operator cost) per
1296 * extracted tuple. We don't charge cpu_tuple_cost because a MergeAppend
1297 * node doesn't do qual-checking or projection, so it has less overhead
1298 * than most plan nodes.
1300 run_cost += cpu_operator_cost * tuples;
1302 path->startup_cost = startup_cost + input_startup_cost;
1303 path->total_cost = startup_cost + run_cost + input_total_cost;
1308 * Determines and returns the cost of materializing a relation, including
1309 * the cost of reading the input data.
1311 * If the total volume of data to materialize exceeds work_mem, we will need
1312 * to write it to disk, so the cost is much higher in that case.
1314 * Note that here we are estimating the costs for the first scan of the
1315 * relation, so the materialization is all overhead --- any savings will
1316 * occur only on rescan, which is estimated in cost_rescan.
1319 cost_material(Path *path,
1320 Cost input_startup_cost, Cost input_total_cost,
1321 double tuples, int width)
1323 Cost startup_cost = input_startup_cost;
1324 Cost run_cost = input_total_cost - input_startup_cost;
1325 double nbytes = relation_byte_size(tuples, width);
1326 long work_mem_bytes = work_mem * 1024L;
1329 * Whether spilling or not, charge 2x cpu_operator_cost per tuple to
1330 * reflect bookkeeping overhead. (This rate must be more than what
1331 * cost_rescan charges for materialize, ie, cpu_operator_cost per tuple;
1332 * if it is exactly the same then there will be a cost tie between
1333 * nestloop with A outer, materialized B inner and nestloop with B outer,
1334 * materialized A inner. The extra cost ensures we'll prefer
1335 * materializing the smaller rel.) Note that this is normally a good deal
1336 * less than cpu_tuple_cost; which is OK because a Material plan node
 * doesn't do qual-checking or projection, so it's got less overhead than
 * most plan nodes.
1340 run_cost += 2 * cpu_operator_cost * tuples;
1343 * If we will spill to disk, charge at the rate of seq_page_cost per page.
1344 * This cost is assumed to be evenly spread through the plan run phase,
1345 * which isn't exactly accurate but our cost model doesn't allow for
1346 * nonuniform costs within the run phase.
1348 if (nbytes > work_mem_bytes)
1350 double npages = ceil(nbytes / BLCKSZ);
1352 run_cost += seq_page_cost * npages;
1355 path->startup_cost = startup_cost;
1356 path->total_cost = startup_cost + run_cost;
1361 * Determines and returns the cost of performing an Agg plan node,
1362 * including the cost of its input.
1364 * aggcosts can be NULL when there are no actual aggregate functions (i.e.,
1365 * we are using a hashed Agg node just to do grouping).
1367 * Note: when aggstrategy == AGG_SORTED, caller must ensure that input costs
1368 * are for appropriately-sorted input.
1371 cost_agg(Path *path, PlannerInfo *root,
1372 AggStrategy aggstrategy, const AggClauseCosts *aggcosts,
1373 int numGroupCols, double numGroups,
1374 Cost input_startup_cost, Cost input_total_cost,
1375 double input_tuples)
1379 AggClauseCosts dummy_aggcosts;
1381 /* Use all-zero per-aggregate costs if NULL is passed */
1382 if (aggcosts == NULL)
1384 Assert(aggstrategy == AGG_HASHED);
1385 MemSet(&dummy_aggcosts, 0, sizeof(AggClauseCosts));
1386 aggcosts = &dummy_aggcosts;
1390 * The transCost.per_tuple component of aggcosts should be charged once
1391 * per input tuple, corresponding to the costs of evaluating the aggregate
1392 * transfns and their input expressions (with any startup cost of course
1393 * charged but once). The finalCost component is charged once per output
1394 * tuple, corresponding to the costs of evaluating the finalfns.
1396 * If we are grouping, we charge an additional cpu_operator_cost per
1397 * grouping column per input tuple for grouping comparisons.
1399 * We will produce a single output tuple if not grouping, and a tuple per
1400 * group otherwise. We charge cpu_tuple_cost for each output tuple.
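 *
 * Illustrative example (hypothetical numbers): hashed aggregation of 1000
 * input tuples on 2 grouping columns into 10 groups is charged, on top of
 * the input cost and transCost.startup, transCost.per_tuple * 1000 for the
 * transition functions, 2 * cpu_operator_cost * 1000 for grouping
 * comparisons, plus finalCost * 10 and cpu_tuple_cost * 10 for producing
 * the output groups.
 *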
1402 * Note: in this cost model, AGG_SORTED and AGG_HASHED have exactly the
1403 * same total CPU cost, but AGG_SORTED has lower startup cost. If the
1404 * input path is already sorted appropriately, AGG_SORTED should be
1405 * preferred (since it has no risk of memory overflow). This will happen
1406 * as long as the computed total costs are indeed exactly equal --- but if
1407 * there's roundoff error we might do the wrong thing. So be sure that
 * the computations below form the same intermediate values in the same
 * order.
1411 if (aggstrategy == AGG_PLAIN)
1413 startup_cost = input_total_cost;
1414 startup_cost += aggcosts->transCost.startup;
1415 startup_cost += aggcosts->transCost.per_tuple * input_tuples;
1416 startup_cost += aggcosts->finalCost;
1417 /* we aren't grouping */
1418 total_cost = startup_cost + cpu_tuple_cost;
1420 else if (aggstrategy == AGG_SORTED)
1422 /* Here we are able to deliver output on-the-fly */
1423 startup_cost = input_startup_cost;
1424 total_cost = input_total_cost;
1425 /* calcs phrased this way to match HASHED case, see note above */
1426 total_cost += aggcosts->transCost.startup;
1427 total_cost += aggcosts->transCost.per_tuple * input_tuples;
1428 total_cost += (cpu_operator_cost * numGroupCols) * input_tuples;
1429 total_cost += aggcosts->finalCost * numGroups;
1430 total_cost += cpu_tuple_cost * numGroups;
1434 /* must be AGG_HASHED */
1435 startup_cost = input_total_cost;
1436 startup_cost += aggcosts->transCost.startup;
1437 startup_cost += aggcosts->transCost.per_tuple * input_tuples;
1438 startup_cost += (cpu_operator_cost * numGroupCols) * input_tuples;
1439 total_cost = startup_cost;
1440 total_cost += aggcosts->finalCost * numGroups;
1441 total_cost += cpu_tuple_cost * numGroups;
1444 path->startup_cost = startup_cost;
1445 path->total_cost = total_cost;
1450 * Determines and returns the cost of performing a WindowAgg plan node,
1451 * including the cost of its input.
1453 * Input is assumed already properly sorted.
1456 cost_windowagg(Path *path, PlannerInfo *root,
1457 List *windowFuncs, int numPartCols, int numOrderCols,
1458 Cost input_startup_cost, Cost input_total_cost,
1459 double input_tuples)
1465 startup_cost = input_startup_cost;
1466 total_cost = input_total_cost;
1469 * Window functions are assumed to cost their stated execution cost, plus
1470 * the cost of evaluating their input expressions, per tuple. Since they
1471 * may in fact evaluate their inputs at multiple rows during each cycle,
1472 * this could be a drastic underestimate; but without a way to know how
1473 * many rows the window function will fetch, it's hard to do better. In
1474 * any case, it's a good estimate for all the built-in window functions,
1475 * so we'll just do this for now.
1477 foreach(lc, windowFuncs)
1479 WindowFunc *wfunc = (WindowFunc *) lfirst(lc);
1483 Assert(IsA(wfunc, WindowFunc));
1485 wfunccost = get_func_cost(wfunc->winfnoid) * cpu_operator_cost;
1487 /* also add the input expressions' cost to per-input-row costs */
1488 cost_qual_eval_node(&argcosts, (Node *) wfunc->args, root);
1489 startup_cost += argcosts.startup;
1490 wfunccost += argcosts.per_tuple;
1492 total_cost += wfunccost * input_tuples;
1496 * We also charge cpu_operator_cost per grouping column per tuple for
 * grouping comparisons, plus cpu_tuple_cost per tuple for general overhead.
1500 * XXX this neglects costs of spooling the data to disk when it overflows
1501 * work_mem. Sooner or later that should get accounted for.
1503 total_cost += cpu_operator_cost * (numPartCols + numOrderCols) * input_tuples;
1504 total_cost += cpu_tuple_cost * input_tuples;
1506 path->startup_cost = startup_cost;
1507 path->total_cost = total_cost;
1512 * Determines and returns the cost of performing a Group plan node,
1513 * including the cost of its input.
 * Note: caller must ensure that input costs are for appropriately-sorted
 * input.
1519 cost_group(Path *path, PlannerInfo *root,
1520 int numGroupCols, double numGroups,
1521 Cost input_startup_cost, Cost input_total_cost,
1522 double input_tuples)
1527 startup_cost = input_startup_cost;
1528 total_cost = input_total_cost;
1531 * Charge one cpu_operator_cost per comparison per input tuple. We assume
1532 * all columns get compared at most of the tuples.
1534 total_cost += cpu_operator_cost * input_tuples * numGroupCols;
1536 path->startup_cost = startup_cost;
1537 path->total_cost = total_cost;
1541 * If a nestloop's inner path is an indexscan, be sure to use its estimated
1542 * output row count, which may be lower than the restriction-clause-only row
1543 * count of its parent. (We don't include this case in the PATH_ROWS macro
1544 * because it applies *only* to a nestloop's inner relation.) We have to
1545 * be prepared to recurse through Append or MergeAppend nodes in case of an
1546 * appendrel. (It's not clear MergeAppend can be seen here, but we may as
1547 * well handle it if so.)
1550 nestloop_inner_path_rows(Path *path)
1554 if (IsA(path, IndexPath))
1555 result = ((IndexPath *) path)->rows;
1556 else if (IsA(path, BitmapHeapPath))
1557 result = ((BitmapHeapPath *) path)->rows;
1558 else if (IsA(path, AppendPath))
1563 foreach(l, ((AppendPath *) path)->subpaths)
1565 result += nestloop_inner_path_rows((Path *) lfirst(l));
1568 else if (IsA(path, MergeAppendPath))
1573 foreach(l, ((MergeAppendPath *) path)->subpaths)
1575 result += nestloop_inner_path_rows((Path *) lfirst(l));
1579 result = PATH_ROWS(path);
1586 * Determines and returns the cost of joining two relations using the
1587 * nested loop algorithm.
1589 * 'path' is already filled in except for the cost fields
1590 * 'sjinfo' is extra info about the join for selectivity estimation
1593 cost_nestloop(NestPath *path, PlannerInfo *root, SpecialJoinInfo *sjinfo)
1595 Path *outer_path = path->outerjoinpath;
1596 Path *inner_path = path->innerjoinpath;
1597 Cost startup_cost = 0;
1599 Cost inner_rescan_start_cost;
1600 Cost inner_rescan_total_cost;
1601 Cost inner_run_cost;
1602 Cost inner_rescan_run_cost;
1604 QualCost restrict_qual_cost;
1605 double outer_path_rows = PATH_ROWS(outer_path);
1606 double inner_path_rows = nestloop_inner_path_rows(inner_path);
1608 Selectivity outer_match_frac;
1609 Selectivity match_count;
1610 bool indexed_join_quals;
1612 if (!enable_nestloop)
1613 startup_cost += disable_cost;
1615 /* estimate costs to rescan the inner relation */
1616 cost_rescan(root, inner_path,
1617 &inner_rescan_start_cost,
1618 &inner_rescan_total_cost);
1620 /* cost of source data */
1623 * NOTE: clearly, we must pay both outer and inner paths' startup_cost
1624 * before we can start returning tuples, so the join's startup cost is
 * their sum. We'll also pay the inner path's rescan startup cost multiple
 * times.
1628 startup_cost += outer_path->startup_cost + inner_path->startup_cost;
1629 run_cost += outer_path->total_cost - outer_path->startup_cost;
1630 if (outer_path_rows > 1)
1631 run_cost += (outer_path_rows - 1) * inner_rescan_start_cost;
1633 inner_run_cost = inner_path->total_cost - inner_path->startup_cost;
1634 inner_rescan_run_cost = inner_rescan_total_cost - inner_rescan_start_cost;
1636 if (adjust_semi_join(root, path, sjinfo,
1639 &indexed_join_quals))
1641 double outer_matched_rows;
1642 Selectivity inner_scan_frac;
1645 * SEMI or ANTI join: executor will stop after first match.
1647 * For an outer-rel row that has at least one match, we can expect the
1648 * inner scan to stop after a fraction 1/(match_count+1) of the inner
1649 * rows, if the matches are evenly distributed. Since they probably
1650 * aren't quite evenly distributed, we apply a fuzz factor of 2.0 to
1651 * that fraction. (If we used a larger fuzz factor, we'd have to
1652 * clamp inner_scan_frac to at most 1.0; but since match_count is at
1653 * least 1, no such clamp is needed now.)
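 *
 * For example (illustrative): with match_count = 3, inner_scan_frac =
 * 2.0 / (3 + 1) = 0.5, i.e. we assume each matched outer row scans about
 * half of the inner relation before finding its first match.
 *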
1655 * A complicating factor is that rescans may be cheaper than first
1656 * scans. If we never scan all the way to the end of the inner rel,
1657 * it might be (depending on the plan type) that we'd never pay the
1658 * whole inner first-scan run cost. However it is difficult to
1659 * estimate whether that will happen, so be conservative and always
1660 * charge the whole first-scan cost once.
1662 run_cost += inner_run_cost;
1664 outer_matched_rows = rint(outer_path_rows * outer_match_frac);
1665 inner_scan_frac = 2.0 / (match_count + 1.0);
1667 /* Add inner run cost for additional outer tuples having matches */
1668 if (outer_matched_rows > 1)
1669 run_cost += (outer_matched_rows - 1) * inner_rescan_run_cost * inner_scan_frac;
1671 /* Compute number of tuples processed (not number emitted!) */
1672 ntuples = outer_matched_rows * inner_path_rows * inner_scan_frac;
1675 * For unmatched outer-rel rows, there are two cases. If the inner
1676 * path is an indexscan using all the joinquals as indexquals, then an
1677 * unmatched row results in an indexscan returning no rows, which is
1678 * probably quite cheap. We estimate this case as the same cost to
1679 * return the first tuple of a nonempty scan. Otherwise, the executor
1680 * will have to scan the whole inner rel; not so cheap.
1682 if (indexed_join_quals)
1684 run_cost += (outer_path_rows - outer_matched_rows) *
1685 inner_rescan_run_cost / inner_path_rows;
1688 * We won't be evaluating any quals at all for these rows, so
1689 * don't add them to ntuples.
1694 run_cost += (outer_path_rows - outer_matched_rows) *
1695 inner_rescan_run_cost;
	ntuples += (outer_path_rows - outer_matched_rows) * inner_path_rows;
1702 /* Normal case; we'll scan whole input rel for each outer row */
1703 run_cost += inner_run_cost;
1704 if (outer_path_rows > 1)
1705 run_cost += (outer_path_rows - 1) * inner_rescan_run_cost;
1707 /* Compute number of tuples processed (not number emitted!) */
1708 ntuples = outer_path_rows * inner_path_rows;
1712 cost_qual_eval(&restrict_qual_cost, path->joinrestrictinfo, root);
1713 startup_cost += restrict_qual_cost.startup;
1714 cpu_per_tuple = cpu_tuple_cost + restrict_qual_cost.per_tuple;
1715 run_cost += cpu_per_tuple * ntuples;
1717 path->path.startup_cost = startup_cost;
1718 path->path.total_cost = startup_cost + run_cost;
1723 * Determines and returns the cost of joining two relations using the
1724 * merge join algorithm.
1726 * Unlike other costsize functions, this routine makes one actual decision:
1727 * whether we should materialize the inner path. We do that either because
1728 * the inner path can't support mark/restore, or because it's cheaper to
1729 * use an interposed Material node to handle mark/restore. When the decision
1730 * is cost-based it would be logically cleaner to build and cost two separate
1731 * paths with and without that flag set; but that would require repeating most
1732 * of the calculations here, which are not all that cheap. Since the choice
1733 * will not affect output pathkeys or startup cost, only total cost, there is
1734 * no possibility of wanting to keep both paths. So it seems best to make
1735 * the decision here and record it in the path's materialize_inner field.
1737 * 'path' is already filled in except for the cost fields and materialize_inner
1738 * 'sjinfo' is extra info about the join for selectivity estimation
1740 * Notes: path's mergeclauses should be a subset of the joinrestrictinfo list;
1741 * outersortkeys and innersortkeys are lists of the keys to be used
1742 * to sort the outer and inner relations, or NIL if no explicit
1743 * sort is needed because the source path is already ordered.
1746 cost_mergejoin(MergePath *path, PlannerInfo *root, SpecialJoinInfo *sjinfo)
1748 Path *outer_path = path->jpath.outerjoinpath;
1749 Path *inner_path = path->jpath.innerjoinpath;
1750 List *mergeclauses = path->path_mergeclauses;
1751 List *outersortkeys = path->outersortkeys;
1752 List *innersortkeys = path->innersortkeys;
1753 Cost startup_cost = 0;
1759 QualCost merge_qual_cost;
1760 QualCost qp_qual_cost;
1761 double outer_path_rows = PATH_ROWS(outer_path);
1762 double inner_path_rows = PATH_ROWS(inner_path);
1767 double mergejointuples,
1770 Selectivity outerstartsel,
1774 Path sort_path; /* dummy for result of cost_sort */
1776 /* Protect some assumptions below that rowcounts aren't zero or NaN */
1777 if (outer_path_rows <= 0 || isnan(outer_path_rows))
1778 outer_path_rows = 1;
1779 if (inner_path_rows <= 0 || isnan(inner_path_rows))
1780 inner_path_rows = 1;
1782 if (!enable_mergejoin)
1783 startup_cost += disable_cost;
1786 * Compute cost of the mergequals and qpquals (other restriction clauses)
1789 cost_qual_eval(&merge_qual_cost, mergeclauses, root);
1790 cost_qual_eval(&qp_qual_cost, path->jpath.joinrestrictinfo, root);
1791 qp_qual_cost.startup -= merge_qual_cost.startup;
1792 qp_qual_cost.per_tuple -= merge_qual_cost.per_tuple;
1795 * Get approx # tuples passing the mergequals. We use approx_tuple_count
1796 * here because we need an estimate done with JOIN_INNER semantics.
1798 mergejointuples = approx_tuple_count(root, &path->jpath, mergeclauses);
1801 * When there are equal merge keys in the outer relation, the mergejoin
1802 * must rescan any matching tuples in the inner relation. This means
1803 * re-fetching inner tuples; we have to estimate how often that happens.
1805 * For regular inner and outer joins, the number of re-fetches can be
1806 * estimated approximately as size of merge join output minus size of
1807 * inner relation. Assume that the distinct key values are 1, 2, ..., and
1808 * denote the number of values of each key in the outer relation as m1,
1809 * m2, ...; in the inner relation, n1, n2, ... Then we have
1811 * size of join = m1 * n1 + m2 * n2 + ...
1813 * number of rescanned tuples = (m1 - 1) * n1 + (m2 - 1) * n2 + ...
1814 *     = (m1 * n1 + m2 * n2 + ...) - (n1 + n2 + ...) = size of join - size of inner relation
1817 * This equation works correctly for outer tuples having no inner match
1818 * (nk = 0), but not for inner tuples having no outer match (mk = 0); we
1819 * are effectively subtracting those from the number of rescanned tuples,
1820 * when we should not. Can we do better without expensive selectivity computations?
1823 * The whole issue is moot if we are working from a unique-ified outer input.
1826 if (IsA(outer_path, UniquePath))
1827 rescannedtuples = 0;
1830 rescannedtuples = mergejointuples - inner_path_rows;
1831 /* Must clamp because of possible underestimate */
1832 if (rescannedtuples < 0)
1833 rescannedtuples = 0;
1835 /* We'll inflate various costs this much to account for rescanning */
1836 rescanratio = 1.0 + (rescannedtuples / inner_path_rows);
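/*
 * For illustration only, with hypothetical estimates: if the mergequals are
 * expected to pass mergejointuples = 1500 rows and inner_path_rows = 1000,
 * then rescannedtuples = 1500 - 1000 = 500 and
 * rescanratio = 1.0 + 500 / 1000 = 1.5, i.e. the inner side is effectively
 * scanned one and a half times.
 */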
1839 * A merge join will stop as soon as it exhausts either input stream
1840 * (unless it's an outer join, in which case the outer side has to be
1841 * scanned all the way anyway). Estimate fraction of the left and right
1842 * inputs that will actually need to be scanned. Likewise, we can
1843 * estimate the number of rows that will be skipped before the first join
1844 * pair is found, which should be factored into startup cost. We use only
1845 * the first (most significant) merge clause for this purpose. Since
1846 * mergejoinscansel() is a fairly expensive computation, we cache the
1847 * results in the merge clause RestrictInfo.
1849 if (mergeclauses && path->jpath.jointype != JOIN_FULL)
1851 RestrictInfo *firstclause = (RestrictInfo *) linitial(mergeclauses);
1856 MergeScanSelCache *cache;
1858 /* Get the input pathkeys to determine the sort-order details */
1859 opathkeys = outersortkeys ? outersortkeys : outer_path->pathkeys;
1860 ipathkeys = innersortkeys ? innersortkeys : inner_path->pathkeys;
1863 opathkey = (PathKey *) linitial(opathkeys);
1864 ipathkey = (PathKey *) linitial(ipathkeys);
1865 /* debugging check */
1866 if (opathkey->pk_opfamily != ipathkey->pk_opfamily ||
1867 opathkey->pk_eclass->ec_collation != ipathkey->pk_eclass->ec_collation ||
1868 opathkey->pk_strategy != ipathkey->pk_strategy ||
1869 opathkey->pk_nulls_first != ipathkey->pk_nulls_first)
1870 elog(ERROR, "left and right pathkeys do not match in mergejoin");
1872 /* Get the selectivity with caching */
1873 cache = cached_scansel(root, firstclause, opathkey);
1875 if (bms_is_subset(firstclause->left_relids,
1876 outer_path->parent->relids))
1878 /* left side of clause is outer */
1879 outerstartsel = cache->leftstartsel;
1880 outerendsel = cache->leftendsel;
1881 innerstartsel = cache->rightstartsel;
1882 innerendsel = cache->rightendsel;
1886 /* left side of clause is inner */
1887 outerstartsel = cache->rightstartsel;
1888 outerendsel = cache->rightendsel;
1889 innerstartsel = cache->leftstartsel;
1890 innerendsel = cache->leftendsel;
1892 if (path->jpath.jointype == JOIN_LEFT ||
1893 path->jpath.jointype == JOIN_ANTI)
1895 outerstartsel = 0.0;
1898 else if (path->jpath.jointype == JOIN_RIGHT)
1900 innerstartsel = 0.0;
1906 /* cope with clauseless or full mergejoin */
1907 outerstartsel = innerstartsel = 0.0;
1908 outerendsel = innerendsel = 1.0;
1912 * Convert selectivities to row counts. We force outer_rows and
1913 * inner_rows to be at least 1, but the skip_rows estimates can be zero.
1915 outer_skip_rows = rint(outer_path_rows * outerstartsel);
1916 inner_skip_rows = rint(inner_path_rows * innerstartsel);
1917 outer_rows = clamp_row_est(outer_path_rows * outerendsel);
1918 inner_rows = clamp_row_est(inner_path_rows * innerendsel);
1920 Assert(outer_skip_rows <= outer_rows);
1921 Assert(inner_skip_rows <= inner_rows);
1924 * Readjust scan selectivities to account for above rounding. This is
1925 * normally an insignificant effect, but when there are only a few rows in
1926 * the inputs, failing to do this makes for a large percentage error.
1928 outerstartsel = outer_skip_rows / outer_path_rows;
1929 innerstartsel = inner_skip_rows / inner_path_rows;
1930 outerendsel = outer_rows / outer_path_rows;
1931 innerendsel = inner_rows / inner_path_rows;
1933 Assert(outerstartsel <= outerendsel);
1934 Assert(innerstartsel <= innerendsel);
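/*
 * For illustration only, with hypothetical numbers: if outer_path_rows = 100,
 * outerstartsel = 0.013 and outerendsel = 0.987, then
 * outer_skip_rows = rint(1.3) = 1 and outer_rows = clamp_row_est(98.7) = 99,
 * after which the selectivities are readjusted to 1/100 = 0.01 and
 * 99/100 = 0.99 so that the row counts and fractions stay consistent.
 */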
1936 /* cost of source data */
1938 if (outersortkeys) /* do we need to sort outer? */
1940 cost_sort(&sort_path,
1943 outer_path->total_cost,
1945 outer_path->parent->width,
1949 startup_cost += sort_path.startup_cost;
1950 startup_cost += (sort_path.total_cost - sort_path.startup_cost) * outerstartsel;
1952 run_cost += (sort_path.total_cost - sort_path.startup_cost)
1953 * (outerendsel - outerstartsel);
1957 startup_cost += outer_path->startup_cost;
1958 startup_cost += (outer_path->total_cost - outer_path->startup_cost) * outerstartsel;
1960 run_cost += (outer_path->total_cost - outer_path->startup_cost)
1961 * (outerendsel - outerstartsel);
1964 if (innersortkeys) /* do we need to sort inner? */
1966 cost_sort(&sort_path,
1969 inner_path->total_cost,
1971 inner_path->parent->width,
1975 startup_cost += sort_path.startup_cost;
1976 startup_cost += (sort_path.total_cost - sort_path.startup_cost) * innerstartsel;
1978 inner_run_cost = (sort_path.total_cost - sort_path.startup_cost)
1979 * (innerendsel - innerstartsel);
1983 startup_cost += inner_path->startup_cost;
1984 startup_cost += (inner_path->total_cost - inner_path->startup_cost) * innerstartsel;
1986 inner_run_cost = (inner_path->total_cost - inner_path->startup_cost)
1987 * (innerendsel - innerstartsel);
1991 * Decide whether we want to materialize the inner input to shield it from
1992 * mark/restore and from having to perform re-fetches. Our cost model for regular
1993 * re-fetches is that a re-fetch costs the same as an original fetch,
1994 * which is probably an overestimate; but on the other hand we ignore the
1995 * bookkeeping costs of mark/restore. Not clear if it's worth developing
1996 * a more refined model. So we just need to inflate the inner run cost by rescanratio.
1999 bare_inner_cost = inner_run_cost * rescanratio;
2002 * When we interpose a Material node the re-fetch cost is assumed to be
2003 * just cpu_operator_cost per tuple, independently of the underlying
2004 * plan's cost; and we charge an extra cpu_operator_cost per original
2005 * fetch as well. Note that we're assuming the materialize node will
2006 * never spill to disk, since it only has to remember tuples back to the
2007 * last mark. (If there are a huge number of duplicates, our other cost
2008 * factors will make the path so expensive that it probably won't get
2009 * chosen anyway.) So we don't use cost_rescan here.
2011 * Note: keep this estimate in sync with create_mergejoin_plan's labeling
2012 * of the generated Material node.
2014 mat_inner_cost = inner_run_cost +
2015 cpu_operator_cost * inner_path_rows * rescanratio;
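/*
 * For illustration only, assuming the default cpu_operator_cost of 0.0025
 * and hypothetical inputs: with inner_run_cost = 100, inner_path_rows = 1000
 * and rescanratio = 1.5,
 *
 *     bare_inner_cost = 100 * 1.5 = 150
 *     mat_inner_cost  = 100 + 0.0025 * 1000 * 1.5 = 103.75
 *
 * so (with enable_material on) the decision below would be to materialize.
 */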
2018 * Prefer materializing if it looks cheaper, unless the user has asked to
2019 * suppress materialization.
2021 if (enable_material && mat_inner_cost < bare_inner_cost)
2022 path->materialize_inner = true;
2025 * Even if materializing doesn't look cheaper, we *must* do it if the
2026 * inner path is to be used directly (without sorting) and it doesn't
2027 * support mark/restore.
2029 * Since the inner side must be ordered, and only Sorts and IndexScans can
2030 * create order to begin with, and they both support mark/restore, you
2031 * might think there's no problem --- but you'd be wrong. Nestloop and
2032 * merge joins can *preserve* the order of their inputs, so they can be
2033 * selected as the input of a mergejoin, and they don't support
2034 * mark/restore at present.
2036 * We don't test the value of enable_material here, because
2037 * materialization is required for correctness in this case, and turning
2038 * it off does not entitle us to deliver an invalid plan.
2040 else if (innersortkeys == NIL &&
2041 !ExecSupportsMarkRestore(inner_path->pathtype))
2042 path->materialize_inner = true;
2045 * Also, force materializing if the inner path is to be sorted and the
2046 * sort is expected to spill to disk. This is because the final merge
2047 * pass can be done on-the-fly if it doesn't have to support mark/restore.
2048 * We don't try to adjust the cost estimates for this consideration.
2051 * Since materialization is a performance optimization in this case,
2052 * rather than necessary for correctness, we skip it if enable_material is off.
2055 else if (enable_material && innersortkeys != NIL &&
2056 relation_byte_size(inner_path_rows, inner_path->parent->width) > (work_mem * 1024L))
2058 path->materialize_inner = true;
2060 else path->materialize_inner = false;
2062 /* Charge the right incremental cost for the chosen case */
2063 if (path->materialize_inner)
2064 run_cost += mat_inner_cost;
2066 run_cost += bare_inner_cost;
2071 * The number of tuple comparisons needed is approximately number of outer
2072 * rows plus number of inner rows plus number of rescanned tuples (can we
2073 * refine this?). At each one, we need to evaluate the mergejoin quals.
2075 startup_cost += merge_qual_cost.startup;
2076 startup_cost += merge_qual_cost.per_tuple *
2077 (outer_skip_rows + inner_skip_rows * rescanratio);
2078 run_cost += merge_qual_cost.per_tuple *
2079 ((outer_rows - outer_skip_rows) +
2080 (inner_rows - inner_skip_rows) * rescanratio);
2083 * For each tuple that gets through the mergejoin proper, we charge
2084 * cpu_tuple_cost plus the cost of evaluating additional restriction
2085 * clauses that are to be applied at the join. (This is pessimistic since
2086 * not all of the quals may get evaluated at each tuple.)
2088 * Note: we could adjust for SEMI/ANTI joins skipping some qual
2089 * evaluations here, but it's probably not worth the trouble.
2091 startup_cost += qp_qual_cost.startup;
2092 cpu_per_tuple = cpu_tuple_cost + qp_qual_cost.per_tuple;
2093 run_cost += cpu_per_tuple * mergejointuples;
2095 path->jpath.path.startup_cost = startup_cost;
2096 path->jpath.path.total_cost = startup_cost + run_cost;
2100 * run mergejoinscansel() with caching
2102 static MergeScanSelCache *
2103 cached_scansel(PlannerInfo *root, RestrictInfo *rinfo, PathKey *pathkey)
2105 MergeScanSelCache *cache;
2107 Selectivity leftstartsel, leftendsel, rightstartsel, rightendsel;
2111 MemoryContext oldcontext;
2113 /* Do we have this result already? */
2114 foreach(lc, rinfo->scansel_cache)
2116 cache = (MergeScanSelCache *) lfirst(lc);
2117 if (cache->opfamily == pathkey->pk_opfamily &&
2118 cache->collation == pathkey->pk_eclass->ec_collation &&
2119 cache->strategy == pathkey->pk_strategy &&
2120 cache->nulls_first == pathkey->pk_nulls_first)
2124 /* Nope, do the computation */
2125 mergejoinscansel(root,
2126 (Node *) rinfo->clause,
2127 pathkey->pk_opfamily,
2128 pathkey->pk_strategy,
2129 pathkey->pk_nulls_first,
2135 /* Cache the result in suitably long-lived workspace */
2136 oldcontext = MemoryContextSwitchTo(root->planner_cxt);
2138 cache = (MergeScanSelCache *) palloc(sizeof(MergeScanSelCache));
2139 cache->opfamily = pathkey->pk_opfamily;
2140 cache->collation = pathkey->pk_eclass->ec_collation;
2141 cache->strategy = pathkey->pk_strategy;
2142 cache->nulls_first = pathkey->pk_nulls_first;
2143 cache->leftstartsel = leftstartsel;
2144 cache->leftendsel = leftendsel;
2145 cache->rightstartsel = rightstartsel;
2146 cache->rightendsel = rightendsel;
2148 rinfo->scansel_cache = lappend(rinfo->scansel_cache, cache);
2150 MemoryContextSwitchTo(oldcontext);
2157 * Determines and returns the cost of joining two relations using the
2158 * hash join algorithm.
2160 * 'path' is already filled in except for the cost fields
2161 * 'sjinfo' is extra info about the join for selectivity estimation
2163 * Note: path's hashclauses should be a subset of the joinrestrictinfo list
2166 cost_hashjoin(HashPath *path, PlannerInfo *root, SpecialJoinInfo *sjinfo)
2168 Path *outer_path = path->jpath.outerjoinpath;
2169 Path *inner_path = path->jpath.innerjoinpath;
2170 List *hashclauses = path->path_hashclauses;
2171 Cost startup_cost = 0;
2174 QualCost hash_qual_cost;
2175 QualCost qp_qual_cost;
2176 double hashjointuples;
2177 double outer_path_rows = PATH_ROWS(outer_path);
2178 double inner_path_rows = PATH_ROWS(inner_path);
2179 int num_hashclauses = list_length(hashclauses);
2183 double virtualbuckets;
2184 Selectivity innerbucketsize;
2185 Selectivity outer_match_frac;
2186 Selectivity match_count;
2189 if (!enable_hashjoin)
2190 startup_cost += disable_cost;
2193 * Compute cost of the hashquals and qpquals (other restriction clauses)
2196 cost_qual_eval(&hash_qual_cost, hashclauses, root);
2197 cost_qual_eval(&qp_qual_cost, path->jpath.joinrestrictinfo, root);
2198 qp_qual_cost.startup -= hash_qual_cost.startup;
2199 qp_qual_cost.per_tuple -= hash_qual_cost.per_tuple;
2201 /* cost of source data */
2202 startup_cost += outer_path->startup_cost;
2203 run_cost += outer_path->total_cost - outer_path->startup_cost;
2204 startup_cost += inner_path->total_cost;
2207 * Cost of computing hash function: must do it once per input tuple. We
2208 * charge one cpu_operator_cost for each column's hash function. Also,
2209 * tack on one cpu_tuple_cost per inner row, to model the costs of
2210 * inserting the row into the hashtable.
2212 * XXX when a hashclause is more complex than a single operator, we really
2213 * should charge the extra eval costs of the left or right side, as
2214 * appropriate, here. This seems more work than it's worth at the moment.
2216 startup_cost += (cpu_operator_cost * num_hashclauses + cpu_tuple_cost) * inner_path_rows;
2218 run_cost += cpu_operator_cost * num_hashclauses * outer_path_rows;
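/*
 * For illustration only, with the default cpu_operator_cost = 0.0025 and
 * cpu_tuple_cost = 0.01 and one hashclause: for inner_path_rows = 10000 and
 * outer_path_rows = 100000 this charges
 *
 *     startup_cost += (0.0025 + 0.01) * 10000 = 125
 *     run_cost     +=  0.0025 * 100000        = 250
 *
 * on top of the source-path costs already added above.
 */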
2221 * Get hash table size that executor would use for inner relation.
2223 * XXX for the moment, always assume that skew optimization will be
2224 * performed. As long as SKEW_WORK_MEM_PERCENT is small, it's not worth
2225 * trying to determine that for sure.
2227 * XXX at some point it might be interesting to try to account for skew
2228 * optimization in the cost estimate, but for now, we don't.
2230 ExecChooseHashTableSize(inner_path_rows,
2231 inner_path->parent->width,
2236 virtualbuckets = (double) numbuckets *(double) numbatches;
2238 /* mark the path with estimated # of batches */
2239 path->num_batches = numbatches;
2242 * Determine bucketsize fraction for inner relation. We use the smallest
2243 * bucketsize estimated for any individual hashclause; this is undoubtedly conservative, but it's hard to see how to do better.
2246 * BUT: if inner relation has been unique-ified, we can assume it's good
2247 * for hashing. This is important both because it's the right answer, and
2248 * because we avoid contaminating the cache with a value that's wrong for
2249 * non-unique-ified paths.
2251 if (IsA(inner_path, UniquePath))
2252 innerbucketsize = 1.0 / virtualbuckets;
2255 innerbucketsize = 1.0;
2256 foreach(hcl, hashclauses)
2258 RestrictInfo *restrictinfo = (RestrictInfo *) lfirst(hcl);
2259 Selectivity thisbucketsize;
2261 Assert(IsA(restrictinfo, RestrictInfo));
2264 * First we have to figure out which side of the hashjoin clause
2265 * is the inner side.
2267 * Since we tend to visit the same clauses over and over when
2268 * planning a large query, we cache the bucketsize estimate in the
2269 * RestrictInfo node to avoid repeated lookups of statistics.
2271 if (bms_is_subset(restrictinfo->right_relids,
2272 inner_path->parent->relids))
2274 /* righthand side is inner */
2275 thisbucketsize = restrictinfo->right_bucketsize;
2276 if (thisbucketsize < 0)
2278 /* not cached yet */
2280 thisbucketsize = estimate_hash_bucketsize(root,
2281 get_rightop(restrictinfo->clause), virtualbuckets);
2283 restrictinfo->right_bucketsize = thisbucketsize;
2288 Assert(bms_is_subset(restrictinfo->left_relids,
2289 inner_path->parent->relids));
2290 /* lefthand side is inner */
2291 thisbucketsize = restrictinfo->left_bucketsize;
2292 if (thisbucketsize < 0)
2294 /* not cached yet */
2296 thisbucketsize = estimate_hash_bucketsize(root,
2297 get_leftop(restrictinfo->clause), virtualbuckets);
2299 restrictinfo->left_bucketsize = thisbucketsize;
2303 if (innerbucketsize > thisbucketsize)
2304 innerbucketsize = thisbucketsize;
2309 * If inner relation is too big then we will need to "batch" the join,
2310 * which implies writing and reading most of the tuples to disk an extra
2311 * time. Charge seq_page_cost per page, since the I/O should be nice and
2312 * sequential. Writing the inner rel counts as startup cost, all the rest as run cost.
2317 double outerpages = page_size(outer_path_rows,
2318 outer_path->parent->width);
2319 double innerpages = page_size(inner_path_rows,
2320 inner_path->parent->width);
2322 startup_cost += seq_page_cost * innerpages;
2323 run_cost += seq_page_cost * (innerpages + 2 * outerpages);
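/*
 * For illustration only, with the default seq_page_cost of 1.0 and
 * hypothetical sizes: if the inner relation occupies 1000 pages and the
 * outer 5000 pages, batching adds 1000 to startup cost (writing the inner
 * rel) and 1000 + 2 * 5000 = 11000 to run cost (reading the inner rel back,
 * plus writing and re-reading the outer rel).
 */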
2328 if (adjust_semi_join(root, &path->jpath, sjinfo,
2333 double outer_matched_rows;
2334 Selectivity inner_scan_frac;
2337 * SEMI or ANTI join: executor will stop after first match.
2339 * For an outer-rel row that has at least one match, we can expect the
2340 * bucket scan to stop after a fraction 1/(match_count+1) of the
2341 * bucket's rows, if the matches are evenly distributed. Since they
2342 * probably aren't quite evenly distributed, we apply a fuzz factor of
2343 * 2.0 to that fraction. (If we used a larger fuzz factor, we'd have
2344 * to clamp inner_scan_frac to at most 1.0; but since match_count is
2345 * at least 1, no such clamp is needed now.)
2347 outer_matched_rows = rint(outer_path_rows * outer_match_frac);
2348 inner_scan_frac = 2.0 / (match_count + 1.0);
2350 startup_cost += hash_qual_cost.startup;
2351 run_cost += hash_qual_cost.per_tuple * outer_matched_rows *
2352 clamp_row_est(inner_path_rows * innerbucketsize * inner_scan_frac) * 0.5;
2355 * For unmatched outer-rel rows, the picture is quite a lot different.
2356 * In the first place, there is no reason to assume that these rows
2357 * preferentially hit heavily-populated buckets; instead assume they
2358 * are uncorrelated with the inner distribution and so they see an
2359 * average bucket size of inner_path_rows / virtualbuckets. In the
2360 * second place, it seems likely that they will have few if any exact
2361 * hash-code matches and so very few of the tuples in the bucket will
2362 * actually require eval of the hash quals. We don't have any good
2363 * way to estimate how many will, but for the moment assume that the
2364 * effective cost per bucket entry is one-tenth what it is for matched tuples.
2367 run_cost += hash_qual_cost.per_tuple *
2368 (outer_path_rows - outer_matched_rows) *
2369 clamp_row_est(inner_path_rows / virtualbuckets) * 0.05;
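/*
 * For illustration only, with hypothetical estimates: suppose
 * outer_path_rows = 10000, outer_match_frac = 0.2 (so outer_matched_rows
 * = 2000), match_count = 3 (so inner_scan_frac = 2.0 / 4.0 = 0.5),
 * inner_path_rows = 100000, innerbucketsize = 0.001, and hash quals costing
 * 0.0025 per evaluation.  The matched-row charge above is then
 *
 *     0.0025 * 2000 * clamp_row_est(100000 * 0.001 * 0.5) * 0.5 = 125
 *
 * while the unmatched rows, assumed to land in average-sized buckets, add
 * only a small fraction of that.
 */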
2371 /* Get # of tuples that will pass the basic join */
2372 if (path->jpath.jointype == JOIN_SEMI)
2373 hashjointuples = outer_matched_rows;
2375 else hashjointuples = outer_path_rows - outer_matched_rows; /* JOIN_ANTI */
2380 * The number of tuple comparisons needed is the number of outer
2381 * tuples times the typical number of tuples in a hash bucket, which
2382 * is the inner relation size times its bucketsize fraction. At each
2383 * one, we need to evaluate the hashjoin quals. But actually,
2384 * charging the full qual eval cost at each tuple is pessimistic,
2385 * since we don't evaluate the quals unless the hash values match
2386 * exactly. For lack of a better idea, halve the cost estimate to account for that.
2389 startup_cost += hash_qual_cost.startup;
2390 run_cost += hash_qual_cost.per_tuple * outer_path_rows *
2391 clamp_row_est(inner_path_rows * innerbucketsize) * 0.5;
2394 * Get approx # tuples passing the hashquals. We use
2395 * approx_tuple_count here because we need an estimate done with
2396 * JOIN_INNER semantics.
2398 hashjointuples = approx_tuple_count(root, &path->jpath, hashclauses);
2402 * For each tuple that gets through the hashjoin proper, we charge
2403 * cpu_tuple_cost plus the cost of evaluating additional restriction
2404 * clauses that are to be applied at the join. (This is pessimistic since
2405 * not all of the quals may get evaluated at each tuple.)
2407 startup_cost += qp_qual_cost.startup;
2408 cpu_per_tuple = cpu_tuple_cost + qp_qual_cost.per_tuple;
2409 run_cost += cpu_per_tuple * hashjointuples;
2411 path->jpath.path.startup_cost = startup_cost;
2412 path->jpath.path.total_cost = startup_cost + run_cost;
2418 * Figure the costs for a SubPlan (or initplan).
2420 * Note: we could dig the subplan's Plan out of the root list, but in practice
2421 * all callers have it handy already, so we make them pass it.
2424 cost_subplan(PlannerInfo *root, SubPlan *subplan, Plan *plan)
2428 /* Figure any cost for evaluating the testexpr */
2429 cost_qual_eval(&sp_cost,
2430 make_ands_implicit((Expr *) subplan->testexpr), root);
2433 if (subplan->useHashTable)
2436 * If we are using a hash table for the subquery outputs, then the
2437 * cost of evaluating the query is a one-time cost. We charge one
2438 * cpu_operator_cost per tuple for the work of loading the hashtable, too.
2441 sp_cost.startup += plan->total_cost +
2442 cpu_operator_cost * plan->plan_rows;
2445 * The per-tuple costs include the cost of evaluating the lefthand
2446 * expressions, plus the cost of probing the hashtable. We already
2447 * accounted for the lefthand expressions as part of the testexpr, and
2448 * will also have counted one cpu_operator_cost for each comparison
2449 * operator. That is probably too low for the probing cost, but it's
2450 * hard to make a better estimate, so live with it for now.
2456 * Otherwise we will be rescanning the subplan output on each
2457 * evaluation. We need to estimate how much of the output we will
2458 * actually need to scan. NOTE: this logic should agree with the
2459 * tuple_fraction estimates used by make_subplan().
2462 Cost plan_run_cost = plan->total_cost - plan->startup_cost;
2464 if (subplan->subLinkType == EXISTS_SUBLINK)
2466 /* we only need to fetch 1 tuple */
2467 sp_cost.per_tuple += plan_run_cost / plan->plan_rows;
2469 else if (subplan->subLinkType == ALL_SUBLINK ||
2470 subplan->subLinkType == ANY_SUBLINK)
2472 /* assume we need 50% of the tuples */
2473 sp_cost.per_tuple += 0.50 * plan_run_cost;
2474 /* also charge a cpu_operator_cost per row examined */
2475 sp_cost.per_tuple += 0.50 * plan->plan_rows * cpu_operator_cost;
2479 /* assume we need all tuples */
2480 sp_cost.per_tuple += plan_run_cost;
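/*
 * For illustration only, with hypothetical plan costs: if the subplan has
 * startup_cost = 20, total_cost = 100 and plan_rows = 1000, then
 * plan_run_cost = 80 and the charge per evaluation is 80 / 1000 = 0.08 for
 * an EXISTS sublink, but 0.5 * 80 + 0.5 * 1000 * cpu_operator_cost = 41.25
 * (at the default cpu_operator_cost of 0.0025) for an ANY sublink, since we
 * assume half of the output must be scanned.
 */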
2484 * Also account for subplan's startup cost. If the subplan is
2485 * uncorrelated or undirect correlated, AND its topmost node is one
2486 * that materializes its output, assume that we'll only need to pay
2487 * its startup cost once; otherwise assume we pay the startup cost every time.
2490 if (subplan->parParam == NIL &&
2491 ExecMaterializesOutput(nodeTag(plan)))
2492 sp_cost.startup += plan->startup_cost;
2494 sp_cost.per_tuple += plan->startup_cost;
2497 subplan->startup_cost = sp_cost.startup;
2498 subplan->per_call_cost = sp_cost.per_tuple;
2504 * Given a finished Path, estimate the costs of rescanning it after
2505 * having done so the first time. For some Path types a rescan is
2506 * cheaper than an original scan (if no parameters change), and this
2507 * function embodies knowledge about that. The default is to return
2508 * the same costs stored in the Path. (Note that the cost estimates
2509 * actually stored in Paths are always for first scans.)
2511 * This function is not currently intended to model effects such as rescans
2512 * being cheaper due to disk block caching; what we are concerned with is
2513 * plan types wherein the executor caches results explicitly, or doesn't
2514 * redo startup calculations, etc.
2517 cost_rescan(PlannerInfo *root, Path *path,
2518 Cost *rescan_startup_cost, /* output parameters */
2519 Cost *rescan_total_cost)
2521 switch (path->pathtype)
2523 case T_FunctionScan:
2526 * Currently, nodeFunctionscan.c always executes the function to
2527 * completion before returning any rows, and caches the results in
2528 * a tuplestore. So the function eval cost is all startup cost
2529 * and isn't paid over again on rescans. However, all run costs
2530 * will be paid over again.
2532 *rescan_startup_cost = 0;
2533 *rescan_total_cost = path->total_cost - path->startup_cost;
2538 * Assume that all of the startup cost represents hash table
2539 * building, which we won't have to do over.
2541 *rescan_startup_cost = 0;
2542 *rescan_total_cost = path->total_cost - path->startup_cost;
2545 case T_WorkTableScan:
2548 * These plan types materialize their final result in a
2549 * tuplestore or tuplesort object. So the rescan cost is only
2550 * cpu_tuple_cost per tuple, unless the result is large enough to require spilling to disk.
2553 Cost run_cost = cpu_tuple_cost * path->parent->rows;
2554 double nbytes = relation_byte_size(path->parent->rows,
2555 path->parent->width);
2556 long work_mem_bytes = work_mem * 1024L;
2558 if (nbytes > work_mem_bytes)
2560 /* It will spill, so account for re-read cost */
2561 double npages = ceil(nbytes / BLCKSZ);
2563 run_cost += seq_page_cost * npages;
2565 *rescan_startup_cost = 0;
2566 *rescan_total_cost = run_cost;
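/*
 * For illustration only, with hypothetical numbers: rescanning a tuplestore
 * of 1,000,000 rows of width 64 costs cpu_tuple_cost * 1e6 = 10000 at the
 * default cpu_tuple_cost of 0.01.  On a typical 64-bit build with 8 kB
 * blocks, relation_byte_size() makes that roughly 88 MB of data (about 88
 * bytes per row after alignment and header overhead), so unless work_mem is
 * at least that large we also charge seq_page_cost per re-read page, roughly
 * another 10700 at the default seq_page_cost of 1.0.
 */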
2573 * These plan types not only materialize their results, but do
2574 * not implement qual filtering or projection. So they are
2575 * even cheaper to rescan than the ones above. We charge only
2576 * cpu_operator_cost per tuple. (Note: keep that in sync with
2577 * the run_cost charge in cost_sort, and also see comments in
2578 * cost_material before you change it.)
2580 Cost run_cost = cpu_operator_cost * path->parent->rows;
2581 double nbytes = relation_byte_size(path->parent->rows,
2582 path->parent->width);
2583 long work_mem_bytes = work_mem * 1024L;
2585 if (nbytes > work_mem_bytes)
2587 /* It will spill, so account for re-read cost */
2588 double npages = ceil(nbytes / BLCKSZ);
2590 run_cost += seq_page_cost * npages;
2592 *rescan_startup_cost = 0;
2593 *rescan_total_cost = run_cost;
2597 *rescan_startup_cost = path->startup_cost;
2598 *rescan_total_cost = path->total_cost;
2606 * Estimate the CPU costs of evaluating a WHERE clause.
2607 * The input can be either an implicitly-ANDed list of boolean
2608 * expressions, or a list of RestrictInfo nodes. (The latter is
2609 * preferred since it allows caching of the results.)
2610 * The result includes both a one-time (startup) component,
2611 * and a per-evaluation component.
2614 cost_qual_eval(QualCost *cost, List *quals, PlannerInfo *root)
2616 cost_qual_eval_context context;
2619 context.root = root;
2620 context.total.startup = 0;
2621 context.total.per_tuple = 0;
2623 /* We don't charge any cost for the implicit ANDing at top level ... */
2627 Node *qual = (Node *) lfirst(l);
2629 cost_qual_eval_walker(qual, &context);
2632 *cost = context.total;
2636 * cost_qual_eval_node
2637 * As above, for a single RestrictInfo or expression.
2640 cost_qual_eval_node(QualCost *cost, Node *qual, PlannerInfo *root)
2642 cost_qual_eval_context context;
2644 context.root = root;
2645 context.total.startup = 0;
2646 context.total.per_tuple = 0;
2648 cost_qual_eval_walker(qual, &context);
2650 *cost = context.total;
2654 cost_qual_eval_walker(Node *node, cost_qual_eval_context *context)
2660 * RestrictInfo nodes contain an eval_cost field reserved for this
2661 * routine's use, so that it's not necessary to evaluate the qual clause's
2662 * cost more than once. If the clause's cost hasn't been computed yet,
2663 * the field's startup value will contain -1.
2665 if (IsA(node, RestrictInfo))
2667 RestrictInfo *rinfo = (RestrictInfo *) node;
2669 if (rinfo->eval_cost.startup < 0)
2671 cost_qual_eval_context locContext;
2673 locContext.root = context->root;
2674 locContext.total.startup = 0;
2675 locContext.total.per_tuple = 0;
2678 * For an OR clause, recurse into the marked-up tree so that we
2679 * set the eval_cost for contained RestrictInfos too.
2681 if (rinfo->orclause)
2682 cost_qual_eval_walker((Node *) rinfo->orclause, &locContext);
2684 cost_qual_eval_walker((Node *) rinfo->clause, &locContext);
2687 * If the RestrictInfo is marked pseudoconstant, it will be tested
2688 * only once, so treat its cost as all startup cost.
2690 if (rinfo->pseudoconstant)
2692 /* count one execution during startup */
2693 locContext.total.startup += locContext.total.per_tuple;
2694 locContext.total.per_tuple = 0;
2696 rinfo->eval_cost = locContext.total;
2698 context->total.startup += rinfo->eval_cost.startup;
2699 context->total.per_tuple += rinfo->eval_cost.per_tuple;
2700 return false; /* do NOT recurse into children */
2705 * For each operator or function node in the given tree, we charge the
2706 * estimated execution cost given by pg_proc.procost (remember to multiply
2707 * this by cpu_operator_cost).
2709 * Vars and Consts are charged zero, and so are boolean operators (AND,
2710 * OR, NOT). Simplistic, but a lot better than no model at all.
2712 * Should we try to account for the possibility of short-circuit
2713 * evaluation of AND/OR? Probably *not*, because that would make the
2714 * results depend on the clause ordering, and we are not in any position
2715 * to expect that the current ordering of the clauses is the one that's
2716 * going to end up being used. The above per-RestrictInfo caching would
2717 * not mix well with trying to re-order clauses anyway.
2719 if (IsA(node, FuncExpr))
2721 context->total.per_tuple +=
2722 get_func_cost(((FuncExpr *) node)->funcid) * cpu_operator_cost;
2724 else if (IsA(node, OpExpr) ||
2725 IsA(node, DistinctExpr) ||
2726 IsA(node, NullIfExpr))
2728 /* rely on struct equivalence to treat these all alike */
2729 set_opfuncid((OpExpr *) node);
2730 context->total.per_tuple +=
2731 get_func_cost(((OpExpr *) node)->opfuncid) * cpu_operator_cost;
2733 else if (IsA(node, ScalarArrayOpExpr))
2736 * Estimate that the operator will be applied to about half of the
2737 * array elements before the answer is determined.
2739 ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) node;
2740 Node *arraynode = (Node *) lsecond(saop->args);
2742 set_sa_opfuncid(saop);
2743 context->total.per_tuple += get_func_cost(saop->opfuncid) *
2744 cpu_operator_cost * estimate_array_length(arraynode) * 0.5;
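/*
 * For illustration only: for a clause like "x = ANY(array)" over a
 * 10-element array whose comparison operator has the default procost of 1,
 * this adds 1 * cpu_operator_cost * 10 * 0.5 = 0.0125 per tuple at the
 * default cpu_operator_cost of 0.0025.
 */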
2746 else if (IsA(node, Aggref) ||
2747 IsA(node, WindowFunc))
2750 * Aggref and WindowFunc nodes are (and should be) treated like Vars,
2751 * ie, zero execution cost in the current model, because they behave
2752 * essentially like Vars in execQual.c. We disregard the costs of
2753 * their input expressions for the same reason. The actual execution
2754 * costs of the aggregate/window functions and their arguments have to
2755 * be factored into plan-node-specific costing of the Agg or WindowAgg plan node.
2758 return false; /* don't recurse into children */
2760 else if (IsA(node, CoerceViaIO))
2762 CoerceViaIO *iocoerce = (CoerceViaIO *) node;
2767 /* check the result type's input function */
2768 getTypeInputInfo(iocoerce->resulttype,
2769 &iofunc, &typioparam);
2770 context->total.per_tuple += get_func_cost(iofunc) * cpu_operator_cost;
2771 /* check the input type's output function */
2772 getTypeOutputInfo(exprType((Node *) iocoerce->arg),
2773 &iofunc, &typisvarlena);
2774 context->total.per_tuple += get_func_cost(iofunc) * cpu_operator_cost;
2776 else if (IsA(node, ArrayCoerceExpr))
2778 ArrayCoerceExpr *acoerce = (ArrayCoerceExpr *) node;
2779 Node *arraynode = (Node *) acoerce->arg;
2781 if (OidIsValid(acoerce->elemfuncid))
2782 context->total.per_tuple += get_func_cost(acoerce->elemfuncid) *
2783 cpu_operator_cost * estimate_array_length(arraynode);
2785 else if (IsA(node, RowCompareExpr))
2787 /* Conservatively assume we will check all the columns */
2788 RowCompareExpr *rcexpr = (RowCompareExpr *) node;
2791 foreach(lc, rcexpr->opnos)
2793 Oid opid = lfirst_oid(lc);
2795 context->total.per_tuple += get_func_cost(get_opcode(opid)) * cpu_operator_cost;
2799 else if (IsA(node, CurrentOfExpr))
2801 /* Report high cost to prevent selection of anything but TID scan */
2802 context->total.startup += disable_cost;
2804 else if (IsA(node, SubLink))
2806 /* This routine should not be applied to un-planned expressions */
2807 elog(ERROR, "cannot handle unplanned sub-select");
2809 else if (IsA(node, SubPlan))
2812 * A subplan node in an expression typically indicates that the
2813 * subplan will be executed on each evaluation, so charge accordingly.
2814 * (Sub-selects that can be executed as InitPlans have already been
2815 * removed from the expression.)
2817 SubPlan *subplan = (SubPlan *) node;
2819 context->total.startup += subplan->startup_cost;
2820 context->total.per_tuple += subplan->per_call_cost;
2823 * We don't want to recurse into the testexpr, because it was already
2824 * counted in the SubPlan node's costs. So we're done.
2828 else if (IsA(node, AlternativeSubPlan))
2831 * Arbitrarily use the first alternative plan for costing. (We should
2832 * certainly only include one alternative, and we don't yet have
2833 * enough information to know which one the executor is most likely to use.)
2836 AlternativeSubPlan *asplan = (AlternativeSubPlan *) node;
2838 return cost_qual_eval_walker((Node *) linitial(asplan->subplans), context);
2842 /* recurse into children */
2843 return expression_tree_walker(node, cost_qual_eval_walker, (void *) context);
2850 * Estimate how much of the inner input a SEMI or ANTI join
2851 * can be expected to scan.
2853 * In a hash or nestloop SEMI/ANTI join, the executor will stop scanning
2854 * inner rows as soon as it finds a match to the current outer row.
2855 * We should therefore adjust some of the cost components for this effect.
2856 * This function computes some estimates needed for these adjustments.
2858 * 'path' is already filled in except for the cost fields
2859 * 'sjinfo' is extra info about the join for selectivity estimation
2861 * Returns TRUE if this is a SEMI or ANTI join, FALSE if not.
2863 * Output parameters (set only in TRUE-result case):
2864 * *outer_match_frac is set to the fraction of the outer tuples that are
2865 * expected to have at least one match.
2866 * *match_count is set to the average number of matches expected for
2867 * outer tuples that have at least one match.
2868 * *indexed_join_quals is set to TRUE if all the joinquals are used as
2869 * inner index quals, FALSE if not.
2871 * indexed_join_quals can be passed as NULL if that information is not
2872 * relevant (it is only useful for the nestloop case).
2875 adjust_semi_join(PlannerInfo *root, JoinPath *path, SpecialJoinInfo *sjinfo,
2876 Selectivity *outer_match_frac,
2877 Selectivity *match_count,
2878 bool *indexed_join_quals)
2880 JoinType jointype = path->jointype;
2883 Selectivity avgmatch;
2884 SpecialJoinInfo norm_sjinfo;
2888 /* Fall out if it's not JOIN_SEMI or JOIN_ANTI */
2889 if (jointype != JOIN_SEMI && jointype != JOIN_ANTI) return false;
2893 * Note: it's annoying to repeat this selectivity estimation on each call,
2894 * when the joinclause list will be the same for all path pairs
2895 * implementing a given join. clausesel.c will save us from the worst
2896 * effects of this by caching at the RestrictInfo level; but perhaps it'd
2897 * be worth finding a way to cache the results at a higher level.
2901 * In an ANTI join, we must ignore clauses that are "pushed down", since
2902 * those won't affect the match logic. In a SEMI join, we do not
2903 * distinguish joinquals from "pushed down" quals, so just use the whole
2904 * restrictinfo list.
2906 if (jointype == JOIN_ANTI)
2909 foreach(l, path->joinrestrictinfo)
2911 RestrictInfo *rinfo = (RestrictInfo *) lfirst(l);
2913 Assert(IsA(rinfo, RestrictInfo));
2914 if (!rinfo->is_pushed_down)
2915 joinquals = lappend(joinquals, rinfo);
2919 else joinquals = path->joinrestrictinfo;
2922 * Get the JOIN_SEMI or JOIN_ANTI selectivity of the join clauses.
2924 jselec = clauselist_selectivity(root, joinquals, 0, jointype, sjinfo);
2931 * Also get the normal inner-join selectivity of the join clauses.
2933 norm_sjinfo.type = T_SpecialJoinInfo;
2934 norm_sjinfo.min_lefthand = path->outerjoinpath->parent->relids;
2935 norm_sjinfo.min_righthand = path->innerjoinpath->parent->relids;
2936 norm_sjinfo.syn_lefthand = path->outerjoinpath->parent->relids;
2937 norm_sjinfo.syn_righthand = path->innerjoinpath->parent->relids;
2938 norm_sjinfo.jointype = JOIN_INNER;
2939 /* we don't bother trying to make the remaining fields valid */
2940 norm_sjinfo.lhs_strict = false;
2941 norm_sjinfo.delay_upper_joins = false;
2942 norm_sjinfo.join_quals = NIL;
2944 nselec = clauselist_selectivity(root, joinquals, 0, JOIN_INNER, &norm_sjinfo);
2950 /* Avoid leaking a lot of ListCells */
2951 if (jointype == JOIN_ANTI)
2952 list_free(joinquals);
2955 * jselec can be interpreted as the fraction of outer-rel rows that have
2956 * any matches (this is true for both SEMI and ANTI cases). And nselec is
2957 * the fraction of the Cartesian product that matches. So, the average
2958 * number of matches for each outer-rel row that has at least one match is
2959 * nselec * inner_rows / jselec.
2961 * Note: it is correct to use the inner rel's "rows" count here, not
2962 * PATH_ROWS(), even if the inner path under consideration is an inner
2963 * indexscan. This is because we have included all the join clauses in
2964 * the selectivity estimate, even ones used in an inner indexscan.
2966 if (jselec > 0) /* protect against zero divide */
2968 avgmatch = nselec * path->innerjoinpath->parent->rows / jselec;
2969 /* Clamp to sane range */
2970 avgmatch = Max(1.0, avgmatch);
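/*
 * For illustration only, with hypothetical selectivities: if jselec = 0.5
 * (half of the outer rows have at least one match), nselec = 0.001 and the
 * inner rel has 10000 rows, then avgmatch = 0.001 * 10000 / 0.5 = 20 matches
 * per matched outer row.
 */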
2975 *outer_match_frac = jselec;
2976 *match_count = avgmatch;
2979 * If requested, check whether the inner path uses all the joinquals as
2980 * indexquals. (If that's true, we can assume that an unmatched outer
2981 * tuple is cheap to process, whereas otherwise it's probably expensive.)
2983 if (indexed_join_quals)
2985 if (path->joinrestrictinfo != NIL)
2989 nrclauses = select_nonredundant_join_clauses(root,
2990 path->joinrestrictinfo,
2991 path->innerjoinpath);
2992 *indexed_join_quals = (nrclauses == NIL);
2996 /* a clauseless join does NOT qualify */
2997 *indexed_join_quals = false;
3006 * approx_tuple_count
3007 * Quick-and-dirty estimation of the number of join rows passing
3008 * a set of qual conditions.
3010 * The quals can be either an implicitly-ANDed list of boolean expressions,
3011 * or a list of RestrictInfo nodes (typically the latter).
3013 * We intentionally compute the selectivity under JOIN_INNER rules, even
3014 * if it's some type of outer join. This is appropriate because we are
3015 * trying to figure out how many tuples pass the initial merge or hash join steps.
3018 * This is quick-and-dirty because we bypass clauselist_selectivity, and
3019 * simply multiply the independent clause selectivities together. Now
3020 * clauselist_selectivity often can't do any better than that anyhow, but
3021 * for some situations (such as range constraints) it is smarter. However,
3022 * we can't effectively cache the results of clauselist_selectivity, whereas
3023 * the individual clause selectivities can be and are cached.
3025 * Since we are only using the results to estimate how many potential
3026 * output tuples are generated and passed through qpqual checking, it
3027 * seems OK to live with the approximation.
3030 approx_tuple_count(PlannerInfo *root, JoinPath *path, List *quals)
3033 double outer_tuples = path->outerjoinpath->parent->rows;
3034 double inner_tuples = path->innerjoinpath->parent->rows;
3035 SpecialJoinInfo sjinfo;
3036 Selectivity selec = 1.0;
3040 * Make up a SpecialJoinInfo for JOIN_INNER semantics.
3042 sjinfo.type = T_SpecialJoinInfo;
3043 sjinfo.min_lefthand = path->outerjoinpath->parent->relids;
3044 sjinfo.min_righthand = path->innerjoinpath->parent->relids;
3045 sjinfo.syn_lefthand = path->outerjoinpath->parent->relids;
3046 sjinfo.syn_righthand = path->innerjoinpath->parent->relids;
3047 sjinfo.jointype = JOIN_INNER;
3048 /* we don't bother trying to make the remaining fields valid */
3049 sjinfo.lhs_strict = false;
3050 sjinfo.delay_upper_joins = false;
3051 sjinfo.join_quals = NIL;
3053 /* Get the approximate selectivity */
3056 Node *qual = (Node *) lfirst(l);
3058 /* Note that clause_selectivity will be able to cache its result */
3059 selec *= clause_selectivity(root, qual, 0, JOIN_INNER, &sjinfo);
3062 /* Apply it to the input relation sizes */
3063 tuples = selec * outer_tuples * inner_tuples;
3065 return clamp_row_est(tuples);
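/*
 * For illustration only: with two quals of selectivity 0.01 and 0.1, an
 * outer input of 1000 rows and an inner input of 5000 rows, the estimate is
 * 0.01 * 0.1 * 1000 * 5000 = 5000 tuples, whereas clauselist_selectivity
 * might have recognized a range pair and produced a smaller combined
 * selectivity.
 */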
3070 * set_baserel_size_estimates
3071 * Set the size estimates for the given base relation.
3073 * The rel's targetlist and restrictinfo list must have been constructed
3074 * already, and rel->tuples must be set.
3076 * We set the following fields of the rel node:
3077 * rows: the estimated number of output tuples (after applying
3078 * restriction clauses).
3079 * width: the estimated average output tuple width in bytes.
3080 * baserestrictcost: estimated cost of evaluating baserestrictinfo clauses.
3083 set_baserel_size_estimates(PlannerInfo *root, RelOptInfo *rel)
3087 /* Should only be applied to base relations */
3088 Assert(rel->relid > 0);
3090 nrows = rel->tuples *
3091 clauselist_selectivity(root,
3092 rel->baserestrictinfo, 0, JOIN_INNER, NULL);
3097 rel->rows = clamp_row_est(nrows);
3099 cost_qual_eval(&rel->baserestrictcost, rel->baserestrictinfo, root);
3101 set_rel_width(root, rel);
3105 * set_joinrel_size_estimates
3106 * Set the size estimates for the given join relation.
3108 * The rel's targetlist must have been constructed already, and a
3109 * restriction clause list that matches the given component rels must be provided.
3112 * Since there is more than one way to make a joinrel for more than two
3113 * base relations, the results we get here could depend on which component
3114 * rel pair is provided. In theory we should get the same answers no matter
3115 * which pair is provided; in practice, since the selectivity estimation
3116 * routines don't handle all cases equally well, we might not. But there's
3117 * not much to be done about it. (Would it make sense to repeat the
3118 * calculations for each pair of input rels that's encountered, and somehow
3119 * average the results? Probably way more trouble than it's worth.)
3121 * We set only the rows field here. The width field was already set by
3122 * build_joinrel_tlist, and baserestrictcost is not used for join rels.
3125 set_joinrel_size_estimates(PlannerInfo *root, RelOptInfo *rel,
3126 RelOptInfo *outer_rel,
3127 RelOptInfo *inner_rel,
3128 SpecialJoinInfo *sjinfo, List *restrictlist)
3131 JoinType jointype = sjinfo->jointype;
3137 * Compute joinclause selectivity. Note that we are only considering
3138 * clauses that become restriction clauses at this join level; we are not
3139 * double-counting them because they were not considered in estimating the
3140 * sizes of the component rels.
3142 * For an outer join, we have to distinguish the selectivity of the join's
3143 * own clauses (JOIN/ON conditions) from any clauses that were "pushed
3144 * down". For inner joins we just count them all as joinclauses.
3146 if (IS_OUTER_JOIN(jointype))
3148 List *joinquals = NIL;
3149 List *pushedquals = NIL;
3152 /* Grovel through the clauses to separate into two lists */
3153 foreach(l, restrictlist)
3155 RestrictInfo *rinfo = (RestrictInfo *) lfirst(l);
3157 Assert(IsA(rinfo, RestrictInfo));
3158 if (rinfo->is_pushed_down)
3159 pushedquals = lappend(pushedquals, rinfo);
3161 joinquals = lappend(joinquals, rinfo);
3164 /* Get the separate selectivities */
3165 jselec = clauselist_selectivity(root, joinquals, 0, jointype, sjinfo);
3170 pselec = clauselist_selectivity(root, pushedquals, 0, jointype, sjinfo);
3176 /* Avoid leaking a lot of ListCells */
3177 list_free(joinquals);
3178 list_free(pushedquals);
3182 jselec = clauselist_selectivity(root, restrictlist, 0, jointype, sjinfo);
3187 pselec = 0.0; /* not used, keep compiler quiet */
3191 * Basically, we multiply size of Cartesian product by selectivity.
3193 * If we are doing an outer join, take that into account: the joinqual
3194 * selectivity has to be clamped using the knowledge that the output must
3195 * be at least as large as the non-nullable input. However, any
3196 * pushed-down quals are applied after the outer join, so their
3197 * selectivity applies fully.
3199 * For JOIN_SEMI and JOIN_ANTI, the selectivity is defined as the fraction
3200 * of LHS rows that have matches, and we apply that straightforwardly.
3205 nrows = outer_rel->rows * inner_rel->rows * jselec;	/* JOIN_INNER */
3208 nrows = outer_rel->rows * inner_rel->rows * jselec;	/* JOIN_LEFT */
3209 if (nrows < outer_rel->rows)
3210 nrows = outer_rel->rows;
3214 nrows = outer_rel->rows * inner_rel->rows * jselec;	/* JOIN_FULL */
3215 if (nrows < outer_rel->rows)
3216 nrows = outer_rel->rows;
3217 if (nrows < inner_rel->rows)
3218 nrows = inner_rel->rows;
3222 nrows = outer_rel->rows * jselec;	/* JOIN_SEMI */
3223 /* pselec not used */
3226 nrows = outer_rel->rows * (1.0 - jselec);	/* JOIN_ANTI */
3230 /* other values not expected here */
3231 elog(ERROR, "unrecognized join type: %d", (int) jointype);
3232 nrows = 0; /* keep compiler quiet */
3236 rel->rows = clamp_row_est(nrows);
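/*
 * For illustration only, with hypothetical inputs: if the outer rel has 1000
 * rows, the inner rel 100 rows and jselec = 0.005, an inner join is
 * estimated at 1000 * 100 * 0.005 = 500 rows, while a left join is clamped
 * up to the 1000 outer rows because every outer row must appear at least
 * once in the result.
 */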
3240 * set_subquery_size_estimates
3241 * Set the size estimates for a base relation that is a subquery.
3243 * The rel's targetlist and restrictinfo list must have been constructed
3244 * already, and the plan for the subquery must have been completed.
3245 * We look at the subquery's plan and PlannerInfo to extract data.
3247 * We set the same fields as set_baserel_size_estimates.
3250 set_subquery_size_estimates(PlannerInfo *root, RelOptInfo *rel)
3252 PlannerInfo *subroot = rel->subroot;
3256 /* Should only be applied to base relations that are subqueries */
3257 Assert(rel->relid > 0);
3258 rte = planner_rt_fetch(rel->relid, root);
3259 Assert(rte->rtekind == RTE_SUBQUERY);
3261 /* Copy raw number of output rows from subplan */
3262 rel->tuples = rel->subplan->plan_rows;
3265 * Compute per-output-column width estimates by examining the subquery's
3266 * targetlist. For any output that is a plain Var, get the width estimate
3267 * that was made while planning the subquery. Otherwise, we leave it to
3268 * set_rel_width to fill in a datatype-based default estimate.
3270 foreach(lc, subroot->parse->targetList)
3272 TargetEntry *te = (TargetEntry *) lfirst(lc);
3273 Node *texpr = (Node *) te->expr;
3274 int32 item_width = 0;
3276 Assert(IsA(te, TargetEntry));
3277 if (te->resjunk) continue;	/* junk columns aren't visible to upper query */
3282 * XXX This currently doesn't work for subqueries containing set
3283 * operations, because the Vars in their tlists are bogus references
3284 * to the first leaf subquery, which wouldn't give the right answer
3285 * even if we could still get to its PlannerInfo.
3287 * Also, the subquery could be an appendrel for which all branches are
3288 * known empty due to constraint exclusion, in which case
3289 * set_append_rel_pathlist will have left the attr_widths set to zero.
3291 * In either case, we just leave the width estimate zero until
3292 * set_rel_width fixes it.
3294 if (IsA(texpr, Var) &&
3295 subroot->parse->setOperations == NULL)
3297 Var *var = (Var *) texpr;
3298 RelOptInfo *subrel = find_base_rel(subroot, var->varno);
3300 item_width = subrel->attr_widths[var->varattno - subrel->min_attr];
3302 Assert(te->resno >= rel->min_attr && te->resno <= rel->max_attr);
3303 rel->attr_widths[te->resno - rel->min_attr] = item_width;
3306 /* Now estimate number of output rows, etc */
3307 set_baserel_size_estimates(root, rel);
3311 * set_function_size_estimates
3312 * Set the size estimates for a base relation that is a function call.
3314 * The rel's targetlist and restrictinfo list must have been constructed already.
3317 * We set the same fields as set_baserel_size_estimates.
3320 set_function_size_estimates(PlannerInfo *root, RelOptInfo *rel)
3324 /* Should only be applied to base relations that are functions */
3325 Assert(rel->relid > 0);
3326 rte = planner_rt_fetch(rel->relid, root);
3327 Assert(rte->rtekind == RTE_FUNCTION);
3329 /* Estimate number of rows the function itself will return */
3330 rel->tuples = clamp_row_est(expression_returns_set_rows(rte->funcexpr));
3332 /* Now estimate number of output rows, etc */
3333 set_baserel_size_estimates(root, rel);
3337 * set_values_size_estimates
3338 * Set the size estimates for a base relation that is a values list.
3340 * The rel's targetlist and restrictinfo list must have been constructed already.
3343 * We set the same fields as set_baserel_size_estimates.
3346 set_values_size_estimates(PlannerInfo *root, RelOptInfo *rel)
3350 /* Should only be applied to base relations that are values lists */
3351 Assert(rel->relid > 0);
3352 rte = planner_rt_fetch(rel->relid, root);
3353 Assert(rte->rtekind == RTE_VALUES);
3356 * Estimate number of rows the values list will return. We know this
3357 * precisely based on the list length (well, barring set-returning
3358 * functions in list items, but that's a refinement not catered for
3359 * anywhere else either).
3361 rel->tuples = list_length(rte->values_lists);
3363 /* Now estimate number of output rows, etc */
3364 set_baserel_size_estimates(root, rel);
3368 * set_cte_size_estimates
3369 * Set the size estimates for a base relation that is a CTE reference.
3371 * The rel's targetlist and restrictinfo list must have been constructed
3372 * already, and we need the completed plan for the CTE (if a regular CTE)
3373 * or the non-recursive term (if a self-reference).
3375 * We set the same fields as set_baserel_size_estimates.
3378 set_cte_size_estimates(PlannerInfo *root, RelOptInfo *rel, Plan *cteplan)
3382 /* Should only be applied to base relations that are CTE references */
3383 Assert(rel->relid > 0);
3384 rte = planner_rt_fetch(rel->relid, root);
3385 Assert(rte->rtekind == RTE_CTE);
3387 if (rte->self_reference)
3390 * In a self-reference, arbitrarily assume the average worktable size
3391 * is about 10 times the nonrecursive term's size.
3393 rel->tuples = 10 * cteplan->plan_rows;
3397 /* Otherwise just believe the CTE plan's output estimate */
3398 rel->tuples = cteplan->plan_rows;
3401 /* Now estimate number of output rows, etc */
3402 set_baserel_size_estimates(root, rel);
3406 * set_foreign_size_estimates
3407 * Set the size estimates for a base relation that is a foreign table.
3409 * There is not a whole lot that we can do here; the foreign-data wrapper
3410 * is responsible for producing useful estimates. We can do a decent job
3411 * of estimating baserestrictcost, so we set that, and we also set up width
3412 * using what will be purely datatype-driven estimates from the targetlist.
3413 * There is no way to do anything sane with the rows value, so we just put
3414 * a default estimate and hope that the wrapper can improve on it. The
3415 * wrapper's PlanForeignScan function will be called momentarily.
3417 * The rel's targetlist and restrictinfo list must have been constructed already.
3421 set_foreign_size_estimates(PlannerInfo *root, RelOptInfo *rel)
3423 /* Should only be applied to base relations */
3424 Assert(rel->relid > 0);
3426 rel->rows = 1000; /* entirely bogus default estimate */
3428 cost_qual_eval(&rel->baserestrictcost, rel->baserestrictinfo, root);
3430 set_rel_width(root, rel);
3436 * Set the estimated output width of a base relation.
3438 * The estimated output width is the sum of the per-attribute width estimates
3439 * for the actually-referenced columns, plus any PHVs or other expressions
3440 * that have to be calculated at this relation. This is the amount of data
3441 * we'd need to pass upwards in case of a sort, hash, etc.
3443 * NB: this works best on plain relations because it prefers to look at
3444 * real Vars. For subqueries, set_subquery_size_estimates will already have
3445 * copied up whatever per-column estimates were made within the subquery,
3446 * and for other types of rels there isn't much we can do anyway. We fall
3447 * back on (fairly stupid) datatype-based width estimates if we can't get
3448 * any better number.
3450 * The per-attribute width estimates are cached for possible re-use while
3451 * building join relations.
3454 set_rel_width(PlannerInfo *root, RelOptInfo *rel)
3456 Oid reloid = planner_rt_fetch(rel->relid, root)->relid;
3457 int32 tuple_width = 0;
3458 bool have_wholerow_var = false;
3461 foreach(lc, rel->reltargetlist)
3463 Node *node = (Node *) lfirst(lc);
3467 Var *var = (Var *) node;
3471 Assert(var->varno == rel->relid);
3472 Assert(var->varattno >= rel->min_attr);
3473 Assert(var->varattno <= rel->max_attr);
3475 ndx = var->varattno - rel->min_attr;
3478 * If it's a whole-row Var, we'll deal with it below after we have
3479 * already cached as many attr widths as possible.
3481 if (var->varattno == 0)
3483 have_wholerow_var = true;
3488 * The width may have been cached already (especially if it's a
3489 * subquery), so don't duplicate effort.
3491 if (rel->attr_widths[ndx] > 0)
3493 tuple_width += rel->attr_widths[ndx];
3497 /* Try to get column width from statistics */
3498 if (reloid != InvalidOid && var->varattno > 0)
3500 item_width = get_attavgwidth(reloid, var->varattno);
3503 rel->attr_widths[ndx] = item_width;
3504 tuple_width += item_width;
3510 * Not a plain relation, or can't find statistics for it. Estimate
3511 * using just the type info.
3513 item_width = get_typavgwidth(var->vartype, var->vartypmod);
3514 Assert(item_width > 0);
3515 rel->attr_widths[ndx] = item_width;
3516 tuple_width += item_width;
3518 else if (IsA(node, PlaceHolderVar))
3520 PlaceHolderVar *phv = (PlaceHolderVar *) node;
3521 PlaceHolderInfo *phinfo = find_placeholder_info(root, phv, false);
3523 tuple_width += phinfo->ph_width;
3528 * We could be looking at an expression pulled up from a subquery,
3529 * or a ROW() representing a whole-row child Var, etc. Do what we
3530 * can using the expression type information.
3534 item_width = get_typavgwidth(exprType(node), exprTypmod(node));
3535 Assert(item_width > 0);
3536 tuple_width += item_width;
3541 * If we have a whole-row reference, estimate its width as the sum of
3542 * per-column widths plus sizeof(HeapTupleHeaderData).
3544 if (have_wholerow_var)
3546 int32 wholerow_width = sizeof(HeapTupleHeaderData);
3548 if (reloid != InvalidOid)
3550 /* Real relation, so estimate true tuple width */
3551 wholerow_width += get_relation_data_width(reloid,
3552 rel->attr_widths - rel->min_attr);
3556 /* Do what we can with info for a phony rel */
3559 for (i = 1; i <= rel->max_attr; i++)
3560 wholerow_width += rel->attr_widths[i - rel->min_attr];
3563 rel->attr_widths[0 - rel->min_attr] = wholerow_width;
3566 * Include the whole-row Var as part of the output tuple. Yes, that
3567 * really is what happens at runtime.
3569 tuple_width += wholerow_width;
3572 Assert(tuple_width >= 0);
3573 rel->width = tuple_width;
3577 * relation_byte_size
3578 * Estimate the storage space in bytes for a given number of tuples
3579 * of a given width (size in bytes).
3582 relation_byte_size(double tuples, int width)
3584 return tuples * (MAXALIGN(width) + MAXALIGN(sizeof(HeapTupleHeaderData)));
3589 * Returns an estimate of the number of pages covered by a given
3590 * number of tuples of a given width (size in bytes).
3593 page_size(double tuples, int width)
3595 return ceil(relation_byte_size(tuples, width) / BLCKSZ);
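/*
 * For illustration only: on a typical 64-bit build (MAXALIGN of 8, 8 kB
 * blocks), 1000 tuples of width 100 come out to
 * 1000 * (104 + 24) = 128000 bytes, which page_size() turns into
 * ceil(128000 / 8192) = 16 pages.
 */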