1 /*-------------------------------------------------------------------------
4 * Routines to compute (and set) relation sizes and path costs
6 * Path costs are measured in arbitrary units established by these basic parameters:
9 * seq_page_cost Cost of a sequential page fetch
10 * random_page_cost Cost of a non-sequential page fetch
11 * cpu_tuple_cost Cost of typical CPU time to process a tuple
12 * cpu_index_tuple_cost Cost of typical CPU time to process an index tuple
13 * cpu_operator_cost Cost of CPU time to execute an operator or function
15 * We expect that the kernel will typically do some amount of read-ahead
16 * optimization; this in conjunction with seek costs means that seq_page_cost
17 * is normally considerably less than random_page_cost. (However, if the
18 * database is fully cached in RAM, it is reasonable to set them equal.)
20 * We also use a rough estimate "effective_cache_size" of the number of
21 * disk pages in Postgres + OS-level disk cache. (We can't simply use
22 * NBuffers for this purpose because that would ignore the effects of
23 * the kernel's disk cache.)
25 * Obviously, taking constants for these values is an oversimplification,
26 * but it's tough enough to get any useful estimates even at this level of
27 * detail. Note that all of these parameters are user-settable, in case
28 * the default values are drastically off for a particular platform.
30 * seq_page_cost and random_page_cost can also be overridden for an individual
31 * tablespace, in case some data is on a fast disk and other data is on a slow
32 * disk. Per-tablespace overrides never apply to temporary work files such as
33 * an external sort or a materialize node that overflows work_mem.
35 * We compute two separate costs for each path:
36 * total_cost: total estimated cost to fetch all tuples
37 * startup_cost: cost that is expended before first tuple is fetched
38 * In some scenarios, such as when there is a LIMIT or we are implementing
39 * an EXISTS(...) sub-select, it is not necessary to fetch all tuples of the
40 * path's result. A caller can estimate the cost of fetching a partial
41 * result by interpolating between startup_cost and total_cost. In detail:
42 * actual_cost = startup_cost +
43 * (total_cost - startup_cost) * tuples_to_fetch / path->rows;
44 * Note that a base relation's rows count (and, by extension, plan_rows for
45 * plan nodes below the LIMIT node) are set without regard to any LIMIT, so
46 * that this equation works properly. (Also, these routines guarantee not to
47 * set the rows count to zero, so there will be no zero divide.) The LIMIT is
48 * applied as a top-level plan node.
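*
* As a purely illustrative example of that interpolation (made-up numbers,
* not taken from any real plan): a path with startup_cost = 10,
* total_cost = 1010 and rows = 1000, from which a caller expects to fetch
* only the first 50 tuples, would be costed at about
* actual_cost = 10 + (1010 - 10) * 50 / 1000 = 60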
50 * For largely historical reasons, most of the routines in this module use
51 * the passed result Path only to store their results (rows, startup_cost and
52 * total_cost) into. All the input data they need is passed as separate
53 * parameters, even though much of it could be extracted from the Path.
54 * An exception is made for the cost_XXXjoin() routines, which expect all
55 * the other fields of the passed XXXPath to be filled in, and similarly
56 * cost_index() assumes the passed IndexPath is valid except for its output values.
60 * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group
61 * Portions Copyright (c) 1994, Regents of the University of California
64 * src/backend/optimizer/path/costsize.c
66 *-------------------------------------------------------------------------
73 #include "executor/executor.h"
74 #include "executor/nodeHash.h"
75 #include "miscadmin.h"
76 #include "nodes/nodeFuncs.h"
77 #include "optimizer/clauses.h"
78 #include "optimizer/cost.h"
79 #include "optimizer/pathnode.h"
80 #include "optimizer/placeholder.h"
81 #include "optimizer/plancat.h"
82 #include "optimizer/planmain.h"
83 #include "optimizer/restrictinfo.h"
84 #include "parser/parsetree.h"
85 #include "utils/lsyscache.h"
86 #include "utils/selfuncs.h"
87 #include "utils/spccache.h"
88 #include "utils/tuplesort.h"
91 #define LOG2(x) (log(x) / 0.693147180559945)
94 double seq_page_cost = DEFAULT_SEQ_PAGE_COST;
95 double random_page_cost = DEFAULT_RANDOM_PAGE_COST;
96 double cpu_tuple_cost = DEFAULT_CPU_TUPLE_COST;
97 double cpu_index_tuple_cost = DEFAULT_CPU_INDEX_TUPLE_COST;
98 double cpu_operator_cost = DEFAULT_CPU_OPERATOR_COST;
100 int effective_cache_size = DEFAULT_EFFECTIVE_CACHE_SIZE;
102 Cost disable_cost = 1.0e10;
104 bool enable_seqscan = true;
105 bool enable_indexscan = true;
106 bool enable_indexonlyscan = true;
107 bool enable_bitmapscan = true;
108 bool enable_tidscan = true;
109 bool enable_sort = true;
110 bool enable_hashagg = true;
111 bool enable_nestloop = true;
112 bool enable_material = true;
113 bool enable_mergejoin = true;
114 bool enable_hashjoin = true;
120 } cost_qual_eval_context;
122 static MergeScanSelCache *cached_scansel(PlannerInfo *root,
125 static void cost_rescan(PlannerInfo *root, Path *path,
126 Cost *rescan_startup_cost, Cost *rescan_total_cost);
127 static bool cost_qual_eval_walker(Node *node, cost_qual_eval_context *context);
128 static bool has_indexed_join_quals(NestPath *path, List *joinclauses);
129 static double approx_tuple_count(PlannerInfo *root, JoinPath *path,
131 static void set_joinpath_size_estimate(PlannerInfo *root, JoinPath *path,
132 SpecialJoinInfo *sjinfo,
134 static double calc_joinrel_size_estimate(PlannerInfo *root,
137 SpecialJoinInfo *sjinfo,
139 static void set_rel_width(PlannerInfo *root, RelOptInfo *rel);
140 static double relation_byte_size(double tuples, int width);
141 static double page_size(double tuples, int width);
146 * Force a row-count estimate to a sane value.
149 clamp_row_est(double nrows)
152 * Force estimate to be at least one row, to make explain output look
153 * better and to avoid possible divide-by-zero when interpolating costs.
154 * Make it an integer, too.
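*
* Below is a minimal sketch, assuming the elided body simply implements the
* rule just stated: clamp to at least one row and round to an integer
* (rint() is from <math.h>).
*/
if (nrows <= 1.0)
nrows = 1.0;
else
nrows = rint(nrows);

return nrows;
}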
167 * Determines and returns the cost of scanning a relation sequentially.
170 cost_seqscan(Path *path, PlannerInfo *root,
173 double spc_seq_page_cost;
174 Cost startup_cost = 0;
178 /* Should only be applied to base relations */
179 Assert(baserel->relid > 0);
180 Assert(baserel->rtekind == RTE_RELATION);
182 /* For now, at least, seqscans are never parameterized */
183 path->rows = baserel->rows;
186 startup_cost += disable_cost;
188 /* fetch estimated page cost for tablespace containing table */
189 get_tablespace_page_costs(baserel->reltablespace,
196 run_cost += spc_seq_page_cost * baserel->pages;
199 startup_cost += baserel->baserestrictcost.startup;
200 cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
201 run_cost += cpu_per_tuple * baserel->tuples;
203 path->startup_cost = startup_cost;
204 path->total_cost = startup_cost + run_cost;
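/*
* Rough illustration of the arithmetic above (hypothetical table, default
* cost settings of seq_page_cost = 1.0 and cpu_tuple_cost = 0.01 assumed):
* a 1000-page, 100000-tuple relation with no restriction quals comes out at
* about 1000 * 1.0 + 100000 * 0.01 = 2000 units of total cost, essentially
* all of it run cost.
*/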
209 * Determines and returns the cost of scanning a relation using an index.
211 * 'path' describes the indexscan under consideration, and is complete
212 * except for the fields to be set by this routine
213 * 'loop_count' is the number of repetitions of the indexscan to factor into
214 * estimates of caching behavior
216 * In addition to rows, startup_cost and total_cost, cost_index() sets the
217 * path's indextotalcost and indexselectivity fields. These values will be
218 * needed if the IndexPath is used in a BitmapIndexScan.
220 * NOTE: path->indexquals must contain only clauses usable as index
221 * restrictions. Any additional quals evaluated as qpquals may reduce the
222 * number of returned tuples, but they won't reduce the number of tuples
223 * we have to fetch from the table, so they don't reduce the scan cost.
226 cost_index(IndexPath *path, PlannerInfo *root, double loop_count)
228 IndexOptInfo *index = path->indexinfo;
229 RelOptInfo *baserel = index->rel;
230 bool indexonly = (path->path.pathtype == T_IndexOnlyScan);
232 Cost startup_cost = 0;
234 Cost indexStartupCost;
236 Selectivity indexSelectivity;
237 double indexCorrelation,
239 double spc_seq_page_cost,
240 spc_random_page_cost;
243 QualCost qpqual_cost;
245 double tuples_fetched;
246 double pages_fetched;
248 /* Should only be applied to base relations */
249 Assert(IsA(baserel, RelOptInfo) &&
250 IsA(index, IndexOptInfo));
251 Assert(baserel->relid > 0);
252 Assert(baserel->rtekind == RTE_RELATION);
254 /* Estimate the number of rows returned by the indexscan */
255 if (path->path.required_outer)
258 * The estimate should be less than baserel->rows because of the
259 * additional selectivity of the join clauses. Since indexclauses may
260 * contain both restriction and join clauses, we have to do a set
261 * union to get the full set of clauses that must be considered to
262 * compute the correct selectivity. (Without the union operation, we
263 * might have some restriction clauses appearing twice, which'd
264 * mislead clauselist_selectivity into double-counting their
265 * selectivity. However, since RestrictInfo nodes aren't copied when
266 * linking them into different lists, it should be sufficient to use
267 * pointer comparison to remove duplicates.)
269 * Note that we force the clauses to be treated as non-join clauses
270 * during selectivity estimation.
272 allclauses = list_union_ptr(baserel->baserestrictinfo,
274 path->path.rows = baserel->tuples *
275 clauselist_selectivity(root,
277 baserel->relid, /* do not use 0! */
280 if (path->path.rows > baserel->rows)
281 path->path.rows = baserel->rows;
282 path->path.rows = clamp_row_est(path->path.rows);
286 /* allclauses should just be the rel's restriction clauses */
287 allclauses = baserel->baserestrictinfo;
290 * The number of rows is the same as the parent rel's estimate, since
291 * this isn't a parameterized path.
293 path->path.rows = baserel->rows;
296 if (!enable_indexscan)
297 startup_cost += disable_cost;
298 /* we don't need to check enable_indexonlyscan; indxpath.c does that */
301 * Call index-access-method-specific code to estimate the processing cost
302 * for scanning the index, as well as the selectivity of the index (ie,
303 * the fraction of main-table tuples we will have to retrieve) and its
304 * correlation to the main-table tuple order.
306 OidFunctionCall7(index->amcostestimate,
307 PointerGetDatum(root),
308 PointerGetDatum(path),
309 Float8GetDatum(loop_count),
310 PointerGetDatum(&indexStartupCost),
311 PointerGetDatum(&indexTotalCost),
312 PointerGetDatum(&indexSelectivity),
313 PointerGetDatum(&indexCorrelation));
316 * Save amcostestimate's results for possible use in bitmap scan planning.
317 * We don't bother to save indexStartupCost or indexCorrelation, because a
318 * bitmap scan doesn't care about either.
320 path->indextotalcost = indexTotalCost;
321 path->indexselectivity = indexSelectivity;
323 /* all costs for touching index itself included here */
324 startup_cost += indexStartupCost;
325 run_cost += indexTotalCost - indexStartupCost;
327 /* estimate number of main-table tuples fetched */
328 tuples_fetched = clamp_row_est(indexSelectivity * baserel->tuples);
330 /* fetch estimated page costs for tablespace containing table */
331 get_tablespace_page_costs(baserel->reltablespace,
332 &spc_random_page_cost,
336 * Estimate number of main-table pages fetched, and compute I/O cost.
338 * When the index ordering is uncorrelated with the table ordering,
339 * we use an approximation proposed by Mackert and Lohman (see
340 * index_pages_fetched() for details) to compute the number of pages
341 * fetched, and then charge spc_random_page_cost per page fetched.
343 * When the index ordering is exactly correlated with the table ordering
344 * (just after a CLUSTER, for example), the number of pages fetched should
345 * be exactly selectivity * table_size. What's more, all but the first
346 * will be sequential fetches, not the random fetches that occur in the
347 * uncorrelated case. So if the number of pages is more than 1, we ought to charge
349 * spc_random_page_cost + (pages_fetched - 1) * spc_seq_page_cost
350 * For partially-correlated indexes, we ought to charge somewhere between
351 * these two estimates. We currently interpolate linearly between the
352 * estimates based on the correlation squared (XXX is that appropriate?).
354 * If it's an index-only scan, then we will not need to fetch any heap
355 * pages for which the visibility map shows all tuples are visible.
356 * Hence, reduce the estimated number of heap fetches accordingly.
357 * We use the measured fraction of the entire heap that is all-visible,
358 * which might not be particularly relevant to the subset of the heap
359 * that this query will fetch; but it's not clear how to do better.
365 * For repeated indexscans, the appropriate estimate for the
366 * uncorrelated case is to scale up the number of tuples fetched in
367 * the Mackert and Lohman formula by the number of scans, so that we
368 * estimate the number of pages fetched by all the scans; then
369 * pro-rate the costs for one scan. In this case we assume all the
370 * fetches are random accesses.
372 pages_fetched = index_pages_fetched(tuples_fetched * loop_count,
374 (double) index->pages,
378 pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));
380 max_IO_cost = (pages_fetched * spc_random_page_cost) / loop_count;
383 * In the perfectly correlated case, the number of pages touched by
384 * each scan is selectivity * table_size, and we can use the Mackert
385 * and Lohman formula at the page level to estimate how much work is
386 * saved by caching across scans. We still assume all the fetches are
387 * random, though, which is an overestimate that's hard to correct for
388 * without double-counting the cache effects. (But in most cases
389 * where such a plan is actually interesting, only one page would get
390 * fetched per scan anyway, so it shouldn't matter much.)
392 pages_fetched = ceil(indexSelectivity * (double) baserel->pages);
394 pages_fetched = index_pages_fetched(pages_fetched * loop_count,
396 (double) index->pages,
400 pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));
402 min_IO_cost = (pages_fetched * spc_random_page_cost) / loop_count;
407 * Normal case: apply the Mackert and Lohman formula, and then
408 * interpolate between that and the correlation-derived result.
410 pages_fetched = index_pages_fetched(tuples_fetched,
412 (double) index->pages,
416 pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));
418 /* max_IO_cost is for the perfectly uncorrelated case (csquared=0) */
419 max_IO_cost = pages_fetched * spc_random_page_cost;
421 /* min_IO_cost is for the perfectly correlated case (csquared=1) */
422 pages_fetched = ceil(indexSelectivity * (double) baserel->pages);
425 pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));
427 if (pages_fetched > 0)
429 min_IO_cost = spc_random_page_cost;
430 if (pages_fetched > 1)
431 min_IO_cost += (pages_fetched - 1) * spc_seq_page_cost;
438 * Now interpolate based on estimated index order correlation to get total
439 * disk I/O cost for main table accesses.
441 csquared = indexCorrelation * indexCorrelation;
443 run_cost += max_IO_cost + csquared * (min_IO_cost - max_IO_cost);
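/*
* Illustrative example of this interpolation (made-up numbers): with
* max_IO_cost = 4000 for the fully random case, min_IO_cost = 1000 for the
* fully correlated case, and indexCorrelation = 0.5, csquared is 0.25 and
* the I/O charge is 4000 + 0.25 * (1000 - 4000) = 3250.
*/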
446 * Estimate CPU costs per tuple.
448 * What we want here is cpu_tuple_cost plus the evaluation costs of any
449 * qual clauses that we have to evaluate as qpquals. We approximate that
450 * list as allclauses minus any clauses appearing in indexquals (as
451 * before, assuming that pointer equality is enough to recognize duplicate
452 * RestrictInfos). This method neglects some considerations such as
453 * clauses that needn't be checked because they are implied by a partial
454 * index's predicate. It does not seem worth the cycles to try to factor
455 * those things in at this stage, even though createplan.c will take pains
456 * to remove such unnecessary clauses from the qpquals list if this path
457 * is selected for use.
459 cost_qual_eval(&qpqual_cost,
460 list_difference_ptr(allclauses, path->indexquals),
463 startup_cost += qpqual_cost.startup;
464 cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
466 run_cost += cpu_per_tuple * tuples_fetched;
468 path->path.startup_cost = startup_cost;
469 path->path.total_cost = startup_cost + run_cost;
473 * index_pages_fetched
474 * Estimate the number of pages actually fetched after accounting for
477 * We use an approximation proposed by Mackert and Lohman, "Index Scans
478 * Using a Finite LRU Buffer: A Validated I/O Model", ACM Transactions
479 * on Database Systems, Vol. 14, No. 3, September 1989, Pages 401-424.
480 * The Mackert and Lohman approximation is that the number of pages
483 * min(2TNs/(2T+Ns), T) when T <= b
484 * 2TNs/(2T+Ns) when T > b and Ns <= 2Tb/(2T-b)
485 * b + (Ns - 2Tb/(2T-b))*(T-b)/T when T > b and Ns > 2Tb/(2T-b)
487 * T = # pages in table
488 * N = # tuples in table
489 * s = selectivity = fraction of table to be scanned
490 * b = # buffer pages available (we include kernel space here)
492 * We assume that effective_cache_size is the total number of buffer pages
493 * available for the whole query, and pro-rate that space across all the
494 * tables in the query and the index currently under consideration. (This
495 * ignores space needed for other indexes used by the query, but since we
496 * don't know which indexes will get used, we can't estimate that very well;
497 * and in any case counting all the tables may well be an overestimate, since
498 * depending on the join plan not all the tables may be scanned concurrently.)
500 * The product Ns is the number of tuples fetched; we pass in that
501 * product rather than calculating it here. "pages" is the number of pages
502 * in the object under consideration (either an index or a table).
503 * "index_pages" is the amount to add to the total table space, which was
504 * computed for us by query_planner.
506 * Caller is expected to have ensured that tuples_fetched is greater than zero
507 * and rounded to integer (see clamp_row_est). The result will likewise be
508 * greater than zero and integral.
511 index_pages_fetched(double tuples_fetched, BlockNumber pages,
512 double index_pages, PlannerInfo *root)
514 double pages_fetched;
519 /* T is # pages in table, but don't allow it to be zero */
520 T = (pages > 1) ? (double) pages : 1.0;
522 /* Compute number of pages assumed to be competing for cache space */
523 total_pages = root->total_table_pages + index_pages;
524 total_pages = Max(total_pages, 1.0);
525 Assert(T <= total_pages);
527 /* b is pro-rated share of effective_cache_size */
528 b = (double) effective_cache_size * T / total_pages;
530 /* force it positive and integral */
536 /* This part is the Mackert and Lohman formula */
540 (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
541 if (pages_fetched >= T)
544 pages_fetched = ceil(pages_fetched);
550 lim = (2.0 * T * b) / (2.0 * T - b);
551 if (tuples_fetched <= lim)
554 (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
559 b + (tuples_fetched - lim) * (T - b) / T;
561 pages_fetched = ceil(pages_fetched);
563 return pages_fetched;
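/*
* Worked example of the formula above (hypothetical numbers): for T = 1000
* pages with a cache share b >= T, fetching Ns = 1000 tuples gives
* pages_fetched = 2*1000*1000 / (2*1000 + 1000) = 666.67 -> 667
* i.e. noticeably fewer than 1000 page reads, since some of the tuples are
* expected to fall on pages that were already fetched.
*/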
567 * get_indexpath_pages
568 * Determine the total size of the indexes used in a bitmap index path.
570 * Note: if the same index is used more than once in a bitmap tree, we will
571 * count it multiple times, which perhaps is the wrong thing ... but it's
572 * not completely clear, and detecting duplicates is difficult, so ignore it
576 get_indexpath_pages(Path *bitmapqual)
581 if (IsA(bitmapqual, BitmapAndPath))
583 BitmapAndPath *apath = (BitmapAndPath *) bitmapqual;
585 foreach(l, apath->bitmapquals)
587 result += get_indexpath_pages((Path *) lfirst(l));
590 else if (IsA(bitmapqual, BitmapOrPath))
592 BitmapOrPath *opath = (BitmapOrPath *) bitmapqual;
594 foreach(l, opath->bitmapquals)
596 result += get_indexpath_pages((Path *) lfirst(l));
599 else if (IsA(bitmapqual, IndexPath))
601 IndexPath *ipath = (IndexPath *) bitmapqual;
603 result = (double) ipath->indexinfo->pages;
606 elog(ERROR, "unrecognized node type: %d", nodeTag(bitmapqual));
612 * cost_bitmap_heap_scan
613 * Determines and returns the cost of scanning a relation using a bitmap
614 * index-then-heap plan.
616 * 'baserel' is the relation to be scanned
617 * 'bitmapqual' is a tree of IndexPaths, BitmapAndPaths, and BitmapOrPaths
618 * 'loop_count' is the number of repetitions of the indexscan to factor into
619 * estimates of caching behavior
621 * Note: the component IndexPaths in bitmapqual should have been costed
622 * using the same loop_count.
625 cost_bitmap_heap_scan(Path *path, PlannerInfo *root, RelOptInfo *baserel,
626 Path *bitmapqual, double loop_count)
628 Cost startup_cost = 0;
631 Selectivity indexSelectivity;
634 double tuples_fetched;
635 double pages_fetched;
636 double spc_seq_page_cost,
637 spc_random_page_cost;
640 /* Should only be applied to base relations */
641 Assert(IsA(baserel, RelOptInfo));
642 Assert(baserel->relid > 0);
643 Assert(baserel->rtekind == RTE_RELATION);
645 /* Estimate the number of rows returned by the bitmap scan */
646 if (path->required_outer)
649 * The estimate should be less than baserel->rows because of the
650 * additional selectivity of the join clauses. We make use of the
651 * selectivity estimated for the bitmap to do this; this isn't really
652 * quite right since there may be restriction conditions not included
656 Selectivity indexSelectivity;
658 cost_bitmap_tree_node(bitmapqual, &indexTotalCost, &indexSelectivity);
659 path->rows = baserel->tuples * indexSelectivity;
660 if (path->rows > baserel->rows)
661 path->rows = baserel->rows;
662 path->rows = clamp_row_est(path->rows);
667 * The number of rows is the same as the parent rel's estimate, since
668 * this isn't a parameterized path.
670 path->rows = baserel->rows;
673 if (!enable_bitmapscan)
674 startup_cost += disable_cost;
677 * Fetch total cost of obtaining the bitmap, as well as its total
680 cost_bitmap_tree_node(bitmapqual, &indexTotalCost, &indexSelectivity);
682 startup_cost += indexTotalCost;
684 /* Fetch estimated page costs for tablespace containing table. */
685 get_tablespace_page_costs(baserel->reltablespace,
686 &spc_random_page_cost,
690 * Estimate number of main-table pages fetched.
692 tuples_fetched = clamp_row_est(indexSelectivity * baserel->tuples);
694 T = (baserel->pages > 1) ? (double) baserel->pages : 1.0;
699 * For repeated bitmap scans, scale up the number of tuples fetched in
700 * the Mackert and Lohman formula by the number of scans, so that we
701 * estimate the number of pages fetched by all the scans. Then
702 * pro-rate for one scan.
704 pages_fetched = index_pages_fetched(tuples_fetched * loop_count,
706 get_indexpath_pages(bitmapqual),
708 pages_fetched /= loop_count;
713 * For a single scan, the number of heap pages that need to be fetched
714 * is the same as the Mackert and Lohman formula for the case T <= b
715 * (ie, no re-reads needed).
717 pages_fetched = (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
719 if (pages_fetched >= T)
722 pages_fetched = ceil(pages_fetched);
725 * For small numbers of pages we should charge spc_random_page_cost
726 * apiece, while if nearly all the table's pages are being read, it's more
727 * appropriate to charge spc_seq_page_cost apiece. The effect is
728 * nonlinear, too. For lack of a better idea, interpolate like this to
729 * determine the cost per page.
731 if (pages_fetched >= 2.0)
732 cost_per_page = spc_random_page_cost -
733 (spc_random_page_cost - spc_seq_page_cost)
734 * sqrt(pages_fetched / T);
736 cost_per_page = spc_random_page_cost;
738 run_cost += pages_fetched * cost_per_page;
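/*
* For illustration (default page costs of 4.0 random and 1.0 sequential
* assumed): a bitmap scan expected to touch a quarter of the table's pages
* is charged about 4.0 - (4.0 - 1.0) * sqrt(0.25) = 2.5 per page, roughly
* halfway between a random and a sequential fetch.
*/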
741 * Estimate CPU costs per tuple.
743 * Often the indexquals don't need to be rechecked at each tuple ... but
744 * not always, especially not if there are enough tuples involved that the
745 * bitmaps become lossy. For the moment, just assume they will be rechecked always.
748 startup_cost += baserel->baserestrictcost.startup;
749 cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
751 run_cost += cpu_per_tuple * tuples_fetched;
753 path->startup_cost = startup_cost;
754 path->total_cost = startup_cost + run_cost;
758 * cost_bitmap_tree_node
759 * Extract cost and selectivity from a bitmap tree node (index/and/or)
762 cost_bitmap_tree_node(Path *path, Cost *cost, Selectivity *selec)
764 if (IsA(path, IndexPath))
766 *cost = ((IndexPath *) path)->indextotalcost;
767 *selec = ((IndexPath *) path)->indexselectivity;
770 * Charge a small amount per retrieved tuple to reflect the costs of
771 * manipulating the bitmap. This is mostly to make sure that a bitmap
772 * scan doesn't look to be the same cost as an indexscan to retrieve a single tuple.
775 *cost += 0.1 * cpu_operator_cost * path->rows;
777 else if (IsA(path, BitmapAndPath))
779 *cost = path->total_cost;
780 *selec = ((BitmapAndPath *) path)->bitmapselectivity;
782 else if (IsA(path, BitmapOrPath))
784 *cost = path->total_cost;
785 *selec = ((BitmapOrPath *) path)->bitmapselectivity;
789 elog(ERROR, "unrecognized node type: %d", nodeTag(path));
790 *cost = *selec = 0; /* keep compiler quiet */
795 * cost_bitmap_and_node
796 * Estimate the cost of a BitmapAnd node
798 * Note that this considers only the costs of index scanning and bitmap
799 * creation, not the eventual heap access. In that sense the object isn't
800 * truly a Path, but it has enough path-like properties (costs in particular)
801 * to warrant treating it as one. We don't bother to set the path rows field,
805 cost_bitmap_and_node(BitmapAndPath *path, PlannerInfo *root)
812 * We estimate AND selectivity on the assumption that the inputs are
813 * independent. This is probably often wrong, but we don't have the info
816 * The runtime cost of the BitmapAnd itself is estimated at 100x
817 * cpu_operator_cost for each tbm_intersect needed. Probably too small,
818 * definitely too simplistic?
822 foreach(l, path->bitmapquals)
824 Path *subpath = (Path *) lfirst(l);
826 Selectivity subselec;
828 cost_bitmap_tree_node(subpath, &subCost, &subselec);
832 totalCost += subCost;
833 if (l != list_head(path->bitmapquals))
834 totalCost += 100.0 * cpu_operator_cost;
836 path->bitmapselectivity = selec;
837 path->path.rows = 0; /* per above, not used */
838 path->path.startup_cost = totalCost;
839 path->path.total_cost = totalCost;
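/*
* Example of the independence assumption above (illustrative only): two
* inputs with selectivities 0.1 and 0.2 combine to an AND selectivity of
* 0.1 * 0.2 = 0.02, and the single tbm_intersect needed is charged at
* 100 * cpu_operator_cost, i.e. 0.25 with the default setting of 0.0025.
*/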
843 * cost_bitmap_or_node
844 * Estimate the cost of a BitmapOr node
846 * See comments for cost_bitmap_and_node.
849 cost_bitmap_or_node(BitmapOrPath *path, PlannerInfo *root)
856 * We estimate OR selectivity on the assumption that the inputs are
857 * non-overlapping, since that's often the case in "x IN (list)" type
858 * situations. Of course, we clamp to 1.0 at the end.
860 * The runtime cost of the BitmapOr itself is estimated at 100x
861 * cpu_operator_cost for each tbm_union needed. Probably too small,
862 * definitely too simplistic? We are aware that the tbm_unions are
863 * optimized out when the inputs are BitmapIndexScans.
867 foreach(l, path->bitmapquals)
869 Path *subpath = (Path *) lfirst(l);
871 Selectivity subselec;
873 cost_bitmap_tree_node(subpath, &subCost, &subselec);
877 totalCost += subCost;
878 if (l != list_head(path->bitmapquals) &&
879 !IsA(subpath, IndexPath))
880 totalCost += 100.0 * cpu_operator_cost;
882 path->bitmapselectivity = Min(selec, 1.0);
883 path->path.rows = 0; /* per above, not used */
884 path->path.startup_cost = totalCost;
885 path->path.total_cost = totalCost;
890 * Determines and returns the cost of scanning a relation using TIDs.
893 cost_tidscan(Path *path, PlannerInfo *root,
894 RelOptInfo *baserel, List *tidquals)
896 Cost startup_cost = 0;
898 bool isCurrentOf = false;
900 QualCost tid_qual_cost;
903 double spc_random_page_cost;
905 /* Should only be applied to base relations */
906 Assert(baserel->relid > 0);
907 Assert(baserel->rtekind == RTE_RELATION);
909 /* For now, tidscans are never parameterized */
910 path->rows = baserel->rows;
912 /* Count how many tuples we expect to retrieve */
916 if (IsA(lfirst(l), ScalarArrayOpExpr))
918 /* Each element of the array yields 1 tuple */
919 ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) lfirst(l);
920 Node *arraynode = (Node *) lsecond(saop->args);
922 ntuples += estimate_array_length(arraynode);
924 else if (IsA(lfirst(l), CurrentOfExpr))
926 /* CURRENT OF yields 1 tuple */
932 /* It's just CTID = something, count 1 tuple */
938 * We must force TID scan for WHERE CURRENT OF, because only nodeTidscan.c
939 * understands how to do it correctly. Therefore, honor enable_tidscan
940 * only when CURRENT OF isn't present. Also note that cost_qual_eval
941 * counts a CurrentOfExpr as having startup cost disable_cost, which we
942 * subtract off here; that's to prevent other plan types such as seqscan from being chosen.
947 Assert(baserel->baserestrictcost.startup >= disable_cost);
948 startup_cost -= disable_cost;
950 else if (!enable_tidscan)
951 startup_cost += disable_cost;
954 * The TID qual expressions will be computed once, any other baserestrict
955 * quals once per retrieved tuple.
957 cost_qual_eval(&tid_qual_cost, tidquals, root);
959 /* fetch estimated page cost for tablespace containing table */
960 get_tablespace_page_costs(baserel->reltablespace,
961 &spc_random_page_cost,
964 /* disk costs --- assume each tuple on a different page */
965 run_cost += spc_random_page_cost * ntuples;
968 startup_cost += baserel->baserestrictcost.startup +
969 tid_qual_cost.per_tuple;
970 cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple -
971 tid_qual_cost.per_tuple;
972 run_cost += cpu_per_tuple * ntuples;
974 path->startup_cost = startup_cost;
975 path->total_cost = startup_cost + run_cost;
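/*
* Illustrative tidscan costing (hypothetical qual, default settings): a
* "ctid = ANY(...)" qual over a 5-element array gives ntuples = 5, so the
* disk component is 5 * 4.0 = 20 with the default random_page_cost, each
* tuple being assumed to live on a different page, plus a small per-tuple
* CPU charge.
*/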
980 * Determines and returns the cost of scanning a subquery RTE.
983 cost_subqueryscan(Path *path, RelOptInfo *baserel)
989 /* Should only be applied to base relations that are subqueries */
990 Assert(baserel->relid > 0);
991 Assert(baserel->rtekind == RTE_SUBQUERY);
993 /* subqueryscans are never parameterized */
994 path->rows = baserel->rows;
997 * Cost of path is cost of evaluating the subplan, plus cost of evaluating
998 * any restriction clauses that will be attached to the SubqueryScan node,
999 * plus cpu_tuple_cost to account for selection and projection overhead.
1001 path->startup_cost = baserel->subplan->startup_cost;
1002 path->total_cost = baserel->subplan->total_cost;
1004 startup_cost = baserel->baserestrictcost.startup;
1005 cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
1006 run_cost = cpu_per_tuple * baserel->tuples;
1008 path->startup_cost += startup_cost;
1009 path->total_cost += startup_cost + run_cost;
1014 * Determines and returns the cost of scanning a function RTE.
1017 cost_functionscan(Path *path, PlannerInfo *root, RelOptInfo *baserel)
1019 Cost startup_cost = 0;
1025 /* Should only be applied to base relations that are functions */
1026 Assert(baserel->relid > 0);
1027 rte = planner_rt_fetch(baserel->relid, root);
1028 Assert(rte->rtekind == RTE_FUNCTION);
1030 /* functionscans are never parameterized */
1031 path->rows = baserel->rows;
1034 * Estimate costs of executing the function expression.
1036 * Currently, nodeFunctionscan.c always executes the function to
1037 * completion before returning any rows, and caches the results in a
1038 * tuplestore. So the function eval cost is all startup cost, and per-row
1039 * costs are minimal.
1041 * XXX in principle we ought to charge tuplestore spill costs if the
1042 * number of rows is large. However, given how phony our rowcount
1043 * estimates for functions tend to be, there's not a lot of point in that
1044 * refinement right now.
1046 cost_qual_eval_node(&exprcost, rte->funcexpr, root);
1048 startup_cost += exprcost.startup + exprcost.per_tuple;
1050 /* Add scanning CPU costs */
1051 startup_cost += baserel->baserestrictcost.startup;
1052 cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
1053 run_cost += cpu_per_tuple * baserel->tuples;
1055 path->startup_cost = startup_cost;
1056 path->total_cost = startup_cost + run_cost;
1061 * Determines and returns the cost of scanning a VALUES RTE.
1064 cost_valuesscan(Path *path, PlannerInfo *root, RelOptInfo *baserel)
1066 Cost startup_cost = 0;
1070 /* Should only be applied to base relations that are values lists */
1071 Assert(baserel->relid > 0);
1072 Assert(baserel->rtekind == RTE_VALUES);
1074 /* valuesscans are never parameterized */
1075 path->rows = baserel->rows;
1078 * For now, estimate list evaluation cost at one operator eval per list
1079 * (probably pretty bogus, but is it worth being smarter?)
1081 cpu_per_tuple = cpu_operator_cost;
1083 /* Add scanning CPU costs */
1084 startup_cost += baserel->baserestrictcost.startup;
1085 cpu_per_tuple += cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
1086 run_cost += cpu_per_tuple * baserel->tuples;
1088 path->startup_cost = startup_cost;
1089 path->total_cost = startup_cost + run_cost;
1094 * Determines and returns the cost of scanning a CTE RTE.
1096 * Note: this is used for both self-reference and regular CTEs; the
1097 * possible cost differences are below the threshold of what we could
1098 * estimate accurately anyway. Note that the costs of evaluating the
1099 * referenced CTE query are added into the final plan as initplan costs,
1100 * and should NOT be counted here.
1103 cost_ctescan(Path *path, PlannerInfo *root, RelOptInfo *baserel)
1105 Cost startup_cost = 0;
1109 /* Should only be applied to base relations that are CTEs */
1110 Assert(baserel->relid > 0);
1111 Assert(baserel->rtekind == RTE_CTE);
1113 /* ctescans are never parameterized */
1114 path->rows = baserel->rows;
1116 /* Charge one CPU tuple cost per row for tuplestore manipulation */
1117 cpu_per_tuple = cpu_tuple_cost;
1119 /* Add scanning CPU costs */
1120 startup_cost += baserel->baserestrictcost.startup;
1121 cpu_per_tuple += cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
1122 run_cost += cpu_per_tuple * baserel->tuples;
1124 path->startup_cost = startup_cost;
1125 path->total_cost = startup_cost + run_cost;
1129 * cost_recursive_union
1130 * Determines and returns the cost of performing a recursive union,
1131 * and also the estimated output size.
1133 * We are given Plans for the nonrecursive and recursive terms.
1135 * Note that the arguments and output are Plans, not Paths as in most of
1136 * the rest of this module. That's because we don't bother setting up a
1137 * Path representation for recursive union --- we have only one way to do it.
1140 cost_recursive_union(Plan *runion, Plan *nrterm, Plan *rterm)
1146 /* We probably have decent estimates for the non-recursive term */
1147 startup_cost = nrterm->startup_cost;
1148 total_cost = nrterm->total_cost;
1149 total_rows = nrterm->plan_rows;
1152 * We arbitrarily assume that about 10 recursive iterations will be
1153 * needed, and that we've managed to get a good fix on the cost and output
1154 * size of each one of them. These are mighty shaky assumptions but it's
1155 * hard to see how to do better.
1157 total_cost += 10 * rterm->total_cost;
1158 total_rows += 10 * rterm->plan_rows;
1161 * Also charge cpu_tuple_cost per row to account for the costs of
1162 * manipulating the tuplestores. (We don't worry about possible
1163 * spill-to-disk costs.)
1165 total_cost += cpu_tuple_cost * total_rows;
1167 runion->startup_cost = startup_cost;
1168 runion->total_cost = total_cost;
1169 runion->plan_rows = total_rows;
1170 runion->plan_width = Max(nrterm->plan_width, rterm->plan_width);
1175 * Determines and returns the cost of sorting a relation, including
1176 * the cost of reading the input data.
1178 * If the total volume of data to sort is less than sort_mem, we will do
1179 * an in-memory sort, which requires no I/O and about t*log2(t) tuple
1180 * comparisons for t tuples.
1182 * If the total volume exceeds sort_mem, we switch to a tape-style merge
1183 * algorithm. There will still be about t*log2(t) tuple comparisons in
1184 * total, but we will also need to write and read each tuple once per
1185 * merge pass. We expect about ceil(logM(r)) merge passes where r is the
1186 * number of initial runs formed and M is the merge order used by tuplesort.c.
1187 * Since the average initial run should be about twice sort_mem, we have
1188 * disk traffic = 2 * relsize * ceil(logM(relsize / (2*sort_mem)))
1189 * cpu = comparison_cost * t * log2(t)
1191 * If the sort is bounded (i.e., only the first k result tuples are needed)
1192 * and k tuples can fit into sort_mem, we use a heap method that keeps only
1193 * k tuples in the heap; this will require about t*log2(k) tuple comparisons.
1195 * The disk traffic is assumed to be 3/4ths sequential and 1/4th random
1196 * accesses (XXX can't we refine that guess?)
1198 * By default, we charge two operator evals per tuple comparison, which should
1199 * be in the right ballpark in most cases. The caller can tweak this by
1200 * specifying nonzero comparison_cost; typically that's used for any extra
1201 * work that has to be done to prepare the inputs to the comparison operators.
1203 * 'pathkeys' is a list of sort keys
1204 * 'input_cost' is the total cost for reading the input data
1205 * 'tuples' is the number of tuples in the relation
1206 * 'width' is the average tuple width in bytes
1207 * 'comparison_cost' is the extra cost per comparison, if any
1208 * 'sort_mem' is the number of kilobytes of work memory allowed for the sort
1209 * 'limit_tuples' is the bound on the number of output tuples; -1 if no bound
1211 * NOTE: some callers currently pass NIL for pathkeys because they
1212 * can't conveniently supply the sort keys. Since this routine doesn't
1213 * currently do anything with pathkeys anyway, that doesn't matter...
1214 * but if it ever does, it should react gracefully to lack of key data.
1215 * (Actually, the thing we'd most likely be interested in is just the number
1216 * of sort keys, which all callers *could* supply.)
1219 cost_sort(Path *path, PlannerInfo *root,
1220 List *pathkeys, Cost input_cost, double tuples, int width,
1221 Cost comparison_cost, int sort_mem,
1222 double limit_tuples)
1224 Cost startup_cost = input_cost;
1226 double input_bytes = relation_byte_size(tuples, width);
1227 double output_bytes;
1228 double output_tuples;
1229 long sort_mem_bytes = sort_mem * 1024L;
1232 startup_cost += disable_cost;
1234 path->rows = tuples;
1237 * We want to be sure the cost of a sort is never estimated as zero, even
1238 * if passed-in tuple count is zero. Besides, mustn't do log(0)...
1243 /* Include the default cost-per-comparison */
1244 comparison_cost += 2.0 * cpu_operator_cost;
1246 /* Do we have a useful LIMIT? */
1247 if (limit_tuples > 0 && limit_tuples < tuples)
1249 output_tuples = limit_tuples;
1250 output_bytes = relation_byte_size(output_tuples, width);
1254 output_tuples = tuples;
1255 output_bytes = input_bytes;
1258 if (output_bytes > sort_mem_bytes)
1261 * We'll have to use a disk-based sort of all the tuples
1263 double npages = ceil(input_bytes / BLCKSZ);
1264 double nruns = (input_bytes / sort_mem_bytes) * 0.5;
1265 double mergeorder = tuplesort_merge_order(sort_mem_bytes);
1267 double npageaccesses;
1272 * Assume about N log2 N comparisons
1274 startup_cost += comparison_cost * tuples * LOG2(tuples);
1278 /* Compute logM(r) as log(r) / log(M) */
1279 if (nruns > mergeorder)
1280 log_runs = ceil(log(nruns) / log(mergeorder));
1283 npageaccesses = 2.0 * npages * log_runs;
1284 /* Assume 3/4ths of accesses are sequential, 1/4th are not */
1285 startup_cost += npageaccesses *
1286 (seq_page_cost * 0.75 + random_page_cost * 0.25);
1288 else if (tuples > 2 * output_tuples || input_bytes > sort_mem_bytes)
1291 * We'll use a bounded heap-sort keeping just K tuples in memory, for
1292 * a total number of tuple comparisons of N log2 K; but the constant
1293 * factor is a bit higher than for quicksort. Tweak it so that the
1294 * cost curve is continuous at the crossover point.
1296 startup_cost += comparison_cost * tuples * LOG2(2.0 * output_tuples);
1300 /* We'll use plain quicksort on all the input tuples */
1301 startup_cost += comparison_cost * tuples * LOG2(tuples);
1305 * Also charge a small amount (arbitrarily set equal to operator cost) per
1306 * extracted tuple. We don't charge cpu_tuple_cost because a Sort node
1307 * doesn't do qual-checking or projection, so it has less overhead than
1308 * most plan nodes. Note it's correct to use tuples not output_tuples
1309 * here --- the upper LIMIT will pro-rate the run cost so we'd be double
1310 * counting the LIMIT otherwise.
1312 run_cost += cpu_operator_cost * tuples;
1314 path->startup_cost = startup_cost;
1315 path->total_cost = startup_cost + run_cost;
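/*
* Worked example of the external-sort branch above (hypothetical numbers;
* BLCKSZ = 8192 and default page costs assumed): input_bytes = 80MB with
* sort_mem = 4MB gives npages = 10240 and nruns = 10; if the merge order
* is at least 10, a single merge pass suffices, so npageaccesses =
* 2 * 10240 * 1 = 20480 and the disk component of startup cost is
* 20480 * (1.0 * 0.75 + 4.0 * 0.25) = 35840.
*/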
1320 * Determines and returns the cost of a MergeAppend node.
1322 * MergeAppend merges several pre-sorted input streams, using a heap that
1323 * at any given instant holds the next tuple from each stream. If there
1324 * are N streams, we need about N*log2(N) tuple comparisons to construct
1325 * the heap at startup, and then for each output tuple, about log2(N)
1326 * comparisons to delete the top heap entry and another log2(N) comparisons
1327 * to insert its successor from the same stream.
1329 * (The effective value of N will drop once some of the input streams are
1330 * exhausted, but it seems unlikely to be worth trying to account for that.)
1332 * The heap is never spilled to disk, since we assume N is not very large.
1333 * So this is much simpler than cost_sort.
1335 * As in cost_sort, we charge two operator evals per tuple comparison.
1337 * 'pathkeys' is a list of sort keys
1338 * 'n_streams' is the number of input streams
1339 * 'input_startup_cost' is the sum of the input streams' startup costs
1340 * 'input_total_cost' is the sum of the input streams' total costs
1341 * 'tuples' is the number of tuples in all the streams
1344 cost_merge_append(Path *path, PlannerInfo *root,
1345 List *pathkeys, int n_streams,
1346 Cost input_startup_cost, Cost input_total_cost,
1349 Cost startup_cost = 0;
1351 Cost comparison_cost;
1358 N = (n_streams < 2) ? 2.0 : (double) n_streams;
1361 /* Assumed cost per tuple comparison */
1362 comparison_cost = 2.0 * cpu_operator_cost;
1364 /* Heap creation cost */
1365 startup_cost += comparison_cost * N * logN;
1367 /* Per-tuple heap maintenance cost */
1368 run_cost += tuples * comparison_cost * 2.0 * logN;
1371 * Also charge a small amount (arbitrarily set equal to operator cost) per
1372 * extracted tuple. We don't charge cpu_tuple_cost because a MergeAppend
1373 * node doesn't do qual-checking or projection, so it has less overhead
1374 * than most plan nodes.
1376 run_cost += cpu_operator_cost * tuples;
1378 path->startup_cost = startup_cost + input_startup_cost;
1379 path->total_cost = startup_cost + run_cost + input_total_cost;
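/*
* For instance (illustrative): merging N = 4 presorted streams costs about
* N * log2(N) = 8 comparisons to build the heap at startup, and each output
* tuple then pays about 2 * log2(N) = 4 comparisons, every comparison being
* charged at 2 * cpu_operator_cost.
*/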
1384 * Determines and returns the cost of materializing a relation, including
1385 * the cost of reading the input data.
1387 * If the total volume of data to materialize exceeds work_mem, we will need
1388 * to write it to disk, so the cost is much higher in that case.
1390 * Note that here we are estimating the costs for the first scan of the
1391 * relation, so the materialization is all overhead --- any savings will
1392 * occur only on rescan, which is estimated in cost_rescan.
1395 cost_material(Path *path,
1396 Cost input_startup_cost, Cost input_total_cost,
1397 double tuples, int width)
1399 Cost startup_cost = input_startup_cost;
1400 Cost run_cost = input_total_cost - input_startup_cost;
1401 double nbytes = relation_byte_size(tuples, width);
1402 long work_mem_bytes = work_mem * 1024L;
1404 path->rows = tuples;
1407 * Whether spilling or not, charge 2x cpu_operator_cost per tuple to
1408 * reflect bookkeeping overhead. (This rate must be more than what
1409 * cost_rescan charges for materialize, ie, cpu_operator_cost per tuple;
1410 * if it is exactly the same then there will be a cost tie between
1411 * nestloop with A outer, materialized B inner and nestloop with B outer,
1412 * materialized A inner. The extra cost ensures we'll prefer
1413 * materializing the smaller rel.) Note that this is normally a good deal
1414 * less than cpu_tuple_cost, which is OK because a Material plan node
1415 * doesn't do qual-checking or projection, so it's got less overhead than most plan nodes.
1418 run_cost += 2 * cpu_operator_cost * tuples;
1421 * If we will spill to disk, charge at the rate of seq_page_cost per page.
1422 * This cost is assumed to be evenly spread through the plan run phase,
1423 * which isn't exactly accurate but our cost model doesn't allow for
1424 * nonuniform costs within the run phase.
1426 if (nbytes > work_mem_bytes)
1428 double npages = ceil(nbytes / BLCKSZ);
1430 run_cost += seq_page_cost * npages;
1433 path->startup_cost = startup_cost;
1434 path->total_cost = startup_cost + run_cost;
1439 * Determines and returns the cost of performing an Agg plan node,
1440 * including the cost of its input.
1442 * aggcosts can be NULL when there are no actual aggregate functions (i.e.,
1443 * we are using a hashed Agg node just to do grouping).
1445 * Note: when aggstrategy == AGG_SORTED, caller must ensure that input costs
1446 * are for appropriately-sorted input.
1449 cost_agg(Path *path, PlannerInfo *root,
1450 AggStrategy aggstrategy, const AggClauseCosts *aggcosts,
1451 int numGroupCols, double numGroups,
1452 Cost input_startup_cost, Cost input_total_cost,
1453 double input_tuples)
1455 double output_tuples;
1458 AggClauseCosts dummy_aggcosts;
1460 /* Use all-zero per-aggregate costs if NULL is passed */
1461 if (aggcosts == NULL)
1463 Assert(aggstrategy == AGG_HASHED);
1464 MemSet(&dummy_aggcosts, 0, sizeof(AggClauseCosts));
1465 aggcosts = &dummy_aggcosts;
1469 * The transCost.per_tuple component of aggcosts should be charged once
1470 * per input tuple, corresponding to the costs of evaluating the aggregate
1471 * transfns and their input expressions (with any startup cost of course
1472 * charged but once). The finalCost component is charged once per output
1473 * tuple, corresponding to the costs of evaluating the finalfns.
1475 * If we are grouping, we charge an additional cpu_operator_cost per
1476 * grouping column per input tuple for grouping comparisons.
1478 * We will produce a single output tuple if not grouping, and a tuple per
1479 * group otherwise. We charge cpu_tuple_cost for each output tuple.
1481 * Note: in this cost model, AGG_SORTED and AGG_HASHED have exactly the
1482 * same total CPU cost, but AGG_SORTED has lower startup cost. If the
1483 * input path is already sorted appropriately, AGG_SORTED should be
1484 * preferred (since it has no risk of memory overflow). This will happen
1485 * as long as the computed total costs are indeed exactly equal --- but if
1486 * there's roundoff error we might do the wrong thing. So be sure that
1487 * the computations below form the same intermediate values in the same
1490 if (aggstrategy == AGG_PLAIN)
1492 startup_cost = input_total_cost;
1493 startup_cost += aggcosts->transCost.startup;
1494 startup_cost += aggcosts->transCost.per_tuple * input_tuples;
1495 startup_cost += aggcosts->finalCost;
1496 /* we aren't grouping */
1497 total_cost = startup_cost + cpu_tuple_cost;
1500 else if (aggstrategy == AGG_SORTED)
1502 /* Here we are able to deliver output on-the-fly */
1503 startup_cost = input_startup_cost;
1504 total_cost = input_total_cost;
1505 /* calcs phrased this way to match HASHED case, see note above */
1506 total_cost += aggcosts->transCost.startup;
1507 total_cost += aggcosts->transCost.per_tuple * input_tuples;
1508 total_cost += (cpu_operator_cost * numGroupCols) * input_tuples;
1509 total_cost += aggcosts->finalCost * numGroups;
1510 total_cost += cpu_tuple_cost * numGroups;
1511 output_tuples = numGroups;
1515 /* must be AGG_HASHED */
1516 startup_cost = input_total_cost;
1517 startup_cost += aggcosts->transCost.startup;
1518 startup_cost += aggcosts->transCost.per_tuple * input_tuples;
1519 startup_cost += (cpu_operator_cost * numGroupCols) * input_tuples;
1520 total_cost = startup_cost;
1521 total_cost += aggcosts->finalCost * numGroups;
1522 total_cost += cpu_tuple_cost * numGroups;
1523 output_tuples = numGroups;
1526 path->rows = output_tuples;
1527 path->startup_cost = startup_cost;
1528 path->total_cost = total_cost;
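/*
* Sketch of the hashed-aggregate accounting above (made-up numbers, default
* cost settings): 100000 input tuples, 2 grouping columns and 1000 groups
* charge 100000 * 2 * 0.0025 = 500 for grouping comparisons, all of it as
* startup cost because hashing returns nothing until the input is
* exhausted, plus 1000 * 0.01 = 10 for emitting the groups.
*/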
1533 * Determines and returns the cost of performing a WindowAgg plan node,
1534 * including the cost of its input.
1536 * Input is assumed already properly sorted.
1539 cost_windowagg(Path *path, PlannerInfo *root,
1540 List *windowFuncs, int numPartCols, int numOrderCols,
1541 Cost input_startup_cost, Cost input_total_cost,
1542 double input_tuples)
1548 startup_cost = input_startup_cost;
1549 total_cost = input_total_cost;
1552 * Window functions are assumed to cost their stated execution cost, plus
1553 * the cost of evaluating their input expressions, per tuple. Since they
1554 * may in fact evaluate their inputs at multiple rows during each cycle,
1555 * this could be a drastic underestimate; but without a way to know how
1556 * many rows the window function will fetch, it's hard to do better. In
1557 * any case, it's a good estimate for all the built-in window functions,
1558 * so we'll just do this for now.
1560 foreach(lc, windowFuncs)
1562 WindowFunc *wfunc = (WindowFunc *) lfirst(lc);
1566 Assert(IsA(wfunc, WindowFunc));
1568 wfunccost = get_func_cost(wfunc->winfnoid) * cpu_operator_cost;
1570 /* also add the input expressions' cost to per-input-row costs */
1571 cost_qual_eval_node(&argcosts, (Node *) wfunc->args, root);
1572 startup_cost += argcosts.startup;
1573 wfunccost += argcosts.per_tuple;
1575 total_cost += wfunccost * input_tuples;
1579 * We also charge cpu_operator_cost per grouping column per tuple for
1580 * grouping comparisons, plus cpu_tuple_cost per tuple for general
1583 * XXX this neglects costs of spooling the data to disk when it overflows
1584 * work_mem. Sooner or later that should get accounted for.
1586 total_cost += cpu_operator_cost * (numPartCols + numOrderCols) * input_tuples;
1587 total_cost += cpu_tuple_cost * input_tuples;
1589 path->rows = input_tuples;
1590 path->startup_cost = startup_cost;
1591 path->total_cost = total_cost;
1596 * Determines and returns the cost of performing a Group plan node,
1597 * including the cost of its input.
1599 * Note: caller must ensure that input costs are for appropriately-sorted
1603 cost_group(Path *path, PlannerInfo *root,
1604 int numGroupCols, double numGroups,
1605 Cost input_startup_cost, Cost input_total_cost,
1606 double input_tuples)
1611 startup_cost = input_startup_cost;
1612 total_cost = input_total_cost;
1615 * Charge one cpu_operator_cost per comparison per input tuple. We assume
1616 * all columns get compared for most of the tuples.
1618 total_cost += cpu_operator_cost * input_tuples * numGroupCols;
1620 path->rows = numGroups;
1621 path->startup_cost = startup_cost;
1622 path->total_cost = total_cost;
1626 * initial_cost_nestloop
1627 * Preliminary estimate of the cost of a nestloop join path.
1629 * This must quickly produce lower-bound estimates of the path's startup and
1630 * total costs. If we are unable to eliminate the proposed path from
1631 * consideration using the lower bounds, final_cost_nestloop will be called
1632 * to obtain the final estimates.
1634 * The exact division of labor between this function and final_cost_nestloop
1635 * is private to them, and represents a tradeoff between speed of the initial
1636 * estimate and getting a tight lower bound. We choose to not examine the
1637 * join quals here, since that's by far the most expensive part of the
1638 * calculations. The end result is that CPU-cost considerations must be
1639 * left for the second phase.
1641 * 'workspace' is to be filled with startup_cost, total_cost, and perhaps
1642 * other data to be used by final_cost_nestloop
1643 * 'jointype' is the type of join to be performed
1644 * 'outer_path' is the outer input to the join
1645 * 'inner_path' is the inner input to the join
1646 * 'sjinfo' is extra info about the join for selectivity estimation
1647 * 'semifactors' contains valid data if jointype is SEMI or ANTI
1650 initial_cost_nestloop(PlannerInfo *root, JoinCostWorkspace *workspace,
1652 Path *outer_path, Path *inner_path,
1653 SpecialJoinInfo *sjinfo,
1654 SemiAntiJoinFactors *semifactors)
1656 Cost startup_cost = 0;
1658 double outer_path_rows = outer_path->rows;
1659 Cost inner_rescan_start_cost;
1660 Cost inner_rescan_total_cost;
1661 Cost inner_run_cost;
1662 Cost inner_rescan_run_cost;
1664 /* estimate costs to rescan the inner relation */
1665 cost_rescan(root, inner_path,
1666 &inner_rescan_start_cost,
1667 &inner_rescan_total_cost);
1669 /* cost of source data */
1672 * NOTE: clearly, we must pay both outer and inner paths' startup_cost
1673 * before we can start returning tuples, so the join's startup cost is
1674 * their sum. We'll also pay the inner path's rescan startup cost multiple times.
1677 startup_cost += outer_path->startup_cost + inner_path->startup_cost;
1678 run_cost += outer_path->total_cost - outer_path->startup_cost;
1679 if (outer_path_rows > 1)
1680 run_cost += (outer_path_rows - 1) * inner_rescan_start_cost;
1682 inner_run_cost = inner_path->total_cost - inner_path->startup_cost;
1683 inner_rescan_run_cost = inner_rescan_total_cost - inner_rescan_start_cost;
1685 if (jointype == JOIN_SEMI || jointype == JOIN_ANTI)
1687 double outer_matched_rows;
1688 Selectivity inner_scan_frac;
1691 * SEMI or ANTI join: executor will stop after first match.
1693 * For an outer-rel row that has at least one match, we can expect the
1694 * inner scan to stop after a fraction 1/(match_count+1) of the inner
1695 * rows, if the matches are evenly distributed. Since they probably
1696 * aren't quite evenly distributed, we apply a fuzz factor of 2.0 to
1697 * that fraction. (If we used a larger fuzz factor, we'd have to
1698 * clamp inner_scan_frac to at most 1.0; but since match_count is at
1699 * least 1, no such clamp is needed now.)
1701 * A complicating factor is that rescans may be cheaper than first
1702 * scans. If we never scan all the way to the end of the inner rel,
1703 * it might be (depending on the plan type) that we'd never pay the
1704 * whole inner first-scan run cost. However it is difficult to
1705 * estimate whether that will happen, so be conservative and always
1706 * charge the whole first-scan cost once.
1708 run_cost += inner_run_cost;
1710 outer_matched_rows = rint(outer_path_rows * semifactors->outer_match_frac);
1711 inner_scan_frac = 2.0 / (semifactors->match_count + 1.0);
1713 /* Add inner run cost for additional outer tuples having matches */
1714 if (outer_matched_rows > 1)
1715 run_cost += (outer_matched_rows - 1) * inner_rescan_run_cost * inner_scan_frac;
1718 * The cost of processing unmatched rows varies depending on the
1719 * details of the joinclauses, so we leave that part for later.
1722 /* Save private data for final_cost_nestloop */
1723 workspace->outer_matched_rows = outer_matched_rows;
1724 workspace->inner_scan_frac = inner_scan_frac;
1728 /* Normal case; we'll scan whole input rel for each outer row */
1729 run_cost += inner_run_cost;
1730 if (outer_path_rows > 1)
1731 run_cost += (outer_path_rows - 1) * inner_rescan_run_cost;
1734 /* CPU costs left for later */
1736 /* Public result fields */
1737 workspace->startup_cost = startup_cost;
1738 workspace->total_cost = startup_cost + run_cost;
1739 /* Save private data for final_cost_nestloop */
1740 workspace->run_cost = run_cost;
1741 workspace->inner_rescan_run_cost = inner_rescan_run_cost;
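/*
* Example of the SEMI/ANTI scan fraction used above (illustrative): with
* match_count = 3, inner_scan_frac = 2.0 / (3 + 1) = 0.5, i.e. each matched
* outer row is assumed to stop, on average, halfway through the inner
* relation.
*/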
1745 * final_cost_nestloop
1746 * Final estimate of the cost and result size of a nestloop join path.
1748 * 'path' is already filled in except for the rows and cost fields
1749 * 'workspace' is the result from initial_cost_nestloop
1750 * 'sjinfo' is extra info about the join for selectivity estimation
1751 * 'semifactors' contains valid data if path->jointype is SEMI or ANTI
1754 final_cost_nestloop(PlannerInfo *root, NestPath *path,
1755 JoinCostWorkspace *workspace,
1756 SpecialJoinInfo *sjinfo,
1757 SemiAntiJoinFactors *semifactors)
1759 Path *outer_path = path->outerjoinpath;
1760 Path *inner_path = path->innerjoinpath;
1761 double outer_path_rows = outer_path->rows;
1762 double inner_path_rows = inner_path->rows;
1763 Cost startup_cost = workspace->startup_cost;
1764 Cost run_cost = workspace->run_cost;
1765 Cost inner_rescan_run_cost = workspace->inner_rescan_run_cost;
1768 QualCost restrict_qual_cost;
1771 /* Estimate the number of rows returned by the join */
1772 if (path->path.required_outer)
1775 * The nestloop is (still) parameterized because of upper-level join
1776 * clauses used by the input paths. So the rowcount estimate should
1777 * be less than the joinrel's row count because of the additional
1778 * selectivity of those join clauses. To estimate the size we need
1779 * to know which of the joinrestrictinfo clauses nominally associated
1780 * with the join have been applied in the inner input path.
1782 * We should also assume that such clauses won't be evaluated at the
1783 * join node at runtime, so exclude them from restrict_qual_cost.
1785 joinclauses = select_nonredundant_join_clauses(path->joinrestrictinfo,
1786 path->innerjoinpath->param_clauses);
1787 set_joinpath_size_estimate(root, path, sjinfo, joinclauses);
1791 joinclauses = path->joinrestrictinfo;
1792 path->path.rows = path->path.parent->rows;
1796 * We could include disable_cost in the preliminary estimate, but that
1797 * would amount to optimizing for the case where the join method is
1798 * disabled, which doesn't seem like the way to bet.
1800 if (!enable_nestloop)
1801 startup_cost += disable_cost;
1803 /* cost of source data */
1805 if (path->jointype == JOIN_SEMI || path->jointype == JOIN_ANTI)
1807 double outer_matched_rows = workspace->outer_matched_rows;
1808 Selectivity inner_scan_frac = workspace->inner_scan_frac;
1811 * SEMI or ANTI join: executor will stop after first match.
1814 /* Compute number of tuples processed (not number emitted!) */
1815 ntuples = outer_matched_rows * inner_path_rows * inner_scan_frac;
1818 * For unmatched outer-rel rows, there are two cases. If the inner
1819 * path is an indexscan using all the joinquals as indexquals, then an
1820 * unmatched row results in an indexscan returning no rows, which is
1821 * probably quite cheap. We estimate this case as the same cost to
1822 * return the first tuple of a nonempty scan. Otherwise, the executor
1823 * will have to scan the whole inner rel; not so cheap.
1825 if (has_indexed_join_quals(path, joinclauses))
1827 run_cost += (outer_path_rows - outer_matched_rows) *
1828 inner_rescan_run_cost / inner_path_rows;
1831 * We won't be evaluating any quals at all for these rows, so
1832 * don't add them to ntuples.
1837 run_cost += (outer_path_rows - outer_matched_rows) *
1838 inner_rescan_run_cost;
1839 ntuples += (outer_path_rows - outer_matched_rows) * inner_path_rows;
1845 /* Normal-case source costs were included in preliminary estimate */
1847 /* Compute number of tuples processed (not number emitted!) */
1848 ntuples = outer_path_rows * inner_path_rows;
1852 cost_qual_eval(&restrict_qual_cost, joinclauses, root);
1853 startup_cost += restrict_qual_cost.startup;
1854 cpu_per_tuple = cpu_tuple_cost + restrict_qual_cost.per_tuple;
1855 run_cost += cpu_per_tuple * ntuples;
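/*
 * Example with made-up values: if the join quals cost 0.005 per tuple, then
 * with the default cpu_tuple_cost of 0.01 we get cpu_per_tuple = 0.015, and
 * processing ntuples = 1,000,000 tuple pairs adds 15,000 cost units to
 * run_cost.
 */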
1857 path->path.startup_cost = startup_cost;
1858 path->path.total_cost = startup_cost + run_cost;
1862 * initial_cost_mergejoin
1863 * Preliminary estimate of the cost of a mergejoin path.
1865 * This must quickly produce lower-bound estimates of the path's startup and
1866 * total costs. If we are unable to eliminate the proposed path from
1867 * consideration using the lower bounds, final_cost_mergejoin will be called
1868 * to obtain the final estimates.
1870 * The exact division of labor between this function and final_cost_mergejoin
1871 * is private to them, and represents a tradeoff between speed of the initial
1872 * estimate and getting a tight lower bound. We choose to not examine the
1873 * join quals here, except for obtaining the scan selectivity estimate which
1874 * is really essential (but fortunately, use of caching keeps the cost of
1875 * getting that down to something reasonable).
1876 * We also assume that cost_sort is cheap enough to use here.
1878 * 'workspace' is to be filled with startup_cost, total_cost, and perhaps
1879 * other data to be used by final_cost_mergejoin
1880 * 'jointype' is the type of join to be performed
1881 * 'mergeclauses' is the list of joinclauses to be used as merge clauses
1882 * 'outer_path' is the outer input to the join
1883 * 'inner_path' is the inner input to the join
1884 * 'outersortkeys' is the list of sort keys for the outer path
1885 * 'innersortkeys' is the list of sort keys for the inner path
1886 * 'sjinfo' is extra info about the join for selectivity estimation
1888 * Note: outersortkeys and innersortkeys should be NIL if no explicit
1889 * sort is needed because the respective source path is already ordered.
1892 initial_cost_mergejoin(PlannerInfo *root, JoinCostWorkspace *workspace,
1895 Path *outer_path, Path *inner_path,
1896 List *outersortkeys, List *innersortkeys,
1897 SpecialJoinInfo *sjinfo)
1899 Cost startup_cost = 0;
1901 double outer_path_rows = outer_path->rows;
1902 double inner_path_rows = inner_path->rows;
1903 Cost inner_run_cost;
1908 Selectivity outerstartsel,
1912 Path sort_path; /* dummy for result of cost_sort */
1914 /* Protect some assumptions below that rowcounts aren't zero or NaN */
1915 if (outer_path_rows <= 0 || isnan(outer_path_rows))
1916 outer_path_rows = 1;
1917 if (inner_path_rows <= 0 || isnan(inner_path_rows))
1918 inner_path_rows = 1;
1921 * A merge join will stop as soon as it exhausts either input stream
1922 * (unless it's an outer join, in which case the outer side has to be
1923 * scanned all the way anyway). Estimate fraction of the left and right
1924 * inputs that will actually need to be scanned. Likewise, we can
1925 * estimate the number of rows that will be skipped before the first join
1926 * pair is found, which should be factored into startup cost. We use only
1927 * the first (most significant) merge clause for this purpose. Since
1928 * mergejoinscansel() is a fairly expensive computation, we cache the
1929 * results in the merge clause RestrictInfo.
1931 if (mergeclauses && jointype != JOIN_FULL)
1933 RestrictInfo *firstclause = (RestrictInfo *) linitial(mergeclauses);
1938 MergeScanSelCache *cache;
1940 /* Get the input pathkeys to determine the sort-order details */
1941 opathkeys = outersortkeys ? outersortkeys : outer_path->pathkeys;
1942 ipathkeys = innersortkeys ? innersortkeys : inner_path->pathkeys;
1945 opathkey = (PathKey *) linitial(opathkeys);
1946 ipathkey = (PathKey *) linitial(ipathkeys);
1947 /* debugging check */
1948 if (opathkey->pk_opfamily != ipathkey->pk_opfamily ||
1949 opathkey->pk_eclass->ec_collation != ipathkey->pk_eclass->ec_collation ||
1950 opathkey->pk_strategy != ipathkey->pk_strategy ||
1951 opathkey->pk_nulls_first != ipathkey->pk_nulls_first)
1952 elog(ERROR, "left and right pathkeys do not match in mergejoin");
1954 /* Get the selectivity with caching */
1955 cache = cached_scansel(root, firstclause, opathkey);
1957 if (bms_is_subset(firstclause->left_relids,
1958 outer_path->parent->relids))
1960 /* left side of clause is outer */
1961 outerstartsel = cache->leftstartsel;
1962 outerendsel = cache->leftendsel;
1963 innerstartsel = cache->rightstartsel;
1964 innerendsel = cache->rightendsel;
1968 /* left side of clause is inner */
1969 outerstartsel = cache->rightstartsel;
1970 outerendsel = cache->rightendsel;
1971 innerstartsel = cache->leftstartsel;
1972 innerendsel = cache->leftendsel;
1974 if (jointype == JOIN_LEFT ||
1975 jointype == JOIN_ANTI)
1977 outerstartsel = 0.0;
1980 else if (jointype == JOIN_RIGHT)
1982 innerstartsel = 0.0;
1988 /* cope with clauseless or full mergejoin */
1989 outerstartsel = innerstartsel = 0.0;
1990 outerendsel = innerendsel = 1.0;
1994 * Convert selectivities to row counts. We force outer_rows and
1995 * inner_rows to be at least 1, but the skip_rows estimates can be zero.
1997 outer_skip_rows = rint(outer_path_rows * outerstartsel);
1998 inner_skip_rows = rint(inner_path_rows * innerstartsel);
1999 outer_rows = clamp_row_est(outer_path_rows * outerendsel);
2000 inner_rows = clamp_row_est(inner_path_rows * innerendsel);
2002 Assert(outer_skip_rows <= outer_rows);
2003 Assert(inner_skip_rows <= inner_rows);
2006 * Readjust scan selectivities to account for above rounding. This is
2007 * normally an insignificant effect, but when there are only a few rows in
2008 * the inputs, failing to do this makes for a large percentage error.
2010 outerstartsel = outer_skip_rows / outer_path_rows;
2011 innerstartsel = inner_skip_rows / inner_path_rows;
2012 outerendsel = outer_rows / outer_path_rows;
2013 innerendsel = inner_rows / inner_path_rows;
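/*
 * Worked example of the rounding adjustment (hypothetical numbers): with
 * outer_path_rows = 10 and outerstartsel = 0.07, outer_skip_rows = rint(0.7)
 * = 1, so the readjusted outerstartsel becomes 1/10 = 0.1 rather than 0.07.
 * At large row counts the correction is negligible, but at this scale it
 * avoids a sizable relative error in the prorated costs below.
 */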
2015 Assert(outerstartsel <= outerendsel);
2016 Assert(innerstartsel <= innerendsel);
2018 /* cost of source data */
2020 if (outersortkeys) /* do we need to sort outer? */
2022 cost_sort(&sort_path,
2025 outer_path->total_cost,
2027 outer_path->parent->width,
2031 startup_cost += sort_path.startup_cost;
2032 startup_cost += (sort_path.total_cost - sort_path.startup_cost) * outerstartsel;
2034 run_cost += (sort_path.total_cost - sort_path.startup_cost)
2035 * (outerendsel - outerstartsel);
2039 startup_cost += outer_path->startup_cost;
2040 startup_cost += (outer_path->total_cost - outer_path->startup_cost) * outerstartsel;
2042 run_cost += (outer_path->total_cost - outer_path->startup_cost)
2043 * (outerendsel - outerstartsel);
2046 if (innersortkeys) /* do we need to sort inner? */
2048 cost_sort(&sort_path,
2051 inner_path->total_cost,
2053 inner_path->parent->width,
2057 startup_cost += sort_path.startup_cost;
2058 startup_cost += (sort_path.total_cost - sort_path.startup_cost) * innerstartsel;
2060 inner_run_cost = (sort_path.total_cost - sort_path.startup_cost)
2061 * (innerendsel - innerstartsel);
2065 startup_cost += inner_path->startup_cost;
2066 startup_cost += (inner_path->total_cost - inner_path->startup_cost) * innerstartsel;
2068 inner_run_cost = (inner_path->total_cost - inner_path->startup_cost)
2069 * (innerendsel - innerstartsel);
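/*
 * The prorating works the same way for both inputs. As an illustration
 * (hypothetical costs): if the inner path has startup cost 100 and total
 * cost 1100, and innerstartsel = 0.1, innerendsel = 0.6, then 100 + 0.1 *
 * 1000 = 200 is charged to startup_cost and (0.6 - 0.1) * 1000 = 500 becomes
 * inner_run_cost; the remaining 40% of the inner scan is expected never to
 * be executed.
 */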
2073 * We can't yet determine whether rescanning occurs, or whether
2074 * materialization of the inner input should be done. The minimum
2075 * possible inner input cost, regardless of rescan and materialization
2076 * considerations, is inner_run_cost. We include that in
2077 * workspace->total_cost, but not yet in run_cost.
2080 /* CPU costs left for later */
2082 /* Public result fields */
2083 workspace->startup_cost = startup_cost;
2084 workspace->total_cost = startup_cost + run_cost + inner_run_cost;
2085 /* Save private data for final_cost_mergejoin */
2086 workspace->run_cost = run_cost;
2087 workspace->inner_run_cost = inner_run_cost;
2088 workspace->outer_rows = outer_rows;
2089 workspace->inner_rows = inner_rows;
2090 workspace->outer_skip_rows = outer_skip_rows;
2091 workspace->inner_skip_rows = inner_skip_rows;
2095 * final_cost_mergejoin
2096 * Final estimate of the cost and result size of a mergejoin path.
2098 * Unlike other costsize functions, this routine makes one actual decision:
2099 * whether we should materialize the inner path. We do that either because
2100 * the inner path can't support mark/restore, or because it's cheaper to
2101 * use an interposed Material node to handle mark/restore. When the decision
2102 * is cost-based it would be logically cleaner to build and cost two separate
2103 * paths with and without that flag set; but that would require repeating most
2104 * of the cost calculations, which are not all that cheap. Since the choice
2105 * will not affect output pathkeys or startup cost, only total cost, there is
2106 * no possibility of wanting to keep both paths. So it seems best to make
2107 * the decision here and record it in the path's materialize_inner field.
2109 * 'path' is already filled in except for the rows and cost fields and materialize_inner
2111 * 'workspace' is the result from initial_cost_mergejoin
2112 * 'sjinfo' is extra info about the join for selectivity estimation
2115 final_cost_mergejoin(PlannerInfo *root, MergePath *path,
2116 JoinCostWorkspace *workspace,
2117 SpecialJoinInfo *sjinfo)
2119 Path *outer_path = path->jpath.outerjoinpath;
2120 Path *inner_path = path->jpath.innerjoinpath;
2121 double inner_path_rows = inner_path->rows;
2122 List *mergeclauses = path->path_mergeclauses;
2123 List *innersortkeys = path->innersortkeys;
2124 Cost startup_cost = workspace->startup_cost;
2125 Cost run_cost = workspace->run_cost;
2126 Cost inner_run_cost = workspace->inner_run_cost;
2127 double outer_rows = workspace->outer_rows;
2128 double inner_rows = workspace->inner_rows;
2129 double outer_skip_rows = workspace->outer_skip_rows;
2130 double inner_skip_rows = workspace->inner_skip_rows;
2134 QualCost merge_qual_cost;
2135 QualCost qp_qual_cost;
2136 double mergejointuples,
2140 /* Protect some assumptions below that rowcounts aren't zero or NaN */
2141 if (inner_path_rows <= 0 || isnan(inner_path_rows))
2142 inner_path_rows = 1;
2144 /* Estimate the number of rows returned by the join */
2145 set_joinpath_size_estimate(root, &path->jpath, sjinfo,
2146 path->jpath.joinrestrictinfo);
2149 * We could include disable_cost in the preliminary estimate, but that
2150 * would amount to optimizing for the case where the join method is
2151 * disabled, which doesn't seem like the way to bet.
2153 if (!enable_mergejoin)
2154 startup_cost += disable_cost;
2157 * Compute cost of the mergequals and qpquals (other restriction clauses)
2160 cost_qual_eval(&merge_qual_cost, mergeclauses, root);
2161 cost_qual_eval(&qp_qual_cost, path->jpath.joinrestrictinfo, root);
2162 qp_qual_cost.startup -= merge_qual_cost.startup;
2163 qp_qual_cost.per_tuple -= merge_qual_cost.per_tuple;
2166 * Get approx # tuples passing the mergequals. We use approx_tuple_count
2167 * here because we need an estimate done with JOIN_INNER semantics.
2169 mergejointuples = approx_tuple_count(root, &path->jpath, mergeclauses);
2172 * When there are equal merge keys in the outer relation, the mergejoin
2173 * must rescan any matching tuples in the inner relation. This means
2174 * re-fetching inner tuples; we have to estimate how often that happens.
2176 * For regular inner and outer joins, the number of re-fetches can be
2177 * estimated approximately as size of merge join output minus size of
2178 * inner relation. Assume that the distinct key values are 1, 2, ..., and
2179 * denote the number of values of each key in the outer relation as m1,
2180 * m2, ...; in the inner relation, n1, n2, ... Then we have
2182 * size of join = m1 * n1 + m2 * n2 + ...
2184 * number of rescanned tuples = (m1 - 1) * n1 + (m2 - 1) * n2 + ...
2185 *     = m1 * n1 + m2 * n2 + ... - (n1 + n2 + ...) = size of join - size of inner relation
2188 * This equation works correctly for outer tuples having no inner match
2189 * (nk = 0), but not for inner tuples having no outer match (mk = 0); we
2190 * are effectively subtracting those from the number of rescanned tuples,
2191 * when we should not. Can we do better without expensive selectivity computations?
2194 * The whole issue is moot if we are working from a unique-ified outer input.
2197 if (IsA(outer_path, UniquePath))
2198 rescannedtuples = 0;
2201 rescannedtuples = mergejointuples - inner_path_rows;
2202 /* Must clamp because of possible underestimate */
2203 if (rescannedtuples < 0)
2204 rescannedtuples = 0;
2206 /* We'll inflate various costs this much to account for rescanning */
2207 rescanratio = 1.0 + (rescannedtuples / inner_path_rows);
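/*
 * Example with made-up numbers: if the merge is expected to emit
 * mergejointuples = 15000 rows and inner_path_rows = 10000, then
 * rescannedtuples = 5000 and rescanratio = 1.5, i.e. each inner tuple is
 * expected to be fetched 1.5 times on average.
 */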
2210 * Decide whether we want to materialize the inner input to shield it from
2211 * mark/restore and performing re-fetches. Our cost model for regular
2212 * re-fetches is that a re-fetch costs the same as an original fetch,
2213 * which is probably an overestimate; but on the other hand we ignore the
2214 * bookkeeping costs of mark/restore. Not clear if it's worth developing
2215 * a more refined model. So we just need to inflate the inner run cost by rescanratio.
2218 bare_inner_cost = inner_run_cost * rescanratio;
2221 * When we interpose a Material node the re-fetch cost is assumed to be
2222 * just cpu_operator_cost per tuple, independently of the underlying
2223 * plan's cost; and we charge an extra cpu_operator_cost per original
2224 * fetch as well. Note that we're assuming the materialize node will
2225 * never spill to disk, since it only has to remember tuples back to the
2226 * last mark. (If there are a huge number of duplicates, our other cost
2227 * factors will make the path so expensive that it probably won't get
2228 * chosen anyway.) So we don't use cost_rescan here.
2230 * Note: keep this estimate in sync with create_mergejoin_plan's labeling
2231 * of the generated Material node.
2233 mat_inner_cost = inner_run_cost +
2234 cpu_operator_cost * inner_path_rows * rescanratio;
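/*
 * Continuing the hypothetical example above: with inner_run_cost = 2000,
 * inner_path_rows = 10000, rescanratio = 1.5 and the default
 * cpu_operator_cost of 0.0025, bare_inner_cost = 3000 while mat_inner_cost =
 * 2000 + 0.0025 * 10000 * 1.5 = 2037.5, so materializing the inner side
 * would look cheaper (and be chosen, if enable_material allows it).
 */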
2237 * Prefer materializing if it looks cheaper, unless the user has asked to
2238 * suppress materialization.
2240 if (enable_material && mat_inner_cost < bare_inner_cost)
2241 path->materialize_inner = true;
2244 * Even if materializing doesn't look cheaper, we *must* do it if the
2245 * inner path is to be used directly (without sorting) and it doesn't
2246 * support mark/restore.
2248 * Since the inner side must be ordered, and only Sorts and IndexScans can
2249 * create order to begin with, and they both support mark/restore, you
2250 * might think there's no problem --- but you'd be wrong. Nestloop and
2251 * merge joins can *preserve* the order of their inputs, so they can be
2252 * selected as the input of a mergejoin, and they don't support
2253 * mark/restore at present.
2255 * We don't test the value of enable_material here, because
2256 * materialization is required for correctness in this case, and turning
2257 * it off does not entitle us to deliver an invalid plan.
2259 else if (innersortkeys == NIL &&
2260 !ExecSupportsMarkRestore(inner_path->pathtype))
2261 path->materialize_inner = true;
2264 * Also, force materializing if the inner path is to be sorted and the
2265 * sort is expected to spill to disk. This is because the final merge
2266 * pass can be done on-the-fly if it doesn't have to support mark/restore.
2267 * We don't try to adjust the cost estimates for this consideration,
2270 * Since materialization is a performance optimization in this case,
2271 * rather than necessary for correctness, we skip it if enable_material is off.
2274 else if (enable_material && innersortkeys != NIL &&
2275 relation_byte_size(inner_path_rows, inner_path->parent->width) > (work_mem * 1024L))
2277 path->materialize_inner = true;
2279 path->materialize_inner = false;
2281 /* Charge the right incremental cost for the chosen case */
2282 if (path->materialize_inner)
2283 run_cost += mat_inner_cost;
2285 run_cost += bare_inner_cost;
2290 * The number of tuple comparisons needed is approximately number of outer
2291 * rows plus number of inner rows plus number of rescanned tuples (can we
2292 * refine this?). At each one, we need to evaluate the mergejoin quals.
2294 startup_cost += merge_qual_cost.startup;
2295 startup_cost += merge_qual_cost.per_tuple *
2296 (outer_skip_rows + inner_skip_rows * rescanratio);
2297 run_cost += merge_qual_cost.per_tuple *
2298 ((outer_rows - outer_skip_rows) +
2299 (inner_rows - inner_skip_rows) * rescanratio);
2302 * For each tuple that gets through the mergejoin proper, we charge
2303 * cpu_tuple_cost plus the cost of evaluating additional restriction
2304 * clauses that are to be applied at the join. (This is pessimistic since
2305 * not all of the quals may get evaluated at each tuple.)
2307 * Note: we could adjust for SEMI/ANTI joins skipping some qual
2308 * evaluations here, but it's probably not worth the trouble.
2310 startup_cost += qp_qual_cost.startup;
2311 cpu_per_tuple = cpu_tuple_cost + qp_qual_cost.per_tuple;
2312 run_cost += cpu_per_tuple * mergejointuples;
2314 path->jpath.path.startup_cost = startup_cost;
2315 path->jpath.path.total_cost = startup_cost + run_cost;
2319 * run mergejoinscansel() with caching
2321 static MergeScanSelCache *
2322 cached_scansel(PlannerInfo *root, RestrictInfo *rinfo, PathKey *pathkey)
2324 MergeScanSelCache *cache;
2326 Selectivity leftstartsel,
2330 MemoryContext oldcontext;
2332 /* Do we have this result already? */
2333 foreach(lc, rinfo->scansel_cache)
2335 cache = (MergeScanSelCache *) lfirst(lc);
2336 if (cache->opfamily == pathkey->pk_opfamily &&
2337 cache->collation == pathkey->pk_eclass->ec_collation &&
2338 cache->strategy == pathkey->pk_strategy &&
2339 cache->nulls_first == pathkey->pk_nulls_first)
2343 /* Nope, do the computation */
2344 mergejoinscansel(root,
2345 (Node *) rinfo->clause,
2346 pathkey->pk_opfamily,
2347 pathkey->pk_strategy,
2348 pathkey->pk_nulls_first, &leftstartsel, &leftendsel, &rightstartsel, &rightendsel);
2354 /* Cache the result in suitably long-lived workspace */
2355 oldcontext = MemoryContextSwitchTo(root->planner_cxt);
2357 cache = (MergeScanSelCache *) palloc(sizeof(MergeScanSelCache));
2358 cache->opfamily = pathkey->pk_opfamily;
2359 cache->collation = pathkey->pk_eclass->ec_collation;
2360 cache->strategy = pathkey->pk_strategy;
2361 cache->nulls_first = pathkey->pk_nulls_first;
2362 cache->leftstartsel = leftstartsel;
2363 cache->leftendsel = leftendsel;
2364 cache->rightstartsel = rightstartsel;
2365 cache->rightendsel = rightendsel;
2367 rinfo->scansel_cache = lappend(rinfo->scansel_cache, cache);
2369 MemoryContextSwitchTo(oldcontext);
2375 * initial_cost_hashjoin
2376 * Preliminary estimate of the cost of a hashjoin path.
2378 * This must quickly produce lower-bound estimates of the path's startup and
2379 * total costs. If we are unable to eliminate the proposed path from
2380 * consideration using the lower bounds, final_cost_hashjoin will be called
2381 * to obtain the final estimates.
2383 * The exact division of labor between this function and final_cost_hashjoin
2384 * is private to them, and represents a tradeoff between speed of the initial
2385 * estimate and getting a tight lower bound. We choose to not examine the
2386 * join quals here (other than by counting the number of hash clauses),
2387 * so we can't do much with CPU costs. We do assume that
2388 * ExecChooseHashTableSize is cheap enough to use here.
2390 * 'workspace' is to be filled with startup_cost, total_cost, and perhaps
2391 * other data to be used by final_cost_hashjoin
2392 * 'jointype' is the type of join to be performed
2393 * 'hashclauses' is the list of joinclauses to be used as hash clauses
2394 * 'outer_path' is the outer input to the join
2395 * 'inner_path' is the inner input to the join
2396 * 'sjinfo' is extra info about the join for selectivity estimation
2397 * 'semifactors' contains valid data if jointype is SEMI or ANTI
2400 initial_cost_hashjoin(PlannerInfo *root, JoinCostWorkspace *workspace,
2403 Path *outer_path, Path *inner_path,
2404 SpecialJoinInfo *sjinfo,
2405 SemiAntiJoinFactors *semifactors)
2407 Cost startup_cost = 0;
2409 double outer_path_rows = outer_path->rows;
2410 double inner_path_rows = inner_path->rows;
2411 int num_hashclauses = list_length(hashclauses);
2416 /* cost of source data */
2417 startup_cost += outer_path->startup_cost;
2418 run_cost += outer_path->total_cost - outer_path->startup_cost;
2419 startup_cost += inner_path->total_cost;
2422 * Cost of computing hash function: must do it once per input tuple. We
2423 * charge one cpu_operator_cost for each column's hash function. Also,
2424 * tack on one cpu_tuple_cost per inner row, to model the costs of
2425 * inserting the row into the hashtable.
2427 * XXX when a hashclause is more complex than a single operator, we really
2428 * should charge the extra eval costs of the left or right side, as
2429 * appropriate, here. This seems more work than it's worth at the moment.
2431 startup_cost += (cpu_operator_cost * num_hashclauses + cpu_tuple_cost) * inner_path_rows;
2433 run_cost += cpu_operator_cost * num_hashclauses * outer_path_rows;
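/*
 * Example (hypothetical row counts, default cost parameters): with two hash
 * clauses, 10,000 inner rows and 100,000 outer rows, building the hash table
 * adds (0.0025 * 2 + 0.01) * 10000 = 150 to startup_cost, and hashing the
 * outer side adds 0.0025 * 2 * 100000 = 500 to run_cost.
 */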
2436 * Get hash table size that executor would use for inner relation.
2438 * XXX for the moment, always assume that skew optimization will be
2439 * performed. As long as SKEW_WORK_MEM_PERCENT is small, it's not worth
2440 * trying to determine that for sure.
2442 * XXX at some point it might be interesting to try to account for skew
2443 * optimization in the cost estimate, but for now, we don't.
2445 ExecChooseHashTableSize(inner_path_rows,
2446 inner_path->parent->width,
2453 * If inner relation is too big then we will need to "batch" the join,
2454 * which implies writing and reading most of the tuples to disk an extra
2455 * time. Charge seq_page_cost per page, since the I/O should be nice and
2456 * sequential. Writing the inner rel counts as startup cost, all the rest as run cost.
2461 double outerpages = page_size(outer_path_rows,
2462 outer_path->parent->width);
2463 double innerpages = page_size(inner_path_rows,
2464 inner_path->parent->width);
2466 startup_cost += seq_page_cost * innerpages;
2467 run_cost += seq_page_cost * (innerpages + 2 * outerpages);
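/*
 * For instance (hypothetical sizes): if the inner rel occupies 2,000 pages,
 * the outer rel 10,000 pages and numbatches > 1, we add 2,000 * seq_page_cost
 * to startup_cost for writing the inner rel out, and (2,000 + 2 * 10,000) *
 * seq_page_cost = 22,000 * seq_page_cost to run_cost for re-reading the
 * inner rel and writing and re-reading the outer rel.
 */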
2470 /* CPU costs left for later */
2472 /* Public result fields */
2473 workspace->startup_cost = startup_cost;
2474 workspace->total_cost = startup_cost + run_cost;
2475 /* Save private data for final_cost_hashjoin */
2476 workspace->run_cost = run_cost;
2477 workspace->numbuckets = numbuckets;
2478 workspace->numbatches = numbatches;
2482 * final_cost_hashjoin
2483 * Final estimate of the cost and result size of a hashjoin path.
2485 * Note: the numbatches estimate is also saved into 'path' for use later
2487 * 'path' is already filled in except for the rows and cost fields and num_batches
2489 * 'workspace' is the result from initial_cost_hashjoin
2490 * 'sjinfo' is extra info about the join for selectivity estimation
2491 * 'semifactors' contains valid data if path->jointype is SEMI or ANTI
2494 final_cost_hashjoin(PlannerInfo *root, HashPath *path,
2495 JoinCostWorkspace *workspace,
2496 SpecialJoinInfo *sjinfo,
2497 SemiAntiJoinFactors *semifactors)
2499 Path *outer_path = path->jpath.outerjoinpath;
2500 Path *inner_path = path->jpath.innerjoinpath;
2501 double outer_path_rows = outer_path->rows;
2502 double inner_path_rows = inner_path->rows;
2503 List *hashclauses = path->path_hashclauses;
2504 Cost startup_cost = workspace->startup_cost;
2505 Cost run_cost = workspace->run_cost;
2506 int numbuckets = workspace->numbuckets;
2507 int numbatches = workspace->numbatches;
2509 QualCost hash_qual_cost;
2510 QualCost qp_qual_cost;
2511 double hashjointuples;
2512 double virtualbuckets;
2513 Selectivity innerbucketsize;
2516 /* Estimate the number of rows returned by the join */
2517 set_joinpath_size_estimate(root, &path->jpath, sjinfo,
2518 path->jpath.joinrestrictinfo);
2521 * We could include disable_cost in the preliminary estimate, but that
2522 * would amount to optimizing for the case where the join method is
2523 * disabled, which doesn't seem like the way to bet.
2525 if (!enable_hashjoin)
2526 startup_cost += disable_cost;
2528 /* mark the path with estimated # of batches */
2529 path->num_batches = numbatches;
2531 /* and compute the number of "virtual" buckets in the whole join */
2532 virtualbuckets = (double) numbuckets * (double) numbatches;
2535 * Determine bucketsize fraction for inner relation. We use the smallest
2536 * bucketsize estimated for any individual hashclause; this is undoubtedly conservative.
2539 * BUT: if inner relation has been unique-ified, we can assume it's good
2540 * for hashing. This is important both because it's the right answer, and
2541 * because we avoid contaminating the cache with a value that's wrong for
2542 * non-unique-ified paths.
2544 if (IsA(inner_path, UniquePath))
2545 innerbucketsize = 1.0 / virtualbuckets;
2548 innerbucketsize = 1.0;
2549 foreach(hcl, hashclauses)
2551 RestrictInfo *restrictinfo = (RestrictInfo *) lfirst(hcl);
2552 Selectivity thisbucketsize;
2554 Assert(IsA(restrictinfo, RestrictInfo));
2557 * First we have to figure out which side of the hashjoin clause
2558 * is the inner side.
2560 * Since we tend to visit the same clauses over and over when
2561 * planning a large query, we cache the bucketsize estimate in the
2562 * RestrictInfo node to avoid repeated lookups of statistics.
2564 if (bms_is_subset(restrictinfo->right_relids,
2565 inner_path->parent->relids))
2567 /* righthand side is inner */
2568 thisbucketsize = restrictinfo->right_bucketsize;
2569 if (thisbucketsize < 0)
2571 /* not cached yet */
2573 estimate_hash_bucketsize(root,
2574 get_rightop(restrictinfo->clause),
2576 restrictinfo->right_bucketsize = thisbucketsize;
2581 Assert(bms_is_subset(restrictinfo->left_relids,
2582 inner_path->parent->relids));
2583 /* lefthand side is inner */
2584 thisbucketsize = restrictinfo->left_bucketsize;
2585 if (thisbucketsize < 0)
2587 /* not cached yet */
2589 estimate_hash_bucketsize(root,
2590 get_leftop(restrictinfo->clause),
2592 restrictinfo->left_bucketsize = thisbucketsize;
2596 if (innerbucketsize > thisbucketsize)
2597 innerbucketsize = thisbucketsize;
2602 * Compute cost of the hashquals and qpquals (other restriction clauses)
2605 cost_qual_eval(&hash_qual_cost, hashclauses, root);
2606 cost_qual_eval(&qp_qual_cost, path->jpath.joinrestrictinfo, root);
2607 qp_qual_cost.startup -= hash_qual_cost.startup;
2608 qp_qual_cost.per_tuple -= hash_qual_cost.per_tuple;
2612 if (path->jpath.jointype == JOIN_SEMI || path->jpath.jointype == JOIN_ANTI)
2614 double outer_matched_rows;
2615 Selectivity inner_scan_frac;
2618 * SEMI or ANTI join: executor will stop after first match.
2620 * For an outer-rel row that has at least one match, we can expect the
2621 * bucket scan to stop after a fraction 1/(match_count+1) of the
2622 * bucket's rows, if the matches are evenly distributed. Since they
2623 * probably aren't quite evenly distributed, we apply a fuzz factor of
2624 * 2.0 to that fraction. (If we used a larger fuzz factor, we'd have
2625 * to clamp inner_scan_frac to at most 1.0; but since match_count is
2626 * at least 1, no such clamp is needed now.)
2628 outer_matched_rows = rint(outer_path_rows * semifactors->outer_match_frac);
2629 inner_scan_frac = 2.0 / (semifactors->match_count + 1.0);
2631 startup_cost += hash_qual_cost.startup;
2632 run_cost += hash_qual_cost.per_tuple * outer_matched_rows *
2633 clamp_row_est(inner_path_rows * innerbucketsize * inner_scan_frac) * 0.5;
2636 * For unmatched outer-rel rows, the picture is quite a lot different.
2637 * In the first place, there is no reason to assume that these rows
2638 * preferentially hit heavily-populated buckets; instead assume they
2639 * are uncorrelated with the inner distribution and so they see an
2640 * average bucket size of inner_path_rows / virtualbuckets. In the
2641 * second place, it seems likely that they will have few if any exact
2642 * hash-code matches and so very few of the tuples in the bucket will
2643 * actually require eval of the hash quals. We don't have any good
2644 * way to estimate how many will, but for the moment assume that the
2645 * effective cost per bucket entry is one-tenth what it is for matched tuples.
2648 run_cost += hash_qual_cost.per_tuple *
2649 (outer_path_rows - outer_matched_rows) *
2650 clamp_row_est(inner_path_rows / virtualbuckets) * 0.05;
2652 /* Get # of tuples that will pass the basic join */
2653 if (path->jpath.jointype == JOIN_SEMI)
2654 hashjointuples = outer_matched_rows;
2656 hashjointuples = outer_path_rows - outer_matched_rows;
2661 * The number of tuple comparisons needed is the number of outer
2662 * tuples times the typical number of tuples in a hash bucket, which
2663 * is the inner relation size times its bucketsize fraction. At each
2664 * one, we need to evaluate the hashjoin quals. But actually,
2665 * charging the full qual eval cost at each tuple is pessimistic,
2666 * since we don't evaluate the quals unless the hash values match
2667 * exactly. For lack of a better idea, halve the cost estimate to allow for that.
2670 startup_cost += hash_qual_cost.startup;
2671 run_cost += hash_qual_cost.per_tuple * outer_path_rows *
2672 clamp_row_est(inner_path_rows * innerbucketsize) * 0.5;
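/*
 * Example (made-up values): with 100,000 outer rows, 10,000 inner rows,
 * innerbucketsize = 0.001 and hash_qual_cost.per_tuple = 0.005, the typical
 * bucket holds about 10 entries, and we charge 0.005 * 100000 * 10 * 0.5 =
 * 2500 cost units for evaluating the hash quals.
 */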
2675 * Get approx # tuples passing the hashquals. We use
2676 * approx_tuple_count here because we need an estimate done with
2677 * JOIN_INNER semantics.
2679 hashjointuples = approx_tuple_count(root, &path->jpath, hashclauses);
2683 * For each tuple that gets through the hashjoin proper, we charge
2684 * cpu_tuple_cost plus the cost of evaluating additional restriction
2685 * clauses that are to be applied at the join. (This is pessimistic since
2686 * not all of the quals may get evaluated at each tuple.)
2688 startup_cost += qp_qual_cost.startup;
2689 cpu_per_tuple = cpu_tuple_cost + qp_qual_cost.per_tuple;
2690 run_cost += cpu_per_tuple * hashjointuples;
2692 path->jpath.path.startup_cost = startup_cost;
2693 path->jpath.path.total_cost = startup_cost + run_cost;
2699 * Figure the costs for a SubPlan (or initplan).
2701 * Note: we could dig the subplan's Plan out of the root list, but in practice
2702 * all callers have it handy already, so we make them pass it.
2705 cost_subplan(PlannerInfo *root, SubPlan *subplan, Plan *plan)
2709 /* Figure any cost for evaluating the testexpr */
2710 cost_qual_eval(&sp_cost,
2711 make_ands_implicit((Expr *) subplan->testexpr), root);
2714 if (subplan->useHashTable)
2717 * If we are using a hash table for the subquery outputs, then the
2718 * cost of evaluating the query is a one-time cost. We charge one
2719 * cpu_operator_cost per tuple for the work of loading the hashtable,
2722 sp_cost.startup += plan->total_cost +
2723 cpu_operator_cost * plan->plan_rows;
2726 * The per-tuple costs include the cost of evaluating the lefthand
2727 * expressions, plus the cost of probing the hashtable. We already
2728 * accounted for the lefthand expressions as part of the testexpr, and
2729 * will also have counted one cpu_operator_cost for each comparison
2730 * operator. That is probably too low for the probing cost, but it's
2731 * hard to make a better estimate, so live with it for now.
2737 * Otherwise we will be rescanning the subplan output on each
2738 * evaluation. We need to estimate how much of the output we will
2739 * actually need to scan. NOTE: this logic should agree with the
2740 * tuple_fraction estimates used by make_subplan() in
2743 Cost plan_run_cost = plan->total_cost - plan->startup_cost;
2745 if (subplan->subLinkType == EXISTS_SUBLINK)
2747 /* we only need to fetch 1 tuple */
2748 sp_cost.per_tuple += plan_run_cost / plan->plan_rows;
2750 else if (subplan->subLinkType == ALL_SUBLINK ||
2751 subplan->subLinkType == ANY_SUBLINK)
2753 /* assume we need 50% of the tuples */
2754 sp_cost.per_tuple += 0.50 * plan_run_cost;
2755 /* also charge a cpu_operator_cost per row examined */
2756 sp_cost.per_tuple += 0.50 * plan->plan_rows * cpu_operator_cost;
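/*
 * Example (hypothetical subplan): if the subplan's run cost is 400 and it
 * returns 2,000 rows, an ANY sublink is charged 0.5 * 400 = 200 plus 0.5 *
 * 2000 * cpu_operator_cost = 2.5 (at the default 0.0025) for each evaluation
 * of the SubPlan expression.
 */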
2760 /* assume we need all tuples */
2761 sp_cost.per_tuple += plan_run_cost;
2765 * Also account for subplan's startup cost. If the subplan is
2766 * uncorrelated or undirect correlated, AND its topmost node is one
2767 * that materializes its output, assume that we'll only need to pay
2768 * its startup cost once; otherwise assume we pay the startup cost every time.
2771 if (subplan->parParam == NIL &&
2772 ExecMaterializesOutput(nodeTag(plan)))
2773 sp_cost.startup += plan->startup_cost;
2775 sp_cost.per_tuple += plan->startup_cost;
2778 subplan->startup_cost = sp_cost.startup;
2779 subplan->per_call_cost = sp_cost.per_tuple;
2785 * Given a finished Path, estimate the costs of rescanning it after
2786 * having done so the first time. For some Path types a rescan is
2787 * cheaper than an original scan (if no parameters change), and this
2788 * function embodies knowledge about that. The default is to return
2789 * the same costs stored in the Path. (Note that the cost estimates
2790 * actually stored in Paths are always for first scans.)
2792 * This function is not currently intended to model effects such as rescans
2793 * being cheaper due to disk block caching; what we are concerned with is
2794 * plan types wherein the executor caches results explicitly, or doesn't
2795 * redo startup calculations, etc.
2798 cost_rescan(PlannerInfo *root, Path *path,
2799 Cost *rescan_startup_cost, /* output parameters */
2800 Cost *rescan_total_cost)
2802 switch (path->pathtype)
2804 case T_FunctionScan:
2807 * Currently, nodeFunctionscan.c always executes the function to
2808 * completion before returning any rows, and caches the results in
2809 * a tuplestore. So the function eval cost is all startup cost
2810 * and isn't paid over again on rescans. However, all run costs
2811 * will be paid over again.
2813 *rescan_startup_cost = 0;
2814 *rescan_total_cost = path->total_cost - path->startup_cost;
2819 * Assume that all of the startup cost represents hash table
2820 * building, which we won't have to do over.
2822 *rescan_startup_cost = 0;
2823 *rescan_total_cost = path->total_cost - path->startup_cost;
2826 case T_WorkTableScan:
2829 * These plan types materialize their final result in a
2830 * tuplestore or tuplesort object. So the rescan cost is only
2831 * cpu_tuple_cost per tuple, unless the result is large enough to require spilling to disk.
2834 Cost run_cost = cpu_tuple_cost * path->rows;
2835 double nbytes = relation_byte_size(path->rows,
2836 path->parent->width);
2837 long work_mem_bytes = work_mem * 1024L;
2839 if (nbytes > work_mem_bytes)
2841 /* It will spill, so account for re-read cost */
2842 double npages = ceil(nbytes / BLCKSZ);
2844 run_cost += seq_page_cost * npages;
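/*
 * Example (made-up numbers): rescanning a 1,000,000-row result with 100-byte
 * rows needs on the order of 100 MB, so with work_mem = 64MB it is expected
 * to spill; at the default 8K BLCKSZ that is roughly 12,800 pages, adding
 * about 12,800 * seq_page_cost on top of the cpu_tuple_cost-per-tuple charge.
 */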
2846 *rescan_startup_cost = 0;
2847 *rescan_total_cost = run_cost;
2854 * These plan types not only materialize their results, but do
2855 * not implement qual filtering or projection. So they are
2856 * even cheaper to rescan than the ones above. We charge only
2857 * cpu_operator_cost per tuple. (Note: keep that in sync with
2858 * the run_cost charge in cost_sort, and also see comments in
2859 * cost_material before you change it.)
2861 Cost run_cost = cpu_operator_cost * path->rows;
2862 double nbytes = relation_byte_size(path->rows,
2863 path->parent->width);
2864 long work_mem_bytes = work_mem * 1024L;
2866 if (nbytes > work_mem_bytes)
2868 /* It will spill, so account for re-read cost */
2869 double npages = ceil(nbytes / BLCKSZ);
2871 run_cost += seq_page_cost * npages;
2873 *rescan_startup_cost = 0;
2874 *rescan_total_cost = run_cost;
2878 *rescan_startup_cost = path->startup_cost;
2879 *rescan_total_cost = path->total_cost;
2887 * Estimate the CPU costs of evaluating a WHERE clause.
2888 * The input can be either an implicitly-ANDed list of boolean
2889 * expressions, or a list of RestrictInfo nodes. (The latter is
2890 * preferred since it allows caching of the results.)
2891 * The result includes both a one-time (startup) component,
2892 * and a per-evaluation component.
2895 cost_qual_eval(QualCost *cost, List *quals, PlannerInfo *root)
2897 cost_qual_eval_context context;
2900 context.root = root;
2901 context.total.startup = 0;
2902 context.total.per_tuple = 0;
2904 /* We don't charge any cost for the implicit ANDing at top level ... */
2908 Node *qual = (Node *) lfirst(l);
2910 cost_qual_eval_walker(qual, &context);
2913 *cost = context.total;
2917 * cost_qual_eval_node
2918 * As above, for a single RestrictInfo or expression.
2921 cost_qual_eval_node(QualCost *cost, Node *qual, PlannerInfo *root)
2923 cost_qual_eval_context context;
2925 context.root = root;
2926 context.total.startup = 0;
2927 context.total.per_tuple = 0;
2929 cost_qual_eval_walker(qual, &context);
2931 *cost = context.total;
2935 cost_qual_eval_walker(Node *node, cost_qual_eval_context *context)
2941 * RestrictInfo nodes contain an eval_cost field reserved for this
2942 * routine's use, so that it's not necessary to evaluate the qual clause's
2943 * cost more than once. If the clause's cost hasn't been computed yet,
2944 * the field's startup value will contain -1.
2946 if (IsA(node, RestrictInfo))
2948 RestrictInfo *rinfo = (RestrictInfo *) node;
2950 if (rinfo->eval_cost.startup < 0)
2952 cost_qual_eval_context locContext;
2954 locContext.root = context->root;
2955 locContext.total.startup = 0;
2956 locContext.total.per_tuple = 0;
2959 * For an OR clause, recurse into the marked-up tree so that we
2960 * set the eval_cost for contained RestrictInfos too.
2962 if (rinfo->orclause)
2963 cost_qual_eval_walker((Node *) rinfo->orclause, &locContext);
2965 cost_qual_eval_walker((Node *) rinfo->clause, &locContext);
2968 * If the RestrictInfo is marked pseudoconstant, it will be tested
2969 * only once, so treat its cost as all startup cost.
2971 if (rinfo->pseudoconstant)
2973 /* count one execution during startup */
2974 locContext.total.startup += locContext.total.per_tuple;
2975 locContext.total.per_tuple = 0;
2977 rinfo->eval_cost = locContext.total;
2979 context->total.startup += rinfo->eval_cost.startup;
2980 context->total.per_tuple += rinfo->eval_cost.per_tuple;
2981 /* do NOT recurse into children */
2986 * For each operator or function node in the given tree, we charge the
2987 * estimated execution cost given by pg_proc.procost (remember to multiply
2988 * this by cpu_operator_cost).
2990 * Vars and Consts are charged zero, and so are boolean operators (AND,
2991 * OR, NOT). Simplistic, but a lot better than no model at all.
2993 * Should we try to account for the possibility of short-circuit
2994 * evaluation of AND/OR? Probably *not*, because that would make the
2995 * results depend on the clause ordering, and we are not in any position
2996 * to expect that the current ordering of the clauses is the one that's
2997 * going to end up being used. The above per-RestrictInfo caching would
2998 * not mix well with trying to re-order clauses anyway.
3000 if (IsA(node, FuncExpr))
3002 context->total.per_tuple +=
3003 get_func_cost(((FuncExpr *) node)->funcid) * cpu_operator_cost;
3005 else if (IsA(node, OpExpr) ||
3006 IsA(node, DistinctExpr) ||
3007 IsA(node, NullIfExpr))
3009 /* rely on struct equivalence to treat these all alike */
3010 set_opfuncid((OpExpr *) node);
3011 context->total.per_tuple +=
3012 get_func_cost(((OpExpr *) node)->opfuncid) * cpu_operator_cost;
3014 else if (IsA(node, ScalarArrayOpExpr))
3017 * Estimate that the operator will be applied to about half of the
3018 * array elements before the answer is determined.
3020 ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) node;
3021 Node *arraynode = (Node *) lsecond(saop->args);
3023 set_sa_opfuncid(saop);
3024 context->total.per_tuple += get_func_cost(saop->opfuncid) *
3025 cpu_operator_cost * estimate_array_length(arraynode) * 0.5;
3027 else if (IsA(node, Aggref) ||
3028 IsA(node, WindowFunc))
3031 * Aggref and WindowFunc nodes are (and should be) treated like Vars,
3032 * ie, zero execution cost in the current model, because they behave
3033 * essentially like Vars in execQual.c. We disregard the costs of
3034 * their input expressions for the same reason. The actual execution
3035 * costs of the aggregate/window functions and their arguments have to
3036 * be factored into plan-node-specific costing of the Agg or WindowAgg
3039 return false; /* don't recurse into children */
3041 else if (IsA(node, CoerceViaIO))
3043 CoerceViaIO *iocoerce = (CoerceViaIO *) node;
3048 /* check the result type's input function */
3049 getTypeInputInfo(iocoerce->resulttype,
3050 &iofunc, &typioparam);
3051 context->total.per_tuple += get_func_cost(iofunc) * cpu_operator_cost;
3052 /* check the input type's output function */
3053 getTypeOutputInfo(exprType((Node *) iocoerce->arg),
3054 &iofunc, &typisvarlena);
3055 context->total.per_tuple += get_func_cost(iofunc) * cpu_operator_cost;
3057 else if (IsA(node, ArrayCoerceExpr))
3059 ArrayCoerceExpr *acoerce = (ArrayCoerceExpr *) node;
3060 Node *arraynode = (Node *) acoerce->arg;
3062 if (OidIsValid(acoerce->elemfuncid))
3063 context->total.per_tuple += get_func_cost(acoerce->elemfuncid) *
3064 cpu_operator_cost * estimate_array_length(arraynode);
3066 else if (IsA(node, RowCompareExpr))
3068 /* Conservatively assume we will check all the columns */
3069 RowCompareExpr *rcexpr = (RowCompareExpr *) node;
3072 foreach(lc, rcexpr->opnos)
3074 Oid opid = lfirst_oid(lc);
3076 context->total.per_tuple += get_func_cost(get_opcode(opid)) * cpu_operator_cost;
3080 else if (IsA(node, CurrentOfExpr))
3082 /* Report high cost to prevent selection of anything but TID scan */
3083 context->total.startup += disable_cost;
3085 else if (IsA(node, SubLink))
3087 /* This routine should not be applied to un-planned expressions */
3088 elog(ERROR, "cannot handle unplanned sub-select");
3090 else if (IsA(node, SubPlan))
3093 * A subplan node in an expression typically indicates that the
3094 * subplan will be executed on each evaluation, so charge accordingly.
3095 * (Sub-selects that can be executed as InitPlans have already been
3096 * removed from the expression.)
3098 SubPlan *subplan = (SubPlan *) node;
3100 context->total.startup += subplan->startup_cost;
3101 context->total.per_tuple += subplan->per_call_cost;
3104 * We don't want to recurse into the testexpr, because it was already
3105 * counted in the SubPlan node's costs. So we're done.
3109 else if (IsA(node, AlternativeSubPlan))
3112 * Arbitrarily use the first alternative plan for costing. (We should
3113 * certainly only include one alternative, and we don't yet have
3114 * enough information to know which one the executor is most likely to actually use.)
3117 AlternativeSubPlan *asplan = (AlternativeSubPlan *) node;
3119 return cost_qual_eval_walker((Node *) linitial(asplan->subplans), context);
3123 /* recurse into children */
3124 return expression_tree_walker(node, cost_qual_eval_walker, (void *) context);
3130 * compute_semi_anti_join_factors
3131 * Estimate how much of the inner input a SEMI or ANTI join
3132 * can be expected to scan.
3134 * In a hash or nestloop SEMI/ANTI join, the executor will stop scanning
3135 * inner rows as soon as it finds a match to the current outer row.
3136 * We should therefore adjust some of the cost components for this effect.
3137 * This function computes some estimates needed for these adjustments.
3138 * These estimates will be the same regardless of the particular paths used
3139 * for the outer and inner relation, so we compute these once and then pass
3140 * them to all the join cost estimation functions.
3143 * outerrel: outer relation under consideration
3144 * innerrel: inner relation under consideration
3145 * jointype: must be JOIN_SEMI or JOIN_ANTI
3146 * sjinfo: SpecialJoinInfo relevant to this join
3147 * restrictlist: join quals
3148 * Output parameters:
3149 * *semifactors is filled in (see relation.h for field definitions)
3152 compute_semi_anti_join_factors(PlannerInfo *root,
3153 RelOptInfo *outerrel,
3154 RelOptInfo *innerrel,
3156 SpecialJoinInfo *sjinfo,
3158 SemiAntiJoinFactors *semifactors)
3162 Selectivity avgmatch;
3163 SpecialJoinInfo norm_sjinfo;
3167 /* Should only be called in these cases */
3168 Assert(jointype == JOIN_SEMI || jointype == JOIN_ANTI);
3171 * In an ANTI join, we must ignore clauses that are "pushed down", since
3172 * those won't affect the match logic. In a SEMI join, we do not
3173 * distinguish joinquals from "pushed down" quals, so just use the whole
3174 * restrictinfo list.
3176 if (jointype == JOIN_ANTI)
3179 foreach(l, restrictlist)
3181 RestrictInfo *rinfo = (RestrictInfo *) lfirst(l);
3183 Assert(IsA(rinfo, RestrictInfo));
3184 if (!rinfo->is_pushed_down)
3185 joinquals = lappend(joinquals, rinfo);
3189 joinquals = restrictlist;
3192 * Get the JOIN_SEMI or JOIN_ANTI selectivity of the join clauses.
3194 jselec = clauselist_selectivity(root,
3201 * Also get the normal inner-join selectivity of the join clauses.
3203 norm_sjinfo.type = T_SpecialJoinInfo;
3204 norm_sjinfo.min_lefthand = outerrel->relids;
3205 norm_sjinfo.min_righthand = innerrel->relids;
3206 norm_sjinfo.syn_lefthand = outerrel->relids;
3207 norm_sjinfo.syn_righthand = innerrel->relids;
3208 norm_sjinfo.jointype = JOIN_INNER;
3209 /* we don't bother trying to make the remaining fields valid */
3210 norm_sjinfo.lhs_strict = false;
3211 norm_sjinfo.delay_upper_joins = false;
3212 norm_sjinfo.join_quals = NIL;
3214 nselec = clauselist_selectivity(root,
3220 /* Avoid leaking a lot of ListCells */
3221 if (jointype == JOIN_ANTI)
3222 list_free(joinquals);
3225 * jselec can be interpreted as the fraction of outer-rel rows that have
3226 * any matches (this is true for both SEMI and ANTI cases). And nselec is
3227 * the fraction of the Cartesian product that matches. So, the average
3228 * number of matches for each outer-rel row that has at least one match is
3229 * nselec * inner_rows / jselec.
3231 * Note: it is correct to use the inner rel's "rows" count here, even
3232 * though we might later be considering a parameterized inner path with
3233 * fewer rows. This is because we have included all the join clauses
3234 * in the selectivity estimate.
3236 if (jselec > 0) /* protect against zero divide */
3238 avgmatch = nselec * innerrel->rows / jselec;
3239 /* Clamp to sane range */
3240 avgmatch = Max(1.0, avgmatch);
3245 semifactors->outer_match_frac = jselec;
3246 semifactors->match_count = avgmatch;
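/*
 * Worked example (hypothetical selectivities): if jselec = 0.2 (20% of outer
 * rows have at least one match), nselec = 0.002 and the inner rel has 1,000
 * rows, then avgmatch = 0.002 * 1000 / 0.2 = 10, i.e. a matched outer row is
 * expected to have about 10 inner matches.
 */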
3250 * has_indexed_join_quals
3251 * Check whether all the joinquals of a nestloop join are used as
3252 * inner index quals.
3254 * If the inner path of a SEMI/ANTI join is an indexscan (including bitmap
3255 * indexscan) that uses all the joinquals as indexquals, we can assume that an
3256 * unmatched outer tuple is cheap to process, whereas otherwise it's probably quite expensive.
3260 has_indexed_join_quals(NestPath *path, List *joinclauses)
3262 NodeTag pathtype = path->innerjoinpath->pathtype;
3264 if (pathtype == T_IndexScan ||
3265 pathtype == T_IndexOnlyScan ||
3266 pathtype == T_BitmapHeapScan)
3268 if (path->joinrestrictinfo != NIL)
3270 /* OK if all those clauses were found to be redundant */
3271 return (joinclauses == NIL);
3275 /* a clauseless join does NOT qualify */
3282 * If it's not a simple indexscan, it probably doesn't run quickly for
3283 * zero rows out, even if it's a parameterized path using all the joinquals.
3292 * approx_tuple_count
3293 * Quick-and-dirty estimation of the number of join rows passing
3294 * a set of qual conditions.
3296 * The quals can be either an implicitly-ANDed list of boolean expressions,
3297 * or a list of RestrictInfo nodes (typically the latter).
3299 * We intentionally compute the selectivity under JOIN_INNER rules, even
3300 * if it's some type of outer join. This is appropriate because we are
3301 * trying to figure out how many tuples pass the initial merge or hash join steps.
3304 * This is quick-and-dirty because we bypass clauselist_selectivity, and
3305 * simply multiply the independent clause selectivities together. Now
3306 * clauselist_selectivity often can't do any better than that anyhow, but
3307 * for some situations (such as range constraints) it is smarter. However,
3308 * we can't effectively cache the results of clauselist_selectivity, whereas
3309 * the individual clause selectivities can be and are cached.
3311 * Since we are only using the results to estimate how many potential
3312 * output tuples are generated and passed through qpqual checking, it
3313 * seems OK to live with the approximation.
3316 approx_tuple_count(PlannerInfo *root, JoinPath *path, List *quals)
3319 double outer_tuples = path->outerjoinpath->rows;
3320 double inner_tuples = path->innerjoinpath->rows;
3321 SpecialJoinInfo sjinfo;
3322 Selectivity selec = 1.0;
3326 * Make up a SpecialJoinInfo for JOIN_INNER semantics.
3328 sjinfo.type = T_SpecialJoinInfo;
3329 sjinfo.min_lefthand = path->outerjoinpath->parent->relids;
3330 sjinfo.min_righthand = path->innerjoinpath->parent->relids;
3331 sjinfo.syn_lefthand = path->outerjoinpath->parent->relids;
3332 sjinfo.syn_righthand = path->innerjoinpath->parent->relids;
3333 sjinfo.jointype = JOIN_INNER;
3334 /* we don't bother trying to make the remaining fields valid */
3335 sjinfo.lhs_strict = false;
3336 sjinfo.delay_upper_joins = false;
3337 sjinfo.join_quals = NIL;
3339 /* Get the approximate selectivity */
3342 Node *qual = (Node *) lfirst(l);
3344 /* Note that clause_selectivity will be able to cache its result */
3345 selec *= clause_selectivity(root, qual, 0, JOIN_INNER, &sjinfo);
3348 /* Apply it to the input relation sizes */
3349 tuples = selec * outer_tuples * inner_tuples;
3351 return clamp_row_est(tuples);
3356 * set_baserel_size_estimates
3357 * Set the size estimates for the given base relation.
3359 * The rel's targetlist and restrictinfo list must have been constructed
3360 * already, and rel->tuples must be set.
3362 * We set the following fields of the rel node:
3363 * rows: the estimated number of output tuples (after applying
3364 * restriction clauses).
3365 * width: the estimated average output tuple width in bytes.
3366 * baserestrictcost: estimated cost of evaluating baserestrictinfo clauses.
3369 set_baserel_size_estimates(PlannerInfo *root, RelOptInfo *rel)
3373 /* Should only be applied to base relations */
3374 Assert(rel->relid > 0);
3376 nrows = rel->tuples *
3377 clauselist_selectivity(root,
3378 rel->baserestrictinfo,
3383 rel->rows = clamp_row_est(nrows);
3385 cost_qual_eval(&rel->baserestrictcost, rel->baserestrictinfo, root);
3387 set_rel_width(root, rel);
3391 * set_joinrel_size_estimates
3392 * Set the size estimates for the given join relation.
3394 * The rel's targetlist must have been constructed already, and a
3395 * restriction clause list that matches the given component rels must
3398 * Since there is more than one way to make a joinrel for more than two
3399 * base relations, the results we get here could depend on which component
3400 * rel pair is provided. In theory we should get the same answers no matter
3401 * which pair is provided; in practice, since the selectivity estimation
3402 * routines don't handle all cases equally well, we might not. But there's
3403 * not much to be done about it. (Would it make sense to repeat the
3404 * calculations for each pair of input rels that's encountered, and somehow
3405 * average the results? Probably way more trouble than it's worth.)
3407 * We set only the rows field here. The width field was already set by
3408 * build_joinrel_tlist, and baserestrictcost is not used for join rels.
3411 set_joinrel_size_estimates(PlannerInfo *root, RelOptInfo *rel,
3412 RelOptInfo *outer_rel,
3413 RelOptInfo *inner_rel,
3414 SpecialJoinInfo *sjinfo,
3417 rel->rows = calc_joinrel_size_estimate(root, outer_rel->rows, inner_rel->rows, sjinfo, restrictlist);
3425 * set_joinpath_size_estimate
3426 * Set the rows estimate for the given join path.
3428 * If the join is not parameterized by any joinclauses from higher joins, the
3429 * estimate is the same as previously computed by set_joinrel_size_estimates.
3430 * Otherwise, we estimate afresh using the identical logic, but with the rows
3431 * estimates from the input paths (which are typically less than their rels'
3432 * regular row estimates) and the restriction clauses actually being applied to the join.
3436 set_joinpath_size_estimate(PlannerInfo *root, JoinPath *path,
3437 SpecialJoinInfo *sjinfo,
3440 if (path->path.required_outer)
3442 path->path.rows = calc_joinrel_size_estimate(root,
3443 path->outerjoinpath->rows,
3444 path->innerjoinpath->rows, sjinfo, restrictlist);
3447 /* For safety, make sure result is not more than the base estimate */
3448 if (path->path.rows > path->path.parent->rows)
3449 path->path.rows = path->path.parent->rows;
3452 path->path.rows = path->path.parent->rows;
/*
 * calc_joinrel_size_estimate
 *		Workhorse for set_joinrel_size_estimates and set_joinpath_size_estimate
 */
static double
calc_joinrel_size_estimate(PlannerInfo *root,
						   double outer_rows,
						   double inner_rows,
						   SpecialJoinInfo *sjinfo,
						   List *restrictlist)
{
	JoinType	jointype = sjinfo->jointype;
	Selectivity jselec;
	Selectivity pselec;
	double		nrows;

	/*
	 * Compute joinclause selectivity.  Note that we are only considering
	 * clauses that become restriction clauses at this join level; we are not
	 * double-counting them because they were not considered in estimating
	 * the sizes of the component rels.
	 *
	 * For an outer join, we have to distinguish the selectivity of the
	 * join's own clauses (JOIN/ON conditions) from any clauses that were
	 * "pushed down".  For inner joins we just count them all as joinclauses.
	 */
	if (IS_OUTER_JOIN(jointype))
	{
		List	   *joinquals = NIL;
		List	   *pushedquals = NIL;
		ListCell   *l;

		/* Grovel through the clauses to separate into two lists */
		foreach(l, restrictlist)
		{
			RestrictInfo *rinfo = (RestrictInfo *) lfirst(l);

			Assert(IsA(rinfo, RestrictInfo));
			if (rinfo->is_pushed_down)
				pushedquals = lappend(pushedquals, rinfo);
			else
				joinquals = lappend(joinquals, rinfo);
		}

		/* Get the separate selectivities */
		jselec = clauselist_selectivity(root, joinquals, 0,
										jointype, sjinfo);
		pselec = clauselist_selectivity(root, pushedquals, 0,
										jointype, sjinfo);

		/* Avoid leaking a lot of ListCells */
		list_free(joinquals);
		list_free(pushedquals);
	}
	else
	{
		jselec = clauselist_selectivity(root, restrictlist, 0,
										jointype, sjinfo);
		pselec = 0.0;			/* not used, keep compiler quiet */
	}

	/*
	 * Basically, we multiply size of Cartesian product by selectivity.
	 *
	 * If we are doing an outer join, take that into account: the joinqual
	 * selectivity has to be clamped using the knowledge that the output must
	 * be at least as large as the non-nullable input.  However, any
	 * pushed-down quals are applied after the outer join, so their
	 * selectivity applies fully.
	 *
	 * For JOIN_SEMI and JOIN_ANTI, the selectivity is defined as the fraction
	 * of LHS rows that have matches, and we apply that straightforwardly.
	 */
	switch (jointype)
	{
		case JOIN_INNER:
			nrows = outer_rows * inner_rows * jselec;
			break;
		case JOIN_LEFT:
			nrows = outer_rows * inner_rows * jselec;
			if (nrows < outer_rows)
				nrows = outer_rows;
			nrows *= pselec;
			break;
		case JOIN_FULL:
			nrows = outer_rows * inner_rows * jselec;
			if (nrows < outer_rows)
				nrows = outer_rows;
			if (nrows < inner_rows)
				nrows = inner_rows;
			nrows *= pselec;
			break;
		case JOIN_SEMI:
			nrows = outer_rows * jselec;
			/* pselec not used */
			break;
		case JOIN_ANTI:
			nrows = outer_rows * (1.0 - jselec);
			nrows *= pselec;
			break;
		default:
			/* other values not expected here */
			elog(ERROR, "unrecognized join type: %d", (int) jointype);
			nrows = 0;			/* keep compiler quiet */
			break;
	}

	return clamp_row_est(nrows);
}

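/*
 * Worked example (editorial illustration, not part of the planner code):
 * for a LEFT join with outer_rows = 1000, inner_rows = 100, a join-clause
 * selectivity jselec = 0.0005 and a pushed-down-qual selectivity
 * pselec = 0.5, the raw product is 1000 * 100 * 0.0005 = 50.  Because an
 * outer join must emit at least one row per outer row, the estimate is
 * clamped up to 1000 before the pushed-down selectivity is applied,
 * giving 1000 * 0.5 = 500 rows.
 */
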
/*
 * set_subquery_size_estimates
 *		Set the size estimates for a base relation that is a subquery.
 *
 * The rel's targetlist and restrictinfo list must have been constructed
 * already, and the plan for the subquery must have been completed.
 * We look at the subquery's plan and PlannerInfo to extract data.
 *
 * We set the same fields as set_baserel_size_estimates.
 */
void
set_subquery_size_estimates(PlannerInfo *root, RelOptInfo *rel)
{
	PlannerInfo *subroot = rel->subroot;
	RangeTblEntry *rte PG_USED_FOR_ASSERTS_ONLY;
	ListCell   *lc;

	/* Should only be applied to base relations that are subqueries */
	Assert(rel->relid > 0);
	rte = planner_rt_fetch(rel->relid, root);
	Assert(rte->rtekind == RTE_SUBQUERY);

	/* Copy raw number of output rows from subplan */
	rel->tuples = rel->subplan->plan_rows;

	/*
	 * Compute per-output-column width estimates by examining the subquery's
	 * targetlist.  For any output that is a plain Var, get the width estimate
	 * that was made while planning the subquery.  Otherwise, we leave it to
	 * set_rel_width to fill in a datatype-based default estimate.
	 */
	foreach(lc, subroot->parse->targetList)
	{
		TargetEntry *te = (TargetEntry *) lfirst(lc);
		Node	   *texpr = (Node *) te->expr;
		int32		item_width = 0;

		Assert(IsA(te, TargetEntry));
		/* junk columns aren't visible to upper query */
		if (te->resjunk)
			continue;

		/*
		 * XXX This currently doesn't work for subqueries containing set
		 * operations, because the Vars in their tlists are bogus references
		 * to the first leaf subquery, which wouldn't give the right answer
		 * even if we could still get to its PlannerInfo.
		 *
		 * Also, the subquery could be an appendrel for which all branches are
		 * known empty due to constraint exclusion, in which case
		 * set_append_rel_pathlist will have left the attr_widths set to zero.
		 *
		 * In either case, we just leave the width estimate zero until
		 * set_rel_width fixes it.
		 */
		if (IsA(texpr, Var) &&
			subroot->parse->setOperations == NULL)
		{
			Var		   *var = (Var *) texpr;
			RelOptInfo *subrel = find_base_rel(subroot, var->varno);

			item_width = subrel->attr_widths[var->varattno - subrel->min_attr];
		}
		Assert(te->resno >= rel->min_attr && te->resno <= rel->max_attr);
		rel->attr_widths[te->resno - rel->min_attr] = item_width;
	}

	/* Now estimate number of output rows, etc */
	set_baserel_size_estimates(root, rel);
}

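/*
 * Editorial illustration (hypothetical subquery): for a subquery such as
 * "SELECT id, md5(note) FROM t", the "id" output is a plain Var, so its
 * width is copied from the attr_widths cached while planning the subquery;
 * the md5() expression is not a Var, so its width is left at zero here and
 * later filled in by set_rel_width from a datatype-based estimate.
 */
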
/*
 * set_function_size_estimates
 *		Set the size estimates for a base relation that is a function call.
 *
 * The rel's targetlist and restrictinfo list must have been constructed
 * already.
 *
 * We set the same fields as set_baserel_size_estimates.
 */
void
set_function_size_estimates(PlannerInfo *root, RelOptInfo *rel)
{
	RangeTblEntry *rte;

	/* Should only be applied to base relations that are functions */
	Assert(rel->relid > 0);
	rte = planner_rt_fetch(rel->relid, root);
	Assert(rte->rtekind == RTE_FUNCTION);

	/* Estimate number of rows the function itself will return */
	rel->tuples = clamp_row_est(expression_returns_set_rows(rte->funcexpr));

	/* Now estimate number of output rows, etc */
	set_baserel_size_estimates(root, rel);
}

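/*
 * Editorial illustration: for a set-returning function declared with
 * "ROWS 42" in CREATE FUNCTION, expression_returns_set_rows() reports 42,
 * so rel->tuples becomes 42; set-returning functions that don't specify
 * ROWS default to an estimate of 1000.
 */
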
/*
 * set_values_size_estimates
 *		Set the size estimates for a base relation that is a values list.
 *
 * The rel's targetlist and restrictinfo list must have been constructed
 * already.
 *
 * We set the same fields as set_baserel_size_estimates.
 */
void
set_values_size_estimates(PlannerInfo *root, RelOptInfo *rel)
{
	RangeTblEntry *rte;

	/* Should only be applied to base relations that are values lists */
	Assert(rel->relid > 0);
	rte = planner_rt_fetch(rel->relid, root);
	Assert(rte->rtekind == RTE_VALUES);

	/*
	 * Estimate number of rows the values list will return.  We know this
	 * precisely based on the list length (well, barring set-returning
	 * functions in list items, but that's a refinement not catered for
	 * anywhere else either).
	 */
	rel->tuples = list_length(rte->values_lists);

	/* Now estimate number of output rows, etc */
	set_baserel_size_estimates(root, rel);
}

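/*
 * Editorial illustration: for "VALUES (1,'a'), (2,'b'), (3,'c')" the
 * values_lists has three items, so rel->tuples is set to 3.
 */
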
/*
 * set_cte_size_estimates
 *		Set the size estimates for a base relation that is a CTE reference.
 *
 * The rel's targetlist and restrictinfo list must have been constructed
 * already, and we need the completed plan for the CTE (if a regular CTE)
 * or the non-recursive term (if a self-reference).
 *
 * We set the same fields as set_baserel_size_estimates.
 */
void
set_cte_size_estimates(PlannerInfo *root, RelOptInfo *rel, Plan *cteplan)
{
	RangeTblEntry *rte;

	/* Should only be applied to base relations that are CTE references */
	Assert(rel->relid > 0);
	rte = planner_rt_fetch(rel->relid, root);
	Assert(rte->rtekind == RTE_CTE);

	if (rte->self_reference)
	{
		/*
		 * In a self-reference, arbitrarily assume the average worktable size
		 * is about 10 times the nonrecursive term's size.
		 */
		rel->tuples = 10 * cteplan->plan_rows;
	}
	else
	{
		/* Otherwise just believe the CTE plan's output estimate */
		rel->tuples = cteplan->plan_rows;
	}

	/* Now estimate number of output rows, etc */
	set_baserel_size_estimates(root, rel);
}

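/*
 * Editorial illustration: in a WITH RECURSIVE query whose non-recursive
 * term is estimated to produce 10 rows, a self-reference is assumed to
 * scan a worktable of about 100 rows; a plain (non-recursive) CTE
 * reference simply inherits its plan's row estimate.
 */
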
/*
 * set_foreign_size_estimates
 *		Set the size estimates for a base relation that is a foreign table.
 *
 * There is not a whole lot that we can do here; the foreign-data wrapper
 * is responsible for producing useful estimates.  We can do a decent job
 * of estimating baserestrictcost, so we set that, and we also set up width
 * using what will be purely datatype-driven estimates from the targetlist.
 * There is no way to do anything sane with the rows value, so we just put
 * a default estimate and hope that the wrapper can improve on it.  The
 * wrapper's GetForeignRelSize function will be called momentarily.
 *
 * The rel's targetlist and restrictinfo list must have been constructed
 * already.
 */
void
set_foreign_size_estimates(PlannerInfo *root, RelOptInfo *rel)
{
	/* Should only be applied to base relations */
	Assert(rel->relid > 0);

	rel->rows = 1000;			/* entirely bogus default estimate */

	cost_qual_eval(&rel->baserestrictcost, rel->baserestrictinfo, root);

	set_rel_width(root, rel);
}

/*
 * set_rel_width
 *		Set the estimated output width of a base relation.
 *
 * The estimated output width is the sum of the per-attribute width estimates
 * for the actually-referenced columns, plus any PHVs or other expressions
 * that have to be calculated at this relation.  This is the amount of data
 * we'd need to pass upwards in case of a sort, hash, etc.
 *
 * NB: this works best on plain relations because it prefers to look at
 * real Vars.  For subqueries, set_subquery_size_estimates will already have
 * copied up whatever per-column estimates were made within the subquery,
 * and for other types of rels there isn't much we can do anyway.  We fall
 * back on (fairly stupid) datatype-based width estimates if we can't get
 * any better number.
 *
 * The per-attribute width estimates are cached for possible re-use while
 * building join relations.
 */
static void
set_rel_width(PlannerInfo *root, RelOptInfo *rel)
{
	Oid			reloid = planner_rt_fetch(rel->relid, root)->relid;
	int32		tuple_width = 0;
	bool		have_wholerow_var = false;
	ListCell   *lc;

	foreach(lc, rel->reltargetlist)
	{
		Node	   *node = (Node *) lfirst(lc);

		if (IsA(node, Var))
		{
			Var		   *var = (Var *) node;
			int			ndx;
			int32		item_width;

			Assert(var->varno == rel->relid);
			Assert(var->varattno >= rel->min_attr);
			Assert(var->varattno <= rel->max_attr);

			ndx = var->varattno - rel->min_attr;

			/*
			 * If it's a whole-row Var, we'll deal with it below after we
			 * have already cached as many attr widths as possible.
			 */
			if (var->varattno == 0)
			{
				have_wholerow_var = true;
				continue;
			}

			/*
			 * The width may have been cached already (especially if it's a
			 * subquery), so don't duplicate effort.
			 */
			if (rel->attr_widths[ndx] > 0)
			{
				tuple_width += rel->attr_widths[ndx];
				continue;
			}

			/* Try to get column width from statistics */
			if (reloid != InvalidOid && var->varattno > 0)
			{
				item_width = get_attavgwidth(reloid, var->varattno);
				if (item_width > 0)
				{
					rel->attr_widths[ndx] = item_width;
					tuple_width += item_width;
					continue;
				}
			}

			/*
			 * Not a plain relation, or can't find statistics for it.
			 * Estimate using just the type info.
			 */
			item_width = get_typavgwidth(var->vartype, var->vartypmod);
			Assert(item_width > 0);
			rel->attr_widths[ndx] = item_width;
			tuple_width += item_width;
		}
		else if (IsA(node, PlaceHolderVar))
		{
			PlaceHolderVar *phv = (PlaceHolderVar *) node;
			PlaceHolderInfo *phinfo = find_placeholder_info(root, phv, false);

			tuple_width += phinfo->ph_width;
		}
		else
		{
			/*
			 * We could be looking at an expression pulled up from a
			 * subquery, or a ROW() representing a whole-row child Var, etc.
			 * Do what we can using the expression type information.
			 */
			int32		item_width;

			item_width = get_typavgwidth(exprType(node), exprTypmod(node));
			Assert(item_width > 0);
			tuple_width += item_width;
		}
	}

	/*
	 * If we have a whole-row reference, estimate its width as the sum of
	 * per-column widths plus sizeof(HeapTupleHeaderData).
	 */
	if (have_wholerow_var)
	{
		int32		wholerow_width = sizeof(HeapTupleHeaderData);

		if (reloid != InvalidOid)
		{
			/* Real relation, so estimate true tuple width */
			wholerow_width += get_relation_data_width(reloid,
										   rel->attr_widths - rel->min_attr);
		}
		else
		{
			/* Do what we can with info for a phony rel */
			AttrNumber	i;

			for (i = 1; i <= rel->max_attr; i++)
				wholerow_width += rel->attr_widths[i - rel->min_attr];
		}

		rel->attr_widths[0 - rel->min_attr] = wholerow_width;

		/*
		 * Include the whole-row Var as part of the output tuple.  Yes, that
		 * really is what happens at runtime.
		 */
		tuple_width += wholerow_width;
	}

	Assert(tuple_width >= 0);
	rel->width = tuple_width;
}

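/*
 * Editorial illustration (hypothetical statistics): if the targetlist
 * references an int4 column whose pg_statistic average width is 4 and a
 * text column whose average width is 20, rel->width is estimated as 24.
 * A whole-row Var on the same relation would additionally contribute
 * sizeof(HeapTupleHeaderData) plus the summed per-column widths.
 */
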
/*
 * relation_byte_size
 *	  Estimate the storage space in bytes for a given number of tuples
 *	  of a given width (size in bytes).
 */
static double
relation_byte_size(double tuples, int width)
{
	return tuples * (MAXALIGN(width) + MAXALIGN(sizeof(HeapTupleHeaderData)));
}

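/*
 * Worked example (editorial illustration): assuming 8-byte MAXALIGN and
 * the usual 23-byte heap tuple header, a tuple of width 100 is costed as
 * MAXALIGN(100) + MAXALIGN(23) = 104 + 24 = 128 bytes, so 1000 such
 * tuples are estimated at 128,000 bytes.
 */
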
/*
 * page_size
 *	  Returns an estimate of the number of pages covered by a given
 *	  number of tuples of a given width (size in bytes).
 */
static double
page_size(double tuples, int width)
{
	return ceil(relation_byte_size(tuples, width) / BLCKSZ);
}
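
/*
 * Worked example (editorial illustration): continuing the numbers above
 * with the default BLCKSZ of 8192, 1000 tuples of width 100 occupy an
 * estimated 128,000 bytes, and ceil(128000 / 8192) = 16 pages.
 */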