1 /*-------------------------------------------------------------------------
4 * Selectivity functions and index cost estimation functions for
5 * standard operators and index access methods.
7 * Selectivity routines are registered in the pg_operator catalog
8 * in the "oprrest" and "oprjoin" attributes.
10 * Index cost functions are registered in the pg_am catalog
11 * in the "amcostestimate" attribute.
13 * Portions Copyright (c) 1996-2011, PostgreSQL Global Development Group
14 * Portions Copyright (c) 1994, Regents of the University of California
18 * src/backend/utils/adt/selfuncs.c
20 *-------------------------------------------------------------------------
24 * Operator selectivity estimation functions are called to estimate the
25 * selectivity of WHERE clauses whose top-level operator is their operator.
26 * We divide the problem into two cases:
27 * Restriction clause estimation: the clause involves vars of just
29 * Join clause estimation: the clause involves vars of multiple rels.
30 * Join selectivity estimation is far more difficult and usually less accurate
31 * than restriction estimation.
33 * When dealing with the inner scan of a nestloop join, we consider the
34 * join's joinclauses as restriction clauses for the inner relation, and
35 * treat vars of the outer relation as parameters (a/k/a constants of unknown
36 * values). So, restriction estimators need to be able to accept an argument
37 * telling which relation is to be treated as the variable.
39 * The call convention for a restriction estimator (oprrest function) is
41 * Selectivity oprrest (PlannerInfo *root,
46 * root: general information about the query (rtable and RelOptInfo lists
47 * are particularly important for the estimator).
48 * operator: OID of the specific operator in question.
49 * args: argument list from the operator clause.
50 * varRelid: if not zero, the relid (rtable index) of the relation to
51 * be treated as the variable relation. May be zero if the args list
52 * is known to contain vars of only one relation.
54 * This is represented at the SQL level (in pg_proc) as
56 * float8 oprrest (internal, oid, internal, int4);
58 * The result is a selectivity, that is, a fraction (0 to 1) of the rows
59 * of the relation that are expected to produce a TRUE result for the
62 * The call convention for a join estimator (oprjoin function) is similar
63 * except that varRelid is not needed, and instead join information is
66 * Selectivity oprjoin (PlannerInfo *root,
70 * SpecialJoinInfo *sjinfo);
72 * float8 oprjoin (internal, oid, internal, int2, internal);
74 * (Before Postgres 8.4, join estimators had only the first four of these
75 * parameters. That signature is still allowed, but deprecated.) The
76 * relationship between jointype and sjinfo is explained in the comments for
77 * clause_selectivity() --- the short version is that jointype is usually
78 * best ignored in favor of examining sjinfo.
80 * Join selectivity for regular inner and outer joins is defined as the
81 * fraction (0 to 1) of the cross product of the relations that is expected
82 * to produce a TRUE result for the given operator. For both semi and anti
83 * joins, however, the selectivity is defined as the fraction of the left-hand
84 * side relation's rows that are expected to have a match (ie, at least one
85 * row with a TRUE result) in the right-hand side.
94 #include "access/gin.h"
95 #include "access/sysattr.h"
96 #include "catalog/index.h"
97 #include "catalog/pg_collation.h"
98 #include "catalog/pg_opfamily.h"
99 #include "catalog/pg_statistic.h"
100 #include "catalog/pg_type.h"
101 #include "executor/executor.h"
102 #include "mb/pg_wchar.h"
103 #include "nodes/makefuncs.h"
104 #include "nodes/nodeFuncs.h"
105 #include "optimizer/clauses.h"
106 #include "optimizer/cost.h"
107 #include "optimizer/pathnode.h"
108 #include "optimizer/paths.h"
109 #include "optimizer/plancat.h"
110 #include "optimizer/predtest.h"
111 #include "optimizer/restrictinfo.h"
112 #include "optimizer/var.h"
113 #include "parser/parse_coerce.h"
114 #include "parser/parsetree.h"
115 #include "utils/builtins.h"
116 #include "utils/bytea.h"
117 #include "utils/date.h"
118 #include "utils/datum.h"
119 #include "utils/fmgroids.h"
120 #include "utils/lsyscache.h"
121 #include "utils/nabstime.h"
122 #include "utils/pg_locale.h"
123 #include "utils/selfuncs.h"
124 #include "utils/spccache.h"
125 #include "utils/syscache.h"
126 #include "utils/tqual.h"
129 /* Hooks for plugins to get control when we ask for stats */
/* NOTE(review): presumably consulted before the regular pg_statistic lookup;
 * NULL means "use the built-in catalog lookup" — confirm against callers. */
130 get_relation_stats_hook_type get_relation_stats_hook = NULL;
131 get_index_stats_hook_type get_index_stats_hook = NULL;
/*
 * Forward declarations for the static helper routines local to this file.
 */
133 static double var_eq_const(VariableStatData *vardata, Oid operator,
134 Datum constval, bool constisnull,
136 static double var_eq_non_const(VariableStatData *vardata, Oid operator,
139 static double ineq_histogram_selectivity(PlannerInfo *root,
140 VariableStatData *vardata,
141 FmgrInfo *opproc, bool isgt,
142 Datum constval, Oid consttype);
143 static double eqjoinsel_inner(Oid operator,
144 VariableStatData *vardata1, VariableStatData *vardata2);
145 static double eqjoinsel_semi(Oid operator,
146 VariableStatData *vardata1, VariableStatData *vardata2);
147 static bool convert_to_scalar(Datum value, Oid valuetypid, double *scaledvalue,
148 Datum lobound, Datum hibound, Oid boundstypid,
149 double *scaledlobound, double *scaledhibound);
150 static double convert_numeric_to_scalar(Datum value, Oid typid);
151 static void convert_string_to_scalar(char *value,
154 double *scaledlobound,
156 double *scaledhibound);
157 static void convert_bytea_to_scalar(Datum value,
160 double *scaledlobound,
162 double *scaledhibound);
163 static double convert_one_string_to_scalar(char *value,
164 int rangelo, int rangehi);
165 static double convert_one_bytea_to_scalar(unsigned char *value, int valuelen,
166 int rangelo, int rangehi);
167 static char *convert_string_datum(Datum value, Oid typid);
168 static double convert_timevalue_to_scalar(Datum value, Oid typid);
169 static bool get_variable_range(PlannerInfo *root, VariableStatData *vardata,
170 Oid sortop, Datum *min, Datum *max);
171 static bool get_actual_variable_range(PlannerInfo *root,
172 VariableStatData *vardata,
174 Datum *min, Datum *max);
175 static Selectivity prefix_selectivity(PlannerInfo *root,
176 VariableStatData *vardata,
177 Oid vartype, Oid opfamily, Const *prefixcon);
178 static Selectivity pattern_selectivity(Const *patt, Pattern_Type ptype);
179 static Datum string_to_datum(const char *str, Oid datatype);
180 static Const *string_to_const(const char *str, Oid datatype);
181 static Const *string_to_bytea_const(const char *str, size_t str_len);
185 * eqsel - Selectivity of "=" for any data types.
187 * Note: this routine is also used to estimate selectivity for some
188 * operators that are not "=" but have comparable selectivity behavior,
189 * such as "~=" (geometric approximate-match). Even for "=", we must
190 * keep in mind that the left and right datatypes may differ.
193 eqsel(PG_FUNCTION_ARGS)
/* Standard oprrest argument list — root, operator OID, clause args,
 * varRelid — as described in the file header comments. */
195 PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
196 Oid operator = PG_GETARG_OID(1);
197 List *args = (List *) PG_GETARG_POINTER(2);
198 int varRelid = PG_GETARG_INT32(3);
199 VariableStatData vardata;
205 * If expression is not variable = something or something = variable, then
206 * punt and return a default estimate.
208 if (!get_restriction_variable(root, args, varRelid,
209 &vardata, &other, &varonleft))
210 PG_RETURN_FLOAT8(DEFAULT_EQ_SEL);
213 * We can do a lot better if the something is a constant. (Note: the
214 * Const might result from estimation rather than being a simple constant
217 if (IsA(other, Const))
218 selec = var_eq_const(&vardata, operator,
219 ((Const *) other)->constvalue,
220 ((Const *) other)->constisnull,
/* non-Const comparison value: use the averaged-over-all-values estimate */
223 selec = var_eq_non_const(&vardata, operator, other,
/* release statistics lookup state acquired by get_restriction_variable */
226 ReleaseVariableStats(vardata);
228 PG_RETURN_FLOAT8((float8) selec);
232 * var_eq_const --- eqsel for var = const case
234 * This is split out so that some other estimation functions can use it.
237 var_eq_const(VariableStatData *vardata, Oid operator,
238 Datum constval, bool constisnull,
244 * If the constant is NULL, assume operator is strict and return zero, ie,
245 * operator will never return TRUE.
251 * If we matched the var to a unique index, assume there is exactly one
252 * match regardless of anything else. (This is slightly bogus, since the
253 * index's equality operator might be different from ours, but it's more
254 * likely to be right than ignoring the information.)
256 if (vardata->isunique && vardata->rel && vardata->rel->tuples >= 1.0)
257 return 1.0 / vardata->rel->tuples;
259 if (HeapTupleIsValid(vardata->statsTuple))
261 Form_pg_statistic stats;
269 stats = (Form_pg_statistic) GETSTRUCT(vardata->statsTuple);
272 * Is the constant "=" to any of the column's most common values?
273 * (Although the given operator may not really be "=", we will assume
274 * that seeing whether it returns TRUE is an appropriate test. If you
275 * don't like this, maybe you shouldn't be using eqsel for your
278 if (get_attstatsslot(vardata->statsTuple,
279 vardata->atttype, vardata->atttypmod,
280 STATISTIC_KIND_MCV, InvalidOid,
283 &numbers, &nnumbers))
287 fmgr_info(get_opcode(operator), &eqproc);
288 fmgr_info_set_collation(DEFAULT_COLLATION_OID, &eqproc);
/* probe each MCV entry with the (assumed "=") operator */
290 for (i = 0; i < nvalues; i++)
292 /* be careful to apply operator right way 'round */
294 match = DatumGetBool(FunctionCall2(&eqproc,
298 match = DatumGetBool(FunctionCall2(&eqproc,
307 /* no most-common-value info available */
310 i = nvalues = nnumbers = 0;
316 * Constant is "=" to this common value. We know selectivity
317 * exactly (or as exactly as ANALYZE could calculate it, anyway).
324 * Comparison is against a constant that is neither NULL nor any
325 * of the common values. Its selectivity cannot be more than
328 double sumcommon = 0.0;
329 double otherdistinct;
331 for (i = 0; i < nnumbers; i++)
332 sumcommon += numbers[i];
/* selec starts as the fraction of rows that are neither NULL nor MCVs */
333 selec = 1.0 - sumcommon - stats->stanullfrac;
334 CLAMP_PROBABILITY(selec);
337 * and in fact it's probably a good deal less. We approximate that
338 * all the not-common values share this remaining fraction
339 * equally, so we divide by the number of other distinct values.
341 otherdistinct = get_variable_numdistinct(vardata) - nnumbers;
342 if (otherdistinct > 1)
343 selec /= otherdistinct;
346 * Another cross-check: selectivity shouldn't be estimated as more
347 * than the least common "most common value".
349 if (nnumbers > 0 && selec > numbers[nnumbers - 1])
350 selec = numbers[nnumbers - 1];
/* free the MCV slot data obtained above */
353 free_attstatsslot(vardata->atttype, values, nvalues,
359 * No ANALYZE stats available, so make a guess using estimated number
360 * of distinct values and assuming they are equally common. (The guess
361 * is unlikely to be very good, but we do know a few special cases.)
363 selec = 1.0 / get_variable_numdistinct(vardata);
366 /* result should be in range, but make sure... */
367 CLAMP_PROBABILITY(selec);
373 * var_eq_non_const --- eqsel for var = something-other-than-const case
/* Like var_eq_const, but the comparison value is unknown at plan time,
 * so we estimate an average over all possible non-null values. */
376 var_eq_non_const(VariableStatData *vardata, Oid operator,
383 * If we matched the var to a unique index, assume there is exactly one
384 * match regardless of anything else. (This is slightly bogus, since the
385 * index's equality operator might be different from ours, but it's more
386 * likely to be right than ignoring the information.)
388 if (vardata->isunique && vardata->rel && vardata->rel->tuples >= 1.0)
389 return 1.0 / vardata->rel->tuples;
391 if (HeapTupleIsValid(vardata->statsTuple))
393 Form_pg_statistic stats;
398 stats = (Form_pg_statistic) GETSTRUCT(vardata->statsTuple);
401 * Search is for a value that we do not know a priori, but we will
402 * assume it is not NULL. Estimate the selectivity as non-null
403 * fraction divided by number of distinct values, so that we get a
404 * result averaged over all possible values whether common or
405 * uncommon. (Essentially, we are assuming that the not-yet-known
406 * comparison value is equally likely to be any of the possible
407 * values, regardless of their frequency in the table. Is that a good
410 selec = 1.0 - stats->stanullfrac;
411 ndistinct = get_variable_numdistinct(vardata);
416 * Cross-check: selectivity should never be estimated as more than the
417 * most common value's.
419 if (get_attstatsslot(vardata->statsTuple,
420 vardata->atttype, vardata->atttypmod,
421 STATISTIC_KIND_MCV, InvalidOid,
424 &numbers, &nnumbers))
/* numbers[0] is the frequency of the most common value */
426 if (nnumbers > 0 && selec > numbers[0])
428 free_attstatsslot(vardata->atttype, NULL, 0, numbers, nnumbers);
434 * No ANALYZE stats available, so make a guess using estimated number
435 * of distinct values and assuming they are equally common. (The guess
436 * is unlikely to be very good, but we do know a few special cases.)
438 selec = 1.0 / get_variable_numdistinct(vardata);
441 /* result should be in range, but make sure... */
442 CLAMP_PROBABILITY(selec);
448 * neqsel - Selectivity of "!=" for any data types.
450 * This routine is also used for some operators that are not "!="
451 * but have comparable selectivity behavior. See above comments
455 neqsel(PG_FUNCTION_ARGS)
457 PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
458 Oid operator = PG_GETARG_OID(1);
459 List *args = (List *) PG_GETARG_POINTER(2);
460 int varRelid = PG_GETARG_INT32(3);
465 * We want 1 - eqsel() where the equality operator is the one associated
466 * with this != operator, that is, its negator.
468 eqop = get_negator(operator);
/* delegate to eqsel with the same argument list but the negator operator */
471 result = DatumGetFloat8(DirectFunctionCall4(eqsel,
472 PointerGetDatum(root),
473 ObjectIdGetDatum(eqop),
474 PointerGetDatum(args),
475 Int32GetDatum(varRelid)));
479 /* Use default selectivity (should we raise an error instead?) */
480 result = DEFAULT_EQ_SEL;
/* selectivity of "!=" is the complement of its negator "=" */
482 result = 1.0 - result;
483 PG_RETURN_FLOAT8(result);
487 * scalarineqsel - Selectivity of "<", "<=", ">", ">=" for scalars.
489 * This is the guts of both scalarltsel and scalargtsel. The caller has
490 * commuted the clause, if necessary, so that we can treat the variable as
491 * being on the left. The caller must also make sure that the other side
492 * of the clause is a non-null Const, and dissect same into a value and
495 * This routine works for any datatype (or pair of datatypes) known to
496 * convert_to_scalar(). If it is applied to some other datatype,
497 * it will return a default estimate.
500 scalarineqsel(PlannerInfo *root, Oid operator, bool isgt,
501 VariableStatData *vardata, Datum constval, Oid consttype)
503 Form_pg_statistic stats;
510 if (!HeapTupleIsValid(vardata->statsTuple))
512 /* no stats available, so default result */
513 return DEFAULT_INEQ_SEL;
515 stats = (Form_pg_statistic) GETSTRUCT(vardata->statsTuple);
/* look up the operator's underlying function for direct invocation */
517 fmgr_info(get_opcode(operator), &opproc);
518 fmgr_info_set_collation(DEFAULT_COLLATION_OID, &opproc);
521 * If we have most-common-values info, add up the fractions of the MCV
522 * entries that satisfy MCV OP CONST. These fractions contribute directly
523 * to the result selectivity. Also add up the total fraction represented
526 mcv_selec = mcv_selectivity(vardata, &opproc, constval, true,
530 * If there is a histogram, determine which bin the constant falls in, and
531 * compute the resulting contribution to selectivity.
533 hist_selec = ineq_histogram_selectivity(root, vardata, &opproc, isgt,
534 constval, consttype);
537 * Now merge the results from the MCV and histogram calculations,
538 * realizing that the histogram covers only the non-null values that are
541 selec = 1.0 - stats->stanullfrac - sumcommon;
/* hist_selec < 0.0 means "no histogram" (see ineq_histogram_selectivity) */
543 if (hist_selec >= 0.0)
548 * If no histogram but there are values not accounted for by MCV,
549 * arbitrarily assume half of them will match.
556 /* result should be in range, but make sure... */
557 CLAMP_PROBABILITY(selec);
563 * mcv_selectivity - Examine the MCV list for selectivity estimates
565 * Determine the fraction of the variable's MCV population that satisfies
566 * the predicate (VAR OP CONST), or (CONST OP VAR) if !varonleft. Also
567 * compute the fraction of the total column population represented by the MCV
568 * list. This code will work for any boolean-returning predicate operator.
570 * The function result is the MCV selectivity, and the fraction of the
571 * total population is returned into *sumcommonp. Zeroes are returned
572 * if there is no MCV list.
575 mcv_selectivity(VariableStatData *vardata, FmgrInfo *opproc,
576 Datum constval, bool varonleft,
590 if (HeapTupleIsValid(vardata->statsTuple) &&
591 get_attstatsslot(vardata->statsTuple,
592 vardata->atttype, vardata->atttypmod,
593 STATISTIC_KIND_MCV, InvalidOid,
596 &numbers, &nnumbers))
/* tally the frequency of each MCV entry; add to mcv_selec only when the
 * predicate evaluates to TRUE, but to sumcommon unconditionally */
598 for (i = 0; i < nvalues; i++)
601 DatumGetBool(FunctionCall2(opproc,
604 DatumGetBool(FunctionCall2(opproc,
607 mcv_selec += numbers[i];
608 sumcommon += numbers[i];
610 free_attstatsslot(vardata->atttype, values, nvalues,
614 *sumcommonp = sumcommon;
619 * histogram_selectivity - Examine the histogram for selectivity estimates
621 * Determine the fraction of the variable's histogram entries that satisfy
622 * the predicate (VAR OP CONST), or (CONST OP VAR) if !varonleft.
624 * This code will work for any boolean-returning predicate operator, whether
625 * or not it has anything to do with the histogram sort operator. We are
626 * essentially using the histogram just as a representative sample. However,
627 * small histograms are unlikely to be all that representative, so the caller
628 * should be prepared to fall back on some other estimation approach when the
629 * histogram is missing or very small. It may also be prudent to combine this
630 * approach with another one when the histogram is small.
632 * If the actual histogram size is not at least min_hist_size, we won't bother
633 * to do the calculation at all. Also, if the n_skip parameter is > 0, we
634 * ignore the first and last n_skip histogram elements, on the grounds that
635 * they are outliers and hence not very representative. Typical values for
636 * these parameters are 10 and 1.
638 * The function result is the selectivity, or -1 if there is no histogram
639 * or it's smaller than min_hist_size.
641 * The output parameter *hist_size receives the actual histogram size,
642 * or zero if no histogram. Callers may use this number to decide how
643 * much faith to put in the function result.
645 * Note that the result disregards both the most-common-values (if any) and
646 * null entries. The caller is expected to combine this result with
647 * statistics for those portions of the column population. It may also be
648 * prudent to clamp the result range, ie, disbelieve exact 0 or 1 outputs.
651 histogram_selectivity(VariableStatData *vardata, FmgrInfo *opproc,
652 Datum constval, bool varonleft,
653 int min_hist_size, int n_skip,
660 /* check sanity of parameters */
/* at least one entry must remain after skipping n_skip at each end */
662 Assert(min_hist_size > 2 * n_skip);
664 if (HeapTupleIsValid(vardata->statsTuple) &&
665 get_attstatsslot(vardata->statsTuple,
666 vardata->atttype, vardata->atttypmod,
667 STATISTIC_KIND_HISTOGRAM, InvalidOid,
672 *hist_size = nvalues;
673 if (nvalues >= min_hist_size)
/* count matches over the interior entries [n_skip, nvalues - n_skip) */
678 for (i = n_skip; i < nvalues - n_skip; i++)
681 DatumGetBool(FunctionCall2(opproc,
684 DatumGetBool(FunctionCall2(opproc,
/* selectivity = fraction of examined entries that matched */
689 result = ((double) nmatch) / ((double) (nvalues - 2 * n_skip));
693 free_attstatsslot(vardata->atttype, values, nvalues, NULL, 0);
705 * ineq_histogram_selectivity - Examine the histogram for scalarineqsel
707 * Determine the fraction of the variable's histogram population that
708 * satisfies the inequality condition, ie, VAR < CONST or VAR > CONST.
710 * Returns -1 if there is no histogram (valid results will always be >= 0).
712 * Note that the result disregards both the most-common-values (if any) and
713 * null entries. The caller is expected to combine this result with
714 * statistics for those portions of the column population.
717 ineq_histogram_selectivity(PlannerInfo *root,
718 VariableStatData *vardata,
719 FmgrInfo *opproc, bool isgt,
720 Datum constval, Oid consttype)
730 * Someday, ANALYZE might store more than one histogram per rel/att,
731 * corresponding to more than one possible sort ordering defined for the
732 * column type. However, to make that work we will need to figure out
733 * which staop to search for --- it's not necessarily the one we have at
734 * hand! (For example, we might have a '<=' operator rather than the '<'
735 * operator that will appear in staop.) For now, assume that whatever
736 * appears in pg_statistic is sorted the same way our operator sorts, or
737 * the reverse way if isgt is TRUE.
739 if (HeapTupleIsValid(vardata->statsTuple) &&
740 get_attstatsslot(vardata->statsTuple,
741 vardata->atttype, vardata->atttypmod,
742 STATISTIC_KIND_HISTOGRAM, InvalidOid,
750 * Use binary search to find proper location, ie, the first slot
751 * at which the comparison fails. (If the given operator isn't
752 * actually sort-compatible with the histogram, you'll get garbage
753 * results ... but probably not any more garbage-y than you would
754 * from the old linear search.)
756 * If the binary search accesses the first or last histogram
757 * entry, we try to replace that endpoint with the true column min
758 * or max as found by get_actual_variable_range(). This
759 * ameliorates misestimates when the min or max is moving as a
760 * result of changes since the last ANALYZE. Note that this could
761 * result in effectively including MCVs into the histogram that
762 * weren't there before, but we don't try to correct for that.
765 int lobound = 0; /* first possible slot to search */
766 int hibound = nvalues; /* last+1 slot to search */
767 bool have_end = false;
770 * If there are only two histogram entries, we'll want up-to-date
771 * values for both. (If there are more than two, we need at most
772 * one of them to be updated, so we deal with that within the
776 have_end = get_actual_variable_range(root,
/* binary search over the half-open range [lobound, hibound) */
782 while (lobound < hibound)
784 int probe = (lobound + hibound) / 2;
788 * If we find ourselves about to compare to the first or last
789 * histogram entry, first try to replace it with the actual
790 * current min or max (unless we already did so above).
792 if (probe == 0 && nvalues > 2)
793 have_end = get_actual_variable_range(root,
798 else if (probe == nvalues - 1 && nvalues > 2)
799 have_end = get_actual_variable_range(root,
/* does the probed histogram entry satisfy entry OP const? */
805 ltcmp = DatumGetBool(FunctionCall2(opproc,
818 /* Constant is below lower histogram boundary. */
821 else if (lobound >= nvalues)
823 /* Constant is above upper histogram boundary. */
835 * We have values[i-1] <= constant <= values[i].
837 * Convert the constant and the two nearest bin boundary
838 * values to a uniform comparison scale, and do a linear
839 * interpolation within this bin.
841 if (convert_to_scalar(constval, consttype, &val,
842 values[i - 1], values[i],
848 /* cope if bin boundaries appear identical */
853 else if (val >= high)
857 binfrac = (val - low) / (high - low);
860 * Watch out for the possibility that we got a NaN or
861 * Infinity from the division. This can happen
862 * despite the previous checks, if for example "low"
865 if (isnan(binfrac) ||
866 binfrac < 0.0 || binfrac > 1.0)
873 * Ideally we'd produce an error here, on the grounds that
874 * the given operator shouldn't have scalarXXsel
875 * registered as its selectivity func unless we can deal
876 * with its operand types. But currently, all manner of
877 * stuff is invoking scalarXXsel, so give a default
878 * estimate until that can be fixed.
884 * Now, compute the overall selectivity across the values
885 * represented by the histogram. We have i-1 full bins and
886 * binfrac partial bin below the constant.
888 histfrac = (double) (i - 1) + binfrac;
889 histfrac /= (double) (nvalues - 1);
893 * Now histfrac = fraction of histogram entries below the
896 * Account for "<" vs ">"
898 hist_selec = isgt ? (1.0 - histfrac) : histfrac;
901 * The histogram boundaries are only approximate to begin with,
902 * and may well be out of date anyway. Therefore, don't believe
903 * extremely small or large selectivity estimates --- unless we
904 * got actual current endpoint values from the table.
907 CLAMP_PROBABILITY(hist_selec);
910 if (hist_selec < 0.0001)
912 else if (hist_selec > 0.9999)
917 free_attstatsslot(vardata->atttype, values, nvalues, NULL, 0);
924 * scalarltsel - Selectivity of "<" (also "<=") for scalars.
927 scalarltsel(PG_FUNCTION_ARGS)
929 PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
930 Oid operator = PG_GETARG_OID(1);
931 List *args = (List *) PG_GETARG_POINTER(2);
932 int varRelid = PG_GETARG_INT32(3);
933 VariableStatData vardata;
942 * If expression is not variable op something or something op variable,
943 * then punt and return a default estimate.
945 if (!get_restriction_variable(root, args, varRelid,
946 &vardata, &other, &varonleft))
947 PG_RETURN_FLOAT8(DEFAULT_INEQ_SEL)
950 * Can't do anything useful if the something is not a constant, either.
952 if (!IsA(other, Const))
954 ReleaseVariableStats(vardata);
955 PG_RETURN_FLOAT8(DEFAULT_INEQ_SEL);
959 * If the constant is NULL, assume operator is strict and return zero, ie,
960 * operator will never return TRUE.
962 if (((Const *) other)->constisnull)
964 ReleaseVariableStats(vardata);
965 PG_RETURN_FLOAT8(0.0);
967 constval = ((Const *) other)->constvalue;
968 consttype = ((Const *) other)->consttype;
971 * Force the var to be on the left to simplify logic in scalarineqsel.
975 /* we have var < other */
980 /* we have other < var, commute to make var > other */
981 operator = get_commutator(operator);
984 /* Use default selectivity (should we raise an error instead?) */
985 ReleaseVariableStats(vardata);
986 PG_RETURN_FLOAT8(DEFAULT_INEQ_SEL);
/* all the real estimation work happens in scalarineqsel() */
991 selec = scalarineqsel(root, operator, isgt, &vardata, constval, consttype);
993 ReleaseVariableStats(vardata);
995 PG_RETURN_FLOAT8((float8) selec);
999 * scalargtsel - Selectivity of ">" (also ">=") for scalars.
1002 scalargtsel(PG_FUNCTION_ARGS)
1004 PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
1005 Oid operator = PG_GETARG_OID(1);
1006 List *args = (List *) PG_GETARG_POINTER(2);
1007 int varRelid = PG_GETARG_INT32(3);
1008 VariableStatData vardata;
1017 * If expression is not variable op something or something op variable,
1018 * then punt and return a default estimate.
1020 if (!get_restriction_variable(root, args, varRelid,
1021 &vardata, &other, &varonleft))
1022 PG_RETURN_FLOAT8(DEFAULT_INEQ_SEL)
1025 * Can't do anything useful if the something is not a constant, either.
1027 if (!IsA(other, Const))
1029 ReleaseVariableStats(vardata);
1030 PG_RETURN_FLOAT8(DEFAULT_INEQ_SEL);
1034 * If the constant is NULL, assume operator is strict and return zero, ie,
1035 * operator will never return TRUE.
1037 if (((Const *) other)->constisnull)
1039 ReleaseVariableStats(vardata);
1040 PG_RETURN_FLOAT8(0.0);
1042 constval = ((Const *) other)->constvalue;
1043 consttype = ((Const *) other)->consttype;
1046 * Force the var to be on the left to simplify logic in scalarineqsel.
1050 /* we have var > other */
1055 /* we have other > var, commute to make var < other */
1056 operator = get_commutator(operator);
1059 /* Use default selectivity (should we raise an error instead?) */
1060 ReleaseVariableStats(vardata);
1061 PG_RETURN_FLOAT8(DEFAULT_INEQ_SEL);
/* all the real estimation work happens in scalarineqsel() */
1066 selec = scalarineqsel(root, operator, isgt, &vardata, constval, consttype);
1068 ReleaseVariableStats(vardata);
1070 PG_RETURN_FLOAT8((float8) selec);
1074 * patternsel - Generic code for pattern-match selectivity.
1077 patternsel(PG_FUNCTION_ARGS, Pattern_Type ptype, bool negate)
1079 PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
1080 Oid operator = PG_GETARG_OID(1);
1081 List *args = (List *) PG_GETARG_POINTER(2);
1082 int varRelid = PG_GETARG_INT32(3);
1083 VariableStatData vardata;
1090 Pattern_Prefix_Status pstatus;
1092 Const *prefix = NULL;
1097 * If this is for a NOT LIKE or similar operator, get the corresponding
1098 * positive-match operator and work with that. Set result to the correct
1099 * default estimate, too.
1103 operator = get_negator(operator);
1104 if (!OidIsValid(operator))
1105 elog(ERROR, "patternsel called for operator without a negator");
1106 result = 1.0 - DEFAULT_MATCH_SEL;
1110 result = DEFAULT_MATCH_SEL;
1114 * If expression is not variable op constant, then punt and return a
1117 if (!get_restriction_variable(root, args, varRelid,
1118 &vardata, &other, &varonleft))
/* pattern operators are not commutable, so the var must be on the left */
1120 if (!varonleft || !IsA(other, Const))
1122 ReleaseVariableStats(vardata);
1127 * If the constant is NULL, assume operator is strict and return zero, ie,
1128 * operator will never return TRUE. (It's zero even for a negator op.)
1130 if (((Const *) other)->constisnull)
1132 ReleaseVariableStats(vardata);
1135 constval = ((Const *) other)->constvalue;
1136 consttype = ((Const *) other)->consttype;
1139 * The right-hand const is type text or bytea for all supported operators.
1140 * We do not expect to see binary-compatible types here, since
1141 * const-folding should have relabeled the const to exactly match the
1142 * operator's declared type.
1144 if (consttype != TEXTOID && consttype != BYTEAOID)
1146 ReleaseVariableStats(vardata);
1151 * Similarly, the exposed type of the left-hand side should be one of
1152 * those we know. (Do not look at vardata.atttype, which might be
1153 * something binary-compatible but different.) We can use it to choose
1154 * the index opfamily from which we must draw the comparison operators.
1156 * NOTE: It would be more correct to use the PATTERN opfamilies than the
1157 * simple ones, but at the moment ANALYZE will not generate statistics for
1158 * the PATTERN operators. But our results are so approximate anyway that
1159 * it probably hardly matters.
1161 vartype = vardata.vartype;
1166 opfamily = TEXT_BTREE_FAM_OID;
1169 opfamily = BPCHAR_BTREE_FAM_OID;
1172 opfamily = NAME_BTREE_FAM_OID;
1175 opfamily = BYTEA_BTREE_FAM_OID;
1178 ReleaseVariableStats(vardata);
1183 * Divide pattern into fixed prefix and remainder. XXX we have to assume
1184 * default collation here, because we don't have access to the actual
1185 * input collation for the operator. FIXME ...
1187 patt = (Const *) other;
1188 pstatus = pattern_fixed_prefix(patt, ptype, DEFAULT_COLLATION_OID,
1192 * If necessary, coerce the prefix constant to the right type. (The "rest"
1193 * constant need not be changed.)
1195 if (prefix && prefix->consttype != vartype)
1199 switch (prefix->consttype)
1202 prefixstr = TextDatumGetCString(prefix->constvalue);
1205 prefixstr = DatumGetCString(DirectFunctionCall1(byteaout,
1206 prefix->constvalue));
1209 elog(ERROR, "unrecognized consttype: %u",
1211 ReleaseVariableStats(vardata);
1214 prefix = string_to_const(prefixstr, vartype);
/* exact-match pattern: estimate as if it were var = prefix */
1218 if (pstatus == Pattern_Prefix_Exact)
1221 * Pattern specifies an exact match, so pretend operator is '='
1223 Oid eqopr = get_opfamily_member(opfamily, vartype, vartype,
1224 BTEqualStrategyNumber);
1226 if (eqopr == InvalidOid)
1227 elog(ERROR, "no = operator for opfamily %u", opfamily);
1228 result = var_eq_const(&vardata, eqopr, prefix->constvalue,
1234 * Not exact-match pattern. If we have a sufficiently large
1235 * histogram, estimate selectivity for the histogram part of the
1236 * population by counting matches in the histogram. If not, estimate
1237 * selectivity of the fixed prefix and remainder of pattern
1238 * separately, then combine the two to get an estimate of the
1239 * selectivity for the part of the column population represented by
1240 * the histogram. (For small histograms, we combine these
1243 * We then add up data for any most-common-values values; these are
1244 * not in the histogram population, and we can get exact answers for
1245 * them by applying the pattern operator, so there's no reason to
1246 * approximate. (If the MCVs cover a significant part of the total
1247 * population, this gives us a big leg up in accuracy.)
1256 /* Try to use the histogram entries to get selectivity */
1257 fmgr_info(get_opcode(operator), &opproc);
1258 fmgr_info_set_collation(DEFAULT_COLLATION_OID, &opproc);
1260 selec = histogram_selectivity(&vardata, &opproc, constval, true,
1263 /* If not at least 100 entries, use the heuristic method */
1264 if (hist_size < 100)
1266 Selectivity heursel;
1267 Selectivity prefixsel;
1268 Selectivity restsel;
1270 if (pstatus == Pattern_Prefix_Partial)
1271 prefixsel = prefix_selectivity(root, &vardata, vartype,
1275 restsel = pattern_selectivity(rest, ptype);
1276 heursel = prefixsel * restsel;
1278 if (selec < 0) /* fewer than 10 histogram entries? */
1283 * For histogram sizes from 10 to 100, we combine the
1284 * histogram and heuristic selectivities, putting increasingly
1285 * more trust in the histogram for larger sizes.
1287 double hist_weight = hist_size / 100.0;
1289 selec = selec * hist_weight + heursel * (1.0 - hist_weight);
1293 /* In any case, don't believe extremely small or large estimates. */
1296 else if (selec > 0.9999)
1300 * If we have most-common-values info, add up the fractions of the MCV
1301 * entries that satisfy MCV OP PATTERN. These fractions contribute
1302 * directly to the result selectivity. Also add up the total fraction
1303 * represented by MCV entries.
1305 mcv_selec = mcv_selectivity(&vardata, &opproc, constval, true,
1308 if (HeapTupleIsValid(vardata.statsTuple))
1309 nullfrac = ((Form_pg_statistic) GETSTRUCT(vardata.statsTuple))->stanullfrac;
1314 * Now merge the results from the MCV and histogram calculations,
1315 * realizing that the histogram covers only the non-null values that
1316 * are not listed in MCV.
1318 selec *= 1.0 - nullfrac - sumcommon;
1321 /* result should be in range, but make sure... */
1322 CLAMP_PROBABILITY(selec);
1328 pfree(DatumGetPointer(prefix->constvalue));
1332 ReleaseVariableStats(vardata);
/* for negated operators, return the complement of the positive-match estimate */
1334 return negate ? (1.0 - result) : result;
1338 * regexeqsel - Selectivity of regular-expression pattern match.
/*
 * fmgr-callable wrapper: delegates to patternsel() with Pattern_Type_Regex
 * and negate = false (estimates fraction of rows matching the regex).
 */
1341 regexeqsel(PG_FUNCTION_ARGS)
1343 PG_RETURN_FLOAT8(patternsel(fcinfo, Pattern_Type_Regex, false));
1347 * icregexeqsel - Selectivity of case-insensitive regex match.
/* Wrapper: patternsel() with the case-insensitive regex pattern type, negate = false. */
1350 icregexeqsel(PG_FUNCTION_ARGS)
1352 PG_RETURN_FLOAT8(patternsel(fcinfo, Pattern_Type_Regex_IC, false));
1356 * likesel - Selectivity of LIKE pattern match.
/* Wrapper: patternsel() with Pattern_Type_Like, negate = false. */
1359 likesel(PG_FUNCTION_ARGS)
1361 PG_RETURN_FLOAT8(patternsel(fcinfo, Pattern_Type_Like, false));
1365 * iclikesel - Selectivity of ILIKE pattern match.
/* Wrapper: patternsel() with Pattern_Type_Like_IC (case-insensitive LIKE), negate = false. */
1368 iclikesel(PG_FUNCTION_ARGS)
1370 PG_RETURN_FLOAT8(patternsel(fcinfo, Pattern_Type_Like_IC, false));
1374 * regexnesel - Selectivity of regular-expression pattern non-match.
/* Wrapper: patternsel() with negate = true, i.e. returns 1 - (match selectivity). */
1377 regexnesel(PG_FUNCTION_ARGS)
1379 PG_RETURN_FLOAT8(patternsel(fcinfo, Pattern_Type_Regex, true));
1383 * icregexnesel - Selectivity of case-insensitive regex non-match.
/* Wrapper: patternsel() with Pattern_Type_Regex_IC, negate = true. */
1386 icregexnesel(PG_FUNCTION_ARGS)
1388 PG_RETURN_FLOAT8(patternsel(fcinfo, Pattern_Type_Regex_IC, true));
1392 * nlikesel - Selectivity of LIKE pattern non-match.
/* Wrapper: patternsel() with Pattern_Type_Like, negate = true. */
1395 nlikesel(PG_FUNCTION_ARGS)
1397 PG_RETURN_FLOAT8(patternsel(fcinfo, Pattern_Type_Like, true));
1401 * icnlikesel - Selectivity of ILIKE pattern non-match.
/* Wrapper: patternsel() with Pattern_Type_Like_IC, negate = true. */
1404 icnlikesel(PG_FUNCTION_ARGS)
1406 PG_RETURN_FLOAT8(patternsel(fcinfo, Pattern_Type_Like_IC, true));
1410 * booltestsel - Selectivity of BooleanTest Node.
/*
 * Estimate the fraction of rows satisfying "arg IS [NOT] TRUE/FALSE/UNKNOWN".
 *
 * Strategy (three tiers, in order of available information):
 *   1. If pg_statistic has a tuple for the variable, use stanullfrac and,
 *      when present, the boolean MCV slot to derive exact true/false/null
 *      frequencies.
 *   2. With a stats tuple but no MCV slot, use stanullfrac alone: exact for
 *      IS [NOT] UNKNOWN, and assume an even true/false split otherwise.
 *   3. With no stats at all, fall back to DEFAULT_UNK_SEL constants or to
 *      clause_selectivity() on the bare argument (ignoring the NULL case).
 *
 * NOTE(review): this extract elides some lines (braces, declarations, a few
 * case labels); comments below describe only the visible logic.
 */
1413 booltestsel(PlannerInfo *root, BoolTestType booltesttype, Node *arg,
1414 int varRelid, JoinType jointype, SpecialJoinInfo *sjinfo)
1416 VariableStatData vardata;
1419 examine_variable(root, arg, varRelid, &vardata);
1421 if (HeapTupleIsValid(vardata.statsTuple))
1423 Form_pg_statistic stats;
1430 stats = (Form_pg_statistic) GETSTRUCT(vardata.statsTuple);
1431 freq_null = stats->stanullfrac;
/* Try to fetch the most-common-values slot for this boolean column. */
1433 if (get_attstatsslot(vardata.statsTuple,
1434 vardata.atttype, vardata.atttypmod,
1435 STATISTIC_KIND_MCV, InvalidOid,
1438 &numbers, &nnumbers)
1445 * Get first MCV frequency and derive frequency for true.
/*
 * A boolean column has at most two distinct non-null values, so the
 * first MCV entry determines everything: if it is 'true' its frequency
 * is freq_true directly; otherwise freq_true is whatever is left after
 * the first MCV ('false') and the nulls.
 */
1447 if (DatumGetBool(values[0]))
1448 freq_true = numbers[0];
1450 freq_true = 1.0 - numbers[0] - freq_null;
1453 * Next derive frequency for false. Then use these as appropriate
1454 * to derive frequency for each case.
1456 freq_false = 1.0 - freq_true - freq_null;
1458 switch (booltesttype)
1461 /* select only NULL values */
1464 case IS_NOT_UNKNOWN:
1465 /* select non-NULL values */
1466 selec = 1.0 - freq_null;
1469 /* select only TRUE values */
1473 /* select non-TRUE values */
1474 selec = 1.0 - freq_true;
1477 /* select only FALSE values */
1481 /* select non-FALSE values */
1482 selec = 1.0 - freq_false;
1485 elog(ERROR, "unrecognized booltesttype: %d",
1486 (int) booltesttype);
1487 selec = 0.0; /* Keep compiler quiet */
/* Release the arrays pinned by get_attstatsslot(). */
1491 free_attstatsslot(vardata.atttype, values, nvalues,
1497 * No most-common-value info available. Still have null fraction
1498 * information, so use it for IS [NOT] UNKNOWN. Otherwise adjust
1499 * for null fraction and assume an even split for boolean tests.
1501 switch (booltesttype)
1506 * Use freq_null directly.
1510 case IS_NOT_UNKNOWN:
1513 * Select not unknown (not null) values. Calculate from
1516 selec = 1.0 - freq_null;
/* Even-split heuristic: half of the non-null rows per truth value. */
1522 selec = (1.0 - freq_null) / 2.0;
1525 elog(ERROR, "unrecognized booltesttype: %d",
1526 (int) booltesttype);
1527 selec = 0.0; /* Keep compiler quiet */
1535 * If we can't get variable statistics for the argument, perhaps
1536 * clause_selectivity can do something with it. We ignore the
1537 * possibility of a NULL value when using clause_selectivity, and just
1538 * assume the value is either TRUE or FALSE.
1540 switch (booltesttype)
1543 selec = DEFAULT_UNK_SEL;
1545 case IS_NOT_UNKNOWN:
1546 selec = DEFAULT_NOT_UNK_SEL;
1550 selec = (double) clause_selectivity(root, arg,
/* IS NOT TRUE / IS FALSE style cases: complement of the clause's own selectivity. */
1556 selec = 1.0 - (double) clause_selectivity(root, arg,
1561 elog(ERROR, "unrecognized booltesttype: %d",
1562 (int) booltesttype);
1563 selec = 0.0; /* Keep compiler quiet */
1568 ReleaseVariableStats(vardata);
1570 /* result should be in range, but make sure... */
1571 CLAMP_PROBABILITY(selec);
1573 return (Selectivity) selec;
1577 * nulltestsel - Selectivity of NullTest Node.
/*
 * Estimate the fraction of rows satisfying "arg IS [NOT] NULL".
 * With ANALYZE stats, stanullfrac answers this exactly; without stats,
 * fall back to the DEFAULT_UNK_SEL / DEFAULT_NOT_UNK_SEL guesses.
 *
 * NOTE(review): extract elides some lines (braces, case labels).
 */
1580 nulltestsel(PlannerInfo *root, NullTestType nulltesttype, Node *arg,
1581 int varRelid, JoinType jointype, SpecialJoinInfo *sjinfo)
1583 VariableStatData vardata;
1586 examine_variable(root, arg, varRelid, &vardata);
1588 if (HeapTupleIsValid(vardata.statsTuple))
1590 Form_pg_statistic stats;
1593 stats = (Form_pg_statistic) GETSTRUCT(vardata.statsTuple);
1594 freq_null = stats->stanullfrac;
1596 switch (nulltesttype)
1601 * Use freq_null directly.
1608 * Select not unknown (not null) values. Calculate from
1611 selec = 1.0 - freq_null;
1614 elog(ERROR, "unrecognized nulltesttype: %d",
1615 (int) nulltesttype);
/* early return: skip ReleaseVariableStats below (stats tuple leak is moot after elog ERROR) */
1616 return (Selectivity) 0; /* keep compiler quiet */
1622 * No ANALYZE stats available, so make a guess
1624 switch (nulltesttype)
1627 selec = DEFAULT_UNK_SEL;
1630 selec = DEFAULT_NOT_UNK_SEL;
1633 elog(ERROR, "unrecognized nulltesttype: %d",
1634 (int) nulltesttype);
1635 return (Selectivity) 0; /* keep compiler quiet */
1639 ReleaseVariableStats(vardata);
1641 /* result should be in range, but make sure... */
1642 CLAMP_PROBABILITY(selec);
1644 return (Selectivity) selec;
1648 * strip_array_coercion - strip binary-compatible relabeling from an array expr
1650 * For array values, the parser normally generates ArrayCoerceExpr conversions,
1651 * but it seems possible that RelabelType might show up. Also, the planner
1652 * is not currently tense about collapsing stacked ArrayCoerceExpr nodes,
1653 * so we need to be ready to deal with more than one level.
/*
 * Peels ArrayCoerceExpr nodes only when elemfuncid is InvalidOid (i.e. the
 * coercion is binary-compatible and changes no element values), plus any
 * RelabelType wrappers.  Presumably iterated in an enclosing loop whose
 * header is elided from this extract — TODO confirm against full source.
 */
1656 strip_array_coercion(Node *node)
1660 if (node && IsA(node, ArrayCoerceExpr) &&
1661 ((ArrayCoerceExpr *) node)->elemfuncid == InvalidOid)
1663 node = (Node *) ((ArrayCoerceExpr *) node)->arg;
1665 else if (node && IsA(node, RelabelType))
1667 /* We don't really expect this case, but may as well cope */
1668 node = (Node *) ((RelabelType *) node)->arg;
1677 * scalararraysel - Selectivity of ScalarArrayOpExpr Node.
/*
 * Estimate selectivity of "scalar op ANY/ALL (array)".
 *
 * Looks up the underlying operator's restriction or join estimator
 * (get_oprrest / get_oprjoin), then applies it per array element and
 * combines the per-element selectivities with the independence formula
 * s1 = s1 + s2 - s1*s2 for OR (ANY), or the product-style accumulation for
 * AND (ALL) — the AND accumulation lines are elided in this extract.
 * useOr selects the 0.0 (OR) vs 1.0 (AND) starting value.
 *
 * NOTE(review): many lines (braces, declarations, the ALL-combination arms)
 * are elided here; comments describe the visible logic only.
 */
1680 scalararraysel(PlannerInfo *root,
1681 ScalarArrayOpExpr *clause,
1682 bool is_join_clause,
1685 SpecialJoinInfo *sjinfo)
1687 Oid operator = clause->opno;
1688 bool useOr = clause->useOr;
1691 Oid nominal_element_type;
1692 Oid nominal_element_collation;
1693 RegProcedure oprsel;
1694 FmgrInfo oprselproc;
1698 * First, look up the underlying operator's selectivity estimator. Punt if
1699 * it hasn't got one.
1702 oprsel = get_oprjoin(operator);
1704 oprsel = get_oprrest(operator);
/* No estimator registered for the operator: return a default 50% guess. */
1706 return (Selectivity) 0.5;
1707 fmgr_info(oprsel, &oprselproc);
1708 fmgr_info_set_collation(DEFAULT_COLLATION_OID, &oprselproc);
1710 /* deconstruct the expression */
1711 Assert(list_length(clause->args) == 2);
1712 leftop = (Node *) linitial(clause->args);
1713 rightop = (Node *) lsecond(clause->args);
1715 /* get nominal (after relabeling) element type of rightop */
1716 nominal_element_type = get_base_element_type(exprType(rightop));
1717 if (!OidIsValid(nominal_element_type))
1718 return (Selectivity) 0.5; /* probably shouldn't happen */
1719 /* get nominal collation, too, for generating constants */
1720 nominal_element_collation = exprCollation(rightop);
1722 /* look through any binary-compatible relabeling of rightop */
1723 rightop = strip_array_coercion(rightop);
1726 * We consider three cases:
1728 * 1. rightop is an Array constant: deconstruct the array, apply the
1729 * operator's selectivity function for each array element, and merge the
1730 * results in the same way that clausesel.c does for AND/OR combinations.
1732 * 2. rightop is an ARRAY[] construct: apply the operator's selectivity
1733 * function for each element of the ARRAY[] construct, and merge.
1735 * 3. otherwise, make a guess ...
1737 if (rightop && IsA(rightop, Const))
1739 Datum arraydatum = ((Const *) rightop)->constvalue;
1740 bool arrayisnull = ((Const *) rightop)->constisnull;
1741 ArrayType *arrayval;
1750 if (arrayisnull) /* qual can't succeed if null array */
1751 return (Selectivity) 0.0;
1752 arrayval = DatumGetArrayTypeP(arraydatum);
1753 get_typlenbyvalalign(ARR_ELEMTYPE(arrayval),
1754 &elmlen, &elmbyval, &elmalign);
1755 deconstruct_array(arrayval,
1756 ARR_ELEMTYPE(arrayval),
1757 elmlen, elmbyval, elmalign,
1758 &elem_values, &elem_nulls, &num_elems);
/* Identity for the chosen combiner: 0 for OR-merge, 1 for AND-merge. */
1759 s1 = useOr ? 0.0 : 1.0;
1760 for (i = 0; i < num_elems; i++)
/* Wrap each element as a Const of the nominal element type for the estimator. */
1765 args = list_make2(leftop,
1766 makeConst(nominal_element_type,
1768 nominal_element_collation,
/* Join estimators take (root, oper, args, jointype, sjinfo); restriction ones take varRelid. */
1774 s2 = DatumGetFloat8(FunctionCall5(&oprselproc,
1775 PointerGetDatum(root),
1776 ObjectIdGetDatum(operator),
1777 PointerGetDatum(args),
1778 Int16GetDatum(jointype),
1779 PointerGetDatum(sjinfo)));
1781 s2 = DatumGetFloat8(FunctionCall4(&oprselproc,
1782 PointerGetDatum(root),
1783 ObjectIdGetDatum(operator),
1784 PointerGetDatum(args),
1785 Int32GetDatum(varRelid)));
/* OR-combination assuming independence of the per-element predicates. */
1787 s1 = s1 + s2 - s1 * s2;
1792 else if (rightop && IsA(rightop, ArrayExpr) &&
1793 !((ArrayExpr *) rightop)->multidims)
1795 ArrayExpr *arrayexpr = (ArrayExpr *) rightop;
1800 get_typlenbyval(arrayexpr->element_typeid,
1801 &elmlen, &elmbyval);
1802 s1 = useOr ? 0.0 : 1.0;
1803 foreach(l, arrayexpr->elements)
1805 Node *elem = (Node *) lfirst(l);
1810 * Theoretically, if elem isn't of nominal_element_type we should
1811 * insert a RelabelType, but it seems unlikely that any operator
1812 * estimation function would really care ...
1814 args = list_make2(leftop, elem)
1816 s2 = DatumGetFloat8(FunctionCall5(&oprselproc,
1817 PointerGetDatum(root),
1818 ObjectIdGetDatum(operator),
1819 PointerGetDatum(args),
1820 Int16GetDatum(jointype),
1821 PointerGetDatum(sjinfo)));
1823 s2 = DatumGetFloat8(FunctionCall4(&oprselproc,
1824 PointerGetDatum(root),
1825 ObjectIdGetDatum(operator),
1826 PointerGetDatum(args),
1827 Int32GetDatum(varRelid)));
1829 s1 = s1 + s2 - s1 * s2;
1836 CaseTestExpr *dummyexpr;
1842 * We need a dummy rightop to pass to the operator selectivity
1843 * routine. It can be pretty much anything that doesn't look like a
1844 * constant; CaseTestExpr is a convenient choice.
1846 dummyexpr = makeNode(CaseTestExpr);
1847 dummyexpr->typeId = nominal_element_type;
1848 dummyexpr->typeMod = -1;
1849 dummyexpr->collation = clause->inputcollid;
1850 args = list_make2(leftop, dummyexpr);
1852 s2 = DatumGetFloat8(FunctionCall5(&oprselproc,
1853 PointerGetDatum(root),
1854 ObjectIdGetDatum(operator),
1855 PointerGetDatum(args),
1856 Int16GetDatum(jointype),
1857 PointerGetDatum(sjinfo)));
1859 s2 = DatumGetFloat8(FunctionCall4(&oprselproc,
1860 PointerGetDatum(root),
1861 ObjectIdGetDatum(operator),
1862 PointerGetDatum(args),
1863 Int32GetDatum(varRelid)));
1864 s1 = useOr ? 0.0 : 1.0;
1867 * Arbitrarily assume 10 elements in the eventual array value (see
1868 * also estimate_array_length)
1870 for (i = 0; i < 10; i++)
1873 s1 = s1 + s2 - s1 * s2;
1879 /* result should be in range, but make sure... */
1880 CLAMP_PROBABILITY(s1);
1886 * Estimate number of elements in the array yielded by an expression.
1888 * It's important that this agree with scalararraysel.
/*
 * Returns the exact element count for an array Const or a one-dimensional
 * ARRAY[] construct; otherwise falls through to a default guess (the
 * constant itself is elided in this extract, but the comment below points
 * at scalararraysel, which assumes 10 elements).
 */
1891 estimate_array_length(Node *arrayexpr)
1893 /* look through any binary-compatible relabeling of arrayexpr */
1894 arrayexpr = strip_array_coercion(arrayexpr);
1896 if (arrayexpr && IsA(arrayexpr, Const))
1898 Datum arraydatum = ((Const *) arrayexpr)->constvalue;
1899 bool arrayisnull = ((Const *) arrayexpr)->constisnull;
1900 ArrayType *arrayval;
1904 arrayval = DatumGetArrayTypeP(arraydatum);
1905 return ArrayGetNItems(ARR_NDIM(arrayval), ARR_DIMS(arrayval));
1907 else if (arrayexpr && IsA(arrayexpr, ArrayExpr) &&
1908 !((ArrayExpr *) arrayexpr)->multidims)
1910 return list_length(((ArrayExpr *) arrayexpr)->elements);
1914 /* default guess --- see also scalararraysel */
1920 * rowcomparesel - Selectivity of RowCompareExpr Node.
1922 * We estimate RowCompare selectivity by considering just the first (high
1923 * order) columns, which makes it equivalent to an ordinary OpExpr. While
1924 * this estimate could be refined by considering additional columns, it
1925 * seems unlikely that we could do a lot better without multi-column
/*
 * Builds a two-element arg list from the leading left/right columns, decides
 * whether the (reduced) clause is a join or restriction clause, and defers to
 * join_selectivity() or restriction_selectivity() accordingly.
 */
1929 rowcomparesel(PlannerInfo *root,
1930 RowCompareExpr *clause,
1931 int varRelid, JoinType jointype, SpecialJoinInfo *sjinfo)
1934 Oid opno = linitial_oid(clause->opnos);
1936 bool is_join_clause;
1938 /* Build equivalent arg list for single operator */
1939 opargs = list_make2(linitial(clause->largs), linitial(clause->rargs));
1942 * Decide if it's a join clause. This should match clausesel.c's
1943 * treat_as_join_clause(), except that we intentionally consider only the
1944 * leading columns and not the rest of the clause.
1949 * Caller is forcing restriction mode (eg, because we are examining an
1950 * inner indexscan qual).
1952 is_join_clause = false;
1954 else if (sjinfo == NULL)
1957 * It must be a restriction clause, since it's being evaluated at a
1960 is_join_clause = false;
1965 * Otherwise, it's a join if there's more than one relation used.
1967 is_join_clause = (NumRelids((Node *) opargs) > 1);
1972 /* Estimate selectivity for a join clause. */
1973 s1 = join_selectivity(root, opno,
1980 /* Estimate selectivity for a restriction clause. */
1981 s1 = restriction_selectivity(root, opno,
1990 * eqjoinsel - Join selectivity of "="
/*
 * fmgr entry point.  Unpacks the standard five oprjoin arguments, fetches
 * per-side variable stats via get_join_variables(), then dispatches on
 * sjinfo->jointype: the inner/outer cases use eqjoinsel_inner(); semi/anti
 * cases use eqjoinsel_semi(), commuting the operator when the clause is
 * reversed so that vardata1 is always the LHS variable (the exact case
 * labels are elided in this extract).
 */
1993 eqjoinsel(PG_FUNCTION_ARGS)
1995 PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
1996 Oid operator = PG_GETARG_OID(1);
1997 List *args = (List *) PG_GETARG_POINTER(2);
2000 JoinType jointype = (JoinType) PG_GETARG_INT16(3);
2002 SpecialJoinInfo *sjinfo = (SpecialJoinInfo *) PG_GETARG_POINTER(4);
2004 VariableStatData vardata1;
2005 VariableStatData vardata2;
2006 bool join_is_reversed;
2008 get_join_variables(root, args, sjinfo,
2009 &vardata1, &vardata2, &join_is_reversed);
2011 switch (sjinfo->jointype)
2016 selec = eqjoinsel_inner(operator, &vardata1, &vardata2);
/* Semi/anti path: keep the LHS variable first, commuting the operator if needed. */
2020 if (!join_is_reversed)
2021 selec = eqjoinsel_semi(operator, &vardata1, &vardata2);
2023 selec = eqjoinsel_semi(get_commutator(operator),
2024 &vardata2, &vardata1);
2027 /* other values not expected here */
2028 elog(ERROR, "unrecognized join type: %d",
2029 (int) sjinfo->jointype);
2030 selec = 0; /* keep compiler quiet */
2034 ReleaseVariableStats(vardata1);
2035 ReleaseVariableStats(vardata2);
2037 CLAMP_PROBABILITY(selec);
2039 PG_RETURN_FLOAT8((float8) selec);
2043 * eqjoinsel_inner --- eqjoinsel for normal inner join
2045 * We also use this for LEFT/FULL outer joins; it's not presently clear
2046 * that it's worth trying to distinguish them here.
/*
 * Two estimation paths:
 *  - Both sides have MCV lists: cross-match the lists with the actual
 *    equality proc to get exact selectivity for the MCV x MCV portion, then
 *    extrapolate for the remaining (non-MCV) populations from both relations'
 *    viewpoints and take the smaller estimate.
 *  - Otherwise: MIN(1/nd1, 1/nd2) * (1-nullfrac1) * (1-nullfrac2), with nd
 *    values clamped to the estimated relation sizes.
 *
 * NOTE(review): several declaration and brace lines are elided in this
 * extract; comments describe the visible logic only.
 */
2049 eqjoinsel_inner(Oid operator,
2050 VariableStatData *vardata1, VariableStatData *vardata2)
2055 Form_pg_statistic stats1 = NULL;
2056 Form_pg_statistic stats2 = NULL;
2057 bool have_mcvs1 = false;
2058 Datum *values1 = NULL;
2060 float4 *numbers1 = NULL;
2062 bool have_mcvs2 = false;
2063 Datum *values2 = NULL;
2065 float4 *numbers2 = NULL;
2068 nd1 = get_variable_numdistinct(vardata1);
2069 nd2 = get_variable_numdistinct(vardata2);
2071 if (HeapTupleIsValid(vardata1->statsTuple))
2073 stats1 = (Form_pg_statistic) GETSTRUCT(vardata1->statsTuple);
2074 have_mcvs1 = get_attstatsslot(vardata1->statsTuple,
2076 vardata1->atttypmod,
2080 &values1, &nvalues1,
2081 &numbers1, &nnumbers1);
2084 if (HeapTupleIsValid(vardata2->statsTuple))
2086 stats2 = (Form_pg_statistic) GETSTRUCT(vardata2->statsTuple);
2087 have_mcvs2 = get_attstatsslot(vardata2->statsTuple,
2089 vardata2->atttypmod,
2093 &values2, &nvalues2,
2094 &numbers2, &nnumbers2);
2097 if (have_mcvs1 && have_mcvs2)
2100 * We have most-common-value lists for both relations. Run through
2101 * the lists to see which MCVs actually join to each other with the
2102 * given operator. This allows us to determine the exact join
2103 * selectivity for the portion of the relations represented by the MCV
2104 * lists. We still have to estimate for the remaining population, but
2105 * in a skewed distribution this gives us a big leg up in accuracy.
2106 * For motivation see the analysis in Y. Ioannidis and S.
2107 * Christodoulakis, "On the propagation of errors in the size of join
2108 * results", Technical Report 1018, Computer Science Dept., University
2109 * of Wisconsin, Madison, March 1991 (available from ftp.cs.wisc.edu).
2114 double nullfrac1 = stats1->stanullfrac;
2115 double nullfrac2 = stats2->stanullfrac;
2116 double matchprodfreq,
2128 fmgr_info(get_opcode(operator), &eqproc);
2129 fmgr_info_set_collation(DEFAULT_COLLATION_OID, &eqproc);
2130 hasmatch1 = (bool *) palloc0(nvalues1 * sizeof(bool));
2131 hasmatch2 = (bool *) palloc0(nvalues2 * sizeof(bool));
2134 * Note we assume that each MCV will match at most one member of the
2135 * other MCV list. If the operator isn't really equality, there could
2136 * be multiple matches --- but we don't look for them, both for speed
2137 * and because the math wouldn't add up...
2139 matchprodfreq = 0.0;
/* O(n*m) cross-match of the two MCV lists using the operator's own proc. */
2141 for (i = 0; i < nvalues1; i++)
2145 for (j = 0; j < nvalues2; j++)
2149 if (DatumGetBool(FunctionCall2(&eqproc,
2153 hasmatch1[i] = hasmatch2[j] = true;
2154 matchprodfreq += numbers1[i] * numbers2[j];
2160 CLAMP_PROBABILITY(matchprodfreq);
2161 /* Sum up frequencies of matched and unmatched MCVs */
2162 matchfreq1 = unmatchfreq1 = 0.0;
2163 for (i = 0; i < nvalues1; i++)
2166 matchfreq1 += numbers1[i];
2168 unmatchfreq1 += numbers1[i];
2170 CLAMP_PROBABILITY(matchfreq1);
2171 CLAMP_PROBABILITY(unmatchfreq1);
2172 matchfreq2 = unmatchfreq2 = 0.0;
2173 for (i = 0; i < nvalues2; i++)
2176 matchfreq2 += numbers2[i];
2178 unmatchfreq2 += numbers2[i];
2180 CLAMP_PROBABILITY(matchfreq2);
2181 CLAMP_PROBABILITY(unmatchfreq2);
2186 * Compute total frequency of non-null values that are not in the MCV
2189 otherfreq1 = 1.0 - nullfrac1 - matchfreq1 - unmatchfreq1;
2190 otherfreq2 = 1.0 - nullfrac2 - matchfreq2 - unmatchfreq2;
2191 CLAMP_PROBABILITY(otherfreq1);
2192 CLAMP_PROBABILITY(otherfreq2);
2195 * We can estimate the total selectivity from the point of view of
2196 * relation 1 as: the known selectivity for matched MCVs, plus
2197 * unmatched MCVs that are assumed to match against random members of
2198 * relation 2's non-MCV population, plus non-MCV values that are
2199 * assumed to match against random members of relation 2's unmatched
2200 * MCVs plus non-MCV values.
2202 totalsel1 = matchprodfreq;
/* Guard conditions for the divisions below are elided in this extract. */
2204 totalsel1 += unmatchfreq1 * otherfreq2 / (nd2 - nvalues2);
2206 totalsel1 += otherfreq1 * (otherfreq2 + unmatchfreq2) /
2208 /* Same estimate from the point of view of relation 2. */
2209 totalsel2 = matchprodfreq;
2211 totalsel2 += unmatchfreq2 * otherfreq1 / (nd1 - nvalues1);
2213 totalsel2 += otherfreq2 * (otherfreq1 + unmatchfreq1) /
2217 * Use the smaller of the two estimates. This can be justified in
2218 * essentially the same terms as given below for the no-stats case: to
2219 * a first approximation, we are estimating from the point of view of
2220 * the relation with smaller nd.
2222 selec = (totalsel1 < totalsel2) ? totalsel1 : totalsel2;
2227 * We do not have MCV lists for both sides. Estimate the join
2228 * selectivity as MIN(1/nd1,1/nd2)*(1-nullfrac1)*(1-nullfrac2). This
2229 * is plausible if we assume that the join operator is strict and the
2230 * non-null values are about equally distributed: a given non-null
2231 * tuple of rel1 will join to either zero or N2*(1-nullfrac2)/nd2 rows
2232 * of rel2, so total join rows are at most
2233 * N1*(1-nullfrac1)*N2*(1-nullfrac2)/nd2 giving a join selectivity of
2234 * not more than (1-nullfrac1)*(1-nullfrac2)/nd2. By the same logic it
2235 * is not more than (1-nullfrac1)*(1-nullfrac2)/nd1, so the expression
2236 * with MIN() is an upper bound. Using the MIN() means we estimate
2237 * from the point of view of the relation with smaller nd (since the
2238 * larger nd is determining the MIN). It is reasonable to assume that
2239 * most tuples in this rel will have join partners, so the bound is
2240 * probably reasonably tight and should be taken as-is.
2242 * XXX Can we be smarter if we have an MCV list for just one side? It
2243 * seems that if we assume equal distribution for the other side, we
2244 * end up with the same answer anyway.
2246 * An additional hack we use here is to clamp the nd1 and nd2 values
2247 * to not more than what we are estimating the input relation sizes to
2248 * be, providing a crude correction for the selectivity of restriction
2249 * clauses on those relations. (We don't do that in the other path
2250 * since there we are comparing the nd values to stats for the whole
2253 double nullfrac1 = stats1 ? stats1->stanullfrac : 0.0;
2254 double nullfrac2 = stats2 ? stats2->stanullfrac : 0.0;
2257 nd1 = Min(nd1, vardata1->rel->rows);
2259 nd2 = Min(nd2, vardata2->rel->rows);
2261 selec = (1.0 - nullfrac1) * (1.0 - nullfrac2);
/* Free the MCV arrays pinned above (conditioned on have_mcvs flags, elided). */
2269 free_attstatsslot(vardata1->atttype, values1, nvalues1,
2270 numbers1, nnumbers1);
2272 free_attstatsslot(vardata2->atttype, values2, nvalues2,
2273 numbers2, nnumbers2);
2279 * eqjoinsel_semi --- eqjoinsel for semi join
2281 * (Also used for anti join, which we are supposed to estimate the same way.)
2282 * Caller has ensured that vardata1 is the LHS variable.
/*
 * Estimates the fraction of LHS rows with at least one RHS join partner.
 * With MCV lists on both sides (and a valid operator), matched MCVs give an
 * exact lower bound; the remaining "uncertain" fraction is scaled by nd2/nd1
 * (or 1.0 when nd1 <= nd2, or 0.5 when either ndistinct is a default guess).
 * Without MCVs, the same nd-ratio heuristic is applied to (1 - nullfrac1).
 *
 * NOTE(review): declaration/brace lines are elided in this extract.
 */
2285 eqjoinsel_semi(Oid operator,
2286 VariableStatData *vardata1, VariableStatData *vardata2)
2291 Form_pg_statistic stats1 = NULL;
2292 bool have_mcvs1 = false;
2293 Datum *values1 = NULL;
2295 float4 *numbers1 = NULL;
2297 bool have_mcvs2 = false;
2298 Datum *values2 = NULL;
2300 float4 *numbers2 = NULL;
2303 nd1 = get_variable_numdistinct(vardata1);
2304 nd2 = get_variable_numdistinct(vardata2);
2306 if (HeapTupleIsValid(vardata1->statsTuple))
2308 stats1 = (Form_pg_statistic) GETSTRUCT(vardata1->statsTuple);
2309 have_mcvs1 = get_attstatsslot(vardata1->statsTuple,
2311 vardata1->atttypmod,
2315 &values1, &nvalues1,
2316 &numbers1, &nnumbers1);
2319 if (HeapTupleIsValid(vardata2->statsTuple))
2321 have_mcvs2 = get_attstatsslot(vardata2->statsTuple,
2323 vardata2->atttypmod,
2327 &values2, &nvalues2,
2328 &numbers2, &nnumbers2);
/* operator may be InvalidOid when get_commutator() failed in eqjoinsel(). */
2331 if (have_mcvs1 && have_mcvs2 && OidIsValid(operator))
2334 * We have most-common-value lists for both relations. Run through
2335 * the lists to see which MCVs actually join to each other with the
2336 * given operator. This allows us to determine the exact join
2337 * selectivity for the portion of the relations represented by the MCV
2338 * lists. We still have to estimate for the remaining population, but
2339 * in a skewed distribution this gives us a big leg up in accuracy.
2344 double nullfrac1 = stats1->stanullfrac;
2351 fmgr_info(get_opcode(operator), &eqproc);
2352 fmgr_info_set_collation(DEFAULT_COLLATION_OID, &eqproc);
2353 hasmatch1 = (bool *) palloc0(nvalues1 * sizeof(bool));
2354 hasmatch2 = (bool *) palloc0(nvalues2 * sizeof(bool));
2357 * Note we assume that each MCV will match at most one member of the
2358 * other MCV list. If the operator isn't really equality, there could
2359 * be multiple matches --- but we don't look for them, both for speed
2360 * and because the math wouldn't add up...
2363 for (i = 0; i < nvalues1; i++)
2367 for (j = 0; j < nvalues2; j++)
2371 if (DatumGetBool(FunctionCall2(&eqproc,
2375 hasmatch1[i] = hasmatch2[j] = true;
2381 /* Sum up frequencies of matched MCVs */
2383 for (i = 0; i < nvalues1; i++)
2386 matchfreq1 += numbers1[i];
2388 CLAMP_PROBABILITY(matchfreq1);
2393 * Now we need to estimate the fraction of relation 1 that has at
2394 * least one join partner. We know for certain that the matched MCVs
2395 * do, so that gives us a lower bound, but we're really in the dark
2396 * about everything else. Our crude approach is: if nd1 <= nd2 then
2397 * assume all non-null rel1 rows have join partners, else assume for
2398 * the uncertain rows that a fraction nd2/nd1 have join partners. We
2399 * can discount the known-matched MCVs from the distinct-values counts
2400 * before doing the division.
2402 * Crude as the above is, it's completely useless if we don't have
2403 * reliable ndistinct values for both sides. Hence, if either nd1
2404 * or nd2 is default, punt and assume half of the uncertain rows
2405 * have join partners.
2407 if (nd1 != DEFAULT_NUM_DISTINCT && nd2 != DEFAULT_NUM_DISTINCT)
2411 if (nd1 <= nd2 || nd2 <= 0)
2412 uncertainfrac = 1.0;
2414 uncertainfrac = nd2 / nd1;
2417 uncertainfrac = 0.5;
2418 uncertain = 1.0 - matchfreq1 - nullfrac1;
2419 CLAMP_PROBABILITY(uncertain);
2420 selec = matchfreq1 + uncertainfrac * uncertain;
2425 * Without MCV lists for both sides, we can only use the heuristic
2428 double nullfrac1 = stats1 ? stats1->stanullfrac : 0.0;
2430 if (nd1 != DEFAULT_NUM_DISTINCT && nd2 != DEFAULT_NUM_DISTINCT)
/* Crude correction for restriction clauses: clamp ndistinct to estimated row counts. */
2433 nd1 = Min(nd1, vardata1->rel->rows);
2435 nd2 = Min(nd2, vardata2->rel->rows);
2437 if (nd1 <= nd2 || nd2 <= 0)
2438 selec = 1.0 - nullfrac1;
2440 selec = (nd2 / nd1) * (1.0 - nullfrac1);
2443 selec = 0.5 * (1.0 - nullfrac1);
2447 free_attstatsslot(vardata1->atttype, values1, nvalues1,
2448 numbers1, nnumbers1);
2450 free_attstatsslot(vardata2->atttype, values2, nvalues2,
2451 numbers2, nnumbers2);
2457 * neqjoinsel - Join selectivity of "!="
/*
 * Computed as 1 - eqjoinsel(negator).  If the operator has no registered
 * negator, falls back to DEFAULT_EQ_SEL (then complements it).
 */
2460 neqjoinsel(PG_FUNCTION_ARGS)
2462 PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
2463 Oid operator = PG_GETARG_OID(1);
2464 List *args = (List *) PG_GETARG_POINTER(2);
2465 JoinType jointype = (JoinType) PG_GETARG_INT16(3);
2466 SpecialJoinInfo *sjinfo = (SpecialJoinInfo *) PG_GETARG_POINTER(4);
2471 * We want 1 - eqjoinsel() where the equality operator is the one
2472 * associated with this != operator, that is, its negator.
2474 eqop = get_negator(operator);
2477 result = DatumGetFloat8(DirectFunctionCall5(eqjoinsel,
2478 PointerGetDatum(root),
2479 ObjectIdGetDatum(eqop),
2480 PointerGetDatum(args),
2481 Int16GetDatum(jointype),
2482 PointerGetDatum(sjinfo)));
2486 /* Use default selectivity (should we raise an error instead?) */
2487 result = DEFAULT_EQ_SEL;
2489 result = 1.0 - result;
2490 PG_RETURN_FLOAT8(result);
2494 * scalarltjoinsel - Join selectivity of "<" and "<=" for scalars
/* No stats-based estimate is attempted; always returns DEFAULT_INEQ_SEL. */
2497 scalarltjoinsel(PG_FUNCTION_ARGS)
2499 PG_RETURN_FLOAT8(DEFAULT_INEQ_SEL);
2503 * scalargtjoinsel - Join selectivity of ">" and ">=" for scalars
/* No stats-based estimate is attempted; always returns DEFAULT_INEQ_SEL. */
2506 scalargtjoinsel(PG_FUNCTION_ARGS)
2508 PG_RETURN_FLOAT8(DEFAULT_INEQ_SEL);
2512 * patternjoinsel - Generic code for pattern-match join selectivity.
/*
 * Shared helper for the pattern-match join estimators below.  Ignores ptype
 * and returns the constant DEFAULT_MATCH_SEL (complemented when negate).
 */
2515 patternjoinsel(PG_FUNCTION_ARGS, Pattern_Type ptype, bool negate)
2517 /* For the moment we just punt. */
2518 return negate ? (1.0 - DEFAULT_MATCH_SEL) : DEFAULT_MATCH_SEL;
2522 * regexeqjoinsel - Join selectivity of regular-expression pattern match.
/* Wrapper: patternjoinsel() with Pattern_Type_Regex, negate = false. */
2525 regexeqjoinsel(PG_FUNCTION_ARGS)
2527 PG_RETURN_FLOAT8(patternjoinsel(fcinfo, Pattern_Type_Regex, false));
2531 * icregexeqjoinsel - Join selectivity of case-insensitive regex match.
/* Wrapper: patternjoinsel() with Pattern_Type_Regex_IC, negate = false. */
2534 icregexeqjoinsel(PG_FUNCTION_ARGS)
2536 PG_RETURN_FLOAT8(patternjoinsel(fcinfo, Pattern_Type_Regex_IC, false));
2540 * likejoinsel - Join selectivity of LIKE pattern match.
/* Wrapper: patternjoinsel() with Pattern_Type_Like, negate = false. */
2543 likejoinsel(PG_FUNCTION_ARGS)
2545 PG_RETURN_FLOAT8(patternjoinsel(fcinfo, Pattern_Type_Like, false));
2549 * iclikejoinsel - Join selectivity of ILIKE pattern match.
/* Wrapper: patternjoinsel() with Pattern_Type_Like_IC, negate = false. */
2552 iclikejoinsel(PG_FUNCTION_ARGS)
2554 PG_RETURN_FLOAT8(patternjoinsel(fcinfo, Pattern_Type_Like_IC, false));
2558 * regexnejoinsel - Join selectivity of regex non-match.
/* Wrapper: patternjoinsel() with Pattern_Type_Regex, negate = true. */
2561 regexnejoinsel(PG_FUNCTION_ARGS)
2563 PG_RETURN_FLOAT8(patternjoinsel(fcinfo, Pattern_Type_Regex, true));
2567 * icregexnejoinsel - Join selectivity of case-insensitive regex non-match.
/* Wrapper: patternjoinsel() with Pattern_Type_Regex_IC, negate = true. */
2570 icregexnejoinsel(PG_FUNCTION_ARGS)
2572 PG_RETURN_FLOAT8(patternjoinsel(fcinfo, Pattern_Type_Regex_IC, true));
2576 * nlikejoinsel - Join selectivity of LIKE pattern non-match.
/* Wrapper: patternjoinsel() with Pattern_Type_Like, negate = true. */
2579 nlikejoinsel(PG_FUNCTION_ARGS)
2581 PG_RETURN_FLOAT8(patternjoinsel(fcinfo, Pattern_Type_Like, true));
2585 * icnlikejoinsel - Join selectivity of ILIKE pattern non-match.
/* Wrapper: patternjoinsel() with Pattern_Type_Like_IC, negate = true. */
2588 icnlikejoinsel(PG_FUNCTION_ARGS)
2590 PG_RETURN_FLOAT8(patternjoinsel(fcinfo, Pattern_Type_Like_IC, true));
2594 * mergejoinscansel - Scan selectivity of merge join.
2596 * A merge join will stop as soon as it exhausts either input stream.
2597 * Therefore, if we can estimate the ranges of both input variables,
2598 * we can estimate how much of the input will actually be read. This
2599 * can have a considerable impact on the cost when using indexscans.
2601 * Also, we can estimate how much of each input has to be read before the
2602 * first join pair is found, which will affect the join's startup time.
2604 * clause should be a clause already known to be mergejoinable. opfamily,
2605 * strategy, and nulls_first specify the sort ordering being used.
2608 * *leftstart is set to the fraction of the left-hand variable expected
2609 * to be scanned before the first join pair is found (0 to 1).
2610 * *leftend is set to the fraction of the left-hand variable expected
2611 * to be scanned before the join terminates (0 to 1).
2612 * *rightstart, *rightend similarly for the right-hand variable.
2615 mergejoinscansel(PlannerInfo *root, Node *clause,
2616 Oid opfamily, int strategy, bool nulls_first,
2617 Selectivity *leftstart, Selectivity *leftend,
2618 Selectivity *rightstart, Selectivity *rightend)
2622 VariableStatData leftvar,
/*
 * Defaults: assume each input is scanned completely (*end = 1.0) and the
 * first join pair is found immediately (*start = 0.0).  Any failure below
 * falls back to these.
 */
2643 /* Set default results if we can't figure anything out. */
2644 /* XXX should default "start" fraction be a bit more than 0? */
2645 *leftstart = *rightstart = 0.0;
2646 *leftend = *rightend = 1.0;
2648 /* Deconstruct the merge clause */
2649 if (!is_opclause(clause))
2650 return; /* shouldn't happen */
2651 opno = ((OpExpr *) clause)->opno;
2652 left = get_leftop((Expr *) clause);
2653 right = get_rightop((Expr *) clause);
2655 return; /* shouldn't happen */
2657 /* Look for stats for the inputs */
2658 examine_variable(root, left, 0, &leftvar);
2659 examine_variable(root, right, 0, &rightvar);
2661 /* Extract the operator's declared left/right datatypes */
2662 get_op_opfamily_properties(opno, opfamily, false,
/* A mergejoinable clause must be the opfamily's btree equality operator */
2666 Assert(op_strategy == BTEqualStrategyNumber);
2669 * Look up the various operators we need. If we don't find them all, it
2670 * probably means the opfamily is broken, but we just fail silently.
2672 * Note: we expect that pg_statistic histograms will be sorted by the '<'
2673 * operator, regardless of which sort direction we are considering.
/* ascending-order case: use '<' / '<=' family members */
2677 case BTLessStrategyNumber:
2679 if (op_lefttype == op_righttype)
/* operands same type: one lookup serves cross- and same-type roles */
2682 ltop = get_opfamily_member(opfamily,
2683 op_lefttype, op_righttype,
2684 BTLessStrategyNumber);
2685 leop = get_opfamily_member(opfamily,
2686 op_lefttype, op_righttype,
2687 BTLessEqualStrategyNumber);
/* cross-type case: need distinct cross-type, per-type, and reversed ops */
2697 ltop = get_opfamily_member(opfamily,
2698 op_lefttype, op_righttype,
2699 BTLessStrategyNumber);
2700 leop = get_opfamily_member(opfamily,
2701 op_lefttype, op_righttype,
2702 BTLessEqualStrategyNumber);
2703 lsortop = get_opfamily_member(opfamily,
2704 op_lefttype, op_lefttype,
2705 BTLessStrategyNumber);
2706 rsortop = get_opfamily_member(opfamily,
2707 op_righttype, op_righttype,
2708 BTLessStrategyNumber);
/* "rev" ops compare right-side values against left-side values */
2711 revltop = get_opfamily_member(opfamily,
2712 op_righttype, op_lefttype,
2713 BTLessStrategyNumber);
2714 revleop = get_opfamily_member(opfamily,
2715 op_righttype, op_lefttype,
2716 BTLessEqualStrategyNumber);
2719 case BTGreaterStrategyNumber:
2720 /* descending-order case */
2722 if (op_lefttype == op_righttype)
2725 ltop = get_opfamily_member(opfamily,
2726 op_lefttype, op_righttype,
2727 BTGreaterStrategyNumber);
2728 leop = get_opfamily_member(opfamily,
2729 op_lefttype, op_righttype,
2730 BTGreaterEqualStrategyNumber);
/* stats lookups still use '<', since histograms are sorted ascending */
2733 lstatop = get_opfamily_member(opfamily,
2734 op_lefttype, op_lefttype,
2735 BTLessStrategyNumber);
2742 ltop = get_opfamily_member(opfamily,
2743 op_lefttype, op_righttype,
2744 BTGreaterStrategyNumber);
2745 leop = get_opfamily_member(opfamily,
2746 op_lefttype, op_righttype,
2747 BTGreaterEqualStrategyNumber);
2748 lsortop = get_opfamily_member(opfamily,
2749 op_lefttype, op_lefttype,
2750 BTGreaterStrategyNumber);
2751 rsortop = get_opfamily_member(opfamily,
2752 op_righttype, op_righttype,
2753 BTGreaterStrategyNumber);
2754 lstatop = get_opfamily_member(opfamily,
2755 op_lefttype, op_lefttype,
2756 BTLessStrategyNumber);
2757 rstatop = get_opfamily_member(opfamily,
2758 op_righttype, op_righttype,
2759 BTLessStrategyNumber);
2760 revltop = get_opfamily_member(opfamily,
2761 op_righttype, op_lefttype,
2762 BTGreaterStrategyNumber);
2763 revleop = get_opfamily_member(opfamily,
2764 op_righttype, op_lefttype,
2765 BTGreaterEqualStrategyNumber);
2769 goto fail; /* shouldn't get here */
/* Bail out unless every operator we'll invoke below was found */
2772 if (!OidIsValid(lsortop) ||
2773 !OidIsValid(rsortop) ||
2774 !OidIsValid(lstatop) ||
2775 !OidIsValid(rstatop) ||
2776 !OidIsValid(ltop) ||
2777 !OidIsValid(leop) ||
2778 !OidIsValid(revltop) ||
2779 !OidIsValid(revleop))
2780 goto fail; /* insufficient info in catalogs */
2782 /* Try to get ranges of both inputs */
2785 if (!get_variable_range(root, &leftvar, lstatop,
2786 &leftmin, &leftmax))
2787 goto fail; /* no range available from stats */
2788 if (!get_variable_range(root, &rightvar, rstatop,
2789 &rightmin, &rightmax))
2790 goto fail; /* no range available from stats */
/* descending-order scan: range endpoints are logically reversed */
2794 /* need to swap the max and min */
2795 if (!get_variable_range(root, &leftvar, lstatop,
2796 &leftmax, &leftmin))
2797 goto fail; /* no range available from stats */
2798 if (!get_variable_range(root, &rightvar, rstatop,
2799 &rightmax, &rightmin))
2800 goto fail; /* no range available from stats */
2804 * Now, the fraction of the left variable that will be scanned is the
2805 * fraction that's <= the right-side maximum value. But only believe
2806 * non-default estimates, else stick with our 1.0.
2808 selec = scalarineqsel(root, leop, isgt, &leftvar,
2809 rightmax, op_righttype);
2810 if (selec != DEFAULT_INEQ_SEL)
2813 /* And similarly for the right variable. */
2814 selec = scalarineqsel(root, revleop, isgt, &rightvar,
2815 leftmax, op_lefttype);
2816 if (selec != DEFAULT_INEQ_SEL)
2820 * Only one of the two "end" fractions can really be less than 1.0;
2821 * believe the smaller estimate and reset the other one to exactly 1.0. If
2822 * we get exactly equal estimates (as can easily happen with self-joins),
2825 if (*leftend > *rightend)
2827 else if (*leftend < *rightend)
2830 *leftend = *rightend = 1.0;
2833 * Also, the fraction of the left variable that will be scanned before the
2834 * first join pair is found is the fraction that's < the right-side
2835 * minimum value. But only believe non-default estimates, else stick with
2838 selec = scalarineqsel(root, ltop, isgt, &leftvar,
2839 rightmin, op_righttype);
2840 if (selec != DEFAULT_INEQ_SEL)
2843 /* And similarly for the right variable. */
2844 selec = scalarineqsel(root, revltop, isgt, &rightvar,
2845 leftmin, op_lefttype);
2846 if (selec != DEFAULT_INEQ_SEL)
2847 *rightstart = selec;
2850 * Only one of the two "start" fractions can really be more than zero;
2851 * believe the larger estimate and reset the other one to exactly 0.0. If
2852 * we get exactly equal estimates (as can easily happen with self-joins),
2855 if (*leftstart < *rightstart)
2857 else if (*leftstart > *rightstart)
2860 *leftstart = *rightstart = 0.0;
2863 * If the sort order is nulls-first, we're going to have to skip over any
2864 * nulls too. These would not have been counted by scalarineqsel, and we
2865 * can safely add in this fraction regardless of whether we believe
2866 * scalarineqsel's results or not. But be sure to clamp the sum to 1.0!
2870 Form_pg_statistic stats;
2872 if (HeapTupleIsValid(leftvar.statsTuple))
2874 stats = (Form_pg_statistic) GETSTRUCT(leftvar.statsTuple);
2875 *leftstart += stats->stanullfrac;
2876 CLAMP_PROBABILITY(*leftstart);
2877 *leftend += stats->stanullfrac;
2878 CLAMP_PROBABILITY(*leftend);
2880 if (HeapTupleIsValid(rightvar.statsTuple))
2882 stats = (Form_pg_statistic) GETSTRUCT(rightvar.statsTuple);
2883 *rightstart += stats->stanullfrac;
2884 CLAMP_PROBABILITY(*rightstart);
2885 *rightend += stats->stanullfrac;
2886 CLAMP_PROBABILITY(*rightend);
2890 /* Disbelieve start >= end, just in case that can happen */
2891 if (*leftstart >= *leftend)
2896 if (*rightstart >= *rightend)
/*
 * NOTE(review): these releases appear to sit below the "fail" label, so
 * both success and failure paths free the stats tuples -- confirm against
 * the full source.
 */
2903 ReleaseVariableStats(leftvar);
2904 ReleaseVariableStats(rightvar);
2909 * Helper routine for estimate_num_groups: add an item to a list of
2910 * GroupVarInfos, but only if it's not known equal to any of the existing
/* One grouping expression with its owning rel and ndistinct estimate */
2915 Node *var; /* might be an expression, not just a Var */
2916 RelOptInfo *rel; /* relation it belongs to */
2917 double ndistinct; /* # distinct values */
/*
 * Append "var" to varinfos unless it is a duplicate.  Exact duplicates are
 * dropped; of two known-equal vars from different rels, only the one with
 * the smaller ndistinct is kept.  Returns the (possibly modified) list;
 * callers must use the return value, since list cells may be deleted.
 */
2921 add_unique_group_var(PlannerInfo *root, List *varinfos,
2922 Node *var, VariableStatData *vardata)
2924 GroupVarInfo *varinfo;
2928 ndistinct = get_variable_numdistinct(vardata);
2930 /* cannot use foreach here because of possible list_delete */
2931 lc = list_head(varinfos);
2934 varinfo = (GroupVarInfo *) lfirst(lc);
2936 /* must advance lc before list_delete possibly pfree's it */
2939 /* Drop exact duplicates */
2940 if (equal(var, varinfo->var))
2944 * Drop known-equal vars, but only if they belong to different
2945 * relations (see comments for estimate_num_groups)
2947 if (vardata->rel != varinfo->rel &&
2948 exprs_known_equal(root, var, varinfo->var))
2950 if (varinfo->ndistinct <= ndistinct)
2952 /* Keep older item, forget new one */
2957 /* Delete the older item */
2958 varinfos = list_delete_ptr(varinfos, varinfo);
/* Not a duplicate: build a new entry and append it */
2963 varinfo = (GroupVarInfo *) palloc(sizeof(GroupVarInfo));
2966 varinfo->rel = vardata->rel;
2967 varinfo->ndistinct = ndistinct;
2968 varinfos = lappend(varinfos, varinfo);
2973 * estimate_num_groups - Estimate number of groups in a grouped query
2975 * Given a query having a GROUP BY clause, estimate how many groups there
2976 * will be --- ie, the number of distinct combinations of the GROUP BY
2979 * This routine is also used to estimate the number of rows emitted by
2980 * a DISTINCT filtering step; that is an isomorphic problem. (Note:
2981 * actually, we only use it for DISTINCT when there's no grouping or
2982 * aggregation ahead of the DISTINCT.)
2986 * groupExprs - list of expressions being grouped by
2987 * input_rows - number of rows estimated to arrive at the group/unique
2990 * Given the lack of any cross-correlation statistics in the system, it's
2991 * impossible to do anything really trustworthy with GROUP BY conditions
2992 * involving multiple Vars. We should however avoid assuming the worst
2993 * case (all possible cross-product terms actually appear as groups) since
2994 * very often the grouped-by Vars are highly correlated. Our current approach
2996 * 1. Expressions yielding boolean are assumed to contribute two groups,
2997 * independently of their content, and are ignored in the subsequent
2998 * steps. This is mainly because tests like "col IS NULL" break the
2999 * heuristic used in step 2 especially badly.
3000 * 2. Reduce the given expressions to a list of unique Vars used. For
3001 * example, GROUP BY a, a + b is treated the same as GROUP BY a, b.
3002 * It is clearly correct not to count the same Var more than once.
3003 * It is also reasonable to treat f(x) the same as x: f() cannot
3004 * increase the number of distinct values (unless it is volatile,
3005 * which we consider unlikely for grouping), but it probably won't
3006 * reduce the number of distinct values much either.
3007 * As a special case, if a GROUP BY expression can be matched to an
3008 * expressional index for which we have statistics, then we treat the
3009 * whole expression as though it were just a Var.
3010 * 3. If the list contains Vars of different relations that are known equal
3011 * due to equivalence classes, then drop all but one of the Vars from each
3012 * known-equal set, keeping the one with smallest estimated # of values
3013 * (since the extra values of the others can't appear in joined rows).
3014 * Note the reason we only consider Vars of different relations is that
3015 * if we considered ones of the same rel, we'd be double-counting the
3016 * restriction selectivity of the equality in the next step.
3017 * 4. For Vars within a single source rel, we multiply together the numbers
3018 * of values, clamp to the number of rows in the rel (divided by 10 if
3019 * more than one Var), and then multiply by the selectivity of the
3020 * restriction clauses for that rel. When there's more than one Var,
3021 * the initial product is probably too high (it's the worst case) but
3022 * clamping to a fraction of the rel's rows seems to be a helpful
3023 * heuristic for not letting the estimate get out of hand. (The factor
3024 * of 10 is derived from pre-Postgres-7.4 practice.) Multiplying
3025 * by the restriction selectivity is effectively assuming that the
3026 * restriction clauses are independent of the grouping, which is a crummy
3027 * assumption, but it's hard to do better.
3028 * 5. If there are Vars from multiple rels, we repeat step 4 for each such
3029 * rel, and multiply the results together.
3030 * Note that rels not containing grouped Vars are ignored completely, as are
3031 * join clauses. Such rels cannot increase the number of groups, and we
3032 * assume such clauses do not reduce the number either (somewhat bogus,
3033 * but we don't have the info to do better).
3036 estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows)
3038 List *varinfos = NIL;
3042 /* We should not be called unless query has GROUP BY (or DISTINCT) */
3043 Assert(groupExprs != NIL);
3046 * Count groups derived from boolean grouping expressions. For other
3047 * expressions, find the unique Vars used, treating an expression as a Var
3048 * if we can find stats for it. For each one, record the statistical
3049 * estimate of number of distinct values (total in its table, without
3050 * regard for filtering).
3054 foreach(l, groupExprs)
3056 Node *groupexpr = (Node *) lfirst(l);
3057 VariableStatData vardata;
3061 /* Short-circuit for expressions returning boolean */
3062 if (exprType(groupexpr) == BOOLOID)
3069 * If examine_variable is able to deduce anything about the GROUP BY
3070 * expression, treat it as a single variable even if it's really more
3073 examine_variable(root, groupexpr, 0, &vardata);
3074 if (HeapTupleIsValid(vardata.statsTuple) || vardata.isunique)
3076 varinfos = add_unique_group_var(root, varinfos,
3077 groupexpr, &vardata);
3078 ReleaseVariableStats(vardata);
3081 ReleaseVariableStats(vardata);
3084 * Else pull out the component Vars. Handle PlaceHolderVars by
3085 * recursing into their arguments (effectively assuming that the
3086 * PlaceHolderVar doesn't change the number of groups, which boils
3087 * down to ignoring the possible addition of nulls to the result set).
3089 varshere = pull_var_clause(groupexpr, PVC_RECURSE_PLACEHOLDERS);
3092 * If we find any variable-free GROUP BY item, then either it is a
3093 * constant (and we can ignore it) or it contains a volatile function;
3094 * in the latter case we punt and assume that each input row will
3095 * yield a distinct group.
3097 if (varshere == NIL)
3099 if (contain_volatile_functions(groupexpr))
3105 * Else add variables to varinfos list
3107 foreach(l2, varshere)
3109 Node *var = (Node *) lfirst(l2);
3111 examine_variable(root, var, 0, &vardata);
3112 varinfos = add_unique_group_var(root, varinfos, var, &vardata);
3113 ReleaseVariableStats(vardata);
3118 * If now no Vars, we must have an all-constant or all-boolean GROUP BY
3121 if (varinfos == NIL)
3123 /* Guard against out-of-range answers */
3124 if (numdistinct > input_rows)
3125 numdistinct = input_rows;
3130 * Group Vars by relation and estimate total numdistinct.
3132 * For each iteration of the outer loop, we process the frontmost Var in
3133 * varinfos, plus all other Vars in the same relation. We remove these
3134 * Vars from the newvarinfos list for the next iteration. This is the
3135 * easiest way to group Vars of same rel together.
3139 GroupVarInfo *varinfo1 = (GroupVarInfo *) linitial(varinfos);
3140 RelOptInfo *rel = varinfo1->rel;
3141 double reldistinct = varinfo1->ndistinct;
3142 double relmaxndistinct = reldistinct;
3143 int relvarcount = 1;
3144 List *newvarinfos = NIL;
3147 * Get the product of numdistinct estimates of the Vars for this rel.
3148 * Also, construct new varinfos list of remaining Vars.
3150 for_each_cell(l, lnext(list_head(varinfos)))
3152 GroupVarInfo *varinfo2 = (GroupVarInfo *) lfirst(l);
3154 if (varinfo2->rel == varinfo1->rel)
3156 reldistinct *= varinfo2->ndistinct;
/* track the largest single-Var ndistinct, a lower bound on group count */
3157 if (relmaxndistinct < varinfo2->ndistinct)
3158 relmaxndistinct = varinfo2->ndistinct;
3163 /* not time to process varinfo2 yet */
3164 newvarinfos = lcons(varinfo2, newvarinfos);
3169 * Sanity check --- don't divide by zero if empty relation.
3171 Assert(rel->reloptkind == RELOPT_BASEREL);
3172 if (rel->tuples > 0)
3175 * Clamp to size of rel, or size of rel / 10 if multiple Vars. The
3176 * fudge factor is because the Vars are probably correlated but we
3177 * don't know by how much. We should never clamp to less than the
3178 * largest ndistinct value for any of the Vars, though, since
3179 * there will surely be at least that many groups.
3181 double clamp = rel->tuples;
3183 if (relvarcount > 1)
3186 if (clamp < relmaxndistinct)
3188 clamp = relmaxndistinct;
3189 /* for sanity in case some ndistinct is too large: */
3190 if (clamp > rel->tuples)
3191 clamp = rel->tuples;
3194 if (reldistinct > clamp)
3195 reldistinct = clamp;
3198 * Multiply by restriction selectivity.
3200 reldistinct *= rel->rows / rel->tuples;
3203 * Update estimate of total distinct groups.
3205 numdistinct *= reldistinct;
3208 varinfos = newvarinfos;
3209 } while (varinfos != NIL);
/* round up: a fractional group count makes no sense */
3211 numdistinct = ceil(numdistinct);
3213 /* Guard against out-of-range answers */
3214 if (numdistinct > input_rows)
3215 numdistinct = input_rows;
3216 if (numdistinct < 1.0)
3223 * Estimate hash bucketsize fraction (ie, number of entries in a bucket
3224 * divided by total tuples in relation) if the specified expression is used
3227 * XXX This is really pretty bogus since we're effectively assuming that the
3228 * distribution of hash keys will be the same after applying restriction
3229 * clauses as it was in the underlying relation. However, we are not nearly
3230 * smart enough to figure out how the restrict clauses might change the
3231 * distribution, so this will have to do for now.
3233 * We are passed the number of buckets the executor will use for the given
3234 * input relation. If the data were perfectly distributed, with the same
3235 * number of tuples going into each available bucket, then the bucketsize
3236 * fraction would be 1/nbuckets. But this happy state of affairs will occur
3237 * only if (a) there are at least nbuckets distinct data values, and (b)
3238 * we have a not-too-skewed data distribution. Otherwise the buckets will
3239 * be nonuniformly occupied. If the other relation in the join has a key
3240 * distribution similar to this one's, then the most-loaded buckets are
3241 * exactly those that will be probed most often. Therefore, the "average"
3242 * bucket size for costing purposes should really be taken as something close
3243 * to the "worst case" bucket size. We try to estimate this by adjusting the
3244 * fraction if there are too few distinct data values, and then scaling up
3245 * by the ratio of the most common value's frequency to the average frequency.
3247 * If no statistics are available, use a default estimate of 0.1. This will
3248 * discourage use of a hash rather strongly if the inner relation is large,
3249 * which is what we want. We do not want to hash unless we know that the
3250 * inner rel is well-dispersed (or the alternatives seem much worse).
3253 estimate_hash_bucketsize(PlannerInfo *root, Node *hashkey, double nbuckets)
3255 VariableStatData vardata;
3264 examine_variable(root, hashkey, 0, &vardata);
3266 /* Get number of distinct values and fraction that are null */
3267 ndistinct = get_variable_numdistinct(&vardata);
3269 if (HeapTupleIsValid(vardata.statsTuple))
3271 Form_pg_statistic stats;
3273 stats = (Form_pg_statistic) GETSTRUCT(vardata.statsTuple);
3274 stanullfrac = stats->stanullfrac;
3279 * Believe a default ndistinct only if it came from stats. Otherwise
3280 * punt and return 0.1, per comments above.
3282 if (ndistinct == DEFAULT_NUM_DISTINCT)
3284 ReleaseVariableStats(vardata);
3285 return (Selectivity) 0.1;
/* nulls never match in a hash join, so exclude them from the average */
3291 /* Compute avg freq of all distinct data values in raw relation */
3292 avgfreq = (1.0 - stanullfrac) / ndistinct;
3295 * Adjust ndistinct to account for restriction clauses. Observe we are
3296 * assuming that the data distribution is affected uniformly by the
3297 * restriction clauses!
3299 * XXX Possibly better way, but much more expensive: multiply by
3300 * selectivity of rel's restriction clauses that mention the target Var.
3303 ndistinct *= vardata.rel->rows / vardata.rel->tuples;
3306 * Initial estimate of bucketsize fraction is 1/nbuckets as long as the
3307 * number of buckets is less than the expected number of distinct values;
3308 * otherwise it is 1/ndistinct.
3310 if (ndistinct > nbuckets)
3311 estfract = 1.0 / nbuckets;
3313 estfract = 1.0 / ndistinct;
3316 * Look up the frequency of the most common value, if available.
3320 if (HeapTupleIsValid(vardata.statsTuple))
3322 if (get_attstatsslot(vardata.statsTuple,
3323 vardata.atttype, vardata.atttypmod,
3324 STATISTIC_KIND_MCV, InvalidOid,
3327 &numbers, &nnumbers))
3330 * The first MCV stat is for the most common value.
3333 mcvfreq = numbers[0];
3334 free_attstatsslot(vardata.atttype, NULL, 0,
3340 * Adjust estimated bucketsize upward to account for skewed distribution.
3342 if (avgfreq > 0.0 && mcvfreq > avgfreq)
3343 estfract *= mcvfreq / avgfreq;
3346 * Clamp bucketsize to sane range (the above adjustment could easily
3347 * produce an out-of-range result). We set the lower bound a little above
3348 * zero, since zero isn't a very sane result.
3350 if (estfract < 1.0e-6)
3352 else if (estfract > 1.0)
3355 ReleaseVariableStats(vardata);
3357 return (Selectivity) estfract;
3361 /*-------------------------------------------------------------------------
3365 *-------------------------------------------------------------------------
3370 * Convert non-NULL values of the indicated types to the comparison
3371 * scale needed by scalarineqsel().
3372 * Returns "true" if successful.
3374 * XXX this routine is a hack: ideally we should look up the conversion
3375 * subroutines in pg_type.
3377 * All numeric datatypes are simply converted to their equivalent
3378 * "double" values. (NUMERIC values that are outside the range of "double"
3379 * are clamped to +/- HUGE_VAL.)
3381 * String datatypes are converted by convert_string_to_scalar(),
3382 * which is explained below. The reason why this routine deals with
3383 * three values at a time, not just one, is that we need it for strings.
3385 * The bytea datatype is just enough different from strings that it has
3386 * to be treated separately.
3388 * The several datatypes representing absolute times are all converted
3389 * to Timestamp, which is actually a double, and then we just use that
3390 * double value. Note this will give correct results even for the "special"
3391 * values of Timestamp, since those are chosen to compare correctly;
3392 * see timestamp_cmp.
3394 * The several datatypes representing relative times (intervals) are all
3395 * converted to measurements expressed in seconds.
/*
 * Dispatch on datatype: scale "value" and the histogram bounds onto a
 * common double axis so callers can interpolate.  Returns false (the
 * fall-through at the bottom) for types we don't know how to convert.
 */
3398 convert_to_scalar(Datum value, Oid valuetypid, double *scaledvalue,
3399 Datum lobound, Datum hibound, Oid boundstypid,
3400 double *scaledlobound, double *scaledhibound)
3403 * Both the valuetypid and the boundstypid should exactly match the
3404 * declared input type(s) of the operator we are invoked for, so we just
3405 * error out if either is not recognized.
3407 * XXX The histogram we are interpolating between points of could belong
3408 * to a column that's only binary-compatible with the declared type. In
3409 * essence we are assuming that the semantics of binary-compatible types
3410 * are enough alike that we can use a histogram generated with one type's
3411 * operators to estimate selectivity for the other's. This is outright
3412 * wrong in some cases --- in particular signed versus unsigned
3413 * interpretation could trip us up. But it's useful enough in the
3414 * majority of cases that we do it anyway. Should think about more
3415 * rigorous ways to do it.
3420 * Built-in numeric types
3431 case REGPROCEDUREOID:
3433 case REGOPERATOROID:
3437 case REGDICTIONARYOID:
3438 *scaledvalue = convert_numeric_to_scalar(value, valuetypid);
3439 *scaledlobound = convert_numeric_to_scalar(lobound, boundstypid);
3440 *scaledhibound = convert_numeric_to_scalar(hibound, boundstypid);
3444 * Built-in string types
/* strings are converted jointly: the common prefix of all three matters */
3452 char *valstr = convert_string_datum(value, valuetypid);
3453 char *lostr = convert_string_datum(lobound, boundstypid);
3454 char *histr = convert_string_datum(hibound, boundstypid);
3456 convert_string_to_scalar(valstr, scaledvalue,
3457 lostr, scaledlobound,
3458 histr, scaledhibound);
3466 * Built-in bytea type
3470 convert_bytea_to_scalar(value, scaledvalue,
3471 lobound, scaledlobound,
3472 hibound, scaledhibound);
3477 * Built-in time types
3480 case TIMESTAMPTZOID:
3488 *scaledvalue = convert_timevalue_to_scalar(value, valuetypid);
3489 *scaledlobound = convert_timevalue_to_scalar(lobound, boundstypid);
3490 *scaledhibound = convert_timevalue_to_scalar(hibound, boundstypid);
3494 * Built-in network types
3499 *scaledvalue = convert_network_to_scalar(value, valuetypid);
3500 *scaledlobound = convert_network_to_scalar(lobound, boundstypid);
3501 *scaledhibound = convert_network_to_scalar(hibound, boundstypid);
3504 /* Don't know how to convert */
3505 *scaledvalue = *scaledlobound = *scaledhibound = 0;
3510 * Do convert_to_scalar()'s work for any numeric data type.
/* Returns the value as a double; errors out on non-numeric types. */
3513 convert_numeric_to_scalar(Datum value, Oid typid)
3518 return (double) DatumGetBool(value);
3520 return (double) DatumGetInt16(value);
3522 return (double) DatumGetInt32(value);
/* int64 -> double may lose precision for very large values; acceptable
 * for estimation purposes */
3524 return (double) DatumGetInt64(value);
3526 return (double) DatumGetFloat4(value);
3528 return (double) DatumGetFloat8(value);
3530 /* Note: out-of-range values will be clamped to +-HUGE_VAL */
3532 DatumGetFloat8(DirectFunctionCall1(numeric_float8_no_overflow,
3536 case REGPROCEDUREOID:
3538 case REGOPERATOROID:
3542 case REGDICTIONARYOID:
3543 /* we can treat OIDs as integers... */
3544 return (double) DatumGetObjectId(value);
3548 * Can't get here unless someone tries to use scalarltsel/scalargtsel on
3549 * an operator with one numeric and one non-numeric operand.
3551 elog(ERROR, "unsupported type: %u", typid);
3556 * Do convert_to_scalar()'s work for any character-string data type.
3558 * String datatypes are converted to a scale that ranges from 0 to 1,
3559 * where we visualize the bytes of the string as fractional digits.
3561 * We do not want the base to be 256, however, since that tends to
3562 * generate inflated selectivity estimates; few databases will have
3563 * occurrences of all 256 possible byte values at each position.
3564 * Instead, use the smallest and largest byte values seen in the bounds
3565 * as the estimated range for each byte, after some fudging to deal with
3566 * the fact that we probably aren't going to see the full range that way.
3568 * An additional refinement is that we discard any common prefix of the
3569 * three strings before computing the scaled values. This allows us to
3570 * "zoom in" when we encounter a narrow data range. An example is a phone
3571 * number database where all the values begin with the same area code.
3572 * (Actually, the bounds will be adjacent histogram-bin-boundary values,
3573 * so this is more likely to happen than you might think.)
3576 convert_string_to_scalar(char *value,
3577 double *scaledvalue,
3579 double *scaledlobound,
3581 double *scaledhibound)
/* Establish byte-value range from the characters seen in the two bounds */
3587 rangelo = rangehi = (unsigned char) hibound[0];
3588 for (sptr = lobound; *sptr; sptr++)
3590 if (rangelo > (unsigned char) *sptr)
3591 rangelo = (unsigned char) *sptr;
3592 if (rangehi < (unsigned char) *sptr)
3593 rangehi = (unsigned char) *sptr;
3595 for (sptr = hibound; *sptr; sptr++)
3597 if (rangelo > (unsigned char) *sptr)
3598 rangelo = (unsigned char) *sptr;
3599 if (rangehi < (unsigned char) *sptr)
3600 rangehi = (unsigned char) *sptr;
3602 /* If range includes any upper-case ASCII chars, make it include all */
3603 if (rangelo <= 'Z' && rangehi >= 'A')
3610 /* Ditto lower-case */
3611 if (rangelo <= 'z' && rangehi >= 'a')
/* Ditto digits */
3619 if (rangelo <= '9' && rangehi >= '0')
3628 * If range includes less than 10 chars, assume we have not got enough
3629 * data, and make it include regular ASCII set.
3631 if (rangehi - rangelo < 9)
3638 * Now strip any common prefix of the three strings.
3642 if (*lobound != *hibound || *lobound != *value)
3644 lobound++, hibound++, value++;
3648 * Now we can do the conversions.
3650 *scaledvalue = convert_one_string_to_scalar(value, rangelo, rangehi);
3651 *scaledlobound = convert_one_string_to_scalar(lobound, rangelo, rangehi);
3652 *scaledhibound = convert_one_string_to_scalar(hibound, rangelo, rangehi);
/*
 * Map one string onto [0, 1) by treating its bytes as base-(rangehi-
 * rangelo+1) fractional digits.  Bytes outside [rangelo, rangehi] are
 * clamped to the range edges.
 */
3656 convert_one_string_to_scalar(char *value, int rangelo, int rangehi)
3658 int slen = strlen(value);
3664 return 0.0; /* empty string has scalar value 0 */
3667 * Since base is at least 10, need not consider more than about 20 chars
3672 /* Convert initial characters to fraction */
3673 base = rangehi - rangelo + 1;
3678 int ch = (unsigned char) *value++;
3682 else if (ch > rangehi)
3684 num += ((double) (ch - rangelo)) / denom;
3692 * Convert a string-type Datum into a palloc'd, null-terminated string.
3694 * When using a non-C locale, we must pass the string through strxfrm()
3695 * before continuing, so as to generate correct locale-specific results.
3698 convert_string_datum(Datum value, Oid typid)
/* "char" type: single byte, build a 2-byte NUL-terminated string */
3705 val = (char *) palloc(2);
3706 val[0] = DatumGetChar(value);
3712 val = TextDatumGetCString(value);
3716 NameData *nm = (NameData *) DatumGetPointer(value);
3718 val = pstrdup(NameStr(*nm));
3724 * Can't get here unless someone tries to use scalarltsel on an
3725 * operator with one string and one non-string operand.
3727 elog(ERROR, "unsupported type: %u", typid);
/* Non-C collation: run through strxfrm so byte order matches sort order */
3731 if (!lc_collate_is_c(DEFAULT_COLLATION_OID))
3738 * Note: originally we guessed at a suitable output buffer size, and
3739 * only needed to call strxfrm twice if our guess was too small.
3740 * However, it seems that some versions of Solaris have buggy strxfrm
3741 * that can write past the specified buffer length in that scenario.
3742 * So, do it the dumb way for portability.
3744 * Yet other systems (e.g., glibc) sometimes return a smaller value
3745 * from the second call than the first; thus the Assert must be <= not
3746 * == as you'd expect. Can't any of these people program their way
3747 * out of a paper bag?
3749 * XXX: strxfrm doesn't support UTF-8 encoding on Win32, it can return
3750 * bogus data or set an error. This is not really a problem unless it
3751 * crashes since it will only give an estimation error and nothing
3754 #if _MSC_VER == 1400 /* VS.Net 2005 */
3758 * http://connect.microsoft.com/VisualStudio/feedback/ViewFeedback.aspx?
3759 * FeedbackID=99694 */
/* first strxfrm call only measures the transformed length */
3763 xfrmlen = strxfrm(x, val, 0);
3766 xfrmlen = strxfrm(NULL, val, 0);
3771 * On Windows, strxfrm returns INT_MAX when an error occurs. Instead
3772 * of trying to allocate this much memory (and fail), just return the
3773 * original string unmodified as if we were in the C locale.
3775 if (xfrmlen == INT_MAX)
3778 xfrmstr = (char *) palloc(xfrmlen + 1);
3779 xfrmlen2 = strxfrm(xfrmstr, val, xfrmlen + 1);
3780 Assert(xfrmlen2 <= xfrmlen);
3789 * Do convert_to_scalar()'s work for any bytea data type.
3791 * Very similar to convert_string_to_scalar except we can't assume
3792 * null-termination and therefore pass explicit lengths around.
3794 * Also, assumptions about likely "normal" ranges of characters have been
3795 * removed - a data range of 0..255 is always used, for now. (Perhaps
3796 * someday we will add information about actual byte data range to
3800 convert_bytea_to_scalar(Datum value,
3801 double *scaledvalue,
3803 double *scaledlobound,
3805 double *scaledhibound)
/* Payload lengths: varlena size minus header */
3809 valuelen = VARSIZE(DatumGetPointer(value)) - VARHDRSZ,
3810 loboundlen = VARSIZE(DatumGetPointer(lobound)) - VARHDRSZ,
3811 hiboundlen = VARSIZE(DatumGetPointer(hibound)) - VARHDRSZ,
3814 unsigned char *valstr = (unsigned char *) VARDATA(DatumGetPointer(value)),
3815 *lostr = (unsigned char *) VARDATA(DatumGetPointer(lobound)),
3816 *histr = (unsigned char *) VARDATA(DatumGetPointer(hibound));
3819 * Assume bytea data is uniformly distributed across all byte values.
3825 * Now strip any common prefix of the three strings.
3827 minlen = Min(Min(valuelen, loboundlen), hiboundlen);
3828 for (i = 0; i < minlen; i++)
3830 if (*lostr != *histr || *lostr != *valstr)
3832 lostr++, histr++, valstr++;
3833 loboundlen--, hiboundlen--, valuelen--;
3837 * Now we can do the conversions.
3839 *scaledvalue = convert_one_bytea_to_scalar(valstr, valuelen, rangelo, rangehi);
3840 *scaledlobound = convert_one_bytea_to_scalar(lostr, loboundlen, rangelo, rangehi);
3841 *scaledhibound = convert_one_bytea_to_scalar(histr, hiboundlen, rangelo, rangehi);
/*
 * Map one bytea payload onto [0, 1) by treating its bytes as fractional
 * digits, analogous to convert_one_string_to_scalar but with an explicit
 * length instead of NUL termination.
 */
3845 convert_one_bytea_to_scalar(unsigned char *value, int valuelen,
3846 int rangelo, int rangehi)
3853 return 0.0; /* empty string has scalar value 0 */
3856 * Since base is 256, need not consider more than about 10 chars (even
3857 * this many seems like overkill)
3862 /* Convert initial characters to fraction */
3863 base = rangehi - rangelo + 1;
3866 while (valuelen-- > 0)
3872 else if (ch > rangehi)
3874 num += ((double) (ch - rangelo)) / denom;
3882 * Do convert_to_scalar()'s work for any timevalue data type.
/*
 * Reduce any date/time type to a single double on a common per-family
 * axis; errors out on non-time types.
 */
3885 convert_timevalue_to_scalar(Datum value, Oid typid)
3890 return DatumGetTimestamp(value);
3891 case TIMESTAMPTZOID:
3892 return DatumGetTimestampTz(value);
3894 return DatumGetTimestamp(DirectFunctionCall1(abstime_timestamp,
3897 return date2timestamp_no_overflow(DatumGetDateADT(value));
3900 Interval *interval = DatumGetIntervalP(value);
3903 * Convert the month part of Interval to days using assumed
3904 * average month length of 365.25/12.0 days. Not too
3905 * accurate, but plenty good enough for our purposes.
3907 #ifdef HAVE_INT64_TIMESTAMP
3908 return interval->time + interval->day * (double) USECS_PER_DAY +
3909 interval->month * ((DAYS_PER_YEAR / (double) MONTHS_PER_YEAR) * USECS_PER_DAY);
3911 return interval->time + interval->day * SECS_PER_DAY +
3912 interval->month * ((DAYS_PER_YEAR / (double) MONTHS_PER_YEAR) * (double) SECS_PER_DAY);
/* reltime: scale seconds to microseconds under integer timestamps */
3916 #ifdef HAVE_INT64_TIMESTAMP
3917 return (DatumGetRelativeTime(value) * 1000000.0);
3919 return DatumGetRelativeTime(value);
3923 TimeInterval tinterval = DatumGetTimeInterval(value);
3925 #ifdef HAVE_INT64_TIMESTAMP
/* nonzero status appears to mean a valid tinterval -- confirm */
3926 if (tinterval->status != 0)
3927 return ((tinterval->data[1] - tinterval->data[0]) * 1000000.0);
3929 if (tinterval->status != 0)
3930 return tinterval->data[1] - tinterval->data[0];
3932 return 0; /* for lack of a better idea */
3935 return DatumGetTimeADT(value);
3938 TimeTzADT *timetz = DatumGetTimeTzADTP(value);
3940 /* use GMT-equivalent time */
3941 #ifdef HAVE_INT64_TIMESTAMP
3942 return (double) (timetz->time + (timetz->zone * 1000000.0));
3944 return (double) (timetz->time + timetz->zone);
3950 * Can't get here unless someone tries to use scalarltsel/scalargtsel on
3951 * an operator with one timevalue and one non-timevalue operand.
3953 elog(ERROR, "unsupported type: %u", typid);
3959 * get_restriction_variable
3960 * Examine the args of a restriction clause to see if it's of the
3961 * form (variable op pseudoconstant) or (pseudoconstant op variable),
3962 * where "variable" could be either a Var or an expression in vars of a
3963 * single relation. If so, extract information about the variable,
3964 * and also indicate which side it was on and the other argument.
3967 * root: the planner info
3968 * args: clause argument list
3969 * varRelid: see specs for restriction selectivity functions
3971 * Outputs: (these are valid only if TRUE is returned)
3972 * *vardata: gets information about variable (see examine_variable)
3973 * *other: gets other clause argument, aggressively reduced to a constant
3974 * *varonleft: set TRUE if variable is on the left, FALSE if on the right
3976 * Returns TRUE if a variable is identified, otherwise FALSE.
3978 * Note: if there are Vars on both sides of the clause, we must fail, because
3979 * callers are expecting that the other side will act like a pseudoconstant.
3982 get_restriction_variable(PlannerInfo *root, List *args, int varRelid,
3983 VariableStatData *vardata, Node **other,
3988 VariableStatData rdata;
3990 /* Fail if not a binary opclause (probably shouldn't happen) */
3991 if (list_length(args) != 2)
3994 left = (Node *) linitial(args);
3995 right = (Node *) lsecond(args);
3998 * Examine both sides. Note that when varRelid is nonzero, Vars of other
3999 * relations will be treated as pseudoconstants.
4001 examine_variable(root, left, varRelid, vardata);
4002 examine_variable(root, right, varRelid, &rdata);
4005 * If one side is a variable and the other not, we win.
/* variable on the left: the right side is folded to a constant for the caller */
4007 if (vardata->rel && rdata.rel == NULL)
4010 *other = estimate_expression_value(root, rdata.var);
4011 /* Assume we need no ReleaseVariableStats(rdata) here */
/* variable on the right: mirror image of the case above */
4015 if (vardata->rel == NULL && rdata.rel)
4018 *other = estimate_expression_value(root, vardata->var);
4019 /* Assume we need no ReleaseVariableStats(*vardata) here */
4024 /* Ooops, clause has wrong structure (probably var op var) */
4025 ReleaseVariableStats(*vardata);
4026 ReleaseVariableStats(rdata);
4032 * get_join_variables
4033 * Apply examine_variable() to each side of a join clause.
4034 * Also, attempt to identify whether the join clause has the same
4035 * or reversed sense compared to the SpecialJoinInfo.
4037 * We consider the join clause "normal" if it is "lhs_var OP rhs_var",
4038 * or "reversed" if it is "rhs_var OP lhs_var". In complicated cases
4039 * where we can't tell for sure, we default to assuming it's normal.
4042 get_join_variables(PlannerInfo *root, List *args, SpecialJoinInfo *sjinfo,
4043 VariableStatData *vardata1, VariableStatData *vardata2,
4044 bool *join_is_reversed)
4049 if (list_length(args) != 2)
4050 elog(ERROR, "join operator should take two arguments");
4052 left = (Node *) linitial(args);
4053 right = (Node *) lsecond(args);
/* varRelid = 0: no relation is being singled out as "the" variable rel */
4055 examine_variable(root, left, 0, vardata1);
4056 examine_variable(root, right, 0, vardata2);
/* reversed if side 1 lives entirely in the syntactic RHS, or side 2 in the LHS */
4058 if (vardata1->rel &&
4059 bms_is_subset(vardata1->rel->relids, sjinfo->syn_righthand))
4060 *join_is_reversed = true; /* var1 is on RHS */
4061 else if (vardata2->rel &&
4062 bms_is_subset(vardata2->rel->relids, sjinfo->syn_lefthand))
4063 *join_is_reversed = true; /* var2 is on LHS */
4065 *join_is_reversed = false;
4070 * Try to look up statistical data about an expression.
4071 * Fill in a VariableStatData struct to describe the expression.
4074 * root: the planner info
4075 * node: the expression tree to examine
4076 * varRelid: see specs for restriction selectivity functions
4078 * Outputs: *vardata is filled as follows:
4079 * var: the input expression (with any binary relabeling stripped, if
4080 * it is or contains a variable; but otherwise the type is preserved)
4081 * rel: RelOptInfo for relation containing variable; NULL if expression
4082 * contains no Vars (NOTE this could point to a RelOptInfo of a
4083 * subquery, not one in the current query).
4084 * statsTuple: the pg_statistic entry for the variable, if one exists;
4086 * freefunc: pointer to a function to release statsTuple with.
4087 * vartype: exposed type of the expression; this should always match
4088 * the declared input type of the operator we are estimating for.
4089 * atttype, atttypmod: type data to pass to get_attstatsslot(). This is
4090 * commonly the same as the exposed type of the variable argument,
4091 * but can be different in binary-compatible-type cases.
4092 * isunique: TRUE if we were able to match the var to a unique index,
4093 * implying its values are unique for this query.
4095 * Caller is responsible for doing ReleaseVariableStats() before exiting.
/* NOTE(review): excerpt is elided (interior lines missing) throughout. */
4098 examine_variable(PlannerInfo *root, Node *node, int varRelid,
4099 VariableStatData *vardata)
4105 /* Make sure we don't return dangling pointers in vardata */
4106 MemSet(vardata, 0, sizeof(VariableStatData));
4108 /* Save the exposed type of the expression */
4109 vardata->vartype = exprType(node);
4111 /* Look inside any binary-compatible relabeling */
4113 if (IsA(node, RelabelType))
4114 basenode = (Node *) ((RelabelType *) node)->arg;
4118 /* Fast path for a simple Var */
4120 if (IsA(basenode, Var) &&
4121 (varRelid == 0 || varRelid == ((Var *) basenode)->varno))
4123 Var *var = (Var *) basenode;
4126 vardata->var = basenode; /* return Var without relabeling */
4127 vardata->rel = find_base_rel(root, var->varno);
4128 vardata->atttype = var->vartype;
4129 vardata->atttypmod = var->vartypmod;
4130 vardata->isunique = has_unique_index(vardata->rel, var->varattno);
4132 rte = root->simple_rte_array[var->varno];
/* a get_relation_stats_hook may supply the stats tuple instead of syscache */
4134 if (get_relation_stats_hook &&
4135 (*get_relation_stats_hook) (root, rte, var->varattno, vardata))
4138 * The hook took control of acquiring a stats tuple. If it did
4139 * supply a tuple, it'd better have supplied a freefunc.
4141 if (HeapTupleIsValid(vardata->statsTuple) &&
4143 elog(ERROR, "no function provided to release variable stats with");
4145 else if (rte->rtekind == RTE_RELATION)
4147 vardata->statsTuple = SearchSysCache3(STATRELATTINH,
4148 ObjectIdGetDatum(rte->relid),
4149 Int16GetDatum(var->varattno),
4150 BoolGetDatum(rte->inh));
4151 vardata->freefunc = ReleaseSysCache;
4156 * XXX This means the Var comes from a JOIN or sub-SELECT. Later
4157 * add code to dig down into the join etc and see if we can trace
4158 * the variable to something with stats. (But beware of
4159 * sub-SELECTs with DISTINCT/GROUP BY/etc. Perhaps there are no
4160 * cases where this would really be useful, because we'd have
4161 * flattened the subselect if it is??)
4169 * Okay, it's a more complicated expression. Determine variable
4170 * membership. Note that when varRelid isn't zero, only vars of that
4171 * relation are considered "real" vars.
4173 varnos = pull_varnos(basenode);
4177 switch (bms_membership(varnos))
4180 /* No Vars at all ... must be pseudo-constant clause */
/* single-rel expression: attribute it to that rel unless varRelid excludes it */
4183 if (varRelid == 0 || bms_is_member(varRelid, varnos))
4185 onerel = find_base_rel(root,
4186 (varRelid ? varRelid : bms_singleton_member(varnos)));
4187 vardata->rel = onerel;
4188 node = basenode; /* strip any relabeling */
4190 /* else treat it as a constant */
4195 /* treat it as a variable of a join relation */
4196 vardata->rel = find_join_rel(root, varnos);
4197 node = basenode; /* strip any relabeling */
4199 else if (bms_is_member(varRelid, varnos))
4201 /* ignore the vars belonging to other relations */
4202 vardata->rel = find_base_rel(root, varRelid);
4203 node = basenode; /* strip any relabeling */
4204 /* note: no point in expressional-index search here */
4206 /* else treat it as a constant */
4212 vardata->var = node;
4213 vardata->atttype = exprType(node);
4214 vardata->atttypmod = exprTypmod(node);
4219 * We have an expression in vars of a single relation. Try to match
4220 * it to expressional index columns, in hopes of finding some
4223 * XXX it's conceivable that there are multiple matches with different
4224 * index opfamilies; if so, we need to pick one that matches the
4225 * operator we are estimating for. FIXME later.
4229 foreach(ilist, onerel->indexlist)
4231 IndexOptInfo *index = (IndexOptInfo *) lfirst(ilist);
4232 ListCell *indexpr_item;
4235 indexpr_item = list_head(index->indexprs);
4236 if (indexpr_item == NULL)
4237 continue; /* no expressions here... */
/* walk index columns; zero indexkeys entries denote expression columns */
4239 for (pos = 0; pos < index->ncolumns; pos++)
4241 if (index->indexkeys[pos] == 0)
4245 if (indexpr_item == NULL)
4246 elog(ERROR, "too few entries in indexprs list");
4247 indexkey = (Node *) lfirst(indexpr_item);
4248 if (indexkey && IsA(indexkey, RelabelType))
4249 indexkey = (Node *) ((RelabelType *) indexkey)->arg;
4250 if (equal(node, indexkey))
4253 * Found a match ... is it a unique index? Tests here
4254 * should match has_unique_index().
4256 if (index->unique &&
4257 index->ncolumns == 1 &&
4258 (index->indpred == NIL || index->predOK))
4259 vardata->isunique = true;
4262 * Has it got stats? We only consider stats for
4263 * non-partial indexes, since partial indexes probably
4264 * don't reflect whole-relation statistics; the above
4265 * check for uniqueness is the only info we take from
4268 * An index stats hook, however, must make its own
4269 * decisions about what to do with partial indexes.
4271 if (get_index_stats_hook &&
4272 (*get_index_stats_hook) (root, index->indexoid,
4276 * The hook took control of acquiring a stats
4277 * tuple. If it did supply a tuple, it'd better
4278 * have supplied a freefunc.
4280 if (HeapTupleIsValid(vardata->statsTuple) &&
4282 elog(ERROR, "no function provided to release variable stats with");
4284 else if (index->indpred == NIL)
/* expression-index stats are keyed by the index OID and column position */
4286 vardata->statsTuple =
4287 SearchSysCache3(STATRELATTINH,
4288 ObjectIdGetDatum(index->indexoid),
4289 Int16GetDatum(pos + 1),
4290 BoolGetDatum(false));
4291 vardata->freefunc = ReleaseSysCache;
4293 if (vardata->statsTuple)
4296 indexpr_item = lnext(indexpr_item);
4299 if (vardata->statsTuple)
4306 * get_variable_numdistinct
4307 * Estimate the number of distinct values of a variable.
4309 * vardata: results of examine_variable
4311 * NB: be careful to produce an integral result, since callers may compare
4312 * the result to exact integer counts.
4315 get_variable_numdistinct(VariableStatData *vardata)
4321 * Determine the stadistinct value to use. There are cases where we can
4322 * get an estimate even without a pg_statistic entry, or can get a better
4323 * value than is in pg_statistic.
4325 if (HeapTupleIsValid(vardata->statsTuple))
4327 /* Use the pg_statistic entry */
4328 Form_pg_statistic stats;
4330 stats = (Form_pg_statistic) GETSTRUCT(vardata->statsTuple);
4331 stadistinct = stats->stadistinct;
4333 else if (vardata->vartype == BOOLOID)
4336 * Special-case boolean columns: presumably, two distinct values.
4338 * Are there any other datatypes we should wire in special estimates
4346 * We don't keep statistics for system columns, but in some cases we
4347 * can infer distinctness anyway.
4349 if (vardata->var && IsA(vardata->var, Var))
4351 switch (((Var *) vardata->var)->varattno)
4353 case ObjectIdAttributeNumber:
4354 case SelfItemPointerAttributeNumber:
4355 stadistinct = -1.0; /* unique */
4357 case TableOidAttributeNumber:
4358 stadistinct = 1.0; /* only 1 value */
4361 stadistinct = 0.0; /* means "unknown" */
4366 stadistinct = 0.0; /* means "unknown" */
4369 * XXX consider using estimate_num_groups on expressions?
4374 * If there is a unique index for the variable, assume it is unique no
4375 * matter what pg_statistic says; the statistics could be out of date, or
4376 * we might have found a partial unique index that proves the var is
4377 * unique for this query.
4379 if (vardata->isunique)
4383 * If we had an absolute estimate, use that.
/* stadistinct > 0 is an absolute distinct-value count */
4385 if (stadistinct > 0.0)
4389 * Otherwise we need to get the relation size; punt if not available.
4391 if (vardata->rel == NULL)
4392 return DEFAULT_NUM_DISTINCT;
4393 ntuples = vardata->rel->tuples;
4395 return DEFAULT_NUM_DISTINCT;
4398 * If we had a relative estimate, use that.
/* stadistinct < 0 means "-fraction of rows are distinct"; round to integer */
4400 if (stadistinct < 0.0)
4401 return floor((-stadistinct * ntuples) + 0.5);
4404 * With no data, estimate ndistinct = ntuples if the table is small, else
4407 if (ntuples < DEFAULT_NUM_DISTINCT)
4410 return DEFAULT_NUM_DISTINCT;
4414 * get_variable_range
4415 * Estimate the minimum and maximum value of the specified variable.
4416 * If successful, store values in *min and *max, and return TRUE.
4417 * If no data available, return FALSE.
4419 * sortop is the "<" comparison operator to use. This should generally
4420 * be "<" not ">", as only the former is likely to be found in pg_statistic.
4423 get_variable_range(PlannerInfo *root, VariableStatData *vardata, Oid sortop,
4424 Datum *min, Datum *max)
4428 bool have_data = false;
4436 * XXX It's very tempting to try to use the actual column min and max, if
4437 * we can get them relatively-cheaply with an index probe. However, since
4438 * this function is called many times during join planning, that could
4439 * have unpleasant effects on planning speed. Need more investigation
4440 * before enabling this.
4443 if (get_actual_variable_range(root, vardata, sortop, min, max))
4447 if (!HeapTupleIsValid(vardata->statsTuple))
4449 /* no stats available, so default result */
4453 get_typlenbyval(vardata->atttype, &typLen, &typByVal);
4456 * If there is a histogram, grab the first and last values.
4458 * If there is a histogram that is sorted with some other operator than
4459 * the one we want, fail --- this suggests that there is data we can't
4462 if (get_attstatsslot(vardata->statsTuple,
4463 vardata->atttype, vardata->atttypmod,
4464 STATISTIC_KIND_HISTOGRAM, sortop,
/* histogram is sorted, so endpoints are the extreme values seen by ANALYZE */
4471 tmin = datumCopy(values[0], typByVal, typLen);
4472 tmax = datumCopy(values[nvalues - 1], typByVal, typLen);
4475 free_attstatsslot(vardata->atttype, values, nvalues, NULL, 0);
/* a histogram sorted under some other operator exists: give up entirely */
4477 else if (get_attstatsslot(vardata->statsTuple,
4478 vardata->atttype, vardata->atttypmod,
4479 STATISTIC_KIND_HISTOGRAM, InvalidOid,
4484 free_attstatsslot(vardata->atttype, values, nvalues, NULL, 0);
4489 * If we have most-common-values info, look for extreme MCVs. This is
4490 * needed even if we also have a histogram, since the histogram excludes
4491 * the MCVs. However, usually the MCVs will not be the extreme values, so
4492 * avoid unnecessary data copying.
4494 if (get_attstatsslot(vardata->statsTuple,
4495 vardata->atttype, vardata->atttypmod,
4496 STATISTIC_KIND_MCV, InvalidOid,
4501 bool tmin_is_mcv = false;
4502 bool tmax_is_mcv = false;
4505 fmgr_info(get_opcode(sortop), &opproc);
4506 fmgr_info_set_collation(DEFAULT_COLLATION_OID, &opproc);
/* scan the (unsorted) MCV list, widening [tmin, tmax] via the "<" operator */
4508 for (i = 0; i < nvalues; i++)
4512 tmin = tmax = values[i];
4513 tmin_is_mcv = tmax_is_mcv = have_data = true;
4516 if (DatumGetBool(FunctionCall2(&opproc, values[i], tmin)))
4521 if (DatumGetBool(FunctionCall2(&opproc, tmax, values[i])))
/* MCV-sourced endpoints must be copied before the stats slot is freed */
4528 tmin = datumCopy(tmin, typByVal, typLen);
4530 tmax = datumCopy(tmax, typByVal, typLen);
4531 free_attstatsslot(vardata->atttype, values, nvalues, NULL, 0);
4541 * get_actual_variable_range
4542 * Attempt to identify the current *actual* minimum and/or maximum
4543 * of the specified variable, by looking for a suitable btree index
4544 * and fetching its low and/or high values.
4545 * If successful, store values in *min and *max, and return TRUE.
4546 * (Either pointer can be NULL if that endpoint isn't needed.)
4547 * If no data available, return FALSE.
4549 * sortop is the "<" comparison operator to use.
/* NOTE(review): excerpt is elided (interior lines missing) throughout. */
4552 get_actual_variable_range(PlannerInfo *root, VariableStatData *vardata,
4554 Datum *min, Datum *max)
4556 bool have_data = false;
4557 RelOptInfo *rel = vardata->rel;
4561 /* No hope if no relation or it doesn't have indexes */
4562 if (rel == NULL || rel->indexlist == NIL)
4564 /* If it has indexes it must be a plain relation */
4565 rte = root->simple_rte_array[rel->relid];
4566 Assert(rte->rtekind == RTE_RELATION);
4568 /* Search through the indexes to see if any match our problem */
4569 foreach(lc, rel->indexlist)
4571 IndexOptInfo *index = (IndexOptInfo *) lfirst(lc);
4572 ScanDirection indexscandir;
4574 /* Ignore non-btree indexes */
4575 if (index->relam != BTREE_AM_OID)
4579 * Ignore partial indexes --- we only want stats that cover the entire
4582 if (index->indpred != NIL)
4586 * The index list might include hypothetical indexes inserted by a
4587 * get_relation_info hook --- don't try to access them.
4589 if (index->hypothetical)
4593 * The first index column must match the desired variable and sort
4594 * operator --- but we can use a descending-order index.
4596 if (!match_index_to_operand(vardata->var, 0, index))
/* choose scan direction so a forward fetch yields the sortop's minimum */
4598 switch (get_op_opfamily_strategy(sortop, index->sortopfamily[0]))
4600 case BTLessStrategyNumber:
4601 if (index->reverse_sort[0])
4602 indexscandir = BackwardScanDirection;
4604 indexscandir = ForwardScanDirection;
4606 case BTGreaterStrategyNumber:
4607 if (index->reverse_sort[0])
4608 indexscandir = ForwardScanDirection;
4610 indexscandir = BackwardScanDirection;
4613 /* index doesn't match the sortop */
4618 * Found a suitable index to extract data from. We'll need an EState
4619 * and a bunch of other infrastructure.
4623 ExprContext *econtext;
4624 MemoryContext tmpcontext;
4625 MemoryContext oldcontext;
4628 IndexInfo *indexInfo;
4629 TupleTableSlot *slot;
4632 ScanKeyData scankeys[1];
4633 IndexScanDesc index_scan;
4635 Datum values[INDEX_MAX_KEYS];
4636 bool isnull[INDEX_MAX_KEYS];
4638 estate = CreateExecutorState();
4639 econtext = GetPerTupleExprContext(estate);
4640 /* Make sure any cruft is generated in the econtext's memory */
4641 tmpcontext = econtext->ecxt_per_tuple_memory;
4642 oldcontext = MemoryContextSwitchTo(tmpcontext);
4645 * Open the table and index so we can read from them. We should
4646 * already have at least AccessShareLock on the table, but not
4647 * necessarily on the index.
4649 heapRel = heap_open(rte->relid, NoLock);
4650 indexRel = index_open(index->indexoid, AccessShareLock);
4652 /* extract index key information from the index's pg_index info */
4653 indexInfo = BuildIndexInfo(indexRel);
4655 /* some other stuff */
4656 slot = MakeSingleTupleTableSlot(RelationGetDescr(heapRel));
4657 econtext->ecxt_scantuple = slot;
4658 get_typlenbyval(vardata->atttype, &typLen, &typByVal);
4660 /* set up an IS NOT NULL scan key so that we ignore nulls */
4661 ScanKeyEntryInitialize(&scankeys[0],
4662 SK_ISNULL | SK_SEARCHNOTNULL,
4663 1, /* index col to scan */
4664 InvalidStrategy, /* no strategy */
4665 InvalidOid, /* no strategy subtype */
4666 InvalidOid, /* no collation */
4667 InvalidOid, /* no reg proc for this */
4668 (Datum) 0); /* constant */
4672 /* If min is requested ... */
/* NOTE(review): SnapshotNow is used here, so concurrently committed rows
 * are visible; acceptable for estimation purposes only. */
4675 index_scan = index_beginscan(heapRel, indexRel, SnapshotNow,
4677 index_rescan(index_scan, scankeys, 1, NULL, 0);
4679 /* Fetch first tuple in sortop's direction */
4680 if ((tup = index_getnext(index_scan,
4681 indexscandir)) != NULL)
4683 /* Extract the index column values from the heap tuple */
4684 ExecStoreTuple(tup, slot, InvalidBuffer, false);
4685 FormIndexDatum(indexInfo, slot, estate,
4688 /* Shouldn't have got a null, but be careful */
4690 elog(ERROR, "found unexpected null value in index \"%s\"",
4691 RelationGetRelationName(indexRel));
4693 /* Copy the index column value out to caller's context */
4694 MemoryContextSwitchTo(oldcontext);
4695 *min = datumCopy(values[0], typByVal, typLen);
4696 MemoryContextSwitchTo(tmpcontext);
4701 index_endscan(index_scan);
4704 /* If max is requested, and we didn't find the index is empty */
4705 if (max && have_data)
4707 index_scan = index_beginscan(heapRel, indexRel, SnapshotNow,
4709 index_rescan(index_scan, scankeys, 1, NULL, 0);
4711 /* Fetch first tuple in reverse direction */
4712 if ((tup = index_getnext(index_scan,
4713 -indexscandir)) != NULL)
4715 /* Extract the index column values from the heap tuple */
4716 ExecStoreTuple(tup, slot, InvalidBuffer, false);
4717 FormIndexDatum(indexInfo, slot, estate,
4720 /* Shouldn't have got a null, but be careful */
4722 elog(ERROR, "found unexpected null value in index \"%s\"",
4723 RelationGetRelationName(indexRel));
4725 /* Copy the index column value out to caller's context */
4726 MemoryContextSwitchTo(oldcontext);
4727 *max = datumCopy(values[0], typByVal, typLen);
4728 MemoryContextSwitchTo(tmpcontext);
4733 index_endscan(index_scan);
4736 /* Clean everything up */
4737 ExecDropSingleTupleTableSlot(slot);
4739 index_close(indexRel, AccessShareLock);
4740 heap_close(heapRel, NoLock);
4742 MemoryContextSwitchTo(oldcontext);
4743 FreeExecutorState(estate);
4745 /* And we're done */
4754 /*-------------------------------------------------------------------------
4756 * Pattern analysis functions
4758 * These routines support analysis of LIKE and regular-expression patterns
4759 * by the planner/optimizer. It's important that they agree with the
4760 * regular-expression code in backend/regex/ and the LIKE code in
4761 * backend/utils/adt/like.c. Also, the computation of the fixed prefix
4762 * must be conservative: if we report a string longer than the true fixed
4763 * prefix, the query may produce actually wrong answers, rather than just
4764 * getting a bad selectivity estimate!
4766 * Note that the prefix-analysis functions are called from
4767 * backend/optimizer/path/indxpath.c as well as from routines in this file.
4769 *-------------------------------------------------------------------------
4773 * Check whether char is a letter (and, hence, subject to case-folding)
4775 * In multibyte character sets, we can't use isalpha, and it does not seem
4776 * worth trying to convert to wchar_t to use iswalpha. Instead, just assume
4777 * any multibyte char is potentially case-varying.
/* NOTE(review): excerpt is elided; conditional framing lines are missing. */
4780 pattern_char_isalpha(char c, bool is_multibyte,
4781 pg_locale_t locale, bool locale_is_c)
/* C locale: only ASCII letters are case-varying */
4784 return (c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z');
4785 else if (is_multibyte && IS_HIGHBIT_SET(c))
4787 #ifdef HAVE_LOCALE_T
/* with a per-collation locale_t available, consult it directly */
4789 return isalpha_l((unsigned char) c, locale);
4792 return isalpha((unsigned char) c);
4796 * Extract the fixed prefix, if any, for a pattern.
4798 * *prefix is set to a palloc'd prefix string (in the form of a Const node),
4799 * or to NULL if no fixed prefix exists for the pattern.
4800 * *rest is set to a palloc'd Const representing the remainder of the pattern
4801 * after the portion describing the fixed prefix.
4802 * Each of these has the same type (TEXT or BYTEA) as the given pattern Const.
4804 * The return value distinguishes no fixed prefix, a partial prefix,
4805 * or an exact-match-only pattern.
/* NOTE(review): excerpt is elided (interior lines missing) throughout. */
4808 static Pattern_Prefix_Status
4809 like_fixed_prefix(Const *patt_const, bool case_insensitive, Oid collation,
4810 Const **prefix_const, Const **rest_const)
4816 Oid typeid = patt_const->consttype;
4819 bool is_multibyte = (pg_database_encoding_max_length() > 1);
4820 pg_locale_t locale = 0;
4821 bool locale_is_c = false;
4823 /* the right-hand const is type text or bytea */
4824 Assert(typeid == BYTEAOID || typeid == TEXTOID);
4826 if (case_insensitive)
4828 if (typeid == BYTEAOID)
4830 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
4831 errmsg("case insensitive matching not supported on type bytea")));
4833 /* If case-insensitive, we need locale info */
4834 if (lc_ctype_is_c(collation))
4836 else if (collation != DEFAULT_COLLATION_OID)
4838 if (!OidIsValid(collation))
4841 * This typically means that the parser could not resolve a
4842 * conflict of implicit collations, so report it that way.
4845 (errcode(ERRCODE_INDETERMINATE_COLLATION),
4846 errmsg("could not determine which collation to use for ILIKE"),
4847 errhint("Use the COLLATE clause to set the collation explicitly.")));
4849 locale = pg_newlocale_from_collation(collation);
/* extract the raw pattern bytes; bytea may contain embedded NULs */
4853 if (typeid != BYTEAOID)
4855 patt = TextDatumGetCString(patt_const->constvalue);
4856 pattlen = strlen(patt);
4860 bytea *bstr = DatumGetByteaP(patt_const->constvalue);
4862 pattlen = VARSIZE(bstr) - VARHDRSZ;
4863 patt = (char *) palloc(pattlen);
4864 memcpy(patt, VARDATA(bstr), pattlen);
/* free the detoasted copy only if detoasting actually made one */
4865 if ((Pointer) bstr != DatumGetPointer(patt_const->constvalue))
4869 match = palloc(pattlen + 1);
/* copy literal pattern chars into match[] until a wildcard or escape stops us */
4871 for (pos = 0; pos < pattlen; pos++)
4873 /* % and _ are wildcard characters in LIKE */
4874 if (patt[pos] == '%' ||
4878 /* Backslash escapes the next character */
4879 if (patt[pos] == '\\')
4886 /* Stop if case-varying character (it's sort of a wildcard) */
4887 if (case_insensitive &&
4888 pattern_char_isalpha(patt[pos], is_multibyte, locale, locale_is_c))
4891 match[match_pos++] = patt[pos];
4894 match[match_pos] = '\0';
4897 if (typeid != BYTEAOID)
4899 *prefix_const = string_to_const(match, typeid);
4900 *rest_const = string_to_const(rest, typeid);
4904 *prefix_const = string_to_bytea_const(match, match_pos);
4905 *rest_const = string_to_bytea_const(rest, pattlen - pos);
4911 /* in LIKE, an empty pattern is an exact match! */
4913 return Pattern_Prefix_Exact; /* reached end of pattern, so exact */
4916 return Pattern_Prefix_Partial;
4918 return Pattern_Prefix_None;
/*
 * regex_fixed_prefix
 *	Extract the fixed prefix, if any, of a regular-expression pattern,
 *	in the same output convention as like_fixed_prefix().
 * NOTE(review): excerpt is elided (interior lines missing) throughout.
 */
4921 static Pattern_Prefix_Status
4922 regex_fixed_prefix(Const *patt_const, bool case_insensitive, Oid collation,
4923 Const **prefix_const, Const **rest_const)
4930 bool have_leading_paren;
4933 Oid typeid = patt_const->consttype;
4934 bool is_multibyte = (pg_database_encoding_max_length() > 1);
4935 pg_locale_t locale = 0;
4936 bool locale_is_c = false;
4939 * Should be unnecessary, there are no bytea regex operators defined. As
4940 * such, it should be noted that the rest of this function has *not* been
4941 * made safe for binary (possibly NULL containing) strings.
4943 if (typeid == BYTEAOID)
4945 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
4946 errmsg("regular-expression matching not supported on type bytea")));
4948 if (case_insensitive)
4950 /* If case-insensitive, we need locale info */
4951 if (lc_ctype_is_c(collation))
4953 else if (collation != DEFAULT_COLLATION_OID)
4955 if (!OidIsValid(collation))
4958 * This typically means that the parser could not resolve a
4959 * conflict of implicit collations, so report it that way.
4962 (errcode(ERRCODE_INDETERMINATE_COLLATION),
4963 errmsg("could not determine which collation to use for regular expression"),
4964 errhint("Use the COLLATE clause to set the collation explicitly.")));
4966 locale = pg_newlocale_from_collation(collation);
4970 /* the right-hand const is type text for all of these */
4971 patt = TextDatumGetCString(patt_const->constvalue);
4974 * Check for ARE director prefix. It's worth our trouble to recognize
4975 * this because similar_escape() used to use it, and some other code might
4976 * still use it, to force ARE mode.
4979 if (strncmp(patt, "***:", 4) == 0)
4982 /* Pattern must be anchored left */
4983 if (patt[pos] != '^')
/* not anchored: no usable fixed prefix */
4987 *prefix_const = NULL;
4988 *rest_const = string_to_const(rest, typeid);
4990 return Pattern_Prefix_None;
4995 * If '|' is present in pattern, then there may be multiple alternatives
4996 * for the start of the string. (There are cases where this isn't so, for
4997 * instance if the '|' is inside parens, but detecting that reliably is
5000 if (strchr(patt + pos, '|') != NULL)
5004 *prefix_const = NULL;
5005 *rest_const = string_to_const(rest, typeid);
5007 return Pattern_Prefix_None;
5010 /* OK, allocate space for pattern */
5011 match = palloc(strlen(patt) + 1);
5012 prev_match_pos = match_pos = 0;
5015 * We special-case the syntax '^(...)$' because psql uses it. But beware:
5016 * sequences beginning "(?" are not what they seem, unless they're "(?:".
5017 * (We must recognize that because of similar_escape().)
5019 have_leading_paren = false;
5020 if (patt[pos] == '(' &&
5021 (patt[pos + 1] != '?' || patt[pos + 2] == ':'))
5023 have_leading_paren = true;
/* skip "(" or the full "(?:" opener */
5024 pos += (patt[pos + 1] != '?' ? 1 : 3);
5027 /* Scan remainder of pattern */
5034 * Check for characters that indicate multiple possible matches here.
5035 * Also, drop out at ')' or '$' so the termination test works right.
5037 if (patt[pos] == '.' ||
5045 /* Stop if case-varying character (it's sort of a wildcard) */
5046 if (case_insensitive &&
5047 pattern_char_isalpha(patt[pos], is_multibyte, locale, locale_is_c))
5051 * Check for quantifiers. Except for +, this means the preceding
5052 * character is optional, so we must remove it from the prefix too!
5054 if (patt[pos] == '*' ||
5058 match_pos = prev_match_pos;
5062 if (patt[pos] == '+')
5069 * Normally, backslash quotes the next character. But in AREs,
5070 * backslash followed by alphanumeric is an escape, not a quoted
5071 * character. Must treat it as having multiple possible matches.
5072 * Note: since only ASCII alphanumerics are escapes, we don't have to
5073 * be paranoid about multibyte or collations here.
5075 if (patt[pos] == '\\')
5077 if (isalnum((unsigned char) patt[pos + 1]))
5080 if (patt[pos] == '\0')
5083 /* save position in case we need to back up on next loop cycle */
5084 prev_match_pos = match_pos;
5086 /* must use encoding-aware processing here */
5087 len = pg_mblen(&patt[pos]);
5088 memcpy(&match[match_pos], &patt[pos], len);
5093 match[match_pos] = '\0';
/* the whole pattern was '^(prefix)$' or '^prefix$': exact-match pattern */
5096 if (have_leading_paren && patt[pos] == ')')
5099 if (patt[pos] == '$' && patt[pos + 1] == '\0')
5101 rest = &patt[pos + 1];
5103 *prefix_const = string_to_const(match, typeid);
5104 *rest_const = string_to_const(rest, typeid);
5109 return Pattern_Prefix_Exact; /* pattern specifies exact match */
5112 *prefix_const = string_to_const(match, typeid);
5113 *rest_const = string_to_const(rest, typeid);
5119 return Pattern_Prefix_Partial;
5121 return Pattern_Prefix_None;
/*
 * pattern_fixed_prefix
 *	Dispatch to the LIKE or regex prefix extractor based on ptype,
 *	passing case sensitivity as appropriate; unknown ptype is an ERROR.
 * NOTE(review): excerpt is elided (switch framing lines missing).
 */
5124 Pattern_Prefix_Status
5125 pattern_fixed_prefix(Const *patt, Pattern_Type ptype, Oid collation,
5126 Const **prefix, Const **rest)
5128 Pattern_Prefix_Status result;
5132 case Pattern_Type_Like:
5133 result = like_fixed_prefix(patt, false, collation, prefix, rest);
5135 case Pattern_Type_Like_IC:
5136 result = like_fixed_prefix(patt, true, collation, prefix, rest);
5138 case Pattern_Type_Regex:
5139 result = regex_fixed_prefix(patt, false, collation, prefix, rest);
5141 case Pattern_Type_Regex_IC:
5142 result = regex_fixed_prefix(patt, true, collation, prefix, rest);
5145 elog(ERROR, "unrecognized ptype: %d", (int) ptype);
5146 result = Pattern_Prefix_None; /* keep compiler quiet */
5153 * Estimate the selectivity of a fixed prefix for a pattern match.
5155 * A fixed prefix "foo" is estimated as the selectivity of the expression
5156 * "variable >= 'foo' AND variable < 'fop'" (see also indxpath.c).
5158 * The selectivity estimate is with respect to the portion of the column
5159 * population represented by the histogram --- the caller must fold this
5160 * together with info about MCVs and NULLs.
5162 * We use the >= and < operators from the specified btree opfamily to do the
5163 * estimation. The given variable and Const must be of the associated
5166 * XXX Note: we make use of the upper bound to estimate operator selectivity
5167 * even if the locale is such that we cannot rely on the upper-bound string.
5168 * The selectivity only needs to be approximately right anyway, so it seems
5169 * more useful to use the upper-bound code than not.
/*
 * NOTE(review): elided dump -- the return type line, some declarations
 * (cmpopr, opproc, eq_sel, topsel), several braces, and the final
 * "return prefixsel;" are not visible here.
 */
5172 prefix_selectivity(PlannerInfo *root, VariableStatData *vardata,
5173 Oid vartype, Oid opfamily, Const *prefixcon)
5175 Selectivity prefixsel;
5178 Const *greaterstrcon;
/* Look up the opfamily's >= operator to evaluate "variable >= 'foo'" */
5181 cmpopr = get_opfamily_member(opfamily, vartype, vartype,
5182 BTGreaterEqualStrategyNumber);
5183 if (cmpopr == InvalidOid)
5184 elog(ERROR, "no >= operator for opfamily %u", opfamily);
5185 fmgr_info(get_opcode(cmpopr), &opproc);
5186 fmgr_info_set_collation(DEFAULT_COLLATION_OID, &opproc);
5188 prefixsel = ineq_histogram_selectivity(root, vardata, &opproc, true,
5189 prefixcon->constvalue,
5190 prefixcon->consttype);
5192 if (prefixsel < 0.0)
5194 /* No histogram is present ... return a suitable default estimate */
5195 return DEFAULT_MATCH_SEL;
5199 * If we can create a string larger than the prefix, say
/* Look up the < operator for the upper-bound comparison */
5203 cmpopr = get_opfamily_member(opfamily, vartype, vartype,
5204 BTLessStrategyNumber);
5205 if (cmpopr == InvalidOid)
5206 elog(ERROR, "no < operator for opfamily %u", opfamily);
5207 fmgr_info(get_opcode(cmpopr), &opproc);
5208 fmgr_info_set_collation(DEFAULT_COLLATION_OID, &opproc);
5210 greaterstrcon = make_greater_string(prefixcon, &opproc);
5215 topsel = ineq_histogram_selectivity(root, vardata, &opproc, false,
5216 greaterstrcon->constvalue,
5217 greaterstrcon->consttype);
5219 /* ineq_histogram_selectivity worked before, it shouldn't fail now */
5220 Assert(topsel >= 0.0);
5223 * Merge the two selectivities in the same way as for a range query
5224 * (see clauselist_selectivity()). Note that we don't need to worry
5225 * about double-exclusion of nulls, since ineq_histogram_selectivity
5226 * doesn't count those anyway.
/* range-style merge: P(a <= x < b) ~= P(x >= a) + P(x < b) - 1 */
5228 prefixsel = topsel + prefixsel - 1.0;
5232 * If the prefix is long then the two bounding values might be too close
5233 * together for the histogram to distinguish them usefully, resulting in a
5234 * zero estimate (plus or minus roundoff error). To avoid returning a
5235 * ridiculously small estimate, compute the estimated selectivity for
5236 * "variable = 'foo'", and clamp to that. (Obviously, the resultant
5237 * estimate should be at least that.)
5239 * We apply this even if we couldn't make a greater string. That case
5240 * suggests that the prefix is near the maximum possible, and thus
5241 * probably off the end of the histogram, and thus we probably got a very
5242 * small estimate from the >= condition; so we still need to clamp.
5244 cmpopr = get_opfamily_member(opfamily, vartype, vartype,
5245 BTEqualStrategyNumber);
5246 if (cmpopr == InvalidOid)
5247 elog(ERROR, "no = operator for opfamily %u", opfamily);
5248 eq_sel = var_eq_const(vardata, cmpopr, prefixcon->constvalue,
5251 prefixsel = Max(prefixsel, eq_sel);
5258 * Estimate the selectivity of a pattern of the specified type.
5259 * Note that any fixed prefix of the pattern will have been removed already.
5261 * For now, we use a very simplistic approach: fixed characters reduce the
5262 * selectivity a good deal, character ranges reduce it a little,
5263 * wildcards (such as % for LIKE or .* for regex) increase it.
/* Per-pattern-element selectivity weights multiplied together below */
5266 #define FIXED_CHAR_SEL 0.20 /* about 1/5 */
5267 #define CHAR_RANGE_SEL 0.25
5268 #define ANY_CHAR_SEL 0.9 /* not 1, since it won't match end-of-string */
5269 #define FULL_WILDCARD_SEL 5.0
5270 #define PARTIAL_WILDCARD_SEL 2.0
/*
 * like_selectivity
 *	  Heuristic selectivity for a LIKE pattern (text or bytea), computed
 *	  by multiplying the per-character weights above.
 *
 * NOTE(review): elided dump -- the return type line, declarations of
 * patt/pattlen/pos, ereport(ERROR, ...) opening, several braces, a pfree,
 * and the final clamp/return are not visible here.
 */
5273 like_selectivity(Const *patt_const, bool case_insensitive)
5275 Selectivity sel = 1.0;
5277 Oid typeid = patt_const->consttype;
5281 /* the right-hand const is type text or bytea */
5282 Assert(typeid == BYTEAOID || typeid == TEXTOID);
5284 if (typeid == BYTEAOID && case_insensitive)
5286 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
5287 errmsg("case insensitive matching not supported on type bytea")));
5289 if (typeid != BYTEAOID)
5291 patt = TextDatumGetCString(patt_const->constvalue);
5292 pattlen = strlen(patt);
/* bytea: copy out the raw bytes; length comes from the varlena header */
5296 bytea *bstr = DatumGetByteaP(patt_const->constvalue);
5298 pattlen = VARSIZE(bstr) - VARHDRSZ;
5299 patt = (char *) palloc(pattlen);
5300 memcpy(patt, VARDATA(bstr), pattlen);
/* free the detoasted copy only if detoasting actually made one */
5301 if ((Pointer) bstr != DatumGetPointer(patt_const->constvalue))
5305 /* Skip any leading wildcard; it's already factored into initial sel */
5306 for (pos = 0; pos < pattlen; pos++)
5308 if (patt[pos] != '%' && patt[pos] != '_')
5312 for (; pos < pattlen; pos++)
5314 /* % and _ are wildcard characters in LIKE */
5315 if (patt[pos] == '%')
5316 sel *= FULL_WILDCARD_SEL;
5317 else if (patt[pos] == '_')
5318 sel *= ANY_CHAR_SEL;
5319 else if (patt[pos] == '\\')
5321 /* Backslash quotes the next character */
5325 sel *= FIXED_CHAR_SEL;
5328 sel *= FIXED_CHAR_SEL;
5330 /* Could get sel > 1 if multiple wildcards */
/*
 * regex_selectivity_sub
 *	  Heuristic selectivity for (a slice of) a regex pattern.  Walks the
 *	  pattern character by character, multiplying per-element weights;
 *	  recurses for parenthesized subexpressions and sums over top-level
 *	  '|' alternatives.  Only elements at paren_depth == 0 contribute
 *	  directly to sel.
 *
 * NOTE(review): elided dump -- the return type line, declaration of pos,
 * paren_depth increments/decrements, several braces, pos advances inside
 * the class/quantifier scans, and the final clamp/return are not visible.
 */
5339 regex_selectivity_sub(char *patt, int pattlen, bool case_insensitive)
5341 Selectivity sel = 1.0;
5342 int paren_depth = 0;
5343 int paren_pos = 0; /* dummy init to keep compiler quiet */
5346 for (pos = 0; pos < pattlen; pos++)
5348 if (patt[pos] == '(')
5350 if (paren_depth == 0)
5351 paren_pos = pos; /* remember start of parenthesized item */
5354 else if (patt[pos] == ')' && paren_depth > 0)
5357 if (paren_depth == 0)
/* closed a top-level group: recurse on its interior */
5358 sel *= regex_selectivity_sub(patt + (paren_pos + 1),
5359 pos - (paren_pos + 1),
5362 else if (patt[pos] == '|' && paren_depth == 0)
5365 * If unquoted | is present at paren level 0 in pattern, we have
5366 * multiple alternatives; sum their probabilities.
5368 sel += regex_selectivity_sub(patt + (pos + 1),
5369 pattlen - (pos + 1),
5371 break; /* rest of pattern is now processed */
5373 else if (patt[pos] == '[')
5375 bool negclass = false;
5377 if (patt[++pos] == '^')
5382 if (patt[pos] == ']') /* ']' at start of class is not
5385 while (pos < pattlen && patt[pos] != ']')
5387 if (paren_depth == 0)
5388 sel *= (negclass ? (1.0 - CHAR_RANGE_SEL) : CHAR_RANGE_SEL);
5390 else if (patt[pos] == '.')
5392 if (paren_depth == 0)
5393 sel *= ANY_CHAR_SEL;
5395 else if (patt[pos] == '*' ||
5399 /* Ought to be smarter about quantifiers... */
5400 if (paren_depth == 0)
5401 sel *= PARTIAL_WILDCARD_SEL;
5403 else if (patt[pos] == '{')
5405 while (pos < pattlen && patt[pos] != '}')
5407 if (paren_depth == 0)
5408 sel *= PARTIAL_WILDCARD_SEL;
5410 else if (patt[pos] == '\\')
5412 /* backslash quotes the next character */
5416 if (paren_depth == 0)
5417 sel *= FIXED_CHAR_SEL;
5421 if (paren_depth == 0)
5422 sel *= FIXED_CHAR_SEL;
5425 /* Could get sel > 1 if multiple wildcards */
/*
 * regex_selectivity
 *	  Heuristic selectivity for a full regex pattern Const (text only).
 *	  Delegates to regex_selectivity_sub, treating a pattern without a
 *	  trailing unescaped '$' as having an implicit trailing wildcard.
 *
 * NOTE(review): elided dump -- the return type line, declarations of
 * sel/patt/pattlen, the ereport(ERROR, ...) opening, braces, and the
 * final clamp/return are not visible here.
 */
5432 regex_selectivity(Const *patt_const, bool case_insensitive)
5437 Oid typeid = patt_const->consttype;
5440 * Should be unnecessary, there are no bytea regex operators defined. As
5441 * such, it should be noted that the rest of this function has *not* been
5442 * made safe for binary (possibly NULL containing) strings.
5444 if (typeid == BYTEAOID)
5446 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
5447 errmsg("regular-expression matching not supported on type bytea")));
5449 /* the right-hand const is type text for all of these */
5450 patt = TextDatumGetCString(patt_const->constvalue);
5451 pattlen = strlen(patt);
5453 /* If patt doesn't end with $, consider it to have a trailing wildcard */
5454 if (pattlen > 0 && patt[pattlen - 1] == '$' &&
5455 (pattlen == 1 || patt[pattlen - 2] != '\\'))
5457 /* has trailing $ */
/* anchored at end: exclude the '$' itself from the scan */
5458 sel = regex_selectivity_sub(patt, pattlen - 1, case_insensitive);
5463 sel = regex_selectivity_sub(patt, pattlen, case_insensitive);
5464 sel *= FULL_WILDCARD_SEL;
/*
 * pattern_selectivity
 *	  Dispatch selectivity estimation to the LIKE- or regex-specific
 *	  routine based on ptype; the *_IC variants pass true for
 *	  case-insensitive matching.
 *
 * NOTE(review): elided dump -- the return type line, the result
 * declaration, switch braces, "break;" lines, and the final
 * "return result;" are not visible here.
 */
5472 pattern_selectivity(Const *patt, Pattern_Type ptype)
5478 case Pattern_Type_Like:
5479 result = like_selectivity(patt, false);
5481 case Pattern_Type_Like_IC:
5482 result = like_selectivity(patt, true);
5484 case Pattern_Type_Regex:
5485 result = regex_selectivity(patt, false);
5487 case Pattern_Type_Regex_IC:
5488 result = regex_selectivity(patt, true);
5491 elog(ERROR, "unrecognized ptype: %d", (int) ptype);
/* elog(ERROR) does not return; the assignment only silences the compiler */
5492 result = 1.0; /* keep compiler quiet */
5500 * Try to generate a string greater than the given string or any
5501 * string it is a prefix of. If successful, return a palloc'd string
5502 * in the form of a Const node; else return NULL.
5504 * The caller must provide the appropriate "less than" comparison function
5505 * for testing the strings. In particular, ltproc->fn_collation specifies
5506 * the locale for comparisons.
5508 * The key requirement here is that given a prefix string, say "foo",
5509 * we must be able to generate another string "fop" that is greater than
5510 * all strings "foobar" starting with "foo". We can test that we have
5511 * generated a string greater than the prefix string, but in non-C locales
5512 * that is not a bulletproof guarantee that an extension of the string might
5513 * not sort after it; an example is that "foo " is less than "foo!", but it
5514 * is not clear that a "dictionary" sort ordering will consider "foo!" less
5515 * than "foo bar". CAUTION: Therefore, this function should be used only for
5516 * estimation purposes when working in a non-C locale.
5518 * To try to catch most cases where an extended string might otherwise sort
5519 * before the result value, we determine which of the strings "Z", "z", "y",
5520 * and "9" is seen as largest by the locale, and append that to the given
5521 * prefix before trying to find a string that compares as larger.
5523 * If we max out the righthand byte, truncate off the last character
5524 * and start incrementing the next. For example, if "z" were the last
5525 * character in the sort order, then we could produce "foo" as a
5526 * string greater than "fonz".
5528 * This could be rather slow in the worst case, but in most cases we
5529 * won't have to try more than one or two strings before succeeding.
/*
 * NOTE(review): elided dump -- the return type line, declarations of
 * workstr/len/cmpstr, the outer "while (len > 0)" loop header, increment
 * of *lastchar, pfree of cmptxt/workstr on success, and the failure-path
 * cleanup/return NULL are not visible here.
 */
5532 make_greater_string(const Const *str_const, FmgrInfo *ltproc)
5534 Oid datatype = str_const->consttype;
5538 text *cmptxt = NULL;
5541 * Get a modifiable copy of the prefix string in C-string format, and set
5542 * up the string we will compare to as a Datum. In C locale this can just
5543 * be the given prefix string, otherwise we need to add a suffix. Types
5544 * NAME and BYTEA sort bytewise so they don't need a suffix either.
5546 if (datatype == NAMEOID)
5548 workstr = DatumGetCString(DirectFunctionCall1(nameout,
5549 str_const->constvalue));
5550 len = strlen(workstr);
5551 cmpstr = str_const->constvalue;
5553 else if (datatype == BYTEAOID)
5555 bytea *bstr = DatumGetByteaP(str_const->constvalue);
5557 len = VARSIZE(bstr) - VARHDRSZ;
5558 workstr = (char *) palloc(len);
5559 memcpy(workstr, VARDATA(bstr), len);
/* free the detoasted copy only if detoasting actually made one */
5560 if ((Pointer) bstr != DatumGetPointer(str_const->constvalue))
5562 cmpstr = str_const->constvalue;
5566 workstr = TextDatumGetCString(str_const->constvalue);
5567 len = strlen(workstr);
5568 if (lc_collate_is_c(ltproc->fn_collation) || len == 0)
5569 cmpstr = str_const->constvalue;
5572 /* If first time through, determine the suffix to use */
/* cached across calls; recomputed only when the collation changes */
5573 static char suffixchar = 0;
5574 static Oid suffixcollation = 0;
5576 if (!suffixchar || suffixcollation != ltproc->fn_collation)
5581 if (varstr_cmp(best, 1, "z", 1, ltproc->fn_collation) < 0)
5583 if (varstr_cmp(best, 1, "y", 1, ltproc->fn_collation) < 0)
5585 if (varstr_cmp(best, 1, "9", 1, ltproc->fn_collation) < 0)
5588 suffixcollation = ltproc->fn_collation;
5591 /* And build the string to compare to */
5592 cmptxt = (text *) palloc(VARHDRSZ + len + 1);
5593 SET_VARSIZE(cmptxt, VARHDRSZ + len + 1);
5594 memcpy(VARDATA(cmptxt), workstr, len);
5595 *(VARDATA(cmptxt) + len) = suffixchar;
5596 cmpstr = PointerGetDatum(cmptxt);
5602 unsigned char *lastchar = (unsigned char *) (workstr + len - 1);
5603 unsigned char savelastchar = *lastchar;
5606 * Try to generate a larger string by incrementing the last byte.
5608 while (*lastchar < (unsigned char) 255)
5610 Const *workstr_const;
5614 if (datatype != BYTEAOID)
5616 /* do not generate invalid encoding sequences */
5617 if (!pg_verifymbstr(workstr, len, true))
5619 workstr_const = string_to_const(workstr, datatype);
5622 workstr_const = string_to_bytea_const(workstr, len);
5624 if (DatumGetBool(FunctionCall2(ltproc,
5626 workstr_const->constvalue)))
5628 /* Successfully made a string larger than cmpstr */
5632 return workstr_const;
5635 /* No good, release unusable value and try again */
5636 pfree(DatumGetPointer(workstr_const->constvalue));
5637 pfree(workstr_const);
5640 /* restore last byte so we don't confuse pg_mbcliplen */
5641 *lastchar = savelastchar;
5644 * Truncate off the last character, which might be more than 1 byte,
5645 * depending on the character encoding.
5647 if (datatype != BYTEAOID && pg_database_encoding_max_length() > 1)
5648 len = pg_mbcliplen(workstr, len, len - 1);
5652 if (datatype != BYTEAOID)
5653 workstr[len] = '\0';
5665 * Generate a Datum of the appropriate type from a C string.
5666 * Note that all of the supported types are pass-by-ref, so the
5667 * returned value should be pfree'd if no longer needed.
/*
 * NOTE(review): elided dump -- the return type line and function braces
 * are not visible here.
 */
5670 string_to_datum(const char *str, Oid datatype)
5672 Assert(str != NULL);
5675 * We cheat a little by assuming that CStringGetTextDatum() will do for
5676 * bpchar and varchar constants too...
5678 if (datatype == NAMEOID)
5679 return DirectFunctionCall1(namein, CStringGetDatum(str));
5680 else if (datatype == BYTEAOID)
5681 return DirectFunctionCall1(byteain, CStringGetDatum(str));
/* default: text-family types (text, bpchar, varchar) */
5683 return CStringGetTextDatum(str);
5687 * Generate a Const node of the appropriate type from a C string.
/*
 * string_to_const
 *	  Wrap string_to_datum's result in a Const node, hard-wiring the
 *	  per-datatype collation and typlen rather than doing catalog lookups.
 *
 * NOTE(review): elided dump -- the return type line, declarations of
 * collation/constlen, the switch header with its case labels, and
 * several braces are not visible here.
 */
5690 string_to_const(const char *str, Oid datatype)
5692 Datum conval = string_to_datum(str, datatype);
5697 * We only need to support a few datatypes here, so hard-wire properties
5698 * instead of incurring the expense of catalog lookups.
5705 collation = DEFAULT_COLLATION_OID;
5710 collation = InvalidOid;
5711 constlen = NAMEDATALEN;
5715 collation = InvalidOid;
5720 elog(ERROR, "unexpected datatype in string_to_const: %u",
5725 return makeConst(datatype, -1, collation, constlen,
5726 conval, false, false);
5730 * Generate a Const node of bytea type from a binary C string and a length.
/*
 * NOTE(review): elided dump -- the return type line, the declaration of
 * conval, and the function braces are not visible here.
 */
5733 string_to_bytea_const(const char *str, size_t str_len)
5735 bytea *bstr = palloc(VARHDRSZ + str_len);
5738 memcpy(VARDATA(bstr), str, str_len);
5739 SET_VARSIZE(bstr, VARHDRSZ + str_len);
5740 conval = PointerGetDatum(bstr);
/* bytea Consts carry no collation; typlen -1 marks a varlena type */
5742 return makeConst(BYTEAOID, -1, InvalidOid, -1, conval, false, false);
5745 /*-------------------------------------------------------------------------
5747 * Index cost estimation functions
5749 * genericcostestimate is a general-purpose estimator for use when we
5750 * don't have any better idea about how to estimate. Index-type-specific
5751 * knowledge can be incorporated in the type-specific routines.
5753 * One bit of index-type-specific knowledge we can relatively easily use
5754 * in genericcostestimate is the estimate of the number of index tuples
5755 * visited. If numIndexTuples is not 0 then it is used as the estimate,
5756 * otherwise we compute a generic estimate.
5758 *-------------------------------------------------------------------------
/*
 * genericcostestimate
 *	  Fill in *indexStartupCost, *indexTotalCost, *indexSelectivity, and
 *	  *indexCorrelation for a generic index scan.
 *
 * NOTE(review): elided dump -- the return type line, the indexQuals
 * parameter line, declarations of l/num_scans, the num_sa_scans
 * initialization, several braces, and parts of the cost arithmetic
 * (e.g. the pro-rating divisor for the outer-scan case) are not visible.
 */
5762 genericcostestimate(PlannerInfo *root,
5763 IndexOptInfo *index,
5765 List *indexOrderBys,
5766 RelOptInfo *outer_rel,
5767 double numIndexTuples,
5768 Cost *indexStartupCost,
5769 Cost *indexTotalCost,
5770 Selectivity *indexSelectivity,
5771 double *indexCorrelation)
5773 double numIndexPages;
5774 double num_sa_scans;
5775 double num_outer_scans;
5777 QualCost index_qual_cost;
5778 double qual_op_cost;
5779 double qual_arg_cost;
5780 double spc_random_page_cost;
5781 List *selectivityQuals;
5785 * If the index is partial, AND the index predicate with the explicitly
5786 * given indexquals to produce a more accurate idea of the index
5787 * selectivity. However, we need to be careful not to insert redundant
5788 * clauses, because clauselist_selectivity() is easily fooled into
5789 * computing a too-low selectivity estimate. Our approach is to add
5790 * only the index predicate clause(s) that cannot be proven to be implied
5791 * by the given indexquals. This successfully handles cases such as a
5792 * qual "x = 42" used with a partial index "WHERE x >= 40 AND x < 50".
5793 * There are many other cases where we won't detect redundancy, leading
5794 * to a too-low selectivity estimate, which will bias the system in favor
5795 * of using partial indexes where possible. That is not necessarily bad
5798 * Note that indexQuals contains RestrictInfo nodes while the indpred
5799 * does not. This is OK for both predicate_implied_by() and
5800 * clauselist_selectivity().
5803 if (index->indpred != NIL)
5805 List *predExtraQuals = NIL;
5807 foreach(l, index->indpred)
5809 Node *predQual = (Node *) lfirst(l);
5810 List *oneQual = list_make1(predQual);
5812 if (!predicate_implied_by(oneQual, indexQuals))
5813 predExtraQuals = list_concat(predExtraQuals, oneQual);
5815 /* list_concat avoids modifying the passed-in indexQuals list */
5816 selectivityQuals = list_concat(predExtraQuals, indexQuals);
5819 selectivityQuals = indexQuals;
5822 * Check for ScalarArrayOpExpr index quals, and estimate the number of
5823 * index scans that will be performed.
5826 foreach(l, indexQuals)
5828 RestrictInfo *rinfo = (RestrictInfo *) lfirst(l);
5830 if (IsA(rinfo->clause, ScalarArrayOpExpr))
5832 ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) rinfo->clause;
5833 int alength = estimate_array_length(lsecond(saop->args));
/* each array element induces a separate index scan */
5836 num_sa_scans *= alength;
5840 /* Estimate the fraction of main-table tuples that will be visited */
5841 *indexSelectivity = clauselist_selectivity(root, selectivityQuals,
5847 * If caller didn't give us an estimate, estimate the number of index
5848 * tuples that will be visited. We do it in this rather peculiar-looking
5849 * way in order to get the right answer for partial indexes.
5851 if (numIndexTuples <= 0.0)
5853 numIndexTuples = *indexSelectivity * index->rel->tuples;
5856 * The above calculation counts all the tuples visited across all
5857 * scans induced by ScalarArrayOpExpr nodes. We want to consider the
5858 * average per-indexscan number, so adjust. This is a handy place to
5859 * round to integer, too. (If caller supplied tuple estimate, it's
5860 * responsible for handling these considerations.)
5862 numIndexTuples = rint(numIndexTuples / num_sa_scans);
5866 * We can bound the number of tuples by the index size in any case. Also,
5867 * always estimate at least one tuple is touched, even when
5868 * indexSelectivity estimate is tiny.
5870 if (numIndexTuples > index->tuples)
5871 numIndexTuples = index->tuples;
5872 if (numIndexTuples < 1.0)
5873 numIndexTuples = 1.0;
5876 * Estimate the number of index pages that will be retrieved.
5878 * We use the simplistic method of taking a pro-rata fraction of the total
5879 * number of index pages. In effect, this counts only leaf pages and not
5880 * any overhead such as index metapage or upper tree levels. In practice
5881 * this seems a better approximation than charging for access to the upper
5882 * levels, perhaps because those tend to stay in cache under load.
5884 if (index->pages > 1 && index->tuples > 1)
5885 numIndexPages = ceil(numIndexTuples * index->pages / index->tuples);
5887 numIndexPages = 1.0;
5889 /* fetch estimated page cost for schema containing index */
5890 get_tablespace_page_costs(index->reltablespace,
5891 &spc_random_page_cost,
5895 * Now compute the disk access costs.
5897 * The above calculations are all per-index-scan. However, if we are in a
5898 * nestloop inner scan, we can expect the scan to be repeated (with
5899 * different search keys) for each row of the outer relation. Likewise,
5900 * ScalarArrayOpExpr quals result in multiple index scans. This creates
5901 * the potential for cache effects to reduce the number of disk page
5902 * fetches needed. We want to estimate the average per-scan I/O cost in
5903 * the presence of caching.
5905 * We use the Mackert-Lohman formula (see costsize.c for details) to
5906 * estimate the total number of page fetches that occur. While this
5907 * wasn't what it was designed for, it seems a reasonable model anyway.
5908 * Note that we are counting pages not tuples anymore, so we take N = T =
5909 * index size, as if there were one "tuple" per page.
5911 if (outer_rel != NULL && outer_rel->rows > 1)
5913 num_outer_scans = outer_rel->rows;
5914 num_scans = num_sa_scans * num_outer_scans;
5918 num_outer_scans = 1;
5919 num_scans = num_sa_scans;
5924 double pages_fetched;
5926 /* total page fetches ignoring cache effects */
5927 pages_fetched = numIndexPages * num_scans;
5929 /* use Mackert and Lohman formula to adjust for cache effects */
5930 pages_fetched = index_pages_fetched(pages_fetched,
5932 (double) index->pages,
5936 * Now compute the total disk access cost, and then report a pro-rated
5937 * share for each outer scan. (Don't pro-rate for ScalarArrayOpExpr,
5938 * since that's internal to the indexscan.)
5940 *indexTotalCost = (pages_fetched * spc_random_page_cost)
5946 * For a single index scan, we just charge spc_random_page_cost per
5949 *indexTotalCost = numIndexPages * spc_random_page_cost;
5953 * A difficulty with the leaf-pages-only cost approach is that for small
5954 * selectivities (eg, single index tuple fetched) all indexes will look
5955 * equally attractive because we will estimate exactly 1 leaf page to be
5956 * fetched. All else being equal, we should prefer physically smaller
5957 * indexes over larger ones. (An index might be smaller because it is
5958 * partial or because it contains fewer columns; presumably the other
5959 * columns in the larger index aren't useful to the query, or the larger
5960 * index would have better selectivity.)
5962 * We can deal with this by adding a very small "fudge factor" that
5963 * depends on the index size. The fudge factor used here is one
5964 * spc_random_page_cost per 100000 index pages, which should be small
5965 * enough to not alter index-vs-seqscan decisions, but will prevent
5966 * indexes of different sizes from looking exactly equally attractive.
5968 *indexTotalCost += index->pages * spc_random_page_cost / 100000.0;
5971 * CPU cost: any complex expressions in the indexquals will need to be
5972 * evaluated once at the start of the scan to reduce them to runtime keys
5973 * to pass to the index AM (see nodeIndexscan.c). We model the per-tuple
5974 * CPU costs as cpu_index_tuple_cost plus one cpu_operator_cost per
5975 * indexqual operator. Because we have numIndexTuples as a per-scan
5976 * number, we have to multiply by num_sa_scans to get the correct result
5977 * for ScalarArrayOpExpr cases. Similarly add in costs for any index
5978 * ORDER BY expressions.
5980 * Note: this neglects the possible costs of rechecking lossy operators
5981 * and OR-clause expressions. Detecting that that might be needed seems
5982 * more expensive than it's worth, though, considering all the other
5983 * inaccuracies here ...
5985 cost_qual_eval(&index_qual_cost, indexQuals, root);
5986 qual_arg_cost = index_qual_cost.startup + index_qual_cost.per_tuple;
5987 cost_qual_eval(&index_qual_cost, indexOrderBys, root);
5988 qual_arg_cost += index_qual_cost.startup + index_qual_cost.per_tuple;
/* subtract the per-operator charge; it is added back per-tuple below */
5989 qual_op_cost = cpu_operator_cost *
5990 (list_length(indexQuals) + list_length(indexOrderBys));
5991 qual_arg_cost -= qual_op_cost;
5992 if (qual_arg_cost < 0) /* just in case... */
5995 *indexStartupCost = qual_arg_cost;
5996 *indexTotalCost += qual_arg_cost;
5997 *indexTotalCost += numIndexTuples * num_sa_scans * (cpu_index_tuple_cost + qual_op_cost);
6000 * We also add a CPU-cost component to represent the general costs of
6001 * starting an indexscan, such as analysis of btree index keys and initial
6002 * tree descent. This is estimated at 100x cpu_operator_cost, which is a
6003 * bit arbitrary but seems the right order of magnitude. (As noted above,
6004 * we don't charge any I/O for touching upper tree levels, but charging
6005 * nothing at all has been found too optimistic.)
6007 * Although this is startup cost with respect to any one scan, we add it
6008 * to the "total" cost component because it's only very interesting in the
6009 * many-ScalarArrayOpExpr-scan case, and there it will be paid over the
6010 * life of the scan node.
6012 *indexTotalCost += num_sa_scans * 100.0 * cpu_operator_cost;
6015 * Generic assumption about index correlation: there isn't any.
6017 *indexCorrelation = 0.0;
/*
 * btcostestimate
 *	  Cost estimator for btree index scans.  Refines genericcostestimate's
 *	  numIndexTuples using only the "boundary quals" (leading '=' quals
 *	  plus inequalities on the next column), then overrides the generic
 *	  zero correlation with the first index column's correlation stat.
 *
 * NOTE(review): elided dump -- the Datum return-type line, declarations
 * (clause, leftop, rightop, clause_op, op_strategy, indexcol, eqQualHere,
 * relid, colnum, etc.), loop/if braces, indexcol advancement, the
 * found_saop tracking, and the trailing PG_RETURN_VOID() are not visible.
 */
6022 btcostestimate(PG_FUNCTION_ARGS)
6024 PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
6025 IndexOptInfo *index = (IndexOptInfo *) PG_GETARG_POINTER(1);
6026 List *indexQuals = (List *) PG_GETARG_POINTER(2);
6027 List *indexOrderBys = (List *) PG_GETARG_POINTER(3);
6028 RelOptInfo *outer_rel = (RelOptInfo *) PG_GETARG_POINTER(4);
6029 Cost *indexStartupCost = (Cost *) PG_GETARG_POINTER(5);
6030 Cost *indexTotalCost = (Cost *) PG_GETARG_POINTER(6);
6031 Selectivity *indexSelectivity = (Selectivity *) PG_GETARG_POINTER(7);
6032 double *indexCorrelation = (double *) PG_GETARG_POINTER(8);
6035 VariableStatData vardata;
6036 double numIndexTuples;
6037 List *indexBoundQuals;
6041 bool found_is_null_op;
6042 double num_sa_scans;
6046 * For a btree scan, only leading '=' quals plus inequality quals for the
6047 * immediately next attribute contribute to index selectivity (these are
6048 * the "boundary quals" that determine the starting and stopping points of
6049 * the index scan). Additional quals can suppress visits to the heap, so
6050 * it's OK to count them in indexSelectivity, but they should not count
6051 * for estimating numIndexTuples. So we must examine the given indexQuals
6052 * to find out which ones count as boundary quals. We rely on the
6053 * knowledge that they are given in index column order.
6055 * For a RowCompareExpr, we consider only the first column, just as
6056 * rowcomparesel() does.
6058 * If there's a ScalarArrayOpExpr in the quals, we'll actually perform N
6059 * index scans not one, but the ScalarArrayOpExpr's operator can be
6060 * considered to act the same as it normally does.
6062 indexBoundQuals = NIL;
6066 found_is_null_op = false;
6068 foreach(l, indexQuals)
6070 RestrictInfo *rinfo = (RestrictInfo *) lfirst(l);
6076 bool is_null_op = false;
6078 Assert(IsA(rinfo, RestrictInfo));
6079 clause = rinfo->clause;
/* extract operands and operator OID per clause node type */
6080 if (IsA(clause, OpExpr))
6082 leftop = get_leftop(clause);
6083 rightop = get_rightop(clause);
6084 clause_op = ((OpExpr *) clause)->opno;
6086 else if (IsA(clause, RowCompareExpr))
6088 RowCompareExpr *rc = (RowCompareExpr *) clause;
6090 leftop = (Node *) linitial(rc->largs);
6091 rightop = (Node *) linitial(rc->rargs);
6092 clause_op = linitial_oid(rc->opnos);
6094 else if (IsA(clause, ScalarArrayOpExpr))
6096 ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) clause;
6098 leftop = (Node *) linitial(saop->args);
6099 rightop = (Node *) lsecond(saop->args);
6100 clause_op = saop->opno;
6103 else if (IsA(clause, NullTest))
6105 NullTest *nt = (NullTest *) clause;
6107 leftop = (Node *) nt->arg;
6109 clause_op = InvalidOid;
6110 if (nt->nulltesttype == IS_NULL)
6112 found_is_null_op = true;
6118 elog(ERROR, "unsupported indexqual type: %d",
6119 (int) nodeTag(clause));
6120 continue; /* keep compiler quiet */
/* see whether the qual matches the current index column */
6122 if (match_index_to_operand(leftop, indexcol, index))
6124 /* clause_op is correct */
6126 else if (match_index_to_operand(rightop, indexcol, index))
6128 /* Must flip operator to get the opfamily member */
6129 clause_op = get_commutator(clause_op);
6133 /* Must be past the end of quals for indexcol, try next */
6135 break; /* done if no '=' qual for indexcol */
6138 if (match_index_to_operand(leftop, indexcol, index))
6140 /* clause_op is correct */
6142 else if (match_index_to_operand(rightop, indexcol, index))
6144 /* Must flip operator to get the opfamily member */
6145 clause_op = get_commutator(clause_op);
6149 /* No quals for new indexcol, so we are done */
6153 /* check for equality operator */
6154 if (OidIsValid(clause_op))
6156 op_strategy = get_op_opfamily_strategy(clause_op,
6157 index->opfamily[indexcol]);
6158 Assert(op_strategy != 0); /* not a member of opfamily?? */
6159 if (op_strategy == BTEqualStrategyNumber)
6162 else if (is_null_op)
6164 /* IS NULL is like = for purposes of selectivity determination */
6167 /* count up number of SA scans induced by indexBoundQuals only */
6168 if (IsA(clause, ScalarArrayOpExpr))
6170 ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) clause;
6171 int alength = estimate_array_length(lsecond(saop->args));
6174 num_sa_scans *= alength;
6176 indexBoundQuals = lappend(indexBoundQuals, rinfo);
6180 * If index is unique and we found an '=' clause for each column, we can
6181 * just assume numIndexTuples = 1 and skip the expensive
6182 * clauselist_selectivity calculations. However, a ScalarArrayOp or
6183 * NullTest invalidates that theory, even though it sets eqQualHere.
6185 if (index->unique &&
6186 indexcol == index->ncolumns - 1 &&
6190 numIndexTuples = 1.0;
6193 Selectivity btreeSelectivity;
6195 btreeSelectivity = clauselist_selectivity(root, indexBoundQuals,
6199 numIndexTuples = btreeSelectivity * index->rel->tuples;
6202 * As in genericcostestimate(), we have to adjust for any
6203 * ScalarArrayOpExpr quals included in indexBoundQuals, and then round
6206 numIndexTuples = rint(numIndexTuples / num_sa_scans);
/* delegate the bulk of the cost calculation to the generic estimator */
6209 genericcostestimate(root, index, indexQuals, indexOrderBys,
6210 outer_rel, numIndexTuples,
6211 indexStartupCost, indexTotalCost,
6212 indexSelectivity, indexCorrelation);
6215 * If we can get an estimate of the first column's ordering correlation C
6216 * from pg_statistic, estimate the index correlation as C for a
6217 * single-column index, or C * 0.75 for multiple columns. (The idea here
6218 * is that multiple columns dilute the importance of the first column's
6219 * ordering, but don't negate it entirely. Before 8.0 we divided the
6220 * correlation by the number of columns, but that seems too strong.)
6222 * We can skip all this if we found a ScalarArrayOpExpr, because then the
6223 * call must be for a bitmap index scan, and the caller isn't going to
6224 * care what the index correlation is.
6229 MemSet(&vardata, 0, sizeof(vardata));
6231 if (index->indexkeys[0] != 0)
6233 /* Simple variable --- look to stats for the underlying table */
6234 RangeTblEntry *rte = planner_rt_fetch(index->rel->relid, root);
6236 Assert(rte->rtekind == RTE_RELATION);
6238 Assert(relid != InvalidOid);
6239 colnum = index->indexkeys[0];
6241 if (get_relation_stats_hook &&
6242 (*get_relation_stats_hook) (root, rte, colnum, &vardata))
6245 * The hook took control of acquiring a stats tuple. If it did
6246 * supply a tuple, it'd better have supplied a freefunc.
6248 if (HeapTupleIsValid(vardata.statsTuple) &&
6250 elog(ERROR, "no function provided to release variable stats with");
6254 vardata.statsTuple = SearchSysCache3(STATRELATTINH,
6255 ObjectIdGetDatum(relid),
6256 Int16GetDatum(colnum),
6257 BoolGetDatum(rte->inh));
6258 vardata.freefunc = ReleaseSysCache;
6263 /* Expression --- maybe there are stats for the index itself */
6264 relid = index->indexoid;
6267 if (get_index_stats_hook &&
6268 (*get_index_stats_hook) (root, relid, colnum, &vardata))
6271 * The hook took control of acquiring a stats tuple. If it did
6272 * supply a tuple, it'd better have supplied a freefunc.
6274 if (HeapTupleIsValid(vardata.statsTuple) &&
6276 elog(ERROR, "no function provided to release variable stats with");
6280 vardata.statsTuple = SearchSysCache3(STATRELATTINH,
6281 ObjectIdGetDatum(relid),
6282 Int16GetDatum(colnum),
6283 BoolGetDatum(false));
6284 vardata.freefunc = ReleaseSysCache;
6288 if (HeapTupleIsValid(vardata.statsTuple))
/* find the opfamily's sort operator to locate the correlation slot */
6294 sortop = get_opfamily_member(index->opfamily[0],
6295 index->opcintype[0],
6296 index->opcintype[0],
6297 BTLessStrategyNumber);
6298 if (OidIsValid(sortop) &&
6299 get_attstatsslot(vardata.statsTuple, InvalidOid, 0,
6300 STATISTIC_KIND_CORRELATION,
6304 &numbers, &nnumbers))
6306 double varCorrelation;
6308 Assert(nnumbers == 1);
6309 varCorrelation = numbers[0];
/* a DESC index column inverts the sign of the correlation */
6311 if (index->reverse_sort[0])
6312 varCorrelation = -varCorrelation;
6314 if (index->ncolumns > 1)
6315 *indexCorrelation = varCorrelation * 0.75;
6317 *indexCorrelation = varCorrelation;
6319 free_attstatsslot(InvalidOid, NULL, 0, numbers, nnumbers);
6323 ReleaseVariableStats(vardata);
6329 hashcostestimate(PG_FUNCTION_ARGS)
6331 PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
6332 IndexOptInfo *index = (IndexOptInfo *) PG_GETARG_POINTER(1);
6333 List *indexQuals = (List *) PG_GETARG_POINTER(2);
6334 List *indexOrderBys = (List *) PG_GETARG_POINTER(3);
6335 RelOptInfo *outer_rel = (RelOptInfo *) PG_GETARG_POINTER(4);
6336 Cost *indexStartupCost = (Cost *) PG_GETARG_POINTER(5);
6337 Cost *indexTotalCost = (Cost *) PG_GETARG_POINTER(6);
6338 Selectivity *indexSelectivity = (Selectivity *) PG_GETARG_POINTER(7);
6339 double *indexCorrelation = (double *) PG_GETARG_POINTER(8);
6341 genericcostestimate(root, index, indexQuals, indexOrderBys, outer_rel, 0.0,
6342 indexStartupCost, indexTotalCost,
6343 indexSelectivity, indexCorrelation);
6349 gistcostestimate(PG_FUNCTION_ARGS)
6351 PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
6352 IndexOptInfo *index = (IndexOptInfo *) PG_GETARG_POINTER(1);
6353 List *indexQuals = (List *) PG_GETARG_POINTER(2);
6354 List *indexOrderBys = (List *) PG_GETARG_POINTER(3);
6355 RelOptInfo *outer_rel = (RelOptInfo *) PG_GETARG_POINTER(4);
6356 Cost *indexStartupCost = (Cost *) PG_GETARG_POINTER(5);
6357 Cost *indexTotalCost = (Cost *) PG_GETARG_POINTER(6);
6358 Selectivity *indexSelectivity = (Selectivity *) PG_GETARG_POINTER(7);
6359 double *indexCorrelation = (double *) PG_GETARG_POINTER(8);
6361 genericcostestimate(root, index, indexQuals, indexOrderBys, outer_rel, 0.0,
6362 indexStartupCost, indexTotalCost,
6363 indexSelectivity, indexCorrelation);
6368 /* Find the index column matching "op"; return its index, or -1 if no match */
6370 find_index_column(Node *op, IndexOptInfo *index)
6374 for (i = 0; i < index->ncolumns; i++)
6376 if (match_index_to_operand(op, i, index))
6384 * GIN has search behavior completely different from other index types
6387 gincostestimate(PG_FUNCTION_ARGS)
6389 PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
6390 IndexOptInfo *index = (IndexOptInfo *) PG_GETARG_POINTER(1);
6391 List *indexQuals = (List *) PG_GETARG_POINTER(2);
6392 List *indexOrderBys = (List *) PG_GETARG_POINTER(3);
6393 RelOptInfo *outer_rel = (RelOptInfo *) PG_GETARG_POINTER(4);
6394 Cost *indexStartupCost = (Cost *) PG_GETARG_POINTER(5);
6395 Cost *indexTotalCost = (Cost *) PG_GETARG_POINTER(6);
6396 Selectivity *indexSelectivity = (Selectivity *) PG_GETARG_POINTER(7);
6397 double *indexCorrelation = (double *) PG_GETARG_POINTER(8);
6399 List *selectivityQuals;
6400 double numPages = index->pages,
6401 numTuples = index->tuples;
6402 double numEntryPages,
6406 bool haveFullScan = false;
6407 double partialEntriesInQuals = 0.0;
6408 double searchEntriesInQuals = 0.0;
6409 double exactEntriesInQuals = 0.0;
6410 double entryPagesFetched,
6412 dataPagesFetchedBySel;
6413 double qual_op_cost,
6415 spc_random_page_cost,
6417 QualCost index_qual_cost;
6419 GinStatsData ginStats;
6422 * Obtain statistic information from the meta page
6424 indexRel = index_open(index->indexoid, AccessShareLock);
6425 ginGetStats(indexRel, &ginStats);
6426 index_close(indexRel, AccessShareLock);
6428 numEntryPages = ginStats.nEntryPages;
6429 numDataPages = ginStats.nDataPages;
6430 numPendingPages = ginStats.nPendingPages;
6431 numEntries = ginStats.nEntries;
6434 * nPendingPages can be trusted, but the other fields are as of the last
6435 * VACUUM. Scale them by the ratio numPages / nTotalPages to account for
6436 * growth since then. If the fields are zero (implying no VACUUM at all,
6437 * and an index created pre-9.1), assume all pages are entry pages.
6439 if (ginStats.nTotalPages == 0 || ginStats.nEntryPages == 0)
6441 numEntryPages = numPages;
6443 numEntries = numTuples; /* bogus, but no other info available */
6447 double scale = numPages / ginStats.nTotalPages;
6449 numEntryPages = ceil(numEntryPages * scale);
6450 numDataPages = ceil(numDataPages * scale);
6451 numEntries = ceil(numEntries * scale);
6452 /* ensure we didn't round up too much */
6453 numEntryPages = Min(numEntryPages, numPages);
6454 numDataPages = Min(numDataPages, numPages - numEntryPages);
6458 * Include predicate in selectivityQuals (should match
6459 * genericcostestimate)
6461 if (index->indpred != NIL)
6463 List *predExtraQuals = NIL;
6465 foreach(l, index->indpred)
6467 Node *predQual = (Node *) lfirst(l);
6468 List *oneQual = list_make1(predQual);
6470 if (!predicate_implied_by(oneQual, indexQuals))
6471 predExtraQuals = list_concat(predExtraQuals, oneQual);
6473 /* list_concat avoids modifying the passed-in indexQuals list */
6474 selectivityQuals = list_concat(predExtraQuals, indexQuals);
6477 selectivityQuals = indexQuals;
6479 /* Estimate the fraction of main-table tuples that will be visited */
6480 *indexSelectivity = clauselist_selectivity(root, selectivityQuals,
6485 /* fetch estimated page cost for schema containing index */
6486 get_tablespace_page_costs(index->reltablespace,
6487 &spc_random_page_cost,
6491 * Generic assumption about index correlation: there isn't any.
6493 *indexCorrelation = 0.0;
6496 * Examine quals to estimate number of search entries & partial matches
6498 foreach(l, indexQuals)
6500 RestrictInfo *rinfo = (RestrictInfo *) lfirst(l);
6511 bool *partial_matches = NULL;
6512 Pointer *extra_data = NULL;
6513 bool *nullFlags = NULL;
6514 int32 searchMode = GIN_SEARCH_MODE_DEFAULT;
6517 Assert(IsA(rinfo, RestrictInfo));
6518 clause = rinfo->clause;
6519 Assert(IsA(clause, OpExpr));
6520 leftop = get_leftop(clause);
6521 rightop = get_rightop(clause);
6522 clause_op = ((OpExpr *) clause)->opno;
6524 if ((indexcol = find_index_column(leftop, index)) >= 0)
6528 else if ((indexcol = find_index_column(rightop, index)) >= 0)
6531 clause_op = get_commutator(clause_op);
6535 elog(ERROR, "could not match index to operand");
6536 operand = NULL; /* keep compiler quiet */
6539 if (IsA(operand, RelabelType))
6540 operand = (Node *) ((RelabelType *) operand)->arg;
6543 * It's impossible to call extractQuery method for unknown operand. So
6544 * unless operand is a Const we can't do much; just assume there will
6545 * be one ordinary search entry from the operand at runtime.
6547 if (!IsA(operand, Const))
6549 searchEntriesInQuals++;
6553 /* If Const is null, there can be no matches */
6554 if (((Const *) operand)->constisnull)
6556 *indexStartupCost = 0;
6557 *indexTotalCost = 0;
6558 *indexSelectivity = 0;
6563 * Get the operator's strategy number and declared input data types
6564 * within the index opfamily. (We don't need the latter, but we use
6565 * get_op_opfamily_properties because it will throw error if it fails
6566 * to find a matching pg_amop entry.)
6568 get_op_opfamily_properties(clause_op, index->opfamily[indexcol], false,
6569 &strategy_op, &lefttype, &righttype);
6572 * GIN always uses the "default" support functions, which are those
6573 * with lefttype == righttype == the opclass' opcintype (see
6574 * IndexSupportInitialize in relcache.c).
6576 extractProcOid = get_opfamily_proc(index->opfamily[indexcol],
6577 index->opcintype[indexcol],
6578 index->opcintype[indexcol],
6579 GIN_EXTRACTQUERY_PROC);
6581 if (!OidIsValid(extractProcOid))
6583 /* should not happen; throw same error as index_getprocinfo */
6584 elog(ERROR, "missing support function %d for attribute %d of index \"%s\"",
6585 GIN_EXTRACTQUERY_PROC, indexcol + 1,
6586 get_rel_name(index->indexoid));
6589 OidFunctionCall7(extractProcOid,
6590 ((Const *) operand)->constvalue,
6591 PointerGetDatum(&nentries),
6592 UInt16GetDatum(strategy_op),
6593 PointerGetDatum(&partial_matches),
6594 PointerGetDatum(&extra_data),
6595 PointerGetDatum(&nullFlags),
6596 PointerGetDatum(&searchMode));
6598 if (nentries <= 0 && searchMode == GIN_SEARCH_MODE_DEFAULT)
6600 /* No match is possible */
6601 *indexStartupCost = 0;
6602 *indexTotalCost = 0;
6603 *indexSelectivity = 0;
6610 for (i = 0; i < nentries; i++)
6613 * For partial match we haven't any information to estimate
6614 * number of matched entries in index, so, we just estimate it
6617 if (partial_matches && partial_matches[i])
6618 partialEntriesInQuals += 100;
6620 exactEntriesInQuals++;
6622 searchEntriesInQuals++;
6626 if (searchMode == GIN_SEARCH_MODE_INCLUDE_EMPTY)
6628 /* Treat "include empty" like an exact-match item */
6629 exactEntriesInQuals++;
6630 searchEntriesInQuals++;
6632 else if (searchMode != GIN_SEARCH_MODE_DEFAULT)
6634 /* It's GIN_SEARCH_MODE_ALL */
6635 haveFullScan = true;
6639 if (haveFullScan || indexQuals == NIL)
6642 * Full index scan will be required. We treat this as if every key in
6643 * the index had been listed in the query; is that reasonable?
6645 searchEntriesInQuals = numEntries;
6648 /* Will we have more than one iteration of a nestloop scan? */
6649 if (outer_rel != NULL && outer_rel->rows > 1)
6650 num_scans = outer_rel->rows;
6655 * cost to begin scan, first of all, pay attention to pending list.
6657 entryPagesFetched = numPendingPages;
6660 * Estimate number of entry pages read. We need to do
6661 * searchEntriesInQuals searches. Use a power function as it should be,
6662 * but tuples on leaf pages usually is much greater. Here we include all
6663 * searches in entry tree, including search of first entry in partial
6666 entryPagesFetched += ceil(searchEntriesInQuals * rint(pow(numEntryPages, 0.15)));
6669 * Add an estimate of entry pages read by partial match algorithm. It's a
6670 * scan over leaf pages in entry tree. We haven't any useful stats here,
6671 * so estimate it as proportion.
6673 entryPagesFetched += ceil(numEntryPages * partialEntriesInQuals / numEntries);
6676 * Partial match algorithm reads all data pages before doing actual scan,
6677 * so it's a startup cost. Again, we haven't any useful stats here, so,
6678 * estimate it as proportion
6680 dataPagesFetched = ceil(numDataPages * partialEntriesInQuals / numEntries);
6682 /* calculate cache effects */
6683 if (num_scans > 1 || searchEntriesInQuals > 1)
6685 entryPagesFetched = index_pages_fetched(entryPagesFetched,
6686 (BlockNumber) numEntryPages,
6687 numEntryPages, root);
6688 dataPagesFetched = index_pages_fetched(dataPagesFetched,
6689 (BlockNumber) numDataPages,
6690 numDataPages, root);
6694 * Here we use random page cost because logically-close pages could be far
6697 *indexStartupCost = (entryPagesFetched + dataPagesFetched) * spc_random_page_cost;
6699 /* cost to scan data pages for each exact (non-partial) matched entry */
6700 dataPagesFetched = ceil(numDataPages * exactEntriesInQuals / numEntries);
6703 * Estimate number of data pages read, using selectivity estimation and
6704 * capacity of data page.
6706 dataPagesFetchedBySel = ceil(*indexSelectivity *
6707 (numTuples / (BLCKSZ / SizeOfIptrData)));
6709 if (dataPagesFetchedBySel > dataPagesFetched)
6712 * At least one of entries is very frequent and, unfortunately, we
6713 * couldn't get statistic about entries (only tsvector has such
6714 * statistics). So, we obviously have too small estimation of pages
6715 * fetched from data tree. Re-estimate it from known capacity of data
6718 dataPagesFetched = dataPagesFetchedBySel;
6722 dataPagesFetched = index_pages_fetched(dataPagesFetched,
6723 (BlockNumber) numDataPages,
6724 numDataPages, root);
6725 *indexTotalCost = *indexStartupCost +
6726 dataPagesFetched * spc_random_page_cost;
6729 * Add on index qual eval costs, much as in genericcostestimate
6731 cost_qual_eval(&index_qual_cost, indexQuals, root);
6732 qual_arg_cost = index_qual_cost.startup + index_qual_cost.per_tuple;
6733 cost_qual_eval(&index_qual_cost, indexOrderBys, root);
6734 qual_arg_cost += index_qual_cost.startup + index_qual_cost.per_tuple;
6735 qual_op_cost = cpu_operator_cost *
6736 (list_length(indexQuals) + list_length(indexOrderBys));
6737 qual_arg_cost -= qual_op_cost;
6738 if (qual_arg_cost < 0) /* just in case... */
6741 *indexStartupCost += qual_arg_cost;
6742 *indexTotalCost += qual_arg_cost;
6743 *indexTotalCost += (numTuples * *indexSelectivity) * (cpu_index_tuple_cost + qual_op_cost);