1 /*-------------------------------------------------------------------------
4 * Selectivity functions and index cost estimation functions for
5 * standard operators and index access methods.
7 * Selectivity routines are registered in the pg_operator catalog
8 * in the "oprrest" and "oprjoin" attributes.
10 * Index cost functions are registered in the pg_am catalog
11 * in the "amcostestimate" attribute.
13 * Portions Copyright (c) 1996-2011, PostgreSQL Global Development Group
14 * Portions Copyright (c) 1994, Regents of the University of California
18 * src/backend/utils/adt/selfuncs.c
20 *-------------------------------------------------------------------------
24 * Operator selectivity estimation functions are called to estimate the
25 * selectivity of WHERE clauses whose top-level operator is their operator.
26 * We divide the problem into two cases:
27 * Restriction clause estimation: the clause involves vars of just
29 * Join clause estimation: the clause involves vars of multiple rels.
30 * Join selectivity estimation is far more difficult and usually less accurate
31 * than restriction estimation.
33 * When dealing with the inner scan of a nestloop join, we consider the
34 * join's joinclauses as restriction clauses for the inner relation, and
35 * treat vars of the outer relation as parameters (a/k/a constants of unknown
36 * values). So, restriction estimators need to be able to accept an argument
37 * telling which relation is to be treated as the variable.
39 * The call convention for a restriction estimator (oprrest function) is
41 * Selectivity oprrest (PlannerInfo *root,
46 * root: general information about the query (rtable and RelOptInfo lists
47 * are particularly important for the estimator).
48 * operator: OID of the specific operator in question.
49 * args: argument list from the operator clause.
50 * varRelid: if not zero, the relid (rtable index) of the relation to
51 * be treated as the variable relation. May be zero if the args list
52 * is known to contain vars of only one relation.
54 * This is represented at the SQL level (in pg_proc) as
56 * float8 oprrest (internal, oid, internal, int4);
58 * The result is a selectivity, that is, a fraction (0 to 1) of the rows
59 * of the relation that are expected to produce a TRUE result for the
62 * The call convention for a join estimator (oprjoin function) is similar
63 * except that varRelid is not needed, and instead join information is
66 * Selectivity oprjoin (PlannerInfo *root,
70 * SpecialJoinInfo *sjinfo);
72 * float8 oprjoin (internal, oid, internal, int2, internal);
74 * (Before Postgres 8.4, join estimators had only the first four of these
75 * parameters. That signature is still allowed, but deprecated.) The
76 * relationship between jointype and sjinfo is explained in the comments for
77 * clause_selectivity() --- the short version is that jointype is usually
78 * best ignored in favor of examining sjinfo.
80 * Join selectivity for regular inner and outer joins is defined as the
81 * fraction (0 to 1) of the cross product of the relations that is expected
82 * to produce a TRUE result for the given operator. For both semi and anti
83 * joins, however, the selectivity is defined as the fraction of the left-hand
84 * side relation's rows that are expected to have a match (ie, at least one
85 * row with a TRUE result) in the right-hand side.
94 #include "access/gin.h"
95 #include "access/sysattr.h"
96 #include "catalog/index.h"
97 #include "catalog/pg_collation.h"
98 #include "catalog/pg_opfamily.h"
99 #include "catalog/pg_statistic.h"
100 #include "catalog/pg_type.h"
101 #include "executor/executor.h"
102 #include "mb/pg_wchar.h"
103 #include "nodes/makefuncs.h"
104 #include "nodes/nodeFuncs.h"
105 #include "optimizer/clauses.h"
106 #include "optimizer/cost.h"
107 #include "optimizer/pathnode.h"
108 #include "optimizer/paths.h"
109 #include "optimizer/plancat.h"
110 #include "optimizer/predtest.h"
111 #include "optimizer/restrictinfo.h"
112 #include "optimizer/var.h"
113 #include "parser/parse_coerce.h"
114 #include "parser/parsetree.h"
115 #include "utils/builtins.h"
116 #include "utils/bytea.h"
117 #include "utils/date.h"
118 #include "utils/datum.h"
119 #include "utils/fmgroids.h"
120 #include "utils/lsyscache.h"
121 #include "utils/nabstime.h"
122 #include "utils/pg_locale.h"
123 #include "utils/rel.h"
124 #include "utils/selfuncs.h"
125 #include "utils/spccache.h"
126 #include "utils/syscache.h"
127 #include "utils/timestamp.h"
128 #include "utils/tqual.h"
131 /* Hooks for plugins to get control when we ask for stats */
/* NOTE(review): hook call sites are not visible in this excerpt; presumably
 * each is consulted before the normal pg_statistic lookup — confirm against
 * examine_variable()/examine_simple_variable(). */
132 get_relation_stats_hook_type get_relation_stats_hook = NULL;
133 get_index_stats_hook_type get_index_stats_hook = NULL;
135 static double var_eq_const(VariableStatData *vardata, Oid operator,
136 Datum constval, bool constisnull,
138 static double var_eq_non_const(VariableStatData *vardata, Oid operator,
141 static double ineq_histogram_selectivity(PlannerInfo *root,
142 VariableStatData *vardata,
143 FmgrInfo *opproc, bool isgt,
144 Datum constval, Oid consttype);
145 static double eqjoinsel_inner(Oid operator,
146 VariableStatData *vardata1, VariableStatData *vardata2);
147 static double eqjoinsel_semi(Oid operator,
148 VariableStatData *vardata1, VariableStatData *vardata2,
149 RelOptInfo *inner_rel);
150 static bool convert_to_scalar(Datum value, Oid valuetypid, double *scaledvalue,
151 Datum lobound, Datum hibound, Oid boundstypid,
152 double *scaledlobound, double *scaledhibound);
153 static double convert_numeric_to_scalar(Datum value, Oid typid);
154 static void convert_string_to_scalar(char *value,
157 double *scaledlobound,
159 double *scaledhibound);
160 static void convert_bytea_to_scalar(Datum value,
163 double *scaledlobound,
165 double *scaledhibound);
166 static double convert_one_string_to_scalar(char *value,
167 int rangelo, int rangehi);
168 static double convert_one_bytea_to_scalar(unsigned char *value, int valuelen,
169 int rangelo, int rangehi);
170 static char *convert_string_datum(Datum value, Oid typid);
171 static double convert_timevalue_to_scalar(Datum value, Oid typid);
172 static void examine_simple_variable(PlannerInfo *root, Var *var,
173 VariableStatData *vardata);
174 static bool get_variable_range(PlannerInfo *root, VariableStatData *vardata,
175 Oid sortop, Datum *min, Datum *max);
176 static bool get_actual_variable_range(PlannerInfo *root,
177 VariableStatData *vardata,
179 Datum *min, Datum *max);
180 static RelOptInfo *find_join_input_rel(PlannerInfo *root, Relids relids);
181 static Selectivity prefix_selectivity(PlannerInfo *root,
182 VariableStatData *vardata,
183 Oid vartype, Oid opfamily, Const *prefixcon);
184 static Selectivity pattern_selectivity(Const *patt, Pattern_Type ptype);
185 static Datum string_to_datum(const char *str, Oid datatype);
186 static Const *string_to_const(const char *str, Oid datatype);
187 static Const *string_to_bytea_const(const char *str, size_t str_len);
191 * eqsel - Selectivity of "=" for any data types.
193 * Note: this routine is also used to estimate selectivity for some
194 * operators that are not "=" but have comparable selectivity behavior,
195 * such as "~=" (geometric approximate-match). Even for "=", we must
196 * keep in mind that the left and right datatypes may differ.
199 eqsel(PG_FUNCTION_ARGS)
201 PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
202 Oid operator = PG_GETARG_OID(1);
203 List *args = (List *) PG_GETARG_POINTER(2);
204 int varRelid = PG_GETARG_INT32(3);
205 VariableStatData vardata;
211 * If expression is not variable = something or something = variable, then
212 * punt and return a default estimate.
214 if (!get_restriction_variable(root, args, varRelid,
215 &vardata, &other, &varonleft))
216 PG_RETURN_FLOAT8(DEFAULT_EQ_SEL);
219 * We can do a lot better if the something is a constant. (Note: the
220 * Const might result from estimation rather than being a simple constant
223 if (IsA(other, Const))
224 selec = var_eq_const(&vardata, operator,
225 ((Const *) other)->constvalue,
226 ((Const *) other)->constisnull,
229 selec = var_eq_non_const(&vardata, operator, other,
/* release any statistics info acquired by get_restriction_variable above */
232 ReleaseVariableStats(vardata);
234 PG_RETURN_FLOAT8((float8) selec);
238 * var_eq_const --- eqsel for var = const case
240 * This is split out so that some other estimation functions can use it.
243 var_eq_const(VariableStatData *vardata, Oid operator,
244 Datum constval, bool constisnull,
251 * If the constant is NULL, assume operator is strict and return zero, ie,
252 * operator will never return TRUE.
258 * If we matched the var to a unique index, assume there is exactly one
259 * match regardless of anything else. (This is slightly bogus, since the
260 * index's equality operator might be different from ours, but it's more
261 * likely to be right than ignoring the information.)
263 if (vardata->isunique && vardata->rel && vardata->rel->tuples >= 1.0)
264 return 1.0 / vardata->rel->tuples;
266 if (HeapTupleIsValid(vardata->statsTuple))
268 Form_pg_statistic stats;
276 stats = (Form_pg_statistic) GETSTRUCT(vardata->statsTuple);
279 * Is the constant "=" to any of the column's most common values?
280 * (Although the given operator may not really be "=", we will assume
281 * that seeing whether it returns TRUE is an appropriate test. If you
282 * don't like this, maybe you shouldn't be using eqsel for your
285 if (get_attstatsslot(vardata->statsTuple,
286 vardata->atttype, vardata->atttypmod,
287 STATISTIC_KIND_MCV, InvalidOid,
290 &numbers, &nnumbers))
/* look up the operator's implementation function once, outside the MCV loop */
294 fmgr_info(get_opcode(operator), &eqproc);
296 for (i = 0; i < nvalues; i++)
298 /* be careful to apply operator right way 'round */
300 match = DatumGetBool(FunctionCall2Coll(&eqproc,
301 DEFAULT_COLLATION_OID,
305 match = DatumGetBool(FunctionCall2Coll(&eqproc,
306 DEFAULT_COLLATION_OID,
315 /* no most-common-value info available */
318 i = nvalues = nnumbers = 0;
324 * Constant is "=" to this common value. We know selectivity
325 * exactly (or as exactly as ANALYZE could calculate it, anyway).
332 * Comparison is against a constant that is neither NULL nor any
333 * of the common values. Its selectivity cannot be more than
336 double sumcommon = 0.0;
337 double otherdistinct;
339 for (i = 0; i < nnumbers; i++)
340 sumcommon += numbers[i];
/* remaining (non-MCV, non-null) population fraction */
341 selec = 1.0 - sumcommon - stats->stanullfrac;
342 CLAMP_PROBABILITY(selec);
345 * and in fact it's probably a good deal less. We approximate that
346 * all the not-common values share this remaining fraction
347 * equally, so we divide by the number of other distinct values.
349 otherdistinct = get_variable_numdistinct(vardata, &isdefault) - nnumbers;
350 if (otherdistinct > 1)
351 selec /= otherdistinct;
354 * Another cross-check: selectivity shouldn't be estimated as more
355 * than the least common "most common value".
357 if (nnumbers > 0 && selec > numbers[nnumbers - 1])
358 selec = numbers[nnumbers - 1];
361 free_attstatsslot(vardata->atttype, values, nvalues,
367 * No ANALYZE stats available, so make a guess using estimated number
368 * of distinct values and assuming they are equally common. (The guess
369 * is unlikely to be very good, but we do know a few special cases.)
371 selec = 1.0 / get_variable_numdistinct(vardata, &isdefault);
374 /* result should be in range, but make sure... */
375 CLAMP_PROBABILITY(selec);
381 * var_eq_non_const --- eqsel for var = something-other-than-const case
384 var_eq_non_const(VariableStatData *vardata, Oid operator,
392 * If we matched the var to a unique index, assume there is exactly one
393 * match regardless of anything else. (This is slightly bogus, since the
394 * index's equality operator might be different from ours, but it's more
395 * likely to be right than ignoring the information.)
397 if (vardata->isunique && vardata->rel && vardata->rel->tuples >= 1.0)
398 return 1.0 / vardata->rel->tuples;
400 if (HeapTupleIsValid(vardata->statsTuple))
402 Form_pg_statistic stats;
407 stats = (Form_pg_statistic) GETSTRUCT(vardata->statsTuple);
410 * Search is for a value that we do not know a priori, but we will
411 * assume it is not NULL. Estimate the selectivity as non-null
412 * fraction divided by number of distinct values, so that we get a
413 * result averaged over all possible values whether common or
414 * uncommon. (Essentially, we are assuming that the not-yet-known
415 * comparison value is equally likely to be any of the possible
416 * values, regardless of their frequency in the table. Is that a good
419 selec = 1.0 - stats->stanullfrac;
420 ndistinct = get_variable_numdistinct(vardata, &isdefault);
425 * Cross-check: selectivity should never be estimated as more than the
426 * most common value's.
428 if (get_attstatsslot(vardata->statsTuple,
429 vardata->atttype, vardata->atttypmod,
430 STATISTIC_KIND_MCV, InvalidOid,
433 &numbers, &nnumbers))
435 if (nnumbers > 0 && selec > numbers[0])
/* only the frequency array was fetched above, so pass NULL/0 for values */
437 free_attstatsslot(vardata->atttype, NULL, 0, numbers, nnumbers);
443 * No ANALYZE stats available, so make a guess using estimated number
444 * of distinct values and assuming they are equally common. (The guess
445 * is unlikely to be very good, but we do know a few special cases.)
447 selec = 1.0 / get_variable_numdistinct(vardata, &isdefault);
450 /* result should be in range, but make sure... */
451 CLAMP_PROBABILITY(selec);
457 * neqsel - Selectivity of "!=" for any data types.
459 * This routine is also used for some operators that are not "!="
460 * but have comparable selectivity behavior. See above comments
464 neqsel(PG_FUNCTION_ARGS)
466 PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
467 Oid operator = PG_GETARG_OID(1);
468 List *args = (List *) PG_GETARG_POINTER(2);
469 int varRelid = PG_GETARG_INT32(3);
474 * We want 1 - eqsel() where the equality operator is the one associated
475 * with this != operator, that is, its negator.
477 eqop = get_negator(operator);
480 result = DatumGetFloat8(DirectFunctionCall4(eqsel,
481 PointerGetDatum(root),
482 ObjectIdGetDatum(eqop),
483 PointerGetDatum(args),
484 Int32GetDatum(varRelid)));
488 /* Use default selectivity (should we raise an error instead?) */
489 result = DEFAULT_EQ_SEL;
/* invert: the rows NOT matched by the equality operator */
491 result = 1.0 - result;
492 PG_RETURN_FLOAT8(result);
496 * scalarineqsel - Selectivity of "<", "<=", ">", ">=" for scalars.
498 * This is the guts of both scalarltsel and scalargtsel. The caller has
499 * commuted the clause, if necessary, so that we can treat the variable as
500 * being on the left. The caller must also make sure that the other side
501 * of the clause is a non-null Const, and dissect same into a value and
504 * This routine works for any datatype (or pair of datatypes) known to
505 * convert_to_scalar(). If it is applied to some other datatype,
506 * it will return a default estimate.
509 scalarineqsel(PlannerInfo *root, Oid operator, bool isgt,
510 VariableStatData *vardata, Datum constval, Oid consttype)
512 Form_pg_statistic stats;
519 if (!HeapTupleIsValid(vardata->statsTuple))
521 /* no stats available, so default result */
522 return DEFAULT_INEQ_SEL;
524 stats = (Form_pg_statistic) GETSTRUCT(vardata->statsTuple);
/* set up to call the operator's implementation function directly */
526 fmgr_info(get_opcode(operator), &opproc);
529 * If we have most-common-values info, add up the fractions of the MCV
530 * entries that satisfy MCV OP CONST. These fractions contribute directly
531 * to the result selectivity. Also add up the total fraction represented
534 mcv_selec = mcv_selectivity(vardata, &opproc, constval, true,
538 * If there is a histogram, determine which bin the constant falls in, and
539 * compute the resulting contribution to selectivity.
541 hist_selec = ineq_histogram_selectivity(root, vardata, &opproc, isgt,
542 constval, consttype);
545 * Now merge the results from the MCV and histogram calculations,
546 * realizing that the histogram covers only the non-null values that are
549 selec = 1.0 - stats->stanullfrac - sumcommon;
551 if (hist_selec >= 0.0)
556 * If no histogram but there are values not accounted for by MCV,
557 * arbitrarily assume half of them will match.
564 /* result should be in range, but make sure... */
565 CLAMP_PROBABILITY(selec);
571 * mcv_selectivity - Examine the MCV list for selectivity estimates
573 * Determine the fraction of the variable's MCV population that satisfies
574 * the predicate (VAR OP CONST), or (CONST OP VAR) if !varonleft. Also
575 * compute the fraction of the total column population represented by the MCV
576 * list. This code will work for any boolean-returning predicate operator.
578 * The function result is the MCV selectivity, and the fraction of the
579 * total population is returned into *sumcommonp. Zeroes are returned
580 * if there is no MCV list.
583 mcv_selectivity(VariableStatData *vardata, FmgrInfo *opproc,
584 Datum constval, bool varonleft,
598 if (HeapTupleIsValid(vardata->statsTuple) &&
599 get_attstatsslot(vardata->statsTuple,
600 vardata->atttype, vardata->atttypmod,
601 STATISTIC_KIND_MCV, InvalidOid,
604 &numbers, &nnumbers))
606 for (i = 0; i < nvalues; i++)
609 DatumGetBool(FunctionCall2Coll(opproc,
610 DEFAULT_COLLATION_OID,
613 DatumGetBool(FunctionCall2Coll(opproc,
614 DEFAULT_COLLATION_OID,
617 mcv_selec += numbers[i];
618 sumcommon += numbers[i];
620 free_attstatsslot(vardata->atttype, values, nvalues,
/* report the total MCV population fraction to the caller */
624 *sumcommonp = sumcommon;
629 * histogram_selectivity - Examine the histogram for selectivity estimates
631 * Determine the fraction of the variable's histogram entries that satisfy
632 * the predicate (VAR OP CONST), or (CONST OP VAR) if !varonleft.
634 * This code will work for any boolean-returning predicate operator, whether
635 * or not it has anything to do with the histogram sort operator. We are
636 * essentially using the histogram just as a representative sample. However,
637 * small histograms are unlikely to be all that representative, so the caller
638 * should be prepared to fall back on some other estimation approach when the
639 * histogram is missing or very small. It may also be prudent to combine this
640 * approach with another one when the histogram is small.
642 * If the actual histogram size is not at least min_hist_size, we won't bother
643 * to do the calculation at all. Also, if the n_skip parameter is > 0, we
644 * ignore the first and last n_skip histogram elements, on the grounds that
645 * they are outliers and hence not very representative. Typical values for
646 * these parameters are 10 and 1.
648 * The function result is the selectivity, or -1 if there is no histogram
649 * or it's smaller than min_hist_size.
651 * The output parameter *hist_size receives the actual histogram size,
652 * or zero if no histogram. Callers may use this number to decide how
653 * much faith to put in the function result.
655 * Note that the result disregards both the most-common-values (if any) and
656 * null entries. The caller is expected to combine this result with
657 * statistics for those portions of the column population. It may also be
658 * prudent to clamp the result range, ie, disbelieve exact 0 or 1 outputs.
661 histogram_selectivity(VariableStatData *vardata, FmgrInfo *opproc,
662 Datum constval, bool varonleft,
663 int min_hist_size, int n_skip,
670 /* check sanity of parameters */
672 Assert(min_hist_size > 2 * n_skip);
674 if (HeapTupleIsValid(vardata->statsTuple) &&
675 get_attstatsslot(vardata->statsTuple,
676 vardata->atttype, vardata->atttypmod,
677 STATISTIC_KIND_HISTOGRAM, InvalidOid,
682 *hist_size = nvalues;
683 if (nvalues >= min_hist_size)
688 for (i = n_skip; i < nvalues - n_skip; i++)
691 DatumGetBool(FunctionCall2Coll(opproc,
692 DEFAULT_COLLATION_OID,
695 DatumGetBool(FunctionCall2Coll(opproc,
696 DEFAULT_COLLATION_OID,
/* fraction of the examined (non-skipped) histogram entries that matched */
701 result = ((double) nmatch) / ((double) (nvalues - 2 * n_skip));
705 free_attstatsslot(vardata->atttype, values, nvalues, NULL, 0);
717 * ineq_histogram_selectivity - Examine the histogram for scalarineqsel
719 * Determine the fraction of the variable's histogram population that
720 * satisfies the inequality condition, ie, VAR < CONST or VAR > CONST.
722 * Returns -1 if there is no histogram (valid results will always be >= 0).
724 * Note that the result disregards both the most-common-values (if any) and
725 * null entries. The caller is expected to combine this result with
726 * statistics for those portions of the column population.
729 ineq_histogram_selectivity(PlannerInfo *root,
730 VariableStatData *vardata,
731 FmgrInfo *opproc, bool isgt,
732 Datum constval, Oid consttype)
742 * Someday, ANALYZE might store more than one histogram per rel/att,
743 * corresponding to more than one possible sort ordering defined for the
744 * column type. However, to make that work we will need to figure out
745 * which staop to search for --- it's not necessarily the one we have at
746 * hand! (For example, we might have a '<=' operator rather than the '<'
747 * operator that will appear in staop.) For now, assume that whatever
748 * appears in pg_statistic is sorted the same way our operator sorts, or
749 * the reverse way if isgt is TRUE.
751 if (HeapTupleIsValid(vardata->statsTuple) &&
752 get_attstatsslot(vardata->statsTuple,
753 vardata->atttype, vardata->atttypmod,
754 STATISTIC_KIND_HISTOGRAM, InvalidOid,
762 * Use binary search to find proper location, ie, the first slot
763 * at which the comparison fails. (If the given operator isn't
764 * actually sort-compatible with the histogram, you'll get garbage
765 * results ... but probably not any more garbage-y than you would
766 * from the old linear search.)
768 * If the binary search accesses the first or last histogram
769 * entry, we try to replace that endpoint with the true column min
770 * or max as found by get_actual_variable_range(). This
771 * ameliorates misestimates when the min or max is moving as a
772 * result of changes since the last ANALYZE. Note that this could
773 * result in effectively including MCVs into the histogram that
774 * weren't there before, but we don't try to correct for that.
777 int lobound = 0; /* first possible slot to search */
778 int hibound = nvalues; /* last+1 slot to search */
779 bool have_end = false;
782 * If there are only two histogram entries, we'll want up-to-date
783 * values for both. (If there are more than two, we need at most
784 * one of them to be updated, so we deal with that within the
788 have_end = get_actual_variable_range(root,
/* binary search: invariant is that the answer slot lies in [lobound, hibound] */
794 while (lobound < hibound)
796 int probe = (lobound + hibound) / 2;
800 * If we find ourselves about to compare to the first or last
801 * histogram entry, first try to replace it with the actual
802 * current min or max (unless we already did so above).
804 if (probe == 0 && nvalues > 2)
805 have_end = get_actual_variable_range(root,
810 else if (probe == nvalues - 1 && nvalues > 2)
811 have_end = get_actual_variable_range(root,
817 ltcmp = DatumGetBool(FunctionCall2Coll(opproc,
818 DEFAULT_COLLATION_OID,
831 /* Constant is below lower histogram boundary. */
834 else if (lobound >= nvalues)
836 /* Constant is above upper histogram boundary. */
848 * We have values[i-1] <= constant <= values[i].
850 * Convert the constant and the two nearest bin boundary
851 * values to a uniform comparison scale, and do a linear
852 * interpolation within this bin.
854 if (convert_to_scalar(constval, consttype, &val,
855 values[i - 1], values[i],
861 /* cope if bin boundaries appear identical */
866 else if (val >= high)
870 binfrac = (val - low) / (high - low);
873 * Watch out for the possibility that we got a NaN or
874 * Infinity from the division. This can happen
875 * despite the previous checks, if for example "low"
878 if (isnan(binfrac) ||
879 binfrac < 0.0 || binfrac > 1.0)
886 * Ideally we'd produce an error here, on the grounds that
887 * the given operator shouldn't have scalarXXsel
888 * registered as its selectivity func unless we can deal
889 * with its operand types. But currently, all manner of
890 * stuff is invoking scalarXXsel, so give a default
891 * estimate until that can be fixed.
897 * Now, compute the overall selectivity across the values
898 * represented by the histogram. We have i-1 full bins and
899 * binfrac partial bin below the constant.
901 histfrac = (double) (i - 1) + binfrac;
902 histfrac /= (double) (nvalues - 1);
906 * Now histfrac = fraction of histogram entries below the
909 * Account for "<" vs ">"
911 hist_selec = isgt ? (1.0 - histfrac) : histfrac;
914 * The histogram boundaries are only approximate to begin with,
915 * and may well be out of date anyway. Therefore, don't believe
916 * extremely small or large selectivity estimates --- unless we
917 * got actual current endpoint values from the table.
920 CLAMP_PROBABILITY(hist_selec);
923 if (hist_selec < 0.0001)
925 else if (hist_selec > 0.9999)
930 free_attstatsslot(vardata->atttype, values, nvalues, NULL, 0);
937 * scalarltsel - Selectivity of "<" (also "<=") for scalars.
940 scalarltsel(PG_FUNCTION_ARGS)
942 PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
943 Oid operator = PG_GETARG_OID(1);
944 List *args = (List *) PG_GETARG_POINTER(2);
945 int varRelid = PG_GETARG_INT32(3);
946 VariableStatData vardata;
955 * If expression is not variable op something or something op variable,
956 * then punt and return a default estimate.
958 if (!get_restriction_variable(root, args, varRelid,
959 &vardata, &other, &varonleft))
960 PG_RETURN_FLOAT8(DEFAULT_INEQ_SEL);
963 * Can't do anything useful if the something is not a constant, either.
965 if (!IsA(other, Const))
967 ReleaseVariableStats(vardata);
968 PG_RETURN_FLOAT8(DEFAULT_INEQ_SEL);
972 * If the constant is NULL, assume operator is strict and return zero, ie,
973 * operator will never return TRUE.
975 if (((Const *) other)->constisnull)
977 ReleaseVariableStats(vardata);
978 PG_RETURN_FLOAT8(0.0);
980 constval = ((Const *) other)->constvalue;
981 consttype = ((Const *) other)->consttype;
984 * Force the var to be on the left to simplify logic in scalarineqsel.
988 /* we have var < other */
993 /* we have other < var, commute to make var > other */
994 operator = get_commutator(operator);
997 /* Use default selectivity (should we raise an error instead?) */
998 ReleaseVariableStats(vardata);
999 PG_RETURN_FLOAT8(DEFAULT_INEQ_SEL);
/* var is now on the left; isgt tells scalarineqsel which direction applies */
1004 selec = scalarineqsel(root, operator, isgt, &vardata, constval, consttype);
1006 ReleaseVariableStats(vardata);
1008 PG_RETURN_FLOAT8((float8) selec);
1012 * scalargtsel - Selectivity of ">" (also ">=") for scalars.
1015 scalargtsel(PG_FUNCTION_ARGS)
1017 PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
1018 Oid operator = PG_GETARG_OID(1);
1019 List *args = (List *) PG_GETARG_POINTER(2);
1020 int varRelid = PG_GETARG_INT32(3);
1021 VariableStatData vardata;
1030 * If expression is not variable op something or something op variable,
1031 * then punt and return a default estimate.
1033 if (!get_restriction_variable(root, args, varRelid,
1034 &vardata, &other, &varonleft))
1035 PG_RETURN_FLOAT8(DEFAULT_INEQ_SEL);
1038 * Can't do anything useful if the something is not a constant, either.
1040 if (!IsA(other, Const))
1042 ReleaseVariableStats(vardata);
1043 PG_RETURN_FLOAT8(DEFAULT_INEQ_SEL);
1047 * If the constant is NULL, assume operator is strict and return zero, ie,
1048 * operator will never return TRUE.
1050 if (((Const *) other)->constisnull)
1052 ReleaseVariableStats(vardata);
1053 PG_RETURN_FLOAT8(0.0);
1055 constval = ((Const *) other)->constvalue;
1056 consttype = ((Const *) other)->consttype;
1059 * Force the var to be on the left to simplify logic in scalarineqsel.
1063 /* we have var > other */
1068 /* we have other > var, commute to make var < other */
1069 operator = get_commutator(operator);
1072 /* Use default selectivity (should we raise an error instead?) */
1073 ReleaseVariableStats(vardata);
1074 PG_RETURN_FLOAT8(DEFAULT_INEQ_SEL);
/* var is now on the left; isgt tells scalarineqsel which direction applies */
1079 selec = scalarineqsel(root, operator, isgt, &vardata, constval, consttype);
1081 ReleaseVariableStats(vardata);
1083 PG_RETURN_FLOAT8((float8) selec);
1087 * patternsel - Generic code for pattern-match selectivity.
1090 patternsel(PG_FUNCTION_ARGS, Pattern_Type ptype, bool negate)
1092 PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
1093 Oid operator = PG_GETARG_OID(1);
1094 List *args = (List *) PG_GETARG_POINTER(2);
1095 int varRelid = PG_GETARG_INT32(3);
1096 VariableStatData vardata;
1103 Pattern_Prefix_Status pstatus;
1105 Const *prefix = NULL;
1110 * If this is for a NOT LIKE or similar operator, get the corresponding
1111 * positive-match operator and work with that. Set result to the correct
1112 * default estimate, too.
1116 operator = get_negator(operator);
1117 if (!OidIsValid(operator))
1118 elog(ERROR, "patternsel called for operator without a negator");
1119 result = 1.0 - DEFAULT_MATCH_SEL;
1123 result = DEFAULT_MATCH_SEL;
1127 * If expression is not variable op constant, then punt and return a
1130 if (!get_restriction_variable(root, args, varRelid,
1131 &vardata, &other, &varonleft))
1133 if (!varonleft || !IsA(other, Const))
1135 ReleaseVariableStats(vardata);
1140 * If the constant is NULL, assume operator is strict and return zero, ie,
1141 * operator will never return TRUE. (It's zero even for a negator op.)
1143 if (((Const *) other)->constisnull)
1145 ReleaseVariableStats(vardata);
1148 constval = ((Const *) other)->constvalue;
1149 consttype = ((Const *) other)->consttype;
1152 * The right-hand const is type text or bytea for all supported operators.
1153 * We do not expect to see binary-compatible types here, since
1154 * const-folding should have relabeled the const to exactly match the
1155 * operator's declared type.
1157 if (consttype != TEXTOID && consttype != BYTEAOID)
1159 ReleaseVariableStats(vardata);
1164 * Similarly, the exposed type of the left-hand side should be one of
1165 * those we know. (Do not look at vardata.atttype, which might be
1166 * something binary-compatible but different.) We can use it to choose
1167 * the index opfamily from which we must draw the comparison operators.
1169 * NOTE: It would be more correct to use the PATTERN opfamilies than the
1170 * simple ones, but at the moment ANALYZE will not generate statistics for
1171 * the PATTERN operators. But our results are so approximate anyway that
1172 * it probably hardly matters.
1174 vartype = vardata.vartype;
1179 opfamily = TEXT_BTREE_FAM_OID;
1182 opfamily = BPCHAR_BTREE_FAM_OID;
1185 opfamily = NAME_BTREE_FAM_OID;
1188 opfamily = BYTEA_BTREE_FAM_OID;
1191 ReleaseVariableStats(vardata);
1196 * Divide pattern into fixed prefix and remainder. XXX we have to assume
1197 * default collation here, because we don't have access to the actual
1198 * input collation for the operator. FIXME ...
1200 patt = (Const *) other;
1201 pstatus = pattern_fixed_prefix(patt, ptype, DEFAULT_COLLATION_OID,
1205 * If necessary, coerce the prefix constant to the right type. (The "rest"
1206 * constant need not be changed.)
1208 if (prefix && prefix->consttype != vartype)
1212 switch (prefix->consttype)
1215 prefixstr = TextDatumGetCString(prefix->constvalue);
1218 prefixstr = DatumGetCString(DirectFunctionCall1(byteaout,
1219 prefix->constvalue));
1222 elog(ERROR, "unrecognized consttype: %u",
1224 ReleaseVariableStats(vardata);
1227 prefix = string_to_const(prefixstr, vartype);
1231 if (pstatus == Pattern_Prefix_Exact)
1234 * Pattern specifies an exact match, so pretend operator is '='
1236 Oid eqopr = get_opfamily_member(opfamily, vartype, vartype,
1237 BTEqualStrategyNumber);
1239 if (eqopr == InvalidOid)
1240 elog(ERROR, "no = operator for opfamily %u", opfamily);
1241 result = var_eq_const(&vardata, eqopr, prefix->constvalue,
1247 * Not exact-match pattern. If we have a sufficiently large
1248 * histogram, estimate selectivity for the histogram part of the
1249 * population by counting matches in the histogram. If not, estimate
1250 * selectivity of the fixed prefix and remainder of pattern
1251 * separately, then combine the two to get an estimate of the
1252 * selectivity for the part of the column population represented by
1253 * the histogram. (For small histograms, we combine these
1256 * We then add up data for any most-common-values values; these are
1257 * not in the histogram population, and we can get exact answers for
1258 * them by applying the pattern operator, so there's no reason to
1259 * approximate. (If the MCVs cover a significant part of the total
1260 * population, this gives us a big leg up in accuracy.)
1269 /* Try to use the histogram entries to get selectivity */
1270 fmgr_info(get_opcode(operator), &opproc);
1272 selec = histogram_selectivity(&vardata, &opproc, constval, true,
1275 /* If not at least 100 entries, use the heuristic method */
1276 if (hist_size < 100)
1278 Selectivity heursel;
1279 Selectivity prefixsel;
1280 Selectivity restsel;
1282 if (pstatus == Pattern_Prefix_Partial)
1283 prefixsel = prefix_selectivity(root, &vardata, vartype,
1287 restsel = pattern_selectivity(rest, ptype);
1288 heursel = prefixsel * restsel;
1290 if (selec < 0) /* fewer than 10 histogram entries? */
1295 * For histogram sizes from 10 to 100, we combine the
1296 * histogram and heuristic selectivities, putting increasingly
1297 * more trust in the histogram for larger sizes.
1299 double hist_weight = hist_size / 100.0;
1301 selec = selec * hist_weight + heursel * (1.0 - hist_weight);
1305 /* In any case, don't believe extremely small or large estimates. */
1308 else if (selec > 0.9999)
1312 * If we have most-common-values info, add up the fractions of the MCV
1313 * entries that satisfy MCV OP PATTERN. These fractions contribute
1314 * directly to the result selectivity. Also add up the total fraction
1315 * represented by MCV entries.
1317 mcv_selec = mcv_selectivity(&vardata, &opproc, constval, true,
1320 if (HeapTupleIsValid(vardata.statsTuple))
1321 nullfrac = ((Form_pg_statistic) GETSTRUCT(vardata.statsTuple))->stanullfrac;
1326 * Now merge the results from the MCV and histogram calculations,
1327 * realizing that the histogram covers only the non-null values that
1328 * are not listed in MCV.
1330 selec *= 1.0 - nullfrac - sumcommon;
1333 /* result should be in range, but make sure... */
1334 CLAMP_PROBABILITY(selec);
1340 pfree(DatumGetPointer(prefix->constvalue));
1344 ReleaseVariableStats(vardata);
1346 return negate ? (1.0 - result) : result;
1350 * regexeqsel - Selectivity of regular-expression pattern match.
1353 regexeqsel(PG_FUNCTION_ARGS)
/* Thin wrapper: delegate to patternsel() for the '~' operator (regex match, no negation). */
1355 	PG_RETURN_FLOAT8(patternsel(fcinfo, Pattern_Type_Regex, false));
1359 * icregexeqsel - Selectivity of case-insensitive regex match.
1362 icregexeqsel(PG_FUNCTION_ARGS)
/* Thin wrapper: delegate to patternsel() for '~*' (case-insensitive regex match). */
1364 	PG_RETURN_FLOAT8(patternsel(fcinfo, Pattern_Type_Regex_IC, false));
1368 * likesel - Selectivity of LIKE pattern match.
1371 likesel(PG_FUNCTION_ARGS)
/* Thin wrapper: delegate to patternsel() for LIKE (no negation). */
1373 	PG_RETURN_FLOAT8(patternsel(fcinfo, Pattern_Type_Like, false));
1377 * iclikesel - Selectivity of ILIKE pattern match.
1380 iclikesel(PG_FUNCTION_ARGS)
/* Thin wrapper: delegate to patternsel() for ILIKE (no negation). */
1382 	PG_RETURN_FLOAT8(patternsel(fcinfo, Pattern_Type_Like_IC, false));
1386 * regexnesel - Selectivity of regular-expression pattern non-match.
1389 regexnesel(PG_FUNCTION_ARGS)
/* Thin wrapper: patternsel() with negate = true, i.e. 1 - (regex match selectivity). */
1391 	PG_RETURN_FLOAT8(patternsel(fcinfo, Pattern_Type_Regex, true));
1395 * icregexnesel - Selectivity of case-insensitive regex non-match.
1398 icregexnesel(PG_FUNCTION_ARGS)
/* Thin wrapper: patternsel() with negate = true for case-insensitive regex non-match. */
1400 	PG_RETURN_FLOAT8(patternsel(fcinfo, Pattern_Type_Regex_IC, true));
1404 * nlikesel - Selectivity of LIKE pattern non-match.
1407 nlikesel(PG_FUNCTION_ARGS)
/* Thin wrapper: patternsel() with negate = true for NOT LIKE. */
1409 	PG_RETURN_FLOAT8(patternsel(fcinfo, Pattern_Type_Like, true));
1413 * icnlikesel - Selectivity of ILIKE pattern non-match.
1416 icnlikesel(PG_FUNCTION_ARGS)
/* Thin wrapper: patternsel() with negate = true for NOT ILIKE. */
1418 	PG_RETURN_FLOAT8(patternsel(fcinfo, Pattern_Type_Like_IC, true));
1422 * booltestsel - Selectivity of BooleanTest Node.
1425 booltestsel(PlannerInfo *root, BoolTestType booltesttype, Node *arg,
1426 			int varRelid, JoinType jointype, SpecialJoinInfo *sjinfo)
/*
 * Estimate selectivity of "arg IS [NOT] TRUE/FALSE/UNKNOWN".
 *
 * Three-tier strategy, chosen by how much statistics we can get for arg:
 *   1. MCV slot available: since a boolean column has at most two non-null
 *      values, the first MCV entry plus stanullfrac fully determine the
 *      true/false/null frequency split.
 *   2. Stats tuple but no MCV slot: use stanullfrac for the UNKNOWN cases
 *      and assume an even true/false split of the non-null fraction.
 *   3. No stats at all: fall back to DEFAULT_*_SEL constants for the
 *      UNKNOWN cases, or recurse into clause_selectivity() for the
 *      TRUE/FALSE cases (ignoring the possibility of NULLs).
 */
1428 	VariableStatData vardata;
1431 	examine_variable(root, arg, varRelid, &vardata);
1433 	if (HeapTupleIsValid(vardata.statsTuple))
1435 		Form_pg_statistic stats;
1442 		stats = (Form_pg_statistic) GETSTRUCT(vardata.statsTuple);
1443 		freq_null = stats->stanullfrac;
1445 		if (get_attstatsslot(vardata.statsTuple,
1446 							 vardata.atttype, vardata.atttypmod,
1447 							 STATISTIC_KIND_MCV, InvalidOid,
1450 							 &numbers, &nnumbers)
1457 			 * Get first MCV frequency and derive frequency for true.
1459 			if (DatumGetBool(values[0]))
1460 				freq_true = numbers[0];
			/* First MCV is 'false': everything that is neither it nor null is 'true' */
1462 				freq_true = 1.0 - numbers[0] - freq_null;
1465 			 * Next derive frequency for false. Then use these as appropriate
1466 			 * to derive frequency for each case.
1468 			freq_false = 1.0 - freq_true - freq_null;
1470 			switch (booltesttype)
1473 					/* select only NULL values */
1476 				case IS_NOT_UNKNOWN:
1477 					/* select non-NULL values */
1478 					selec = 1.0 - freq_null;
1481 					/* select only TRUE values */
1485 					/* select non-TRUE values */
1486 					selec = 1.0 - freq_true;
1489 					/* select only FALSE values */
1493 					/* select non-FALSE values */
1494 					selec = 1.0 - freq_false;
1497 					elog(ERROR, "unrecognized booltesttype: %d",
1498 						 (int) booltesttype);
1499 					selec = 0.0;	/* Keep compiler quiet */
1503 			free_attstatsslot(vardata.atttype, values, nvalues,
1509 			 * No most-common-value info available. Still have null fraction
1510 			 * information, so use it for IS [NOT] UNKNOWN. Otherwise adjust
1511 			 * for null fraction and assume an even split for boolean tests.
1513 			switch (booltesttype)
1518 					 * Use freq_null directly.
1522 				case IS_NOT_UNKNOWN:
1525 					 * Select not unknown (not null) values. Calculate from
1528 					selec = 1.0 - freq_null;
				/* TRUE/FALSE tests: assume 50/50 split of the non-null population */
1534 					selec = (1.0 - freq_null) / 2.0;
1537 					elog(ERROR, "unrecognized booltesttype: %d",
1538 						 (int) booltesttype);
1539 					selec = 0.0;	/* Keep compiler quiet */
1547 		 * If we can't get variable statistics for the argument, perhaps
1548 		 * clause_selectivity can do something with it. We ignore the
1549 		 * possibility of a NULL value when using clause_selectivity, and just
1550 		 * assume the value is either TRUE or FALSE.
1552 		switch (booltesttype)
1555 				selec = DEFAULT_UNK_SEL;
1557 			case IS_NOT_UNKNOWN:
1558 				selec = DEFAULT_NOT_UNK_SEL;
1562 				selec = (double) clause_selectivity(root, arg,
			/* non-TRUE test: complement of the clause's own selectivity */
1568 				selec = 1.0 - (double) clause_selectivity(root, arg,
1573 				elog(ERROR, "unrecognized booltesttype: %d",
1574 					 (int) booltesttype);
1575 				selec = 0.0;	/* Keep compiler quiet */
1580 	ReleaseVariableStats(vardata);
1582 	/* result should be in range, but make sure... */
1583 	CLAMP_PROBABILITY(selec);
1585 	return (Selectivity) selec;
1589 * nulltestsel - Selectivity of NullTest Node.
1592 nulltestsel(PlannerInfo *root, NullTestType nulltesttype, Node *arg,
1593 			int varRelid, JoinType jointype, SpecialJoinInfo *sjinfo)
/*
 * Estimate selectivity of "arg IS [NOT] NULL".
 *
 * If ANALYZE stats exist for arg, the answer is read straight off
 * stanullfrac (or its complement); otherwise fall back to the
 * DEFAULT_UNK_SEL / DEFAULT_NOT_UNK_SEL guesses.
 */
1595 	VariableStatData vardata;
1598 	examine_variable(root, arg, varRelid, &vardata);
1600 	if (HeapTupleIsValid(vardata.statsTuple))
1602 		Form_pg_statistic stats;
1605 		stats = (Form_pg_statistic) GETSTRUCT(vardata.statsTuple);
1606 		freq_null = stats->stanullfrac;
1608 		switch (nulltesttype)
1613 				 * Use freq_null directly.
1620 				 * Select not unknown (not null) values. Calculate from
1623 				selec = 1.0 - freq_null;
1626 				elog(ERROR, "unrecognized nulltesttype: %d",
1627 					 (int) nulltesttype);
				/* NB: early return skips ReleaseVariableStats, but elog(ERROR) does not return */
1628 				return (Selectivity) 0;		/* keep compiler quiet */
1634 		 * No ANALYZE stats available, so make a guess
1636 		switch (nulltesttype)
1639 				selec = DEFAULT_UNK_SEL;
1642 				selec = DEFAULT_NOT_UNK_SEL;
1645 				elog(ERROR, "unrecognized nulltesttype: %d",
1646 					 (int) nulltesttype);
1647 				return (Selectivity) 0; /* keep compiler quiet */
1651 	ReleaseVariableStats(vardata);
1653 	/* result should be in range, but make sure... */
1654 	CLAMP_PROBABILITY(selec);
1656 	return (Selectivity) selec;
1660 * strip_array_coercion - strip binary-compatible relabeling from an array expr
1662 * For array values, the parser normally generates ArrayCoerceExpr conversions,
1663 * but it seems possible that RelabelType might show up. Also, the planner
1664 * is not currently tense about collapsing stacked ArrayCoerceExpr nodes,
1665 * so we need to be ready to deal with more than one level.
1668 strip_array_coercion(Node *node)
/*
 * Peel off any stacked no-op array coercions from the top of the expression.
 * An ArrayCoerceExpr with elemfuncid == InvalidOid performs no per-element
 * conversion (binary-compatible relabeling only), so it can be looked
 * through; likewise RelabelType.  Loops (loop header elided in this view)
 * so that multiple stacked levels are all removed.
 */
1672 		if (node && IsA(node, ArrayCoerceExpr) &&
1673 			((ArrayCoerceExpr *) node)->elemfuncid == InvalidOid)
1675 			node = (Node *) ((ArrayCoerceExpr *) node)->arg;
1677 		else if (node && IsA(node, RelabelType))
1679 			/* We don't really expect this case, but may as well cope */
1680 			node = (Node *) ((RelabelType *) node)->arg;
1689 * scalararraysel - Selectivity of ScalarArrayOpExpr Node.
1692 scalararraysel(PlannerInfo *root,
1693 			   ScalarArrayOpExpr *clause,
1694 			   bool is_join_clause,
1697 			   SpecialJoinInfo *sjinfo)
/*
 * Estimate selectivity of "scalar op ANY/ALL (array)".
 *
 * Strategy: look up the underlying operator's restriction (or join)
 * selectivity estimator, apply it to each array element in turn, and merge
 * the per-element results the way clausesel.c merges OR (for ANY) or AND
 * (for ALL) conditions.  Three shapes of the right-hand side are handled:
 * a Const array, an ARRAY[] constructor, and an opaque expression (for
 * which we guess 10 elements and use a dummy CaseTestExpr operand).
 *
 * Note on the two call forms below: FunctionCall5 passes (root, operator,
 * args, jointype, sjinfo) — the oprjoin convention — while FunctionCall4
 * passes (root, operator, args, varRelid) — the oprrest convention.
 */
1699 	Oid			operator = clause->opno;
1700 	bool		useOr = clause->useOr;
1703 	Oid			nominal_element_type;
1704 	Oid			nominal_element_collation;
1705 	RegProcedure oprsel;
1706 	FmgrInfo	oprselproc;
1710 	 * First, look up the underlying operator's selectivity estimator. Punt if
1711 	 * it hasn't got one.
1714 		oprsel = get_oprjoin(operator);
1716 		oprsel = get_oprrest(operator);
	/* No estimator registered: return a 50% guess */
1718 		return (Selectivity) 0.5;
1719 	fmgr_info(oprsel, &oprselproc);
1721 	/* deconstruct the expression */
1722 	Assert(list_length(clause->args) == 2);
1723 	leftop = (Node *) linitial(clause->args);
1724 	rightop = (Node *) lsecond(clause->args);
1726 	/* get nominal (after relabeling) element type of rightop */
1727 	nominal_element_type = get_base_element_type(exprType(rightop));
1728 	if (!OidIsValid(nominal_element_type))
1729 		return (Selectivity) 0.5;		/* probably shouldn't happen */
1730 	/* get nominal collation, too, for generating constants */
1731 	nominal_element_collation = exprCollation(rightop);
1733 	/* look through any binary-compatible relabeling of rightop */
1734 	rightop = strip_array_coercion(rightop);
1737 	 * We consider three cases:
1739 	 * 1. rightop is an Array constant: deconstruct the array, apply the
1740 	 * operator's selectivity function for each array element, and merge the
1741 	 * results in the same way that clausesel.c does for AND/OR combinations.
1743 	 * 2. rightop is an ARRAY[] construct: apply the operator's selectivity
1744 	 * function for each element of the ARRAY[] construct, and merge.
1746 	 * 3. otherwise, make a guess ...
1748 	if (rightop && IsA(rightop, Const))
1750 		Datum		arraydatum = ((Const *) rightop)->constvalue;
1751 		bool		arrayisnull = ((Const *) rightop)->constisnull;
1752 		ArrayType  *arrayval;
1761 		if (arrayisnull)		/* qual can't succeed if null array */
1762 			return (Selectivity) 0.0;
1763 		arrayval = DatumGetArrayTypeP(arraydatum);
1764 		get_typlenbyvalalign(ARR_ELEMTYPE(arrayval),
1765 							 &elmlen, &elmbyval, &elmalign);
1766 		deconstruct_array(arrayval,
1767 						  ARR_ELEMTYPE(arrayval),
1768 						  elmlen, elmbyval, elmalign,
1769 						  &elem_values, &elem_nulls, &num_elems);
		/* OR starts from 0 (nothing matches yet); AND starts from 1 */
1770 		s1 = useOr ? 0.0 : 1.0;
1771 		for (i = 0; i < num_elems; i++)
1776 			args = list_make2(leftop,
1777 							  makeConst(nominal_element_type,
1779 										nominal_element_collation,
1785 				s2 = DatumGetFloat8(FunctionCall5(&oprselproc,
1786 												  PointerGetDatum(root),
1787 												  ObjectIdGetDatum(operator),
1788 												  PointerGetDatum(args),
1789 												  Int16GetDatum(jointype),
1790 												  PointerGetDatum(sjinfo)));
1792 				s2 = DatumGetFloat8(FunctionCall4(&oprselproc,
1793 												  PointerGetDatum(root),
1794 												  ObjectIdGetDatum(operator),
1795 												  PointerGetDatum(args),
1796 												  Int32GetDatum(varRelid)));
			/* OR-merge: P(A or B) = P(A) + P(B) - P(A)P(B), assuming independence */
1798 				s1 = s1 + s2 - s1 * s2;
1803 	else if (rightop && IsA(rightop, ArrayExpr) &&
1804 			 !((ArrayExpr *) rightop)->multidims)
1806 		ArrayExpr  *arrayexpr = (ArrayExpr *) rightop;
1811 		get_typlenbyval(arrayexpr->element_typeid,
1812 						&elmlen, &elmbyval);
1813 		s1 = useOr ? 0.0 : 1.0;
1814 		foreach(l, arrayexpr->elements)
1816 			Node	   *elem = (Node *) lfirst(l);
1821 			 * Theoretically, if elem isn't of nominal_element_type we should
1822 			 * insert a RelabelType, but it seems unlikely that any operator
1823 			 * estimation function would really care ...
1825 			args = list_make2(leftop, elem);
1827 				s2 = DatumGetFloat8(FunctionCall5(&oprselproc,
1828 												  PointerGetDatum(root),
1829 												  ObjectIdGetDatum(operator),
1830 												  PointerGetDatum(args),
1831 												  Int16GetDatum(jointype),
1832 												  PointerGetDatum(sjinfo)));
1834 				s2 = DatumGetFloat8(FunctionCall4(&oprselproc,
1835 												  PointerGetDatum(root),
1836 												  ObjectIdGetDatum(operator),
1837 												  PointerGetDatum(args),
1838 												  Int32GetDatum(varRelid)));
1840 				s1 = s1 + s2 - s1 * s2;
1847 		CaseTestExpr *dummyexpr;
1853 		 * We need a dummy rightop to pass to the operator selectivity
1854 		 * routine. It can be pretty much anything that doesn't look like a
1855 		 * constant; CaseTestExpr is a convenient choice.
1857 		dummyexpr = makeNode(CaseTestExpr);
1858 		dummyexpr->typeId = nominal_element_type;
1859 		dummyexpr->typeMod = -1;
1860 		dummyexpr->collation = clause->inputcollid;
1861 		args = list_make2(leftop, dummyexpr);
1863 			s2 = DatumGetFloat8(FunctionCall5(&oprselproc,
1864 											  PointerGetDatum(root),
1865 											  ObjectIdGetDatum(operator),
1866 											  PointerGetDatum(args),
1867 											  Int16GetDatum(jointype),
1868 											  PointerGetDatum(sjinfo)));
1870 			s2 = DatumGetFloat8(FunctionCall4(&oprselproc,
1871 											  PointerGetDatum(root),
1872 											  ObjectIdGetDatum(operator),
1873 											  PointerGetDatum(args),
1874 											  Int32GetDatum(varRelid)));
1875 		s1 = useOr ? 0.0 : 1.0;
1878 		 * Arbitrarily assume 10 elements in the eventual array value (see
1879 		 * also estimate_array_length)
1881 		for (i = 0; i < 10; i++)
1884 				s1 = s1 + s2 - s1 * s2;
1890 	/* result should be in range, but make sure... */
1891 	CLAMP_PROBABILITY(s1);
1897 * Estimate number of elements in the array yielded by an expression.
1899 * It's important that this agree with scalararraysel.
1902 estimate_array_length(Node *arrayexpr)
/*
 * Estimate the number of elements the array expression will yield.
 * Exact for a non-null Const array (counted via ArrayGetNItems) and for a
 * one-dimensional ARRAY[] constructor (list_length of its elements);
 * otherwise a default guess (constant elided in this view) that must stay
 * in sync with scalararraysel's assumption.
 */
1904 	/* look through any binary-compatible relabeling of arrayexpr */
1905 	arrayexpr = strip_array_coercion(arrayexpr);
1907 	if (arrayexpr && IsA(arrayexpr, Const))
1909 		Datum		arraydatum = ((Const *) arrayexpr)->constvalue;
1910 		bool		arrayisnull = ((Const *) arrayexpr)->constisnull;
1911 		ArrayType  *arrayval;
1915 		arrayval = DatumGetArrayTypeP(arraydatum);
1916 		return ArrayGetNItems(ARR_NDIM(arrayval), ARR_DIMS(arrayval));
1918 	else if (arrayexpr && IsA(arrayexpr, ArrayExpr) &&
1919 			 !((ArrayExpr *) arrayexpr)->multidims)
1921 		return list_length(((ArrayExpr *) arrayexpr)->elements);
1925 		/* default guess --- see also scalararraysel */
1931 * rowcomparesel - Selectivity of RowCompareExpr Node.
1933 * We estimate RowCompare selectivity by considering just the first (high
1934 * order) columns, which makes it equivalent to an ordinary OpExpr. While
1935 * this estimate could be refined by considering additional columns, it
1936 * seems unlikely that we could do a lot better without multi-column
1940 rowcomparesel(PlannerInfo *root,
1941 			  RowCompareExpr *clause,
1942 			  int varRelid, JoinType jointype, SpecialJoinInfo *sjinfo)
/*
 * Selectivity of a row comparison: reduce it to an OpExpr on the leading
 * column pair only, then route to join_selectivity() or
 * restriction_selectivity() depending on whether it behaves as a join
 * clause in this context.
 */
1945 	Oid			opno = linitial_oid(clause->opnos);
1947 	bool		is_join_clause;
1949 	/* Build equivalent arg list for single operator */
1950 	opargs = list_make2(linitial(clause->largs), linitial(clause->rargs));
1953 	 * Decide if it's a join clause. This should match clausesel.c's
1954 	 * treat_as_join_clause(), except that we intentionally consider only the
1955 	 * leading columns and not the rest of the clause.
1960 		 * Caller is forcing restriction mode (eg, because we are examining an
1961 		 * inner indexscan qual).
1963 		is_join_clause = false;
1965 	else if (sjinfo == NULL)
1968 		 * It must be a restriction clause, since it's being evaluated at a
1971 		is_join_clause = false;
1976 		 * Otherwise, it's a join if there's more than one relation used.
1978 		is_join_clause = (NumRelids((Node *) opargs) > 1);
1983 		/* Estimate selectivity for a join clause. */
1984 		s1 = join_selectivity(root, opno,
1991 		/* Estimate selectivity for a restriction clause. */
1992 		s1 = restriction_selectivity(root, opno,
2001 * eqjoinsel - Join selectivity of "="
2004 eqjoinsel(PG_FUNCTION_ARGS)
/*
 * Join selectivity of "=".  Dispatches on the special-join type:
 * inner/outer joins go to eqjoinsel_inner(); SEMI/ANTI joins go to
 * eqjoinsel_semi(), with the arguments (and the operator, via its
 * commutator) swapped when the clause is reversed so that vardata1 is
 * always the LHS (outer) variable as eqjoinsel_semi requires.
 */
2006 	PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
2007 	Oid			operator = PG_GETARG_OID(1);
2008 	List	   *args = (List *) PG_GETARG_POINTER(2);
2011 	JoinType	jointype = (JoinType) PG_GETARG_INT16(3);
2013 	SpecialJoinInfo *sjinfo = (SpecialJoinInfo *) PG_GETARG_POINTER(4);
2015 	VariableStatData vardata1;
2016 	VariableStatData vardata2;
2017 	bool		join_is_reversed;
2018 	RelOptInfo *inner_rel;
2020 	get_join_variables(root, args, sjinfo,
2021 					   &vardata1, &vardata2, &join_is_reversed);
2023 	switch (sjinfo->jointype)
2028 			selec = eqjoinsel_inner(operator, &vardata1, &vardata2);
2033 			 * Look up the join's inner relation. min_righthand is sufficient
2034 			 * information because neither SEMI nor ANTI joins permit any
2035 			 * reassociation into or out of their RHS, so the righthand will
2036 			 * always be exactly that set of rels.
2038 			inner_rel = find_join_input_rel(root, sjinfo->min_righthand);
2040 			if (!join_is_reversed)
2041 				selec = eqjoinsel_semi(operator, &vardata1, &vardata2,
			/* Reversed clause: use the commutated operator and swap the vardatas */
2044 				selec = eqjoinsel_semi(get_commutator(operator),
2045 									   &vardata2, &vardata1,
2049 			/* other values not expected here */
2050 			elog(ERROR, "unrecognized join type: %d",
2051 				 (int) sjinfo->jointype);
2052 			selec = 0;			/* keep compiler quiet */
2056 	ReleaseVariableStats(vardata1);
2057 	ReleaseVariableStats(vardata2);
2059 	CLAMP_PROBABILITY(selec);
2061 	PG_RETURN_FLOAT8((float8) selec);
2065 * eqjoinsel_inner --- eqjoinsel for normal inner join
2067 * We also use this for LEFT/FULL outer joins; it's not presently clear
2068 * that it's worth trying to distinguish them here.
2071 eqjoinsel_inner(Oid operator,
2072 				VariableStatData *vardata1, VariableStatData *vardata2)
/*
 * eqjoinsel for normal inner (and LEFT/FULL outer) joins.
 *
 * When MCV lists exist on both sides, cross-match the two lists with the
 * actual equality operator to get an exact selectivity for the MCV-covered
 * portion of each relation, then fill in estimates for the remaining
 * (non-MCV) population.  Without both MCV lists, fall back to
 * MIN(1/nd1, 1/nd2) scaled by the non-null fractions.
 */
2079 	Form_pg_statistic stats1 = NULL;
2080 	Form_pg_statistic stats2 = NULL;
2081 	bool		have_mcvs1 = false;
2082 	Datum	   *values1 = NULL;
2084 	float4	   *numbers1 = NULL;
2086 	bool		have_mcvs2 = false;
2087 	Datum	   *values2 = NULL;
2089 	float4	   *numbers2 = NULL;
2092 	nd1 = get_variable_numdistinct(vardata1, &isdefault1);
2093 	nd2 = get_variable_numdistinct(vardata2, &isdefault2);
2095 	if (HeapTupleIsValid(vardata1->statsTuple))
2097 		stats1 = (Form_pg_statistic) GETSTRUCT(vardata1->statsTuple);
2098 		have_mcvs1 = get_attstatsslot(vardata1->statsTuple,
2100 									  vardata1->atttypmod,
2104 									  &values1, &nvalues1,
2105 									  &numbers1, &nnumbers1);
2108 	if (HeapTupleIsValid(vardata2->statsTuple))
2110 		stats2 = (Form_pg_statistic) GETSTRUCT(vardata2->statsTuple);
2111 		have_mcvs2 = get_attstatsslot(vardata2->statsTuple,
2113 									  vardata2->atttypmod,
2117 									  &values2, &nvalues2,
2118 									  &numbers2, &nnumbers2);
2121 	if (have_mcvs1 && have_mcvs2)
2124 		 * We have most-common-value lists for both relations. Run through
2125 		 * the lists to see which MCVs actually join to each other with the
2126 		 * given operator. This allows us to determine the exact join
2127 		 * selectivity for the portion of the relations represented by the MCV
2128 		 * lists. We still have to estimate for the remaining population, but
2129 		 * in a skewed distribution this gives us a big leg up in accuracy.
2130 		 * For motivation see the analysis in Y. Ioannidis and S.
2131 		 * Christodoulakis, "On the propagation of errors in the size of join
2132 		 * results", Technical Report 1018, Computer Science Dept., University
2133 		 * of Wisconsin, Madison, March 1991 (available from ftp.cs.wisc.edu).
2138 		double		nullfrac1 = stats1->stanullfrac;
2139 		double		nullfrac2 = stats2->stanullfrac;
2140 		double		matchprodfreq,
2152 		fmgr_info(get_opcode(operator), &eqproc);
2153 		hasmatch1 = (bool *) palloc0(nvalues1 * sizeof(bool));
2154 		hasmatch2 = (bool *) palloc0(nvalues2 * sizeof(bool));
2157 		 * Note we assume that each MCV will match at most one member of the
2158 		 * other MCV list. If the operator isn't really equality, there could
2159 		 * be multiple matches --- but we don't look for them, both for speed
2160 		 * and because the math wouldn't add up...
2162 		matchprodfreq = 0.0;
		/* O(nvalues1 * nvalues2) pairwise comparison of the two MCV lists */
2164 		for (i = 0; i < nvalues1; i++)
2168 			for (j = 0; j < nvalues2; j++)
2172 				if (DatumGetBool(FunctionCall2Coll(&eqproc,
2173 												   DEFAULT_COLLATION_OID,
2177 					hasmatch1[i] = hasmatch2[j] = true;
2178 					matchprodfreq += numbers1[i] * numbers2[j];
2184 		CLAMP_PROBABILITY(matchprodfreq);
2185 		/* Sum up frequencies of matched and unmatched MCVs */
2186 		matchfreq1 = unmatchfreq1 = 0.0;
2187 		for (i = 0; i < nvalues1; i++)
2190 				matchfreq1 += numbers1[i];
2192 				unmatchfreq1 += numbers1[i];
2194 		CLAMP_PROBABILITY(matchfreq1);
2195 		CLAMP_PROBABILITY(unmatchfreq1);
2196 		matchfreq2 = unmatchfreq2 = 0.0;
2197 		for (i = 0; i < nvalues2; i++)
2200 				matchfreq2 += numbers2[i];
2202 				unmatchfreq2 += numbers2[i];
2204 		CLAMP_PROBABILITY(matchfreq2);
2205 		CLAMP_PROBABILITY(unmatchfreq2);
2210 		 * Compute total frequency of non-null values that are not in the MCV
2213 		otherfreq1 = 1.0 - nullfrac1 - matchfreq1 - unmatchfreq1;
2214 		otherfreq2 = 1.0 - nullfrac2 - matchfreq2 - unmatchfreq2;
2215 		CLAMP_PROBABILITY(otherfreq1);
2216 		CLAMP_PROBABILITY(otherfreq2);
2219 		 * We can estimate the total selectivity from the point of view of
2220 		 * relation 1 as: the known selectivity for matched MCVs, plus
2221 		 * unmatched MCVs that are assumed to match against random members of
2222 		 * relation 2's non-MCV population, plus non-MCV values that are
2223 		 * assumed to match against random members of relation 2's unmatched
2224 		 * MCVs plus non-MCV values.
2226 		totalsel1 = matchprodfreq;
2228 			totalsel1 += unmatchfreq1 * otherfreq2 / (nd2 - nvalues2);
2230 			totalsel1 += otherfreq1 * (otherfreq2 + unmatchfreq2) /
2232 		/* Same estimate from the point of view of relation 2. */
2233 		totalsel2 = matchprodfreq;
2235 			totalsel2 += unmatchfreq2 * otherfreq1 / (nd1 - nvalues1);
2237 			totalsel2 += otherfreq2 * (otherfreq1 + unmatchfreq1) /
2241 		 * Use the smaller of the two estimates. This can be justified in
2242 		 * essentially the same terms as given below for the no-stats case: to
2243 		 * a first approximation, we are estimating from the point of view of
2244 		 * the relation with smaller nd.
2246 		selec = (totalsel1 < totalsel2) ? totalsel1 : totalsel2;
2251 		 * We do not have MCV lists for both sides. Estimate the join
2252 		 * selectivity as MIN(1/nd1,1/nd2)*(1-nullfrac1)*(1-nullfrac2). This
2253 		 * is plausible if we assume that the join operator is strict and the
2254 		 * non-null values are about equally distributed: a given non-null
2255 		 * tuple of rel1 will join to either zero or N2*(1-nullfrac2)/nd2 rows
2256 		 * of rel2, so total join rows are at most
2257 		 * N1*(1-nullfrac1)*N2*(1-nullfrac2)/nd2 giving a join selectivity of
2258 		 * not more than (1-nullfrac1)*(1-nullfrac2)/nd2. By the same logic it
2259 		 * is not more than (1-nullfrac1)*(1-nullfrac2)/nd1, so the expression
2260 		 * with MIN() is an upper bound. Using the MIN() means we estimate
2261 		 * from the point of view of the relation with smaller nd (since the
2262 		 * larger nd is determining the MIN). It is reasonable to assume that
2263 		 * most tuples in this rel will have join partners, so the bound is
2264 		 * probably reasonably tight and should be taken as-is.
2266 		 * XXX Can we be smarter if we have an MCV list for just one side? It
2267 		 * seems that if we assume equal distribution for the other side, we
2268 		 * end up with the same answer anyway.
2270 		double		nullfrac1 = stats1 ? stats1->stanullfrac : 0.0;
2271 		double		nullfrac2 = stats2 ? stats2->stanullfrac : 0.0;
2273 		selec = (1.0 - nullfrac1) * (1.0 - nullfrac2);
2281 		free_attstatsslot(vardata1->atttype, values1, nvalues1,
2282 						  numbers1, nnumbers1);
2284 		free_attstatsslot(vardata2->atttype, values2, nvalues2,
2285 						  numbers2, nnumbers2);
2291 * eqjoinsel_semi --- eqjoinsel for semi join
2293 * (Also used for anti join, which we are supposed to estimate the same way.)
2294 * Caller has ensured that vardata1 is the LHS variable.
2297 eqjoinsel_semi(Oid operator,
2298 			   VariableStatData *vardata1, VariableStatData *vardata2,
2299 			   RelOptInfo *inner_rel)
/*
 * eqjoinsel for SEMI (and ANTI) joins; vardata1 must be the LHS variable.
 *
 * The result is the estimated fraction of LHS rows that have at least one
 * RHS join partner.  With MCV lists on both sides, the matched-MCV
 * frequency gives a firm lower bound and the remaining "uncertain" rows
 * are estimated via the nd2/nd1 ratio; without MCVs, only that ratio
 * heuristic is used.  Note that the operator may be InvalidOid here (when
 * eqjoinsel passed get_commutator of a non-commutable operator), which
 * disables the MCV path.
 */
2306 	Form_pg_statistic stats1 = NULL;
2307 	bool		have_mcvs1 = false;
2308 	Datum	   *values1 = NULL;
2310 	float4	   *numbers1 = NULL;
2312 	bool		have_mcvs2 = false;
2313 	Datum	   *values2 = NULL;
2315 	float4	   *numbers2 = NULL;
2318 	nd1 = get_variable_numdistinct(vardata1, &isdefault1);
2319 	nd2 = get_variable_numdistinct(vardata2, &isdefault2);
2322 	 * We clamp nd2 to be not more than what we estimate the inner relation's
2323 	 * size to be. This is intuitively somewhat reasonable since obviously
2324 	 * there can't be more than that many distinct values coming from the
2325 	 * inner rel. The reason for the asymmetry (ie, that we don't clamp nd1
2326 	 * likewise) is that this is the only pathway by which restriction clauses
2327 	 * applied to the inner rel will affect the join result size estimate,
2328 	 * since set_joinrel_size_estimates will multiply SEMI/ANTI selectivity by
2329 	 * only the outer rel's size. If we clamped nd1 we'd be double-counting
2330 	 * the selectivity of outer-rel restrictions.
2332 	 * We can apply this clamping both with respect to the base relation from
2333 	 * which the join variable comes (if there is just one), and to the
2334 	 * immediate inner input relation of the current join.
2337 		nd2 = Min(nd2, vardata2->rel->rows);
2338 	nd2 = Min(nd2, inner_rel->rows);
2340 	if (HeapTupleIsValid(vardata1->statsTuple))
2342 		stats1 = (Form_pg_statistic) GETSTRUCT(vardata1->statsTuple);
2343 		have_mcvs1 = get_attstatsslot(vardata1->statsTuple,
2345 									  vardata1->atttypmod,
2349 									  &values1, &nvalues1,
2350 									  &numbers1, &nnumbers1);
2353 	if (HeapTupleIsValid(vardata2->statsTuple))
2355 		have_mcvs2 = get_attstatsslot(vardata2->statsTuple,
2357 									  vardata2->atttypmod,
2361 									  &values2, &nvalues2,
2362 									  &numbers2, &nnumbers2);
2365 	if (have_mcvs1 && have_mcvs2 && OidIsValid(operator))
2368 		 * We have most-common-value lists for both relations. Run through
2369 		 * the lists to see which MCVs actually join to each other with the
2370 		 * given operator. This allows us to determine the exact join
2371 		 * selectivity for the portion of the relations represented by the MCV
2372 		 * lists. We still have to estimate for the remaining population, but
2373 		 * in a skewed distribution this gives us a big leg up in accuracy.
2378 		double		nullfrac1 = stats1->stanullfrac;
2387 		 * The clamping above could have resulted in nd2 being less than
2388 		 * nvalues2; in which case, we assume that precisely the nd2 most
2389 		 * common values in the relation will appear in the join input, and so
2390 		 * compare to only the first nd2 members of the MCV list. Of course
2391 		 * this is frequently wrong, but it's the best bet we can make.
2393 		clamped_nvalues2 = Min(nvalues2, nd2);
2395 		fmgr_info(get_opcode(operator), &eqproc);
2396 		hasmatch1 = (bool *) palloc0(nvalues1 * sizeof(bool));
2397 		hasmatch2 = (bool *) palloc0(clamped_nvalues2 * sizeof(bool));
2400 		 * Note we assume that each MCV will match at most one member of the
2401 		 * other MCV list. If the operator isn't really equality, there could
2402 		 * be multiple matches --- but we don't look for them, both for speed
2403 		 * and because the math wouldn't add up...
2406 		for (i = 0; i < nvalues1; i++)
2410 			for (j = 0; j < clamped_nvalues2; j++)
2414 				if (DatumGetBool(FunctionCall2Coll(&eqproc,
2415 												   DEFAULT_COLLATION_OID,
2419 					hasmatch1[i] = hasmatch2[j] = true;
2425 		/* Sum up frequencies of matched MCVs */
2427 		for (i = 0; i < nvalues1; i++)
2430 				matchfreq1 += numbers1[i];
2432 		CLAMP_PROBABILITY(matchfreq1);
2437 		 * Now we need to estimate the fraction of relation 1 that has at
2438 		 * least one join partner. We know for certain that the matched MCVs
2439 		 * do, so that gives us a lower bound, but we're really in the dark
2440 		 * about everything else. Our crude approach is: if nd1 <= nd2 then
2441 		 * assume all non-null rel1 rows have join partners, else assume for
2442 		 * the uncertain rows that a fraction nd2/nd1 have join partners. We
2443 		 * can discount the known-matched MCVs from the distinct-values counts
2444 		 * before doing the division.
2446 		 * Crude as the above is, it's completely useless if we don't have
2447 		 * reliable ndistinct values for both sides. Hence, if either nd1 or
2448 		 * nd2 is default, punt and assume half of the uncertain rows have
2451 		if (!isdefault1 && !isdefault2)
2455 			if (nd1 <= nd2 || nd2 < 0)
2456 				uncertainfrac = 1.0;
2458 				uncertainfrac = nd2 / nd1;
			/* default (unreliable) ndistinct on either side: assume 50% */
2461 			uncertainfrac = 0.5;
2462 		uncertain = 1.0 - matchfreq1 - nullfrac1;
2463 		CLAMP_PROBABILITY(uncertain);
2464 		selec = matchfreq1 + uncertainfrac * uncertain;
2469 		 * Without MCV lists for both sides, we can only use the heuristic
2472 		double		nullfrac1 = stats1 ? stats1->stanullfrac : 0.0;
2474 		if (!isdefault1 && !isdefault2)
2476 			if (nd1 <= nd2 || nd2 < 0)
2477 				selec = 1.0 - nullfrac1;
2479 				selec = (nd2 / nd1) * (1.0 - nullfrac1);
2482 			selec = 0.5 * (1.0 - nullfrac1);
2486 		free_attstatsslot(vardata1->atttype, values1, nvalues1,
2487 						  numbers1, nnumbers1);
2489 		free_attstatsslot(vardata2->atttype, values2, nvalues2,
2490 						  numbers2, nnumbers2);
2496 * neqjoinsel - Join selectivity of "!="
2499 neqjoinsel(PG_FUNCTION_ARGS)
/*
 * Join selectivity of "!=": computed as 1 - eqjoinsel() of the operator's
 * negator (the corresponding "=" operator).  If no negator exists, fall
 * back to 1 - DEFAULT_EQ_SEL.
 */
2501 	PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
2502 	Oid			operator = PG_GETARG_OID(1);
2503 	List	   *args = (List *) PG_GETARG_POINTER(2);
2504 	JoinType	jointype = (JoinType) PG_GETARG_INT16(3);
2505 	SpecialJoinInfo *sjinfo = (SpecialJoinInfo *) PG_GETARG_POINTER(4);
2510 	 * We want 1 - eqjoinsel() where the equality operator is the one
2511 	 * associated with this != operator, that is, its negator.
2513 	eqop = get_negator(operator);
2516 		result = DatumGetFloat8(DirectFunctionCall5(eqjoinsel,
2517 													PointerGetDatum(root),
2518 													ObjectIdGetDatum(eqop),
2519 													PointerGetDatum(args),
2520 													Int16GetDatum(jointype),
2521 													PointerGetDatum(sjinfo)));
2525 		/* Use default selectivity (should we raise an error instead?) */
2526 		result = DEFAULT_EQ_SEL;
2528 	result = 1.0 - result;
2529 	PG_RETURN_FLOAT8(result);
2533 * scalarltjoinsel - Join selectivity of "<" and "<=" for scalars
2536 scalarltjoinsel(PG_FUNCTION_ARGS)
/* No useful statistics-based estimate for join inequalities: return the default. */
2538 	PG_RETURN_FLOAT8(DEFAULT_INEQ_SEL);
2542 * scalargtjoinsel - Join selectivity of ">" and ">=" for scalars
2545 scalargtjoinsel(PG_FUNCTION_ARGS)
/* Same as scalarltjoinsel: a flat default guess for ">"/">=" join clauses. */
2547 	PG_RETURN_FLOAT8(DEFAULT_INEQ_SEL);
2551 * patternjoinsel - Generic code for pattern-match join selectivity.
2554 patternjoinsel(PG_FUNCTION_ARGS, Pattern_Type ptype, bool negate)
/*
 * Common code for all pattern-match join selectivity estimators.
 * No real estimation is attempted (ptype is currently unused): return the
 * default match selectivity, complemented when negate is set.
 */
2556 	/* For the moment we just punt. */
2557 	return negate ? (1.0 - DEFAULT_MATCH_SEL) : DEFAULT_MATCH_SEL;
2561 * regexeqjoinsel - Join selectivity of regular-expression pattern match.
2564 regexeqjoinsel(PG_FUNCTION_ARGS)
/* Thin wrapper: delegate to patternjoinsel() for '~' (regex match, no negation). */
2566 	PG_RETURN_FLOAT8(patternjoinsel(fcinfo, Pattern_Type_Regex, false));
2570 * icregexeqjoinsel - Join selectivity of case-insensitive regex match.
2573 icregexeqjoinsel(PG_FUNCTION_ARGS)
/* Thin wrapper: delegate to patternjoinsel() for '~*' (case-insensitive regex). */
2575 	PG_RETURN_FLOAT8(patternjoinsel(fcinfo, Pattern_Type_Regex_IC, false));
2579 * likejoinsel - Join selectivity of LIKE pattern match.
2582 likejoinsel(PG_FUNCTION_ARGS)
/* Thin wrapper: delegate to patternjoinsel() for LIKE (no negation). */
2584 	PG_RETURN_FLOAT8(patternjoinsel(fcinfo, Pattern_Type_Like, false));
2588 * iclikejoinsel - Join selectivity of ILIKE pattern match.
2591 iclikejoinsel(PG_FUNCTION_ARGS)
/* Thin wrapper: delegate to patternjoinsel() for ILIKE (no negation). */
2593 	PG_RETURN_FLOAT8(patternjoinsel(fcinfo, Pattern_Type_Like_IC, false));
2597 * regexnejoinsel - Join selectivity of regex non-match.
2600 regexnejoinsel(PG_FUNCTION_ARGS)
/* Thin wrapper: patternjoinsel() with negate = true for regex non-match. */
2602 	PG_RETURN_FLOAT8(patternjoinsel(fcinfo, Pattern_Type_Regex, true));
2606 * icregexnejoinsel - Join selectivity of case-insensitive regex non-match.
2609 icregexnejoinsel(PG_FUNCTION_ARGS)
/* Thin wrapper: patternjoinsel() with negate = true for case-insensitive regex non-match. */
2611 	PG_RETURN_FLOAT8(patternjoinsel(fcinfo, Pattern_Type_Regex_IC, true));
2615 * nlikejoinsel - Join selectivity of LIKE pattern non-match.
2618 nlikejoinsel(PG_FUNCTION_ARGS)
/* Thin wrapper: patternjoinsel() with negate = true for NOT LIKE. */
2620 	PG_RETURN_FLOAT8(patternjoinsel(fcinfo, Pattern_Type_Like, true));
2624 * icnlikejoinsel - Join selectivity of ILIKE pattern non-match.
2627 icnlikejoinsel(PG_FUNCTION_ARGS)
/* Thin wrapper: patternjoinsel() with negate = true for NOT ILIKE. */
2629 	PG_RETURN_FLOAT8(patternjoinsel(fcinfo, Pattern_Type_Like_IC, true));
2633 * mergejoinscansel - Scan selectivity of merge join.
2635 * A merge join will stop as soon as it exhausts either input stream.
2636 * Therefore, if we can estimate the ranges of both input variables,
2637 * we can estimate how much of the input will actually be read. This
2638 * can have a considerable impact on the cost when using indexscans.
2640 * Also, we can estimate how much of each input has to be read before the
2641 * first join pair is found, which will affect the join's startup time.
2643 * clause should be a clause already known to be mergejoinable. opfamily,
2644 * strategy, and nulls_first specify the sort ordering being used.
2647 * *leftstart is set to the fraction of the left-hand variable expected
2648 * to be scanned before the first join pair is found (0 to 1).
2649 * *leftend is set to the fraction of the left-hand variable expected
2650 * to be scanned before the join terminates (0 to 1).
2651 * *rightstart, *rightend similarly for the right-hand variable.
2654 mergejoinscansel(PlannerInfo *root, Node *clause,
2655 Oid opfamily, int strategy, bool nulls_first,
2656 Selectivity *leftstart, Selectivity *leftend,
2657 Selectivity *rightstart, Selectivity *rightend)
2661 VariableStatData leftvar,
/* Conservative defaults: scan everything (start = 0, end = 1) on both sides. */
2682 /* Set default results if we can't figure anything out. */
2683 /* XXX should default "start" fraction be a bit more than 0? */
2684 *leftstart = *rightstart = 0.0;
2685 *leftend = *rightend = 1.0;
2687 /* Deconstruct the merge clause */
2688 if (!is_opclause(clause))
2689 return; /* shouldn't happen */
2690 opno = ((OpExpr *) clause)->opno;
2691 left = get_leftop((Expr *) clause);
2692 right = get_rightop((Expr *) clause);
2694 return; /* shouldn't happen */
2696 /* Look for stats for the inputs */
2697 examine_variable(root, left, 0, &leftvar);
2698 examine_variable(root, right, 0, &rightvar);
2700 /* Extract the operator's declared left/right datatypes */
2701 get_op_opfamily_properties(opno, opfamily, false,
2705 Assert(op_strategy == BTEqualStrategyNumber);
2708 * Look up the various operators we need. If we don't find them all, it
2709 * probably means the opfamily is broken, but we just fail silently.
2711 * Note: we expect that pg_statistic histograms will be sorted by the '<'
2712 * operator, regardless of which sort direction we are considering.
/* Ascending-order case: comparisons use the '<' / '<=' members of the opfamily. */
2716 case BTLessStrategyNumber:
2718 if (op_lefttype == op_righttype)
2721 ltop = get_opfamily_member(opfamily,
2722 op_lefttype, op_righttype,
2723 BTLessStrategyNumber);
2724 leop = get_opfamily_member(opfamily,
2725 op_lefttype, op_righttype,
2726 BTLessEqualStrategyNumber);
/* Cross-type case: also need per-side sort operators and the reverse pair. */
2736 ltop = get_opfamily_member(opfamily,
2737 op_lefttype, op_righttype,
2738 BTLessStrategyNumber);
2739 leop = get_opfamily_member(opfamily,
2740 op_lefttype, op_righttype,
2741 BTLessEqualStrategyNumber);
2742 lsortop = get_opfamily_member(opfamily,
2743 op_lefttype, op_lefttype,
2744 BTLessStrategyNumber);
2745 rsortop = get_opfamily_member(opfamily,
2746 op_righttype, op_righttype,
2747 BTLessStrategyNumber);
2750 revltop = get_opfamily_member(opfamily,
2751 op_righttype, op_lefttype,
2752 BTLessStrategyNumber);
2753 revleop = get_opfamily_member(opfamily,
2754 op_righttype, op_lefttype,
2755 BTLessEqualStrategyNumber);
2758 case BTGreaterStrategyNumber:
2759 /* descending-order case */
2761 if (op_lefttype == op_righttype)
2764 ltop = get_opfamily_member(opfamily,
2765 op_lefttype, op_righttype,
2766 BTGreaterStrategyNumber);
2767 leop = get_opfamily_member(opfamily,
2768 op_lefttype, op_righttype,
2769 BTGreaterEqualStrategyNumber);
/* Histogram stats are still read via the '<' operator (per note above). */
2772 lstatop = get_opfamily_member(opfamily,
2773 op_lefttype, op_lefttype,
2774 BTLessStrategyNumber);
2781 ltop = get_opfamily_member(opfamily,
2782 op_lefttype, op_righttype,
2783 BTGreaterStrategyNumber);
2784 leop = get_opfamily_member(opfamily,
2785 op_lefttype, op_righttype,
2786 BTGreaterEqualStrategyNumber);
2787 lsortop = get_opfamily_member(opfamily,
2788 op_lefttype, op_lefttype,
2789 BTGreaterStrategyNumber);
2790 rsortop = get_opfamily_member(opfamily,
2791 op_righttype, op_righttype,
2792 BTGreaterStrategyNumber);
2793 lstatop = get_opfamily_member(opfamily,
2794 op_lefttype, op_lefttype,
2795 BTLessStrategyNumber);
2796 rstatop = get_opfamily_member(opfamily,
2797 op_righttype, op_righttype,
2798 BTLessStrategyNumber);
2799 revltop = get_opfamily_member(opfamily,
2800 op_righttype, op_lefttype,
2801 BTGreaterStrategyNumber);
2802 revleop = get_opfamily_member(opfamily,
2803 op_righttype, op_lefttype,
2804 BTGreaterEqualStrategyNumber);
2808 goto fail; /* shouldn't get here */
/* If any required operator is missing from the opfamily, give up silently. */
2811 if (!OidIsValid(lsortop) ||
2812 !OidIsValid(rsortop) ||
2813 !OidIsValid(lstatop) ||
2814 !OidIsValid(rstatop) ||
2815 !OidIsValid(ltop) ||
2816 !OidIsValid(leop) ||
2817 !OidIsValid(revltop) ||
2818 !OidIsValid(revleop))
2819 goto fail; /* insufficient info in catalogs */
2821 /* Try to get ranges of both inputs */
2824 if (!get_variable_range(root, &leftvar, lstatop,
2825 &leftmin, &leftmax))
2826 goto fail; /* no range available from stats */
2827 if (!get_variable_range(root, &rightvar, rstatop,
2828 &rightmin, &rightmax))
2829 goto fail; /* no range available from stats */
/* Descending order: stats give ascending bounds, so min/max swap roles. */
2833 /* need to swap the max and min */
2834 if (!get_variable_range(root, &leftvar, lstatop,
2835 &leftmax, &leftmin))
2836 goto fail; /* no range available from stats */
2837 if (!get_variable_range(root, &rightvar, rstatop,
2838 &rightmax, &rightmin))
2839 goto fail; /* no range available from stats */
2843 * Now, the fraction of the left variable that will be scanned is the
2844 * fraction that's <= the right-side maximum value. But only believe
2845 * non-default estimates, else stick with our 1.0.
2847 selec = scalarineqsel(root, leop, isgt, &leftvar,
2848 rightmax, op_righttype);
2849 if (selec != DEFAULT_INEQ_SEL)
2852 /* And similarly for the right variable. */
2853 selec = scalarineqsel(root, revleop, isgt, &rightvar,
2854 leftmax, op_lefttype);
2855 if (selec != DEFAULT_INEQ_SEL)
2859 * Only one of the two "end" fractions can really be less than 1.0;
2860 * believe the smaller estimate and reset the other one to exactly 1.0. If
2861 * we get exactly equal estimates (as can easily happen with self-joins),
2864 if (*leftend > *rightend)
2866 else if (*leftend < *rightend)
2869 *leftend = *rightend = 1.0;
2872 * Also, the fraction of the left variable that will be scanned before the
2873 * first join pair is found is the fraction that's < the right-side
2874 * minimum value. But only believe non-default estimates, else stick with
2877 selec = scalarineqsel(root, ltop, isgt, &leftvar,
2878 rightmin, op_righttype);
2879 if (selec != DEFAULT_INEQ_SEL)
2882 /* And similarly for the right variable. */
2883 selec = scalarineqsel(root, revltop, isgt, &rightvar,
2884 leftmin, op_lefttype);
2885 if (selec != DEFAULT_INEQ_SEL)
2886 *rightstart = selec;
2889 * Only one of the two "start" fractions can really be more than zero;
2890 * believe the larger estimate and reset the other one to exactly 0.0. If
2891 * we get exactly equal estimates (as can easily happen with self-joins),
2894 if (*leftstart < *rightstart)
2896 else if (*leftstart > *rightstart)
2899 *leftstart = *rightstart = 0.0;
2902 * If the sort order is nulls-first, we're going to have to skip over any
2903 * nulls too. These would not have been counted by scalarineqsel, and we
2904 * can safely add in this fraction regardless of whether we believe
2905 * scalarineqsel's results or not. But be sure to clamp the sum to 1.0!
2909 Form_pg_statistic stats;
2911 if (HeapTupleIsValid(leftvar.statsTuple))
2913 stats = (Form_pg_statistic) GETSTRUCT(leftvar.statsTuple);
2914 *leftstart += stats->stanullfrac;
2915 CLAMP_PROBABILITY(*leftstart);
2916 *leftend += stats->stanullfrac;
2917 CLAMP_PROBABILITY(*leftend);
2919 if (HeapTupleIsValid(rightvar.statsTuple))
2921 stats = (Form_pg_statistic) GETSTRUCT(rightvar.statsTuple);
2922 *rightstart += stats->stanullfrac;
2923 CLAMP_PROBABILITY(*rightstart);
2924 *rightend += stats->stanullfrac;
2925 CLAMP_PROBABILITY(*rightend);
2929 /* Disbelieve start >= end, just in case that can happen */
2930 if (*leftstart >= *leftend)
2935 if (*rightstart >= *rightend)
/* NOTE(review): the "fail:" label targeted by the goto's presumably lands here. */
2942 ReleaseVariableStats(leftvar);
2943 ReleaseVariableStats(rightvar);
2948 * Helper routine for estimate_num_groups: add an item to a list of
2949 * GroupVarInfos, but only if it's not known equal to any of the existing
2954 Node *var; /* might be an expression, not just a Var */
2955 RelOptInfo *rel; /* relation it belongs to */
2956 double ndistinct; /* # distinct values */
2960 add_unique_group_var(PlannerInfo *root, List *varinfos,
2961 Node *var, VariableStatData *vardata)
2963 GroupVarInfo *varinfo;
2968 ndistinct = get_variable_numdistinct(vardata, &isdefault);
2970 /* cannot use foreach here because of possible list_delete */
2971 lc = list_head(varinfos);
2974 varinfo = (GroupVarInfo *) lfirst(lc);
2976 /* must advance lc before list_delete possibly pfree's it */
2979 /* Drop exact duplicates */
2980 if (equal(var, varinfo->var))
2984 * Drop known-equal vars, but only if they belong to different
2985 * relations (see comments for estimate_num_groups)
2987 if (vardata->rel != varinfo->rel &&
2988 exprs_known_equal(root, var, varinfo->var))
/* Keep whichever of the known-equal pair has the smaller ndistinct. */
2990 if (varinfo->ndistinct <= ndistinct)
2992 /* Keep older item, forget new one */
2997 /* Delete the older item */
2998 varinfos = list_delete_ptr(varinfos, varinfo);
/* Not a duplicate: append a freshly palloc'd GroupVarInfo for this var. */
3003 varinfo = (GroupVarInfo *) palloc(sizeof(GroupVarInfo));
3006 varinfo->rel = vardata->rel;
3007 varinfo->ndistinct = ndistinct;
3008 varinfos = lappend(varinfos, varinfo);
3013 * estimate_num_groups - Estimate number of groups in a grouped query
3015 * Given a query having a GROUP BY clause, estimate how many groups there
3016 * will be --- ie, the number of distinct combinations of the GROUP BY
3019 * This routine is also used to estimate the number of rows emitted by
3020 * a DISTINCT filtering step; that is an isomorphic problem. (Note:
3021 * actually, we only use it for DISTINCT when there's no grouping or
3022 * aggregation ahead of the DISTINCT.)
3026 * groupExprs - list of expressions being grouped by
3027 * input_rows - number of rows estimated to arrive at the group/unique
3030 * Given the lack of any cross-correlation statistics in the system, it's
3031 * impossible to do anything really trustworthy with GROUP BY conditions
3032 * involving multiple Vars. We should however avoid assuming the worst
3033 * case (all possible cross-product terms actually appear as groups) since
3034 * very often the grouped-by Vars are highly correlated. Our current approach
3036 * 1. Expressions yielding boolean are assumed to contribute two groups,
3037 * independently of their content, and are ignored in the subsequent
3038 * steps. This is mainly because tests like "col IS NULL" break the
3039 * heuristic used in step 2 especially badly.
3040 * 2. Reduce the given expressions to a list of unique Vars used. For
3041 * example, GROUP BY a, a + b is treated the same as GROUP BY a, b.
3042 * It is clearly correct not to count the same Var more than once.
3043 * It is also reasonable to treat f(x) the same as x: f() cannot
3044 * increase the number of distinct values (unless it is volatile,
3045 * which we consider unlikely for grouping), but it probably won't
3046 * reduce the number of distinct values much either.
3047 * As a special case, if a GROUP BY expression can be matched to an
3048 * expressional index for which we have statistics, then we treat the
3049 * whole expression as though it were just a Var.
3050 * 3. If the list contains Vars of different relations that are known equal
3051 * due to equivalence classes, then drop all but one of the Vars from each
3052 * known-equal set, keeping the one with smallest estimated # of values
3053 * (since the extra values of the others can't appear in joined rows).
3054 * Note the reason we only consider Vars of different relations is that
3055 * if we considered ones of the same rel, we'd be double-counting the
3056 * restriction selectivity of the equality in the next step.
3057 * 4. For Vars within a single source rel, we multiply together the numbers
3058 * of values, clamp to the number of rows in the rel (divided by 10 if
3059 * more than one Var), and then multiply by the selectivity of the
3060 * restriction clauses for that rel. When there's more than one Var,
3061 * the initial product is probably too high (it's the worst case) but
3062 * clamping to a fraction of the rel's rows seems to be a helpful
3063 * heuristic for not letting the estimate get out of hand. (The factor
3064 * of 10 is derived from pre-Postgres-7.4 practice.) Multiplying
3065 * by the restriction selectivity is effectively assuming that the
3066 * restriction clauses are independent of the grouping, which is a crummy
3067 * assumption, but it's hard to do better.
3068 * 5. If there are Vars from multiple rels, we repeat step 4 for each such
3069 * rel, and multiply the results together.
3070 * Note that rels not containing grouped Vars are ignored completely, as are
3071 * join clauses. Such rels cannot increase the number of groups, and we
3072 * assume such clauses do not reduce the number either (somewhat bogus,
3073 * but we don't have the info to do better).
3076 estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows)
3078 List *varinfos = NIL;
3082 /* We should not be called unless query has GROUP BY (or DISTINCT) */
3083 Assert(groupExprs != NIL);
3086 * Count groups derived from boolean grouping expressions. For other
3087 * expressions, find the unique Vars used, treating an expression as a Var
3088 * if we can find stats for it. For each one, record the statistical
3089 * estimate of number of distinct values (total in its table, without
3090 * regard for filtering).
3094 foreach(l, groupExprs)
3096 Node *groupexpr = (Node *) lfirst(l);
3097 VariableStatData vardata;
3101 /* Short-circuit for expressions returning boolean */
3102 if (exprType(groupexpr) == BOOLOID)
3109 * If examine_variable is able to deduce anything about the GROUP BY
3110 * expression, treat it as a single variable even if it's really more
3113 examine_variable(root, groupexpr, 0, &vardata);
3114 if (HeapTupleIsValid(vardata.statsTuple) || vardata.isunique)
3116 varinfos = add_unique_group_var(root, varinfos,
3117 groupexpr, &vardata);
3118 ReleaseVariableStats(vardata);
3121 ReleaseVariableStats(vardata);
3124 * Else pull out the component Vars. Handle PlaceHolderVars by
3125 * recursing into their arguments (effectively assuming that the
3126 * PlaceHolderVar doesn't change the number of groups, which boils
3127 * down to ignoring the possible addition of nulls to the result set).
3129 varshere = pull_var_clause(groupexpr,
3130 PVC_RECURSE_AGGREGATES,
3131 PVC_RECURSE_PLACEHOLDERS);
3134 * If we find any variable-free GROUP BY item, then either it is a
3135 * constant (and we can ignore it) or it contains a volatile function;
3136 * in the latter case we punt and assume that each input row will
3137 * yield a distinct group.
3139 if (varshere == NIL)
3141 if (contain_volatile_functions(groupexpr))
3147 * Else add variables to varinfos list
3149 foreach(l2, varshere)
3151 Node *var = (Node *) lfirst(l2);
3153 examine_variable(root, var, 0, &vardata);
3154 varinfos = add_unique_group_var(root, varinfos, var, &vardata);
3155 ReleaseVariableStats(vardata);
3160 * If now no Vars, we must have an all-constant or all-boolean GROUP BY
3163 if (varinfos == NIL)
3165 /* Guard against out-of-range answers */
3166 if (numdistinct > input_rows)
3167 numdistinct = input_rows;
3172 * Group Vars by relation and estimate total numdistinct.
3174 * For each iteration of the outer loop, we process the frontmost Var in
3175 * varinfos, plus all other Vars in the same relation. We remove these
3176 * Vars from the newvarinfos list for the next iteration. This is the
3177 * easiest way to group Vars of same rel together.
3181 GroupVarInfo *varinfo1 = (GroupVarInfo *) linitial(varinfos);
3182 RelOptInfo *rel = varinfo1->rel;
3183 double reldistinct = varinfo1->ndistinct;
3184 double relmaxndistinct = reldistinct;
3185 int relvarcount = 1;
3186 List *newvarinfos = NIL;
3189 * Get the product of numdistinct estimates of the Vars for this rel.
3190 * Also, construct new varinfos list of remaining Vars.
3192 for_each_cell(l, lnext(list_head(varinfos)))
3194 GroupVarInfo *varinfo2 = (GroupVarInfo *) lfirst(l);
3196 if (varinfo2->rel == varinfo1->rel)
3198 reldistinct *= varinfo2->ndistinct;
3199 if (relmaxndistinct < varinfo2->ndistinct)
3200 relmaxndistinct = varinfo2->ndistinct;
3205 /* not time to process varinfo2 yet */
3206 newvarinfos = lcons(varinfo2, newvarinfos);
3211 * Sanity check --- don't divide by zero if empty relation.
3213 Assert(rel->reloptkind == RELOPT_BASEREL);
3214 if (rel->tuples > 0)
3217 * Clamp to size of rel, or size of rel / 10 if multiple Vars. The
3218 * fudge factor is because the Vars are probably correlated but we
3219 * don't know by how much. We should never clamp to less than the
3220 * largest ndistinct value for any of the Vars, though, since
3221 * there will surely be at least that many groups.
3223 double clamp = rel->tuples;
3225 if (relvarcount > 1)
3228 if (clamp < relmaxndistinct)
3230 clamp = relmaxndistinct;
3231 /* for sanity in case some ndistinct is too large: */
3232 if (clamp > rel->tuples)
3233 clamp = rel->tuples;
3236 if (reldistinct > clamp)
3237 reldistinct = clamp;
3240 * Multiply by restriction selectivity.
3242 reldistinct *= rel->rows / rel->tuples;
3245 * Update estimate of total distinct groups.
3247 numdistinct *= reldistinct;
3250 varinfos = newvarinfos;
3251 } while (varinfos != NIL);
/* Round up: group counts are integral. */
3253 numdistinct = ceil(numdistinct);
3255 /* Guard against out-of-range answers */
3256 if (numdistinct > input_rows)
3257 numdistinct = input_rows;
3258 if (numdistinct < 1.0)
3265 * Estimate hash bucketsize fraction (ie, number of entries in a bucket
3266 * divided by total tuples in relation) if the specified expression is used
3269 * XXX This is really pretty bogus since we're effectively assuming that the
3270 * distribution of hash keys will be the same after applying restriction
3271 * clauses as it was in the underlying relation. However, we are not nearly
3272 * smart enough to figure out how the restrict clauses might change the
3273 * distribution, so this will have to do for now.
3275 * We are passed the number of buckets the executor will use for the given
3276 * input relation. If the data were perfectly distributed, with the same
3277 * number of tuples going into each available bucket, then the bucketsize
3278 * fraction would be 1/nbuckets. But this happy state of affairs will occur
3279 * only if (a) there are at least nbuckets distinct data values, and (b)
3280 * we have a not-too-skewed data distribution. Otherwise the buckets will
3281 * be nonuniformly occupied. If the other relation in the join has a key
3282 * distribution similar to this one's, then the most-loaded buckets are
3283 * exactly those that will be probed most often. Therefore, the "average"
3284 * bucket size for costing purposes should really be taken as something close
3285 * to the "worst case" bucket size. We try to estimate this by adjusting the
3286 * fraction if there are too few distinct data values, and then scaling up
3287 * by the ratio of the most common value's frequency to the average frequency.
3289 * If no statistics are available, use a default estimate of 0.1. This will
3290 * discourage use of a hash rather strongly if the inner relation is large,
3291 * which is what we want. We do not want to hash unless we know that the
3292 * inner rel is well-dispersed (or the alternatives seem much worse).
3295 estimate_hash_bucketsize(PlannerInfo *root, Node *hashkey, double nbuckets)
3297 VariableStatData vardata;
3307 examine_variable(root, hashkey, 0, &vardata);
3309 /* Get number of distinct values */
3310 ndistinct = get_variable_numdistinct(&vardata, &isdefault);
3312 /* If ndistinct isn't real, punt and return 0.1, per comments above */
3315 ReleaseVariableStats(vardata);
3316 return (Selectivity) 0.1;
3319 /* Get fraction that are null */
3320 if (HeapTupleIsValid(vardata.statsTuple))
3322 Form_pg_statistic stats;
3324 stats = (Form_pg_statistic) GETSTRUCT(vardata.statsTuple);
3325 stanullfrac = stats->stanullfrac;
3330 /* Compute avg freq of all distinct data values in raw relation */
3331 avgfreq = (1.0 - stanullfrac) / ndistinct;
3334 * Adjust ndistinct to account for restriction clauses. Observe we are
3335 * assuming that the data distribution is affected uniformly by the
3336 * restriction clauses!
3338 * XXX Possibly better way, but much more expensive: multiply by
3339 * selectivity of rel's restriction clauses that mention the target Var.
3342 ndistinct *= vardata.rel->rows / vardata.rel->tuples;
3345 * Initial estimate of bucketsize fraction is 1/nbuckets as long as the
3346 * number of buckets is less than the expected number of distinct values;
3347 * otherwise it is 1/ndistinct.
3349 if (ndistinct > nbuckets)
3350 estfract = 1.0 / nbuckets;
3352 estfract = 1.0 / ndistinct;
3355 * Look up the frequency of the most common value, if available.
3359 if (HeapTupleIsValid(vardata.statsTuple))
3361 if (get_attstatsslot(vardata.statsTuple,
3362 vardata.atttype, vardata.atttypmod,
3363 STATISTIC_KIND_MCV, InvalidOid,
3366 &numbers, &nnumbers))
3369 * The first MCV stat is for the most common value.
3372 mcvfreq = numbers[0];
/* Release the stats slot; no Datum values were requested, only numbers. */
3373 free_attstatsslot(vardata.atttype, NULL, 0,
3379 * Adjust estimated bucketsize upward to account for skewed distribution.
3381 if (avgfreq > 0.0 && mcvfreq > avgfreq)
3382 estfract *= mcvfreq / avgfreq;
3385 * Clamp bucketsize to sane range (the above adjustment could easily
3386 * produce an out-of-range result). We set the lower bound a little above
3387 * zero, since zero isn't a very sane result.
3389 if (estfract < 1.0e-6)
3391 else if (estfract > 1.0)
3394 ReleaseVariableStats(vardata);
3396 return (Selectivity) estfract;
3400 /*-------------------------------------------------------------------------
3404 *-------------------------------------------------------------------------
3409 * Convert non-NULL values of the indicated types to the comparison
3410 * scale needed by scalarineqsel().
3411 * Returns "true" if successful.
3413 * XXX this routine is a hack: ideally we should look up the conversion
3414 * subroutines in pg_type.
3416 * All numeric datatypes are simply converted to their equivalent
3417 * "double" values. (NUMERIC values that are outside the range of "double"
3418 * are clamped to +/- HUGE_VAL.)
3420 * String datatypes are converted by convert_string_to_scalar(),
3421 * which is explained below. The reason why this routine deals with
3422 * three values at a time, not just one, is that we need it for strings.
3424 * The bytea datatype is just enough different from strings that it has
3425 * to be treated separately.
3427 * The several datatypes representing absolute times are all converted
3428 * to Timestamp, which is actually a double, and then we just use that
3429 * double value. Note this will give correct results even for the "special"
3430 * values of Timestamp, since those are chosen to compare correctly;
3431 * see timestamp_cmp.
3433 * The several datatypes representing relative times (intervals) are all
3434 * converted to measurements expressed in seconds.
3437 convert_to_scalar(Datum value, Oid valuetypid, double *scaledvalue,
3438 Datum lobound, Datum hibound, Oid boundstypid,
3439 double *scaledlobound, double *scaledhibound)
3442 * Both the valuetypid and the boundstypid should exactly match the
3443 * declared input type(s) of the operator we are invoked for, so we just
3444 * error out if either is not recognized.
3446 * XXX The histogram we are interpolating between points of could belong
3447 * to a column that's only binary-compatible with the declared type. In
3448 * essence we are assuming that the semantics of binary-compatible types
3449 * are enough alike that we can use a histogram generated with one type's
3450 * operators to estimate selectivity for the other's. This is outright
3451 * wrong in some cases --- in particular signed versus unsigned
3452 * interpretation could trip us up. But it's useful enough in the
3453 * majority of cases that we do it anyway. Should think about more
3454 * rigorous ways to do it.
3459 * Built-in numeric types
3470 case REGPROCEDUREOID:
3472 case REGOPERATOROID:
3476 case REGDICTIONARYOID:
3477 *scaledvalue = convert_numeric_to_scalar(value, valuetypid);
3478 *scaledlobound = convert_numeric_to_scalar(lobound, boundstypid);
3479 *scaledhibound = convert_numeric_to_scalar(hibound, boundstypid);
3483 * Built-in string types
/* Strings: convert each Datum to a palloc'd C string first, then scale all three together. */
3491 char *valstr = convert_string_datum(value, valuetypid);
3492 char *lostr = convert_string_datum(lobound, boundstypid);
3493 char *histr = convert_string_datum(hibound, boundstypid);
3495 convert_string_to_scalar(valstr, scaledvalue,
3496 lostr, scaledlobound,
3497 histr, scaledhibound);
3505 * Built-in bytea type
3509 convert_bytea_to_scalar(value, scaledvalue,
3510 lobound, scaledlobound,
3511 hibound, scaledhibound);
3516 * Built-in time types
3519 case TIMESTAMPTZOID:
3527 *scaledvalue = convert_timevalue_to_scalar(value, valuetypid);
3528 *scaledlobound = convert_timevalue_to_scalar(lobound, boundstypid);
3529 *scaledhibound = convert_timevalue_to_scalar(hibound, boundstypid);
3533 * Built-in network types
3538 *scaledvalue = convert_network_to_scalar(value, valuetypid);
3539 *scaledlobound = convert_network_to_scalar(lobound, boundstypid);
3540 *scaledhibound = convert_network_to_scalar(hibound, boundstypid);
/* Unrecognized type: zero the outputs and report failure to the caller. */
3543 /* Don't know how to convert */
3544 *scaledvalue = *scaledlobound = *scaledhibound = 0;
3549 * Do convert_to_scalar()'s work for any numeric data type.
3552 convert_numeric_to_scalar(Datum value, Oid typid)
/* Each arm simply widens the native C value to double. */
3557 return (double) DatumGetBool(value);
3559 return (double) DatumGetInt16(value);
3561 return (double) DatumGetInt32(value);
3563 return (double) DatumGetInt64(value);
3565 return (double) DatumGetFloat4(value);
3567 return (double) DatumGetFloat8(value);
3569 /* Note: out-of-range values will be clamped to +-HUGE_VAL */
3571 DatumGetFloat8(DirectFunctionCall1(numeric_float8_no_overflow,
3575 case REGPROCEDUREOID:
3577 case REGOPERATOROID:
3581 case REGDICTIONARYOID:
3582 /* we can treat OIDs as integers... */
3583 return (double) DatumGetObjectId(value);
3587 * Can't get here unless someone tries to use scalarltsel/scalargtsel on
3588 * an operator with one numeric and one non-numeric operand.
3590 elog(ERROR, "unsupported type: %u", typid);
3595 * Do convert_to_scalar()'s work for any character-string data type.
3597 * String datatypes are converted to a scale that ranges from 0 to 1,
3598 * where we visualize the bytes of the string as fractional digits.
3600 * We do not want the base to be 256, however, since that tends to
3601 * generate inflated selectivity estimates; few databases will have
3602 * occurrences of all 256 possible byte values at each position.
3603 * Instead, use the smallest and largest byte values seen in the bounds
3604 * as the estimated range for each byte, after some fudging to deal with
3605 * the fact that we probably aren't going to see the full range that way.
3607 * An additional refinement is that we discard any common prefix of the
3608 * three strings before computing the scaled values. This allows us to
3609 * "zoom in" when we encounter a narrow data range. An example is a phone
3610 * number database where all the values begin with the same area code.
3611 * (Actually, the bounds will be adjacent histogram-bin-boundary values,
3612 * so this is more likely to happen than you might think.)
3615 convert_string_to_scalar(char *value,
3616 double *scaledvalue,
3618 double *scaledlobound,
3620 double *scaledhibound)
/* Establish the observed byte-value range by scanning both bound strings. */
3626 rangelo = rangehi = (unsigned char) hibound[0];
3627 for (sptr = lobound; *sptr; sptr++)
3629 if (rangelo > (unsigned char) *sptr)
3630 rangelo = (unsigned char) *sptr;
3631 if (rangehi < (unsigned char) *sptr)
3632 rangehi = (unsigned char) *sptr;
3634 for (sptr = hibound; *sptr; sptr++)
3636 if (rangelo > (unsigned char) *sptr)
3637 rangelo = (unsigned char) *sptr;
3638 if (rangehi < (unsigned char) *sptr)
3639 rangehi = (unsigned char) *sptr;
3641 /* If range includes any upper-case ASCII chars, make it include all */
3642 if (rangelo <= 'Z' && rangehi >= 'A')
3649 /* Ditto lower-case */
3650 if (rangelo <= 'z' && rangehi >= 'a')
/* Ditto digits */
3658 if (rangelo <= '9' && rangehi >= '0')
3667 * If range includes less than 10 chars, assume we have not got enough
3668 * data, and make it include regular ASCII set.
3670 if (rangehi - rangelo < 9)
3677 * Now strip any common prefix of the three strings.
3681 if (*lobound != *hibound || *lobound != *value)
3683 lobound++, hibound++, value++;
3687 * Now we can do the conversions.
3689 *scaledvalue = convert_one_string_to_scalar(value, rangelo, rangehi);
3690 *scaledlobound = convert_one_string_to_scalar(lobound, rangelo, rangehi);
3691 *scaledhibound = convert_one_string_to_scalar(hibound, rangelo, rangehi);
/*
 * Map one string onto the [0,1) scale: each byte is treated as a digit in
 * base (rangehi - rangelo + 1). NOTE(review): the clamp for bytes below
 * rangelo is elided in this excerpt — presumably mirrors the ch > rangehi
 * branch; confirm against the full source.
 */
3695 convert_one_string_to_scalar(char *value, int rangelo, int rangehi)
3697 int slen = strlen(value);
3703 return 0.0; /* empty string has scalar value 0 */
3706 * Since base is at least 10, need not consider more than about 20 chars
3711 /* Convert initial characters to fraction */
3712 base = rangehi - rangelo + 1;
3717 int ch = (unsigned char) *value++;
3721 else if (ch > rangehi)
3723 num += ((double) (ch - rangelo)) / denom;
3731 * Convert a string-type Datum into a palloc'd, null-terminated string.
3733 * When using a non-C locale, we must pass the string through strxfrm()
3734 * before continuing, so as to generate correct locale-specific results.
3737 convert_string_datum(Datum value, Oid typid)
/* Single-byte "char" type: build a 2-byte null-terminated copy. */
3744 val = (char *) palloc(2);
3745 val[0] = DatumGetChar(value);
3751 val = TextDatumGetCString(value);
/* NAME type: copy out of the fixed-size NameData buffer. */
3755 NameData *nm = (NameData *) DatumGetPointer(value);
3757 val = pstrdup(NameStr(*nm));
3763 * Can't get here unless someone tries to use scalarltsel on an
3764 * operator with one string and one non-string operand.
3766 elog(ERROR, "unsupported type: %u", typid);
/* C locale sorts bytewise, so no strxfrm pass is needed in that case. */
3770 if (!lc_collate_is_c(DEFAULT_COLLATION_OID))
3777 * Note: originally we guessed at a suitable output buffer size, and
3778 * only needed to call strxfrm twice if our guess was too small.
3779 * However, it seems that some versions of Solaris have buggy strxfrm
3780 * that can write past the specified buffer length in that scenario.
3781 * So, do it the dumb way for portability.
3783 * Yet other systems (e.g., glibc) sometimes return a smaller value
3784 * from the second call than the first; thus the Assert must be <= not
3785 * == as you'd expect. Can't any of these people program their way
3786 * out of a paper bag?
3788 * XXX: strxfrm doesn't support UTF-8 encoding on Win32, it can return
3789 * bogus data or set an error. This is not really a problem unless it
3790 * crashes since it will only give an estimation error and nothing
3793 #if _MSC_VER == 1400 /* VS.Net 2005 */
3797 * http://connect.microsoft.com/VisualStudio/feedback/ViewFeedback.aspx?
3798 * FeedbackID=99694 */
3802 xfrmlen = strxfrm(x, val, 0);
/* First strxfrm call with NULL/size 0 just measures the needed length. */
3805 xfrmlen = strxfrm(NULL, val, 0);
3810 * On Windows, strxfrm returns INT_MAX when an error occurs. Instead
3811 * of trying to allocate this much memory (and fail), just return the
3812 * original string unmodified as if we were in the C locale.
3814 if (xfrmlen == INT_MAX)
3817 xfrmstr = (char *) palloc(xfrmlen + 1);
3818 xfrmlen2 = strxfrm(xfrmstr, val, xfrmlen + 1);
3819 Assert(xfrmlen2 <= xfrmlen);
3828 * Do convert_to_scalar()'s work for any bytea data type.
3830 * Very similar to convert_string_to_scalar except we can't assume
3831 * null-termination and therefore pass explicit lengths around.
3833 * Also, assumptions about likely "normal" ranges of characters have been
3834 * removed - a data range of 0..255 is always used, for now. (Perhaps
3835 * someday we will add information about actual byte data range to
3839 convert_bytea_to_scalar(Datum value,
3840 double *scaledvalue,
3842 double *scaledlobound,
3844 double *scaledhibound)
/* Payload lengths exclude the varlena header. */
3848 valuelen = VARSIZE(DatumGetPointer(value)) - VARHDRSZ,
3849 loboundlen = VARSIZE(DatumGetPointer(lobound)) - VARHDRSZ,
3850 hiboundlen = VARSIZE(DatumGetPointer(hibound)) - VARHDRSZ,
3853 unsigned char *valstr = (unsigned char *) VARDATA(DatumGetPointer(value)),
3854 *lostr = (unsigned char *) VARDATA(DatumGetPointer(lobound)),
3855 *histr = (unsigned char *) VARDATA(DatumGetPointer(hibound));
3858 * Assume bytea data is uniformly distributed across all byte values.
3864 * Now strip any common prefix of the three strings.
3866 minlen = Min(Min(valuelen, loboundlen), hiboundlen);
3867 for (i = 0; i < minlen; i++)
3869 if (*lostr != *histr || *lostr != *valstr)
3871 lostr++, histr++, valstr++;
3872 loboundlen--, hiboundlen--, valuelen--;
3876 * Now we can do the conversions.
3878 *scaledvalue = convert_one_bytea_to_scalar(valstr, valuelen, rangelo, rangehi);
3879 *scaledlobound = convert_one_bytea_to_scalar(lostr, loboundlen, rangelo, rangehi);
3880 *scaledhibound = convert_one_bytea_to_scalar(histr, hiboundlen, rangelo, rangehi);
/*
 * Map a byte string to a double by treating successive bytes as base-N
 * "digits" of a fraction, where N = rangehi - rangelo + 1.
 */
3884 convert_one_bytea_to_scalar(unsigned char *value, int valuelen,
3885 int rangelo, int rangehi)
3892 return 0.0; /* empty string has scalar value 0 */
3895 * Since base is 256, need not consider more than about 10 chars (even
3896 * this many seems like overkill)
3901 /* Convert initial characters to fraction */
3902 base = rangehi - rangelo + 1;
3905 while (valuelen-- > 0)
/* Bytes outside [rangelo, rangehi] are clamped before being folded in
 * (clamping branches partly elided in this fragment). */
3911 else if (ch > rangehi)
/* Each byte contributes (ch - rangelo) scaled by the running denominator. */
3913 num += ((double) (ch - rangelo)) / denom;
3921 * Do convert_to_scalar()'s work for any timevalue data type.
/* Dispatches on the type OID and reduces each date/time representation to a
 * double on a common linear scale, suitable for range interpolation. */
3924 convert_timevalue_to_scalar(Datum value, Oid typid)
3929 return DatumGetTimestamp(value);
3930 case TIMESTAMPTZOID:
3931 return DatumGetTimestampTz(value);
/* abstime is first converted to timestamp via its conversion function. */
3933 return DatumGetTimestamp(DirectFunctionCall1(abstime_timestamp,
3936 return date2timestamp_no_overflow(DatumGetDateADT(value));
3939 Interval *interval = DatumGetIntervalP(value);
3942 * Convert the month part of Interval to days using assumed
3943 * average month length of 365.25/12.0 days. Not too
3944 * accurate, but plenty good enough for our purposes.
/* Units differ by build: microseconds with integer timestamps,
 * seconds with float timestamps. */
3946 #ifdef HAVE_INT64_TIMESTAMP
3947 return interval->time + interval->day * (double) USECS_PER_DAY +
3948 interval->month * ((DAYS_PER_YEAR / (double) MONTHS_PER_YEAR) * USECS_PER_DAY);
3950 return interval->time + interval->day * SECS_PER_DAY +
3951 interval->month * ((DAYS_PER_YEAR / (double) MONTHS_PER_YEAR) * (double) SECS_PER_DAY);
3955 #ifdef HAVE_INT64_TIMESTAMP
/* reltime is stored in seconds; scale to microseconds for integer builds. */
3956 return (DatumGetRelativeTime(value) * 1000000.0);
3958 return DatumGetRelativeTime(value);
3962 TimeInterval tinterval = DatumGetTimeInterval(value);
/* A tinterval's scalar value is its duration (end minus start);
 * status == 0 means an invalid interval. */
3964 #ifdef HAVE_INT64_TIMESTAMP
3965 if (tinterval->status != 0)
3966 return ((tinterval->data[1] - tinterval->data[0]) * 1000000.0);
3968 if (tinterval->status != 0)
3969 return tinterval->data[1] - tinterval->data[0];
3971 return 0; /* for lack of a better idea */
3974 return DatumGetTimeADT(value);
3977 TimeTzADT *timetz = DatumGetTimeTzADTP(value);
3979 /* use GMT-equivalent time */
3980 #ifdef HAVE_INT64_TIMESTAMP
3981 return (double) (timetz->time + (timetz->zone * 1000000.0));
3983 return (double) (timetz->time + timetz->zone);
3989 * Can't get here unless someone tries to use scalarltsel/scalargtsel on
3990 * an operator with one timevalue and one non-timevalue operand.
3992 elog(ERROR, "unsupported type: %u", typid);
3998 * get_restriction_variable
3999 * Examine the args of a restriction clause to see if it's of the
4000 * form (variable op pseudoconstant) or (pseudoconstant op variable),
4001 * where "variable" could be either a Var or an expression in vars of a
4002 * single relation. If so, extract information about the variable,
4003 * and also indicate which side it was on and the other argument.
4006 * root: the planner info
4007 * args: clause argument list
4008 * varRelid: see specs for restriction selectivity functions
4010 * Outputs: (these are valid only if TRUE is returned)
4011 * *vardata: gets information about variable (see examine_variable)
4012 * *other: gets other clause argument, aggressively reduced to a constant
4013 * *varonleft: set TRUE if variable is on the left, FALSE if on the right
4015 * Returns TRUE if a variable is identified, otherwise FALSE.
4017 * Note: if there are Vars on both sides of the clause, we must fail, because
4018 * callers are expecting that the other side will act like a pseudoconstant.
4021 get_restriction_variable(PlannerInfo *root, List *args, int varRelid,
4022 VariableStatData *vardata, Node **other,
4027 VariableStatData rdata;
4029 /* Fail if not a binary opclause (probably shouldn't happen) */
4030 if (list_length(args) != 2)
4033 left = (Node *) linitial(args);
4034 right = (Node *) lsecond(args);
4037 * Examine both sides. Note that when varRelid is nonzero, Vars of other
4038 * relations will be treated as pseudoconstants.
4040 examine_variable(root, left, varRelid, vardata);
4041 examine_variable(root, right, varRelid, &rdata);
4044 * If one side is a variable and the other not, we win.
/* A NULL rel means examine_variable found no "real" Vars on that side,
 * i.e. that side is a pseudoconstant. */
4046 if (vardata->rel && rdata.rel == NULL)
/* Pre-evaluate the constant side as far as possible for the estimator. */
4049 *other = estimate_expression_value(root, rdata.var);
4050 /* Assume we need no ReleaseVariableStats(rdata) here */
/* Mirror-image case: variable on the right, constant on the left. */
4054 if (vardata->rel == NULL && rdata.rel)
4057 *other = estimate_expression_value(root, vardata->var);
4058 /* Assume we need no ReleaseVariableStats(*vardata) here */
4063 /* Ooops, clause has wrong structure (probably var op var) */
4064 ReleaseVariableStats(*vardata);
4065 ReleaseVariableStats(rdata);
4071 * get_join_variables
4072 * Apply examine_variable() to each side of a join clause.
4073 * Also, attempt to identify whether the join clause has the same
4074 * or reversed sense compared to the SpecialJoinInfo.
4076 * We consider the join clause "normal" if it is "lhs_var OP rhs_var",
4077 * or "reversed" if it is "rhs_var OP lhs_var". In complicated cases
4078 * where we can't tell for sure, we default to assuming it's normal.
4081 get_join_variables(PlannerInfo *root, List *args, SpecialJoinInfo *sjinfo,
4082 VariableStatData *vardata1, VariableStatData *vardata2,
4083 bool *join_is_reversed)
4088 if (list_length(args) != 2)
4089 elog(ERROR, "join operator should take two arguments");
4091 left = (Node *) linitial(args);
4092 right = (Node *) lsecond(args);
/* varRelid = 0: no relation is singled out as "the" variable relation. */
4094 examine_variable(root, left, 0, vardata1);
4095 examine_variable(root, right, 0, vardata2);
/* Decide sense by which side of the special join each variable's rels
 * fall on; if neither test fires, assume the normal orientation. */
4097 if (vardata1->rel &&
4098 bms_is_subset(vardata1->rel->relids, sjinfo->syn_righthand))
4099 *join_is_reversed = true; /* var1 is on RHS */
4100 else if (vardata2->rel &&
4101 bms_is_subset(vardata2->rel->relids, sjinfo->syn_lefthand))
4102 *join_is_reversed = true; /* var2 is on LHS */
4104 *join_is_reversed = false;
4109 * Try to look up statistical data about an expression.
4110 * Fill in a VariableStatData struct to describe the expression.
4113 * root: the planner info
4114 * node: the expression tree to examine
4115 * varRelid: see specs for restriction selectivity functions
4117 * Outputs: *vardata is filled as follows:
4118 * var: the input expression (with any binary relabeling stripped, if
4119 * it is or contains a variable; but otherwise the type is preserved)
4120 * rel: RelOptInfo for relation containing variable; NULL if expression
4121 * contains no Vars (NOTE this could point to a RelOptInfo of a
4122 * subquery, not one in the current query).
4123 * statsTuple: the pg_statistic entry for the variable, if one exists;
4125 * freefunc: pointer to a function to release statsTuple with.
4126 * vartype: exposed type of the expression; this should always match
4127 * the declared input type of the operator we are estimating for.
4128 * atttype, atttypmod: type data to pass to get_attstatsslot(). This is
4129 * commonly the same as the exposed type of the variable argument,
4130 * but can be different in binary-compatible-type cases.
4131 * isunique: TRUE if we were able to match the var to a unique index,
4132 * implying its values are unique for this query.
4134 * Caller is responsible for doing ReleaseVariableStats() before exiting.
4137 examine_variable(PlannerInfo *root, Node *node, int varRelid,
4138 VariableStatData *vardata)
4144 /* Make sure we don't return dangling pointers in vardata */
4145 MemSet(vardata, 0, sizeof(VariableStatData));
4147 /* Save the exposed type of the expression */
4148 vardata->vartype = exprType(node);
4150 /* Look inside any binary-compatible relabeling */
4152 if (IsA(node, RelabelType))
4153 basenode = (Node *) ((RelabelType *) node)->arg;
4157 /* Fast path for a simple Var */
4159 if (IsA(basenode, Var) &&
4160 (varRelid == 0 || varRelid == ((Var *) basenode)->varno))
4162 Var *var = (Var *) basenode;
4164 /* Set up result fields other than the stats tuple */
4165 vardata->var = basenode; /* return Var without relabeling */
4166 vardata->rel = find_base_rel(root, var->varno);
4167 vardata->atttype = var->vartype;
4168 vardata->atttypmod = var->vartypmod;
4169 vardata->isunique = has_unique_index(vardata->rel, var->varattno);
4171 /* Try to locate some stats */
4172 examine_simple_variable(root, var, vardata);
4178 * Okay, it's a more complicated expression. Determine variable
4179 * membership. Note that when varRelid isn't zero, only vars of that
4180 * relation are considered "real" vars.
4182 varnos = pull_varnos(basenode);
/* Classify the expression by how many base relations its Vars reference:
 * none (pseudoconstant), exactly one, or several (join expression). */
4186 switch (bms_membership(varnos))
4189 /* No Vars at all ... must be pseudo-constant clause */
4192 if (varRelid == 0 || bms_is_member(varRelid, varnos))
4194 onerel = find_base_rel(root,
4195 (varRelid ? varRelid : bms_singleton_member(varnos)));
4196 vardata->rel = onerel;
4197 node = basenode; /* strip any relabeling */
4199 /* else treat it as a constant */
4204 /* treat it as a variable of a join relation */
4205 vardata->rel = find_join_rel(root, varnos);
4206 node = basenode; /* strip any relabeling */
4208 else if (bms_is_member(varRelid, varnos))
4210 /* ignore the vars belonging to other relations */
4211 vardata->rel = find_base_rel(root, varRelid);
4212 node = basenode; /* strip any relabeling */
4213 /* note: no point in expressional-index search here */
4215 /* else treat it as a constant */
4221 vardata->var = node;
4222 vardata->atttype = exprType(node);
4223 vardata->atttypmod = exprTypmod(node);
4228 * We have an expression in vars of a single relation. Try to match
4229 * it to expressional index columns, in hopes of finding some
4232 * XXX it's conceivable that there are multiple matches with different
4233 * index opfamilies; if so, we need to pick one that matches the
4234 * operator we are estimating for. FIXME later.
4238 foreach(ilist, onerel->indexlist)
4240 IndexOptInfo *index = (IndexOptInfo *) lfirst(ilist);
4241 ListCell *indexpr_item;
4244 indexpr_item = list_head(index->indexprs);
4245 if (indexpr_item == NULL)
4246 continue; /* no expressions here... */
/* Walk the index columns; indexkeys[pos] == 0 marks an expression
 * column, whose expression comes from the indexprs list in order. */
4248 for (pos = 0; pos < index->ncolumns; pos++)
4250 if (index->indexkeys[pos] == 0)
4254 if (indexpr_item == NULL)
4255 elog(ERROR, "too few entries in indexprs list");
4256 indexkey = (Node *) lfirst(indexpr_item);
4257 if (indexkey && IsA(indexkey, RelabelType))
4258 indexkey = (Node *) ((RelabelType *) indexkey)->arg;
4259 if (equal(node, indexkey))
4262 * Found a match ... is it a unique index? Tests here
4263 * should match has_unique_index().
4265 if (index->unique &&
4266 index->ncolumns == 1 &&
4267 (index->indpred == NIL || index->predOK))
4268 vardata->isunique = true;
4271 * Has it got stats? We only consider stats for
4272 * non-partial indexes, since partial indexes probably
4273 * don't reflect whole-relation statistics; the above
4274 * check for uniqueness is the only info we take from
4277 * An index stats hook, however, must make its own
4278 * decisions about what to do with partial indexes.
4280 if (get_index_stats_hook &&
4281 (*get_index_stats_hook) (root, index->indexoid,
4285 * The hook took control of acquiring a stats
4286 * tuple. If it did supply a tuple, it'd better
4287 * have supplied a freefunc.
4289 if (HeapTupleIsValid(vardata->statsTuple) &&
4291 elog(ERROR, "no function provided to release variable stats with");
4293 else if (index->indpred == NIL)
/* Stats for an expression index column are stored under the
 * index's OID and column number, same layout as table columns. */
4295 vardata->statsTuple =
4296 SearchSysCache3(STATRELATTINH,
4297 ObjectIdGetDatum(index->indexoid),
4298 Int16GetDatum(pos + 1),
4299 BoolGetDatum(false));
4300 vardata->freefunc = ReleaseSysCache;
4302 if (vardata->statsTuple)
/* Advance to the expression for the next expression column. */
4305 indexpr_item = lnext(indexpr_item);
4308 if (vardata->statsTuple)
4315 * examine_simple_variable
4316 * Handle a simple Var for examine_variable
4318 * This is split out as a subroutine so that we can recurse to deal with
4319 * Vars referencing subqueries.
4321 * We already filled in all the fields of *vardata except for the stats tuple.
4324 examine_simple_variable(PlannerInfo *root, Var *var,
4325 VariableStatData *vardata)
4327 RangeTblEntry *rte = root->simple_rte_array[var->varno];
4329 Assert(IsA(rte, RangeTblEntry));
/* Give an installed stats hook first crack at supplying the tuple. */
4331 if (get_relation_stats_hook &&
4332 (*get_relation_stats_hook) (root, rte, var->varattno, vardata))
4335 * The hook took control of acquiring a stats tuple. If it did supply
4336 * a tuple, it'd better have supplied a freefunc.
4338 if (HeapTupleIsValid(vardata->statsTuple) &&
4340 elog(ERROR, "no function provided to release variable stats with");
4342 else if (rte->rtekind == RTE_RELATION)
4345 * Plain table or parent of an inheritance appendrel, so look up the
4346 * column in pg_statistic
4348 vardata->statsTuple = SearchSysCache3(STATRELATTINH,
4349 ObjectIdGetDatum(rte->relid),
4350 Int16GetDatum(var->varattno),
4351 BoolGetDatum(rte->inh));
4352 vardata->freefunc = ReleaseSysCache;
4354 else if (rte->rtekind == RTE_SUBQUERY && !rte->inh)
4357 * Plain subquery (not one that was converted to an appendrel).
4359 * Punt if subquery uses set operations, GROUP BY, or DISTINCT --- any
4360 * of these will mash underlying columns' stats beyond recognition.
4361 * (Set ops are particularly nasty; if we forged ahead, we would
4362 * return stats relevant to only the leftmost subselect...)
4364 Query *subquery = rte->subquery;
4368 if (subquery->setOperations ||
4369 subquery->groupClause ||
4370 subquery->distinctClause)
4374 * OK, fetch RelOptInfo for subquery. Note that we don't change the
4375 * rel returned in vardata, since caller expects it to be a rel of the
4376 * caller's query level. Because we might already be recursing, we
4377 * can't use that rel pointer either, but have to look up the Var's
4380 rel = find_base_rel(root, var->varno);
4382 /* Subquery should have been planned already */
4383 Assert(rel->subroot && IsA(rel->subroot, PlannerInfo));
4386 * Switch our attention to the subquery as mangled by the planner.
4387 * It was okay to look at the pre-planning version for the tests
4388 * above, but now we need a Var that will refer to the subroot's
4389 * live RelOptInfos. For instance, if any subquery pullup happened
4390 * during planning, Vars in the targetlist might have gotten replaced,
4391 * and we need to see the replacement expressions.
4393 subquery = rel->subroot->parse;
4394 Assert(IsA(subquery, Query));
4396 /* Get the subquery output expression referenced by the upper Var */
4397 ste = get_tle_by_resno(subquery->targetList, var->varattno);
4398 if (ste == NULL || ste->resjunk)
4399 elog(ERROR, "subquery %s does not have attribute %d",
4400 rte->eref->aliasname, var->varattno);
4401 var = (Var *) ste->expr;
4403 /* Can only handle a simple Var of subquery's query level */
4404 if (var && IsA(var, Var) &&
4405 var->varlevelsup == 0)
4408 * OK, recurse into the subquery. Note that the original setting
4409 * of vardata->isunique (which will surely be false) is left
4410 * unchanged in this situation. That's what we want, since even
4411 * if the underlying column is unique, the subquery may have
4412 * joined to other tables in a way that creates duplicates.
4414 examine_simple_variable(rel->subroot, var, vardata);
4420 * Otherwise, the Var comes from a FUNCTION, VALUES, or CTE RTE. (We
4421 * won't see RTE_JOIN here because join alias Vars have already been
4422 * flattened.) There's not much we can do with function outputs, but
4423 * maybe someday try to be smarter about VALUES and/or CTEs.
4429 * get_variable_numdistinct
4430 * Estimate the number of distinct values of a variable.
4432 * vardata: results of examine_variable
4433 * *isdefault: set to TRUE if the result is a default rather than based on
4434 * anything meaningful.
4436 * NB: be careful to produce an integral result, since callers may compare
4437 * the result to exact integer counts.
4440 get_variable_numdistinct(VariableStatData *vardata, bool *isdefault)
4448 * Determine the stadistinct value to use. There are cases where we can
4449 * get an estimate even without a pg_statistic entry, or can get a better
4450 * value than is in pg_statistic.
/* stadistinct convention (per pg_statistic): > 0 is an absolute count,
 * < 0 is the negative of a fraction of the table's rows, 0 is unknown. */
4452 if (HeapTupleIsValid(vardata->statsTuple))
4454 /* Use the pg_statistic entry */
4455 Form_pg_statistic stats;
4457 stats = (Form_pg_statistic) GETSTRUCT(vardata->statsTuple);
4458 stadistinct = stats->stadistinct;
4460 else if (vardata->vartype == BOOLOID)
4463 * Special-case boolean columns: presumably, two distinct values.
4465 * Are there any other datatypes we should wire in special estimates
4473 * We don't keep statistics for system columns, but in some cases we
4474 * can infer distinctness anyway.
4476 if (vardata->var && IsA(vardata->var, Var))
4478 switch (((Var *) vardata->var)->varattno)
4480 case ObjectIdAttributeNumber:
4481 case SelfItemPointerAttributeNumber:
4482 stadistinct = -1.0; /* unique */
4484 case TableOidAttributeNumber:
4485 stadistinct = 1.0; /* only 1 value */
4488 stadistinct = 0.0; /* means "unknown" */
4493 stadistinct = 0.0; /* means "unknown" */
4496 * XXX consider using estimate_num_groups on expressions?
4501 * If there is a unique index for the variable, assume it is unique no
4502 * matter what pg_statistic says; the statistics could be out of date, or
4503 * we might have found a partial unique index that proves the var is
4504 * unique for this query.
4506 if (vardata->isunique)
4510 * If we had an absolute estimate, use that.
4512 if (stadistinct > 0.0)
4516 * Otherwise we need to get the relation size; punt if not available.
4518 if (vardata->rel == NULL)
4521 return DEFAULT_NUM_DISTINCT;
4523 ntuples = vardata->rel->tuples;
4527 return DEFAULT_NUM_DISTINCT;
4531 * If we had a relative estimate, use that.
/* -stadistinct is a fraction of ntuples; round to nearest integer
 * to honor the "integral result" contract above. */
4533 if (stadistinct < 0.0)
4534 return floor((-stadistinct * ntuples) + 0.5);
4537 * With no data, estimate ndistinct = ntuples if the table is small, else
4538 * use default. We use DEFAULT_NUM_DISTINCT as the cutoff for "small"
4539 * so that the behavior isn't discontinuous.
4541 if (ntuples < DEFAULT_NUM_DISTINCT)
4545 return DEFAULT_NUM_DISTINCT;
4549 * get_variable_range
4550 * Estimate the minimum and maximum value of the specified variable.
4551 * If successful, store values in *min and *max, and return TRUE.
4552 * If no data available, return FALSE.
4554 * sortop is the "<" comparison operator to use. This should generally
4555 * be "<" not ">", as only the former is likely to be found in pg_statistic.
4558 get_variable_range(PlannerInfo *root, VariableStatData *vardata, Oid sortop,
4559 Datum *min, Datum *max)
4563 bool have_data = false;
4571 * XXX It's very tempting to try to use the actual column min and max, if
4572 * we can get them relatively-cheaply with an index probe. However, since
4573 * this function is called many times during join planning, that could
4574 * have unpleasant effects on planning speed. Need more investigation
4575 * before enabling this.
4578 if (get_actual_variable_range(root, vardata, sortop, min, max))
4582 if (!HeapTupleIsValid(vardata->statsTuple))
4584 /* no stats available, so default result */
4588 get_typlenbyval(vardata->atttype, &typLen, &typByVal);
4591 * If there is a histogram, grab the first and last values.
4593 * If there is a histogram that is sorted with some other operator than
4594 * the one we want, fail --- this suggests that there is data we can't
4597 if (get_attstatsslot(vardata->statsTuple,
4598 vardata->atttype, vardata->atttypmod,
4599 STATISTIC_KIND_HISTOGRAM, sortop,
/* Histogram entries are ordered by sortop, so the endpoints are the
 * extreme values; copy them out of the stats tuple's storage. */
4606 tmin = datumCopy(values[0], typByVal, typLen);
4607 tmax = datumCopy(values[nvalues - 1], typByVal, typLen);
4610 free_attstatsslot(vardata->atttype, values, nvalues, NULL, 0);
/* Second probe with InvalidOid: detects a histogram sorted by some
 * OTHER operator, which means we must give up (see comment above). */
4612 else if (get_attstatsslot(vardata->statsTuple,
4613 vardata->atttype, vardata->atttypmod,
4614 STATISTIC_KIND_HISTOGRAM, InvalidOid,
4619 free_attstatsslot(vardata->atttype, values, nvalues, NULL, 0);
4624 * If we have most-common-values info, look for extreme MCVs. This is
4625 * needed even if we also have a histogram, since the histogram excludes
4626 * the MCVs. However, usually the MCVs will not be the extreme values, so
4627 * avoid unnecessary data copying.
4629 if (get_attstatsslot(vardata->statsTuple,
4630 vardata->atttype, vardata->atttypmod,
4631 STATISTIC_KIND_MCV, InvalidOid,
4636 bool tmin_is_mcv = false;
4637 bool tmax_is_mcv = false;
4640 fmgr_info(get_opcode(sortop), &opproc);
4642 for (i = 0; i < nvalues; i++)
/* First MCV seen with no prior data becomes both min and max. */
4646 tmin = tmax = values[i];
4647 tmin_is_mcv = tmax_is_mcv = have_data = true;
/* Compare each MCV against the running extremes using sortop. */
4650 if (DatumGetBool(FunctionCall2Coll(&opproc,
4651 DEFAULT_COLLATION_OID,
4657 if (DatumGetBool(FunctionCall2Coll(&opproc,
4658 DEFAULT_COLLATION_OID,
/* Extremes that point into the MCV array must be copied before the
 * slot is freed below. */
4666 tmin = datumCopy(tmin, typByVal, typLen);
4668 tmax = datumCopy(tmax, typByVal, typLen);
4669 free_attstatsslot(vardata->atttype, values, nvalues, NULL, 0);
4679 * get_actual_variable_range
4680 * Attempt to identify the current *actual* minimum and/or maximum
4681 * of the specified variable, by looking for a suitable btree index
4682 * and fetching its low and/or high values.
4683 * If successful, store values in *min and *max, and return TRUE.
4684 * (Either pointer can be NULL if that endpoint isn't needed.)
4685 * If no data available, return FALSE.
4687 * sortop is the "<" comparison operator to use.
4690 get_actual_variable_range(PlannerInfo *root, VariableStatData *vardata,
4692 Datum *min, Datum *max)
4694 bool have_data = false;
4695 RelOptInfo *rel = vardata->rel;
4699 /* No hope if no relation or it doesn't have indexes */
4700 if (rel == NULL || rel->indexlist == NIL)
4702 /* If it has indexes it must be a plain relation */
4703 rte = root->simple_rte_array[rel->relid];
4704 Assert(rte->rtekind == RTE_RELATION);
4706 /* Search through the indexes to see if any match our problem */
4707 foreach(lc, rel->indexlist)
4709 IndexOptInfo *index = (IndexOptInfo *) lfirst(lc);
4710 ScanDirection indexscandir;
4712 /* Ignore non-btree indexes */
4713 if (index->relam != BTREE_AM_OID)
4717 * Ignore partial indexes --- we only want stats that cover the entire
4720 if (index->indpred != NIL)
4724 * The index list might include hypothetical indexes inserted by a
4725 * get_relation_info hook --- don't try to access them.
4727 if (index->hypothetical)
4731 * The first index column must match the desired variable and sort
4732 * operator --- but we can use a descending-order index.
4734 if (!match_index_to_operand(vardata->var, 0, index))
/* Pick the scan direction that yields the minimum first: a "<" sortop
 * on an ascending index scans forward, on a descending index backward;
 * a ">" sortop is the mirror image. */
4736 switch (get_op_opfamily_strategy(sortop, index->sortopfamily[0]))
4738 case BTLessStrategyNumber:
4739 if (index->reverse_sort[0])
4740 indexscandir = BackwardScanDirection;
4742 indexscandir = ForwardScanDirection;
4744 case BTGreaterStrategyNumber:
4745 if (index->reverse_sort[0])
4746 indexscandir = ForwardScanDirection;
4748 indexscandir = BackwardScanDirection;
4751 /* index doesn't match the sortop */
4756 * Found a suitable index to extract data from. We'll need an EState
4757 * and a bunch of other infrastructure.
4761 ExprContext *econtext;
4762 MemoryContext tmpcontext;
4763 MemoryContext oldcontext;
4766 IndexInfo *indexInfo;
4767 TupleTableSlot *slot;
4770 ScanKeyData scankeys[1];
4771 IndexScanDesc index_scan;
4773 Datum values[INDEX_MAX_KEYS];
4774 bool isnull[INDEX_MAX_KEYS];
4776 estate = CreateExecutorState();
4777 econtext = GetPerTupleExprContext(estate);
4778 /* Make sure any cruft is generated in the econtext's memory */
4779 tmpcontext = econtext->ecxt_per_tuple_memory;
4780 oldcontext = MemoryContextSwitchTo(tmpcontext);
4783 * Open the table and index so we can read from them. We should
4784 * already have at least AccessShareLock on the table, but not
4785 * necessarily on the index.
4787 heapRel = heap_open(rte->relid, NoLock);
4788 indexRel = index_open(index->indexoid, AccessShareLock);
4790 /* extract index key information from the index's pg_index info */
4791 indexInfo = BuildIndexInfo(indexRel);
4793 /* some other stuff */
4794 slot = MakeSingleTupleTableSlot(RelationGetDescr(heapRel));
4795 econtext->ecxt_scantuple = slot;
4796 get_typlenbyval(vardata->atttype, &typLen, &typByVal);
4798 /* set up an IS NOT NULL scan key so that we ignore nulls */
4799 ScanKeyEntryInitialize(&scankeys[0],
4800 SK_ISNULL | SK_SEARCHNOTNULL,
4801 1, /* index col to scan */
4802 InvalidStrategy, /* no strategy */
4803 InvalidOid, /* no strategy subtype */
4804 InvalidOid, /* no collation */
4805 InvalidOid, /* no reg proc for this */
4806 (Datum) 0); /* constant */
4810 /* If min is requested ... */
4813 index_scan = index_beginscan(heapRel, indexRel, SnapshotNow,
4815 index_rescan(index_scan, scankeys, 1, NULL, 0);
4817 /* Fetch first tuple in sortop's direction */
4818 if ((tup = index_getnext(index_scan,
4819 indexscandir)) != NULL)
4821 /* Extract the index column values from the heap tuple */
4822 ExecStoreTuple(tup, slot, InvalidBuffer, false);
4823 FormIndexDatum(indexInfo, slot, estate,
4826 /* Shouldn't have got a null, but be careful */
4828 elog(ERROR, "found unexpected null value in index \"%s\"",
4829 RelationGetRelationName(indexRel));
4831 /* Copy the index column value out to caller's context */
4832 MemoryContextSwitchTo(oldcontext);
4833 *min = datumCopy(values[0], typByVal, typLen);
4834 MemoryContextSwitchTo(tmpcontext);
4839 index_endscan(index_scan);
4842 /* If max is requested, and we didn't find the index is empty */
4843 if (max && have_data)
4845 index_scan = index_beginscan(heapRel, indexRel, SnapshotNow,
4847 index_rescan(index_scan, scankeys, 1, NULL, 0);
/* Negating the direction scans from the opposite end of the index,
 * so the first tuple returned is the maximum. */
4849 /* Fetch first tuple in reverse direction */
4850 if ((tup = index_getnext(index_scan,
4851 -indexscandir)) != NULL)
4853 /* Extract the index column values from the heap tuple */
4854 ExecStoreTuple(tup, slot, InvalidBuffer, false);
4855 FormIndexDatum(indexInfo, slot, estate,
4858 /* Shouldn't have got a null, but be careful */
4860 elog(ERROR, "found unexpected null value in index \"%s\"",
4861 RelationGetRelationName(indexRel));
4863 /* Copy the index column value out to caller's context */
4864 MemoryContextSwitchTo(oldcontext);
4865 *max = datumCopy(values[0], typByVal, typLen);
4866 MemoryContextSwitchTo(tmpcontext);
4871 index_endscan(index_scan);
4874 /* Clean everything up */
4875 ExecDropSingleTupleTableSlot(slot);
4877 index_close(indexRel, AccessShareLock);
4878 heap_close(heapRel, NoLock);
4880 MemoryContextSwitchTo(oldcontext);
4881 FreeExecutorState(estate);
4883 /* And we're done */
4892 * find_join_input_rel
4893 * Look up the input relation for a join.
4895 * We assume that the input relation's RelOptInfo must have been constructed
4899 find_join_input_rel(PlannerInfo *root, Relids relids)
4901 RelOptInfo *rel = NULL;
/* A single member means a base relation; multiple members means a
 * previously-built join relation; an empty set is a caller bug. */
4903 switch (bms_membership(relids))
4906 /* should not happen */
4909 rel = find_base_rel(root, bms_singleton_member(relids));
4912 rel = find_join_rel(root, relids);
/* find_join_rel can return NULL if the join rel wasn't built yet. */
4917 elog(ERROR, "could not find RelOptInfo for given relids");
4923 /*-------------------------------------------------------------------------
4925 * Pattern analysis functions
4927 * These routines support analysis of LIKE and regular-expression patterns
4928 * by the planner/optimizer. It's important that they agree with the
4929 * regular-expression code in backend/regex/ and the LIKE code in
4930 * backend/utils/adt/like.c. Also, the computation of the fixed prefix
4931 * must be conservative: if we report a string longer than the true fixed
4932 * prefix, the query may produce actually wrong answers, rather than just
4933 * getting a bad selectivity estimate!
4935 * Note that the prefix-analysis functions are called from
4936 * backend/optimizer/path/indxpath.c as well as from routines in this file.
4938 *-------------------------------------------------------------------------
4942 * Check whether char is a letter (and, hence, subject to case-folding)
4944 * In multibyte character sets, we can't use isalpha, and it does not seem
4945 * worth trying to convert to wchar_t to use iswalpha. Instead, just assume
4946 * any multibyte char is potentially case-varying.
4949 pattern_char_isalpha(char c, bool is_multibyte,
4950 pg_locale_t locale, bool locale_is_c)
/* C locale: only ASCII letters case-fold. */
4953 return (c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z');
/* High-bit-set byte in a multibyte encoding: treat conservatively
 * as a (potentially case-varying) letter. */
4954 else if (is_multibyte && IS_HIGHBIT_SET(c))
4956 #ifdef HAVE_LOCALE_T
/* Cast to unsigned char before ctype calls to avoid UB on negative chars. */
4958 return isalpha_l((unsigned char) c, locale);
4961 return isalpha((unsigned char) c);
4965 * Extract the fixed prefix, if any, for a pattern.
4967 * *prefix is set to a palloc'd prefix string (in the form of a Const node),
4968 * or to NULL if no fixed prefix exists for the pattern.
4969 * *rest is set to a palloc'd Const representing the remainder of the pattern
4970 * after the portion describing the fixed prefix.
4971 * Each of these has the same type (TEXT or BYTEA) as the given pattern Const.
4973 * The return value distinguishes no fixed prefix, a partial prefix,
4974 * or an exact-match-only pattern.
4977 static Pattern_Prefix_Status
4978 like_fixed_prefix(Const *patt_const, bool case_insensitive, Oid collation,
4979 Const **prefix_const, Const **rest_const)
4985 Oid typeid = patt_const->consttype;
4988 bool is_multibyte = (pg_database_encoding_max_length() > 1);
4989 pg_locale_t locale = 0;
4990 bool locale_is_c = false;
4992 /* the right-hand const is type text or bytea */
4993 Assert(typeid == BYTEAOID || typeid == TEXTOID);
4995 if (case_insensitive)
4997 if (typeid == BYTEAOID)
4999 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
5000 errmsg("case insensitive matching not supported on type bytea")));
5002 /* If case-insensitive, we need locale info */
5003 if (lc_ctype_is_c(collation))
5005 else if (collation != DEFAULT_COLLATION_OID)
5007 if (!OidIsValid(collation))
5010 * This typically means that the parser could not resolve a
5011 * conflict of implicit collations, so report it that way.
5014 (errcode(ERRCODE_INDETERMINATE_COLLATION),
5015 errmsg("could not determine which collation to use for ILIKE"),
5016 errhint("Use the COLLATE clause to set the collation explicitly.")));
5018 locale = pg_newlocale_from_collation(collation);
/* Text patterns are handled as NUL-terminated C strings; bytea patterns
 * get an explicit length since they may contain embedded NULs. */
5022 if (typeid != BYTEAOID)
5024 patt = TextDatumGetCString(patt_const->constvalue);
5025 pattlen = strlen(patt);
5029 bytea *bstr = DatumGetByteaP(patt_const->constvalue);
5031 pattlen = VARSIZE(bstr) - VARHDRSZ;
5032 patt = (char *) palloc(pattlen);
5033 memcpy(patt, VARDATA(bstr), pattlen);
/* Free the detoasted copy only if detoasting actually made one. */
5034 if ((Pointer) bstr != DatumGetPointer(patt_const->constvalue))
5038 match = palloc(pattlen + 1);
/* Scan the pattern; the fixed prefix ends at the first wildcard or
 * (for ILIKE) the first case-varying character. */
5040 for (pos = 0; pos < pattlen; pos++)
5042 /* % and _ are wildcard characters in LIKE */
5043 if (patt[pos] == '%' ||
5047 /* Backslash escapes the next character */
5048 if (patt[pos] == '\\')
5055 /* Stop if case-varying character (it's sort of a wildcard) */
5056 if (case_insensitive &&
5057 pattern_char_isalpha(patt[pos], is_multibyte, locale, locale_is_c))
5060 match[match_pos++] = patt[pos];
5063 match[match_pos] = '\0';
5066 if (typeid != BYTEAOID)
5068 *prefix_const = string_to_const(match, typeid);
5069 *rest_const = string_to_const(rest, typeid);
5073 *prefix_const = string_to_bytea_const(match, match_pos);
5074 *rest_const = string_to_bytea_const(rest, pattlen - pos);
5080 /* in LIKE, an empty pattern is an exact match! */
5082 return Pattern_Prefix_Exact; /* reached end of pattern, so exact */
5085 return Pattern_Prefix_Partial;
5087 return Pattern_Prefix_None;
/*
 * regex_fixed_prefix()
 *	Extract the fixed (literal) prefix, if any, of a regular-expression
 *	pattern, returning Pattern_Prefix_None/Partial/Exact.  On Partial or
 *	Exact, the prefix and the remainder of the pattern are handed back as
 *	text Consts via *prefix_const and *rest_const.
 *
 * NOTE(review): this excerpt elides interleaving lines (braces, some
 * declarations/statements); the comments below describe only visible code.
 */
5090 static Pattern_Prefix_Status
5091 regex_fixed_prefix(Const *patt_const, bool case_insensitive, Oid collation,
5092 Const **prefix_const, Const **rest_const)
5099 bool have_leading_paren;
5102 Oid typeid = patt_const->consttype;
5103 bool is_multibyte = (pg_database_encoding_max_length() > 1);
5104 pg_locale_t locale = 0;
5105 bool locale_is_c = false;
5108 * Should be unnecessary, there are no bytea regex operators defined. As
5109 * such, it should be noted that the rest of this function has *not* been
5110 * made safe for binary (possibly NULL containing) strings.
5112 if (typeid == BYTEAOID)
5114 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
5115 errmsg("regular-expression matching not supported on type bytea")));
5117 if (case_insensitive)
5119 /* If case-insensitive, we need locale info */
5120 if (lc_ctype_is_c(collation))
5122 else if (collation != DEFAULT_COLLATION_OID)
5124 if (!OidIsValid(collation))
5127 * This typically means that the parser could not resolve a
5128 * conflict of implicit collations, so report it that way.
5131 (errcode(ERRCODE_INDETERMINATE_COLLATION),
5132 errmsg("could not determine which collation to use for regular expression"),
5133 errhint("Use the COLLATE clause to set the collation explicitly.")));
5135 locale = pg_newlocale_from_collation(collation);
5139 /* the right-hand const is type text for all of these */
5140 patt = TextDatumGetCString(patt_const->constvalue);
5143 * Check for ARE director prefix. It's worth our trouble to recognize
5144 * this because similar_escape() used to use it, and some other code might
5145 * still use it, to force ARE mode.
5148 if (strncmp(patt, "***:", 4) == 0)
5151 /* Pattern must be anchored left */
5152 if (patt[pos] != '^')
/* not left-anchored: no usable prefix, whole pattern is the "rest" */
5156 *prefix_const = NULL;
5157 *rest_const = string_to_const(rest, typeid);
5159 return Pattern_Prefix_None;
5164 * If '|' is present in pattern, then there may be multiple alternatives
5165 * for the start of the string. (There are cases where this isn't so, for
5166 * instance if the '|' is inside parens, but detecting that reliably is
5169 if (strchr(patt + pos, '|') != NULL)
5173 *prefix_const = NULL;
5174 *rest_const = string_to_const(rest, typeid);
5176 return Pattern_Prefix_None;
5179 /* OK, allocate space for pattern */
5180 match = palloc(strlen(patt) + 1);
5181 prev_match_pos = match_pos = 0;
5184 * We special-case the syntax '^(...)$' because psql uses it. But beware:
5185 * sequences beginning "(?" are not what they seem, unless they're "(?:".
5186 * (We must recognize that because of similar_escape().)
5188 have_leading_paren = false;
5189 if (patt[pos] == '(' &&
5190 (patt[pos + 1] != '?' || patt[pos + 2] == ':'))
5192 have_leading_paren = true;
/* skip 1 char for "(" or 3 chars for "(?:" */
5193 pos += (patt[pos + 1] != '?' ? 1 : 3);
5196 /* Scan remainder of pattern */
5203 * Check for characters that indicate multiple possible matches here.
5204 * Also, drop out at ')' or '$' so the termination test works right.
5206 if (patt[pos] == '.' ||
5214 /* Stop if case-varying character (it's sort of a wildcard) */
5215 if (case_insensitive &&
5216 pattern_char_isalpha(patt[pos], is_multibyte, locale, locale_is_c))
5220 * Check for quantifiers. Except for +, this means the preceding
5221 * character is optional, so we must remove it from the prefix too!
5223 if (patt[pos] == '*' ||
5227 match_pos = prev_match_pos;
5231 if (patt[pos] == '+')
5238 * Normally, backslash quotes the next character. But in AREs,
5239 * backslash followed by alphanumeric is an escape, not a quoted
5240 * character. Must treat it as having multiple possible matches.
5241 * Note: since only ASCII alphanumerics are escapes, we don't have to
5242 * be paranoid about multibyte or collations here.
5244 if (patt[pos] == '\\')
5246 if (isalnum((unsigned char) patt[pos + 1]))
5249 if (patt[pos] == '\0')
5252 /* save position in case we need to back up on next loop cycle */
5253 prev_match_pos = match_pos;
5255 /* must use encoding-aware processing here */
5256 len = pg_mblen(&patt[pos]);
5257 memcpy(&match[match_pos], &patt[pos], len);
5262 match[match_pos] = '\0';
/* consume a trailing ')' matching the special-cased leading paren */
5265 if (have_leading_paren && patt[pos] == ')')
/* pattern ends with an anchoring '$': prefix is the entire match */
5268 if (patt[pos] == '$' && patt[pos + 1] == '\0')
5270 rest = &patt[pos + 1];
5272 *prefix_const = string_to_const(match, typeid);
5273 *rest_const = string_to_const(rest, typeid);
5278 return Pattern_Prefix_Exact; /* pattern specifies exact match */
5281 *prefix_const = string_to_const(match, typeid);
5282 *rest_const = string_to_const(rest, typeid);
5288 return Pattern_Prefix_Partial;
5290 return Pattern_Prefix_None;
/*
 * pattern_fixed_prefix()
 *	Dispatch on the pattern type to the LIKE or regex fixed-prefix
 *	extractor, passing case_insensitive=true for the *_IC variants.
 *	(The trailing "return result;" and closing brace are elided from
 *	this excerpt.)
 */
5293 Pattern_Prefix_Status
5294 pattern_fixed_prefix(Const *patt, Pattern_Type ptype, Oid collation,
5295 Const **prefix, Const **rest)
5297 Pattern_Prefix_Status result;
5301 case Pattern_Type_Like:
5302 result = like_fixed_prefix(patt, false, collation, prefix, rest);
5304 case Pattern_Type_Like_IC:
5305 result = like_fixed_prefix(patt, true, collation, prefix, rest);
5307 case Pattern_Type_Regex:
5308 result = regex_fixed_prefix(patt, false, collation, prefix, rest);
5310 case Pattern_Type_Regex_IC:
5311 result = regex_fixed_prefix(patt, true, collation, prefix, rest);
5314 elog(ERROR, "unrecognized ptype: %d", (int) ptype);
5315 result = Pattern_Prefix_None; /* keep compiler quiet */
5322 * Estimate the selectivity of a fixed prefix for a pattern match.
5324 * A fixed prefix "foo" is estimated as the selectivity of the expression
5325 * "variable >= 'foo' AND variable < 'fop'" (see also indxpath.c).
5327 * The selectivity estimate is with respect to the portion of the column
5328 * population represented by the histogram --- the caller must fold this
5329 * together with info about MCVs and NULLs.
5331 * We use the >= and < operators from the specified btree opfamily to do the
5332 * estimation. The given variable and Const must be of the associated
5335 * XXX Note: we make use of the upper bound to estimate operator selectivity
5336 * even if the locale is such that we cannot rely on the upper-bound string.
5337 * The selectivity only needs to be approximately right anyway, so it seems
5338 * more useful to use the upper-bound code than not.
5341 prefix_selectivity(PlannerInfo *root, VariableStatData *vardata,
5342 Oid vartype, Oid opfamily, Const *prefixcon)
5344 Selectivity prefixsel;
5347 Const *greaterstrcon;
/* look up the >= operator for the column's btree opfamily */
5350 cmpopr = get_opfamily_member(opfamily, vartype, vartype,
5351 BTGreaterEqualStrategyNumber);
5352 if (cmpopr == InvalidOid)
5353 elog(ERROR, "no >= operator for opfamily %u", opfamily);
5354 fmgr_info(get_opcode(cmpopr), &opproc);
/* selectivity of "variable >= 'foo'" per the histogram */
5356 prefixsel = ineq_histogram_selectivity(root, vardata, &opproc, true,
5357 prefixcon->constvalue,
5358 prefixcon->consttype);
5360 if (prefixsel < 0.0)
5362 /* No histogram is present ... return a suitable default estimate */
5363 return DEFAULT_MATCH_SEL;
5367 * If we can create a string larger than the prefix, say
5371 cmpopr = get_opfamily_member(opfamily, vartype, vartype,
5372 BTLessStrategyNumber);
5373 if (cmpopr == InvalidOid)
5374 elog(ERROR, "no < operator for opfamily %u", opfamily);
5375 fmgr_info(get_opcode(cmpopr), &opproc);
/*
 * NOTE(review): the greater-string is generated under DEFAULT_COLLATION_OID
 * rather than the column's actual collation here -- presumably acceptable
 * for estimation purposes; confirm against current upstream sources.
 */
5376 greaterstrcon = make_greater_string(prefixcon, &opproc,
5377 DEFAULT_COLLATION_OID);
5382 topsel = ineq_histogram_selectivity(root, vardata, &opproc, false,
5383 greaterstrcon->constvalue,
5384 greaterstrcon->consttype);
5386 /* ineq_histogram_selectivity worked before, it shouldn't fail now */
5387 Assert(topsel >= 0.0);
5390 * Merge the two selectivities in the same way as for a range query
5391 * (see clauselist_selectivity()). Note that we don't need to worry
5392 * about double-exclusion of nulls, since ineq_histogram_selectivity
5393 * doesn't count those anyway.
5395 prefixsel = topsel + prefixsel - 1.0;
5399 * If the prefix is long then the two bounding values might be too close
5400 * together for the histogram to distinguish them usefully, resulting in a
5401 * zero estimate (plus or minus roundoff error). To avoid returning a
5402 * ridiculously small estimate, compute the estimated selectivity for
5403 * "variable = 'foo'", and clamp to that. (Obviously, the resultant
5404 * estimate should be at least that.)
5406 * We apply this even if we couldn't make a greater string. That case
5407 * suggests that the prefix is near the maximum possible, and thus
5408 * probably off the end of the histogram, and thus we probably got a very
5409 * small estimate from the >= condition; so we still need to clamp.
5411 cmpopr = get_opfamily_member(opfamily, vartype, vartype,
5412 BTEqualStrategyNumber);
5413 if (cmpopr == InvalidOid)
5414 elog(ERROR, "no = operator for opfamily %u", opfamily);
5415 eq_sel = var_eq_const(vardata, cmpopr, prefixcon->constvalue,
5418 prefixsel = Max(prefixsel, eq_sel);
5425 * Estimate the selectivity of a pattern of the specified type.
5426 * Note that any fixed prefix of the pattern will have been removed already.
5428 * For now, we use a very simplistic approach: fixed characters reduce the
5429 * selectivity a good deal, character ranges reduce it a little,
5430 * wildcards (such as % for LIKE or .* for regex) increase it.
/*
 * Per-pattern-element multipliers.  Values < 1 make the estimate more
 * selective; values > 1 (the wildcard factors) make it less selective.
 * The final result is clamped elsewhere so it stays a valid selectivity.
 */
5433 #define FIXED_CHAR_SEL 0.20 /* about 1/5 */
5434 #define CHAR_RANGE_SEL 0.25
5435 #define ANY_CHAR_SEL 0.9 /* not 1, since it won't match end-of-string */
5436 #define FULL_WILDCARD_SEL 5.0
5437 #define PARTIAL_WILDCARD_SEL 2.0
/*
 * like_selectivity()
 *	Crude selectivity estimate for a LIKE pattern with its fixed prefix
 *	already stripped: start at 1.0 and multiply in a per-character factor
 *	for each remaining pattern element.  (The final clamp and return are
 *	elided from this excerpt.)
 */
5440 like_selectivity(Const *patt_const, bool case_insensitive)
5442 Selectivity sel = 1.0;
5444 Oid typeid = patt_const->consttype;
5448 /* the right-hand const is type text or bytea */
5449 Assert(typeid == BYTEAOID || typeid == TEXTOID);
5451 if (typeid == BYTEAOID && case_insensitive)
5453 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
5454 errmsg("case insensitive matching not supported on type bytea")));
5456 if (typeid != BYTEAOID)
5458 patt = TextDatumGetCString(patt_const->constvalue);
5459 pattlen = strlen(patt);
/* bytea: copy out the raw bytes (may contain embedded NULs) */
5463 bytea *bstr = DatumGetByteaP(patt_const->constvalue);
5465 pattlen = VARSIZE(bstr) - VARHDRSZ;
5466 patt = (char *) palloc(pattlen);
5467 memcpy(patt, VARDATA(bstr), pattlen);
5468 if ((Pointer) bstr != DatumGetPointer(patt_const->constvalue))
5472 /* Skip any leading wildcard; it's already factored into initial sel */
5473 for (pos = 0; pos < pattlen; pos++)
5475 if (patt[pos] != '%' && patt[pos] != '_')
5479 for (; pos < pattlen; pos++)
5481 /* % and _ are wildcard characters in LIKE */
5482 if (patt[pos] == '%')
5483 sel *= FULL_WILDCARD_SEL;
5484 else if (patt[pos] == '_')
5485 sel *= ANY_CHAR_SEL;
5486 else if (patt[pos] == '\\')
5488 /* Backslash quotes the next character */
5492 sel *= FIXED_CHAR_SEL;
5495 sel *= FIXED_CHAR_SEL;
5497 /* Could get sel > 1 if multiple wildcards */
/*
 * regex_selectivity_sub()
 *	Recursive workhorse for regex_selectivity(): walk pattlen bytes of
 *	the pattern, multiplying per-element selectivity factors at paren
 *	depth 0, recursing into parenthesized groups, and summing over
 *	top-level '|' alternatives.
 *
 * NOTE(review): interleaving lines (braces, depth increments, pos
 * advances) are elided from this excerpt.
 */
5506 regex_selectivity_sub(char *patt, int pattlen, bool case_insensitive)
5508 Selectivity sel = 1.0;
5509 int paren_depth = 0;
5510 int paren_pos = 0; /* dummy init to keep compiler quiet */
5513 for (pos = 0; pos < pattlen; pos++)
5515 if (patt[pos] == '(')
5517 if (paren_depth == 0)
5518 paren_pos = pos; /* remember start of parenthesized item */
5521 else if (patt[pos] == ')' && paren_depth > 0)
5524 if (paren_depth == 0)
/* recurse on the just-closed group's contents */
5525 sel *= regex_selectivity_sub(patt + (paren_pos + 1),
5526 pos - (paren_pos + 1),
5529 else if (patt[pos] == '|' && paren_depth == 0)
5532 * If unquoted | is present at paren level 0 in pattern, we have
5533 * multiple alternatives; sum their probabilities.
5535 sel += regex_selectivity_sub(patt + (pos + 1),
5536 pattlen - (pos + 1),
5538 break; /* rest of pattern is now processed */
5540 else if (patt[pos] == '[')
5542 bool negclass = false;
5544 if (patt[++pos] == '^')
5549 if (patt[pos] == ']') /* ']' at start of class is not
5552 while (pos < pattlen && patt[pos] != ']')
5554 if (paren_depth == 0)
5555 sel *= (negclass ? (1.0 - CHAR_RANGE_SEL) : CHAR_RANGE_SEL);
5557 else if (patt[pos] == '.')
5559 if (paren_depth == 0)
5560 sel *= ANY_CHAR_SEL;
5562 else if (patt[pos] == '*' ||
5566 /* Ought to be smarter about quantifiers... */
5567 if (paren_depth == 0)
5568 sel *= PARTIAL_WILDCARD_SEL;
5570 else if (patt[pos] == '{')
5572 while (pos < pattlen && patt[pos] != '}')
5574 if (paren_depth == 0)
5575 sel *= PARTIAL_WILDCARD_SEL;
5577 else if (patt[pos] == '\\')
5579 /* backslash quotes the next character */
5583 if (paren_depth == 0)
5584 sel *= FIXED_CHAR_SEL;
5588 if (paren_depth == 0)
5589 sel *= FIXED_CHAR_SEL;
5592 /* Could get sel > 1 if multiple wildcards */
/*
 * regex_selectivity()
 *	Top-level regex selectivity estimator: reject bytea, strip a trailing
 *	'$' anchor if present, and otherwise treat the pattern as having a
 *	trailing wildcard (FULL_WILDCARD_SEL).  Delegates the per-character
 *	work to regex_selectivity_sub().
 */
5599 regex_selectivity(Const *patt_const, bool case_insensitive)
5604 Oid typeid = patt_const->consttype;
5607 * Should be unnecessary, there are no bytea regex operators defined. As
5608 * such, it should be noted that the rest of this function has *not* been
5609 * made safe for binary (possibly NULL containing) strings.
5611 if (typeid == BYTEAOID)
5613 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
5614 errmsg("regular-expression matching not supported on type bytea")));
5616 /* the right-hand const is type text for all of these */
5617 patt = TextDatumGetCString(patt_const->constvalue);
5618 pattlen = strlen(patt);
5620 /* If patt doesn't end with $, consider it to have a trailing wildcard */
5621 if (pattlen > 0 && patt[pattlen - 1] == '$' &&
5622 (pattlen == 1 || patt[pattlen - 2] != '\\')
5624 /* has trailing $ */
5625 sel = regex_selectivity_sub(patt, pattlen - 1, case_insensitive);
5630 sel = regex_selectivity_sub(patt, pattlen, case_insensitive);
5631 sel *= FULL_WILDCARD_SEL;
/*
 * pattern_selectivity()
 *	Dispatch on the pattern type to the LIKE or regex selectivity
 *	estimator, with or without case-insensitivity.  (The trailing
 *	"return result;" and closing brace are elided from this excerpt.)
 */
5639 pattern_selectivity(Const *patt, Pattern_Type ptype)
5645 case Pattern_Type_Like:
5646 result = like_selectivity(patt, false);
5648 case Pattern_Type_Like_IC:
5649 result = like_selectivity(patt, true);
5651 case Pattern_Type_Regex:
5652 result = regex_selectivity(patt, false);
5654 case Pattern_Type_Regex_IC:
5655 result = regex_selectivity(patt, true);
5658 elog(ERROR, "unrecognized ptype: %d", (int) ptype);
5659 result = 1.0; /* keep compiler quiet */
5667 * Try to generate a string greater than the given string or any
5668 * string it is a prefix of. If successful, return a palloc'd string
5669 * in the form of a Const node; else return NULL.
5671 * The caller must provide the appropriate "less than" comparison function
5672 * for testing the strings, along with the collation to use.
5674 * The key requirement here is that given a prefix string, say "foo",
5675 * we must be able to generate another string "fop" that is greater than
5676 * all strings "foobar" starting with "foo". We can test that we have
5677 * generated a string greater than the prefix string, but in non-C collations
5678 * that is not a bulletproof guarantee that an extension of the string might
5679 * not sort after it; an example is that "foo " is less than "foo!", but it
5680 * is not clear that a "dictionary" sort ordering will consider "foo!" less
5681 * than "foo bar". CAUTION: Therefore, this function should be used only for
5682 * estimation purposes when working in a non-C collation.
5684 * To try to catch most cases where an extended string might otherwise sort
5685 * before the result value, we determine which of the strings "Z", "z", "y",
5686 * and "9" is seen as largest by the collation, and append that to the given
5687 * prefix before trying to find a string that compares as larger.
5689 * If we max out the righthand byte, truncate off the last character
5690 * and start incrementing the next. For example, if "z" were the last
5691 * character in the sort order, then we could produce "foo" as a
5692 * string greater than "fonz".
5694 * This could be rather slow in the worst case, but in most cases we
5695 * won't have to try more than one or two strings before succeeding.
/*
 * NOTE(review): interleaving lines (braces, some statements, the final
 * failure return) are elided from this excerpt.
 */
5698 make_greater_string(const Const *str_const, FmgrInfo *ltproc, Oid collation)
5700 Oid datatype = str_const->consttype;
5704 text *cmptxt = NULL;
5707 * Get a modifiable copy of the prefix string in C-string format, and set
5708 * up the string we will compare to as a Datum. In C locale this can just
5709 * be the given prefix string, otherwise we need to add a suffix. Types
5710 * NAME and BYTEA sort bytewise so they don't need a suffix either.
5712 if (datatype == NAMEOID)
5714 workstr = DatumGetCString(DirectFunctionCall1(nameout,
5715 str_const->constvalue));
5716 len = strlen(workstr);
5717 cmpstr = str_const->constvalue;
5719 else if (datatype == BYTEAOID)
5721 bytea *bstr = DatumGetByteaP(str_const->constvalue);
5723 len = VARSIZE(bstr) - VARHDRSZ;
5724 workstr = (char *) palloc(len);
5725 memcpy(workstr, VARDATA(bstr), len);
/* free the detoasted copy if DatumGetByteaP made one */
5726 if ((Pointer) bstr != DatumGetPointer(str_const->constvalue))
5728 cmpstr = str_const->constvalue;
5732 workstr = TextDatumGetCString(str_const->constvalue);
5733 len = strlen(workstr);
5734 if (lc_collate_is_c(collation) || len == 0)
5735 cmpstr = str_const->constvalue;
5738 /* If first time through, determine the suffix to use */
/* cached per-collation: which of "Z","z","y","9" sorts largest */
5739 static char suffixchar = 0;
5740 static Oid suffixcollation = 0;
5742 if (!suffixchar || suffixcollation != collation)
5747 if (varstr_cmp(best, 1, "z", 1, collation) < 0)
5749 if (varstr_cmp(best, 1, "y", 1, collation) < 0)
5751 if (varstr_cmp(best, 1, "9", 1, collation) < 0)
5754 suffixcollation = collation;
5757 /* And build the string to compare to */
5758 cmptxt = (text *) palloc(VARHDRSZ + len + 1);
5759 SET_VARSIZE(cmptxt, VARHDRSZ + len + 1);
5760 memcpy(VARDATA(cmptxt), workstr, len);
5761 *(VARDATA(cmptxt) + len) = suffixchar;
5762 cmpstr = PointerGetDatum(cmptxt);
5768 unsigned char *lastchar = (unsigned char *) (workstr + len - 1);
5769 unsigned char savelastchar = *lastchar;
5772 * Try to generate a larger string by incrementing the last byte.
5774 while (*lastchar < (unsigned char) 255)
5776 Const *workstr_const;
5780 if (datatype != BYTEAOID)
5782 /* do not generate invalid encoding sequences */
5783 if (!pg_verifymbstr(workstr, len, true))
5785 workstr_const = string_to_const(workstr, datatype);
5788 workstr_const = string_to_bytea_const(workstr, len);
/* candidate accepted iff cmpstr < candidate per caller's "<" proc */
5790 if (DatumGetBool(FunctionCall2Coll(ltproc,
5793 workstr_const->constvalue)))
5795 /* Successfully made a string larger than cmpstr */
5799 return workstr_const;
5802 /* No good, release unusable value and try again */
5803 pfree(DatumGetPointer(workstr_const->constvalue));
5804 pfree(workstr_const);
5807 /* restore last byte so we don't confuse pg_mbcliplen */
5808 *lastchar = savelastchar;
5811 * Truncate off the last character, which might be more than 1 byte,
5812 * depending on the character encoding.
5814 if (datatype != BYTEAOID && pg_database_encoding_max_length() > 1)
5815 len = pg_mbcliplen(workstr, len, len - 1);
5819 if (datatype != BYTEAOID)
5820 workstr[len] = '\0';
5832 * Generate a Datum of the appropriate type from a C string.
5833 * Note that all of the supported types are pass-by-ref, so the
5834 * returned value should be pfree'd if no longer needed.
5837 string_to_datum(const char *str, Oid datatype)
5839 Assert(str != NULL);
5842 * We cheat a little by assuming that CStringGetTextDatum() will do for
5843 * bpchar and varchar constants too...
5845 if (datatype == NAMEOID)
5846 return DirectFunctionCall1(namein, CStringGetDatum(str));
5847 else if (datatype == BYTEAOID)
5848 return DirectFunctionCall1(byteain, CStringGetDatum(str));
/* text, bpchar, varchar all accepted in text form */
5850 return CStringGetTextDatum(str);
5854 * Generate a Const node of the appropriate type from a C string.
/*
 * NOTE(review): the switch's case labels for each datatype are elided
 * from this excerpt; only the per-branch property assignments are shown.
 */
5857 string_to_const(const char *str, Oid datatype)
5859 Datum conval = string_to_datum(str, datatype);
5864 * We only need to support a few datatypes here, so hard-wire properties
5865 * instead of incurring the expense of catalog lookups.
5872 collation = DEFAULT_COLLATION_OID;
5877 collation = InvalidOid;
5878 constlen = NAMEDATALEN;
5882 collation = InvalidOid;
5887 elog(ERROR, "unexpected datatype in string_to_const: %u",
/* typmod -1, not null, pass-by-ref */
5892 return makeConst(datatype, -1, collation, constlen,
5893 conval, false, false);
5897 * Generate a Const node of bytea type from a binary C string and a length.
5900 string_to_bytea_const(const char *str, size_t str_len)
5902 bytea *bstr = palloc(VARHDRSZ + str_len);
/* length-counted copy: str may contain embedded NUL bytes */
5905 memcpy(VARDATA(bstr), str, str_len);
5906 SET_VARSIZE(bstr, VARHDRSZ + str_len);
5907 conval = PointerGetDatum(bstr);
/* typmod -1, no collation, varlena (constlen -1), not null, by-ref */
5909 return makeConst(BYTEAOID, -1, InvalidOid, -1, conval, false, false);
5912 /*-------------------------------------------------------------------------
5914 * Index cost estimation functions
5916 * genericcostestimate is a general-purpose estimator for use when we
5917 * don't have any better idea about how to estimate. Index-type-specific
5918 * knowledge can be incorporated in the type-specific routines.
5920 * One bit of index-type-specific knowledge we can relatively easily use
5921 * in genericcostestimate is the estimate of the number of index tuples
5922 * visited. If numIndexTuples is not 0 then it is used as the estimate,
5923 * otherwise we compute a generic estimate.
5925 *-------------------------------------------------------------------------
/*
 * genericcostestimate()
 *	Fallback index cost estimator: computes index selectivity, number of
 *	index tuples/pages visited, disk-access cost (with Mackert-Lohman
 *	caching adjustment for repeated scans), and CPU costs.  Outputs go
 *	through the five pointer parameters.
 *
 * NOTE(review): interleaving lines (braces, some declarations and
 * initializations such as num_sa_scans = 1) are elided from this excerpt.
 */
5929 genericcostestimate(PlannerInfo *root,
5930 IndexOptInfo *index,
5932 List *indexOrderBys,
5933 RelOptInfo *outer_rel,
5934 double numIndexTuples,
5935 Cost *indexStartupCost,
5936 Cost *indexTotalCost,
5937 Selectivity *indexSelectivity,
5938 double *indexCorrelation)
5940 double numIndexPages;
5941 double num_sa_scans;
5942 double num_outer_scans;
5944 QualCost index_qual_cost;
5945 double qual_op_cost;
5946 double qual_arg_cost;
5947 double spc_random_page_cost;
5948 List *selectivityQuals;
5952 * If the index is partial, AND the index predicate with the explicitly
5953 * given indexquals to produce a more accurate idea of the index
5954 * selectivity. However, we need to be careful not to insert redundant
5955 * clauses, because clauselist_selectivity() is easily fooled into
5956 * computing a too-low selectivity estimate. Our approach is to add
5957 * only the index predicate clause(s) that cannot be proven to be implied
5958 * by the given indexquals. This successfully handles cases such as a
5959 * qual "x = 42" used with a partial index "WHERE x >= 40 AND x < 50".
5960 * There are many other cases where we won't detect redundancy, leading
5961 * to a too-low selectivity estimate, which will bias the system in favor
5962 * of using partial indexes where possible. That is not necessarily bad
5965 * Note that indexQuals contains RestrictInfo nodes while the indpred
5966 * does not. This is OK for both predicate_implied_by() and
5967 * clauselist_selectivity().
5970 if (index->indpred != NIL)
5972 List *predExtraQuals = NIL;
5974 foreach(l, index->indpred)
5976 Node *predQual = (Node *) lfirst(l);
5977 List *oneQual = list_make1(predQual);
5979 if (!predicate_implied_by(oneQual, indexQuals))
5980 predExtraQuals = list_concat(predExtraQuals, oneQual);
5982 /* list_concat avoids modifying the passed-in indexQuals list */
5983 selectivityQuals = list_concat(predExtraQuals, indexQuals);
5986 selectivityQuals = indexQuals;
5989 * Check for ScalarArrayOpExpr index quals, and estimate the number of
5990 * index scans that will be performed.
5993 foreach(l, indexQuals)
5995 RestrictInfo *rinfo = (RestrictInfo *) lfirst(l);
5997 if (IsA(rinfo->clause, ScalarArrayOpExpr))
5999 ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) rinfo->clause;
6000 int alength = estimate_array_length(lsecond(saop->args));
/* each array element induces one logical index scan */
6003 num_sa_scans *= alength;
6007 /* Estimate the fraction of main-table tuples that will be visited */
6008 *indexSelectivity = clauselist_selectivity(root, selectivityQuals,
6014 * If caller didn't give us an estimate, estimate the number of index
6015 * tuples that will be visited. We do it in this rather peculiar-looking
6016 * way in order to get the right answer for partial indexes.
6018 if (numIndexTuples <= 0.0)
6020 numIndexTuples = *indexSelectivity * index->rel->tuples;
6023 * The above calculation counts all the tuples visited across all
6024 * scans induced by ScalarArrayOpExpr nodes. We want to consider the
6025 * average per-indexscan number, so adjust. This is a handy place to
6026 * round to integer, too. (If caller supplied tuple estimate, it's
6027 * responsible for handling these considerations.)
6029 numIndexTuples = rint(numIndexTuples / num_sa_scans);
6033 * We can bound the number of tuples by the index size in any case. Also,
6034 * always estimate at least one tuple is touched, even when
6035 * indexSelectivity estimate is tiny.
6037 if (numIndexTuples > index->tuples)
6038 numIndexTuples = index->tuples;
6039 if (numIndexTuples < 1.0)
6040 numIndexTuples = 1.0;
6043 * Estimate the number of index pages that will be retrieved.
6045 * We use the simplistic method of taking a pro-rata fraction of the total
6046 * number of index pages. In effect, this counts only leaf pages and not
6047 * any overhead such as index metapage or upper tree levels. In practice
6048 * this seems a better approximation than charging for access to the upper
6049 * levels, perhaps because those tend to stay in cache under load.
6051 if (index->pages > 1 && index->tuples > 1)
6052 numIndexPages = ceil(numIndexTuples * index->pages / index->tuples);
6054 numIndexPages = 1.0;
6056 /* fetch estimated page cost for schema containing index */
6057 get_tablespace_page_costs(index->reltablespace,
6058 &spc_random_page_cost,
6062 * Now compute the disk access costs.
6064 * The above calculations are all per-index-scan. However, if we are in a
6065 * nestloop inner scan, we can expect the scan to be repeated (with
6066 * different search keys) for each row of the outer relation. Likewise,
6067 * ScalarArrayOpExpr quals result in multiple index scans. This creates
6068 * the potential for cache effects to reduce the number of disk page
6069 * fetches needed. We want to estimate the average per-scan I/O cost in
6070 * the presence of caching.
6072 * We use the Mackert-Lohman formula (see costsize.c for details) to
6073 * estimate the total number of page fetches that occur. While this
6074 * wasn't what it was designed for, it seems a reasonable model anyway.
6075 * Note that we are counting pages not tuples anymore, so we take N = T =
6076 * index size, as if there were one "tuple" per page.
6078 if (outer_rel != NULL && outer_rel->rows > 1)
6080 num_outer_scans = outer_rel->rows;
6081 num_scans = num_sa_scans * num_outer_scans;
6085 num_outer_scans = 1;
6086 num_scans = num_sa_scans;
6091 double pages_fetched;
6093 /* total page fetches ignoring cache effects */
6094 pages_fetched = numIndexPages * num_scans;
6096 /* use Mackert and Lohman formula to adjust for cache effects */
6097 pages_fetched = index_pages_fetched(pages_fetched,
6099 (double) index->pages,
6103 * Now compute the total disk access cost, and then report a pro-rated
6104 * share for each outer scan. (Don't pro-rate for ScalarArrayOpExpr,
6105 * since that's internal to the indexscan.)
6107 *indexTotalCost = (pages_fetched * spc_random_page_cost)
6113 * For a single index scan, we just charge spc_random_page_cost per
6116 *indexTotalCost = numIndexPages * spc_random_page_cost;
6120 * A difficulty with the leaf-pages-only cost approach is that for small
6121 * selectivities (eg, single index tuple fetched) all indexes will look
6122 * equally attractive because we will estimate exactly 1 leaf page to be
6123 * fetched. All else being equal, we should prefer physically smaller
6124 * indexes over larger ones. (An index might be smaller because it is
6125 * partial or because it contains fewer columns; presumably the other
6126 * columns in the larger index aren't useful to the query, or the larger
6127 * index would have better selectivity.)
6129 * We can deal with this by adding a very small "fudge factor" that
6130 * depends on the index size. The fudge factor used here is one
6131 * spc_random_page_cost per 100000 index pages, which should be small
6132 * enough to not alter index-vs-seqscan decisions, but will prevent
6133 * indexes of different sizes from looking exactly equally attractive.
6135 *indexTotalCost += index->pages * spc_random_page_cost / 100000.0;
6138 * CPU cost: any complex expressions in the indexquals will need to be
6139 * evaluated once at the start of the scan to reduce them to runtime keys
6140 * to pass to the index AM (see nodeIndexscan.c). We model the per-tuple
6141 * CPU costs as cpu_index_tuple_cost plus one cpu_operator_cost per
6142 * indexqual operator. Because we have numIndexTuples as a per-scan
6143 * number, we have to multiply by num_sa_scans to get the correct result
6144 * for ScalarArrayOpExpr cases. Similarly add in costs for any index
6145 * ORDER BY expressions.
6147 * Note: this neglects the possible costs of rechecking lossy operators
6148 * and OR-clause expressions. Detecting that that might be needed seems
6149 * more expensive than it's worth, though, considering all the other
6150 * inaccuracies here ...
6152 cost_qual_eval(&index_qual_cost, indexQuals, root);
6153 qual_arg_cost = index_qual_cost.startup + index_qual_cost.per_tuple;
6154 cost_qual_eval(&index_qual_cost, indexOrderBys, root);
6155 qual_arg_cost += index_qual_cost.startup + index_qual_cost.per_tuple;
6156 qual_op_cost = cpu_operator_cost *
6157 (list_length(indexQuals) + list_length(indexOrderBys));
/* subtract the per-operator cost already counted in qual eval cost */
6158 qual_arg_cost -= qual_op_cost;
6159 if (qual_arg_cost < 0) /* just in case... */
6162 *indexStartupCost = qual_arg_cost;
6163 *indexTotalCost += qual_arg_cost;
6164 *indexTotalCost += numIndexTuples * num_sa_scans * (cpu_index_tuple_cost + qual_op_cost);
6167 * We also add a CPU-cost component to represent the general costs of
6168 * starting an indexscan, such as analysis of btree index keys and initial
6169 * tree descent. This is estimated at 100x cpu_operator_cost, which is a
6170 * bit arbitrary but seems the right order of magnitude. (As noted above,
6171 * we don't charge any I/O for touching upper tree levels, but charging
6172 * nothing at all has been found too optimistic.)
6174 * Although this is startup cost with respect to any one scan, we add it
6175 * to the "total" cost component because it's only very interesting in the
6176 * many-ScalarArrayOpExpr-scan case, and there it will be paid over the
6177 * life of the scan node.
6179 *indexTotalCost += num_sa_scans * 100.0 * cpu_operator_cost;
6182 * Generic assumption about index correlation: there isn't any.
6184 *indexCorrelation = 0.0;
6189 btcostestimate(PG_FUNCTION_ARGS)
6191 PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
6192 IndexOptInfo *index = (IndexOptInfo *) PG_GETARG_POINTER(1);
6193 List *indexQuals = (List *) PG_GETARG_POINTER(2);
6194 List *indexOrderBys = (List *) PG_GETARG_POINTER(3);
6195 RelOptInfo *outer_rel = (RelOptInfo *) PG_GETARG_POINTER(4);
6196 Cost *indexStartupCost = (Cost *) PG_GETARG_POINTER(5);
6197 Cost *indexTotalCost = (Cost *) PG_GETARG_POINTER(6);
6198 Selectivity *indexSelectivity = (Selectivity *) PG_GETARG_POINTER(7);
6199 double *indexCorrelation = (double *) PG_GETARG_POINTER(8);
6202 VariableStatData vardata;
6203 double numIndexTuples;
6204 List *indexBoundQuals;
6208 bool found_is_null_op;
6209 double num_sa_scans;
6213 * For a btree scan, only leading '=' quals plus inequality quals for the
6214 * immediately next attribute contribute to index selectivity (these are
6215 * the "boundary quals" that determine the starting and stopping points of
6216 * the index scan). Additional quals can suppress visits to the heap, so
6217 * it's OK to count them in indexSelectivity, but they should not count
6218 * for estimating numIndexTuples. So we must examine the given indexQuals
6219 * to find out which ones count as boundary quals. We rely on the
6220 * knowledge that they are given in index column order.
6222 * For a RowCompareExpr, we consider only the first column, just as
6223 * rowcomparesel() does.
6225 * If there's a ScalarArrayOpExpr in the quals, we'll actually perform N
6226 * index scans not one, but the ScalarArrayOpExpr's operator can be
6227 * considered to act the same as it normally does.
6229 indexBoundQuals = NIL;
6233 found_is_null_op = false;
6235 foreach(l, indexQuals)
6237 RestrictInfo *rinfo = (RestrictInfo *) lfirst(l);
6243 bool is_null_op = false;
6245 Assert(IsA(rinfo, RestrictInfo));
6246 clause = rinfo->clause;
6247 if (IsA(clause, OpExpr))
6249 leftop = get_leftop(clause);
6250 rightop = get_rightop(clause);
6251 clause_op = ((OpExpr *) clause)->opno;
6253 else if (IsA(clause, RowCompareExpr))
6255 RowCompareExpr *rc = (RowCompareExpr *) clause;
6257 leftop = (Node *) linitial(rc->largs);
6258 rightop = (Node *) linitial(rc->rargs);
6259 clause_op = linitial_oid(rc->opnos);
6261 else if (IsA(clause, ScalarArrayOpExpr))
6263 ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) clause;
6265 leftop = (Node *) linitial(saop->args);
6266 rightop = (Node *) lsecond(saop->args);
6267 clause_op = saop->opno;
6270 else if (IsA(clause, NullTest))
6272 NullTest *nt = (NullTest *) clause;
6274 leftop = (Node *) nt->arg;
6276 clause_op = InvalidOid;
6277 if (nt->nulltesttype == IS_NULL)
6279 found_is_null_op = true;
6285 elog(ERROR, "unsupported indexqual type: %d",
6286 (int) nodeTag(clause));
6287 continue; /* keep compiler quiet */
6289 if (match_index_to_operand(leftop, indexcol, index))
6291 /* clause_op is correct */
6293 else if (match_index_to_operand(rightop, indexcol, index))
6295 /* Must flip operator to get the opfamily member */
6296 clause_op = get_commutator(clause_op);
6300 /* Must be past the end of quals for indexcol, try next */
6302 break; /* done if no '=' qual for indexcol */
6305 if (match_index_to_operand(leftop, indexcol, index))
6307 /* clause_op is correct */
6309 else if (match_index_to_operand(rightop, indexcol, index))
6311 /* Must flip operator to get the opfamily member */
6312 clause_op = get_commutator(clause_op);
6316 /* No quals for new indexcol, so we are done */
6320 /* check for equality operator */
6321 if (OidIsValid(clause_op))
6323 op_strategy = get_op_opfamily_strategy(clause_op,
6324 index->opfamily[indexcol]);
6325 Assert(op_strategy != 0); /* not a member of opfamily?? */
6326 if (op_strategy == BTEqualStrategyNumber)
6329 else if (is_null_op)
6331 /* IS NULL is like = for purposes of selectivity determination */
6334 /* count up number of SA scans induced by indexBoundQuals only */
6335 if (IsA(clause, ScalarArrayOpExpr))
6337 ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) clause;
6338 int alength = estimate_array_length(lsecond(saop->args));
6341 num_sa_scans *= alength;
6343 indexBoundQuals = lappend(indexBoundQuals, rinfo);
6347 * If index is unique and we found an '=' clause for each column, we can
6348 * just assume numIndexTuples = 1 and skip the expensive
6349 * clauselist_selectivity calculations. However, a ScalarArrayOp or
6350 * NullTest invalidates that theory, even though it sets eqQualHere.
6352 if (index->unique &&
6353 indexcol == index->ncolumns - 1 &&
6357 numIndexTuples = 1.0;
6360 Selectivity btreeSelectivity;
6362 btreeSelectivity = clauselist_selectivity(root, indexBoundQuals,
6366 numIndexTuples = btreeSelectivity * index->rel->tuples;
6369 * As in genericcostestimate(), we have to adjust for any
6370 * ScalarArrayOpExpr quals included in indexBoundQuals, and then round
6373 numIndexTuples = rint(numIndexTuples / num_sa_scans);
6376 genericcostestimate(root, index, indexQuals, indexOrderBys,
6377 outer_rel, numIndexTuples,
6378 indexStartupCost, indexTotalCost,
6379 indexSelectivity, indexCorrelation);
6382 * If we can get an estimate of the first column's ordering correlation C
6383 * from pg_statistic, estimate the index correlation as C for a
6384 * single-column index, or C * 0.75 for multiple columns. (The idea here
6385 * is that multiple columns dilute the importance of the first column's
6386 * ordering, but don't negate it entirely. Before 8.0 we divided the
6387 * correlation by the number of columns, but that seems too strong.)
6389 MemSet(&vardata, 0, sizeof(vardata));
6391 if (index->indexkeys[0] != 0)
6393 /* Simple variable --- look to stats for the underlying table */
6394 RangeTblEntry *rte = planner_rt_fetch(index->rel->relid, root);
6396 Assert(rte->rtekind == RTE_RELATION);
6398 Assert(relid != InvalidOid);
6399 colnum = index->indexkeys[0];
6401 if (get_relation_stats_hook &&
6402 (*get_relation_stats_hook) (root, rte, colnum, &vardata))
6405 * The hook took control of acquiring a stats tuple. If it did
6406 * supply a tuple, it'd better have supplied a freefunc.
6408 if (HeapTupleIsValid(vardata.statsTuple) &&
6410 elog(ERROR, "no function provided to release variable stats with");
6414 vardata.statsTuple = SearchSysCache3(STATRELATTINH,
6415 ObjectIdGetDatum(relid),
6416 Int16GetDatum(colnum),
6417 BoolGetDatum(rte->inh));
6418 vardata.freefunc = ReleaseSysCache;
6423 /* Expression --- maybe there are stats for the index itself */
6424 relid = index->indexoid;
6427 if (get_index_stats_hook &&
6428 (*get_index_stats_hook) (root, relid, colnum, &vardata))
6431 * The hook took control of acquiring a stats tuple. If it did
6432 * supply a tuple, it'd better have supplied a freefunc.
6434 if (HeapTupleIsValid(vardata.statsTuple) &&
6436 elog(ERROR, "no function provided to release variable stats with");
6440 vardata.statsTuple = SearchSysCache3(STATRELATTINH,
6441 ObjectIdGetDatum(relid),
6442 Int16GetDatum(colnum),
6443 BoolGetDatum(false));
6444 vardata.freefunc = ReleaseSysCache;
6448 if (HeapTupleIsValid(vardata.statsTuple))
6454 sortop = get_opfamily_member(index->opfamily[0],
6455 index->opcintype[0],
6456 index->opcintype[0],
6457 BTLessStrategyNumber);
6458 if (OidIsValid(sortop) &&
6459 get_attstatsslot(vardata.statsTuple, InvalidOid, 0,
6460 STATISTIC_KIND_CORRELATION,
6464 &numbers, &nnumbers))
6466 double varCorrelation;
6468 Assert(nnumbers == 1);
6469 varCorrelation = numbers[0];
6471 if (index->reverse_sort[0])
6472 varCorrelation = -varCorrelation;
6474 if (index->ncolumns > 1)
6475 *indexCorrelation = varCorrelation * 0.75;
6477 *indexCorrelation = varCorrelation;
6479 free_attstatsslot(InvalidOid, NULL, 0, numbers, nnumbers);
6483 ReleaseVariableStats(vardata);
6489 hashcostestimate(PG_FUNCTION_ARGS)
6491 PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
6492 IndexOptInfo *index = (IndexOptInfo *) PG_GETARG_POINTER(1);
6493 List *indexQuals = (List *) PG_GETARG_POINTER(2);
6494 List *indexOrderBys = (List *) PG_GETARG_POINTER(3);
6495 RelOptInfo *outer_rel = (RelOptInfo *) PG_GETARG_POINTER(4);
6496 Cost *indexStartupCost = (Cost *) PG_GETARG_POINTER(5);
6497 Cost *indexTotalCost = (Cost *) PG_GETARG_POINTER(6);
6498 Selectivity *indexSelectivity = (Selectivity *) PG_GETARG_POINTER(7);
6499 double *indexCorrelation = (double *) PG_GETARG_POINTER(8);
6501 genericcostestimate(root, index, indexQuals, indexOrderBys, outer_rel, 0.0,
6502 indexStartupCost, indexTotalCost,
6503 indexSelectivity, indexCorrelation);
6509 gistcostestimate(PG_FUNCTION_ARGS)
6511 PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
6512 IndexOptInfo *index = (IndexOptInfo *) PG_GETARG_POINTER(1);
6513 List *indexQuals = (List *) PG_GETARG_POINTER(2);
6514 List *indexOrderBys = (List *) PG_GETARG_POINTER(3);
6515 RelOptInfo *outer_rel = (RelOptInfo *) PG_GETARG_POINTER(4);
6516 Cost *indexStartupCost = (Cost *) PG_GETARG_POINTER(5);
6517 Cost *indexTotalCost = (Cost *) PG_GETARG_POINTER(6);
6518 Selectivity *indexSelectivity = (Selectivity *) PG_GETARG_POINTER(7);
6519 double *indexCorrelation = (double *) PG_GETARG_POINTER(8);
6521 genericcostestimate(root, index, indexQuals, indexOrderBys, outer_rel, 0.0,
6522 indexStartupCost, indexTotalCost,
6523 indexSelectivity, indexCorrelation);
6528 /* Find the index column matching "op"; return its index, or -1 if no match */
6530 find_index_column(Node *op, IndexOptInfo *index)
6534 for (i = 0; i < index->ncolumns; i++)
6536 if (match_index_to_operand(op, i, index))
6544 * GIN has search behavior completely different from other index types
6547 gincostestimate(PG_FUNCTION_ARGS)
6549 PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
6550 IndexOptInfo *index = (IndexOptInfo *) PG_GETARG_POINTER(1);
6551 List *indexQuals = (List *) PG_GETARG_POINTER(2);
6552 List *indexOrderBys = (List *) PG_GETARG_POINTER(3);
6553 RelOptInfo *outer_rel = (RelOptInfo *) PG_GETARG_POINTER(4);
6554 Cost *indexStartupCost = (Cost *) PG_GETARG_POINTER(5);
6555 Cost *indexTotalCost = (Cost *) PG_GETARG_POINTER(6);
6556 Selectivity *indexSelectivity = (Selectivity *) PG_GETARG_POINTER(7);
6557 double *indexCorrelation = (double *) PG_GETARG_POINTER(8);
6559 List *selectivityQuals;
6560 double numPages = index->pages,
6561 numTuples = index->tuples;
6562 double numEntryPages,
6566 bool haveFullScan = false;
6567 double partialEntriesInQuals = 0.0;
6568 double searchEntriesInQuals = 0.0;
6569 double exactEntriesInQuals = 0.0;
6570 double entryPagesFetched,
6572 dataPagesFetchedBySel;
6573 double qual_op_cost,
6575 spc_random_page_cost,
6577 QualCost index_qual_cost;
6579 GinStatsData ginStats;
6582 * Obtain statistic information from the meta page
6584 indexRel = index_open(index->indexoid, AccessShareLock);
6585 ginGetStats(indexRel, &ginStats);
6586 index_close(indexRel, AccessShareLock);
6588 numEntryPages = ginStats.nEntryPages;
6589 numDataPages = ginStats.nDataPages;
6590 numPendingPages = ginStats.nPendingPages;
6591 numEntries = ginStats.nEntries;
6594 * nPendingPages can be trusted, but the other fields are as of the last
6595 * VACUUM. Scale them by the ratio numPages / nTotalPages to account for
6596 * growth since then. If the fields are zero (implying no VACUUM at all,
6597 * and an index created pre-9.1), assume all pages are entry pages.
6599 if (ginStats.nTotalPages == 0 || ginStats.nEntryPages == 0)
6601 numEntryPages = numPages;
6603 numEntries = numTuples; /* bogus, but no other info available */
6607 double scale = numPages / ginStats.nTotalPages;
6609 numEntryPages = ceil(numEntryPages * scale);
6610 numDataPages = ceil(numDataPages * scale);
6611 numEntries = ceil(numEntries * scale);
6612 /* ensure we didn't round up too much */
6613 numEntryPages = Min(numEntryPages, numPages);
6614 numDataPages = Min(numDataPages, numPages - numEntryPages);
6617 /* In an empty index, numEntries could be zero. Avoid divide-by-zero */
6622 * Include predicate in selectivityQuals (should match
6623 * genericcostestimate)
6625 if (index->indpred != NIL)
6627 List *predExtraQuals = NIL;
6629 foreach(l, index->indpred)
6631 Node *predQual = (Node *) lfirst(l);
6632 List *oneQual = list_make1(predQual);
6634 if (!predicate_implied_by(oneQual, indexQuals))
6635 predExtraQuals = list_concat(predExtraQuals, oneQual);
6637 /* list_concat avoids modifying the passed-in indexQuals list */
6638 selectivityQuals = list_concat(predExtraQuals, indexQuals);
6641 selectivityQuals = indexQuals;
6643 /* Estimate the fraction of main-table tuples that will be visited */
6644 *indexSelectivity = clauselist_selectivity(root, selectivityQuals,
6649 /* fetch estimated page cost for schema containing index */
6650 get_tablespace_page_costs(index->reltablespace,
6651 &spc_random_page_cost,
6655 * Generic assumption about index correlation: there isn't any.
6657 *indexCorrelation = 0.0;
6660 * Examine quals to estimate number of search entries & partial matches
6662 foreach(l, indexQuals)
6664 RestrictInfo *rinfo = (RestrictInfo *) lfirst(l);
6675 bool *partial_matches = NULL;
6676 Pointer *extra_data = NULL;
6677 bool *nullFlags = NULL;
6678 int32 searchMode = GIN_SEARCH_MODE_DEFAULT;
6681 Assert(IsA(rinfo, RestrictInfo));
6682 clause = rinfo->clause;
6683 Assert(IsA(clause, OpExpr));
6684 leftop = get_leftop(clause);
6685 rightop = get_rightop(clause);
6686 clause_op = ((OpExpr *) clause)->opno;
6688 if ((indexcol = find_index_column(leftop, index)) >= 0)
6692 else if ((indexcol = find_index_column(rightop, index)) >= 0)
6695 clause_op = get_commutator(clause_op);
6699 elog(ERROR, "could not match index to operand");
6700 operand = NULL; /* keep compiler quiet */
6703 if (IsA(operand, RelabelType))
6704 operand = (Node *) ((RelabelType *) operand)->arg;
6707 * It's impossible to call extractQuery method for unknown operand. So
6708 * unless operand is a Const we can't do much; just assume there will
6709 * be one ordinary search entry from the operand at runtime.
6711 if (!IsA(operand, Const))
6713 searchEntriesInQuals++;
6717 /* If Const is null, there can be no matches */
6718 if (((Const *) operand)->constisnull)
6720 *indexStartupCost = 0;
6721 *indexTotalCost = 0;
6722 *indexSelectivity = 0;
6727 * Get the operator's strategy number and declared input data types
6728 * within the index opfamily. (We don't need the latter, but we use
6729 * get_op_opfamily_properties because it will throw error if it fails
6730 * to find a matching pg_amop entry.)
6732 get_op_opfamily_properties(clause_op, index->opfamily[indexcol], false,
6733 &strategy_op, &lefttype, &righttype);
6736 * GIN always uses the "default" support functions, which are those
6737 * with lefttype == righttype == the opclass' opcintype (see
6738 * IndexSupportInitialize in relcache.c).
6740 extractProcOid = get_opfamily_proc(index->opfamily[indexcol],
6741 index->opcintype[indexcol],
6742 index->opcintype[indexcol],
6743 GIN_EXTRACTQUERY_PROC);
6745 if (!OidIsValid(extractProcOid))
6747 /* should not happen; throw same error as index_getprocinfo */
6748 elog(ERROR, "missing support function %d for attribute %d of index \"%s\"",
6749 GIN_EXTRACTQUERY_PROC, indexcol + 1,
6750 get_rel_name(index->indexoid));
6753 OidFunctionCall7(extractProcOid,
6754 ((Const *) operand)->constvalue,
6755 PointerGetDatum(&nentries),
6756 UInt16GetDatum(strategy_op),
6757 PointerGetDatum(&partial_matches),
6758 PointerGetDatum(&extra_data),
6759 PointerGetDatum(&nullFlags),
6760 PointerGetDatum(&searchMode));
6762 if (nentries <= 0 && searchMode == GIN_SEARCH_MODE_DEFAULT)
6764 /* No match is possible */
6765 *indexStartupCost = 0;
6766 *indexTotalCost = 0;
6767 *indexSelectivity = 0;
6774 for (i = 0; i < nentries; i++)
6777 * For partial match we haven't any information to estimate
6778 * number of matched entries in index, so, we just estimate it
6781 if (partial_matches && partial_matches[i])
6782 partialEntriesInQuals += 100;
6784 exactEntriesInQuals++;
6786 searchEntriesInQuals++;
6790 if (searchMode == GIN_SEARCH_MODE_INCLUDE_EMPTY)
6792 /* Treat "include empty" like an exact-match item */
6793 exactEntriesInQuals++;
6794 searchEntriesInQuals++;
6796 else if (searchMode != GIN_SEARCH_MODE_DEFAULT)
6798 /* It's GIN_SEARCH_MODE_ALL */
6799 haveFullScan = true;
6803 if (haveFullScan || indexQuals == NIL)
6806 * Full index scan will be required. We treat this as if every key in
6807 * the index had been listed in the query; is that reasonable?
6809 searchEntriesInQuals = numEntries;
6812 /* Will we have more than one iteration of a nestloop scan? */
6813 if (outer_rel != NULL && outer_rel->rows > 1)
6814 num_scans = outer_rel->rows;
6819 * cost to begin scan, first of all, pay attention to pending list.
6821 entryPagesFetched = numPendingPages;
6824 * Estimate number of entry pages read. We need to do
6825 * searchEntriesInQuals searches. Use a power function as it should be,
6826 * but tuples on leaf pages usually is much greater. Here we include all
6827 * searches in entry tree, including search of first entry in partial
6830 entryPagesFetched += ceil(searchEntriesInQuals * rint(pow(numEntryPages, 0.15)));
6833 * Add an estimate of entry pages read by partial match algorithm. It's a
6834 * scan over leaf pages in entry tree. We haven't any useful stats here,
6835 * so estimate it as proportion.
6837 entryPagesFetched += ceil(numEntryPages * partialEntriesInQuals / numEntries);
6840 * Partial match algorithm reads all data pages before doing actual scan,
6841 * so it's a startup cost. Again, we havn't any useful stats here, so,
6842 * estimate it as proportion
6844 dataPagesFetched = ceil(numDataPages * partialEntriesInQuals / numEntries);
6846 /* calculate cache effects */
6847 if (num_scans > 1 || searchEntriesInQuals > 1)
6849 entryPagesFetched = index_pages_fetched(entryPagesFetched,
6850 (BlockNumber) numEntryPages,
6851 numEntryPages, root);
6852 dataPagesFetched = index_pages_fetched(dataPagesFetched,
6853 (BlockNumber) numDataPages,
6854 numDataPages, root);
6858 * Here we use random page cost because logically-close pages could be far
6861 *indexStartupCost = (entryPagesFetched + dataPagesFetched) * spc_random_page_cost;
6863 /* cost to scan data pages for each exact (non-partial) matched entry */
6864 dataPagesFetched = ceil(numDataPages * exactEntriesInQuals / numEntries);
6867 * Estimate number of data pages read, using selectivity estimation and
6868 * capacity of data page.
6870 dataPagesFetchedBySel = ceil(*indexSelectivity *
6871 (numTuples / (BLCKSZ / SizeOfIptrData)));
6873 if (dataPagesFetchedBySel > dataPagesFetched)
6876 * At least one of entries is very frequent and, unfortunately, we
6877 * couldn't get statistic about entries (only tsvector has such
6878 * statistics). So, we obviously have too small estimation of pages
6879 * fetched from data tree. Re-estimate it from known capacity of data
6882 dataPagesFetched = dataPagesFetchedBySel;
6886 dataPagesFetched = index_pages_fetched(dataPagesFetched,
6887 (BlockNumber) numDataPages,
6888 numDataPages, root);
6889 *indexTotalCost = *indexStartupCost +
6890 dataPagesFetched * spc_random_page_cost;
6893 * Add on index qual eval costs, much as in genericcostestimate
6895 cost_qual_eval(&index_qual_cost, indexQuals, root);
6896 qual_arg_cost = index_qual_cost.startup + index_qual_cost.per_tuple;
6897 cost_qual_eval(&index_qual_cost, indexOrderBys, root);
6898 qual_arg_cost += index_qual_cost.startup + index_qual_cost.per_tuple;
6899 qual_op_cost = cpu_operator_cost *
6900 (list_length(indexQuals) + list_length(indexOrderBys));
6901 qual_arg_cost -= qual_op_cost;
6902 if (qual_arg_cost < 0) /* just in case... */
6905 *indexStartupCost += qual_arg_cost;
6906 *indexTotalCost += qual_arg_cost;
6907 *indexTotalCost += (numTuples * *indexSelectivity) * (cpu_index_tuple_cost + qual_op_cost);