1 /*-------------------------------------------------------------------------
2  *
3  * selfuncs.c
4  *        Selectivity functions and index cost estimation functions for
5  *        standard operators and index access methods.
6  *
7  *        Selectivity routines are registered in the pg_operator catalog
8  *        in the "oprrest" and "oprjoin" attributes.
9  *
10  *        Index cost functions are located via the index AM's API struct,
11  *        which is obtained from the handler function registered in pg_am.
12  *
13  * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
14  * Portions Copyright (c) 1994, Regents of the University of California
15  *
16  *
17  * IDENTIFICATION
18  *        src/backend/utils/adt/selfuncs.c
19  *
20  *-------------------------------------------------------------------------
21  */
22
23 /*----------
24  * Operator selectivity estimation functions are called to estimate the
25  * selectivity of WHERE clauses whose top-level operator is their operator.
26  * We divide the problem into two cases:
27  *              Restriction clause estimation: the clause involves vars of just
28  *                      one relation.
29  *              Join clause estimation: the clause involves vars of multiple rels.
30  * Join selectivity estimation is far more difficult and usually less accurate
31  * than restriction estimation.
32  *
33  * When dealing with the inner scan of a nestloop join, we consider the
34  * join's joinclauses as restriction clauses for the inner relation, and
35  * treat vars of the outer relation as parameters (a/k/a constants of unknown
36  * values).  So, restriction estimators need to be able to accept an argument
37  * telling which relation is to be treated as the variable.
38  *
39  * The call convention for a restriction estimator (oprrest function) is
40  *
41  *              Selectivity oprrest (PlannerInfo *root,
42  *                                                       Oid operator,
43  *                                                       List *args,
44  *                                                       int varRelid);
45  *
46  * root: general information about the query (rtable and RelOptInfo lists
47  * are particularly important for the estimator).
48  * operator: OID of the specific operator in question.
49  * args: argument list from the operator clause.
50  * varRelid: if not zero, the relid (rtable index) of the relation to
51  * be treated as the variable relation.  May be zero if the args list
52  * is known to contain vars of only one relation.
53  *
54  * This is represented at the SQL level (in pg_proc) as
55  *
56  *              float8 oprrest (internal, oid, internal, int4);
57  *
58  * The result is a selectivity, that is, a fraction (0 to 1) of the rows
59  * of the relation that are expected to produce a TRUE result for the
60  * given operator.
61  *
62  * The call convention for a join estimator (oprjoin function) is similar
63  * except that varRelid is not needed, and instead join information is
64  * supplied:
65  *
66  *              Selectivity oprjoin (PlannerInfo *root,
67  *                                                       Oid operator,
68  *                                                       List *args,
69  *                                                       JoinType jointype,
70  *                                                       SpecialJoinInfo *sjinfo);
71  *
72  *              float8 oprjoin (internal, oid, internal, int2, internal);
73  *
74  * (Before Postgres 8.4, join estimators had only the first four of these
75  * parameters.  That signature is still allowed, but deprecated.)  The
76  * relationship between jointype and sjinfo is explained in the comments for
77  * clause_selectivity() --- the short version is that jointype is usually
78  * best ignored in favor of examining sjinfo.
79  *
80  * Join selectivity for regular inner and outer joins is defined as the
81  * fraction (0 to 1) of the cross product of the relations that is expected
82  * to produce a TRUE result for the given operator.  For both semi and anti
83  * joins, however, the selectivity is defined as the fraction of the left-hand
84  * side relation's rows that are expected to have a match (ie, at least one
85  * row with a TRUE result) in the right-hand side.
86  *
87  * For both oprrest and oprjoin functions, the operator's input collation OID
88  * (if any) is passed using the standard fmgr mechanism, so that the estimator
89  * function can fetch it with PG_GET_COLLATION().  Note, however, that all
90  * statistics in pg_statistic are currently built using the database's default
91  * collation.  Thus, in most cases where we are looking at statistics, we
92  * should ignore the actual operator collation and use DEFAULT_COLLATION_OID.
93  * We expect that the error induced by doing this is usually not large enough
94  * to justify complicating matters.
95  *----------
96  */
97
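/*
 * Example (added for illustration; the operator name "===" is hypothetical):
 * an estimator pair is attached to an operator at the SQL level through the
 * RESTRICT and JOIN clauses of CREATE OPERATOR, which fill the pg_operator
 * oprrest and oprjoin columns mentioned above:
 *
 *		CREATE OPERATOR === (
 *			LEFTARG = int4,
 *			RIGHTARG = int4,
 *			PROCEDURE = int4eq,
 *			COMMUTATOR = ===,
 *			RESTRICT = eqsel,
 *			JOIN = eqjoinsel
 *		);
 *
 * eqsel (below) and eqjoinsel implement the oprrest and oprjoin call
 * conventions described above.
 */
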
98 #include "postgres.h"
99
100 #include <ctype.h>
101 #include <float.h>
102 #include <math.h>
103
104 #include "access/gin.h"
105 #include "access/htup_details.h"
106 #include "access/sysattr.h"
107 #include "catalog/index.h"
108 #include "catalog/pg_am.h"
109 #include "catalog/pg_collation.h"
110 #include "catalog/pg_operator.h"
111 #include "catalog/pg_opfamily.h"
112 #include "catalog/pg_statistic.h"
113 #include "catalog/pg_statistic_ext.h"
114 #include "catalog/pg_type.h"
115 #include "executor/executor.h"
116 #include "mb/pg_wchar.h"
117 #include "nodes/makefuncs.h"
118 #include "nodes/nodeFuncs.h"
119 #include "optimizer/clauses.h"
120 #include "optimizer/cost.h"
121 #include "optimizer/pathnode.h"
122 #include "optimizer/paths.h"
123 #include "optimizer/plancat.h"
124 #include "optimizer/predtest.h"
125 #include "optimizer/restrictinfo.h"
126 #include "optimizer/var.h"
127 #include "parser/parse_clause.h"
128 #include "parser/parse_coerce.h"
129 #include "parser/parsetree.h"
130 #include "statistics/statistics.h"
131 #include "utils/builtins.h"
132 #include "utils/bytea.h"
133 #include "utils/date.h"
134 #include "utils/datum.h"
135 #include "utils/fmgroids.h"
136 #include "utils/index_selfuncs.h"
137 #include "utils/lsyscache.h"
138 #include "utils/nabstime.h"
139 #include "utils/pg_locale.h"
140 #include "utils/rel.h"
141 #include "utils/selfuncs.h"
142 #include "utils/spccache.h"
143 #include "utils/syscache.h"
144 #include "utils/timestamp.h"
145 #include "utils/tqual.h"
146 #include "utils/typcache.h"
147 #include "utils/varlena.h"
148
149
150 /* Hooks for plugins to get control when we ask for stats */
151 get_relation_stats_hook_type get_relation_stats_hook = NULL;
152 get_index_stats_hook_type get_index_stats_hook = NULL;
153
154 static double var_eq_const(VariableStatData *vardata, Oid operator,
155                          Datum constval, bool constisnull,
156                          bool varonleft);
157 static double var_eq_non_const(VariableStatData *vardata, Oid operator,
158                                  Node *other,
159                                  bool varonleft);
160 static double ineq_histogram_selectivity(PlannerInfo *root,
161                                                    VariableStatData *vardata,
162                                                    FmgrInfo *opproc, bool isgt,
163                                                    Datum constval, Oid consttype);
164 static double eqjoinsel_inner(Oid operator,
165                                 VariableStatData *vardata1, VariableStatData *vardata2);
166 static double eqjoinsel_semi(Oid operator,
167                            VariableStatData *vardata1, VariableStatData *vardata2,
168                            RelOptInfo *inner_rel);
169 static bool estimate_multivariate_ndistinct(PlannerInfo *root,
170                            RelOptInfo *rel, List **varinfos, double *ndistinct);
171 static bool convert_to_scalar(Datum value, Oid valuetypid, double *scaledvalue,
172                                   Datum lobound, Datum hibound, Oid boundstypid,
173                                   double *scaledlobound, double *scaledhibound);
174 static double convert_numeric_to_scalar(Datum value, Oid typid);
175 static void convert_string_to_scalar(char *value,
176                                                  double *scaledvalue,
177                                                  char *lobound,
178                                                  double *scaledlobound,
179                                                  char *hibound,
180                                                  double *scaledhibound);
181 static void convert_bytea_to_scalar(Datum value,
182                                                 double *scaledvalue,
183                                                 Datum lobound,
184                                                 double *scaledlobound,
185                                                 Datum hibound,
186                                                 double *scaledhibound);
187 static double convert_one_string_to_scalar(char *value,
188                                                          int rangelo, int rangehi);
189 static double convert_one_bytea_to_scalar(unsigned char *value, int valuelen,
190                                                         int rangelo, int rangehi);
191 static char *convert_string_datum(Datum value, Oid typid);
192 static double convert_timevalue_to_scalar(Datum value, Oid typid);
193 static void examine_simple_variable(PlannerInfo *root, Var *var,
194                                                 VariableStatData *vardata);
195 static bool get_variable_range(PlannerInfo *root, VariableStatData *vardata,
196                                    Oid sortop, Datum *min, Datum *max);
197 static bool get_actual_variable_range(PlannerInfo *root,
198                                                   VariableStatData *vardata,
199                                                   Oid sortop,
200                                                   Datum *min, Datum *max);
201 static RelOptInfo *find_join_input_rel(PlannerInfo *root, Relids relids);
202 static Selectivity prefix_selectivity(PlannerInfo *root,
203                                    VariableStatData *vardata,
204                                    Oid vartype, Oid opfamily, Const *prefixcon);
205 static Selectivity like_selectivity(const char *patt, int pattlen,
206                                  bool case_insensitive);
207 static Selectivity regex_selectivity(const char *patt, int pattlen,
208                                   bool case_insensitive,
209                                   int fixed_prefix_len);
210 static Datum string_to_datum(const char *str, Oid datatype);
211 static Const *string_to_const(const char *str, Oid datatype);
212 static Const *string_to_bytea_const(const char *str, size_t str_len);
213 static List *add_predicate_to_quals(IndexOptInfo *index, List *indexQuals);
214
215
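/*
 * Illustrative sketch (added for exposition, not part of the original file;
 * the function name and the 0.005 guess are hypothetical): the smallest
 * possible restriction estimator following the oprrest call convention
 * described in the header comment.  A real estimator, such as eqsel()
 * below, would inspect "args" and the statistics for the variable side
 * instead of returning a constant.
 */
#ifdef SELFUNCS_EXAMPLE_SKETCH
Datum
example_constant_restriction_sel(PG_FUNCTION_ARGS)
{
	PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
	Oid			operator = PG_GETARG_OID(1);
	List	   *args = (List *) PG_GETARG_POINTER(2);
	int			varRelid = PG_GETARG_INT32(3);

	/* A fixed guess: pretend 0.5% of the rows satisfy the clause. */
	(void) root;
	(void) operator;
	(void) args;
	(void) varRelid;
	PG_RETURN_FLOAT8((float8) 0.005);
}
#endif							/* SELFUNCS_EXAMPLE_SKETCH */
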
216 /*
217  *              eqsel                   - Selectivity of "=" for any data types.
218  *
219  * Note: this routine is also used to estimate selectivity for some
220  * operators that are not "=" but have comparable selectivity behavior,
221  * such as "~=" (geometric approximate-match).  Even for "=", we must
222  * keep in mind that the left and right datatypes may differ.
223  */
224 Datum
225 eqsel(PG_FUNCTION_ARGS)
226 {
227         PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
228         Oid                     operator = PG_GETARG_OID(1);
229         List       *args = (List *) PG_GETARG_POINTER(2);
230         int                     varRelid = PG_GETARG_INT32(3);
231         VariableStatData vardata;
232         Node       *other;
233         bool            varonleft;
234         double          selec;
235
236         /*
237          * If expression is not variable = something or something = variable, then
238          * punt and return a default estimate.
239          */
240         if (!get_restriction_variable(root, args, varRelid,
241                                                                   &vardata, &other, &varonleft))
242                 PG_RETURN_FLOAT8(DEFAULT_EQ_SEL);
243
244         /*
245          * We can do a lot better if the something is a constant.  (Note: the
246          * Const might result from estimation rather than being a simple constant
247          * in the query.)
248          */
249         if (IsA(other, Const))
250                 selec = var_eq_const(&vardata, operator,
251                                                          ((Const *) other)->constvalue,
252                                                          ((Const *) other)->constisnull,
253                                                          varonleft);
254         else
255                 selec = var_eq_non_const(&vardata, operator, other,
256                                                                  varonleft);
257
258         ReleaseVariableStats(vardata);
259
260         PG_RETURN_FLOAT8((float8) selec);
261 }
262
263 /*
264  * var_eq_const --- eqsel for var = const case
265  *
266  * This is split out so that some other estimation functions can use it.
267  */
268 static double
269 var_eq_const(VariableStatData *vardata, Oid operator,
270                          Datum constval, bool constisnull,
271                          bool varonleft)
272 {
273         double          selec;
274         bool            isdefault;
275
276         /*
277          * If the constant is NULL, assume operator is strict and return zero, ie,
278          * operator will never return TRUE.
279          */
280         if (constisnull)
281                 return 0.0;
282
283         /*
284          * If we matched the var to a unique index or DISTINCT clause, assume
285          * there is exactly one match regardless of anything else.  (This is
286          * slightly bogus, since the index or clause's equality operator might be
287          * different from ours, but it's much more likely to be right than
288          * ignoring the information.)
289          */
290         if (vardata->isunique && vardata->rel && vardata->rel->tuples >= 1.0)
291                 return 1.0 / vardata->rel->tuples;
292
293         if (HeapTupleIsValid(vardata->statsTuple))
294         {
295                 Form_pg_statistic stats;
296                 Datum      *values;
297                 int                     nvalues;
298                 float4     *numbers;
299                 int                     nnumbers;
300                 bool            match = false;
301                 int                     i;
302
303                 stats = (Form_pg_statistic) GETSTRUCT(vardata->statsTuple);
304
305                 /*
306                  * Is the constant "=" to any of the column's most common values?
307                  * (Although the given operator may not really be "=", we will assume
308                  * that seeing whether it returns TRUE is an appropriate test.  If you
309                  * don't like this, maybe you shouldn't be using eqsel for your
310                  * operator...)
311                  */
312                 if (get_attstatsslot(vardata->statsTuple,
313                                                          vardata->atttype, vardata->atttypmod,
314                                                          STATISTIC_KIND_MCV, InvalidOid,
315                                                          NULL,
316                                                          &values, &nvalues,
317                                                          &numbers, &nnumbers))
318                 {
319                         FmgrInfo        eqproc;
320
321                         fmgr_info(get_opcode(operator), &eqproc);
322
323                         for (i = 0; i < nvalues; i++)
324                         {
325                                 /* be careful to apply operator right way 'round */
326                                 if (varonleft)
327                                         match = DatumGetBool(FunctionCall2Coll(&eqproc,
328                                                                                                            DEFAULT_COLLATION_OID,
329                                                                                                                    values[i],
330                                                                                                                    constval));
331                                 else
332                                         match = DatumGetBool(FunctionCall2Coll(&eqproc,
333                                                                                                            DEFAULT_COLLATION_OID,
334                                                                                                                    constval,
335                                                                                                                    values[i]));
336                                 if (match)
337                                         break;
338                         }
339                 }
340                 else
341                 {
342                         /* no most-common-value info available */
343                         values = NULL;
344                         numbers = NULL;
345                         i = nvalues = nnumbers = 0;
346                 }
347
348                 if (match)
349                 {
350                         /*
351                          * Constant is "=" to this common value.  We know selectivity
352                          * exactly (or as exactly as ANALYZE could calculate it, anyway).
353                          */
354                         selec = numbers[i];
355                 }
356                 else
357                 {
358                         /*
359                          * Comparison is against a constant that is neither NULL nor any
360                          * of the common values.  Its selectivity cannot be more than
361                          * this:
362                          */
363                         double          sumcommon = 0.0;
364                         double          otherdistinct;
365
366                         for (i = 0; i < nnumbers; i++)
367                                 sumcommon += numbers[i];
368                         selec = 1.0 - sumcommon - stats->stanullfrac;
369                         CLAMP_PROBABILITY(selec);
370
371                         /*
372                          * and in fact it's probably a good deal less. We approximate that
373                          * all the not-common values share this remaining fraction
374                          * equally, so we divide by the number of other distinct values.
375                          */
376                         otherdistinct = get_variable_numdistinct(vardata, &isdefault) - nnumbers;
377                         if (otherdistinct > 1)
378                                 selec /= otherdistinct;
379
380                         /*
381                          * Another cross-check: selectivity shouldn't be estimated as more
382                          * than the least common "most common value".
383                          */
384                         if (nnumbers > 0 && selec > numbers[nnumbers - 1])
385                                 selec = numbers[nnumbers - 1];
386                 }
387
388                 free_attstatsslot(vardata->atttype, values, nvalues,
389                                                   numbers, nnumbers);
390         }
391         else
392         {
393                 /*
394                  * No ANALYZE stats available, so make a guess using estimated number
395                  * of distinct values and assuming they are equally common. (The guess
396                  * is unlikely to be very good, but we do know a few special cases.)
397                  */
398                 selec = 1.0 / get_variable_numdistinct(vardata, &isdefault);
399         }
400
401         /* result should be in range, but make sure... */
402         CLAMP_PROBABILITY(selec);
403
404         return selec;
405 }
406
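/*
 * Worked example for var_eq_const's non-MCV branch (illustrative figures
 * only): suppose stanullfrac = 0.10, the MCV list has 4 entries whose
 * frequencies sum to 0.60 with the least common at 0.02, and ndistinct is
 * estimated at 104.  The non-MCV, non-null fraction is 1.0 - 0.60 - 0.10 =
 * 0.30, spread over 104 - 4 = 100 other distinct values, giving selec =
 * 0.003.  Since 0.003 < 0.02, the cross-check against the least common MCV
 * does not kick in and 0.003 is returned.
 */
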
407 /*
408  * var_eq_non_const --- eqsel for var = something-other-than-const case
409  */
410 static double
411 var_eq_non_const(VariableStatData *vardata, Oid operator,
412                                  Node *other,
413                                  bool varonleft)
414 {
415         double          selec;
416         bool            isdefault;
417
418         /*
419          * If we matched the var to a unique index or DISTINCT clause, assume
420          * there is exactly one match regardless of anything else.  (This is
421          * slightly bogus, since the index or clause's equality operator might be
422          * different from ours, but it's much more likely to be right than
423          * ignoring the information.)
424          */
425         if (vardata->isunique && vardata->rel && vardata->rel->tuples >= 1.0)
426                 return 1.0 / vardata->rel->tuples;
427
428         if (HeapTupleIsValid(vardata->statsTuple))
429         {
430                 Form_pg_statistic stats;
431                 double          ndistinct;
432                 float4     *numbers;
433                 int                     nnumbers;
434
435                 stats = (Form_pg_statistic) GETSTRUCT(vardata->statsTuple);
436
437                 /*
438                  * Search is for a value that we do not know a priori, but we will
439                  * assume it is not NULL.  Estimate the selectivity as non-null
440                  * fraction divided by number of distinct values, so that we get a
441                  * result averaged over all possible values whether common or
442                  * uncommon.  (Essentially, we are assuming that the not-yet-known
443                  * comparison value is equally likely to be any of the possible
444                  * values, regardless of their frequency in the table.  Is that a good
445                  * idea?)
446                  */
447                 selec = 1.0 - stats->stanullfrac;
448                 ndistinct = get_variable_numdistinct(vardata, &isdefault);
449                 if (ndistinct > 1)
450                         selec /= ndistinct;
451
452                 /*
453                  * Cross-check: selectivity should never be estimated as more than the
454                  * most common value's.
455                  */
456                 if (get_attstatsslot(vardata->statsTuple,
457                                                          vardata->atttype, vardata->atttypmod,
458                                                          STATISTIC_KIND_MCV, InvalidOid,
459                                                          NULL,
460                                                          NULL, NULL,
461                                                          &numbers, &nnumbers))
462                 {
463                         if (nnumbers > 0 && selec > numbers[0])
464                                 selec = numbers[0];
465                         free_attstatsslot(vardata->atttype, NULL, 0, numbers, nnumbers);
466                 }
467         }
468         else
469         {
470                 /*
471                  * No ANALYZE stats available, so make a guess using estimated number
472                  * of distinct values and assuming they are equally common. (The guess
473                  * is unlikely to be very good, but we do know a few special cases.)
474                  */
475                 selec = 1.0 / get_variable_numdistinct(vardata, &isdefault);
476         }
477
478         /* result should be in range, but make sure... */
479         CLAMP_PROBABILITY(selec);
480
481         return selec;
482 }
483
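/*
 * Worked example for var_eq_non_const's statistics branch (illustrative
 * figures only): with stanullfrac = 0.25 and ndistinct = 50, the estimate
 * is (1.0 - 0.25) / 50 = 0.015.  If the most common value's frequency were
 * only 0.010, the cross-check would cap the result at 0.010.
 */
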
484 /*
485  *              neqsel                  - Selectivity of "!=" for any data types.
486  *
487  * This routine is also used for some operators that are not "!="
488  * but have comparable selectivity behavior.  See above comments
489  * for eqsel().
490  */
491 Datum
492 neqsel(PG_FUNCTION_ARGS)
493 {
494         PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
495         Oid                     operator = PG_GETARG_OID(1);
496         List       *args = (List *) PG_GETARG_POINTER(2);
497         int                     varRelid = PG_GETARG_INT32(3);
498         Oid                     eqop;
499         float8          result;
500
501         /*
502          * We want 1 - eqsel() where the equality operator is the one associated
503          * with this != operator, that is, its negator.
504          */
505         eqop = get_negator(operator);
506         if (eqop)
507         {
508                 result = DatumGetFloat8(DirectFunctionCall4(eqsel,
509                                                                                                         PointerGetDatum(root),
510                                                                                                         ObjectIdGetDatum(eqop),
511                                                                                                         PointerGetDatum(args),
512                                                                                                         Int32GetDatum(varRelid)));
513         }
514         else
515         {
516                 /* Use default selectivity (should we raise an error instead?) */
517                 result = DEFAULT_EQ_SEL;
518         }
519         result = 1.0 - result;
520         PG_RETURN_FLOAT8(result);
521 }
522
523 /*
524  *      scalarineqsel           - Selectivity of "<", "<=", ">", ">=" for scalars.
525  *
526  * This is the guts of both scalarltsel and scalargtsel.  The caller has
527  * commuted the clause, if necessary, so that we can treat the variable as
528  * being on the left.  The caller must also make sure that the other side
529  * of the clause is a non-null Const, and dissect same into a value and
530  * datatype.
531  *
532  * This routine works for any datatype (or pair of datatypes) known to
533  * convert_to_scalar().  If it is applied to some other datatype,
534  * it will return a default estimate.
535  */
536 static double
537 scalarineqsel(PlannerInfo *root, Oid operator, bool isgt,
538                           VariableStatData *vardata, Datum constval, Oid consttype)
539 {
540         Form_pg_statistic stats;
541         FmgrInfo        opproc;
542         double          mcv_selec,
543                                 hist_selec,
544                                 sumcommon;
545         double          selec;
546
547         if (!HeapTupleIsValid(vardata->statsTuple))
548         {
549                 /* no stats available, so default result */
550                 return DEFAULT_INEQ_SEL;
551         }
552         stats = (Form_pg_statistic) GETSTRUCT(vardata->statsTuple);
553
554         fmgr_info(get_opcode(operator), &opproc);
555
556         /*
557          * If we have most-common-values info, add up the fractions of the MCV
558          * entries that satisfy MCV OP CONST.  These fractions contribute directly
559          * to the result selectivity.  Also add up the total fraction represented
560          * by MCV entries.
561          */
562         mcv_selec = mcv_selectivity(vardata, &opproc, constval, true,
563                                                                 &sumcommon);
564
565         /*
566          * If there is a histogram, determine which bin the constant falls in, and
567          * compute the resulting contribution to selectivity.
568          */
569         hist_selec = ineq_histogram_selectivity(root, vardata, &opproc, isgt,
570                                                                                         constval, consttype);
571
572         /*
573          * Now merge the results from the MCV and histogram calculations,
574          * realizing that the histogram covers only the non-null values that are
575          * not listed in MCV.
576          */
577         selec = 1.0 - stats->stanullfrac - sumcommon;
578
579         if (hist_selec >= 0.0)
580                 selec *= hist_selec;
581         else
582         {
583                 /*
584                  * If no histogram but there are values not accounted for by MCV,
585                  * arbitrarily assume half of them will match.
586                  */
587                 selec *= 0.5;
588         }
589
590         selec += mcv_selec;
591
592         /* result should be in range, but make sure... */
593         CLAMP_PROBABILITY(selec);
594
595         return selec;
596 }
597
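/*
 * Worked example of how scalarineqsel combines its pieces (illustrative
 * figures only): with stanullfrac = 0.05, MCV entries covering sumcommon =
 * 0.20 of which mcv_selec = 0.08 satisfy the inequality, and hist_selec =
 * 0.25, the estimate is (1.0 - 0.05 - 0.20) * 0.25 + 0.08 = 0.2675.
 * Without a histogram, the 0.25 factor would be replaced by the arbitrary
 * 0.5.
 */
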
598 /*
599  *      mcv_selectivity                 - Examine the MCV list for selectivity estimates
600  *
601  * Determine the fraction of the variable's MCV population that satisfies
602  * the predicate (VAR OP CONST), or (CONST OP VAR) if !varonleft.  Also
603  * compute the fraction of the total column population represented by the MCV
604  * list.  This code will work for any boolean-returning predicate operator.
605  *
606  * The function result is the MCV selectivity, and the fraction of the
607  * total population is returned into *sumcommonp.  Zeroes are returned
608  * if there is no MCV list.
609  */
610 double
611 mcv_selectivity(VariableStatData *vardata, FmgrInfo *opproc,
612                                 Datum constval, bool varonleft,
613                                 double *sumcommonp)
614 {
615         double          mcv_selec,
616                                 sumcommon;
617         Datum      *values;
618         int                     nvalues;
619         float4     *numbers;
620         int                     nnumbers;
621         int                     i;
622
623         mcv_selec = 0.0;
624         sumcommon = 0.0;
625
626         if (HeapTupleIsValid(vardata->statsTuple) &&
627                 get_attstatsslot(vardata->statsTuple,
628                                                  vardata->atttype, vardata->atttypmod,
629                                                  STATISTIC_KIND_MCV, InvalidOid,
630                                                  NULL,
631                                                  &values, &nvalues,
632                                                  &numbers, &nnumbers))
633         {
634                 for (i = 0; i < nvalues; i++)
635                 {
636                         if (varonleft ?
637                                 DatumGetBool(FunctionCall2Coll(opproc,
638                                                                                            DEFAULT_COLLATION_OID,
639                                                                                            values[i],
640                                                                                            constval)) :
641                                 DatumGetBool(FunctionCall2Coll(opproc,
642                                                                                            DEFAULT_COLLATION_OID,
643                                                                                            constval,
644                                                                                            values[i])))
645                                 mcv_selec += numbers[i];
646                         sumcommon += numbers[i];
647                 }
648                 free_attstatsslot(vardata->atttype, values, nvalues,
649                                                   numbers, nnumbers);
650         }
651
652         *sumcommonp = sumcommon;
653         return mcv_selec;
654 }
655
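/*
 * Worked example for mcv_selectivity (illustrative figures only): for an
 * MCV list with frequencies {0.20, 0.10, 0.05}, where only the first and
 * third entries satisfy VAR OP CONST, the function returns 0.25 and sets
 * *sumcommonp to 0.35, i.e. the fraction of the column population covered
 * by the MCV list at all.
 */
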
656 /*
657  *      histogram_selectivity   - Examine the histogram for selectivity estimates
658  *
659  * Determine the fraction of the variable's histogram entries that satisfy
660  * the predicate (VAR OP CONST), or (CONST OP VAR) if !varonleft.
661  *
662  * This code will work for any boolean-returning predicate operator, whether
663  * or not it has anything to do with the histogram sort operator.  We are
664  * essentially using the histogram just as a representative sample.  However,
665  * small histograms are unlikely to be all that representative, so the caller
666  * should be prepared to fall back on some other estimation approach when the
667  * histogram is missing or very small.  It may also be prudent to combine this
668  * approach with another one when the histogram is small.
669  *
670  * If the actual histogram size is not at least min_hist_size, we won't bother
671  * to do the calculation at all.  Also, if the n_skip parameter is > 0, we
672  * ignore the first and last n_skip histogram elements, on the grounds that
673  * they are outliers and hence not very representative.  Typical values for
674  * these parameters are 10 and 1.
675  *
676  * The function result is the selectivity, or -1 if there is no histogram
677  * or it's smaller than min_hist_size.
678  *
679  * The output parameter *hist_size receives the actual histogram size,
680  * or zero if no histogram.  Callers may use this number to decide how
681  * much faith to put in the function result.
682  *
683  * Note that the result disregards both the most-common-values (if any) and
684  * null entries.  The caller is expected to combine this result with
685  * statistics for those portions of the column population.  It may also be
686  * prudent to clamp the result range, ie, disbelieve exact 0 or 1 outputs.
687  */
688 double
689 histogram_selectivity(VariableStatData *vardata, FmgrInfo *opproc,
690                                           Datum constval, bool varonleft,
691                                           int min_hist_size, int n_skip,
692                                           int *hist_size)
693 {
694         double          result;
695         Datum      *values;
696         int                     nvalues;
697
698         /* check sanity of parameters */
699         Assert(n_skip >= 0);
700         Assert(min_hist_size > 2 * n_skip);
701
702         if (HeapTupleIsValid(vardata->statsTuple) &&
703                 get_attstatsslot(vardata->statsTuple,
704                                                  vardata->atttype, vardata->atttypmod,
705                                                  STATISTIC_KIND_HISTOGRAM, InvalidOid,
706                                                  NULL,
707                                                  &values, &nvalues,
708                                                  NULL, NULL))
709         {
710                 *hist_size = nvalues;
711                 if (nvalues >= min_hist_size)
712                 {
713                         int                     nmatch = 0;
714                         int                     i;
715
716                         for (i = n_skip; i < nvalues - n_skip; i++)
717                         {
718                                 if (varonleft ?
719                                         DatumGetBool(FunctionCall2Coll(opproc,
720                                                                                                    DEFAULT_COLLATION_OID,
721                                                                                                    values[i],
722                                                                                                    constval)) :
723                                         DatumGetBool(FunctionCall2Coll(opproc,
724                                                                                                    DEFAULT_COLLATION_OID,
725                                                                                                    constval,
726                                                                                                    values[i])))
727                                         nmatch++;
728                         }
729                         result = ((double) nmatch) / ((double) (nvalues - 2 * n_skip));
730                 }
731                 else
732                         result = -1;
733                 free_attstatsslot(vardata->atttype, values, nvalues, NULL, 0);
734         }
735         else
736         {
737                 *hist_size = 0;
738                 result = -1;
739         }
740
741         return result;
742 }
743
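/*
 * Worked example for histogram_selectivity (illustrative figures only):
 * with the typical parameters min_hist_size = 10 and n_skip = 1, a
 * 100-entry histogram is sampled over its middle 98 entries; if 30 of them
 * satisfy the predicate, the result is 30 / 98, roughly 0.306.  A histogram
 * with fewer than 10 entries would yield -1, with *hist_size reporting its
 * actual size.
 */
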
744 /*
745  *      ineq_histogram_selectivity      - Examine the histogram for scalarineqsel
746  *
747  * Determine the fraction of the variable's histogram population that
748  * satisfies the inequality condition, ie, VAR < CONST or VAR > CONST.
749  *
750  * Returns -1 if there is no histogram (valid results will always be >= 0).
751  *
752  * Note that the result disregards both the most-common-values (if any) and
753  * null entries.  The caller is expected to combine this result with
754  * statistics for those portions of the column population.
755  */
756 static double
757 ineq_histogram_selectivity(PlannerInfo *root,
758                                                    VariableStatData *vardata,
759                                                    FmgrInfo *opproc, bool isgt,
760                                                    Datum constval, Oid consttype)
761 {
762         double          hist_selec;
763         Oid                     hist_op;
764         Datum      *values;
765         int                     nvalues;
766
767         hist_selec = -1.0;
768
769         /*
770          * Someday, ANALYZE might store more than one histogram per rel/att,
771          * corresponding to more than one possible sort ordering defined for the
772          * column type.  However, to make that work we will need to figure out
773          * which staop to search for --- it's not necessarily the one we have at
774          * hand!  (For example, we might have a '<=' operator rather than the '<'
775          * operator that will appear in staop.)  For now, assume that whatever
776          * appears in pg_statistic is sorted the same way our operator sorts, or
777          * the reverse way if isgt is TRUE.
778          */
779         if (HeapTupleIsValid(vardata->statsTuple) &&
780                 get_attstatsslot(vardata->statsTuple,
781                                                  vardata->atttype, vardata->atttypmod,
782                                                  STATISTIC_KIND_HISTOGRAM, InvalidOid,
783                                                  &hist_op,
784                                                  &values, &nvalues,
785                                                  NULL, NULL))
786         {
787                 if (nvalues > 1)
788                 {
789                         /*
790                          * Use binary search to find proper location, ie, the first slot
791                          * at which the comparison fails.  (If the given operator isn't
792                          * actually sort-compatible with the histogram, you'll get garbage
793                          * results ... but probably not any more garbage-y than you would
794                          * from the old linear search.)
795                          *
796                          * If the binary search accesses the first or last histogram
797                          * entry, we try to replace that endpoint with the true column min
798                          * or max as found by get_actual_variable_range().  This
799                          * ameliorates misestimates when the min or max is moving as a
800                          * result of changes since the last ANALYZE.  Note that this could
801                          * result in effectively including MCVs into the histogram that
802                          * weren't there before, but we don't try to correct for that.
803                          */
804                         double          histfrac;
805                         int                     lobound = 0;    /* first possible slot to search */
806                         int                     hibound = nvalues;              /* last+1 slot to search */
807                         bool            have_end = false;
808
809                         /*
810                          * If there are only two histogram entries, we'll want up-to-date
811                          * values for both.  (If there are more than two, we need at most
812                          * one of them to be updated, so we deal with that within the
813                          * loop.)
814                          */
815                         if (nvalues == 2)
816                                 have_end = get_actual_variable_range(root,
817                                                                                                          vardata,
818                                                                                                          hist_op,
819                                                                                                          &values[0],
820                                                                                                          &values[1]);
821
822                         while (lobound < hibound)
823                         {
824                                 int                     probe = (lobound + hibound) / 2;
825                                 bool            ltcmp;
826
827                                 /*
828                                  * If we find ourselves about to compare to the first or last
829                                  * histogram entry, first try to replace it with the actual
830                                  * current min or max (unless we already did so above).
831                                  */
832                                 if (probe == 0 && nvalues > 2)
833                                         have_end = get_actual_variable_range(root,
834                                                                                                                  vardata,
835                                                                                                                  hist_op,
836                                                                                                                  &values[0],
837                                                                                                                  NULL);
838                                 else if (probe == nvalues - 1 && nvalues > 2)
839                                         have_end = get_actual_variable_range(root,
840                                                                                                                  vardata,
841                                                                                                                  hist_op,
842                                                                                                                  NULL,
843                                                                                                                  &values[probe]);
844
845                                 ltcmp = DatumGetBool(FunctionCall2Coll(opproc,
846                                                                                                            DEFAULT_COLLATION_OID,
847                                                                                                            values[probe],
848                                                                                                            constval));
849                                 if (isgt)
850                                         ltcmp = !ltcmp;
851                                 if (ltcmp)
852                                         lobound = probe + 1;
853                                 else
854                                         hibound = probe;
855                         }
856
857                         if (lobound <= 0)
858                         {
859                                 /* Constant is below lower histogram boundary. */
860                                 histfrac = 0.0;
861                         }
862                         else if (lobound >= nvalues)
863                         {
864                                 /* Constant is above upper histogram boundary. */
865                                 histfrac = 1.0;
866                         }
867                         else
868                         {
869                                 int                     i = lobound;
870                                 double          val,
871                                                         high,
872                                                         low;
873                                 double          binfrac;
874
875                                 /*
876                                  * We have values[i-1] <= constant <= values[i].
877                                  *
878                                  * Convert the constant and the two nearest bin boundary
879                                  * values to a uniform comparison scale, and do a linear
880                                  * interpolation within this bin.
881                                  */
882                                 if (convert_to_scalar(constval, consttype, &val,
883                                                                           values[i - 1], values[i],
884                                                                           vardata->vartype,
885                                                                           &low, &high))
886                                 {
887                                         if (high <= low)
888                                         {
889                                                 /* cope if bin boundaries appear identical */
890                                                 binfrac = 0.5;
891                                         }
892                                         else if (val <= low)
893                                                 binfrac = 0.0;
894                                         else if (val >= high)
895                                                 binfrac = 1.0;
896                                         else
897                                         {
898                                                 binfrac = (val - low) / (high - low);
899
900                                                 /*
901                                                  * Watch out for the possibility that we got a NaN or
902                                                  * Infinity from the division.  This can happen
903                                                  * despite the previous checks, if for example "low"
904                                                  * is -Infinity.
905                                                  */
906                                                 if (isnan(binfrac) ||
907                                                         binfrac < 0.0 || binfrac > 1.0)
908                                                         binfrac = 0.5;
909                                         }
910                                 }
911                                 else
912                                 {
913                                         /*
914                                          * Ideally we'd produce an error here, on the grounds that
915                                          * the given operator shouldn't have scalarXXsel
916                                          * registered as its selectivity func unless we can deal
917                                          * with its operand types.  But currently, all manner of
918                                          * stuff is invoking scalarXXsel, so give a default
919                                          * estimate until that can be fixed.
920                                          */
921                                         binfrac = 0.5;
922                                 }
923
924                                 /*
925                                  * Now, compute the overall selectivity across the values
926                                  * represented by the histogram.  We have i-1 full bins and
927                                  * binfrac partial bin below the constant.
928                                  */
929                                 histfrac = (double) (i - 1) + binfrac;
930                                 histfrac /= (double) (nvalues - 1);
931                         }
932
933                         /*
934                          * Now histfrac = fraction of histogram entries below the
935                          * constant.
936                          *
937                          * Account for "<" vs ">"
938                          */
939                         hist_selec = isgt ? (1.0 - histfrac) : histfrac;
940
941                         /*
942                          * The histogram boundaries are only approximate to begin with,
943                          * and may well be out of date anyway.  Therefore, don't believe
944                          * extremely small or large selectivity estimates --- unless we
945                          * got actual current endpoint values from the table.
946                          */
947                         if (have_end)
948                                 CLAMP_PROBABILITY(hist_selec);
949                         else
950                         {
951                                 if (hist_selec < 0.0001)
952                                         hist_selec = 0.0001;
953                                 else if (hist_selec > 0.9999)
954                                         hist_selec = 0.9999;
955                         }
956                 }
957
958                 free_attstatsslot(vardata->atttype, values, nvalues, NULL, 0);
959         }
960
961         return hist_selec;
962 }
963
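/*
 * Worked example for ineq_histogram_selectivity (illustrative figures
 * only): with 101 histogram boundary values (100 bins), suppose the binary
 * search ends with lobound = 43, so values[42] <= constant <= values[43],
 * and linear interpolation within that bin gives binfrac = 0.5.  Then
 * histfrac = (43 - 1 + 0.5) / 100 = 0.425; for a "<" operator the raw
 * estimate is 0.425, and for ">" it is 1.0 - 0.425 = 0.575, subject to the
 * 0.0001..0.9999 clamp above unless true endpoint values were fetched from
 * the table.
 */
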
964 /*
965  *              scalarltsel             - Selectivity of "<" (also "<=") for scalars.
966  */
967 Datum
968 scalarltsel(PG_FUNCTION_ARGS)
969 {
970         PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
971         Oid                     operator = PG_GETARG_OID(1);
972         List       *args = (List *) PG_GETARG_POINTER(2);
973         int                     varRelid = PG_GETARG_INT32(3);
974         VariableStatData vardata;
975         Node       *other;
976         bool            varonleft;
977         Datum           constval;
978         Oid                     consttype;
979         bool            isgt;
980         double          selec;
981
982         /*
983          * If expression is not variable op something or something op variable,
984          * then punt and return a default estimate.
985          */
986         if (!get_restriction_variable(root, args, varRelid,
987                                                                   &vardata, &other, &varonleft))
988                 PG_RETURN_FLOAT8(DEFAULT_INEQ_SEL);
989
990         /*
991          * Can't do anything useful if the something is not a constant, either.
992          */
993         if (!IsA(other, Const))
994         {
995                 ReleaseVariableStats(vardata);
996                 PG_RETURN_FLOAT8(DEFAULT_INEQ_SEL);
997         }
998
999         /*
1000          * If the constant is NULL, assume operator is strict and return zero, ie,
1001          * operator will never return TRUE.
1002          */
1003         if (((Const *) other)->constisnull)
1004         {
1005                 ReleaseVariableStats(vardata);
1006                 PG_RETURN_FLOAT8(0.0);
1007         }
1008         constval = ((Const *) other)->constvalue;
1009         consttype = ((Const *) other)->consttype;
1010
1011         /*
1012          * Force the var to be on the left to simplify logic in scalarineqsel.
1013          */
1014         if (varonleft)
1015         {
1016                 /* we have var < other */
1017                 isgt = false;
1018         }
1019         else
1020         {
1021                 /* we have other < var, commute to make var > other */
1022                 operator = get_commutator(operator);
1023                 if (!operator)
1024                 {
1025                         /* Use default selectivity (should we raise an error instead?) */
1026                         ReleaseVariableStats(vardata);
1027                         PG_RETURN_FLOAT8(DEFAULT_INEQ_SEL);
1028                 }
1029                 isgt = true;
1030         }
1031
1032         selec = scalarineqsel(root, operator, isgt, &vardata, constval, consttype);
1033
1034         ReleaseVariableStats(vardata);
1035
1036         PG_RETURN_FLOAT8((float8) selec);
1037 }
1038
1039 /*
1040  *		scalargtsel		- Selectivity of ">" (also ">=") for scalars.
1041  */
1042 Datum
1043 scalargtsel(PG_FUNCTION_ARGS)
1044 {
1045         PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
1046         Oid                     operator = PG_GETARG_OID(1);
1047         List       *args = (List *) PG_GETARG_POINTER(2);
1048         int                     varRelid = PG_GETARG_INT32(3);
1049         VariableStatData vardata;
1050         Node       *other;
1051         bool            varonleft;
1052         Datum           constval;
1053         Oid                     consttype;
1054         bool            isgt;
1055         double          selec;
1056
1057         /*
1058          * If expression is not variable op something or something op variable,
1059          * then punt and return a default estimate.
1060          */
1061         if (!get_restriction_variable(root, args, varRelid,
1062                                                                   &vardata, &other, &varonleft))
1063                 PG_RETURN_FLOAT8(DEFAULT_INEQ_SEL);
1064
1065         /*
1066          * Can't do anything useful if the something is not a constant, either.
1067          */
1068         if (!IsA(other, Const))
1069         {
1070                 ReleaseVariableStats(vardata);
1071                 PG_RETURN_FLOAT8(DEFAULT_INEQ_SEL);
1072         }
1073
1074         /*
1075          * If the constant is NULL, assume operator is strict and return zero, ie,
1076          * operator will never return TRUE.
1077          */
1078         if (((Const *) other)->constisnull)
1079         {
1080                 ReleaseVariableStats(vardata);
1081                 PG_RETURN_FLOAT8(0.0);
1082         }
1083         constval = ((Const *) other)->constvalue;
1084         consttype = ((Const *) other)->consttype;
1085
1086         /*
1087          * Force the var to be on the left to simplify logic in scalarineqsel.
1088          */
1089         if (varonleft)
1090         {
1091                 /* we have var > other */
1092                 isgt = true;
1093         }
1094         else
1095         {
1096                 /* we have other > var, commute to make var < other */
1097                 operator = get_commutator(operator);
1098                 if (!operator)
1099                 {
1100                         /* Use default selectivity (should we raise an error instead?) */
1101                         ReleaseVariableStats(vardata);
1102                         PG_RETURN_FLOAT8(DEFAULT_INEQ_SEL);
1103                 }
1104                 isgt = false;
1105         }
1106
1107         selec = scalarineqsel(root, operator, isgt, &vardata, constval, consttype);
1108
1109         ReleaseVariableStats(vardata);
1110
1111         PG_RETURN_FLOAT8((float8) selec);
1112 }
1113
1114 /*
1115  * patternsel                   - Generic code for pattern-match selectivity.
1116  */
1117 static double
1118 patternsel(PG_FUNCTION_ARGS, Pattern_Type ptype, bool negate)
1119 {
1120         PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
1121         Oid                     operator = PG_GETARG_OID(1);
1122         List       *args = (List *) PG_GETARG_POINTER(2);
1123         int                     varRelid = PG_GETARG_INT32(3);
1124         Oid                     collation = PG_GET_COLLATION();
1125         VariableStatData vardata;
1126         Node       *other;
1127         bool            varonleft;
1128         Datum           constval;
1129         Oid                     consttype;
1130         Oid                     vartype;
1131         Oid                     opfamily;
1132         Pattern_Prefix_Status pstatus;
1133         Const      *patt;
1134         Const      *prefix = NULL;
1135         Selectivity rest_selec = 0;
1136         double          result;
1137
1138         /*
1139          * If this is for a NOT LIKE or similar operator, get the corresponding
1140          * positive-match operator and work with that.  Set result to the correct
1141          * default estimate, too.
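              *
              * For instance (illustrative clause): for "col !~~ 'foo%'" (NOT LIKE),
              * the negator lookup yields the positive LIKE operator "~~"; we
              * estimate "col ~~ 'foo%'" and invert the answer in the final
              * return at the bottom of this function.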
1142          */
1143         if (negate)
1144         {
1145                 operator = get_negator(operator);
1146                 if (!OidIsValid(operator))
1147                         elog(ERROR, "patternsel called for operator without a negator");
1148                 result = 1.0 - DEFAULT_MATCH_SEL;
1149         }
1150         else
1151         {
1152                 result = DEFAULT_MATCH_SEL;
1153         }
1154
1155         /*
1156          * If expression is not variable op constant, then punt and return a
1157          * default estimate.
1158          */
1159         if (!get_restriction_variable(root, args, varRelid,
1160                                                                   &vardata, &other, &varonleft))
1161                 return result;
1162         if (!varonleft || !IsA(other, Const))
1163         {
1164                 ReleaseVariableStats(vardata);
1165                 return result;
1166         }
1167
1168         /*
1169          * If the constant is NULL, assume operator is strict and return zero, ie,
1170          * operator will never return TRUE.  (It's zero even for a negator op.)
1171          */
1172         if (((Const *) other)->constisnull)
1173         {
1174                 ReleaseVariableStats(vardata);
1175                 return 0.0;
1176         }
1177         constval = ((Const *) other)->constvalue;
1178         consttype = ((Const *) other)->consttype;
1179
1180         /*
1181          * The right-hand const is type text or bytea for all supported operators.
1182          * We do not expect to see binary-compatible types here, since
1183          * const-folding should have relabeled the const to exactly match the
1184          * operator's declared type.
1185          */
1186         if (consttype != TEXTOID && consttype != BYTEAOID)
1187         {
1188                 ReleaseVariableStats(vardata);
1189                 return result;
1190         }
1191
1192         /*
1193          * Similarly, the exposed type of the left-hand side should be one of
1194          * those we know.  (Do not look at vardata.atttype, which might be
1195          * something binary-compatible but different.)  We can use it to choose
1196          * the index opfamily from which we must draw the comparison operators.
1197          *
1198          * NOTE: It would be more correct to use the PATTERN opfamilies than the
1199          * simple ones, but at the moment ANALYZE will not generate statistics for
1200          * the PATTERN operators.  But our results are so approximate anyway that
1201          * it probably hardly matters.
1202          */
1203         vartype = vardata.vartype;
1204
1205         switch (vartype)
1206         {
1207                 case TEXTOID:
1208                         opfamily = TEXT_BTREE_FAM_OID;
1209                         break;
1210                 case BPCHAROID:
1211                         opfamily = BPCHAR_BTREE_FAM_OID;
1212                         break;
1213                 case NAMEOID:
1214                         opfamily = NAME_BTREE_FAM_OID;
1215                         break;
1216                 case BYTEAOID:
1217                         opfamily = BYTEA_BTREE_FAM_OID;
1218                         break;
1219                 default:
1220                         ReleaseVariableStats(vardata);
1221                         return result;
1222         }
1223
1224         /*
1225          * Pull out any fixed prefix implied by the pattern, and estimate the
1226          * fractional selectivity of the remainder of the pattern.  Unlike many of
1227          * the other functions in this file, we use the pattern operator's actual
1228          * collation for this step.  This is not because we expect the collation
1229          * to make a big difference in the selectivity estimate (it seldom would),
1230          * but because we want to be sure we cache compiled regexps under the
1231          * right cache key, so that they can be re-used at runtime.
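              *
              * For example (illustrative patterns): LIKE 'abc%def' yields the
              * fixed prefix 'abc' with pstatus = Pattern_Prefix_Partial and a
              * rest_selec covering the '%def' remainder, while LIKE 'abc' (no
              * wildcards) yields pstatus = Pattern_Prefix_Exact.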
1232          */
1233         patt = (Const *) other;
1234         pstatus = pattern_fixed_prefix(patt, ptype, collation,
1235                                                                    &prefix, &rest_selec);
1236
1237         /*
1238          * If necessary, coerce the prefix constant to the right type.
1239          */
1240         if (prefix && prefix->consttype != vartype)
1241         {
1242                 char       *prefixstr;
1243
1244                 switch (prefix->consttype)
1245                 {
1246                         case TEXTOID:
1247                                 prefixstr = TextDatumGetCString(prefix->constvalue);
1248                                 break;
1249                         case BYTEAOID:
1250                                 prefixstr = DatumGetCString(DirectFunctionCall1(byteaout,
1251                                                                                                                 prefix->constvalue));
1252                                 break;
1253                         default:
1254                                 elog(ERROR, "unrecognized consttype: %u",
1255                                          prefix->consttype);
1256                                 ReleaseVariableStats(vardata);
1257                                 return result;
1258                 }
1259                 prefix = string_to_const(prefixstr, vartype);
1260                 pfree(prefixstr);
1261         }
1262
1263         if (pstatus == Pattern_Prefix_Exact)
1264         {
1265                 /*
1266                  * Pattern specifies an exact match, so pretend operator is '='
1267                  */
1268                 Oid                     eqopr = get_opfamily_member(opfamily, vartype, vartype,
1269                                                                                                 BTEqualStrategyNumber);
1270
1271                 if (eqopr == InvalidOid)
1272                         elog(ERROR, "no = operator for opfamily %u", opfamily);
1273                 result = var_eq_const(&vardata, eqopr, prefix->constvalue,
1274                                                           false, true);
1275         }
1276         else
1277         {
1278                 /*
1279                  * Not exact-match pattern.  If we have a sufficiently large
1280                  * histogram, estimate selectivity for the histogram part of the
1281                  * population by counting matches in the histogram.  If not, estimate
1282                  * selectivity of the fixed prefix and remainder of pattern
1283                  * separately, then combine the two to get an estimate of the
1284                  * selectivity for the part of the column population represented by
1285                  * the histogram.  (For histograms of intermediate size, we blend
1286                  * the two approaches.)
1287                  *
1288                  * We then add up data for any most-common-value entries; these are
1289                  * not in the histogram population, and we can get exact answers for
1290                  * them by applying the pattern operator, so there's no reason to
1291                  * approximate.  (If the MCVs cover a significant part of the total
1292                  * population, this gives us a big leg up in accuracy.)
1293                  */
1294                 Selectivity selec;
1295                 int                     hist_size;
1296                 FmgrInfo        opproc;
1297                 double          nullfrac,
1298                                         mcv_selec,
1299                                         sumcommon;
1300
1301                 /* Try to use the histogram entries to get selectivity */
1302                 fmgr_info(get_opcode(operator), &opproc);
1303
1304                 selec = histogram_selectivity(&vardata, &opproc, constval, true,
1305                                                                           10, 1, &hist_size);
1306
1307                 /* If not at least 100 entries, fall back wholly or partly on the heuristic method */
1308                 if (hist_size < 100)
1309                 {
1310                         Selectivity heursel;
1311                         Selectivity prefixsel;
1312
1313                         if (pstatus == Pattern_Prefix_Partial)
1314                                 prefixsel = prefix_selectivity(root, &vardata, vartype,
1315                                                                                            opfamily, prefix);
1316                         else
1317                                 prefixsel = 1.0;
1318                         heursel = prefixsel * rest_selec;
1319
1320                         if (selec < 0)          /* fewer than 10 histogram entries? */
1321                                 selec = heursel;
1322                         else
1323                         {
1324                                 /*
1325                                  * For histogram sizes from 10 to 100, we combine the
1326                                  * histogram and heuristic selectivities, putting increasingly
1327                                  * more trust in the histogram for larger sizes.
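                                      *
                                      * Worked example (hypothetical numbers): hist_size = 40
                                      * gives hist_weight = 0.4, so a histogram estimate of 0.02
                                      * and a heuristic estimate of 0.10 blend to
                                      * 0.4*0.02 + 0.6*0.10 = 0.068.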
1328                                  */
1329                                 double          hist_weight = hist_size / 100.0;
1330
1331                                 selec = selec * hist_weight + heursel * (1.0 - hist_weight);
1332                         }
1333                 }
1334
1335                 /* In any case, don't believe extremely small or large estimates. */
1336                 if (selec < 0.0001)
1337                         selec = 0.0001;
1338                 else if (selec > 0.9999)
1339                         selec = 0.9999;
1340
1341                 /*
1342                  * If we have most-common-values info, add up the fractions of the MCV
1343                  * entries that satisfy MCV OP PATTERN.  These fractions contribute
1344                  * directly to the result selectivity.  Also add up the total fraction
1345                  * represented by MCV entries.
1346                  */
1347                 mcv_selec = mcv_selectivity(&vardata, &opproc, constval, true,
1348                                                                         &sumcommon);
1349
1350                 if (HeapTupleIsValid(vardata.statsTuple))
1351                         nullfrac = ((Form_pg_statistic) GETSTRUCT(vardata.statsTuple))->stanullfrac;
1352                 else
1353                         nullfrac = 0.0;
1354
1355                 /*
1356                  * Now merge the results from the MCV and histogram calculations,
1357                  * realizing that the histogram covers only the non-null values that
1358                  * are not listed in MCV.
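                      *
                      * Worked example (hypothetical numbers): with nullfrac = 0.1,
                      * sumcommon = 0.3, a histogram-part selectivity of 0.05, and
                      * mcv_selec = 0.12, the result is
                      * 0.05 * (1.0 - 0.1 - 0.3) + 0.12 = 0.15.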
1359                  */
1360                 selec *= 1.0 - nullfrac - sumcommon;
1361                 selec += mcv_selec;
1362
1363                 /* result should be in range, but make sure... */
1364                 CLAMP_PROBABILITY(selec);
1365                 result = selec;
1366         }
1367
1368         if (prefix)
1369         {
1370                 pfree(DatumGetPointer(prefix->constvalue));
1371                 pfree(prefix);
1372         }
1373
1374         ReleaseVariableStats(vardata);
1375
1376         return negate ? (1.0 - result) : result;
1377 }
1378
1379 /*
1380  *              regexeqsel              - Selectivity of regular-expression pattern match.
1381  */
1382 Datum
1383 regexeqsel(PG_FUNCTION_ARGS)
1384 {
1385         PG_RETURN_FLOAT8(patternsel(fcinfo, Pattern_Type_Regex, false));
1386 }
1387
1388 /*
1389  *              icregexeqsel    - Selectivity of case-insensitive regex match.
1390  */
1391 Datum
1392 icregexeqsel(PG_FUNCTION_ARGS)
1393 {
1394         PG_RETURN_FLOAT8(patternsel(fcinfo, Pattern_Type_Regex_IC, false));
1395 }
1396
1397 /*
1398  *              likesel                 - Selectivity of LIKE pattern match.
1399  */
1400 Datum
1401 likesel(PG_FUNCTION_ARGS)
1402 {
1403         PG_RETURN_FLOAT8(patternsel(fcinfo, Pattern_Type_Like, false));
1404 }
1405
1406 /*
1407  *              iclikesel                       - Selectivity of ILIKE pattern match.
1408  */
1409 Datum
1410 iclikesel(PG_FUNCTION_ARGS)
1411 {
1412         PG_RETURN_FLOAT8(patternsel(fcinfo, Pattern_Type_Like_IC, false));
1413 }
1414
1415 /*
1416  *              regexnesel              - Selectivity of regular-expression pattern non-match.
1417  */
1418 Datum
1419 regexnesel(PG_FUNCTION_ARGS)
1420 {
1421         PG_RETURN_FLOAT8(patternsel(fcinfo, Pattern_Type_Regex, true));
1422 }
1423
1424 /*
1425  *              icregexnesel    - Selectivity of case-insensitive regex non-match.
1426  */
1427 Datum
1428 icregexnesel(PG_FUNCTION_ARGS)
1429 {
1430         PG_RETURN_FLOAT8(patternsel(fcinfo, Pattern_Type_Regex_IC, true));
1431 }
1432
1433 /*
1434  *              nlikesel                - Selectivity of LIKE pattern non-match.
1435  */
1436 Datum
1437 nlikesel(PG_FUNCTION_ARGS)
1438 {
1439         PG_RETURN_FLOAT8(patternsel(fcinfo, Pattern_Type_Like, true));
1440 }
1441
1442 /*
1443  *              icnlikesel              - Selectivity of ILIKE pattern non-match.
1444  */
1445 Datum
1446 icnlikesel(PG_FUNCTION_ARGS)
1447 {
1448         PG_RETURN_FLOAT8(patternsel(fcinfo, Pattern_Type_Like_IC, true));
1449 }
1450
1451 /*
1452  *              boolvarsel              - Selectivity of Boolean variable.
1453  *
1454  * This can actually be called on any boolean-valued expression.  If it
1455  * involves only Vars of the specified relation, and if there are statistics
1456  * about the Var or expression (the latter is possible if it's indexed) then
1457  * we'll produce a real estimate; otherwise it's just a default.
1458  */
1459 Selectivity
1460 boolvarsel(PlannerInfo *root, Node *arg, int varRelid)
1461 {
1462         VariableStatData vardata;
1463         double          selec;
1464
1465         examine_variable(root, arg, varRelid, &vardata);
1466         if (HeapTupleIsValid(vardata.statsTuple))
1467         {
1468                 /*
1469                  * A boolean variable V is equivalent to the clause V = 't', so we
1470                  * compute the selectivity as if that is what we have.
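                      *
                      * For example (hypothetical stats): if the column's MCV list
                      * shows the value true with frequency 0.8, var_eq_const
                      * returns approximately 0.8 here.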
1471                  */
1472                 selec = var_eq_const(&vardata, BooleanEqualOperator,
1473                                                          BoolGetDatum(true), false, true);
1474         }
1475         else if (is_funcclause(arg))
1476         {
1477                 /*
1478                  * If we have no stats and it's a function call, estimate 0.3333333.
1479                  * This seems a pretty unprincipled choice, but Postgres has been
1480                  * using that estimate for function calls since 1992.  The hoariness
1481                  * of this behavior suggests that we should not be in too much hurry
1482                  * to use another value.
1483                  */
1484                 selec = 0.3333333;
1485         }
1486         else
1487         {
1488                 /* Otherwise, the default estimate is 0.5 */
1489                 selec = 0.5;
1490         }
1491         ReleaseVariableStats(vardata);
1492         return selec;
1493 }
1494
1495 /*
1496  *              booltestsel             - Selectivity of BooleanTest Node.
1497  */
1498 Selectivity
1499 booltestsel(PlannerInfo *root, BoolTestType booltesttype, Node *arg,
1500                         int varRelid, JoinType jointype, SpecialJoinInfo *sjinfo)
1501 {
1502         VariableStatData vardata;
1503         double          selec;
1504
1505         examine_variable(root, arg, varRelid, &vardata);
1506
1507         if (HeapTupleIsValid(vardata.statsTuple))
1508         {
1509                 Form_pg_statistic stats;
1510                 double          freq_null;
1511                 Datum      *values;
1512                 int                     nvalues;
1513                 float4     *numbers;
1514                 int                     nnumbers;
1515
1516                 stats = (Form_pg_statistic) GETSTRUCT(vardata.statsTuple);
1517                 freq_null = stats->stanullfrac;
1518
1519                 if (get_attstatsslot(vardata.statsTuple,
1520                                                          vardata.atttype, vardata.atttypmod,
1521                                                          STATISTIC_KIND_MCV, InvalidOid,
1522                                                          NULL,
1523                                                          &values, &nvalues,
1524                                                          &numbers, &nnumbers)
1525                         && nnumbers > 0)
1526                 {
1527                         double          freq_true;
1528                         double          freq_false;
1529
1530                         /*
1531                          * Get first MCV frequency and derive frequency for true.
1532                          */
1533                         if (DatumGetBool(values[0]))
1534                                 freq_true = numbers[0];
1535                         else
1536                                 freq_true = 1.0 - numbers[0] - freq_null;
1537
1538                         /*
1539                          * Next derive frequency for false. Then use these as appropriate
1540                          * to derive frequency for each case.
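                              *
                              * Worked example (hypothetical stats): stanullfrac = 0.1 and
                              * a first MCV of true with frequency 0.6 give freq_true = 0.6
                              * and freq_false = 0.3; IS_NOT_TRUE below then selects
                              * 1.0 - 0.6 = 0.4.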
1541                          */
1542                         freq_false = 1.0 - freq_true - freq_null;
1543
1544                         switch (booltesttype)
1545                         {
1546                                 case IS_UNKNOWN:
1547                                         /* select only NULL values */
1548                                         selec = freq_null;
1549                                         break;
1550                                 case IS_NOT_UNKNOWN:
1551                                         /* select non-NULL values */
1552                                         selec = 1.0 - freq_null;
1553                                         break;
1554                                 case IS_TRUE:
1555                                         /* select only TRUE values */
1556                                         selec = freq_true;
1557                                         break;
1558                                 case IS_NOT_TRUE:
1559                                         /* select non-TRUE values */
1560                                         selec = 1.0 - freq_true;
1561                                         break;
1562                                 case IS_FALSE:
1563                                         /* select only FALSE values */
1564                                         selec = freq_false;
1565                                         break;
1566                                 case IS_NOT_FALSE:
1567                                         /* select non-FALSE values */
1568                                         selec = 1.0 - freq_false;
1569                                         break;
1570                                 default:
1571                                         elog(ERROR, "unrecognized booltesttype: %d",
1572                                                  (int) booltesttype);
1573                                         selec = 0.0;    /* Keep compiler quiet */
1574                                         break;
1575                         }
1576
1577                         free_attstatsslot(vardata.atttype, values, nvalues,
1578                                                           numbers, nnumbers);
1579                 }
1580                 else
1581                 {
1582                         /*
1583                          * No most-common-value info available. Still have null fraction
1584                          * information, so use it for IS [NOT] UNKNOWN. Otherwise adjust
1585                          * for null fraction and assume a 50-50 split of TRUE and FALSE.
1586                          */
1587                         switch (booltesttype)
1588                         {
1589                                 case IS_UNKNOWN:
1590                                         /* select only NULL values */
1591                                         selec = freq_null;
1592                                         break;
1593                                 case IS_NOT_UNKNOWN:
1594                                         /* select non-NULL values */
1595                                         selec = 1.0 - freq_null;
1596                                         break;
1597                                 case IS_TRUE:
1598                                 case IS_FALSE:
1599                                         /* Assume we select half of the non-NULL values */
1600                                         selec = (1.0 - freq_null) / 2.0;
1601                                         break;
1602                                 case IS_NOT_TRUE:
1603                                 case IS_NOT_FALSE:
1604                                         /* Assume we select NULLs plus half of the non-NULLs */
1605                                         /* equiv. to freq_null + (1.0 - freq_null) / 2.0 */
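                                             /* e.g. freq_null = 0.2 gives 0.6 here, vs. 0.4 for IS_TRUE above */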
1606                                         selec = (freq_null + 1.0) / 2.0;
1607                                         break;
1608                                 default:
1609                                         elog(ERROR, "unrecognized booltesttype: %d",
1610                                                  (int) booltesttype);
1611                                         selec = 0.0;    /* Keep compiler quiet */
1612                                         break;
1613                         }
1614                 }
1615         }
1616         else
1617         {
1618                 /*
1619                  * If we can't get variable statistics for the argument, perhaps
1620                  * clause_selectivity can do something with it.  We ignore the
1621                  * possibility of a NULL value when using clause_selectivity, and just
1622                  * assume the value is either TRUE or FALSE.
1623                  */
1624                 switch (booltesttype)
1625                 {
1626                         case IS_UNKNOWN:
1627                                 selec = DEFAULT_UNK_SEL;
1628                                 break;
1629                         case IS_NOT_UNKNOWN:
1630                                 selec = DEFAULT_NOT_UNK_SEL;
1631                                 break;
1632                         case IS_TRUE:
1633                         case IS_NOT_FALSE:
1634                                 selec = (double) clause_selectivity(root, arg,
1635                                                                                                         varRelid,
1636                                                                                                         jointype, sjinfo);
1637                                 break;
1638                         case IS_FALSE:
1639                         case IS_NOT_TRUE:
1640                                 selec = 1.0 - (double) clause_selectivity(root, arg,
1641                                                                                                                   varRelid,
1642                                                                                                                   jointype, sjinfo);
1643                                 break;
1644                         default:
1645                                 elog(ERROR, "unrecognized booltesttype: %d",
1646                                          (int) booltesttype);
1647                                 selec = 0.0;    /* Keep compiler quiet */
1648                                 break;
1649                 }
1650         }
1651
1652         ReleaseVariableStats(vardata);
1653
1654         /* result should be in range, but make sure... */
1655         CLAMP_PROBABILITY(selec);
1656
1657         return (Selectivity) selec;
1658 }
1659
1660 /*
1661  *              nulltestsel             - Selectivity of NullTest Node.
1662  */
1663 Selectivity
1664 nulltestsel(PlannerInfo *root, NullTestType nulltesttype, Node *arg,
1665                         int varRelid, JoinType jointype, SpecialJoinInfo *sjinfo)
1666 {
1667         VariableStatData vardata;
1668         double          selec;
1669
1670         examine_variable(root, arg, varRelid, &vardata);
1671
1672         if (HeapTupleIsValid(vardata.statsTuple))
1673         {
1674                 Form_pg_statistic stats;
1675                 double          freq_null;
1676
1677                 stats = (Form_pg_statistic) GETSTRUCT(vardata.statsTuple);
1678                 freq_null = stats->stanullfrac;
1679
1680                 switch (nulltesttype)
1681                 {
1682                         case IS_NULL:
1683
1684                                 /*
1685                                  * Use freq_null directly.
1686                                  */
1687                                 selec = freq_null;
1688                                 break;
1689                         case IS_NOT_NULL:
1690
1691                                 /*
1692                                  * Select not unknown (not null) values. Calculate from
1693                                  * freq_null.
1694                                  */
1695                                 selec = 1.0 - freq_null;
1696                                 break;
1697                         default:
1698                                 elog(ERROR, "unrecognized nulltesttype: %d",
1699                                          (int) nulltesttype);
1700                                 return (Selectivity) 0; /* keep compiler quiet */
1701                 }
1702         }
1703         else
1704         {
1705                 /*
1706                  * No ANALYZE stats available, so make a guess
1707                  */
1708                 switch (nulltesttype)
1709                 {
1710                         case IS_NULL:
1711                                 selec = DEFAULT_UNK_SEL;
1712                                 break;
1713                         case IS_NOT_NULL:
1714                                 selec = DEFAULT_NOT_UNK_SEL;
1715                                 break;
1716                         default:
1717                                 elog(ERROR, "unrecognized nulltesttype: %d",
1718                                          (int) nulltesttype);
1719                                 return (Selectivity) 0; /* keep compiler quiet */
1720                 }
1721         }
1722
1723         ReleaseVariableStats(vardata);
1724
1725         /* result should be in range, but make sure... */
1726         CLAMP_PROBABILITY(selec);
1727
1728         return (Selectivity) selec;
1729 }
1730
1731 /*
1732  * strip_array_coercion - strip binary-compatible relabeling from an array expr
1733  *
1734  * For array values, the parser normally generates ArrayCoerceExpr conversions,
1735  * but it seems possible that RelabelType might show up.  Also, the planner
1736  * does not currently take care to collapse stacked ArrayCoerceExpr nodes,
1737  * so we need to be ready to deal with more than one level.
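  *
  * For example (a hypothetical case): a varchar[] value implicitly coerced to
  * text[] produces an ArrayCoerceExpr with elemfuncid = InvalidOid wrapping
  * the original expression; this function peels off such no-op conversions
  * until the underlying array expression is exposed.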
1738  */
1739 static Node *
1740 strip_array_coercion(Node *node)
1741 {
1742         for (;;)
1743         {
1744                 if (node && IsA(node, ArrayCoerceExpr) &&
1745                         ((ArrayCoerceExpr *) node)->elemfuncid == InvalidOid)
1746                 {
1747                         node = (Node *) ((ArrayCoerceExpr *) node)->arg;
1748                 }
1749                 else if (node && IsA(node, RelabelType))
1750                 {
1751                         /* We don't really expect this case, but may as well cope */
1752                         node = (Node *) ((RelabelType *) node)->arg;
1753                 }
1754                 else
1755                         break;
1756         }
1757         return node;
1758 }
1759
1760 /*
1761  *              scalararraysel          - Selectivity of ScalarArrayOpExpr Node.
1762  */
1763 Selectivity
1764 scalararraysel(PlannerInfo *root,
1765                            ScalarArrayOpExpr *clause,
1766                            bool is_join_clause,
1767                            int varRelid,
1768                            JoinType jointype,
1769                            SpecialJoinInfo *sjinfo)
1770 {
1771         Oid                     operator = clause->opno;
1772         bool            useOr = clause->useOr;
1773         bool            isEquality = false;
1774         bool            isInequality = false;
1775         Node       *leftop;
1776         Node       *rightop;
1777         Oid                     nominal_element_type;
1778         Oid                     nominal_element_collation;
1779         TypeCacheEntry *typentry;
1780         RegProcedure oprsel;
1781         FmgrInfo        oprselproc;
1782         Selectivity s1;
1783         Selectivity s1disjoint;
1784
1785         /* First, deconstruct the expression */
1786         Assert(list_length(clause->args) == 2);
1787         leftop = (Node *) linitial(clause->args);
1788         rightop = (Node *) lsecond(clause->args);
1789
1790         /* aggressively reduce both sides to constants */
1791         leftop = estimate_expression_value(root, leftop);
1792         rightop = estimate_expression_value(root, rightop);
1793
1794         /* get nominal (after relabeling) element type of rightop */
1795         nominal_element_type = get_base_element_type(exprType(rightop));
1796         if (!OidIsValid(nominal_element_type))
1797                 return (Selectivity) 0.5;               /* probably shouldn't happen */
1798         /* get nominal collation, too, for generating constants */
1799         nominal_element_collation = exprCollation(rightop);
1800
1801         /* look through any binary-compatible relabeling of rightop */
1802         rightop = strip_array_coercion(rightop);
1803
1804         /*
1805          * Detect whether the operator is the default equality or inequality
1806          * operator of the array element type.
1807          */
1808         typentry = lookup_type_cache(nominal_element_type, TYPECACHE_EQ_OPR);
1809         if (OidIsValid(typentry->eq_opr))
1810         {
1811                 if (operator == typentry->eq_opr)
1812                         isEquality = true;
1813                 else if (get_negator(operator) == typentry->eq_opr)
1814                         isInequality = true;
1815         }
1816
1817         /*
1818          * If it is equality or inequality, we might be able to estimate this as a
1819          * form of array containment; for instance "const = ANY(column)" can be
1820          * treated as "ARRAY[const] <@ column".  scalararraysel_containment tries
1821          * that, and returns the selectivity estimate if successful, or -1 if not.
1822          */
1823         if ((isEquality || isInequality) && !is_join_clause)
1824         {
1825                 s1 = scalararraysel_containment(root, leftop, rightop,
1826                                                                                 nominal_element_type,
1827                                                                                 isEquality, useOr, varRelid);
1828                 if (s1 >= 0.0)
1829                         return s1;
1830         }
1831
1832         /*
1833          * Look up the underlying operator's selectivity estimator. Punt if it
1834          * hasn't got one.
1835          */
1836         if (is_join_clause)
1837                 oprsel = get_oprjoin(operator);
1838         else
1839                 oprsel = get_oprrest(operator);
1840         if (!oprsel)
1841                 return (Selectivity) 0.5;
1842         fmgr_info(oprsel, &oprselproc);
1843
1844         /*
1845          * In the array-containment check above, we must only believe that an
1846          * operator is equality or inequality if it is the default btree equality
1847          * operator (or its negator) for the element type, since those are the
1848          * operators that array containment will use.  But in what follows, we can
1849          * be a little laxer, and also believe that any operators using eqsel() or
1850          * neqsel() as selectivity estimator act like equality or inequality.
1851          */
1852         if (oprsel == F_EQSEL || oprsel == F_EQJOINSEL)
1853                 isEquality = true;
1854         else if (oprsel == F_NEQSEL || oprsel == F_NEQJOINSEL)
1855                 isInequality = true;
1856
1857         /*
1858          * We consider three cases:
1859          *
1860          * 1. rightop is an Array constant: deconstruct the array, apply the
1861          * operator's selectivity function for each array element, and merge the
1862          * results in the same way that clausesel.c does for AND/OR combinations.
1863          *
1864          * 2. rightop is an ARRAY[] construct: apply the operator's selectivity
1865          * function for each element of the ARRAY[] construct, and merge.
1866          *
1867          * 3. otherwise, make a guess ...
1868          */
1869         if (rightop && IsA(rightop, Const))
1870         {
1871                 Datum           arraydatum = ((Const *) rightop)->constvalue;
1872                 bool            arrayisnull = ((Const *) rightop)->constisnull;
1873                 ArrayType  *arrayval;
1874                 int16           elmlen;
1875                 bool            elmbyval;
1876                 char            elmalign;
1877                 int                     num_elems;
1878                 Datum      *elem_values;
1879                 bool       *elem_nulls;
1880                 int                     i;
1881
1882                 if (arrayisnull)                /* qual can't succeed if null array */
1883                         return (Selectivity) 0.0;
1884                 arrayval = DatumGetArrayTypeP(arraydatum);
1885                 get_typlenbyvalalign(ARR_ELEMTYPE(arrayval),
1886                                                          &elmlen, &elmbyval, &elmalign);
1887                 deconstruct_array(arrayval,
1888                                                   ARR_ELEMTYPE(arrayval),
1889                                                   elmlen, elmbyval, elmalign,
1890                                                   &elem_values, &elem_nulls, &num_elems);
1891
1892                 /*
1893                  * For generic operators, we assume the probability of success is
1894                  * independent for each array element.  But for "= ANY" or "<> ALL",
1895                  * if the array elements are distinct (which'd typically be the case)
1896                  * then the probabilities are disjoint, and we should just sum them.
1897                  *
1898                  * If we were being really careful we would try to confirm that the
1899                  * elements are all distinct, but that would be expensive and it
1900                  * doesn't seem to be worth the cycles; it would amount to penalizing
1901                  * well-written queries in favor of poorly-written ones.  However, we
1902                  * do protect ourselves a little bit by checking whether the
1903                  * disjointness assumption leads to an impossible (out of range)
1904                  * probability; if so, we fall back to the normal calculation.
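                      *
                      * Illustrative numbers: for "x = ANY ('{1,2,3}')" with each
                      * element estimated at selectivity 0.01, the independent-OR
                      * combination below yields 1 - 0.99^3 = 0.029701, while the
                      * disjoint sum is exactly 0.03; the sum is accepted as long
                      * as it stays within [0, 1].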
1905                  */
1906                 s1 = s1disjoint = (useOr ? 0.0 : 1.0);
1907
1908                 for (i = 0; i < num_elems; i++)
1909                 {
1910                         List       *args;
1911                         Selectivity s2;
1912
1913                         args = list_make2(leftop,
1914                                                           makeConst(nominal_element_type,
1915                                                                                 -1,
1916                                                                                 nominal_element_collation,
1917                                                                                 elmlen,
1918                                                                                 elem_values[i],
1919                                                                                 elem_nulls[i],
1920                                                                                 elmbyval));
1921                         if (is_join_clause)
1922                                 s2 = DatumGetFloat8(FunctionCall5Coll(&oprselproc,
1923                                                                                                           clause->inputcollid,
1924                                                                                                           PointerGetDatum(root),
1925                                                                                                   ObjectIdGetDatum(operator),
1926                                                                                                           PointerGetDatum(args),
1927                                                                                                           Int16GetDatum(jointype),
1928                                                                                                    PointerGetDatum(sjinfo)));
1929                         else
1930                                 s2 = DatumGetFloat8(FunctionCall4Coll(&oprselproc,
1931                                                                                                           clause->inputcollid,
1932                                                                                                           PointerGetDatum(root),
1933                                                                                                   ObjectIdGetDatum(operator),
1934                                                                                                           PointerGetDatum(args),
1935                                                                                                    Int32GetDatum(varRelid)));
1936
1937                         if (useOr)
1938                         {
1939                                 s1 = s1 + s2 - s1 * s2;
1940                                 if (isEquality)
1941                                         s1disjoint += s2;
1942                         }
1943                         else
1944                         {
1945                                 s1 = s1 * s2;
1946                                 if (isInequality)
1947                                         s1disjoint += s2 - 1.0;
1948                         }
1949                 }
1950
1951                 /* accept disjoint-probability estimate if in range */
1952                 if ((useOr ? isEquality : isInequality) &&
1953                         s1disjoint >= 0.0 && s1disjoint <= 1.0)
1954                         s1 = s1disjoint;
1955         }
1956         else if (rightop && IsA(rightop, ArrayExpr) &&
1957                          !((ArrayExpr *) rightop)->multidims)
1958         {
1959                 ArrayExpr  *arrayexpr = (ArrayExpr *) rightop;
1960                 int16           elmlen;
1961                 bool            elmbyval;
1962                 ListCell   *l;
1963
1964                 get_typlenbyval(arrayexpr->element_typeid,
1965                                                 &elmlen, &elmbyval);
1966
1967                 /*
1968                  * We use the assumption of disjoint probabilities here too, although
1969                  * the odds of equal array elements are rather higher if the elements
1970                  * are not all constants (which they won't be, else constant folding
1971                  * would have reduced the ArrayExpr to a Const).  In this path it's
1972                  * critical to have the sanity check on the s1disjoint estimate.
1973                  */
1974                 s1 = s1disjoint = (useOr ? 0.0 : 1.0);
1975
1976                 foreach(l, arrayexpr->elements)
1977                 {
1978                         Node       *elem = (Node *) lfirst(l);
1979                         List       *args;
1980                         Selectivity s2;
1981
1982                         /*
1983                          * Theoretically, if elem isn't of nominal_element_type we should
1984                          * insert a RelabelType, but it seems unlikely that any operator
1985                          * estimation function would really care ...
1986                          */
1987                         args = list_make2(leftop, elem);
1988                         if (is_join_clause)
1989                                 s2 = DatumGetFloat8(FunctionCall5Coll(&oprselproc,
1990                                                                                                           clause->inputcollid,
1991                                                                                                           PointerGetDatum(root),
1992                                                                                                   ObjectIdGetDatum(operator),
1993                                                                                                           PointerGetDatum(args),
1994                                                                                                           Int16GetDatum(jointype),
1995                                                                                                    PointerGetDatum(sjinfo)));
1996                         else
1997                                 s2 = DatumGetFloat8(FunctionCall4Coll(&oprselproc,
1998                                                                                                           clause->inputcollid,
1999                                                                                                           PointerGetDatum(root),
2000                                                                                                   ObjectIdGetDatum(operator),
2001                                                                                                           PointerGetDatum(args),
2002                                                                                                    Int32GetDatum(varRelid)));
2003
2004                         if (useOr)
2005                         {
2006                                 s1 = s1 + s2 - s1 * s2;
2007                                 if (isEquality)
2008                                         s1disjoint += s2;
2009                         }
2010                         else
2011                         {
2012                                 s1 = s1 * s2;
2013                                 if (isInequality)
2014                                         s1disjoint += s2 - 1.0;
2015                         }
2016                 }
2017
2018                 /* accept disjoint-probability estimate if in range */
2019                 if ((useOr ? isEquality : isInequality) &&
2020                         s1disjoint >= 0.0 && s1disjoint <= 1.0)
2021                         s1 = s1disjoint;
2022         }
2023         else
2024         {
2025                 CaseTestExpr *dummyexpr;
2026                 List       *args;
2027                 Selectivity s2;
2028                 int                     i;
2029
2030                 /*
2031                  * We need a dummy rightop to pass to the operator selectivity
2032                  * routine.  It can be pretty much anything that doesn't look like a
2033                  * constant; CaseTestExpr is a convenient choice.
2034                  */
2035                 dummyexpr = makeNode(CaseTestExpr);
2036                 dummyexpr->typeId = nominal_element_type;
2037                 dummyexpr->typeMod = -1;
2038                 dummyexpr->collation = clause->inputcollid;
2039                 args = list_make2(leftop, dummyexpr);
2040                 if (is_join_clause)
2041                         s2 = DatumGetFloat8(FunctionCall5Coll(&oprselproc,
2042                                                                                                   clause->inputcollid,
2043                                                                                                   PointerGetDatum(root),
2044                                                                                                   ObjectIdGetDatum(operator),
2045                                                                                                   PointerGetDatum(args),
2046                                                                                                   Int16GetDatum(jointype),
2047                                                                                                   PointerGetDatum(sjinfo)));
2048                 else
2049                         s2 = DatumGetFloat8(FunctionCall4Coll(&oprselproc,
2050                                                                                                   clause->inputcollid,
2051                                                                                                   PointerGetDatum(root),
2052                                                                                                   ObjectIdGetDatum(operator),
2053                                                                                                   PointerGetDatum(args),
2054                                                                                                   Int32GetDatum(varRelid)));
2055                 s1 = useOr ? 0.0 : 1.0;
2056
2057                 /*
2058                  * Arbitrarily assume 10 elements in the eventual array value (see
2059                  * also estimate_array_length).  We don't risk an assumption of
2060                  * disjoint probabilities here.
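                      *
                      * For instance (illustrative figure): with useOr and a
                      * per-element selectivity of s2 = 0.005, ten iterations of the
                      * OR combination give s1 = 1 - 0.995^10, roughly 0.049.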
2061                  */
2062                 for (i = 0; i < 10; i++)
2063                 {
2064                         if (useOr)
2065                                 s1 = s1 + s2 - s1 * s2;
2066                         else
2067                                 s1 = s1 * s2;
2068                 }
2069         }
2070
2071         /* result should be in range, but make sure... */
2072         CLAMP_PROBABILITY(s1);
2073
2074         return s1;
2075 }
2076
2077 /*
2078  * Estimate number of elements in the array yielded by an expression.
2079  *
2080  * It's important that this agree with scalararraysel.
2081  */
2082 int
2083 estimate_array_length(Node *arrayexpr)
2084 {
2085         /* look through any binary-compatible relabeling of arrayexpr */
2086         arrayexpr = strip_array_coercion(arrayexpr);
2087
2088         if (arrayexpr && IsA(arrayexpr, Const))
2089         {
2090                 Datum           arraydatum = ((Const *) arrayexpr)->constvalue;
2091                 bool            arrayisnull = ((Const *) arrayexpr)->constisnull;
2092                 ArrayType  *arrayval;
2093
2094                 if (arrayisnull)
2095                         return 0;
2096                 arrayval = DatumGetArrayTypeP(arraydatum);
2097                 return ArrayGetNItems(ARR_NDIM(arrayval), ARR_DIMS(arrayval));
2098         }
2099         else if (arrayexpr && IsA(arrayexpr, ArrayExpr) &&
2100                          !((ArrayExpr *) arrayexpr)->multidims)
2101         {
2102                 return list_length(((ArrayExpr *) arrayexpr)->elements);
2103         }
2104         else
2105         {
2106                 /* default guess --- see also scalararraysel */
2107                 return 10;
2108         }
2109 }
2110
2111 /*
2112  *              rowcomparesel           - Selectivity of RowCompareExpr Node.
2113  *
2114  * We estimate RowCompare selectivity by considering just the first (high
2115  * order) columns, which makes it equivalent to an ordinary OpExpr.  While
2116  * this estimate could be refined by considering additional columns, it
2117  * seems unlikely that we could do a lot better without multi-column
2118  * statistics.
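  *
  * For example (illustrative clause): "(a, b) < (10, 20)" is estimated here as
  * if it were simply "a < 10", using that leading operator's restriction or
  * join selectivity estimator.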
2119  */
2120 Selectivity
2121 rowcomparesel(PlannerInfo *root,
2122                           RowCompareExpr *clause,
2123                           int varRelid, JoinType jointype, SpecialJoinInfo *sjinfo)
2124 {
2125         Selectivity s1;
2126         Oid                     opno = linitial_oid(clause->opnos);
2127         Oid                     inputcollid = linitial_oid(clause->inputcollids);
2128         List       *opargs;
2129         bool            is_join_clause;
2130
2131         /* Build equivalent arg list for single operator */
2132         opargs = list_make2(linitial(clause->largs), linitial(clause->rargs));
2133
2134         /*
2135          * Decide if it's a join clause.  This should match clausesel.c's
2136          * treat_as_join_clause(), except that we intentionally consider only the
2137          * leading columns and not the rest of the clause.
2138          */
2139         if (varRelid != 0)
2140         {
2141                 /*
2142                  * Caller is forcing restriction mode (eg, because we are examining an
2143                  * inner indexscan qual).
2144                  */
2145                 is_join_clause = false;
2146         }
2147         else if (sjinfo == NULL)
2148         {
2149                 /*
2150                  * It must be a restriction clause, since it's being evaluated at a
2151                  * scan node.
2152                  */
2153                 is_join_clause = false;
2154         }
2155         else
2156         {
2157                 /*
2158                  * Otherwise, it's a join if there's more than one relation used.
2159                  */
2160                 is_join_clause = (NumRelids((Node *) opargs) > 1);
2161         }
2162
2163         if (is_join_clause)
2164         {
2165                 /* Estimate selectivity for a join clause. */
2166                 s1 = join_selectivity(root, opno,
2167                                                           opargs,
2168                                                           inputcollid,
2169                                                           jointype,
2170                                                           sjinfo);
2171         }
2172         else
2173         {
2174                 /* Estimate selectivity for a restriction clause. */
2175                 s1 = restriction_selectivity(root, opno,
2176                                                                          opargs,
2177                                                                          inputcollid,
2178                                                                          varRelid);
2179         }
2180
2181         return s1;
2182 }
2183
2184 /*
2185  *              eqjoinsel               - Join selectivity of "="
2186  */
2187 Datum
2188 eqjoinsel(PG_FUNCTION_ARGS)
2189 {
2190         PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
2191         Oid                     operator = PG_GETARG_OID(1);
2192         List       *args = (List *) PG_GETARG_POINTER(2);
2193
2194 #ifdef NOT_USED
2195         JoinType        jointype = (JoinType) PG_GETARG_INT16(3);
2196 #endif
2197         SpecialJoinInfo *sjinfo = (SpecialJoinInfo *) PG_GETARG_POINTER(4);
2198         double          selec;
2199         VariableStatData vardata1;
2200         VariableStatData vardata2;
2201         bool            join_is_reversed;
2202         RelOptInfo *inner_rel;
2203
2204         get_join_variables(root, args, sjinfo,
2205                                            &vardata1, &vardata2, &join_is_reversed);
2206
2207         switch (sjinfo->jointype)
2208         {
2209                 case JOIN_INNER:
2210                 case JOIN_LEFT:
2211                 case JOIN_FULL:
2212                         selec = eqjoinsel_inner(operator, &vardata1, &vardata2);
2213                         break;
2214                 case JOIN_SEMI:
2215                 case JOIN_ANTI:
2216
2217                         /*
2218                          * Look up the join's inner relation.  min_righthand is sufficient
2219                          * information because neither SEMI nor ANTI joins permit any
2220                          * reassociation into or out of their RHS, so the righthand will
2221                          * always be exactly that set of rels.
2222                          */
2223                         inner_rel = find_join_input_rel(root, sjinfo->min_righthand);
2224
2225                         if (!join_is_reversed)
2226                                 selec = eqjoinsel_semi(operator, &vardata1, &vardata2,
2227                                                                            inner_rel);
2228                         else
2229                                 selec = eqjoinsel_semi(get_commutator(operator),
2230                                                                            &vardata2, &vardata1,
2231                                                                            inner_rel);
2232                         break;
2233                 default:
2234                         /* other values not expected here */
2235                         elog(ERROR, "unrecognized join type: %d",
2236                                  (int) sjinfo->jointype);
2237                         selec = 0;                      /* keep compiler quiet */
2238                         break;
2239         }
2240
2241         ReleaseVariableStats(vardata1);
2242         ReleaseVariableStats(vardata2);
2243
2244         CLAMP_PROBABILITY(selec);
2245
2246         PG_RETURN_FLOAT8((float8) selec);
2247 }
2248
2249 /*
2250  * eqjoinsel_inner --- eqjoinsel for normal inner join
2251  *
2252  * We also use this for LEFT/FULL outer joins; it's not presently clear
2253  * that it's worth trying to distinguish them here.
2254  */
2255 static double
2256 eqjoinsel_inner(Oid operator,
2257                                 VariableStatData *vardata1, VariableStatData *vardata2)
2258 {
2259         double          selec;
2260         double          nd1;
2261         double          nd2;
2262         bool            isdefault1;
2263         bool            isdefault2;
2264         Form_pg_statistic stats1 = NULL;
2265         Form_pg_statistic stats2 = NULL;
2266         bool            have_mcvs1 = false;
2267         Datum      *values1 = NULL;
2268         int                     nvalues1 = 0;
2269         float4     *numbers1 = NULL;
2270         int                     nnumbers1 = 0;
2271         bool            have_mcvs2 = false;
2272         Datum      *values2 = NULL;
2273         int                     nvalues2 = 0;
2274         float4     *numbers2 = NULL;
2275         int                     nnumbers2 = 0;
2276
2277         nd1 = get_variable_numdistinct(vardata1, &isdefault1);
2278         nd2 = get_variable_numdistinct(vardata2, &isdefault2);
2279
2280         if (HeapTupleIsValid(vardata1->statsTuple))
2281         {
2282                 stats1 = (Form_pg_statistic) GETSTRUCT(vardata1->statsTuple);
2283                 have_mcvs1 = get_attstatsslot(vardata1->statsTuple,
2284                                                                           vardata1->atttype,
2285                                                                           vardata1->atttypmod,
2286                                                                           STATISTIC_KIND_MCV,
2287                                                                           InvalidOid,
2288                                                                           NULL,
2289                                                                           &values1, &nvalues1,
2290                                                                           &numbers1, &nnumbers1);
2291         }
2292
2293         if (HeapTupleIsValid(vardata2->statsTuple))
2294         {
2295                 stats2 = (Form_pg_statistic) GETSTRUCT(vardata2->statsTuple);
2296                 have_mcvs2 = get_attstatsslot(vardata2->statsTuple,
2297                                                                           vardata2->atttype,
2298                                                                           vardata2->atttypmod,
2299                                                                           STATISTIC_KIND_MCV,
2300                                                                           InvalidOid,
2301                                                                           NULL,
2302                                                                           &values2, &nvalues2,
2303                                                                           &numbers2, &nnumbers2);
2304         }
2305
2306         if (have_mcvs1 && have_mcvs2)
2307         {
2308                 /*
2309                  * We have most-common-value lists for both relations.  Run through
2310                  * the lists to see which MCVs actually join to each other with the
2311                  * given operator.  This allows us to determine the exact join
2312                  * selectivity for the portion of the relations represented by the MCV
2313                  * lists.  We still have to estimate for the remaining population, but
2314                  * in a skewed distribution this gives us a big leg up in accuracy.
2315                  * For motivation see the analysis in Y. Ioannidis and S.
2316                  * Christodoulakis, "On the propagation of errors in the size of join
2317                  * results", Technical Report 1018, Computer Science Dept., University
2318                  * of Wisconsin, Madison, March 1991 (available from ftp.cs.wisc.edu).
2319                  */
2320                 FmgrInfo        eqproc;
2321                 bool       *hasmatch1;
2322                 bool       *hasmatch2;
2323                 double          nullfrac1 = stats1->stanullfrac;
2324                 double          nullfrac2 = stats2->stanullfrac;
2325                 double          matchprodfreq,
2326                                         matchfreq1,
2327                                         matchfreq2,
2328                                         unmatchfreq1,
2329                                         unmatchfreq2,
2330                                         otherfreq1,
2331                                         otherfreq2,
2332                                         totalsel1,
2333                                         totalsel2;
2334                 int                     i,
2335                                         nmatches;
2336
2337                 fmgr_info(get_opcode(operator), &eqproc);
2338                 hasmatch1 = (bool *) palloc0(nvalues1 * sizeof(bool));
2339                 hasmatch2 = (bool *) palloc0(nvalues2 * sizeof(bool));
2340
2341                 /*
2342                  * Note we assume that each MCV will match at most one member of the
2343                  * other MCV list.  If the operator isn't really equality, there could
2344                  * be multiple matches --- but we don't look for them, both for speed
2345                  * and because the math wouldn't add up...
2346                  */
2347                 matchprodfreq = 0.0;
2348                 nmatches = 0;
2349                 for (i = 0; i < nvalues1; i++)
2350                 {
2351                         int                     j;
2352
2353                         for (j = 0; j < nvalues2; j++)
2354                         {
2355                                 if (hasmatch2[j])
2356                                         continue;
2357                                 if (DatumGetBool(FunctionCall2Coll(&eqproc,
2358                                                                                                    DEFAULT_COLLATION_OID,
2359                                                                                                    values1[i],
2360                                                                                                    values2[j])))
2361                                 {
2362                                         hasmatch1[i] = hasmatch2[j] = true;
2363                                         matchprodfreq += numbers1[i] * numbers2[j];
2364                                         nmatches++;
2365                                         break;
2366                                 }
2367                         }
2368                 }
2369                 CLAMP_PROBABILITY(matchprodfreq);
2370                 /* Sum up frequencies of matched and unmatched MCVs */
2371                 matchfreq1 = unmatchfreq1 = 0.0;
2372                 for (i = 0; i < nvalues1; i++)
2373                 {
2374                         if (hasmatch1[i])
2375                                 matchfreq1 += numbers1[i];
2376                         else
2377                                 unmatchfreq1 += numbers1[i];
2378                 }
2379                 CLAMP_PROBABILITY(matchfreq1);
2380                 CLAMP_PROBABILITY(unmatchfreq1);
2381                 matchfreq2 = unmatchfreq2 = 0.0;
2382                 for (i = 0; i < nvalues2; i++)
2383                 {
2384                         if (hasmatch2[i])
2385                                 matchfreq2 += numbers2[i];
2386                         else
2387                                 unmatchfreq2 += numbers2[i];
2388                 }
2389                 CLAMP_PROBABILITY(matchfreq2);
2390                 CLAMP_PROBABILITY(unmatchfreq2);
2391                 pfree(hasmatch1);
2392                 pfree(hasmatch2);
2393
2394                 /*
2395                  * Compute total frequency of non-null values that are not in the MCV
2396                  * lists.
2397                  */
2398                 otherfreq1 = 1.0 - nullfrac1 - matchfreq1 - unmatchfreq1;
2399                 otherfreq2 = 1.0 - nullfrac2 - matchfreq2 - unmatchfreq2;
2400                 CLAMP_PROBABILITY(otherfreq1);
2401                 CLAMP_PROBABILITY(otherfreq2);
2402
2403                 /*
2404                  * We can estimate the total selectivity from the point of view of
2405                  * relation 1 as: the known selectivity for matched MCVs, plus
2406                  * unmatched MCVs that are assumed to match against random members of
2407                  * relation 2's non-MCV population, plus non-MCV values that are
2408                  * assumed to match against random members of relation 2's unmatched
2409                  * MCVs plus non-MCV values.
2410                  */
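                /*
                 * Illustrative example with hypothetical statistics (not taken from
                 * any real table): suppose matchprodfreq = 0.10, unmatchfreq1 = 0.20,
                 * otherfreq1 = 0.30, unmatchfreq2 = 0.15, otherfreq2 = 0.25,
                 * nd2 = 50, nvalues2 = 20, nmatches = 10.  Then the code below gives
                 *    totalsel1 = 0.10
                 *                + 0.20 * 0.25 / (50 - 20)
                 *                + 0.30 * (0.25 + 0.15) / (50 - 10)
                 *             ~= 0.105
                 * and totalsel2 is formed symmetrically from relation 2's viewpoint.
                 */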
2411                 totalsel1 = matchprodfreq;
2412                 if (nd2 > nvalues2)
2413                         totalsel1 += unmatchfreq1 * otherfreq2 / (nd2 - nvalues2);
2414                 if (nd2 > nmatches)
2415                         totalsel1 += otherfreq1 * (otherfreq2 + unmatchfreq2) /
2416                                 (nd2 - nmatches);
2417                 /* Same estimate from the point of view of relation 2. */
2418                 totalsel2 = matchprodfreq;
2419                 if (nd1 > nvalues1)
2420                         totalsel2 += unmatchfreq2 * otherfreq1 / (nd1 - nvalues1);
2421                 if (nd1 > nmatches)
2422                         totalsel2 += otherfreq2 * (otherfreq1 + unmatchfreq1) /
2423                                 (nd1 - nmatches);
2424
2425                 /*
2426                  * Use the smaller of the two estimates.  This can be justified in
2427                  * essentially the same terms as given below for the no-stats case: to
2428                  * a first approximation, we are estimating from the point of view of
2429                  * the relation with smaller nd.
2430                  */
2431                 selec = (totalsel1 < totalsel2) ? totalsel1 : totalsel2;
2432         }
2433         else
2434         {
2435                 /*
2436                  * We do not have MCV lists for both sides.  Estimate the join
2437                  * selectivity as MIN(1/nd1,1/nd2)*(1-nullfrac1)*(1-nullfrac2). This
2438                  * is plausible if we assume that the join operator is strict and the
2439                  * non-null values are about equally distributed: a given non-null
2440                  * tuple of rel1 will join to either zero or N2*(1-nullfrac2)/nd2 rows
2441                  * of rel2, so total join rows are at most
2442                  * N1*(1-nullfrac1)*N2*(1-nullfrac2)/nd2 giving a join selectivity of
2443                  * not more than (1-nullfrac1)*(1-nullfrac2)/nd2. By the same logic it
2444                  * is not more than (1-nullfrac1)*(1-nullfrac2)/nd1, so the expression
2445                  * with MIN() is an upper bound.  Using the MIN() means we estimate
2446                  * from the point of view of the relation with smaller nd (since the
2447                  * larger nd is determining the MIN).  It is reasonable to assume that
2448                  * most tuples in this rel will have join partners, so the bound is
2449                  * probably reasonably tight and should be taken as-is.
2450                  *
2451                  * XXX Can we be smarter if we have an MCV list for just one side? It
2452                  * seems that if we assume equal distribution for the other side, we
2453                  * end up with the same answer anyway.
2454                  */
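                /*
                 * Illustrative example (hypothetical statistics): with nullfrac1 = 0.1,
                 * nullfrac2 = 0, nd1 = 100 and nd2 = 1000, the code below yields
                 * (1 - 0.1) * (1 - 0) / 1000 = 0.0009, i.e. we divide by the larger
                 * of the two ndistinct estimates, matching the MIN() reasoning above.
                 */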
2455                 double          nullfrac1 = stats1 ? stats1->stanullfrac : 0.0;
2456                 double          nullfrac2 = stats2 ? stats2->stanullfrac : 0.0;
2457
2458                 selec = (1.0 - nullfrac1) * (1.0 - nullfrac2);
2459                 if (nd1 > nd2)
2460                         selec /= nd1;
2461                 else
2462                         selec /= nd2;
2463         }
2464
2465         if (have_mcvs1)
2466                 free_attstatsslot(vardata1->atttype, values1, nvalues1,
2467                                                   numbers1, nnumbers1);
2468         if (have_mcvs2)
2469                 free_attstatsslot(vardata2->atttype, values2, nvalues2,
2470                                                   numbers2, nnumbers2);
2471
2472         return selec;
2473 }
2474
2475 /*
2476  * eqjoinsel_semi --- eqjoinsel for semi join
2477  *
2478  * (Also used for anti join, which we are supposed to estimate the same way.)
2479  * Caller has ensured that vardata1 is the LHS variable.
2480  */
2481 static double
2482 eqjoinsel_semi(Oid operator,
2483                            VariableStatData *vardata1, VariableStatData *vardata2,
2484                            RelOptInfo *inner_rel)
2485 {
2486         double          selec;
2487         double          nd1;
2488         double          nd2;
2489         bool            isdefault1;
2490         bool            isdefault2;
2491         Form_pg_statistic stats1 = NULL;
2492         bool            have_mcvs1 = false;
2493         Datum      *values1 = NULL;
2494         int                     nvalues1 = 0;
2495         float4     *numbers1 = NULL;
2496         int                     nnumbers1 = 0;
2497         bool            have_mcvs2 = false;
2498         Datum      *values2 = NULL;
2499         int                     nvalues2 = 0;
2500         float4     *numbers2 = NULL;
2501         int                     nnumbers2 = 0;
2502
2503         nd1 = get_variable_numdistinct(vardata1, &isdefault1);
2504         nd2 = get_variable_numdistinct(vardata2, &isdefault2);
2505
2506         /*
2507          * We clamp nd2 to be not more than what we estimate the inner relation's
2508          * size to be.  This is intuitively somewhat reasonable since obviously
2509          * there can't be more than that many distinct values coming from the
2510          * inner rel.  The reason for the asymmetry (ie, that we don't clamp nd1
2511          * likewise) is that this is the only pathway by which restriction clauses
2512          * applied to the inner rel will affect the join result size estimate,
2513          * since set_joinrel_size_estimates will multiply SEMI/ANTI selectivity by
2514          * only the outer rel's size.  If we clamped nd1 we'd be double-counting
2515          * the selectivity of outer-rel restrictions.
2516          *
2517          * We can apply this clamping both with respect to the base relation from
2518          * which the join variable comes (if there is just one), and to the
2519          * immediate inner input relation of the current join.
2520          *
2521          * If we clamp, we can treat nd2 as being a non-default estimate; it's not
2522          * great, maybe, but it didn't come out of nowhere either.  This is most
2523          * helpful when the inner relation is empty and consequently has no stats.
2524          */
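        /*
         * For instance (hypothetical numbers): if the stats claim 10000 distinct
         * values for the inner-side column, but restriction clauses leave an
         * estimated 500 rows in the inner rel, nd2 is clamped to 500 below and
         * is thereafter treated as a non-default estimate.
         */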
2525         if (vardata2->rel)
2526         {
2527                 if (nd2 >= vardata2->rel->rows)
2528                 {
2529                         nd2 = vardata2->rel->rows;
2530                         isdefault2 = false;
2531                 }
2532         }
2533         if (nd2 >= inner_rel->rows)
2534         {
2535                 nd2 = inner_rel->rows;
2536                 isdefault2 = false;
2537         }
2538
2539         if (HeapTupleIsValid(vardata1->statsTuple))
2540         {
2541                 stats1 = (Form_pg_statistic) GETSTRUCT(vardata1->statsTuple);
2542                 have_mcvs1 = get_attstatsslot(vardata1->statsTuple,
2543                                                                           vardata1->atttype,
2544                                                                           vardata1->atttypmod,
2545                                                                           STATISTIC_KIND_MCV,
2546                                                                           InvalidOid,
2547                                                                           NULL,
2548                                                                           &values1, &nvalues1,
2549                                                                           &numbers1, &nnumbers1);
2550         }
2551
2552         if (HeapTupleIsValid(vardata2->statsTuple))
2553         {
2554                 have_mcvs2 = get_attstatsslot(vardata2->statsTuple,
2555                                                                           vardata2->atttype,
2556                                                                           vardata2->atttypmod,
2557                                                                           STATISTIC_KIND_MCV,
2558                                                                           InvalidOid,
2559                                                                           NULL,
2560                                                                           &values2, &nvalues2,
2561                                                                           &numbers2, &nnumbers2);
2562         }
2563
2564         if (have_mcvs1 && have_mcvs2 && OidIsValid(operator))
2565         {
2566                 /*
2567                  * We have most-common-value lists for both relations.  Run through
2568                  * the lists to see which MCVs actually join to each other with the
2569                  * given operator.  This allows us to determine the exact join
2570                  * selectivity for the portion of the relations represented by the MCV
2571                  * lists.  We still have to estimate for the remaining population, but
2572                  * in a skewed distribution this gives us a big leg up in accuracy.
2573                  */
2574                 FmgrInfo        eqproc;
2575                 bool       *hasmatch1;
2576                 bool       *hasmatch2;
2577                 double          nullfrac1 = stats1->stanullfrac;
2578                 double          matchfreq1,
2579                                         uncertainfrac,
2580                                         uncertain;
2581                 int                     i,
2582                                         nmatches,
2583                                         clamped_nvalues2;
2584
2585                 /*
2586                  * The clamping above could have resulted in nd2 being less than
2587                  * nvalues2; in that case we assume that precisely the nd2 most
2588                  * common values in the relation will appear in the join input, and so
2589                  * compare to only the first nd2 members of the MCV list.  Of course
2590                  * this is frequently wrong, but it's the best bet we can make.
2591                  */
2592                 clamped_nvalues2 = Min(nvalues2, nd2);
2593
2594                 fmgr_info(get_opcode(operator), &eqproc);
2595                 hasmatch1 = (bool *) palloc0(nvalues1 * sizeof(bool));
2596                 hasmatch2 = (bool *) palloc0(clamped_nvalues2 * sizeof(bool));
2597
2598                 /*
2599                  * Note we assume that each MCV will match at most one member of the
2600                  * other MCV list.  If the operator isn't really equality, there could
2601                  * be multiple matches --- but we don't look for them, both for speed
2602                  * and because the math wouldn't add up...
2603                  */
2604                 nmatches = 0;
2605                 for (i = 0; i < nvalues1; i++)
2606                 {
2607                         int                     j;
2608
2609                         for (j = 0; j < clamped_nvalues2; j++)
2610                         {
2611                                 if (hasmatch2[j])
2612                                         continue;
2613                                 if (DatumGetBool(FunctionCall2Coll(&eqproc,
2614                                                                                                    DEFAULT_COLLATION_OID,
2615                                                                                                    values1[i],
2616                                                                                                    values2[j])))
2617                                 {
2618                                         hasmatch1[i] = hasmatch2[j] = true;
2619                                         nmatches++;
2620                                         break;
2621                                 }
2622                         }
2623                 }
2624                 /* Sum up frequencies of matched MCVs */
2625                 matchfreq1 = 0.0;
2626                 for (i = 0; i < nvalues1; i++)
2627                 {
2628                         if (hasmatch1[i])
2629                                 matchfreq1 += numbers1[i];
2630                 }
2631                 CLAMP_PROBABILITY(matchfreq1);
2632                 pfree(hasmatch1);
2633                 pfree(hasmatch2);
2634
2635                 /*
2636                  * Now we need to estimate the fraction of relation 1 that has at
2637                  * least one join partner.  We know for certain that the matched MCVs
2638                  * do, so that gives us a lower bound, but we're really in the dark
2639                  * about everything else.  Our crude approach is: if nd1 <= nd2 then
2640                  * assume all non-null rel1 rows have join partners, else assume for
2641                  * the uncertain rows that a fraction nd2/nd1 have join partners. We
2642                  * can discount the known-matched MCVs from the distinct-values counts
2643                  * before doing the division.
2644                  *
2645                  * Crude as the above is, it's completely useless if we don't have
2646                  * reliable ndistinct values for both sides.  Hence, if either nd1 or
2647                  * nd2 is default, punt and assume half of the uncertain rows have
2648                  * join partners.
2649                  */
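                /*
                 * Worked illustration (hypothetical numbers): with matchfreq1 = 0.4,
                 * nullfrac1 = 0.1, nmatches = 10, nd1 = 110 and nd2 = 60, the
                 * discounted counts become nd1 = 100 and nd2 = 50, so
                 * uncertainfrac = 50/100 = 0.5, uncertain = 1 - 0.4 - 0.1 = 0.5,
                 * and selec = 0.4 + 0.5 * 0.5 = 0.65.
                 */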
2650                 if (!isdefault1 && !isdefault2)
2651                 {
2652                         nd1 -= nmatches;
2653                         nd2 -= nmatches;
2654                         if (nd1 <= nd2 || nd2 < 0)
2655                                 uncertainfrac = 1.0;
2656                         else
2657                                 uncertainfrac = nd2 / nd1;
2658                 }
2659                 else
2660                         uncertainfrac = 0.5;
2661                 uncertain = 1.0 - matchfreq1 - nullfrac1;
2662                 CLAMP_PROBABILITY(uncertain);
2663                 selec = matchfreq1 + uncertainfrac * uncertain;
2664         }
2665         else
2666         {
2667                 /*
2668                  * Without MCV lists for both sides, we can only use the heuristic
2669                  * about nd1 vs nd2.
2670                  */
2671                 double          nullfrac1 = stats1 ? stats1->stanullfrac : 0.0;
2672
2673                 if (!isdefault1 && !isdefault2)
2674                 {
2675                         if (nd1 <= nd2 || nd2 < 0)
2676                                 selec = 1.0 - nullfrac1;
2677                         else
2678                                 selec = (nd2 / nd1) * (1.0 - nullfrac1);
2679                 }
2680                 else
2681                         selec = 0.5 * (1.0 - nullfrac1);
2682         }
2683
2684         if (have_mcvs1)
2685                 free_attstatsslot(vardata1->atttype, values1, nvalues1,
2686                                                   numbers1, nnumbers1);
2687         if (have_mcvs2)
2688                 free_attstatsslot(vardata2->atttype, values2, nvalues2,
2689                                                   numbers2, nnumbers2);
2690
2691         return selec;
2692 }
2693
2694 /*
2695  *              neqjoinsel              - Join selectivity of "!="
2696  */
2697 Datum
2698 neqjoinsel(PG_FUNCTION_ARGS)
2699 {
2700         PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
2701         Oid                     operator = PG_GETARG_OID(1);
2702         List       *args = (List *) PG_GETARG_POINTER(2);
2703         JoinType        jointype = (JoinType) PG_GETARG_INT16(3);
2704         SpecialJoinInfo *sjinfo = (SpecialJoinInfo *) PG_GETARG_POINTER(4);
2705         Oid                     eqop;
2706         float8          result;
2707
2708         /*
2709          * We want 1 - eqjoinsel() where the equality operator is the one
2710          * associated with this != operator, that is, its negator.
2711          */
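        /*
         * For example, for integer "<>" the negator is integer "=", so if
         * eqjoinsel for "=" were to return 0.01, this function would return
         * 1.0 - 0.01 = 0.99.  (Purely illustrative numbers.)
         */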
2712         eqop = get_negator(operator);
2713         if (eqop)
2714         {
2715                 result = DatumGetFloat8(DirectFunctionCall5(eqjoinsel,
2716                                                                                                         PointerGetDatum(root),
2717                                                                                                         ObjectIdGetDatum(eqop),
2718                                                                                                         PointerGetDatum(args),
2719                                                                                                         Int16GetDatum(jointype),
2720                                                                                                         PointerGetDatum(sjinfo)));
2721         }
2722         else
2723         {
2724                 /* Use default selectivity (should we raise an error instead?) */
2725                 result = DEFAULT_EQ_SEL;
2726         }
2727         result = 1.0 - result;
2728         PG_RETURN_FLOAT8(result);
2729 }
2730
2731 /*
2732  *              scalarltjoinsel - Join selectivity of "<" and "<=" for scalars
2733  */
2734 Datum
2735 scalarltjoinsel(PG_FUNCTION_ARGS)
2736 {
2737         PG_RETURN_FLOAT8(DEFAULT_INEQ_SEL);
2738 }
2739
2740 /*
2741  *              scalargtjoinsel - Join selectivity of ">" and ">=" for scalars
2742  */
2743 Datum
2744 scalargtjoinsel(PG_FUNCTION_ARGS)
2745 {
2746         PG_RETURN_FLOAT8(DEFAULT_INEQ_SEL);
2747 }
2748
2749 /*
2750  * patternjoinsel               - Generic code for pattern-match join selectivity.
2751  */
2752 static double
2753 patternjoinsel(PG_FUNCTION_ARGS, Pattern_Type ptype, bool negate)
2754 {
2755         /* For the moment we just punt. */
2756         return negate ? (1.0 - DEFAULT_MATCH_SEL) : DEFAULT_MATCH_SEL;
2757 }
2758
2759 /*
2760  *              regexeqjoinsel  - Join selectivity of regular-expression pattern match.
2761  */
2762 Datum
2763 regexeqjoinsel(PG_FUNCTION_ARGS)
2764 {
2765         PG_RETURN_FLOAT8(patternjoinsel(fcinfo, Pattern_Type_Regex, false));
2766 }
2767
2768 /*
2769  *              icregexeqjoinsel        - Join selectivity of case-insensitive regex match.
2770  */
2771 Datum
2772 icregexeqjoinsel(PG_FUNCTION_ARGS)
2773 {
2774         PG_RETURN_FLOAT8(patternjoinsel(fcinfo, Pattern_Type_Regex_IC, false));
2775 }
2776
2777 /*
2778  *              likejoinsel                     - Join selectivity of LIKE pattern match.
2779  */
2780 Datum
2781 likejoinsel(PG_FUNCTION_ARGS)
2782 {
2783         PG_RETURN_FLOAT8(patternjoinsel(fcinfo, Pattern_Type_Like, false));
2784 }
2785
2786 /*
2787  *              iclikejoinsel                   - Join selectivity of ILIKE pattern match.
2788  */
2789 Datum
2790 iclikejoinsel(PG_FUNCTION_ARGS)
2791 {
2792         PG_RETURN_FLOAT8(patternjoinsel(fcinfo, Pattern_Type_Like_IC, false));
2793 }
2794
2795 /*
2796  *              regexnejoinsel  - Join selectivity of regex non-match.
2797  */
2798 Datum
2799 regexnejoinsel(PG_FUNCTION_ARGS)
2800 {
2801         PG_RETURN_FLOAT8(patternjoinsel(fcinfo, Pattern_Type_Regex, true));
2802 }
2803
2804 /*
2805  *              icregexnejoinsel        - Join selectivity of case-insensitive regex non-match.
2806  */
2807 Datum
2808 icregexnejoinsel(PG_FUNCTION_ARGS)
2809 {
2810         PG_RETURN_FLOAT8(patternjoinsel(fcinfo, Pattern_Type_Regex_IC, true));
2811 }
2812
2813 /*
2814  *              nlikejoinsel            - Join selectivity of LIKE pattern non-match.
2815  */
2816 Datum
2817 nlikejoinsel(PG_FUNCTION_ARGS)
2818 {
2819         PG_RETURN_FLOAT8(patternjoinsel(fcinfo, Pattern_Type_Like, true));
2820 }
2821
2822 /*
2823  *              icnlikejoinsel          - Join selectivity of ILIKE pattern non-match.
2824  */
2825 Datum
2826 icnlikejoinsel(PG_FUNCTION_ARGS)
2827 {
2828         PG_RETURN_FLOAT8(patternjoinsel(fcinfo, Pattern_Type_Like_IC, true));
2829 }
2830
2831 /*
2832  * mergejoinscansel                     - Scan selectivity of merge join.
2833  *
2834  * A merge join will stop as soon as it exhausts either input stream.
2835  * Therefore, if we can estimate the ranges of both input variables,
2836  * we can estimate how much of the input will actually be read.  This
2837  * can have a considerable impact on the cost when using indexscans.
2838  *
2839  * Also, we can estimate how much of each input has to be read before the
2840  * first join pair is found, which will affect the join's startup time.
2841  *
2842  * clause should be a clause already known to be mergejoinable.  opfamily,
2843  * strategy, and nulls_first specify the sort ordering being used.
2844  *
2845  * The outputs are:
2846  *              *leftstart is set to the fraction of the left-hand variable expected
2847  *               to be scanned before the first join pair is found (0 to 1).
2848  *              *leftend is set to the fraction of the left-hand variable expected
2849  *               to be scanned before the join terminates (0 to 1).
2850  *              *rightstart, *rightend similarly for the right-hand variable.
2851  */
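/*
 * Hypothetical illustration: for a merge clause x = y in ascending order,
 * where the stats histograms suggest x spans roughly 1..100 and y spans
 * roughly 50..200, we would expect *leftstart to come out near 0.5 (the part
 * of x below y's minimum), *rightstart near 0.0, *leftend at 1.0, and
 * *rightend well below 1.0 (the part of y not exceeding x's maximum).
 */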
2852 void
2853 mergejoinscansel(PlannerInfo *root, Node *clause,
2854                                  Oid opfamily, int strategy, bool nulls_first,
2855                                  Selectivity *leftstart, Selectivity *leftend,
2856                                  Selectivity *rightstart, Selectivity *rightend)
2857 {
2858         Node       *left,
2859                            *right;
2860         VariableStatData leftvar,
2861                                 rightvar;
2862         int                     op_strategy;
2863         Oid                     op_lefttype;
2864         Oid                     op_righttype;
2865         Oid                     opno,
2866                                 lsortop,
2867                                 rsortop,
2868                                 lstatop,
2869                                 rstatop,
2870                                 ltop,
2871                                 leop,
2872                                 revltop,
2873                                 revleop;
2874         bool            isgt;
2875         Datum           leftmin,
2876                                 leftmax,
2877                                 rightmin,
2878                                 rightmax;
2879         double          selec;
2880
2881         /* Set default results if we can't figure anything out. */
2882         /* XXX should default "start" fraction be a bit more than 0? */
2883         *leftstart = *rightstart = 0.0;
2884         *leftend = *rightend = 1.0;
2885
2886         /* Deconstruct the merge clause */
2887         if (!is_opclause(clause))
2888                 return;                                 /* shouldn't happen */
2889         opno = ((OpExpr *) clause)->opno;
2890         left = get_leftop((Expr *) clause);
2891         right = get_rightop((Expr *) clause);
2892         if (!right)
2893                 return;                                 /* shouldn't happen */
2894
2895         /* Look for stats for the inputs */
2896         examine_variable(root, left, 0, &leftvar);
2897         examine_variable(root, right, 0, &rightvar);
2898
2899         /* Extract the operator's declared left/right datatypes */
2900         get_op_opfamily_properties(opno, opfamily, false,
2901                                                            &op_strategy,
2902                                                            &op_lefttype,
2903                                                            &op_righttype);
2904         Assert(op_strategy == BTEqualStrategyNumber);
2905
2906         /*
2907          * Look up the various operators we need.  If we don't find them all, it
2908          * probably means the opfamily is broken, but we just fail silently.
2909          *
2910          * Note: we expect that pg_statistic histograms will be sorted by the '<'
2911          * operator, regardless of which sort direction we are considering.
2912          */
2913         switch (strategy)
2914         {
2915                 case BTLessStrategyNumber:
2916                         isgt = false;
2917                         if (op_lefttype == op_righttype)
2918                         {
2919                                 /* easy case */
2920                                 ltop = get_opfamily_member(opfamily,
2921                                                                                    op_lefttype, op_righttype,
2922                                                                                    BTLessStrategyNumber);
2923                                 leop = get_opfamily_member(opfamily,
2924                                                                                    op_lefttype, op_righttype,
2925                                                                                    BTLessEqualStrategyNumber);
2926                                 lsortop = ltop;
2927                                 rsortop = ltop;
2928                                 lstatop = lsortop;
2929                                 rstatop = rsortop;
2930                                 revltop = ltop;
2931                                 revleop = leop;
2932                         }
2933                         else
2934                         {
2935                                 ltop = get_opfamily_member(opfamily,
2936                                                                                    op_lefttype, op_righttype,
2937                                                                                    BTLessStrategyNumber);
2938                                 leop = get_opfamily_member(opfamily,
2939                                                                                    op_lefttype, op_righttype,
2940                                                                                    BTLessEqualStrategyNumber);
2941                                 lsortop = get_opfamily_member(opfamily,
2942                                                                                           op_lefttype, op_lefttype,
2943                                                                                           BTLessStrategyNumber);
2944                                 rsortop = get_opfamily_member(opfamily,
2945                                                                                           op_righttype, op_righttype,
2946                                                                                           BTLessStrategyNumber);
2947                                 lstatop = lsortop;
2948                                 rstatop = rsortop;
2949                                 revltop = get_opfamily_member(opfamily,
2950                                                                                           op_righttype, op_lefttype,
2951                                                                                           BTLessStrategyNumber);
2952                                 revleop = get_opfamily_member(opfamily,
2953                                                                                           op_righttype, op_lefttype,
2954                                                                                           BTLessEqualStrategyNumber);
2955                         }
2956                         break;
2957                 case BTGreaterStrategyNumber:
2958                         /* descending-order case */
2959                         isgt = true;
2960                         if (op_lefttype == op_righttype)
2961                         {
2962                                 /* easy case */
2963                                 ltop = get_opfamily_member(opfamily,
2964                                                                                    op_lefttype, op_righttype,
2965                                                                                    BTGreaterStrategyNumber);
2966                                 leop = get_opfamily_member(opfamily,
2967                                                                                    op_lefttype, op_righttype,
2968                                                                                    BTGreaterEqualStrategyNumber);
2969                                 lsortop = ltop;
2970                                 rsortop = ltop;
2971                                 lstatop = get_opfamily_member(opfamily,
2972                                                                                           op_lefttype, op_lefttype,
2973                                                                                           BTLessStrategyNumber);
2974                                 rstatop = lstatop;
2975                                 revltop = ltop;
2976                                 revleop = leop;
2977                         }
2978                         else
2979                         {
2980                                 ltop = get_opfamily_member(opfamily,
2981                                                                                    op_lefttype, op_righttype,
2982                                                                                    BTGreaterStrategyNumber);
2983                                 leop = get_opfamily_member(opfamily,
2984                                                                                    op_lefttype, op_righttype,
2985                                                                                    BTGreaterEqualStrategyNumber);
2986                                 lsortop = get_opfamily_member(opfamily,
2987                                                                                           op_lefttype, op_lefttype,
2988                                                                                           BTGreaterStrategyNumber);
2989                                 rsortop = get_opfamily_member(opfamily,
2990                                                                                           op_righttype, op_righttype,
2991                                                                                           BTGreaterStrategyNumber);
2992                                 lstatop = get_opfamily_member(opfamily,
2993                                                                                           op_lefttype, op_lefttype,
2994                                                                                           BTLessStrategyNumber);
2995                                 rstatop = get_opfamily_member(opfamily,
2996                                                                                           op_righttype, op_righttype,
2997                                                                                           BTLessStrategyNumber);
2998                                 revltop = get_opfamily_member(opfamily,
2999                                                                                           op_righttype, op_lefttype,
3000                                                                                           BTGreaterStrategyNumber);
3001                                 revleop = get_opfamily_member(opfamily,
3002                                                                                           op_righttype, op_lefttype,
3003                                                                                           BTGreaterEqualStrategyNumber);
3004                         }
3005                         break;
3006                 default:
3007                         goto fail;                      /* shouldn't get here */
3008         }
3009
3010         if (!OidIsValid(lsortop) ||
3011                 !OidIsValid(rsortop) ||
3012                 !OidIsValid(lstatop) ||
3013                 !OidIsValid(rstatop) ||
3014                 !OidIsValid(ltop) ||
3015                 !OidIsValid(leop) ||
3016                 !OidIsValid(revltop) ||
3017                 !OidIsValid(revleop))
3018                 goto fail;                              /* insufficient info in catalogs */
3019
3020         /* Try to get ranges of both inputs */
3021         if (!isgt)
3022         {
3023                 if (!get_variable_range(root, &leftvar, lstatop,
3024                                                                 &leftmin, &leftmax))
3025                         goto fail;                      /* no range available from stats */
3026                 if (!get_variable_range(root, &rightvar, rstatop,
3027                                                                 &rightmin, &rightmax))
3028                         goto fail;                      /* no range available from stats */
3029         }
3030         else
3031         {
3032                 /* need to swap the max and min */
3033                 if (!get_variable_range(root, &leftvar, lstatop,
3034                                                                 &leftmax, &leftmin))
3035                         goto fail;                      /* no range available from stats */
3036                 if (!get_variable_range(root, &rightvar, rstatop,
3037                                                                 &rightmax, &rightmin))
3038                         goto fail;                      /* no range available from stats */
3039         }
3040
3041         /*
3042          * Now, the fraction of the left variable that will be scanned is the
3043          * fraction that's <= the right-side maximum value.  But only believe
3044          * non-default estimates, else stick with our 1.0.
3045          */
3046         selec = scalarineqsel(root, leop, isgt, &leftvar,
3047                                                   rightmax, op_righttype);
3048         if (selec != DEFAULT_INEQ_SEL)
3049                 *leftend = selec;
3050
3051         /* And similarly for the right variable. */
3052         selec = scalarineqsel(root, revleop, isgt, &rightvar,
3053                                                   leftmax, op_lefttype);
3054         if (selec != DEFAULT_INEQ_SEL)
3055                 *rightend = selec;
3056
3057         /*
3058          * Only one of the two "end" fractions can really be less than 1.0;
3059          * believe the smaller estimate and reset the other one to exactly 1.0. If
3060          * we get exactly equal estimates (as can easily happen with self-joins),
3061          * believe neither.
3062          */
3063         if (*leftend > *rightend)
3064                 *leftend = 1.0;
3065         else if (*leftend < *rightend)
3066                 *rightend = 1.0;
3067         else
3068                 *leftend = *rightend = 1.0;
3069
3070         /*
3071          * Also, the fraction of the left variable that will be scanned before the
3072          * first join pair is found is the fraction that's < the right-side
3073          * minimum value.  But only believe non-default estimates, else stick with
3074          * our own default.
3075          */
3076         selec = scalarineqsel(root, ltop, isgt, &leftvar,
3077                                                   rightmin, op_righttype);
3078         if (selec != DEFAULT_INEQ_SEL)
3079                 *leftstart = selec;
3080
3081         /* And similarly for the right variable. */
3082         selec = scalarineqsel(root, revltop, isgt, &rightvar,
3083                                                   leftmin, op_lefttype);
3084         if (selec != DEFAULT_INEQ_SEL)
3085                 *rightstart = selec;
3086
3087         /*
3088          * Only one of the two "start" fractions can really be more than zero;
3089          * believe the larger estimate and reset the other one to exactly 0.0. If
3090          * we get exactly equal estimates (as can easily happen with self-joins),
3091          * believe neither.
3092          */
3093         if (*leftstart < *rightstart)
3094                 *leftstart = 0.0;
3095         else if (*leftstart > *rightstart)
3096                 *rightstart = 0.0;
3097         else
3098                 *leftstart = *rightstart = 0.0;
3099
3100         /*
3101          * If the sort order is nulls-first, we're going to have to skip over any
3102          * nulls too.  These would not have been counted by scalarineqsel, and we
3103          * can safely add in this fraction regardless of whether we believe
3104          * scalarineqsel's results or not.  But be sure to clamp the sum to 1.0!
3105          */
3106         if (nulls_first)
3107         {
3108                 Form_pg_statistic stats;
3109
3110                 if (HeapTupleIsValid(leftvar.statsTuple))
3111                 {
3112                         stats = (Form_pg_statistic) GETSTRUCT(leftvar.statsTuple);
3113                         *leftstart += stats->stanullfrac;
3114                         CLAMP_PROBABILITY(*leftstart);
3115                         *leftend += stats->stanullfrac;
3116                         CLAMP_PROBABILITY(*leftend);
3117                 }
3118                 if (HeapTupleIsValid(rightvar.statsTuple))
3119                 {
3120                         stats = (Form_pg_statistic) GETSTRUCT(rightvar.statsTuple);
3121                         *rightstart += stats->stanullfrac;
3122                         CLAMP_PROBABILITY(*rightstart);
3123                         *rightend += stats->stanullfrac;
3124                         CLAMP_PROBABILITY(*rightend);
3125                 }
3126         }
3127
3128         /* Disbelieve start >= end, just in case that can happen */
3129         if (*leftstart >= *leftend)
3130         {
3131                 *leftstart = 0.0;
3132                 *leftend = 1.0;
3133         }
3134         if (*rightstart >= *rightend)
3135         {
3136                 *rightstart = 0.0;
3137                 *rightend = 1.0;
3138         }
3139
3140 fail:
3141         ReleaseVariableStats(leftvar);
3142         ReleaseVariableStats(rightvar);
3143 }
3144
3145
3146 /*
3147  * Helper routine for estimate_num_groups: add an item to a list of
3148  * GroupVarInfos, but only if it's not known equal to any of the existing
3149  * entries.
3150  */
3151 typedef struct
3152 {
3153         Node       *var;                        /* might be an expression, not just a Var */
3154         RelOptInfo *rel;                        /* relation it belongs to */
3155         double          ndistinct;              /* # distinct values */
3156 } GroupVarInfo;
3157
3158 static List *
3159 add_unique_group_var(PlannerInfo *root, List *varinfos,
3160                                          Node *var, VariableStatData *vardata)
3161 {
3162         GroupVarInfo *varinfo;
3163         double          ndistinct;
3164         bool            isdefault;
3165         ListCell   *lc;
3166
3167         ndistinct = get_variable_numdistinct(vardata, &isdefault);
3168
3169         /* cannot use foreach here because of possible list_delete */
3170         lc = list_head(varinfos);
3171         while (lc)
3172         {
3173                 varinfo = (GroupVarInfo *) lfirst(lc);
3174
3175                 /* must advance lc before list_delete possibly pfree's it */
3176                 lc = lnext(lc);
3177
3178                 /* Drop exact duplicates */
3179                 if (equal(var, varinfo->var))
3180                         return varinfos;
3181
3182                 /*
3183                  * Drop known-equal vars, but only if they belong to different
3184                  * relations (see comments for estimate_num_groups)
3185                  */
3186                 if (vardata->rel != varinfo->rel &&
3187                         exprs_known_equal(root, var, varinfo->var))
3188                 {
3189                         if (varinfo->ndistinct <= ndistinct)
3190                         {
3191                                 /* Keep older item, forget new one */
3192                                 return varinfos;
3193                         }
3194                         else
3195                         {
3196                                 /* Delete the older item */
3197                                 varinfos = list_delete_ptr(varinfos, varinfo);
3198                         }
3199                 }
3200         }
3201
3202         varinfo = (GroupVarInfo *) palloc(sizeof(GroupVarInfo));
3203
3204         varinfo->var = var;
3205         varinfo->rel = vardata->rel;
3206         varinfo->ndistinct = ndistinct;
3207         varinfos = lappend(varinfos, varinfo);
3208         return varinfos;
3209 }
3210
3211 /*
3212  * estimate_num_groups          - Estimate number of groups in a grouped query
3213  *
3214  * Given a query having a GROUP BY clause, estimate how many groups there
3215  * will be --- ie, the number of distinct combinations of the GROUP BY
3216  * expressions.
3217  *
3218  * This routine is also used to estimate the number of rows emitted by
3219  * a DISTINCT filtering step; that is an isomorphic problem.  (Note:
3220  * actually, we only use it for DISTINCT when there's no grouping or
3221  * aggregation ahead of the DISTINCT.)
3222  *
3223  * Inputs:
3224  *      root - the query
3225  *      groupExprs - list of expressions being grouped by
3226  *      input_rows - number of rows estimated to arrive at the group/unique
3227  *              filter step
3228  *      pgset - NULL, or a List** pointing to a grouping set to filter the
3229  *              groupExprs against
3230  *
3231  * Given the lack of any cross-correlation statistics in the system, it's
3232  * impossible to do anything really trustworthy with GROUP BY conditions
3233  * involving multiple Vars.  We should however avoid assuming the worst
3234  * case (all possible cross-product terms actually appear as groups) since
3235  * very often the grouped-by Vars are highly correlated.  Our current approach
3236  * is as follows:
3237  *      1.  Expressions yielding boolean are assumed to contribute two groups,
3238  *              independently of their content, and are ignored in the subsequent
3239  *              steps.  This is mainly because tests like "col IS NULL" break the
3240  *              heuristic used in step 2 especially badly.
3241  *      2.  Reduce the given expressions to a list of unique Vars used.  For
3242  *              example, GROUP BY a, a + b is treated the same as GROUP BY a, b.
3243  *              It is clearly correct not to count the same Var more than once.
3244  *              It is also reasonable to treat f(x) the same as x: f() cannot
3245  *              increase the number of distinct values (unless it is volatile,
3246  *              which we consider unlikely for grouping), but it probably won't
3247  *              reduce the number of distinct values much either.
3248  *              As a special case, if a GROUP BY expression can be matched to an
3249  *              expressional index for which we have statistics, then we treat the
3250  *              whole expression as though it were just a Var.
3251  *      3.  If the list contains Vars of different relations that are known equal
3252  *              due to equivalence classes, then drop all but one of the Vars from each
3253  *              known-equal set, keeping the one with smallest estimated # of values
3254  *              (since the extra values of the others can't appear in joined rows).
3255  *              Note the reason we only consider Vars of different relations is that
3256  *              if we considered ones of the same rel, we'd be double-counting the
3257  *              restriction selectivity of the equality in the next step.
3258  *      4.  For Vars within a single source rel, we multiply together the numbers
3259  *              of values, clamp to the number of rows in the rel (divided by 10 if
3260  *              more than one Var), and then multiply by a factor based on the
3261  *              selectivity of the restriction clauses for that rel.  When there's
3262  *              more than one Var, the initial product is probably too high (it's the
3263  *              worst case) but clamping to a fraction of the rel's rows seems to be a
3264  *              helpful heuristic for not letting the estimate get out of hand.  (The
3265  *              factor of 10 is derived from pre-Postgres-7.4 practice.)  The factor
3266  *              we multiply by to adjust for the restriction selectivity assumes that
3267  *              the restriction clauses are independent of the grouping, which may not
3268  *              be a valid assumption, but it's hard to do better.
3269  *      5.  If there are Vars from multiple rels, we repeat step 4 for each such
3270  *              rel, and multiply the results together.
3271  * Note that rels not containing grouped Vars are ignored completely, as are
3272  * join clauses.  Such rels cannot increase the number of groups, and we
3273  * assume such clauses do not reduce the number either (somewhat bogus,
3274  * but we don't have the info to do better).
3275  */
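/*
 * A rough illustration of the steps above, using a hypothetical table t with
 * columns a, b, c: for GROUP BY a, (b IS NOT NULL), c + 1, step 1 contributes
 * a factor of 2 for the boolean expression; step 2 reduces the remaining
 * items to the Vars {a, c}; and if ndistinct(a) = 10, ndistinct(c) = 50, and
 * t has 2000 rows, step 4 starts from 10 * 50 = 500, clamps that to
 * 2000/10 = 200 (since more than one Var is involved), and finally scales it
 * by a factor reflecting the selectivity of t's restriction clauses.
 */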
3276 double
3277 estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows,
3278                                         List **pgset)
3279 {
3280         List       *varinfos = NIL;
3281         double          numdistinct;
3282         ListCell   *l;
3283         int                     i;
3284
3285         /*
3286          * We don't ever want to return an estimate of zero groups, as that tends
3287          * to lead to division-by-zero and other unpleasantness.  The input_rows
3288          * estimate is usually already at least 1, but clamp it just in case it
3289          * isn't.
3290          */
3291         input_rows = clamp_row_est(input_rows);
3292
3293         /*
3294          * If no grouping columns, there's exactly one group.  (This can't happen
3295          * for normal cases with GROUP BY or DISTINCT, but it is possible for
3296          * corner cases with set operations.)
3297          */
3298         if (groupExprs == NIL || (pgset && list_length(*pgset) < 1))
3299                 return 1.0;
3300
3301         /*
3302          * Count groups derived from boolean grouping expressions.  For other
3303          * expressions, find the unique Vars used, treating an expression as a Var
3304          * if we can find stats for it.  For each one, record the statistical
3305          * estimate of number of distinct values (total in its table, without
3306          * regard for filtering).
3307          */
3308         numdistinct = 1.0;
3309
3310         i = 0;
3311         foreach(l, groupExprs)
3312         {
3313                 Node       *groupexpr = (Node *) lfirst(l);
3314                 VariableStatData vardata;
3315                 List       *varshere;
3316                 ListCell   *l2;
3317
3318                 /* is expression in this grouping set? */
3319                 if (pgset && !list_member_int(*pgset, i++))
3320                         continue;
3321
3322                 /* Short-circuit for expressions returning boolean */
3323                 if (exprType(groupexpr) == BOOLOID)
3324                 {
3325                         numdistinct *= 2.0;
3326                         continue;
3327                 }
3328
3329                 /*
3330                  * If examine_variable is able to deduce anything about the GROUP BY
3331                  * expression, treat it as a single variable even if it's really more
3332                  * complicated.
3333                  */
3334                 examine_variable(root, groupexpr, 0, &vardata);
3335                 if (HeapTupleIsValid(vardata.statsTuple) || vardata.isunique)
3336                 {
3337                         varinfos = add_unique_group_var(root, varinfos,
3338                                                                                         groupexpr, &vardata);
3339                         ReleaseVariableStats(vardata);
3340                         continue;
3341                 }
3342                 ReleaseVariableStats(vardata);
3343
3344                 /*
3345                  * Else pull out the component Vars.  Handle PlaceHolderVars by
3346                  * recursing into their arguments (effectively assuming that the
3347                  * PlaceHolderVar doesn't change the number of groups, which boils
3348                  * down to ignoring the possible addition of nulls to the result set).
3349                  */
3350                 varshere = pull_var_clause(groupexpr,
3351                                                                    PVC_RECURSE_AGGREGATES |
3352                                                                    PVC_RECURSE_WINDOWFUNCS |
3353                                                                    PVC_RECURSE_PLACEHOLDERS);
3354
3355                 /*
3356                  * If we find any variable-free GROUP BY item, then either it is a
3357                  * constant (and we can ignore it) or it contains a volatile function;
3358                  * in the latter case we punt and assume that each input row will
3359                  * yield a distinct group.
3360                  */
3361                 if (varshere == NIL)
3362                 {
3363                         if (contain_volatile_functions(groupexpr))
3364                                 return input_rows;
3365                         continue;
3366                 }
3367
3368                 /*
3369                  * Else add variables to varinfos list
3370                  */
3371                 foreach(l2, varshere)
3372                 {
3373                         Node       *var = (Node *) lfirst(l2);
3374
3375                         examine_variable(root, var, 0, &vardata);
3376                         varinfos = add_unique_group_var(root, varinfos, var, &vardata);
3377                         ReleaseVariableStats(vardata);
3378                 }
3379         }
3380
3381         /*
3382          * If now no Vars, we must have an all-constant or all-boolean GROUP BY
3383          * list.
3384          */
3385         if (varinfos == NIL)
3386         {
3387                 /* Guard against out-of-range answers */
3388                 if (numdistinct > input_rows)
3389                         numdistinct = input_rows;
3390                 return numdistinct;
3391         }
3392
3393         /*
3394          * Group Vars by relation and estimate total numdistinct.
3395          *
3396          * For each iteration of the outer loop, we process the frontmost Var in
3397          * varinfos, plus all other Vars in the same relation.  Those Vars are
3398          * consumed here; the rest go into newvarinfos for the next iteration.
3399          * This is the easiest way to group Vars of the same rel together.
3400          */
3401         do
3402         {
3403                 GroupVarInfo *varinfo1 = (GroupVarInfo *) linitial(varinfos);
3404                 RelOptInfo *rel = varinfo1->rel;
3405                 double          reldistinct = 1;
3406                 double          relmaxndistinct = reldistinct;
3407                 int                     relvarcount = 0;
3408                 List       *newvarinfos = NIL;
3409                 List       *relvarinfos = NIL;
3410
3411                 /*
3412                  * Split the list of varinfos in two - one for the current rel,
3413                  * one for remaining Vars on other rels.
3414                  */
3415                 relvarinfos = lcons(varinfo1, relvarinfos);
3416                 for_each_cell(l, lnext(list_head(varinfos)))
3417                 {
3418                         GroupVarInfo *varinfo2 = (GroupVarInfo *) lfirst(l);
3419
3420                         if (varinfo2->rel == varinfo1->rel)
3421                         {
3422                                 /* varinfos on current rel */
3423                                 relvarinfos = lcons(varinfo2, relvarinfos);
3424                         }
3425                         else
3426                         {
3427                                 /* not time to process varinfo2 yet */
3428                                 newvarinfos = lcons(varinfo2, newvarinfos);
3429                         }
3430                 }
3431
3432                 /*
3433                  * Get the numdistinct estimate for the Vars of this rel.  We
3434                  * iteratively search for multivariate n-distinct with maximum number
3435                  * of vars; assuming that each var group is independent of the others,
3436                  * we multiply them together.  Any relvarinfos still remaining once
3437                  * no more multivariate matches can be found are assumed independent
3438                  * too, so their individual ndistinct estimates are multiplied also.
3439                  *
3440                  * While iterating, count how many separate numdistinct values we
3441                  * apply.  We apply a fudge factor below, but only if we multiplied
3442                  * more than one such value.
3443                  */
3444                 while (relvarinfos)
3445                 {
3446                         double          mvndistinct;
3447
3448                         if (estimate_multivariate_ndistinct(root, rel, &relvarinfos,
3449                                                                                                 &mvndistinct))
3450                         {
3451                                 reldistinct *= mvndistinct;
3452                                 if (relmaxndistinct < mvndistinct)
3453                                         relmaxndistinct = mvndistinct;
3454                                 relvarcount++;
3455                         }
3456                         else
3457                         {
3458                                 foreach (l, relvarinfos)
3459                                 {
3460                                         GroupVarInfo *varinfo2 = (GroupVarInfo *) lfirst(l);
3461
3462                                         reldistinct *= varinfo2->ndistinct;
3463                                         if (relmaxndistinct < varinfo2->ndistinct)
3464                                                 relmaxndistinct = varinfo2->ndistinct;
3465                                         relvarcount++;
3466                                 }
3467
3468                                 /* we're done with this relation */
3469                                 relvarinfos = NIL;
3470                         }
3471                 }
3472
3473                 /*
3474                  * Sanity check --- don't divide by zero if empty relation.
3475                  */
3476                 Assert(rel->reloptkind == RELOPT_BASEREL);
3477                 if (rel->tuples > 0)
3478                 {
3479                         /*
3480                          * Clamp to size of rel, or size of rel / 10 if multiple Vars. The
3481                          * fudge factor is because the Vars are probably correlated but we
3482                          * don't know by how much.  We should never clamp to less than the
3483                          * largest ndistinct value for any of the Vars, though, since
3484                          * there will surely be at least that many groups.
3485                          */
3486                         double          clamp = rel->tuples;
3487
3488                         if (relvarcount > 1)
3489                         {
3490                                 clamp *= 0.1;
3491                                 if (clamp < relmaxndistinct)
3492                                 {
3493                                         clamp = relmaxndistinct;
3494                                         /* for sanity in case some ndistinct is too large: */
3495                                         if (clamp > rel->tuples)
3496                                                 clamp = rel->tuples;
3497                                 }
3498                         }
3499                         if (reldistinct > clamp)
3500                                 reldistinct = clamp;
3501
3502                         /*
3503                          * Update the estimate based on the restriction selectivity,
3504                          * guarding against division by zero when reldistinct is zero.
3505                          * Also skip this if we know that we are returning all rows.
3506                          */
3507                         if (reldistinct > 0 && rel->rows < rel->tuples)
3508                         {
3509                                 /*
3510                                  * Given a table containing N rows with n distinct values in a
3511                                  * uniform distribution, if we select p rows at random then
3512                                  * the expected number of distinct values selected is
3513                                  *
3514                                  * n * (1 - product((N-N/n-i)/(N-i), i=0..p-1))
3515                                  *
3516                                  * = n * (1 - (N-N/n)! / (N-N/n-p)! * (N-p)! / N!)
3517                                  *
3518                                  * See "Approximating block accesses in database
3519                                  * organizations", S. B. Yao, Communications of the ACM,
3520                                  * Volume 20 Issue 4, April 1977 Pages 260-261.
3521                                  *
3522                                  * Alternatively, re-arranging the terms from the factorials,
3523                                  * this may be written as
3524                                  *
3525                                  * n * (1 - product((N-p-i)/(N-i), i=0..N/n-1))
3526                                  *
3527                                  * This form of the formula is more efficient to compute in
3528                                  * the common case where p is larger than N/n.  Additionally,
3529                                  * as pointed out by Dell'Era, if i << N for all terms in the
3530                                  * product, it can be approximated by
3531                                  *
3532                                  * n * (1 - ((N-p)/N)^(N/n))
3533                                  *
3534                                  * See "Expected distinct values when selecting from a bag
3535                                  * without replacement", Alberto Dell'Era,
3536                                  * http://www.adellera.it/investigations/distinct_balls/.
3537                                  *
3538                                  * The condition i << N is equivalent to n >> 1, so this is a
3539                                  * good approximation when the number of distinct values in
3540                                  * the table is large.  It turns out that this formula also
3541                                  * works well even when n is small.
3542                                  */
3543                                 reldistinct *=
3544                                         (1 - pow((rel->tuples - rel->rows) / rel->tuples,
3545                                                          rel->tuples / reldistinct));
3546                         }
3547                         reldistinct = clamp_row_est(reldistinct);
3548
3549                         /*
3550                          * Update estimate of total distinct groups.
3551                          */
3552                         numdistinct *= reldistinct;
3553                 }
3554
3555                 varinfos = newvarinfos;
3556         } while (varinfos != NIL);
3557
3558         numdistinct = ceil(numdistinct);
3559
3560         /* Guard against out-of-range answers */
3561         if (numdistinct > input_rows)
3562                 numdistinct = input_rows;
3563         if (numdistinct < 1.0)
3564                 numdistinct = 1.0;
3565
3566         return numdistinct;
3567 }
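
To see what the sampling formula in the comment above does numerically, here is a minimal standalone sketch (an editor's illustration, not part of selfuncs.c) that evaluates Dell'Era's approximation n * (1 - ((N-p)/N)^(N/n)); the function name expected_distinct and the sample inputs are invented for the example.

/* Illustrative sketch only; not part of selfuncs.c. */
#include <math.h>
#include <stdio.h>

/*
 * Expected number of distinct values seen when p rows are selected at
 * random from a table of N rows containing n distinct values, using
 * Dell'Era's approximation n * (1 - ((N - p) / N)^(N / n)).
 */
static double
expected_distinct(double N, double n, double p)
{
        return n * (1.0 - pow((N - p) / N, N / n));
}

int
main(void)
{
        /* hypothetical: 1,000,000 rows, 1,000 distinct values, 100,000 selected */
        printf("%.1f\n", expected_distinct(1000000.0, 1000.0, 100000.0));

        /* a much smaller sample keeps noticeably fewer distinct values */
        printf("%.1f\n", expected_distinct(1000000.0, 1000.0, 1000.0));
        return 0;
}

With a 10% sample essentially all 1,000 values are expected to appear, while a 0.1% sample is expected to hit only about 632 of them; that damping is exactly what gets applied to reldistinct above.
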
3568
3569 /*
3570  * Estimate hash bucketsize fraction (ie, number of entries in a bucket
3571  * divided by total tuples in relation) if the specified expression is used
3572  * as a hash key.
3573  *
3574  * XXX This is really pretty bogus since we're effectively assuming that the
3575  * distribution of hash keys will be the same after applying restriction
3576  * clauses as it was in the underlying relation.  However, we are not nearly
3577  * smart enough to figure out how the restrict clauses might change the
3578  * distribution, so this will have to do for now.
3579  *
3580  * We are passed the number of buckets the executor will use for the given
3581  * input relation.  If the data were perfectly distributed, with the same
3582  * number of tuples going into each available bucket, then the bucketsize
3583  * fraction would be 1/nbuckets.  But this happy state of affairs will occur
3584  * only if (a) there are at least nbuckets distinct data values, and (b)
3585  * we have a not-too-skewed data distribution.  Otherwise the buckets will
3586  * be nonuniformly occupied.  If the other relation in the join has a key
3587  * distribution similar to this one's, then the most-loaded buckets are
3588  * exactly those that will be probed most often.  Therefore, the "average"
3589  * bucket size for costing purposes should really be taken as something close
3590  * to the "worst case" bucket size.  We try to estimate this by adjusting the
3591  * fraction if there are too few distinct data values, and then scaling up
3592  * by the ratio of the most common value's frequency to the average frequency.
3593  *
3594  * If no statistics are available, use a default estimate of 0.1.  This will
3595  * discourage use of a hash rather strongly if the inner relation is large,
3596  * which is what we want.  We do not want to hash unless we know that the
3597  * inner rel is well-dispersed (or the alternatives seem much worse).
3598  */
3599 Selectivity
3600 estimate_hash_bucketsize(PlannerInfo *root, Node *hashkey, double nbuckets)
3601 {
3602         VariableStatData vardata;
3603         double          estfract,
3604                                 ndistinct,
3605                                 stanullfrac,
3606                                 mcvfreq,
3607                                 avgfreq;
3608         bool            isdefault;
3609         float4     *numbers;
3610         int                     nnumbers;
3611
3612         examine_variable(root, hashkey, 0, &vardata);
3613
3614         /* Get number of distinct values */
3615         ndistinct = get_variable_numdistinct(&vardata, &isdefault);
3616
3617         /* If ndistinct isn't real, punt and return 0.1, per comments above */
3618         if (isdefault)
3619         {
3620                 ReleaseVariableStats(vardata);
3621                 return (Selectivity) 0.1;
3622         }
3623
3624         /* Get fraction that are null */
3625         if (HeapTupleIsValid(vardata.statsTuple))
3626         {
3627                 Form_pg_statistic stats;
3628
3629                 stats = (Form_pg_statistic) GETSTRUCT(vardata.statsTuple);
3630                 stanullfrac = stats->stanullfrac;
3631         }
3632         else
3633                 stanullfrac = 0.0;
3634
3635         /* Compute avg freq of all distinct data values in raw relation */
3636         avgfreq = (1.0 - stanullfrac) / ndistinct;
3637
3638         /*
3639          * Adjust ndistinct to account for restriction clauses.  Observe we are
3640          * assuming that the data distribution is affected uniformly by the
3641          * restriction clauses!
3642          *
3643          * XXX Possibly better way, but much more expensive: multiply by
3644          * selectivity of rel's restriction clauses that mention the target Var.
3645          */
3646         if (vardata.rel && vardata.rel->tuples > 0)
3647         {
3648                 ndistinct *= vardata.rel->rows / vardata.rel->tuples;
3649                 ndistinct = clamp_row_est(ndistinct);
3650         }
3651
3652         /*
3653          * Initial estimate of bucketsize fraction is 1/nbuckets as long as the
3654          * number of buckets is less than the expected number of distinct values;
3655          * otherwise it is 1/ndistinct.
3656          */
3657         if (ndistinct > nbuckets)
3658                 estfract = 1.0 / nbuckets;
3659         else
3660                 estfract = 1.0 / ndistinct;
3661
3662         /*
3663          * Look up the frequency of the most common value, if available.
3664          */
3665         mcvfreq = 0.0;
3666
3667         if (HeapTupleIsValid(vardata.statsTuple))
3668         {
3669                 if (get_attstatsslot(vardata.statsTuple,
3670                                                          vardata.atttype, vardata.atttypmod,
3671                                                          STATISTIC_KIND_MCV, InvalidOid,
3672                                                          NULL,
3673                                                          NULL, NULL,
3674                                                          &numbers, &nnumbers))
3675                 {
3676                         /*
3677                          * The first MCV stat is for the most common value.
3678                          */
3679                         if (nnumbers > 0)
3680                                 mcvfreq = numbers[0];
3681                         free_attstatsslot(vardata.atttype, NULL, 0,
3682                                                           numbers, nnumbers);
3683                 }
3684         }
3685
3686         /*
3687          * Adjust estimated bucketsize upward to account for skewed distribution.
3688          */
3689         if (avgfreq > 0.0 && mcvfreq > avgfreq)
3690                 estfract *= mcvfreq / avgfreq;
3691
3692         /*
3693          * Clamp bucketsize to sane range (the above adjustment could easily
3694          * produce an out-of-range result).  We set the lower bound a little above
3695          * zero, since zero isn't a very sane result.
3696          */
3697         if (estfract < 1.0e-6)
3698                 estfract = 1.0e-6;
3699         else if (estfract > 1.0)
3700                 estfract = 1.0;
3701
3702         ReleaseVariableStats(vardata);
3703
3704         return (Selectivity) estfract;
3705 }
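
The steps above can be followed with a pocket calculator. The sketch below is a simplified standalone illustration (not PostgreSQL code; it omits the restriction-clause scaling of ndistinct, and the statistics fed to it are made up) showing how the initial 1/nbuckets or 1/ndistinct fraction gets inflated by the MCV-to-average frequency ratio and then clamped.

/* Illustrative sketch only; the statistics below are invented. */
#include <stdio.h>

static double
bucketsize_fraction(double ndistinct, double nbuckets,
                                        double stanullfrac, double mcvfreq)
{
        double          avgfreq = (1.0 - stanullfrac) / ndistinct;
        double          estfract;

        /* 1/nbuckets while distinct values outnumber buckets, else 1/ndistinct */
        if (ndistinct > nbuckets)
                estfract = 1.0 / nbuckets;
        else
                estfract = 1.0 / ndistinct;

        /* inflate for skew: most common value's frequency vs. the average */
        if (avgfreq > 0.0 && mcvfreq > avgfreq)
                estfract *= mcvfreq / avgfreq;

        /* clamp to a sane range */
        if (estfract < 1.0e-6)
                estfract = 1.0e-6;
        else if (estfract > 1.0)
                estfract = 1.0;
        return estfract;
}

int
main(void)
{
        /* 500 distinct values, 1024 buckets, 2% NULLs, MCV frequency 10% */
        printf("%g\n", bucketsize_fraction(500.0, 1024.0, 0.02, 0.10));
        return 0;
}

With these made-up numbers the estimate comes out near 0.10 rather than the naive 1/500, reflecting how heavily the most-loaded bucket would be probed.
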
3706
3707
3708 /*-------------------------------------------------------------------------
3709  *
3710  * Support routines
3711  *
3712  *-------------------------------------------------------------------------
3713  */
3714
3715 /*
3716  * Find applicable ndistinct statistics for the given list of VarInfos (which
3717  * must all belong to the given rel), and update *ndistinct to the estimate of
3718  * the MVNDistinctItem that best matches.  If a match is found, *varinfos is
3719  * updated to remove the matched varinfos.
3720  *
3721  * Varinfos that aren't for simple Vars are ignored.
3722  *
3723  * Return TRUE if we're able to find a match, FALSE otherwise.
3724  */
3725 static bool
3726 estimate_multivariate_ndistinct(PlannerInfo *root, RelOptInfo *rel,
3727                                                                 List **varinfos, double *ndistinct)
3728 {
3729         ListCell   *lc;
3730         Bitmapset  *attnums = NULL;
3731         int                     nmatches;
3732         Oid                     statOid = InvalidOid;
3733         MVNDistinct *stats;
3734         Bitmapset  *matched = NULL;
3735
3736         /* bail out immediately if the table has no extended statistics */
3737         if (!rel->statlist)
3738                 return false;
3739
3740         /* Determine the attnums we're looking for */
3741         foreach(lc, *varinfos)
3742         {
3743                 GroupVarInfo *varinfo = (GroupVarInfo *) lfirst(lc);
3744
3745                 Assert(varinfo->rel == rel);
3746
3747                 if (IsA(varinfo->var, Var))
3748                 {
3749                         attnums = bms_add_member(attnums,
3750                                                                          ((Var *) varinfo->var)->varattno);
3751                 }
3752         }
3753
3754         /* look for the ndistinct statistics matching the most vars */
3755         nmatches = 1; /* we require at least two matches */
3756         foreach(lc, rel->statlist)
3757         {
3758                 StatisticExtInfo *info = (StatisticExtInfo *) lfirst(lc);
3759                 Bitmapset  *shared;
3760
3761                 /* skip statistics of other kinds */
3762                 if (info->kind != STATS_EXT_NDISTINCT)
3763                         continue;
3764
3765                 /* compute attnums shared by the vars and the statistic */
3766                 shared = bms_intersect(info->keys, attnums);
3767
3768                 /*
3769                  * Does this statistics object match more columns than the
3770                  * currently best one?  If so, use it instead.
3771                  *
3772                  * XXX This should break ties using the name of the statistics
3773                  * object, or something like that, to make the outcome stable.
3774                  */
3775                 if (bms_num_members(shared) > nmatches)
3776                 {
3777                         statOid = info->statOid;
3778                         nmatches = bms_num_members(shared);
3779                         matched = shared;
3780                 }
3781         }
3782
3783         /* No match? */
3784         if (statOid == InvalidOid)
3785                 return false;
3786         Assert(nmatches > 1 && matched != NULL);
3787
3788         stats = statext_ndistinct_load(statOid);
3789
3790         /*
3791          * If we have a match, search it for the specific item that matches (there
3792          * must be one), and construct the output values.
3793          */
3794         if (stats)
3795         {
3796                 int             i;
3797                 List   *newlist = NIL;
3798                 MVNDistinctItem *item = NULL;
3799
3800                 /* Find the specific item that exactly matches the combination */
3801                 for (i = 0; i < stats->nitems; i++)
3802                 {
3803                         MVNDistinctItem *tmpitem = &stats->items[i];
3804
3805                         if (bms_subset_compare(tmpitem->attrs, matched) == BMS_EQUAL)
3806                         {
3807                                 item = tmpitem;
3808                                 break;
3809                         }
3810                 }
3811
3812                 /* make sure we found an item */
3813                 if (!item)
3814                         elog(ERROR, "corrupt MVNDistinct entry");
3815
3816                 /* Form the output varinfo list, keeping only unmatched ones */
3817                 foreach(lc, *varinfos)
3818                 {
3819                         GroupVarInfo *varinfo = (GroupVarInfo *) lfirst(lc);
3820                         AttrNumber      attnum;
3821
3822                         if (!IsA(varinfo->var, Var))
3823                         {
3824                                 newlist = lappend(newlist, varinfo);
3825                                 continue;
3826                         }
3827
3828                         attnum = ((Var *) varinfo->var)->varattno;
3829                         if (!bms_is_member(attnum, matched))
3830                                 newlist = lappend(newlist, varinfo);
3831                 }
3832
3833                 *varinfos = newlist;
3834                 *ndistinct = item->ndistinct;
3835                 return true;
3836         }
3837
3838         return false;
3839 }
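
The selection step above boils down to: among the ndistinct statistics objects, pick the one whose column set shares the most members with the grouping columns, requiring at least two. A toy standalone sketch of that rule, using plain unsigned bitmasks in place of Bitmapsets (the candidate arrays and attnums are invented for illustration):

/* Toy illustration of choosing the best-matching ndistinct statistics. */
#include <stdio.h>

static int
popcount32(unsigned int x)
{
        int                     n = 0;

        while (x)
        {
                n += x & 1;
                x >>= 1;
        }
        return n;
}

int
main(void)
{
        /* grouping columns: attnums 1, 2 and 4, represented as set bits */
        unsigned int attnums = (1u << 1) | (1u << 2) | (1u << 4);

        /* hypothetical statistics objects, one bitmask of covered attnums each */
        unsigned int stats[] = {
                (1u << 1) | (1u << 2),                          /* covers (1,2)   */
                (1u << 2) | (1u << 3) | (1u << 4),      /* covers (2,3,4) */
        };
        int                     best = -1;
        int                     nmatches = 1;   /* require at least two matching columns */

        for (int i = 0; i < 2; i++)
        {
                int                     shared = popcount32(stats[i] & attnums);

                if (shared > nmatches)
                {
                        best = i;
                        nmatches = shared;
                }
        }
        printf("best statistics object: %d (%d matching columns)\n",
                   best, nmatches);
        return 0;
}

As in the real function, the comparison is strictly "greater than", so ties are effectively won by whichever candidate is encountered first; hence the XXX note about wanting a name-based tie-break for stability.
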
3840
3841 /*
3842  * convert_to_scalar
3843  *        Convert non-NULL values of the indicated types to the comparison
3844  *        scale needed by scalarineqsel().
3845  *        Returns "true" if successful.
3846  *
3847  * XXX this routine is a hack: ideally we should look up the conversion
3848  * subroutines in pg_type.
3849  *
3850  * All numeric datatypes are simply converted to their equivalent
3851  * "double" values.  (NUMERIC values that are outside the range of "double"
3852  * are clamped to +/- HUGE_VAL.)
3853  *
3854  * String datatypes are converted by convert_string_to_scalar(),
3855  * which is explained below.  The reason why this routine deals with
3856  * three values at a time, not just one, is that we need it for strings.
3857  *
3858  * The bytea datatype is just enough different from strings that it has
3859  * to be treated separately.
3860  *
3861  * The several datatypes representing absolute times are all converted
3862  * to Timestamp, which is actually a double, and then we just use that
3863  * double value.  Note this will give correct results even for the "special"
3864  * values of Timestamp, since those are chosen to compare correctly;
3865  * see timestamp_cmp.
3866  *
3867  * The several datatypes representing relative times (intervals) are all
3868  * converted to measurements expressed in seconds.
3869  */
3870 static bool
3871 convert_to_scalar(Datum value, Oid valuetypid, double *scaledvalue,
3872                                   Datum lobound, Datum hibound, Oid boundstypid,
3873                                   double *scaledlobound, double *scaledhibound)
3874 {
3875         /*
3876          * Both the valuetypid and the boundstypid should exactly match the
3877          * declared input type(s) of the operator we are invoked for, so we just
3878          * error out if either is not recognized.
3879          *
3880          * XXX The histogram we are interpolating between points of could belong
3881          * to a column that's only binary-compatible with the declared type. In
3882          * essence we are assuming that the semantics of binary-compatible types
3883          * are enough alike that we can use a histogram generated with one type's
3884          * operators to estimate selectivity for the other's.  This is outright
3885          * wrong in some cases --- in particular signed versus unsigned
3886          * interpretation could trip us up.  But it's useful enough in the
3887          * majority of cases that we do it anyway.  Should think about more
3888          * rigorous ways to do it.
3889          */
3890         switch (valuetypid)
3891         {
3892                         /*
3893                          * Built-in numeric types
3894                          */
3895                 case BOOLOID:
3896                 case INT2OID:
3897                 case INT4OID:
3898                 case INT8OID:
3899                 case FLOAT4OID:
3900                 case FLOAT8OID:
3901                 case NUMERICOID:
3902                 case OIDOID:
3903                 case REGPROCOID:
3904                 case REGPROCEDUREOID:
3905                 case REGOPEROID:
3906                 case REGOPERATOROID:
3907                 case REGCLASSOID:
3908                 case REGTYPEOID:
3909                 case REGCONFIGOID:
3910                 case REGDICTIONARYOID:
3911                 case REGROLEOID:
3912                 case REGNAMESPACEOID:
3913                         *scaledvalue = convert_numeric_to_scalar(value, valuetypid);
3914                         *scaledlobound = convert_numeric_to_scalar(lobound, boundstypid);
3915                         *scaledhibound = convert_numeric_to_scalar(hibound, boundstypid);
3916                         return true;
3917
3918                         /*
3919                          * Built-in string types
3920                          */
3921                 case CHAROID:
3922                 case BPCHAROID:
3923                 case VARCHAROID:
3924                 case TEXTOID:
3925                 case NAMEOID:
3926                         {
3927                                 char       *valstr = convert_string_datum(value, valuetypid);
3928                                 char       *lostr = convert_string_datum(lobound, boundstypid);
3929                                 char       *histr = convert_string_datum(hibound, boundstypid);
3930
3931                                 convert_string_to_scalar(valstr, scaledvalue,
3932                                                                                  lostr, scaledlobound,
3933                                                                                  histr, scaledhibound);
3934                                 pfree(valstr);
3935                                 pfree(lostr);
3936                                 pfree(histr);
3937                                 return true;
3938                         }
3939
3940                         /*
3941                          * Built-in bytea type
3942                          */
3943                 case BYTEAOID:
3944                         {
3945                                 convert_bytea_to_scalar(value, scaledvalue,
3946                                                                                 lobound, scaledlobound,
3947                                                                                 hibound, scaledhibound);
3948                                 return true;
3949                         }
3950
3951                         /*
3952                          * Built-in time types
3953                          */
3954                 case TIMESTAMPOID:
3955                 case TIMESTAMPTZOID:
3956                 case ABSTIMEOID:
3957                 case DATEOID:
3958                 case INTERVALOID:
3959                 case RELTIMEOID:
3960                 case TINTERVALOID:
3961                 case TIMEOID:
3962                 case TIMETZOID:
3963                         *scaledvalue = convert_timevalue_to_scalar(value, valuetypid);
3964                         *scaledlobound = convert_timevalue_to_scalar(lobound, boundstypid);
3965                         *scaledhibound = convert_timevalue_to_scalar(hibound, boundstypid);
3966                         return true;
3967
3968                         /*
3969                          * Built-in network types
3970                          */
3971                 case INETOID:
3972                 case CIDROID:
3973                 case MACADDROID:
3974                 case MACADDR8OID:
3975                         *scaledvalue = convert_network_to_scalar(value, valuetypid);
3976                         *scaledlobound = convert_network_to_scalar(lobound, boundstypid);
3977                         *scaledhibound = convert_network_to_scalar(hibound, boundstypid);
3978                         return true;
3979         }
3980         /* Don't know how to convert */
3981         *scaledvalue = *scaledlobound = *scaledhibound = 0;
3982         return false;
3983 }
3984
3985 /*
3986  * Do convert_to_scalar()'s work for any numeric data type.
3987  */
3988 static double
3989 convert_numeric_to_scalar(Datum value, Oid typid)
3990 {
3991         switch (typid)
3992         {
3993                 case BOOLOID:
3994                         return (double) DatumGetBool(value);
3995                 case INT2OID:
3996                         return (double) DatumGetInt16(value);
3997                 case INT4OID:
3998                         return (double) DatumGetInt32(value);
3999                 case INT8OID:
4000                         return (double) DatumGetInt64(value);
4001                 case FLOAT4OID:
4002                         return (double) DatumGetFloat4(value);
4003                 case FLOAT8OID:
4004                         return (double) DatumGetFloat8(value);
4005                 case NUMERICOID:
4006                         /* Note: out-of-range values will be clamped to +-HUGE_VAL */
4007                         return (double)
4008                                 DatumGetFloat8(DirectFunctionCall1(numeric_float8_no_overflow,
4009                                                                                                    value));
4010                 case OIDOID:
4011                 case REGPROCOID:
4012                 case REGPROCEDUREOID:
4013                 case REGOPEROID:
4014                 case REGOPERATOROID:
4015                 case REGCLASSOID:
4016                 case REGTYPEOID:
4017                 case REGCONFIGOID:
4018                 case REGDICTIONARYOID:
4019                 case REGROLEOID:
4020                 case REGNAMESPACEOID:
4021                         /* we can treat OIDs as integers... */
4022                         return (double) DatumGetObjectId(value);
4023         }
4024
4025         /*
4026          * Can't get here unless someone tries to use scalarltsel/scalargtsel on
4027          * an operator with one numeric and one non-numeric operand.
4028          */
4029         elog(ERROR, "unsupported type: %u", typid);
4030         return 0;
4031 }
4032
4033 /*
4034  * Do convert_to_scalar()'s work for any character-string data type.
4035  *
4036  * String datatypes are converted to a scale that ranges from 0 to 1,
4037  * where we visualize the bytes of the string as fractional digits.
4038  *
4039  * We do not want the base to be 256, however, since that tends to
4040  * generate inflated selectivity estimates; few databases will have
4041  * occurrences of all 256 possible byte values at each position.
4042  * Instead, use the smallest and largest byte values seen in the bounds
4043  * as the estimated range for each byte, after some fudging to deal with
4044  * the fact that we probably aren't going to see the full range that way.
4045  *
4046  * An additional refinement is that we discard any common prefix of the
4047  * three strings before computing the scaled values.  This allows us to
4048  * "zoom in" when we encounter a narrow data range.  An example is a phone
4049  * number database where all the values begin with the same area code.
4050  * (Actually, the bounds will be adjacent histogram-bin-boundary values,
4051  * so this is more likely to happen than you might think.)
4052  */
4053 static void
4054 convert_string_to_scalar(char *value,
4055                                                  double *scaledvalue,
4056                                                  char *lobound,
4057                                                  double *scaledlobound,
4058                                                  char *hibound,
4059                                                  double *scaledhibound)
4060 {
4061         int                     rangelo,
4062                                 rangehi;
4063         char       *sptr;
4064
4065         rangelo = rangehi = (unsigned char) hibound[0];
4066         for (sptr = lobound; *sptr; sptr++)
4067         {
4068                 if (rangelo > (unsigned char) *sptr)
4069                         rangelo = (unsigned char) *sptr;
4070                 if (rangehi < (unsigned char) *sptr)
4071                         rangehi = (unsigned char) *sptr;
4072         }
4073         for (sptr = hibound; *sptr; sptr++)
4074         {
4075                 if (rangelo > (unsigned char) *sptr)
4076                         rangelo = (unsigned char) *sptr;
4077                 if (rangehi < (unsigned char) *sptr)
4078                         rangehi = (unsigned char) *sptr;
4079         }
4080         /* If range includes any upper-case ASCII chars, make it include all */
4081         if (rangelo <= 'Z' && rangehi >= 'A')
4082         {
4083                 if (rangelo > 'A')
4084                         rangelo = 'A';
4085                 if (rangehi < 'Z')
4086                         rangehi = 'Z';
4087         }
4088         /* Ditto lower-case */
4089         if (rangelo <= 'z' && rangehi >= 'a')
4090         {
4091                 if (rangelo > 'a')
4092                         rangelo = 'a';
4093                 if (rangehi < 'z')
4094                         rangehi = 'z';
4095         }
4096         /* Ditto digits */
4097         if (rangelo <= '9' && rangehi >= '0')
4098         {
4099                 if (rangelo > '0')
4100                         rangelo = '0';
4101                 if (rangehi < '9')
4102                         rangehi = '9';
4103         }
4104
4105         /*
4106          * If the range includes fewer than 10 chars, assume we do not have
4107          * enough data, and make it include the regular ASCII set.
4108          */
4109         if (rangehi - rangelo < 9)
4110         {
4111                 rangelo = ' ';
4112                 rangehi = 127;
4113         }
4114
4115         /*
4116          * Now strip any common prefix of the three strings.
4117          */
4118         while (*lobound)
4119         {
4120                 if (*lobound != *hibound || *lobound != *value)
4121                         break;
4122                 lobound++, hibound++, value++;
4123         }
4124
4125         /*
4126          * Now we can do the conversions.
4127          */
4128         *scaledvalue = convert_one_string_to_scalar(value, rangelo, rangehi);
4129         *scaledlobound = convert_one_string_to_scalar(lobound, rangelo, rangehi);
4130         *scaledhibound = convert_one_string_to_scalar(hibound, rangelo, rangehi);
4131 }
4132
4133 static double
4134 convert_one_string_to_scalar(char *value, int rangelo, int rangehi)
4135 {
4136         int                     slen = strlen(value);
4137         double          num,
4138                                 denom,
4139                                 base;
4140
4141         if (slen <= 0)
4142                 return 0.0;                             /* empty string has scalar value 0 */
4143
4144         /*
4145          * There seems little point in considering more than a dozen bytes from
4146          * the string.  Since base is at least 10, that will give us nominal
4147          * resolution of at least 12 decimal digits, which is surely far more
4148          * precision than this estimation technique has got anyway (especially in
4149          * non-C locales).  Also, even with the maximum possible base of 256, this
4150          * ensures denom cannot grow larger than 256^13 = 2.03e31, which will not
4151          * overflow on any known machine.
4152          */
4153         if (slen > 12)
4154                 slen = 12;
4155
4156         /* Convert initial characters to fraction */
4157         base = rangehi - rangelo + 1;
4158         num = 0.0;
4159         denom = base;
4160         while (slen-- > 0)
4161         {
4162                 int                     ch = (unsigned char) *value++;
4163
4164                 if (ch < rangelo)
4165                         ch = rangelo - 1;
4166                 else if (ch > rangehi)
4167                         ch = rangehi + 1;
4168                 num += ((double) (ch - rangelo)) / denom;
4169                 denom *= base;
4170         }
4171
4172         return num;
4173 }
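
As a concrete illustration of the byte-to-fraction mapping, the standalone sketch below (not part of selfuncs.c) converts a short string under an assumed byte range of 'a'..'z'; the helper name string_to_fraction and the sample input are invented, but the loop mirrors convert_one_string_to_scalar.

/* Illustrative sketch only; assumes a fixed 'a'..'z' byte range. */
#include <stdio.h>
#include <string.h>

static double
string_to_fraction(const char *value, int rangelo, int rangehi)
{
        double          base = rangehi - rangelo + 1;
        double          num = 0.0;
        double          denom = base;
        int                     slen = (int) strlen(value);

        if (slen > 12)                          /* same truncation as selfuncs.c */
                slen = 12;

        for (int i = 0; i < slen; i++)
        {
                int                     ch = (unsigned char) value[i];

                if (ch < rangelo)
                        ch = rangelo - 1;
                else if (ch > rangehi)
                        ch = rangehi + 1;
                num += ((double) (ch - rangelo)) / denom;
                denom *= base;
        }
        return num;
}

int
main(void)
{
        /* "dog": d=3/26 + o=14/676 + g=6/17576, roughly 0.1364 */
        printf("%.4f\n", string_to_fraction("dog", 'a', 'z'));
        return 0;
}

The leading byte dominates the result and later bytes refine it by ever smaller amounts, so lexically close strings map to close scalars, which is all the histogram interpolation needs.
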
4174
4175 /*
4176  * Convert a string-type Datum into a palloc'd, null-terminated string.
4177  *
4178  * When using a non-C locale, we must pass the string through strxfrm()
4179  * before continuing, so as to generate correct locale-specific results.
4180  */
4181 static char *
4182 convert_string_datum(Datum value, Oid typid)
4183 {
4184         char       *val;
4185
4186         switch (typid)
4187         {
4188                 case CHAROID:
4189                         val = (char *) palloc(2);
4190                         val[0] = DatumGetChar(value);
4191                         val[1] = '\0';
4192                         break;
4193                 case BPCHAROID:
4194                 case VARCHAROID:
4195                 case TEXTOID:
4196                         val = TextDatumGetCString(value);
4197                         break;
4198                 case NAMEOID:
4199                         {
4200                                 NameData   *nm = (NameData *) DatumGetPointer(value);
4201
4202                                 val = pstrdup(NameStr(*nm));
4203                                 break;
4204                         }
4205                 default:
4206
4207                         /*
4208                          * Can't get here unless someone tries to use scalarltsel on an
4209                          * operator with one string and one non-string operand.
4210                          */
4211                         elog(ERROR, "unsupported type: %u", typid);
4212                         return NULL;
4213         }
4214
4215         if (!lc_collate_is_c(DEFAULT_COLLATION_OID))
4216         {
4217                 char       *xfrmstr;
4218                 size_t          xfrmlen;
4219                 size_t xfrmlen2 PG_USED_FOR_ASSERTS_ONLY;
4220
4221                 /*
4222                  * XXX: We could guess at a suitable output buffer size and only call
4223                  * strxfrm twice if our guess is too small.
4224                  *
4225                  * XXX: strxfrm doesn't support UTF-8 encoding on Win32, it can return
4226                  * bogus data or set an error. This is not really a problem unless it
4227                  * crashes since it will only give an estimation error and nothing
4228                  * fatal.
4229                  */
4230 #if _MSC_VER == 1400                    /* VS.Net 2005 */
4231
4232                 /*
4233                  * See the bug report at
4234                  * http://connect.microsoft.com/VisualStudio/feedback/ViewFeedback.aspx?
4235                  * FeedbackID=99694 */
4236                 {
4237                         char            x[1];
4238
4239                         xfrmlen = strxfrm(x, val, 0);
4240                 }
4241 #else
4242                 xfrmlen = strxfrm(NULL, val, 0);
4243 #endif
4244 #ifdef WIN32
4245
4246                 /*
4247                  * On Windows, strxfrm returns INT_MAX when an error occurs. Instead
4248                  * of trying to allocate this much memory (and fail), just return the
4249                  * original string unmodified as if we were in the C locale.
4250                  */
4251                 if (xfrmlen == INT_MAX)
4252                         return val;
4253 #endif
4254                 xfrmstr = (char *) palloc(xfrmlen + 1);
4255                 xfrmlen2 = strxfrm(xfrmstr, val, xfrmlen + 1);
4256
4257                 /*
4258                  * Some systems (e.g., glibc) can return a smaller value from the
4259                  * second call than the first; thus the Assert must be <= not ==.
4260                  */
4261                 Assert(xfrmlen2 <= xfrmlen);
4262                 pfree(val);
4263                 val = xfrmstr;
4264         }
4265
4266         return val;
4267 }
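
The locale branch above uses the standard two-call strxfrm() pattern: one call to measure the transformed length, then a second call into a buffer of that size. A minimal standalone demonstration in plain C (malloc instead of palloc, no PostgreSQL memory management, error handling kept to a bare minimum):

/* Minimal demonstration of the two-call strxfrm() pattern. */
#include <locale.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int
main(void)
{
        const char *val = "selectivity";
        size_t          xfrmlen;
        char       *xfrmstr;

        setlocale(LC_COLLATE, "");      /* use the environment's collation */

        /* first call: measure the transformed length only */
        xfrmlen = strxfrm(NULL, val, 0);

        /* second call: fill a buffer of the right size */
        xfrmstr = malloc(xfrmlen + 1);
        if (xfrmstr == NULL)
                return 1;
        (void) strxfrm(xfrmstr, val, xfrmlen + 1);

        /* transformed strings can then be compared bytewise, e.g. with strcmp */
        printf("transformed length: %zu bytes\n", xfrmlen);
        free(xfrmstr);
        return 0;
}

The Assert in convert_string_datum tolerates the second call reporting a smaller length than the first, since some implementations (e.g. glibc) do exactly that.
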
4268
4269 /*
4270  * Do convert_to_scalar()'s work for any bytea data type.
4271  *
4272  * Very similar to convert_string_to_scalar except we can't assume
4273  * null-termination and therefore pass explicit lengths around.
4274  *
4275  * Also, assumptions about likely "normal" ranges of characters have been
4276  * removed - a data range of 0..255 is always used, for now.  (Perhaps
4277  * someday we will add information about actual byte data range to
4278  * pg_statistic.)
4279  */
4280 static void
4281 convert_bytea_to_scalar(Datum value,
4282                                                 double *scaledvalue,
4283                                                 Datum lobound,
4284                                                 double *scaledlobound,
4285                                                 Datum hibound,
4286                                                 double *scaledhibound)
4287 {
4288         int                     rangelo,
4289                                 rangehi,
4290                                 valuelen = VARSIZE(DatumGetPointer(value)) - VARHDRSZ,
4291                                 loboundlen = VARSIZE(DatumGetPointer(lobound)) - VARHDRSZ,
4292                                 hiboundlen = VARSIZE(DatumGetPointer(hibound)) - VARHDRSZ,
4293                                 i,
4294                                 minlen;
4295         unsigned char *valstr = (unsigned char *) VARDATA(DatumGetPointer(value)),
4296                            *lostr = (unsigned char *) VARDATA(DatumGetPointer(lobound)),
4297                            *histr = (unsigned char *) VARDATA(DatumGetPointer(hibound));
4298
4299         /*
4300          * Assume bytea data is uniformly distributed across all byte values.
4301          */
4302         rangelo = 0;
4303         rangehi = 255;
4304
4305         /*
4306          * Now strip any common prefix of the three strings.
4307          */
4308         minlen = Min(Min(valuelen, loboundlen), hiboundlen);
4309         for (i = 0; i < minlen; i++)
4310         {
4311                 if (*lostr != *histr || *lostr != *valstr)
4312                         break;
4313                 lostr++, histr++, valstr++;
4314                 loboundlen--, hiboundlen--, valuelen--;
4315         }
4316
4317         /*
4318          * Now we can do the conversions.
4319          */
4320         *scaledvalue = convert_one_bytea_to_scalar(valstr, valuelen, rangelo, rangehi);
4321         *scaledlobound = convert_one_bytea_to_scalar(lostr, loboundlen, rangelo, rangehi);
4322         *scaledhibound = convert_one_bytea_to_scalar(histr, hiboundlen, rangelo, rangehi);
4323 }
4324
4325 static double
4326 convert_one_bytea_to_scalar(unsigned char *value, int valuelen,
4327                                                         int rangelo, int rangehi)
4328 {
4329         double          num,
4330                                 denom,
4331                                 base;
4332
4333         if (valuelen <= 0)
4334                 return 0.0;                             /* empty string has scalar value 0 */
4335
4336         /*
4337          * Since base is 256, need not consider more than about 10 chars (even
4338          * this many seems like overkill)
4339          */
4340         if (valuelen > 10)
4341                 valuelen = 10;
4342
4343         /* Convert initial characters to fraction */
4344         base = rangehi - rangelo + 1;
4345         num = 0.0;
4346         denom = base;
4347         while (valuelen-- > 0)
4348         {
4349                 int                     ch = *value++;
4350
4351                 if (ch < rangelo)
4352                         ch = rangelo - 1;
4353                 else if (ch > rangehi)
4354                         ch = rangehi + 1;
4355                 num += ((double) (ch - rangelo)) / denom;
4356                 denom *= base;
4357         }
4358
4359         return num;
4360 }
4361
4362 /*
4363  * Do convert_to_scalar()'s work for any timevalue data type.
4364  */
4365 static double
4366 convert_timevalue_to_scalar(Datum value, Oid typid)
4367 {
4368         switch (typid)
4369         {
4370                 case TIMESTAMPOID:
4371                         return DatumGetTimestamp(value);
4372                 case TIMESTAMPTZOID:
4373                         return DatumGetTimestampTz(value);
4374                 case ABSTIMEOID:
4375                         return DatumGetTimestamp(DirectFunctionCall1(abstime_timestamp,
4376                                                                                                                  value));
4377                 case DATEOID:
4378                         return date2timestamp_no_overflow(DatumGetDateADT(value));
4379                 case INTERVALOID:
4380                         {
4381                                 Interval   *interval = DatumGetIntervalP(value);
4382
4383                                 /*
4384                                  * Convert the month part of Interval to days using assumed
4385                                  * average month length of 365.25/12.0 days.  Not too
4386                                  * accurate, but plenty good enough for our purposes.
4387                                  */
4388                                 return interval->time + interval->day * (double) USECS_PER_DAY +
4389                                         interval->month * ((DAYS_PER_YEAR / (double) MONTHS_PER_YEAR) * USECS_PER_DAY);
4390                         }
4391                 case RELTIMEOID:
4392                         return (DatumGetRelativeTime(value) * 1000000.0);
4393                 case TINTERVALOID:
4394                         {
4395                                 TimeInterval tinterval = DatumGetTimeInterval(value);
4396
4397                                 if (tinterval->status != 0)
4398                                         return ((tinterval->data[1] - tinterval->data[0]) * 1000000.0);
4399                                 return 0;               /* for lack of a better idea */
4400                         }
4401                 case TIMEOID:
4402                         return DatumGetTimeADT(value);
4403                 case TIMETZOID:
4404                         {
4405                                 TimeTzADT  *timetz = DatumGetTimeTzADTP(value);
4406
4407                                 /* use GMT-equivalent time */
4408                                 return (double) (timetz->time + (timetz->zone * 1000000.0));
4409                         }
4410         }
4411
4412         /*
4413          * Can't get here unless someone tries to use scalarltsel/scalargtsel on
4414          * an operator with one timevalue and one non-timevalue operand.
4415          */
4416         elog(ERROR, "unsupported type: %u", typid);
4417         return 0;
4418 }
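
The interval branch above is plain arithmetic in microseconds, with months converted via an assumed average month length. A small standalone example (the constants are written out literally here; in the real code they come from PostgreSQL's datetime headers, and the sample interval is made up):

/* Illustrative arithmetic only; constants spelled out rather than taken
 * from PostgreSQL headers. */
#include <stdio.h>

#define USECS_PER_DAY   86400000000.0   /* 24 * 3600 * 1e6 */
#define DAYS_PER_YEAR   365.25
#define MONTHS_PER_YEAR 12.0

int
main(void)
{
        /* hypothetical interval: 1 month, 2 days, 3 hours */
        double          month = 1.0;
        double          day = 2.0;
        double          time = 3.0 * 3600.0 * 1000000.0;        /* microseconds */
        double          scaled;

        scaled = time + day * USECS_PER_DAY +
                month * ((DAYS_PER_YEAR / MONTHS_PER_YEAR) * USECS_PER_DAY);

        printf("%.0f microseconds\n", scaled);
        return 0;
}

So this hypothetical interval scales to about 2.81e12 microseconds, the same units into which histogram bounds of interval type are converted before interpolation.
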
4419
4420
4421 /*
4422  * get_restriction_variable
4423  *              Examine the args of a restriction clause to see if it's of the
4424  *              form (variable op pseudoconstant) or (pseudoconstant op variable),
4425  *              where "variable" could be either a Var or an expression in vars of a
4426  *              single relation.  If so, extract information about the variable,
4427  *              and also indicate which side it was on and the other argument.
4428  *
4429  * Inputs:
4430  *      root: the planner info
4431  *      args: clause argument list
4432  *      varRelid: see specs for restriction selectivity functions
4433  *
4434  * Outputs: (these are valid only if TRUE is returned)
4435  *      *vardata: gets information about variable (see examine_variable)
4436  *      *other: gets other clause argument, aggressively reduced to a constant
4437  *      *varonleft: set TRUE if variable is on the left, FALSE if on the right
4438  *
4439  * Returns TRUE if a variable is identified, otherwise FALSE.
4440  *
4441  * Note: if there are Vars on both sides of the clause, we must fail, because
4442  * callers are expecting that the other side will act like a pseudoconstant.
4443  */
4444 bool
4445 get_restriction_variable(PlannerInfo *root, List *args, int varRelid,
4446                                                  VariableStatData *vardata, Node **other,
4447                                                  bool *varonleft)
4448 {
4449         Node       *left,
4450                            *right;
4451         VariableStatData rdata;
4452
4453         /* Fail if not a binary opclause (probably shouldn't happen) */
4454         if (list_length(args) != 2)
4455                 return false;
4456
4457         left = (Node *) linitial(args);
4458         right = (Node *) lsecond(args);
4459
4460         /*
4461          * Examine both sides.  Note that when varRelid is nonzero, Vars of other
4462          * relations will be treated as pseudoconstants.
4463          */
4464         examine_variable(root, left, varRelid, vardata);
4465         examine_variable(root, right, varRelid, &rdata);
4466
4467         /*
4468          * If one side is a variable and the other not, we win.
4469          */
4470         if (vardata->rel && rdata.rel == NULL)
4471         {
4472                 *varonleft = true;
4473                 *other = estimate_expression_value(root, rdata.var);
4474                 /* Assume we need no ReleaseVariableStats(rdata) here */
4475                 return true;
4476         }
4477
4478         if (vardata->rel == NULL && rdata.rel)
4479         {
4480                 *varonleft = false;
4481                 *other = estimate_expression_value(root, vardata->var);
4482                 /* Assume we need no ReleaseVariableStats(*vardata) here */
4483                 *vardata = rdata;
4484                 return true;
4485         }
4486
4487         /* Oops, clause has wrong structure (probably var op var) */
4488         ReleaseVariableStats(*vardata);
4489         ReleaseVariableStats(rdata);
4490
4491         return false;
4492 }
4493
4494 /*
4495  * get_join_variables
4496  *              Apply examine_variable() to each side of a join clause.
4497  *              Also, attempt to identify whether the join clause has the same
4498  *              or reversed sense compared to the SpecialJoinInfo.
4499  *
4500  * We consider the join clause "normal" if it is "lhs_var OP rhs_var",
4501  * or "reversed" if it is "rhs_var OP lhs_var".  In complicated cases
4502  * where we can't tell for sure, we default to assuming it's normal.
4503  */
4504 void
4505 get_join_variables(PlannerInfo *root, List *args, SpecialJoinInfo *sjinfo,
4506                                    VariableStatData *vardata1, VariableStatData *vardata2,
4507                                    bool *join_is_reversed)
4508 {
4509         Node       *left,
4510                            *right;
4511
4512         if (list_length(args) != 2)
4513                 elog(ERROR, "join operator should take two arguments");
4514
4515         left = (Node *) linitial(args);
4516         right = (Node *) lsecond(args);
4517
4518         examine_variable(root, left, 0, vardata1);
4519         examine_variable(root, right, 0, vardata2);
4520
4521         if (vardata1->rel &&
4522                 bms_is_subset(vardata1->rel->relids, sjinfo->syn_righthand))
4523                 *join_is_reversed = true;               /* var1 is on RHS */
4524         else if (vardata2->rel &&
4525                          bms_is_subset(vardata2->rel->relids, sjinfo->syn_lefthand))
4526                 *join_is_reversed = true;               /* var2 is on LHS */
4527         else
4528                 *join_is_reversed = false;
4529 }
4530
4531 /*
4532  * examine_variable
4533  *              Try to look up statistical data about an expression.
4534  *              Fill in a VariableStatData struct to describe the expression.
4535  *
4536  * Inputs:
4537  *      root: the planner info
4538  *      node: the expression tree to examine
4539  *      varRelid: see specs for restriction selectivity functions
4540  *
4541  * Outputs: *vardata is filled as follows:
4542  *      var: the input expression (with any binary relabeling stripped, if
4543  *              it is or contains a variable; but otherwise the type is preserved)
4544  *      rel: RelOptInfo for relation containing variable; NULL if expression
4545  *              contains no Vars (NOTE this could point to a RelOptInfo of a
4546  *              subquery, not one in the current query).
4547  *      statsTuple: the pg_statistic entry for the variable, if one exists;
4548  *              otherwise NULL.
4549  *      freefunc: pointer to a function to release statsTuple with.
4550  *      vartype: exposed type of the expression; this should always match
4551  *              the declared input type of the operator we are estimating for.
4552  *      atttype, atttypmod: type data to pass to get_attstatsslot().  This is
4553  *              commonly the same as the exposed type of the variable argument,
4554  *              but can be different in binary-compatible-type cases.
4555  *      isunique: TRUE if we were able to match the var to a unique index or a
4556  *              single-column DISTINCT clause, implying its values are unique for
4557  *              this query.  (Caution: this should be trusted for statistical
4558  *              purposes only, since we do not check indimmediate nor verify that
4559  *              the exact same definition of equality applies.)
4560  *
4561  * Caller is responsible for doing ReleaseVariableStats() before exiting.
4562  */
4563 void
4564 examine_variable(PlannerInfo *root, Node *node, int varRelid,
4565                                  VariableStatData *vardata)
4566 {
4567         Node       *basenode;
4568         Relids          varnos;
4569         RelOptInfo *onerel;
4570
4571         /* Make sure we don't return dangling pointers in vardata */
4572         MemSet(vardata, 0, sizeof(VariableStatData));
4573
4574         /* Save the exposed type of the expression */
4575         vardata->vartype = exprType(node);
4576
4577         /* Look inside any binary-compatible relabeling */
4578
4579         if (IsA(node, RelabelType))
4580                 basenode = (Node *) ((RelabelType *) node)->arg;
4581         else
4582                 basenode = node;
4583
4584         /* Fast path for a simple Var */
4585
4586         if (IsA(basenode, Var) &&
4587                 (varRelid == 0 || varRelid == ((Var *) basenode)->varno))
4588         {
4589                 Var                *var = (Var *) basenode;
4590
4591                 /* Set up result fields other than the stats tuple */
4592                 vardata->var = basenode;        /* return Var without relabeling */
4593                 vardata->rel = find_base_rel(root, var->varno);
4594                 vardata->atttype = var->vartype;
4595                 vardata->atttypmod = var->vartypmod;
4596                 vardata->isunique = has_unique_index(vardata->rel, var->varattno);
4597
4598                 /* Try to locate some stats */
4599                 examine_simple_variable(root, var, vardata);
4600
4601                 return;
4602         }
4603
4604         /*
4605          * Okay, it's a more complicated expression.  Determine variable
4606          * membership.  Note that when varRelid isn't zero, only vars of that
4607          * relation are considered "real" vars.
4608          */
4609         varnos = pull_varnos(basenode);
4610
4611         onerel = NULL;
4612
4613         switch (bms_membership(varnos))
4614         {
4615                 case BMS_EMPTY_SET:
4616                         /* No Vars at all ... must be pseudo-constant clause */
4617                         break;
4618                 case BMS_SINGLETON:
4619                         if (varRelid == 0 || bms_is_member(varRelid, varnos))
4620                         {
4621                                 onerel = find_base_rel(root,
4622                                            (varRelid ? varRelid : bms_singleton_member(varnos)));
4623                                 vardata->rel = onerel;
4624                                 node = basenode;        /* strip any relabeling */
4625                         }
4626                         /* else treat it as a constant */
4627                         break;
4628                 case BMS_MULTIPLE:
4629                         if (varRelid == 0)
4630                         {
4631                                 /* treat it as a variable of a join relation */
4632                                 vardata->rel = find_join_rel(root, varnos);
4633                                 node = basenode;        /* strip any relabeling */
4634                         }
4635                         else if (bms_is_member(varRelid, varnos))
4636                         {
4637                                 /* ignore the vars belonging to other relations */
4638                                 vardata->rel = find_base_rel(root, varRelid);
4639                                 node = basenode;        /* strip any relabeling */
4640                                 /* note: no point in expressional-index search here */
4641                         }
4642                         /* else treat it as a constant */
4643                         break;
4644         }
4645
4646         bms_free(varnos);
4647
4648         vardata->var = node;
4649         vardata->atttype = exprType(node);
4650         vardata->atttypmod = exprTypmod(node);
4651
4652         if (onerel)
4653         {
4654                 /*
4655                  * We have an expression in vars of a single relation.  Try to match
4656                  * it to expressional index columns, in hopes of finding some
4657                  * statistics.
4658                  *
4659                  * XXX it's conceivable that there are multiple matches with different
4660                  * index opfamilies; if so, we need to pick one that matches the
4661                  * operator we are estimating for.  FIXME later.
4662                  */
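                /*
                 * Hypothetical illustration (table and column names made up):
                 * given "CREATE INDEX ON t ((lower(name)))", ANALYZE stores
                 * statistics for the index column lower(name), so a clause such
                 * as "lower(name) = 'smith'" can be estimated from those stats
                 * even though no plain column of t carries them.
                 */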
4663                 ListCell   *ilist;
4664
4665                 foreach(ilist, onerel->indexlist)
4666                 {
4667                         IndexOptInfo *index = (IndexOptInfo *) lfirst(ilist);
4668                         ListCell   *indexpr_item;
4669                         int                     pos;
4670
4671                         indexpr_item = list_head(index->indexprs);
4672                         if (indexpr_item == NULL)
4673                                 continue;               /* no expressions here... */
4674
4675                         for (pos = 0; pos < index->ncolumns; pos++)
4676                         {
4677                                 if (index->indexkeys[pos] == 0)
4678                                 {
4679                                         Node       *indexkey;
4680
4681                                         if (indexpr_item == NULL)
4682                                                 elog(ERROR, "too few entries in indexprs list");
4683                                         indexkey = (Node *) lfirst(indexpr_item);
4684                                         if (indexkey && IsA(indexkey, RelabelType))
4685                                                 indexkey = (Node *) ((RelabelType *) indexkey)->arg;
4686                                         if (equal(node, indexkey))
4687                                         {
4688                                                 /*
4689                                                  * Found a match ... is it a unique index? Tests here
4690                                                  * should match has_unique_index().
4691                                                  */
4692                                                 if (index->unique &&
4693                                                         index->ncolumns == 1 &&
4694                                                         (index->indpred == NIL || index->predOK))
4695                                                         vardata->isunique = true;
4696
4697                                                 /*
4698                                                  * Has it got stats?  We only consider stats for
4699                                                  * non-partial indexes, since partial indexes probably
4700                                                  * don't reflect whole-relation statistics; the above
4701                                                  * check for uniqueness is the only info we take from
4702                                                  * a partial index.
4703                                                  *
4704                                                  * An index stats hook, however, must make its own
4705                                                  * decisions about what to do with partial indexes.
4706                                                  */
4707                                                 if (get_index_stats_hook &&
4708                                                         (*get_index_stats_hook) (root, index->indexoid,
4709                                                                                                          pos + 1, vardata))
4710                                                 {
4711                                                         /*
4712                                                          * The hook took control of acquiring a stats
4713                                                          * tuple.  If it did supply a tuple, it'd better
4714                                                          * have supplied a freefunc.
4715                                                          */
4716                                                         if (HeapTupleIsValid(vardata->statsTuple) &&
4717                                                                 !vardata->freefunc)
4718                                                                 elog(ERROR, "no function provided to release variable stats with");
4719                                                 }
4720                                                 else if (index->indpred == NIL)
4721                                                 {
4722                                                         vardata->statsTuple =
4723                                                                 SearchSysCache3(STATRELATTINH,
4724                                                                                    ObjectIdGetDatum(index->indexoid),
4725                                                                                                 Int16GetDatum(pos + 1),
4726                                                                                                 BoolGetDatum(false));
4727                                                         vardata->freefunc = ReleaseSysCache;
4728                                                 }
4729                                                 if (vardata->statsTuple)
4730                                                         break;
4731                                         }
4732                                         indexpr_item = lnext(indexpr_item);
4733                                 }
4734                         }
4735                         if (vardata->statsTuple)
4736                                 break;
4737                 }
4738         }
4739 }
4740
4741 /*
4742  * examine_simple_variable
4743  *              Handle a simple Var for examine_variable
4744  *
4745  * This is split out as a subroutine so that we can recurse to deal with
4746  * Vars referencing subqueries.
4747  *
4748  * We already filled in all the fields of *vardata except for the stats tuple.
4749  */
4750 static void
4751 examine_simple_variable(PlannerInfo *root, Var *var,
4752                                                 VariableStatData *vardata)
4753 {
4754         RangeTblEntry *rte = root->simple_rte_array[var->varno];
4755
4756         Assert(IsA(rte, RangeTblEntry));
4757
4758         if (get_relation_stats_hook &&
4759                 (*get_relation_stats_hook) (root, rte, var->varattno, vardata))
4760         {
4761                 /*
4762                  * The hook took control of acquiring a stats tuple.  If it did supply
4763                  * a tuple, it'd better have supplied a freefunc.
4764                  */
4765                 if (HeapTupleIsValid(vardata->statsTuple) &&
4766                         !vardata->freefunc)
4767                         elog(ERROR, "no function provided to release variable stats with");
4768         }
4769         else if (rte->rtekind == RTE_RELATION)
4770         {
4771                 /*
4772                  * Plain table or parent of an inheritance appendrel, so look up the
4773                  * column in pg_statistic
4774                  */
4775                 vardata->statsTuple = SearchSysCache3(STATRELATTINH,
4776                                                                                           ObjectIdGetDatum(rte->relid),
4777                                                                                           Int16GetDatum(var->varattno),
4778                                                                                           BoolGetDatum(rte->inh));
4779                 vardata->freefunc = ReleaseSysCache;
4780         }
4781         else if (rte->rtekind == RTE_SUBQUERY && !rte->inh)
4782         {
4783                 /*
4784                  * Plain subquery (not one that was converted to an appendrel).
4785                  */
4786                 Query      *subquery = rte->subquery;
4787                 RelOptInfo *rel;
4788                 TargetEntry *ste;
4789
4790                 /*
4791                  * Punt if it's a whole-row var rather than a plain column reference.
4792                  */
4793                 if (var->varattno == InvalidAttrNumber)
4794                         return;
4795
4796                 /*
4797                  * Punt if subquery uses set operations or GROUP BY, as these will
4798                  * mash underlying columns' stats beyond recognition.  (Set ops are
4799                  * particularly nasty; if we forged ahead, we would return stats
4800                  * relevant to only the leftmost subselect...)  DISTINCT is also
4801                  * problematic, but we check that later because there is a possibility
4802                  * of learning something even with it.
4803                  */
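                /*
                 * Hypothetical example: in
                 *              SELECT * FROM (SELECT a AS x FROM t GROUP BY a) ss WHERE ss.x = 42
                 * the grouped output has each value of a at most once, so t.a's
                 * MCV and frequency stats no longer describe ss.x; hence we punt.
                 */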
4804                 if (subquery->setOperations ||
4805                         subquery->groupClause)
4806                         return;
4807
4808                 /*
4809                  * OK, fetch RelOptInfo for subquery.  Note that we don't change the
4810                  * rel returned in vardata, since caller expects it to be a rel of the
4811                  * caller's query level.  Because we might already be recursing, we
4812                  * can't use that rel pointer either, but have to look up the Var's
4813                  * rel afresh.
4814                  */
4815                 rel = find_base_rel(root, var->varno);
4816
4817                 /* If the subquery hasn't been planned yet, we have to punt */
4818                 if (rel->subroot == NULL)
4819                         return;
4820                 Assert(IsA(rel->subroot, PlannerInfo));
4821
4822                 /*
4823                  * Switch our attention to the subquery as mangled by the planner. It
4824                  * was okay to look at the pre-planning version for the tests above,
4825                  * but now we need a Var that will refer to the subroot's live
4826                  * RelOptInfos.  For instance, if any subquery pullup happened during
4827                  * planning, Vars in the targetlist might have gotten replaced, and we
4828                  * need to see the replacement expressions.
4829                  */
4830                 subquery = rel->subroot->parse;
4831                 Assert(IsA(subquery, Query));
4832
4833                 /* Get the subquery output expression referenced by the upper Var */
4834                 ste = get_tle_by_resno(subquery->targetList, var->varattno);
4835                 if (ste == NULL || ste->resjunk)
4836                         elog(ERROR, "subquery %s does not have attribute %d",
4837                                  rte->eref->aliasname, var->varattno);
4838                 var = (Var *) ste->expr;
4839
4840                 /*
4841                  * If subquery uses DISTINCT, we can't make use of any stats for the
4842                  * variable ... but, if it's the only DISTINCT column, we are entitled
4843                  * to consider it unique.  We do the test this way so that it works
4844                  * for cases involving DISTINCT ON.
4845                  */
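                /*
                 * Hypothetical example: for "SELECT DISTINCT a FROM t" the lone
                 * DISTINCT column is certainly unique in the subquery output, and
                 * the same reasoning applies to "SELECT DISTINCT ON (a) a, b FROM
                 * t ...", which is why we test membership in distinctClause
                 * rather than looking at the syntax used.
                 */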
4846                 if (subquery->distinctClause)
4847                 {
4848                         if (list_length(subquery->distinctClause) == 1 &&
4849                                 targetIsInSortList(ste, InvalidOid, subquery->distinctClause))
4850                                 vardata->isunique = true;
4851                         /* cannot go further */
4852                         return;
4853                 }
4854
4855                 /*
4856                  * If the sub-query originated from a view with the security_barrier
4857                  * attribute, we must not look at the variable's statistics, though it
4858                  * seems all right to notice the existence of a DISTINCT clause. So
4859                  * stop here.
4860                  *
4861                  * This is probably a harsher restriction than necessary; it's
4862                  * certainly OK for the selectivity estimator (which is a C function,
4863                  * and therefore omnipotent anyway) to look at the statistics.  But
4864                  * many selectivity estimators will happily *invoke the operator
4865                  * function* to try to work out a good estimate - and that's not OK.
4866                  * So for now, don't dig down for stats.
4867                  */
4868                 if (rte->security_barrier)
4869                         return;
4870
4871                 /* Can only handle a simple Var of subquery's query level */
4872                 if (var && IsA(var, Var) &&
4873                         var->varlevelsup == 0)
4874                 {
4875                         /*
4876                          * OK, recurse into the subquery.  Note that the original setting
4877                          * of vardata->isunique (which will surely be false) is left
4878                          * unchanged in this situation.  That's what we want, since even
4879                          * if the underlying column is unique, the subquery may have
4880                          * joined to other tables in a way that creates duplicates.
4881                          */
4882                         examine_simple_variable(rel->subroot, var, vardata);
4883                 }
4884         }
4885         else
4886         {
4887                 /*
4888                  * Otherwise, the Var comes from a FUNCTION, VALUES, or CTE RTE.  (We
4889                  * won't see RTE_JOIN here because join alias Vars have already been
4890                  * flattened.)  There's not much we can do with function outputs, but
4891                  * maybe someday try to be smarter about VALUES and/or CTEs.
4892                  */
4893         }
4894 }
4895
4896 /*
4897  * get_variable_numdistinct
4898  *        Estimate the number of distinct values of a variable.
4899  *
4900  * vardata: results of examine_variable
4901  * *isdefault: set to TRUE if the result is a default rather than based on
4902  * anything meaningful.
4903  *
4904  * NB: be careful to produce a positive integral result, since callers may
4905  * compare the result to exact integer counts, or might divide by it.
4906  */
4907 double
4908 get_variable_numdistinct(VariableStatData *vardata, bool *isdefault)
4909 {
4910         double          stadistinct;
4911         double          stanullfrac = 0.0;
4912         double          ntuples;
4913
4914         *isdefault = false;
4915
4916         /*
4917          * Determine the stadistinct value to use.  There are cases where we can
4918          * get an estimate even without a pg_statistic entry, or can get a better
4919          * value than is in pg_statistic.  Grab stanullfrac too if we can find it
4920          * (otherwise, assume no nulls, for lack of any better idea).
4921          */
4922         if (HeapTupleIsValid(vardata->statsTuple))
4923         {
4924                 /* Use the pg_statistic entry */
4925                 Form_pg_statistic stats;
4926
4927                 stats = (Form_pg_statistic) GETSTRUCT(vardata->statsTuple);
4928                 stadistinct = stats->stadistinct;
4929                 stanullfrac = stats->stanullfrac;
4930         }
4931         else if (vardata->vartype == BOOLOID)
4932         {
4933                 /*
4934                  * Special-case boolean columns: presumably, two distinct values.
4935                  *
4936                  * Are there any other datatypes we should wire in special estimates
4937                  * for?
4938                  */
4939                 stadistinct = 2.0;
4940         }
4941         else
4942         {
4943                 /*
4944                  * We don't keep statistics for system columns, but in some cases we
4945                  * can infer distinctness anyway.
4946                  */
4947                 if (vardata->var && IsA(vardata->var, Var))
4948                 {
4949                         switch (((Var *) vardata->var)->varattno)
4950                         {
4951                                 case ObjectIdAttributeNumber:
4952                                 case SelfItemPointerAttributeNumber:
4953                                         stadistinct = -1.0; /* unique (and all non null) */
4954                                         break;
4955                                 case TableOidAttributeNumber:
4956                                         stadistinct = 1.0;      /* only 1 value */
4957                                         break;
4958                                 default:
4959                                         stadistinct = 0.0;      /* means "unknown" */
4960                                         break;
4961                         }
4962                 }
4963                 else
4964                         stadistinct = 0.0;      /* means "unknown" */
4965
4966                 /*
4967                  * XXX consider using estimate_num_groups on expressions?
4968                  */
4969         }
4970
4971         /*
4972          * If there is a unique index or DISTINCT clause for the variable, assume
4973          * it is unique no matter what pg_statistic says; the statistics could be
4974          * out of date, or we might have found a partial unique index that proves
4975          * the var is unique for this query.  However, we'd better still believe
4976          * the null-fraction statistic.
4977          */
4978         if (vardata->isunique)
4979                 stadistinct = -1.0 * (1.0 - stanullfrac);
4980
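        /*
         * Recap of the stadistinct convention, with illustrative numbers:
         * a positive value is an absolute count (e.g. 3 distinct values),
         * a negative value is minus a fraction of the row count (e.g. -0.2
         * on a 10000-row table means roughly 2000 distinct values), and
         * zero means unknown.  The branches below interpret it accordingly.
         */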
4981         /*
4982          * If we had an absolute estimate, use that.
4983          */
4984         if (stadistinct > 0.0)
4985                 return clamp_row_est(stadistinct);
4986
4987         /*
4988          * Otherwise we need to get the relation size; punt if not available.
4989          */
4990         if (vardata->rel == NULL)
4991         {
4992                 *isdefault = true;
4993                 return DEFAULT_NUM_DISTINCT;
4994         }
4995         ntuples = vardata->rel->tuples;
4996         if (ntuples <= 0.0)
4997         {
4998                 *isdefault = true;
4999                 return DEFAULT_NUM_DISTINCT;
5000         }
5001
5002         /*
5003          * If we had a relative estimate, use that.
5004          */
5005         if (stadistinct < 0.0)
5006                 return clamp_row_est(-stadistinct * ntuples);
5007
5008         /*
5009          * With no data, estimate ndistinct = ntuples if the table is small, else
5010          * use default.  We use DEFAULT_NUM_DISTINCT as the cutoff for "small" so
5011          * that the behavior isn't discontinuous.
5012          */
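        /*
         * For instance, with DEFAULT_NUM_DISTINCT at its usual value of 200,
         * a 150-row table with no stats is assumed to have 150 distinct
         * values, while a 5000-row table just gets the default of 200 (with
         * *isdefault set so callers know how weak that estimate is).
         */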
5013         if (ntuples < DEFAULT_NUM_DISTINCT)
5014                 return clamp_row_est(ntuples);
5015
5016         *isdefault = true;
5017         return DEFAULT_NUM_DISTINCT;
5018 }
5019
5020 /*
5021  * get_variable_range
5022  *              Estimate the minimum and maximum value of the specified variable.
5023  *              If successful, store values in *min and *max, and return TRUE.
5024  *              If no data available, return FALSE.
5025  *
5026  * sortop is the "<" comparison operator to use.  This should generally
5027  * be "<" not ">", as only the former is likely to be found in pg_statistic.
5028  */
5029 static bool
5030 get_variable_range(PlannerInfo *root, VariableStatData *vardata, Oid sortop,
5031                                    Datum *min, Datum *max)
5032 {
5033         Datum           tmin = 0;
5034         Datum           tmax = 0;
5035         bool            have_data = false;
5036         int16           typLen;
5037         bool            typByVal;
5038         Datum      *values;
5039         int                     nvalues;
5040         int                     i;
5041
5042         /*
5043          * XXX It's very tempting to try to use the actual column min and max, if
5044          * we can get them relatively cheaply with an index probe.  However, since
5045          * this function is called many times during join planning, that could
5046          * have unpleasant effects on planning speed.  Need more investigation
5047          * before enabling this.
5048          */
5049 #ifdef NOT_USED
5050         if (get_actual_variable_range(root, vardata, sortop, min, max))
5051                 return true;
5052 #endif
5053
5054         if (!HeapTupleIsValid(vardata->statsTuple))
5055         {
5056                 /* no stats available, so default result */
5057                 return false;
5058         }
5059
5060         get_typlenbyval(vardata->atttype, &typLen, &typByVal);
5061
5062         /*
5063          * If there is a histogram, grab the first and last values.
5064          *
5065          * If there is a histogram that is sorted with some other operator than
5066          * the one we want, fail --- this suggests that there is data we can't
5067          * use.
5068          */
5069         if (get_attstatsslot(vardata->statsTuple,
5070                                                  vardata->atttype, vardata->atttypmod,
5071                                                  STATISTIC_KIND_HISTOGRAM, sortop,
5072                                                  NULL,
5073                                                  &values, &nvalues,
5074                                                  NULL, NULL))
5075         {
5076                 if (nvalues > 0)
5077                 {
5078                         tmin = datumCopy(values[0], typByVal, typLen);
5079                         tmax = datumCopy(values[nvalues - 1], typByVal, typLen);
5080                         have_data = true;
5081                 }
5082                 free_attstatsslot(vardata->atttype, values, nvalues, NULL, 0);
5083         }
5084         else if (get_attstatsslot(vardata->statsTuple,
5085                                                           vardata->atttype, vardata->atttypmod,
5086                                                           STATISTIC_KIND_HISTOGRAM, InvalidOid,
5087                                                           NULL,
5088                                                           &values, &nvalues,
5089                                                           NULL, NULL))
5090         {
5091                 free_attstatsslot(vardata->atttype, values, nvalues, NULL, 0);
5092                 return false;
5093         }
5094
5095         /*
5096          * If we have most-common-values info, look for extreme MCVs.  This is
5097          * needed even if we also have a histogram, since the histogram excludes
5098          * the MCVs.  However, usually the MCVs will not be the extreme values, so
5099          * avoid unnecessary data copying.
5100          */
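        /*
         * Illustrative case: if the histogram bounds run from 10 to 90 but a
         * very common value 95 was pulled out into the MCV list, the true
         * maximum is 95, so the MCVs must be scanned as well.
         */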
5101         if (get_attstatsslot(vardata->statsTuple,
5102                                                  vardata->atttype, vardata->atttypmod,
5103                                                  STATISTIC_KIND_MCV, InvalidOid,
5104                                                  NULL,
5105                                                  &values, &nvalues,
5106                                                  NULL, NULL))
5107         {
5108                 bool            tmin_is_mcv = false;
5109                 bool            tmax_is_mcv = false;
5110                 FmgrInfo        opproc;
5111
5112                 fmgr_info(get_opcode(sortop), &opproc);
5113
5114                 for (i = 0; i < nvalues; i++)
5115                 {
5116                         if (!have_data)
5117                         {
5118                                 tmin = tmax = values[i];
5119                                 tmin_is_mcv = tmax_is_mcv = have_data = true;
5120                                 continue;
5121                         }
5122                         if (DatumGetBool(FunctionCall2Coll(&opproc,
5123                                                                                            DEFAULT_COLLATION_OID,
5124                                                                                            values[i], tmin)))
5125                         {
5126                                 tmin = values[i];
5127                                 tmin_is_mcv = true;
5128                         }
5129                         if (DatumGetBool(FunctionCall2Coll(&opproc,
5130                                                                                            DEFAULT_COLLATION_OID,
5131                                                                                            tmax, values[i])))
5132                         {
5133                                 tmax = values[i];
5134                                 tmax_is_mcv = true;
5135                         }
5136                 }
5137                 if (tmin_is_mcv)
5138                         tmin = datumCopy(tmin, typByVal, typLen);
5139                 if (tmax_is_mcv)
5140                         tmax = datumCopy(tmax, typByVal, typLen);
5141                 free_attstatsslot(vardata->atttype, values, nvalues, NULL, 0);
5142         }
5143
5144         *min = tmin;
5145         *max = tmax;
5146         return have_data;
5147 }
5148
5149
5150 /*
5151  * get_actual_variable_range
5152  *              Attempt to identify the current *actual* minimum and/or maximum
5153  *              of the specified variable, by looking for a suitable btree index
5154  *              and fetching its low and/or high values.
5155  *              If successful, store values in *min and *max, and return TRUE.
5156  *              (Either pointer can be NULL if that endpoint isn't needed.)
5157  *              If no data available, return FALSE.
5158  *
5159  * sortop is the "<" comparison operator to use.
5160  */
5161 static bool
5162 get_actual_variable_range(PlannerInfo *root, VariableStatData *vardata,
5163                                                   Oid sortop,
5164                                                   Datum *min, Datum *max)
5165 {
5166         bool            have_data = false;
5167         RelOptInfo *rel = vardata->rel;
5168         RangeTblEntry *rte;
5169         ListCell   *lc;
5170
5171         /* No hope if no relation or it doesn't have indexes */
5172         if (rel == NULL || rel->indexlist == NIL)
5173                 return false;
5174         /* If it has indexes it must be a plain relation */
5175         rte = root->simple_rte_array[rel->relid];
5176         Assert(rte->rtekind == RTE_RELATION);
5177
5178         /* Search through the indexes to see if any match our problem */
5179         foreach(lc, rel->indexlist)
5180         {
5181                 IndexOptInfo *index = (IndexOptInfo *) lfirst(lc);
5182                 ScanDirection indexscandir;
5183
5184                 /* Ignore non-btree indexes */
5185                 if (index->relam != BTREE_AM_OID)
5186                         continue;
5187
5188                 /*
5189                  * Ignore partial indexes --- we only want stats that cover the entire
5190                  * relation.
5191                  */
5192                 if (index->indpred != NIL)
5193                         continue;
5194
5195                 /*
5196                  * The index list might include hypothetical indexes inserted by a
5197                  * get_relation_info hook --- don't try to access them.
5198                  */
5199                 if (index->hypothetical)
5200                         continue;
5201
5202                 /*
5203                  * The first index column must match the desired variable and sort
5204                  * operator --- but we can use a descending-order index.
5205                  */
5206                 if (!match_index_to_operand(vardata->var, 0, index))
5207                         continue;
5208                 switch (get_op_opfamily_strategy(sortop, index->sortopfamily[0]))
5209                 {
5210                         case BTLessStrategyNumber:
5211                                 if (index->reverse_sort[0])
5212                                         indexscandir = BackwardScanDirection;
5213                                 else
5214                                         indexscandir = ForwardScanDirection;
5215                                 break;
5216                         case BTGreaterStrategyNumber:
5217                                 if (index->reverse_sort[0])
5218                                         indexscandir = ForwardScanDirection;
5219                                 else
5220                                         indexscandir = BackwardScanDirection;
5221                                 break;
5222                         default:
5223                                 /* index doesn't match the sortop */
5224                                 continue;
5225                 }
5226
5227                 /*
5228                  * Found a suitable index to extract data from.  We'll need an EState
5229                  * and a bunch of other infrastructure.
5230                  */
5231                 {
5232                         EState     *estate;
5233                         ExprContext *econtext;
5234                         MemoryContext tmpcontext;
5235                         MemoryContext oldcontext;
5236                         Relation        heapRel;
5237                         Relation        indexRel;
5238                         IndexInfo  *indexInfo;
5239                         TupleTableSlot *slot;
5240                         int16           typLen;
5241                         bool            typByVal;
5242                         ScanKeyData scankeys[1];
5243                         IndexScanDesc index_scan;
5244                         HeapTuple       tup;
5245                         Datum           values[INDEX_MAX_KEYS];
5246                         bool            isnull[INDEX_MAX_KEYS];
5247                         SnapshotData SnapshotDirty;
5248
5249                         estate = CreateExecutorState();
5250                         econtext = GetPerTupleExprContext(estate);
5251                         /* Make sure any cruft is generated in the econtext's memory */
5252                         tmpcontext = econtext->ecxt_per_tuple_memory;
5253                         oldcontext = MemoryContextSwitchTo(tmpcontext);
5254
5255                         /*
5256                          * Open the table and index so we can read from them.  We should
5257                          * already have at least AccessShareLock on the table, but not
5258                          * necessarily on the index.
5259                          */
5260                         heapRel = heap_open(rte->relid, NoLock);
5261                         indexRel = index_open(index->indexoid, AccessShareLock);
5262
5263                         /* extract index key information from the index's pg_index info */
5264                         indexInfo = BuildIndexInfo(indexRel);
5265
5266                         /* some other stuff */
5267                         slot = MakeSingleTupleTableSlot(RelationGetDescr(heapRel));
5268                         econtext->ecxt_scantuple = slot;
5269                         get_typlenbyval(vardata->atttype, &typLen, &typByVal);
5270                         InitDirtySnapshot(SnapshotDirty);
5271
5272                         /* set up an IS NOT NULL scan key so that we ignore nulls */
5273                         ScanKeyEntryInitialize(&scankeys[0],
5274                                                                    SK_ISNULL | SK_SEARCHNOTNULL,
5275                                                                    1,   /* index col to scan */
5276                                                                    InvalidStrategy,             /* no strategy */
5277                                                                    InvalidOid,  /* no strategy subtype */
5278                                                                    InvalidOid,  /* no collation */
5279                                                                    InvalidOid,  /* no reg proc for this */
5280                                                                    (Datum) 0);  /* constant */
5281
5282                         have_data = true;
5283
5284                         /* If min is requested ... */
5285                         if (min)
5286                         {
5287                                 /*
5288                                  * In principle, we should scan the index with our current
5289                                  * active snapshot, which is the best approximation we've got
5290                                  * to what the query will see when executed.  But that won't
5291                                  * be exact if a new snap is taken before running the query,
5292                                  * and it can be very expensive if a lot of uncommitted rows
5293                                  * exist at the end of the index (because we'll laboriously
5294                                  * fetch each one and reject it).  What seems like a good
5295                                  * compromise is to use SnapshotDirty.  That will accept
5296                                  * uncommitted rows, and thus avoid fetching multiple heap
5297                                  * tuples in this scenario.  On the other hand, it will reject
5298                                  * known-dead rows, and thus not give a bogus answer when the
5299                                  * extreme value has been deleted; that case motivates not
5300                                  * using SnapshotAny here.
5301                                  */
5302                                 index_scan = index_beginscan(heapRel, indexRel, &SnapshotDirty,
5303                                                                                          1, 0);
5304                                 index_rescan(index_scan, scankeys, 1, NULL, 0);
5305
5306                                 /* Fetch first tuple in sortop's direction */
5307                                 if ((tup = index_getnext(index_scan,
5308                                                                                  indexscandir)) != NULL)
5309                                 {
5310                                         /* Extract the index column values from the heap tuple */
5311                                         ExecStoreTuple(tup, slot, InvalidBuffer, false);
5312                                         FormIndexDatum(indexInfo, slot, estate,
5313                                                                    values, isnull);
5314
5315                                         /* Shouldn't have got a null, but be careful */
5316                                         if (isnull[0])
5317                                                 elog(ERROR, "found unexpected null value in index \"%s\"",
5318                                                          RelationGetRelationName(indexRel));
5319
5320                                         /* Copy the index column value out to caller's context */
5321                                         MemoryContextSwitchTo(oldcontext);
5322                                         *min = datumCopy(values[0], typByVal, typLen);
5323                                         MemoryContextSwitchTo(tmpcontext);
5324                                 }
5325                                 else
5326                                         have_data = false;
5327
5328                                 index_endscan(index_scan);
5329                         }
5330
5331                         /* If max is requested, and we didn't just find that the index is empty */
5332                         if (max && have_data)
5333                         {
5334                                 index_scan = index_beginscan(heapRel, indexRel, &SnapshotDirty,
5335                                                                                          1, 0);
5336                                 index_rescan(index_scan, scankeys, 1, NULL, 0);
5337
5338                                 /* Fetch first tuple in reverse direction */
5339                                 if ((tup = index_getnext(index_scan,
5340                                                                                  -indexscandir)) != NULL)
5341                                 {
5342                                         /* Extract the index column values from the heap tuple */
5343                                         ExecStoreTuple(tup, slot, InvalidBuffer, false);
5344                                         FormIndexDatum(indexInfo, slot, estate,
5345                                                                    values, isnull);
5346
5347                                         /* Shouldn't have got a null, but be careful */
5348                                         if (isnull[0])
5349                                                 elog(ERROR, "found unexpected null value in index \"%s\"",
5350                                                          RelationGetRelationName(indexRel));
5351
5352                                         /* Copy the index column value out to caller's context */
5353                                         MemoryContextSwitchTo(oldcontext);
5354                                         *max = datumCopy(values[0], typByVal, typLen);
5355                                         MemoryContextSwitchTo(tmpcontext);
5356                                 }
5357                                 else
5358                                         have_data = false;
5359
5360                                 index_endscan(index_scan);
5361                         }
5362
5363                         /* Clean everything up */
5364                         ExecDropSingleTupleTableSlot(slot);
5365
5366                         index_close(indexRel, AccessShareLock);
5367                         heap_close(heapRel, NoLock);
5368
5369                         MemoryContextSwitchTo(oldcontext);
5370                         FreeExecutorState(estate);
5371
5372                         /* And we're done */
5373                         break;
5374                 }
5375         }
5376
5377         return have_data;
5378 }
5379
5380 /*
5381  * find_join_input_rel
5382  *              Look up the input relation for a join.
5383  *
5384  * We assume that the input relation's RelOptInfo must have been constructed
5385  * already.
5386  */
5387 static RelOptInfo *
5388 find_join_input_rel(PlannerInfo *root, Relids relids)
5389 {
5390         RelOptInfo *rel = NULL;
5391
5392         switch (bms_membership(relids))
5393         {
5394                 case BMS_EMPTY_SET:
5395                         /* should not happen */
5396                         break;
5397                 case BMS_SINGLETON:
5398                         rel = find_base_rel(root, bms_singleton_member(relids));
5399                         break;
5400                 case BMS_MULTIPLE:
5401                         rel = find_join_rel(root, relids);
5402                         break;
5403         }
5404
5405         if (rel == NULL)
5406                 elog(ERROR, "could not find RelOptInfo for given relids");
5407
5408         return rel;
5409 }
5410
5411
5412 /*-------------------------------------------------------------------------
5413  *
5414  * Pattern analysis functions
5415  *
5416  * These routines support analysis of LIKE and regular-expression patterns
5417  * by the planner/optimizer.  It's important that they agree with the
5418  * regular-expression code in backend/regex/ and the LIKE code in
5419  * backend/utils/adt/like.c.  Also, the computation of the fixed prefix
5420  * must be conservative: if we report a string longer than the true fixed
5421  * prefix, the query may produce actually wrong answers, rather than just
5422  * getting a bad selectivity estimate!
5423  *
5424  * Note that the prefix-analysis functions are called from
5425  * backend/optimizer/path/indxpath.c as well as from routines in this file.
5426  *
5427  *-------------------------------------------------------------------------
5428  */
5429
5430 /*
5431  * Check whether char is a letter (and, hence, subject to case-folding)
5432  *
5433  * In multibyte character sets or with ICU, we can't use isalpha, and it
5434  * does not seem worth trying to convert to wchar_t to use iswalpha.
5435  * Instead, just assume any multibyte char is potentially case-varying.
5436  */
5437 static int
5438 pattern_char_isalpha(char c, bool is_multibyte,
5439                                          pg_locale_t locale, bool locale_is_c)
5440 {
5441         if (locale_is_c)
5442                 return (c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z');
5443         else if (is_multibyte && IS_HIGHBIT_SET(c))
5444                 return true;
5445         else if (locale && locale->provider == COLLPROVIDER_ICU)
5446                 return IS_HIGHBIT_SET(c) ? true : false;
5447 #ifdef HAVE_LOCALE_T
5448         else if (locale && locale->provider == COLLPROVIDER_LIBC)
5449                 return isalpha_l((unsigned char) c, locale->info.lt);
5450 #endif
5451         else
5452                 return isalpha((unsigned char) c);
5453 }
5454
5455 /*
5456  * Extract the fixed prefix, if any, for a pattern.
5457  *
5458  * *prefix is set to a palloc'd prefix string (in the form of a Const node),
5459  *      or to NULL if no fixed prefix exists for the pattern.
5460  * If rest_selec is not NULL, *rest_selec is set to an estimate of the
5461  *      selectivity of the remainder of the pattern (without any fixed prefix).
5462  * The prefix Const has the same type (TEXT or BYTEA) as the input pattern.
5463  *
5464  * The return value distinguishes no fixed prefix, a partial prefix,
5465  * or an exact-match-only pattern.
5466  */
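/*
 * Illustrative patterns: LIKE 'foo%bar' yields fixed prefix 'foo' with
 * Pattern_Prefix_Partial, LIKE 'foobar' yields 'foobar' with
 * Pattern_Prefix_Exact, and LIKE '%bar' yields no usable prefix at all
 * (Pattern_Prefix_None).  Similarly, the regex '^foo.*bar$' has fixed
 * prefix 'foo'.
 */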
5467
5468 static Pattern_Prefix_Status
5469 like_fixed_prefix(Const *patt_const, bool case_insensitive, Oid collation,
5470                                   Const **prefix_const, Selectivity *rest_selec)
5471 {
5472         char       *match;
5473         char       *patt;
5474         int                     pattlen;
5475         Oid                     typeid = patt_const->consttype;
5476         int                     pos,
5477                                 match_pos;
5478         bool            is_multibyte = (pg_database_encoding_max_length() > 1);
5479         pg_locale_t locale = 0;
5480         bool            locale_is_c = false;
5481
5482         /* the right-hand const is type text or bytea */
5483         Assert(typeid == BYTEAOID || typeid == TEXTOID);
5484
5485         if (case_insensitive)
5486         {
5487                 if (typeid == BYTEAOID)
5488                         ereport(ERROR,
5489                                         (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
5490                         errmsg("case insensitive matching not supported on type bytea")));
5491
5492                 /* If case-insensitive, we need locale info */
5493                 if (lc_ctype_is_c(collation))
5494                         locale_is_c = true;
5495                 else if (collation != DEFAULT_COLLATION_OID)
5496                 {
5497                         if (!OidIsValid(collation))
5498                         {
5499                                 /*
5500                                  * This typically means that the parser could not resolve a
5501                                  * conflict of implicit collations, so report it that way.
5502                                  */
5503                                 ereport(ERROR,
5504                                                 (errcode(ERRCODE_INDETERMINATE_COLLATION),
5505                                                  errmsg("could not determine which collation to use for ILIKE"),
5506                                                  errhint("Use the COLLATE clause to set the collation explicitly.")));
5507                         }
5508                         locale = pg_newlocale_from_collation(collation);
5509                 }
5510         }
5511
5512         if (typeid != BYTEAOID)
5513         {
5514                 patt = TextDatumGetCString(patt_const->constvalue);
5515                 pattlen = strlen(patt);
5516         }
5517         else
5518         {
5519                 bytea      *bstr = DatumGetByteaPP(patt_const->constvalue);
5520
5521                 pattlen = VARSIZE_ANY_EXHDR(bstr);
5522                 patt = (char *) palloc(pattlen);
5523                 memcpy(patt, VARDATA_ANY(bstr), pattlen);
5524                 Assert((Pointer) bstr == DatumGetPointer(patt_const->constvalue));
5525         }
5526
5527         match = palloc(pattlen + 1);
5528         match_pos = 0;
5529         for (pos = 0; pos < pattlen; pos++)
5530         {
5531                 /* % and _ are wildcard characters in LIKE */
5532                 if (patt[pos] == '%' ||
5533                         patt[pos] == '_')
5534                         break;
5535
5536                 /* Backslash escapes the next character */
5537                 if (patt[pos] == '\\')
5538                 {
5539                         pos++;
5540                         if (pos >= pattlen)
5541                                 break;
5542                 }
5543
5544                 /* Stop if case-varying character (it's sort of a wildcard) */
5545                 if (case_insensitive &&
5546                   pattern_char_isalpha(patt[pos], is_multibyte, locale, locale_is_c))
5547                         break;
5548
5549                 match[match_pos++] = patt[pos];
5550         }
5551
5552         match[match_pos] = '\0';
5553
5554         if (typeid != BYTEAOID)
5555                 *prefix_const = string_to_const(match, typeid);
5556         else
5557                 *prefix_const = string_to_bytea_const(match, match_pos);
5558
5559         if (rest_selec != NULL)
5560                 *rest_selec = like_selectivity(&patt[pos], pattlen - pos,
5561                                                                            case_insensitive);
5562
5563         pfree(patt);
5564         pfree(match);
5565
5566         /* in LIKE, an empty pattern is an exact match! */
5567         if (pos == pattlen)
5568                 return Pattern_Prefix_Exact;    /* reached end of pattern, so exact */
5569
5570         if (match_pos > 0)
5571                 return Pattern_Prefix_Partial;
5572
5573         return Pattern_Prefix_None;
5574 }
5575
5576 static Pattern_Prefix_Status
5577 regex_fixed_prefix(Const *patt_const, bool case_insensitive, Oid collation,
5578                                    Const **prefix_const, Selectivity *rest_selec)
5579 {
5580         Oid                     typeid = patt_const->consttype;
5581         char       *prefix;
5582         bool            exact;
5583
5584         /*
5585          * This should be unnecessary, since there are no bytea regex operators
5586          * defined.  Note, however, that the rest of this function has *not* been
5587          * made safe for binary (possibly NULL-containing) strings.
5588          */
5589         if (typeid == BYTEAOID)
5590                 ereport(ERROR,
5591                                 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
5592                  errmsg("regular-expression matching not supported on type bytea")));
5593
5594         /* Use the regexp machinery to extract the prefix, if any */
5595         prefix = regexp_fixed_prefix(DatumGetTextPP(patt_const->constvalue),
5596                                                                  case_insensitive, collation,
5597                                                                  &exact);
5598
5599         if (prefix == NULL)
5600         {
5601                 *prefix_const = NULL;
5602
5603                 if (rest_selec != NULL)
5604                 {
5605                         char       *patt = TextDatumGetCString(patt_const->constvalue);
5606
5607                         *rest_selec = regex_selectivity(patt, strlen(patt),
5608                                                                                         case_insensitive,
5609                                                                                         0);
5610                         pfree(patt);
5611                 }
5612
5613                 return Pattern_Prefix_None;
5614         }
5615
5616         *prefix_const = string_to_const(prefix, typeid);
5617
5618         if (rest_selec != NULL)
5619         {
5620                 if (exact)
5621                 {
5622                         /* Exact match, so there's no additional selectivity */
5623                         *rest_selec = 1.0;
5624                 }
5625                 else
5626                 {
5627                         char       *patt = TextDatumGetCString(patt_const->constvalue);
5628
5629                         *rest_selec = regex_selectivity(patt, strlen(patt),
5630                                                                                         case_insensitive,
5631                                                                                         strlen(prefix));
5632                         pfree(patt);
5633                 }
5634         }
5635
5636         pfree(prefix);
5637
5638         if (exact)
5639                 return Pattern_Prefix_Exact;    /* pattern specifies exact match */
5640         else
5641                 return Pattern_Prefix_Partial;
5642 }
5643
5644 Pattern_Prefix_Status
5645 pattern_fixed_prefix(Const *patt, Pattern_Type ptype, Oid collation,
5646                                          Const **prefix, Selectivity *rest_selec)
5647 {
5648         Pattern_Prefix_Status result;
5649
5650         switch (ptype)
5651         {
5652                 case Pattern_Type_Like:
5653                         result = like_fixed_prefix(patt, false, collation,
5654                                                                            prefix, rest_selec);
5655                         break;
5656                 case Pattern_Type_Like_IC:
5657                         result = like_fixed_prefix(patt, true, collation,
5658                                                                            prefix, rest_selec);
5659                         break;
5660                 case Pattern_Type_Regex:
5661                         result = regex_fixed_prefix(patt, false, collation,
5662                                                                                 prefix, rest_selec);
5663                         break;
5664                 case Pattern_Type_Regex_IC:
5665                         result = regex_fixed_prefix(patt, true, collation,
5666                                                                                 prefix, rest_selec);
5667                         break;
5668                 default:
5669                         elog(ERROR, "unrecognized ptype: %d", (int) ptype);
5670                         result = Pattern_Prefix_None;           /* keep compiler quiet */
5671                         break;
5672         }
5673         return result;
5674 }
5675
5676 /*
5677  * Estimate the selectivity of a fixed prefix for a pattern match.
5678  *
5679  * A fixed prefix "foo" is estimated as the selectivity of the expression
5680  * "variable >= 'foo' AND variable < 'fop'" (see also indxpath.c).
5681  *
5682  * The selectivity estimate is with respect to the portion of the column
5683  * population represented by the histogram --- the caller must fold this
5684  * together with info about MCVs and NULLs.
5685  *
5686  * We use the >= and < operators from the specified btree opfamily to do the
5687  * estimation.  The given variable and Const must be of the associated
5688  * datatype.
5689  *
5690  * XXX Note: we make use of the upper bound to estimate operator selectivity
5691  * even if the locale is such that we cannot rely on the upper-bound string.
5692  * The selectivity only needs to be approximately right anyway, so it seems
5693  * more useful to use the upper-bound code than not.
5694  */
5695 static Selectivity
5696 prefix_selectivity(PlannerInfo *root, VariableStatData *vardata,
5697                                    Oid vartype, Oid opfamily, Const *prefixcon)
5698 {
5699         Selectivity prefixsel;
5700         Oid                     cmpopr;
5701         FmgrInfo        opproc;
5702         Const      *greaterstrcon;
5703         Selectivity eq_sel;
5704
5705         cmpopr = get_opfamily_member(opfamily, vartype, vartype,
5706                                                                  BTGreaterEqualStrategyNumber);
5707         if (cmpopr == InvalidOid)
5708                 elog(ERROR, "no >= operator for opfamily %u", opfamily);
5709         fmgr_info(get_opcode(cmpopr), &opproc);
5710
5711         prefixsel = ineq_histogram_selectivity(root, vardata, &opproc, true,
5712                                                                                    prefixcon->constvalue,
5713                                                                                    prefixcon->consttype);
5714
5715         if (prefixsel < 0.0)
5716         {
5717                 /* No histogram is present ... return a suitable default estimate */
5718                 return DEFAULT_MATCH_SEL;
5719         }
5720
5721         /*-------
5722          * If we can create a string larger than the prefix, say
5723          *      "x < greaterstr".
5724          *-------
5725          */
5726         cmpopr = get_opfamily_member(opfamily, vartype, vartype,
5727                                                                  BTLessStrategyNumber);
5728         if (cmpopr == InvalidOid)
5729                 elog(ERROR, "no < operator for opfamily %u", opfamily);
5730         fmgr_info(get_opcode(cmpopr), &opproc);
5731         greaterstrcon = make_greater_string(prefixcon, &opproc,
5732                                                                                 DEFAULT_COLLATION_OID);
5733         if (greaterstrcon)
5734         {
5735                 Selectivity topsel;
5736
5737                 topsel = ineq_histogram_selectivity(root, vardata, &opproc, false,
5738                                                                                         greaterstrcon->constvalue,
5739                                                                                         greaterstrcon->consttype);
5740
5741                 /* ineq_histogram_selectivity worked before, it shouldn't fail now */
5742                 Assert(topsel >= 0.0);
5743
5744                 /*
5745                  * Merge the two selectivities in the same way as for a range query
5746                  * (see clauselist_selectivity()).  Note that we don't need to worry
5747                  * about double-exclusion of nulls, since ineq_histogram_selectivity
5748                  * doesn't count those anyway.
5749                  */
5750                 prefixsel = topsel + prefixsel - 1.0;
5751         }
5752
5753         /*
5754          * If the prefix is long then the two bounding values might be too close
5755          * together for the histogram to distinguish them usefully, resulting in a
5756          * zero estimate (plus or minus roundoff error). To avoid returning a
5757          * ridiculously small estimate, compute the estimated selectivity for
5758          * "variable = 'foo'", and clamp to that. (Obviously, the resultant
5759          * estimate should be at least that.)
5760          *
5761          * We apply this even if we couldn't make a greater string.  That case
5762          * suggests that the prefix is near the maximum possible, and thus
5763          * probably off the end of the histogram, and thus we probably got a very
5764          * small estimate from the >= condition; so we still need to clamp.
5765          */
5766         cmpopr = get_opfamily_member(opfamily, vartype, vartype,
5767                                                                  BTEqualStrategyNumber);
5768         if (cmpopr == InvalidOid)
5769                 elog(ERROR, "no = operator for opfamily %u", opfamily);
5770         eq_sel = var_eq_const(vardata, cmpopr, prefixcon->constvalue,
5771                                                   false, true);
5772
5773         prefixsel = Max(prefixsel, eq_sel);
5774
5775         return prefixsel;
5776 }
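
/*
 * A worked example of the computation above, using purely illustrative
 * histogram numbers: for the prefix 'foo' we might get
 *              sel(var >= 'foo') = 0.372       (from ineq_histogram_selectivity)
 *              sel(var <  'fop') = 0.630       (using the generated greater string)
 * so the range-style merge gives 0.630 + 0.372 - 1.0 = 0.002.  If the
 * estimated selectivity of "var = 'foo'" were 0.005, the final clamp would
 * raise the result to 0.005, since a prefix match can never be rarer than an
 * exact match on the prefix itself.
 */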
5777
5778
5779 /*
5780  * Estimate the selectivity of a pattern of the specified type.
5781  * Note that any fixed prefix of the pattern will have been removed already,
5782  * so actually we may be looking at just a fragment of the pattern.
5783  *
5784  * For now, we use a very simplistic approach: fixed characters reduce the
5785  * selectivity a good deal, character ranges reduce it a little,
5786  * wildcards (such as % for LIKE or .* for regex) increase it.
5787  */
5788
5789 #define FIXED_CHAR_SEL  0.20    /* about 1/5 */
5790 #define CHAR_RANGE_SEL  0.25
5791 #define ANY_CHAR_SEL    0.9             /* not 1, since it won't match end-of-string */
5792 #define FULL_WILDCARD_SEL 5.0
5793 #define PARTIAL_WILDCARD_SEL 2.0
5794
5795 static Selectivity
5796 like_selectivity(const char *patt, int pattlen, bool case_insensitive)
5797 {
5798         Selectivity sel = 1.0;
5799         int                     pos;
5800
5801         /* Skip any leading wildcard; it's already factored into initial sel */
5802         for (pos = 0; pos < pattlen; pos++)
5803         {
5804                 if (patt[pos] != '%' && patt[pos] != '_')
5805                         break;
5806         }
5807
5808         for (; pos < pattlen; pos++)
5809         {
5810                 /* % and _ are wildcard characters in LIKE */
5811                 if (patt[pos] == '%')
5812                         sel *= FULL_WILDCARD_SEL;
5813                 else if (patt[pos] == '_')
5814                         sel *= ANY_CHAR_SEL;
5815                 else if (patt[pos] == '\\')
5816                 {
5817                         /* Backslash quotes the next character */
5818                         pos++;
5819                         if (pos >= pattlen)
5820                                 break;
5821                         sel *= FIXED_CHAR_SEL;
5822                 }
5823                 else
5824                         sel *= FIXED_CHAR_SEL;
5825         }
5826         /* Could get sel > 1 if multiple wildcards */
5827         if (sel > 1.0)
5828                 sel = 1.0;
5829         return sel;
5830 }
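
/*
 * Illustrative example (hypothetical pattern fragment): for "ab%c_",
 *              like_selectivity("ab%c_", 5, false)
 * multiplies 0.2 * 0.2 for the fixed characters 'a' and 'b', then 5.0 for
 * '%', 0.2 for 'c', and 0.9 for '_', giving 0.036.  A fragment such as
 * "a%%" would come out as 0.2 * 5.0 * 5.0 = 5.0 and be clamped to 1.0.
 */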
5831
5832 static Selectivity
5833 regex_selectivity_sub(const char *patt, int pattlen, bool case_insensitive)
5834 {
5835         Selectivity sel = 1.0;
5836         int                     paren_depth = 0;
5837         int                     paren_pos = 0;  /* dummy init to keep compiler quiet */
5838         int                     pos;
5839
5840         for (pos = 0; pos < pattlen; pos++)
5841         {
5842                 if (patt[pos] == '(')
5843                 {
5844                         if (paren_depth == 0)
5845                                 paren_pos = pos;        /* remember start of parenthesized item */
5846                         paren_depth++;
5847                 }
5848                 else if (patt[pos] == ')' && paren_depth > 0)
5849                 {
5850                         paren_depth--;
5851                         if (paren_depth == 0)
5852                                 sel *= regex_selectivity_sub(patt + (paren_pos + 1),
5853                                                                                          pos - (paren_pos + 1),
5854                                                                                          case_insensitive);
5855                 }
5856                 else if (patt[pos] == '|' && paren_depth == 0)
5857                 {
5858                         /*
5859                          * If unquoted | is present at paren level 0 in pattern, we have
5860                          * multiple alternatives; sum their probabilities.
5861                          */
5862                         sel += regex_selectivity_sub(patt + (pos + 1),
5863                                                                                  pattlen - (pos + 1),
5864                                                                                  case_insensitive);
5865                         break;                          /* rest of pattern is now processed */
5866                 }
5867                 else if (patt[pos] == '[')
5868                 {
5869                         bool            negclass = false;
5870
5871                         if (patt[++pos] == '^')
5872                         {
5873                                 negclass = true;
5874                                 pos++;
5875                         }
5876                         if (patt[pos] == ']')           /* ']' at start of class is not
5877                                                                                  * special */
5878                                 pos++;
5879                         while (pos < pattlen && patt[pos] != ']')
5880                                 pos++;
5881                         if (paren_depth == 0)
5882                                 sel *= (negclass ? (1.0 - CHAR_RANGE_SEL) : CHAR_RANGE_SEL);
5883                 }
5884                 else if (patt[pos] == '.')
5885                 {
5886                         if (paren_depth == 0)
5887                                 sel *= ANY_CHAR_SEL;
5888                 }
5889                 else if (patt[pos] == '*' ||
5890                                  patt[pos] == '?' ||
5891                                  patt[pos] == '+')
5892                 {
5893                         /* Ought to be smarter about quantifiers... */
5894                         if (paren_depth == 0)
5895                                 sel *= PARTIAL_WILDCARD_SEL;
5896                 }
5897                 else if (patt[pos] == '{')
5898                 {
5899                         while (pos < pattlen && patt[pos] != '}')
5900                                 pos++;
5901                         if (paren_depth == 0)
5902                                 sel *= PARTIAL_WILDCARD_SEL;
5903                 }
5904                 else if (patt[pos] == '\\')
5905                 {
5906                         /* backslash quotes the next character */
5907                         pos++;
5908                         if (pos >= pattlen)
5909                                 break;
5910                         if (paren_depth == 0)
5911                                 sel *= FIXED_CHAR_SEL;
5912                 }
5913                 else
5914                 {
5915                         if (paren_depth == 0)
5916                                 sel *= FIXED_CHAR_SEL;
5917                 }
5918         }
5919         /* Could get sel > 1 if multiple wildcards */
5920         if (sel > 1.0)
5921                 sel = 1.0;
5922         return sel;
5923 }
5924
5925 static Selectivity
5926 regex_selectivity(const char *patt, int pattlen, bool case_insensitive,
5927                                   int fixed_prefix_len)
5928 {
5929         Selectivity sel;
5930
5931         /* If patt doesn't end with $, consider it to have a trailing wildcard */
5932         if (pattlen > 0 && patt[pattlen - 1] == '$' &&
5933                 (pattlen == 1 || patt[pattlen - 2] != '\\'))
5934         {
5935                 /* has trailing $ */
5936                 sel = regex_selectivity_sub(patt, pattlen - 1, case_insensitive);
5937         }
5938         else
5939         {
5940                 /* no trailing $ */
5941                 sel = regex_selectivity_sub(patt, pattlen, case_insensitive);
5942                 sel *= FULL_WILDCARD_SEL;
5943         }
5944
5945         /* If there's a fixed prefix, discount its selectivity */
5946         if (fixed_prefix_len > 0)
5947                 sel /= pow(FIXED_CHAR_SEL, fixed_prefix_len);
5948
5949         /* Make sure result stays in range */
5950         CLAMP_PROBABILITY(sel);
5951         return sel;
5952 }
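
/*
 * Illustrative example (hypothetical pattern fragment): for "abc.*" with
 * fixed_prefix_len = 0, regex_selectivity_sub() gives
 *              0.2 * 0.2 * 0.2 (fixed chars) * 0.9 ('.') * 2.0 ('*') = 0.0144,
 * and since there is no trailing '$' we multiply by FULL_WILDCARD_SEL to get
 * 0.072.  A nonzero fixed_prefix_len would then divide the result by
 * pow(0.2, fixed_prefix_len) before the final clamp to [0, 1].
 */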
5953
5954
5955 /*
5956  * For bytea, the increment function need only increment the current byte
5957  * (there are no multibyte characters to worry about).
5958  */
5959 static bool
5960 byte_increment(unsigned char *ptr, int len)
5961 {
5962         if (*ptr >= 255)
5963                 return false;
5964         (*ptr)++;
5965         return true;
5966 }
5967
5968 /*
5969  * Try to generate a string greater than the given string or any
5970  * string it is a prefix of.  If successful, return a palloc'd string
5971  * in the form of a Const node; else return NULL.
5972  *
5973  * The caller must provide the appropriate "less than" comparison function
5974  * for testing the strings, along with the collation to use.
5975  *
5976  * The key requirement here is that given a prefix string, say "foo",
5977  * we must be able to generate another string "fop" that is greater than
5978  * all strings "foobar" starting with "foo".  We can test that we have
5979  * generated a string greater than the prefix string, but in non-C collations
5980  * that does not guarantee that no extension of the string sorts after it;
5981  * an example is that "foo " is less than "foo!", but it
5982  * is not clear that a "dictionary" sort ordering will consider "foo!" less
5983  * than "foo bar".  CAUTION: Therefore, this function should be used only for
5984  * estimation purposes when working in a non-C collation.
5985  *
5986  * To try to catch most cases where an extended string might otherwise sort
5987  * before the result value, we determine which of the strings "Z", "z", "y",
5988  * and "9" is seen as largest by the collation, and append that to the given
5989  * prefix before trying to find a string that compares as larger.
5990  *
5991  * To search for a greater string, we repeatedly "increment" the rightmost
5992  * character, using an encoding-specific character incrementer function.
5993  * When it's no longer possible to increment the last character, we truncate
5994  * off that character and start incrementing the next-to-rightmost.
5995  * For example, if "z" were the last character in the sort order, then we
5996  * could produce "foo" as a string greater than "fonz".
5997  *
5998  * This could be rather slow in the worst case, but in most cases we
5999  * won't have to try more than one or two strings before succeeding.
6000  *
6001  * Note that it's important for the character incrementer not to be too anal
6002  * about producing every possible character code, since in some cases the only
6003  * way to get a larger string is to increment a previous character position.
6004  * So we don't want to spend too much time trying every possible character
6005  * code at the last position.  A good rule of thumb is to be sure that we
6006  * don't try more than 256*K values for a K-byte character (and definitely
6007  * not 256^K, which is what an exhaustive search would approach).
6008  */
6009 Const *
6010 make_greater_string(const Const *str_const, FmgrInfo *ltproc, Oid collation)
6011 {
6012         Oid                     datatype = str_const->consttype;
6013         char       *workstr;
6014         int                     len;
6015         Datum           cmpstr;
6016         text       *cmptxt = NULL;
6017         mbcharacter_incrementer charinc;
6018
6019         /*
6020          * Get a modifiable copy of the prefix string in C-string format, and set
6021          * up the string we will compare to as a Datum.  In C locale this can just
6022          * be the given prefix string, otherwise we need to add a suffix.  Types
6023          * NAME and BYTEA sort bytewise so they don't need a suffix either.
6024          */
6025         if (datatype == NAMEOID)
6026         {
6027                 workstr = DatumGetCString(DirectFunctionCall1(nameout,
6028                                                                                                           str_const->constvalue));
6029                 len = strlen(workstr);
6030                 cmpstr = str_const->constvalue;
6031         }
6032         else if (datatype == BYTEAOID)
6033         {
6034                 bytea      *bstr = DatumGetByteaPP(str_const->constvalue);
6035
6036                 len = VARSIZE_ANY_EXHDR(bstr);
6037                 workstr = (char *) palloc(len);
6038                 memcpy(workstr, VARDATA_ANY(bstr), len);
6039                 Assert((Pointer) bstr == DatumGetPointer(str_const->constvalue));
6040                 cmpstr = str_const->constvalue;
6041         }
6042         else
6043         {
6044                 workstr = TextDatumGetCString(str_const->constvalue);
6045                 len = strlen(workstr);
6046                 if (lc_collate_is_c(collation) || len == 0)
6047                         cmpstr = str_const->constvalue;
6048                 else
6049                 {
6050                         /* If first time through, determine the suffix to use */
6051                         static char suffixchar = 0;
6052                         static Oid      suffixcollation = 0;
6053
6054                         if (!suffixchar || suffixcollation != collation)
6055                         {
6056                                 char       *best;
6057
6058                                 best = "Z";
6059                                 if (varstr_cmp(best, 1, "z", 1, collation) < 0)
6060                                         best = "z";
6061                                 if (varstr_cmp(best, 1, "y", 1, collation) < 0)
6062                                         best = "y";
6063                                 if (varstr_cmp(best, 1, "9", 1, collation) < 0)
6064                                         best = "9";
6065                                 suffixchar = *best;
6066                                 suffixcollation = collation;
6067                         }
6068
6069                         /* And build the string to compare to */
6070                         cmptxt = (text *) palloc(VARHDRSZ + len + 1);
6071                         SET_VARSIZE(cmptxt, VARHDRSZ + len + 1);
6072                         memcpy(VARDATA(cmptxt), workstr, len);
6073                         *(VARDATA(cmptxt) + len) = suffixchar;
6074                         cmpstr = PointerGetDatum(cmptxt);
6075                 }
6076         }
6077
6078         /* Select appropriate character-incrementer function */
6079         if (datatype == BYTEAOID)
6080                 charinc = byte_increment;
6081         else
6082                 charinc = pg_database_encoding_character_incrementer();
6083
6084         /* And search ... */
6085         while (len > 0)
6086         {
6087                 int                     charlen;
6088                 unsigned char *lastchar;
6089
6090                 /* Identify the last character --- for bytea, just the last byte */
6091                 if (datatype == BYTEAOID)
6092                         charlen = 1;
6093                 else
6094                         charlen = len - pg_mbcliplen(workstr, len, len - 1);
6095                 lastchar = (unsigned char *) (workstr + len - charlen);
6096
6097                 /*
6098                  * Try to generate a larger string by incrementing the last character
6099                  * (for BYTEA, we treat each byte as a character).
6100                  *
6101                  * Note: the incrementer function is expected to return true if it's
6102                  * generated a valid-per-the-encoding new character, otherwise false.
6103                  * The contents of the character on false return are unspecified.
6104                  */
6105                 while (charinc(lastchar, charlen))
6106                 {
6107                         Const      *workstr_const;
6108
6109                         if (datatype == BYTEAOID)
6110                                 workstr_const = string_to_bytea_const(workstr, len);
6111                         else
6112                                 workstr_const = string_to_const(workstr, datatype);
6113
6114                         if (DatumGetBool(FunctionCall2Coll(ltproc,
6115                                                                                            collation,
6116                                                                                            cmpstr,
6117                                                                                            workstr_const->constvalue)))
6118                         {
6119                                 /* Successfully made a string larger than cmpstr */
6120                                 if (cmptxt)
6121                                         pfree(cmptxt);
6122                                 pfree(workstr);
6123                                 return workstr_const;
6124                         }
6125
6126                         /* No good, release unusable value and try again */
6127                         pfree(DatumGetPointer(workstr_const->constvalue));
6128                         pfree(workstr_const);
6129                 }
6130
6131                 /*
6132                  * No luck here, so truncate off the last character and try to
6133                  * increment the next one.
6134                  */
6135                 len -= charlen;
6136                 workstr[len] = '\0';
6137         }
6138
6139         /* Failed... */
6140         if (cmptxt)
6141                 pfree(cmptxt);
6142         pfree(workstr);
6143
6144         return NULL;
6145 }
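
/*
 * To make the search loop above concrete, consider the prefix "fonz" in a
 * collation where 'z' happens to be the largest character (an assumption
 * made purely for illustration): incrementing the final 'z' produces no
 * larger character, so we truncate to "fon" and increment the new last
 * character instead, yielding "foo", which sorts after every string that
 * begins with "fonz".
 */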
6146
6147 /*
6148  * Generate a Datum of the appropriate type from a C string.
6149  * Note that all of the supported types are pass-by-ref, so the
6150  * returned value should be pfree'd if no longer needed.
6151  */
6152 static Datum
6153 string_to_datum(const char *str, Oid datatype)
6154 {
6155         Assert(str != NULL);
6156
6157         /*
6158          * We cheat a little by assuming that CStringGetTextDatum() will do for
6159          * bpchar and varchar constants too...
6160          */
6161         if (datatype == NAMEOID)
6162                 return DirectFunctionCall1(namein, CStringGetDatum(str));
6163         else if (datatype == BYTEAOID)
6164                 return DirectFunctionCall1(byteain, CStringGetDatum(str));
6165         else
6166                 return CStringGetTextDatum(str);
6167 }
6168
6169 /*
6170  * Generate a Const node of the appropriate type from a C string.
6171  */
6172 static Const *
6173 string_to_const(const char *str, Oid datatype)
6174 {
6175         Datum           conval = string_to_datum(str, datatype);
6176         Oid                     collation;
6177         int                     constlen;
6178
6179         /*
6180          * We only need to support a few datatypes here, so hard-wire properties
6181          * instead of incurring the expense of catalog lookups.
6182          */
6183         switch (datatype)
6184         {
6185                 case TEXTOID:
6186                 case VARCHAROID:
6187                 case BPCHAROID:
6188                         collation = DEFAULT_COLLATION_OID;
6189                         constlen = -1;
6190                         break;
6191
6192                 case NAMEOID:
6193                         collation = InvalidOid;
6194                         constlen = NAMEDATALEN;
6195                         break;
6196
6197                 case BYTEAOID:
6198                         collation = InvalidOid;
6199                         constlen = -1;
6200                         break;
6201
6202                 default:
6203                         elog(ERROR, "unexpected datatype in string_to_const: %u",
6204                                  datatype);
6205                         return NULL;
6206         }
6207
6208         return makeConst(datatype, -1, collation, constlen,
6209                                          conval, false, false);
6210 }
6211
6212 /*
6213  * Generate a Const node of bytea type from a binary C string and a length.
6214  */
6215 static Const *
6216 string_to_bytea_const(const char *str, size_t str_len)
6217 {
6218         bytea      *bstr = palloc(VARHDRSZ + str_len);
6219         Datum           conval;
6220
6221         memcpy(VARDATA(bstr), str, str_len);
6222         SET_VARSIZE(bstr, VARHDRSZ + str_len);
6223         conval = PointerGetDatum(bstr);
6224
6225         return makeConst(BYTEAOID, -1, InvalidOid, -1, conval, false, false);
6226 }
6227
6228 /*-------------------------------------------------------------------------
6229  *
6230  * Index cost estimation functions
6231  *
6232  *-------------------------------------------------------------------------
6233  */
6234
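/*
 * Examine the indexquals attached to a proposed IndexPath and build a list
 * of IndexQualInfo structs, one per qual expression, recording which index
 * column each qual constrains, which side the indexed variable is on, the
 * clause operator, and the "other" (comparison) operand.
 */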
6235 List *
6236 deconstruct_indexquals(IndexPath *path)
6237 {
6238         List       *result = NIL;
6239         IndexOptInfo *index = path->indexinfo;
6240         ListCell   *lcc,
6241                            *lci;
6242
6243         forboth(lcc, path->indexquals, lci, path->indexqualcols)
6244         {
6245                 RestrictInfo *rinfo = castNode(RestrictInfo, lfirst(lcc));
6246                 int                     indexcol = lfirst_int(lci);
6247                 Expr       *clause;
6248                 Node       *leftop,
6249                                    *rightop;
6250                 IndexQualInfo *qinfo;
6251
6252                 clause = rinfo->clause;
6253
6254                 qinfo = (IndexQualInfo *) palloc(sizeof(IndexQualInfo));
6255                 qinfo->rinfo = rinfo;
6256                 qinfo->indexcol = indexcol;
6257
6258                 if (IsA(clause, OpExpr))
6259                 {
6260                         qinfo->clause_op = ((OpExpr *) clause)->opno;
6261                         leftop = get_leftop(clause);
6262                         rightop = get_rightop(clause);
6263                         if (match_index_to_operand(leftop, indexcol, index))
6264                         {
6265                                 qinfo->varonleft = true;
6266                                 qinfo->other_operand = rightop;
6267                         }
6268                         else
6269                         {
6270                                 Assert(match_index_to_operand(rightop, indexcol, index));
6271                                 qinfo->varonleft = false;
6272                                 qinfo->other_operand = leftop;
6273                         }
6274                 }
6275                 else if (IsA(clause, RowCompareExpr))
6276                 {
6277                         RowCompareExpr *rc = (RowCompareExpr *) clause;
6278
6279                         qinfo->clause_op = linitial_oid(rc->opnos);
6280                         /* Examine only first columns to determine left/right sides */
6281                         if (match_index_to_operand((Node *) linitial(rc->largs),
6282                                                                            indexcol, index))
6283                         {
6284                                 qinfo->varonleft = true;
6285                                 qinfo->other_operand = (Node *) rc->rargs;
6286                         }
6287                         else
6288                         {
6289                                 Assert(match_index_to_operand((Node *) linitial(rc->rargs),
6290                                                                                           indexcol, index));
6291                                 qinfo->varonleft = false;
6292                                 qinfo->other_operand = (Node *) rc->largs;
6293                         }
6294                 }
6295                 else if (IsA(clause, ScalarArrayOpExpr))
6296                 {
6297                         ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) clause;
6298
6299                         qinfo->clause_op = saop->opno;
6300                         /* index column is always on the left in this case */
6301                         Assert(match_index_to_operand((Node *) linitial(saop->args),
6302                                                                                   indexcol, index));
6303                         qinfo->varonleft = true;
6304                         qinfo->other_operand = (Node *) lsecond(saop->args);
6305                 }
6306                 else if (IsA(clause, NullTest))
6307                 {
6308                         qinfo->clause_op = InvalidOid;
6309                         Assert(match_index_to_operand((Node *) ((NullTest *) clause)->arg,
6310                                                                                   indexcol, index));
6311                         qinfo->varonleft = true;
6312                         qinfo->other_operand = NULL;
6313                 }
6314                 else
6315                 {
6316                         elog(ERROR, "unsupported indexqual type: %d",
6317                                  (int) nodeTag(clause));
6318                 }
6319
6320                 result = lappend(result, qinfo);
6321         }
6322         return result;
6323 }
6324
6325 /*
6326  * Simple function to compute the total eval cost of the "other operands"
6327  * in an IndexQualInfo list.  Since we know these will be evaluated just
6328  * once per scan, there's no need to distinguish startup from per-row cost.
6329  */
6330 static Cost
6331 other_operands_eval_cost(PlannerInfo *root, List *qinfos)
6332 {
6333         Cost            qual_arg_cost = 0;
6334         ListCell   *lc;
6335
6336         foreach(lc, qinfos)
6337         {
6338                 IndexQualInfo *qinfo = (IndexQualInfo *) lfirst(lc);
6339                 QualCost        index_qual_cost;
6340
6341                 cost_qual_eval_node(&index_qual_cost, qinfo->other_operand, root);
6342                 qual_arg_cost += index_qual_cost.startup + index_qual_cost.per_tuple;
6343         }
6344         return qual_arg_cost;
6345 }
6346
6347 /*
6348  * Get other-operand eval cost for an index orderby list.
6349  *
6350  * Index orderby expressions aren't represented as RestrictInfos (since they
6351  * aren't boolean, usually).  So we can't apply deconstruct_indexquals to
6352  * them.  However, they are much simpler to deal with since they are always
6353  * OpExprs and the index column is always on the left.
6354  */
6355 static Cost
6356 orderby_operands_eval_cost(PlannerInfo *root, IndexPath *path)
6357 {
6358         Cost            qual_arg_cost = 0;
6359         ListCell   *lc;
6360
6361         foreach(lc, path->indexorderbys)
6362         {
6363                 Expr       *clause = (Expr *) lfirst(lc);
6364                 Node       *other_operand;
6365                 QualCost        index_qual_cost;
6366
6367                 if (IsA(clause, OpExpr))
6368                 {
6369                         other_operand = get_rightop(clause);
6370                 }
6371                 else
6372                 {
6373                         elog(ERROR, "unsupported indexorderby type: %d",
6374                                  (int) nodeTag(clause));
6375                         other_operand = NULL;           /* keep compiler quiet */
6376                 }
6377
6378                 cost_qual_eval_node(&index_qual_cost, other_operand, root);
6379                 qual_arg_cost += index_qual_cost.startup + index_qual_cost.per_tuple;
6380         }
6381         return qual_arg_cost;
6382 }
6383
6384 void
6385 genericcostestimate(PlannerInfo *root,
6386                                         IndexPath *path,
6387                                         double loop_count,
6388                                         List *qinfos,
6389                                         GenericCosts *costs)
6390 {
6391         IndexOptInfo *index = path->indexinfo;
6392         List       *indexQuals = path->indexquals;
6393         List       *indexOrderBys = path->indexorderbys;
6394         Cost            indexStartupCost;
6395         Cost            indexTotalCost;
6396         Selectivity indexSelectivity;
6397         double          indexCorrelation;
6398         double          numIndexPages;
6399         double          numIndexTuples;
6400         double          spc_random_page_cost;
6401         double          num_sa_scans;
6402         double          num_outer_scans;
6403         double          num_scans;
6404         double          qual_op_cost;
6405         double          qual_arg_cost;
6406         List       *selectivityQuals;
6407         ListCell   *l;
6408
6409         /*
6410          * If the index is partial, AND the index predicate with the explicitly
6411          * given indexquals to produce a more accurate idea of the index
6412          * selectivity.
6413          */
6414         selectivityQuals = add_predicate_to_quals(index, indexQuals);
6415
6416         /*
6417          * Check for ScalarArrayOpExpr index quals, and estimate the number of
6418          * index scans that will be performed.
6419          */
6420         num_sa_scans = 1;
6421         foreach(l, indexQuals)
6422         {
6423                 RestrictInfo *rinfo = (RestrictInfo *) lfirst(l);
6424
6425                 if (IsA(rinfo->clause, ScalarArrayOpExpr))
6426                 {
6427                         ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) rinfo->clause;
6428                         int                     alength = estimate_array_length(lsecond(saop->args));
6429
6430                         if (alength > 1)
6431                                 num_sa_scans *= alength;
6432                 }
6433         }
6434
6435         /* Estimate the fraction of main-table tuples that will be visited */
6436         indexSelectivity = clauselist_selectivity(root, selectivityQuals,
6437                                                                                           index->rel->relid,
6438                                                                                           JOIN_INNER,
6439                                                                                           NULL);
6440
6441         /*
6442          * If caller didn't give us an estimate, estimate the number of index
6443          * tuples that will be visited.  We do it in this rather peculiar-looking
6444          * way in order to get the right answer for partial indexes.
6445          */
6446         numIndexTuples = costs->numIndexTuples;
6447         if (numIndexTuples <= 0.0)
6448         {
6449                 numIndexTuples = indexSelectivity * index->rel->tuples;
6450
6451                 /*
6452                  * The above calculation counts all the tuples visited across all
6453                  * scans induced by ScalarArrayOpExpr nodes.  We want to consider the
6454                  * average per-indexscan number, so adjust.  This is a handy place to
6455                  * round to integer, too.  (If caller supplied tuple estimate, it's
6456                  * responsible for handling these considerations.)
6457                  */
6458                 numIndexTuples = rint(numIndexTuples / num_sa_scans);
6459         }
6460
6461         /*
6462          * We can bound the number of tuples by the index size in any case. Also,
6463          * always estimate at least one tuple is touched, even when
6464          * indexSelectivity estimate is tiny.
6465          */
6466         if (numIndexTuples > index->tuples)
6467                 numIndexTuples = index->tuples;
6468         if (numIndexTuples < 1.0)
6469                 numIndexTuples = 1.0;
6470
6471         /*
6472          * Estimate the number of index pages that will be retrieved.
6473          *
6474          * We use the simplistic method of taking a pro-rata fraction of the total
6475          * number of index pages.  In effect, this counts only leaf pages and not
6476          * any overhead such as index metapage or upper tree levels.
6477          *
6478          * In practice access to upper index levels is often nearly free because
6479          * those tend to stay in cache under load; moreover, the cost involved is
6480          * highly dependent on index type.  We therefore ignore such costs here
6481          * and leave it to the caller to add a suitable charge if needed.
6482          */
6483         if (index->pages > 1 && index->tuples > 1)
6484                 numIndexPages = ceil(numIndexTuples * index->pages / index->tuples);
6485         else
6486                 numIndexPages = 1.0;
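
        /*
         * For example (illustrative numbers): with index->pages = 1000,
         * index->tuples = 1,000,000 and numIndexTuples = 5000, the pro-rata
         * estimate is ceil(5000 * 1000 / 1000000) = 5 leaf pages fetched.
         */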
6487
6488         /* fetch estimated page cost for tablespace containing index */
6489         get_tablespace_page_costs(index->reltablespace,
6490                                                           &spc_random_page_cost,
6491                                                           NULL);
6492
6493         /*
6494          * Now compute the disk access costs.
6495          *
6496          * The above calculations are all per-index-scan.  However, if we are in a
6497          * nestloop inner scan, we can expect the scan to be repeated (with
6498          * different search keys) for each row of the outer relation.  Likewise,
6499          * ScalarArrayOpExpr quals result in multiple index scans.  This creates
6500          * the potential for cache effects to reduce the number of disk page
6501          * fetches needed.  We want to estimate the average per-scan I/O cost in
6502          * the presence of caching.
6503          *
6504          * We use the Mackert-Lohman formula (see costsize.c for details) to
6505          * estimate the total number of page fetches that occur.  While this
6506          * wasn't what it was designed for, it seems a reasonable model anyway.
6507          * Note that we are counting pages not tuples anymore, so we take N = T =
6508          * index size, as if there were one "tuple" per page.
6509          */
6510         num_outer_scans = loop_count;
6511         num_scans = num_sa_scans * num_outer_scans;
6512
6513         if (num_scans > 1)
6514         {
6515                 double          pages_fetched;
6516
6517                 /* total page fetches ignoring cache effects */
6518                 pages_fetched = numIndexPages * num_scans;
6519
6520                 /* use Mackert and Lohman formula to adjust for cache effects */
6521                 pages_fetched = index_pages_fetched(pages_fetched,
6522                                                                                         index->pages,
6523                                                                                         (double) index->pages,
6524                                                                                         root);
6525
6526                 /*
6527                  * Now compute the total disk access cost, and then report a pro-rated
6528                  * share for each outer scan.  (Don't pro-rate for ScalarArrayOpExpr,
6529                  * since that's internal to the indexscan.)
6530                  */
6531                 indexTotalCost = (pages_fetched * spc_random_page_cost)
6532                         / num_outer_scans;
6533         }
6534         else
6535         {
6536                 /*
6537                  * For a single index scan, we just charge spc_random_page_cost per
6538                  * page touched.
6539                  */
6540                 indexTotalCost = numIndexPages * spc_random_page_cost;
6541         }
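
        /*
         * Worked example (all numbers illustrative): with loop_count = 10 and
         * num_sa_scans = 2 we have num_scans = 20; if numIndexPages = 5, the
         * raw estimate is 100 page fetches.  Suppose index_pages_fetched()
         * discounts that to 60 for cache effects; then the per-outer-scan
         * charge is 60 * spc_random_page_cost / 10, i.e. 24.0 at the default
         * random_page_cost of 4.0.
         */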
6542
6543         /*
6544          * CPU cost: any complex expressions in the indexquals will need to be
6545          * evaluated once at the start of the scan to reduce them to runtime keys
6546          * to pass to the index AM (see nodeIndexscan.c).  We model the per-tuple
6547          * CPU costs as cpu_index_tuple_cost plus one cpu_operator_cost per
6548          * indexqual operator.  Because we have numIndexTuples as a per-scan
6549          * number, we have to multiply by num_sa_scans to get the correct result
6550          * for ScalarArrayOpExpr cases.  Similarly add in costs for any index
6551          * ORDER BY expressions.
6552          *
6553          * Note: this neglects the possible costs of rechecking lossy operators.
6554          * Detecting that that might be needed seems more expensive than it's
6555          * worth, though, considering all the other inaccuracies here ...
6556          */
6557         qual_arg_cost = other_operands_eval_cost(root, qinfos) +
6558                 orderby_operands_eval_cost(root, path);
6559         qual_op_cost = cpu_operator_cost *
6560                 (list_length(indexQuals) + list_length(indexOrderBys));
6561
6562         indexStartupCost = qual_arg_cost;
6563         indexTotalCost += qual_arg_cost;
6564         indexTotalCost += numIndexTuples * num_sa_scans * (cpu_index_tuple_cost + qual_op_cost);
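
        /*
         * For instance (illustrative numbers): with two indexquals and no
         * ORDER BY expressions, qual_op_cost = 2 * cpu_operator_cost = 0.005
         * at the default settings; if numIndexTuples = 5000 and
         * num_sa_scans = 2, the per-tuple CPU charge adds
         * 5000 * 2 * (0.005 + 0.005) = 100 to the total cost.
         */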
6565
6566         /*
6567          * Generic assumption about index correlation: there isn't any.
6568          */
6569         indexCorrelation = 0.0;
6570
6571         /*
6572          * Return everything to caller.
6573          */
6574         costs->indexStartupCost = indexStartupCost;
6575         costs->indexTotalCost = indexTotalCost;
6576         costs->indexSelectivity = indexSelectivity;
6577         costs->indexCorrelation = indexCorrelation;
6578         costs->numIndexPages = numIndexPages;
6579         costs->numIndexTuples = numIndexTuples;
6580         costs->spc_random_page_cost = spc_random_page_cost;
6581         costs->num_sa_scans = num_sa_scans;
6582 }
6583
6584 /*
6585  * If the index is partial, add its predicate to the given qual list.
6586  *
6587  * ANDing the index predicate with the explicitly given indexquals produces
6588  * a more accurate idea of the index's selectivity.  However, we need to be
6589  * careful not to insert redundant clauses, because clauselist_selectivity()
6590  * is easily fooled into computing a too-low selectivity estimate.  Our
6591  * approach is to add only the predicate clause(s) that cannot be proven to
6592  * be implied by the given indexquals.  This successfully handles cases such
6593  * as a qual "x = 42" used with a partial index "WHERE x >= 40 AND x < 50".
6594  * There are many other cases where we won't detect redundancy, leading to a
6595  * too-low selectivity estimate, which will bias the system in favor of using
6596  * partial indexes where possible.  That is not necessarily bad though.
6597  *
6598  * Note that indexQuals contains RestrictInfo nodes while the indpred
6599  * does not, so the output list will be mixed.  This is OK for both
6600  * predicate_implied_by() and clauselist_selectivity(), but might be
6601  * problematic if the result were passed to other things.
6602  */
6603 static List *
6604 add_predicate_to_quals(IndexOptInfo *index, List *indexQuals)
6605 {
6606         List       *predExtraQuals = NIL;
6607         ListCell   *lc;
6608
6609         if (index->indpred == NIL)
6610                 return indexQuals;
6611
6612         foreach(lc, index->indpred)
6613         {
6614                 Node       *predQual = (Node *) lfirst(lc);
6615                 List       *oneQual = list_make1(predQual);
6616
6617                 if (!predicate_implied_by(oneQual, indexQuals))
6618                         predExtraQuals = list_concat(predExtraQuals, oneQual);
6619         }
6620         /* list_concat avoids modifying the passed-in indexQuals list */
6621         return list_concat(predExtraQuals, indexQuals);
6622 }
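
/*
 * For example, with a partial index "WHERE x >= 40 AND x < 50" and the qual
 * "x = 42", predicate_implied_by() can prove both predicate clauses from the
 * qual, so nothing extra is added and the selectivity comes from "x = 42"
 * alone.  With the qual "x > 45" instead, "x >= 40" is provably implied but
 * "x < 50" is not, so "x < 50" would be included in the returned list.
 */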
6623
6624
6625 void
6626 btcostestimate(PlannerInfo *root, IndexPath *path, double loop_count,
6627                            Cost *indexStartupCost, Cost *indexTotalCost,
6628                            Selectivity *indexSelectivity, double *indexCorrelation,
6629                            double *indexPages)
6630 {
6631         IndexOptInfo *index = path->indexinfo;
6632         List       *qinfos;
6633         GenericCosts costs;
6634         Oid                     relid;
6635         AttrNumber      colnum;
6636         VariableStatData vardata;
6637         double          numIndexTuples;
6638         Cost            descentCost;
6639         List       *indexBoundQuals;
6640         int                     indexcol;
6641         bool            eqQualHere;
6642         bool            found_saop;
6643         bool            found_is_null_op;
6644         double          num_sa_scans;
6645         ListCell   *lc;
6646
6647         /* Do preliminary analysis of indexquals */
6648         qinfos = deconstruct_indexquals(path);
6649
6650         /*
6651          * For a btree scan, only leading '=' quals plus inequality quals for the
6652          * immediately next attribute contribute to index selectivity (these are
6653          * the "boundary quals" that determine the starting and stopping points of
6654          * the index scan).  Additional quals can suppress visits to the heap, so
6655          * it's OK to count them in indexSelectivity, but they should not count
6656          * for estimating numIndexTuples.  So we must examine the given indexquals
6657          * to find out which ones count as boundary quals.  We rely on the
6658          * knowledge that they are given in index column order.
6659          *
6660          * For a RowCompareExpr, we consider only the first column, just as
6661          * rowcomparesel() does.
6662          *
6663          * If there's a ScalarArrayOpExpr in the quals, we'll actually perform N
6664          * index scans not one, but the ScalarArrayOpExpr's operator can be
6665          * considered to act the same as it normally does.
6666          */
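        /*
         * For example, given a three-column index on (a, b, c) and the quals
         * "a = 5 AND b >= 10 AND c < 20", the loop below treats "a = 5" and
         * "b >= 10" as boundary quals; because there is no '=' qual on b, the
         * scan of the qual list stops at column c, so "c < 20" is excluded
         * from the numIndexTuples estimate even though it still contributes
         * to indexSelectivity.
         */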
6667         indexBoundQuals = NIL;
6668         indexcol = 0;
6669         eqQualHere = false;
6670         found_saop = false;
6671         found_is_null_op = false;
6672         num_sa_scans = 1;
6673         foreach(lc, qinfos)
6674         {
6675                 IndexQualInfo *qinfo = (IndexQualInfo *) lfirst(lc);
6676                 RestrictInfo *rinfo = qinfo->rinfo;
6677                 Expr       *clause = rinfo->clause;
6678                 Oid                     clause_op;
6679                 int                     op_strategy;
6680
6681                 if (indexcol != qinfo->indexcol)
6682                 {
6683                         /* Beginning of a new column's quals */
6684                         if (!eqQualHere)
6685                                 break;                  /* done if no '=' qual for indexcol */
6686                         eqQualHere = false;
6687                         indexcol++;
6688                         if (indexcol != qinfo->indexcol)
6689                                 break;                  /* no quals at all for indexcol */
6690                 }
6691
6692                 if (IsA(clause, ScalarArrayOpExpr))
6693                 {
6694                         int                     alength = estimate_array_length(qinfo->other_operand);
6695
6696                         found_saop = true;
6697                         /* count up number of SA scans induced by indexBoundQuals only */
6698                         if (alength > 1)
6699                                 num_sa_scans *= alength;
6700                 }
6701                 else if (IsA(clause, NullTest))
6702                 {
6703                         NullTest   *nt = (NullTest *) clause;
6704
6705                         if (nt->nulltesttype == IS_NULL)
6706                         {
6707                                 found_is_null_op = true;
6708                                 /* IS NULL is like = for selectivity determination purposes */
6709                                 eqQualHere = true;
6710                         }
6711                 }
6712
6713                 /*
6714                  * We would need to commute the clause_op if not varonleft, except
6715                  * that we only care if it's equality or not, so that refinement is
6716                  * unnecessary.
6717                  */
6718                 clause_op = qinfo->clause_op;
6719
6720                 /* check for equality operator */
6721                 if (OidIsValid(clause_op))
6722                 {
6723                         op_strategy = get_op_opfamily_strategy(clause_op,
6724                                                                                                    index->opfamily[indexcol]);
6725                         Assert(op_strategy != 0);       /* not a member of opfamily?? */
6726                         if (op_strategy == BTEqualStrategyNumber)
6727                                 eqQualHere = true;
6728                 }
6729
6730                 indexBoundQuals = lappend(indexBoundQuals, rinfo);
6731         }
6732
6733         /*
6734          * If index is unique and we found an '=' clause for each column, we can
6735          * just assume numIndexTuples = 1 and skip the expensive
6736          * clauselist_selectivity calculations.  However, a ScalarArrayOp or
6737          * NullTest invalidates that theory, even though it sets eqQualHere.
6738          */
6739         if (index->unique &&
6740                 indexcol == index->ncolumns - 1 &&
6741                 eqQualHere &&
6742                 !found_saop &&
6743                 !found_is_null_op)
6744                 numIndexTuples = 1.0;
6745         else
6746         {
6747                 List       *selectivityQuals;
6748                 Selectivity btreeSelectivity;
6749
6750                 /*
6751                  * If the index is partial, AND the index predicate with the
6752                  * index-bound quals to produce a more accurate idea of the number of
6753                  * rows covered by the bound conditions.
6754                  */
6755                 selectivityQuals = add_predicate_to_quals(index, indexBoundQuals);
6756
6757                 btreeSelectivity = clauselist_selectivity(root, selectivityQuals,
6758                                                                                                   index->rel->relid,
6759                                                                                                   JOIN_INNER,
6760                                                                                                   NULL);
6761                 numIndexTuples = btreeSelectivity * index->rel->tuples;
6762
6763                 /*
6764                  * As in genericcostestimate(), we have to adjust for any
6765                  * ScalarArrayOpExpr quals included in indexBoundQuals, and then round
6766                  * to integer.
6767                  */
6768                 numIndexTuples = rint(numIndexTuples / num_sa_scans);
6769         }
6770
6771         /*
6772          * Now do generic index cost estimation.
6773          */
6774         MemSet(&costs, 0, sizeof(costs));
6775         costs.numIndexTuples = numIndexTuples;
6776
6777         genericcostestimate(root, path, loop_count, qinfos, &costs);
6778
6779         /*
6780          * Add a CPU-cost component to represent the costs of initial btree
6781          * descent.  We don't charge any I/O cost for touching upper btree levels,
6782          * since they tend to stay in cache, but we still have to do about log2(N)
6783          * comparisons to descend a btree of N leaf tuples.  We charge one
6784          * cpu_operator_cost per comparison.
6785          *
6786          * If there are ScalarArrayOpExprs, charge this once per SA scan.  The
6787          * ones after the first one are not startup cost so far as the overall
6788          * plan is concerned, so add them only to "total" cost.
6789          */
6790         if (index->tuples > 1)          /* avoid computing log(0) */
6791         {
6792                 descentCost = ceil(log(index->tuples) / log(2.0)) * cpu_operator_cost;
6793                 costs.indexStartupCost += descentCost;
6794                 costs.indexTotalCost += costs.num_sa_scans * descentCost;
6795         }
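
        /*
         * For instance, an index with 1,000,000 tuples needs about
         * ceil(log2(1000000)) = 20 comparisons per descent, so descentCost is
         * 20 * cpu_operator_cost = 0.05 at the default setting; with
         * num_sa_scans = 2 that adds 0.05 to startup cost and 0.10 to total
         * cost (illustrative numbers).
         */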
6796
6797         /*
6798          * Even though we're not charging I/O cost for touching upper btree pages,
6799          * it's still reasonable to charge some CPU cost per page descended
6800          * through.  Moreover, if we had no such charge at all, bloated indexes
6801          * would appear to have the same search cost as unbloated ones, at least
6802          * in cases where only a single leaf page is expected to be visited.  This
6803          * cost is somewhat arbitrarily set at 50x cpu_operator_cost per page
6804          * touched.  The number of such pages is btree tree height plus one (ie,
6805          * we charge for the leaf page too).  As above, charge once per SA scan.
6806          */
6807         descentCost = (index->tree_height + 1) * 50.0 * cpu_operator_cost;
6808         costs.indexStartupCost += descentCost;
6809         costs.indexTotalCost += costs.num_sa_scans * descentCost;
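
        /*
         * E.g. for a btree with tree_height = 2 we descend through
         * tree_height + 1 = 3 pages (including the leaf), so descentCost here
         * is 3 * 50 * cpu_operator_cost = 0.375 at the default
         * cpu_operator_cost of 0.0025 (illustrative numbers).
         */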
6810
6811         /*
6812          * If we can get an estimate of the first column's ordering correlation C
6813          * from pg_statistic, estimate the index correlation as C for a
6814          * single-column index, or C * 0.75 for multiple columns. (The idea here
6815          * is that multiple columns dilute the importance of the first column's
6816          * ordering, but don't negate it entirely.  Before 8.0 we divided the
6817          * correlation by the number of columns, but that seems too strong.)
6818          */
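
        /*
         * For example, if pg_statistic reports a correlation of 0.9 for the
         * first index column, a single-column index gets indexCorrelation =
         * 0.9 (or -0.9 if that column is indexed DESC), while a three-column
         * index gets 0.9 * 0.75 = 0.675 (illustrative numbers).
         */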
6819         MemSet(&vardata, 0, sizeof(vardata));
6820
6821         if (index->indexkeys[0] != 0)
6822         {
6823                 /* Simple variable --- look to stats for the underlying table */
6824                 RangeTblEntry *rte = planner_rt_fetch(index->rel->relid, root);
6825
6826                 Assert(rte->rtekind == RTE_RELATION);
6827                 relid = rte->relid;
6828                 Assert(relid != InvalidOid);
6829                 colnum = index->indexkeys[0];
6830
6831                 if (get_relation_stats_hook &&
6832                         (*get_relation_stats_hook) (root, rte, colnum, &vardata))
6833                 {
6834                         /*
6835                          * The hook took control of acquiring a stats tuple.  If it did
6836                          * supply a tuple, it'd better have supplied a freefunc.
6837                          */
6838                         if (HeapTupleIsValid(vardata.statsTuple) &&
6839                                 !vardata.freefunc)
6840                                 elog(ERROR, "no function provided to release variable stats with");
6841                 }
6842                 else
6843                 {
6844                         vardata.statsTuple = SearchSysCache3(STATRELATTINH,
6845                                                                                                  ObjectIdGetDatum(relid),
6846                                                                                                  Int16GetDatum(colnum),
6847                                                                                                  BoolGetDatum(rte->inh));
6848                         vardata.freefunc = ReleaseSysCache;
6849                 }
6850         }
6851         else
6852         {
6853                 /* Expression --- maybe there are stats for the index itself */
6854                 relid = index->indexoid;
6855                 colnum = 1;
6856
6857                 if (get_index_stats_hook &&
6858                         (*get_index_stats_hook) (root, relid, colnum, &vardata))
6859                 {
6860                         /*
6861                          * The hook took control of acquiring a stats tuple.  If it did
6862                          * supply a tuple, it'd better have supplied a freefunc.
6863                          */
6864                         if (HeapTupleIsValid(vardata.statsTuple) &&
6865                                 !vardata.freefunc)
6866                                 elog(ERROR, "no function provided to release variable stats with");
6867                 }
6868                 else
6869                 {
6870                         vardata.statsTuple = SearchSysCache3(STATRELATTINH,
6871                                                                                                  ObjectIdGetDatum(relid),
6872                                                                                                  Int16GetDatum(colnum),
6873                                                                                                  BoolGetDatum(false));
6874                         vardata.freefunc = ReleaseSysCache;
6875                 }
6876         }
6877
6878         if (HeapTupleIsValid(vardata.statsTuple))
6879         {
6880                 Oid                     sortop;
6881                 float4     *numbers;
6882                 int                     nnumbers;
6883
6884                 sortop = get_opfamily_member(index->opfamily[0],
6885                                                                          index->opcintype[0],
6886                                                                          index->opcintype[0],
6887                                                                          BTLessStrategyNumber);
6888                 if (OidIsValid(sortop) &&
6889                         get_attstatsslot(vardata.statsTuple, InvalidOid, 0,
6890                                                          STATISTIC_KIND_CORRELATION,
6891                                                          sortop,
6892                                                          NULL,
6893                                                          NULL, NULL,
6894                                                          &numbers, &nnumbers))
6895                 {
6896                         double          varCorrelation;
6897
6898                         Assert(nnumbers == 1);
6899                         varCorrelation = numbers[0];
6900
6901                         if (index->reverse_sort[0])
6902                                 varCorrelation = -varCorrelation;
6903
6904                         if (index->ncolumns > 1)
6905                                 costs.indexCorrelation = varCorrelation * 0.75;
6906                         else
6907                                 costs.indexCorrelation = varCorrelation;
6908
6909                         free_attstatsslot(InvalidOid, NULL, 0, numbers, nnumbers);
6910                 }
6911         }
6912
6913         ReleaseVariableStats(vardata);
6914
6915         *indexStartupCost = costs.indexStartupCost;
6916         *indexTotalCost = costs.indexTotalCost;
6917         *indexSelectivity = costs.indexSelectivity;
6918         *indexCorrelation = costs.indexCorrelation;
6919         *indexPages = costs.numIndexPages;
6920 }
6921
6922 void
6923 hashcostestimate(PlannerInfo *root, IndexPath *path, double loop_count,
6924                                  Cost *indexStartupCost, Cost *indexTotalCost,
6925                                  Selectivity *indexSelectivity, double *indexCorrelation,
6926                                  double *indexPages)
6927 {
6928         List       *qinfos;
6929         GenericCosts costs;
6930
6931         /* Do preliminary analysis of indexquals */
6932         qinfos = deconstruct_indexquals(path);
6933
6934         MemSet(&costs, 0, sizeof(costs));
6935
6936         genericcostestimate(root, path, loop_count, qinfos, &costs);
6937
6938         /*
6939          * A hash index has no descent costs as such, since the index AM can go
6940          * directly to the target bucket after computing the hash value.  There
6941          * are a couple of other hash-specific costs that we could conceivably add
6942          * here, though:
6943          *
6944          * Ideally we'd charge spc_random_page_cost for each page in the target
6945          * bucket, not just the numIndexPages pages that genericcostestimate
6946          * thought we'd visit.  However in most cases we don't know which bucket
6947          * that will be.  There's no point in considering the average bucket size
6948          * because the hash AM makes sure that's always one page.
6949          *
6950          * Likewise, we could consider charging some CPU for each index tuple in
6951          * the bucket, if we knew how many there were.  But the per-tuple cost is
6952          * just a hash value comparison, not a general datatype-dependent
6953          * comparison, so any such charge ought to be quite a bit less than
6954          * cpu_operator_cost; which makes it probably not worth worrying about.
6955          *
6956          * A bigger issue is that chance hash-value collisions will result in
6957          * wasted probes into the heap.  We don't currently attempt to model this
6958          * cost on the grounds that it's rare, but maybe it's not rare enough.
6959          * (Any fix for this ought to consider the generic lossy-operator problem,
6960          * though; it's not entirely hash-specific.)
6961          */
6962
6963         *indexStartupCost = costs.indexStartupCost;
6964         *indexTotalCost = costs.indexTotalCost;
6965         *indexSelectivity = costs.indexSelectivity;
6966         *indexCorrelation = costs.indexCorrelation;
6967         *indexPages = costs.numIndexPages;
6968 }
6969
6970 void
6971 gistcostestimate(PlannerInfo *root, IndexPath *path, double loop_count,
6972                                  Cost *indexStartupCost, Cost *indexTotalCost,
6973                                  Selectivity *indexSelectivity, double *indexCorrelation,
6974                                  double *indexPages)
6975 {
6976         IndexOptInfo *index = path->indexinfo;
6977         List       *qinfos;
6978         GenericCosts costs;
6979         Cost            descentCost;
6980
6981         /* Do preliminary analysis of indexquals */
6982         qinfos = deconstruct_indexquals(path);
6983
6984         MemSet(&costs, 0, sizeof(costs));
6985
6986         genericcostestimate(root, path, loop_count, qinfos, &costs);
6987
6988         /*
6989          * We model index descent costs similarly to those for btree, but to do
6990          * that we first need an idea of the tree height.  We somewhat arbitrarily
6991          * assume that the fanout is 100, meaning the tree height is at most
6992          * log100(index->pages).
6993          *
6994          * Although this computation isn't really expensive enough to require
6995          * caching, we might as well use index->tree_height to cache it.
6996          */
6997         if (index->tree_height < 0) /* unknown? */
6998         {
6999                 if (index->pages > 1)   /* avoid computing log(0) */
7000                         index->tree_height = (int) (log(index->pages) / log(100.0));
7001                 else
7002                         index->tree_height = 0;
7003         }
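        /*
         * Illustrative arithmetic only, using a hypothetical page count: with
         * the assumed fanout of 100, a 1,000,000-page index would get
         *              tree_height = (int) (log(1000000.0) / log(100.0)) = 3,
         * while a one-page index gets tree_height = 0.
         */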
7004
7005         /*
7006          * Add a CPU-cost component to represent the costs of initial descent. We
7007          * just use log(N) here not log2(N) since the branching factor isn't
7008          * necessarily two anyway.  As for btree, charge once per SA scan.
7009          */
7010         if (index->tuples > 1)          /* avoid computing log(0) */
7011         {
7012                 descentCost = ceil(log(index->tuples)) * cpu_operator_cost;
7013                 costs.indexStartupCost += descentCost;
7014                 costs.indexTotalCost += costs.num_sa_scans * descentCost;
7015         }
7016
7017         /*
7018          * Likewise add a per-page charge, calculated the same as for btrees.
7019          */
7020         descentCost = (index->tree_height + 1) * 50.0 * cpu_operator_cost;
7021         costs.indexStartupCost += descentCost;
7022         costs.indexTotalCost += costs.num_sa_scans * descentCost;
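        /*
         * A rough worked example under the default cpu_operator_cost of 0.0025
         * (the tuple count and tree height are hypothetical): 1,000,000 tuples
         * charge ceil(log(1000000)) * 0.0025 = 14 * 0.0025 = 0.035 for the
         * descent comparisons, and a tree_height of 3 charges
         * (3 + 1) * 50.0 * 0.0025 = 0.5 for the per-page component; each is
         * added once to the startup cost and num_sa_scans times to the total.
         */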
7023
7024         *indexStartupCost = costs.indexStartupCost;
7025         *indexTotalCost = costs.indexTotalCost;
7026         *indexSelectivity = costs.indexSelectivity;
7027         *indexCorrelation = costs.indexCorrelation;
7028         *indexPages = costs.numIndexPages;
7029 }
7030
7031 void
7032 spgcostestimate(PlannerInfo *root, IndexPath *path, double loop_count,
7033                                 Cost *indexStartupCost, Cost *indexTotalCost,
7034                                 Selectivity *indexSelectivity, double *indexCorrelation,
7035                                 double *indexPages)
7036 {
7037         IndexOptInfo *index = path->indexinfo;
7038         List       *qinfos;
7039         GenericCosts costs;
7040         Cost            descentCost;
7041
7042         /* Do preliminary analysis of indexquals */
7043         qinfos = deconstruct_indexquals(path);
7044
7045         MemSet(&costs, 0, sizeof(costs));
7046
7047         genericcostestimate(root, path, loop_count, qinfos, &costs);
7048
7049         /*
7050          * We model index descent costs similarly to those for btree, but to do
7051          * that we first need an idea of the tree height.  We somewhat arbitrarily
7052          * assume that the fanout is 100, meaning the tree height is at most
7053          * log100(index->pages).
7054          *
7055          * Although this computation isn't really expensive enough to require
7056          * caching, we might as well use index->tree_height to cache it.
7057          */
7058         if (index->tree_height < 0) /* unknown? */
7059         {
7060                 if (index->pages > 1)   /* avoid computing log(0) */
7061                         index->tree_height = (int) (log(index->pages) / log(100.0));
7062                 else
7063                         index->tree_height = 0;
7064         }
7065
7066         /*
7067          * Add a CPU-cost component to represent the costs of initial descent. We
7068          * just use log(N) here not log2(N) since the branching factor isn't
7069          * necessarily two anyway.  As for btree, charge once per SA scan.
7070          */
7071         if (index->tuples > 1)          /* avoid computing log(0) */
7072         {
7073                 descentCost = ceil(log(index->tuples)) * cpu_operator_cost;
7074                 costs.indexStartupCost += descentCost;
7075                 costs.indexTotalCost += costs.num_sa_scans * descentCost;
7076         }
7077
7078         /*
7079          * Likewise add a per-page charge, calculated the same as for btrees.
7080          */
7081         descentCost = (index->tree_height + 1) * 50.0 * cpu_operator_cost;
7082         costs.indexStartupCost += descentCost;
7083         costs.indexTotalCost += costs.num_sa_scans * descentCost;
7084
7085         *indexStartupCost = costs.indexStartupCost;
7086         *indexTotalCost = costs.indexTotalCost;
7087         *indexSelectivity = costs.indexSelectivity;
7088         *indexCorrelation = costs.indexCorrelation;
7089         *indexPages = costs.numIndexPages;
7090 }
7091
7092
7093 /*
7094  * Support routines for gincostestimate
7095  */
7096
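/*
 * Counts of the search work implied by one index scan's GIN quals, as
 * accumulated by the gincost_* routines below:
 *              haveFullScan    a full scan of the index will be required
 *              partialEntries  estimated index entries examined by partial-match quals
 *              exactEntries    entries that will be looked up exactly
 *              searchEntries   searches to be performed in the entry tree
 *              arrayScans              multiplier for the scans implied by ScalarArrayOpExpr quals
 */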
7097 typedef struct
7098 {
7099         bool            haveFullScan;
7100         double          partialEntries;
7101         double          exactEntries;
7102         double          searchEntries;
7103         double          arrayScans;
7104 } GinQualCounts;
7105
7106 /*
7107  * Estimate the number of index terms that need to be searched for while
7108  * testing the given GIN query, and increment the counts in *counts
7109  * appropriately.  If the query is unsatisfiable, return false.
7110  */
7111 static bool
7112 gincost_pattern(IndexOptInfo *index, int indexcol,
7113                                 Oid clause_op, Datum query,
7114                                 GinQualCounts *counts)
7115 {
7116         Oid                     extractProcOid;
7117         Oid                     collation;
7118         int                     strategy_op;
7119         Oid                     lefttype,
7120                                 righttype;
7121         int32           nentries = 0;
7122         bool       *partial_matches = NULL;
7123         Pointer    *extra_data = NULL;
7124         bool       *nullFlags = NULL;
7125         int32           searchMode = GIN_SEARCH_MODE_DEFAULT;
7126         int32           i;
7127
7128         /*
7129          * Get the operator's strategy number and declared input data types within
7130          * the index opfamily.  (We don't need the latter, but we use
7131          * get_op_opfamily_properties because it will throw an error if it fails to
7132          * find a matching pg_amop entry.)
7133          */
7134         get_op_opfamily_properties(clause_op, index->opfamily[indexcol], false,
7135                                                            &strategy_op, &lefttype, &righttype);
7136
7137         /*
7138          * GIN always uses the "default" support functions, which are those with
7139          * lefttype == righttype == the opclass' opcintype (see
7140          * IndexSupportInitialize in relcache.c).
7141          */
7142         extractProcOid = get_opfamily_proc(index->opfamily[indexcol],
7143                                                                            index->opcintype[indexcol],
7144                                                                            index->opcintype[indexcol],
7145                                                                            GIN_EXTRACTQUERY_PROC);
7146
7147         if (!OidIsValid(extractProcOid))
7148         {
7149                 /* should not happen; throw same error as index_getprocinfo */
7150                 elog(ERROR, "missing support function %d for attribute %d of index \"%s\"",
7151                          GIN_EXTRACTQUERY_PROC, indexcol + 1,
7152                          get_rel_name(index->indexoid));
7153         }
7154
7155         /*
7156          * Choose collation to pass to extractProc (should match initGinState).
7157          */
7158         if (OidIsValid(index->indexcollations[indexcol]))
7159                 collation = index->indexcollations[indexcol];
7160         else
7161                 collation = DEFAULT_COLLATION_OID;
7162
7163         OidFunctionCall7Coll(extractProcOid,
7164                                                  collation,
7165                                                  query,
7166                                                  PointerGetDatum(&nentries),
7167                                                  UInt16GetDatum(strategy_op),
7168                                                  PointerGetDatum(&partial_matches),
7169                                                  PointerGetDatum(&extra_data),
7170                                                  PointerGetDatum(&nullFlags),
7171                                                  PointerGetDatum(&searchMode));
7172
7173         if (nentries <= 0 && searchMode == GIN_SEARCH_MODE_DEFAULT)
7174         {
7175                 /* No match is possible */
7176                 return false;
7177         }
7178
7179         for (i = 0; i < nentries; i++)
7180         {
7181                 /*
7182                  * For a partial match we have no information with which to estimate
7183                  * the number of matched index entries, so just estimate it as 100.
7184                  */
7185                 if (partial_matches && partial_matches[i])
7186                         counts->partialEntries += 100;
7187                 else
7188                         counts->exactEntries++;
7189
7190                 counts->searchEntries++;
7191         }
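        /*
         * For instance (illustrative only; the real counts come from the
         * opclass's extractQuery function): with the core tsvector opclass, a
         * query like 'a & b' typically extracts two ordinary entries, growing
         * exactEntries and searchEntries by 2 each, while a prefix query such
         * as 'post:*' extracts one partial-match entry, adding 100 to
         * partialEntries and 1 to searchEntries.
         */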
7192
7193         if (searchMode == GIN_SEARCH_MODE_INCLUDE_EMPTY)
7194         {
7195                 /* Treat "include empty" like an exact-match item */
7196                 counts->exactEntries++;
7197                 counts->searchEntries++;
7198         }
7199         else if (searchMode != GIN_SEARCH_MODE_DEFAULT)
7200         {
7201                 /* It's GIN_SEARCH_MODE_ALL */
7202                 counts->haveFullScan = true;
7203         }
7204
7205         return true;
7206 }
7207
7208 /*
7209  * Estimate the number of index terms that need to be searched for while
7210  * testing the given GIN index clause, and increment the counts in *counts
7211  * appropriately.  If the query is unsatisfiable, return false.
7212  */
7213 static bool
7214 gincost_opexpr(PlannerInfo *root,
7215                            IndexOptInfo *index,
7216                            IndexQualInfo *qinfo,
7217                            GinQualCounts *counts)
7218 {
7219         int                     indexcol = qinfo->indexcol;
7220         Oid                     clause_op = qinfo->clause_op;
7221         Node       *operand = qinfo->other_operand;
7222
7223         if (!qinfo->varonleft)
7224         {
7225                 /* must commute the operator */
7226                 clause_op = get_commutator(clause_op);
7227         }
7228
7229         /* aggressively reduce to a constant, and look through relabeling */
7230         operand = estimate_expression_value(root, operand);
7231
7232         if (IsA(operand, RelabelType))
7233                 operand = (Node *) ((RelabelType *) operand)->arg;
7234
7235         /*
7236          * We can't call the extractQuery method with an unknown operand, so
7237          * unless the operand is a Const we can't do much; just assume there will
7238          * be one ordinary search entry from the operand at runtime.
7239          */
7240         if (!IsA(operand, Const))
7241         {
7242                 counts->exactEntries++;
7243                 counts->searchEntries++;
7244                 return true;
7245         }
7246
7247         /* If Const is null, there can be no matches */
7248         if (((Const *) operand)->constisnull)
7249                 return false;
7250
7251         /* Otherwise, apply extractQuery and get the actual term counts */
7252         return gincost_pattern(index, indexcol, clause_op,
7253                                                    ((Const *) operand)->constvalue,
7254                                                    counts);
7255 }
7256
7257 /*
7258  * Estimate the number of index terms that need to be searched for while
7259  * testing the given GIN index clause, and increment the counts in *counts
7260  * appropriately.  If the query is unsatisfiable, return false.
7261  *
7262  * A ScalarArrayOpExpr will give rise to N separate indexscans at runtime,
7263  * each of which involves one value from the RHS array, plus all the
7264  * non-array quals (if any).  To model this, we average the counts across
7265  * the RHS elements, and add the averages to the counts in *counts (which
7266  * correspond to per-indexscan costs).  We also multiply counts->arrayScans
7267  * by N, causing gincostestimate to scale up its estimates accordingly.
7268  */
7269 static bool
7270 gincost_scalararrayopexpr(PlannerInfo *root,
7271                                                   IndexOptInfo *index,
7272                                                   IndexQualInfo *qinfo,
7273                                                   double numIndexEntries,
7274                                                   GinQualCounts *counts)
7275 {
7276         int                     indexcol = qinfo->indexcol;
7277         Oid                     clause_op = qinfo->clause_op;
7278         Node       *rightop = qinfo->other_operand;
7279         ArrayType  *arrayval;
7280         int16           elmlen;
7281         bool            elmbyval;
7282         char            elmalign;
7283         int                     numElems;
7284         Datum      *elemValues;
7285         bool       *elemNulls;
7286         GinQualCounts arraycounts;
7287         int                     numPossible = 0;
7288         int                     i;
7289
7290         Assert(((ScalarArrayOpExpr *) qinfo->rinfo->clause)->useOr);
7291
7292         /* aggressively reduce to a constant, and look through relabeling */
7293         rightop = estimate_expression_value(root, rightop);
7294
7295         if (IsA(rightop, RelabelType))
7296                 rightop = (Node *) ((RelabelType *) rightop)->arg;
7297
7298         /*
7299          * We can't call the extractQuery method with an unknown operand, so
7300          * unless the operand is a Const we can't do much; just assume there will
7301          * be one ordinary search entry from each array entry at runtime, and
7302          * fall back on a probably-bad estimate of the number of array entries.
7303          */
7304         if (!IsA(rightop, Const))
7305         {
7306                 counts->exactEntries++;
7307                 counts->searchEntries++;
7308                 counts->arrayScans *= estimate_array_length(rightop);
7309                 return true;
7310         }
7311
7312         /* If Const is null, there can be no matches */
7313         if (((Const *) rightop)->constisnull)
7314                 return false;
7315
7316         /* Otherwise, extract the array elements and iterate over them */
7317         arrayval = DatumGetArrayTypeP(((Const *) rightop)->constvalue);
7318         get_typlenbyvalalign(ARR_ELEMTYPE(arrayval),
7319                                                  &elmlen, &elmbyval, &elmalign);
7320         deconstruct_array(arrayval,
7321                                           ARR_ELEMTYPE(arrayval),
7322                                           elmlen, elmbyval, elmalign,
7323                                           &elemValues, &elemNulls, &numElems);
7324
7325         memset(&arraycounts, 0, sizeof(arraycounts));
7326
7327         for (i = 0; i < numElems; i++)
7328         {
7329                 GinQualCounts elemcounts;
7330
7331                 /* NULL can't match anything, so ignore, as the executor will */
7332                 if (elemNulls[i])
7333                         continue;
7334
7335                 /* Otherwise, apply extractQuery and get the actual term counts */
7336                 memset(&elemcounts, 0, sizeof(elemcounts));
7337
7338                 if (gincost_pattern(index, indexcol, clause_op, elemValues[i],
7339                                                         &elemcounts))
7340                 {
7341                         /* We ignore array elements that are unsatisfiable patterns */
7342                         numPossible++;
7343
7344                         if (elemcounts.haveFullScan)
7345                         {
7346                                 /*
7347                                  * Full index scan will be required.  We treat this as if
7348                                  * A full index scan will be required.  We treat this as if
7349                                  * that reasonable?
7350                                  */
7351                                 elemcounts.partialEntries = 0;
7352                                 elemcounts.exactEntries = numIndexEntries;
7353                                 elemcounts.searchEntries = numIndexEntries;
7354                         }
7355                         arraycounts.partialEntries += elemcounts.partialEntries;
7356                         arraycounts.exactEntries += elemcounts.exactEntries;
7357                         arraycounts.searchEntries += elemcounts.searchEntries;
7358                 }
7359         }
7360
7361         if (numPossible == 0)
7362         {
7363                 /* No satisfiable patterns in the array */
7364                 return false;
7365         }
7366
7367         /*
7368          * Now add the averages to the global counts.  This will give us an
7369          * estimate of the average number of terms searched for in each indexscan,
7370          * including contributions from both array and non-array quals.
7371          */
7372         counts->partialEntries += arraycounts.partialEntries / numPossible;
7373         counts->exactEntries += arraycounts.exactEntries / numPossible;
7374         counts->searchEntries += arraycounts.searchEntries / numPossible;
7375
7376         counts->arrayScans *= numPossible;
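        /*
         * Worked example with made-up numbers: if the RHS array has three
         * satisfiable elements whose exactEntries come out as 2, 4 and 6, then
         * the per-scan average of 4 is added to counts->exactEntries, and
         * counts->arrayScans is multiplied by 3 to reflect the three separate
         * index scans that will be performed at runtime.
         */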
7377
7378         return true;
7379 }
7380
7381 /*
7382  * GIN has search behavior completely different from other index types
7383  */
7384 void
7385 gincostestimate(PlannerInfo *root, IndexPath *path, double loop_count,
7386                                 Cost *indexStartupCost, Cost *indexTotalCost,
7387                                 Selectivity *indexSelectivity, double *indexCorrelation,
7388                                 double *indexPages)
7389 {
7390         IndexOptInfo *index = path->indexinfo;
7391         List       *indexQuals = path->indexquals;
7392         List       *indexOrderBys = path->indexorderbys;
7393         List       *qinfos;
7394         ListCell   *l;
7395         List       *selectivityQuals;
7396         double          numPages = index->pages,
7397                                 numTuples = index->tuples;
7398         double          numEntryPages,
7399                                 numDataPages,
7400                                 numPendingPages,
7401                                 numEntries;
7402         GinQualCounts counts;
7403         bool            matchPossible;
7404         double          partialScale;
7405         double          entryPagesFetched,
7406                                 dataPagesFetched,
7407                                 dataPagesFetchedBySel;
7408         double          qual_op_cost,
7409                                 qual_arg_cost,
7410                                 spc_random_page_cost,
7411                                 outer_scans;
7412         Relation        indexRel;
7413         GinStatsData ginStats;
7414
7415         /* Do preliminary analysis of indexquals */
7416         qinfos = deconstruct_indexquals(path);
7417
7418         /*
7419          * Obtain statistical information from the meta page, if possible.  Else
7420          * set ginStats to zeroes, and we'll cope below.
7421          */
7422         if (!index->hypothetical)
7423         {
7424                 indexRel = index_open(index->indexoid, AccessShareLock);
7425                 ginGetStats(indexRel, &ginStats);
7426                 index_close(indexRel, AccessShareLock);
7427         }
7428         else
7429         {
7430                 memset(&ginStats, 0, sizeof(ginStats));
7431         }
7432
7433         /*
7434          * Assuming we got valid (nonzero) stats at all, nPendingPages can be
7435          * trusted, but the other fields are data as of the last VACUUM.  We can
7436          * scale them up to account for growth since then, but that method only
7437          * goes so far; in the worst case, the stats might be for a completely
7438          * empty index, and scaling them will produce pretty bogus numbers.
7439          * Somewhat arbitrarily, set the cutoff for doing scaling at 4X growth; if
7440          * it's grown more than that, fall back to estimating things only from the
7441          * assumed-accurate index size.  But we'll trust nPendingPages in any case
7442          * so long as it's not clearly insane, ie, more than the index size.
7443          */
7444         if (ginStats.nPendingPages < numPages)
7445                 numPendingPages = ginStats.nPendingPages;
7446         else
7447                 numPendingPages = 0;
7448
7449         if (numPages > 0 && ginStats.nTotalPages <= numPages &&
7450                 ginStats.nTotalPages > numPages / 4 &&
7451                 ginStats.nEntryPages > 0 && ginStats.nEntries > 0)
7452         {
7453                 /*
7454                  * OK, the stats seem close enough to sane to be trusted.  But we
7455                  * still need to scale them by the ratio numPages / nTotalPages to
7456                  * account for growth since the last VACUUM.
7457                  */
7458                 double          scale = numPages / ginStats.nTotalPages;
7459
7460                 numEntryPages = ceil(ginStats.nEntryPages * scale);
7461                 numDataPages = ceil(ginStats.nDataPages * scale);
7462                 numEntries = ceil(ginStats.nEntries * scale);
7463                 /* ensure we didn't round up too much */
7464                 numEntryPages = Min(numEntryPages, numPages - numPendingPages);
7465                 numDataPages = Min(numDataPages,
7466                                                    numPages - numPendingPages - numEntryPages);
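                /*
                 * As a made-up example of this scaling: if the last VACUUM left
                 * nTotalPages = 100, nEntryPages = 60, nDataPages = 30 and
                 * nEntries = 5000, but the index is now 150 pages, then scale =
                 * 1.5 and we assume 90 entry pages, 45 data pages and 7500
                 * entries (subject to the Min() clamps above).  Had the index
                 * grown to 400 pages or more, we would use the fallback
                 * estimates below instead.
                 */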
7467         }
7468         else
7469         {
7470                 /*
7471                  * We might get here because it's a hypothetical index, or an index
7472                  * created pre-9.1 and never vacuumed since upgrading (in which case
7473                  * its stats would read as zeroes), or just because it's grown too
7474                  * much since the last VACUUM for us to put our faith in scaling.
7475                  *
7476                  * Invent some plausible internal statistics based on the index page
7477                  * count (and clamp that to at least 10 pages, just in case).  We
7478                  * estimate that 90% of the index is entry pages, and the rest is data
7479                  * pages.  Estimate 100 entries per entry page; this is rather bogus
7480                  * since it'll depend on the size of the keys, but it's more robust
7481                  * than trying to predict the number of entries per heap tuple.
7482                  */
7483                 numPages = Max(numPages, 10);
7484                 numEntryPages = floor((numPages - numPendingPages) * 0.90);
7485                 numDataPages = numPages - numPendingPages - numEntryPages;
7486                 numEntries = floor(numEntryPages * 100);
7487         }
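        /*
         * For instance (hypothetical sizes, assuming no pending pages), a
         * never-vacuumed 50-page index would be assumed to have
         * floor(50 * 0.90) = 45 entry pages, 5 data pages, and 45 * 100 = 4500
         * entries under these fallback rules.
         */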
7488
7489         /* In an empty index, numEntries could be zero.  Avoid divide-by-zero */
7490         if (numEntries < 1)
7491                 numEntries = 1;
7492
7493         /*
7494          * Include predicate in selectivityQuals (should match
7495          * genericcostestimate)
7496          */
7497         if (index->indpred != NIL)
7498         {
7499                 List       *predExtraQuals = NIL;
7500
7501                 foreach(l, index->indpred)
7502                 {
7503                         Node       *predQual = (Node *) lfirst(l);
7504                         List       *oneQual = list_make1(predQual);
7505
7506                         if (!predicate_implied_by(oneQual, indexQuals))
7507                                 predExtraQuals = list_concat(predExtraQuals, oneQual);
7508                 }
7509                 /* list_concat avoids modifying the passed-in indexQuals list */
7510                 selectivityQuals = list_concat(predExtraQuals, indexQuals);
7511         }
7512         else
7513                 selectivityQuals = indexQuals;
7514
7515         /* Estimate the fraction of main-table tuples that will be visited */
7516         *indexSelectivity = clauselist_selectivity(root, selectivityQuals,
7517                                                                                            index->rel->relid,
7518                                                                                            JOIN_INNER,
7519                                                                                            NULL);
7520
7521         /* fetch estimated page cost for tablespace containing index */
7522         get_tablespace_page_costs(index->reltablespace,
7523                                                           &spc_random_page_cost,
7524                                                           NULL);
7525
7526         /*
7527          * Generic assumption about index correlation: there isn't any.
7528          */
7529         *indexCorrelation = 0.0;
7530
7531         /*
7532          * Examine quals to estimate number of search entries & partial matches
7533          */
7534         memset(&counts, 0, sizeof(counts));
7535         counts.arrayScans = 1;
7536         matchPossible = true;
7537
7538         foreach(l, qinfos)
7539         {
7540                 IndexQualInfo *qinfo = (IndexQualInfo *) lfirst(l);
7541                 Expr       *clause = qinfo->rinfo->clause;
7542
7543                 if (IsA(clause, OpExpr))
7544                 {
7545                         matchPossible = gincost_opexpr(root,
7546                                                                                    index,
7547                                                                                    qinfo,
7548                                                                                    &counts);
7549                         if (!matchPossible)
7550                                 break;
7551                 }
7552                 else if (IsA(clause, ScalarArrayOpExpr))
7553                 {
7554                         matchPossible = gincost_scalararrayopexpr(root,
7555                                                                                                           index,
7556                                                                                                           qinfo,
7557                                                                                                           numEntries,
7558                                                                                                           &counts);
7559                         if (!matchPossible)
7560                                 break;
7561                 }
7562                 else
7563                 {
7564                         /* shouldn't be anything else for a GIN index */
7565                         elog(ERROR, "unsupported GIN indexqual type: %d",
7566                                  (int) nodeTag(clause));
7567                 }
7568         }
7569
7570         /* Fall out if there were any provably-unsatisfiable quals */
7571         if (!matchPossible)
7572         {
7573                 *indexStartupCost = 0;
7574                 *indexTotalCost = 0;
7575                 *indexSelectivity = 0;
7576                 return;
7577         }
7578
7579         if (counts.haveFullScan || indexQuals == NIL)
7580         {
7581                 /*
7582                  * A full index scan will be required.  We treat this as if every key in
7583                  * the index had been listed in the query; is that reasonable?
7584                  */
7585                 counts.partialEntries = 0;
7586                 counts.exactEntries = numEntries;
7587                 counts.searchEntries = numEntries;
7588         }
7589
7590         /* Will we have more than one iteration of a nestloop scan? */
7591         outer_scans = loop_count;
7592
7593         /*
7594          * Compute the cost to begin the scan; first of all, account for the
7595          * pending list.
7596          */
7597         entryPagesFetched = numPendingPages;
7598
7599         /*
7600          * Estimate the number of entry pages read.  We need to do
7601          * counts.searchEntries searches.  A power function is used, as it should
7602          * be, but the number of tuples on leaf pages is usually much greater.
7603          * Here we include all searches in the entry tree, including the search
7604          * for the first entry in the partial match algorithm.
7605          */
7606         entryPagesFetched += ceil(counts.searchEntries * rint(pow(numEntryPages, 0.15)));
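        /*
         * To illustrate with hypothetical numbers: for numEntryPages = 500,
         * pow(500, 0.15) is about 2.54, which rint() rounds to 3, so two search
         * entries would contribute ceil(2 * 3) = 6 fetched entry pages here.
         */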
7607
7608         /*
7609          * Add an estimate of the entry pages read by the partial match algorithm.
7610          * It's a scan over the leaf pages of the entry tree.  We don't have any
7611          * useful stats here, so estimate it as a proportion.  Because
7612          * counts.partialEntries is really pretty bogus (see code above), it can
7613          * exceed numEntries; clamp the proportion to ensure sanity.
7614          */
7615         partialScale = counts.partialEntries / numEntries;
7616         partialScale = Min(partialScale, 1.0);
7617
7618         entryPagesFetched += ceil(numEntryPages * partialScale);
7619
7620         /*
7621          * The partial match algorithm reads all data pages before doing the
7622          * actual scan, so this is a startup cost.  Again, we don't have any
7623          * useful stats here, so estimate it as a proportion.
7624          */
7625         dataPagesFetched = ceil(numDataPages * partialScale);
7626
7627         /*
7628          * Calculate cache effects if more than one scan due to nestloops or array
7629          * quals.  The result is pro-rated per nestloop scan, but the array qual
7630          * factor shouldn't be pro-rated (compare genericcostestimate).
7631          */
7632         if (outer_scans > 1 || counts.arrayScans > 1)
7633         {
7634                 entryPagesFetched *= outer_scans * counts.arrayScans;
7635                 entryPagesFetched = index_pages_fetched(entryPagesFetched,
7636                                                                                                 (BlockNumber) numEntryPages,
7637                                                                                                 numEntryPages, root);
7638                 entryPagesFetched /= outer_scans;
7639                 dataPagesFetched *= outer_scans * counts.arrayScans;
7640                 dataPagesFetched = index_pages_fetched(dataPagesFetched,
7641                                                                                            (BlockNumber) numDataPages,
7642                                                                                            numDataPages, root);
7643                 dataPagesFetched /= outer_scans;
7644         }
7645
7646         /*
7647          * Here we use random page cost because logically-close pages could be far
7648          * apart on disk.
7649          */
7650         *indexStartupCost = (entryPagesFetched + dataPagesFetched) * spc_random_page_cost;
7651
7652         /*
7653          * Now compute the number of data pages fetched during the scan.
7654          *
7655          * We assume that every entry has the same number of items, and that there
7656          * is no overlap between them. (XXX: tsvector and array opclasses collect
7657          * statistics on the frequency of individual keys; it would be nice to use
7658          * those here.)
7659          */
7660         dataPagesFetched = ceil(numDataPages * counts.exactEntries / numEntries);
7661
7662         /*
7663          * If there is a lot of overlap among the entries, in particular if one of
7664          * the entries is very frequent, the above calculation can grossly
7665          * under-estimate.  As a simple cross-check, calculate a lower bound based
7666          * on the overall selectivity of the quals.  At a minimum, we must read
7667          * one item pointer for each matching entry.
7668          *
7669          * The width of each item pointer varies, based on the level of
7670          * compression.  We don't have statistics on that, but an average of
7671          * around 3 bytes per item is fairly typical.
7672          */
7673         dataPagesFetchedBySel = ceil(*indexSelectivity *
7674                                                                  (numTuples / (BLCKSZ / 3)));
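        /*
         * Rough illustration with the default BLCKSZ of 8192 and hypothetical
         * inputs: for 1,000,000 heap tuples and an index selectivity of 0.01,
         * this lower bound is ceil(0.01 * 1000000 / 2730) = 4 data pages, which
         * replaces any smaller estimate from the per-entry calculation above.
         */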
7675         if (dataPagesFetchedBySel > dataPagesFetched)
7676                 dataPagesFetched = dataPagesFetchedBySel;
7677
7678         /* Account for cache effects, the same as above */
7679         if (outer_scans > 1 || counts.arrayScans > 1)
7680         {
7681                 dataPagesFetched *= outer_scans * counts.arrayScans;
7682                 dataPagesFetched = index_pages_fetched(dataPagesFetched,
7683                                                                                            (BlockNumber) numDataPages,
7684                                                                                            numDataPages, root);
7685                 dataPagesFetched /= outer_scans;
7686         }
7687
7688         /* And apply random_page_cost as the cost per page */
7689         *indexTotalCost = *indexStartupCost +
7690                 dataPagesFetched * spc_random_page_cost;
7691
7692         /*
7693          * Add on index qual eval costs, much as in genericcostestimate
7694          */
7695         qual_arg_cost = other_operands_eval_cost(root, qinfos) +
7696                 orderby_operands_eval_cost(root, path);
7697         qual_op_cost = cpu_operator_cost *
7698                 (list_length(indexQuals) + list_length(indexOrderBys));
7699
7700         *indexStartupCost += qual_arg_cost;
7701         *indexTotalCost += qual_arg_cost;
7702         *indexTotalCost += (numTuples * *indexSelectivity) * (cpu_index_tuple_cost + qual_op_cost);
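        /*
         * For example, at the default cpu_operator_cost (0.0025) and
         * cpu_index_tuple_cost (0.005), two indexquals and no order-by clauses
         * give qual_op_cost = 0.005, and 1,000,000 tuples at a selectivity of
         * 0.01 add 1000000 * 0.01 * (0.005 + 0.005) = 100 to the total cost.
         * (Hypothetical figures, shown only to make the formula concrete.)
         */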
7703         *indexPages = dataPagesFetched;
7704 }
7705
7706 /*
7707  * BRIN has search behavior completely different from other index types
7708  */
7709 void
7710 brincostestimate(PlannerInfo *root, IndexPath *path, double loop_count,
7711                                  Cost *indexStartupCost, Cost *indexTotalCost,
7712                                  Selectivity *indexSelectivity, double *indexCorrelation,
7713                                  double *indexPages)
7714 {
7715         IndexOptInfo *index = path->indexinfo;
7716         List       *indexQuals = path->indexquals;
7717         List       *indexOrderBys = path->indexorderbys;
7718         double          numPages = index->pages;
7719         double          numTuples = index->tuples;
7720         List       *qinfos;
7721         Cost            spc_seq_page_cost;
7722         Cost            spc_random_page_cost;
7723         double          qual_op_cost;
7724         double          qual_arg_cost;
7725
7726         /* Do preliminary analysis of indexquals */
7727         qinfos = deconstruct_indexquals(path);
7728
7729         /* fetch estimated page cost for tablespace containing index */
7730         get_tablespace_page_costs(index->reltablespace,
7731                                                           &spc_random_page_cost,
7732                                                           &spc_seq_page_cost);
7733
7734         /*
7735          * BRIN indexes are always read in full; use that as startup cost.
7736          *
7737          * XXX maybe only include revmap pages here?
7738          */
7739         *indexStartupCost = spc_seq_page_cost * numPages * loop_count;
7740
7741         /*
7742          * To read a BRIN index there might be a bit of back and forth over
7743          * regular pages, as revmap might point to them out of sequential order;
7744          * calculate this as reading the whole index in random order.
7745          */
7746         *indexTotalCost = spc_random_page_cost * numPages * loop_count;
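        /*
         * For a hypothetical 100-page BRIN index scanned once (loop_count = 1),
         * the default tablespace costs (seq_page_cost = 1.0, random_page_cost =
         * 4.0) would give a startup cost of 100 and a page-read total of 400,
         * before the qual-eval charges added below.
         */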
7747
7748         *indexSelectivity =
7749                 clauselist_selectivity(root, indexQuals,
7750                                                            path->indexinfo->rel->relid,
7751                                                            JOIN_INNER, NULL);
7752         *indexCorrelation = 1;
7753
7754         /*
7755          * Add on index qual eval costs, much as in genericcostestimate.
7756          */
7757         qual_arg_cost = other_operands_eval_cost(root, qinfos) +
7758                 orderby_operands_eval_cost(root, path);
7759         qual_op_cost = cpu_operator_cost *
7760                 (list_length(indexQuals) + list_length(indexOrderBys));
7761
7762         *indexStartupCost += qual_arg_cost;
7763         *indexTotalCost += qual_arg_cost;
7764         *indexTotalCost += (numTuples * *indexSelectivity) * (cpu_index_tuple_cost + qual_op_cost);
7765         *indexPages = index->pages;
7766
7767         /* XXX what about pages_per_range? */
7768 }