/*-------------------------------------------------------------------------
 *
 * selfuncs.c
 *      Selectivity functions and index cost estimation functions for
 *      standard operators and index access methods.
 *
 *      Selectivity routines are registered in the pg_operator catalog
 *      in the "oprrest" and "oprjoin" attributes.
 *
 *      Index cost functions are located via the index AM's API struct,
 *      which is obtained from the handler function registered in pg_am.
 *
 * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *      src/backend/utils/adt/selfuncs.c
 *
 *-------------------------------------------------------------------------
 */

/*----------
 * Operator selectivity estimation functions are called to estimate the
 * selectivity of WHERE clauses whose top-level operator is their operator.
 * We divide the problem into two cases:
 *      Restriction clause estimation: the clause involves vars of just
 *          one relation.
 *      Join clause estimation: the clause involves vars of multiple rels.
 * Join selectivity estimation is far more difficult and usually less accurate
 * than restriction estimation.
 *
 * When dealing with the inner scan of a nestloop join, we consider the
 * join's joinclauses as restriction clauses for the inner relation, and
 * treat vars of the outer relation as parameters (a/k/a constants of unknown
 * values).  So, restriction estimators need to be able to accept an argument
 * telling which relation is to be treated as the variable.
 *
 * The call convention for a restriction estimator (oprrest function) is
 *
 *      Selectivity oprrest (PlannerInfo *root,
 *                           Oid operator,
 *                           List *args,
 *                           int varRelid);
 *
 * root: general information about the query (rtable and RelOptInfo lists
 * are particularly important for the estimator).
 * operator: OID of the specific operator in question.
 * args: argument list from the operator clause.
 * varRelid: if not zero, the relid (rtable index) of the relation to
 * be treated as the variable relation.  May be zero if the args list
 * is known to contain vars of only one relation.
 *
 * This is represented at the SQL level (in pg_proc) as
 *
 *      float8 oprrest (internal, oid, internal, int4);
 *
 * The result is a selectivity, that is, a fraction (0 to 1) of the rows
 * of the relation that are expected to produce a TRUE result for the
 * given operator.
 *
 * The call convention for a join estimator (oprjoin function) is similar
 * except that varRelid is not needed, and instead join information is
 * supplied:
 *
 *      Selectivity oprjoin (PlannerInfo *root,
 *                           Oid operator,
 *                           List *args,
 *                           JoinType jointype,
 *                           SpecialJoinInfo *sjinfo);
 *
 *      float8 oprjoin (internal, oid, internal, int2, internal);
 *
 * (Before Postgres 8.4, join estimators had only the first four of these
 * parameters.  That signature is still allowed, but deprecated.)  The
 * relationship between jointype and sjinfo is explained in the comments for
 * clause_selectivity() --- the short version is that jointype is usually
 * best ignored in favor of examining sjinfo.
 *
 * Join selectivity for regular inner and outer joins is defined as the
 * fraction (0 to 1) of the cross product of the relations that is expected
 * to produce a TRUE result for the given operator.  For both semi and anti
 * joins, however, the selectivity is defined as the fraction of the left-hand
 * side relation's rows that are expected to have a match (ie, at least one
 * row with a TRUE result) in the right-hand side.
 *
 * For both oprrest and oprjoin functions, the operator's input collation OID
 * (if any) is passed using the standard fmgr mechanism, so that the estimator
 * function can fetch it with PG_GET_COLLATION().  Note, however, that all
 * statistics in pg_statistic are currently built using the database's default
 * collation.  Thus, in most cases where we are looking at statistics, we
 * should ignore the actual operator collation and use DEFAULT_COLLATION_OID.
 * We expect that the error induced by doing this is usually not large enough
 * to justify complicating matters.
 *----------
 */
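/*
 * As a concrete illustration of the conventions above: the built-in integer
 * "=" operator registers eqsel as its oprrest and eqjoinsel as its oprjoin,
 * so a restriction clause such as WHERE t.a = 42 is estimated by eqsel()
 * below, while a join clause such as t1.a = t2.b goes through eqjoinsel().
 */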

#include "postgres.h"

#include <ctype.h>
#include <math.h>

#include "access/gin.h"
#include "access/htup_details.h"
#include "access/sysattr.h"
#include "catalog/index.h"
#include "catalog/pg_am.h"
#include "catalog/pg_collation.h"
#include "catalog/pg_operator.h"
#include "catalog/pg_opfamily.h"
#include "catalog/pg_statistic.h"
#include "catalog/pg_type.h"
#include "executor/executor.h"
#include "mb/pg_wchar.h"
#include "nodes/makefuncs.h"
#include "nodes/nodeFuncs.h"
#include "optimizer/clauses.h"
#include "optimizer/cost.h"
#include "optimizer/pathnode.h"
#include "optimizer/paths.h"
#include "optimizer/plancat.h"
#include "optimizer/predtest.h"
#include "optimizer/restrictinfo.h"
#include "optimizer/var.h"
#include "parser/parse_clause.h"
#include "parser/parse_coerce.h"
#include "parser/parsetree.h"
#include "utils/builtins.h"
#include "utils/bytea.h"
#include "utils/date.h"
#include "utils/datum.h"
#include "utils/fmgroids.h"
#include "utils/index_selfuncs.h"
#include "utils/lsyscache.h"
#include "utils/nabstime.h"
#include "utils/pg_locale.h"
#include "utils/rel.h"
#include "utils/selfuncs.h"
#include "utils/spccache.h"
#include "utils/syscache.h"
#include "utils/timestamp.h"
#include "utils/tqual.h"
#include "utils/typcache.h"


/* Hooks for plugins to get control when we ask for stats */
get_relation_stats_hook_type get_relation_stats_hook = NULL;
get_index_stats_hook_type get_index_stats_hook = NULL;

static double var_eq_const(VariableStatData *vardata, Oid operator,
             Datum constval, bool constisnull,
             bool varonleft);
static double var_eq_non_const(VariableStatData *vardata, Oid operator,
                 Node *other,
                 bool varonleft);
static double ineq_histogram_selectivity(PlannerInfo *root,
                           VariableStatData *vardata,
                           FmgrInfo *opproc, bool isgt,
                           Datum constval, Oid consttype);
static double eqjoinsel_inner(Oid operator,
                VariableStatData *vardata1, VariableStatData *vardata2);
static double eqjoinsel_semi(Oid operator,
               VariableStatData *vardata1, VariableStatData *vardata2,
               RelOptInfo *inner_rel);
static bool convert_to_scalar(Datum value, Oid valuetypid, double *scaledvalue,
                  Datum lobound, Datum hibound, Oid boundstypid,
                  double *scaledlobound, double *scaledhibound);
static double convert_numeric_to_scalar(Datum value, Oid typid);
static void convert_string_to_scalar(char *value,
                         double *scaledvalue,
                         char *lobound,
                         double *scaledlobound,
                         char *hibound,
                         double *scaledhibound);
static void convert_bytea_to_scalar(Datum value,
                        double *scaledvalue,
                        Datum lobound,
                        double *scaledlobound,
                        Datum hibound,
                        double *scaledhibound);
static double convert_one_string_to_scalar(char *value,
                             int rangelo, int rangehi);
static double convert_one_bytea_to_scalar(unsigned char *value, int valuelen,
                            int rangelo, int rangehi);
static char *convert_string_datum(Datum value, Oid typid);
static double convert_timevalue_to_scalar(Datum value, Oid typid);
static void examine_simple_variable(PlannerInfo *root, Var *var,
                        VariableStatData *vardata);
static bool get_variable_range(PlannerInfo *root, VariableStatData *vardata,
                   Oid sortop, Datum *min, Datum *max);
static bool get_actual_variable_range(PlannerInfo *root,
                          VariableStatData *vardata,
                          Oid sortop,
                          Datum *min, Datum *max);
static RelOptInfo *find_join_input_rel(PlannerInfo *root, Relids relids);
static Selectivity prefix_selectivity(PlannerInfo *root,
                   VariableStatData *vardata,
                   Oid vartype, Oid opfamily, Const *prefixcon);
static Selectivity like_selectivity(const char *patt, int pattlen,
                 bool case_insensitive);
static Selectivity regex_selectivity(const char *patt, int pattlen,
                  bool case_insensitive,
                  int fixed_prefix_len);
static Datum string_to_datum(const char *str, Oid datatype);
static Const *string_to_const(const char *str, Oid datatype);
static Const *string_to_bytea_const(const char *str, size_t str_len);
static List *add_predicate_to_quals(IndexOptInfo *index, List *indexQuals);


/*
 *      eqsel           - Selectivity of "=" for any data types.
 *
 * Note: this routine is also used to estimate selectivity for some
 * operators that are not "=" but have comparable selectivity behavior,
 * such as "~=" (geometric approximate-match).  Even for "=", we must
 * keep in mind that the left and right datatypes may differ.
 */
Datum
eqsel(PG_FUNCTION_ARGS)
{
    PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
    Oid         operator = PG_GETARG_OID(1);
    List       *args = (List *) PG_GETARG_POINTER(2);
    int         varRelid = PG_GETARG_INT32(3);
    VariableStatData vardata;
    Node       *other;
    bool        varonleft;
    double      selec;

    /*
     * If expression is not variable = something or something = variable, then
     * punt and return a default estimate.
     */
    if (!get_restriction_variable(root, args, varRelid,
                                  &vardata, &other, &varonleft))
        PG_RETURN_FLOAT8(DEFAULT_EQ_SEL);

    /*
     * We can do a lot better if the something is a constant.  (Note: the
     * Const might result from estimation rather than being a simple constant
     * in the query.)
     */
    if (IsA(other, Const))
        selec = var_eq_const(&vardata, operator,
                             ((Const *) other)->constvalue,
                             ((Const *) other)->constisnull,
                             varonleft);
    else
        selec = var_eq_non_const(&vardata, operator, other,
                                 varonleft);

    ReleaseVariableStats(vardata);

    PG_RETURN_FLOAT8((float8) selec);
}

/*
 * var_eq_const --- eqsel for var = const case
 *
 * This is split out so that some other estimation functions can use it.
 */
static double
var_eq_const(VariableStatData *vardata, Oid operator,
             Datum constval, bool constisnull,
             bool varonleft)
{
    double      selec;
    bool        isdefault;

    /*
     * If the constant is NULL, assume operator is strict and return zero, ie,
     * operator will never return TRUE.
     */
    if (constisnull)
        return 0.0;

    /*
     * If we matched the var to a unique index or DISTINCT clause, assume
     * there is exactly one match regardless of anything else.  (This is
     * slightly bogus, since the index or clause's equality operator might be
     * different from ours, but it's much more likely to be right than
     * ignoring the information.)
     */
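    /*
     * For example, a column matched to a unique index in a table of 10000
     * tuples gets selectivity 1/10000 = 0.0001 here, regardless of the
     * statistics.
     */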
    if (vardata->isunique && vardata->rel && vardata->rel->tuples >= 1.0)
        return 1.0 / vardata->rel->tuples;

    if (HeapTupleIsValid(vardata->statsTuple))
    {
        Form_pg_statistic stats;
        Datum      *values;
        int         nvalues;
        float4     *numbers;
        int         nnumbers;
        bool        match = false;
        int         i;

        stats = (Form_pg_statistic) GETSTRUCT(vardata->statsTuple);

        /*
         * Is the constant "=" to any of the column's most common values?
         * (Although the given operator may not really be "=", we will assume
         * that seeing whether it returns TRUE is an appropriate test.  If you
         * don't like this, maybe you shouldn't be using eqsel for your
         * operator...)
         */
        if (get_attstatsslot(vardata->statsTuple,
                             vardata->atttype, vardata->atttypmod,
                             STATISTIC_KIND_MCV, InvalidOid,
                             NULL,
                             &values, &nvalues,
                             &numbers, &nnumbers))
        {
            FmgrInfo    eqproc;

            fmgr_info(get_opcode(operator), &eqproc);

            for (i = 0; i < nvalues; i++)
            {
                /* be careful to apply operator right way 'round */
                if (varonleft)
                    match = DatumGetBool(FunctionCall2Coll(&eqproc,
                                                           DEFAULT_COLLATION_OID,
                                                           values[i],
                                                           constval));
                else
                    match = DatumGetBool(FunctionCall2Coll(&eqproc,
                                                           DEFAULT_COLLATION_OID,
                                                           constval,
                                                           values[i]));
                if (match)
                    break;
            }
        }
        else
        {
            /* no most-common-value info available */
            values = NULL;
            numbers = NULL;
            i = nvalues = nnumbers = 0;
        }

        if (match)
        {
            /*
             * Constant is "=" to this common value.  We know selectivity
             * exactly (or as exactly as ANALYZE could calculate it, anyway).
             */
            selec = numbers[i];
        }
        else
        {
            /*
             * Comparison is against a constant that is neither NULL nor any
             * of the common values.  Its selectivity cannot be more than
             * this:
             */
            double      sumcommon = 0.0;
            double      otherdistinct;

            for (i = 0; i < nnumbers; i++)
                sumcommon += numbers[i];
            selec = 1.0 - sumcommon - stats->stanullfrac;
            CLAMP_PROBABILITY(selec);

            /*
             * and in fact it's probably a good deal less. We approximate that
             * all the not-common values share this remaining fraction
             * equally, so we divide by the number of other distinct values.
             */
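            /*
             * For instance, if stanullfrac is 0.10, the 10 MCV fractions sum
             * to 0.40, and ndistinct is 1010, the remaining 0.50 of the rows
             * is spread over 1000 non-MCV values, giving 0.0005 per value
             * (still subject to the cross-check below).
             */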
            otherdistinct = get_variable_numdistinct(vardata, &isdefault) - nnumbers;
            if (otherdistinct > 1)
                selec /= otherdistinct;

            /*
             * Another cross-check: selectivity shouldn't be estimated as more
             * than the least common "most common value".
             */
            if (nnumbers > 0 && selec > numbers[nnumbers - 1])
                selec = numbers[nnumbers - 1];
        }

        free_attstatsslot(vardata->atttype, values, nvalues,
                          numbers, nnumbers);
    }
    else
    {
        /*
         * No ANALYZE stats available, so make a guess using estimated number
         * of distinct values and assuming they are equally common. (The guess
         * is unlikely to be very good, but we do know a few special cases.)
         */
        selec = 1.0 / get_variable_numdistinct(vardata, &isdefault);
    }

    /* result should be in range, but make sure... */
    CLAMP_PROBABILITY(selec);

    return selec;
}

/*
 * var_eq_non_const --- eqsel for var = something-other-than-const case
 */
static double
var_eq_non_const(VariableStatData *vardata, Oid operator,
                 Node *other,
                 bool varonleft)
{
    double      selec;
    bool        isdefault;

    /*
     * If we matched the var to a unique index or DISTINCT clause, assume
     * there is exactly one match regardless of anything else.  (This is
     * slightly bogus, since the index or clause's equality operator might be
     * different from ours, but it's much more likely to be right than
     * ignoring the information.)
     */
    if (vardata->isunique && vardata->rel && vardata->rel->tuples >= 1.0)
        return 1.0 / vardata->rel->tuples;

    if (HeapTupleIsValid(vardata->statsTuple))
    {
        Form_pg_statistic stats;
        double      ndistinct;
        float4     *numbers;
        int         nnumbers;

        stats = (Form_pg_statistic) GETSTRUCT(vardata->statsTuple);

        /*
         * Search is for a value that we do not know a priori, but we will
         * assume it is not NULL.  Estimate the selectivity as non-null
         * fraction divided by number of distinct values, so that we get a
         * result averaged over all possible values whether common or
         * uncommon.  (Essentially, we are assuming that the not-yet-known
         * comparison value is equally likely to be any of the possible
         * values, regardless of their frequency in the table.  Is that a good
         * idea?)
         */
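        /*
         * For example, with stanullfrac = 0.10 and ndistinct = 100, the
         * estimate is 0.90 / 100 = 0.009, later capped at the frequency of
         * the most common value.
         */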
        selec = 1.0 - stats->stanullfrac;
        ndistinct = get_variable_numdistinct(vardata, &isdefault);
        if (ndistinct > 1)
            selec /= ndistinct;

        /*
         * Cross-check: selectivity should never be estimated as more than the
         * most common value's.
         */
        if (get_attstatsslot(vardata->statsTuple,
                             vardata->atttype, vardata->atttypmod,
                             STATISTIC_KIND_MCV, InvalidOid,
                             NULL,
                             NULL, NULL,
                             &numbers, &nnumbers))
        {
            if (nnumbers > 0 && selec > numbers[0])
                selec = numbers[0];
            free_attstatsslot(vardata->atttype, NULL, 0, numbers, nnumbers);
        }
    }
    else
    {
        /*
         * No ANALYZE stats available, so make a guess using estimated number
         * of distinct values and assuming they are equally common. (The guess
         * is unlikely to be very good, but we do know a few special cases.)
         */
        selec = 1.0 / get_variable_numdistinct(vardata, &isdefault);
    }

    /* result should be in range, but make sure... */
    CLAMP_PROBABILITY(selec);

    return selec;
}

/*
 *      neqsel          - Selectivity of "!=" for any data types.
 *
 * This routine is also used for some operators that are not "!="
 * but have comparable selectivity behavior.  See above comments
 * for eqsel().
 */
Datum
neqsel(PG_FUNCTION_ARGS)
{
    PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
    Oid         operator = PG_GETARG_OID(1);
    List       *args = (List *) PG_GETARG_POINTER(2);
    int         varRelid = PG_GETARG_INT32(3);
    Oid         eqop;
    float8      result;

    /*
     * We want 1 - eqsel() where the equality operator is the one associated
     * with this != operator, that is, its negator.
     */
    eqop = get_negator(operator);
    if (eqop)
    {
        result = DatumGetFloat8(DirectFunctionCall4(eqsel,
                                                    PointerGetDatum(root),
                                                    ObjectIdGetDatum(eqop),
                                                    PointerGetDatum(args),
                                                    Int32GetDatum(varRelid)));
    }
    else
    {
        /* Use default selectivity (should we raise an error instead?) */
        result = DEFAULT_EQ_SEL;
    }
    result = 1.0 - result;
    PG_RETURN_FLOAT8(result);
}

/*
 *  scalarineqsel       - Selectivity of "<", "<=", ">", ">=" for scalars.
 *
 * This is the guts of both scalarltsel and scalargtsel.  The caller has
 * commuted the clause, if necessary, so that we can treat the variable as
 * being on the left.  The caller must also make sure that the other side
 * of the clause is a non-null Const, and dissect same into a value and
 * datatype.
 *
 * This routine works for any datatype (or pair of datatypes) known to
 * convert_to_scalar().  If it is applied to some other datatype,
 * it will return a default estimate.
 */
static double
scalarineqsel(PlannerInfo *root, Oid operator, bool isgt,
              VariableStatData *vardata, Datum constval, Oid consttype)
{
    Form_pg_statistic stats;
    FmgrInfo    opproc;
    double      mcv_selec,
                hist_selec,
                sumcommon;
    double      selec;

    if (!HeapTupleIsValid(vardata->statsTuple))
    {
        /* no stats available, so default result */
        return DEFAULT_INEQ_SEL;
    }
    stats = (Form_pg_statistic) GETSTRUCT(vardata->statsTuple);

    fmgr_info(get_opcode(operator), &opproc);

    /*
     * If we have most-common-values info, add up the fractions of the MCV
     * entries that satisfy MCV OP CONST.  These fractions contribute directly
     * to the result selectivity.  Also add up the total fraction represented
     * by MCV entries.
     */
    mcv_selec = mcv_selectivity(vardata, &opproc, constval, true,
                                &sumcommon);

    /*
     * If there is a histogram, determine which bin the constant falls in, and
     * compute the resulting contribution to selectivity.
     */
    hist_selec = ineq_histogram_selectivity(root, vardata, &opproc, isgt,
                                            constval, consttype);

    /*
     * Now merge the results from the MCV and histogram calculations,
     * realizing that the histogram covers only the non-null values that are
     * not listed in MCV.
     */
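    /*
     * For example, if stanullfrac = 0.05 and the MCV entries cover 0.25 of
     * the rows, the histogram fraction applies only to the remaining 0.70;
     * with hist_selec = 0.30 and mcv_selec = 0.10 the final estimate is
     * 0.70 * 0.30 + 0.10 = 0.31.
     */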
    selec = 1.0 - stats->stanullfrac - sumcommon;

    if (hist_selec >= 0.0)
        selec *= hist_selec;
    else
    {
        /*
         * If no histogram but there are values not accounted for by MCV,
         * arbitrarily assume half of them will match.
         */
        selec *= 0.5;
    }

    selec += mcv_selec;

    /* result should be in range, but make sure... */
    CLAMP_PROBABILITY(selec);

    return selec;
}

/*
 *  mcv_selectivity         - Examine the MCV list for selectivity estimates
 *
 * Determine the fraction of the variable's MCV population that satisfies
 * the predicate (VAR OP CONST), or (CONST OP VAR) if !varonleft.  Also
 * compute the fraction of the total column population represented by the MCV
 * list.  This code will work for any boolean-returning predicate operator.
 *
 * The function result is the MCV selectivity, and the fraction of the
 * total population is returned into *sumcommonp.  Zeroes are returned
 * if there is no MCV list.
 */
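/*
 * For instance, given MCV fractions {0.20, 0.10, 0.05} where the predicate
 * holds for the first and third entries only, the result is 0.25 and
 * *sumcommonp is set to 0.35.
 */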
double
mcv_selectivity(VariableStatData *vardata, FmgrInfo *opproc,
                Datum constval, bool varonleft,
                double *sumcommonp)
{
    double      mcv_selec,
                sumcommon;
    Datum      *values;
    int         nvalues;
    float4     *numbers;
    int         nnumbers;
    int         i;

    mcv_selec = 0.0;
    sumcommon = 0.0;

    if (HeapTupleIsValid(vardata->statsTuple) &&
        get_attstatsslot(vardata->statsTuple,
                         vardata->atttype, vardata->atttypmod,
                         STATISTIC_KIND_MCV, InvalidOid,
                         NULL,
                         &values, &nvalues,
                         &numbers, &nnumbers))
    {
        for (i = 0; i < nvalues; i++)
        {
            if (varonleft ?
                DatumGetBool(FunctionCall2Coll(opproc,
                                               DEFAULT_COLLATION_OID,
                                               values[i],
                                               constval)) :
                DatumGetBool(FunctionCall2Coll(opproc,
                                               DEFAULT_COLLATION_OID,
                                               constval,
                                               values[i])))
                mcv_selec += numbers[i];
            sumcommon += numbers[i];
        }
        free_attstatsslot(vardata->atttype, values, nvalues,
                          numbers, nnumbers);
    }

    *sumcommonp = sumcommon;
    return mcv_selec;
}

/*
 *  histogram_selectivity   - Examine the histogram for selectivity estimates
 *
 * Determine the fraction of the variable's histogram entries that satisfy
 * the predicate (VAR OP CONST), or (CONST OP VAR) if !varonleft.
 *
 * This code will work for any boolean-returning predicate operator, whether
 * or not it has anything to do with the histogram sort operator.  We are
 * essentially using the histogram just as a representative sample.  However,
 * small histograms are unlikely to be all that representative, so the caller
 * should be prepared to fall back on some other estimation approach when the
 * histogram is missing or very small.  It may also be prudent to combine this
 * approach with another one when the histogram is small.
 *
 * If the actual histogram size is not at least min_hist_size, we won't bother
 * to do the calculation at all.  Also, if the n_skip parameter is > 0, we
 * ignore the first and last n_skip histogram elements, on the grounds that
 * they are outliers and hence not very representative.  Typical values for
 * these parameters are 10 and 1.
 *
 * The function result is the selectivity, or -1 if there is no histogram
 * or it's smaller than min_hist_size.
 *
 * The output parameter *hist_size receives the actual histogram size,
 * or zero if no histogram.  Callers may use this number to decide how
 * much faith to put in the function result.
 *
 * Note that the result disregards both the most-common-values (if any) and
 * null entries.  The caller is expected to combine this result with
 * statistics for those portions of the column population.  It may also be
 * prudent to clamp the result range, ie, disbelieve exact 0 or 1 outputs.
 */
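/*
 * For example, with min_hist_size = 10 and n_skip = 1, a 100-entry histogram
 * is sampled on its 98 interior entries, while a 9-entry histogram simply
 * returns -1.
 */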
double
histogram_selectivity(VariableStatData *vardata, FmgrInfo *opproc,
                      Datum constval, bool varonleft,
                      int min_hist_size, int n_skip,
                      int *hist_size)
{
    double      result;
    Datum      *values;
    int         nvalues;

    /* check sanity of parameters */
    Assert(n_skip >= 0);
    Assert(min_hist_size > 2 * n_skip);

    if (HeapTupleIsValid(vardata->statsTuple) &&
        get_attstatsslot(vardata->statsTuple,
                         vardata->atttype, vardata->atttypmod,
                         STATISTIC_KIND_HISTOGRAM, InvalidOid,
                         NULL,
                         &values, &nvalues,
                         NULL, NULL))
    {
        *hist_size = nvalues;
        if (nvalues >= min_hist_size)
        {
            int         nmatch = 0;
            int         i;

            for (i = n_skip; i < nvalues - n_skip; i++)
            {
                if (varonleft ?
                    DatumGetBool(FunctionCall2Coll(opproc,
                                                   DEFAULT_COLLATION_OID,
                                                   values[i],
                                                   constval)) :
                    DatumGetBool(FunctionCall2Coll(opproc,
                                                   DEFAULT_COLLATION_OID,
                                                   constval,
                                                   values[i])))
                    nmatch++;
            }
            result = ((double) nmatch) / ((double) (nvalues - 2 * n_skip));
        }
        else
            result = -1;
        free_attstatsslot(vardata->atttype, values, nvalues, NULL, 0);
    }
    else
    {
        *hist_size = 0;
        result = -1;
    }

    return result;
}

/*
 *  ineq_histogram_selectivity  - Examine the histogram for scalarineqsel
 *
 * Determine the fraction of the variable's histogram population that
 * satisfies the inequality condition, ie, VAR < CONST or VAR > CONST.
 *
 * Returns -1 if there is no histogram (valid results will always be >= 0).
 *
 * Note that the result disregards both the most-common-values (if any) and
 * null entries.  The caller is expected to combine this result with
 * statistics for those portions of the column population.
 */
static double
ineq_histogram_selectivity(PlannerInfo *root,
                           VariableStatData *vardata,
                           FmgrInfo *opproc, bool isgt,
                           Datum constval, Oid consttype)
{
    double      hist_selec;
    Oid         hist_op;
    Datum      *values;
    int         nvalues;

    hist_selec = -1.0;

    /*
     * Someday, ANALYZE might store more than one histogram per rel/att,
     * corresponding to more than one possible sort ordering defined for the
     * column type.  However, to make that work we will need to figure out
     * which staop to search for --- it's not necessarily the one we have at
     * hand!  (For example, we might have a '<=' operator rather than the '<'
     * operator that will appear in staop.)  For now, assume that whatever
     * appears in pg_statistic is sorted the same way our operator sorts, or
     * the reverse way if isgt is TRUE.
     */
    if (HeapTupleIsValid(vardata->statsTuple) &&
        get_attstatsslot(vardata->statsTuple,
                         vardata->atttype, vardata->atttypmod,
                         STATISTIC_KIND_HISTOGRAM, InvalidOid,
                         &hist_op,
                         &values, &nvalues,
                         NULL, NULL))
    {
        if (nvalues > 1)
        {
            /*
             * Use binary search to find proper location, ie, the first slot
             * at which the comparison fails.  (If the given operator isn't
             * actually sort-compatible with the histogram, you'll get garbage
             * results ... but probably not any more garbage-y than you would
             * from the old linear search.)
             *
             * If the binary search accesses the first or last histogram
             * entry, we try to replace that endpoint with the true column min
             * or max as found by get_actual_variable_range().  This
             * ameliorates misestimates when the min or max is moving as a
             * result of changes since the last ANALYZE.  Note that this could
             * result in effectively including MCVs into the histogram that
             * weren't there before, but we don't try to correct for that.
             */
            double      histfrac;
            int         lobound = 0;    /* first possible slot to search */
            int         hibound = nvalues;      /* last+1 slot to search */
            bool        have_end = false;

            /*
             * If there are only two histogram entries, we'll want up-to-date
             * values for both.  (If there are more than two, we need at most
             * one of them to be updated, so we deal with that within the
             * loop.)
             */
            if (nvalues == 2)
                have_end = get_actual_variable_range(root,
                                                     vardata,
                                                     hist_op,
                                                     &values[0],
                                                     &values[1]);

            while (lobound < hibound)
            {
                int         probe = (lobound + hibound) / 2;
                bool        ltcmp;

                /*
                 * If we find ourselves about to compare to the first or last
                 * histogram entry, first try to replace it with the actual
                 * current min or max (unless we already did so above).
                 */
                if (probe == 0 && nvalues > 2)
                    have_end = get_actual_variable_range(root,
                                                         vardata,
                                                         hist_op,
                                                         &values[0],
                                                         NULL);
                else if (probe == nvalues - 1 && nvalues > 2)
                    have_end = get_actual_variable_range(root,
                                                         vardata,
                                                         hist_op,
                                                         NULL,
                                                         &values[probe]);

                ltcmp = DatumGetBool(FunctionCall2Coll(opproc,
                                                       DEFAULT_COLLATION_OID,
                                                       values[probe],
                                                       constval));
                if (isgt)
                    ltcmp = !ltcmp;
                if (ltcmp)
                    lobound = probe + 1;
                else
                    hibound = probe;
            }

            if (lobound <= 0)
            {
                /* Constant is below lower histogram boundary. */
                histfrac = 0.0;
            }
            else if (lobound >= nvalues)
            {
                /* Constant is above upper histogram boundary. */
                histfrac = 1.0;
            }
            else
            {
                int         i = lobound;
                double      val,
                            high,
                            low;
                double      binfrac;

                /*
                 * We have values[i-1] <= constant <= values[i].
                 *
                 * Convert the constant and the two nearest bin boundary
                 * values to a uniform comparison scale, and do a linear
                 * interpolation within this bin.
                 */
                if (convert_to_scalar(constval, consttype, &val,
                                      values[i - 1], values[i],
                                      vardata->vartype,
                                      &low, &high))
                {
                    if (high <= low)
                    {
                        /* cope if bin boundaries appear identical */
                        binfrac = 0.5;
                    }
                    else if (val <= low)
                        binfrac = 0.0;
                    else if (val >= high)
                        binfrac = 1.0;
                    else
                    {
                        binfrac = (val - low) / (high - low);

                        /*
                         * Watch out for the possibility that we got a NaN or
                         * Infinity from the division.  This can happen
                         * despite the previous checks, if for example "low"
                         * is -Infinity.
                         */
                        if (isnan(binfrac) ||
                            binfrac < 0.0 || binfrac > 1.0)
                            binfrac = 0.5;
                    }
                }
                else
                {
                    /*
                     * Ideally we'd produce an error here, on the grounds that
                     * the given operator shouldn't have scalarXXsel
                     * registered as its selectivity func unless we can deal
                     * with its operand types.  But currently, all manner of
                     * stuff is invoking scalarXXsel, so give a default
                     * estimate until that can be fixed.
                     */
                    binfrac = 0.5;
                }

                /*
                 * Now, compute the overall selectivity across the values
                 * represented by the histogram.  We have i-1 full bins and
                 * binfrac partial bin below the constant.
                 */
                histfrac = (double) (i - 1) + binfrac;
                histfrac /= (double) (nvalues - 1);
            }
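            /*
             * For instance, with 101 histogram boundaries (100 bins), a
             * constant landing in bin i = 40 with binfrac = 0.25 yields
             * histfrac = (39 + 0.25) / 100 = 0.3925.
             */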

            /*
             * Now histfrac = fraction of histogram entries below the
             * constant.
             *
             * Account for "<" vs ">"
             */
            hist_selec = isgt ? (1.0 - histfrac) : histfrac;

            /*
             * The histogram boundaries are only approximate to begin with,
             * and may well be out of date anyway.  Therefore, don't believe
             * extremely small or large selectivity estimates --- unless we
             * got actual current endpoint values from the table.
             */
            if (have_end)
                CLAMP_PROBABILITY(hist_selec);
            else
            {
                if (hist_selec < 0.0001)
                    hist_selec = 0.0001;
                else if (hist_selec > 0.9999)
                    hist_selec = 0.9999;
            }
        }

        free_attstatsslot(vardata->atttype, values, nvalues, NULL, 0);
    }

    return hist_selec;
}

/*
 *      scalarltsel     - Selectivity of "<" (also "<=") for scalars.
 */
Datum
scalarltsel(PG_FUNCTION_ARGS)
{
    PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
    Oid         operator = PG_GETARG_OID(1);
    List       *args = (List *) PG_GETARG_POINTER(2);
    int         varRelid = PG_GETARG_INT32(3);
    VariableStatData vardata;
    Node       *other;
    bool        varonleft;
    Datum       constval;
    Oid         consttype;
    bool        isgt;
    double      selec;

    /*
     * If expression is not variable op something or something op variable,
     * then punt and return a default estimate.
     */
    if (!get_restriction_variable(root, args, varRelid,
                                  &vardata, &other, &varonleft))
        PG_RETURN_FLOAT8(DEFAULT_INEQ_SEL);

    /*
     * Can't do anything useful if the something is not a constant, either.
     */
    if (!IsA(other, Const))
    {
        ReleaseVariableStats(vardata);
        PG_RETURN_FLOAT8(DEFAULT_INEQ_SEL);
    }

    /*
     * If the constant is NULL, assume operator is strict and return zero, ie,
     * operator will never return TRUE.
     */
    if (((Const *) other)->constisnull)
    {
        ReleaseVariableStats(vardata);
        PG_RETURN_FLOAT8(0.0);
    }
    constval = ((Const *) other)->constvalue;
    consttype = ((Const *) other)->consttype;

    /*
     * Force the var to be on the left to simplify logic in scalarineqsel.
     */
    if (varonleft)
    {
        /* we have var < other */
        isgt = false;
    }
    else
    {
        /* we have other < var, commute to make var > other */
        operator = get_commutator(operator);
        if (!operator)
        {
            /* Use default selectivity (should we raise an error instead?) */
            ReleaseVariableStats(vardata);
            PG_RETURN_FLOAT8(DEFAULT_INEQ_SEL);
        }
        isgt = true;
    }
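    /*
     * For example, a clause written as 5 < x arrives here as the commuted
     * form x > 5, so the call below runs with isgt = true.
     */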

    selec = scalarineqsel(root, operator, isgt, &vardata, constval, consttype);

    ReleaseVariableStats(vardata);

    PG_RETURN_FLOAT8((float8) selec);
}

/*
 *      scalargtsel     - Selectivity of ">" (also ">=") for scalars.
 */
Datum
scalargtsel(PG_FUNCTION_ARGS)
{
    PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
    Oid         operator = PG_GETARG_OID(1);
    List       *args = (List *) PG_GETARG_POINTER(2);
    int         varRelid = PG_GETARG_INT32(3);
    VariableStatData vardata;
    Node       *other;
    bool        varonleft;
    Datum       constval;
    Oid         consttype;
    bool        isgt;
    double      selec;

    /*
     * If expression is not variable op something or something op variable,
     * then punt and return a default estimate.
     */
    if (!get_restriction_variable(root, args, varRelid,
                                  &vardata, &other, &varonleft))
        PG_RETURN_FLOAT8(DEFAULT_INEQ_SEL);

    /*
     * Can't do anything useful if the something is not a constant, either.
     */
    if (!IsA(other, Const))
    {
        ReleaseVariableStats(vardata);
        PG_RETURN_FLOAT8(DEFAULT_INEQ_SEL);
    }

    /*
     * If the constant is NULL, assume operator is strict and return zero, ie,
     * operator will never return TRUE.
     */
    if (((Const *) other)->constisnull)
    {
        ReleaseVariableStats(vardata);
        PG_RETURN_FLOAT8(0.0);
    }
    constval = ((Const *) other)->constvalue;
    consttype = ((Const *) other)->consttype;

    /*
     * Force the var to be on the left to simplify logic in scalarineqsel.
     */
    if (varonleft)
    {
        /* we have var > other */
        isgt = true;
    }
    else
    {
        /* we have other > var, commute to make var < other */
        operator = get_commutator(operator);
        if (!operator)
        {
            /* Use default selectivity (should we raise an error instead?) */
            ReleaseVariableStats(vardata);
            PG_RETURN_FLOAT8(DEFAULT_INEQ_SEL);
        }
        isgt = false;
    }

    selec = scalarineqsel(root, operator, isgt, &vardata, constval, consttype);

    ReleaseVariableStats(vardata);

    PG_RETURN_FLOAT8((float8) selec);
}

/*
 * patternsel           - Generic code for pattern-match selectivity.
 */
static double
patternsel(PG_FUNCTION_ARGS, Pattern_Type ptype, bool negate)
{
    PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
    Oid         operator = PG_GETARG_OID(1);
    List       *args = (List *) PG_GETARG_POINTER(2);
    int         varRelid = PG_GETARG_INT32(3);
    Oid         collation = PG_GET_COLLATION();
    VariableStatData vardata;
    Node       *other;
    bool        varonleft;
    Datum       constval;
    Oid         consttype;
    Oid         vartype;
    Oid         opfamily;
    Pattern_Prefix_Status pstatus;
    Const      *patt;
    Const      *prefix = NULL;
    Selectivity rest_selec = 0;
    double      result;

    /*
     * If this is for a NOT LIKE or similar operator, get the corresponding
     * positive-match operator and work with that.  Set result to the correct
     * default estimate, too.
     */
    if (negate)
    {
        operator = get_negator(operator);
        if (!OidIsValid(operator))
            elog(ERROR, "patternsel called for operator without a negator");
        result = 1.0 - DEFAULT_MATCH_SEL;
    }
    else
    {
        result = DEFAULT_MATCH_SEL;
    }
1148
1149         /*
1150          * If expression is not variable op constant, then punt and return a
1151          * default estimate.
1152          */
1153         if (!get_restriction_variable(root, args, varRelid,
1154                                                                   &vardata, &other, &varonleft))
1155                 return result;
1156         if (!varonleft || !IsA(other, Const))
1157         {
1158                 ReleaseVariableStats(vardata);
1159                 return result;
1160         }
1161
1162         /*
1163          * If the constant is NULL, assume operator is strict and return zero, ie,
1164          * operator will never return TRUE.  (It's zero even for a negator op.)
1165          */
1166         if (((Const *) other)->constisnull)
1167         {
1168                 ReleaseVariableStats(vardata);
1169                 return 0.0;
1170         }
1171         constval = ((Const *) other)->constvalue;
1172         consttype = ((Const *) other)->consttype;
1173
1174         /*
1175          * The right-hand const is type text or bytea for all supported operators.
1176          * We do not expect to see binary-compatible types here, since
1177          * const-folding should have relabeled the const to exactly match the
1178          * operator's declared type.
1179          */
1180         if (consttype != TEXTOID && consttype != BYTEAOID)
1181         {
1182                 ReleaseVariableStats(vardata);
1183                 return result;
1184         }
1185
1186         /*
1187          * Similarly, the exposed type of the left-hand side should be one of
1188          * those we know.  (Do not look at vardata.atttype, which might be
1189          * something binary-compatible but different.)  We can use it to choose
1190          * the index opfamily from which we must draw the comparison operators.
1191          *
1192          * NOTE: It would be more correct to use the PATTERN opfamilies than the
1193          * simple ones, but at the moment ANALYZE will not generate statistics for
1194          * the PATTERN operators.  But our results are so approximate anyway that
1195          * it probably hardly matters.
1196          */
1197         vartype = vardata.vartype;
1198
1199         switch (vartype)
1200         {
1201                 case TEXTOID:
1202                         opfamily = TEXT_BTREE_FAM_OID;
1203                         break;
1204                 case BPCHAROID:
1205                         opfamily = BPCHAR_BTREE_FAM_OID;
1206                         break;
1207                 case NAMEOID:
1208                         opfamily = NAME_BTREE_FAM_OID;
1209                         break;
1210                 case BYTEAOID:
1211                         opfamily = BYTEA_BTREE_FAM_OID;
1212                         break;
1213                 default:
1214                         ReleaseVariableStats(vardata);
1215                         return result;
1216         }
1217
1218         /*
1219          * Pull out any fixed prefix implied by the pattern, and estimate the
1220          * fractional selectivity of the remainder of the pattern.  Unlike many of
1221          * the other functions in this file, we use the pattern operator's actual
1222          * collation for this step.  This is not because we expect the collation
1223          * to make a big difference in the selectivity estimate (it seldom would),
1224          * but because we want to be sure we cache compiled regexps under the
1225          * right cache key, so that they can be re-used at runtime.
1226          */
1227         patt = (Const *) other;
1228         pstatus = pattern_fixed_prefix(patt, ptype, collation,
1229                                                                    &prefix, &rest_selec);
1230
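        /*
         * For example (hypothetical pattern): col LIKE 'foo%bar' yields the
         * fixed prefix 'foo' with Pattern_Prefix_Partial status, while
         * rest_selec estimates the selectivity of the remaining '%bar' part.
         */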
1231         /*
1232          * If necessary, coerce the prefix constant to the right type.
1233          */
1234         if (prefix && prefix->consttype != vartype)
1235         {
1236                 char       *prefixstr;
1237
1238                 switch (prefix->consttype)
1239                 {
1240                         case TEXTOID:
1241                                 prefixstr = TextDatumGetCString(prefix->constvalue);
1242                                 break;
1243                         case BYTEAOID:
1244                                 prefixstr = DatumGetCString(DirectFunctionCall1(byteaout,
1245                                                                                                                 prefix->constvalue));
1246                                 break;
1247                         default:
1248                                 elog(ERROR, "unrecognized consttype: %u",
1249                                          prefix->consttype);
1250                                 ReleaseVariableStats(vardata);
1251                                 return result;
1252                 }
1253                 prefix = string_to_const(prefixstr, vartype);
1254                 pfree(prefixstr);
1255         }
1256
1257         if (pstatus == Pattern_Prefix_Exact)
1258         {
1259                 /*
1260                  * Pattern specifies an exact match, so pretend operator is '='
1261                  */
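                /*
                 * For instance, col LIKE 'abc' (no wildcard characters) is
                 * estimated exactly as col = 'abc' would be.
                 */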
1262                 Oid                     eqopr = get_opfamily_member(opfamily, vartype, vartype,
1263                                                                                                 BTEqualStrategyNumber);
1264
1265                 if (eqopr == InvalidOid)
1266                         elog(ERROR, "no = operator for opfamily %u", opfamily);
1267                 result = var_eq_const(&vardata, eqopr, prefix->constvalue,
1268                                                           false, true);
1269         }
1270         else
1271         {
1272                 /*
1273                  * Not exact-match pattern.  If we have a sufficiently large
1274                  * histogram, estimate selectivity for the histogram part of the
1275                  * population by counting matches in the histogram.  If not, estimate
1276                  * selectivity of the fixed prefix and remainder of pattern
1277                  * separately, then combine the two to get an estimate of the
1278                  * selectivity for the part of the column population represented by
1279                  * the histogram.  (For small histograms, we combine these
1280                  * approaches.)
1281                  *
1282                  * We then add up data for values in the most-common-values list; these are
1283                  * not in the histogram population, and we can get exact answers for
1284                  * them by applying the pattern operator, so there's no reason to
1285                  * approximate.  (If the MCVs cover a significant part of the total
1286                  * population, this gives us a big leg up in accuracy.)
1287                  */
1288                 Selectivity selec;
1289                 int                     hist_size;
1290                 FmgrInfo        opproc;
1291                 double          nullfrac,
1292                                         mcv_selec,
1293                                         sumcommon;
1294
1295                 /* Try to use the histogram entries to get selectivity */
1296                 fmgr_info(get_opcode(operator), &opproc);
1297
1298                 selec = histogram_selectivity(&vardata, &opproc, constval, true,
1299                                                                           10, 1, &hist_size);
1300
1301                 /* If the histogram has fewer than 100 entries, use the heuristic method */
1302                 if (hist_size < 100)
1303                 {
1304                         Selectivity heursel;
1305                         Selectivity prefixsel;
1306
1307                         if (pstatus == Pattern_Prefix_Partial)
1308                                 prefixsel = prefix_selectivity(root, &vardata, vartype,
1309                                                                                            opfamily, prefix);
1310                         else
1311                                 prefixsel = 1.0;
1312                         heursel = prefixsel * rest_selec;
1313
1314                         if (selec < 0)          /* fewer than 10 histogram entries? */
1315                                 selec = heursel;
1316                         else
1317                         {
1318                                 /*
1319                                  * For histogram sizes from 10 to 100, we combine the
1320                                  * histogram and heuristic selectivities, putting increasingly
1321                                  * more trust in the histogram for larger sizes.
1322                                  */
1323                                 double          hist_weight = hist_size / 100.0;
1324
1325                                 selec = selec * hist_weight + heursel * (1.0 - hist_weight);
1326                         }
1327                 }
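                /*
                 * Worked example with hypothetical numbers: hist_size = 40
                 * gives hist_weight = 0.4, so a histogram estimate of 0.02 and
                 * a heuristic estimate of 0.05 blend to
                 * 0.02 * 0.4 + 0.05 * 0.6 = 0.038.
                 */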
1328
1329                 /* In any case, don't believe extremely small or large estimates. */
1330                 if (selec < 0.0001)
1331                         selec = 0.0001;
1332                 else if (selec > 0.9999)
1333                         selec = 0.9999;
1334
1335                 /*
1336                  * If we have most-common-values info, add up the fractions of the MCV
1337                  * entries that satisfy MCV OP PATTERN.  These fractions contribute
1338                  * directly to the result selectivity.  Also add up the total fraction
1339                  * represented by MCV entries.
1340                  */
1341                 mcv_selec = mcv_selectivity(&vardata, &opproc, constval, true,
1342                                                                         &sumcommon);
1343
1344                 if (HeapTupleIsValid(vardata.statsTuple))
1345                         nullfrac = ((Form_pg_statistic) GETSTRUCT(vardata.statsTuple))->stanullfrac;
1346                 else
1347                         nullfrac = 0.0;
1348
1349                 /*
1350                  * Now merge the results from the MCV and histogram calculations,
1351                  * realizing that the histogram covers only the non-null values that
1352                  * are not listed in MCV.
1353                  */
1354                 selec *= 1.0 - nullfrac - sumcommon;
1355                 selec += mcv_selec;
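                /*
                 * Worked example with hypothetical numbers: with nullfrac = 0.1,
                 * sumcommon = 0.2, a histogram-part selectivity of 0.05 and
                 * mcv_selec = 0.08, the result is 0.05 * 0.7 + 0.08 = 0.115.
                 */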
1356
1357                 /* result should be in range, but make sure... */
1358                 CLAMP_PROBABILITY(selec);
1359                 result = selec;
1360         }
1361
1362         if (prefix)
1363         {
1364                 pfree(DatumGetPointer(prefix->constvalue));
1365                 pfree(prefix);
1366         }
1367
1368         ReleaseVariableStats(vardata);
1369
1370         return negate ? (1.0 - result) : result;
1371 }
1372
1373 /*
1374  *              regexeqsel              - Selectivity of regular-expression pattern match.
1375  */
1376 Datum
1377 regexeqsel(PG_FUNCTION_ARGS)
1378 {
1379         PG_RETURN_FLOAT8(patternsel(fcinfo, Pattern_Type_Regex, false));
1380 }
1381
1382 /*
1383  *              icregexeqsel    - Selectivity of case-insensitive regex match.
1384  */
1385 Datum
1386 icregexeqsel(PG_FUNCTION_ARGS)
1387 {
1388         PG_RETURN_FLOAT8(patternsel(fcinfo, Pattern_Type_Regex_IC, false));
1389 }
1390
1391 /*
1392  *              likesel                 - Selectivity of LIKE pattern match.
1393  */
1394 Datum
1395 likesel(PG_FUNCTION_ARGS)
1396 {
1397         PG_RETURN_FLOAT8(patternsel(fcinfo, Pattern_Type_Like, false));
1398 }
1399
1400 /*
1401  *              iclikesel                       - Selectivity of ILIKE pattern match.
1402  */
1403 Datum
1404 iclikesel(PG_FUNCTION_ARGS)
1405 {
1406         PG_RETURN_FLOAT8(patternsel(fcinfo, Pattern_Type_Like_IC, false));
1407 }
1408
1409 /*
1410  *              regexnesel              - Selectivity of regular-expression pattern non-match.
1411  */
1412 Datum
1413 regexnesel(PG_FUNCTION_ARGS)
1414 {
1415         PG_RETURN_FLOAT8(patternsel(fcinfo, Pattern_Type_Regex, true));
1416 }
1417
1418 /*
1419  *              icregexnesel    - Selectivity of case-insensitive regex non-match.
1420  */
1421 Datum
1422 icregexnesel(PG_FUNCTION_ARGS)
1423 {
1424         PG_RETURN_FLOAT8(patternsel(fcinfo, Pattern_Type_Regex_IC, true));
1425 }
1426
1427 /*
1428  *              nlikesel                - Selectivity of LIKE pattern non-match.
1429  */
1430 Datum
1431 nlikesel(PG_FUNCTION_ARGS)
1432 {
1433         PG_RETURN_FLOAT8(patternsel(fcinfo, Pattern_Type_Like, true));
1434 }
1435
1436 /*
1437  *              icnlikesel              - Selectivity of ILIKE pattern non-match.
1438  */
1439 Datum
1440 icnlikesel(PG_FUNCTION_ARGS)
1441 {
1442         PG_RETURN_FLOAT8(patternsel(fcinfo, Pattern_Type_Like_IC, true));
1443 }
1444
1445 /*
1446  *              boolvarsel              - Selectivity of Boolean variable.
1447  *
1448  * This can actually be called on any boolean-valued expression.  If it
1449  * involves only Vars of the specified relation, and if there are statistics
1450  * about the Var or expression (the latter is possible if it's indexed) then
1451  * we'll produce a real estimate; otherwise it's just a default.
1452  */
1453 Selectivity
1454 boolvarsel(PlannerInfo *root, Node *arg, int varRelid)
1455 {
1456         VariableStatData vardata;
1457         double          selec;
1458
1459         examine_variable(root, arg, varRelid, &vardata);
1460         if (HeapTupleIsValid(vardata.statsTuple))
1461         {
1462                 /*
1463                  * A boolean variable V is equivalent to the clause V = 't', so we
1464                  * compute the selectivity as if that is what we have.
1465                  */
1466                 selec = var_eq_const(&vardata, BooleanEqualOperator,
1467                                                          BoolGetDatum(true), false, true);
1468         }
1469         else if (is_funcclause(arg))
1470         {
1471                 /*
1472                  * If we have no stats and it's a function call, estimate 0.3333333.
1473                  * This seems a pretty unprincipled choice, but Postgres has been
1474                  * using that estimate for function calls since 1992.  The hoariness
1475                  * of this behavior suggests that we should not be in too much hurry
1476                  * to use another value.
1477                  */
1478                 selec = 0.3333333;
1479         }
1480         else
1481         {
1482                 /* Otherwise, the default estimate is 0.5 */
1483                 selec = 0.5;
1484         }
1485         ReleaseVariableStats(vardata);
1486         return selec;
1487 }
1488
1489 /*
1490  *              booltestsel             - Selectivity of BooleanTest Node.
1491  */
1492 Selectivity
1493 booltestsel(PlannerInfo *root, BoolTestType booltesttype, Node *arg,
1494                         int varRelid, JoinType jointype, SpecialJoinInfo *sjinfo)
1495 {
1496         VariableStatData vardata;
1497         double          selec;
1498
1499         examine_variable(root, arg, varRelid, &vardata);
1500
1501         if (HeapTupleIsValid(vardata.statsTuple))
1502         {
1503                 Form_pg_statistic stats;
1504                 double          freq_null;
1505                 Datum      *values;
1506                 int                     nvalues;
1507                 float4     *numbers;
1508                 int                     nnumbers;
1509
1510                 stats = (Form_pg_statistic) GETSTRUCT(vardata.statsTuple);
1511                 freq_null = stats->stanullfrac;
1512
1513                 if (get_attstatsslot(vardata.statsTuple,
1514                                                          vardata.atttype, vardata.atttypmod,
1515                                                          STATISTIC_KIND_MCV, InvalidOid,
1516                                                          NULL,
1517                                                          &values, &nvalues,
1518                                                          &numbers, &nnumbers)
1519                         && nnumbers > 0)
1520                 {
1521                         double          freq_true;
1522                         double          freq_false;
1523
1524                         /*
1525                          * Get first MCV frequency and derive frequency for true.
1526                          */
1527                         if (DatumGetBool(values[0]))
1528                                 freq_true = numbers[0];
1529                         else
1530                                 freq_true = 1.0 - numbers[0] - freq_null;
1531
1532                         /*
1533                          * Next derive frequency for false. Then use these as appropriate
1534                          * to derive frequency for each case.
1535                          */
1536                         freq_false = 1.0 - freq_true - freq_null;
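                        /*
                         * Worked example with hypothetical statistics: if the most
                         * common value is true with frequency 0.6 and freq_null is
                         * 0.1, then freq_true = 0.6 and freq_false = 0.3, so
                         * IS NOT TRUE would be estimated at 0.4 (false plus null).
                         */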
1537
1538                         switch (booltesttype)
1539                         {
1540                                 case IS_UNKNOWN:
1541                                         /* select only NULL values */
1542                                         selec = freq_null;
1543                                         break;
1544                                 case IS_NOT_UNKNOWN:
1545                                         /* select non-NULL values */
1546                                         selec = 1.0 - freq_null;
1547                                         break;
1548                                 case IS_TRUE:
1549                                         /* select only TRUE values */
1550                                         selec = freq_true;
1551                                         break;
1552                                 case IS_NOT_TRUE:
1553                                         /* select non-TRUE values */
1554                                         selec = 1.0 - freq_true;
1555                                         break;
1556                                 case IS_FALSE:
1557                                         /* select only FALSE values */
1558                                         selec = freq_false;
1559                                         break;
1560                                 case IS_NOT_FALSE:
1561                                         /* select non-FALSE values */
1562                                         selec = 1.0 - freq_false;
1563                                         break;
1564                                 default:
1565                                         elog(ERROR, "unrecognized booltesttype: %d",
1566                                                  (int) booltesttype);
1567                                         selec = 0.0;    /* Keep compiler quiet */
1568                                         break;
1569                         }
1570
1571                         free_attstatsslot(vardata.atttype, values, nvalues,
1572                                                           numbers, nnumbers);
1573                 }
1574                 else
1575                 {
1576                         /*
1577                          * No most-common-value info available. Still have null fraction
1578                          * information, so use it for IS [NOT] UNKNOWN. Otherwise adjust
1579                          * for null fraction and assume a 50-50 split of TRUE and FALSE.
1580                          */
1581                         switch (booltesttype)
1582                         {
1583                                 case IS_UNKNOWN:
1584                                         /* select only NULL values */
1585                                         selec = freq_null;
1586                                         break;
1587                                 case IS_NOT_UNKNOWN:
1588                                         /* select non-NULL values */
1589                                         selec = 1.0 - freq_null;
1590                                         break;
1591                                 case IS_TRUE:
1592                                 case IS_FALSE:
1593                                         /* Assume we select half of the non-NULL values */
1594                                         selec = (1.0 - freq_null) / 2.0;
1595                                         break;
1596                                 case IS_NOT_TRUE:
1597                                 case IS_NOT_FALSE:
1598                                         /* Assume we select NULLs plus half of the non-NULLs */
1599                                         /* equiv. to freq_null + (1.0 - freq_null) / 2.0 */
1600                                         selec = (freq_null + 1.0) / 2.0;
1601                                         break;
1602                                 default:
1603                                         elog(ERROR, "unrecognized booltesttype: %d",
1604                                                  (int) booltesttype);
1605                                         selec = 0.0;    /* Keep compiler quiet */
1606                                         break;
1607                         }
1608                 }
1609         }
1610         else
1611         {
1612                 /*
1613                  * If we can't get variable statistics for the argument, perhaps
1614                  * clause_selectivity can do something with it.  We ignore the
1615                  * possibility of a NULL value when using clause_selectivity, and just
1616                  * assume the value is either TRUE or FALSE.
1617                  */
1618                 switch (booltesttype)
1619                 {
1620                         case IS_UNKNOWN:
1621                                 selec = DEFAULT_UNK_SEL;
1622                                 break;
1623                         case IS_NOT_UNKNOWN:
1624                                 selec = DEFAULT_NOT_UNK_SEL;
1625                                 break;
1626                         case IS_TRUE:
1627                         case IS_NOT_FALSE:
1628                                 selec = (double) clause_selectivity(root, arg,
1629                                                                                                         varRelid,
1630                                                                                                         jointype, sjinfo);
1631                                 break;
1632                         case IS_FALSE:
1633                         case IS_NOT_TRUE:
1634                                 selec = 1.0 - (double) clause_selectivity(root, arg,
1635                                                                                                                   varRelid,
1636                                                                                                                   jointype, sjinfo);
1637                                 break;
1638                         default:
1639                                 elog(ERROR, "unrecognized booltesttype: %d",
1640                                          (int) booltesttype);
1641                                 selec = 0.0;    /* Keep compiler quiet */
1642                                 break;
1643                 }
1644         }
1645
1646         ReleaseVariableStats(vardata);
1647
1648         /* result should be in range, but make sure... */
1649         CLAMP_PROBABILITY(selec);
1650
1651         return (Selectivity) selec;
1652 }
1653
1654 /*
1655  *              nulltestsel             - Selectivity of NullTest Node.
1656  */
1657 Selectivity
1658 nulltestsel(PlannerInfo *root, NullTestType nulltesttype, Node *arg,
1659                         int varRelid, JoinType jointype, SpecialJoinInfo *sjinfo)
1660 {
1661         VariableStatData vardata;
1662         double          selec;
1663
1664         examine_variable(root, arg, varRelid, &vardata);
1665
1666         if (HeapTupleIsValid(vardata.statsTuple))
1667         {
1668                 Form_pg_statistic stats;
1669                 double          freq_null;
1670
1671                 stats = (Form_pg_statistic) GETSTRUCT(vardata.statsTuple);
1672                 freq_null = stats->stanullfrac;
1673
1674                 switch (nulltesttype)
1675                 {
1676                         case IS_NULL:
1677
1678                                 /*
1679                                  * Use freq_null directly.
1680                                  */
1681                                 selec = freq_null;
1682                                 break;
1683                         case IS_NOT_NULL:
1684
1685                                 /*
1686                                  * Select not unknown (not null) values. Calculate from
1687                                  * freq_null.
1688                                  */
1689                                 selec = 1.0 - freq_null;
1690                                 break;
1691                         default:
1692                                 elog(ERROR, "unrecognized nulltesttype: %d",
1693                                          (int) nulltesttype);
1694                                 return (Selectivity) 0; /* keep compiler quiet */
1695                 }
1696         }
1697         else
1698         {
1699                 /*
1700                  * No ANALYZE stats available, so make a guess
1701                  */
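                /*
                 * The defaults below embody the assumption that, absent
                 * statistics, IS NULL matches only a small fraction of the
                 * rows while IS NOT NULL matches nearly all of them.
                 */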
1702                 switch (nulltesttype)
1703                 {
1704                         case IS_NULL:
1705                                 selec = DEFAULT_UNK_SEL;
1706                                 break;
1707                         case IS_NOT_NULL:
1708                                 selec = DEFAULT_NOT_UNK_SEL;
1709                                 break;
1710                         default:
1711                                 elog(ERROR, "unrecognized nulltesttype: %d",
1712                                          (int) nulltesttype);
1713                                 return (Selectivity) 0; /* keep compiler quiet */
1714                 }
1715         }
1716
1717         ReleaseVariableStats(vardata);
1718
1719         /* result should be in range, but make sure... */
1720         CLAMP_PROBABILITY(selec);
1721
1722         return (Selectivity) selec;
1723 }
1724
1725 /*
1726  * strip_array_coercion - strip binary-compatible relabeling from an array expr
1727  *
1728  * For array values, the parser normally generates ArrayCoerceExpr conversions,
1729  * but it seems possible that RelabelType might show up.  Also, the planner
1730  * is not currently tense about collapsing stacked ArrayCoerceExpr nodes,
1731  * so we need to be ready to deal with more than one level.
1732  */
1733 static Node *
1734 strip_array_coercion(Node *node)
1735 {
1736         for (;;)
1737         {
1738                 if (node && IsA(node, ArrayCoerceExpr) &&
1739                         ((ArrayCoerceExpr *) node)->elemfuncid == InvalidOid)
1740                 {
1741                         node = (Node *) ((ArrayCoerceExpr *) node)->arg;
1742                 }
1743                 else if (node && IsA(node, RelabelType))
1744                 {
1745                         /* We don't really expect this case, but may as well cope */
1746                         node = (Node *) ((RelabelType *) node)->arg;
1747                 }
1748                 else
1749                         break;
1750         }
1751         return node;
1752 }
1753
1754 /*
1755  *              scalararraysel          - Selectivity of ScalarArrayOpExpr Node.
1756  */
1757 Selectivity
1758 scalararraysel(PlannerInfo *root,
1759                            ScalarArrayOpExpr *clause,
1760                            bool is_join_clause,
1761                            int varRelid,
1762                            JoinType jointype,
1763                            SpecialJoinInfo *sjinfo)
1764 {
1765         Oid                     operator = clause->opno;
1766         bool            useOr = clause->useOr;
1767         bool            isEquality = false;
1768         bool            isInequality = false;
1769         Node       *leftop;
1770         Node       *rightop;
1771         Oid                     nominal_element_type;
1772         Oid                     nominal_element_collation;
1773         TypeCacheEntry *typentry;
1774         RegProcedure oprsel;
1775         FmgrInfo        oprselproc;
1776         Selectivity s1;
1777         Selectivity s1disjoint;
1778
1779         /* First, deconstruct the expression */
1780         Assert(list_length(clause->args) == 2);
1781         leftop = (Node *) linitial(clause->args);
1782         rightop = (Node *) lsecond(clause->args);
1783
1784         /* aggressively reduce both sides to constants */
1785         leftop = estimate_expression_value(root, leftop);
1786         rightop = estimate_expression_value(root, rightop);
1787
1788         /* get nominal (after relabeling) element type of rightop */
1789         nominal_element_type = get_base_element_type(exprType(rightop));
1790         if (!OidIsValid(nominal_element_type))
1791                 return (Selectivity) 0.5;               /* probably shouldn't happen */
1792         /* get nominal collation, too, for generating constants */
1793         nominal_element_collation = exprCollation(rightop);
1794
1795         /* look through any binary-compatible relabeling of rightop */
1796         rightop = strip_array_coercion(rightop);
1797
1798         /*
1799          * Detect whether the operator is the default equality or inequality
1800          * operator of the array element type.
1801          */
1802         typentry = lookup_type_cache(nominal_element_type, TYPECACHE_EQ_OPR);
1803         if (OidIsValid(typentry->eq_opr))
1804         {
1805                 if (operator == typentry->eq_opr)
1806                         isEquality = true;
1807                 else if (get_negator(operator) == typentry->eq_opr)
1808                         isInequality = true;
1809         }
1810
1811         /*
1812          * If it is equality or inequality, we might be able to estimate this as a
1813          * form of array containment; for instance "const = ANY(column)" can be
1814          * treated as "ARRAY[const] <@ column".  scalararraysel_containment tries
1815          * that, and returns the selectivity estimate if successful, or -1 if not.
1816          */
1817         if ((isEquality || isInequality) && !is_join_clause)
1818         {
1819                 s1 = scalararraysel_containment(root, leftop, rightop,
1820                                                                                 nominal_element_type,
1821                                                                                 isEquality, useOr, varRelid);
1822                 if (s1 >= 0.0)
1823                         return s1;
1824         }
1825
1826         /*
1827          * Look up the underlying operator's selectivity estimator. Punt if it
1828          * hasn't got one.
1829          */
1830         if (is_join_clause)
1831                 oprsel = get_oprjoin(operator);
1832         else
1833                 oprsel = get_oprrest(operator);
1834         if (!oprsel)
1835                 return (Selectivity) 0.5;
1836         fmgr_info(oprsel, &oprselproc);
1837
1838         /*
1839          * In the array-containment check above, we must only believe that an
1840          * operator is equality or inequality if it is the default btree equality
1841          * operator (or its negator) for the element type, since those are the
1842          * operators that array containment will use.  But in what follows, we can
1843          * be a little laxer, and also believe that any operators using eqsel() or
1844          * neqsel() as selectivity estimator act like equality or inequality.
1845          */
1846         if (oprsel == F_EQSEL || oprsel == F_EQJOINSEL)
1847                 isEquality = true;
1848         else if (oprsel == F_NEQSEL || oprsel == F_NEQJOINSEL)
1849                 isInequality = true;
1850
1851         /*
1852          * We consider three cases:
1853          *
1854          * 1. rightop is an Array constant: deconstruct the array, apply the
1855          * operator's selectivity function for each array element, and merge the
1856          * results in the same way that clausesel.c does for AND/OR combinations.
1857          *
1858          * 2. rightop is an ARRAY[] construct: apply the operator's selectivity
1859          * function for each element of the ARRAY[] construct, and merge.
1860          *
1861          * 3. otherwise, make a guess ...
1862          */
1863         if (rightop && IsA(rightop, Const))
1864         {
1865                 Datum           arraydatum = ((Const *) rightop)->constvalue;
1866                 bool            arrayisnull = ((Const *) rightop)->constisnull;
1867                 ArrayType  *arrayval;
1868                 int16           elmlen;
1869                 bool            elmbyval;
1870                 char            elmalign;
1871                 int                     num_elems;
1872                 Datum      *elem_values;
1873                 bool       *elem_nulls;
1874                 int                     i;
1875
1876                 if (arrayisnull)                /* qual can't succeed if null array */
1877                         return (Selectivity) 0.0;
1878                 arrayval = DatumGetArrayTypeP(arraydatum);
1879                 get_typlenbyvalalign(ARR_ELEMTYPE(arrayval),
1880                                                          &elmlen, &elmbyval, &elmalign);
1881                 deconstruct_array(arrayval,
1882                                                   ARR_ELEMTYPE(arrayval),
1883                                                   elmlen, elmbyval, elmalign,
1884                                                   &elem_values, &elem_nulls, &num_elems);
1885
1886                 /*
1887                  * For generic operators, we assume the probability of success is
1888                  * independent for each array element.  But for "= ANY" or "<> ALL",
1889                  * if the array elements are distinct (which'd typically be the case)
1890                  * then the probabilities are disjoint, and we should just sum them.
1891                  *
1892                  * If we were being really tense we would try to confirm that the
1893                  * elements are all distinct, but that would be expensive and it
1894                  * doesn't seem to be worth the cycles; it would amount to penalizing
1895                  * well-written queries in favor of poorly-written ones.  However, we
1896                  * do protect ourselves a little bit by checking whether the
1897                  * disjointness assumption leads to an impossible (out of range)
1898                  * probability; if so, we fall back to the normal calculation.
1899                  */
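                /*
                 * Illustrative comparison (hypothetical numbers): for
                 * "x = ANY('{1,2,3}')" with a per-element selectivity of 0.1,
                 * the disjoint estimate is 0.3, whereas treating the elements
                 * as independent gives 1 - 0.9^3 = 0.271.
                 */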
1900                 s1 = s1disjoint = (useOr ? 0.0 : 1.0);
1901
1902                 for (i = 0; i < num_elems; i++)
1903                 {
1904                         List       *args;
1905                         Selectivity s2;
1906
1907                         args = list_make2(leftop,
1908                                                           makeConst(nominal_element_type,
1909                                                                                 -1,
1910                                                                                 nominal_element_collation,
1911                                                                                 elmlen,
1912                                                                                 elem_values[i],
1913                                                                                 elem_nulls[i],
1914                                                                                 elmbyval));
1915                         if (is_join_clause)
1916                                 s2 = DatumGetFloat8(FunctionCall5Coll(&oprselproc,
1917                                                                                                           clause->inputcollid,
1918                                                                                                           PointerGetDatum(root),
1919                                                                                                   ObjectIdGetDatum(operator),
1920                                                                                                           PointerGetDatum(args),
1921                                                                                                           Int16GetDatum(jointype),
1922                                                                                                    PointerGetDatum(sjinfo)));
1923                         else
1924                                 s2 = DatumGetFloat8(FunctionCall4Coll(&oprselproc,
1925                                                                                                           clause->inputcollid,
1926                                                                                                           PointerGetDatum(root),
1927                                                                                                   ObjectIdGetDatum(operator),
1928                                                                                                           PointerGetDatum(args),
1929                                                                                                    Int32GetDatum(varRelid)));
1930
1931                         if (useOr)
1932                         {
1933                                 s1 = s1 + s2 - s1 * s2;
1934                                 if (isEquality)
1935                                         s1disjoint += s2;
1936                         }
1937                         else
1938                         {
1939                                 s1 = s1 * s2;
1940                                 if (isInequality)
1941                                         s1disjoint += s2 - 1.0;
1942                         }
1943                 }
1944
1945                 /* accept disjoint-probability estimate if in range */
1946                 if ((useOr ? isEquality : isInequality) &&
1947                         s1disjoint >= 0.0 && s1disjoint <= 1.0)
1948                         s1 = s1disjoint;
1949         }
1950         else if (rightop && IsA(rightop, ArrayExpr) &&
1951                          !((ArrayExpr *) rightop)->multidims)
1952         {
1953                 ArrayExpr  *arrayexpr = (ArrayExpr *) rightop;
1954                 int16           elmlen;
1955                 bool            elmbyval;
1956                 ListCell   *l;
1957
1958                 get_typlenbyval(arrayexpr->element_typeid,
1959                                                 &elmlen, &elmbyval);
1960
1961                 /*
1962                  * We use the assumption of disjoint probabilities here too, although
1963                  * the odds of equal array elements are rather higher if the elements
1964                  * are not all constants (which they won't be, else constant folding
1965                  * would have reduced the ArrayExpr to a Const).  In this path it's
1966                  * critical to have the sanity check on the s1disjoint estimate.
1967                  */
1968                 s1 = s1disjoint = (useOr ? 0.0 : 1.0);
1969
1970                 foreach(l, arrayexpr->elements)
1971                 {
1972                         Node       *elem = (Node *) lfirst(l);
1973                         List       *args;
1974                         Selectivity s2;
1975
1976                         /*
1977                          * Theoretically, if elem isn't of nominal_element_type we should
1978                          * insert a RelabelType, but it seems unlikely that any operator
1979                          * estimation function would really care ...
1980                          */
1981                         args = list_make2(leftop, elem);
1982                         if (is_join_clause)
1983                                 s2 = DatumGetFloat8(FunctionCall5Coll(&oprselproc,
1984                                                                                                           clause->inputcollid,
1985                                                                                                           PointerGetDatum(root),
1986                                                                                                   ObjectIdGetDatum(operator),
1987                                                                                                           PointerGetDatum(args),
1988                                                                                                           Int16GetDatum(jointype),
1989                                                                                                    PointerGetDatum(sjinfo)));
1990                         else
1991                                 s2 = DatumGetFloat8(FunctionCall4Coll(&oprselproc,
1992                                                                                                           clause->inputcollid,
1993                                                                                                           PointerGetDatum(root),
1994                                                                                                   ObjectIdGetDatum(operator),
1995                                                                                                           PointerGetDatum(args),
1996                                                                                                    Int32GetDatum(varRelid)));
1997
1998                         if (useOr)
1999                         {
2000                                 s1 = s1 + s2 - s1 * s2;
2001                                 if (isEquality)
2002                                         s1disjoint += s2;
2003                         }
2004                         else
2005                         {
2006                                 s1 = s1 * s2;
2007                                 if (isInequality)
2008                                         s1disjoint += s2 - 1.0;
2009                         }
2010                 }
2011
2012                 /* accept disjoint-probability estimate if in range */
2013                 if ((useOr ? isEquality : isInequality) &&
2014                         s1disjoint >= 0.0 && s1disjoint <= 1.0)
2015                         s1 = s1disjoint;
2016         }
2017         else
2018         {
2019                 CaseTestExpr *dummyexpr;
2020                 List       *args;
2021                 Selectivity s2;
2022                 int                     i;
2023
2024                 /*
2025                  * We need a dummy rightop to pass to the operator selectivity
2026                  * routine.  It can be pretty much anything that doesn't look like a
2027                  * constant; CaseTestExpr is a convenient choice.
2028                  */
2029                 dummyexpr = makeNode(CaseTestExpr);
2030                 dummyexpr->typeId = nominal_element_type;
2031                 dummyexpr->typeMod = -1;
2032                 dummyexpr->collation = clause->inputcollid;
2033                 args = list_make2(leftop, dummyexpr);
2034                 if (is_join_clause)
2035                         s2 = DatumGetFloat8(FunctionCall5Coll(&oprselproc,
2036                                                                                                   clause->inputcollid,
2037                                                                                                   PointerGetDatum(root),
2038                                                                                                   ObjectIdGetDatum(operator),
2039                                                                                                   PointerGetDatum(args),
2040                                                                                                   Int16GetDatum(jointype),
2041                                                                                                   PointerGetDatum(sjinfo)));
2042                 else
2043                         s2 = DatumGetFloat8(FunctionCall4Coll(&oprselproc,
2044                                                                                                   clause->inputcollid,
2045                                                                                                   PointerGetDatum(root),
2046                                                                                                   ObjectIdGetDatum(operator),
2047                                                                                                   PointerGetDatum(args),
2048                                                                                                   Int32GetDatum(varRelid)));
2049                 s1 = useOr ? 0.0 : 1.0;
2050
2051                 /*
2052                  * Arbitrarily assume 10 elements in the eventual array value (see
2053                  * also estimate_array_length).  We don't risk an assumption of
2054                  * disjoint probabilities here.
2055                  */
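                /*
                 * Illustrative result (hypothetical s2): with s2 = 0.1 and
                 * useOr, ten iterations of the loop below give
                 * s1 = 1 - 0.9^10, roughly 0.65.
                 */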
2056                 for (i = 0; i < 10; i++)
2057                 {
2058                         if (useOr)
2059                                 s1 = s1 + s2 - s1 * s2;
2060                         else
2061                                 s1 = s1 * s2;
2062                 }
2063         }
2064
2065         /* result should be in range, but make sure... */
2066         CLAMP_PROBABILITY(s1);
2067
2068         return s1;
2069 }
2070
2071 /*
2072  * Estimate number of elements in the array yielded by an expression.
2073  *
2074  * It's important that this agree with scalararraysel.
2075  */
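/*
 * For illustration (hypothetical clauses): x = ANY('{1,2,3}'::int[]) yields 3,
 * x = ANY(ARRAY[a, b]) yields 2, and an expression such as a query parameter
 * falls back to the default guess of 10.
 */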
2076 int
2077 estimate_array_length(Node *arrayexpr)
2078 {
2079         /* look through any binary-compatible relabeling of arrayexpr */
2080         arrayexpr = strip_array_coercion(arrayexpr);
2081
2082         if (arrayexpr && IsA(arrayexpr, Const))
2083         {
2084                 Datum           arraydatum = ((Const *) arrayexpr)->constvalue;
2085                 bool            arrayisnull = ((Const *) arrayexpr)->constisnull;
2086                 ArrayType  *arrayval;
2087
2088                 if (arrayisnull)
2089                         return 0;
2090                 arrayval = DatumGetArrayTypeP(arraydatum);
2091                 return ArrayGetNItems(ARR_NDIM(arrayval), ARR_DIMS(arrayval));
2092         }
2093         else if (arrayexpr && IsA(arrayexpr, ArrayExpr) &&
2094                          !((ArrayExpr *) arrayexpr)->multidims)
2095         {
2096                 return list_length(((ArrayExpr *) arrayexpr)->elements);
2097         }
2098         else
2099         {
2100                 /* default guess --- see also scalararraysel */
2101                 return 10;
2102         }
2103 }
2104
2105 /*
2106  *              rowcomparesel           - Selectivity of RowCompareExpr Node.
2107  *
2108  * We estimate RowCompare selectivity by considering just the first (high
2109  * order) columns, which makes it equivalent to an ordinary OpExpr.  While
2110  * this estimate could be refined by considering additional columns, it
2111  * seems unlikely that we could do a lot better without multi-column
2112  * statistics.
2113  */
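/*
 * For example (hypothetical columns), a clause like (a, b) < (10, 20) is
 * estimated using only "a < 10".
 */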
2114 Selectivity
2115 rowcomparesel(PlannerInfo *root,
2116                           RowCompareExpr *clause,
2117                           int varRelid, JoinType jointype, SpecialJoinInfo *sjinfo)
2118 {
2119         Selectivity s1;
2120         Oid                     opno = linitial_oid(clause->opnos);
2121         Oid                     inputcollid = linitial_oid(clause->inputcollids);
2122         List       *opargs;
2123         bool            is_join_clause;
2124
2125         /* Build equivalent arg list for single operator */
2126         opargs = list_make2(linitial(clause->largs), linitial(clause->rargs));
2127
2128         /*
2129          * Decide if it's a join clause.  This should match clausesel.c's
2130          * treat_as_join_clause(), except that we intentionally consider only the
2131          * leading columns and not the rest of the clause.
2132          */
2133         if (varRelid != 0)
2134         {
2135                 /*
2136                  * Caller is forcing restriction mode (eg, because we are examining an
2137                  * inner indexscan qual).
2138                  */
2139                 is_join_clause = false;
2140         }
2141         else if (sjinfo == NULL)
2142         {
2143                 /*
2144                  * It must be a restriction clause, since it's being evaluated at a
2145                  * scan node.
2146                  */
2147                 is_join_clause = false;
2148         }
2149         else
2150         {
2151                 /*
2152                  * Otherwise, it's a join if there's more than one relation used.
2153                  */
2154                 is_join_clause = (NumRelids((Node *) opargs) > 1);
2155         }
2156
2157         if (is_join_clause)
2158         {
2159                 /* Estimate selectivity for a join clause. */
2160                 s1 = join_selectivity(root, opno,
2161                                                           opargs,
2162                                                           inputcollid,
2163                                                           jointype,
2164                                                           sjinfo);
2165         }
2166         else
2167         {
2168                 /* Estimate selectivity for a restriction clause. */
2169                 s1 = restriction_selectivity(root, opno,
2170                                                                          opargs,
2171                                                                          inputcollid,
2172                                                                          varRelid);
2173         }
2174
2175         return s1;
2176 }
2177
2178 /*
2179  *              eqjoinsel               - Join selectivity of "="
2180  */
2181 Datum
2182 eqjoinsel(PG_FUNCTION_ARGS)
2183 {
2184         PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
2185         Oid                     operator = PG_GETARG_OID(1);
2186         List       *args = (List *) PG_GETARG_POINTER(2);
2187
2188 #ifdef NOT_USED
2189         JoinType        jointype = (JoinType) PG_GETARG_INT16(3);
2190 #endif
2191         SpecialJoinInfo *sjinfo = (SpecialJoinInfo *) PG_GETARG_POINTER(4);
2192         double          selec;
2193         VariableStatData vardata1;
2194         VariableStatData vardata2;
2195         bool            join_is_reversed;
2196         RelOptInfo *inner_rel;
2197
2198         get_join_variables(root, args, sjinfo,
2199                                            &vardata1, &vardata2, &join_is_reversed);
2200
2201         switch (sjinfo->jointype)
2202         {
2203                 case JOIN_INNER:
2204                 case JOIN_LEFT:
2205                 case JOIN_FULL:
2206                         selec = eqjoinsel_inner(operator, &vardata1, &vardata2);
2207                         break;
2208                 case JOIN_SEMI:
2209                 case JOIN_ANTI:
2210
2211                         /*
2212                          * Look up the join's inner relation.  min_righthand is sufficient
2213                          * information because neither SEMI nor ANTI joins permit any
2214                          * reassociation into or out of their RHS, so the righthand will
2215                          * always be exactly that set of rels.
2216                          */
2217                         inner_rel = find_join_input_rel(root, sjinfo->min_righthand);
2218
2219                         if (!join_is_reversed)
2220                                 selec = eqjoinsel_semi(operator, &vardata1, &vardata2,
2221                                                                            inner_rel);
2222                         else
2223                                 selec = eqjoinsel_semi(get_commutator(operator),
2224                                                                            &vardata2, &vardata1,
2225                                                                            inner_rel);
2226                         break;
2227                 default:
2228                         /* other values not expected here */
2229                         elog(ERROR, "unrecognized join type: %d",
2230                                  (int) sjinfo->jointype);
2231                         selec = 0;                      /* keep compiler quiet */
2232                         break;
2233         }
2234
2235         ReleaseVariableStats(vardata1);
2236         ReleaseVariableStats(vardata2);
2237
2238         CLAMP_PROBABILITY(selec);
2239
2240         PG_RETURN_FLOAT8((float8) selec);
2241 }
2242
2243 /*
2244  * eqjoinsel_inner --- eqjoinsel for normal inner join
2245  *
2246  * We also use this for LEFT/FULL outer joins; it's not presently clear
2247  * that it's worth trying to distinguish them here.
2248  */
2249 static double
2250 eqjoinsel_inner(Oid operator,
2251                                 VariableStatData *vardata1, VariableStatData *vardata2)
2252 {
2253         double          selec;
2254         double          nd1;
2255         double          nd2;
2256         bool            isdefault1;
2257         bool            isdefault2;
2258         Form_pg_statistic stats1 = NULL;
2259         Form_pg_statistic stats2 = NULL;
2260         bool            have_mcvs1 = false;
2261         Datum      *values1 = NULL;
2262         int                     nvalues1 = 0;
2263         float4     *numbers1 = NULL;
2264         int                     nnumbers1 = 0;
2265         bool            have_mcvs2 = false;
2266         Datum      *values2 = NULL;
2267         int                     nvalues2 = 0;
2268         float4     *numbers2 = NULL;
2269         int                     nnumbers2 = 0;
2270
2271         nd1 = get_variable_numdistinct(vardata1, &isdefault1);
2272         nd2 = get_variable_numdistinct(vardata2, &isdefault2);
2273
2274         if (HeapTupleIsValid(vardata1->statsTuple))
2275         {
2276                 stats1 = (Form_pg_statistic) GETSTRUCT(vardata1->statsTuple);
2277                 have_mcvs1 = get_attstatsslot(vardata1->statsTuple,
2278                                                                           vardata1->atttype,
2279                                                                           vardata1->atttypmod,
2280                                                                           STATISTIC_KIND_MCV,
2281                                                                           InvalidOid,
2282                                                                           NULL,
2283                                                                           &values1, &nvalues1,
2284                                                                           &numbers1, &nnumbers1);
2285         }
2286
2287         if (HeapTupleIsValid(vardata2->statsTuple))
2288         {
2289                 stats2 = (Form_pg_statistic) GETSTRUCT(vardata2->statsTuple);
2290                 have_mcvs2 = get_attstatsslot(vardata2->statsTuple,
2291                                                                           vardata2->atttype,
2292                                                                           vardata2->atttypmod,
2293                                                                           STATISTIC_KIND_MCV,
2294                                                                           InvalidOid,
2295                                                                           NULL,
2296                                                                           &values2, &nvalues2,
2297                                                                           &numbers2, &nnumbers2);
2298         }
2299
2300         if (have_mcvs1 && have_mcvs2)
2301         {
2302                 /*
2303                  * We have most-common-value lists for both relations.  Run through
2304                  * the lists to see which MCVs actually join to each other with the
2305                  * given operator.  This allows us to determine the exact join
2306                  * selectivity for the portion of the relations represented by the MCV
2307                  * lists.  We still have to estimate for the remaining population, but
2308                  * in a skewed distribution this gives us a big leg up in accuracy.
2309                  * For motivation see the analysis in Y. Ioannidis and S.
2310                  * Christodoulakis, "On the propagation of errors in the size of join
2311                  * results", Technical Report 1018, Computer Science Dept., University
2312                  * of Wisconsin, Madison, March 1991 (available from ftp.cs.wisc.edu).
2313                  */
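                /*
                 * Worked example with hypothetical MCV statistics: suppose rel1's
                 * MCV list is {1: 0.20, 2: 0.10} and rel2's is {1: 0.30, 3: 0.25},
                 * with nd1 = 10, nd2 = 5 and no nulls.  Only the value 1 joins, so
                 * nmatches = 1 and matchprodfreq = 0.20 * 0.30 = 0.06, while
                 * matchfreq1 = 0.20, unmatchfreq1 = 0.10, matchfreq2 = 0.30,
                 * unmatchfreq2 = 0.25, otherfreq1 = 0.70, otherfreq2 = 0.45.
                 * These numbers are carried further in the example just before the
                 * totalsel1/totalsel2 computation below.
                 */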
2314                 FmgrInfo        eqproc;
2315                 bool       *hasmatch1;
2316                 bool       *hasmatch2;
2317                 double          nullfrac1 = stats1->stanullfrac;
2318                 double          nullfrac2 = stats2->stanullfrac;
2319                 double          matchprodfreq,
2320                                         matchfreq1,
2321                                         matchfreq2,
2322                                         unmatchfreq1,
2323                                         unmatchfreq2,
2324                                         otherfreq1,
2325                                         otherfreq2,
2326                                         totalsel1,
2327                                         totalsel2;
2328                 int                     i,
2329                                         nmatches;
2330
2331                 fmgr_info(get_opcode(operator), &eqproc);
2332                 hasmatch1 = (bool *) palloc0(nvalues1 * sizeof(bool));
2333                 hasmatch2 = (bool *) palloc0(nvalues2 * sizeof(bool));
2334
2335                 /*
2336                  * Note we assume that each MCV will match at most one member of the
2337                  * other MCV list.  If the operator isn't really equality, there could
2338                  * be multiple matches --- but we don't look for them, both for speed
2339                  * and because the math wouldn't add up...
2340                  */
2341                 matchprodfreq = 0.0;
2342                 nmatches = 0;
2343                 for (i = 0; i < nvalues1; i++)
2344                 {
2345                         int                     j;
2346
2347                         for (j = 0; j < nvalues2; j++)
2348                         {
2349                                 if (hasmatch2[j])
2350                                         continue;
2351                                 if (DatumGetBool(FunctionCall2Coll(&eqproc,
2352                                                                                                    DEFAULT_COLLATION_OID,
2353                                                                                                    values1[i],
2354                                                                                                    values2[j])))
2355                                 {
2356                                         hasmatch1[i] = hasmatch2[j] = true;
2357                                         matchprodfreq += numbers1[i] * numbers2[j];
2358                                         nmatches++;
2359                                         break;
2360                                 }
2361                         }
2362                 }
2363                 CLAMP_PROBABILITY(matchprodfreq);
2364                 /* Sum up frequencies of matched and unmatched MCVs */
2365                 matchfreq1 = unmatchfreq1 = 0.0;
2366                 for (i = 0; i < nvalues1; i++)
2367                 {
2368                         if (hasmatch1[i])
2369                                 matchfreq1 += numbers1[i];
2370                         else
2371                                 unmatchfreq1 += numbers1[i];
2372                 }
2373                 CLAMP_PROBABILITY(matchfreq1);
2374                 CLAMP_PROBABILITY(unmatchfreq1);
2375                 matchfreq2 = unmatchfreq2 = 0.0;
2376                 for (i = 0; i < nvalues2; i++)
2377                 {
2378                         if (hasmatch2[i])
2379                                 matchfreq2 += numbers2[i];
2380                         else
2381                                 unmatchfreq2 += numbers2[i];
2382                 }
2383                 CLAMP_PROBABILITY(matchfreq2);
2384                 CLAMP_PROBABILITY(unmatchfreq2);
2385                 pfree(hasmatch1);
2386                 pfree(hasmatch2);
2387
2388                 /*
2389                  * Compute total frequency of non-null values that are not in the MCV
2390                  * lists.
2391                  */
2392                 otherfreq1 = 1.0 - nullfrac1 - matchfreq1 - unmatchfreq1;
2393                 otherfreq2 = 1.0 - nullfrac2 - matchfreq2 - unmatchfreq2;
2394                 CLAMP_PROBABILITY(otherfreq1);
2395                 CLAMP_PROBABILITY(otherfreq2);
2396
2397                 /*
2398                  * We can estimate the total selectivity from the point of view of
2399                  * relation 1 as: the known selectivity for matched MCVs, plus
2400                  * unmatched MCVs that are assumed to match against random members of
2401                  * relation 2's non-MCV population, plus non-MCV values that are
2402                  * assumed to match against random members of relation 2's unmatched
2403                  * MCVs and non-MCV population.
2404                  */
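                /*
                 * Continuing the hypothetical numbers from the example above:
                 * totalsel1 = 0.06 + 0.10 * 0.45 / (5 - 2) + 0.70 * (0.45 + 0.25) / (5 - 1)
                 *           = 0.06 + 0.015 + 0.1225 = 0.1975, and
                 * totalsel2 = 0.06 + 0.25 * 0.70 / (10 - 2) + 0.45 * (0.70 + 0.10) / (10 - 1)
                 *           = 0.06 + 0.021875 + 0.04 = 0.121875,
                 * so the final estimate is the smaller value, about 0.12.
                 */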
2405                 totalsel1 = matchprodfreq;
2406                 if (nd2 > nvalues2)
2407                         totalsel1 += unmatchfreq1 * otherfreq2 / (nd2 - nvalues2);
2408                 if (nd2 > nmatches)
2409                         totalsel1 += otherfreq1 * (otherfreq2 + unmatchfreq2) /
2410                                 (nd2 - nmatches);
2411                 /* Same estimate from the point of view of relation 2. */
2412                 totalsel2 = matchprodfreq;
2413                 if (nd1 > nvalues1)
2414                         totalsel2 += unmatchfreq2 * otherfreq1 / (nd1 - nvalues1);
2415                 if (nd1 > nmatches)
2416                         totalsel2 += otherfreq2 * (otherfreq1 + unmatchfreq1) /
2417                                 (nd1 - nmatches);
2418
2419                 /*
2420                  * Use the smaller of the two estimates.  This can be justified in
2421                  * essentially the same terms as given below for the no-stats case: to
2422                  * a first approximation, we are estimating from the point of view of
2423                  * the relation with smaller nd.
2424                  */
2425                 selec = (totalsel1 < totalsel2) ? totalsel1 : totalsel2;
2426         }
2427         else
2428         {
2429                 /*
2430                  * We do not have MCV lists for both sides.  Estimate the join
2431                  * selectivity as MIN(1/nd1,1/nd2)*(1-nullfrac1)*(1-nullfrac2). This
2432                  * is plausible if we assume that the join operator is strict and the
2433                  * non-null values are about equally distributed: a given non-null
2434                  * tuple of rel1 will join to either zero or N2*(1-nullfrac2)/nd2 rows
2435                  * of rel2, so total join rows are at most
2436                  * N1*(1-nullfrac1)*N2*(1-nullfrac2)/nd2 giving a join selectivity of
2437                  * not more than (1-nullfrac1)*(1-nullfrac2)/nd2. By the same logic it
2438                  * is not more than (1-nullfrac1)*(1-nullfrac2)/nd1, so the expression
2439                  * with MIN() is an upper bound.  Using the MIN() means we estimate
2440                  * from the point of view of the relation with smaller nd (since the
2441                  * larger nd is determining the MIN).  It is reasonable to assume that
2442                  * most tuples in this rel will have join partners, so the bound is
2443                  * probably fairly tight and should be taken as-is.
2444                  *
2445                  * XXX Can we be smarter if we have an MCV list for just one side? It
2446                  * seems that if we assume equal distribution for the other side, we
2447                  * end up with the same answer anyway.
2448                  */
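                /*
                 * Worked example with hypothetical statistics: nd1 = 1000, nd2 = 50,
                 * nullfrac1 = 0.1, nullfrac2 = 0.  Then
                 *              selec = (1 - 0.1) * (1 - 0) / Max(nd1, nd2) = 0.9 / 1000 = 0.0009,
                 * i.e. we divide by the larger ndistinct, which realizes the
                 * MIN(1/nd1, 1/nd2) bound described above.
                 */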
2449                 double          nullfrac1 = stats1 ? stats1->stanullfrac : 0.0;
2450                 double          nullfrac2 = stats2 ? stats2->stanullfrac : 0.0;
2451
2452                 selec = (1.0 - nullfrac1) * (1.0 - nullfrac2);
2453                 if (nd1 > nd2)
2454                         selec /= nd1;
2455                 else
2456                         selec /= nd2;
2457         }
2458
2459         if (have_mcvs1)
2460                 free_attstatsslot(vardata1->atttype, values1, nvalues1,
2461                                                   numbers1, nnumbers1);
2462         if (have_mcvs2)
2463                 free_attstatsslot(vardata2->atttype, values2, nvalues2,
2464                                                   numbers2, nnumbers2);
2465
2466         return selec;
2467 }
2468
2469 /*
2470  * eqjoinsel_semi --- eqjoinsel for semi join
2471  *
2472  * (Also used for anti join, which we are supposed to estimate the same way.)
2473  * Caller has ensured that vardata1 is the LHS variable.
2474  */
2475 static double
2476 eqjoinsel_semi(Oid operator,
2477                            VariableStatData *vardata1, VariableStatData *vardata2,
2478                            RelOptInfo *inner_rel)
2479 {
2480         double          selec;
2481         double          nd1;
2482         double          nd2;
2483         bool            isdefault1;
2484         bool            isdefault2;
2485         Form_pg_statistic stats1 = NULL;
2486         bool            have_mcvs1 = false;
2487         Datum      *values1 = NULL;
2488         int                     nvalues1 = 0;
2489         float4     *numbers1 = NULL;
2490         int                     nnumbers1 = 0;
2491         bool            have_mcvs2 = false;
2492         Datum      *values2 = NULL;
2493         int                     nvalues2 = 0;
2494         float4     *numbers2 = NULL;
2495         int                     nnumbers2 = 0;
2496
2497         nd1 = get_variable_numdistinct(vardata1, &isdefault1);
2498         nd2 = get_variable_numdistinct(vardata2, &isdefault2);
2499
2500         /*
2501          * We clamp nd2 to be not more than what we estimate the inner relation's
2502          * size to be.  This is intuitively reasonable, since obviously there
2503          * can't be more than that many distinct values coming from the
2504          * inner rel.  The reason for the asymmetry (ie, that we don't clamp nd1
2505          * likewise) is that this is the only pathway by which restriction clauses
2506          * applied to the inner rel will affect the join result size estimate,
2507          * since set_joinrel_size_estimates will multiply SEMI/ANTI selectivity by
2508          * only the outer rel's size.  If we clamped nd1 we'd be double-counting
2509          * the selectivity of outer-rel restrictions.
2510          *
2511          * We can apply this clamping both with respect to the base relation from
2512          * which the join variable comes (if there is just one), and to the
2513          * immediate inner input relation of the current join.
2514          */
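        /*
         * Hypothetical illustration of why this clamp matters: if column-level
         * statistics say nd2 = 10000 but restrictions reduce inner_rel->rows to
         * 500, nd2 is clamped to 500.  With nd1 = 20000, no MCV data, and
         * non-default ndistinct estimates, the semijoin selectivity computed
         * below becomes roughly (500 / 20000) * (1 - nullfrac1) instead of
         * (10000 / 20000) * (1 - nullfrac1), so the inner-side restrictions are
         * reflected in the join size estimate.
         */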
2515         if (vardata2->rel)
2516                 nd2 = Min(nd2, vardata2->rel->rows);
2517         nd2 = Min(nd2, inner_rel->rows);
2518
2519         if (HeapTupleIsValid(vardata1->statsTuple))
2520         {
2521                 stats1 = (Form_pg_statistic) GETSTRUCT(vardata1->statsTuple);
2522                 have_mcvs1 = get_attstatsslot(vardata1->statsTuple,
2523                                                                           vardata1->atttype,
2524                                                                           vardata1->atttypmod,
2525                                                                           STATISTIC_KIND_MCV,
2526                                                                           InvalidOid,
2527                                                                           NULL,
2528                                                                           &values1, &nvalues1,
2529                                                                           &numbers1, &nnumbers1);
2530         }
2531
2532         if (HeapTupleIsValid(vardata2->statsTuple))
2533         {
2534                 have_mcvs2 = get_attstatsslot(vardata2->statsTuple,
2535                                                                           vardata2->atttype,
2536                                                                           vardata2->atttypmod,
2537                                                                           STATISTIC_KIND_MCV,
2538                                                                           InvalidOid,
2539                                                                           NULL,
2540                                                                           &values2, &nvalues2,
2541                                                                           &numbers2, &nnumbers2);
2542         }
2543
2544         if (have_mcvs1 && have_mcvs2 && OidIsValid(operator))
2545         {
2546                 /*
2547                  * We have most-common-value lists for both relations.  Run through
2548                  * the lists to see which MCVs actually join to each other with the
2549                  * given operator.  This allows us to determine the exact join
2550                  * selectivity for the portion of the relations represented by the MCV
2551                  * lists.  We still have to estimate for the remaining population, but
2552                  * in a skewed distribution this gives us a big leg up in accuracy.
2553                  */
2554                 FmgrInfo        eqproc;
2555                 bool       *hasmatch1;
2556                 bool       *hasmatch2;
2557                 double          nullfrac1 = stats1->stanullfrac;
2558                 double          matchfreq1,
2559                                         uncertainfrac,
2560                                         uncertain;
2561                 int                     i,
2562                                         nmatches,
2563                                         clamped_nvalues2;
2564
2565                 /*
2566                  * The clamping above could have resulted in nd2 being less than
2567                  * nvalues2; in that case, we assume that precisely the nd2 most
2568                  * common values in the relation will appear in the join input, and so
2569                  * compare to only the first nd2 members of the MCV list.  Of course
2570                  * this is frequently wrong, but it's the best bet we can make.
2571                  */
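                /*
                 * For instance (hypothetical numbers), with nvalues2 = 100 MCV
                 * entries but nd2 clamped to 40, only the 40 most common entries
                 * are considered as potential join partners below.
                 */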
2572                 clamped_nvalues2 = Min(nvalues2, nd2);
2573
2574                 fmgr_info(get_opcode(operator), &eqproc);
2575                 hasmatch1 = (bool *) palloc0(nvalues1 * sizeof(bool));
2576                 hasmatch2 = (bool *) palloc0(clamped_nvalues2 * sizeof(bool));
2577
2578                 /*
2579                  * Note we assume that each MCV will match at most one member of the
2580                  * other MCV list.  If the operator isn't really equality, there could
2581                  * be multiple matches --- but we don't look for them, both for speed
2582                  * and because the math wouldn't add up...
2583                  */
2584                 nmatches = 0;
2585                 for (i = 0; i < nvalues1; i++)
2586                 {
2587                         int                     j;
2588
2589                         for (j = 0; j < clamped_nvalues2; j++)
2590                         {
2591                                 if (hasmatch2[j])
2592                                         continue;
2593                                 if (DatumGetBool(FunctionCall2Coll(&eqproc,
2594                                                                                                    DEFAULT_COLLATION_OID,
2595                                                                                                    values1[i],
2596                                                                                                    values2[j])))
2597                                 {
2598                                         hasmatch1[i] = hasmatch2[j] = true;
2599                                         nmatches++;
2600                                         break;
2601                                 }
2602                         }
2603                 }
2604                 /* Sum up frequencies of matched MCVs */
2605                 matchfreq1 = 0.0;
2606                 for (i = 0; i < nvalues1; i++)
2607                 {
2608                         if (hasmatch1[i])
2609                                 matchfreq1 += numbers1[i];
2610                 }
2611                 CLAMP_PROBABILITY(matchfreq1);
2612                 pfree(hasmatch1);
2613                 pfree(hasmatch2);
2614
2615                 /*
2616                  * Now we need to estimate the fraction of relation 1 that has at
2617                  * least one join partner.  We know for certain that the matched MCVs
2618                  * do, so that gives us a lower bound, but we're really in the dark
2619                  * about everything else.  Our crude approach is: if nd1 <= nd2 then
2620                  * assume all non-null rel1 rows have join partners, else assume for
2621                  * the uncertain rows that a fraction nd2/nd1 have join partners. We
2622                  * can discount the known-matched MCVs from the distinct-values counts
2623                  * before doing the division.
2624                  *
2625                  * Crude as the above is, it's completely useless if we don't have
2626                  * reliable ndistinct values for both sides.  Hence, if either nd1 or
2627                  * nd2 is default, punt and assume half of the uncertain rows have
2628                  * join partners.
2629                  */
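                /*
                 * Worked example with hypothetical numbers: matchfreq1 = 0.40 from
                 * nmatches = 5 matched MCVs, nullfrac1 = 0.10, nd1 = 105, nd2 = 55,
                 * both non-default.  After discounting the matches, nd1 = 100 and
                 * nd2 = 50, so uncertainfrac = 50 / 100 = 0.5; uncertain =
                 * 1 - 0.40 - 0.10 = 0.50, giving selec = 0.40 + 0.5 * 0.50 = 0.65.
                 */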
2630                 if (!isdefault1 && !isdefault2)
2631                 {
2632                         nd1 -= nmatches;
2633                         nd2 -= nmatches;
2634                         if (nd1 <= nd2 || nd2 < 0)
2635                                 uncertainfrac = 1.0;
2636                         else
2637                                 uncertainfrac = nd2 / nd1;
2638                 }
2639                 else
2640                         uncertainfrac = 0.5;
2641                 uncertain = 1.0 - matchfreq1 - nullfrac1;
2642                 CLAMP_PROBABILITY(uncertain);
2643                 selec = matchfreq1 + uncertainfrac * uncertain;
2644         }
2645         else
2646         {
2647                 /*
2648                  * Without MCV lists for both sides, we can only use the heuristic
2649                  * about nd1 vs nd2.
2650                  */
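                /*
                 * For example (hypothetical statistics): nd1 = 1000, nd2 = 200,
                 * nullfrac1 = 0.1 gives selec = (200 / 1000) * 0.9 = 0.18; if
                 * either ndistinct were just a default guess, we would instead
                 * use 0.5 * 0.9 = 0.45.
                 */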
2651                 double          nullfrac1 = stats1 ? stats1->stanullfrac : 0.0;
2652
2653                 if (!isdefault1 && !isdefault2)
2654                 {
2655                         if (nd1 <= nd2 || nd2 < 0)
2656                                 selec = 1.0 - nullfrac1;
2657                         else
2658                                 selec = (nd2 / nd1) * (1.0 - nullfrac1);
2659                 }
2660                 else
2661                         selec = 0.5 * (1.0 - nullfrac1);
2662         }
2663
2664         if (have_mcvs1)
2665                 free_attstatsslot(vardata1->atttype, values1, nvalues1,
2666                                                   numbers1, nnumbers1);
2667         if (have_mcvs2)
2668                 free_attstatsslot(vardata2->atttype, values2, nvalues2,
2669                                                   numbers2, nnumbers2);
2670
2671         return selec;
2672 }
2673
2674 /*
2675  *              neqjoinsel              - Join selectivity of "!="
2676  */
2677 Datum
2678 neqjoinsel(PG_FUNCTION_ARGS)
2679 {
2680         PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
2681         Oid                     operator = PG_GETARG_OID(1);
2682         List       *args = (List *) PG_GETARG_POINTER(2);
2683         JoinType        jointype = (JoinType) PG_GETARG_INT16(3);
2684         SpecialJoinInfo *sjinfo = (SpecialJoinInfo *) PG_GETARG_POINTER(4);
2685         Oid                     eqop;
2686         float8          result;
2687
2688         /*
2689          * We want 1 - eqjoinsel() where the equality operator is the one
2690          * associated with this != operator, that is, its negator.
2691          */
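        /*
         * For example (hypothetical value, for illustration only): if eqjoinsel
         * for the corresponding '=' operator would return 0.005, the '<>' join
         * selectivity is estimated as 1 - 0.005 = 0.995.
         */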
2692         eqop = get_negator(operator);
2693         if (eqop)
2694         {
2695                 result = DatumGetFloat8(DirectFunctionCall5(eqjoinsel,
2696                                                                                                         PointerGetDatum(root),
2697                                                                                                         ObjectIdGetDatum(eqop),
2698                                                                                                         PointerGetDatum(args),
2699                                                                                                         Int16GetDatum(jointype),
2700                                                                                                         PointerGetDatum(sjinfo)));
2701         }
2702         else
2703         {
2704                 /* Use default selectivity (should we raise an error instead?) */
2705                 result = DEFAULT_EQ_SEL;
2706         }
2707         result = 1.0 - result;
2708         PG_RETURN_FLOAT8(result);
2709 }
2710
2711 /*
2712  *              scalarltjoinsel - Join selectivity of "<" and "<=" for scalars
2713  */
2714 Datum
2715 scalarltjoinsel(PG_FUNCTION_ARGS)
2716 {
2717         PG_RETURN_FLOAT8(DEFAULT_INEQ_SEL);
2718 }
2719
2720 /*
2721  *              scalargtjoinsel - Join selectivity of ">" and ">=" for scalars
2722  */
2723 Datum
2724 scalargtjoinsel(PG_FUNCTION_ARGS)
2725 {
2726         PG_RETURN_FLOAT8(DEFAULT_INEQ_SEL);
2727 }
2728
2729 /*
2730  * patternjoinsel               - Generic code for pattern-match join selectivity.
2731  */
2732 static double
2733 patternjoinsel(PG_FUNCTION_ARGS, Pattern_Type ptype, bool negate)
2734 {
2735         /* For the moment we just punt. */
2736         return negate ? (1.0 - DEFAULT_MATCH_SEL) : DEFAULT_MATCH_SEL;
2737 }
2738
2739 /*
2740  *              regexeqjoinsel  - Join selectivity of regular-expression pattern match.
2741  */
2742 Datum
2743 regexeqjoinsel(PG_FUNCTION_ARGS)
2744 {
2745         PG_RETURN_FLOAT8(patternjoinsel(fcinfo, Pattern_Type_Regex, false));
2746 }
2747
2748 /*
2749  *              icregexeqjoinsel        - Join selectivity of case-insensitive regex match.
2750  */
2751 Datum
2752 icregexeqjoinsel(PG_FUNCTION_ARGS)
2753 {
2754         PG_RETURN_FLOAT8(patternjoinsel(fcinfo, Pattern_Type_Regex_IC, false));
2755 }
2756
2757 /*
2758  *              likejoinsel                     - Join selectivity of LIKE pattern match.
2759  */
2760 Datum
2761 likejoinsel(PG_FUNCTION_ARGS)
2762 {
2763         PG_RETURN_FLOAT8(patternjoinsel(fcinfo, Pattern_Type_Like, false));
2764 }
2765
2766 /*
2767  *              iclikejoinsel                   - Join selectivity of ILIKE pattern match.
2768  */
2769 Datum
2770 iclikejoinsel(PG_FUNCTION_ARGS)
2771 {
2772         PG_RETURN_FLOAT8(patternjoinsel(fcinfo, Pattern_Type_Like_IC, false));
2773 }
2774
2775 /*
2776  *              regexnejoinsel  - Join selectivity of regex non-match.
2777  */
2778 Datum
2779 regexnejoinsel(PG_FUNCTION_ARGS)
2780 {
2781         PG_RETURN_FLOAT8(patternjoinsel(fcinfo, Pattern_Type_Regex, true));
2782 }
2783
2784 /*
2785  *              icregexnejoinsel        - Join selectivity of case-insensitive regex non-match.
2786  */
2787 Datum
2788 icregexnejoinsel(PG_FUNCTION_ARGS)
2789 {
2790         PG_RETURN_FLOAT8(patternjoinsel(fcinfo, Pattern_Type_Regex_IC, true));
2791 }
2792
2793 /*
2794  *              nlikejoinsel            - Join selectivity of LIKE pattern non-match.
2795  */
2796 Datum
2797 nlikejoinsel(PG_FUNCTION_ARGS)
2798 {
2799         PG_RETURN_FLOAT8(patternjoinsel(fcinfo, Pattern_Type_Like, true));
2800 }
2801
2802 /*
2803  *              icnlikejoinsel          - Join selectivity of ILIKE pattern non-match.
2804  */
2805 Datum
2806 icnlikejoinsel(PG_FUNCTION_ARGS)
2807 {
2808         PG_RETURN_FLOAT8(patternjoinsel(fcinfo, Pattern_Type_Like_IC, true));
2809 }
2810
2811 /*
2812  * mergejoinscansel                     - Scan selectivity of merge join.
2813  *
2814  * A merge join will stop as soon as it exhausts either input stream.
2815  * Therefore, if we can estimate the ranges of both input variables,
2816  * we can estimate how much of the input will actually be read.  This
2817  * can have a considerable impact on the cost when using indexscans.
2818  *
2819  * Also, we can estimate how much of each input has to be read before the
2820  * first join pair is found, which will affect the join's startup time.
2821  *
2822  * clause should be a clause already known to be mergejoinable.  opfamily,
2823  * strategy, and nulls_first specify the sort ordering being used.
2824  *
2825  * The outputs are:
2826  *              *leftstart is set to the fraction of the left-hand variable expected
2827  *               to be scanned before the first join pair is found (0 to 1).
2828  *              *leftend is set to the fraction of the left-hand variable expected
2829  *               to be scanned before the join terminates (0 to 1).
2830  *              *rightstart, *rightend similarly for the right-hand variable.
2831  */
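/*
 * Illustrative example with hypothetical, uniformly distributed inputs:
 * if the left variable spans 1..1000 and the right variable spans 501..1500
 * under an ascending sort, then roughly half of the left input must be
 * skipped before the first join pair (*leftstart ~ 0.5, *rightstart = 0.0),
 * and the merge stops after reading about half of the right input
 * (*leftend = 1.0, *rightend ~ 0.5).
 */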
2832 void
2833 mergejoinscansel(PlannerInfo *root, Node *clause,
2834                                  Oid opfamily, int strategy, bool nulls_first,
2835                                  Selectivity *leftstart, Selectivity *leftend,
2836                                  Selectivity *rightstart, Selectivity *rightend)
2837 {
2838         Node       *left,
2839                            *right;
2840         VariableStatData leftvar,
2841                                 rightvar;
2842         int                     op_strategy;
2843         Oid                     op_lefttype;
2844         Oid                     op_righttype;
2845         Oid                     opno,
2846                                 lsortop,
2847                                 rsortop,
2848                                 lstatop,
2849                                 rstatop,
2850                                 ltop,
2851                                 leop,
2852                                 revltop,
2853                                 revleop;
2854         bool            isgt;
2855         Datum           leftmin,
2856                                 leftmax,
2857                                 rightmin,
2858                                 rightmax;
2859         double          selec;
2860
2861         /* Set default results if we can't figure anything out. */
2862         /* XXX should default "start" fraction be a bit more than 0? */
2863         *leftstart = *rightstart = 0.0;
2864         *leftend = *rightend = 1.0;
2865
2866         /* Deconstruct the merge clause */
2867         if (!is_opclause(clause))
2868                 return;                                 /* shouldn't happen */
2869         opno = ((OpExpr *) clause)->opno;
2870         left = get_leftop((Expr *) clause);
2871         right = get_rightop((Expr *) clause);
2872         if (!right)
2873                 return;                                 /* shouldn't happen */
2874
2875         /* Look for stats for the inputs */
2876         examine_variable(root, left, 0, &leftvar);
2877         examine_variable(root, right, 0, &rightvar);
2878
2879         /* Extract the operator's declared left/right datatypes */
2880         get_op_opfamily_properties(opno, opfamily, false,
2881                                                            &op_strategy,
2882                                                            &op_lefttype,
2883                                                            &op_righttype);
2884         Assert(op_strategy == BTEqualStrategyNumber);
2885
2886         /*
2887          * Look up the various operators we need.  If we don't find them all, it
2888          * probably means the opfamily is broken, but we just fail silently.
2889          *
2890          * Note: we expect that pg_statistic histograms will be sorted by the '<'
2891          * operator, regardless of which sort direction we are considering.
2892          */
2893         switch (strategy)
2894         {
2895                 case BTLessStrategyNumber:
2896                         isgt = false;
2897                         if (op_lefttype == op_righttype)
2898                         {
2899                                 /* easy case */
2900                                 ltop = get_opfamily_member(opfamily,
2901                                                                                    op_lefttype, op_righttype,
2902                                                                                    BTLessStrategyNumber);
2903                                 leop = get_opfamily_member(opfamily,
2904                                                                                    op_lefttype, op_righttype,
2905                                                                                    BTLessEqualStrategyNumber);
2906                                 lsortop = ltop;
2907                                 rsortop = ltop;
2908                                 lstatop = lsortop;
2909                                 rstatop = rsortop;
2910                                 revltop = ltop;
2911                                 revleop = leop;
2912                         }
2913                         else
2914                         {
2915                                 ltop = get_opfamily_member(opfamily,
2916                                                                                    op_lefttype, op_righttype,
2917                                                                                    BTLessStrategyNumber);
2918                                 leop = get_opfamily_member(opfamily,
2919                                                                                    op_lefttype, op_righttype,
2920                                                                                    BTLessEqualStrategyNumber);
2921                                 lsortop = get_opfamily_member(opfamily,
2922                                                                                           op_lefttype, op_lefttype,
2923                                                                                           BTLessStrategyNumber);
2924                                 rsortop = get_opfamily_member(opfamily,
2925                                                                                           op_righttype, op_righttype,
2926                                                                                           BTLessStrategyNumber);
2927                                 lstatop = lsortop;
2928                                 rstatop = rsortop;
2929                                 revltop = get_opfamily_member(opfamily,
2930                                                                                           op_righttype, op_lefttype,
2931                                                                                           BTLessStrategyNumber);
2932                                 revleop = get_opfamily_member(opfamily,
2933                                                                                           op_righttype, op_lefttype,
2934                                                                                           BTLessEqualStrategyNumber);
2935                         }
2936                         break;
2937                 case BTGreaterStrategyNumber:
2938                         /* descending-order case */
2939                         isgt = true;
2940                         if (op_lefttype == op_righttype)
2941                         {
2942                                 /* easy case */
2943                                 ltop = get_opfamily_member(opfamily,
2944                                                                                    op_lefttype, op_righttype,
2945                                                                                    BTGreaterStrategyNumber);
2946                                 leop = get_opfamily_member(opfamily,
2947                                                                                    op_lefttype, op_righttype,
2948                                                                                    BTGreaterEqualStrategyNumber);
2949                                 lsortop = ltop;
2950                                 rsortop = ltop;
2951                                 lstatop = get_opfamily_member(opfamily,
2952                                                                                           op_lefttype, op_lefttype,
2953                                                                                           BTLessStrategyNumber);
2954                                 rstatop = lstatop;
2955                                 revltop = ltop;
2956                                 revleop = leop;
2957                         }
2958                         else
2959                         {
2960                                 ltop = get_opfamily_member(opfamily,
2961                                                                                    op_lefttype, op_righttype,
2962                                                                                    BTGreaterStrategyNumber);
2963                                 leop = get_opfamily_member(opfamily,
2964                                                                                    op_lefttype, op_righttype,
2965                                                                                    BTGreaterEqualStrategyNumber);
2966                                 lsortop = get_opfamily_member(opfamily,
2967                                                                                           op_lefttype, op_lefttype,
2968                                                                                           BTGreaterStrategyNumber);
2969                                 rsortop = get_opfamily_member(opfamily,
2970                                                                                           op_righttype, op_righttype,
2971                                                                                           BTGreaterStrategyNumber);
2972                                 lstatop = get_opfamily_member(opfamily,
2973                                                                                           op_lefttype, op_lefttype,
2974                                                                                           BTLessStrategyNumber);
2975                                 rstatop = get_opfamily_member(opfamily,
2976                                                                                           op_righttype, op_righttype,
2977                                                                                           BTLessStrategyNumber);
2978                                 revltop = get_opfamily_member(opfamily,
2979                                                                                           op_righttype, op_lefttype,
2980                                                                                           BTGreaterStrategyNumber);
2981                                 revleop = get_opfamily_member(opfamily,
2982                                                                                           op_righttype, op_lefttype,
2983                                                                                           BTGreaterEqualStrategyNumber);
2984                         }
2985                         break;
2986                 default:
2987                         goto fail;                      /* shouldn't get here */
2988         }
2989
2990         if (!OidIsValid(lsortop) ||
2991                 !OidIsValid(rsortop) ||
2992                 !OidIsValid(lstatop) ||
2993                 !OidIsValid(rstatop) ||
2994                 !OidIsValid(ltop) ||
2995                 !OidIsValid(leop) ||
2996                 !OidIsValid(revltop) ||
2997                 !OidIsValid(revleop))
2998                 goto fail;                              /* insufficient info in catalogs */
2999
3000         /* Try to get ranges of both inputs */
3001         if (!isgt)
3002         {
3003                 if (!get_variable_range(root, &leftvar, lstatop,
3004                                                                 &leftmin, &leftmax))
3005                         goto fail;                      /* no range available from stats */
3006                 if (!get_variable_range(root, &rightvar, rstatop,
3007                                                                 &rightmin, &rightmax))
3008                         goto fail;                      /* no range available from stats */
3009         }
3010         else
3011         {
3012                 /* need to swap the max and min */
3013                 if (!get_variable_range(root, &leftvar, lstatop,
3014                                                                 &leftmax, &leftmin))
3015                         goto fail;                      /* no range available from stats */
3016                 if (!get_variable_range(root, &rightvar, rstatop,
3017                                                                 &rightmax, &rightmin))
3018                         goto fail;                      /* no range available from stats */
3019         }
3020
3021         /*
3022          * Now, the fraction of the left variable that will be scanned is the
3023          * fraction that's <= the right-side maximum value.  But only believe
3024          * non-default estimates, else stick with our 1.0.
3025          */
3026         selec = scalarineqsel(root, leop, isgt, &leftvar,
3027                                                   rightmax, op_righttype);
3028         if (selec != DEFAULT_INEQ_SEL)
3029                 *leftend = selec;
3030
3031         /* And similarly for the right variable. */
3032         selec = scalarineqsel(root, revleop, isgt, &rightvar,
3033                                                   leftmax, op_lefttype);
3034         if (selec != DEFAULT_INEQ_SEL)
3035                 *rightend = selec;
3036
3037         /*
3038          * Only one of the two "end" fractions can really be less than 1.0;
3039          * believe the smaller estimate and reset the other one to exactly 1.0. If
3040          * we get exactly equal estimates (as can easily happen with self-joins),
3041          * believe neither.
3042          */
3043         if (*leftend > *rightend)
3044                 *leftend = 1.0;
3045         else if (*leftend < *rightend)
3046                 *rightend = 1.0;
3047         else
3048                 *leftend = *rightend = 1.0;
3049
3050         /*
3051          * Also, the fraction of the left variable that will be scanned before the
3052          * first join pair is found is the fraction that's < the right-side
3053          * minimum value.  But only believe non-default estimates, else stick with
3054          * our own default.
3055          */
3056         selec = scalarineqsel(root, ltop, isgt, &leftvar,
3057                                                   rightmin, op_righttype);
3058         if (selec != DEFAULT_INEQ_SEL)
3059                 *leftstart = selec;
3060
3061         /* And similarly for the right variable. */
3062         selec = scalarineqsel(root, revltop, isgt, &rightvar,
3063                                                   leftmin, op_lefttype);
3064         if (selec != DEFAULT_INEQ_SEL)
3065                 *rightstart = selec;
3066
3067         /*
3068          * Only one of the two "start" fractions can really be more than zero;
3069          * believe the larger estimate and reset the other one to exactly 0.0. If
3070          * we get exactly equal estimates (as can easily happen with self-joins),
3071          * believe neither.
3072          */
3073         if (*leftstart < *rightstart)
3074                 *leftstart = 0.0;
3075         else if (*leftstart > *rightstart)
3076                 *rightstart = 0.0;
3077         else
3078                 *leftstart = *rightstart = 0.0;
3079
3080         /*
3081          * If the sort order is nulls-first, we're going to have to skip over any
3082          * nulls too.  These would not have been counted by scalarineqsel, and we
3083          * can safely add in this fraction regardless of whether we believe
3084          * scalarineqsel's results or not.  But be sure to clamp the sum to 1.0!
3085          */
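        /*
         * For instance (hypothetical stats), with stanullfrac = 0.05 on the left
         * input and *leftstart previously 0.5, *leftstart becomes 0.55, while
         * *leftend = 1.0 + 0.05 is clamped back to 1.0.
         */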
3086         if (nulls_first)
3087         {
3088                 Form_pg_statistic stats;
3089
3090                 if (HeapTupleIsValid(leftvar.statsTuple))
3091                 {
3092                         stats = (Form_pg_statistic) GETSTRUCT(leftvar.statsTuple);
3093                         *leftstart += stats->stanullfrac;
3094                         CLAMP_PROBABILITY(*leftstart);
3095                         *leftend += stats->stanullfrac;
3096                         CLAMP_PROBABILITY(*leftend);
3097                 }
3098                 if (HeapTupleIsValid(rightvar.statsTuple))
3099                 {
3100                         stats = (Form_pg_statistic) GETSTRUCT(rightvar.statsTuple);
3101                         *rightstart += stats->stanullfrac;
3102                         CLAMP_PROBABILITY(*rightstart);
3103                         *rightend += stats->stanullfrac;
3104                         CLAMP_PROBABILITY(*rightend);
3105                 }
3106         }
3107
3108         /* Disbelieve start >= end, just in case that can happen */
3109         if (*leftstart >= *leftend)
3110         {
3111                 *leftstart = 0.0;
3112                 *leftend = 1.0;
3113         }
3114         if (*rightstart >= *rightend)
3115         {
3116                 *rightstart = 0.0;
3117                 *rightend = 1.0;
3118         }
3119
3120 fail:
3121         ReleaseVariableStats(leftvar);
3122         ReleaseVariableStats(rightvar);
3123 }
3124
3125
3126 /*
3127  * Helper routine for estimate_num_groups: add an item to a list of
3128  * GroupVarInfos, but only if it's not known equal to any of the existing
3129  * entries.
3130  */
3131 typedef struct
3132 {
3133         Node       *var;                        /* might be an expression, not just a Var */
3134         RelOptInfo *rel;                        /* relation it belongs to */
3135         double          ndistinct;              /* # distinct values */
3136 } GroupVarInfo;
3137
3138 static List *
3139 add_unique_group_var(PlannerInfo *root, List *varinfos,
3140                                          Node *var, VariableStatData *vardata)
3141 {
3142         GroupVarInfo *varinfo;
3143         double          ndistinct;
3144         bool            isdefault;
3145         ListCell   *lc;
3146
3147         ndistinct = get_variable_numdistinct(vardata, &isdefault);
3148
3149         /* cannot use foreach here because of possible list_delete */
3150         lc = list_head(varinfos);
3151         while (lc)
3152         {
3153                 varinfo = (GroupVarInfo *) lfirst(lc);
3154
3155                 /* must advance lc before list_delete possibly pfree's it */
3156                 lc = lnext(lc);
3157
3158                 /* Drop exact duplicates */
3159                 if (equal(var, varinfo->var))
3160                         return varinfos;
3161
3162                 /*
3163                  * Drop known-equal vars, but only if they belong to different
3164                  * relations (see comments for estimate_num_groups)
3165                  */
3166                 if (vardata->rel != varinfo->rel &&
3167                         exprs_known_equal(root, var, varinfo->var))
3168                 {
3169                         if (varinfo->ndistinct <= ndistinct)
3170                         {
3171                                 /* Keep older item, forget new one */
3172                                 return varinfos;
3173                         }
3174                         else
3175                         {
3176                                 /* Delete the older item */
3177                                 varinfos = list_delete_ptr(varinfos, varinfo);
3178                         }
3179                 }
3180         }
3181
3182         varinfo = (GroupVarInfo *) palloc(sizeof(GroupVarInfo));
3183
3184         varinfo->var = var;
3185         varinfo->rel = vardata->rel;
3186         varinfo->ndistinct = ndistinct;
3187         varinfos = lappend(varinfos, varinfo);
3188         return varinfos;
3189 }
3190
3191 /*
3192  * estimate_num_groups          - Estimate number of groups in a grouped query
3193  *
3194  * Given a query having a GROUP BY clause, estimate how many groups there
3195  * will be --- ie, the number of distinct combinations of the GROUP BY
3196  * expressions.
3197  *
3198  * This routine is also used to estimate the number of rows emitted by
3199  * a DISTINCT filtering step; that is an isomorphic problem.  (Note:
3200  * actually, we only use it for DISTINCT when there's no grouping or
3201  * aggregation ahead of the DISTINCT.)
3202  *
3203  * Inputs:
3204  *      root - the query
3205  *      groupExprs - list of expressions being grouped by
3206  *      input_rows - number of rows estimated to arrive at the group/unique
3207  *              filter step
3208  *      pgset - NULL, or a List** pointing to a grouping set to filter the
3209  *              groupExprs against
3210  *
3211  * Given the lack of any cross-correlation statistics in the system, it's
3212  * impossible to do anything really trustworthy with GROUP BY conditions
3213  * involving multiple Vars.  We should however avoid assuming the worst
3214  * case (all possible cross-product terms actually appear as groups) since
3215  * very often the grouped-by Vars are highly correlated.  Our current approach
3216  * is as follows:
3217  *      1.  Expressions yielding boolean are assumed to contribute two groups,
3218  *              independently of their content, and are ignored in the subsequent
3219  *              steps.  This is mainly because tests like "col IS NULL" break the
3220  *              heuristic used in step 2 especially badly.
3221  *      2.  Reduce the given expressions to a list of unique Vars used.  For
3222  *              example, GROUP BY a, a + b is treated the same as GROUP BY a, b.
3223  *              It is clearly correct not to count the same Var more than once.
3224  *              It is also reasonable to treat f(x) the same as x: f() cannot
3225  *              increase the number of distinct values (unless it is volatile,
3226  *              which we consider unlikely for grouping), but it probably won't
3227  *              reduce the number of distinct values much either.
3228  *              As a special case, if a GROUP BY expression can be matched to an
3229  *              expressional index for which we have statistics, then we treat the
3230  *              whole expression as though it were just a Var.
3231  *      3.  If the list contains Vars of different relations that are known equal
3232  *              due to equivalence classes, then drop all but one of the Vars from each
3233  *              known-equal set, keeping the one with smallest estimated # of values
3234  *              (since the extra values of the others can't appear in joined rows).
3235  *              Note the reason we only consider Vars of different relations is that
3236  *              if we considered ones of the same rel, we'd be double-counting the
3237  *              restriction selectivity of the equality in the next step.
3238  *      4.  For Vars within a single source rel, we multiply together the numbers
3239  *              of values, clamp to the number of rows in the rel (divided by 10 if
3240  *              more than one Var), and then multiply by a factor based on the
3241  *              selectivity of the restriction clauses for that rel.  When there's
3242  *              more than one Var, the initial product is probably too high (it's the
3243  *              worst case) but clamping to a fraction of the rel's rows seems to be a
3244  *              helpful heuristic for not letting the estimate get out of hand.  (The
3245  *              factor of 10 is derived from pre-Postgres-7.4 practice.)  The factor
3246  *              we multiply by to adjust for the restriction selectivity assumes that
3247  *              the restriction clauses are independent of the grouping, which may not
3248  *              be a valid assumption, but it's hard to do better.
3249  *      5.  If there are Vars from multiple rels, we repeat step 4 for each such
3250  *              rel, and multiply the results together.
3251  * Note that rels not containing grouped Vars are ignored completely, as are
3252  * join clauses.  Such rels cannot increase the number of groups, and we
3253  * assume such clauses do not reduce the number either (somewhat bogus,
3254  * but we don't have the info to do better).
3255  */
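/*
 * Worked example with hypothetical columns and statistics: for
 * GROUP BY (a IS NULL), b, c + 1, where b and c belong to one rel with
 * ndistinct 50 and 40 and rel->tuples = 1000, step 1 contributes a factor of
 * 2 for the boolean expression, step 2 reduces the remaining items to the
 * Vars b and c, and step 4 takes the product 50 * 40 = 2000 and clamps it to
 * 1000 / 10 = 100 (still >= the largest single ndistinct, 50).  Before the
 * restriction-selectivity adjustment, the estimate is 2 * 100 = 200 groups.
 */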
3256 double
3257 estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows,
3258                                         List **pgset)
3259 {
3260         List       *varinfos = NIL;
3261         double          numdistinct;
3262         ListCell   *l;
3263         int                     i;
3264
3265         /*
3266          * We don't ever want to return an estimate of zero groups, as that tends
3267          * to lead to division-by-zero and other unpleasantness.  The input_rows
3268          * estimate is usually already at least 1, but clamp it just in case it
3269          * isn't.
3270          */
3271         input_rows = clamp_row_est(input_rows);
3272
3273         /*
3274          * If no grouping columns, there's exactly one group.  (This can't happen
3275          * for normal cases with GROUP BY or DISTINCT, but it is possible for
3276          * corner cases with set operations.)
3277          */
3278         if (groupExprs == NIL || (pgset && list_length(*pgset) < 1))
3279                 return 1.0;
3280
3281         /*
3282          * Count groups derived from boolean grouping expressions.  For other
3283          * expressions, find the unique Vars used, treating an expression as a Var
3284          * if we can find stats for it.  For each one, record the statistical
3285          * estimate of number of distinct values (total in its table, without
3286          * regard for filtering).
3287          */
3288         numdistinct = 1.0;
3289
3290         i = 0;
3291         foreach(l, groupExprs)
3292         {
3293                 Node       *groupexpr = (Node *) lfirst(l);
3294                 VariableStatData vardata;
3295                 List       *varshere;
3296                 ListCell   *l2;
3297
3298                 /* is expression in this grouping set? */
3299                 if (pgset && !list_member_int(*pgset, i++))
3300                         continue;
3301
3302                 /* Short-circuit for expressions returning boolean */
3303                 if (exprType(groupexpr) == BOOLOID)
3304                 {
3305                         numdistinct *= 2.0;
3306                         continue;
3307                 }
3308
3309                 /*
3310                  * If examine_variable is able to deduce anything about the GROUP BY
3311                  * expression, treat it as a single variable even if it's really more
3312                  * complicated.
3313                  */
3314                 examine_variable(root, groupexpr, 0, &vardata);
3315                 if (HeapTupleIsValid(vardata.statsTuple) || vardata.isunique)
3316                 {
3317                         varinfos = add_unique_group_var(root, varinfos,
3318                                                                                         groupexpr, &vardata);
3319                         ReleaseVariableStats(vardata);
3320                         continue;
3321                 }
3322                 ReleaseVariableStats(vardata);
3323
3324                 /*
3325                  * Else pull out the component Vars.  Handle PlaceHolderVars by
3326                  * recursing into their arguments (effectively assuming that the
3327                  * PlaceHolderVar doesn't change the number of groups, which boils
3328                  * down to ignoring the possible addition of nulls to the result set).
3329                  */
3330                 varshere = pull_var_clause(groupexpr,
3331                                                                    PVC_RECURSE_AGGREGATES |
3332                                                                    PVC_RECURSE_WINDOWFUNCS |
3333                                                                    PVC_RECURSE_PLACEHOLDERS);
3334
3335                 /*
3336                  * If we find any variable-free GROUP BY item, then either it is a
3337                  * constant (and we can ignore it) or it contains a volatile function;
3338                  * in the latter case we punt and assume that each input row will
3339                  * yield a distinct group.
3340                  */
3341                 if (varshere == NIL)
3342                 {
3343                         if (contain_volatile_functions(groupexpr))
3344                                 return input_rows;
3345                         continue;
3346                 }
3347
3348                 /*
3349                  * Else add variables to varinfos list
3350                  */
3351                 foreach(l2, varshere)
3352                 {
3353                         Node       *var = (Node *) lfirst(l2);
3354
3355                         examine_variable(root, var, 0, &vardata);
3356                         varinfos = add_unique_group_var(root, varinfos, var, &vardata);
3357                         ReleaseVariableStats(vardata);
3358                 }
3359         }
3360
3361         /*
3362          * If now no Vars, we must have an all-constant or all-boolean GROUP BY
3363          * list.
3364          */
3365         if (varinfos == NIL)
3366         {
3367                 /* Guard against out-of-range answers */
3368                 if (numdistinct > input_rows)
3369                         numdistinct = input_rows;
3370                 return numdistinct;
3371         }
3372
3373         /*
3374          * Group Vars by relation and estimate total numdistinct.
3375          *
3376          * For each iteration of the outer loop, we process the frontmost Var in
3377          * varinfos, plus all other Vars in the same relation.  Those Vars are
3378          * omitted from the newvarinfos list built for the next iteration.  This
3379          * is the easiest way to group Vars of the same rel together.
3380          */
3381         do
3382         {
3383                 GroupVarInfo *varinfo1 = (GroupVarInfo *) linitial(varinfos);
3384                 RelOptInfo *rel = varinfo1->rel;
3385                 double          reldistinct = varinfo1->ndistinct;
3386                 double          relmaxndistinct = reldistinct;
3387                 int                     relvarcount = 1;
3388                 List       *newvarinfos = NIL;
3389
3390                 /*
3391                  * Get the product of numdistinct estimates of the Vars for this rel.
3392                  * Also, construct new varinfos list of remaining Vars.
3393                  */
3394                 for_each_cell(l, lnext(list_head(varinfos)))
3395                 {
3396                         GroupVarInfo *varinfo2 = (GroupVarInfo *) lfirst(l);
3397
3398                         if (varinfo2->rel == varinfo1->rel)
3399                         {
3400                                 reldistinct *= varinfo2->ndistinct;
3401                                 if (relmaxndistinct < varinfo2->ndistinct)
3402                                         relmaxndistinct = varinfo2->ndistinct;
3403                                 relvarcount++;
3404                         }
3405                         else
3406                         {
3407                                 /* not time to process varinfo2 yet */
3408                                 newvarinfos = lcons(varinfo2, newvarinfos);
3409                         }
3410                 }
3411
3412                 /*
3413                  * Sanity check --- don't divide by zero if empty relation.
3414                  */
3415                 Assert(rel->reloptkind == RELOPT_BASEREL);
3416                 if (rel->tuples > 0)
3417                 {
3418                         /*
3419                          * Clamp to size of rel, or size of rel / 10 if multiple Vars. The
3420                          * fudge factor is because the Vars are probably correlated but we
3421                          * don't know by how much.  We should never clamp to less than the
3422                          * largest ndistinct value for any of the Vars, though, since
3423                          * there will surely be at least that many groups.
3424                          */
3425                         double          clamp = rel->tuples;
3426
3427                         if (relvarcount > 1)
3428                         {
3429                                 clamp *= 0.1;
3430                                 if (clamp < relmaxndistinct)
3431                                 {
3432                                         clamp = relmaxndistinct;
3433                                         /* for sanity in case some ndistinct is too large: */
3434                                         if (clamp > rel->tuples)
3435                                                 clamp = rel->tuples;
3436                                 }
3437                         }
3438                         if (reldistinct > clamp)
3439                                 reldistinct = clamp;
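
                        /*
                         * For instance, with rel->tuples = 10000 and two grouping Vars
                         * whose ndistinct estimates are 200 and 300, the raw product is
                         * 60000; the clamp is 10000 * 0.1 = 1000, which already exceeds
                         * relmaxndistinct = 300, so reldistinct is clamped from 60000
                         * down to 1000.  (Illustrative numbers only.)
                         */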
3440
3441                         /*
3442                          * Update the estimate based on the restriction selectivity,
3443                          * guarding against division by zero when reldistinct is zero.
3444                          * Also skip this if we know that we are returning all rows.
3445                          */
3446                         if (reldistinct > 0 && rel->rows < rel->tuples)
3447                         {
3448                                 /*
3449                                  * Given a table containing N rows with n distinct values in a
3450                                  * uniform distribution, if we select p rows at random then
3451                                  * the expected number of distinct values selected is
3452                                  *
3453                                  * n * (1 - product((N-N/n-i)/(N-i), i=0..p-1))
3454                                  *
3455                                  * = n * (1 - (N-N/n)! / (N-N/n-p)! * (N-p)! / N!)
3456                                  *
3457                                  * See "Approximating block accesses in database
3458                                  * organizations", S. B. Yao, Communications of the ACM,
3459                                  * Volume 20 Issue 4, April 1977 Pages 260-261.
3460                                  *
3461                                  * Alternatively, re-arranging the terms from the factorials,
3462                                  * this may be written as
3463                                  *
3464                                  * n * (1 - product((N-p-i)/(N-i), i=0..N/n-1))
3465                                  *
3466                                  * This form of the formula is more efficient to compute in
3467                                  * the common case where p is larger than N/n.  Additionally,
3468                                  * as pointed out by Dell'Era, if i << N for all terms in the
3469                                  * product, it can be approximated by
3470                                  *
3471                                  * n * (1 - ((N-p)/N)^(N/n))
3472                                  *
3473                                  * See "Expected distinct values when selecting from a bag
3474                                  * without replacement", Alberto Dell'Era,
3475                                  * http://www.adellera.it/investigations/distinct_balls/.
3476                                  *
3477                                  * The condition i << N is equivalent to n >> 1, so this is a
3478                                  * good approximation when the number of distinct values in
3479                                  * the table is large.  It turns out that this formula also
3480                                  * works well even when n is small.
3481                                  */
3482                                 reldistinct *=
3483                                         (1 - pow((rel->tuples - rel->rows) / rel->tuples,
3484                                                          rel->tuples / reldistinct));
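
                                /*
                                 * For instance, with N = rel->tuples = 1,000,000, p =
                                 * rel->rows = 10,000 and n = reldistinct = 100,000 (so
                                 * N/n = 10), the factor is 1 - 0.99^10 ~= 0.0956,
                                 * shrinking the estimate from 100,000 to about 9,560
                                 * distinct values among the selected rows.
                                 * (Illustrative numbers only.)
                                 */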
3485                         }
3486                         reldistinct = clamp_row_est(reldistinct);
3487
3488                         /*
3489                          * Update estimate of total distinct groups.
3490                          */
3491                         numdistinct *= reldistinct;
3492                 }
3493
3494                 varinfos = newvarinfos;
3495         } while (varinfos != NIL);
3496
3497         numdistinct = ceil(numdistinct);
3498
3499         /* Guard against out-of-range answers */
3500         if (numdistinct > input_rows)
3501                 numdistinct = input_rows;
3502         if (numdistinct < 1.0)
3503                 numdistinct = 1.0;
3504
3505         return numdistinct;
3506 }
3507
3508 /*
3509  * Estimate hash bucketsize fraction (i.e., the number of entries in a bucket
3510  * divided by the total tuples in the relation) if the specified expression is
3511  * used as a hash key.
3512  *
3513  * XXX This is really pretty bogus since we're effectively assuming that the
3514  * distribution of hash keys will be the same after applying restriction
3515  * clauses as it was in the underlying relation.  However, we are not nearly
3516  * smart enough to figure out how the restrict clauses might change the
3517  * distribution, so this will have to do for now.
3518  *
3519  * We are passed the number of buckets the executor will use for the given
3520  * input relation.  If the data were perfectly distributed, with the same
3521  * number of tuples going into each available bucket, then the bucketsize
3522  * fraction would be 1/nbuckets.  But this happy state of affairs will occur
3523  * only if (a) there are at least nbuckets distinct data values, and (b)
3524  * we have a not-too-skewed data distribution.  Otherwise the buckets will
3525  * be nonuniformly occupied.  If the other relation in the join has a key
3526  * distribution similar to this one's, then the most-loaded buckets are
3527  * exactly those that will be probed most often.  Therefore, the "average"
3528  * bucket size for costing purposes should really be taken as something close
3529  * to the "worst case" bucket size.  We try to estimate this by adjusting the
3530  * fraction if there are too few distinct data values, and then scaling up
3531  * by the ratio of the most common value's frequency to the average frequency.
3532  *
3533  * If no statistics are available, use a default estimate of 0.1.  This will
3534  * discourage use of a hash rather strongly if the inner relation is large,
3535  * which is what we want.  We do not want to hash unless we know that the
3536  * inner rel is well-dispersed (or the alternatives seem much worse).
3537  */
3538 Selectivity
3539 estimate_hash_bucketsize(PlannerInfo *root, Node *hashkey, double nbuckets)
3540 {
3541         VariableStatData vardata;
3542         double          estfract,
3543                                 ndistinct,
3544                                 stanullfrac,
3545                                 mcvfreq,
3546                                 avgfreq;
3547         bool            isdefault;
3548         float4     *numbers;
3549         int                     nnumbers;
3550
3551         examine_variable(root, hashkey, 0, &vardata);
3552
3553         /* Get number of distinct values */
3554         ndistinct = get_variable_numdistinct(&vardata, &isdefault);
3555
3556         /* If ndistinct isn't real, punt and return 0.1, per comments above */
3557         if (isdefault)
3558         {
3559                 ReleaseVariableStats(vardata);
3560                 return (Selectivity) 0.1;
3561         }
3562
3563         /* Get fraction that are null */
3564         if (HeapTupleIsValid(vardata.statsTuple))
3565         {
3566                 Form_pg_statistic stats;
3567
3568                 stats = (Form_pg_statistic) GETSTRUCT(vardata.statsTuple);
3569                 stanullfrac = stats->stanullfrac;
3570         }
3571         else
3572                 stanullfrac = 0.0;
3573
3574         /* Compute avg freq of all distinct data values in raw relation */
3575         avgfreq = (1.0 - stanullfrac) / ndistinct;
3576
3577         /*
3578          * Adjust ndistinct to account for restriction clauses.  Observe we are
3579          * assuming that the data distribution is affected uniformly by the
3580          * restriction clauses!
3581          *
3582          * XXX Possibly better way, but much more expensive: multiply by
3583          * selectivity of rel's restriction clauses that mention the target Var.
3584          */
3585         if (vardata.rel && vardata.rel->tuples > 0)
3586         {
3587                 ndistinct *= vardata.rel->rows / vardata.rel->tuples;
3588                 ndistinct = clamp_row_est(ndistinct);
3589         }
3590
3591         /*
3592          * Initial estimate of bucketsize fraction is 1/nbuckets as long as the
3593          * number of buckets is less than the expected number of distinct values;
3594          * otherwise it is 1/ndistinct.
3595          */
3596         if (ndistinct > nbuckets)
3597                 estfract = 1.0 / nbuckets;
3598         else
3599                 estfract = 1.0 / ndistinct;
3600
3601         /*
3602          * Look up the frequency of the most common value, if available.
3603          */
3604         mcvfreq = 0.0;
3605
3606         if (HeapTupleIsValid(vardata.statsTuple))
3607         {
3608                 if (get_attstatsslot(vardata.statsTuple,
3609                                                          vardata.atttype, vardata.atttypmod,
3610                                                          STATISTIC_KIND_MCV, InvalidOid,
3611                                                          NULL,
3612                                                          NULL, NULL,
3613                                                          &numbers, &nnumbers))
3614                 {
3615                         /*
3616                          * The first MCV stat is for the most common value.
3617                          */
3618                         if (nnumbers > 0)
3619                                 mcvfreq = numbers[0];
3620                         free_attstatsslot(vardata.atttype, NULL, 0,
3621                                                           numbers, nnumbers);
3622                 }
3623         }
3624
3625         /*
3626          * Adjust estimated bucketsize upward to account for skewed distribution.
3627          */
3628         if (avgfreq > 0.0 && mcvfreq > avgfreq)
3629                 estfract *= mcvfreq / avgfreq;
3630
3631         /*
3632          * Clamp bucketsize to sane range (the above adjustment could easily
3633          * produce an out-of-range result).  We set the lower bound a little above
3634          * zero, since zero isn't a very sane result.
3635          */
3636         if (estfract < 1.0e-6)
3637                 estfract = 1.0e-6;
3638         else if (estfract > 1.0)
3639                 estfract = 1.0;
3640
3641         ReleaseVariableStats(vardata);
3642
3643         return (Selectivity) estfract;
3644 }
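
/*
 * A minimal stand-alone sketch of the arithmetic above, convenient for
 * checking the numbers by hand.  This helper is hypothetical (it is not used
 * by the planner) and ignores the restriction-clause adjustment to ndistinct.
 * For example, ndistinct = 50, nbuckets = 1024, stanullfrac = 0 and an MCV
 * frequency of 0.2 (average frequency 0.02) give (1.0 / 50) * 10 = 0.2.
 */
static double
toy_bucketsize(double ndistinct, double nbuckets,
               double stanullfrac, double mcvfreq)
{
    double      avgfreq = (1.0 - stanullfrac) / ndistinct;
    double      estfract;

    /* 1/nbuckets while distinct values outnumber the buckets */
    estfract = (ndistinct > nbuckets) ? 1.0 / nbuckets : 1.0 / ndistinct;

    /* scale up for skew, mirroring estimate_hash_bucketsize() */
    if (avgfreq > 0.0 && mcvfreq > avgfreq)
        estfract *= mcvfreq / avgfreq;

    /* clamp to the same sane range */
    if (estfract < 1.0e-6)
        estfract = 1.0e-6;
    else if (estfract > 1.0)
        estfract = 1.0;

    return estfract;
}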
3645
3646
3647 /*-------------------------------------------------------------------------
3648  *
3649  * Support routines
3650  *
3651  *-------------------------------------------------------------------------
3652  */
3653
3654 /*
3655  * convert_to_scalar
3656  *        Convert non-NULL values of the indicated types to the comparison
3657  *        scale needed by scalarineqsel().
3658  *        Returns "true" if successful.
3659  *
3660  * XXX this routine is a hack: ideally we should look up the conversion
3661  * subroutines in pg_type.
3662  *
3663  * All numeric datatypes are simply converted to their equivalent
3664  * "double" values.  (NUMERIC values that are outside the range of "double"
3665  * are clamped to +/- HUGE_VAL.)
3666  *
3667  * String datatypes are converted by convert_string_to_scalar(),
3668  * which is explained below.  The reason why this routine deals with
3669  * three values at a time, not just one, is that we need it for strings.
3670  *
3671  * The bytea datatype is just different enough from strings that it has
3672  * to be treated separately.
3673  *
3674  * The several datatypes representing absolute times are all converted
3675  * to Timestamp, which is actually a double, and then we just use that
3676  * double value.  Note this will give correct results even for the "special"
3677  * values of Timestamp, since those are chosen to compare correctly;
3678  * see timestamp_cmp.
3679  *
3680  * The several datatypes representing relative times (intervals) are all
3681  * converted to measurements expressed in seconds.
3682  */
3683 static bool
3684 convert_to_scalar(Datum value, Oid valuetypid, double *scaledvalue,
3685                                   Datum lobound, Datum hibound, Oid boundstypid,
3686                                   double *scaledlobound, double *scaledhibound)
3687 {
3688         /*
3689          * Both the valuetypid and the boundstypid should exactly match the
3690          * declared input type(s) of the operator we are invoked for, so we just
3691          * error out if either is not recognized.
3692          *
3693          * XXX The histogram we are interpolating between points of could belong
3694          * to a column that's only binary-compatible with the declared type. In
3695          * essence we are assuming that the semantics of binary-compatible types
3696          * are enough alike that we can use a histogram generated with one type's
3697          * operators to estimate selectivity for the other's.  This is outright
3698          * wrong in some cases --- in particular signed versus unsigned
3699          * interpretation could trip us up.  But it's useful enough in the
3700          * majority of cases that we do it anyway.  Should think about more
3701          * rigorous ways to do it.
3702          */
3703         switch (valuetypid)
3704         {
3705                         /*
3706                          * Built-in numeric types
3707                          */
3708                 case BOOLOID:
3709                 case INT2OID:
3710                 case INT4OID:
3711                 case INT8OID:
3712                 case FLOAT4OID:
3713                 case FLOAT8OID:
3714                 case NUMERICOID:
3715                 case OIDOID:
3716                 case REGPROCOID:
3717                 case REGPROCEDUREOID:
3718                 case REGOPEROID:
3719                 case REGOPERATOROID:
3720                 case REGCLASSOID:
3721                 case REGTYPEOID:
3722                 case REGCONFIGOID:
3723                 case REGDICTIONARYOID:
3724                 case REGROLEOID:
3725                 case REGNAMESPACEOID:
3726                         *scaledvalue = convert_numeric_to_scalar(value, valuetypid);
3727                         *scaledlobound = convert_numeric_to_scalar(lobound, boundstypid);
3728                         *scaledhibound = convert_numeric_to_scalar(hibound, boundstypid);
3729                         return true;
3730
3731                         /*
3732                          * Built-in string types
3733                          */
3734                 case CHAROID:
3735                 case BPCHAROID:
3736                 case VARCHAROID:
3737                 case TEXTOID:
3738                 case NAMEOID:
3739                         {
3740                                 char       *valstr = convert_string_datum(value, valuetypid);
3741                                 char       *lostr = convert_string_datum(lobound, boundstypid);
3742                                 char       *histr = convert_string_datum(hibound, boundstypid);
3743
3744                                 convert_string_to_scalar(valstr, scaledvalue,
3745                                                                                  lostr, scaledlobound,
3746                                                                                  histr, scaledhibound);
3747                                 pfree(valstr);
3748                                 pfree(lostr);
3749                                 pfree(histr);
3750                                 return true;
3751                         }
3752
3753                         /*
3754                          * Built-in bytea type
3755                          */
3756                 case BYTEAOID:
3757                         {
3758                                 convert_bytea_to_scalar(value, scaledvalue,
3759                                                                                 lobound, scaledlobound,
3760                                                                                 hibound, scaledhibound);
3761                                 return true;
3762                         }
3763
3764                         /*
3765                          * Built-in time types
3766                          */
3767                 case TIMESTAMPOID:
3768                 case TIMESTAMPTZOID:
3769                 case ABSTIMEOID:
3770                 case DATEOID:
3771                 case INTERVALOID:
3772                 case RELTIMEOID:
3773                 case TINTERVALOID:
3774                 case TIMEOID:
3775                 case TIMETZOID:
3776                         *scaledvalue = convert_timevalue_to_scalar(value, valuetypid);
3777                         *scaledlobound = convert_timevalue_to_scalar(lobound, boundstypid);
3778                         *scaledhibound = convert_timevalue_to_scalar(hibound, boundstypid);
3779                         return true;
3780
3781                         /*
3782                          * Built-in network types
3783                          */
3784                 case INETOID:
3785                 case CIDROID:
3786                 case MACADDROID:
3787                         *scaledvalue = convert_network_to_scalar(value, valuetypid);
3788                         *scaledlobound = convert_network_to_scalar(lobound, boundstypid);
3789                         *scaledhibound = convert_network_to_scalar(hibound, boundstypid);
3790                         return true;
3791         }
3792         /* Don't know how to convert */
3793         *scaledvalue = *scaledlobound = *scaledhibound = 0;
3794         return false;
3795 }
3796
3797 /*
3798  * Do convert_to_scalar()'s work for any numeric data type.
3799  */
3800 static double
3801 convert_numeric_to_scalar(Datum value, Oid typid)
3802 {
3803         switch (typid)
3804         {
3805                 case BOOLOID:
3806                         return (double) DatumGetBool(value);
3807                 case INT2OID:
3808                         return (double) DatumGetInt16(value);
3809                 case INT4OID:
3810                         return (double) DatumGetInt32(value);
3811                 case INT8OID:
3812                         return (double) DatumGetInt64(value);
3813                 case FLOAT4OID:
3814                         return (double) DatumGetFloat4(value);
3815                 case FLOAT8OID:
3816                         return (double) DatumGetFloat8(value);
3817                 case NUMERICOID:
3818                         /* Note: out-of-range values will be clamped to +-HUGE_VAL */
3819                         return (double)
3820                                 DatumGetFloat8(DirectFunctionCall1(numeric_float8_no_overflow,
3821                                                                                                    value));
3822                 case OIDOID:
3823                 case REGPROCOID:
3824                 case REGPROCEDUREOID:
3825                 case REGOPEROID:
3826                 case REGOPERATOROID:
3827                 case REGCLASSOID:
3828                 case REGTYPEOID:
3829                 case REGCONFIGOID:
3830                 case REGDICTIONARYOID:
3831                 case REGROLEOID:
3832                 case REGNAMESPACEOID:
3833                         /* we can treat OIDs as integers... */
3834                         return (double) DatumGetObjectId(value);
3835         }
3836
3837         /*
3838          * Can't get here unless someone tries to use scalarltsel/scalargtsel on
3839          * an operator with one numeric and one non-numeric operand.
3840          */
3841         elog(ERROR, "unsupported type: %u", typid);
3842         return 0;
3843 }
3844
3845 /*
3846  * Do convert_to_scalar()'s work for any character-string data type.
3847  *
3848  * String datatypes are converted to a scale that ranges from 0 to 1,
3849  * where we visualize the bytes of the string as fractional digits.
3850  *
3851  * We do not want the base to be 256, however, since that tends to
3852  * generate inflated selectivity estimates; few databases will have
3853  * occurrences of all 256 possible byte values at each position.
3854  * Instead, use the smallest and largest byte values seen in the bounds
3855  * as the estimated range for each byte, after some fudging to deal with
3856  * the fact that we probably aren't going to see the full range that way.
3857  *
3858  * An additional refinement is that we discard any common prefix of the
3859  * three strings before computing the scaled values.  This allows us to
3860  * "zoom in" when we encounter a narrow data range.  An example is a phone
3861  * number database where all the values begin with the same area code.
3862  * (Actually, the bounds will be adjacent histogram-bin-boundary values,
3863  * so this is more likely to happen than you might think.)
3864  */
3865 static void
3866 convert_string_to_scalar(char *value,
3867                                                  double *scaledvalue,
3868                                                  char *lobound,
3869                                                  double *scaledlobound,
3870                                                  char *hibound,
3871                                                  double *scaledhibound)
3872 {
3873         int                     rangelo,
3874                                 rangehi;
3875         char       *sptr;
3876
3877         rangelo = rangehi = (unsigned char) hibound[0];
3878         for (sptr = lobound; *sptr; sptr++)
3879         {
3880                 if (rangelo > (unsigned char) *sptr)
3881                         rangelo = (unsigned char) *sptr;
3882                 if (rangehi < (unsigned char) *sptr)
3883                         rangehi = (unsigned char) *sptr;
3884         }
3885         for (sptr = hibound; *sptr; sptr++)
3886         {
3887                 if (rangelo > (unsigned char) *sptr)
3888                         rangelo = (unsigned char) *sptr;
3889                 if (rangehi < (unsigned char) *sptr)
3890                         rangehi = (unsigned char) *sptr;
3891         }
3892         /* If range includes any upper-case ASCII chars, make it include all */
3893         if (rangelo <= 'Z' && rangehi >= 'A')
3894         {
3895                 if (rangelo > 'A')
3896                         rangelo = 'A';
3897                 if (rangehi < 'Z')
3898                         rangehi = 'Z';
3899         }
3900         /* Ditto lower-case */
3901         if (rangelo <= 'z' && rangehi >= 'a')
3902         {
3903                 if (rangelo > 'a')
3904                         rangelo = 'a';
3905                 if (rangehi < 'z')
3906                         rangehi = 'z';
3907         }
3908         /* Ditto digits */
3909         if (rangelo <= '9' && rangehi >= '0')
3910         {
3911                 if (rangelo > '0')
3912                         rangelo = '0';
3913                 if (rangehi < '9')
3914                         rangehi = '9';
3915         }
3916
3917         /*
3918          * If the range includes fewer than 10 chars, assume we have not got
3919          * enough data, and make it include the regular ASCII set.
3920          */
3921         if (rangehi - rangelo < 9)
3922         {
3923                 rangelo = ' ';
3924                 rangehi = 127;
3925         }
3926
3927         /*
3928          * Now strip any common prefix of the three strings.
3929          */
3930         while (*lobound)
3931         {
3932                 if (*lobound != *hibound || *lobound != *value)
3933                         break;
3934                 lobound++, hibound++, value++;
3935         }
3936
3937         /*
3938          * Now we can do the conversions.
3939          */
3940         *scaledvalue = convert_one_string_to_scalar(value, rangelo, rangehi);
3941         *scaledlobound = convert_one_string_to_scalar(lobound, rangelo, rangehi);
3942         *scaledhibound = convert_one_string_to_scalar(hibound, rangelo, rangehi);
3943 }
3944
3945 static double
3946 convert_one_string_to_scalar(char *value, int rangelo, int rangehi)
3947 {
3948         int                     slen = strlen(value);
3949         double          num,
3950                                 denom,
3951                                 base;
3952
3953         if (slen <= 0)
3954                 return 0.0;                             /* empty string has scalar value 0 */
3955
3956         /*
3957          * There seems little point in considering more than a dozen bytes from
3958          * the string.  Since base is at least 10, that will give us nominal
3959          * resolution of at least 12 decimal digits, which is surely far more
3960          * precision than this estimation technique has got anyway (especially in
3961          * non-C locales).  Also, even with the maximum possible base of 256, this
3962          * ensures denom cannot grow larger than 256^13 = 2.03e31, which will not
3963          * overflow on any known machine.
3964          */
3965         if (slen > 12)
3966                 slen = 12;
3967
3968         /* Convert initial characters to fraction */
3969         base = rangehi - rangelo + 1;
3970         num = 0.0;
3971         denom = base;
3972         while (slen-- > 0)
3973         {
3974                 int                     ch = (unsigned char) *value++;
3975
3976                 if (ch < rangelo)
3977                         ch = rangelo - 1;
3978                 else if (ch > rangehi)
3979                         ch = rangehi + 1;
3980                 num += ((double) (ch - rangelo)) / denom;
3981                 denom *= base;
3982         }
3983
3984         return num;
3985 }
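
/*
 * Worked example: with rangelo = 'a' and rangehi = 'z' (base 26), the string
 * "cat" becomes the base-26 fraction 2/26 + 0/26^2 + 19/26^3 ~= 0.0780, i.e.
 * it lands about 7.8% of the way into the assumed data range.
 */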
3986
3987 /*
3988  * Convert a string-type Datum into a palloc'd, null-terminated string.
3989  *
3990  * When using a non-C locale, we must pass the string through strxfrm()
3991  * before continuing, so as to generate correct locale-specific results.
3992  */
3993 static char *
3994 convert_string_datum(Datum value, Oid typid)
3995 {
3996         char       *val;
3997
3998         switch (typid)
3999         {
4000                 case CHAROID:
4001                         val = (char *) palloc(2);
4002                         val[0] = DatumGetChar(value);
4003                         val[1] = '\0';
4004                         break;
4005                 case BPCHAROID:
4006                 case VARCHAROID:
4007                 case TEXTOID:
4008                         val = TextDatumGetCString(value);
4009                         break;
4010                 case NAMEOID:
4011                         {
4012                                 NameData   *nm = (NameData *) DatumGetPointer(value);
4013
4014                                 val = pstrdup(NameStr(*nm));
4015                                 break;
4016                         }
4017                 default:
4018
4019                         /*
4020                          * Can't get here unless someone tries to use scalarltsel on an
4021                          * operator with one string and one non-string operand.
4022                          */
4023                         elog(ERROR, "unsupported type: %u", typid);
4024                         return NULL;
4025         }
4026
4027         if (!lc_collate_is_c(DEFAULT_COLLATION_OID))
4028         {
4029                 char       *xfrmstr;
4030                 size_t          xfrmlen;
4031                 size_t xfrmlen2 PG_USED_FOR_ASSERTS_ONLY;
4032
4033                 /*
4034                  * XXX: We could guess at a suitable output buffer size and only call
4035                  * strxfrm twice if our guess is too small.
4036                  *
4037                  * XXX: strxfrm doesn't support UTF-8 encoding on Win32; it can return
4038                  * bogus data or set an error.  This is not really a problem unless it
4039                  * crashes, since it will only give an estimation error and nothing
4040                  * fatal.
4041                  */
4042 #if _MSC_VER == 1400                    /* VS.Net 2005 */
4043
4044                 /*
4045                  * See the bug report at
4046                  * http://connect.microsoft.com/VisualStudio/feedback/ViewFeedback.aspx?FeedbackID=99694
4047                  */
4048                 {
4049                         char            x[1];
4050
4051                         xfrmlen = strxfrm(x, val, 0);
4052                 }
4053 #else
4054                 xfrmlen = strxfrm(NULL, val, 0);
4055 #endif
4056 #ifdef WIN32
4057
4058                 /*
4059                  * On Windows, strxfrm returns INT_MAX when an error occurs. Instead
4060                  * of trying to allocate this much memory (and fail), just return the
4061                  * original string unmodified as if we were in the C locale.
4062                  */
4063                 if (xfrmlen == INT_MAX)
4064                         return val;
4065 #endif
4066                 xfrmstr = (char *) palloc(xfrmlen + 1);
4067                 xfrmlen2 = strxfrm(xfrmstr, val, xfrmlen + 1);
4068
4069                 /*
4070                  * Some systems (e.g., glibc) can return a smaller value from the
4071                  * second call than the first; thus the Assert must be <= not ==.
4072                  */
4073                 Assert(xfrmlen2 <= xfrmlen);
4074                 pfree(val);
4075                 val = xfrmstr;
4076         }
4077
4078         return val;
4079 }
4080
4081 /*
4082  * Do convert_to_scalar()'s work for any bytea data type.
4083  *
4084  * Very similar to convert_string_to_scalar except we can't assume
4085  * null-termination and therefore pass explicit lengths around.
4086  *
4087  * Also, assumptions about likely "normal" ranges of characters have been
4088  * removed - a data range of 0..255 is always used, for now.  (Perhaps
4089  * someday we will add information about actual byte data range to
4090  * pg_statistic.)
4091  */
4092 static void
4093 convert_bytea_to_scalar(Datum value,
4094                                                 double *scaledvalue,
4095                                                 Datum lobound,
4096                                                 double *scaledlobound,
4097                                                 Datum hibound,
4098                                                 double *scaledhibound)
4099 {
4100         int                     rangelo,
4101                                 rangehi,
4102                                 valuelen = VARSIZE(DatumGetPointer(value)) - VARHDRSZ,
4103                                 loboundlen = VARSIZE(DatumGetPointer(lobound)) - VARHDRSZ,
4104                                 hiboundlen = VARSIZE(DatumGetPointer(hibound)) - VARHDRSZ,
4105                                 i,
4106                                 minlen;
4107         unsigned char *valstr = (unsigned char *) VARDATA(DatumGetPointer(value)),
4108                            *lostr = (unsigned char *) VARDATA(DatumGetPointer(lobound)),
4109                            *histr = (unsigned char *) VARDATA(DatumGetPointer(hibound));
4110
4111         /*
4112          * Assume bytea data is uniformly distributed across all byte values.
4113          */
4114         rangelo = 0;
4115         rangehi = 255;
4116
4117         /*
4118          * Now strip any common prefix of the three strings.
4119          */
4120         minlen = Min(Min(valuelen, loboundlen), hiboundlen);
4121         for (i = 0; i < minlen; i++)
4122         {
4123                 if (*lostr != *histr || *lostr != *valstr)
4124                         break;
4125                 lostr++, histr++, valstr++;
4126                 loboundlen--, hiboundlen--, valuelen--;
4127         }
4128
4129         /*
4130          * Now we can do the conversions.
4131          */
4132         *scaledvalue = convert_one_bytea_to_scalar(valstr, valuelen, rangelo, rangehi);
4133         *scaledlobound = convert_one_bytea_to_scalar(lostr, loboundlen, rangelo, rangehi);
4134         *scaledhibound = convert_one_bytea_to_scalar(histr, hiboundlen, rangelo, rangehi);
4135 }
4136
4137 static double
4138 convert_one_bytea_to_scalar(unsigned char *value, int valuelen,
4139                                                         int rangelo, int rangehi)
4140 {
4141         double          num,
4142                                 denom,
4143                                 base;
4144
4145         if (valuelen <= 0)
4146                 return 0.0;                             /* empty string has scalar value 0 */
4147
4148         /*
4149          * Since the base is 256, we need not consider more than about 10 chars
4150          * (even this many seems like overkill).
4151          */
4152         if (valuelen > 10)
4153                 valuelen = 10;
4154
4155         /* Convert initial characters to fraction */
4156         base = rangehi - rangelo + 1;
4157         num = 0.0;
4158         denom = base;
4159         while (valuelen-- > 0)
4160         {
4161                 int                     ch = *value++;
4162
4163                 if (ch < rangelo)
4164                         ch = rangelo - 1;
4165                 else if (ch > rangehi)
4166                         ch = rangehi + 1;
4167                 num += ((double) (ch - rangelo)) / denom;
4168                 denom *= base;
4169         }
4170
4171         return num;
4172 }
4173
4174 /*
4175  * Do convert_to_scalar()'s work for any timevalue data type.
4176  */
4177 static double
4178 convert_timevalue_to_scalar(Datum value, Oid typid)
4179 {
4180         switch (typid)
4181         {
4182                 case TIMESTAMPOID:
4183                         return DatumGetTimestamp(value);
4184                 case TIMESTAMPTZOID:
4185                         return DatumGetTimestampTz(value);
4186                 case ABSTIMEOID:
4187                         return DatumGetTimestamp(DirectFunctionCall1(abstime_timestamp,
4188                                                                                                                  value));
4189                 case DATEOID:
4190                         return date2timestamp_no_overflow(DatumGetDateADT(value));
4191                 case INTERVALOID:
4192                         {
4193                                 Interval   *interval = DatumGetIntervalP(value);
4194
4195                                 /*
4196                                  * Convert the month part of Interval to days using assumed
4197                                  * average month length of 365.25/12.0 days.  Not too
4198                                  * accurate, but plenty good enough for our purposes.
4199                                  */
4200 #ifdef HAVE_INT64_TIMESTAMP
4201                                 return interval->time + interval->day * (double) USECS_PER_DAY +
4202                                         interval->month * ((DAYS_PER_YEAR / (double) MONTHS_PER_YEAR) * USECS_PER_DAY);
4203 #else
4204                                 return interval->time + interval->day * SECS_PER_DAY +
4205                                         interval->month * ((DAYS_PER_YEAR / (double) MONTHS_PER_YEAR) * (double) SECS_PER_DAY);
4206 #endif
4207                         }
4208                 case RELTIMEOID:
4209 #ifdef HAVE_INT64_TIMESTAMP
4210                         return (DatumGetRelativeTime(value) * 1000000.0);
4211 #else
4212                         return DatumGetRelativeTime(value);
4213 #endif
4214                 case TINTERVALOID:
4215                         {
4216                                 TimeInterval tinterval = DatumGetTimeInterval(value);
4217
4218 #ifdef HAVE_INT64_TIMESTAMP
4219                                 if (tinterval->status != 0)
4220                                         return ((tinterval->data[1] - tinterval->data[0]) * 1000000.0);
4221 #else
4222                                 if (tinterval->status != 0)
4223                                         return tinterval->data[1] - tinterval->data[0];
4224 #endif
4225                                 return 0;               /* for lack of a better idea */
4226                         }
4227                 case TIMEOID:
4228                         return DatumGetTimeADT(value);
4229                 case TIMETZOID:
4230                         {
4231                                 TimeTzADT  *timetz = DatumGetTimeTzADTP(value);
4232
4233                                 /* use GMT-equivalent time */
4234 #ifdef HAVE_INT64_TIMESTAMP
4235                                 return (double) (timetz->time + (timetz->zone * 1000000.0));
4236 #else
4237                                 return (double) (timetz->time + timetz->zone);
4238 #endif
4239                         }
4240         }
4241
4242         /*
4243          * Can't get here unless someone tries to use scalarltsel/scalargtsel on
4244          * an operator with one timevalue and one non-timevalue operand.
4245          */
4246         elog(ERROR, "unsupported type: %u", typid);
4247         return 0;
4248 }
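
/*
 * Worked example for the INTERVAL case (integer timestamps): an interval of
 * '1 mon 2 days 03:00:00' scales to
 *     3 * 3600 * 1000000                       (time part, in microseconds)
 *   + 2 * USECS_PER_DAY                        (day part)
 *   + 1 * (365.25 / 12.0) * USECS_PER_DAY      (month part, ~30.44 days)
 * which is roughly 2.8e12 microseconds.
 */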
4249
4250
4251 /*
4252  * get_restriction_variable
4253  *              Examine the args of a restriction clause to see if it's of the
4254  *              form (variable op pseudoconstant) or (pseudoconstant op variable),
4255  *              where "variable" could be either a Var or an expression in vars of a
4256  *              single relation.  If so, extract information about the variable,
4257  *              and also indicate which side it was on and the other argument.
4258  *
4259  * Inputs:
4260  *      root: the planner info
4261  *      args: clause argument list
4262  *      varRelid: see specs for restriction selectivity functions
4263  *
4264  * Outputs: (these are valid only if TRUE is returned)
4265  *      *vardata: gets information about variable (see examine_variable)
4266  *      *other: gets other clause argument, aggressively reduced to a constant
4267  *      *varonleft: set TRUE if variable is on the left, FALSE if on the right
4268  *
4269  * Returns TRUE if a variable is identified, otherwise FALSE.
4270  *
4271  * Note: if there are Vars on both sides of the clause, we must fail, because
4272  * callers are expecting that the other side will act like a pseudoconstant.
4273  */
4274 bool
4275 get_restriction_variable(PlannerInfo *root, List *args, int varRelid,
4276                                                  VariableStatData *vardata, Node **other,
4277                                                  bool *varonleft)
4278 {
4279         Node       *left,
4280                            *right;
4281         VariableStatData rdata;
4282
4283         /* Fail if not a binary opclause (probably shouldn't happen) */
4284         if (list_length(args) != 2)
4285                 return false;
4286
4287         left = (Node *) linitial(args);
4288         right = (Node *) lsecond(args);
4289
4290         /*
4291          * Examine both sides.  Note that when varRelid is nonzero, Vars of other
4292          * relations will be treated as pseudoconstants.
4293          */
4294         examine_variable(root, left, varRelid, vardata);
4295         examine_variable(root, right, varRelid, &rdata);
4296
4297         /*
4298          * If one side is a variable and the other not, we win.
4299          */
4300         if (vardata->rel && rdata.rel == NULL)
4301         {
4302                 *varonleft = true;
4303                 *other = estimate_expression_value(root, rdata.var);
4304                 /* Assume we need no ReleaseVariableStats(rdata) here */
4305                 return true;
4306         }
4307
4308         if (vardata->rel == NULL && rdata.rel)
4309         {
4310                 *varonleft = false;
4311                 *other = estimate_expression_value(root, vardata->var);
4312                 /* Assume we need no ReleaseVariableStats(*vardata) here */
4313                 *vardata = rdata;
4314                 return true;
4315         }
4316
4317         /* Oops, clause has wrong structure (probably var op var) */
4318         ReleaseVariableStats(*vardata);
4319         ReleaseVariableStats(rdata);
4320
4321         return false;
4322 }
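
/*
 * Minimal usage sketch, showing the calling pattern only: the estimator
 * below is hypothetical (its name and its 0.5 fallback are made up for
 * illustration), but the get_restriction_variable() protocol it follows is
 * the one documented above.
 */
static Selectivity
example_restriction_estimator(PlannerInfo *root, Oid operator,
                              List *args, int varRelid)
{
    VariableStatData vardata;
    Node       *other;
    bool        varonleft;
    Selectivity selec = 0.5;    /* arbitrary fallback for this sketch */

    if (!get_restriction_variable(root, args, varRelid,
                                  &vardata, &other, &varonleft))
        return selec;           /* no lone variable: stick with the fallback */

    /*
     * ... consult vardata.statsTuple (MCV list, histogram) for "operator",
     * comparing against "other", and minding "varonleft" ...
     */

    ReleaseVariableStats(vardata);
    return selec;
}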
4323
4324 /*
4325  * get_join_variables
4326  *              Apply examine_variable() to each side of a join clause.
4327  *              Also, attempt to identify whether the join clause has the same
4328  *              or reversed sense compared to the SpecialJoinInfo.
4329  *
4330  * We consider the join clause "normal" if it is "lhs_var OP rhs_var",
4331  * or "reversed" if it is "rhs_var OP lhs_var".  In complicated cases
4332  * where we can't tell for sure, we default to assuming it's normal.
4333  */
4334 void
4335 get_join_variables(PlannerInfo *root, List *args, SpecialJoinInfo *sjinfo,
4336                                    VariableStatData *vardata1, VariableStatData *vardata2,
4337                                    bool *join_is_reversed)
4338 {
4339         Node       *left,
4340                            *right;
4341
4342         if (list_length(args) != 2)
4343                 elog(ERROR, "join operator should take two arguments");
4344
4345         left = (Node *) linitial(args);
4346         right = (Node *) lsecond(args);
4347
4348         examine_variable(root, left, 0, vardata1);
4349         examine_variable(root, right, 0, vardata2);
4350
4351         if (vardata1->rel &&
4352                 bms_is_subset(vardata1->rel->relids, sjinfo->syn_righthand))
4353                 *join_is_reversed = true;               /* var1 is on RHS */
4354         else if (vardata2->rel &&
4355                          bms_is_subset(vardata2->rel->relids, sjinfo->syn_lefthand))
4356                 *join_is_reversed = true;               /* var2 is on LHS */
4357         else
4358                 *join_is_reversed = false;
4359 }
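
/*
 * Minimal usage sketch (the caller below is hypothetical, for illustration
 * only): a join estimator fetches stats for both sides and then consults
 * join_is_reversed to know which VariableStatData describes the LHS.
 */
static void
example_join_estimator_setup(PlannerInfo *root, List *args,
                             SpecialJoinInfo *sjinfo)
{
    VariableStatData vardata1;
    VariableStatData vardata2;
    bool        join_is_reversed;

    get_join_variables(root, args, sjinfo,
                       &vardata1, &vardata2, &join_is_reversed);

    /* ... derive a join selectivity from vardata1 and vardata2 ... */

    ReleaseVariableStats(vardata1);
    ReleaseVariableStats(vardata2);
}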
4360
4361 /*
4362  * examine_variable
4363  *              Try to look up statistical data about an expression.
4364  *              Fill in a VariableStatData struct to describe the expression.
4365  *
4366  * Inputs:
4367  *      root: the planner info
4368  *      node: the expression tree to examine
4369  *      varRelid: see specs for restriction selectivity functions
4370  *
4371  * Outputs: *vardata is filled as follows:
4372  *      var: the input expression (with any binary relabeling stripped, if
4373  *              it is or contains a variable; but otherwise the type is preserved)
4374  *      rel: RelOptInfo for relation containing variable; NULL if expression
4375  *              contains no Vars (NOTE this could point to a RelOptInfo of a
4376  *              subquery, not one in the current query).
4377  *      statsTuple: the pg_statistic entry for the variable, if one exists;
4378  *              otherwise NULL.
4379  *      freefunc: pointer to a function to release statsTuple with.
4380  *      vartype: exposed type of the expression; this should always match
4381  *              the declared input type of the operator we are estimating for.
4382  *      atttype, atttypmod: type data to pass to get_attstatsslot().  This is
4383  *              commonly the same as the exposed type of the variable argument,
4384  *              but can be different in binary-compatible-type cases.
4385  *      isunique: TRUE if we were able to match the var to a unique index or a
4386  *              single-column DISTINCT clause, implying its values are unique for
4387  *              this query.  (Caution: this should be trusted for statistical
4388  *              purposes only, since we do not check indimmediate nor verify that
4389  *              the exact same definition of equality applies.)
4390  *
4391  * Caller is responsible for doing ReleaseVariableStats() before exiting.
4392  */
4393 void
4394 examine_variable(PlannerInfo *root, Node *node, int varRelid,
4395                                  VariableStatData *vardata)
4396 {
4397         Node       *basenode;
4398         Relids          varnos;
4399         RelOptInfo *onerel;
4400
4401         /* Make sure we don't return dangling pointers in vardata */
4402         MemSet(vardata, 0, sizeof(VariableStatData));
4403
4404         /* Save the exposed type of the expression */
4405         vardata->vartype = exprType(node);
4406
4407         /* Look inside any binary-compatible relabeling */
4408
4409         if (IsA(node, RelabelType))
4410                 basenode = (Node *) ((RelabelType *) node)->arg;
4411         else
4412                 basenode = node;
4413
4414         /* Fast path for a simple Var */
4415
4416         if (IsA(basenode, Var) &&
4417                 (varRelid == 0 || varRelid == ((Var *) basenode)->varno))
4418         {
4419                 Var                *var = (Var *) basenode;
4420
4421                 /* Set up result fields other than the stats tuple */
4422                 vardata->var = basenode;        /* return Var without relabeling */
4423                 vardata->rel = find_base_rel(root, var->varno);
4424                 vardata->atttype = var->vartype;
4425                 vardata->atttypmod = var->vartypmod;
4426                 vardata->isunique = has_unique_index(vardata->rel, var->varattno);
4427
4428                 /* Try to locate some stats */
4429                 examine_simple_variable(root, var, vardata);
4430
4431                 return;
4432         }
4433
4434         /*
4435          * Okay, it's a more complicated expression.  Determine variable
4436          * membership.  Note that when varRelid isn't zero, only vars of that
4437          * relation are considered "real" vars.
4438          */
4439         varnos = pull_varnos(basenode);
4440
4441         onerel = NULL;
4442
4443         switch (bms_membership(varnos))
4444         {
4445                 case BMS_EMPTY_SET:
4446                         /* No Vars at all ... must be pseudo-constant clause */
4447                         break;
4448                 case BMS_SINGLETON:
4449                         if (varRelid == 0 || bms_is_member(varRelid, varnos))
4450                         {
4451                                 onerel = find_base_rel(root,
4452                                            (varRelid ? varRelid : bms_singleton_member(varnos)));
4453                                 vardata->rel = onerel;
4454                                 node = basenode;        /* strip any relabeling */
4455                         }
4456                         /* else treat it as a constant */
4457                         break;
4458                 case BMS_MULTIPLE:
4459                         if (varRelid == 0)
4460                         {
4461                                 /* treat it as a variable of a join relation */
4462                                 vardata->rel = find_join_rel(root, varnos);
4463                                 node = basenode;        /* strip any relabeling */
4464                         }
4465                         else if (bms_is_member(varRelid, varnos))
4466                         {
4467                                 /* ignore the vars belonging to other relations */
4468                                 vardata->rel = find_base_rel(root, varRelid);
4469                                 node = basenode;        /* strip any relabeling */
4470                                 /* note: no point in expressional-index search here */
4471                         }
4472                         /* else treat it as a constant */
4473                         break;
4474         }
4475
4476         bms_free(varnos);
4477
4478         vardata->var = node;
4479         vardata->atttype = exprType(node);
4480         vardata->atttypmod = exprTypmod(node);
4481
4482         if (onerel)
4483         {
4484                 /*
4485                  * We have an expression in vars of a single relation.  Try to match
4486                  * it to expressional index columns, in hopes of finding some
4487                  * statistics.
4488                  *
4489                  * XXX it's conceivable that there are multiple matches with different
4490                  * index opfamilies; if so, we need to pick one that matches the
4491                  * operator we are estimating for.  FIXME later.
4492                  */
4493                 ListCell   *ilist;
4494
4495                 foreach(ilist, onerel->indexlist)
4496                 {
4497                         IndexOptInfo *index = (IndexOptInfo *) lfirst(ilist);
4498                         ListCell   *indexpr_item;
4499                         int                     pos;
4500
4501                         indexpr_item = list_head(index->indexprs);
4502                         if (indexpr_item == NULL)
4503                                 continue;               /* no expressions here... */
4504
4505                         for (pos = 0; pos < index->ncolumns; pos++)
4506                         {
4507                                 if (index->indexkeys[pos] == 0)
4508                                 {
4509                                         Node       *indexkey;
4510
4511                                         if (indexpr_item == NULL)
4512                                                 elog(ERROR, "too few entries in indexprs list");
4513                                         indexkey = (Node *) lfirst(indexpr_item);
4514                                         if (indexkey && IsA(indexkey, RelabelType))
4515                                                 indexkey = (Node *) ((RelabelType *) indexkey)->arg;
4516                                         if (equal(node, indexkey))
4517                                         {
4518                                                 /*
4519                                                  * Found a match ... is it a unique index? Tests here
4520                                                  * should match has_unique_index().
4521                                                  */
4522                                                 if (index->unique &&
4523                                                         index->ncolumns == 1 &&
4524                                                         (index->indpred == NIL || index->predOK))
4525                                                         vardata->isunique = true;
4526
4527                                                 /*
4528                                                  * Has it got stats?  We only consider stats for
4529                                                  * non-partial indexes, since partial indexes probably
4530                                                  * don't reflect whole-relation statistics; the above
4531                                                  * check for uniqueness is the only info we take from
4532                                                  * a partial index.
4533                                                  *
4534                                                  * An index stats hook, however, must make its own
4535                                                  * decisions about what to do with partial indexes.
4536                                                  */
4537                                                 if (get_index_stats_hook &&
4538                                                         (*get_index_stats_hook) (root, index->indexoid,
4539                                                                                                          pos + 1, vardata))
4540                                                 {
4541                                                         /*
4542                                                          * The hook took control of acquiring a stats
4543                                                          * tuple.  If it did supply a tuple, it'd better
4544                                                          * have supplied a freefunc.
4545                                                          */
4546                                                         if (HeapTupleIsValid(vardata->statsTuple) &&
4547                                                                 !vardata->freefunc)
4548                                                                 elog(ERROR, "no function provided to release variable stats with");
4549                                                 }
4550                                                 else if (index->indpred == NIL)
4551                                                 {
4552                                                         vardata->statsTuple =
4553                                                                 SearchSysCache3(STATRELATTINH,
4554                                                                                    ObjectIdGetDatum(index->indexoid),
4555                                                                                                 Int16GetDatum(pos + 1),
4556                                                                                                 BoolGetDatum(false));
4557                                                         vardata->freefunc = ReleaseSysCache;
4558                                                 }
4559                                                 if (vardata->statsTuple)
4560                                                         break;
4561                                         }
4562                                         indexpr_item = lnext(indexpr_item);
4563                                 }
4564                         }
4565                         if (vardata->statsTuple)
4566                                 break;
4567                 }
4568         }
4569 }
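
/*
 * Minimal usage sketch (hypothetical helper, for illustration only): the
 * usual examine / consult / release pattern, here extracting the ndistinct
 * estimate the same way estimate_hash_bucketsize() does above.
 */
static double
example_ndistinct_of(PlannerInfo *root, Node *expr)
{
    VariableStatData vardata;
    bool        isdefault;
    double      ndistinct;

    examine_variable(root, expr, 0, &vardata);
    ndistinct = get_variable_numdistinct(&vardata, &isdefault);
    ReleaseVariableStats(vardata);

    return ndistinct;
}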
4570
4571 /*
4572  * examine_simple_variable
4573  *              Handle a simple Var for examine_variable
4574  *
4575  * This is split out as a subroutine so that we can recurse to deal with
4576  * Vars referencing subqueries.
4577  *
4578  * We already filled in all the fields of *vardata except for the stats tuple.
4579  */
4580 static void
4581 examine_simple_variable(PlannerInfo *root, Var *var,
4582                                                 VariableStatData *vardata)
4583 {
4584         RangeTblEntry *rte = root->simple_rte_array[var->varno];
4585
4586         Assert(IsA(rte, RangeTblEntry));
4587
4588         if (get_relation_stats_hook &&
4589                 (*get_relation_stats_hook) (root, rte, var->varattno, vardata))
4590         {
4591                 /*
4592                  * The hook took control of acquiring a stats tuple.  If it did supply
4593                  * a tuple, it'd better have supplied a freefunc.
4594                  */
4595                 if (HeapTupleIsValid(vardata->statsTuple) &&
4596                         !vardata->freefunc)
4597                         elog(ERROR, "no function provided to release variable stats with");
4598         }
4599         else if (rte->rtekind == RTE_RELATION)
4600         {
4601                 /*
4602                  * Plain table or parent of an inheritance appendrel, so look up the
4603                  * column in pg_statistic
4604                  */
4605                 vardata->statsTuple = SearchSysCache3(STATRELATTINH,
4606                                                                                           ObjectIdGetDatum(rte->relid),
4607                                                                                           Int16GetDatum(var->varattno),
4608                                                                                           BoolGetDatum(rte->inh));
4609                 vardata->freefunc = ReleaseSysCache;
4610         }
4611         else if (rte->rtekind == RTE_SUBQUERY && !rte->inh)
4612         {
4613                 /*
4614                  * Plain subquery (not one that was converted to an appendrel).
4615                  */
4616                 Query      *subquery = rte->subquery;
4617                 RelOptInfo *rel;
4618                 TargetEntry *ste;
4619
4620                 /*
4621                  * Punt if it's a whole-row var rather than a plain column reference.
4622                  */
4623                 if (var->varattno == InvalidAttrNumber)
4624                         return;
4625
4626                 /*
4627                  * Punt if subquery uses set operations or GROUP BY, as these will
4628                  * mash underlying columns' stats beyond recognition.  (Set ops are
4629                  * particularly nasty; if we forged ahead, we would return stats
4630                  * relevant to only the leftmost subselect...)  DISTINCT is also
4631                  * problematic, but we check that later because there is a possibility
4632                  * of learning something even with it.
4633                  */
4634                 if (subquery->setOperations ||
4635                         subquery->groupClause)
4636                         return;
4637
4638                 /*
4639                  * OK, fetch RelOptInfo for subquery.  Note that we don't change the
4640                  * rel returned in vardata, since caller expects it to be a rel of the
4641                  * caller's query level.  Because we might already be recursing, we
4642                  * can't use that rel pointer either, but have to look up the Var's
4643                  * rel afresh.
4644                  */
4645                 rel = find_base_rel(root, var->varno);
4646
4647                 /* If the subquery hasn't been planned yet, we have to punt */
4648                 if (rel->subroot == NULL)
4649                         return;
4650                 Assert(IsA(rel->subroot, PlannerInfo));
4651
4652                 /*
4653                  * Switch our attention to the subquery as mangled by the planner. It
4654                  * was okay to look at the pre-planning version for the tests above,
4655                  * but now we need a Var that will refer to the subroot's live
4656                  * RelOptInfos.  For instance, if any subquery pullup happened during
4657                  * planning, Vars in the targetlist might have gotten replaced, and we
4658                  * need to see the replacement expressions.
4659                  */
4660                 subquery = rel->subroot->parse;
4661                 Assert(IsA(subquery, Query));
4662
4663                 /* Get the subquery output expression referenced by the upper Var */
4664                 ste = get_tle_by_resno(subquery->targetList, var->varattno);
4665                 if (ste == NULL || ste->resjunk)
4666                         elog(ERROR, "subquery %s does not have attribute %d",
4667                                  rte->eref->aliasname, var->varattno);
4668                 var = (Var *) ste->expr;
4669
4670                 /*
4671                  * If subquery uses DISTINCT, we can't make use of any stats for the
4672                  * variable ... but, if it's the only DISTINCT column, we are entitled
4673                  * to consider it unique.  We do the test this way so that it works
4674                  * for cases involving DISTINCT ON.
4675                  */
4676                 if (subquery->distinctClause)
4677                 {
4678                         if (list_length(subquery->distinctClause) == 1 &&
4679                                 targetIsInSortList(ste, InvalidOid, subquery->distinctClause))
4680                                 vardata->isunique = true;
4681                         /* cannot go further */
4682                         return;
4683                 }
4684
4685                 /*
4686                  * If the sub-query originated from a view with the security_barrier
4687                  * attribute, we must not look at the variable's statistics, though it
4688                  * seems all right to notice the existence of a DISTINCT clause. So
4689                  * stop here.
4690                  *
4691                  * This is probably a harsher restriction than necessary; it's
4692                  * certainly OK for the selectivity estimator (which is a C function,
4693                  * and therefore omnipotent anyway) to look at the statistics.  But
4694                  * many selectivity estimators will happily *invoke the operator
4695                  * function* to try to work out a good estimate - and that's not OK.
4696                  * So for now, don't dig down for stats.
4697                  */
4698                 if (rte->security_barrier)
4699                         return;
4700
4701                 /* Can only handle a simple Var of subquery's query level */
4702                 if (var && IsA(var, Var) &&
4703                         var->varlevelsup == 0)
4704                 {
4705                         /*
4706                          * OK, recurse into the subquery.  Note that the original setting
4707                          * of vardata->isunique (which will surely be false) is left
4708                          * unchanged in this situation.  That's what we want, since even
4709                          * if the underlying column is unique, the subquery may have
4710                          * joined to other tables in a way that creates duplicates.
4711                          */
4712                         examine_simple_variable(rel->subroot, var, vardata);
4713                 }
4714         }
4715         else
4716         {
4717                 /*
4718                  * Otherwise, the Var comes from a FUNCTION, VALUES, or CTE RTE.  (We
4719                  * won't see RTE_JOIN here because join alias Vars have already been
4720                  * flattened.)  There's not much we can do with function outputs, but
4721                  * maybe someday try to be smarter about VALUES and/or CTEs.
4722                  */
4723         }
4724 }
4725
4726 /*
4727  * get_variable_numdistinct
4728  *        Estimate the number of distinct values of a variable.
4729  *
4730  * vardata: results of examine_variable
4731  * *isdefault: set to TRUE if the result is a default rather than based on
4732  * anything meaningful.
4733  *
4734  * NB: be careful to produce a positive integral result, since callers may
4735  * compare the result to exact integer counts, or might divide by it.
4736  */
4737 double
4738 get_variable_numdistinct(VariableStatData *vardata, bool *isdefault)
4739 {
4740         double          stadistinct;
4741         double          stanullfrac = 0.0;
4742         double          ntuples;
4743
4744         *isdefault = false;
4745
4746         /*
4747          * Determine the stadistinct value to use.  There are cases where we can
4748          * get an estimate even without a pg_statistic entry, or can get a better
4749          * value than is in pg_statistic.  Grab stanullfrac too if we can find it
4750          * (otherwise, assume no nulls, for lack of any better idea).
4751          */
4752         if (HeapTupleIsValid(vardata->statsTuple))
4753         {
4754                 /* Use the pg_statistic entry */
4755                 Form_pg_statistic stats;
4756
4757                 stats = (Form_pg_statistic) GETSTRUCT(vardata->statsTuple);
4758                 stadistinct = stats->stadistinct;
4759                 stanullfrac = stats->stanullfrac;
4760         }
4761         else if (vardata->vartype == BOOLOID)
4762         {
4763                 /*
4764                  * Special-case boolean columns: presumably, two distinct values.
4765                  *
4766                  * Are there any other datatypes we should wire in special estimates
4767                  * for?
4768                  */
4769                 stadistinct = 2.0;
4770         }
4771         else
4772         {
4773                 /*
4774                  * We don't keep statistics for system columns, but in some cases we
4775                  * can infer distinctness anyway.
4776                  */
4777                 if (vardata->var && IsA(vardata->var, Var))
4778                 {
4779                         switch (((Var *) vardata->var)->varattno)
4780                         {
4781                                 case ObjectIdAttributeNumber:
4782                                 case SelfItemPointerAttributeNumber:
4783                                         stadistinct = -1.0; /* unique (and all non null) */
4784                                         break;
4785                                 case TableOidAttributeNumber:
4786                                         stadistinct = 1.0;      /* only 1 value */
4787                                         break;
4788                                 default:
4789                                         stadistinct = 0.0;      /* means "unknown" */
4790                                         break;
4791                         }
4792                 }
4793                 else
4794                         stadistinct = 0.0;      /* means "unknown" */
4795
4796                 /*
4797                  * XXX consider using estimate_num_groups on expressions?
4798                  */
4799         }
4800
4801         /*
4802          * If there is a unique index or DISTINCT clause for the variable, assume
4803          * it is unique no matter what pg_statistic says; the statistics could be
4804          * out of date, or we might have found a partial unique index that proves
4805          * the var is unique for this query.  However, we'd better still believe
4806          * the null-fraction statistic.
4807          */
4808         if (vardata->isunique)
4809                 stadistinct = -1.0 * (1.0 - stanullfrac);
4810
4811         /*
4812          * If we had an absolute estimate, use that.
4813          */
4814         if (stadistinct > 0.0)
4815                 return clamp_row_est(stadistinct);
4816
4817         /*
4818          * Otherwise we need to get the relation size; punt if not available.
4819          */
4820         if (vardata->rel == NULL)
4821         {
4822                 *isdefault = true;
4823                 return DEFAULT_NUM_DISTINCT;
4824         }
4825         ntuples = vardata->rel->tuples;
4826         if (ntuples <= 0.0)
4827         {
4828                 *isdefault = true;
4829                 return DEFAULT_NUM_DISTINCT;
4830         }
4831
4832         /*
4833          * If we had a relative estimate, use that.
4834          */
4835         if (stadistinct < 0.0)
4836                 return clamp_row_est(-stadistinct * ntuples);
4837
4838         /*
4839          * With no data, estimate ndistinct = ntuples if the table is small, else
4840          * use default.  We use DEFAULT_NUM_DISTINCT as the cutoff for "small" so
4841          * that the behavior isn't discontinuous.
4842          */
4843         if (ntuples < DEFAULT_NUM_DISTINCT)
4844                 return clamp_row_est(ntuples);
4845
4846         *isdefault = true;
4847         return DEFAULT_NUM_DISTINCT;
4848 }
4849
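/*
 * Illustrative standalone sketch, not part of selfuncs.c: the decision ladder
 * above, reduced to plain doubles.  A negative stadistinct is a fraction of
 * the row count, a positive one is an absolute count, zero means "unknown";
 * a proven-unique variable overrides the stored value but still honors the
 * null fraction.  The 200.0 mirrors DEFAULT_NUM_DISTINCT and the clamping is
 * only a rough stand-in for clamp_row_est(); the names here are invented.
 */
static double
sketch_numdistinct(double stadistinct, double stanullfrac, bool isunique,
                   double ntuples, bool *isdefault)
{
    *isdefault = false;

    if (isunique)
        stadistinct = -1.0 * (1.0 - stanullfrac);

    if (stadistinct > 0.0)
        return (stadistinct < 1.0) ? 1.0 : stadistinct; /* absolute estimate */

    if (ntuples <= 0.0)
    {
        *isdefault = true;
        return 200.0;           /* no idea of the table size: use the default */
    }

    if (stadistinct < 0.0)      /* relative estimate: fraction of the rows */
    {
        double      d = -stadistinct * ntuples;

        return (d < 1.0) ? 1.0 : d;
    }

    /* no data at all: small tables are taken as all-distinct */
    if (ntuples < 200.0)
        return ntuples;

    *isdefault = true;
    return 200.0;
}

/*
 * For example, stadistinct = -0.5 over 10000 rows gives 5000, and a unique
 * column with stanullfrac = 0.25 over 10000 rows gives 7500.
 */
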
4850 /*
4851  * get_variable_range
4852  *              Estimate the minimum and maximum value of the specified variable.
4853  *              If successful, store values in *min and *max, and return TRUE.
4854  *              If no data available, return FALSE.
4855  *
4856  * sortop is the "<" comparison operator to use.  This should generally
4857  * be "<" not ">", as only the former is likely to be found in pg_statistic.
4858  */
4859 static bool
4860 get_variable_range(PlannerInfo *root, VariableStatData *vardata, Oid sortop,
4861                                    Datum *min, Datum *max)
4862 {
4863         Datum           tmin = 0;
4864         Datum           tmax = 0;
4865         bool            have_data = false;
4866         int16           typLen;
4867         bool            typByVal;
4868         Datum      *values;
4869         int                     nvalues;
4870         int                     i;
4871
4872         /*
4873          * XXX It's very tempting to try to use the actual column min and max, if
4874          * we can get them relatively-cheaply with an index probe.  However, since
4875          * this function is called many times during join planning, that could
4876          * have unpleasant effects on planning speed.  Need more investigation
4877          * before enabling this.
4878          */
4879 #ifdef NOT_USED
4880         if (get_actual_variable_range(root, vardata, sortop, min, max))
4881                 return true;
4882 #endif
4883
4884         if (!HeapTupleIsValid(vardata->statsTuple))
4885         {
4886                 /* no stats available, so default result */
4887                 return false;
4888         }
4889
4890         get_typlenbyval(vardata->atttype, &typLen, &typByVal);
4891
4892         /*
4893          * If there is a histogram, grab the first and last values.
4894          *
4895          * If there is a histogram that is sorted with some other operator than
4896          * the one we want, fail --- this suggests that there is data we can't
4897          * use.
4898          */
4899         if (get_attstatsslot(vardata->statsTuple,
4900                                                  vardata->atttype, vardata->atttypmod,
4901                                                  STATISTIC_KIND_HISTOGRAM, sortop,
4902                                                  NULL,
4903                                                  &values, &nvalues,
4904                                                  NULL, NULL))
4905         {
4906                 if (nvalues > 0)
4907                 {
4908                         tmin = datumCopy(values[0], typByVal, typLen);
4909                         tmax = datumCopy(values[nvalues - 1], typByVal, typLen);
4910                         have_data = true;
4911                 }
4912                 free_attstatsslot(vardata->atttype, values, nvalues, NULL, 0);
4913         }
4914         else if (get_attstatsslot(vardata->statsTuple,
4915                                                           vardata->atttype, vardata->atttypmod,
4916                                                           STATISTIC_KIND_HISTOGRAM, InvalidOid,
4917                                                           NULL,
4918                                                           &values, &nvalues,
4919                                                           NULL, NULL))
4920         {
4921                 free_attstatsslot(vardata->atttype, values, nvalues, NULL, 0);
4922                 return false;
4923         }
4924
4925         /*
4926          * If we have most-common-values info, look for extreme MCVs.  This is
4927          * needed even if we also have a histogram, since the histogram excludes
4928          * the MCVs.  However, usually the MCVs will not be the extreme values, so
4929          * avoid unnecessary data copying.
4930          */
4931         if (get_attstatsslot(vardata->statsTuple,
4932                                                  vardata->atttype, vardata->atttypmod,
4933                                                  STATISTIC_KIND_MCV, InvalidOid,
4934                                                  NULL,
4935                                                  &values, &nvalues,
4936                                                  NULL, NULL))
4937         {
4938                 bool            tmin_is_mcv = false;
4939                 bool            tmax_is_mcv = false;
4940                 FmgrInfo        opproc;
4941
4942                 fmgr_info(get_opcode(sortop), &opproc);
4943
4944                 for (i = 0; i < nvalues; i++)
4945                 {
4946                         if (!have_data)
4947                         {
4948                                 tmin = tmax = values[i];
4949                                 tmin_is_mcv = tmax_is_mcv = have_data = true;
4950                                 continue;
4951                         }
4952                         if (DatumGetBool(FunctionCall2Coll(&opproc,
4953                                                                                            DEFAULT_COLLATION_OID,
4954                                                                                            values[i], tmin)))
4955                         {
4956                                 tmin = values[i];
4957                                 tmin_is_mcv = true;
4958                         }
4959                         if (DatumGetBool(FunctionCall2Coll(&opproc,
4960                                                                                            DEFAULT_COLLATION_OID,
4961                                                                                            tmax, values[i])))
4962                         {
4963                                 tmax = values[i];
4964                                 tmax_is_mcv = true;
4965                         }
4966                 }
4967                 if (tmin_is_mcv)
4968                         tmin = datumCopy(tmin, typByVal, typLen);
4969                 if (tmax_is_mcv)
4970                         tmax = datumCopy(tmax, typByVal, typLen);
4971                 free_attstatsslot(vardata->atttype, values, nvalues, NULL, 0);
4972         }
4973
4974         *min = tmin;
4975         *max = tmax;
4976         return have_data;
4977 }
4978
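/*
 * Illustrative standalone sketch, not part of selfuncs.c: the MCV-widening
 * loop above, reduced to plain doubles.  The histogram supplies a candidate
 * range, but the most-common values are excluded from the histogram, so each
 * MCV is compared with the current endpoints (the "<" test stands in for the
 * FunctionCall2Coll() calls on the sort operator) and pulled in when it lies
 * outside.  The names here are invented.
 */
static void
sketch_widen_range_with_mcvs(const double *mcvs, int nmcvs,
                             double *tmin, double *tmax, bool *have_data)
{
    int         i;

    for (i = 0; i < nmcvs; i++)
    {
        if (!*have_data)
        {
            *tmin = *tmax = mcvs[i];
            *have_data = true;
            continue;
        }
        if (mcvs[i] < *tmin)
            *tmin = mcvs[i];
        if (*tmax < mcvs[i])
            *tmax = mcvs[i];
    }
}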
4979
4980 /*
4981  * get_actual_variable_range
4982  *              Attempt to identify the current *actual* minimum and/or maximum
4983  *              of the specified variable, by looking for a suitable btree index
4984  *              and fetching its low and/or high values.
4985  *              If successful, store values in *min and *max, and return TRUE.
4986  *              (Either pointer can be NULL if that endpoint isn't needed.)
4987  *              If no data available, return FALSE.
4988  *
4989  * sortop is the "<" comparison operator to use.
4990  */
4991 static bool
4992 get_actual_variable_range(PlannerInfo *root, VariableStatData *vardata,
4993                                                   Oid sortop,
4994                                                   Datum *min, Datum *max)
4995 {
4996         bool            have_data = false;
4997         RelOptInfo *rel = vardata->rel;
4998         RangeTblEntry *rte;
4999         ListCell   *lc;
5000
5001         /* No hope if no relation or it doesn't have indexes */
5002         if (rel == NULL || rel->indexlist == NIL)
5003                 return false;
5004         /* If it has indexes it must be a plain relation */
5005         rte = root->simple_rte_array[rel->relid];
5006         Assert(rte->rtekind == RTE_RELATION);
5007
5008         /* Search through the indexes to see if any match our problem */
5009         foreach(lc, rel->indexlist)
5010         {
5011                 IndexOptInfo *index = (IndexOptInfo *) lfirst(lc);
5012                 ScanDirection indexscandir;
5013
5014                 /* Ignore non-btree indexes */
5015                 if (index->relam != BTREE_AM_OID)
5016                         continue;
5017
5018                 /*
5019                  * Ignore partial indexes --- we only want stats that cover the entire
5020                  * relation.
5021                  */
5022                 if (index->indpred != NIL)
5023                         continue;
5024
5025                 /*
5026                  * The index list might include hypothetical indexes inserted by a
5027                  * get_relation_info hook --- don't try to access them.
5028                  */
5029                 if (index->hypothetical)
5030                         continue;
5031
5032                 /*
5033                  * The first index column must match the desired variable and sort
5034                  * operator --- but we can use a descending-order index.
5035                  */
5036                 if (!match_index_to_operand(vardata->var, 0, index))
5037                         continue;
5038                 switch (get_op_opfamily_strategy(sortop, index->sortopfamily[0]))
5039                 {
5040                         case BTLessStrategyNumber:
5041                                 if (index->reverse_sort[0])
5042                                         indexscandir = BackwardScanDirection;
5043                                 else
5044                                         indexscandir = ForwardScanDirection;
5045                                 break;
5046                         case BTGreaterStrategyNumber:
5047                                 if (index->reverse_sort[0])
5048                                         indexscandir = ForwardScanDirection;
5049                                 else
5050                                         indexscandir = BackwardScanDirection;
5051                                 break;
5052                         default:
5053                                 /* index doesn't match the sortop */
5054                                 continue;
5055                 }
5056
5057                 /*
5058                  * Found a suitable index to extract data from.  We'll need an EState
5059                  * and a bunch of other infrastructure.
5060                  */
5061                 {
5062                         EState     *estate;
5063                         ExprContext *econtext;
5064                         MemoryContext tmpcontext;
5065                         MemoryContext oldcontext;
5066                         Relation        heapRel;
5067                         Relation        indexRel;
5068                         IndexInfo  *indexInfo;
5069                         TupleTableSlot *slot;
5070                         int16           typLen;
5071                         bool            typByVal;
5072                         ScanKeyData scankeys[1];
5073                         IndexScanDesc index_scan;
5074                         HeapTuple       tup;
5075                         Datum           values[INDEX_MAX_KEYS];
5076                         bool            isnull[INDEX_MAX_KEYS];
5077                         SnapshotData SnapshotDirty;
5078
5079                         estate = CreateExecutorState();
5080                         econtext = GetPerTupleExprContext(estate);
5081                         /* Make sure any cruft is generated in the econtext's memory */
5082                         tmpcontext = econtext->ecxt_per_tuple_memory;
5083                         oldcontext = MemoryContextSwitchTo(tmpcontext);
5084
5085                         /*
5086                          * Open the table and index so we can read from them.  We should
5087                          * already have at least AccessShareLock on the table, but not
5088                          * necessarily on the index.
5089                          */
5090                         heapRel = heap_open(rte->relid, NoLock);
5091                         indexRel = index_open(index->indexoid, AccessShareLock);
5092
5093                         /* extract index key information from the index's pg_index info */
5094                         indexInfo = BuildIndexInfo(indexRel);
5095
5096                         /* some other stuff */
5097                         slot = MakeSingleTupleTableSlot(RelationGetDescr(heapRel));
5098                         econtext->ecxt_scantuple = slot;
5099                         get_typlenbyval(vardata->atttype, &typLen, &typByVal);
5100                         InitDirtySnapshot(SnapshotDirty);
5101
5102                         /* set up an IS NOT NULL scan key so that we ignore nulls */
5103                         ScanKeyEntryInitialize(&scankeys[0],
5104                                                                    SK_ISNULL | SK_SEARCHNOTNULL,
5105                                                                    1,                           /* index col to scan */
5106                                                                    InvalidStrategy,             /* no strategy */
5107                                                                    InvalidOid,                  /* no strategy subtype */
5108                                                                    InvalidOid,                  /* no collation */
5109                                                                    InvalidOid,                  /* no reg proc for this */
5110                                                                    (Datum) 0);                  /* constant */
5111
5112                         have_data = true;
5113
5114                         /* If min is requested ... */
5115                         if (min)
5116                         {
5117                                 /*
5118                                  * In principle, we should scan the index with our current
5119                                  * active snapshot, which is the best approximation we've got
5120                                  * to what the query will see when executed.  But that won't
5121                                  * be exact if a new snap is taken before running the query,
5122                                  * and it can be very expensive if a lot of uncommitted rows
5123                                  * exist at the end of the index (because we'll laboriously
5124                                  * fetch each one and reject it).  What seems like a good
5125                                  * compromise is to use SnapshotDirty.  That will accept
5126                                  * uncommitted rows, and thus avoid fetching multiple heap
5127                                  * tuples in this scenario.  On the other hand, it will reject
5128                                  * known-dead rows, and thus not give a bogus answer when the
5129                                  * extreme value has been deleted; that case motivates not
5130                                  * using SnapshotAny here.
5131                                  */
5132                                 index_scan = index_beginscan(heapRel, indexRel, &SnapshotDirty,
5133                                                                                          1, 0);
5134                                 index_rescan(index_scan, scankeys, 1, NULL, 0);
5135
5136                                 /* Fetch first tuple in sortop's direction */
5137                                 if ((tup = index_getnext(index_scan,
5138                                                                                  indexscandir)) != NULL)
5139                                 {
5140                                         /* Extract the index column values from the heap tuple */
5141                                         ExecStoreTuple(tup, slot, InvalidBuffer, false);
5142                                         FormIndexDatum(indexInfo, slot, estate,
5143                                                                    values, isnull);
5144
5145                                         /* Shouldn't have got a null, but be careful */
5146                                         if (isnull[0])
5147                                                 elog(ERROR, "found unexpected null value in index \"%s\"",
5148                                                          RelationGetRelationName(indexRel));
5149
5150                                         /* Copy the index column value out to caller's context */
5151                                         MemoryContextSwitchTo(oldcontext);
5152                                         *min = datumCopy(values[0], typByVal, typLen);
5153                                         MemoryContextSwitchTo(tmpcontext);
5154                                 }
5155                                 else
5156                                         have_data = false;
5157
5158                                 index_endscan(index_scan);
5159                         }
5160
5161                         /* If max is requested, and we didn't find the index is empty */
5162                         if (max && have_data)
5163                         {
5164                                 index_scan = index_beginscan(heapRel, indexRel, &SnapshotDirty,
5165                                                                                          1, 0);
5166                                 index_rescan(index_scan, scankeys, 1, NULL, 0);
5167
5168                                 /* Fetch first tuple in reverse direction */
5169                                 if ((tup = index_getnext(index_scan,
5170                                                                                  -indexscandir)) != NULL)
5171                                 {
5172                                         /* Extract the index column values from the heap tuple */
5173                                         ExecStoreTuple(tup, slot, InvalidBuffer, false);
5174                                         FormIndexDatum(indexInfo, slot, estate,
5175                                                                    values, isnull);
5176
5177                                         /* Shouldn't have got a null, but be careful */
5178                                         if (isnull[0])
5179                                                 elog(ERROR, "found unexpected null value in index \"%s\"",
5180                                                          RelationGetRelationName(indexRel));
5181
5182                                         /* Copy the index column value out to caller's context */
5183                                         MemoryContextSwitchTo(oldcontext);
5184                                         *max = datumCopy(values[0], typByVal, typLen);
5185                                         MemoryContextSwitchTo(tmpcontext);
5186                                 }
5187                                 else
5188                                         have_data = false;
5189
5190                                 index_endscan(index_scan);
5191                         }
5192
5193                         /* Clean everything up */
5194                         ExecDropSingleTupleTableSlot(slot);
5195
5196                         index_close(indexRel, AccessShareLock);
5197                         heap_close(heapRel, NoLock);
5198
5199                         MemoryContextSwitchTo(oldcontext);
5200                         FreeExecutorState(estate);
5201
5202                         /* And we're done */
5203                         break;
5204                 }
5205         }
5206
5207         return have_data;
5208 }
5209
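/*
 * Illustrative standalone sketch, not part of selfuncs.c: the scan-direction
 * choice made in the switch above.  With a "<" sortop and an ascending index,
 * the minimum sits at the front of the index (forward scan); a DESC index or
 * a ">" sortop flips that.  The maximum is then fetched by scanning in the
 * opposite direction (the -indexscandir above).  The enum and function below
 * are invented for the example.
 */
typedef enum
{
    SKETCH_SCAN_FORWARD,
    SKETCH_SCAN_BACKWARD
} SketchScanDir;

static SketchScanDir
sketch_min_scan_direction(bool sortop_is_less, bool index_is_desc)
{
    if (sortop_is_less)
        return index_is_desc ? SKETCH_SCAN_BACKWARD : SKETCH_SCAN_FORWARD;
    else
        return index_is_desc ? SKETCH_SCAN_FORWARD : SKETCH_SCAN_BACKWARD;
}
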
5210 /*
5211  * find_join_input_rel
5212  *              Look up the input relation for a join.
5213  *
5214  * We assume that the input relation's RelOptInfo must have been constructed
5215  * already.
5216  */
5217 static RelOptInfo *
5218 find_join_input_rel(PlannerInfo *root, Relids relids)
5219 {
5220         RelOptInfo *rel = NULL;
5221
5222         switch (bms_membership(relids))
5223         {
5224                 case BMS_EMPTY_SET:
5225                         /* should not happen */
5226                         break;
5227                 case BMS_SINGLETON:
5228                         rel = find_base_rel(root, bms_singleton_member(relids));
5229                         break;
5230                 case BMS_MULTIPLE:
5231                         rel = find_join_rel(root, relids);
5232                         break;
5233         }
5234
5235         if (rel == NULL)
5236                 elog(ERROR, "could not find RelOptInfo for given relids");
5237
5238         return rel;
5239 }
5240
5241
5242 /*-------------------------------------------------------------------------
5243  *
5244  * Pattern analysis functions
5245  *
5246  * These routines support analysis of LIKE and regular-expression patterns
5247  * by the planner/optimizer.  It's important that they agree with the
5248  * regular-expression code in backend/regex/ and the LIKE code in
5249  * backend/utils/adt/like.c.  Also, the computation of the fixed prefix
5250  * must be conservative: if we report a string longer than the true fixed
5251  * prefix, the query may produce actually wrong answers, rather than just
5252  * getting a bad selectivity estimate!
5253  *
5254  * Note that the prefix-analysis functions are called from
5255  * backend/optimizer/path/indxpath.c as well as from routines in this file.
5256  *
5257  *-------------------------------------------------------------------------
5258  */
5259
5260 /*
5261  * Check whether char is a letter (and, hence, subject to case-folding)
5262  *
5263  * In multibyte character sets, we can't use isalpha, and it does not seem
5264  * worth trying to convert to wchar_t to use iswalpha.  Instead, just assume
5265  * any multibyte char is potentially case-varying.
5266  */
5267 static int
5268 pattern_char_isalpha(char c, bool is_multibyte,
5269                                          pg_locale_t locale, bool locale_is_c)
5270 {
5271         if (locale_is_c)
5272                 return (c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z');
5273         else if (is_multibyte && IS_HIGHBIT_SET(c))
5274                 return true;
5275 #ifdef HAVE_LOCALE_T
5276         else if (locale)
5277                 return isalpha_l((unsigned char) c, locale);
5278 #endif
5279         else
5280                 return isalpha((unsigned char) c);
5281 }
5282
5283 /*
5284  * Extract the fixed prefix, if any, for a pattern.
5285  *
5286  * *prefix is set to a palloc'd prefix string (in the form of a Const node),
5287  *      or to NULL if no fixed prefix exists for the pattern.
5288  * If rest_selec is not NULL, *rest_selec is set to an estimate of the
5289  *      selectivity of the remainder of the pattern (without any fixed prefix).
5290  * The prefix Const has the same type (TEXT or BYTEA) as the input pattern.
5291  *
5292  * The return value distinguishes no fixed prefix, a partial prefix,
5293  * or an exact-match-only pattern.
5294  */
5295
5296 static Pattern_Prefix_Status
5297 like_fixed_prefix(Const *patt_const, bool case_insensitive, Oid collation,
5298                                   Const **prefix_const, Selectivity *rest_selec)
5299 {
5300         char       *match;
5301         char       *patt;
5302         int                     pattlen;
5303         Oid                     typeid = patt_const->consttype;
5304         int                     pos,
5305                                 match_pos;
5306         bool            is_multibyte = (pg_database_encoding_max_length() > 1);
5307         pg_locale_t locale = 0;
5308         bool            locale_is_c = false;
5309
5310         /* the right-hand const is type text or bytea */
5311         Assert(typeid == BYTEAOID || typeid == TEXTOID);
5312
5313         if (case_insensitive)
5314         {
5315                 if (typeid == BYTEAOID)
5316                         ereport(ERROR,
5317                                         (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
5318                         errmsg("case insensitive matching not supported on type bytea")));
5319
5320                 /* If case-insensitive, we need locale info */
5321                 if (lc_ctype_is_c(collation))
5322                         locale_is_c = true;
5323                 else if (collation != DEFAULT_COLLATION_OID)
5324                 {
5325                         if (!OidIsValid(collation))
5326                         {
5327                                 /*
5328                                  * This typically means that the parser could not resolve a
5329                                  * conflict of implicit collations, so report it that way.
5330                                  */
5331                                 ereport(ERROR,
5332                                                 (errcode(ERRCODE_INDETERMINATE_COLLATION),
5333                                                  errmsg("could not determine which collation to use for ILIKE"),
5334                                                  errhint("Use the COLLATE clause to set the collation explicitly.")));
5335                         }
5336                         locale = pg_newlocale_from_collation(collation);
5337                 }
5338         }
5339
5340         if (typeid != BYTEAOID)
5341         {
5342                 patt = TextDatumGetCString(patt_const->constvalue);
5343                 pattlen = strlen(patt);
5344         }
5345         else
5346         {
5347                 bytea      *bstr = DatumGetByteaP(patt_const->constvalue);
5348
5349                 pattlen = VARSIZE(bstr) - VARHDRSZ;
5350                 patt = (char *) palloc(pattlen);
5351                 memcpy(patt, VARDATA(bstr), pattlen);
5352                 if ((Pointer) bstr != DatumGetPointer(patt_const->constvalue))
5353                         pfree(bstr);
5354         }
5355
5356         match = palloc(pattlen + 1);
5357         match_pos = 0;
5358         for (pos = 0; pos < pattlen; pos++)
5359         {
5360                 /* % and _ are wildcard characters in LIKE */
5361                 if (patt[pos] == '%' ||
5362                         patt[pos] == '_')
5363                         break;
5364
5365                 /* Backslash escapes the next character */
5366                 if (patt[pos] == '\\')
5367                 {
5368                         pos++;
5369                         if (pos >= pattlen)
5370                                 break;
5371                 }
5372
5373                 /* Stop if case-varying character (it's sort of a wildcard) */
5374                 if (case_insensitive &&
5375                   pattern_char_isalpha(patt[pos], is_multibyte, locale, locale_is_c))
5376                         break;
5377
5378                 match[match_pos++] = patt[pos];
5379         }
5380
5381         match[match_pos] = '\0';
5382
5383         if (typeid != BYTEAOID)
5384                 *prefix_const = string_to_const(match, typeid);
5385         else
5386                 *prefix_const = string_to_bytea_const(match, match_pos);
5387
5388         if (rest_selec != NULL)
5389                 *rest_selec = like_selectivity(&patt[pos], pattlen - pos,
5390                                                                            case_insensitive);
5391
5392         pfree(patt);
5393         pfree(match);
5394
5395         /* in LIKE, an empty pattern is an exact match! */
5396         if (pos == pattlen)
5397                 return Pattern_Prefix_Exact;    /* reached end of pattern, so exact */
5398
5399         if (match_pos > 0)
5400                 return Pattern_Prefix_Partial;
5401
5402         return Pattern_Prefix_None;
5403 }
5404
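/*
 * Illustrative standalone sketch, not part of selfuncs.c: the core of the
 * scan loop above for the simple case-sensitive, single-byte case.  Pattern
 * characters are copied into "prefix" until a % or _ wildcard appears, with
 * backslash treated as an escape; the return value reports whether the whole
 * pattern was consumed (exact match), only a leading prefix was usable, or
 * nothing was.  The function name and the caller-supplied buffer are invented.
 */
static Pattern_Prefix_Status
sketch_like_fixed_prefix(const char *patt, char *prefix, int prefixsize)
{
    int         pattlen = (int) strlen(patt);
    int         pos,
                match_pos = 0;

    for (pos = 0; pos < pattlen && match_pos + 1 < prefixsize; pos++)
    {
        if (patt[pos] == '%' || patt[pos] == '_')
            break;              /* wildcard: the fixed prefix ends here */
        if (patt[pos] == '\\')
        {
            pos++;              /* backslash escapes the next character */
            if (pos >= pattlen)
                break;
        }
        prefix[match_pos++] = patt[pos];
    }
    prefix[match_pos] = '\0';

    if (pos == pattlen)
        return Pattern_Prefix_Exact;    /* reached end: exact match */
    if (match_pos > 0)
        return Pattern_Prefix_Partial;
    return Pattern_Prefix_None;
}

/*
 * For example, "foo%bar" yields prefix "foo" and Pattern_Prefix_Partial,
 * while "foo" (no wildcards) yields Pattern_Prefix_Exact.
 */
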
5405 static Pattern_Prefix_Status
5406 regex_fixed_prefix(Const *patt_const, bool case_insensitive, Oid collation,
5407                                    Const **prefix_const, Selectivity *rest_selec)
5408 {
5409         Oid                     typeid = patt_const->consttype;
5410         char       *prefix;
5411         bool            exact;
5412
5413         /*
5414          * Should be unnecessary, there are no bytea regex operators defined. As
5415          * such, it should be noted that the rest of this function has *not* been
5416          * made safe for binary (possibly NULL containing) strings.
5417          */
5418         if (typeid == BYTEAOID)
5419                 ereport(ERROR,
5420                                 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
5421                  errmsg("regular-expression matching not supported on type bytea")));
5422
5423         /* Use the regexp machinery to extract the prefix, if any */
5424         prefix = regexp_fixed_prefix(DatumGetTextPP(patt_const->constvalue),
5425                                                                  case_insensitive, collation,
5426                                                                  &exact);
5427
5428         if (prefix == NULL)
5429         {
5430                 *prefix_const = NULL;
5431
5432                 if (rest_selec != NULL)
5433                 {
5434                         char       *patt = TextDatumGetCString(patt_const->constvalue);
5435
5436                         *rest_selec = regex_selectivity(patt, strlen(patt),
5437                                                                                         case_insensitive,
5438                                                                                         0);
5439                         pfree(patt);
5440                 }
5441
5442                 return Pattern_Prefix_None;
5443         }
5444
5445         *prefix_const = string_to_const(prefix, typeid);
5446
5447         if (rest_selec != NULL)
5448         {
5449                 if (exact)
5450                 {
5451                         /* Exact match, so there's no additional selectivity */
5452                         *rest_selec = 1.0;
5453                 }
5454                 else
5455                 {
5456                         char       *patt = TextDatumGetCString(patt_const->constvalue);
5457
5458                         *rest_selec = regex_selectivity(patt, strlen(patt),
5459                                                                                         case_insensitive,
5460                                                                                         strlen(prefix));
5461                         pfree(patt);
5462                 }
5463         }
5464
5465         pfree(prefix);
5466
5467         if (exact)
5468                 return Pattern_Prefix_Exact;    /* pattern specifies exact match */
5469         else
5470                 return Pattern_Prefix_Partial;
5471 }
5472
5473 Pattern_Prefix_Status
5474 pattern_fixed_prefix(Const *patt, Pattern_Type ptype, Oid collation,
5475                                          Const **prefix, Selectivity *rest_selec)
5476 {
5477         Pattern_Prefix_Status result;
5478
5479         switch (ptype)
5480         {
5481                 case Pattern_Type_Like:
5482                         result = like_fixed_prefix(patt, false, collation,
5483                                                                            prefix, rest_selec);
5484                         break;
5485                 case Pattern_Type_Like_IC:
5486                         result = like_fixed_prefix(patt, true, collation,
5487                                                                            prefix, rest_selec);
5488                         break;
5489                 case Pattern_Type_Regex:
5490                         result = regex_fixed_prefix(patt, false, collation,
5491                                                                                 prefix, rest_selec);
5492                         break;
5493                 case Pattern_Type_Regex_IC:
5494                         result = regex_fixed_prefix(patt, true, collation,
5495                                                                                 prefix, rest_selec);
5496                         break;
5497                 default:
5498                         elog(ERROR, "unrecognized ptype: %d", (int) ptype);
5499                         result = Pattern_Prefix_None;           /* keep compiler quiet */
5500                         break;
5501         }
5502         return result;
5503 }
5504
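/*
 * Hypothetical usage sketch, not part of selfuncs.c: how a caller (such as
 * the routines in indxpath.c mentioned above) consults pattern_fixed_prefix().
 * The variable names are invented; the point is that the caller receives a
 * palloc'd prefix Const (or NULL), optionally a selectivity estimate for the
 * remainder of the pattern, and a status classifying the prefix.
 */
static bool
sketch_pattern_has_useful_prefix(Const *patt_const, Oid collation)
{
    Const      *prefix = NULL;
    Selectivity rest_selec = 0;
    Pattern_Prefix_Status status;

    status = pattern_fixed_prefix(patt_const, Pattern_Type_Like, collation,
                                  &prefix, &rest_selec);

    /* Pattern_Prefix_Exact and Pattern_Prefix_Partial both supply a prefix */
    return (status != Pattern_Prefix_None && prefix != NULL);
}
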
5505 /*
5506  * Estimate the selectivity of a fixed prefix for a pattern match.
5507  *
5508  * A fixed prefix "foo" is estimated as the selectivity of the expression
5509  * "variable >= 'foo' AND variable < 'fop'" (see also indxpath.c).
5510  *
5511  * The selectivity estimate is with respect to the portion of the column
5512  * population represented by the histogram --- the caller must fold this
5513  * together with info about MCVs and NULLs.
5514  *
5515  * We use the >= and < operators from the specified btree opfamily to do the
5516  * estimation.  The given variable and Const must be of the associated
5517  * datatype.
5518  *
5519  * XXX Note: we make use of the upper bound to estimate operator selectivity
5520  * even if the locale is such that we cannot rely on the upper-bound string.
5521  * The selectivity only needs to be approximately right anyway, so it seems
5522  * more useful to use the upper-bound code than not.
5523  */
5524 static Selectivity
5525 prefix_selectivity(PlannerInfo *root, VariableStatData *vardata,
5526                                    Oid vartype, Oid opfamily, Const *prefixcon)
5527 {
5528         Selectivity prefixsel;
5529         Oid                     cmpopr;
5530         FmgrInfo        opproc;
5531         Const      *greaterstrcon;
5532         Selectivity eq_sel;
5533
5534         cmpopr = get_opfamily_member(opfamily, vartype, vartype,
5535                                                                  BTGreaterEqualStrategyNumber);
5536         if (cmpopr == InvalidOid)
5537                 elog(ERROR, "no >= operator for opfamily %u", opfamily);
5538         fmgr_info(get_opcode(cmpopr), &opproc);
5539
5540         prefixsel = ineq_histogram_selectivity(root, vardata, &opproc, true,
5541                                                                                    prefixcon->constvalue,
5542                                                                                    prefixcon->consttype);
5543
5544         if (prefixsel < 0.0)
5545         {
5546                 /* No histogram is present ... return a suitable default estimate */
5547                 return DEFAULT_MATCH_SEL;
5548         }
5549
5550         /*-------
5551          * If we can create a string larger than the prefix, say
5552          *      "x < greaterstr".
5553          *-------
5554          */
5555         cmpopr = get_opfamily_member(opfamily, vartype, vartype,
5556                                                                  BTLessStrategyNumber);
5557         if (cmpopr == InvalidOid)
5558                 elog(ERROR, "no < operator for opfamily %u", opfamily);
5559         fmgr_info(get_opcode(cmpopr), &opproc);
5560         greaterstrcon = make_greater_string(prefixcon, &opproc,
5561                                                                                 DEFAULT_COLLATION_OID);
5562         if (greaterstrcon)
5563         {
5564                 Selectivity topsel;
5565
5566                 topsel = ineq_histogram_selectivity(root, vardata, &opproc, false,
5567                                                                                         greaterstrcon->constvalue,
5568                                                                                         greaterstrcon->consttype);
5569
5570                 /* ineq_histogram_selectivity worked before, it shouldn't fail now */
5571                 Assert(topsel >= 0.0);
5572
5573                 /*
5574                  * Merge the two selectivities in the same way as for a range query
5575                  * (see clauselist_selectivity()).  Note that we don't need to worry
5576                  * about double-exclusion of nulls, since ineq_histogram_selectivity
5577                  * doesn't count those anyway.
5578                  */
5579                 prefixsel = topsel + prefixsel - 1.0;
5580         }
5581
5582         /*
5583          * If the prefix is long then the two bounding values might be too close
5584          * together for the histogram to distinguish them usefully, resulting in a
5585          * zero estimate (plus or minus roundoff error). To avoid returning a
5586          * ridiculously small estimate, compute the estimated selectivity for
5587          * "variable = 'foo'", and clamp to that. (Obviously, the resultant
5588          * estimate should be at least that.)
5589          *
5590          * We apply this even if we couldn't make a greater string.  That case
5591          * suggests that the prefix is near the maximum possible, and thus
5592          * probably off the end of the histogram, and thus we probably got a very
5593          * small estimate from the >= condition; so we still need to clamp.
5594          */
5595         cmpopr = get_opfamily_member(opfamily, vartype, vartype,
5596                                                                  BTEqualStrategyNumber);
5597         if (cmpopr == InvalidOid)
5598                 elog(ERROR, "no = operator for opfamily %u", opfamily);
5599         eq_sel = var_eq_const(vardata, cmpopr, prefixcon->constvalue,
5600                                                   false, true);
5601
5602         prefixsel = Max(prefixsel, eq_sel);
5603
5604         return prefixsel;
5605 }
5606
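/*
 * Illustrative standalone sketch, not part of selfuncs.c: how the two
 * histogram selectivities above are merged.  If Pr(var >= 'foo') = 0.40 and
 * Pr(var < 'fop') = 0.65, the range-style combination is
 * 0.65 + 0.40 - 1.0 = 0.05, and the result is then clamped from below by the
 * "var = 'foo'" estimate so that a long prefix cannot yield an absurdly small
 * (or negative) number.  The numbers and names here are invented.
 */
static double
sketch_combine_prefix_selectivity(double ge_prefix_sel,     /* Pr(var >= prefix) */
                                  double lt_greaterstr_sel, /* Pr(var < greaterstr), < 0 if none */
                                  double eq_prefix_sel)     /* Pr(var = prefix) */
{
    double      sel = ge_prefix_sel;

    if (lt_greaterstr_sel >= 0.0)
        sel = lt_greaterstr_sel + ge_prefix_sel - 1.0;

    /* never estimate below the plain equality selectivity */
    return (sel > eq_prefix_sel) ? sel : eq_prefix_sel;
}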
5607
5608 /*
5609  * Estimate the selectivity of a pattern of the specified type.
5610  * Note that any fixed prefix of the pattern will have been removed already,
5611  * so actually we may be looking at just a fragment of the pattern.
5612  *
5613  * For now, we use a very simplistic approach: fixed characters reduce the
5614  * selectivity a good deal, character ranges reduce it a little,
5615  * wildcards (such as % for LIKE or .* for regex) increase it.
5616  */
5617
5618 #define FIXED_CHAR_SEL  0.20    /* about 1/5 */
5619 #define CHAR_RANGE_SEL  0.25
5620 #define ANY_CHAR_SEL    0.9             /* not 1, since it won't match end-of-string */
5621 #define FULL_WILDCARD_SEL 5.0
5622 #define PARTIAL_WILDCARD_SEL 2.0
5623
5624 static Selectivity
5625 like_selectivity(const char *patt, int pattlen, bool case_insensitive)
5626 {
5627         Selectivity sel = 1.0;
5628         int                     pos;
5629
5630         /* Skip any leading wildcard; it's already factored into initial sel */
5631         for (pos = 0; pos < pattlen; pos++)
5632         {
5633                 if (patt[pos] != '%' && patt[pos] != '_')
5634                         break;
5635         }
5636
5637         for (; pos < pattlen; pos++)
5638         {
5639                 /* % and _ are wildcard characters in LIKE */
5640                 if (patt[pos] == '%')
5641                         sel *= FULL_WILDCARD_SEL;
5642                 else if (patt[pos] == '_')
5643                         sel *= ANY_CHAR_SEL;
5644                 else if (patt[pos] == '\\')
5645                 {
5646                         /* Backslash quotes the next character */
5647                         pos++;
5648                         if (pos >= pattlen)
5649                                 break;
5650                         sel *= FIXED_CHAR_SEL;
5651                 }
5652                 else
5653                         sel *= FIXED_CHAR_SEL;
5654         }
5655         /* Could get sel > 1 if multiple wildcards */
5656         if (sel > 1.0)
5657                 sel = 1.0;
5658         return sel;
5659 }
5660
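/*
 * Worked example (a reader's aid, not part of selfuncs.c): for a remainder
 * pattern "a%b_" the constants above give
 *     FIXED_CHAR_SEL * FULL_WILDCARD_SEL * FIXED_CHAR_SEL * ANY_CHAR_SEL
 *   = 0.20 * 5.0 * 0.20 * 0.9 = 0.18,
 * while "%abc" skips the leading wildcard and gives 0.20^3 = 0.008.  The
 * hypothetical helper below simply exercises the static function above to
 * make that concrete.
 */
static Selectivity
sketch_like_selectivity_example(void)
{
    /* one fixed char, a full wildcard, a fixed char, a single-char wildcard */
    return like_selectivity("a%b_", 4, false);
}
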
5661 static Selectivity
5662 regex_selectivity_sub(const char *patt, int pattlen, bool case_insensitive)
5663 {
5664         Selectivity sel = 1.0;
5665         int                     paren_depth = 0;
5666         int                     paren_pos = 0;  /* dummy init to keep compiler quiet */
5667         int                     pos;
5668
5669         for (pos = 0; pos < pattlen; pos++)
5670         {
5671                 if (patt[pos] == '(')
5672                 {
5673                         if (paren_depth == 0)
5674                                 paren_pos = pos;        /* remember start of parenthesized item */
5675                         paren_depth++;
5676                 }
5677                 else if (patt[pos] == ')' && paren_depth > 0)
5678                 {
5679                         paren_depth--;
5680                         if (paren_depth == 0)
5681                                 sel *= regex_selectivity_sub(patt + (paren_pos + 1),
5682                                                                                          pos - (paren_pos + 1),
5683                                                                                          case_insensitive);
5684                 }
5685                 else if (patt[pos] == '|' && paren_depth == 0)
5686                 {
5687                         /*
5688                          * If unquoted | is present at paren level 0 in pattern, we have
5689                          * multiple alternatives; sum their probabilities.
5690                          */
5691                         sel += regex_selectivity_sub(patt + (pos + 1),
5692                                                                                  pattlen - (pos + 1),
5693                                                                                  case_insensitive);
5694                         break;                          /* rest of pattern is now processed */
5695                 }
5696                 else if (patt[pos] == '[')
5697                 {
5698                         bool            negclass = false;
5699
5700                         if (patt[++pos] == '^')
5701                         {
5702                                 negclass = true;
5703                                 pos++;
5704                         }
5705                         if (patt[pos] == ']')           /* ']' at start of class is not
5706                                                                                  * special */
5707                                 pos++;
5708                         while (pos < pattlen && patt[pos] != ']')
5709                                 pos++;
5710                         if (paren_depth == 0)
5711                                 sel *= (negclass ? (1.0 - CHAR_RANGE_SEL) : CHAR_RANGE_SEL);
5712                 }
5713                 else if (patt[pos] == '.')
5714                 {
5715                         if (paren_depth == 0)
5716                                 sel *= ANY_CHAR_SEL;
5717                 }
5718                 else if (patt[pos] == '*' ||
5719                                  patt[pos] == '?' ||
5720                                  patt[pos] == '+')
5721                 {
5722                         /* Ought to be smarter about quantifiers... */
5723                         if (paren_depth == 0)
5724                                 sel *= PARTIAL_WILDCARD_SEL;
5725                 }
5726                 else if (patt[pos] == '{')
5727                 {
5728                         while (pos < pattlen && patt[pos] != '}')
5729                                 pos++;
5730                         if (paren_depth == 0)
5731                                 sel *= PARTIAL_WILDCARD_SEL;
5732                 }
5733                 else if (patt[pos] == '\\')
5734                 {
5735                         /* backslash quotes the next character */
5736                         pos++;
5737                         if (pos >= pattlen)
5738                                 break;
5739                         if (paren_depth == 0)
5740                                 sel *= FIXED_CHAR_SEL;
5741                 }
5742                 else
5743                 {
5744                         if (paren_depth == 0)
5745                                 sel *= FIXED_CHAR_SEL;
5746                 }
5747         }
5748         /* Could get sel > 1 if multiple wildcards */
5749         if (sel > 1.0)
5750                 sel = 1.0;
5751         return sel;
5752 }
5753
5754 static Selectivity
5755 regex_selectivity(const char *patt, int pattlen, bool case_insensitive,
5756                                   int fixed_prefix_len)
5757 {
5758         Selectivity sel;
5759
5760         /* If patt doesn't end with $, consider it to have a trailing wildcard */
5761         if (pattlen > 0 && patt[pattlen - 1] == '$' &&
5762                 (pattlen == 1 || patt[pattlen - 2] != '\\'))
5763         {
5764                 /* has trailing $ */
5765                 sel = regex_selectivity_sub(patt, pattlen - 1, case_insensitive);
5766         }
5767         else
5768         {
5769                 /* no trailing $ */
5770                 sel = regex_selectivity_sub(patt, pattlen, case_insensitive);
5771                 sel *= FULL_WILDCARD_SEL;
5772         }
5773
5774         /* If there's a fixed prefix, discount its selectivity */
5775         if (fixed_prefix_len > 0)
5776                 sel /= pow(FIXED_CHAR_SEL, fixed_prefix_len);
5777
5778         /* Make sure result stays in range */
5779         CLAMP_PROBABILITY(sel);
5780         return sel;
5781 }
5782
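/*
 * For illustration only: a rough by-hand trace of the arithmetic that
 * regex_selectivity_sub() and regex_selectivity() perform for one concrete
 * pattern, relying on the selectivity constants defined earlier in this
 * file.  The SELFUNCS_EXAMPLES guard is hypothetical and never defined.
 */
#ifdef SELFUNCS_EXAMPLES
static Selectivity
regex_selectivity_example(void)
{
    /*
     * Pattern "abc.*" with no trailing '$' and no extracted fixed prefix:
     * three fixed characters, one '.', one '*', plus the penalty for not
     * being anchored at the end of the string.
     */
    Selectivity sel = 1.0;

    sel *= FIXED_CHAR_SEL * FIXED_CHAR_SEL * FIXED_CHAR_SEL;   /* "abc" */
    sel *= ANY_CHAR_SEL;            /* '.' */
    sel *= PARTIAL_WILDCARD_SEL;    /* '*' */
    sel *= FULL_WILDCARD_SEL;       /* no trailing '$' */

    /* regex_selectivity() would also clamp the result into [0, 1] */
    CLAMP_PROBABILITY(sel);
    return sel;
}
#endif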
5783
5784 /*
5785  * For bytea, the increment function need only increment the current byte
5786  * (there are no multibyte characters to worry about).
5787  */
5788 static bool
5789 byte_increment(unsigned char *ptr, int len)
5790 {
5791         if (*ptr >= 255)
5792                 return false;
5793         (*ptr)++;
5794         return true;
5795 }
5796
5797 /*
5798  * Try to generate a string greater than the given string or any
5799  * string it is a prefix of.  If successful, return a palloc'd string
5800  * in the form of a Const node; else return NULL.
5801  *
5802  * The caller must provide the appropriate "less than" comparison function
5803  * for testing the strings, along with the collation to use.
5804  *
5805  * The key requirement here is that given a prefix string, say "foo",
5806  * we must be able to generate another string "fop" that is greater than
5807  * all strings "foobar" starting with "foo".  We can test that we have
5808  * generated a string greater than the prefix string, but in non-C collations
5809  * that is not a bulletproof guarantee that an extension of the string might
5810  * that does not guarantee that every extension of the prefix will also
5811  * sort before it; for example, "foo " is less than "foo!", but it is not
5812  * clear that a "dictionary" sort ordering will consider "foo!" less
5813  * estimation purposes when working in a non-C collation.
5814  *
5815  * To try to catch most cases where an extended string might otherwise sort
5816  * before the result value, we determine which of the strings "Z", "z", "y",
5817  * and "9" is seen as largest by the collation, and append that to the given
5818  * prefix before trying to find a string that compares as larger.
5819  *
5820  * To search for a greater string, we repeatedly "increment" the rightmost
5821  * character, using an encoding-specific character incrementer function.
5822  * When it's no longer possible to increment the last character, we truncate
5823  * off that character and start incrementing the next-to-rightmost.
5824  * For example, if "z" were the last character in the sort order, then we
5825  * could produce "foo" as a string greater than "fonz".
5826  *
5827  * This could be rather slow in the worst case, but in most cases we
5828  * won't have to try more than one or two strings before succeeding.
5829  *
5830  * Note that it's important for the character incrementer not to be exhaustive
5831  * about producing every possible character code, since in some cases the only
5832  * way to get a larger string is to increment a previous character position.
5833  * So we don't want to spend too much time trying every possible character
5834  * code at the last position.  A good rule of thumb is to be sure that we
5835  * don't try more than 256*K values for a K-byte character (and definitely
5836  * not 256^K, which is what an exhaustive search would approach).
5837  */
5838 Const *
5839 make_greater_string(const Const *str_const, FmgrInfo *ltproc, Oid collation)
5840 {
5841         Oid                     datatype = str_const->consttype;
5842         char       *workstr;
5843         int                     len;
5844         Datum           cmpstr;
5845         text       *cmptxt = NULL;
5846         mbcharacter_incrementer charinc;
5847
5848         /*
5849          * Get a modifiable copy of the prefix string in C-string format, and set
5850          * up the string we will compare to as a Datum.  In C locale this can just
5851          * be the given prefix string, otherwise we need to add a suffix.  Types
5852          * NAME and BYTEA sort bytewise so they don't need a suffix either.
5853          */
5854         if (datatype == NAMEOID)
5855         {
5856                 workstr = DatumGetCString(DirectFunctionCall1(nameout,
5857                                                                                                           str_const->constvalue));
5858                 len = strlen(workstr);
5859                 cmpstr = str_const->constvalue;
5860         }
5861         else if (datatype == BYTEAOID)
5862         {
5863                 bytea      *bstr = DatumGetByteaP(str_const->constvalue);
5864
5865                 len = VARSIZE(bstr) - VARHDRSZ;
5866                 workstr = (char *) palloc(len);
5867                 memcpy(workstr, VARDATA(bstr), len);
5868                 if ((Pointer) bstr != DatumGetPointer(str_const->constvalue))
5869                         pfree(bstr);
5870                 cmpstr = str_const->constvalue;
5871         }
5872         else
5873         {
5874                 workstr = TextDatumGetCString(str_const->constvalue);
5875                 len = strlen(workstr);
5876                 if (lc_collate_is_c(collation) || len == 0)
5877                         cmpstr = str_const->constvalue;
5878                 else
5879                 {
5880                         /* If first time through, determine the suffix to use */
5881                         static char suffixchar = 0;
5882                         static Oid      suffixcollation = 0;
5883
5884                         if (!suffixchar || suffixcollation != collation)
5885                         {
5886                                 char       *best;
5887
5888                                 best = "Z";
5889                                 if (varstr_cmp(best, 1, "z", 1, collation) < 0)
5890                                         best = "z";
5891                                 if (varstr_cmp(best, 1, "y", 1, collation) < 0)
5892                                         best = "y";
5893                                 if (varstr_cmp(best, 1, "9", 1, collation) < 0)
5894                                         best = "9";
5895                                 suffixchar = *best;
5896                                 suffixcollation = collation;
5897                         }
5898
5899                         /* And build the string to compare to */
5900                         cmptxt = (text *) palloc(VARHDRSZ + len + 1);
5901                         SET_VARSIZE(cmptxt, VARHDRSZ + len + 1);
5902                         memcpy(VARDATA(cmptxt), workstr, len);
5903                         *(VARDATA(cmptxt) + len) = suffixchar;
5904                         cmpstr = PointerGetDatum(cmptxt);
5905                 }
5906         }
5907
5908         /* Select appropriate character-incrementer function */
5909         if (datatype == BYTEAOID)
5910                 charinc = byte_increment;
5911         else
5912                 charinc = pg_database_encoding_character_incrementer();
5913
5914         /* And search ... */
5915         while (len > 0)
5916         {
5917                 int                     charlen;
5918                 unsigned char *lastchar;
5919
5920                 /* Identify the last character --- for bytea, just the last byte */
5921                 if (datatype == BYTEAOID)
5922                         charlen = 1;
5923                 else
5924                         charlen = len - pg_mbcliplen(workstr, len, len - 1);
5925                 lastchar = (unsigned char *) (workstr + len - charlen);
5926
5927                 /*
5928                  * Try to generate a larger string by incrementing the last character
5929                  * (for BYTEA, we treat each byte as a character).
5930                  *
5931                  * Note: the incrementer function is expected to return true if it's
5932                  * generated a valid-per-the-encoding new character, otherwise false.
5933                  * The contents of the character on false return are unspecified.
5934                  */
5935                 while (charinc(lastchar, charlen))
5936                 {
5937                         Const      *workstr_const;
5938
5939                         if (datatype == BYTEAOID)
5940                                 workstr_const = string_to_bytea_const(workstr, len);
5941                         else
5942                                 workstr_const = string_to_const(workstr, datatype);
5943
5944                         if (DatumGetBool(FunctionCall2Coll(ltproc,
5945                                                                                            collation,
5946                                                                                            cmpstr,
5947                                                                                            workstr_const->constvalue)))
5948                         {
5949                                 /* Successfully made a string larger than cmpstr */
5950                                 if (cmptxt)
5951                                         pfree(cmptxt);
5952                                 pfree(workstr);
5953                                 return workstr_const;
5954                         }
5955
5956                         /* No good, release unusable value and try again */
5957                         pfree(DatumGetPointer(workstr_const->constvalue));
5958                         pfree(workstr_const);
5959                 }
5960
5961                 /*
5962                  * No luck here, so truncate off the last character and try to
5963                  * increment the next one.
5964                  */
5965                 len -= charlen;
5966                 workstr[len] = '\0';
5967         }
5968
5969         /* Failed... */
5970         if (cmptxt)
5971                 pfree(cmptxt);
5972         pfree(workstr);
5973
5974         return NULL;
5975 }
5976
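/*
 * For illustration, a minimal bytewise sketch of the increment-and-truncate
 * loop that make_greater_string() runs above.  It assumes plain "C"-locale
 * (memcmp) ordering, so bumping the last byte is enough to exceed every
 * extension of the input; the real code must instead consult the
 * encoding-aware incrementer and the caller's "<" operator.  Guarded by the
 * same hypothetical SELFUNCS_EXAMPLES macro as the sketch above.
 */
#ifdef SELFUNCS_EXAMPLES
static bool
bytewise_make_greater_inplace(char *workstr)
{
    int         len = strlen(workstr);

    while (len > 0)
    {
        unsigned char *lastchar = (unsigned char *) (workstr + len - 1);

        /* same contract as byte_increment() above */
        if (*lastchar < 255)
        {
            (*lastchar)++;
            return true;        /* now bytewise-greater than any extension */
        }
        /* can't bump this byte any higher; truncate it and retry */
        workstr[--len] = '\0';
    }
    return false;               /* input was empty or entirely 0xFF bytes */
}
#endif
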
5977 /*
5978  * Generate a Datum of the appropriate type from a C string.
5979  * Note that all of the supported types are pass-by-ref, so the
5980  * returned value should be pfree'd if no longer needed.
5981  */
5982 static Datum
5983 string_to_datum(const char *str, Oid datatype)
5984 {
5985         Assert(str != NULL);
5986
5987         /*
5988          * We cheat a little by assuming that CStringGetTextDatum() will do for
5989          * bpchar and varchar constants too...
5990          */
5991         if (datatype == NAMEOID)
5992                 return DirectFunctionCall1(namein, CStringGetDatum(str));
5993         else if (datatype == BYTEAOID)
5994                 return DirectFunctionCall1(byteain, CStringGetDatum(str));
5995         else
5996                 return CStringGetTextDatum(str);
5997 }
5998
5999 /*
6000  * Generate a Const node of the appropriate type from a C string.
6001  */
6002 static Const *
6003 string_to_const(const char *str, Oid datatype)
6004 {
6005         Datum           conval = string_to_datum(str, datatype);
6006         Oid                     collation;
6007         int                     constlen;
6008
6009         /*
6010          * We only need to support a few datatypes here, so hard-wire properties
6011          * instead of incurring the expense of catalog lookups.
6012          */
6013         switch (datatype)
6014         {
6015                 case TEXTOID:
6016                 case VARCHAROID:
6017                 case BPCHAROID:
6018                         collation = DEFAULT_COLLATION_OID;
6019                         constlen = -1;
6020                         break;
6021
6022                 case NAMEOID:
6023                         collation = InvalidOid;
6024                         constlen = NAMEDATALEN;
6025                         break;
6026
6027                 case BYTEAOID:
6028                         collation = InvalidOid;
6029                         constlen = -1;
6030                         break;
6031
6032                 default:
6033                         elog(ERROR, "unexpected datatype in string_to_const: %u",
6034                                  datatype);
6035                         return NULL;
6036         }
6037
6038         return makeConst(datatype, -1, collation, constlen,
6039                                          conval, false, false);
6040 }
6041
6042 /*
6043  * Generate a Const node of bytea type from a binary C string and a length.
6044  */
6045 static Const *
6046 string_to_bytea_const(const char *str, size_t str_len)
6047 {
6048         bytea      *bstr = palloc(VARHDRSZ + str_len);
6049         Datum           conval;
6050
6051         memcpy(VARDATA(bstr), str, str_len);
6052         SET_VARSIZE(bstr, VARHDRSZ + str_len);
6053         conval = PointerGetDatum(bstr);
6054
6055         return makeConst(BYTEAOID, -1, InvalidOid, -1, conval, false, false);
6056 }
6057
6058 /*-------------------------------------------------------------------------
6059  *
6060  * Index cost estimation functions
6061  *
6062  *-------------------------------------------------------------------------
6063  */
6064
6065 List *
6066 deconstruct_indexquals(IndexPath *path)
6067 {
6068         List       *result = NIL;
6069         IndexOptInfo *index = path->indexinfo;
6070         ListCell   *lcc,
6071                            *lci;
6072
6073         forboth(lcc, path->indexquals, lci, path->indexqualcols)
6074         {
6075                 RestrictInfo *rinfo = (RestrictInfo *) lfirst(lcc);
6076                 int                     indexcol = lfirst_int(lci);
6077                 Expr       *clause;
6078                 Node       *leftop,
6079                                    *rightop;
6080                 IndexQualInfo *qinfo;
6081
6082                 Assert(IsA(rinfo, RestrictInfo));
6083                 clause = rinfo->clause;
6084
6085                 qinfo = (IndexQualInfo *) palloc(sizeof(IndexQualInfo));
6086                 qinfo->rinfo = rinfo;
6087                 qinfo->indexcol = indexcol;
6088
6089                 if (IsA(clause, OpExpr))
6090                 {
6091                         qinfo->clause_op = ((OpExpr *) clause)->opno;
6092                         leftop = get_leftop(clause);
6093                         rightop = get_rightop(clause);
6094                         if (match_index_to_operand(leftop, indexcol, index))
6095                         {
6096                                 qinfo->varonleft = true;
6097                                 qinfo->other_operand = rightop;
6098                         }
6099                         else
6100                         {
6101                                 Assert(match_index_to_operand(rightop, indexcol, index));
6102                                 qinfo->varonleft = false;
6103                                 qinfo->other_operand = leftop;
6104                         }
6105                 }
6106                 else if (IsA(clause, RowCompareExpr))
6107                 {
6108                         RowCompareExpr *rc = (RowCompareExpr *) clause;
6109
6110                         qinfo->clause_op = linitial_oid(rc->opnos);
6111                         /* Examine only first columns to determine left/right sides */
6112                         if (match_index_to_operand((Node *) linitial(rc->largs),
6113                                                                            indexcol, index))
6114                         {
6115                                 qinfo->varonleft = true;
6116                                 qinfo->other_operand = (Node *) rc->rargs;
6117                         }
6118                         else
6119                         {
6120                                 Assert(match_index_to_operand((Node *) linitial(rc->rargs),
6121                                                                                           indexcol, index));
6122                                 qinfo->varonleft = false;
6123                                 qinfo->other_operand = (Node *) rc->largs;
6124                         }
6125                 }
6126                 else if (IsA(clause, ScalarArrayOpExpr))
6127                 {
6128                         ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) clause;
6129
6130                         qinfo->clause_op = saop->opno;
6131                         /* index column is always on the left in this case */
6132                         Assert(match_index_to_operand((Node *) linitial(saop->args),
6133                                                                                   indexcol, index));
6134                         qinfo->varonleft = true;
6135                         qinfo->other_operand = (Node *) lsecond(saop->args);
6136                 }
6137                 else if (IsA(clause, NullTest))
6138                 {
6139                         qinfo->clause_op = InvalidOid;
6140                         Assert(match_index_to_operand((Node *) ((NullTest *) clause)->arg,
6141                                                                                   indexcol, index));
6142                         qinfo->varonleft = true;
6143                         qinfo->other_operand = NULL;
6144                 }
6145                 else
6146                 {
6147                         elog(ERROR, "unsupported indexqual type: %d",
6148                                  (int) nodeTag(clause));
6149                 }
6150
6151                 result = lappend(result, qinfo);
6152         }
6153         return result;
6154 }
6155
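/*
 * For illustration, the shape of the IndexQualInfo that the loop above
 * builds for a qual written as "42 > indexedcol" against the first index
 * column.  The values shown are hypothetical; downstream estimators rely on
 * exactly these varonleft/other_operand conventions.  Guarded by the same
 * hypothetical SELFUNCS_EXAMPLES macro as the earlier sketches.
 */
#ifdef SELFUNCS_EXAMPLES
static void
indexqualinfo_example(IndexQualInfo *qinfo, RestrictInfo *rinfo)
{
    OpExpr     *op = (OpExpr *) rinfo->clause;

    qinfo->rinfo = rinfo;           /* the original RestrictInfo node */
    qinfo->indexcol = 0;            /* qual is against the first index column */
    qinfo->clause_op = op->opno;    /* OID of ">" exactly as written */
    qinfo->varonleft = false;       /* the index column is the right-hand input */
    qinfo->other_operand = get_leftop(rinfo->clause);   /* the Const 42 */
}
#endif
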
6156 /*
6157  * Simple function to compute the total eval cost of the "other operands"
6158  * in an IndexQualInfo list.  Since we know these will be evaluated just
6159  * once per scan, there's no need to distinguish startup from per-row cost.
6160  */
6161 static Cost
6162 other_operands_eval_cost(PlannerInfo *root, List *qinfos)
6163 {
6164         Cost            qual_arg_cost = 0;
6165         ListCell   *lc;
6166
6167         foreach(lc, qinfos)
6168         {
6169                 IndexQualInfo *qinfo = (IndexQualInfo *) lfirst(lc);
6170                 QualCost        index_qual_cost;
6171
6172                 cost_qual_eval_node(&index_qual_cost, qinfo->other_operand, root);
6173                 qual_arg_cost += index_qual_cost.startup + index_qual_cost.per_tuple;
6174         }
6175         return qual_arg_cost;
6176 }
6177
6178 /*
6179  * Get other-operand eval cost for an index orderby list.
6180  *
6181  * Index orderby expressions aren't represented as RestrictInfos (since they
6182  * aren't boolean, usually).  So we can't apply deconstruct_indexquals to
6183  * them.  However, they are much simpler to deal with since they are always
6184  * OpExprs and the index column is always on the left.
6185  */
6186 static Cost
6187 orderby_operands_eval_cost(PlannerInfo *root, IndexPath *path)
6188 {
6189         Cost            qual_arg_cost = 0;
6190         ListCell   *lc;
6191
6192         foreach(lc, path->indexorderbys)
6193         {
6194                 Expr       *clause = (Expr *) lfirst(lc);
6195                 Node       *other_operand;
6196                 QualCost        index_qual_cost;
6197
6198                 if (IsA(clause, OpExpr))
6199                 {
6200                         other_operand = get_rightop(clause);
6201                 }
6202                 else
6203                 {
6204                         elog(ERROR, "unsupported indexorderby type: %d",
6205                                  (int) nodeTag(clause));
6206                         other_operand = NULL;           /* keep compiler quiet */
6207                 }
6208
6209                 cost_qual_eval_node(&index_qual_cost, other_operand, root);
6210                 qual_arg_cost += index_qual_cost.startup + index_qual_cost.per_tuple;
6211         }
6212         return qual_arg_cost;
6213 }
6214
6215 void
6216 genericcostestimate(PlannerInfo *root,
6217                                         IndexPath *path,
6218                                         double loop_count,
6219                                         List *qinfos,
6220                                         GenericCosts *costs)
6221 {
6222         IndexOptInfo *index = path->indexinfo;
6223         List       *indexQuals = path->indexquals;
6224         List       *indexOrderBys = path->indexorderbys;
6225         Cost            indexStartupCost;
6226         Cost            indexTotalCost;
6227         Selectivity indexSelectivity;
6228         double          indexCorrelation;
6229         double          numIndexPages;
6230         double          numIndexTuples;
6231         double          spc_random_page_cost;
6232         double          num_sa_scans;
6233         double          num_outer_scans;
6234         double          num_scans;
6235         double          qual_op_cost;
6236         double          qual_arg_cost;
6237         List       *selectivityQuals;
6238         ListCell   *l;
6239
6240         /*
6241          * If the index is partial, AND the index predicate with the explicitly
6242          * given indexquals to produce a more accurate idea of the index
6243          * selectivity.
6244          */
6245         selectivityQuals = add_predicate_to_quals(index, indexQuals);
6246
6247         /*
6248          * Check for ScalarArrayOpExpr index quals, and estimate the number of
6249          * index scans that will be performed.
6250          */
6251         num_sa_scans = 1;
6252         foreach(l, indexQuals)
6253         {
6254                 RestrictInfo *rinfo = (RestrictInfo *) lfirst(l);
6255
6256                 if (IsA(rinfo->clause, ScalarArrayOpExpr))
6257                 {
6258                         ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) rinfo->clause;
6259                         int                     alength = estimate_array_length(lsecond(saop->args));
6260
6261                         if (alength > 1)
6262                                 num_sa_scans *= alength;
6263                 }
6264         }
6265
6266         /* Estimate the fraction of main-table tuples that will be visited */
6267         indexSelectivity = clauselist_selectivity(root, selectivityQuals,
6268                                                                                           index->rel->relid,
6269                                                                                           JOIN_INNER,
6270                                                                                           NULL);
6271
6272         /*
6273          * If caller didn't give us an estimate, estimate the number of index
6274          * tuples that will be visited.  We do it in this rather peculiar-looking
6275          * way in order to get the right answer for partial indexes.
6276          */
6277         numIndexTuples = costs->numIndexTuples;
6278         if (numIndexTuples <= 0.0)
6279         {
6280                 numIndexTuples = indexSelectivity * index->rel->tuples;
6281
6282                 /*
6283                  * The above calculation counts all the tuples visited across all
6284                  * scans induced by ScalarArrayOpExpr nodes.  We want to consider the
6285                  * average per-indexscan number, so adjust.  This is a handy place to
6286                  * round to integer, too.  (If caller supplied tuple estimate, it's
6287                  * responsible for handling these considerations.)
6288                  */
6289                 numIndexTuples = rint(numIndexTuples / num_sa_scans);
6290         }
6291
6292         /*
6293          * We can bound the number of tuples by the index size in any case. Also,
6294          * always estimate at least one tuple is touched, even when
6295          * indexSelectivity estimate is tiny.
6296          */
6297         if (numIndexTuples > index->tuples)
6298                 numIndexTuples = index->tuples;
6299         if (numIndexTuples < 1.0)
6300                 numIndexTuples = 1.0;
6301
6302         /*
6303          * Estimate the number of index pages that will be retrieved.
6304          *
6305          * We use the simplistic method of taking a pro-rata fraction of the total
6306          * number of index pages.  In effect, this counts only leaf pages and not
6307          * any overhead such as index metapage or upper tree levels.
6308          *
6309          * In practice access to upper index levels is often nearly free because
6310          * those tend to stay in cache under load; moreover, the cost involved is
6311          * highly dependent on index type.  We therefore ignore such costs here
6312          * and leave it to the caller to add a suitable charge if needed.
6313          */
6314         if (index->pages > 1 && index->tuples > 1)
6315                 numIndexPages = ceil(numIndexTuples * index->pages / index->tuples);
6316         else
6317                 numIndexPages = 1.0;
6318
6319         /* fetch estimated page cost for tablespace containing index */
6320         get_tablespace_page_costs(index->reltablespace,
6321                                                           &spc_random_page_cost,
6322                                                           NULL);
6323
6324         /*
6325          * Now compute the disk access costs.
6326          *
6327          * The above calculations are all per-index-scan.  However, if we are in a
6328          * nestloop inner scan, we can expect the scan to be repeated (with
6329          * different search keys) for each row of the outer relation.  Likewise,
6330          * ScalarArrayOpExpr quals result in multiple index scans.  This creates
6331          * the potential for cache effects to reduce the number of disk page
6332          * fetches needed.  We want to estimate the average per-scan I/O cost in
6333          * the presence of caching.
6334          *
6335          * We use the Mackert-Lohman formula (see costsize.c for details) to
6336          * estimate the total number of page fetches that occur.  While this
6337          * wasn't what it was designed for, it seems a reasonable model anyway.
6338          * Note that we are counting pages not tuples anymore, so we take N = T =
6339          * index size, as if there were one "tuple" per page.
6340          */
6341         num_outer_scans = loop_count;
6342         num_scans = num_sa_scans * num_outer_scans;
6343
6344         if (num_scans > 1)
6345         {
6346                 double          pages_fetched;
6347
6348                 /* total page fetches ignoring cache effects */
6349                 pages_fetched = numIndexPages * num_scans;
6350
6351                 /* use Mackert and Lohman formula to adjust for cache effects */
6352                 pages_fetched = index_pages_fetched(pages_fetched,
6353                                                                                         index->pages,
6354                                                                                         (double) index->pages,
6355                                                                                         root);
6356
6357                 /*
6358                  * Now compute the total disk access cost, and then report a pro-rated
6359                  * share for each outer scan.  (Don't pro-rate for ScalarArrayOpExpr,
6360                  * since that's internal to the indexscan.)
6361                  */
6362                 indexTotalCost = (pages_fetched * spc_random_page_cost)
6363                         / num_outer_scans;
6364         }
6365         else
6366         {
6367                 /*
6368                  * For a single index scan, we just charge spc_random_page_cost per
6369                  * page touched.
6370                  */
6371                 indexTotalCost = numIndexPages * spc_random_page_cost;
6372         }
6373
6374         /*
6375          * CPU cost: any complex expressions in the indexquals will need to be
6376          * evaluated once at the start of the scan to reduce them to runtime keys
6377          * to pass to the index AM (see nodeIndexscan.c).  We model the per-tuple
6378          * CPU costs as cpu_index_tuple_cost plus one cpu_operator_cost per
6379          * indexqual operator.  Because we have numIndexTuples as a per-scan
6380          * number, we have to multiply by num_sa_scans to get the correct result
6381          * for ScalarArrayOpExpr cases.  Similarly add in costs for any index
6382          * ORDER BY expressions.
6383          *
6384          * Note: this neglects the possible costs of rechecking lossy operators.
6385          * Detecting that that might be needed seems more expensive than it's
6386          * worth, though, considering all the other inaccuracies here ...
6387          */
6388         qual_arg_cost = other_operands_eval_cost(root, qinfos) +
6389                 orderby_operands_eval_cost(root, path);
6390         qual_op_cost = cpu_operator_cost *
6391                 (list_length(indexQuals) + list_length(indexOrderBys));
6392
6393         indexStartupCost = qual_arg_cost;
6394         indexTotalCost += qual_arg_cost;
6395         indexTotalCost += numIndexTuples * num_sa_scans * (cpu_index_tuple_cost + qual_op_cost);
6396
6397         /*
6398          * Generic assumption about index correlation: there isn't any.
6399          */
6400         indexCorrelation = 0.0;
6401
6402         /*
6403          * Return everything to caller.
6404          */
6405         costs->indexStartupCost = indexStartupCost;
6406         costs->indexTotalCost = indexTotalCost;
6407         costs->indexSelectivity = indexSelectivity;
6408         costs->indexCorrelation = indexCorrelation;
6409         costs->numIndexPages = numIndexPages;
6410         costs->numIndexTuples = numIndexTuples;
6411         costs->spc_random_page_cost = spc_random_page_cost;
6412         costs->num_sa_scans = num_sa_scans;
6413 }
6414
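/*
 * For illustration, a back-of-the-envelope version of the arithmetic above
 * for the simplest case: a single index scan, one indexqual, no
 * ScalarArrayOpExpr and no nestloop repetition, so the Mackert-Lohman
 * adjustment never kicks in.  The input numbers are made up, the table and
 * the index are treated as having the same tuple count, and the commented
 * results assume the default cost GUC settings.  Guarded by the same
 * hypothetical SELFUNCS_EXAMPLES macro as the earlier sketches.
 */
#ifdef SELFUNCS_EXAMPLES
static void
generic_cost_example(void)
{
    double      index_pages = 1000.0;
    double      index_tuples = 300000.0;
    double      selectivity = 0.01;     /* from clauselist_selectivity() */
    double      spc_random_page_cost = 4.0;
    int         nquals = 1;             /* one indexqual, no ORDER BY exprs */
    double      num_index_tuples;
    double      num_index_pages;
    Cost        io_cost;
    Cost        cpu_cost;

    /* fraction of the tuples visited, then the same fraction of the pages */
    num_index_tuples = rint(selectivity * index_tuples);   /* 3000 */
    num_index_pages = ceil(num_index_tuples * index_pages / index_tuples);  /* 10 */

    /* single scan: charge spc_random_page_cost per leaf page touched */
    io_cost = num_index_pages * spc_random_page_cost;      /* 40.0 */

    /* per-tuple CPU: cpu_index_tuple_cost plus one operator per indexqual */
    cpu_cost = num_index_tuples *
        (cpu_index_tuple_cost + nquals * cpu_operator_cost);    /* 22.5 */

    /* ~62.5 before qual_arg_cost and any AM-specific additions */
    (void) (io_cost + cpu_cost);
}
#endif
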
6415 /*
6416  * If the index is partial, add its predicate to the given qual list.
6417  *
6418  * ANDing the index predicate with the explicitly given indexquals produces
6419  * a more accurate idea of the index's selectivity.  However, we need to be
6420  * careful not to insert redundant clauses, because clauselist_selectivity()
6421  * is easily fooled into computing a too-low selectivity estimate.  Our
6422  * approach is to add only the predicate clause(s) that cannot be proven to
6423  * be implied by the given indexquals.  This successfully handles cases such
6424  * as a qual "x = 42" used with a partial index "WHERE x >= 40 AND x < 50".
6425  * There are many other cases where we won't detect redundancy, leading to a
6426  * too-low selectivity estimate, which will bias the system in favor of using
6427  * partial indexes where possible.  That is not necessarily bad though.
6428  *
6429  * Note that indexQuals contains RestrictInfo nodes while the indpred
6430  * does not, so the output list will be mixed.  This is OK for both
6431  * predicate_implied_by() and clauselist_selectivity(), but might be
6432  * problematic if the result were passed to other things.
6433  */
6434 static List *
6435 add_predicate_to_quals(IndexOptInfo *index, List *indexQuals)
6436 {
6437         List       *predExtraQuals = NIL;
6438         ListCell   *lc;
6439
6440         if (index->indpred == NIL)
6441                 return indexQuals;
6442
6443         foreach(lc, index->indpred)
6444         {
6445                 Node       *predQual = (Node *) lfirst(lc);
6446                 List       *oneQual = list_make1(predQual);
6447
6448                 if (!predicate_implied_by(oneQual, indexQuals))
6449                         predExtraQuals = list_concat(predExtraQuals, oneQual);
6450         }
6451         /* list_concat avoids modifying the passed-in indexQuals list */
6452         return list_concat(predExtraQuals, indexQuals);
6453 }
6454
6455
6456 void
6457 btcostestimate(PlannerInfo *root, IndexPath *path, double loop_count,
6458                            Cost *indexStartupCost, Cost *indexTotalCost,
6459                            Selectivity *indexSelectivity, double *indexCorrelation)
6460 {
6461         IndexOptInfo *index = path->indexinfo;
6462         List       *qinfos;
6463         GenericCosts costs;
6464         Oid                     relid;
6465         AttrNumber      colnum;
6466         VariableStatData vardata;
6467         double          numIndexTuples;
6468         Cost            descentCost;
6469         List       *indexBoundQuals;
6470         int                     indexcol;
6471         bool            eqQualHere;
6472         bool            found_saop;
6473         bool            found_is_null_op;
6474         double          num_sa_scans;
6475         ListCell   *lc;
6476
6477         /* Do preliminary analysis of indexquals */
6478         qinfos = deconstruct_indexquals(path);
6479
6480         /*
6481          * For a btree scan, only leading '=' quals plus inequality quals for the
6482          * immediately next attribute contribute to index selectivity (these are
6483          * the "boundary quals" that determine the starting and stopping points of
6484          * the index scan).  Additional quals can suppress visits to the heap, so
6485          * it's OK to count them in indexSelectivity, but they should not count
6486          * for estimating numIndexTuples.  So we must examine the given indexquals
6487          * to find out which ones count as boundary quals.  We rely on the
6488          * knowledge that they are given in index column order.
6489          *
6490          * For a RowCompareExpr, we consider only the first column, just as
6491          * rowcomparesel() does.
6492          *
6493          * If there's a ScalarArrayOpExpr in the quals, we'll actually perform N
6494          * index scans not one, but the ScalarArrayOpExpr's operator can be
6495          * considered to act the same as it normally does.
6496          */
6497         indexBoundQuals = NIL;
6498         indexcol = 0;
6499         eqQualHere = false;
6500         found_saop = false;
6501         found_is_null_op = false;
6502         num_sa_scans = 1;
6503         foreach(lc, qinfos)
6504         {
6505                 IndexQualInfo *qinfo = (IndexQualInfo *) lfirst(lc);
6506                 RestrictInfo *rinfo = qinfo->rinfo;
6507                 Expr       *clause = rinfo->clause;
6508                 Oid                     clause_op;
6509                 int                     op_strategy;
6510
6511                 if (indexcol != qinfo->indexcol)
6512                 {
6513                         /* Beginning of a new column's quals */
6514                         if (!eqQualHere)
6515                                 break;                  /* done if no '=' qual for indexcol */
6516                         eqQualHere = false;
6517                         indexcol++;
6518                         if (indexcol != qinfo->indexcol)
6519                                 break;                  /* no quals at all for indexcol */
6520                 }
6521
6522                 if (IsA(clause, ScalarArrayOpExpr))
6523                 {
6524                         int                     alength = estimate_array_length(qinfo->other_operand);
6525
6526                         found_saop = true;
6527                         /* count up number of SA scans induced by indexBoundQuals only */
6528                         if (alength > 1)
6529                                 num_sa_scans *= alength;
6530                 }
6531                 else if (IsA(clause, NullTest))
6532                 {
6533                         NullTest   *nt = (NullTest *) clause;
6534
6535                         if (nt->nulltesttype == IS_NULL)
6536                         {
6537                                 found_is_null_op = true;
6538                                 /* IS NULL is like = for selectivity determination purposes */
6539                                 eqQualHere = true;
6540                         }
6541                 }
6542
6543                 /*
6544                  * We would need to commute the clause_op if not varonleft, except
6545                  * that we only care if it's equality or not, so that refinement is
6546                  * unnecessary.
6547                  */
6548                 clause_op = qinfo->clause_op;
6549
6550                 /* check for equality operator */
6551                 if (OidIsValid(clause_op))
6552                 {
6553                         op_strategy = get_op_opfamily_strategy(clause_op,
6554                                                                                                    index->opfamily[indexcol]);
6555                         Assert(op_strategy != 0);       /* not a member of opfamily?? */
6556                         if (op_strategy == BTEqualStrategyNumber)
6557                                 eqQualHere = true;
6558                 }
6559
6560                 indexBoundQuals = lappend(indexBoundQuals, rinfo);
6561         }
6562
6563         /*
6564          * If index is unique and we found an '=' clause for each column, we can
6565          * just assume numIndexTuples = 1 and skip the expensive
6566          * clauselist_selectivity calculations.  However, a ScalarArrayOp or
6567          * NullTest invalidates that theory, even though it sets eqQualHere.
6568          */
6569         if (index->unique &&
6570                 indexcol == index->ncolumns - 1 &&
6571                 eqQualHere &&
6572                 !found_saop &&
6573                 !found_is_null_op)
6574                 numIndexTuples = 1.0;
6575         else
6576         {
6577                 List       *selectivityQuals;
6578                 Selectivity btreeSelectivity;
6579
6580                 /*
6581                  * If the index is partial, AND the index predicate with the
6582                  * index-bound quals to produce a more accurate idea of the number of
6583                  * rows covered by the bound conditions.
6584                  */
6585                 selectivityQuals = add_predicate_to_quals(index, indexBoundQuals);
6586
6587                 btreeSelectivity = clauselist_selectivity(root, selectivityQuals,
6588                                                                                                   index->rel->relid,
6589                                                                                                   JOIN_INNER,
6590                                                                                                   NULL);
6591                 numIndexTuples = btreeSelectivity * index->rel->tuples;
6592
6593                 /*
6594                  * As in genericcostestimate(), we have to adjust for any
6595                  * ScalarArrayOpExpr quals included in indexBoundQuals, and then round
6596                  * to integer.
6597                  */
6598                 numIndexTuples = rint(numIndexTuples / num_sa_scans);
6599         }
6600
6601         /*
6602          * Now do generic index cost estimation.
6603          */
6604         MemSet(&costs, 0, sizeof(costs));
6605         costs.numIndexTuples = numIndexTuples;
6606
6607         genericcostestimate(root, path, loop_count, qinfos, &costs);
6608
6609         /*
6610          * Add a CPU-cost component to represent the costs of initial btree
6611          * descent.  We don't charge any I/O cost for touching upper btree levels,
6612          * since they tend to stay in cache, but we still have to do about log2(N)
6613          * comparisons to descend a btree of N leaf tuples.  We charge one
6614          * cpu_operator_cost per comparison.
6615          *
6616          * If there are ScalarArrayOpExprs, charge this once per SA scan.  The
6617          * ones after the first one are not startup cost so far as the overall
6618          * plan is concerned, so add them only to "total" cost.
6619          */
6620         if (index->tuples > 1)          /* avoid computing log(0) */
6621         {
6622                 descentCost = ceil(log(index->tuples) / log(2.0)) * cpu_operator_cost;
6623                 costs.indexStartupCost += descentCost;
6624                 costs.indexTotalCost += costs.num_sa_scans * descentCost;
6625         }
6626
6627         /*
6628          * Even though we're not charging I/O cost for touching upper btree pages,
6629          * it's still reasonable to charge some CPU cost per page descended
6630          * through.  Moreover, if we had no such charge at all, bloated indexes
6631          * would appear to have the same search cost as unbloated ones, at least
6632          * in cases where only a single leaf page is expected to be visited.  This
6633          * cost is somewhat arbitrarily set at 50x cpu_operator_cost per page
6634          * touched.  The number of such pages is btree tree height plus one (ie,
6635          * we charge for the leaf page too).  As above, charge once per SA scan.
6636          */
6637         descentCost = (index->tree_height + 1) * 50.0 * cpu_operator_cost;
6638         costs.indexStartupCost += descentCost;
6639         costs.indexTotalCost += costs.num_sa_scans * descentCost;
6640
6641         /*
6642          * If we can get an estimate of the first column's ordering correlation C
6643          * from pg_statistic, estimate the index correlation as C for a
6644          * single-column index, or C * 0.75 for multiple columns. (The idea here
6645          * is that multiple columns dilute the importance of the first column's
6646          * ordering, but don't negate it entirely.  Before 8.0 we divided the
6647          * correlation by the number of columns, but that seems too strong.)
6648          */
6649         MemSet(&vardata, 0, sizeof(vardata));
6650
6651         if (index->indexkeys[0] != 0)
6652         {
6653                 /* Simple variable --- look to stats for the underlying table */
6654                 RangeTblEntry *rte = planner_rt_fetch(index->rel->relid, root);
6655
6656                 Assert(rte->rtekind == RTE_RELATION);
6657                 relid = rte->relid;
6658                 Assert(relid != InvalidOid);
6659                 colnum = index->indexkeys[0];
6660
6661                 if (get_relation_stats_hook &&
6662                         (*get_relation_stats_hook) (root, rte, colnum, &vardata))
6663                 {
6664                         /*
6665                          * The hook took control of acquiring a stats tuple.  If it did
6666                          * supply a tuple, it'd better have supplied a freefunc.
6667                          */
6668                         if (HeapTupleIsValid(vardata.statsTuple) &&
6669                                 !vardata.freefunc)
6670                                 elog(ERROR, "no function provided to release variable stats with");
6671                 }
6672                 else
6673                 {
6674                         vardata.statsTuple = SearchSysCache3(STATRELATTINH,
6675                                                                                                  ObjectIdGetDatum(relid),
6676                                                                                                  Int16GetDatum(colnum),
6677                                                                                                  BoolGetDatum(rte->inh));
6678                         vardata.freefunc = ReleaseSysCache;
6679                 }
6680         }
6681         else
6682         {
6683                 /* Expression --- maybe there are stats for the index itself */
6684                 relid = index->indexoid;
6685                 colnum = 1;
6686
6687                 if (get_index_stats_hook &&
6688                         (*get_index_stats_hook) (root, relid, colnum, &vardata))
6689                 {
6690                         /*
6691                          * The hook took control of acquiring a stats tuple.  If it did
6692                          * supply a tuple, it'd better have supplied a freefunc.
6693                          */
6694                         if (HeapTupleIsValid(vardata.statsTuple) &&
6695                                 !vardata.freefunc)
6696                                 elog(ERROR, "no function provided to release variable stats with");
6697                 }
6698                 else
6699                 {
6700                         vardata.statsTuple = SearchSysCache3(STATRELATTINH,
6701                                                                                                  ObjectIdGetDatum(relid),
6702                                                                                                  Int16GetDatum(colnum),
6703                                                                                                  BoolGetDatum(false));
6704                         vardata.freefunc = ReleaseSysCache;
6705                 }
6706         }
6707
6708         if (HeapTupleIsValid(vardata.statsTuple))
6709         {
6710                 Oid                     sortop;
6711                 float4     *numbers;
6712                 int                     nnumbers;
6713
6714                 sortop = get_opfamily_member(index->opfamily[0],
6715                                                                          index->opcintype[0],
6716                                                                          index->opcintype[0],
6717                                                                          BTLessStrategyNumber);
6718                 if (OidIsValid(sortop) &&
6719                         get_attstatsslot(vardata.statsTuple, InvalidOid, 0,
6720                                                          STATISTIC_KIND_CORRELATION,
6721                                                          sortop,
6722                                                          NULL,
6723                                                          NULL, NULL,
6724                                                          &numbers, &nnumbers))
6725                 {
6726                         double          varCorrelation;
6727
6728                         Assert(nnumbers == 1);
6729                         varCorrelation = numbers[0];
6730
6731                         if (index->reverse_sort[0])
6732                                 varCorrelation = -varCorrelation;
6733
6734                         if (index->ncolumns > 1)
6735                                 costs.indexCorrelation = varCorrelation * 0.75;
6736                         else
6737                                 costs.indexCorrelation = varCorrelation;
6738
6739                         free_attstatsslot(InvalidOid, NULL, 0, numbers, nnumbers);
6740                 }
6741         }
6742
6743         ReleaseVariableStats(vardata);
6744
6745         *indexStartupCost = costs.indexStartupCost;
6746         *indexTotalCost = costs.indexTotalCost;
6747         *indexSelectivity = costs.indexSelectivity;
6748         *indexCorrelation = costs.indexCorrelation;
6749 }
6750
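/*
 * For illustration, the btree descent charges computed above, with made-up
 * numbers: a 1,000,000-tuple index whose tree_height is 3, scanned once
 * (num_sa_scans = 1).  The commented results assume the default
 * cpu_operator_cost of 0.0025.  Guarded by the same hypothetical
 * SELFUNCS_EXAMPLES macro as the earlier sketches.
 */
#ifdef SELFUNCS_EXAMPLES
static void
btree_descent_example(void)
{
    double      ntuples = 1000000.0;
    int         tree_height = 3;
    double      num_sa_scans = 1.0;
    Cost        comparison_cost;
    Cost        page_cost;

    /* about log2(N) comparisons to locate the starting leaf tuple */
    comparison_cost = ceil(log(ntuples) / log(2.0)) * cpu_operator_cost;
    /* = 20 * 0.0025 = 0.05 */

    /* plus 50 x cpu_operator_cost per page descended through, leaf included */
    page_cost = (tree_height + 1) * 50.0 * cpu_operator_cost;
    /* = 4 * 0.125 = 0.5 */

    /* both pieces go into startup cost; total cost gets them num_sa_scans times */
    (void) (comparison_cost + num_sa_scans * page_cost);
}
#endif
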
6751 void
6752 hashcostestimate(PlannerInfo *root, IndexPath *path, double loop_count,
6753                                  Cost *indexStartupCost, Cost *indexTotalCost,
6754                                  Selectivity *indexSelectivity, double *indexCorrelation)
6755 {
6756         List       *qinfos;
6757         GenericCosts costs;
6758
6759         /* Do preliminary analysis of indexquals */
6760         qinfos = deconstruct_indexquals(path);
6761
6762         MemSet(&costs, 0, sizeof(costs));
6763
6764         genericcostestimate(root, path, loop_count, qinfos, &costs);
6765
6766         /*
6767          * A hash index has no descent costs as such, since the index AM can go
6768          * directly to the target bucket after computing the hash value.  There
6769          * are a couple of other hash-specific costs that we could conceivably add
6770          * here, though:
6771          *
6772          * Ideally we'd charge spc_random_page_cost for each page in the target
6773          * bucket, not just the numIndexPages pages that genericcostestimate
6774          * thought we'd visit.  However in most cases we don't know which bucket
6775          * that will be.  There's no point in considering the average bucket size
6776          * because the hash AM makes sure that's always one page.
6777          *
6778          * Likewise, we could consider charging some CPU for each index tuple in
6779          * the bucket, if we knew how many there were.  But the per-tuple cost is
6780          * just a hash value comparison, not a general datatype-dependent
6781          * comparison, so any such charge ought to be quite a bit less than
6782          * cpu_operator_cost, which makes it probably not worth worrying about.
6783          *
6784          * A bigger issue is that chance hash-value collisions will result in
6785          * wasted probes into the heap.  We don't currently attempt to model this
6786          * cost on the grounds that it's rare, but maybe it's not rare enough.
6787          * (Any fix for this ought to consider the generic lossy-operator problem,
6788          * though; it's not entirely hash-specific.)
6789          */
6790
6791         *indexStartupCost = costs.indexStartupCost;
6792         *indexTotalCost = costs.indexTotalCost;
6793         *indexSelectivity = costs.indexSelectivity;
6794         *indexCorrelation = costs.indexCorrelation;
6795 }
6796
6797 void
6798 gistcostestimate(PlannerInfo *root, IndexPath *path, double loop_count,
6799                                  Cost *indexStartupCost, Cost *indexTotalCost,
6800                                  Selectivity *indexSelectivity, double *indexCorrelation)
6801 {
6802         IndexOptInfo *index = path->indexinfo;
6803         List       *qinfos;
6804         GenericCosts costs;
6805         Cost            descentCost;
6806
6807         /* Do preliminary analysis of indexquals */
6808         qinfos = deconstruct_indexquals(path);
6809
6810         MemSet(&costs, 0, sizeof(costs));
6811
6812         genericcostestimate(root, path, loop_count, qinfos, &costs);
6813
6814         /*
6815          * We model index descent costs similarly to those for btree, but to do
6816          * that we first need an idea of the tree height.  We somewhat arbitrarily
6817          * assume that the fanout is 100, meaning the tree height is at most
6818          * log100(index->pages).
6819          *
6820          * Although this computation isn't really expensive enough to require
6821          * caching, we might as well use index->tree_height to cache it.
6822          */
6823         if (index->tree_height < 0) /* unknown? */
6824         {
6825                 if (index->pages > 1)   /* avoid computing log(0) */
6826                         index->tree_height = (int) (log(index->pages) / log(100.0));
6827                 else
6828                         index->tree_height = 0;
6829         }
6830
6831         /*
6832          * Add a CPU-cost component to represent the costs of initial descent. We
6833          * just use log(N) here not log2(N) since the branching factor isn't
6834          * necessarily two anyway.  As for btree, charge once per SA scan.
6835          */
6836         if (index->tuples > 1)          /* avoid computing log(0) */
6837         {
6838                 descentCost = ceil(log(index->tuples)) * cpu_operator_cost;
6839                 costs.indexStartupCost += descentCost;
6840                 costs.indexTotalCost += costs.num_sa_scans * descentCost;
6841         }
6842
6843         /*
6844          * Likewise add a per-page charge, calculated the same as for btrees.
6845          */
6846         descentCost = (index->tree_height + 1) * 50.0 * cpu_operator_cost;
6847         costs.indexStartupCost += descentCost;
6848         costs.indexTotalCost += costs.num_sa_scans * descentCost;
6849
6850         *indexStartupCost = costs.indexStartupCost;
6851         *indexTotalCost = costs.indexTotalCost;
6852         *indexSelectivity = costs.indexSelectivity;
6853         *indexCorrelation = costs.indexCorrelation;
6854 }
6855
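/*
 * For illustration, the fanout-100 height guess used above (and again in
 * spgcostestimate below): e.g. a 50,000-page index is assumed to have
 * tree_height = (int) (log(50000) / log(100.0)) = 2.  Guarded by the same
 * hypothetical SELFUNCS_EXAMPLES macro as the earlier sketches.
 */
#ifdef SELFUNCS_EXAMPLES
static int
assumed_tree_height(double index_pages)
{
    /* same formula as the tree_height calculation above */
    return (index_pages > 1) ? (int) (log(index_pages) / log(100.0)) : 0;
}
#endif
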
6856 void
6857 spgcostestimate(PlannerInfo *root, IndexPath *path, double loop_count,
6858                                 Cost *indexStartupCost, Cost *indexTotalCost,
6859                                 Selectivity *indexSelectivity, double *indexCorrelation)
6860 {
6861         IndexOptInfo *index = path->indexinfo;
6862         List       *qinfos;
6863         GenericCosts costs;
6864         Cost            descentCost;
6865
6866         /* Do preliminary analysis of indexquals */
6867         qinfos = deconstruct_indexquals(path);
6868
6869         MemSet(&costs, 0, sizeof(costs));
6870
6871         genericcostestimate(root, path, loop_count, qinfos, &costs);
6872
6873         /*
6874          * We model index descent costs similarly to those for btree, but to do
6875          * that we first need an idea of the tree height.  We somewhat arbitrarily
6876          * assume that the fanout is 100, meaning the tree height is at most
6877          * log100(index->pages).
6878          *
6879          * Although this computation isn't really expensive enough to require
6880          * caching, we might as well use index->tree_height to cache it.
6881          */
6882         if (index->tree_height < 0) /* unknown? */
6883         {
6884                 if (index->pages > 1)   /* avoid computing log(0) */
6885                         index->tree_height = (int) (log(index->pages) / log(100.0));
6886                 else
6887                         index->tree_height = 0;
6888         }
6889
6890         /*
6891          * Add a CPU-cost component to represent the costs of initial descent. We
6892          * just use log(N) here not log2(N) since the branching factor isn't
6893          * necessarily two anyway.  As for btree, charge once per SA scan.
6894          */
6895         if (index->tuples > 1)          /* avoid computing log(0) */
6896         {
6897                 descentCost = ceil(log(index->tuples)) * cpu_operator_cost;
6898                 costs.indexStartupCost += descentCost;
6899                 costs.indexTotalCost += costs.num_sa_scans * descentCost;
6900         }
6901
6902         /*
6903          * Likewise add a per-page charge, calculated the same as for btrees.
6904          */
6905         descentCost = (index->tree_height + 1) * 50.0 * cpu_operator_cost;
6906         costs.indexStartupCost += descentCost;
6907         costs.indexTotalCost += costs.num_sa_scans * descentCost;
6908
6909         *indexStartupCost = costs.indexStartupCost;
6910         *indexTotalCost = costs.indexTotalCost;
6911         *indexSelectivity = costs.indexSelectivity;
6912         *indexCorrelation = costs.indexCorrelation;
6913 }
6914
6915
6916 /*
6917  * Support routines for gincostestimate
6918  */
6919
6920 typedef struct
6921 {
6922         bool            haveFullScan;
6923         double          partialEntries;
6924         double          exactEntries;
6925         double          searchEntries;
6926         double          arrayScans;
6927 } GinQualCounts;
6928
6929 /*
6930  * Estimate the number of index terms that need to be searched for while
6931  * testing the given GIN query, and increment the counts in *counts
6932  * appropriately.  If the query is unsatisfiable, return false.
6933  */
6934 static bool
6935 gincost_pattern(IndexOptInfo *index, int indexcol,
6936                                 Oid clause_op, Datum query,
6937                                 GinQualCounts *counts)
6938 {
6939         Oid                     extractProcOid;
6940         Oid                     collation;
6941         int                     strategy_op;
6942         Oid                     lefttype,
6943                                 righttype;
6944         int32           nentries = 0;
6945         bool       *partial_matches = NULL;
6946         Pointer    *extra_data = NULL;
6947         bool       *nullFlags = NULL;
6948         int32           searchMode = GIN_SEARCH_MODE_DEFAULT;
6949         int32           i;
6950
6951         /*
6952          * Get the operator's strategy number and declared input data types within
6953          * the index opfamily.  (We don't need the latter, but we use
6954          * get_op_opfamily_properties because it will throw error if it fails to
6955          * find a matching pg_amop entry.)
6956          */
6957         get_op_opfamily_properties(clause_op, index->opfamily[indexcol], false,
6958                                                            &strategy_op, &lefttype, &righttype);
6959
6960         /*
6961          * GIN always uses the "default" support functions, which are those with
6962          * lefttype == righttype == the opclass' opcintype (see
6963          * IndexSupportInitialize in relcache.c).
6964          */
6965         extractProcOid = get_opfamily_proc(index->opfamily[indexcol],
6966                                                                            index->opcintype[indexcol],
6967                                                                            index->opcintype[indexcol],
6968                                                                            GIN_EXTRACTQUERY_PROC);
6969
6970         if (!OidIsValid(extractProcOid))
6971         {
6972                 /* should not happen; throw same error as index_getprocinfo */
6973                 elog(ERROR, "missing support function %d for attribute %d of index \"%s\"",
6974                          GIN_EXTRACTQUERY_PROC, indexcol + 1,
6975                          get_rel_name(index->indexoid));
6976         }
6977
6978         /*
6979          * Choose collation to pass to extractProc (should match initGinState).
6980          */
6981         if (OidIsValid(index->indexcollations[indexcol]))
6982                 collation = index->indexcollations[indexcol];
6983         else
6984                 collation = DEFAULT_COLLATION_OID;
6985
6986         OidFunctionCall7Coll(extractProcOid,
6987                                                  collation,
6988                                                  query,
6989                                                  PointerGetDatum(&nentries),
6990                                                  UInt16GetDatum(strategy_op),
6991                                                  PointerGetDatum(&partial_matches),
6992                                                  PointerGetDatum(&extra_data),
6993                                                  PointerGetDatum(&nullFlags),
6994                                                  PointerGetDatum(&searchMode));
6995
6996         if (nentries <= 0 && searchMode == GIN_SEARCH_MODE_DEFAULT)
6997         {
6998                 /* No match is possible */
6999                 return false;
7000         }
7001
7002         for (i = 0; i < nentries; i++)
7003         {
7004                 /*
7005                  * For a partial match we have no information with which to estimate
7006                  * the number of matched index entries, so arbitrarily estimate 100.
7007                  */
7008                 if (partial_matches && partial_matches[i])
7009                         counts->partialEntries += 100;
7010                 else
7011                         counts->exactEntries++;
7012
7013                 counts->searchEntries++;
7014         }
7015
7016         if (searchMode == GIN_SEARCH_MODE_INCLUDE_EMPTY)
7017         {
7018                 /* Treat "include empty" like an exact-match item */
7019                 counts->exactEntries++;
7020                 counts->searchEntries++;
7021         }
7022         else if (searchMode != GIN_SEARCH_MODE_DEFAULT)
7023         {
7024                 /* It's GIN_SEARCH_MODE_ALL */
7025                 counts->haveFullScan = true;
7026         }
7027
7028         return true;
7029 }
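
/*
 * Editor's illustrative note: suppose extractQuery returns three entries for
 * a query, one of them flagged as a partial match (a prefix-matching tsquery
 * lexeme would typically behave this way).  The loop above then adds 100 to
 * counts->partialEntries, 2 to counts->exactEntries, and 3 to
 * counts->searchEntries.  If searchMode came back as
 * GIN_SEARCH_MODE_INCLUDE_EMPTY, one further exact entry and one further
 * search are counted; a non-default mode other than that (i.e.
 * GIN_SEARCH_MODE_ALL) just sets counts->haveFullScan.
 */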
7030
7031 /*
7032  * Estimate the number of index terms that need to be searched for while
7033  * testing the given GIN index clause, and increment the counts in *counts
7034  * appropriately.  If the query is unsatisfiable, return false.
7035  */
7036 static bool
7037 gincost_opexpr(PlannerInfo *root,
7038                            IndexOptInfo *index,
7039                            IndexQualInfo *qinfo,
7040                            GinQualCounts *counts)
7041 {
7042         int                     indexcol = qinfo->indexcol;
7043         Oid                     clause_op = qinfo->clause_op;
7044         Node       *operand = qinfo->other_operand;
7045
7046         if (!qinfo->varonleft)
7047         {
7048                 /* must commute the operator */
7049                 clause_op = get_commutator(clause_op);
7050         }
7051
7052         /* aggressively reduce to a constant, and look through relabeling */
7053         operand = estimate_expression_value(root, operand);
7054
7055         if (IsA(operand, RelabelType))
7056                 operand = (Node *) ((RelabelType *) operand)->arg;
7057
7058         /*
7059          * We cannot call the extractQuery method with an unknown operand.  So
7060          * unless the operand is a Const we can't do much; just assume the
7061          * operand will yield one ordinary search entry at runtime.
7062          */
7063         if (!IsA(operand, Const))
7064         {
7065                 counts->exactEntries++;
7066                 counts->searchEntries++;
7067                 return true;
7068         }
7069
7070         /* If Const is null, there can be no matches */
7071         if (((Const *) operand)->constisnull)
7072                 return false;
7073
7074         /* Otherwise, apply extractQuery and get the actual term counts */
7075         return gincost_pattern(index, indexcol, clause_op,
7076                                                    ((Const *) operand)->constvalue,
7077                                                    counts);
7078 }
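
/*
 * Editor's illustrative note (hypothetical column name): for a clause written
 * as ARRAY[1,2] <@ intcol, the indexed Var is on the right, so clause_op is
 * commuted from <@ to its commutator @> before consulting extractQuery, and
 * estimate_expression_value folds the array expression to a Const.  If the
 * comparison value cannot be reduced to a Const (say it comes from an outer
 * Var or an exec-time parameter), we simply charge for one ordinary search
 * entry rather than guessing at extractQuery's output.
 */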
7079
7080 /*
7081  * Estimate the number of index terms that need to be searched for while
7082  * testing the given GIN index clause, and increment the counts in *counts
7083  * appropriately.  If the query is unsatisfiable, return false.
7084  *
7085  * A ScalarArrayOpExpr will give rise to N separate indexscans at runtime,
7086  * each of which involves one value from the RHS array, plus all the
7087  * non-array quals (if any).  To model this, we average the counts across
7088  * the RHS elements, and add the averages to the counts in *counts (which
7089  * correspond to per-indexscan costs).  We also multiply counts->arrayScans
7090  * by N, causing gincostestimate to scale up its estimates accordingly.
7091  */
7092 static bool
7093 gincost_scalararrayopexpr(PlannerInfo *root,
7094                                                   IndexOptInfo *index,
7095                                                   IndexQualInfo *qinfo,
7096                                                   double numIndexEntries,
7097                                                   GinQualCounts *counts)
7098 {
7099         int                     indexcol = qinfo->indexcol;
7100         Oid                     clause_op = qinfo->clause_op;
7101         Node       *rightop = qinfo->other_operand;
7102         ArrayType  *arrayval;
7103         int16           elmlen;
7104         bool            elmbyval;
7105         char            elmalign;
7106         int                     numElems;
7107         Datum      *elemValues;
7108         bool       *elemNulls;
7109         GinQualCounts arraycounts;
7110         int                     numPossible = 0;
7111         int                     i;
7112
7113         Assert(((ScalarArrayOpExpr *) qinfo->rinfo->clause)->useOr);
7114
7115         /* aggressively reduce to a constant, and look through relabeling */
7116         rightop = estimate_expression_value(root, rightop);
7117
7118         if (IsA(rightop, RelabelType))
7119                 rightop = (Node *) ((RelabelType *) rightop)->arg;
7120
7121         /*
7122          * We cannot call the extractQuery method with an unknown operand.  So
7123          * unless the operand is a Const we can't do much; just assume each
7124          * array element will yield one ordinary search entry at runtime, and
7125          * fall back on a probably-bad estimate of the number of array entries.
7126          */
7127         if (!IsA(rightop, Const))
7128         {
7129                 counts->exactEntries++;
7130                 counts->searchEntries++;
7131                 counts->arrayScans *= estimate_array_length(rightop);
7132                 return true;
7133         }
7134
7135         /* If Const is null, there can be no matches */
7136         if (((Const *) rightop)->constisnull)
7137                 return false;
7138
7139         /* Otherwise, extract the array elements and iterate over them */
7140         arrayval = DatumGetArrayTypeP(((Const *) rightop)->constvalue);
7141         get_typlenbyvalalign(ARR_ELEMTYPE(arrayval),
7142                                                  &elmlen, &elmbyval, &elmalign);
7143         deconstruct_array(arrayval,
7144                                           ARR_ELEMTYPE(arrayval),
7145                                           elmlen, elmbyval, elmalign,
7146                                           &elemValues, &elemNulls, &numElems);
7147
7148         memset(&arraycounts, 0, sizeof(arraycounts));
7149
7150         for (i = 0; i < numElems; i++)
7151         {
7152                 GinQualCounts elemcounts;
7153
7154                 /* NULL can't match anything, so ignore, as the executor will */
7155                 if (elemNulls[i])
7156                         continue;
7157
7158                 /* Otherwise, apply extractQuery and get the actual term counts */
7159                 memset(&elemcounts, 0, sizeof(elemcounts));
7160
7161                 if (gincost_pattern(index, indexcol, clause_op, elemValues[i],
7162                                                         &elemcounts))
7163                 {
7164                         /* We ignore array elements that are unsatisfiable patterns */
7165                         numPossible++;
7166
7167                         if (elemcounts.haveFullScan)
7168                         {
7169                                 /*
7170                                  * Full index scan will be required.  We treat this as if
7171                                  * every key in the index had been listed in the query; is
7172                                  * that reasonable?
7173                                  */
7174                                 elemcounts.partialEntries = 0;
7175                                 elemcounts.exactEntries = numIndexEntries;
7176                                 elemcounts.searchEntries = numIndexEntries;
7177                         }
7178                         arraycounts.partialEntries += elemcounts.partialEntries;
7179                         arraycounts.exactEntries += elemcounts.exactEntries;
7180                         arraycounts.searchEntries += elemcounts.searchEntries;
7181                 }
7182         }
7183
7184         if (numPossible == 0)
7185         {
7186                 /* No satisfiable patterns in the array */
7187                 return false;
7188         }
7189
7190         /*
7191          * Now add the averages to the global counts.  This will give us an
7192          * estimate of the average number of terms searched for in each indexscan,
7193          * including contributions from both array and non-array quals.
7194          */
7195         counts->partialEntries += arraycounts.partialEntries / numPossible;
7196         counts->exactEntries += arraycounts.exactEntries / numPossible;
7197         counts->searchEntries += arraycounts.searchEntries / numPossible;
7198
7199         counts->arrayScans *= numPossible;
7200
7201         return true;
7202 }
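
/*
 * Editor's illustrative note: if the RHS array holds four elements of which
 * one is NULL and one is an unsatisfiable pattern, only two contribute, so
 * numPossible = 2.  The per-element counts are averaged over those two and
 * added to *counts, and counts->arrayScans is multiplied by 2, telling
 * gincostestimate to charge for two separate index scans.
 */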
7203
7204 /*
7205  * GIN has search behavior completely different from other index types
7206  */
7207 void
7208 gincostestimate(PlannerInfo *root, IndexPath *path, double loop_count,
7209                                 Cost *indexStartupCost, Cost *indexTotalCost,
7210                                 Selectivity *indexSelectivity, double *indexCorrelation)
7211 {
7212         IndexOptInfo *index = path->indexinfo;
7213         List       *indexQuals = path->indexquals;
7214         List       *indexOrderBys = path->indexorderbys;
7215         List       *qinfos;
7216         ListCell   *l;
7217         List       *selectivityQuals;
7218         double          numPages = index->pages,
7219                                 numTuples = index->tuples;
7220         double          numEntryPages,
7221                                 numDataPages,
7222                                 numPendingPages,
7223                                 numEntries;
7224         GinQualCounts counts;
7225         bool            matchPossible;
7226         double          partialScale;
7227         double          entryPagesFetched,
7228                                 dataPagesFetched,
7229                                 dataPagesFetchedBySel;
7230         double          qual_op_cost,
7231                                 qual_arg_cost,
7232                                 spc_random_page_cost,
7233                                 outer_scans;
7234         Relation        indexRel;
7235         GinStatsData ginStats;
7236
7237         /* Do preliminary analysis of indexquals */
7238         qinfos = deconstruct_indexquals(path);
7239
7240         /*
7241          * Obtain statistical information from the meta page, if possible.  Else
7242          * set ginStats to zeroes, and we'll cope below.
7243          */
7244         if (!index->hypothetical)
7245         {
7246                 indexRel = index_open(index->indexoid, AccessShareLock);
7247                 ginGetStats(indexRel, &ginStats);
7248                 index_close(indexRel, AccessShareLock);
7249         }
7250         else
7251         {
7252                 memset(&ginStats, 0, sizeof(ginStats));
7253         }
7254
7255         /*
7256          * Assuming we got valid (nonzero) stats at all, nPendingPages can be
7257          * trusted, but the other fields are data as of the last VACUUM.  We can
7258          * scale them up to account for growth since then, but that method only
7259          * goes so far; in the worst case, the stats might be for a completely
7260          * empty index, and scaling them will produce pretty bogus numbers.
7261          * Somewhat arbitrarily, set the cutoff for doing scaling at 4X growth; if
7262          * it's grown more than that, fall back to estimating things only from the
7263          * assumed-accurate index size.  But we'll trust nPendingPages in any case
7264          * so long as it's not clearly insane, ie, more than the index size.
7265          */
7266         if (ginStats.nPendingPages < numPages)
7267                 numPendingPages = ginStats.nPendingPages;
7268         else
7269                 numPendingPages = 0;
7270
7271         if (numPages > 0 && ginStats.nTotalPages <= numPages &&
7272                 ginStats.nTotalPages > numPages / 4 &&
7273                 ginStats.nEntryPages > 0 && ginStats.nEntries > 0)
7274         {
7275                 /*
7276                  * OK, the stats seem close enough to sane to be trusted.  But we
7277                  * still need to scale them by the ratio numPages / nTotalPages to
7278                  * account for growth since the last VACUUM.
7279                  */
7280                 double          scale = numPages / ginStats.nTotalPages;
7281
7282                 numEntryPages = ceil(ginStats.nEntryPages * scale);
7283                 numDataPages = ceil(ginStats.nDataPages * scale);
7284                 numEntries = ceil(ginStats.nEntries * scale);
7285                 /* ensure we didn't round up too much */
7286                 numEntryPages = Min(numEntryPages, numPages - numPendingPages);
7287                 numDataPages = Min(numDataPages,
7288                                                    numPages - numPendingPages - numEntryPages);
7289         }
7290         else
7291         {
7292                 /*
7293                  * We might get here because it's a hypothetical index, or an index
7294                  * created pre-9.1 and never vacuumed since upgrading (in which case
7295                  * its stats would read as zeroes), or just because it's grown too
7296                  * much since the last VACUUM for us to put our faith in scaling.
7297                  *
7298                  * Invent some plausible internal statistics based on the index page
7299                  * count (and clamp that to at least 10 pages, just in case).  We
7300                  * estimate that 90% of the index is entry pages, and the rest is data
7301                  * pages.  Estimate 100 entries per entry page; this is rather bogus
7302                  * since it'll depend on the size of the keys, but it's more robust
7303                  * than trying to predict the number of entries per heap tuple.
7304                  */
7305                 numPages = Max(numPages, 10);
7306                 numEntryPages = floor((numPages - numPendingPages) * 0.90);
7307                 numDataPages = numPages - numPendingPages - numEntryPages;
7308                 numEntries = floor(numEntryPages * 100);
7309         }
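
    /*
     * Editor's illustrative note: suppose numPages is now 1200 while the last
     * VACUUM recorded nTotalPages = 1000, nEntryPages = 700, nDataPages = 280,
     * nEntries = 50000, and nPendingPages = 5.  Those stats pass the sanity
     * tests above, so scale = 1.2 and we estimate numEntryPages = 840,
     * numDataPages = 336, and numEntries = 60000, with numPendingPages = 5.
     * Had the stats been all zeroes (e.g. a hypothetical index), a 50-page
     * index would instead be assumed to hold 45 entry pages, 5 data pages,
     * and 4500 entries.
     */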
7310
7311         /* In an empty index, numEntries could be zero.  Avoid divide-by-zero */
7312         if (numEntries < 1)
7313                 numEntries = 1;
7314
7315         /*
7316          * Include predicate in selectivityQuals (should match
7317          * genericcostestimate)
7318          */
7319         if (index->indpred != NIL)
7320         {
7321                 List       *predExtraQuals = NIL;
7322
7323                 foreach(l, index->indpred)
7324                 {
7325                         Node       *predQual = (Node *) lfirst(l);
7326                         List       *oneQual = list_make1(predQual);
7327
7328                         if (!predicate_implied_by(oneQual, indexQuals))
7329                                 predExtraQuals = list_concat(predExtraQuals, oneQual);
7330                 }
7331                 /* list_concat avoids modifying the passed-in indexQuals list */
7332                 selectivityQuals = list_concat(predExtraQuals, indexQuals);
7333         }
7334         else
7335                 selectivityQuals = indexQuals;
7336
7337         /* Estimate the fraction of main-table tuples that will be visited */
7338         *indexSelectivity = clauselist_selectivity(root, selectivityQuals,
7339                                                                                            index->rel->relid,
7340                                                                                            JOIN_INNER,
7341                                                                                            NULL);
7342
7343         /* fetch estimated page cost for tablespace containing index */
7344         get_tablespace_page_costs(index->reltablespace,
7345                                                           &spc_random_page_cost,
7346                                                           NULL);
7347
7348         /*
7349          * Generic assumption about index correlation: there isn't any.
7350          */
7351         *indexCorrelation = 0.0;
7352
7353         /*
7354          * Examine quals to estimate number of search entries & partial matches
7355          */
7356         memset(&counts, 0, sizeof(counts));
7357         counts.arrayScans = 1;
7358         matchPossible = true;
7359
7360         foreach(l, qinfos)
7361         {
7362                 IndexQualInfo *qinfo = (IndexQualInfo *) lfirst(l);
7363                 Expr       *clause = qinfo->rinfo->clause;
7364
7365                 if (IsA(clause, OpExpr))
7366                 {
7367                         matchPossible = gincost_opexpr(root,
7368                                                                                    index,
7369                                                                                    qinfo,
7370                                                                                    &counts);
7371                         if (!matchPossible)
7372                                 break;
7373                 }
7374                 else if (IsA(clause, ScalarArrayOpExpr))
7375                 {
7376                         matchPossible = gincost_scalararrayopexpr(root,
7377                                                                                                           index,
7378                                                                                                           qinfo,
7379                                                                                                           numEntries,
7380                                                                                                           &counts);
7381                         if (!matchPossible)
7382                                 break;
7383                 }
7384                 else
7385                 {
7386                         /* shouldn't be anything else for a GIN index */
7387                         elog(ERROR, "unsupported GIN indexqual type: %d",
7388                                  (int) nodeTag(clause));
7389                 }
7390         }
7391
7392         /* Fall out if there were any provably-unsatisfiable quals */
7393         if (!matchPossible)
7394         {
7395                 *indexStartupCost = 0;
7396                 *indexTotalCost = 0;
7397                 *indexSelectivity = 0;
7398                 return;
7399         }
7400
7401         if (counts.haveFullScan || indexQuals == NIL)
7402         {
7403                 /*
7404                  * Full index scan will be required.  We treat this as if every key in
7405                  * the index had been listed in the query; is that reasonable?
7406                  */
7407                 counts.partialEntries = 0;
7408                 counts.exactEntries = numEntries;
7409                 counts.searchEntries = numEntries;
7410         }
7411
7412         /* Will we have more than one iteration of a nestloop scan? */
7413         outer_scans = loop_count;
7414
7415         /*
7416          * Compute the cost to begin the scan.  First of all, account for the
7417          * pending list, which must be scanned in full.
7418          */
7419         entryPagesFetched = numPendingPages;
7420
7421         /*
7422          * Estimate the number of entry pages read.  We need to perform
7423          * counts.searchEntries searches; model each one's page fetches as a
7424          * small power of the entry-page count, since leaf pages typically hold
7425          * many entries.  This includes all entry-tree searches, including the
7426          * search for the first entry in the partial-match algorithm.
7427          */
7428         entryPagesFetched += ceil(counts.searchEntries * rint(pow(numEntryPages, 0.15)));
7429
7430         /*
7431          * Add an estimate of the entry pages read by the partial-match
7432          * algorithm, which scans leaf pages of the entry tree.  We have no
7433          * useful statistics here, so estimate it as a proportion.  Because
7434          * counts.partialEntries is really pretty bogus (see code above), it can
7435          * exceed numEntries; clamp the proportion to keep it sane.
7436          */
7437         partialScale = counts.partialEntries / numEntries;
7438         partialScale = Min(partialScale, 1.0);
7439
7440         entryPagesFetched += ceil(numEntryPages * partialScale);
7441
7442         /*
7443          * The partial-match algorithm reads all of its data pages before the
7444          * actual scan begins, so charge that as startup cost.  Again, we have
7445          * no useful statistics here, so estimate it as a proportion.
7446          */
7447         dataPagesFetched = ceil(numDataPages * partialScale);
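
    /*
     * Editor's illustrative note, continuing the numbers above: with
     * numEntryPages = 840, numDataPages = 336, numEntries = 60000,
     * numPendingPages = 5, counts.searchEntries = 3, and
     * counts.partialEntries = 100, pow(840, 0.15) is about 2.75, which rint
     * rounds to 3, so entryPagesFetched = 5 + ceil(3 * 3) = 14; partialScale
     * is 100/60000, adding ceil(840 * 0.00167) = 2 more entry pages and
     * ceil(336 * 0.00167) = 1 startup data page.  With a single scan and the
     * default random_page_cost of 4.0, the startup charge computed below is
     * (16 + 1) * 4.0 = 68.
     */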
7448
7449         /*
7450          * Calculate cache effects if more than one scan due to nestloops or array
7451          * quals.  The result is pro-rated per nestloop scan, but the array qual
7452          * factor shouldn't be pro-rated (compare genericcostestimate).
7453          */
7454         if (outer_scans > 1 || counts.arrayScans > 1)
7455         {
7456                 entryPagesFetched *= outer_scans * counts.arrayScans;
7457                 entryPagesFetched = index_pages_fetched(entryPagesFetched,
7458                                                                                                 (BlockNumber) numEntryPages,
7459                                                                                                 numEntryPages, root);
7460                 entryPagesFetched /= outer_scans;
7461                 dataPagesFetched *= outer_scans * counts.arrayScans;
7462                 dataPagesFetched = index_pages_fetched(dataPagesFetched,
7463                                                                                            (BlockNumber) numDataPages,
7464                                                                                            numDataPages, root);
7465                 dataPagesFetched /= outer_scans;
7466         }
7467
7468         /*
7469          * Here we use random page cost because logically-close pages could be far
7470          * apart on disk.
7471          */
7472         *indexStartupCost = (entryPagesFetched + dataPagesFetched) * spc_random_page_cost;
7473
7474         /*
7475          * Now compute the number of data pages fetched during the scan.
7476          *
7477          * We assume that every entry has the same number of items and that
7478          * there is no overlap between them.  (XXX: tsvector and array opclasses
7479          * collect statistics on the frequency of individual keys; it would be
7480          * nice to use those here.)
7481          */
7482         dataPagesFetched = ceil(numDataPages * counts.exactEntries / numEntries);
7483
7484         /*
7485          * If there is a lot of overlap among the entries, in particular if one of
7486          * the entries is very frequent, the above calculation can grossly
7487          * under-estimate.  As a simple cross-check, calculate a lower bound based
7488          * on the overall selectivity of the quals.  At a minimum, we must read
7489          * one item pointer for each matching entry.
7490          *
7491          * The width of each item pointer varies, based on the level of
7492          * compression.  We don't have statistics on that, but an average of
7493          * around 3 bytes per item is fairly typical.
7494          */
7495         dataPagesFetchedBySel = ceil(*indexSelectivity *
7496                                                                  (numTuples / (BLCKSZ / 3)));
7497         if (dataPagesFetchedBySel > dataPagesFetched)
7498                 dataPagesFetched = dataPagesFetchedBySel;
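
    /*
     * Editor's illustrative note, continuing the same numbers: the entry-based
     * estimate above is ceil(336 * 2 / 60000) = 1 data page, while with
     * numTuples = 1e6, an index selectivity of 0.01, and the default 8 kB
     * BLCKSZ the lower bound is ceil(0.01 * 1e6 / 2730) = 4 pages, so the
     * larger value, 4, is used.
     */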
7499
7500         /* Account for cache effects, the same as above */
7501         if (outer_scans > 1 || counts.arrayScans > 1)
7502         {
7503                 dataPagesFetched *= outer_scans * counts.arrayScans;
7504                 dataPagesFetched = index_pages_fetched(dataPagesFetched,
7505                                                                                            (BlockNumber) numDataPages,
7506                                                                                            numDataPages, root);
7507                 dataPagesFetched /= outer_scans;
7508         }
7509
7510         /* And apply random_page_cost as the cost per page */
7511         *indexTotalCost = *indexStartupCost +
7512                 dataPagesFetched * spc_random_page_cost;
7513
7514         /*
7515          * Add on index qual eval costs, much as in genericcostestimate
7516          */
7517         qual_arg_cost = other_operands_eval_cost(root, qinfos) +
7518                 orderby_operands_eval_cost(root, path);
7519         qual_op_cost = cpu_operator_cost *
7520                 (list_length(indexQuals) + list_length(indexOrderBys));
7521
7522         *indexStartupCost += qual_arg_cost;
7523         *indexTotalCost += qual_arg_cost;
7524         *indexTotalCost += (numTuples * *indexSelectivity) * (cpu_index_tuple_cost + qual_op_cost);
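
    /*
     * Editor's illustrative note: with one indexqual, no ORDER BY operators,
     * and the default cpu_operator_cost of 0.0025, qual_op_cost is 0.0025;
     * with numTuples = 1e6, an index selectivity of 0.01, and the default
     * cpu_index_tuple_cost of 0.005, the last line above adds
     * 10000 * (0.005 + 0.0025) = 75 to the total cost, on top of
     * qual_arg_cost (the cost of evaluating the comparison values themselves).
     */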
7525 }
7526
7527 /*
7528  * BRIN has search behavior completely different from other index types
7529  */
7530 void
7531 brincostestimate(PlannerInfo *root, IndexPath *path, double loop_count,
7532                                  Cost *indexStartupCost, Cost *indexTotalCost,
7533                                  Selectivity *indexSelectivity, double *indexCorrelation)
7534 {
7535         IndexOptInfo *index = path->indexinfo;
7536         List       *indexQuals = path->indexquals;
7537         List       *indexOrderBys = path->indexorderbys;
7538         double          numPages = index->pages;
7539         double          numTuples = index->tuples;
7540         List       *qinfos;
7541         Cost            spc_seq_page_cost;
7542         Cost            spc_random_page_cost;
7543         double          qual_op_cost;
7544         double          qual_arg_cost;
7545
7546         /* Do preliminary analysis of indexquals */
7547         qinfos = deconstruct_indexquals(path);
7548
7549         /* fetch estimated page cost for tablespace containing index */
7550         get_tablespace_page_costs(index->reltablespace,
7551                                                           &spc_random_page_cost,
7552                                                           &spc_seq_page_cost);
7553
7554         /*
7555          * BRIN indexes are always read in full; use that as startup cost.
7556          *
7557          * XXX maybe only include revmap pages here?
7558          */
7559         *indexStartupCost = spc_seq_page_cost * numPages * loop_count;
7560
7561         /*
7562          * To read a BRIN index there might be a bit of back and forth over
7563          * regular pages, as revmap might point to them out of sequential order;
7564          * calculate this as reading the whole index in random order.
7565          */
7566         *indexTotalCost = spc_random_page_cost * numPages * loop_count;
7567
7568         *indexSelectivity =
7569                 clauselist_selectivity(root, indexQuals,
7570                                                            path->indexinfo->rel->relid,
7571                                                            JOIN_INNER, NULL);
7572         *indexCorrelation = 1;
7573
7574         /*
7575          * Add on index qual eval costs, much as in genericcostestimate.
7576          */
7577         qual_arg_cost = other_operands_eval_cost(root, qinfos) +
7578                 orderby_operands_eval_cost(root, path);
7579         qual_op_cost = cpu_operator_cost *
7580                 (list_length(indexQuals) + list_length(indexOrderBys));
7581
7582         *indexStartupCost += qual_arg_cost;
7583         *indexTotalCost += qual_arg_cost;
7584         *indexTotalCost += (numTuples * *indexSelectivity) * (cpu_index_tuple_cost + qual_op_cost);
7585
7586         /* XXX what about pages_per_range? */
7587 }
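
/*
 * Editor's illustrative sketch, not part of the planner: the base BRIN
 * charges above, restated as a tiny standalone helper.  The function name
 * and the SELFUNCS_COST_EXAMPLES guard are hypothetical; the formulas mirror
 * brincostestimate before the qual-eval add-ons.  With 64 index pages,
 * loop_count = 1, and the default seq/random page costs of 1.0 and 4.0, this
 * gives a startup cost of 64 and a base total cost of 256.
 */
#ifdef SELFUNCS_COST_EXAMPLES
static void
example_brin_base_costs(double numPages, double loop_count,
                        Cost spc_seq_page_cost, Cost spc_random_page_cost,
                        Cost *startupCost, Cost *totalCost)
{
    /* the whole index is read up front, at sequential-read cost */
    *startupCost = spc_seq_page_cost * numPages * loop_count;

    /* regular pages may be visited out of revmap order: charge random I/O */
    *totalCost = spc_random_page_cost * numPages * loop_count;
}
#endif                          /* SELFUNCS_COST_EXAMPLES */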