1 /*-------------------------------------------------------------------------
2  *
3  * selfuncs.c
4  *        Selectivity functions and index cost estimation functions for
5  *        standard operators and index access methods.
6  *
7  *        Selectivity routines are registered in the pg_operator catalog
8  *        in the "oprrest" and "oprjoin" attributes.
9  *
10  *        Index cost functions are located via the index AM's API struct,
11  *        which is obtained from the handler function registered in pg_am.
12  *
13  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
14  * Portions Copyright (c) 1994, Regents of the University of California
15  *
16  *
17  * IDENTIFICATION
18  *        src/backend/utils/adt/selfuncs.c
19  *
20  *-------------------------------------------------------------------------
21  */
22
23 /*----------
24  * Operator selectivity estimation functions are called to estimate the
25  * selectivity of WHERE clauses whose top-level operator is their operator.
26  * We divide the problem into two cases:
27  *              Restriction clause estimation: the clause involves vars of just
28  *                      one relation.
29  *              Join clause estimation: the clause involves vars of multiple rels.
30  * Join selectivity estimation is far more difficult and usually less accurate
31  * than restriction estimation.
32  *
33  * When dealing with the inner scan of a nestloop join, we consider the
34  * join's joinclauses as restriction clauses for the inner relation, and
35  * treat vars of the outer relation as parameters (a/k/a constants of unknown
36  * values).  So, restriction estimators need to be able to accept an argument
37  * telling which relation is to be treated as the variable.
38  *
39  * The call convention for a restriction estimator (oprrest function) is
40  *
41  *              Selectivity oprrest (PlannerInfo *root,
42  *                                                       Oid operator,
43  *                                                       List *args,
44  *                                                       int varRelid);
45  *
46  * root: general information about the query (rtable and RelOptInfo lists
47  * are particularly important for the estimator).
48  * operator: OID of the specific operator in question.
49  * args: argument list from the operator clause.
50  * varRelid: if not zero, the relid (rtable index) of the relation to
51  * be treated as the variable relation.  May be zero if the args list
52  * is known to contain vars of only one relation.
53  *
54  * This is represented at the SQL level (in pg_proc) as
55  *
56  *              float8 oprrest (internal, oid, internal, int4);
57  *
58  * The result is a selectivity, that is, a fraction (0 to 1) of the rows
59  * of the relation that are expected to produce a TRUE result for the
60  * given operator.
61  *
62  * The call convention for a join estimator (oprjoin function) is similar
63  * except that varRelid is not needed, and instead join information is
64  * supplied:
65  *
66  *              Selectivity oprjoin (PlannerInfo *root,
67  *                                                       Oid operator,
68  *                                                       List *args,
69  *                                                       JoinType jointype,
70  *                                                       SpecialJoinInfo *sjinfo);
71  *
72  *              float8 oprjoin (internal, oid, internal, int2, internal);
73  *
74  * (Before Postgres 8.4, join estimators had only the first four of these
75  * parameters.  That signature is still allowed, but deprecated.)  The
76  * relationship between jointype and sjinfo is explained in the comments for
77  * clause_selectivity() --- the short version is that jointype is usually
78  * best ignored in favor of examining sjinfo.
79  *
80  * Join selectivity for regular inner and outer joins is defined as the
81  * fraction (0 to 1) of the cross product of the relations that is expected
82  * to produce a TRUE result for the given operator.  For both semi and anti
83  * joins, however, the selectivity is defined as the fraction of the left-hand
84  * side relation's rows that are expected to have a match (ie, at least one
85  * row with a TRUE result) in the right-hand side.
86  *
87  * For both oprrest and oprjoin functions, the operator's input collation OID
88  * (if any) is passed using the standard fmgr mechanism, so that the estimator
89  * function can fetch it with PG_GET_COLLATION().  Note, however, that all
90  * statistics in pg_statistic are currently built using the database's default
91  * collation.  Thus, in most cases where we are looking at statistics, we
92  * should ignore the actual operator collation and use DEFAULT_COLLATION_OID.
93  * We expect that the error induced by doing this is usually not large enough
94  * to justify complicating matters.
95  *----------
96  */
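
/*----------
 * Purely as an illustration of the calling convention above (the function
 * name here is hypothetical, not an actual PostgreSQL function): a trivial
 * restriction estimator that always punts to the default equality estimate
 * could be written as
 *
 *		Datum
 *		hypothetical_opsel(PG_FUNCTION_ARGS)
 *		{
 *			PG_RETURN_FLOAT8(DEFAULT_EQ_SEL);
 *		}
 *
 * (plus the usual PG_FUNCTION_INFO_V1 boilerplate if built as an extension),
 * declared at the SQL level as
 *
 *		float8 hypothetical_opsel (internal, oid, internal, int4);
 *
 * so that it can be named as an operator's "oprrest" entry.
 *----------
 */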
97
98 #include "postgres.h"
99
100 #include <ctype.h>
101 #include <math.h>
102
103 #include "access/gin.h"
104 #include "access/htup_details.h"
105 #include "access/sysattr.h"
106 #include "catalog/index.h"
107 #include "catalog/pg_am.h"
108 #include "catalog/pg_collation.h"
109 #include "catalog/pg_operator.h"
110 #include "catalog/pg_opfamily.h"
111 #include "catalog/pg_statistic.h"
112 #include "catalog/pg_type.h"
113 #include "executor/executor.h"
114 #include "mb/pg_wchar.h"
115 #include "nodes/makefuncs.h"
116 #include "nodes/nodeFuncs.h"
117 #include "optimizer/clauses.h"
118 #include "optimizer/cost.h"
119 #include "optimizer/pathnode.h"
120 #include "optimizer/paths.h"
121 #include "optimizer/plancat.h"
122 #include "optimizer/predtest.h"
123 #include "optimizer/restrictinfo.h"
124 #include "optimizer/var.h"
125 #include "parser/parse_clause.h"
126 #include "parser/parse_coerce.h"
127 #include "parser/parsetree.h"
128 #include "utils/builtins.h"
129 #include "utils/bytea.h"
130 #include "utils/date.h"
131 #include "utils/datum.h"
132 #include "utils/fmgroids.h"
133 #include "utils/index_selfuncs.h"
134 #include "utils/lsyscache.h"
135 #include "utils/nabstime.h"
136 #include "utils/pg_locale.h"
137 #include "utils/rel.h"
138 #include "utils/selfuncs.h"
139 #include "utils/spccache.h"
140 #include "utils/syscache.h"
141 #include "utils/timestamp.h"
142 #include "utils/tqual.h"
143 #include "utils/typcache.h"
144
145
146 /* Hooks for plugins to get control when we ask for stats */
147 get_relation_stats_hook_type get_relation_stats_hook = NULL;
148 get_index_stats_hook_type get_index_stats_hook = NULL;
149
150 static double var_eq_const(VariableStatData *vardata, Oid operator,
151                          Datum constval, bool constisnull,
152                          bool varonleft);
153 static double var_eq_non_const(VariableStatData *vardata, Oid operator,
154                                  Node *other,
155                                  bool varonleft);
156 static double ineq_histogram_selectivity(PlannerInfo *root,
157                                                    VariableStatData *vardata,
158                                                    FmgrInfo *opproc, bool isgt,
159                                                    Datum constval, Oid consttype);
160 static double eqjoinsel_inner(Oid operator,
161                                 VariableStatData *vardata1, VariableStatData *vardata2);
162 static double eqjoinsel_semi(Oid operator,
163                            VariableStatData *vardata1, VariableStatData *vardata2,
164                            RelOptInfo *inner_rel);
165 static bool convert_to_scalar(Datum value, Oid valuetypid, double *scaledvalue,
166                                   Datum lobound, Datum hibound, Oid boundstypid,
167                                   double *scaledlobound, double *scaledhibound);
168 static double convert_numeric_to_scalar(Datum value, Oid typid);
169 static void convert_string_to_scalar(char *value,
170                                                  double *scaledvalue,
171                                                  char *lobound,
172                                                  double *scaledlobound,
173                                                  char *hibound,
174                                                  double *scaledhibound);
175 static void convert_bytea_to_scalar(Datum value,
176                                                 double *scaledvalue,
177                                                 Datum lobound,
178                                                 double *scaledlobound,
179                                                 Datum hibound,
180                                                 double *scaledhibound);
181 static double convert_one_string_to_scalar(char *value,
182                                                          int rangelo, int rangehi);
183 static double convert_one_bytea_to_scalar(unsigned char *value, int valuelen,
184                                                         int rangelo, int rangehi);
185 static char *convert_string_datum(Datum value, Oid typid);
186 static double convert_timevalue_to_scalar(Datum value, Oid typid);
187 static void examine_simple_variable(PlannerInfo *root, Var *var,
188                                                 VariableStatData *vardata);
189 static bool get_variable_range(PlannerInfo *root, VariableStatData *vardata,
190                                    Oid sortop, Datum *min, Datum *max);
191 static bool get_actual_variable_range(PlannerInfo *root,
192                                                   VariableStatData *vardata,
193                                                   Oid sortop,
194                                                   Datum *min, Datum *max);
195 static RelOptInfo *find_join_input_rel(PlannerInfo *root, Relids relids);
196 static Selectivity prefix_selectivity(PlannerInfo *root,
197                                    VariableStatData *vardata,
198                                    Oid vartype, Oid opfamily, Const *prefixcon);
199 static Selectivity like_selectivity(const char *patt, int pattlen,
200                                  bool case_insensitive);
201 static Selectivity regex_selectivity(const char *patt, int pattlen,
202                                   bool case_insensitive,
203                                   int fixed_prefix_len);
204 static Datum string_to_datum(const char *str, Oid datatype);
205 static Const *string_to_const(const char *str, Oid datatype);
206 static Const *string_to_bytea_const(const char *str, size_t str_len);
207 static List *add_predicate_to_quals(IndexOptInfo *index, List *indexQuals);
208
209
210 /*
211  *              eqsel                   - Selectivity of "=" for any data types.
212  *
213  * Note: this routine is also used to estimate selectivity for some
214  * operators that are not "=" but have comparable selectivity behavior,
215  * such as "~=" (geometric approximate-match).  Even for "=", we must
216  * keep in mind that the left and right datatypes may differ.
217  */
218 Datum
219 eqsel(PG_FUNCTION_ARGS)
220 {
221         PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
222         Oid                     operator = PG_GETARG_OID(1);
223         List       *args = (List *) PG_GETARG_POINTER(2);
224         int                     varRelid = PG_GETARG_INT32(3);
225         VariableStatData vardata;
226         Node       *other;
227         bool            varonleft;
228         double          selec;
229
230         /*
231          * If expression is not variable = something or something = variable, then
232          * punt and return a default estimate.
233          */
234         if (!get_restriction_variable(root, args, varRelid,
235                                                                   &vardata, &other, &varonleft))
236                 PG_RETURN_FLOAT8(DEFAULT_EQ_SEL);
237
238         /*
239          * We can do a lot better if the something is a constant.  (Note: the
240          * Const might result from estimation rather than being a simple constant
241          * in the query.)
242          */
243         if (IsA(other, Const))
244                 selec = var_eq_const(&vardata, operator,
245                                                          ((Const *) other)->constvalue,
246                                                          ((Const *) other)->constisnull,
247                                                          varonleft);
248         else
249                 selec = var_eq_non_const(&vardata, operator, other,
250                                                                  varonleft);
251
252         ReleaseVariableStats(vardata);
253
254         PG_RETURN_FLOAT8((float8) selec);
255 }
256
257 /*
258  * var_eq_const --- eqsel for var = const case
259  *
260  * This is split out so that some other estimation functions can use it.
261  */
262 static double
263 var_eq_const(VariableStatData *vardata, Oid operator,
264                          Datum constval, bool constisnull,
265                          bool varonleft)
266 {
267         double          selec;
268         bool            isdefault;
269
270         /*
271          * If the constant is NULL, assume operator is strict and return zero, ie,
272          * operator will never return TRUE.
273          */
274         if (constisnull)
275                 return 0.0;
276
277         /*
278          * If we matched the var to a unique index or DISTINCT clause, assume
279          * there is exactly one match regardless of anything else.  (This is
280          * slightly bogus, since the index or clause's equality operator might be
281          * different from ours, but it's much more likely to be right than
282          * ignoring the information.)
283          */
284         if (vardata->isunique && vardata->rel && vardata->rel->tuples >= 1.0)
285                 return 1.0 / vardata->rel->tuples;
286
287         if (HeapTupleIsValid(vardata->statsTuple))
288         {
289                 Form_pg_statistic stats;
290                 Datum      *values;
291                 int                     nvalues;
292                 float4     *numbers;
293                 int                     nnumbers;
294                 bool            match = false;
295                 int                     i;
296
297                 stats = (Form_pg_statistic) GETSTRUCT(vardata->statsTuple);
298
299                 /*
300                  * Is the constant "=" to any of the column's most common values?
301                  * (Although the given operator may not really be "=", we will assume
302                  * that seeing whether it returns TRUE is an appropriate test.  If you
303                  * don't like this, maybe you shouldn't be using eqsel for your
304                  * operator...)
305                  */
306                 if (get_attstatsslot(vardata->statsTuple,
307                                                          vardata->atttype, vardata->atttypmod,
308                                                          STATISTIC_KIND_MCV, InvalidOid,
309                                                          NULL,
310                                                          &values, &nvalues,
311                                                          &numbers, &nnumbers))
312                 {
313                         FmgrInfo        eqproc;
314
315                         fmgr_info(get_opcode(operator), &eqproc);
316
317                         for (i = 0; i < nvalues; i++)
318                         {
319                                 /* be careful to apply operator right way 'round */
320                                 if (varonleft)
321                                         match = DatumGetBool(FunctionCall2Coll(&eqproc,
322                                                                                                            DEFAULT_COLLATION_OID,
323                                                                                                                    values[i],
324                                                                                                                    constval));
325                                 else
326                                         match = DatumGetBool(FunctionCall2Coll(&eqproc,
327                                                                                                            DEFAULT_COLLATION_OID,
328                                                                                                                    constval,
329                                                                                                                    values[i]));
330                                 if (match)
331                                         break;
332                         }
333                 }
334                 else
335                 {
336                         /* no most-common-value info available */
337                         values = NULL;
338                         numbers = NULL;
339                         i = nvalues = nnumbers = 0;
340                 }
341
342                 if (match)
343                 {
344                         /*
345                          * Constant is "=" to this common value.  We know selectivity
346                          * exactly (or as exactly as ANALYZE could calculate it, anyway).
347                          */
348                         selec = numbers[i];
349                 }
350                 else
351                 {
352                         /*
353                          * Comparison is against a constant that is neither NULL nor any
354                          * of the common values.  Its selectivity cannot be more than
355                          * this:
356                          */
357                         double          sumcommon = 0.0;
358                         double          otherdistinct;
359
360                         for (i = 0; i < nnumbers; i++)
361                                 sumcommon += numbers[i];
362                         selec = 1.0 - sumcommon - stats->stanullfrac;
363                         CLAMP_PROBABILITY(selec);
364
365                         /*
366                          * and in fact it's probably a good deal less. We approximate that
367                          * all the not-common values share this remaining fraction
368                          * equally, so we divide by the number of other distinct values.
369                          */
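                        /*
                         * (Worked example: with stanullfrac = 0.1 and the MCV
                         * fractions summing to 0.5, the leftover fraction is
                         * 0.4; if ndistinct is estimated at 45 with 5 MCV
                         * entries, otherdistinct is 40 and the per-value
                         * estimate becomes 0.4 / 40 = 0.01.)
                         */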
370                         otherdistinct = get_variable_numdistinct(vardata, &isdefault) - nnumbers;
371                         if (otherdistinct > 1)
372                                 selec /= otherdistinct;
373
374                         /*
375                          * Another cross-check: selectivity shouldn't be estimated as more
376                          * than the least common "most common value".
377                          */
378                         if (nnumbers > 0 && selec > numbers[nnumbers - 1])
379                                 selec = numbers[nnumbers - 1];
380                 }
381
382                 free_attstatsslot(vardata->atttype, values, nvalues,
383                                                   numbers, nnumbers);
384         }
385         else
386         {
387                 /*
388                  * No ANALYZE stats available, so make a guess using estimated number
389                  * of distinct values and assuming they are equally common. (The guess
390                  * is unlikely to be very good, but we do know a few special cases.)
391                  */
392                 selec = 1.0 / get_variable_numdistinct(vardata, &isdefault);
393         }
394
395         /* result should be in range, but make sure... */
396         CLAMP_PROBABILITY(selec);
397
398         return selec;
399 }
400
401 /*
402  * var_eq_non_const --- eqsel for var = something-other-than-const case
403  */
404 static double
405 var_eq_non_const(VariableStatData *vardata, Oid operator,
406                                  Node *other,
407                                  bool varonleft)
408 {
409         double          selec;
410         bool            isdefault;
411
412         /*
413          * If we matched the var to a unique index or DISTINCT clause, assume
414          * there is exactly one match regardless of anything else.  (This is
415          * slightly bogus, since the index or clause's equality operator might be
416          * different from ours, but it's much more likely to be right than
417          * ignoring the information.)
418          */
419         if (vardata->isunique && vardata->rel && vardata->rel->tuples >= 1.0)
420                 return 1.0 / vardata->rel->tuples;
421
422         if (HeapTupleIsValid(vardata->statsTuple))
423         {
424                 Form_pg_statistic stats;
425                 double          ndistinct;
426                 float4     *numbers;
427                 int                     nnumbers;
428
429                 stats = (Form_pg_statistic) GETSTRUCT(vardata->statsTuple);
430
431                 /*
432                  * Search is for a value that we do not know a priori, but we will
433                  * assume it is not NULL.  Estimate the selectivity as non-null
434                  * fraction divided by number of distinct values, so that we get a
435                  * result averaged over all possible values whether common or
436                  * uncommon.  (Essentially, we are assuming that the not-yet-known
437                  * comparison value is equally likely to be any of the possible
438                  * values, regardless of their frequency in the table.  Is that a good
439                  * idea?)
440                  */
441                 selec = 1.0 - stats->stanullfrac;
442                 ndistinct = get_variable_numdistinct(vardata, &isdefault);
443                 if (ndistinct > 1)
444                         selec /= ndistinct;
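                /*
                 * (For instance, stanullfrac = 0.2 and an estimated 100
                 * distinct values give (1 - 0.2) / 100 = 0.008.)
                 */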
445
446                 /*
447                  * Cross-check: selectivity should never be estimated as more than the
448                  * most common value's.
449                  */
450                 if (get_attstatsslot(vardata->statsTuple,
451                                                          vardata->atttype, vardata->atttypmod,
452                                                          STATISTIC_KIND_MCV, InvalidOid,
453                                                          NULL,
454                                                          NULL, NULL,
455                                                          &numbers, &nnumbers))
456                 {
457                         if (nnumbers > 0 && selec > numbers[0])
458                                 selec = numbers[0];
459                         free_attstatsslot(vardata->atttype, NULL, 0, numbers, nnumbers);
460                 }
461         }
462         else
463         {
464                 /*
465                  * No ANALYZE stats available, so make a guess using estimated number
466                  * of distinct values and assuming they are equally common. (The guess
467                  * is unlikely to be very good, but we do know a few special cases.)
468                  */
469                 selec = 1.0 / get_variable_numdistinct(vardata, &isdefault);
470         }
471
472         /* result should be in range, but make sure... */
473         CLAMP_PROBABILITY(selec);
474
475         return selec;
476 }
477
478 /*
479  *              neqsel                  - Selectivity of "!=" for any data types.
480  *
481  * This routine is also used for some operators that are not "!="
482  * but have comparable selectivity behavior.  See above comments
483  * for eqsel().
484  */
485 Datum
486 neqsel(PG_FUNCTION_ARGS)
487 {
488         PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
489         Oid                     operator = PG_GETARG_OID(1);
490         List       *args = (List *) PG_GETARG_POINTER(2);
491         int                     varRelid = PG_GETARG_INT32(3);
492         Oid                     eqop;
493         float8          result;
494
495         /*
496          * We want 1 - eqsel() where the equality operator is the one associated
497          * with this != operator, that is, its negator.
498          */
499         eqop = get_negator(operator);
500         if (eqop)
501         {
502                 result = DatumGetFloat8(DirectFunctionCall4(eqsel,
503                                                                                                         PointerGetDatum(root),
504                                                                                                         ObjectIdGetDatum(eqop),
505                                                                                                         PointerGetDatum(args),
506                                                                                                         Int32GetDatum(varRelid)));
507         }
508         else
509         {
510                 /* Use default selectivity (should we raise an error instead?) */
511                 result = DEFAULT_EQ_SEL;
512         }
513         result = 1.0 - result;
514         PG_RETURN_FLOAT8(result);
515 }
516
517 /*
518  *      scalarineqsel           - Selectivity of "<", "<=", ">", ">=" for scalars.
519  *
520  * This is the guts of both scalarltsel and scalargtsel.  The caller has
521  * commuted the clause, if necessary, so that we can treat the variable as
522  * being on the left.  The caller must also make sure that the other side
 523  * of the clause is a non-null Const, and dissect it into a value and
524  * datatype.
525  *
526  * This routine works for any datatype (or pair of datatypes) known to
527  * convert_to_scalar().  If it is applied to some other datatype,
528  * it will return a default estimate.
529  */
530 static double
531 scalarineqsel(PlannerInfo *root, Oid operator, bool isgt,
532                           VariableStatData *vardata, Datum constval, Oid consttype)
533 {
534         Form_pg_statistic stats;
535         FmgrInfo        opproc;
536         double          mcv_selec,
537                                 hist_selec,
538                                 sumcommon;
539         double          selec;
540
541         if (!HeapTupleIsValid(vardata->statsTuple))
542         {
543                 /* no stats available, so default result */
544                 return DEFAULT_INEQ_SEL;
545         }
546         stats = (Form_pg_statistic) GETSTRUCT(vardata->statsTuple);
547
548         fmgr_info(get_opcode(operator), &opproc);
549
550         /*
551          * If we have most-common-values info, add up the fractions of the MCV
552          * entries that satisfy MCV OP CONST.  These fractions contribute directly
553          * to the result selectivity.  Also add up the total fraction represented
554          * by MCV entries.
555          */
556         mcv_selec = mcv_selectivity(vardata, &opproc, constval, true,
557                                                                 &sumcommon);
558
559         /*
560          * If there is a histogram, determine which bin the constant falls in, and
561          * compute the resulting contribution to selectivity.
562          */
563         hist_selec = ineq_histogram_selectivity(root, vardata, &opproc, isgt,
564                                                                                         constval, consttype);
565
566         /*
567          * Now merge the results from the MCV and histogram calculations,
568          * realizing that the histogram covers only the non-null values that are
569          * not listed in MCV.
570          */
571         selec = 1.0 - stats->stanullfrac - sumcommon;
572
573         if (hist_selec >= 0.0)
574                 selec *= hist_selec;
575         else
576         {
577                 /*
578                  * If no histogram but there are values not accounted for by MCV,
579                  * arbitrarily assume half of them will match.
580                  */
581                 selec *= 0.5;
582         }
583
584         selec += mcv_selec;
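        /*
         * (Worked example: stanullfrac = 0.05, sumcommon = 0.25, hist_selec =
         * 0.40 and mcv_selec = 0.10 combine to
         * (1 - 0.05 - 0.25) * 0.40 + 0.10 = 0.38.)
         */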
585
586         /* result should be in range, but make sure... */
587         CLAMP_PROBABILITY(selec);
588
589         return selec;
590 }
591
592 /*
593  *      mcv_selectivity                 - Examine the MCV list for selectivity estimates
594  *
595  * Determine the fraction of the variable's MCV population that satisfies
596  * the predicate (VAR OP CONST), or (CONST OP VAR) if !varonleft.  Also
597  * compute the fraction of the total column population represented by the MCV
598  * list.  This code will work for any boolean-returning predicate operator.
599  *
600  * The function result is the MCV selectivity, and the fraction of the
601  * total population is returned into *sumcommonp.  Zeroes are returned
602  * if there is no MCV list.
603  */
604 double
605 mcv_selectivity(VariableStatData *vardata, FmgrInfo *opproc,
606                                 Datum constval, bool varonleft,
607                                 double *sumcommonp)
608 {
609         double          mcv_selec,
610                                 sumcommon;
611         Datum      *values;
612         int                     nvalues;
613         float4     *numbers;
614         int                     nnumbers;
615         int                     i;
616
617         mcv_selec = 0.0;
618         sumcommon = 0.0;
619
620         if (HeapTupleIsValid(vardata->statsTuple) &&
621                 get_attstatsslot(vardata->statsTuple,
622                                                  vardata->atttype, vardata->atttypmod,
623                                                  STATISTIC_KIND_MCV, InvalidOid,
624                                                  NULL,
625                                                  &values, &nvalues,
626                                                  &numbers, &nnumbers))
627         {
628                 for (i = 0; i < nvalues; i++)
629                 {
630                         if (varonleft ?
631                                 DatumGetBool(FunctionCall2Coll(opproc,
632                                                                                            DEFAULT_COLLATION_OID,
633                                                                                            values[i],
634                                                                                            constval)) :
635                                 DatumGetBool(FunctionCall2Coll(opproc,
636                                                                                            DEFAULT_COLLATION_OID,
637                                                                                            constval,
638                                                                                            values[i])))
639                                 mcv_selec += numbers[i];
640                         sumcommon += numbers[i];
641                 }
642                 free_attstatsslot(vardata->atttype, values, nvalues,
643                                                   numbers, nnumbers);
644         }
645
646         *sumcommonp = sumcommon;
647         return mcv_selec;
648 }
649
650 /*
651  *      histogram_selectivity   - Examine the histogram for selectivity estimates
652  *
653  * Determine the fraction of the variable's histogram entries that satisfy
654  * the predicate (VAR OP CONST), or (CONST OP VAR) if !varonleft.
655  *
656  * This code will work for any boolean-returning predicate operator, whether
657  * or not it has anything to do with the histogram sort operator.  We are
658  * essentially using the histogram just as a representative sample.  However,
659  * small histograms are unlikely to be all that representative, so the caller
660  * should be prepared to fall back on some other estimation approach when the
661  * histogram is missing or very small.  It may also be prudent to combine this
662  * approach with another one when the histogram is small.
663  *
664  * If the actual histogram size is not at least min_hist_size, we won't bother
665  * to do the calculation at all.  Also, if the n_skip parameter is > 0, we
666  * ignore the first and last n_skip histogram elements, on the grounds that
667  * they are outliers and hence not very representative.  Typical values for
668  * these parameters are 10 and 1.
669  *
670  * The function result is the selectivity, or -1 if there is no histogram
671  * or it's smaller than min_hist_size.
672  *
673  * The output parameter *hist_size receives the actual histogram size,
674  * or zero if no histogram.  Callers may use this number to decide how
675  * much faith to put in the function result.
676  *
677  * Note that the result disregards both the most-common-values (if any) and
678  * null entries.  The caller is expected to combine this result with
679  * statistics for those portions of the column population.  It may also be
680  * prudent to clamp the result range, ie, disbelieve exact 0 or 1 outputs.
681  */
682 double
683 histogram_selectivity(VariableStatData *vardata, FmgrInfo *opproc,
684                                           Datum constval, bool varonleft,
685                                           int min_hist_size, int n_skip,
686                                           int *hist_size)
687 {
688         double          result;
689         Datum      *values;
690         int                     nvalues;
691
692         /* check sanity of parameters */
693         Assert(n_skip >= 0);
694         Assert(min_hist_size > 2 * n_skip);
695
696         if (HeapTupleIsValid(vardata->statsTuple) &&
697                 get_attstatsslot(vardata->statsTuple,
698                                                  vardata->atttype, vardata->atttypmod,
699                                                  STATISTIC_KIND_HISTOGRAM, InvalidOid,
700                                                  NULL,
701                                                  &values, &nvalues,
702                                                  NULL, NULL))
703         {
704                 *hist_size = nvalues;
705                 if (nvalues >= min_hist_size)
706                 {
707                         int                     nmatch = 0;
708                         int                     i;
709
710                         for (i = n_skip; i < nvalues - n_skip; i++)
711                         {
712                                 if (varonleft ?
713                                         DatumGetBool(FunctionCall2Coll(opproc,
714                                                                                                    DEFAULT_COLLATION_OID,
715                                                                                                    values[i],
716                                                                                                    constval)) :
717                                         DatumGetBool(FunctionCall2Coll(opproc,
718                                                                                                    DEFAULT_COLLATION_OID,
719                                                                                                    constval,
720                                                                                                    values[i])))
721                                         nmatch++;
722                         }
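                        /*
                         * (For instance, 30 matches among a 100-entry
                         * histogram with n_skip = 1 gives 30 / 98, or about
                         * 0.306.)
                         */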
723                         result = ((double) nmatch) / ((double) (nvalues - 2 * n_skip));
724                 }
725                 else
726                         result = -1;
727                 free_attstatsslot(vardata->atttype, values, nvalues, NULL, 0);
728         }
729         else
730         {
731                 *hist_size = 0;
732                 result = -1;
733         }
734
735         return result;
736 }
737
738 /*
739  *      ineq_histogram_selectivity      - Examine the histogram for scalarineqsel
740  *
741  * Determine the fraction of the variable's histogram population that
742  * satisfies the inequality condition, ie, VAR < CONST or VAR > CONST.
743  *
744  * Returns -1 if there is no histogram (valid results will always be >= 0).
745  *
746  * Note that the result disregards both the most-common-values (if any) and
747  * null entries.  The caller is expected to combine this result with
748  * statistics for those portions of the column population.
749  */
750 static double
751 ineq_histogram_selectivity(PlannerInfo *root,
752                                                    VariableStatData *vardata,
753                                                    FmgrInfo *opproc, bool isgt,
754                                                    Datum constval, Oid consttype)
755 {
756         double          hist_selec;
757         Oid                     hist_op;
758         Datum      *values;
759         int                     nvalues;
760
761         hist_selec = -1.0;
762
763         /*
764          * Someday, ANALYZE might store more than one histogram per rel/att,
765          * corresponding to more than one possible sort ordering defined for the
766          * column type.  However, to make that work we will need to figure out
767          * which staop to search for --- it's not necessarily the one we have at
768          * hand!  (For example, we might have a '<=' operator rather than the '<'
769          * operator that will appear in staop.)  For now, assume that whatever
770          * appears in pg_statistic is sorted the same way our operator sorts, or
771          * the reverse way if isgt is TRUE.
772          */
773         if (HeapTupleIsValid(vardata->statsTuple) &&
774                 get_attstatsslot(vardata->statsTuple,
775                                                  vardata->atttype, vardata->atttypmod,
776                                                  STATISTIC_KIND_HISTOGRAM, InvalidOid,
777                                                  &hist_op,
778                                                  &values, &nvalues,
779                                                  NULL, NULL))
780         {
781                 if (nvalues > 1)
782                 {
783                         /*
784                          * Use binary search to find proper location, ie, the first slot
785                          * at which the comparison fails.  (If the given operator isn't
786                          * actually sort-compatible with the histogram, you'll get garbage
787                          * results ... but probably not any more garbage-y than you would
788                          * from the old linear search.)
789                          *
790                          * If the binary search accesses the first or last histogram
791                          * entry, we try to replace that endpoint with the true column min
792                          * or max as found by get_actual_variable_range().  This
793                          * ameliorates misestimates when the min or max is moving as a
794                          * result of changes since the last ANALYZE.  Note that this could
795                          * result in effectively including MCVs into the histogram that
796                          * weren't there before, but we don't try to correct for that.
797                          */
798                         double          histfrac;
799                         int                     lobound = 0;    /* first possible slot to search */
800                         int                     hibound = nvalues;              /* last+1 slot to search */
801                         bool            have_end = false;
802
803                         /*
804                          * If there are only two histogram entries, we'll want up-to-date
805                          * values for both.  (If there are more than two, we need at most
806                          * one of them to be updated, so we deal with that within the
807                          * loop.)
808                          */
809                         if (nvalues == 2)
810                                 have_end = get_actual_variable_range(root,
811                                                                                                          vardata,
812                                                                                                          hist_op,
813                                                                                                          &values[0],
814                                                                                                          &values[1]);
815
816                         while (lobound < hibound)
817                         {
818                                 int                     probe = (lobound + hibound) / 2;
819                                 bool            ltcmp;
820
821                                 /*
822                                  * If we find ourselves about to compare to the first or last
823                                  * histogram entry, first try to replace it with the actual
824                                  * current min or max (unless we already did so above).
825                                  */
826                                 if (probe == 0 && nvalues > 2)
827                                         have_end = get_actual_variable_range(root,
828                                                                                                                  vardata,
829                                                                                                                  hist_op,
830                                                                                                                  &values[0],
831                                                                                                                  NULL);
832                                 else if (probe == nvalues - 1 && nvalues > 2)
833                                         have_end = get_actual_variable_range(root,
834                                                                                                                  vardata,
835                                                                                                                  hist_op,
836                                                                                                                  NULL,
837                                                                                                                  &values[probe]);
838
839                                 ltcmp = DatumGetBool(FunctionCall2Coll(opproc,
840                                                                                                            DEFAULT_COLLATION_OID,
841                                                                                                            values[probe],
842                                                                                                            constval));
843                                 if (isgt)
844                                         ltcmp = !ltcmp;
845                                 if (ltcmp)
846                                         lobound = probe + 1;
847                                 else
848                                         hibound = probe;
849                         }
850
851                         if (lobound <= 0)
852                         {
853                                 /* Constant is below lower histogram boundary. */
854                                 histfrac = 0.0;
855                         }
856                         else if (lobound >= nvalues)
857                         {
858                                 /* Constant is above upper histogram boundary. */
859                                 histfrac = 1.0;
860                         }
861                         else
862                         {
863                                 int                     i = lobound;
864                                 double          val,
865                                                         high,
866                                                         low;
867                                 double          binfrac;
868
869                                 /*
870                                  * We have values[i-1] <= constant <= values[i].
871                                  *
872                                  * Convert the constant and the two nearest bin boundary
873                                  * values to a uniform comparison scale, and do a linear
874                                  * interpolation within this bin.
875                                  */
876                                 if (convert_to_scalar(constval, consttype, &val,
877                                                                           values[i - 1], values[i],
878                                                                           vardata->vartype,
879                                                                           &low, &high))
880                                 {
881                                         if (high <= low)
882                                         {
883                                                 /* cope if bin boundaries appear identical */
884                                                 binfrac = 0.5;
885                                         }
886                                         else if (val <= low)
887                                                 binfrac = 0.0;
888                                         else if (val >= high)
889                                                 binfrac = 1.0;
890                                         else
891                                         {
892                                                 binfrac = (val - low) / (high - low);
893
894                                                 /*
895                                                  * Watch out for the possibility that we got a NaN or
896                                                  * Infinity from the division.  This can happen
897                                                  * despite the previous checks, if for example "low"
898                                                  * is -Infinity.
899                                                  */
900                                                 if (isnan(binfrac) ||
901                                                         binfrac < 0.0 || binfrac > 1.0)
902                                                         binfrac = 0.5;
903                                         }
904                                 }
905                                 else
906                                 {
907                                         /*
908                                          * Ideally we'd produce an error here, on the grounds that
909                                          * the given operator shouldn't have scalarXXsel
910                                          * registered as its selectivity func unless we can deal
911                                          * with its operand types.  But currently, all manner of
912                                          * stuff is invoking scalarXXsel, so give a default
913                                          * estimate until that can be fixed.
914                                          */
915                                         binfrac = 0.5;
916                                 }
917
918                                 /*
919                                  * Now, compute the overall selectivity across the values
920                                  * represented by the histogram.  We have i-1 full bins and
921                                  * binfrac partial bin below the constant.
922                                  */
923                                 histfrac = (double) (i - 1) + binfrac;
924                                 histfrac /= (double) (nvalues - 1);
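                                /*
                                 * (Worked example: a 101-entry histogram with
                                 * i = 41 and binfrac = 0.25 gives histfrac =
                                 * (40 + 0.25) / 100 = 0.4025, i.e. about 40%
                                 * of the histogrammed values lie below the
                                 * constant.)
                                 */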
925                         }
926
927                         /*
928                          * Now histfrac = fraction of histogram entries below the
929                          * constant.
930                          *
931                          * Account for "<" vs ">"
932                          */
933                         hist_selec = isgt ? (1.0 - histfrac) : histfrac;
934
935                         /*
936                          * The histogram boundaries are only approximate to begin with,
937                          * and may well be out of date anyway.  Therefore, don't believe
938                          * extremely small or large selectivity estimates --- unless we
939                          * got actual current endpoint values from the table.
940                          */
941                         if (have_end)
942                                 CLAMP_PROBABILITY(hist_selec);
943                         else
944                         {
945                                 if (hist_selec < 0.0001)
946                                         hist_selec = 0.0001;
947                                 else if (hist_selec > 0.9999)
948                                         hist_selec = 0.9999;
949                         }
950                 }
951
952                 free_attstatsslot(vardata->atttype, values, nvalues, NULL, 0);
953         }
954
955         return hist_selec;
956 }
957
958 /*
959  *              scalarltsel             - Selectivity of "<" (also "<=") for scalars.
960  */
961 Datum
962 scalarltsel(PG_FUNCTION_ARGS)
963 {
964         PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
965         Oid                     operator = PG_GETARG_OID(1);
966         List       *args = (List *) PG_GETARG_POINTER(2);
967         int                     varRelid = PG_GETARG_INT32(3);
968         VariableStatData vardata;
969         Node       *other;
970         bool            varonleft;
971         Datum           constval;
972         Oid                     consttype;
973         bool            isgt;
974         double          selec;
975
976         /*
977          * If expression is not variable op something or something op variable,
978          * then punt and return a default estimate.
979          */
980         if (!get_restriction_variable(root, args, varRelid,
981                                                                   &vardata, &other, &varonleft))
982                 PG_RETURN_FLOAT8(DEFAULT_INEQ_SEL);
983
984         /*
985          * Can't do anything useful if the something is not a constant, either.
986          */
987         if (!IsA(other, Const))
988         {
989                 ReleaseVariableStats(vardata);
990                 PG_RETURN_FLOAT8(DEFAULT_INEQ_SEL);
991         }
992
993         /*
994          * If the constant is NULL, assume operator is strict and return zero, ie,
995          * operator will never return TRUE.
996          */
997         if (((Const *) other)->constisnull)
998         {
999                 ReleaseVariableStats(vardata);
1000                 PG_RETURN_FLOAT8(0.0);
1001         }
1002         constval = ((Const *) other)->constvalue;
1003         consttype = ((Const *) other)->consttype;
1004
1005         /*
1006          * Force the var to be on the left to simplify logic in scalarineqsel.
1007          */
1008         if (varonleft)
1009         {
1010                 /* we have var < other */
1011                 isgt = false;
1012         }
1013         else
1014         {
1015                 /* we have other < var, commute to make var > other */
1016                 operator = get_commutator(operator);
1017                 if (!operator)
1018                 {
1019                         /* Use default selectivity (should we raise an error instead?) */
1020                         ReleaseVariableStats(vardata);
1021                         PG_RETURN_FLOAT8(DEFAULT_INEQ_SEL);
1022                 }
1023                 isgt = true;
1024         }
1025
1026         selec = scalarineqsel(root, operator, isgt, &vardata, constval, consttype);
1027
1028         ReleaseVariableStats(vardata);
1029
1030         PG_RETURN_FLOAT8((float8) selec);
1031 }
1032
1033 /*
 1034  *              scalargtsel             - Selectivity of ">" (also ">=") for scalars.
1035  */
1036 Datum
1037 scalargtsel(PG_FUNCTION_ARGS)
1038 {
1039         PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
1040         Oid                     operator = PG_GETARG_OID(1);
1041         List       *args = (List *) PG_GETARG_POINTER(2);
1042         int                     varRelid = PG_GETARG_INT32(3);
1043         VariableStatData vardata;
1044         Node       *other;
1045         bool            varonleft;
1046         Datum           constval;
1047         Oid                     consttype;
1048         bool            isgt;
1049         double          selec;
1050
1051         /*
1052          * If expression is not variable op something or something op variable,
1053          * then punt and return a default estimate.
1054          */
1055         if (!get_restriction_variable(root, args, varRelid,
1056                                                                   &vardata, &other, &varonleft))
1057                 PG_RETURN_FLOAT8(DEFAULT_INEQ_SEL);
1058
1059         /*
1060          * Can't do anything useful if the something is not a constant, either.
1061          */
1062         if (!IsA(other, Const))
1063         {
1064                 ReleaseVariableStats(vardata);
1065                 PG_RETURN_FLOAT8(DEFAULT_INEQ_SEL);
1066         }
1067
1068         /*
1069          * If the constant is NULL, assume operator is strict and return zero, ie,
1070          * operator will never return TRUE.
1071          */
1072         if (((Const *) other)->constisnull)
1073         {
1074                 ReleaseVariableStats(vardata);
1075                 PG_RETURN_FLOAT8(0.0);
1076         }
1077         constval = ((Const *) other)->constvalue;
1078         consttype = ((Const *) other)->consttype;
1079
1080         /*
1081          * Force the var to be on the left to simplify logic in scalarineqsel.
1082          */
1083         if (varonleft)
1084         {
1085                 /* we have var > other */
1086                 isgt = true;
1087         }
1088         else
1089         {
1090                 /* we have other > var, commute to make var < other */
1091                 operator = get_commutator(operator);
1092                 if (!operator)
1093                 {
1094                         /* Use default selectivity (should we raise an error instead?) */
1095                         ReleaseVariableStats(vardata);
1096                         PG_RETURN_FLOAT8(DEFAULT_INEQ_SEL);
1097                 }
1098                 isgt = false;
1099         }
1100
1101         selec = scalarineqsel(root, operator, isgt, &vardata, constval, consttype);
1102
1103         ReleaseVariableStats(vardata);
1104
1105         PG_RETURN_FLOAT8((float8) selec);
1106 }
1107
1108 /*
1109  * patternsel                   - Generic code for pattern-match selectivity.
1110  */
1111 static double
1112 patternsel(PG_FUNCTION_ARGS, Pattern_Type ptype, bool negate)
1113 {
1114         PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
1115         Oid                     operator = PG_GETARG_OID(1);
1116         List       *args = (List *) PG_GETARG_POINTER(2);
1117         int                     varRelid = PG_GETARG_INT32(3);
1118         Oid                     collation = PG_GET_COLLATION();
1119         VariableStatData vardata;
1120         Node       *other;
1121         bool            varonleft;
1122         Datum           constval;
1123         Oid                     consttype;
1124         Oid                     vartype;
1125         Oid                     opfamily;
1126         Pattern_Prefix_Status pstatus;
1127         Const      *patt;
1128         Const      *prefix = NULL;
1129         Selectivity rest_selec = 0;
1130         double          result;
1131
1132         /*
1133          * If this is for a NOT LIKE or similar operator, get the corresponding
1134          * positive-match operator and work with that.  Set result to the correct
1135          * default estimate, too.
1136          */
1137         if (negate)
1138         {
1139                 operator = get_negator(operator);
1140                 if (!OidIsValid(operator))
1141                         elog(ERROR, "patternsel called for operator without a negator");
1142                 result = 1.0 - DEFAULT_MATCH_SEL;
1143         }
1144         else
1145         {
1146                 result = DEFAULT_MATCH_SEL;
1147         }
1148
1149         /*
1150          * If expression is not variable op constant, then punt and return a
1151          * default estimate.
1152          */
1153         if (!get_restriction_variable(root, args, varRelid,
1154                                                                   &vardata, &other, &varonleft))
1155                 return result;
1156         if (!varonleft || !IsA(other, Const))
1157         {
1158                 ReleaseVariableStats(vardata);
1159                 return result;
1160         }
1161
1162         /*
1163          * If the constant is NULL, assume operator is strict and return zero, ie,
1164          * operator will never return TRUE.  (It's zero even for a negator op.)
1165          */
1166         if (((Const *) other)->constisnull)
1167         {
1168                 ReleaseVariableStats(vardata);
1169                 return 0.0;
1170         }
1171         constval = ((Const *) other)->constvalue;
1172         consttype = ((Const *) other)->consttype;
1173
1174         /*
1175          * The right-hand const is type text or bytea for all supported operators.
1176          * We do not expect to see binary-compatible types here, since
1177          * const-folding should have relabeled the const to exactly match the
1178          * operator's declared type.
1179          */
1180         if (consttype != TEXTOID && consttype != BYTEAOID)
1181         {
1182                 ReleaseVariableStats(vardata);
1183                 return result;
1184         }
1185
1186         /*
1187          * Similarly, the exposed type of the left-hand side should be one of
1188          * those we know.  (Do not look at vardata.atttype, which might be
1189          * something binary-compatible but different.)  We can use it to choose
1190          * the index opfamily from which we must draw the comparison operators.
1191          *
1192          * NOTE: It would be more correct to use the PATTERN opfamilies than the
1193          * simple ones, but at the moment ANALYZE will not generate statistics for
1194          * the PATTERN operators.  But our results are so approximate anyway that
1195          * it probably hardly matters.
1196          */
1197         vartype = vardata.vartype;
1198
1199         switch (vartype)
1200         {
1201                 case TEXTOID:
1202                         opfamily = TEXT_BTREE_FAM_OID;
1203                         break;
1204                 case BPCHAROID:
1205                         opfamily = BPCHAR_BTREE_FAM_OID;
1206                         break;
1207                 case NAMEOID:
1208                         opfamily = NAME_BTREE_FAM_OID;
1209                         break;
1210                 case BYTEAOID:
1211                         opfamily = BYTEA_BTREE_FAM_OID;
1212                         break;
1213                 default:
1214                         ReleaseVariableStats(vardata);
1215                         return result;
1216         }
1217
1218         /*
1219          * Pull out any fixed prefix implied by the pattern, and estimate the
1220          * fractional selectivity of the remainder of the pattern.  Unlike many of
1221          * the other functions in this file, we use the pattern operator's actual
1222          * collation for this step.  This is not because we expect the collation
1223          * to make a big difference in the selectivity estimate (it seldom would),
1224          * but because we want to be sure we cache compiled regexps under the
1225          * right cache key, so that they can be re-used at runtime.
1226          */
1227         patt = (Const *) other;
1228         pstatus = pattern_fixed_prefix(patt, ptype, collation,
1229                                                                    &prefix, &rest_selec);
1230
1231         /*
1232          * If necessary, coerce the prefix constant to the right type.
1233          */
1234         if (prefix && prefix->consttype != vartype)
1235         {
1236                 char       *prefixstr;
1237
1238                 switch (prefix->consttype)
1239                 {
1240                         case TEXTOID:
1241                                 prefixstr = TextDatumGetCString(prefix->constvalue);
1242                                 break;
1243                         case BYTEAOID:
1244                                 prefixstr = DatumGetCString(DirectFunctionCall1(byteaout,
1245                                                                                                                 prefix->constvalue));
1246                                 break;
1247                         default:
1248                                 elog(ERROR, "unrecognized consttype: %u",
1249                                          prefix->consttype);
1250                                 ReleaseVariableStats(vardata);
1251                                 return result;
1252                 }
1253                 prefix = string_to_const(prefixstr, vartype);
1254                 pfree(prefixstr);
1255         }
1256
1257         if (pstatus == Pattern_Prefix_Exact)
1258         {
1259                 /*
1260                  * Pattern specifies an exact match, so pretend operator is '='
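                 * (for instance, LIKE 'abc' with no wildcard characters)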
1261                  */
1262                 Oid                     eqopr = get_opfamily_member(opfamily, vartype, vartype,
1263                                                                                                 BTEqualStrategyNumber);
1264
1265                 if (eqopr == InvalidOid)
1266                         elog(ERROR, "no = operator for opfamily %u", opfamily);
1267                 result = var_eq_const(&vardata, eqopr, prefix->constvalue,
1268                                                           false, true);
1269         }
1270         else
1271         {
1272                 /*
1273                  * Not exact-match pattern.  If we have a sufficiently large
1274                  * histogram, estimate selectivity for the histogram part of the
1275                  * population by counting matches in the histogram.  If not, estimate
1276                  * selectivity of the fixed prefix and remainder of pattern
1277                  * separately, then combine the two to get an estimate of the
1278                  * selectivity for the part of the column population represented by
1279                  * the histogram.  (For small histograms, we combine these
1280                  * approaches.)
1281                  *
1282          * We then add up data for any most-common-value (MCV) entries; these are
1283                  * not in the histogram population, and we can get exact answers for
1284                  * them by applying the pattern operator, so there's no reason to
1285                  * approximate.  (If the MCVs cover a significant part of the total
1286                  * population, this gives us a big leg up in accuracy.)
1287                  */
1288                 Selectivity selec;
1289                 int                     hist_size;
1290                 FmgrInfo        opproc;
1291                 double          nullfrac,
1292                                         mcv_selec,
1293                                         sumcommon;
1294
1295                 /* Try to use the histogram entries to get selectivity */
1296                 fmgr_info(get_opcode(operator), &opproc);
1297
1298                 selec = histogram_selectivity(&vardata, &opproc, constval, true,
1299                                                                           10, 1, &hist_size);
1300
1301                 /* If not at least 100 entries, use the heuristic method */
1302                 if (hist_size < 100)
1303                 {
1304                         Selectivity heursel;
1305                         Selectivity prefixsel;
1306
1307                         if (pstatus == Pattern_Prefix_Partial)
1308                                 prefixsel = prefix_selectivity(root, &vardata, vartype,
1309                                                                                            opfamily, prefix);
1310                         else
1311                                 prefixsel = 1.0;
1312                         heursel = prefixsel * rest_selec;
1313
1314                         if (selec < 0)          /* fewer than 10 histogram entries? */
1315                                 selec = heursel;
1316                         else
1317                         {
1318                                 /*
1319                                  * For histogram sizes from 10 to 100, we combine the
1320                                  * histogram and heuristic selectivities, putting progressively
1321                                  * more trust in the histogram for larger sizes.
1322                                  */
1323                                 double          hist_weight = hist_size / 100.0;
1324
1325                                 selec = selec * hist_weight + heursel * (1.0 - hist_weight);
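                                /*
                                 * For illustration: with hist_size = 40, hist_weight is 0.4,
                                 * so a histogram estimate of 0.02 and a heuristic estimate
                                 * of 0.10 blend to 0.02 * 0.4 + 0.10 * 0.6 = 0.068.
                                 */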
1326                         }
1327                 }
1328
1329                 /* In any case, don't believe extremely small or large estimates. */
1330                 if (selec < 0.0001)
1331                         selec = 0.0001;
1332                 else if (selec > 0.9999)
1333                         selec = 0.9999;
1334
1335                 /*
1336                  * If we have most-common-values info, add up the fractions of the MCV
1337                  * entries that satisfy MCV OP PATTERN.  These fractions contribute
1338                  * directly to the result selectivity.  Also add up the total fraction
1339                  * represented by MCV entries.
1340                  */
1341                 mcv_selec = mcv_selectivity(&vardata, &opproc, constval, true,
1342                                                                         &sumcommon);
1343
1344                 if (HeapTupleIsValid(vardata.statsTuple))
1345                         nullfrac = ((Form_pg_statistic) GETSTRUCT(vardata.statsTuple))->stanullfrac;
1346                 else
1347                         nullfrac = 0.0;
1348
1349                 /*
1350                  * Now merge the results from the MCV and histogram calculations,
1351                  * realizing that the histogram covers only the non-null values that
1352                  * are not listed in MCV.
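                 * (For illustration: with nullfrac = 0.1, sumcommon = 0.3, and a
                 * histogram-based selec of 0.2, the non-MCV part contributes
                 * 0.2 * (1 - 0.1 - 0.3) = 0.12 before mcv_selec is added.)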
1353                  */
1354                 selec *= 1.0 - nullfrac - sumcommon;
1355                 selec += mcv_selec;
1356
1357                 /* result should be in range, but make sure... */
1358                 CLAMP_PROBABILITY(selec);
1359                 result = selec;
1360         }
1361
1362         if (prefix)
1363         {
1364                 pfree(DatumGetPointer(prefix->constvalue));
1365                 pfree(prefix);
1366         }
1367
1368         ReleaseVariableStats(vardata);
1369
1370         return negate ? (1.0 - result) : result;
1371 }
1372
1373 /*
1374  *              regexeqsel              - Selectivity of regular-expression pattern match.
1375  */
1376 Datum
1377 regexeqsel(PG_FUNCTION_ARGS)
1378 {
1379         PG_RETURN_FLOAT8(patternsel(fcinfo, Pattern_Type_Regex, false));
1380 }
1381
1382 /*
1383  *              icregexeqsel    - Selectivity of case-insensitive regex match.
1384  */
1385 Datum
1386 icregexeqsel(PG_FUNCTION_ARGS)
1387 {
1388         PG_RETURN_FLOAT8(patternsel(fcinfo, Pattern_Type_Regex_IC, false));
1389 }
1390
1391 /*
1392  *              likesel                 - Selectivity of LIKE pattern match.
1393  */
1394 Datum
1395 likesel(PG_FUNCTION_ARGS)
1396 {
1397         PG_RETURN_FLOAT8(patternsel(fcinfo, Pattern_Type_Like, false));
1398 }
1399
1400 /*
1401  *              iclikesel                       - Selectivity of ILIKE pattern match.
1402  */
1403 Datum
1404 iclikesel(PG_FUNCTION_ARGS)
1405 {
1406         PG_RETURN_FLOAT8(patternsel(fcinfo, Pattern_Type_Like_IC, false));
1407 }
1408
1409 /*
1410  *              regexnesel              - Selectivity of regular-expression pattern non-match.
1411  */
1412 Datum
1413 regexnesel(PG_FUNCTION_ARGS)
1414 {
1415         PG_RETURN_FLOAT8(patternsel(fcinfo, Pattern_Type_Regex, true));
1416 }
1417
1418 /*
1419  *              icregexnesel    - Selectivity of case-insensitive regex non-match.
1420  */
1421 Datum
1422 icregexnesel(PG_FUNCTION_ARGS)
1423 {
1424         PG_RETURN_FLOAT8(patternsel(fcinfo, Pattern_Type_Regex_IC, true));
1425 }
1426
1427 /*
1428  *              nlikesel                - Selectivity of LIKE pattern non-match.
1429  */
1430 Datum
1431 nlikesel(PG_FUNCTION_ARGS)
1432 {
1433         PG_RETURN_FLOAT8(patternsel(fcinfo, Pattern_Type_Like, true));
1434 }
1435
1436 /*
1437  *              icnlikesel              - Selectivity of ILIKE pattern non-match.
1438  */
1439 Datum
1440 icnlikesel(PG_FUNCTION_ARGS)
1441 {
1442         PG_RETURN_FLOAT8(patternsel(fcinfo, Pattern_Type_Like_IC, true));
1443 }
1444
1445 /*
1446  *              boolvarsel              - Selectivity of Boolean variable.
1447  *
1448  * This can actually be called on any boolean-valued expression.  If it
1449  * involves only Vars of the specified relation, and if there are statistics
1450  * about the Var or expression (the latter is possible if it's indexed) then
1451  * we'll produce a real estimate; otherwise it's just a default.
1452  */
1453 Selectivity
1454 boolvarsel(PlannerInfo *root, Node *arg, int varRelid)
1455 {
1456         VariableStatData vardata;
1457         double          selec;
1458
1459         examine_variable(root, arg, varRelid, &vardata);
1460         if (HeapTupleIsValid(vardata.statsTuple))
1461         {
1462                 /*
1463                  * A boolean variable V is equivalent to the clause V = 't', so we
1464                  * compute the selectivity as if that is what we have.
1465                  */
1466                 selec = var_eq_const(&vardata, BooleanEqualOperator,
1467                                                          BoolGetDatum(true), false, true);
1468         }
1469         else if (is_funcclause(arg))
1470         {
1471                 /*
1472                  * If we have no stats and it's a function call, estimate 0.3333333.
1473                  * This seems a pretty unprincipled choice, but Postgres has been
1474                  * using that estimate for function calls since 1992.  The hoariness
1475                  * of this behavior suggests that we should not be in too much hurry
1476                  * to use another value.
1477                  */
1478                 selec = 0.3333333;
1479         }
1480         else
1481         {
1482                 /* Otherwise, the default estimate is 0.5 */
1483                 selec = 0.5;
1484         }
1485         ReleaseVariableStats(vardata);
1486         return selec;
1487 }
1488
1489 /*
1490  *              booltestsel             - Selectivity of BooleanTest Node.
1491  */
1492 Selectivity
1493 booltestsel(PlannerInfo *root, BoolTestType booltesttype, Node *arg,
1494                         int varRelid, JoinType jointype, SpecialJoinInfo *sjinfo)
1495 {
1496         VariableStatData vardata;
1497         double          selec;
1498
1499         examine_variable(root, arg, varRelid, &vardata);
1500
1501         if (HeapTupleIsValid(vardata.statsTuple))
1502         {
1503                 Form_pg_statistic stats;
1504                 double          freq_null;
1505                 Datum      *values;
1506                 int                     nvalues;
1507                 float4     *numbers;
1508                 int                     nnumbers;
1509
1510                 stats = (Form_pg_statistic) GETSTRUCT(vardata.statsTuple);
1511                 freq_null = stats->stanullfrac;
1512
1513                 if (get_attstatsslot(vardata.statsTuple,
1514                                                          vardata.atttype, vardata.atttypmod,
1515                                                          STATISTIC_KIND_MCV, InvalidOid,
1516                                                          NULL,
1517                                                          &values, &nvalues,
1518                                                          &numbers, &nnumbers)
1519                         && nnumbers > 0)
1520                 {
1521                         double          freq_true;
1522                         double          freq_false;
1523
1524                         /*
1525                          * Get first MCV frequency and derive frequency for true.
1526                          */
1527                         if (DatumGetBool(values[0]))
1528                                 freq_true = numbers[0];
1529                         else
1530                                 freq_true = 1.0 - numbers[0] - freq_null;
1531
1532                         /*
1533                          * Next derive frequency for false. Then use these as appropriate
1534                          * to derive frequency for each case.
1535                          */
1536                         freq_false = 1.0 - freq_true - freq_null;
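                        /*
                         * For illustration: if the first MCV is false with frequency
                         * 0.7 and freq_null is 0.1, then freq_true = 1.0 - 0.7 - 0.1
                         * = 0.2 and freq_false = 1.0 - 0.2 - 0.1 = 0.7.
                         */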
1537
1538                         switch (booltesttype)
1539                         {
1540                                 case IS_UNKNOWN:
1541                                         /* select only NULL values */
1542                                         selec = freq_null;
1543                                         break;
1544                                 case IS_NOT_UNKNOWN:
1545                                         /* select non-NULL values */
1546                                         selec = 1.0 - freq_null;
1547                                         break;
1548                                 case IS_TRUE:
1549                                         /* select only TRUE values */
1550                                         selec = freq_true;
1551                                         break;
1552                                 case IS_NOT_TRUE:
1553                                         /* select non-TRUE values */
1554                                         selec = 1.0 - freq_true;
1555                                         break;
1556                                 case IS_FALSE:
1557                                         /* select only FALSE values */
1558                                         selec = freq_false;
1559                                         break;
1560                                 case IS_NOT_FALSE:
1561                                         /* select non-FALSE values */
1562                                         selec = 1.0 - freq_false;
1563                                         break;
1564                                 default:
1565                                         elog(ERROR, "unrecognized booltesttype: %d",
1566                                                  (int) booltesttype);
1567                                         selec = 0.0;    /* Keep compiler quiet */
1568                                         break;
1569                         }
1570
1571                         free_attstatsslot(vardata.atttype, values, nvalues,
1572                                                           numbers, nnumbers);
1573                 }
1574                 else
1575                 {
1576                         /*
1577                          * No most-common-value info available. Still have null fraction
1578                          * information, so use it for IS [NOT] UNKNOWN. Otherwise adjust
1579                          * for null fraction and assume a 50-50 split of TRUE and FALSE.
1580                          */
1581                         switch (booltesttype)
1582                         {
1583                                 case IS_UNKNOWN:
1584                                         /* select only NULL values */
1585                                         selec = freq_null;
1586                                         break;
1587                                 case IS_NOT_UNKNOWN:
1588                                         /* select non-NULL values */
1589                                         selec = 1.0 - freq_null;
1590                                         break;
1591                                 case IS_TRUE:
1592                                 case IS_FALSE:
1593                                         /* Assume we select half of the non-NULL values */
1594                                         selec = (1.0 - freq_null) / 2.0;
1595                                         break;
1596                                 case IS_NOT_TRUE:
1597                                 case IS_NOT_FALSE:
1598                                         /* Assume we select NULLs plus half of the non-NULLs */
1599                                         /* equiv. to freq_null + (1.0 - freq_null) / 2.0 */
1600                                         selec = (freq_null + 1.0) / 2.0;
1601                                         break;
1602                                 default:
1603                                         elog(ERROR, "unrecognized booltesttype: %d",
1604                                                  (int) booltesttype);
1605                                         selec = 0.0;    /* Keep compiler quiet */
1606                                         break;
1607                         }
1608                 }
1609         }
1610         else
1611         {
1612                 /*
1613                  * If we can't get variable statistics for the argument, perhaps
1614                  * clause_selectivity can do something with it.  We ignore the
1615                  * possibility of a NULL value when using clause_selectivity, and just
1616                  * assume the value is either TRUE or FALSE.
1617                  */
1618                 switch (booltesttype)
1619                 {
1620                         case IS_UNKNOWN:
1621                                 selec = DEFAULT_UNK_SEL;
1622                                 break;
1623                         case IS_NOT_UNKNOWN:
1624                                 selec = DEFAULT_NOT_UNK_SEL;
1625                                 break;
1626                         case IS_TRUE:
1627                         case IS_NOT_FALSE:
1628                                 selec = (double) clause_selectivity(root, arg,
1629                                                                                                         varRelid,
1630                                                                                                         jointype, sjinfo);
1631                                 break;
1632                         case IS_FALSE:
1633                         case IS_NOT_TRUE:
1634                                 selec = 1.0 - (double) clause_selectivity(root, arg,
1635                                                                                                                   varRelid,
1636                                                                                                                   jointype, sjinfo);
1637                                 break;
1638                         default:
1639                                 elog(ERROR, "unrecognized booltesttype: %d",
1640                                          (int) booltesttype);
1641                                 selec = 0.0;    /* Keep compiler quiet */
1642                                 break;
1643                 }
1644         }
1645
1646         ReleaseVariableStats(vardata);
1647
1648         /* result should be in range, but make sure... */
1649         CLAMP_PROBABILITY(selec);
1650
1651         return (Selectivity) selec;
1652 }
1653
1654 /*
1655  *              nulltestsel             - Selectivity of NullTest Node.
1656  */
1657 Selectivity
1658 nulltestsel(PlannerInfo *root, NullTestType nulltesttype, Node *arg,
1659                         int varRelid, JoinType jointype, SpecialJoinInfo *sjinfo)
1660 {
1661         VariableStatData vardata;
1662         double          selec;
1663
1664         examine_variable(root, arg, varRelid, &vardata);
1665
1666         if (HeapTupleIsValid(vardata.statsTuple))
1667         {
1668                 Form_pg_statistic stats;
1669                 double          freq_null;
1670
1671                 stats = (Form_pg_statistic) GETSTRUCT(vardata.statsTuple);
1672                 freq_null = stats->stanullfrac;
1673
1674                 switch (nulltesttype)
1675                 {
1676                         case IS_NULL:
1677
1678                                 /*
1679                                  * Use freq_null directly.
1680                                  */
1681                                 selec = freq_null;
1682                                 break;
1683                         case IS_NOT_NULL:
1684
1685                                 /*
1686                                  * Select not unknown (not null) values. Calculate from
1687                                  * freq_null.
1688                                  */
1689                                 selec = 1.0 - freq_null;
1690                                 break;
1691                         default:
1692                                 elog(ERROR, "unrecognized nulltesttype: %d",
1693                                          (int) nulltesttype);
1694                                 return (Selectivity) 0; /* keep compiler quiet */
1695                 }
1696         }
1697         else
1698         {
1699                 /*
1700                  * No ANALYZE stats available, so make a guess
1701                  */
1702                 switch (nulltesttype)
1703                 {
1704                         case IS_NULL:
1705                                 selec = DEFAULT_UNK_SEL;
1706                                 break;
1707                         case IS_NOT_NULL:
1708                                 selec = DEFAULT_NOT_UNK_SEL;
1709                                 break;
1710                         default:
1711                                 elog(ERROR, "unrecognized nulltesttype: %d",
1712                                          (int) nulltesttype);
1713                                 return (Selectivity) 0; /* keep compiler quiet */
1714                 }
1715         }
1716
1717         ReleaseVariableStats(vardata);
1718
1719         /* result should be in range, but make sure... */
1720         CLAMP_PROBABILITY(selec);
1721
1722         return (Selectivity) selec;
1723 }
1724
1725 /*
1726  * strip_array_coercion - strip binary-compatible relabeling from an array expr
1727  *
1728  * For array values, the parser normally generates ArrayCoerceExpr conversions,
1729  * but it seems possible that RelabelType might show up.  Also, the planner
1730  * is not currently tense about collapsing stacked ArrayCoerceExpr nodes,
1731  * so we need to be ready to deal with more than one level.
1732  */
1733 static Node *
1734 strip_array_coercion(Node *node)
1735 {
1736         for (;;)
1737         {
1738                 if (node && IsA(node, ArrayCoerceExpr) &&
1739                         ((ArrayCoerceExpr *) node)->elemfuncid == InvalidOid)
1740                 {
1741                         node = (Node *) ((ArrayCoerceExpr *) node)->arg;
1742                 }
1743                 else if (node && IsA(node, RelabelType))
1744                 {
1745                         /* We don't really expect this case, but may as well cope */
1746                         node = (Node *) ((RelabelType *) node)->arg;
1747                 }
1748                 else
1749                         break;
1750         }
1751         return node;
1752 }
1753
1754 /*
1755  *              scalararraysel          - Selectivity of ScalarArrayOpExpr Node.
1756  */
1757 Selectivity
1758 scalararraysel(PlannerInfo *root,
1759                            ScalarArrayOpExpr *clause,
1760                            bool is_join_clause,
1761                            int varRelid,
1762                            JoinType jointype,
1763                            SpecialJoinInfo *sjinfo)
1764 {
1765         Oid                     operator = clause->opno;
1766         bool            useOr = clause->useOr;
1767         bool            isEquality = false;
1768         bool            isInequality = false;
1769         Node       *leftop;
1770         Node       *rightop;
1771         Oid                     nominal_element_type;
1772         Oid                     nominal_element_collation;
1773         TypeCacheEntry *typentry;
1774         RegProcedure oprsel;
1775         FmgrInfo        oprselproc;
1776         Selectivity s1;
1777         Selectivity s1disjoint;
1778
1779         /* First, deconstruct the expression */
1780         Assert(list_length(clause->args) == 2);
1781         leftop = (Node *) linitial(clause->args);
1782         rightop = (Node *) lsecond(clause->args);
1783
1784         /* aggressively reduce both sides to constants */
1785         leftop = estimate_expression_value(root, leftop);
1786         rightop = estimate_expression_value(root, rightop);
1787
1788         /* get nominal (after relabeling) element type of rightop */
1789         nominal_element_type = get_base_element_type(exprType(rightop));
1790         if (!OidIsValid(nominal_element_type))
1791                 return (Selectivity) 0.5;               /* probably shouldn't happen */
1792         /* get nominal collation, too, for generating constants */
1793         nominal_element_collation = exprCollation(rightop);
1794
1795         /* look through any binary-compatible relabeling of rightop */
1796         rightop = strip_array_coercion(rightop);
1797
1798         /*
1799          * Detect whether the operator is the default equality or inequality
1800          * operator of the array element type.
1801          */
1802         typentry = lookup_type_cache(nominal_element_type, TYPECACHE_EQ_OPR);
1803         if (OidIsValid(typentry->eq_opr))
1804         {
1805                 if (operator == typentry->eq_opr)
1806                         isEquality = true;
1807                 else if (get_negator(operator) == typentry->eq_opr)
1808                         isInequality = true;
1809         }
1810
1811         /*
1812          * If it is equality or inequality, we might be able to estimate this as a
1813          * form of array containment; for instance "const = ANY(column)" can be
1814          * treated as "ARRAY[const] <@ column".  scalararraysel_containment tries
1815          * that, and returns the selectivity estimate if successful, or -1 if not.
1816          */
1817         if ((isEquality || isInequality) && !is_join_clause)
1818         {
1819                 s1 = scalararraysel_containment(root, leftop, rightop,
1820                                                                                 nominal_element_type,
1821                                                                                 isEquality, useOr, varRelid);
1822                 if (s1 >= 0.0)
1823                         return s1;
1824         }
1825
1826         /*
1827          * Look up the underlying operator's selectivity estimator. Punt if it
1828          * hasn't got one.
1829          */
1830         if (is_join_clause)
1831                 oprsel = get_oprjoin(operator);
1832         else
1833                 oprsel = get_oprrest(operator);
1834         if (!oprsel)
1835                 return (Selectivity) 0.5;
1836         fmgr_info(oprsel, &oprselproc);
1837
1838         /*
1839          * In the array-containment check above, we must only believe that an
1840          * operator is equality or inequality if it is the default btree equality
1841          * operator (or its negator) for the element type, since those are the
1842          * operators that array containment will use.  But in what follows, we can
1843          * be a little laxer, and also believe that any operators using eqsel() or
1844          * neqsel() as selectivity estimator act like equality or inequality.
1845          */
1846         if (oprsel == F_EQSEL || oprsel == F_EQJOINSEL)
1847                 isEquality = true;
1848         else if (oprsel == F_NEQSEL || oprsel == F_NEQJOINSEL)
1849                 isInequality = true;
1850
1851         /*
1852          * We consider three cases:
1853          *
1854          * 1. rightop is an Array constant: deconstruct the array, apply the
1855          * operator's selectivity function for each array element, and merge the
1856          * results in the same way that clausesel.c does for AND/OR combinations.
1857          *
1858          * 2. rightop is an ARRAY[] construct: apply the operator's selectivity
1859          * function for each element of the ARRAY[] construct, and merge.
1860          *
1861          * 3. otherwise, make a guess ...
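         *
         * (For illustration: case 1 is something like "x = ANY('{1,2,3}')"
         * where the array is a plan-time constant, case 2 is
         * "x = ANY(ARRAY[y, z])" whose elements are not all constants, and
         * case 3 is an array computed only at runtime.)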
1862          */
1863         if (rightop && IsA(rightop, Const))
1864         {
1865                 Datum           arraydatum = ((Const *) rightop)->constvalue;
1866                 bool            arrayisnull = ((Const *) rightop)->constisnull;
1867                 ArrayType  *arrayval;
1868                 int16           elmlen;
1869                 bool            elmbyval;
1870                 char            elmalign;
1871                 int                     num_elems;
1872                 Datum      *elem_values;
1873                 bool       *elem_nulls;
1874                 int                     i;
1875
1876                 if (arrayisnull)                /* qual can't succeed if null array */
1877                         return (Selectivity) 0.0;
1878                 arrayval = DatumGetArrayTypeP(arraydatum);
1879                 get_typlenbyvalalign(ARR_ELEMTYPE(arrayval),
1880                                                          &elmlen, &elmbyval, &elmalign);
1881                 deconstruct_array(arrayval,
1882                                                   ARR_ELEMTYPE(arrayval),
1883                                                   elmlen, elmbyval, elmalign,
1884                                                   &elem_values, &elem_nulls, &num_elems);
1885
1886                 /*
1887                  * For generic operators, we assume the probability of success is
1888                  * independent for each array element.  But for "= ANY" or "<> ALL",
1889                  * if the array elements are distinct (which'd typically be the case)
1890                  * then the probabilities are disjoint, and we should just sum them.
1891                  *
1892                  * If we were being really tense we would try to confirm that the
1893                  * elements are all distinct, but that would be expensive and it
1894                  * doesn't seem to be worth the cycles; it would amount to penalizing
1895                  * well-written queries in favor of poorly-written ones.  However, we
1896                  * do protect ourselves a little bit by checking whether the
1897                  * disjointness assumption leads to an impossible (out of range)
1898                  * probability; if so, we fall back to the normal calculation.
1899                  */
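                /*
                 * For illustration: for "x = ANY(...)" over three elements whose
                 * individual selectivities are each 0.01, the independence rule
                 * gives 1 - 0.99^3 ~= 0.0297, while the disjoint sum gives
                 * exactly 0.03; the latter is used when it stays within [0, 1].
                 */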
1900                 s1 = s1disjoint = (useOr ? 0.0 : 1.0);
1901
1902                 for (i = 0; i < num_elems; i++)
1903                 {
1904                         List       *args;
1905                         Selectivity s2;
1906
1907                         args = list_make2(leftop,
1908                                                           makeConst(nominal_element_type,
1909                                                                                 -1,
1910                                                                                 nominal_element_collation,
1911                                                                                 elmlen,
1912                                                                                 elem_values[i],
1913                                                                                 elem_nulls[i],
1914                                                                                 elmbyval));
1915                         if (is_join_clause)
1916                                 s2 = DatumGetFloat8(FunctionCall5Coll(&oprselproc,
1917                                                                                                           clause->inputcollid,
1918                                                                                                           PointerGetDatum(root),
1919                                                                                                   ObjectIdGetDatum(operator),
1920                                                                                                           PointerGetDatum(args),
1921                                                                                                           Int16GetDatum(jointype),
1922                                                                                                    PointerGetDatum(sjinfo)));
1923                         else
1924                                 s2 = DatumGetFloat8(FunctionCall4Coll(&oprselproc,
1925                                                                                                           clause->inputcollid,
1926                                                                                                           PointerGetDatum(root),
1927                                                                                                   ObjectIdGetDatum(operator),
1928                                                                                                           PointerGetDatum(args),
1929                                                                                                    Int32GetDatum(varRelid)));
1930
1931                         if (useOr)
1932                         {
1933                                 s1 = s1 + s2 - s1 * s2;
1934                                 if (isEquality)
1935                                         s1disjoint += s2;
1936                         }
1937                         else
1938                         {
1939                                 s1 = s1 * s2;
1940                                 if (isInequality)
1941                                         s1disjoint += s2 - 1.0;
1942                         }
1943                 }
1944
1945                 /* accept disjoint-probability estimate if in range */
1946                 if ((useOr ? isEquality : isInequality) &&
1947                         s1disjoint >= 0.0 && s1disjoint <= 1.0)
1948                         s1 = s1disjoint;
1949         }
1950         else if (rightop && IsA(rightop, ArrayExpr) &&
1951                          !((ArrayExpr *) rightop)->multidims)
1952         {
1953                 ArrayExpr  *arrayexpr = (ArrayExpr *) rightop;
1954                 int16           elmlen;
1955                 bool            elmbyval;
1956                 ListCell   *l;
1957
1958                 get_typlenbyval(arrayexpr->element_typeid,
1959                                                 &elmlen, &elmbyval);
1960
1961                 /*
1962                  * We use the assumption of disjoint probabilities here too, although
1963                  * the odds of equal array elements are rather higher if the elements
1964                  * are not all constants (which they won't be, else constant folding
1965                  * would have reduced the ArrayExpr to a Const).  In this path it's
1966                  * critical to have the sanity check on the s1disjoint estimate.
1967                  */
1968                 s1 = s1disjoint = (useOr ? 0.0 : 1.0);
1969
1970                 foreach(l, arrayexpr->elements)
1971                 {
1972                         Node       *elem = (Node *) lfirst(l);
1973                         List       *args;
1974                         Selectivity s2;
1975
1976                         /*
1977                          * Theoretically, if elem isn't of nominal_element_type we should
1978                          * insert a RelabelType, but it seems unlikely that any operator
1979                          * estimation function would really care ...
1980                          */
1981                         args = list_make2(leftop, elem);
1982                         if (is_join_clause)
1983                                 s2 = DatumGetFloat8(FunctionCall5Coll(&oprselproc,
1984                                                                                                           clause->inputcollid,
1985                                                                                                           PointerGetDatum(root),
1986                                                                                                   ObjectIdGetDatum(operator),
1987                                                                                                           PointerGetDatum(args),
1988                                                                                                           Int16GetDatum(jointype),
1989                                                                                                    PointerGetDatum(sjinfo)));
1990                         else
1991                                 s2 = DatumGetFloat8(FunctionCall4Coll(&oprselproc,
1992                                                                                                           clause->inputcollid,
1993                                                                                                           PointerGetDatum(root),
1994                                                                                                   ObjectIdGetDatum(operator),
1995                                                                                                           PointerGetDatum(args),
1996                                                                                                    Int32GetDatum(varRelid)));
1997
1998                         if (useOr)
1999                         {
2000                                 s1 = s1 + s2 - s1 * s2;
2001                                 if (isEquality)
2002                                         s1disjoint += s2;
2003                         }
2004                         else
2005                         {
2006                                 s1 = s1 * s2;
2007                                 if (isInequality)
2008                                         s1disjoint += s2 - 1.0;
2009                         }
2010                 }
2011
2012                 /* accept disjoint-probability estimate if in range */
2013                 if ((useOr ? isEquality : isInequality) &&
2014                         s1disjoint >= 0.0 && s1disjoint <= 1.0)
2015                         s1 = s1disjoint;
2016         }
2017         else
2018         {
2019                 CaseTestExpr *dummyexpr;
2020                 List       *args;
2021                 Selectivity s2;
2022                 int                     i;
2023
2024                 /*
2025                  * We need a dummy rightop to pass to the operator selectivity
2026                  * routine.  It can be pretty much anything that doesn't look like a
2027                  * constant; CaseTestExpr is a convenient choice.
2028                  */
2029                 dummyexpr = makeNode(CaseTestExpr);
2030                 dummyexpr->typeId = nominal_element_type;
2031                 dummyexpr->typeMod = -1;
2032                 dummyexpr->collation = clause->inputcollid;
2033                 args = list_make2(leftop, dummyexpr);
2034                 if (is_join_clause)
2035                         s2 = DatumGetFloat8(FunctionCall5Coll(&oprselproc,
2036                                                                                                   clause->inputcollid,
2037                                                                                                   PointerGetDatum(root),
2038                                                                                                   ObjectIdGetDatum(operator),
2039                                                                                                   PointerGetDatum(args),
2040                                                                                                   Int16GetDatum(jointype),
2041                                                                                                   PointerGetDatum(sjinfo)));
2042                 else
2043                         s2 = DatumGetFloat8(FunctionCall4Coll(&oprselproc,
2044                                                                                                   clause->inputcollid,
2045                                                                                                   PointerGetDatum(root),
2046                                                                                                   ObjectIdGetDatum(operator),
2047                                                                                                   PointerGetDatum(args),
2048                                                                                                   Int32GetDatum(varRelid)));
2049                 s1 = useOr ? 0.0 : 1.0;
2050
2051                 /*
2052                  * Arbitrarily assume 10 elements in the eventual array value (see
2053                  * also estimate_array_length).  We don't risk an assumption of
2054                  * disjoint probabilities here.
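                 * (For illustration: with useOr and a per-element selectivity of
                 * 0.01, ten iterations give 1 - 0.99^10 ~= 0.096.)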
2055                  */
2056                 for (i = 0; i < 10; i++)
2057                 {
2058                         if (useOr)
2059                                 s1 = s1 + s2 - s1 * s2;
2060                         else
2061                                 s1 = s1 * s2;
2062                 }
2063         }
2064
2065         /* result should be in range, but make sure... */
2066         CLAMP_PROBABILITY(s1);
2067
2068         return s1;
2069 }
2070
2071 /*
2072  * Estimate number of elements in the array yielded by an expression.
2073  *
2074  * It's important that this agree with scalararraysel.
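 *
 * (For illustration: a constant like '{1,2,3}'::int[] yields 3, a
 * one-dimensional ARRAY[x, y] construct yields 2, and anything else
 * falls back to the default guess of 10.)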
2075  */
2076 int
2077 estimate_array_length(Node *arrayexpr)
2078 {
2079         /* look through any binary-compatible relabeling of arrayexpr */
2080         arrayexpr = strip_array_coercion(arrayexpr);
2081
2082         if (arrayexpr && IsA(arrayexpr, Const))
2083         {
2084                 Datum           arraydatum = ((Const *) arrayexpr)->constvalue;
2085                 bool            arrayisnull = ((Const *) arrayexpr)->constisnull;
2086                 ArrayType  *arrayval;
2087
2088                 if (arrayisnull)
2089                         return 0;
2090                 arrayval = DatumGetArrayTypeP(arraydatum);
2091                 return ArrayGetNItems(ARR_NDIM(arrayval), ARR_DIMS(arrayval));
2092         }
2093         else if (arrayexpr && IsA(arrayexpr, ArrayExpr) &&
2094                          !((ArrayExpr *) arrayexpr)->multidims)
2095         {
2096                 return list_length(((ArrayExpr *) arrayexpr)->elements);
2097         }
2098         else
2099         {
2100                 /* default guess --- see also scalararraysel */
2101                 return 10;
2102         }
2103 }
2104
2105 /*
2106  *              rowcomparesel           - Selectivity of RowCompareExpr Node.
2107  *
2108  * We estimate RowCompare selectivity by considering just the first (high
2109  * order) columns, which makes it equivalent to an ordinary OpExpr.  While
2110  * this estimate could be refined by considering additional columns, it
2111  * seems unlikely that we could do a lot better without multi-column
2112  * statistics.
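 *
 * (For illustration: "(a, b) < (c, d)" is estimated as though it were
 * simply "a < c".)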
2113  */
2114 Selectivity
2115 rowcomparesel(PlannerInfo *root,
2116                           RowCompareExpr *clause,
2117                           int varRelid, JoinType jointype, SpecialJoinInfo *sjinfo)
2118 {
2119         Selectivity s1;
2120         Oid                     opno = linitial_oid(clause->opnos);
2121         Oid                     inputcollid = linitial_oid(clause->inputcollids);
2122         List       *opargs;
2123         bool            is_join_clause;
2124
2125         /* Build equivalent arg list for single operator */
2126         opargs = list_make2(linitial(clause->largs), linitial(clause->rargs));
2127
2128         /*
2129          * Decide if it's a join clause.  This should match clausesel.c's
2130          * treat_as_join_clause(), except that we intentionally consider only the
2131          * leading columns and not the rest of the clause.
2132          */
2133         if (varRelid != 0)
2134         {
2135                 /*
2136                  * Caller is forcing restriction mode (eg, because we are examining an
2137                  * inner indexscan qual).
2138                  */
2139                 is_join_clause = false;
2140         }
2141         else if (sjinfo == NULL)
2142         {
2143                 /*
2144                  * It must be a restriction clause, since it's being evaluated at a
2145                  * scan node.
2146                  */
2147                 is_join_clause = false;
2148         }
2149         else
2150         {
2151                 /*
2152                  * Otherwise, it's a join if there's more than one relation used.
2153                  */
2154                 is_join_clause = (NumRelids((Node *) opargs) > 1);
2155         }
2156
2157         if (is_join_clause)
2158         {
2159                 /* Estimate selectivity for a join clause. */
2160                 s1 = join_selectivity(root, opno,
2161                                                           opargs,
2162                                                           inputcollid,
2163                                                           jointype,
2164                                                           sjinfo);
2165         }
2166         else
2167         {
2168                 /* Estimate selectivity for a restriction clause. */
2169                 s1 = restriction_selectivity(root, opno,
2170                                                                          opargs,
2171                                                                          inputcollid,
2172                                                                          varRelid);
2173         }
2174
2175         return s1;
2176 }
2177
2178 /*
2179  *              eqjoinsel               - Join selectivity of "="
2180  */
2181 Datum
2182 eqjoinsel(PG_FUNCTION_ARGS)
2183 {
2184         PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
2185         Oid                     operator = PG_GETARG_OID(1);
2186         List       *args = (List *) PG_GETARG_POINTER(2);
2187
2188 #ifdef NOT_USED
2189         JoinType        jointype = (JoinType) PG_GETARG_INT16(3);
2190 #endif
2191         SpecialJoinInfo *sjinfo = (SpecialJoinInfo *) PG_GETARG_POINTER(4);
2192         double          selec;
2193         VariableStatData vardata1;
2194         VariableStatData vardata2;
2195         bool            join_is_reversed;
2196         RelOptInfo *inner_rel;
2197
2198         get_join_variables(root, args, sjinfo,
2199                                            &vardata1, &vardata2, &join_is_reversed);
2200
2201         switch (sjinfo->jointype)
2202         {
2203                 case JOIN_INNER:
2204                 case JOIN_LEFT:
2205                 case JOIN_FULL:
2206                         selec = eqjoinsel_inner(operator, &vardata1, &vardata2);
2207                         break;
2208                 case JOIN_SEMI:
2209                 case JOIN_ANTI:
2210
2211                         /*
2212                          * Look up the join's inner relation.  min_righthand is sufficient
2213                          * information because neither SEMI nor ANTI joins permit any
2214                          * reassociation into or out of their RHS, so the righthand will
2215                          * always be exactly that set of rels.
2216                          */
2217                         inner_rel = find_join_input_rel(root, sjinfo->min_righthand);
2218
2219                         if (!join_is_reversed)
2220                                 selec = eqjoinsel_semi(operator, &vardata1, &vardata2,
2221                                                                            inner_rel);
2222                         else
2223                                 selec = eqjoinsel_semi(get_commutator(operator),
2224                                                                            &vardata2, &vardata1,
2225                                                                            inner_rel);
2226                         break;
2227                 default:
2228                         /* other values not expected here */
2229                         elog(ERROR, "unrecognized join type: %d",
2230                                  (int) sjinfo->jointype);
2231                         selec = 0;                      /* keep compiler quiet */
2232                         break;
2233         }
2234
2235         ReleaseVariableStats(vardata1);
2236         ReleaseVariableStats(vardata2);
2237
2238         CLAMP_PROBABILITY(selec);
2239
2240         PG_RETURN_FLOAT8((float8) selec);
2241 }
2242
2243 /*
2244  * eqjoinsel_inner --- eqjoinsel for normal inner join
2245  *
2246  * We also use this for LEFT/FULL outer joins; it's not presently clear
2247  * that it's worth trying to distinguish them here.
2248  */
2249 static double
2250 eqjoinsel_inner(Oid operator,
2251                                 VariableStatData *vardata1, VariableStatData *vardata2)
2252 {
2253         double          selec;
2254         double          nd1;
2255         double          nd2;
2256         bool            isdefault1;
2257         bool            isdefault2;
2258         Form_pg_statistic stats1 = NULL;
2259         Form_pg_statistic stats2 = NULL;
2260         bool            have_mcvs1 = false;
2261         Datum      *values1 = NULL;
2262         int                     nvalues1 = 0;
2263         float4     *numbers1 = NULL;
2264         int                     nnumbers1 = 0;
2265         bool            have_mcvs2 = false;
2266         Datum      *values2 = NULL;
2267         int                     nvalues2 = 0;
2268         float4     *numbers2 = NULL;
2269         int                     nnumbers2 = 0;
2270
2271         nd1 = get_variable_numdistinct(vardata1, &isdefault1);
2272         nd2 = get_variable_numdistinct(vardata2, &isdefault2);
2273
2274         if (HeapTupleIsValid(vardata1->statsTuple))
2275         {
2276                 stats1 = (Form_pg_statistic) GETSTRUCT(vardata1->statsTuple);
2277                 have_mcvs1 = get_attstatsslot(vardata1->statsTuple,
2278                                                                           vardata1->atttype,
2279                                                                           vardata1->atttypmod,
2280                                                                           STATISTIC_KIND_MCV,
2281                                                                           InvalidOid,
2282                                                                           NULL,
2283                                                                           &values1, &nvalues1,
2284                                                                           &numbers1, &nnumbers1);
2285         }
2286
2287         if (HeapTupleIsValid(vardata2->statsTuple))
2288         {
2289                 stats2 = (Form_pg_statistic) GETSTRUCT(vardata2->statsTuple);
2290                 have_mcvs2 = get_attstatsslot(vardata2->statsTuple,
2291                                                                           vardata2->atttype,
2292                                                                           vardata2->atttypmod,
2293                                                                           STATISTIC_KIND_MCV,
2294                                                                           InvalidOid,
2295                                                                           NULL,
2296                                                                           &values2, &nvalues2,
2297                                                                           &numbers2, &nnumbers2);
2298         }
2299
2300         if (have_mcvs1 && have_mcvs2)
2301         {
2302                 /*
2303                  * We have most-common-value lists for both relations.  Run through
2304                  * the lists to see which MCVs actually join to each other with the
2305                  * given operator.  This allows us to determine the exact join
2306                  * selectivity for the portion of the relations represented by the MCV
2307                  * lists.  We still have to estimate for the remaining population, but
2308                  * in a skewed distribution this gives us a big leg up in accuracy.
2309                  * For motivation see the analysis in Y. Ioannidis and S.
2310                  * Christodoulakis, "On the propagation of errors in the size of join
2311                  * results", Technical Report 1018, Computer Science Dept., University
2312                  * of Wisconsin, Madison, March 1991 (available from ftp.cs.wisc.edu).
2313                  */
2314                 FmgrInfo        eqproc;
2315                 bool       *hasmatch1;
2316                 bool       *hasmatch2;
2317                 double          nullfrac1 = stats1->stanullfrac;
2318                 double          nullfrac2 = stats2->stanullfrac;
2319                 double          matchprodfreq,
2320                                         matchfreq1,
2321                                         matchfreq2,
2322                                         unmatchfreq1,
2323                                         unmatchfreq2,
2324                                         otherfreq1,
2325                                         otherfreq2,
2326                                         totalsel1,
2327                                         totalsel2;
2328                 int                     i,
2329                                         nmatches;
2330
2331                 fmgr_info(get_opcode(operator), &eqproc);
2332                 hasmatch1 = (bool *) palloc0(nvalues1 * sizeof(bool));
2333                 hasmatch2 = (bool *) palloc0(nvalues2 * sizeof(bool));
2334
2335                 /*
2336                  * Note we assume that each MCV will match at most one member of the
2337                  * other MCV list.  If the operator isn't really equality, there could
2338                  * be multiple matches --- but we don't look for them, both for speed
2339                  * and because the math wouldn't add up...
2340                  */
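                /*
                 * A small illustration with made-up frequencies: if rel1's MCV
                 * list is {1: 0.30, 2: 0.20} and rel2's is {1: 0.25, 3: 0.10},
                 * only the value 1 pairs up, so the loop below ends with
                 * nmatches = 1 and matchprodfreq = 0.30 * 0.25 = 0.075, the
                 * exact fraction of the row-pair space joined by the
                 * MCV-vs-MCV portion of the two relations.
                 */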
2341                 matchprodfreq = 0.0;
2342                 nmatches = 0;
2343                 for (i = 0; i < nvalues1; i++)
2344                 {
2345                         int                     j;
2346
2347                         for (j = 0; j < nvalues2; j++)
2348                         {
2349                                 if (hasmatch2[j])
2350                                         continue;
2351                                 if (DatumGetBool(FunctionCall2Coll(&eqproc,
2352                                                                                                    DEFAULT_COLLATION_OID,
2353                                                                                                    values1[i],
2354                                                                                                    values2[j])))
2355                                 {
2356                                         hasmatch1[i] = hasmatch2[j] = true;
2357                                         matchprodfreq += numbers1[i] * numbers2[j];
2358                                         nmatches++;
2359                                         break;
2360                                 }
2361                         }
2362                 }
2363                 CLAMP_PROBABILITY(matchprodfreq);
2364                 /* Sum up frequencies of matched and unmatched MCVs */
2365                 matchfreq1 = unmatchfreq1 = 0.0;
2366                 for (i = 0; i < nvalues1; i++)
2367                 {
2368                         if (hasmatch1[i])
2369                                 matchfreq1 += numbers1[i];
2370                         else
2371                                 unmatchfreq1 += numbers1[i];
2372                 }
2373                 CLAMP_PROBABILITY(matchfreq1);
2374                 CLAMP_PROBABILITY(unmatchfreq1);
2375                 matchfreq2 = unmatchfreq2 = 0.0;
2376                 for (i = 0; i < nvalues2; i++)
2377                 {
2378                         if (hasmatch2[i])
2379                                 matchfreq2 += numbers2[i];
2380                         else
2381                                 unmatchfreq2 += numbers2[i];
2382                 }
2383                 CLAMP_PROBABILITY(matchfreq2);
2384                 CLAMP_PROBABILITY(unmatchfreq2);
2385                 pfree(hasmatch1);
2386                 pfree(hasmatch2);
2387
2388                 /*
2389                  * Compute total frequency of non-null values that are not in the MCV
2390                  * lists.
2391                  */
2392                 otherfreq1 = 1.0 - nullfrac1 - matchfreq1 - unmatchfreq1;
2393                 otherfreq2 = 1.0 - nullfrac2 - matchfreq2 - unmatchfreq2;
2394                 CLAMP_PROBABILITY(otherfreq1);
2395                 CLAMP_PROBABILITY(otherfreq2);
2396
2397                 /*
2398                  * We can estimate the total selectivity from the point of view of
2399                  * relation 1 as: the known selectivity for matched MCVs, plus
2400                  * unmatched MCVs that are assumed to match against random members of
2401                  * relation 2's non-MCV population, plus non-MCV values that are
2402                  * assumed to match against random members of relation 2's unmatched
2403                  * MCVs plus non-MCV values.
2404                  */
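                /*
                 * Continuing the made-up numbers from the example above (with
                 * nullfrac = 0.05 on each side): matchprodfreq = 0.075,
                 * unmatchfreq1 = 0.20, otherfreq1 = 0.45, unmatchfreq2 = 0.10,
                 * otherfreq2 = 0.60, nd2 = 101, nvalues2 = 2, nmatches = 1.
                 * The terms below then come out as roughly
                 * 0.075 + 0.20*0.60/99 + 0.45*0.70/100, i.e. a totalsel1 of
                 * about 0.079.
                 */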
2405                 totalsel1 = matchprodfreq;
2406                 if (nd2 > nvalues2)
2407                         totalsel1 += unmatchfreq1 * otherfreq2 / (nd2 - nvalues2);
2408                 if (nd2 > nmatches)
2409                         totalsel1 += otherfreq1 * (otherfreq2 + unmatchfreq2) /
2410                                 (nd2 - nmatches);
2411                 /* Same estimate from the point of view of relation 2. */
2412                 totalsel2 = matchprodfreq;
2413                 if (nd1 > nvalues1)
2414                         totalsel2 += unmatchfreq2 * otherfreq1 / (nd1 - nvalues1);
2415                 if (nd1 > nmatches)
2416                         totalsel2 += otherfreq2 * (otherfreq1 + unmatchfreq1) /
2417                                 (nd1 - nmatches);
2418
2419                 /*
2420                  * Use the smaller of the two estimates.  This can be justified in
2421                  * essentially the same terms as given below for the no-stats case: to
2422                  * a first approximation, we are estimating from the point of view of
2423                  * the relation with smaller nd.
2424                  */
2425                 selec = (totalsel1 < totalsel2) ? totalsel1 : totalsel2;
2426         }
2427         else
2428         {
2429                 /*
2430                  * We do not have MCV lists for both sides.  Estimate the join
2431                  * selectivity as MIN(1/nd1,1/nd2)*(1-nullfrac1)*(1-nullfrac2). This
2432                  * is plausible if we assume that the join operator is strict and the
2433                  * non-null values are about equally distributed: a given non-null
2434                  * tuple of rel1 will join to either zero or N2*(1-nullfrac2)/nd2 rows
2435                  * of rel2, so total join rows are at most
2436                  * N1*(1-nullfrac1)*N2*(1-nullfrac2)/nd2 giving a join selectivity of
2437                  * not more than (1-nullfrac1)*(1-nullfrac2)/nd2. By the same logic it
2438                  * is not more than (1-nullfrac1)*(1-nullfrac2)/nd1, so the expression
2439                  * with MIN() is an upper bound.  Using the MIN() means we estimate
2440                  * from the point of view of the relation with smaller nd (since the
2441                  * larger nd is determining the MIN).  It is reasonable to assume that
2442                  * most tuples in this rel will have join partners, so the bound is
2443                  * probably reasonably tight and should be taken as-is.
2444                  *
2445                  * XXX Can we be smarter if we have an MCV list for just one side? It
2446                  * seems that if we assume equal distribution for the other side, we
2447                  * end up with the same answer anyway.
2448                  */
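                /*
                 * For instance (hypothetical stats): with nd1 = 1000, nd2 = 50,
                 * nullfrac1 = 0 and nullfrac2 = 0.1, the code below yields
                 * selec = 0.9 / 1000 = 0.0009; applied to N1 * N2 row pairs,
                 * that caps the join at about 0.0009 * N1 * N2 rows.
                 */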
2449                 double          nullfrac1 = stats1 ? stats1->stanullfrac : 0.0;
2450                 double          nullfrac2 = stats2 ? stats2->stanullfrac : 0.0;
2451
2452                 selec = (1.0 - nullfrac1) * (1.0 - nullfrac2);
2453                 if (nd1 > nd2)
2454                         selec /= nd1;
2455                 else
2456                         selec /= nd2;
2457         }
2458
2459         if (have_mcvs1)
2460                 free_attstatsslot(vardata1->atttype, values1, nvalues1,
2461                                                   numbers1, nnumbers1);
2462         if (have_mcvs2)
2463                 free_attstatsslot(vardata2->atttype, values2, nvalues2,
2464                                                   numbers2, nnumbers2);
2465
2466         return selec;
2467 }
2468
2469 /*
2470  * eqjoinsel_semi --- eqjoinsel for semi join
2471  *
2472  * (Also used for anti join, which we are supposed to estimate the same way.)
2473  * Caller has ensured that vardata1 is the LHS variable.
2474  */
2475 static double
2476 eqjoinsel_semi(Oid operator,
2477                            VariableStatData *vardata1, VariableStatData *vardata2,
2478                            RelOptInfo *inner_rel)
2479 {
2480         double          selec;
2481         double          nd1;
2482         double          nd2;
2483         bool            isdefault1;
2484         bool            isdefault2;
2485         Form_pg_statistic stats1 = NULL;
2486         bool            have_mcvs1 = false;
2487         Datum      *values1 = NULL;
2488         int                     nvalues1 = 0;
2489         float4     *numbers1 = NULL;
2490         int                     nnumbers1 = 0;
2491         bool            have_mcvs2 = false;
2492         Datum      *values2 = NULL;
2493         int                     nvalues2 = 0;
2494         float4     *numbers2 = NULL;
2495         int                     nnumbers2 = 0;
2496
2497         nd1 = get_variable_numdistinct(vardata1, &isdefault1);
2498         nd2 = get_variable_numdistinct(vardata2, &isdefault2);
2499
2500         /*
2501          * We clamp nd2 to be not more than what we estimate the inner relation's
2502          * size to be.  This is intuitively somewhat reasonable since obviously
2503          * there can't be more than that many distinct values coming from the
2504          * inner rel.  The reason for the asymmetry (ie, that we don't clamp nd1
2505          * likewise) is that this is the only pathway by which restriction clauses
2506          * applied to the inner rel will affect the join result size estimate,
2507          * since set_joinrel_size_estimates will multiply SEMI/ANTI selectivity by
2508          * only the outer rel's size.  If we clamped nd1 we'd be double-counting
2509          * the selectivity of outer-rel restrictions.
2510          *
2511          * We can apply this clamping both with respect to the base relation from
2512          * which the join variable comes (if there is just one), and to the
2513          * immediate inner input relation of the current join.
2514          */
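        /*
         * For example (hypothetical numbers): if the raw ndistinct estimate for
         * the inner variable is 10000 but restriction clauses leave only about
         * 500 rows in the inner rel, nd2 is clamped to 500 here; this is the
         * point at which those inner-side restrictions influence the semi/anti
         * join estimate.
         */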
2515         if (vardata2->rel)
2516                 nd2 = Min(nd2, vardata2->rel->rows);
2517         nd2 = Min(nd2, inner_rel->rows);
2518
2519         if (HeapTupleIsValid(vardata1->statsTuple))
2520         {
2521                 stats1 = (Form_pg_statistic) GETSTRUCT(vardata1->statsTuple);
2522                 have_mcvs1 = get_attstatsslot(vardata1->statsTuple,
2523                                                                           vardata1->atttype,
2524                                                                           vardata1->atttypmod,
2525                                                                           STATISTIC_KIND_MCV,
2526                                                                           InvalidOid,
2527                                                                           NULL,
2528                                                                           &values1, &nvalues1,
2529                                                                           &numbers1, &nnumbers1);
2530         }
2531
2532         if (HeapTupleIsValid(vardata2->statsTuple))
2533         {
2534                 have_mcvs2 = get_attstatsslot(vardata2->statsTuple,
2535                                                                           vardata2->atttype,
2536                                                                           vardata2->atttypmod,
2537                                                                           STATISTIC_KIND_MCV,
2538                                                                           InvalidOid,
2539                                                                           NULL,
2540                                                                           &values2, &nvalues2,
2541                                                                           &numbers2, &nnumbers2);
2542         }
2543
2544         if (have_mcvs1 && have_mcvs2 && OidIsValid(operator))
2545         {
2546                 /*
2547                  * We have most-common-value lists for both relations.  Run through
2548                  * the lists to see which MCVs actually join to each other with the
2549                  * given operator.  This allows us to determine the exact join
2550                  * selectivity for the portion of the relations represented by the MCV
2551                  * lists.  We still have to estimate for the remaining population, but
2552                  * in a skewed distribution this gives us a big leg up in accuracy.
2553                  */
2554                 FmgrInfo        eqproc;
2555                 bool       *hasmatch1;
2556                 bool       *hasmatch2;
2557                 double          nullfrac1 = stats1->stanullfrac;
2558                 double          matchfreq1,
2559                                         uncertainfrac,
2560                                         uncertain;
2561                 int                     i,
2562                                         nmatches,
2563                                         clamped_nvalues2;
2564
2565                 /*
2566                  * The clamping above could have resulted in nd2 being less than
2567                  * nvalues2, in which case we assume that precisely the nd2 most
2568                  * common values in the relation will appear in the join input, and so
2569                  * compare to only the first nd2 members of the MCV list.  Of course
2570                  * this is frequently wrong, but it's the best bet we can make.
2571                  */
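                /*
                 * E.g. if nd2 has been clamped to 5 but the MCV list holds 20
                 * entries, only values2[0..4] take part in the matching loop
                 * below (the MCV slot is ordered by decreasing frequency).
                 */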
2572                 clamped_nvalues2 = Min(nvalues2, nd2);
2573
2574                 fmgr_info(get_opcode(operator), &eqproc);
2575                 hasmatch1 = (bool *) palloc0(nvalues1 * sizeof(bool));
2576                 hasmatch2 = (bool *) palloc0(clamped_nvalues2 * sizeof(bool));
2577
2578                 /*
2579                  * Note we assume that each MCV will match at most one member of the
2580                  * other MCV list.  If the operator isn't really equality, there could
2581                  * be multiple matches --- but we don't look for them, both for speed
2582                  * and because the math wouldn't add up...
2583                  */
2584                 nmatches = 0;
2585                 for (i = 0; i < nvalues1; i++)
2586                 {
2587                         int                     j;
2588
2589                         for (j = 0; j < clamped_nvalues2; j++)
2590                         {
2591                                 if (hasmatch2[j])
2592                                         continue;
2593                                 if (DatumGetBool(FunctionCall2Coll(&eqproc,
2594                                                                                                    DEFAULT_COLLATION_OID,
2595                                                                                                    values1[i],
2596                                                                                                    values2[j])))
2597                                 {
2598                                         hasmatch1[i] = hasmatch2[j] = true;
2599                                         nmatches++;
2600                                         break;
2601                                 }
2602                         }
2603                 }
2604                 /* Sum up frequencies of matched MCVs */
2605                 matchfreq1 = 0.0;
2606                 for (i = 0; i < nvalues1; i++)
2607                 {
2608                         if (hasmatch1[i])
2609                                 matchfreq1 += numbers1[i];
2610                 }
2611                 CLAMP_PROBABILITY(matchfreq1);
2612                 pfree(hasmatch1);
2613                 pfree(hasmatch2);
2614
2615                 /*
2616                  * Now we need to estimate the fraction of relation 1 that has at
2617                  * least one join partner.  We know for certain that the matched MCVs
2618                  * do, so that gives us a lower bound, but we're really in the dark
2619                  * about everything else.  Our crude approach is: if nd1 <= nd2 then
2620                  * assume all non-null rel1 rows have join partners, else assume for
2621                  * the uncertain rows that a fraction nd2/nd1 have join partners. We
2622                  * can discount the known-matched MCVs from the distinct-values counts
2623                  * before doing the division.
2624                  *
2625                  * Crude as the above is, it's completely useless if we don't have
2626                  * reliable ndistinct values for both sides.  Hence, if either nd1 or
2627                  * nd2 is default, punt and assume half of the uncertain rows have
2628                  * join partners.
2629                  */
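                /*
                 * Illustration with made-up numbers: matchfreq1 = 0.30,
                 * nullfrac1 = 0.05, nd1 = 1001, nd2 = 201, nmatches = 1.  After
                 * discounting the match, uncertainfrac = 200/1000 = 0.2,
                 * uncertain = 0.65, and selec = 0.30 + 0.2 * 0.65 = 0.43.
                 */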
2630                 if (!isdefault1 && !isdefault2)
2631                 {
2632                         nd1 -= nmatches;
2633                         nd2 -= nmatches;
2634                         if (nd1 <= nd2 || nd2 < 0)
2635                                 uncertainfrac = 1.0;
2636                         else
2637                                 uncertainfrac = nd2 / nd1;
2638                 }
2639                 else
2640                         uncertainfrac = 0.5;
2641                 uncertain = 1.0 - matchfreq1 - nullfrac1;
2642                 CLAMP_PROBABILITY(uncertain);
2643                 selec = matchfreq1 + uncertainfrac * uncertain;
2644         }
2645         else
2646         {
2647                 /*
2648                  * Without MCV lists for both sides, we can only use the heuristic
2649                  * about nd1 vs nd2.
2650                  */
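                /*
                 * E.g. (hypothetical): nd1 = 1000, nd2 = 200, nullfrac1 = 0
                 * gives selec = 200/1000 = 0.2; if either ndistinct is only a
                 * default guess we fall back to 0.5 * (1 - nullfrac1).
                 */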
2651                 double          nullfrac1 = stats1 ? stats1->stanullfrac : 0.0;
2652
2653                 if (!isdefault1 && !isdefault2)
2654                 {
2655                         if (nd1 <= nd2 || nd2 < 0)
2656                                 selec = 1.0 - nullfrac1;
2657                         else
2658                                 selec = (nd2 / nd1) * (1.0 - nullfrac1);
2659                 }
2660                 else
2661                         selec = 0.5 * (1.0 - nullfrac1);
2662         }
2663
2664         if (have_mcvs1)
2665                 free_attstatsslot(vardata1->atttype, values1, nvalues1,
2666                                                   numbers1, nnumbers1);
2667         if (have_mcvs2)
2668                 free_attstatsslot(vardata2->atttype, values2, nvalues2,
2669                                                   numbers2, nnumbers2);
2670
2671         return selec;
2672 }
2673
2674 /*
2675  *              neqjoinsel              - Join selectivity of "!="
2676  */
2677 Datum
2678 neqjoinsel(PG_FUNCTION_ARGS)
2679 {
2680         PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
2681         Oid                     operator = PG_GETARG_OID(1);
2682         List       *args = (List *) PG_GETARG_POINTER(2);
2683         JoinType        jointype = (JoinType) PG_GETARG_INT16(3);
2684         SpecialJoinInfo *sjinfo = (SpecialJoinInfo *) PG_GETARG_POINTER(4);
2685         Oid                     eqop;
2686         float8          result;
2687
2688         /*
2689          * We want 1 - eqjoinsel() where the equality operator is the one
2690          * associated with this != operator, that is, its negator.
2691          */
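        /*
         * For example, if the negator '=' operator's join selectivity is
         * estimated at 0.01, the '!=' result below becomes 1 - 0.01 = 0.99;
         * with no negator available we use 1 - DEFAULT_EQ_SEL instead.
         */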
2692         eqop = get_negator(operator);
2693         if (eqop)
2694         {
2695                 result = DatumGetFloat8(DirectFunctionCall5(eqjoinsel,
2696                                                                                                         PointerGetDatum(root),
2697                                                                                                         ObjectIdGetDatum(eqop),
2698                                                                                                         PointerGetDatum(args),
2699                                                                                                         Int16GetDatum(jointype),
2700                                                                                                         PointerGetDatum(sjinfo)));
2701         }
2702         else
2703         {
2704                 /* Use default selectivity (should we raise an error instead?) */
2705                 result = DEFAULT_EQ_SEL;
2706         }
2707         result = 1.0 - result;
2708         PG_RETURN_FLOAT8(result);
2709 }
2710
2711 /*
2712  *              scalarltjoinsel - Join selectivity of "<" and "<=" for scalars
2713  */
2714 Datum
2715 scalarltjoinsel(PG_FUNCTION_ARGS)
2716 {
2717         PG_RETURN_FLOAT8(DEFAULT_INEQ_SEL);
2718 }
2719
2720 /*
2721  *              scalargtjoinsel - Join selectivity of ">" and ">=" for scalars
2722  */
2723 Datum
2724 scalargtjoinsel(PG_FUNCTION_ARGS)
2725 {
2726         PG_RETURN_FLOAT8(DEFAULT_INEQ_SEL);
2727 }
2728
2729 /*
2730  * patternjoinsel               - Generic code for pattern-match join selectivity.
2731  */
2732 static double
2733 patternjoinsel(PG_FUNCTION_ARGS, Pattern_Type ptype, bool negate)
2734 {
2735         /* For the moment we just punt. */
2736         return negate ? (1.0 - DEFAULT_MATCH_SEL) : DEFAULT_MATCH_SEL;
2737 }
2738
2739 /*
2740  *              regexeqjoinsel  - Join selectivity of regular-expression pattern match.
2741  */
2742 Datum
2743 regexeqjoinsel(PG_FUNCTION_ARGS)
2744 {
2745         PG_RETURN_FLOAT8(patternjoinsel(fcinfo, Pattern_Type_Regex, false));
2746 }
2747
2748 /*
2749  *              icregexeqjoinsel        - Join selectivity of case-insensitive regex match.
2750  */
2751 Datum
2752 icregexeqjoinsel(PG_FUNCTION_ARGS)
2753 {
2754         PG_RETURN_FLOAT8(patternjoinsel(fcinfo, Pattern_Type_Regex_IC, false));
2755 }
2756
2757 /*
2758  *              likejoinsel                     - Join selectivity of LIKE pattern match.
2759  */
2760 Datum
2761 likejoinsel(PG_FUNCTION_ARGS)
2762 {
2763         PG_RETURN_FLOAT8(patternjoinsel(fcinfo, Pattern_Type_Like, false));
2764 }
2765
2766 /*
2767  *              iclikejoinsel                   - Join selectivity of ILIKE pattern match.
2768  */
2769 Datum
2770 iclikejoinsel(PG_FUNCTION_ARGS)
2771 {
2772         PG_RETURN_FLOAT8(patternjoinsel(fcinfo, Pattern_Type_Like_IC, false));
2773 }
2774
2775 /*
2776  *              regexnejoinsel  - Join selectivity of regex non-match.
2777  */
2778 Datum
2779 regexnejoinsel(PG_FUNCTION_ARGS)
2780 {
2781         PG_RETURN_FLOAT8(patternjoinsel(fcinfo, Pattern_Type_Regex, true));
2782 }
2783
2784 /*
2785  *              icregexnejoinsel        - Join selectivity of case-insensitive regex non-match.
2786  */
2787 Datum
2788 icregexnejoinsel(PG_FUNCTION_ARGS)
2789 {
2790         PG_RETURN_FLOAT8(patternjoinsel(fcinfo, Pattern_Type_Regex_IC, true));
2791 }
2792
2793 /*
2794  *              nlikejoinsel            - Join selectivity of LIKE pattern non-match.
2795  */
2796 Datum
2797 nlikejoinsel(PG_FUNCTION_ARGS)
2798 {
2799         PG_RETURN_FLOAT8(patternjoinsel(fcinfo, Pattern_Type_Like, true));
2800 }
2801
2802 /*
2803  *              icnlikejoinsel          - Join selectivity of ILIKE pattern non-match.
2804  */
2805 Datum
2806 icnlikejoinsel(PG_FUNCTION_ARGS)
2807 {
2808         PG_RETURN_FLOAT8(patternjoinsel(fcinfo, Pattern_Type_Like_IC, true));
2809 }
2810
2811 /*
2812  * mergejoinscansel                     - Scan selectivity of merge join.
2813  *
2814  * A merge join will stop as soon as it exhausts either input stream.
2815  * Therefore, if we can estimate the ranges of both input variables,
2816  * we can estimate how much of the input will actually be read.  This
2817  * can have a considerable impact on the cost when using indexscans.
2818  *
2819  * Also, we can estimate how much of each input has to be read before the
2820  * first join pair is found, which will affect the join's startup time.
2821  *
2822  * clause should be a clause already known to be mergejoinable.  opfamily,
2823  * strategy, and nulls_first specify the sort ordering being used.
2824  *
2825  * The outputs are:
2826  *              *leftstart is set to the fraction of the left-hand variable expected
2827  *               to be scanned before the first join pair is found (0 to 1).
2828  *              *leftend is set to the fraction of the left-hand variable expected
2829  *               to be scanned before the join terminates (0 to 1).
2830  *              *rightstart, *rightend similarly for the right-hand variable.
2831  */
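/*
 * Illustrative example (hypothetical, roughly uniform stats): if the left
 * variable ranges over 1..1000 and the right over 1..100, only about the
 * first 10% of the left input can find partners, so *leftend comes out near
 * 0.10 while *rightend stays at 1.0; both *start fractions stay near 0
 * because the two ranges begin together.
 */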
2832 void
2833 mergejoinscansel(PlannerInfo *root, Node *clause,
2834                                  Oid opfamily, int strategy, bool nulls_first,
2835                                  Selectivity *leftstart, Selectivity *leftend,
2836                                  Selectivity *rightstart, Selectivity *rightend)
2837 {
2838         Node       *left,
2839                            *right;
2840         VariableStatData leftvar,
2841                                 rightvar;
2842         int                     op_strategy;
2843         Oid                     op_lefttype;
2844         Oid                     op_righttype;
2845         Oid                     opno,
2846                                 lsortop,
2847                                 rsortop,
2848                                 lstatop,
2849                                 rstatop,
2850                                 ltop,
2851                                 leop,
2852                                 revltop,
2853                                 revleop;
2854         bool            isgt;
2855         Datum           leftmin,
2856                                 leftmax,
2857                                 rightmin,
2858                                 rightmax;
2859         double          selec;
2860
2861         /* Set default results if we can't figure anything out. */
2862         /* XXX should default "start" fraction be a bit more than 0? */
2863         *leftstart = *rightstart = 0.0;
2864         *leftend = *rightend = 1.0;
2865
2866         /* Deconstruct the merge clause */
2867         if (!is_opclause(clause))
2868                 return;                                 /* shouldn't happen */
2869         opno = ((OpExpr *) clause)->opno;
2870         left = get_leftop((Expr *) clause);
2871         right = get_rightop((Expr *) clause);
2872         if (!right)
2873                 return;                                 /* shouldn't happen */
2874
2875         /* Look for stats for the inputs */
2876         examine_variable(root, left, 0, &leftvar);
2877         examine_variable(root, right, 0, &rightvar);
2878
2879         /* Extract the operator's declared left/right datatypes */
2880         get_op_opfamily_properties(opno, opfamily, false,
2881                                                            &op_strategy,
2882                                                            &op_lefttype,
2883                                                            &op_righttype);
2884         Assert(op_strategy == BTEqualStrategyNumber);
2885
2886         /*
2887          * Look up the various operators we need.  If we don't find them all, it
2888          * probably means the opfamily is broken, but we just fail silently.
2889          *
2890          * Note: we expect that pg_statistic histograms will be sorted by the '<'
2891          * operator, regardless of which sort direction we are considering.
2892          */
2893         switch (strategy)
2894         {
2895                 case BTLessStrategyNumber:
2896                         isgt = false;
2897                         if (op_lefttype == op_righttype)
2898                         {
2899                                 /* easy case */
2900                                 ltop = get_opfamily_member(opfamily,
2901                                                                                    op_lefttype, op_righttype,
2902                                                                                    BTLessStrategyNumber);
2903                                 leop = get_opfamily_member(opfamily,
2904                                                                                    op_lefttype, op_righttype,
2905                                                                                    BTLessEqualStrategyNumber);
2906                                 lsortop = ltop;
2907                                 rsortop = ltop;
2908                                 lstatop = lsortop;
2909                                 rstatop = rsortop;
2910                                 revltop = ltop;
2911                                 revleop = leop;
2912                         }
2913                         else
2914                         {
2915                                 ltop = get_opfamily_member(opfamily,
2916                                                                                    op_lefttype, op_righttype,
2917                                                                                    BTLessStrategyNumber);
2918                                 leop = get_opfamily_member(opfamily,
2919                                                                                    op_lefttype, op_righttype,
2920                                                                                    BTLessEqualStrategyNumber);
2921                                 lsortop = get_opfamily_member(opfamily,
2922                                                                                           op_lefttype, op_lefttype,
2923                                                                                           BTLessStrategyNumber);
2924                                 rsortop = get_opfamily_member(opfamily,
2925                                                                                           op_righttype, op_righttype,
2926                                                                                           BTLessStrategyNumber);
2927                                 lstatop = lsortop;
2928                                 rstatop = rsortop;
2929                                 revltop = get_opfamily_member(opfamily,
2930                                                                                           op_righttype, op_lefttype,
2931                                                                                           BTLessStrategyNumber);
2932                                 revleop = get_opfamily_member(opfamily,
2933                                                                                           op_righttype, op_lefttype,
2934                                                                                           BTLessEqualStrategyNumber);
2935                         }
2936                         break;
2937                 case BTGreaterStrategyNumber:
2938                         /* descending-order case */
2939                         isgt = true;
2940                         if (op_lefttype == op_righttype)
2941                         {
2942                                 /* easy case */
2943                                 ltop = get_opfamily_member(opfamily,
2944                                                                                    op_lefttype, op_righttype,
2945                                                                                    BTGreaterStrategyNumber);
2946                                 leop = get_opfamily_member(opfamily,
2947                                                                                    op_lefttype, op_righttype,
2948                                                                                    BTGreaterEqualStrategyNumber);
2949                                 lsortop = ltop;
2950                                 rsortop = ltop;
2951                                 lstatop = get_opfamily_member(opfamily,
2952                                                                                           op_lefttype, op_lefttype,
2953                                                                                           BTLessStrategyNumber);
2954                                 rstatop = lstatop;
2955                                 revltop = ltop;
2956                                 revleop = leop;
2957                         }
2958                         else
2959                         {
2960                                 ltop = get_opfamily_member(opfamily,
2961                                                                                    op_lefttype, op_righttype,
2962                                                                                    BTGreaterStrategyNumber);
2963                                 leop = get_opfamily_member(opfamily,
2964                                                                                    op_lefttype, op_righttype,
2965                                                                                    BTGreaterEqualStrategyNumber);
2966                                 lsortop = get_opfamily_member(opfamily,
2967                                                                                           op_lefttype, op_lefttype,
2968                                                                                           BTGreaterStrategyNumber);
2969                                 rsortop = get_opfamily_member(opfamily,
2970                                                                                           op_righttype, op_righttype,
2971                                                                                           BTGreaterStrategyNumber);
2972                                 lstatop = get_opfamily_member(opfamily,
2973                                                                                           op_lefttype, op_lefttype,
2974                                                                                           BTLessStrategyNumber);
2975                                 rstatop = get_opfamily_member(opfamily,
2976                                                                                           op_righttype, op_righttype,
2977                                                                                           BTLessStrategyNumber);
2978                                 revltop = get_opfamily_member(opfamily,
2979                                                                                           op_righttype, op_lefttype,
2980                                                                                           BTGreaterStrategyNumber);
2981                                 revleop = get_opfamily_member(opfamily,
2982                                                                                           op_righttype, op_lefttype,
2983                                                                                           BTGreaterEqualStrategyNumber);
2984                         }
2985                         break;
2986                 default:
2987                         goto fail;                      /* shouldn't get here */
2988         }
2989
2990         if (!OidIsValid(lsortop) ||
2991                 !OidIsValid(rsortop) ||
2992                 !OidIsValid(lstatop) ||
2993                 !OidIsValid(rstatop) ||
2994                 !OidIsValid(ltop) ||
2995                 !OidIsValid(leop) ||
2996                 !OidIsValid(revltop) ||
2997                 !OidIsValid(revleop))
2998                 goto fail;                              /* insufficient info in catalogs */
2999
3000         /* Try to get ranges of both inputs */
3001         if (!isgt)
3002         {
3003                 if (!get_variable_range(root, &leftvar, lstatop,
3004                                                                 &leftmin, &leftmax))
3005                         goto fail;                      /* no range available from stats */
3006                 if (!get_variable_range(root, &rightvar, rstatop,
3007                                                                 &rightmin, &rightmax))
3008                         goto fail;                      /* no range available from stats */
3009         }
3010         else
3011         {
3012                 /* need to swap the max and min */
3013                 if (!get_variable_range(root, &leftvar, lstatop,
3014                                                                 &leftmax, &leftmin))
3015                         goto fail;                      /* no range available from stats */
3016                 if (!get_variable_range(root, &rightvar, rstatop,
3017                                                                 &rightmax, &rightmin))
3018                         goto fail;                      /* no range available from stats */
3019         }
3020
3021         /*
3022          * Now, the fraction of the left variable that will be scanned is the
3023          * fraction that's <= the right-side maximum value.  But only believe
3024          * non-default estimates, else stick with our 1.0.
3025          */
3026         selec = scalarineqsel(root, leop, isgt, &leftvar,
3027                                                   rightmax, op_righttype);
3028         if (selec != DEFAULT_INEQ_SEL)
3029                 *leftend = selec;
3030
3031         /* And similarly for the right variable. */
3032         selec = scalarineqsel(root, revleop, isgt, &rightvar,
3033                                                   leftmax, op_lefttype);
3034         if (selec != DEFAULT_INEQ_SEL)
3035                 *rightend = selec;
3036
3037         /*
3038          * Only one of the two "end" fractions can really be less than 1.0;
3039          * believe the smaller estimate and reset the other one to exactly 1.0. If
3040          * we get exactly equal estimates (as can easily happen with self-joins),
3041          * believe neither.
3042          */
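        /*
         * E.g. with estimates *leftend = 0.10 and *rightend = 0.80, the left
         * input is the one expected to run out first, so we keep 0.10 and
         * reset *rightend to 1.0.
         */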
3043         if (*leftend > *rightend)
3044                 *leftend = 1.0;
3045         else if (*leftend < *rightend)
3046                 *rightend = 1.0;
3047         else
3048                 *leftend = *rightend = 1.0;
3049
3050         /*
3051          * Also, the fraction of the left variable that will be scanned before the
3052          * first join pair is found is the fraction that's < the right-side
3053          * minimum value.  But only believe non-default estimates, else stick with
3054          * our own default.
3055          */
3056         selec = scalarineqsel(root, ltop, isgt, &leftvar,
3057                                                   rightmin, op_righttype);
3058         if (selec != DEFAULT_INEQ_SEL)
3059                 *leftstart = selec;
3060
3061         /* And similarly for the right variable. */
3062         selec = scalarineqsel(root, revltop, isgt, &rightvar,
3063                                                   leftmin, op_lefttype);
3064         if (selec != DEFAULT_INEQ_SEL)
3065                 *rightstart = selec;
3066
3067         /*
3068          * Only one of the two "start" fractions can really be more than zero;
3069          * believe the larger estimate and reset the other one to exactly 0.0. If
3070          * we get exactly equal estimates (as can easily happen with self-joins),
3071          * believe neither.
3072          */
3073         if (*leftstart < *rightstart)
3074                 *leftstart = 0.0;
3075         else if (*leftstart > *rightstart)
3076                 *rightstart = 0.0;
3077         else
3078                 *leftstart = *rightstart = 0.0;
3079
3080         /*
3081          * If the sort order is nulls-first, we're going to have to skip over any
3082          * nulls too.  These would not have been counted by scalarineqsel, and we
3083          * can safely add in this fraction regardless of whether we believe
3084          * scalarineqsel's results or not.  But be sure to clamp the sum to 1.0!
3085          */
3086         if (nulls_first)
3087         {
3088                 Form_pg_statistic stats;
3089
3090                 if (HeapTupleIsValid(leftvar.statsTuple))
3091                 {
3092                         stats = (Form_pg_statistic) GETSTRUCT(leftvar.statsTuple);
3093                         *leftstart += stats->stanullfrac;
3094                         CLAMP_PROBABILITY(*leftstart);
3095                         *leftend += stats->stanullfrac;
3096                         CLAMP_PROBABILITY(*leftend);
3097                 }
3098                 if (HeapTupleIsValid(rightvar.statsTuple))
3099                 {
3100                         stats = (Form_pg_statistic) GETSTRUCT(rightvar.statsTuple);
3101                         *rightstart += stats->stanullfrac;
3102                         CLAMP_PROBABILITY(*rightstart);
3103                         *rightend += stats->stanullfrac;
3104                         CLAMP_PROBABILITY(*rightend);
3105                 }
3106         }
3107
3108         /* Disbelieve start >= end, just in case that can happen */
3109         if (*leftstart >= *leftend)
3110         {
3111                 *leftstart = 0.0;
3112                 *leftend = 1.0;
3113         }
3114         if (*rightstart >= *rightend)
3115         {
3116                 *rightstart = 0.0;
3117                 *rightend = 1.0;
3118         }
3119
3120 fail:
3121         ReleaseVariableStats(leftvar);
3122         ReleaseVariableStats(rightvar);
3123 }
3124
3125
3126 /*
3127  * Helper routine for estimate_num_groups: add an item to a list of
3128  * GroupVarInfos, but only if it's not known equal to any of the existing
3129  * entries.
3130  */
3131 typedef struct
3132 {
3133         Node       *var;                        /* might be an expression, not just a Var */
3134         RelOptInfo *rel;                        /* relation it belongs to */
3135         double          ndistinct;              /* # distinct values */
3136 } GroupVarInfo;
3137
3138 static List *
3139 add_unique_group_var(PlannerInfo *root, List *varinfos,
3140                                          Node *var, VariableStatData *vardata)
3141 {
3142         GroupVarInfo *varinfo;
3143         double          ndistinct;
3144         bool            isdefault;
3145         ListCell   *lc;
3146
3147         ndistinct = get_variable_numdistinct(vardata, &isdefault);
3148
3149         /* cannot use foreach here because of possible list_delete */
3150         lc = list_head(varinfos);
3151         while (lc)
3152         {
3153                 varinfo = (GroupVarInfo *) lfirst(lc);
3154
3155                 /* must advance lc before list_delete possibly pfree's it */
3156                 lc = lnext(lc);
3157
3158                 /* Drop exact duplicates */
3159                 if (equal(var, varinfo->var))
3160                         return varinfos;
3161
3162                 /*
3163                  * Drop known-equal vars, but only if they belong to different
3164                  * relations (see comments for estimate_num_groups)
3165                  */
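                /*
                 * For instance (hypothetical query): with GROUP BY t1.x, t2.y
                 * under a join condition t1.x = t2.y, the two are known equal;
                 * we keep whichever has the smaller ndistinct estimate, since
                 * the extra values of the other cannot appear in joined rows.
                 */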
3166                 if (vardata->rel != varinfo->rel &&
3167                         exprs_known_equal(root, var, varinfo->var))
3168                 {
3169                         if (varinfo->ndistinct <= ndistinct)
3170                         {
3171                                 /* Keep older item, forget new one */
3172                                 return varinfos;
3173                         }
3174                         else
3175                         {
3176                                 /* Delete the older item */
3177                                 varinfos = list_delete_ptr(varinfos, varinfo);
3178                         }
3179                 }
3180         }
3181
3182         varinfo = (GroupVarInfo *) palloc(sizeof(GroupVarInfo));
3183
3184         varinfo->var = var;
3185         varinfo->rel = vardata->rel;
3186         varinfo->ndistinct = ndistinct;
3187         varinfos = lappend(varinfos, varinfo);
3188         return varinfos;
3189 }
3190
3191 /*
3192  * estimate_num_groups          - Estimate number of groups in a grouped query
3193  *
3194  * Given a query having a GROUP BY clause, estimate how many groups there
3195  * will be --- ie, the number of distinct combinations of the GROUP BY
3196  * expressions.
3197  *
3198  * This routine is also used to estimate the number of rows emitted by
3199  * a DISTINCT filtering step; that is an isomorphic problem.  (Note:
3200  * actually, we only use it for DISTINCT when there's no grouping or
3201  * aggregation ahead of the DISTINCT.)
3202  *
3203  * Inputs:
3204  *      root - the query
3205  *      groupExprs - list of expressions being grouped by
3206  *      input_rows - number of rows estimated to arrive at the group/unique
3207  *              filter step
3208  *      pgset - NULL, or a List** pointing to a grouping set to filter the
3209  *              groupExprs against
3210  *
3211  * Given the lack of any cross-correlation statistics in the system, it's
3212  * impossible to do anything really trustworthy with GROUP BY conditions
3213  * involving multiple Vars.  We should however avoid assuming the worst
3214  * case (all possible cross-product terms actually appear as groups) since
3215  * very often the grouped-by Vars are highly correlated.  Our current approach
3216  * is as follows:
3217  *      1.  Expressions yielding boolean are assumed to contribute two groups,
3218  *              independently of their content, and are ignored in the subsequent
3219  *              steps.  This is mainly because tests like "col IS NULL" break the
3220  *              heuristic used in step 2 especially badly.
3221  *      2.  Reduce the given expressions to a list of unique Vars used.  For
3222  *              example, GROUP BY a, a + b is treated the same as GROUP BY a, b.
3223  *              It is clearly correct not to count the same Var more than once.
3224  *              It is also reasonable to treat f(x) the same as x: f() cannot
3225  *              increase the number of distinct values (unless it is volatile,
3226  *              which we consider unlikely for grouping), but it probably won't
3227  *              reduce the number of distinct values much either.
3228  *              As a special case, if a GROUP BY expression can be matched to an
3229  *              expressional index for which we have statistics, then we treat the
3230  *              whole expression as though it were just a Var.
3231  *      3.  If the list contains Vars of different relations that are known equal
3232  *              due to equivalence classes, then drop all but one of the Vars from each
3233  *              known-equal set, keeping the one with smallest estimated # of values
3234  *              (since the extra values of the others can't appear in joined rows).
3235  *              Note the reason we only consider Vars of different relations is that
3236  *              if we considered ones of the same rel, we'd be double-counting the
3237  *              restriction selectivity of the equality in the next step.
3238  *      4.  For Vars within a single source rel, we multiply together the numbers
3239  *              of values, clamp to the number of rows in the rel (divided by 10 if
3240  *              more than one Var), and then multiply by the selectivity of the
3241  *              restriction clauses for that rel.  When there's more than one Var,
3242  *              the initial product is probably too high (it's the worst case) but
3243  *              clamping to a fraction of the rel's rows seems to be a helpful
3244  *              heuristic for not letting the estimate get out of hand.  (The factor
3245  *              of 10 is derived from pre-Postgres-7.4 practice.)  Multiplying
3246  *              by the restriction selectivity is effectively assuming that the
3247  *              restriction clauses are independent of the grouping, which is a crummy
3248  *              assumption, but it's hard to do better.
3249  *      5.  If there are Vars from multiple rels, we repeat step 4 for each such
3250  *              rel, and multiply the results together.
3251  * Note that rels not containing grouped Vars are ignored completely, as are
3252  * join clauses.  Such rels cannot increase the number of groups, and we
3253  * assume such clauses do not reduce the number either (somewhat bogus,
3254  * but we don't have the info to do better).
3255  */
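/*
 * Worked example with made-up numbers: GROUP BY a, b on one rel having 10000
 * tuples and 2000 rows surviving its restrictions, where a has ~100 distinct
 * values and b ~50.  Step 4 forms the product 100 * 50 = 5000, clamps it to
 * 10000 * 0.1 = 1000 (more than one Var), then multiplies by the restriction
 * selectivity 2000/10000 = 0.2, for an estimate of about 200 groups.
 */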
3256 double
3257 estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows,
3258                                         List **pgset)
3259 {
3260         List       *varinfos = NIL;
3261         double          numdistinct;
3262         ListCell   *l;
3263         int                     i;
3264
3265         /*
3266          * We don't ever want to return an estimate of zero groups, as that tends
3267          * to lead to division-by-zero and other unpleasantness.  The input_rows
3268          * estimate is usually already at least 1, but clamp it just in case it
3269          * isn't.
3270          */
3271         input_rows = clamp_row_est(input_rows);
3272
3273         /*
3274          * If no grouping columns, there's exactly one group.  (This can't happen
3275          * for normal cases with GROUP BY or DISTINCT, but it is possible for
3276          * corner cases with set operations.)
3277          */
3278         if (groupExprs == NIL || (pgset && list_length(*pgset) < 1))
3279                 return 1.0;
3280
3281         /*
3282          * Count groups derived from boolean grouping expressions.  For other
3283          * expressions, find the unique Vars used, treating an expression as a Var
3284          * if we can find stats for it.  For each one, record the statistical
3285          * estimate of number of distinct values (total in its table, without
3286          * regard for filtering).
3287          */
3288         numdistinct = 1.0;
3289
3290         i = 0;
3291         foreach(l, groupExprs)
3292         {
3293                 Node       *groupexpr = (Node *) lfirst(l);
3294                 VariableStatData vardata;
3295                 List       *varshere;
3296                 ListCell   *l2;
3297
3298                 /* is expression in this grouping set? */
3299                 if (pgset && !list_member_int(*pgset, i++))
3300                         continue;
3301
3302                 /* Short-circuit for expressions returning boolean */
3303                 if (exprType(groupexpr) == BOOLOID)
3304                 {
3305                         numdistinct *= 2.0;
3306                         continue;
3307                 }
3308
3309                 /*
3310                  * If examine_variable is able to deduce anything about the GROUP BY
3311                  * expression, treat it as a single variable even if it's really more
3312                  * complicated.
3313                  */
3314                 examine_variable(root, groupexpr, 0, &vardata);
3315                 if (HeapTupleIsValid(vardata.statsTuple) || vardata.isunique)
3316                 {
3317                         varinfos = add_unique_group_var(root, varinfos,
3318                                                                                         groupexpr, &vardata);
3319                         ReleaseVariableStats(vardata);
3320                         continue;
3321                 }
3322                 ReleaseVariableStats(vardata);
3323
3324                 /*
3325                  * Else pull out the component Vars.  Handle PlaceHolderVars by
3326                  * recursing into their arguments (effectively assuming that the
3327                  * PlaceHolderVar doesn't change the number of groups, which boils
3328                  * down to ignoring the possible addition of nulls to the result set).
3329                  */
3330                 varshere = pull_var_clause(groupexpr,
3331                                                                    PVC_RECURSE_AGGREGATES |
3332                                                                    PVC_RECURSE_WINDOWFUNCS |
3333                                                                    PVC_RECURSE_PLACEHOLDERS);
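                /*
                 * For example, a GROUP BY item such as (a + b) contributes the
                 * Vars a and b here; duplicates across items (say GROUP BY a,
                 * a + b) are collapsed later by add_unique_group_var, per step
                 * 2 of the header comment above.
                 */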
3334
3335                 /*
3336                  * If we find any variable-free GROUP BY item, then either it is a
3337                  * constant (and we can ignore it) or it contains a volatile function;
3338                  * in the latter case we punt and assume that each input row will
3339                  * yield a distinct group.
3340                  */
3341                 if (varshere == NIL)
3342                 {
3343                         if (contain_volatile_functions(groupexpr))
3344                                 return input_rows;
3345                         continue;
3346                 }
3347
3348                 /*
3349                  * Else add variables to varinfos list
3350                  */
3351                 foreach(l2, varshere)
3352                 {
3353                         Node       *var = (Node *) lfirst(l2);
3354
3355                         examine_variable(root, var, 0, &vardata);
3356                         varinfos = add_unique_group_var(root, varinfos, var, &vardata);
3357                         ReleaseVariableStats(vardata);
3358                 }
3359         }
3360
3361         /*
3362          * If now no Vars, we must have an all-constant or all-boolean GROUP BY
3363          * list.
3364          */
3365         if (varinfos == NIL)
3366         {
3367                 /* Guard against out-of-range answers */
3368                 if (numdistinct > input_rows)
3369                         numdistinct = input_rows;
3370                 return numdistinct;
3371         }
3372
3373         /*
3374          * Group Vars by relation and estimate total numdistinct.
3375          *
3376          * For each iteration of the outer loop, we process the frontmost Var in
3377          * varinfos, plus all other Vars in the same relation.  We remove these
3378          * Vars from the newvarinfos list for the next iteration. This is the
3379          * easiest way to group Vars of same rel together.
3380          */
3381         do
3382         {
3383                 GroupVarInfo *varinfo1 = (GroupVarInfo *) linitial(varinfos);
3384                 RelOptInfo *rel = varinfo1->rel;
3385                 double          reldistinct = varinfo1->ndistinct;
3386                 double          relmaxndistinct = reldistinct;
3387                 int                     relvarcount = 1;
3388                 List       *newvarinfos = NIL;
3389
3390                 /*
3391                  * Get the product of numdistinct estimates of the Vars for this rel.
3392                  * Also, construct new varinfos list of remaining Vars.
3393                  */
3394                 for_each_cell(l, lnext(list_head(varinfos)))
3395                 {
3396                         GroupVarInfo *varinfo2 = (GroupVarInfo *) lfirst(l);
3397
3398                         if (varinfo2->rel == varinfo1->rel)
3399                         {
3400                                 reldistinct *= varinfo2->ndistinct;
3401                                 if (relmaxndistinct < varinfo2->ndistinct)
3402                                         relmaxndistinct = varinfo2->ndistinct;
3403                                 relvarcount++;
3404                         }
3405                         else
3406                         {
3407                                 /* not time to process varinfo2 yet */
3408                                 newvarinfos = lcons(varinfo2, newvarinfos);
3409                         }
3410                 }
3411
3412                 /*
3413                  * Sanity check --- don't divide by zero if empty relation.
3414                  */
3415                 Assert(rel->reloptkind == RELOPT_BASEREL);
3416                 if (rel->tuples > 0)
3417                 {
3418                         /*
3419                          * Clamp to size of rel, or size of rel / 10 if multiple Vars. The
3420                          * fudge factor is because the Vars are probably correlated but we
3421                          * don't know by how much.  We should never clamp to less than the
3422                          * largest ndistinct value for any of the Vars, though, since
3423                          * there will surely be at least that many groups.
3424                          */
3425                         double          clamp = rel->tuples;
3426
3427                         if (relvarcount > 1)
3428                         {
3429                                 clamp *= 0.1;
3430                                 if (clamp < relmaxndistinct)
3431                                 {
3432                                         clamp = relmaxndistinct;
3433                                         /* for sanity in case some ndistinct is too large: */
3434                                         if (clamp > rel->tuples)
3435                                                 clamp = rel->tuples;
3436                                 }
3437                         }
3438                         if (reldistinct > clamp)
3439                                 reldistinct = clamp;
3440
3441                         /*
3442                          * Multiply by restriction selectivity.
3443                          */
3444                         reldistinct *= rel->rows / rel->tuples;
3445
3446                         /*
3447                          * Update estimate of total distinct groups.
3448                          */
3449                         numdistinct *= reldistinct;
3450                 }
3451
3452                 varinfos = newvarinfos;
3453         } while (varinfos != NIL);
3454
3455         numdistinct = ceil(numdistinct);
3456
3457         /* Guard against out-of-range answers */
3458         if (numdistinct > input_rows)
3459                 numdistinct = input_rows;
3460         if (numdistinct < 1.0)
3461                 numdistinct = 1.0;
3462
3463         return numdistinct;
3464 }
3465
3466 /*
3467  * Estimate hash bucketsize fraction (ie, number of entries in a bucket
3468  * divided by total tuples in relation) if the specified expression is used
3469  * as a hash key.
3470  *
3471  * XXX This is really pretty bogus since we're effectively assuming that the
3472  * distribution of hash keys will be the same after applying restriction
3473  * clauses as it was in the underlying relation.  However, we are not nearly
3474  * smart enough to figure out how the restrict clauses might change the
3475  * distribution, so this will have to do for now.
3476  *
3477  * We are passed the number of buckets the executor will use for the given
3478  * input relation.  If the data were perfectly distributed, with the same
3479  * number of tuples going into each available bucket, then the bucketsize
3480  * fraction would be 1/nbuckets.  But this happy state of affairs will occur
3481  * only if (a) there are at least nbuckets distinct data values, and (b)
3482  * we have a not-too-skewed data distribution.  Otherwise the buckets will
3483  * be nonuniformly occupied.  If the other relation in the join has a key
3484  * distribution similar to this one's, then the most-loaded buckets are
3485  * exactly those that will be probed most often.  Therefore, the "average"
3486  * bucket size for costing purposes should really be taken as something close
3487  * to the "worst case" bucket size.  We try to estimate this by adjusting the
3488  * fraction if there are too few distinct data values, and then scaling up
3489  * by the ratio of the most common value's frequency to the average frequency.
3490  *
3491  * If no statistics are available, use a default estimate of 0.1.  This will
3492  * discourage use of a hash rather strongly if the inner relation is large,
3493  * which is what we want.  We do not want to hash unless we know that the
3494  * inner rel is well-dispersed (or the alternatives seem much worse).
3495  */
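/*
 * A rough worked example (values invented): with ndistinct = 2000 and
 * stanullfrac = 0, avgfreq is 1/2000 = 0.0005.  If the restriction clauses
 * keep 25% of the rows, ndistinct is scaled down to 500; with nbuckets =
 * 1024 that gives an initial estfract of 1/500 = 0.002.  If the most common
 * value has frequency 0.01, the skew adjustment multiplies by 0.01/0.0005 =
 * 20, yielding a final bucketsize fraction of 0.04.
 */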
3496 Selectivity
3497 estimate_hash_bucketsize(PlannerInfo *root, Node *hashkey, double nbuckets)
3498 {
3499         VariableStatData vardata;
3500         double          estfract,
3501                                 ndistinct,
3502                                 stanullfrac,
3503                                 mcvfreq,
3504                                 avgfreq;
3505         bool            isdefault;
3506         float4     *numbers;
3507         int                     nnumbers;
3508
3509         examine_variable(root, hashkey, 0, &vardata);
3510
3511         /* Get number of distinct values */
3512         ndistinct = get_variable_numdistinct(&vardata, &isdefault);
3513
3514         /* If ndistinct isn't real, punt and return 0.1, per comments above */
3515         if (isdefault)
3516         {
3517                 ReleaseVariableStats(vardata);
3518                 return (Selectivity) 0.1;
3519         }
3520
3521         /* Get fraction that are null */
3522         if (HeapTupleIsValid(vardata.statsTuple))
3523         {
3524                 Form_pg_statistic stats;
3525
3526                 stats = (Form_pg_statistic) GETSTRUCT(vardata.statsTuple);
3527                 stanullfrac = stats->stanullfrac;
3528         }
3529         else
3530                 stanullfrac = 0.0;
3531
3532         /* Compute avg freq of all distinct data values in raw relation */
3533         avgfreq = (1.0 - stanullfrac) / ndistinct;
3534
3535         /*
3536          * Adjust ndistinct to account for restriction clauses.  Observe we are
3537          * assuming that the data distribution is affected uniformly by the
3538          * restriction clauses!
3539          *
3540          * XXX Possibly better way, but much more expensive: multiply by
3541          * selectivity of rel's restriction clauses that mention the target Var.
3542          */
3543         if (vardata.rel)
3544                 ndistinct *= vardata.rel->rows / vardata.rel->tuples;
3545
3546         /*
3547          * Initial estimate of bucketsize fraction is 1/nbuckets as long as the
3548          * number of buckets is less than the expected number of distinct values;
3549          * otherwise it is 1/ndistinct.
3550          */
3551         if (ndistinct > nbuckets)
3552                 estfract = 1.0 / nbuckets;
3553         else
3554                 estfract = 1.0 / ndistinct;
3555
3556         /*
3557          * Look up the frequency of the most common value, if available.
3558          */
3559         mcvfreq = 0.0;
3560
3561         if (HeapTupleIsValid(vardata.statsTuple))
3562         {
3563                 if (get_attstatsslot(vardata.statsTuple,
3564                                                          vardata.atttype, vardata.atttypmod,
3565                                                          STATISTIC_KIND_MCV, InvalidOid,
3566                                                          NULL,
3567                                                          NULL, NULL,
3568                                                          &numbers, &nnumbers))
3569                 {
3570                         /*
3571                          * The first MCV stat is for the most common value.
3572                          */
3573                         if (nnumbers > 0)
3574                                 mcvfreq = numbers[0];
3575                         free_attstatsslot(vardata.atttype, NULL, 0,
3576                                                           numbers, nnumbers);
3577                 }
3578         }
3579
3580         /*
3581          * Adjust estimated bucketsize upward to account for skewed distribution.
3582          */
3583         if (avgfreq > 0.0 && mcvfreq > avgfreq)
3584                 estfract *= mcvfreq / avgfreq;
3585
3586         /*
3587          * Clamp bucketsize to sane range (the above adjustment could easily
3588          * produce an out-of-range result).  We set the lower bound a little above
3589          * zero, since zero isn't a very sane result.
3590          */
3591         if (estfract < 1.0e-6)
3592                 estfract = 1.0e-6;
3593         else if (estfract > 1.0)
3594                 estfract = 1.0;
3595
3596         ReleaseVariableStats(vardata);
3597
3598         return (Selectivity) estfract;
3599 }
3600
3601
3602 /*-------------------------------------------------------------------------
3603  *
3604  * Support routines
3605  *
3606  *-------------------------------------------------------------------------
3607  */
3608
3609 /*
3610  * convert_to_scalar
3611  *        Convert non-NULL values of the indicated types to the comparison
3612  *        scale needed by scalarineqsel().
3613  *        Returns "true" if successful.
3614  *
3615  * XXX this routine is a hack: ideally we should look up the conversion
3616  * subroutines in pg_type.
3617  *
3618  * All numeric datatypes are simply converted to their equivalent
3619  * "double" values.  (NUMERIC values that are outside the range of "double"
3620  * are clamped to +/- HUGE_VAL.)
3621  *
3622  * String datatypes are converted by convert_string_to_scalar(),
3623  * which is explained below.  The reason why this routine deals with
3624  * three values at a time, not just one, is that we need it for strings.
3625  *
3626  * The bytea datatype is just enough different from strings that it has
3627  * to be treated separately.
3628  *
3629  * The several datatypes representing absolute times are all converted
3630  * to Timestamp, which is actually a double, and then we just use that
3631  * double value.  Note this will give correct results even for the "special"
3632  * values of Timestamp, since those are chosen to compare correctly;
3633  * see timestamp_cmp.
3634  *
3635  * The several datatypes representing relative times (intervals) are all
3636  * converted to measurements expressed in seconds.
3637  */
3638 static bool
3639 convert_to_scalar(Datum value, Oid valuetypid, double *scaledvalue,
3640                                   Datum lobound, Datum hibound, Oid boundstypid,
3641                                   double *scaledlobound, double *scaledhibound)
3642 {
3643         /*
3644          * Both the valuetypid and the boundstypid should exactly match the
3645          * declared input type(s) of the operator we are invoked for, so we just
3646          * error out if either is not recognized.
3647          *
3648          * XXX The histogram we are interpolating between points of could belong
3649          * to a column that's only binary-compatible with the declared type. In
3650          * essence we are assuming that the semantics of binary-compatible types
3651          * are enough alike that we can use a histogram generated with one type's
3652          * operators to estimate selectivity for the other's.  This is outright
3653          * wrong in some cases --- in particular signed versus unsigned
3654          * interpretation could trip us up.  But it's useful enough in the
3655          * majority of cases that we do it anyway.  Should think about more
3656          * rigorous ways to do it.
3657          */
3658         switch (valuetypid)
3659         {
3660                         /*
3661                          * Built-in numeric types
3662                          */
3663                 case BOOLOID:
3664                 case INT2OID:
3665                 case INT4OID:
3666                 case INT8OID:
3667                 case FLOAT4OID:
3668                 case FLOAT8OID:
3669                 case NUMERICOID:
3670                 case OIDOID:
3671                 case REGPROCOID:
3672                 case REGPROCEDUREOID:
3673                 case REGOPEROID:
3674                 case REGOPERATOROID:
3675                 case REGCLASSOID:
3676                 case REGTYPEOID:
3677                 case REGCONFIGOID:
3678                 case REGDICTIONARYOID:
3679                 case REGROLEOID:
3680                 case REGNAMESPACEOID:
3681                         *scaledvalue = convert_numeric_to_scalar(value, valuetypid);
3682                         *scaledlobound = convert_numeric_to_scalar(lobound, boundstypid);
3683                         *scaledhibound = convert_numeric_to_scalar(hibound, boundstypid);
3684                         return true;
3685
3686                         /*
3687                          * Built-in string types
3688                          */
3689                 case CHAROID:
3690                 case BPCHAROID:
3691                 case VARCHAROID:
3692                 case TEXTOID:
3693                 case NAMEOID:
3694                         {
3695                                 char       *valstr = convert_string_datum(value, valuetypid);
3696                                 char       *lostr = convert_string_datum(lobound, boundstypid);
3697                                 char       *histr = convert_string_datum(hibound, boundstypid);
3698
3699                                 convert_string_to_scalar(valstr, scaledvalue,
3700                                                                                  lostr, scaledlobound,
3701                                                                                  histr, scaledhibound);
3702                                 pfree(valstr);
3703                                 pfree(lostr);
3704                                 pfree(histr);
3705                                 return true;
3706                         }
3707
3708                         /*
3709                          * Built-in bytea type
3710                          */
3711                 case BYTEAOID:
3712                         {
3713                                 convert_bytea_to_scalar(value, scaledvalue,
3714                                                                                 lobound, scaledlobound,
3715                                                                                 hibound, scaledhibound);
3716                                 return true;
3717                         }
3718
3719                         /*
3720                          * Built-in time types
3721                          */
3722                 case TIMESTAMPOID:
3723                 case TIMESTAMPTZOID:
3724                 case ABSTIMEOID:
3725                 case DATEOID:
3726                 case INTERVALOID:
3727                 case RELTIMEOID:
3728                 case TINTERVALOID:
3729                 case TIMEOID:
3730                 case TIMETZOID:
3731                         *scaledvalue = convert_timevalue_to_scalar(value, valuetypid);
3732                         *scaledlobound = convert_timevalue_to_scalar(lobound, boundstypid);
3733                         *scaledhibound = convert_timevalue_to_scalar(hibound, boundstypid);
3734                         return true;
3735
3736                         /*
3737                          * Built-in network types
3738                          */
3739                 case INETOID:
3740                 case CIDROID:
3741                 case MACADDROID:
3742                         *scaledvalue = convert_network_to_scalar(value, valuetypid);
3743                         *scaledlobound = convert_network_to_scalar(lobound, boundstypid);
3744                         *scaledhibound = convert_network_to_scalar(hibound, boundstypid);
3745                         return true;
3746         }
3747         /* Don't know how to convert */
3748         *scaledvalue = *scaledlobound = *scaledhibound = 0;
3749         return false;
3750 }
3751
3752 /*
3753  * Do convert_to_scalar()'s work for any numeric data type.
3754  */
3755 static double
3756 convert_numeric_to_scalar(Datum value, Oid typid)
3757 {
3758         switch (typid)
3759         {
3760                 case BOOLOID:
3761                         return (double) DatumGetBool(value);
3762                 case INT2OID:
3763                         return (double) DatumGetInt16(value);
3764                 case INT4OID:
3765                         return (double) DatumGetInt32(value);
3766                 case INT8OID:
3767                         return (double) DatumGetInt64(value);
3768                 case FLOAT4OID:
3769                         return (double) DatumGetFloat4(value);
3770                 case FLOAT8OID:
3771                         return (double) DatumGetFloat8(value);
3772                 case NUMERICOID:
3773                         /* Note: out-of-range values will be clamped to +-HUGE_VAL */
3774                         return (double)
3775                                 DatumGetFloat8(DirectFunctionCall1(numeric_float8_no_overflow,
3776                                                                                                    value));
3777                 case OIDOID:
3778                 case REGPROCOID:
3779                 case REGPROCEDUREOID:
3780                 case REGOPEROID:
3781                 case REGOPERATOROID:
3782                 case REGCLASSOID:
3783                 case REGTYPEOID:
3784                 case REGCONFIGOID:
3785                 case REGDICTIONARYOID:
3786                 case REGROLEOID:
3787                 case REGNAMESPACEOID:
3788                         /* we can treat OIDs as integers... */
3789                         return (double) DatumGetObjectId(value);
3790         }
3791
3792         /*
3793          * Can't get here unless someone tries to use scalarltsel/scalargtsel on
3794          * an operator with one numeric and one non-numeric operand.
3795          */
3796         elog(ERROR, "unsupported type: %u", typid);
3797         return 0;
3798 }
3799
3800 /*
3801  * Do convert_to_scalar()'s work for any character-string data type.
3802  *
3803  * String datatypes are converted to a scale that ranges from 0 to 1,
3804  * where we visualize the bytes of the string as fractional digits.
3805  *
3806  * We do not want the base to be 256, however, since that tends to
3807  * generate inflated selectivity estimates; few databases will have
3808  * occurrences of all 256 possible byte values at each position.
3809  * Instead, use the smallest and largest byte values seen in the bounds
3810  * as the estimated range for each byte, after some fudging to deal with
3811  * the fact that we probably aren't going to see the full range that way.
3812  *
3813  * An additional refinement is that we discard any common prefix of the
3814  * three strings before computing the scaled values.  This allows us to
3815  * "zoom in" when we encounter a narrow data range.  An example is a phone
3816  * number database where all the values begin with the same area code.
3817  * (Actually, the bounds will be adjacent histogram-bin-boundary values,
3818  * so this is more likely to happen than you might think.)
3819  */
3820 static void
3821 convert_string_to_scalar(char *value,
3822                                                  double *scaledvalue,
3823                                                  char *lobound,
3824                                                  double *scaledlobound,
3825                                                  char *hibound,
3826                                                  double *scaledhibound)
3827 {
3828         int                     rangelo,
3829                                 rangehi;
3830         char       *sptr;
3831
3832         rangelo = rangehi = (unsigned char) hibound[0];
3833         for (sptr = lobound; *sptr; sptr++)
3834         {
3835                 if (rangelo > (unsigned char) *sptr)
3836                         rangelo = (unsigned char) *sptr;
3837                 if (rangehi < (unsigned char) *sptr)
3838                         rangehi = (unsigned char) *sptr;
3839         }
3840         for (sptr = hibound; *sptr; sptr++)
3841         {
3842                 if (rangelo > (unsigned char) *sptr)
3843                         rangelo = (unsigned char) *sptr;
3844                 if (rangehi < (unsigned char) *sptr)
3845                         rangehi = (unsigned char) *sptr;
3846         }
3847         /* If range includes any upper-case ASCII chars, make it include all */
3848         if (rangelo <= 'Z' && rangehi >= 'A')
3849         {
3850                 if (rangelo > 'A')
3851                         rangelo = 'A';
3852                 if (rangehi < 'Z')
3853                         rangehi = 'Z';
3854         }
3855         /* Ditto lower-case */
3856         if (rangelo <= 'z' && rangehi >= 'a')
3857         {
3858                 if (rangelo > 'a')
3859                         rangelo = 'a';
3860                 if (rangehi < 'z')
3861                         rangehi = 'z';
3862         }
3863         /* Ditto digits */
3864         if (rangelo <= '9' && rangehi >= '0')
3865         {
3866                 if (rangelo > '0')
3867                         rangelo = '0';
3868                 if (rangehi < '9')
3869                         rangehi = '9';
3870         }
3871
3872         /*
3873          * If range includes less than 10 chars, assume we have not got enough
3874          * data, and make it include regular ASCII set.
3875          */
3876         if (rangehi - rangelo < 9)
3877         {
3878                 rangelo = ' ';
3879                 rangehi = 127;
3880         }
3881
3882         /*
3883          * Now strip any common prefix of the three strings.
3884          */
3885         while (*lobound)
3886         {
3887                 if (*lobound != *hibound || *lobound != *value)
3888                         break;
3889                 lobound++, hibound++, value++;
3890         }
3891
3892         /*
3893          * Now we can do the conversions.
3894          */
3895         *scaledvalue = convert_one_string_to_scalar(value, rangelo, rangehi);
3896         *scaledlobound = convert_one_string_to_scalar(lobound, rangelo, rangehi);
3897         *scaledhibound = convert_one_string_to_scalar(hibound, rangelo, rangehi);
3898 }
3899
3900 static double
3901 convert_one_string_to_scalar(char *value, int rangelo, int rangehi)
3902 {
3903         int                     slen = strlen(value);
3904         double          num,
3905                                 denom,
3906                                 base;
3907
3908         if (slen <= 0)
3909                 return 0.0;                             /* empty string has scalar value 0 */
3910
3911         /*
3912          * There seems little point in considering more than a dozen bytes from
3913          * the string.  Since base is at least 10, that will give us nominal
3914          * resolution of at least 12 decimal digits, which is surely far more
3915          * precision than this estimation technique has got anyway (especially in
3916          * non-C locales).  Also, even with the maximum possible base of 256, this
3917          * ensures denom cannot grow larger than 256^13 = 2.03e31, which will not
3918          * overflow on any known machine.
3919          */
3920         if (slen > 12)
3921                 slen = 12;
3922
3923         /* Convert initial characters to fraction */
3924         base = rangehi - rangelo + 1;
3925         num = 0.0;
3926         denom = base;
3927         while (slen-- > 0)
3928         {
3929                 int                     ch = (unsigned char) *value++;
3930
3931                 if (ch < rangelo)
3932                         ch = rangelo - 1;
3933                 else if (ch > rangehi)
3934                         ch = rangehi + 1;
3935                 num += ((double) (ch - rangelo)) / denom;
3936                 denom *= base;
3937         }
3938
3939         return num;
3940 }
3941
3942 /*
3943  * Convert a string-type Datum into a palloc'd, null-terminated string.
3944  *
3945  * When using a non-C locale, we must pass the string through strxfrm()
3946  * before continuing, so as to generate correct locale-specific results.
3947  */
3948 static char *
3949 convert_string_datum(Datum value, Oid typid)
3950 {
3951         char       *val;
3952
3953         switch (typid)
3954         {
3955                 case CHAROID:
3956                         val = (char *) palloc(2);
3957                         val[0] = DatumGetChar(value);
3958                         val[1] = '\0';
3959                         break;
3960                 case BPCHAROID:
3961                 case VARCHAROID:
3962                 case TEXTOID:
3963                         val = TextDatumGetCString(value);
3964                         break;
3965                 case NAMEOID:
3966                         {
3967                                 NameData   *nm = (NameData *) DatumGetPointer(value);
3968
3969                                 val = pstrdup(NameStr(*nm));
3970                                 break;
3971                         }
3972                 default:
3973
3974                         /*
3975                          * Can't get here unless someone tries to use scalarltsel on an
3976                          * operator with one string and one non-string operand.
3977                          */
3978                         elog(ERROR, "unsupported type: %u", typid);
3979                         return NULL;
3980         }
3981
3982         if (!lc_collate_is_c(DEFAULT_COLLATION_OID))
3983         {
3984                 char       *xfrmstr;
3985                 size_t          xfrmlen;
3986                 size_t xfrmlen2 PG_USED_FOR_ASSERTS_ONLY;
3987
3988                 /*
3989                  * XXX: We could guess at a suitable output buffer size and only call
3990                  * strxfrm twice if our guess is too small.
3991                  *
3992                  * XXX: strxfrm doesn't support UTF-8 encoding on Win32, it can return
3993                  * bogus data or set an error. This is not really a problem unless it
3994                  * crashes since it will only give an estimation error and nothing
3995                  * fatal.
3996                  */
3997 #if _MSC_VER == 1400                    /* VS.Net 2005 */
3998
3999                 /*
4000                  * http://connect.microsoft.com/VisualStudio/feedback/ViewFeedback.aspx?
4001                  * FeedbackID=99694
4002                  */
4003                 {
4004                         char            x[1];
4005
4006                         xfrmlen = strxfrm(x, val, 0);
4007                 }
4008 #else
4009                 xfrmlen = strxfrm(NULL, val, 0);
4010 #endif
4011 #ifdef WIN32
4012
4013                 /*
4014                  * On Windows, strxfrm returns INT_MAX when an error occurs. Instead
4015                  * of trying to allocate this much memory (and fail), just return the
4016                  * original string unmodified as if we were in the C locale.
4017                  */
4018                 if (xfrmlen == INT_MAX)
4019                         return val;
4020 #endif
4021                 xfrmstr = (char *) palloc(xfrmlen + 1);
4022                 xfrmlen2 = strxfrm(xfrmstr, val, xfrmlen + 1);
4023
4024                 /*
4025                  * Some systems (e.g., glibc) can return a smaller value from the
4026                  * second call than the first; thus the Assert must be <= not ==.
4027                  */
4028                 Assert(xfrmlen2 <= xfrmlen);
4029                 pfree(val);
4030                 val = xfrmstr;
4031         }
4032
4033         return val;
4034 }
4035
4036 /*
4037  * Do convert_to_scalar()'s work for any bytea data type.
4038  *
4039  * Very similar to convert_string_to_scalar except we can't assume
4040  * null-termination and therefore pass explicit lengths around.
4041  *
4042  * Also, assumptions about likely "normal" ranges of characters have been
4043  * removed - a data range of 0..255 is always used, for now.  (Perhaps
4044  * someday we will add information about actual byte data range to
4045  * pg_statistic.)
4046  */
4047 static void
4048 convert_bytea_to_scalar(Datum value,
4049                                                 double *scaledvalue,
4050                                                 Datum lobound,
4051                                                 double *scaledlobound,
4052                                                 Datum hibound,
4053                                                 double *scaledhibound)
4054 {
4055         int                     rangelo,
4056                                 rangehi,
4057                                 valuelen = VARSIZE(DatumGetPointer(value)) - VARHDRSZ,
4058                                 loboundlen = VARSIZE(DatumGetPointer(lobound)) - VARHDRSZ,
4059                                 hiboundlen = VARSIZE(DatumGetPointer(hibound)) - VARHDRSZ,
4060                                 i,
4061                                 minlen;
4062         unsigned char *valstr = (unsigned char *) VARDATA(DatumGetPointer(value)),
4063                            *lostr = (unsigned char *) VARDATA(DatumGetPointer(lobound)),
4064                            *histr = (unsigned char *) VARDATA(DatumGetPointer(hibound));
4065
4066         /*
4067          * Assume bytea data is uniformly distributed across all byte values.
4068          */
4069         rangelo = 0;
4070         rangehi = 255;
4071
4072         /*
4073          * Now strip any common prefix of the three strings.
4074          */
4075         minlen = Min(Min(valuelen, loboundlen), hiboundlen);
4076         for (i = 0; i < minlen; i++)
4077         {
4078                 if (*lostr != *histr || *lostr != *valstr)
4079                         break;
4080                 lostr++, histr++, valstr++;
4081                 loboundlen--, hiboundlen--, valuelen--;
4082         }
4083
4084         /*
4085          * Now we can do the conversions.
4086          */
4087         *scaledvalue = convert_one_bytea_to_scalar(valstr, valuelen, rangelo, rangehi);
4088         *scaledlobound = convert_one_bytea_to_scalar(lostr, loboundlen, rangelo, rangehi);
4089         *scaledhibound = convert_one_bytea_to_scalar(histr, hiboundlen, rangelo, rangehi);
4090 }
4091
4092 static double
4093 convert_one_bytea_to_scalar(unsigned char *value, int valuelen,
4094                                                         int rangelo, int rangehi)
4095 {
4096         double          num,
4097                                 denom,
4098                                 base;
4099
4100         if (valuelen <= 0)
4101                 return 0.0;                             /* empty string has scalar value 0 */
4102
4103         /*
4104          * Since base is 256, need not consider more than about 10 chars (even
4105          * this many seems like overkill)
4106          */
4107         if (valuelen > 10)
4108                 valuelen = 10;
4109
4110         /* Convert initial characters to fraction */
4111         base = rangehi - rangelo + 1;
4112         num = 0.0;
4113         denom = base;
4114         while (valuelen-- > 0)
4115         {
4116                 int                     ch = *value++;
4117
4118                 if (ch < rangelo)
4119                         ch = rangelo - 1;
4120                 else if (ch > rangehi)
4121                         ch = rangehi + 1;
4122                 num += ((double) (ch - rangelo)) / denom;
4123                 denom *= base;
4124         }
4125
4126         return num;
4127 }
4128
4129 /*
4130  * Do convert_to_scalar()'s work for any timevalue data type.
4131  */
4132 static double
4133 convert_timevalue_to_scalar(Datum value, Oid typid)
4134 {
4135         switch (typid)
4136         {
4137                 case TIMESTAMPOID:
4138                         return DatumGetTimestamp(value);
4139                 case TIMESTAMPTZOID:
4140                         return DatumGetTimestampTz(value);
4141                 case ABSTIMEOID:
4142                         return DatumGetTimestamp(DirectFunctionCall1(abstime_timestamp,
4143                                                                                                                  value));
4144                 case DATEOID:
4145                         return date2timestamp_no_overflow(DatumGetDateADT(value));
4146                 case INTERVALOID:
4147                         {
4148                                 Interval   *interval = DatumGetIntervalP(value);
4149
4150                                 /*
4151                                  * Convert the month part of Interval to days using assumed
4152                                  * average month length of 365.25/12.0 days.  Not too
4153                                  * accurate, but plenty good enough for our purposes.
4154                                  */
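                                /*
                                 * For instance (illustrative only): '1 month' has month = 1,
                                 * day = 0, time = 0, so it scales to 30.4375 days, i.e. about
                                 * 2629800 seconds (2.6298e12 usec with integer timestamps).
                                 */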
4155 #ifdef HAVE_INT64_TIMESTAMP
4156                                 return interval->time + interval->day * (double) USECS_PER_DAY +
4157                                         interval->month * ((DAYS_PER_YEAR / (double) MONTHS_PER_YEAR) * USECS_PER_DAY);
4158 #else
4159                                 return interval->time + interval->day * SECS_PER_DAY +
4160                                         interval->month * ((DAYS_PER_YEAR / (double) MONTHS_PER_YEAR) * (double) SECS_PER_DAY);
4161 #endif
4162                         }
4163                 case RELTIMEOID:
4164 #ifdef HAVE_INT64_TIMESTAMP
4165                         return (DatumGetRelativeTime(value) * 1000000.0);
4166 #else
4167                         return DatumGetRelativeTime(value);
4168 #endif
4169                 case TINTERVALOID:
4170                         {
4171                                 TimeInterval tinterval = DatumGetTimeInterval(value);
4172
4173 #ifdef HAVE_INT64_TIMESTAMP
4174                                 if (tinterval->status != 0)
4175                                         return ((tinterval->data[1] - tinterval->data[0]) * 1000000.0);
4176 #else
4177                                 if (tinterval->status != 0)
4178                                         return tinterval->data[1] - tinterval->data[0];
4179 #endif
4180                                 return 0;               /* for lack of a better idea */
4181                         }
4182                 case TIMEOID:
4183                         return DatumGetTimeADT(value);
4184                 case TIMETZOID:
4185                         {
4186                                 TimeTzADT  *timetz = DatumGetTimeTzADTP(value);
4187
4188                                 /* use GMT-equivalent time */
4189 #ifdef HAVE_INT64_TIMESTAMP
4190                                 return (double) (timetz->time + (timetz->zone * 1000000.0));
4191 #else
4192                                 return (double) (timetz->time + timetz->zone);
4193 #endif
4194                         }
4195         }
4196
4197         /*
4198          * Can't get here unless someone tries to use scalarltsel/scalargtsel on
4199          * an operator with one timevalue and one non-timevalue operand.
4200          */
4201         elog(ERROR, "unsupported type: %u", typid);
4202         return 0;
4203 }
4204
4205
4206 /*
4207  * get_restriction_variable
4208  *              Examine the args of a restriction clause to see if it's of the
4209  *              form (variable op pseudoconstant) or (pseudoconstant op variable),
4210  *              where "variable" could be either a Var or an expression in vars of a
4211  *              single relation.  If so, extract information about the variable,
4212  *              and also indicate which side it was on and the other argument.
4213  *
4214  * Inputs:
4215  *      root: the planner info
4216  *      args: clause argument list
4217  *      varRelid: see specs for restriction selectivity functions
4218  *
4219  * Outputs: (these are valid only if TRUE is returned)
4220  *      *vardata: gets information about variable (see examine_variable)
4221  *      *other: gets other clause argument, aggressively reduced to a constant
4222  *      *varonleft: set TRUE if variable is on the left, FALSE if on the right
4223  *
4224  * Returns TRUE if a variable is identified, otherwise FALSE.
4225  *
4226  * Note: if there are Vars on both sides of the clause, we must fail, because
4227  * callers are expecting that the other side will act like a pseudoconstant.
4228  */
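/*
 * For example (illustrative only): for the clause "42 > t.x", args holds the
 * Const 42 and the Var t.x.  Only the right-hand side belongs to a relation,
 * so we return TRUE with *varonleft = false, *vardata describing t.x, and
 * *other set to the (already constant) left-hand argument.
 */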
4229 bool
4230 get_restriction_variable(PlannerInfo *root, List *args, int varRelid,
4231                                                  VariableStatData *vardata, Node **other,
4232                                                  bool *varonleft)
4233 {
4234         Node       *left,
4235                            *right;
4236         VariableStatData rdata;
4237
4238         /* Fail if not a binary opclause (probably shouldn't happen) */
4239         if (list_length(args) != 2)
4240                 return false;
4241
4242         left = (Node *) linitial(args);
4243         right = (Node *) lsecond(args);
4244
4245         /*
4246          * Examine both sides.  Note that when varRelid is nonzero, Vars of other
4247          * relations will be treated as pseudoconstants.
4248          */
4249         examine_variable(root, left, varRelid, vardata);
4250         examine_variable(root, right, varRelid, &rdata);
4251
4252         /*
4253          * If one side is a variable and the other not, we win.
4254          */
4255         if (vardata->rel && rdata.rel == NULL)
4256         {
4257                 *varonleft = true;
4258                 *other = estimate_expression_value(root, rdata.var);
4259                 /* Assume we need no ReleaseVariableStats(rdata) here */
4260                 return true;
4261         }
4262
4263         if (vardata->rel == NULL && rdata.rel)
4264         {
4265                 *varonleft = false;
4266                 *other = estimate_expression_value(root, vardata->var);
4267                 /* Assume we need no ReleaseVariableStats(*vardata) here */
4268                 *vardata = rdata;
4269                 return true;
4270         }
4271
4272         /* Ooops, clause has wrong structure (probably var op var) */
4273         ReleaseVariableStats(*vardata);
4274         ReleaseVariableStats(rdata);
4275
4276         return false;
4277 }
4278
4279 /*
4280  * get_join_variables
4281  *              Apply examine_variable() to each side of a join clause.
4282  *              Also, attempt to identify whether the join clause has the same
4283  *              or reversed sense compared to the SpecialJoinInfo.
4284  *
4285  * We consider the join clause "normal" if it is "lhs_var OP rhs_var",
4286  * or "reversed" if it is "rhs_var OP lhs_var".  In complicated cases
4287  * where we can't tell for sure, we default to assuming it's normal.
4288  */
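/*
 * For example (illustrative only): given the join clause "t2.b = t1.a" for a
 * join whose SpecialJoinInfo puts t1 on the left and t2 on the right, the
 * left-hand Var (t2.b) falls within syn_righthand, so *join_is_reversed is
 * set to true.
 */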
4289 void
4290 get_join_variables(PlannerInfo *root, List *args, SpecialJoinInfo *sjinfo,
4291                                    VariableStatData *vardata1, VariableStatData *vardata2,
4292                                    bool *join_is_reversed)
4293 {
4294         Node       *left,
4295                            *right;
4296
4297         if (list_length(args) != 2)
4298                 elog(ERROR, "join operator should take two arguments");
4299
4300         left = (Node *) linitial(args);
4301         right = (Node *) lsecond(args);
4302
4303         examine_variable(root, left, 0, vardata1);
4304         examine_variable(root, right, 0, vardata2);
4305
4306         if (vardata1->rel &&
4307                 bms_is_subset(vardata1->rel->relids, sjinfo->syn_righthand))
4308                 *join_is_reversed = true;               /* var1 is on RHS */
4309         else if (vardata2->rel &&
4310                          bms_is_subset(vardata2->rel->relids, sjinfo->syn_lefthand))
4311                 *join_is_reversed = true;               /* var2 is on LHS */
4312         else
4313                 *join_is_reversed = false;
4314 }
4315
4316 /*
4317  * examine_variable
4318  *              Try to look up statistical data about an expression.
4319  *              Fill in a VariableStatData struct to describe the expression.
4320  *
4321  * Inputs:
4322  *      root: the planner info
4323  *      node: the expression tree to examine
4324  *      varRelid: see specs for restriction selectivity functions
4325  *
4326  * Outputs: *vardata is filled as follows:
4327  *      var: the input expression (with any binary relabeling stripped, if
4328  *              it is or contains a variable; but otherwise the type is preserved)
4329  *      rel: RelOptInfo for relation containing variable; NULL if expression
4330  *              contains no Vars (NOTE this could point to a RelOptInfo of a
4331  *              subquery, not one in the current query).
4332  *      statsTuple: the pg_statistic entry for the variable, if one exists;
4333  *              otherwise NULL.
4334  *      freefunc: pointer to a function to release statsTuple with.
4335  *      vartype: exposed type of the expression; this should always match
4336  *              the declared input type of the operator we are estimating for.
4337  *      atttype, atttypmod: type data to pass to get_attstatsslot().  This is
4338  *              commonly the same as the exposed type of the variable argument,
4339  *              but can be different in binary-compatible-type cases.
4340  *      isunique: TRUE if we were able to match the var to a unique index or a
4341  *              single-column DISTINCT clause, implying its values are unique for
4342  *              this query.  (Caution: this should be trusted for statistical
4343  *              purposes only, since we do not check indimmediate nor verify that
4344  *              the exact same definition of equality applies.)
4345  *
4346  * Caller is responsible for doing ReleaseVariableStats() before exiting.
4347  */
4348 void
4349 examine_variable(PlannerInfo *root, Node *node, int varRelid,
4350                                  VariableStatData *vardata)
4351 {
4352         Node       *basenode;
4353         Relids          varnos;
4354         RelOptInfo *onerel;
4355
4356         /* Make sure we don't return dangling pointers in vardata */
4357         MemSet(vardata, 0, sizeof(VariableStatData));
4358
4359         /* Save the exposed type of the expression */
4360         vardata->vartype = exprType(node);
4361
4362         /* Look inside any binary-compatible relabeling */
4363
4364         if (IsA(node, RelabelType))
4365                 basenode = (Node *) ((RelabelType *) node)->arg;
4366         else
4367                 basenode = node;
4368
4369         /* Fast path for a simple Var */
4370
4371         if (IsA(basenode, Var) &&
4372                 (varRelid == 0 || varRelid == ((Var *) basenode)->varno))
4373         {
4374                 Var                *var = (Var *) basenode;
4375
4376                 /* Set up result fields other than the stats tuple */
4377                 vardata->var = basenode;        /* return Var without relabeling */
4378                 vardata->rel = find_base_rel(root, var->varno);
4379                 vardata->atttype = var->vartype;
4380                 vardata->atttypmod = var->vartypmod;
4381                 vardata->isunique = has_unique_index(vardata->rel, var->varattno);
4382
4383                 /* Try to locate some stats */
4384                 examine_simple_variable(root, var, vardata);
4385
4386                 return;
4387         }
4388
4389         /*
4390          * Okay, it's a more complicated expression.  Determine variable
4391          * membership.  Note that when varRelid isn't zero, only vars of that
4392          * relation are considered "real" vars.
4393          */
4394         varnos = pull_varnos(basenode);
4395
4396         onerel = NULL;
4397
4398         switch (bms_membership(varnos))
4399         {
4400                 case BMS_EMPTY_SET:
4401                         /* No Vars at all ... must be pseudo-constant clause */
4402                         break;
4403                 case BMS_SINGLETON:
4404                         if (varRelid == 0 || bms_is_member(varRelid, varnos))
4405                         {
4406                                 onerel = find_base_rel(root,
4407                                            (varRelid ? varRelid : bms_singleton_member(varnos)));
4408                                 vardata->rel = onerel;
4409                                 node = basenode;        /* strip any relabeling */
4410                         }
4411                         /* else treat it as a constant */
4412                         break;
4413                 case BMS_MULTIPLE:
4414                         if (varRelid == 0)
4415                         {
4416                                 /* treat it as a variable of a join relation */
4417                                 vardata->rel = find_join_rel(root, varnos);
4418                                 node = basenode;        /* strip any relabeling */
4419                         }
4420                         else if (bms_is_member(varRelid, varnos))
4421                         {
4422                                 /* ignore the vars belonging to other relations */
4423                                 vardata->rel = find_base_rel(root, varRelid);
4424                                 node = basenode;        /* strip any relabeling */
4425                                 /* note: no point in expressional-index search here */
4426                         }
4427                         /* else treat it as a constant */
4428                         break;
4429         }
4430
4431         bms_free(varnos);
4432
4433         vardata->var = node;
4434         vardata->atttype = exprType(node);
4435         vardata->atttypmod = exprTypmod(node);
4436
4437         if (onerel)
4438         {
4439                 /*
4440                  * We have an expression in vars of a single relation.  Try to match
4441                  * it to expressional index columns, in hopes of finding some
4442                  * statistics.
4443                  *
4444                  * XXX it's conceivable that there are multiple matches with different
4445                  * index opfamilies; if so, we need to pick one that matches the
4446                  * operator we are estimating for.  FIXME later.
4447                  */
4448                 ListCell   *ilist;
4449
4450                 foreach(ilist, onerel->indexlist)
4451                 {
4452                         IndexOptInfo *index = (IndexOptInfo *) lfirst(ilist);
4453                         ListCell   *indexpr_item;
4454                         int                     pos;
4455
4456                         indexpr_item = list_head(index->indexprs);
4457                         if (indexpr_item == NULL)
4458                                 continue;               /* no expressions here... */
4459
4460                         for (pos = 0; pos < index->ncolumns; pos++)
4461                         {
4462                                 if (index->indexkeys[pos] == 0)
4463                                 {
4464                                         Node       *indexkey;
4465
4466                                         if (indexpr_item == NULL)
4467                                                 elog(ERROR, "too few entries in indexprs list");
4468                                         indexkey = (Node *) lfirst(indexpr_item);
4469                                         if (indexkey && IsA(indexkey, RelabelType))
4470                                                 indexkey = (Node *) ((RelabelType *) indexkey)->arg;
4471                                         if (equal(node, indexkey))
4472                                         {
4473                                                 /*
4474                                                  * Found a match ... is it a unique index? Tests here
4475                                                  * should match has_unique_index().
4476                                                  */
4477                                                 if (index->unique &&
4478                                                         index->ncolumns == 1 &&
4479                                                         (index->indpred == NIL || index->predOK))
4480                                                         vardata->isunique = true;
4481
4482                                                 /*
4483                                                  * Has it got stats?  We only consider stats for
4484                                                  * non-partial indexes, since partial indexes probably
4485                                                  * don't reflect whole-relation statistics; the above
4486                                                  * check for uniqueness is the only info we take from
4487                                                  * a partial index.
4488                                                  *
4489                                                  * An index stats hook, however, must make its own
4490                                                  * decisions about what to do with partial indexes.
4491                                                  */
4492                                                 if (get_index_stats_hook &&
4493                                                         (*get_index_stats_hook) (root, index->indexoid,
4494                                                                                                          pos + 1, vardata))
4495                                                 {
4496                                                         /*
4497                                                          * The hook took control of acquiring a stats
4498                                                          * tuple.  If it did supply a tuple, it'd better
4499                                                          * have supplied a freefunc.
4500                                                          */
4501                                                         if (HeapTupleIsValid(vardata->statsTuple) &&
4502                                                                 !vardata->freefunc)
4503                                                                 elog(ERROR, "no function provided to release variable stats with");
4504                                                 }
4505                                                 else if (index->indpred == NIL)
4506                                                 {
4507                                                         vardata->statsTuple =
4508                                                                 SearchSysCache3(STATRELATTINH,
4509                                                                                    ObjectIdGetDatum(index->indexoid),
4510                                                                                                 Int16GetDatum(pos + 1),
4511                                                                                                 BoolGetDatum(false));
4512                                                         vardata->freefunc = ReleaseSysCache;
4513                                                 }
4514                                                 if (vardata->statsTuple)
4515                                                         break;
4516                                         }
4517                                         indexpr_item = lnext(indexpr_item);
4518                                 }
4519                         }
4520                         if (vardata->statsTuple)
4521                                 break;
4522                 }
4523         }
4524 }
4525
4526 /*
4527  * examine_simple_variable
4528  *              Handle a simple Var for examine_variable
4529  *
4530  * This is split out as a subroutine so that we can recurse to deal with
4531  * Vars referencing subqueries.
4532  *
4533  * We already filled in all the fields of *vardata except for the stats tuple.
4534  */
4535 static void
4536 examine_simple_variable(PlannerInfo *root, Var *var,
4537                                                 VariableStatData *vardata)
4538 {
4539         RangeTblEntry *rte = root->simple_rte_array[var->varno];
4540
4541         Assert(IsA(rte, RangeTblEntry));
4542
4543         if (get_relation_stats_hook &&
4544                 (*get_relation_stats_hook) (root, rte, var->varattno, vardata))
4545         {
4546                 /*
4547                  * The hook took control of acquiring a stats tuple.  If it did supply
4548                  * a tuple, it'd better have supplied a freefunc.
4549                  */
4550                 if (HeapTupleIsValid(vardata->statsTuple) &&
4551                         !vardata->freefunc)
4552                         elog(ERROR, "no function provided to release variable stats with");
4553         }
4554         else if (rte->rtekind == RTE_RELATION)
4555         {
4556                 /*
4557                  * Plain table or parent of an inheritance appendrel, so look up the
4558                  * column in pg_statistic
4559                  */
4560                 vardata->statsTuple = SearchSysCache3(STATRELATTINH,
4561                                                                                           ObjectIdGetDatum(rte->relid),
4562                                                                                           Int16GetDatum(var->varattno),
4563                                                                                           BoolGetDatum(rte->inh));
4564                 vardata->freefunc = ReleaseSysCache;
4565         }
4566         else if (rte->rtekind == RTE_SUBQUERY && !rte->inh)
4567         {
4568                 /*
4569                  * Plain subquery (not one that was converted to an appendrel).
4570                  */
4571                 Query      *subquery = rte->subquery;
4572                 RelOptInfo *rel;
4573                 TargetEntry *ste;
4574
4575                 /*
4576                  * Punt if it's a whole-row var rather than a plain column reference.
4577                  */
4578                 if (var->varattno == InvalidAttrNumber)
4579                         return;
4580
4581                 /*
4582                  * Punt if subquery uses set operations or GROUP BY, as these will
4583                  * mash underlying columns' stats beyond recognition.  (Set ops are
4584                  * particularly nasty; if we forged ahead, we would return stats
4585                  * relevant to only the leftmost subselect...)  DISTINCT is also
4586                  * problematic, but we check that later because there is a possibility
4587                  * of learning something even with it.
4588                  */
4589                 if (subquery->setOperations ||
4590                         subquery->groupClause)
4591                         return;
4592
4593                 /*
4594                  * OK, fetch RelOptInfo for subquery.  Note that we don't change the
4595                  * rel returned in vardata, since caller expects it to be a rel of the
4596                  * caller's query level.  Because we might already be recursing, we
4597                  * can't use that rel pointer either, but have to look up the Var's
4598                  * rel afresh.
4599                  */
4600                 rel = find_base_rel(root, var->varno);
4601
4602                 /* If the subquery hasn't been planned yet, we have to punt */
4603                 if (rel->subroot == NULL)
4604                         return;
4605                 Assert(IsA(rel->subroot, PlannerInfo));
4606
4607                 /*
4608                  * Switch our attention to the subquery as mangled by the planner. It
4609                  * was okay to look at the pre-planning version for the tests above,
4610                  * but now we need a Var that will refer to the subroot's live
4611                  * RelOptInfos.  For instance, if any subquery pullup happened during
4612                  * planning, Vars in the targetlist might have gotten replaced, and we
4613                  * need to see the replacement expressions.
4614                  */
4615                 subquery = rel->subroot->parse;
4616                 Assert(IsA(subquery, Query));
4617
4618                 /* Get the subquery output expression referenced by the upper Var */
4619                 ste = get_tle_by_resno(subquery->targetList, var->varattno);
4620                 if (ste == NULL || ste->resjunk)
4621                         elog(ERROR, "subquery %s does not have attribute %d",
4622                                  rte->eref->aliasname, var->varattno);
4623                 var = (Var *) ste->expr;
4624
4625                 /*
4626                  * If subquery uses DISTINCT, we can't make use of any stats for the
4627                  * variable ... but, if it's the only DISTINCT column, we are entitled
4628                  * to consider it unique.  We do the test this way so that it works
4629                  * for cases involving DISTINCT ON.
4630                  */
4631                 if (subquery->distinctClause)
4632                 {
4633                         if (list_length(subquery->distinctClause) == 1 &&
4634                                 targetIsInSortList(ste, InvalidOid, subquery->distinctClause))
4635                                 vardata->isunique = true;
4636                         /* cannot go further */
4637                         return;
4638                 }
4639
4640                 /*
4641                  * If the sub-query originated from a view with the security_barrier
4642                  * attribute, we must not look at the variable's statistics, though it
4643                  * seems all right to notice the existence of a DISTINCT clause. So
4644                  * stop here.
4645                  *
4646                  * This is probably a harsher restriction than necessary; it's
4647                  * certainly OK for the selectivity estimator (which is a C function,
4648                  * and therefore omnipotent anyway) to look at the statistics.  But
4649                  * many selectivity estimators will happily *invoke the operator
4650                  * function* to try to work out a good estimate - and that's not OK.
4651                  * So for now, don't dig down for stats.
4652                  */
4653                 if (rte->security_barrier)
4654                         return;
4655
4656                 /* Can only handle a simple Var of subquery's query level */
4657                 if (var && IsA(var, Var) &&
4658                         var->varlevelsup == 0)
4659                 {
4660                         /*
4661                          * OK, recurse into the subquery.  Note that the original setting
4662                          * of vardata->isunique (which will surely be false) is left
4663                          * unchanged in this situation.  That's what we want, since even
4664                          * if the underlying column is unique, the subquery may have
4665                          * joined to other tables in a way that creates duplicates.
4666                          */
4667                         examine_simple_variable(rel->subroot, var, vardata);
4668                 }
4669         }
4670         else
4671         {
4672                 /*
4673                  * Otherwise, the Var comes from a FUNCTION, VALUES, or CTE RTE.  (We
4674                  * won't see RTE_JOIN here because join alias Vars have already been
4675                  * flattened.)  There's not much we can do with function outputs, but
4676                  * maybe someday try to be smarter about VALUES and/or CTEs.
4677                  */
4678         }
4679 }
4680
4681 /*
4682  * get_variable_numdistinct
4683  *        Estimate the number of distinct values of a variable.
4684  *
4685  * vardata: results of examine_variable
4686  * *isdefault: set to TRUE if the result is a default rather than based on
4687  * anything meaningful.
4688  *
4689  * NB: be careful to produce a positive integral result, since callers may
4690  * compare the result to exact integer counts, or might divide by it.
4691  */
4692 double
4693 get_variable_numdistinct(VariableStatData *vardata, bool *isdefault)
4694 {
4695         double          stadistinct;
4696         double          ntuples;
4697
4698         *isdefault = false;
4699
4700         /*
4701          * Determine the stadistinct value to use.  There are cases where we can
4702          * get an estimate even without a pg_statistic entry, or can get a better
4703          * value than is in pg_statistic.
4704          */
4705         if (HeapTupleIsValid(vardata->statsTuple))
4706         {
4707                 /* Use the pg_statistic entry */
4708                 Form_pg_statistic stats;
4709
4710                 stats = (Form_pg_statistic) GETSTRUCT(vardata->statsTuple);
4711                 stadistinct = stats->stadistinct;
4712         }
4713         else if (vardata->vartype == BOOLOID)
4714         {
4715                 /*
4716                  * Special-case boolean columns: presumably, two distinct values.
4717                  *
4718                  * Are there any other datatypes we should wire in special estimates
4719                  * for?
4720                  */
4721                 stadistinct = 2.0;
4722         }
4723         else
4724         {
4725                 /*
4726                  * We don't keep statistics for system columns, but in some cases we
4727                  * can infer distinctness anyway.
4728                  */
4729                 if (vardata->var && IsA(vardata->var, Var))
4730                 {
4731                         switch (((Var *) vardata->var)->varattno)
4732                         {
4733                                 case ObjectIdAttributeNumber:
4734                                 case SelfItemPointerAttributeNumber:
4735                                         stadistinct = -1.0; /* unique */
4736                                         break;
4737                                 case TableOidAttributeNumber:
4738                                         stadistinct = 1.0;      /* only 1 value */
4739                                         break;
4740                                 default:
4741                                         stadistinct = 0.0;      /* means "unknown" */
4742                                         break;
4743                         }
4744                 }
4745                 else
4746                         stadistinct = 0.0;      /* means "unknown" */
4747
4748                 /*
4749                  * XXX consider using estimate_num_groups on expressions?
4750                  */
4751         }
4752
4753         /*
4754          * If there is a unique index or DISTINCT clause for the variable, assume
4755          * it is unique no matter what pg_statistic says; the statistics could be
4756          * out of date, or we might have found a partial unique index that proves
4757          * the var is unique for this query.
4758          */
4759         if (vardata->isunique)
4760                 stadistinct = -1.0;
4761
4762         /*
4763          * If we had an absolute estimate, use that.
4764          */
4765         if (stadistinct > 0.0)
4766                 return clamp_row_est(stadistinct);
4767
4768         /*
4769          * Otherwise we need to get the relation size; punt if not available.
4770          */
4771         if (vardata->rel == NULL)
4772         {
4773                 *isdefault = true;
4774                 return DEFAULT_NUM_DISTINCT;
4775         }
4776         ntuples = vardata->rel->tuples;
4777         if (ntuples <= 0.0)
4778         {
4779                 *isdefault = true;
4780                 return DEFAULT_NUM_DISTINCT;
4781         }
4782
4783         /*
4784          * If we had a relative estimate, use that.
4785          */
4786         if (stadistinct < 0.0)
4787                 return clamp_row_est(-stadistinct * ntuples);
4788
4789         /*
4790          * With no data, estimate ndistinct = ntuples if the table is small, else
4791          * use default.  We use DEFAULT_NUM_DISTINCT as the cutoff for "small" so
4792          * that the behavior isn't discontinuous.
4793          */
4794         if (ntuples < DEFAULT_NUM_DISTINCT)
4795                 return clamp_row_est(ntuples);
4796
4797         *isdefault = true;
4798         return DEFAULT_NUM_DISTINCT;
4799 }
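
/*
 * A minimal standalone sketch of the stadistinct arithmetic above, with the
 * row-estimate clamping left out.  The helper name and the literal 200.0
 * (standing in for DEFAULT_NUM_DISTINCT) are illustrative only.  For
 * example, stadistinct of -0.5 over 10000 tuples yields 5000; an "unknown"
 * (zero) stadistinct yields 150 for a 150-tuple table but falls back to the
 * 200 default for a large one.
 */
#ifdef NOT_USED
static double
sketch_numdistinct(double stadistinct, double ntuples, bool *isdefault)
{
	*isdefault = false;
	if (stadistinct > 0.0)
		return stadistinct;				/* absolute estimate */
	if (stadistinct < 0.0 && ntuples > 0.0)
		return -stadistinct * ntuples;	/* fraction of the table's rows */
	if (ntuples > 0.0 && ntuples < 200.0)
		return ntuples;					/* small table: assume all distinct */
	*isdefault = true;
	return 200.0;						/* no usable information */
}
#endif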
4800
4801 /*
4802  * get_variable_range
4803  *              Estimate the minimum and maximum value of the specified variable.
4804  *              If successful, store values in *min and *max, and return TRUE.
4805  *              If no data available, return FALSE.
4806  *
4807  * sortop is the "<" comparison operator to use.  This should generally
4808  * be "<" not ">", as only the former is likely to be found in pg_statistic.
4809  */
4810 static bool
4811 get_variable_range(PlannerInfo *root, VariableStatData *vardata, Oid sortop,
4812                                    Datum *min, Datum *max)
4813 {
4814         Datum           tmin = 0;
4815         Datum           tmax = 0;
4816         bool            have_data = false;
4817         int16           typLen;
4818         bool            typByVal;
4819         Datum      *values;
4820         int                     nvalues;
4821         int                     i;
4822
4823         /*
4824          * XXX It's very tempting to try to use the actual column min and max, if
4825          * we can get them relatively-cheaply with an index probe.  However, since
4826          * this function is called many times during join planning, that could
4827          * have unpleasant effects on planning speed.  Need more investigation
4828          * before enabling this.
4829          */
4830 #ifdef NOT_USED
4831         if (get_actual_variable_range(root, vardata, sortop, min, max))
4832                 return true;
4833 #endif
4834
4835         if (!HeapTupleIsValid(vardata->statsTuple))
4836         {
4837                 /* no stats available, so default result */
4838                 return false;
4839         }
4840
4841         get_typlenbyval(vardata->atttype, &typLen, &typByVal);
4842
4843         /*
4844          * If there is a histogram, grab the first and last values.
4845          *
4846          * If there is a histogram that is sorted with some other operator than
4847          * the one we want, fail --- this suggests that there is data we can't
4848          * use.
4849          */
4850         if (get_attstatsslot(vardata->statsTuple,
4851                                                  vardata->atttype, vardata->atttypmod,
4852                                                  STATISTIC_KIND_HISTOGRAM, sortop,
4853                                                  NULL,
4854                                                  &values, &nvalues,
4855                                                  NULL, NULL))
4856         {
4857                 if (nvalues > 0)
4858                 {
4859                         tmin = datumCopy(values[0], typByVal, typLen);
4860                         tmax = datumCopy(values[nvalues - 1], typByVal, typLen);
4861                         have_data = true;
4862                 }
4863                 free_attstatsslot(vardata->atttype, values, nvalues, NULL, 0);
4864         }
4865         else if (get_attstatsslot(vardata->statsTuple,
4866                                                           vardata->atttype, vardata->atttypmod,
4867                                                           STATISTIC_KIND_HISTOGRAM, InvalidOid,
4868                                                           NULL,
4869                                                           &values, &nvalues,
4870                                                           NULL, NULL))
4871         {
4872                 free_attstatsslot(vardata->atttype, values, nvalues, NULL, 0);
4873                 return false;
4874         }
4875
4876         /*
4877          * If we have most-common-values info, look for extreme MCVs.  This is
4878          * needed even if we also have a histogram, since the histogram excludes
4879          * the MCVs.  However, usually the MCVs will not be the extreme values, so
4880          * avoid unnecessary data copying.
4881          */
4882         if (get_attstatsslot(vardata->statsTuple,
4883                                                  vardata->atttype, vardata->atttypmod,
4884                                                  STATISTIC_KIND_MCV, InvalidOid,
4885                                                  NULL,
4886                                                  &values, &nvalues,
4887                                                  NULL, NULL))
4888         {
4889                 bool            tmin_is_mcv = false;
4890                 bool            tmax_is_mcv = false;
4891                 FmgrInfo        opproc;
4892
4893                 fmgr_info(get_opcode(sortop), &opproc);
4894
4895                 for (i = 0; i < nvalues; i++)
4896                 {
4897                         if (!have_data)
4898                         {
4899                                 tmin = tmax = values[i];
4900                                 tmin_is_mcv = tmax_is_mcv = have_data = true;
4901                                 continue;
4902                         }
4903                         if (DatumGetBool(FunctionCall2Coll(&opproc,
4904                                                                                            DEFAULT_COLLATION_OID,
4905                                                                                            values[i], tmin)))
4906                         {
4907                                 tmin = values[i];
4908                                 tmin_is_mcv = true;
4909                         }
4910                         if (DatumGetBool(FunctionCall2Coll(&opproc,
4911                                                                                            DEFAULT_COLLATION_OID,
4912                                                                                            tmax, values[i])))
4913                         {
4914                                 tmax = values[i];
4915                                 tmax_is_mcv = true;
4916                         }
4917                 }
4918                 if (tmin_is_mcv)
4919                         tmin = datumCopy(tmin, typByVal, typLen);
4920                 if (tmax_is_mcv)
4921                         tmax = datumCopy(tmax, typByVal, typLen);
4922                 free_attstatsslot(vardata->atttype, values, nvalues, NULL, 0);
4923         }
4924
4925         *min = tmin;
4926         *max = tmax;
4927         return have_data;
4928 }
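
/*
 * A minimal sketch of how the histogram endpoints and MCV extremes above
 * combine into a range, using plain doubles in place of Datums and
 * sort-operator calls.  The helper name is illustrative only.
 */
#ifdef NOT_USED
static bool
sketch_variable_range(const double *hist, int nhist,
					  const double *mcv, int nmcv,
					  double *min, double *max)
{
	bool		have_data = false;
	int			i;

	/* the histogram, if present, supplies candidate bounds */
	if (nhist > 0)
	{
		*min = hist[0];
		*max = hist[nhist - 1];
		have_data = true;
	}

	/* MCVs are excluded from the histogram, so they may widen the range */
	for (i = 0; i < nmcv; i++)
	{
		if (!have_data)
		{
			*min = *max = mcv[i];
			have_data = true;
			continue;
		}
		if (mcv[i] < *min)
			*min = mcv[i];
		if (mcv[i] > *max)
			*max = mcv[i];
	}

	return have_data;
}
#endif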
4929
4930
4931 /*
4932  * get_actual_variable_range
4933  *              Attempt to identify the current *actual* minimum and/or maximum
4934  *              of the specified variable, by looking for a suitable btree index
4935  *              and fetching its low and/or high values.
4936  *              If successful, store values in *min and *max, and return TRUE.
4937  *              (Either pointer can be NULL if that endpoint isn't needed.)
4938  *              If no data available, return FALSE.
4939  *
4940  * sortop is the "<" comparison operator to use.
4941  */
4942 static bool
4943 get_actual_variable_range(PlannerInfo *root, VariableStatData *vardata,
4944                                                   Oid sortop,
4945                                                   Datum *min, Datum *max)
4946 {
4947         bool            have_data = false;
4948         RelOptInfo *rel = vardata->rel;
4949         RangeTblEntry *rte;
4950         ListCell   *lc;
4951
4952         /* No hope if no relation or it doesn't have indexes */
4953         if (rel == NULL || rel->indexlist == NIL)
4954                 return false;
4955         /* If it has indexes it must be a plain relation */
4956         rte = root->simple_rte_array[rel->relid];
4957         Assert(rte->rtekind == RTE_RELATION);
4958
4959         /* Search through the indexes to see if any match our problem */
4960         foreach(lc, rel->indexlist)
4961         {
4962                 IndexOptInfo *index = (IndexOptInfo *) lfirst(lc);
4963                 ScanDirection indexscandir;
4964
4965                 /* Ignore non-btree indexes */
4966                 if (index->relam != BTREE_AM_OID)
4967                         continue;
4968
4969                 /*
4970                  * Ignore partial indexes --- we only want stats that cover the entire
4971                  * relation.
4972                  */
4973                 if (index->indpred != NIL)
4974                         continue;
4975
4976                 /*
4977                  * The index list might include hypothetical indexes inserted by a
4978                  * get_relation_info hook --- don't try to access them.
4979                  */
4980                 if (index->hypothetical)
4981                         continue;
4982
4983                 /*
4984                  * The first index column must match the desired variable and sort
4985                  * operator --- but we can use a descending-order index.
4986                  */
4987                 if (!match_index_to_operand(vardata->var, 0, index))
4988                         continue;
4989                 switch (get_op_opfamily_strategy(sortop, index->sortopfamily[0]))
4990                 {
4991                         case BTLessStrategyNumber:
4992                                 if (index->reverse_sort[0])
4993                                         indexscandir = BackwardScanDirection;
4994                                 else
4995                                         indexscandir = ForwardScanDirection;
4996                                 break;
4997                         case BTGreaterStrategyNumber:
4998                                 if (index->reverse_sort[0])
4999                                         indexscandir = ForwardScanDirection;
5000                                 else
5001                                         indexscandir = BackwardScanDirection;
5002                                 break;
5003                         default:
5004                                 /* index doesn't match the sortop */
5005                                 continue;
5006                 }
5007
5008                 /*
5009                  * Found a suitable index to extract data from.  We'll need an EState
5010                  * and a bunch of other infrastructure.
5011                  */
5012                 {
5013                         EState     *estate;
5014                         ExprContext *econtext;
5015                         MemoryContext tmpcontext;
5016                         MemoryContext oldcontext;
5017                         Relation        heapRel;
5018                         Relation        indexRel;
5019                         IndexInfo  *indexInfo;
5020                         TupleTableSlot *slot;
5021                         int16           typLen;
5022                         bool            typByVal;
5023                         ScanKeyData scankeys[1];
5024                         IndexScanDesc index_scan;
5025                         HeapTuple       tup;
5026                         Datum           values[INDEX_MAX_KEYS];
5027                         bool            isnull[INDEX_MAX_KEYS];
5028                         SnapshotData SnapshotDirty;
5029
5030                         estate = CreateExecutorState();
5031                         econtext = GetPerTupleExprContext(estate);
5032                         /* Make sure any cruft is generated in the econtext's memory */
5033                         tmpcontext = econtext->ecxt_per_tuple_memory;
5034                         oldcontext = MemoryContextSwitchTo(tmpcontext);
5035
5036                         /*
5037                          * Open the table and index so we can read from them.  We should
5038                          * already have at least AccessShareLock on the table, but not
5039                          * necessarily on the index.
5040                          */
5041                         heapRel = heap_open(rte->relid, NoLock);
5042                         indexRel = index_open(index->indexoid, AccessShareLock);
5043
5044                         /* extract index key information from the index's pg_index info */
5045                         indexInfo = BuildIndexInfo(indexRel);
5046
5047                         /* set up a scan slot, datum-copying info, and a dirty snapshot */
5048                         slot = MakeSingleTupleTableSlot(RelationGetDescr(heapRel));
5049                         econtext->ecxt_scantuple = slot;
5050                         get_typlenbyval(vardata->atttype, &typLen, &typByVal);
5051                         InitDirtySnapshot(SnapshotDirty);
5052
5053                         /* set up an IS NOT NULL scan key so that we ignore nulls */
5054                         ScanKeyEntryInitialize(&scankeys[0],
5055                                                                    SK_ISNULL | SK_SEARCHNOTNULL,
5056                                                                    1,   /* index col to scan */
5057                                                                    InvalidStrategy,             /* no strategy */
5058                                                                    InvalidOid,  /* no strategy subtype */
5059                                                                    InvalidOid,  /* no collation */
5060                                                                    InvalidOid,  /* no reg proc for this */
5061                                                                    (Datum) 0);  /* constant */
5062
5063                         have_data = true;
5064
5065                         /* If min is requested ... */
5066                         if (min)
5067                         {
5068                                 /*
5069                                  * In principle, we should scan the index with our current
5070                                  * active snapshot, which is the best approximation we've got
5071                                  * to what the query will see when executed.  But that won't
5072                                  * be exact if a new snap is taken before running the query,
5073                                  * and it can be very expensive if a lot of uncommitted rows
5074                                  * exist at the end of the index (because we'll laboriously
5075                                  * fetch each one and reject it).  What seems like a good
5076                                  * compromise is to use SnapshotDirty.  That will accept
5077                                  * uncommitted rows, and thus avoid fetching multiple heap
5078                                  * tuples in this scenario.  On the other hand, it will reject
5079                                  * known-dead rows, and thus not give a bogus answer when the
5080                                  * extreme value has been deleted; that case motivates not
5081                                  * using SnapshotAny here.
5082                                  */
5083                                 index_scan = index_beginscan(heapRel, indexRel, &SnapshotDirty,
5084                                                                                          1, 0);
5085                                 index_rescan(index_scan, scankeys, 1, NULL, 0);
5086
5087                                 /* Fetch first tuple in sortop's direction */
5088                                 if ((tup = index_getnext(index_scan,
5089                                                                                  indexscandir)) != NULL)
5090                                 {
5091                                         /* Extract the index column values from the heap tuple */
5092                                         ExecStoreTuple(tup, slot, InvalidBuffer, false);
5093                                         FormIndexDatum(indexInfo, slot, estate,
5094                                                                    values, isnull);
5095
5096                                         /* Shouldn't have got a null, but be careful */
5097                                         if (isnull[0])
5098                                                 elog(ERROR, "found unexpected null value in index \"%s\"",
5099                                                          RelationGetRelationName(indexRel));
5100
5101                                         /* Copy the index column value out to caller's context */
5102                                         MemoryContextSwitchTo(oldcontext);
5103                                         *min = datumCopy(values[0], typByVal, typLen);
5104                                         MemoryContextSwitchTo(tmpcontext);
5105                                 }
5106                                 else
5107                                         have_data = false;
5108
5109                                 index_endscan(index_scan);
5110                         }
5111
5112                         /* If max is requested, and we didn't already find the index to be empty */
5113                         if (max && have_data)
5114                         {
5115                                 index_scan = index_beginscan(heapRel, indexRel, &SnapshotDirty,
5116                                                                                          1, 0);
5117                                 index_rescan(index_scan, scankeys, 1, NULL, 0);
5118
5119                                 /* Fetch first tuple in reverse direction */
5120                                 if ((tup = index_getnext(index_scan,
5121                                                                                  -indexscandir)) != NULL)
5122                                 {
5123                                         /* Extract the index column values from the heap tuple */
5124                                         ExecStoreTuple(tup, slot, InvalidBuffer, false);
5125                                         FormIndexDatum(indexInfo, slot, estate,
5126                                                                    values, isnull);
5127
5128                                         /* Shouldn't have got a null, but be careful */
5129                                         if (isnull[0])
5130                                                 elog(ERROR, "found unexpected null value in index \"%s\"",
5131                                                          RelationGetRelationName(indexRel));
5132
5133                                         /* Copy the index column value out to caller's context */
5134                                         MemoryContextSwitchTo(oldcontext);
5135                                         *max = datumCopy(values[0], typByVal, typLen);
5136                                         MemoryContextSwitchTo(tmpcontext);
5137                                 }
5138                                 else
5139                                         have_data = false;
5140
5141                                 index_endscan(index_scan);
5142                         }
5143
5144                         /* Clean everything up */
5145                         ExecDropSingleTupleTableSlot(slot);
5146
5147                         index_close(indexRel, AccessShareLock);
5148                         heap_close(heapRel, NoLock);
5149
5150                         MemoryContextSwitchTo(oldcontext);
5151                         FreeExecutorState(estate);
5152
5153                         /* And we're done */
5154                         break;
5155                 }
5156         }
5157
5158         return have_data;
5159 }
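
/*
 * A minimal sketch of just the scan-direction decision in the switch above:
 * a "<" sortop wants the smallest key first, so it scans an ascending index
 * forward and a DESC index backward, while a ">" sortop does the reverse.
 * The helper name is illustrative only.
 */
#ifdef NOT_USED
static ScanDirection
sketch_min_scan_direction(bool sortop_is_less, bool index_col_is_desc)
{
	if (sortop_is_less)
		return index_col_is_desc ? BackwardScanDirection : ForwardScanDirection;
	else
		return index_col_is_desc ? ForwardScanDirection : BackwardScanDirection;
}
#endif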
5160
5161 /*
5162  * find_join_input_rel
5163  *              Look up the input relation for a join.
5164  *
5165  * We assume that the input relation's RelOptInfo must have been constructed
5166  * already.
5167  */
5168 static RelOptInfo *
5169 find_join_input_rel(PlannerInfo *root, Relids relids)
5170 {
5171         RelOptInfo *rel = NULL;
5172
5173         switch (bms_membership(relids))
5174         {
5175                 case BMS_EMPTY_SET:
5176                         /* should not happen */
5177                         break;
5178                 case BMS_SINGLETON:
5179                         rel = find_base_rel(root, bms_singleton_member(relids));
5180                         break;
5181                 case BMS_MULTIPLE:
5182                         rel = find_join_rel(root, relids);
5183                         break;
5184         }
5185
5186         if (rel == NULL)
5187                 elog(ERROR, "could not find RelOptInfo for given relids");
5188
5189         return rel;
5190 }
5191
5192
5193 /*-------------------------------------------------------------------------
5194  *
5195  * Pattern analysis functions
5196  *
5197  * These routines support analysis of LIKE and regular-expression patterns
5198  * by the planner/optimizer.  It's important that they agree with the
5199  * regular-expression code in backend/regex/ and the LIKE code in
5200  * backend/utils/adt/like.c.  Also, the computation of the fixed prefix
5201  * must be conservative: if we report a string longer than the true fixed
5202  * prefix, the query may produce actually wrong answers, rather than just
5203  * getting a bad selectivity estimate!
5204  *
5205  * Note that the prefix-analysis functions are called from
5206  * backend/optimizer/path/indxpath.c as well as from routines in this file.
5207  *
5208  *-------------------------------------------------------------------------
5209  */
5210
5211 /*
5212  * Check whether char is a letter (and, hence, subject to case-folding)
5213  *
5214  * In multibyte character sets, we can't use isalpha, and it does not seem
5215  * worth trying to convert to wchar_t to use iswalpha.  Instead, just assume
5216  * any multibyte char is potentially case-varying.
5217  */
5218 static int
5219 pattern_char_isalpha(char c, bool is_multibyte,
5220                                          pg_locale_t locale, bool locale_is_c)
5221 {
5222         if (locale_is_c)
5223                 return (c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z');
5224         else if (is_multibyte && IS_HIGHBIT_SET(c))
5225                 return true;
5226 #ifdef HAVE_LOCALE_T
5227         else if (locale)
5228                 return isalpha_l((unsigned char) c, locale);
5229 #endif
5230         else
5231                 return isalpha((unsigned char) c);
5232 }
5233
5234 /*
5235  * Extract the fixed prefix, if any, for a pattern.
5236  *
5237  * *prefix is set to a palloc'd prefix string (in the form of a Const node),
5238  *      or to NULL if no fixed prefix exists for the pattern.
5239  * If rest_selec is not NULL, *rest_selec is set to an estimate of the
5240  *      selectivity of the remainder of the pattern (without any fixed prefix).
5241  * The prefix Const has the same type (TEXT or BYTEA) as the input pattern.
5242  *
5243  * The return value distinguishes no fixed prefix, a partial prefix,
5244  * or an exact-match-only pattern.
5245  */
5246
5247 static Pattern_Prefix_Status
5248 like_fixed_prefix(Const *patt_const, bool case_insensitive, Oid collation,
5249                                   Const **prefix_const, Selectivity *rest_selec)
5250 {
5251         char       *match;
5252         char       *patt;
5253         int                     pattlen;
5254         Oid                     typeid = patt_const->consttype;
5255         int                     pos,
5256                                 match_pos;
5257         bool            is_multibyte = (pg_database_encoding_max_length() > 1);
5258         pg_locale_t locale = 0;
5259         bool            locale_is_c = false;
5260
5261         /* the right-hand const is type text or bytea */
5262         Assert(typeid == BYTEAOID || typeid == TEXTOID);
5263
5264         if (case_insensitive)
5265         {
5266                 if (typeid == BYTEAOID)
5267                         ereport(ERROR,
5268                                         (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
5269                         errmsg("case insensitive matching not supported on type bytea")));
5270
5271                 /* If case-insensitive, we need locale info */
5272                 if (lc_ctype_is_c(collation))
5273                         locale_is_c = true;
5274                 else if (collation != DEFAULT_COLLATION_OID)
5275                 {
5276                         if (!OidIsValid(collation))
5277                         {
5278                                 /*
5279                                  * This typically means that the parser could not resolve a
5280                                  * conflict of implicit collations, so report it that way.
5281                                  */
5282                                 ereport(ERROR,
5283                                                 (errcode(ERRCODE_INDETERMINATE_COLLATION),
5284                                                  errmsg("could not determine which collation to use for ILIKE"),
5285                                                  errhint("Use the COLLATE clause to set the collation explicitly.")));
5286                         }
5287                         locale = pg_newlocale_from_collation(collation);
5288                 }
5289         }
5290
5291         if (typeid != BYTEAOID)
5292         {
5293                 patt = TextDatumGetCString(patt_const->constvalue);
5294                 pattlen = strlen(patt);
5295         }
5296         else
5297         {
5298                 bytea      *bstr = DatumGetByteaP(patt_const->constvalue);
5299
5300                 pattlen = VARSIZE(bstr) - VARHDRSZ;
5301                 patt = (char *) palloc(pattlen);
5302                 memcpy(patt, VARDATA(bstr), pattlen);
5303                 if ((Pointer) bstr != DatumGetPointer(patt_const->constvalue))
5304                         pfree(bstr);
5305         }
5306
5307         match = palloc(pattlen + 1);
5308         match_pos = 0;
5309         for (pos = 0; pos < pattlen; pos++)
5310         {
5311                 /* % and _ are wildcard characters in LIKE */
5312                 if (patt[pos] == '%' ||
5313                         patt[pos] == '_')
5314                         break;
5315
5316                 /* Backslash escapes the next character */
5317                 if (patt[pos] == '\\')
5318                 {
5319                         pos++;
5320                         if (pos >= pattlen)
5321                                 break;
5322                 }
5323
5324                 /* Stop if case-varying character (it's sort of a wildcard) */
5325                 if (case_insensitive &&
5326                   pattern_char_isalpha(patt[pos], is_multibyte, locale, locale_is_c))
5327                         break;
5328
5329                 match[match_pos++] = patt[pos];
5330         }
5331
5332         match[match_pos] = '\0';
5333
5334         if (typeid != BYTEAOID)
5335                 *prefix_const = string_to_const(match, typeid);
5336         else
5337                 *prefix_const = string_to_bytea_const(match, match_pos);
5338
5339         if (rest_selec != NULL)
5340                 *rest_selec = like_selectivity(&patt[pos], pattlen - pos,
5341                                                                            case_insensitive);
5342
5343         pfree(patt);
5344         pfree(match);
5345
5346         /* in LIKE, an empty pattern is an exact match! */
5347         if (pos == pattlen)
5348                 return Pattern_Prefix_Exact;    /* reached end of pattern, so exact */
5349
5350         if (match_pos > 0)
5351                 return Pattern_Prefix_Partial;
5352
5353         return Pattern_Prefix_None;
5354 }
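
/*
 * A minimal standalone sketch of the case-sensitive prefix scan above, on a
 * NUL-terminated pattern; the helper name is illustrative only.  The LIKE
 * pattern 'abc%def' yields the prefix "abc", 'abc\%d%' yields "abc%d"
 * (the backslash protects the %), and '%abc' yields an empty prefix.
 */
#ifdef NOT_USED
static int
sketch_like_prefix(const char *patt, char *prefix)
{
	int			pos,
				match_pos = 0;

	for (pos = 0; patt[pos] != '\0'; pos++)
	{
		/* % and _ end the fixed prefix */
		if (patt[pos] == '%' || patt[pos] == '_')
			break;
		/* backslash: take the next character literally */
		if (patt[pos] == '\\')
		{
			pos++;
			if (patt[pos] == '\0')
				break;
		}
		prefix[match_pos++] = patt[pos];
	}
	prefix[match_pos] = '\0';
	return match_pos;
}
#endif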
5355
5356 static Pattern_Prefix_Status
5357 regex_fixed_prefix(Const *patt_const, bool case_insensitive, Oid collation,
5358                                    Const **prefix_const, Selectivity *rest_selec)
5359 {
5360         Oid                     typeid = patt_const->consttype;
5361         char       *prefix;
5362         bool            exact;
5363
5364         /*
5365          * Should be unnecessary, since there are no bytea regex operators
5366          * defined.  Note, though, that the rest of this function has *not* been
5367          * made safe for binary (possibly NUL-containing) strings.
5368          */
5369         if (typeid == BYTEAOID)
5370                 ereport(ERROR,
5371                                 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
5372                  errmsg("regular-expression matching not supported on type bytea")));
5373
5374         /* Use the regexp machinery to extract the prefix, if any */
5375         prefix = regexp_fixed_prefix(DatumGetTextPP(patt_const->constvalue),
5376                                                                  case_insensitive, collation,
5377                                                                  &exact);
5378
5379         if (prefix == NULL)
5380         {
5381                 *prefix_const = NULL;
5382
5383                 if (rest_selec != NULL)
5384                 {
5385                         char       *patt = TextDatumGetCString(patt_const->constvalue);
5386
5387                         *rest_selec = regex_selectivity(patt, strlen(patt),
5388                                                                                         case_insensitive,
5389                                                                                         0);
5390                         pfree(patt);
5391                 }
5392
5393                 return Pattern_Prefix_None;
5394         }
5395
5396         *prefix_const = string_to_const(prefix, typeid);
5397
5398         if (rest_selec != NULL)
5399         {
5400                 if (exact)
5401                 {
5402                         /* Exact match, so there's no additional selectivity */
5403                         *rest_selec = 1.0;
5404                 }
5405                 else
5406                 {
5407                         char       *patt = TextDatumGetCString(patt_const->constvalue);
5408
5409                         *rest_selec = regex_selectivity(patt, strlen(patt),
5410                                                                                         case_insensitive,
5411                                                                                         strlen(prefix));
5412                         pfree(patt);
5413                 }
5414         }
5415
5416         pfree(prefix);
5417
5418         if (exact)
5419                 return Pattern_Prefix_Exact;    /* pattern specifies exact match */
5420         else
5421                 return Pattern_Prefix_Partial;
5422 }
5423
5424 Pattern_Prefix_Status
5425 pattern_fixed_prefix(Const *patt, Pattern_Type ptype, Oid collation,
5426                                          Const **prefix, Selectivity *rest_selec)
5427 {
5428         Pattern_Prefix_Status result;
5429
5430         switch (ptype)
5431         {
5432                 case Pattern_Type_Like:
5433                         result = like_fixed_prefix(patt, false, collation,
5434                                                                            prefix, rest_selec);
5435                         break;
5436                 case Pattern_Type_Like_IC:
5437                         result = like_fixed_prefix(patt, true, collation,
5438                                                                            prefix, rest_selec);
5439                         break;
5440                 case Pattern_Type_Regex:
5441                         result = regex_fixed_prefix(patt, false, collation,
5442                                                                                 prefix, rest_selec);
5443                         break;
5444                 case Pattern_Type_Regex_IC:
5445                         result = regex_fixed_prefix(patt, true, collation,
5446                                                                                 prefix, rest_selec);
5447                         break;
5448                 default:
5449                         elog(ERROR, "unrecognized ptype: %d", (int) ptype);
5450                         result = Pattern_Prefix_None;           /* keep compiler quiet */
5451                         break;
5452         }
5453         return result;
5454 }
5455
5456 /*
5457  * Estimate the selectivity of a fixed prefix for a pattern match.
5458  *
5459  * A fixed prefix "foo" is estimated as the selectivity of the expression
5460  * "variable >= 'foo' AND variable < 'fop'" (see also indxpath.c).
5461  *
5462  * The selectivity estimate is with respect to the portion of the column
5463  * population represented by the histogram --- the caller must fold this
5464  * together with info about MCVs and NULLs.
5465  *
5466  * We use the >= and < operators from the specified btree opfamily to do the
5467  * estimation.  The given variable and Const must be of the associated
5468  * datatype.
5469  *
5470  * XXX Note: we make use of the upper bound to estimate operator selectivity
5471  * even if the locale is such that we cannot rely on the upper-bound string.
5472  * The selectivity only needs to be approximately right anyway, so it seems
5473  * more useful to use the upper-bound code than not.
5474  */
5475 static Selectivity
5476 prefix_selectivity(PlannerInfo *root, VariableStatData *vardata,
5477                                    Oid vartype, Oid opfamily, Const *prefixcon)
5478 {
5479         Selectivity prefixsel;
5480         Oid                     cmpopr;
5481         FmgrInfo        opproc;
5482         Const      *greaterstrcon;
5483         Selectivity eq_sel;
5484
5485         cmpopr = get_opfamily_member(opfamily, vartype, vartype,
5486                                                                  BTGreaterEqualStrategyNumber);
5487         if (cmpopr == InvalidOid)
5488                 elog(ERROR, "no >= operator for opfamily %u", opfamily);
5489         fmgr_info(get_opcode(cmpopr), &opproc);
5490
5491         prefixsel = ineq_histogram_selectivity(root, vardata, &opproc, true,
5492                                                                                    prefixcon->constvalue,
5493                                                                                    prefixcon->consttype);
5494
5495         if (prefixsel < 0.0)
5496         {
5497                 /* No histogram is present ... return a suitable default estimate */
5498                 return DEFAULT_MATCH_SEL;
5499         }
5500
5501         /*-------
5502          * If we can create a string larger than the prefix, say
5503          *      "x < greaterstr".
5504          *-------
5505          */
5506         cmpopr = get_opfamily_member(opfamily, vartype, vartype,
5507                                                                  BTLessStrategyNumber);
5508         if (cmpopr == InvalidOid)
5509                 elog(ERROR, "no < operator for opfamily %u", opfamily);
5510         fmgr_info(get_opcode(cmpopr), &opproc);
5511         greaterstrcon = make_greater_string(prefixcon, &opproc,
5512                                                                                 DEFAULT_COLLATION_OID);
5513         if (greaterstrcon)
5514         {
5515                 Selectivity topsel;
5516
5517                 topsel = ineq_histogram_selectivity(root, vardata, &opproc, false,
5518                                                                                         greaterstrcon->constvalue,
5519                                                                                         greaterstrcon->consttype);
5520
5521                 /* ineq_histogram_selectivity worked before, it shouldn't fail now */
5522                 Assert(topsel >= 0.0);
5523
5524                 /*
5525                  * Merge the two selectivities in the same way as for a range query
5526                  * (see clauselist_selectivity()).  Note that we don't need to worry
5527                  * about double-exclusion of nulls, since ineq_histogram_selectivity
5528                  * doesn't count those anyway.
5529                  */
5530                 prefixsel = topsel + prefixsel - 1.0;
5531         }
5532
5533         /*
5534          * If the prefix is long then the two bounding values might be too close
5535          * together for the histogram to distinguish them usefully, resulting in a
5536          * zero estimate (plus or minus roundoff error). To avoid returning a
5537          * ridiculously small estimate, compute the estimated selectivity for
5538          * "variable = 'foo'", and clamp to that. (Obviously, the resultant
5539          * estimate should be at least that.)
5540          *
5541          * We apply this even if we couldn't make a greater string.  That case
5542          * suggests that the prefix is near the maximum possible, and thus
5543          * probably off the end of the histogram, and thus we probably got a very
5544          * small estimate from the >= condition; so we still need to clamp.
5545          */
5546         cmpopr = get_opfamily_member(opfamily, vartype, vartype,
5547                                                                  BTEqualStrategyNumber);
5548         if (cmpopr == InvalidOid)
5549                 elog(ERROR, "no = operator for opfamily %u", opfamily);
5550         eq_sel = var_eq_const(vardata, cmpopr, prefixcon->constvalue,
5551                                                   false, true);
5552
5553         prefixsel = Max(prefixsel, eq_sel);
5554
5555         return prefixsel;
5556 }
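
/*
 * A sketch of the range arithmetic above with made-up histogram fractions:
 * if 62% of the histogram satisfies var >= 'foo' and 45% satisfies
 * var < 'fop', the prefix selectivity is 0.62 + 0.45 - 1.0 = 0.07, and it
 * is then clamped from below by the estimated selectivity of var = 'foo'.
 * The helper name and the numbers are illustrative only.
 */
#ifdef NOT_USED
static double
sketch_prefix_selectivity(double ge_sel, double lt_sel, double eq_sel)
{
	double		sel = ge_sel + lt_sel - 1.0;	/* range-query combination */

	return (sel > eq_sel) ? sel : eq_sel;		/* clamp by equality estimate */
}
#endif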
5557
5558
5559 /*
5560  * Estimate the selectivity of a pattern of the specified type.
5561  * Note that any fixed prefix of the pattern will have been removed already,
5562  * so actually we may be looking at just a fragment of the pattern.
5563  *
5564  * For now, we use a very simplistic approach: fixed characters reduce the
5565  * selectivity a good deal, character ranges reduce it a little,
5566  * wildcards (such as % for LIKE or .* for regex) increase it.
5567  */
5568
5569 #define FIXED_CHAR_SEL  0.20    /* about 1/5 */
5570 #define CHAR_RANGE_SEL  0.25
5571 #define ANY_CHAR_SEL    0.9             /* not 1, since it won't match end-of-string */
5572 #define FULL_WILDCARD_SEL 5.0
5573 #define PARTIAL_WILDCARD_SEL 2.0
5574
5575 static Selectivity
5576 like_selectivity(const char *patt, int pattlen, bool case_insensitive)
5577 {
5578         Selectivity sel = 1.0;
5579         int                     pos;
5580
5581         /* Skip any leading wildcard; it's already factored into initial sel */
5582         for (pos = 0; pos < pattlen; pos++)
5583         {
5584                 if (patt[pos] != '%' && patt[pos] != '_')
5585                         break;
5586         }
5587
5588         for (; pos < pattlen; pos++)
5589         {
5590                 /* % and _ are wildcard characters in LIKE */
5591                 if (patt[pos] == '%')
5592                         sel *= FULL_WILDCARD_SEL;
5593                 else if (patt[pos] == '_')
5594                         sel *= ANY_CHAR_SEL;
5595                 else if (patt[pos] == '\\')
5596                 {
5597                         /* Backslash quotes the next character */
5598                         pos++;
5599                         if (pos >= pattlen)
5600                                 break;
5601                         sel *= FIXED_CHAR_SEL;
5602                 }
5603                 else
5604                         sel *= FIXED_CHAR_SEL;
5605         }
5606         /* Could get sel > 1 if multiple wildcards */
5607         if (sel > 1.0)
5608                 sel = 1.0;
5609         return sel;
5610 }
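
/*
 * Two worked examples of the arithmetic above, given the constants defined
 * earlier in this file.  For the remainder '%d_f' the leading % is skipped,
 * then d and f contribute FIXED_CHAR_SEL (0.20) each and _ contributes
 * ANY_CHAR_SEL (0.9), giving 0.20 * 0.9 * 0.20 = 0.036; a remainder of just
 * '%' keeps the initial selectivity of 1.0.  The wrapper name is
 * illustrative only.
 */
#ifdef NOT_USED
static void
sketch_like_selectivity_examples(void)
{
	Assert(fabs(like_selectivity("%d_f", 4, false) - 0.036) < 1e-9);
	Assert(like_selectivity("%", 1, false) == 1.0);
}
#endif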
5611
5612 static Selectivity
5613 regex_selectivity_sub(const char *patt, int pattlen, bool case_insensitive)
5614 {
5615         Selectivity sel = 1.0;
5616         int                     paren_depth = 0;
5617         int                     paren_pos = 0;  /* dummy init to keep compiler quiet */
5618         int                     pos;
5619
5620         for (pos = 0; pos < pattlen; pos++)
5621         {
5622                 if (patt[pos] == '(')
5623                 {
5624                         if (paren_depth == 0)
5625                                 paren_pos = pos;        /* remember start of parenthesized item */
5626                         paren_depth++;
5627                 }
5628                 else if (patt[pos] == ')' && paren_depth > 0)
5629                 {
5630                         paren_depth--;
5631                         if (paren_depth == 0)
5632                                 sel *= regex_selectivity_sub(patt + (paren_pos + 1),
5633                                                                                          pos - (paren_pos + 1),
5634                                                                                          case_insensitive);
5635                 }
5636                 else if (patt[pos] == '|' && paren_depth == 0)
5637                 {
5638                         /*
5639                          * If unquoted | is present at paren level 0 in pattern, we have
5640                          * multiple alternatives; sum their probabilities.
5641                          */
5642                         sel += regex_selectivity_sub(patt + (pos + 1),
5643                                                                                  pattlen - (pos + 1),
5644                                                                                  case_insensitive);
5645                         break;                          /* rest of pattern is now processed */
5646                 }
5647                 else if (patt[pos] == '[')
5648                 {
5649                         bool            negclass = false;
5650
5651                         if (patt[++pos] == '^')
5652                         {
5653                                 negclass = true;
5654                                 pos++;
5655                         }
5656                         if (patt[pos] == ']')           /* ']' at start of class is not
5657                                                                                  * special */
5658                                 pos++;
5659                         while (pos < pattlen && patt[pos] != ']')
5660                                 pos++;
5661                         if (paren_depth == 0)
5662                                 sel *= (negclass ? (1.0 - CHAR_RANGE_SEL) : CHAR_RANGE_SEL);
5663                 }
5664                 else if (patt[pos] == '.')
5665                 {
5666                         if (paren_depth == 0)
5667                                 sel *= ANY_CHAR_SEL;
5668                 }
5669                 else if (patt[pos] == '*' ||
5670                                  patt[pos] == '?' ||
5671                                  patt[pos] == '+')
5672                 {
5673                         /* Ought to be smarter about quantifiers... */
5674                         if (paren_depth == 0)
5675                                 sel *= PARTIAL_WILDCARD_SEL;
5676                 }
5677                 else if (patt[pos] == '{')
5678                 {
5679                         while (pos < pattlen && patt[pos] != '}')
5680                                 pos++;
5681                         if (paren_depth == 0)
5682                                 sel *= PARTIAL_WILDCARD_SEL;
5683                 }
5684                 else if (patt[pos] == '\\')
5685                 {
5686                         /* backslash quotes the next character */
5687                         pos++;
5688                         if (pos >= pattlen)
5689                                 break;
5690                         if (paren_depth == 0)
5691                                 sel *= FIXED_CHAR_SEL;
5692                 }
5693                 else
5694                 {
5695                         if (paren_depth == 0)
5696                                 sel *= FIXED_CHAR_SEL;
5697                 }
5698         }
5699         /* Could get sel > 1 if multiple wildcards */
5700         if (sel > 1.0)
5701                 sel = 1.0;
5702         return sel;
5703 }
5704
5705 static Selectivity
5706 regex_selectivity(const char *patt, int pattlen, bool case_insensitive,
5707                                   int fixed_prefix_len)
5708 {
5709         Selectivity sel;
5710
5711         /* If patt doesn't end with $, consider it to have a trailing wildcard */
5712         if (pattlen > 0 && patt[pattlen - 1] == '$' &&
5713                 (pattlen == 1 || patt[pattlen - 2] != '\\'))
5714         {
5715                 /* has trailing $ */
5716                 sel = regex_selectivity_sub(patt, pattlen - 1, case_insensitive);
5717         }
5718         else
5719         {
5720                 /* no trailing $ */
5721                 sel = regex_selectivity_sub(patt, pattlen, case_insensitive);
5722                 sel *= FULL_WILDCARD_SEL;
5723         }
5724
5725         /* If there's a fixed prefix, discount its selectivity */
5726         if (fixed_prefix_len > 0)
5727                 sel /= pow(FIXED_CHAR_SEL, fixed_prefix_len);
5728
5729         /* Make sure result stays in range */
5730         CLAMP_PROBABILITY(sel);
5731         return sel;
5732 }
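
/*
 * Two worked examples of the arithmetic above, given the constants defined
 * earlier in this file.  The remainder '[0-9]+$' costs one character class
 * (CHAR_RANGE_SEL, 0.25) times one quantifier (PARTIAL_WILDCARD_SEL, 2.0),
 * and the trailing $ suppresses the full-wildcard factor, giving 0.5; the
 * remainder 'x*y$' costs 0.20 * 2.0 * 0.20 = 0.08.  The wrapper name is
 * illustrative only.
 */
#ifdef NOT_USED
static void
sketch_regex_selectivity_examples(void)
{
	Assert(fabs(regex_selectivity("[0-9]+$", 7, false, 0) - 0.5) < 1e-9);
	Assert(fabs(regex_selectivity("x*y$", 4, false, 0) - 0.08) < 1e-9);
}
#endif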
5733
5734
5735 /*
5736  * For bytea, the increment function need only increment the current byte
5737  * (there are no multibyte characters to worry about).
5738  */
5739 static bool
5740 byte_increment(unsigned char *ptr, int len)
5741 {
5742         if (*ptr >= 255)
5743                 return false;
5744         (*ptr)++;
5745         return true;
5746 }
5747
5748 /*
5749  * Try to generate a string greater than the given string or any
5750  * string it is a prefix of.  If successful, return a palloc'd string
5751  * in the form of a Const node; else return NULL.
5752  *
5753  * The caller must provide the appropriate "less than" comparison function
5754  * for testing the strings, along with the collation to use.
5755  *
5756  * The key requirement here is that given a prefix string, say "foo",
5757  * we must be able to generate another string "fop" that is greater than
5758  * all strings "foobar" starting with "foo".  We can test that we have
5759  * generated a string greater than the prefix string, but in non-C collations
5760  * that is not a bulletproof guarantee that an extension of the string might
5761  * not sort after it; an example is that "foo " is less than "foo!", but it
5762  * is not clear that a "dictionary" sort ordering will consider "foo!" less
5763  * than "foo bar".  CAUTION: Therefore, this function should be used only for
5764  * estimation purposes when working in a non-C collation.
5765  *
5766  * To try to catch most cases where an extended string might otherwise sort
5767  * before the result value, we determine which of the strings "Z", "z", "y",
5768  * and "9" is seen as largest by the collation, and append that to the given
5769  * prefix before trying to find a string that compares as larger.
5770  *
5771  * To search for a greater string, we repeatedly "increment" the rightmost
5772  * character, using an encoding-specific character incrementer function.
5773  * When it's no longer possible to increment the last character, we truncate
5774  * off that character and start incrementing the next-to-rightmost.
5775  * For example, if "z" were the last character in the sort order, then we
5776  * could produce "foo" as a string greater than "fonz".
5777  *
5778  * This could be rather slow in the worst case, but in most cases we
5779  * won't have to try more than one or two strings before succeeding.
5780  *
5781  * Note that it's important for the character incrementer not to be too fussy
5782  * about producing every possible character code, since in some cases the only
5783  * way to get a larger string is to increment a previous character position.
5784  * So we don't want to spend too much time trying every possible character
5785  * code at the last position.  A good rule of thumb is to be sure that we
5786  * don't try more than 256*K values for a K-byte character (and definitely
5787  * not 256^K, which is what an exhaustive search would approach).
5788  */
5789 Const *
5790 make_greater_string(const Const *str_const, FmgrInfo *ltproc, Oid collation)
5791 {
5792         Oid                     datatype = str_const->consttype;
5793         char       *workstr;
5794         int                     len;
5795         Datum           cmpstr;
5796         text       *cmptxt = NULL;
5797         mbcharacter_incrementer charinc;
5798
5799         /*
5800          * Get a modifiable copy of the prefix string in C-string format, and set
5801          * up the string we will compare to as a Datum.  In C locale this can just
5802          * be the given prefix string, otherwise we need to add a suffix.  Types
5803          * NAME and BYTEA sort bytewise so they don't need a suffix either.
5804          */
5805         if (datatype == NAMEOID)
5806         {
5807                 workstr = DatumGetCString(DirectFunctionCall1(nameout,
5808                                                                                                           str_const->constvalue));
5809                 len = strlen(workstr);
5810                 cmpstr = str_const->constvalue;
5811         }
5812         else if (datatype == BYTEAOID)
5813         {
5814                 bytea      *bstr = DatumGetByteaP(str_const->constvalue);
5815
5816                 len = VARSIZE(bstr) - VARHDRSZ;
5817                 workstr = (char *) palloc(len);
5818                 memcpy(workstr, VARDATA(bstr), len);
5819                 if ((Pointer) bstr != DatumGetPointer(str_const->constvalue))
5820                         pfree(bstr);
5821                 cmpstr = str_const->constvalue;
5822         }
5823         else
5824         {
5825                 workstr = TextDatumGetCString(str_const->constvalue);
5826                 len = strlen(workstr);
5827                 if (lc_collate_is_c(collation) || len == 0)
5828                         cmpstr = str_const->constvalue;
5829                 else
5830                 {
5831                         /* If first time through, determine the suffix to use */
5832                         static char suffixchar = 0;
5833                         static Oid      suffixcollation = 0;
5834
5835                         if (!suffixchar || suffixcollation != collation)
5836                         {
5837                                 char       *best;
5838
5839                                 best = "Z";
5840                                 if (varstr_cmp(best, 1, "z", 1, collation) < 0)
5841                                         best = "z";
5842                                 if (varstr_cmp(best, 1, "y", 1, collation) < 0)
5843                                         best = "y";
5844                                 if (varstr_cmp(best, 1, "9", 1, collation) < 0)
5845                                         best = "9";
5846                                 suffixchar = *best;
5847                                 suffixcollation = collation;
5848                         }
5849
5850                         /* And build the string to compare to */
5851                         cmptxt = (text *) palloc(VARHDRSZ + len + 1);
5852                         SET_VARSIZE(cmptxt, VARHDRSZ + len + 1);
5853                         memcpy(VARDATA(cmptxt), workstr, len);
5854                         *(VARDATA(cmptxt) + len) = suffixchar;
5855                         cmpstr = PointerGetDatum(cmptxt);
5856                 }
5857         }
5858
5859         /* Select appropriate character-incrementer function */
5860         if (datatype == BYTEAOID)
5861                 charinc = byte_increment;
5862         else
5863                 charinc = pg_database_encoding_character_incrementer();
5864
5865         /* And search ... */
5866         while (len > 0)
5867         {
5868                 int                     charlen;
5869                 unsigned char *lastchar;
5870
5871                 /* Identify the last character --- for bytea, just the last byte */
5872                 if (datatype == BYTEAOID)
5873                         charlen = 1;
5874                 else
5875                         charlen = len - pg_mbcliplen(workstr, len, len - 1);
5876                 lastchar = (unsigned char *) (workstr + len - charlen);
5877
5878                 /*
5879                  * Try to generate a larger string by incrementing the last character
5880                  * (for BYTEA, we treat each byte as a character).
5881                  *
5882                  * Note: the incrementer function is expected to return true if it's
5883                  * generated a valid-per-the-encoding new character, otherwise false.
5884                  * The contents of the character on false return are unspecified.
5885                  */
5886                 while (charinc(lastchar, charlen))
5887                 {
5888                         Const      *workstr_const;
5889
5890                         if (datatype == BYTEAOID)
5891                                 workstr_const = string_to_bytea_const(workstr, len);
5892                         else
5893                                 workstr_const = string_to_const(workstr, datatype);
5894
5895                         if (DatumGetBool(FunctionCall2Coll(ltproc,
5896                                                                                            collation,
5897                                                                                            cmpstr,
5898                                                                                            workstr_const->constvalue)))
5899                         {
5900                                 /* Successfully made a string larger than cmpstr */
5901                                 if (cmptxt)
5902                                         pfree(cmptxt);
5903                                 pfree(workstr);
5904                                 return workstr_const;
5905                         }
5906
5907                         /* No good, release unusable value and try again */
5908                         pfree(DatumGetPointer(workstr_const->constvalue));
5909                         pfree(workstr_const);
5910                 }
5911
5912                 /*
5913                  * No luck here, so truncate off the last character and try to
5914                  * increment the next one.
5915                  */
5916                 len -= charlen;
5917                 workstr[len] = '\0';
5918         }
5919
5920         /* Failed... */
5921         if (cmptxt)
5922                 pfree(cmptxt);
5923         pfree(workstr);
5924
5925         return NULL;
5926 }
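
/*
 * Editor's sketch (not part of the original source): how a caller such as
 * prefix_selectivity() might use make_greater_string() to turn the fixed
 * prefix of LIKE 'foo%' into an upper range bound.  The function name and
 * constants below are hypothetical; "ltproc" is assumed to already hold the
 * fmgr info for the type's "<" comparison operator.
 */
#if 0
static void
example_prefix_upper_bound(FmgrInfo *ltproc, Oid collation)
{
	Const	   *prefix = string_to_const("foo", TEXTOID);
	Const	   *upper = make_greater_string(prefix, ltproc, collation);

	if (upper != NULL)
	{
		/*
		 * Estimate the prefix match as the range
		 * "var >= 'foo' AND var < upper" (upper is typically 'fop').
		 */
	}
	else
	{
		/* No larger string could be generated; fall back to a default. */
	}
}
#endif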
5927
5928 /*
5929  * Generate a Datum of the appropriate type from a C string.
5930  * Note that all of the supported types are pass-by-ref, so the
5931  * returned value should be pfree'd if no longer needed.
5932  */
5933 static Datum
5934 string_to_datum(const char *str, Oid datatype)
5935 {
5936         Assert(str != NULL);
5937
5938         /*
5939          * We cheat a little by assuming that CStringGetTextDatum() will do for
5940          * bpchar and varchar constants too...
5941          */
5942         if (datatype == NAMEOID)
5943                 return DirectFunctionCall1(namein, CStringGetDatum(str));
5944         else if (datatype == BYTEAOID)
5945                 return DirectFunctionCall1(byteain, CStringGetDatum(str));
5946         else
5947                 return CStringGetTextDatum(str);
5948 }
5949
5950 /*
5951  * Generate a Const node of the appropriate type from a C string.
5952  */
5953 static Const *
5954 string_to_const(const char *str, Oid datatype)
5955 {
5956         Datum           conval = string_to_datum(str, datatype);
5957         Oid                     collation;
5958         int                     constlen;
5959
5960         /*
5961          * We only need to support a few datatypes here, so hard-wire properties
5962          * instead of incurring the expense of catalog lookups.
5963          */
5964         switch (datatype)
5965         {
5966                 case TEXTOID:
5967                 case VARCHAROID:
5968                 case BPCHAROID:
5969                         collation = DEFAULT_COLLATION_OID;
5970                         constlen = -1;
5971                         break;
5972
5973                 case NAMEOID:
5974                         collation = InvalidOid;
5975                         constlen = NAMEDATALEN;
5976                         break;
5977
5978                 case BYTEAOID:
5979                         collation = InvalidOid;
5980                         constlen = -1;
5981                         break;
5982
5983                 default:
5984                         elog(ERROR, "unexpected datatype in string_to_const: %u",
5985                                  datatype);
5986                         return NULL;
5987         }
5988
5989         return makeConst(datatype, -1, collation, constlen,
5990                                          conval, false, false);
5991 }
5992
5993 /*
5994  * Generate a Const node of bytea type from a binary C string and a length.
5995  */
5996 static Const *
5997 string_to_bytea_const(const char *str, size_t str_len)
5998 {
5999         bytea      *bstr = palloc(VARHDRSZ + str_len);
6000         Datum           conval;
6001
6002         memcpy(VARDATA(bstr), str, str_len);
6003         SET_VARSIZE(bstr, VARHDRSZ + str_len);
6004         conval = PointerGetDatum(bstr);
6005
6006         return makeConst(BYTEAOID, -1, InvalidOid, -1, conval, false, false);
6007 }
6008
6009 /*-------------------------------------------------------------------------
6010  *
6011  * Index cost estimation functions
6012  *
6013  *-------------------------------------------------------------------------
6014  */
6015
6016 /*
6017  * deconstruct_indexquals is a simple function to examine the indexquals
6018  * attached to a proposed IndexPath.  It returns a list of IndexQualInfo
6019  * structs, one per qual expression.
6020  */
6021 typedef struct
6022 {
6023         RestrictInfo *rinfo;            /* the indexqual itself */
6024         int                     indexcol;               /* zero-based index column number */
6025         bool            varonleft;              /* true if index column is on left of qual */
6026         Oid                     clause_op;              /* qual's operator OID, if relevant */
6027         Node       *other_operand;      /* non-index operand of qual's operator */
6028 } IndexQualInfo;
6029
6030 static List *
6031 deconstruct_indexquals(IndexPath *path)
6032 {
6033         List       *result = NIL;
6034         IndexOptInfo *index = path->indexinfo;
6035         ListCell   *lcc,
6036                            *lci;
6037
6038         forboth(lcc, path->indexquals, lci, path->indexqualcols)
6039         {
6040                 RestrictInfo *rinfo = (RestrictInfo *) lfirst(lcc);
6041                 int                     indexcol = lfirst_int(lci);
6042                 Expr       *clause;
6043                 Node       *leftop,
6044                                    *rightop;
6045                 IndexQualInfo *qinfo;
6046
6047                 Assert(IsA(rinfo, RestrictInfo));
6048                 clause = rinfo->clause;
6049
6050                 qinfo = (IndexQualInfo *) palloc(sizeof(IndexQualInfo));
6051                 qinfo->rinfo = rinfo;
6052                 qinfo->indexcol = indexcol;
6053
6054                 if (IsA(clause, OpExpr))
6055                 {
6056                         qinfo->clause_op = ((OpExpr *) clause)->opno;
6057                         leftop = get_leftop(clause);
6058                         rightop = get_rightop(clause);
6059                         if (match_index_to_operand(leftop, indexcol, index))
6060                         {
6061                                 qinfo->varonleft = true;
6062                                 qinfo->other_operand = rightop;
6063                         }
6064                         else
6065                         {
6066                                 Assert(match_index_to_operand(rightop, indexcol, index));
6067                                 qinfo->varonleft = false;
6068                                 qinfo->other_operand = leftop;
6069                         }
6070                 }
6071                 else if (IsA(clause, RowCompareExpr))
6072                 {
6073                         RowCompareExpr *rc = (RowCompareExpr *) clause;
6074
6075                         qinfo->clause_op = linitial_oid(rc->opnos);
6076                         /* Examine only first columns to determine left/right sides */
6077                         if (match_index_to_operand((Node *) linitial(rc->largs),
6078                                                                            indexcol, index))
6079                         {
6080                                 qinfo->varonleft = true;
6081                                 qinfo->other_operand = (Node *) rc->rargs;
6082                         }
6083                         else
6084                         {
6085                                 Assert(match_index_to_operand((Node *) linitial(rc->rargs),
6086                                                                                           indexcol, index));
6087                                 qinfo->varonleft = false;
6088                                 qinfo->other_operand = (Node *) rc->largs;
6089                         }
6090                 }
6091                 else if (IsA(clause, ScalarArrayOpExpr))
6092                 {
6093                         ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) clause;
6094
6095                         qinfo->clause_op = saop->opno;
6096                         /* index column is always on the left in this case */
6097                         Assert(match_index_to_operand((Node *) linitial(saop->args),
6098                                                                                   indexcol, index));
6099                         qinfo->varonleft = true;
6100                         qinfo->other_operand = (Node *) lsecond(saop->args);
6101                 }
6102                 else if (IsA(clause, NullTest))
6103                 {
6104                         qinfo->clause_op = InvalidOid;
6105                         Assert(match_index_to_operand((Node *) ((NullTest *) clause)->arg,
6106                                                                                   indexcol, index));
6107                         qinfo->varonleft = true;
6108                         qinfo->other_operand = NULL;
6109                 }
6110                 else
6111                 {
6112                         elog(ERROR, "unsupported indexqual type: %d",
6113                                  (int) nodeTag(clause));
6114                 }
6115
6116                 result = lappend(result, qinfo);
6117         }
6118         return result;
6119 }
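
/*
 * Editor's note (not part of the original source): for a qual written as
 * "42 > x", with the index column x on the right-hand side, the resulting
 * IndexQualInfo has varonleft = false, clause_op = the OID of ">" as
 * written, and other_operand = the Const node for 42; callers that care
 * about operator semantics (rather than just "is it equality") commute
 * clause_op when varonleft is false.
 */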
6120
6121 /*
6122  * Simple function to compute the total eval cost of the "other operands"
6123  * in an IndexQualInfo list.  Since we know these will be evaluated just
6124  * once per scan, there's no need to distinguish startup from per-row cost.
6125  */
6126 static Cost
6127 other_operands_eval_cost(PlannerInfo *root, List *qinfos)
6128 {
6129         Cost            qual_arg_cost = 0;
6130         ListCell   *lc;
6131
6132         foreach(lc, qinfos)
6133         {
6134                 IndexQualInfo *qinfo = (IndexQualInfo *) lfirst(lc);
6135                 QualCost        index_qual_cost;
6136
6137                 cost_qual_eval_node(&index_qual_cost, qinfo->other_operand, root);
6138                 qual_arg_cost += index_qual_cost.startup + index_qual_cost.per_tuple;
6139         }
6140         return qual_arg_cost;
6141 }
6142
6143 /*
6144  * Get other-operand eval cost for an index orderby list.
6145  *
6146  * Index orderby expressions aren't represented as RestrictInfos (since they
6147  * aren't boolean, usually).  So we can't apply deconstruct_indexquals to
6148  * them.  However, they are much simpler to deal with since they are always
6149  * OpExprs and the index column is always on the left.
6150  */
6151 static Cost
6152 orderby_operands_eval_cost(PlannerInfo *root, IndexPath *path)
6153 {
6154         Cost            qual_arg_cost = 0;
6155         ListCell   *lc;
6156
6157         foreach(lc, path->indexorderbys)
6158         {
6159                 Expr       *clause = (Expr *) lfirst(lc);
6160                 Node       *other_operand;
6161                 QualCost        index_qual_cost;
6162
6163                 if (IsA(clause, OpExpr))
6164                 {
6165                         other_operand = get_rightop(clause);
6166                 }
6167                 else
6168                 {
6169                         elog(ERROR, "unsupported indexorderby type: %d",
6170                                  (int) nodeTag(clause));
6171                         other_operand = NULL;           /* keep compiler quiet */
6172                 }
6173
6174                 cost_qual_eval_node(&index_qual_cost, other_operand, root);
6175                 qual_arg_cost += index_qual_cost.startup + index_qual_cost.per_tuple;
6176         }
6177         return qual_arg_cost;
6178 }
6179
6180 /*
6181  * genericcostestimate is a general-purpose estimator that can be used for
6182  * most index types.  In some cases we use genericcostestimate as the base
6183  * code and then incorporate additional index-type-specific knowledge in
6184  * the type-specific calling function.  To avoid code duplication, we make
6185  * genericcostestimate return a number of intermediate values as well as
6186  * its preliminary estimates of the output cost values.  The GenericCosts
6187  * struct includes all these values.
6188  *
6189  * Callers should initialize all fields of GenericCosts to zero.  In addition,
6190  * they can set numIndexTuples to some positive value if they have a better
6191  * than default way of estimating the number of leaf index tuples visited.
6192  */
6193 typedef struct
6194 {
6195         /* These are the values the cost estimator must return to the planner */
6196         Cost            indexStartupCost;               /* index-related startup cost */
6197         Cost            indexTotalCost; /* total index-related scan cost */
6198         Selectivity indexSelectivity;           /* selectivity of index */
6199         double          indexCorrelation;               /* order correlation of index */
6200
6201         /* Intermediate values we obtain along the way */
6202         double          numIndexPages;  /* number of leaf pages visited */
6203         double          numIndexTuples; /* number of leaf tuples visited */
6204         double          spc_random_page_cost;   /* relevant random_page_cost value */
6205         double          num_sa_scans;   /* # indexscans from ScalarArrayOps */
6206 } GenericCosts;
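
/*
 * Editor's sketch (not part of the original source): the minimal calling
 * pattern described above; hashcostestimate() later in this file is
 * essentially exactly this.
 */
#if 0
	GenericCosts costs;

	MemSet(&costs, 0, sizeof(costs));
	/* optionally: costs.numIndexTuples = <caller's better estimate>; */
	genericcostestimate(root, path, loop_count, qinfos, &costs);

	*indexStartupCost = costs.indexStartupCost;
	*indexTotalCost = costs.indexTotalCost;
	*indexSelectivity = costs.indexSelectivity;
	*indexCorrelation = costs.indexCorrelation;
#endif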
6207
6208 static void
6209 genericcostestimate(PlannerInfo *root,
6210                                         IndexPath *path,
6211                                         double loop_count,
6212                                         List *qinfos,
6213                                         GenericCosts *costs)
6214 {
6215         IndexOptInfo *index = path->indexinfo;
6216         List       *indexQuals = path->indexquals;
6217         List       *indexOrderBys = path->indexorderbys;
6218         Cost            indexStartupCost;
6219         Cost            indexTotalCost;
6220         Selectivity indexSelectivity;
6221         double          indexCorrelation;
6222         double          numIndexPages;
6223         double          numIndexTuples;
6224         double          spc_random_page_cost;
6225         double          num_sa_scans;
6226         double          num_outer_scans;
6227         double          num_scans;
6228         double          qual_op_cost;
6229         double          qual_arg_cost;
6230         List       *selectivityQuals;
6231         ListCell   *l;
6232
6233         /*
6234          * If the index is partial, AND the index predicate with the explicitly
6235          * given indexquals to produce a more accurate idea of the index
6236          * selectivity.
6237          */
6238         selectivityQuals = add_predicate_to_quals(index, indexQuals);
6239
6240         /*
6241          * Check for ScalarArrayOpExpr index quals, and estimate the number of
6242          * index scans that will be performed.
6243          */
6244         num_sa_scans = 1;
6245         foreach(l, indexQuals)
6246         {
6247                 RestrictInfo *rinfo = (RestrictInfo *) lfirst(l);
6248
6249                 if (IsA(rinfo->clause, ScalarArrayOpExpr))
6250                 {
6251                         ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) rinfo->clause;
6252                         int                     alength = estimate_array_length(lsecond(saop->args));
6253
6254                         if (alength > 1)
6255                                 num_sa_scans *= alength;
6256                 }
6257         }
6258
6259         /* Estimate the fraction of main-table tuples that will be visited */
6260         indexSelectivity = clauselist_selectivity(root, selectivityQuals,
6261                                                                                           index->rel->relid,
6262                                                                                           JOIN_INNER,
6263                                                                                           NULL);
6264
6265         /*
6266          * If caller didn't give us an estimate, estimate the number of index
6267          * tuples that will be visited.  We do it in this rather peculiar-looking
6268          * way in order to get the right answer for partial indexes.
6269          */
6270         numIndexTuples = costs->numIndexTuples;
6271         if (numIndexTuples <= 0.0)
6272         {
6273                 numIndexTuples = indexSelectivity * index->rel->tuples;
6274
6275                 /*
6276                  * The above calculation counts all the tuples visited across all
6277                  * scans induced by ScalarArrayOpExpr nodes.  We want to consider the
6278                  * average per-indexscan number, so adjust.  This is a handy place to
6279                  * round to integer, too.  (If caller supplied tuple estimate, it's
6280                  * responsible for handling these considerations.)
6281                  */
6282                 numIndexTuples = rint(numIndexTuples / num_sa_scans);
6283         }
6284
6285         /*
6286          * We can bound the number of tuples by the index size in any case. Also,
6287          * always estimate at least one tuple is touched, even when
6288          * indexSelectivity estimate is tiny.
6289          */
6290         if (numIndexTuples > index->tuples)
6291                 numIndexTuples = index->tuples;
6292         if (numIndexTuples < 1.0)
6293                 numIndexTuples = 1.0;
6294
6295         /*
6296          * Estimate the number of index pages that will be retrieved.
6297          *
6298          * We use the simplistic method of taking a pro-rata fraction of the total
6299          * number of index pages.  In effect, this counts only leaf pages and not
6300          * any overhead such as index metapage or upper tree levels.
6301          *
6302          * In practice access to upper index levels is often nearly free because
6303          * those tend to stay in cache under load; moreover, the cost involved is
6304          * highly dependent on index type.  We therefore ignore such costs here
6305          * and leave it to the caller to add a suitable charge if needed.
6306          */
6307         if (index->pages > 1 && index->tuples > 1)
6308                 numIndexPages = ceil(numIndexTuples * index->pages / index->tuples);
6309         else
6310                 numIndexPages = 1.0;
6311
6312         /* fetch estimated page cost for tablespace containing index */
6313         get_tablespace_page_costs(index->reltablespace,
6314                                                           &spc_random_page_cost,
6315                                                           NULL);
6316
6317         /*
6318          * Now compute the disk access costs.
6319          *
6320          * The above calculations are all per-index-scan.  However, if we are in a
6321          * nestloop inner scan, we can expect the scan to be repeated (with
6322          * different search keys) for each row of the outer relation.  Likewise,
6323          * ScalarArrayOpExpr quals result in multiple index scans.  This creates
6324          * the potential for cache effects to reduce the number of disk page
6325          * fetches needed.  We want to estimate the average per-scan I/O cost in
6326          * the presence of caching.
6327          *
6328          * We use the Mackert-Lohman formula (see costsize.c for details) to
6329          * estimate the total number of page fetches that occur.  While this
6330          * wasn't what it was designed for, it seems a reasonable model anyway.
6331          * Note that we are counting pages not tuples anymore, so we take N = T =
6332          * index size, as if there were one "tuple" per page.
6333          */
6334         num_outer_scans = loop_count;
6335         num_scans = num_sa_scans * num_outer_scans;
6336
6337         if (num_scans > 1)
6338         {
6339                 double          pages_fetched;
6340
6341                 /* total page fetches ignoring cache effects */
6342                 pages_fetched = numIndexPages * num_scans;
6343
6344                 /* use Mackert and Lohman formula to adjust for cache effects */
6345                 pages_fetched = index_pages_fetched(pages_fetched,
6346                                                                                         index->pages,
6347                                                                                         (double) index->pages,
6348                                                                                         root);
6349
6350                 /*
6351                  * Now compute the total disk access cost, and then report a pro-rated
6352                  * share for each outer scan.  (Don't pro-rate for ScalarArrayOpExpr,
6353                  * since that's internal to the indexscan.)
6354                  */
6355                 indexTotalCost = (pages_fetched * spc_random_page_cost)
6356                         / num_outer_scans;
6357         }
6358         else
6359         {
6360                 /*
6361                  * For a single index scan, we just charge spc_random_page_cost per
6362                  * page touched.
6363                  */
6364                 indexTotalCost = numIndexPages * spc_random_page_cost;
6365         }
6366
6367         /*
6368          * CPU cost: any complex expressions in the indexquals will need to be
6369          * evaluated once at the start of the scan to reduce them to runtime keys
6370          * to pass to the index AM (see nodeIndexscan.c).  We model the per-tuple
6371          * CPU costs as cpu_index_tuple_cost plus one cpu_operator_cost per
6372          * indexqual operator.  Because we have numIndexTuples as a per-scan
6373          * number, we have to multiply by num_sa_scans to get the correct result
6374          * for ScalarArrayOpExpr cases.  Similarly add in costs for any index
6375          * ORDER BY expressions.
6376          *
6377          * Note: this neglects the possible costs of rechecking lossy operators.
6378          * Detecting that that might be needed seems more expensive than it's
6379          * worth, though, considering all the other inaccuracies here ...
6380          */
6381         qual_arg_cost = other_operands_eval_cost(root, qinfos) +
6382                 orderby_operands_eval_cost(root, path);
6383         qual_op_cost = cpu_operator_cost *
6384                 (list_length(indexQuals) + list_length(indexOrderBys));
6385
6386         indexStartupCost = qual_arg_cost;
6387         indexTotalCost += qual_arg_cost;
6388         indexTotalCost += numIndexTuples * num_sa_scans * (cpu_index_tuple_cost + qual_op_cost);
6389
6390         /*
6391          * Generic assumption about index correlation: there isn't any.
6392          */
6393         indexCorrelation = 0.0;
6394
6395         /*
6396          * Return everything to caller.
6397          */
6398         costs->indexStartupCost = indexStartupCost;
6399         costs->indexTotalCost = indexTotalCost;
6400         costs->indexSelectivity = indexSelectivity;
6401         costs->indexCorrelation = indexCorrelation;
6402         costs->numIndexPages = numIndexPages;
6403         costs->numIndexTuples = numIndexTuples;
6404         costs->spc_random_page_cost = spc_random_page_cost;
6405         costs->num_sa_scans = num_sa_scans;
6406 }
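
/*
 * Editor's note (not part of the original source): a worked example of the
 * above, assuming default cost settings (random_page_cost = 4.0,
 * cpu_index_tuple_cost = 0.005, cpu_operator_cost = 0.0025).  For an index
 * with 10,000 pages and 1,000,000 tuples, an indexSelectivity of 0.001, one
 * indexqual, no ORDER BY expressions, and num_scans = 1:
 *
 *   numIndexTuples = 0.001 * 1,000,000 = 1000
 *   numIndexPages  = ceil(1000 * 10,000 / 1,000,000) = 10
 *   disk cost      = 10 * 4.0 = 40.0
 *   CPU cost       = 1000 * (0.005 + 1 * 0.0025) = 7.5
 *
 * giving indexTotalCost of about 47.5, plus whatever
 * other_operands_eval_cost() charges for evaluating the comparison value.
 */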
6407
6408 /*
6409  * If the index is partial, add its predicate to the given qual list.
6410  *
6411  * ANDing the index predicate with the explicitly given indexquals produces
6412  * a more accurate idea of the index's selectivity.  However, we need to be
6413  * careful not to insert redundant clauses, because clauselist_selectivity()
6414  * is easily fooled into computing a too-low selectivity estimate.  Our
6415  * approach is to add only the predicate clause(s) that cannot be proven to
6416  * be implied by the given indexquals.  This successfully handles cases such
6417  * as a qual "x = 42" used with a partial index "WHERE x >= 40 AND x < 50".
6418  * There are many other cases where we won't detect redundancy, leading to a
6419  * too-low selectivity estimate, which will bias the system in favor of using
6420  * partial indexes where possible.  That is not necessarily bad though.
6421  *
6422  * Note that indexQuals contains RestrictInfo nodes while the indpred
6423  * does not, so the output list will be mixed.  This is OK for both
6424  * predicate_implied_by() and clauselist_selectivity(), but might be
6425  * problematic if the result were passed to other things.
6426  */
6427 static List *
6428 add_predicate_to_quals(IndexOptInfo *index, List *indexQuals)
6429 {
6430         List       *predExtraQuals = NIL;
6431         ListCell   *lc;
6432
6433         if (index->indpred == NIL)
6434                 return indexQuals;
6435
6436         foreach(lc, index->indpred)
6437         {
6438                 Node       *predQual = (Node *) lfirst(lc);
6439                 List       *oneQual = list_make1(predQual);
6440
6441                 if (!predicate_implied_by(oneQual, indexQuals))
6442                         predExtraQuals = list_concat(predExtraQuals, oneQual);
6443         }
6444         /* list_concat avoids modifying the passed-in indexQuals list */
6445         return list_concat(predExtraQuals, indexQuals);
6446 }
6447
6448
6449 void
6450 btcostestimate(PlannerInfo *root, IndexPath *path, double loop_count,
6451                            Cost *indexStartupCost, Cost *indexTotalCost,
6452                            Selectivity *indexSelectivity, double *indexCorrelation)
6453 {
6454         IndexOptInfo *index = path->indexinfo;
6455         List       *qinfos;
6456         GenericCosts costs;
6457         Oid                     relid;
6458         AttrNumber      colnum;
6459         VariableStatData vardata;
6460         double          numIndexTuples;
6461         Cost            descentCost;
6462         List       *indexBoundQuals;
6463         int                     indexcol;
6464         bool            eqQualHere;
6465         bool            found_saop;
6466         bool            found_is_null_op;
6467         double          num_sa_scans;
6468         ListCell   *lc;
6469
6470         /* Do preliminary analysis of indexquals */
6471         qinfos = deconstruct_indexquals(path);
6472
6473         /*
6474          * For a btree scan, only leading '=' quals plus inequality quals for the
6475          * immediately next attribute contribute to index selectivity (these are
6476          * the "boundary quals" that determine the starting and stopping points of
6477          * the index scan).  Additional quals can suppress visits to the heap, so
6478          * it's OK to count them in indexSelectivity, but they should not count
6479          * for estimating numIndexTuples.  So we must examine the given indexquals
6480          * to find out which ones count as boundary quals.  We rely on the
6481          * knowledge that they are given in index column order.
6482          *
6483          * For a RowCompareExpr, we consider only the first column, just as
6484          * rowcomparesel() does.
6485          *
6486          * If there's a ScalarArrayOpExpr in the quals, we'll actually perform N
6487          * index scans not one, but the ScalarArrayOpExpr's operator can be
6488          * considered to act the same as it normally does.
6489          */
6490         indexBoundQuals = NIL;
6491         indexcol = 0;
6492         eqQualHere = false;
6493         found_saop = false;
6494         found_is_null_op = false;
6495         num_sa_scans = 1;
6496         foreach(lc, qinfos)
6497         {
6498                 IndexQualInfo *qinfo = (IndexQualInfo *) lfirst(lc);
6499                 RestrictInfo *rinfo = qinfo->rinfo;
6500                 Expr       *clause = rinfo->clause;
6501                 Oid                     clause_op;
6502                 int                     op_strategy;
6503
6504                 if (indexcol != qinfo->indexcol)
6505                 {
6506                         /* Beginning of a new column's quals */
6507                         if (!eqQualHere)
6508                                 break;                  /* done if no '=' qual for indexcol */
6509                         eqQualHere = false;
6510                         indexcol++;
6511                         if (indexcol != qinfo->indexcol)
6512                                 break;                  /* no quals at all for indexcol */
6513                 }
6514
6515                 if (IsA(clause, ScalarArrayOpExpr))
6516                 {
6517                         int                     alength = estimate_array_length(qinfo->other_operand);
6518
6519                         found_saop = true;
6520                         /* count up number of SA scans induced by indexBoundQuals only */
6521                         if (alength > 1)
6522                                 num_sa_scans *= alength;
6523                 }
6524                 else if (IsA(clause, NullTest))
6525                 {
6526                         NullTest   *nt = (NullTest *) clause;
6527
6528                         if (nt->nulltesttype == IS_NULL)
6529                         {
6530                                 found_is_null_op = true;
6531                                 /* IS NULL is like = for selectivity determination purposes */
6532                                 eqQualHere = true;
6533                         }
6534                 }
6535
6536                 /*
6537                  * We would need to commute the clause_op if not varonleft, except
6538                  * that we only care if it's equality or not, so that refinement is
6539                  * unnecessary.
6540                  */
6541                 clause_op = qinfo->clause_op;
6542
6543                 /* check for equality operator */
6544                 if (OidIsValid(clause_op))
6545                 {
6546                         op_strategy = get_op_opfamily_strategy(clause_op,
6547                                                                                                    index->opfamily[indexcol]);
6548                         Assert(op_strategy != 0);       /* not a member of opfamily?? */
6549                         if (op_strategy == BTEqualStrategyNumber)
6550                                 eqQualHere = true;
6551                 }
6552
6553                 indexBoundQuals = lappend(indexBoundQuals, rinfo);
6554         }
6555
6556         /*
6557          * If index is unique and we found an '=' clause for each column, we can
6558          * just assume numIndexTuples = 1 and skip the expensive
6559          * clauselist_selectivity calculations.  However, a ScalarArrayOp or
6560          * NullTest invalidates that theory, even though it sets eqQualHere.
6561          */
6562         if (index->unique &&
6563                 indexcol == index->ncolumns - 1 &&
6564                 eqQualHere &&
6565                 !found_saop &&
6566                 !found_is_null_op)
6567                 numIndexTuples = 1.0;
6568         else
6569         {
6570                 List       *selectivityQuals;
6571                 Selectivity btreeSelectivity;
6572
6573                 /*
6574                  * If the index is partial, AND the index predicate with the
6575                  * index-bound quals to produce a more accurate idea of the number of
6576                  * rows covered by the bound conditions.
6577                  */
6578                 selectivityQuals = add_predicate_to_quals(index, indexBoundQuals);
6579
6580                 btreeSelectivity = clauselist_selectivity(root, selectivityQuals,
6581                                                                                                   index->rel->relid,
6582                                                                                                   JOIN_INNER,
6583                                                                                                   NULL);
6584                 numIndexTuples = btreeSelectivity * index->rel->tuples;
6585
6586                 /*
6587                  * As in genericcostestimate(), we have to adjust for any
6588                  * ScalarArrayOpExpr quals included in indexBoundQuals, and then round
6589                  * to integer.
6590                  */
6591                 numIndexTuples = rint(numIndexTuples / num_sa_scans);
6592         }
6593
6594         /*
6595          * Now do generic index cost estimation.
6596          */
6597         MemSet(&costs, 0, sizeof(costs));
6598         costs.numIndexTuples = numIndexTuples;
6599
6600         genericcostestimate(root, path, loop_count, qinfos, &costs);
6601
6602         /*
6603          * Add a CPU-cost component to represent the costs of initial btree
6604          * descent.  We don't charge any I/O cost for touching upper btree levels,
6605          * since they tend to stay in cache, but we still have to do about log2(N)
6606          * comparisons to descend a btree of N leaf tuples.  We charge one
6607          * cpu_operator_cost per comparison.
6608          *
6609          * If there are ScalarArrayOpExprs, charge this once per SA scan.  The
6610          * ones after the first one are not startup cost so far as the overall
6611          * plan is concerned, so add them only to "total" cost.
6612          */
6613         if (index->tuples > 1)          /* avoid computing log(0) */
6614         {
6615                 descentCost = ceil(log(index->tuples) / log(2.0)) * cpu_operator_cost;
6616                 costs.indexStartupCost += descentCost;
6617                 costs.indexTotalCost += costs.num_sa_scans * descentCost;
6618         }
6619
6620         /*
6621          * Even though we're not charging I/O cost for touching upper btree pages,
6622          * it's still reasonable to charge some CPU cost per page descended
6623          * through.  Moreover, if we had no such charge at all, bloated indexes
6624          * would appear to have the same search cost as unbloated ones, at least
6625          * in cases where only a single leaf page is expected to be visited.  This
6626          * cost is somewhat arbitrarily set at 50x cpu_operator_cost per page
6627          * touched.  The number of such pages is btree tree height plus one (ie,
6628          * we charge for the leaf page too).  As above, charge once per SA scan.
6629          */
6630         descentCost = (index->tree_height + 1) * 50.0 * cpu_operator_cost;
6631         costs.indexStartupCost += descentCost;
6632         costs.indexTotalCost += costs.num_sa_scans * descentCost;
6633
6634         /*
6635          * If we can get an estimate of the first column's ordering correlation C
6636          * from pg_statistic, estimate the index correlation as C for a
6637          * single-column index, or C * 0.75 for multiple columns. (The idea here
6638          * is that multiple columns dilute the importance of the first column's
6639          * ordering, but don't negate it entirely.  Before 8.0 we divided the
6640          * correlation by the number of columns, but that seems too strong.)
6641          */
6642         MemSet(&vardata, 0, sizeof(vardata));
6643
6644         if (index->indexkeys[0] != 0)
6645         {
6646                 /* Simple variable --- look to stats for the underlying table */
6647                 RangeTblEntry *rte = planner_rt_fetch(index->rel->relid, root);
6648
6649                 Assert(rte->rtekind == RTE_RELATION);
6650                 relid = rte->relid;
6651                 Assert(relid != InvalidOid);
6652                 colnum = index->indexkeys[0];
6653
6654                 if (get_relation_stats_hook &&
6655                         (*get_relation_stats_hook) (root, rte, colnum, &vardata))
6656                 {
6657                         /*
6658                          * The hook took control of acquiring a stats tuple.  If it did
6659                          * supply a tuple, it'd better have supplied a freefunc.
6660                          */
6661                         if (HeapTupleIsValid(vardata.statsTuple) &&
6662                                 !vardata.freefunc)
6663                                 elog(ERROR, "no function provided to release variable stats with");
6664                 }
6665                 else
6666                 {
6667                         vardata.statsTuple = SearchSysCache3(STATRELATTINH,
6668                                                                                                  ObjectIdGetDatum(relid),
6669                                                                                                  Int16GetDatum(colnum),
6670                                                                                                  BoolGetDatum(rte->inh));
6671                         vardata.freefunc = ReleaseSysCache;
6672                 }
6673         }
6674         else
6675         {
6676                 /* Expression --- maybe there are stats for the index itself */
6677                 relid = index->indexoid;
6678                 colnum = 1;
6679
6680                 if (get_index_stats_hook &&
6681                         (*get_index_stats_hook) (root, relid, colnum, &vardata))
6682                 {
6683                         /*
6684                          * The hook took control of acquiring a stats tuple.  If it did
6685                          * supply a tuple, it'd better have supplied a freefunc.
6686                          */
6687                         if (HeapTupleIsValid(vardata.statsTuple) &&
6688                                 !vardata.freefunc)
6689                                 elog(ERROR, "no function provided to release variable stats with");
6690                 }
6691                 else
6692                 {
6693                         vardata.statsTuple = SearchSysCache3(STATRELATTINH,
6694                                                                                                  ObjectIdGetDatum(relid),
6695                                                                                                  Int16GetDatum(colnum),
6696                                                                                                  BoolGetDatum(false));
6697                         vardata.freefunc = ReleaseSysCache;
6698                 }
6699         }
6700
6701         if (HeapTupleIsValid(vardata.statsTuple))
6702         {
6703                 Oid                     sortop;
6704                 float4     *numbers;
6705                 int                     nnumbers;
6706
6707                 sortop = get_opfamily_member(index->opfamily[0],
6708                                                                          index->opcintype[0],
6709                                                                          index->opcintype[0],
6710                                                                          BTLessStrategyNumber);
6711                 if (OidIsValid(sortop) &&
6712                         get_attstatsslot(vardata.statsTuple, InvalidOid, 0,
6713                                                          STATISTIC_KIND_CORRELATION,
6714                                                          sortop,
6715                                                          NULL,
6716                                                          NULL, NULL,
6717                                                          &numbers, &nnumbers))
6718                 {
6719                         double          varCorrelation;
6720
6721                         Assert(nnumbers == 1);
6722                         varCorrelation = numbers[0];
6723
6724                         if (index->reverse_sort[0])
6725                                 varCorrelation = -varCorrelation;
6726
6727                         if (index->ncolumns > 1)
6728                                 costs.indexCorrelation = varCorrelation * 0.75;
6729                         else
6730                                 costs.indexCorrelation = varCorrelation;
6731
6732                         free_attstatsslot(InvalidOid, NULL, 0, numbers, nnumbers);
6733                 }
6734         }
6735
6736         ReleaseVariableStats(vardata);
6737
6738         *indexStartupCost = costs.indexStartupCost;
6739         *indexTotalCost = costs.indexTotalCost;
6740         *indexSelectivity = costs.indexSelectivity;
6741         *indexCorrelation = costs.indexCorrelation;
6742 }
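
/*
 * Editor's note (not part of the original source): a worked example of the
 * btree descent charges added above, assuming the default cpu_operator_cost
 * of 0.0025.  For an index with 1,000,000 leaf tuples and a tree height of 2
 * (plausible for an index of that size):
 *
 *   comparison cost = ceil(log2(1,000,000)) * 0.0025 = 20 * 0.0025 = 0.05
 *   per-page charge = (2 + 1) * 50 * 0.0025 = 0.375
 *
 * Both amounts are added to the startup cost, and added to the total cost
 * once per ScalarArrayOpExpr-induced scan.
 */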
6743
6744 void
6745 hashcostestimate(PlannerInfo *root, IndexPath *path, double loop_count,
6746                                  Cost *indexStartupCost, Cost *indexTotalCost,
6747                                  Selectivity *indexSelectivity, double *indexCorrelation)
6748 {
6749         List       *qinfos;
6750         GenericCosts costs;
6751
6752         /* Do preliminary analysis of indexquals */
6753         qinfos = deconstruct_indexquals(path);
6754
6755         MemSet(&costs, 0, sizeof(costs));
6756
6757         genericcostestimate(root, path, loop_count, qinfos, &costs);
6758
6759         /*
6760          * A hash index has no descent costs as such, since the index AM can go
6761          * directly to the target bucket after computing the hash value.  There
6762          * are a couple of other hash-specific costs that we could conceivably add
6763          * here, though:
6764          *
6765          * Ideally we'd charge spc_random_page_cost for each page in the target
6766          * bucket, not just the numIndexPages pages that genericcostestimate
6767          * thought we'd visit.  However in most cases we don't know which bucket
6768          * that will be.  There's no point in considering the average bucket size
6769          * because the hash AM makes sure that's always one page.
6770          *
6771          * Likewise, we could consider charging some CPU for each index tuple in
6772          * the bucket, if we knew how many there were.  But the per-tuple cost is
6773          * just a hash value comparison, not a general datatype-dependent
6774          * comparison, so any such charge ought to be quite a bit less than
6775          * cpu_operator_cost; which makes it probably not worth worrying about.
6776          *
6777          * A bigger issue is that chance hash-value collisions will result in
6778          * wasted probes into the heap.  We don't currently attempt to model this
6779          * cost on the grounds that it's rare, but maybe it's not rare enough.
6780          * (Any fix for this ought to consider the generic lossy-operator problem,
6781          * though; it's not entirely hash-specific.)
6782          */
6783
6784         *indexStartupCost = costs.indexStartupCost;
6785         *indexTotalCost = costs.indexTotalCost;
6786         *indexSelectivity = costs.indexSelectivity;
6787         *indexCorrelation = costs.indexCorrelation;
6788 }
6789
6790 void
6791 gistcostestimate(PlannerInfo *root, IndexPath *path, double loop_count,
6792                                  Cost *indexStartupCost, Cost *indexTotalCost,
6793                                  Selectivity *indexSelectivity, double *indexCorrelation)
6794 {
6795         IndexOptInfo *index = path->indexinfo;
6796         List       *qinfos;
6797         GenericCosts costs;
6798         Cost            descentCost;
6799
6800         /* Do preliminary analysis of indexquals */
6801         qinfos = deconstruct_indexquals(path);
6802
6803         MemSet(&costs, 0, sizeof(costs));
6804
6805         genericcostestimate(root, path, loop_count, qinfos, &costs);
6806
6807         /*
6808          * We model index descent costs similarly to those for btree, but to do
6809          * that we first need an idea of the tree height.  We somewhat arbitrarily
6810          * assume that the fanout is 100, meaning the tree height is at most
6811          * log100(index->pages).
6812          *
6813          * Although this computation isn't really expensive enough to require
6814          * caching, we might as well use index->tree_height to cache it.
6815          */
6816         if (index->tree_height < 0) /* unknown? */
6817         {
6818                 if (index->pages > 1)   /* avoid computing log(0) */
6819                         index->tree_height = (int) (log(index->pages) / log(100.0));
6820                 else
6821                         index->tree_height = 0;
6822         }
6823
6824         /*
6825          * Add a CPU-cost component to represent the costs of initial descent. We
6826          * just use log(N) here not log2(N) since the branching factor isn't
6827          * necessarily two anyway.  As for btree, charge once per SA scan.
6828          */
6829         if (index->tuples > 1)          /* avoid computing log(0) */
6830         {
6831                 descentCost = ceil(log(index->tuples)) * cpu_operator_cost;
6832                 costs.indexStartupCost += descentCost;
6833                 costs.indexTotalCost += costs.num_sa_scans * descentCost;
6834         }
6835
6836         /*
6837          * Likewise add a per-page charge, calculated the same as for btrees.
6838          */
6839         descentCost = (index->tree_height + 1) * 50.0 * cpu_operator_cost;
6840         costs.indexStartupCost += descentCost;
6841         costs.indexTotalCost += costs.num_sa_scans * descentCost;
6842
6843         *indexStartupCost = costs.indexStartupCost;
6844         *indexTotalCost = costs.indexTotalCost;
6845         *indexSelectivity = costs.indexSelectivity;
6846         *indexCorrelation = costs.indexCorrelation;
6847 }
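
/*
 * Editor's note (not part of the original source): with the assumed fanout
 * of 100, a 50,000-page GiST index gets tree_height =
 * (int) (log(50000) / log(100)) = 2, and an index of 1,000,000 tuples adds
 * ceil(log(1,000,000)) = 14 comparisons of descent cost, i.e.
 * 14 * cpu_operator_cost (0.035 at the default setting of 0.0025), plus the
 * same 50x-per-page charge used for btrees.
 */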
6848
6849 void
6850 spgcostestimate(PlannerInfo *root, IndexPath *path, double loop_count,
6851                                 Cost *indexStartupCost, Cost *indexTotalCost,
6852                                 Selectivity *indexSelectivity, double *indexCorrelation)
6853 {
6854         IndexOptInfo *index = path->indexinfo;
6855         List       *qinfos;
6856         GenericCosts costs;
6857         Cost            descentCost;
6858
6859         /* Do preliminary analysis of indexquals */
6860         qinfos = deconstruct_indexquals(path);
6861
6862         MemSet(&costs, 0, sizeof(costs));
6863
6864         genericcostestimate(root, path, loop_count, qinfos, &costs);
6865
6866         /*
6867          * We model index descent costs similarly to those for btree, but to do
6868          * that we first need an idea of the tree height.  We somewhat arbitrarily
6869          * assume that the fanout is 100, meaning the tree height is at most
6870          * log100(index->pages).
6871          *
6872          * Although this computation isn't really expensive enough to require
6873          * caching, we might as well use index->tree_height to cache it.
6874          */
6875         if (index->tree_height < 0) /* unknown? */
6876         {
6877                 if (index->pages > 1)   /* avoid computing log(0) */
6878                         index->tree_height = (int) (log(index->pages) / log(100.0));
6879                 else
6880                         index->tree_height = 0;
6881         }
6882
6883         /*
6884          * Add a CPU-cost component to represent the costs of initial descent. We
6885          * just use log(N) here not log2(N) since the branching factor isn't
6886          * necessarily two anyway.  As for btree, charge once per SA scan.
6887          */
6888         if (index->tuples > 1)          /* avoid computing log(0) */
6889         {
6890                 descentCost = ceil(log(index->tuples)) * cpu_operator_cost;
6891                 costs.indexStartupCost += descentCost;
6892                 costs.indexTotalCost += costs.num_sa_scans * descentCost;
6893         }
6894
6895         /*
6896          * Likewise add a per-page charge, calculated the same as for btrees.
6897          */
6898         descentCost = (index->tree_height + 1) * 50.0 * cpu_operator_cost;
6899         costs.indexStartupCost += descentCost;
6900         costs.indexTotalCost += costs.num_sa_scans * descentCost;
6901
6902         *indexStartupCost = costs.indexStartupCost;
6903         *indexTotalCost = costs.indexTotalCost;
6904         *indexSelectivity = costs.indexSelectivity;
6905         *indexCorrelation = costs.indexCorrelation;
6906 }
6907
6908
6909 /*
6910  * Support routines for gincostestimate
6911  */
6912
6913 typedef struct
6914 {
6915         bool            haveFullScan;
6916         double          partialEntries;
6917         double          exactEntries;
6918         double          searchEntries;
6919         double          arrayScans;
6920 } GinQualCounts;
6921
6922 /*
6923  * Estimate the number of index terms that need to be searched for while
6924  * testing the given GIN query, and increment the counts in *counts
6925  * appropriately.  If the query is unsatisfiable, return false.
6926  */
6927 static bool
6928 gincost_pattern(IndexOptInfo *index, int indexcol,
6929                                 Oid clause_op, Datum query,
6930                                 GinQualCounts *counts)
6931 {
6932         Oid                     extractProcOid;
6933         Oid                     collation;
6934         int                     strategy_op;
6935         Oid                     lefttype,
6936                                 righttype;
6937         int32           nentries = 0;
6938         bool       *partial_matches = NULL;
6939         Pointer    *extra_data = NULL;
6940         bool       *nullFlags = NULL;
6941         int32           searchMode = GIN_SEARCH_MODE_DEFAULT;
6942         int32           i;
6943
6944         /*
6945          * Get the operator's strategy number and declared input data types within
6946          * the index opfamily.  (We don't need the latter, but we use
6947          * get_op_opfamily_properties because it will throw an error if it fails to
6948          * find a matching pg_amop entry.)
6949          */
6950         get_op_opfamily_properties(clause_op, index->opfamily[indexcol], false,
6951                                                            &strategy_op, &lefttype, &righttype);
6952
6953         /*
6954          * GIN always uses the "default" support functions, which are those with
6955          * lefttype == righttype == the opclass' opcintype (see
6956          * IndexSupportInitialize in relcache.c).
6957          */
6958         extractProcOid = get_opfamily_proc(index->opfamily[indexcol],
6959                                                                            index->opcintype[indexcol],
6960                                                                            index->opcintype[indexcol],
6961                                                                            GIN_EXTRACTQUERY_PROC);
6962
6963         if (!OidIsValid(extractProcOid))
6964         {
6965                 /* should not happen; throw same error as index_getprocinfo */
6966                 elog(ERROR, "missing support function %d for attribute %d of index \"%s\"",
6967                          GIN_EXTRACTQUERY_PROC, indexcol + 1,
6968                          get_rel_name(index->indexoid));
6969         }
6970
6971         /*
6972          * Choose collation to pass to extractProc (should match initGinState).
6973          */
6974         if (OidIsValid(index->indexcollations[indexcol]))
6975                 collation = index->indexcollations[indexcol];
6976         else
6977                 collation = DEFAULT_COLLATION_OID;
6978
6979         OidFunctionCall7Coll(extractProcOid,
6980                                                  collation,
6981                                                  query,
6982                                                  PointerGetDatum(&nentries),
6983                                                  UInt16GetDatum(strategy_op),
6984                                                  PointerGetDatum(&partial_matches),
6985                                                  PointerGetDatum(&extra_data),
6986                                                  PointerGetDatum(&nullFlags),
6987                                                  PointerGetDatum(&searchMode));
6988
6989         if (nentries <= 0 && searchMode == GIN_SEARCH_MODE_DEFAULT)
6990         {
6991                 /* No match is possible */
6992                 return false;
6993         }
6994
6995         for (i = 0; i < nentries; i++)
6996         {
6997                 /*
6998                  * For a partial match we don't have any information to estimate the
6999                  * number of matched entries in the index, so just estimate it as 100.
7000                  */
7001                 if (partial_matches && partial_matches[i])
7002                         counts->partialEntries += 100;
7003                 else
7004                         counts->exactEntries++;
7005
7006                 counts->searchEntries++;
7007         }
7008
7009         if (searchMode == GIN_SEARCH_MODE_INCLUDE_EMPTY)
7010         {
7011                 /* Treat "include empty" like an exact-match item */
7012                 counts->exactEntries++;
7013                 counts->searchEntries++;
7014         }
7015         else if (searchMode != GIN_SEARCH_MODE_DEFAULT)
7016         {
7017                 /* It's GIN_SEARCH_MODE_ALL */
7018                 counts->haveFullScan = true;
7019         }
7020
7021         return true;
7022 }
7023
7024 /*
7025  * Estimate the number of index terms that need to be searched for while
7026  * testing the given GIN index clause, and increment the counts in *counts
7027  * appropriately.  If the query is unsatisfiable, return false.
7028  */
7029 static bool
7030 gincost_opexpr(PlannerInfo *root,
7031                            IndexOptInfo *index,
7032                            IndexQualInfo *qinfo,
7033                            GinQualCounts *counts)
7034 {
7035         int                     indexcol = qinfo->indexcol;
7036         Oid                     clause_op = qinfo->clause_op;
7037         Node       *operand = qinfo->other_operand;
7038
7039         if (!qinfo->varonleft)
7040         {
7041                 /* must commute the operator */
7042                 clause_op = get_commutator(clause_op);
7043         }
7044
7045         /* aggressively reduce to a constant, and look through relabeling */
7046         operand = estimate_expression_value(root, operand);
7047
7048         if (IsA(operand, RelabelType))
7049                 operand = (Node *) ((RelabelType *) operand)->arg;
7050
7051         /*
7052          * We can't call the extractQuery method with an unknown operand.  So
7053          * unless the operand is a Const we can't do much; just assume there will be
7054          * one ordinary search entry from the operand at runtime.
7055          */
7056         if (!IsA(operand, Const))
7057         {
7058                 counts->exactEntries++;
7059                 counts->searchEntries++;
7060                 return true;
7061         }
7062
7063         /* If Const is null, there can be no matches */
7064         if (((Const *) operand)->constisnull)
7065                 return false;
7066
7067         /* Otherwise, apply extractQuery and get the actual term counts */
7068         return gincost_pattern(index, indexcol, clause_op,
7069                                                    ((Const *) operand)->constvalue,
7070                                                    counts);
7071 }
7072
7073 /*
7074  * Estimate the number of index terms that need to be searched for while
7075  * testing the given GIN index clause, and increment the counts in *counts
7076  * appropriately.  If the query is unsatisfiable, return false.
7077  *
7078  * A ScalarArrayOpExpr will give rise to N separate indexscans at runtime,
7079  * each of which involves one value from the RHS array, plus all the
7080  * non-array quals (if any).  To model this, we average the counts across
7081  * the RHS elements, and add the averages to the counts in *counts (which
7082  * correspond to per-indexscan costs).  We also multiply counts->arrayScans
7083  * by N, causing gincostestimate to scale up its estimates accordingly.
7084  */
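/*
 * Illustration (hypothetical): for a qual like "col && ARRAY[a, b, c]" where
 * extractQuery finds "b" unsatisfiable, the entry counts for "a" and "c" are
 * averaged into *counts and counts->arrayScans is multiplied by 2, not 3.
 */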
7085 static bool
7086 gincost_scalararrayopexpr(PlannerInfo *root,
7087                                                   IndexOptInfo *index,
7088                                                   IndexQualInfo *qinfo,
7089                                                   double numIndexEntries,
7090                                                   GinQualCounts *counts)
7091 {
7092         int                     indexcol = qinfo->indexcol;
7093         Oid                     clause_op = qinfo->clause_op;
7094         Node       *rightop = qinfo->other_operand;
7095         ArrayType  *arrayval;
7096         int16           elmlen;
7097         bool            elmbyval;
7098         char            elmalign;
7099         int                     numElems;
7100         Datum      *elemValues;
7101         bool       *elemNulls;
7102         GinQualCounts arraycounts;
7103         int                     numPossible = 0;
7104         int                     i;
7105
7106         Assert(((ScalarArrayOpExpr *) qinfo->rinfo->clause)->useOr);
7107
7108         /* aggressively reduce to a constant, and look through relabeling */
7109         rightop = estimate_expression_value(root, rightop);
7110
7111         if (IsA(rightop, RelabelType))
7112                 rightop = (Node *) ((RelabelType *) rightop)->arg;
7113
7114         /*
7115          * We can't call the extractQuery method with an unknown operand.  So
7116          * unless the operand is a Const we can't do much; just assume there will be
7117          * one ordinary search entry from each array entry at runtime, and fall
7118          * back on a probably-bad estimate of the number of array entries.
7119          */
7120         if (!IsA(rightop, Const))
7121         {
7122                 counts->exactEntries++;
7123                 counts->searchEntries++;
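                /*
                 * When the array size can't be determined from the expression,
                 * estimate_array_length() falls back to a fixed default guess.
                 */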
7124                 counts->arrayScans *= estimate_array_length(rightop);
7125                 return true;
7126         }
7127
7128         /* If Const is null, there can be no matches */
7129         if (((Const *) rightop)->constisnull)
7130                 return false;
7131
7132         /* Otherwise, extract the array elements and iterate over them */
7133         arrayval = DatumGetArrayTypeP(((Const *) rightop)->constvalue);
7134         get_typlenbyvalalign(ARR_ELEMTYPE(arrayval),
7135                                                  &elmlen, &elmbyval, &elmalign);
7136         deconstruct_array(arrayval,
7137                                           ARR_ELEMTYPE(arrayval),
7138                                           elmlen, elmbyval, elmalign,
7139                                           &elemValues, &elemNulls, &numElems);
7140
7141         memset(&arraycounts, 0, sizeof(arraycounts));
7142
7143         for (i = 0; i < numElems; i++)
7144         {
7145                 GinQualCounts elemcounts;
7146
7147                 /* NULL can't match anything, so ignore, as the executor will */
7148                 if (elemNulls[i])
7149                         continue;
7150
7151                 /* Otherwise, apply extractQuery and get the actual term counts */
7152                 memset(&elemcounts, 0, sizeof(elemcounts));
7153
7154                 if (gincost_pattern(index, indexcol, clause_op, elemValues[i],
7155                                                         &elemcounts))
7156                 {
7157                         /* We ignore array elements that are unsatisfiable patterns */
7158                         numPossible++;
7159
7160                         if (elemcounts.haveFullScan)
7161                         {
7162                                 /*
7163                                  * Full index scan will be required.  We treat this as if
7164                                  * every key in the index had been listed in the query; is
7165                                  * that reasonable?
7166                                  */
7167                                 elemcounts.partialEntries = 0;
7168                                 elemcounts.exactEntries = numIndexEntries;
7169                                 elemcounts.searchEntries = numIndexEntries;
7170                         }
7171                         arraycounts.partialEntries += elemcounts.partialEntries;
7172                         arraycounts.exactEntries += elemcounts.exactEntries;
7173                         arraycounts.searchEntries += elemcounts.searchEntries;
7174                 }
7175         }
7176
7177         if (numPossible == 0)
7178         {
7179                 /* No satisfiable patterns in the array */
7180                 return false;
7181         }
7182
7183         /*
7184          * Now add the averages to the global counts.  This will give us an
7185          * estimate of the average number of terms searched for in each indexscan,
7186          * including contributions from both array and non-array quals.
7187          */
7188         counts->partialEntries += arraycounts.partialEntries / numPossible;
7189         counts->exactEntries += arraycounts.exactEntries / numPossible;
7190         counts->searchEntries += arraycounts.searchEntries / numPossible;
7191
7192         counts->arrayScans *= numPossible;
7193
7194         return true;
7195 }
7196
7197 /*
7198  * GIN has search behavior completely different from other index types
7199  */
7200 void
7201 gincostestimate(PlannerInfo *root, IndexPath *path, double loop_count,
7202                                 Cost *indexStartupCost, Cost *indexTotalCost,
7203                                 Selectivity *indexSelectivity, double *indexCorrelation)
7204 {
7205         IndexOptInfo *index = path->indexinfo;
7206         List       *indexQuals = path->indexquals;
7207         List       *indexOrderBys = path->indexorderbys;
7208         List       *qinfos;
7209         ListCell   *l;
7210         List       *selectivityQuals;
7211         double          numPages = index->pages,
7212                                 numTuples = index->tuples;
7213         double          numEntryPages,
7214                                 numDataPages,
7215                                 numPendingPages,
7216                                 numEntries;
7217         GinQualCounts counts;
7218         bool            matchPossible;
7219         double          partialScale;
7220         double          entryPagesFetched,
7221                                 dataPagesFetched,
7222                                 dataPagesFetchedBySel;
7223         double          qual_op_cost,
7224                                 qual_arg_cost,
7225                                 spc_random_page_cost,
7226                                 outer_scans;
7227         Relation        indexRel;
7228         GinStatsData ginStats;
7229
7230         /* Do preliminary analysis of indexquals */
7231         qinfos = deconstruct_indexquals(path);
7232
7233         /*
7234          * Obtain statistical information from the meta page, if possible.  Else
7235          * set ginStats to zeroes, and we'll cope below.
7236          */
7237         if (!index->hypothetical)
7238         {
7239                 indexRel = index_open(index->indexoid, AccessShareLock);
7240                 ginGetStats(indexRel, &ginStats);
7241                 index_close(indexRel, AccessShareLock);
7242         }
7243         else
7244         {
7245                 memset(&ginStats, 0, sizeof(ginStats));
7246         }
7247
7248         /*
7249          * Assuming we got valid (nonzero) stats at all, nPendingPages can be
7250          * trusted, but the other fields are data as of the last VACUUM.  We can
7251          * scale them up to account for growth since then, but that method only
7252          * goes so far; in the worst case, the stats might be for a completely
7253          * empty index, and scaling them will produce pretty bogus numbers.
7254          * Somewhat arbitrarily, set the cutoff for doing scaling at 4X growth; if
7255          * it's grown more than that, fall back to estimating things only from the
7256          * assumed-accurate index size.  But we'll trust nPendingPages in any case
7257          * so long as it's not clearly insane, ie, more than the index size.
7258          */
7259         if (ginStats.nPendingPages < numPages)
7260                 numPendingPages = ginStats.nPendingPages;
7261         else
7262                 numPendingPages = 0;
7263
7264         if (numPages > 0 && ginStats.nTotalPages <= numPages &&
7265                 ginStats.nTotalPages > numPages / 4 &&
7266                 ginStats.nEntryPages > 0 && ginStats.nEntries > 0)
7267         {
7268                 /*
7269                  * OK, the stats seem close enough to sane to be trusted.  But we
7270                  * still need to scale them by the ratio numPages / nTotalPages to
7271                  * account for growth since the last VACUUM.
7272                  */
7273                 double          scale = numPages / ginStats.nTotalPages;
7274
7275                 numEntryPages = ceil(ginStats.nEntryPages * scale);
7276                 numDataPages = ceil(ginStats.nDataPages * scale);
7277                 numEntries = ceil(ginStats.nEntries * scale);
7278                 /* ensure we didn't round up too much */
7279                 numEntryPages = Min(numEntryPages, numPages - numPendingPages);
7280                 numDataPages = Min(numDataPages,
7281                                                    numPages - numPendingPages - numEntryPages);
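                /*
                 * Illustration (hypothetical numbers): if the index grew from
                 * nTotalPages = 800 to numPages = 1000, scale = 1.25, so
                 * nEntryPages = 600 becomes numEntryPages = 750 and
                 * nEntries = 60000 becomes numEntries = 75000, subject to the
                 * clamps above.
                 */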
7282         }
7283         else
7284         {
7285                 /*
7286                  * We might get here because it's a hypothetical index, or an index
7287                  * created pre-9.1 and never vacuumed since upgrading (in which case
7288                  * its stats would read as zeroes), or just because it's grown too
7289                  * much since the last VACUUM for us to put our faith in scaling.
7290                  *
7291                  * Invent some plausible internal statistics based on the index page
7292                  * count (and clamp that to at least 10 pages, just in case).  We
7293                  * estimate that 90% of the index is entry pages, and the rest is data
7294                  * pages.  Estimate 100 entries per entry page; this is rather bogus
7295                  * since it'll depend on the size of the keys, but it's more robust
7296                  * than trying to predict the number of entries per heap tuple.
7297                  */
7298                 numPages = Max(numPages, 10);
7299                 numEntryPages = floor((numPages - numPendingPages) * 0.90);
7300                 numDataPages = numPages - numPendingPages - numEntryPages;
7301                 numEntries = floor(numEntryPages * 100);
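                /*
                 * Illustration (hypothetical numbers): a 1000-page index with no
                 * pending pages gets numEntryPages = 900, numDataPages = 100, and
                 * numEntries = 90000 under these assumptions.
                 */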
7302         }
7303
7304         /* In an empty index, numEntries could be zero.  Avoid divide-by-zero */
7305         if (numEntries < 1)
7306                 numEntries = 1;
7307
7308         /*
7309          * Include predicate in selectivityQuals (should match
7310          * genericcostestimate)
7311          */
7312         if (index->indpred != NIL)
7313         {
7314                 List       *predExtraQuals = NIL;
7315
7316                 foreach(l, index->indpred)
7317                 {
7318                         Node       *predQual = (Node *) lfirst(l);
7319                         List       *oneQual = list_make1(predQual);
7320
7321                         if (!predicate_implied_by(oneQual, indexQuals))
7322                                 predExtraQuals = list_concat(predExtraQuals, oneQual);
7323                 }
7324                 /* list_concat avoids modifying the passed-in indexQuals list */
7325                 selectivityQuals = list_concat(predExtraQuals, indexQuals);
7326         }
7327         else
7328                 selectivityQuals = indexQuals;
7329
7330         /* Estimate the fraction of main-table tuples that will be visited */
7331         *indexSelectivity = clauselist_selectivity(root, selectivityQuals,
7332                                                                                            index->rel->relid,
7333                                                                                            JOIN_INNER,
7334                                                                                            NULL);
7335
7336         /* fetch estimated page cost for tablespace containing index */
7337         get_tablespace_page_costs(index->reltablespace,
7338                                                           &spc_random_page_cost,
7339                                                           NULL);
7340
7341         /*
7342          * Generic assumption about index correlation: there isn't any.
7343          */
7344         *indexCorrelation = 0.0;
7345
7346         /*
7347          * Examine quals to estimate number of search entries & partial matches
7348          */
7349         memset(&counts, 0, sizeof(counts));
7350         counts.arrayScans = 1;
7351         matchPossible = true;
7352
7353         foreach(l, qinfos)
7354         {
7355                 IndexQualInfo *qinfo = (IndexQualInfo *) lfirst(l);
7356                 Expr       *clause = qinfo->rinfo->clause;
7357
7358                 if (IsA(clause, OpExpr))
7359                 {
7360                         matchPossible = gincost_opexpr(root,
7361                                                                                    index,
7362                                                                                    qinfo,
7363                                                                                    &counts);
7364                         if (!matchPossible)
7365                                 break;
7366                 }
7367                 else if (IsA(clause, ScalarArrayOpExpr))
7368                 {
7369                         matchPossible = gincost_scalararrayopexpr(root,
7370                                                                                                           index,
7371                                                                                                           qinfo,
7372                                                                                                           numEntries,
7373                                                                                                           &counts);
7374                         if (!matchPossible)
7375                                 break;
7376                 }
7377                 else
7378                 {
7379                         /* shouldn't be anything else for a GIN index */
7380                         elog(ERROR, "unsupported GIN indexqual type: %d",
7381                                  (int) nodeTag(clause));
7382                 }
7383         }
7384
7385         /* Fall out if there were any provably-unsatisfiable quals */
7386         if (!matchPossible)
7387         {
7388                 *indexStartupCost = 0;
7389                 *indexTotalCost = 0;
7390                 *indexSelectivity = 0;
7391                 return;
7392         }
7393
7394         if (counts.haveFullScan || indexQuals == NIL)
7395         {
7396                 /*
7397                  * Full index scan will be required.  We treat this as if every key in
7398                  * the index had been listed in the query; is that reasonable?
7399                  */
7400                 counts.partialEntries = 0;
7401                 counts.exactEntries = numEntries;
7402                 counts.searchEntries = numEntries;
7403         }
7404
7405         /* Will we have more than one iteration of a nestloop scan? */
7406         outer_scans = loop_count;
7407
7408         /*
7409          * Compute the cost to begin the scan.  First of all, account for the
7410          * pending list.
7411          */
7412         entryPagesFetched = numPendingPages;
7413
7414         /*
7415          * Estimate the number of entry pages read.  We need to do
7416          * counts.searchEntries searches; model each one as touching about
7417          * numEntryPages^0.15 pages, on the theory that leaf pages hold many
7418          * tuples.  This includes all searches of the entry tree, including the
7419          * search for the first entry in the partial-match algorithm.
7420          */
7421         entryPagesFetched += ceil(counts.searchEntries * rint(pow(numEntryPages, 0.15)));
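        /*
         * Illustration (hypothetical numbers): with numEntryPages = 900,
         * rint(pow(900, 0.15)) = 3, so each search is charged about three entry
         * pages; four search entries would add ceil(4 * 3) = 12 pages here.
         */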
7422
7423         /*
7424          * Add an estimate of entry pages read by the partial-match algorithm:
7425          * it's a scan over the leaf pages of the entry tree.  We don't have any
7426          * useful stats here, so estimate it as a proportion.  Because
7427          * counts.partialEntries is really pretty bogus (see code above), it may
7428          * exceed numEntries; clamp the proportion to ensure sanity.
7429          */
7430         partialScale = counts.partialEntries / numEntries;
7431         partialScale = Min(partialScale, 1.0);
7432
7433         entryPagesFetched += ceil(numEntryPages * partialScale);
7434
7435         /*
7436          * The partial-match algorithm reads all data pages before doing the
7437          * actual scan, so it's a startup cost.  Again, we don't have any useful
7438          * stats here, so estimate it as a proportion.
7439          */
7440         dataPagesFetched = ceil(numDataPages * partialScale);
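        /*
         * Illustration (hypothetical numbers): one partial-match qual gives
         * counts.partialEntries = 100; with numEntries = 90000 that is
         * partialScale ~= 0.0011, adding ceil(900 * 0.0011) = 1 entry page above
         * and ceil(100 * 0.0011) = 1 data page here.
         */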
7441
7442         /*
7443          * Adjust for cache effects if there is more than one scan due to nestloops
7444          * or array quals.  The result is pro-rated per nestloop scan, but the array qual
7445          * factor shouldn't be pro-rated (compare genericcostestimate).
7446          */
7447         if (outer_scans > 1 || counts.arrayScans > 1)
7448         {
7449                 entryPagesFetched *= outer_scans * counts.arrayScans;
7450                 entryPagesFetched = index_pages_fetched(entryPagesFetched,
7451                                                                                                 (BlockNumber) numEntryPages,
7452                                                                                                 numEntryPages, root);
7453                 entryPagesFetched /= outer_scans;
7454                 dataPagesFetched *= outer_scans * counts.arrayScans;
7455                 dataPagesFetched = index_pages_fetched(dataPagesFetched,
7456                                                                                            (BlockNumber) numDataPages,
7457                                                                                            numDataPages, root);
7458                 dataPagesFetched /= outer_scans;
7459         }
7460
7461         /*
7462          * Here we use random page cost because logically-close pages could be far
7463          * apart on disk.
7464          */
7465         *indexStartupCost = (entryPagesFetched + dataPagesFetched) * spc_random_page_cost;
7466
7467         /*
7468          * Now compute the number of data pages fetched during the scan.
7469          *
7470          * We assume every entry has the same number of items, and that there
7471          * is no overlap between them. (XXX: tsvector and array opclasses collect
7472          * statistics on the frequency of individual keys; it would be nice to use
7473          * those here.)
7474          */
7475         dataPagesFetched = ceil(numDataPages * counts.exactEntries / numEntries);
7476
7477         /*
7478          * If there is a lot of overlap among the entries, in particular if one of
7479          * the entries is very frequent, the above calculation can grossly
7480          * under-estimate.  As a simple cross-check, calculate a lower bound based
7481          * on the overall selectivity of the quals.  At a minimum, we must read
7482          * one item pointer for each matching entry.
7483          *
7484          * The width of each item pointer varies, based on the level of
7485          * compression.  We don't have statistics on that, but an average of
7486          * around 3 bytes per item is fairly typical.
7487          */
7488         dataPagesFetchedBySel = ceil(*indexSelectivity *
7489                                                                  (numTuples / (BLCKSZ / 3)));
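        /*
         * Illustration (hypothetical numbers): with BLCKSZ = 8192 each page is
         * assumed to hold about 2730 item pointers, so numTuples = 1e6 and a
         * selectivity of 0.01 give a lower bound of ceil(0.01 * 1e6 / 2730) = 4
         * data pages.
         */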
7490         if (dataPagesFetchedBySel > dataPagesFetched)
7491                 dataPagesFetched = dataPagesFetchedBySel;
7492
7493         /* Account for cache effects, the same as above */
7494         if (outer_scans > 1 || counts.arrayScans > 1)
7495         {
7496                 dataPagesFetched *= outer_scans * counts.arrayScans;
7497                 dataPagesFetched = index_pages_fetched(dataPagesFetched,
7498                                                                                            (BlockNumber) numDataPages,
7499                                                                                            numDataPages, root);
7500                 dataPagesFetched /= outer_scans;
7501         }
7502
7503         /* And apply random_page_cost as the cost per page */
7504         *indexTotalCost = *indexStartupCost +
7505                 dataPagesFetched * spc_random_page_cost;
7506
7507         /*
7508          * Add on index qual eval costs, much as in genericcostestimate
7509          */
7510         qual_arg_cost = other_operands_eval_cost(root, qinfos) +
7511                 orderby_operands_eval_cost(root, path);
7512         qual_op_cost = cpu_operator_cost *
7513                 (list_length(indexQuals) + list_length(indexOrderBys));
7514
7515         *indexStartupCost += qual_arg_cost;
7516         *indexTotalCost += qual_arg_cost;
7517         *indexTotalCost += (numTuples * *indexSelectivity) * (cpu_index_tuple_cost + qual_op_cost);
7518 }
7519
7520 /*
7521  * BRIN has search behavior completely different from other index types
7522  */
7523 void
7524 brincostestimate(PlannerInfo *root, IndexPath *path, double loop_count,
7525                                  Cost *indexStartupCost, Cost *indexTotalCost,
7526                                  Selectivity *indexSelectivity, double *indexCorrelation)
7527 {
7528         IndexOptInfo *index = path->indexinfo;
7529         List       *indexQuals = path->indexquals;
7530         List       *indexOrderBys = path->indexorderbys;
7531         double          numPages = index->pages;
7532         double          numTuples = index->tuples;
7533         List       *qinfos;
7534         Cost            spc_seq_page_cost;
7535         Cost            spc_random_page_cost;
7536         double          qual_op_cost;
7537         double          qual_arg_cost;
7538
7539         /* Do preliminary analysis of indexquals */
7540         qinfos = deconstruct_indexquals(path);
7541
7542         /* fetch estimated page cost for tablespace containing index */
7543         get_tablespace_page_costs(index->reltablespace,
7544                                                           &spc_random_page_cost,
7545                                                           &spc_seq_page_cost);
7546
7547         /*
7548          * BRIN indexes are always read in full; use that as startup cost.
7549          *
7550          * XXX maybe only include revmap pages here?
7551          */
7552         *indexStartupCost = spc_seq_page_cost * numPages * loop_count;
7553
7554         /*
7555          * Reading a BRIN index may involve a bit of back and forth over the
7556          * regular pages, since the revmap can point to them out of sequential
7557          * order; model this as reading the whole index in random order.
7558          */
7559         *indexTotalCost = spc_random_page_cost * numPages * loop_count;
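        /*
         * Illustration (hypothetical numbers): a 64-page BRIN index with
         * loop_count = 1 gets a startup cost of 64 * spc_seq_page_cost and a
         * total of 64 * spc_random_page_cost, before the qual costs added below.
         */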
7560
7561         *indexSelectivity =
7562                 clauselist_selectivity(root, indexQuals,
7563                                                            path->indexinfo->rel->relid,
7564                                                            JOIN_INNER, NULL);
7565         *indexCorrelation = 1;
7566
7567         /*
7568          * Add on index qual eval costs, much as in genericcostestimate.
7569          */
7570         qual_arg_cost = other_operands_eval_cost(root, qinfos) +
7571                 orderby_operands_eval_cost(root, path);
7572         qual_op_cost = cpu_operator_cost *
7573                 (list_length(indexQuals) + list_length(indexOrderBys));
7574
7575         *indexStartupCost += qual_arg_cost;
7576         *indexTotalCost += qual_arg_cost;
7577         *indexTotalCost += (numTuples * *indexSelectivity) * (cpu_index_tuple_cost + qual_op_cost);
7578
7579         /* XXX what about pages_per_range? */
7580 }