1 /*-------------------------------------------------------------------------
2  *
3  * selfuncs.c
4  *        Selectivity functions and index cost estimation functions for
5  *        standard operators and index access methods.
6  *
7  *        Selectivity routines are registered in the pg_operator catalog
8  *        in the "oprrest" and "oprjoin" attributes.
9  *
10  *        Index cost functions are located via the index AM's API struct,
11  *        which is obtained from the handler function registered in pg_am.
12  *
13  * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
14  * Portions Copyright (c) 1994, Regents of the University of California
15  *
16  *
17  * IDENTIFICATION
18  *        src/backend/utils/adt/selfuncs.c
19  *
20  *-------------------------------------------------------------------------
21  */
22
23 /*----------
24  * Operator selectivity estimation functions are called to estimate the
25  * selectivity of WHERE clauses whose top-level operator is their operator.
26  * We divide the problem into two cases:
27  *              Restriction clause estimation: the clause involves vars of just
28  *                      one relation.
29  *              Join clause estimation: the clause involves vars of multiple rels.
30  * Join selectivity estimation is far more difficult and usually less accurate
31  * than restriction estimation.
32  *
33  * When dealing with the inner scan of a nestloop join, we consider the
34  * join's joinclauses as restriction clauses for the inner relation, and
35  * treat vars of the outer relation as parameters (a/k/a constants of unknown
36  * values).  So, restriction estimators need to be able to accept an argument
37  * telling which relation is to be treated as the variable.
38  *
39  * The call convention for a restriction estimator (oprrest function) is
40  *
41  *              Selectivity oprrest (PlannerInfo *root,
42  *                                                       Oid operator,
43  *                                                       List *args,
44  *                                                       int varRelid);
45  *
46  * root: general information about the query (rtable and RelOptInfo lists
47  * are particularly important for the estimator).
48  * operator: OID of the specific operator in question.
49  * args: argument list from the operator clause.
50  * varRelid: if not zero, the relid (rtable index) of the relation to
51  * be treated as the variable relation.  May be zero if the args list
52  * is known to contain vars of only one relation.
53  *
54  * This is represented at the SQL level (in pg_proc) as
55  *
56  *              float8 oprrest (internal, oid, internal, int4);
57  *
58  * The result is a selectivity, that is, a fraction (0 to 1) of the rows
59  * of the relation that are expected to produce a TRUE result for the
60  * given operator.
61  *
62  * The call convention for a join estimator (oprjoin function) is similar
63  * except that varRelid is not needed, and instead join information is
64  * supplied:
65  *
66  *              Selectivity oprjoin (PlannerInfo *root,
67  *                                                       Oid operator,
68  *                                                       List *args,
69  *                                                       JoinType jointype,
70  *                                                       SpecialJoinInfo *sjinfo);
71  *
72  *              float8 oprjoin (internal, oid, internal, int2, internal);
73  *
74  * (Before Postgres 8.4, join estimators had only the first four of these
75  * parameters.  That signature is still allowed, but deprecated.)  The
76  * relationship between jointype and sjinfo is explained in the comments for
77  * clause_selectivity() --- the short version is that jointype is usually
78  * best ignored in favor of examining sjinfo.
79  *
80  * Join selectivity for regular inner and outer joins is defined as the
81  * fraction (0 to 1) of the cross product of the relations that is expected
82  * to produce a TRUE result for the given operator.  For both semi and anti
83  * joins, however, the selectivity is defined as the fraction of the left-hand
84  * side relation's rows that are expected to have a match (ie, at least one
85  * row with a TRUE result) in the right-hand side.
86  *
87  * For both oprrest and oprjoin functions, the operator's input collation OID
88  * (if any) is passed using the standard fmgr mechanism, so that the estimator
89  * function can fetch it with PG_GET_COLLATION().  Note, however, that all
90  * statistics in pg_statistic are currently built using the database's default
91  * collation.  Thus, in most cases where we are looking at statistics, we
92  * should ignore the actual operator collation and use DEFAULT_COLLATION_OID.
93  * We expect that the error induced by doing this is usually not large enough
94  * to justify complicating matters.
95  *----------
96  */
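/*
 * Illustrative sketch (not part of the original file): a minimal estimator
 * written against the oprrest convention described above.  It ignores the
 * clause entirely and always returns DEFAULT_EQ_SEL (defined in
 * utils/selfuncs.h), so it is useful only as a template.  The function name
 * my_trivial_restriction_sel is hypothetical; a function of this shape would
 * be declared at the SQL level as
 *              float8 my_trivial_restriction_sel(internal, oid, internal, int4)
 * and attached to an operator via CREATE OPERATOR ... RESTRICT.  The block
 * is kept under "#if 0" so it is never compiled.
 */
#if 0
Datum
my_trivial_restriction_sel(PG_FUNCTION_ARGS)
{
	PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
	Oid			operator = PG_GETARG_OID(1);
	List	   *args = (List *) PG_GETARG_POINTER(2);
	int			varRelid = PG_GETARG_INT32(3);

	/* A real estimator would inspect root, args and varRelid here. */
	(void) root;
	(void) operator;
	(void) args;
	(void) varRelid;

	PG_RETURN_FLOAT8(DEFAULT_EQ_SEL);
}
#endif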
97
98 #include "postgres.h"
99
100 #include <ctype.h>
101 #include <math.h>
102
103 #include "access/gin.h"
104 #include "access/htup_details.h"
105 #include "access/sysattr.h"
106 #include "catalog/index.h"
107 #include "catalog/pg_am.h"
108 #include "catalog/pg_collation.h"
109 #include "catalog/pg_operator.h"
110 #include "catalog/pg_opfamily.h"
111 #include "catalog/pg_statistic.h"
112 #include "catalog/pg_type.h"
113 #include "executor/executor.h"
114 #include "mb/pg_wchar.h"
115 #include "nodes/makefuncs.h"
116 #include "nodes/nodeFuncs.h"
117 #include "optimizer/clauses.h"
118 #include "optimizer/cost.h"
119 #include "optimizer/pathnode.h"
120 #include "optimizer/paths.h"
121 #include "optimizer/plancat.h"
122 #include "optimizer/predtest.h"
123 #include "optimizer/restrictinfo.h"
124 #include "optimizer/var.h"
125 #include "parser/parse_clause.h"
126 #include "parser/parse_coerce.h"
127 #include "parser/parsetree.h"
128 #include "utils/builtins.h"
129 #include "utils/bytea.h"
130 #include "utils/date.h"
131 #include "utils/datum.h"
132 #include "utils/fmgroids.h"
133 #include "utils/index_selfuncs.h"
134 #include "utils/lsyscache.h"
135 #include "utils/nabstime.h"
136 #include "utils/pg_locale.h"
137 #include "utils/rel.h"
138 #include "utils/selfuncs.h"
139 #include "utils/spccache.h"
140 #include "utils/syscache.h"
141 #include "utils/timestamp.h"
142 #include "utils/tqual.h"
143 #include "utils/typcache.h"
144 #include "utils/varlena.h"
145
146
147 /* Hooks for plugins to get control when we ask for stats */
148 get_relation_stats_hook_type get_relation_stats_hook = NULL;
149 get_index_stats_hook_type get_index_stats_hook = NULL;
150
151 static double var_eq_const(VariableStatData *vardata, Oid operator,
152                          Datum constval, bool constisnull,
153                          bool varonleft);
154 static double var_eq_non_const(VariableStatData *vardata, Oid operator,
155                                  Node *other,
156                                  bool varonleft);
157 static double ineq_histogram_selectivity(PlannerInfo *root,
158                                                    VariableStatData *vardata,
159                                                    FmgrInfo *opproc, bool isgt,
160                                                    Datum constval, Oid consttype);
161 static double eqjoinsel_inner(Oid operator,
162                                 VariableStatData *vardata1, VariableStatData *vardata2);
163 static double eqjoinsel_semi(Oid operator,
164                            VariableStatData *vardata1, VariableStatData *vardata2,
165                            RelOptInfo *inner_rel);
166 static bool convert_to_scalar(Datum value, Oid valuetypid, double *scaledvalue,
167                                   Datum lobound, Datum hibound, Oid boundstypid,
168                                   double *scaledlobound, double *scaledhibound);
169 static double convert_numeric_to_scalar(Datum value, Oid typid);
170 static void convert_string_to_scalar(char *value,
171                                                  double *scaledvalue,
172                                                  char *lobound,
173                                                  double *scaledlobound,
174                                                  char *hibound,
175                                                  double *scaledhibound);
176 static void convert_bytea_to_scalar(Datum value,
177                                                 double *scaledvalue,
178                                                 Datum lobound,
179                                                 double *scaledlobound,
180                                                 Datum hibound,
181                                                 double *scaledhibound);
182 static double convert_one_string_to_scalar(char *value,
183                                                          int rangelo, int rangehi);
184 static double convert_one_bytea_to_scalar(unsigned char *value, int valuelen,
185                                                         int rangelo, int rangehi);
186 static char *convert_string_datum(Datum value, Oid typid);
187 static double convert_timevalue_to_scalar(Datum value, Oid typid);
188 static void examine_simple_variable(PlannerInfo *root, Var *var,
189                                                 VariableStatData *vardata);
190 static bool get_variable_range(PlannerInfo *root, VariableStatData *vardata,
191                                    Oid sortop, Datum *min, Datum *max);
192 static bool get_actual_variable_range(PlannerInfo *root,
193                                                   VariableStatData *vardata,
194                                                   Oid sortop,
195                                                   Datum *min, Datum *max);
196 static RelOptInfo *find_join_input_rel(PlannerInfo *root, Relids relids);
197 static Selectivity prefix_selectivity(PlannerInfo *root,
198                                    VariableStatData *vardata,
199                                    Oid vartype, Oid opfamily, Const *prefixcon);
200 static Selectivity like_selectivity(const char *patt, int pattlen,
201                                  bool case_insensitive);
202 static Selectivity regex_selectivity(const char *patt, int pattlen,
203                                   bool case_insensitive,
204                                   int fixed_prefix_len);
205 static Datum string_to_datum(const char *str, Oid datatype);
206 static Const *string_to_const(const char *str, Oid datatype);
207 static Const *string_to_bytea_const(const char *str, size_t str_len);
208 static List *add_predicate_to_quals(IndexOptInfo *index, List *indexQuals);
209
210
211 /*
212  *              eqsel                   - Selectivity of "=" for any data types.
213  *
214  * Note: this routine is also used to estimate selectivity for some
215  * operators that are not "=" but have comparable selectivity behavior,
216  * such as "~=" (geometric approximate-match).  Even for "=", we must
217  * keep in mind that the left and right datatypes may differ.
218  */
219 Datum
220 eqsel(PG_FUNCTION_ARGS)
221 {
222         PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
223         Oid                     operator = PG_GETARG_OID(1);
224         List       *args = (List *) PG_GETARG_POINTER(2);
225         int                     varRelid = PG_GETARG_INT32(3);
226         VariableStatData vardata;
227         Node       *other;
228         bool            varonleft;
229         double          selec;
230
231         /*
232          * If expression is not variable = something or something = variable, then
233          * punt and return a default estimate.
234          */
235         if (!get_restriction_variable(root, args, varRelid,
236                                                                   &vardata, &other, &varonleft))
237                 PG_RETURN_FLOAT8(DEFAULT_EQ_SEL);
238
239         /*
240          * We can do a lot better if the something is a constant.  (Note: the
241          * Const might result from estimation rather than being a simple constant
242          * in the query.)
243          */
244         if (IsA(other, Const))
245                 selec = var_eq_const(&vardata, operator,
246                                                          ((Const *) other)->constvalue,
247                                                          ((Const *) other)->constisnull,
248                                                          varonleft);
249         else
250                 selec = var_eq_non_const(&vardata, operator, other,
251                                                                  varonleft);
252
253         ReleaseVariableStats(vardata);
254
255         PG_RETURN_FLOAT8((float8) selec);
256 }
257
258 /*
259  * var_eq_const --- eqsel for var = const case
260  *
261  * This is split out so that some other estimation functions can use it.
262  */
263 static double
264 var_eq_const(VariableStatData *vardata, Oid operator,
265                          Datum constval, bool constisnull,
266                          bool varonleft)
267 {
268         double          selec;
269         bool            isdefault;
270
271         /*
272          * If the constant is NULL, assume operator is strict and return zero, ie,
273          * operator will never return TRUE.
274          */
275         if (constisnull)
276                 return 0.0;
277
278         /*
279          * If we matched the var to a unique index or DISTINCT clause, assume
280          * there is exactly one match regardless of anything else.  (This is
281          * slightly bogus, since the index or clause's equality operator might be
282          * different from ours, but it's much more likely to be right than
283          * ignoring the information.)
284          */
285         if (vardata->isunique && vardata->rel && vardata->rel->tuples >= 1.0)
286                 return 1.0 / vardata->rel->tuples;
287
288         if (HeapTupleIsValid(vardata->statsTuple))
289         {
290                 Form_pg_statistic stats;
291                 Datum      *values;
292                 int                     nvalues;
293                 float4     *numbers;
294                 int                     nnumbers;
295                 bool            match = false;
296                 int                     i;
297
298                 stats = (Form_pg_statistic) GETSTRUCT(vardata->statsTuple);
299
300                 /*
301                  * Is the constant "=" to any of the column's most common values?
302                  * (Although the given operator may not really be "=", we will assume
303                  * that seeing whether it returns TRUE is an appropriate test.  If you
304                  * don't like this, maybe you shouldn't be using eqsel for your
305                  * operator...)
306                  */
307                 if (get_attstatsslot(vardata->statsTuple,
308                                                          vardata->atttype, vardata->atttypmod,
309                                                          STATISTIC_KIND_MCV, InvalidOid,
310                                                          NULL,
311                                                          &values, &nvalues,
312                                                          &numbers, &nnumbers))
313                 {
314                         FmgrInfo        eqproc;
315
316                         fmgr_info(get_opcode(operator), &eqproc);
317
318                         for (i = 0; i < nvalues; i++)
319                         {
320                                 /* be careful to apply operator right way 'round */
321                                 if (varonleft)
322                                         match = DatumGetBool(FunctionCall2Coll(&eqproc,
323                                                                                                            DEFAULT_COLLATION_OID,
324                                                                                                                    values[i],
325                                                                                                                    constval));
326                                 else
327                                         match = DatumGetBool(FunctionCall2Coll(&eqproc,
328                                                                                                            DEFAULT_COLLATION_OID,
329                                                                                                                    constval,
330                                                                                                                    values[i]));
331                                 if (match)
332                                         break;
333                         }
334                 }
335                 else
336                 {
337                         /* no most-common-value info available */
338                         values = NULL;
339                         numbers = NULL;
340                         i = nvalues = nnumbers = 0;
341                 }
342
343                 if (match)
344                 {
345                         /*
346                          * Constant is "=" to this common value.  We know selectivity
347                          * exactly (or as exactly as ANALYZE could calculate it, anyway).
348                          */
349                         selec = numbers[i];
350                 }
351                 else
352                 {
353                         /*
354                          * Comparison is against a constant that is neither NULL nor any
355                          * of the common values.  Its selectivity cannot be more than
356                          * this:
357                          */
358                         double          sumcommon = 0.0;
359                         double          otherdistinct;
360
361                         for (i = 0; i < nnumbers; i++)
362                                 sumcommon += numbers[i];
363                         selec = 1.0 - sumcommon - stats->stanullfrac;
364                         CLAMP_PROBABILITY(selec);
365
366                         /*
367                          * and in fact it's probably a good deal less. We approximate that
368                          * all the not-common values share this remaining fraction
369                          * equally, so we divide by the number of other distinct values.
370                          */
371                         otherdistinct = get_variable_numdistinct(vardata, &isdefault) - nnumbers;
372                         if (otherdistinct > 1)
373                                 selec /= otherdistinct;
374
375                         /*
376                          * Another cross-check: selectivity shouldn't be estimated as more
377                          * than the least common "most common value".
378                          */
379                         if (nnumbers > 0 && selec > numbers[nnumbers - 1])
380                                 selec = numbers[nnumbers - 1];
381                 }
382
383                 free_attstatsslot(vardata->atttype, values, nvalues,
384                                                   numbers, nnumbers);
385         }
386         else
387         {
388                 /*
389                  * No ANALYZE stats available, so make a guess using estimated number
390                  * of distinct values and assuming they are equally common. (The guess
391                  * is unlikely to be very good, but we do know a few special cases.)
392                  */
393                 selec = 1.0 / get_variable_numdistinct(vardata, &isdefault);
394         }
395
396         /* result should be in range, but make sure... */
397         CLAMP_PROBABILITY(selec);
398
399         return selec;
400 }
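/*
 * Illustrative worked example for var_eq_const (numbers are invented, not
 * taken from any real statistics): suppose stanullfrac = 0.10, the MCV list
 * has 10 entries whose frequencies sum to 0.50 with the least common at
 * 0.02, and get_variable_numdistinct() returns 110.  If the constant matches
 * no MCV entry, the remaining fraction is 1.0 - 0.50 - 0.10 = 0.40, spread
 * over 110 - 10 = 100 other distinct values, giving selec = 0.004.  That is
 * below the least common MCV frequency (0.02), so the final cross-check
 * leaves it unchanged.
 */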
401
402 /*
403  * var_eq_non_const --- eqsel for var = something-other-than-const case
404  */
405 static double
406 var_eq_non_const(VariableStatData *vardata, Oid operator,
407                                  Node *other,
408                                  bool varonleft)
409 {
410         double          selec;
411         bool            isdefault;
412
413         /*
414          * If we matched the var to a unique index or DISTINCT clause, assume
415          * there is exactly one match regardless of anything else.  (This is
416          * slightly bogus, since the index or clause's equality operator might be
417          * different from ours, but it's much more likely to be right than
418          * ignoring the information.)
419          */
420         if (vardata->isunique && vardata->rel && vardata->rel->tuples >= 1.0)
421                 return 1.0 / vardata->rel->tuples;
422
423         if (HeapTupleIsValid(vardata->statsTuple))
424         {
425                 Form_pg_statistic stats;
426                 double          ndistinct;
427                 float4     *numbers;
428                 int                     nnumbers;
429
430                 stats = (Form_pg_statistic) GETSTRUCT(vardata->statsTuple);
431
432                 /*
433                  * Search is for a value that we do not know a priori, but we will
434                  * assume it is not NULL.  Estimate the selectivity as non-null
435                  * fraction divided by number of distinct values, so that we get a
436                  * result averaged over all possible values whether common or
437                  * uncommon.  (Essentially, we are assuming that the not-yet-known
438                  * comparison value is equally likely to be any of the possible
439                  * values, regardless of their frequency in the table.  Is that a good
440                  * idea?)
441                  */
442                 selec = 1.0 - stats->stanullfrac;
443                 ndistinct = get_variable_numdistinct(vardata, &isdefault);
444                 if (ndistinct > 1)
445                         selec /= ndistinct;
446
447                 /*
448                  * Cross-check: selectivity should never be estimated as more than the
449                  * most common value's.
450                  */
451                 if (get_attstatsslot(vardata->statsTuple,
452                                                          vardata->atttype, vardata->atttypmod,
453                                                          STATISTIC_KIND_MCV, InvalidOid,
454                                                          NULL,
455                                                          NULL, NULL,
456                                                          &numbers, &nnumbers))
457                 {
458                         if (nnumbers > 0 && selec > numbers[0])
459                                 selec = numbers[0];
460                         free_attstatsslot(vardata->atttype, NULL, 0, numbers, nnumbers);
461                 }
462         }
463         else
464         {
465                 /*
466                  * No ANALYZE stats available, so make a guess using estimated number
467                  * of distinct values and assuming they are equally common. (The guess
468                  * is unlikely to be very good, but we do know a few special cases.)
469                  */
470                 selec = 1.0 / get_variable_numdistinct(vardata, &isdefault);
471         }
472
473         /* result should be in range, but make sure... */
474         CLAMP_PROBABILITY(selec);
475
476         return selec;
477 }
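/*
 * Illustrative worked example for var_eq_non_const (invented numbers): with
 * stanullfrac = 0.25 and an ndistinct estimate of 50, the estimate is
 * (1.0 - 0.25) / 50 = 0.015.  If the most common value's frequency in the
 * MCV list were only 0.01, the cross-check would lower the result to 0.01.
 */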
478
479 /*
480  *              neqsel                  - Selectivity of "!=" for any data types.
481  *
482  * This routine is also used for some operators that are not "!="
483  * but have comparable selectivity behavior.  See above comments
484  * for eqsel().
485  */
486 Datum
487 neqsel(PG_FUNCTION_ARGS)
488 {
489         PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
490         Oid                     operator = PG_GETARG_OID(1);
491         List       *args = (List *) PG_GETARG_POINTER(2);
492         int                     varRelid = PG_GETARG_INT32(3);
493         Oid                     eqop;
494         float8          result;
495
496         /*
497          * We want 1 - eqsel() where the equality operator is the one associated
498          * with this != operator, that is, its negator.
499          */
500         eqop = get_negator(operator);
501         if (eqop)
502         {
503                 result = DatumGetFloat8(DirectFunctionCall4(eqsel,
504                                                                                                         PointerGetDatum(root),
505                                                                                                         ObjectIdGetDatum(eqop),
506                                                                                                         PointerGetDatum(args),
507                                                                                                         Int32GetDatum(varRelid)));
508         }
509         else
510         {
511                 /* Use default selectivity (should we raise an error instead?) */
512                 result = DEFAULT_EQ_SEL;
513         }
514         result = 1.0 - result;
515         PG_RETURN_FLOAT8(result);
516 }
517
518 /*
519  *      scalarineqsel           - Selectivity of "<", "<=", ">", ">=" for scalars.
520  *
521  * This is the guts of both scalarltsel and scalargtsel.  The caller has
522  * commuted the clause, if necessary, so that we can treat the variable as
523  * being on the left.  The caller must also make sure that the other side
524  * of the clause is a non-null Const, and dissect same into a value and
525  * datatype.
526  *
527  * This routine works for any datatype (or pair of datatypes) known to
528  * convert_to_scalar().  If it is applied to some other datatype,
529  * it will return a default estimate.
530  */
531 static double
532 scalarineqsel(PlannerInfo *root, Oid operator, bool isgt,
533                           VariableStatData *vardata, Datum constval, Oid consttype)
534 {
535         Form_pg_statistic stats;
536         FmgrInfo        opproc;
537         double          mcv_selec,
538                                 hist_selec,
539                                 sumcommon;
540         double          selec;
541
542         if (!HeapTupleIsValid(vardata->statsTuple))
543         {
544                 /* no stats available, so default result */
545                 return DEFAULT_INEQ_SEL;
546         }
547         stats = (Form_pg_statistic) GETSTRUCT(vardata->statsTuple);
548
549         fmgr_info(get_opcode(operator), &opproc);
550
551         /*
552          * If we have most-common-values info, add up the fractions of the MCV
553          * entries that satisfy MCV OP CONST.  These fractions contribute directly
554          * to the result selectivity.  Also add up the total fraction represented
555          * by MCV entries.
556          */
557         mcv_selec = mcv_selectivity(vardata, &opproc, constval, true,
558                                                                 &sumcommon);
559
560         /*
561          * If there is a histogram, determine which bin the constant falls in, and
562          * compute the resulting contribution to selectivity.
563          */
564         hist_selec = ineq_histogram_selectivity(root, vardata, &opproc, isgt,
565                                                                                         constval, consttype);
566
567         /*
568          * Now merge the results from the MCV and histogram calculations,
569          * realizing that the histogram covers only the non-null values that are
570          * not listed in MCV.
571          */
572         selec = 1.0 - stats->stanullfrac - sumcommon;
573
574         if (hist_selec >= 0.0)
575                 selec *= hist_selec;
576         else
577         {
578                 /*
579                  * If no histogram but there are values not accounted for by MCV,
580                  * arbitrarily assume half of them will match.
581                  */
582                 selec *= 0.5;
583         }
584
585         selec += mcv_selec;
586
587         /* result should be in range, but make sure... */
588         CLAMP_PROBABILITY(selec);
589
590         return selec;
591 }
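/*
 * Illustrative worked example for scalarineqsel (invented numbers): with
 * stanullfrac = 0.05, MCV entries satisfying the inequality contributing
 * mcv_selec = 0.15 out of a total MCV fraction sumcommon = 0.30, and a
 * histogram fraction hist_selec = 0.40, the non-null, non-MCV population is
 * 1.0 - 0.05 - 0.30 = 0.65, so the estimate is 0.65 * 0.40 + 0.15 = 0.41.
 */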
592
593 /*
594  *      mcv_selectivity                 - Examine the MCV list for selectivity estimates
595  *
596  * Determine the fraction of the variable's MCV population that satisfies
597  * the predicate (VAR OP CONST), or (CONST OP VAR) if !varonleft.  Also
598  * compute the fraction of the total column population represented by the MCV
599  * list.  This code will work for any boolean-returning predicate operator.
600  *
601  * The function result is the MCV selectivity, and the fraction of the
602  * total population is returned into *sumcommonp.  Zeroes are returned
603  * if there is no MCV list.
604  */
605 double
606 mcv_selectivity(VariableStatData *vardata, FmgrInfo *opproc,
607                                 Datum constval, bool varonleft,
608                                 double *sumcommonp)
609 {
610         double          mcv_selec,
611                                 sumcommon;
612         Datum      *values;
613         int                     nvalues;
614         float4     *numbers;
615         int                     nnumbers;
616         int                     i;
617
618         mcv_selec = 0.0;
619         sumcommon = 0.0;
620
621         if (HeapTupleIsValid(vardata->statsTuple) &&
622                 get_attstatsslot(vardata->statsTuple,
623                                                  vardata->atttype, vardata->atttypmod,
624                                                  STATISTIC_KIND_MCV, InvalidOid,
625                                                  NULL,
626                                                  &values, &nvalues,
627                                                  &numbers, &nnumbers))
628         {
629                 for (i = 0; i < nvalues; i++)
630                 {
631                         if (varonleft ?
632                                 DatumGetBool(FunctionCall2Coll(opproc,
633                                                                                            DEFAULT_COLLATION_OID,
634                                                                                            values[i],
635                                                                                            constval)) :
636                                 DatumGetBool(FunctionCall2Coll(opproc,
637                                                                                            DEFAULT_COLLATION_OID,
638                                                                                            constval,
639                                                                                            values[i])))
640                                 mcv_selec += numbers[i];
641                         sumcommon += numbers[i];
642                 }
643                 free_attstatsslot(vardata->atttype, values, nvalues,
644                                                   numbers, nnumbers);
645         }
646
647         *sumcommonp = sumcommon;
648         return mcv_selec;
649 }
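/*
 * Illustrative worked example for mcv_selectivity (invented numbers): with
 * an MCV list of four values whose frequencies are 0.30, 0.20, 0.10 and
 * 0.05, and a predicate that holds for the first and third of them, the
 * function returns mcv_selec = 0.30 + 0.10 = 0.40 and sets *sumcommonp to
 * 0.30 + 0.20 + 0.10 + 0.05 = 0.65.
 */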
650
651 /*
652  *      histogram_selectivity   - Examine the histogram for selectivity estimates
653  *
654  * Determine the fraction of the variable's histogram entries that satisfy
655  * the predicate (VAR OP CONST), or (CONST OP VAR) if !varonleft.
656  *
657  * This code will work for any boolean-returning predicate operator, whether
658  * or not it has anything to do with the histogram sort operator.  We are
659  * essentially using the histogram just as a representative sample.  However,
660  * small histograms are unlikely to be all that representative, so the caller
661  * should be prepared to fall back on some other estimation approach when the
662  * histogram is missing or very small.  It may also be prudent to combine this
663  * approach with another one when the histogram is small.
664  *
665  * If the actual histogram size is not at least min_hist_size, we won't bother
666  * to do the calculation at all.  Also, if the n_skip parameter is > 0, we
667  * ignore the first and last n_skip histogram elements, on the grounds that
668  * they are outliers and hence not very representative.  Typical values for
669  * these parameters are 10 and 1.
670  *
671  * The function result is the selectivity, or -1 if there is no histogram
672  * or it's smaller than min_hist_size.
673  *
674  * The output parameter *hist_size receives the actual histogram size,
675  * or zero if no histogram.  Callers may use this number to decide how
676  * much faith to put in the function result.
677  *
678  * Note that the result disregards both the most-common-values (if any) and
679  * null entries.  The caller is expected to combine this result with
680  * statistics for those portions of the column population.  It may also be
681  * prudent to clamp the result range, ie, disbelieve exact 0 or 1 outputs.
682  */
683 double
684 histogram_selectivity(VariableStatData *vardata, FmgrInfo *opproc,
685                                           Datum constval, bool varonleft,
686                                           int min_hist_size, int n_skip,
687                                           int *hist_size)
688 {
689         double          result;
690         Datum      *values;
691         int                     nvalues;
692
693         /* check sanity of parameters */
694         Assert(n_skip >= 0);
695         Assert(min_hist_size > 2 * n_skip);
696
697         if (HeapTupleIsValid(vardata->statsTuple) &&
698                 get_attstatsslot(vardata->statsTuple,
699                                                  vardata->atttype, vardata->atttypmod,
700                                                  STATISTIC_KIND_HISTOGRAM, InvalidOid,
701                                                  NULL,
702                                                  &values, &nvalues,
703                                                  NULL, NULL))
704         {
705                 *hist_size = nvalues;
706                 if (nvalues >= min_hist_size)
707                 {
708                         int                     nmatch = 0;
709                         int                     i;
710
711                         for (i = n_skip; i < nvalues - n_skip; i++)
712                         {
713                                 if (varonleft ?
714                                         DatumGetBool(FunctionCall2Coll(opproc,
715                                                                                                    DEFAULT_COLLATION_OID,
716                                                                                                    values[i],
717                                                                                                    constval)) :
718                                         DatumGetBool(FunctionCall2Coll(opproc,
719                                                                                                    DEFAULT_COLLATION_OID,
720                                                                                                    constval,
721                                                                                                    values[i])))
722                                         nmatch++;
723                         }
724                         result = ((double) nmatch) / ((double) (nvalues - 2 * n_skip));
725                 }
726                 else
727                         result = -1;
728                 free_attstatsslot(vardata->atttype, values, nvalues, NULL, 0);
729         }
730         else
731         {
732                 *hist_size = 0;
733                 result = -1;
734         }
735
736         return result;
737 }
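/*
 * Illustrative sketch (hypothetical, never compiled): how a caller might
 * combine histogram_selectivity() with a fallback estimate, using the
 * typical parameter values mentioned above (min_hist_size = 10, n_skip = 1).
 * The variables vardata, opproc, constval and the fallback value heursel are
 * assumed to have been set up by the caller; the clamping rule shown for
 * small histograms is only one possible policy.
 */
#if 0
	{
		double		selec;
		int			hist_size;

		selec = histogram_selectivity(&vardata, &opproc, constval, true,
									  10, 1, &hist_size);
		if (selec < 0)
		{
			/* No histogram, or too small to be trusted: use the fallback. */
			selec = heursel;
		}
		else if (hist_size < 100)
		{
			/*
			 * Small histogram: disbelieve extreme results, clamping to a
			 * band that narrows as the histogram grows.
			 */
			double		low_bound = 1.0 / hist_size;

			selec = Max(low_bound, Min(1.0 - low_bound, selec));
		}
	}
#endif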
738
739 /*
740  *      ineq_histogram_selectivity      - Examine the histogram for scalarineqsel
741  *
742  * Determine the fraction of the variable's histogram population that
743  * satisfies the inequality condition, ie, VAR < CONST or VAR > CONST.
744  *
745  * Returns -1 if there is no histogram (valid results will always be >= 0).
746  *
747  * Note that the result disregards both the most-common-values (if any) and
748  * null entries.  The caller is expected to combine this result with
749  * statistics for those portions of the column population.
750  */
751 static double
752 ineq_histogram_selectivity(PlannerInfo *root,
753                                                    VariableStatData *vardata,
754                                                    FmgrInfo *opproc, bool isgt,
755                                                    Datum constval, Oid consttype)
756 {
757         double          hist_selec;
758         Oid                     hist_op;
759         Datum      *values;
760         int                     nvalues;
761
762         hist_selec = -1.0;
763
764         /*
765          * Someday, ANALYZE might store more than one histogram per rel/att,
766          * corresponding to more than one possible sort ordering defined for the
767          * column type.  However, to make that work we will need to figure out
768          * which staop to search for --- it's not necessarily the one we have at
769          * hand!  (For example, we might have a '<=' operator rather than the '<'
770          * operator that will appear in staop.)  For now, assume that whatever
771          * appears in pg_statistic is sorted the same way our operator sorts, or
772          * the reverse way if isgt is TRUE.
773          */
774         if (HeapTupleIsValid(vardata->statsTuple) &&
775                 get_attstatsslot(vardata->statsTuple,
776                                                  vardata->atttype, vardata->atttypmod,
777                                                  STATISTIC_KIND_HISTOGRAM, InvalidOid,
778                                                  &hist_op,
779                                                  &values, &nvalues,
780                                                  NULL, NULL))
781         {
782                 if (nvalues > 1)
783                 {
784                         /*
785                          * Use binary search to find proper location, ie, the first slot
786                          * at which the comparison fails.  (If the given operator isn't
787                          * actually sort-compatible with the histogram, you'll get garbage
788                          * results ... but probably not any more garbage-y than you would
789                          * from the old linear search.)
790                          *
791                          * If the binary search accesses the first or last histogram
792                          * entry, we try to replace that endpoint with the true column min
793                          * or max as found by get_actual_variable_range().  This
794                          * ameliorates misestimates when the min or max is moving as a
795                          * result of changes since the last ANALYZE.  Note that this could
796                          * result in effectively including MCVs into the histogram that
797                          * weren't there before, but we don't try to correct for that.
798                          */
799                         double          histfrac;
800                         int                     lobound = 0;    /* first possible slot to search */
801                         int                     hibound = nvalues;              /* last+1 slot to search */
802                         bool            have_end = false;
803
804                         /*
805                          * If there are only two histogram entries, we'll want up-to-date
806                          * values for both.  (If there are more than two, we need at most
807                          * one of them to be updated, so we deal with that within the
808                          * loop.)
809                          */
810                         if (nvalues == 2)
811                                 have_end = get_actual_variable_range(root,
812                                                                                                          vardata,
813                                                                                                          hist_op,
814                                                                                                          &values[0],
815                                                                                                          &values[1]);
816
817                         while (lobound < hibound)
818                         {
819                                 int                     probe = (lobound + hibound) / 2;
820                                 bool            ltcmp;
821
822                                 /*
823                                  * If we find ourselves about to compare to the first or last
824                                  * histogram entry, first try to replace it with the actual
825                                  * current min or max (unless we already did so above).
826                                  */
827                                 if (probe == 0 && nvalues > 2)
828                                         have_end = get_actual_variable_range(root,
829                                                                                                                  vardata,
830                                                                                                                  hist_op,
831                                                                                                                  &values[0],
832                                                                                                                  NULL);
833                                 else if (probe == nvalues - 1 && nvalues > 2)
834                                         have_end = get_actual_variable_range(root,
835                                                                                                                  vardata,
836                                                                                                                  hist_op,
837                                                                                                                  NULL,
838                                                                                                                  &values[probe]);
839
840                                 ltcmp = DatumGetBool(FunctionCall2Coll(opproc,
841                                                                                                            DEFAULT_COLLATION_OID,
842                                                                                                            values[probe],
843                                                                                                            constval));
844                                 if (isgt)
845                                         ltcmp = !ltcmp;
846                                 if (ltcmp)
847                                         lobound = probe + 1;
848                                 else
849                                         hibound = probe;
850                         }
851
852                         if (lobound <= 0)
853                         {
854                                 /* Constant is below lower histogram boundary. */
855                                 histfrac = 0.0;
856                         }
857                         else if (lobound >= nvalues)
858                         {
859                                 /* Constant is above upper histogram boundary. */
860                                 histfrac = 1.0;
861                         }
862                         else
863                         {
864                                 int                     i = lobound;
865                                 double          val,
866                                                         high,
867                                                         low;
868                                 double          binfrac;
869
870                                 /*
871                                  * We have values[i-1] <= constant <= values[i].
872                                  *
873                                  * Convert the constant and the two nearest bin boundary
874                                  * values to a uniform comparison scale, and do a linear
875                                  * interpolation within this bin.
876                                  */
877                                 if (convert_to_scalar(constval, consttype, &val,
878                                                                           values[i - 1], values[i],
879                                                                           vardata->vartype,
880                                                                           &low, &high))
881                                 {
882                                         if (high <= low)
883                                         {
884                                                 /* cope if bin boundaries appear identical */
885                                                 binfrac = 0.5;
886                                         }
887                                         else if (val <= low)
888                                                 binfrac = 0.0;
889                                         else if (val >= high)
890                                                 binfrac = 1.0;
891                                         else
892                                         {
893                                                 binfrac = (val - low) / (high - low);
894
895                                                 /*
896                                                  * Watch out for the possibility that we got a NaN or
897                                                  * Infinity from the division.  This can happen
898                                                  * despite the previous checks, if for example "low"
899                                                  * is -Infinity.
900                                                  */
901                                                 if (isnan(binfrac) ||
902                                                         binfrac < 0.0 || binfrac > 1.0)
903                                                         binfrac = 0.5;
904                                         }
905                                 }
906                                 else
907                                 {
908                                         /*
909                                          * Ideally we'd produce an error here, on the grounds that
910                                          * the given operator shouldn't have scalarXXsel
911                                          * registered as its selectivity func unless we can deal
912                                          * with its operand types.  But currently, all manner of
913                                          * stuff is invoking scalarXXsel, so give a default
914                                          * estimate until that can be fixed.
915                                          */
916                                         binfrac = 0.5;
917                                 }
918
919                                 /*
920                                  * Now, compute the overall selectivity across the values
921                                  * represented by the histogram.  We have i-1 full bins and
922                                  * binfrac partial bin below the constant.
923                                  */
924                                 histfrac = (double) (i - 1) + binfrac;
925                                 histfrac /= (double) (nvalues - 1);
926                         }
927
928                         /*
929                          * Now histfrac = fraction of histogram entries below the
930                          * constant.
931                          *
932                          * Account for "<" vs ">"
933                          */
934                         hist_selec = isgt ? (1.0 - histfrac) : histfrac;
935
936                         /*
937                          * The histogram boundaries are only approximate to begin with,
938                          * and may well be out of date anyway.  Therefore, don't believe
939                          * extremely small or large selectivity estimates --- unless we
940                          * got actual current endpoint values from the table.
941                          */
942                         if (have_end)
943                                 CLAMP_PROBABILITY(hist_selec);
944                         else
945                         {
946                                 if (hist_selec < 0.0001)
947                                         hist_selec = 0.0001;
948                                 else if (hist_selec > 0.9999)
949                                         hist_selec = 0.9999;
950                         }
951                 }
952
953                 free_attstatsslot(vardata->atttype, values, nvalues, NULL, 0);
954         }
955
956         return hist_selec;
957 }
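/*
 * Illustrative worked example for ineq_histogram_selectivity (invented
 * numbers): with 11 histogram boundary values (10 bins), suppose the binary
 * search ends with lobound = 4, i.e. values[3] <= constant <= values[4].
 * If convert_to_scalar() maps the bin to low = 100, high = 200 and the
 * constant to val = 150, then binfrac = 0.5 and
 * histfrac = (4 - 1 + 0.5) / (11 - 1) = 0.35.  For a "<" comparison the
 * result is 0.35; for ">" (isgt) it is 1.0 - 0.35 = 0.65, subject to the
 * 0.0001 .. 0.9999 clamp when no actual endpoint values were fetched.
 */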
958
959 /*
960  *              scalarltsel             - Selectivity of "<" (also "<=") for scalars.
961  */
962 Datum
963 scalarltsel(PG_FUNCTION_ARGS)
964 {
965         PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
966         Oid                     operator = PG_GETARG_OID(1);
967         List       *args = (List *) PG_GETARG_POINTER(2);
968         int                     varRelid = PG_GETARG_INT32(3);
969         VariableStatData vardata;
970         Node       *other;
971         bool            varonleft;
972         Datum           constval;
973         Oid                     consttype;
974         bool            isgt;
975         double          selec;
976
977         /*
978          * If expression is not variable op something or something op variable,
979          * then punt and return a default estimate.
980          */
981         if (!get_restriction_variable(root, args, varRelid,
982                                                                   &vardata, &other, &varonleft))
983                 PG_RETURN_FLOAT8(DEFAULT_INEQ_SEL);
984
985         /*
986          * Can't do anything useful if the something is not a constant, either.
987          */
988         if (!IsA(other, Const))
989         {
990                 ReleaseVariableStats(vardata);
991                 PG_RETURN_FLOAT8(DEFAULT_INEQ_SEL);
992         }
993
994         /*
995          * If the constant is NULL, assume operator is strict and return zero, ie,
996          * operator will never return TRUE.
997          */
998         if (((Const *) other)->constisnull)
999         {
1000                 ReleaseVariableStats(vardata);
1001                 PG_RETURN_FLOAT8(0.0);
1002         }
1003         constval = ((Const *) other)->constvalue;
1004         consttype = ((Const *) other)->consttype;
1005
1006         /*
1007          * Force the var to be on the left to simplify logic in scalarineqsel.
1008          */
1009         if (varonleft)
1010         {
1011                 /* we have var < other */
1012                 isgt = false;
1013         }
1014         else
1015         {
1016                 /* we have other < var, commute to make var > other */
1017                 operator = get_commutator(operator);
1018                 if (!operator)
1019                 {
1020                         /* Use default selectivity (should we raise an error instead?) */
1021                         ReleaseVariableStats(vardata);
1022                         PG_RETURN_FLOAT8(DEFAULT_INEQ_SEL);
1023                 }
1024                 isgt = true;
1025         }
1026
1027         selec = scalarineqsel(root, operator, isgt, &vardata, constval, consttype);
1028
1029         ReleaseVariableStats(vardata);
1030
1031         PG_RETURN_FLOAT8((float8) selec);
1032 }
1033
1034 /*
1035  *              scalargtsel             - Selectivity of ">" (also ">=") for scalars.
1036  */
1037 Datum
1038 scalargtsel(PG_FUNCTION_ARGS)
1039 {
1040         PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
1041         Oid                     operator = PG_GETARG_OID(1);
1042         List       *args = (List *) PG_GETARG_POINTER(2);
1043         int                     varRelid = PG_GETARG_INT32(3);
1044         VariableStatData vardata;
1045         Node       *other;
1046         bool            varonleft;
1047         Datum           constval;
1048         Oid                     consttype;
1049         bool            isgt;
1050         double          selec;
1051
1052         /*
1053          * If expression is not variable op something or something op variable,
1054          * then punt and return a default estimate.
1055          */
1056         if (!get_restriction_variable(root, args, varRelid,
1057                                                                   &vardata, &other, &varonleft))
1058                 PG_RETURN_FLOAT8(DEFAULT_INEQ_SEL);
1059
1060         /*
1061          * Can't do anything useful if the something is not a constant, either.
1062          */
1063         if (!IsA(other, Const))
1064         {
1065                 ReleaseVariableStats(vardata);
1066                 PG_RETURN_FLOAT8(DEFAULT_INEQ_SEL);
1067         }
1068
1069         /*
1070          * If the constant is NULL, assume operator is strict and return zero, ie,
1071          * operator will never return TRUE.
1072          */
1073         if (((Const *) other)->constisnull)
1074         {
1075                 ReleaseVariableStats(vardata);
1076                 PG_RETURN_FLOAT8(0.0);
1077         }
1078         constval = ((Const *) other)->constvalue;
1079         consttype = ((Const *) other)->consttype;
1080
1081         /*
1082          * Force the var to be on the left to simplify logic in scalarineqsel.
1083          */
1084         if (varonleft)
1085         {
1086                 /* we have var > other */
1087                 isgt = true;
1088         }
1089         else
1090         {
1091                 /* we have other > var, commute to make var < other */
1092                 operator = get_commutator(operator);
1093                 if (!operator)
1094                 {
1095                         /* Use default selectivity (should we raise an error instead?) */
1096                         ReleaseVariableStats(vardata);
1097                         PG_RETURN_FLOAT8(DEFAULT_INEQ_SEL);
1098                 }
1099                 isgt = false;
1100         }
1101
1102         selec = scalarineqsel(root, operator, isgt, &vardata, constval, consttype);
1103
1104         ReleaseVariableStats(vardata);
1105
1106         PG_RETURN_FLOAT8((float8) selec);
1107 }
1108
1109 /*
1110  * patternsel                   - Generic code for pattern-match selectivity.
1111  */
1112 static double
1113 patternsel(PG_FUNCTION_ARGS, Pattern_Type ptype, bool negate)
1114 {
1115         PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
1116         Oid                     operator = PG_GETARG_OID(1);
1117         List       *args = (List *) PG_GETARG_POINTER(2);
1118         int                     varRelid = PG_GETARG_INT32(3);
1119         Oid                     collation = PG_GET_COLLATION();
1120         VariableStatData vardata;
1121         Node       *other;
1122         bool            varonleft;
1123         Datum           constval;
1124         Oid                     consttype;
1125         Oid                     vartype;
1126         Oid                     opfamily;
1127         Pattern_Prefix_Status pstatus;
1128         Const      *patt;
1129         Const      *prefix = NULL;
1130         Selectivity rest_selec = 0;
1131         double          result;
1132
1133         /*
1134          * If this is for a NOT LIKE or similar operator, get the corresponding
1135          * positive-match operator and work with that.  Set result to the correct
1136          * default estimate, too.
1137          */
1138         if (negate)
1139         {
1140                 operator = get_negator(operator);
1141                 if (!OidIsValid(operator))
1142                         elog(ERROR, "patternsel called for operator without a negator");
1143                 result = 1.0 - DEFAULT_MATCH_SEL;
1144         }
1145         else
1146         {
1147                 result = DEFAULT_MATCH_SEL;
1148         }
1149
1150         /*
1151          * If expression is not variable op constant, then punt and return a
1152          * default estimate.
1153          */
1154         if (!get_restriction_variable(root, args, varRelid,
1155                                                                   &vardata, &other, &varonleft))
1156                 return result;
1157         if (!varonleft || !IsA(other, Const))
1158         {
1159                 ReleaseVariableStats(vardata);
1160                 return result;
1161         }
1162
1163         /*
1164          * If the constant is NULL, assume operator is strict and return zero, ie,
1165          * operator will never return TRUE.  (It's zero even for a negator op.)
1166          */
1167         if (((Const *) other)->constisnull)
1168         {
1169                 ReleaseVariableStats(vardata);
1170                 return 0.0;
1171         }
1172         constval = ((Const *) other)->constvalue;
1173         consttype = ((Const *) other)->consttype;
1174
1175         /*
1176          * The right-hand const is type text or bytea for all supported operators.
1177          * We do not expect to see binary-compatible types here, since
1178          * const-folding should have relabeled the const to exactly match the
1179          * operator's declared type.
1180          */
1181         if (consttype != TEXTOID && consttype != BYTEAOID)
1182         {
1183                 ReleaseVariableStats(vardata);
1184                 return result;
1185         }
1186
1187         /*
1188          * Similarly, the exposed type of the left-hand side should be one of
1189          * those we know.  (Do not look at vardata.atttype, which might be
1190          * something binary-compatible but different.)  We can use it to choose
1191          * the index opfamily from which we must draw the comparison operators.
1192          *
1193          * NOTE: It would be more correct to use the PATTERN opfamilies than the
1194          * simple ones, but at the moment ANALYZE will not generate statistics for
1195          * the PATTERN operators.  But our results are so approximate anyway that
1196          * it probably hardly matters.
1197          */
1198         vartype = vardata.vartype;
1199
1200         switch (vartype)
1201         {
1202                 case TEXTOID:
1203                         opfamily = TEXT_BTREE_FAM_OID;
1204                         break;
1205                 case BPCHAROID:
1206                         opfamily = BPCHAR_BTREE_FAM_OID;
1207                         break;
1208                 case NAMEOID:
1209                         opfamily = NAME_BTREE_FAM_OID;
1210                         break;
1211                 case BYTEAOID:
1212                         opfamily = BYTEA_BTREE_FAM_OID;
1213                         break;
1214                 default:
1215                         ReleaseVariableStats(vardata);
1216                         return result;
1217         }
1218
1219         /*
1220          * Pull out any fixed prefix implied by the pattern, and estimate the
1221          * fractional selectivity of the remainder of the pattern.  Unlike many of
1222          * the other functions in this file, we use the pattern operator's actual
1223          * collation for this step.  This is not because we expect the collation
1224          * to make a big difference in the selectivity estimate (it seldom would),
1225          * but because we want to be sure we cache compiled regexps under the
1226          * right cache key, so that they can be re-used at runtime.
1227          */
1228         patt = (Const *) other;
1229         pstatus = pattern_fixed_prefix(patt, ptype, collation,
1230                                                                    &prefix, &rest_selec);
1231
1232         /*
1233          * If necessary, coerce the prefix constant to the right type.
1234          */
1235         if (prefix && prefix->consttype != vartype)
1236         {
1237                 char       *prefixstr;
1238
1239                 switch (prefix->consttype)
1240                 {
1241                         case TEXTOID:
1242                                 prefixstr = TextDatumGetCString(prefix->constvalue);
1243                                 break;
1244                         case BYTEAOID:
1245                                 prefixstr = DatumGetCString(DirectFunctionCall1(byteaout,
1246                                                                                                                 prefix->constvalue));
1247                                 break;
1248                         default:
1249                                 elog(ERROR, "unrecognized consttype: %u",
1250                                          prefix->consttype);
1251                                 ReleaseVariableStats(vardata);
1252                                 return result;
1253                 }
1254                 prefix = string_to_const(prefixstr, vartype);
1255                 pfree(prefixstr);
1256         }
1257
1258         if (pstatus == Pattern_Prefix_Exact)
1259         {
1260                 /*
1261                  * Pattern specifies an exact match, so pretend operator is '='
1262                  */
1263                 Oid                     eqopr = get_opfamily_member(opfamily, vartype, vartype,
1264                                                                                                 BTEqualStrategyNumber);
1265
1266                 if (eqopr == InvalidOid)
1267                         elog(ERROR, "no = operator for opfamily %u", opfamily);
1268                 result = var_eq_const(&vardata, eqopr, prefix->constvalue,
1269                                                           false, true);
1270         }
1271         else
1272         {
1273                 /*
1274                  * Not exact-match pattern.  If we have a sufficiently large
1275                  * histogram, estimate selectivity for the histogram part of the
1276                  * population by counting matches in the histogram.  If not, estimate
1277                  * selectivity of the fixed prefix and remainder of pattern
1278                  * separately, then combine the two to get an estimate of the
1279                  * selectivity for the part of the column population represented by
1280                  * the histogram.  (For small histograms, we combine these
1281                  * approaches.)
1282                  *
1283                  * We then add up data for any most-common-value entries; these are
1284                  * not in the histogram population, and we can get exact answers for
1285                  * them by applying the pattern operator, so there's no reason to
1286                  * approximate.  (If the MCVs cover a significant part of the total
1287                  * population, this gives us a big leg up in accuracy.)
1288                  */
1289                 Selectivity selec;
1290                 int                     hist_size;
1291                 FmgrInfo        opproc;
1292                 double          nullfrac,
1293                                         mcv_selec,
1294                                         sumcommon;
1295
1296                 /* Try to use the histogram entries to get selectivity */
1297                 fmgr_info(get_opcode(operator), &opproc);
1298
1299                 selec = histogram_selectivity(&vardata, &opproc, constval, true,
1300                                                                           10, 1, &hist_size);
1301
1302                 /* If not at least 100 entries, use the heuristic method */
1303                 if (hist_size < 100)
1304                 {
1305                         Selectivity heursel;
1306                         Selectivity prefixsel;
1307
1308                         if (pstatus == Pattern_Prefix_Partial)
1309                                 prefixsel = prefix_selectivity(root, &vardata, vartype,
1310                                                                                            opfamily, prefix);
1311                         else
1312                                 prefixsel = 1.0;
1313                         heursel = prefixsel * rest_selec;
1314
1315                         if (selec < 0)          /* fewer than 10 histogram entries? */
1316                                 selec = heursel;
1317                         else
1318                         {
1319                                 /*
1320                                  * For histogram sizes from 10 to 100, we combine the
1321                                  * histogram and heuristic selectivities, putting increasingly
1322                                  * more trust in the histogram for larger sizes.
1323                                  */
1324                                 double          hist_weight = hist_size / 100.0;
1325
1326                                 selec = selec * hist_weight + heursel * (1.0 - hist_weight);
1327                         }
1328                 }
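                /*
                 * Editor's worked example (not in the original source): with
                 * hist_size = 40, hist_weight is 0.4; if the histogram gave a
                 * selectivity of 0.02 and the prefix/remainder heuristic gave
                 * 0.05, the blended estimate is 0.02 * 0.4 + 0.05 * 0.6 = 0.038.
                 */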
1329
1330                 /* In any case, don't believe extremely small or large estimates. */
1331                 if (selec < 0.0001)
1332                         selec = 0.0001;
1333                 else if (selec > 0.9999)
1334                         selec = 0.9999;
1335
1336                 /*
1337                  * If we have most-common-values info, add up the fractions of the MCV
1338                  * entries that satisfy MCV OP PATTERN.  These fractions contribute
1339                  * directly to the result selectivity.  Also add up the total fraction
1340                  * represented by MCV entries.
1341                  */
1342                 mcv_selec = mcv_selectivity(&vardata, &opproc, constval, true,
1343                                                                         &sumcommon);
1344
1345                 if (HeapTupleIsValid(vardata.statsTuple))
1346                         nullfrac = ((Form_pg_statistic) GETSTRUCT(vardata.statsTuple))->stanullfrac;
1347                 else
1348                         nullfrac = 0.0;
1349
1350                 /*
1351                  * Now merge the results from the MCV and histogram calculations,
1352                  * realizing that the histogram covers only the non-null values that
1353                  * are not listed in MCV.
1354                  */
1355                 selec *= 1.0 - nullfrac - sumcommon;
1356                 selec += mcv_selec;
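                /*
                 * Editor's worked example (not in the original source): with
                 * nullfrac = 0.1, sumcommon = 0.2, a histogram-part estimate of
                 * 0.05 and mcv_selec = 0.03, the merged result is
                 * 0.05 * (1.0 - 0.1 - 0.2) + 0.03 = 0.065.
                 */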
1357
1358                 /* result should be in range, but make sure... */
1359                 CLAMP_PROBABILITY(selec);
1360                 result = selec;
1361         }
1362
1363         if (prefix)
1364         {
1365                 pfree(DatumGetPointer(prefix->constvalue));
1366                 pfree(prefix);
1367         }
1368
1369         ReleaseVariableStats(vardata);
1370
1371         return negate ? (1.0 - result) : result;
1372 }
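/*
 * Editor's note (illustrative, not part of selfuncs.c): for a clause like
 * "col LIKE 'abc%d'", pattern_fixed_prefix() typically reports the fixed
 * prefix 'abc' (Pattern_Prefix_Partial) plus a rest_selec estimate for the
 * remainder '%d', and patternsel combines prefix_selectivity() with that
 * remainder estimate when no usable histogram is available.  A wildcard-free
 * pattern such as "col LIKE 'abc'" yields Pattern_Prefix_Exact and is
 * estimated as the equality "col = 'abc'" via var_eq_const().
 */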
1373
1374 /*
1375  *              regexeqsel              - Selectivity of regular-expression pattern match.
1376  */
1377 Datum
1378 regexeqsel(PG_FUNCTION_ARGS)
1379 {
1380         PG_RETURN_FLOAT8(patternsel(fcinfo, Pattern_Type_Regex, false));
1381 }
1382
1383 /*
1384  *              icregexeqsel    - Selectivity of case-insensitive regex match.
1385  */
1386 Datum
1387 icregexeqsel(PG_FUNCTION_ARGS)
1388 {
1389         PG_RETURN_FLOAT8(patternsel(fcinfo, Pattern_Type_Regex_IC, false));
1390 }
1391
1392 /*
1393  *              likesel                 - Selectivity of LIKE pattern match.
1394  */
1395 Datum
1396 likesel(PG_FUNCTION_ARGS)
1397 {
1398         PG_RETURN_FLOAT8(patternsel(fcinfo, Pattern_Type_Like, false));
1399 }
1400
1401 /*
1402  *              iclikesel                       - Selectivity of ILIKE pattern match.
1403  */
1404 Datum
1405 iclikesel(PG_FUNCTION_ARGS)
1406 {
1407         PG_RETURN_FLOAT8(patternsel(fcinfo, Pattern_Type_Like_IC, false));
1408 }
1409
1410 /*
1411  *              regexnesel              - Selectivity of regular-expression pattern non-match.
1412  */
1413 Datum
1414 regexnesel(PG_FUNCTION_ARGS)
1415 {
1416         PG_RETURN_FLOAT8(patternsel(fcinfo, Pattern_Type_Regex, true));
1417 }
1418
1419 /*
1420  *              icregexnesel    - Selectivity of case-insensitive regex non-match.
1421  */
1422 Datum
1423 icregexnesel(PG_FUNCTION_ARGS)
1424 {
1425         PG_RETURN_FLOAT8(patternsel(fcinfo, Pattern_Type_Regex_IC, true));
1426 }
1427
1428 /*
1429  *              nlikesel                - Selectivity of LIKE pattern non-match.
1430  */
1431 Datum
1432 nlikesel(PG_FUNCTION_ARGS)
1433 {
1434         PG_RETURN_FLOAT8(patternsel(fcinfo, Pattern_Type_Like, true));
1435 }
1436
1437 /*
1438  *              icnlikesel              - Selectivity of ILIKE pattern non-match.
1439  */
1440 Datum
1441 icnlikesel(PG_FUNCTION_ARGS)
1442 {
1443         PG_RETURN_FLOAT8(patternsel(fcinfo, Pattern_Type_Like_IC, true));
1444 }
1445
1446 /*
1447  *              boolvarsel              - Selectivity of Boolean variable.
1448  *
1449  * This can actually be called on any boolean-valued expression.  If it
1450  * involves only Vars of the specified relation, and if there are statistics
1451  * about the Var or expression (the latter is possible if it's indexed) then
1452  * we'll produce a real estimate; otherwise it's just a default.
1453  */
1454 Selectivity
1455 boolvarsel(PlannerInfo *root, Node *arg, int varRelid)
1456 {
1457         VariableStatData vardata;
1458         double          selec;
1459
1460         examine_variable(root, arg, varRelid, &vardata);
1461         if (HeapTupleIsValid(vardata.statsTuple))
1462         {
1463                 /*
1464                  * A boolean variable V is equivalent to the clause V = 't', so we
1465                  * compute the selectivity as if that is what we have.
1466                  */
1467                 selec = var_eq_const(&vardata, BooleanEqualOperator,
1468                                                          BoolGetDatum(true), false, true);
1469         }
1470         else if (is_funcclause(arg))
1471         {
1472                 /*
1473                  * If we have no stats and it's a function call, estimate 0.3333333.
1474                  * This seems a pretty unprincipled choice, but Postgres has been
1475                  * using that estimate for function calls since 1992.  The hoariness
1476                  * of this behavior suggests that we should not be in too much hurry
1477                  * to use another value.
1478                  */
1479                 selec = 0.3333333;
1480         }
1481         else
1482         {
1483                 /* Otherwise, the default estimate is 0.5 */
1484                 selec = 0.5;
1485         }
1486         ReleaseVariableStats(vardata);
1487         return selec;
1488 }
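/*
 * Editor's note (illustrative, not part of selfuncs.c): in practice this means
 * "WHERE flag" on an analyzed boolean column is estimated like "flag = true",
 * an un-analyzed boolean function call such as "WHERE f(x)" gets the
 * historical 0.3333333 default, and any other boolean expression without
 * statistics falls back to 0.5.
 */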
1489
1490 /*
1491  *              booltestsel             - Selectivity of BooleanTest Node.
1492  */
1493 Selectivity
1494 booltestsel(PlannerInfo *root, BoolTestType booltesttype, Node *arg,
1495                         int varRelid, JoinType jointype, SpecialJoinInfo *sjinfo)
1496 {
1497         VariableStatData vardata;
1498         double          selec;
1499
1500         examine_variable(root, arg, varRelid, &vardata);
1501
1502         if (HeapTupleIsValid(vardata.statsTuple))
1503         {
1504                 Form_pg_statistic stats;
1505                 double          freq_null;
1506                 Datum      *values;
1507                 int                     nvalues;
1508                 float4     *numbers;
1509                 int                     nnumbers;
1510
1511                 stats = (Form_pg_statistic) GETSTRUCT(vardata.statsTuple);
1512                 freq_null = stats->stanullfrac;
1513
1514                 if (get_attstatsslot(vardata.statsTuple,
1515                                                          vardata.atttype, vardata.atttypmod,
1516                                                          STATISTIC_KIND_MCV, InvalidOid,
1517                                                          NULL,
1518                                                          &values, &nvalues,
1519                                                          &numbers, &nnumbers)
1520                         && nnumbers > 0)
1521                 {
1522                         double          freq_true;
1523                         double          freq_false;
1524
1525                         /*
1526                          * Get first MCV frequency and derive frequency for true.
1527                          */
1528                         if (DatumGetBool(values[0]))
1529                                 freq_true = numbers[0];
1530                         else
1531                                 freq_true = 1.0 - numbers[0] - freq_null;
1532
1533                         /*
1534                          * Next derive frequency for false. Then use these as appropriate
1535                          * to derive frequency for each case.
1536                          */
1537                         freq_false = 1.0 - freq_true - freq_null;
1538
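                        /*
                         * Editor's worked example (not in the original source):
                         * with stanullfrac = 0.1 and a first MCV entry of TRUE
                         * at frequency 0.65, we get freq_true = 0.65 and
                         * freq_false = 1.0 - 0.65 - 0.1 = 0.25, so IS FALSE is
                         * estimated at 0.25, IS NOT TRUE at 0.35, and
                         * IS UNKNOWN at 0.1.
                         */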
1539                         switch (booltesttype)
1540                         {
1541                                 case IS_UNKNOWN:
1542                                         /* select only NULL values */
1543                                         selec = freq_null;
1544                                         break;
1545                                 case IS_NOT_UNKNOWN:
1546                                         /* select non-NULL values */
1547                                         selec = 1.0 - freq_null;
1548                                         break;
1549                                 case IS_TRUE:
1550                                         /* select only TRUE values */
1551                                         selec = freq_true;
1552                                         break;
1553                                 case IS_NOT_TRUE:
1554                                         /* select non-TRUE values */
1555                                         selec = 1.0 - freq_true;
1556                                         break;
1557                                 case IS_FALSE:
1558                                         /* select only FALSE values */
1559                                         selec = freq_false;
1560                                         break;
1561                                 case IS_NOT_FALSE:
1562                                         /* select non-FALSE values */
1563                                         selec = 1.0 - freq_false;
1564                                         break;
1565                                 default:
1566                                         elog(ERROR, "unrecognized booltesttype: %d",
1567                                                  (int) booltesttype);
1568                                         selec = 0.0;    /* Keep compiler quiet */
1569                                         break;
1570                         }
1571
1572                         free_attstatsslot(vardata.atttype, values, nvalues,
1573                                                           numbers, nnumbers);
1574                 }
1575                 else
1576                 {
1577                         /*
1578                          * No most-common-value info available. Still have null fraction
1579                          * information, so use it for IS [NOT] UNKNOWN. Otherwise adjust
1580                          * for null fraction and assume a 50-50 split of TRUE and FALSE.
1581                          */
1582                         switch (booltesttype)
1583                         {
1584                                 case IS_UNKNOWN:
1585                                         /* select only NULL values */
1586                                         selec = freq_null;
1587                                         break;
1588                                 case IS_NOT_UNKNOWN:
1589                                         /* select non-NULL values */
1590                                         selec = 1.0 - freq_null;
1591                                         break;
1592                                 case IS_TRUE:
1593                                 case IS_FALSE:
1594                                         /* Assume we select half of the non-NULL values */
1595                                         selec = (1.0 - freq_null) / 2.0;
1596                                         break;
1597                                 case IS_NOT_TRUE:
1598                                 case IS_NOT_FALSE:
1599                                         /* Assume we select NULLs plus half of the non-NULLs */
1600                                         /* equiv. to freq_null + (1.0 - freq_null) / 2.0 */
1601                                         selec = (freq_null + 1.0) / 2.0;
1602                                         break;
1603                                 default:
1604                                         elog(ERROR, "unrecognized booltesttype: %d",
1605                                                  (int) booltesttype);
1606                                         selec = 0.0;    /* Keep compiler quiet */
1607                                         break;
1608                         }
1609                 }
1610         }
1611         else
1612         {
1613                 /*
1614                  * If we can't get variable statistics for the argument, perhaps
1615                  * clause_selectivity can do something with it.  We ignore the
1616                  * possibility of a NULL value when using clause_selectivity, and just
1617                  * assume the value is either TRUE or FALSE.
1618                  */
1619                 switch (booltesttype)
1620                 {
1621                         case IS_UNKNOWN:
1622                                 selec = DEFAULT_UNK_SEL;
1623                                 break;
1624                         case IS_NOT_UNKNOWN:
1625                                 selec = DEFAULT_NOT_UNK_SEL;
1626                                 break;
1627                         case IS_TRUE:
1628                         case IS_NOT_FALSE:
1629                                 selec = (double) clause_selectivity(root, arg,
1630                                                                                                         varRelid,
1631                                                                                                         jointype, sjinfo);
1632                                 break;
1633                         case IS_FALSE:
1634                         case IS_NOT_TRUE:
1635                                 selec = 1.0 - (double) clause_selectivity(root, arg,
1636                                                                                                                   varRelid,
1637                                                                                                                   jointype, sjinfo);
1638                                 break;
1639                         default:
1640                                 elog(ERROR, "unrecognized booltesttype: %d",
1641                                          (int) booltesttype);
1642                                 selec = 0.0;    /* Keep compiler quiet */
1643                                 break;
1644                 }
1645         }
1646
1647         ReleaseVariableStats(vardata);
1648
1649         /* result should be in range, but make sure... */
1650         CLAMP_PROBABILITY(selec);
1651
1652         return (Selectivity) selec;
1653 }
1654
1655 /*
1656  *              nulltestsel             - Selectivity of NullTest Node.
1657  */
1658 Selectivity
1659 nulltestsel(PlannerInfo *root, NullTestType nulltesttype, Node *arg,
1660                         int varRelid, JoinType jointype, SpecialJoinInfo *sjinfo)
1661 {
1662         VariableStatData vardata;
1663         double          selec;
1664
1665         examine_variable(root, arg, varRelid, &vardata);
1666
1667         if (HeapTupleIsValid(vardata.statsTuple))
1668         {
1669                 Form_pg_statistic stats;
1670                 double          freq_null;
1671
1672                 stats = (Form_pg_statistic) GETSTRUCT(vardata.statsTuple);
1673                 freq_null = stats->stanullfrac;
1674
1675                 switch (nulltesttype)
1676                 {
1677                         case IS_NULL:
1678
1679                                 /*
1680                                  * Use freq_null directly.
1681                                  */
1682                                 selec = freq_null;
1683                                 break;
1684                         case IS_NOT_NULL:
1685
1686                                 /*
1687                                  * Select not unknown (not null) values. Calculate from
1688                                  * freq_null.
1689                                  */
1690                                 selec = 1.0 - freq_null;
1691                                 break;
1692                         default:
1693                                 elog(ERROR, "unrecognized nulltesttype: %d",
1694                                          (int) nulltesttype);
1695                                 return (Selectivity) 0; /* keep compiler quiet */
1696                 }
1697         }
1698         else
1699         {
1700                 /*
1701                  * No ANALYZE stats available, so make a guess
1702                  */
1703                 switch (nulltesttype)
1704                 {
1705                         case IS_NULL:
1706                                 selec = DEFAULT_UNK_SEL;
1707                                 break;
1708                         case IS_NOT_NULL:
1709                                 selec = DEFAULT_NOT_UNK_SEL;
1710                                 break;
1711                         default:
1712                                 elog(ERROR, "unrecognized nulltesttype: %d",
1713                                          (int) nulltesttype);
1714                                 return (Selectivity) 0; /* keep compiler quiet */
1715                 }
1716         }
1717
1718         ReleaseVariableStats(vardata);
1719
1720         /* result should be in range, but make sure... */
1721         CLAMP_PROBABILITY(selec);
1722
1723         return (Selectivity) selec;
1724 }
1725
1726 /*
1727  * strip_array_coercion - strip binary-compatible relabeling from an array expr
1728  *
1729  * For array values, the parser normally generates ArrayCoerceExpr conversions,
1730  * but it seems possible that RelabelType might show up.  Also, the planner
1731  * is not currently tense about collapsing stacked ArrayCoerceExpr nodes,
1732  * so we need to be ready to deal with more than one level.
1733  */
1734 static Node *
1735 strip_array_coercion(Node *node)
1736 {
1737         for (;;)
1738         {
1739                 if (node && IsA(node, ArrayCoerceExpr) &&
1740                         ((ArrayCoerceExpr *) node)->elemfuncid == InvalidOid)
1741                 {
1742                         node = (Node *) ((ArrayCoerceExpr *) node)->arg;
1743                 }
1744                 else if (node && IsA(node, RelabelType))
1745                 {
1746                         /* We don't really expect this case, but may as well cope */
1747                         node = (Node *) ((RelabelType *) node)->arg;
1748                 }
1749                 else
1750                         break;
1751         }
1752         return node;
1753 }
1754
1755 /*
1756  *              scalararraysel          - Selectivity of ScalarArrayOpExpr Node.
1757  */
1758 Selectivity
1759 scalararraysel(PlannerInfo *root,
1760                            ScalarArrayOpExpr *clause,
1761                            bool is_join_clause,
1762                            int varRelid,
1763                            JoinType jointype,
1764                            SpecialJoinInfo *sjinfo)
1765 {
1766         Oid                     operator = clause->opno;
1767         bool            useOr = clause->useOr;
1768         bool            isEquality = false;
1769         bool            isInequality = false;
1770         Node       *leftop;
1771         Node       *rightop;
1772         Oid                     nominal_element_type;
1773         Oid                     nominal_element_collation;
1774         TypeCacheEntry *typentry;
1775         RegProcedure oprsel;
1776         FmgrInfo        oprselproc;
1777         Selectivity s1;
1778         Selectivity s1disjoint;
1779
1780         /* First, deconstruct the expression */
1781         Assert(list_length(clause->args) == 2);
1782         leftop = (Node *) linitial(clause->args);
1783         rightop = (Node *) lsecond(clause->args);
1784
1785         /* aggressively reduce both sides to constants */
1786         leftop = estimate_expression_value(root, leftop);
1787         rightop = estimate_expression_value(root, rightop);
1788
1789         /* get nominal (after relabeling) element type of rightop */
1790         nominal_element_type = get_base_element_type(exprType(rightop));
1791         if (!OidIsValid(nominal_element_type))
1792                 return (Selectivity) 0.5;               /* probably shouldn't happen */
1793         /* get nominal collation, too, for generating constants */
1794         nominal_element_collation = exprCollation(rightop);
1795
1796         /* look through any binary-compatible relabeling of rightop */
1797         rightop = strip_array_coercion(rightop);
1798
1799         /*
1800          * Detect whether the operator is the default equality or inequality
1801          * operator of the array element type.
1802          */
1803         typentry = lookup_type_cache(nominal_element_type, TYPECACHE_EQ_OPR);
1804         if (OidIsValid(typentry->eq_opr))
1805         {
1806                 if (operator == typentry->eq_opr)
1807                         isEquality = true;
1808                 else if (get_negator(operator) == typentry->eq_opr)
1809                         isInequality = true;
1810         }
1811
1812         /*
1813          * If it is equality or inequality, we might be able to estimate this as a
1814          * form of array containment; for instance "const = ANY(column)" can be
1815          * treated as "ARRAY[const] <@ column".  scalararraysel_containment tries
1816          * that, and returns the selectivity estimate if successful, or -1 if not.
1817          */
1818         if ((isEquality || isInequality) && !is_join_clause)
1819         {
1820                 s1 = scalararraysel_containment(root, leftop, rightop,
1821                                                                                 nominal_element_type,
1822                                                                                 isEquality, useOr, varRelid);
1823                 if (s1 >= 0.0)
1824                         return s1;
1825         }
1826
1827         /*
1828          * Look up the underlying operator's selectivity estimator. Punt if it
1829          * hasn't got one.
1830          */
1831         if (is_join_clause)
1832                 oprsel = get_oprjoin(operator);
1833         else
1834                 oprsel = get_oprrest(operator);
1835         if (!oprsel)
1836                 return (Selectivity) 0.5;
1837         fmgr_info(oprsel, &oprselproc);
1838
1839         /*
1840          * In the array-containment check above, we must only believe that an
1841          * operator is equality or inequality if it is the default btree equality
1842          * operator (or its negator) for the element type, since those are the
1843          * operators that array containment will use.  But in what follows, we can
1844          * be a little laxer, and also believe that any operators using eqsel() or
1845          * neqsel() as selectivity estimator act like equality or inequality.
1846          */
1847         if (oprsel == F_EQSEL || oprsel == F_EQJOINSEL)
1848                 isEquality = true;
1849         else if (oprsel == F_NEQSEL || oprsel == F_NEQJOINSEL)
1850                 isInequality = true;
1851
1852         /*
1853          * We consider three cases:
1854          *
1855          * 1. rightop is an Array constant: deconstruct the array, apply the
1856          * operator's selectivity function for each array element, and merge the
1857          * results in the same way that clausesel.c does for AND/OR combinations.
1858          *
1859          * 2. rightop is an ARRAY[] construct: apply the operator's selectivity
1860          * function for each element of the ARRAY[] construct, and merge.
1861          *
1862          * 3. otherwise, make a guess ...
1863          */
1864         if (rightop && IsA(rightop, Const))
1865         {
1866                 Datum           arraydatum = ((Const *) rightop)->constvalue;
1867                 bool            arrayisnull = ((Const *) rightop)->constisnull;
1868                 ArrayType  *arrayval;
1869                 int16           elmlen;
1870                 bool            elmbyval;
1871                 char            elmalign;
1872                 int                     num_elems;
1873                 Datum      *elem_values;
1874                 bool       *elem_nulls;
1875                 int                     i;
1876
1877                 if (arrayisnull)                /* qual can't succeed if null array */
1878                         return (Selectivity) 0.0;
1879                 arrayval = DatumGetArrayTypeP(arraydatum);
1880                 get_typlenbyvalalign(ARR_ELEMTYPE(arrayval),
1881                                                          &elmlen, &elmbyval, &elmalign);
1882                 deconstruct_array(arrayval,
1883                                                   ARR_ELEMTYPE(arrayval),
1884                                                   elmlen, elmbyval, elmalign,
1885                                                   &elem_values, &elem_nulls, &num_elems);
1886
1887                 /*
1888                  * For generic operators, we assume the probability of success is
1889                  * independent for each array element.  But for "= ANY" or "<> ALL",
1890                  * if the array elements are distinct (which'd typically be the case)
1891                  * then the probabilities are disjoint, and we should just sum them.
1892                  *
1893                  * If we were being really tense we would try to confirm that the
1894                  * elements are all distinct, but that would be expensive and it
1895                  * doesn't seem to be worth the cycles; it would amount to penalizing
1896                  * well-written queries in favor of poorly-written ones.  However, we
1897                  * do protect ourselves a little bit by checking whether the
1898                  * disjointness assumption leads to an impossible (out of range)
1899                  * probability; if so, we fall back to the normal calculation.
1900                  */
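                /*
                 * Editor's worked example (not in the original source): for
                 * "x = ANY (ARRAY[1,2,3])" with a per-element "=" selectivity
                 * of 0.01, the independent-OR combination below yields
                 * 1 - 0.99^3 ~= 0.0297, while the disjoint sum yields exactly
                 * 3 * 0.01 = 0.03 and is preferred since it is in range.  For
                 * "x <> ALL (ARRAY[1,2,3])" with per-element "<>" selectivity
                 * 0.99, the product gives 0.99^3 ~= 0.9703 and the disjoint
                 * form gives 1 - 3 * 0.01 = 0.97.
                 */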
1901                 s1 = s1disjoint = (useOr ? 0.0 : 1.0);
1902
1903                 for (i = 0; i < num_elems; i++)
1904                 {
1905                         List       *args;
1906                         Selectivity s2;
1907
1908                         args = list_make2(leftop,
1909                                                           makeConst(nominal_element_type,
1910                                                                                 -1,
1911                                                                                 nominal_element_collation,
1912                                                                                 elmlen,
1913                                                                                 elem_values[i],
1914                                                                                 elem_nulls[i],
1915                                                                                 elmbyval));
1916                         if (is_join_clause)
1917                                 s2 = DatumGetFloat8(FunctionCall5Coll(&oprselproc,
1918                                                                                                           clause->inputcollid,
1919                                                                                                           PointerGetDatum(root),
1920                                                                                                   ObjectIdGetDatum(operator),
1921                                                                                                           PointerGetDatum(args),
1922                                                                                                           Int16GetDatum(jointype),
1923                                                                                                    PointerGetDatum(sjinfo)));
1924                         else
1925                                 s2 = DatumGetFloat8(FunctionCall4Coll(&oprselproc,
1926                                                                                                           clause->inputcollid,
1927                                                                                                           PointerGetDatum(root),
1928                                                                                                   ObjectIdGetDatum(operator),
1929                                                                                                           PointerGetDatum(args),
1930                                                                                                    Int32GetDatum(varRelid)));
1931
1932                         if (useOr)
1933                         {
1934                                 s1 = s1 + s2 - s1 * s2;
1935                                 if (isEquality)
1936                                         s1disjoint += s2;
1937                         }
1938                         else
1939                         {
1940                                 s1 = s1 * s2;
1941                                 if (isInequality)
1942                                         s1disjoint += s2 - 1.0;
1943                         }
1944                 }
1945
1946                 /* accept disjoint-probability estimate if in range */
1947                 if ((useOr ? isEquality : isInequality) &&
1948                         s1disjoint >= 0.0 && s1disjoint <= 1.0)
1949                         s1 = s1disjoint;
1950         }
1951         else if (rightop && IsA(rightop, ArrayExpr) &&
1952                          !((ArrayExpr *) rightop)->multidims)
1953         {
1954                 ArrayExpr  *arrayexpr = (ArrayExpr *) rightop;
1955                 int16           elmlen;
1956                 bool            elmbyval;
1957                 ListCell   *l;
1958
1959                 get_typlenbyval(arrayexpr->element_typeid,
1960                                                 &elmlen, &elmbyval);
1961
1962                 /*
1963                  * We use the assumption of disjoint probabilities here too, although
1964                  * the odds of equal array elements are rather higher if the elements
1965                  * are not all constants (which they won't be, else constant folding
1966                  * would have reduced the ArrayExpr to a Const).  In this path it's
1967                  * critical to have the sanity check on the s1disjoint estimate.
1968                  */
1969                 s1 = s1disjoint = (useOr ? 0.0 : 1.0);
1970
1971                 foreach(l, arrayexpr->elements)
1972                 {
1973                         Node       *elem = (Node *) lfirst(l);
1974                         List       *args;
1975                         Selectivity s2;
1976
1977                         /*
1978                          * Theoretically, if elem isn't of nominal_element_type we should
1979                          * insert a RelabelType, but it seems unlikely that any operator
1980                          * estimation function would really care ...
1981                          */
1982                         args = list_make2(leftop, elem);
1983                         if (is_join_clause)
1984                                 s2 = DatumGetFloat8(FunctionCall5Coll(&oprselproc,
1985                                                                                                           clause->inputcollid,
1986                                                                                                           PointerGetDatum(root),
1987                                                                                                   ObjectIdGetDatum(operator),
1988                                                                                                           PointerGetDatum(args),
1989                                                                                                           Int16GetDatum(jointype),
1990                                                                                                    PointerGetDatum(sjinfo)));
1991                         else
1992                                 s2 = DatumGetFloat8(FunctionCall4Coll(&oprselproc,
1993                                                                                                           clause->inputcollid,
1994                                                                                                           PointerGetDatum(root),
1995                                                                                                   ObjectIdGetDatum(operator),
1996                                                                                                           PointerGetDatum(args),
1997                                                                                                    Int32GetDatum(varRelid)));
1998
1999                         if (useOr)
2000                         {
2001                                 s1 = s1 + s2 - s1 * s2;
2002                                 if (isEquality)
2003                                         s1disjoint += s2;
2004                         }
2005                         else
2006                         {
2007                                 s1 = s1 * s2;
2008                                 if (isInequality)
2009                                         s1disjoint += s2 - 1.0;
2010                         }
2011                 }
2012
2013                 /* accept disjoint-probability estimate if in range */
2014                 if ((useOr ? isEquality : isInequality) &&
2015                         s1disjoint >= 0.0 && s1disjoint <= 1.0)
2016                         s1 = s1disjoint;
2017         }
2018         else
2019         {
2020                 CaseTestExpr *dummyexpr;
2021                 List       *args;
2022                 Selectivity s2;
2023                 int                     i;
2024
2025                 /*
2026                  * We need a dummy rightop to pass to the operator selectivity
2027                  * routine.  It can be pretty much anything that doesn't look like a
2028                  * constant; CaseTestExpr is a convenient choice.
2029                  */
2030                 dummyexpr = makeNode(CaseTestExpr);
2031                 dummyexpr->typeId = nominal_element_type;
2032                 dummyexpr->typeMod = -1;
2033                 dummyexpr->collation = clause->inputcollid;
2034                 args = list_make2(leftop, dummyexpr);
2035                 if (is_join_clause)
2036                         s2 = DatumGetFloat8(FunctionCall5Coll(&oprselproc,
2037                                                                                                   clause->inputcollid,
2038                                                                                                   PointerGetDatum(root),
2039                                                                                                   ObjectIdGetDatum(operator),
2040                                                                                                   PointerGetDatum(args),
2041                                                                                                   Int16GetDatum(jointype),
2042                                                                                                   PointerGetDatum(sjinfo)));
2043                 else
2044                         s2 = DatumGetFloat8(FunctionCall4Coll(&oprselproc,
2045                                                                                                   clause->inputcollid,
2046                                                                                                   PointerGetDatum(root),
2047                                                                                                   ObjectIdGetDatum(operator),
2048                                                                                                   PointerGetDatum(args),
2049                                                                                                   Int32GetDatum(varRelid)));
2050                 s1 = useOr ? 0.0 : 1.0;
2051
2052                 /*
2053                  * Arbitrarily assume 10 elements in the eventual array value (see
2054                  * also estimate_array_length).  We don't risk an assumption of
2055                  * disjoint probabilities here.
2056                  */
2057                 for (i = 0; i < 10; i++)
2058                 {
2059                         if (useOr)
2060                                 s1 = s1 + s2 - s1 * s2;
2061                         else
2062                                 s1 = s1 * s2;
2063                 }
2064         }
2065
2066         /* result should be in range, but make sure... */
2067         CLAMP_PROBABILITY(s1);
2068
2069         return s1;
2070 }
2071
2072 /*
2073  * Estimate number of elements in the array yielded by an expression.
2074  *
2075  * It's important that this agree with scalararraysel.
2076  */
2077 int
2078 estimate_array_length(Node *arrayexpr)
2079 {
2080         /* look through any binary-compatible relabeling of arrayexpr */
2081         arrayexpr = strip_array_coercion(arrayexpr);
2082
2083         if (arrayexpr && IsA(arrayexpr, Const))
2084         {
2085                 Datum           arraydatum = ((Const *) arrayexpr)->constvalue;
2086                 bool            arrayisnull = ((Const *) arrayexpr)->constisnull;
2087                 ArrayType  *arrayval;
2088
2089                 if (arrayisnull)
2090                         return 0;
2091                 arrayval = DatumGetArrayTypeP(arraydatum);
2092                 return ArrayGetNItems(ARR_NDIM(arrayval), ARR_DIMS(arrayval));
2093         }
2094         else if (arrayexpr && IsA(arrayexpr, ArrayExpr) &&
2095                          !((ArrayExpr *) arrayexpr)->multidims)
2096         {
2097                 return list_length(((ArrayExpr *) arrayexpr)->elements);
2098         }
2099         else
2100         {
2101                 /* default guess --- see also scalararraysel */
2102                 return 10;
2103         }
2104 }
2105
2106 /*
2107  *              rowcomparesel           - Selectivity of RowCompareExpr Node.
2108  *
2109  * We estimate RowCompare selectivity by considering just the first (high
2110  * order) columns, which makes it equivalent to an ordinary OpExpr.  While
2111  * this estimate could be refined by considering additional columns, it
2112  * seems unlikely that we could do a lot better without multi-column
2113  * statistics.
2114  */
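/*
 * Editor's note (illustrative, not part of selfuncs.c): for example, the row
 * comparison "(a, b) < (5, 10)" is estimated here as if it were simply
 * "a < 5".
 */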
2115 Selectivity
2116 rowcomparesel(PlannerInfo *root,
2117                           RowCompareExpr *clause,
2118                           int varRelid, JoinType jointype, SpecialJoinInfo *sjinfo)
2119 {
2120         Selectivity s1;
2121         Oid                     opno = linitial_oid(clause->opnos);
2122         Oid                     inputcollid = linitial_oid(clause->inputcollids);
2123         List       *opargs;
2124         bool            is_join_clause;
2125
2126         /* Build equivalent arg list for single operator */
2127         opargs = list_make2(linitial(clause->largs), linitial(clause->rargs));
2128
2129         /*
2130          * Decide if it's a join clause.  This should match clausesel.c's
2131          * treat_as_join_clause(), except that we intentionally consider only the
2132          * leading columns and not the rest of the clause.
2133          */
2134         if (varRelid != 0)
2135         {
2136                 /*
2137                  * Caller is forcing restriction mode (eg, because we are examining an
2138                  * inner indexscan qual).
2139                  */
2140                 is_join_clause = false;
2141         }
2142         else if (sjinfo == NULL)
2143         {
2144                 /*
2145                  * It must be a restriction clause, since it's being evaluated at a
2146                  * scan node.
2147                  */
2148                 is_join_clause = false;
2149         }
2150         else
2151         {
2152                 /*
2153                  * Otherwise, it's a join if there's more than one relation used.
2154                  */
2155                 is_join_clause = (NumRelids((Node *) opargs) > 1);
2156         }
2157
2158         if (is_join_clause)
2159         {
2160                 /* Estimate selectivity for a join clause. */
2161                 s1 = join_selectivity(root, opno,
2162                                                           opargs,
2163                                                           inputcollid,
2164                                                           jointype,
2165                                                           sjinfo);
2166         }
2167         else
2168         {
2169                 /* Estimate selectivity for a restriction clause. */
2170                 s1 = restriction_selectivity(root, opno,
2171                                                                          opargs,
2172                                                                          inputcollid,
2173                                                                          varRelid);
2174         }
2175
2176         return s1;
2177 }
2178
2179 /*
2180  *              eqjoinsel               - Join selectivity of "="
2181  */
2182 Datum
2183 eqjoinsel(PG_FUNCTION_ARGS)
2184 {
2185         PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
2186         Oid                     operator = PG_GETARG_OID(1);
2187         List       *args = (List *) PG_GETARG_POINTER(2);
2188
2189 #ifdef NOT_USED
2190         JoinType        jointype = (JoinType) PG_GETARG_INT16(3);
2191 #endif
2192         SpecialJoinInfo *sjinfo = (SpecialJoinInfo *) PG_GETARG_POINTER(4);
2193         double          selec;
2194         VariableStatData vardata1;
2195         VariableStatData vardata2;
2196         bool            join_is_reversed;
2197         RelOptInfo *inner_rel;
2198
2199         get_join_variables(root, args, sjinfo,
2200                                            &vardata1, &vardata2, &join_is_reversed);
2201
2202         switch (sjinfo->jointype)
2203         {
2204                 case JOIN_INNER:
2205                 case JOIN_LEFT:
2206                 case JOIN_FULL:
2207                         selec = eqjoinsel_inner(operator, &vardata1, &vardata2);
2208                         break;
2209                 case JOIN_SEMI:
2210                 case JOIN_ANTI:
2211
2212                         /*
2213                          * Look up the join's inner relation.  min_righthand is sufficient
2214                          * information because neither SEMI nor ANTI joins permit any
2215                          * reassociation into or out of their RHS, so the righthand will
2216                          * always be exactly that set of rels.
2217                          */
2218                         inner_rel = find_join_input_rel(root, sjinfo->min_righthand);
2219
2220                         if (!join_is_reversed)
2221                                 selec = eqjoinsel_semi(operator, &vardata1, &vardata2,
2222                                                                            inner_rel);
2223                         else
2224                                 selec = eqjoinsel_semi(get_commutator(operator),
2225                                                                            &vardata2, &vardata1,
2226                                                                            inner_rel);
2227                         break;
2228                 default:
2229                         /* other values not expected here */
2230                         elog(ERROR, "unrecognized join type: %d",
2231                                  (int) sjinfo->jointype);
2232                         selec = 0;                      /* keep compiler quiet */
2233                         break;
2234         }
2235
2236         ReleaseVariableStats(vardata1);
2237         ReleaseVariableStats(vardata2);
2238
2239         CLAMP_PROBABILITY(selec);
2240
2241         PG_RETURN_FLOAT8((float8) selec);
2242 }
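/*
 * Editor's note (illustrative, not part of selfuncs.c): for SEMI/ANTI joins,
 * eqjoinsel_semi() expects its second variable to belong to the inner (RHS)
 * relation, so when get_join_variables() reports that the clause was written
 * the other way around (join_is_reversed), the code above swaps the two
 * VariableStatData arguments and substitutes the operator's commutator.
 */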
2243
2244 /*
2245  * eqjoinsel_inner --- eqjoinsel for normal inner join
2246  *
2247  * We also use this for LEFT/FULL outer joins; it's not presently clear
2248  * that it's worth trying to distinguish them here.
2249  */
2250 static double
2251 eqjoinsel_inner(Oid operator,
2252                                 VariableStatData *vardata1, VariableStatData *vardata2)
2253 {
2254         double          selec;
2255         double          nd1;
2256         double          nd2;
2257         bool            isdefault1;
2258         bool            isdefault2;
2259         Form_pg_statistic stats1 = NULL;
2260         Form_pg_statistic stats2 = NULL;
2261         bool            have_mcvs1 = false;
2262         Datum      *values1 = NULL;
2263         int                     nvalues1 = 0;
2264         float4     *numbers1 = NULL;
2265         int                     nnumbers1 = 0;
2266         bool            have_mcvs2 = false;
2267         Datum      *values2 = NULL;
2268         int                     nvalues2 = 0;
2269         float4     *numbers2 = NULL;
2270         int                     nnumbers2 = 0;
2271
2272         nd1 = get_variable_numdistinct(vardata1, &isdefault1);
2273         nd2 = get_variable_numdistinct(vardata2, &isdefault2);
2274
2275         if (HeapTupleIsValid(vardata1->statsTuple))
2276         {
2277                 stats1 = (Form_pg_statistic) GETSTRUCT(vardata1->statsTuple);
2278                 have_mcvs1 = get_attstatsslot(vardata1->statsTuple,
2279                                                                           vardata1->atttype,
2280                                                                           vardata1->atttypmod,
2281                                                                           STATISTIC_KIND_MCV,
2282                                                                           InvalidOid,
2283                                                                           NULL,
2284                                                                           &values1, &nvalues1,
2285                                                                           &numbers1, &nnumbers1);
2286         }
2287
2288         if (HeapTupleIsValid(vardata2->statsTuple))
2289         {
2290                 stats2 = (Form_pg_statistic) GETSTRUCT(vardata2->statsTuple);
2291                 have_mcvs2 = get_attstatsslot(vardata2->statsTuple,
2292                                                                           vardata2->atttype,
2293                                                                           vardata2->atttypmod,
2294                                                                           STATISTIC_KIND_MCV,
2295                                                                           InvalidOid,
2296                                                                           NULL,
2297                                                                           &values2, &nvalues2,
2298                                                                           &numbers2, &nnumbers2);
2299         }
2300
2301         if (have_mcvs1 && have_mcvs2)
2302         {
2303                 /*
2304                  * We have most-common-value lists for both relations.  Run through
2305                  * the lists to see which MCVs actually join to each other with the
2306                  * given operator.  This allows us to determine the exact join
2307                  * selectivity for the portion of the relations represented by the MCV
2308                  * lists.  We still have to estimate for the remaining population, but
2309                  * in a skewed distribution this gives us a big leg up in accuracy.
2310                  * For motivation see the analysis in Y. Ioannidis and S.
2311                  * Christodoulakis, "On the propagation of errors in the size of join
2312                  * results", Technical Report 1018, Computer Science Dept., University
2313                  * of Wisconsin, Madison, March 1991 (available from ftp.cs.wisc.edu).
2314                  */
2315                 FmgrInfo        eqproc;
2316                 bool       *hasmatch1;
2317                 bool       *hasmatch2;
2318                 double          nullfrac1 = stats1->stanullfrac;
2319                 double          nullfrac2 = stats2->stanullfrac;
2320                 double          matchprodfreq,
2321                                         matchfreq1,
2322                                         matchfreq2,
2323                                         unmatchfreq1,
2324                                         unmatchfreq2,
2325                                         otherfreq1,
2326                                         otherfreq2,
2327                                         totalsel1,
2328                                         totalsel2;
2329                 int                     i,
2330                                         nmatches;
2331
2332                 fmgr_info(get_opcode(operator), &eqproc);
2333                 hasmatch1 = (bool *) palloc0(nvalues1 * sizeof(bool));
2334                 hasmatch2 = (bool *) palloc0(nvalues2 * sizeof(bool));
2335
2336                 /*
2337                  * Note we assume that each MCV will match at most one member of the
2338                  * other MCV list.  If the operator isn't really equality, there could
2339                  * be multiple matches --- but we don't look for them, both for speed
2340                  * and because the math wouldn't add up...
2341                  */
2342                 matchprodfreq = 0.0;
2343                 nmatches = 0;
2344                 for (i = 0; i < nvalues1; i++)
2345                 {
2346                         int                     j;
2347
2348                         for (j = 0; j < nvalues2; j++)
2349                         {
2350                                 if (hasmatch2[j])
2351                                         continue;
2352                                 if (DatumGetBool(FunctionCall2Coll(&eqproc,
2353                                                                                                    DEFAULT_COLLATION_OID,
2354                                                                                                    values1[i],
2355                                                                                                    values2[j])))
2356                                 {
2357                                         hasmatch1[i] = hasmatch2[j] = true;
2358                                         matchprodfreq += numbers1[i] * numbers2[j];
2359                                         nmatches++;
2360                                         break;
2361                                 }
2362                         }
2363                 }
2364                 CLAMP_PROBABILITY(matchprodfreq);
2365                 /* Sum up frequencies of matched and unmatched MCVs */
2366                 matchfreq1 = unmatchfreq1 = 0.0;
2367                 for (i = 0; i < nvalues1; i++)
2368                 {
2369                         if (hasmatch1[i])
2370                                 matchfreq1 += numbers1[i];
2371                         else
2372                                 unmatchfreq1 += numbers1[i];
2373                 }
2374                 CLAMP_PROBABILITY(matchfreq1);
2375                 CLAMP_PROBABILITY(unmatchfreq1);
2376                 matchfreq2 = unmatchfreq2 = 0.0;
2377                 for (i = 0; i < nvalues2; i++)
2378                 {
2379                         if (hasmatch2[i])
2380                                 matchfreq2 += numbers2[i];
2381                         else
2382                                 unmatchfreq2 += numbers2[i];
2383                 }
2384                 CLAMP_PROBABILITY(matchfreq2);
2385                 CLAMP_PROBABILITY(unmatchfreq2);
2386                 pfree(hasmatch1);
2387                 pfree(hasmatch2);
2388
2389                 /*
2390                  * Compute total frequency of non-null values that are not in the MCV
2391                  * lists.
2392                  */
2393                 otherfreq1 = 1.0 - nullfrac1 - matchfreq1 - unmatchfreq1;
2394                 otherfreq2 = 1.0 - nullfrac2 - matchfreq2 - unmatchfreq2;
2395                 CLAMP_PROBABILITY(otherfreq1);
2396                 CLAMP_PROBABILITY(otherfreq2);
2397
2398                 /*
2399                  * We can estimate the total selectivity from the point of view of
2400                  * relation 1 as: the known selectivity for matched MCVs, plus
2401                  * unmatched MCVs that are assumed to match against random members of
2402                  * relation 2's non-MCV population, plus non-MCV values that are
2403                  * assumed to match against random members of relation 2's unmatched
2404                  * MCVs plus non-MCV values.
2405                  */
2406                 totalsel1 = matchprodfreq;
2407                 if (nd2 > nvalues2)
2408                         totalsel1 += unmatchfreq1 * otherfreq2 / (nd2 - nvalues2);
2409                 if (nd2 > nmatches)
2410                         totalsel1 += otherfreq1 * (otherfreq2 + unmatchfreq2) /
2411                                 (nd2 - nmatches);
2412                 /* Same estimate from the point of view of relation 2. */
2413                 totalsel2 = matchprodfreq;
2414                 if (nd1 > nvalues1)
2415                         totalsel2 += unmatchfreq2 * otherfreq1 / (nd1 - nvalues1);
2416                 if (nd1 > nmatches)
2417                         totalsel2 += otherfreq2 * (otherfreq1 + unmatchfreq1) /
2418                                 (nd1 - nmatches);
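                /*
                 * Illustration with made-up numbers (not from any real statistics):
                 * if matchprodfreq = 0.10, otherfreq1 = 0.30, otherfreq2 = 0.25,
                 * unmatchfreq1 = 0.05, unmatchfreq2 = 0.10, nvalues2 = 20, nd2 = 100
                 * and nmatches = 15, then totalsel1 = 0.10 + 0.05*0.25/80 +
                 * 0.30*(0.25+0.10)/85 ~= 0.1014.  totalsel2 is computed symmetrically
                 * and the smaller of the two is used below.
                 */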
2419
2420                 /*
2421                  * Use the smaller of the two estimates.  This can be justified in
2422                  * essentially the same terms as given below for the no-stats case: to
2423                  * a first approximation, we are estimating from the point of view of
2424                  * the relation with smaller nd.
2425                  */
2426                 selec = (totalsel1 < totalsel2) ? totalsel1 : totalsel2;
2427         }
2428         else
2429         {
2430                 /*
2431                  * We do not have MCV lists for both sides.  Estimate the join
2432                  * selectivity as MIN(1/nd1,1/nd2)*(1-nullfrac1)*(1-nullfrac2). This
2433                  * is plausible if we assume that the join operator is strict and the
2434                  * non-null values are about equally distributed: a given non-null
2435                  * tuple of rel1 will join to either zero or N2*(1-nullfrac2)/nd2 rows
2436                  * of rel2, so total join rows are at most
2437                  * N1*(1-nullfrac1)*N2*(1-nullfrac2)/nd2 giving a join selectivity of
2438                  * not more than (1-nullfrac1)*(1-nullfrac2)/nd2. By the same logic it
2439                  * is not more than (1-nullfrac1)*(1-nullfrac2)/nd1, so the expression
2440                  * with MIN() is an upper bound.  Using the MIN() means we estimate
2441                  * from the point of view of the relation with smaller nd (since the
2442                  * larger nd is determining the MIN).  It is reasonable to assume that
2443                  * most tuples in this rel will have join partners, so the bound is
2444                  * probably reasonably tight and should be taken as-is.
2445                  *
2446                  * XXX Can we be smarter if we have an MCV list for just one side? It
2447                  * seems that if we assume equal distribution for the other side, we
2448                  * end up with the same answer anyway.
2449                  */
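                /*
                 * Illustration with hypothetical numbers: nd1 = 100, nd2 = 1000,
                 * nullfrac1 = 0.10, nullfrac2 = 0.0 gives
                 * selec = 0.90 * 1.00 / 1000 = 0.0009.
                 */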
2450                 double          nullfrac1 = stats1 ? stats1->stanullfrac : 0.0;
2451                 double          nullfrac2 = stats2 ? stats2->stanullfrac : 0.0;
2452
2453                 selec = (1.0 - nullfrac1) * (1.0 - nullfrac2);
2454                 if (nd1 > nd2)
2455                         selec /= nd1;
2456                 else
2457                         selec /= nd2;
2458         }
2459
2460         if (have_mcvs1)
2461                 free_attstatsslot(vardata1->atttype, values1, nvalues1,
2462                                                   numbers1, nnumbers1);
2463         if (have_mcvs2)
2464                 free_attstatsslot(vardata2->atttype, values2, nvalues2,
2465                                                   numbers2, nnumbers2);
2466
2467         return selec;
2468 }
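/*
 * Illustrative standalone sketch (hypothetical values; not part of the
 * planner code): the essence of the MCV cross-matching above, with the join
 * operator replaced by plain integer equality and the null fractions taken as
 * zero.  It pairs up equal MCVs, accumulates matchprodfreq, then combines the
 * matched/unmatched/other frequencies into totalsel1 and totalsel2 and keeps
 * the smaller.  Compile it separately, e.g. "cc mcv_sketch.c && ./a.out".
 */
#include <stdio.h>
#include <stdbool.h>

int
main(void)
{
    /* hypothetical MCV lists for the two join columns */
    int         values1[] = {1, 2, 3, 4};
    double      numbers1[] = {0.30, 0.20, 0.10, 0.05};
    int         values2[] = {2, 3, 5};
    double      numbers2[] = {0.40, 0.25, 0.10};
    int         nvalues1 = 4,
                nvalues2 = 3;
    double      nd1 = 50.0,     /* assumed ndistinct estimates */
                nd2 = 40.0;
    bool        hasmatch1[4] = {false, false, false, false};
    bool        hasmatch2[3] = {false, false, false};
    double      matchprodfreq = 0.0,
                matchfreq1 = 0.0,
                unmatchfreq1 = 0.0,
                matchfreq2 = 0.0,
                unmatchfreq2 = 0.0,
                otherfreq1,
                otherfreq2,
                totalsel1,
                totalsel2;
    int         i,
                j,
                nmatches = 0;

    /* pair each MCV of side 1 with at most one equal MCV of side 2 */
    for (i = 0; i < nvalues1; i++)
    {
        for (j = 0; j < nvalues2; j++)
        {
            if (hasmatch2[j])
                continue;
            if (values1[i] == values2[j])
            {
                hasmatch1[i] = hasmatch2[j] = true;
                matchprodfreq += numbers1[i] * numbers2[j];
                nmatches++;
                break;
            }
        }
    }

    /* split each MCV list's total frequency into matched and unmatched */
    for (i = 0; i < nvalues1; i++)
    {
        if (hasmatch1[i])
            matchfreq1 += numbers1[i];
        else
            unmatchfreq1 += numbers1[i];
    }
    for (j = 0; j < nvalues2; j++)
    {
        if (hasmatch2[j])
            matchfreq2 += numbers2[j];
        else
            unmatchfreq2 += numbers2[j];
    }

    /* frequency of (non-null) values outside the MCV lists */
    otherfreq1 = 1.0 - matchfreq1 - unmatchfreq1;
    otherfreq2 = 1.0 - matchfreq2 - unmatchfreq2;

    /* the nd > nvalues and nd > nmatches guards hold for these numbers */
    totalsel1 = matchprodfreq +
        unmatchfreq1 * otherfreq2 / (nd2 - nvalues2) +
        otherfreq1 * (otherfreq2 + unmatchfreq2) / (nd2 - nmatches);
    totalsel2 = matchprodfreq +
        unmatchfreq2 * otherfreq1 / (nd1 - nvalues1) +
        otherfreq2 * (otherfreq1 + unmatchfreq1) / (nd1 - nmatches);

    /* prints roughly 0.109, the smaller of the two viewpoints */
    printf("estimated join selectivity = %g\n",
           totalsel1 < totalsel2 ? totalsel1 : totalsel2);
    return 0;
}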
2469
2470 /*
2471  * eqjoinsel_semi --- eqjoinsel for semi join
2472  *
2473  * (Also used for anti join, which we are supposed to estimate the same way.)
2474  * Caller has ensured that vardata1 is the LHS variable.
2475  */
2476 static double
2477 eqjoinsel_semi(Oid operator,
2478                            VariableStatData *vardata1, VariableStatData *vardata2,
2479                            RelOptInfo *inner_rel)
2480 {
2481         double          selec;
2482         double          nd1;
2483         double          nd2;
2484         bool            isdefault1;
2485         bool            isdefault2;
2486         Form_pg_statistic stats1 = NULL;
2487         bool            have_mcvs1 = false;
2488         Datum      *values1 = NULL;
2489         int                     nvalues1 = 0;
2490         float4     *numbers1 = NULL;
2491         int                     nnumbers1 = 0;
2492         bool            have_mcvs2 = false;
2493         Datum      *values2 = NULL;
2494         int                     nvalues2 = 0;
2495         float4     *numbers2 = NULL;
2496         int                     nnumbers2 = 0;
2497
2498         nd1 = get_variable_numdistinct(vardata1, &isdefault1);
2499         nd2 = get_variable_numdistinct(vardata2, &isdefault2);
2500
2501         /*
2502          * We clamp nd2 to be not more than what we estimate the inner relation's
2503          * size to be.  This is intuitively somewhat reasonable since obviously
2504          * there can't be more than that many distinct values coming from the
2505          * inner rel.  The reason for the asymmetry (ie, that we don't clamp nd1
2506          * likewise) is that this is the only pathway by which restriction clauses
2507          * applied to the inner rel will affect the join result size estimate,
2508          * since set_joinrel_size_estimates will multiply SEMI/ANTI selectivity by
2509          * only the outer rel's size.  If we clamped nd1 we'd be double-counting
2510          * the selectivity of outer-rel restrictions.
2511          *
2512          * We can apply this clamping both with respect to the base relation from
2513          * which the join variable comes (if there is just one), and to the
2514          * immediate inner input relation of the current join.
2515          *
2516          * If we clamp, we can treat nd2 as being a non-default estimate; it's not
2517          * great, maybe, but it didn't come out of nowhere either.  This is most
2518          * helpful when the inner relation is empty and consequently has no stats.
2519          */
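        /*
         * For example (hypothetical numbers): if the stats say nd2 = 10000 but
         * restriction clauses reduce the inner rel to an estimated 500 rows, we
         * proceed with nd2 = 500 and treat it as a non-default estimate.
         */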
2520         if (vardata2->rel)
2521         {
2522                 if (nd2 >= vardata2->rel->rows)
2523                 {
2524                         nd2 = vardata2->rel->rows;
2525                         isdefault2 = false;
2526                 }
2527         }
2528         if (nd2 >= inner_rel->rows)
2529         {
2530                 nd2 = inner_rel->rows;
2531                 isdefault2 = false;
2532         }
2533
2534         if (HeapTupleIsValid(vardata1->statsTuple))
2535         {
2536                 stats1 = (Form_pg_statistic) GETSTRUCT(vardata1->statsTuple);
2537                 have_mcvs1 = get_attstatsslot(vardata1->statsTuple,
2538                                                                           vardata1->atttype,
2539                                                                           vardata1->atttypmod,
2540                                                                           STATISTIC_KIND_MCV,
2541                                                                           InvalidOid,
2542                                                                           NULL,
2543                                                                           &values1, &nvalues1,
2544                                                                           &numbers1, &nnumbers1);
2545         }
2546
2547         if (HeapTupleIsValid(vardata2->statsTuple))
2548         {
2549                 have_mcvs2 = get_attstatsslot(vardata2->statsTuple,
2550                                                                           vardata2->atttype,
2551                                                                           vardata2->atttypmod,
2552                                                                           STATISTIC_KIND_MCV,
2553                                                                           InvalidOid,
2554                                                                           NULL,
2555                                                                           &values2, &nvalues2,
2556                                                                           &numbers2, &nnumbers2);
2557         }
2558
2559         if (have_mcvs1 && have_mcvs2 && OidIsValid(operator))
2560         {
2561                 /*
2562                  * We have most-common-value lists for both relations.  Run through
2563                  * the lists to see which MCVs actually join to each other with the
2564                  * given operator.  This allows us to determine the exact join
2565                  * selectivity for the portion of the relations represented by the MCV
2566                  * lists.  We still have to estimate for the remaining population, but
2567                  * in a skewed distribution this gives us a big leg up in accuracy.
2568                  */
2569                 FmgrInfo        eqproc;
2570                 bool       *hasmatch1;
2571                 bool       *hasmatch2;
2572                 double          nullfrac1 = stats1->stanullfrac;
2573                 double          matchfreq1,
2574                                         uncertainfrac,
2575                                         uncertain;
2576                 int                     i,
2577                                         nmatches,
2578                                         clamped_nvalues2;
2579
2580                 /*
2581                  * The clamping above could have resulted in nd2 being less than
2582                  * nvalues2; in which case, we assume that precisely the nd2 most
2583                  * common values in the relation will appear in the join input, and so
2584                  * compare to only the first nd2 members of the MCV list.  Of course
2585                  * this is frequently wrong, but it's the best bet we can make.
2586                  */
2587                 clamped_nvalues2 = Min(nvalues2, nd2);
2588
2589                 fmgr_info(get_opcode(operator), &eqproc);
2590                 hasmatch1 = (bool *) palloc0(nvalues1 * sizeof(bool));
2591                 hasmatch2 = (bool *) palloc0(clamped_nvalues2 * sizeof(bool));
2592
2593                 /*
2594                  * Note we assume that each MCV will match at most one member of the
2595                  * other MCV list.  If the operator isn't really equality, there could
2596                  * be multiple matches --- but we don't look for them, both for speed
2597                  * and because the math wouldn't add up...
2598                  */
2599                 nmatches = 0;
2600                 for (i = 0; i < nvalues1; i++)
2601                 {
2602                         int                     j;
2603
2604                         for (j = 0; j < clamped_nvalues2; j++)
2605                         {
2606                                 if (hasmatch2[j])
2607                                         continue;
2608                                 if (DatumGetBool(FunctionCall2Coll(&eqproc,
2609                                                                                                    DEFAULT_COLLATION_OID,
2610                                                                                                    values1[i],
2611                                                                                                    values2[j])))
2612                                 {
2613                                         hasmatch1[i] = hasmatch2[j] = true;
2614                                         nmatches++;
2615                                         break;
2616                                 }
2617                         }
2618                 }
2619                 /* Sum up frequencies of matched MCVs */
2620                 matchfreq1 = 0.0;
2621                 for (i = 0; i < nvalues1; i++)
2622                 {
2623                         if (hasmatch1[i])
2624                                 matchfreq1 += numbers1[i];
2625                 }
2626                 CLAMP_PROBABILITY(matchfreq1);
2627                 pfree(hasmatch1);
2628                 pfree(hasmatch2);
2629
2630                 /*
2631                  * Now we need to estimate the fraction of relation 1 that has at
2632                  * least one join partner.  We know for certain that the matched MCVs
2633                  * do, so that gives us a lower bound, but we're really in the dark
2634                  * about everything else.  Our crude approach is: if nd1 <= nd2 then
2635                  * assume all non-null rel1 rows have join partners, else assume for
2636                  * the uncertain rows that a fraction nd2/nd1 have join partners. We
2637                  * can discount the known-matched MCVs from the distinct-values counts
2638                  * before doing the division.
2639                  *
2640                  * Crude as the above is, it's completely useless if we don't have
2641                  * reliable ndistinct values for both sides.  Hence, if either nd1 or
2642                  * nd2 is default, punt and assume half of the uncertain rows have
2643                  * join partners.
2644                  */
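                /*
                 * Worked example with invented numbers: nd1 = 1000, nd2 = 200 and
                 * nmatches = 50 give uncertainfrac = (200-50)/(1000-50) ~= 0.158;
                 * with matchfreq1 = 0.30 and nullfrac1 = 0.05, uncertain = 0.65 and
                 * selec = 0.30 + 0.158*0.65 ~= 0.40.
                 */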
2645                 if (!isdefault1 && !isdefault2)
2646                 {
2647                         nd1 -= nmatches;
2648                         nd2 -= nmatches;
2649                         if (nd1 <= nd2 || nd2 < 0)
2650                                 uncertainfrac = 1.0;
2651                         else
2652                                 uncertainfrac = nd2 / nd1;
2653                 }
2654                 else
2655                         uncertainfrac = 0.5;
2656                 uncertain = 1.0 - matchfreq1 - nullfrac1;
2657                 CLAMP_PROBABILITY(uncertain);
2658                 selec = matchfreq1 + uncertainfrac * uncertain;
2659         }
2660         else
2661         {
2662                 /*
2663                  * Without MCV lists for both sides, we can only use the heuristic
2664                  * about nd1 vs nd2.
2665                  */
2666                 double          nullfrac1 = stats1 ? stats1->stanullfrac : 0.0;
2667
2668                 if (!isdefault1 && !isdefault2)
2669                 {
2670                         if (nd1 <= nd2 || nd2 < 0)
2671                                 selec = 1.0 - nullfrac1;
2672                         else
2673                                 selec = (nd2 / nd1) * (1.0 - nullfrac1);
2674                 }
2675                 else
2676                         selec = 0.5 * (1.0 - nullfrac1);
2677         }
2678
2679         if (have_mcvs1)
2680                 free_attstatsslot(vardata1->atttype, values1, nvalues1,
2681                                                   numbers1, nnumbers1);
2682         if (have_mcvs2)
2683                 free_attstatsslot(vardata2->atttype, values2, nvalues2,
2684                                                   numbers2, nnumbers2);
2685
2686         return selec;
2687 }
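/*
 * Illustrative standalone sketch (hypothetical inputs; not part of the
 * planner code): the no-MCV fallback heuristics used above, side by side.
 * eqjoinsel_inner falls back to MIN(1/nd1,1/nd2)*(1-nullfrac1)*(1-nullfrac2),
 * while eqjoinsel_semi falls back to the nd2/nd1 ratio, or 0.5 when either
 * ndistinct estimate is only a default.  Compile separately if desired.
 */
#include <stdio.h>
#include <stdbool.h>

static double
inner_fallback(double nd1, double nd2, double nullfrac1, double nullfrac2)
{
    double      selec = (1.0 - nullfrac1) * (1.0 - nullfrac2);

    return selec / (nd1 > nd2 ? nd1 : nd2);
}

static double
semi_fallback(double nd1, double nd2, double nullfrac1,
              bool isdefault1, bool isdefault2)
{
    if (isdefault1 || isdefault2)
        return 0.5 * (1.0 - nullfrac1);
    if (nd1 <= nd2)
        return 1.0 - nullfrac1;
    return (nd2 / nd1) * (1.0 - nullfrac1);
}

int
main(void)
{
    /* hypothetical stats: 100 and 1000 distinct values, 10% nulls on the LHS */
    printf("inner: %g\n", inner_fallback(100, 1000, 0.10, 0.0));   /* 0.0009 */
    printf("semi:  %g\n", semi_fallback(100, 1000, 0.10, false, false));    /* 0.9 */
    return 0;
}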
2688
2689 /*
2690  *              neqjoinsel              - Join selectivity of "!="
2691  */
2692 Datum
2693 neqjoinsel(PG_FUNCTION_ARGS)
2694 {
2695         PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
2696         Oid                     operator = PG_GETARG_OID(1);
2697         List       *args = (List *) PG_GETARG_POINTER(2);
2698         JoinType        jointype = (JoinType) PG_GETARG_INT16(3);
2699         SpecialJoinInfo *sjinfo = (SpecialJoinInfo *) PG_GETARG_POINTER(4);
2700         Oid                     eqop;
2701         float8          result;
2702
2703         /*
2704          * We want 1 - eqjoinsel() where the equality operator is the one
2705          * associated with this != operator, that is, its negator.
2706          */
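        /*
         * For instance, if eqjoinsel for the negator estimated 0.001, we would
         * return 1 - 0.001 = 0.999 here (illustrative numbers only).
         */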
2707         eqop = get_negator(operator);
2708         if (eqop)
2709         {
2710                 result = DatumGetFloat8(DirectFunctionCall5(eqjoinsel,
2711                                                                                                         PointerGetDatum(root),
2712                                                                                                         ObjectIdGetDatum(eqop),
2713                                                                                                         PointerGetDatum(args),
2714                                                                                                         Int16GetDatum(jointype),
2715                                                                                                         PointerGetDatum(sjinfo)));
2716         }
2717         else
2718         {
2719                 /* Use default selectivity (should we raise an error instead?) */
2720                 result = DEFAULT_EQ_SEL;
2721         }
2722         result = 1.0 - result;
2723         PG_RETURN_FLOAT8(result);
2724 }
2725
2726 /*
2727  *              scalarltjoinsel - Join selectivity of "<" and "<=" for scalars
2728  */
2729 Datum
2730 scalarltjoinsel(PG_FUNCTION_ARGS)
2731 {
2732         PG_RETURN_FLOAT8(DEFAULT_INEQ_SEL);
2733 }
2734
2735 /*
2736  *              scalargtjoinsel - Join selectivity of ">" and ">=" for scalars
2737  */
2738 Datum
2739 scalargtjoinsel(PG_FUNCTION_ARGS)
2740 {
2741         PG_RETURN_FLOAT8(DEFAULT_INEQ_SEL);
2742 }
2743
2744 /*
2745  * patternjoinsel               - Generic code for pattern-match join selectivity.
2746  */
2747 static double
2748 patternjoinsel(PG_FUNCTION_ARGS, Pattern_Type ptype, bool negate)
2749 {
2750         /* For the moment we just punt. */
2751         return negate ? (1.0 - DEFAULT_MATCH_SEL) : DEFAULT_MATCH_SEL;
2752 }
2753
2754 /*
2755  *              regexeqjoinsel  - Join selectivity of regular-expression pattern match.
2756  */
2757 Datum
2758 regexeqjoinsel(PG_FUNCTION_ARGS)
2759 {
2760         PG_RETURN_FLOAT8(patternjoinsel(fcinfo, Pattern_Type_Regex, false));
2761 }
2762
2763 /*
2764  *              icregexeqjoinsel        - Join selectivity of case-insensitive regex match.
2765  */
2766 Datum
2767 icregexeqjoinsel(PG_FUNCTION_ARGS)
2768 {
2769         PG_RETURN_FLOAT8(patternjoinsel(fcinfo, Pattern_Type_Regex_IC, false));
2770 }
2771
2772 /*
2773  *              likejoinsel                     - Join selectivity of LIKE pattern match.
2774  */
2775 Datum
2776 likejoinsel(PG_FUNCTION_ARGS)
2777 {
2778         PG_RETURN_FLOAT8(patternjoinsel(fcinfo, Pattern_Type_Like, false));
2779 }
2780
2781 /*
2782  *              iclikejoinsel                   - Join selectivity of ILIKE pattern match.
2783  */
2784 Datum
2785 iclikejoinsel(PG_FUNCTION_ARGS)
2786 {
2787         PG_RETURN_FLOAT8(patternjoinsel(fcinfo, Pattern_Type_Like_IC, false));
2788 }
2789
2790 /*
2791  *              regexnejoinsel  - Join selectivity of regex non-match.
2792  */
2793 Datum
2794 regexnejoinsel(PG_FUNCTION_ARGS)
2795 {
2796         PG_RETURN_FLOAT8(patternjoinsel(fcinfo, Pattern_Type_Regex, true));
2797 }
2798
2799 /*
2800  *              icregexnejoinsel        - Join selectivity of case-insensitive regex non-match.
2801  */
2802 Datum
2803 icregexnejoinsel(PG_FUNCTION_ARGS)
2804 {
2805         PG_RETURN_FLOAT8(patternjoinsel(fcinfo, Pattern_Type_Regex_IC, true));
2806 }
2807
2808 /*
2809  *              nlikejoinsel            - Join selectivity of LIKE pattern non-match.
2810  */
2811 Datum
2812 nlikejoinsel(PG_FUNCTION_ARGS)
2813 {
2814         PG_RETURN_FLOAT8(patternjoinsel(fcinfo, Pattern_Type_Like, true));
2815 }
2816
2817 /*
2818  *              icnlikejoinsel          - Join selectivity of ILIKE pattern non-match.
2819  */
2820 Datum
2821 icnlikejoinsel(PG_FUNCTION_ARGS)
2822 {
2823         PG_RETURN_FLOAT8(patternjoinsel(fcinfo, Pattern_Type_Like_IC, true));
2824 }
2825
2826 /*
2827  * mergejoinscansel                     - Scan selectivity of merge join.
2828  *
2829  * A merge join will stop as soon as it exhausts either input stream.
2830  * Therefore, if we can estimate the ranges of both input variables,
2831  * we can estimate how much of the input will actually be read.  This
2832  * can have a considerable impact on the cost when using indexscans.
2833  *
2834  * Also, we can estimate how much of each input has to be read before the
2835  * first join pair is found, which will affect the join's startup time.
2836  *
2837  * clause should be a clause already known to be mergejoinable.  opfamily,
2838  * strategy, and nulls_first specify the sort ordering being used.
2839  *
2840  * The outputs are:
2841  *              *leftstart is set to the fraction of the left-hand variable expected
2842  *               to be scanned before the first join pair is found (0 to 1).
2843  *              *leftend is set to the fraction of the left-hand variable expected
2844  *               to be scanned before the join terminates (0 to 1).
2845  *              *rightstart, *rightend similarly for the right-hand variable.
2846  */
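/*
 * Illustration (hypothetical, uniformly distributed stats): if the left-hand
 * variable ranges over 1..100 and the right-hand variable over 40..70 under an
 * ascending sort, the merge stops once the left side passes 70, so *leftend is
 * about 0.7 while *rightend stays 1.0; the first join pair cannot appear until
 * the left side reaches 40, so *leftstart is about 0.4 and *rightstart is 0.
 */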
2847 void
2848 mergejoinscansel(PlannerInfo *root, Node *clause,
2849                                  Oid opfamily, int strategy, bool nulls_first,
2850                                  Selectivity *leftstart, Selectivity *leftend,
2851                                  Selectivity *rightstart, Selectivity *rightend)
2852 {
2853         Node       *left,
2854                            *right;
2855         VariableStatData leftvar,
2856                                 rightvar;
2857         int                     op_strategy;
2858         Oid                     op_lefttype;
2859         Oid                     op_righttype;
2860         Oid                     opno,
2861                                 lsortop,
2862                                 rsortop,
2863                                 lstatop,
2864                                 rstatop,
2865                                 ltop,
2866                                 leop,
2867                                 revltop,
2868                                 revleop;
2869         bool            isgt;
2870         Datum           leftmin,
2871                                 leftmax,
2872                                 rightmin,
2873                                 rightmax;
2874         double          selec;
2875
2876         /* Set default results if we can't figure anything out. */
2877         /* XXX should default "start" fraction be a bit more than 0? */
2878         *leftstart = *rightstart = 0.0;
2879         *leftend = *rightend = 1.0;
2880
2881         /* Deconstruct the merge clause */
2882         if (!is_opclause(clause))
2883                 return;                                 /* shouldn't happen */
2884         opno = ((OpExpr *) clause)->opno;
2885         left = get_leftop((Expr *) clause);
2886         right = get_rightop((Expr *) clause);
2887         if (!right)
2888                 return;                                 /* shouldn't happen */
2889
2890         /* Look for stats for the inputs */
2891         examine_variable(root, left, 0, &leftvar);
2892         examine_variable(root, right, 0, &rightvar);
2893
2894         /* Extract the operator's declared left/right datatypes */
2895         get_op_opfamily_properties(opno, opfamily, false,
2896                                                            &op_strategy,
2897                                                            &op_lefttype,
2898                                                            &op_righttype);
2899         Assert(op_strategy == BTEqualStrategyNumber);
2900
2901         /*
2902          * Look up the various operators we need.  If we don't find them all, it
2903          * probably means the opfamily is broken, but we just fail silently.
2904          *
2905          * Note: we expect that pg_statistic histograms will be sorted by the '<'
2906          * operator, regardless of which sort direction we are considering.
2907          */
2908         switch (strategy)
2909         {
2910                 case BTLessStrategyNumber:
2911                         isgt = false;
2912                         if (op_lefttype == op_righttype)
2913                         {
2914                                 /* easy case */
2915                                 ltop = get_opfamily_member(opfamily,
2916                                                                                    op_lefttype, op_righttype,
2917                                                                                    BTLessStrategyNumber);
2918                                 leop = get_opfamily_member(opfamily,
2919                                                                                    op_lefttype, op_righttype,
2920                                                                                    BTLessEqualStrategyNumber);
2921                                 lsortop = ltop;
2922                                 rsortop = ltop;
2923                                 lstatop = lsortop;
2924                                 rstatop = rsortop;
2925                                 revltop = ltop;
2926                                 revleop = leop;
2927                         }
2928                         else
2929                         {
2930                                 ltop = get_opfamily_member(opfamily,
2931                                                                                    op_lefttype, op_righttype,
2932                                                                                    BTLessStrategyNumber);
2933                                 leop = get_opfamily_member(opfamily,
2934                                                                                    op_lefttype, op_righttype,
2935                                                                                    BTLessEqualStrategyNumber);
2936                                 lsortop = get_opfamily_member(opfamily,
2937                                                                                           op_lefttype, op_lefttype,
2938                                                                                           BTLessStrategyNumber);
2939                                 rsortop = get_opfamily_member(opfamily,
2940                                                                                           op_righttype, op_righttype,
2941                                                                                           BTLessStrategyNumber);
2942                                 lstatop = lsortop;
2943                                 rstatop = rsortop;
2944                                 revltop = get_opfamily_member(opfamily,
2945                                                                                           op_righttype, op_lefttype,
2946                                                                                           BTLessStrategyNumber);
2947                                 revleop = get_opfamily_member(opfamily,
2948                                                                                           op_righttype, op_lefttype,
2949                                                                                           BTLessEqualStrategyNumber);
2950                         }
2951                         break;
2952                 case BTGreaterStrategyNumber:
2953                         /* descending-order case */
2954                         isgt = true;
2955                         if (op_lefttype == op_righttype)
2956                         {
2957                                 /* easy case */
2958                                 ltop = get_opfamily_member(opfamily,
2959                                                                                    op_lefttype, op_righttype,
2960                                                                                    BTGreaterStrategyNumber);
2961                                 leop = get_opfamily_member(opfamily,
2962                                                                                    op_lefttype, op_righttype,
2963                                                                                    BTGreaterEqualStrategyNumber);
2964                                 lsortop = ltop;
2965                                 rsortop = ltop;
2966                                 lstatop = get_opfamily_member(opfamily,
2967                                                                                           op_lefttype, op_lefttype,
2968                                                                                           BTLessStrategyNumber);
2969                                 rstatop = lstatop;
2970                                 revltop = ltop;
2971                                 revleop = leop;
2972                         }
2973                         else
2974                         {
2975                                 ltop = get_opfamily_member(opfamily,
2976                                                                                    op_lefttype, op_righttype,
2977                                                                                    BTGreaterStrategyNumber);
2978                                 leop = get_opfamily_member(opfamily,
2979                                                                                    op_lefttype, op_righttype,
2980                                                                                    BTGreaterEqualStrategyNumber);
2981                                 lsortop = get_opfamily_member(opfamily,
2982                                                                                           op_lefttype, op_lefttype,
2983                                                                                           BTGreaterStrategyNumber);
2984                                 rsortop = get_opfamily_member(opfamily,
2985                                                                                           op_righttype, op_righttype,
2986                                                                                           BTGreaterStrategyNumber);
2987                                 lstatop = get_opfamily_member(opfamily,
2988                                                                                           op_lefttype, op_lefttype,
2989                                                                                           BTLessStrategyNumber);
2990                                 rstatop = get_opfamily_member(opfamily,
2991                                                                                           op_righttype, op_righttype,
2992                                                                                           BTLessStrategyNumber);
2993                                 revltop = get_opfamily_member(opfamily,
2994                                                                                           op_righttype, op_lefttype,
2995                                                                                           BTGreaterStrategyNumber);
2996                                 revleop = get_opfamily_member(opfamily,
2997                                                                                           op_righttype, op_lefttype,
2998                                                                                           BTGreaterEqualStrategyNumber);
2999                         }
3000                         break;
3001                 default:
3002                         goto fail;                      /* shouldn't get here */
3003         }
3004
3005         if (!OidIsValid(lsortop) ||
3006                 !OidIsValid(rsortop) ||
3007                 !OidIsValid(lstatop) ||
3008                 !OidIsValid(rstatop) ||
3009                 !OidIsValid(ltop) ||
3010                 !OidIsValid(leop) ||
3011                 !OidIsValid(revltop) ||
3012                 !OidIsValid(revleop))
3013                 goto fail;                              /* insufficient info in catalogs */
3014
3015         /* Try to get ranges of both inputs */
3016         if (!isgt)
3017         {
3018                 if (!get_variable_range(root, &leftvar, lstatop,
3019                                                                 &leftmin, &leftmax))
3020                         goto fail;                      /* no range available from stats */
3021                 if (!get_variable_range(root, &rightvar, rstatop,
3022                                                                 &rightmin, &rightmax))
3023                         goto fail;                      /* no range available from stats */
3024         }
3025         else
3026         {
3027                 /* need to swap the max and min */
3028                 if (!get_variable_range(root, &leftvar, lstatop,
3029                                                                 &leftmax, &leftmin))
3030                         goto fail;                      /* no range available from stats */
3031                 if (!get_variable_range(root, &rightvar, rstatop,
3032                                                                 &rightmax, &rightmin))
3033                         goto fail;                      /* no range available from stats */
3034         }
3035
3036         /*
3037          * Now, the fraction of the left variable that will be scanned is the
3038          * fraction that's <= the right-side maximum value.  But only believe
3039          * non-default estimates, else stick with our 1.0.
3040          */
3041         selec = scalarineqsel(root, leop, isgt, &leftvar,
3042                                                   rightmax, op_righttype);
3043         if (selec != DEFAULT_INEQ_SEL)
3044                 *leftend = selec;
3045
3046         /* And similarly for the right variable. */
3047         selec = scalarineqsel(root, revleop, isgt, &rightvar,
3048                                                   leftmax, op_lefttype);
3049         if (selec != DEFAULT_INEQ_SEL)
3050                 *rightend = selec;
3051
3052         /*
3053          * Only one of the two "end" fractions can really be less than 1.0;
3054          * believe the smaller estimate and reset the other one to exactly 1.0. If
3055          * we get exactly equal estimates (as can easily happen with self-joins),
3056          * believe neither.
3057          */
3058         if (*leftend > *rightend)
3059                 *leftend = 1.0;
3060         else if (*leftend < *rightend)
3061                 *rightend = 1.0;
3062         else
3063                 *leftend = *rightend = 1.0;
3064
3065         /*
3066          * Also, the fraction of the left variable that will be scanned before the
3067          * first join pair is found is the fraction that's < the right-side
3068          * minimum value.  But only believe non-default estimates, else stick with
3069          * our own default.
3070          */
3071         selec = scalarineqsel(root, ltop, isgt, &leftvar,
3072                                                   rightmin, op_righttype);
3073         if (selec != DEFAULT_INEQ_SEL)
3074                 *leftstart = selec;
3075
3076         /* And similarly for the right variable. */
3077         selec = scalarineqsel(root, revltop, isgt, &rightvar,
3078                                                   leftmin, op_lefttype);
3079         if (selec != DEFAULT_INEQ_SEL)
3080                 *rightstart = selec;
3081
3082         /*
3083          * Only one of the two "start" fractions can really be more than zero;
3084          * believe the larger estimate and reset the other one to exactly 0.0. If
3085          * we get exactly equal estimates (as can easily happen with self-joins),
3086          * believe neither.
3087          */
3088         if (*leftstart < *rightstart)
3089                 *leftstart = 0.0;
3090         else if (*leftstart > *rightstart)
3091                 *rightstart = 0.0;
3092         else
3093                 *leftstart = *rightstart = 0.0;
3094
3095         /*
3096          * If the sort order is nulls-first, we're going to have to skip over any
3097          * nulls too.  These would not have been counted by scalarineqsel, and we
3098          * can safely add in this fraction regardless of whether we believe
3099          * scalarineqsel's results or not.  But be sure to clamp the sum to 1.0!
3100          */
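        /*
         * E.g., with a hypothetical stanullfrac of 0.2 on the left input, a
         * *leftstart of 0.1 becomes 0.3, since those nulls sort first and must
         * be read before any join pair can be produced.
         */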
3101         if (nulls_first)
3102         {
3103                 Form_pg_statistic stats;
3104
3105                 if (HeapTupleIsValid(leftvar.statsTuple))
3106                 {
3107                         stats = (Form_pg_statistic) GETSTRUCT(leftvar.statsTuple);
3108                         *leftstart += stats->stanullfrac;
3109                         CLAMP_PROBABILITY(*leftstart);
3110                         *leftend += stats->stanullfrac;
3111                         CLAMP_PROBABILITY(*leftend);
3112                 }
3113                 if (HeapTupleIsValid(rightvar.statsTuple))
3114                 {
3115                         stats = (Form_pg_statistic) GETSTRUCT(rightvar.statsTuple);
3116                         *rightstart += stats->stanullfrac;
3117                         CLAMP_PROBABILITY(*rightstart);
3118                         *rightend += stats->stanullfrac;
3119                         CLAMP_PROBABILITY(*rightend);
3120                 }
3121         }
3122
3123         /* Disbelieve start >= end, just in case that can happen */
3124         if (*leftstart >= *leftend)
3125         {
3126                 *leftstart = 0.0;
3127                 *leftend = 1.0;
3128         }
3129         if (*rightstart >= *rightend)
3130         {
3131                 *rightstart = 0.0;
3132                 *rightend = 1.0;
3133         }
3134
3135 fail:
3136         ReleaseVariableStats(leftvar);
3137         ReleaseVariableStats(rightvar);
3138 }
3139
3140
3141 /*
3142  * Helper routine for estimate_num_groups: add an item to a list of
3143  * GroupVarInfos, but only if it's not known equal to any of the existing
3144  * entries.
3145  */
3146 typedef struct
3147 {
3148         Node       *var;                        /* might be an expression, not just a Var */
3149         RelOptInfo *rel;                        /* relation it belongs to */
3150         double          ndistinct;              /* # distinct values */
3151 } GroupVarInfo;
3152
3153 static List *
3154 add_unique_group_var(PlannerInfo *root, List *varinfos,
3155                                          Node *var, VariableStatData *vardata)
3156 {
3157         GroupVarInfo *varinfo;
3158         double          ndistinct;
3159         bool            isdefault;
3160         ListCell   *lc;
3161
3162         ndistinct = get_variable_numdistinct(vardata, &isdefault);
3163
3164         /* cannot use foreach here because of possible list_delete */
3165         lc = list_head(varinfos);
3166         while (lc)
3167         {
3168                 varinfo = (GroupVarInfo *) lfirst(lc);
3169
3170                 /* must advance lc before list_delete possibly pfree's it */
3171                 lc = lnext(lc);
3172
3173                 /* Drop exact duplicates */
3174                 if (equal(var, varinfo->var))
3175                         return varinfos;
3176
3177                 /*
3178                  * Drop known-equal vars, but only if they belong to different
3179                  * relations (see comments for estimate_num_groups)
3180                  */
3181                 if (vardata->rel != varinfo->rel &&
3182                         exprs_known_equal(root, var, varinfo->var))
3183                 {
3184                         if (varinfo->ndistinct <= ndistinct)
3185                         {
3186                                 /* Keep older item, forget new one */
3187                                 return varinfos;
3188                         }
3189                         else
3190                         {
3191                                 /* Delete the older item */
3192                                 varinfos = list_delete_ptr(varinfos, varinfo);
3193                         }
3194                 }
3195         }
3196
3197         varinfo = (GroupVarInfo *) palloc(sizeof(GroupVarInfo));
3198
3199         varinfo->var = var;
3200         varinfo->rel = vardata->rel;
3201         varinfo->ndistinct = ndistinct;
3202         varinfos = lappend(varinfos, varinfo);
3203         return varinfos;
3204 }
3205
3206 /*
3207  * estimate_num_groups          - Estimate number of groups in a grouped query
3208  *
3209  * Given a query having a GROUP BY clause, estimate how many groups there
3210  * will be --- ie, the number of distinct combinations of the GROUP BY
3211  * expressions.
3212  *
3213  * This routine is also used to estimate the number of rows emitted by
3214  * a DISTINCT filtering step; that is an isomorphic problem.  (Note:
3215  * actually, we only use it for DISTINCT when there's no grouping or
3216  * aggregation ahead of the DISTINCT.)
3217  *
3218  * Inputs:
3219  *      root - the query
3220  *      groupExprs - list of expressions being grouped by
3221  *      input_rows - number of rows estimated to arrive at the group/unique
3222  *              filter step
3223  *      pgset - NULL, or a List** pointing to a grouping set to filter the
3224  *              groupExprs against
3225  *
3226  * Given the lack of any cross-correlation statistics in the system, it's
3227  * impossible to do anything really trustworthy with GROUP BY conditions
3228  * involving multiple Vars.  We should however avoid assuming the worst
3229  * case (all possible cross-product terms actually appear as groups) since
3230  * very often the grouped-by Vars are highly correlated.  Our current approach
3231  * is as follows:
3232  *      1.  Expressions yielding boolean are assumed to contribute two groups,
3233  *              independently of their content, and are ignored in the subsequent
3234  *              steps.  This is mainly because tests like "col IS NULL" break the
3235  *              heuristic used in step 2 especially badly.
3236  *      2.  Reduce the given expressions to a list of unique Vars used.  For
3237  *              example, GROUP BY a, a + b is treated the same as GROUP BY a, b.
3238  *              It is clearly correct not to count the same Var more than once.
3239  *              It is also reasonable to treat f(x) the same as x: f() cannot
3240  *              increase the number of distinct values (unless it is volatile,
3241  *              which we consider unlikely for grouping), but it probably won't
3242  *              reduce the number of distinct values much either.
3243  *              As a special case, if a GROUP BY expression can be matched to an
3244  *              expressional index for which we have statistics, then we treat the
3245  *              whole expression as though it were just a Var.
3246  *      3.  If the list contains Vars of different relations that are known equal
3247  *              due to equivalence classes, then drop all but one of the Vars from each
3248  *              known-equal set, keeping the one with smallest estimated # of values
3249  *              (since the extra values of the others can't appear in joined rows).
3250  *              Note the reason we only consider Vars of different relations is that
3251  *              if we considered ones of the same rel, we'd be double-counting the
3252  *              restriction selectivity of the equality in the next step.
3253  *      4.  For Vars within a single source rel, we multiply together the numbers
3254  *              of values, clamp to the number of rows in the rel (divided by 10 if
3255  *              more than one Var), and then multiply by a factor based on the
3256  *              selectivity of the restriction clauses for that rel.  When there's
3257  *              more than one Var, the initial product is probably too high (it's the
3258  *              worst case) but clamping to a fraction of the rel's rows seems to be a
3259  *              helpful heuristic for not letting the estimate get out of hand.  (The
3260  *              factor of 10 is derived from pre-Postgres-7.4 practice.)  The factor
3261  *              we multiply by to adjust for the restriction selectivity assumes that
3262  *              the restriction clauses are independent of the grouping, which may not
3263  *              be a valid assumption, but it's hard to do better.
3264  *      5.  If there are Vars from multiple rels, we repeat step 4 for each such
3265  *              rel, and multiply the results together.
3266  * Note that rels not containing grouped Vars are ignored completely, as are
3267  * join clauses.  Such rels cannot increase the number of groups, and we
3268  * assume such clauses do not reduce the number either (somewhat bogus,
3269  * but we don't have the info to do better).
3270  */
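/*
 * Worked illustration of the steps above (all numbers invented): for
 * GROUP BY flag, a, a + b, where flag is boolean and a, b come from one rel
 * with 10000 tuples, ndistinct(a) = 100 and ndistinct(b) = 50, step 1
 * contributes a factor of 2, step 2 reduces the remaining expressions to the
 * Vars a and b, and step 4 takes 100 * 50 = 5000, clamps it to 10000/10 = 1000,
 * and then adjusts for the rel's restriction selectivity; together with the
 * boolean factor that gives at most 2 * 1000 = 2000 groups before the final
 * clamp to input_rows.
 */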
3271 double
3272 estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows,
3273                                         List **pgset)
3274 {
3275         List       *varinfos = NIL;
3276         double          numdistinct;
3277         ListCell   *l;
3278         int                     i;
3279
3280         /*
3281          * We don't ever want to return an estimate of zero groups, as that tends
3282          * to lead to division-by-zero and other unpleasantness.  The input_rows
3283          * estimate is usually already at least 1, but clamp it just in case it
3284          * isn't.
3285          */
3286         input_rows = clamp_row_est(input_rows);
3287
3288         /*
3289          * If no grouping columns, there's exactly one group.  (This can't happen
3290          * for normal cases with GROUP BY or DISTINCT, but it is possible for
3291          * corner cases with set operations.)
3292          */
3293         if (groupExprs == NIL || (pgset && list_length(*pgset) < 1))
3294                 return 1.0;
3295
3296         /*
3297          * Count groups derived from boolean grouping expressions.  For other
3298          * expressions, find the unique Vars used, treating an expression as a Var
3299          * if we can find stats for it.  For each one, record the statistical
3300          * estimate of number of distinct values (total in its table, without
3301          * regard for filtering).
3302          */
3303         numdistinct = 1.0;
3304
3305         i = 0;
3306         foreach(l, groupExprs)
3307         {
3308                 Node       *groupexpr = (Node *) lfirst(l);
3309                 VariableStatData vardata;
3310                 List       *varshere;
3311                 ListCell   *l2;
3312
3313                 /* is expression in this grouping set? */
3314                 if (pgset && !list_member_int(*pgset, i++))
3315                         continue;
3316
3317                 /* Short-circuit for expressions returning boolean */
3318                 if (exprType(groupexpr) == BOOLOID)
3319                 {
3320                         numdistinct *= 2.0;
3321                         continue;
3322                 }
3323
3324                 /*
3325                  * If examine_variable is able to deduce anything about the GROUP BY
3326                  * expression, treat it as a single variable even if it's really more
3327                  * complicated.
3328                  */
3329                 examine_variable(root, groupexpr, 0, &vardata);
3330                 if (HeapTupleIsValid(vardata.statsTuple) || vardata.isunique)
3331                 {
3332                         varinfos = add_unique_group_var(root, varinfos,
3333                                                                                         groupexpr, &vardata);
3334                         ReleaseVariableStats(vardata);
3335                         continue;
3336                 }
3337                 ReleaseVariableStats(vardata);
3338
3339                 /*
3340                  * Else pull out the component Vars.  Handle PlaceHolderVars by
3341                  * recursing into their arguments (effectively assuming that the
3342                  * PlaceHolderVar doesn't change the number of groups, which boils
3343                  * down to ignoring the possible addition of nulls to the result set).
3344                  */
3345                 varshere = pull_var_clause(groupexpr,
3346                                                                    PVC_RECURSE_AGGREGATES |
3347                                                                    PVC_RECURSE_WINDOWFUNCS |
3348                                                                    PVC_RECURSE_PLACEHOLDERS);
3349
3350                 /*
3351                  * If we find any variable-free GROUP BY item, then either it is a
3352                  * constant (and we can ignore it) or it contains a volatile function;
3353                  * in the latter case we punt and assume that each input row will
3354                  * yield a distinct group.
3355                  */
3356                 if (varshere == NIL)
3357                 {
3358                         if (contain_volatile_functions(groupexpr))
3359                                 return input_rows;
3360                         continue;
3361                 }
3362
3363                 /*
3364                  * Else add variables to varinfos list
3365                  */
3366                 foreach(l2, varshere)
3367                 {
3368                         Node       *var = (Node *) lfirst(l2);
3369
3370                         examine_variable(root, var, 0, &vardata);
3371                         varinfos = add_unique_group_var(root, varinfos, var, &vardata);
3372                         ReleaseVariableStats(vardata);
3373                 }
3374         }
3375
3376         /*
3377          * If now no Vars, we must have an all-constant or all-boolean GROUP BY
3378          * list.
3379          */
3380         if (varinfos == NIL)
3381         {
3382                 /* Guard against out-of-range answers */
3383                 if (numdistinct > input_rows)
3384                         numdistinct = input_rows;
3385                 return numdistinct;
3386         }
3387
3388         /*
3389          * Group Vars by relation and estimate total numdistinct.
3390          *
3391          * For each iteration of the outer loop, we process the frontmost Var in
3392          * varinfos, plus all other Vars in the same relation.  We omit those
3393          * Vars from the newvarinfos list carried into the next iteration.  This
3394          * is the easiest way to group Vars of the same rel together.
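	 *
	 * For example, given grouping expressions a.x, b.y, a.z (in that order in
	 * varinfos), the first pass consumes a.x and a.z together and leaves b.y
	 * in newvarinfos for the second pass.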
3395          */
3396         do
3397         {
3398                 GroupVarInfo *varinfo1 = (GroupVarInfo *) linitial(varinfos);
3399                 RelOptInfo *rel = varinfo1->rel;
3400                 double          reldistinct = varinfo1->ndistinct;
3401                 double          relmaxndistinct = reldistinct;
3402                 int                     relvarcount = 1;
3403                 List       *newvarinfos = NIL;
3404
3405                 /*
3406                  * Get the product of numdistinct estimates of the Vars for this rel.
3407                  * Also, construct new varinfos list of remaining Vars.
3408                  */
3409                 for_each_cell(l, lnext(list_head(varinfos)))
3410                 {
3411                         GroupVarInfo *varinfo2 = (GroupVarInfo *) lfirst(l);
3412
3413                         if (varinfo2->rel == varinfo1->rel)
3414                         {
3415                                 reldistinct *= varinfo2->ndistinct;
3416                                 if (relmaxndistinct < varinfo2->ndistinct)
3417                                         relmaxndistinct = varinfo2->ndistinct;
3418                                 relvarcount++;
3419                         }
3420                         else
3421                         {
3422                                 /* not time to process varinfo2 yet */
3423                                 newvarinfos = lcons(varinfo2, newvarinfos);
3424                         }
3425                 }
3426
3427                 /*
3428                  * Sanity check --- don't divide by zero if empty relation.
3429                  */
3430                 Assert(rel->reloptkind == RELOPT_BASEREL);
3431                 if (rel->tuples > 0)
3432                 {
3433                         /*
3434                          * Clamp to size of rel, or size of rel / 10 if multiple Vars. The
3435                          * fudge factor is because the Vars are probably correlated but we
3436                          * don't know by how much.  We should never clamp to less than the
3437                          * largest ndistinct value for any of the Vars, though, since
3438                          * there will surely be at least that many groups.
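			 *
			 * For instance (purely illustrative numbers): with rel->tuples =
			 * 10000 and two grouping Vars having ndistinct 200 and 300, the
			 * raw product 60000 is limited to 10000 * 0.1 = 1000, which
			 * already exceeds relmaxndistinct = 300, so reldistinct becomes
			 * 1000.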
3439                          */
3440                         double          clamp = rel->tuples;
3441
3442                         if (relvarcount > 1)
3443                         {
3444                                 clamp *= 0.1;
3445                                 if (clamp < relmaxndistinct)
3446                                 {
3447                                         clamp = relmaxndistinct;
3448                                         /* for sanity in case some ndistinct is too large: */
3449                                         if (clamp > rel->tuples)
3450                                                 clamp = rel->tuples;
3451                                 }
3452                         }
3453                         if (reldistinct > clamp)
3454                                 reldistinct = clamp;
3455
3456                         /*
3457                          * Update the estimate based on the restriction selectivity,
3458                          * guarding against division by zero when reldistinct is zero.
3459                          * Also skip this if we know that we are returning all rows.
3460                          */
3461                         if (reldistinct > 0 && rel->rows < rel->tuples)
3462                         {
3463                                 /*
3464                                  * Given a table containing N rows with n distinct values in a
3465                                  * uniform distribution, if we select p rows at random then
3466                                  * the expected number of distinct values selected is
3467                                  *
3468                                  * n * (1 - product((N-N/n-i)/(N-i), i=0..p-1))
3469                                  *
3470                                  * = n * (1 - (N-N/n)! / (N-N/n-p)! * (N-p)! / N!)
3471                                  *
3472                                  * See "Approximating block accesses in database
3473                                  * organizations", S. B. Yao, Communications of the ACM,
3474                                  * Volume 20 Issue 4, April 1977 Pages 260-261.
3475                                  *
3476                                  * Alternatively, re-arranging the terms from the factorials,
3477                                  * this may be written as
3478                                  *
3479                                  * n * (1 - product((N-p-i)/(N-i), i=0..N/n-1))
3480                                  *
3481                                  * This form of the formula is more efficient to compute in
3482                                  * the common case where p is larger than N/n.  Additionally,
3483                                  * as pointed out by Dell'Era, if i << N for all terms in the
3484                                  * product, it can be approximated by
3485                                  *
3486                                  * n * (1 - ((N-p)/N)^(N/n))
3487                                  *
3488                                  * See "Expected distinct values when selecting from a bag
3489                                  * without replacement", Alberto Dell'Era,
3490                                  * http://www.adellera.it/investigations/distinct_balls/.
3491                                  *
3492                                  * The condition i << N is equivalent to n >> 1, so this is a
3493                                  * good approximation when the number of distinct values in
3494                                  * the table is large.  It turns out that this formula also
3495                                  * works well even when n is small.
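				 *
				 * As a worked example with round numbers: for N = 10000,
				 * n = 1000 and p = 100, the approximation gives
				 * 1000 * (1 - (9900/10000)^10) = 1000 * (1 - 0.99^10) ~= 95.6,
				 * i.e. selecting 1% of the rows is expected to hit a bit
				 * under 100 of the 1000 distinct values.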
3496                                  */
3497                                 reldistinct *=
3498                                         (1 - pow((rel->tuples - rel->rows) / rel->tuples,
3499                                                          rel->tuples / reldistinct));
3500                         }
3501                         reldistinct = clamp_row_est(reldistinct);
3502
3503                         /*
3504                          * Update estimate of total distinct groups.
3505                          */
3506                         numdistinct *= reldistinct;
3507                 }
3508
3509                 varinfos = newvarinfos;
3510         } while (varinfos != NIL);
3511
3512         numdistinct = ceil(numdistinct);
3513
3514         /* Guard against out-of-range answers */
3515         if (numdistinct > input_rows)
3516                 numdistinct = input_rows;
3517         if (numdistinct < 1.0)
3518                 numdistinct = 1.0;
3519
3520         return numdistinct;
3521 }
3522
3523 /*
3524  * Estimate hash bucketsize fraction (ie, number of entries in a bucket
3525  * divided by total tuples in relation) if the specified expression is used
3526  * as a hash key.
3527  *
3528  * XXX This is really pretty bogus since we're effectively assuming that the
3529  * distribution of hash keys will be the same after applying restriction
3530  * clauses as it was in the underlying relation.  However, we are not nearly
3531  * smart enough to figure out how the restrict clauses might change the
3532  * distribution, so this will have to do for now.
3533  *
3534  * We are passed the number of buckets the executor will use for the given
3535  * input relation.  If the data were perfectly distributed, with the same
3536  * number of tuples going into each available bucket, then the bucketsize
3537  * fraction would be 1/nbuckets.  But this happy state of affairs will occur
3538  * only if (a) there are at least nbuckets distinct data values, and (b)
3539  * we have a not-too-skewed data distribution.  Otherwise the buckets will
3540  * be nonuniformly occupied.  If the other relation in the join has a key
3541  * distribution similar to this one's, then the most-loaded buckets are
3542  * exactly those that will be probed most often.  Therefore, the "average"
3543  * bucket size for costing purposes should really be taken as something close
3544  * to the "worst case" bucket size.  We try to estimate this by adjusting the
3545  * fraction if there are too few distinct data values, and then scaling up
3546  * by the ratio of the most common value's frequency to the average frequency.
3547  *
3548  * If no statistics are available, use a default estimate of 0.1.  This will
3549  * discourage use of a hash rather strongly if the inner relation is large,
3550  * which is what we want.  We do not want to hash unless we know that the
3551  * inner rel is well-dispersed (or the alternatives seem much worse).
3552  */
3553 Selectivity
3554 estimate_hash_bucketsize(PlannerInfo *root, Node *hashkey, double nbuckets)
3555 {
3556         VariableStatData vardata;
3557         double          estfract,
3558                                 ndistinct,
3559                                 stanullfrac,
3560                                 mcvfreq,
3561                                 avgfreq;
3562         bool            isdefault;
3563         float4     *numbers;
3564         int                     nnumbers;
3565
3566         examine_variable(root, hashkey, 0, &vardata);
3567
3568         /* Get number of distinct values */
3569         ndistinct = get_variable_numdistinct(&vardata, &isdefault);
3570
3571         /* If ndistinct isn't real, punt and return 0.1, per comments above */
3572         if (isdefault)
3573         {
3574                 ReleaseVariableStats(vardata);
3575                 return (Selectivity) 0.1;
3576         }
3577
3578         /* Get fraction that are null */
3579         if (HeapTupleIsValid(vardata.statsTuple))
3580         {
3581                 Form_pg_statistic stats;
3582
3583                 stats = (Form_pg_statistic) GETSTRUCT(vardata.statsTuple);
3584                 stanullfrac = stats->stanullfrac;
3585         }
3586         else
3587                 stanullfrac = 0.0;
3588
3589         /* Compute avg freq of all distinct data values in raw relation */
3590         avgfreq = (1.0 - stanullfrac) / ndistinct;
3591
3592         /*
3593          * Adjust ndistinct to account for restriction clauses.  Observe we are
3594          * assuming that the data distribution is affected uniformly by the
3595          * restriction clauses!
3596          *
3597          * XXX A possibly better, but much more expensive, way: multiply by the
3598          * selectivity of the rel's restriction clauses that mention the target Var.
3599          */
3600         if (vardata.rel && vardata.rel->tuples > 0)
3601         {
3602                 ndistinct *= vardata.rel->rows / vardata.rel->tuples;
3603                 ndistinct = clamp_row_est(ndistinct);
3604         }
3605
3606         /*
3607          * Initial estimate of bucketsize fraction is 1/nbuckets as long as the
3608          * number of buckets is less than the expected number of distinct values;
3609          * otherwise it is 1/ndistinct.
3610          */
3611         if (ndistinct > nbuckets)
3612                 estfract = 1.0 / nbuckets;
3613         else
3614                 estfract = 1.0 / ndistinct;
3615
3616         /*
3617          * Look up the frequency of the most common value, if available.
3618          */
3619         mcvfreq = 0.0;
3620
3621         if (HeapTupleIsValid(vardata.statsTuple))
3622         {
3623                 if (get_attstatsslot(vardata.statsTuple,
3624                                                          vardata.atttype, vardata.atttypmod,
3625                                                          STATISTIC_KIND_MCV, InvalidOid,
3626                                                          NULL,
3627                                                          NULL, NULL,
3628                                                          &numbers, &nnumbers))
3629                 {
3630                         /*
3631                          * The first MCV stat is for the most common value.
3632                          */
3633                         if (nnumbers > 0)
3634                                 mcvfreq = numbers[0];
3635                         free_attstatsslot(vardata.atttype, NULL, 0,
3636                                                           numbers, nnumbers);
3637                 }
3638         }
3639
3640         /*
3641          * Adjust estimated bucketsize upward to account for skewed distribution.
3642          */
3643         if (avgfreq > 0.0 && mcvfreq > avgfreq)
3644                 estfract *= mcvfreq / avgfreq;
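
	/*
	 * To illustrate with made-up numbers: for a column with 1000 distinct
	 * values and no nulls, avgfreq = 0.001.  If the restriction clauses keep
	 * half the rows, ndistinct is scaled to ~500; with nbuckets = 1024 the
	 * initial estfract is 1/500 = 0.002.  If the most common value has
	 * frequency 0.05, the skew adjustment above multiplies by 50, giving a
	 * final bucketsize fraction of 0.1.
	 */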
3645
3646         /*
3647          * Clamp bucketsize to sane range (the above adjustment could easily
3648          * produce an out-of-range result).  We set the lower bound a little above
3649          * zero, since an estimate of exactly zero would not be very useful.
3650          */
3651         if (estfract < 1.0e-6)
3652                 estfract = 1.0e-6;
3653         else if (estfract > 1.0)
3654                 estfract = 1.0;
3655
3656         ReleaseVariableStats(vardata);
3657
3658         return (Selectivity) estfract;
3659 }
3660
3661
3662 /*-------------------------------------------------------------------------
3663  *
3664  * Support routines
3665  *
3666  *-------------------------------------------------------------------------
3667  */
3668
3669 /*
3670  * convert_to_scalar
3671  *        Convert non-NULL values of the indicated types to the comparison
3672  *        scale needed by scalarineqsel().
3673  *        Returns "true" if successful.
3674  *
3675  * XXX this routine is a hack: ideally we should look up the conversion
3676  * subroutines in pg_type.
3677  *
3678  * All numeric datatypes are simply converted to their equivalent
3679  * "double" values.  (NUMERIC values that are outside the range of "double"
3680  * are clamped to +/- HUGE_VAL.)
3681  *
3682  * String datatypes are converted by convert_string_to_scalar(),
3683  * which is explained below.  The reason why this routine deals with
3684  * three values at a time, not just one, is that we need it for strings.
3685  *
3686  * The bytea datatype is just different enough from the string types that
3687  * it has to be treated separately.
3688  *
3689  * The several datatypes representing absolute times are all converted
3690  * to Timestamp, which is actually a double, and then we just use that
3691  * double value.  Note this will give correct results even for the "special"
3692  * values of Timestamp, since those are chosen to compare correctly;
3693  * see timestamp_cmp.
3694  *
3695  * The several datatypes representing relative times (intervals) are all
3696  * converted to measurements expressed in seconds.
3697  */
3698 static bool
3699 convert_to_scalar(Datum value, Oid valuetypid, double *scaledvalue,
3700                                   Datum lobound, Datum hibound, Oid boundstypid,
3701                                   double *scaledlobound, double *scaledhibound)
3702 {
3703         /*
3704          * Both the valuetypid and the boundstypid should exactly match the
3705          * declared input type(s) of the operator we are invoked for, so we just
3706          * error out if either is not recognized.
3707          *
3708          * XXX The histogram we are interpolating between points of could belong
3709          * to a column that's only binary-compatible with the declared type. In
3710          * essence we are assuming that the semantics of binary-compatible types
3711          * are enough alike that we can use a histogram generated with one type's
3712          * operators to estimate selectivity for the other's.  This is outright
3713          * wrong in some cases --- in particular signed versus unsigned
3714          * interpretation could trip us up.  But it's useful enough in the
3715          * majority of cases that we do it anyway.  Should think about more
3716          * rigorous ways to do it.
3717          */
3718         switch (valuetypid)
3719         {
3720                         /*
3721                          * Built-in numeric types
3722                          */
3723                 case BOOLOID:
3724                 case INT2OID:
3725                 case INT4OID:
3726                 case INT8OID:
3727                 case FLOAT4OID:
3728                 case FLOAT8OID:
3729                 case NUMERICOID:
3730                 case OIDOID:
3731                 case REGPROCOID:
3732                 case REGPROCEDUREOID:
3733                 case REGOPEROID:
3734                 case REGOPERATOROID:
3735                 case REGCLASSOID:
3736                 case REGTYPEOID:
3737                 case REGCONFIGOID:
3738                 case REGDICTIONARYOID:
3739                 case REGROLEOID:
3740                 case REGNAMESPACEOID:
3741                         *scaledvalue = convert_numeric_to_scalar(value, valuetypid);
3742                         *scaledlobound = convert_numeric_to_scalar(lobound, boundstypid);
3743                         *scaledhibound = convert_numeric_to_scalar(hibound, boundstypid);
3744                         return true;
3745
3746                         /*
3747                          * Built-in string types
3748                          */
3749                 case CHAROID:
3750                 case BPCHAROID:
3751                 case VARCHAROID:
3752                 case TEXTOID:
3753                 case NAMEOID:
3754                         {
3755                                 char       *valstr = convert_string_datum(value, valuetypid);
3756                                 char       *lostr = convert_string_datum(lobound, boundstypid);
3757                                 char       *histr = convert_string_datum(hibound, boundstypid);
3758
3759                                 convert_string_to_scalar(valstr, scaledvalue,
3760                                                                                  lostr, scaledlobound,
3761                                                                                  histr, scaledhibound);
3762                                 pfree(valstr);
3763                                 pfree(lostr);
3764                                 pfree(histr);
3765                                 return true;
3766                         }
3767
3768                         /*
3769                          * Built-in bytea type
3770                          */
3771                 case BYTEAOID:
3772                         {
3773                                 convert_bytea_to_scalar(value, scaledvalue,
3774                                                                                 lobound, scaledlobound,
3775                                                                                 hibound, scaledhibound);
3776                                 return true;
3777                         }
3778
3779                         /*
3780                          * Built-in time types
3781                          */
3782                 case TIMESTAMPOID:
3783                 case TIMESTAMPTZOID:
3784                 case ABSTIMEOID:
3785                 case DATEOID:
3786                 case INTERVALOID:
3787                 case RELTIMEOID:
3788                 case TINTERVALOID:
3789                 case TIMEOID:
3790                 case TIMETZOID:
3791                         *scaledvalue = convert_timevalue_to_scalar(value, valuetypid);
3792                         *scaledlobound = convert_timevalue_to_scalar(lobound, boundstypid);
3793                         *scaledhibound = convert_timevalue_to_scalar(hibound, boundstypid);
3794                         return true;
3795
3796                         /*
3797                          * Built-in network types
3798                          */
3799                 case INETOID:
3800                 case CIDROID:
3801                 case MACADDROID:
3802                         *scaledvalue = convert_network_to_scalar(value, valuetypid);
3803                         *scaledlobound = convert_network_to_scalar(lobound, boundstypid);
3804                         *scaledhibound = convert_network_to_scalar(hibound, boundstypid);
3805                         return true;
3806         }
3807         /* Don't know how to convert */
3808         *scaledvalue = *scaledlobound = *scaledhibound = 0;
3809         return false;
3810 }
3811
3812 /*
3813  * Do convert_to_scalar()'s work for any numeric data type.
3814  */
3815 static double
3816 convert_numeric_to_scalar(Datum value, Oid typid)
3817 {
3818         switch (typid)
3819         {
3820                 case BOOLOID:
3821                         return (double) DatumGetBool(value);
3822                 case INT2OID:
3823                         return (double) DatumGetInt16(value);
3824                 case INT4OID:
3825                         return (double) DatumGetInt32(value);
3826                 case INT8OID:
3827                         return (double) DatumGetInt64(value);
3828                 case FLOAT4OID:
3829                         return (double) DatumGetFloat4(value);
3830                 case FLOAT8OID:
3831                         return (double) DatumGetFloat8(value);
3832                 case NUMERICOID:
3833                         /* Note: out-of-range values will be clamped to +-HUGE_VAL */
3834                         return (double)
3835                                 DatumGetFloat8(DirectFunctionCall1(numeric_float8_no_overflow,
3836                                                                                                    value));
3837                 case OIDOID:
3838                 case REGPROCOID:
3839                 case REGPROCEDUREOID:
3840                 case REGOPEROID:
3841                 case REGOPERATOROID:
3842                 case REGCLASSOID:
3843                 case REGTYPEOID:
3844                 case REGCONFIGOID:
3845                 case REGDICTIONARYOID:
3846                 case REGROLEOID:
3847                 case REGNAMESPACEOID:
3848                         /* we can treat OIDs as integers... */
3849                         return (double) DatumGetObjectId(value);
3850         }
3851
3852         /*
3853          * Can't get here unless someone tries to use scalarltsel/scalargtsel on
3854          * an operator with one numeric and one non-numeric operand.
3855          */
3856         elog(ERROR, "unsupported type: %u", typid);
3857         return 0;
3858 }
3859
3860 /*
3861  * Do convert_to_scalar()'s work for any character-string data type.
3862  *
3863  * String datatypes are converted to a scale that ranges from 0 to 1,
3864  * where we visualize the bytes of the string as fractional digits.
3865  *
3866  * We do not want the base to be 256, however, since that tends to
3867  * generate inflated selectivity estimates; few databases will have
3868  * occurrences of all 256 possible byte values at each position.
3869  * Instead, use the smallest and largest byte values seen in the bounds
3870  * as the estimated range for each byte, after some fudging to deal with
3871  * the fact that we probably aren't going to see the full range that way.
3872  *
3873  * An additional refinement is that we discard any common prefix of the
3874  * three strings before computing the scaled values.  This allows us to
3875  * "zoom in" when we encounter a narrow data range.  An example is a phone
3876  * number database where all the values begin with the same area code.
3877  * (Actually, the bounds will be adjacent histogram-bin-boundary values,
3878  * so this is more likely to happen than you might think.)
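 *
 * For example, if the histogram bounds are "555-1234" and "555-9999" and the
 * probe value is "555-4321", the shared prefix "555-" is stripped and the
 * scaled outputs are computed from "1234", "9999" and "4321" alone, so the
 * estimate can distinguish values within the common area code.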
3879  */
3880 static void
3881 convert_string_to_scalar(char *value,
3882                                                  double *scaledvalue,
3883                                                  char *lobound,
3884                                                  double *scaledlobound,
3885                                                  char *hibound,
3886                                                  double *scaledhibound)
3887 {
3888         int                     rangelo,
3889                                 rangehi;
3890         char       *sptr;
3891
3892         rangelo = rangehi = (unsigned char) hibound[0];
3893         for (sptr = lobound; *sptr; sptr++)
3894         {
3895                 if (rangelo > (unsigned char) *sptr)
3896                         rangelo = (unsigned char) *sptr;
3897                 if (rangehi < (unsigned char) *sptr)
3898                         rangehi = (unsigned char) *sptr;
3899         }
3900         for (sptr = hibound; *sptr; sptr++)
3901         {
3902                 if (rangelo > (unsigned char) *sptr)
3903                         rangelo = (unsigned char) *sptr;
3904                 if (rangehi < (unsigned char) *sptr)
3905                         rangehi = (unsigned char) *sptr;
3906         }
3907         /* If range includes any upper-case ASCII chars, make it include all */
3908         if (rangelo <= 'Z' && rangehi >= 'A')
3909         {
3910                 if (rangelo > 'A')
3911                         rangelo = 'A';
3912                 if (rangehi < 'Z')
3913                         rangehi = 'Z';
3914         }
3915         /* Ditto lower-case */
3916         if (rangelo <= 'z' && rangehi >= 'a')
3917         {
3918                 if (rangelo > 'a')
3919                         rangelo = 'a';
3920                 if (rangehi < 'z')
3921                         rangehi = 'z';
3922         }
3923         /* Ditto digits */
3924         if (rangelo <= '9' && rangehi >= '0')
3925         {
3926                 if (rangelo > '0')
3927                         rangelo = '0';
3928                 if (rangehi < '9')
3929                         rangehi = '9';
3930         }
3931
3932         /*
3933          * If the range includes fewer than 10 chars, assume we don't have enough
3934          * data, and widen it to the regular ASCII set.
3935          */
3936         if (rangehi - rangelo < 9)
3937         {
3938                 rangelo = ' ';
3939                 rangehi = 127;
3940         }
3941
3942         /*
3943          * Now strip any common prefix of the three strings.
3944          */
3945         while (*lobound)
3946         {
3947                 if (*lobound != *hibound || *lobound != *value)
3948                         break;
3949                 lobound++, hibound++, value++;
3950         }
3951
3952         /*
3953          * Now we can do the conversions.
3954          */
3955         *scaledvalue = convert_one_string_to_scalar(value, rangelo, rangehi);
3956         *scaledlobound = convert_one_string_to_scalar(lobound, rangelo, rangehi);
3957         *scaledhibound = convert_one_string_to_scalar(hibound, rangelo, rangehi);
3958 }
3959
3960 static double
3961 convert_one_string_to_scalar(char *value, int rangelo, int rangehi)
3962 {
3963         int                     slen = strlen(value);
3964         double          num,
3965                                 denom,
3966                                 base;
3967
3968         if (slen <= 0)
3969                 return 0.0;                             /* empty string has scalar value 0 */
3970
3971         /*
3972          * There seems little point in considering more than a dozen bytes from
3973          * the string.  Since base is at least 10, that will give us nominal
3974          * resolution of at least 12 decimal digits, which is surely far more
3975          * precision than this estimation technique has got anyway (especially in
3976          * non-C locales).  Also, even with the maximum possible base of 256, this
3977          * ensures denom cannot grow larger than 256^13 = 2.03e31, which will not
3978          * overflow on any known machine.
3979          */
3980         if (slen > 12)
3981                 slen = 12;
3982
3983         /* Convert initial characters to fraction */
3984         base = rangehi - rangelo + 1;
3985         num = 0.0;
3986         denom = base;
3987         while (slen-- > 0)
3988         {
3989                 int                     ch = (unsigned char) *value++;
3990
3991                 if (ch < rangelo)
3992                         ch = rangelo - 1;
3993                 else if (ch > rangehi)
3994                         ch = rangehi + 1;
3995                 num += ((double) (ch - rangelo)) / denom;
3996                 denom *= base;
3997         }
3998
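	/*
	 * For example, with rangelo = '0' and rangehi = '9' (base 10), the
	 * string "42" comes out as 4/10 + 2/100 = 0.42.
	 */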
3999         return num;
4000 }
4001
4002 /*
4003  * Convert a string-type Datum into a palloc'd, null-terminated string.
4004  *
4005  * When using a non-C locale, we must pass the string through strxfrm()
4006  * before continuing, so as to generate correct locale-specific results.
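 * (strxfrm() is defined so that plain strcmp() on its outputs sorts the same
 * way strcoll() would sort the original strings, so the scaled values follow
 * the locale's sort order rather than raw byte order.)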
4007  */
4008 static char *
4009 convert_string_datum(Datum value, Oid typid)
4010 {
4011         char       *val;
4012
4013         switch (typid)
4014         {
4015                 case CHAROID:
4016                         val = (char *) palloc(2);
4017                         val[0] = DatumGetChar(value);
4018                         val[1] = '\0';
4019                         break;
4020                 case BPCHAROID:
4021                 case VARCHAROID:
4022                 case TEXTOID:
4023                         val = TextDatumGetCString(value);
4024                         break;
4025                 case NAMEOID:
4026                         {
4027                                 NameData   *nm = (NameData *) DatumGetPointer(value);
4028
4029                                 val = pstrdup(NameStr(*nm));
4030                                 break;
4031                         }
4032                 default:
4033
4034                         /*
4035                          * Can't get here unless someone tries to use scalarltsel on an
4036                          * operator with one string and one non-string operand.
4037                          */
4038                         elog(ERROR, "unsupported type: %u", typid);
4039                         return NULL;
4040         }
4041
4042         if (!lc_collate_is_c(DEFAULT_COLLATION_OID))
4043         {
4044                 char       *xfrmstr;
4045                 size_t          xfrmlen;
4046                 size_t xfrmlen2 PG_USED_FOR_ASSERTS_ONLY;
4047
4048                 /*
4049                  * XXX: We could guess at a suitable output buffer size and only call
4050                  * strxfrm twice if our guess is too small.
4051                  *
4052                  * XXX: strxfrm doesn't support UTF-8 encoding on Win32; it can return
4053                  * bogus data or set an error.  This is not really a problem unless it
4054                  * crashes, since the worst consequence is an estimation error and
4055                  * nothing fatal.
4056                  */
4057 #if _MSC_VER == 1400                    /* VS.Net 2005 */
4058
4059                 /*
4060                  * http://connect.microsoft.com/VisualStudio/feedback/ViewFeedback.aspx?
4061                  * FeedbackID=99694
4062                  */
4063                 {
4064                         char            x[1];
4065
4066                         xfrmlen = strxfrm(x, val, 0);
4067                 }
4068 #else
4069                 xfrmlen = strxfrm(NULL, val, 0);
4070 #endif
4071 #ifdef WIN32
4072
4073                 /*
4074                  * On Windows, strxfrm returns INT_MAX when an error occurs. Instead
4075                  * of trying to allocate this much memory (and fail), just return the
4076                  * original string unmodified as if we were in the C locale.
4077                  */
4078                 if (xfrmlen == INT_MAX)
4079                         return val;
4080 #endif
4081                 xfrmstr = (char *) palloc(xfrmlen + 1);
4082                 xfrmlen2 = strxfrm(xfrmstr, val, xfrmlen + 1);
4083
4084                 /*
4085                  * Some systems (e.g., glibc) can return a smaller value from the
4086                  * second call than the first; thus the Assert must be <= not ==.
4087                  */
4088                 Assert(xfrmlen2 <= xfrmlen);
4089                 pfree(val);
4090                 val = xfrmstr;
4091         }
4092
4093         return val;
4094 }
4095
4096 /*
4097  * Do convert_to_scalar()'s work for any bytea data type.
4098  *
4099  * Very similar to convert_string_to_scalar except we can't assume
4100  * null-termination and therefore pass explicit lengths around.
4101  *
4102  * Also, assumptions about likely "normal" ranges of characters have been
4103  * removed - a data range of 0..255 is always used, for now.  (Perhaps
4104  * someday we will add information about actual byte data range to
4105  * pg_statistic.)
4106  */
4107 static void
4108 convert_bytea_to_scalar(Datum value,
4109                                                 double *scaledvalue,
4110                                                 Datum lobound,
4111                                                 double *scaledlobound,
4112                                                 Datum hibound,
4113                                                 double *scaledhibound)
4114 {
4115         int                     rangelo,
4116                                 rangehi,
4117                                 valuelen = VARSIZE(DatumGetPointer(value)) - VARHDRSZ,
4118                                 loboundlen = VARSIZE(DatumGetPointer(lobound)) - VARHDRSZ,
4119                                 hiboundlen = VARSIZE(DatumGetPointer(hibound)) - VARHDRSZ,
4120                                 i,
4121                                 minlen;
4122         unsigned char *valstr = (unsigned char *) VARDATA(DatumGetPointer(value)),
4123                            *lostr = (unsigned char *) VARDATA(DatumGetPointer(lobound)),
4124                            *histr = (unsigned char *) VARDATA(DatumGetPointer(hibound));
4125
4126         /*
4127          * Assume bytea data is uniformly distributed across all byte values.
4128          */
4129         rangelo = 0;
4130         rangehi = 255;
4131
4132         /*
4133          * Now strip any common prefix of the three strings.
4134          */
4135         minlen = Min(Min(valuelen, loboundlen), hiboundlen);
4136         for (i = 0; i < minlen; i++)
4137         {
4138                 if (*lostr != *histr || *lostr != *valstr)
4139                         break;
4140                 lostr++, histr++, valstr++;
4141                 loboundlen--, hiboundlen--, valuelen--;
4142         }
4143
4144         /*
4145          * Now we can do the conversions.
4146          */
4147         *scaledvalue = convert_one_bytea_to_scalar(valstr, valuelen, rangelo, rangehi);
4148         *scaledlobound = convert_one_bytea_to_scalar(lostr, loboundlen, rangelo, rangehi);
4149         *scaledhibound = convert_one_bytea_to_scalar(histr, hiboundlen, rangelo, rangehi);
4150 }
4151
4152 static double
4153 convert_one_bytea_to_scalar(unsigned char *value, int valuelen,
4154                                                         int rangelo, int rangehi)
4155 {
4156         double          num,
4157                                 denom,
4158                                 base;
4159
4160         if (valuelen <= 0)
4161                 return 0.0;                             /* empty string has scalar value 0 */
4162
4163         /*
4164          * Since base is 256, we need not consider more than about 10 bytes (even
4165          * this many seems like overkill).
4166          */
4167         if (valuelen > 10)
4168                 valuelen = 10;
4169
4170         /* Convert initial characters to fraction */
4171         base = rangehi - rangelo + 1;
4172         num = 0.0;
4173         denom = base;
4174         while (valuelen-- > 0)
4175         {
4176                 int                     ch = *value++;
4177
4178                 if (ch < rangelo)
4179                         ch = rangelo - 1;
4180                 else if (ch > rangehi)
4181                         ch = rangehi + 1;
4182                 num += ((double) (ch - rangelo)) / denom;
4183                 denom *= base;
4184         }
4185
4186         return num;
4187 }
4188
4189 /*
4190  * Do convert_to_scalar()'s work for any timevalue data type.
4191  */
4192 static double
4193 convert_timevalue_to_scalar(Datum value, Oid typid)
4194 {
4195         switch (typid)
4196         {
4197                 case TIMESTAMPOID:
4198                         return DatumGetTimestamp(value);
4199                 case TIMESTAMPTZOID:
4200                         return DatumGetTimestampTz(value);
4201                 case ABSTIMEOID:
4202                         return DatumGetTimestamp(DirectFunctionCall1(abstime_timestamp,
4203                                                                                                                  value));
4204                 case DATEOID:
4205                         return date2timestamp_no_overflow(DatumGetDateADT(value));
4206                 case INTERVALOID:
4207                         {
4208                                 Interval   *interval = DatumGetIntervalP(value);
4209
4210                                 /*
4211                                  * Convert the month part of Interval to days using assumed
4212                                  * average month length of 365.25/12.0 days.  Not too
4213                                  * accurate, but plenty good enough for our purposes.
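				 * For example, with 64-bit timestamps an interval of
				 * '1 mon 1 day' scales to
				 * 1 * (365.25/12) * 86400 * 1e6 + 1 * 86400 * 1e6
				 * ~= 2.72e12 microseconds.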
4214                                  */
4215 #ifdef HAVE_INT64_TIMESTAMP
4216                                 return interval->time + interval->day * (double) USECS_PER_DAY +
4217                                         interval->month * ((DAYS_PER_YEAR / (double) MONTHS_PER_YEAR) * USECS_PER_DAY);
4218 #else
4219                                 return interval->time + interval->day * SECS_PER_DAY +
4220                                         interval->month * ((DAYS_PER_YEAR / (double) MONTHS_PER_YEAR) * (double) SECS_PER_DAY);
4221 #endif
4222                         }
4223                 case RELTIMEOID:
4224 #ifdef HAVE_INT64_TIMESTAMP
4225                         return (DatumGetRelativeTime(value) * 1000000.0);
4226 #else
4227                         return DatumGetRelativeTime(value);
4228 #endif
4229                 case TINTERVALOID:
4230                         {
4231                                 TimeInterval tinterval = DatumGetTimeInterval(value);
4232
4233 #ifdef HAVE_INT64_TIMESTAMP
4234                                 if (tinterval->status != 0)
4235                                         return ((tinterval->data[1] - tinterval->data[0]) * 1000000.0);
4236 #else
4237                                 if (tinterval->status != 0)
4238                                         return tinterval->data[1] - tinterval->data[0];
4239 #endif
4240                                 return 0;               /* for lack of a better idea */
4241                         }
4242                 case TIMEOID:
4243                         return DatumGetTimeADT(value);
4244                 case TIMETZOID:
4245                         {
4246                                 TimeTzADT  *timetz = DatumGetTimeTzADTP(value);
4247
4248                                 /* use GMT-equivalent time */
4249 #ifdef HAVE_INT64_TIMESTAMP
4250                                 return (double) (timetz->time + (timetz->zone * 1000000.0));
4251 #else
4252                                 return (double) (timetz->time + timetz->zone);
4253 #endif
4254                         }
4255         }
4256
4257         /*
4258          * Can't get here unless someone tries to use scalarltsel/scalargtsel on
4259          * an operator with one timevalue and one non-timevalue operand.
4260          */
4261         elog(ERROR, "unsupported type: %u", typid);
4262         return 0;
4263 }
4264
4265
4266 /*
4267  * get_restriction_variable
4268  *              Examine the args of a restriction clause to see if it's of the
4269  *              form (variable op pseudoconstant) or (pseudoconstant op variable),
4270  *              where "variable" could be either a Var or an expression in vars of a
4271  *              single relation.  If so, extract information about the variable,
4272  *              and also indicate which side it was on and the other argument.
4273  *
4274  * Inputs:
4275  *      root: the planner info
4276  *      args: clause argument list
4277  *      varRelid: see specs for restriction selectivity functions
4278  *
4279  * Outputs: (these are valid only if TRUE is returned)
4280  *      *vardata: gets information about variable (see examine_variable)
4281  *      *other: gets other clause argument, aggressively reduced to a constant
4282  *      *varonleft: set TRUE if variable is on the left, FALSE if on the right
4283  *
4284  * Returns TRUE if a variable is identified, otherwise FALSE.
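 *
 * For example, for a qual like "tab.col < 42" (with varRelid = 0), *vardata
 * describes tab.col, *other is the Const node for 42, and *varonleft is set
 * TRUE.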
4285  *
4286  * Note: if there are Vars on both sides of the clause, we must fail, because
4287  * callers are expecting that the other side will act like a pseudoconstant.
4288  */
4289 bool
4290 get_restriction_variable(PlannerInfo *root, List *args, int varRelid,
4291                                                  VariableStatData *vardata, Node **other,
4292                                                  bool *varonleft)
4293 {
4294         Node       *left,
4295                            *right;
4296         VariableStatData rdata;
4297
4298         /* Fail if not a binary opclause (probably shouldn't happen) */
4299         if (list_length(args) != 2)
4300                 return false;
4301
4302         left = (Node *) linitial(args);
4303         right = (Node *) lsecond(args);
4304
4305         /*
4306          * Examine both sides.  Note that when varRelid is nonzero, Vars of other
4307          * relations will be treated as pseudoconstants.
4308          */
4309         examine_variable(root, left, varRelid, vardata);
4310         examine_variable(root, right, varRelid, &rdata);
4311
4312         /*
4313          * If one side is a variable and the other not, we win.
4314          */
4315         if (vardata->rel && rdata.rel == NULL)
4316         {
4317                 *varonleft = true;
4318                 *other = estimate_expression_value(root, rdata.var);
4319                 /* Assume we need no ReleaseVariableStats(rdata) here */
4320                 return true;
4321         }
4322
4323         if (vardata->rel == NULL && rdata.rel)
4324         {
4325                 *varonleft = false;
4326                 *other = estimate_expression_value(root, vardata->var);
4327                 /* Assume we need no ReleaseVariableStats(*vardata) here */
4328                 *vardata = rdata;
4329                 return true;
4330         }
4331
4332         /* Ooops, clause has wrong structure (probably var op var) */
4333         ReleaseVariableStats(*vardata);
4334         ReleaseVariableStats(rdata);
4335
4336         return false;
4337 }
4338
4339 /*
4340  * get_join_variables
4341  *              Apply examine_variable() to each side of a join clause.
4342  *              Also, attempt to identify whether the join clause has the same
4343  *              or reversed sense compared to the SpecialJoinInfo.
4344  *
4345  * We consider the join clause "normal" if it is "lhs_var OP rhs_var",
4346  * or "reversed" if it is "rhs_var OP lhs_var".  In complicated cases
4347  * where we can't tell for sure, we default to assuming it's normal.
4348  */
4349 void
4350 get_join_variables(PlannerInfo *root, List *args, SpecialJoinInfo *sjinfo,
4351                                    VariableStatData *vardata1, VariableStatData *vardata2,
4352                                    bool *join_is_reversed)
4353 {
4354         Node       *left,
4355                            *right;
4356
4357         if (list_length(args) != 2)
4358                 elog(ERROR, "join operator should take two arguments");
4359
4360         left = (Node *) linitial(args);
4361         right = (Node *) lsecond(args);
4362
4363         examine_variable(root, left, 0, vardata1);
4364         examine_variable(root, right, 0, vardata2);
4365
4366         if (vardata1->rel &&
4367                 bms_is_subset(vardata1->rel->relids, sjinfo->syn_righthand))
4368                 *join_is_reversed = true;               /* var1 is on RHS */
4369         else if (vardata2->rel &&
4370                          bms_is_subset(vardata2->rel->relids, sjinfo->syn_lefthand))
4371                 *join_is_reversed = true;               /* var2 is on LHS */
4372         else
4373                 *join_is_reversed = false;
4374 }
4375
4376 /*
4377  * examine_variable
4378  *              Try to look up statistical data about an expression.
4379  *              Fill in a VariableStatData struct to describe the expression.
4380  *
4381  * Inputs:
4382  *      root: the planner info
4383  *      node: the expression tree to examine
4384  *      varRelid: see specs for restriction selectivity functions
4385  *
4386  * Outputs: *vardata is filled as follows:
4387  *      var: the input expression (with any binary relabeling stripped, if
4388  *              it is or contains a variable; but otherwise the type is preserved)
4389  *      rel: RelOptInfo for relation containing variable; NULL if expression
4390  *              contains no Vars (NOTE this could point to a RelOptInfo of a
4391  *              subquery, not one in the current query).
4392  *      statsTuple: the pg_statistic entry for the variable, if one exists;
4393  *              otherwise NULL.
4394  *      freefunc: pointer to a function to release statsTuple with.
4395  *      vartype: exposed type of the expression; this should always match
4396  *              the declared input type of the operator we are estimating for.
4397  *      atttype, atttypmod: type data to pass to get_attstatsslot().  This is
4398  *              commonly the same as the exposed type of the variable argument,
4399  *              but can be different in binary-compatible-type cases.
4400  *      isunique: TRUE if we were able to match the var to a unique index or a
4401  *              single-column DISTINCT clause, implying its values are unique for
4402  *              this query.  (Caution: this should be trusted for statistical
4403  *              purposes only, since we do not check indimmediate nor verify that
4404  *              the exact same definition of equality applies.)
4405  *
4406  * Caller is responsible for doing ReleaseVariableStats() before exiting.
4407  */
4408 void
4409 examine_variable(PlannerInfo *root, Node *node, int varRelid,
4410                                  VariableStatData *vardata)
4411 {
4412         Node       *basenode;
4413         Relids          varnos;
4414         RelOptInfo *onerel;
4415
4416         /* Make sure we don't return dangling pointers in vardata */
4417         MemSet(vardata, 0, sizeof(VariableStatData));
4418
4419         /* Save the exposed type of the expression */
4420         vardata->vartype = exprType(node);
4421
4422         /* Look inside any binary-compatible relabeling */
4423
4424         if (IsA(node, RelabelType))
4425                 basenode = (Node *) ((RelabelType *) node)->arg;
4426         else
4427                 basenode = node;
4428
4429         /* Fast path for a simple Var */
4430
4431         if (IsA(basenode, Var) &&
4432                 (varRelid == 0 || varRelid == ((Var *) basenode)->varno))
4433         {
4434                 Var                *var = (Var *) basenode;
4435
4436                 /* Set up result fields other than the stats tuple */
4437                 vardata->var = basenode;        /* return Var without relabeling */
4438                 vardata->rel = find_base_rel(root, var->varno);
4439                 vardata->atttype = var->vartype;
4440                 vardata->atttypmod = var->vartypmod;
4441                 vardata->isunique = has_unique_index(vardata->rel, var->varattno);
4442
4443                 /* Try to locate some stats */
4444                 examine_simple_variable(root, var, vardata);
4445
4446                 return;
4447         }
4448
4449         /*
4450          * Okay, it's a more complicated expression.  Determine variable
4451          * membership.  Note that when varRelid isn't zero, only vars of that
4452          * relation are considered "real" vars.
4453          */
4454         varnos = pull_varnos(basenode);
4455
4456         onerel = NULL;
4457
4458         switch (bms_membership(varnos))
4459         {
4460                 case BMS_EMPTY_SET:
4461                         /* No Vars at all ... must be pseudo-constant clause */
4462                         break;
4463                 case BMS_SINGLETON:
4464                         if (varRelid == 0 || bms_is_member(varRelid, varnos))
4465                         {
4466                                 onerel = find_base_rel(root,
4467                                            (varRelid ? varRelid : bms_singleton_member(varnos)));
4468                                 vardata->rel = onerel;
4469                                 node = basenode;        /* strip any relabeling */
4470                         }
4471                         /* else treat it as a constant */
4472                         break;
4473                 case BMS_MULTIPLE:
4474                         if (varRelid == 0)
4475                         {
4476                                 /* treat it as a variable of a join relation */
4477                                 vardata->rel = find_join_rel(root, varnos);
4478                                 node = basenode;        /* strip any relabeling */
4479                         }
4480                         else if (bms_is_member(varRelid, varnos))
4481                         {
4482                                 /* ignore the vars belonging to other relations */
4483                                 vardata->rel = find_base_rel(root, varRelid);
4484                                 node = basenode;        /* strip any relabeling */
4485                                 /* note: no point in expressional-index search here */
4486                         }
4487                         /* else treat it as a constant */
4488                         break;
4489         }
4490
4491         bms_free(varnos);
4492
4493         vardata->var = node;
4494         vardata->atttype = exprType(node);
4495         vardata->atttypmod = exprTypmod(node);
4496
4497         if (onerel)
4498         {
4499                 /*
4500                  * We have an expression in vars of a single relation.  Try to match
4501                  * it to expressional index columns, in hopes of finding some
4502                  * statistics.
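		 *
		 * For example, an expression like lower(colname) can pick up the
		 * statistics ANALYZE gathered for a matching expression index on
		 * lower(colname), even though the expression has no pg_statistic
		 * entry of its own.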
4503                  *
4504                  * XXX it's conceivable that there are multiple matches with different
4505                  * index opfamilies; if so, we need to pick one that matches the
4506                  * operator we are estimating for.  FIXME later.
4507                  */
4508                 ListCell   *ilist;
4509
4510                 foreach(ilist, onerel->indexlist)
4511                 {
4512                         IndexOptInfo *index = (IndexOptInfo *) lfirst(ilist);
4513                         ListCell   *indexpr_item;
4514                         int                     pos;
4515
4516                         indexpr_item = list_head(index->indexprs);
4517                         if (indexpr_item == NULL)
4518                                 continue;               /* no expressions here... */
4519
4520                         for (pos = 0; pos < index->ncolumns; pos++)
4521                         {
4522                                 if (index->indexkeys[pos] == 0)
4523                                 {
4524                                         Node       *indexkey;
4525
4526                                         if (indexpr_item == NULL)
4527                                                 elog(ERROR, "too few entries in indexprs list");
4528                                         indexkey = (Node *) lfirst(indexpr_item);
4529                                         if (indexkey && IsA(indexkey, RelabelType))
4530                                                 indexkey = (Node *) ((RelabelType *) indexkey)->arg;
4531                                         if (equal(node, indexkey))
4532                                         {
4533                                                 /*
4534                                                  * Found a match ... is it a unique index? Tests here
4535                                                  * should match has_unique_index().
4536                                                  */
4537                                                 if (index->unique &&
4538                                                         index->ncolumns == 1 &&
4539                                                         (index->indpred == NIL || index->predOK))
4540                                                         vardata->isunique = true;
4541
4542                                                 /*
4543                                                  * Has it got stats?  We only consider stats for
4544                                                  * non-partial indexes, since partial indexes probably
4545                                                  * don't reflect whole-relation statistics; the above
4546                                                  * check for uniqueness is the only info we take from
4547                                                  * a partial index.
4548                                                  *
4549                                                  * An index stats hook, however, must make its own
4550                                                  * decisions about what to do with partial indexes.
4551                                                  */
4552                                                 if (get_index_stats_hook &&
4553                                                         (*get_index_stats_hook) (root, index->indexoid,
4554                                                                                                          pos + 1, vardata))
4555                                                 {
4556                                                         /*
4557                                                          * The hook took control of acquiring a stats
4558                                                          * tuple.  If it did supply a tuple, it'd better
4559                                                          * have supplied a freefunc.
4560                                                          */
4561                                                         if (HeapTupleIsValid(vardata->statsTuple) &&
4562                                                                 !vardata->freefunc)
4563                                                                 elog(ERROR, "no function provided to release variable stats with");
4564                                                 }
4565                                                 else if (index->indpred == NIL)
4566                                                 {
4567                                                         vardata->statsTuple =
4568                                                                 SearchSysCache3(STATRELATTINH,
4569                                                                                    ObjectIdGetDatum(index->indexoid),
4570                                                                                                 Int16GetDatum(pos + 1),
4571                                                                                                 BoolGetDatum(false));
4572                                                         vardata->freefunc = ReleaseSysCache;
4573                                                 }
4574                                                 if (vardata->statsTuple)
4575                                                         break;
4576                                         }
4577                                         indexpr_item = lnext(indexpr_item);
4578                                 }
4579                         }
4580                         if (vardata->statsTuple)
4581                                 break;
4582                 }
4583         }
4584 }
4585
4586 /*
4587  * examine_simple_variable
4588  *              Handle a simple Var for examine_variable
4589  *
4590  * This is split out as a subroutine so that we can recurse to deal with
4591  * Vars referencing subqueries.
4592  *
4593  * We already filled in all the fields of *vardata except for the stats tuple.
4594  */
4595 static void
4596 examine_simple_variable(PlannerInfo *root, Var *var,
4597                                                 VariableStatData *vardata)
4598 {
4599         RangeTblEntry *rte = root->simple_rte_array[var->varno];
4600
4601         Assert(IsA(rte, RangeTblEntry));
4602
4603         if (get_relation_stats_hook &&
4604                 (*get_relation_stats_hook) (root, rte, var->varattno, vardata))
4605         {
4606                 /*
4607                  * The hook took control of acquiring a stats tuple.  If it did supply
4608                  * a tuple, it'd better have supplied a freefunc.
4609                  */
4610                 if (HeapTupleIsValid(vardata->statsTuple) &&
4611                         !vardata->freefunc)
4612                         elog(ERROR, "no function provided to release variable stats with");
4613         }
4614         else if (rte->rtekind == RTE_RELATION)
4615         {
4616                 /*
4617                  * Plain table or parent of an inheritance appendrel, so look up the
4618                  * column in pg_statistic
4619                  */
4620                 vardata->statsTuple = SearchSysCache3(STATRELATTINH,
4621                                                                                           ObjectIdGetDatum(rte->relid),
4622                                                                                           Int16GetDatum(var->varattno),
4623                                                                                           BoolGetDatum(rte->inh));
4624                 vardata->freefunc = ReleaseSysCache;
4625         }
4626         else if (rte->rtekind == RTE_SUBQUERY && !rte->inh)
4627         {
4628                 /*
4629                  * Plain subquery (not one that was converted to an appendrel).
4630                  */
4631                 Query      *subquery = rte->subquery;
4632                 RelOptInfo *rel;
4633                 TargetEntry *ste;
4634
4635                 /*
4636                  * Punt if it's a whole-row var rather than a plain column reference.
4637                  */
4638                 if (var->varattno == InvalidAttrNumber)
4639                         return;
4640
4641                 /*
4642                  * Punt if subquery uses set operations or GROUP BY, as these will
4643                  * mash underlying columns' stats beyond recognition.  (Set ops are
4644                  * particularly nasty; if we forged ahead, we would return stats
4645                  * relevant to only the leftmost subselect...)  DISTINCT is also
4646                  * problematic, but we check that later because there is a possibility
4647                  * of learning something even with it.
4648                  */
4649                 if (subquery->setOperations ||
4650                         subquery->groupClause)
4651                         return;
4652
4653                 /*
4654                  * OK, fetch RelOptInfo for subquery.  Note that we don't change the
4655                  * rel returned in vardata, since caller expects it to be a rel of the
4656                  * caller's query level.  Because we might already be recursing, we
4657                  * can't use that rel pointer either, but have to look up the Var's
4658                  * rel afresh.
4659                  */
4660                 rel = find_base_rel(root, var->varno);
4661
4662                 /* If the subquery hasn't been planned yet, we have to punt */
4663                 if (rel->subroot == NULL)
4664                         return;
4665                 Assert(IsA(rel->subroot, PlannerInfo));
4666
4667                 /*
4668                  * Switch our attention to the subquery as mangled by the planner. It
4669                  * was okay to look at the pre-planning version for the tests above,
4670                  * but now we need a Var that will refer to the subroot's live
4671                  * RelOptInfos.  For instance, if any subquery pullup happened during
4672                  * planning, Vars in the targetlist might have gotten replaced, and we
4673                  * need to see the replacement expressions.
4674                  */
4675                 subquery = rel->subroot->parse;
4676                 Assert(IsA(subquery, Query));
4677
4678                 /* Get the subquery output expression referenced by the upper Var */
4679                 ste = get_tle_by_resno(subquery->targetList, var->varattno);
4680                 if (ste == NULL || ste->resjunk)
4681                         elog(ERROR, "subquery %s does not have attribute %d",
4682                                  rte->eref->aliasname, var->varattno);
4683                 var = (Var *) ste->expr;
4684
4685                 /*
4686                  * If subquery uses DISTINCT, we can't make use of any stats for the
4687                  * variable ... but, if it's the only DISTINCT column, we are entitled
4688                  * to consider it unique.  We do the test this way so that it works
4689                  * for cases involving DISTINCT ON.
4690                  */
4691                 if (subquery->distinctClause)
4692                 {
4693                         if (list_length(subquery->distinctClause) == 1 &&
4694                                 targetIsInSortList(ste, InvalidOid, subquery->distinctClause))
4695                                 vardata->isunique = true;
4696                         /* cannot go further */
4697                         return;
4698                 }
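                /*
                 * Illustrative (hypothetical) case: for a subquery such as
                 *              SELECT DISTINCT x FROM tab
                 * x is the sole DISTINCT column, so the subquery's x output can
                 * be treated as unique; DISTINCT ON (x) is handled the same way.
                 */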
4699
4700                 /*
4701                  * If the sub-query originated from a view with the security_barrier
4702                  * attribute, we must not look at the variable's statistics, though it
4703                  * seems all right to notice the existence of a DISTINCT clause. So
4704                  * stop here.
4705                  *
4706                  * This is probably a harsher restriction than necessary; it's
4707                  * certainly OK for the selectivity estimator (which is a C function,
4708                  * and therefore omnipotent anyway) to look at the statistics.  But
4709                  * many selectivity estimators will happily *invoke the operator
4710                  * function* to try to work out a good estimate - and that's not OK.
4711                  * So for now, don't dig down for stats.
4712                  */
4713                 if (rte->security_barrier)
4714                         return;
4715
4716                 /* Can only handle a simple Var of subquery's query level */
4717                 if (var && IsA(var, Var) &&
4718                         var->varlevelsup == 0)
4719                 {
4720                         /*
4721                          * OK, recurse into the subquery.  Note that the original setting
4722                          * of vardata->isunique (which will surely be false) is left
4723                          * unchanged in this situation.  That's what we want, since even
4724                          * if the underlying column is unique, the subquery may have
4725                          * joined to other tables in a way that creates duplicates.
4726                          */
4727                         examine_simple_variable(rel->subroot, var, vardata);
4728                 }
4729         }
4730         else
4731         {
4732                 /*
4733                  * Otherwise, the Var comes from a FUNCTION, VALUES, or CTE RTE.  (We
4734                  * won't see RTE_JOIN here because join alias Vars have already been
4735                  * flattened.)  There's not much we can do with function outputs, but
4736                  * maybe someday try to be smarter about VALUES and/or CTEs.
4737                  */
4738         }
4739 }
4740
4741 /*
4742  * get_variable_numdistinct
4743  *        Estimate the number of distinct values of a variable.
4744  *
4745  * vardata: results of examine_variable
4746  * *isdefault: set to TRUE if the result is a default rather than based on
4747  * anything meaningful.
4748  *
4749  * NB: be careful to produce a positive integral result, since callers may
4750  * compare the result to exact integer counts, or might divide by it.
4751  */
4752 double
4753 get_variable_numdistinct(VariableStatData *vardata, bool *isdefault)
4754 {
4755         double          stadistinct;
4756         double          stanullfrac = 0.0;
4757         double          ntuples;
4758
4759         *isdefault = false;
4760
4761         /*
4762          * Determine the stadistinct value to use.  There are cases where we can
4763          * get an estimate even without a pg_statistic entry, or can get a better
4764          * value than is in pg_statistic.  Grab stanullfrac too if we can find it
4765          * (otherwise, assume no nulls, for lack of any better idea).
4766          */
4767         if (HeapTupleIsValid(vardata->statsTuple))
4768         {
4769                 /* Use the pg_statistic entry */
4770                 Form_pg_statistic stats;
4771
4772                 stats = (Form_pg_statistic) GETSTRUCT(vardata->statsTuple);
4773                 stadistinct = stats->stadistinct;
4774                 stanullfrac = stats->stanullfrac;
4775         }
4776         else if (vardata->vartype == BOOLOID)
4777         {
4778                 /*
4779                  * Special-case boolean columns: presumably, two distinct values.
4780                  *
4781                  * Are there any other datatypes we should wire in special estimates
4782                  * for?
4783                  */
4784                 stadistinct = 2.0;
4785         }
4786         else
4787         {
4788                 /*
4789                  * We don't keep statistics for system columns, but in some cases we
4790                  * can infer distinctness anyway.
4791                  */
4792                 if (vardata->var && IsA(vardata->var, Var))
4793                 {
4794                         switch (((Var *) vardata->var)->varattno)
4795                         {
4796                                 case ObjectIdAttributeNumber:
4797                                 case SelfItemPointerAttributeNumber:
4798                                         stadistinct = -1.0; /* unique (and all non null) */
4799                                         break;
4800                                 case TableOidAttributeNumber:
4801                                         stadistinct = 1.0;      /* only 1 value */
4802                                         break;
4803                                 default:
4804                                         stadistinct = 0.0;      /* means "unknown" */
4805                                         break;
4806                         }
4807                 }
4808                 else
4809                         stadistinct = 0.0;      /* means "unknown" */
4810
4811                 /*
4812                  * XXX consider using estimate_num_groups on expressions?
4813                  */
4814         }
4815
4816         /*
4817          * If there is a unique index or DISTINCT clause for the variable, assume
4818          * it is unique no matter what pg_statistic says; the statistics could be
4819          * out of date, or we might have found a partial unique index that proves
4820          * the var is unique for this query.  However, we'd better still believe
4821          * the null-fraction statistic.
4822          */
4823         if (vardata->isunique)
4824                 stadistinct = -1.0 * (1.0 - stanullfrac);
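        /*
         * Worked example (hypothetical numbers): with stanullfrac = 0.2 this
         * yields stadistinct = -0.8, i.e. the non-null 80% of the rows are
         * expected to be all-distinct; with 1000 tuples the code below then
         * returns clamp_row_est(0.8 * 1000) = 800.
         */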
4825
4826         /*
4827          * If we had an absolute estimate, use that.
4828          */
4829         if (stadistinct > 0.0)
4830                 return clamp_row_est(stadistinct);
4831
4832         /*
4833          * Otherwise we need to get the relation size; punt if not available.
4834          */
4835         if (vardata->rel == NULL)
4836         {
4837                 *isdefault = true;
4838                 return DEFAULT_NUM_DISTINCT;
4839         }
4840         ntuples = vardata->rel->tuples;
4841         if (ntuples <= 0.0)
4842         {
4843                 *isdefault = true;
4844                 return DEFAULT_NUM_DISTINCT;
4845         }
4846
4847         /*
4848          * If we had a relative estimate, use that.
4849          */
4850         if (stadistinct < 0.0)
4851                 return clamp_row_est(-stadistinct * ntuples);
4852
4853         /*
4854          * With no data, estimate ndistinct = ntuples if the table is small, else
4855          * use default.  We use DEFAULT_NUM_DISTINCT as the cutoff for "small" so
4856          * that the behavior isn't discontinuous.
4857          */
4858         if (ntuples < DEFAULT_NUM_DISTINCT)
4859                 return clamp_row_est(ntuples);
4860
4861         *isdefault = true;
4862         return DEFAULT_NUM_DISTINCT;
4863 }
4864
4865 /*
4866  * get_variable_range
4867  *              Estimate the minimum and maximum value of the specified variable.
4868  *              If successful, store values in *min and *max, and return TRUE.
4869  *              If no data available, return FALSE.
4870  *
4871  * sortop is the "<" comparison operator to use.  This should generally
4872  * be "<" not ">", as only the former is likely to be found in pg_statistic.
4873  */
4874 static bool
4875 get_variable_range(PlannerInfo *root, VariableStatData *vardata, Oid sortop,
4876                                    Datum *min, Datum *max)
4877 {
4878         Datum           tmin = 0;
4879         Datum           tmax = 0;
4880         bool            have_data = false;
4881         int16           typLen;
4882         bool            typByVal;
4883         Datum      *values;
4884         int                     nvalues;
4885         int                     i;
4886
4887         /*
4888          * XXX It's very tempting to try to use the actual column min and max, if
4889          * we can get them relatively-cheaply with an index probe.  However, since
4890          * this function is called many times during join planning, that could
4891          * have unpleasant effects on planning speed.  Need more investigation
4892          * before enabling this.
4893          */
4894 #ifdef NOT_USED
4895         if (get_actual_variable_range(root, vardata, sortop, min, max))
4896                 return true;
4897 #endif
4898
4899         if (!HeapTupleIsValid(vardata->statsTuple))
4900         {
4901                 /* no stats available, so default result */
4902                 return false;
4903         }
4904
4905         get_typlenbyval(vardata->atttype, &typLen, &typByVal);
4906
4907         /*
4908          * If there is a histogram, grab the first and last values.
4909          *
4910          * If there is a histogram that is sorted with some other operator than
4911          * the one we want, fail --- this suggests that there is data we can't
4912          * use.
4913          */
4914         if (get_attstatsslot(vardata->statsTuple,
4915                                                  vardata->atttype, vardata->atttypmod,
4916                                                  STATISTIC_KIND_HISTOGRAM, sortop,
4917                                                  NULL,
4918                                                  &values, &nvalues,
4919                                                  NULL, NULL))
4920         {
4921                 if (nvalues > 0)
4922                 {
4923                         tmin = datumCopy(values[0], typByVal, typLen);
4924                         tmax = datumCopy(values[nvalues - 1], typByVal, typLen);
4925                         have_data = true;
4926                 }
4927                 free_attstatsslot(vardata->atttype, values, nvalues, NULL, 0);
4928         }
4929         else if (get_attstatsslot(vardata->statsTuple,
4930                                                           vardata->atttype, vardata->atttypmod,
4931                                                           STATISTIC_KIND_HISTOGRAM, InvalidOid,
4932                                                           NULL,
4933                                                           &values, &nvalues,
4934                                                           NULL, NULL))
4935         {
4936                 free_attstatsslot(vardata->atttype, values, nvalues, NULL, 0);
4937                 return false;
4938         }
4939
4940         /*
4941          * If we have most-common-values info, look for extreme MCVs.  This is
4942          * needed even if we also have a histogram, since the histogram excludes
4943          * the MCVs.  However, usually the MCVs will not be the extreme values, so
4944          * avoid unnecessary data copying.
4945          */
4946         if (get_attstatsslot(vardata->statsTuple,
4947                                                  vardata->atttype, vardata->atttypmod,
4948                                                  STATISTIC_KIND_MCV, InvalidOid,
4949                                                  NULL,
4950                                                  &values, &nvalues,
4951                                                  NULL, NULL))
4952         {
4953                 bool            tmin_is_mcv = false;
4954                 bool            tmax_is_mcv = false;
4955                 FmgrInfo        opproc;
4956
4957                 fmgr_info(get_opcode(sortop), &opproc);
4958
4959                 for (i = 0; i < nvalues; i++)
4960                 {
4961                         if (!have_data)
4962                         {
4963                                 tmin = tmax = values[i];
4964                                 tmin_is_mcv = tmax_is_mcv = have_data = true;
4965                                 continue;
4966                         }
4967                         if (DatumGetBool(FunctionCall2Coll(&opproc,
4968                                                                                            DEFAULT_COLLATION_OID,
4969                                                                                            values[i], tmin)))
4970                         {
4971                                 tmin = values[i];
4972                                 tmin_is_mcv = true;
4973                         }
4974                         if (DatumGetBool(FunctionCall2Coll(&opproc,
4975                                                                                            DEFAULT_COLLATION_OID,
4976                                                                                            tmax, values[i])))
4977                         {
4978                                 tmax = values[i];
4979                                 tmax_is_mcv = true;
4980                         }
4981                 }
4982                 if (tmin_is_mcv)
4983                         tmin = datumCopy(tmin, typByVal, typLen);
4984                 if (tmax_is_mcv)
4985                         tmax = datumCopy(tmax, typByVal, typLen);
4986                 free_attstatsslot(vardata->atttype, values, nvalues, NULL, 0);
4987         }
4988
4989         *min = tmin;
4990         *max = tmax;
4991         return have_data;
4992 }
4993
4994
4995 /*
4996  * get_actual_variable_range
4997  *              Attempt to identify the current *actual* minimum and/or maximum
4998  *              of the specified variable, by looking for a suitable btree index
4999  *              and fetching its low and/or high values.
5000  *              If successful, store values in *min and *max, and return TRUE.
5001  *              (Either pointer can be NULL if that endpoint isn't needed.)
5002  *              If no data available, return FALSE.
5003  *
5004  * sortop is the "<" comparison operator to use.
5005  */
5006 static bool
5007 get_actual_variable_range(PlannerInfo *root, VariableStatData *vardata,
5008                                                   Oid sortop,
5009                                                   Datum *min, Datum *max)
5010 {
5011         bool            have_data = false;
5012         RelOptInfo *rel = vardata->rel;
5013         RangeTblEntry *rte;
5014         ListCell   *lc;
5015
5016         /* No hope if no relation or it doesn't have indexes */
5017         if (rel == NULL || rel->indexlist == NIL)
5018                 return false;
5019         /* If it has indexes it must be a plain relation */
5020         rte = root->simple_rte_array[rel->relid];
5021         Assert(rte->rtekind == RTE_RELATION);
5022
5023         /* Search through the indexes to see if any match our problem */
5024         foreach(lc, rel->indexlist)
5025         {
5026                 IndexOptInfo *index = (IndexOptInfo *) lfirst(lc);
5027                 ScanDirection indexscandir;
5028
5029                 /* Ignore non-btree indexes */
5030                 if (index->relam != BTREE_AM_OID)
5031                         continue;
5032
5033                 /*
5034                  * Ignore partial indexes --- we only want stats that cover the entire
5035                  * relation.
5036                  */
5037                 if (index->indpred != NIL)
5038                         continue;
5039
5040                 /*
5041                  * The index list might include hypothetical indexes inserted by a
5042                  * get_relation_info hook --- don't try to access them.
5043                  */
5044                 if (index->hypothetical)
5045                         continue;
5046
5047                 /*
5048                  * The first index column must match the desired variable and sort
5049                  * operator --- but we can use a descending-order index.
5050                  */
5051                 if (!match_index_to_operand(vardata->var, 0, index))
5052                         continue;
5053                 switch (get_op_opfamily_strategy(sortop, index->sortopfamily[0]))
5054                 {
5055                         case BTLessStrategyNumber:
5056                                 if (index->reverse_sort[0])
5057                                         indexscandir = BackwardScanDirection;
5058                                 else
5059                                         indexscandir = ForwardScanDirection;
5060                                 break;
5061                         case BTGreaterStrategyNumber:
5062                                 if (index->reverse_sort[0])
5063                                         indexscandir = ForwardScanDirection;
5064                                 else
5065                                         indexscandir = BackwardScanDirection;
5066                                 break;
5067                         default:
5068                                 /* index doesn't match the sortop */
5069                                 continue;
5070                 }
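                /*
                 * Illustrative note: with the usual "<" sortop on an ascending
                 * index we scan forward, so the minimum comes first; if the index
                 * column was declared DESC, reverse_sort is set and a backward
                 * scan returns the smallest value first instead.
                 */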
5071
5072                 /*
5073                  * Found a suitable index to extract data from.  We'll need an EState
5074                  * and a bunch of other infrastructure.
5075                  */
5076                 {
5077                         EState     *estate;
5078                         ExprContext *econtext;
5079                         MemoryContext tmpcontext;
5080                         MemoryContext oldcontext;
5081                         Relation        heapRel;
5082                         Relation        indexRel;
5083                         IndexInfo  *indexInfo;
5084                         TupleTableSlot *slot;
5085                         int16           typLen;
5086                         bool            typByVal;
5087                         ScanKeyData scankeys[1];
5088                         IndexScanDesc index_scan;
5089                         HeapTuple       tup;
5090                         Datum           values[INDEX_MAX_KEYS];
5091                         bool            isnull[INDEX_MAX_KEYS];
5092                         SnapshotData SnapshotDirty;
5093
5094                         estate = CreateExecutorState();
5095                         econtext = GetPerTupleExprContext(estate);
5096                         /* Make sure any cruft is generated in the econtext's memory */
5097                         tmpcontext = econtext->ecxt_per_tuple_memory;
5098                         oldcontext = MemoryContextSwitchTo(tmpcontext);
5099
5100                         /*
5101                          * Open the table and index so we can read from them.  We should
5102                          * already have at least AccessShareLock on the table, but not
5103                          * necessarily on the index.
5104                          */
5105                         heapRel = heap_open(rte->relid, NoLock);
5106                         indexRel = index_open(index->indexoid, AccessShareLock);
5107
5108                         /* extract index key information from the index's pg_index info */
5109                         indexInfo = BuildIndexInfo(indexRel);
5110
5111                         /* set up a slot, type info, and a dirty snapshot for the scans below */
5112                         slot = MakeSingleTupleTableSlot(RelationGetDescr(heapRel));
5113                         econtext->ecxt_scantuple = slot;
5114                         get_typlenbyval(vardata->atttype, &typLen, &typByVal);
5115                         InitDirtySnapshot(SnapshotDirty);
5116
5117                         /* set up an IS NOT NULL scan key so that we ignore nulls */
5118                         ScanKeyEntryInitialize(&scankeys[0],
5119                                                                    SK_ISNULL | SK_SEARCHNOTNULL,
5120                                                                    1,   /* index col to scan */
5121                                                                    InvalidStrategy,             /* no strategy */
5122                                                                    InvalidOid,  /* no strategy subtype */
5123                                                                    InvalidOid,  /* no collation */
5124                                                                    InvalidOid,  /* no reg proc for this */
5125                                                                    (Datum) 0);  /* constant */
5126
5127                         have_data = true;
5128
5129                         /* If min is requested ... */
5130                         if (min)
5131                         {
5132                                 /*
5133                                  * In principle, we should scan the index with our current
5134                                  * active snapshot, which is the best approximation we've got
5135                                  * to what the query will see when executed.  But that won't
5136                                  * be exact if a new snap is taken before running the query,
5137                                  * and it can be very expensive if a lot of uncommitted rows
5138                                  * exist at the end of the index (because we'll laboriously
5139                                  * fetch each one and reject it).  What seems like a good
5140                                  * compromise is to use SnapshotDirty.  That will accept
5141                                  * uncommitted rows, and thus avoid fetching multiple heap
5142                                  * tuples in this scenario.  On the other hand, it will reject
5143                                  * known-dead rows, and thus not give a bogus answer when the
5144                                  * extreme value has been deleted; that case motivates not
5145                                  * using SnapshotAny here.
5146                                  */
5147                                 index_scan = index_beginscan(heapRel, indexRel, &SnapshotDirty,
5148                                                                                          1, 0);
5149                                 index_rescan(index_scan, scankeys, 1, NULL, 0);
5150
5151                                 /* Fetch first tuple in sortop's direction */
5152                                 if ((tup = index_getnext(index_scan,
5153                                                                                  indexscandir)) != NULL)
5154                                 {
5155                                         /* Extract the index column values from the heap tuple */
5156                                         ExecStoreTuple(tup, slot, InvalidBuffer, false);
5157                                         FormIndexDatum(indexInfo, slot, estate,
5158                                                                    values, isnull);
5159
5160                                         /* Shouldn't have got a null, but be careful */
5161                                         if (isnull[0])
5162                                                 elog(ERROR, "found unexpected null value in index \"%s\"",
5163                                                          RelationGetRelationName(indexRel));
5164
5165                                         /* Copy the index column value out to caller's context */
5166                                         MemoryContextSwitchTo(oldcontext);
5167                                         *min = datumCopy(values[0], typByVal, typLen);
5168                                         MemoryContextSwitchTo(tmpcontext);
5169                                 }
5170                                 else
5171                                         have_data = false;
5172
5173                                 index_endscan(index_scan);
5174                         }
5175
5176                         /* If max is requested, and we didn't find that the index is empty */
5177                         if (max && have_data)
5178                         {
5179                                 index_scan = index_beginscan(heapRel, indexRel, &SnapshotDirty,
5180                                                                                          1, 0);
5181                                 index_rescan(index_scan, scankeys, 1, NULL, 0);
5182
5183                                 /* Fetch first tuple in reverse direction */
5184                                 if ((tup = index_getnext(index_scan,
5185                                                                                  -indexscandir)) != NULL)
5186                                 {
5187                                         /* Extract the index column values from the heap tuple */
5188                                         ExecStoreTuple(tup, slot, InvalidBuffer, false);
5189                                         FormIndexDatum(indexInfo, slot, estate,
5190                                                                    values, isnull);
5191
5192                                         /* Shouldn't have got a null, but be careful */
5193                                         if (isnull[0])
5194                                                 elog(ERROR, "found unexpected null value in index \"%s\"",
5195                                                          RelationGetRelationName(indexRel));
5196
5197                                         /* Copy the index column value out to caller's context */
5198                                         MemoryContextSwitchTo(oldcontext);
5199                                         *max = datumCopy(values[0], typByVal, typLen);
5200                                         MemoryContextSwitchTo(tmpcontext);
5201                                 }
5202                                 else
5203                                         have_data = false;
5204
5205                                 index_endscan(index_scan);
5206                         }
5207
5208                         /* Clean everything up */
5209                         ExecDropSingleTupleTableSlot(slot);
5210
5211                         index_close(indexRel, AccessShareLock);
5212                         heap_close(heapRel, NoLock);
5213
5214                         MemoryContextSwitchTo(oldcontext);
5215                         FreeExecutorState(estate);
5216
5217                         /* And we're done */
5218                         break;
5219                 }
5220         }
5221
5222         return have_data;
5223 }
5224
5225 /*
5226  * find_join_input_rel
5227  *              Look up the input relation for a join.
5228  *
5229  * We assume that the input relation's RelOptInfo must have been constructed
5230  * already.
5231  */
5232 static RelOptInfo *
5233 find_join_input_rel(PlannerInfo *root, Relids relids)
5234 {
5235         RelOptInfo *rel = NULL;
5236
5237         switch (bms_membership(relids))
5238         {
5239                 case BMS_EMPTY_SET:
5240                         /* should not happen */
5241                         break;
5242                 case BMS_SINGLETON:
5243                         rel = find_base_rel(root, bms_singleton_member(relids));
5244                         break;
5245                 case BMS_MULTIPLE:
5246                         rel = find_join_rel(root, relids);
5247                         break;
5248         }
5249
5250         if (rel == NULL)
5251                 elog(ERROR, "could not find RelOptInfo for given relids");
5252
5253         return rel;
5254 }
5255
5256
5257 /*-------------------------------------------------------------------------
5258  *
5259  * Pattern analysis functions
5260  *
5261  * These routines support analysis of LIKE and regular-expression patterns
5262  * by the planner/optimizer.  It's important that they agree with the
5263  * regular-expression code in backend/regex/ and the LIKE code in
5264  * backend/utils/adt/like.c.  Also, the computation of the fixed prefix
5265  * must be conservative: if we report a string longer than the true fixed
5266  * prefix, the query may produce actually wrong answers, rather than just
5267  * getting a bad selectivity estimate!
5268  *
5269  * Note that the prefix-analysis functions are called from
5270  * backend/optimizer/path/indxpath.c as well as from routines in this file.
5271  *
5272  *-------------------------------------------------------------------------
5273  */
5274
5275 /*
5276  * Check whether char is a letter (and, hence, subject to case-folding)
5277  *
5278  * In multibyte character sets, we can't use isalpha, and it does not seem
5279  * worth trying to convert to wchar_t to use iswalpha.  Instead, just assume
5280  * any multibyte char is potentially case-varying.
5281  */
5282 static int
5283 pattern_char_isalpha(char c, bool is_multibyte,
5284                                          pg_locale_t locale, bool locale_is_c)
5285 {
5286         if (locale_is_c)
5287                 return (c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z');
5288         else if (is_multibyte && IS_HIGHBIT_SET(c))
5289                 return true;
5290 #ifdef HAVE_LOCALE_T
5291         else if (locale)
5292                 return isalpha_l((unsigned char) c, locale);
5293 #endif
5294         else
5295                 return isalpha((unsigned char) c);
5296 }
5297
5298 /*
5299  * Extract the fixed prefix, if any, for a pattern.
5300  *
5301  * *prefix is set to a palloc'd prefix string (in the form of a Const node),
5302  *      or to NULL if no fixed prefix exists for the pattern.
5303  * If rest_selec is not NULL, *rest_selec is set to an estimate of the
5304  *      selectivity of the remainder of the pattern (without any fixed prefix).
5305  * The prefix Const has the same type (TEXT or BYTEA) as the input pattern.
5306  *
5307  * The return value distinguishes no fixed prefix, a partial prefix,
5308  * or an exact-match-only pattern.
5309  */
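/*
 * Illustrative (hypothetical) example: the LIKE pattern 'foo%bar' yields the
 * prefix Const "foo" and Pattern_Prefix_Partial, with *rest_selec estimated
 * from the remainder "%bar" by like_selectivity(); a pattern containing no
 * wildcards at all, such as 'foobar', yields Pattern_Prefix_Exact.
 */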
5310
5311 static Pattern_Prefix_Status
5312 like_fixed_prefix(Const *patt_const, bool case_insensitive, Oid collation,
5313                                   Const **prefix_const, Selectivity *rest_selec)
5314 {
5315         char       *match;
5316         char       *patt;
5317         int                     pattlen;
5318         Oid                     typeid = patt_const->consttype;
5319         int                     pos,
5320                                 match_pos;
5321         bool            is_multibyte = (pg_database_encoding_max_length() > 1);
5322         pg_locale_t locale = 0;
5323         bool            locale_is_c = false;
5324
5325         /* the right-hand const is type text or bytea */
5326         Assert(typeid == BYTEAOID || typeid == TEXTOID);
5327
5328         if (case_insensitive)
5329         {
5330                 if (typeid == BYTEAOID)
5331                         ereport(ERROR,
5332                                         (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
5333                         errmsg("case insensitive matching not supported on type bytea")));
5334
5335                 /* If case-insensitive, we need locale info */
5336                 if (lc_ctype_is_c(collation))
5337                         locale_is_c = true;
5338                 else if (collation != DEFAULT_COLLATION_OID)
5339                 {
5340                         if (!OidIsValid(collation))
5341                         {
5342                                 /*
5343                                  * This typically means that the parser could not resolve a
5344                                  * conflict of implicit collations, so report it that way.
5345                                  */
5346                                 ereport(ERROR,
5347                                                 (errcode(ERRCODE_INDETERMINATE_COLLATION),
5348                                                  errmsg("could not determine which collation to use for ILIKE"),
5349                                                  errhint("Use the COLLATE clause to set the collation explicitly.")));
5350                         }
5351                         locale = pg_newlocale_from_collation(collation);
5352                 }
5353         }
5354
5355         if (typeid != BYTEAOID)
5356         {
5357                 patt = TextDatumGetCString(patt_const->constvalue);
5358                 pattlen = strlen(patt);
5359         }
5360         else
5361         {
5362                 bytea      *bstr = DatumGetByteaP(patt_const->constvalue);
5363
5364                 pattlen = VARSIZE(bstr) - VARHDRSZ;
5365                 patt = (char *) palloc(pattlen);
5366                 memcpy(patt, VARDATA(bstr), pattlen);
5367                 if ((Pointer) bstr != DatumGetPointer(patt_const->constvalue))
5368                         pfree(bstr);
5369         }
5370
5371         match = palloc(pattlen + 1);
5372         match_pos = 0;
5373         for (pos = 0; pos < pattlen; pos++)
5374         {
5375                 /* % and _ are wildcard characters in LIKE */
5376                 if (patt[pos] == '%' ||
5377                         patt[pos] == '_')
5378                         break;
5379
5380                 /* Backslash escapes the next character */
5381                 if (patt[pos] == '\\')
5382                 {
5383                         pos++;
5384                         if (pos >= pattlen)
5385                                 break;
5386                 }
5387
5388                 /* Stop if case-varying character (it's sort of a wildcard) */
5389                 if (case_insensitive &&
5390                   pattern_char_isalpha(patt[pos], is_multibyte, locale, locale_is_c))
5391                         break;
5392
5393                 match[match_pos++] = patt[pos];
5394         }
5395
5396         match[match_pos] = '\0';
5397
5398         if (typeid != BYTEAOID)
5399                 *prefix_const = string_to_const(match, typeid);
5400         else
5401                 *prefix_const = string_to_bytea_const(match, match_pos);
5402
5403         if (rest_selec != NULL)
5404                 *rest_selec = like_selectivity(&patt[pos], pattlen - pos,
5405                                                                            case_insensitive);
5406
5407         pfree(patt);
5408         pfree(match);
5409
5410         /* in LIKE, an empty pattern is an exact match! */
5411         if (pos == pattlen)
5412                 return Pattern_Prefix_Exact;    /* reached end of pattern, so exact */
5413
5414         if (match_pos > 0)
5415                 return Pattern_Prefix_Partial;
5416
5417         return Pattern_Prefix_None;
5418 }
5419
5420 static Pattern_Prefix_Status
5421 regex_fixed_prefix(Const *patt_const, bool case_insensitive, Oid collation,
5422                                    Const **prefix_const, Selectivity *rest_selec)
5423 {
5424         Oid                     typeid = patt_const->consttype;
5425         char       *prefix;
5426         bool            exact;
5427
5428         /*
5429          * This check should be unnecessary, since no bytea regex operators are
5430          * defined.  Note, though, that the rest of this function has *not* been
5431          * made safe for binary (possibly NUL-byte-containing) strings.
5432          */
5433         if (typeid == BYTEAOID)
5434                 ereport(ERROR,
5435                                 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
5436                  errmsg("regular-expression matching not supported on type bytea")));
5437
5438         /* Use the regexp machinery to extract the prefix, if any */
5439         prefix = regexp_fixed_prefix(DatumGetTextPP(patt_const->constvalue),
5440                                                                  case_insensitive, collation,
5441                                                                  &exact);
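        /*
         * Illustrative (hedged) behavior: an anchored pattern such as '^foo'
         * typically yields prefix "foo" with exact = false, '^foo$' yields an
         * exact match, and an unanchored pattern yields no fixed prefix.
         */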
5442
5443         if (prefix == NULL)
5444         {
5445                 *prefix_const = NULL;
5446
5447                 if (rest_selec != NULL)
5448                 {
5449                         char       *patt = TextDatumGetCString(patt_const->constvalue);
5450
5451                         *rest_selec = regex_selectivity(patt, strlen(patt),
5452                                                                                         case_insensitive,
5453                                                                                         0);
5454                         pfree(patt);
5455                 }
5456
5457                 return Pattern_Prefix_None;
5458         }
5459
5460         *prefix_const = string_to_const(prefix, typeid);
5461
5462         if (rest_selec != NULL)
5463         {
5464                 if (exact)
5465                 {
5466                         /* Exact match, so there's no additional selectivity */
5467                         *rest_selec = 1.0;
5468                 }
5469                 else
5470                 {
5471                         char       *patt = TextDatumGetCString(patt_const->constvalue);
5472
5473                         *rest_selec = regex_selectivity(patt, strlen(patt),
5474                                                                                         case_insensitive,
5475                                                                                         strlen(prefix));
5476                         pfree(patt);
5477                 }
5478         }
5479
5480         pfree(prefix);
5481
5482         if (exact)
5483                 return Pattern_Prefix_Exact;    /* pattern specifies exact match */
5484         else
5485                 return Pattern_Prefix_Partial;
5486 }
5487
5488 Pattern_Prefix_Status
5489 pattern_fixed_prefix(Const *patt, Pattern_Type ptype, Oid collation,
5490                                          Const **prefix, Selectivity *rest_selec)
5491 {
5492         Pattern_Prefix_Status result;
5493
5494         switch (ptype)
5495         {
5496                 case Pattern_Type_Like:
5497                         result = like_fixed_prefix(patt, false, collation,
5498                                                                            prefix, rest_selec);
5499                         break;
5500                 case Pattern_Type_Like_IC:
5501                         result = like_fixed_prefix(patt, true, collation,
5502                                                                            prefix, rest_selec);
5503                         break;
5504                 case Pattern_Type_Regex:
5505                         result = regex_fixed_prefix(patt, false, collation,
5506                                                                                 prefix, rest_selec);
5507                         break;
5508                 case Pattern_Type_Regex_IC:
5509                         result = regex_fixed_prefix(patt, true, collation,
5510                                                                                 prefix, rest_selec);
5511                         break;
5512                 default:
5513                         elog(ERROR, "unrecognized ptype: %d", (int) ptype);
5514                         result = Pattern_Prefix_None;           /* keep compiler quiet */
5515                         break;
5516         }
5517         return result;
5518 }
5519
5520 /*
5521  * Estimate the selectivity of a fixed prefix for a pattern match.
5522  *
5523  * A fixed prefix "foo" is estimated as the selectivity of the expression
5524  * "variable >= 'foo' AND variable < 'fop'" (see also indxpath.c).
5525  *
5526  * The selectivity estimate is with respect to the portion of the column
5527  * population represented by the histogram --- the caller must fold this
5528  * together with info about MCVs and NULLs.
5529  *
5530  * We use the >= and < operators from the specified btree opfamily to do the
5531  * estimation.  The given variable and Const must be of the associated
5532  * datatype.
5533  *
5534  * XXX Note: we make use of the upper bound to estimate operator selectivity
5535  * even if the locale is such that we cannot rely on the upper-bound string.
5536  * The selectivity only needs to be approximately right anyway, so it seems
5537  * more useful to use the upper-bound code than not.
5538  */
5539 static Selectivity
5540 prefix_selectivity(PlannerInfo *root, VariableStatData *vardata,
5541                                    Oid vartype, Oid opfamily, Const *prefixcon)
5542 {
5543         Selectivity prefixsel;
5544         Oid                     cmpopr;
5545         FmgrInfo        opproc;
5546         Const      *greaterstrcon;
5547         Selectivity eq_sel;
5548
5549         cmpopr = get_opfamily_member(opfamily, vartype, vartype,
5550                                                                  BTGreaterEqualStrategyNumber);
5551         if (cmpopr == InvalidOid)
5552                 elog(ERROR, "no >= operator for opfamily %u", opfamily);
5553         fmgr_info(get_opcode(cmpopr), &opproc);
5554
5555         prefixsel = ineq_histogram_selectivity(root, vardata, &opproc, true,
5556                                                                                    prefixcon->constvalue,
5557                                                                                    prefixcon->consttype);
5558
5559         if (prefixsel < 0.0)
5560         {
5561                 /* No histogram is present ... return a suitable default estimate */
5562                 return DEFAULT_MATCH_SEL;
5563         }
5564
5565         /*-------
5566          * If we can create a string larger than the prefix, say
5567          *      "x < greaterstr".
5568          *-------
5569          */
5570         cmpopr = get_opfamily_member(opfamily, vartype, vartype,
5571                                                                  BTLessStrategyNumber);
5572         if (cmpopr == InvalidOid)
5573                 elog(ERROR, "no < operator for opfamily %u", opfamily);
5574         fmgr_info(get_opcode(cmpopr), &opproc);
5575         greaterstrcon = make_greater_string(prefixcon, &opproc,
5576                                                                                 DEFAULT_COLLATION_OID);
5577         if (greaterstrcon)
5578         {
5579                 Selectivity topsel;
5580
5581                 topsel = ineq_histogram_selectivity(root, vardata, &opproc, false,
5582                                                                                         greaterstrcon->constvalue,
5583                                                                                         greaterstrcon->consttype);
5584
5585                 /* ineq_histogram_selectivity worked before, it shouldn't fail now */
5586                 Assert(topsel >= 0.0);
5587
5588                 /*
5589                  * Merge the two selectivities in the same way as for a range query
5590                  * (see clauselist_selectivity()).  Note that we don't need to worry
5591                  * about double-exclusion of nulls, since ineq_histogram_selectivity
5592                  * doesn't count those anyway.
5593                  */
5594                 prefixsel = topsel + prefixsel - 1.0;
5595         }
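        /*
         * Worked example (hypothetical numbers): if the histogram says
         * P(var >= 'foo') = 0.30 and P(var < 'fop') = 0.75, the combined
         * range estimate is 0.75 + 0.30 - 1.0 = 0.05.
         */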
5596
5597         /*
5598          * If the prefix is long then the two bounding values might be too close
5599          * together for the histogram to distinguish them usefully, resulting in a
5600          * zero estimate (plus or minus roundoff error). To avoid returning a
5601          * ridiculously small estimate, compute the estimated selectivity for
5602          * "variable = 'foo'", and clamp to that. (Obviously, the resultant
5603          * estimate should be at least that.)
5604          *
5605          * We apply this even if we couldn't make a greater string.  That case
5606          * suggests that the prefix is near the maximum possible, and thus
5607          * probably off the end of the histogram, and thus we probably got a very
5608          * small estimate from the >= condition; so we still need to clamp.
5609          */
5610         cmpopr = get_opfamily_member(opfamily, vartype, vartype,
5611                                                                  BTEqualStrategyNumber);
5612         if (cmpopr == InvalidOid)
5613                 elog(ERROR, "no = operator for opfamily %u", opfamily);
5614         eq_sel = var_eq_const(vardata, cmpopr, prefixcon->constvalue,
5615                                                   false, true);
5616
5617         prefixsel = Max(prefixsel, eq_sel);
5618
5619         return prefixsel;
5620 }
5621
5622
5623 /*
5624  * Estimate the selectivity of a pattern of the specified type.
5625  * Note that any fixed prefix of the pattern will have been removed already,
5626  * so actually we may be looking at just a fragment of the pattern.
5627  *
5628  * For now, we use a very simplistic approach: fixed characters reduce the
5629  * selectivity a good deal, character ranges reduce it a little,
5630  * wildcards (such as % for LIKE or .* for regex) increase it.
5631  */
5632
5633 #define FIXED_CHAR_SEL  0.20    /* about 1/5 */
5634 #define CHAR_RANGE_SEL  0.25
5635 #define ANY_CHAR_SEL    0.9             /* not 1, since it won't match end-of-string */
5636 #define FULL_WILDCARD_SEL 5.0
5637 #define PARTIAL_WILDCARD_SEL 2.0
5638
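/*
 * Worked example (hypothetical remainder pattern): like_selectivity("a_b%")
 * gives 0.20 * 0.9 * 0.20 * 5.0 = 0.18; a leading wildcard, as in "%abc",
 * is skipped because it is already reflected in the initial selectivity.
 */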
5639 static Selectivity
5640 like_selectivity(const char *patt, int pattlen, bool case_insensitive)
5641 {
5642         Selectivity sel = 1.0;
5643         int                     pos;
5644
5645         /* Skip any leading wildcard; it's already factored into initial sel */
5646         for (pos = 0; pos < pattlen; pos++)
5647         {
5648                 if (patt[pos] != '%' && patt[pos] != '_')
5649                         break;
5650         }
5651
5652         for (; pos < pattlen; pos++)
5653         {
5654                 /* % and _ are wildcard characters in LIKE */
5655                 if (patt[pos] == '%')
5656                         sel *= FULL_WILDCARD_SEL;
5657                 else if (patt[pos] == '_')
5658                         sel *= ANY_CHAR_SEL;
5659                 else if (patt[pos] == '\\')
5660                 {
5661                         /* Backslash quotes the next character */
5662                         pos++;
5663                         if (pos >= pattlen)
5664                                 break;
5665                         sel *= FIXED_CHAR_SEL;
5666                 }
5667                 else
5668                         sel *= FIXED_CHAR_SEL;
5669         }
5670         /* Could get sel > 1 if multiple wildcards */
5671         if (sel > 1.0)
5672                 sel = 1.0;
5673         return sel;
5674 }
5675
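/*
 * Recursively estimate the selectivity of a regex fragment: parenthesized
 * groups are handled by recursing on their contents, and a top-level "|"
 * sums the probabilities of the alternatives.  Worked example (hypothetical
 * fragment): "(a|b)c" gives (0.20 + 0.20) * 0.20 = 0.08.
 */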
5676 static Selectivity
5677 regex_selectivity_sub(const char *patt, int pattlen, bool case_insensitive)
5678 {
5679         Selectivity sel = 1.0;
5680         int                     paren_depth = 0;
5681         int                     paren_pos = 0;  /* dummy init to keep compiler quiet */
5682         int                     pos;
5683
5684         for (pos = 0; pos < pattlen; pos++)
5685         {
5686                 if (patt[pos] == '(')
5687                 {
5688                         if (paren_depth == 0)
5689                                 paren_pos = pos;        /* remember start of parenthesized item */
5690                         paren_depth++;
5691                 }
5692                 else if (patt[pos] == ')' && paren_depth > 0)
5693                 {
5694                         paren_depth--;
5695                         if (paren_depth == 0)
5696                                 sel *= regex_selectivity_sub(patt + (paren_pos + 1),
5697                                                                                          pos - (paren_pos + 1),
5698                                                                                          case_insensitive);
5699                 }
5700                 else if (patt[pos] == '|' && paren_depth == 0)
5701                 {
5702                         /*
5703                          * If unquoted | is present at paren level 0 in pattern, we have
5704                          * multiple alternatives; sum their probabilities.
5705                          */
5706                         sel += regex_selectivity_sub(patt + (pos + 1),
5707                                                                                  pattlen - (pos + 1),
5708                                                                                  case_insensitive);
5709                         break;                          /* rest of pattern is now processed */
5710                 }
5711                 else if (patt[pos] == '[')
5712                 {
5713                         bool            negclass = false;
5714
5715                         if (patt[++pos] == '^')
5716                         {
5717                                 negclass = true;
5718                                 pos++;
5719                         }
5720                         if (patt[pos] == ']')           /* ']' at start of class is not
5721                                                                                  * special */
5722                                 pos++;
5723                         while (pos < pattlen && patt[pos] != ']')
5724                                 pos++;
5725                         if (paren_depth == 0)
5726                                 sel *= (negclass ? (1.0 - CHAR_RANGE_SEL) : CHAR_RANGE_SEL);
5727                 }
5728                 else if (patt[pos] == '.')
5729                 {
5730                         if (paren_depth == 0)
5731                                 sel *= ANY_CHAR_SEL;
5732                 }
5733                 else if (patt[pos] == '*' ||
5734                                  patt[pos] == '?' ||
5735                                  patt[pos] == '+')
5736                 {
5737                         /* Ought to be smarter about quantifiers... */
5738                         if (paren_depth == 0)
5739                                 sel *= PARTIAL_WILDCARD_SEL;
5740                 }
5741                 else if (patt[pos] == '{')
5742                 {
5743                         while (pos < pattlen && patt[pos] != '}')
5744                                 pos++;
5745                         if (paren_depth == 0)
5746                                 sel *= PARTIAL_WILDCARD_SEL;
5747                 }
5748                 else if (patt[pos] == '\\')
5749                 {
5750                         /* backslash quotes the next character */
5751                         pos++;
5752                         if (pos >= pattlen)
5753                                 break;
5754                         if (paren_depth == 0)
5755                                 sel *= FIXED_CHAR_SEL;
5756                 }
5757                 else
5758                 {
5759                         if (paren_depth == 0)
5760                                 sel *= FIXED_CHAR_SEL;
5761                 }
5762         }
5763         /* Could get sel > 1 if multiple wildcards */
5764         if (sel > 1.0)
5765                 sel = 1.0;
5766         return sel;
5767 }
5768
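/*
 * Estimate the selectivity of a whole regex pattern.  fixed_prefix_len is the
 * length of the fixed prefix already accounted for by the caller; its
 * per-character contribution is discounted below.
 *
 * Rough illustration (not exhaustive): a pattern of k ordinary characters
 * with no trailing '$' comes out near FIXED_CHAR_SEL^k * FULL_WILDCARD_SEL,
 * before the prefix discount and the final clamp.
 */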
5769 static Selectivity
5770 regex_selectivity(const char *patt, int pattlen, bool case_insensitive,
5771                                   int fixed_prefix_len)
5772 {
5773         Selectivity sel;
5774
5775         /* If patt doesn't end with $, consider it to have a trailing wildcard */
5776         if (pattlen > 0 && patt[pattlen - 1] == '$' &&
5777                 (pattlen == 1 || patt[pattlen - 2] != '\\'))
5778         {
5779                 /* has trailing $ */
5780                 sel = regex_selectivity_sub(patt, pattlen - 1, case_insensitive);
5781         }
5782         else
5783         {
5784                 /* no trailing $ */
5785                 sel = regex_selectivity_sub(patt, pattlen, case_insensitive);
5786                 sel *= FULL_WILDCARD_SEL;
5787         }
5788
5789         /* If there's a fixed prefix, discount its selectivity */
5790         if (fixed_prefix_len > 0)
5791                 sel /= pow(FIXED_CHAR_SEL, fixed_prefix_len);
5792
5793         /* Make sure result stays in range */
5794         CLAMP_PROBABILITY(sel);
5795         return sel;
5796 }
5797
5798
5799 /*
5800  * For bytea, the increment function need only increment the current byte
5801  * (there are no multibyte characters to worry about).
5802  */
5803 static bool
5804 byte_increment(unsigned char *ptr, int len)
5805 {
5806         if (*ptr >= 255)
5807                 return false;
5808         (*ptr)++;
5809         return true;
5810 }
5811
5812 /*
5813  * Try to generate a string greater than the given string or any
5814  * string it is a prefix of.  If successful, return a palloc'd string
5815  * in the form of a Const node; else return NULL.
5816  *
5817  * The caller must provide the appropriate "less than" comparison function
5818  * for testing the strings, along with the collation to use.
5819  *
5820  * The key requirement here is that given a prefix string, say "foo",
5821  * we must be able to generate another string "fop" that is greater than
5822  * all strings "foobar" starting with "foo".  We can test that we have
5823  * generated a string greater than the prefix string, but in non-C collations
5824  * that is no bulletproof guarantee that every extension of the prefix sorts
5825  * before it; for example, "foo " is less than "foo!", yet it is not clear
5826  * that a "dictionary" sort ordering will consider "foo!" less than
5827  * "foo bar".  CAUTION: Therefore, this function should be used only for
5828  * estimation purposes when working in a non-C collation.
5829  *
5830  * To try to catch most cases where an extended string might otherwise sort
5831  * before the result value, we determine which of the strings "Z", "z", "y",
5832  * and "9" is seen as largest by the collation, and append that to the given
5833  * prefix before trying to find a string that compares as larger.
5834  *
5835  * To search for a greater string, we repeatedly "increment" the rightmost
5836  * character, using an encoding-specific character incrementer function.
5837  * When it's no longer possible to increment the last character, we truncate
5838  * off that character and start incrementing the next-to-rightmost.
5839  * For example, if "z" were the last character in the sort order, then we
5840  * could produce "foo" as a string greater than "fonz".
5841  *
5842  * This could be rather slow in the worst case, but in most cases we
5843  * won't have to try more than one or two strings before succeeding.
5844  *
5845  * Note that it's important for the character incrementer not to be too anal
5846  * about producing every possible character code, since in some cases the only
5847  * way to get a larger string is to increment a previous character position.
5848  * So we don't want to spend too much time trying every possible character
5849  * code at the last position.  A good rule of thumb is to be sure that we
5850  * don't try more than 256*K values for a K-byte character (and definitely
5851  * not 256^K, which is what an exhaustive search would approach).
5852  */
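/*
 * A rough illustration (C locale, plain ASCII data): given the text prefix
 * "abc", incrementing the last byte yields "abd", which already compares
 * greater than "abc", so that is the kind of value we expect to return.
 */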
5853 Const *
5854 make_greater_string(const Const *str_const, FmgrInfo *ltproc, Oid collation)
5855 {
5856         Oid                     datatype = str_const->consttype;
5857         char       *workstr;
5858         int                     len;
5859         Datum           cmpstr;
5860         text       *cmptxt = NULL;
5861         mbcharacter_incrementer charinc;
5862
5863         /*
5864          * Get a modifiable copy of the prefix string in C-string format, and set
5865          * up the string we will compare to as a Datum.  In C locale this can just
5866          * be the given prefix string, otherwise we need to add a suffix.  Types
5867          * NAME and BYTEA sort bytewise so they don't need a suffix either.
5868          */
5869         if (datatype == NAMEOID)
5870         {
5871                 workstr = DatumGetCString(DirectFunctionCall1(nameout,
5872                                                                                                           str_const->constvalue));
5873                 len = strlen(workstr);
5874                 cmpstr = str_const->constvalue;
5875         }
5876         else if (datatype == BYTEAOID)
5877         {
5878                 bytea      *bstr = DatumGetByteaP(str_const->constvalue);
5879
5880                 len = VARSIZE(bstr) - VARHDRSZ;
5881                 workstr = (char *) palloc(len);
5882                 memcpy(workstr, VARDATA(bstr), len);
5883                 if ((Pointer) bstr != DatumGetPointer(str_const->constvalue))
5884                         pfree(bstr);
5885                 cmpstr = str_const->constvalue;
5886         }
5887         else
5888         {
5889                 workstr = TextDatumGetCString(str_const->constvalue);
5890                 len = strlen(workstr);
5891                 if (lc_collate_is_c(collation) || len == 0)
5892                         cmpstr = str_const->constvalue;
5893                 else
5894                 {
5895                         /* If first time through, determine the suffix to use */
5896                         static char suffixchar = 0;
5897                         static Oid      suffixcollation = 0;
5898
5899                         if (!suffixchar || suffixcollation != collation)
5900                         {
5901                                 char       *best;
5902
5903                                 best = "Z";
5904                                 if (varstr_cmp(best, 1, "z", 1, collation) < 0)
5905                                         best = "z";
5906                                 if (varstr_cmp(best, 1, "y", 1, collation) < 0)
5907                                         best = "y";
5908                                 if (varstr_cmp(best, 1, "9", 1, collation) < 0)
5909                                         best = "9";
5910                                 suffixchar = *best;
5911                                 suffixcollation = collation;
5912                         }
5913
5914                         /* And build the string to compare to */
5915                         cmptxt = (text *) palloc(VARHDRSZ + len + 1);
5916                         SET_VARSIZE(cmptxt, VARHDRSZ + len + 1);
5917                         memcpy(VARDATA(cmptxt), workstr, len);
5918                         *(VARDATA(cmptxt) + len) = suffixchar;
5919                         cmpstr = PointerGetDatum(cmptxt);
5920                 }
5921         }
5922
5923         /* Select appropriate character-incrementer function */
5924         if (datatype == BYTEAOID)
5925                 charinc = byte_increment;
5926         else
5927                 charinc = pg_database_encoding_character_incrementer();
5928
5929         /* And search ... */
5930         while (len > 0)
5931         {
5932                 int                     charlen;
5933                 unsigned char *lastchar;
5934
5935                 /* Identify the last character --- for bytea, just the last byte */
5936                 if (datatype == BYTEAOID)
5937                         charlen = 1;
5938                 else
5939                         charlen = len - pg_mbcliplen(workstr, len, len - 1);
5940                 lastchar = (unsigned char *) (workstr + len - charlen);
5941
5942                 /*
5943                  * Try to generate a larger string by incrementing the last character
5944                  * (for BYTEA, we treat each byte as a character).
5945                  *
5946                  * Note: the incrementer function is expected to return true if it's
5947                  * generated a valid-per-the-encoding new character, otherwise false.
5948                  * The contents of the character on false return are unspecified.
5949                  */
5950                 while (charinc(lastchar, charlen))
5951                 {
5952                         Const      *workstr_const;
5953
5954                         if (datatype == BYTEAOID)
5955                                 workstr_const = string_to_bytea_const(workstr, len);
5956                         else
5957                                 workstr_const = string_to_const(workstr, datatype);
5958
5959                         if (DatumGetBool(FunctionCall2Coll(ltproc,
5960                                                                                            collation,
5961                                                                                            cmpstr,
5962                                                                                            workstr_const->constvalue)))
5963                         {
5964                                 /* Successfully made a string larger than cmpstr */
5965                                 if (cmptxt)
5966                                         pfree(cmptxt);
5967                                 pfree(workstr);
5968                                 return workstr_const;
5969                         }
5970
5971                         /* No good, release unusable value and try again */
5972                         pfree(DatumGetPointer(workstr_const->constvalue));
5973                         pfree(workstr_const);
5974                 }
5975
5976                 /*
5977                  * No luck here, so truncate off the last character and try to
5978                  * increment the next one.
5979                  */
5980                 len -= charlen;
5981                 workstr[len] = '\0';
5982         }
5983
5984         /* Failed... */
5985         if (cmptxt)
5986                 pfree(cmptxt);
5987         pfree(workstr);
5988
5989         return NULL;
5990 }
5991
5992 /*
5993  * Generate a Datum of the appropriate type from a C string.
5994  * Note that all of the supported types are pass-by-ref, so the
5995  * returned value should be pfree'd if no longer needed.
5996  */
5997 static Datum
5998 string_to_datum(const char *str, Oid datatype)
5999 {
6000         Assert(str != NULL);
6001
6002         /*
6003          * We cheat a little by assuming that CStringGetTextDatum() will do for
6004          * bpchar and varchar constants too...
6005          */
6006         if (datatype == NAMEOID)
6007                 return DirectFunctionCall1(namein, CStringGetDatum(str));
6008         else if (datatype == BYTEAOID)
6009                 return DirectFunctionCall1(byteain, CStringGetDatum(str));
6010         else
6011                 return CStringGetTextDatum(str);
6012 }
6013
6014 /*
6015  * Generate a Const node of the appropriate type from a C string.
6016  */
6017 static Const *
6018 string_to_const(const char *str, Oid datatype)
6019 {
6020         Datum           conval = string_to_datum(str, datatype);
6021         Oid                     collation;
6022         int                     constlen;
6023
6024         /*
6025          * We only need to support a few datatypes here, so hard-wire properties
6026          * instead of incurring the expense of catalog lookups.
6027          */
6028         switch (datatype)
6029         {
6030                 case TEXTOID:
6031                 case VARCHAROID:
6032                 case BPCHAROID:
6033                         collation = DEFAULT_COLLATION_OID;
6034                         constlen = -1;
6035                         break;
6036
6037                 case NAMEOID:
6038                         collation = InvalidOid;
6039                         constlen = NAMEDATALEN;
6040                         break;
6041
6042                 case BYTEAOID:
6043                         collation = InvalidOid;
6044                         constlen = -1;
6045                         break;
6046
6047                 default:
6048                         elog(ERROR, "unexpected datatype in string_to_const: %u",
6049                                  datatype);
6050                         return NULL;
6051         }
6052
6053         return makeConst(datatype, -1, collation, constlen,
6054                                          conval, false, false);
6055 }
6056
6057 /*
6058  * Generate a Const node of bytea type from a binary C string and a length.
6059  */
6060 static Const *
6061 string_to_bytea_const(const char *str, size_t str_len)
6062 {
6063         bytea      *bstr = palloc(VARHDRSZ + str_len);
6064         Datum           conval;
6065
6066         memcpy(VARDATA(bstr), str, str_len);
6067         SET_VARSIZE(bstr, VARHDRSZ + str_len);
6068         conval = PointerGetDatum(bstr);
6069
6070         return makeConst(BYTEAOID, -1, InvalidOid, -1, conval, false, false);
6071 }
6072
6073 /*-------------------------------------------------------------------------
6074  *
6075  * Index cost estimation functions
6076  *
6077  *-------------------------------------------------------------------------
6078  */
6079
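/*
 * Examine the indexquals attached to an IndexPath and build a list of
 * IndexQualInfo structs, one per indexqual.  Each entry records the qual's
 * RestrictInfo, the index column it applies to, which side of the operator
 * the index column appears on, the comparison operator, and the non-index
 * ("other") operand.
 */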
6080 List *
6081 deconstruct_indexquals(IndexPath *path)
6082 {
6083         List       *result = NIL;
6084         IndexOptInfo *index = path->indexinfo;
6085         ListCell   *lcc,
6086                            *lci;
6087
6088         forboth(lcc, path->indexquals, lci, path->indexqualcols)
6089         {
6090                 RestrictInfo *rinfo = castNode(RestrictInfo, lfirst(lcc));
6091                 int                     indexcol = lfirst_int(lci);
6092                 Expr       *clause;
6093                 Node       *leftop,
6094                                    *rightop;
6095                 IndexQualInfo *qinfo;
6096
6097                 clause = rinfo->clause;
6098
6099                 qinfo = (IndexQualInfo *) palloc(sizeof(IndexQualInfo));
6100                 qinfo->rinfo = rinfo;
6101                 qinfo->indexcol = indexcol;
6102
6103                 if (IsA(clause, OpExpr))
6104                 {
6105                         qinfo->clause_op = ((OpExpr *) clause)->opno;
6106                         leftop = get_leftop(clause);
6107                         rightop = get_rightop(clause);
6108                         if (match_index_to_operand(leftop, indexcol, index))
6109                         {
6110                                 qinfo->varonleft = true;
6111                                 qinfo->other_operand = rightop;
6112                         }
6113                         else
6114                         {
6115                                 Assert(match_index_to_operand(rightop, indexcol, index));
6116                                 qinfo->varonleft = false;
6117                                 qinfo->other_operand = leftop;
6118                         }
6119                 }
6120                 else if (IsA(clause, RowCompareExpr))
6121                 {
6122                         RowCompareExpr *rc = (RowCompareExpr *) clause;
6123
6124                         qinfo->clause_op = linitial_oid(rc->opnos);
6125                         /* Examine only first columns to determine left/right sides */
6126                         if (match_index_to_operand((Node *) linitial(rc->largs),
6127                                                                            indexcol, index))
6128                         {
6129                                 qinfo->varonleft = true;
6130                                 qinfo->other_operand = (Node *) rc->rargs;
6131                         }
6132                         else
6133                         {
6134                                 Assert(match_index_to_operand((Node *) linitial(rc->rargs),
6135                                                                                           indexcol, index));
6136                                 qinfo->varonleft = false;
6137                                 qinfo->other_operand = (Node *) rc->largs;
6138                         }
6139                 }
6140                 else if (IsA(clause, ScalarArrayOpExpr))
6141                 {
6142                         ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) clause;
6143
6144                         qinfo->clause_op = saop->opno;
6145                         /* index column is always on the left in this case */
6146                         Assert(match_index_to_operand((Node *) linitial(saop->args),
6147                                                                                   indexcol, index));
6148                         qinfo->varonleft = true;
6149                         qinfo->other_operand = (Node *) lsecond(saop->args);
6150                 }
6151                 else if (IsA(clause, NullTest))
6152                 {
6153                         qinfo->clause_op = InvalidOid;
6154                         Assert(match_index_to_operand((Node *) ((NullTest *) clause)->arg,
6155                                                                                   indexcol, index));
6156                         qinfo->varonleft = true;
6157                         qinfo->other_operand = NULL;
6158                 }
6159                 else
6160                 {
6161                         elog(ERROR, "unsupported indexqual type: %d",
6162                                  (int) nodeTag(clause));
6163                 }
6164
6165                 result = lappend(result, qinfo);
6166         }
6167         return result;
6168 }
6169
6170 /*
6171  * Simple function to compute the total eval cost of the "other operands"
6172  * in an IndexQualInfo list.  Since we know these will be evaluated just
6173  * once per scan, there's no need to distinguish startup from per-row cost.
6174  */
6175 static Cost
6176 other_operands_eval_cost(PlannerInfo *root, List *qinfos)
6177 {
6178         Cost            qual_arg_cost = 0;
6179         ListCell   *lc;
6180
6181         foreach(lc, qinfos)
6182         {
6183                 IndexQualInfo *qinfo = (IndexQualInfo *) lfirst(lc);
6184                 QualCost        index_qual_cost;
6185
6186                 cost_qual_eval_node(&index_qual_cost, qinfo->other_operand, root);
6187                 qual_arg_cost += index_qual_cost.startup + index_qual_cost.per_tuple;
6188         }
6189         return qual_arg_cost;
6190 }
6191
6192 /*
6193  * Get other-operand eval cost for an index orderby list.
6194  *
6195  * Index orderby expressions aren't represented as RestrictInfos (since they
6196  * aren't boolean, usually).  So we can't apply deconstruct_indexquals to
6197  * them.  However, they are much simpler to deal with since they are always
6198  * OpExprs and the index column is always on the left.
6199  */
6200 static Cost
6201 orderby_operands_eval_cost(PlannerInfo *root, IndexPath *path)
6202 {
6203         Cost            qual_arg_cost = 0;
6204         ListCell   *lc;
6205
6206         foreach(lc, path->indexorderbys)
6207         {
6208                 Expr       *clause = (Expr *) lfirst(lc);
6209                 Node       *other_operand;
6210                 QualCost        index_qual_cost;
6211
6212                 if (IsA(clause, OpExpr))
6213                 {
6214                         other_operand = get_rightop(clause);
6215                 }
6216                 else
6217                 {
6218                         elog(ERROR, "unsupported indexorderby type: %d",
6219                                  (int) nodeTag(clause));
6220                         other_operand = NULL;           /* keep compiler quiet */
6221                 }
6222
6223                 cost_qual_eval_node(&index_qual_cost, other_operand, root);
6224                 qual_arg_cost += index_qual_cost.startup + index_qual_cost.per_tuple;
6225         }
6226         return qual_arg_cost;
6227 }
6228
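/*
 * genericcostestimate is a general-purpose cost estimator usable by most
 * index AMs.  Callers are expected to zero the GenericCosts struct first;
 * they may pre-set costs->numIndexTuples if they have a better-than-default
 * way of estimating the number of index tuples visited (see btcostestimate
 * below for an example).  Preliminary cost estimates plus several
 * intermediate values are returned in *costs so that AM-specific callers
 * can adjust them further.
 */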
6229 void
6230 genericcostestimate(PlannerInfo *root,
6231                                         IndexPath *path,
6232                                         double loop_count,
6233                                         List *qinfos,
6234                                         GenericCosts *costs)
6235 {
6236         IndexOptInfo *index = path->indexinfo;
6237         List       *indexQuals = path->indexquals;
6238         List       *indexOrderBys = path->indexorderbys;
6239         Cost            indexStartupCost;
6240         Cost            indexTotalCost;
6241         Selectivity indexSelectivity;
6242         double          indexCorrelation;
6243         double          numIndexPages;
6244         double          numIndexTuples;
6245         double          spc_random_page_cost;
6246         double          num_sa_scans;
6247         double          num_outer_scans;
6248         double          num_scans;
6249         double          qual_op_cost;
6250         double          qual_arg_cost;
6251         List       *selectivityQuals;
6252         ListCell   *l;
6253
6254         /*
6255          * If the index is partial, AND the index predicate with the explicitly
6256          * given indexquals to produce a more accurate idea of the index
6257          * selectivity.
6258          */
6259         selectivityQuals = add_predicate_to_quals(index, indexQuals);
6260
6261         /*
6262          * Check for ScalarArrayOpExpr index quals, and estimate the number of
6263          * index scans that will be performed.
6264          */
6265         num_sa_scans = 1;
6266         foreach(l, indexQuals)
6267         {
6268                 RestrictInfo *rinfo = (RestrictInfo *) lfirst(l);
6269
6270                 if (IsA(rinfo->clause, ScalarArrayOpExpr))
6271                 {
6272                         ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) rinfo->clause;
6273                         int                     alength = estimate_array_length(lsecond(saop->args));
6274
6275                         if (alength > 1)
6276                                 num_sa_scans *= alength;
6277                 }
6278         }
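        /*
         * For instance, a qual such as "x = ANY(ARRAY[...])" whose array is
         * estimated at 10 elements multiplies num_sa_scans by 10 (a
         * hypothetical illustration; the real count comes from
         * estimate_array_length above).
         */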
6279
6280         /* Estimate the fraction of main-table tuples that will be visited */
6281         indexSelectivity = clauselist_selectivity(root, selectivityQuals,
6282                                                                                           index->rel->relid,
6283                                                                                           JOIN_INNER,
6284                                                                                           NULL);
6285
6286         /*
6287          * If caller didn't give us an estimate, estimate the number of index
6288          * tuples that will be visited.  We do it in this rather peculiar-looking
6289          * way in order to get the right answer for partial indexes.
6290          */
6291         numIndexTuples = costs->numIndexTuples;
6292         if (numIndexTuples <= 0.0)
6293         {
6294                 numIndexTuples = indexSelectivity * index->rel->tuples;
6295
6296                 /*
6297                  * The above calculation counts all the tuples visited across all
6298                  * scans induced by ScalarArrayOpExpr nodes.  We want to consider the
6299                  * average per-indexscan number, so adjust.  This is a handy place to
6300                  * round to integer, too.  (If caller supplied tuple estimate, it's
6301                  * responsible for handling these considerations.)
6302                  */
6303                 numIndexTuples = rint(numIndexTuples / num_sa_scans);
6304         }
6305
6306         /*
6307          * We can bound the number of tuples by the index size in any case. Also,
6308          * always estimate at least one tuple is touched, even when
6309          * indexSelectivity estimate is tiny.
6310          */
6311         if (numIndexTuples > index->tuples)
6312                 numIndexTuples = index->tuples;
6313         if (numIndexTuples < 1.0)
6314                 numIndexTuples = 1.0;
6315
6316         /*
6317          * Estimate the number of index pages that will be retrieved.
6318          *
6319          * We use the simplistic method of taking a pro-rata fraction of the total
6320          * number of index pages.  In effect, this counts only leaf pages and not
6321          * any overhead such as index metapage or upper tree levels.
6322          *
6323          * In practice access to upper index levels is often nearly free because
6324          * those tend to stay in cache under load; moreover, the cost involved is
6325          * highly dependent on index type.  We therefore ignore such costs here
6326          * and leave it to the caller to add a suitable charge if needed.
6327          */
6328         if (index->pages > 1 && index->tuples > 1)
6329                 numIndexPages = ceil(numIndexTuples * index->pages / index->tuples);
6330         else
6331                 numIndexPages = 1.0;
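        /*
         * For example, expecting to visit 100 of 10,000 index tuples in a
         * 500-page index gives ceil(100 * 500 / 10000) = 5 leaf pages.
         */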
6332
6333         /* fetch estimated page cost for tablespace containing index */
6334         get_tablespace_page_costs(index->reltablespace,
6335                                                           &spc_random_page_cost,
6336                                                           NULL);
6337
6338         /*
6339          * Now compute the disk access costs.
6340          *
6341          * The above calculations are all per-index-scan.  However, if we are in a
6342          * nestloop inner scan, we can expect the scan to be repeated (with
6343          * different search keys) for each row of the outer relation.  Likewise,
6344          * ScalarArrayOpExpr quals result in multiple index scans.  This creates
6345          * the potential for cache effects to reduce the number of disk page
6346          * fetches needed.  We want to estimate the average per-scan I/O cost in
6347          * the presence of caching.
6348          *
6349          * We use the Mackert-Lohman formula (see costsize.c for details) to
6350          * estimate the total number of page fetches that occur.  While this
6351          * wasn't what it was designed for, it seems a reasonable model anyway.
6352          * Note that we are counting pages not tuples anymore, so we take N = T =
6353          * index size, as if there were one "tuple" per page.
6354          */
6355         num_outer_scans = loop_count;
6356         num_scans = num_sa_scans * num_outer_scans;
6357
6358         if (num_scans > 1)
6359         {
6360                 double          pages_fetched;
6361
6362                 /* total page fetches ignoring cache effects */
6363                 pages_fetched = numIndexPages * num_scans;
6364
6365                 /* use Mackert and Lohman formula to adjust for cache effects */
6366                 pages_fetched = index_pages_fetched(pages_fetched,
6367                                                                                         index->pages,
6368                                                                                         (double) index->pages,
6369                                                                                         root);
6370
6371                 /*
6372                  * Now compute the total disk access cost, and then report a pro-rated
6373                  * share for each outer scan.  (Don't pro-rate for ScalarArrayOpExpr,
6374                  * since that's internal to the indexscan.)
6375                  */
6376                 indexTotalCost = (pages_fetched * spc_random_page_cost)
6377                         / num_outer_scans;
6378         }
6379         else
6380         {
6381                 /*
6382                  * For a single index scan, we just charge spc_random_page_cost per
6383                  * page touched.
6384                  */
6385                 indexTotalCost = numIndexPages * spc_random_page_cost;
6386         }
6387
6388         /*
6389          * CPU cost: any complex expressions in the indexquals will need to be
6390          * evaluated once at the start of the scan to reduce them to runtime keys
6391          * to pass to the index AM (see nodeIndexscan.c).  We model the per-tuple
6392          * CPU costs as cpu_index_tuple_cost plus one cpu_operator_cost per
6393          * indexqual operator.  Because we have numIndexTuples as a per-scan
6394          * number, we have to multiply by num_sa_scans to get the correct result
6395          * for ScalarArrayOpExpr cases.  Similarly add in costs for any index
6396          * ORDER BY expressions.
6397          *
6398          * Note: this neglects the possible costs of rechecking lossy operators.
6399          * Detecting that that might be needed seems more expensive than it's
6400          * worth, though, considering all the other inaccuracies here ...
6401          */
6402         qual_arg_cost = other_operands_eval_cost(root, qinfos) +
6403                 orderby_operands_eval_cost(root, path);
6404         qual_op_cost = cpu_operator_cost *
6405                 (list_length(indexQuals) + list_length(indexOrderBys));
6406
6407         indexStartupCost = qual_arg_cost;
6408         indexTotalCost += qual_arg_cost;
6409         indexTotalCost += numIndexTuples * num_sa_scans * (cpu_index_tuple_cost + qual_op_cost);
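        /*
         * Rough illustration: with 1000 index tuples per scan, two indexquals,
         * no ORDER BY expressions, and num_sa_scans = 1, the last line above
         * adds about 1000 * (cpu_index_tuple_cost + 2 * cpu_operator_cost).
         */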
6410
6411         /*
6412          * Generic assumption about index correlation: there isn't any.
6413          */
6414         indexCorrelation = 0.0;
6415
6416         /*
6417          * Return everything to caller.
6418          */
6419         costs->indexStartupCost = indexStartupCost;
6420         costs->indexTotalCost = indexTotalCost;
6421         costs->indexSelectivity = indexSelectivity;
6422         costs->indexCorrelation = indexCorrelation;
6423         costs->numIndexPages = numIndexPages;
6424         costs->numIndexTuples = numIndexTuples;
6425         costs->spc_random_page_cost = spc_random_page_cost;
6426         costs->num_sa_scans = num_sa_scans;
6427 }
6428
6429 /*
6430  * If the index is partial, add its predicate to the given qual list.
6431  *
6432  * ANDing the index predicate with the explicitly given indexquals produces
6433  * a more accurate idea of the index's selectivity.  However, we need to be
6434  * careful not to insert redundant clauses, because clauselist_selectivity()
6435  * is easily fooled into computing a too-low selectivity estimate.  Our
6436  * approach is to add only the predicate clause(s) that cannot be proven to
6437  * be implied by the given indexquals.  This successfully handles cases such
6438  * as a qual "x = 42" used with a partial index "WHERE x >= 40 AND x < 50".
6439  * There are many other cases where we won't detect redundancy, leading to a
6440  * too-low selectivity estimate, which will bias the system in favor of using
6441  * partial indexes where possible.  That is not necessarily bad though.
6442  *
6443  * Note that indexQuals contains RestrictInfo nodes while the indpred
6444  * does not, so the output list will be mixed.  This is OK for both
6445  * predicate_implied_by() and clauselist_selectivity(), but might be
6446  * problematic if the result were passed to other things.
6447  */
6448 static List *
6449 add_predicate_to_quals(IndexOptInfo *index, List *indexQuals)
6450 {
6451         List       *predExtraQuals = NIL;
6452         ListCell   *lc;
6453
6454         if (index->indpred == NIL)
6455                 return indexQuals;
6456
6457         foreach(lc, index->indpred)
6458         {
6459                 Node       *predQual = (Node *) lfirst(lc);
6460                 List       *oneQual = list_make1(predQual);
6461
6462                 if (!predicate_implied_by(oneQual, indexQuals))
6463                         predExtraQuals = list_concat(predExtraQuals, oneQual);
6464         }
6465         /* list_concat avoids modifying the passed-in indexQuals list */
6466         return list_concat(predExtraQuals, indexQuals);
6467 }
6468
6469
6470 void
6471 btcostestimate(PlannerInfo *root, IndexPath *path, double loop_count,
6472                            Cost *indexStartupCost, Cost *indexTotalCost,
6473                            Selectivity *indexSelectivity, double *indexCorrelation,
6474                            double *indexPages)
6475 {
6476         IndexOptInfo *index = path->indexinfo;
6477         List       *qinfos;
6478         GenericCosts costs;
6479         Oid                     relid;
6480         AttrNumber      colnum;
6481         VariableStatData vardata;
6482         double          numIndexTuples;
6483         Cost            descentCost;
6484         List       *indexBoundQuals;
6485         int                     indexcol;
6486         bool            eqQualHere;
6487         bool            found_saop;
6488         bool            found_is_null_op;
6489         double          num_sa_scans;
6490         ListCell   *lc;
6491
6492         /* Do preliminary analysis of indexquals */
6493         qinfos = deconstruct_indexquals(path);
6494
6495         /*
6496          * For a btree scan, only leading '=' quals plus inequality quals for the
6497          * immediately next attribute contribute to index selectivity (these are
6498          * the "boundary quals" that determine the starting and stopping points of
6499          * the index scan).  Additional quals can suppress visits to the heap, so
6500          * it's OK to count them in indexSelectivity, but they should not count
6501          * for estimating numIndexTuples.  So we must examine the given indexquals
6502          * to find out which ones count as boundary quals.  We rely on the
6503          * knowledge that they are given in index column order.
6504          *
6505          * For a RowCompareExpr, we consider only the first column, just as
6506          * rowcomparesel() does.
6507          *
6508          * If there's a ScalarArrayOpExpr in the quals, we'll actually perform N
6509          * index scans not one, but the ScalarArrayOpExpr's operator can be
6510          * considered to act the same as it normally does.
6511          */
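        /*
         * Illustrative example: for an index on (a, b), the quals "a = 1 AND
         * b < 5" are both boundary quals; with "a > 1 AND b = 5", only "a > 1"
         * counts, because there is no '=' qual on the leading column.
         */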
6512         indexBoundQuals = NIL;
6513         indexcol = 0;
6514         eqQualHere = false;
6515         found_saop = false;
6516         found_is_null_op = false;
6517         num_sa_scans = 1;
6518         foreach(lc, qinfos)
6519         {
6520                 IndexQualInfo *qinfo = (IndexQualInfo *) lfirst(lc);
6521                 RestrictInfo *rinfo = qinfo->rinfo;
6522                 Expr       *clause = rinfo->clause;
6523                 Oid                     clause_op;
6524                 int                     op_strategy;
6525
6526                 if (indexcol != qinfo->indexcol)
6527                 {
6528                         /* Beginning of a new column's quals */
6529                         if (!eqQualHere)
6530                                 break;                  /* done if no '=' qual for indexcol */
6531                         eqQualHere = false;
6532                         indexcol++;
6533                         if (indexcol != qinfo->indexcol)
6534                                 break;                  /* no quals at all for indexcol */
6535                 }
6536
6537                 if (IsA(clause, ScalarArrayOpExpr))
6538                 {
6539                         int                     alength = estimate_array_length(qinfo->other_operand);
6540
6541                         found_saop = true;
6542                         /* count up number of SA scans induced by indexBoundQuals only */
6543                         if (alength > 1)
6544                                 num_sa_scans *= alength;
6545                 }
6546                 else if (IsA(clause, NullTest))
6547                 {
6548                         NullTest   *nt = (NullTest *) clause;
6549
6550                         if (nt->nulltesttype == IS_NULL)
6551                         {
6552                                 found_is_null_op = true;
6553                                 /* IS NULL is like = for selectivity determination purposes */
6554                                 eqQualHere = true;
6555                         }
6556                 }
6557
6558                 /*
6559                  * We would need to commute the clause_op if not varonleft, except
6560                  * that we only care if it's equality or not, so that refinement is
6561                  * unnecessary.
6562                  */
6563                 clause_op = qinfo->clause_op;
6564
6565                 /* check for equality operator */
6566                 if (OidIsValid(clause_op))
6567                 {
6568                         op_strategy = get_op_opfamily_strategy(clause_op,
6569                                                                                                    index->opfamily[indexcol]);
6570                         Assert(op_strategy != 0);       /* not a member of opfamily?? */
6571                         if (op_strategy == BTEqualStrategyNumber)
6572                                 eqQualHere = true;
6573                 }
6574
6575                 indexBoundQuals = lappend(indexBoundQuals, rinfo);
6576         }
6577
6578         /*
6579          * If index is unique and we found an '=' clause for each column, we can
6580          * just assume numIndexTuples = 1 and skip the expensive
6581          * clauselist_selectivity calculations.  However, a ScalarArrayOp or
6582          * NullTest invalidates that theory, even though it sets eqQualHere.
6583          */
6584         if (index->unique &&
6585                 indexcol == index->ncolumns - 1 &&
6586                 eqQualHere &&
6587                 !found_saop &&
6588                 !found_is_null_op)
6589                 numIndexTuples = 1.0;
6590         else
6591         {
6592                 List       *selectivityQuals;
6593                 Selectivity btreeSelectivity;
6594
6595                 /*
6596                  * If the index is partial, AND the index predicate with the
6597                  * index-bound quals to produce a more accurate idea of the number of
6598                  * rows covered by the bound conditions.
6599                  */
6600                 selectivityQuals = add_predicate_to_quals(index, indexBoundQuals);
6601
6602                 btreeSelectivity = clauselist_selectivity(root, selectivityQuals,
6603                                                                                                   index->rel->relid,
6604                                                                                                   JOIN_INNER,
6605                                                                                                   NULL);
6606                 numIndexTuples = btreeSelectivity * index->rel->tuples;
6607
6608                 /*
6609                  * As in genericcostestimate(), we have to adjust for any
6610                  * ScalarArrayOpExpr quals included in indexBoundQuals, and then round
6611                  * to integer.
6612                  */
6613                 numIndexTuples = rint(numIndexTuples / num_sa_scans);
6614         }
6615
6616         /*
6617          * Now do generic index cost estimation.
6618          */
6619         MemSet(&costs, 0, sizeof(costs));
6620         costs.numIndexTuples = numIndexTuples;
6621
6622         genericcostestimate(root, path, loop_count, qinfos, &costs);
6623
6624         /*
6625          * Add a CPU-cost component to represent the costs of initial btree
6626          * descent.  We don't charge any I/O cost for touching upper btree levels,
6627          * since they tend to stay in cache, but we still have to do about log2(N)
6628          * comparisons to descend a btree of N leaf tuples.  We charge one
6629          * cpu_operator_cost per comparison.
6630          *
6631          * If there are ScalarArrayOpExprs, charge this once per SA scan.  The
6632          * ones after the first one are not startup cost so far as the overall
6633          * plan is concerned, so add them only to "total" cost.
6634          */
6635         if (index->tuples > 1)          /* avoid computing log(0) */
6636         {
6637                 descentCost = ceil(log(index->tuples) / log(2.0)) * cpu_operator_cost;
6638                 costs.indexStartupCost += descentCost;
6639                 costs.indexTotalCost += costs.num_sa_scans * descentCost;
6640         }
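        /*
         * For instance, an index with about a million leaf tuples is charged
         * ceil(log2(1e6)) = 20 comparisons, i.e. 20 * cpu_operator_cost, per
         * descent (a rough illustration, not a tuned figure).
         */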
6641
6642         /*
6643          * Even though we're not charging I/O cost for touching upper btree pages,
6644          * it's still reasonable to charge some CPU cost per page descended
6645          * through.  Moreover, if we had no such charge at all, bloated indexes
6646          * would appear to have the same search cost as unbloated ones, at least
6647          * in cases where only a single leaf page is expected to be visited.  This
6648          * cost is somewhat arbitrarily set at 50x cpu_operator_cost per page
6649          * touched.  The number of such pages is btree tree height plus one (ie,
6650          * we charge for the leaf page too).  As above, charge once per SA scan.
6651          */
6652         descentCost = (index->tree_height + 1) * 50.0 * cpu_operator_cost;
6653         costs.indexStartupCost += descentCost;
6654         costs.indexTotalCost += costs.num_sa_scans * descentCost;
6655
6656         /*
6657          * If we can get an estimate of the first column's ordering correlation C
6658          * from pg_statistic, estimate the index correlation as C for a
6659          * single-column index, or C * 0.75 for multiple columns. (The idea here
6660          * is that multiple columns dilute the importance of the first column's
6661          * ordering, but don't negate it entirely.  Before 8.0 we divided the
6662          * correlation by the number of columns, but that seems too strong.)
6663          */
6664         MemSet(&vardata, 0, sizeof(vardata));
6665
6666         if (index->indexkeys[0] != 0)
6667         {
6668                 /* Simple variable --- look to stats for the underlying table */
6669                 RangeTblEntry *rte = planner_rt_fetch(index->rel->relid, root);
6670
6671                 Assert(rte->rtekind == RTE_RELATION);
6672                 relid = rte->relid;
6673                 Assert(relid != InvalidOid);
6674                 colnum = index->indexkeys[0];
6675
6676                 if (get_relation_stats_hook &&
6677                         (*get_relation_stats_hook) (root, rte, colnum, &vardata))
6678                 {
6679                         /*
6680                          * The hook took control of acquiring a stats tuple.  If it did
6681                          * supply a tuple, it'd better have supplied a freefunc.
6682                          */
6683                         if (HeapTupleIsValid(vardata.statsTuple) &&
6684                                 !vardata.freefunc)
6685                                 elog(ERROR, "no function provided to release variable stats with");
6686                 }
6687                 else
6688                 {
6689                         vardata.statsTuple = SearchSysCache3(STATRELATTINH,
6690                                                                                                  ObjectIdGetDatum(relid),
6691                                                                                                  Int16GetDatum(colnum),
6692                                                                                                  BoolGetDatum(rte->inh));
6693                         vardata.freefunc = ReleaseSysCache;
6694                 }
6695         }
6696         else
6697         {
6698                 /* Expression --- maybe there are stats for the index itself */
6699                 relid = index->indexoid;
6700                 colnum = 1;
6701
6702                 if (get_index_stats_hook &&
6703                         (*get_index_stats_hook) (root, relid, colnum, &vardata))
6704                 {
6705                         /*
6706                          * The hook took control of acquiring a stats tuple.  If it did
6707                          * supply a tuple, it'd better have supplied a freefunc.
6708                          */
6709                         if (HeapTupleIsValid(vardata.statsTuple) &&
6710                                 !vardata.freefunc)
6711                                 elog(ERROR, "no function provided to release variable stats with");
6712                 }
6713                 else
6714                 {
6715                         vardata.statsTuple = SearchSysCache3(STATRELATTINH,
6716                                                                                                  ObjectIdGetDatum(relid),
6717                                                                                                  Int16GetDatum(colnum),
6718                                                                                                  BoolGetDatum(false));
6719                         vardata.freefunc = ReleaseSysCache;
6720                 }
6721         }
6722
6723         if (HeapTupleIsValid(vardata.statsTuple))
6724         {
6725                 Oid                     sortop;
6726                 float4     *numbers;
6727                 int                     nnumbers;
6728
6729                 sortop = get_opfamily_member(index->opfamily[0],
6730                                                                          index->opcintype[0],
6731                                                                          index->opcintype[0],
6732                                                                          BTLessStrategyNumber);
6733                 if (OidIsValid(sortop) &&
6734                         get_attstatsslot(vardata.statsTuple, InvalidOid, 0,
6735                                                          STATISTIC_KIND_CORRELATION,
6736                                                          sortop,
6737                                                          NULL,
6738                                                          NULL, NULL,
6739                                                          &numbers, &nnumbers))
6740                 {
6741                         double          varCorrelation;
6742
6743                         Assert(nnumbers == 1);
6744                         varCorrelation = numbers[0];
6745
6746                         if (index->reverse_sort[0])
6747                                 varCorrelation = -varCorrelation;
6748
6749                         if (index->ncolumns > 1)
6750                                 costs.indexCorrelation = varCorrelation * 0.75;
6751                         else
6752                                 costs.indexCorrelation = varCorrelation;
6753
6754                         free_attstatsslot(InvalidOid, NULL, 0, numbers, nnumbers);
6755                 }
6756         }
6757
6758         ReleaseVariableStats(vardata);
6759
6760         *indexStartupCost = costs.indexStartupCost;
6761         *indexTotalCost = costs.indexTotalCost;
6762         *indexSelectivity = costs.indexSelectivity;
6763         *indexCorrelation = costs.indexCorrelation;
6764         *indexPages = costs.numIndexPages;
6765 }
6766
6767 void
6768 hashcostestimate(PlannerInfo *root, IndexPath *path, double loop_count,
6769                                  Cost *indexStartupCost, Cost *indexTotalCost,
6770                                  Selectivity *indexSelectivity, double *indexCorrelation,
6771                                  double *indexPages)
6772 {
6773         List       *qinfos;
6774         GenericCosts costs;
6775
6776         /* Do preliminary analysis of indexquals */
6777         qinfos = deconstruct_indexquals(path);
6778
6779         MemSet(&costs, 0, sizeof(costs));
6780
6781         genericcostestimate(root, path, loop_count, qinfos, &costs);
6782
6783         /*
6784          * A hash index has no descent costs as such, since the index AM can go
6785          * directly to the target bucket after computing the hash value.  There
6786          * are a couple of other hash-specific costs that we could conceivably add
6787          * here, though:
6788          *
6789          * Ideally we'd charge spc_random_page_cost for each page in the target
6790          * bucket, not just the numIndexPages pages that genericcostestimate
6791          * thought we'd visit.  However in most cases we don't know which bucket
6792          * that will be.  There's no point in considering the average bucket size
6793          * because the hash AM makes sure that's always one page.
6794          *
6795          * Likewise, we could consider charging some CPU for each index tuple in
6796          * the bucket, if we knew how many there were.  But the per-tuple cost is
6797          * just a hash value comparison, not a general datatype-dependent
6798          * comparison, so any such charge ought to be quite a bit less than
6799          * cpu_operator_cost; which makes it probably not worth worrying about.
6800          *
6801          * A bigger issue is that chance hash-value collisions will result in
6802          * wasted probes into the heap.  We don't currently attempt to model this
6803          * cost on the grounds that it's rare, but maybe it's not rare enough.
6804          * (Any fix for this ought to consider the generic lossy-operator problem,
6805          * though; it's not entirely hash-specific.)
6806          */
6807
6808         *indexStartupCost = costs.indexStartupCost;
6809         *indexTotalCost = costs.indexTotalCost;
6810         *indexSelectivity = costs.indexSelectivity;
6811         *indexCorrelation = costs.indexCorrelation;
6812         *indexPages = costs.numIndexPages;
6813 }
6814
6815 void
6816 gistcostestimate(PlannerInfo *root, IndexPath *path, double loop_count,
6817                                  Cost *indexStartupCost, Cost *indexTotalCost,
6818                                  Selectivity *indexSelectivity, double *indexCorrelation,
6819                                  double *indexPages)
6820 {
6821         IndexOptInfo *index = path->indexinfo;
6822         List       *qinfos;
6823         GenericCosts costs;
6824         Cost            descentCost;
6825
6826         /* Do preliminary analysis of indexquals */
6827         qinfos = deconstruct_indexquals(path);
6828
6829         MemSet(&costs, 0, sizeof(costs));
6830
6831         genericcostestimate(root, path, loop_count, qinfos, &costs);
6832
6833         /*
6834          * We model index descent costs similarly to those for btree, but to do
6835          * that we first need an idea of the tree height.  We somewhat arbitrarily
6836          * assume that the fanout is 100, meaning the tree height is at most
6837          * log100(index->pages).
6838          *
6839          * Although this computation isn't really expensive enough to require
6840          * caching, we might as well use index->tree_height to cache it.
6841          */
6842         if (index->tree_height < 0) /* unknown? */
6843         {
6844                 if (index->pages > 1)   /* avoid computing log(0) */
6845                         index->tree_height = (int) (log(index->pages) / log(100.0));
6846                 else
6847                         index->tree_height = 0;
6848         }
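        /*
         * With the assumed fanout of 100, an index of about a million pages is
         * thus treated as being only on the order of three levels deep.
         */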
6849
6850         /*
6851          * Add a CPU-cost component to represent the costs of initial descent. We
6852          * just use log(N) here not log2(N) since the branching factor isn't
6853          * necessarily two anyway.  As for btree, charge once per SA scan.
6854          */
6855         if (index->tuples > 1)          /* avoid computing log(0) */
6856         {
6857                 descentCost = ceil(log(index->tuples)) * cpu_operator_cost;
6858                 costs.indexStartupCost += descentCost;
6859                 costs.indexTotalCost += costs.num_sa_scans * descentCost;
6860         }
6861
6862         /*
6863          * Likewise add a per-page charge, calculated the same as for btrees.
6864          */
6865         descentCost = (index->tree_height + 1) * 50.0 * cpu_operator_cost;
6866         costs.indexStartupCost += descentCost;
6867         costs.indexTotalCost += costs.num_sa_scans * descentCost;
6868
6869         *indexStartupCost = costs.indexStartupCost;
6870         *indexTotalCost = costs.indexTotalCost;
6871         *indexSelectivity = costs.indexSelectivity;
6872         *indexCorrelation = costs.indexCorrelation;
6873         *indexPages = costs.numIndexPages;
6874 }
6875
6876 void
6877 spgcostestimate(PlannerInfo *root, IndexPath *path, double loop_count,
6878                                 Cost *indexStartupCost, Cost *indexTotalCost,
6879                                 Selectivity *indexSelectivity, double *indexCorrelation,
6880                                 double *indexPages)
6881 {
6882         IndexOptInfo *index = path->indexinfo;
6883         List       *qinfos;
6884         GenericCosts costs;
6885         Cost            descentCost;
6886
6887         /* Do preliminary analysis of indexquals */
6888         qinfos = deconstruct_indexquals(path);
6889
6890         MemSet(&costs, 0, sizeof(costs));
6891
6892         genericcostestimate(root, path, loop_count, qinfos, &costs);
6893
6894         /*
6895          * We model index descent costs similarly to those for btree, but to do
6896          * that we first need an idea of the tree height.  We somewhat arbitrarily
6897          * assume that the fanout is 100, meaning the tree height is at most
6898          * log100(index->pages).
6899          *
6900          * Although this computation isn't really expensive enough to require
6901          * caching, we might as well use index->tree_height to cache it.
6902          */
6903         if (index->tree_height < 0) /* unknown? */
6904         {
6905                 if (index->pages > 1)   /* avoid computing log(0) */
6906                         index->tree_height = (int) (log(index->pages) / log(100.0));
6907                 else
6908                         index->tree_height = 0;
6909         }
6910
6911         /*
6912          * Add a CPU-cost component to represent the costs of initial descent. We
6913          * just use log(N) here not log2(N) since the branching factor isn't
6914          * necessarily two anyway.  As for btree, charge once per SA scan.
6915          */
6916         if (index->tuples > 1)          /* avoid computing log(0) */
6917         {
6918                 descentCost = ceil(log(index->tuples)) * cpu_operator_cost;
6919                 costs.indexStartupCost += descentCost;
6920                 costs.indexTotalCost += costs.num_sa_scans * descentCost;
6921         }
6922
6923         /*
6924          * Likewise add a per-page charge, calculated the same as for btrees.
6925          */
6926         descentCost = (index->tree_height + 1) * 50.0 * cpu_operator_cost;
6927         costs.indexStartupCost += descentCost;
6928         costs.indexTotalCost += costs.num_sa_scans * descentCost;
6929
6930         *indexStartupCost = costs.indexStartupCost;
6931         *indexTotalCost = costs.indexTotalCost;
6932         *indexSelectivity = costs.indexSelectivity;
6933         *indexCorrelation = costs.indexCorrelation;
6934         *indexPages = costs.numIndexPages;
6935 }
6936
6937
6938 /*
6939  * Support routines for gincostestimate
6940  */
6941
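/*
 * Rough meaning of the counters, as they are used by the cost routines
 * below:
 *      haveFullScan    - true if a full index scan will be required
 *      partialEntries  - estimated number of entry-tree entries touched by
 *                        partial-match searches
 *      exactEntries    - number of exact entry matches expected
 *      searchEntries   - number of entry-tree searches to perform
 *      arrayScans      - multiplier for the rescans caused by
 *                        ScalarArrayOpExpr (array) quals
 */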
6942 typedef struct
6943 {
6944         bool            haveFullScan;
6945         double          partialEntries;
6946         double          exactEntries;
6947         double          searchEntries;
6948         double          arrayScans;
6949 } GinQualCounts;
6950
6951 /*
6952  * Estimate the number of index terms that need to be searched for while
6953  * testing the given GIN query, and increment the counts in *counts
6954  * appropriately.  If the query is unsatisfiable, return false.
6955  */
6956 static bool
6957 gincost_pattern(IndexOptInfo *index, int indexcol,
6958                                 Oid clause_op, Datum query,
6959                                 GinQualCounts *counts)
6960 {
6961         Oid                     extractProcOid;
6962         Oid                     collation;
6963         int                     strategy_op;
6964         Oid                     lefttype,
6965                                 righttype;
6966         int32           nentries = 0;
6967         bool       *partial_matches = NULL;
6968         Pointer    *extra_data = NULL;
6969         bool       *nullFlags = NULL;
6970         int32           searchMode = GIN_SEARCH_MODE_DEFAULT;
6971         int32           i;
6972
6973         /*
6974          * Get the operator's strategy number and declared input data types within
6975          * the index opfamily.  (We don't need the latter, but we use
6976          * get_op_opfamily_properties because it will throw error if it fails to
6977          * find a matching pg_amop entry.)
6978          */
6979         get_op_opfamily_properties(clause_op, index->opfamily[indexcol], false,
6980                                                            &strategy_op, &lefttype, &righttype);
6981
6982         /*
6983          * GIN always uses the "default" support functions, which are those with
6984          * lefttype == righttype == the opclass' opcintype (see
6985          * IndexSupportInitialize in relcache.c).
6986          */
6987         extractProcOid = get_opfamily_proc(index->opfamily[indexcol],
6988                                                                            index->opcintype[indexcol],
6989                                                                            index->opcintype[indexcol],
6990                                                                            GIN_EXTRACTQUERY_PROC);
6991
6992         if (!OidIsValid(extractProcOid))
6993         {
6994                 /* should not happen; throw same error as index_getprocinfo */
6995                 elog(ERROR, "missing support function %d for attribute %d of index \"%s\"",
6996                          GIN_EXTRACTQUERY_PROC, indexcol + 1,
6997                          get_rel_name(index->indexoid));
6998         }
6999
7000         /*
7001          * Choose collation to pass to extractProc (should match initGinState).
7002          */
7003         if (OidIsValid(index->indexcollations[indexcol]))
7004                 collation = index->indexcollations[indexcol];
7005         else
7006                 collation = DEFAULT_COLLATION_OID;
7007
7008         OidFunctionCall7Coll(extractProcOid,
7009                                                  collation,
7010                                                  query,
7011                                                  PointerGetDatum(&nentries),
7012                                                  UInt16GetDatum(strategy_op),
7013                                                  PointerGetDatum(&partial_matches),
7014                                                  PointerGetDatum(&extra_data),
7015                                                  PointerGetDatum(&nullFlags),
7016                                                  PointerGetDatum(&searchMode));
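        /*
         * Illustrative example (behavior of the core tsvector opclass, not
         * anything computed here): for to_tsquery('a & b') the extractQuery
         * function reports nentries = 2 ordinary entries with searchMode left
         * at GIN_SEARCH_MODE_DEFAULT, while a prefix query such as
         * to_tsquery('a:*') reports one entry flagged in partial_matches.
         */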
7017
7018         if (nentries <= 0 && searchMode == GIN_SEARCH_MODE_DEFAULT)
7019         {
7020                 /* No match is possible */
7021                 return false;
7022         }
7023
7024         for (i = 0; i < nentries; i++)
7025         {
7026                 /*
7027                  * For a partial match we have no information to estimate the number
7028                  * of matched entries in the index, so just estimate it as 100.
7029                  */
7030                 if (partial_matches && partial_matches[i])
7031                         counts->partialEntries += 100;
7032                 else
7033                         counts->exactEntries++;
7034
7035                 counts->searchEntries++;
7036         }
7037
7038         if (searchMode == GIN_SEARCH_MODE_INCLUDE_EMPTY)
7039         {
7040                 /* Treat "include empty" like an exact-match item */
7041                 counts->exactEntries++;
7042                 counts->searchEntries++;
7043         }
7044         else if (searchMode != GIN_SEARCH_MODE_DEFAULT)
7045         {
7046                 /* It's GIN_SEARCH_MODE_ALL */
7047                 counts->haveFullScan = true;
7048         }
7049
7050         return true;
7051 }
7052
7053 /*
7054  * Estimate the number of index terms that need to be searched for while
7055  * testing the given GIN index clause, and increment the counts in *counts
7056  * appropriately.  If the query is unsatisfiable, return false.
7057  */
7058 static bool
7059 gincost_opexpr(PlannerInfo *root,
7060                            IndexOptInfo *index,
7061                            IndexQualInfo *qinfo,
7062                            GinQualCounts *counts)
7063 {
7064         int                     indexcol = qinfo->indexcol;
7065         Oid                     clause_op = qinfo->clause_op;
7066         Node       *operand = qinfo->other_operand;
7067
7068         if (!qinfo->varonleft)
7069         {
7070                 /* must commute the operator */
7071                 clause_op = get_commutator(clause_op);
7072         }
7073
7074         /* aggressively reduce to a constant, and look through relabeling */
7075         operand = estimate_expression_value(root, operand);
7076
7077         if (IsA(operand, RelabelType))
7078                 operand = (Node *) ((RelabelType *) operand)->arg;
7079
7080         /*
7081          * We can't call the extractQuery method for an unknown operand, so
7082          * unless the operand is a Const we can't do much; just assume there
7083          * will be one ordinary search entry from the operand at runtime.
7084          */
7085         if (!IsA(operand, Const))
7086         {
7087                 counts->exactEntries++;
7088                 counts->searchEntries++;
7089                 return true;
7090         }
7091
7092         /* If Const is null, there can be no matches */
7093         if (((Const *) operand)->constisnull)
7094                 return false;
7095
7096         /* Otherwise, apply extractQuery and get the actual term counts */
7097         return gincost_pattern(index, indexcol, clause_op,
7098                                                    ((Const *) operand)->constvalue,
7099                                                    counts);
7100 }
7101
7102 /*
7103  * Estimate the number of index terms that need to be searched for while
7104  * testing the given GIN index clause, and increment the counts in *counts
7105  * appropriately.  If the query is unsatisfiable, return false.
7106  *
7107  * A ScalarArrayOpExpr will give rise to N separate indexscans at runtime,
7108  * each of which involves one value from the RHS array, plus all the
7109  * non-array quals (if any).  To model this, we average the counts across
7110  * the RHS elements, and add the averages to the counts in *counts (which
7111  * correspond to per-indexscan costs).  We also multiply counts->arrayScans
7112  * by N, causing gincostestimate to scale up its estimates accordingly.
7113  */
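/*
 * For example (illustrative): if the RHS array has three satisfiable
 * elements whose individual searchEntries come out as 2, 4 and 6, we add
 * their average (4) to counts->searchEntries and multiply counts->arrayScans
 * by 3.
 */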
7114 static bool
7115 gincost_scalararrayopexpr(PlannerInfo *root,
7116                                                   IndexOptInfo *index,
7117                                                   IndexQualInfo *qinfo,
7118                                                   double numIndexEntries,
7119                                                   GinQualCounts *counts)
7120 {
7121         int                     indexcol = qinfo->indexcol;
7122         Oid                     clause_op = qinfo->clause_op;
7123         Node       *rightop = qinfo->other_operand;
7124         ArrayType  *arrayval;
7125         int16           elmlen;
7126         bool            elmbyval;
7127         char            elmalign;
7128         int                     numElems;
7129         Datum      *elemValues;
7130         bool       *elemNulls;
7131         GinQualCounts arraycounts;
7132         int                     numPossible = 0;
7133         int                     i;
7134
7135         Assert(((ScalarArrayOpExpr *) qinfo->rinfo->clause)->useOr);
7136
7137         /* aggressively reduce to a constant, and look through relabeling */
7138         rightop = estimate_expression_value(root, rightop);
7139
7140         if (IsA(rightop, RelabelType))
7141                 rightop = (Node *) ((RelabelType *) rightop)->arg;
7142
7143         /*
7144          * We can't call the extractQuery method for an unknown operand, so
7145          * unless the operand is a Const we can't do much; just assume there
7146          * will be one ordinary search entry from each array entry at runtime,
7147          * and fall back on a probably-bad estimate of the number of array entries.
7148          */
7149         if (!IsA(rightop, Const))
7150         {
7151                 counts->exactEntries++;
7152                 counts->searchEntries++;
7153                 counts->arrayScans *= estimate_array_length(rightop);
7154                 return true;
7155         }
7156
7157         /* If Const is null, there can be no matches */
7158         if (((Const *) rightop)->constisnull)
7159                 return false;
7160
7161         /* Otherwise, extract the array elements and iterate over them */
7162         arrayval = DatumGetArrayTypeP(((Const *) rightop)->constvalue);
7163         get_typlenbyvalalign(ARR_ELEMTYPE(arrayval),
7164                                                  &elmlen, &elmbyval, &elmalign);
7165         deconstruct_array(arrayval,
7166                                           ARR_ELEMTYPE(arrayval),
7167                                           elmlen, elmbyval, elmalign,
7168                                           &elemValues, &elemNulls, &numElems);
7169
7170         memset(&arraycounts, 0, sizeof(arraycounts));
7171
7172         for (i = 0; i < numElems; i++)
7173         {
7174                 GinQualCounts elemcounts;
7175
7176                 /* NULL can't match anything, so ignore, as the executor will */
7177                 if (elemNulls[i])
7178                         continue;
7179
7180                 /* Otherwise, apply extractQuery and get the actual term counts */
7181                 memset(&elemcounts, 0, sizeof(elemcounts));
7182
7183                 if (gincost_pattern(index, indexcol, clause_op, elemValues[i],
7184                                                         &elemcounts))
7185                 {
7186                         /* We ignore array elements that are unsatisfiable patterns */
7187                         numPossible++;
7188
7189                         if (elemcounts.haveFullScan)
7190                         {
7191                                 /*
7192                                  * Full index scan will be required.  We treat this as if
7193                                  * every key in the index had been listed in the query; is
7194                                  * that reasonable?
7195                                  */
7196                                 elemcounts.partialEntries = 0;
7197                                 elemcounts.exactEntries = numIndexEntries;
7198                                 elemcounts.searchEntries = numIndexEntries;
7199                         }
7200                         arraycounts.partialEntries += elemcounts.partialEntries;
7201                         arraycounts.exactEntries += elemcounts.exactEntries;
7202                         arraycounts.searchEntries += elemcounts.searchEntries;
7203                 }
7204         }
7205
7206         if (numPossible == 0)
7207         {
7208                 /* No satisfiable patterns in the array */
7209                 return false;
7210         }
7211
7212         /*
7213          * Now add the averages to the global counts.  This will give us an
7214          * estimate of the average number of terms searched for in each indexscan,
7215          * including contributions from both array and non-array quals.
7216          */
7217         counts->partialEntries += arraycounts.partialEntries / numPossible;
7218         counts->exactEntries += arraycounts.exactEntries / numPossible;
7219         counts->searchEntries += arraycounts.searchEntries / numPossible;
7220
7221         counts->arrayScans *= numPossible;
7222
7223         return true;
7224 }
7225
7226 /*
7227  * GIN has search behavior completely different from other index types
7228  */
7229 void
7230 gincostestimate(PlannerInfo *root, IndexPath *path, double loop_count,
7231                                 Cost *indexStartupCost, Cost *indexTotalCost,
7232                                 Selectivity *indexSelectivity, double *indexCorrelation,
7233                                 double *indexPages)
7234 {
7235         IndexOptInfo *index = path->indexinfo;
7236         List       *indexQuals = path->indexquals;
7237         List       *indexOrderBys = path->indexorderbys;
7238         List       *qinfos;
7239         ListCell   *l;
7240         List       *selectivityQuals;
7241         double          numPages = index->pages,
7242                                 numTuples = index->tuples;
7243         double          numEntryPages,
7244                                 numDataPages,
7245                                 numPendingPages,
7246                                 numEntries;
7247         GinQualCounts counts;
7248         bool            matchPossible;
7249         double          partialScale;
7250         double          entryPagesFetched,
7251                                 dataPagesFetched,
7252                                 dataPagesFetchedBySel;
7253         double          qual_op_cost,
7254                                 qual_arg_cost,
7255                                 spc_random_page_cost,
7256                                 outer_scans;
7257         Relation        indexRel;
7258         GinStatsData ginStats;
7259
7260         /* Do preliminary analysis of indexquals */
7261         qinfos = deconstruct_indexquals(path);
7262
7263         /*
7264          * Obtain statistical information from the meta page, if possible.  Else
7265          * set ginStats to zeroes, and we'll cope below.
7266          */
7267         if (!index->hypothetical)
7268         {
7269                 indexRel = index_open(index->indexoid, AccessShareLock);
7270                 ginGetStats(indexRel, &ginStats);
7271                 index_close(indexRel, AccessShareLock);
7272         }
7273         else
7274         {
7275                 memset(&ginStats, 0, sizeof(ginStats));
7276         }
7277
7278         /*
7279          * Assuming we got valid (nonzero) stats at all, nPendingPages can be
7280          * trusted, but the other fields are data as of the last VACUUM.  We can
7281          * scale them up to account for growth since then, but that method only
7282          * goes so far; in the worst case, the stats might be for a completely
7283          * empty index, and scaling them will produce pretty bogus numbers.
7284          * Somewhat arbitrarily, set the cutoff for doing scaling at 4X growth; if
7285          * it's grown more than that, fall back to estimating things only from the
7286          * assumed-accurate index size.  But we'll trust nPendingPages in any case
7287          * so long as it's not clearly insane, ie, more than the index size.
7288          */
7289         if (ginStats.nPendingPages < numPages)
7290                 numPendingPages = ginStats.nPendingPages;
7291         else
7292                 numPendingPages = 0;
7293
7294         if (numPages > 0 && ginStats.nTotalPages <= numPages &&
7295                 ginStats.nTotalPages > numPages / 4 &&
7296                 ginStats.nEntryPages > 0 && ginStats.nEntries > 0)
7297         {
7298                 /*
7299                  * OK, the stats seem close enough to sane to be trusted.  But we
7300                  * still need to scale them by the ratio numPages / nTotalPages to
7301                  * account for growth since the last VACUUM.
7302                  */
7303                 double          scale = numPages / ginStats.nTotalPages;
7304
7305                 numEntryPages = ceil(ginStats.nEntryPages * scale);
7306                 numDataPages = ceil(ginStats.nDataPages * scale);
7307                 numEntries = ceil(ginStats.nEntries * scale);
7308                 /* ensure we didn't round up too much */
7309                 numEntryPages = Min(numEntryPages, numPages - numPendingPages);
7310                 numDataPages = Min(numDataPages,
7311                                                    numPages - numPendingPages - numEntryPages);
7312         }
7313         else
7314         {
7315                 /*
7316                  * We might get here because it's a hypothetical index, or an index
7317                  * created pre-9.1 and never vacuumed since upgrading (in which case
7318                  * its stats would read as zeroes), or just because it's grown too
7319                  * much since the last VACUUM for us to put our faith in scaling.
7320                  *
7321                  * Invent some plausible internal statistics based on the index page
7322                  * count (and clamp that to at least 10 pages, just in case).  We
7323                  * estimate that 90% of the index is entry pages, and the rest is data
7324                  * pages.  Estimate 100 entries per entry page; this is rather bogus
7325                  * since it'll depend on the size of the keys, but it's more robust
7326                  * than trying to predict the number of entries per heap tuple.
7327                  */
7328                 numPages = Max(numPages, 10);
7329                 numEntryPages = floor((numPages - numPendingPages) * 0.90);
7330                 numDataPages = numPages - numPendingPages - numEntryPages;
7331                 numEntries = floor(numEntryPages * 100);
7332         }
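        /*
         * Worked example (illustrative): for a 1,000-page index with no usable
         * stats and no pending pages, the fallback above gives
         * numEntryPages = floor(1000 * 0.90) = 900, numDataPages = 100 and
         * numEntries = 90,000.
         */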
7333
7334         /* In an empty index, numEntries could be zero.  Avoid divide-by-zero */
7335         if (numEntries < 1)
7336                 numEntries = 1;
7337
7338         /*
7339          * Include predicate in selectivityQuals (should match
7340          * genericcostestimate)
7341          */
7342         if (index->indpred != NIL)
7343         {
7344                 List       *predExtraQuals = NIL;
7345
7346                 foreach(l, index->indpred)
7347                 {
7348                         Node       *predQual = (Node *) lfirst(l);
7349                         List       *oneQual = list_make1(predQual);
7350
7351                         if (!predicate_implied_by(oneQual, indexQuals))
7352                                 predExtraQuals = list_concat(predExtraQuals, oneQual);
7353                 }
7354                 /* list_concat avoids modifying the passed-in indexQuals list */
7355                 selectivityQuals = list_concat(predExtraQuals, indexQuals);
7356         }
7357         else
7358                 selectivityQuals = indexQuals;
7359
7360         /* Estimate the fraction of main-table tuples that will be visited */
7361         *indexSelectivity = clauselist_selectivity(root, selectivityQuals,
7362                                                                                            index->rel->relid,
7363                                                                                            JOIN_INNER,
7364                                                                                            NULL);
7365
7366         /* fetch estimated page cost for tablespace containing index */
7367         get_tablespace_page_costs(index->reltablespace,
7368                                                           &spc_random_page_cost,
7369                                                           NULL);
7370
7371         /*
7372          * Generic assumption about index correlation: there isn't any.
7373          */
7374         *indexCorrelation = 0.0;
7375
7376         /*
7377          * Examine quals to estimate number of search entries & partial matches
7378          */
7379         memset(&counts, 0, sizeof(counts));
7380         counts.arrayScans = 1;
7381         matchPossible = true;
7382
7383         foreach(l, qinfos)
7384         {
7385                 IndexQualInfo *qinfo = (IndexQualInfo *) lfirst(l);
7386                 Expr       *clause = qinfo->rinfo->clause;
7387
7388                 if (IsA(clause, OpExpr))
7389                 {
7390                         matchPossible = gincost_opexpr(root,
7391                                                                                    index,
7392                                                                                    qinfo,
7393                                                                                    &counts);
7394                         if (!matchPossible)
7395                                 break;
7396                 }
7397                 else if (IsA(clause, ScalarArrayOpExpr))
7398                 {
7399                         matchPossible = gincost_scalararrayopexpr(root,
7400                                                                                                           index,
7401                                                                                                           qinfo,
7402                                                                                                           numEntries,
7403                                                                                                           &counts);
7404                         if (!matchPossible)
7405                                 break;
7406                 }
7407                 else
7408                 {
7409                         /* shouldn't be anything else for a GIN index */
7410                         elog(ERROR, "unsupported GIN indexqual type: %d",
7411                                  (int) nodeTag(clause));
7412                 }
7413         }
7414
7415         /* Fall out if there were any provably-unsatisfiable quals */
7416         if (!matchPossible)
7417         {
7418                 *indexStartupCost = 0;
7419                 *indexTotalCost = 0;
7420                 *indexSelectivity = 0;
7421                 return;
7422         }
7423
7424         if (counts.haveFullScan || indexQuals == NIL)
7425         {
7426                 /*
7427                  * Full index scan will be required.  We treat this as if every key in
7428                  * the index had been listed in the query; is that reasonable?
7429                  */
7430                 counts.partialEntries = 0;
7431                 counts.exactEntries = numEntries;
7432                 counts.searchEntries = numEntries;
7433         }
7434
7435         /* Will we have more than one iteration of a nestloop scan? */
7436         outer_scans = loop_count;
7437
7438         /*
7439          * Compute the cost to begin the scan; first of all, account for the
7440          * pending list.
7441          */
7442         entryPagesFetched = numPendingPages;
7443
7444         /*
7445          * Estimate the number of entry pages read.  We need to do
7446          * counts.searchEntries searches.  Model the pages touched per search as
7447          * a small power of numEntryPages; the exponent is small because leaf
7448          * pages usually hold far more tuples than internal pages.  This covers
7449          * all entry-tree searches, including the partial-match first-entry search.
7450          */
7451         entryPagesFetched += ceil(counts.searchEntries * rint(pow(numEntryPages, 0.15)));
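        /*
         * Illustrative numbers: with counts.searchEntries = 4 and
         * numEntryPages = 900, pow(900, 0.15) is about 2.77, rint() rounds
         * that to 3, so the line above adds 12 entry pages.
         */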
7452
7453         /*
7454          * Add an estimate of the entry pages read by the partial-match
7455          * algorithm: a scan over the leaf pages of the entry tree.  We have no
7456          * useful stats here, so estimate it as a proportion.  Because
7457          * counts.partialEntries is really pretty bogus (see code above), it can
7458          * exceed numEntries; clamp the proportion to ensure sanity.
7459          */
7460         partialScale = counts.partialEntries / numEntries;
7461         partialScale = Min(partialScale, 1.0);
7462
7463         entryPagesFetched += ceil(numEntryPages * partialScale);
7464
7465         /*
7466          * The partial-match algorithm reads all data pages before doing the
7467          * actual scan, so it's a startup cost.  Again, we have no useful stats
7468          * here, so estimate it as a proportion.
7469          */
7470         dataPagesFetched = ceil(numDataPages * partialScale);
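        /*
         * Illustrative numbers, continuing the example above: with
         * counts.partialEntries = 100 and numEntries = 90,000, partialScale is
         * 100 / 90000, or about 0.0011, so the two statements above add
         * ceil(900 * partialScale) = 1 entry page and
         * ceil(100 * partialScale) = 1 data page.
         */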
7471
7472         /*
7473          * Calculate cache effects if more than one scan due to nestloops or array
7474          * quals.  The result is pro-rated per nestloop scan, but the array qual
7475          * factor shouldn't be pro-rated (compare genericcostestimate).
7476          */
7477         if (outer_scans > 1 || counts.arrayScans > 1)
7478         {
7479                 entryPagesFetched *= outer_scans * counts.arrayScans;
7480                 entryPagesFetched = index_pages_fetched(entryPagesFetched,
7481                                                                                                 (BlockNumber) numEntryPages,
7482                                                                                                 numEntryPages, root);
7483                 entryPagesFetched /= outer_scans;
7484                 dataPagesFetched *= outer_scans * counts.arrayScans;
7485                 dataPagesFetched = index_pages_fetched(dataPagesFetched,
7486                                                                                            (BlockNumber) numDataPages,
7487                                                                                            numDataPages, root);
7488                 dataPagesFetched /= outer_scans;
7489         }
7490
7491         /*
7492          * Here we use random page cost because logically-close pages could be far
7493          * apart on disk.
7494          */
7495         *indexStartupCost = (entryPagesFetched + dataPagesFetched) * spc_random_page_cost;
7496
7497         /*
7498          * Now compute the number of data pages fetched during the scan.
7499          *
7500          * We assume every entry has the same number of items, and that there is
7501          * no overlap between them.  (XXX: tsvector and array opclasses collect
7502          * statistics on the frequency of individual keys; it would be nice to
7503          * use those here.)
7504          */
7505         dataPagesFetched = ceil(numDataPages * counts.exactEntries / numEntries);
7506
7507         /*
7508          * If there is a lot of overlap among the entries, in particular if one of
7509          * the entries is very frequent, the above calculation can grossly
7510          * under-estimate.  As a simple cross-check, calculate a lower bound based
7511          * on the overall selectivity of the quals.  At a minimum, we must read
7512          * one item pointer for each matching entry.
7513          *
7514          * The width of each item pointer varies, based on the level of
7515          * compression.  We don't have statistics on that, but an average of
7516          * around 3 bytes per item is fairly typical.
7517          */
7518         dataPagesFetchedBySel = ceil(*indexSelectivity *
7519                                                                  (numTuples / (BLCKSZ / 3)));
7520         if (dataPagesFetchedBySel > dataPagesFetched)
7521                 dataPagesFetched = dataPagesFetchedBySel;
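        /*
         * Illustrative numbers: with numDataPages = 100, counts.exactEntries = 2
         * and numEntries = 90,000, the entry-based estimate above is just
         * ceil(100 * 2 / 90000) = 1 page; but assuming *indexSelectivity = 0.01,
         * numTuples = 1,000,000 and the default BLCKSZ of 8192, the
         * selectivity-based floor is ceil(0.01 * (1000000 / (8192 / 3))) =
         * ceil(3.66) = 4 pages, so that is what we use.
         */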
7522
7523         /* Account for cache effects, the same as above */
7524         if (outer_scans > 1 || counts.arrayScans > 1)
7525         {
7526                 dataPagesFetched *= outer_scans * counts.arrayScans;
7527                 dataPagesFetched = index_pages_fetched(dataPagesFetched,
7528                                                                                            (BlockNumber) numDataPages,
7529                                                                                            numDataPages, root);
7530                 dataPagesFetched /= outer_scans;
7531         }
7532
7533         /* And apply random_page_cost as the cost per page */
7534         *indexTotalCost = *indexStartupCost +
7535                 dataPagesFetched * spc_random_page_cost;
7536
7537         /*
7538          * Add on index qual eval costs, much as in genericcostestimate
7539          */
7540         qual_arg_cost = other_operands_eval_cost(root, qinfos) +
7541                 orderby_operands_eval_cost(root, path);
7542         qual_op_cost = cpu_operator_cost *
7543                 (list_length(indexQuals) + list_length(indexOrderBys));
7544
7545         *indexStartupCost += qual_arg_cost;
7546         *indexTotalCost += qual_arg_cost;
7547         *indexTotalCost += (numTuples * *indexSelectivity) * (cpu_index_tuple_cost + qual_op_cost);
7548         *indexPages = dataPagesFetched;
7549 }
7550
7551 /*
7552  * BRIN has search behavior completely different from other index types
7553  */
7554 void
7555 brincostestimate(PlannerInfo *root, IndexPath *path, double loop_count,
7556                                  Cost *indexStartupCost, Cost *indexTotalCost,
7557                                  Selectivity *indexSelectivity, double *indexCorrelation,
7558                                  double *indexPages)
7559 {
7560         IndexOptInfo *index = path->indexinfo;
7561         List       *indexQuals = path->indexquals;
7562         List       *indexOrderBys = path->indexorderbys;
7563         double          numPages = index->pages;
7564         double          numTuples = index->tuples;
7565         List       *qinfos;
7566         Cost            spc_seq_page_cost;
7567         Cost            spc_random_page_cost;
7568         double          qual_op_cost;
7569         double          qual_arg_cost;
7570
7571         /* Do preliminary analysis of indexquals */
7572         qinfos = deconstruct_indexquals(path);
7573
7574         /* fetch estimated page cost for tablespace containing index */
7575         get_tablespace_page_costs(index->reltablespace,
7576                                                           &spc_random_page_cost,
7577                                                           &spc_seq_page_cost);
7578
7579         /*
7580          * BRIN indexes are always read in full; use that as startup cost.
7581          *
7582          * XXX maybe only include revmap pages here?
7583          */
7584         *indexStartupCost = spc_seq_page_cost * numPages * loop_count;
7585
7586         /*
7587          * Reading a BRIN index can involve some back-and-forth over regular
7588          * pages, as the revmap might point to them out of sequential order;
7589          * model this as reading the whole index in random order.
7590          */
7591         *indexTotalCost = spc_random_page_cost * numPages * loop_count;
7592
7593         *indexSelectivity =
7594                 clauselist_selectivity(root, indexQuals,
7595                                                            path->indexinfo->rel->relid,
7596                                                            JOIN_INNER, NULL);
7597         *indexCorrelation = 1;
7598
7599         /*
7600          * Add on index qual eval costs, much as in genericcostestimate.
7601          */
7602         qual_arg_cost = other_operands_eval_cost(root, qinfos) +
7603                 orderby_operands_eval_cost(root, path);
7604         qual_op_cost = cpu_operator_cost *
7605                 (list_length(indexQuals) + list_length(indexOrderBys));
7606
7607         *indexStartupCost += qual_arg_cost;
7608         *indexTotalCost += qual_arg_cost;
7609         *indexTotalCost += (numTuples * *indexSelectivity) * (cpu_index_tuple_cost + qual_op_cost);
7610         *indexPages = index->pages;
7611
7612         /* XXX what about pages_per_range? */
7613 }
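/*
 * Worked example (illustrative): for a 100-page BRIN index scanned once
 * (loop_count = 1), with a single indexqual whose comparison value is a
 * plain Const (so qual_arg_cost is zero), no ORDER BY operands,
 * numTuples = 1,000,000, an estimated selectivity of 0.01, and the default
 * cost parameters (spc_seq_page_cost = 1.0, spc_random_page_cost = 4.0,
 * cpu_index_tuple_cost = 0.005, cpu_operator_cost = 0.0025), the formulas
 * above give
 *      indexStartupCost = 1.0 * 100 * 1 = 100
 *      indexTotalCost   = 4.0 * 100 * 1 + (1000000 * 0.01) * (0.005 + 0.0025)
 *                       = 400 + 75 = 475
 */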