Clean up possibly-uninitialized-variable warnings reported by gcc 4.x.
diff --git a/src/backend/utils/adt/selfuncs.c b/src/backend/utils/adt/selfuncs.c
index f5fb2adf2ebf318765cbd46433e723fd009d27b1..e987a66a1cf09e2f68aa5a4da5657357e58885d9 100644
--- a/src/backend/utils/adt/selfuncs.c
+++ b/src/backend/utils/adt/selfuncs.c
  *       Index cost functions are registered in the pg_am catalog
  *       in the "amcostestimate" attribute.
  *
- * Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  *
  * IDENTIFICATION
- *       $Header: /cvsroot/pgsql/src/backend/utils/adt/selfuncs.c,v 1.106 2002/03/08 04:29:01 momjian Exp $
+ *       $PostgreSQL: pgsql/src/backend/utils/adt/selfuncs.c,v 1.189 2005/09/24 22:54:38 tgl Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -38,7 +38,7 @@
  *
  * The call convention for a restriction estimator (oprrest function) is
  *
- *             Selectivity oprrest (Query *root,
+ *             Selectivity oprrest (PlannerInfo *root,
  *                                                      Oid operator,
  *                                                      List *args,
  *                                                      int varRelid);
  *
  * This is represented at the SQL level (in pg_proc) as
  *
- *             float8 oprrest (opaque, oid, opaque, int4);
+ *             float8 oprrest (internal, oid, internal, int4);
  *
  * The call convention for a join estimator (oprjoin function) is similar
- * except that varRelid is not needed:
+ * except that varRelid is not needed, and instead the join type is
+ * supplied:
  *
- *             Selectivity oprjoin (Query *root,
+ *             Selectivity oprjoin (PlannerInfo *root,
  *                                                      Oid operator,
- *                                                      List *args);
+ *                                                      List *args,
+ *                                                      JoinType jointype);
+ *
+ *             float8 oprjoin (internal, oid, internal, int2);
  *
- *             float8 oprjoin (opaque, oid, opaque);
+ * (We deliberately make the SQL signature different to facilitate
+ * catching errors.)
  *----------
  */
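To make the convention above concrete, a hypothetical restriction estimator written against the new signature unpacks its arguments through the fmgr interface exactly as the built-in estimators later in this file do. A minimal sketch follows; the function name and the constant guess are purely illustrative, not part of this patch, and it assumes the headers this file already includes:

    /* hypothetical example, not part of this patch */
    Datum
    myoprrest(PG_FUNCTION_ARGS)
    {
        PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
        Oid          operator = PG_GETARG_OID(1);
        List        *args = (List *) PG_GETARG_POINTER(2);
        int          varRelid = PG_GETARG_INT32(3);

        /* a real estimator would inspect root, operator, args and varRelid */
        PG_RETURN_FLOAT8(1.0 / 3.0);    /* fixed one-third guess */
    }

Its pg_proc entry would use the SQL-level signature given above: float8 myoprrest(internal, oid, internal, int4).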
 
 
 #include <ctype.h>
 #include <math.h>
-#ifdef USE_LOCALE
-#include <locale.h>
-#endif
 
 #include "access/heapam.h"
-#include "catalog/catname.h"
+#include "access/nbtree.h"
+#include "access/tuptoaster.h"
+#include "catalog/pg_namespace.h"
+#include "catalog/pg_opclass.h"
 #include "catalog/pg_operator.h"
 #include "catalog/pg_proc.h"
 #include "catalog/pg_statistic.h"
 #include "optimizer/clauses.h"
 #include "optimizer/cost.h"
 #include "optimizer/pathnode.h"
+#include "optimizer/paths.h"
 #include "optimizer/plancat.h"
 #include "optimizer/prep.h"
+#include "optimizer/restrictinfo.h"
+#include "optimizer/tlist.h"
+#include "optimizer/var.h"
+#include "parser/parse_expr.h"
 #include "parser/parse_func.h"
 #include "parser/parse_oper.h"
 #include "parser/parsetree.h"
 #include "utils/datum.h"
 #include "utils/int8.h"
 #include "utils/lsyscache.h"
+#include "utils/nabstime.h"
+#include "utils/pg_locale.h"
 #include "utils/selfuncs.h"
 #include "utils/syscache.h"
 
 
-/*
- * Note: the default selectivity estimates are not chosen entirely at random.
- * We want them to be small enough to ensure that indexscans will be used if
- * available, for typical table densities of ~100 tuples/page. Thus, for
- * example, 0.01 is not quite small enough, since that makes it appear that
- * nearly all pages will be hit anyway.  Also, since we sometimes estimate
- * eqsel as 1/num_distinct, we probably want DEFAULT_NUM_DISTINCT to equal
- * 1/DEFAULT_EQ_SEL.
- */
-
-/* default selectivity estimate for equalities such as "A = b" */
-#define DEFAULT_EQ_SEL 0.005
-
-/* default selectivity estimate for inequalities such as "A < b" */
-#define DEFAULT_INEQ_SEL  (1.0 / 3.0)
-
-/* default selectivity estimate for pattern-match operators such as LIKE */
-#define DEFAULT_MATCH_SEL      0.005
-
-/* default number of distinct values in a table */
-#define DEFAULT_NUM_DISTINCT  200
-
-/* default selectivity estimate for boolean and null test nodes */
-#define DEFAULT_UNK_SEL                        0.005
-#define DEFAULT_NOT_UNK_SEL            (1.0 - DEFAULT_UNK_SEL)
-#define DEFAULT_BOOL_SEL               0.5
-
-/*
- * Clamp a computed probability estimate (which may suffer from roundoff or
- * estimation errors) to valid range.  Argument must be a float variable.
- */
-#define CLAMP_PROBABILITY(p) \
+/* Return data from examine_variable and friends */
+typedef struct
+{
+       Node       *var;                        /* the Var or expression tree */
+       RelOptInfo *rel;                        /* Relation, or NULL if not identifiable */
+       HeapTuple       statsTuple;             /* pg_statistic tuple, or NULL if none */
+       /* NB: if statsTuple!=NULL, it must be freed when caller is done */
+       Oid                     vartype;                /* exposed type of expression */
+       Oid                     atttype;                /* type to pass to get_attstatsslot */
+       int32           atttypmod;              /* typmod to pass to get_attstatsslot */
+       bool            isunique;               /* true if matched to a unique index */
+} VariableStatData;
+
+#define ReleaseVariableStats(vardata)  \
        do { \
-               if (p < 0.0) \
-                       p = 0.0; \
-               else if (p > 1.0) \
-                       p = 1.0; \
-       } while (0)
+               if (HeapTupleIsValid((vardata).statsTuple)) \
+                       ReleaseSysCache((vardata).statsTuple); \
+       } while(0)
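The struct and macro above set the access pattern that the rest of the patch follows: examine_variable() (declared just below) fills in a VariableStatData, the caller checks whether a pg_statistic tuple was found, and ReleaseVariableStats() drops the syscache reference when done. A condensed, hypothetical example of that pattern, modeled on the nulltestsel() changes later in this diff and assuming the default constants and CLAMP_PROBABILITY remain in scope:

    /* hypothetical helper: fraction of rows whose "arg" is not null */
    static Selectivity
    example_notnull_sel(PlannerInfo *root, Node *arg, int varRelid)
    {
        VariableStatData vardata;
        double      selec;

        examine_variable(root, arg, varRelid, &vardata);
        if (HeapTupleIsValid(vardata.statsTuple))
        {
            Form_pg_statistic stats;

            stats = (Form_pg_statistic) GETSTRUCT(vardata.statsTuple);
            selec = 1.0 - stats->stanullfrac;   /* observed non-null fraction */
        }
        else
            selec = DEFAULT_NOT_UNK_SEL;        /* no stats: use the default */
        ReleaseVariableStats(vardata);          /* always release the tuple */

        CLAMP_PROBABILITY(selec);
        return (Selectivity) selec;
    }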
 
 
-static bool get_var_maximum(Query *root, Var *var, Oid sortop, Datum *max);
 static bool convert_to_scalar(Datum value, Oid valuetypid, double *scaledvalue,
                                  Datum lobound, Datum hibound, Oid boundstypid,
                                  double *scaledlobound, double *scaledhibound);
 static double convert_numeric_to_scalar(Datum value, Oid typid);
-static void convert_string_to_scalar(unsigned char *value,
+static void convert_string_to_scalar(char *value,
                                                 double *scaledvalue,
-                                                unsigned char *lobound,
+                                                char *lobound,
                                                 double *scaledlobound,
-                                                unsigned char *hibound,
+                                                char *hibound,
                                                 double *scaledhibound);
 static void convert_bytea_to_scalar(Datum value,
                                                double *scaledvalue,
@@ -156,25 +147,29 @@ static void convert_bytea_to_scalar(Datum value,
                                                double *scaledlobound,
                                                Datum hibound,
                                                double *scaledhibound);
-static double convert_one_string_to_scalar(unsigned char *value,
+static double convert_one_string_to_scalar(char *value,
                                                         int rangelo, int rangehi);
 static double convert_one_bytea_to_scalar(unsigned char *value, int valuelen,
                                                        int rangelo, int rangehi);
-static unsigned char *convert_string_datum(Datum value, Oid typid);
+static char *convert_string_datum(Datum value, Oid typid);
 static double convert_timevalue_to_scalar(Datum value, Oid typid);
-static double get_att_numdistinct(Query *root, Var *var,
-                                       Form_pg_statistic stats);
-static bool get_restriction_var(List *args, int varRelid,
-                                       Var **var, Node **other,
-                                       bool *varonleft);
-static void get_join_vars(List *args, Var **var1, Var **var2);
-static Selectivity prefix_selectivity(Query *root, Var *var, char *prefix);
-static Selectivity pattern_selectivity(char *patt, Pattern_Type ptype);
-static bool string_lessthan(const char *str1, const char *str2,
-                               Oid datatype);
-static Oid     find_operator(const char *opname, Oid datatype);
+static bool get_restriction_variable(PlannerInfo *root, List *args, int varRelid,
+                                                VariableStatData *vardata, Node **other,
+                                                bool *varonleft);
+static void get_join_variables(PlannerInfo *root, List *args,
+                                  VariableStatData *vardata1,
+                                  VariableStatData *vardata2);
+static void examine_variable(PlannerInfo *root, Node *node, int varRelid,
+                                VariableStatData *vardata);
+static double get_variable_numdistinct(VariableStatData *vardata);
+static bool get_variable_maximum(PlannerInfo *root, VariableStatData *vardata,
+                                        Oid sortop, Datum *max);
+static Selectivity prefix_selectivity(PlannerInfo *root, Node *variable,
+                                  Oid opclass, Const *prefix);
+static Selectivity pattern_selectivity(Const *patt, Pattern_Type ptype);
 static Datum string_to_datum(const char *str, Oid datatype);
 static Const *string_to_const(const char *str, Oid datatype);
+static Const *string_to_bytea_const(const char *str, size_t str_len);
 
 
 /*
@@ -188,15 +183,13 @@ static Const *string_to_const(const char *str, Oid datatype);
 Datum
 eqsel(PG_FUNCTION_ARGS)
 {
-       Query      *root = (Query *) PG_GETARG_POINTER(0);
+       PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
        Oid                     operator = PG_GETARG_OID(1);
        List       *args = (List *) PG_GETARG_POINTER(2);
        int                     varRelid = PG_GETARG_INT32(3);
-       Var                *var;
+       VariableStatData vardata;
        Node       *other;
        bool            varonleft;
-       Oid                     relid;
-       HeapTuple       statsTuple;
        Datum      *values;
        int                     nvalues;
        float4     *numbers;
@@ -204,38 +197,33 @@ eqsel(PG_FUNCTION_ARGS)
        double          selec;
 
        /*
-        * If expression is not var = something or something = var for a
-        * simple var of a real relation (no subqueries, for now), then punt
-        * and return a default estimate.
+        * If expression is not variable = something or something = variable,
+        * then punt and return a default estimate.
         */
-       if (!get_restriction_var(args, varRelid,
-                                                        &var, &other, &varonleft))
-               PG_RETURN_FLOAT8(DEFAULT_EQ_SEL);
-       relid = getrelid(var->varno, root->rtable);
-       if (relid == InvalidOid)
+       if (!get_restriction_variable(root, args, varRelid,
+                                                                 &vardata, &other, &varonleft))
                PG_RETURN_FLOAT8(DEFAULT_EQ_SEL);
 
        /*
         * If the something is a NULL constant, assume operator is strict and
         * return zero, ie, operator will never return TRUE.
         */
-       if (IsA(other, Const) &&((Const *) other)->constisnull)
+       if (IsA(other, Const) &&
+               ((Const *) other)->constisnull)
+       {
+               ReleaseVariableStats(vardata);
                PG_RETURN_FLOAT8(0.0);
+       }
 
-       /* get stats for the attribute, if available */
-       statsTuple = SearchSysCache(STATRELATT,
-                                                               ObjectIdGetDatum(relid),
-                                                               Int16GetDatum(var->varattno),
-                                                               0, 0);
-       if (HeapTupleIsValid(statsTuple))
+       if (HeapTupleIsValid(vardata.statsTuple))
        {
                Form_pg_statistic stats;
 
-               stats = (Form_pg_statistic) GETSTRUCT(statsTuple);
+               stats = (Form_pg_statistic) GETSTRUCT(vardata.statsTuple);
 
                if (IsA(other, Const))
                {
-                       /* Var is being compared to a known non-null constant */
+                       /* Variable is being compared to a known non-null constant */
                        Datum           constval = ((Const *) other)->constvalue;
                        bool            match = false;
                        int                     i;
@@ -247,7 +235,8 @@ eqsel(PG_FUNCTION_ARGS)
                         * an appropriate test.  If you don't like this, maybe you
                         * shouldn't be using eqsel for your operator...)
                         */
-                       if (get_attstatsslot(statsTuple, var->vartype, var->vartypmod,
+                       if (get_attstatsslot(vardata.statsTuple,
+                                                                vardata.atttype, vardata.atttypmod,
                                                                 STATISTIC_KIND_MCV, InvalidOid,
                                                                 &values, &nvalues,
                                                                 &numbers, &nnumbers))
@@ -309,7 +298,7 @@ eqsel(PG_FUNCTION_ARGS)
                                 * remaining fraction equally, so we divide by the number
                                 * of other distinct values.
                                 */
-                               otherdistinct = get_att_numdistinct(root, var, stats)
+                               otherdistinct = get_variable_numdistinct(&vardata)
                                        - nnumbers;
                                if (otherdistinct > 1)
                                        selec /= otherdistinct;
@@ -322,7 +311,7 @@ eqsel(PG_FUNCTION_ARGS)
                                        selec = numbers[nnumbers - 1];
                        }
 
-                       free_attstatsslot(var->vartype, values, nvalues,
+                       free_attstatsslot(vardata.atttype, values, nvalues,
                                                          numbers, nnumbers);
                }
                else
@@ -340,7 +329,7 @@ eqsel(PG_FUNCTION_ARGS)
                         * frequency in the table.      Is that a good idea?)
                         */
                        selec = 1.0 - stats->stanullfrac;
-                       ndistinct = get_att_numdistinct(root, var, stats);
+                       ndistinct = get_variable_numdistinct(&vardata);
                        if (ndistinct > 1)
                                selec /= ndistinct;
 
@@ -348,18 +337,17 @@ eqsel(PG_FUNCTION_ARGS)
                         * Cross-check: selectivity should never be estimated as more
                         * than the most common value's.
                         */
-                       if (get_attstatsslot(statsTuple, var->vartype, var->vartypmod,
+                       if (get_attstatsslot(vardata.statsTuple,
+                                                                vardata.atttype, vardata.atttypmod,
                                                                 STATISTIC_KIND_MCV, InvalidOid,
                                                                 NULL, NULL,
                                                                 &numbers, &nnumbers))
                        {
                                if (nnumbers > 0 && selec > numbers[0])
                                        selec = numbers[0];
-                               free_attstatsslot(var->vartype, NULL, 0, numbers, nnumbers);
+                               free_attstatsslot(vardata.atttype, NULL, 0, numbers, nnumbers);
                        }
                }
-
-               ReleaseSysCache(statsTuple);
        }
        else
        {
@@ -369,9 +357,11 @@ eqsel(PG_FUNCTION_ARGS)
                 * equally common.      (The guess is unlikely to be very good, but we
                 * do know a few special cases.)
                 */
-               selec = 1.0 / get_att_numdistinct(root, var, NULL);
+               selec = 1.0 / get_variable_numdistinct(&vardata);
        }
 
+       ReleaseVariableStats(vardata);
+
        /* result should be in range, but make sure... */
        CLAMP_PROBABILITY(selec);
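To put illustrative numbers on the non-constant branch above: if ANALYZE recorded stanullfrac = 0.02 and about 500 distinct values, "var = expr" starts at (1.0 - 0.02) / 500 ≈ 0.00196, which already lies below a typical most-common-value frequency such as 0.01, so it stands; with no statistics at all, the fallback is 1.0 / DEFAULT_NUM_DISTINCT = 1/200 = 0.005.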
 
@@ -388,7 +378,7 @@ eqsel(PG_FUNCTION_ARGS)
 Datum
 neqsel(PG_FUNCTION_ARGS)
 {
-       Query      *root = (Query *) PG_GETARG_POINTER(0);
+       PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
        Oid                     operator = PG_GETARG_OID(1);
        List       *args = (List *) PG_GETARG_POINTER(2);
        int                     varRelid = PG_GETARG_INT32(3);
@@ -421,7 +411,7 @@ neqsel(PG_FUNCTION_ARGS)
  *     scalarineqsel           - Selectivity of "<", "<=", ">", ">=" for scalars.
  *
  * This is the guts of both scalarltsel and scalargtsel.  The caller has
- * commuted the clause, if necessary, so that we can treat the Var as
+ * commuted the clause, if necessary, so that we can treat the variable as
  * being on the left.  The caller must also make sure that the other side
  * of the clause is a non-null Const, and dissect same into a value and
  * datatype.
@@ -431,11 +421,9 @@ neqsel(PG_FUNCTION_ARGS)
  * it will return a default estimate.
  */
 static double
-scalarineqsel(Query *root, Oid operator, bool isgt,
-                         Var *var, Datum constval, Oid consttype)
+scalarineqsel(PlannerInfo *root, Oid operator, bool isgt,
+                         VariableStatData *vardata, Datum constval, Oid consttype)
 {
-       Oid                     relid;
-       HeapTuple       statsTuple;
        Form_pg_statistic stats;
        FmgrInfo        opproc;
        Datum      *values;
@@ -448,26 +436,12 @@ scalarineqsel(Query *root, Oid operator, bool isgt,
        double          selec;
        int                     i;
 
-       /*
-        * If expression is not var op something or something op var for a
-        * simple var of a real relation (no subqueries, for now), then punt
-        * and return a default estimate.
-        */
-       relid = getrelid(var->varno, root->rtable);
-       if (relid == InvalidOid)
-               return DEFAULT_INEQ_SEL;
-
-       /* get stats for the attribute */
-       statsTuple = SearchSysCache(STATRELATT,
-                                                               ObjectIdGetDatum(relid),
-                                                               Int16GetDatum(var->varattno),
-                                                               0, 0);
-       if (!HeapTupleIsValid(statsTuple))
+       if (!HeapTupleIsValid(vardata->statsTuple))
        {
                /* no stats available, so default result */
                return DEFAULT_INEQ_SEL;
        }
-       stats = (Form_pg_statistic) GETSTRUCT(statsTuple);
+       stats = (Form_pg_statistic) GETSTRUCT(vardata->statsTuple);
 
        fmgr_info(get_opcode(operator), &opproc);
 
@@ -480,7 +454,8 @@ scalarineqsel(Query *root, Oid operator, bool isgt,
        mcv_selec = 0.0;
        sumcommon = 0.0;
 
-       if (get_attstatsslot(statsTuple, var->vartype, var->vartypmod,
+       if (get_attstatsslot(vardata->statsTuple,
+                                                vardata->atttype, vardata->atttypmod,
                                                 STATISTIC_KIND_MCV, InvalidOid,
                                                 &values, &nvalues,
                                                 &numbers, &nnumbers))
@@ -493,7 +468,8 @@ scalarineqsel(Query *root, Oid operator, bool isgt,
                                mcv_selec += numbers[i];
                        sumcommon += numbers[i];
                }
-               free_attstatsslot(var->vartype, values, nvalues, numbers, nnumbers);
+               free_attstatsslot(vardata->atttype, values, nvalues,
+                                                 numbers, nnumbers);
        }
 
        /*
@@ -511,7 +487,8 @@ scalarineqsel(Query *root, Oid operator, bool isgt,
         */
        hist_selec = 0.0;
 
-       if (get_attstatsslot(statsTuple, var->vartype, var->vartypmod,
+       if (get_attstatsslot(vardata->statsTuple,
+                                                vardata->atttype, vardata->atttypmod,
                                                 STATISTIC_KIND_HISTOGRAM, InvalidOid,
                                                 &values, &nvalues,
                                                 NULL, NULL))
@@ -570,7 +547,7 @@ scalarineqsel(Query *root, Oid operator, bool isgt,
                                         */
                                        if (convert_to_scalar(constval, consttype, &val,
                                                                                  values[i - 1], values[i],
-                                                                                 var->vartype,
+                                                                                 vardata->vartype,
                                                                                  &low, &high))
                                        {
                                                if (high <= low)
@@ -641,7 +618,7 @@ scalarineqsel(Query *root, Oid operator, bool isgt,
                                hist_selec = 0.9999;
                }
 
-               free_attstatsslot(var->vartype, values, nvalues, NULL, 0);
+               free_attstatsslot(vardata->atttype, values, nvalues, NULL, 0);
        }
 
        /*
@@ -664,8 +641,6 @@ scalarineqsel(Query *root, Oid operator, bool isgt,
 
        selec += mcv_selec;
 
-       ReleaseSysCache(statsTuple);
-
        /* result should be in range, but make sure... */
        CLAMP_PROBABILITY(selec);
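A worked example of the histogram interpolation used above, with made-up numbers: given histogram bounds {0, 10, 20, 30, 40} and the clause "var < 14", the constant falls in the second bucket, so the within-bucket fraction is (14 - 10) / (20 - 10) = 0.4 and the histogram fraction is (1 + 0.4) / 4 = 0.35; roughly 35% of the histogrammed (non-MCV, non-null) rows are taken to satisfy the clause before the MCV and null corrections are folded back in, and the result is kept away from the extremes (0.0001 .. 0.9999) as shown above.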
 
@@ -678,25 +653,24 @@ scalarineqsel(Query *root, Oid operator, bool isgt,
 Datum
 scalarltsel(PG_FUNCTION_ARGS)
 {
-       Query      *root = (Query *) PG_GETARG_POINTER(0);
+       PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
        Oid                     operator = PG_GETARG_OID(1);
        List       *args = (List *) PG_GETARG_POINTER(2);
        int                     varRelid = PG_GETARG_INT32(3);
-       Var                *var;
+       VariableStatData vardata;
        Node       *other;
+       bool            varonleft;
        Datum           constval;
        Oid                     consttype;
-       bool            varonleft;
        bool            isgt;
        double          selec;
 
        /*
-        * If expression is not var op something or something op var for a
-        * simple var of a real relation (no subqueries, for now), then punt
-        * and return a default estimate.
+        * If expression is not variable op something or something op
+        * variable, then punt and return a default estimate.
         */
-       if (!get_restriction_var(args, varRelid,
-                                                        &var, &other, &varonleft))
+       if (!get_restriction_variable(root, args, varRelid,
+                                                                 &vardata, &other, &varonleft))
                PG_RETURN_FLOAT8(DEFAULT_INEQ_SEL);
 
        /*
@@ -704,14 +678,20 @@ scalarltsel(PG_FUNCTION_ARGS)
         * either.
         */
        if (!IsA(other, Const))
+       {
+               ReleaseVariableStats(vardata);
                PG_RETURN_FLOAT8(DEFAULT_INEQ_SEL);
+       }
 
        /*
         * If the constant is NULL, assume operator is strict and return zero,
         * ie, operator will never return TRUE.
         */
        if (((Const *) other)->constisnull)
+       {
+               ReleaseVariableStats(vardata);
                PG_RETURN_FLOAT8(0.0);
+       }
        constval = ((Const *) other)->constvalue;
        consttype = ((Const *) other)->consttype;
 
@@ -730,12 +710,15 @@ scalarltsel(PG_FUNCTION_ARGS)
                if (!operator)
                {
                        /* Use default selectivity (should we raise an error instead?) */
+                       ReleaseVariableStats(vardata);
                        PG_RETURN_FLOAT8(DEFAULT_INEQ_SEL);
                }
                isgt = true;
        }
 
-       selec = scalarineqsel(root, operator, isgt, var, constval, consttype);
+       selec = scalarineqsel(root, operator, isgt, &vardata, constval, consttype);
+
+       ReleaseVariableStats(vardata);
 
        PG_RETURN_FLOAT8((float8) selec);
 }
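For example, given the clause "5 < x", get_restriction_variable() reports the variable on the right, so scalarltsel swaps the operator for its commutator and estimates as though the clause were "x > 5" (hence isgt = true); if no commutator operator exists, the default selectivity of 1/3 is returned instead.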
@@ -746,25 +729,24 @@ scalarltsel(PG_FUNCTION_ARGS)
 Datum
 scalargtsel(PG_FUNCTION_ARGS)
 {
-       Query      *root = (Query *) PG_GETARG_POINTER(0);
+       PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
        Oid                     operator = PG_GETARG_OID(1);
        List       *args = (List *) PG_GETARG_POINTER(2);
        int                     varRelid = PG_GETARG_INT32(3);
-       Var                *var;
+       VariableStatData vardata;
        Node       *other;
+       bool            varonleft;
        Datum           constval;
        Oid                     consttype;
-       bool            varonleft;
        bool            isgt;
        double          selec;
 
        /*
-        * If expression is not var op something or something op var for a
-        * simple var of a real relation (no subqueries, for now), then punt
-        * and return a default estimate.
+        * If expression is not variable op something or something op
+        * variable, then punt and return a default estimate.
         */
-       if (!get_restriction_var(args, varRelid,
-                                                        &var, &other, &varonleft))
+       if (!get_restriction_variable(root, args, varRelid,
+                                                                 &vardata, &other, &varonleft))
                PG_RETURN_FLOAT8(DEFAULT_INEQ_SEL);
 
        /*
@@ -772,14 +754,20 @@ scalargtsel(PG_FUNCTION_ARGS)
         * either.
         */
        if (!IsA(other, Const))
+       {
+               ReleaseVariableStats(vardata);
                PG_RETURN_FLOAT8(DEFAULT_INEQ_SEL);
+       }
 
        /*
         * If the constant is NULL, assume operator is strict and return zero,
         * ie, operator will never return TRUE.
         */
        if (((Const *) other)->constisnull)
+       {
+               ReleaseVariableStats(vardata);
                PG_RETURN_FLOAT8(0.0);
+       }
        constval = ((Const *) other)->constvalue;
        consttype = ((Const *) other)->consttype;
 
@@ -798,12 +786,15 @@ scalargtsel(PG_FUNCTION_ARGS)
                if (!operator)
                {
                        /* Use default selectivity (should we raise an error instead?) */
+                       ReleaseVariableStats(vardata);
                        PG_RETURN_FLOAT8(DEFAULT_INEQ_SEL);
                }
                isgt = false;
        }
 
-       selec = scalarineqsel(root, operator, isgt, var, constval, consttype);
+       selec = scalarineqsel(root, operator, isgt, &vardata, constval, consttype);
+
+       ReleaseVariableStats(vardata);
 
        PG_RETURN_FLOAT8((float8) selec);
 }
@@ -814,66 +805,144 @@ scalargtsel(PG_FUNCTION_ARGS)
 static double
 patternsel(PG_FUNCTION_ARGS, Pattern_Type ptype)
 {
-       Query      *root = (Query *) PG_GETARG_POINTER(0);
+       PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
 
 #ifdef NOT_USED
        Oid                     operator = PG_GETARG_OID(1);
 #endif
        List       *args = (List *) PG_GETARG_POINTER(2);
        int                     varRelid = PG_GETARG_INT32(3);
-       Var                *var;
+       VariableStatData vardata;
+       Node       *variable;
        Node       *other;
        bool            varonleft;
-       Oid                     relid;
        Datum           constval;
-       char       *patt;
+       Oid                     consttype;
+       Oid                     vartype;
+       Oid                     opclass;
        Pattern_Prefix_Status pstatus;
-       char       *prefix;
-       char       *rest;
+       Const      *patt = NULL;
+       Const      *prefix = NULL;
+       Const      *rest = NULL;
        double          result;
 
        /*
-        * If expression is not var op constant for a simple var of a real
-        * relation (no subqueries, for now), then punt and return a default
-        * estimate.
+        * If expression is not variable op constant, then punt and return a
+        * default estimate.
         */
-       if (!get_restriction_var(args, varRelid,
-                                                        &var, &other, &varonleft))
+       if (!get_restriction_variable(root, args, varRelid,
+                                                                 &vardata, &other, &varonleft))
                return DEFAULT_MATCH_SEL;
        if (!varonleft || !IsA(other, Const))
+       {
+               ReleaseVariableStats(vardata);
                return DEFAULT_MATCH_SEL;
-       relid = getrelid(var->varno, root->rtable);
-       if (relid == InvalidOid)
-               return DEFAULT_MATCH_SEL;
+       }
+       variable = (Node *) linitial(args);
 
        /*
         * If the constant is NULL, assume operator is strict and return zero,
         * ie, operator will never return TRUE.
         */
        if (((Const *) other)->constisnull)
+       {
+               ReleaseVariableStats(vardata);
                return 0.0;
+       }
        constval = ((Const *) other)->constvalue;
-       /* the right-hand const is type text for all supported operators */
-       Assert(((Const *) other)->consttype == TEXTOID);
-       patt = DatumGetCString(DirectFunctionCall1(textout, constval));
+       consttype = ((Const *) other)->consttype;
+
+       /*
+        * The right-hand const is type text or bytea for all supported
+        * operators.  We do not expect to see binary-compatible types here,
+        * since const-folding should have relabeled the const to exactly
+        * match the operator's declared type.
+        */
+       if (consttype != TEXTOID && consttype != BYTEAOID)
+       {
+               ReleaseVariableStats(vardata);
+               return DEFAULT_MATCH_SEL;
+       }
+
+       /*
+        * Similarly, the exposed type of the left-hand side should be one
+        * of those we know.  (Do not look at vardata.atttype, which might be
+        * something binary-compatible but different.)  We can use it to choose
+        * the index opclass from which we must draw the comparison operators.
+        *
+        * NOTE: It would be more correct to use the PATTERN opclasses than the
+        * simple ones, but at the moment ANALYZE will not generate statistics
+        * for the PATTERN operators.  But our results are so approximate
+        * anyway that it probably hardly matters.
+        */
+       vartype = vardata.vartype;
+
+       switch (vartype)
+       {
+               case TEXTOID:
+                       opclass = TEXT_BTREE_OPS_OID;
+                       break;
+               case VARCHAROID:
+                       opclass = VARCHAR_BTREE_OPS_OID;
+                       break;
+               case BPCHAROID:
+                       opclass = BPCHAR_BTREE_OPS_OID;
+                       break;
+               case NAMEOID:
+                       opclass = NAME_BTREE_OPS_OID;
+                       break;
+               case BYTEAOID:
+                       opclass = BYTEA_BTREE_OPS_OID;
+                       break;
+               default:
+                       ReleaseVariableStats(vardata);
+                       return DEFAULT_MATCH_SEL;
+       }
 
        /* divide pattern into fixed prefix and remainder */
+       patt = (Const *) other;
        pstatus = pattern_fixed_prefix(patt, ptype, &prefix, &rest);
 
+       /*
+        * If necessary, coerce the prefix constant to the right type. (The
+        * "rest" constant need not be changed.)
+        */
+       if (prefix && prefix->consttype != vartype)
+       {
+               char       *prefixstr;
+
+               switch (prefix->consttype)
+               {
+                       case TEXTOID:
+                               prefixstr = DatumGetCString(DirectFunctionCall1(textout,
+                                                                                                       prefix->constvalue));
+                               break;
+                       case BYTEAOID:
+                               prefixstr = DatumGetCString(DirectFunctionCall1(byteaout,
+                                                                                                       prefix->constvalue));
+                               break;
+                       default:
+                               elog(ERROR, "unrecognized consttype: %u",
+                                        prefix->consttype);
+                               ReleaseVariableStats(vardata);
+                               return DEFAULT_MATCH_SEL;
+               }
+               prefix = string_to_const(prefixstr, vartype);
+               pfree(prefixstr);
+       }
+
        if (pstatus == Pattern_Prefix_Exact)
        {
                /*
                 * Pattern specifies an exact match, so pretend operator is '='
                 */
-               Oid                     eqopr = find_operator("=", var->vartype);
-               Const      *eqcon;
+               Oid                     eqopr = get_opclass_member(opclass, InvalidOid,
+                                                                                          BTEqualStrategyNumber);
                List       *eqargs;
 
                if (eqopr == InvalidOid)
-                       elog(ERROR, "patternsel: no = operator for type %u",
-                                var->vartype);
-               eqcon = string_to_const(prefix, var->vartype);
-               eqargs = makeList2(var, eqcon);
+                       elog(ERROR, "no = operator for opclass %u", opclass);
+               eqargs = list_make2(variable, prefix);
                result = DatumGetFloat8(DirectFunctionCall4(eqsel,
                                                                                                        PointerGetDatum(root),
                                                                                                 ObjectIdGetDatum(eqopr),
@@ -892,7 +961,7 @@ patternsel(PG_FUNCTION_ARGS, Pattern_Type ptype)
                Selectivity selec;
 
                if (pstatus == Pattern_Prefix_Partial)
-                       prefixsel = prefix_selectivity(root, var, prefix);
+                       prefixsel = prefix_selectivity(root, variable, opclass, prefix);
                else
                        prefixsel = 1.0;
                restsel = pattern_selectivity(rest, ptype);
@@ -903,8 +972,12 @@ patternsel(PG_FUNCTION_ARGS, Pattern_Type ptype)
        }
 
        if (prefix)
+       {
+               pfree(DatumGetPointer(prefix->constvalue));
                pfree(prefix);
-       pfree(patt);
+       }
+
+       ReleaseVariableStats(vardata);
 
        return result;
 }
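As an illustration of how the pieces combine: for a clause such as "var LIKE 'abc%def'", pattern_fixed_prefix() reports a partial fixed prefix 'abc' with remainder '%def', and the estimate is (approximately) prefix_selectivity() for 'abc' multiplied by pattern_selectivity() for '%def'; a pattern with no wildcards at all, such as 'abc', comes back as Pattern_Prefix_Exact and is estimated as if the clause were variable = 'abc', using eqsel with the opclass's equality operator.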
@@ -1001,84 +1074,28 @@ icnlikesel(PG_FUNCTION_ARGS)
  *             booltestsel             - Selectivity of BooleanTest Node.
  */
 Selectivity
-booltestsel(Query *root, BooleanTest *clause, int varRelid)
+booltestsel(PlannerInfo *root, BoolTestType booltesttype, Node *arg,
+                       int varRelid, JoinType jointype)
 {
-       Var                *var;
-       Node       *arg;
-       Oid                     relid;
-       HeapTuple       statsTuple;
-       Datum      *values;
-       int                     nvalues;
-       float4     *numbers;
-       int                     nnumbers;
+       VariableStatData vardata;
        double          selec;
 
-       Assert(clause && IsA(clause, BooleanTest));
-
-       arg = (Node *) clause->arg;
-
-       /*
-        * Ignore any binary-compatible relabeling (probably unnecessary, but
-        * can't hurt)
-        */
-       if (IsA(arg, RelabelType))
-               arg = ((RelabelType *) arg)->arg;
-
-       if (IsA(arg, Var) &&(varRelid == 0 || varRelid == ((Var *) arg)->varno))
-               var = (Var *) arg;
-       else
-       {
-               /*
-                * If argument is not a Var, we can't get statistics for it, but
-                * perhaps clause_selectivity can do something with it.  We ignore
-                * the possibility of a NULL value when using clause_selectivity,
-                * and just assume the value is either TRUE or FALSE.
-                */
-               switch (clause->booltesttype)
-               {
-                       case IS_UNKNOWN:
-                               selec = DEFAULT_UNK_SEL;
-                               break;
-                       case IS_NOT_UNKNOWN:
-                               selec = DEFAULT_NOT_UNK_SEL;
-                               break;
-                       case IS_TRUE:
-                       case IS_NOT_FALSE:
-                               selec = (double) clause_selectivity(root, arg, varRelid);
-                               break;
-                       case IS_FALSE:
-                       case IS_NOT_TRUE:
-                               selec = 1.0 - (double) clause_selectivity(root, arg, varRelid);
-                               break;
-                       default:
-                               elog(ERROR, "booltestsel: unexpected booltesttype %d",
-                                        (int) clause->booltesttype);
-                               selec = 0.0;    /* Keep compiler quiet */
-                               break;
-               }
-               return (Selectivity) selec;
-       }
-
-       /* get stats for the attribute, if available */
-       relid = getrelid(var->varno, root->rtable);
-       if (relid == InvalidOid)
-               statsTuple = NULL;
-       else
-               statsTuple = SearchSysCache(STATRELATT,
-                                                                       ObjectIdGetDatum(relid),
-                                                                       Int16GetDatum(var->varattno),
-                                                                       0, 0);
+       examine_variable(root, arg, varRelid, &vardata);
 
-       if (HeapTupleIsValid(statsTuple))
+       if (HeapTupleIsValid(vardata.statsTuple))
        {
                Form_pg_statistic stats;
                double          freq_null;
+               Datum      *values;
+               int                     nvalues;
+               float4     *numbers;
+               int                     nnumbers;
 
-               stats = (Form_pg_statistic) GETSTRUCT(statsTuple);
-
+               stats = (Form_pg_statistic) GETSTRUCT(vardata.statsTuple);
                freq_null = stats->stanullfrac;
 
-               if (get_attstatsslot(statsTuple, var->vartype, var->vartypmod,
+               if (get_attstatsslot(vardata.statsTuple,
+                                                        vardata.atttype, vardata.atttypmod,
                                                         STATISTIC_KIND_MCV, InvalidOid,
                                                         &values, &nvalues,
                                                         &numbers, &nnumbers)
@@ -1096,12 +1113,12 @@ booltestsel(Query *root, BooleanTest *clause, int varRelid)
                                freq_true = 1.0 - numbers[0] - freq_null;
 
                        /*
-                        * Next derive freqency for false. Then use these as
+                        * Next derive frequency for false. Then use these as
                         * appropriate to derive frequency for each case.
                         */
                        freq_false = 1.0 - freq_true - freq_null;
 
-                       switch (clause->booltesttype)
+                       switch (booltesttype)
                        {
                                case IS_UNKNOWN:
                                        /* select only NULL values */
@@ -1128,13 +1145,13 @@ booltestsel(Query *root, BooleanTest *clause, int varRelid)
                                        selec = 1.0 - freq_false;
                                        break;
                                default:
-                                       elog(ERROR, "booltestsel: unexpected booltesttype %d",
-                                                (int) clause->booltesttype);
+                                       elog(ERROR, "unrecognized booltesttype: %d",
+                                                (int) booltesttype);
                                        selec = 0.0;    /* Keep compiler quiet */
                                        break;
                        }
 
-                       free_attstatsslot(var->vartype, values, nvalues,
+                       free_attstatsslot(vardata.atttype, values, nvalues,
                                                          numbers, nnumbers);
                }
                else
@@ -1145,7 +1162,7 @@ booltestsel(Query *root, BooleanTest *clause, int varRelid)
                         * Otherwise adjust for null fraction and assume an even split
                         * for boolean tests.
                         */
-                       switch (clause->booltesttype)
+                       switch (booltesttype)
                        {
                                case IS_UNKNOWN:
 
@@ -1169,22 +1186,22 @@ booltestsel(Query *root, BooleanTest *clause, int varRelid)
                                        selec = (1.0 - freq_null) / 2.0;
                                        break;
                                default:
-                                       elog(ERROR, "booltestsel: unexpected booltesttype %d",
-                                                (int) clause->booltesttype);
+                                       elog(ERROR, "unrecognized booltesttype: %d",
+                                                (int) booltesttype);
                                        selec = 0.0;    /* Keep compiler quiet */
                                        break;
                        }
                }
-
-               ReleaseSysCache(statsTuple);
        }
        else
        {
                /*
-                * No VACUUM ANALYZE stats available, so use a default value.
-                * (Note: not much point in recursing to clause_selectivity here.)
+                * If we can't get variable statistics for the argument, perhaps
+                * clause_selectivity can do something with it.  We ignore the
+                * possibility of a NULL value when using clause_selectivity, and
+                * just assume the value is either TRUE or FALSE.
                 */
-               switch (clause->booltesttype)
+               switch (booltesttype)
                {
                        case IS_UNKNOWN:
                                selec = DEFAULT_UNK_SEL;
@@ -1193,19 +1210,25 @@ booltestsel(Query *root, BooleanTest *clause, int varRelid)
                                selec = DEFAULT_NOT_UNK_SEL;
                                break;
                        case IS_TRUE:
-                       case IS_NOT_TRUE:
-                       case IS_FALSE:
                        case IS_NOT_FALSE:
-                               selec = DEFAULT_BOOL_SEL;
+                               selec = (double) clause_selectivity(root, arg,
+                                                                                                       varRelid, jointype);
+                               break;
+                       case IS_FALSE:
+                       case IS_NOT_TRUE:
+                               selec = 1.0 - (double) clause_selectivity(root, arg,
+                                                                                                        varRelid, jointype);
                                break;
                        default:
-                               elog(ERROR, "booltestsel: unexpected booltesttype %d",
-                                        (int) clause->booltesttype);
+                               elog(ERROR, "unrecognized booltesttype: %d",
+                                        (int) booltesttype);
                                selec = 0.0;    /* Keep compiler quiet */
                                break;
                }
        }
 
+       ReleaseVariableStats(vardata);
+
        /* result should be in range, but make sure... */
        CLAMP_PROBABILITY(selec);
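With illustrative numbers: if the column's MCV slot says the most common value is true with frequency 0.6 and stanullfrac is 0.1, then freq_true = 0.6 and freq_false = 1.0 - 0.6 - 0.1 = 0.3, so IS TRUE estimates 0.6, IS FALSE 0.3, IS UNKNOWN 0.1, and IS NOT TRUE 0.4 (the false rows plus the NULLs).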
 
@@ -1216,67 +1239,23 @@ booltestsel(Query *root, BooleanTest *clause, int varRelid)
  *             nulltestsel             - Selectivity of NullTest Node.
  */
 Selectivity
-nulltestsel(Query *root, NullTest *clause, int varRelid)
+nulltestsel(PlannerInfo *root, NullTestType nulltesttype,
+                       Node *arg, int varRelid)
 {
-       Var                *var;
-       Node       *arg;
-       Oid                     relid;
-       HeapTuple       statsTuple;
+       VariableStatData vardata;
        double          selec;
-       double          defselec;
-       double          freq_null;
-
-       Assert(clause && IsA(clause, NullTest));
-
-       switch (clause->nulltesttype)
-       {
-               case IS_NULL:
-                       defselec = DEFAULT_UNK_SEL;
-                       break;
-               case IS_NOT_NULL:
-                       defselec = DEFAULT_NOT_UNK_SEL;
-                       break;
-               default:
-                       elog(ERROR, "nulltestsel: unexpected nulltesttype %d",
-                                (int) clause->nulltesttype);
-                       return (Selectivity) 0;         /* keep compiler quiet */
-       }
-
-       arg = (Node *) clause->arg;
-
-       /*
-        * Ignore any binary-compatible relabeling
-        */
-       if (IsA(arg, RelabelType))
-               arg = ((RelabelType *) arg)->arg;
-
-       if (IsA(arg, Var) &&(varRelid == 0 || varRelid == ((Var *) arg)->varno))
-               var = (Var *) arg;
-       else
-       {
-               /*
-                * punt if non-Var argument
-                */
-               return (Selectivity) defselec;
-       }
 
-       relid = getrelid(var->varno, root->rtable);
-       if (relid == InvalidOid)
-               return (Selectivity) defselec;
+       examine_variable(root, arg, varRelid, &vardata);
 
-       /* get stats for the attribute, if available */
-       statsTuple = SearchSysCache(STATRELATT,
-                                                               ObjectIdGetDatum(relid),
-                                                               Int16GetDatum(var->varattno),
-                                                               0, 0);
-       if (HeapTupleIsValid(statsTuple))
+       if (HeapTupleIsValid(vardata.statsTuple))
        {
                Form_pg_statistic stats;
+               double          freq_null;
 
-               stats = (Form_pg_statistic) GETSTRUCT(statsTuple);
+               stats = (Form_pg_statistic) GETSTRUCT(vardata.statsTuple);
                freq_null = stats->stanullfrac;
 
-               switch (clause->nulltesttype)
+               switch (nulltesttype)
                {
                        case IS_NULL:
 
@@ -1294,21 +1273,33 @@ nulltestsel(Query *root, NullTest *clause, int varRelid)
                                selec = 1.0 - freq_null;
                                break;
                        default:
-                               elog(ERROR, "nulltestsel: unexpected nulltesttype %d",
-                                        (int) clause->nulltesttype);
+                               elog(ERROR, "unrecognized nulltesttype: %d",
+                                        (int) nulltesttype);
                                return (Selectivity) 0; /* keep compiler quiet */
                }
-
-               ReleaseSysCache(statsTuple);
        }
        else
        {
                /*
                 * No VACUUM ANALYZE stats available, so make a guess
                 */
-               selec = defselec;
+               switch (nulltesttype)
+               {
+                       case IS_NULL:
+                               selec = DEFAULT_UNK_SEL;
+                               break;
+                       case IS_NOT_NULL:
+                               selec = DEFAULT_NOT_UNK_SEL;
+                               break;
+                       default:
+                               elog(ERROR, "unrecognized nulltesttype: %d",
+                                        (int) nulltesttype);
+                               return (Selectivity) 0; /* keep compiler quiet */
+               }
        }
 
+       ReleaseVariableStats(vardata);
+
        /* result should be in range, but make sure... */
        CLAMP_PROBABILITY(selec);
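Concretely, a stanullfrac of 0.25 makes IS NULL estimate 0.25 and IS NOT NULL 0.75; without statistics the defaults DEFAULT_UNK_SEL (0.005) and DEFAULT_NOT_UNK_SEL (0.995) are used instead.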
 
@@ -1321,259 +1312,255 @@ nulltestsel(Query *root, NullTest *clause, int varRelid)
 Datum
 eqjoinsel(PG_FUNCTION_ARGS)
 {
-       Query      *root = (Query *) PG_GETARG_POINTER(0);
+       PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
        Oid                     operator = PG_GETARG_OID(1);
        List       *args = (List *) PG_GETARG_POINTER(2);
-       Var                *var1;
-       Var                *var2;
+       JoinType        jointype = (JoinType) PG_GETARG_INT16(3);
        double          selec;
+       VariableStatData vardata1;
+       VariableStatData vardata2;
+       double          nd1;
+       double          nd2;
+       Form_pg_statistic stats1 = NULL;
+       Form_pg_statistic stats2 = NULL;
+       bool            have_mcvs1 = false;
+       Datum      *values1 = NULL;
+       int                     nvalues1 = 0;
+       float4     *numbers1 = NULL;
+       int                     nnumbers1 = 0;
+       bool            have_mcvs2 = false;
+       Datum      *values2 = NULL;
+       int                     nvalues2 = 0;
+       float4     *numbers2 = NULL;
+       int                     nnumbers2 = 0;
+
+       get_join_variables(root, args, &vardata1, &vardata2);
+
+       nd1 = get_variable_numdistinct(&vardata1);
+       nd2 = get_variable_numdistinct(&vardata2);
+
+       if (HeapTupleIsValid(vardata1.statsTuple))
+       {
+               stats1 = (Form_pg_statistic) GETSTRUCT(vardata1.statsTuple);
+               have_mcvs1 = get_attstatsslot(vardata1.statsTuple,
+                                                                         vardata1.atttype,
+                                                                         vardata1.atttypmod,
+                                                                         STATISTIC_KIND_MCV,
+                                                                         InvalidOid,
+                                                                         &values1, &nvalues1,
+                                                                         &numbers1, &nnumbers1);
+       }
 
-       get_join_vars(args, &var1, &var2);
+       if (HeapTupleIsValid(vardata2.statsTuple))
+       {
+               stats2 = (Form_pg_statistic) GETSTRUCT(vardata2.statsTuple);
+               have_mcvs2 = get_attstatsslot(vardata2.statsTuple,
+                                                                         vardata2.atttype,
+                                                                         vardata2.atttypmod,
+                                                                         STATISTIC_KIND_MCV,
+                                                                         InvalidOid,
+                                                                         &values2, &nvalues2,
+                                                                         &numbers2, &nnumbers2);
+       }
 
-       if (var1 == NULL && var2 == NULL)
-               selec = DEFAULT_EQ_SEL;
-       else
+       if (have_mcvs1 && have_mcvs2)
        {
-               HeapTuple       statsTuple1 = NULL;
-               HeapTuple       statsTuple2 = NULL;
-               Form_pg_statistic stats1 = NULL;
-               Form_pg_statistic stats2 = NULL;
-               double          nd1 = DEFAULT_NUM_DISTINCT;
-               double          nd2 = DEFAULT_NUM_DISTINCT;
-               bool            have_mcvs1 = false;
-               Datum      *values1 = NULL;
-               int                     nvalues1 = 0;
-               float4     *numbers1 = NULL;
-               int                     nnumbers1 = 0;
-               bool            have_mcvs2 = false;
-               Datum      *values2 = NULL;
-               int                     nvalues2 = 0;
-               float4     *numbers2 = NULL;
-               int                     nnumbers2 = 0;
-
-               if (var1 != NULL)
-               {
-                       /* get stats for the attribute, if available */
-                       Oid                     relid1 = getrelid(var1->varno, root->rtable);
+               /*
+                * We have most-common-value lists for both relations.  Run
+                * through the lists to see which MCVs actually join to each other
+                * with the given operator.  This allows us to determine the exact
+                * join selectivity for the portion of the relations represented
+                * by the MCV lists.  We still have to estimate for the remaining
+                * population, but in a skewed distribution this gives us a big
+                * leg up in accuracy.  For motivation see the analysis in Y.
+                * Ioannidis and S. Christodoulakis, "On the propagation of errors
+                * in the size of join results", Technical Report 1018, Computer
+                * Science Dept., University of Wisconsin, Madison, March 1991
+                * (available from ftp.cs.wisc.edu).
+                */
+               FmgrInfo        eqproc;
+               bool       *hasmatch1;
+               bool       *hasmatch2;
+               double          nullfrac1 = stats1->stanullfrac;
+               double          nullfrac2 = stats2->stanullfrac;
+               double          matchprodfreq,
+                                       matchfreq1,
+                                       matchfreq2,
+                                       unmatchfreq1,
+                                       unmatchfreq2,
+                                       otherfreq1,
+                                       otherfreq2,
+                                       totalsel1,
+                                       totalsel2;
+               int                     i,
+                                       nmatches;
+
+               fmgr_info(get_opcode(operator), &eqproc);
+               hasmatch1 = (bool *) palloc0(nvalues1 * sizeof(bool));
+               hasmatch2 = (bool *) palloc0(nvalues2 * sizeof(bool));
 
-                       if (relid1 != InvalidOid)
-                       {
-                               statsTuple1 = SearchSysCache(STATRELATT,
-                                                                                        ObjectIdGetDatum(relid1),
-                                                                                  Int16GetDatum(var1->varattno),
-                                                                                        0, 0);
-                               if (HeapTupleIsValid(statsTuple1))
-                               {
-                                       stats1 = (Form_pg_statistic) GETSTRUCT(statsTuple1);
-                                       have_mcvs1 = get_attstatsslot(statsTuple1,
-                                                                                                 var1->vartype,
-                                                                                                 var1->vartypmod,
-                                                                                                 STATISTIC_KIND_MCV,
-                                                                                                 InvalidOid,
-                                                                                                 &values1, &nvalues1,
-                                                                                                 &numbers1, &nnumbers1);
-                               }
+               /*
+                * If we are doing any variant of JOIN_IN, pretend all the values
+                * of the righthand relation are unique (ie, act as if it's been
+                * DISTINCT'd).
+                *
+                * NOTE: it might seem that we should unique-ify the lefthand input
+                * when considering JOIN_REVERSE_IN.  But this is not so, because
+                * the join clause we've been handed has not been commuted from
+                * the way the parser originally wrote it.      We know that the
+                * unique side of the IN clause is *always* on the right.
+                *
+                * NOTE: it would be dangerous to try to be smart about JOIN_LEFT or
+                * JOIN_RIGHT here, because we do not have enough information to
+                * determine which var is really on which side of the join.
+                * Perhaps someday we should pass in more information.
+                */
+               if (jointype == JOIN_IN ||
+                       jointype == JOIN_REVERSE_IN ||
+                       jointype == JOIN_UNIQUE_INNER ||
+                       jointype == JOIN_UNIQUE_OUTER)
+               {
+                       float4          oneovern = 1.0 / nd2;
 
-                               nd1 = get_att_numdistinct(root, var1, stats1);
-                       }
+                       for (i = 0; i < nvalues2; i++)
+                               numbers2[i] = oneovern;
+                       nullfrac2 = oneovern;
                }
 
-               if (var2 != NULL)
+               /*
+                * Note we assume that each MCV will match at most one member of
+                * the other MCV list.  If the operator isn't really equality,
+                * there could be multiple matches --- but we don't look for them,
+                * both for speed and because the math wouldn't add up...
+                */
+               matchprodfreq = 0.0;
+               nmatches = 0;
+               for (i = 0; i < nvalues1; i++)
                {
-                       /* get stats for the attribute, if available */
-                       Oid                     relid2 = getrelid(var2->varno, root->rtable);
+                       int                     j;
 
-                       if (relid2 != InvalidOid)
+                       for (j = 0; j < nvalues2; j++)
                        {
-                               statsTuple2 = SearchSysCache(STATRELATT,
-                                                                                        ObjectIdGetDatum(relid2),
-                                                                                  Int16GetDatum(var2->varattno),
-                                                                                        0, 0);
-                               if (HeapTupleIsValid(statsTuple2))
+                               if (hasmatch2[j])
+                                       continue;
+                               if (DatumGetBool(FunctionCall2(&eqproc,
+                                                                                          values1[i],
+                                                                                          values2[j])))
                                {
-                                       stats2 = (Form_pg_statistic) GETSTRUCT(statsTuple2);
-                                       have_mcvs2 = get_attstatsslot(statsTuple2,
-                                                                                                 var2->vartype,
-                                                                                                 var2->vartypmod,
-                                                                                                 STATISTIC_KIND_MCV,
-                                                                                                 InvalidOid,
-                                                                                                 &values2, &nvalues2,
-                                                                                                 &numbers2, &nnumbers2);
+                                       hasmatch1[i] = hasmatch2[j] = true;
+                                       matchprodfreq += numbers1[i] * numbers2[j];
+                                       nmatches++;
+                                       break;
                                }
-
-                               nd2 = get_att_numdistinct(root, var2, stats2);
                        }
                }
-
-               if (have_mcvs1 && have_mcvs2)
+               CLAMP_PROBABILITY(matchprodfreq);
+               /* Sum up frequencies of matched and unmatched MCVs */
+               matchfreq1 = unmatchfreq1 = 0.0;
+               for (i = 0; i < nvalues1; i++)
                {
-                       /*
-                        * We have most-common-value lists for both relations.  Run
-                        * through the lists to see which MCVs actually join to each
-                        * other with the given operator.  This allows us to determine
-                        * the exact join selectivity for the portion of the relations
-                        * represented by the MCV lists.  We still have to estimate
-                        * for the remaining population, but in a skewed distribution
-                        * this gives us a big leg up in accuracy.      For motivation see
-                        * the analysis in Y. Ioannidis and S. Christodoulakis, "On
-                        * the propagation of errors in the size of join results",
-                        * Technical Report 1018, Computer Science Dept., University
-                        * of Wisconsin, Madison, March 1991 (available from
-                        * ftp.cs.wisc.edu).
-                        */
-                       FmgrInfo        eqproc;
-                       bool       *hasmatch1;
-                       bool       *hasmatch2;
-                       double          matchprodfreq,
-                                               matchfreq1,
-                                               matchfreq2,
-                                               unmatchfreq1,
-                                               unmatchfreq2,
-                                               otherfreq1,
-                                               otherfreq2,
-                                               totalsel1,
-                                               totalsel2;
-                       int                     i,
-                                               nmatches;
-
-                       fmgr_info(get_opcode(operator), &eqproc);
-                       hasmatch1 = (bool *) palloc(nvalues1 * sizeof(bool));
-                       memset(hasmatch1, 0, nvalues1 * sizeof(bool));
-                       hasmatch2 = (bool *) palloc(nvalues2 * sizeof(bool));
-                       memset(hasmatch2, 0, nvalues2 * sizeof(bool));
-
-                       /*
-                        * Note we assume that each MCV will match at most one member
-                        * of the other MCV list.  If the operator isn't really
-                        * equality, there could be multiple matches --- but we don't
-                        * look for them, both for speed and because the math wouldn't
-                        * add up...
-                        */
-                       matchprodfreq = 0.0;
-                       nmatches = 0;
-                       for (i = 0; i < nvalues1; i++)
-                       {
-                               int                     j;
-
-                               for (j = 0; j < nvalues2; j++)
-                               {
-                                       if (hasmatch2[j])
-                                               continue;
-                                       if (DatumGetBool(FunctionCall2(&eqproc,
-                                                                                                  values1[i],
-                                                                                                  values2[j])))
-                                       {
-                                               hasmatch1[i] = hasmatch2[j] = true;
-                                               matchprodfreq += numbers1[i] * numbers2[j];
-                                               nmatches++;
-                                               break;
-                                       }
-                               }
-                       }
-                       CLAMP_PROBABILITY(matchprodfreq);
-                       /* Sum up frequencies of matched and unmatched MCVs */
-                       matchfreq1 = unmatchfreq1 = 0.0;
-                       for (i = 0; i < nvalues1; i++)
-                       {
-                               if (hasmatch1[i])
-                                       matchfreq1 += numbers1[i];
-                               else
-                                       unmatchfreq1 += numbers1[i];
-                       }
-                       CLAMP_PROBABILITY(matchfreq1);
-                       CLAMP_PROBABILITY(unmatchfreq1);
-                       matchfreq2 = unmatchfreq2 = 0.0;
-                       for (i = 0; i < nvalues2; i++)
-                       {
-                               if (hasmatch2[i])
-                                       matchfreq2 += numbers2[i];
-                               else
-                                       unmatchfreq2 += numbers2[i];
-                       }
-                       CLAMP_PROBABILITY(matchfreq2);
-                       CLAMP_PROBABILITY(unmatchfreq2);
-                       pfree(hasmatch1);
-                       pfree(hasmatch2);
-
-                       /*
-                        * Compute total frequency of non-null values that are not in
-                        * the MCV lists.
-                        */
-                       otherfreq1 = 1.0 - stats1->stanullfrac - matchfreq1 - unmatchfreq1;
-                       otherfreq2 = 1.0 - stats2->stanullfrac - matchfreq2 - unmatchfreq2;
-                       CLAMP_PROBABILITY(otherfreq1);
-                       CLAMP_PROBABILITY(otherfreq2);
-
-                       /*
-                        * We can estimate the total selectivity from the point of
-                        * view of relation 1 as: the known selectivity for matched
-                        * MCVs, plus unmatched MCVs that are assumed to match against
-                        * random members of relation 2's non-MCV population, plus
-                        * non-MCV values that are assumed to match against random
-                        * members of relation 2's unmatched MCVs plus non-MCV values.
-                        */
-                       totalsel1 = matchprodfreq;
-                       if (nd2 > nvalues2)
-                               totalsel1 += unmatchfreq1 * otherfreq2 / (nd2 - nvalues2);
-                       if (nd2 > nmatches)
-                               totalsel1 += otherfreq1 * (otherfreq2 + unmatchfreq2) /
-                                       (nd2 - nmatches);
-                       /* Same estimate from the point of view of relation 2. */
-                       totalsel2 = matchprodfreq;
-                       if (nd1 > nvalues1)
-                               totalsel2 += unmatchfreq2 * otherfreq1 / (nd1 - nvalues1);
-                       if (nd1 > nmatches)
-                               totalsel2 += otherfreq2 * (otherfreq1 + unmatchfreq1) /
-                                       (nd1 - nmatches);
-
-                       /*
-                        * Use the smaller of the two estimates.  This can be
-                        * justified in essentially the same terms as given below for
-                        * the no-stats case: to a first approximation, we are
-                        * estimating from the point of view of the relation with
-                        * smaller nd.
-                        */
-                       selec = (totalsel1 < totalsel2) ? totalsel1 : totalsel2;
+                       if (hasmatch1[i])
+                               matchfreq1 += numbers1[i];
+                       else
+                               unmatchfreq1 += numbers1[i];
                }
-               else
+               CLAMP_PROBABILITY(matchfreq1);
+               CLAMP_PROBABILITY(unmatchfreq1);
+               matchfreq2 = unmatchfreq2 = 0.0;
+               for (i = 0; i < nvalues2; i++)
                {
-                       /*
-                        * We do not have MCV lists for both sides.  Estimate the join
-                        * selectivity as MIN(1/nd1, 1/nd2).  This is plausible if we
-                        * assume that the values are about equally distributed: a
-                        * given tuple of rel1 will join to either 0 or N2/nd2 rows of
-                        * rel2, so total join rows are at most N1*N2/nd2 giving a
-                        * join selectivity of not more than 1/nd2.  By the same logic
-                        * it is not more than 1/nd1, so MIN(1/nd1, 1/nd2) is an upper
-                        * bound.  Using the MIN() means we estimate from the point of
-                        * view of the relation with smaller nd (since the larger nd
-                        * is determining the MIN).  It is reasonable to assume that
-                        * most tuples in this rel will have join partners, so the
-                        * bound is probably reasonably tight and should be taken
-                        * as-is.
-                        *
-                        * XXX Can we be smarter if we have an MCV list for just one
-                        * side? It seems that if we assume equal distribution for the
-                        * other side, we end up with the same answer anyway.
-                        */
-                       if (nd1 > nd2)
-                               selec = 1.0 / nd1;
+                       if (hasmatch2[i])
+                               matchfreq2 += numbers2[i];
                        else
-                               selec = 1.0 / nd2;
+                               unmatchfreq2 += numbers2[i];
                }
+               CLAMP_PROBABILITY(matchfreq2);
+               CLAMP_PROBABILITY(unmatchfreq2);
+               pfree(hasmatch1);
+               pfree(hasmatch2);
+
+               /*
+                * Compute total frequency of non-null values that are not in the
+                * MCV lists.
+                */
+               otherfreq1 = 1.0 - nullfrac1 - matchfreq1 - unmatchfreq1;
+               otherfreq2 = 1.0 - nullfrac2 - matchfreq2 - unmatchfreq2;
+               CLAMP_PROBABILITY(otherfreq1);
+               CLAMP_PROBABILITY(otherfreq2);
+
+               /*
+                * We can estimate the total selectivity from the point of view of
+                * relation 1 as: the known selectivity for matched MCVs, plus
+                * unmatched MCVs that are assumed to match against random members
+                * of relation 2's non-MCV population, plus non-MCV values that
+                * are assumed to match against random members of relation 2's
+                * unmatched MCVs plus non-MCV values.
+                */
+               totalsel1 = matchprodfreq;
+               if (nd2 > nvalues2)
+                       totalsel1 += unmatchfreq1 * otherfreq2 / (nd2 - nvalues2);
+               if (nd2 > nmatches)
+                       totalsel1 += otherfreq1 * (otherfreq2 + unmatchfreq2) /
+                               (nd2 - nmatches);
+               /* Same estimate from the point of view of relation 2. */
+               totalsel2 = matchprodfreq;
+               if (nd1 > nvalues1)
+                       totalsel2 += unmatchfreq2 * otherfreq1 / (nd1 - nvalues1);
+               if (nd1 > nmatches)
+                       totalsel2 += otherfreq2 * (otherfreq1 + unmatchfreq1) /
+                               (nd1 - nmatches);
+
+               /*
+                * Use the smaller of the two estimates.  This can be justified in
+                * essentially the same terms as given below for the no-stats
+                * case: to a first approximation, we are estimating from the
+                * point of view of the relation with smaller nd.
+                */
+               selec = (totalsel1 < totalsel2) ? totalsel1 : totalsel2;
+       }
+       else
+       {
+               /*
+                * We do not have MCV lists for both sides.  Estimate the join
+                * selectivity as MIN(1/nd1,1/nd2)*(1-nullfrac1)*(1-nullfrac2).
+                * This is plausible if we assume that the join operator is strict
+                * and the non-null values are about equally distributed: a given
+                * non-null tuple of rel1 will join to either zero or
+                * N2*(1-nullfrac2)/nd2 rows of rel2, so total join rows are at
+                * most N1*(1-nullfrac1)*N2*(1-nullfrac2)/nd2 giving a join
+                * selectivity of not more than (1-nullfrac1)*(1-nullfrac2)/nd2.
+                * By the same logic it is not more than
+                * (1-nullfrac1)*(1-nullfrac2)/nd1, so the expression with MIN()
+                * is an upper bound.  Using the MIN() means we estimate from the
+                * point of view of the relation with smaller nd (since the larger
+                * nd is determining the MIN).  It is reasonable to assume that
+                * most tuples in this rel will have join partners, so the bound
+                * is probably reasonably tight and should be taken as-is.
+                *
+                * XXX Can we be smarter if we have an MCV list for just one side? It
+                * seems that if we assume equal distribution for the other side,
+                * we end up with the same answer anyway.
+                */
+               double          nullfrac1 = stats1 ? stats1->stanullfrac : 0.0;
+               double          nullfrac2 = stats2 ? stats2->stanullfrac : 0.0;
 
-               if (have_mcvs1)
-                       free_attstatsslot(var1->vartype, values1, nvalues1,
-                                                         numbers1, nnumbers1);
-               if (have_mcvs2)
-                       free_attstatsslot(var2->vartype, values2, nvalues2,
-                                                         numbers2, nnumbers2);
-               if (HeapTupleIsValid(statsTuple1))
-                       ReleaseSysCache(statsTuple1);
-               if (HeapTupleIsValid(statsTuple2))
-                       ReleaseSysCache(statsTuple2);
+               selec = (1.0 - nullfrac1) * (1.0 - nullfrac2);
+               if (nd1 > nd2)
+                       selec /= nd1;
+               else
+                       selec /= nd2;
        }
 
+       if (have_mcvs1)
+               free_attstatsslot(vardata1.atttype, values1, nvalues1,
+                                                 numbers1, nnumbers1);
+       if (have_mcvs2)
+               free_attstatsslot(vardata2.atttype, values2, nvalues2,
+                                                 numbers2, nnumbers2);
+
+       ReleaseVariableStats(vardata1);
+       ReleaseVariableStats(vardata2);
+
        CLAMP_PROBABILITY(selec);
 
        PG_RETURN_FLOAT8((float8) selec);
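
As a standalone illustration (not part of the patch), the sketch below mirrors the arithmetic of the no-MCV fallback in eqjoinsel above; the function name and the statistics plugged in are made up:

#include <stdio.h>

/*
 * Sketch of the no-statistics fallback used above:
 * selectivity = (1 - nullfrac1) * (1 - nullfrac2) / max(nd1, nd2).
 */
static double
eqjoin_fallback_selectivity(double nd1, double nd2,
							double nullfrac1, double nullfrac2)
{
	double		selec = (1.0 - nullfrac1) * (1.0 - nullfrac2);

	if (nd1 > nd2)
		selec /= nd1;
	else
		selec /= nd2;
	return selec;
}

int
main(void)
{
	/*
	 * 1000 vs 50 distinct values, 10% and 0% nulls:
	 * (0.9 * 1.0) / 1000 = 0.0009, i.e. the join is expected to keep
	 * about 0.09% of the cross product.
	 */
	printf("%g\n", eqjoin_fallback_selectivity(1000, 50, 0.1, 0.0));
	return 0;
}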
@@ -1585,9 +1572,10 @@ eqjoinsel(PG_FUNCTION_ARGS)
 Datum
 neqjoinsel(PG_FUNCTION_ARGS)
 {
-       Query      *root = (Query *) PG_GETARG_POINTER(0);
+       PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
        Oid                     operator = PG_GETARG_OID(1);
        List       *args = (List *) PG_GETARG_POINTER(2);
+       JoinType        jointype = (JoinType) PG_GETARG_INT16(3);
        Oid                     eqop;
        float8          result;
 
@@ -1598,11 +1586,11 @@ neqjoinsel(PG_FUNCTION_ARGS)
        eqop = get_negator(operator);
        if (eqop)
        {
-               result = DatumGetFloat8(DirectFunctionCall3(eqjoinsel,
+               result = DatumGetFloat8(DirectFunctionCall4(eqjoinsel,
                                                                                                        PointerGetDatum(root),
                                                                                                  ObjectIdGetDatum(eqop),
-                                                                                                PointerGetDatum(args)));
-
+                                                                                                       PointerGetDatum(args),
+                                                                                          Int16GetDatum(jointype)));
        }
        else
        {
@@ -1734,18 +1722,24 @@ icnlikejoinsel(PG_FUNCTION_ARGS)
  * variable.
  */
 void
-mergejoinscansel(Query *root, Node *clause,
+mergejoinscansel(PlannerInfo *root, Node *clause,
                                 Selectivity *leftscan,
                                 Selectivity *rightscan)
 {
-       Var                *left,
+       Node       *left,
                           *right;
+       VariableStatData leftvar,
+                               rightvar;
+       Oid                     lefttype,
+                               righttype;
        Oid                     opno,
                                lsortop,
                                rsortop,
                                ltop,
                                gtop,
-                               revltop;
+                               leop,
+                               revgtop,
+                               revleop;
        Datum           leftmax,
                                rightmax;
        double          selec;
@@ -1756,193 +1750,524 @@ mergejoinscansel(Query *root, Node *clause,
        /* Deconstruct the merge clause */
        if (!is_opclause(clause))
                return;                                 /* shouldn't happen */
-       opno = ((Oper *) ((Expr *) clause)->oper)->opno;
+       opno = ((OpExpr *) clause)->opno;
        left = get_leftop((Expr *) clause);
        right = get_rightop((Expr *) clause);
        if (!right)
                return;                                 /* shouldn't happen */
 
-       /* Can't do anything if inputs are not Vars */
-       if (!IsA(left, Var) ||!IsA(right, Var))
-               return;
+       /* Look for stats for the inputs */
+       examine_variable(root, left, 0, &leftvar);
+       examine_variable(root, right, 0, &rightvar);
+
+       /* Get the direct input types of the operator */
+       lefttype = exprType(left);
+       righttype = exprType(right);
 
        /* Verify mergejoinability and get left and right "<" operators */
        if (!op_mergejoinable(opno,
-                                                 left->vartype,
-                                                 right->vartype,
                                                  &lsortop,
                                                  &rsortop))
-               return;                                 /* shouldn't happen */
+               goto fail;                              /* shouldn't happen */
 
-       /* Try to get maximum values of both vars */
-       if (!get_var_maximum(root, left, lsortop, &leftmax))
-               return;                                 /* no max available from stats */
+       /* Try to get maximum values of both inputs */
+       if (!get_variable_maximum(root, &leftvar, lsortop, &leftmax))
+               goto fail;                              /* no max available from stats */
 
-       if (!get_var_maximum(root, right, rsortop, &rightmax))
-               return;                                 /* no max available from stats */
+       if (!get_variable_maximum(root, &rightvar, rsortop, &rightmax))
+               goto fail;                              /* no max available from stats */
 
        /* Look up the "left < right" and "left > right" operators */
        op_mergejoin_crossops(opno, &ltop, &gtop, NULL, NULL);
 
-       /* Look up the "right < left" operator */
-       revltop = get_commutator(gtop);
-       if (!OidIsValid(revltop))
-               return;                                 /* shouldn't happen */
+       /* Look up the "left <= right" operator */
+       leop = get_negator(gtop);
+       if (!OidIsValid(leop))
+               goto fail;                              /* insufficient info in catalogs */
+
+       /* Look up the "right > left" operator */
+       revgtop = get_commutator(ltop);
+       if (!OidIsValid(revgtop))
+               goto fail;                              /* insufficient info in catalogs */
+
+       /* Look up the "right <= left" operator */
+       revleop = get_negator(revgtop);
+       if (!OidIsValid(revleop))
+               goto fail;                              /* insufficient info in catalogs */
 
        /*
         * Now, the fraction of the left variable that will be scanned is the
         * fraction that's <= the right-side maximum value.  But only believe
         * non-default estimates, else stick with our 1.0.
         */
-       selec = scalarineqsel(root, ltop, false, left,
-                                                 rightmax, right->vartype);
+       selec = scalarineqsel(root, leop, false, &leftvar,
+                                                 rightmax, righttype);
        if (selec != DEFAULT_INEQ_SEL)
                *leftscan = selec;
 
        /* And similarly for the right variable. */
-       selec = scalarineqsel(root, revltop, false, right,
-                                                 leftmax, left->vartype);
+       selec = scalarineqsel(root, revleop, false, &rightvar,
+                                                 leftmax, lefttype);
        if (selec != DEFAULT_INEQ_SEL)
                *rightscan = selec;
 
        /*
         * Only one of the two fractions can really be less than 1.0; believe
-        * the smaller estimate and reset the other one to exactly 1.0.
+        * the smaller estimate and reset the other one to exactly 1.0.  If we
+        * get exactly equal estimates (as can easily happen with self-joins),
+        * believe neither.
         */
        if (*leftscan > *rightscan)
                *leftscan = 1.0;
-       else
+       else if (*leftscan < *rightscan)
                *rightscan = 1.0;
+       else
+               *leftscan = *rightscan = 1.0;
+
+fail:
+       ReleaseVariableStats(leftvar);
+       ReleaseVariableStats(rightvar);
 }
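
A hedged standalone sketch of what the two output fractions of mergejoinscansel mean, assuming a uniform value distribution instead of the histogram-based scalarineqsel logic actually used above; the names and numbers are illustrative only:

#include <stdio.h>

/*
 * Toy stand-in for the leftscan estimate: the fraction of left-side values
 * that are <= the right side's maximum, under a uniform-distribution
 * assumption.  The real code derives this from histogram statistics.
 */
static double
uniform_scan_fraction(double left_min, double left_max, double right_max)
{
	if (right_max >= left_max)
		return 1.0;				/* the whole left input must be scanned */
	if (right_max <= left_min)
		return 0.0;				/* the merge stops almost immediately */
	return (right_max - left_min) / (left_max - left_min);
}

int
main(void)
{
	/*
	 * Left values span 0..1000 but the right side tops out at 100, so only
	 * about 10% of the left input should need to be scanned.
	 */
	printf("%g\n", uniform_scan_fraction(0, 1000, 100));
	return 0;
}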
 
+
 /*
- * get_var_maximum
- *             Estimate the maximum value of the specified variable.
- *             If successful, store value in *max and return TRUE.
- *             If no data available, return FALSE.
- *
- * sortop is the "<" comparison operator to use.  (To extract the
- * minimum instead of the maximum, just pass the ">" operator instead.)
+ * Helper routine for estimate_num_groups: add an item to a list of
+ * GroupVarInfos, but only if it's not known equal to any of the existing
+ * entries.
  */
-static bool
-get_var_maximum(Query *root, Var *var, Oid sortop, Datum *max)
+typedef struct
 {
-       Datum           tmax = 0;
-       bool            have_max = false;
-       Oid                     relid;
-       HeapTuple       statsTuple;
-       Form_pg_statistic stats;
-       int16           typLen;
-       bool            typByVal;
-       Datum      *values;
-       int                     nvalues;
-       int                     i;
+       Node       *var;                /* might be an expression, not just a Var */
+       RelOptInfo *rel;                /* relation it belongs to */
+       double          ndistinct;      /* # distinct values */
+} GroupVarInfo;
+
+static List *
+add_unique_group_var(PlannerInfo *root, List *varinfos,
+                                        Node *var, VariableStatData *vardata)
+{
+       GroupVarInfo *varinfo;
+       double          ndistinct;
+       ListCell   *lc;
 
-       relid = getrelid(var->varno, root->rtable);
-       if (relid == InvalidOid)
-               return false;
+       ndistinct = get_variable_numdistinct(vardata);
 
-       /* get stats for the attribute */
-       statsTuple = SearchSysCache(STATRELATT,
-                                                               ObjectIdGetDatum(relid),
-                                                               Int16GetDatum(var->varattno),
-                                                               0, 0);
-       if (!HeapTupleIsValid(statsTuple))
+       /* cannot use foreach here because of possible list_delete */
+       lc = list_head(varinfos);
+       while (lc)
        {
-               /* no stats available, so default result */
-               return false;
+               varinfo = (GroupVarInfo *) lfirst(lc);
+
+               /* must advance lc before list_delete possibly pfree's it */
+               lc = lnext(lc);
+
+               /* Drop exact duplicates */
+               if (equal(var, varinfo->var))
+                       return varinfos;
+
+               /*
+                * Drop known-equal vars, but only if they belong to different
+                * relations (see comments for estimate_num_groups)
+                */
+               if (vardata->rel != varinfo->rel &&
+                       exprs_known_equal(root, var, varinfo->var))
+               {
+                       if (varinfo->ndistinct <= ndistinct)
+                       {
+                               /* Keep older item, forget new one */
+                               return varinfos;
+                       }
+                       else
+                       {
+                               /* Delete the older item */
+                               varinfos = list_delete_ptr(varinfos, varinfo);
+                       }
+               }
        }
-       stats = (Form_pg_statistic) GETSTRUCT(statsTuple);
 
-       get_typlenbyval(var->vartype, &typLen, &typByVal);
+       varinfo = (GroupVarInfo *) palloc(sizeof(GroupVarInfo));
+
+       varinfo->var = var;
+       varinfo->rel = vardata->rel;
+       varinfo->ndistinct = ndistinct;
+       varinfos = lappend(varinfos, varinfo);
+       return varinfos;
+}
+
+/*
+ * estimate_num_groups         - Estimate number of groups in a grouped query
+ *
+ * Given a query having a GROUP BY clause, estimate how many groups there
+ * will be --- ie, the number of distinct combinations of the GROUP BY
+ * expressions.
+ *
+ * This routine is also used to estimate the number of rows emitted by
+ * a DISTINCT filtering step; that is an isomorphic problem.  (Note:
+ * actually, we only use it for DISTINCT when there's no grouping or
+ * aggregation ahead of the DISTINCT.)
+ *
+ * Inputs:
+ *     root - the query
+ *     groupExprs - list of expressions being grouped by
+ *     input_rows - number of rows estimated to arrive at the group/unique
+ *             filter step
+ *
+ * Given the lack of any cross-correlation statistics in the system, it's
+ * impossible to do anything really trustworthy with GROUP BY conditions
+ * involving multiple Vars.  We should however avoid assuming the worst
+ * case (all possible cross-product terms actually appear as groups) since
+ * very often the grouped-by Vars are highly correlated.  Our current approach
+ * is as follows:
+ *     1.      Reduce the given expressions to a list of unique Vars used.  For
+ *             example, GROUP BY a, a + b is treated the same as GROUP BY a, b.
+ *             It is clearly correct not to count the same Var more than once.
+ *             It is also reasonable to treat f(x) the same as x: f() cannot
+ *             increase the number of distinct values (unless it is volatile,
+ *             which we consider unlikely for grouping), but it probably won't
+ *             reduce the number of distinct values much either.
+ *             As a special case, if a GROUP BY expression can be matched to an
+ *             expressional index for which we have statistics, then we treat the
+ *             whole expression as though it were just a Var.
+ *     2.      If the list contains Vars of different relations that are known equal
+ *             due to equijoin clauses, then drop all but one of the Vars from each
+ *             known-equal set, keeping the one with smallest estimated # of values
+ *             (since the extra values of the others can't appear in joined rows).
+ *             Note the reason we only consider Vars of different relations is that
+ *             if we considered ones of the same rel, we'd be double-counting the
+ *             restriction selectivity of the equality in the next step.
+ *     3.      For Vars within a single source rel, we multiply together the numbers
+ *             of values, clamp to the number of rows in the rel (divided by 10 if
+ *             more than one Var), and then multiply by the selectivity of the
+ *             restriction clauses for that rel.  When there's more than one Var,
+ *             the initial product is probably too high (it's the worst case) but
+ *             clamping to a fraction of the rel's rows seems to be a helpful
+ *             heuristic for not letting the estimate get out of hand.  (The factor
+ *             of 10 is derived from pre-Postgres-7.4 practice.)  Multiplying
+ *             by the restriction selectivity is effectively assuming that the
+ *             restriction clauses are independent of the grouping, which is a crummy
+ *             assumption, but it's hard to do better.
+ *     4.      If there are Vars from multiple rels, we repeat step 3 for each such
+ *             rel, and multiply the results together.
+ * Note that rels not containing grouped Vars are ignored completely, as are
+ * join clauses other than the equijoin clauses used in step 2.  Such rels
+ * cannot increase the number of groups, and we assume such clauses do not
+ * reduce the number either (somewhat bogus, but we don't have the info to
+ * do better).
+ */
+double
+estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows)
+{
+       List       *varinfos = NIL;
+       double          numdistinct;
+       ListCell   *l;
+
+       /* We should not be called unless query has GROUP BY (or DISTINCT) */
+       Assert(groupExprs != NIL);
 
        /*
-        * If there is a histogram, grab the last or first value as appropriate.
-        *
-        * If there is a histogram that is sorted with some other operator
-        * than the one we want, fail --- this suggests that there is data
-        * we can't use.
+        * Steps 1/2: find the unique Vars used, treating an expression as a Var
+        * if we can find stats for it.  For each one, record the statistical
+        * estimate of number of distinct values (total in its table, without
+        * regard for filtering).
         */
-       if (get_attstatsslot(statsTuple, var->vartype, var->vartypmod,
-                                                STATISTIC_KIND_HISTOGRAM, sortop,
-                                                &values, &nvalues,
-                                                NULL, NULL))
+       foreach(l, groupExprs)
        {
-               if (nvalues > 0)
+               Node       *groupexpr = (Node *) lfirst(l);
+               VariableStatData vardata;
+               List       *varshere;
+               ListCell   *l2;
+
+               /*
+                * If examine_variable is able to deduce anything about the GROUP BY
+                * expression, treat it as a single variable even if it's really more
+                * complicated.
+                */
+               examine_variable(root, groupexpr, 0, &vardata);
+               if (vardata.statsTuple != NULL || vardata.isunique)
                {
-                       tmax = datumCopy(values[nvalues-1], typByVal, typLen);
-                       have_max = true;
+                       varinfos = add_unique_group_var(root, varinfos,
+                                                                                       groupexpr, &vardata);
+                       ReleaseVariableStats(vardata);
+                       continue;
                }
-               free_attstatsslot(var->vartype, values, nvalues, NULL, 0);
-       }
-       else
-       {
-               Oid             rsortop = get_commutator(sortop);
+               ReleaseVariableStats(vardata);
 
-               if (OidIsValid(rsortop) &&
-                       get_attstatsslot(statsTuple, var->vartype, var->vartypmod,
-                                                        STATISTIC_KIND_HISTOGRAM, rsortop,
-                                                        &values, &nvalues,
-                                                        NULL, NULL))
+               /*
+                * Else pull out the component Vars
+                */
+               varshere = pull_var_clause(groupexpr, false);
+
+               /*
+                * If we find any variable-free GROUP BY item, then either it is a
+                * constant (and we can ignore it) or it contains a volatile
+                * function; in the latter case we punt and assume that each input
+                * row will yield a distinct group.
+                */
+               if (varshere == NIL)
                {
-                       if (nvalues > 0)
-                       {
-                               tmax = datumCopy(values[0], typByVal, typLen);
-                               have_max = true;
-                       }
-                       free_attstatsslot(var->vartype, values, nvalues, NULL, 0);
+                       if (contain_volatile_functions(groupexpr))
+                               return input_rows;
+                       continue;
                }
-               else if (get_attstatsslot(statsTuple, var->vartype, var->vartypmod,
-                                                                 STATISTIC_KIND_HISTOGRAM, InvalidOid,
-                                                                 &values, &nvalues,
-                                                                 NULL, NULL))
+
+               /*
+                * Else add variables to varinfos list
+                */
+               foreach(l2, varshere)
                {
-                       free_attstatsslot(var->vartype, values, nvalues, NULL, 0);
-                       ReleaseSysCache(statsTuple);
-                       return false;
+                       Node       *var = (Node *) lfirst(l2);
+
+                       examine_variable(root, var, 0, &vardata);
+                       varinfos = add_unique_group_var(root, varinfos, var, &vardata);
+                       ReleaseVariableStats(vardata);
                }
        }
 
+       /* If now no Vars, we must have an all-constant GROUP BY list. */
+       if (varinfos == NIL)
+               return 1.0;
+
        /*
-        * If we have most-common-values info, look for a large MCV.  This
-        * is needed even if we also have a histogram, since the histogram
-        * excludes the MCVs.  However, usually the MCVs will not be the
-        * extreme values, so avoid unnecessary data copying.
+        * Steps 3/4: group Vars by relation and estimate total numdistinct.
+        *
+        * For each iteration of the outer loop, we process the frontmost Var in
+        * varinfos, plus all other Vars in the same relation.  We remove
+        * these Vars from the newvarinfos list for the next iteration. This
+        * is the easiest way to group Vars of same rel together.
         */
-       if (get_attstatsslot(statsTuple, var->vartype, var->vartypmod,
-                                                STATISTIC_KIND_MCV, InvalidOid,
-                                                &values, &nvalues,
-                                                NULL, NULL))
-       {
-               bool    large_mcv = false;
-               FmgrInfo        opproc;
+       numdistinct = 1.0;
 
-               fmgr_info(get_opcode(sortop), &opproc);
+       do
+       {
+               GroupVarInfo *varinfo1 = (GroupVarInfo *) linitial(varinfos);
+               RelOptInfo *rel = varinfo1->rel;
+               double          reldistinct = varinfo1->ndistinct;
+               double          relmaxndistinct = reldistinct;
+               int                     relvarcount = 1;
+               List       *newvarinfos = NIL;
 
-               for (i = 0; i < nvalues; i++)
+               /*
+                * Get the product of numdistinct estimates of the Vars for this rel.
+                * Also, construct new varinfos list of remaining Vars.
+                */
+               for_each_cell(l, lnext(list_head(varinfos)))
                {
-                       if (!have_max)
+                       GroupVarInfo *varinfo2 = (GroupVarInfo *) lfirst(l);
+
+                       if (varinfo2->rel == varinfo1->rel)
                        {
-                               tmax = values[i];
-                               large_mcv = have_max = true;
+                               reldistinct *= varinfo2->ndistinct;
+                               if (relmaxndistinct < varinfo2->ndistinct)
+                                       relmaxndistinct = varinfo2->ndistinct;
+                               relvarcount++;
                        }
-                       else if (DatumGetBool(FunctionCall2(&opproc, tmax, values[i])))
+                       else
                        {
-                               tmax = values[i];
-                               large_mcv = true;
+                               /* not time to process varinfo2 yet */
+                               newvarinfos = lcons(varinfo2, newvarinfos);
                        }
                }
-               if (large_mcv)
-                       tmax = datumCopy(tmax, typByVal, typLen);
-               free_attstatsslot(var->vartype, values, nvalues, NULL, 0);
+
+               /*
+                * Sanity check --- don't divide by zero if empty relation.
+                */
+               Assert(rel->reloptkind == RELOPT_BASEREL);
+               if (rel->tuples > 0)
+               {
+                       /*
+                        * Clamp to size of rel, or size of rel / 10 if multiple Vars.
+                        * The fudge factor is because the Vars are probably correlated
+                        * but we don't know by how much.  We should never clamp to less
+                        * than the largest ndistinct value for any of the Vars, though,
+                        * since there will surely be at least that many groups.
+                        */
+                       double          clamp = rel->tuples;
+
+                       if (relvarcount > 1)
+                       {
+                               clamp *= 0.1;
+                               if (clamp < relmaxndistinct)
+                               {
+                                       clamp = relmaxndistinct;
+                                       /* for sanity in case some ndistinct is too large: */
+                                       if (clamp > rel->tuples)
+                                               clamp = rel->tuples;
+                               }
+                       }
+                       if (reldistinct > clamp)
+                               reldistinct = clamp;
+
+                       /*
+                        * Multiply by restriction selectivity.
+                        */
+                       reldistinct *= rel->rows / rel->tuples;
+
+                       /*
+                        * Update estimate of total distinct groups.
+                        */
+                       numdistinct *= reldistinct;
+               }
+
+               varinfos = newvarinfos;
+       } while (varinfos != NIL);
+
+       numdistinct = ceil(numdistinct);
+
+       /* Guard against out-of-range answers */
+       if (numdistinct > input_rows)
+               numdistinct = input_rows;
+       if (numdistinct < 1.0)
+               numdistinct = 1.0;
+
+       return numdistinct;
+}
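
To make steps 3/4 of the comment above concrete, here is a standalone sketch of the per-relation arithmetic with made-up inputs; it is not the planner's data structures, just the same formula:

#include <math.h>
#include <stdio.h>

/*
 * Sketch of the per-relation step: multiply the Vars' ndistinct estimates,
 * clamp, then scale by the restriction selectivity.
 */
static double
groups_for_rel(double ndistinct_product, double max_ndistinct,
			   int nvars, double rel_tuples, double rel_rows)
{
	double		clamp = rel_tuples;
	double		reldistinct = ndistinct_product;

	if (nvars > 1)
	{
		clamp *= 0.1;				/* correlation fudge factor */
		if (clamp < max_ndistinct)
			clamp = max_ndistinct;	/* never below the largest ndistinct */
		if (clamp > rel_tuples)
			clamp = rel_tuples;
	}
	if (reldistinct > clamp)
		reldistinct = clamp;

	/* assume restriction clauses thin out groups proportionally */
	reldistinct *= rel_rows / rel_tuples;
	return reldistinct;
}

int
main(void)
{
	/*
	 * GROUP BY a, b with 50 and 30 distinct values in a 10000-tuple rel,
	 * of which 2000 rows pass the restrictions:
	 * 1500 -> clamped to 1000 -> scaled by 0.2 -> about 200 groups.
	 */
	printf("%g\n", ceil(groups_for_rel(50.0 * 30.0, 50.0, 2, 10000, 2000)));
	return 0;
}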
+
+/*
+ * Estimate hash bucketsize fraction (ie, number of entries in a bucket
+ * divided by total tuples in relation) if the specified expression is used
+ * as a hash key.
+ *
+ * XXX This is really pretty bogus since we're effectively assuming that the
+ * distribution of hash keys will be the same after applying restriction
+ * clauses as it was in the underlying relation.  However, we are not nearly
+ * smart enough to figure out how the restrict clauses might change the
+ * distribution, so this will have to do for now.
+ *
+ * We are passed the number of buckets the executor will use for the given
+ * input relation.     If the data were perfectly distributed, with the same
+ * number of tuples going into each available bucket, then the bucketsize
+ * fraction would be 1/nbuckets.  But this happy state of affairs will occur
+ * only if (a) there are at least nbuckets distinct data values, and (b)
+ * we have a not-too-skewed data distribution. Otherwise the buckets will
+ * be nonuniformly occupied.  If the other relation in the join has a key
+ * distribution similar to this one's, then the most-loaded buckets are
+ * exactly those that will be probed most often.  Therefore, the "average"
+ * bucket size for costing purposes should really be taken as something close
+ * to the "worst case" bucket size.  We try to estimate this by adjusting the
+ * fraction if there are too few distinct data values, and then scaling up
+ * by the ratio of the most common value's frequency to the average frequency.
+ *
+ * If no statistics are available, use a default estimate of 0.1.  This will
+ * discourage use of a hash rather strongly if the inner relation is large,
+ * which is what we want.  We do not want to hash unless we know that the
+ * inner rel is well-dispersed (or the alternatives seem much worse).
+ */
+Selectivity
+estimate_hash_bucketsize(PlannerInfo *root, Node *hashkey, double nbuckets)
+{
+       VariableStatData vardata;
+       double          estfract,
+                               ndistinct,
+                               stanullfrac,
+                               mcvfreq,
+                               avgfreq;
+       float4     *numbers;
+       int                     nnumbers;
+
+       examine_variable(root, hashkey, 0, &vardata);
+
+       /* Get number of distinct values and fraction that are null */
+       ndistinct = get_variable_numdistinct(&vardata);
+
+       if (HeapTupleIsValid(vardata.statsTuple))
+       {
+               Form_pg_statistic stats;
+
+               stats = (Form_pg_statistic) GETSTRUCT(vardata.statsTuple);
+               stanullfrac = stats->stanullfrac;
        }
+       else
+       {
+               /*
+                * Believe a default ndistinct only if it came from stats.
+                * Otherwise punt and return 0.1, per comments above.
+                */
+               if (ndistinct == DEFAULT_NUM_DISTINCT)
+               {
+                       ReleaseVariableStats(vardata);
+                       return (Selectivity) 0.1;
+               }
 
-       ReleaseSysCache(statsTuple);
+               stanullfrac = 0.0;
+       }
 
-       *max = tmax;
-       return have_max;
+       /* Compute avg freq of all distinct data values in raw relation */
+       avgfreq = (1.0 - stanullfrac) / ndistinct;
+
+       /*
+        * Adjust ndistinct to account for restriction clauses.  Observe we
+        * are assuming that the data distribution is affected uniformly by
+        * the restriction clauses!
+        *
+        * XXX Possibly better way, but much more expensive: multiply by
+        * selectivity of rel's restriction clauses that mention the target
+        * Var.
+        */
+       if (vardata.rel)
+               ndistinct *= vardata.rel->rows / vardata.rel->tuples;
+
+       /*
+        * Initial estimate of bucketsize fraction is 1/nbuckets as long as
+        * the number of buckets is less than the expected number of distinct
+        * values; otherwise it is 1/ndistinct.
+        */
+       if (ndistinct > nbuckets)
+               estfract = 1.0 / nbuckets;
+       else
+               estfract = 1.0 / ndistinct;
+
+       /*
+        * Look up the frequency of the most common value, if available.
+        */
+       mcvfreq = 0.0;
+
+       if (HeapTupleIsValid(vardata.statsTuple))
+       {
+               if (get_attstatsslot(vardata.statsTuple,
+                                                        vardata.atttype, vardata.atttypmod,
+                                                        STATISTIC_KIND_MCV, InvalidOid,
+                                                        NULL, NULL, &numbers, &nnumbers))
+               {
+                       /*
+                        * The first MCV stat is for the most common value.
+                        */
+                       if (nnumbers > 0)
+                               mcvfreq = numbers[0];
+                       free_attstatsslot(vardata.atttype, NULL, 0,
+                                                         numbers, nnumbers);
+               }
+       }
+
+       /*
+        * Adjust estimated bucketsize upward to account for skewed
+        * distribution.
+        */
+       if (avgfreq > 0.0 && mcvfreq > avgfreq)
+               estfract *= mcvfreq / avgfreq;
+
+       /*
+        * Clamp bucketsize to sane range (the above adjustment could easily
+        * produce an out-of-range result).  We set the lower bound a little
+        * above zero, since zero isn't a very sane result.
+        */
+       if (estfract < 1.0e-6)
+               estfract = 1.0e-6;
+       else if (estfract > 1.0)
+               estfract = 1.0;
+
+       ReleaseVariableStats(vardata);
+
+       return (Selectivity) estfract;
 }
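
The following standalone sketch walks the same bucketsize arithmetic with made-up statistics; it is illustrative only, not code from the patch:

#include <stdio.h>

/*
 * Sketch of the bucketsize estimate: start from 1/nbuckets or 1/ndistinct,
 * then inflate for a skewed distribution.
 */
static double
bucketsize_sketch(double ndistinct, double stanullfrac, double mcvfreq,
				  double row_fraction, double nbuckets)
{
	double		avgfreq = (1.0 - stanullfrac) / ndistinct;
	double		estfract;

	/* assume restrictions thin out the distinct values proportionally */
	ndistinct *= row_fraction;

	estfract = (ndistinct > nbuckets) ? 1.0 / nbuckets : 1.0 / ndistinct;

	/* the most common value's bucket is the one most often probed */
	if (avgfreq > 0.0 && mcvfreq > avgfreq)
		estfract *= mcvfreq / avgfreq;

	/* clamp to a sane range */
	if (estfract < 1.0e-6)
		estfract = 1.0e-6;
	else if (estfract > 1.0)
		estfract = 1.0;
	return estfract;
}

int
main(void)
{
	/*
	 * 1000 distinct values, no nulls, MCV frequency 5%, half the rows pass
	 * the restrictions, 1024 buckets: 1/500 * (0.05 / 0.001) = 0.1, i.e.
	 * about 10% of the inner relation lands in the most-probed bucket.
	 */
	printf("%g\n", bucketsize_sketch(1000, 0.0, 0.05, 0.5, 1024));
	return 0;
}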
 
+
+/*-------------------------------------------------------------------------
+ *
+ * Support routines
+ *
+ *-------------------------------------------------------------------------
+ */
+
 /*
  * convert_to_scalar
  *       Convert non-NULL values of the indicated types to the comparison
@@ -1977,6 +2302,21 @@ convert_to_scalar(Datum value, Oid valuetypid, double *scaledvalue,
                                  Datum lobound, Datum hibound, Oid boundstypid,
                                  double *scaledlobound, double *scaledhibound)
 {
+       /*
+        * Both the valuetypid and the boundstypid should exactly match
+        * the declared input type(s) of the operator we are invoked for,
+        * so we just error out if either is not recognized.
+        *
+        * XXX The histogram we are interpolating between points of could belong
+        * to a column that's only binary-compatible with the declared type.
+        * In essence we are assuming that the semantics of binary-compatible
+        * types are enough alike that we can use a histogram generated with one
+        * type's operators to estimate selectivity for the other's.  This is
+        * outright wrong in some cases --- in particular signed versus unsigned
+        * interpretation could trip us up.  But it's useful enough in the
+        * majority of cases that we do it anyway.  Should think about more
+        * rigorous ways to do it.
+        */
        switch (valuetypid)
        {
                        /*
@@ -1991,6 +2331,11 @@ convert_to_scalar(Datum value, Oid valuetypid, double *scaledvalue,
                case NUMERICOID:
                case OIDOID:
                case REGPROCOID:
+               case REGPROCEDUREOID:
+               case REGOPEROID:
+               case REGOPERATOROID:
+               case REGCLASSOID:
+               case REGTYPEOID:
                        *scaledvalue = convert_numeric_to_scalar(value, valuetypid);
                        *scaledlobound = convert_numeric_to_scalar(lobound, boundstypid);
                        *scaledhibound = convert_numeric_to_scalar(hibound, boundstypid);
@@ -2005,9 +2350,9 @@ convert_to_scalar(Datum value, Oid valuetypid, double *scaledvalue,
                case TEXTOID:
                case NAMEOID:
                        {
-                               unsigned char *valstr = convert_string_datum(value, valuetypid);
-                               unsigned char *lostr = convert_string_datum(lobound, boundstypid);
-                               unsigned char *histr = convert_string_datum(hibound, boundstypid);
+                               char *valstr = convert_string_datum(value, valuetypid);
+                               char *lostr = convert_string_datum(lobound, boundstypid);
+                               char *histr = convert_string_datum(hibound, boundstypid);
 
                                convert_string_to_scalar(valstr, scaledvalue,
                                                                                 lostr, scaledlobound,
@@ -2058,6 +2403,7 @@ convert_to_scalar(Datum value, Oid valuetypid, double *scaledvalue,
                        return true;
        }
        /* Don't know how to convert */
+       *scaledvalue = *scaledlobound = *scaledhibound = 0;
        return false;
 }
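
The "signed versus unsigned interpretation" hazard mentioned in the comment added above can be shown with a tiny standalone example (illustrative only):

#include <stdio.h>

int
main(void)
{
	int			as_signed = -1;
	unsigned int as_unsigned = (unsigned int) as_signed;	/* 4294967295 */

	/*
	 * The same bit pattern is the smallest-looking value under a signed
	 * interpretation but the largest under an unsigned one, so a histogram
	 * built with one type's comparison operators can badly mis-place
	 * values of a merely binary-compatible type.
	 */
	printf("%d vs %u\n", as_signed, as_unsigned);
	return 0;
}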
 
@@ -2088,6 +2434,11 @@ convert_numeric_to_scalar(Datum value, Oid typid)
                                                                                                   value));
                case OIDOID:
                case REGPROCOID:
+               case REGPROCEDUREOID:
+               case REGOPEROID:
+               case REGOPERATOROID:
+               case REGCLASSOID:
+               case REGTYPEOID:
                        /* we can treat OIDs as integers... */
                        return (double) DatumGetObjectId(value);
        }
@@ -2096,7 +2447,7 @@ convert_numeric_to_scalar(Datum value, Oid typid)
         * Can't get here unless someone tries to use scalarltsel/scalargtsel
         * on an operator with one numeric and one non-numeric operand.
         */
-       elog(ERROR, "convert_numeric_to_scalar: unsupported type %u", typid);
+       elog(ERROR, "unsupported type: %u", typid);
        return 0;
 }
 
@@ -2121,31 +2472,31 @@ convert_numeric_to_scalar(Datum value, Oid typid)
  * so this is more likely to happen than you might think.)
  */
 static void
-convert_string_to_scalar(unsigned char *value,
+convert_string_to_scalar(char *value,
                                                 double *scaledvalue,
-                                                unsigned char *lobound,
+                                                char *lobound,
                                                 double *scaledlobound,
-                                                unsigned char *hibound,
+                                                char *hibound,
                                                 double *scaledhibound)
 {
        int                     rangelo,
                                rangehi;
-       unsigned char *sptr;
+       char       *sptr;
 
-       rangelo = rangehi = hibound[0];
+       rangelo = rangehi = (unsigned char) hibound[0];
        for (sptr = lobound; *sptr; sptr++)
        {
-               if (rangelo > *sptr)
-                       rangelo = *sptr;
-               if (rangehi < *sptr)
-                       rangehi = *sptr;
+               if (rangelo > (unsigned char) *sptr)
+                       rangelo = (unsigned char) *sptr;
+               if (rangehi < (unsigned char) *sptr)
+                       rangehi = (unsigned char) *sptr;
        }
        for (sptr = hibound; *sptr; sptr++)
        {
-               if (rangelo > *sptr)
-                       rangelo = *sptr;
-               if (rangehi < *sptr)
-                       rangehi = *sptr;
+               if (rangelo > (unsigned char) *sptr)
+                       rangelo = (unsigned char) *sptr;
+               if (rangehi < (unsigned char) *sptr)
+                       rangehi = (unsigned char) *sptr;
        }
        /* If range includes any upper-case ASCII chars, make it include all */
        if (rangelo <= 'Z' && rangehi >= 'A')
@@ -2201,9 +2552,9 @@ convert_string_to_scalar(unsigned char *value,
 }
 
 static double
-convert_one_string_to_scalar(unsigned char *value, int rangelo, int rangehi)
+convert_one_string_to_scalar(char *value, int rangelo, int rangehi)
 {
-       int                     slen = strlen((char *) value);
+       int                     slen = strlen(value);
        double          num,
                                denom,
                                base;
@@ -2224,7 +2575,7 @@ convert_one_string_to_scalar(unsigned char *value, int rangelo, int rangehi)
        denom = base;
        while (slen-- > 0)
        {
-               int                     ch = *value++;
+               int                     ch = (unsigned char) *value++;
 
                if (ch < rangelo)
                        ch = rangelo - 1;
@@ -2240,20 +2591,14 @@ convert_one_string_to_scalar(unsigned char *value, int rangelo, int rangehi)
 /*
  * Convert a string-type Datum into a palloc'd, null-terminated string.
  *
- * If USE_LOCALE is defined, we must pass the string through strxfrm()
+ * When using a non-C locale, we must pass the string through strxfrm()
  * before continuing, so as to generate correct locale-specific results.
  */
-static unsigned char *
+static char *
 convert_string_datum(Datum value, Oid typid)
 {
        char       *val;
 
-#ifdef USE_LOCALE
-       char       *xfrmstr;
-       size_t          xfrmsize;
-       size_t          xfrmlen;
-#endif
-
        switch (typid)
        {
                case CHAROID:
@@ -2286,27 +2631,37 @@ convert_string_datum(Datum value, Oid typid)
                         * Can't get here unless someone tries to use scalarltsel on
                         * an operator with one string and one non-string operand.
                         */
-                       elog(ERROR, "convert_string_datum: unsupported type %u", typid);
+                       elog(ERROR, "unsupported type: %u", typid);
                        return NULL;
        }
 
-#ifdef USE_LOCALE
-       /* Guess that transformed string is not much bigger than original */
-       xfrmsize = strlen(val) + 32;    /* arbitrary pad value here... */
-       xfrmstr = (char *) palloc(xfrmsize);
-       xfrmlen = strxfrm(xfrmstr, val, xfrmsize);
-       if (xfrmlen >= xfrmsize)
+       if (!lc_collate_is_c())
        {
-               /* Oops, didn't make it */
-               pfree(xfrmstr);
+               char       *xfrmstr;
+               size_t          xfrmlen;
+               size_t          xfrmlen2;
+
+               /*
+                * Note: originally we guessed at a suitable output buffer size
+                * and only needed to call strxfrm twice if our guess was too
+                * small.  However, it seems that some versions of Solaris have
+                * a buggy strxfrm that can write past the specified buffer
+                * length in that scenario.  So, do it the dumb way for portability.
+                *
+                * Yet other systems (e.g., glibc) sometimes return a smaller
+                * value from the second call than from the first; thus the
+                * Assert must be <=, not the == you'd expect.  Can't any of
+                * these people program their way out of a paper bag?
+                */
+               xfrmlen = strxfrm(NULL, val, 0);
                xfrmstr = (char *) palloc(xfrmlen + 1);
-               xfrmlen = strxfrm(xfrmstr, val, xfrmlen + 1);
+               xfrmlen2 = strxfrm(xfrmstr, val, xfrmlen + 1);
+               Assert(xfrmlen2 <= xfrmlen);
+               pfree(val);
+               val = xfrmstr;
        }
-       pfree(val);
-       val = xfrmstr;
-#endif
 
-       return (unsigned char *) val;
+       return val;
 }
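
For illustration only (not part of the patch): a minimal standalone sketch of
the measure-then-transform strxfrm idiom the new code relies on.  It uses plain
malloc rather than palloc, the helper name xfrm_copy is invented for the
example, and the <= tolerance mirrors the Assert above.

#include <stdlib.h>
#include <string.h>

/* Sketch: ask strxfrm for the required length, then transform for real. */
char *
xfrm_copy(const char *src)
{
    size_t  needed = strxfrm(NULL, src, 0);    /* measure */
    char   *buf = malloc(needed + 1);
    size_t  actual;

    if (buf == NULL)
        return NULL;
    actual = strxfrm(buf, src, needed + 1);    /* transform */
    /* some libcs report a smaller length on the second call, hence <= */
    if (actual > needed)
    {
        free(buf);
        return NULL;
    }
    return buf;
}
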
 
 /*
@@ -2429,17 +2784,31 @@ convert_timevalue_to_scalar(Datum value, Oid typid)
                                 * assumed average month length of 365.25/12.0 days.  Not
                                 * too accurate, but plenty good enough for our purposes.
                                 */
-                               return interval->time +
-                                       interval->month * (365.25 / 12.0 * 24.0 * 60.0 * 60.0);
+#ifdef HAVE_INT64_TIMESTAMP
+                               return interval->time + interval->day * (double)USECS_PER_DAY +
+                                          interval->month * ((DAYS_PER_YEAR / (double)MONTHS_PER_YEAR) * USECS_PER_DAY);
+#else
+                               return interval->time + interval->day * SECS_PER_DAY +
+                                               interval->month * ((DAYS_PER_YEAR / (double)MONTHS_PER_YEAR) * (double)SECS_PER_DAY);
+#endif
                        }
                case RELTIMEOID:
+#ifdef HAVE_INT64_TIMESTAMP
+                       return (DatumGetRelativeTime(value) * 1000000.0);
+#else
                        return DatumGetRelativeTime(value);
+#endif
                case TINTERVALOID:
                        {
-                               TimeInterval interval = DatumGetTimeInterval(value);
+                               TimeInterval tinterval = DatumGetTimeInterval(value);
 
-                               if (interval->status != 0)
-                                       return interval->data[1] - interval->data[0];
+#ifdef HAVE_INT64_TIMESTAMP
+                               if (tinterval->status != 0)
+                                       return ((tinterval->data[1] - tinterval->data[0]) * 1000000.0);
+#else
+                               if (tinterval->status != 0)
+                                       return tinterval->data[1] - tinterval->data[0];
+#endif
                                return 0;               /* for lack of a better idea */
                        }
                case TIMEOID:
@@ -2449,7 +2818,11 @@ convert_timevalue_to_scalar(Datum value, Oid typid)
                                TimeTzADT  *timetz = DatumGetTimeTzADTP(value);
 
                                /* use GMT-equivalent time */
+#ifdef HAVE_INT64_TIMESTAMP
+                               return (double) (timetz->time + (timetz->zone * 1000000.0));
+#else
                                return (double) (timetz->time + timetz->zone);
+#endif
                        }
        }
 
@@ -2457,191 +2830,541 @@ convert_timevalue_to_scalar(Datum value, Oid typid)
         * Can't get here unless someone tries to use scalarltsel/scalargtsel
         * on an operator with one timevalue and one non-timevalue operand.
         */
-       elog(ERROR, "convert_timevalue_to_scalar: unsupported type %u", typid);
+       elog(ERROR, "unsupported type: %u", typid);
        return 0;
 }
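
For illustration only (not part of the patch): a standalone sketch of the
month/day flattening performed by the non-integer-timestamp branch above, with
the constants written out (86400-second days, 365.25/12-day months); the
helper name is invented for the example.

/* Sketch: collapse an interval into seconds for comparison purposes. */
static double
interval_to_seconds(double months, double days, double seconds)
{
    const double secs_per_day = 24.0 * 60.0 * 60.0;    /* 86400 */
    const double days_per_month = 365.25 / 12.0;       /* assumed average */

    return seconds + days * secs_per_day
                   + months * days_per_month * secs_per_day;
}
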
 
 
 /*
- * get_att_numdistinct
- *       Estimate the number of distinct values of an attribute.
+ * get_restriction_variable
+ *             Examine the args of a restriction clause to see if it's of the
+ *             form (variable op pseudoconstant) or (pseudoconstant op variable),
+ *             where "variable" could be either a Var or an expression in vars of a
+ *             single relation.  If so, extract information about the variable,
+ *             and also indicate which side it was on and the other argument.
+ *
+ * Inputs:
+ *     root: the planner info
+ *     args: clause argument list
+ *     varRelid: see specs for restriction selectivity functions
  *
- * var: identifies the attribute to examine.
- * stats: pg_statistic tuple for attribute, or NULL if not available.
+ * Outputs: (these are valid only if TRUE is returned)
+ *     *vardata: gets information about variable (see examine_variable)
+ *     *other: gets other clause argument, aggressively reduced to a constant
+ *     *varonleft: set TRUE if variable is on the left, FALSE if on the right
  *
- * NB: be careful to produce an integral result, since callers may compare
- * the result to exact integer counts.
+ * Returns TRUE if a variable is identified, otherwise FALSE.
+ *
+ * Note: if there are Vars on both sides of the clause, we must fail, because
+ * callers are expecting that the other side will act like a pseudoconstant.
  */
-static double
-get_att_numdistinct(Query *root, Var *var, Form_pg_statistic stats)
+static bool
+get_restriction_variable(PlannerInfo *root, List *args, int varRelid,
+                                                VariableStatData *vardata, Node **other,
+                                                bool *varonleft)
 {
-       RelOptInfo *rel;
-       double          ntuples;
-
-       /*
-        * Special-case boolean columns: presumably, two distinct values.
-        *
-        * Are there any other cases we should wire in special estimates for?
-        */
-       if (var->vartype == BOOLOID)
-               return 2.0;
+       Node       *left,
+                          *right;
+       VariableStatData rdata;
 
-       /*
-        * Otherwise we need to get the relation size.
-        */
-       rel = find_base_rel(root, var->varno);
-       ntuples = rel->tuples;
+       /* Fail if not a binary opclause (probably shouldn't happen) */
+       if (list_length(args) != 2)
+               return false;
 
-       if (ntuples <= 0.0)
-               return DEFAULT_NUM_DISTINCT;    /* no data available; return a
-                                                                                * default */
+       left = (Node *) linitial(args);
+       right = (Node *) lsecond(args);
 
        /*
-        * Look to see if there is a unique index on the attribute. If so, we
-        * assume it's distinct, ignoring pg_statistic info which could be out
-        * of date.
+        * Examine both sides.  Note that when varRelid is nonzero, Vars of
+        * other relations will be treated as pseudoconstants.
         */
-       if (has_unique_index(rel, var->varattno))
-               return ntuples;
+       examine_variable(root, left, varRelid, vardata);
+       examine_variable(root, right, varRelid, &rdata);
 
        /*
-        * If ANALYZE determined a fixed or scaled estimate, use it.
+        * If one side is a variable and the other not, we win.
         */
-       if (stats)
+       if (vardata->rel && rdata.rel == NULL)
        {
-               if (stats->stadistinct > 0.0)
-                       return stats->stadistinct;
-               if (stats->stadistinct < 0.0)
-                       return floor((-stats->stadistinct * ntuples) + 0.5);
+               *varonleft = true;
+               *other = estimate_expression_value(rdata.var);
+               /* Assume we need no ReleaseVariableStats(rdata) here */
+               return true;
        }
 
-       /*
-        * ANALYZE does not compute stats for system attributes, but some of
-        * them can reasonably be assumed unique anyway.
-        */
-       switch (var->varattno)
+       if (vardata->rel == NULL && rdata.rel)
        {
-               case ObjectIdAttributeNumber:
-               case SelfItemPointerAttributeNumber:
-                       return ntuples;
-               case TableOidAttributeNumber:
-                       return 1.0;
+               *varonleft = false;
+               *other = estimate_expression_value(vardata->var);
+               /* Assume we need no ReleaseVariableStats(*vardata) here */
+               *vardata = rdata;
+               return true;
        }
 
-       /*
-        * Estimate ndistinct = ntuples if the table is small, else use
-        * default.
-        */
-       if (ntuples < DEFAULT_NUM_DISTINCT)
-               return ntuples;
+       /* Oops, clause has wrong structure (probably var op var) */
+       ReleaseVariableStats(*vardata);
+       ReleaseVariableStats(rdata);
 
-       return DEFAULT_NUM_DISTINCT;
+       return false;
 }
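
For illustration only (not part of the patch): a rough sketch of how a
restriction estimator in this file typically drives get_restriction_variable():
fall back to a default if no (variable op pseudoconstant) structure is found,
otherwise consult the statistics and release them before returning.  The
function name is invented and the actual selectivity computation is elided;
DEFAULT_EQ_SEL and the fmgr argument layout follow the conventions used by the
existing estimators.

Datum
some_restriction_estimator(PG_FUNCTION_ARGS)
{
    PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
    List       *args = (List *) PG_GETARG_POINTER(2);
    int         varRelid = PG_GETARG_INT32(3);
    VariableStatData vardata;
    Node       *other;
    bool        varonleft;
    double      selec;

    if (!get_restriction_variable(root, args, varRelid,
                                  &vardata, &other, &varonleft))
        PG_RETURN_FLOAT8(DEFAULT_EQ_SEL);

    /* ... examine vardata.statsTuple and the constant "other" here ... */
    selec = DEFAULT_EQ_SEL;    /* placeholder */

    ReleaseVariableStats(vardata);
    PG_RETURN_FLOAT8((float8) selec);
}
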
 
 /*
- * get_restriction_var
- *             Examine the args of a restriction clause to see if it's of the
- *             form (var op something) or (something op var).  If so, extract
- *             and return the var and the other argument.
+ * get_join_variables
+ *             Apply examine_variable() to each side of a join clause.
+ */
+static void
+get_join_variables(PlannerInfo *root, List *args,
+                                  VariableStatData *vardata1, VariableStatData *vardata2)
+{
+       Node       *left,
+                          *right;
+
+       if (list_length(args) != 2)
+               elog(ERROR, "join operator should take two arguments");
+
+       left = (Node *) linitial(args);
+       right = (Node *) lsecond(args);
+
+       examine_variable(root, left, 0, vardata1);
+       examine_variable(root, right, 0, vardata2);
+}
+
+/*
+ * examine_variable
+ *             Try to look up statistical data about an expression.
+ *             Fill in a VariableStatData struct to describe the expression.
  *
  * Inputs:
- *     args: clause argument list
+ *     root: the planner info
+ *     node: the expression tree to examine
  *     varRelid: see specs for restriction selectivity functions
  *
- * Outputs: (these are set only if TRUE is returned)
- *     *var: gets Var node
- *     *other: gets other clause argument
- *     *varonleft: set TRUE if var is on the left, FALSE if on the right
+ * Outputs: *vardata is filled as follows:
+ *     var: the input expression (with any binary relabeling stripped, if
+ *             it is or contains a variable; but otherwise the type is preserved)
+ *     rel: RelOptInfo for relation containing variable; NULL if expression
+ *             contains no Vars (NOTE this could point to a RelOptInfo of a
+ *             subquery, not one in the current query).
+ *     statsTuple: the pg_statistic entry for the variable, if one exists;
+ *             otherwise NULL.
+ *     vartype: exposed type of the expression; this should always match
+ *             the declared input type of the operator we are estimating for.
+ *     atttype, atttypmod: type data to pass to get_attstatsslot().  This is
+ *             commonly the same as the exposed type of the variable argument,
+ *             but can be different in binary-compatible-type cases.
  *
- * Returns TRUE if a Var is identified, otherwise FALSE.
+ * Caller is responsible for doing ReleaseVariableStats() before exiting.
  */
-static bool
-get_restriction_var(List *args,
-                                       int varRelid,
-                                       Var **var,
-                                       Node **other,
-                                       bool *varonleft)
+static void
+examine_variable(PlannerInfo *root, Node *node, int varRelid,
+                                VariableStatData *vardata)
 {
-       Node       *left,
-                          *right;
+       Node       *basenode;
+       Relids          varnos;
+       RelOptInfo *onerel;
 
-       if (length(args) != 2)
-               return false;
+       /* Make sure we don't return dangling pointers in vardata */
+       MemSet(vardata, 0, sizeof(VariableStatData));
 
-       left = (Node *) lfirst(args);
-       right = (Node *) lsecond(args);
+       /* Save the exposed type of the expression */
+       vardata->vartype = exprType(node);
 
-       /* Ignore any binary-compatible relabeling */
+       /* Look inside any binary-compatible relabeling */
 
-       if (IsA(left, RelabelType))
-               left = ((RelabelType *) left)->arg;
-       if (IsA(right, RelabelType))
-               right = ((RelabelType *) right)->arg;
+       if (IsA(node, RelabelType))
+               basenode = (Node *) ((RelabelType *) node)->arg;
+       else
+               basenode = node;
 
-       /* Look for the var */
+       /* Fast path for a simple Var */
 
-       if (IsA(left, Var) &&
-               (varRelid == 0 || varRelid == ((Var *) left)->varno))
+       if (IsA(basenode, Var) &&
+               (varRelid == 0 || varRelid == ((Var *) basenode)->varno))
        {
-               *var = (Var *) left;
-               *other = right;
-               *varonleft = true;
+               Var                *var = (Var *) basenode;
+               Oid                     relid;
+
+               vardata->var = basenode;        /* return Var without relabeling */
+               vardata->rel = find_base_rel(root, var->varno);
+               vardata->atttype = var->vartype;
+               vardata->atttypmod = var->vartypmod;
+
+               relid = getrelid(var->varno, root->parse->rtable);
+
+               if (OidIsValid(relid))
+               {
+                       vardata->statsTuple = SearchSysCache(STATRELATT,
+                                                                                                ObjectIdGetDatum(relid),
+                                                                                       Int16GetDatum(var->varattno),
+                                                                                                0, 0);
+               }
+               else
+               {
+                       /*
+                        * XXX This means the Var comes from a JOIN or sub-SELECT.
+                        * Later, add code to dig down into the join etc. and see if
+                        * we can trace the variable to something with stats.  (But
+                        * beware of sub-SELECTs with DISTINCT/GROUP BY/etc.  Perhaps
+                        * there are no cases where this would really be useful, as
+                        * we'd have flattened the sub-select already if it were simple?)
+                        */
+               }
+
+               return;
        }
-       else if (IsA(right, Var) &&
-                        (varRelid == 0 || varRelid == ((Var *) right)->varno))
+
+       /*
+        * Okay, it's a more complicated expression.  Determine variable
+        * membership.  Note that when varRelid isn't zero, only vars of that
+        * relation are considered "real" vars.
+        */
+       varnos = pull_varnos(basenode);
+
+       onerel = NULL;
+
+       switch (bms_membership(varnos))
        {
-               *var = (Var *) right;
-               *other = left;
-               *varonleft = false;
+               case BMS_EMPTY_SET:
+                       /* No Vars at all ... must be pseudo-constant clause */
+                       break;
+               case BMS_SINGLETON:
+                       if (varRelid == 0 || bms_is_member(varRelid, varnos))
+                       {
+                               onerel = find_base_rel(root,
+                                  (varRelid ? varRelid : bms_singleton_member(varnos)));
+                               vardata->rel = onerel;
+                               node = basenode; /* strip any relabeling */
+                       }
+                       /* else treat it as a constant */
+                       break;
+               case BMS_MULTIPLE:
+                       if (varRelid == 0)
+                       {
+                               /* treat it as a variable of a join relation */
+                               vardata->rel = find_join_rel(root, varnos);
+                               node = basenode; /* strip any relabeling */
+                       }
+                       else if (bms_is_member(varRelid, varnos))
+                       {
+                               /* ignore the vars belonging to other relations */
+                               vardata->rel = find_base_rel(root, varRelid);
+                               node = basenode; /* strip any relabeling */
+                               /* note: no point in expressional-index search here */
+                       }
+                       /* else treat it as a constant */
+                       break;
+       }
+
+       bms_free(varnos);
+
+       vardata->var = node;
+       vardata->atttype = exprType(node);
+       vardata->atttypmod = exprTypmod(node);
+
+       if (onerel)
+       {
+               /*
+                * We have an expression in vars of a single relation.  Try to
+                * match it to expressional index columns, in hopes of finding
+                * some statistics.
+                *
+                * XXX it's conceivable that there are multiple matches with
+                * different index opclasses; if so, we need to pick one that
+                * matches the operator we are estimating for.  FIXME later.
+                */
+               ListCell   *ilist;
+
+               foreach(ilist, onerel->indexlist)
+               {
+                       IndexOptInfo *index = (IndexOptInfo *) lfirst(ilist);
+                       ListCell   *indexpr_item;
+                       int                     pos;
+
+                       indexpr_item = list_head(index->indexprs);
+                       if (indexpr_item == NULL)
+                               continue;               /* no expressions here... */
+
+                       /*
+                        * Ignore partial indexes since they probably don't reflect
+                        * whole-relation statistics.  Possibly reconsider this later.
+                        */
+                       if (index->indpred)
+                               continue;
+
+                       for (pos = 0; pos < index->ncolumns; pos++)
+                       {
+                               if (index->indexkeys[pos] == 0)
+                               {
+                                       Node       *indexkey;
+
+                                       if (indexpr_item == NULL)
+                                               elog(ERROR, "too few entries in indexprs list");
+                                       indexkey = (Node *) lfirst(indexpr_item);
+                                       if (indexkey && IsA(indexkey, RelabelType))
+                                               indexkey = (Node *) ((RelabelType *) indexkey)->arg;
+                                       if (equal(node, indexkey))
+                                       {
+                                               /*
+                                                * Found a match ... is it a unique index? Tests
+                                                * here should match has_unique_index().
+                                                */
+                                               if (index->unique &&
+                                                       index->ncolumns == 1 &&
+                                                       index->indpred == NIL)
+                                                       vardata->isunique = true;
+                                               /* Has it got stats? */
+                                               vardata->statsTuple = SearchSysCache(STATRELATT,
+                                                                          ObjectIdGetDatum(index->indexoid),
+                                                                                                 Int16GetDatum(pos + 1),
+                                                                                                                        0, 0);
+                                               if (vardata->statsTuple)
+                                                       break;
+                                       }
+                                       indexpr_item = lnext(indexpr_item);
+                               }
+                       }
+                       if (vardata->statsTuple)
+                               break;
+               }
+       }
+}
+
+/*
+ * get_variable_numdistinct
+ *       Estimate the number of distinct values of a variable.
+ *
+ * vardata: results of examine_variable
+ *
+ * NB: be careful to produce an integral result, since callers may compare
+ * the result to exact integer counts.
+ */
+static double
+get_variable_numdistinct(VariableStatData *vardata)
+{
+       double          stadistinct;
+       double          ntuples;
+
+       /*
+        * Determine the stadistinct value to use.      There are cases where we
+        * can get an estimate even without a pg_statistic entry, or can get a
+        * better value than is in pg_statistic.
+        */
+       if (HeapTupleIsValid(vardata->statsTuple))
+       {
+               /* Use the pg_statistic entry */
+               Form_pg_statistic stats;
+
+               stats = (Form_pg_statistic) GETSTRUCT(vardata->statsTuple);
+               stadistinct = stats->stadistinct;
+       }
+       else if (vardata->vartype == BOOLOID)
+       {
+               /*
+                * Special-case boolean columns: presumably, two distinct values.
+                *
+                * Are there any other datatypes we should wire in special estimates
+                * for?
+                */
+               stadistinct = 2.0;
        }
        else
        {
-               /* Duh, it's too complicated for me... */
-               return false;
+               /*
+                * We don't keep statistics for system columns, but in some cases
+                * we can infer distinctness anyway.
+                */
+               if (vardata->var && IsA(vardata->var, Var))
+               {
+                       switch (((Var *) vardata->var)->varattno)
+                       {
+                               case ObjectIdAttributeNumber:
+                               case SelfItemPointerAttributeNumber:
+                                       stadistinct = -1.0; /* unique */
+                                       break;
+                               case TableOidAttributeNumber:
+                                       stadistinct = 1.0;      /* only 1 value */
+                                       break;
+                               default:
+                                       stadistinct = 0.0;      /* means "unknown" */
+                                       break;
+                       }
+               }
+               else
+                       stadistinct = 0.0;      /* means "unknown" */
+
+               /*
+                * XXX consider using estimate_num_groups on expressions?
+                */
        }
 
-       return true;
+       /*
+        * If there is a unique index for the variable, assume it is unique no
+        * matter what pg_statistic says (the statistics could be out of
+        * date).  Can skip search if we already think it's unique.
+        */
+       if (stadistinct != -1.0)
+       {
+               if (vardata->isunique)
+                       stadistinct = -1.0;
+               else if (vardata->var && IsA(vardata->var, Var) &&
+                                vardata->rel &&
+                                has_unique_index(vardata->rel,
+                                                                 ((Var *) vardata->var)->varattno))
+                       stadistinct = -1.0;
+       }
+
+       /*
+        * If we had an absolute estimate, use that.
+        */
+       if (stadistinct > 0.0)
+               return stadistinct;
+
+       /*
+        * Otherwise we need to get the relation size; punt if not available.
+        */
+       if (vardata->rel == NULL)
+               return DEFAULT_NUM_DISTINCT;
+       ntuples = vardata->rel->tuples;
+       if (ntuples <= 0.0)
+               return DEFAULT_NUM_DISTINCT;
+
+       /*
+        * If we had a relative estimate, use that.
+        */
+       if (stadistinct < 0.0)
+               return floor((-stadistinct * ntuples) + 0.5);
+
+       /*
+        * With no data, estimate ndistinct = ntuples if the table is small,
+        * else use default.
+        */
+       if (ntuples < DEFAULT_NUM_DISTINCT)
+               return ntuples;
+
+       return DEFAULT_NUM_DISTINCT;
 }
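
For illustration only (not part of the patch): a worked example of the
stadistinct convention applied above.  Positive values are absolute distinct
counts; negative values are fractions of the row count, so the estimate scales
with the table.  The helper name and the sample numbers are invented.

#include <math.h>

/* Sketch: scale a pg_statistic stadistinct value by the current row count. */
static double
scale_stadistinct(double stadistinct, double ntuples)
{
    if (stadistinct > 0.0)
        return stadistinct;                             /* absolute count */
    if (stadistinct < 0.0)
        return floor((-stadistinct * ntuples) + 0.5);   /* fraction of rows */
    return DEFAULT_NUM_DISTINCT;                        /* unknown */
}

/* e.g. scale_stadistinct(-0.2, 50000.0) == 10000 */
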
 
 /*
- * get_join_vars
+ * get_variable_maximum
+ *             Estimate the maximum value of the specified variable.
+ *             If successful, store value in *max and return TRUE.
+ *             If no data available, return FALSE.
  *
- * Extract the two Vars from a join clause's argument list.  Returns
- * NULL for arguments that are not simple vars.
+ * sortop is the "<" comparison operator to use.  (To extract the
+ * minimum instead of the maximum, just pass the ">" operator instead.)
  */
-static void
-get_join_vars(List *args, Var **var1, Var **var2)
+static bool
+get_variable_maximum(PlannerInfo *root, VariableStatData *vardata,
+                                        Oid sortop, Datum *max)
 {
-       Node       *left,
-                          *right;
+       Datum           tmax = 0;
+       bool            have_max = false;
+       Form_pg_statistic stats;
+       int16           typLen;
+       bool            typByVal;
+       Datum      *values;
+       int                     nvalues;
+       int                     i;
 
-       if (length(args) != 2)
+       if (!HeapTupleIsValid(vardata->statsTuple))
        {
-               *var1 = NULL;
-               *var2 = NULL;
-               return;
+               /* no stats available, so default result */
+               return false;
+       }
+       stats = (Form_pg_statistic) GETSTRUCT(vardata->statsTuple);
+
+       get_typlenbyval(vardata->atttype, &typLen, &typByVal);
+
+       /*
+        * If there is a histogram, grab the last or first value as
+        * appropriate.
+        *
+        * If a histogram exists but is sorted with an operator other than the
+        * one we want, fail --- this suggests that there is data we can't
+        * use.
+        */
+       if (get_attstatsslot(vardata->statsTuple,
+                                                vardata->atttype, vardata->atttypmod,
+                                                STATISTIC_KIND_HISTOGRAM, sortop,
+                                                &values, &nvalues,
+                                                NULL, NULL))
+       {
+               if (nvalues > 0)
+               {
+                       tmax = datumCopy(values[nvalues - 1], typByVal, typLen);
+                       have_max = true;
+               }
+               free_attstatsslot(vardata->atttype, values, nvalues, NULL, 0);
+       }
+       else
+       {
+               Oid                     rsortop = get_commutator(sortop);
+
+               if (OidIsValid(rsortop) &&
+                       get_attstatsslot(vardata->statsTuple,
+                                                        vardata->atttype, vardata->atttypmod,
+                                                        STATISTIC_KIND_HISTOGRAM, rsortop,
+                                                        &values, &nvalues,
+                                                        NULL, NULL))
+               {
+                       if (nvalues > 0)
+                       {
+                               tmax = datumCopy(values[0], typByVal, typLen);
+                               have_max = true;
+                       }
+                       free_attstatsslot(vardata->atttype, values, nvalues, NULL, 0);
+               }
+               else if (get_attstatsslot(vardata->statsTuple,
+                                                                 vardata->atttype, vardata->atttypmod,
+                                                                 STATISTIC_KIND_HISTOGRAM, InvalidOid,
+                                                                 &values, &nvalues,
+                                                                 NULL, NULL))
+               {
+                       free_attstatsslot(vardata->atttype, values, nvalues, NULL, 0);
+                       return false;
+               }
        }
 
-       left = (Node *) lfirst(args);
-       right = (Node *) lsecond(args);
+       /*
+        * If we have most-common-values info, check whether any MCV exceeds
+        * the maximum found so far.  This is needed even if we also have a
+        * histogram, since the histogram excludes the MCVs.  However, the
+        * MCVs usually won't be the extreme values, so avoid unnecessary copying.
+        */
+       if (get_attstatsslot(vardata->statsTuple,
+                                                vardata->atttype, vardata->atttypmod,
+                                                STATISTIC_KIND_MCV, InvalidOid,
+                                                &values, &nvalues,
+                                                NULL, NULL))
+       {
+               bool            large_mcv = false;
+               FmgrInfo        opproc;
 
-       /* Ignore any binary-compatible relabeling */
-       if (IsA(left, RelabelType))
-               left = ((RelabelType *) left)->arg;
-       if (IsA(right, RelabelType))
-               right = ((RelabelType *) right)->arg;
+               fmgr_info(get_opcode(sortop), &opproc);
 
-       if (IsA(left, Var))
-               *var1 = (Var *) left;
-       else
-               *var1 = NULL;
+               for (i = 0; i < nvalues; i++)
+               {
+                       if (!have_max)
+                       {
+                               tmax = values[i];
+                               large_mcv = have_max = true;
+                       }
+                       else if (DatumGetBool(FunctionCall2(&opproc, tmax, values[i])))
+                       {
+                               tmax = values[i];
+                               large_mcv = true;
+                       }
+               }
+               if (large_mcv)
+                       tmax = datumCopy(tmax, typByVal, typLen);
+               free_attstatsslot(vardata->atttype, values, nvalues, NULL, 0);
+       }
 
-       if (IsA(right, Var))
-               *var2 = (Var *) right;
-       else
-               *var2 = NULL;
+       *max = tmax;
+       return have_max;
 }
 
+
 /*-------------------------------------------------------------------------
  *
  * Pattern analysis functions
@@ -2659,36 +3382,73 @@ get_join_vars(List *args, Var **var1, Var **var2)
 
 /*
  * Extract the fixed prefix, if any, for a pattern.
- * *prefix is set to a palloc'd prefix string,
- * or to NULL if no fixed prefix exists for the pattern.
- * *rest is set to point to the remainder of the pattern after the
- * portion describing the fixed prefix.
+ *
+ * *prefix is set to a palloc'd prefix string (in the form of a Const node),
+ *     or to NULL if no fixed prefix exists for the pattern.
+ * *rest is set to a palloc'd Const representing the remainder of the pattern
+ *     after the portion describing the fixed prefix.
+ * Each of these has the same type (TEXT or BYTEA) as the given pattern Const.
+ *
  * The return value distinguishes no fixed prefix, a partial prefix,
  * or an exact-match-only pattern.
  */
 
 static Pattern_Prefix_Status
-like_fixed_prefix(char *patt, bool case_insensitive,
-                                 char **prefix, char **rest)
+like_fixed_prefix(Const *patt_const, bool case_insensitive,
+                                 Const **prefix_const, Const **rest_const)
 {
        char       *match;
+       char       *patt;
+       int                     pattlen;
+       char       *rest;
+       Oid                     typeid = patt_const->consttype;
        int                     pos,
                                match_pos;
 
-       *prefix = match = palloc(strlen(patt) + 1);
-       match_pos = 0;
+       /* the right-hand const is type text or bytea */
+       Assert(typeid == BYTEAOID || typeid == TEXTOID);
+
+       if (typeid == BYTEAOID && case_insensitive)
+               ereport(ERROR,
+                               (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+               errmsg("case insensitive matching not supported on type bytea")));
+
+       if (typeid != BYTEAOID)
+       {
+               patt = DatumGetCString(DirectFunctionCall1(textout, patt_const->constvalue));
+               pattlen = strlen(patt);
+       }
+       else
+       {
+               bytea      *bstr = DatumGetByteaP(patt_const->constvalue);
+
+               pattlen = VARSIZE(bstr) - VARHDRSZ;
+               if (pattlen > 0)
+               {
+                       patt = (char *) palloc(pattlen);
+                       memcpy(patt, VARDATA(bstr), pattlen);
+               }
+               else
+                       patt = NULL;
+
+               if ((Pointer) bstr != DatumGetPointer(patt_const->constvalue))
+                       pfree(bstr);
+       }
 
-       for (pos = 0; patt[pos]; pos++)
+       match = palloc(pattlen + 1);
+       match_pos = 0;
+       for (pos = 0; pos < pattlen; pos++)
        {
                /* % and _ are wildcard characters in LIKE */
                if (patt[pos] == '%' ||
                        patt[pos] == '_')
                        break;
-               /* Backslash quotes the next character */
+
+               /* Backslash escapes the next character */
                if (patt[pos] == '\\')
                {
                        pos++;
-                       if (patt[pos] == '\0')
+                       if (patt[pos] == '\0' && typeid != BYTEAOID)
                                break;
                }
 
@@ -2708,35 +3468,69 @@ like_fixed_prefix(char *patt, bool case_insensitive,
        }
 
        match[match_pos] = '\0';
-       *rest = &patt[pos];
+       rest = &patt[pos];
+
+       if (typeid != BYTEAOID)
+       {
+               *prefix_const = string_to_const(match, typeid);
+               *rest_const = string_to_const(rest, typeid);
+       }
+       else
+       {
+               *prefix_const = string_to_bytea_const(match, match_pos);
+               *rest_const = string_to_bytea_const(rest, pattlen - pos);
+       }
+
+       if (patt != NULL)
+               pfree(patt);
+       pfree(match);
 
        /* in LIKE, an empty pattern is an exact match! */
-       if (patt[pos] == '\0')
+       if (pos == pattlen)
                return Pattern_Prefix_Exact;    /* reached end of pattern, so
                                                                                 * exact */
 
        if (match_pos > 0)
                return Pattern_Prefix_Partial;
 
-       pfree(match);
-       *prefix = NULL;
        return Pattern_Prefix_None;
 }
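
For illustration only (not part of the patch): the shape of a call through the
public pattern_fixed_prefix() wrapper, with the results the logic above would
produce for a few sample LIKE patterns.  Building the text Const is assumed to
happen elsewhere (e.g., via this file's string_to_const helper).

Const      *patt;           /* assumed: a TEXTOID Const holding the pattern */
Const      *prefix;
Const      *rest;
Pattern_Prefix_Status status;

status = pattern_fixed_prefix(patt, Pattern_Type_Like, &prefix, &rest);

/* Expected results for some sample patterns:
 *   'abc%def'  ->  prefix "abc",  rest "%def",  Pattern_Prefix_Partial
 *   'abc'      ->  prefix "abc",  rest "",      Pattern_Prefix_Exact
 *   '%abc'     ->  prefix "",     rest "%abc",  Pattern_Prefix_None
 *   'ab\_c%'   ->  prefix "ab_c", rest "%",     Pattern_Prefix_Partial
 */
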
 
 static Pattern_Prefix_Status
-regex_fixed_prefix(char *patt, bool case_insensitive,
-                                  char **prefix, char **rest)
+regex_fixed_prefix(Const *patt_const, bool case_insensitive,
+                                  Const **prefix_const, Const **rest_const)
 {
        char       *match;
        int                     pos,
                                match_pos,
+                               prev_pos,
+                               prev_match_pos,
                                paren_depth;
+       char       *patt;
+       char       *rest;
+       Oid                     typeid = patt_const->consttype;
+
+       /*
+        * This should be unnecessary, since no bytea regex operators are
+        * defined.  Note, therefore, that the rest of this function has *not*
+        * been made safe for binary (possibly NUL-containing) strings.
+        */
+       if (typeid == BYTEAOID)
+               ereport(ERROR,
+                               (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+                                errmsg("regular-expression matching not supported on type bytea")));
+
+       /* the right-hand const is type text for all of these */
+       patt = DatumGetCString(DirectFunctionCall1(textout, patt_const->constvalue));
 
        /* Pattern must be anchored left */
        if (patt[0] != '^')
        {
-               *prefix = NULL;
-               *rest = patt;
+               rest = patt;
+
+               *prefix_const = NULL;
+               *rest_const = string_to_const(rest, typeid);
+
                return Pattern_Prefix_None;
        }
 
@@ -2749,8 +3543,11 @@ regex_fixed_prefix(char *patt, bool case_insensitive,
        {
                if (patt[pos] == '|' && paren_depth == 0)
                {
-                       *prefix = NULL;
-                       *rest = patt;
+                       rest = patt;
+
+                       *prefix_const = NULL;
+                       *rest_const = string_to_const(rest, typeid);
+
                        return Pattern_Prefix_None;
                }
                else if (patt[pos] == '(')
@@ -2767,12 +3564,14 @@ regex_fixed_prefix(char *patt, bool case_insensitive,
        }
 
        /* OK, allocate space for pattern */
-       *prefix = match = palloc(strlen(patt) + 1);
-       match_pos = 0;
+       match = palloc(strlen(patt) + 1);
+       prev_match_pos = match_pos = 0;
 
        /* note start at pos 1 to skip leading ^ */
-       for (pos = 1; patt[pos]; pos++)
+       for (prev_pos = pos = 1; patt[pos]; )
        {
+               int             len;
+
                /*
                 * Check for characters that indicate multiple possible matches
                 * here. XXX I suspect isalpha() is not an adequately
@@ -2786,6 +3585,14 @@ regex_fixed_prefix(char *patt, bool case_insensitive,
                        (case_insensitive && isalpha((unsigned char) patt[pos])))
                        break;
 
+               /*
+                * In AREs, backslash followed by alphanumeric is an escape, not
+                * a quoted character.  Must treat it as having multiple possible
+                * matches.
+                */
+               if (patt[pos] == '\\' && isalnum((unsigned char) patt[pos + 1]))
+                       break;
+
                /*
                 * Check for quantifiers.  Except for +, this means the preceding
                 * character is optional, so we must remove it from the prefix
@@ -2795,14 +3602,13 @@ regex_fixed_prefix(char *patt, bool case_insensitive,
                        patt[pos] == '?' ||
                        patt[pos] == '{')
                {
-                       if (match_pos > 0)
-                               match_pos--;
-                       pos--;
+                       match_pos = prev_match_pos;
+                       pos = prev_pos;
                        break;
                }
                if (patt[pos] == '+')
                {
-                       pos--;
+                       pos = prev_pos;
                        break;
                }
                if (patt[pos] == '\\')
@@ -2812,29 +3618,47 @@ regex_fixed_prefix(char *patt, bool case_insensitive,
                        if (patt[pos] == '\0')
                                break;
                }
-               match[match_pos++] = patt[pos];
+               /* save position in case we need to back up on next loop cycle */
+               prev_match_pos = match_pos;
+               prev_pos = pos;
+               /* must use encoding-aware processing here */
+               len = pg_mblen(&patt[pos]);
+               memcpy(&match[match_pos], &patt[pos], len);
+               match_pos += len;
+               pos += len;
        }
 
        match[match_pos] = '\0';
-       *rest = &patt[pos];
+       rest = &patt[pos];
 
        if (patt[pos] == '$' && patt[pos + 1] == '\0')
        {
-               *rest = &patt[pos + 1];
+               rest = &patt[pos + 1];
+
+               *prefix_const = string_to_const(match, typeid);
+               *rest_const = string_to_const(rest, typeid);
+
+               pfree(patt);
+               pfree(match);
+
                return Pattern_Prefix_Exact;    /* pattern specifies exact match */
        }
 
+       *prefix_const = string_to_const(match, typeid);
+       *rest_const = string_to_const(rest, typeid);
+
+       pfree(patt);
+       pfree(match);
+
        if (match_pos > 0)
                return Pattern_Prefix_Partial;
 
-       pfree(match);
-       *prefix = NULL;
        return Pattern_Prefix_None;
 }
 
 Pattern_Prefix_Status
-pattern_fixed_prefix(char *patt, Pattern_Type ptype,
-                                        char **prefix, char **rest)
+pattern_fixed_prefix(Const *patt, Pattern_Type ptype,
+                                        Const **prefix, Const **rest)
 {
        Pattern_Prefix_Status result;
 
@@ -2853,7 +3677,7 @@ pattern_fixed_prefix(char *patt, Pattern_Type ptype,
                        result = regex_fixed_prefix(patt, true, prefix, rest);
                        break;
                default:
-                       elog(ERROR, "pattern_fixed_prefix: bogus ptype");
+                       elog(ERROR, "unrecognized ptype: %d", (int) ptype);
                        result = Pattern_Prefix_None;           /* keep compiler quiet */
                        break;
        }
@@ -2864,7 +3688,11 @@ pattern_fixed_prefix(char *patt, Pattern_Type ptype,
  * Estimate the selectivity of a fixed prefix for a pattern match.
  *
  * A fixed prefix "foo" is estimated as the selectivity of the expression
- * "var >= 'foo' AND var < 'fop'" (see also indxqual.c).
+ * "variable >= 'foo' AND variable < 'fop'" (see also indxpath.c).
+ *
+ * We use the >= and < operators from the specified btree opclass to do the
+ * estimation. The given variable and Const must be of the associated
+ * datatype.
  *
  * XXX Note: we make use of the upper bound to estimate operator selectivity
  * even if the locale is such that we cannot rely on the upper-bound string.
@@ -2872,20 +3700,19 @@ pattern_fixed_prefix(char *patt, Pattern_Type ptype,
  * more useful to use the upper-bound code than not.
  */
 static Selectivity
-prefix_selectivity(Query *root, Var *var, char *prefix)
+prefix_selectivity(PlannerInfo *root, Node *variable,
+                                  Oid opclass, Const *prefixcon)
 {
        Selectivity prefixsel;
        Oid                     cmpopr;
-       Const      *prefixcon;
        List       *cmpargs;
-       char       *greaterstr;
+       Const      *greaterstrcon;
 
-       cmpopr = find_operator(">=", var->vartype);
+       cmpopr = get_opclass_member(opclass, InvalidOid,
+                                                               BTGreaterEqualStrategyNumber);
        if (cmpopr == InvalidOid)
-               elog(ERROR, "prefix_selectivity: no >= operator for type %u",
-                        var->vartype);
-       prefixcon = string_to_const(prefix, var->vartype);
-       cmpargs = makeList2(var, prefixcon);
+               elog(ERROR, "no >= operator for opclass %u", opclass);
+       cmpargs = list_make2(variable, prefixcon);
        /* Assume scalargtsel is appropriate for all supported types */
        prefixsel = DatumGetFloat8(DirectFunctionCall4(scalargtsel,
                                                                                                   PointerGetDatum(root),
@@ -2898,17 +3725,16 @@ prefix_selectivity(Query *root, Var *var, char *prefix)
         *      "x < greaterstr".
         *-------
         */
-       greaterstr = make_greater_string(prefix, var->vartype);
-       if (greaterstr)
+       greaterstrcon = make_greater_string(prefixcon);
+       if (greaterstrcon)
        {
                Selectivity topsel;
 
-               cmpopr = find_operator("<", var->vartype);
+               cmpopr = get_opclass_member(opclass, InvalidOid,
+                                                                       BTLessStrategyNumber);
                if (cmpopr == InvalidOid)
-                       elog(ERROR, "prefix_selectivity: no < operator for type %u",
-                                var->vartype);
-               prefixcon = string_to_const(greaterstr, var->vartype);
-               cmpargs = makeList2(var, prefixcon);
+                       elog(ERROR, "no < operator for opclass %u", opclass);
+               cmpargs = list_make2(variable, greaterstrcon);
                /* Assume scalarltsel is appropriate for all supported types */
                topsel = DatumGetFloat8(DirectFunctionCall4(scalarltsel,
                                                                                                        PointerGetDatum(root),
@@ -2922,6 +3748,9 @@ prefix_selectivity(Query *root, Var *var, char *prefix)
                 */
                prefixsel = topsel + prefixsel - 1.0;
 
+               /* Adjust for double-exclusion of NULLs */
+               prefixsel += nulltestsel(root, IS_NULL, variable, 0);
+
                /*
                 * A zero or slightly negative prefixsel should be converted into
                 * a small positive value; we probably are dealing with a very
@@ -2972,14 +3801,50 @@ prefix_selectivity(Query *root, Var *var, char *prefix)
 #define PARTIAL_WILDCARD_SEL 2.0
 
 static Selectivity
-like_selectivity(char *patt, bool case_insensitive)
+like_selectivity(Const *patt_const, bool case_insensitive)
 {
        Selectivity sel = 1.0;
        int                     pos;
+       int                     start;
+       Oid                     typeid = patt_const->consttype;
+       char       *patt;
+       int                     pattlen;
+
+       /* the right-hand const is type text or bytea */
+       Assert(typeid == BYTEAOID || typeid == TEXTOID);
+
+       if (typeid == BYTEAOID && case_insensitive)
+               ereport(ERROR,
+                               (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+               errmsg("case insensitive matching not supported on type bytea")));
+
+       if (typeid != BYTEAOID)
+       {
+               patt = DatumGetCString(DirectFunctionCall1(textout, patt_const->constvalue));
+               pattlen = strlen(patt);
+       }
+       else
+       {
+               bytea      *bstr = DatumGetByteaP(patt_const->constvalue);
+
+               pattlen = VARSIZE(bstr) - VARHDRSZ;
+               if (pattlen > 0)
+               {
+                       patt = (char *) palloc(pattlen);
+                       memcpy(patt, VARDATA(bstr), pattlen);
+               }
+               else
+                       patt = NULL;
+
+               if ((Pointer) bstr != DatumGetPointer(patt_const->constvalue))
+                       pfree(bstr);
+       }
+       /* patt should never be NULL in practice */
+       Assert(patt != NULL);
 
        /* Skip any leading %; it's already factored into initial sel */
-       pos = (*patt == '%') ? 1 : 0;
-       for (; patt[pos]; pos++)
+       start = (*patt == '%') ? 1 : 0;
+       for (pos = start; pos < pattlen; pos++)
        {
                /* % and _ are wildcard characters in LIKE */
                if (patt[pos] == '%')
@@ -2990,7 +3855,7 @@ like_selectivity(char *patt, bool case_insensitive)
                {
                        /* Backslash quotes the next character */
                        pos++;
-                       if (patt[pos] == '\0')
+                       if (patt[pos] == '\0' && typeid != BYTEAOID)
                                break;
                        sel *= FIXED_CHAR_SEL;
                }
@@ -3097,10 +3962,26 @@ regex_selectivity_sub(char *patt, int pattlen, bool case_insensitive)
 }
 
 static Selectivity
-regex_selectivity(char *patt, bool case_insensitive)
+regex_selectivity(Const *patt_const, bool case_insensitive)
 {
        Selectivity sel;
-       int                     pattlen = strlen(patt);
+       char       *patt;
+       int                     pattlen;
+       Oid                     typeid = patt_const->consttype;
+
+       /*
+        * This should be unnecessary, since no bytea regex operators are
+        * defined.  Note, therefore, that the rest of this function has *not*
+        * been made safe for binary (possibly NUL-containing) strings.
+        */
+       if (typeid == BYTEAOID)
+               ereport(ERROR,
+                               (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+                                errmsg("regular-expression matching not supported on type bytea")));
+
+       /* the right-hand const is type text for all of these */
+       patt = DatumGetCString(DirectFunctionCall1(textout, patt_const->constvalue));
+       pattlen = strlen(patt);
 
        /* If patt doesn't end with $, consider it to have a trailing wildcard */
        if (pattlen > 0 && patt[pattlen - 1] == '$' &&
@@ -3121,7 +4002,7 @@ regex_selectivity(char *patt, bool case_insensitive)
 }
 
 static Selectivity
-pattern_selectivity(char *patt, Pattern_Type ptype)
+pattern_selectivity(Const *patt, Pattern_Type ptype)
 {
        Selectivity result;
 
@@ -3140,178 +4021,122 @@ pattern_selectivity(char *patt, Pattern_Type ptype)
                        result = regex_selectivity(patt, true);
                        break;
                default:
-                       elog(ERROR, "pattern_selectivity: bogus ptype");
+                       elog(ERROR, "unrecognized ptype: %d", (int) ptype);
                        result = 1.0;           /* keep compiler quiet */
                        break;
        }
        return result;
 }
 
-/*
- * Test whether the database's LOCALE setting is safe for LIKE/regexp index
- * optimization.  The key requirement here is that given a prefix string,
- * say "foo", we must be able to generate another string "fop" that is
- * greater than all strings "foobar" starting with "foo".  Unfortunately,
- * many non-C locales have bizarre collation rules in which "fop" > "foo"
- * is not sufficient to ensure "fop" > "foobar".  Until we can come up
- * with a more bulletproof way of generating the upper-bound string,
- * disable the optimization in locales where it is not known to be safe.
- */
-bool
-locale_is_like_safe(void)
-{
-#ifdef USE_LOCALE
-       /* Cache result so we only have to compute it once */
-       static int      result = -1;
-       char       *localeptr;
-
-       if (result >= 0)
-               return (bool) result;
-       localeptr = setlocale(LC_COLLATE, NULL);
-       if (!localeptr)
-               elog(PANIC, "Invalid LC_COLLATE setting");
-
-       /*
-        * Currently we accept only "C" and "POSIX" (do any systems still
-        * return "POSIX"?).  Which other locales allow safe optimization?
-        */
-       if (strcmp(localeptr, "C") == 0)
-               result = true;
-       else if (strcmp(localeptr, "POSIX") == 0)
-               result = true;
-       else
-               result = false;
-       return (bool) result;
-#else                                                  /* not USE_LOCALE */
-       return true;                            /* We must be in C locale, which is OK */
-#endif   /* USE_LOCALE */
-}
 
 /*
- * Try to generate a string greater than the given string or any string it is
- * a prefix of.  If successful, return a palloc'd string; else return NULL.
- *
- * To work correctly in non-ASCII locales with weird collation orders,
- * we cannot simply increment "foo" to "fop" --- we have to check whether
- * we actually produced a string greater than the given one.  If not,
- * increment the righthand byte again and repeat.  If we max out the righthand
- * byte, truncate off the last character and start incrementing the next.
- * For example, if "z" were the last character in the sort order, then we
- * could produce "foo" as a string greater than "fonz".
- *
- * This could be rather slow in the worst case, but in most cases we won't
- * have to try more than one or two strings before succeeding.
- *
- * XXX this is actually not sufficient, since it only copes with the case
- * where individual characters collate in an order different from their
- * numeric code assignments.  It does not handle cases where there are
- * cross-character effects, such as specially sorted digraphs, multiple
- * sort passes, etc.  For now, we just shut down the whole thing in locales
- * that do such things :-(
+ * Try to generate a string greater than the given string or any
+ * string it is a prefix of.  If successful, return a palloc'd string
+ * in the form of a Const pointer; else return NULL.
+ *
+ * The key requirement here is that given a prefix string, say "foo",
+ * we must be able to generate another string "fop" that is greater
+ * than all strings "foobar" starting with "foo".
+ *
+ * If we max out the righthand byte, truncate off the last character
+ * and start incrementing the next.  For example, if "z" were the last
+ * character in the sort order, then we could produce "foo" as a
+ * string greater than "fonz".
+ *
+ * This could be rather slow in the worst case, but in most cases we
+ * won't have to try more than one or two strings before succeeding.
+ *
+ * NOTE: at present this assumes we are in the C locale, so that simple
+ * bytewise comparison applies.  However, we might be in a multibyte
+ * encoding such as UTF8, so we do have to watch out for generating
+ * invalid encoding sequences.
  */
-char *
-make_greater_string(const char *str, Oid datatype)
+Const *
+make_greater_string(const Const *str_const)
 {
+       Oid                     datatype = str_const->consttype;
        char       *workstr;
        int                     len;
 
-       /*
-        * Make a modifiable copy, which will be our return value if
-        * successful
-        */
-       workstr = pstrdup((char *) str);
+       /* Get the string and a modifiable copy */
+       if (datatype == NAMEOID)
+       {
+               workstr = DatumGetCString(DirectFunctionCall1(nameout,
+                                                                                                str_const->constvalue));
+               len = strlen(workstr);
+       }
+       else if (datatype == BYTEAOID)
+       {
+               bytea      *bstr = DatumGetByteaP(str_const->constvalue);
+
+               len = VARSIZE(bstr) - VARHDRSZ;
+               if (len > 0)
+               {
+                       workstr = (char *) palloc(len);
+                       memcpy(workstr, VARDATA(bstr), len);
+               }
+               else
+                       workstr = NULL;
+
+               if ((Pointer) bstr != DatumGetPointer(str_const->constvalue))
+                       pfree(bstr);
+       }
+       else
+       {
+               workstr = DatumGetCString(DirectFunctionCall1(textout,
+                                                                                                str_const->constvalue));
+               len = strlen(workstr);
+       }
 
-       while ((len = strlen(workstr)) > 0)
+       while (len > 0)
        {
                unsigned char *lastchar = (unsigned char *) (workstr + len - 1);
+               unsigned char savelastchar = *lastchar;
 
                /*
                 * Try to generate a larger string by incrementing the last byte.
                 */
                while (*lastchar < (unsigned char) 255)
                {
+                       Const      *workstr_const;
+
                        (*lastchar)++;
-                       if (string_lessthan(str, workstr, datatype))
-                               return workstr; /* Success! */
+
+                       if (datatype != BYTEAOID)
+                       {
+                               /* do not generate invalid encoding sequences */
+                               if (!pg_verifymbstr(workstr, len, true))
+                                       continue;
+                               workstr_const = string_to_const(workstr, datatype);
+                       }
+                       else
+                               workstr_const = string_to_bytea_const(workstr, len);
+
+                       pfree(workstr);
+                       return workstr_const;
                }
 
+               /* restore last byte so we don't confuse pg_mbcliplen */
+               *lastchar = savelastchar;
+
                /*
                 * Truncate off the last character, which might be more than 1
-                * byte in MULTIBYTE case.
+                * byte, depending on the character encoding.
                 */
-#ifdef MULTIBYTE
-               len = pg_mbcliplen((const unsigned char *) workstr, len, len - 1);
-               workstr[len] = '\0';
-#else
-               *lastchar = '\0';
-#endif
-       }
-
-       /* Failed... */
-       pfree(workstr);
-       return NULL;
-}
-
-/*
- * Test whether two strings are "<" according to the rules of the given
- * datatype.  We do this the hard way, ie, actually calling the type's
- * "<" operator function, to ensure we get the right result...
- */
-static bool
-string_lessthan(const char *str1, const char *str2, Oid datatype)
-{
-       Datum           datum1 = string_to_datum(str1, datatype);
-       Datum           datum2 = string_to_datum(str2, datatype);
-       bool            result;
-
-       switch (datatype)
-       {
-               case TEXTOID:
-                       result = DatumGetBool(DirectFunctionCall2(text_lt,
-                                                                                                         datum1, datum2));
-                       break;
-
-               case BPCHAROID:
-                       result = DatumGetBool(DirectFunctionCall2(bpcharlt,
-                                                                                                         datum1, datum2));
-                       break;
-
-               case VARCHAROID:
-                       result = DatumGetBool(DirectFunctionCall2(varcharlt,
-                                                                                                         datum1, datum2));
-                       break;
-
-               case NAMEOID:
-                       result = DatumGetBool(DirectFunctionCall2(namelt,
-                                                                                                         datum1, datum2));
-                       break;
-
-               case BYTEAOID:
-                       result = DatumGetBool(DirectFunctionCall2(bytealt,
-                                                                                                         datum1, datum2));
-                       break;
+               if (datatype != BYTEAOID && pg_database_encoding_max_length() > 1)
+                       len = pg_mbcliplen(workstr, len, len - 1);
+               else
+                       len -= 1;
 
-               default:
-                       elog(ERROR, "string_lessthan: unexpected datatype %u", datatype);
-                       result = false;
-                       break;
+               if (datatype != BYTEAOID)
+                       workstr[len] = '\0';
        }
 
-       pfree(DatumGetPointer(datum1));
-       pfree(DatumGetPointer(datum2));
-
-       return result;
-}
+       /* Failed... */
+       if (workstr != NULL)
+               pfree(workstr);
 
-/* See if there is a binary op of the given name for the given datatype */
-static Oid
-find_operator(const char *opname, Oid datatype)
-{
-       return GetSysCacheOid(OPERNAME,
-                                                 PointerGetDatum(opname),
-                                                 ObjectIdGetDatum(datatype),
-                                                 ObjectIdGetDatum(datatype),
-                                                 CharGetDatum('b'));
+       return NULL;
 }
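The new loop is easier to see in isolation. Below is a minimal, self-contained sketch of the same C-locale successor idea: bump the last byte, and if it is already 0xFF, drop the final byte and try the one before it. This is an illustration only; it omits the Const wrapping, the pg_verifymbstr() encoding check, and the bytea/name handling that the real function performs, and naive_greater_string is a made-up name.

#include <stdlib.h>
#include <string.h>

/*
 * Illustrative sketch only: return a malloc'd string that compares
 * greater, bytewise (i.e. in the C locale), than every string having
 * "str" as a prefix, or NULL if none exists (all bytes are 0xFF).
 */
char *
naive_greater_string(const char *str)
{
	size_t		len = strlen(str);
	char	   *work = strdup(str);

	if (work == NULL)
		return NULL;

	while (len > 0)
	{
		unsigned char *lastchar = (unsigned char *) (work + len - 1);

		if (*lastchar < 0xFF)
		{
			(*lastchar)++;		/* e.g. "foo" -> "fop" */
			return work;
		}

		/* last byte is maxed out: truncate it and try the one before */
		work[--len] = '\0';
	}

	free(work);					/* every byte was 0xFF: give up */
	return NULL;
}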
 
 /*
@@ -3322,12 +4147,16 @@ find_operator(const char *opname, Oid datatype)
 static Datum
 string_to_datum(const char *str, Oid datatype)
 {
+       Assert(str != NULL);
+
        /*
         * We cheat a little by assuming that textin() will do for bpchar and
         * varchar constants too...
         */
        if (datatype == NAMEOID)
                return DirectFunctionCall1(namein, CStringGetDatum(str));
+       else if (datatype == BYTEAOID)
+               return DirectFunctionCall1(byteain, CStringGetDatum(str));
        else
                return DirectFunctionCall1(textin, CStringGetDatum(str));
 }
@@ -3341,7 +4170,23 @@ string_to_const(const char *str, Oid datatype)
        Datum           conval = string_to_datum(str, datatype);
 
        return makeConst(datatype, ((datatype == NAMEOID) ? NAMEDATALEN : -1),
-                                        conval, false, false, false, false);
+                                        conval, false, false);
+}
+
+/*
+ * Generate a Const node of bytea type from a binary C string and a length.
+ */
+static Const *
+string_to_bytea_const(const char *str, size_t str_len)
+{
+       bytea      *bstr = palloc(VARHDRSZ + str_len);
+       Datum           conval;
+
+       memcpy(VARDATA(bstr), str, str_len);
+       VARATT_SIZEP(bstr) = VARHDRSZ + str_len;
+       conval = PointerGetDatum(bstr);
+
+       return makeConst(BYTEAOID, -1, conval, false, false);
 }
 
 /*-------------------------------------------------------------------------
@@ -3352,60 +4197,84 @@ string_to_const(const char *str, Oid datatype)
  * don't have any better idea about how to estimate.  Index-type-specific
  * knowledge can be incorporated in the type-specific routines.
  *
+ * One bit of index-type-specific knowledge we can relatively easily use
+ * in genericcostestimate is the estimate of the number of index tuples
+ * visited.  If numIndexTuples is not 0 then it is used as the estimate,
+ * otherwise we compute a generic estimate.
+ *
  *-------------------------------------------------------------------------
  */
 
 static void
-genericcostestimate(Query *root, RelOptInfo *rel,
+genericcostestimate(PlannerInfo *root,
                                        IndexOptInfo *index, List *indexQuals,
+                                       double numIndexTuples,
                                        Cost *indexStartupCost,
                                        Cost *indexTotalCost,
                                        Selectivity *indexSelectivity,
                                        double *indexCorrelation)
 {
-       double          numIndexTuples;
        double          numIndexPages;
-       List       *selectivityQuals = indexQuals;
+       QualCost        index_qual_cost;
+       double          qual_op_cost;
+       double          qual_arg_cost;
+       List       *selectivityQuals;
 
        /*
         * If the index is partial, AND the index predicate with the
         * explicitly given indexquals to produce a more accurate idea of the
-        * index restriction.  This may produce redundant clauses, which we
-        * hope that cnfify and clauselist_selectivity will deal with
-        * intelligently.
+        * index selectivity.  This may produce redundant clauses.      We get rid
+        * of exact duplicates in the code below.  We expect that most cases
+        * of partial redundancy (such as "x < 4" from the qual and "x < 5"
+        * from the predicate) will be recognized and handled correctly by
+        * clauselist_selectivity().  This assumption is somewhat fragile,
+        * since it depends on predicate_implied_by() and clauselist_selectivity()
+        * having similar capabilities, and there are certainly many cases where
+        * we will end up with a too-low selectivity estimate.  This will bias the
+        * system in favor of using partial indexes where possible, which is not
+        * necessarily a bad thing. But it'd be nice to do better someday.
         *
-        * Note that index->indpred and indexQuals are both in implicit-AND form
-        * to start with, which we have to make explicit to hand to
-        * canonicalize_qual, and then we get back implicit-AND form again.
+        * Note that index->indpred and indexQuals are both in implicit-AND form,
+        * so ANDing them together just takes merging the lists.  However,
+        * so ANDing them together is just a matter of merging the lists.  However,
+        * contains RestrictInfo nodes and the indpred does not.  It is okay
+        * to pass a mixed list to clauselist_selectivity, but we have to work
+        * a bit to generate a list without logical duplicates.  (We could
+        * just list_union indpred and strippedQuals, but then we'd not get
+        * caching of per-qual selectivity estimates.)
         */
        if (index->indpred != NIL)
        {
-               Expr       *andedQuals;
+               List       *strippedQuals;
+               List       *predExtraQuals;
 
-               andedQuals = make_ands_explicit(nconc(listCopy(index->indpred),
-                                                                                         indexQuals));
-               selectivityQuals = canonicalize_qual(andedQuals, true);
+               strippedQuals = get_actual_clauses(indexQuals);
+               predExtraQuals = list_difference(index->indpred, strippedQuals);
+               selectivityQuals = list_concat(predExtraQuals, indexQuals);
        }
+       else
+               selectivityQuals = indexQuals;
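To see what the list manipulation buys, consider a hypothetical partial index whose predicate is {y = 1} while the given indexQuals are {x < 4, y = 1}: get_actual_clauses() strips the RestrictInfo wrappers, list_difference() finds that the lone predicate clause exactly duplicates a qual and returns NIL, and list_concat() leaves selectivityQuals as just the original indexQuals, preserving the cached per-qual selectivities. With a predicate of {x < 5} instead, nothing matches exactly, so selectivityQuals becomes {x < 5, x < 4, y = 1} and the partial redundancy is left for clauselist_selectivity() to sort out, as the comment above anticipates.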
 
        /* Estimate the fraction of main-table tuples that will be visited */
        *indexSelectivity = clauselist_selectivity(root, selectivityQuals,
-                                                                                          lfirsti(rel->relids));
+                                                                                          index->rel->relid,
+                                                                                          JOIN_INNER);
 
        /*
-        * Estimate the number of tuples that will be visited.  We do it in
-        * this rather peculiar-looking way in order to get the right answer
-        * for partial indexes.  We can bound the number of tuples by the
-        * index size, in any case.
+        * If caller didn't give us an estimate, estimate the number of index
+        * tuples that will be visited.  We do it in this rather peculiar-looking
+        * way in order to get the right answer for partial indexes.
         */
-       numIndexTuples = *indexSelectivity * rel->tuples;
-
-       if (numIndexTuples > index->tuples)
-               numIndexTuples = index->tuples;
+       if (numIndexTuples <= 0.0)
+               numIndexTuples = *indexSelectivity * index->rel->tuples;
 
        /*
-        * Always estimate at least one tuple is touched, even when
+        * We can bound the number of tuples by the index size in any case.
+        * Also, always estimate at least one tuple is touched, even when
         * indexSelectivity estimate is tiny.
         */
+       if (numIndexTuples > index->tuples)
+               numIndexTuples = index->tuples;
        if (numIndexTuples < 1.0)
                numIndexTuples = 1.0;
 
@@ -3428,15 +4297,32 @@ genericcostestimate(Query *root, RelOptInfo *rel,
        /*
         * Compute the index access cost.
         *
-        * Our generic assumption is that the index pages will be read
+        * Disk cost: our generic assumption is that the index pages will be read
         * sequentially, so they have cost 1.0 each, not random_page_cost.
-        * Also, we charge for evaluation of the indexquals at each index
-        * tuple. All the costs are assumed to be paid incrementally during
-        * the scan.
         */
-       *indexStartupCost = 0;
-       *indexTotalCost = numIndexPages +
-               (cpu_index_tuple_cost + cost_qual_eval(indexQuals)) * numIndexTuples;
+       *indexTotalCost = numIndexPages;
+
+       /*
+        * CPU cost: any complex expressions in the indexquals will need to be
+        * evaluated once at the start of the scan to reduce them to runtime
+        * keys to pass to the index AM (see nodeIndexscan.c).  We model the
+        * per-tuple CPU costs as cpu_index_tuple_cost plus one
+        * cpu_operator_cost per indexqual operator.
+        *
+        * Note: this neglects the possible costs of rechecking lossy operators
+        * and OR-clause expressions.  Detecting that that might be needed
+        * seems more expensive than it's worth, though, considering all the
+        * other inaccuracies here ...
+        */
+       cost_qual_eval(&index_qual_cost, indexQuals);
+       qual_op_cost = cpu_operator_cost * list_length(indexQuals);
+       qual_arg_cost = index_qual_cost.startup +
+               index_qual_cost.per_tuple - qual_op_cost;
+       if (qual_arg_cost < 0)          /* just in case... */
+               qual_arg_cost = 0;
+       *indexStartupCost = qual_arg_cost;
+       *indexTotalCost += qual_arg_cost;
+       *indexTotalCost += numIndexTuples * (cpu_index_tuple_cost + qual_op_cost);
 
        /*
         * Generic assumption about index correlation: there isn't any.
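A small worked example may help show the shape of the revised cost formula. The sketch below is illustrative only: the page and tuple counts and the two-operator qual list are made up, and the two cost parameters are simply the usual planner defaults.

#include <stdio.h>

int
main(void)
{
	/* the usual default planner cost parameters */
	const double cpu_index_tuple_cost = 0.005;
	const double cpu_operator_cost = 0.0025;

	/* hypothetical inputs for one index scan */
	double		numIndexPages = 10.0;	/* index pages read */
	double		numIndexTuples = 100.0; /* index entries visited */
	int			numIndexQuals = 2;		/* operators in the indexqual list */
	double		qual_startup = 0.0;		/* cost_qual_eval() startup cost */
	double		qual_per_tuple = 2 * cpu_operator_cost; /* per-tuple cost of two
														 * plain "col op const" quals */
	double		qual_op_cost,
				qual_arg_cost,
				startup,
				total;

	/* one-time cost of reducing indexqual expressions to scan keys */
	qual_op_cost = cpu_operator_cost * numIndexQuals;
	qual_arg_cost = qual_startup + qual_per_tuple - qual_op_cost;
	if (qual_arg_cost < 0)
		qual_arg_cost = 0;

	startup = qual_arg_cost;
	total = numIndexPages						/* disk: 1.0 per page */
		+ qual_arg_cost							/* one-time CPU */
		+ numIndexTuples * (cpu_index_tuple_cost + qual_op_cost);

	/* prints: startup = 0.0000, total = 11.0000 */
	printf("startup = %.4f, total = %.4f\n", startup, total);
	return 0;
}

For plain "col op const" quals the argument cost works out to zero, so the entire CPU charge is per-tuple; more complex qual arguments (subexpressions, function calls) would instead surface as a nonzero startup cost.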
@@ -3448,66 +4334,164 @@ genericcostestimate(Query *root, RelOptInfo *rel,
 Datum
 btcostestimate(PG_FUNCTION_ARGS)
 {
-       Query      *root = (Query *) PG_GETARG_POINTER(0);
-       RelOptInfo *rel = (RelOptInfo *) PG_GETARG_POINTER(1);
-       IndexOptInfo *index = (IndexOptInfo *) PG_GETARG_POINTER(2);
-       List       *indexQuals = (List *) PG_GETARG_POINTER(3);
-       Cost       *indexStartupCost = (Cost *) PG_GETARG_POINTER(4);
-       Cost       *indexTotalCost = (Cost *) PG_GETARG_POINTER(5);
-       Selectivity *indexSelectivity = (Selectivity *) PG_GETARG_POINTER(6);
-       double     *indexCorrelation = (double *) PG_GETARG_POINTER(7);
+       PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
+       IndexOptInfo *index = (IndexOptInfo *) PG_GETARG_POINTER(1);
+       List       *indexQuals = (List *) PG_GETARG_POINTER(2);
+       Cost       *indexStartupCost = (Cost *) PG_GETARG_POINTER(3);
+       Cost       *indexTotalCost = (Cost *) PG_GETARG_POINTER(4);
+       Selectivity *indexSelectivity = (Selectivity *) PG_GETARG_POINTER(5);
+       double     *indexCorrelation = (double *) PG_GETARG_POINTER(6);
+       Oid                     relid;
+       AttrNumber      colnum;
+       HeapTuple       tuple;
+       double          numIndexTuples;
+       List       *indexBoundQuals;
+       int                     indexcol;
+       bool            eqQualHere;
+       ListCell   *l;
+
+       /*
+        * For a btree scan, only leading '=' quals plus inequality quals
+        * for the immediately next attribute contribute to index selectivity
+        * (these are the "boundary quals" that determine the starting and
+        * stopping points of the index scan).  Additional quals can suppress
+        * visits to the heap, so it's OK to count them in indexSelectivity,
+        * but they should not count for estimating numIndexTuples.  So we must
+        * examine the given indexQuals to find out which ones count as boundary
+        * quals.  We rely on the knowledge that they are given in index column
+        * order.
+        */
+       indexBoundQuals = NIL;
+       indexcol = 0;
+       eqQualHere = false;
+       foreach(l, indexQuals)
+       {
+               RestrictInfo *rinfo = (RestrictInfo *) lfirst(l);
+               Expr   *clause;
+               Oid             clause_op;
+               int             op_strategy;
+
+               Assert(IsA(rinfo, RestrictInfo));
+               clause = rinfo->clause;
+               Assert(IsA(clause, OpExpr));
+               clause_op = ((OpExpr *) clause)->opno;
+               if (match_index_to_operand(get_leftop(clause), indexcol, index))
+               {
+                       /* clause_op is correct */
+               }
+               else if (match_index_to_operand(get_rightop(clause), indexcol, index))
+               {
+                       /* Must flip operator to get the opclass member */
+                       clause_op = get_commutator(clause_op);
+               }
+               else
+               {
+                       /* Must be past the end of quals for indexcol, try next */
+                       if (!eqQualHere)
+                               break;                  /* done if no '=' qual for indexcol */
+                       indexcol++;
+                       eqQualHere = false;
+                       if (match_index_to_operand(get_leftop(clause), indexcol, index))
+                       {
+                               /* clause_op is correct */
+                       }
+                       else if (match_index_to_operand(get_rightop(clause),
+                                                                                       indexcol, index))
+                       {
+                               /* Must flip operator to get the opclass member */
+                               clause_op = get_commutator(clause_op);
+                       }
+                       else
+                       {
+                               /* No quals for new indexcol, so we are done */
+                               break;
+                       }
+               }
+               op_strategy = get_op_opclass_strategy(clause_op,
+                                                                                         index->classlist[indexcol]);
+               Assert(op_strategy != 0);                       /* not a member of opclass?? */
+               if (op_strategy == BTEqualStrategyNumber)
+                       eqQualHere = true;
+               indexBoundQuals = lappend(indexBoundQuals, rinfo);
+       }
+
+       /*
+        * If index is unique and we found an '=' clause for each column,
+        * we can just assume numIndexTuples = 1 and skip the expensive
+        * clauselist_selectivity calculations.
+        */
+       if (index->unique && indexcol == index->ncolumns - 1 && eqQualHere)
+               numIndexTuples = 1.0;
+       else
+       {
+               Selectivity btreeSelectivity;
+
+               btreeSelectivity = clauselist_selectivity(root, indexBoundQuals,
+                                                                                                 index->rel->relid,
+                                                                                                 JOIN_INNER);
+               numIndexTuples = btreeSelectivity * index->rel->tuples;
+       }
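The column-walking loop above is the subtle part. Here is a hedged, self-contained sketch of just the selection rule it implements (ignoring RestrictInfo handling, operator commutation, and the opclass strategy lookup; SimpleQual and count_boundary_quals are made-up names). For an index on (a, b, c) with quals a = 5, b > 3, b < 9, c = 10 given in index-column order, the first three count as boundary quals, while c = 10 is excluded because column b had no '=' qual.

#include <stdbool.h>
#include <stdio.h>

/*
 * Simplified model of one indexqual: the index column it constrains and
 * whether it is an equality qual.  Quals are assumed to arrive in
 * index-column order, as btcostestimate relies on.
 */
typedef struct
{
	int			column;			/* 0-based index column */
	bool		is_eq;			/* true for '=' quals */
} SimpleQual;

/* Return how many leading quals count as boundary quals. */
static int
count_boundary_quals(const SimpleQual *quals, int nquals)
{
	int			indexcol = 0;
	bool		eqQualHere = false;
	int			i;

	for (i = 0; i < nquals; i++)
	{
		if (quals[i].column != indexcol)
		{
			/* may only advance past a column that had an '=' qual */
			if (!eqQualHere || quals[i].column != indexcol + 1)
				break;
			indexcol++;
			eqQualHere = false;
		}
		if (quals[i].is_eq)
			eqQualHere = true;
	}
	return i;
}

int
main(void)
{
	/* index on (a, b, c); quals: a = 5, b > 3, b < 9, c = 10 */
	SimpleQual	quals[] = {
		{0, true},				/* a = 5  */
		{1, false},				/* b > 3  */
		{1, false},				/* b < 9  */
		{2, true}				/* c = 10 */
	};

	/* prints 3: c = 10 suppresses heap visits but does not bound the scan */
	printf("%d\n", count_boundary_quals(quals, 4));
	return 0;
}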
 
-       genericcostestimate(root, rel, index, indexQuals,
+       genericcostestimate(root, index, indexQuals, numIndexTuples,
                                                indexStartupCost, indexTotalCost,
                                                indexSelectivity, indexCorrelation);
 
        /*
-        * If it's a functional index, leave the default zero-correlation
-        * estimate in place.  If not, and if we can get an estimate for the
-        * first variable's ordering correlation C from pg_statistic, estimate
-        * the index correlation as C / number-of-columns. (The idea here is
-        * that multiple columns dilute the importance of the first column's
-        * ordering, but don't negate it entirely.)
+        * If we can get an estimate of the first column's ordering
+        * correlation C from pg_statistic, estimate the index correlation as
+        * C for a single-column index, or C * 0.75 for multiple columns.
+        * (The idea here is that multiple columns dilute the importance of
+        * the first column's ordering, but don't negate it entirely.  Before
+        * 8.0 we divided the correlation by the number of columns, but that
+        * seems too strong.)
         */
-       if (index->indproc == InvalidOid)
+       if (index->indexkeys[0] != 0)
        {
-               Oid                     relid;
-               HeapTuple       tuple;
-
-               relid = getrelid(lfirsti(rel->relids), root->rtable);
+               /* Simple variable --- look to stats for the underlying table */
+               relid = getrelid(index->rel->relid, root->parse->rtable);
                Assert(relid != InvalidOid);
-               tuple = SearchSysCache(STATRELATT,
-                                                          ObjectIdGetDatum(relid),
-                                                          Int16GetDatum(index->indexkeys[0]),
-                                                          0, 0);
-               if (HeapTupleIsValid(tuple))
+               colnum = index->indexkeys[0];
+       }
+       else
+       {
+               /* Expression --- maybe there are stats for the index itself */
+               relid = index->indexoid;
+               colnum = 1;
+       }
+
+       tuple = SearchSysCache(STATRELATT,
+                                                  ObjectIdGetDatum(relid),
+                                                  Int16GetDatum(colnum),
+                                                  0, 0);
+
+       if (HeapTupleIsValid(tuple))
+       {
+               Oid                     typid;
+               int32           typmod;
+               float4     *numbers;
+               int                     nnumbers;
+
+               /* XXX this code would break with different storage type */
+               get_atttypetypmod(relid, colnum, &typid, &typmod);
+
+               if (get_attstatsslot(tuple, typid, typmod,
+                                                        STATISTIC_KIND_CORRELATION,
+                                                        index->ordering[0],
+                                                        NULL, NULL, &numbers, &nnumbers))
                {
-                       Oid                     typid;
-                       int32           typmod;
-                       float4     *numbers;
-                       int                     nnumbers;
-
-                       get_atttypetypmod(relid, index->indexkeys[0],
-                                                         &typid, &typmod);
-                       if (get_attstatsslot(tuple, typid, typmod,
-                                                                STATISTIC_KIND_CORRELATION,
-                                                                index->ordering[0],
-                                                                NULL, NULL, &numbers, &nnumbers))
-                       {
-                               double          varCorrelation;
-                               int                     nKeys;
+                       double          varCorrelation;
 
-                               Assert(nnumbers == 1);
-                               varCorrelation = numbers[0];
-                               for (nKeys = 1; index->indexkeys[nKeys] != 0; nKeys++)
-                                        /* skip */ ;
+                       Assert(nnumbers == 1);
+                       varCorrelation = numbers[0];
 
-                               *indexCorrelation = varCorrelation / nKeys;
+                       if (index->ncolumns > 1)
+                               *indexCorrelation = varCorrelation * 0.75;
+                       else
+                               *indexCorrelation = varCorrelation;
 
-                               free_attstatsslot(typid, NULL, 0, numbers, nnumbers);
-                       }
-                       ReleaseSysCache(tuple);
+                       free_attstatsslot(typid, NULL, 0, numbers, nnumbers);
                }
+               ReleaseSysCache(tuple);
        }
 
        PG_RETURN_VOID();
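For illustration: if pg_statistic reports a first-column correlation of 0.8, a single-column index now gets indexCorrelation = 0.8 and any multi-column index gets 0.8 * 0.75 = 0.6, whereas the pre-8.0 rule of dividing by the number of key columns would have reported only about 0.27 for a three-column index.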
@@ -3516,16 +4500,15 @@ btcostestimate(PG_FUNCTION_ARGS)
 Datum
 rtcostestimate(PG_FUNCTION_ARGS)
 {
-       Query      *root = (Query *) PG_GETARG_POINTER(0);
-       RelOptInfo *rel = (RelOptInfo *) PG_GETARG_POINTER(1);
-       IndexOptInfo *index = (IndexOptInfo *) PG_GETARG_POINTER(2);
-       List       *indexQuals = (List *) PG_GETARG_POINTER(3);
-       Cost       *indexStartupCost = (Cost *) PG_GETARG_POINTER(4);
-       Cost       *indexTotalCost = (Cost *) PG_GETARG_POINTER(5);
-       Selectivity *indexSelectivity = (Selectivity *) PG_GETARG_POINTER(6);
-       double     *indexCorrelation = (double *) PG_GETARG_POINTER(7);
-
-       genericcostestimate(root, rel, index, indexQuals,
+       PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
+       IndexOptInfo *index = (IndexOptInfo *) PG_GETARG_POINTER(1);
+       List       *indexQuals = (List *) PG_GETARG_POINTER(2);
+       Cost       *indexStartupCost = (Cost *) PG_GETARG_POINTER(3);
+       Cost       *indexTotalCost = (Cost *) PG_GETARG_POINTER(4);
+       Selectivity *indexSelectivity = (Selectivity *) PG_GETARG_POINTER(5);
+       double     *indexCorrelation = (double *) PG_GETARG_POINTER(6);
+
+       genericcostestimate(root, index, indexQuals, 0.0,
                                                indexStartupCost, indexTotalCost,
                                                indexSelectivity, indexCorrelation);
 
@@ -3535,16 +4518,15 @@ rtcostestimate(PG_FUNCTION_ARGS)
 Datum
 hashcostestimate(PG_FUNCTION_ARGS)
 {
-       Query      *root = (Query *) PG_GETARG_POINTER(0);
-       RelOptInfo *rel = (RelOptInfo *) PG_GETARG_POINTER(1);
-       IndexOptInfo *index = (IndexOptInfo *) PG_GETARG_POINTER(2);
-       List       *indexQuals = (List *) PG_GETARG_POINTER(3);
-       Cost       *indexStartupCost = (Cost *) PG_GETARG_POINTER(4);
-       Cost       *indexTotalCost = (Cost *) PG_GETARG_POINTER(5);
-       Selectivity *indexSelectivity = (Selectivity *) PG_GETARG_POINTER(6);
-       double     *indexCorrelation = (double *) PG_GETARG_POINTER(7);
-
-       genericcostestimate(root, rel, index, indexQuals,
+       PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
+       IndexOptInfo *index = (IndexOptInfo *) PG_GETARG_POINTER(1);
+       List       *indexQuals = (List *) PG_GETARG_POINTER(2);
+       Cost       *indexStartupCost = (Cost *) PG_GETARG_POINTER(3);
+       Cost       *indexTotalCost = (Cost *) PG_GETARG_POINTER(4);
+       Selectivity *indexSelectivity = (Selectivity *) PG_GETARG_POINTER(5);
+       double     *indexCorrelation = (double *) PG_GETARG_POINTER(6);
+
+       genericcostestimate(root, index, indexQuals, 0.0,
                                                indexStartupCost, indexTotalCost,
                                                indexSelectivity, indexCorrelation);
 
@@ -3554,16 +4536,15 @@ hashcostestimate(PG_FUNCTION_ARGS)
 Datum
 gistcostestimate(PG_FUNCTION_ARGS)
 {
-       Query      *root = (Query *) PG_GETARG_POINTER(0);
-       RelOptInfo *rel = (RelOptInfo *) PG_GETARG_POINTER(1);
-       IndexOptInfo *index = (IndexOptInfo *) PG_GETARG_POINTER(2);
-       List       *indexQuals = (List *) PG_GETARG_POINTER(3);
-       Cost       *indexStartupCost = (Cost *) PG_GETARG_POINTER(4);
-       Cost       *indexTotalCost = (Cost *) PG_GETARG_POINTER(5);
-       Selectivity *indexSelectivity = (Selectivity *) PG_GETARG_POINTER(6);
-       double     *indexCorrelation = (double *) PG_GETARG_POINTER(7);
-
-       genericcostestimate(root, rel, index, indexQuals,
+       PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
+       IndexOptInfo *index = (IndexOptInfo *) PG_GETARG_POINTER(1);
+       List       *indexQuals = (List *) PG_GETARG_POINTER(2);
+       Cost       *indexStartupCost = (Cost *) PG_GETARG_POINTER(3);
+       Cost       *indexTotalCost = (Cost *) PG_GETARG_POINTER(4);
+       Selectivity *indexSelectivity = (Selectivity *) PG_GETARG_POINTER(5);
+       double     *indexCorrelation = (double *) PG_GETARG_POINTER(6);
+
+       genericcostestimate(root, index, indexQuals, 0.0,
                                                indexStartupCost, indexTotalCost,
                                                indexSelectivity, indexCorrelation);