Pgindent run before 9.1 beta2.
diff --git a/src/backend/optimizer/path/costsize.c b/src/backend/optimizer/path/costsize.c
index f2a6d294ee8ba5c4da916ac2499a8939b49e4b2c..bb38768bd4358f72896e2d2c549bbd64dedcd24d 100644
--- a/src/backend/optimizer/path/costsize.c
+++ b/src/backend/optimizer/path/costsize.c
  * detail.     Note that all of these parameters are user-settable, in case
  * the default values are drastically off for a particular platform.
  *
+ * seq_page_cost and random_page_cost can also be overridden for an individual
+ * tablespace, in case some data is on a fast disk and other data is on a slow
+ * disk.  Per-tablespace overrides never apply to temporary work files such as
+ * an external sort or a materialize node that overflows work_mem.
+ *
  * We compute two separate costs for each path:
  *             total_cost: total estimated cost to fetch all tuples
  *             startup_cost: cost that is expended before first tuple is fetched
  * the non-cost fields of the passed XXXPath to be filled in.
  *
  *
- * Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1996-2011, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- *       $PostgreSQL: pgsql/src/backend/optimizer/path/costsize.c,v 1.167 2006/10/04 00:29:53 momjian Exp $
+ *       src/backend/optimizer/path/costsize.c
  *
  *-------------------------------------------------------------------------
  */
 
 #include <math.h>
 
+#include "executor/executor.h"
 #include "executor/nodeHash.h"
 #include "miscadmin.h"
+#include "nodes/nodeFuncs.h"
 #include "optimizer/clauses.h"
 #include "optimizer/cost.h"
 #include "optimizer/pathnode.h"
+#include "optimizer/placeholder.h"
+#include "optimizer/plancat.h"
+#include "optimizer/planmain.h"
+#include "optimizer/restrictinfo.h"
 #include "parser/parsetree.h"
 #include "utils/lsyscache.h"
 #include "utils/selfuncs.h"
+#include "utils/spccache.h"
 #include "utils/tuplesort.h"
 
 
@@ -94,7 +106,7 @@ double               cpu_operator_cost = DEFAULT_CPU_OPERATOR_COST;
 
 int                    effective_cache_size = DEFAULT_EFFECTIVE_CACHE_SIZE;
 
-Cost           disable_cost = 100000000.0;
+Cost           disable_cost = 1.0e10;
 
 bool           enable_seqscan = true;
 bool           enable_indexscan = true;
@@ -103,14 +115,29 @@ bool              enable_tidscan = true;
 bool           enable_sort = true;
 bool           enable_hashagg = true;
 bool           enable_nestloop = true;
+bool           enable_material = true;
 bool           enable_mergejoin = true;
 bool           enable_hashjoin = true;
 
-
-static bool cost_qual_eval_walker(Node *node, QualCost *total);
-static Selectivity approx_selectivity(PlannerInfo *root, List *quals,
-                                  JoinType jointype);
-static Selectivity join_in_selectivity(JoinPath *path, PlannerInfo *root);
+typedef struct
+{
+       PlannerInfo *root;
+       QualCost        total;
+} cost_qual_eval_context;
+
+static MergeScanSelCache *cached_scansel(PlannerInfo *root,
+                          RestrictInfo *rinfo,
+                          PathKey *pathkey);
+static void cost_rescan(PlannerInfo *root, Path *path,
+                       Cost *rescan_startup_cost, Cost *rescan_total_cost);
+static bool cost_qual_eval_walker(Node *node, cost_qual_eval_context *context);
+static bool adjust_semi_join(PlannerInfo *root, JoinPath *path,
+                                SpecialJoinInfo *sjinfo,
+                                Selectivity *outer_match_frac,
+                                Selectivity *match_count,
+                                bool *indexed_join_quals);
+static double approx_tuple_count(PlannerInfo *root, JoinPath *path,
+                                  List *quals);
 static void set_rel_width(PlannerInfo *root, RelOptInfo *rel);
 static double relation_byte_size(double tuples, int width);
 static double page_size(double tuples, int width);
@@ -145,6 +172,7 @@ void
 cost_seqscan(Path *path, PlannerInfo *root,
                         RelOptInfo *baserel)
 {
+       double          spc_seq_page_cost;
        Cost            startup_cost = 0;
        Cost            run_cost = 0;
        Cost            cpu_per_tuple;
@@ -156,10 +184,15 @@ cost_seqscan(Path *path, PlannerInfo *root,
        if (!enable_seqscan)
                startup_cost += disable_cost;
 
+       /* fetch estimated page cost for tablespace containing table */
+       get_tablespace_page_costs(baserel->reltablespace,
+                                                         NULL,
+                                                         &spc_seq_page_cost);
+
        /*
         * disk costs
         */
-       run_cost += seq_page_cost * baserel->pages;
+       run_cost += spc_seq_page_cost * baserel->pages;
 
        /* CPU costs */
        startup_cost += baserel->baserestrictcost.startup;
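Illustrative sketch, not part of the patch: a tiny standalone program showing how the per-tablespace seq_page_cost and the CPU charges combine for a sequential scan. The input numbers are made up, the cost constants are the stock planner defaults, and nothing below is PostgreSQL API.

#include <stdio.h>

int
main(void)
{
	double	spc_seq_page_cost = 1.0;	/* per-tablespace override of seq_page_cost */
	double	cpu_tuple_cost = 0.01;		/* default */
	double	pages = 1000.0;				/* assumed baserel->pages */
	double	tuples = 100000.0;			/* assumed baserel->tuples */
	double	qual_startup = 0.0;			/* assumed baserestrictcost.startup */
	double	qual_per_tuple = 0.0025;	/* assumed baserestrictcost.per_tuple */

	double	startup_cost = qual_startup;
	double	run_cost = spc_seq_page_cost * pages;	/* disk cost */

	/* per-row CPU charge, as in the unchanged tail of cost_seqscan */
	run_cost += (cpu_tuple_cost + qual_per_tuple) * tuples;

	printf("seqscan: startup=%.2f total=%.2f\n",
		   startup_cost, startup_cost + run_cost);
	return 0;
}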
@@ -176,6 +209,7 @@ cost_seqscan(Path *path, PlannerInfo *root,
  *
  * 'index' is the index to be used
  * 'indexQuals' is the list of applicable qual clauses (implicit AND semantics)
+ * 'indexOrderBys' is the list of ORDER BY operators for amcanorderbyop indexes
  * 'outer_rel' is the outer relation when we are considering using the index
  *             scan as the inside of a nestloop join (hence, some of the indexQuals
  *             are join clauses, and we should expect repeated scans of the index);
@@ -185,18 +219,19 @@ cost_seqscan(Path *path, PlannerInfo *root,
  * additional fields of the IndexPath besides startup_cost and total_cost.
  * These fields are needed if the IndexPath is used in a BitmapIndexScan.
  *
+ * indexQuals is a list of RestrictInfo nodes, but indexOrderBys is a list of
+ * bare expressions.
+ *
  * NOTE: 'indexQuals' must contain only clauses usable as index restrictions.
  * Any additional quals evaluated as qpquals may reduce the number of returned
  * tuples, but they won't reduce the number of tuples we have to fetch from
  * the table, so they don't reduce the scan cost.
- *
- * NOTE: as of 8.0, indexQuals is a list of RestrictInfo nodes, where formerly
- * it was a list of bare clause expressions.
  */
 void
 cost_index(IndexPath *path, PlannerInfo *root,
                   IndexOptInfo *index,
                   List *indexQuals,
+                  List *indexOrderBys,
                   RelOptInfo *outer_rel)
 {
        RelOptInfo *baserel = index->rel;
@@ -207,6 +242,8 @@ cost_index(IndexPath *path, PlannerInfo *root,
        Selectivity indexSelectivity;
        double          indexCorrelation,
                                csquared;
+       double          spc_seq_page_cost,
+                               spc_random_page_cost;
        Cost            min_IO_cost,
                                max_IO_cost;
        Cost            cpu_per_tuple;
@@ -228,10 +265,11 @@ cost_index(IndexPath *path, PlannerInfo *root,
         * the fraction of main-table tuples we will have to retrieve) and its
         * correlation to the main-table tuple order.
         */
-       OidFunctionCall8(index->amcostestimate,
+       OidFunctionCall9(index->amcostestimate,
                                         PointerGetDatum(root),
                                         PointerGetDatum(index),
                                         PointerGetDatum(indexQuals),
+                                        PointerGetDatum(indexOrderBys),
                                         PointerGetDatum(outer_rel),
                                         PointerGetDatum(&indexStartupCost),
                                         PointerGetDatum(&indexTotalCost),
@@ -253,13 +291,18 @@ cost_index(IndexPath *path, PlannerInfo *root,
        /* estimate number of main-table tuples fetched */
        tuples_fetched = clamp_row_est(indexSelectivity * baserel->tuples);
 
+       /* fetch estimated page costs for tablespace containing table */
+       get_tablespace_page_costs(baserel->reltablespace,
+                                                         &spc_random_page_cost,
+                                                         &spc_seq_page_cost);
+
        /*----------
         * Estimate number of main-table pages fetched, and compute I/O cost.
         *
         * When the index ordering is uncorrelated with the table ordering,
         * we use an approximation proposed by Mackert and Lohman (see
         * index_pages_fetched() for details) to compute the number of pages
-        * fetched, and then charge random_page_cost per page fetched.
+        * fetched, and then charge spc_random_page_cost per page fetched.
         *
         * When the index ordering is exactly correlated with the table ordering
         * (just after a CLUSTER, for example), the number of pages fetched should
@@ -267,7 +310,7 @@ cost_index(IndexPath *path, PlannerInfo *root,
         * will be sequential fetches, not the random fetches that occur in the
         * uncorrelated case.  So if the number of pages is more than 1, we
         * ought to charge
-        *              random_page_cost + (pages_fetched - 1) * seq_page_cost
+        *              spc_random_page_cost + (pages_fetched - 1) * spc_seq_page_cost
         * For partially-correlated indexes, we ought to charge somewhere between
         * these two estimates.  We currently interpolate linearly between the
         * estimates based on the correlation squared (XXX is that appropriate?).
@@ -276,13 +319,12 @@ cost_index(IndexPath *path, PlannerInfo *root,
        if (outer_rel != NULL && outer_rel->rows > 1)
        {
                /*
-                * For repeated indexscans, scale up the number of tuples fetched in
+                * For repeated indexscans, the appropriate estimate for the
+                * uncorrelated case is to scale up the number of tuples fetched in
                 * the Mackert and Lohman formula by the number of scans, so that we
-                * estimate the number of pages fetched by all the scans. Then
+                * estimate the number of pages fetched by all the scans; then
                 * pro-rate the costs for one scan.  In this case we assume all the
-                * fetches are random accesses.  XXX it'd be good to include
-                * correlation in this model, but it's not clear how to do that
-                * without double-counting cache effects.
+                * fetches are random accesses.
                 */
                double          num_scans = outer_rel->rows;
 
@@ -291,7 +333,26 @@ cost_index(IndexPath *path, PlannerInfo *root,
                                                                                        (double) index->pages,
                                                                                        root);
 
-               run_cost += (pages_fetched * random_page_cost) / num_scans;
+               max_IO_cost = (pages_fetched * spc_random_page_cost) / num_scans;
+
+               /*
+                * In the perfectly correlated case, the number of pages touched by
+                * each scan is selectivity * table_size, and we can use the Mackert
+                * and Lohman formula at the page level to estimate how much work is
+                * saved by caching across scans.  We still assume all the fetches are
+                * random, though, which is an overestimate that's hard to correct for
+                * without double-counting the cache effects.  (But in most cases
+                * where such a plan is actually interesting, only one page would get
+                * fetched per scan anyway, so it shouldn't matter much.)
+                */
+               pages_fetched = ceil(indexSelectivity * (double) baserel->pages);
+
+               pages_fetched = index_pages_fetched(pages_fetched * num_scans,
+                                                                                       baserel->pages,
+                                                                                       (double) index->pages,
+                                                                                       root);
+
+               min_IO_cost = (pages_fetched * spc_random_page_cost) / num_scans;
        }
        else
        {
@@ -305,22 +366,22 @@ cost_index(IndexPath *path, PlannerInfo *root,
                                                                                        root);
 
                /* max_IO_cost is for the perfectly uncorrelated case (csquared=0) */
-               max_IO_cost = pages_fetched * random_page_cost;
+               max_IO_cost = pages_fetched * spc_random_page_cost;
 
                /* min_IO_cost is for the perfectly correlated case (csquared=1) */
                pages_fetched = ceil(indexSelectivity * (double) baserel->pages);
-               min_IO_cost = random_page_cost;
+               min_IO_cost = spc_random_page_cost;
                if (pages_fetched > 1)
-                       min_IO_cost += (pages_fetched - 1) * seq_page_cost;
+                       min_IO_cost += (pages_fetched - 1) * spc_seq_page_cost;
+       }
 
-               /*
-                * Now interpolate based on estimated index order correlation to get
-                * total disk I/O cost for main table accesses.
-                */
-               csquared = indexCorrelation * indexCorrelation;
+       /*
+        * Now interpolate based on estimated index order correlation to get total
+        * disk I/O cost for main table accesses.
+        */
+       csquared = indexCorrelation * indexCorrelation;
 
-               run_cost += max_IO_cost + csquared * (min_IO_cost - max_IO_cost);
-       }
+       run_cost += max_IO_cost + csquared * (min_IO_cost - max_IO_cost);
 
        /*
         * Estimate CPU costs per tuple.
@@ -339,7 +400,7 @@ cost_index(IndexPath *path, PlannerInfo *root,
        {
                QualCost        index_qual_cost;
 
-               cost_qual_eval(&index_qual_cost, indexQuals);
+               cost_qual_eval(&index_qual_cost, indexQuals, root);
                /* any startup cost still has to be paid ... */
                cpu_per_tuple -= index_qual_cost.per_tuple;
        }
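Illustrative sketch, not part of the patch: the min/max I/O interpolation that now applies to both the repeated-scan and single-scan branches, isolated as a helper. The uncorrelated page count is taken as a precomputed input standing in for index_pages_fetched(); all names here are assumptions for the example.

#include <math.h>

static double
index_interp_io_cost(double indexSelectivity, double baserel_pages,
					 double pages_fetched_uncorrelated,	/* from index_pages_fetched() */
					 double indexCorrelation,
					 double spc_random_page_cost, double spc_seq_page_cost)
{
	/* perfectly uncorrelated: every fetched page is a random read */
	double	max_IO_cost = pages_fetched_uncorrelated * spc_random_page_cost;

	/* perfectly correlated: one random read, then sequential reads */
	double	pages_fetched = ceil(indexSelectivity * baserel_pages);
	double	min_IO_cost = spc_random_page_cost;

	if (pages_fetched > 1)
		min_IO_cost += (pages_fetched - 1) * spc_seq_page_cost;

	/* interpolate on the squared index-order correlation */
	double	csquared = indexCorrelation * indexCorrelation;

	return max_IO_cost + csquared * (min_IO_cost - max_IO_cost);
}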
@@ -516,6 +577,8 @@ cost_bitmap_heap_scan(Path *path, PlannerInfo *root, RelOptInfo *baserel,
        Cost            cost_per_page;
        double          tuples_fetched;
        double          pages_fetched;
+       double          spc_seq_page_cost,
+                               spc_random_page_cost;
        double          T;
 
        /* Should only be applied to base relations */
@@ -534,6 +597,11 @@ cost_bitmap_heap_scan(Path *path, PlannerInfo *root, RelOptInfo *baserel,
 
        startup_cost += indexTotalCost;
 
+       /* Fetch estimated page costs for tablespace containing table. */
+       get_tablespace_page_costs(baserel->reltablespace,
+                                                         &spc_random_page_cost,
+                                                         &spc_seq_page_cost);
+
        /*
         * Estimate number of main-table pages fetched.
         */
@@ -572,17 +640,18 @@ cost_bitmap_heap_scan(Path *path, PlannerInfo *root, RelOptInfo *baserel,
                pages_fetched = ceil(pages_fetched);
 
        /*
-        * For small numbers of pages we should charge random_page_cost apiece,
-        * while if nearly all the table's pages are being read, it's more
-        * appropriate to charge seq_page_cost apiece.  The effect is nonlinear,
-        * too. For lack of a better idea, interpolate like this to determine the
-        * cost per page.
+        * For small numbers of pages we should charge spc_random_page_cost
+        * apiece, while if nearly all the table's pages are being read, it's more
+        * appropriate to charge spc_seq_page_cost apiece.      The effect is
+        * nonlinear, too. For lack of a better idea, interpolate like this to
+        * determine the cost per page.
         */
        if (pages_fetched >= 2.0)
-               cost_per_page = random_page_cost -
-                       (random_page_cost - seq_page_cost) * sqrt(pages_fetched / T);
+               cost_per_page = spc_random_page_cost -
+                       (spc_random_page_cost - spc_seq_page_cost)
+                       * sqrt(pages_fetched / T);
        else
-               cost_per_page = random_page_cost;
+               cost_per_page = spc_random_page_cost;
 
        run_cost += pages_fetched * cost_per_page;
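Illustrative sketch, not part of the patch: the per-page cost interpolation above as a standalone helper, where T is the total number of pages in the table.

#include <math.h>

static double
bitmap_cost_per_page(double pages_fetched, double T,
					 double spc_random_page_cost, double spc_seq_page_cost)
{
	/* few pages: all random; nearly all pages: approach sequential cost */
	if (pages_fetched >= 2.0)
		return spc_random_page_cost -
			(spc_random_page_cost - spc_seq_page_cost) * sqrt(pages_fetched / T);
	return spc_random_page_cost;
}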
 
@@ -614,6 +683,14 @@ cost_bitmap_tree_node(Path *path, Cost *cost, Selectivity *selec)
        {
                *cost = ((IndexPath *) path)->indextotalcost;
                *selec = ((IndexPath *) path)->indexselectivity;
+
+               /*
+                * Charge a small amount per retrieved tuple to reflect the costs of
+                * manipulating the bitmap.  This is mostly to make sure that a bitmap
+                * scan doesn't look to be the same cost as an indexscan to retrieve a
+                * single tuple.
+                */
+               *cost += 0.1 * cpu_operator_cost * ((IndexPath *) path)->rows;
        }
        else if (IsA(path, BitmapAndPath))
        {
@@ -626,7 +703,10 @@ cost_bitmap_tree_node(Path *path, Cost *cost, Selectivity *selec)
                *selec = ((BitmapOrPath *) path)->bitmapselectivity;
        }
        else
+       {
                elog(ERROR, "unrecognized node type: %d", nodeTag(path));
+               *cost = *selec = 0;             /* keep compiler quiet */
+       }
 }
 
 /*
@@ -730,17 +810,17 @@ cost_tidscan(Path *path, PlannerInfo *root,
 {
        Cost            startup_cost = 0;
        Cost            run_cost = 0;
+       bool            isCurrentOf = false;
        Cost            cpu_per_tuple;
+       QualCost        tid_qual_cost;
        int                     ntuples;
        ListCell   *l;
+       double          spc_random_page_cost;
 
        /* Should only be applied to base relations */
        Assert(baserel->relid > 0);
        Assert(baserel->rtekind == RTE_RELATION);
 
-       if (!enable_tidscan)
-               startup_cost += disable_cost;
-
        /* Count how many tuples we expect to retrieve */
        ntuples = 0;
        foreach(l, tidquals)
@@ -753,6 +833,12 @@ cost_tidscan(Path *path, PlannerInfo *root,
 
                        ntuples += estimate_array_length(arraynode);
                }
+               else if (IsA(lfirst(l), CurrentOfExpr))
+               {
+                       /* CURRENT OF yields 1 tuple */
+                       isCurrentOf = true;
+                       ntuples++;
+               }
                else
                {
                        /* It's just CTID = something, count 1 tuple */
@@ -760,12 +846,41 @@ cost_tidscan(Path *path, PlannerInfo *root,
                }
        }
 
+       /*
+        * We must force TID scan for WHERE CURRENT OF, because only nodeTidscan.c
+        * understands how to do it correctly.  Therefore, honor enable_tidscan
+        * only when CURRENT OF isn't present.  Also note that cost_qual_eval
+        * counts a CurrentOfExpr as having startup cost disable_cost, which we
+        * subtract off here; that's to prevent other plan types such as seqscan
+        * from winning.
+        */
+       if (isCurrentOf)
+       {
+               Assert(baserel->baserestrictcost.startup >= disable_cost);
+               startup_cost -= disable_cost;
+       }
+       else if (!enable_tidscan)
+               startup_cost += disable_cost;
+
+       /*
+        * The TID qual expressions will be computed once, any other baserestrict
+        * quals once per retrieved tuple.
+        */
+       cost_qual_eval(&tid_qual_cost, tidquals, root);
+
+       /* fetch estimated page cost for tablespace containing table */
+       get_tablespace_page_costs(baserel->reltablespace,
+                                                         &spc_random_page_cost,
+                                                         NULL);
+
        /* disk costs --- assume each tuple on a different page */
-       run_cost += random_page_cost * ntuples;
+       run_cost += spc_random_page_cost * ntuples;
 
        /* CPU costs */
-       startup_cost += baserel->baserestrictcost.startup;
-       cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
+       startup_cost += baserel->baserestrictcost.startup +
+               tid_qual_cost.per_tuple;
+       cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple -
+               tid_qual_cost.per_tuple;
        run_cost += cpu_per_tuple * ntuples;
 
        path->startup_cost = startup_cost;
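Illustrative sketch, not part of the patch: the new split of CPU charges, with the TID-qual cost paid once at startup and backed out of the per-tuple charge. The helper and its parameter names are assumptions for the example.

static void
tidscan_cpu_costs(double ntuples, double cpu_tuple_cost,
				  double tidqual_per_tuple,		/* cost_qual_eval of the tidquals */
				  double restrict_startup, double restrict_per_tuple,
				  double *startup_cost, double *run_cost)
{
	/* the TID qual expressions are computed once per scan... */
	*startup_cost += restrict_startup + tidqual_per_tuple;

	/* ...so they are excluded from the per-retrieved-tuple charge */
	*run_cost += (cpu_tuple_cost + restrict_per_tuple - tidqual_per_tuple) * ntuples;
}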
@@ -813,21 +928,34 @@ cost_functionscan(Path *path, PlannerInfo *root, RelOptInfo *baserel)
        Cost            startup_cost = 0;
        Cost            run_cost = 0;
        Cost            cpu_per_tuple;
+       RangeTblEntry *rte;
+       QualCost        exprcost;
 
        /* Should only be applied to base relations that are functions */
        Assert(baserel->relid > 0);
-       Assert(baserel->rtekind == RTE_FUNCTION);
+       rte = planner_rt_fetch(baserel->relid, root);
+       Assert(rte->rtekind == RTE_FUNCTION);
 
        /*
-        * For now, estimate function's cost at one operator eval per function
-        * call.  Someday we should revive the function cost estimate columns in
-        * pg_proc...
+        * Estimate costs of executing the function expression.
+        *
+        * Currently, nodeFunctionscan.c always executes the function to
+        * completion before returning any rows, and caches the results in a
+        * tuplestore.  So the function eval cost is all startup cost, and per-row
+        * costs are minimal.
+        *
+        * XXX in principle we ought to charge tuplestore spill costs if the
+        * number of rows is large.  However, given how phony our rowcount
+        * estimates for functions tend to be, there's not a lot of point in that
+        * refinement right now.
         */
-       cpu_per_tuple = cpu_operator_cost;
+       cost_qual_eval_node(&exprcost, rte->funcexpr, root);
+
+       startup_cost += exprcost.startup + exprcost.per_tuple;
 
        /* Add scanning CPU costs */
        startup_cost += baserel->baserestrictcost.startup;
-       cpu_per_tuple += cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
+       cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
        run_cost += cpu_per_tuple * baserel->tuples;
 
        path->startup_cost = startup_cost;
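Illustrative sketch, not part of the patch: the cost shape this gives a function scan, with the whole function-evaluation cost landing in startup_cost because the result is materialized in a tuplestore. Names are assumptions for the example.

static void
functionscan_costs(double expr_startup, double expr_per_tuple,
				   double restrict_startup, double restrict_per_tuple,
				   double cpu_tuple_cost, double tuples,
				   double *startup_cost, double *total_cost)
{
	/* one complete evaluation of the function expression, all at startup */
	*startup_cost = expr_startup + expr_per_tuple + restrict_startup;

	/* per-row work is just reading the tuplestore and checking quals */
	*total_cost = *startup_cost + (cpu_tuple_cost + restrict_per_tuple) * tuples;
}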
@@ -864,34 +992,121 @@ cost_valuesscan(Path *path, PlannerInfo *root, RelOptInfo *baserel)
        path->total_cost = startup_cost + run_cost;
 }
 
+/*
+ * cost_ctescan
+ *       Determines and returns the cost of scanning a CTE RTE.
+ *
+ * Note: this is used for both self-reference and regular CTEs; the
+ * possible cost differences are below the threshold of what we could
+ * estimate accurately anyway. Note that the costs of evaluating the
+ * referenced CTE query are added into the final plan as initplan costs,
+ * and should NOT be counted here.
+ */
+void
+cost_ctescan(Path *path, PlannerInfo *root, RelOptInfo *baserel)
+{
+       Cost            startup_cost = 0;
+       Cost            run_cost = 0;
+       Cost            cpu_per_tuple;
+
+       /* Should only be applied to base relations that are CTEs */
+       Assert(baserel->relid > 0);
+       Assert(baserel->rtekind == RTE_CTE);
+
+       /* Charge one CPU tuple cost per row for tuplestore manipulation */
+       cpu_per_tuple = cpu_tuple_cost;
+
+       /* Add scanning CPU costs */
+       startup_cost += baserel->baserestrictcost.startup;
+       cpu_per_tuple += cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
+       run_cost += cpu_per_tuple * baserel->tuples;
+
+       path->startup_cost = startup_cost;
+       path->total_cost = startup_cost + run_cost;
+}
+
+/*
+ * cost_recursive_union
+ *       Determines and returns the cost of performing a recursive union,
+ *       and also the estimated output size.
+ *
+ * We are given Plans for the nonrecursive and recursive terms.
+ *
+ * Note that the arguments and output are Plans, not Paths as in most of
+ * the rest of this module.  That's because we don't bother setting up a
+ * Path representation for recursive union --- we have only one way to do it.
+ */
+void
+cost_recursive_union(Plan *runion, Plan *nrterm, Plan *rterm)
+{
+       Cost            startup_cost;
+       Cost            total_cost;
+       double          total_rows;
+
+       /* We probably have decent estimates for the non-recursive term */
+       startup_cost = nrterm->startup_cost;
+       total_cost = nrterm->total_cost;
+       total_rows = nrterm->plan_rows;
+
+       /*
+        * We arbitrarily assume that about 10 recursive iterations will be
+        * needed, and that we've managed to get a good fix on the cost and output
+        * size of each one of them.  These are mighty shaky assumptions but it's
+        * hard to see how to do better.
+        */
+       total_cost += 10 * rterm->total_cost;
+       total_rows += 10 * rterm->plan_rows;
+
+       /*
+        * Also charge cpu_tuple_cost per row to account for the costs of
+        * manipulating the tuplestores.  (We don't worry about possible
+        * spill-to-disk costs.)
+        */
+       total_cost += cpu_tuple_cost * total_rows;
+
+       runion->startup_cost = startup_cost;
+       runion->total_cost = total_cost;
+       runion->plan_rows = total_rows;
+       runion->plan_width = Max(nrterm->plan_width, rterm->plan_width);
+}
+
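Illustrative sketch, not part of the patch: cost_recursive_union's arithmetic as a standalone helper, with the assumed 10 executions of the recursive term spelled out.

static void
recursive_union_costs(double nrterm_total_cost, double nrterm_rows,
					  double rterm_total_cost, double rterm_rows,
					  double cpu_tuple_cost,
					  double *total_cost, double *total_rows)
{
	/* arbitrarily assume about 10 iterations of the recursive term */
	*total_cost = nrterm_total_cost + 10 * rterm_total_cost;
	*total_rows = nrterm_rows + 10 * rterm_rows;

	/* tuplestore bookkeeping, cpu_tuple_cost per row */
	*total_cost += cpu_tuple_cost * (*total_rows);
}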
 /*
  * cost_sort
  *       Determines and returns the cost of sorting a relation, including
  *       the cost of reading the input data.
  *
- * If the total volume of data to sort is less than work_mem, we will do
+ * If the total volume of data to sort is less than sort_mem, we will do
  * an in-memory sort, which requires no I/O and about t*log2(t) tuple
  * comparisons for t tuples.
  *
- * If the total volume exceeds work_mem, we switch to a tape-style merge
+ * If the total volume exceeds sort_mem, we switch to a tape-style merge
  * algorithm.  There will still be about t*log2(t) tuple comparisons in
  * total, but we will also need to write and read each tuple once per
  * merge pass. We expect about ceil(logM(r)) merge passes where r is the
  * number of initial runs formed and M is the merge order used by tuplesort.c.
- * Since the average initial run should be about twice work_mem, we have
- *             disk traffic = 2 * relsize * ceil(logM(p / (2*work_mem)))
+ * Since the average initial run should be about twice sort_mem, we have
+ *             disk traffic = 2 * relsize * ceil(logM(p / (2*sort_mem)))
  *             cpu = comparison_cost * t * log2(t)
  *
+ * If the sort is bounded (i.e., only the first k result tuples are needed)
+ * and k tuples can fit into sort_mem, we use a heap method that keeps only
+ * k tuples in the heap; this will require about t*log2(k) tuple comparisons.
+ *
  * The disk traffic is assumed to be 3/4ths sequential and 1/4th random
  * accesses (XXX can't we refine that guess?)
  *
- * We charge two operator evals per tuple comparison, which should be in
- * the right ballpark in most cases.
+ * By default, we charge two operator evals per tuple comparison, which should
+ * be in the right ballpark in most cases.     The caller can tweak this by
+ * specifying nonzero comparison_cost; typically that's used for any extra
+ * work that has to be done to prepare the inputs to the comparison operators.
  *
  * 'pathkeys' is a list of sort keys
  * 'input_cost' is the total cost for reading the input data
  * 'tuples' is the number of tuples in the relation
  * 'width' is the average tuple width in bytes
+ * 'comparison_cost' is the extra cost per comparison, if any
+ * 'sort_mem' is the number of kilobytes of work memory allowed for the sort
+ * 'limit_tuples' is the bound on the number of output tuples; -1 if no bound
  *
  * NOTE: some callers currently pass NIL for pathkeys because they
  * can't conveniently supply the sort keys.  Since this routine doesn't
@@ -902,12 +1117,16 @@ cost_valuesscan(Path *path, PlannerInfo *root, RelOptInfo *baserel)
  */
 void
 cost_sort(Path *path, PlannerInfo *root,
-                 List *pathkeys, Cost input_cost, double tuples, int width)
+                 List *pathkeys, Cost input_cost, double tuples, int width,
+                 Cost comparison_cost, int sort_mem,
+                 double limit_tuples)
 {
        Cost            startup_cost = input_cost;
        Cost            run_cost = 0;
-       double          nbytes = relation_byte_size(tuples, width);
-       long            work_mem_bytes = work_mem * 1024L;
+       double          input_bytes = relation_byte_size(tuples, width);
+       double          output_bytes;
+       double          output_tuples;
+       long            sort_mem_bytes = sort_mem * 1024L;
 
        if (!enable_sort)
                startup_cost += disable_cost;
@@ -919,23 +1138,41 @@ cost_sort(Path *path, PlannerInfo *root,
        if (tuples < 2.0)
                tuples = 2.0;
 
-       /*
-        * CPU costs
-        *
-        * Assume about two operator evals per tuple comparison and N log2 N
-        * comparisons
-        */
-       startup_cost += 2.0 * cpu_operator_cost * tuples * LOG2(tuples);
+       /* Include the default cost-per-comparison */
+       comparison_cost += 2.0 * cpu_operator_cost;
 
-       /* disk costs */
-       if (nbytes > work_mem_bytes)
+       /* Do we have a useful LIMIT? */
+       if (limit_tuples > 0 && limit_tuples < tuples)
        {
-               double          npages = ceil(nbytes / BLCKSZ);
-               double          nruns = (nbytes / work_mem_bytes) * 0.5;
-               double          mergeorder = tuplesort_merge_order(work_mem_bytes);
+               output_tuples = limit_tuples;
+               output_bytes = relation_byte_size(output_tuples, width);
+       }
+       else
+       {
+               output_tuples = tuples;
+               output_bytes = input_bytes;
+       }
+
+       if (output_bytes > sort_mem_bytes)
+       {
+               /*
+                * We'll have to use a disk-based sort of all the tuples
+                */
+               double          npages = ceil(input_bytes / BLCKSZ);
+               double          nruns = (input_bytes / sort_mem_bytes) * 0.5;
+               double          mergeorder = tuplesort_merge_order(sort_mem_bytes);
                double          log_runs;
                double          npageaccesses;
 
+               /*
+                * CPU costs
+                *
+                * Assume about N log2 N comparisons
+                */
+               startup_cost += comparison_cost * tuples * LOG2(tuples);
+
+               /* Disk costs */
+
                /* Compute logM(r) as log(r) / log(M) */
                if (nruns > mergeorder)
                        log_runs = ceil(log(nruns) / log(mergeorder));
@@ -946,10 +1183,29 @@ cost_sort(Path *path, PlannerInfo *root,
                startup_cost += npageaccesses *
                        (seq_page_cost * 0.75 + random_page_cost * 0.25);
        }
+       else if (tuples > 2 * output_tuples || input_bytes > sort_mem_bytes)
+       {
+               /*
+                * We'll use a bounded heap-sort keeping just K tuples in memory, for
+                * a total number of tuple comparisons of N log2 K; but the constant
+                * factor is a bit higher than for quicksort.  Tweak it so that the
+                * cost curve is continuous at the crossover point.
+                */
+               startup_cost += comparison_cost * tuples * LOG2(2.0 * output_tuples);
+       }
+       else
+       {
+               /* We'll use plain quicksort on all the input tuples */
+               startup_cost += comparison_cost * tuples * LOG2(tuples);
+       }
 
        /*
         * Also charge a small amount (arbitrarily set equal to operator cost) per
-        * extracted tuple.
+        * extracted tuple.  We don't charge cpu_tuple_cost because a Sort node
+        * doesn't do qual-checking or projection, so it has less overhead than
+        * most plan nodes.  Note it's correct to use tuples not output_tuples
+        * here --- the upper LIMIT will pro-rate the run cost so we'd be double
+        * counting the LIMIT otherwise.
         */
        run_cost += cpu_operator_cost * tuples;
 
@@ -957,6 +1213,70 @@ cost_sort(Path *path, PlannerInfo *root,
        path->total_cost = startup_cost + run_cost;
 }
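Illustrative sketch, not part of the patch: the three CPU-cost branches of the reworked cost_sort, isolated as a helper. comparison_cost here is assumed to already include the default 2 * cpu_operator_cost, as in the code above, and the disk charges of the external-sort case are omitted.

#include <math.h>

static double
sort_comparison_cpu_cost(double tuples, double output_tuples,
						 double input_bytes, double output_bytes,
						 double sort_mem_bytes, double comparison_cost)
{
	if (output_bytes > sort_mem_bytes)
	{
		/* disk-based sort of all the tuples: N log2 N comparisons */
		return comparison_cost * tuples * (log(tuples) / log(2.0));
	}
	else if (tuples > 2 * output_tuples || input_bytes > sort_mem_bytes)
	{
		/* bounded heap sort keeping only K = output_tuples tuples */
		return comparison_cost * tuples * (log(2.0 * output_tuples) / log(2.0));
	}
	else
	{
		/* plain in-memory quicksort */
		return comparison_cost * tuples * (log(tuples) / log(2.0));
	}
}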
 
+/*
+ * cost_merge_append
+ *       Determines and returns the cost of a MergeAppend node.
+ *
+ * MergeAppend merges several pre-sorted input streams, using a heap that
+ * at any given instant holds the next tuple from each stream. If there
+ * are N streams, we need about N*log2(N) tuple comparisons to construct
+ * the heap at startup, and then for each output tuple, about log2(N)
+ * comparisons to delete the top heap entry and another log2(N) comparisons
+ * to insert its successor from the same stream.
+ *
+ * (The effective value of N will drop once some of the input streams are
+ * exhausted, but it seems unlikely to be worth trying to account for that.)
+ *
+ * The heap is never spilled to disk, since we assume N is not very large.
+ * So this is much simpler than cost_sort.
+ *
+ * As in cost_sort, we charge two operator evals per tuple comparison.
+ *
+ * 'pathkeys' is a list of sort keys
+ * 'n_streams' is the number of input streams
+ * 'input_startup_cost' is the sum of the input streams' startup costs
+ * 'input_total_cost' is the sum of the input streams' total costs
+ * 'tuples' is the number of tuples in all the streams
+ */
+void
+cost_merge_append(Path *path, PlannerInfo *root,
+                                 List *pathkeys, int n_streams,
+                                 Cost input_startup_cost, Cost input_total_cost,
+                                 double tuples)
+{
+       Cost            startup_cost = 0;
+       Cost            run_cost = 0;
+       Cost            comparison_cost;
+       double          N;
+       double          logN;
+
+       /*
+        * Avoid log(0)...
+        */
+       N = (n_streams < 2) ? 2.0 : (double) n_streams;
+       logN = LOG2(N);
+
+       /* Assumed cost per tuple comparison */
+       comparison_cost = 2.0 * cpu_operator_cost;
+
+       /* Heap creation cost */
+       startup_cost += comparison_cost * N * logN;
+
+       /* Per-tuple heap maintenance cost */
+       run_cost += tuples * comparison_cost * 2.0 * logN;
+
+       /*
+        * Also charge a small amount (arbitrarily set equal to operator cost) per
+        * extracted tuple.  We don't charge cpu_tuple_cost because a MergeAppend
+        * node doesn't do qual-checking or projection, so it has less overhead
+        * than most plan nodes.
+        */
+       run_cost += cpu_operator_cost * tuples;
+
+       path->startup_cost = startup_cost + input_startup_cost;
+       path->total_cost = startup_cost + run_cost + input_total_cost;
+}
+
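Illustrative sketch, not part of the patch: the heap-construction and heap-maintenance charges described above, as a standalone helper.

#include <math.h>

static void
merge_append_costs(int n_streams, double tuples, double cpu_operator_cost,
				   double *startup_cost, double *run_cost)
{
	double	N = (n_streams < 2) ? 2.0 : (double) n_streams;		/* avoid log(0) */
	double	logN = log(N) / log(2.0);
	double	comparison_cost = 2.0 * cpu_operator_cost;

	/* build the initial heap of N stream heads */
	*startup_cost = comparison_cost * N * logN;

	/* per output tuple: delete the top entry and insert its successor */
	*run_cost = tuples * comparison_cost * 2.0 * logN;

	/* small per-extracted-tuple overhead, as in cost_sort */
	*run_cost += cpu_operator_cost * tuples;
}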
 /*
  * cost_material
  *       Determines and returns the cost of materializing a relation, including
@@ -964,41 +1284,48 @@ cost_sort(Path *path, PlannerInfo *root,
  *
  * If the total volume of data to materialize exceeds work_mem, we will need
  * to write it to disk, so the cost is much higher in that case.
+ *
+ * Note that here we are estimating the costs for the first scan of the
+ * relation, so the materialization is all overhead --- any savings will
+ * occur only on rescan, which is estimated in cost_rescan.
  */
 void
 cost_material(Path *path,
-                         Cost input_cost, double tuples, int width)
+                         Cost input_startup_cost, Cost input_total_cost,
+                         double tuples, int width)
 {
-       Cost            startup_cost = input_cost;
-       Cost            run_cost = 0;
+       Cost            startup_cost = input_startup_cost;
+       Cost            run_cost = input_total_cost - input_startup_cost;
        double          nbytes = relation_byte_size(tuples, width);
        long            work_mem_bytes = work_mem * 1024L;
 
-       /* disk costs */
+       /*
+        * Whether spilling or not, charge 2x cpu_operator_cost per tuple to
+        * reflect bookkeeping overhead.  (This rate must be more than what
+        * cost_rescan charges for materialize, ie, cpu_operator_cost per tuple;
+        * if it is exactly the same then there will be a cost tie between
+        * nestloop with A outer, materialized B inner and nestloop with B outer,
+        * materialized A inner.  The extra cost ensures we'll prefer
+        * materializing the smaller rel.)      Note that this is normally a good deal
+        * less than cpu_tuple_cost; which is OK because a Material plan node
+        * doesn't do qual-checking or projection, so it's got less overhead than
+        * most plan nodes.
+        */
+       run_cost += 2 * cpu_operator_cost * tuples;
+
+       /*
+        * If we will spill to disk, charge at the rate of seq_page_cost per page.
+        * This cost is assumed to be evenly spread through the plan run phase,
+        * which isn't exactly accurate but our cost model doesn't allow for
+        * nonuniform costs within the run phase.
+        */
        if (nbytes > work_mem_bytes)
        {
                double          npages = ceil(nbytes / BLCKSZ);
 
-               /* We'll write during startup and read during retrieval */
-               startup_cost += seq_page_cost * npages;
                run_cost += seq_page_cost * npages;
        }
 
-       /*
-        * Charge a very small amount per inserted tuple, to reflect bookkeeping
-        * costs.  We use cpu_tuple_cost/10 for this.  This is needed to break the
-        * tie that would otherwise exist between nestloop with A outer,
-        * materialized B inner and nestloop with B outer, materialized A inner.
-        * The extra cost ensures we'll prefer materializing the smaller rel.
-        */
-       startup_cost += cpu_tuple_cost * 0.1 * tuples;
-
-       /*
-        * Also charge a small amount per extracted tuple.      We use cpu_tuple_cost
-        * so that it doesn't appear worthwhile to materialize a bare seqscan.
-        */
-       run_cost += cpu_tuple_cost * tuples;
-
        path->startup_cost = startup_cost;
        path->total_cost = startup_cost + run_cost;
 }
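Illustrative sketch, not part of the patch: the run-phase charges cost_material now adds on top of the input cost. nbytes stands in for relation_byte_size(), which in the real code also accounts for per-tuple header overhead.

#include <math.h>

static void
material_extra_run_cost(double tuples, double nbytes,
						double work_mem_bytes, double blcksz,
						double cpu_operator_cost, double seq_page_cost,
						double *run_cost)
{
	/* bookkeeping overhead; must exceed cost_rescan's per-tuple charge */
	*run_cost += 2 * cpu_operator_cost * tuples;

	/* if the tuplestore spills, charge seq_page_cost per page written */
	if (nbytes > work_mem_bytes)
		*run_cost += seq_page_cost * ceil(nbytes / blcksz);
}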
@@ -1008,25 +1335,40 @@ cost_material(Path *path,
  *             Determines and returns the cost of performing an Agg plan node,
  *             including the cost of its input.
  *
+ * aggcosts can be NULL when there are no actual aggregate functions (i.e.,
+ * we are using a hashed Agg node just to do grouping).
+ *
  * Note: when aggstrategy == AGG_SORTED, caller must ensure that input costs
  * are for appropriately-sorted input.
  */
 void
 cost_agg(Path *path, PlannerInfo *root,
-                AggStrategy aggstrategy, int numAggs,
+                AggStrategy aggstrategy, const AggClauseCosts *aggcosts,
                 int numGroupCols, double numGroups,
                 Cost input_startup_cost, Cost input_total_cost,
                 double input_tuples)
 {
        Cost            startup_cost;
        Cost            total_cost;
+       AggClauseCosts dummy_aggcosts;
+
+       /* Use all-zero per-aggregate costs if NULL is passed */
+       if (aggcosts == NULL)
+       {
+               Assert(aggstrategy == AGG_HASHED);
+               MemSet(&dummy_aggcosts, 0, sizeof(AggClauseCosts));
+               aggcosts = &dummy_aggcosts;
+       }
 
        /*
-        * We charge one cpu_operator_cost per aggregate function per input tuple,
-        * and another one per output tuple (corresponding to transfn and finalfn
-        * calls respectively).  If we are grouping, we charge an additional
-        * cpu_operator_cost per grouping column per input tuple for grouping
-        * comparisons.
+        * The transCost.per_tuple component of aggcosts should be charged once
+        * per input tuple, corresponding to the costs of evaluating the aggregate
+        * transfns and their input expressions (with any startup cost of course
+        * charged but once).  The finalCost component is charged once per output
+        * tuple, corresponding to the costs of evaluating the finalfns.
+        *
+        * If we are grouping, we charge an additional cpu_operator_cost per
+        * grouping column per input tuple for grouping comparisons.
         *
         * We will produce a single output tuple if not grouping, and a tuple per
         * group otherwise.  We charge cpu_tuple_cost for each output tuple.
@@ -1043,7 +1385,9 @@ cost_agg(Path *path, PlannerInfo *root,
        if (aggstrategy == AGG_PLAIN)
        {
                startup_cost = input_total_cost;
-               startup_cost += cpu_operator_cost * (input_tuples + 1) * numAggs;
+               startup_cost += aggcosts->transCost.startup;
+               startup_cost += aggcosts->transCost.per_tuple * input_tuples;
+               startup_cost += aggcosts->finalCost;
                /* we aren't grouping */
                total_cost = startup_cost + cpu_tuple_cost;
        }
@@ -1053,19 +1397,21 @@ cost_agg(Path *path, PlannerInfo *root,
                startup_cost = input_startup_cost;
                total_cost = input_total_cost;
                /* calcs phrased this way to match HASHED case, see note above */
-               total_cost += cpu_operator_cost * input_tuples * numGroupCols;
-               total_cost += cpu_operator_cost * input_tuples * numAggs;
-               total_cost += cpu_operator_cost * numGroups * numAggs;
+               total_cost += aggcosts->transCost.startup;
+               total_cost += aggcosts->transCost.per_tuple * input_tuples;
+               total_cost += (cpu_operator_cost * numGroupCols) * input_tuples;
+               total_cost += aggcosts->finalCost * numGroups;
                total_cost += cpu_tuple_cost * numGroups;
        }
        else
        {
                /* must be AGG_HASHED */
                startup_cost = input_total_cost;
-               startup_cost += cpu_operator_cost * input_tuples * numGroupCols;
-               startup_cost += cpu_operator_cost * input_tuples * numAggs;
+               startup_cost += aggcosts->transCost.startup;
+               startup_cost += aggcosts->transCost.per_tuple * input_tuples;
+               startup_cost += (cpu_operator_cost * numGroupCols) * input_tuples;
                total_cost = startup_cost;
-               total_cost += cpu_operator_cost * numGroups * numAggs;
+               total_cost += aggcosts->finalCost * numGroups;
                total_cost += cpu_tuple_cost * numGroups;
        }
 
@@ -1073,6 +1419,68 @@ cost_agg(Path *path, PlannerInfo *root,
        path->total_cost = total_cost;
 }
 
+/*
+ * cost_windowagg
+ *             Determines and returns the cost of performing a WindowAgg plan node,
+ *             including the cost of its input.
+ *
+ * Input is assumed already properly sorted.
+ */
+void
+cost_windowagg(Path *path, PlannerInfo *root,
+                          List *windowFuncs, int numPartCols, int numOrderCols,
+                          Cost input_startup_cost, Cost input_total_cost,
+                          double input_tuples)
+{
+       Cost            startup_cost;
+       Cost            total_cost;
+       ListCell   *lc;
+
+       startup_cost = input_startup_cost;
+       total_cost = input_total_cost;
+
+       /*
+        * Window functions are assumed to cost their stated execution cost, plus
+        * the cost of evaluating their input expressions, per tuple.  Since they
+        * may in fact evaluate their inputs at multiple rows during each cycle,
+        * this could be a drastic underestimate; but without a way to know how
+        * many rows the window function will fetch, it's hard to do better.  In
+        * any case, it's a good estimate for all the built-in window functions,
+        * so we'll just do this for now.
+        */
+       foreach(lc, windowFuncs)
+       {
+               WindowFunc *wfunc = (WindowFunc *) lfirst(lc);
+               Cost            wfunccost;
+               QualCost        argcosts;
+
+               Assert(IsA(wfunc, WindowFunc));
+
+               wfunccost = get_func_cost(wfunc->winfnoid) * cpu_operator_cost;
+
+               /* also add the input expressions' cost to per-input-row costs */
+               cost_qual_eval_node(&argcosts, (Node *) wfunc->args, root);
+               startup_cost += argcosts.startup;
+               wfunccost += argcosts.per_tuple;
+
+               total_cost += wfunccost * input_tuples;
+       }
+
+       /*
+        * We also charge cpu_operator_cost per grouping column per tuple for
+        * grouping comparisons, plus cpu_tuple_cost per tuple for general
+        * overhead.
+        *
+        * XXX this neglects costs of spooling the data to disk when it overflows
+        * work_mem.  Sooner or later that should get accounted for.
+        */
+       total_cost += cpu_operator_cost * (numPartCols + numOrderCols) * input_tuples;
+       total_cost += cpu_tuple_cost * input_tuples;
+
+       path->startup_cost = startup_cost;
+       path->total_cost = total_cost;
+}
+
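Illustrative sketch, not part of the patch: cost_windowagg's charging scheme with the per-function execution costs and argument-expression costs passed in as plain arrays. Names are assumptions for the example.

static void
windowagg_costs(int nwfuncs,
				const double *wfunc_cost,		/* stated cost of each window function */
				const double *arg_startup,		/* its arguments' startup cost */
				const double *arg_per_tuple,	/* its arguments' per-row cost */
				int numPartCols, int numOrderCols, double input_tuples,
				double cpu_operator_cost, double cpu_tuple_cost,
				double input_startup_cost, double input_total_cost,
				double *startup_cost, double *total_cost)
{
	int		i;

	*startup_cost = input_startup_cost;
	*total_cost = input_total_cost;

	for (i = 0; i < nwfuncs; i++)
	{
		/* each function plus its input expressions is charged per input row */
		*startup_cost += arg_startup[i];
		*total_cost += (wfunc_cost[i] + arg_per_tuple[i]) * input_tuples;
	}

	/* grouping comparisons plus general per-tuple overhead */
	*total_cost += cpu_operator_cost * (numPartCols + numOrderCols) * input_tuples;
	*total_cost += cpu_tuple_cost * input_tuples;
}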
 /*
  * cost_group
  *             Determines and returns the cost of performing a Group plan node,
@@ -1108,7 +1516,9 @@ cost_group(Path *path, PlannerInfo *root,
  * output row count, which may be lower than the restriction-clause-only row
  * count of its parent.  (We don't include this case in the PATH_ROWS macro
  * because it applies *only* to a nestloop's inner relation.)  We have to
- * be prepared to recurse through Append nodes in case of an appendrel.
+ * be prepared to recurse through Append or MergeAppend nodes in case of an
+ * appendrel.  (It's not clear MergeAppend can be seen here, but we may as
+ * well handle it if so.)
  */
 static double
 nestloop_inner_path_rows(Path *path)
@@ -1129,6 +1539,16 @@ nestloop_inner_path_rows(Path *path)
                        result += nestloop_inner_path_rows((Path *) lfirst(l));
                }
        }
+       else if (IsA(path, MergeAppendPath))
+       {
+               ListCell   *l;
+
+               result = 0;
+               foreach(l, ((MergeAppendPath *) path)->subpaths)
+               {
+                       result += nestloop_inner_path_rows((Path *) lfirst(l));
+               }
+       }
        else
                result = PATH_ROWS(path);
 
@@ -1141,68 +1561,129 @@ nestloop_inner_path_rows(Path *path)
  *       nested loop algorithm.
  *
  * 'path' is already filled in except for the cost fields
+ * 'sjinfo' is extra info about the join for selectivity estimation
  */
 void
-cost_nestloop(NestPath *path, PlannerInfo *root)
+cost_nestloop(NestPath *path, PlannerInfo *root, SpecialJoinInfo *sjinfo)
 {
        Path       *outer_path = path->outerjoinpath;
        Path       *inner_path = path->innerjoinpath;
        Cost            startup_cost = 0;
        Cost            run_cost = 0;
+       Cost            inner_rescan_start_cost;
+       Cost            inner_rescan_total_cost;
+       Cost            inner_run_cost;
+       Cost            inner_rescan_run_cost;
        Cost            cpu_per_tuple;
        QualCost        restrict_qual_cost;
        double          outer_path_rows = PATH_ROWS(outer_path);
        double          inner_path_rows = nestloop_inner_path_rows(inner_path);
        double          ntuples;
-       Selectivity joininfactor;
+       Selectivity outer_match_frac;
+       Selectivity match_count;
+       bool            indexed_join_quals;
 
        if (!enable_nestloop)
                startup_cost += disable_cost;
 
-       /*
-        * If we're doing JOIN_IN then we will stop scanning inner tuples for an
-        * outer tuple as soon as we have one match.  Account for the effects of
-        * this by scaling down the cost estimates in proportion to the JOIN_IN
-        * selectivity.  (This assumes that all the quals attached to the join are
-        * IN quals, which should be true.)
-        */
-       joininfactor = join_in_selectivity(path, root);
+       /* estimate costs to rescan the inner relation */
+       cost_rescan(root, inner_path,
+                               &inner_rescan_start_cost,
+                               &inner_rescan_total_cost);
 
        /* cost of source data */
 
        /*
         * NOTE: clearly, we must pay both outer and inner paths' startup_cost
         * before we can start returning tuples, so the join's startup cost is
-        * their sum.  What's not so clear is whether the inner path's
-        * startup_cost must be paid again on each rescan of the inner path. This
-        * is not true if the inner path is materialized or is a hashjoin, but
-        * probably is true otherwise.
+        * their sum.  We'll also pay the inner path's rescan startup cost
+        * multiple times.
         */
        startup_cost += outer_path->startup_cost + inner_path->startup_cost;
        run_cost += outer_path->total_cost - outer_path->startup_cost;
-       if (IsA(inner_path, MaterialPath) ||
-               IsA(inner_path, HashPath))
-       {
-               /* charge only run cost for each iteration of inner path */
-       }
-       else
+       if (outer_path_rows > 1)
+               run_cost += (outer_path_rows - 1) * inner_rescan_start_cost;
+
+       inner_run_cost = inner_path->total_cost - inner_path->startup_cost;
+       inner_rescan_run_cost = inner_rescan_total_cost - inner_rescan_start_cost;
+
+       if (adjust_semi_join(root, path, sjinfo,
+                                                &outer_match_frac,
+                                                &match_count,
+                                                &indexed_join_quals))
        {
+               double          outer_matched_rows;
+               Selectivity inner_scan_frac;
+
+               /*
+                * SEMI or ANTI join: executor will stop after first match.
+                *
+                * For an outer-rel row that has at least one match, we can expect the
+                * inner scan to stop after a fraction 1/(match_count+1) of the inner
+                * rows, if the matches are evenly distributed.  Since they probably
+                * aren't quite evenly distributed, we apply a fuzz factor of 2.0 to
+                * that fraction.  (If we used a larger fuzz factor, we'd have to
+                * clamp inner_scan_frac to at most 1.0; but since match_count is at
+                * least 1, no such clamp is needed now.)
+                *
+                * A complicating factor is that rescans may be cheaper than first
+                * scans.  If we never scan all the way to the end of the inner rel,
+                * it might be (depending on the plan type) that we'd never pay the
+                * whole inner first-scan run cost.  However it is difficult to
+                * estimate whether that will happen, so be conservative and always
+                * charge the whole first-scan cost once.
+                */
+               run_cost += inner_run_cost;
+
+               outer_matched_rows = rint(outer_path_rows * outer_match_frac);
+               inner_scan_frac = 2.0 / (match_count + 1.0);
+
+               /* Add inner run cost for additional outer tuples having matches */
+               if (outer_matched_rows > 1)
+                       run_cost += (outer_matched_rows - 1) * inner_rescan_run_cost * inner_scan_frac;
+
+               /* Compute number of tuples processed (not number emitted!) */
+               ntuples = outer_matched_rows * inner_path_rows * inner_scan_frac;
+
                /*
-                * charge startup cost for each iteration of inner path, except we
-                * already charged the first startup_cost in our own startup
+                * For unmatched outer-rel rows, there are two cases.  If the inner
+                * path is an indexscan using all the joinquals as indexquals, then an
+                * unmatched row results in an indexscan returning no rows, which is
+                * probably quite cheap.  We estimate this case as the same cost to
+                * return the first tuple of a nonempty scan.  Otherwise, the executor
+                * will have to scan the whole inner rel; not so cheap.
                 */
-               run_cost += (outer_path_rows - 1) * inner_path->startup_cost;
+               if (indexed_join_quals)
+               {
+                       run_cost += (outer_path_rows - outer_matched_rows) *
+                               inner_rescan_run_cost / inner_path_rows;
+
+                       /*
+                        * We won't be evaluating any quals at all for these rows, so
+                        * don't add them to ntuples.
+                        */
+               }
+               else
+               {
+                       run_cost += (outer_path_rows - outer_matched_rows) *
+                               inner_rescan_run_cost;
+                       ntuples += (outer_path_rows - outer_matched_rows) *
+                               inner_path_rows;
+               }
        }
-       run_cost += outer_path_rows *
-               (inner_path->total_cost - inner_path->startup_cost) * joininfactor;
+       else
+       {
+               /* Normal case; we'll scan whole input rel for each outer row */
+               run_cost += inner_run_cost;
+               if (outer_path_rows > 1)
+                       run_cost += (outer_path_rows - 1) * inner_rescan_run_cost;
 
-       /*
-        * Compute number of tuples processed (not number emitted!)
-        */
-       ntuples = outer_path_rows * inner_path_rows * joininfactor;
+               /* Compute number of tuples processed (not number emitted!) */
+               ntuples = outer_path_rows * inner_path_rows;
+       }
 
        /* CPU costs */
-       cost_qual_eval(&restrict_qual_cost, path->joinrestrictinfo);
+       cost_qual_eval(&restrict_qual_cost, path->joinrestrictinfo, root);
        startup_cost += restrict_qual_cost.startup;
        cpu_per_tuple = cpu_tuple_cost + restrict_qual_cost.per_tuple;
        run_cost += cpu_per_tuple * ntuples;
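Illustrative sketch, not part of the patch: the SEMI/ANTI inner-scan arithmetic described above, including the 2.0 fuzz factor and the cheaper handling of unmatched outer rows when all the joinquals are usable as indexquals. The helper and its parameters are assumptions for the example.

#include <math.h>

static double
semijoin_inner_run_cost(double outer_path_rows, double outer_match_frac,
						double match_count, double inner_path_rows,
						double inner_run_cost, double inner_rescan_run_cost,
						int indexed_join_quals)
{
	/* stop the inner scan after the first match, with a 2.0 fuzz factor */
	double	inner_scan_frac = 2.0 / (match_count + 1.0);
	double	outer_matched_rows = rint(outer_path_rows * outer_match_frac);

	/* always pay one full first scan, then pro-rated rescans for matches */
	double	run_cost = inner_run_cost;

	if (outer_matched_rows > 1)
		run_cost += (outer_matched_rows - 1) * inner_rescan_run_cost * inner_scan_frac;

	/* unmatched outer rows: cheap probe if the joinquals are indexquals */
	if (indexed_join_quals)
		run_cost += (outer_path_rows - outer_matched_rows) *
			inner_rescan_run_cost / inner_path_rows;
	else
		run_cost += (outer_path_rows - outer_matched_rows) * inner_rescan_run_cost;

	return run_cost;
}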
@@ -1216,7 +1697,19 @@ cost_nestloop(NestPath *path, PlannerInfo *root)
  *       Determines and returns the cost of joining two relations using the
  *       merge join algorithm.
  *
- * 'path' is already filled in except for the cost fields
+ * Unlike other costsize functions, this routine makes one actual decision:
+ * whether we should materialize the inner path.  We do that either because
+ * the inner path can't support mark/restore, or because it's cheaper to
+ * use an interposed Material node to handle mark/restore.     When the decision
+ * is cost-based it would be logically cleaner to build and cost two separate
+ * paths with and without that flag set; but that would require repeating most
+ * of the calculations here, which are not all that cheap.     Since the choice
+ * will not affect output pathkeys or startup cost, only total cost, there is
+ * no possibility of wanting to keep both paths.  So it seems best to make
+ * the decision here and record it in the path's materialize_inner field.
+ *
+ * 'path' is already filled in except for the cost fields and materialize_inner
+ * 'sjinfo' is extra info about the join for selectivity estimation
  *
  * Notes: path's mergeclauses should be a subset of the joinrestrictinfo list;
  * outersortkeys and innersortkeys are lists of the keys to be used
@@ -1224,7 +1717,7 @@ cost_nestloop(NestPath *path, PlannerInfo *root)
  * sort is needed because the source path is already ordered.
  */
 void
-cost_mergejoin(MergePath *path, PlannerInfo *root)
+cost_mergejoin(MergePath *path, PlannerInfo *root, SpecialJoinInfo *sjinfo)
 {
        Path       *outer_path = path->jpath.outerjoinpath;
        Path       *inner_path = path->jpath.innerjoinpath;
@@ -1233,57 +1726,61 @@ cost_mergejoin(MergePath *path, PlannerInfo *root)
        List       *innersortkeys = path->innersortkeys;
        Cost            startup_cost = 0;
        Cost            run_cost = 0;
-       Cost            cpu_per_tuple;
-       Selectivity merge_selec;
+       Cost            cpu_per_tuple,
+                               inner_run_cost,
+                               bare_inner_cost,
+                               mat_inner_cost;
        QualCost        merge_qual_cost;
        QualCost        qp_qual_cost;
-       RestrictInfo *firstclause;
        double          outer_path_rows = PATH_ROWS(outer_path);
        double          inner_path_rows = PATH_ROWS(inner_path);
        double          outer_rows,
-                               inner_rows;
+                               inner_rows,
+                               outer_skip_rows,
+                               inner_skip_rows;
        double          mergejointuples,
                                rescannedtuples;
        double          rescanratio;
-       Selectivity outerscansel,
-                               innerscansel;
-       Selectivity joininfactor;
+       Selectivity outerstartsel,
+                               outerendsel,
+                               innerstartsel,
+                               innerendsel;
        Path            sort_path;              /* dummy for result of cost_sort */
 
+       /* Protect some assumptions below that rowcounts aren't zero or NaN */
+       if (outer_path_rows <= 0 || isnan(outer_path_rows))
+               outer_path_rows = 1;
+       if (inner_path_rows <= 0 || isnan(inner_path_rows))
+               inner_path_rows = 1;
+
        if (!enable_mergejoin)
                startup_cost += disable_cost;
 
        /*
-        * Compute cost and selectivity of the mergequals and qpquals (other
-        * restriction clauses) separately.  We use approx_selectivity here for
-        * speed --- in most cases, any errors won't affect the result much.
-        *
-        * Note: it's probably bogus to use the normal selectivity calculation
-        * here when either the outer or inner path is a UniquePath.
+        * Compute cost of the mergequals and qpquals (other restriction clauses)
+        * separately.
         */
-       merge_selec = approx_selectivity(root, mergeclauses,
-                                                                        path->jpath.jointype);
-       cost_qual_eval(&merge_qual_cost, mergeclauses);
-       cost_qual_eval(&qp_qual_cost, path->jpath.joinrestrictinfo);
+       cost_qual_eval(&merge_qual_cost, mergeclauses, root);
+       cost_qual_eval(&qp_qual_cost, path->jpath.joinrestrictinfo, root);
        qp_qual_cost.startup -= merge_qual_cost.startup;
        qp_qual_cost.per_tuple -= merge_qual_cost.per_tuple;
 
-       /* approx # tuples passing the merge quals */
-       mergejointuples = clamp_row_est(merge_selec * outer_path_rows * inner_path_rows);
+       /*
+        * Get approx # tuples passing the mergequals.  We use approx_tuple_count
+        * here because we need an estimate done with JOIN_INNER semantics.
+        */
+       mergejointuples = approx_tuple_count(root, &path->jpath, mergeclauses);
 
        /*
         * When there are equal merge keys in the outer relation, the mergejoin
         * must rescan any matching tuples in the inner relation. This means
-        * re-fetching inner tuples.  Our cost model for this is that a re-fetch
-        * costs the same as an original fetch, which is probably an overestimate;
-        * but on the other hand we ignore the bookkeeping costs of mark/restore.
-        * Not clear if it's worth developing a more refined model.
+        * re-fetching inner tuples; we have to estimate how often that happens.
         *
-        * The number of re-fetches can be estimated approximately as size of
-        * merge join output minus size of inner relation.      Assume that the
-        * distinct key values are 1, 2, ..., and denote the number of values of
-        * each key in the outer relation as m1, m2, ...; in the inner relation,
-        * n1, n2, ... Then we have
+        * For regular inner and outer joins, the number of re-fetches can be
+        * estimated approximately as size of merge join output minus size of
+        * inner relation. Assume that the distinct key values are 1, 2, ..., and
+        * denote the number of values of each key in the outer relation as m1,
+        * m2, ...; in the inner relation, n1, n2, ...  Then we have
         *
         * size of join = m1 * n1 + m2 * n2 + ...
         *
@@ -1296,6 +1793,9 @@ cost_mergejoin(MergePath *path, PlannerInfo *root)
         * are effectively subtracting those from the number of rescanned tuples,
         * when we should not.  Can we do better without expensive selectivity
         * computations?
+        *
+        * The whole issue is moot if we are working from a unique-ified outer
+        * input.
         */
        if (IsA(outer_path, UniquePath))
                rescannedtuples = 0;
@@ -1306,62 +1806,106 @@ cost_mergejoin(MergePath *path, PlannerInfo *root)
                if (rescannedtuples < 0)
                        rescannedtuples = 0;
        }
-       /* We'll inflate inner run cost this much to account for rescanning */
+       /* We'll inflate various costs this much to account for rescanning */
        rescanratio = 1.0 + (rescannedtuples / inner_path_rows);
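
To put numbers to the estimate described above (figures invented for illustration, not taken from the source): if the join is expected to emit 1,500 rows while the inner relation holds 1,000 rows, then

    rescannedtuples = 1500 - 1000;              /* 500 inner tuples re-fetched          */
    rescanratio     = 1.0 + (500.0 / 1000.0);   /* = 1.5, so inner-side costs below are */
                                                /* inflated by 50%                      */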
 
        /*
         * A merge join will stop as soon as it exhausts either input stream
         * (unless it's an outer join, in which case the outer side has to be
         * scanned all the way anyway).  Estimate fraction of the left and right
-        * inputs that will actually need to be scanned. We use only the first
-        * (most significant) merge clause for this purpose.
-        *
-        * Since this calculation is somewhat expensive, and will be the same for
-        * all mergejoin paths associated with the merge clause, we cache the
-        * results in the RestrictInfo node.
+        * inputs that will actually need to be scanned.  Likewise, we can
+        * estimate the number of rows that will be skipped before the first join
+        * pair is found, which should be factored into startup cost. We use only
+        * the first (most significant) merge clause for this purpose. Since
+        * mergejoinscansel() is a fairly expensive computation, we cache the
+        * results in the merge clause RestrictInfo.
         */
        if (mergeclauses && path->jpath.jointype != JOIN_FULL)
        {
-               firstclause = (RestrictInfo *) linitial(mergeclauses);
-               if (firstclause->left_mergescansel < 0) /* not computed yet? */
-                       mergejoinscansel(root, (Node *) firstclause->clause,
-                                                        &firstclause->left_mergescansel,
-                                                        &firstclause->right_mergescansel);
-
-               if (bms_is_subset(firstclause->left_relids, outer_path->parent->relids))
+               RestrictInfo *firstclause = (RestrictInfo *) linitial(mergeclauses);
+               List       *opathkeys;
+               List       *ipathkeys;
+               PathKey    *opathkey;
+               PathKey    *ipathkey;
+               MergeScanSelCache *cache;
+
+               /* Get the input pathkeys to determine the sort-order details */
+               opathkeys = outersortkeys ? outersortkeys : outer_path->pathkeys;
+               ipathkeys = innersortkeys ? innersortkeys : inner_path->pathkeys;
+               Assert(opathkeys);
+               Assert(ipathkeys);
+               opathkey = (PathKey *) linitial(opathkeys);
+               ipathkey = (PathKey *) linitial(ipathkeys);
+               /* debugging check */
+               if (opathkey->pk_opfamily != ipathkey->pk_opfamily ||
+                       opathkey->pk_eclass->ec_collation != ipathkey->pk_eclass->ec_collation ||
+                       opathkey->pk_strategy != ipathkey->pk_strategy ||
+                       opathkey->pk_nulls_first != ipathkey->pk_nulls_first)
+                       elog(ERROR, "left and right pathkeys do not match in mergejoin");
+
+               /* Get the selectivity with caching */
+               cache = cached_scansel(root, firstclause, opathkey);
+
+               if (bms_is_subset(firstclause->left_relids,
+                                                 outer_path->parent->relids))
                {
                        /* left side of clause is outer */
-                       outerscansel = firstclause->left_mergescansel;
-                       innerscansel = firstclause->right_mergescansel;
+                       outerstartsel = cache->leftstartsel;
+                       outerendsel = cache->leftendsel;
+                       innerstartsel = cache->rightstartsel;
+                       innerendsel = cache->rightendsel;
                }
                else
                {
                        /* left side of clause is inner */
-                       outerscansel = firstclause->right_mergescansel;
-                       innerscansel = firstclause->left_mergescansel;
+                       outerstartsel = cache->rightstartsel;
+                       outerendsel = cache->rightendsel;
+                       innerstartsel = cache->leftstartsel;
+                       innerendsel = cache->leftendsel;
+               }
+               if (path->jpath.jointype == JOIN_LEFT ||
+                       path->jpath.jointype == JOIN_ANTI)
+               {
+                       outerstartsel = 0.0;
+                       outerendsel = 1.0;
                }
-               if (path->jpath.jointype == JOIN_LEFT)
-                       outerscansel = 1.0;
                else if (path->jpath.jointype == JOIN_RIGHT)
-                       innerscansel = 1.0;
+               {
+                       innerstartsel = 0.0;
+                       innerendsel = 1.0;
+               }
        }
        else
        {
                /* cope with clauseless or full mergejoin */
-               outerscansel = innerscansel = 1.0;
+               outerstartsel = innerstartsel = 0.0;
+               outerendsel = innerendsel = 1.0;
        }
 
-       /* convert selectivity to row count; must scan at least one row */
-       outer_rows = clamp_row_est(outer_path_rows * outerscansel);
-       inner_rows = clamp_row_est(inner_path_rows * innerscansel);
+       /*
+        * Convert selectivities to row counts.  We force outer_rows and
+        * inner_rows to be at least 1, but the skip_rows estimates can be zero.
+        */
+       outer_skip_rows = rint(outer_path_rows * outerstartsel);
+       inner_skip_rows = rint(inner_path_rows * innerstartsel);
+       outer_rows = clamp_row_est(outer_path_rows * outerendsel);
+       inner_rows = clamp_row_est(inner_path_rows * innerendsel);
+
+       Assert(outer_skip_rows <= outer_rows);
+       Assert(inner_skip_rows <= inner_rows);
 
        /*
         * Readjust scan selectivities to account for above rounding.  This is
         * normally an insignificant effect, but when there are only a few rows in
         * the inputs, failing to do this makes for a large percentage error.
         */
-       outerscansel = outer_rows / outer_path_rows;
-       innerscansel = inner_rows / inner_path_rows;
+       outerstartsel = outer_skip_rows / outer_path_rows;
+       innerstartsel = inner_skip_rows / inner_path_rows;
+       outerendsel = outer_rows / outer_path_rows;
+       innerendsel = inner_rows / inner_path_rows;
+
+       Assert(outerstartsel <= outerendsel);
+       Assert(innerstartsel <= innerendsel);
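+
A small worked example of the rounding and readjustment above, with made-up inputs: if outer_path_rows = 100 and mergejoinscansel() reported outerstartsel = 0.024 and outerendsel = 0.97, then

    outer_skip_rows = rint(100 * 0.024);            /* = 2                        */
    outer_rows      = clamp_row_est(100 * 0.97);    /* = 97                       */
    outerstartsel   = 2.0 / 100.0;                  /* = 0.02, replacing 0.024    */
    outerendsel     = 97.0 / 100.0;                 /* = 0.97                     */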
 
        /* cost of source data */
 
@@ -1372,16 +1916,23 @@ cost_mergejoin(MergePath *path, PlannerInfo *root)
                                  outersortkeys,
                                  outer_path->total_cost,
                                  outer_path_rows,
-                                 outer_path->parent->width);
+                                 outer_path->parent->width,
+                                 0.0,
+                                 work_mem,
+                                 -1.0);
                startup_cost += sort_path.startup_cost;
+               startup_cost += (sort_path.total_cost - sort_path.startup_cost)
+                       * outerstartsel;
                run_cost += (sort_path.total_cost - sort_path.startup_cost)
-                       * outerscansel;
+                       * (outerendsel - outerstartsel);
        }
        else
        {
                startup_cost += outer_path->startup_cost;
+               startup_cost += (outer_path->total_cost - outer_path->startup_cost)
+                       * outerstartsel;
                run_cost += (outer_path->total_cost - outer_path->startup_cost)
-                       * outerscansel;
+                       * (outerendsel - outerstartsel);
        }
 
        if (innersortkeys)                      /* do we need to sort inner? */
@@ -1391,66 +1942,202 @@ cost_mergejoin(MergePath *path, PlannerInfo *root)
                                  innersortkeys,
                                  inner_path->total_cost,
                                  inner_path_rows,
-                                 inner_path->parent->width);
+                                 inner_path->parent->width,
+                                 0.0,
+                                 work_mem,
+                                 -1.0);
                startup_cost += sort_path.startup_cost;
-               run_cost += (sort_path.total_cost - sort_path.startup_cost)
-                       * innerscansel * rescanratio;
+               startup_cost += (sort_path.total_cost - sort_path.startup_cost)
+                       * innerstartsel;
+               inner_run_cost = (sort_path.total_cost - sort_path.startup_cost)
+                       * (innerendsel - innerstartsel);
        }
        else
        {
                startup_cost += inner_path->startup_cost;
-               run_cost += (inner_path->total_cost - inner_path->startup_cost)
-                       * innerscansel * rescanratio;
+               startup_cost += (inner_path->total_cost - inner_path->startup_cost)
+                       * innerstartsel;
+               inner_run_cost = (inner_path->total_cost - inner_path->startup_cost)
+                       * (innerendsel - innerstartsel);
        }
 
-       /* CPU costs */
+       /*
+        * Decide whether we want to materialize the inner input to shield it from
+        * mark/restore and from performing re-fetches.  Our cost model for regular
+        * re-fetches is that a re-fetch costs the same as an original fetch,
+        * which is probably an overestimate; but on the other hand we ignore the
+        * bookkeeping costs of mark/restore.  Not clear if it's worth developing
+        * a more refined model.  So we just need to inflate the inner run cost by
+        * rescanratio.
+        */
+       bare_inner_cost = inner_run_cost * rescanratio;
 
        /*
-        * If we're doing JOIN_IN then we will stop outputting inner tuples for an
-        * outer tuple as soon as we have one match.  Account for the effects of
-        * this by scaling down the cost estimates in proportion to the expected
-        * output size.  (This assumes that all the quals attached to the join are
-        * IN quals, which should be true.)
+        * When we interpose a Material node the re-fetch cost is assumed to be
+        * just cpu_operator_cost per tuple, independently of the underlying
+        * plan's cost; and we charge an extra cpu_operator_cost per original
+        * fetch as well.  Note that we're assuming the materialize node will
+        * never spill to disk, since it only has to remember tuples back to the
+        * last mark.  (If there are a huge number of duplicates, our other cost
+        * factors will make the path so expensive that it probably won't get
+        * chosen anyway.)      So we don't use cost_rescan here.
+        *
+        * Note: keep this estimate in sync with create_mergejoin_plan's labeling
+        * of the generated Material node.
         */
-       joininfactor = join_in_selectivity(&path->jpath, root);
+       mat_inner_cost = inner_run_cost +
+               cpu_operator_cost * inner_path_rows * rescanratio;
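+
For illustration only (hypothetical costs, with cpu_operator_cost at its 0.0025 default): given inner_run_cost = 1000, inner_path_rows = 10000 and rescanratio = 1.25, the two estimates come out as

    bare_inner_cost = 1000 * 1.25;                    /* = 1250.00 */
    mat_inner_cost  = 1000 + 0.0025 * 10000 * 1.25;   /* = 1031.25 */

so the cost-based test below would opt to materialize, provided enable_material is on.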
+
+       /*
+        * Prefer materializing if it looks cheaper, unless the user has asked to
+        * suppress materialization.
+        */
+       if (enable_material && mat_inner_cost < bare_inner_cost)
+               path->materialize_inner = true;
+
+       /*
+        * Even if materializing doesn't look cheaper, we *must* do it if the
+        * inner path is to be used directly (without sorting) and it doesn't
+        * support mark/restore.
+        *
+        * Since the inner side must be ordered, and only Sorts and IndexScans can
+        * create order to begin with, and they both support mark/restore, you
+        * might think there's no problem --- but you'd be wrong.  Nestloop and
+        * merge joins can *preserve* the order of their inputs, so they can be
+        * selected as the input of a mergejoin, and they don't support
+        * mark/restore at present.
+        *
+        * We don't test the value of enable_material here, because
+        * materialization is required for correctness in this case, and turning
+        * it off does not entitle us to deliver an invalid plan.
+        */
+       else if (innersortkeys == NIL &&
+                        !ExecSupportsMarkRestore(inner_path->pathtype))
+               path->materialize_inner = true;
+
+       /*
+        * Also, force materializing if the inner path is to be sorted and the
+        * sort is expected to spill to disk.  This is because the final merge
+        * pass can be done on-the-fly if it doesn't have to support mark/restore.
+        * We don't try to adjust the cost estimates for this consideration,
+        * though.
+        *
+        * Since materialization is a performance optimization in this case,
+        * rather than necessary for correctness, we skip it if enable_material is
+        * off.
+        */
+       else if (enable_material && innersortkeys != NIL &&
+                        relation_byte_size(inner_path_rows, inner_path->parent->width) >
+                        (work_mem * 1024L))
+               path->materialize_inner = true;
+       else
+               path->materialize_inner = false;
+
+       /* Charge the right incremental cost for the chosen case */
+       if (path->materialize_inner)
+               run_cost += mat_inner_cost;
+       else
+               run_cost += bare_inner_cost;
+
+       /* CPU costs */
 
        /*
         * The number of tuple comparisons needed is approximately number of outer
         * rows plus number of inner rows plus number of rescanned tuples (can we
         * refine this?).  At each one, we need to evaluate the mergejoin quals.
-        * NOTE: JOIN_IN mode does not save any work here, so do NOT include
-        * joininfactor.
         */
        startup_cost += merge_qual_cost.startup;
+       startup_cost += merge_qual_cost.per_tuple *
+               (outer_skip_rows + inner_skip_rows * rescanratio);
        run_cost += merge_qual_cost.per_tuple *
-               (outer_rows + inner_rows * rescanratio);
+               ((outer_rows - outer_skip_rows) +
+                (inner_rows - inner_skip_rows) * rescanratio);
 
        /*
         * For each tuple that gets through the mergejoin proper, we charge
         * cpu_tuple_cost plus the cost of evaluating additional restriction
         * clauses that are to be applied at the join.  (This is pessimistic since
-        * not all of the quals may get evaluated at each tuple.)  This work is
-        * skipped in JOIN_IN mode, so apply the factor.
+        * not all of the quals may get evaluated at each tuple.)
+        *
+        * Note: we could adjust for SEMI/ANTI joins skipping some qual
+        * evaluations here, but it's probably not worth the trouble.
         */
        startup_cost += qp_qual_cost.startup;
        cpu_per_tuple = cpu_tuple_cost + qp_qual_cost.per_tuple;
-       run_cost += cpu_per_tuple * mergejointuples * joininfactor;
+       run_cost += cpu_per_tuple * mergejointuples;
 
        path->jpath.path.startup_cost = startup_cost;
        path->jpath.path.total_cost = startup_cost + run_cost;
 }
 
+/*
+ * run mergejoinscansel() with caching
+ */
+static MergeScanSelCache *
+cached_scansel(PlannerInfo *root, RestrictInfo *rinfo, PathKey *pathkey)
+{
+       MergeScanSelCache *cache;
+       ListCell   *lc;
+       Selectivity leftstartsel,
+                               leftendsel,
+                               rightstartsel,
+                               rightendsel;
+       MemoryContext oldcontext;
+
+       /* Do we have this result already? */
+       foreach(lc, rinfo->scansel_cache)
+       {
+               cache = (MergeScanSelCache *) lfirst(lc);
+               if (cache->opfamily == pathkey->pk_opfamily &&
+                       cache->collation == pathkey->pk_eclass->ec_collation &&
+                       cache->strategy == pathkey->pk_strategy &&
+                       cache->nulls_first == pathkey->pk_nulls_first)
+                       return cache;
+       }
+
+       /* Nope, do the computation */
+       mergejoinscansel(root,
+                                        (Node *) rinfo->clause,
+                                        pathkey->pk_opfamily,
+                                        pathkey->pk_strategy,
+                                        pathkey->pk_nulls_first,
+                                        &leftstartsel,
+                                        &leftendsel,
+                                        &rightstartsel,
+                                        &rightendsel);
+
+       /* Cache the result in suitably long-lived workspace */
+       oldcontext = MemoryContextSwitchTo(root->planner_cxt);
+
+       cache = (MergeScanSelCache *) palloc(sizeof(MergeScanSelCache));
+       cache->opfamily = pathkey->pk_opfamily;
+       cache->collation = pathkey->pk_eclass->ec_collation;
+       cache->strategy = pathkey->pk_strategy;
+       cache->nulls_first = pathkey->pk_nulls_first;
+       cache->leftstartsel = leftstartsel;
+       cache->leftendsel = leftendsel;
+       cache->rightstartsel = rightstartsel;
+       cache->rightendsel = rightendsel;
+
+       rinfo->scansel_cache = lappend(rinfo->scansel_cache, cache);
+
+       MemoryContextSwitchTo(oldcontext);
+
+       return cache;
+}
+
 /*
  * cost_hashjoin
  *       Determines and returns the cost of joining two relations using the
  *       hash join algorithm.
  *
  * 'path' is already filled in except for the cost fields
+ * 'sjinfo' is extra info about the join for selectivity estimation
  *
  * Note: path's hashclauses should be a subset of the joinrestrictinfo list
  */
 void
-cost_hashjoin(HashPath *path, PlannerInfo *root)
+cost_hashjoin(HashPath *path, PlannerInfo *root, SpecialJoinInfo *sjinfo)
 {
        Path       *outer_path = path->jpath.outerjoinpath;
        Path       *inner_path = path->jpath.innerjoinpath;
@@ -1458,45 +2145,33 @@ cost_hashjoin(HashPath *path, PlannerInfo *root)
        Cost            startup_cost = 0;
        Cost            run_cost = 0;
        Cost            cpu_per_tuple;
-       Selectivity hash_selec;
        QualCost        hash_qual_cost;
        QualCost        qp_qual_cost;
        double          hashjointuples;
        double          outer_path_rows = PATH_ROWS(outer_path);
        double          inner_path_rows = PATH_ROWS(inner_path);
-       double          outerbytes = relation_byte_size(outer_path_rows,
-                                                                                               outer_path->parent->width);
-       double          innerbytes = relation_byte_size(inner_path_rows,
-                                                                                               inner_path->parent->width);
        int                     num_hashclauses = list_length(hashclauses);
        int                     numbuckets;
        int                     numbatches;
+       int                     num_skew_mcvs;
        double          virtualbuckets;
        Selectivity innerbucketsize;
-       Selectivity joininfactor;
+       Selectivity outer_match_frac;
+       Selectivity match_count;
        ListCell   *hcl;
 
        if (!enable_hashjoin)
                startup_cost += disable_cost;
 
        /*
-        * Compute cost and selectivity of the hashquals and qpquals (other
-        * restriction clauses) separately.  We use approx_selectivity here for
-        * speed --- in most cases, any errors won't affect the result much.
-        *
-        * Note: it's probably bogus to use the normal selectivity calculation
-        * here when either the outer or inner path is a UniquePath.
+        * Compute cost of the hashquals and qpquals (other restriction clauses)
+        * separately.
         */
-       hash_selec = approx_selectivity(root, hashclauses,
-                                                                       path->jpath.jointype);
-       cost_qual_eval(&hash_qual_cost, hashclauses);
-       cost_qual_eval(&qp_qual_cost, path->jpath.joinrestrictinfo);
+       cost_qual_eval(&hash_qual_cost, hashclauses, root);
+       cost_qual_eval(&qp_qual_cost, path->jpath.joinrestrictinfo, root);
        qp_qual_cost.startup -= hash_qual_cost.startup;
        qp_qual_cost.per_tuple -= hash_qual_cost.per_tuple;
 
-       /* approx # tuples passing the hash quals */
-       hashjointuples = clamp_row_est(hash_selec * outer_path_rows * inner_path_rows);
-
        /* cost of source data */
        startup_cost += outer_path->startup_cost;
        run_cost += outer_path->total_cost - outer_path->startup_cost;
@@ -1504,22 +2179,39 @@ cost_hashjoin(HashPath *path, PlannerInfo *root)
 
        /*
         * Cost of computing hash function: must do it once per input tuple. We
-        * charge one cpu_operator_cost for each column's hash function.
+        * charge one cpu_operator_cost for each column's hash function.  Also,
+        * tack on one cpu_tuple_cost per inner row, to model the costs of
+        * inserting the row into the hashtable.
         *
         * XXX when a hashclause is more complex than a single operator, we really
         * should charge the extra eval costs of the left or right side, as
         * appropriate, here.  This seems more work than it's worth at the moment.
         */
-       startup_cost += cpu_operator_cost * num_hashclauses * inner_path_rows;
+       startup_cost += (cpu_operator_cost * num_hashclauses + cpu_tuple_cost)
+               * inner_path_rows;
        run_cost += cpu_operator_cost * num_hashclauses * outer_path_rows;
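
A rough illustration with invented row counts and the default cpu_operator_cost (0.0025) and cpu_tuple_cost (0.01): for two hash clauses, 100,000 inner rows and 1,000,000 outer rows,

    startup_cost += (0.0025 * 2 + 0.01) * 100000;   /* = 1,500 to build the hash table */
    run_cost     += 0.0025 * 2 * 1000000;           /* = 5,000 to hash the outer rows  */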
 
-       /* Get hash table size that executor would use for inner relation */
+       /*
+        * Get hash table size that executor would use for inner relation.
+        *
+        * XXX for the moment, always assume that skew optimization will be
+        * performed.  As long as SKEW_WORK_MEM_PERCENT is small, it's not worth
+        * trying to determine that for sure.
+        *
+        * XXX at some point it might be interesting to try to account for skew
+        * optimization in the cost estimate, but for now, we don't.
+        */
        ExecChooseHashTableSize(inner_path_rows,
                                                        inner_path->parent->width,
+                                                       true,           /* useskew */
                                                        &numbuckets,
-                                                       &numbatches);
+                                                       &numbatches,
+                                                       &num_skew_mcvs);
        virtualbuckets = (double) numbuckets *(double) numbatches;
 
+       /* mark the path with estimated # of batches */
+       path->num_batches = numbatches;
+
        /*
         * Determine bucketsize fraction for inner relation.  We use the smallest
         * bucketsize estimated for any individual hashclause; this is undoubtedly
@@ -1590,9 +2282,9 @@ cost_hashjoin(HashPath *path, PlannerInfo *root)
        /*
         * If inner relation is too big then we will need to "batch" the join,
         * which implies writing and reading most of the tuples to disk an extra
-        * time.  Charge one cost unit per page of I/O (correct since it should be
-        * nice and sequential...).  Writing the inner rel counts as startup cost,
-        * all the rest as run cost.
+        * time.  Charge seq_page_cost per page, since the I/O should be nice and
+        * sequential.  Writing the inner rel counts as startup cost, all the rest
+        * as run cost.
         */
        if (numbatches > 1)
        {
@@ -1601,33 +2293,84 @@ cost_hashjoin(HashPath *path, PlannerInfo *root)
                double          innerpages = page_size(inner_path_rows,
                                                                                   inner_path->parent->width);
 
-               startup_cost += innerpages;
-               run_cost += innerpages + 2 * outerpages;
+               startup_cost += seq_page_cost * innerpages;
+               run_cost += seq_page_cost * (innerpages + 2 * outerpages);
        }
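
For example (hypothetical page counts, seq_page_cost at its 1.0 default): with 1,000 inner pages, 5,000 outer pages and numbatches > 1,

    startup_cost += 1.0 * 1000;                /* write out the inner rel once        */
    run_cost     += 1.0 * (1000 + 2 * 5000);   /* = 11,000: re-read the inner, write  */
                                               /* and re-read most of the outer       */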
 
        /* CPU costs */
 
-       /*
-        * If we're doing JOIN_IN then we will stop comparing inner tuples to an
-        * outer tuple as soon as we have one match.  Account for the effects of
-        * this by scaling down the cost estimates in proportion to the expected
-        * output size.  (This assumes that all the quals attached to the join are
-        * IN quals, which should be true.)
-        */
-       joininfactor = join_in_selectivity(&path->jpath, root);
+       if (adjust_semi_join(root, &path->jpath, sjinfo,
+                                                &outer_match_frac,
+                                                &match_count,
+                                                NULL))
+       {
+               double          outer_matched_rows;
+               Selectivity inner_scan_frac;
 
-       /*
-        * The number of tuple comparisons needed is the number of outer tuples
-        * times the typical number of tuples in a hash bucket, which is the inner
-        * relation size times its bucketsize fraction.  At each one, we need to
-        * evaluate the hashjoin quals.  (Note: charging the full qual eval cost
-        * at each tuple is pessimistic, since we don't evaluate the quals unless
-        * the hash values match exactly.)
-        */
-       startup_cost += hash_qual_cost.startup;
-       run_cost += hash_qual_cost.per_tuple *
-               outer_path_rows * clamp_row_est(inner_path_rows * innerbucketsize) *
-               joininfactor;
+               /*
+                * SEMI or ANTI join: executor will stop after first match.
+                *
+                * For an outer-rel row that has at least one match, we can expect the
+                * bucket scan to stop after a fraction 1/(match_count+1) of the
+                * bucket's rows, if the matches are evenly distributed.  Since they
+                * probably aren't quite evenly distributed, we apply a fuzz factor of
+                * 2.0 to that fraction.  (If we used a larger fuzz factor, we'd have
+                * to clamp inner_scan_frac to at most 1.0; but since match_count is
+                * at least 1, no such clamp is needed now.)
+                */
+               outer_matched_rows = rint(outer_path_rows * outer_match_frac);
+               inner_scan_frac = 2.0 / (match_count + 1.0);
+
+               startup_cost += hash_qual_cost.startup;
+               run_cost += hash_qual_cost.per_tuple * outer_matched_rows *
+                       clamp_row_est(inner_path_rows * innerbucketsize * inner_scan_frac) * 0.5;
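+
To make the fuzz factor concrete (illustrative numbers only): with match_count = 3, inner_path_rows = 100,000 and innerbucketsize = 0.01,

    inner_scan_frac = 2.0 / (3 + 1.0);    /* = 0.5                                     */
    /* so each matched outer row is charged hash_qual_cost.per_tuple for about         */
    /* clamp_row_est(100000 * 0.01 * 0.5) * 0.5 = 250 bucket entries                   */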
+
+               /*
+                * For unmatched outer-rel rows, the picture is quite a lot different.
+                * In the first place, there is no reason to assume that these rows
+                * preferentially hit heavily-populated buckets; instead assume they
+                * are uncorrelated with the inner distribution and so they see an
+                * average bucket size of inner_path_rows / virtualbuckets.  In the
+                * second place, it seems likely that they will have few if any exact
+                * hash-code matches and so very few of the tuples in the bucket will
+                * actually require eval of the hash quals.  We don't have any good
+                * way to estimate how many will, but for the moment assume that the
+                * effective cost per bucket entry is one-tenth what it is for
+                * matchable tuples.
+                */
+               run_cost += hash_qual_cost.per_tuple *
+                       (outer_path_rows - outer_matched_rows) *
+                       clamp_row_est(inner_path_rows / virtualbuckets) * 0.05;
+
+               /* Get # of tuples that will pass the basic join */
+               if (path->jpath.jointype == JOIN_SEMI)
+                       hashjointuples = outer_matched_rows;
+               else
+                       hashjointuples = outer_path_rows - outer_matched_rows;
+       }
+       else
+       {
+               /*
+                * The number of tuple comparisons needed is the number of outer
+                * tuples times the typical number of tuples in a hash bucket, which
+                * is the inner relation size times its bucketsize fraction.  At each
+                * one, we need to evaluate the hashjoin quals.  But actually,
+                * charging the full qual eval cost at each tuple is pessimistic,
+                * since we don't evaluate the quals unless the hash values match
+                * exactly.  For lack of a better idea, halve the cost estimate to
+                * allow for that.
+                */
+               startup_cost += hash_qual_cost.startup;
+               run_cost += hash_qual_cost.per_tuple * outer_path_rows *
+                       clamp_row_est(inner_path_rows * innerbucketsize) * 0.5;
+
+               /*
+                * Get approx # tuples passing the hashquals.  We use
+                * approx_tuple_count here because we need an estimate done with
+                * JOIN_INNER semantics.
+                */
+               hashjointuples = approx_tuple_count(root, &path->jpath, hashclauses);
+       }
 
        /*
         * For each tuple that gets through the hashjoin proper, we charge
@@ -1637,44 +2380,219 @@ cost_hashjoin(HashPath *path, PlannerInfo *root)
         */
        startup_cost += qp_qual_cost.startup;
        cpu_per_tuple = cpu_tuple_cost + qp_qual_cost.per_tuple;
-       run_cost += cpu_per_tuple * hashjointuples * joininfactor;
-
-       /*
-        * Bias against putting larger relation on inside.      We don't want an
-        * absolute prohibition, though, since larger relation might have better
-        * bucketsize --- and we can't trust the size estimates unreservedly,
-        * anyway.      Instead, inflate the run cost by the square root of the size
-        * ratio.  (Why square root?  No real good reason, but it seems
-        * reasonable...)
-        *
-        * Note: before 7.4 we implemented this by inflating startup cost; but if
-        * there's a disable_cost component in the input paths' startup cost, that
-        * unfairly penalizes the hash.  Probably it'd be better to keep track of
-        * disable penalty separately from cost.
-        */
-       if (innerbytes > outerbytes && outerbytes > 0)
-               run_cost *= sqrt(innerbytes / outerbytes);
+       run_cost += cpu_per_tuple * hashjointuples;
 
        path->jpath.path.startup_cost = startup_cost;
        path->jpath.path.total_cost = startup_cost + run_cost;
 }
 
 
+/*
+ * cost_subplan
+ *             Figure the costs for a SubPlan (or initplan).
+ *
+ * Note: we could dig the subplan's Plan out of the root list, but in practice
+ * all callers have it handy already, so we make them pass it.
+ */
+void
+cost_subplan(PlannerInfo *root, SubPlan *subplan, Plan *plan)
+{
+       QualCost        sp_cost;
+
+       /* Figure any cost for evaluating the testexpr */
+       cost_qual_eval(&sp_cost,
+                                  make_ands_implicit((Expr *) subplan->testexpr),
+                                  root);
+
+       if (subplan->useHashTable)
+       {
+               /*
+                * If we are using a hash table for the subquery outputs, then the
+                * cost of evaluating the query is a one-time cost.  We charge one
+                * cpu_operator_cost per tuple for the work of loading the hashtable,
+                * too.
+                */
+               sp_cost.startup += plan->total_cost +
+                       cpu_operator_cost * plan->plan_rows;
+
+               /*
+                * The per-tuple costs include the cost of evaluating the lefthand
+                * expressions, plus the cost of probing the hashtable.  We already
+                * accounted for the lefthand expressions as part of the testexpr, and
+                * will also have counted one cpu_operator_cost for each comparison
+                * operator.  That is probably too low for the probing cost, but it's
+                * hard to make a better estimate, so live with it for now.
+                */
+       }
+       else
+       {
+               /*
+                * Otherwise we will be rescanning the subplan output on each
+                * evaluation.  We need to estimate how much of the output we will
+                * actually need to scan.  NOTE: this logic should agree with the
+                * tuple_fraction estimates used by make_subplan() in
+                * plan/subselect.c.
+                */
+               Cost            plan_run_cost = plan->total_cost - plan->startup_cost;
+
+               if (subplan->subLinkType == EXISTS_SUBLINK)
+               {
+                       /* we only need to fetch 1 tuple */
+                       sp_cost.per_tuple += plan_run_cost / plan->plan_rows;
+               }
+               else if (subplan->subLinkType == ALL_SUBLINK ||
+                                subplan->subLinkType == ANY_SUBLINK)
+               {
+                       /* assume we need 50% of the tuples */
+                       sp_cost.per_tuple += 0.50 * plan_run_cost;
+                       /* also charge a cpu_operator_cost per row examined */
+                       sp_cost.per_tuple += 0.50 * plan->plan_rows * cpu_operator_cost;
+               }
+               else
+               {
+                       /* assume we need all tuples */
+                       sp_cost.per_tuple += plan_run_cost;
+               }
+
+               /*
+                * Also account for subplan's startup cost. If the subplan is
+                * uncorrelated or undirect correlated, AND its topmost node is one
+                * that materializes its output, assume that we'll only need to pay
+                * its startup cost once; otherwise assume we pay the startup cost
+                * every time.
+                */
+               if (subplan->parParam == NIL &&
+                       ExecMaterializesOutput(nodeTag(plan)))
+                       sp_cost.startup += plan->startup_cost;
+               else
+                       sp_cost.per_tuple += plan->startup_cost;
+       }
+
+       subplan->startup_cost = sp_cost.startup;
+       subplan->per_call_cost = sp_cost.per_tuple;
+}
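+
The per-call arithmetic of the non-hashed branch can be illustrated with made-up figures (plan_run_cost = 2000, plan_rows = 1000, default cpu_operator_cost = 0.0025); the names below are labels for this example only, not variables in the function:

    exists_per_call = 2000.0 / 1000;                      /* = 2.0: fetch one tuple      */
    any_per_call    = 0.5 * 2000 + 0.5 * 1000 * 0.0025;   /* = 1001.25: scan ~half       */
    other_per_call  = 2000;                               /* whole output needed         */

On top of that, plan->startup_cost is added to the startup cost once if the subplan has no parParams and its top node materializes its output, and to the per-call cost otherwise.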
+
+
+/*
+ * cost_rescan
+ *             Given a finished Path, estimate the costs of rescanning it after
+ *             having done so the first time.  For some Path types a rescan is
+ *             cheaper than an original scan (if no parameters change), and this
+ *             function embodies knowledge about that.  The default is to return
+ *             the same costs stored in the Path.      (Note that the cost estimates
+ *             actually stored in Paths are always for first scans.)
+ *
+ * This function is not currently intended to model effects such as rescans
+ * being cheaper due to disk block caching; what we are concerned with is
+ * plan types wherein the executor caches results explicitly, or doesn't
+ * redo startup calculations, etc.
+ */
+static void
+cost_rescan(PlannerInfo *root, Path *path,
+                       Cost *rescan_startup_cost,      /* output parameters */
+                       Cost *rescan_total_cost)
+{
+       switch (path->pathtype)
+       {
+               case T_FunctionScan:
+
+                       /*
+                        * Currently, nodeFunctionscan.c always executes the function to
+                        * completion before returning any rows, and caches the results in
+                        * a tuplestore.  So the function eval cost is all startup cost
+                        * and isn't paid over again on rescans. However, all run costs
+                        * will be paid over again.
+                        */
+                       *rescan_startup_cost = 0;
+                       *rescan_total_cost = path->total_cost - path->startup_cost;
+                       break;
+               case T_HashJoin:
+
+                       /*
+                        * Assume that all of the startup cost represents hash table
+                        * building, which we won't have to do over.
+                        */
+                       *rescan_startup_cost = 0;
+                       *rescan_total_cost = path->total_cost - path->startup_cost;
+                       break;
+               case T_CteScan:
+               case T_WorkTableScan:
+                       {
+                               /*
+                                * These plan types materialize their final result in a
+                                * tuplestore or tuplesort object.      So the rescan cost is only
+                                * cpu_tuple_cost per tuple, unless the result is large enough
+                                * to spill to disk.
+                                */
+                               Cost            run_cost = cpu_tuple_cost * path->parent->rows;
+                               double          nbytes = relation_byte_size(path->parent->rows,
+                                                                                                               path->parent->width);
+                               long            work_mem_bytes = work_mem * 1024L;
+
+                               if (nbytes > work_mem_bytes)
+                               {
+                                       /* It will spill, so account for re-read cost */
+                                       double          npages = ceil(nbytes / BLCKSZ);
+
+                                       run_cost += seq_page_cost * npages;
+                               }
+                               *rescan_startup_cost = 0;
+                               *rescan_total_cost = run_cost;
+                       }
+                       break;
+               case T_Material:
+               case T_Sort:
+                       {
+                               /*
+                                * These plan types not only materialize their results, but do
+                                * not implement qual filtering or projection.  So they are
+                                * even cheaper to rescan than the ones above.  We charge only
+                                * cpu_operator_cost per tuple.  (Note: keep that in sync with
+                                * the run_cost charge in cost_sort, and also see comments in
+                                * cost_material before you change it.)
+                                */
+                               Cost            run_cost = cpu_operator_cost * path->parent->rows;
+                               double          nbytes = relation_byte_size(path->parent->rows,
+                                                                                                               path->parent->width);
+                               long            work_mem_bytes = work_mem * 1024L;
+
+                               if (nbytes > work_mem_bytes)
+                               {
+                                       /* It will spill, so account for re-read cost */
+                                       double          npages = ceil(nbytes / BLCKSZ);
+
+                                       run_cost += seq_page_cost * npages;
+                               }
+                               *rescan_startup_cost = 0;
+                               *rescan_total_cost = run_cost;
+                       }
+                       break;
+               default:
+                       *rescan_startup_cost = path->startup_cost;
+                       *rescan_total_cost = path->total_cost;
+                       break;
+       }
+}
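+
As a rough illustration of the Material/Sort branch (hypothetical figures; the usual 8 kB BLCKSZ, 1 MB work_mem, and default cpu and page cost settings are assumed): rescanning a materialized result of 100,000 rows at roughly 128 bytes apiece, i.e. about 12.8 MB, would spill to disk, giving approximately

    run_cost  = 0.0025 * 100000;             /* = 250, cpu_operator_cost per tuple  */
    npages    = ceil(12800000.0 / 8192);     /* = 1563 pages re-read                */
    run_cost += 1.0 * 1563;                  /* seq_page_cost * npages              */
                                             /* rescan_total_cost ~ 1813            */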
+
+
 /*
  * cost_qual_eval
  *             Estimate the CPU costs of evaluating a WHERE clause.
  *             The input can be either an implicitly-ANDed list of boolean
- *             expressions, or a list of RestrictInfo nodes.
+ *             expressions, or a list of RestrictInfo nodes.  (The latter is
+ *             preferred since it allows caching of the results.)
  *             The result includes both a one-time (startup) component,
  *             and a per-evaluation component.
  */
 void
-cost_qual_eval(QualCost *cost, List *quals)
+cost_qual_eval(QualCost *cost, List *quals, PlannerInfo *root)
 {
+       cost_qual_eval_context context;
        ListCell   *l;
 
-       cost->startup = 0;
-       cost->per_tuple = 0;
+       context.root = root;
+       context.total.startup = 0;
+       context.total.per_tuple = 0;
 
        /* We don't charge any cost for the implicit ANDing at top level ... */
 
@@ -1682,63 +2600,110 @@ cost_qual_eval(QualCost *cost, List *quals)
        {
                Node       *qual = (Node *) lfirst(l);
 
-               /*
-                * RestrictInfo nodes contain an eval_cost field reserved for this
-                * routine's use, so that it's not necessary to evaluate the qual
-                * clause's cost more than once.  If the clause's cost hasn't been
-                * computed yet, the field's startup value will contain -1.
-                *
-                * If the RestrictInfo is marked pseudoconstant, it will be tested
-                * only once, so treat its cost as all startup cost.
-                */
-               if (qual && IsA(qual, RestrictInfo))
-               {
-                       RestrictInfo *rinfo = (RestrictInfo *) qual;
-
-                       if (rinfo->eval_cost.startup < 0)
-                       {
-                               rinfo->eval_cost.startup = 0;
-                               rinfo->eval_cost.per_tuple = 0;
-                               cost_qual_eval_walker((Node *) rinfo->clause,
-                                                                         &rinfo->eval_cost);
-                               if (rinfo->pseudoconstant)
-                               {
-                                       /* count one execution during startup */
-                                       rinfo->eval_cost.startup += rinfo->eval_cost.per_tuple;
-                                       rinfo->eval_cost.per_tuple = 0;
-                               }
-                       }
-                       cost->startup += rinfo->eval_cost.startup;
-                       cost->per_tuple += rinfo->eval_cost.per_tuple;
-               }
-               else
-               {
-                       /* If it's a bare expression, must always do it the hard way */
-                       cost_qual_eval_walker(qual, cost);
-               }
+               cost_qual_eval_walker(qual, &context);
        }
+
+       *cost = context.total;
+}
+
+/*
+ * cost_qual_eval_node
+ *             As above, for a single RestrictInfo or expression.
+ */
+void
+cost_qual_eval_node(QualCost *cost, Node *qual, PlannerInfo *root)
+{
+       cost_qual_eval_context context;
+
+       context.root = root;
+       context.total.startup = 0;
+       context.total.per_tuple = 0;
+
+       cost_qual_eval_walker(qual, &context);
+
+       *cost = context.total;
 }
 
 static bool
-cost_qual_eval_walker(Node *node, QualCost *total)
+cost_qual_eval_walker(Node *node, cost_qual_eval_context *context)
 {
        if (node == NULL)
                return false;
 
        /*
-        * Our basic strategy is to charge one cpu_operator_cost for each operator
-        * or function node in the given tree.  Vars and Consts are charged zero,
-        * and so are boolean operators (AND, OR, NOT). Simplistic, but a lot
-        * better than no model at all.
+        * RestrictInfo nodes contain an eval_cost field reserved for this
+        * routine's use, so that it's not necessary to evaluate the qual clause's
+        * cost more than once.  If the clause's cost hasn't been computed yet,
+        * the field's startup value will contain -1.
+        */
+       if (IsA(node, RestrictInfo))
+       {
+               RestrictInfo *rinfo = (RestrictInfo *) node;
+
+               if (rinfo->eval_cost.startup < 0)
+               {
+                       cost_qual_eval_context locContext;
+
+                       locContext.root = context->root;
+                       locContext.total.startup = 0;
+                       locContext.total.per_tuple = 0;
+
+                       /*
+                        * For an OR clause, recurse into the marked-up tree so that we
+                        * set the eval_cost for contained RestrictInfos too.
+                        */
+                       if (rinfo->orclause)
+                               cost_qual_eval_walker((Node *) rinfo->orclause, &locContext);
+                       else
+                               cost_qual_eval_walker((Node *) rinfo->clause, &locContext);
+
+                       /*
+                        * If the RestrictInfo is marked pseudoconstant, it will be tested
+                        * only once, so treat its cost as all startup cost.
+                        */
+                       if (rinfo->pseudoconstant)
+                       {
+                               /* count one execution during startup */
+                               locContext.total.startup += locContext.total.per_tuple;
+                               locContext.total.per_tuple = 0;
+                       }
+                       rinfo->eval_cost = locContext.total;
+               }
+               context->total.startup += rinfo->eval_cost.startup;
+               context->total.per_tuple += rinfo->eval_cost.per_tuple;
+               /* do NOT recurse into children */
+               return false;
+       }
+
+       /*
+        * For each operator or function node in the given tree, we charge the
+        * estimated execution cost given by pg_proc.procost (remember to multiply
+        * this by cpu_operator_cost).
+        *
+        * Vars and Consts are charged zero, and so are boolean operators (AND,
+        * OR, NOT). Simplistic, but a lot better than no model at all.
         *
         * Should we try to account for the possibility of short-circuit
-        * evaluation of AND/OR?
+        * evaluation of AND/OR?  Probably *not*, because that would make the
+        * results depend on the clause ordering, and we are not in any position
+        * to expect that the current ordering of the clauses is the one that's
+        * going to end up being used.  The above per-RestrictInfo caching would
+        * not mix well with trying to re-order clauses anyway.
         */
-       if (IsA(node, FuncExpr) ||
-               IsA(node, OpExpr) ||
-               IsA(node, DistinctExpr) ||
-               IsA(node, NullIfExpr))
-               total->per_tuple += cpu_operator_cost;
+       if (IsA(node, FuncExpr))
+       {
+               context->total.per_tuple +=
+                       get_func_cost(((FuncExpr *) node)->funcid) * cpu_operator_cost;
+       }
+       else if (IsA(node, OpExpr) ||
+                        IsA(node, DistinctExpr) ||
+                        IsA(node, NullIfExpr))
+       {
+               /* rely on struct equivalence to treat these all alike */
+               set_opfuncid((OpExpr *) node);
+               context->total.per_tuple +=
+                       get_func_cost(((OpExpr *) node)->opfuncid) * cpu_operator_cost;
+       }
        else if (IsA(node, ScalarArrayOpExpr))
        {
                /*
@@ -1748,15 +2713,67 @@ cost_qual_eval_walker(Node *node, QualCost *total)
                ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) node;
                Node       *arraynode = (Node *) lsecond(saop->args);
 
-               total->per_tuple +=
+               set_sa_opfuncid(saop);
+               context->total.per_tuple += get_func_cost(saop->opfuncid) *
                        cpu_operator_cost * estimate_array_length(arraynode) * 0.5;
        }
+       else if (IsA(node, Aggref) ||
+                        IsA(node, WindowFunc))
+       {
+               /*
+                * Aggref and WindowFunc nodes are (and should be) treated like Vars,
+                * ie, zero execution cost in the current model, because they behave
+                * essentially like Vars in execQual.c.  We disregard the costs of
+                * their input expressions for the same reason.  The actual execution
+                * costs of the aggregate/window functions and their arguments have to
+                * be factored into plan-node-specific costing of the Agg or WindowAgg
+                * plan node.
+                */
+               return false;                   /* don't recurse into children */
+       }
+       else if (IsA(node, CoerceViaIO))
+       {
+               CoerceViaIO *iocoerce = (CoerceViaIO *) node;
+               Oid                     iofunc;
+               Oid                     typioparam;
+               bool            typisvarlena;
+
+               /* check the result type's input function */
+               getTypeInputInfo(iocoerce->resulttype,
+                                                &iofunc, &typioparam);
+               context->total.per_tuple += get_func_cost(iofunc) * cpu_operator_cost;
+               /* check the input type's output function */
+               getTypeOutputInfo(exprType((Node *) iocoerce->arg),
+                                                 &iofunc, &typisvarlena);
+               context->total.per_tuple += get_func_cost(iofunc) * cpu_operator_cost;
+       }
+       else if (IsA(node, ArrayCoerceExpr))
+       {
+               ArrayCoerceExpr *acoerce = (ArrayCoerceExpr *) node;
+               Node       *arraynode = (Node *) acoerce->arg;
+
+               if (OidIsValid(acoerce->elemfuncid))
+                       context->total.per_tuple += get_func_cost(acoerce->elemfuncid) *
+                               cpu_operator_cost * estimate_array_length(arraynode);
+       }
        else if (IsA(node, RowCompareExpr))
        {
                /* Conservatively assume we will check all the columns */
                RowCompareExpr *rcexpr = (RowCompareExpr *) node;
+               ListCell   *lc;
+
+               foreach(lc, rcexpr->opnos)
+               {
+                       Oid                     opid = lfirst_oid(lc);
 
-               total->per_tuple += cpu_operator_cost * list_length(rcexpr->opnos);
+                       context->total.per_tuple += get_func_cost(get_opcode(opid)) *
+                               cpu_operator_cost;
+               }
+       }
+       else if (IsA(node, CurrentOfExpr))
+       {
+               /* Report high cost to prevent selection of anything but TID scan */
+               context->total.startup += disable_cost;
        }
        else if (IsA(node, SubLink))
        {
@@ -1770,89 +2787,207 @@ cost_qual_eval_walker(Node *node, QualCost *total)
                 * subplan will be executed on each evaluation, so charge accordingly.
                 * (Sub-selects that can be executed as InitPlans have already been
                 * removed from the expression.)
-                *
-                * An exception occurs when we have decided we can implement the
-                * subplan by hashing.
                 */
                SubPlan    *subplan = (SubPlan *) node;
-               Plan       *plan = subplan->plan;
 
-               if (subplan->useHashTable)
+               context->total.startup += subplan->startup_cost;
+               context->total.per_tuple += subplan->per_call_cost;
+
+               /*
+                * We don't want to recurse into the testexpr, because it was already
+                * counted in the SubPlan node's costs.  So we're done.
+                */
+               return false;
+       }
+       else if (IsA(node, AlternativeSubPlan))
+       {
+               /*
+                * Arbitrarily use the first alternative plan for costing.      (We should
+                * certainly only include one alternative, and we don't yet have
+                * enough information to know which one the executor is most likely to
+                * use.)
+                */
+               AlternativeSubPlan *asplan = (AlternativeSubPlan *) node;
+
+               return cost_qual_eval_walker((Node *) linitial(asplan->subplans),
+                                                                        context);
+       }
+
+       /* recurse into children */
+       return expression_tree_walker(node, cost_qual_eval_walker,
+                                                                 (void *) context);
+}
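
A minimal standalone sketch of the CoerceViaIO charging pattern above: each I/O coercion is billed one call of the source type's output function plus one call of the target type's input function, each weighted by its procedure cost in cpu_operator_cost units (0.0025 by default). The ToyQualCost struct, the charge_coerce_via_io helper, and the hard-coded function costs are illustrative assumptions, not PostgreSQL APIs.

#include <stdio.h>

/* Toy stand-in for QualCost: evaluation cost split into startup and per-tuple. */
typedef struct
{
    double  startup;
    double  per_tuple;
} ToyQualCost;

static const double cpu_operator_cost = 0.0025;     /* default GUC value */

/*
 * Charge an I/O-based coercion: one call of the source type's output function
 * and one call of the target type's input function, each weighted by its
 * procedure cost (in cpu_operator_cost units).
 */
static void
charge_coerce_via_io(ToyQualCost *total, double outfunc_cost, double infunc_cost)
{
    total->per_tuple += outfunc_cost * cpu_operator_cost;
    total->per_tuple += infunc_cost * cpu_operator_cost;
}

int
main(void)
{
    ToyQualCost total = {0.0, 0.0};

    /* assume both I/O functions carry the default procedure cost of 1 */
    charge_coerce_via_io(&total, 1.0, 1.0);
    printf("per-tuple cost: %g\n", total.per_tuple);    /* prints 0.005 */
    return 0;
}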
+
+
+/*
+ * adjust_semi_join
+ *       Estimate how much of the inner input a SEMI or ANTI join
+ *       can be expected to scan.
+ *
+ * In a hash or nestloop SEMI/ANTI join, the executor will stop scanning
+ * inner rows as soon as it finds a match to the current outer row.
+ * We should therefore adjust some of the cost components for this effect.
+ * This function computes some estimates needed for these adjustments.
+ *
+ * 'path' is already filled in except for the cost fields
+ * 'sjinfo' is extra info about the join for selectivity estimation
+ *
+ * Returns TRUE if this is a SEMI or ANTI join, FALSE if not.
+ *
+ * Output parameters (set only in TRUE-result case):
+ * *outer_match_frac is set to the fraction of the outer tuples that are
+ *             expected to have at least one match.
+ * *match_count is set to the average number of matches expected for
+ *             outer tuples that have at least one match.
+ * *indexed_join_quals is set to TRUE if all the joinquals are used as
+ *             inner index quals, FALSE if not.
+ *
+ * indexed_join_quals can be passed as NULL if that information is not
+ * relevant (it is only useful for the nestloop case).
+ */
+static bool
+adjust_semi_join(PlannerInfo *root, JoinPath *path, SpecialJoinInfo *sjinfo,
+                                Selectivity *outer_match_frac,
+                                Selectivity *match_count,
+                                bool *indexed_join_quals)
+{
+       JoinType        jointype = path->jointype;
+       Selectivity jselec;
+       Selectivity nselec;
+       Selectivity avgmatch;
+       SpecialJoinInfo norm_sjinfo;
+       List       *joinquals;
+       ListCell   *l;
+
+       /* Fall out if it's not JOIN_SEMI or JOIN_ANTI */
+       if (jointype != JOIN_SEMI && jointype != JOIN_ANTI)
+               return false;
+
+       /*
+        * Note: it's annoying to repeat this selectivity estimation on each call,
+        * when the joinclause list will be the same for all path pairs
+        * implementing a given join.  clausesel.c will save us from the worst
+        * effects of this by caching at the RestrictInfo level; but perhaps it'd
+        * be worth finding a way to cache the results at a higher level.
+        */
+
+       /*
+        * In an ANTI join, we must ignore clauses that are "pushed down", since
+        * those won't affect the match logic.  In a SEMI join, we do not
+        * distinguish joinquals from "pushed down" quals, so just use the whole
+        * restrictinfo list.
+        */
+       if (jointype == JOIN_ANTI)
+       {
+               joinquals = NIL;
+               foreach(l, path->joinrestrictinfo)
                {
-                       /*
-                        * If we are using a hash table for the subquery outputs, then the
-                        * cost of evaluating the query is a one-time cost. We charge one
-                        * cpu_operator_cost per tuple for the work of loading the
-                        * hashtable, too.
-                        */
-                       total->startup += plan->total_cost +
-                               cpu_operator_cost * plan->plan_rows;
+                       RestrictInfo *rinfo = (RestrictInfo *) lfirst(l);
 
-                       /*
-                        * The per-tuple costs include the cost of evaluating the lefthand
-                        * expressions, plus the cost of probing the hashtable. Recursion
-                        * into the testexpr will handle the lefthand expressions
-                        * properly, and will count one cpu_operator_cost for each
-                        * comparison operator.  That is probably too low for the probing
-                        * cost, but it's hard to make a better estimate, so live with it
-                        * for now.
-                        */
+                       Assert(IsA(rinfo, RestrictInfo));
+                       if (!rinfo->is_pushed_down)
+                               joinquals = lappend(joinquals, rinfo);
                }
-               else
-               {
-                       /*
-                        * Otherwise we will be rescanning the subplan output on each
-                        * evaluation.  We need to estimate how much of the output we will
-                        * actually need to scan.  NOTE: this logic should agree with the
-                        * estimates used by make_subplan() in plan/subselect.c.
-                        */
-                       Cost            plan_run_cost = plan->total_cost - plan->startup_cost;
+       }
+       else
+               joinquals = path->joinrestrictinfo;
 
-                       if (subplan->subLinkType == EXISTS_SUBLINK)
-                       {
-                               /* we only need to fetch 1 tuple */
-                               total->per_tuple += plan_run_cost / plan->plan_rows;
-                       }
-                       else if (subplan->subLinkType == ALL_SUBLINK ||
-                                        subplan->subLinkType == ANY_SUBLINK)
-                       {
-                               /* assume we need 50% of the tuples */
-                               total->per_tuple += 0.50 * plan_run_cost;
-                               /* also charge a cpu_operator_cost per row examined */
-                               total->per_tuple += 0.50 * plan->plan_rows * cpu_operator_cost;
-                       }
-                       else
-                       {
-                               /* assume we need all tuples */
-                               total->per_tuple += plan_run_cost;
-                       }
+       /*
+        * Get the JOIN_SEMI or JOIN_ANTI selectivity of the join clauses.
+        */
+       jselec = clauselist_selectivity(root,
+                                                                       joinquals,
+                                                                       0,
+                                                                       jointype,
+                                                                       sjinfo);
 
-                       /*
-                        * Also account for subplan's startup cost. If the subplan is
-                        * uncorrelated or undirect correlated, AND its topmost node is a
-                        * Sort or Material node, assume that we'll only need to pay its
-                        * startup cost once; otherwise assume we pay the startup cost
-                        * every time.
-                        */
-                       if (subplan->parParam == NIL &&
-                               (IsA(plan, Sort) ||
-                                IsA(plan, Material)))
-                               total->startup += plan->startup_cost;
-                       else
-                               total->per_tuple += plan->startup_cost;
+       /*
+        * Also get the normal inner-join selectivity of the join clauses.
+        */
+       norm_sjinfo.type = T_SpecialJoinInfo;
+       norm_sjinfo.min_lefthand = path->outerjoinpath->parent->relids;
+       norm_sjinfo.min_righthand = path->innerjoinpath->parent->relids;
+       norm_sjinfo.syn_lefthand = path->outerjoinpath->parent->relids;
+       norm_sjinfo.syn_righthand = path->innerjoinpath->parent->relids;
+       norm_sjinfo.jointype = JOIN_INNER;
+       /* we don't bother trying to make the remaining fields valid */
+       norm_sjinfo.lhs_strict = false;
+       norm_sjinfo.delay_upper_joins = false;
+       norm_sjinfo.join_quals = NIL;
+
+       nselec = clauselist_selectivity(root,
+                                                                       joinquals,
+                                                                       0,
+                                                                       JOIN_INNER,
+                                                                       &norm_sjinfo);
+
+       /* Avoid leaking a lot of ListCells */
+       if (jointype == JOIN_ANTI)
+               list_free(joinquals);
+
+       /*
+        * jselec can be interpreted as the fraction of outer-rel rows that have
+        * any matches (this is true for both SEMI and ANTI cases).  And nselec is
+        * the fraction of the Cartesian product that matches.  So, the average
+        * number of matches for each outer-rel row that has at least one match is
+        * nselec * inner_rows / jselec.
+        *
+        * Note: it is correct to use the inner rel's "rows" count here, not
+        * PATH_ROWS(), even if the inner path under consideration is an inner
+        * indexscan.  This is because we have included all the join clauses in
+        * the selectivity estimate, even ones used in an inner indexscan.
+        */
+       if (jselec > 0)                         /* protect against zero divide */
+       {
+               avgmatch = nselec * path->innerjoinpath->parent->rows / jselec;
+               /* Clamp to sane range */
+               avgmatch = Max(1.0, avgmatch);
+       }
+       else
+               avgmatch = 1.0;
+
+       *outer_match_frac = jselec;
+       *match_count = avgmatch;
+
+       /*
+        * If requested, check whether the inner path uses all the joinquals as
+        * indexquals.  (If that's true, we can assume that an unmatched outer
+        * tuple is cheap to process, whereas otherwise it's probably expensive.)
+        */
+       if (indexed_join_quals)
+       {
+               if (path->joinrestrictinfo != NIL)
+               {
+                       List       *nrclauses;
+
+                       nrclauses = select_nonredundant_join_clauses(root,
+                                                                                                         path->joinrestrictinfo,
+                                                                                                                path->innerjoinpath);
+                       *indexed_join_quals = (nrclauses == NIL);
+               }
+               else
+               {
+                       /* a clauseless join does NOT qualify */
+                       *indexed_join_quals = false;
                }
        }
 
-       return expression_tree_walker(node, cost_qual_eval_walker,
-                                                                 (void *) total);
+       return true;
 }
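
To make the avgmatch arithmetic above concrete, here is a hedged standalone sketch of the same computation with made-up selectivities; the avg_matches helper and the example numbers are assumptions for illustration only.

#include <stdio.h>

#define Max(a, b)  ((a) > (b) ? (a) : (b))

/*
 * Toy SEMI/ANTI adjustment: given the SEMI/ANTI selectivity (fraction of
 * outer rows with at least one match), the plain inner-join selectivity,
 * and the inner relation's row count, return the expected number of matches
 * per matched outer row, clamped to at least 1.
 */
static double
avg_matches(double jselec, double nselec, double inner_rows)
{
    double  avgmatch;

    if (jselec > 0)             /* protect against zero divide */
    {
        avgmatch = nselec * inner_rows / jselec;
        avgmatch = Max(1.0, avgmatch);      /* clamp to sane range */
    }
    else
        avgmatch = 1.0;
    return avgmatch;
}

int
main(void)
{
    /*
     * Say 20% of outer rows have a match, 0.1% of the Cartesian product
     * matches, and the inner rel has 1000 rows: 0.001 * 1000 / 0.2 = 5
     * expected matches per matched outer row.
     */
    printf("avg matches = %g\n", avg_matches(0.2, 0.001, 1000.0));
    return 0;
}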
 
 
 /*
- * approx_selectivity
- *             Quick-and-dirty estimation of clause selectivities.
- *             The input can be either an implicitly-ANDed list of boolean
- *             expressions, or a list of RestrictInfo nodes (typically the latter).
+ * approx_tuple_count
+ *             Quick-and-dirty estimation of the number of join rows passing
+ *             a set of qual conditions.
+ *
+ * The quals can be either an implicitly-ANDed list of boolean expressions,
+ * or a list of RestrictInfo nodes (typically the latter).
+ *
+ * We intentionally compute the selectivity under JOIN_INNER rules, even
+ * if it's some type of outer join.  This is appropriate because we are
+ * trying to figure out how many tuples pass the initial merge or hash
+ * join step.
  *
  * This is quick-and-dirty because we bypass clauselist_selectivity, and
  * simply multiply the independent clause selectivities together.  Now
@@ -1865,20 +3000,43 @@ cost_qual_eval_walker(Node *node, QualCost *total)
  * output tuples are generated and passed through qpqual checking, it
  * seems OK to live with the approximation.
  */
-static Selectivity
-approx_selectivity(PlannerInfo *root, List *quals, JoinType jointype)
+static double
+approx_tuple_count(PlannerInfo *root, JoinPath *path, List *quals)
 {
-       Selectivity total = 1.0;
+       double          tuples;
+       double          outer_tuples = path->outerjoinpath->parent->rows;
+       double          inner_tuples = path->innerjoinpath->parent->rows;
+       SpecialJoinInfo sjinfo;
+       Selectivity selec = 1.0;
        ListCell   *l;
 
+       /*
+        * Make up a SpecialJoinInfo for JOIN_INNER semantics.
+        */
+       sjinfo.type = T_SpecialJoinInfo;
+       sjinfo.min_lefthand = path->outerjoinpath->parent->relids;
+       sjinfo.min_righthand = path->innerjoinpath->parent->relids;
+       sjinfo.syn_lefthand = path->outerjoinpath->parent->relids;
+       sjinfo.syn_righthand = path->innerjoinpath->parent->relids;
+       sjinfo.jointype = JOIN_INNER;
+       /* we don't bother trying to make the remaining fields valid */
+       sjinfo.lhs_strict = false;
+       sjinfo.delay_upper_joins = false;
+       sjinfo.join_quals = NIL;
+
+       /* Get the approximate selectivity */
        foreach(l, quals)
        {
                Node       *qual = (Node *) lfirst(l);
 
                /* Note that clause_selectivity will be able to cache its result */
-               total *= clause_selectivity(root, qual, 0, jointype);
+               selec *= clause_selectivity(root, qual, 0, JOIN_INNER, &sjinfo);
        }
-       return total;
+
+       /* Apply it to the input relation sizes */
+       tuples = selec * outer_tuples * inner_tuples;
+
+       return clamp_row_est(tuples);
 }
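
A rough self-contained sketch of the estimate above: multiply the independent clause selectivities together, scale the Cartesian product, and clamp the result to at least one row rounded to an integer (mirroring what clamp_row_est does). The toy_* helpers and the sample numbers are assumptions, not the real planner code.

#include <math.h>
#include <stdio.h>

/* Toy clamp: force at least one row and round to an integer. */
static double
toy_clamp_row_est(double nrows)
{
    return (nrows <= 1.0) ? 1.0 : rint(nrows);
}

/* Multiply independent clause selectivities, then scale the Cartesian product. */
static double
toy_approx_tuple_count(const double *clause_selec, int nclauses,
                       double outer_tuples, double inner_tuples)
{
    double  selec = 1.0;
    int     i;

    for (i = 0; i < nclauses; i++)
        selec *= clause_selec[i];
    return toy_clamp_row_est(selec * outer_tuples * inner_tuples);
}

int
main(void)
{
    double  selecs[] = {0.01, 0.5};     /* made-up per-clause selectivities */

    /* 0.01 * 0.5 * 10000 * 2000 = 100000 rows expected to pass the quals */
    printf("%g\n", toy_approx_tuple_count(selecs, 2, 10000.0, 2000.0));
    return 0;
}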
 
 
@@ -1887,7 +3045,7 @@ approx_selectivity(PlannerInfo *root, List *quals, JoinType jointype)
  *             Set the size estimates for the given base relation.
  *
  * The rel's targetlist and restrictinfo list must have been constructed
- * already.
+ * already, and rel->tuples must be set.
  *
  * We set the following fields of the rel node:
  *     rows: the estimated number of output tuples (after applying
@@ -1907,11 +3065,12 @@ set_baserel_size_estimates(PlannerInfo *root, RelOptInfo *rel)
                clauselist_selectivity(root,
                                                           rel->baserestrictinfo,
                                                           0,
-                                                          JOIN_INNER);
+                                                          JOIN_INNER,
+                                                          NULL);
 
        rel->rows = clamp_row_est(nrows);
 
-       cost_qual_eval(&rel->baserestrictcost, rel->baserestrictinfo);
+       cost_qual_eval(&rel->baserestrictcost, rel->baserestrictinfo, root);
 
        set_rel_width(root, rel);
 }
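
For orientation, a tiny standalone sketch of the computation above: the base relation's row estimate is its raw tuple count times the combined selectivity of its restriction clauses, forced to at least one row and rounded (as clamp_row_est does). toy_baserel_rows and the figures are illustrative assumptions.

#include <math.h>
#include <stdio.h>

/*
 * Toy base-relation estimate: rows = raw tuple count times the combined
 * selectivity of the restriction clauses, clamped to at least one row and
 * rounded to an integer.
 */
static double
toy_baserel_rows(double tuples, double restrict_selec)
{
    double  nrows = tuples * restrict_selec;

    return (nrows <= 1.0) ? 1.0 : rint(nrows);
}

int
main(void)
{
    /* a 1,000,000-row table with a 0.3%-selective WHERE clause -> 3000 rows */
    printf("%g\n", toy_baserel_rows(1000000.0, 0.003));
    return 0;
}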
@@ -1933,11 +3092,6 @@ set_baserel_size_estimates(PlannerInfo *root, RelOptInfo *rel)
  * calculations for each pair of input rels that's encountered, and somehow
  * average the results?  Probably way more trouble than it's worth.)
  *
- * It's important that the results for symmetric JoinTypes be symmetric,
- * eg, (rel1, rel2, JOIN_LEFT) should produce the same result as (rel2,
- * rel1, JOIN_RIGHT).  Also, JOIN_IN should produce the same result as
- * JOIN_UNIQUE_INNER, likewise JOIN_REVERSE_IN == JOIN_UNIQUE_OUTER.
- *
  * We set only the rows field here.  The width field was already set by
  * build_joinrel_tlist, and baserestrictcost is not used for join rels.
  */
@@ -1945,76 +3099,109 @@ void
 set_joinrel_size_estimates(PlannerInfo *root, RelOptInfo *rel,
                                                   RelOptInfo *outer_rel,
                                                   RelOptInfo *inner_rel,
-                                                  JoinType jointype,
+                                                  SpecialJoinInfo *sjinfo,
                                                   List *restrictlist)
 {
-       Selectivity selec;
+       JoinType        jointype = sjinfo->jointype;
+       Selectivity jselec;
+       Selectivity pselec;
        double          nrows;
-       UniquePath *upath;
 
        /*
         * Compute joinclause selectivity.      Note that we are only considering
         * clauses that become restriction clauses at this join level; we are not
         * double-counting them because they were not considered in estimating the
         * sizes of the component rels.
+        *
+        * For an outer join, we have to distinguish the selectivity of the join's
+        * own clauses (JOIN/ON conditions) from any clauses that were "pushed
+        * down".  For inner joins we just count them all as joinclauses.
         */
-       selec = clauselist_selectivity(root,
-                                                                  restrictlist,
-                                                                  0,
-                                                                  jointype);
+       if (IS_OUTER_JOIN(jointype))
+       {
+               List       *joinquals = NIL;
+               List       *pushedquals = NIL;
+               ListCell   *l;
+
+               /* Grovel through the clauses to separate into two lists */
+               foreach(l, restrictlist)
+               {
+                       RestrictInfo *rinfo = (RestrictInfo *) lfirst(l);
+
+                       Assert(IsA(rinfo, RestrictInfo));
+                       if (rinfo->is_pushed_down)
+                               pushedquals = lappend(pushedquals, rinfo);
+                       else
+                               joinquals = lappend(joinquals, rinfo);
+               }
+
+               /* Get the separate selectivities */
+               jselec = clauselist_selectivity(root,
+                                                                               joinquals,
+                                                                               0,
+                                                                               jointype,
+                                                                               sjinfo);
+               pselec = clauselist_selectivity(root,
+                                                                               pushedquals,
+                                                                               0,
+                                                                               jointype,
+                                                                               sjinfo);
+
+               /* Avoid leaking a lot of ListCells */
+               list_free(joinquals);
+               list_free(pushedquals);
+       }
+       else
+       {
+               jselec = clauselist_selectivity(root,
+                                                                               restrictlist,
+                                                                               0,
+                                                                               jointype,
+                                                                               sjinfo);
+               pselec = 0.0;                   /* not used, keep compiler quiet */
+       }
 
        /*
         * Basically, we multiply size of Cartesian product by selectivity.
         *
-        * If we are doing an outer join, take that into account: the output must
-        * be at least as large as the non-nullable input.      (Is there any chance
-        * of being even smarter?)      (XXX this is not really right, because it
-        * assumes all the restriction clauses are join clauses; we should figure
-        * pushed-down clauses separately.)
+        * If we are doing an outer join, take that into account: the joinqual
+        * selectivity has to be clamped using the knowledge that the output must
+        * be at least as large as the non-nullable input.      However, any
+        * pushed-down quals are applied after the outer join, so their
+        * selectivity applies fully.
         *
-        * For JOIN_IN and variants, the Cartesian product is figured with respect
-        * to a unique-ified input, and then we can clamp to the size of the other
-        * input.
+        * For JOIN_SEMI and JOIN_ANTI, the selectivity is defined as the fraction
+        * of LHS rows that have matches, and we apply that straightforwardly.
         */
        switch (jointype)
        {
                case JOIN_INNER:
-                       nrows = outer_rel->rows * inner_rel->rows * selec;
+                       nrows = outer_rel->rows * inner_rel->rows * jselec;
                        break;
                case JOIN_LEFT:
-                       nrows = outer_rel->rows * inner_rel->rows * selec;
+                       nrows = outer_rel->rows * inner_rel->rows * jselec;
                        if (nrows < outer_rel->rows)
                                nrows = outer_rel->rows;
-                       break;
-               case JOIN_RIGHT:
-                       nrows = outer_rel->rows * inner_rel->rows * selec;
-                       if (nrows < inner_rel->rows)
-                               nrows = inner_rel->rows;
+                       nrows *= pselec;
                        break;
                case JOIN_FULL:
-                       nrows = outer_rel->rows * inner_rel->rows * selec;
+                       nrows = outer_rel->rows * inner_rel->rows * jselec;
                        if (nrows < outer_rel->rows)
                                nrows = outer_rel->rows;
                        if (nrows < inner_rel->rows)
                                nrows = inner_rel->rows;
+                       nrows *= pselec;
                        break;
-               case JOIN_IN:
-               case JOIN_UNIQUE_INNER:
-                       upath = create_unique_path(root, inner_rel,
-                                                                          inner_rel->cheapest_total_path);
-                       nrows = outer_rel->rows * upath->rows * selec;
-                       if (nrows > outer_rel->rows)
-                               nrows = outer_rel->rows;
+               case JOIN_SEMI:
+                       nrows = outer_rel->rows * jselec;
+                       /* pselec not used */
                        break;
-               case JOIN_REVERSE_IN:
-               case JOIN_UNIQUE_OUTER:
-                       upath = create_unique_path(root, outer_rel,
-                                                                          outer_rel->cheapest_total_path);
-                       nrows = upath->rows * inner_rel->rows * selec;
-                       if (nrows > inner_rel->rows)
-                               nrows = inner_rel->rows;
+               case JOIN_ANTI:
+                       nrows = outer_rel->rows * (1.0 - jselec);
+                       nrows *= pselec;
                        break;
                default:
+                       /* other values not expected here */
                        elog(ERROR, "unrecognized join type: %d", (int) jointype);
                        nrows = 0;                      /* keep compiler quiet */
                        break;
@@ -2024,59 +3211,73 @@ set_joinrel_size_estimates(PlannerInfo *root, RelOptInfo *rel,
 }
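
As a worked example of the JOIN_LEFT branch above, this sketch scales the Cartesian product by the joinqual selectivity, clamps the result to the outer relation's size, and only then applies the pushed-down-qual selectivity. toy_left_join_rows and the sample figures are assumptions for illustration, not planner code.

#include <stdio.h>

/*
 * Toy LEFT-join estimate: scale the Cartesian product by the joinqual
 * selectivity, clamp to the outer size (every outer row appears at least
 * once), then apply pushed-down quals, which filter after the join.
 */
static double
toy_left_join_rows(double outer_rows, double inner_rows,
                   double jselec, double pselec)
{
    double  nrows = outer_rows * inner_rows * jselec;

    if (nrows < outer_rows)
        nrows = outer_rows;
    return nrows * pselec;
}

int
main(void)
{
    /*
     * 1000 x 500 rows, joinquals select 0.0001 of the pairs, and a
     * pushed-down qual with selectivity 0.5: 1000*500*0.0001 = 50, clamped
     * up to 1000, then * 0.5 = 500 output rows.
     */
    printf("%g\n", toy_left_join_rows(1000.0, 500.0, 0.0001, 0.5));
    return 0;
}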
 
 /*
- * join_in_selectivity
- *       Determines the factor by which a JOIN_IN join's result is expected
- *       to be smaller than an ordinary inner join.
+ * set_subquery_size_estimates
+ *             Set the size estimates for a base relation that is a subquery.
  *
- * 'path' is already filled in except for the cost fields
+ * The rel's targetlist and restrictinfo list must have been constructed
+ * already, and the plan for the subquery must have been completed.
+ * We look at the subquery's plan and PlannerInfo to extract data.
+ *
+ * We set the same fields as set_baserel_size_estimates.
  */
-static Selectivity
-join_in_selectivity(JoinPath *path, PlannerInfo *root)
+void
+set_subquery_size_estimates(PlannerInfo *root, RelOptInfo *rel,
+                                                       PlannerInfo *subroot)
 {
-       RelOptInfo *innerrel;
-       UniquePath *innerunique;
-       Selectivity selec;
-       double          nrows;
+       RangeTblEntry *rte;
+       ListCell   *lc;
 
-       /* Return 1.0 whenever it's not JOIN_IN */
-       if (path->jointype != JOIN_IN)
-               return 1.0;
+       /* Should only be applied to base relations that are subqueries */
+       Assert(rel->relid > 0);
+       rte = planner_rt_fetch(rel->relid, root);
+       Assert(rte->rtekind == RTE_SUBQUERY);
 
-       /*
-        * Return 1.0 if the inner side is already known unique.  The case where
-        * the inner path is already a UniquePath probably cannot happen in
-        * current usage, but check it anyway for completeness.  The interesting
-        * case is where we've determined the inner relation itself is unique,
-        * which we can check by looking at the rows estimate for its UniquePath.
-        */
-       if (IsA(path->innerjoinpath, UniquePath))
-               return 1.0;
-       innerrel = path->innerjoinpath->parent;
-       innerunique = create_unique_path(root,
-                                                                        innerrel,
-                                                                        innerrel->cheapest_total_path);
-       if (innerunique->rows >= innerrel->rows)
-               return 1.0;
+       /* Copy raw number of output rows from subplan */
+       rel->tuples = rel->subplan->plan_rows;
 
        /*
-        * Compute same result set_joinrel_size_estimates would compute for
-        * JOIN_INNER.  Note that we use the input rels' absolute size estimates,
-        * not PATH_ROWS() which might be less; if we used PATH_ROWS() we'd be
-        * double-counting the effects of any join clauses used in input scans.
-        */
-       selec = clauselist_selectivity(root,
-                                                                  path->joinrestrictinfo,
-                                                                  0,
-                                                                  JOIN_INNER);
-       nrows = path->outerjoinpath->parent->rows * innerrel->rows * selec;
-
-       nrows = clamp_row_est(nrows);
-
-       /* See if it's larger than the actual JOIN_IN size estimate */
-       if (nrows > path->path.parent->rows)
-               return path->path.parent->rows / nrows;
-       else
-               return 1.0;
+        * Compute per-output-column width estimates by examining the subquery's
+        * targetlist.  For any output that is a plain Var, get the width estimate
+        * that was made while planning the subquery.  Otherwise, fall back on a
+        * datatype-based estimate.
+        */
+       foreach(lc, subroot->parse->targetList)
+       {
+               TargetEntry *te = (TargetEntry *) lfirst(lc);
+               Node       *texpr = (Node *) te->expr;
+               int32           item_width;
+
+               Assert(IsA(te, TargetEntry));
+               /* junk columns aren't visible to upper query */
+               if (te->resjunk)
+                       continue;
+
+               /*
+                * XXX This currently doesn't work for subqueries containing set
+                * operations, because the Vars in their tlists are bogus references
+                * to the first leaf subquery, which wouldn't give the right answer
+                * even if we could still get to its PlannerInfo.  So fall back on
+                * datatype in that case.
+                */
+               if (IsA(texpr, Var) &&
+                       subroot->parse->setOperations == NULL)
+               {
+                       Var                *var = (Var *) texpr;
+                       RelOptInfo *subrel = find_base_rel(subroot, var->varno);
+
+                       item_width = subrel->attr_widths[var->varattno - subrel->min_attr];
+               }
+               else
+               {
+                       item_width = get_typavgwidth(exprType(texpr), exprTypmod(texpr));
+               }
+               Assert(item_width > 0);
+               Assert(te->resno >= rel->min_attr && te->resno <= rel->max_attr);
+               rel->attr_widths[te->resno - rel->min_attr] = item_width;
+       }
+
+       /* Now estimate number of output rows, etc */
+       set_baserel_size_estimates(root, rel);
 }
 
 /*
@@ -2095,19 +3296,11 @@ set_function_size_estimates(PlannerInfo *root, RelOptInfo *rel)
 
        /* Should only be applied to base relations that are functions */
        Assert(rel->relid > 0);
-       rte = rt_fetch(rel->relid, root->parse->rtable);
+       rte = planner_rt_fetch(rel->relid, root);
        Assert(rte->rtekind == RTE_FUNCTION);
 
-       /*
-        * Estimate number of rows the function itself will return.
-        *
-        * XXX no idea how to do this yet; but we can at least check whether
-        * function returns set or not...
-        */
-       if (expression_returns_set(rte->funcexpr))
-               rel->tuples = 1000;
-       else
-               rel->tuples = 1;
+       /* Estimate number of rows the function itself will return */
+       rel->tuples = clamp_row_est(expression_returns_set_rows(rte->funcexpr));
 
        /* Now estimate number of output rows, etc */
        set_baserel_size_estimates(root, rel);
@@ -2129,7 +3322,7 @@ set_values_size_estimates(PlannerInfo *root, RelOptInfo *rel)
 
        /* Should only be applied to base relations that are values lists */
        Assert(rel->relid > 0);
-       rte = rt_fetch(rel->relid, root->parse->rtable);
+       rte = planner_rt_fetch(rel->relid, root);
        Assert(rte->rtekind == RTE_VALUES);
 
        /*
@@ -2144,16 +3337,88 @@ set_values_size_estimates(PlannerInfo *root, RelOptInfo *rel)
        set_baserel_size_estimates(root, rel);
 }
 
+/*
+ * set_cte_size_estimates
+ *             Set the size estimates for a base relation that is a CTE reference.
+ *
+ * The rel's targetlist and restrictinfo list must have been constructed
+ * already, and we need the completed plan for the CTE (if a regular CTE)
+ * or the non-recursive term (if a self-reference).
+ *
+ * We set the same fields as set_baserel_size_estimates.
+ */
+void
+set_cte_size_estimates(PlannerInfo *root, RelOptInfo *rel, Plan *cteplan)
+{
+       RangeTblEntry *rte;
+
+       /* Should only be applied to base relations that are CTE references */
+       Assert(rel->relid > 0);
+       rte = planner_rt_fetch(rel->relid, root);
+       Assert(rte->rtekind == RTE_CTE);
+
+       if (rte->self_reference)
+       {
+               /*
+                * In a self-reference, arbitrarily assume the average worktable size
+                * is about 10 times the nonrecursive term's size.
+                */
+               rel->tuples = 10 * cteplan->plan_rows;
+       }
+       else
+       {
+               /* Otherwise just believe the CTE plan's output estimate */
+               rel->tuples = cteplan->plan_rows;
+       }
+
+       /* Now estimate number of output rows, etc */
+       set_baserel_size_estimates(root, rel);
+}
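
A hedged one-line sketch of the rule above: a self-referencing CTE scan assumes the worktable averages about ten times the nonrecursive term's rows, while an ordinary CTE reference simply trusts the CTE plan's estimate. toy_cte_tuples is an illustrative name, not a PostgreSQL function.

#include <stdio.h>

/*
 * Toy CTE size rule: a self-reference inside a recursive CTE assumes the
 * worktable averages about ten times the nonrecursive term's rows; an
 * ordinary CTE reference just trusts the CTE plan's estimate.
 */
static double
toy_cte_tuples(double cteplan_rows, int self_reference)
{
    return self_reference ? 10.0 * cteplan_rows : cteplan_rows;
}

int
main(void)
{
    printf("%g\n", toy_cte_tuples(250.0, 1));   /* prints 2500 */
    return 0;
}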
+
+/*
+ * set_foreign_size_estimates
+ *             Set the size estimates for a base relation that is a foreign table.
+ *
+ * There is not a whole lot that we can do here; the foreign-data wrapper
+ * is responsible for producing useful estimates.  We can do a decent job
+ * of estimating baserestrictcost, so we set that, and we also set up width
+ * using what will be purely datatype-driven estimates from the targetlist.
+ * There is no way to do anything sane with the rows value, so we just put
+ * a default estimate and hope that the wrapper can improve on it.     The
+ * wrapper's PlanForeignScan function will be called momentarily.
+ *
+ * The rel's targetlist and restrictinfo list must have been constructed
+ * already.
+ */
+void
+set_foreign_size_estimates(PlannerInfo *root, RelOptInfo *rel)
+{
+       /* Should only be applied to base relations */
+       Assert(rel->relid > 0);
+
+       rel->rows = 1000;                       /* entirely bogus default estimate */
+
+       cost_qual_eval(&rel->baserestrictcost, rel->baserestrictinfo, root);
+
+       set_rel_width(root, rel);
+}
+
 
 /*
  * set_rel_width
  *             Set the estimated output width of a base relation.
  *
+ * The estimated output width is the sum of the per-attribute width estimates
+ * for the actually-referenced columns, plus any PHVs or other expressions
+ * that have to be calculated at this relation.  This is the amount of data
+ * we'd need to pass upwards in case of a sort, hash, etc.
+ *
  * NB: this works best on plain relations because it prefers to look at
- * real Vars.  It will fail to make use of pg_statistic info when applied
- * to a subquery relation, even if the subquery outputs are simple vars
- * that we could have gotten info for. Is it worth trying to be smarter
- * about subqueries?
+ * real Vars.  For subqueries, set_subquery_size_estimates will already have
+ * copied up whatever per-column estimates were made within the subquery,
+ * and for other types of rels there isn't much we can do anyway.  We fall
+ * back on (fairly stupid) datatype-based width estimates if we can't get
+ * any better number.
  *
  * The per-attribute width estimates are cached for possible re-use while
  * building join relations.
@@ -2161,55 +3426,122 @@ set_values_size_estimates(PlannerInfo *root, RelOptInfo *rel)
 static void
 set_rel_width(PlannerInfo *root, RelOptInfo *rel)
 {
+       Oid                     reloid = planner_rt_fetch(rel->relid, root)->relid;
        int32           tuple_width = 0;
-       ListCell   *tllist;
+       bool            have_wholerow_var = false;
+       ListCell   *lc;
 
-       foreach(tllist, rel->reltargetlist)
+       foreach(lc, rel->reltargetlist)
        {
-               Var                *var = (Var *) lfirst(tllist);
-               int                     ndx;
-               Oid                     relid;
-               int32           item_width;
+               Node       *node = (Node *) lfirst(lc);
 
-               /* For now, punt on whole-row child Vars */
-               if (!IsA(var, Var))
+               if (IsA(node, Var))
                {
-                       tuple_width += 32;      /* arbitrary */
-                       continue;
-               }
+                       Var                *var = (Var *) node;
+                       int                     ndx;
+                       int32           item_width;
 
-               ndx = var->varattno - rel->min_attr;
+                       Assert(var->varno == rel->relid);
+                       Assert(var->varattno >= rel->min_attr);
+                       Assert(var->varattno <= rel->max_attr);
 
-               /*
-                * The width probably hasn't been cached yet, but may as well check
-                */
-               if (rel->attr_widths[ndx] > 0)
-               {
-                       tuple_width += rel->attr_widths[ndx];
-                       continue;
-               }
+                       ndx = var->varattno - rel->min_attr;
 
-               relid = getrelid(var->varno, root->parse->rtable);
-               if (relid != InvalidOid)
-               {
-                       item_width = get_attavgwidth(relid, var->varattno);
-                       if (item_width > 0)
+                       /*
+                        * If it's a whole-row Var, we'll deal with it below after we have
+                        * already cached as many attr widths as possible.
+                        */
+                       if (var->varattno == 0)
+                       {
+                               have_wholerow_var = true;
+                               continue;
+                       }
+
+                       /*
+                        * The width may have been cached already (especially if it's a
+                        * subquery), so don't duplicate effort.
+                        */
+                       if (rel->attr_widths[ndx] > 0)
                        {
-                               rel->attr_widths[ndx] = item_width;
-                               tuple_width += item_width;
+                               tuple_width += rel->attr_widths[ndx];
                                continue;
                        }
+
+                       /* Try to get column width from statistics */
+                       if (reloid != InvalidOid && var->varattno > 0)
+                       {
+                               item_width = get_attavgwidth(reloid, var->varattno);
+                               if (item_width > 0)
+                               {
+                                       rel->attr_widths[ndx] = item_width;
+                                       tuple_width += item_width;
+                                       continue;
+                               }
+                       }
+
+                       /*
+                        * Not a plain relation, or can't find statistics for it. Estimate
+                        * using just the type info.
+                        */
+                       item_width = get_typavgwidth(var->vartype, var->vartypmod);
+                       Assert(item_width > 0);
+                       rel->attr_widths[ndx] = item_width;
+                       tuple_width += item_width;
+               }
+               else if (IsA(node, PlaceHolderVar))
+               {
+                       PlaceHolderVar *phv = (PlaceHolderVar *) node;
+                       PlaceHolderInfo *phinfo = find_placeholder_info(root, phv);
+
+                       tuple_width += phinfo->ph_width;
+               }
+               else
+               {
+                       /*
+                        * We could be looking at an expression pulled up from a subquery,
+                        * or a ROW() representing a whole-row child Var, etc.  Do what we
+                        * can using the expression type information.
+                        */
+                       int32           item_width;
+
+                       item_width = get_typavgwidth(exprType(node), exprTypmod(node));
+                       Assert(item_width > 0);
+                       tuple_width += item_width;
+               }
+       }
+
+       /*
+        * If we have a whole-row reference, estimate its width as the sum of
+        * per-column widths plus sizeof(HeapTupleHeaderData).
+        */
+       if (have_wholerow_var)
+       {
+               int32           wholerow_width = sizeof(HeapTupleHeaderData);
+
+               if (reloid != InvalidOid)
+               {
+                       /* Real relation, so estimate true tuple width */
+                       wholerow_width += get_relation_data_width(reloid,
+                                                                                  rel->attr_widths - rel->min_attr);
+               }
+               else
+               {
+                       /* Do what we can with info for a phony rel */
+                       AttrNumber      i;
+
+                       for (i = 1; i <= rel->max_attr; i++)
+                               wholerow_width += rel->attr_widths[i - rel->min_attr];
                }
 
+               rel->attr_widths[0 - rel->min_attr] = wholerow_width;
+
                /*
-                * Not a plain relation, or can't find statistics for it. Estimate
-                * using just the type info.
+                * Include the whole-row Var as part of the output tuple.  Yes, that
+                * really is what happens at runtime.
                 */
-               item_width = get_typavgwidth(var->vartype, var->vartypmod);
-               Assert(item_width > 0);
-               rel->attr_widths[ndx] = item_width;
-               tuple_width += item_width;
+               tuple_width += wholerow_width;
        }
+
        Assert(tuple_width >= 0);
        rel->width = tuple_width;
 }
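
To illustrate the width accounting above, a small standalone sketch: sum the referenced columns' average widths, and if a whole-row Var is also referenced, add a tuple header plus all column widths on top. toy_rel_width and the 23-byte header figure are assumptions standing in for the real per-column statistics and sizeof(HeapTupleHeaderData).

#include <stdio.h>

/*
 * Toy width estimate: sum the referenced columns' average widths; if a
 * whole-row Var is referenced as well, add a tuple header plus all column
 * widths on top of that.  For simplicity this assumes every column is
 * referenced, which the real code does not require.
 */
static int
toy_rel_width(const int *attr_widths, int natts, int have_wholerow_var)
{
    int     width = 0;
    int     i;

    for (i = 0; i < natts; i++)
        width += attr_widths[i];
    if (have_wholerow_var)
    {
        int     wholerow = 23;      /* assumed tuple-header size */

        for (i = 0; i < natts; i++)
            wholerow += attr_widths[i];
        width += wholerow;
    }
    return width;
}

int
main(void)
{
    int     widths[] = {4, 8, 32};  /* e.g. int4, int8, and a short text column */

    /* 44 bytes of columns + (23 + 44) for the whole-row Var = 111 */
    printf("%d\n", toy_rel_width(widths, 3, 1));
    return 0;
}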