diff --git a/src/backend/optimizer/path/costsize.c b/src/backend/optimizer/path/costsize.c
index 1f8f62314e5a0b3c4d06cc286f57a20dc8606d73..bb38768bd4358f72896e2d2c549bbd64dedcd24d 100644
--- a/src/backend/optimizer/path/costsize.c
+++ b/src/backend/optimizer/path/costsize.c
  * detail.     Note that all of these parameters are user-settable, in case
  * the default values are drastically off for a particular platform.
  *
+ * seq_page_cost and random_page_cost can also be overridden for an individual
+ * tablespace, in case some data is on a fast disk and other data is on a slow
+ * disk.  Per-tablespace overrides never apply to temporary work files such as
+ * an external sort or a materialize node that overflows work_mem.
+ *
  * We compute two separate costs for each path:
  *             total_cost: total estimated cost to fetch all tuples
  *             startup_cost: cost that is expended before first tuple is fetched
  * the non-cost fields of the passed XXXPath to be filled in.
  *
  *
- * Portions Copyright (c) 1996-2009, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1996-2011, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- *       $PostgreSQL: pgsql/src/backend/optimizer/path/costsize.c,v 1.203 2009/01/01 17:23:43 momjian Exp $
+ *       src/backend/optimizer/path/costsize.c
  *
  *-------------------------------------------------------------------------
  */
@@ -63,6 +68,7 @@
 
 #include <math.h>
 
+#include "executor/executor.h"
 #include "executor/nodeHash.h"
 #include "miscadmin.h"
 #include "nodes/nodeFuncs.h"
 #include "optimizer/cost.h"
 #include "optimizer/pathnode.h"
 #include "optimizer/placeholder.h"
+#include "optimizer/plancat.h"
 #include "optimizer/planmain.h"
+#include "optimizer/restrictinfo.h"
 #include "parser/parsetree.h"
 #include "utils/lsyscache.h"
 #include "utils/selfuncs.h"
+#include "utils/spccache.h"
 #include "utils/tuplesort.h"
 
 
@@ -97,7 +106,7 @@ double               cpu_operator_cost = DEFAULT_CPU_OPERATOR_COST;
 
 int                    effective_cache_size = DEFAULT_EFFECTIVE_CACHE_SIZE;
 
-Cost           disable_cost = 100000000.0;
+Cost           disable_cost = 1.0e10;
 
 bool           enable_seqscan = true;
 bool           enable_indexscan = true;
@@ -106,6 +115,7 @@ bool                enable_tidscan = true;
 bool           enable_sort = true;
 bool           enable_hashagg = true;
 bool           enable_nestloop = true;
+bool           enable_material = true;
 bool           enable_mergejoin = true;
 bool           enable_hashjoin = true;
 
@@ -118,9 +128,16 @@ typedef struct
 static MergeScanSelCache *cached_scansel(PlannerInfo *root,
                           RestrictInfo *rinfo,
                           PathKey *pathkey);
+static void cost_rescan(PlannerInfo *root, Path *path,
+                       Cost *rescan_startup_cost, Cost *rescan_total_cost);
 static bool cost_qual_eval_walker(Node *node, cost_qual_eval_context *context);
+static bool adjust_semi_join(PlannerInfo *root, JoinPath *path,
+                                SpecialJoinInfo *sjinfo,
+                                Selectivity *outer_match_frac,
+                                Selectivity *match_count,
+                                bool *indexed_join_quals);
 static double approx_tuple_count(PlannerInfo *root, JoinPath *path,
-                                                                List *quals, SpecialJoinInfo *sjinfo);
+                                  List *quals);
 static void set_rel_width(PlannerInfo *root, RelOptInfo *rel);
 static double relation_byte_size(double tuples, int width);
 static double page_size(double tuples, int width);
@@ -155,6 +172,7 @@ void
 cost_seqscan(Path *path, PlannerInfo *root,
                         RelOptInfo *baserel)
 {
+       double          spc_seq_page_cost;
        Cost            startup_cost = 0;
        Cost            run_cost = 0;
        Cost            cpu_per_tuple;
@@ -166,10 +184,15 @@ cost_seqscan(Path *path, PlannerInfo *root,
        if (!enable_seqscan)
                startup_cost += disable_cost;
 
+       /* fetch estimated page cost for tablespace containing table */
+       get_tablespace_page_costs(baserel->reltablespace,
+                                                         NULL,
+                                                         &spc_seq_page_cost);
+
        /*
         * disk costs
         */
-       run_cost += seq_page_cost * baserel->pages;
+       run_cost += spc_seq_page_cost * baserel->pages;
 
        /* CPU costs */
        startup_cost += baserel->baserestrictcost.startup;
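/*
 * Editorial illustration -- not part of the patch.  A minimal standalone
 * sketch of the seqscan cost model above, assuming spc_seq_page_cost has
 * already been fetched via get_tablespace_page_costs().  The demo_* names
 * and the example constants are invented for the demo; disable_cost and the
 * quals' startup component are left out for brevity.
 */
#include <stdio.h>

static double
demo_seqscan_run_cost(double pages, double tuples,
                      double spc_seq_page_cost,    /* per-tablespace value */
                      double cpu_tuple_cost,
                      double qual_cost_per_tuple)
{
    double      run_cost = 0.0;

    /* disk: one sequential fetch per heap page */
    run_cost += spc_seq_page_cost * pages;
    /* CPU: per-tuple overhead plus restriction-qual evaluation */
    run_cost += (cpu_tuple_cost + qual_cost_per_tuple) * tuples;
    return run_cost;
}

int
main(void)
{
    /* 10000-page, 1e6-tuple table on a fast tablespace (seq cost 0.5) */
    printf("%.1f\n", demo_seqscan_run_cost(10000, 1e6, 0.5, 0.01, 0.0025));
    return 0;
}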
@@ -186,6 +209,7 @@ cost_seqscan(Path *path, PlannerInfo *root,
  *
  * 'index' is the index to be used
  * 'indexQuals' is the list of applicable qual clauses (implicit AND semantics)
+ * 'indexOrderBys' is the list of ORDER BY operators for amcanorderbyop indexes
  * 'outer_rel' is the outer relation when we are considering using the index
  *             scan as the inside of a nestloop join (hence, some of the indexQuals
  *             are join clauses, and we should expect repeated scans of the index);
@@ -195,18 +219,19 @@ cost_seqscan(Path *path, PlannerInfo *root,
  * additional fields of the IndexPath besides startup_cost and total_cost.
  * These fields are needed if the IndexPath is used in a BitmapIndexScan.
  *
+ * indexQuals is a list of RestrictInfo nodes, but indexOrderBys is a list of
+ * bare expressions.
+ *
  * NOTE: 'indexQuals' must contain only clauses usable as index restrictions.
  * Any additional quals evaluated as qpquals may reduce the number of returned
  * tuples, but they won't reduce the number of tuples we have to fetch from
  * the table, so they don't reduce the scan cost.
- *
- * NOTE: as of 8.0, indexQuals is a list of RestrictInfo nodes, where formerly
- * it was a list of bare clause expressions.
  */
 void
 cost_index(IndexPath *path, PlannerInfo *root,
                   IndexOptInfo *index,
                   List *indexQuals,
+                  List *indexOrderBys,
                   RelOptInfo *outer_rel)
 {
        RelOptInfo *baserel = index->rel;
@@ -217,6 +242,8 @@ cost_index(IndexPath *path, PlannerInfo *root,
        Selectivity indexSelectivity;
        double          indexCorrelation,
                                csquared;
+       double          spc_seq_page_cost,
+                               spc_random_page_cost;
        Cost            min_IO_cost,
                                max_IO_cost;
        Cost            cpu_per_tuple;
@@ -238,10 +265,11 @@ cost_index(IndexPath *path, PlannerInfo *root,
         * the fraction of main-table tuples we will have to retrieve) and its
         * correlation to the main-table tuple order.
         */
-       OidFunctionCall8(index->amcostestimate,
+       OidFunctionCall9(index->amcostestimate,
                                         PointerGetDatum(root),
                                         PointerGetDatum(index),
                                         PointerGetDatum(indexQuals),
+                                        PointerGetDatum(indexOrderBys),
                                         PointerGetDatum(outer_rel),
                                         PointerGetDatum(&indexStartupCost),
                                         PointerGetDatum(&indexTotalCost),
@@ -263,13 +291,18 @@ cost_index(IndexPath *path, PlannerInfo *root,
        /* estimate number of main-table tuples fetched */
        tuples_fetched = clamp_row_est(indexSelectivity * baserel->tuples);
 
+       /* fetch estimated page costs for tablespace containing table */
+       get_tablespace_page_costs(baserel->reltablespace,
+                                                         &spc_random_page_cost,
+                                                         &spc_seq_page_cost);
+
        /*----------
         * Estimate number of main-table pages fetched, and compute I/O cost.
         *
         * When the index ordering is uncorrelated with the table ordering,
         * we use an approximation proposed by Mackert and Lohman (see
         * index_pages_fetched() for details) to compute the number of pages
-        * fetched, and then charge random_page_cost per page fetched.
+        * fetched, and then charge spc_random_page_cost per page fetched.
         *
         * When the index ordering is exactly correlated with the table ordering
         * (just after a CLUSTER, for example), the number of pages fetched should
@@ -277,7 +310,7 @@ cost_index(IndexPath *path, PlannerInfo *root,
         * will be sequential fetches, not the random fetches that occur in the
         * uncorrelated case.  So if the number of pages is more than 1, we
         * ought to charge
-        *              random_page_cost + (pages_fetched - 1) * seq_page_cost
+        *              spc_random_page_cost + (pages_fetched - 1) * spc_seq_page_cost
         * For partially-correlated indexes, we ought to charge somewhere between
         * these two estimates.  We currently interpolate linearly between the
         * estimates based on the correlation squared (XXX is that appropriate?).
@@ -300,7 +333,7 @@ cost_index(IndexPath *path, PlannerInfo *root,
                                                                                        (double) index->pages,
                                                                                        root);
 
-               max_IO_cost = (pages_fetched * random_page_cost) / num_scans;
+               max_IO_cost = (pages_fetched * spc_random_page_cost) / num_scans;
 
                /*
                 * In the perfectly correlated case, the number of pages touched by
@@ -319,7 +352,7 @@ cost_index(IndexPath *path, PlannerInfo *root,
                                                                                        (double) index->pages,
                                                                                        root);
 
-               min_IO_cost = (pages_fetched * random_page_cost) / num_scans;
+               min_IO_cost = (pages_fetched * spc_random_page_cost) / num_scans;
        }
        else
        {
@@ -333,13 +366,13 @@ cost_index(IndexPath *path, PlannerInfo *root,
                                                                                        root);
 
                /* max_IO_cost is for the perfectly uncorrelated case (csquared=0) */
-               max_IO_cost = pages_fetched * random_page_cost;
+               max_IO_cost = pages_fetched * spc_random_page_cost;
 
                /* min_IO_cost is for the perfectly correlated case (csquared=1) */
                pages_fetched = ceil(indexSelectivity * (double) baserel->pages);
-               min_IO_cost = random_page_cost;
+               min_IO_cost = spc_random_page_cost;
                if (pages_fetched > 1)
-                       min_IO_cost += (pages_fetched - 1) * seq_page_cost;
+                       min_IO_cost += (pages_fetched - 1) * spc_seq_page_cost;
        }
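/*
 * Editorial illustration -- not part of the patch.  As the comment above
 * says, cost_index() interpolates linearly between the two I/O estimates
 * based on the correlation squared (the interpolation itself happens just
 * past this hunk).  A minimal sketch of that step, with invented names:
 */
static double
demo_index_io_cost(double min_IO_cost, double max_IO_cost,
                   double indexCorrelation)
{
    double      csquared = indexCorrelation * indexCorrelation;

    /* csquared = 0 -> fully random I/O; csquared = 1 -> fully sequential */
    return max_IO_cost + csquared * (min_IO_cost - max_IO_cost);
}
/* e.g. min_IO_cost = 100, max_IO_cost = 4000, correlation = 0.5 -> 3025 */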
 
        /*
@@ -544,6 +577,8 @@ cost_bitmap_heap_scan(Path *path, PlannerInfo *root, RelOptInfo *baserel,
        Cost            cost_per_page;
        double          tuples_fetched;
        double          pages_fetched;
+       double          spc_seq_page_cost,
+                               spc_random_page_cost;
        double          T;
 
        /* Should only be applied to base relations */
@@ -562,6 +597,11 @@ cost_bitmap_heap_scan(Path *path, PlannerInfo *root, RelOptInfo *baserel,
 
        startup_cost += indexTotalCost;
 
+       /* Fetch estimated page costs for tablespace containing table. */
+       get_tablespace_page_costs(baserel->reltablespace,
+                                                         &spc_random_page_cost,
+                                                         &spc_seq_page_cost);
+
        /*
         * Estimate number of main-table pages fetched.
         */
@@ -600,17 +640,18 @@ cost_bitmap_heap_scan(Path *path, PlannerInfo *root, RelOptInfo *baserel,
                pages_fetched = ceil(pages_fetched);
 
        /*
-        * For small numbers of pages we should charge random_page_cost apiece,
-        * while if nearly all the table's pages are being read, it's more
-        * appropriate to charge seq_page_cost apiece.  The effect is nonlinear,
-        * too. For lack of a better idea, interpolate like this to determine the
-        * cost per page.
+        * For small numbers of pages we should charge spc_random_page_cost
+        * apiece, while if nearly all the table's pages are being read, it's more
+        * appropriate to charge spc_seq_page_cost apiece.      The effect is
+        * nonlinear, too. For lack of a better idea, interpolate like this to
+        * determine the cost per page.
         */
        if (pages_fetched >= 2.0)
-               cost_per_page = random_page_cost -
-                       (random_page_cost - seq_page_cost) * sqrt(pages_fetched / T);
+               cost_per_page = spc_random_page_cost -
+                       (spc_random_page_cost - spc_seq_page_cost)
+                       * sqrt(pages_fetched / T);
        else
-               cost_per_page = random_page_cost;
+               cost_per_page = spc_random_page_cost;
 
        run_cost += pages_fetched * cost_per_page;
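/*
 * Editorial illustration -- not part of the patch.  The per-page cost
 * interpolation above, as a standalone function.  Names are invented for the
 * demo; T is the total number of pages in the table, clamped to >= 1 by the
 * caller.
 */
#include <math.h>

static double
demo_bitmap_cost_per_page(double pages_fetched, double T,
                          double spc_random_page_cost,
                          double spc_seq_page_cost)
{
    if (pages_fetched >= 2.0)
        return spc_random_page_cost -
            (spc_random_page_cost - spc_seq_page_cost) *
            sqrt(pages_fetched / T);
    return spc_random_page_cost;    /* one page: charge a full random fetch */
}
/* e.g. fetching 2500 of 10000 pages at costs 4.0/1.0 -> 2.5 per page */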
 
@@ -774,6 +815,7 @@ cost_tidscan(Path *path, PlannerInfo *root,
        QualCost        tid_qual_cost;
        int                     ntuples;
        ListCell   *l;
+       double          spc_random_page_cost;
 
        /* Should only be applied to base relations */
        Assert(baserel->relid > 0);
@@ -826,8 +868,13 @@ cost_tidscan(Path *path, PlannerInfo *root,
         */
        cost_qual_eval(&tid_qual_cost, tidquals, root);
 
+       /* fetch estimated page cost for tablespace containing table */
+       get_tablespace_page_costs(baserel->reltablespace,
+                                                         &spc_random_page_cost,
+                                                         NULL);
+
        /* disk costs --- assume each tuple on a different page */
-       run_cost += random_page_cost * ntuples;
+       run_cost += spc_random_page_cost * ntuples;
 
        /* CPU costs */
        startup_cost += baserel->baserestrictcost.startup +
@@ -889,15 +936,26 @@ cost_functionscan(Path *path, PlannerInfo *root, RelOptInfo *baserel)
        rte = planner_rt_fetch(baserel->relid, root);
        Assert(rte->rtekind == RTE_FUNCTION);
 
-       /* Estimate costs of executing the function expression */
+       /*
+        * Estimate costs of executing the function expression.
+        *
+        * Currently, nodeFunctionscan.c always executes the function to
+        * completion before returning any rows, and caches the results in a
+        * tuplestore.  So the function eval cost is all startup cost, and per-row
+        * costs are minimal.
+        *
+        * XXX in principle we ought to charge tuplestore spill costs if the
+        * number of rows is large.  However, given how phony our rowcount
+        * estimates for functions tend to be, there's not a lot of point in that
+        * refinement right now.
+        */
        cost_qual_eval_node(&exprcost, rte->funcexpr, root);
 
-       startup_cost += exprcost.startup;
-       cpu_per_tuple = exprcost.per_tuple;
+       startup_cost += exprcost.startup + exprcost.per_tuple;
 
        /* Add scanning CPU costs */
        startup_cost += baserel->baserestrictcost.startup;
-       cpu_per_tuple += cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
+       cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
        run_cost += cpu_per_tuple * baserel->tuples;
 
        path->startup_cost = startup_cost;
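/*
 * Editorial illustration -- not part of the patch.  A simplified sketch of
 * the function-scan costing described above: because nodeFunctionscan.c runs
 * the function to completion into a tuplestore before returning anything,
 * the whole expression cost lands in startup.  Names are invented and the
 * baserestrictinfo costs are omitted for brevity.
 */
static void
demo_functionscan_cost(double expr_startup, double expr_per_tuple,
                       double tuples, double cpu_tuple_cost,
                       double *startup_cost, double *total_cost)
{
    /* the function expression is evaluated once, up front */
    *startup_cost = expr_startup + expr_per_tuple;
    /* returning rows from the tuplestore costs only cpu_tuple_cost apiece */
    *total_cost = *startup_cost + cpu_tuple_cost * tuples;
}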
@@ -940,7 +998,7 @@ cost_valuesscan(Path *path, PlannerInfo *root, RelOptInfo *baserel)
  *
  * Note: this is used for both self-reference and regular CTEs; the
  * possible cost differences are below the threshold of what we could
- * estimate accurately anyway.  Note that the costs of evaluating the
+ * estimate accurately anyway. Note that the costs of evaluating the
  * referenced CTE query are added into the final plan as initplan costs,
  * and should NOT be counted here.
  */
@@ -992,9 +1050,9 @@ cost_recursive_union(Plan *runion, Plan *nrterm, Plan *rterm)
 
        /*
         * We arbitrarily assume that about 10 recursive iterations will be
-        * needed, and that we've managed to get a good fix on the cost and
-        * output size of each one of them.  These are mighty shaky assumptions
-        * but it's hard to see how to do better.
+        * needed, and that we've managed to get a good fix on the cost and output
+        * size of each one of them.  These are mighty shaky assumptions but it's
+        * hard to see how to do better.
         */
        total_cost += 10 * rterm->total_cost;
        total_rows += 10 * rterm->plan_rows;
@@ -1017,33 +1075,37 @@ cost_recursive_union(Plan *runion, Plan *nrterm, Plan *rterm)
  *       Determines and returns the cost of sorting a relation, including
  *       the cost of reading the input data.
  *
- * If the total volume of data to sort is less than work_mem, we will do
+ * If the total volume of data to sort is less than sort_mem, we will do
  * an in-memory sort, which requires no I/O and about t*log2(t) tuple
  * comparisons for t tuples.
  *
- * If the total volume exceeds work_mem, we switch to a tape-style merge
+ * If the total volume exceeds sort_mem, we switch to a tape-style merge
  * algorithm.  There will still be about t*log2(t) tuple comparisons in
  * total, but we will also need to write and read each tuple once per
  * merge pass. We expect about ceil(logM(r)) merge passes where r is the
  * number of initial runs formed and M is the merge order used by tuplesort.c.
- * Since the average initial run should be about twice work_mem, we have
- *             disk traffic = 2 * relsize * ceil(logM(p / (2*work_mem)))
+ * Since the average initial run should be about twice sort_mem, we have
+ *             disk traffic = 2 * relsize * ceil(logM(p / (2*sort_mem)))
  *             cpu = comparison_cost * t * log2(t)
  *
  * If the sort is bounded (i.e., only the first k result tuples are needed)
- * and k tuples can fit into work_mem, we use a heap method that keeps only
+ * and k tuples can fit into sort_mem, we use a heap method that keeps only
  * k tuples in the heap; this will require about t*log2(k) tuple comparisons.
  *
  * The disk traffic is assumed to be 3/4ths sequential and 1/4th random
  * accesses (XXX can't we refine that guess?)
  *
- * We charge two operator evals per tuple comparison, which should be in
- * the right ballpark in most cases.
+ * By default, we charge two operator evals per tuple comparison, which should
+ * be in the right ballpark in most cases.     The caller can tweak this by
+ * specifying nonzero comparison_cost; typically that's used for any extra
+ * work that has to be done to prepare the inputs to the comparison operators.
  *
  * 'pathkeys' is a list of sort keys
  * 'input_cost' is the total cost for reading the input data
  * 'tuples' is the number of tuples in the relation
  * 'width' is the average tuple width in bytes
+ * 'comparison_cost' is the extra cost per comparison, if any
+ * 'sort_mem' is the number of kilobytes of work memory allowed for the sort
  * 'limit_tuples' is the bound on the number of output tuples; -1 if no bound
  *
  * NOTE: some callers currently pass NIL for pathkeys because they
@@ -1056,6 +1118,7 @@ cost_recursive_union(Plan *runion, Plan *nrterm, Plan *rterm)
 void
 cost_sort(Path *path, PlannerInfo *root,
                  List *pathkeys, Cost input_cost, double tuples, int width,
+                 Cost comparison_cost, int sort_mem,
                  double limit_tuples)
 {
        Cost            startup_cost = input_cost;
@@ -1063,7 +1126,7 @@ cost_sort(Path *path, PlannerInfo *root,
        double          input_bytes = relation_byte_size(tuples, width);
        double          output_bytes;
        double          output_tuples;
-       long            work_mem_bytes = work_mem * 1024L;
+       long            sort_mem_bytes = sort_mem * 1024L;
 
        if (!enable_sort)
                startup_cost += disable_cost;
@@ -1075,6 +1138,9 @@ cost_sort(Path *path, PlannerInfo *root,
        if (tuples < 2.0)
                tuples = 2.0;
 
+       /* Include the default cost-per-comparison */
+       comparison_cost += 2.0 * cpu_operator_cost;
+
        /* Do we have a useful LIMIT? */
        if (limit_tuples > 0 && limit_tuples < tuples)
        {
@@ -1087,24 +1153,23 @@ cost_sort(Path *path, PlannerInfo *root,
                output_bytes = input_bytes;
        }
 
-       if (output_bytes > work_mem_bytes)
+       if (output_bytes > sort_mem_bytes)
        {
                /*
                 * We'll have to use a disk-based sort of all the tuples
                 */
                double          npages = ceil(input_bytes / BLCKSZ);
-               double          nruns = (input_bytes / work_mem_bytes) * 0.5;
-               double          mergeorder = tuplesort_merge_order(work_mem_bytes);
+               double          nruns = (input_bytes / sort_mem_bytes) * 0.5;
+               double          mergeorder = tuplesort_merge_order(sort_mem_bytes);
                double          log_runs;
                double          npageaccesses;
 
                /*
                 * CPU costs
                 *
-                * Assume about two operator evals per tuple comparison and N log2 N
-                * comparisons
+                * Assume about N log2 N comparisons
                 */
-               startup_cost += 2.0 * cpu_operator_cost * tuples * LOG2(tuples);
+               startup_cost += comparison_cost * tuples * LOG2(tuples);
 
                /* Disk costs */
 
@@ -1118,7 +1183,7 @@ cost_sort(Path *path, PlannerInfo *root,
                startup_cost += npageaccesses *
                        (seq_page_cost * 0.75 + random_page_cost * 0.25);
        }
-       else if (tuples > 2 * output_tuples || input_bytes > work_mem_bytes)
+       else if (tuples > 2 * output_tuples || input_bytes > sort_mem_bytes)
        {
                /*
                 * We'll use a bounded heap-sort keeping just K tuples in memory, for
@@ -1126,17 +1191,19 @@ cost_sort(Path *path, PlannerInfo *root,
                 * factor is a bit higher than for quicksort.  Tweak it so that the
                 * cost curve is continuous at the crossover point.
                 */
-               startup_cost += 2.0 * cpu_operator_cost * tuples * LOG2(2.0 * output_tuples);
+               startup_cost += comparison_cost * tuples * LOG2(2.0 * output_tuples);
        }
        else
        {
                /* We'll use plain quicksort on all the input tuples */
-               startup_cost += 2.0 * cpu_operator_cost * tuples * LOG2(tuples);
+               startup_cost += comparison_cost * tuples * LOG2(tuples);
        }
 
        /*
         * Also charge a small amount (arbitrarily set equal to operator cost) per
-        * extracted tuple.  Note it's correct to use tuples not output_tuples
+        * extracted tuple.  We don't charge cpu_tuple_cost because a Sort node
+        * doesn't do qual-checking or projection, so it has less overhead than
+        * most plan nodes.  Note it's correct to use tuples not output_tuples
         * here --- the upper LIMIT will pro-rate the run cost so we'd be double
         * counting the LIMIT otherwise.
         */
@@ -1147,20 +1214,67 @@ cost_sort(Path *path, PlannerInfo *root,
 }
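/*
 * Editorial illustration -- not part of the patch.  The disk-sort branch of
 * cost_sort(), rebuilt as a standalone sketch from the formulas in the
 * header comment above (disk traffic = 2 * relsize * ceil(logM(r)), split
 * 3/4 sequential and 1/4 random).  BLCKSZ is assumed to be 8192 here, the
 * demo_* names are invented, and merge_order stands in for what
 * tuplesort_merge_order() supplies in the real code.
 */
#include <math.h>

static double
demo_external_sort_cost(double tuples, double input_bytes,
                        double sort_mem_bytes, double merge_order,
                        double comparison_cost,
                        double seq_page_cost, double random_page_cost)
{
    double      npages = ceil(input_bytes / 8192.0);
    double      nruns = (input_bytes / sort_mem_bytes) * 0.5;
    double      log_runs;
    double      npageaccesses;
    /* CPU: about N log2 N comparisons */
    double      cost = comparison_cost * tuples * (log(tuples) / log(2.0));

    /* number of merge passes needed to combine the initial runs */
    if (nruns > merge_order)
        log_runs = ceil(log(nruns) / log(merge_order));
    else
        log_runs = 1.0;
    /* each pass writes and reads every page once */
    npageaccesses = 2.0 * npages * log_runs;
    cost += npageaccesses * (seq_page_cost * 0.75 + random_page_cost * 0.25);
    return cost;
}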
 
 /*
- * sort_exceeds_work_mem
- *       Given a finished Sort plan node, detect whether it is expected to
- *       spill to disk (ie, will need more than work_mem workspace)
+ * cost_merge_append
+ *       Determines and returns the cost of a MergeAppend node.
+ *
+ * MergeAppend merges several pre-sorted input streams, using a heap that
+ * at any given instant holds the next tuple from each stream. If there
+ * are N streams, we need about N*log2(N) tuple comparisons to construct
+ * the heap at startup, and then for each output tuple, about log2(N)
+ * comparisons to delete the top heap entry and another log2(N) comparisons
+ * to insert its successor from the same stream.
+ *
+ * (The effective value of N will drop once some of the input streams are
+ * exhausted, but it seems unlikely to be worth trying to account for that.)
  *
- * This assumes there will be no available LIMIT.
+ * The heap is never spilled to disk, since we assume N is not very large.
+ * So this is much simpler than cost_sort.
+ *
+ * As in cost_sort, we charge two operator evals per tuple comparison.
+ *
+ * 'pathkeys' is a list of sort keys
+ * 'n_streams' is the number of input streams
+ * 'input_startup_cost' is the sum of the input streams' startup costs
+ * 'input_total_cost' is the sum of the input streams' total costs
+ * 'tuples' is the number of tuples in all the streams
  */
-bool
-sort_exceeds_work_mem(Sort *sort)
+void
+cost_merge_append(Path *path, PlannerInfo *root,
+                                 List *pathkeys, int n_streams,
+                                 Cost input_startup_cost, Cost input_total_cost,
+                                 double tuples)
 {
-       double          input_bytes = relation_byte_size(sort->plan.plan_rows,
-                                                                                                sort->plan.plan_width);
-       long            work_mem_bytes = work_mem * 1024L;
+       Cost            startup_cost = 0;
+       Cost            run_cost = 0;
+       Cost            comparison_cost;
+       double          N;
+       double          logN;
+
+       /*
+        * Avoid log(0)...
+        */
+       N = (n_streams < 2) ? 2.0 : (double) n_streams;
+       logN = LOG2(N);
+
+       /* Assumed cost per tuple comparison */
+       comparison_cost = 2.0 * cpu_operator_cost;
+
+       /* Heap creation cost */
+       startup_cost += comparison_cost * N * logN;
+
+       /* Per-tuple heap maintenance cost */
+       run_cost += tuples * comparison_cost * 2.0 * logN;
+
+       /*
+        * Also charge a small amount (arbitrarily set equal to operator cost) per
+        * extracted tuple.  We don't charge cpu_tuple_cost because a MergeAppend
+        * node doesn't do qual-checking or projection, so it has less overhead
+        * than most plan nodes.
+        */
+       run_cost += cpu_operator_cost * tuples;
 
-       return (input_bytes > work_mem_bytes);
+       path->startup_cost = startup_cost + input_startup_cost;
+       path->total_cost = startup_cost + run_cost + input_total_cost;
 }
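/*
 * Editorial worked example -- not part of the patch.  With the default
 * cpu_operator_cost of 0.0025, merging N = 8 pre-sorted streams holding
 * 1,000,000 tuples in total gives, per the formulas above:
 *   heap creation:     2*0.0025 * 8 * log2(8)          = 0.12
 *   heap maintenance:  1e6 * (2*0.0025) * 2 * log2(8)  = 30000
 *   per-tuple charge:  0.0025 * 1e6                    = 2500
 * so about 0.12 is added to the inputs' summed startup cost and 32500 to
 * their summed total cost.
 */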
 
 /*
@@ -1170,41 +1284,48 @@ sort_exceeds_work_mem(Sort *sort)
  *
  * If the total volume of data to materialize exceeds work_mem, we will need
  * to write it to disk, so the cost is much higher in that case.
+ *
+ * Note that here we are estimating the costs for the first scan of the
+ * relation, so the materialization is all overhead --- any savings will
+ * occur only on rescan, which is estimated in cost_rescan.
  */
 void
 cost_material(Path *path,
-                         Cost input_cost, double tuples, int width)
+                         Cost input_startup_cost, Cost input_total_cost,
+                         double tuples, int width)
 {
-       Cost            startup_cost = input_cost;
-       Cost            run_cost = 0;
+       Cost            startup_cost = input_startup_cost;
+       Cost            run_cost = input_total_cost - input_startup_cost;
        double          nbytes = relation_byte_size(tuples, width);
        long            work_mem_bytes = work_mem * 1024L;
 
-       /* disk costs */
+       /*
+        * Whether spilling or not, charge 2x cpu_operator_cost per tuple to
+        * reflect bookkeeping overhead.  (This rate must be more than what
+        * cost_rescan charges for materialize, ie, cpu_operator_cost per tuple;
+        * if it is exactly the same then there will be a cost tie between
+        * nestloop with A outer, materialized B inner and nestloop with B outer,
+        * materialized A inner.  The extra cost ensures we'll prefer
+        * materializing the smaller rel.)      Note that this is normally a good deal
+        * less than cpu_tuple_cost; which is OK because a Material plan node
+        * doesn't do qual-checking or projection, so it's got less overhead than
+        * most plan nodes.
+        */
+       run_cost += 2 * cpu_operator_cost * tuples;
+
+       /*
+        * If we will spill to disk, charge at the rate of seq_page_cost per page.
+        * This cost is assumed to be evenly spread through the plan run phase,
+        * which isn't exactly accurate but our cost model doesn't allow for
+        * nonuniform costs within the run phase.
+        */
        if (nbytes > work_mem_bytes)
        {
                double          npages = ceil(nbytes / BLCKSZ);
 
-               /* We'll write during startup and read during retrieval */
-               startup_cost += seq_page_cost * npages;
                run_cost += seq_page_cost * npages;
        }
 
-       /*
-        * Charge a very small amount per inserted tuple, to reflect bookkeeping
-        * costs.  We use cpu_tuple_cost/10 for this.  This is needed to break the
-        * tie that would otherwise exist between nestloop with A outer,
-        * materialized B inner and nestloop with B outer, materialized A inner.
-        * The extra cost ensures we'll prefer materializing the smaller rel.
-        */
-       startup_cost += cpu_tuple_cost * 0.1 * tuples;
-
-       /*
-        * Also charge a small amount per extracted tuple.      We use cpu_tuple_cost
-        * so that it doesn't appear worthwhile to materialize a bare seqscan.
-        */
-       run_cost += cpu_tuple_cost * tuples;
-
        path->startup_cost = startup_cost;
        path->total_cost = startup_cost + run_cost;
 }
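/*
 * Editorial illustration -- not part of the patch.  The revised first-scan
 * cost model for Material, as a standalone sketch (invented names; BLCKSZ
 * assumed to be 8192).  The 2 x cpu_operator_cost per-tuple charge is the
 * deliberate tie-breaker discussed above: it exceeds the cpu_operator_cost
 * per tuple that cost_rescan charges, so materializing the smaller relation
 * comes out cheaper.
 */
#include <math.h>

static void
demo_material_cost(double input_startup_cost, double input_total_cost,
                   double tuples, double nbytes, double work_mem_bytes,
                   double cpu_operator_cost, double seq_page_cost,
                   double *startup_cost, double *total_cost)
{
    double      run_cost = input_total_cost - input_startup_cost;

    /* bookkeeping overhead, charged whether or not we spill */
    run_cost += 2 * cpu_operator_cost * tuples;

    /* if the tuplestore spills to disk, charge seq_page_cost per page */
    if (nbytes > work_mem_bytes)
        run_cost += seq_page_cost * ceil(nbytes / 8192.0);

    *startup_cost = input_startup_cost;
    *total_cost = input_startup_cost + run_cost;
}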
@@ -1214,25 +1335,40 @@ cost_material(Path *path,
  *             Determines and returns the cost of performing an Agg plan node,
  *             including the cost of its input.
  *
+ * aggcosts can be NULL when there are no actual aggregate functions (i.e.,
+ * we are using a hashed Agg node just to do grouping).
+ *
  * Note: when aggstrategy == AGG_SORTED, caller must ensure that input costs
  * are for appropriately-sorted input.
  */
 void
 cost_agg(Path *path, PlannerInfo *root,
-                AggStrategy aggstrategy, int numAggs,
+                AggStrategy aggstrategy, const AggClauseCosts *aggcosts,
                 int numGroupCols, double numGroups,
                 Cost input_startup_cost, Cost input_total_cost,
                 double input_tuples)
 {
        Cost            startup_cost;
        Cost            total_cost;
+       AggClauseCosts dummy_aggcosts;
+
+       /* Use all-zero per-aggregate costs if NULL is passed */
+       if (aggcosts == NULL)
+       {
+               Assert(aggstrategy == AGG_HASHED);
+               MemSet(&dummy_aggcosts, 0, sizeof(AggClauseCosts));
+               aggcosts = &dummy_aggcosts;
+       }
 
        /*
-        * We charge one cpu_operator_cost per aggregate function per input tuple,
-        * and another one per output tuple (corresponding to transfn and finalfn
-        * calls respectively).  If we are grouping, we charge an additional
-        * cpu_operator_cost per grouping column per input tuple for grouping
-        * comparisons.
+        * The transCost.per_tuple component of aggcosts should be charged once
+        * per input tuple, corresponding to the costs of evaluating the aggregate
+        * transfns and their input expressions (with any startup cost of course
+        * charged but once).  The finalCost component is charged once per output
+        * tuple, corresponding to the costs of evaluating the finalfns.
+        *
+        * If we are grouping, we charge an additional cpu_operator_cost per
+        * grouping column per input tuple for grouping comparisons.
         *
         * We will produce a single output tuple if not grouping, and a tuple per
         * group otherwise.  We charge cpu_tuple_cost for each output tuple.
@@ -1245,15 +1381,13 @@ cost_agg(Path *path, PlannerInfo *root,
         * there's roundoff error we might do the wrong thing.  So be sure that
         * the computations below form the same intermediate values in the same
         * order.
-        *
-        * Note: ideally we should use the pg_proc.procost costs of each
-        * aggregate's component functions, but for now that seems like an
-        * excessive amount of work.
         */
        if (aggstrategy == AGG_PLAIN)
        {
                startup_cost = input_total_cost;
-               startup_cost += cpu_operator_cost * (input_tuples + 1) * numAggs;
+               startup_cost += aggcosts->transCost.startup;
+               startup_cost += aggcosts->transCost.per_tuple * input_tuples;
+               startup_cost += aggcosts->finalCost;
                /* we aren't grouping */
                total_cost = startup_cost + cpu_tuple_cost;
        }
@@ -1263,19 +1397,21 @@ cost_agg(Path *path, PlannerInfo *root,
                startup_cost = input_startup_cost;
                total_cost = input_total_cost;
                /* calcs phrased this way to match HASHED case, see note above */
-               total_cost += cpu_operator_cost * input_tuples * numGroupCols;
-               total_cost += cpu_operator_cost * input_tuples * numAggs;
-               total_cost += cpu_operator_cost * numGroups * numAggs;
+               total_cost += aggcosts->transCost.startup;
+               total_cost += aggcosts->transCost.per_tuple * input_tuples;
+               total_cost += (cpu_operator_cost * numGroupCols) * input_tuples;
+               total_cost += aggcosts->finalCost * numGroups;
                total_cost += cpu_tuple_cost * numGroups;
        }
        else
        {
                /* must be AGG_HASHED */
                startup_cost = input_total_cost;
-               startup_cost += cpu_operator_cost * input_tuples * numGroupCols;
-               startup_cost += cpu_operator_cost * input_tuples * numAggs;
+               startup_cost += aggcosts->transCost.startup;
+               startup_cost += aggcosts->transCost.per_tuple * input_tuples;
+               startup_cost += (cpu_operator_cost * numGroupCols) * input_tuples;
                total_cost = startup_cost;
-               total_cost += cpu_operator_cost * numGroups * numAggs;
+               total_cost += aggcosts->finalCost * numGroups;
                total_cost += cpu_tuple_cost * numGroups;
        }
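/*
 * Editorial worked example -- not part of the patch.  For the AGG_HASHED
 * branch above, take 1,000,000 input tuples, 2 grouping columns, 1000
 * groups, and an aggregate whose transCost.per_tuple and finalCost are both
 * 0.0025 (example values, zero transCost.startup), with the default
 * cpu_operator_cost = 0.0025 and cpu_tuple_cost = 0.01:
 *   startup = input_total + 0.0025*1e6 + (0.0025*2)*1e6 = input_total + 7500
 *   total   = startup + 0.0025*1000 + 0.01*1000         = startup + 12.5
 */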
 
@@ -1292,25 +1428,53 @@ cost_agg(Path *path, PlannerInfo *root,
  */
 void
 cost_windowagg(Path *path, PlannerInfo *root,
-                          int numWindowFuncs, int numPartCols, int numOrderCols,
+                          List *windowFuncs, int numPartCols, int numOrderCols,
                           Cost input_startup_cost, Cost input_total_cost,
                           double input_tuples)
 {
        Cost            startup_cost;
        Cost            total_cost;
+       ListCell   *lc;
 
        startup_cost = input_startup_cost;
        total_cost = input_total_cost;
 
        /*
-        * We charge one cpu_operator_cost per window function per tuple (often a
-        * drastic underestimate, but without a way to gauge how many tuples the
-        * window function will fetch, it's hard to do better).  We also charge
-        * cpu_operator_cost per grouping column per tuple for grouping
-        * comparisons, plus cpu_tuple_cost per tuple for general overhead.
-        */
-       total_cost += cpu_operator_cost * input_tuples * numWindowFuncs;
-       total_cost += cpu_operator_cost * input_tuples * (numPartCols + numOrderCols);
+        * Window functions are assumed to cost their stated execution cost, plus
+        * the cost of evaluating their input expressions, per tuple.  Since they
+        * may in fact evaluate their inputs at multiple rows during each cycle,
+        * this could be a drastic underestimate; but without a way to know how
+        * many rows the window function will fetch, it's hard to do better.  In
+        * any case, it's a good estimate for all the built-in window functions,
+        * so we'll just do this for now.
+        */
+       foreach(lc, windowFuncs)
+       {
+               WindowFunc *wfunc = (WindowFunc *) lfirst(lc);
+               Cost            wfunccost;
+               QualCost        argcosts;
+
+               Assert(IsA(wfunc, WindowFunc));
+
+               wfunccost = get_func_cost(wfunc->winfnoid) * cpu_operator_cost;
+
+               /* also add the input expressions' cost to per-input-row costs */
+               cost_qual_eval_node(&argcosts, (Node *) wfunc->args, root);
+               startup_cost += argcosts.startup;
+               wfunccost += argcosts.per_tuple;
+
+               total_cost += wfunccost * input_tuples;
+       }
+
+       /*
+        * We also charge cpu_operator_cost per grouping column per tuple for
+        * grouping comparisons, plus cpu_tuple_cost per tuple for general
+        * overhead.
+        *
+        * XXX this neglects costs of spooling the data to disk when it overflows
+        * work_mem.  Sooner or later that should get accounted for.
+        */
+       total_cost += cpu_operator_cost * (numPartCols + numOrderCols) * input_tuples;
        total_cost += cpu_tuple_cost * input_tuples;
 
        path->startup_cost = startup_cost;
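/*
 * Editorial worked example -- not part of the patch.  For one window
 * function with the default procost of 1 (so 1 * cpu_operator_cost per row)
 * and no argument expressions, over 1,000,000 input tuples with one
 * partitioning column and two ordering columns:
 *   window function:   0.0025 * 1e6           = 2500
 *   grouping compares: 0.0025 * (1+2) * 1e6   = 7500
 *   general overhead:  0.01 * 1e6             = 10000
 * i.e. about 20000 is added to the input's total cost.
 */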
@@ -1352,7 +1516,9 @@ cost_group(Path *path, PlannerInfo *root,
  * output row count, which may be lower than the restriction-clause-only row
  * count of its parent.  (We don't include this case in the PATH_ROWS macro
  * because it applies *only* to a nestloop's inner relation.)  We have to
- * be prepared to recurse through Append nodes in case of an appendrel.
+ * be prepared to recurse through Append or MergeAppend nodes in case of an
+ * appendrel.  (It's not clear MergeAppend can be seen here, but we may as
+ * well handle it if so.)
  */
 static double
 nestloop_inner_path_rows(Path *path)
@@ -1373,6 +1539,16 @@ nestloop_inner_path_rows(Path *path)
                        result += nestloop_inner_path_rows((Path *) lfirst(l));
                }
        }
+       else if (IsA(path, MergeAppendPath))
+       {
+               ListCell   *l;
+
+               result = 0;
+               foreach(l, ((MergeAppendPath *) path)->subpaths)
+               {
+                       result += nestloop_inner_path_rows((Path *) lfirst(l));
+               }
+       }
        else
                result = PATH_ROWS(path);
 
@@ -1394,47 +1570,117 @@ cost_nestloop(NestPath *path, PlannerInfo *root, SpecialJoinInfo *sjinfo)
        Path       *inner_path = path->innerjoinpath;
        Cost            startup_cost = 0;
        Cost            run_cost = 0;
+       Cost            inner_rescan_start_cost;
+       Cost            inner_rescan_total_cost;
+       Cost            inner_run_cost;
+       Cost            inner_rescan_run_cost;
        Cost            cpu_per_tuple;
        QualCost        restrict_qual_cost;
        double          outer_path_rows = PATH_ROWS(outer_path);
        double          inner_path_rows = nestloop_inner_path_rows(inner_path);
        double          ntuples;
+       Selectivity outer_match_frac;
+       Selectivity match_count;
+       bool            indexed_join_quals;
 
        if (!enable_nestloop)
                startup_cost += disable_cost;
 
+       /* estimate costs to rescan the inner relation */
+       cost_rescan(root, inner_path,
+                               &inner_rescan_start_cost,
+                               &inner_rescan_total_cost);
+
        /* cost of source data */
 
        /*
         * NOTE: clearly, we must pay both outer and inner paths' startup_cost
         * before we can start returning tuples, so the join's startup cost is
-        * their sum.  What's not so clear is whether the inner path's
-        * startup_cost must be paid again on each rescan of the inner path. This
-        * is not true if the inner path is materialized or is a hashjoin, but
-        * probably is true otherwise.
+        * their sum.  We'll also pay the inner path's rescan startup cost
+        * multiple times.
         */
        startup_cost += outer_path->startup_cost + inner_path->startup_cost;
        run_cost += outer_path->total_cost - outer_path->startup_cost;
-       if (IsA(inner_path, MaterialPath) ||
-               IsA(inner_path, HashPath))
-       {
-               /* charge only run cost for each iteration of inner path */
-       }
-       else
+       if (outer_path_rows > 1)
+               run_cost += (outer_path_rows - 1) * inner_rescan_start_cost;
+
+       inner_run_cost = inner_path->total_cost - inner_path->startup_cost;
+       inner_rescan_run_cost = inner_rescan_total_cost - inner_rescan_start_cost;
+
+       if (adjust_semi_join(root, path, sjinfo,
+                                                &outer_match_frac,
+                                                &match_count,
+                                                &indexed_join_quals))
        {
+               double          outer_matched_rows;
+               Selectivity inner_scan_frac;
+
                /*
-                * charge startup cost for each iteration of inner path, except we
-                * already charged the first startup_cost in our own startup
+                * SEMI or ANTI join: executor will stop after first match.
+                *
+                * For an outer-rel row that has at least one match, we can expect the
+                * inner scan to stop after a fraction 1/(match_count+1) of the inner
+                * rows, if the matches are evenly distributed.  Since they probably
+                * aren't quite evenly distributed, we apply a fuzz factor of 2.0 to
+                * that fraction.  (If we used a larger fuzz factor, we'd have to
+                * clamp inner_scan_frac to at most 1.0; but since match_count is at
+                * least 1, no such clamp is needed now.)
+                *
+                * A complicating factor is that rescans may be cheaper than first
+                * scans.  If we never scan all the way to the end of the inner rel,
+                * it might be (depending on the plan type) that we'd never pay the
+                * whole inner first-scan run cost.  However it is difficult to
+                * estimate whether that will happen, so be conservative and always
+                * charge the whole first-scan cost once.
                 */
-               run_cost += (outer_path_rows - 1) * inner_path->startup_cost;
+               run_cost += inner_run_cost;
+
+               outer_matched_rows = rint(outer_path_rows * outer_match_frac);
+               inner_scan_frac = 2.0 / (match_count + 1.0);
+
+               /* Add inner run cost for additional outer tuples having matches */
+               if (outer_matched_rows > 1)
+                       run_cost += (outer_matched_rows - 1) * inner_rescan_run_cost * inner_scan_frac;
+
+               /* Compute number of tuples processed (not number emitted!) */
+               ntuples = outer_matched_rows * inner_path_rows * inner_scan_frac;
+
+               /*
+                * For unmatched outer-rel rows, there are two cases.  If the inner
+                * path is an indexscan using all the joinquals as indexquals, then an
+                * unmatched row results in an indexscan returning no rows, which is
+                * probably quite cheap.  We estimate this case as the same cost to
+                * return the first tuple of a nonempty scan.  Otherwise, the executor
+                * will have to scan the whole inner rel; not so cheap.
+                */
+               if (indexed_join_quals)
+               {
+                       run_cost += (outer_path_rows - outer_matched_rows) *
+                               inner_rescan_run_cost / inner_path_rows;
+
+                       /*
+                        * We won't be evaluating any quals at all for these rows, so
+                        * don't add them to ntuples.
+                        */
+               }
+               else
+               {
+                       run_cost += (outer_path_rows - outer_matched_rows) *
+                               inner_rescan_run_cost;
+                       ntuples += (outer_path_rows - outer_matched_rows) *
+                               inner_path_rows;
+               }
        }
-       run_cost += outer_path_rows *
-               (inner_path->total_cost - inner_path->startup_cost);
+       else
+       {
+               /* Normal case; we'll scan whole input rel for each outer row */
+               run_cost += inner_run_cost;
+               if (outer_path_rows > 1)
+                       run_cost += (outer_path_rows - 1) * inner_rescan_run_cost;
 
-       /*
-        * Compute number of tuples processed (not number emitted!)
-        */
-       ntuples = outer_path_rows * inner_path_rows;
+               /* Compute number of tuples processed (not number emitted!) */
+               ntuples = outer_path_rows * inner_path_rows;
+       }
 
        /* CPU costs */
        cost_qual_eval(&restrict_qual_cost, path->joinrestrictinfo, root);
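/*
 * Editorial illustration -- not part of the patch.  The SEMI/ANTI inner-cost
 * arithmetic above, reduced to a standalone function (names invented; the
 * ntuples bookkeeping is left out).  An outer row with a match is assumed to
 * stop its inner scan after a fraction 2/(match_count+1) of the inner rows;
 * an unmatched outer row pays either a full rescan or, when the joinquals
 * are all indexquals, roughly one row's share of a rescan.
 */
#include <math.h>

static double
demo_semijoin_inner_run_cost(double outer_rows, double outer_match_frac,
                             double match_count, double inner_rows,
                             double inner_run_cost,
                             double inner_rescan_run_cost,
                             int indexed_join_quals)
{
    double      matched = rint(outer_rows * outer_match_frac);
    double      scan_frac = 2.0 / (match_count + 1.0);
    double      cost = inner_run_cost;  /* first scan charged in full */

    if (matched > 1)
        cost += (matched - 1) * inner_rescan_run_cost * scan_frac;

    if (indexed_join_quals)
        cost += (outer_rows - matched) * inner_rescan_run_cost / inner_rows;
    else
        cost += (outer_rows - matched) * inner_rescan_run_cost;

    return cost;
}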
@@ -1451,7 +1697,18 @@ cost_nestloop(NestPath *path, PlannerInfo *root, SpecialJoinInfo *sjinfo)
  *       Determines and returns the cost of joining two relations using the
  *       merge join algorithm.
  *
- * 'path' is already filled in except for the cost fields
+ * Unlike other costsize functions, this routine makes one actual decision:
+ * whether we should materialize the inner path.  We do that either because
+ * the inner path can't support mark/restore, or because it's cheaper to
+ * use an interposed Material node to handle mark/restore.     When the decision
+ * is cost-based it would be logically cleaner to build and cost two separate
+ * paths with and without that flag set; but that would require repeating most
+ * of the calculations here, which are not all that cheap.     Since the choice
+ * will not affect output pathkeys or startup cost, only total cost, there is
+ * no possibility of wanting to keep both paths.  So it seems best to make
+ * the decision here and record it in the path's materialize_inner field.
+ *
+ * 'path' is already filled in except for the cost fields and materialize_inner
  * 'sjinfo' is extra info about the join for selectivity estimation
  *
  * Notes: path's mergeclauses should be a subset of the joinrestrictinfo list;
@@ -1469,7 +1726,10 @@ cost_mergejoin(MergePath *path, PlannerInfo *root, SpecialJoinInfo *sjinfo)
        List       *innersortkeys = path->innersortkeys;
        Cost            startup_cost = 0;
        Cost            run_cost = 0;
-       Cost            cpu_per_tuple;
+       Cost            cpu_per_tuple,
+                               inner_run_cost,
+                               bare_inner_cost,
+                               mat_inner_cost;
        QualCost        merge_qual_cost;
        QualCost        qp_qual_cost;
        double          outer_path_rows = PATH_ROWS(outer_path);
@@ -1487,10 +1747,10 @@ cost_mergejoin(MergePath *path, PlannerInfo *root, SpecialJoinInfo *sjinfo)
                                innerendsel;
        Path            sort_path;              /* dummy for result of cost_sort */
 
-       /* Protect some assumptions below that rowcounts aren't zero */
-       if (outer_path_rows <= 0)
+       /* Protect some assumptions below that rowcounts aren't zero or NaN */
+       if (outer_path_rows <= 0 || isnan(outer_path_rows))
                outer_path_rows = 1;
-       if (inner_path_rows <= 0)
+       if (inner_path_rows <= 0 || isnan(inner_path_rows))
                inner_path_rows = 1;
 
        if (!enable_mergejoin)
@@ -1506,26 +1766,21 @@ cost_mergejoin(MergePath *path, PlannerInfo *root, SpecialJoinInfo *sjinfo)
        qp_qual_cost.per_tuple -= merge_qual_cost.per_tuple;
 
        /*
-        * Get approx # tuples passing the mergequals.  We use approx_tuple_count
-        * here for speed --- in most cases, any errors won't affect the result
-        * much.
+        * Get approx # tuples passing the mergequals.  We use approx_tuple_count
+        * here because we need an estimate done with JOIN_INNER semantics.
         */
-       mergejointuples = approx_tuple_count(root, &path->jpath,
-                                                                                mergeclauses, sjinfo);
+       mergejointuples = approx_tuple_count(root, &path->jpath, mergeclauses);
 
        /*
         * When there are equal merge keys in the outer relation, the mergejoin
         * must rescan any matching tuples in the inner relation. This means
-        * re-fetching inner tuples.  Our cost model for this is that a re-fetch
-        * costs the same as an original fetch, which is probably an overestimate;
-        * but on the other hand we ignore the bookkeeping costs of mark/restore.
-        * Not clear if it's worth developing a more refined model.
+        * re-fetching inner tuples; we have to estimate how often that happens.
         *
         * For regular inner and outer joins, the number of re-fetches can be
         * estimated approximately as size of merge join output minus size of
         * inner relation. Assume that the distinct key values are 1, 2, ..., and
         * denote the number of values of each key in the outer relation as m1,
-        * m2, ...; in the inner relation, n1, n2, ...  Then we have
+        * m2, ...; in the inner relation, n1, n2, ...  Then we have
         *
         * size of join = m1 * n1 + m2 * n2 + ...
         *
@@ -1539,16 +1794,10 @@ cost_mergejoin(MergePath *path, PlannerInfo *root, SpecialJoinInfo *sjinfo)
         * when we should not.  Can we do better without expensive selectivity
         * computations?
         *
-        * For SEMI and ANTI joins, only one inner tuple need be rescanned for
-        * each group of same-keyed outer tuples (assuming that all joinquals
-        * are merge quals).  This makes the effect small enough to ignore,
-        * so we just set rescannedtuples = 0.  Likewise, the whole issue is
-        * moot if we are working from a unique-ified outer input.
-        */
-       if (sjinfo->jointype == JOIN_SEMI ||
-               sjinfo->jointype == JOIN_ANTI)
-               rescannedtuples = 0;
-       else if (IsA(outer_path, UniquePath))
+        * The whole issue is moot if we are working from a unique-ified outer
+        * input.
+        */
+       if (IsA(outer_path, UniquePath))
                rescannedtuples = 0;
        else
        {
@@ -1557,7 +1806,7 @@ cost_mergejoin(MergePath *path, PlannerInfo *root, SpecialJoinInfo *sjinfo)
                if (rescannedtuples < 0)
                        rescannedtuples = 0;
        }
-       /* We'll inflate inner run cost this much to account for rescanning */
+       /* We'll inflate various costs this much to account for rescanning */
        rescanratio = 1.0 + (rescannedtuples / inner_path_rows);
 
        /*
@@ -1565,11 +1814,11 @@ cost_mergejoin(MergePath *path, PlannerInfo *root, SpecialJoinInfo *sjinfo)
         * (unless it's an outer join, in which case the outer side has to be
         * scanned all the way anyway).  Estimate fraction of the left and right
         * inputs that will actually need to be scanned.  Likewise, we can
-        * estimate the number of rows that will be skipped before the first
-        * join pair is found, which should be factored into startup cost.
-        * We use only the first (most significant) merge clause for this purpose.
-        * Since mergejoinscansel() is a fairly expensive computation, we cache
-        * the results in the merge clause RestrictInfo.
+        * estimate the number of rows that will be skipped before the first join
+        * pair is found, which should be factored into startup cost. We use only
+        * the first (most significant) merge clause for this purpose. Since
+        * mergejoinscansel() is a fairly expensive computation, we cache the
+        * results in the merge clause RestrictInfo.
         */
        if (mergeclauses && path->jpath.jointype != JOIN_FULL)
        {
@@ -1589,6 +1838,7 @@ cost_mergejoin(MergePath *path, PlannerInfo *root, SpecialJoinInfo *sjinfo)
                ipathkey = (PathKey *) linitial(ipathkeys);
                /* debugging check */
                if (opathkey->pk_opfamily != ipathkey->pk_opfamily ||
+                       opathkey->pk_eclass->ec_collation != ipathkey->pk_eclass->ec_collation ||
                        opathkey->pk_strategy != ipathkey->pk_strategy ||
                        opathkey->pk_nulls_first != ipathkey->pk_nulls_first)
                        elog(ERROR, "left and right pathkeys do not match in mergejoin");
@@ -1667,6 +1917,8 @@ cost_mergejoin(MergePath *path, PlannerInfo *root, SpecialJoinInfo *sjinfo)
                                  outer_path->total_cost,
                                  outer_path_rows,
                                  outer_path->parent->width,
+                                 0.0,
+                                 work_mem,
                                  -1.0);
                startup_cost += sort_path.startup_cost;
                startup_cost += (sort_path.total_cost - sort_path.startup_cost)
@@ -1691,35 +1943,102 @@ cost_mergejoin(MergePath *path, PlannerInfo *root, SpecialJoinInfo *sjinfo)
                                  inner_path->total_cost,
                                  inner_path_rows,
                                  inner_path->parent->width,
+                                 0.0,
+                                 work_mem,
                                  -1.0);
                startup_cost += sort_path.startup_cost;
                startup_cost += (sort_path.total_cost - sort_path.startup_cost)
-                       * innerstartsel * rescanratio;
-               run_cost += (sort_path.total_cost - sort_path.startup_cost)
-                       * (innerendsel - innerstartsel) * rescanratio;
-
-               /*
-                * If the inner sort is expected to spill to disk, we want to add a
-                * materialize node to shield it from the need to handle mark/restore.
-                * This will allow it to perform the last merge pass on-the-fly, while
-                * in most cases not requiring the materialize to spill to disk.
-                * Charge an extra cpu_tuple_cost per tuple to account for the
-                * materialize node.  (Keep this estimate in sync with similar ones in
-                * create_mergejoin_path and create_mergejoin_plan.)
-                */
-               if (relation_byte_size(inner_path_rows, inner_path->parent->width) >
-                       (work_mem * 1024L))
-                       run_cost += cpu_tuple_cost * inner_path_rows;
+                       * innerstartsel;
+               inner_run_cost = (sort_path.total_cost - sort_path.startup_cost)
+                       * (innerendsel - innerstartsel);
        }
        else
        {
                startup_cost += inner_path->startup_cost;
                startup_cost += (inner_path->total_cost - inner_path->startup_cost)
-                       * innerstartsel * rescanratio;
-               run_cost += (inner_path->total_cost - inner_path->startup_cost)
-                       * (innerendsel - innerstartsel) * rescanratio;
+                       * innerstartsel;
+               inner_run_cost = (inner_path->total_cost - inner_path->startup_cost)
+                       * (innerendsel - innerstartsel);
        }
 
+       /*
+        * Decide whether we want to materialize the inner input to shield it from
+        * mark/restore and performing re-fetches.      Our cost model for regular
+        * re-fetches is that a re-fetch costs the same as an original fetch,
+        * which is probably an overestimate; but on the other hand we ignore the
+        * bookkeeping costs of mark/restore.  Not clear if it's worth developing
+        * a more refined model.  So we just need to inflate the inner run cost by
+        * rescanratio.
+        */
+       bare_inner_cost = inner_run_cost * rescanratio;
+
+       /*
+        * When we interpose a Material node the re-fetch cost is assumed to be
+        * just cpu_operator_cost per tuple, independently of the underlying
+        * plan's cost; and we charge an extra cpu_operator_cost per original
+        * fetch as well.  Note that we're assuming the materialize node will
+        * never spill to disk, since it only has to remember tuples back to the
+        * last mark.  (If there are a huge number of duplicates, our other cost
+        * factors will make the path so expensive that it probably won't get
+        * chosen anyway.)      So we don't use cost_rescan here.
+        *
+        * Note: keep this estimate in sync with create_mergejoin_plan's labeling
+        * of the generated Material node.
+        */
+       mat_inner_cost = inner_run_cost +
+               cpu_operator_cost * inner_path_rows * rescanratio;
+
+       /*
+        * Prefer materializing if it looks cheaper, unless the user has asked to
+        * suppress materialization.
+        */
+       if (enable_material && mat_inner_cost < bare_inner_cost)
+               path->materialize_inner = true;
+
+       /*
+        * Even if materializing doesn't look cheaper, we *must* do it if the
+        * inner path is to be used directly (without sorting) and it doesn't
+        * support mark/restore.
+        *
+        * Since the inner side must be ordered, and only Sorts and IndexScans can
+        * create order to begin with, and they both support mark/restore, you
+        * might think there's no problem --- but you'd be wrong.  Nestloop and
+        * merge joins can *preserve* the order of their inputs, so they can be
+        * selected as the input of a mergejoin, and they don't support
+        * mark/restore at present.
+        *
+        * We don't test the value of enable_material here, because
+        * materialization is required for correctness in this case, and turning
+        * it off does not entitle us to deliver an invalid plan.
+        */
+       else if (innersortkeys == NIL &&
+                        !ExecSupportsMarkRestore(inner_path->pathtype))
+               path->materialize_inner = true;
+
+       /*
+        * Also, force materializing if the inner path is to be sorted and the
+        * sort is expected to spill to disk.  This is because the final merge
+        * pass can be done on-the-fly if it doesn't have to support mark/restore.
+        * We don't try to adjust the cost estimates for this consideration,
+        * though.
+        *
+        * Since materialization is a performance optimization in this case,
+        * rather than necessary for correctness, we skip it if enable_material is
+        * off.
+        */
+       else if (enable_material && innersortkeys != NIL &&
+                        relation_byte_size(inner_path_rows, inner_path->parent->width) >
+                        (work_mem * 1024L))
+               path->materialize_inner = true;
+       else
+               path->materialize_inner = false;
+
+       /* Charge the right incremental cost for the chosen case */
+       if (path->materialize_inner)
+               run_cost += mat_inner_cost;
+       else
+               run_cost += bare_inner_cost;
+
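
The decision above reduces to comparing two formulas.  A standalone sketch with invented inputs (only the 0.0025 default for cpu_operator_cost is a real PostgreSQL setting; the other numbers are made up) looks like this:

    #include <stdio.h>

    int
    main(void)
    {
        double      inner_run_cost = 500.0;     /* hypothetical */
        double      inner_path_rows = 10000.0;  /* hypothetical */
        double      rescanratio = 3.0;          /* 1 + rescanned/total, hypothetical */
        double      cpu_operator_cost = 0.0025; /* PostgreSQL default */

        double      bare_inner_cost = inner_run_cost * rescanratio;
        double      mat_inner_cost = inner_run_cost +
            cpu_operator_cost * inner_path_rows * rescanratio;

        /* here 575.0 < 1500.0, so materializing the inner side looks cheaper */
        printf("bare=%.1f mat=%.1f\n", bare_inner_cost, mat_inner_cost);
        return 0;
    }

In effect, materialization wins whenever re-running the inner plan would cost more than the Material node's small per-tuple charge.
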
        /* CPU costs */
 
        /*
@@ -1739,6 +2058,9 @@ cost_mergejoin(MergePath *path, PlannerInfo *root, SpecialJoinInfo *sjinfo)
         * cpu_tuple_cost plus the cost of evaluating additional restriction
         * clauses that are to be applied at the join.  (This is pessimistic since
         * not all of the quals may get evaluated at each tuple.)
+        *
+        * Note: we could adjust for SEMI/ANTI joins skipping some qual
+        * evaluations here, but it's probably not worth the trouble.
         */
        startup_cost += qp_qual_cost.startup;
        cpu_per_tuple = cpu_tuple_cost + qp_qual_cost.per_tuple;
@@ -1767,6 +2089,7 @@ cached_scansel(PlannerInfo *root, RestrictInfo *rinfo, PathKey *pathkey)
        {
                cache = (MergeScanSelCache *) lfirst(lc);
                if (cache->opfamily == pathkey->pk_opfamily &&
+                       cache->collation == pathkey->pk_eclass->ec_collation &&
                        cache->strategy == pathkey->pk_strategy &&
                        cache->nulls_first == pathkey->pk_nulls_first)
                        return cache;
@@ -1788,6 +2111,7 @@ cached_scansel(PlannerInfo *root, RestrictInfo *rinfo, PathKey *pathkey)
 
        cache = (MergeScanSelCache *) palloc(sizeof(MergeScanSelCache));
        cache->opfamily = pathkey->pk_opfamily;
+       cache->collation = pathkey->pk_eclass->ec_collation;
        cache->strategy = pathkey->pk_strategy;
        cache->nulls_first = pathkey->pk_nulls_first;
        cache->leftstartsel = leftstartsel;
@@ -1829,8 +2153,11 @@ cost_hashjoin(HashPath *path, PlannerInfo *root, SpecialJoinInfo *sjinfo)
        int                     num_hashclauses = list_length(hashclauses);
        int                     numbuckets;
        int                     numbatches;
+       int                     num_skew_mcvs;
        double          virtualbuckets;
        Selectivity innerbucketsize;
+       Selectivity outer_match_frac;
+       Selectivity match_count;
        ListCell   *hcl;
 
        if (!enable_hashjoin)
@@ -1845,14 +2172,6 @@ cost_hashjoin(HashPath *path, PlannerInfo *root, SpecialJoinInfo *sjinfo)
        qp_qual_cost.startup -= hash_qual_cost.startup;
        qp_qual_cost.per_tuple -= hash_qual_cost.per_tuple;
 
-       /*
-        * Get approx # tuples passing the hashquals.  We use approx_tuple_count
-        * here for speed --- in most cases, any errors won't affect the result
-        * much.
-        */
-       hashjointuples = approx_tuple_count(root, &path->jpath,
-                                                                               hashclauses, sjinfo);
-
        /* cost of source data */
        startup_cost += outer_path->startup_cost;
        run_cost += outer_path->total_cost - outer_path->startup_cost;
@@ -1872,13 +2191,27 @@ cost_hashjoin(HashPath *path, PlannerInfo *root, SpecialJoinInfo *sjinfo)
                * inner_path_rows;
        run_cost += cpu_operator_cost * num_hashclauses * outer_path_rows;
 
-       /* Get hash table size that executor would use for inner relation */
+       /*
+        * Get hash table size that executor would use for inner relation.
+        *
+        * XXX for the moment, always assume that skew optimization will be
+        * performed.  As long as SKEW_WORK_MEM_PERCENT is small, it's not worth
+        * trying to determine that for sure.
+        *
+        * XXX at some point it might be interesting to try to account for skew
+        * optimization in the cost estimate, but for now, we don't.
+        */
        ExecChooseHashTableSize(inner_path_rows,
                                                        inner_path->parent->width,
+                                                       true,           /* useskew */
                                                        &numbuckets,
-                                                       &numbatches);
+                                                       &numbatches,
+                                                       &num_skew_mcvs);
        virtualbuckets = (double) numbuckets *(double) numbatches;
 
+       /* mark the path with estimated # of batches */
+       path->num_batches = numbatches;
+
        /*
         * Determine bucketsize fraction for inner relation.  We use the smallest
         * bucketsize estimated for any individual hashclause; this is undoubtedly
@@ -1966,18 +2299,78 @@ cost_hashjoin(HashPath *path, PlannerInfo *root, SpecialJoinInfo *sjinfo)
 
        /* CPU costs */
 
-       /*
-        * The number of tuple comparisons needed is the number of outer tuples
-        * times the typical number of tuples in a hash bucket, which is the inner
-        * relation size times its bucketsize fraction.  At each one, we need to
-        * evaluate the hashjoin quals.  But actually, charging the full qual eval
-        * cost at each tuple is pessimistic, since we don't evaluate the quals
-        * unless the hash values match exactly.  For lack of a better idea, halve
-        * the cost estimate to allow for that.
-        */
-       startup_cost += hash_qual_cost.startup;
-       run_cost += hash_qual_cost.per_tuple *
-               outer_path_rows * clamp_row_est(inner_path_rows * innerbucketsize) * 0.5;
+       if (adjust_semi_join(root, &path->jpath, sjinfo,
+                                                &outer_match_frac,
+                                                &match_count,
+                                                NULL))
+       {
+               double          outer_matched_rows;
+               Selectivity inner_scan_frac;
+
+               /*
+                * SEMI or ANTI join: executor will stop after first match.
+                *
+                * For an outer-rel row that has at least one match, we can expect the
+                * bucket scan to stop after a fraction 1/(match_count+1) of the
+                * bucket's rows, if the matches are evenly distributed.  Since they
+                * probably aren't quite evenly distributed, we apply a fuzz factor of
+                * 2.0 to that fraction.  (If we used a larger fuzz factor, we'd have
+                * to clamp inner_scan_frac to at most 1.0; but since match_count is
+                * at least 1, no such clamp is needed now.)
+                */
+               outer_matched_rows = rint(outer_path_rows * outer_match_frac);
+               inner_scan_frac = 2.0 / (match_count + 1.0);
+
+               startup_cost += hash_qual_cost.startup;
+               run_cost += hash_qual_cost.per_tuple * outer_matched_rows *
+                       clamp_row_est(inner_path_rows * innerbucketsize * inner_scan_frac) * 0.5;
+
+               /*
+                * For unmatched outer-rel rows, the picture is quite a lot different.
+                * In the first place, there is no reason to assume that these rows
+                * preferentially hit heavily-populated buckets; instead assume they
+                * are uncorrelated with the inner distribution and so they see an
+                * average bucket size of inner_path_rows / virtualbuckets.  In the
+                * second place, it seems likely that they will have few if any exact
+                * hash-code matches and so very few of the tuples in the bucket will
+                * actually require eval of the hash quals.  We don't have any good
+                * way to estimate how many will, but for the moment assume that the
+                * effective cost per bucket entry is one-tenth what it is for
+                * matchable tuples.
+                */
+               run_cost += hash_qual_cost.per_tuple *
+                       (outer_path_rows - outer_matched_rows) *
+                       clamp_row_est(inner_path_rows / virtualbuckets) * 0.05;
+
+               /* Get # of tuples that will pass the basic join */
+               if (path->jpath.jointype == JOIN_SEMI)
+                       hashjointuples = outer_matched_rows;
+               else
+                       hashjointuples = outer_path_rows - outer_matched_rows;
+       }
+       else
+       {
+               /*
+                * The number of tuple comparisons needed is the number of outer
+                * tuples times the typical number of tuples in a hash bucket, which
+                * is the inner relation size times its bucketsize fraction.  At each
+                * one, we need to evaluate the hashjoin quals.  But actually,
+                * charging the full qual eval cost at each tuple is pessimistic,
+                * since we don't evaluate the quals unless the hash values match
+                * exactly.  For lack of a better idea, halve the cost estimate to
+                * allow for that.
+                */
+               startup_cost += hash_qual_cost.startup;
+               run_cost += hash_qual_cost.per_tuple * outer_path_rows *
+                       clamp_row_est(inner_path_rows * innerbucketsize) * 0.5;
+
+               /*
+                * Get approx # tuples passing the hashquals.  We use
+                * approx_tuple_count here because we need an estimate done with
+                * JOIN_INNER semantics.
+                */
+               hashjointuples = approx_tuple_count(root, &path->jpath, hashclauses);
+       }
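
A rough standalone illustration of the two charges above, with invented row counts and selectivities (clamp_row_est() is omitted for brevity; it rounds to an integer and clamps tiny estimates up to one row):

    #include <stdio.h>
    #include <math.h>

    int
    main(void)
    {
        /* all inputs hypothetical */
        double      outer_path_rows = 1000.0;
        double      inner_path_rows = 50000.0;
        double      innerbucketsize = 0.001;
        double      virtualbuckets = 1024.0;
        double      hash_qual_per_tuple = 0.005;
        double      outer_match_frac = 0.6; /* fraction of outer rows with a match */
        double      match_count = 4.0;      /* avg matches per matched outer row */

        double      outer_matched_rows = rint(outer_path_rows * outer_match_frac);
        double      inner_scan_frac = 2.0 / (match_count + 1.0);

        /* matched outer rows stop partway through a (typically crowded) bucket */
        double      matched_cost = hash_qual_per_tuple * outer_matched_rows *
            (inner_path_rows * innerbucketsize * inner_scan_frac) * 0.5;

        /* unmatched rows see an average bucket and evaluate few quals */
        double      unmatched_cost = hash_qual_per_tuple *
            (outer_path_rows - outer_matched_rows) *
            (inner_path_rows / virtualbuckets) * 0.05;

        printf("matched=%.2f unmatched=%.2f\n", matched_cost, unmatched_cost);
        return 0;
    }
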
 
        /*
         * For each tuple that gets through the hashjoin proper, we charge
@@ -2025,18 +2418,17 @@ cost_subplan(PlannerInfo *root, SubPlan *subplan, Plan *plan)
                /*
                 * The per-tuple costs include the cost of evaluating the lefthand
                 * expressions, plus the cost of probing the hashtable.  We already
-                * accounted for the lefthand expressions as part of the testexpr,
-                * and will also have counted one cpu_operator_cost for each
-                * comparison operator.  That is probably too low for the probing
-                * cost, but it's hard to make a better estimate, so live with it for
-                * now.
+                * accounted for the lefthand expressions as part of the testexpr, and
+                * will also have counted one cpu_operator_cost for each comparison
+                * operator.  That is probably too low for the probing cost, but it's
+                * hard to make a better estimate, so live with it for now.
                 */
        }
        else
        {
                /*
                 * Otherwise we will be rescanning the subplan output on each
-                * evaluation.  We need to estimate how much of the output we will
+                * evaluation.  We need to estimate how much of the output we will
                 * actually need to scan.  NOTE: this logic should agree with the
                 * tuple_fraction estimates used by make_subplan() in
                 * plan/subselect.c.
@@ -2064,13 +2456,13 @@ cost_subplan(PlannerInfo *root, SubPlan *subplan, Plan *plan)
 
                /*
                 * Also account for subplan's startup cost. If the subplan is
-                * uncorrelated or undirect correlated, AND its topmost node is a Sort
-                * or Material node, assume that we'll only need to pay its startup
-                * cost once; otherwise assume we pay the startup cost every time.
+                * uncorrelated or undirect correlated, AND its topmost node is one
+                * that materializes its output, assume that we'll only need to pay
+                * its startup cost once; otherwise assume we pay the startup cost
+                * every time.
                 */
                if (subplan->parParam == NIL &&
-                       (IsA(plan, Sort) ||
-                        IsA(plan, Material)))
+                       ExecMaterializesOutput(nodeTag(plan)))
                        sp_cost.startup += plan->startup_cost;
                else
                        sp_cost.per_tuple += plan->startup_cost;
@@ -2081,6 +2473,108 @@ cost_subplan(PlannerInfo *root, SubPlan *subplan, Plan *plan)
 }
 
 
+/*
+ * cost_rescan
+ *             Given a finished Path, estimate the costs of rescanning it after
+ *             having done so the first time.  For some Path types a rescan is
+ *             cheaper than an original scan (if no parameters change), and this
+ *             function embodies knowledge about that.  The default is to return
+ *             the same costs stored in the Path.      (Note that the cost estimates
+ *             actually stored in Paths are always for first scans.)
+ *
+ * This function is not currently intended to model effects such as rescans
+ * being cheaper due to disk block caching; what we are concerned with is
+ * plan types wherein the executor caches results explicitly, or doesn't
+ * redo startup calculations, etc.
+ */
+static void
+cost_rescan(PlannerInfo *root, Path *path,
+                       Cost *rescan_startup_cost,      /* output parameters */
+                       Cost *rescan_total_cost)
+{
+       switch (path->pathtype)
+       {
+               case T_FunctionScan:
+
+                       /*
+                        * Currently, nodeFunctionscan.c always executes the function to
+                        * completion before returning any rows, and caches the results in
+                        * a tuplestore.  So the function eval cost is all startup cost
+                        * and isn't paid over again on rescans. However, all run costs
+                        * will be paid over again.
+                        */
+                       *rescan_startup_cost = 0;
+                       *rescan_total_cost = path->total_cost - path->startup_cost;
+                       break;
+               case T_HashJoin:
+
+                       /*
+                        * Assume that all of the startup cost represents hash table
+                        * building, which we won't have to do over.
+                        */
+                       *rescan_startup_cost = 0;
+                       *rescan_total_cost = path->total_cost - path->startup_cost;
+                       break;
+               case T_CteScan:
+               case T_WorkTableScan:
+                       {
+                               /*
+                                * These plan types materialize their final result in a
+                                * tuplestore.  So the rescan cost is only cpu_tuple_cost per
+                                * tuple, unless the result is large enough to spill to disk.
+                                */
+                               Cost            run_cost = cpu_tuple_cost * path->parent->rows;
+                               double          nbytes = relation_byte_size(path->parent->rows,
+                                                                                                               path->parent->width);
+                               long            work_mem_bytes = work_mem * 1024L;
+
+                               if (nbytes > work_mem_bytes)
+                               {
+                                       /* It will spill, so account for re-read cost */
+                                       double          npages = ceil(nbytes / BLCKSZ);
+
+                                       run_cost += seq_page_cost * npages;
+                               }
+                               *rescan_startup_cost = 0;
+                               *rescan_total_cost = run_cost;
+                       }
+                       break;
+               case T_Material:
+               case T_Sort:
+                       {
+                               /*
+                                * These plan types not only materialize their results, but do
+                                * not implement qual filtering or projection.  So they are
+                                * even cheaper to rescan than the ones above.  We charge only
+                                * cpu_operator_cost per tuple.  (Note: keep that in sync with
+                                * the run_cost charge in cost_sort, and also see comments in
+                                * cost_material before you change it.)
+                                */
+                               Cost            run_cost = cpu_operator_cost * path->parent->rows;
+                               double          nbytes = relation_byte_size(path->parent->rows,
+                                                                                                               path->parent->width);
+                               long            work_mem_bytes = work_mem * 1024L;
+
+                               if (nbytes > work_mem_bytes)
+                               {
+                                       /* It will spill, so account for re-read cost */
+                                       double          npages = ceil(nbytes / BLCKSZ);
+
+                                       run_cost += seq_page_cost * npages;
+                               }
+                               *rescan_startup_cost = 0;
+                               *rescan_total_cost = run_cost;
+                       }
+                       break;
+               default:
+                       *rescan_startup_cost = path->startup_cost;
+                       *rescan_total_cost = path->total_cost;
+                       break;
+       }
+}
+
+
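
The spill test used by the tuplestore- and tuplesort-backed cases above can be sketched in isolation.  Here rows, width and work_mem are invented, relation_byte_size() is approximated as rows * width (the real function also adds per-tuple overhead), and BLCKSZ is taken to be the usual 8192 bytes:

    #include <stdio.h>
    #include <math.h>

    #define BLCKSZ 8192             /* common PostgreSQL block size */

    int
    main(void)
    {
        double      rows = 200000.0;        /* hypothetical materialized result */
        double      width = 64.0;           /* hypothetical average row width */
        double      cpu_tuple_cost = 0.01;  /* PostgreSQL default */
        double      seq_page_cost = 1.0;    /* PostgreSQL default */
        long        work_mem_kb = 4096;     /* hypothetical work_mem */

        double      nbytes = rows * width;  /* crude stand-in for relation_byte_size() */
        double      run_cost = cpu_tuple_cost * rows;

        if (nbytes > work_mem_kb * 1024.0)
        {
            /* the result spills, so charge for re-reading it from temp files */
            double      npages = ceil(nbytes / BLCKSZ);

            run_cost += seq_page_cost * npages;
        }
        printf("estimated rescan cost = %.1f\n", run_cost);
        return 0;
    }

For the Material and Sort cases the same shape applies, except that the per-tuple charge is cpu_operator_cost rather than cpu_tuple_cost.
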
 /*
  * cost_qual_eval
  *             Estimate the CPU costs of evaluating a WHERE clause.
@@ -2189,17 +2683,12 @@ cost_qual_eval_walker(Node *node, cost_qual_eval_context *context)
         * Vars and Consts are charged zero, and so are boolean operators (AND,
         * OR, NOT). Simplistic, but a lot better than no model at all.
         *
-        * Note that Aggref and WindowFunc nodes are (and should be) treated
-        * like Vars --- whatever execution cost they have is absorbed into
-        * plan-node-specific costing.  As far as expression evaluation is
-        * concerned they're just like Vars.
-        *
         * Should we try to account for the possibility of short-circuit
         * evaluation of AND/OR?  Probably *not*, because that would make the
         * results depend on the clause ordering, and we are not in any position
         * to expect that the current ordering of the clauses is the one that's
-        * going to end up being used.  (Is it worth applying order_qual_clauses
-        * much earlier in the planning process to fix this?)
+        * going to end up being used.  The above per-RestrictInfo caching would
+        * not mix well with trying to re-order clauses anyway.
         */
        if (IsA(node, FuncExpr))
        {
@@ -2228,6 +2717,20 @@ cost_qual_eval_walker(Node *node, cost_qual_eval_context *context)
                context->total.per_tuple += get_func_cost(saop->opfuncid) *
                        cpu_operator_cost * estimate_array_length(arraynode) * 0.5;
        }
+       else if (IsA(node, Aggref) ||
+                        IsA(node, WindowFunc))
+       {
+               /*
+                * Aggref and WindowFunc nodes are (and should be) treated like Vars,
+                * ie, zero execution cost in the current model, because they behave
+                * essentially like Vars in execQual.c.  We disregard the costs of
+                * their input expressions for the same reason.  The actual execution
+                * costs of the aggregate/window functions and their arguments have to
+                * be factored into plan-node-specific costing of the Agg or WindowAgg
+                * plan node.
+                */
+               return false;                   /* don't recurse into children */
+       }
        else if (IsA(node, CoerceViaIO))
        {
                CoerceViaIO *iocoerce = (CoerceViaIO *) node;
@@ -2299,10 +2802,10 @@ cost_qual_eval_walker(Node *node, cost_qual_eval_context *context)
        else if (IsA(node, AlternativeSubPlan))
        {
                /*
-                * Arbitrarily use the first alternative plan for costing.  (We should
+                * Arbitrarily use the first alternative plan for costing.      (We should
                 * certainly only include one alternative, and we don't yet have
-                * enough information to know which one the executor is most likely
-                * to use.)
+                * enough information to know which one the executor is most likely to
+                * use.)
                 */
                AlternativeSubPlan *asplan = (AlternativeSubPlan *) node;
 
@@ -2316,6 +2819,163 @@ cost_qual_eval_walker(Node *node, cost_qual_eval_context *context)
 }
 
 
+/*
+ * adjust_semi_join
+ *       Estimate how much of the inner input a SEMI or ANTI join
+ *       can be expected to scan.
+ *
+ * In a hash or nestloop SEMI/ANTI join, the executor will stop scanning
+ * inner rows as soon as it finds a match to the current outer row.
+ * We should therefore adjust some of the cost components for this effect.
+ * This function computes some estimates needed for these adjustments.
+ *
+ * 'path' is already filled in except for the cost fields
+ * 'sjinfo' is extra info about the join for selectivity estimation
+ *
+ * Returns TRUE if this is a SEMI or ANTI join, FALSE if not.
+ *
+ * Output parameters (set only in TRUE-result case):
+ * *outer_match_frac is set to the fraction of the outer tuples that are
+ *             expected to have at least one match.
+ * *match_count is set to the average number of matches expected for
+ *             outer tuples that have at least one match.
+ * *indexed_join_quals is set to TRUE if all the joinquals are used as
+ *             inner index quals, FALSE if not.
+ *
+ * indexed_join_quals can be passed as NULL if that information is not
+ * relevant (it is only useful for the nestloop case).
+ */
+static bool
+adjust_semi_join(PlannerInfo *root, JoinPath *path, SpecialJoinInfo *sjinfo,
+                                Selectivity *outer_match_frac,
+                                Selectivity *match_count,
+                                bool *indexed_join_quals)
+{
+       JoinType        jointype = path->jointype;
+       Selectivity jselec;
+       Selectivity nselec;
+       Selectivity avgmatch;
+       SpecialJoinInfo norm_sjinfo;
+       List       *joinquals;
+       ListCell   *l;
+
+       /* Fall out if it's not JOIN_SEMI or JOIN_ANTI */
+       if (jointype != JOIN_SEMI && jointype != JOIN_ANTI)
+               return false;
+
+       /*
+        * Note: it's annoying to repeat this selectivity estimation on each call,
+        * when the joinclause list will be the same for all path pairs
+        * implementing a given join.  clausesel.c will save us from the worst
+        * effects of this by caching at the RestrictInfo level; but perhaps it'd
+        * be worth finding a way to cache the results at a higher level.
+        */
+
+       /*
+        * In an ANTI join, we must ignore clauses that are "pushed down", since
+        * those won't affect the match logic.  In a SEMI join, we do not
+        * distinguish joinquals from "pushed down" quals, so just use the whole
+        * restrictinfo list.
+        */
+       if (jointype == JOIN_ANTI)
+       {
+               joinquals = NIL;
+               foreach(l, path->joinrestrictinfo)
+               {
+                       RestrictInfo *rinfo = (RestrictInfo *) lfirst(l);
+
+                       Assert(IsA(rinfo, RestrictInfo));
+                       if (!rinfo->is_pushed_down)
+                               joinquals = lappend(joinquals, rinfo);
+               }
+       }
+       else
+               joinquals = path->joinrestrictinfo;
+
+       /*
+        * Get the JOIN_SEMI or JOIN_ANTI selectivity of the join clauses.
+        */
+       jselec = clauselist_selectivity(root,
+                                                                       joinquals,
+                                                                       0,
+                                                                       jointype,
+                                                                       sjinfo);
+
+       /*
+        * Also get the normal inner-join selectivity of the join clauses.
+        */
+       norm_sjinfo.type = T_SpecialJoinInfo;
+       norm_sjinfo.min_lefthand = path->outerjoinpath->parent->relids;
+       norm_sjinfo.min_righthand = path->innerjoinpath->parent->relids;
+       norm_sjinfo.syn_lefthand = path->outerjoinpath->parent->relids;
+       norm_sjinfo.syn_righthand = path->innerjoinpath->parent->relids;
+       norm_sjinfo.jointype = JOIN_INNER;
+       /* we don't bother trying to make the remaining fields valid */
+       norm_sjinfo.lhs_strict = false;
+       norm_sjinfo.delay_upper_joins = false;
+       norm_sjinfo.join_quals = NIL;
+
+       nselec = clauselist_selectivity(root,
+                                                                       joinquals,
+                                                                       0,
+                                                                       JOIN_INNER,
+                                                                       &norm_sjinfo);
+
+       /* Avoid leaking a lot of ListCells */
+       if (jointype == JOIN_ANTI)
+               list_free(joinquals);
+
+       /*
+        * jselec can be interpreted as the fraction of outer-rel rows that have
+        * any matches (this is true for both SEMI and ANTI cases).  And nselec is
+        * the fraction of the Cartesian product that matches.  So, the average
+        * number of matches for each outer-rel row that has at least one match is
+        * nselec * inner_rows / jselec.
+        *
+        * Note: it is correct to use the inner rel's "rows" count here, not
+        * PATH_ROWS(), even if the inner path under consideration is an inner
+        * indexscan.  This is because we have included all the join clauses in
+        * the selectivity estimate, even ones used in an inner indexscan.
+        */
+       if (jselec > 0)                         /* protect against zero divide */
+       {
+               avgmatch = nselec * path->innerjoinpath->parent->rows / jselec;
+               /* Clamp to sane range */
+               avgmatch = Max(1.0, avgmatch);
+       }
+       else
+               avgmatch = 1.0;
+
+       *outer_match_frac = jselec;
+       *match_count = avgmatch;
+
+       /*
+        * If requested, check whether the inner path uses all the joinquals as
+        * indexquals.  (If that's true, we can assume that an unmatched outer
+        * tuple is cheap to process, whereas otherwise it's probably expensive.)
+        */
+       if (indexed_join_quals)
+       {
+               if (path->joinrestrictinfo != NIL)
+               {
+                       List       *nrclauses;
+
+                       nrclauses = select_nonredundant_join_clauses(root,
+                                                                                                         path->joinrestrictinfo,
+                                                                                                                path->innerjoinpath);
+                       *indexed_join_quals = (nrclauses == NIL);
+               }
+               else
+               {
+                       /* a clauseless join does NOT qualify */
+                       *indexed_join_quals = false;
+               }
+       }
+
+       return true;
+}
+
+
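
As a worked example of the jselec/nselec arithmetic described above, with invented selectivities and an invented inner relation size:

    #include <stdio.h>

    #define Max(x, y)   ((x) > (y) ? (x) : (y))

    int
    main(void)
    {
        double      jselec = 0.25;      /* SEMI/ANTI selectivity: outer rows having a match */
        double      nselec = 0.0005;    /* plain inner-join selectivity */
        double      inner_rows = 10000.0;
        double      avgmatch;

        if (jselec > 0)                 /* protect against zero divide */
        {
            /* 0.0005 * 10000 / 0.25 = 20 expected matches per matched outer row */
            avgmatch = nselec * inner_rows / jselec;
            avgmatch = Max(1.0, avgmatch);
        }
        else
            avgmatch = 1.0;

        printf("outer_match_frac=%.2f match_count=%.1f\n", jselec, avgmatch);
        return 0;
    }
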
 /*
  * approx_tuple_count
  *             Quick-and-dirty estimation of the number of join rows passing
@@ -2324,6 +2984,11 @@ cost_qual_eval_walker(Node *node, cost_qual_eval_context *context)
  * The quals can be either an implicitly-ANDed list of boolean expressions,
  * or a list of RestrictInfo nodes (typically the latter).
  *
+ * We intentionally compute the selectivity under JOIN_INNER rules, even
+ * if it's some type of outer join.  This is appropriate because we are
+ * trying to figure out how many tuples pass the initial merge or hash
+ * join step.
+ *
  * This is quick-and-dirty because we bypass clauselist_selectivity, and
  * simply multiply the independent clause selectivities together.  Now
  * clauselist_selectivity often can't do any better than that anyhow, but
@@ -2336,31 +3001,40 @@ cost_qual_eval_walker(Node *node, cost_qual_eval_context *context)
  * seems OK to live with the approximation.
  */
 static double
-approx_tuple_count(PlannerInfo *root, JoinPath *path,
-                                  List *quals, SpecialJoinInfo *sjinfo)
+approx_tuple_count(PlannerInfo *root, JoinPath *path, List *quals)
 {
        double          tuples;
        double          outer_tuples = path->outerjoinpath->parent->rows;
        double          inner_tuples = path->innerjoinpath->parent->rows;
+       SpecialJoinInfo sjinfo;
        Selectivity selec = 1.0;
        ListCell   *l;
 
+       /*
+        * Make up a SpecialJoinInfo for JOIN_INNER semantics.
+        */
+       sjinfo.type = T_SpecialJoinInfo;
+       sjinfo.min_lefthand = path->outerjoinpath->parent->relids;
+       sjinfo.min_righthand = path->innerjoinpath->parent->relids;
+       sjinfo.syn_lefthand = path->outerjoinpath->parent->relids;
+       sjinfo.syn_righthand = path->innerjoinpath->parent->relids;
+       sjinfo.jointype = JOIN_INNER;
+       /* we don't bother trying to make the remaining fields valid */
+       sjinfo.lhs_strict = false;
+       sjinfo.delay_upper_joins = false;
+       sjinfo.join_quals = NIL;
+
        /* Get the approximate selectivity */
        foreach(l, quals)
        {
                Node       *qual = (Node *) lfirst(l);
 
                /* Note that clause_selectivity will be able to cache its result */
-               selec *= clause_selectivity(root, qual, 0, sjinfo->jointype, sjinfo);
+               selec *= clause_selectivity(root, qual, 0, JOIN_INNER, &sjinfo);
        }
 
-       /* Apply it correctly using the input relation sizes */
-       if (sjinfo->jointype == JOIN_SEMI)
-               tuples = selec * outer_tuples;
-       else if (sjinfo->jointype == JOIN_ANTI)
-               tuples = (1.0 - selec) * outer_tuples;
-       else
-               tuples = selec * outer_tuples * inner_tuples;
+       /* Apply it to the input relation sizes */
+       tuples = selec * outer_tuples * inner_tuples;
 
        return clamp_row_est(tuples);
 }
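
For instance, with two invented clause selectivities treated as independent, the estimate works out as follows (the final clamp is a crude stand-in for clamp_row_est()):

    #include <stdio.h>

    int
    main(void)
    {
        double      clause_selec[] = {0.01, 0.30};  /* hypothetical per-clause selectivities */
        double      outer_tuples = 1000.0;
        double      inner_tuples = 5000.0;
        double      selec = 1.0;
        double      tuples;
        int         i;

        for (i = 0; i < 2; i++)
            selec *= clause_selec[i];

        /* 0.003 * 1000 * 5000 = 15000 approximate join tuples */
        tuples = selec * outer_tuples * inner_tuples;
        if (tuples < 1.0)
            tuples = 1.0;

        printf("approx join tuples = %.0f\n", tuples);
        return 0;
    }
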
@@ -2371,7 +3045,7 @@ approx_tuple_count(PlannerInfo *root, JoinPath *path,
  *             Set the size estimates for the given base relation.
  *
  * The rel's targetlist and restrictinfo list must have been constructed
- * already.
+ * already, and rel->tuples must be set.
  *
  * We set the following fields of the rel node:
  *     rows: the estimated number of output tuples (after applying
@@ -2536,6 +3210,76 @@ set_joinrel_size_estimates(PlannerInfo *root, RelOptInfo *rel,
        rel->rows = clamp_row_est(nrows);
 }
 
+/*
+ * set_subquery_size_estimates
+ *             Set the size estimates for a base relation that is a subquery.
+ *
+ * The rel's targetlist and restrictinfo list must have been constructed
+ * already, and the plan for the subquery must have been completed.
+ * We look at the subquery's plan and PlannerInfo to extract data.
+ *
+ * We set the same fields as set_baserel_size_estimates.
+ */
+void
+set_subquery_size_estimates(PlannerInfo *root, RelOptInfo *rel,
+                                                       PlannerInfo *subroot)
+{
+       RangeTblEntry *rte;
+       ListCell   *lc;
+
+       /* Should only be applied to base relations that are subqueries */
+       Assert(rel->relid > 0);
+       rte = planner_rt_fetch(rel->relid, root);
+       Assert(rte->rtekind == RTE_SUBQUERY);
+
+       /* Copy raw number of output rows from subplan */
+       rel->tuples = rel->subplan->plan_rows;
+
+       /*
+        * Compute per-output-column width estimates by examining the subquery's
+        * targetlist.  For any output that is a plain Var, get the width estimate
+        * that was made while planning the subquery.  Otherwise, fall back on a
+        * datatype-based estimate.
+        */
+       foreach(lc, subroot->parse->targetList)
+       {
+               TargetEntry *te = (TargetEntry *) lfirst(lc);
+               Node       *texpr = (Node *) te->expr;
+               int32           item_width;
+
+               Assert(IsA(te, TargetEntry));
+               /* junk columns aren't visible to upper query */
+               if (te->resjunk)
+                       continue;
+
+               /*
+                * XXX This currently doesn't work for subqueries containing set
+                * operations, because the Vars in their tlists are bogus references
+                * to the first leaf subquery, which wouldn't give the right answer
+                * even if we could still get to its PlannerInfo.  So fall back on
+                * datatype in that case.
+                */
+               if (IsA(texpr, Var) &&
+                       subroot->parse->setOperations == NULL)
+               {
+                       Var                *var = (Var *) texpr;
+                       RelOptInfo *subrel = find_base_rel(subroot, var->varno);
+
+                       item_width = subrel->attr_widths[var->varattno - subrel->min_attr];
+               }
+               else
+               {
+                       item_width = get_typavgwidth(exprType(texpr), exprTypmod(texpr));
+               }
+               Assert(item_width > 0);
+               Assert(te->resno >= rel->min_attr && te->resno <= rel->max_attr);
+               rel->attr_widths[te->resno - rel->min_attr] = item_width;
+       }
+
+       /* Now estimate number of output rows, etc */
+       set_baserel_size_estimates(root, rel);
+}
+
 /*
  * set_function_size_estimates
  *             Set the size estimates for a base relation that is a function call.
@@ -2616,8 +3360,8 @@ set_cte_size_estimates(PlannerInfo *root, RelOptInfo *rel, Plan *cteplan)
        if (rte->self_reference)
        {
                /*
-                * In a self-reference, arbitrarily assume the average worktable
-                * size is about 10 times the nonrecursive term's size.
+                * In a self-reference, arbitrarily assume the average worktable size
+                * is about 10 times the nonrecursive term's size.
                 */
                rel->tuples = 10 * cteplan->plan_rows;
        }
@@ -2631,16 +3375,50 @@ set_cte_size_estimates(PlannerInfo *root, RelOptInfo *rel, Plan *cteplan)
        set_baserel_size_estimates(root, rel);
 }
 
+/*
+ * set_foreign_size_estimates
+ *             Set the size estimates for a base relation that is a foreign table.
+ *
+ * There is not a whole lot that we can do here; the foreign-data wrapper
+ * is responsible for producing useful estimates.  We can do a decent job
+ * of estimating baserestrictcost, so we set that, and we also set up width
+ * using what will be purely datatype-driven estimates from the targetlist.
+ * There is no way to do anything sane with the rows value, so we just put
+ * a default estimate and hope that the wrapper can improve on it.     The
+ * wrapper's PlanForeignScan function will be called momentarily.
+ *
+ * The rel's targetlist and restrictinfo list must have been constructed
+ * already.
+ */
+void
+set_foreign_size_estimates(PlannerInfo *root, RelOptInfo *rel)
+{
+       /* Should only be applied to base relations */
+       Assert(rel->relid > 0);
+
+       rel->rows = 1000;                       /* entirely bogus default estimate */
+
+       cost_qual_eval(&rel->baserestrictcost, rel->baserestrictinfo, root);
+
+       set_rel_width(root, rel);
+}
+
 
 /*
  * set_rel_width
  *             Set the estimated output width of a base relation.
  *
+ * The estimated output width is the sum of the per-attribute width estimates
+ * for the actually-referenced columns, plus any PHVs or other expressions
+ * that have to be calculated at this relation.  This is the amount of data
+ * we'd need to pass upwards in case of a sort, hash, etc.
+ *
  * NB: this works best on plain relations because it prefers to look at
- * real Vars.  It will fail to make use of pg_statistic info when applied
- * to a subquery relation, even if the subquery outputs are simple vars
- * that we could have gotten info for. Is it worth trying to be smarter
- * about subqueries?
+ * real Vars.  For subqueries, set_subquery_size_estimates will already have
+ * copied up whatever per-column estimates were made within the subquery,
+ * and for other types of rels there isn't much we can do anyway.  We fall
+ * back on (fairly stupid) datatype-based width estimates if we can't get
+ * any better number.
  *
  * The per-attribute width estimates are cached for possible re-use while
  * building join relations.
@@ -2650,6 +3428,7 @@ set_rel_width(PlannerInfo *root, RelOptInfo *rel)
 {
        Oid                     reloid = planner_rt_fetch(rel->relid, root)->relid;
        int32           tuple_width = 0;
+       bool            have_wholerow_var = false;
        ListCell   *lc;
 
        foreach(lc, rel->reltargetlist)
@@ -2669,7 +3448,18 @@ set_rel_width(PlannerInfo *root, RelOptInfo *rel)
                        ndx = var->varattno - rel->min_attr;
 
                        /*
-                        * The width probably hasn't been cached yet, but may as well check
+                        * If it's a whole-row Var, we'll deal with it below after we have
+                        * already cached as many attr widths as possible.
+                        */
+                       if (var->varattno == 0)
+                       {
+                               have_wholerow_var = true;
+                               continue;
+                       }
+
+                       /*
+                        * The width may have been cached already (especially if it's a
+                        * subquery), so don't duplicate effort.
                         */
                        if (rel->attr_widths[ndx] > 0)
                        {
@@ -2678,7 +3468,7 @@ set_rel_width(PlannerInfo *root, RelOptInfo *rel)
                        }
 
                        /* Try to get column width from statistics */
-                       if (reloid != InvalidOid)
+                       if (reloid != InvalidOid && var->varattno > 0)
                        {
                                item_width = get_attavgwidth(reloid, var->varattno);
                                if (item_width > 0)
@@ -2707,10 +3497,51 @@ set_rel_width(PlannerInfo *root, RelOptInfo *rel)
                }
                else
                {
-                       /* For now, punt on whole-row child Vars */
-                       tuple_width += 32;      /* arbitrary */
+                       /*
+                        * We could be looking at an expression pulled up from a subquery,
+                        * or a ROW() representing a whole-row child Var, etc.  Do what we
+                        * can using the expression type information.
+                        */
+                       int32           item_width;
+
+                       item_width = get_typavgwidth(exprType(node), exprTypmod(node));
+                       Assert(item_width > 0);
+                       tuple_width += item_width;
                }
        }
+
+       /*
+        * If we have a whole-row reference, estimate its width as the sum of
+        * per-column widths plus sizeof(HeapTupleHeaderData).
+        */
+       if (have_wholerow_var)
+       {
+               int32           wholerow_width = sizeof(HeapTupleHeaderData);
+
+               if (reloid != InvalidOid)
+               {
+                       /* Real relation, so estimate true tuple width */
+                       wholerow_width += get_relation_data_width(reloid,
+                                                                                  rel->attr_widths - rel->min_attr);
+               }
+               else
+               {
+                       /* Do what we can with info for a phony rel */
+                       AttrNumber      i;
+
+                       for (i = 1; i <= rel->max_attr; i++)
+                               wholerow_width += rel->attr_widths[i - rel->min_attr];
+               }
+
+               rel->attr_widths[0 - rel->min_attr] = wholerow_width;
+
+               /*
+                * Include the whole-row Var as part of the output tuple.  Yes, that
+                * really is what happens at runtime.
+                */
+               tuple_width += wholerow_width;
+       }
+
        Assert(tuple_width >= 0);
        rel->width = tuple_width;
 }
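
A minimal sketch of the whole-row width estimate above, using an invented 24-byte stand-in for sizeof(HeapTupleHeaderData) (the real value depends on the platform and on MAXALIGN) and made-up cached column widths:

    #include <stdio.h>

    int
    main(void)
    {
        int         tuple_header_size = 24; /* stand-in for sizeof(HeapTupleHeaderData) */
        int         attr_widths[] = {4, 8, 32, 16}; /* hypothetical cached widths */
        int         natts = 4;
        int         wholerow_width = tuple_header_size;
        int         i;

        for (i = 0; i < natts; i++)
            wholerow_width += attr_widths[i];

        /* the whole-row Var is carried in the output tuple in addition to any columns */
        printf("estimated whole-row width = %d bytes\n", wholerow_width);
        return 0;
    }
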