* Planning is complete, we just need to convert the selected
* Path into a Plan.
*
- * Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/optimizer/plan/createplan.c,v 1.115 2002/05/18 02:25:49 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/plan/createplan.c,v 1.213 2006/07/11 16:35:31 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#include "postgres.h"
-#include <sys/types.h>
+#include <limits.h>
#include "nodes/makefuncs.h"
#include "nodes/nodeFuncs.h"
#include "optimizer/clauses.h"
#include "optimizer/cost.h"
-#include "optimizer/paths.h"
+#include "optimizer/plancat.h"
#include "optimizer/planmain.h"
+#include "optimizer/predtest.h"
#include "optimizer/restrictinfo.h"
#include "optimizer/tlist.h"
#include "optimizer/var.h"
+#include "parser/parse_clause.h"
#include "parser/parse_expr.h"
+#include "parser/parsetree.h"
#include "utils/lsyscache.h"
#include "utils/syscache.h"
-static Scan *create_scan_plan(Query *root, Path *best_path);
-static Join *create_join_plan(Query *root, JoinPath *best_path);
-static Append *create_append_plan(Query *root, AppendPath *best_path);
-static SeqScan *create_seqscan_plan(Path *best_path, List *tlist,
- List *scan_clauses);
-static IndexScan *create_indexscan_plan(Query *root, IndexPath *best_path,
- List *tlist, List *scan_clauses);
-static TidScan *create_tidscan_plan(TidPath *best_path, List *tlist,
- List *scan_clauses);
-static SubqueryScan *create_subqueryscan_plan(Path *best_path,
+static Plan *create_scan_plan(PlannerInfo *root, Path *best_path);
+static List *build_relation_tlist(RelOptInfo *rel);
+static bool use_physical_tlist(RelOptInfo *rel);
+static void disuse_physical_tlist(Plan *plan, Path *path);
+static Plan *create_gating_plan(PlannerInfo *root, Plan *plan, List *quals);
+static Plan *create_join_plan(PlannerInfo *root, JoinPath *best_path);
+static Plan *create_append_plan(PlannerInfo *root, AppendPath *best_path);
+static Result *create_result_plan(PlannerInfo *root, ResultPath *best_path);
+static Material *create_material_plan(PlannerInfo *root, MaterialPath *best_path);
+static Plan *create_unique_plan(PlannerInfo *root, UniquePath *best_path);
+static SeqScan *create_seqscan_plan(PlannerInfo *root, Path *best_path,
+ List *tlist, List *scan_clauses);
+static IndexScan *create_indexscan_plan(PlannerInfo *root, IndexPath *best_path,
+ List *tlist, List *scan_clauses,
+ List **nonlossy_clauses);
+static BitmapHeapScan *create_bitmap_scan_plan(PlannerInfo *root,
+ BitmapHeapPath *best_path,
+ List *tlist, List *scan_clauses);
+static Plan *create_bitmap_subplan(PlannerInfo *root, Path *bitmapqual,
+ List **qual, List **indexqual);
+static TidScan *create_tidscan_plan(PlannerInfo *root, TidPath *best_path,
+ List *tlist, List *scan_clauses);
+static SubqueryScan *create_subqueryscan_plan(PlannerInfo *root, Path *best_path,
List *tlist, List *scan_clauses);
-static FunctionScan *create_functionscan_plan(Path *best_path,
+static FunctionScan *create_functionscan_plan(PlannerInfo *root, Path *best_path,
List *tlist, List *scan_clauses);
-static NestLoop *create_nestloop_plan(Query *root,
- NestPath *best_path, List *tlist,
- List *joinclauses, List *otherclauses,
- Plan *outer_plan, List *outer_tlist,
- Plan *inner_plan, List *inner_tlist);
-static MergeJoin *create_mergejoin_plan(Query *root,
- MergePath *best_path, List *tlist,
- List *joinclauses, List *otherclauses,
- Plan *outer_plan, List *outer_tlist,
- Plan *inner_plan, List *inner_tlist);
-static HashJoin *create_hashjoin_plan(Query *root,
- HashPath *best_path, List *tlist,
- List *joinclauses, List *otherclauses,
- Plan *outer_plan, List *outer_tlist,
- Plan *inner_plan, List *inner_tlist);
-static void fix_indxqual_references(List *indexquals, IndexPath *index_path,
- List **fixed_indexquals,
- List **recheck_indexquals);
-static void fix_indxqual_sublist(List *indexqual, int baserelid,
- IndexOptInfo *index,
- List **fixed_quals, List **recheck_quals);
-static Node *fix_indxqual_operand(Node *node, int baserelid,
- IndexOptInfo *index,
- Oid *opclass);
-static List *switch_outer(List *clauses);
+static NestLoop *create_nestloop_plan(PlannerInfo *root, NestPath *best_path,
+ Plan *outer_plan, Plan *inner_plan);
+static MergeJoin *create_mergejoin_plan(PlannerInfo *root, MergePath *best_path,
+ Plan *outer_plan, Plan *inner_plan);
+static HashJoin *create_hashjoin_plan(PlannerInfo *root, HashPath *best_path,
+ Plan *outer_plan, Plan *inner_plan);
+static void fix_indexqual_references(List *indexquals, IndexPath *index_path,
+ List **fixed_indexquals,
+ List **nonlossy_indexquals,
+ List **indexstrategy,
+ List **indexsubtype);
+static Node *fix_indexqual_operand(Node *node, IndexOptInfo *index,
+ Oid *opclass);
+static List *get_switched_clauses(List *clauses, Relids outerrelids);
+static List *order_qual_clauses(PlannerInfo *root, List *clauses);
static void copy_path_costsize(Plan *dest, Path *src);
static void copy_plan_costsize(Plan *dest, Plan *src);
static SeqScan *make_seqscan(List *qptlist, List *qpqual, Index scanrelid);
static IndexScan *make_indexscan(List *qptlist, List *qpqual, Index scanrelid,
- List *indxid, List *indxqual,
- List *indxqualorig,
+ Oid indexid, List *indexqual, List *indexqualorig,
+ List *indexstrategy, List *indexsubtype,
ScanDirection indexscandir);
+static BitmapIndexScan *make_bitmap_indexscan(Index scanrelid, Oid indexid,
+ List *indexqual,
+ List *indexqualorig,
+ List *indexstrategy,
+ List *indexsubtype);
+static BitmapHeapScan *make_bitmap_heapscan(List *qptlist,
+ List *qpqual,
+ Plan *lefttree,
+ List *bitmapqualorig,
+ Index scanrelid);
static TidScan *make_tidscan(List *qptlist, List *qpqual, Index scanrelid,
- List *tideval);
+ List *tidquals);
static FunctionScan *make_functionscan(List *qptlist, List *qpqual,
- Index scanrelid);
+ Index scanrelid);
+static BitmapAnd *make_bitmap_and(List *bitmapplans);
+static BitmapOr *make_bitmap_or(List *bitmapplans);
static NestLoop *make_nestloop(List *tlist,
 List *joinclauses, List *otherclauses,
 Plan *lefttree, Plan *righttree,
 JoinType jointype);
static HashJoin *make_hashjoin(List *tlist,
 List *joinclauses, List *otherclauses,
 List *hashclauses,
 Plan *lefttree, Plan *righttree,
 JoinType jointype);
-static Hash *make_hash(List *tlist, Node *hashkey, Plan *lefttree);
+static Hash *make_hash(Plan *lefttree);
static MergeJoin *make_mergejoin(List *tlist,
List *joinclauses, List *otherclauses,
List *mergeclauses,
Plan *lefttree, Plan *righttree,
JoinType jointype);
+static Sort *make_sort(PlannerInfo *root, Plan *lefttree, int numCols,
+ AttrNumber *sortColIdx, Oid *sortOperators);
+static Sort *make_sort_from_pathkeys(PlannerInfo *root, Plan *lefttree,
+ List *pathkeys);
+
/*
* create_plan
* Returns a Plan tree.
*/
Plan *
-create_plan(Query *root, Path *best_path)
+create_plan(PlannerInfo *root, Path *best_path)
{
Plan *plan;
switch (best_path->pathtype)
{
- case T_IndexScan:
case T_SeqScan:
+ case T_IndexScan:
+ case T_BitmapHeapScan:
case T_TidScan:
case T_SubqueryScan:
case T_FunctionScan:
- plan = (Plan *) create_scan_plan(root, best_path);
+ plan = create_scan_plan(root, best_path);
break;
case T_HashJoin:
case T_MergeJoin:
case T_NestLoop:
- plan = (Plan *) create_join_plan(root,
- (JoinPath *) best_path);
+ plan = create_join_plan(root,
+ (JoinPath *) best_path);
break;
case T_Append:
- plan = (Plan *) create_append_plan(root,
- (AppendPath *) best_path);
+ plan = create_append_plan(root,
+ (AppendPath *) best_path);
+ break;
+ case T_Result:
+ plan = (Plan *) create_result_plan(root,
+ (ResultPath *) best_path);
+ break;
+ case T_Material:
+ plan = (Plan *) create_material_plan(root,
+ (MaterialPath *) best_path);
+ break;
+ case T_Unique:
+ plan = create_unique_plan(root,
+ (UniquePath *) best_path);
break;
default:
- elog(ERROR, "create_plan: unknown pathtype %d",
- best_path->pathtype);
+ elog(ERROR, "unrecognized node type: %d",
+ (int) best_path->pathtype);
plan = NULL; /* keep compiler quiet */
break;
}
-#ifdef NOT_USED /* fix xfunc */
- /* sort clauses by cost/(1-selectivity) -- JMH 2/26/92 */
- if (XfuncMode != XFUNC_OFF)
- {
- set_qpqual((Plan) plan,
- lisp_qsort(get_qpqual((Plan) plan),
- xfunc_clause_compare));
- if (XfuncMode != XFUNC_NOR)
- /* sort the disjuncts within each clause by cost -- JMH 3/4/92 */
- xfunc_disjunct_sort(plan->qpqual);
- }
-#endif
-
return plan;
}
/*
* create_scan_plan
* Create a scan plan for the parent relation of 'best_path'.
- *
- * Returns a Plan node.
*/
-static Scan *
-create_scan_plan(Query *root, Path *best_path)
+static Plan *
+create_scan_plan(PlannerInfo *root, Path *best_path)
{
- Scan *plan;
- List *tlist = best_path->parent->targetlist;
+ RelOptInfo *rel = best_path->parent;
+ List *tlist;
List *scan_clauses;
+ Plan *plan;
/*
- * Extract the relevant restriction clauses from the parent relation;
- * the executor must apply all these restrictions during the scan.
+ * For table scans, rather than using the relation targetlist (which is
+ * only those Vars actually needed by the query), we prefer to generate a
+ * tlist containing all Vars in order. This will allow the executor to
+ * optimize away projection of the table tuples, if possible. (Note that
+ * planner.c may replace the tlist we generate here, forcing projection to
+ * occur.)
*/
- scan_clauses = get_actual_clauses(best_path->parent->baserestrictinfo);
+ if (use_physical_tlist(rel))
+ {
+ tlist = build_physical_tlist(root, rel);
+ /* if fail because of dropped cols, use regular method */
+ if (tlist == NIL)
+ tlist = build_relation_tlist(rel);
+ }
+ else
+ tlist = build_relation_tlist(rel);
+
+ /*
+ * Extract the relevant restriction clauses from the parent relation.
+ * The executor must apply all these restrictions during the scan,
+ * except for pseudoconstants which we'll take care of below.
+ */
+ scan_clauses = rel->baserestrictinfo;
switch (best_path->pathtype)
{
case T_SeqScan:
- plan = (Scan *) create_seqscan_plan(best_path,
+ plan = (Plan *) create_seqscan_plan(root,
+ best_path,
tlist,
scan_clauses);
break;
case T_IndexScan:
- plan = (Scan *) create_indexscan_plan(root,
+ plan = (Plan *) create_indexscan_plan(root,
(IndexPath *) best_path,
tlist,
- scan_clauses);
+ scan_clauses,
+ NULL);
+ break;
+
+ case T_BitmapHeapScan:
+ plan = (Plan *) create_bitmap_scan_plan(root,
+ (BitmapHeapPath *) best_path,
+ tlist,
+ scan_clauses);
break;
case T_TidScan:
- plan = (Scan *) create_tidscan_plan((TidPath *) best_path,
+ plan = (Plan *) create_tidscan_plan(root,
+ (TidPath *) best_path,
tlist,
scan_clauses);
break;
case T_SubqueryScan:
- plan = (Scan *) create_subqueryscan_plan(best_path,
+ plan = (Plan *) create_subqueryscan_plan(root,
+ best_path,
tlist,
scan_clauses);
break;
case T_FunctionScan:
- plan = (Scan *) create_functionscan_plan(best_path,
- tlist,
- scan_clauses);
+ plan = (Plan *) create_functionscan_plan(root,
+ best_path,
+ tlist,
+ scan_clauses);
break;
default:
- elog(ERROR, "create_scan_plan: unknown node type: %d",
- best_path->pathtype);
+ elog(ERROR, "unrecognized node type: %d",
+ (int) best_path->pathtype);
plan = NULL; /* keep compiler quiet */
break;
}
+ /*
+ * If there are any pseudoconstant clauses attached to this node,
+ * insert a gating Result node that evaluates the pseudoconstants
+ * as one-time quals.
+ */
+ if (root->hasPseudoConstantQuals)
+ plan = create_gating_plan(root, plan, scan_clauses);
+
return plan;
}
+/*
+ * Build a target list (ie, a list of TargetEntry) for a relation.
+ */
+static List *
+build_relation_tlist(RelOptInfo *rel)
+{
+ List *tlist = NIL;
+ int resno = 1;
+ ListCell *v;
+
+ foreach(v, rel->reltargetlist)
+ {
+ /* Do we really need to copy here? Not sure */
+ Var *var = (Var *) copyObject(lfirst(v));
+
+ tlist = lappend(tlist, makeTargetEntry((Expr *) var,
+ resno,
+ NULL,
+ false));
+ resno++;
+ }
+ return tlist;
+}
+
+/*
+ * use_physical_tlist
+ * Decide whether to use a tlist matching relation structure,
+ * rather than only those Vars actually referenced.
+ */
+static bool
+use_physical_tlist(RelOptInfo *rel)
+{
+ int i;
+
+ /*
+ * We can do this for real relation scans, subquery scans, and function
+ * scans (but not for, eg, joins).
+ */
+ if (rel->rtekind != RTE_RELATION &&
+ rel->rtekind != RTE_SUBQUERY &&
+ rel->rtekind != RTE_FUNCTION)
+ return false;
+
+ /*
+ * Can't do it with inheritance cases either (mainly because Append
+ * doesn't project).
+ */
+ if (rel->reloptkind != RELOPT_BASEREL)
+ return false;
+
+ /*
+ * Can't do it if any system columns or whole-row Vars are requested,
+ * either. (This could possibly be fixed but would take some fragile
+ * assumptions in setrefs.c, I think.)
+ */
+ for (i = rel->min_attr; i <= 0; i++)
+ {
+ if (!bms_is_empty(rel->attr_needed[i - rel->min_attr]))
+ return false;
+ }
+
+ return true;
+}
+
+/*
+ * disuse_physical_tlist
+ * Switch a plan node back to emitting only Vars actually referenced.
+ *
+ * If the plan node immediately above a scan would prefer to get only
+ * needed Vars and not a physical tlist, it must call this routine to
+ * undo the decision made by use_physical_tlist(). Currently, Hash, Sort,
+ * and Material nodes want this, so they don't have to store useless columns.
+ */
+static void
+disuse_physical_tlist(Plan *plan, Path *path)
+{
+ /* Only need to undo it for path types handled by create_scan_plan() */
+ switch (path->pathtype)
+ {
+ case T_SeqScan:
+ case T_IndexScan:
+ case T_BitmapHeapScan:
+ case T_TidScan:
+ case T_SubqueryScan:
+ case T_FunctionScan:
+ plan->targetlist = build_relation_tlist(path->parent);
+ break;
+ default:
+ break;
+ }
+}
+
+/*
+ * create_gating_plan
+ * Deal with pseudoconstant qual clauses
+ *
+ * If the node's quals list includes any pseudoconstant quals, put them
+ * into a gating Result node atop the already-built plan. Otherwise,
+ * return the plan as-is.
+ *
+ * Note that we don't change cost or size estimates when doing gating.
+ * The costs of qual eval were already folded into the plan's startup cost.
+ * Leaving the size alone amounts to assuming that the gating qual will
+ * succeed, which is the conservative estimate for planning upper queries.
+ * We certainly don't want to assume the output size is zero (unless the
+ * gating qual is actually constant FALSE, and that case is dealt with in
+ * clausesel.c). Interpolating between the two cases is silly, because
+ * it doesn't reflect what will really happen at runtime, and besides which
+ * in most cases we have only a very bad idea of the probability of the gating
+ * qual being true.
+ */
+static Plan *
+create_gating_plan(PlannerInfo *root, Plan *plan, List *quals)
+{
+ List *pseudoconstants;
+
+ /* Pull out any pseudoconstant quals from the RestrictInfo list */
+ pseudoconstants = extract_actual_clauses(quals, true);
+
+ if (!pseudoconstants)
+ return plan;
+
+ pseudoconstants = order_qual_clauses(root, pseudoconstants);
+
+ return (Plan *) make_result((List *) copyObject(plan->targetlist),
+ (Node *) pseudoconstants,
+ plan);
+}
+
/*
* create_join_plan
* Create a join plan for 'best_path' and (recursively) plans for its
* inner and outer paths.
- *
- * Returns a Plan node.
*/
-static Join *
-create_join_plan(Query *root, JoinPath *best_path)
+static Plan *
+create_join_plan(PlannerInfo *root, JoinPath *best_path)
{
- List *join_tlist = best_path->path.parent->targetlist;
Plan *outer_plan;
- List *outer_tlist;
Plan *inner_plan;
- List *inner_tlist;
- List *joinclauses;
- List *otherclauses;
- Join *plan;
+ Plan *plan;
outer_plan = create_plan(root, best_path->outerjoinpath);
- outer_tlist = outer_plan->targetlist;
-
inner_plan = create_plan(root, best_path->innerjoinpath);
- inner_tlist = inner_plan->targetlist;
-
- if (IS_OUTER_JOIN(best_path->jointype))
- {
- get_actual_join_clauses(best_path->joinrestrictinfo,
- &joinclauses, &otherclauses);
- }
- else
- {
- /* We can treat all clauses alike for an inner join */
- joinclauses = get_actual_clauses(best_path->joinrestrictinfo);
- otherclauses = NIL;
- }
switch (best_path->path.pathtype)
{
case T_MergeJoin:
- plan = (Join *) create_mergejoin_plan(root,
+ plan = (Plan *) create_mergejoin_plan(root,
(MergePath *) best_path,
- join_tlist,
- joinclauses,
- otherclauses,
outer_plan,
- outer_tlist,
- inner_plan,
- inner_tlist);
+ inner_plan);
break;
case T_HashJoin:
- plan = (Join *) create_hashjoin_plan(root,
+ plan = (Plan *) create_hashjoin_plan(root,
(HashPath *) best_path,
- join_tlist,
- joinclauses,
- otherclauses,
outer_plan,
- outer_tlist,
- inner_plan,
- inner_tlist);
+ inner_plan);
break;
case T_NestLoop:
- plan = (Join *) create_nestloop_plan(root,
+ plan = (Plan *) create_nestloop_plan(root,
(NestPath *) best_path,
- join_tlist,
- joinclauses,
- otherclauses,
outer_plan,
- outer_tlist,
- inner_plan,
- inner_tlist);
+ inner_plan);
break;
default:
- elog(ERROR, "create_join_plan: unknown node type: %d",
- best_path->path.pathtype);
+ elog(ERROR, "unrecognized node type: %d",
+ (int) best_path->path.pathtype);
plan = NULL; /* keep compiler quiet */
break;
}
+ /*
+ * If there are any pseudoconstant clauses attached to this node,
+ * insert a gating Result node that evaluates the pseudoconstants
+ * as one-time quals.
+ */
+ if (root->hasPseudoConstantQuals)
+ plan = create_gating_plan(root, plan, best_path->joinrestrictinfo);
+
#ifdef NOT_USED
/*
- * * Expensive function pullups may have pulled local predicates *
- * into this path node. Put them in the qpqual of the plan node. *
- * JMH, 6/15/92
+ * * Expensive function pullups may have pulled local predicates * into
+ * this path node. Put them in the qpqual of the plan node. * JMH,
+ * 6/15/92
*/
if (get_loc_restrictinfo(best_path) != NIL)
set_qpqual((Plan) plan,
- nconc(get_qpqual((Plan) plan),
- get_actual_clauses(get_loc_restrictinfo(best_path))));
+ list_concat(get_qpqual((Plan) plan),
+ get_actual_clauses(get_loc_restrictinfo(best_path))));
#endif
return plan;
*
* Returns a Plan node.
*/
-static Append *
-create_append_plan(Query *root, AppendPath *best_path)
+static Plan *
+create_append_plan(PlannerInfo *root, AppendPath *best_path)
{
Append *plan;
- List *tlist = best_path->path.parent->targetlist;
+ List *tlist = build_relation_tlist(best_path->path.parent);
List *subplans = NIL;
- List *subpaths;
+ ListCell *subpaths;
+ /*
+ * It is possible for the subplans list to contain only one entry, or even
+ * no entries. Handle these cases specially.
+ *
+ * XXX ideally, if there's just one entry, we'd not bother to generate an
+ * Append node but just return the single child. At the moment this does
+ * not work because the varno of the child scan plan won't match the
+ * parent-rel Vars it'll be asked to emit.
+ */
+ if (best_path->subpaths == NIL)
+ {
+ /* Generate a Result plan with constant-FALSE gating qual */
+ return (Plan *) make_result(tlist,
+ (Node *) list_make1(makeBoolConst(false,
+ false)),
+ NULL);
+ }
+
+ /* Normal case with multiple subpaths */
foreach(subpaths, best_path->subpaths)
{
Path *subpath = (Path *) lfirst(subpaths);
plan = make_append(subplans, false, tlist);
+ return (Plan *) plan;
+}
+
+/*
+ * create_result_plan
+ * Create a Result plan for 'best_path'.
+ * This is only used for the case of a query with an empty jointree.
+ *
+ * Returns a Plan node.
+ */
+static Result *
+create_result_plan(PlannerInfo *root, ResultPath *best_path)
+{
+ List *tlist;
+ List *quals;
+
+ /* The tlist will be installed later, since we have no RelOptInfo */
+ Assert(best_path->path.parent == NULL);
+ tlist = NIL;
+
+ quals = order_qual_clauses(root, best_path->quals);
+
+ return make_result(tlist, (Node *) quals, NULL);
+}
+
+/*
+ * create_material_plan
+ * Create a Material plan for 'best_path' and (recursively) plans
+ * for its subpaths.
+ *
+ * Returns a Plan node.
+ */
+static Material *
+create_material_plan(PlannerInfo *root, MaterialPath *best_path)
+{
+ Material *plan;
+ Plan *subplan;
+
+ subplan = create_plan(root, best_path->subpath);
+
+ /* We don't want any excess columns in the materialized tuples */
+ disuse_physical_tlist(subplan, best_path->subpath);
+
+ plan = make_material(subplan);
+
+ copy_path_costsize(&plan->plan, (Path *) best_path);
+
+ return plan;
+}
+
+/*
+ * create_unique_plan
+ * Create a Unique plan for 'best_path' and (recursively) plans
+ * for its subpaths.
+ *
+ * Returns a Plan node.
+ */
+static Plan *
+create_unique_plan(PlannerInfo *root, UniquePath *best_path)
+{
+ Plan *plan;
+ Plan *subplan;
+ List *uniq_exprs;
+ List *newtlist;
+ int nextresno;
+ bool newitems;
+ int numGroupCols;
+ AttrNumber *groupColIdx;
+ int groupColPos;
+ ListCell *l;
+
+ subplan = create_plan(root, best_path->subpath);
+
+ /* Done if we don't need to do any actual unique-ifying */
+ if (best_path->umethod == UNIQUE_PATH_NOOP)
+ return subplan;
+
+ /*----------
+ * As constructed, the subplan has a "flat" tlist containing just the
+ * Vars needed here and at upper levels. The values we are supposed
+ * to unique-ify may be expressions in these variables. We have to
+ * add any such expressions to the subplan's tlist.
+ *
+ * The subplan may have a "physical" tlist if it is a simple scan plan.
+ * This should be left as-is if we don't need to add any expressions;
+ * but if we do have to add expressions, then a projection step will be
+ * needed at runtime anyway, and so we may as well remove unneeded items.
+ * Therefore newtlist starts from build_relation_tlist() not just a
+ * copy of the subplan's tlist; and we don't install it into the subplan
+ * unless stuff has to be added.
+ *
+ * To find the correct list of values to unique-ify, we look in the
+ * information saved for IN expressions. If this code is ever used in
+ * other scenarios, some other way of finding what to unique-ify will
+ * be needed.
+ *----------
+ */
+ uniq_exprs = NIL; /* just to keep compiler quiet */
+ foreach(l, root->in_info_list)
+ {
+ InClauseInfo *ininfo = (InClauseInfo *) lfirst(l);
+
+ if (bms_equal(ininfo->righthand, best_path->path.parent->relids))
+ {
+ uniq_exprs = ininfo->sub_targetlist;
+ break;
+ }
+ }
+ if (l == NULL) /* fell out of loop? */
+ elog(ERROR, "could not find UniquePath in in_info_list");
+
+ /* initialize modified subplan tlist as just the "required" vars */
+ newtlist = build_relation_tlist(best_path->path.parent);
+ nextresno = list_length(newtlist) + 1;
+ newitems = false;
+
+ foreach(l, uniq_exprs)
+ {
+ Node *uniqexpr = lfirst(l);
+ TargetEntry *tle;
+
+ tle = tlist_member(uniqexpr, newtlist);
+ if (!tle)
+ {
+ tle = makeTargetEntry((Expr *) uniqexpr,
+ nextresno,
+ NULL,
+ false);
+ newtlist = lappend(newtlist, tle);
+ nextresno++;
+ newitems = true;
+ }
+ }
+
+ if (newitems)
+ {
+ /*
+ * If the top plan node can't do projections, we need to add a Result
+ * node to help it along.
+ */
+ if (!is_projection_capable_plan(subplan))
+ subplan = (Plan *) make_result(newtlist, NULL, subplan);
+ else
+ subplan->targetlist = newtlist;
+ }
+
+ /*
+ * Build control information showing which subplan output columns are to
+ * be examined by the grouping step. Unfortunately we can't merge this
+ * with the previous loop, since we didn't then know which version of the
+ * subplan tlist we'd end up using.
+ */
+ newtlist = subplan->targetlist;
+ numGroupCols = list_length(uniq_exprs);
+ groupColIdx = (AttrNumber *) palloc(numGroupCols * sizeof(AttrNumber));
+ groupColPos = 0;
+
+ foreach(l, uniq_exprs)
+ {
+ Node *uniqexpr = lfirst(l);
+ TargetEntry *tle;
+
+ tle = tlist_member(uniqexpr, newtlist);
+ if (!tle) /* shouldn't happen */
+ elog(ERROR, "failed to find unique expression in subplan tlist");
+ groupColIdx[groupColPos++] = tle->resno;
+ }
+
+ if (best_path->umethod == UNIQUE_PATH_HASH)
+ {
+ long numGroups;
+
+ numGroups = (long) Min(best_path->rows, (double) LONG_MAX);
+
+ /*
+ * Since the Agg node is going to project anyway, we can give it the
+ * minimum output tlist, without any stuff we might have added to the
+ * subplan tlist.
+ */
+ plan = (Plan *) make_agg(root,
+ build_relation_tlist(best_path->path.parent),
+ NIL,
+ AGG_HASHED,
+ numGroupCols,
+ groupColIdx,
+ numGroups,
+ 0,
+ subplan);
+ }
+ else
+ {
+ List *sortList = NIL;
+
+ for (groupColPos = 0; groupColPos < numGroupCols; groupColPos++)
+ {
+ TargetEntry *tle;
+
+ tle = get_tle_by_resno(subplan->targetlist,
+ groupColIdx[groupColPos]);
+ Assert(tle != NULL);
+ sortList = addTargetToSortList(NULL, tle,
+ sortList, subplan->targetlist,
+ SORTBY_ASC, NIL, false);
+ }
+ plan = (Plan *) make_sort_from_sortclauses(root, sortList, subplan);
+ plan = (Plan *) make_unique(plan, sortList);
+ }
+
+ /* Adjust output size estimate (other fields should be OK already) */
+ plan->plan_rows = best_path->rows;
+
return plan;
}
* with restriction clauses 'scan_clauses' and targetlist 'tlist'.
*/
static SeqScan *
-create_seqscan_plan(Path *best_path, List *tlist, List *scan_clauses)
+create_seqscan_plan(PlannerInfo *root, Path *best_path,
+ List *tlist, List *scan_clauses)
{
SeqScan *scan_plan;
- Index scan_relid;
+ Index scan_relid = best_path->parent->relid;
- /* there should be exactly one base rel involved... */
- Assert(length(best_path->parent->relids) == 1);
+ /* it should be a base rel... */
+ Assert(scan_relid > 0);
Assert(best_path->parent->rtekind == RTE_RELATION);
- scan_relid = (Index) lfirsti(best_path->parent->relids);
+ /* Reduce RestrictInfo list to bare expressions; ignore pseudoconstants */
+ scan_clauses = extract_actual_clauses(scan_clauses, false);
+
+ /* Sort clauses into best execution order */
+ scan_clauses = order_qual_clauses(root, scan_clauses);
scan_plan = make_seqscan(tlist,
scan_clauses,
/*
* create_indexscan_plan
- * Returns a indexscan plan for the base relation scanned by 'best_path'
+ * Returns an indexscan plan for the base relation scanned by 'best_path'
* with restriction clauses 'scan_clauses' and targetlist 'tlist'.
*
- * The indexqual of the path contains a sublist of implicitly-ANDed qual
- * conditions for each scan of the index(es); if there is more than one
- * scan then the retrieved tuple sets are ORed together. The indexqual
- * and indexinfo lists must have the same length, ie, the number of scans
- * that will occur. Note it is possible for a qual condition sublist
- * to be empty --- then no index restrictions will be applied during that
- * scan.
+ * The indexquals list of the path contains implicitly-ANDed qual conditions.
+ * The list can be empty --- then no index restrictions will be applied during
+ * the scan.
+ *
+ * If nonlossy_clauses isn't NULL, *nonlossy_clauses receives a list of the
+ * nonlossy indexquals.
*/
static IndexScan *
-create_indexscan_plan(Query *root,
+create_indexscan_plan(PlannerInfo *root,
IndexPath *best_path,
List *tlist,
- List *scan_clauses)
+ List *scan_clauses,
+ List **nonlossy_clauses)
{
- List *indxqual = best_path->indexqual;
- Index baserelid;
+ List *indexquals = best_path->indexquals;
+ Index baserelid = best_path->path.parent->relid;
+ Oid indexoid = best_path->indexinfo->indexoid;
List *qpqual;
- Expr *indxqual_or_expr = NULL;
- List *fixed_indxqual;
- List *recheck_indxqual;
- List *indexids;
- List *ixinfo;
+ List *stripped_indexquals;
+ List *fixed_indexquals;
+ List *nonlossy_indexquals;
+ List *indexstrategy;
+ List *indexsubtype;
+ ListCell *l;
IndexScan *scan_plan;
- /* there should be exactly one base rel involved... */
- Assert(length(best_path->path.parent->relids) == 1);
+ /* it should be a base rel... */
+ Assert(baserelid > 0);
Assert(best_path->path.parent->rtekind == RTE_RELATION);
- baserelid = lfirsti(best_path->path.parent->relids);
+ /*
+ * Build "stripped" indexquals structure (no RestrictInfos) to pass to
+ * executor as indexqualorig
+ */
+ stripped_indexquals = get_actual_clauses(indexquals);
/*
- * Build list of index OIDs.
+ * The executor needs a copy with the indexkey on the left of each clause
+ * and with index attr numbers substituted for table ones. This pass also
+ * gets strategy info and looks for "lossy" operators.
*/
- indexids = NIL;
- foreach(ixinfo, best_path->indexinfo)
- {
- IndexOptInfo *index = (IndexOptInfo *) lfirst(ixinfo);
+ fix_indexqual_references(indexquals, best_path,
+ &fixed_indexquals,
+ &nonlossy_indexquals,
+ &indexstrategy,
+ &indexsubtype);
- indexids = lappendi(indexids, index->indexoid);
- }
+ /* pass back nonlossy quals if caller wants 'em */
+ if (nonlossy_clauses)
+ *nonlossy_clauses = nonlossy_indexquals;
/*
- * The qpqual list must contain all restrictions not automatically
- * handled by the index. Normally the predicates in the indxqual are
- * checked fully by the index, but if the index is "lossy" for a
- * particular operator (as signaled by the amopreqcheck flag in
- * pg_amop), then we need to double-check that predicate in qpqual,
- * because the index may return more tuples than match the predicate.
+ * If this is an innerjoin scan, the indexclauses will contain join
+ * clauses that are not present in scan_clauses (since the passed-in value
+ * is just the rel's baserestrictinfo list). We must add these clauses to
+ * scan_clauses to ensure they get checked. In most cases we will remove
+ * the join clauses again below, but if a join clause contains a special
+ * operator, we need to make sure it gets into the scan_clauses.
*
- * Since the indexquals were generated from the restriction clauses given
- * by scan_clauses, there will normally be some duplications between
- * the lists. We get rid of the duplicates, then add back if lossy.
+ * Note: pointer comparison should be enough to determine RestrictInfo
+ * matches.
*/
- if (length(indxqual) > 1)
- {
- /*
- * Build an expression representation of the indexqual, expanding
- * the implicit OR and AND semantics of the first- and
- * second-level lists.
- */
- List *orclauses = NIL;
- List *orclause;
+ if (best_path->isjoininner)
+ scan_clauses = list_union_ptr(scan_clauses, best_path->indexclauses);
- foreach(orclause, indxqual)
+ /*
+ * The qpqual list must contain all restrictions not automatically handled
+ * by the index. All the predicates in the indexquals will be checked
+ * (either by the index itself, or by nodeIndexscan.c), but if there are
+ * any "special" operators involved then they must be included in qpqual.
+ * Also, any lossy index operators must be rechecked in the qpqual. The
+ * upshot is that qpqual must contain scan_clauses minus whatever appears
+ * in nonlossy_indexquals.
+ *
+ * In normal cases simple pointer equality checks will be enough to spot
+ * duplicate RestrictInfos, so we try that first. In some situations
+ * (particularly with OR'd index conditions) we may have scan_clauses that
+ * are not equal to, but are logically implied by, the index quals; so we
+ * also try a predicate_implied_by() check to see if we can discard quals
+ * that way. (predicate_implied_by assumes its first input contains only
+ * immutable functions, so we have to check that.)
+ *
+ * We can also discard quals that are implied by a partial index's
+ * predicate, but only in a plain SELECT; when scanning a target relation
+ * of UPDATE/DELETE/SELECT FOR UPDATE, we must leave such quals in the
+ * plan so that they'll be properly rechecked by EvalPlanQual testing.
+ *
+ * While at it, we strip off the RestrictInfos to produce a list of plain
+ * expressions (this loop replaces extract_actual_clauses used in the
+ * other routines in this file). We have to ignore pseudoconstants.
+ */
+ qpqual = NIL;
+ foreach(l, scan_clauses)
+ {
+ RestrictInfo *rinfo = (RestrictInfo *) lfirst(l);
+
+ Assert(IsA(rinfo, RestrictInfo));
+ if (rinfo->pseudoconstant)
+ continue;
+ if (list_member_ptr(nonlossy_indexquals, rinfo))
+ continue;
+ if (!contain_mutable_functions((Node *) rinfo->clause))
{
- orclauses = lappend(orclauses,
- make_ands_explicit(lfirst(orclause)));
- }
- indxqual_or_expr = make_orclause(orclauses);
+ List *clausel = list_make1(rinfo->clause);
- qpqual = set_difference(scan_clauses, makeList1(indxqual_or_expr));
- }
- else if (indxqual != NIL)
- {
- /*
- * Here, we can simply treat the first sublist as an independent
- * set of qual expressions, since there is no top-level OR
- * behavior.
- */
- qpqual = set_difference(scan_clauses, lfirst(indxqual));
+ if (predicate_implied_by(clausel, nonlossy_indexquals))
+ continue;
+ if (best_path->indexinfo->indpred)
+ {
+ if (baserelid != root->parse->resultRelation &&
+ get_rowmark(root->parse, baserelid) == NULL)
+ if (predicate_implied_by(clausel,
+ best_path->indexinfo->indpred))
+ continue;
+ }
+ }
+ qpqual = lappend(qpqual, rinfo->clause);
}
- else
- qpqual = scan_clauses;
+
+ /* Sort clauses into best execution order */
+ qpqual = order_qual_clauses(root, qpqual);
+
+ /* Finally ready to build the plan node */
+ scan_plan = make_indexscan(tlist,
+ qpqual,
+ baserelid,
+ indexoid,
+ fixed_indexquals,
+ stripped_indexquals,
+ indexstrategy,
+ indexsubtype,
+ best_path->indexscandir);
+
+ copy_path_costsize(&scan_plan->scan.plan, &best_path->path);
+ /* use the indexscan-specific rows estimate, not the parent rel's */
+ scan_plan->scan.plan.plan_rows = best_path->rows;
+
+ return scan_plan;
+}
+
+/*
+ * create_bitmap_scan_plan
+ * Returns a bitmap scan plan for the base relation scanned by 'best_path'
+ * with restriction clauses 'scan_clauses' and targetlist 'tlist'.
+ */
+static BitmapHeapScan *
+create_bitmap_scan_plan(PlannerInfo *root,
+ BitmapHeapPath *best_path,
+ List *tlist,
+ List *scan_clauses)
+{
+ Index baserelid = best_path->path.parent->relid;
+ Plan *bitmapqualplan;
+ List *bitmapqualorig;
+ List *indexquals;
+ List *qpqual;
+ ListCell *l;
+ BitmapHeapScan *scan_plan;
+
+ /* it should be a base rel... */
+ Assert(baserelid > 0);
+ Assert(best_path->path.parent->rtekind == RTE_RELATION);
+
+ /* Process the bitmapqual tree into a Plan tree and qual lists */
+ bitmapqualplan = create_bitmap_subplan(root, best_path->bitmapqual,
+ &bitmapqualorig, &indexquals);
+
+ /* Reduce RestrictInfo list to bare expressions; ignore pseudoconstants */
+ scan_clauses = extract_actual_clauses(scan_clauses, false);
/*
- * The executor needs a copy with the indexkey on the left of each
- * clause and with index attr numbers substituted for table ones. This
- * pass also looks for "lossy" operators.
+	 * If this is an innerjoin scan, the indexclauses will contain join clauses
+ * that are not present in scan_clauses (since the passed-in value is just
+ * the rel's baserestrictinfo list). We must add these clauses to
+ * scan_clauses to ensure they get checked. In most cases we will remove
+ * the join clauses again below, but if a join clause contains a special
+ * operator, we need to make sure it gets into the scan_clauses.
*/
- fix_indxqual_references(indxqual, best_path,
- &fixed_indxqual, &recheck_indxqual);
+ if (best_path->isjoininner)
+ {
+ scan_clauses = list_concat_unique(scan_clauses, bitmapqualorig);
+ }
/*
- * If there were any "lossy" operators, need to add back the
- * appropriate qual clauses to the qpqual. When there is just one
- * indexscan being performed (ie, we have simple AND semantics), we
- * can just add the lossy clauses themselves to qpqual. If we have
- * OR-of-ANDs, we'd better add the entire original indexqual to make
- * sure that the semantics are correct.
+ * The qpqual list must contain all restrictions not automatically handled
+ * by the index. All the predicates in the indexquals will be checked
+ * (either by the index itself, or by nodeBitmapHeapscan.c), but if there
+ * are any "special" or lossy operators involved then they must be added
+ * to qpqual. The upshot is that qpqual must contain scan_clauses minus
+ * whatever appears in indexquals.
+ *
+ * In normal cases simple equal() checks will be enough to spot duplicate
+ * clauses, so we try that first. In some situations (particularly with
+ * OR'd index conditions) we may have scan_clauses that are not equal to,
+ * but are logically implied by, the index quals; so we also try a
+ * predicate_implied_by() check to see if we can discard quals that way.
+ * (predicate_implied_by assumes its first input contains only immutable
+ * functions, so we have to check that.)
+ *
+ * Unlike create_indexscan_plan(), we need take no special thought here
+ * for partial index predicates; this is because the predicate conditions
+ * are already listed in bitmapqualorig and indexquals. Bitmap scans
+ * have to do it that way because predicate conditions need to be rechecked
+ * if the scan becomes lossy.
*/
- if (recheck_indxqual != NIL)
+ qpqual = NIL;
+ foreach(l, scan_clauses)
{
- if (indxqual_or_expr)
- {
- /* Better do a deep copy of the original scanclauses */
- qpqual = lappend(qpqual, copyObject(indxqual_or_expr));
- }
- else
+ Node *clause = (Node *) lfirst(l);
+
+ if (list_member(indexquals, clause))
+ continue;
+ if (!contain_mutable_functions(clause))
{
- /* Subroutine already copied quals, so just append to list */
- Assert(length(recheck_indxqual) == 1);
- qpqual = nconc(qpqual, (List *) lfirst(recheck_indxqual));
+ List *clausel = list_make1(clause);
+
+ if (predicate_implied_by(clausel, indexquals))
+ continue;
}
+ qpqual = lappend(qpqual, clause);
}
+ /* Sort clauses into best execution order */
+ qpqual = order_qual_clauses(root, qpqual);
+
+ /*
+ * When dealing with special or lossy operators, we will at this point
+ * have duplicate clauses in qpqual and bitmapqualorig. We may as well
+ * drop 'em from bitmapqualorig, since there's no point in making the
+ * tests twice.
+ */
+ bitmapqualorig = list_difference_ptr(bitmapqualorig, qpqual);
+
+ /*
+ * Copy the finished bitmapqualorig to make sure we have an independent
+ * copy --- needed in case there are subplans in the index quals
+ */
+ bitmapqualorig = copyObject(bitmapqualorig);
+
/* Finally ready to build the plan node */
- scan_plan = make_indexscan(tlist,
- qpqual,
- baserelid,
- indexids,
- fixed_indxqual,
- indxqual,
- best_path->indexscandir);
+ scan_plan = make_bitmap_heapscan(tlist,
+ qpqual,
+ bitmapqualplan,
+ bitmapqualorig,
+ baserelid);
copy_path_costsize(&scan_plan->scan.plan, &best_path->path);
/* use the indexscan-specific rows estimate, not the parent rel's */
return scan_plan;
}
+/*
+ * Given a bitmapqual tree, generate the Plan tree that implements it
+ *
+ * As byproducts, we also return in *qual and *indexqual the qual lists
+ * (in implicit-AND form, without RestrictInfos) describing the original index
+ * conditions and the generated indexqual conditions. The latter is made to
+ * exclude lossy index operators. Both lists include partial-index predicates,
+ * because we have to recheck predicates as well as index conditions if the
+ * bitmap scan becomes lossy.
+ *
+ * Note: if you find yourself changing this, you probably need to change
+ * make_restrictinfo_from_bitmapqual too.
+ */
+static Plan *
+create_bitmap_subplan(PlannerInfo *root, Path *bitmapqual,
+ List **qual, List **indexqual)
+{
+ Plan *plan;
+
+ if (IsA(bitmapqual, BitmapAndPath))
+ {
+ BitmapAndPath *apath = (BitmapAndPath *) bitmapqual;
+ List *subplans = NIL;
+ List *subquals = NIL;
+ List *subindexquals = NIL;
+ ListCell *l;
+
+ /*
+ * There may well be redundant quals among the subplans, since a
+ * top-level WHERE qual might have gotten used to form several
+ * different index quals. We don't try exceedingly hard to eliminate
+ * redundancies, but we do eliminate obvious duplicates by using
+ * list_concat_unique.
+ */
+ foreach(l, apath->bitmapquals)
+ {
+ Plan *subplan;
+ List *subqual;
+ List *subindexqual;
+
+ subplan = create_bitmap_subplan(root, (Path *) lfirst(l),
+ &subqual, &subindexqual);
+ subplans = lappend(subplans, subplan);
+ subquals = list_concat_unique(subquals, subqual);
+ subindexquals = list_concat_unique(subindexquals, subindexqual);
+ }
+ plan = (Plan *) make_bitmap_and(subplans);
+ plan->startup_cost = apath->path.startup_cost;
+ plan->total_cost = apath->path.total_cost;
+ plan->plan_rows =
+ clamp_row_est(apath->bitmapselectivity * apath->path.parent->tuples);
+ plan->plan_width = 0; /* meaningless */
+ *qual = subquals;
+ *indexqual = subindexquals;
+ }
+ else if (IsA(bitmapqual, BitmapOrPath))
+ {
+ BitmapOrPath *opath = (BitmapOrPath *) bitmapqual;
+ List *subplans = NIL;
+ List *subquals = NIL;
+ List *subindexquals = NIL;
+ bool const_true_subqual = false;
+ bool const_true_subindexqual = false;
+ ListCell *l;
+
+ /*
+ * Here, we only detect qual-free subplans. A qual-free subplan would
+ * cause us to generate "... OR true ..." which we may as well reduce
+ * to just "true". We do not try to eliminate redundant subclauses
+ * because (a) it's not as likely as in the AND case, and (b) we might
+ * well be working with hundreds or even thousands of OR conditions,
+ * perhaps from a long IN list. The performance of list_append_unique
+ * would be unacceptable.
+ */
+ foreach(l, opath->bitmapquals)
+ {
+ Plan *subplan;
+ List *subqual;
+ List *subindexqual;
+
+ subplan = create_bitmap_subplan(root, (Path *) lfirst(l),
+ &subqual, &subindexqual);
+ subplans = lappend(subplans, subplan);
+ if (subqual == NIL)
+ const_true_subqual = true;
+ else if (!const_true_subqual)
+ subquals = lappend(subquals,
+ make_ands_explicit(subqual));
+ if (subindexqual == NIL)
+ const_true_subindexqual = true;
+ else if (!const_true_subindexqual)
+ subindexquals = lappend(subindexquals,
+ make_ands_explicit(subindexqual));
+ }
+ /*
+ * In the presence of ScalarArrayOpExpr quals, we might have built
+ * BitmapOrPaths with just one subpath; don't add an OR step.
+ */
+ if (list_length(subplans) == 1)
+ {
+ plan = (Plan *) linitial(subplans);
+ }
+ else
+ {
+ plan = (Plan *) make_bitmap_or(subplans);
+ plan->startup_cost = opath->path.startup_cost;
+ plan->total_cost = opath->path.total_cost;
+ plan->plan_rows =
+ clamp_row_est(opath->bitmapselectivity * opath->path.parent->tuples);
+ plan->plan_width = 0; /* meaningless */
+ }
+
+ /*
+ * If there were constant-TRUE subquals, the OR reduces to constant
+ * TRUE. Also, avoid generating one-element ORs, which could happen
+ * due to redundancy elimination or ScalarArrayOpExpr quals.
+ */
+ if (const_true_subqual)
+ *qual = NIL;
+ else if (list_length(subquals) <= 1)
+ *qual = subquals;
+ else
+ *qual = list_make1(make_orclause(subquals));
+ if (const_true_subindexqual)
+ *indexqual = NIL;
+ else if (list_length(subindexquals) <= 1)
+ *indexqual = subindexquals;
+ else
+ *indexqual = list_make1(make_orclause(subindexquals));
+ }
+ else if (IsA(bitmapqual, IndexPath))
+ {
+ IndexPath *ipath = (IndexPath *) bitmapqual;
+ IndexScan *iscan;
+ List *nonlossy_clauses;
+ ListCell *l;
+
+ /* Use the regular indexscan plan build machinery... */
+ iscan = create_indexscan_plan(root, ipath, NIL, NIL,
+ &nonlossy_clauses);
+ /* then convert to a bitmap indexscan */
+ plan = (Plan *) make_bitmap_indexscan(iscan->scan.scanrelid,
+ iscan->indexid,
+ iscan->indexqual,
+ iscan->indexqualorig,
+ iscan->indexstrategy,
+ iscan->indexsubtype);
+ plan->startup_cost = 0.0;
+ plan->total_cost = ipath->indextotalcost;
+ plan->plan_rows =
+ clamp_row_est(ipath->indexselectivity * ipath->path.parent->tuples);
+ plan->plan_width = 0; /* meaningless */
+ *qual = get_actual_clauses(ipath->indexclauses);
+ *indexqual = get_actual_clauses(nonlossy_clauses);
+ foreach(l, ipath->indexinfo->indpred)
+ {
+ Expr *pred = (Expr *) lfirst(l);
+
+ /*
+ * We know that the index predicate must have been implied by
+ * the query condition as a whole, but it may or may not be
+ * implied by the conditions that got pushed into the
+ * bitmapqual. Avoid generating redundant conditions.
+ */
+ if (!predicate_implied_by(list_make1(pred), ipath->indexclauses))
+ {
+ *qual = lappend(*qual, pred);
+ *indexqual = lappend(*indexqual, pred);
+ }
+ }
+ }
+ else
+ {
+ elog(ERROR, "unrecognized node type: %d", nodeTag(bitmapqual));
+ plan = NULL; /* keep compiler quiet */
+ }
+
+ return plan;
+}
+
/*
* create_tidscan_plan
* Returns a tidscan plan for the base relation scanned by 'best_path'
* with restriction clauses 'scan_clauses' and targetlist 'tlist'.
*/
static TidScan *
-create_tidscan_plan(TidPath *best_path, List *tlist, List *scan_clauses)
+create_tidscan_plan(PlannerInfo *root, TidPath *best_path,
+ List *tlist, List *scan_clauses)
{
TidScan *scan_plan;
- Index scan_relid;
+ Index scan_relid = best_path->path.parent->relid;
+ List *ortidquals;
- /* there should be exactly one base rel involved... */
- Assert(length(best_path->path.parent->relids) == 1);
+ /* it should be a base rel... */
+ Assert(scan_relid > 0);
Assert(best_path->path.parent->rtekind == RTE_RELATION);
- scan_relid = (Index) lfirsti(best_path->path.parent->relids);
+ /* Reduce RestrictInfo list to bare expressions; ignore pseudoconstants */
+ scan_clauses = extract_actual_clauses(scan_clauses, false);
+
+ /*
+ * Remove any clauses that are TID quals. This is a bit tricky since
+ * the tidquals list has implicit OR semantics.
+ */
+ ortidquals = best_path->tidquals;
+ if (list_length(ortidquals) > 1)
+ ortidquals = list_make1(make_orclause(ortidquals));
+ scan_clauses = list_difference(scan_clauses, ortidquals);
+
+ /* Sort clauses into best execution order */
+ scan_clauses = order_qual_clauses(root, scan_clauses);
scan_plan = make_tidscan(tlist,
scan_clauses,
scan_relid,
- best_path->tideval);
-
- if (best_path->unjoined_relids)
- scan_plan->needRescan = true;
+ best_path->tidquals);
copy_path_costsize(&scan_plan->scan.plan, &best_path->path);
* with restriction clauses 'scan_clauses' and targetlist 'tlist'.
*/
static SubqueryScan *
-create_subqueryscan_plan(Path *best_path, List *tlist, List *scan_clauses)
+create_subqueryscan_plan(PlannerInfo *root, Path *best_path,
+ List *tlist, List *scan_clauses)
{
SubqueryScan *scan_plan;
- Index scan_relid;
+ Index scan_relid = best_path->parent->relid;
- /* there should be exactly one base rel involved... */
- Assert(length(best_path->parent->relids) == 1);
- /* and it must be a subquery */
+ /* it should be a subquery base rel... */
+ Assert(scan_relid > 0);
Assert(best_path->parent->rtekind == RTE_SUBQUERY);
- scan_relid = (Index) lfirsti(best_path->parent->relids);
+ /* Reduce RestrictInfo list to bare expressions; ignore pseudoconstants */
+ scan_clauses = extract_actual_clauses(scan_clauses, false);
+
+ /* Sort clauses into best execution order */
+ scan_clauses = order_qual_clauses(root, scan_clauses);
scan_plan = make_subqueryscan(tlist,
scan_clauses,
scan_relid,
best_path->parent->subplan);
+ copy_path_costsize(&scan_plan->scan.plan, best_path);
+
return scan_plan;
}
* with restriction clauses 'scan_clauses' and targetlist 'tlist'.
*/
static FunctionScan *
-create_functionscan_plan(Path *best_path, List *tlist, List *scan_clauses)
+create_functionscan_plan(PlannerInfo *root, Path *best_path,
+ List *tlist, List *scan_clauses)
{
FunctionScan *scan_plan;
- Index scan_relid;
+ Index scan_relid = best_path->parent->relid;
- /* there should be exactly one base rel involved... */
- Assert(length(best_path->parent->relids) == 1);
- /* and it must be a function */
+ /* it should be a function base rel... */
+ Assert(scan_relid > 0);
Assert(best_path->parent->rtekind == RTE_FUNCTION);
- scan_relid = (Index) lfirsti(best_path->parent->relids);
+ /* Reduce RestrictInfo list to bare expressions; ignore pseudoconstants */
+ scan_clauses = extract_actual_clauses(scan_clauses, false);
+
+ /* Sort clauses into best execution order */
+ scan_clauses = order_qual_clauses(root, scan_clauses);
scan_plan = make_functionscan(tlist, scan_clauses, scan_relid);
*
* JOIN METHODS
*
- * A general note about join_references() processing in these routines:
- * once we have changed a Var node to refer to a subplan output rather than
- * the original relation, it is no longer equal() to an unmodified Var node
- * for the same var. So, we cannot easily compare reference-adjusted qual
- * clauses to clauses that have not been adjusted. Fortunately, that
- * doesn't seem to be necessary; all the decisions are made before we do
- * the reference adjustments.
- *
- * A cleaner solution would be to not call join_references() here at all,
- * but leave it for setrefs.c to do at the end of plan tree construction.
- * But that would make switch_outer() much more complicated, and some care
- * would be needed to get setrefs.c to do the right thing with nestloop
- * inner indexscan quals. So, we do subplan reference adjustment here for
- * quals of join nodes (and *only* for quals of join nodes).
- *
*****************************************************************************/
static NestLoop *
-create_nestloop_plan(Query *root,
+create_nestloop_plan(PlannerInfo *root,
NestPath *best_path,
- List *tlist,
- List *joinclauses,
- List *otherclauses,
Plan *outer_plan,
- List *outer_tlist,
- Plan *inner_plan,
- List *inner_tlist)
+ Plan *inner_plan)
{
+ List *tlist = build_relation_tlist(best_path->path.parent);
+ List *joinrestrictclauses = best_path->joinrestrictinfo;
+ List *joinclauses;
+ List *otherclauses;
NestLoop *join_plan;
- if (IsA(inner_plan, IndexScan))
+ if (IsA(best_path->innerjoinpath, IndexPath))
{
/*
- * An index is being used to reduce the number of tuples scanned
- * in the inner relation. If there are join clauses being used
- * with the index, we must update their outer-rel var nodes to
- * refer to the outer side of the join.
- *
- * We can also remove those join clauses from the list of clauses
- * that have to be checked as qpquals at the join node, but only
- * if there's just one indexscan in the inner path (otherwise,
- * several different sets of clauses are being ORed together).
+ * An index is being used to reduce the number of tuples scanned in
+ * the inner relation. If there are join clauses being used with the
+ * index, we may remove those join clauses from the list of clauses
+ * that have to be checked as qpquals at the join node.
*
- * Note: if the index is lossy, the same clauses may also be getting
- * checked as qpquals in the indexscan. We can still remove them
- * from the nestloop's qpquals, but we gotta update the outer-rel
- * vars in the indexscan's qpquals too.
+ * We can also remove any join clauses that are redundant with those
+ * being used in the index scan; prior redundancy checks will not have
+ * caught this case because the join clauses would never have been put
+ * in the same joininfo list.
*
- * Note: we can safely do set_difference() against my clauses and
- * join_references() because the innerscan is a primitive plan,
- * and therefore has not itself done join_references renumbering
- * of the vars in its quals.
+ * We can skip this if the index path is an ordinary indexpath and not
+ * a special innerjoin path.
*/
- IndexScan *innerscan = (IndexScan *) inner_plan;
- List *indxqualorig = innerscan->indxqualorig;
+ IndexPath *innerpath = (IndexPath *) best_path->innerjoinpath;
- /* No work needed if indxqual refers only to its own relation... */
- if (NumRelids((Node *) indxqualorig) > 1)
+ if (innerpath->isjoininner)
{
- Index innerrel = innerscan->scan.scanrelid;
-
- /*
- * Remove redundant tests from my clauses, if possible. Note
- * we must compare against indxqualorig not the "fixed"
- * indxqual (which has index attnos instead of relation
- * attnos, and may have been commuted as well).
- */
- if (length(indxqualorig) == 1) /* single indexscan? */
- joinclauses = set_difference(joinclauses,
- lfirst(indxqualorig));
-
- /* only refs to outer vars get changed in the inner indexqual */
- innerscan->indxqualorig = join_references(indxqualorig,
- root->rtable,
- outer_tlist,
- NIL,
- innerrel);
- innerscan->indxqual = join_references(innerscan->indxqual,
- root->rtable,
- outer_tlist,
- NIL,
- innerrel);
- /* fix the inner qpqual too, if it has join clauses */
- if (NumRelids((Node *) inner_plan->qual) > 1)
- inner_plan->qual = join_references(inner_plan->qual,
- root->rtable,
- outer_tlist,
- NIL,
- innerrel);
+ joinrestrictclauses =
+ select_nonredundant_join_clauses(root,
+ joinrestrictclauses,
+ innerpath->indexclauses,
+ IS_OUTER_JOIN(best_path->jointype));
}
}
- else if (IsA(inner_plan, TidScan))
- {
- TidScan *innerscan = (TidScan *) inner_plan;
-
- innerscan->tideval = join_references(innerscan->tideval,
- root->rtable,
- outer_tlist,
- inner_tlist,
- innerscan->scan.scanrelid);
- }
- else if (IsA_Join(inner_plan))
+ else if (IsA(best_path->innerjoinpath, BitmapHeapPath))
{
/*
- * Materialize the inner join for speed reasons.
+ * Same deal for bitmapped index scans.
*
- * XXX It is probably *not* always fastest to materialize an inner
- * join --- how can we estimate whether this is a good thing to
- * do?
+ * Note: both here and above, we ignore any implicit index
+ * restrictions associated with the use of partial indexes. This is
+ * OK because we're only trying to prove we can dispense with some
+ * join quals; failing to prove that doesn't result in an incorrect
+ * plan. It is the right way to proceed because adding more quals to
+ * the stuff we got from the original query would just make it harder
+ * to detect duplication. (Also, to change this we'd have to be
+ * wary of UPDATE/DELETE/SELECT FOR UPDATE target relations; see
+ * notes above about EvalPlanQual.)
*/
- inner_plan = (Plan *) make_material(inner_tlist,
- inner_plan);
+ BitmapHeapPath *innerpath = (BitmapHeapPath *) best_path->innerjoinpath;
+
+ if (innerpath->isjoininner)
+ {
+ List *bitmapclauses;
+
+ bitmapclauses =
+ make_restrictinfo_from_bitmapqual(innerpath->bitmapqual,
+ true,
+ false);
+ joinrestrictclauses =
+ select_nonredundant_join_clauses(root,
+ joinrestrictclauses,
+ bitmapclauses,
+ IS_OUTER_JOIN(best_path->jointype));
+ }
}
- /*
- * Set quals to contain INNER/OUTER var references.
- */
- joinclauses = join_references(joinclauses,
- root->rtable,
- outer_tlist,
- inner_tlist,
- (Index) 0);
- otherclauses = join_references(otherclauses,
- root->rtable,
- outer_tlist,
- inner_tlist,
- (Index) 0);
+ /* Get the join qual clauses (in plain expression form) */
+ /* Any pseudoconstant clauses are ignored here */
+ if (IS_OUTER_JOIN(best_path->jointype))
+ {
+ extract_actual_join_clauses(joinrestrictclauses,
+ &joinclauses, &otherclauses);
+ }
+ else
+ {
+ /* We can treat all clauses alike for an inner join */
+ joinclauses = extract_actual_clauses(joinrestrictclauses, false);
+ otherclauses = NIL;
+ }
+
+ /* Sort clauses into best execution order */
+ joinclauses = order_qual_clauses(root, joinclauses);
+ otherclauses = order_qual_clauses(root, otherclauses);
join_plan = make_nestloop(tlist,
joinclauses,
}
static MergeJoin *
-create_mergejoin_plan(Query *root,
+create_mergejoin_plan(PlannerInfo *root,
MergePath *best_path,
- List *tlist,
- List *joinclauses,
- List *otherclauses,
Plan *outer_plan,
- List *outer_tlist,
- Plan *inner_plan,
- List *inner_tlist)
+ Plan *inner_plan)
{
+ List *tlist = build_relation_tlist(best_path->jpath.path.parent);
+ List *joinclauses;
+ List *otherclauses;
List *mergeclauses;
MergeJoin *join_plan;
- mergeclauses = get_actual_clauses(best_path->path_mergeclauses);
+ /* Get the join qual clauses (in plain expression form) */
+ /* Any pseudoconstant clauses are ignored here */
+ if (IS_OUTER_JOIN(best_path->jpath.jointype))
+ {
+ extract_actual_join_clauses(best_path->jpath.joinrestrictinfo,
+ &joinclauses, &otherclauses);
+ }
+ else
+ {
+ /* We can treat all clauses alike for an inner join */
+ joinclauses = extract_actual_clauses(best_path->jpath.joinrestrictinfo,
+ false);
+ otherclauses = NIL;
+ }
/*
- * Remove the mergeclauses from the list of join qual clauses, leaving
- * the list of quals that must be checked as qpquals. Set those
- * clauses to contain INNER/OUTER var references.
+ * Remove the mergeclauses from the list of join qual clauses, leaving the
+ * list of quals that must be checked as qpquals.
*/
- joinclauses = join_references(set_difference(joinclauses, mergeclauses),
- root->rtable,
- outer_tlist,
- inner_tlist,
- (Index) 0);
+ mergeclauses = get_actual_clauses(best_path->path_mergeclauses);
+ joinclauses = list_difference(joinclauses, mergeclauses);
/*
- * Fix the additional qpquals too.
+ * Rearrange mergeclauses, if needed, so that the outer variable is always
+ * on the left.
*/
- otherclauses = join_references(otherclauses,
- root->rtable,
- outer_tlist,
- inner_tlist,
- (Index) 0);
+ mergeclauses = get_switched_clauses(best_path->path_mergeclauses,
+ best_path->jpath.outerjoinpath->parent->relids);
- /*
- * Now set the references in the mergeclauses and rearrange them so
- * that the outer variable is always on the left.
- */
- mergeclauses = switch_outer(join_references(mergeclauses,
- root->rtable,
- outer_tlist,
- inner_tlist,
- (Index) 0));
+ /* Sort clauses into best execution order */
+ /* NB: do NOT reorder the mergeclauses */
+ joinclauses = order_qual_clauses(root, joinclauses);
+ otherclauses = order_qual_clauses(root, otherclauses);
/*
* Create explicit sort nodes for the outer and inner join paths if
- * necessary. The sort cost was already accounted for in the path.
+ * necessary. The sort cost was already accounted for in the path. Make
+ * sure there are no excess columns in the inputs if sorting.
*/
if (best_path->outersortkeys)
+ {
+ disuse_physical_tlist(outer_plan, best_path->jpath.outerjoinpath);
outer_plan = (Plan *)
make_sort_from_pathkeys(root,
- outer_tlist,
outer_plan,
best_path->outersortkeys);
+ }
if (best_path->innersortkeys)
+ {
+ disuse_physical_tlist(inner_plan, best_path->jpath.innerjoinpath);
inner_plan = (Plan *)
make_sort_from_pathkeys(root,
- inner_tlist,
inner_plan,
best_path->innersortkeys);
-
- /*
- * The executor requires the inner side of a mergejoin to support
- * "mark" and "restore" operations. Not all plan types do, so we must
- * be careful not to generate an invalid plan. If necessary, an
- * invalid inner plan can be handled by inserting a Materialize node.
- *
- * Since the inner side must be ordered, and only Sorts and IndexScans
- * can create order to begin with, you might think there's no problem
- * --- but you'd be wrong. Nestloop and merge joins can *preserve*
- * the order of their inputs, so they can be selected as the input of
- * a mergejoin, and that won't work in the present executor.
- *
- * Doing this here is a bit of a kluge since the cost of the Materialize
- * wasn't taken into account in our earlier decisions. But
- * Materialize is hard to estimate a cost for, and the above
- * consideration shows that this is a rare case anyway, so this seems
- * an acceptable way to proceed.
- *
- * This check must agree with ExecMarkPos/ExecRestrPos in
- * executor/execAmi.c!
- */
- switch (nodeTag(inner_plan))
- {
- case T_SeqScan:
- case T_IndexScan:
- case T_FunctionScan:
- case T_Material:
- case T_Sort:
- /* OK, these inner plans support mark/restore */
- break;
-
- default:
- /* Ooops, need to materialize the inner plan */
- inner_plan = (Plan *) make_material(inner_tlist,
- inner_plan);
- break;
}
/*
}
static HashJoin *
-create_hashjoin_plan(Query *root,
+create_hashjoin_plan(PlannerInfo *root,
HashPath *best_path,
- List *tlist,
- List *joinclauses,
- List *otherclauses,
Plan *outer_plan,
- List *outer_tlist,
- Plan *inner_plan,
- List *inner_tlist)
+ Plan *inner_plan)
{
+ List *tlist = build_relation_tlist(best_path->jpath.path.parent);
+ List *joinclauses;
+ List *otherclauses;
List *hashclauses;
HashJoin *join_plan;
Hash *hash_plan;
- Node *innerhashkey;
- /*
- * NOTE: there will always be exactly one hashclause in the list
- * best_path->path_hashclauses (cf. hash_inner_and_outer()). We
- * represent it as a list anyway, for convenience with routines that
- * want to work on lists of clauses.
- */
- hashclauses = get_actual_clauses(best_path->path_hashclauses);
+ /* Get the join qual clauses (in plain expression form) */
+ /* Any pseudoconstant clauses are ignored here */
+ if (IS_OUTER_JOIN(best_path->jpath.jointype))
+ {
+ extract_actual_join_clauses(best_path->jpath.joinrestrictinfo,
+ &joinclauses, &otherclauses);
+ }
+ else
+ {
+ /* We can treat all clauses alike for an inner join */
+ joinclauses = extract_actual_clauses(best_path->jpath.joinrestrictinfo,
+ false);
+ otherclauses = NIL;
+ }
/*
- * Remove the hashclauses from the list of join qual clauses, leaving
- * the list of quals that must be checked as qpquals. Set those
- * clauses to contain INNER/OUTER var references.
+ * Remove the hashclauses from the list of join qual clauses, leaving the
+ * list of quals that must be checked as qpquals.
*/
- joinclauses = join_references(set_difference(joinclauses, hashclauses),
- root->rtable,
- outer_tlist,
- inner_tlist,
- (Index) 0);
+ hashclauses = get_actual_clauses(best_path->path_hashclauses);
+ joinclauses = list_difference(joinclauses, hashclauses);
/*
- * Fix the additional qpquals too.
+ * Rearrange hashclauses, if needed, so that the outer variable is always
+ * on the left.
*/
- otherclauses = join_references(otherclauses,
- root->rtable,
- outer_tlist,
- inner_tlist,
- (Index) 0);
+ hashclauses = get_switched_clauses(best_path->path_hashclauses,
+ best_path->jpath.outerjoinpath->parent->relids);
- /*
- * Now set the references in the hashclauses and rearrange them so
- * that the outer variable is always on the left.
- */
- hashclauses = switch_outer(join_references(hashclauses,
- root->rtable,
- outer_tlist,
- inner_tlist,
- (Index) 0));
+ /* Sort clauses into best execution order */
+ joinclauses = order_qual_clauses(root, joinclauses);
+ otherclauses = order_qual_clauses(root, otherclauses);
+ hashclauses = order_qual_clauses(root, hashclauses);
- /* Now the righthand op of the sole hashclause is the inner hash key. */
- innerhashkey = (Node *) get_rightop(lfirst(hashclauses));
+ /* We don't want any excess columns in the hashed tuples */
+ disuse_physical_tlist(inner_plan, best_path->jpath.innerjoinpath);
/*
* Build the hash node and hash join node.
*/
- hash_plan = make_hash(inner_tlist, innerhashkey, inner_plan);
+ hash_plan = make_hash(inner_plan);
join_plan = make_hashjoin(tlist,
joinclauses,
otherclauses,
*****************************************************************************/
/*
- * fix_indxqual_references
+ * fix_indexqual_references
* Adjust indexqual clauses to the form the executor's indexqual
* machinery needs, and check for recheckable (lossy) index conditions.
*
- * We have four tasks here:
+ * We have five tasks here:
+ * * Remove RestrictInfo nodes from the input clauses.
* * Index keys must be represented by Var nodes with varattno set to the
* index's attribute number, not the attribute number in the original rel.
- * * indxpath.c may have selected an index that is binary-compatible with
- * the actual expression operator, but not exactly the same datatype.
- * We must replace the expression's operator with the binary-compatible
- * equivalent operator that the index will recognize.
* * If the index key is on the right, commute the clause to put it on the
- * left. (Someday the executor might not need this, but for now it does.)
- * * If the indexable operator is marked 'amopreqcheck' in pg_amop, then
- * the index is "lossy" for this operator: it may return more tuples than
- * actually satisfy the operator condition. For each such operator, we
- * must add (the original form of) the indexqual clause to the "qpquals"
- * of the indexscan node, where the operator will be re-evaluated to
- * ensure it passes.
+ * left.
+ * * We must construct lists of operator strategy numbers and subtypes
+ * for the top-level operators of each index clause.
+ * * We must detect any lossy index operators. The API is that we return
+ * a list of the input clauses whose operators are NOT lossy.
*
- * This code used to be entirely bogus for multi-index scans. Now it keeps
- * track of which index applies to each subgroup of index qual clauses...
- *
- * Both the input list and the output lists have the form of lists of sublists
- * of qual clauses --- the top-level list has one entry for each indexscan
- * to be performed. The semantics are OR-of-ANDs.
- *
- * fixed_indexquals receives a modified copy of the indexqual list --- the
+ * fixed_indexquals receives a modified copy of the indexquals list --- the
* original is not changed. Note also that the copy shares no substructure
* with the original; this is needed in case there is a subplan in it (we need
* two separate copies of the subplan tree, or things will go awry).
*
- * recheck_indexquals similarly receives a full copy of whichever clauses
- * need rechecking.
- */
-static void
-fix_indxqual_references(List *indexquals, IndexPath *index_path,
- List **fixed_indexquals, List **recheck_indexquals)
-{
- List *fixed_quals = NIL;
- List *recheck_quals = NIL;
- int baserelid = lfirsti(index_path->path.parent->relids);
- List *ixinfo = index_path->indexinfo;
- List *i;
-
- foreach(i, indexquals)
- {
- List *indexqual = lfirst(i);
- IndexOptInfo *index = (IndexOptInfo *) lfirst(ixinfo);
- List *fixed_qual;
- List *recheck_qual;
-
- fix_indxqual_sublist(indexqual, baserelid, index,
- &fixed_qual, &recheck_qual);
- fixed_quals = lappend(fixed_quals, fixed_qual);
- if (recheck_qual != NIL)
- recheck_quals = lappend(recheck_quals, recheck_qual);
-
- ixinfo = lnext(ixinfo);
- }
-
- *fixed_indexquals = fixed_quals;
- *recheck_indexquals = recheck_quals;
-}
-
-/*
- * Fix the sublist of indexquals to be used in a particular scan.
- *
- * For each qual clause, commute if needed to put the indexkey operand on the
- * left, and then fix its varattno. (We do not need to change the other side
- * of the clause.) Also change the operator if necessary, and check for
- * lossy index behavior.
+ * nonlossy_indexquals receives a list of the original input clauses (with
+ * RestrictInfos) that contain non-lossy operators.
*
- * Returns two lists: the list of fixed indexquals, and the list (usually
- * empty) of original clauses that must be rechecked as qpquals because
- * the index is lossy for this operator type.
+ * indexstrategy receives an integer list of strategy numbers.
+ * indexsubtype receives an OID list of strategy subtypes.
*/
static void
-fix_indxqual_sublist(List *indexqual, int baserelid, IndexOptInfo *index,
- List **fixed_quals, List **recheck_quals)
+fix_indexqual_references(List *indexquals, IndexPath *index_path,
+ List **fixed_indexquals,
+ List **nonlossy_indexquals,
+ List **indexstrategy,
+ List **indexsubtype)
{
- List *fixed_qual = NIL;
- List *recheck_qual = NIL;
- List *i;
+ IndexOptInfo *index = index_path->indexinfo;
+ ListCell *l;
+
+ *fixed_indexquals = NIL;
+ *nonlossy_indexquals = NIL;
+ *indexstrategy = NIL;
+ *indexsubtype = NIL;
- foreach(i, indexqual)
+ /*
+ * For each qual clause, commute if needed to put the indexkey operand on
+ * the left, and then fix its varattno. (We do not need to change the
+ * other side of the clause.) Then determine the operator's strategy
+ * number and subtype number, and check for lossy index behavior.
+ */
+ foreach(l, indexquals)
{
- Expr *clause = (Expr *) lfirst(i);
- Expr *newclause;
- List *leftvarnos;
- Oid opclass,
- newopno;
+ RestrictInfo *rinfo = (RestrictInfo *) lfirst(l);
+ Expr *clause;
+ Oid clause_op;
+ Oid opclass;
+ int stratno;
+ Oid stratsubtype;
+ bool recheck;
- if (!is_opclause((Node *) clause) || length(clause->args) != 2)
- elog(ERROR, "fix_indxqual_sublist: indexqual clause is not binary opclause");
+ Assert(IsA(rinfo, RestrictInfo));
/*
* Make a copy that will become the fixed clause.
*
* We used to try to do a shallow copy here, but that fails if there
- * is a subplan in the arguments of the opclause. So just do a
- * full copy.
+ * is a subplan in the arguments of the opclause. So just do a full
+ * copy.
*/
- newclause = (Expr *) copyObject((Node *) clause);
+ clause = (Expr *) copyObject((Node *) rinfo->clause);
- /*
- * Check to see if the indexkey is on the right; if so, commute
- * the clause. The indexkey should be the side that refers to
- * (only) the base relation.
- */
- leftvarnos = pull_varnos((Node *) lfirst(newclause->args));
- if (length(leftvarnos) != 1 || lfirsti(leftvarnos) != baserelid)
- CommuteClause(newclause);
- freeList(leftvarnos);
+ if (IsA(clause, OpExpr))
+ {
+ OpExpr *op = (OpExpr *) clause;
- /*
- * Now, determine which index attribute this is, change the
- * indexkey operand as needed, and get the index opclass.
- */
- lfirst(newclause->args) = fix_indxqual_operand(lfirst(newclause->args),
- baserelid,
+ if (list_length(op->args) != 2)
+ elog(ERROR, "indexqual clause is not binary opclause");
+
+ /*
+ * Check to see if the indexkey is on the right; if so, commute
+ * the clause. The indexkey should be the side that refers to
+ * (only) the base relation.
+ */
+ if (!bms_equal(rinfo->left_relids, index->rel->relids))
+ CommuteOpExpr(op);
+
+ /*
+ * Now, determine which index attribute this is, change the
+ * indexkey operand as needed, and get the index opclass.
+ */
+ linitial(op->args) = fix_indexqual_operand(linitial(op->args),
index,
&opclass);
+ clause_op = op->opno;
+ }
+ else if (IsA(clause, RowCompareExpr))
+ {
+ RowCompareExpr *rc = (RowCompareExpr *) clause;
+ ListCell *lc;
- /*
- * Substitute the appropriate operator if the expression operator
- * is merely binary-compatible with the index. This shouldn't
- * fail, since indxpath.c found it before...
- */
- newopno = indexable_operator(newclause, opclass, true);
- if (newopno == InvalidOid)
- elog(ERROR, "fix_indxqual_sublist: failed to find substitute op");
- ((Oper *) newclause->oper)->opno = newopno;
+ /*
+ * Check to see if the indexkey is on the right; if so, commute
+ * the clause. The indexkey should be the side that refers to
+ * (only) the base relation.
+ */
+ if (!bms_overlap(pull_varnos(linitial(rc->largs)),
+ index->rel->relids))
+ CommuteRowCompareExpr(rc);
+
+ /*
+ * For each column in the row comparison, determine which index
+ * attribute this is and change the indexkey operand as needed.
+ *
+ * Save the index opclass for only the first column. We will
+ * return the operator and opclass info for just the first
+ * column of the row comparison; the executor will have to
+ * look up the rest if it needs them.
+ */
+ foreach(lc, rc->largs)
+ {
+ Oid tmp_opclass;
+
+ lfirst(lc) = fix_indexqual_operand(lfirst(lc),
+ index,
+ &tmp_opclass);
+ if (lc == list_head(rc->largs))
+ opclass = tmp_opclass;
+ }
+ clause_op = linitial_oid(rc->opnos);
+ }
+ else if (IsA(clause, ScalarArrayOpExpr))
+ {
+ ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) clause;
- fixed_qual = lappend(fixed_qual, newclause);
+ /* Never need to commute... */
+
+ /*
+ * Now, determine which index attribute this is, change the
+ * indexkey operand as needed, and get the index opclass.
+ */
+ linitial(saop->args) = fix_indexqual_operand(linitial(saop->args),
+ index,
+ &opclass);
+ clause_op = saop->opno;
+ }
+ else
+ {
+ elog(ERROR, "unsupported indexqual type: %d",
+ (int) nodeTag(clause));
+ continue; /* keep compiler quiet */
+ }
+
+ *fixed_indexquals = lappend(*fixed_indexquals, clause);
/*
- * Finally, check to see if index is lossy for this operator. If
- * so, add (a copy of) original form of clause to recheck list.
+ * Look up the (possibly commuted) operator in the operator class to
+	 * get its strategy number and subtype, plus the recheck indicator.  This
+ * double-checks that we found an operator matching the index.
*/
- if (op_requires_recheck(newopno, opclass))
- recheck_qual = lappend(recheck_qual,
- copyObject((Node *) clause));
- }
+ get_op_opclass_properties(clause_op, opclass,
+ &stratno, &stratsubtype, &recheck);
+
+ *indexstrategy = lappend_int(*indexstrategy, stratno);
+ *indexsubtype = lappend_oid(*indexsubtype, stratsubtype);
- *fixed_quals = fixed_qual;
- *recheck_quals = recheck_qual;
+ /* If it's not lossy, add to nonlossy_indexquals */
+ if (!recheck)
+ *nonlossy_indexquals = lappend(*nonlossy_indexquals, rinfo);
+ }
}
static Node *
-fix_indxqual_operand(Node *node, int baserelid, IndexOptInfo *index,
- Oid *opclass)
+fix_indexqual_operand(Node *node, IndexOptInfo *index, Oid *opclass)
{
/*
- * Remove any binary-compatible relabeling of the indexkey
+ * We represent index keys by Var nodes having the varno of the base table
+ * but varattno equal to the index's attribute number (index column
+ * position). This is a bit hokey ... would be cleaner to use a
+ * special-purpose node type that could not be mistaken for a regular Var.
+ * But it will do for now.
*/
- if (IsA(node, RelabelType))
- node = ((RelabelType *) node)->arg;
+ Var *result;
+ int pos;
+ ListCell *indexpr_item;
/*
- * We represent index keys by Var nodes having the varno of the base
- * table but varattno equal to the index's attribute number (index
- * column position). This is a bit hokey ... would be cleaner to use
- * a special-purpose node type that could not be mistaken for a
- * regular Var. But it will do for now.
+ * Remove any binary-compatible relabeling of the indexkey
*/
- if (IsA(node, Var))
+ if (IsA(node, RelabelType))
+ node = (Node *) ((RelabelType *) node)->arg;
+
+ if (IsA(node, Var) &&
+ ((Var *) node)->varno == index->rel->relid)
{
- /* If it's a var, find which index key position it occupies */
- Assert(index->indproc == InvalidOid);
+ /* Try to match against simple index columns */
+ int varatt = ((Var *) node)->varattno;
- if (((Var *) node)->varno == baserelid)
+ if (varatt != 0)
{
- int varatt = ((Var *) node)->varattno;
- int pos;
-
- for (pos = 0; pos < index->nkeys; pos++)
+ for (pos = 0; pos < index->ncolumns; pos++)
{
if (index->indexkeys[pos] == varatt)
{
- Node *newnode = copyObject(node);
-
- ((Var *) newnode)->varattno = pos + 1;
+ result = (Var *) copyObject(node);
+ result->varattno = pos + 1;
/* return the correct opclass, too */
*opclass = index->classlist[pos];
- return newnode;
+ return (Node *) result;
}
}
}
-
- /*
- * Oops, this Var isn't an indexkey!
- */
- elog(ERROR, "fix_indxqual_operand: var is not index attribute");
}
- /*
- * Else, it must be a func expression matching a functional index.
- * Since we currently only support single-column functional indexes,
- * the returned varattno must be 1.
- */
- Assert(index->indproc != InvalidOid);
- Assert(is_funcclause(node)); /* not a very thorough check, but easy */
-
- /* classlist[0] is the only class of a functional index */
- *opclass = index->classlist[0];
+ /* Try to match against index expressions */
+ indexpr_item = list_head(index->indexprs);
+ for (pos = 0; pos < index->ncolumns; pos++)
+ {
+ if (index->indexkeys[pos] == 0)
+ {
+ Node *indexkey;
+
+ if (indexpr_item == NULL)
+ elog(ERROR, "too few entries in indexprs list");
+ indexkey = (Node *) lfirst(indexpr_item);
+ if (indexkey && IsA(indexkey, RelabelType))
+ indexkey = (Node *) ((RelabelType *) indexkey)->arg;
+ if (equal(node, indexkey))
+ {
+ /* Found a match */
+ result = makeVar(index->rel->relid, pos + 1,
+ exprType(lfirst(indexpr_item)), -1,
+ 0);
+ /* return the correct opclass, too */
+ *opclass = index->classlist[pos];
+ return (Node *) result;
+ }
+ indexpr_item = lnext(indexpr_item);
+ }
+ }
- return (Node *) makeVar(baserelid, 1, exprType(node), -1, 0);
+	/* Oops... operand matched no index column or index expression */
+ elog(ERROR, "node is not an index attribute");
+ *opclass = InvalidOid; /* keep compiler quiet */
+ return NULL;
}
/*
- * switch_outer
- * Given a list of merge or hash joinclauses, rearrange the elements within
- * the clauses so the outer join variable is on the left and the inner is
- * on the right. The original list is not touched; a modified list
- * is returned.
+ * get_switched_clauses
+ * Given a list of merge or hash joinclauses (as RestrictInfo nodes),
+ * extract the bare clauses, and rearrange the elements within the
+ * clauses, if needed, so the outer join variable is on the left and
+ * the inner is on the right. The original data structure is not touched;
+ * a modified list is returned.
*/
static List *
-switch_outer(List *clauses)
+get_switched_clauses(List *clauses, Relids outerrelids)
{
List *t_list = NIL;
- List *i;
+ ListCell *l;
- foreach(i, clauses)
+ foreach(l, clauses)
{
- Expr *clause = (Expr *) lfirst(i);
- Var *op;
+ RestrictInfo *restrictinfo = (RestrictInfo *) lfirst(l);
+ OpExpr *clause = (OpExpr *) restrictinfo->clause;
- Assert(is_opclause((Node *) clause));
- op = get_rightop(clause);
- Assert(op && IsA(op, Var));
- if (var_is_outer(op))
+ Assert(is_opclause(clause));
+ if (bms_is_subset(restrictinfo->right_relids, outerrelids))
{
/*
- * Duplicate just enough of the structure to allow commuting
- * the clause without changing the original list. Could use
+ * Duplicate just enough of the structure to allow commuting the
+ * clause without changing the original list. Could use
* copyObject, but a complete deep copy is overkill.
*/
- Expr *temp;
+ OpExpr *temp = makeNode(OpExpr);
- temp = make_clause(clause->opType, clause->oper,
- listCopy(clause->args));
+ temp->opno = clause->opno;
+ temp->opfuncid = InvalidOid;
+ temp->opresulttype = clause->opresulttype;
+ temp->opretset = clause->opretset;
+ temp->args = list_copy(clause->args);
/* Commute it --- note this modifies the temp node in-place. */
- CommuteClause(temp);
+ CommuteOpExpr(temp);
t_list = lappend(t_list, temp);
}
else
return t_list;
}
+/*
+ * order_qual_clauses
+ * Given a list of qual clauses that will all be evaluated at the same
+ * plan node, sort the list into the order we want to check the quals
+ * in at runtime.
+ *
+ * Ideally the order should be driven by a combination of execution cost and
+ * selectivity, but unfortunately we have so little information about
+ * execution cost of operators that it's really hard to do anything smart.
+ * For now, we just move any quals that contain SubPlan references (but not
+ * InitPlan references) to the end of the list.
+ */
+static List *
+order_qual_clauses(PlannerInfo *root, List *clauses)
+{
+ List *nosubplans;
+ List *withsubplans;
+ ListCell *l;
+
+ /* No need to work hard if the query is subselect-free */
+ if (!root->parse->hasSubLinks)
+ return clauses;
+
+ nosubplans = NIL;
+ withsubplans = NIL;
+ foreach(l, clauses)
+ {
+ Node *clause = (Node *) lfirst(l);
+
+ if (contain_subplans(clause))
+ withsubplans = lappend(withsubplans, clause);
+ else
+ nosubplans = lappend(nosubplans, clause);
+ }
+
+ return list_concat(nosubplans, withsubplans);
+}
+
/*
* Copy cost and size info from a Path node to the Plan node created from it.
* The executor won't use this info, but it's needed by EXPLAIN.
Plan *plan = &node->plan;
/* cost should be inserted by caller */
- plan->state = (EState *) NULL;
plan->targetlist = qptlist;
plan->qual = qpqual;
plan->lefttree = NULL;
plan->righttree = NULL;
node->scanrelid = scanrelid;
- node->scanstate = (CommonScanState *) NULL;
return node;
}
make_indexscan(List *qptlist,
List *qpqual,
Index scanrelid,
- List *indxid,
- List *indxqual,
- List *indxqualorig,
+ Oid indexid,
+ List *indexqual,
+ List *indexqualorig,
+ List *indexstrategy,
+ List *indexsubtype,
ScanDirection indexscandir)
{
IndexScan *node = makeNode(IndexScan);
Plan *plan = &node->scan.plan;
/* cost should be inserted by caller */
- plan->state = (EState *) NULL;
plan->targetlist = qptlist;
plan->qual = qpqual;
plan->lefttree = NULL;
plan->righttree = NULL;
node->scan.scanrelid = scanrelid;
- node->indxid = indxid;
- node->indxqual = indxqual;
- node->indxqualorig = indxqualorig;
- node->indxorderdir = indexscandir;
- node->scan.scanstate = (CommonScanState *) NULL;
+ node->indexid = indexid;
+ node->indexqual = indexqual;
+ node->indexqualorig = indexqualorig;
+ node->indexstrategy = indexstrategy;
+ node->indexsubtype = indexsubtype;
+ node->indexorderdir = indexscandir;
+
+ return node;
+}
+
+static BitmapIndexScan *
+make_bitmap_indexscan(Index scanrelid,
+ Oid indexid,
+ List *indexqual,
+ List *indexqualorig,
+ List *indexstrategy,
+ List *indexsubtype)
+{
+ BitmapIndexScan *node = makeNode(BitmapIndexScan);
+ Plan *plan = &node->scan.plan;
+
+ /* cost should be inserted by caller */
+ plan->targetlist = NIL; /* not used */
+ plan->qual = NIL; /* not used */
+ plan->lefttree = NULL;
+ plan->righttree = NULL;
+ node->scan.scanrelid = scanrelid;
+ node->indexid = indexid;
+ node->indexqual = indexqual;
+ node->indexqualorig = indexqualorig;
+ node->indexstrategy = indexstrategy;
+ node->indexsubtype = indexsubtype;
+
+ return node;
+}
+
+static BitmapHeapScan *
+make_bitmap_heapscan(List *qptlist,
+ List *qpqual,
+ Plan *lefttree,
+ List *bitmapqualorig,
+ Index scanrelid)
+{
+ BitmapHeapScan *node = makeNode(BitmapHeapScan);
+ Plan *plan = &node->scan.plan;
+
+ /* cost should be inserted by caller */
+ plan->targetlist = qptlist;
+ plan->qual = qpqual;
+ plan->lefttree = lefttree;
+ plan->righttree = NULL;
+ node->scan.scanrelid = scanrelid;
+ node->bitmapqualorig = bitmapqualorig;
return node;
}
make_tidscan(List *qptlist,
List *qpqual,
Index scanrelid,
- List *tideval)
+ List *tidquals)
{
TidScan *node = makeNode(TidScan);
Plan *plan = &node->scan.plan;
/* cost should be inserted by caller */
- plan->state = (EState *) NULL;
plan->targetlist = qptlist;
plan->qual = qpqual;
plan->lefttree = NULL;
plan->righttree = NULL;
node->scan.scanrelid = scanrelid;
- node->tideval = copyObject(tideval); /* XXX do we really need a
- * copy? */
- node->needRescan = false;
- node->scan.scanstate = (CommonScanState *) NULL;
+ node->tidquals = tidquals;
return node;
}
SubqueryScan *node = makeNode(SubqueryScan);
Plan *plan = &node->scan.plan;
+ /*
+ * Cost is figured here for the convenience of prepunion.c. Note this is
+ * only correct for the case where qpqual is empty; otherwise caller
+ * should overwrite cost with a better estimate.
+ */
copy_plan_costsize(plan, subplan);
- plan->state = (EState *) NULL;
+ plan->total_cost += cpu_tuple_cost * subplan->plan_rows;
+
plan->targetlist = qptlist;
plan->qual = qpqual;
plan->lefttree = NULL;
plan->righttree = NULL;
node->scan.scanrelid = scanrelid;
node->subplan = subplan;
- node->scan.scanstate = (CommonScanState *) NULL;
return node;
}
List *qpqual,
Index scanrelid)
{
- FunctionScan *node = makeNode(FunctionScan);
- Plan *plan = &node->scan.plan;
+ FunctionScan *node = makeNode(FunctionScan);
+ Plan *plan = &node->scan.plan;
/* cost should be inserted by caller */
- plan->state = (EState *) NULL;
plan->targetlist = qptlist;
plan->qual = qpqual;
plan->lefttree = NULL;
plan->righttree = NULL;
node->scan.scanrelid = scanrelid;
- node->scan.scanstate = (CommonScanState *) NULL;
return node;
}
{
Append *node = makeNode(Append);
Plan *plan = &node->plan;
- List *subnode;
+ ListCell *subnode;
- /* compute costs from subplan costs */
+ /*
+ * Compute cost as sum of subplan costs. We charge nothing extra for the
+ * Append itself, which perhaps is too optimistic, but since it doesn't do
+ * any selection or projection, it is a pretty cheap node.
+ */
plan->startup_cost = 0;
plan->total_cost = 0;
plan->plan_rows = 0;
{
Plan *subplan = (Plan *) lfirst(subnode);
- if (subnode == appendplans) /* first node? */
+ if (subnode == list_head(appendplans)) /* first node? */
plan->startup_cost = subplan->startup_cost;
plan->total_cost += subplan->total_cost;
plan->plan_rows += subplan->plan_rows;
if (plan->plan_width < subplan->plan_width)
plan->plan_width = subplan->plan_width;
}
- plan->state = (EState *) NULL;
+
plan->targetlist = tlist;
plan->qual = NIL;
plan->lefttree = NULL;
return node;
}
+static BitmapAnd *
+make_bitmap_and(List *bitmapplans)
+{
+ BitmapAnd *node = makeNode(BitmapAnd);
+ Plan *plan = &node->plan;
+
+ /* cost should be inserted by caller */
+ plan->targetlist = NIL;
+ plan->qual = NIL;
+ plan->lefttree = NULL;
+ plan->righttree = NULL;
+ node->bitmapplans = bitmapplans;
+
+ return node;
+}
+
+static BitmapOr *
+make_bitmap_or(List *bitmapplans)
+{
+ BitmapOr *node = makeNode(BitmapOr);
+ Plan *plan = &node->plan;
+
+ /* cost should be inserted by caller */
+ plan->targetlist = NIL;
+ plan->qual = NIL;
+ plan->lefttree = NULL;
+ plan->righttree = NULL;
+ node->bitmapplans = bitmapplans;
+
+ return node;
+}
+
static NestLoop *
make_nestloop(List *tlist,
List *joinclauses,
Plan *plan = &node->join.plan;
/* cost should be inserted by caller */
- plan->state = (EState *) NULL;
plan->targetlist = tlist;
plan->qual = otherclauses;
plan->lefttree = lefttree;
Plan *plan = &node->join.plan;
/* cost should be inserted by caller */
- plan->state = (EState *) NULL;
plan->targetlist = tlist;
plan->qual = otherclauses;
plan->lefttree = lefttree;
}
static Hash *
-make_hash(List *tlist, Node *hashkey, Plan *lefttree)
+make_hash(Plan *lefttree)
{
Hash *node = makeNode(Hash);
Plan *plan = &node->plan;
copy_plan_costsize(plan, lefttree);
/*
- * For plausibility, make startup & total costs equal total cost of
- * input plan; this only affects EXPLAIN display not decisions.
+ * For plausibility, make startup & total costs equal total cost of input
+ * plan; this only affects EXPLAIN display not decisions.
*/
plan->startup_cost = plan->total_cost;
- plan->state = (EState *) NULL;
- plan->targetlist = tlist;
- plan->qual = NULL;
+ plan->targetlist = copyObject(lefttree->targetlist);
+ plan->qual = NIL;
plan->lefttree = lefttree;
plan->righttree = NULL;
- node->hashkey = hashkey;
return node;
}
Plan *plan = &node->join.plan;
/* cost should be inserted by caller */
- plan->state = (EState *) NULL;
plan->targetlist = tlist;
plan->qual = otherclauses;
plan->lefttree = lefttree;
}
/*
- * To use make_sort directly, you must already have marked the tlist
- * with reskey and reskeyop information. The keys had better be
- * non-redundant, too (ie, there had better be tlist items marked with
- * each key number from 1 to keycount), or the executor will get confused!
+ * make_sort --- basic routine to build a Sort plan node
+ *
+ * Caller must have built the sortColIdx and sortOperators arrays already.
*/
-Sort *
-make_sort(Query *root, List *tlist, Plan *lefttree, int keycount)
+static Sort *
+make_sort(PlannerInfo *root, Plan *lefttree, int numCols,
+ AttrNumber *sortColIdx, Oid *sortOperators)
{
Sort *node = makeNode(Sort);
Plan *plan = &node->plan;
copy_plan_costsize(plan, lefttree); /* only care about copying size */
cost_sort(&sort_path, root, NIL,
- lefttree->plan_rows, lefttree->plan_width);
- plan->startup_cost = sort_path.startup_cost + lefttree->total_cost;
- plan->total_cost = sort_path.total_cost + lefttree->total_cost;
- plan->state = (EState *) NULL;
- plan->targetlist = tlist;
+ lefttree->total_cost,
+ lefttree->plan_rows,
+ lefttree->plan_width);
+ plan->startup_cost = sort_path.startup_cost;
+ plan->total_cost = sort_path.total_cost;
+ plan->targetlist = copyObject(lefttree->targetlist);
plan->qual = NIL;
plan->lefttree = lefttree;
plan->righttree = NULL;
- node->keycount = keycount;
+ node->numCols = numCols;
+ node->sortColIdx = sortColIdx;
+ node->sortOperators = sortOperators;
return node;
}
+/*
+ * add_sort_column --- utility subroutine for building sort info arrays
+ *
+ * We need this routine because the same column might be selected more than
+ * once as a sort key column; if so, the extra mentions are redundant.
+ *
+ * Caller is assumed to have allocated the arrays large enough for the
+ * max possible number of columns. Return value is the new column count.
+ */
+static int
+add_sort_column(AttrNumber colIdx, Oid sortOp,
+ int numCols, AttrNumber *sortColIdx, Oid *sortOperators)
+{
+ int i;
+
+ for (i = 0; i < numCols; i++)
+ {
+ if (sortColIdx[i] == colIdx)
+ {
+ /* Already sorting by this col, so extra sort key is useless */
+ return numCols;
+ }
+ }
+
+ /* Add the column */
+ sortColIdx[numCols] = colIdx;
+ sortOperators[numCols] = sortOp;
+ return numCols + 1;
+}
+
/*
* make_sort_from_pathkeys
* Create sort plan to sort according to given pathkeys
*
- * 'tlist' is the target list of the input plan
* 'lefttree' is the node which yields input tuples
* 'pathkeys' is the list of pathkeys by which the result is to be sorted
*
- * We must convert the pathkey information into reskey and reskeyop fields
- * of resdom nodes in the sort plan's target list.
+ * We must convert the pathkey information into arrays of sort key column
+ * numbers and sort operator OIDs.
+ *
+ * If the pathkeys include expressions that aren't simple Vars, we will
+ * usually need to add resjunk items to the input plan's targetlist to
+ * compute these expressions (since the Sort node itself won't do it).
+ * If the input plan type isn't one that can do projections, this means
+ * adding a Result node just to do the projection.
*/
-Sort *
-make_sort_from_pathkeys(Query *root, List *tlist,
- Plan *lefttree, List *pathkeys)
+static Sort *
+make_sort_from_pathkeys(PlannerInfo *root, Plan *lefttree, List *pathkeys)
{
- List *sort_tlist;
- List *i;
- int numsortkeys = 0;
+ List *tlist = lefttree->targetlist;
+ ListCell *i;
+ int numsortkeys;
+ AttrNumber *sortColIdx;
+ Oid *sortOperators;
+
+ /*
+	 * We will need at most list_length(pathkeys) sort columns; possibly fewer
+ */
+ numsortkeys = list_length(pathkeys);
+ sortColIdx = (AttrNumber *) palloc(numsortkeys * sizeof(AttrNumber));
+ sortOperators = (Oid *) palloc(numsortkeys * sizeof(Oid));
- /* Create a new target list for the sort, with sort keys set. */
- sort_tlist = new_unsorted_tlist(tlist);
+ numsortkeys = 0;
foreach(i, pathkeys)
{
List *keysublist = (List *) lfirst(i);
PathKeyItem *pathkey = NULL;
- Resdom *resdom = NULL;
- List *j;
+ TargetEntry *tle = NULL;
+ ListCell *j;
/*
* We can sort by any one of the sort key items listed in this
* sublist. For now, we take the first one that corresponds to an
- * available Var in the sort_tlist.
+ * available Var in the tlist. If there isn't any, use the first one
+ * that is an expression in the input's vars.
*
* XXX if we have a choice, is there any way of figuring out which
- * might be cheapest to execute? (For example, int4lt is likely
- * much cheaper to execute than numericlt, but both might appear
- * in the same pathkey sublist...) Not clear that we ever will
- * have a choice in practice, so it may not matter.
+ * might be cheapest to execute? (For example, int4lt is likely much
+ * cheaper to execute than numericlt, but both might appear in the
+ * same pathkey sublist...) Not clear that we ever will have a choice
+ * in practice, so it may not matter.
*/
foreach(j, keysublist)
{
- pathkey = lfirst(j);
+ pathkey = (PathKeyItem *) lfirst(j);
Assert(IsA(pathkey, PathKeyItem));
- resdom = tlist_member(pathkey->key, sort_tlist);
- if (resdom)
+ tle = tlist_member(pathkey->key, tlist);
+ if (tle)
break;
}
- if (!resdom)
- elog(ERROR, "make_sort_from_pathkeys: cannot find tlist item to sort");
+ if (!tle)
+ {
+ /* No matching Var; look for a computable expression */
+ foreach(j, keysublist)
+ {
+ List *exprvars;
+ ListCell *k;
+
+ pathkey = (PathKeyItem *) lfirst(j);
+ exprvars = pull_var_clause(pathkey->key, false);
+ foreach(k, exprvars)
+ {
+ if (!tlist_member(lfirst(k), tlist))
+ break;
+ }
+ list_free(exprvars);
+ if (!k)
+ break; /* found usable expression */
+ }
+ if (!j)
+ elog(ERROR, "could not find pathkey item to sort");
+
+ /*
+ * Do we need to insert a Result node?
+ */
+ if (!is_projection_capable_plan(lefttree))
+ {
+ tlist = copyObject(tlist);
+ lefttree = (Plan *) make_result(tlist, NULL, lefttree);
+ }
+
+ /*
+ * Add resjunk entry to input's tlist
+ */
+ tle = makeTargetEntry((Expr *) pathkey->key,
+ list_length(tlist) + 1,
+ NULL,
+ true);
+ tlist = lappend(tlist, tle);
+ lefttree->targetlist = tlist; /* just in case NIL before */
+ }
/*
- * The resdom might be already marked as a sort key, if the
- * pathkeys contain duplicate entries. (This can happen in
- * scenarios where multiple mergejoinable clauses mention the same
- * var, for example.) In that case the current pathkey is
- * essentially a no-op, because only one value can be seen within
- * any subgroup where it would be consulted. We can ignore it.
+ * The column might already be selected as a sort key, if the pathkeys
+ * contain duplicate entries. (This can happen in scenarios where
+ * multiple mergejoinable clauses mention the same var, for example.)
+ * So enter it only once in the sort arrays.
*/
- if (resdom->reskey == 0)
- {
- /* OK, mark it as a sort key and set the sort operator */
- resdom->reskey = ++numsortkeys;
- resdom->reskeyop = pathkey->sortop;
- }
+ numsortkeys = add_sort_column(tle->resno, pathkey->sortop,
+ numsortkeys, sortColIdx, sortOperators);
}
Assert(numsortkeys > 0);
- return make_sort(root, sort_tlist, lefttree, numsortkeys);
+ return make_sort(root, lefttree, numsortkeys,
+ sortColIdx, sortOperators);
}
-Material *
-make_material(List *tlist, Plan *lefttree)
+/*
+ * make_sort_from_sortclauses
+ * Create sort plan to sort according to given sortclauses
+ *
+ * 'sortcls' is a list of SortClauses
+ * 'lefttree' is the node which yields input tuples
+ */
+Sort *
+make_sort_from_sortclauses(PlannerInfo *root, List *sortcls, Plan *lefttree)
{
- Material *node = makeNode(Material);
- Plan *plan = &node->plan;
+ List *sub_tlist = lefttree->targetlist;
+ ListCell *l;
+ int numsortkeys;
+ AttrNumber *sortColIdx;
+ Oid *sortOperators;
- copy_plan_costsize(plan, lefttree);
+ /*
+ * We will need at most list_length(sortcls) sort columns; possibly less
+ */
+ numsortkeys = list_length(sortcls);
+ sortColIdx = (AttrNumber *) palloc(numsortkeys * sizeof(AttrNumber));
+ sortOperators = (Oid *) palloc(numsortkeys * sizeof(Oid));
+
+ numsortkeys = 0;
+
+ foreach(l, sortcls)
+ {
+ SortClause *sortcl = (SortClause *) lfirst(l);
+ TargetEntry *tle = get_sortgroupclause_tle(sortcl, sub_tlist);
+
+ /*
+ * Check for the possibility of duplicate order-by clauses --- the
+ * parser should have removed 'em, but no point in sorting
+ * redundantly.
+ */
+ numsortkeys = add_sort_column(tle->resno, sortcl->sortop,
+ numsortkeys, sortColIdx, sortOperators);
+ }
+
+ Assert(numsortkeys > 0);
+
+ return make_sort(root, lefttree, numsortkeys,
+ sortColIdx, sortOperators);
+}
+
+/*
+ * make_sort_from_groupcols
+ * Create sort plan to sort based on grouping columns
+ *
+ * 'groupcls' is the list of GroupClauses
+ * 'grpColIdx' gives the column numbers to use
+ *
+ * This might look like it could be merged with make_sort_from_sortclauses,
+ * but presently we *must* use the grpColIdx[] array to locate sort columns,
+ * because the child plan's tlist is not marked with ressortgroupref info
+ * appropriate to the grouping node. So, only the sortop is used from the
+ * GroupClause entries.
+ */
+Sort *
+make_sort_from_groupcols(PlannerInfo *root,
+ List *groupcls,
+ AttrNumber *grpColIdx,
+ Plan *lefttree)
+{
+ List *sub_tlist = lefttree->targetlist;
+ int grpno = 0;
+ ListCell *l;
+ int numsortkeys;
+ AttrNumber *sortColIdx;
+ Oid *sortOperators;
/*
- * For plausibility, make startup & total costs equal total cost of
- * input plan; this only affects EXPLAIN display not decisions.
- *
- * XXX shouldn't we charge some additional cost for materialization?
+ * We will need at most list_length(groupcls) sort columns; possibly less
*/
- plan->startup_cost = plan->total_cost;
- plan->state = (EState *) NULL;
- plan->targetlist = tlist;
+ numsortkeys = list_length(groupcls);
+ sortColIdx = (AttrNumber *) palloc(numsortkeys * sizeof(AttrNumber));
+ sortOperators = (Oid *) palloc(numsortkeys * sizeof(Oid));
+
+ numsortkeys = 0;
+
+ foreach(l, groupcls)
+ {
+ GroupClause *grpcl = (GroupClause *) lfirst(l);
+ TargetEntry *tle = get_tle_by_resno(sub_tlist, grpColIdx[grpno]);
+
+ /*
+ * Check for the possibility of duplicate group-by clauses --- the
+ * parser should have removed 'em, but no point in sorting
+ * redundantly.
+ */
+ numsortkeys = add_sort_column(tle->resno, grpcl->sortop,
+ numsortkeys, sortColIdx, sortOperators);
+ grpno++;
+ }
+
+ Assert(numsortkeys > 0);
+
+ return make_sort(root, lefttree, numsortkeys,
+ sortColIdx, sortOperators);
+}
+
+Material *
+make_material(Plan *lefttree)
+{
+ Material *node = makeNode(Material);
+ Plan *plan = &node->plan;
+
+ /* cost should be inserted by caller */
+ plan->targetlist = copyObject(lefttree->targetlist);
plan->qual = NIL;
plan->lefttree = lefttree;
plan->righttree = NULL;
return node;
}
+/*
+ * materialize_finished_plan: stick a Material node atop a completed plan
+ *
+ * There are a couple of places where we want to attach a Material node
+ * after completion of subquery_planner(). This currently requires hackery.
+ * Since subquery_planner has already run SS_finalize_plan on the subplan
+ * tree, we have to kluge up parameter lists for the Material node.
+ * Possibly this could be fixed by postponing SS_finalize_plan processing
+ * until setrefs.c is run?
+ */
+Plan *
+materialize_finished_plan(Plan *subplan)
+{
+ Plan *matplan;
+ Path matpath; /* dummy for result of cost_material */
+
+ matplan = (Plan *) make_material(subplan);
+
+ /* Set cost data */
+ cost_material(&matpath,
+ subplan->total_cost,
+ subplan->plan_rows,
+ subplan->plan_width);
+ matplan->startup_cost = matpath.startup_cost;
+ matplan->total_cost = matpath.total_cost;
+ matplan->plan_rows = subplan->plan_rows;
+ matplan->plan_width = subplan->plan_width;
+
+ /* parameter kluge --- see comments above */
+ matplan->extParam = bms_copy(subplan->extParam);
+ matplan->allParam = bms_copy(subplan->allParam);
+
+ return matplan;
+}
+
Agg *
-make_agg(List *tlist, List *qual, Plan *lefttree)
+make_agg(PlannerInfo *root, List *tlist, List *qual,
+ AggStrategy aggstrategy,
+ int numGroupCols, AttrNumber *grpColIdx,
+ long numGroups, int numAggs,
+ Plan *lefttree)
{
Agg *node = makeNode(Agg);
Plan *plan = &node->plan;
+ Path agg_path; /* dummy for result of cost_agg */
+ QualCost qual_cost;
- copy_plan_costsize(plan, lefttree);
+ node->aggstrategy = aggstrategy;
+ node->numCols = numGroupCols;
+ node->grpColIdx = grpColIdx;
+ node->numGroups = numGroups;
+
+ copy_plan_costsize(plan, lefttree); /* only care about copying size */
+ cost_agg(&agg_path, root,
+ aggstrategy, numAggs,
+ numGroupCols, numGroups,
+ lefttree->startup_cost,
+ lefttree->total_cost,
+ lefttree->plan_rows);
+ plan->startup_cost = agg_path.startup_cost;
+ plan->total_cost = agg_path.total_cost;
/*
- * Charge one cpu_operator_cost per aggregate function per input
- * tuple.
+ * We will produce a single output tuple if not grouping, and a tuple per
+ * group otherwise.
*/
- plan->total_cost += cpu_operator_cost * plan->plan_rows *
- (length(pull_agg_clause((Node *) tlist)) +
- length(pull_agg_clause((Node *) qual)));
+ if (aggstrategy == AGG_PLAIN)
+ plan->plan_rows = 1;
+ else
+ plan->plan_rows = numGroups;
/*
- * We will produce a single output tuple if the input is not a Group,
- * and a tuple per group otherwise. For now, estimate the number of
- * groups as 10% of the number of tuples --- bogus, but how to do
- * better? (Note we assume the input Group node is in "tuplePerGroup"
- * mode, so it didn't reduce its row count already.)
+ * We also need to account for the cost of evaluation of the qual (ie, the
+ * HAVING clause) and the tlist. Note that cost_qual_eval doesn't charge
+ * anything for Aggref nodes; this is okay since they are really
+ * comparable to Vars.
+ *
+ * See notes in grouping_planner about why this routine and make_group are
+ * the only ones in this file that worry about tlist eval cost.
*/
- if (IsA(lefttree, Group))
- {
- plan->plan_rows *= 0.1;
- if (plan->plan_rows < 1)
- plan->plan_rows = 1;
- }
- else
+ if (qual)
{
- plan->plan_rows = 1;
- plan->startup_cost = plan->total_cost;
+ cost_qual_eval(&qual_cost, qual);
+ plan->startup_cost += qual_cost.startup;
+ plan->total_cost += qual_cost.startup;
+ plan->total_cost += qual_cost.per_tuple * plan->plan_rows;
}
+ cost_qual_eval(&qual_cost, tlist);
+ plan->startup_cost += qual_cost.startup;
+ plan->total_cost += qual_cost.startup;
+ plan->total_cost += qual_cost.per_tuple * plan->plan_rows;
- plan->state = (EState *) NULL;
plan->qual = qual;
plan->targetlist = tlist;
plan->lefttree = lefttree;
- plan->righttree = (Plan *) NULL;
+ plan->righttree = NULL;
return node;
}
Group *
-make_group(List *tlist,
- bool tuplePerGroup,
- int ngrp,
+make_group(PlannerInfo *root,
+ List *tlist,
+ List *qual,
+ int numGroupCols,
AttrNumber *grpColIdx,
+ double numGroups,
Plan *lefttree)
{
Group *node = makeNode(Group);
Plan *plan = &node->plan;
+ Path group_path; /* dummy for result of cost_group */
+ QualCost qual_cost;
- copy_plan_costsize(plan, lefttree);
+ node->numCols = numGroupCols;
+ node->grpColIdx = grpColIdx;
- /*
- * Charge one cpu_operator_cost per comparison per input tuple. We
- * assume all columns get compared at most of the tuples.
- */
- plan->total_cost += cpu_operator_cost * plan->plan_rows * ngrp;
+ copy_plan_costsize(plan, lefttree); /* only care about copying size */
+ cost_group(&group_path, root,
+ numGroupCols, numGroups,
+ lefttree->startup_cost,
+ lefttree->total_cost,
+ lefttree->plan_rows);
+ plan->startup_cost = group_path.startup_cost;
+ plan->total_cost = group_path.total_cost;
+
+ /* One output tuple per estimated result group */
+ plan->plan_rows = numGroups;
/*
- * If tuplePerGroup (which is named exactly backwards) is true, we
- * will return all the input tuples, so the input node's row count is
- * OK. Otherwise, we'll return only one tuple from each group. For
- * now, estimate the number of groups as 10% of the number of tuples
- * --- bogus, but how to do better?
+ * We also need to account for the cost of evaluation of the qual (ie, the
+ * HAVING clause) and the tlist.
+ *
+ * XXX this double-counts the cost of evaluation of any expressions used
+ * for grouping, since in reality those will have been evaluated at a
+ * lower plan level and will only be copied by the Group node. Worth
+ * fixing?
+ *
+ * See notes in grouping_planner about why this routine and make_agg are
+ * the only ones in this file that worry about tlist eval cost.
*/
- if (!tuplePerGroup)
+ if (qual)
{
- plan->plan_rows *= 0.1;
- if (plan->plan_rows < 1)
- plan->plan_rows = 1;
+ cost_qual_eval(&qual_cost, qual);
+ plan->startup_cost += qual_cost.startup;
+ plan->total_cost += qual_cost.startup;
+ plan->total_cost += qual_cost.per_tuple * plan->plan_rows;
}
+ cost_qual_eval(&qual_cost, tlist);
+ plan->startup_cost += qual_cost.startup;
+ plan->total_cost += qual_cost.startup;
+ plan->total_cost += qual_cost.per_tuple * plan->plan_rows;
- plan->state = (EState *) NULL;
- plan->qual = NULL;
+ plan->qual = qual;
plan->targetlist = tlist;
plan->lefttree = lefttree;
- plan->righttree = (Plan *) NULL;
- node->tuplePerGroup = tuplePerGroup;
- node->numCols = ngrp;
- node->grpColIdx = grpColIdx;
+ plan->righttree = NULL;
return node;
}
* distinctList is a list of SortClauses, identifying the targetlist items
* that should be considered by the Unique filter.
*/
-
Unique *
-make_unique(List *tlist, Plan *lefttree, List *distinctList)
+make_unique(Plan *lefttree, List *distinctList)
{
Unique *node = makeNode(Unique);
Plan *plan = &node->plan;
- int numCols = length(distinctList);
+ int numCols = list_length(distinctList);
int keyno = 0;
AttrNumber *uniqColIdx;
- List *slitem;
+ ListCell *slitem;
copy_plan_costsize(plan, lefttree);
/*
- * Charge one cpu_operator_cost per comparison per input tuple. We
- * assume all columns get compared at most of the tuples.
+	 * Charge one cpu_operator_cost per comparison per input tuple. We assume
+	 * all columns get compared for most of the tuples. (XXX probably this is
+	 * an overestimate.)
*/
plan->total_cost += cpu_operator_cost * plan->plan_rows * numCols;
/*
- * As for Group, we make the unsupported assumption that there will be
- * 10% as many tuples out as in.
+	 * plan->plan_rows is left as a copy of the input subplan's plan_rows; ie,
+	 * we assume the filter removes nothing. The caller must alter this if it
+	 * has a better estimate.
*/
- plan->plan_rows *= 0.1;
- if (plan->plan_rows < 1)
- plan->plan_rows = 1;
- plan->state = (EState *) NULL;
- plan->targetlist = tlist;
+ plan->targetlist = copyObject(lefttree->targetlist);
plan->qual = NIL;
plan->lefttree = lefttree;
plan->righttree = NULL;
/*
- * convert SortClause list into array of attr indexes, as wanted by
- * exec
+ * convert SortClause list into array of attr indexes, as wanted by exec
*/
Assert(numCols > 0);
uniqColIdx = (AttrNumber *) palloc(sizeof(AttrNumber) * numCols);
foreach(slitem, distinctList)
{
SortClause *sortcl = (SortClause *) lfirst(slitem);
- TargetEntry *tle = get_sortgroupclause_tle(sortcl, tlist);
+ TargetEntry *tle = get_sortgroupclause_tle(sortcl, plan->targetlist);
- uniqColIdx[keyno++] = tle->resdom->resno;
+ uniqColIdx[keyno++] = tle->resno;
}
node->numCols = numCols;
*/
SetOp *
-make_setop(SetOpCmd cmd, List *tlist, Plan *lefttree,
+make_setop(SetOpCmd cmd, Plan *lefttree,
List *distinctList, AttrNumber flagColIdx)
{
SetOp *node = makeNode(SetOp);
Plan *plan = &node->plan;
- int numCols = length(distinctList);
+ int numCols = list_length(distinctList);
int keyno = 0;
AttrNumber *dupColIdx;
- List *slitem;
+ ListCell *slitem;
copy_plan_costsize(plan, lefttree);
/*
- * Charge one cpu_operator_cost per comparison per input tuple. We
- * assume all columns get compared at most of the tuples.
+	 * Charge one cpu_operator_cost per comparison per input tuple. We assume
+	 * all columns get compared for most of the tuples.
*/
plan->total_cost += cpu_operator_cost * plan->plan_rows * numCols;
/*
- * As for Group, we make the unsupported assumption that there will be
- * 10% as many tuples out as in.
+ * We make the unsupported assumption that there will be 10% as many
+ * tuples out as in. Any way to do better?
*/
plan->plan_rows *= 0.1;
if (plan->plan_rows < 1)
plan->plan_rows = 1;
- plan->state = (EState *) NULL;
- plan->targetlist = tlist;
+ plan->targetlist = copyObject(lefttree->targetlist);
plan->qual = NIL;
plan->lefttree = lefttree;
plan->righttree = NULL;
/*
- * convert SortClause list into array of attr indexes, as wanted by
- * exec
+ * convert SortClause list into array of attr indexes, as wanted by exec
*/
Assert(numCols > 0);
dupColIdx = (AttrNumber *) palloc(sizeof(AttrNumber) * numCols);
foreach(slitem, distinctList)
{
SortClause *sortcl = (SortClause *) lfirst(slitem);
- TargetEntry *tle = get_sortgroupclause_tle(sortcl, tlist);
+ TargetEntry *tle = get_sortgroupclause_tle(sortcl, plan->targetlist);
- dupColIdx[keyno++] = tle->resdom->resno;
+ dupColIdx[keyno++] = tle->resno;
}
node->cmd = cmd;
return node;
}
+/*
+ * Note: offset_est and count_est are passed in to save having to repeat
+ * work already done to estimate the values of the limitOffset and limitCount
+ * expressions. Their values are as returned by preprocess_limit (0 means
+ * "not relevant", -1 means "couldn't estimate"). Keep the code below in sync
+ * with that function!
+ */
Limit *
-make_limit(List *tlist, Plan *lefttree,
- Node *limitOffset, Node *limitCount)
+make_limit(Plan *lefttree, Node *limitOffset, Node *limitCount,
+ int offset_est, int count_est)
{
Limit *node = makeNode(Limit);
Plan *plan = &node->plan;
copy_plan_costsize(plan, lefttree);
/*
- * If offset/count are constants, adjust the output rows count and
- * costs accordingly. This is only a cosmetic issue if we are at top
- * level, but if we are building a subquery then it's important to
- * report correct info to the outer planner.
+ * Adjust the output rows count and costs according to the offset/limit.
+ * This is only a cosmetic issue if we are at top level, but if we are
+ * building a subquery then it's important to report correct info to the
+ * outer planner.
+ *
+ * When the offset or count couldn't be estimated, use 10% of the
+ * estimated number of rows emitted from the subplan.
*/
- if (limitOffset && IsA(limitOffset, Const))
+ if (offset_est != 0)
{
- Const *limito = (Const *) limitOffset;
- int32 offset = DatumGetInt32(limito->constvalue);
+ double offset_rows;
- if (!limito->constisnull && offset > 0)
- {
- if (offset > plan->plan_rows)
- offset = (int32) plan->plan_rows;
- if (plan->plan_rows > 0)
- plan->startup_cost +=
- (plan->total_cost - plan->startup_cost)
- * ((double) offset) / plan->plan_rows;
- plan->plan_rows -= offset;
- if (plan->plan_rows < 1)
- plan->plan_rows = 1;
- }
+ if (offset_est > 0)
+ offset_rows = (double) offset_est;
+ else
+ offset_rows = clamp_row_est(lefttree->plan_rows * 0.10);
+ if (offset_rows > plan->plan_rows)
+ offset_rows = plan->plan_rows;
+ if (plan->plan_rows > 0)
+ plan->startup_cost +=
+ (plan->total_cost - plan->startup_cost)
+ * offset_rows / plan->plan_rows;
+ plan->plan_rows -= offset_rows;
+ if (plan->plan_rows < 1)
+ plan->plan_rows = 1;
}
- if (limitCount && IsA(limitCount, Const))
+
+ if (count_est != 0)
{
- Const *limitc = (Const *) limitCount;
- int32 count = DatumGetInt32(limitc->constvalue);
+ double count_rows;
- if (!limitc->constisnull && count >= 0)
- {
- if (count > plan->plan_rows)
- count = (int32) plan->plan_rows;
- if (plan->plan_rows > 0)
- plan->total_cost = plan->startup_cost +
- (plan->total_cost - plan->startup_cost)
- * ((double) count) / plan->plan_rows;
- plan->plan_rows = count;
- if (plan->plan_rows < 1)
- plan->plan_rows = 1;
- }
+ if (count_est > 0)
+ count_rows = (double) count_est;
+ else
+ count_rows = clamp_row_est(lefttree->plan_rows * 0.10);
+ if (count_rows > plan->plan_rows)
+ count_rows = plan->plan_rows;
+ if (plan->plan_rows > 0)
+ plan->total_cost = plan->startup_cost +
+ (plan->total_cost - plan->startup_cost)
+ * count_rows / plan->plan_rows;
+ plan->plan_rows = count_rows;
+ if (plan->plan_rows < 1)
+ plan->plan_rows = 1;
}
- plan->state = (EState *) NULL;
- plan->targetlist = tlist;
+ plan->targetlist = copyObject(lefttree->targetlist);
plan->qual = NIL;
plan->lefttree = lefttree;
plan->righttree = NULL;
return node;
}
+/*
+ * make_result
+ * Build a Result plan node
+ *
+ * If we have a subplan, assume that any evaluation costs for the gating qual
+ * were already factored into the subplan's startup cost, and just copy the
+ * subplan cost. If there's no subplan, we should include the qual eval
+ * cost. In either case, tlist eval cost is not to be included here.
+ */
Result *
make_result(List *tlist,
Node *resconstantqual,
Result *node = makeNode(Result);
Plan *plan = &node->plan;
-#ifdef NOT_USED
- tlist = generate_fjoin(tlist);
-#endif
if (subplan)
copy_plan_costsize(plan, subplan);
else
plan->startup_cost = 0;
plan->total_cost = cpu_tuple_cost;
plan->plan_rows = 1; /* wrong if we have a set-valued function? */
- plan->plan_width = 0; /* XXX try to be smarter? */
+ plan->plan_width = 0; /* XXX is it worth being smarter? */
+ if (resconstantqual)
+ {
+ QualCost qual_cost;
+
+ cost_qual_eval(&qual_cost, (List *) resconstantqual);
+ /* resconstantqual is evaluated once at startup */
+ plan->startup_cost += qual_cost.startup + qual_cost.per_tuple;
+ plan->total_cost += qual_cost.startup + qual_cost.per_tuple;
+ }
}
- plan->state = (EState *) NULL;
plan->targetlist = tlist;
plan->qual = NIL;
plan->lefttree = subplan;
plan->righttree = NULL;
node->resconstantqual = resconstantqual;
- node->resstate = NULL;
return node;
}
-#ifdef NOT_USED
-List *
-generate_fjoin(List *tlist)
+/*
+ * is_projection_capable_plan
+ * Check whether a given Plan node is able to do projection.
+ */
+bool
+is_projection_capable_plan(Plan *plan)
{
- List tlistP;
- List newTlist = NIL;
- List fjoinList = NIL;
- int nIters = 0;
-
- /*
- * Break the target list into elements with Iter nodes, and those
- * without them.
- */
- foreach(tlistP, tlist)
+ /* Most plan types can project, so just list the ones that can't */
+ switch (nodeTag(plan))
{
- List tlistElem;
-
- tlistElem = lfirst(tlistP);
- if (IsA(lsecond(tlistElem), Iter))
- {
- nIters++;
- fjoinList = lappend(fjoinList, tlistElem);
- }
- else
- newTlist = lappend(newTlist, tlistElem);
- }
-
- /*
- * if we have an Iter node then we need to flatten.
- */
- if (nIters > 0)
- {
- List *inner;
- List *tempList;
- Fjoin *fjoinNode;
- DatumPtr results = (DatumPtr) palloc(nIters * sizeof(Datum));
- BoolPtr alwaysDone = (BoolPtr) palloc(nIters * sizeof(bool));
-
- inner = lfirst(fjoinList);
- fjoinList = lnext(fjoinList);
- fjoinNode = (Fjoin) MakeFjoin(false,
- nIters,
- inner,
- results,
- alwaysDone);
- tempList = lcons(fjoinNode, fjoinList);
- newTlist = lappend(newTlist, tempList);
+ case T_Hash:
+ case T_Material:
+ case T_Sort:
+ case T_Unique:
+ case T_SetOp:
+ case T_Limit:
+ case T_Append:
+ return false;
+ default:
+ break;
}
- return newTlist;
- return tlist; /* do nothing for now - ay 10/94 */
+ return true;
}
-
-#endif