X-Git-Url: https://granicus.if.org/sourcecode?a=blobdiff_plain;f=src%2Fbackend%2Foptimizer%2Fplan%2Fplanner.c;h=841d85f7397da28f478290fdaae80de563a89c53;hb=511db38ace2690f19816465baed07cefe535bfec;hp=e53a18ac296e07ff7e763c81d837fe58d8f44aab;hpb=6c4996fa6b635db71e2348025a7ba77d71da69f3;p=postgresql diff --git a/src/backend/optimizer/plan/planner.c b/src/backend/optimizer/plan/planner.c index e53a18ac29..841d85f739 100644 --- a/src/backend/optimizer/plan/planner.c +++ b/src/backend/optimizer/plan/planner.c @@ -3,12 +3,12 @@ * planner.c * The query optimizer external interface. * - * Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2009, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * * IDENTIFICATION - * $Header: /cvsroot/pgsql/src/backend/optimizer/plan/planner.c,v 1.152 2003/03/13 16:58:35 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/optimizer/plan/planner.c,v 1.250 2009/01/01 17:23:44 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -18,13 +18,10 @@ #include #include "catalog/pg_operator.h" -#include "catalog/pg_type.h" #include "executor/executor.h" +#include "executor/nodeAgg.h" #include "miscadmin.h" #include "nodes/makefuncs.h" -#ifdef OPTIMIZER_DEBUG -#include "nodes/print.h" -#endif #include "optimizer/clauses.h" #include "optimizer/cost.h" #include "optimizer/pathnode.h" @@ -35,82 +32,152 @@ #include "optimizer/subselect.h" #include "optimizer/tlist.h" #include "optimizer/var.h" -#include "parser/analyze.h" -#include "parser/parsetree.h" +#ifdef OPTIMIZER_DEBUG +#include "nodes/print.h" +#endif #include "parser/parse_expr.h" #include "parser/parse_oper.h" -#include "utils/selfuncs.h" +#include "parser/parsetree.h" +#include "utils/lsyscache.h" #include "utils/syscache.h" +/* GUC parameter */ +double cursor_tuple_fraction = DEFAULT_CURSOR_TUPLE_FRACTION; + +/* Hook for plugins to get control in planner() */ +planner_hook_type planner_hook = NULL; + + /* Expression kind codes for preprocess_expression */ -#define EXPRKIND_QUAL 0 -#define EXPRKIND_TARGET 1 -#define EXPRKIND_RTFUNC 2 -#define EXPRKIND_ININFO 3 - - -static Node *preprocess_expression(Query *parse, Node *expr, int kind); -static void preprocess_qual_conditions(Query *parse, Node *jtnode); -static Plan *inheritance_planner(Query *parse, List *inheritlist); -static Plan *grouping_planner(Query *parse, double tuple_fraction); -static bool hash_safe_grouping(Query *parse); -static List *make_subplanTargetList(Query *parse, List *tlist, +#define EXPRKIND_QUAL 0 +#define EXPRKIND_TARGET 1 +#define EXPRKIND_RTFUNC 2 +#define EXPRKIND_VALUES 3 +#define EXPRKIND_LIMIT 4 +#define EXPRKIND_APPINFO 5 + + +static Node *preprocess_expression(PlannerInfo *root, Node *expr, int kind); +static void preprocess_qual_conditions(PlannerInfo *root, Node *jtnode); +static Plan *inheritance_planner(PlannerInfo *root); +static Plan *grouping_planner(PlannerInfo *root, double tuple_fraction); +static bool is_dummy_plan(Plan *plan); +static double preprocess_limit(PlannerInfo *root, + double tuple_fraction, + int64 *offset_est, int64 *count_est); +static void preprocess_groupclause(PlannerInfo *root); +static bool choose_hashed_grouping(PlannerInfo *root, + double tuple_fraction, double limit_tuples, + Path *cheapest_path, Path *sorted_path, + double dNumGroups, AggClauseCounts *agg_counts); +static bool choose_hashed_distinct(PlannerInfo *root, + Plan *input_plan, List *input_pathkeys, 
+ double tuple_fraction, double limit_tuples, + double dNumDistinctRows); +static List *make_subplanTargetList(PlannerInfo *root, List *tlist, AttrNumber **groupColIdx, bool *need_tlist_eval); -static void locate_grouping_columns(Query *parse, - List *tlist, - List *sub_tlist, - AttrNumber *groupColIdx); -static Plan *make_groupsortplan(Query *parse, - List *groupClause, - AttrNumber *grpColIdx, - Plan *subplan); +static void locate_grouping_columns(PlannerInfo *root, + List *tlist, + List *sub_tlist, + AttrNumber *groupColIdx); static List *postprocess_setop_tlist(List *new_tlist, List *orig_tlist); +static List *select_active_windows(PlannerInfo *root, WindowFuncLists *wflists); +static List *make_pathkeys_for_window(PlannerInfo *root, WindowClause *wc, + List *tlist, bool canonicalize); +static void get_column_info_for_window(PlannerInfo *root, WindowClause *wc, + List *tlist, + int numSortCols, AttrNumber *sortColIdx, + int *partNumCols, + AttrNumber **partColIdx, + Oid **partOperators, + int *ordNumCols, + AttrNumber **ordColIdx, + Oid **ordOperators); /***************************************************************************** * * Query optimizer entry point * + * To support loadable plugins that monitor or modify planner behavior, + * we provide a hook variable that lets a plugin get control before and + * after the standard planning process. The plugin would normally call + * standard_planner(). + * + * Note to plugin authors: standard_planner() scribbles on its Query input, + * so you'd better copy that data structure if you want to plan more than once. + * *****************************************************************************/ -Plan * -planner(Query *parse, bool isCursor, int cursorOptions) +PlannedStmt * +planner(Query *parse, int cursorOptions, ParamListInfo boundParams) { + PlannedStmt *result; + + if (planner_hook) + result = (*planner_hook) (parse, cursorOptions, boundParams); + else + result = standard_planner(parse, cursorOptions, boundParams); + return result; +} + +PlannedStmt * +standard_planner(Query *parse, int cursorOptions, ParamListInfo boundParams) +{ + PlannedStmt *result; + PlannerGlobal *glob; double tuple_fraction; - Plan *result_plan; - Index save_PlannerQueryLevel; - List *save_PlannerParamVar; + PlannerInfo *root; + Plan *top_plan; + ListCell *lp, + *lr; + + /* Cursor options may come from caller or from DECLARE CURSOR stmt */ + if (parse->utilityStmt && + IsA(parse->utilityStmt, DeclareCursorStmt)) + cursorOptions |= ((DeclareCursorStmt *) parse->utilityStmt)->options; /* - * The planner can be called recursively (an example is when - * eval_const_expressions tries to pre-evaluate an SQL function). So, - * these global state variables must be saved and restored. - * - * These vars cannot be moved into the Query structure since their whole - * purpose is communication across multiple sub-Queries. - * - * Note we do NOT save and restore PlannerPlanId: it exists to assign - * unique IDs to SubPlan nodes, and we want those IDs to be unique for - * the life of a backend. Also, PlannerInitPlan is saved/restored in - * subquery_planner, not here. + * Set up global state for this planner invocation. This data is needed + * across all levels of sub-Query that might exist in the given command, + * so we keep it in a separate struct that's linked to by each per-Query + * PlannerInfo. 
*/ - save_PlannerQueryLevel = PlannerQueryLevel; - save_PlannerParamVar = PlannerParamVar; - - /* Initialize state for handling outer-level references and params */ - PlannerQueryLevel = 0; /* will be 1 in top-level subquery_planner */ - PlannerParamVar = NIL; + glob = makeNode(PlannerGlobal); + + glob->boundParams = boundParams; + glob->paramlist = NIL; + glob->subplans = NIL; + glob->subrtables = NIL; + glob->rewindPlanIDs = NULL; + glob->finalrtable = NIL; + glob->relationOids = NIL; + glob->invalItems = NIL; + glob->lastPHId = 0; + glob->transientPlan = false; /* Determine what fraction of the plan is likely to be scanned */ - if (isCursor) + if (cursorOptions & CURSOR_OPT_FAST_PLAN) { /* - * We have no real idea how many tuples the user will ultimately - * FETCH from a cursor, but it seems a good bet that he - * doesn't want 'em all. Optimize for 10% retrieval (you - * gotta better number? Should this be a SETtable parameter?) + * We have no real idea how many tuples the user will ultimately FETCH + * from a cursor, but it is often the case that he doesn't want 'em + * all, or would prefer a fast-start plan anyway so that he can + * process some of the tuples sooner. Use a GUC parameter to decide + * what fraction to optimize for. + */ + tuple_fraction = cursor_tuple_fraction; + + /* + * We document cursor_tuple_fraction as simply being a fraction, + * which means the edge cases 0 and 1 have to be treated specially + * here. We convert 1 to 0 ("all the tuples") and 0 to a very small + * fraction. */ - tuple_fraction = 0.10; + if (tuple_fraction >= 1.0) + tuple_fraction = 0.0; + else if (tuple_fraction <= 0.0) + tuple_fraction = 1e-10; } else { @@ -119,31 +186,52 @@ planner(Query *parse, bool isCursor, int cursorOptions) } /* primary planning entry point (may recurse for subqueries) */ - result_plan = subquery_planner(parse, tuple_fraction); - - Assert(PlannerQueryLevel == 0); + top_plan = subquery_planner(glob, parse, NULL, + false, tuple_fraction, &root); /* - * If creating a plan for a scrollable cursor, make sure it can - * run backwards on demand. Add a Material node at the top at need. + * If creating a plan for a scrollable cursor, make sure it can run + * backwards on demand. Add a Material node at the top at need. */ - if (isCursor && (cursorOptions & CURSOR_OPT_SCROLL)) + if (cursorOptions & CURSOR_OPT_SCROLL) { - if (!ExecSupportsBackwardScan(result_plan)) - result_plan = materialize_finished_plan(result_plan); + if (!ExecSupportsBackwardScan(top_plan)) + top_plan = materialize_finished_plan(top_plan); } - /* executor wants to know total number of Params used overall */ - result_plan->nParamExec = length(PlannerParamVar); - /* final cleanup of the plan */ - set_plan_references(result_plan, parse->rtable); + Assert(glob->finalrtable == NIL); + top_plan = set_plan_references(glob, top_plan, root->parse->rtable); + /* ... 
and the subplans (both regular subplans and initplans) */ + Assert(list_length(glob->subplans) == list_length(glob->subrtables)); + forboth(lp, glob->subplans, lr, glob->subrtables) + { + Plan *subplan = (Plan *) lfirst(lp); + List *subrtable = (List *) lfirst(lr); - /* restore state for outer planner, if any */ - PlannerQueryLevel = save_PlannerQueryLevel; - PlannerParamVar = save_PlannerParamVar; + lfirst(lp) = set_plan_references(glob, subplan, subrtable); + } - return result_plan; + /* build the PlannedStmt result */ + result = makeNode(PlannedStmt); + + result->commandType = parse->commandType; + result->canSetTag = parse->canSetTag; + result->transientPlan = glob->transientPlan; + result->planTree = top_plan; + result->rtable = glob->finalrtable; + result->resultRelations = root->resultRelations; + result->utilityStmt = parse->utilityStmt; + result->intoClause = parse->intoClause; + result->subplans = glob->subplans; + result->rewindPlanIDs = glob->rewindPlanIDs; + result->returningLists = root->returningLists; + result->rowMarks = parse->rowMarks; + result->relationOids = glob->relationOids; + result->invalItems = glob->invalItems; + result->nParamExec = list_length(glob->paramlist); + + return result; } @@ -152,10 +240,16 @@ planner(Query *parse, bool isCursor, int cursorOptions) * Invokes the planner on a subquery. We recurse to here for each * sub-SELECT found in the query tree. * + * glob is the global state for the current planner run. * parse is the querytree produced by the parser & rewriter. + * parent_root is the immediate parent Query's info (NULL at the top level). + * hasRecursion is true if this is a recursive WITH query. * tuple_fraction is the fraction of tuples we expect will be retrieved. * tuple_fraction is interpreted as explained for grouping_planner, below. * + * If subroot isn't NULL, we pass back the query's final PlannerInfo struct; + * among other things this tells the output sort ordering of the plan. + * * Basically, this routine does the stuff that should only be done once * per Query object. It then calls grouping_planner. At one time, * grouping_planner could be invoked recursively on the same Query object; @@ -169,52 +263,82 @@ planner(Query *parse, bool isCursor, int cursorOptions) *-------------------- */ Plan * -subquery_planner(Query *parse, double tuple_fraction) +subquery_planner(PlannerGlobal *glob, Query *parse, + PlannerInfo *parent_root, + bool hasRecursion, double tuple_fraction, + PlannerInfo **subroot) { - List *saved_initplan = PlannerInitPlan; - int saved_planid = PlannerPlanId; - bool hasOuterJoins; + int num_old_subplans = list_length(glob->subplans); + PlannerInfo *root; Plan *plan; List *newHaving; - List *lst; + bool hasOuterJoins; + ListCell *l; + + /* Create a PlannerInfo data structure for this subquery */ + root = makeNode(PlannerInfo); + root->parse = parse; + root->glob = glob; + root->query_level = parent_root ? parent_root->query_level + 1 : 1; + root->parent_root = parent_root; + root->planner_cxt = CurrentMemoryContext; + root->init_plans = NIL; + root->cte_plan_ids = NIL; + root->eq_classes = NIL; + root->append_rel_list = NIL; + + root->hasRecursion = hasRecursion; + if (hasRecursion) + root->wt_param_id = SS_assign_worktable_param(root); + else + root->wt_param_id = -1; + root->non_recursive_plan = NULL; - /* Set up for a new level of subquery */ - PlannerQueryLevel++; - PlannerInitPlan = NIL; + /* + * If there is a WITH list, process each WITH query and build an + * initplan SubPlan structure for it. 
+ */ + if (parse->cteList) + SS_process_ctes(root); /* - * Look for IN clauses at the top level of WHERE, and transform them - * into joins. Note that this step only handles IN clauses originally - * at top level of WHERE; if we pull up any subqueries in the next step, - * their INs are processed just before pulling them up. + * Look for ANY and EXISTS SubLinks in WHERE and JOIN/ON clauses, and try + * to transform them into joins. Note that this step does not descend + * into subqueries; if we pull up any subqueries below, their SubLinks are + * processed just before pulling them up. */ - parse->in_info_list = NIL; if (parse->hasSubLinks) - parse->jointree->quals = pull_up_IN_clauses(parse, - parse->jointree->quals); + pull_up_sublinks(root); + + /* + * Scan the rangetable for set-returning functions, and inline them + * if possible (producing subqueries that might get pulled up next). + * Recursion issues here are handled in the same way as for SubLinks. + */ + inline_set_returning_functions(root); /* * Check to see if any subqueries in the rangetable can be merged into * this query. */ parse->jointree = (FromExpr *) - pull_up_subqueries(parse, (Node *) parse->jointree, false); + pull_up_subqueries(root, (Node *) parse->jointree, false, false); /* - * Detect whether any rangetable entries are RTE_JOIN kind; if not, - * we can avoid the expense of doing flatten_join_alias_vars(). Also - * check for outer joins --- if none, we can skip reduce_outer_joins(). + * Detect whether any rangetable entries are RTE_JOIN kind; if not, we can + * avoid the expense of doing flatten_join_alias_vars(). Also check for + * outer joins --- if none, we can skip reduce_outer_joins(). * This must be done after we have done pull_up_subqueries, of course. */ - parse->hasJoinRTEs = false; + root->hasJoinRTEs = false; hasOuterJoins = false; - foreach(lst, parse->rtable) + foreach(l, parse->rtable) { - RangeTblEntry *rte = (RangeTblEntry *) lfirst(lst); + RangeTblEntry *rte = (RangeTblEntry *) lfirst(l); if (rte->rtekind == RTE_JOIN) { - parse->hasJoinRTEs = true; + root->hasJoinRTEs = true; if (IS_OUTER_JOIN(rte->jointype)) { hasOuterJoins = true; @@ -224,133 +348,149 @@ subquery_planner(Query *parse, double tuple_fraction) } } + /* + * Expand any rangetable entries that are inheritance sets into "append + * relations". This can add entries to the rangetable, but they must be + * plain base relations not joins, so it's OK (and marginally more + * efficient) to do it after checking for join RTEs. We must do it after + * pulling up subqueries, else we'd fail to handle inherited tables in + * subqueries. + */ + expand_inherited_tables(root); + + /* + * Set hasHavingQual to remember if HAVING clause is present. Needed + * because preprocess_expression will reduce a constant-true condition to + * an empty qual list ... but "HAVING TRUE" is not a semantic no-op. + */ + root->hasHavingQual = (parse->havingQual != NULL); + + /* Clear this flag; might get set in distribute_qual_to_rels */ + root->hasPseudoConstantQuals = false; + /* * Do expression preprocessing on targetlist and quals. 
*/ parse->targetList = (List *) - preprocess_expression(parse, (Node *) parse->targetList, + preprocess_expression(root, (Node *) parse->targetList, + EXPRKIND_TARGET); + + parse->returningList = (List *) + preprocess_expression(root, (Node *) parse->returningList, EXPRKIND_TARGET); - preprocess_qual_conditions(parse, (Node *) parse->jointree); + preprocess_qual_conditions(root, (Node *) parse->jointree); - parse->havingQual = preprocess_expression(parse, parse->havingQual, + parse->havingQual = preprocess_expression(root, parse->havingQual, EXPRKIND_QUAL); - parse->in_info_list = (List *) - preprocess_expression(parse, (Node *) parse->in_info_list, - EXPRKIND_ININFO); + parse->limitOffset = preprocess_expression(root, parse->limitOffset, + EXPRKIND_LIMIT); + parse->limitCount = preprocess_expression(root, parse->limitCount, + EXPRKIND_LIMIT); - /* Also need to preprocess expressions for function RTEs */ - foreach(lst, parse->rtable) + root->append_rel_list = (List *) + preprocess_expression(root, (Node *) root->append_rel_list, + EXPRKIND_APPINFO); + + /* Also need to preprocess expressions for function and values RTEs */ + foreach(l, parse->rtable) { - RangeTblEntry *rte = (RangeTblEntry *) lfirst(lst); + RangeTblEntry *rte = (RangeTblEntry *) lfirst(l); if (rte->rtekind == RTE_FUNCTION) - rte->funcexpr = preprocess_expression(parse, rte->funcexpr, + rte->funcexpr = preprocess_expression(root, rte->funcexpr, EXPRKIND_RTFUNC); + else if (rte->rtekind == RTE_VALUES) + rte->values_lists = (List *) + preprocess_expression(root, (Node *) rte->values_lists, + EXPRKIND_VALUES); } /* - * A HAVING clause without aggregates is equivalent to a WHERE clause - * (except it can only refer to grouped fields). Transfer any - * agg-free clauses of the HAVING qual into WHERE. This may seem like - * wasting cycles to cater to stupidly-written queries, but there are - * other reasons for doing it. Firstly, if the query contains no aggs - * at all, then we aren't going to generate an Agg plan node, and so - * there'll be no place to execute HAVING conditions; without this - * transfer, we'd lose the HAVING condition entirely, which is wrong. - * Secondly, when we push down a qual condition into a sub-query, it's - * easiest to push the qual into HAVING always, in case it contains - * aggs, and then let this code sort it out. + * In some cases we may want to transfer a HAVING clause into WHERE. We + * cannot do so if the HAVING clause contains aggregates (obviously) or + * volatile functions (since a HAVING clause is supposed to be executed + * only once per group). Also, it may be that the clause is so expensive + * to execute that we're better off doing it only once per group, despite + * the loss of selectivity. This is hard to estimate short of doing the + * entire planning process twice, so we use a heuristic: clauses + * containing subplans are left in HAVING. Otherwise, we move or copy the + * HAVING clause into WHERE, in hopes of eliminating tuples before + * aggregation instead of after. + * + * If the query has explicit grouping then we can simply move such a + * clause into WHERE; any group that fails the clause will not be in the + * output because none of its tuples will reach the grouping or + * aggregation stage. Otherwise we must have a degenerate (variable-free) + * HAVING clause, which we put in WHERE so that query_planner() can use it + * in a gating Result node, but also keep in HAVING to ensure that we + * don't emit a bogus aggregated row. 
(This could be done better, but it + * seems not worth optimizing.) * * Note that both havingQual and parse->jointree->quals are in - * implicitly-ANDed-list form at this point, even though they are - * declared as Node *. Also note that contain_agg_clause does not - * recurse into sub-selects, which is exactly what we need here. + * implicitly-ANDed-list form at this point, even though they are declared + * as Node *. */ newHaving = NIL; - foreach(lst, (List *) parse->havingQual) + foreach(l, (List *) parse->havingQual) { - Node *havingclause = (Node *) lfirst(lst); + Node *havingclause = (Node *) lfirst(l); - if (contain_agg_clause(havingclause)) + if (contain_agg_clause(havingclause) || + contain_volatile_functions(havingclause) || + contain_subplans(havingclause)) + { + /* keep it in HAVING */ newHaving = lappend(newHaving, havingclause); - else + } + else if (parse->groupClause) + { + /* move it to WHERE */ parse->jointree->quals = (Node *) lappend((List *) parse->jointree->quals, havingclause); + } + else + { + /* put a copy in WHERE, keep it in HAVING */ + parse->jointree->quals = (Node *) + lappend((List *) parse->jointree->quals, + copyObject(havingclause)); + newHaving = lappend(newHaving, havingclause); + } } parse->havingQual = (Node *) newHaving; /* * If we have any outer joins, try to reduce them to plain inner joins. - * This step is most easily done after we've done expression preprocessing. + * This step is most easily done after we've done expression + * preprocessing. */ if (hasOuterJoins) - reduce_outer_joins(parse); + reduce_outer_joins(root); /* - * See if we can simplify the jointree; opportunities for this may come - * from having pulled up subqueries, or from flattening explicit JOIN - * syntax. We must do this after flattening JOIN alias variables, since - * eliminating explicit JOIN nodes from the jointree will cause - * get_relids_for_join() to fail. But it should happen after - * reduce_outer_joins, anyway. - */ - parse->jointree = (FromExpr *) - simplify_jointree(parse, (Node *) parse->jointree); - - /* - * Do the main planning. If we have an inherited target relation, - * that needs special processing, else go straight to - * grouping_planner. + * Do the main planning. If we have an inherited target relation, that + * needs special processing, else go straight to grouping_planner. */ if (parse->resultRelation && - (lst = expand_inherited_rtentry(parse, parse->resultRelation, - false)) != NIL) - plan = inheritance_planner(parse, lst); + rt_fetch(parse->resultRelation, parse->rtable)->inh) + plan = inheritance_planner(root); else - plan = grouping_planner(parse, tuple_fraction); + plan = grouping_planner(root, tuple_fraction); /* * If any subplans were generated, or if we're inside a subplan, build - * initPlan list and extParam/allParam sets for plan nodes. + * initPlan list and extParam/allParam sets for plan nodes, and attach the + * initPlans to the top plan node. */ - if (PlannerPlanId != saved_planid || PlannerQueryLevel > 1) - { - Cost initplan_cost = 0; - - /* Prepare extParam/allParam sets for all nodes in tree */ - SS_finalize_plan(plan, parse->rtable); - - /* - * SS_finalize_plan doesn't handle initPlans, so we have to manually - * attach them to the topmost plan node, and add their extParams to - * the topmost node's, too. - * - * We also add the total_cost of each initPlan to the startup cost - * of the top node. This is a conservative overestimate, since in - * fact each initPlan might be executed later than plan startup, or - * even not at all. 
- */ - plan->initPlan = PlannerInitPlan; - - foreach(lst, plan->initPlan) - { - SubPlan *initplan = (SubPlan *) lfirst(lst); - - plan->extParam = bms_add_members(plan->extParam, - initplan->plan->extParam); - initplan_cost += initplan->plan->total_cost; - } - - plan->startup_cost += initplan_cost; - plan->total_cost += initplan_cost; - } + if (list_length(glob->subplans) != num_old_subplans || + root->query_level > 1) + SS_finalize_plan(root, plan, true); - /* Return to outer subquery context */ - PlannerQueryLevel--; - PlannerInitPlan = saved_initplan; - /* we do NOT restore PlannerPlanId; that's not an oversight! */ + /* Return internal info if caller wants it */ + if (subroot) + *subroot = root; return plan; } @@ -362,34 +502,48 @@ subquery_planner(Query *parse, double tuple_fraction) * conditions), or a HAVING clause. */ static Node * -preprocess_expression(Query *parse, Node *expr, int kind) +preprocess_expression(PlannerInfo *root, Node *expr, int kind) { + /* + * Fall out quickly if expression is empty. This occurs often enough to + * be worth checking. Note that null->null is the correct conversion for + * implicit-AND result format, too. + */ + if (expr == NULL) + return NULL; + /* * If the query has any join RTEs, replace join alias variables with * base-relation variables. We must do this before sublink processing, - * else sublinks expanded out from join aliases wouldn't get processed. + * else sublinks expanded out from join aliases wouldn't get processed. We + * can skip it in VALUES lists, however, since they can't contain any Vars + * at all. */ - if (parse->hasJoinRTEs) - expr = flatten_join_alias_vars(parse, expr); + if (root->hasJoinRTEs && kind != EXPRKIND_VALUES) + expr = flatten_join_alias_vars(root, expr); /* * Simplify constant expressions. * - * Note that at this point quals have not yet been converted to - * implicit-AND form, so we can apply eval_const_expressions directly. + * Note: one essential effect here is to insert the current actual values + * of any default arguments for functions. To ensure that happens, we + * *must* process all expressions here. Previous PG versions sometimes + * skipped const-simplification if it didn't seem worth the trouble, but + * we can't do that anymore. + * + * Note: this also flattens nested AND and OR expressions into N-argument + * form. All processing of a qual expression after this point must be + * careful to maintain AND/OR flatness --- that is, do not generate a tree + * with AND directly under AND, nor OR directly under OR. */ - expr = eval_const_expressions(expr); + expr = eval_const_expressions(root, expr); /* - * If it's a qual or havingQual, canonicalize it, and convert it to - * implicit-AND format. - * - * XXX Is there any value in re-applying eval_const_expressions after - * canonicalize_qual? + * If it's a qual or havingQual, canonicalize it. */ if (kind == EXPRKIND_QUAL) { - expr = (Node *) canonicalize_qual((Expr *) expr, true); + expr = (Node *) canonicalize_qual((Expr *) expr); #ifdef OPTIMIZER_DEBUG printf("After canonicalize_qual()\n"); @@ -398,12 +552,26 @@ preprocess_expression(Query *parse, Node *expr, int kind) } /* Expand SubLinks to SubPlans */ - if (parse->hasSubLinks) - expr = SS_process_sublinks(expr, (kind == EXPRKIND_QUAL)); + if (root->parse->hasSubLinks) + expr = SS_process_sublinks(root, expr, (kind == EXPRKIND_QUAL)); + + /* + * XXX do not insert anything here unless you have grokked the comments in + * SS_replace_correlation_vars ... 
+ */ + + /* Replace uplevel vars with Param nodes (this IS possible in VALUES) */ + if (root->query_level > 1) + expr = SS_replace_correlation_vars(root, expr); - /* Replace uplevel vars with Param nodes */ - if (PlannerQueryLevel > 1) - expr = SS_replace_correlation_vars(expr); + /* + * If it's a qual or havingQual, convert it to implicit-AND format. (We + * don't want to do this before eval_const_expressions, since the latter + * would be unable to simplify a top-level AND correctly. Also, + * SS_process_sublinks expects explicit-AND format.) + */ + if (kind == EXPRKIND_QUAL) + expr = (Node *) make_ands_implicit((Expr *) expr); return expr; } @@ -414,7 +582,7 @@ preprocess_expression(Query *parse, Node *expr, int kind) * preprocessing work on each qual condition found therein. */ static void -preprocess_qual_conditions(Query *parse, Node *jtnode) +preprocess_qual_conditions(PlannerInfo *root, Node *jtnode) { if (jtnode == NULL) return; @@ -425,101 +593,152 @@ preprocess_qual_conditions(Query *parse, Node *jtnode) else if (IsA(jtnode, FromExpr)) { FromExpr *f = (FromExpr *) jtnode; - List *l; + ListCell *l; foreach(l, f->fromlist) - preprocess_qual_conditions(parse, lfirst(l)); + preprocess_qual_conditions(root, lfirst(l)); - f->quals = preprocess_expression(parse, f->quals, EXPRKIND_QUAL); + f->quals = preprocess_expression(root, f->quals, EXPRKIND_QUAL); } else if (IsA(jtnode, JoinExpr)) { JoinExpr *j = (JoinExpr *) jtnode; - preprocess_qual_conditions(parse, j->larg); - preprocess_qual_conditions(parse, j->rarg); + preprocess_qual_conditions(root, j->larg); + preprocess_qual_conditions(root, j->rarg); - j->quals = preprocess_expression(parse, j->quals, EXPRKIND_QUAL); + j->quals = preprocess_expression(root, j->quals, EXPRKIND_QUAL); } else - elog(ERROR, "preprocess_qual_conditions: unexpected node type %d", - nodeTag(jtnode)); + elog(ERROR, "unrecognized node type: %d", + (int) nodeTag(jtnode)); } -/*-------------------- +/* * inheritance_planner * Generate a plan in the case where the result relation is an * inheritance set. * - * We have to handle this case differently from cases where a source - * relation is an inheritance set. Source inheritance is expanded at - * the bottom of the plan tree (see allpaths.c), but target inheritance - * has to be expanded at the top. The reason is that for UPDATE, each - * target relation needs a different targetlist matching its own column - * set. (This is not so critical for DELETE, but for simplicity we treat - * inherited DELETE the same way.) Fortunately, the UPDATE/DELETE target - * can never be the nullable side of an outer join, so it's OK to generate - * the plan this way. - * - * parse is the querytree produced by the parser & rewriter. - * inheritlist is an integer list of RT indexes for the result relation set. + * We have to handle this case differently from cases where a source relation + * is an inheritance set. Source inheritance is expanded at the bottom of the + * plan tree (see allpaths.c), but target inheritance has to be expanded at + * the top. The reason is that for UPDATE, each target relation needs a + * different targetlist matching its own column set. Also, for both UPDATE + * and DELETE, the executor needs the Append plan node at the top, else it + * can't keep track of which table is the current target table. Fortunately, + * the UPDATE/DELETE target can never be the nullable side of an outer join, + * so it's OK to generate the plan this way. * * Returns a query plan. 
- *-------------------- */ static Plan * -inheritance_planner(Query *parse, List *inheritlist) +inheritance_planner(PlannerInfo *root) { + Query *parse = root->parse; int parentRTindex = parse->resultRelation; - Oid parentOID = getrelid(parentRTindex, parse->rtable); - int mainrtlength = length(parse->rtable); List *subplans = NIL; + List *resultRelations = NIL; + List *returningLists = NIL; + List *rtable = NIL; List *tlist = NIL; - List *l; + PlannerInfo subroot; + ListCell *l; - foreach(l, inheritlist) + foreach(l, root->append_rel_list) { - int childRTindex = lfirsti(l); - Oid childOID = getrelid(childRTindex, parse->rtable); - int subrtlength; - Query *subquery; + AppendRelInfo *appinfo = (AppendRelInfo *) lfirst(l); Plan *subplan; - /* Generate modified query with this rel as target */ - subquery = (Query *) adjust_inherited_attrs((Node *) parse, - parentRTindex, parentOID, - childRTindex, childOID); + /* append_rel_list contains all append rels; ignore others */ + if (appinfo->parent_relid != parentRTindex) + continue; + + /* + * Generate modified query with this rel as target. + */ + memcpy(&subroot, root, sizeof(PlannerInfo)); + subroot.parse = (Query *) + adjust_appendrel_attrs((Node *) parse, + appinfo); + subroot.returningLists = NIL; + subroot.init_plans = NIL; + /* We needn't modify the child's append_rel_list */ + /* There shouldn't be any OJ info to translate, as yet */ + Assert(subroot.join_info_list == NIL); + /* and we haven't created PlaceHolderInfos, either */ + Assert(subroot.placeholder_list == NIL); + /* Generate plan */ - subplan = grouping_planner(subquery, 0.0 /* retrieve all tuples */ ); - subplans = lappend(subplans, subplan); + subplan = grouping_planner(&subroot, 0.0 /* retrieve all tuples */ ); + /* - * It's possible that additional RTEs got added to the rangetable - * due to expansion of inherited source tables (see allpaths.c). - * If so, we must copy 'em back to the main parse tree's rtable. - * - * XXX my goodness this is ugly. Really need to think about ways - * to rein in planner's habit of scribbling on its input. + * If this child rel was excluded by constraint exclusion, exclude it + * from the plan. 
*/ - subrtlength = length(subquery->rtable); - if (subrtlength > mainrtlength) + if (is_dummy_plan(subplan)) + continue; + + /* Save rtable and tlist from first rel for use below */ + if (subplans == NIL) { - List *subrt = subquery->rtable; + rtable = subroot.parse->rtable; + tlist = subplan->targetlist; + } + + subplans = lappend(subplans, subplan); - while (mainrtlength-- > 0) /* wish we had nthcdr() */ - subrt = lnext(subrt); - parse->rtable = nconc(parse->rtable, subrt); - mainrtlength = subrtlength; + /* Make sure any initplans from this rel get into the outer list */ + root->init_plans = list_concat(root->init_plans, subroot.init_plans); + + /* Build target-relations list for the executor */ + resultRelations = lappend_int(resultRelations, appinfo->child_relid); + + /* Build list of per-relation RETURNING targetlists */ + if (parse->returningList) + { + Assert(list_length(subroot.returningLists) == 1); + returningLists = list_concat(returningLists, + subroot.returningLists); } - /* Save preprocessed tlist from first rel for use in Append */ - if (tlist == NIL) - tlist = subplan->targetlist; } - /* Save the target-relations list for the executor, too */ - parse->resultRelations = inheritlist; + root->resultRelations = resultRelations; + root->returningLists = returningLists; /* Mark result as unordered (probably unnecessary) */ - parse->query_pathkeys = NIL; + root->query_pathkeys = NIL; + + /* + * If we managed to exclude every child rel, return a dummy plan + */ + if (subplans == NIL) + { + root->resultRelations = list_make1_int(parentRTindex); + /* although dummy, it must have a valid tlist for executor */ + tlist = preprocess_targetlist(root, parse->targetList); + return (Plan *) make_result(root, + tlist, + (Node *) list_make1(makeBoolConst(false, + false)), + NULL); + } + + /* + * Planning might have modified the rangetable, due to changes of the + * Query structures inside subquery RTEs. We have to ensure that this + * gets propagated back to the master copy. But can't do this until we + * are done planning, because all the calls to grouping_planner need + * virgin sub-Queries to work from. (We are effectively assuming that + * sub-Queries will get planned identically each time, or at least that + * the impacts on their rangetables will be the same each time.) + * + * XXX should clean this up someday + */ + parse->rtable = rtable; + + /* Suppress Append if there's only one surviving child rel */ + if (list_length(subplans) == 1) + return (Plan *) linitial(subplans); return (Plan *) make_append(subplans, true, tlist); } @@ -530,7 +749,6 @@ inheritance_planner(Query *parse, List *inheritlist) * This primarily means adding top-level processing to the basic * query plan produced by query_planner. * - * parse is the querytree produced by the parser & rewriter. * tuple_fraction is the fraction of tuples we expect will be retrieved * * tuple_fraction is interpreted as follows: @@ -540,781 +758,1420 @@ inheritance_planner(Query *parse, List *inheritlist) * tuple_fraction >= 1: tuple_fraction is the absolute number of tuples * expected to be retrieved (ie, a LIMIT specification) * - * Returns a query plan. Also, parse->query_pathkeys is returned as the + * Returns a query plan. Also, root->query_pathkeys is returned as the * actual output ordering of the plan (in pathkey format). 
*-------------------- */ static Plan * -grouping_planner(Query *parse, double tuple_fraction) +grouping_planner(PlannerInfo *root, double tuple_fraction) { + Query *parse = root->parse; List *tlist = parse->targetList; + int64 offset_est = 0; + int64 count_est = 0; + double limit_tuples = -1.0; Plan *result_plan; List *current_pathkeys; - List *sort_pathkeys; + double dNumGroups = 0; + + /* Tweak caller-supplied tuple_fraction if have LIMIT/OFFSET */ + if (parse->limitCount || parse->limitOffset) + { + tuple_fraction = preprocess_limit(root, tuple_fraction, + &offset_est, &count_est); + + /* + * If we have a known LIMIT, and don't have an unknown OFFSET, we can + * estimate the effects of using a bounded sort. + */ + if (count_est > 0 && offset_est >= 0) + limit_tuples = (double) count_est + (double) offset_est; + } if (parse->setOperations) { + List *set_sortclauses; + /* - * Construct the plan for set operations. The result will not - * need any work except perhaps a top-level sort and/or LIMIT. + * If there's a top-level ORDER BY, assume we have to fetch all the + * tuples. This might be too simplistic given all the hackery below + * to possibly avoid the sort; but the odds of accurate estimates + * here are pretty low anyway. */ - result_plan = plan_set_operations(parse); + if (parse->sortClause) + tuple_fraction = 0.0; /* - * We should not need to call preprocess_targetlist, since we must - * be in a SELECT query node. Instead, use the targetlist - * returned by plan_set_operations (since this tells whether it - * returned any resjunk columns!), and transfer any sort key - * information from the original tlist. + * Construct the plan for set operations. The result will not need + * any work except perhaps a top-level sort and/or LIMIT. Note that + * any special work for recursive unions is the responsibility of + * plan_set_operations. */ - Assert(parse->commandType == CMD_SELECT); + result_plan = plan_set_operations(root, tuple_fraction, + &set_sortclauses); - tlist = postprocess_setop_tlist(result_plan->targetlist, tlist); + /* + * Calculate pathkeys representing the sort order (if any) of the set + * operation's result. We have to do this before overwriting the sort + * key information... + */ + current_pathkeys = make_pathkeys_for_sortclauses(root, + set_sortclauses, + result_plan->targetlist, + true); /* - * Can't handle FOR UPDATE here (parser should have checked - * already, but let's make sure). + * We should not need to call preprocess_targetlist, since we must be + * in a SELECT query node. Instead, use the targetlist returned by + * plan_set_operations (since this tells whether it returned any + * resjunk columns!), and transfer any sort key information from the + * original tlist. */ - if (parse->rowMarks) - elog(ERROR, "SELECT FOR UPDATE is not allowed with UNION/INTERSECT/EXCEPT"); + Assert(parse->commandType == CMD_SELECT); + + tlist = postprocess_setop_tlist(copyObject(result_plan->targetlist), + tlist); /* - * We set current_pathkeys NIL indicating we do not know sort - * order. This is correct when the top set operation is UNION - * ALL, since the appended-together results are unsorted even if - * the subplans were sorted. For other set operations we could be - * smarter --- room for future improvement! + * Can't handle FOR UPDATE/SHARE here (parser should have checked + * already, but let's make sure). 
*/ - current_pathkeys = NIL; + if (parse->rowMarks) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("SELECT FOR UPDATE/SHARE is not allowed with UNION/INTERSECT/EXCEPT"))); /* - * Calculate pathkeys that represent ordering requirements + * Calculate pathkeys that represent result ordering requirements */ - sort_pathkeys = make_pathkeys_for_sortclauses(parse->sortClause, - tlist); - sort_pathkeys = canonicalize_pathkeys(parse, sort_pathkeys); + Assert(parse->distinctClause == NIL); + root->sort_pathkeys = make_pathkeys_for_sortclauses(root, + parse->sortClause, + tlist, + true); } else { /* No set operations, do regular planning */ List *sub_tlist; - List *group_pathkeys; AttrNumber *groupColIdx = NULL; bool need_tlist_eval = true; QualCost tlist_cost; - double sub_tuple_fraction; Path *cheapest_path; Path *sorted_path; - double dNumGroups = 0; + Path *best_path; long numGroups = 0; - int numAggs = 0; - int numGroupCols = length(parse->groupClause); + AggClauseCounts agg_counts; + int numGroupCols; bool use_hashed_grouping = false; + WindowFuncLists *wflists = NULL; + List *activeWindows = NIL; - /* Preprocess targetlist in case we are inside an INSERT/UPDATE. */ - tlist = preprocess_targetlist(tlist, - parse->commandType, - parse->resultRelation, - parse->rtable); + MemSet(&agg_counts, 0, sizeof(AggClauseCounts)); - /* - * Add TID targets for rels selected FOR UPDATE (should this be - * done in preprocess_targetlist?). The executor uses the TID to - * know which rows to lock, much as for UPDATE or DELETE. - */ - if (parse->rowMarks) - { - List *l; + /* A recursive query should always have setOperations */ + Assert(!root->hasRecursion); - /* - * We've got trouble if the FOR UPDATE appears inside - * grouping, since grouping renders a reference to individual - * tuple CTIDs invalid. This is also checked at parse time, - * but that's insufficient because of rule substitution, query - * pullup, etc. - */ - CheckSelectForUpdate(parse); + /* Preprocess GROUP BY clause, if any */ + if (parse->groupClause) + preprocess_groupclause(root); + numGroupCols = list_length(parse->groupClause); - /* - * Currently the executor only supports FOR UPDATE at top - * level - */ - if (PlannerQueryLevel > 1) - elog(ERROR, "SELECT FOR UPDATE is not allowed in subselects"); + /* Preprocess targetlist */ + tlist = preprocess_targetlist(root, tlist); - foreach(l, parse->rowMarks) - { - Index rti = lfirsti(l); - char *resname; - Resdom *resdom; - Var *var; - TargetEntry *ctid; - - resname = (char *) palloc(32); - snprintf(resname, 32, "ctid%u", rti); - resdom = makeResdom(length(tlist) + 1, - TIDOID, - -1, - resname, - true); - - var = makeVar(rti, - SelfItemPointerAttributeNumber, - TIDOID, - -1, - 0); - - ctid = makeTargetEntry(resdom, (Expr *) var); - tlist = lappend(tlist, ctid); - } + /* + * Locate any window functions in the tlist. (We don't need to look + * anywhere else, since expressions used in ORDER BY will be in there + * too.) Note that they could all have been eliminated by constant + * folding, in which case we don't need to do any more work. + */ + if (parse->hasWindowFuncs) + { + wflists = find_window_functions((Node *) tlist, + list_length(parse->windowClause)); + if (wflists->numWindowFuncs > 0) + activeWindows = select_active_windows(root, wflists); + else + parse->hasWindowFuncs = false; } /* - * Generate appropriate target list for subplan; may be different - * from tlist if grouping or aggregation is needed. 
+ * Generate appropriate target list for subplan; may be different from + * tlist if grouping or aggregation is needed. */ - sub_tlist = make_subplanTargetList(parse, tlist, + sub_tlist = make_subplanTargetList(root, tlist, &groupColIdx, &need_tlist_eval); /* - * Calculate pathkeys that represent grouping/ordering - * requirements + * Calculate pathkeys that represent grouping/ordering requirements. + * Stash them in PlannerInfo so that query_planner can canonicalize + * them after EquivalenceClasses have been formed. The sortClause + * is certainly sort-able, but GROUP BY and DISTINCT might not be, + * in which case we just leave their pathkeys empty. */ - group_pathkeys = make_pathkeys_for_sortclauses(parse->groupClause, - tlist); - sort_pathkeys = make_pathkeys_for_sortclauses(parse->sortClause, - tlist); + if (parse->groupClause && + grouping_is_sortable(parse->groupClause)) + root->group_pathkeys = + make_pathkeys_for_sortclauses(root, + parse->groupClause, + tlist, + false); + else + root->group_pathkeys = NIL; + + /* We consider only the first (bottom) window in pathkeys logic */ + if (activeWindows != NIL) + { + WindowClause *wc = (WindowClause *) linitial(activeWindows); + + root->window_pathkeys = make_pathkeys_for_window(root, + wc, + tlist, + false); + } + else + root->window_pathkeys = NIL; + + if (parse->distinctClause && + grouping_is_sortable(parse->distinctClause)) + root->distinct_pathkeys = + make_pathkeys_for_sortclauses(root, + parse->distinctClause, + tlist, + false); + else + root->distinct_pathkeys = NIL; + + root->sort_pathkeys = + make_pathkeys_for_sortclauses(root, + parse->sortClause, + tlist, + false); /* * Will need actual number of aggregates for estimating costs. - * Also, it's possible that optimization has eliminated all - * aggregates, and we may as well check for that here. * - * Note: we do not attempt to detect duplicate aggregates here; - * a somewhat-overestimated count is okay for our present purposes. + * Note: we do not attempt to detect duplicate aggregates here; a + * somewhat-overestimated count is okay for our present purposes. + * + * Note: think not that we can turn off hasAggs if we find no aggs. It + * is possible for constant-expression simplification to remove all + * explicit references to aggs, but we still have to follow the + * aggregate semantics (eg, producing only one output row). */ if (parse->hasAggs) { - numAggs = count_agg_clause((Node *) tlist) + - count_agg_clause(parse->havingQual); - if (numAggs == 0) - parse->hasAggs = false; + count_agg_clauses((Node *) tlist, &agg_counts); + count_agg_clauses(parse->havingQual, &agg_counts); } /* - * Figure out whether we need a sorted result from query_planner. + * Figure out whether we want a sorted result from query_planner. + * + * If we have a sortable GROUP BY clause, then we want a result sorted + * properly for grouping. Otherwise, if we have window functions to + * evaluate, we try to sort for the first window. Otherwise, if + * there's a sortable DISTINCT clause that's more rigorous than the + * ORDER BY clause, we try to produce output that's sufficiently well + * sorted for the DISTINCT. Otherwise, if there is an ORDER BY + * clause, we want to sort by the ORDER BY clause. * - * If we have a GROUP BY clause, then we want a result sorted - * properly for grouping. Otherwise, if there is an ORDER BY - * clause, we want to sort by the ORDER BY clause. 
(Note: if we - * have both, and ORDER BY is a superset of GROUP BY, it would be - * tempting to request sort by ORDER BY --- but that might just - * leave us failing to exploit an available sort order at all. - * Needs more thought...) + * Note: if we have both ORDER BY and GROUP BY, and ORDER BY is a + * superset of GROUP BY, it would be tempting to request sort by ORDER + * BY --- but that might just leave us failing to exploit an available + * sort order at all. Needs more thought. The choice for DISTINCT + * versus ORDER BY is much easier, since we know that the parser + * ensured that one is a superset of the other. */ - if (parse->groupClause) - parse->query_pathkeys = group_pathkeys; - else if (parse->sortClause) - parse->query_pathkeys = sort_pathkeys; + if (root->group_pathkeys) + root->query_pathkeys = root->group_pathkeys; + else if (root->window_pathkeys) + root->query_pathkeys = root->window_pathkeys; + else if (list_length(root->distinct_pathkeys) > + list_length(root->sort_pathkeys)) + root->query_pathkeys = root->distinct_pathkeys; + else if (root->sort_pathkeys) + root->query_pathkeys = root->sort_pathkeys; else - parse->query_pathkeys = NIL; + root->query_pathkeys = NIL; + + /* + * Generate the best unsorted and presorted paths for this Query (but + * note there may not be any presorted path). query_planner will also + * estimate the number of groups in the query, and canonicalize all + * the pathkeys. + */ + query_planner(root, sub_tlist, tuple_fraction, limit_tuples, + &cheapest_path, &sorted_path, &dNumGroups); /* - * Adjust tuple_fraction if we see that we are going to apply - * limiting/grouping/aggregation/etc. This is not overridable by - * the caller, since it reflects plan actions that this routine - * will certainly take, not assumptions about context. + * If grouping, decide whether to use sorted or hashed grouping. */ - if (parse->limitCount != NULL) + if (parse->groupClause) { + bool can_hash; + bool can_sort; + /* - * A LIMIT clause limits the absolute number of tuples - * returned. However, if it's not a constant LIMIT then we - * have to punt; for lack of a better idea, assume 10% of the - * plan's result is wanted. + * Executor doesn't support hashed aggregation with DISTINCT + * aggregates. (Doing so would imply storing *all* the input + * values in the hash table, which seems like a certain loser.) */ - double limit_fraction = 0.0; - - if (IsA(parse->limitCount, Const)) + can_hash = (agg_counts.numDistinctAggs == 0 && + grouping_is_hashable(parse->groupClause)); + can_sort = grouping_is_sortable(parse->groupClause); + if (can_hash && can_sort) { - Const *limitc = (Const *) parse->limitCount; - int32 count = DatumGetInt32(limitc->constvalue); - - /* - * A NULL-constant LIMIT represents "LIMIT ALL", which we - * treat the same as no limit (ie, expect to retrieve all - * the tuples). - */ - if (!limitc->constisnull && count > 0) - { - limit_fraction = (double) count; - /* We must also consider the OFFSET, if present */ - if (parse->limitOffset != NULL) - { - if (IsA(parse->limitOffset, Const)) - { - int32 offset; - - limitc = (Const *) parse->limitOffset; - offset = DatumGetInt32(limitc->constvalue); - if (!limitc->constisnull && offset > 0) - limit_fraction += (double) offset; - } - else - { - /* OFFSET is an expression ... punt ... */ - limit_fraction = 0.10; - } - } - } + /* we have a meaningful choice to make ... 
*/ + use_hashed_grouping = + choose_hashed_grouping(root, + tuple_fraction, limit_tuples, + cheapest_path, sorted_path, + dNumGroups, &agg_counts); } + else if (can_hash) + use_hashed_grouping = true; + else if (can_sort) + use_hashed_grouping = false; else - { - /* LIMIT is an expression ... punt ... */ - limit_fraction = 0.10; - } + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("could not implement GROUP BY"), + errdetail("Some of the datatypes only support hashing, while others only support sorting."))); - if (limit_fraction > 0.0) - { - /* - * If we have absolute limits from both caller and LIMIT, - * use the smaller value; if one is fractional and the - * other absolute, treat the fraction as a fraction of the - * absolute value; else we can multiply the two fractions - * together. - */ - if (tuple_fraction >= 1.0) - { - if (limit_fraction >= 1.0) - { - /* both absolute */ - tuple_fraction = Min(tuple_fraction, limit_fraction); - } - else - { - /* caller absolute, limit fractional */ - tuple_fraction *= limit_fraction; - if (tuple_fraction < 1.0) - tuple_fraction = 1.0; - } - } - else if (tuple_fraction > 0.0) - { - if (limit_fraction >= 1.0) - { - /* caller fractional, limit absolute */ - tuple_fraction *= limit_fraction; - if (tuple_fraction < 1.0) - tuple_fraction = 1.0; - } - else - { - /* both fractional */ - tuple_fraction *= limit_fraction; - } - } - else - { - /* no info from caller, just use limit */ - tuple_fraction = limit_fraction; - } - } + /* Also convert # groups to long int --- but 'ware overflow! */ + numGroups = (long) Min(dNumGroups, (double) LONG_MAX); } /* - * With grouping or aggregation, the tuple fraction to pass to - * query_planner() may be different from what it is at top level. + * Select the best path. If we are doing hashed grouping, we will + * always read all the input tuples, so use the cheapest-total path. + * Otherwise, trust query_planner's decision about which to use. */ - sub_tuple_fraction = tuple_fraction; - - if (parse->groupClause) - { - /* - * In GROUP BY mode, we have the little problem that we don't - * really know how many input tuples will be needed to make a - * group, so we can't translate an output LIMIT count into an - * input count. For lack of a better idea, assume 25% of the - * input data will be processed if there is any output limit. - * However, if the caller gave us a fraction rather than an - * absolute count, we can keep using that fraction (which - * amounts to assuming that all the groups are about the same - * size). - */ - if (sub_tuple_fraction >= 1.0) - sub_tuple_fraction = 0.25; + if (use_hashed_grouping || !sorted_path) + best_path = cheapest_path; + else + best_path = sorted_path; - /* - * If both GROUP BY and ORDER BY are specified, we will need - * two levels of sort --- and, therefore, certainly need to - * read all the input tuples --- unless ORDER BY is a subset - * of GROUP BY. (We have not yet canonicalized the pathkeys, - * so must use the slower noncanonical comparison method.) - */ - if (parse->groupClause && parse->sortClause && - !noncanonical_pathkeys_contained_in(sort_pathkeys, - group_pathkeys)) - sub_tuple_fraction = 0.0; - } - else if (parse->hasAggs) + /* + * Check to see if it's possible to optimize MIN/MAX aggregates. If + * so, we will forget all the work we did so far to choose a "regular" + * path ... but we had to do it anyway to be able to tell which way is + * cheaper. 
+ */ + result_plan = optimize_minmax_aggregates(root, + tlist, + best_path); + if (result_plan != NULL) { /* - * Ungrouped aggregate will certainly want all the input - * tuples. + * optimize_minmax_aggregates generated the full plan, with the + * right tlist, and it has no sort order. */ - sub_tuple_fraction = 0.0; + current_pathkeys = NIL; } - else if (parse->distinctClause) + else { /* - * SELECT DISTINCT, like GROUP, will absorb an unpredictable - * number of input tuples per output tuple. Handle the same - * way. + * Normal case --- create a plan according to query_planner's + * results. */ - if (sub_tuple_fraction >= 1.0) - sub_tuple_fraction = 0.25; - } - - /* - * Generate the best unsorted and presorted paths for this Query - * (but note there may not be any presorted path). - */ - query_planner(parse, sub_tlist, sub_tuple_fraction, - &cheapest_path, &sorted_path); - - /* - * We couldn't canonicalize group_pathkeys and sort_pathkeys before - * running query_planner(), so do it now. - */ - group_pathkeys = canonicalize_pathkeys(parse, group_pathkeys); - sort_pathkeys = canonicalize_pathkeys(parse, sort_pathkeys); + bool need_sort_for_grouping = false; - /* - * Consider whether we might want to use hashed grouping. - */ - if (parse->groupClause) - { - List *groupExprs; + result_plan = create_plan(root, best_path); + current_pathkeys = best_path->pathkeys; - /* - * Always estimate the number of groups. We can't do this until - * after running query_planner(), either. - */ - groupExprs = get_sortgrouplist_exprs(parse->groupClause, - parse->targetList); - dNumGroups = estimate_num_groups(parse, - groupExprs, - cheapest_path->parent->rows); - /* Also want it as a long int --- but 'ware overflow! */ - numGroups = (long) Min(dNumGroups, (double) LONG_MAX); + /* Detect if we'll need an explicit sort for grouping */ + if (parse->groupClause && !use_hashed_grouping && + !pathkeys_contained_in(root->group_pathkeys, current_pathkeys)) + { + need_sort_for_grouping = true; + /* + * Always override query_planner's tlist, so that we don't + * sort useless data from a "physical" tlist. + */ + need_tlist_eval = true; + } /* - * Check can't-do-it conditions, including whether the grouping - * operators are hashjoinable. - * - * Executor doesn't support hashed aggregation with DISTINCT - * aggregates. (Doing so would imply storing *all* the input - * values in the hash table, which seems like a certain loser.) + * create_plan() returns a plan with just a "flat" tlist of + * required Vars. Usually we need to insert the sub_tlist as the + * tlist of the top plan node. However, we can skip that if we + * determined that whatever query_planner chose to return will be + * good enough. */ - if (!enable_hashagg || !hash_safe_grouping(parse)) - use_hashed_grouping = false; - else if (parse->hasAggs && - (contain_distinct_agg_clause((Node *) tlist) || - contain_distinct_agg_clause(parse->havingQual))) - use_hashed_grouping = false; - else + if (need_tlist_eval) { /* - * Use hashed grouping if (a) we think we can fit the - * hashtable into SortMem, *and* (b) the estimated cost - * is no more than doing it the other way. While avoiding - * the need for sorted input is usually a win, the fact - * that the output won't be sorted may be a loss; so we - * need to do an actual cost comparison. - * - * In most cases we have no good way to estimate the size of - * the transition value needed by an aggregate; arbitrarily - * assume it is 100 bytes. Also set the overhead per hashtable - * entry at 64 bytes. 
+ * If the top-level plan node is one that cannot do expression + * evaluation, we must insert a Result node to project the + * desired tlist. */ - int hashentrysize = cheapest_path->parent->width + 64 + - numAggs * 100; - - if (hashentrysize * dNumGroups <= SortMem * 1024L) + if (!is_projection_capable_plan(result_plan)) + { + result_plan = (Plan *) make_result(root, + sub_tlist, + NULL, + result_plan); + } + else { /* - * Okay, do the cost comparison. We need to consider - * cheapest_path + hashagg [+ final sort] - * versus either - * cheapest_path [+ sort] + group or agg [+ final sort] - * or - * presorted_path + group or agg [+ final sort] - * where brackets indicate a step that may not be needed. - * We assume query_planner() will have returned a - * presorted path only if it's a winner compared to - * cheapest_path for this purpose. - * - * These path variables are dummies that just hold cost - * fields; we don't make actual Paths for these steps. + * Otherwise, just replace the subplan's flat tlist with + * the desired tlist. */ - Path hashed_p; - Path sorted_p; - - cost_agg(&hashed_p, parse, - AGG_HASHED, numAggs, - numGroupCols, dNumGroups, - cheapest_path->startup_cost, - cheapest_path->total_cost, - cheapest_path->parent->rows); - /* Result of hashed agg is always unsorted */ - if (sort_pathkeys) - cost_sort(&hashed_p, parse, sort_pathkeys, - hashed_p.total_cost, - dNumGroups, - cheapest_path->parent->width); - - if (sorted_path) - { - sorted_p.startup_cost = sorted_path->startup_cost; - sorted_p.total_cost = sorted_path->total_cost; - current_pathkeys = sorted_path->pathkeys; - } - else - { - sorted_p.startup_cost = cheapest_path->startup_cost; - sorted_p.total_cost = cheapest_path->total_cost; - current_pathkeys = cheapest_path->pathkeys; - } - if (!pathkeys_contained_in(group_pathkeys, - current_pathkeys)) - { - cost_sort(&sorted_p, parse, group_pathkeys, - sorted_p.total_cost, - cheapest_path->parent->rows, - cheapest_path->parent->width); - current_pathkeys = group_pathkeys; - } - if (parse->hasAggs) - cost_agg(&sorted_p, parse, - AGG_SORTED, numAggs, - numGroupCols, dNumGroups, - sorted_p.startup_cost, - sorted_p.total_cost, - cheapest_path->parent->rows); - else - cost_group(&sorted_p, parse, - numGroupCols, dNumGroups, - sorted_p.startup_cost, - sorted_p.total_cost, - cheapest_path->parent->rows); - /* The Agg or Group node will preserve ordering */ - if (sort_pathkeys && - !pathkeys_contained_in(sort_pathkeys, - current_pathkeys)) + result_plan->targetlist = sub_tlist; + } + + /* + * Also, account for the cost of evaluation of the sub_tlist. + * + * Up to now, we have only been dealing with "flat" tlists, + * containing just Vars. So their evaluation cost is zero + * according to the model used by cost_qual_eval() (or if you + * prefer, the cost is factored into cpu_tuple_cost). Thus we + * can avoid accounting for tlist cost throughout + * query_planner() and subroutines. But now we've inserted a + * tlist that might contain actual operators, sub-selects, etc + * --- so we'd better account for its cost. + * + * Below this point, any tlist eval cost for added-on nodes + * should be accounted for as we create those nodes. + * Presently, of the node types we can add on, only Agg, + * WindowAgg, and Group project new tlists (the rest just copy + * their input tuples) --- so make_agg(), make_windowagg() and + * make_group() are responsible for computing the added cost. 
+ */ + cost_qual_eval(&tlist_cost, sub_tlist, root); + result_plan->startup_cost += tlist_cost.startup; + result_plan->total_cost += tlist_cost.startup + + tlist_cost.per_tuple * result_plan->plan_rows; + } + else + { + /* + * Since we're using query_planner's tlist and not the one + * make_subplanTargetList calculated, we have to refigure any + * grouping-column indexes make_subplanTargetList computed. + */ + locate_grouping_columns(root, tlist, result_plan->targetlist, + groupColIdx); + } + + /* + * Insert AGG or GROUP node if needed, plus an explicit sort step + * if necessary. + * + * HAVING clause, if any, becomes qual of the Agg or Group node. + */ + if (use_hashed_grouping) + { + /* Hashed aggregate plan --- no sort needed */ + result_plan = (Plan *) make_agg(root, + tlist, + (List *) parse->havingQual, + AGG_HASHED, + numGroupCols, + groupColIdx, + extract_grouping_ops(parse->groupClause), + numGroups, + agg_counts.numAggs, + result_plan); + /* Hashed aggregation produces randomly-ordered results */ + current_pathkeys = NIL; + } + else if (parse->hasAggs) + { + /* Plain aggregate plan --- sort if needed */ + AggStrategy aggstrategy; + + if (parse->groupClause) + { + if (need_sort_for_grouping) { - cost_sort(&sorted_p, parse, sort_pathkeys, - sorted_p.total_cost, - dNumGroups, - cheapest_path->parent->width); + result_plan = (Plan *) + make_sort_from_groupcols(root, + parse->groupClause, + groupColIdx, + result_plan); + current_pathkeys = root->group_pathkeys; } + aggstrategy = AGG_SORTED; /* - * Now make the decision using the top-level tuple - * fraction. First we have to convert an absolute - * count (LIMIT) into fractional form. + * The AGG node will not change the sort ordering of its + * groups, so current_pathkeys describes the result too. */ - if (tuple_fraction >= 1.0) - tuple_fraction /= dNumGroups; + } + else + { + aggstrategy = AGG_PLAIN; + /* Result will be only one row anyway; no sort order */ + current_pathkeys = NIL; + } - if (compare_fractional_path_costs(&hashed_p, &sorted_p, - tuple_fraction) < 0) - { - /* Hashed is cheaper, so use it */ - use_hashed_grouping = true; - } + result_plan = (Plan *) make_agg(root, + tlist, + (List *) parse->havingQual, + aggstrategy, + numGroupCols, + groupColIdx, + extract_grouping_ops(parse->groupClause), + numGroups, + agg_counts.numAggs, + result_plan); + } + else if (parse->groupClause) + { + /* + * GROUP BY without aggregation, so insert a group node (plus + * the appropriate sort node, if necessary). + * + * Add an explicit sort if we couldn't make the path come out + * the way the GROUP node needs it. + */ + if (need_sort_for_grouping) + { + result_plan = (Plan *) + make_sort_from_groupcols(root, + parse->groupClause, + groupColIdx, + result_plan); + current_pathkeys = root->group_pathkeys; } + + result_plan = (Plan *) make_group(root, + tlist, + (List *) parse->havingQual, + numGroupCols, + groupColIdx, + extract_grouping_ops(parse->groupClause), + dNumGroups, + result_plan); + /* The Group node won't change sort ordering */ } - } + else if (root->hasHavingQual) + { + /* + * No aggregates, and no GROUP BY, but we have a HAVING qual. + * This is a degenerate case in which we are supposed to emit + * either 0 or 1 row depending on whether HAVING succeeds. + * Furthermore, there cannot be any variables in either HAVING + * or the targetlist, so we actually do not need the FROM + * table at all! We can just throw away the plan-so-far and + * generate a Result node. 
This is a sufficiently unusual + * corner case that it's not worth contorting the structure of + * this routine to avoid having to generate the plan in the + * first place. + */ + result_plan = (Plan *) make_result(root, + tlist, + parse->havingQual, + NULL); + } + } /* end of non-minmax-aggregate case */ /* - * Select the best path and create a plan to execute it. - * - * If we are doing hashed grouping, we will always read all the - * input tuples, so use the cheapest-total path. Otherwise, - * trust query_planner's decision about which to use. + * Since each window function could require a different sort order, + * we stack up a WindowAgg node for each window, with sort steps + * between them as needed. */ - if (sorted_path && !use_hashed_grouping) + if (activeWindows) { - result_plan = create_plan(parse, sorted_path); - current_pathkeys = sorted_path->pathkeys; - } - else - { - result_plan = create_plan(parse, cheapest_path); - current_pathkeys = cheapest_path->pathkeys; - } + List *window_tlist; + ListCell *l; - /* - * create_plan() returns a plan with just a "flat" tlist of required - * Vars. Usually we need to insert the sub_tlist as the tlist of the - * top plan node. However, we can skip that if we determined that - * whatever query_planner chose to return will be good enough. - */ - if (need_tlist_eval) - { /* * If the top-level plan node is one that cannot do expression - * evaluation, we must insert a Result node to project the desired - * tlist. - * Currently, the only plan node we might see here that falls into - * that category is Append. + * evaluation, we must insert a Result node to project the + * desired tlist. (In some cases this might not really be + * required, but it's not worth trying to avoid it.) Note that + * on second and subsequent passes through the following loop, + * the top-level node will be a WindowAgg which we know can + * project; so we only need to check once. */ - if (IsA(result_plan, Append)) + if (!is_projection_capable_plan(result_plan)) { - result_plan = (Plan *) make_result(sub_tlist, NULL, + result_plan = (Plan *) make_result(root, + NIL, + NULL, result_plan); } - else + + /* + * The "base" targetlist for all steps of the windowing process + * is a flat tlist of all Vars and Aggs needed in the result. + * (In some cases we wouldn't need to propagate all of these + * all the way to the top, since they might only be needed as + * inputs to WindowFuncs. It's probably not worth trying to + * optimize that though.) As we climb up the stack, we add + * outputs for the WindowFuncs computed at each level. Also, + * each input tlist has to present all the columns needed to + * sort the data for the next WindowAgg step. That's handled + * internally by make_sort_from_pathkeys, but we need the + * copyObject steps here to ensure that each plan node has + * a separately modifiable tlist. + */ + window_tlist = flatten_tlist(tlist); + if (parse->hasAggs) + window_tlist = add_to_flat_tlist(window_tlist, + pull_agg_clause((Node *) tlist)); + result_plan->targetlist = (List *) copyObject(window_tlist); + + foreach(l, activeWindows) { + WindowClause *wc = (WindowClause *) lfirst(l); + List *window_pathkeys; + int partNumCols; + AttrNumber *partColIdx; + Oid *partOperators; + int ordNumCols; + AttrNumber *ordColIdx; + Oid *ordOperators; + + window_pathkeys = make_pathkeys_for_window(root, + wc, + tlist, + true); + /* - * Otherwise, just replace the subplan's flat tlist with - * the desired tlist. 
+ * This is a bit tricky: we build a sort node even if we don't + * really have to sort. Even when no explicit sort is needed, + * we need to have suitable resjunk items added to the input + * plan's tlist for any partitioning or ordering columns that + * aren't plain Vars. Furthermore, this way we can use + * existing infrastructure to identify which input columns are + * the interesting ones. */ - result_plan->targetlist = sub_tlist; + if (window_pathkeys) + { + Sort *sort_plan; + + sort_plan = make_sort_from_pathkeys(root, + result_plan, + window_pathkeys, + -1.0); + if (!pathkeys_contained_in(window_pathkeys, + current_pathkeys)) + { + /* we do indeed need to sort */ + result_plan = (Plan *) sort_plan; + current_pathkeys = window_pathkeys; + } + /* In either case, extract the per-column information */ + get_column_info_for_window(root, wc, tlist, + sort_plan->numCols, + sort_plan->sortColIdx, + &partNumCols, + &partColIdx, + &partOperators, + &ordNumCols, + &ordColIdx, + &ordOperators); + } + else + { + /* empty window specification, nothing to sort */ + partNumCols = 0; + partColIdx = NULL; + partOperators = NULL; + ordNumCols = 0; + ordColIdx = NULL; + ordOperators = NULL; + } + + if (lnext(l)) + { + /* Add the current WindowFuncs to the running tlist */ + window_tlist = add_to_flat_tlist(window_tlist, + wflists->windowFuncs[wc->winref]); + } + else + { + /* Install the original tlist in the topmost WindowAgg */ + window_tlist = tlist; + } + + /* ... and make the WindowAgg plan node */ + result_plan = (Plan *) + make_windowagg(root, + (List *) copyObject(window_tlist), + list_length(wflists->windowFuncs[wc->winref]), + wc->winref, + partNumCols, + partColIdx, + partOperators, + ordNumCols, + ordColIdx, + ordOperators, + wc->frameOptions, + result_plan); } - /* - * Also, account for the cost of evaluation of the sub_tlist. - * - * Up to now, we have only been dealing with "flat" tlists, - * containing just Vars. So their evaluation cost is zero - * according to the model used by cost_qual_eval() (or if you - * prefer, the cost is factored into cpu_tuple_cost). Thus we can - * avoid accounting for tlist cost throughout query_planner() and - * subroutines. But now we've inserted a tlist that might contain - * actual operators, sub-selects, etc --- so we'd better account - * for its cost. - * - * Below this point, any tlist eval cost for added-on nodes should - * be accounted for as we create those nodes. Presently, of the - * node types we can add on, only Agg and Group project new tlists - * (the rest just copy their input tuples) --- so make_agg() and - * make_group() are responsible for computing the added cost. - */ - cost_qual_eval(&tlist_cost, sub_tlist); - result_plan->startup_cost += tlist_cost.startup; - result_plan->total_cost += tlist_cost.startup + - tlist_cost.per_tuple * result_plan->plan_rows; } + } /* end of if (setOperations) */ + + /* + * If there is a DISTINCT clause, add the necessary node(s). + */ + if (parse->distinctClause) + { + double dNumDistinctRows; + long numDistinctRows; + bool use_hashed_distinct; + bool can_sort; + bool can_hash; + + /* + * If there was grouping or aggregation, use the current number of + * rows as the estimated number of DISTINCT rows (ie, assume the + * result was already mostly unique). If not, use the number of + * distinct-groups calculated by query_planner. 
+ */ + if (parse->groupClause || root->hasHavingQual || parse->hasAggs) + dNumDistinctRows = result_plan->plan_rows; else - { - /* - * Since we're using query_planner's tlist and not the one - * make_subplanTargetList calculated, we have to refigure - * any grouping-column indexes make_subplanTargetList computed. - */ - locate_grouping_columns(parse, tlist, result_plan->targetlist, - groupColIdx); - } + dNumDistinctRows = dNumGroups; + + /* Also convert to long int --- but 'ware overflow! */ + numDistinctRows = (long) Min(dNumDistinctRows, (double) LONG_MAX); /* - * Insert AGG or GROUP node if needed, plus an explicit sort step - * if necessary. - * - * HAVING clause, if any, becomes qual of the Agg node + * If we have a sortable DISTINCT ON clause, we always use sorting. + * This enforces the expected behavior of DISTINCT ON. */ - if (use_hashed_grouping) - { - /* Hashed aggregate plan --- no sort needed */ - result_plan = (Plan *) make_agg(parse, - tlist, - (List *) parse->havingQual, - AGG_HASHED, - numGroupCols, - groupColIdx, - numGroups, - numAggs, - result_plan); - /* Hashed aggregation produces randomly-ordered results */ - current_pathkeys = NIL; - } - else if (parse->hasAggs) + can_sort = grouping_is_sortable(parse->distinctClause); + if (can_sort && parse->hasDistinctOn) + use_hashed_distinct = false; + else { - /* Plain aggregate plan --- sort if needed */ - AggStrategy aggstrategy; - - if (parse->groupClause) + can_hash = grouping_is_hashable(parse->distinctClause); + if (can_hash && can_sort) { - if (!pathkeys_contained_in(group_pathkeys, current_pathkeys)) - { - result_plan = make_groupsortplan(parse, - parse->groupClause, - groupColIdx, - result_plan); - current_pathkeys = group_pathkeys; - } - aggstrategy = AGG_SORTED; - /* - * The AGG node will not change the sort ordering of its - * groups, so current_pathkeys describes the result too. - */ + /* we have a meaningful choice to make ... */ + use_hashed_distinct = + choose_hashed_distinct(root, + result_plan, current_pathkeys, + tuple_fraction, limit_tuples, + dNumDistinctRows); } + else if (can_hash) + use_hashed_distinct = true; + else if (can_sort) + use_hashed_distinct = false; else { - aggstrategy = AGG_PLAIN; - /* Result will be only one row anyway; no sort order */ - current_pathkeys = NIL; + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("could not implement DISTINCT"), + errdetail("Some of the datatypes only support hashing, while others only support sorting."))); + use_hashed_distinct = false; /* keep compiler quiet */ } + } - result_plan = (Plan *) make_agg(parse, - tlist, - (List *) parse->havingQual, - aggstrategy, - numGroupCols, - groupColIdx, - numGroups, - numAggs, + if (use_hashed_distinct) + { + /* Hashed aggregate plan --- no sort needed */ + result_plan = (Plan *) make_agg(root, + result_plan->targetlist, + NIL, + AGG_HASHED, + list_length(parse->distinctClause), + extract_grouping_cols(parse->distinctClause, + result_plan->targetlist), + extract_grouping_ops(parse->distinctClause), + numDistinctRows, + 0, result_plan); + /* Hashed aggregation produces randomly-ordered results */ + current_pathkeys = NIL; } else { /* - * If there are no Aggs, we shouldn't have any HAVING qual anymore + * Use a Unique node to implement DISTINCT. Add an explicit sort + * if we couldn't make the path come out the way the Unique node + * needs it. If we do have to sort, always sort by the more + * rigorous of DISTINCT and ORDER BY, to avoid a second sort + * below. 
However, for regular DISTINCT, don't sort now if we + * don't have to --- sorting afterwards will likely be cheaper, + * and also has the possibility of optimizing via LIMIT. But + * for DISTINCT ON, we *must* force the final sort now, else + * it won't have the desired behavior. */ - Assert(parse->havingQual == NULL); + List *needed_pathkeys; - /* - * If we have a GROUP BY clause, insert a group node (plus the - * appropriate sort node, if necessary). - */ - if (parse->groupClause) + if (parse->hasDistinctOn && + list_length(root->distinct_pathkeys) < + list_length(root->sort_pathkeys)) + needed_pathkeys = root->sort_pathkeys; + else + needed_pathkeys = root->distinct_pathkeys; + + if (!pathkeys_contained_in(needed_pathkeys, current_pathkeys)) { - /* - * Add an explicit sort if we couldn't make the path come out - * the way the GROUP node needs it. - */ - if (!pathkeys_contained_in(group_pathkeys, current_pathkeys)) + if (list_length(root->distinct_pathkeys) >= + list_length(root->sort_pathkeys)) + current_pathkeys = root->distinct_pathkeys; + else { - result_plan = make_groupsortplan(parse, - parse->groupClause, - groupColIdx, - result_plan); - current_pathkeys = group_pathkeys; + current_pathkeys = root->sort_pathkeys; + /* Assert checks that parser didn't mess up... */ + Assert(pathkeys_contained_in(root->distinct_pathkeys, + current_pathkeys)); } - result_plan = (Plan *) make_group(parse, - tlist, - numGroupCols, - groupColIdx, - dNumGroups, - result_plan); - /* The Group node won't change sort ordering */ + result_plan = (Plan *) make_sort_from_pathkeys(root, + result_plan, + current_pathkeys, + -1.0); } + + result_plan = (Plan *) make_unique(result_plan, + parse->distinctClause); + result_plan->plan_rows = dNumDistinctRows; + /* The Unique node won't change sort ordering */ } - } /* end of if (setOperations) */ + } /* - * If we were not able to make the plan come out in the right order, - * add an explicit sort step. + * If ORDER BY was given and we were not able to make the plan come out in + * the right order, add an explicit sort step. */ if (parse->sortClause) { - if (!pathkeys_contained_in(sort_pathkeys, current_pathkeys)) + if (!pathkeys_contained_in(root->sort_pathkeys, current_pathkeys)) { - result_plan = (Plan *) make_sort_from_sortclauses(parse, - tlist, - result_plan, - parse->sortClause); - current_pathkeys = sort_pathkeys; + result_plan = (Plan *) make_sort_from_pathkeys(root, + result_plan, + root->sort_pathkeys, + limit_tuples); + current_pathkeys = root->sort_pathkeys; } } /* - * If there is a DISTINCT clause, add the UNIQUE node. + * Finally, if there is a LIMIT/OFFSET clause, add the LIMIT node. */ - if (parse->distinctClause) + if (parse->limitCount || parse->limitOffset) + { + result_plan = (Plan *) make_limit(result_plan, + parse->limitOffset, + parse->limitCount, + offset_est, + count_est); + } + + /* + * Deal with the RETURNING clause if any. It's convenient to pass the + * returningList through setrefs.c now rather than at top level (if we + * waited, handling inherited UPDATE/DELETE would be much harder). 
+ */ + if (parse->returningList) + { + List *rlist; + + Assert(parse->resultRelation); + rlist = set_returning_clause_references(root->glob, + parse->returningList, + result_plan, + parse->resultRelation); + root->returningLists = list_make1(rlist); + } + else + root->returningLists = NIL; + + /* Compute result-relations list if needed */ + if (parse->resultRelation) + root->resultRelations = list_make1_int(parse->resultRelation); + else + root->resultRelations = NIL; + + /* + * Return the actual output ordering in query_pathkeys for possible use by + * an outer query level. + */ + root->query_pathkeys = current_pathkeys; + + return result_plan; +} + +/* + * Detect whether a plan node is a "dummy" plan created when a relation + * is deemed not to need scanning due to constraint exclusion. + * + * Currently, such dummy plans are Result nodes with constant FALSE + * filter quals. + */ +static bool +is_dummy_plan(Plan *plan) +{ + if (IsA(plan, Result)) + { + List *rcqual = (List *) ((Result *) plan)->resconstantqual; + + if (list_length(rcqual) == 1) + { + Const *constqual = (Const *) linitial(rcqual); + + if (constqual && IsA(constqual, Const)) + { + if (!constqual->constisnull && + !DatumGetBool(constqual->constvalue)) + return true; + } + } + } + return false; +} + +/* + * preprocess_limit - do pre-estimation for LIMIT and/or OFFSET clauses + * + * We try to estimate the values of the LIMIT/OFFSET clauses, and pass the + * results back in *count_est and *offset_est. These variables are set to + * 0 if the corresponding clause is not present, and -1 if it's present + * but we couldn't estimate the value for it. (The "0" convention is OK + * for OFFSET but a little bit bogus for LIMIT: effectively we estimate + * LIMIT 0 as though it were LIMIT 1. But this is in line with the planner's + * usual practice of never estimating less than one row.) These values will + * be passed to make_limit, which see if you change this code. + * + * The return value is the suitably adjusted tuple_fraction to use for + * planning the query. This adjustment is not overridable, since it reflects + * plan actions that grouping_planner() will certainly take, not assumptions + * about context. + */ +static double +preprocess_limit(PlannerInfo *root, double tuple_fraction, + int64 *offset_est, int64 *count_est) +{ + Query *parse = root->parse; + Node *est; + double limit_fraction; + + /* Should not be called unless LIMIT or OFFSET */ + Assert(parse->limitCount || parse->limitOffset); + + /* + * Try to obtain the clause values. We use estimate_expression_value + * primarily because it can sometimes do something useful with Params. 
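The 0 / -1 convention for *count_est and *offset_est is easy to misread when looking at make_limit and the fraction math further down, so here is a minimal standalone sketch of just that classification. It uses plain C and hypothetical names (ClauseEstimate, classify_count, classify_offset) instead of the planner's Const / estimate_expression_value machinery; a NULL constant (LIMIT ALL, or a null offset) is folded into the "not present" case, as the code below does.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical stand-in for an estimated LIMIT/OFFSET clause: absent,
 * reduced to a known 64-bit constant, or present but unestimatable
 * (for example an uncorrelated Param).
 */
typedef struct ClauseEstimate
{
	bool		present;
	bool		known;			/* meaningful only if present */
	int64_t		value;			/* meaningful only if known */
} ClauseEstimate;

/* Mirrors the convention used for *count_est above */
static int64_t
classify_count(ClauseEstimate c)
{
	if (!c.present)
		return 0;				/* not present (includes LIMIT ALL) */
	if (!c.known)
		return -1;				/* present but unestimatable */
	if (c.value <= 0)
		return 1;				/* never estimate less than one row */
	return c.value;
}

/* Mirrors the convention used for *offset_est above */
static int64_t
classify_offset(ClauseEstimate c)
{
	if (!c.present)
		return 0;
	if (!c.known)
		return -1;
	return (c.value < 0) ? 0 : c.value;	/* negative offset behaves as 0 */
}

int
main(void)
{
	ClauseEstimate absent = {false, false, 0};
	ClauseEstimate param = {true, false, 0};
	ClauseEstimate ten = {true, true, 10};

	printf("count:  absent=%lld param=%lld ten=%lld\n",
		   (long long) classify_count(absent),
		   (long long) classify_count(param),
		   (long long) classify_count(ten));
	printf("offset: absent=%lld param=%lld ten=%lld\n",
		   (long long) classify_offset(absent),
		   (long long) classify_offset(param),
		   (long long) classify_offset(ten));
	return 0;
}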
+ */ + if (parse->limitCount) + { + est = estimate_expression_value(root, parse->limitCount); + if (est && IsA(est, Const)) + { + if (((Const *) est)->constisnull) + { + /* NULL indicates LIMIT ALL, ie, no limit */ + *count_est = 0; /* treat as not present */ + } + else + { + *count_est = DatumGetInt64(((Const *) est)->constvalue); + if (*count_est <= 0) + *count_est = 1; /* force to at least 1 */ + } + } + else + *count_est = -1; /* can't estimate */ + } + else + *count_est = 0; /* not present */ + + if (parse->limitOffset) + { + est = estimate_expression_value(root, parse->limitOffset); + if (est && IsA(est, Const)) + { + if (((Const *) est)->constisnull) + { + /* Treat NULL as no offset; the executor will too */ + *offset_est = 0; /* treat as not present */ + } + else + { + *offset_est = DatumGetInt64(((Const *) est)->constvalue); + if (*offset_est < 0) + *offset_est = 0; /* less than 0 is same as 0 */ + } + } + else + *offset_est = -1; /* can't estimate */ + } + else + *offset_est = 0; /* not present */ + + if (*count_est != 0) + { + /* + * A LIMIT clause limits the absolute number of tuples returned. + * However, if it's not a constant LIMIT then we have to guess; for + * lack of a better idea, assume 10% of the plan's result is wanted. + */ + if (*count_est < 0 || *offset_est < 0) + { + /* LIMIT or OFFSET is an expression ... punt ... */ + limit_fraction = 0.10; + } + else + { + /* LIMIT (plus OFFSET, if any) is max number of tuples needed */ + limit_fraction = (double) *count_est + (double) *offset_est; + } + + /* + * If we have absolute limits from both caller and LIMIT, use the + * smaller value; likewise if they are both fractional. If one is + * fractional and the other absolute, we can't easily determine which + * is smaller, but we use the heuristic that the absolute will usually + * be smaller. + */ + if (tuple_fraction >= 1.0) + { + if (limit_fraction >= 1.0) + { + /* both absolute */ + tuple_fraction = Min(tuple_fraction, limit_fraction); + } + else + { + /* caller absolute, limit fractional; use caller's value */ + } + } + else if (tuple_fraction > 0.0) + { + if (limit_fraction >= 1.0) + { + /* caller fractional, limit absolute; use limit */ + tuple_fraction = limit_fraction; + } + else + { + /* both fractional */ + tuple_fraction = Min(tuple_fraction, limit_fraction); + } + } + else + { + /* no info from caller, just use limit */ + tuple_fraction = limit_fraction; + } + } + else if (*offset_est != 0 && tuple_fraction > 0.0) { - result_plan = (Plan *) make_unique(tlist, result_plan, - parse->distinctClause); /* - * If there was grouping or aggregation, leave plan_rows as-is - * (ie, assume the result was already mostly unique). If not, - * it's reasonable to assume the UNIQUE filter has effects - * comparable to GROUP BY. + * We have an OFFSET but no LIMIT. This acts entirely differently + * from the LIMIT case: here, we need to increase rather than decrease + * the caller's tuple_fraction, because the OFFSET acts to cause more + * tuples to be fetched instead of fewer. This only matters if we got + * a tuple_fraction > 0, however. + * + * As above, use 10% if OFFSET is present but unestimatable. */ - if (!parse->groupClause && !parse->hasAggs) + if (*offset_est < 0) + limit_fraction = 0.10; + else + limit_fraction = (double) *offset_est; + + /* + * If we have absolute counts from both caller and OFFSET, add them + * together; likewise if they are both fractional. 
If one is + * fractional and the other absolute, we want to take the larger, and + * we heuristically assume that's the fractional one. + */ + if (tuple_fraction >= 1.0) + { + if (limit_fraction >= 1.0) + { + /* both absolute, so add them together */ + tuple_fraction += limit_fraction; + } + else + { + /* caller absolute, limit fractional; use limit */ + tuple_fraction = limit_fraction; + } + } + else { - List *distinctExprs; + if (limit_fraction >= 1.0) + { + /* caller fractional, limit absolute; use caller's value */ + } + else + { + /* both fractional, so add them together */ + tuple_fraction += limit_fraction; + if (tuple_fraction >= 1.0) + tuple_fraction = 0.0; /* assume fetch all */ + } + } + } - distinctExprs = get_sortgrouplist_exprs(parse->distinctClause, - parse->targetList); - result_plan->plan_rows = estimate_num_groups(parse, - distinctExprs, - result_plan->plan_rows); + return tuple_fraction; +} + + +/* + * preprocess_groupclause - do preparatory work on GROUP BY clause + * + * The idea here is to adjust the ordering of the GROUP BY elements + * (which in itself is semantically insignificant) to match ORDER BY, + * thereby allowing a single sort operation to both implement the ORDER BY + * requirement and set up for a Unique step that implements GROUP BY. + * + * In principle it might be interesting to consider other orderings of the + * GROUP BY elements, which could match the sort ordering of other + * possible plans (eg an indexscan) and thereby reduce cost. We don't + * bother with that, though. Hashed grouping will frequently win anyway. + * + * Note: we need no comparable processing of the distinctClause because + * the parser already enforced that that matches ORDER BY. + */ +static void +preprocess_groupclause(PlannerInfo *root) +{ + Query *parse = root->parse; + List *new_groupclause; + bool partial_match; + ListCell *sl; + ListCell *gl; + + /* If no ORDER BY, nothing useful to do here */ + if (parse->sortClause == NIL) + return; + + /* + * Scan the ORDER BY clause and construct a list of matching GROUP BY + * items, but only as far as we can make a matching prefix. + * + * This code assumes that the sortClause contains no duplicate items. + */ + new_groupclause = NIL; + foreach(sl, parse->sortClause) + { + SortGroupClause *sc = (SortGroupClause *) lfirst(sl); + + foreach(gl, parse->groupClause) + { + SortGroupClause *gc = (SortGroupClause *) lfirst(gl); + + if (equal(gc, sc)) + { + new_groupclause = lappend(new_groupclause, gc); + break; + } } + if (gl == NULL) + break; /* no match, so stop scanning */ } + /* Did we match all of the ORDER BY list, or just some of it? */ + partial_match = (sl != NULL); + + /* If no match at all, no point in reordering GROUP BY */ + if (new_groupclause == NIL) + return; + /* - * Finally, if there is a LIMIT/OFFSET clause, add the LIMIT node. + * Add any remaining GROUP BY items to the new list, but only if we + * were able to make a complete match. In other words, we only + * rearrange the GROUP BY list if the result is that one list is a + * prefix of the other --- otherwise there's no possibility of a + * common sort. Also, give up if there are any non-sortable GROUP BY + * items, since then there's no hope anyway. 
*/ - if (parse->limitOffset || parse->limitCount) + foreach(gl, parse->groupClause) { - result_plan = (Plan *) make_limit(tlist, result_plan, - parse->limitOffset, - parse->limitCount); + SortGroupClause *gc = (SortGroupClause *) lfirst(gl); + + if (list_member_ptr(new_groupclause, gc)) + continue; /* it matched an ORDER BY item */ + if (partial_match) + return; /* give up, no common sort possible */ + if (!OidIsValid(gc->sortop)) + return; /* give up, GROUP BY can't be sorted */ + new_groupclause = lappend(new_groupclause, gc); } + /* Success --- install the rearranged GROUP BY list */ + Assert(list_length(parse->groupClause) == list_length(new_groupclause)); + parse->groupClause = new_groupclause; +} + +/* + * choose_hashed_grouping - should we use hashed grouping? + * + * Note: this is only applied when both alternatives are actually feasible. + */ +static bool +choose_hashed_grouping(PlannerInfo *root, + double tuple_fraction, double limit_tuples, + Path *cheapest_path, Path *sorted_path, + double dNumGroups, AggClauseCounts *agg_counts) +{ + int numGroupCols = list_length(root->parse->groupClause); + double cheapest_path_rows; + int cheapest_path_width; + Size hashentrysize; + List *target_pathkeys; + List *current_pathkeys; + Path hashed_p; + Path sorted_p; + + /* Prefer sorting when enable_hashagg is off */ + if (!enable_hashagg) + return false; + /* - * Return the actual output ordering in query_pathkeys for possible - * use by an outer query level. + * Don't do it if it doesn't look like the hashtable will fit into + * work_mem. + * + * Beware here of the possibility that cheapest_path->parent is NULL. This + * could happen if user does something silly like SELECT 'foo' GROUP BY 1; */ - parse->query_pathkeys = current_pathkeys; + if (cheapest_path->parent) + { + cheapest_path_rows = cheapest_path->parent->rows; + cheapest_path_width = cheapest_path->parent->width; + } + else + { + cheapest_path_rows = 1; /* assume non-set result */ + cheapest_path_width = 100; /* arbitrary */ + } - return result_plan; + /* Estimate per-hash-entry space at tuple width... */ + hashentrysize = MAXALIGN(cheapest_path_width) + MAXALIGN(sizeof(MinimalTupleData)); + /* plus space for pass-by-ref transition values... */ + hashentrysize += agg_counts->transitionSpace; + /* plus the per-hash-entry overhead */ + hashentrysize += hash_agg_entry_size(agg_counts->numAggs); + + if (hashentrysize * dNumGroups > work_mem * 1024L) + return false; + + /* + * When we have both GROUP BY and DISTINCT, use the more-rigorous of + * DISTINCT and ORDER BY as the assumed required output sort order. + * This is an oversimplification because the DISTINCT might get + * implemented via hashing, but it's not clear that the case is common + * enough (or that our estimates are good enough) to justify trying to + * solve it exactly. + */ + if (list_length(root->distinct_pathkeys) > + list_length(root->sort_pathkeys)) + target_pathkeys = root->distinct_pathkeys; + else + target_pathkeys = root->sort_pathkeys; + + /* + * See if the estimated cost is no more than doing it the other way. While + * avoiding the need for sorted input is usually a win, the fact that the + * output won't be sorted may be a loss; so we need to do an actual cost + * comparison. + * + * We need to consider cheapest_path + hashagg [+ final sort] versus + * either cheapest_path [+ sort] + group or agg [+ final sort] or + * presorted_path + group or agg [+ final sort] where brackets indicate a + * step that may not be needed. 
We assume query_planner() will have + * returned a presorted path only if it's a winner compared to + * cheapest_path for this purpose. + * + * These path variables are dummies that just hold cost fields; we don't + * make actual Paths for these steps. + */ + cost_agg(&hashed_p, root, AGG_HASHED, agg_counts->numAggs, + numGroupCols, dNumGroups, + cheapest_path->startup_cost, cheapest_path->total_cost, + cheapest_path_rows); + /* Result of hashed agg is always unsorted */ + if (target_pathkeys) + cost_sort(&hashed_p, root, target_pathkeys, hashed_p.total_cost, + dNumGroups, cheapest_path_width, limit_tuples); + + if (sorted_path) + { + sorted_p.startup_cost = sorted_path->startup_cost; + sorted_p.total_cost = sorted_path->total_cost; + current_pathkeys = sorted_path->pathkeys; + } + else + { + sorted_p.startup_cost = cheapest_path->startup_cost; + sorted_p.total_cost = cheapest_path->total_cost; + current_pathkeys = cheapest_path->pathkeys; + } + if (!pathkeys_contained_in(root->group_pathkeys, current_pathkeys)) + { + cost_sort(&sorted_p, root, root->group_pathkeys, sorted_p.total_cost, + cheapest_path_rows, cheapest_path_width, -1.0); + current_pathkeys = root->group_pathkeys; + } + + if (root->parse->hasAggs) + cost_agg(&sorted_p, root, AGG_SORTED, agg_counts->numAggs, + numGroupCols, dNumGroups, + sorted_p.startup_cost, sorted_p.total_cost, + cheapest_path_rows); + else + cost_group(&sorted_p, root, numGroupCols, dNumGroups, + sorted_p.startup_cost, sorted_p.total_cost, + cheapest_path_rows); + /* The Agg or Group node will preserve ordering */ + if (target_pathkeys && + !pathkeys_contained_in(target_pathkeys, current_pathkeys)) + cost_sort(&sorted_p, root, target_pathkeys, sorted_p.total_cost, + dNumGroups, cheapest_path_width, limit_tuples); + + /* + * Now make the decision using the top-level tuple fraction. First we + * have to convert an absolute count (LIMIT) into fractional form. + */ + if (tuple_fraction >= 1.0) + tuple_fraction /= dNumGroups; + + if (compare_fractional_path_costs(&hashed_p, &sorted_p, + tuple_fraction) < 0) + { + /* Hashed is cheaper, so use it */ + return true; + } + return false; } /* - * hash_safe_grouping - are grouping operators hashable? + * choose_hashed_distinct - should we use hashing for DISTINCT? + * + * This is fairly similar to choose_hashed_grouping, but there are enough + * differences that it doesn't seem worth trying to unify the two functions. + * + * But note that making the two choices independently is a bit bogus in + * itself. If the two could be combined into a single choice operation + * it'd probably be better, but that seems far too unwieldy to be practical, + * especially considering that the combination of GROUP BY and DISTINCT + * isn't very common in real queries. By separating them, we are giving + * extra preference to using a sorting implementation when a common sort key + * is available ... and that's not necessarily wrong anyway. * - * We assume hashed aggregation will work if the datatype's equality operator - * is marked hashjoinable. + * Note: this is only applied when both alternatives are actually feasible. 
*/ static bool -hash_safe_grouping(Query *parse) +choose_hashed_distinct(PlannerInfo *root, + Plan *input_plan, List *input_pathkeys, + double tuple_fraction, double limit_tuples, + double dNumDistinctRows) { - List *gl; + int numDistinctCols = list_length(root->parse->distinctClause); + Size hashentrysize; + List *current_pathkeys; + List *needed_pathkeys; + Path hashed_p; + Path sorted_p; - foreach(gl, parse->groupClause) + /* Prefer sorting when enable_hashagg is off */ + if (!enable_hashagg) + return false; + + /* + * Don't do it if it doesn't look like the hashtable will fit into + * work_mem. + */ + hashentrysize = MAXALIGN(input_plan->plan_width) + MAXALIGN(sizeof(MinimalTupleData)); + + if (hashentrysize * dNumDistinctRows > work_mem * 1024L) + return false; + + /* + * See if the estimated cost is no more than doing it the other way. While + * avoiding the need for sorted input is usually a win, the fact that the + * output won't be sorted may be a loss; so we need to do an actual cost + * comparison. + * + * We need to consider input_plan + hashagg [+ final sort] versus + * input_plan [+ sort] + group [+ final sort] where brackets indicate + * a step that may not be needed. + * + * These path variables are dummies that just hold cost fields; we don't + * make actual Paths for these steps. + */ + cost_agg(&hashed_p, root, AGG_HASHED, 0, + numDistinctCols, dNumDistinctRows, + input_plan->startup_cost, input_plan->total_cost, + input_plan->plan_rows); + /* + * Result of hashed agg is always unsorted, so if ORDER BY is present + * we need to charge for the final sort. + */ + if (root->parse->sortClause) + cost_sort(&hashed_p, root, root->sort_pathkeys, hashed_p.total_cost, + dNumDistinctRows, input_plan->plan_width, limit_tuples); + + /* + * Now for the GROUP case. See comments in grouping_planner about the + * sorting choices here --- this code should match that code. + */ + sorted_p.startup_cost = input_plan->startup_cost; + sorted_p.total_cost = input_plan->total_cost; + current_pathkeys = input_pathkeys; + if (root->parse->hasDistinctOn && + list_length(root->distinct_pathkeys) < + list_length(root->sort_pathkeys)) + needed_pathkeys = root->sort_pathkeys; + else + needed_pathkeys = root->distinct_pathkeys; + if (!pathkeys_contained_in(needed_pathkeys, current_pathkeys)) + { + if (list_length(root->distinct_pathkeys) >= + list_length(root->sort_pathkeys)) + current_pathkeys = root->distinct_pathkeys; + else + current_pathkeys = root->sort_pathkeys; + cost_sort(&sorted_p, root, current_pathkeys, sorted_p.total_cost, + input_plan->plan_rows, input_plan->plan_width, -1.0); + } + cost_group(&sorted_p, root, numDistinctCols, dNumDistinctRows, + sorted_p.startup_cost, sorted_p.total_cost, + input_plan->plan_rows); + if (root->parse->sortClause && + !pathkeys_contained_in(root->sort_pathkeys, current_pathkeys)) + cost_sort(&sorted_p, root, root->sort_pathkeys, sorted_p.total_cost, + dNumDistinctRows, input_plan->plan_width, limit_tuples); + + /* + * Now make the decision using the top-level tuple fraction. First we + * have to convert an absolute count (LIMIT) into fractional form. 
+ */ + if (tuple_fraction >= 1.0) + tuple_fraction /= dNumDistinctRows; + + if (compare_fractional_path_costs(&hashed_p, &sorted_p, + tuple_fraction) < 0) { - GroupClause *grpcl = (GroupClause *) lfirst(gl); - TargetEntry *tle = get_sortgroupclause_tle(grpcl, parse->targetList); - Operator optup; - bool oprcanhash; - - optup = equality_oper(tle->resdom->restype, false); - oprcanhash = ((Form_pg_operator) GETSTRUCT(optup))->oprcanhash; - ReleaseSysCache(optup); - if (!oprcanhash) - return false; + /* Hashed is cheaper, so use it */ + return true; } - return true; + return false; } /*--------------- * make_subplanTargetList * Generate appropriate target list when grouping is required. * - * When grouping_planner inserts Aggregate or Group plan nodes above - * the result of query_planner, we typically want to pass a different + * When grouping_planner inserts Aggregate, Group, or Result plan nodes + * above the result of query_planner, we typically want to pass a different * target list to query_planner than the outer plan nodes should have. * This routine generates the correct target list for the subplan. * * The initial target list passed from the parser already contains entries * for all ORDER BY and GROUP BY expressions, but it will not have entries * for variables used only in HAVING clauses; so we need to add those - * variables to the subplan target list. Also, if we are doing either - * grouping or aggregation, we flatten all expressions except GROUP BY items - * into their component variables; the other expressions will be computed by - * the inserted nodes rather than by the subplan. For example, - * given a query like + * variables to the subplan target list. Also, we flatten all expressions + * except GROUP BY items into their component variables; the other expressions + * will be computed by the inserted nodes rather than by the subplan. + * For example, given a query like * SELECT a+b,SUM(c+d) FROM table GROUP BY a+b; * we want to pass this targetlist to the subplan: * a,b,c,d,a+b @@ -1324,14 +2181,13 @@ hash_safe_grouping(Query *parse) * pass down only c,d,a+b, but it's not really worth the trouble to * eliminate simple var references from the subplan. We will avoid doing * the extra computation to recompute a+b at the outer level; see - * replace_vars_with_subplan_refs() in setrefs.c.) + * fix_upper_expr() in setrefs.c.) * * If we are grouping or aggregating, *and* there are no non-Var grouping * expressions, then the returned tlist is effectively dummy; we do not * need to force it to be evaluated, because all the Vars it contains * should be present in the output of query_planner anyway. * - * 'parse' is the query being processed. * 'tlist' is the query's target list. * 'groupColIdx' receives an array of column numbers for the GROUP BY * expressions (if there are any) in the subplan's target list. @@ -1342,11 +2198,12 @@ hash_safe_grouping(Query *parse) *--------------- */ static List * -make_subplanTargetList(Query *parse, +make_subplanTargetList(PlannerInfo *root, List *tlist, AttrNumber **groupColIdx, bool *need_tlist_eval) { + Query *parse = root->parse; List *sub_tlist; List *extravars; int numCols; @@ -1354,10 +2211,11 @@ make_subplanTargetList(Query *parse, *groupColIdx = NULL; /* - * If we're not grouping or aggregating, nothing to do here; + * If we're not grouping or aggregating, there's nothing to do here; * query_planner should receive the unmodified target list. 
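The a+b / SUM(c+d) example in the header comment above can be made concrete with a toy version of the sub_tlist construction: start from the flattened Vars, append any non-Var GROUP BY expression that is not already present, and record its 1-based position the way grpColIdx[] does. Everything here (string targetlist entries, the fixed-size array) is a simplification for illustration only, not TargetEntry handling.

#include <stdio.h>
#include <string.h>

/*
 * Toy version of the construction for
 *     SELECT a+b, SUM(c+d) FROM tab GROUP BY a+b;
 * Targetlist entries are plain strings here; the planner of course works
 * with expression trees.
 */
int
main(void)
{
	const char *sub_tlist[8] = {"a", "b", "c", "d"};	/* flattened Vars */
	int			len = 4;
	const char *groupexpr = "a+b";	/* the one GROUP BY expression */
	int			grpColIdx1 = 0;	/* 1-based resno, as in grpColIdx[] */

	for (int i = 0; i < len; i++)
	{
		if (strcmp(sub_tlist[i], groupexpr) == 0)
		{
			grpColIdx1 = i + 1;	/* already present (simple Var case) */
			break;
		}
	}
	if (grpColIdx1 == 0)
	{
		sub_tlist[len++] = groupexpr;	/* tlist is "not flat anymore" */
		grpColIdx1 = len;
	}

	printf("sub_tlist:");
	for (int i = 0; i < len; i++)
		printf(" %s", sub_tlist[i]);
	printf("\ngroup column resno = %d\n", grpColIdx1);
	return 0;
}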
*/ - if (!parse->hasAggs && !parse->groupClause && !parse->havingQual) + if (!parse->hasAggs && !parse->groupClause && !root->hasHavingQual && + !parse->hasWindowFuncs) { *need_tlist_eval = true; return tlist; @@ -1365,59 +2223,60 @@ make_subplanTargetList(Query *parse, /* * Otherwise, start with a "flattened" tlist (having just the vars - * mentioned in the targetlist and HAVING qual --- but not upper- - * level Vars; they will be replaced by Params later on). + * mentioned in the targetlist and HAVING qual --- but not upper-level + * Vars; they will be replaced by Params later on). Note this includes + * vars used in resjunk items, so we are covering the needs of ORDER BY + * and window specifications. */ sub_tlist = flatten_tlist(tlist); - extravars = pull_var_clause(parse->havingQual, false); + extravars = pull_var_clause(parse->havingQual, true); sub_tlist = add_to_flat_tlist(sub_tlist, extravars); - freeList(extravars); + list_free(extravars); *need_tlist_eval = false; /* only eval if not flat tlist */ /* * If grouping, create sub_tlist entries for all GROUP BY expressions - * (GROUP BY items that are simple Vars should be in the list - * already), and make an array showing where the group columns are in - * the sub_tlist. + * (GROUP BY items that are simple Vars should be in the list already), + * and make an array showing where the group columns are in the sub_tlist. */ - numCols = length(parse->groupClause); + numCols = list_length(parse->groupClause); if (numCols > 0) { int keyno = 0; AttrNumber *grpColIdx; - List *gl; + ListCell *gl; grpColIdx = (AttrNumber *) palloc(sizeof(AttrNumber) * numCols); *groupColIdx = grpColIdx; foreach(gl, parse->groupClause) { - GroupClause *grpcl = (GroupClause *) lfirst(gl); + SortGroupClause *grpcl = (SortGroupClause *) lfirst(gl); Node *groupexpr = get_sortgroupclause_expr(grpcl, tlist); - TargetEntry *te = NULL; - List *sl; + TargetEntry *te; - /* Find or make a matching sub_tlist entry */ - foreach(sl, sub_tlist) - { - te = (TargetEntry *) lfirst(sl); - if (equal(groupexpr, te->expr)) - break; - } - if (!sl) + /* + * Find or make a matching sub_tlist entry. If the groupexpr + * isn't a Var, no point in searching. (Note that the parser + * won't make multiple groupClause entries for the same TLE.) + */ + if (groupexpr && IsA(groupexpr, Var)) + te = tlist_member(groupexpr, sub_tlist); + else + te = NULL; + + if (!te) { - te = makeTargetEntry(makeResdom(length(sub_tlist) + 1, - exprType(groupexpr), - exprTypmod(groupexpr), - NULL, - false), - (Expr *) groupexpr); + te = makeTargetEntry((Expr *) groupexpr, + list_length(sub_tlist) + 1, + NULL, + false); sub_tlist = lappend(sub_tlist, te); - *need_tlist_eval = true; /* it's not flat anymore */ + *need_tlist_eval = true; /* it's not flat anymore */ } /* and save its resno */ - grpColIdx[keyno++] = te->resdom->resno; + grpColIdx[keyno++] = te->resno; } } @@ -1429,95 +2288,40 @@ make_subplanTargetList(Query *parse, * Locate grouping columns in the tlist chosen by query_planner. * * This is only needed if we don't use the sub_tlist chosen by - * make_subplanTargetList. We have to forget the column indexes found - * by that routine and re-locate the grouping vars in the real sub_tlist. + * make_subplanTargetList. We have to forget the column indexes found + * by that routine and re-locate the grouping exprs in the real sub_tlist. 
*/ static void -locate_grouping_columns(Query *parse, +locate_grouping_columns(PlannerInfo *root, List *tlist, List *sub_tlist, AttrNumber *groupColIdx) { int keyno = 0; - List *gl; + ListCell *gl; /* * No work unless grouping. */ - if (!parse->groupClause) + if (!root->parse->groupClause) { Assert(groupColIdx == NULL); return; } Assert(groupColIdx != NULL); - foreach(gl, parse->groupClause) + foreach(gl, root->parse->groupClause) { - GroupClause *grpcl = (GroupClause *) lfirst(gl); + SortGroupClause *grpcl = (SortGroupClause *) lfirst(gl); Node *groupexpr = get_sortgroupclause_expr(grpcl, tlist); - TargetEntry *te = NULL; - List *sl; + TargetEntry *te = tlist_member(groupexpr, sub_tlist); - foreach(sl, sub_tlist) - { - te = (TargetEntry *) lfirst(sl); - if (equal(groupexpr, te->expr)) - break; - } - if (!sl) - elog(ERROR, "locate_grouping_columns: failed"); - - groupColIdx[keyno++] = te->resdom->resno; + if (!te) + elog(ERROR, "failed to locate grouping columns"); + groupColIdx[keyno++] = te->resno; } } -/* - * make_groupsortplan - * Add a Sort node to explicitly sort according to the GROUP BY clause. - * - * Note: the Sort node always just takes a copy of the subplan's tlist - * plus ordering information. (This might seem inefficient if the - * subplan contains complex GROUP BY expressions, but in fact Sort - * does not evaluate its targetlist --- it only outputs the same - * tuples in a new order. So the expressions we might be copying - * are just dummies with no extra execution cost.) - */ -static Plan * -make_groupsortplan(Query *parse, - List *groupClause, - AttrNumber *grpColIdx, - Plan *subplan) -{ - List *sort_tlist = new_unsorted_tlist(subplan->targetlist); - int grpno = 0; - int keyno = 0; - List *gl; - - foreach(gl, groupClause) - { - GroupClause *grpcl = (GroupClause *) lfirst(gl); - TargetEntry *te = nth(grpColIdx[grpno] - 1, sort_tlist); - Resdom *resdom = te->resdom; - - /* - * Check for the possibility of duplicate group-by clauses --- - * the parser should have removed 'em, but the Sort executor - * will get terribly confused if any get through! - */ - if (resdom->reskey == 0) - { - /* OK, insert the ordering info needed by the executor. */ - resdom->reskey = ++keyno; - resdom->reskeyop = grpcl->sortop; - } - grpno++; - } - - Assert(keyno > 0); - - return (Plan *) make_sort(parse, sort_tlist, subplan, keyno); -} - /* * postprocess_setop_tlist * Fix up targetlist returned by plan_set_operations(). @@ -1525,13 +2329,14 @@ make_groupsortplan(Query *parse, * We need to transpose sort key info from the orig_tlist into new_tlist. * NOTE: this would not be good enough if we supported resjunk sort keys * for results of set operations --- then, we'd need to project a whole - * new tlist to evaluate the resjunk columns. For now, just elog if we + * new tlist to evaluate the resjunk columns. For now, just ereport if we * find any resjunk columns in orig_tlist. 
*/ static List * postprocess_setop_tlist(List *new_tlist, List *orig_tlist) { - List *l; + ListCell *l; + ListCell *orig_tlist_item = list_head(orig_tlist); foreach(l, new_tlist) { @@ -1539,19 +2344,235 @@ postprocess_setop_tlist(List *new_tlist, List *orig_tlist) TargetEntry *orig_tle; /* ignore resjunk columns in setop result */ - if (new_tle->resdom->resjunk) + if (new_tle->resjunk) continue; - Assert(orig_tlist != NIL); - orig_tle = (TargetEntry *) lfirst(orig_tlist); - orig_tlist = lnext(orig_tlist); - if (orig_tle->resdom->resjunk) - elog(ERROR, "postprocess_setop_tlist: resjunk output columns not implemented"); - Assert(new_tle->resdom->resno == orig_tle->resdom->resno); - Assert(new_tle->resdom->restype == orig_tle->resdom->restype); - new_tle->resdom->ressortgroupref = orig_tle->resdom->ressortgroupref; + Assert(orig_tlist_item != NULL); + orig_tle = (TargetEntry *) lfirst(orig_tlist_item); + orig_tlist_item = lnext(orig_tlist_item); + if (orig_tle->resjunk) /* should not happen */ + elog(ERROR, "resjunk output columns are not implemented"); + Assert(new_tle->resno == orig_tle->resno); + new_tle->ressortgroupref = orig_tle->ressortgroupref; } - if (orig_tlist != NIL) - elog(ERROR, "postprocess_setop_tlist: resjunk output columns not implemented"); + if (orig_tlist_item != NULL) + elog(ERROR, "resjunk output columns are not implemented"); return new_tlist; } + +/* + * select_active_windows + * Create a list of the "active" window clauses (ie, those referenced + * by non-deleted WindowFuncs) in the order they are to be executed. + */ +static List * +select_active_windows(PlannerInfo *root, WindowFuncLists *wflists) +{ + List *result; + List *actives; + ListCell *lc; + + /* First, make a list of the active windows */ + actives = NIL; + foreach(lc, root->parse->windowClause) + { + WindowClause *wc = (WindowClause *) lfirst(lc); + + /* It's only active if wflists shows some related WindowFuncs */ + Assert(wc->winref <= wflists->maxWinRef); + if (wflists->windowFuncs[wc->winref] != NIL) + actives = lappend(actives, wc); + } + + /* + * Now, ensure that windows with identical partitioning/ordering clauses + * are adjacent in the list. This is required by the SQL standard, which + * says that only one sort is to be used for such windows, even if they + * are otherwise distinct (eg, different names or framing clauses). + * + * There is room to be much smarter here, for example detecting whether + * one window's sort keys are a prefix of another's (so that sorting + * for the latter would do for the former), or putting windows first + * that match a sort order available for the underlying query. For the + * moment we are content with meeting the spec. + */ + result = NIL; + while (actives != NIL) + { + WindowClause *wc = (WindowClause *) linitial(actives); + ListCell *prev; + ListCell *next; + + /* Move wc from actives to result */ + actives = list_delete_first(actives); + result = lappend(result, wc); + + /* Now move any matching windows from actives to result */ + prev = NULL; + for (lc = list_head(actives); lc; lc = next) + { + WindowClause *wc2 = (WindowClause *) lfirst(lc); + + next = lnext(lc); + /* framing options are NOT to be compared here! 
*/ + if (equal(wc->partitionClause, wc2->partitionClause) && + equal(wc->orderClause, wc2->orderClause)) + { + actives = list_delete_cell(actives, lc, prev); + result = lappend(result, wc2); + } + else + prev = lc; + } + } + + return result; +} + +/* + * make_pathkeys_for_window + * Create a pathkeys list describing the required input ordering + * for the given WindowClause. + * + * The required ordering is first the PARTITION keys, then the ORDER keys. + * In the future we might try to implement windowing using hashing, in which + * case the ordering could be relaxed, but for now we always sort. + */ +static List * +make_pathkeys_for_window(PlannerInfo *root, WindowClause *wc, + List *tlist, bool canonicalize) +{ + List *window_pathkeys; + List *window_sortclauses; + + /* Throw error if can't sort */ + if (!grouping_is_sortable(wc->partitionClause)) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("could not implement window PARTITION BY"), + errdetail("Window partitioning columns must be of sortable datatypes."))); + if (!grouping_is_sortable(wc->orderClause)) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("could not implement window ORDER BY"), + errdetail("Window ordering columns must be of sortable datatypes."))); + + /* Okay, make the combined pathkeys */ + window_sortclauses = list_concat(list_copy(wc->partitionClause), + list_copy(wc->orderClause)); + window_pathkeys = make_pathkeys_for_sortclauses(root, + window_sortclauses, + tlist, + canonicalize); + list_free(window_sortclauses); + return window_pathkeys; +} + +/*---------- + * get_column_info_for_window + * Get the partitioning/ordering column numbers and equality operators + * for a WindowAgg node. + * + * This depends on the behavior of make_pathkeys_for_window()! + * + * We are given the target WindowClause and an array of the input column + * numbers associated with the resulting pathkeys. In the easy case, there + * are the same number of pathkey columns as partitioning + ordering columns + * and we just have to copy some data around. However, it's possible that + * some of the original partitioning + ordering columns were eliminated as + * redundant during the transformation to pathkeys. (This can happen even + * though the parser gets rid of obvious duplicates. A typical scenario is a + * window specification "PARTITION BY x ORDER BY y" coupled with a clause + * "WHERE x = y" that causes the two sort columns to be recognized as + * redundant.) In that unusual case, we have to work a lot harder to + * determine which keys are significant. + * + * The method used here is a bit brute-force: add the sort columns to a list + * one at a time and note when the resulting pathkey list gets longer. But + * it's a sufficiently uncommon case that a faster way doesn't seem worth + * the amount of code refactoring that'd be needed. 
+ *---------- + */ +static void +get_column_info_for_window(PlannerInfo *root, WindowClause *wc, List *tlist, + int numSortCols, AttrNumber *sortColIdx, + int *partNumCols, + AttrNumber **partColIdx, + Oid **partOperators, + int *ordNumCols, + AttrNumber **ordColIdx, + Oid **ordOperators) +{ + int numPart = list_length(wc->partitionClause); + int numOrder = list_length(wc->orderClause); + + if (numSortCols == numPart + numOrder) + { + /* easy case */ + *partNumCols = numPart; + *partColIdx = sortColIdx; + *partOperators = extract_grouping_ops(wc->partitionClause); + *ordNumCols = numOrder; + *ordColIdx = sortColIdx + numPart; + *ordOperators = extract_grouping_ops(wc->orderClause); + } + else + { + List *sortclauses; + List *pathkeys; + int scidx; + ListCell *lc; + + /* first, allocate what's certainly enough space for the arrays */ + *partNumCols = 0; + *partColIdx = (AttrNumber *) palloc(numPart * sizeof(AttrNumber)); + *partOperators = (Oid *) palloc(numPart * sizeof(Oid)); + *ordNumCols = 0; + *ordColIdx = (AttrNumber *) palloc(numOrder * sizeof(AttrNumber)); + *ordOperators = (Oid *) palloc(numOrder * sizeof(Oid)); + sortclauses = NIL; + pathkeys = NIL; + scidx = 0; + foreach(lc, wc->partitionClause) + { + SortGroupClause *sgc = (SortGroupClause *) lfirst(lc); + List *new_pathkeys; + + sortclauses = lappend(sortclauses, sgc); + new_pathkeys = make_pathkeys_for_sortclauses(root, + sortclauses, + tlist, + true); + if (list_length(new_pathkeys) > list_length(pathkeys)) + { + /* this sort clause is actually significant */ + *partColIdx[*partNumCols] = sortColIdx[scidx++]; + *partOperators[*partNumCols] = sgc->eqop; + (*partNumCols)++; + pathkeys = new_pathkeys; + } + } + foreach(lc, wc->orderClause) + { + SortGroupClause *sgc = (SortGroupClause *) lfirst(lc); + List *new_pathkeys; + + sortclauses = lappend(sortclauses, sgc); + new_pathkeys = make_pathkeys_for_sortclauses(root, + sortclauses, + tlist, + true); + if (list_length(new_pathkeys) > list_length(pathkeys)) + { + /* this sort clause is actually significant */ + *ordColIdx[*ordNumCols] = sortColIdx[scidx++]; + *ordOperators[*ordNumCols] = sgc->eqop; + (*ordNumCols)++; + pathkeys = new_pathkeys; + } + } + /* complain if we didn't eat exactly the right number of sort cols */ + if (scidx != numSortCols) + elog(ERROR, "failed to deconstruct sort operators into partitioning/ordering operators"); + } +}
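One C precedence detail worth keeping in mind when reading the array-filling assignments in the hard case above: postfix [] binds tighter than unary *, so an expression of the form *partColIdx[i] parses as *(partColIdx[i]), while writing into the palloc'd output array requires (*partColIdx)[i]. The standalone sketch below shows the intended out-parameter idiom with hypothetical names and malloc in place of palloc.

#include <stdio.h>
#include <stdlib.h>

typedef short AttrNumberSketch;	/* stand-in for AttrNumber */

/*
 * Fill a caller-visible array through a pointer-to-pointer out parameter,
 * the way *partColIdx / *ordColIdx are filled above.
 */
static void
fill_columns(int ncols, AttrNumberSketch **colIdx, int *numCols)
{
	*colIdx = malloc(ncols * sizeof(AttrNumberSketch));
	*numCols = 0;
	for (int i = 0; i < ncols; i++)
	{
		/*
		 * (*colIdx)[i] indexes the freshly allocated array; the
		 * unparenthesized *colIdx[i] would mean *(colIdx[i]) instead.
		 */
		(*colIdx)[*numCols] = (AttrNumberSketch) (i + 1);
		(*numCols)++;
	}
}

int
main(void)
{
	AttrNumberSketch *cols;
	int			ncols;

	fill_columns(3, &cols, &ncols);
	for (int i = 0; i < ncols; i++)
		printf("column %d -> attno %d\n", i, (int) cols[i]);
	free(cols);
	return 0;
}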