/*-------------------------------------------------------------------------
 *
 * planner.c
 *	  The query optimizer external interface.
 *
 * Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *	  src/backend/optimizer/plan/planner.c
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include <limits.h>
#include <math.h>

#include "access/genam.h"
#include "access/htup_details.h"
#include "access/parallel.h"
#include "access/sysattr.h"
#include "access/table.h"
#include "access/xact.h"
#include "catalog/pg_constraint.h"
#include "catalog/pg_inherits.h"
#include "catalog/pg_proc.h"
#include "catalog/pg_type.h"
#include "executor/executor.h"
#include "executor/nodeAgg.h"
#include "foreign/fdwapi.h"
#include "miscadmin.h"
#include "jit/jit.h"
#include "lib/bipartite_match.h"
#include "lib/knapsack.h"
#include "nodes/makefuncs.h"
#include "nodes/nodeFuncs.h"
#ifdef OPTIMIZER_DEBUG
#include "nodes/print.h"
#endif
#include "optimizer/appendinfo.h"
#include "optimizer/clauses.h"
#include "optimizer/cost.h"
#include "optimizer/inherit.h"
#include "optimizer/optimizer.h"
#include "optimizer/paramassign.h"
#include "optimizer/pathnode.h"
#include "optimizer/paths.h"
#include "optimizer/plancat.h"
#include "optimizer/planmain.h"
#include "optimizer/planner.h"
#include "optimizer/prep.h"
#include "optimizer/subselect.h"
#include "optimizer/tlist.h"
#include "parser/analyze.h"
#include "parser/parsetree.h"
#include "parser/parse_agg.h"
#include "partitioning/partdesc.h"
#include "rewrite/rewriteManip.h"
#include "storage/dsm_impl.h"
#include "utils/rel.h"
#include "utils/selfuncs.h"
#include "utils/lsyscache.h"
#include "utils/syscache.h"

/* GUC parameters */
double		cursor_tuple_fraction = DEFAULT_CURSOR_TUPLE_FRACTION;
int			force_parallel_mode = FORCE_PARALLEL_OFF;
bool		parallel_leader_participation = true;

/* Hook for plugins to get control in planner() */
planner_hook_type planner_hook = NULL;

/* Hook for plugins to get control when grouping_planner() plans upper rels */
create_upper_paths_hook_type create_upper_paths_hook = NULL;
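
/*
 * The hook signatures are declared in optimizer/planner.h.  For example,
 * create_upper_paths_hook is invoked as grouping_planner() creates each
 * upper relation, receiving the PlannerInfo, the UpperRelationKind stage,
 * and the input and output RelOptInfos (see the header for the exact
 * signature), which lets a plugin add its own Paths alongside the standard
 * ones.
 */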
81 /* Expression kind codes for preprocess_expression */
82 #define EXPRKIND_QUAL 0
83 #define EXPRKIND_TARGET 1
84 #define EXPRKIND_RTFUNC 2
85 #define EXPRKIND_RTFUNC_LATERAL 3
86 #define EXPRKIND_VALUES 4
87 #define EXPRKIND_VALUES_LATERAL 5
88 #define EXPRKIND_LIMIT 6
89 #define EXPRKIND_APPINFO 7
90 #define EXPRKIND_PHV 8
91 #define EXPRKIND_TABLESAMPLE 9
92 #define EXPRKIND_ARBITER_ELEM 10
93 #define EXPRKIND_TABLEFUNC 11
94 #define EXPRKIND_TABLEFUNC_LATERAL 12

/* Passthrough data for standard_qp_callback */
typedef struct
{
	List	   *activeWindows;	/* active windows, if any */
	List	   *groupClause;	/* overrides parse->groupClause */
} standard_qp_extra;

/*
 * Data specific to grouping sets
 */
typedef struct
{
	List	   *rollups;
	List	   *hash_sets_idx;
	double		dNumHashGroups;
	bool		any_hashable;
	Bitmapset  *unsortable_refs;
	Bitmapset  *unhashable_refs;
	List	   *unsortable_sets;
	int		   *tleref_to_colnum_map;
} grouping_sets_data;

/*
 * Temporary structure for use during WindowClause reordering in order to be
 * able to sort WindowClauses on partitioning/ordering prefix.
 */
typedef struct
{
	WindowClause *wc;			/* The WindowClause being sorted */
	List	   *uniqueOrder;	/* A List of unique ordering/partitioning
								 * clauses per Window */
} WindowClauseSortData;

/* Local functions */
static Node *preprocess_expression(PlannerInfo *root, Node *expr, int kind);
static void preprocess_qual_conditions(PlannerInfo *root, Node *jtnode);
static void inheritance_planner(PlannerInfo *root);
static void grouping_planner(PlannerInfo *root, bool inheritance_update,
							 double tuple_fraction);
static grouping_sets_data *preprocess_grouping_sets(PlannerInfo *root);
static List *remap_to_groupclause_idx(List *groupClause, List *gsets,
									  int *tleref_to_colnum_map);
static void preprocess_rowmarks(PlannerInfo *root);
static double preprocess_limit(PlannerInfo *root,
							   double tuple_fraction,
							   int64 *offset_est, int64 *count_est);
static void remove_useless_groupby_columns(PlannerInfo *root);
static List *preprocess_groupclause(PlannerInfo *root, List *force);
static List *extract_rollup_sets(List *groupingSets);
static List *reorder_grouping_sets(List *groupingSets, List *sortclause);
static void standard_qp_callback(PlannerInfo *root, void *extra);
static double get_number_of_groups(PlannerInfo *root,
								   double path_rows,
								   grouping_sets_data *gd,
								   List *target_list);
static RelOptInfo *create_grouping_paths(PlannerInfo *root,
										 RelOptInfo *input_rel,
										 PathTarget *target,
										 bool target_parallel_safe,
										 const AggClauseCosts *agg_costs,
										 grouping_sets_data *gd);
static bool is_degenerate_grouping(PlannerInfo *root);
static void create_degenerate_grouping_paths(PlannerInfo *root,
											 RelOptInfo *input_rel,
											 RelOptInfo *grouped_rel);
static RelOptInfo *make_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
									 PathTarget *target, bool target_parallel_safe,
									 Node *havingQual);
static void create_ordinary_grouping_paths(PlannerInfo *root,
										   RelOptInfo *input_rel,
										   RelOptInfo *grouped_rel,
										   const AggClauseCosts *agg_costs,
										   grouping_sets_data *gd,
										   GroupPathExtraData *extra,
										   RelOptInfo **partially_grouped_rel_p);
static void consider_groupingsets_paths(PlannerInfo *root,
										RelOptInfo *grouped_rel,
										Path *path,
										bool is_sorted,
										bool can_hash,
										grouping_sets_data *gd,
										const AggClauseCosts *agg_costs,
										double dNumGroups);
static RelOptInfo *create_window_paths(PlannerInfo *root,
									   RelOptInfo *input_rel,
									   PathTarget *input_target,
									   PathTarget *output_target,
									   bool output_target_parallel_safe,
									   WindowFuncLists *wflists,
									   List *activeWindows);
static void create_one_window_path(PlannerInfo *root,
								   RelOptInfo *window_rel,
								   Path *path,
								   PathTarget *input_target,
								   PathTarget *output_target,
								   WindowFuncLists *wflists,
								   List *activeWindows);
static RelOptInfo *create_distinct_paths(PlannerInfo *root,
										 RelOptInfo *input_rel);
static RelOptInfo *create_ordered_paths(PlannerInfo *root,
										RelOptInfo *input_rel,
										PathTarget *target,
										bool target_parallel_safe,
										double limit_tuples);
static PathTarget *make_group_input_target(PlannerInfo *root,
										   PathTarget *final_target);
static PathTarget *make_partial_grouping_target(PlannerInfo *root,
												PathTarget *grouping_target,
												Node *havingQual);
static List *postprocess_setop_tlist(List *new_tlist, List *orig_tlist);
static List *select_active_windows(PlannerInfo *root, WindowFuncLists *wflists);
static PathTarget *make_window_input_target(PlannerInfo *root,
											PathTarget *final_target,
											List *activeWindows);
static List *make_pathkeys_for_window(PlannerInfo *root, WindowClause *wc,
									  List *tlist);
static PathTarget *make_sort_input_target(PlannerInfo *root,
										  PathTarget *final_target,
										  bool *have_postponed_srfs);
static void adjust_paths_for_srfs(PlannerInfo *root, RelOptInfo *rel,
								  List *targets, List *targets_contain_srfs);
static void add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
									  RelOptInfo *grouped_rel,
									  RelOptInfo *partially_grouped_rel,
									  const AggClauseCosts *agg_costs,
									  grouping_sets_data *gd,
									  double dNumGroups,
									  GroupPathExtraData *extra);
static RelOptInfo *create_partial_grouping_paths(PlannerInfo *root,
												 RelOptInfo *grouped_rel,
												 RelOptInfo *input_rel,
												 grouping_sets_data *gd,
												 GroupPathExtraData *extra,
												 bool force_rel_creation);
static void gather_grouping_paths(PlannerInfo *root, RelOptInfo *rel);
static bool can_partial_agg(PlannerInfo *root,
							const AggClauseCosts *agg_costs);
static void apply_scanjoin_target_to_paths(PlannerInfo *root,
										   RelOptInfo *rel,
										   List *scanjoin_targets,
										   List *scanjoin_targets_contain_srfs,
										   bool scanjoin_target_parallel_safe,
										   bool tlist_same_exprs);
static void create_partitionwise_grouping_paths(PlannerInfo *root,
												RelOptInfo *input_rel,
												RelOptInfo *grouped_rel,
												RelOptInfo *partially_grouped_rel,
												const AggClauseCosts *agg_costs,
												grouping_sets_data *gd,
												PartitionwiseAggregateType patype,
												GroupPathExtraData *extra);
static bool group_by_has_partkey(RelOptInfo *input_rel,
								 List *targetList,
								 List *groupClause);
static int	common_prefix_cmp(const void *a, const void *b);

/*****************************************************************************
 *
 *	   Query optimizer entry point
 *
 * To support loadable plugins that monitor or modify planner behavior,
 * we provide a hook variable that lets a plugin get control before and
 * after the standard planning process.  The plugin would normally call
 * standard_planner().
 *
 * Note to plugin authors: standard_planner() scribbles on its Query input,
 * so you'd better copy that data structure if you want to plan more than once.
 *
 *****************************************************************************/
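/*
 * For example (illustrative sketch, not part of this file): a plugin would
 * typically chain the hook from its _PG_init(), along the lines of
 *
 *		static planner_hook_type prev_planner_hook = NULL;
 *
 *		void
 *		_PG_init(void)
 *		{
 *			prev_planner_hook = planner_hook;
 *			planner_hook = my_planner;
 *		}
 *
 * where my_planner() does its own work and then calls prev_planner_hook if
 * that is set, else standard_planner().
 */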
PlannedStmt *
planner(Query *parse, int cursorOptions, ParamListInfo boundParams)
{
	PlannedStmt *result;

	if (planner_hook)
		result = (*planner_hook) (parse, cursorOptions, boundParams);
	else
		result = standard_planner(parse, cursorOptions, boundParams);
	return result;
}

PlannedStmt *
standard_planner(Query *parse, int cursorOptions, ParamListInfo boundParams)
{
	PlannedStmt *result;
	PlannerGlobal *glob;
	double		tuple_fraction;
	PlannerInfo *root;
	RelOptInfo *final_rel;
	Path	   *best_path;
	Plan	   *top_plan;
	ListCell   *lp,
			   *lr;

	/*
	 * Set up global state for this planner invocation.  This data is needed
	 * across all levels of sub-Query that might exist in the given command,
	 * so we keep it in a separate struct that's linked to by each per-Query
	 * PlannerInfo.
	 */
	glob = makeNode(PlannerGlobal);

	glob->boundParams = boundParams;
	glob->subplans = NIL;
	glob->subroots = NIL;
	glob->rewindPlanIDs = NULL;
	glob->finalrtable = NIL;
	glob->finalrowmarks = NIL;
	glob->resultRelations = NIL;
	glob->rootResultRelations = NIL;
	glob->relationOids = NIL;
	glob->invalItems = NIL;
	glob->paramExecTypes = NIL;
	glob->lastPHId = 0;
	glob->lastRowMarkId = 0;
	glob->lastPlanNodeId = 0;
	glob->transientPlan = false;
	glob->dependsOnRole = false;

	/*
	 * Assess whether it's feasible to use parallel mode for this query. We
	 * can't do this in a standalone backend, or if the command will try to
	 * modify any data, or if this is a cursor operation, or if GUCs are set
	 * to values that don't permit parallelism, or if parallel-unsafe
	 * functions are present in the query tree.
	 *
	 * (Note that we do allow CREATE TABLE AS, SELECT INTO, and CREATE
	 * MATERIALIZED VIEW to use parallel plans, but this is safe only because
	 * the command is writing into a completely new table which workers won't
	 * be able to see.  If the workers could see the table, the fact that
	 * group locking would cause them to ignore the leader's heavyweight
	 * relation extension lock and GIN page locks would make this unsafe.
	 * We'll have to fix that somehow if we want to allow parallel inserts in
	 * general; updates and deletes have additional problems especially around
	 * combo CIDs.)
	 *
	 * For now, we don't try to use parallel mode if we're running inside a
	 * parallel worker.  We might eventually be able to relax this
	 * restriction, but for now it seems best not to have parallel workers
	 * trying to create their own parallel workers.
	 */
	if ((cursorOptions & CURSOR_OPT_PARALLEL_OK) != 0 &&
		IsUnderPostmaster &&
		parse->commandType == CMD_SELECT &&
		!parse->hasModifyingCTE &&
		max_parallel_workers_per_gather > 0 &&
		!IsParallelWorker())
	{
		/* all the cheap tests pass, so scan the query tree */
		glob->maxParallelHazard = max_parallel_hazard(parse);
		glob->parallelModeOK = (glob->maxParallelHazard != PROPARALLEL_UNSAFE);
	}
	else
	{
		/* skip the query tree scan, just assume it's unsafe */
		glob->maxParallelHazard = PROPARALLEL_UNSAFE;
		glob->parallelModeOK = false;
	}

	/*
	 * glob->parallelModeNeeded is normally set to false here and changed to
	 * true during plan creation if a Gather or Gather Merge plan is actually
	 * created (cf. create_gather_plan, create_gather_merge_plan).
	 *
	 * However, if force_parallel_mode = on or force_parallel_mode = regress,
	 * then we impose parallel mode whenever it's safe to do so, even if the
	 * final plan doesn't use parallelism.  It's not safe to do so if the
	 * query contains anything parallel-unsafe; parallelModeOK will be false
	 * in that case.  Note that parallelModeOK can't change after this point.
	 * Otherwise, everything in the query is either parallel-safe or
	 * parallel-restricted, and in either case it should be OK to impose
	 * parallel-mode restrictions.  If that ends up breaking something, then
	 * either some function the user included in the query is incorrectly
	 * labelled as parallel-safe or parallel-restricted when in reality it's
	 * parallel-unsafe, or else the query planner itself has a bug.
	 */
	glob->parallelModeNeeded = glob->parallelModeOK &&
		(force_parallel_mode != FORCE_PARALLEL_OFF);

	/* Determine what fraction of the plan is likely to be scanned */
	if (cursorOptions & CURSOR_OPT_FAST_PLAN)
	{
		/*
		 * We have no real idea how many tuples the user will ultimately FETCH
		 * from a cursor, but it is often the case that they don't want them
		 * all, or would prefer a fast-start plan anyway so that they can
		 * process some of the tuples sooner.  Use a GUC parameter to decide
		 * what fraction to optimize for.
		 */
		tuple_fraction = cursor_tuple_fraction;

		/*
		 * We document cursor_tuple_fraction as simply being a fraction, which
		 * means the edge cases 0 and 1 have to be treated specially here.  We
		 * convert 1 to 0 ("all the tuples") and 0 to a very small fraction.
		 */
		if (tuple_fraction >= 1.0)
			tuple_fraction = 0.0;
		else if (tuple_fraction <= 0.0)
			tuple_fraction = 1e-10;
	}
	else
	{
		/* Default assumption is we need all the tuples */
		tuple_fraction = 0.0;
	}
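
	/*
	 * For example: with cursor_tuple_fraction at its default of 0.1, a
	 * DECLARE CURSOR plan is chosen as if only about 10% of the rows will be
	 * fetched, which tends to favor fast-start plans over plans that merely
	 * minimize total cost.
	 */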

	/* primary planning entry point (may recurse for subqueries) */
	root = subquery_planner(glob, parse, NULL,
							false, tuple_fraction);

	/* Select best Path and turn it into a Plan */
	final_rel = fetch_upper_rel(root, UPPERREL_FINAL, NULL);
	best_path = get_cheapest_fractional_path(final_rel, tuple_fraction);

	top_plan = create_plan(root, best_path);

	/*
	 * If creating a plan for a scrollable cursor, make sure it can run
	 * backwards on demand.  Add a Material node at the top if needed.
	 */
	if (cursorOptions & CURSOR_OPT_SCROLL)
	{
		if (!ExecSupportsBackwardScan(top_plan))
			top_plan = materialize_finished_plan(top_plan);
	}

	/*
	 * Optionally add a Gather node for testing purposes, provided this is
	 * actually a safe thing to do.
	 */
	if (force_parallel_mode != FORCE_PARALLEL_OFF && top_plan->parallel_safe)
	{
		Gather	   *gather = makeNode(Gather);

		/*
		 * If there are any initPlans attached to the formerly-top plan node,
		 * move them up to the Gather node; same as we do for Material node in
		 * materialize_finished_plan.
		 */
		gather->plan.initPlan = top_plan->initPlan;
		top_plan->initPlan = NIL;

		gather->plan.targetlist = top_plan->targetlist;
		gather->plan.qual = NIL;
		gather->plan.lefttree = top_plan;
		gather->plan.righttree = NULL;
		gather->num_workers = 1;
		gather->single_copy = true;
		gather->invisible = (force_parallel_mode == FORCE_PARALLEL_REGRESS);

		/*
		 * Since this Gather has no parallel-aware descendants to signal to,
		 * we don't need a rescan Param.
		 */
		gather->rescan_param = -1;

		/*
		 * Ideally we'd use cost_gather here, but setting up dummy path data
		 * to satisfy it doesn't seem much cleaner than knowing what it does.
		 */
		gather->plan.startup_cost = top_plan->startup_cost +
			parallel_setup_cost;
		gather->plan.total_cost = top_plan->total_cost +
			parallel_setup_cost + parallel_tuple_cost * top_plan->plan_rows;
		gather->plan.plan_rows = top_plan->plan_rows;
		gather->plan.plan_width = top_plan->plan_width;
		gather->plan.parallel_aware = false;
		gather->plan.parallel_safe = false;

		/* use parallel mode for parallel plans. */
		root->glob->parallelModeNeeded = true;

		top_plan = &gather->plan;
	}

	/*
	 * If any Params were generated, run through the plan tree and compute
	 * each plan node's extParam/allParam sets.  Ideally we'd merge this into
	 * set_plan_references' tree traversal, but for now it has to be separate
	 * because we need to visit subplans before, not after, the main plan.
	 */
	if (glob->paramExecTypes != NIL)
	{
		Assert(list_length(glob->subplans) == list_length(glob->subroots));
		forboth(lp, glob->subplans, lr, glob->subroots)
		{
			Plan	   *subplan = (Plan *) lfirst(lp);
			PlannerInfo *subroot = lfirst_node(PlannerInfo, lr);

			SS_finalize_plan(subroot, subplan);
		}
		SS_finalize_plan(root, top_plan);
	}

	/* final cleanup of the plan */
	Assert(glob->finalrtable == NIL);
	Assert(glob->finalrowmarks == NIL);
	Assert(glob->resultRelations == NIL);
	Assert(glob->rootResultRelations == NIL);
	top_plan = set_plan_references(root, top_plan);
	/* ... and the subplans (both regular subplans and initplans) */
	Assert(list_length(glob->subplans) == list_length(glob->subroots));
	forboth(lp, glob->subplans, lr, glob->subroots)
	{
		Plan	   *subplan = (Plan *) lfirst(lp);
		PlannerInfo *subroot = lfirst_node(PlannerInfo, lr);

		lfirst(lp) = set_plan_references(subroot, subplan);
	}

	/* build the PlannedStmt result */
	result = makeNode(PlannedStmt);

	result->commandType = parse->commandType;
	result->queryId = parse->queryId;
	result->hasReturning = (parse->returningList != NIL);
	result->hasModifyingCTE = parse->hasModifyingCTE;
	result->canSetTag = parse->canSetTag;
	result->transientPlan = glob->transientPlan;
	result->dependsOnRole = glob->dependsOnRole;
	result->parallelModeNeeded = glob->parallelModeNeeded;
	result->planTree = top_plan;
	result->rtable = glob->finalrtable;
	result->resultRelations = glob->resultRelations;
	result->rootResultRelations = glob->rootResultRelations;
	result->subplans = glob->subplans;
	result->rewindPlanIDs = glob->rewindPlanIDs;
	result->rowMarks = glob->finalrowmarks;
	result->relationOids = glob->relationOids;
	result->invalItems = glob->invalItems;
	result->paramExecTypes = glob->paramExecTypes;
	/* utilityStmt should be null, but we might as well copy it */
	result->utilityStmt = parse->utilityStmt;
	result->stmt_location = parse->stmt_location;
	result->stmt_len = parse->stmt_len;

	result->jitFlags = PGJIT_NONE;
	if (jit_enabled && jit_above_cost >= 0 &&
		top_plan->total_cost > jit_above_cost)
	{
		result->jitFlags |= PGJIT_PERFORM;

		/*
		 * Decide how much effort should be put into generating better code.
		 */
		if (jit_optimize_above_cost >= 0 &&
			top_plan->total_cost > jit_optimize_above_cost)
			result->jitFlags |= PGJIT_OPT3;
		if (jit_inline_above_cost >= 0 &&
			top_plan->total_cost > jit_inline_above_cost)
			result->jitFlags |= PGJIT_INLINE;

		/*
		 * Decide which operations should be JITed.
		 */
		if (jit_expressions)
			result->jitFlags |= PGJIT_EXPR;
		if (jit_tuple_deforming)
			result->jitFlags |= PGJIT_DEFORM;
	}

	if (glob->partition_directory != NULL)
		DestroyPartitionDirectory(glob->partition_directory);

	return result;
}

/*--------------------
 * subquery_planner
 *	  Invokes the planner on a subquery.  We recurse to here for each
 *	  sub-SELECT found in the query tree.
 *
 * glob is the global state for the current planner run.
 * parse is the querytree produced by the parser & rewriter.
 * parent_root is the immediate parent Query's info (NULL at the top level).
 * hasRecursion is true if this is a recursive WITH query.
 * tuple_fraction is the fraction of tuples we expect will be retrieved.
 * tuple_fraction is interpreted as explained for grouping_planner, below.
 *
 * Basically, this routine does the stuff that should only be done once
 * per Query object.  It then calls grouping_planner.  At one time,
 * grouping_planner could be invoked recursively on the same Query object;
 * that's not currently true, but we keep the separation between the two
 * routines anyway, in case we need it again someday.
 *
 * subquery_planner will be called recursively to handle sub-Query nodes
 * found within the query's expressions and rangetable.
 *
 * Returns the PlannerInfo struct ("root") that contains all data generated
 * while planning the subquery.  In particular, the Path(s) attached to
 * the (UPPERREL_FINAL, NULL) upperrel represent our conclusions about the
 * cheapest way(s) to implement the query.  The top level will select the
 * best Path and pass it through createplan.c to produce a finished Plan.
 *--------------------
 */
PlannerInfo *
subquery_planner(PlannerGlobal *glob, Query *parse,
				 PlannerInfo *parent_root,
				 bool hasRecursion, double tuple_fraction)
{
	PlannerInfo *root;
	List	   *newWithCheckOptions;
	List	   *newHaving;
	bool		hasOuterJoins;
	bool		hasResultRTEs;
	RelOptInfo *final_rel;
	ListCell   *l;

	/* Create a PlannerInfo data structure for this subquery */
	root = makeNode(PlannerInfo);
	root->parse = parse;
	root->glob = glob;
	root->query_level = parent_root ? parent_root->query_level + 1 : 1;
	root->parent_root = parent_root;
	root->plan_params = NIL;
	root->outer_params = NULL;
	root->planner_cxt = CurrentMemoryContext;
	root->init_plans = NIL;
	root->cte_plan_ids = NIL;
	root->multiexpr_params = NIL;
	root->eq_classes = NIL;
	root->ec_merging_done = false;
	root->append_rel_list = NIL;
	root->rowMarks = NIL;
	memset(root->upper_rels, 0, sizeof(root->upper_rels));
	memset(root->upper_targets, 0, sizeof(root->upper_targets));
	root->processed_tlist = NIL;
	root->grouping_map = NULL;
	root->minmax_aggs = NIL;
	root->qual_security_level = 0;
	root->inhTargetKind = INHKIND_NONE;
	root->hasRecursion = hasRecursion;
	if (hasRecursion)
		root->wt_param_id = assign_special_exec_param(root);
	else
		root->wt_param_id = -1;
	root->non_recursive_path = NULL;
	root->partColsUpdated = false;

	/*
	 * If there is a WITH list, process each WITH query and either convert it
	 * to RTE_SUBQUERY RTE(s) or build an initplan SubPlan structure for it.
	 */
	if (parse->cteList)
		SS_process_ctes(root);

	/*
	 * If the FROM clause is empty, replace it with a dummy RTE_RESULT RTE, so
	 * that we don't need so many special cases to deal with that situation.
	 */
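	/*
	 * For example (illustrative): "SELECT 2+2" has an empty FROM clause, so
	 * a dummy RTE_RESULT entry is inserted here to give it a nonempty
	 * jointree.
	 */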
	replace_empty_jointree(parse);

	/*
	 * Look for ANY and EXISTS SubLinks in WHERE and JOIN/ON clauses, and try
	 * to transform them into joins.  Note that this step does not descend
	 * into subqueries; if we pull up any subqueries below, their SubLinks are
	 * processed just before pulling them up.
	 */
	if (parse->hasSubLinks)
		pull_up_sublinks(root);

	/*
	 * Scan the rangetable for function RTEs, do const-simplification on them,
	 * and then inline them if possible (producing subqueries that might get
	 * pulled up next).  Recursion issues here are handled in the same way as
	 * for SubLinks.
	 */
	preprocess_function_rtes(root);

	/*
	 * Check to see if any subqueries in the jointree can be merged into this
	 * query.
	 */
	pull_up_subqueries(root);

	/*
	 * If this is a simple UNION ALL query, flatten it into an appendrel. We
	 * do this now because it requires applying pull_up_subqueries to the leaf
	 * queries of the UNION ALL, which weren't touched above because they
	 * weren't referenced by the jointree (they will be after we do this).
	 */
	if (parse->setOperations)
		flatten_simple_union_all(root);

	/*
	 * Survey the rangetable to see what kinds of entries are present.  We can
	 * skip some later processing if relevant SQL features are not used; for
	 * example if there are no JOIN RTEs we can avoid the expense of doing
	 * flatten_join_alias_vars().  This must be done after we have finished
	 * adding rangetable entries, of course.  (Note: actually, processing of
	 * inherited or partitioned rels can cause RTEs for their child tables to
	 * get added later; but those must all be RTE_RELATION entries, so they
	 * don't invalidate the conclusions drawn here.)
	 */
	root->hasJoinRTEs = false;
	root->hasLateralRTEs = false;
	hasOuterJoins = false;
	hasResultRTEs = false;
	foreach(l, parse->rtable)
	{
		RangeTblEntry *rte = lfirst_node(RangeTblEntry, l);

		switch (rte->rtekind)
		{
			case RTE_RELATION:
				if (rte->inh)
				{
					/*
					 * Check to see if the relation actually has any children;
					 * if not, clear the inh flag so we can treat it as a
					 * plain base relation.
					 *
					 * Note: this could give a false-positive result, if the
					 * rel once had children but no longer does.  We used to
					 * be able to clear rte->inh later on when we discovered
					 * that, but no more; we have to handle such cases as
					 * full-fledged inheritance.
					 */
					rte->inh = has_subclass(rte->relid);
				}
				break;
			case RTE_JOIN:
				root->hasJoinRTEs = true;
				if (IS_OUTER_JOIN(rte->jointype))
					hasOuterJoins = true;
				break;
			case RTE_RESULT:
				hasResultRTEs = true;
				break;
			default:
				/* No work here for other RTE types */
				break;
		}

		if (rte->lateral)
			root->hasLateralRTEs = true;

		/*
		 * We can also determine the maximum security level required for any
		 * securityQuals now.  Addition of inheritance-child RTEs won't affect
		 * this, because child tables don't have their own securityQuals; see
		 * expand_single_inheritance_child().
		 */
		if (rte->securityQuals)
			root->qual_security_level = Max(root->qual_security_level,
											list_length(rte->securityQuals));
	}

	/*
	 * Preprocess RowMark information.  We need to do this after subquery
	 * pullup, so that all base relations are present.
	 */
	preprocess_rowmarks(root);

	/*
	 * Set hasHavingQual to remember if HAVING clause is present.  Needed
	 * because preprocess_expression will reduce a constant-true condition to
	 * an empty qual list ... but "HAVING TRUE" is not a semantic no-op.
	 */
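	/*
	 * For example (illustrative): "SELECT 1 FROM t HAVING true" yields at
	 * most one row no matter how many rows t contains, whereas the same
	 * query with "WHERE true" yields one row per row of t; so dropping a
	 * constant-true HAVING qual outright would change the result.
	 */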
	root->hasHavingQual = (parse->havingQual != NULL);

	/* Clear this flag; might get set in distribute_qual_to_rels */
	root->hasPseudoConstantQuals = false;

	/*
	 * Do expression preprocessing on targetlist and quals, as well as other
	 * random expressions in the querytree.  Note that we do not need to
	 * handle sort/group expressions explicitly, because they are actually
	 * part of the targetlist.
	 */
	parse->targetList = (List *)
		preprocess_expression(root, (Node *) parse->targetList,
							  EXPRKIND_TARGET);

	/* Constant-folding might have removed all set-returning functions */
	if (parse->hasTargetSRFs)
		parse->hasTargetSRFs = expression_returns_set((Node *) parse->targetList);

	newWithCheckOptions = NIL;
	foreach(l, parse->withCheckOptions)
	{
		WithCheckOption *wco = lfirst_node(WithCheckOption, l);

		wco->qual = preprocess_expression(root, wco->qual,
										  EXPRKIND_QUAL);
		if (wco->qual != NULL)
			newWithCheckOptions = lappend(newWithCheckOptions, wco);
	}
	parse->withCheckOptions = newWithCheckOptions;

	parse->returningList = (List *)
		preprocess_expression(root, (Node *) parse->returningList,
							  EXPRKIND_TARGET);

	preprocess_qual_conditions(root, (Node *) parse->jointree);

	parse->havingQual = preprocess_expression(root, parse->havingQual,
											  EXPRKIND_QUAL);

	foreach(l, parse->windowClause)
	{
		WindowClause *wc = lfirst_node(WindowClause, l);

		/* partitionClause/orderClause are sort/group expressions */
		wc->startOffset = preprocess_expression(root, wc->startOffset,
												EXPRKIND_LIMIT);
		wc->endOffset = preprocess_expression(root, wc->endOffset,
											  EXPRKIND_LIMIT);
	}

	parse->limitOffset = preprocess_expression(root, parse->limitOffset,
											   EXPRKIND_LIMIT);
	parse->limitCount = preprocess_expression(root, parse->limitCount,
											  EXPRKIND_LIMIT);

	if (parse->onConflict)
	{
		parse->onConflict->arbiterElems = (List *)
			preprocess_expression(root,
								  (Node *) parse->onConflict->arbiterElems,
								  EXPRKIND_ARBITER_ELEM);
		parse->onConflict->arbiterWhere =
			preprocess_expression(root,
								  parse->onConflict->arbiterWhere,
								  EXPRKIND_QUAL);
		parse->onConflict->onConflictSet = (List *)
			preprocess_expression(root,
								  (Node *) parse->onConflict->onConflictSet,
								  EXPRKIND_TARGET);
		parse->onConflict->onConflictWhere =
			preprocess_expression(root,
								  parse->onConflict->onConflictWhere,
								  EXPRKIND_QUAL);
		/* exclRelTlist contains only Vars, so no preprocessing needed */
	}

	root->append_rel_list = (List *)
		preprocess_expression(root, (Node *) root->append_rel_list,
							  EXPRKIND_APPINFO);

	/* Also need to preprocess expressions within RTEs */
	foreach(l, parse->rtable)
	{
		RangeTblEntry *rte = lfirst_node(RangeTblEntry, l);
		int			kind;
		ListCell   *lcsq;

		if (rte->rtekind == RTE_RELATION)
		{
			if (rte->tablesample)
				rte->tablesample = (TableSampleClause *)
					preprocess_expression(root,
										  (Node *) rte->tablesample,
										  EXPRKIND_TABLESAMPLE);
		}
		else if (rte->rtekind == RTE_SUBQUERY)
		{
			/*
			 * We don't want to do all preprocessing yet on the subquery's
			 * expressions, since that will happen when we plan it.  But if it
			 * contains any join aliases of our level, those have to get
			 * expanded now, because planning of the subquery won't do it.
			 * That's only possible if the subquery is LATERAL.
			 */
			if (rte->lateral && root->hasJoinRTEs)
				rte->subquery = (Query *)
					flatten_join_alias_vars(root->parse,
											(Node *) rte->subquery);
		}
		else if (rte->rtekind == RTE_FUNCTION)
		{
			/* Preprocess the function expression(s) fully */
			kind = rte->lateral ? EXPRKIND_RTFUNC_LATERAL : EXPRKIND_RTFUNC;
			rte->functions = (List *)
				preprocess_expression(root, (Node *) rte->functions, kind);
		}
		else if (rte->rtekind == RTE_TABLEFUNC)
		{
			/* Preprocess the function expression(s) fully */
			kind = rte->lateral ? EXPRKIND_TABLEFUNC_LATERAL : EXPRKIND_TABLEFUNC;
			rte->tablefunc = (TableFunc *)
				preprocess_expression(root, (Node *) rte->tablefunc, kind);
		}
		else if (rte->rtekind == RTE_VALUES)
		{
			/* Preprocess the values lists fully */
			kind = rte->lateral ? EXPRKIND_VALUES_LATERAL : EXPRKIND_VALUES;
			rte->values_lists = (List *)
				preprocess_expression(root, (Node *) rte->values_lists, kind);
		}

		/*
		 * Process each element of the securityQuals list as if it were a
		 * separate qual expression (as indeed it is).  We need to do it this
		 * way to get proper canonicalization of AND/OR structure.  Note that
		 * this converts each element into an implicit-AND sublist.
		 */
		foreach(lcsq, rte->securityQuals)
		{
			lfirst(lcsq) = preprocess_expression(root,
												 (Node *) lfirst(lcsq),
												 EXPRKIND_QUAL);
		}
	}

	/*
	 * Now that we are done preprocessing expressions, and in particular done
	 * flattening join alias variables, get rid of the joinaliasvars lists.
	 * They no longer match what expressions in the rest of the tree look
	 * like, because we have not preprocessed expressions in those lists (and
	 * do not want to; for example, expanding a SubLink there would result in
	 * a useless unreferenced subplan).  Leaving them in place simply creates
	 * a hazard for later scans of the tree.  We could try to prevent that by
	 * using QTW_IGNORE_JOINALIASES in every tree scan done after this point,
	 * but that doesn't sound very reliable.
	 */
	if (root->hasJoinRTEs)
	{
		foreach(l, parse->rtable)
		{
			RangeTblEntry *rte = lfirst_node(RangeTblEntry, l);

			rte->joinaliasvars = NIL;
		}
	}

	/*
	 * In some cases we may want to transfer a HAVING clause into WHERE. We
	 * cannot do so if the HAVING clause contains aggregates (obviously) or
	 * volatile functions (since a HAVING clause is supposed to be executed
	 * only once per group).  We also can't do this if there are any nonempty
	 * grouping sets; moving such a clause into WHERE would potentially change
	 * the results, if any referenced column isn't present in all the grouping
	 * sets.  (If there are only empty grouping sets, then the HAVING clause
	 * must be degenerate as discussed below.)
	 *
	 * Also, it may be that the clause is so expensive to execute that we're
	 * better off doing it only once per group, despite the loss of
	 * selectivity.  This is hard to estimate short of doing the entire
	 * planning process twice, so we use a heuristic: clauses containing
	 * subplans are left in HAVING.  Otherwise, we move or copy the HAVING
	 * clause into WHERE, in hopes of eliminating tuples before aggregation
	 * instead of after.
	 *
	 * If the query has explicit grouping then we can simply move such a
	 * clause into WHERE; any group that fails the clause will not be in the
	 * output because none of its tuples will reach the grouping or
	 * aggregation stage.  Otherwise we must have a degenerate (variable-free)
	 * HAVING clause, which we put in WHERE so that query_planner() can use it
	 * in a gating Result node, but also keep in HAVING to ensure that we
	 * don't emit a bogus aggregated row.  (This could be done better, but it
	 * seems not worth optimizing.)
	 *
	 * Note that both havingQual and parse->jointree->quals are in
	 * implicitly-ANDed-list form at this point, even though they are declared
	 * as Node *.
	 */
	newHaving = NIL;
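
	/*
	 * For example (illustrative): in
	 *		SELECT a, sum(b) FROM t GROUP BY a HAVING a > 0 AND sum(b) > 10;
	 * the clause "a > 0" can be evaluated in WHERE, filtering rows before
	 * aggregation, while "sum(b) > 10" must remain in HAVING because it
	 * references an aggregate.
	 */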
	foreach(l, (List *) parse->havingQual)
	{
		Node	   *havingclause = (Node *) lfirst(l);

		if ((parse->groupClause && parse->groupingSets) ||
			contain_agg_clause(havingclause) ||
			contain_volatile_functions(havingclause) ||
			contain_subplans(havingclause))
		{
			/* keep it in HAVING */
			newHaving = lappend(newHaving, havingclause);
		}
		else if (parse->groupClause && !parse->groupingSets)
		{
			/* move it to WHERE */
			parse->jointree->quals = (Node *)
				lappend((List *) parse->jointree->quals, havingclause);
		}
		else
		{
			/* put a copy in WHERE, keep it in HAVING */
			parse->jointree->quals = (Node *)
				lappend((List *) parse->jointree->quals,
						copyObject(havingclause));
			newHaving = lappend(newHaving, havingclause);
		}
	}
	parse->havingQual = (Node *) newHaving;

	/* Remove any redundant GROUP BY columns */
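	/*
	 * For example (illustrative): in "GROUP BY pkcol, othercol" where pkcol
	 * is the table's primary key, othercol is functionally dependent on
	 * pkcol and can be dropped from the grouping list without changing the
	 * results.
	 */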
	remove_useless_groupby_columns(root);

	/*
	 * If we have any outer joins, try to reduce them to plain inner joins.
	 * This step is most easily done after we've done expression
	 * preprocessing.
	 */
	if (hasOuterJoins)
		reduce_outer_joins(root);

	/*
	 * If we have any RTE_RESULT relations, see if they can be deleted from
	 * the jointree.  This step is most effectively done after we've done
	 * expression preprocessing and outer join reduction.
	 */
	if (hasResultRTEs)
		remove_useless_result_rtes(root);

	/*
	 * Do the main planning.  If we have an inherited target relation, that
	 * needs special processing, else go straight to grouping_planner.
	 */
	if (parse->resultRelation &&
		rt_fetch(parse->resultRelation, parse->rtable)->inh)
		inheritance_planner(root);
	else
		grouping_planner(root, false, tuple_fraction);

	/*
	 * Capture the set of outer-level param IDs we have access to, for use in
	 * extParam/allParam calculations later.
	 */
	SS_identify_outer_params(root);

	/*
	 * If any initPlans were created in this query level, adjust the surviving
	 * Paths' costs and parallel-safety flags to account for them.  The
	 * initPlans won't actually get attached to the plan tree till
	 * create_plan() runs, but we must include their effects now.
	 */
	final_rel = fetch_upper_rel(root, UPPERREL_FINAL, NULL);
	SS_charge_for_initplans(root, final_rel);

	/*
	 * Make sure we've identified the cheapest Path for the final rel.  (By
	 * doing this here not in grouping_planner, we include initPlan costs in
	 * the decision, though it's unlikely that will change anything.)
	 */
	set_cheapest(final_rel);

	return root;
}

/*
 * preprocess_expression
 *		Do subquery_planner's preprocessing work for an expression,
 *		which can be a targetlist, a WHERE clause (including JOIN/ON
 *		conditions), a HAVING clause, or a few other things.
 */
static Node *
preprocess_expression(PlannerInfo *root, Node *expr, int kind)
{
	/*
	 * Fall out quickly if expression is empty.  This occurs often enough to
	 * be worth checking.  Note that null->null is the correct conversion for
	 * implicit-AND result format, too.
	 */
	if (expr == NULL)
		return NULL;

	/*
	 * If the query has any join RTEs, replace join alias variables with
	 * base-relation variables.  We must do this first, since any expressions
	 * we may extract from the joinaliasvars lists have not been preprocessed.
	 * For example, if we did this after sublink processing, sublinks expanded
	 * out from join aliases would not get processed.  But we can skip this in
	 * non-lateral RTE functions, VALUES lists, and TABLESAMPLE clauses, since
	 * they can't contain any Vars of the current query level.
	 */
	if (root->hasJoinRTEs &&
		!(kind == EXPRKIND_RTFUNC ||
		  kind == EXPRKIND_VALUES ||
		  kind == EXPRKIND_TABLESAMPLE ||
		  kind == EXPRKIND_TABLEFUNC))
		expr = flatten_join_alias_vars(root->parse, expr);

	/*
	 * Simplify constant expressions.  For function RTEs, this was already
	 * done by preprocess_function_rtes ... but we have to do it again if the
	 * RTE is LATERAL and might have contained join alias variables.
	 *
	 * Note: an essential effect of this is to convert named-argument function
	 * calls to positional notation and insert the current actual values of
	 * any default arguments for functions.  To ensure that happens, we *must*
	 * process all expressions here.  Previous PG versions sometimes skipped
	 * const-simplification if it didn't seem worth the trouble, but we can't
	 * do that anymore.
	 *
	 * Note: this also flattens nested AND and OR expressions into N-argument
	 * form.  All processing of a qual expression after this point must be
	 * careful to maintain AND/OR flatness --- that is, do not generate a tree
	 * with AND directly under AND, nor OR directly under OR.
	 */
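	/*
	 * For instance (illustrative): "(a AND b) AND c" becomes a single
	 * three-argument AND clause here, and a qual that is just
	 * "a AND b AND c" will later be converted to the implicit-AND list
	 * {a, b, c}.
	 */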
	if (!(kind == EXPRKIND_RTFUNC ||
		  (kind == EXPRKIND_RTFUNC_LATERAL && !root->hasJoinRTEs)))
		expr = eval_const_expressions(root, expr);

	/*
	 * If it's a qual or havingQual, canonicalize it.
	 */
	if (kind == EXPRKIND_QUAL)
	{
		expr = (Node *) canonicalize_qual((Expr *) expr, false);

#ifdef OPTIMIZER_DEBUG
		printf("After canonicalize_qual()\n");
		pprint(expr);
#endif
	}

	/* Expand SubLinks to SubPlans */
	if (root->parse->hasSubLinks)
		expr = SS_process_sublinks(root, expr, (kind == EXPRKIND_QUAL));

	/*
	 * XXX do not insert anything here unless you have grokked the comments in
	 * SS_replace_correlation_vars ...
	 */

	/* Replace uplevel vars with Param nodes (this IS possible in VALUES) */
	if (root->query_level > 1)
		expr = SS_replace_correlation_vars(root, expr);

	/*
	 * If it's a qual or havingQual, convert it to implicit-AND format. (We
	 * don't want to do this before eval_const_expressions, since the latter
	 * would be unable to simplify a top-level AND correctly.  Also,
	 * SS_process_sublinks expects explicit-AND format.)
	 */
	if (kind == EXPRKIND_QUAL)
		expr = (Node *) make_ands_implicit((Expr *) expr);

	return expr;
}

/*
 * preprocess_qual_conditions
 *		Recursively scan the query's jointree and do subquery_planner's
 *		preprocessing work on each qual condition found therein.
 */
static void
preprocess_qual_conditions(PlannerInfo *root, Node *jtnode)
{
	if (jtnode == NULL)
		return;
	if (IsA(jtnode, RangeTblRef))
	{
		/* nothing to do here */
	}
	else if (IsA(jtnode, FromExpr))
	{
		FromExpr   *f = (FromExpr *) jtnode;
		ListCell   *l;

		foreach(l, f->fromlist)
			preprocess_qual_conditions(root, lfirst(l));

		f->quals = preprocess_expression(root, f->quals, EXPRKIND_QUAL);
	}
	else if (IsA(jtnode, JoinExpr))
	{
		JoinExpr   *j = (JoinExpr *) jtnode;

		preprocess_qual_conditions(root, j->larg);
		preprocess_qual_conditions(root, j->rarg);

		j->quals = preprocess_expression(root, j->quals, EXPRKIND_QUAL);
	}
	else
		elog(ERROR, "unrecognized node type: %d",
			 (int) nodeTag(jtnode));
}

/*
 * preprocess_phv_expression
 *	  Do preprocessing on a PlaceHolderVar expression that's been pulled up.
 *
 * If a LATERAL subquery references an output of another subquery, and that
 * output must be wrapped in a PlaceHolderVar because of an intermediate outer
 * join, then we'll push the PlaceHolderVar expression down into the subquery
 * and later pull it back up during find_lateral_references, which runs after
 * subquery_planner has preprocessed all the expressions that were in the
 * current query level to start with.  So we need to preprocess it then.
 */
Expr *
preprocess_phv_expression(PlannerInfo *root, Expr *expr)
{
	return (Expr *) preprocess_expression(root, (Node *) expr, EXPRKIND_PHV);
}

/*
 * inheritance_planner
 *	  Generate Paths in the case where the result relation is an
 *	  inheritance set.
 *
 * We have to handle this case differently from cases where a source relation
 * is an inheritance set.  Source inheritance is expanded at the bottom of the
 * plan tree (see allpaths.c), but target inheritance has to be expanded at
 * the top.  The reason is that for UPDATE, each target relation needs a
 * different targetlist matching its own column set.  Fortunately,
 * the UPDATE/DELETE target can never be the nullable side of an outer join,
 * so it's OK to generate the plan this way.
 *
 * Returns nothing; the useful output is in the Paths we attach to
 * the (UPPERREL_FINAL, NULL) upperrel stored in *root.
 *
 * Note that we have not done set_cheapest() on the final rel; it's convenient
 * to leave this to the caller.
 */
static void
inheritance_planner(PlannerInfo *root)
{
	Query	   *parse = root->parse;
	int			top_parentRTindex = parse->resultRelation;
	List	   *select_rtable;
	List	   *select_appinfos;
	List	   *child_appinfos;
	List	   *old_child_rtis;
	List	   *new_child_rtis;
	Bitmapset  *subqueryRTindexes;
	Index		next_subquery_rti;
	int			nominalRelation = -1;
	Index		rootRelation = 0;
	List	   *final_rtable = NIL;
	List	   *final_rowmarks = NIL;
	int			save_rel_array_size = 0;
	RelOptInfo **save_rel_array = NULL;
	AppendRelInfo **save_append_rel_array = NULL;
	List	   *subpaths = NIL;
	List	   *subroots = NIL;
	List	   *resultRelations = NIL;
	List	   *withCheckOptionLists = NIL;
	List	   *returningLists = NIL;
	List	   *rowMarks;
	RelOptInfo *final_rel;
	ListCell   *lc;
	ListCell   *lc2;
	Index		rti;
	RangeTblEntry *parent_rte;
	Bitmapset  *parent_relids;
	Query	  **parent_parses;

	/* Should only get here for UPDATE or DELETE */
	Assert(parse->commandType == CMD_UPDATE ||
		   parse->commandType == CMD_DELETE);

	/*
	 * We generate a modified instance of the original Query for each target
	 * relation, plan that, and put all the plans into a list that will be
	 * controlled by a single ModifyTable node.  All the instances share the
	 * same rangetable, but each instance must have its own set of subquery
	 * RTEs within the finished rangetable because (1) they are likely to get
	 * scribbled on during planning, and (2) it's not inconceivable that
	 * subqueries could get planned differently in different cases.  We need
	 * not create duplicate copies of other RTE kinds, in particular not the
	 * target relations, because they don't have either of those issues.  Not
	 * having to duplicate the target relations is important because doing so
	 * (1) would result in a rangetable of length O(N^2) for N targets, with
	 * at least O(N^3) work expended here; and (2) would greatly complicate
	 * management of the rowMarks list.
	 *
	 * To begin with, generate a bitmapset of the relids of the subquery RTEs.
	 */
	subqueryRTindexes = NULL;
	rti = 1;
	foreach(lc, parse->rtable)
	{
		RangeTblEntry *rte = lfirst_node(RangeTblEntry, lc);

		if (rte->rtekind == RTE_SUBQUERY)
			subqueryRTindexes = bms_add_member(subqueryRTindexes, rti);
		rti++;
	}

	/*
	 * If the parent RTE is a partitioned table, we should use that as the
	 * nominal target relation, because the RTEs added for partitioned tables
	 * (including the root parent) as child members of the inheritance set do
	 * not appear anywhere else in the plan, so the confusion explained below
	 * for non-partitioning inheritance cases is not possible.
	 */
	parent_rte = rt_fetch(top_parentRTindex, parse->rtable);
	Assert(parent_rte->inh);
	if (parent_rte->relkind == RELKIND_PARTITIONED_TABLE)
	{
		nominalRelation = top_parentRTindex;
		rootRelation = top_parentRTindex;
	}

	/*
	 * Before generating the real per-child-relation plans, do a cycle of
	 * planning as though the query were a SELECT.  The objective here is to
	 * find out which child relations need to be processed, using the same
	 * expansion and pruning logic as for a SELECT.  We'll then pull out the
	 * RangeTblEntry-s generated for the child rels, and make use of the
	 * AppendRelInfo entries for them to guide the real planning.  (This is
	 * rather inefficient; we could perhaps stop short of making a full Path
	 * tree.  But this whole function is inefficient and slated for
	 * destruction, so let's not contort query_planner for that.)
	 */
	{
		PlannerInfo *subroot;

		/*
		 * Flat-copy the PlannerInfo to prevent modification of the original.
		 */
		subroot = makeNode(PlannerInfo);
		memcpy(subroot, root, sizeof(PlannerInfo));

		/*
		 * Make a deep copy of the parsetree for this planning cycle to mess
		 * around with, and change it to look like a SELECT.  (Hack alert: the
		 * target RTE still has updatedCols set if this is an UPDATE, so that
		 * expand_partitioned_rtentry will correctly update
		 * subroot->partColsUpdated.)
		 */
		subroot->parse = copyObject(root->parse);

		subroot->parse->commandType = CMD_SELECT;
		subroot->parse->resultRelation = 0;

		/*
		 * Ensure the subroot has its own copy of the original
		 * append_rel_list, since it'll be scribbled on.  (Note that at this
		 * point, the list only contains AppendRelInfos for flattened UNION
		 * ALL subqueries.)
		 */
		subroot->append_rel_list = copyObject(root->append_rel_list);

		/*
		 * Better make a private copy of the rowMarks, too.
		 */
		subroot->rowMarks = copyObject(root->rowMarks);

		/* There shouldn't be any OJ info to translate, as yet */
		Assert(subroot->join_info_list == NIL);
		/* and we haven't created PlaceHolderInfos, either */
		Assert(subroot->placeholder_list == NIL);

		/* Generate Path(s) for accessing this result relation */
		grouping_planner(subroot, true, 0.0 /* retrieve all tuples */ );

		/* Extract the info we need. */
		select_rtable = subroot->parse->rtable;
		select_appinfos = subroot->append_rel_list;

		/*
		 * We need to propagate partColsUpdated back, too.  (The later
		 * planning cycles will not set this because they won't run
		 * expand_partitioned_rtentry for the UPDATE target.)
		 */
		root->partColsUpdated = subroot->partColsUpdated;
	}

	/*----------
	 * Since only one rangetable can exist in the final plan, we need to make
	 * sure that it contains all the RTEs needed for any child plan.  This is
	 * complicated by the need to use separate subquery RTEs for each child.
	 * We arrange the final rtable as follows:
	 * 1. All original rtable entries (with their original RT indexes).
	 * 2. All the relation RTEs generated for children of the target table.
	 * 3. Subquery RTEs for children after the first.  We need N * (K - 1)
	 *    RT slots for this, if there are N subqueries and K child tables.
	 * 4. Additional RTEs generated during the child planning runs, such as
	 *    children of inheritable RTEs other than the target table.
	 * We assume that each child planning run will create an identical set
	 * of type-4 RTEs.
	 *
	 * So the next thing to do is append the type-2 RTEs (the target table's
	 * children) to the original rtable.  We look through select_appinfos
	 * to find them.
	 *
	 * To identify which AppendRelInfos are relevant as we thumb through
	 * select_appinfos, we need to look for both direct and indirect children
	 * of top_parentRTindex, so we use a bitmap of known parent relids.
	 * expand_inherited_rtentry() always processes a parent before any of that
	 * parent's children, so we should see an intermediate parent before its
	 * children.
	 *----------
	 */
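	/*
	 * Worked example (illustrative): with N = 2 subquery RTEs and K = 3
	 * child target tables, the first child reuses the 2 original subquery
	 * RTEs and the remaining two children get 2 * (3 - 1) = 4 fresh slots.
	 */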
	child_appinfos = NIL;
	old_child_rtis = NIL;
	new_child_rtis = NIL;
	parent_relids = bms_make_singleton(top_parentRTindex);
	foreach(lc, select_appinfos)
	{
		AppendRelInfo *appinfo = lfirst_node(AppendRelInfo, lc);
		RangeTblEntry *child_rte;

		/* append_rel_list contains all append rels; ignore others */
		if (!bms_is_member(appinfo->parent_relid, parent_relids))
			continue;

		/* remember relevant AppendRelInfos for use below */
		child_appinfos = lappend(child_appinfos, appinfo);

		/* extract RTE for this child rel */
		child_rte = rt_fetch(appinfo->child_relid, select_rtable);

		/* and append it to the original rtable */
		parse->rtable = lappend(parse->rtable, child_rte);

		/* remember child's index in the SELECT rtable */
		old_child_rtis = lappend_int(old_child_rtis, appinfo->child_relid);

		/* and its new index in the final rtable */
		new_child_rtis = lappend_int(new_child_rtis, list_length(parse->rtable));

		/* if child is itself partitioned, update parent_relids */
		if (child_rte->inh)
		{
			Assert(child_rte->relkind == RELKIND_PARTITIONED_TABLE);
			parent_relids = bms_add_member(parent_relids, appinfo->child_relid);
		}
	}

	/*
	 * It's possible that the RTIs we just assigned for the child rels in the
	 * final rtable are different from what they were in the SELECT query.
	 * Adjust the AppendRelInfos so that they will correctly map RT indexes to
	 * the final indexes.  We can do this left-to-right since no child rel's
	 * final RT index could be greater than what it had in the SELECT query.
	 */
	forboth(lc, old_child_rtis, lc2, new_child_rtis)
	{
		int			old_child_rti = lfirst_int(lc);
		int			new_child_rti = lfirst_int(lc2);

		if (old_child_rti == new_child_rti)
			continue;			/* nothing to do */

		Assert(old_child_rti > new_child_rti);

		ChangeVarNodes((Node *) child_appinfos,
					   old_child_rti, new_child_rti, 0);
	}

	/*
	 * Now set up rangetable entries for subqueries for additional children
	 * (the first child will just use the original ones).  These all have to
	 * look more or less real, or EXPLAIN will get unhappy; so we just make
	 * them all clones of the original subqueries.
	 */
	next_subquery_rti = list_length(parse->rtable) + 1;
	if (subqueryRTindexes != NULL)
	{
		int			n_children = list_length(child_appinfos);

		while (n_children-- > 1)
		{
			int			oldrti = -1;

			while ((oldrti = bms_next_member(subqueryRTindexes, oldrti)) >= 0)
			{
				RangeTblEntry *subqrte;

				subqrte = rt_fetch(oldrti, parse->rtable);
				parse->rtable = lappend(parse->rtable, copyObject(subqrte));
			}
		}
	}

	/*
	 * The query for each child is obtained by translating the query for its
	 * immediate parent, since the AppendRelInfo data we have shows deltas
	 * between parents and children.  We use the parent_parses array to
	 * remember the appropriate query trees.  This is indexed by parent relid.
	 * Since the maximum number of parents is limited by the number of RTEs in
	 * the SELECT query, we use that number to allocate the array.  An extra
	 * entry is needed since relids start from 1.
	 */
	parent_parses = (Query **) palloc0((list_length(select_rtable) + 1) *
									   sizeof(Query *));
	parent_parses[top_parentRTindex] = parse;

	/*
	 * And now we can get on with generating a plan for each child table.
	 */
	foreach(lc, child_appinfos)
	{
		AppendRelInfo *appinfo = lfirst_node(AppendRelInfo, lc);
		Index		this_subquery_rti = next_subquery_rti;
		Query	   *parent_parse;
		PlannerInfo *subroot;
		RangeTblEntry *child_rte;
		RelOptInfo *sub_final_rel;
		Path	   *subpath;

		/*
		 * expand_inherited_rtentry() always processes a parent before any of
		 * that parent's children, so the parent query for this relation
		 * should already be available.
		 */
		parent_parse = parent_parses[appinfo->parent_relid];
		Assert(parent_parse != NULL);

		/*
		 * We need a working copy of the PlannerInfo so that we can control
		 * propagation of information back to the main copy.
		 */
		subroot = makeNode(PlannerInfo);
		memcpy(subroot, root, sizeof(PlannerInfo));

		/*
		 * Generate modified query with this rel as target.  We first apply
		 * adjust_appendrel_attrs, which copies the Query and changes
		 * references to the parent RTE to refer to the current child RTE,
		 * then fool around with subquery RTEs.
		 */
		subroot->parse = (Query *)
			adjust_appendrel_attrs(subroot,
								   (Node *) parent_parse,
								   1, &appinfo);

		/*
		 * If there are securityQuals attached to the parent, move them to the
		 * child rel (they've already been transformed properly for that).
		 */
		parent_rte = rt_fetch(appinfo->parent_relid, subroot->parse->rtable);
		child_rte = rt_fetch(appinfo->child_relid, subroot->parse->rtable);
		child_rte->securityQuals = parent_rte->securityQuals;
		parent_rte->securityQuals = NIL;

		/*
		 * HACK: setting this to a value other than INHKIND_NONE signals to
		 * relation_excluded_by_constraints() to treat the result relation as
		 * being an appendrel member.
		 */
		subroot->inhTargetKind =
			(rootRelation != 0) ? INHKIND_PARTITIONED : INHKIND_INHERITED;

		/*
		 * If this child is further partitioned, remember it as a parent.
		 * Since a partitioned table does not have any data, we don't need to
		 * create a plan for it, and we can stop processing it here.  We do,
		 * however, need to remember its modified PlannerInfo for use when
		 * processing its children, since we'll update their varnos based on
		 * the delta from immediate parent to child, not from top to child.
		 *
		 * Note: a very non-obvious point is that we have not yet added
		 * duplicate subquery RTEs to the subroot's rtable.  We mustn't,
		 * because then its children would have two sets of duplicates,
		 * confusing matters.
		 */
		if (child_rte->inh)
		{
			Assert(child_rte->relkind == RELKIND_PARTITIONED_TABLE);
			parent_parses[appinfo->child_relid] = subroot->parse;
			continue;
		}

		/*
		 * Set the nominal target relation of the ModifyTable node if not
		 * already done.  If the target is a partitioned table, we already set
		 * nominalRelation to refer to the partition root, above.  For
		 * non-partitioned inheritance cases, we'll use the first child
		 * relation (even if it's excluded) as the nominal target relation.
		 * Because of the way expand_inherited_rtentry works, that should be
		 * the RTE representing the parent table in its role as a simple
		 * member of the inheritance set.
		 *
		 * It would be logically cleaner to *always* use the inheritance
		 * parent RTE as the nominal relation; but that RTE is not otherwise
		 * referenced in the plan in the non-partitioned inheritance case.
		 * Instead the duplicate child RTE created by expand_inherited_rtentry
		 * is used elsewhere in the plan, so using the original parent RTE
		 * would give rise to confusing use of multiple aliases in EXPLAIN
		 * output for what the user will think is the "same" table.  OTOH,
		 * it's not a problem in the partitioned inheritance case, because
		 * there is no duplicate RTE for the parent.
		 */
		if (nominalRelation < 0)
			nominalRelation = appinfo->child_relid;

		/*
		 * As above, each child plan run needs its own append_rel_list and
		 * rowmarks, which should start out as pristine copies of the
		 * originals.  There can't be any references to UPDATE/DELETE target
		 * rels in them; but there could be subquery references, which we'll
		 * fix up in a moment.
		 */
		subroot->append_rel_list = copyObject(root->append_rel_list);
		subroot->rowMarks = copyObject(root->rowMarks);

		/*
		 * If this isn't the first child Query, adjust Vars and jointree
		 * entries to reference the appropriate set of subquery RTEs.
		 */
		if (final_rtable != NIL && subqueryRTindexes != NULL)
		{
			int			oldrti = -1;

			while ((oldrti = bms_next_member(subqueryRTindexes, oldrti)) >= 0)
			{
				Index		newrti = next_subquery_rti++;

				ChangeVarNodes((Node *) subroot->parse, oldrti, newrti, 0);
				ChangeVarNodes((Node *) subroot->append_rel_list,
							   oldrti, newrti, 0);
				ChangeVarNodes((Node *) subroot->rowMarks, oldrti, newrti, 0);
			}
		}

		/* There shouldn't be any OJ info to translate, as yet */
		Assert(subroot->join_info_list == NIL);
		/* and we haven't created PlaceHolderInfos, either */
		Assert(subroot->placeholder_list == NIL);

		/* Generate Path(s) for accessing this result relation */
		grouping_planner(subroot, true, 0.0 /* retrieve all tuples */ );

		/*
		 * Select cheapest path in case there's more than one.  We always run
		 * modification queries to conclusion, so we care only for the
		 * cheapest-total path.
		 */
		sub_final_rel = fetch_upper_rel(subroot, UPPERREL_FINAL, NULL);
		set_cheapest(sub_final_rel);
		subpath = sub_final_rel->cheapest_total_path;

		/*
		 * If this child rel was excluded by constraint exclusion, exclude it
		 * from the result plan.
		 */
		if (IS_DUMMY_REL(sub_final_rel))
			continue;

		/*
		 * If this is the first non-excluded child, its post-planning rtable
		 * becomes the initial contents of final_rtable; otherwise, copy its
		 * modified subquery RTEs into final_rtable, to ensure we have sane
		 * copies of those.  Also save the first non-excluded child's version
		 * of the rowmarks list; we assume all children will end up with
		 * equivalent versions of that.
		 */
		if (final_rtable == NIL)
		{
			final_rtable = subroot->parse->rtable;
			final_rowmarks = subroot->rowMarks;
		}
		else
		{
			Assert(list_length(final_rtable) ==
				   list_length(subroot->parse->rtable));
			if (subqueryRTindexes != NULL)
			{
				int			oldrti = -1;

				while ((oldrti = bms_next_member(subqueryRTindexes, oldrti)) >= 0)
				{
					Index		newrti = this_subquery_rti++;
					RangeTblEntry *subqrte;
					ListCell   *newrticell;

					subqrte = rt_fetch(newrti, subroot->parse->rtable);
					newrticell = list_nth_cell(final_rtable, newrti - 1);
					lfirst(newrticell) = subqrte;
				}
			}
		}
1660 * We need to collect all the RelOptInfos from all child plans into
1661 * the main PlannerInfo, since setrefs.c will need them. We use the
1662 * last child's simple_rel_array, so we have to propagate forward the
1663 * RelOptInfos that were already built in previous children.
1665 Assert(subroot->simple_rel_array_size >= save_rel_array_size);
1666 for (rti = 1; rti < save_rel_array_size; rti++)
1668 RelOptInfo *brel = save_rel_array[rti];
1670 if (brel)
1671 subroot->simple_rel_array[rti] = brel;
1673 save_rel_array_size = subroot->simple_rel_array_size;
1674 save_rel_array = subroot->simple_rel_array;
1675 save_append_rel_array = subroot->append_rel_array;
1678 * Make sure any initplans from this rel get into the outer list. Note
1679 * we're effectively assuming all children generate the same
1680 * init_plans.
1682 root->init_plans = subroot->init_plans;
1684 /* Build list of sub-paths */
1685 subpaths = lappend(subpaths, subpath);
1687 /* Build list of modified subroots, too */
1688 subroots = lappend(subroots, subroot);
1690 /* Build list of target-relation RT indexes */
1691 resultRelations = lappend_int(resultRelations, appinfo->child_relid);
1693 /* Build lists of per-relation WCO and RETURNING targetlists */
1694 if (parse->withCheckOptions)
1695 withCheckOptionLists = lappend(withCheckOptionLists,
1696 subroot->parse->withCheckOptions);
1697 if (parse->returningList)
1698 returningLists = lappend(returningLists,
1699 subroot->parse->returningList);
1701 Assert(!parse->onConflict);
1704 /* Result path must go into outer query's FINAL upperrel */
1705 final_rel = fetch_upper_rel(root, UPPERREL_FINAL, NULL);
1708 * We don't currently worry about setting final_rel's consider_parallel
1709 * flag in this case, nor about allowing FDWs or create_upper_paths_hook
1710 * to get control here.
1713 if (subpaths == NIL)
1716 * We managed to exclude every child rel, so generate a dummy path
1717 * representing the empty set. Although it's clear that no data will
1718 * be updated or deleted, we will still need to have a ModifyTable
1719 * node so that any statement triggers are executed. (This could be
1720 * cleaner if we fixed nodeModifyTable.c to support zero child nodes,
1721 * but that probably wouldn't be a net win.)
1725 /* tlist processing never got done, either */
1726 root->processed_tlist = preprocess_targetlist(root);
1727 final_rel->reltarget = create_pathtarget(root, root->processed_tlist);
1729 /* Make a dummy path, cf set_dummy_rel_pathlist() */
1730 dummy_path = (Path *) create_append_path(NULL, final_rel, NIL, NIL,
1731 NIL, NULL, 0, false,
1732 NIL, -1);
1734 /* These lists must be nonempty to make a valid ModifyTable node */
1735 subpaths = list_make1(dummy_path);
1736 subroots = list_make1(root);
1737 resultRelations = list_make1_int(parse->resultRelation);
1738 if (parse->withCheckOptions)
1739 withCheckOptionLists = list_make1(parse->withCheckOptions);
1740 if (parse->returningList)
1741 returningLists = list_make1(parse->returningList);
1742 /* Disable tuple routing, too, just to be safe */
1743 root->partColsUpdated = false;
1748 * Put back the final adjusted rtable into the master copy of the
1749 * Query. (We mustn't do this if we found no non-excluded children,
1750 * since we never saved an adjusted rtable at all.)
1751 if (final_rtable != NIL)
1752 parse->rtable = final_rtable;
1753 root->simple_rel_array_size = save_rel_array_size;
1754 root->simple_rel_array = save_rel_array;
1755 root->append_rel_array = save_append_rel_array;
1757 /* Must reconstruct master's simple_rte_array, too */
1758 root->simple_rte_array = (RangeTblEntry **)
1759 palloc0((list_length(final_rtable) + 1) * sizeof(RangeTblEntry *));
1760 rti = 1;
1761 foreach(lc, final_rtable)
1763 RangeTblEntry *rte = lfirst_node(RangeTblEntry, lc);
1765 root->simple_rte_array[rti++] = rte;
1768 /* Put back adjusted rowmarks, too */
1769 root->rowMarks = final_rowmarks;
1773 * If there was a FOR [KEY] UPDATE/SHARE clause, the LockRows node will
1774 * have dealt with fetching non-locked marked rows, else we need to have
1775 * ModifyTable do that.
1777 if (parse->rowMarks)
1778 rowMarks = NIL;
1779 else
1780 rowMarks = root->rowMarks;
1782 /* Create Path representing a ModifyTable to do the UPDATE/DELETE work */
1783 add_path(final_rel, (Path *)
1784 create_modifytable_path(root, final_rel,
1785 parse->commandType,
1786 parse->canSetTag,
1787 nominalRelation,
1788 rootRelation,
1789 root->partColsUpdated,
1790 resultRelations,
1791 subpaths,
1792 subroots,
1793 withCheckOptionLists,
1794 returningLists,
1795 rowMarks,
1796 NULL,
1797 assign_special_exec_param(root)));
1800 /*--------------------
1802 * Perform planning steps related to grouping, aggregation, etc.
1804 * This function adds all required top-level processing to the scan/join
1805 * Path(s) produced by query_planner.
1807 * If inheritance_update is true, we're being called from inheritance_planner
1808 * and should not include a ModifyTable step in the resulting Path(s).
1809 * (inheritance_planner will create a single ModifyTable node covering all the
1810 * target tables.)
1812 * tuple_fraction is the fraction of tuples we expect will be retrieved.
1813 * tuple_fraction is interpreted as follows:
1814 * 0: expect all tuples to be retrieved (normal case)
1815 * 0 < tuple_fraction < 1: expect the given fraction of tuples available
1816 * from the plan to be retrieved
1817 * tuple_fraction >= 1: tuple_fraction is the absolute number of tuples
1818 * expected to be retrieved (ie, a LIMIT specification)
1820 * Returns nothing; the useful output is in the Paths we attach to the
1821 * (UPPERREL_FINAL, NULL) upperrel in *root. In addition,
1822 * root->processed_tlist contains the final processed targetlist.
1824 * Note that we have not done set_cheapest() on the final rel; it's convenient
1825 * to leave this to the caller.
1826 *--------------------
1828 static void
1829 grouping_planner(PlannerInfo *root, bool inheritance_update,
1830 double tuple_fraction)
1832 Query *parse = root->parse;
1833 int64 offset_est = 0;
1834 int64 count_est = 0;
1835 double limit_tuples = -1.0;
1836 bool have_postponed_srfs = false;
1837 PathTarget *final_target;
1838 List *final_targets;
1839 List *final_targets_contain_srfs;
1840 bool final_target_parallel_safe;
1841 RelOptInfo *current_rel;
1842 RelOptInfo *final_rel;
1843 FinalPathExtraData extra;
1844 ListCell *lc;
1846 /* Tweak caller-supplied tuple_fraction if have LIMIT/OFFSET */
1847 if (parse->limitCount || parse->limitOffset)
1849 tuple_fraction = preprocess_limit(root, tuple_fraction,
1850 &offset_est, &count_est);
1853 * If we have a known LIMIT, and don't have an unknown OFFSET, we can
1854 * estimate the effects of using a bounded sort.
1856 if (count_est > 0 && offset_est >= 0)
1857 limit_tuples = (double) count_est + (double) offset_est;
1860 /* Make tuple_fraction accessible to lower-level routines */
1861 root->tuple_fraction = tuple_fraction;
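/*
 * A minimal sketch (not planner code) of how a tuple_fraction value
 * translates into a row-count goal, given some total row estimate;
 * "rows_wanted" and "total_rows" are hypothetical names:
 *
 *		double	rows_wanted;
 *
 *		if (tuple_fraction <= 0.0)
 *			rows_wanted = total_rows;					(fetch all)
 *		else if (tuple_fraction < 1.0)
 *			rows_wanted = tuple_fraction * total_rows;	(fraction)
 *		else
 *			rows_wanted = tuple_fraction;				(absolute count)
 *
 * So "LIMIT 30" arrives here as tuple_fraction = 30.0, while a cursor
 * fetching a tenth of its result would arrive as 0.1.
 */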
1863 if (parse->setOperations)
1866 * If there's a top-level ORDER BY, assume we have to fetch all the
1867 * tuples. This might be too simplistic given all the hackery below
1868 * to possibly avoid the sort; but the odds of accurate estimates here
1869 * are pretty low anyway. XXX try to get rid of this in favor of
1870 * letting plan_set_operations generate both fast-start and
1871 * cheapest-total paths.
1873 if (parse->sortClause)
1874 root->tuple_fraction = 0.0;
1877 * Construct Paths for set operations. The results will not need any
1878 * work except perhaps a top-level sort and/or LIMIT. Note that any
1879 * special work for recursive unions is the responsibility of
1880 * plan_set_operations.
1882 current_rel = plan_set_operations(root);
1885 * We should not need to call preprocess_targetlist, since we must be
1886 * in a SELECT query node. Instead, use the processed_tlist returned
1887 * by plan_set_operations (since this tells whether it returned any
1888 * resjunk columns!), and transfer any sort key information from the
1889 * original tlist.
1891 Assert(parse->commandType == CMD_SELECT);
1893 /* for safety, copy processed_tlist instead of modifying in-place */
1894 root->processed_tlist =
1895 postprocess_setop_tlist(copyObject(root->processed_tlist),
1896 parse->targetList);
1898 /* Also extract the PathTarget form of the setop result tlist */
1899 final_target = current_rel->cheapest_total_path->pathtarget;
1901 /* And check whether it's parallel safe */
1902 final_target_parallel_safe =
1903 is_parallel_safe(root, (Node *) final_target->exprs);
1905 /* The setop result tlist couldn't contain any SRFs */
1906 Assert(!parse->hasTargetSRFs);
1907 final_targets = final_targets_contain_srfs = NIL;
1910 * Can't handle FOR [KEY] UPDATE/SHARE here (parser should have
1911 * checked already, but let's make sure).
1913 if (parse->rowMarks)
1914 ereport(ERROR,
1915 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1916 /*------
1917 translator: %s is a SQL row locking clause such as FOR UPDATE */
1918 errmsg("%s is not allowed with UNION/INTERSECT/EXCEPT",
1919 LCS_asString(linitial_node(RowMarkClause,
1920 parse->rowMarks)->strength))));
1923 * Calculate pathkeys that represent result ordering requirements
1925 Assert(parse->distinctClause == NIL);
1926 root->sort_pathkeys = make_pathkeys_for_sortclauses(root,
1927 parse->sortClause,
1928 root->processed_tlist);
1930 else
1932 /* No set operations, do regular planning */
1933 PathTarget *sort_input_target;
1934 List *sort_input_targets;
1935 List *sort_input_targets_contain_srfs;
1936 bool sort_input_target_parallel_safe;
1937 PathTarget *grouping_target;
1938 List *grouping_targets;
1939 List *grouping_targets_contain_srfs;
1940 bool grouping_target_parallel_safe;
1941 PathTarget *scanjoin_target;
1942 List *scanjoin_targets;
1943 List *scanjoin_targets_contain_srfs;
1944 bool scanjoin_target_parallel_safe;
1945 bool scanjoin_target_same_exprs;
1946 bool have_grouping;
1947 AggClauseCosts agg_costs;
1948 WindowFuncLists *wflists = NULL;
1949 List *activeWindows = NIL;
1950 grouping_sets_data *gset_data = NULL;
1951 standard_qp_extra qp_extra;
1953 /* A recursive query should always have setOperations */
1954 Assert(!root->hasRecursion);
1956 /* Preprocess grouping sets and GROUP BY clause, if any */
1957 if (parse->groupingSets)
1959 gset_data = preprocess_grouping_sets(root);
1961 else
1963 /* Preprocess regular GROUP BY clause, if any */
1964 if (parse->groupClause)
1965 parse->groupClause = preprocess_groupclause(root, NIL);
1969 * Preprocess targetlist. Note that much of the remaining planning
1970 * work will be done with the PathTarget representation of tlists, but
1971 * we must also maintain the full representation of the final tlist so
1972 * that we can transfer its decoration (resnames etc) to the topmost
1973 * tlist of the finished Plan. This is kept in processed_tlist.
1975 root->processed_tlist = preprocess_targetlist(root);
1978 * Collect statistics about aggregates for estimating costs, and mark
1979 * all the aggregates with resolved aggtranstypes. We must do this
1980 * before slicing and dicing the tlist into various pathtargets, else
1981 * some copies of the Aggref nodes might escape being marked with the
1982 * correct transtypes.
1984 * Note: currently, we do not detect duplicate aggregates here. This
1985 * may result in somewhat-overestimated cost, which is fine for our
1986 * purposes since all Paths will get charged the same. But at some
1987 * point we might wish to do that detection in the planner, rather
1988 * than during executor startup.
1990 MemSet(&agg_costs, 0, sizeof(AggClauseCosts));
1991 if (parse->hasAggs)
1993 get_agg_clause_costs(root, (Node *) root->processed_tlist,
1994 AGGSPLIT_SIMPLE, &agg_costs);
1995 get_agg_clause_costs(root, parse->havingQual, AGGSPLIT_SIMPLE,
1996 &agg_costs);
2000 * Locate any window functions in the tlist. (We don't need to look
2001 * anywhere else, since expressions used in ORDER BY will be in there
2002 * too.) Note that they could all have been eliminated by constant
2003 * folding, in which case we don't need to do any more work.
2005 if (parse->hasWindowFuncs)
2007 wflists = find_window_functions((Node *) root->processed_tlist,
2008 list_length(parse->windowClause));
2009 if (wflists->numWindowFuncs > 0)
2010 activeWindows = select_active_windows(root, wflists);
2011 else
2012 parse->hasWindowFuncs = false;
2016 * Preprocess MIN/MAX aggregates, if any. Note: be careful about
2017 * adding logic between here and the query_planner() call. Anything
2018 * that is needed in MIN/MAX-optimizable cases will have to be
2019 * duplicated in planagg.c.
2021 if (parse->hasAggs)
2022 preprocess_minmax_aggregates(root);
2025 * Figure out whether there's a hard limit on the number of rows that
2026 * query_planner's result subplan needs to return. Even if we know a
2027 * hard limit overall, it doesn't apply if the query has any
2028 * grouping/aggregation operations, or SRFs in the tlist.
2030 if (parse->groupClause ||
2031 parse->groupingSets ||
2032 parse->distinctClause ||
2033 parse->hasAggs ||
2034 parse->hasWindowFuncs ||
2035 parse->hasTargetSRFs ||
2036 root->hasHavingQual)
2037 root->limit_tuples = -1.0;
2038 else
2039 root->limit_tuples = limit_tuples;
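/*
 * Worked example: "SELECT * FROM t ORDER BY x LIMIT 10" lets the
 * scan/join step stop after 10 rows (enabling a bounded sort), but
 * "SELECT x, count(*) FROM t GROUP BY x LIMIT 10" does not, since all
 * input rows may be needed to compute even the first ten groups; hence
 * limit_tuples is reset to -1 (no limit) in that case.  Table and
 * column names are illustrative only.
 */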
2041 /* Set up data needed by standard_qp_callback */
2042 qp_extra.activeWindows = activeWindows;
2043 qp_extra.groupClause = (gset_data
2044 ? (gset_data->rollups ? linitial_node(RollupData, gset_data->rollups)->groupClause : NIL)
2045 : parse->groupClause);
2048 * Generate the best unsorted and presorted paths for the scan/join
2049 * portion of this Query, ie the processing represented by the
2050 * FROM/WHERE clauses. (Note there may not be any presorted paths.)
2051 * We also generate (in standard_qp_callback) pathkey representations
2052 * of the query's sort clause, distinct clause, etc.
2054 current_rel = query_planner(root, standard_qp_callback, &qp_extra);
2057 * Convert the query's result tlist into PathTarget format.
2059 * Note: this cannot be done before query_planner() has performed
2060 * appendrel expansion, because that might add resjunk entries to
2061 * root->processed_tlist. Waiting till afterwards is also helpful
2062 * because the target width estimates can use per-Var width numbers
2063 * that were obtained within query_planner().
2065 final_target = create_pathtarget(root, root->processed_tlist);
2066 final_target_parallel_safe =
2067 is_parallel_safe(root, (Node *) final_target->exprs);
2070 * If ORDER BY was given, consider whether we should use a post-sort
2071 * projection, and compute the adjusted target for preceding steps if
2072 * so.
2074 if (parse->sortClause)
2076 sort_input_target = make_sort_input_target(root,
2077 final_target,
2078 &have_postponed_srfs);
2079 sort_input_target_parallel_safe =
2080 is_parallel_safe(root, (Node *) sort_input_target->exprs);
2082 else
2084 sort_input_target = final_target;
2085 sort_input_target_parallel_safe = final_target_parallel_safe;
2089 * If we have window functions to deal with, the output from any
2090 * grouping step needs to be what the window functions want;
2091 * otherwise, it should be sort_input_target.
2093 if (activeWindows)
2095 grouping_target = make_window_input_target(root,
2096 final_target,
2097 activeWindows);
2098 grouping_target_parallel_safe =
2099 is_parallel_safe(root, (Node *) grouping_target->exprs);
2101 else
2103 grouping_target = sort_input_target;
2104 grouping_target_parallel_safe = sort_input_target_parallel_safe;
2108 * If we have grouping or aggregation to do, the topmost scan/join
2109 * plan node must emit what the grouping step wants; otherwise, it
2110 * should emit grouping_target.
2112 have_grouping = (parse->groupClause || parse->groupingSets ||
2113 parse->hasAggs || root->hasHavingQual);
2114 if (have_grouping)
2116 scanjoin_target = make_group_input_target(root, final_target);
2117 scanjoin_target_parallel_safe =
2118 is_parallel_safe(root, (Node *) scanjoin_target->exprs);
2120 else
2122 scanjoin_target = grouping_target;
2123 scanjoin_target_parallel_safe = grouping_target_parallel_safe;
2127 * If there are any SRFs in the targetlist, we must separate each of
2128 * these PathTargets into SRF-computing and SRF-free targets. Replace
2129 * each of the named targets with a SRF-free version, and remember the
2130 * list of additional projection steps we need to add afterwards.
2132 if (parse->hasTargetSRFs)
2134 /* final_target doesn't recompute any SRFs in sort_input_target */
2135 split_pathtarget_at_srfs(root, final_target, sort_input_target,
2136 &final_targets,
2137 &final_targets_contain_srfs);
2138 final_target = linitial_node(PathTarget, final_targets);
2139 Assert(!linitial_int(final_targets_contain_srfs));
2140 /* likewise for sort_input_target vs. grouping_target */
2141 split_pathtarget_at_srfs(root, sort_input_target, grouping_target,
2142 &sort_input_targets,
2143 &sort_input_targets_contain_srfs);
2144 sort_input_target = linitial_node(PathTarget, sort_input_targets);
2145 Assert(!linitial_int(sort_input_targets_contain_srfs));
2146 /* likewise for grouping_target vs. scanjoin_target */
2147 split_pathtarget_at_srfs(root, grouping_target, scanjoin_target,
2148 &grouping_targets,
2149 &grouping_targets_contain_srfs);
2150 grouping_target = linitial_node(PathTarget, grouping_targets);
2151 Assert(!linitial_int(grouping_targets_contain_srfs));
2152 /* scanjoin_target will not have any SRFs precomputed for it */
2153 split_pathtarget_at_srfs(root, scanjoin_target, NULL,
2154 &scanjoin_targets,
2155 &scanjoin_targets_contain_srfs);
2156 scanjoin_target = linitial_node(PathTarget, scanjoin_targets);
2157 Assert(!linitial_int(scanjoin_targets_contain_srfs));
2159 else
2161 /* initialize lists; for most of these, dummy values are OK */
2162 final_targets = final_targets_contain_srfs = NIL;
2163 sort_input_targets = sort_input_targets_contain_srfs = NIL;
2164 grouping_targets = grouping_targets_contain_srfs = NIL;
2165 scanjoin_targets = list_make1(scanjoin_target);
2166 scanjoin_targets_contain_srfs = NIL;
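/*
 * Worked example of the SRF split above: for
 * "SELECT x, generate_series(1, y) FROM tab ORDER BY x", the target
 * containing the set-returning function is separated into an SRF-free
 * level {x, y}, which the scan/join paths compute, and an SRF-computing
 * level {x, generate_series(1, y)}, which adjust_paths_for_srfs() later
 * adds back as a separate projection step.  Names are illustrative
 * only.
 */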
2169 /* Apply scan/join target. */
2170 scanjoin_target_same_exprs = list_length(scanjoin_targets) == 1
2171 && equal(scanjoin_target->exprs, current_rel->reltarget->exprs);
2172 apply_scanjoin_target_to_paths(root, current_rel, scanjoin_targets,
2173 scanjoin_targets_contain_srfs,
2174 scanjoin_target_parallel_safe,
2175 scanjoin_target_same_exprs);
2178 * Save the various upper-rel PathTargets we just computed into
2179 * root->upper_targets[]. The core code doesn't use this, but it
2180 * provides a convenient place for extensions to get at the info. For
2181 * consistency, we save all the intermediate targets, even though some
2182 * of the corresponding upperrels might not be needed for this query.
2184 root->upper_targets[UPPERREL_FINAL] = final_target;
2185 root->upper_targets[UPPERREL_ORDERED] = final_target;
2186 root->upper_targets[UPPERREL_DISTINCT] = sort_input_target;
2187 root->upper_targets[UPPERREL_WINDOW] = sort_input_target;
2188 root->upper_targets[UPPERREL_GROUP_AGG] = grouping_target;
2191 * If we have grouping and/or aggregation, consider ways to implement
2192 * that. We build a new upperrel representing the output of this
2193 * phase.
2195 if (have_grouping)
2197 current_rel = create_grouping_paths(root,
2198 current_rel,
2199 grouping_target,
2200 grouping_target_parallel_safe,
2201 &agg_costs,
2202 gset_data);
2203 /* Fix things up if grouping_target contains SRFs */
2204 if (parse->hasTargetSRFs)
2205 adjust_paths_for_srfs(root, current_rel,
2206 grouping_targets,
2207 grouping_targets_contain_srfs);
2211 * If we have window functions, consider ways to implement those. We
2212 * build a new upperrel representing the output of this phase.
2214 if (activeWindows)
2216 current_rel = create_window_paths(root,
2217 current_rel,
2218 grouping_target,
2219 sort_input_target,
2220 sort_input_target_parallel_safe,
2221 wflists,
2222 activeWindows);
2223 /* Fix things up if sort_input_target contains SRFs */
2224 if (parse->hasTargetSRFs)
2225 adjust_paths_for_srfs(root, current_rel,
2226 sort_input_targets,
2227 sort_input_targets_contain_srfs);
2231 * If there is a DISTINCT clause, consider ways to implement that. We
2232 * build a new upperrel representing the output of this phase.
2234 if (parse->distinctClause)
2236 current_rel = create_distinct_paths(root,
2237 current_rel);
2239 } /* end of if (setOperations) */
2242 * If ORDER BY was given, consider ways to implement that, and generate a
2243 * new upperrel containing only paths that emit the correct ordering and
2244 * project the correct final_target. We can apply the original
2245 * limit_tuples limit in sort costing here, but only if there are no
2246 * postponed SRFs.
2248 if (parse->sortClause)
2250 current_rel = create_ordered_paths(root,
2251 current_rel,
2252 final_target,
2253 final_target_parallel_safe,
2254 have_postponed_srfs ? -1.0 :
2255 limit_tuples);
2256 /* Fix things up if final_target contains SRFs */
2257 if (parse->hasTargetSRFs)
2258 adjust_paths_for_srfs(root, current_rel,
2259 final_targets,
2260 final_targets_contain_srfs);
2264 * Now we are prepared to build the final-output upperrel.
2266 final_rel = fetch_upper_rel(root, UPPERREL_FINAL, NULL);
2269 * If the input rel is marked consider_parallel and there's nothing that's
2270 * not parallel-safe in the LIMIT clause, then the final_rel can be marked
2271 * consider_parallel as well. Note that if the query has rowMarks or is
2272 * not a SELECT, consider_parallel will be false for every relation in the
2273 * query.
2275 if (current_rel->consider_parallel &&
2276 is_parallel_safe(root, parse->limitOffset) &&
2277 is_parallel_safe(root, parse->limitCount))
2278 final_rel->consider_parallel = true;
2281 * If the current_rel belongs to a single FDW, so does the final_rel.
2283 final_rel->serverid = current_rel->serverid;
2284 final_rel->userid = current_rel->userid;
2285 final_rel->useridiscurrent = current_rel->useridiscurrent;
2286 final_rel->fdwroutine = current_rel->fdwroutine;
2289 * Generate paths for the final_rel. Insert all surviving paths, with
2290 * LockRows, Limit, and/or ModifyTable steps added if needed.
2292 foreach(lc, current_rel->pathlist)
2294 Path *path = (Path *) lfirst(lc);
2297 * If there is a FOR [KEY] UPDATE/SHARE clause, add the LockRows node.
2298 * (Note: we intentionally test parse->rowMarks not root->rowMarks
2299 * here. If there are only non-locking rowmarks, they should be
2300 * handled by the ModifyTable node instead. However, root->rowMarks
2301 * is what goes into the LockRows node.)
2303 if (parse->rowMarks)
2305 path = (Path *) create_lockrows_path(root, final_rel, path,
2306 root->rowMarks,
2307 assign_special_exec_param(root));
2311 * If there is a LIMIT/OFFSET clause, add the LIMIT node.
2313 if (limit_needed(parse))
2315 path = (Path *) create_limit_path(root, final_rel, path,
2316 parse->limitOffset,
2317 parse->limitCount,
2318 offset_est, count_est);
2322 * If this is an INSERT/UPDATE/DELETE, and we're not being called from
2323 * inheritance_planner, add the ModifyTable node.
2325 if (parse->commandType != CMD_SELECT && !inheritance_update)
2327 Index rootRelation;
2328 List *withCheckOptionLists;
2329 List *returningLists;
2330 List *rowMarks;
2333 * If target is a partition root table, we need to mark the
2334 * ModifyTable node appropriately for that.
2336 if (rt_fetch(parse->resultRelation, parse->rtable)->relkind ==
2337 RELKIND_PARTITIONED_TABLE)
2338 rootRelation = parse->resultRelation;
2339 else
2340 rootRelation = 0;
2343 * Set up the WITH CHECK OPTION and RETURNING lists-of-lists, if
2344 * needed.
2346 if (parse->withCheckOptions)
2347 withCheckOptionLists = list_make1(parse->withCheckOptions);
2348 else
2349 withCheckOptionLists = NIL;
2351 if (parse->returningList)
2352 returningLists = list_make1(parse->returningList);
2353 else
2354 returningLists = NIL;
2357 * If there was a FOR [KEY] UPDATE/SHARE clause, the LockRows node
2358 * will have dealt with fetching non-locked marked rows, else we
2359 * need to have ModifyTable do that.
2361 if (parse->rowMarks)
2362 rowMarks = NIL;
2363 else
2364 rowMarks = root->rowMarks;
2366 path = (Path *)
2367 create_modifytable_path(root, final_rel,
2368 parse->commandType,
2369 parse->canSetTag,
2370 parse->resultRelation,
2371 rootRelation,
2372 false,
2373 list_make1_int(parse->resultRelation),
2374 list_make1(path),
2375 list_make1(root),
2376 withCheckOptionLists,
2377 returningLists,
2378 rowMarks,
2379 parse->onConflict,
2380 assign_special_exec_param(root));
2383 /* And shove it into final_rel */
2384 add_path(final_rel, path);
2388 * Generate partial paths for final_rel, too, if outer query levels might
2389 * be able to make use of them.
2391 if (final_rel->consider_parallel && root->query_level > 1 &&
2392 !limit_needed(parse))
2394 Assert(!parse->rowMarks && parse->commandType == CMD_SELECT);
2395 foreach(lc, current_rel->partial_pathlist)
2397 Path *partial_path = (Path *) lfirst(lc);
2399 add_partial_path(final_rel, partial_path);
2403 extra.limit_needed = limit_needed(parse);
2404 extra.limit_tuples = limit_tuples;
2405 extra.count_est = count_est;
2406 extra.offset_est = offset_est;
2409 * If there is an FDW that's responsible for all baserels of the query,
2410 * let it consider adding ForeignPaths.
2412 if (final_rel->fdwroutine &&
2413 final_rel->fdwroutine->GetForeignUpperPaths)
2414 final_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_FINAL,
2415 current_rel, final_rel,
2416 &extra);
2418 /* Let extensions possibly add some more paths */
2419 if (create_upper_paths_hook)
2420 (*create_upper_paths_hook) (root, UPPERREL_FINAL,
2421 current_rel, final_rel, &extra);
2423 /* Note: currently, we leave it to callers to do set_cheapest() */
2427 * Do preprocessing for groupingSets clause and related data. This handles the
2428 * preliminary steps of expanding the grouping sets, organizing them into lists
2429 * of rollups, and preparing annotations which will later be filled in with
2430 * size estimates.
2432 static grouping_sets_data *
2433 preprocess_grouping_sets(PlannerInfo *root)
2435 Query *parse = root->parse;
2436 List *sets;
2437 int maxref = 0;
2438 ListCell *lc;
2439 ListCell *lc_set;
2440 grouping_sets_data *gd = palloc0(sizeof(grouping_sets_data));
2442 parse->groupingSets = expand_grouping_sets(parse->groupingSets, -1);
2444 gd->any_hashable = false;
2445 gd->unhashable_refs = NULL;
2446 gd->unsortable_refs = NULL;
2447 gd->unsortable_sets = NIL;
2449 if (parse->groupClause)
2453 foreach(lc, parse->groupClause)
2455 SortGroupClause *gc = lfirst_node(SortGroupClause, lc);
2456 Index ref = gc->tleSortGroupRef;
2458 if (ref > maxref)
2459 maxref = ref;
2461 if (!gc->hashable)
2462 gd->unhashable_refs = bms_add_member(gd->unhashable_refs, ref);
2464 if (!OidIsValid(gc->sortop))
2465 gd->unsortable_refs = bms_add_member(gd->unsortable_refs, ref);
2469 /* Allocate workspace array for remapping */
2470 gd->tleref_to_colnum_map = (int *) palloc((maxref + 1) * sizeof(int));
2473 * If we have any unsortable sets, we must extract them before trying to
2474 * prepare rollups. Unsortable sets don't go through
2475 * reorder_grouping_sets, so we must apply the GroupingSetData annotation
2478 if (!bms_is_empty(gd->unsortable_refs))
2480 List *sortable_sets = NIL;
2482 foreach(lc, parse->groupingSets)
2484 List *gset = (List *) lfirst(lc);
2486 if (bms_overlap_list(gd->unsortable_refs, gset))
2488 GroupingSetData *gs = makeNode(GroupingSetData);
2490 gs->set = gset;
2491 gd->unsortable_sets = lappend(gd->unsortable_sets, gs);
2494 * We must enforce here that an unsortable set is hashable;
2495 * later code assumes this. Parse analysis only checks that
2496 * every individual column is either hashable or sortable.
2498 * Note that passing this test doesn't guarantee we can
2499 * generate a plan; there might be other showstoppers.
2501 if (bms_overlap_list(gd->unhashable_refs, gset))
2502 ereport(ERROR,
2503 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
2504 errmsg("could not implement GROUP BY"),
2505 errdetail("Some of the datatypes only support hashing, while others only support sorting.")));
2508 sortable_sets = lappend(sortable_sets, gset);
2511 if (sortable_sets)
2512 sets = extract_rollup_sets(sortable_sets);
2515 else
2517 sets = extract_rollup_sets(parse->groupingSets);
2519 foreach(lc_set, sets)
2521 List *current_sets = (List *) lfirst(lc_set);
2522 RollupData *rollup = makeNode(RollupData);
2523 GroupingSetData *gs;
2526 * Reorder the current list of grouping sets into correct prefix
2527 * order. If only one aggregation pass is needed, try to make the
2528 * list match the ORDER BY clause; if more than one pass is needed, we
2529 * don't bother with that.
2531 * Note that this reorders the sets from smallest-member-first to
2532 * largest-member-first, and applies the GroupingSetData annotations,
2533 * though the data will be filled in later.
2535 current_sets = reorder_grouping_sets(current_sets,
2536 (list_length(sets) == 1
2537 ? parse->sortClause
2538 : NIL));
2541 * Get the initial (and therefore largest) grouping set.
2543 gs = linitial_node(GroupingSetData, current_sets);
2546 * Order the groupClause appropriately. If the first grouping set is
2547 * empty, then the groupClause must also be empty; otherwise we have
2548 * to force the groupClause to match that grouping set's order.
2550 * (The first grouping set can be empty even though parse->groupClause
2551 * is not empty only if all non-empty grouping sets are unsortable.
2552 * The groupClauses for hashed grouping sets are built later on.)
2554 if (gs->set)
2555 rollup->groupClause = preprocess_groupclause(root, gs->set);
2556 else
2557 rollup->groupClause = NIL;
2560 * Is it hashable? We pretend empty sets are hashable even though we
2561 * actually force them not to be hashed later. But don't bother if
2562 * there's nothing but empty sets (since in that case we can't hash
2563 * anything).
2565 if (gs->set &&
2566 !bms_overlap_list(gd->unhashable_refs, gs->set))
2568 rollup->hashable = true;
2569 gd->any_hashable = true;
2573 * Now that we've pinned down an order for the groupClause for this
2574 * list of grouping sets, we need to remap the entries in the grouping
2575 * sets from sortgrouprefs to plain indices (0-based) into the
2576 * groupClause for this collection of grouping sets. We keep the
2577 * original form for later use, though.
2579 rollup->gsets = remap_to_groupclause_idx(rollup->groupClause,
2580 current_sets,
2581 gd->tleref_to_colnum_map);
2582 rollup->gsets_data = current_sets;
2584 gd->rollups = lappend(gd->rollups, rollup);
2587 if (gd->unsortable_sets)
2590 * We have not yet pinned down a groupclause for this, but we will
2591 * need index-based lists for estimation purposes. Construct
2592 * hash_sets_idx based on the entire original groupclause for now.
2594 gd->hash_sets_idx = remap_to_groupclause_idx(parse->groupClause,
2595 gd->unsortable_sets,
2596 gd->tleref_to_colnum_map);
2597 gd->any_hashable = true;
2600 return gd;
2604 * Given a groupclause and a list of GroupingSetData, return equivalent sets
2605 * (without annotation) mapped to indexes into the given groupclause.
2607 static List *
2608 remap_to_groupclause_idx(List *groupClause,
2609 List *gsets,
2610 int *tleref_to_colnum_map)
2612 int ref = 0;
2613 List *result = NIL;
2614 ListCell *lc;
2616 foreach(lc, groupClause)
2618 SortGroupClause *gc = lfirst_node(SortGroupClause, lc);
2620 tleref_to_colnum_map[gc->tleSortGroupRef] = ref++;
2623 foreach(lc, gsets)
2625 List *set = NIL;
2626 ListCell *lc2;
2627 GroupingSetData *gs = lfirst_node(GroupingSetData, lc);
2629 foreach(lc2, gs->set)
2631 set = lappend_int(set, tleref_to_colnum_map[lfirst_int(lc2)]);
2634 result = lappend(result, set);
2637 return result;
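/*
 * Worked example: if the groupClause entries carry tleSortGroupRefs
 * (5, 3, 7), the first loop fills tleref_to_colnum_map[5] = 0, [3] = 1
 * and [7] = 2, so a grouping set listed as (7, 5) comes back as the
 * index list (2, 0).  The numbers are illustrative only.
 */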
2642 * preprocess_rowmarks - set up PlanRowMarks if needed
2644 static void
2645 preprocess_rowmarks(PlannerInfo *root)
2647 Query *parse = root->parse;
2648 Relids rels;
2649 List *prowmarks = NIL;
2650 ListCell *l;
2651 int i;
2653 if (parse->rowMarks)
2656 * We've got trouble if FOR [KEY] UPDATE/SHARE appears inside
2657 * grouping, since grouping renders a reference to individual tuple
2658 * CTIDs invalid. This is also checked at parse time, but that's
2659 * insufficient because of rule substitution, query pullup, etc.
2661 CheckSelectLocking(parse, linitial_node(RowMarkClause,
2662 parse->rowMarks)->strength);
2664 else
2667 * We only need rowmarks for UPDATE, DELETE, or FOR [KEY]
2668 * UPDATE/SHARE.
2670 if (parse->commandType != CMD_UPDATE &&
2671 parse->commandType != CMD_DELETE)
2672 return;
2676 * We need to have rowmarks for all base relations except the target. We
2677 * make a bitmapset of all base rels and then remove the items we don't
2678 * need or have FOR [KEY] UPDATE/SHARE marks for.
2680 rels = get_relids_in_jointree((Node *) parse->jointree, false);
2681 if (parse->resultRelation)
2682 rels = bms_del_member(rels, parse->resultRelation);
2685 * Convert RowMarkClauses to PlanRowMark representation.
2688 foreach(l, parse->rowMarks)
2690 RowMarkClause *rc = lfirst_node(RowMarkClause, l);
2691 RangeTblEntry *rte = rt_fetch(rc->rti, parse->rtable);
2692 PlanRowMark *newrc;
2695 * Currently, it is syntactically impossible to have FOR UPDATE et al
2696 * applied to an update/delete target rel. If that ever becomes
2697 * possible, we should drop the target from the PlanRowMark list.
2699 Assert(rc->rti != parse->resultRelation);
2702 * Ignore RowMarkClauses for subqueries; they aren't real tables and
2703 * can't support true locking. Subqueries that got flattened into the
2704 * main query should be ignored completely. Any that didn't will get
2705 * ROW_MARK_COPY items in the next loop.
2707 if (rte->rtekind != RTE_RELATION)
2708 continue;
2710 rels = bms_del_member(rels, rc->rti);
2712 newrc = makeNode(PlanRowMark);
2713 newrc->rti = newrc->prti = rc->rti;
2714 newrc->rowmarkId = ++(root->glob->lastRowMarkId);
2715 newrc->markType = select_rowmark_type(rte, rc->strength);
2716 newrc->allMarkTypes = (1 << newrc->markType);
2717 newrc->strength = rc->strength;
2718 newrc->waitPolicy = rc->waitPolicy;
2719 newrc->isParent = false;
2721 prowmarks = lappend(prowmarks, newrc);
2725 * Now, add rowmarks for any non-target, non-locked base relations.
2727 i = 0;
2728 foreach(l, parse->rtable)
2730 RangeTblEntry *rte = lfirst_node(RangeTblEntry, l);
2731 PlanRowMark *newrc;
2733 i++;
2734 if (!bms_is_member(i, rels))
2735 continue;
2737 newrc = makeNode(PlanRowMark);
2738 newrc->rti = newrc->prti = i;
2739 newrc->rowmarkId = ++(root->glob->lastRowMarkId);
2740 newrc->markType = select_rowmark_type(rte, LCS_NONE);
2741 newrc->allMarkTypes = (1 << newrc->markType);
2742 newrc->strength = LCS_NONE;
2743 newrc->waitPolicy = LockWaitBlock; /* doesn't matter */
2744 newrc->isParent = false;
2746 prowmarks = lappend(prowmarks, newrc);
2749 root->rowMarks = prowmarks;
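/*
 * Worked example: for "SELECT * FROM a JOIN b ON ... FOR UPDATE OF a",
 * the first loop gives relation a a locking PlanRowMark
 * (ROW_MARK_EXCLUSIVE), while b, referenced but not locked, gets a
 * non-locking mark from the second loop (ROW_MARK_REFERENCE for a
 * regular table) so its rows can be re-fetched when needed.  Table
 * names are illustrative only.
 */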
2753 * Select RowMarkType to use for a given table
2755 RowMarkType
2756 select_rowmark_type(RangeTblEntry *rte, LockClauseStrength strength)
2758 if (rte->rtekind != RTE_RELATION)
2760 /* If it's not a table at all, use ROW_MARK_COPY */
2761 return ROW_MARK_COPY;
2763 else if (rte->relkind == RELKIND_FOREIGN_TABLE)
2765 /* Let the FDW select the rowmark type, if it wants to */
2766 FdwRoutine *fdwroutine = GetFdwRoutineByRelId(rte->relid);
2768 if (fdwroutine->GetForeignRowMarkType != NULL)
2769 return fdwroutine->GetForeignRowMarkType(rte, strength);
2770 /* Otherwise, use ROW_MARK_COPY by default */
2771 return ROW_MARK_COPY;
2775 /* Regular table, apply the appropriate lock type */
2776 switch (strength)
2778 case LCS_NONE:
2781 * We don't need a tuple lock, only the ability to re-fetch
2782 * the row.
2784 return ROW_MARK_REFERENCE;
2786 case LCS_FORKEYSHARE:
2787 return ROW_MARK_KEYSHARE;
2789 case LCS_FORSHARE:
2790 return ROW_MARK_SHARE;
2792 case LCS_FORNOKEYUPDATE:
2793 return ROW_MARK_NOKEYEXCLUSIVE;
2795 case LCS_FORUPDATE:
2796 return ROW_MARK_EXCLUSIVE;
2799 elog(ERROR, "unrecognized LockClauseStrength %d", (int) strength);
2800 return ROW_MARK_EXCLUSIVE; /* keep compiler quiet */
2805 * preprocess_limit - do pre-estimation for LIMIT and/or OFFSET clauses
2807 * We try to estimate the values of the LIMIT/OFFSET clauses, and pass the
2808 * results back in *count_est and *offset_est. These variables are set to
2809 * 0 if the corresponding clause is not present, and -1 if it's present
2810 * but we couldn't estimate the value for it. (The "0" convention is OK
2811 * for OFFSET but a little bit bogus for LIMIT: effectively we estimate
2812 * LIMIT 0 as though it were LIMIT 1. But this is in line with the planner's
2813 * usual practice of never estimating less than one row.) These values will
2814 * be passed to create_limit_path, which see if you change this code.
2816 * The return value is the suitably adjusted tuple_fraction to use for
2817 * planning the query. This adjustment is not overridable, since it reflects
2818 * plan actions that grouping_planner() will certainly take, not assumptions
2819 * that might get overridden later.
2821 static double
2822 preprocess_limit(PlannerInfo *root, double tuple_fraction,
2823 int64 *offset_est, int64 *count_est)
2825 Query *parse = root->parse;
2826 Node *est;
2827 double limit_fraction;
2829 /* Should not be called unless LIMIT or OFFSET */
2830 Assert(parse->limitCount || parse->limitOffset);
2833 * Try to obtain the clause values. We use estimate_expression_value
2834 * primarily because it can sometimes do something useful with Params.
2836 if (parse->limitCount)
2838 est = estimate_expression_value(root, parse->limitCount);
2839 if (est && IsA(est, Const))
2841 if (((Const *) est)->constisnull)
2843 /* NULL indicates LIMIT ALL, ie, no limit */
2844 *count_est = 0; /* treat as not present */
2847 else
2848 *count_est = DatumGetInt64(((Const *) est)->constvalue);
2849 if (*count_est <= 0)
2850 *count_est = 1; /* force to at least 1 */
2853 else
2854 *count_est = -1; /* can't estimate */
2856 else
2857 *count_est = 0; /* not present */
2859 if (parse->limitOffset)
2861 est = estimate_expression_value(root, parse->limitOffset);
2862 if (est && IsA(est, Const))
2864 if (((Const *) est)->constisnull)
2866 /* Treat NULL as no offset; the executor will too */
2867 *offset_est = 0; /* treat as not present */
2870 else
2871 *offset_est = DatumGetInt64(((Const *) est)->constvalue);
2872 if (*offset_est < 0)
2873 *offset_est = 0; /* treat as not present */
2876 else
2877 *offset_est = -1; /* can't estimate */
2879 else
2880 *offset_est = 0; /* not present */
2882 if (*count_est != 0)
2885 * A LIMIT clause limits the absolute number of tuples returned.
2886 * However, if it's not a constant LIMIT then we have to guess; for
2887 * lack of a better idea, assume 10% of the plan's result is wanted.
2889 if (*count_est < 0 || *offset_est < 0)
2891 /* LIMIT or OFFSET is an expression ... punt ... */
2892 limit_fraction = 0.10;
2896 /* LIMIT (plus OFFSET, if any) is max number of tuples needed */
2897 limit_fraction = (double) *count_est + (double) *offset_est;
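/*
 * Worked example: "LIMIT 10 OFFSET 20" yields *count_est = 10 and
 * *offset_est = 20, so limit_fraction = 30.0 and, absent any caller
 * hint, the query is planned as though only 30 tuples will be fetched;
 * "LIMIT $1" instead yields *count_est = -1 and falls back to the 10%
 * rule above.
 */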
2901 * If we have absolute limits from both caller and LIMIT, use the
2902 * smaller value; likewise if they are both fractional. If one is
2903 * fractional and the other absolute, we can't easily determine which
2904 * is smaller, but we use the heuristic that the absolute will usually
2907 if (tuple_fraction >= 1.0)
2909 if (limit_fraction >= 1.0)
2912 tuple_fraction = Min(tuple_fraction, limit_fraction);
2916 /* caller absolute, limit fractional; use caller's value */
2919 else if (tuple_fraction > 0.0)
2921 if (limit_fraction >= 1.0)
2923 /* caller fractional, limit absolute; use limit */
2924 tuple_fraction = limit_fraction;
2928 /* both fractional */
2929 tuple_fraction = Min(tuple_fraction, limit_fraction);
2932 else
2934 /* no info from caller, just use limit */
2935 tuple_fraction = limit_fraction;
2938 else if (*offset_est != 0 && tuple_fraction > 0.0)
2941 * We have an OFFSET but no LIMIT. This acts entirely differently
2942 * from the LIMIT case: here, we need to increase rather than decrease
2943 * the caller's tuple_fraction, because the OFFSET acts to cause more
2944 * tuples to be fetched instead of fewer. This only matters if we got
2945 * a tuple_fraction > 0, however.
2947 * As above, use 10% if OFFSET is present but unestimatable.
2949 if (*offset_est < 0)
2950 limit_fraction = 0.10;
2952 limit_fraction = (double) *offset_est;
2955 * If we have absolute counts from both caller and OFFSET, add them
2956 * together; likewise if they are both fractional. If one is
2957 * fractional and the other absolute, we want to take the larger, and
2958 * we heuristically assume that's the fractional one.
2960 if (tuple_fraction >= 1.0)
2962 if (limit_fraction >= 1.0)
2964 /* both absolute, so add them together */
2965 tuple_fraction += limit_fraction;
2967 else
2969 /* caller absolute, limit fractional; use limit */
2970 tuple_fraction = limit_fraction;
2975 if (limit_fraction >= 1.0)
2977 /* caller fractional, limit absolute; use caller's value */
2979 else
2981 /* both fractional, so add them together */
2982 tuple_fraction += limit_fraction;
2983 if (tuple_fraction >= 1.0)
2984 tuple_fraction = 0.0; /* assume fetch all */
2989 return tuple_fraction;
2993 * limit_needed - do we actually need a Limit plan node?
2995 * If we have constant-zero OFFSET and constant-null LIMIT, we can skip adding
2996 * a Limit node. This is worth checking for because "OFFSET 0" is a common
2997 * locution for an optimization fence. (Because other places in the planner
2998 * merely check whether parse->limitOffset isn't NULL, it will still work as
2999 an optimization fence --- we're just suppressing unnecessary run-time
3000 overhead.)
3002 * This might look like it could be merged into preprocess_limit, but there's
3003 * a key distinction: here we need hard constants in OFFSET/LIMIT, whereas
3004 * in preprocess_limit it's good enough to consider estimated values.
3006 bool
3007 limit_needed(Query *parse)
3009 Node *node;
3011 node = parse->limitCount;
3012 if (node)
3014 if (IsA(node, Const))
3016 /* NULL indicates LIMIT ALL, ie, no limit */
3017 if (!((Const *) node)->constisnull)
3018 return true; /* LIMIT with a constant value */
3020 else
3021 return true; /* non-constant LIMIT */
3024 node = parse->limitOffset;
3025 if (node)
3027 if (IsA(node, Const))
3029 /* Treat NULL as no offset; the executor would too */
3030 if (!((Const *) node)->constisnull)
3032 int64 offset = DatumGetInt64(((Const *) node)->constvalue);
3034 if (offset != 0)
3035 return true; /* OFFSET with a nonzero value */
3038 else
3039 return true; /* non-constant OFFSET */
3042 return false; /* don't need a Limit plan node */
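/*
 * Examples of the above rules: "LIMIT 5" and "OFFSET $1" both require
 * a Limit node; "LIMIT ALL" and "OFFSET 0" do not, so for them the
 * node is omitted even though "OFFSET 0" still acts as an optimization
 * fence elsewhere in the planner.
 */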
3047 * remove_useless_groupby_columns
3048 * Remove any columns in the GROUP BY clause that are redundant due to
3049 * being functionally dependent on other GROUP BY columns.
3051 * Since some other DBMSes do not allow references to ungrouped columns, it's
3052 * not unusual to find all columns listed in GROUP BY even though listing the
3053 * primary-key columns would be sufficient. Deleting such excess columns
3054 * avoids redundant sorting work, so it's worth doing. When we do this, we
3055 * must mark the plan as dependent on the pkey constraint (compare the
3056 * parser's check_ungrouped_columns() and check_functional_grouping()).
3058 * In principle, we could treat any NOT-NULL columns appearing in a UNIQUE
3059 * index as the determining columns. But as with check_functional_grouping(),
3060 * there's currently no way to represent dependency on a NOT NULL constraint,
3061 * so we consider only the pkey for now.
3064 remove_useless_groupby_columns(PlannerInfo *root)
3066 Query *parse = root->parse;
3067 Bitmapset **groupbyattnos;
3068 Bitmapset **surplusvars;
3069 ListCell *lc;
3070 int relid;
3072 /* No chance to do anything if there are less than two GROUP BY items */
3073 if (list_length(parse->groupClause) < 2)
3074 return;
3076 /* Don't fiddle with the GROUP BY clause if the query has grouping sets */
3077 if (parse->groupingSets)
3078 return;
3081 * Scan the GROUP BY clause to find GROUP BY items that are simple Vars.
3082 * Fill groupbyattnos[k] with a bitmapset of the column attnos of RTE k
3083 * that are GROUP BY items.
3085 groupbyattnos = (Bitmapset **) palloc0(sizeof(Bitmapset *) *
3086 (list_length(parse->rtable) + 1));
3087 foreach(lc, parse->groupClause)
3089 SortGroupClause *sgc = lfirst_node(SortGroupClause, lc);
3090 TargetEntry *tle = get_sortgroupclause_tle(sgc, parse->targetList);
3091 Var *var = (Var *) tle->expr;
3094 * Ignore non-Vars and Vars from other query levels.
3096 * XXX in principle, stable expressions containing Vars could also be
3097 * removed, if all the Vars are functionally dependent on other GROUP
3098 * BY items. But it's not clear that such cases occur often enough to
3099 * be worth troubling over.
3101 if (!IsA(var, Var) ||
3102 var->varlevelsup > 0)
3103 continue;
3105 /* OK, remember we have this Var */
3106 relid = var->varno;
3107 Assert(relid <= list_length(parse->rtable));
3108 groupbyattnos[relid] = bms_add_member(groupbyattnos[relid],
3109 var->varattno - FirstLowInvalidHeapAttributeNumber);
3113 * Consider each relation and see if it is possible to remove some of its
3114 * Vars from GROUP BY. For simplicity and speed, we do the actual removal
3115 * in a separate pass. Here, we just fill surplusvars[k] with a bitmapset
3116 * of the column attnos of RTE k that are removable GROUP BY items.
3118 surplusvars = NULL; /* don't allocate array unless required */
3119 relid = 0;
3120 foreach(lc, parse->rtable)
3122 RangeTblEntry *rte = lfirst_node(RangeTblEntry, lc);
3123 Bitmapset *relattnos;
3124 Bitmapset *pkattnos;
3125 Oid constraintOid;
3127 relid++;
3129 /* Only plain relations could have primary-key constraints */
3130 if (rte->rtekind != RTE_RELATION)
3131 continue;
3134 * We must skip inheritance parent tables as some of the child rels
3135 * may cause duplicate rows. This cannot happen with partitioned
3136 * tables, however.
3138 if (rte->inh && rte->relkind != RELKIND_PARTITIONED_TABLE)
3139 continue;
3141 /* Nothing to do unless this rel has multiple Vars in GROUP BY */
3142 relattnos = groupbyattnos[relid];
3143 if (bms_membership(relattnos) != BMS_MULTIPLE)
3144 continue;
3147 * Can't remove any columns for this rel if there is no suitable
3148 * (i.e., nondeferrable) primary key constraint.
3150 pkattnos = get_primary_key_attnos(rte->relid, false, &constraintOid);
3151 if (pkattnos == NULL)
3152 continue;
3155 * If the primary key is a proper subset of relattnos then we have
3156 * some items in the GROUP BY that can be removed.
3158 if (bms_subset_compare(pkattnos, relattnos) == BMS_SUBSET1)
3161 * To easily remember whether we've found anything to do, we don't
3162 * allocate the surplusvars[] array until we find something.
3164 if (surplusvars == NULL)
3165 surplusvars = (Bitmapset **) palloc0(sizeof(Bitmapset *) *
3166 (list_length(parse->rtable) + 1));
3168 /* Remember the attnos of the removable columns */
3169 surplusvars[relid] = bms_difference(relattnos, pkattnos);
3171 /* Also, mark the resulting plan as dependent on this constraint */
3172 parse->constraintDeps = lappend_oid(parse->constraintDeps,
3173 constraintOid);
3178 * If we found any surplus Vars, build a new GROUP BY clause without them.
3179 * (Note: this may leave some TLEs with unreferenced ressortgroupref
3180 * markings, but that's harmless.)
3182 if (surplusvars != NULL)
3184 List *new_groupby = NIL;
3186 foreach(lc, parse->groupClause)
3188 SortGroupClause *sgc = lfirst_node(SortGroupClause, lc);
3189 TargetEntry *tle = get_sortgroupclause_tle(sgc, parse->targetList);
3190 Var *var = (Var *) tle->expr;
3193 * New list must include non-Vars, outer Vars, and anything not
3194 * marked as surplus.
3196 if (!IsA(var, Var) ||
3197 var->varlevelsup > 0 ||
3198 !bms_is_member(var->varattno - FirstLowInvalidHeapAttributeNumber,
3199 surplusvars[var->varno]))
3200 new_groupby = lappend(new_groupby, sgc);
3203 parse->groupClause = new_groupby;
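/*
 * Worked example: with "CREATE TABLE t (id int PRIMARY KEY, name
 * text)", the query "SELECT ... FROM t GROUP BY id, name" has pkey
 * {id} as a proper subset of the grouped columns, so "name" is dropped
 * and the query is planned as "GROUP BY id", with the plan marked as
 * depending on the pkey constraint.  Names are illustrative only.
 */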
3208 * preprocess_groupclause - do preparatory work on GROUP BY clause
3210 * The idea here is to adjust the ordering of the GROUP BY elements
3211 * (which in itself is semantically insignificant) to match ORDER BY,
3212 * thereby allowing a single sort operation to both implement the ORDER BY
3213 * requirement and set up for a Unique step that implements GROUP BY.
3215 * In principle it might be interesting to consider other orderings of the
3216 * GROUP BY elements, which could match the sort ordering of other
3217 * possible plans (eg an indexscan) and thereby reduce cost. We don't
3218 * bother with that, though. Hashed grouping will frequently win anyway.
3220 * Note: we need no comparable processing of the distinctClause because
3221 * the parser already enforced that that matches ORDER BY.
3223 * For grouping sets, the order of items is instead forced to agree with that
3224 * of the grouping set (and items not in the grouping set are skipped). The
3225 * work of sorting the order of grouping set elements to match the ORDER BY if
3226 * possible is done elsewhere.
3229 preprocess_groupclause(PlannerInfo *root, List *force)
3231 Query *parse = root->parse;
3232 List *new_groupclause = NIL;
3233 bool partial_match;
3234 ListCell *sl;
3235 ListCell *gl;
3237 /* For grouping sets, we need to force the ordering */
3238 if (force)
3240 foreach(sl, force)
3242 Index ref = lfirst_int(sl);
3243 SortGroupClause *cl = get_sortgroupref_clause(ref, parse->groupClause);
3245 new_groupclause = lappend(new_groupclause, cl);
3248 return new_groupclause;
3251 /* If no ORDER BY, nothing useful to do here */
3252 if (parse->sortClause == NIL)
3253 return parse->groupClause;
3256 * Scan the ORDER BY clause and construct a list of matching GROUP BY
3257 * items, but only as far as we can make a matching prefix.
3259 * This code assumes that the sortClause contains no duplicate items.
3261 foreach(sl, parse->sortClause)
3263 SortGroupClause *sc = lfirst_node(SortGroupClause, sl);
3265 foreach(gl, parse->groupClause)
3267 SortGroupClause *gc = lfirst_node(SortGroupClause, gl);
3269 if (equal(gc, sc))
3271 new_groupclause = lappend(new_groupclause, gc);
3272 break;
3275 if (gl == NULL)
3276 break; /* no match, so stop scanning */
3279 /* Did we match all of the ORDER BY list, or just some of it? */
3280 partial_match = (sl != NULL);
3282 /* If no match at all, no point in reordering GROUP BY */
3283 if (new_groupclause == NIL)
3284 return parse->groupClause;
3287 * Add any remaining GROUP BY items to the new list, but only if we were
3288 * able to make a complete match. In other words, we only rearrange the
3289 * GROUP BY list if the result is that one list is a prefix of the other
3290 * --- otherwise there's no possibility of a common sort. Also, give up
3291 * if there are any non-sortable GROUP BY items, since then there's no
3292 * hope anyway.
3294 foreach(gl, parse->groupClause)
3296 SortGroupClause *gc = lfirst_node(SortGroupClause, gl);
3298 if (list_member_ptr(new_groupclause, gc))
3299 continue; /* it matched an ORDER BY item */
3300 if (partial_match)
3301 return parse->groupClause; /* give up, no common sort possible */
3302 if (!OidIsValid(gc->sortop))
3303 return parse->groupClause; /* give up, GROUP BY can't be sorted */
3304 new_groupclause = lappend(new_groupclause, gc);
3307 /* Success --- install the rearranged GROUP BY list */
3308 Assert(list_length(parse->groupClause) == list_length(new_groupclause));
3309 return new_groupclause;
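/*
 * Worked example: "GROUP BY b, a ... ORDER BY a, b" is rearranged to
 * "GROUP BY a, b", letting one sort on (a, b) serve both the grouping
 * step and the final ordering, whereas "GROUP BY b, c ... ORDER BY a"
 * matches no prefix and is returned unchanged.
 */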
3313 * Extract lists of grouping sets that can be implemented using a single
3314 * rollup-type aggregate pass each. Returns a list of lists of grouping sets.
3316 * Input must be sorted with smallest sets first. Result has each sublist
3317 * sorted with smallest sets first.
3319 * We want to produce the absolute minimum possible number of lists here to
3320 * avoid excess sorts. Fortunately, there is an algorithm for this; the problem
3321 * of finding the minimal partition of a partially-ordered set into chains
3322 * (which is what we need, taking the list of grouping sets as a poset ordered
3323 * by set inclusion) can be mapped to the problem of finding the maximum
3324 * cardinality matching on a bipartite graph, which is solvable in polynomial
3325 * time with a worst case of no worse than O(n^2.5) and usually much
3326 * better. Since our N is at most 4096, we don't need to consider fallbacks to
3327 * heuristic or approximate methods. (Planning time for a 12-d cube is under
3329 * half a second on my modest system even with optimization off and assertions
3330 * on.)
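/*
 * Worked example: the sets (a), (a,b), (a,b,c), (c) form two chains
 * under set inclusion, [(a) < (a,b) < (a,b,c)] and [(c)], so the
 * minimal partition yields two rollup lists (and at most two sorts)
 * rather than four.  Column names are illustrative only.
 */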
3331 static List *
3332 extract_rollup_sets(List *groupingSets)
3334 int num_sets_raw = list_length(groupingSets);
3335 int num_empty = 0;
3336 int num_sets = 0; /* distinct sets */
3337 int num_chains = 0;
3338 List *result = NIL;
3339 List **results;
3340 List **orig_sets;
3341 Bitmapset **set_masks;
3342 int *chains;
3343 short **adjacency;
3344 short *adjacency_buf;
3345 BipartiteMatchState *state;
3346 int i;
3347 int j;
3348 int j_size;
3349 ListCell *lc1 = list_head(groupingSets);
3350 ListCell *lc;
3353 * Start by stripping out empty sets. The algorithm doesn't require this,
3354 * but the planner currently needs all empty sets to be returned in the
3355 * first list, so we strip them here and add them back after.
3357 while (lc1 && lfirst(lc1) == NIL)
3359 ++num_empty;
3360 lc1 = lnext(groupingSets, lc1);
3363 /* bail out now if it turns out that all we had were empty sets. */
3364 if (!lc1)
3365 return list_make1(groupingSets);
3368 * We don't strictly need to remove duplicate sets here, but if we don't,
3369 * they tend to become scattered through the result, which is a bit
3370 * confusing (and irritating if we ever decide to optimize them out).
3371 * So we remove them here and add them back after.
3373 * For each non-duplicate set, we fill in the following:
3375 * orig_sets[i] = list of the original set lists
3376 * set_masks[i] = bitmapset for testing inclusion
3377 * adjacency[i] = array [n, v1, v2, ... vn] of adjacency indices
3379 * chains[i] will be the result group this set is assigned to.
3381 * We index all of these from 1 rather than 0 because it is convenient
3382 * to leave 0 free for the NIL node in the graph algorithm.
3385 orig_sets = palloc0((num_sets_raw + 1) * sizeof(List *));
3386 set_masks = palloc0((num_sets_raw + 1) * sizeof(Bitmapset *));
3387 adjacency = palloc0((num_sets_raw + 1) * sizeof(short *));
3388 adjacency_buf = palloc((num_sets_raw + 1) * sizeof(short));
3390 j_size = 0;
3391 j = 0;
3392 i = 1;
3394 for_each_cell(lc, groupingSets, lc1)
3396 List *candidate = (List *) lfirst(lc);
3397 Bitmapset *candidate_set = NULL;
3398 ListCell *lc2;
3399 int dup_of = 0;
3401 foreach(lc2, candidate)
3403 candidate_set = bms_add_member(candidate_set, lfirst_int(lc2));
3406 /* we can only be a dup if we're the same length as a previous set */
3407 if (j_size == list_length(candidate))
3411 for (k = j; k < i; ++k)
3413 if (bms_equal(set_masks[k], candidate_set))
3415 dup_of = k;
3416 break;
3420 else if (j_size < list_length(candidate))
3422 j_size = list_length(candidate);
3423 j = i;
3426 if (dup_of > 0)
3428 orig_sets[dup_of] = lappend(orig_sets[dup_of], candidate);
3429 bms_free(candidate_set);
3431 else
3433 int n_adj = 0;
3435 ++i;
3436 orig_sets[i] = list_make1(candidate);
3437 set_masks[i] = candidate_set;
3439 /* fill in adjacency list; no need to compare equal-size sets */
3441 for (k = j - 1; k > 0; --k)
3443 if (bms_is_subset(set_masks[k], candidate_set))
3444 adjacency_buf[++n_adj] = k;
3447 if (n_adj > 0)
3449 adjacency_buf[0] = n_adj;
3450 adjacency[i] = palloc((n_adj + 1) * sizeof(short));
3451 memcpy(adjacency[i], adjacency_buf, (n_adj + 1) * sizeof(short));
3453 else
3454 adjacency[i] = NULL;
3463 * Apply the graph matching algorithm to do the work.
3465 state = BipartiteMatch(num_sets, num_sets, adjacency);
3468 * Now, the state->pair* fields have the info we need to assign sets to
3469 * chains. Two sets (u,v) belong to the same chain if pair_uv[u] = v or
3470 * pair_vu[v] = u (both will be true, but we check both so that we can do
3473 chains = palloc0((num_sets + 1) * sizeof(int));
3475 for (i = 1; i <= num_sets; ++i)
3477 int u = state->pair_vu[i];
3478 int v = state->pair_uv[i];
3480 if (u > 0 && u < i)
3481 chains[i] = chains[u];
3482 else if (v > 0 && v < i)
3483 chains[i] = chains[v];
3484 else
3485 chains[i] = ++num_chains;
3488 /* build result lists. */
3489 results = palloc0((num_chains + 1) * sizeof(List *));
3491 for (i = 1; i <= num_sets; ++i)
3493 int c = chains[i];
3495 Assert(c > 0);
3497 results[c] = list_concat(results[c], orig_sets[i]);
3500 /* push any empty sets back on the first list. */
3501 while (num_empty-- > 0)
3502 results[1] = lcons(NIL, results[1]);
3504 /* make result list */
3505 for (i = 1; i <= num_chains; ++i)
3506 result = lappend(result, results[i]);
3509 * Free all the things.
3511 * (This is over-fussy for small sets but for large sets we could have
3512 * tied up a nontrivial amount of memory.)
3514 BipartiteMatchFree(state);
3517 for (i = 1; i <= num_sets; ++i)
3518 if (adjacency[i])
3519 pfree(adjacency[i]);
3521 pfree(adjacency_buf);
3523 for (i = 1; i <= num_sets; ++i)
3524 bms_free(set_masks[i]);
3526 return result;
3531 * Reorder the elements of a list of grouping sets such that they have correct
3532 * prefix relationships. Also inserts the GroupingSetData annotations.
3534 * The input must be ordered with smallest sets first; the result is returned
3535 * with largest sets first. Note that the result shares no list substructure
3536 * with the input, so it's safe for the caller to modify it later.
3538 * If we're passed in a sortclause, we follow its order of columns to the
3539 * extent possible, to minimize the chance that we add unnecessary sorts.
3540 * (We're trying here to ensure that GROUPING SETS ((a,b,c),(c)) ORDER BY c,b,a
3541 * gets implemented in one pass.)
3543 static List *
3544 reorder_grouping_sets(List *groupingsets, List *sortclause)
3546 ListCell *lc;
3547 List *previous = NIL;
3548 List *result = NIL;
3550 foreach(lc, groupingsets)
3552 List *candidate = (List *) lfirst(lc);
3553 List *new_elems = list_difference_int(candidate, previous);
3554 GroupingSetData *gs = makeNode(GroupingSetData);
3556 while (list_length(sortclause) > list_length(previous) &&
3557 list_length(new_elems) > 0)
3559 SortGroupClause *sc = list_nth(sortclause, list_length(previous));
3560 int ref = sc->tleSortGroupRef;
3562 if (list_member_int(new_elems, ref))
3564 previous = lappend_int(previous, ref);
3565 new_elems = list_delete_int(new_elems, ref);
3567 else
3569 /* diverged from the sortclause; give up on it */
3570 sortclause = NIL; /* don't sort below here */
3571 break;
3576 * Safe to use list_concat (which shares cells of the second arg)
3577 * because we know that new_elems does not share cells with anything.
3579 previous = list_concat(previous, new_elems);
3581 gs->set = list_copy(previous);
3582 result = lcons(gs, result);
3585 list_free(previous);
3587 return result;
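/*
 * Worked example: GROUPING SETS ((a,b,c),(c)) with ORDER BY c,b,a
 * arrives here smallest-first as (c),(a,b,c); following the sortclause
 * extends the prefix as (c), then (c,b,a), so the result comes back
 * largest-first as (c,b,a),(c) and a single sort on (c, b, a) can
 * implement both the rollup and the ORDER BY.
 */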
3591 * Compute query_pathkeys and other pathkeys during plan generation
3593 static void
3594 standard_qp_callback(PlannerInfo *root, void *extra)
3596 Query *parse = root->parse;
3597 standard_qp_extra *qp_extra = (standard_qp_extra *) extra;
3598 List *tlist = root->processed_tlist;
3599 List *activeWindows = qp_extra->activeWindows;
3602 * Calculate pathkeys that represent grouping/ordering requirements. The
3603 * sortClause is certainly sort-able, but GROUP BY and DISTINCT might not
3604 * be, in which case we just leave their pathkeys empty.
3606 if (qp_extra->groupClause &&
3607 grouping_is_sortable(qp_extra->groupClause))
3608 root->group_pathkeys =
3609 make_pathkeys_for_sortclauses(root,
3610 qp_extra->groupClause,
3611 tlist);
3612 else
3613 root->group_pathkeys = NIL;
3615 /* We consider only the first (bottom) window in pathkeys logic */
3616 if (activeWindows != NIL)
3618 WindowClause *wc = linitial_node(WindowClause, activeWindows);
3620 root->window_pathkeys = make_pathkeys_for_window(root,
3621 wc,
3622 tlist);
3624 else
3625 root->window_pathkeys = NIL;
3627 if (parse->distinctClause &&
3628 grouping_is_sortable(parse->distinctClause))
3629 root->distinct_pathkeys =
3630 make_pathkeys_for_sortclauses(root,
3631 parse->distinctClause,
3632 tlist);
3633 else
3634 root->distinct_pathkeys = NIL;
3636 root->sort_pathkeys =
3637 make_pathkeys_for_sortclauses(root,
3638 parse->sortClause,
3639 tlist);
3642 * Figure out whether we want a sorted result from query_planner.
3644 * If we have a sortable GROUP BY clause, then we want a result sorted
3645 * properly for grouping. Otherwise, if we have window functions to
3646 * evaluate, we try to sort for the first window. Otherwise, if there's a
3647 * sortable DISTINCT clause that's more rigorous than the ORDER BY clause,
3648 * we try to produce output that's sufficiently well sorted for the
3649 * DISTINCT. Otherwise, if there is an ORDER BY clause, we want to sort
3650 * by the ORDER BY clause.
3652 * Note: if we have both ORDER BY and GROUP BY, and ORDER BY is a superset
3653 * of GROUP BY, it would be tempting to request sort by ORDER BY --- but
3654 * that might just leave us failing to exploit an available sort order at
3655 * all. Needs more thought. The choice for DISTINCT versus ORDER BY is
3656 * much easier, since we know that the parser ensured that one is a
3657 * superset of the other.
3659 if (root->group_pathkeys)
3660 root->query_pathkeys = root->group_pathkeys;
3661 else if (root->window_pathkeys)
3662 root->query_pathkeys = root->window_pathkeys;
3663 else if (list_length(root->distinct_pathkeys) >
3664 list_length(root->sort_pathkeys))
3665 root->query_pathkeys = root->distinct_pathkeys;
3666 else if (root->sort_pathkeys)
3667 root->query_pathkeys = root->sort_pathkeys;
3669 root->query_pathkeys = NIL;
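/*
 * Editor's note: a toy restatement of the precedence chain above, reduced
 * to pathkey-list lengths (hypothetical figures, not planner.c API).  With
 * no GROUP BY and no window sort, DISTINCT on (a,b) wins over ORDER BY a
 * because it is the more rigorous (longer) requirement.
 */
#if 0
#include <stdio.h>

int
main(void)
{
	int			n_group = 0, n_window = 0, n_distinct = 2, n_sort = 1;
	const char *choice;

	if (n_group > 0)
		choice = "group_pathkeys";
	else if (n_window > 0)
		choice = "window_pathkeys";
	else if (n_distinct > n_sort)
		choice = "distinct_pathkeys";
	else if (n_sort > 0)
		choice = "sort_pathkeys";
	else
		choice = "NIL";
	printf("query_pathkeys = %s\n", choice);	/* distinct_pathkeys */
	return 0;
}
#endif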
3673 * Estimate number of groups produced by grouping clauses (1 if not grouping)
3675 * path_rows: number of output rows from scan/join step
3676 * gd: grouping sets data including list of grouping sets and their clauses
3677 * target_list: target list containing group clause references
3679 * If doing grouping sets, we also annotate the gsets data with the estimates
3680 * for each set and each individual rollup list, with a view to later
3681 * determining whether some combination of them could be hashed instead.
3684 get_number_of_groups(PlannerInfo *root,
3686 grouping_sets_data *gd,
3689 Query *parse = root->parse;
3692 if (parse->groupClause)
3696 if (parse->groupingSets)
3698 /* Add up the estimates for each grouping set */
3702 Assert(gd); /* keep Coverity happy */
3706 foreach(lc, gd->rollups)
3708 RollupData *rollup = lfirst_node(RollupData, lc);
3711 groupExprs = get_sortgrouplist_exprs(rollup->groupClause,
3714 rollup->numGroups = 0.0;
3716 forboth(lc, rollup->gsets, lc2, rollup->gsets_data)
3718 List *gset = (List *) lfirst(lc);
3719 GroupingSetData *gs = lfirst_node(GroupingSetData, lc2);
3720 double numGroups = estimate_num_groups(root,
3725 gs->numGroups = numGroups;
3726 rollup->numGroups += numGroups;
3729 dNumGroups += rollup->numGroups;
3732 if (gd->hash_sets_idx)
3736 gd->dNumHashGroups = 0;
3738 groupExprs = get_sortgrouplist_exprs(parse->groupClause,
3741 forboth(lc, gd->hash_sets_idx, lc2, gd->unsortable_sets)
3743 List *gset = (List *) lfirst(lc);
3744 GroupingSetData *gs = lfirst_node(GroupingSetData, lc2);
3745 double numGroups = estimate_num_groups(root,
3750 gs->numGroups = numGroups;
3751 gd->dNumHashGroups += numGroups;
3754 dNumGroups += gd->dNumHashGroups;
3759 /* Plain GROUP BY */
3760 groupExprs = get_sortgrouplist_exprs(parse->groupClause,
3763 dNumGroups = estimate_num_groups(root, groupExprs, path_rows,
3767 else if (parse->groupingSets)
3769 /* Empty grouping sets ... one result row for each one */
3770 dNumGroups = list_length(parse->groupingSets);
3772 else if (parse->hasAggs || root->hasHavingQual)
3774 /* Plain aggregation, one result row */
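/*
 * Editor's note: the grouping-sets arithmetic above is just summation.  A
 * standalone sketch with hypothetical estimates for ROLLUP(a,b), i.e.
 * grouping sets (a,b), (a) and ():
 */
#if 0
#include <stdio.h>

int
main(void)
{
	double		per_set[] = {1000.0, 100.0, 1.0};	/* assumed estimates */
	double		rollup_numGroups = 0.0;

	for (int i = 0; i < 3; i++)
		rollup_numGroups += per_set[i];		/* rollup->numGroups */
	printf("dNumGroups = %.0f\n", rollup_numGroups);	/* 1101 */
	return 0;
}
#endif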
3787 * create_grouping_paths
3789 * Build a new upperrel containing Paths for grouping and/or aggregation.
3790 * Along the way, we also build an upperrel for Paths which are partially
3791 * grouped and/or aggregated. A partially grouped and/or aggregated path
3792 * needs a FinalizeAggregate node to complete the aggregation. Currently,
3793 * the only partially grouped paths we build are also partial paths; that
3794 * is, they need a Gather and then a FinalizeAggregate.
3796 * input_rel: contains the source-data Paths
3797 * target: the pathtarget for the result Paths to compute
3798 * agg_costs: cost info about all aggregates in query (in AGGSPLIT_SIMPLE mode)
3799 * gd: grouping sets data including list of grouping sets and their clauses
3801 * Note: all Paths in input_rel are expected to return the target computed
3802 * by make_group_input_target.
3805 create_grouping_paths(PlannerInfo *root,
3806 RelOptInfo *input_rel,
3808 bool target_parallel_safe,
3809 const AggClauseCosts *agg_costs,
3810 grouping_sets_data *gd)
3812 Query *parse = root->parse;
3813 RelOptInfo *grouped_rel;
3814 RelOptInfo *partially_grouped_rel;
3817 * Create grouping relation to hold fully aggregated grouping and/or
3818 * aggregation paths.
3820 grouped_rel = make_grouping_rel(root, input_rel, target,
3821 target_parallel_safe, parse->havingQual);
3824 * Create either paths for a degenerate grouping or paths for ordinary
3825 * grouping, as appropriate.
3827 if (is_degenerate_grouping(root))
3828 create_degenerate_grouping_paths(root, input_rel, grouped_rel);
3832 GroupPathExtraData extra;
3835 * Determine whether it's possible to perform sort-based
3836 * implementations of grouping. (Note that if groupClause is empty,
3837 * grouping_is_sortable() is trivially true, and all the
3838 * pathkeys_contained_in() tests will succeed too, so that we'll
3839 * consider every surviving input path.)
3841 * If we have grouping sets, we might be able to sort some but not all
3842 * of them; in this case, we need can_sort to be true as long as we
3843 * must consider any sorted-input plan.
3845 if ((gd && gd->rollups != NIL)
3846 || grouping_is_sortable(parse->groupClause))
3847 flags |= GROUPING_CAN_USE_SORT;
3850 * Determine whether we should consider hash-based implementations of
3853 * Hashed aggregation only applies if we're grouping. If we have
3854 * grouping sets, some groups might be hashable but others not; in
3855 * this case we set can_hash true as long as there is nothing globally
3856 * preventing us from hashing (and we should therefore consider plans with hashes).
3859 * Executor doesn't support hashed aggregation with DISTINCT or ORDER
3860 * BY aggregates. (Doing so would imply storing *all* the input
3861 * values in the hash table, and/or running many sorts in parallel,
3862 * either of which seems like a certain loser.) We similarly don't
3863 * support ordered-set aggregates in hashed aggregation, but that case
3864 * is also included in the numOrderedAggs count.
3866 * Note: grouping_is_hashable() is much more expensive to check than
3867 * the other gating conditions, so we want to do it last.
3869 if ((parse->groupClause != NIL &&
3870 agg_costs->numOrderedAggs == 0 &&
3871 (gd ? gd->any_hashable : grouping_is_hashable(parse->groupClause))))
3872 flags |= GROUPING_CAN_USE_HASH;
3875 * Determine whether partial aggregation is possible.
3877 if (can_partial_agg(root, agg_costs))
3878 flags |= GROUPING_CAN_PARTIAL_AGG;
3880 extra.flags = flags;
3881 extra.target_parallel_safe = target_parallel_safe;
3882 extra.havingQual = parse->havingQual;
3883 extra.targetList = parse->targetList;
3884 extra.partial_costs_set = false;
3887 * Determine whether partitionwise aggregation is in theory possible.
3888 * It can be disabled by the user, and for now, we don't try to
3889 * support grouping sets. create_ordinary_grouping_paths() will check
3890 * additional conditions, such as whether input_rel is partitioned.
3892 if (enable_partitionwise_aggregate && !parse->groupingSets)
3893 extra.patype = PARTITIONWISE_AGGREGATE_FULL;
3895 extra.patype = PARTITIONWISE_AGGREGATE_NONE;
3897 create_ordinary_grouping_paths(root, input_rel, grouped_rel,
3898 agg_costs, gd, &extra,
3899 &partially_grouped_rel);
3902 set_cheapest(grouped_rel);
3909 * Create a new grouping rel and set basic properties.
3911 * input_rel represents the underlying scan/join relation.
3912 * target is the output expected from the grouping relation.
3915 make_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
3916 PathTarget *target, bool target_parallel_safe,
3919 RelOptInfo *grouped_rel;
3921 if (IS_OTHER_REL(input_rel))
3923 grouped_rel = fetch_upper_rel(root, UPPERREL_GROUP_AGG,
3925 grouped_rel->reloptkind = RELOPT_OTHER_UPPER_REL;
3930 * By tradition, the relids set for the main grouping relation is
3931 * NULL. (This could be changed, but might require adjustments elsewhere.)
3934 grouped_rel = fetch_upper_rel(root, UPPERREL_GROUP_AGG, NULL);
3938 grouped_rel->reltarget = target;
3941 * If the input relation is not parallel-safe, then the grouped relation
3942 * can't be parallel-safe, either. Otherwise, it's parallel-safe if the
3943 * target list and HAVING quals are parallel-safe.
3945 if (input_rel->consider_parallel && target_parallel_safe &&
3946 is_parallel_safe(root, (Node *) havingQual))
3947 grouped_rel->consider_parallel = true;
3950 * If the input rel belongs to a single FDW, so does the grouped rel.
3952 grouped_rel->serverid = input_rel->serverid;
3953 grouped_rel->userid = input_rel->userid;
3954 grouped_rel->useridiscurrent = input_rel->useridiscurrent;
3955 grouped_rel->fdwroutine = input_rel->fdwroutine;
3961 * is_degenerate_grouping
3963 * A degenerate grouping is one in which the query has a HAVING qual and/or
3964 * grouping sets, but no aggregates and no GROUP BY (which implies that the
3965 * grouping sets are all empty).
3968 is_degenerate_grouping(PlannerInfo *root)
3970 Query *parse = root->parse;
3972 return (root->hasHavingQual || parse->groupingSets) &&
3973 !parse->hasAggs && parse->groupClause == NIL;
3977 * create_degenerate_grouping_paths
3979 * When the grouping is degenerate (see is_degenerate_grouping), we are
3980 * supposed to emit either zero or one row for each grouping set depending on
3981 * whether HAVING succeeds. Furthermore, there cannot be any variables in
3982 * either HAVING or the targetlist, so we actually do not need the FROM table
3983 * at all! We can just throw away the plan-so-far and generate a Result node.
3984 * This is a sufficiently unusual corner case that it's not worth contorting
3985 * the structure of this module to avoid having to generate the earlier paths
3986 * in the first place.
3989 create_degenerate_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel,
3990 RelOptInfo *grouped_rel)
3992 Query *parse = root->parse;
3996 nrows = list_length(parse->groupingSets);
4000 * Doesn't seem worthwhile writing code to cons up a generate_series
4001 * or a values scan to emit multiple rows. Instead just make N clones
4002 * and append them. (With a volatile HAVING clause, this means you
4003 * might get between 0 and N output rows. Offhand I think that's desired.)
4008 while (--nrows >= 0)
4011 create_group_result_path(root, grouped_rel,
4012 grouped_rel->reltarget,
4013 (List *) parse->havingQual);
4014 paths = lappend(paths, path);
4017 create_append_path(root,
4030 /* No grouping sets, or just one, so one output row */
4032 create_group_result_path(root, grouped_rel,
4033 grouped_rel->reltarget,
4034 (List *) parse->havingQual);
4037 add_path(grouped_rel, path);
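/*
 * Editor's note: a sketch of the degenerate-case plan shape described
 * above.  For GROUPING SETS ((),()) the plan is simply N one-row Result
 * nodes appended together, each applying HAVING independently
 * (illustration only, not planner.c API).
 */
#if 0
#include <stdio.h>

int
main(void)
{
	int			nrows = 2;			/* list_length(parse->groupingSets) */
	int			having_passes = 1;	/* pretend HAVING evaluated to true */

	while (--nrows >= 0)
		if (having_passes)
			printf("one result row\n");	/* two rows total */
	return 0;
}
#endif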
4041 * create_ordinary_grouping_paths
4043 * Create grouping paths for the ordinary (that is, non-degenerate) case.
4045 * We need to consider sorted and hashed aggregation in the same function,
4046 * because otherwise (1) it would be harder to throw an appropriate error
4047 * message if neither way works, and (2) we should not allow hashtable size
4048 * considerations to dissuade us from using hashing if sorting is not possible.
4050 * *partially_grouped_rel_p will be set to the partially grouped rel which this
4051 * function creates, or to NULL if it doesn't create one.
4054 create_ordinary_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel,
4055 RelOptInfo *grouped_rel,
4056 const AggClauseCosts *agg_costs,
4057 grouping_sets_data *gd,
4058 GroupPathExtraData *extra,
4059 RelOptInfo **partially_grouped_rel_p)
4061 Path *cheapest_path = input_rel->cheapest_total_path;
4062 RelOptInfo *partially_grouped_rel = NULL;
4064 PartitionwiseAggregateType patype = PARTITIONWISE_AGGREGATE_NONE;
4067 * If this is the topmost grouping relation or if the parent relation is
4068 * doing some form of partitionwise aggregation, then we may be able to do
4069 * it at this level also. However, if the input relation is not
4070 * partitioned, partitionwise aggregate is impossible.
4072 if (extra->patype != PARTITIONWISE_AGGREGATE_NONE &&
4073 IS_PARTITIONED_REL(input_rel))
4076 * If this is the topmost relation or if the parent relation is doing
4077 * full partitionwise aggregation, then we can do full partitionwise
4078 * aggregation provided that the GROUP BY clause contains all of the
4079 * partitioning columns at this level. Otherwise, we can do at most
4080 * partial partitionwise aggregation. But if partial aggregation is
4081 * not supported in general then we can't use it for partitionwise
4082 * aggregation either.
4084 if (extra->patype == PARTITIONWISE_AGGREGATE_FULL &&
4085 group_by_has_partkey(input_rel, extra->targetList,
4086 root->parse->groupClause))
4087 patype = PARTITIONWISE_AGGREGATE_FULL;
4088 else if ((extra->flags & GROUPING_CAN_PARTIAL_AGG) != 0)
4089 patype = PARTITIONWISE_AGGREGATE_PARTIAL;
4091 patype = PARTITIONWISE_AGGREGATE_NONE;
4095 * Before generating paths for grouped_rel, we first generate any possible
4096 * partially grouped paths; that way, later code can easily consider both
4097 * parallel and non-parallel approaches to grouping.
4099 if ((extra->flags & GROUPING_CAN_PARTIAL_AGG) != 0)
4101 bool force_rel_creation;
4104 * If we're doing partitionwise aggregation at this level, force
4105 * creation of a partially_grouped_rel so we can add partitionwise
4108 force_rel_creation = (patype == PARTITIONWISE_AGGREGATE_PARTIAL);
4110 partially_grouped_rel =
4111 create_partial_grouping_paths(root,
4116 force_rel_creation);
4119 /* Set out parameter. */
4120 *partially_grouped_rel_p = partially_grouped_rel;
4122 /* Apply partitionwise aggregation technique, if possible. */
4123 if (patype != PARTITIONWISE_AGGREGATE_NONE)
4124 create_partitionwise_grouping_paths(root, input_rel, grouped_rel,
4125 partially_grouped_rel, agg_costs,
4128 /* If we are doing partial aggregation only, return. */
4129 if (extra->patype == PARTITIONWISE_AGGREGATE_PARTIAL)
4131 Assert(partially_grouped_rel);
4133 if (partially_grouped_rel->pathlist)
4134 set_cheapest(partially_grouped_rel);
4139 /* Gather any partially grouped partial paths. */
4140 if (partially_grouped_rel && partially_grouped_rel->partial_pathlist)
4142 gather_grouping_paths(root, partially_grouped_rel);
4143 set_cheapest(partially_grouped_rel);
4147 * Estimate number of groups.
4149 dNumGroups = get_number_of_groups(root,
4150 cheapest_path->rows,
4154 /* Build final grouping paths */
4155 add_paths_to_grouping_rel(root, input_rel, grouped_rel,
4156 partially_grouped_rel, agg_costs, gd,
4159 /* Give a helpful error if we failed to find any implementation */
4160 if (grouped_rel->pathlist == NIL)
4162 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
4163 errmsg("could not implement GROUP BY"),
4164 errdetail("Some of the datatypes only support hashing, while others only support sorting.")));
4167 * If there is an FDW that's responsible for all baserels of the query,
4168 * let it consider adding ForeignPaths.
4170 if (grouped_rel->fdwroutine &&
4171 grouped_rel->fdwroutine->GetForeignUpperPaths)
4172 grouped_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_GROUP_AGG,
4173 input_rel, grouped_rel,
4176 /* Let extensions possibly add some more paths */
4177 if (create_upper_paths_hook)
4178 (*create_upper_paths_hook) (root, UPPERREL_GROUP_AGG,
4179 input_rel, grouped_rel,
4184 * For a given input path, consider the possible ways of doing grouping sets on
4185 * it, by combinations of hashing and sorting. This can be called multiple
4186 * times, so it's important that it not scribble on input. No result is
4187 * returned, but any generated paths are added to grouped_rel.
4190 consider_groupingsets_paths(PlannerInfo *root,
4191 RelOptInfo *grouped_rel,
4195 grouping_sets_data *gd,
4196 const AggClauseCosts *agg_costs,
4199 Query *parse = root->parse;
4202 * If we're not being offered sorted input, then only consider plans that
4203 * can be done entirely by hashing.
4205 * We can hash everything if it looks like it'll fit in work_mem. But if
4206 * the input is actually sorted despite not being advertised as such, we
4207 * prefer to make use of that in order to use less memory.
4209 * If none of the grouping sets are sortable, then ignore the work_mem
4210 * limit and generate a path anyway, since otherwise we'll just fail.
4214 List *new_rollups = NIL;
4215 RollupData *unhashed_rollup = NULL;
4217 List *empty_sets_data = NIL;
4218 List *empty_sets = NIL;
4220 ListCell *l_start = list_head(gd->rollups);
4221 AggStrategy strat = AGG_HASHED;
4223 double exclude_groups = 0.0;
4228 * If the input is coincidentally sorted usefully (which can happen
4229 * even if is_sorted is false, since that only means that our caller
4230 * has set up the sorting for us), then save some hashtable space by
4231 * making use of that. But we need to watch out for degenerate cases:
4233 * 1) If there are any empty grouping sets, then group_pathkeys might
4234 * be NIL if all non-empty grouping sets are unsortable. In this case,
4235 * there will be a rollup containing only empty groups, and the
4236 * pathkeys_contained_in test is vacuously true; this is ok.
4238 * XXX: the above relies on the fact that group_pathkeys is generated
4239 * from the first rollup. If we add the ability to consider multiple
4240 * sort orders for grouping input, this assumption might fail.
4242 * 2) If there are no empty sets and only unsortable sets, then the
4243 * rollups list will be empty (and thus l_start == NULL), and
4244 * group_pathkeys will be NIL; we must ensure that the vacuously-true
4245 * pathkeys_contained_in test doesn't cause us to crash.
4247 if (l_start != NULL &&
4248 pathkeys_contained_in(root->group_pathkeys, path->pathkeys))
4250 unhashed_rollup = lfirst_node(RollupData, l_start);
4251 exclude_groups = unhashed_rollup->numGroups;
4252 l_start = lnext(gd->rollups, l_start);
4255 hashsize = estimate_hashagg_tablesize(path,
4257 dNumGroups - exclude_groups);
4260 * gd->rollups is empty if we have only unsortable columns to work
4261 * with. Override work_mem in that case; otherwise, we'll rely on the
4262 * sorted-input case to generate usable mixed paths.
4264 if (hashsize > work_mem * 1024L && gd->rollups)
4265 return; /* nope, won't fit */
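/*
 * Editor's note: work_mem is measured in kilobytes, so the fit test above
 * compares a byte-denominated size estimate against work_mem * 1024.  A
 * standalone sketch with hypothetical numbers:
 */
#if 0
#include <stdio.h>

int
main(void)
{
	long		work_mem = 4096;	/* KB, i.e. 4MB */
	double		hashsize = 6.0 * 1024 * 1024;	/* bytes, assumed estimate */

	if (hashsize > work_mem * 1024L)
		printf("won't fit: skip the all-hashed plan\n");
	return 0;
}
#endif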
4268 * We need to burst the existing rollups list into individual grouping
4269 * sets and recompute a groupClause for each set.
4271 sets_data = list_copy(gd->unsortable_sets);
4273 for_each_cell(lc, gd->rollups, l_start)
4275 RollupData *rollup = lfirst_node(RollupData, lc);
4278 * If we find an unhashable rollup that's not been skipped by the
4279 * "actually sorted" check above, we can't cope; we'd need sorted
4280 * input (with a different sort order) but we can't get that here.
4281 * So bail out; we'll get a valid path from the is_sorted case instead.
4284 * The mere presence of empty grouping sets doesn't make a rollup
4285 * unhashable (see preprocess_grouping_sets); we handle those specially below.
4288 if (!rollup->hashable)
4291 sets_data = list_concat(sets_data, list_copy(rollup->gsets_data));
4293 foreach(lc, sets_data)
4295 GroupingSetData *gs = lfirst_node(GroupingSetData, lc);
4296 List *gset = gs->set;
4301 /* Empty grouping sets can't be hashed. */
4302 empty_sets_data = lappend(empty_sets_data, gs);
4303 empty_sets = lappend(empty_sets, NIL);
4307 rollup = makeNode(RollupData);
4309 rollup->groupClause = preprocess_groupclause(root, gset);
4310 rollup->gsets_data = list_make1(gs);
4311 rollup->gsets = remap_to_groupclause_idx(rollup->groupClause,
4313 gd->tleref_to_colnum_map);
4314 rollup->numGroups = gs->numGroups;
4315 rollup->hashable = true;
4316 rollup->is_hashed = true;
4317 new_rollups = lappend(new_rollups, rollup);
4322 * If we didn't find anything nonempty to hash, then bail. We'll
4323 * generate a path from the is_sorted case.
4325 if (new_rollups == NIL)
4329 * If there were empty grouping sets they should have been in the first rollup.
4332 Assert(!unhashed_rollup || !empty_sets);
4334 if (unhashed_rollup)
4336 new_rollups = lappend(new_rollups, unhashed_rollup);
4339 else if (empty_sets)
4341 RollupData *rollup = makeNode(RollupData);
4343 rollup->groupClause = NIL;
4344 rollup->gsets_data = empty_sets_data;
4345 rollup->gsets = empty_sets;
4346 rollup->numGroups = list_length(empty_sets);
4347 rollup->hashable = false;
4348 rollup->is_hashed = false;
4349 new_rollups = lappend(new_rollups, rollup);
4353 add_path(grouped_rel, (Path *)
4354 create_groupingsets_path(root,
4357 (List *) parse->havingQual,
4366 * If we have sorted input but nothing we can do with it, bail.
4368 if (list_length(gd->rollups) == 0)
4372 * Given sorted input, we try and make two paths: one sorted and one mixed
4373 * sort/hash. (We need to try both because hashagg might be disabled, or
4374 * some columns might not be sortable.)
4376 * can_hash is passed in as false if some obstacle elsewhere (such as
4377 * ordered aggs) means that we shouldn't consider hashing at all.
4379 if (can_hash && gd->any_hashable)
4381 List *rollups = NIL;
4382 List *hash_sets = list_copy(gd->unsortable_sets);
4383 double availspace = (work_mem * 1024.0);
4387 * Account first for space needed for groups we can't sort at all.
4389 availspace -= estimate_hashagg_tablesize(path,
4391 gd->dNumHashGroups);
4393 if (availspace > 0 && list_length(gd->rollups) > 1)
4396 int num_rollups = list_length(gd->rollups);
4398 int *k_weights = palloc(num_rollups * sizeof(int));
4399 Bitmapset *hash_items = NULL;
4403 * We treat this as a knapsack problem: the knapsack capacity
4404 * represents work_mem, the item weights are the estimated memory
4405 * usage of the hashtables needed to implement a single rollup,
4406 * and we really ought to use the cost saving as the item value;
4407 * however, currently the costs assigned to sort nodes don't
4408 * reflect the comparison costs well, and so we treat all items as
4409 * of equal value (each rollup we hash instead saves us one sort).
4411 * To use the discrete knapsack, we need to scale the values to a
4412 * reasonably small bounded range. We choose to allow a 5% error
4413 * margin; we have no more than 4096 rollups in the worst possible
4414 * case, which with a 5% error margin will require a bit over 42MB
4415 * of workspace. (Anyone wanting to plan queries that complex had
4416 * better have the memory for it. In more reasonable cases, with
4417 * no more than a couple of dozen rollups, the memory usage will be negligible.)
4420 * k_capacity is naturally bounded, but we clamp the values for
4421 * scale and weight (below) to avoid overflows or underflows (or
4422 * uselessly trying to use a scale factor less than 1 byte).
4424 scale = Max(availspace / (20.0 * num_rollups), 1.0);
4425 k_capacity = (int) floor(availspace / scale);
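/*
 * Editor's note: a standalone sketch of the scaling arithmetic above, with
 * hypothetical inputs.  For availspace = 4MB and num_rollups = 3, scale
 * comes out to about 69905 bytes per weight unit and k_capacity to about
 * 60, i.e. roughly 20 units per rollup, matching the ~5% granularity
 * described.
 */
#if 0
#include <math.h>
#include <stdio.h>

int
main(void)
{
	double		availspace = 4.0 * 1024 * 1024;	/* bytes left of work_mem */
	int			num_rollups = 3;
	double		scale = availspace / (20.0 * num_rollups);
	int			k_capacity;

	if (scale < 1.0)
		scale = 1.0;			/* same clamp as the Max() above */
	k_capacity = (int) floor(availspace / scale);
	printf("scale=%.0f capacity=%d\n", scale, k_capacity);
	return 0;
}
#endif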
4428 * We leave the first rollup out of consideration since it's the
4429 * one that matches the input sort order. We assign indexes "i"
4430 * to only those entries considered for hashing; the second loop,
4431 * below, must use the same condition.
4434 for_each_cell(lc, gd->rollups, list_second_cell(gd->rollups))
4436 RollupData *rollup = lfirst_node(RollupData, lc);
4438 if (rollup->hashable)
4440 double sz = estimate_hashagg_tablesize(path,
4445 * If sz is enormous, but work_mem (and hence scale) is
4446 * small, avoid integer overflow here.
4448 k_weights[i] = (int) Min(floor(sz / scale),
4455 * Apply knapsack algorithm; compute the set of items which
4456 * maximizes the value stored (in this case the number of sorts
4457 * saved) while keeping the total size (approximately) within capacity.
4461 hash_items = DiscreteKnapsack(k_capacity, i, k_weights, NULL);
4463 if (!bms_is_empty(hash_items))
4465 rollups = list_make1(linitial(gd->rollups));
4468 for_each_cell(lc, gd->rollups, list_second_cell(gd->rollups))
4470 RollupData *rollup = lfirst_node(RollupData, lc);
4472 if (rollup->hashable)
4474 if (bms_is_member(i, hash_items))
4475 hash_sets = list_concat(hash_sets,
4476 list_copy(rollup->gsets_data));
4478 rollups = lappend(rollups, rollup);
4482 rollups = lappend(rollups, rollup);
4487 if (!rollups && hash_sets)
4488 rollups = list_copy(gd->rollups);
4490 foreach(lc, hash_sets)
4492 GroupingSetData *gs = lfirst_node(GroupingSetData, lc);
4493 RollupData *rollup = makeNode(RollupData);
4495 Assert(gs->set != NIL);
4497 rollup->groupClause = preprocess_groupclause(root, gs->set);
4498 rollup->gsets_data = list_make1(gs);
4499 rollup->gsets = remap_to_groupclause_idx(rollup->groupClause,
4501 gd->tleref_to_colnum_map);
4502 rollup->numGroups = gs->numGroups;
4503 rollup->hashable = true;
4504 rollup->is_hashed = true;
4505 rollups = lcons(rollup, rollups);
4510 add_path(grouped_rel, (Path *)
4511 create_groupingsets_path(root,
4514 (List *) parse->havingQual,
4523 * Now try the simple sorted case.
4525 if (!gd->unsortable_sets)
4526 add_path(grouped_rel, (Path *)
4527 create_groupingsets_path(root,
4530 (List *) parse->havingQual,
4538 * create_window_paths
4540 * Build a new upperrel containing Paths for window-function evaluation.
4542 * input_rel: contains the source-data Paths
4543 * input_target: result of make_window_input_target
4544 * output_target: what the topmost WindowAggPath should return
4545 * wflists: result of find_window_functions
4546 * activeWindows: result of select_active_windows
4548 * Note: all Paths in input_rel are expected to return input_target.
4551 create_window_paths(PlannerInfo *root,
4552 RelOptInfo *input_rel,
4553 PathTarget *input_target,
4554 PathTarget *output_target,
4555 bool output_target_parallel_safe,
4556 WindowFuncLists *wflists,
4557 List *activeWindows)
4559 RelOptInfo *window_rel;
4562 /* For now, do all work in the (WINDOW, NULL) upperrel */
4563 window_rel = fetch_upper_rel(root, UPPERREL_WINDOW, NULL);
4566 * If the input relation is not parallel-safe, then the window relation
4567 * can't be parallel-safe, either. Otherwise, we need to examine the
4568 * target list and active windows for non-parallel-safe constructs.
4570 if (input_rel->consider_parallel && output_target_parallel_safe &&
4571 is_parallel_safe(root, (Node *) activeWindows))
4572 window_rel->consider_parallel = true;
4575 * If the input rel belongs to a single FDW, so does the window rel.
4577 window_rel->serverid = input_rel->serverid;
4578 window_rel->userid = input_rel->userid;
4579 window_rel->useridiscurrent = input_rel->useridiscurrent;
4580 window_rel->fdwroutine = input_rel->fdwroutine;
4583 * Consider computing window functions starting from the existing
4584 * cheapest-total path (which will likely require a sort) as well as any
4585 * existing paths that satisfy root->window_pathkeys (which won't).
4587 foreach(lc, input_rel->pathlist)
4589 Path *path = (Path *) lfirst(lc);
4591 if (path == input_rel->cheapest_total_path ||
4592 pathkeys_contained_in(root->window_pathkeys, path->pathkeys))
4593 create_one_window_path(root,
4603 * If there is an FDW that's responsible for all baserels of the query,
4604 * let it consider adding ForeignPaths.
4606 if (window_rel->fdwroutine &&
4607 window_rel->fdwroutine->GetForeignUpperPaths)
4608 window_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_WINDOW,
4609 input_rel, window_rel,
4612 /* Let extensions possibly add some more paths */
4613 if (create_upper_paths_hook)
4614 (*create_upper_paths_hook) (root, UPPERREL_WINDOW,
4615 input_rel, window_rel, NULL);
4617 /* Now choose the best path(s) */
4618 set_cheapest(window_rel);
4624 * Stack window-function implementation steps atop the given Path, and
4625 * add the result to window_rel.
4627 * window_rel: upperrel to contain result
4628 * path: input Path to use (must return input_target)
4629 * input_target: result of make_window_input_target
4630 * output_target: what the topmost WindowAggPath should return
4631 * wflists: result of find_window_functions
4632 * activeWindows: result of select_active_windows
4635 create_one_window_path(PlannerInfo *root,
4636 RelOptInfo *window_rel,
4638 PathTarget *input_target,
4639 PathTarget *output_target,
4640 WindowFuncLists *wflists,
4641 List *activeWindows)
4643 PathTarget *window_target;
4647 * Since each window clause could require a different sort order, we stack
4648 * up a WindowAgg node for each clause, with sort steps between them as
4649 * needed. (We assume that select_active_windows chose a good order for
4650 * executing the clauses in.)
4652 * input_target should contain all Vars and Aggs needed for the result.
4653 * (In some cases we wouldn't need to propagate all of these all the way
4654 * to the top, since they might only be needed as inputs to WindowFuncs.
4655 * It's probably not worth trying to optimize that though.) It must also
4656 * contain all window partitioning and sorting expressions, to ensure
4657 * they're computed only once at the bottom of the stack (that's critical
4658 * for volatile functions). As we climb up the stack, we'll add outputs
4659 * for the WindowFuncs computed at each level.
4661 window_target = input_target;
4663 foreach(l, activeWindows)
4665 WindowClause *wc = lfirst_node(WindowClause, l);
4666 List *window_pathkeys;
4668 window_pathkeys = make_pathkeys_for_window(root,
4670 root->processed_tlist);
4672 /* Sort if necessary */
4673 if (!pathkeys_contained_in(window_pathkeys, path->pathkeys))
4675 path = (Path *) create_sort_path(root, window_rel,
4681 if (lnext(activeWindows, l))
4684 * Add the current WindowFuncs to the output target for this
4685 * intermediate WindowAggPath. We must copy window_target to
4686 * avoid changing the previous path's target.
4688 * Note: a WindowFunc adds nothing to the target's eval costs; but
4689 * we do need to account for the increase in tlist width.
4693 window_target = copy_pathtarget(window_target);
4694 foreach(lc2, wflists->windowFuncs[wc->winref])
4696 WindowFunc *wfunc = lfirst_node(WindowFunc, lc2);
4698 add_column_to_pathtarget(window_target, (Expr *) wfunc, 0);
4699 window_target->width += get_typavgwidth(wfunc->wintype, -1);
4704 /* Install the goal target in the topmost WindowAgg */
4705 window_target = output_target;
4709 create_windowagg_path(root, window_rel, path, window_target,
4710 wflists->windowFuncs[wc->winref],
4714 add_path(window_rel, path);
4718 * create_distinct_paths
4720 * Build a new upperrel containing Paths for SELECT DISTINCT evaluation.
4722 * input_rel: contains the source-data Paths
4724 * Note: input paths should already compute the desired pathtarget, since
4725 * Sort/Unique won't project anything.
4728 create_distinct_paths(PlannerInfo *root,
4729 RelOptInfo *input_rel)
4731 Query *parse = root->parse;
4732 Path *cheapest_input_path = input_rel->cheapest_total_path;
4733 RelOptInfo *distinct_rel;
4734 double numDistinctRows;
4739 /* For now, do all work in the (DISTINCT, NULL) upperrel */
4740 distinct_rel = fetch_upper_rel(root, UPPERREL_DISTINCT, NULL);
4743 * We don't compute anything at this level, so distinct_rel will be
4744 * parallel-safe if the input rel is parallel-safe. In particular, if
4745 * there is a DISTINCT ON (...) clause, any path for the input_rel will
4746 * output those expressions, and will not be parallel-safe unless those
4747 * expressions are parallel-safe.
4749 distinct_rel->consider_parallel = input_rel->consider_parallel;
4752 * If the input rel belongs to a single FDW, so does the distinct_rel.
4754 distinct_rel->serverid = input_rel->serverid;
4755 distinct_rel->userid = input_rel->userid;
4756 distinct_rel->useridiscurrent = input_rel->useridiscurrent;
4757 distinct_rel->fdwroutine = input_rel->fdwroutine;
4759 /* Estimate number of distinct rows there will be */
4760 if (parse->groupClause || parse->groupingSets || parse->hasAggs ||
4761 root->hasHavingQual)
4764 * If there was grouping or aggregation, use the number of input rows
4765 * as the estimated number of DISTINCT rows (ie, assume the input is
4766 * already mostly unique).
4768 numDistinctRows = cheapest_input_path->rows;
4773 * Otherwise, the UNIQUE filter has effects comparable to GROUP BY.
4775 List *distinctExprs;
4777 distinctExprs = get_sortgrouplist_exprs(parse->distinctClause,
4779 numDistinctRows = estimate_num_groups(root, distinctExprs,
4780 cheapest_input_path->rows,
4785 * Consider sort-based implementations of DISTINCT, if possible.
4787 if (grouping_is_sortable(parse->distinctClause))
4790 * First, if we have any adequately-presorted paths, just stick a
4791 * Unique node on those. Then consider doing an explicit sort of the
4792 * cheapest input path and Unique'ing that.
4794 * When we have DISTINCT ON, we must sort by the more rigorous of
4795 * DISTINCT and ORDER BY, else it won't have the desired behavior.
4796 * Also, if we do have to do an explicit sort, we might as well use
4797 * the more rigorous ordering to avoid a second sort later. (Note
4798 * that the parser will have ensured that one clause is a prefix of the other.)
4801 List *needed_pathkeys;
4803 if (parse->hasDistinctOn &&
4804 list_length(root->distinct_pathkeys) <
4805 list_length(root->sort_pathkeys))
4806 needed_pathkeys = root->sort_pathkeys;
4808 needed_pathkeys = root->distinct_pathkeys;
4810 foreach(lc, input_rel->pathlist)
4812 Path *path = (Path *) lfirst(lc);
4814 if (pathkeys_contained_in(needed_pathkeys, path->pathkeys))
4816 add_path(distinct_rel, (Path *)
4817 create_upper_unique_path(root, distinct_rel,
4819 list_length(root->distinct_pathkeys),
4824 /* For explicit-sort case, always use the more rigorous clause */
4825 if (list_length(root->distinct_pathkeys) <
4826 list_length(root->sort_pathkeys))
4828 needed_pathkeys = root->sort_pathkeys;
4829 /* Assert checks that parser didn't mess up... */
4830 Assert(pathkeys_contained_in(root->distinct_pathkeys,
4834 needed_pathkeys = root->distinct_pathkeys;
4836 path = cheapest_input_path;
4837 if (!pathkeys_contained_in(needed_pathkeys, path->pathkeys))
4838 path = (Path *) create_sort_path(root, distinct_rel,
4843 add_path(distinct_rel, (Path *)
4844 create_upper_unique_path(root, distinct_rel,
4846 list_length(root->distinct_pathkeys),
4851 * Consider hash-based implementations of DISTINCT, if possible.
4853 * If we were not able to make any other types of path, we *must* hash or
4854 * die trying. If we do have other choices, there are several things that
4855 * should prevent selection of hashing: if the query uses DISTINCT ON
4856 * (because it won't really have the expected behavior if we hash), or if
4857 * enable_hashagg is off, or if it looks like the hashtable will exceed work_mem.
4860 * Note: grouping_is_hashable() is much more expensive to check than the
4861 * other gating conditions, so we want to do it last.
4863 if (distinct_rel->pathlist == NIL)
4864 allow_hash = true; /* we have no alternatives */
4865 else if (parse->hasDistinctOn || !enable_hashagg)
4866 allow_hash = false; /* policy-based decision not to hash */
4871 /* Estimate per-hash-entry space at tuple width... */
4872 hashentrysize = MAXALIGN(cheapest_input_path->pathtarget->width) +
4873 MAXALIGN(SizeofMinimalTupleHeader);
4874 /* plus the per-hash-entry overhead */
4875 hashentrysize += hash_agg_entry_size(0);
4877 /* Allow hashing only if hashtable is predicted to fit in work_mem */
4878 allow_hash = (hashentrysize * numDistinctRows <= work_mem * 1024L);
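/*
 * Editor's note: the sizing test above, worked with hypothetical numbers
 * (the real per-entry overhead comes from hash_agg_entry_size and MAXALIGN
 * rounding, both approximated here).  40-byte tuples + a 24-byte minimal
 * tuple header + ~56 bytes of overhead at 100,000 distinct rows is ~11.4MB,
 * so a 4MB work_mem disallows hashing.
 */
#if 0
#include <stdio.h>

int
main(void)
{
	long		work_mem = 4096;	/* KB */
	double		hashentrysize = 40 + 24 + 56;	/* bytes, assumed */
	double		numDistinctRows = 100000;

	printf("hash %s\n",
		   hashentrysize * numDistinctRows <= work_mem * 1024L ?
		   "allowed" : "disallowed");
	return 0;
}
#endif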
4881 if (allow_hash && grouping_is_hashable(parse->distinctClause))
4883 /* Generate hashed aggregate path --- no sort needed */
4884 add_path(distinct_rel, (Path *)
4885 create_agg_path(root,
4887 cheapest_input_path,
4888 cheapest_input_path->pathtarget,
4891 parse->distinctClause,
4897 /* Give a helpful error if we failed to find any implementation */
4898 if (distinct_rel->pathlist == NIL)
4900 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
4901 errmsg("could not implement DISTINCT"),
4902 errdetail("Some of the datatypes only support hashing, while others only support sorting.")));
4905 * If there is an FDW that's responsible for all baserels of the query,
4906 * let it consider adding ForeignPaths.
4908 if (distinct_rel->fdwroutine &&
4909 distinct_rel->fdwroutine->GetForeignUpperPaths)
4910 distinct_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_DISTINCT,
4911 input_rel, distinct_rel,
4914 /* Let extensions possibly add some more paths */
4915 if (create_upper_paths_hook)
4916 (*create_upper_paths_hook) (root, UPPERREL_DISTINCT,
4917 input_rel, distinct_rel, NULL);
4919 /* Now choose the best path(s) */
4920 set_cheapest(distinct_rel);
4922 return distinct_rel;
4926 * create_ordered_paths
4928 * Build a new upperrel containing Paths for ORDER BY evaluation.
4930 * All paths in the result must satisfy the ORDER BY ordering.
4931 * The only new path we need consider is an explicit sort on the
4932 * cheapest-total existing path.
4934 * input_rel: contains the source-data Paths
4935 * target: the output tlist the result Paths must emit
4936 * limit_tuples: estimated bound on the number of output tuples,
4937 * or -1 if no LIMIT or couldn't estimate
4940 create_ordered_paths(PlannerInfo *root,
4941 RelOptInfo *input_rel,
4943 bool target_parallel_safe,
4944 double limit_tuples)
4946 Path *cheapest_input_path = input_rel->cheapest_total_path;
4947 RelOptInfo *ordered_rel;
4950 /* For now, do all work in the (ORDERED, NULL) upperrel */
4951 ordered_rel = fetch_upper_rel(root, UPPERREL_ORDERED, NULL);
4954 * If the input relation is not parallel-safe, then the ordered relation
4955 * can't be parallel-safe, either. Otherwise, it's parallel-safe if the
4956 * target list is parallel-safe.
4958 if (input_rel->consider_parallel && target_parallel_safe)
4959 ordered_rel->consider_parallel = true;
4962 * If the input rel belongs to a single FDW, so does the ordered_rel.
4964 ordered_rel->serverid = input_rel->serverid;
4965 ordered_rel->userid = input_rel->userid;
4966 ordered_rel->useridiscurrent = input_rel->useridiscurrent;
4967 ordered_rel->fdwroutine = input_rel->fdwroutine;
4969 foreach(lc, input_rel->pathlist)
4971 Path *path = (Path *) lfirst(lc);
4974 is_sorted = pathkeys_contained_in(root->sort_pathkeys,
4976 if (path == cheapest_input_path || is_sorted)
4980 /* An explicit sort here can take advantage of LIMIT */
4981 path = (Path *) create_sort_path(root,
4984 root->sort_pathkeys,
4988 /* Add projection step if needed */
4989 if (path->pathtarget != target)
4990 path = apply_projection_to_path(root, ordered_rel,
4993 add_path(ordered_rel, path);
4998 * generate_gather_paths() will have already generated a simple Gather
4999 * path for the best parallel path, if any, and the loop above will have
5000 * considered sorting it. Similarly, generate_gather_paths() will also
5001 * have generated order-preserving Gather Merge plans which can be used
5002 * without sorting if they happen to match the sort_pathkeys, and the loop
5003 * above will have handled those as well. However, there's one more
5004 * possibility: it may make sense to sort the cheapest partial path
5005 * according to the required output order and then use Gather Merge.
5007 if (ordered_rel->consider_parallel && root->sort_pathkeys != NIL &&
5008 input_rel->partial_pathlist != NIL)
5010 Path *cheapest_partial_path;
5012 cheapest_partial_path = linitial(input_rel->partial_pathlist);
5015 * If cheapest partial path doesn't need a sort, this is redundant
5016 * with what's already been tried.
5018 if (!pathkeys_contained_in(root->sort_pathkeys,
5019 cheapest_partial_path->pathkeys))
5022 double total_groups;
5024 path = (Path *) create_sort_path(root,
5026 cheapest_partial_path,
5027 root->sort_pathkeys,
5030 total_groups = cheapest_partial_path->rows *
5031 cheapest_partial_path->parallel_workers;
5033 create_gather_merge_path(root, ordered_rel,
5036 root->sort_pathkeys, NULL,
5039 /* Add projection step if needed */
5040 if (path->pathtarget != target)
5041 path = apply_projection_to_path(root, ordered_rel,
5044 add_path(ordered_rel, path);
5049 * If there is an FDW that's responsible for all baserels of the query,
5050 * let it consider adding ForeignPaths.
5052 if (ordered_rel->fdwroutine &&
5053 ordered_rel->fdwroutine->GetForeignUpperPaths)
5054 ordered_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_ORDERED,
5055 input_rel, ordered_rel,
5058 /* Let extensions possibly add some more paths */
5059 if (create_upper_paths_hook)
5060 (*create_upper_paths_hook) (root, UPPERREL_ORDERED,
5061 input_rel, ordered_rel, NULL);
5064 * No need to bother with set_cheapest here; grouping_planner does not need us to do it.
5067 Assert(ordered_rel->pathlist != NIL);
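/*
 * Editor's note: on the Gather Merge sizing above -- a partial path's row
 * estimate is per worker, so the total fed to create_gather_merge_path
 * scales it back up by the worker count.  Hypothetical figures:
 */
#if 0
#include <stdio.h>

int
main(void)
{
	double		rows_per_worker = 25000.0;	/* cheapest_partial_path->rows */
	int			parallel_workers = 4;

	printf("total_groups ~ %.0f\n", rows_per_worker * parallel_workers);
	return 0;
}
#endif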
5074 * make_group_input_target
5075 * Generate appropriate PathTarget for initial input to grouping nodes.
5077 * If there is grouping or aggregation, the scan/join subplan cannot emit
5078 * the query's final targetlist; for example, it certainly can't emit any
5079 * aggregate function calls. This routine generates the correct target
5080 * for the scan/join subplan.
5082 * The query target list passed from the parser already contains entries
5083 * for all ORDER BY and GROUP BY expressions, but it will not have entries
5084 * for variables used only in HAVING clauses; so we need to add those
5085 * variables to the subplan target list. Also, we flatten all expressions
5086 * except GROUP BY items into their component variables; other expressions
5087 * will be computed by the upper plan nodes rather than by the subplan.
5088 * For example, given a query like
5089 * SELECT a+b,SUM(c+d) FROM table GROUP BY a+b;
5090 * we want to pass this targetlist to the subplan:
5091 * {a+b, c, d}
5092 * where the a+b target will be used by the Sort/Group steps, and the
5093 * other targets will be used for computing the final results.
5095 * 'final_target' is the query's final target list (in PathTarget form)
5097 * The result is the PathTarget to be computed by the Paths returned from query_planner().
5101 make_group_input_target(PlannerInfo *root, PathTarget *final_target)
5103 Query *parse = root->parse;
5104 PathTarget *input_target;
5105 List *non_group_cols;
5106 List *non_group_vars;
5111 * We must build a target containing all grouping columns, plus any other
5112 * Vars mentioned in the query's targetlist and HAVING qual.
5114 input_target = create_empty_pathtarget();
5115 non_group_cols = NIL;
5118 foreach(lc, final_target->exprs)
5120 Expr *expr = (Expr *) lfirst(lc);
5121 Index sgref = get_pathtarget_sortgroupref(final_target, i);
5123 if (sgref && parse->groupClause &&
5124 get_sortgroupref_clause_noerr(sgref, parse->groupClause) != NULL)
5127 * It's a grouping column, so add it to the input target as-is.
5129 add_column_to_pathtarget(input_target, expr, sgref);
5134 * Non-grouping column, so just remember the expression for later
5135 * call to pull_var_clause.
5137 non_group_cols = lappend(non_group_cols, expr);
5144 * If there's a HAVING clause, we'll need the Vars it uses, too.
5146 if (parse->havingQual)
5147 non_group_cols = lappend(non_group_cols, parse->havingQual);
5150 * Pull out all the Vars mentioned in non-group cols (plus HAVING), and
5151 * add them to the input target if not already present. (A Var used
5152 * directly as a GROUP BY item will be present already.) Note this
5153 * includes Vars used in resjunk items, so we are covering the needs of
5154 * ORDER BY and window specifications. Vars used within Aggrefs and
5155 * WindowFuncs will be pulled out here, too.
5157 non_group_vars = pull_var_clause((Node *) non_group_cols,
5158 PVC_RECURSE_AGGREGATES |
5159 PVC_RECURSE_WINDOWFUNCS |
5160 PVC_INCLUDE_PLACEHOLDERS);
5161 add_new_columns_to_pathtarget(input_target, non_group_vars);
5163 /* clean up cruft */
5164 list_free(non_group_vars);
5165 list_free(non_group_cols);
5167 /* XXX this causes some redundant cost calculation ... */
5168 return set_pathtarget_cost_width(root, input_target);
5172 * make_partial_grouping_target
5173 * Generate appropriate PathTarget for output of partial aggregate
5174 * (or partial grouping, if there are no aggregates) nodes.
5176 * A partial aggregation node needs to emit all the same aggregates that
5177 * a regular aggregation node would, plus any aggregates used in HAVING;
5178 * except that the Aggref nodes should be marked as partial aggregates.
5180 * In addition, we'd better emit any Vars and PlaceholderVars that are
5181 * used outside of Aggrefs in the aggregation tlist and HAVING. (Presumably,
5182 * these would be Vars that are grouped by or used in grouping expressions.)
5184 * grouping_target is the tlist to be emitted by the topmost aggregation step.
5185 * havingQual represents the HAVING clause.
5188 make_partial_grouping_target(PlannerInfo *root,
5189 PathTarget *grouping_target,
5192 Query *parse = root->parse;
5193 PathTarget *partial_target;
5194 List *non_group_cols;
5195 List *non_group_exprs;
5199 partial_target = create_empty_pathtarget();
5200 non_group_cols = NIL;
5203 foreach(lc, grouping_target->exprs)
5205 Expr *expr = (Expr *) lfirst(lc);
5206 Index sgref = get_pathtarget_sortgroupref(grouping_target, i);
5208 if (sgref && parse->groupClause &&
5209 get_sortgroupref_clause_noerr(sgref, parse->groupClause) != NULL)
5212 * It's a grouping column, so add it to the partial_target as-is.
5213 * (This allows the upper agg step to repeat the grouping calcs.)
5215 add_column_to_pathtarget(partial_target, expr, sgref);
5220 * Non-grouping column, so just remember the expression for later
5221 * call to pull_var_clause.
5223 non_group_cols = lappend(non_group_cols, expr);
5230 * If there's a HAVING clause, we'll need the Vars/Aggrefs it uses, too.
5233 non_group_cols = lappend(non_group_cols, havingQual);
5236 * Pull out all the Vars, PlaceHolderVars, and Aggrefs mentioned in
5237 * non-group cols (plus HAVING), and add them to the partial_target if not
5238 * already present. (An expression used directly as a GROUP BY item will
5239 * be present already.) Note this includes Vars used in resjunk items, so
5240 * we are covering the needs of ORDER BY and window specifications.
5242 non_group_exprs = pull_var_clause((Node *) non_group_cols,
5243 PVC_INCLUDE_AGGREGATES |
5244 PVC_RECURSE_WINDOWFUNCS |
5245 PVC_INCLUDE_PLACEHOLDERS);
5247 add_new_columns_to_pathtarget(partial_target, non_group_exprs);
5250 * Adjust Aggrefs to put them in partial mode. At this point all Aggrefs
5251 * are at the top level of the target list, so we can just scan the list
5252 * rather than recursing through the expression trees.
5254 foreach(lc, partial_target->exprs)
5256 Aggref *aggref = (Aggref *) lfirst(lc);
5258 if (IsA(aggref, Aggref))
5263 * We shouldn't need to copy the substructure of the Aggref node,
5264 * but flat-copy the node itself to avoid damaging other trees.
5266 newaggref = makeNode(Aggref);
5267 memcpy(newaggref, aggref, sizeof(Aggref));
5269 /* For now, assume serialization is required */
5270 mark_partial_aggref(newaggref, AGGSPLIT_INITIAL_SERIAL);
5272 lfirst(lc) = newaggref;
5276 /* clean up cruft */
5277 list_free(non_group_exprs);
5278 list_free(non_group_cols);
5280 /* XXX this causes some redundant cost calculation ... */
5281 return set_pathtarget_cost_width(root, partial_target);
5285 * mark_partial_aggref
5286 * Adjust an Aggref to make it represent a partial-aggregation step.
5288 * The Aggref node is modified in-place; caller must do any copying required.
5291 mark_partial_aggref(Aggref *agg, AggSplit aggsplit)
5293 /* aggtranstype should be computed by this point */
5294 Assert(OidIsValid(agg->aggtranstype));
5295 /* ... but aggsplit should still be as the parser left it */
5296 Assert(agg->aggsplit == AGGSPLIT_SIMPLE);
5298 /* Mark the Aggref with the intended partial-aggregation mode */
5299 agg->aggsplit = aggsplit;
5302 * Adjust result type if needed. Normally, a partial aggregate returns
5303 * the aggregate's transition type; but if that's INTERNAL and we're
5304 * serializing, it returns BYTEA instead.
5306 if (DO_AGGSPLIT_SKIPFINAL(aggsplit))
5308 if (agg->aggtranstype == INTERNALOID && DO_AGGSPLIT_SERIALIZE(aggsplit))
5309 agg->aggtype = BYTEAOID;
5311 agg->aggtype = agg->aggtranstype;
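/*
 * Editor's note: a standalone sketch of what partial aggregation means for
 * an AVG-like aggregate (types and names here are illustrative only).  Each
 * worker produces a transition state; the leader combines and finalizes.
 * INTERNAL-typed states must cross process boundaries serialized, which is
 * why the function above substitutes BYTEA as the result type.
 */
#if 0
#include <stdio.h>

typedef struct
{
	double		sum;
	long		count;
} AvgState;

int
main(void)
{
	AvgState	worker1 = {10.0, 2};	/* partial state: saw 4 and 6 */
	AvgState	worker2 = {5.0, 1};		/* partial state: saw 5 */
	AvgState	combined = {worker1.sum + worker2.sum,
							worker1.count + worker2.count};

	printf("avg = %g\n", combined.sum / combined.count);	/* 5 */
	return 0;
}
#endif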
5316 * postprocess_setop_tlist
5317 * Fix up targetlist returned by plan_set_operations().
5319 * We need to transpose sort key info from the orig_tlist into new_tlist.
5320 * NOTE: this would not be good enough if we supported resjunk sort keys
5321 * for results of set operations --- then, we'd need to project a whole
5322 * new tlist to evaluate the resjunk columns. For now, just ereport if we
5323 * find any resjunk columns in orig_tlist.
5326 postprocess_setop_tlist(List *new_tlist, List *orig_tlist)
5329 ListCell *orig_tlist_item = list_head(orig_tlist);
5331 foreach(l, new_tlist)
5333 TargetEntry *new_tle = lfirst_node(TargetEntry, l);
5334 TargetEntry *orig_tle;
5336 /* ignore resjunk columns in setop result */
5337 if (new_tle->resjunk)
5340 Assert(orig_tlist_item != NULL);
5341 orig_tle = lfirst_node(TargetEntry, orig_tlist_item);
5342 orig_tlist_item = lnext(orig_tlist, orig_tlist_item);
5343 if (orig_tle->resjunk) /* should not happen */
5344 elog(ERROR, "resjunk output columns are not implemented");
5345 Assert(new_tle->resno == orig_tle->resno);
5346 new_tle->ressortgroupref = orig_tle->ressortgroupref;
5348 if (orig_tlist_item != NULL)
5349 elog(ERROR, "resjunk output columns are not implemented");
5354 * select_active_windows
5355 * Create a list of the "active" window clauses (ie, those referenced
5356 * by non-deleted WindowFuncs) in the order they are to be executed.
5359 select_active_windows(PlannerInfo *root, WindowFuncLists *wflists)
5361 List *windowClause = root->parse->windowClause;
5365 WindowClauseSortData *actives = palloc(sizeof(WindowClauseSortData)
5366 * list_length(windowClause));
5368 /* First, construct an array of the active windows */
5369 foreach(lc, windowClause)
5371 WindowClause *wc = lfirst_node(WindowClause, lc);
5373 /* It's only active if wflists shows some related WindowFuncs */
5374 Assert(wc->winref <= wflists->maxWinRef);
5375 if (wflists->windowFuncs[wc->winref] == NIL)
5378 actives[nActive].wc = wc; /* original clause */
5381 * For sorting, we want the list of partition keys followed by the
5382 * list of sort keys. But pathkeys construction will remove duplicates
5383 * between the two, so we can as well (even though we can't detect all
5384 * of the duplicates, since some may come from ECs - that might mean
5385 * we miss optimization chances here). We must, however, ensure that
5386 * the order of entries is preserved with respect to the ones we do
5389 * partitionClause and orderClause had their own duplicates removed in
5390 * parse analysis, so we're only concerned here with removing
5391 * orderClause entries that also appear in partitionClause.
5393 actives[nActive].uniqueOrder =
5394 list_concat_unique(list_copy(wc->partitionClause),
5400 * Sort active windows by their partitioning/ordering clauses, ignoring
5401 * any framing clauses, so that the windows that need the same sorting are
5402 * adjacent in the list. When we come to generate paths, this will avoid
5403 * inserting additional Sort nodes.
5405 * This is how we implement a specific requirement from the SQL standard,
5406 * which says that when two or more windows are order-equivalent (i.e.
5407 * have matching partition and order clauses, even if their names or
5408 * framing clauses differ), then all peer rows must be presented in the
5409 * same order in all of them. If we allowed multiple sort nodes for such
5410 * cases, we'd risk having the peer rows end up in different orders in
5411 * equivalent windows due to sort instability. (See General Rule 4 of
5412 * <window clause> in SQL2008 - SQL2016.)
5414 * Additionally, if the entire list of clauses of one window is a prefix
5415 * of another, put first the window with stronger sorting requirements.
5416 * This way we will first sort for the stronger window, and won't have to sort
5417 * again for the weaker one.
5419 qsort(actives, nActive, sizeof(WindowClauseSortData), common_prefix_cmp);
5421 /* build ordered list of the original WindowClause nodes */
5422 for (int i = 0; i < nActive; i++)
5423 result = lappend(result, actives[i].wc);
5432 * QSort comparison function for WindowClauseSortData
5434 * Sort the windows by the required sorting clauses. First, compare the sort
5435 * clauses themselves. Second, if one window's clauses are a prefix of another
5436 * one's clauses, put the window with more sort clauses first.
5439 common_prefix_cmp(const void *a, const void *b)
5441 const WindowClauseSortData *wcsa = a;
5442 const WindowClauseSortData *wcsb = b;
5446 forboth(item_a, wcsa->uniqueOrder, item_b, wcsb->uniqueOrder)
5448 SortGroupClause *sca = lfirst_node(SortGroupClause, item_a);
5449 SortGroupClause *scb = lfirst_node(SortGroupClause, item_b);
5451 if (sca->tleSortGroupRef > scb->tleSortGroupRef)
5453 else if (sca->tleSortGroupRef < scb->tleSortGroupRef)
5455 else if (sca->sortop > scb->sortop)
5457 else if (sca->sortop < scb->sortop)
5459 else if (sca->nulls_first && !scb->nulls_first)
5461 else if (!sca->nulls_first && scb->nulls_first)
5463 /* no need to compare eqop, since it is fully determined by sortop */
5466 if (list_length(wcsa->uniqueOrder) > list_length(wcsb->uniqueOrder))
5468 else if (list_length(wcsa->uniqueOrder) < list_length(wcsb->uniqueOrder))
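/*
 * Editor's note: a standalone sketch of the prefix tie-break above, with
 * the per-clause comparison reduced to plain ints (the real code compares
 * tleSortGroupRef, sortop and nulls_first).  Sorting {(1,2),(1,2,3),(1)}
 * yields (1,2,3),(1,2),(1): sort once for the strongest window and the
 * weaker ones come for free.
 */
#if 0
#include <stdio.h>
#include <stdlib.h>

typedef struct
{
	const int  *keys;
	int			nkeys;
} KeyList;

static int
prefix_cmp(const void *a, const void *b)
{
	const KeyList *la = a;
	const KeyList *lb = b;
	int			n = la->nkeys < lb->nkeys ? la->nkeys : lb->nkeys;

	for (int i = 0; i < n; i++)
		if (la->keys[i] != lb->keys[i])
			return la->keys[i] - lb->keys[i];
	/* shared prefix: put the longer (stronger) requirement first */
	return lb->nkeys - la->nkeys;
}

int
main(void)
{
	static const int k1[] = {1, 2};
	static const int k2[] = {1, 2, 3};
	static const int k3[] = {1};
	KeyList		lists[] = {{k1, 2}, {k2, 3}, {k3, 1}};

	qsort(lists, 3, sizeof(KeyList), prefix_cmp);
	for (int i = 0; i < 3; i++)
		printf("%d keys\n", lists[i].nkeys);	/* 3, 2, 1 */
	return 0;
}
#endif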
5475 * make_window_input_target
5476 * Generate appropriate PathTarget for initial input to WindowAgg nodes.
5478 * When the query has window functions, this function computes the desired
5479 * target to be computed by the node just below the first WindowAgg.
5480 * This tlist must contain all values needed to evaluate the window functions,
5481 * compute the final target list, and perform any required final sort step.
5482 * If multiple WindowAggs are needed, each intermediate one adds its window
5483 * function results onto this base tlist; only the topmost WindowAgg computes
 * the actual desired target list.
 *
 * This function is much like make_group_input_target, though not quite enough
 * like it to share code.  As in that function, we flatten most expressions
 * into their component variables.  But we do not want to flatten window
 * PARTITION BY/ORDER BY clauses, since that might result in multiple
 * evaluations of them, which would be bad (possibly even resulting in
 * inconsistent answers, if they contain volatile functions).
 * Also, we must not flatten GROUP BY clauses that were left unflattened by
 * make_group_input_target, because we may no longer have access to the
 * individual Vars in them.
 *
 * Another key difference from make_group_input_target is that we don't
 * flatten Aggref expressions, since those are to be computed below the
 * window functions and just referenced like Vars above that.
 *
 * 'final_target' is the query's final target list (in PathTarget form)
 * 'activeWindows' is the list of active windows previously identified by
 *			select_active_windows.
 *
 * The result is the PathTarget to be computed by the plan node immediately
 * below the first WindowAgg node.
 */
static PathTarget *
make_window_input_target(PlannerInfo *root,
						 PathTarget *final_target,
						 List *activeWindows)
{
	Query	   *parse = root->parse;
	PathTarget *input_target;
	Bitmapset  *sgrefs;
	List	   *flattenable_cols;
	List	   *flattenable_vars;
	int			i;
	ListCell   *lc;

	Assert(parse->hasWindowFuncs);

	/*
	 * Collect the sortgroupref numbers of window PARTITION/ORDER BY clauses
	 * into a bitmapset for convenient reference below.
	 */
	sgrefs = NULL;
	foreach(lc, activeWindows)
	{
		WindowClause *wc = lfirst_node(WindowClause, lc);
		ListCell   *lc2;

		foreach(lc2, wc->partitionClause)
		{
			SortGroupClause *sortcl = lfirst_node(SortGroupClause, lc2);

			sgrefs = bms_add_member(sgrefs, sortcl->tleSortGroupRef);
		}
		foreach(lc2, wc->orderClause)
		{
			SortGroupClause *sortcl = lfirst_node(SortGroupClause, lc2);

			sgrefs = bms_add_member(sgrefs, sortcl->tleSortGroupRef);
		}
	}

	/* Add in sortgroupref numbers of GROUP BY clauses, too */
	foreach(lc, parse->groupClause)
	{
		SortGroupClause *grpcl = lfirst_node(SortGroupClause, lc);

		sgrefs = bms_add_member(sgrefs, grpcl->tleSortGroupRef);
	}

	/*
	 * Construct a target containing all the non-flattenable targetlist
	 * items, and save aside the others for a moment.
	 */
	input_target = create_empty_pathtarget();
	flattenable_cols = NIL;

	i = 0;
	foreach(lc, final_target->exprs)
	{
		Expr	   *expr = (Expr *) lfirst(lc);
		Index		sgref = get_pathtarget_sortgroupref(final_target, i);

		/*
		 * Don't want to deconstruct window clauses or GROUP BY items.  (Note
		 * that such items can't contain window functions, so it's okay to
		 * compute them below the WindowAgg nodes.)
		 */
		if (sgref != 0 && bms_is_member(sgref, sgrefs))
		{
			/*
			 * Don't want to deconstruct this value, so add it to the input
			 * target as-is.
			 */
			add_column_to_pathtarget(input_target, expr, sgref);
		}
		else
		{
			/*
			 * Column is to be flattened, so just remember the expression for
			 * later call to pull_var_clause.
			 */
			flattenable_cols = lappend(flattenable_cols, expr);
		}

		i++;
	}

	/*
	 * Pull out all the Vars and Aggrefs mentioned in flattenable columns,
	 * and add them to the input target if not already present.  (Some might
	 * be there already because they're used directly as window/group
	 * clauses.)
	 *
	 * Note: it's essential to use PVC_INCLUDE_AGGREGATES here, so that any
	 * Aggrefs are placed in the Agg node's tlist and not left to be computed
	 * at higher levels.  On the other hand, we should recurse into
	 * WindowFuncs to make sure their input expressions are available.
	 */
	flattenable_vars = pull_var_clause((Node *) flattenable_cols,
									   PVC_INCLUDE_AGGREGATES |
									   PVC_RECURSE_WINDOWFUNCS |
									   PVC_INCLUDE_PLACEHOLDERS);
	add_new_columns_to_pathtarget(input_target, flattenable_vars);

	/* clean up cruft */
	list_free(flattenable_vars);
	list_free(flattenable_cols);

	/* XXX this causes some redundant cost calculation ... */
	return set_pathtarget_cost_width(root, input_target);
}
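
/*
 * Illustrative example (not from the original source): for a query such as
 *
 *		SELECT c, d, sum(b) OVER (PARTITION BY c ORDER BY d), e + 1
 *		FROM tab;
 *
 * c and d carry window sortgrouprefs and are kept whole in the input target,
 * while "e + 1" is flattened to just e, and the window function's argument b
 * is pulled in by pull_var_clause; "e + 1" itself is recomputed by the
 * projection above the WindowAgg.
 */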
/*
 * make_pathkeys_for_window
 *		Create a pathkeys list describing the required input ordering
 *		for the given WindowClause.
 *
 * The required ordering is first the PARTITION keys, then the ORDER keys.
 * In the future we might try to implement windowing using hashing, in which
 * case the ordering could be relaxed, but for now we always sort.
 */
static List *
make_pathkeys_for_window(PlannerInfo *root, WindowClause *wc,
						 List *tlist)
{
	List	   *window_pathkeys;
	List	   *window_sortclauses;

	/* Throw error if can't sort */
	if (!grouping_is_sortable(wc->partitionClause))
		ereport(ERROR,
				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
				 errmsg("could not implement window PARTITION BY"),
				 errdetail("Window partitioning columns must be of sortable datatypes.")));
	if (!grouping_is_sortable(wc->orderClause))
		ereport(ERROR,
				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
				 errmsg("could not implement window ORDER BY"),
				 errdetail("Window ordering columns must be of sortable datatypes.")));

	/* Okay, make the combined pathkeys */
	window_sortclauses = list_concat(list_copy(wc->partitionClause),
									 list_copy(wc->orderClause));
	window_pathkeys = make_pathkeys_for_sortclauses(root,
													window_sortclauses,
													tlist);
	list_free(window_sortclauses);
	return window_pathkeys;
}
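
/*
 * For example (hypothetical window definition, not from the original
 * source), a clause such as
 *
 *		OVER (PARTITION BY a ORDER BY b)
 *
 * yields the combined sort clause list (a, b), so the WindowAgg's input
 * must be sorted by a first, then b within each partition.
 */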
/*
 * make_sort_input_target
 *		Generate appropriate PathTarget for initial input to Sort step.
 *
 * If the query has ORDER BY, this function chooses the target to be computed
 * by the node just below the Sort (and DISTINCT, if any, since Unique can't
 * project) steps.  This might or might not be identical to the query's final
 * output target list.
 *
 * The main argument for keeping the sort-input tlist the same as the final
 * is that we avoid a separate projection node (which will be needed if
 * they're different, because Sort can't project).  However, there are also
 * advantages to postponing tlist evaluation till after the Sort: it ensures
 * a consistent order of evaluation for any volatile functions in the tlist,
 * and if there's also a LIMIT, we can stop the query without ever computing
 * tlist functions for later rows, which is beneficial for both volatile and
 * expensive functions.
 *
 * Our current policy is to postpone volatile expressions till after the sort
 * unconditionally (assuming that that's possible, ie they are in plain tlist
 * columns and not ORDER BY/GROUP BY/DISTINCT columns).  We also prefer to
 * postpone set-returning expressions, because running them beforehand would
 * bloat the sort dataset, and because it might cause unexpected output order
 * if the sort isn't stable.  However there's a constraint on that: all SRFs
 * in the tlist should be evaluated at the same plan step, so that they can
 * run in sync in nodeProjectSet.  So if any SRFs are in sort columns, we
 * mustn't postpone any SRFs.  (Note that in principle that policy should
 * probably get applied to the group/window input targetlists too, but we
 * have not done that historically.)  Lastly, expensive expressions are
 * postponed if there is a LIMIT, or if root->tuple_fraction shows that
 * partial evaluation of the query is possible (if neither is true, we expect
 * to have to evaluate the expressions for every row anyway), or if there are
 * any volatile or set-returning expressions (since once we've put in a
 * projection at all, it won't cost any more to postpone more stuff).
 *
 * Another issue that could potentially be considered here is that
 * evaluating tlist expressions could result in data that's either wider
 * or narrower than the input Vars, thus changing the volume of data that
 * has to go through the Sort.  However, we usually have only a very bad
 * idea of the output width of any expression more complex than a Var,
 * so for now it seems too risky to try to optimize on that basis.
 *
 * Note that if we do produce a modified sort-input target, and then the
 * query ends up not using an explicit Sort, no particular harm is done:
 * we'll initially use the modified target for the preceding path nodes,
 * but then change them to the final target with apply_projection_to_path.
 * Moreover, in such a case the guarantees about evaluation order of
 * volatile functions still hold, since the rows are sorted already.
 *
 * This function has some things in common with make_group_input_target and
 * make_window_input_target, though the detailed rules for what to do are
 * different.  We never flatten/postpone any grouping or ordering columns;
 * those are needed before the sort.  If we do flatten a particular
 * expression, we leave Aggref and WindowFunc nodes alone, since those were
 * computed earlier.
 *
 * 'final_target' is the query's final target list (in PathTarget form)
 * 'have_postponed_srfs' is an output argument, see below
 *
 * The result is the PathTarget to be computed by the plan node immediately
 * below the Sort step (and the Distinct step, if any).  This will be
 * exactly final_target if we decide a projection step wouldn't be helpful.
 *
 * In addition, *have_postponed_srfs is set to true if we choose to postpone
 * any set-returning functions to after the Sort.
 */
static PathTarget *
make_sort_input_target(PlannerInfo *root,
					   PathTarget *final_target,
					   bool *have_postponed_srfs)
{
	Query	   *parse = root->parse;
	PathTarget *input_target;
	int			ncols;
	bool	   *col_is_srf;
	bool	   *postpone_col;
	bool		have_srf;
	bool		have_volatile;
	bool		have_expensive;
	bool		have_srf_sortcols;
	bool		postpone_srfs;
	List	   *postponable_cols;
	List	   *postponable_vars;
	int			i;
	ListCell   *lc;

	/* Shouldn't get here unless query has ORDER BY */
	Assert(parse->sortClause);

	*have_postponed_srfs = false;	/* default result */

	/* Inspect tlist and collect per-column information */
	ncols = list_length(final_target->exprs);
	col_is_srf = (bool *) palloc0(ncols * sizeof(bool));
	postpone_col = (bool *) palloc0(ncols * sizeof(bool));
	have_srf = have_volatile = have_expensive = have_srf_sortcols = false;

	i = 0;
	foreach(lc, final_target->exprs)
	{
		Expr	   *expr = (Expr *) lfirst(lc);

		/*
		 * If the column has a sortgroupref, assume it has to be evaluated
		 * before sorting.  Generally such columns would be ORDER BY, GROUP
		 * BY, etc targets.  One exception is columns that were removed from
		 * GROUP BY by remove_useless_groupby_columns() ... but those would
		 * only be Vars anyway.  There don't seem to be any cases where it
		 * would be worth the trouble to double-check.
		 */
		if (get_pathtarget_sortgroupref(final_target, i) == 0)
		{
			/*
			 * Check for SRF or volatile functions.  Check the SRF case first
			 * because we must know whether we have any postponed SRFs.
			 */
			if (parse->hasTargetSRFs &&
				expression_returns_set((Node *) expr))
			{
				/* We'll decide below whether these are postponable */
				col_is_srf[i] = true;
				have_srf = true;
			}
			else if (contain_volatile_functions((Node *) expr))
			{
				/* Unconditionally postpone */
				postpone_col[i] = true;
				have_volatile = true;
			}
			else
			{
				/*
				 * Else check the cost.  XXX it's annoying to have to do this
				 * when set_pathtarget_cost_width() just did it.  Refactor to
				 * allow sharing the work?
				 */
				QualCost	cost;

				cost_qual_eval_node(&cost, (Node *) expr, root);

				/*
				 * We arbitrarily define "expensive" as "more than 10X
				 * cpu_operator_cost".  Note this will take in any PL function
				 * with default cost.
				 */
				if (cost.per_tuple > 10 * cpu_operator_cost)
				{
					postpone_col[i] = true;
					have_expensive = true;
				}
			}
		}
		else
		{
			/* For sortgroupref cols, just check if any contain SRFs */
			if (!have_srf_sortcols &&
				parse->hasTargetSRFs &&
				expression_returns_set((Node *) expr))
				have_srf_sortcols = true;
		}

		i++;
	}

	/*
	 * We can postpone SRFs if we have some but none are in sortgroupref cols.
	 */
	postpone_srfs = (have_srf && !have_srf_sortcols);

	/*
	 * If we don't need a post-sort projection, just return final_target.
	 */
	if (!(postpone_srfs || have_volatile ||
		  (have_expensive &&
		   (parse->limitCount || root->tuple_fraction > 0))))
		return final_target;

	/*
	 * Report whether the post-sort projection will contain set-returning
	 * functions.  This is important because it affects whether the Sort can
	 * rely on the query's LIMIT (if any) to bound the number of rows it needs
	 * to return.
	 */
	*have_postponed_srfs = postpone_srfs;

	/*
	 * Construct the sort-input target, taking all non-postponable columns
	 * and then adding Vars, PlaceHolderVars, Aggrefs, and WindowFuncs found
	 * in the postponable ones.
	 */
	input_target = create_empty_pathtarget();
	postponable_cols = NIL;

	i = 0;
	foreach(lc, final_target->exprs)
	{
		Expr	   *expr = (Expr *) lfirst(lc);

		if (postpone_col[i] || (postpone_srfs && col_is_srf[i]))
			postponable_cols = lappend(postponable_cols, expr);
		else
			add_column_to_pathtarget(input_target, expr,
									 get_pathtarget_sortgroupref(final_target, i));

		i++;
	}

	/*
	 * Pull out all the Vars, Aggrefs, and WindowFuncs mentioned in
	 * postponable columns, and add them to the sort-input target if not
	 * already present.  (Some might be there already.)  We mustn't
	 * deconstruct Aggrefs or WindowFuncs here, since the projection node
	 * would be unable to recompute them.
	 */
	postponable_vars = pull_var_clause((Node *) postponable_cols,
									   PVC_INCLUDE_AGGREGATES |
									   PVC_INCLUDE_WINDOWFUNCS |
									   PVC_INCLUDE_PLACEHOLDERS);
	add_new_columns_to_pathtarget(input_target, postponable_vars);

	/* clean up cruft */
	list_free(postponable_vars);
	list_free(postponable_cols);

	/* XXX this represents even more redundant cost calculation ... */
	return set_pathtarget_cost_width(root, input_target);
}
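
/*
 * Illustrative example (not from the original source; "slow_func" is a
 * hypothetical function name): in
 *
 *		SELECT x, slow_func(x) FROM tab ORDER BY x LIMIT 10;
 *
 * slow_func(x) has no sortgroupref, so if it is volatile or expensive it is
 * postponed: the sort-input target contains just x, and slow_func(x) is
 * computed by a projection above the Sort, for only the 10 returned rows.
 */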
/*
 * get_cheapest_fractional_path
 *	  Find the cheapest path for retrieving a specified fraction of all
 *	  the tuples expected to be returned by the given relation.
 *
 * We interpret tuple_fraction the same way as grouping_planner.
 *
 * We assume set_cheapest() has been run on the given rel.
 */
Path *
get_cheapest_fractional_path(RelOptInfo *rel, double tuple_fraction)
{
	Path	   *best_path = rel->cheapest_total_path;
	ListCell   *l;

	/* If all tuples will be retrieved, just return the cheapest-total path */
	if (tuple_fraction <= 0.0)
		return best_path;

	/* Convert absolute # of tuples to a fraction; no need to clamp to 0..1 */
	if (tuple_fraction >= 1.0 && best_path->rows > 0)
		tuple_fraction /= best_path->rows;

	foreach(l, rel->pathlist)
	{
		Path	   *path = (Path *) lfirst(l);

		if (path == rel->cheapest_total_path ||
			compare_fractional_path_costs(best_path, path, tuple_fraction) <= 0)
			continue;

		best_path = path;
	}

	return best_path;
}
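
/*
 * Worked example of the conversion above (illustrative): if tuple_fraction
 * arrives as an absolute count of 100 tuples and the cheapest-total path is
 * expected to return 10000 rows, tuple_fraction becomes 100 / 10000 = 0.01,
 * and each remaining path is compared on the cost of producing that first
 * one percent of its output.
 */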
/*
 * adjust_paths_for_srfs
 *		Fix up the Paths of the given upperrel to handle tSRFs properly.
 *
 * The executor can only handle set-returning functions that appear at the
 * top level of the targetlist of a ProjectSet plan node.  If we have any SRFs
 * that are not at top level, we need to split up the evaluation into multiple
 * plan levels in which each level satisfies this constraint.  This function
 * modifies each Path of an upperrel that (might) compute any SRFs in its
 * output tlist to insert appropriate projection steps.
 *
 * The given targets and targets_contain_srfs lists are from
 * split_pathtarget_at_srfs().  We assume the existing Paths emit the first
 * target in targets.
 */
static void
adjust_paths_for_srfs(PlannerInfo *root, RelOptInfo *rel,
					  List *targets, List *targets_contain_srfs)
{
	ListCell   *lc;

	Assert(list_length(targets) == list_length(targets_contain_srfs));
	Assert(!linitial_int(targets_contain_srfs));

	/* If no SRFs appear at this plan level, nothing to do */
	if (list_length(targets) == 1)
		return;

	/*
	 * Stack SRF-evaluation nodes atop each path for the rel.
	 *
	 * In principle we should re-run set_cheapest() here to identify the
	 * cheapest path, but it seems unlikely that adding the same tlist eval
	 * costs to all the paths would change that, so we don't bother. Instead,
	 * just assume that the cheapest-startup and cheapest-total paths remain
	 * so.  (There should be no parameterized paths anymore, so we needn't
	 * worry about updating cheapest_parameterized_paths.)
	 */
	foreach(lc, rel->pathlist)
	{
		Path	   *subpath = (Path *) lfirst(lc);
		Path	   *newpath = subpath;
		ListCell   *lc1,
				   *lc2;

		Assert(subpath->param_info == NULL);
		forboth(lc1, targets, lc2, targets_contain_srfs)
		{
			PathTarget *thistarget = lfirst_node(PathTarget, lc1);
			bool		contains_srfs = (bool) lfirst_int(lc2);

			/* If this level doesn't contain SRFs, do regular projection */
			if (contains_srfs)
				newpath = (Path *) create_set_projection_path(root,
															  rel,
															  newpath,
															  thistarget);
			else
				newpath = (Path *) apply_projection_to_path(root,
															rel,
															newpath,
															thistarget);
		}
		lfirst(lc) = newpath;
		if (subpath == rel->cheapest_startup_path)
			rel->cheapest_startup_path = newpath;
		if (subpath == rel->cheapest_total_path)
			rel->cheapest_total_path = newpath;
	}

	/* Likewise for partial paths, if any */
	foreach(lc, rel->partial_pathlist)
	{
		Path	   *subpath = (Path *) lfirst(lc);
		Path	   *newpath = subpath;
		ListCell   *lc1,
				   *lc2;

		Assert(subpath->param_info == NULL);
		forboth(lc1, targets, lc2, targets_contain_srfs)
		{
			PathTarget *thistarget = lfirst_node(PathTarget, lc1);
			bool		contains_srfs = (bool) lfirst_int(lc2);

			/* If this level doesn't contain SRFs, do regular projection */
			if (contains_srfs)
				newpath = (Path *) create_set_projection_path(root,
															  rel,
															  newpath,
															  thistarget);
			else
			{
				/* avoid apply_projection_to_path, in case of multiple refs */
				newpath = (Path *) create_projection_path(root,
														  rel,
														  newpath,
														  thistarget);
			}
		}
		lfirst(lc) = newpath;
	}
}
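
/*
 * Illustrative example (not from the original source): in
 *
 *		SELECT x, generate_series(1, x) + 1 FROM tab;
 *
 * the SRF is not at top level, so split_pathtarget_at_srfs produces three
 * levels: the SRF-free base target (x), a ProjectSet level emitting x and
 * generate_series(1, x), and a plain projection above it computing the
 * "+ 1".  The loops above stack one path node per level, bottom to top.
 */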
/*
 * expression_planner
 *		Perform planner's transformations on a standalone expression.
 *
 * Various utility commands need to evaluate expressions that are not part
 * of a plannable query.  They can do so using the executor's regular
 * expression-execution machinery, but first the expression has to be fed
 * through here to transform it from parser output to something executable.
 *
 * Currently, we disallow sublinks in standalone expressions, so there's no
 * real "planning" involved here.  (That might not always be true though.)
 * What we must do is run eval_const_expressions to ensure that any function
 * calls are converted to positional notation and function default arguments
 * get inserted.  The fact that constant subexpressions get simplified is a
 * side-effect that is useful when the expression will get evaluated more
 * than once.  Also, we must fix operator function IDs.
 *
 * This does not return any information about dependencies of the expression.
 * Hence callers should use the results only for the duration of the current
 * query.  Callers that would like to cache the results for longer should use
 * expression_planner_with_deps, probably via the plancache.
 *
 * Note: this must not make any damaging changes to the passed-in expression
 * tree.  (It would actually be okay to apply fix_opfuncids to it, but since
 * we first do an expression_tree_mutator-based walk, what is returned will
 * be a new node tree.)  The result is constructed in the current memory
 * context; beware that this can leak a lot of additional stuff there, too.
 */
Expr *
expression_planner(Expr *expr)
{
	Node	   *result;

	/*
	 * Convert named-argument function calls, insert default arguments and
	 * simplify constant subexprs
	 */
	result = eval_const_expressions(NULL, (Node *) expr);

	/* Fill in opfuncid values if missing */
	fix_opfuncids(result);

	return (Expr *) result;
}
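
/*
 * A minimal usage sketch (illustrative only, not part of this file; the
 * function name and variables are hypothetical).  A utility command that
 * must evaluate a standalone expression would pass the parser output through
 * expression_planner, then compile and run it with the executor's regular
 * expression machinery:
 */
#ifdef NOT_USED
static Datum
eval_standalone_expression(Expr *raw_expr, ExprContext *econtext, bool *isnull)
{
	/* transform parser output into an executable expression tree */
	Expr	   *planned = expression_planner(raw_expr);

	/* compile for execution, then evaluate once in the given context */
	ExprState  *exprstate = ExecInitExpr(planned, NULL);

	return ExecEvalExprSwitchContext(exprstate, econtext, isnull);
}
#endif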
/*
 * expression_planner_with_deps
 *		Perform planner's transformations on a standalone expression,
 *		returning expression dependency information along with the result.
 *
 * This is identical to expression_planner() except that it also returns
 * information about possible dependencies of the expression, ie identities of
 * objects whose definitions affect the result.  As in a PlannedStmt, these
 * are expressed as a list of relation Oids and a list of PlanInvalItems.
 */
Expr *
expression_planner_with_deps(Expr *expr,
							 List **relationOids,
							 List **invalItems)
{
	Node	   *result;
	PlannerGlobal glob;
	PlannerInfo root;

	/* Make up dummy planner state so we can use setrefs machinery */
	MemSet(&glob, 0, sizeof(glob));
	glob.type = T_PlannerGlobal;
	glob.relationOids = NIL;
	glob.invalItems = NIL;

	MemSet(&root, 0, sizeof(root));
	root.type = T_PlannerInfo;
	root.glob = &glob;

	/*
	 * Convert named-argument function calls, insert default arguments and
	 * simplify constant subexprs.  Collect identities of inlined functions
	 * and elided domains, too.
	 */
	result = eval_const_expressions(&root, (Node *) expr);

	/* Fill in opfuncid values if missing */
	fix_opfuncids(result);

	/*
	 * Now walk the finished expression to find anything else we ought to
	 * record as an expression dependency.
	 */
	(void) extract_query_dependencies_walker(result, &root);

	*relationOids = glob.relationOids;
	*invalItems = glob.invalItems;

	return (Expr *) result;
}
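
/*
 * A hypothetical caching caller (illustrative only, not part of this file):
 * the returned dependency lists would typically be stored alongside the
 * planned expression so the cached copy can be invalidated when any
 * referenced object changes, much as the plancache does for full plans.
 */
#ifdef NOT_USED
static Expr *
plan_expression_for_cache(Expr *raw_expr,
						  List **relationOids, List **invalItems)
{
	/* plan the expression and capture its dependencies in one call */
	return expression_planner_with_deps(raw_expr, relationOids, invalItems);
}
#endif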
/*
 * plan_cluster_use_sort
 *		Use the planner to decide how CLUSTER should implement sorting
 *
 * tableOid is the OID of a table to be clustered on its index indexOid
 * (which is already known to be a btree index).  Decide whether it's
 * cheaper to do an indexscan or a seqscan-plus-sort to execute the CLUSTER.
 * Return true to use sorting, false to use an indexscan.
 *
 * Note: caller had better already hold some type of lock on the table.
 */
bool
plan_cluster_use_sort(Oid tableOid, Oid indexOid)
{
	PlannerInfo *root;
	Query	   *query;
	PlannerGlobal *glob;
	RangeTblEntry *rte;
	RelOptInfo *rel;
	IndexOptInfo *indexInfo;
	QualCost	indexExprCost;
	Cost		comparisonCost;
	Path	   *seqScanPath;
	Path		seqScanAndSortPath;
	IndexPath  *indexScanPath;
	ListCell   *lc;

	/* We can short-circuit the cost comparison if indexscans are disabled */
	if (!enable_indexscan)
		return true;			/* use sort */

	/* Set up mostly-dummy planner state */
	query = makeNode(Query);
	query->commandType = CMD_SELECT;

	glob = makeNode(PlannerGlobal);

	root = makeNode(PlannerInfo);
	root->parse = query;
	root->glob = glob;
	root->query_level = 1;
	root->planner_cxt = CurrentMemoryContext;
	root->wt_param_id = -1;

	/* Build a minimal RTE for the rel */
	rte = makeNode(RangeTblEntry);
	rte->rtekind = RTE_RELATION;
	rte->relid = tableOid;
	rte->relkind = RELKIND_RELATION;	/* Don't be too picky. */
	rte->rellockmode = AccessShareLock;
	rte->lateral = false;
	rte->inh = false;
	rte->inFromCl = true;
	query->rtable = list_make1(rte);

	/* Set up RTE/RelOptInfo arrays */
	setup_simple_rel_arrays(root);

	/* Build RelOptInfo */
	rel = build_simple_rel(root, 1, NULL);

	/* Locate IndexOptInfo for the target index */
	indexInfo = NULL;
	foreach(lc, rel->indexlist)
	{
		indexInfo = lfirst_node(IndexOptInfo, lc);
		if (indexInfo->indexoid == indexOid)
			break;
	}

	/*
	 * It's possible that get_relation_info did not generate an IndexOptInfo
	 * for the desired index; this could happen if it's not yet reached its
	 * indcheckxmin usability horizon, or if it's a system index and we're
	 * ignoring system indexes.  In such cases we should tell CLUSTER to not
	 * trust the index contents but use seqscan-and-sort.
	 */
	if (lc == NULL)				/* not in the list? */
		return true;			/* use sort */

	/*
	 * Rather than doing all the pushups that would be needed to use
	 * set_baserel_size_estimates, just do a quick hack for rows and width.
	 */
	rel->rows = rel->tuples;
	rel->reltarget->width = get_relation_data_width(tableOid, NULL);

	root->total_table_pages = rel->pages;

	/*
	 * Determine eval cost of the index expressions, if any.  We need to
	 * charge twice that amount for each tuple comparison that happens during
	 * the sort, since tuplesort.c will have to re-evaluate the index
	 * expressions each time.  (XXX that's pretty inefficient...)
	 */
	cost_qual_eval(&indexExprCost, indexInfo->indexprs, root);
	comparisonCost = 2.0 * (indexExprCost.startup + indexExprCost.per_tuple);

	/* Estimate the cost of seq scan + sort */
	seqScanPath = create_seqscan_path(root, rel, NULL, 0);
	cost_sort(&seqScanAndSortPath, root, NIL,
			  seqScanPath->total_cost, rel->tuples, rel->reltarget->width,
			  comparisonCost, maintenance_work_mem, -1.0);

	/* Estimate the cost of index scan */
	indexScanPath = create_index_path(root, indexInfo,
									  NIL, NIL, NIL, NIL,
									  ForwardScanDirection, false,
									  NULL, 1.0, false);

	return (seqScanAndSortPath.total_cost < indexScanPath->path.total_cost);
}
/*
 * plan_create_index_workers
 *		Use the planner to decide how many parallel worker processes
 *		CREATE INDEX should request for use
 *
 * tableOid is the table on which the index is to be built.  indexOid is the
 * OID of an index to be created or reindexed (which must be a btree index).
 *
 * Return value is the number of parallel worker processes to request.  It
 * may be unsafe to proceed if this is 0.  Note that this does not include the
 * leader participating as a worker (value is always a number of parallel
 * worker processes).
 *
 * Note: caller had better already hold some type of lock on the table and
 * index.
 */
int
plan_create_index_workers(Oid tableOid, Oid indexOid)
{
	PlannerInfo *root;
	Query	   *query;
	PlannerGlobal *glob;
	RangeTblEntry *rte;
	Relation	heap;
	Relation	index;
	RelOptInfo *rel;
	int			parallel_workers;
	BlockNumber heap_blocks;
	double		reltuples;
	double		allvisfrac;

	/* Return immediately when parallelism disabled */
	if (max_parallel_maintenance_workers == 0)
		return 0;

	/* Set up largely-dummy planner state */
	query = makeNode(Query);
	query->commandType = CMD_SELECT;

	glob = makeNode(PlannerGlobal);

	root = makeNode(PlannerInfo);
	root->parse = query;
	root->glob = glob;
	root->query_level = 1;
	root->planner_cxt = CurrentMemoryContext;
	root->wt_param_id = -1;

	/*
	 * Build a minimal RTE.
	 *
	 * Mark the RTE with inh = true.  This is a kludge to prevent
	 * get_relation_info() from fetching index info, which is necessary
	 * because it does not expect that any IndexOptInfo is currently
	 * undergoing REINDEX.
	 */
	rte = makeNode(RangeTblEntry);
	rte->rtekind = RTE_RELATION;
	rte->relid = tableOid;
	rte->relkind = RELKIND_RELATION;	/* Don't be too picky. */
	rte->rellockmode = AccessShareLock;
	rte->lateral = false;
	rte->inh = true;
	rte->inFromCl = true;
	query->rtable = list_make1(rte);

	/* Set up RTE/RelOptInfo arrays */
	setup_simple_rel_arrays(root);

	/* Build RelOptInfo */
	rel = build_simple_rel(root, 1, NULL);

	/* Rels are assumed already locked by the caller */
	heap = table_open(tableOid, NoLock);
	index = index_open(indexOid, NoLock);

	/*
	 * Determine if it's safe to proceed.
	 *
	 * Currently, parallel workers can't access the leader's temporary tables.
	 * Furthermore, any index predicate or index expressions must be parallel
	 * safe.
	 */
	if (heap->rd_rel->relpersistence == RELPERSISTENCE_TEMP ||
		!is_parallel_safe(root, (Node *) RelationGetIndexExpressions(index)) ||
		!is_parallel_safe(root, (Node *) RelationGetIndexPredicate(index)))
	{
		parallel_workers = 0;
		goto done;
	}

	/*
	 * If parallel_workers storage parameter is set for the table, accept that
	 * as the number of parallel worker processes to launch (though still cap
	 * at max_parallel_maintenance_workers).  Note that we deliberately do not
	 * consider any other factor when parallel_workers is set.  (e.g., memory
	 * use by workers.)
	 */
	if (rel->rel_parallel_workers != -1)
	{
		parallel_workers = Min(rel->rel_parallel_workers,
							   max_parallel_maintenance_workers);
		goto done;
	}

	/*
	 * Estimate heap relation size ourselves, since rel->pages cannot be
	 * trusted (heap RTE was marked as inheritance parent)
	 */
	estimate_rel_size(heap, NULL, &heap_blocks, &reltuples, &allvisfrac);

	/*
	 * Determine number of workers to scan the heap relation using generic
	 * model
	 */
	parallel_workers = compute_parallel_worker(rel, heap_blocks, -1,
											   max_parallel_maintenance_workers);

	/*
	 * Cap workers based on available maintenance_work_mem as needed.
	 *
	 * Note that each tuplesort participant receives an even share of the
	 * total maintenance_work_mem budget.  Aim to leave participants
	 * (including the leader as a participant) with no less than 32MB of
	 * memory.  This leaves cases where maintenance_work_mem is set to 64MB
	 * immediately past the threshold of being capable of launching a single
	 * parallel worker to sort.
	 */
	while (parallel_workers > 0 &&
		   maintenance_work_mem / (parallel_workers + 1) < 32768L)
		parallel_workers--;

done:
	index_close(index, NoLock);
	table_close(heap, NoLock);

	return parallel_workers;
}
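
/*
 * Worked example of the memory cap above (illustrative, not from the
 * original source): with maintenance_work_mem = 64MB (65536 kB), one worker
 * plus the leader each get 65536 / 2 = 32768 kB, exactly the 32MB floor, so
 * one worker survives.  At 63MB the per-participant share (32256 kB) falls
 * below the floor and the loop reduces the request to zero workers.
 */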
/*
 * add_paths_to_grouping_rel
 *
 * Add non-partial paths to grouping relation.
 */
static void
add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
						  RelOptInfo *grouped_rel,
						  RelOptInfo *partially_grouped_rel,
						  const AggClauseCosts *agg_costs,
						  grouping_sets_data *gd, double dNumGroups,
						  GroupPathExtraData *extra)
{
	Query	   *parse = root->parse;
	Path	   *cheapest_path = input_rel->cheapest_total_path;
	ListCell   *lc;
	bool		can_hash = (extra->flags & GROUPING_CAN_USE_HASH) != 0;
	bool		can_sort = (extra->flags & GROUPING_CAN_USE_SORT) != 0;
	List	   *havingQual = (List *) extra->havingQual;
	AggClauseCosts *agg_final_costs = &extra->agg_final_costs;

	if (can_sort)
	{
		/*
		 * Use any available suitably-sorted path as input, and also consider
		 * sorting the cheapest-total path.
		 */
		foreach(lc, input_rel->pathlist)
		{
			Path	   *path = (Path *) lfirst(lc);
			bool		is_sorted;

			is_sorted = pathkeys_contained_in(root->group_pathkeys,
											  path->pathkeys);
			if (path == cheapest_path || is_sorted)
			{
				/* Sort the cheapest-total path if it isn't already sorted */
				if (!is_sorted)
					path = (Path *) create_sort_path(root,
													 grouped_rel,
													 path,
													 root->group_pathkeys,
													 -1.0);

				/* Now decide what to stick atop it */
				if (parse->groupingSets)
				{
					consider_groupingsets_paths(root, grouped_rel,
												path, true, can_hash,
												gd, agg_costs, dNumGroups);
				}
				else if (parse->hasAggs)
				{
					/*
					 * We have aggregation, possibly with plain GROUP BY. Make
					 * an AggPath.
					 */
					add_path(grouped_rel, (Path *)
							 create_agg_path(root,
											 grouped_rel,
											 path,
											 grouped_rel->reltarget,
											 parse->groupClause ? AGG_SORTED : AGG_PLAIN,
											 AGGSPLIT_SIMPLE,
											 parse->groupClause,
											 havingQual,
											 agg_costs,
											 dNumGroups));
				}
				else if (parse->groupClause)
				{
					/*
					 * We have GROUP BY without aggregation or grouping sets.
					 * Make a GroupPath.
					 */
					add_path(grouped_rel, (Path *)
							 create_group_path(root,
											   grouped_rel,
											   path,
											   parse->groupClause,
											   havingQual,
											   dNumGroups));
				}
				else
				{
					/* Other cases should have been handled above */
					Assert(false);
				}
			}
		}

		/*
		 * Instead of operating directly on the input relation, we can
		 * consider finalizing a partially aggregated path.
		 */
		if (partially_grouped_rel != NULL)
		{
			foreach(lc, partially_grouped_rel->pathlist)
			{
				Path	   *path = (Path *) lfirst(lc);

				/*
				 * Insert a Sort node, if required.  But there's no point in
				 * sorting anything but the cheapest path.
				 */
				if (!pathkeys_contained_in(root->group_pathkeys, path->pathkeys))
				{
					if (path != partially_grouped_rel->cheapest_total_path)
						continue;
					path = (Path *) create_sort_path(root,
													 grouped_rel,
													 path,
													 root->group_pathkeys,
													 -1.0);
				}

				if (parse->hasAggs)
					add_path(grouped_rel, (Path *)
							 create_agg_path(root,
											 grouped_rel,
											 path,
											 grouped_rel->reltarget,
											 parse->groupClause ? AGG_SORTED : AGG_PLAIN,
											 AGGSPLIT_FINAL_DESERIAL,
											 parse->groupClause,
											 havingQual,
											 agg_final_costs,
											 dNumGroups));
				else
					add_path(grouped_rel, (Path *)
							 create_group_path(root,
											   grouped_rel,
											   path,
											   parse->groupClause,
											   havingQual,
											   dNumGroups));
			}
		}
	}

	if (can_hash)
	{
		double		hashaggtablesize;

		if (parse->groupingSets)
		{
			/*
			 * Try for a hash-only groupingsets path over unsorted input.
			 */
			consider_groupingsets_paths(root, grouped_rel,
										cheapest_path, false, true,
										gd, agg_costs, dNumGroups);
		}
		else
		{
			hashaggtablesize = estimate_hashagg_tablesize(cheapest_path,
														  agg_costs,
														  dNumGroups);

			/*
			 * Provided that the estimated size of the hashtable does not
			 * exceed work_mem, we'll generate a HashAgg Path, although if we
			 * were unable to sort above, then we'd better generate a Path, so
			 * that we at least have one.
			 */
			if (hashaggtablesize < work_mem * 1024L ||
				grouped_rel->pathlist == NIL)
			{
				/*
				 * We just need an Agg over the cheapest-total input path,
				 * since input order won't matter.
				 */
				add_path(grouped_rel, (Path *)
						 create_agg_path(root, grouped_rel,
										 cheapest_path,
										 grouped_rel->reltarget,
										 AGG_HASHED,
										 AGGSPLIT_SIMPLE,
										 parse->groupClause,
										 havingQual,
										 agg_costs,
										 dNumGroups));
			}
		}

		/*
		 * Generate a Finalize HashAgg Path atop of the cheapest partially
		 * grouped path, assuming there is one. Once again, we'll only do this
		 * if it looks as though the hash table won't exceed work_mem.
		 */
		if (partially_grouped_rel && partially_grouped_rel->pathlist)
		{
			Path	   *path = partially_grouped_rel->cheapest_total_path;

			hashaggtablesize = estimate_hashagg_tablesize(path,
														  agg_final_costs,
														  dNumGroups);

			if (hashaggtablesize < work_mem * 1024L)
				add_path(grouped_rel, (Path *)
						 create_agg_path(root,
										 grouped_rel,
										 path,
										 grouped_rel->reltarget,
										 AGG_HASHED,
										 AGGSPLIT_FINAL_DESERIAL,
										 parse->groupClause,
										 havingQual,
										 agg_final_costs,
										 dNumGroups));
		}
	}

	/*
	 * When partitionwise aggregate is used, we might have fully aggregated
	 * paths in the partial pathlist, because add_paths_to_append_rel() will
	 * consider a path for grouped_rel consisting of a Parallel Append of
	 * non-partial paths from each child.
	 */
	if (grouped_rel->partial_pathlist != NIL)
		gather_grouping_paths(root, grouped_rel);
}
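
/*
 * Illustrative plan shapes produced here (not from the original source):
 * the sorted strategy yields a GroupAggregate (atop a Sort if the input is
 * not already ordered), the hashed strategy yields a HashAggregate over the
 * cheapest-total input, and the partially-grouped variants show up in
 * EXPLAIN as a Finalize GroupAggregate or Finalize HashAggregate above
 * Partial aggregation below a Gather or Append node.
 */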
/*
 * create_partial_grouping_paths
 *
 * Create a new upper relation representing the result of partial aggregation
 * and populate it with appropriate paths.  Note that we don't finalize the
 * lists of paths here, so the caller can add additional partial or non-partial
 * paths and must afterward call gather_grouping_paths and set_cheapest on
 * the returned upper relation.
 *
 * All paths for this new upper relation -- both partial and non-partial --
 * have been partially aggregated but require a subsequent FinalizeAggregate
 * step.
 *
 * NB: This function is allowed to return NULL if it determines that there is
 * no real need to create a new RelOptInfo.
 */
static RelOptInfo *
create_partial_grouping_paths(PlannerInfo *root,
							  RelOptInfo *grouped_rel,
							  RelOptInfo *input_rel,
							  grouping_sets_data *gd,
							  GroupPathExtraData *extra,
							  bool force_rel_creation)
{
	Query	   *parse = root->parse;
	RelOptInfo *partially_grouped_rel;
	AggClauseCosts *agg_partial_costs = &extra->agg_partial_costs;
	AggClauseCosts *agg_final_costs = &extra->agg_final_costs;
	Path	   *cheapest_partial_path = NULL;
	Path	   *cheapest_total_path = NULL;
	double		dNumPartialGroups = 0;
	double		dNumPartialPartialGroups = 0;
	ListCell   *lc;
	bool		can_hash = (extra->flags & GROUPING_CAN_USE_HASH) != 0;
	bool		can_sort = (extra->flags & GROUPING_CAN_USE_SORT) != 0;

	/*
	 * Consider whether we should generate partially aggregated non-partial
	 * paths.  We can only do this if we have a non-partial path, and only if
	 * the parent of the input rel is performing partial partitionwise
	 * aggregation.  (Note that extra->patype is the type of partitionwise
	 * aggregation being used at the parent level, not this level.)
	 */
	if (input_rel->pathlist != NIL &&
		extra->patype == PARTITIONWISE_AGGREGATE_PARTIAL)
		cheapest_total_path = input_rel->cheapest_total_path;

	/*
	 * If parallelism is possible for grouped_rel, then we should consider
	 * generating partially-grouped partial paths.  However, if the input rel
	 * has no partial paths, then we can't.
	 */
	if (grouped_rel->consider_parallel && input_rel->partial_pathlist != NIL)
		cheapest_partial_path = linitial(input_rel->partial_pathlist);

	/*
	 * If we can't partially aggregate partial paths, and we can't partially
	 * aggregate non-partial paths, then don't bother creating the new
	 * RelOptInfo at all, unless the caller specified force_rel_creation.
	 */
	if (cheapest_total_path == NULL &&
		cheapest_partial_path == NULL &&
		!force_rel_creation)
		return NULL;

	/*
	 * Build a new upper relation to represent the result of partially
	 * aggregating the rows from the input relation.
	 */
	partially_grouped_rel = fetch_upper_rel(root,
											UPPERREL_PARTIAL_GROUP_AGG,
											grouped_rel->relids);
	partially_grouped_rel->consider_parallel =
		grouped_rel->consider_parallel;
	partially_grouped_rel->reloptkind = grouped_rel->reloptkind;
	partially_grouped_rel->serverid = grouped_rel->serverid;
	partially_grouped_rel->userid = grouped_rel->userid;
	partially_grouped_rel->useridiscurrent = grouped_rel->useridiscurrent;
	partially_grouped_rel->fdwroutine = grouped_rel->fdwroutine;

	/*
	 * Build target list for partial aggregate paths.  These paths cannot just
	 * emit the same tlist as regular aggregate paths, because (1) we must
	 * include Vars and Aggrefs needed in HAVING, which might not appear in
	 * the result tlist, and (2) the Aggrefs must be set in partial mode.
	 */
	partially_grouped_rel->reltarget =
		make_partial_grouping_target(root, grouped_rel->reltarget,
									 extra->havingQual);

	if (!extra->partial_costs_set)
	{
		/*
		 * Collect statistics about aggregates for estimating costs of
		 * performing aggregation in parallel.
		 */
		MemSet(agg_partial_costs, 0, sizeof(AggClauseCosts));
		MemSet(agg_final_costs, 0, sizeof(AggClauseCosts));
		if (parse->hasAggs)
		{
			List	   *partial_target_exprs;

			/* partial phase */
			partial_target_exprs = partially_grouped_rel->reltarget->exprs;
			get_agg_clause_costs(root, (Node *) partial_target_exprs,
								 AGGSPLIT_INITIAL_SERIAL,
								 agg_partial_costs);

			/* final phase */
			get_agg_clause_costs(root, (Node *) grouped_rel->reltarget->exprs,
								 AGGSPLIT_FINAL_DESERIAL,
								 agg_final_costs);
			get_agg_clause_costs(root, extra->havingQual,
								 AGGSPLIT_FINAL_DESERIAL,
								 agg_final_costs);
		}

		extra->partial_costs_set = true;
	}

	/* Estimate number of partial groups. */
	if (cheapest_total_path != NULL)
		dNumPartialGroups =
			get_number_of_groups(root,
								 cheapest_total_path->rows,
								 gd,
								 extra->targetList);
	if (cheapest_partial_path != NULL)
		dNumPartialPartialGroups =
			get_number_of_groups(root,
								 cheapest_partial_path->rows,
								 gd,
								 extra->targetList);

	if (can_sort && cheapest_total_path != NULL)
	{
		/* This should have been checked previously */
		Assert(parse->hasAggs || parse->groupClause);

		/*
		 * Use any available suitably-sorted path as input, and also consider
		 * sorting the cheapest partial path.
		 */
		foreach(lc, input_rel->pathlist)
		{
			Path	   *path = (Path *) lfirst(lc);
			bool		is_sorted;

			is_sorted = pathkeys_contained_in(root->group_pathkeys,
											  path->pathkeys);
			if (path == cheapest_total_path || is_sorted)
			{
				/* Sort the cheapest partial path, if it isn't already */
				if (!is_sorted)
					path = (Path *) create_sort_path(root,
													 partially_grouped_rel,
													 path,
													 root->group_pathkeys,
													 -1.0);

				if (parse->hasAggs)
					add_path(partially_grouped_rel, (Path *)
							 create_agg_path(root,
											 partially_grouped_rel,
											 path,
											 partially_grouped_rel->reltarget,
											 parse->groupClause ? AGG_SORTED : AGG_PLAIN,
											 AGGSPLIT_INITIAL_SERIAL,
											 parse->groupClause,
											 NIL,
											 agg_partial_costs,
											 dNumPartialGroups));
				else
					add_path(partially_grouped_rel, (Path *)
							 create_group_path(root,
											   partially_grouped_rel,
											   path,
											   parse->groupClause,
											   NIL,
											   dNumPartialGroups));
			}
		}
	}

	if (can_sort && cheapest_partial_path != NULL)
	{
		/* Similar to above logic, but for partial paths. */
		foreach(lc, input_rel->partial_pathlist)
		{
			Path	   *path = (Path *) lfirst(lc);
			bool		is_sorted;

			is_sorted = pathkeys_contained_in(root->group_pathkeys,
											  path->pathkeys);
			if (path == cheapest_partial_path || is_sorted)
			{
				/* Sort the cheapest partial path, if it isn't already */
				if (!is_sorted)
					path = (Path *) create_sort_path(root,
													 partially_grouped_rel,
													 path,
													 root->group_pathkeys,
													 -1.0);

				if (parse->hasAggs)
					add_partial_path(partially_grouped_rel, (Path *)
									 create_agg_path(root,
													 partially_grouped_rel,
													 path,
													 partially_grouped_rel->reltarget,
													 parse->groupClause ? AGG_SORTED : AGG_PLAIN,
													 AGGSPLIT_INITIAL_SERIAL,
													 parse->groupClause,
													 NIL,
													 agg_partial_costs,
													 dNumPartialPartialGroups));
				else
					add_partial_path(partially_grouped_rel, (Path *)
									 create_group_path(root,
													   partially_grouped_rel,
													   path,
													   parse->groupClause,
													   NIL,
													   dNumPartialPartialGroups));
			}
		}
	}

	if (can_hash && cheapest_total_path != NULL)
	{
		double		hashaggtablesize;

		/* Checked above */
		Assert(parse->hasAggs || parse->groupClause);

		hashaggtablesize =
			estimate_hashagg_tablesize(cheapest_total_path,
									   agg_partial_costs,
									   dNumPartialGroups);

		/*
		 * Tentatively produce a partial HashAgg Path, depending on if it
		 * looks as if the hash table will fit in work_mem.
		 */
		if (hashaggtablesize < work_mem * 1024L &&
			cheapest_total_path != NULL)
		{
			add_path(partially_grouped_rel, (Path *)
					 create_agg_path(root,
									 partially_grouped_rel,
									 cheapest_total_path,
									 partially_grouped_rel->reltarget,
									 AGG_HASHED,
									 AGGSPLIT_INITIAL_SERIAL,
									 parse->groupClause,
									 NIL,
									 agg_partial_costs,
									 dNumPartialGroups));
		}
	}

	if (can_hash && cheapest_partial_path != NULL)
	{
		double		hashaggtablesize;

		hashaggtablesize =
			estimate_hashagg_tablesize(cheapest_partial_path,
									   agg_partial_costs,
									   dNumPartialPartialGroups);

		/* Do the same for partial paths. */
		if (hashaggtablesize < work_mem * 1024L &&
			cheapest_partial_path != NULL)
		{
			add_partial_path(partially_grouped_rel, (Path *)
							 create_agg_path(root,
											 partially_grouped_rel,
											 cheapest_partial_path,
											 partially_grouped_rel->reltarget,
											 AGG_HASHED,
											 AGGSPLIT_INITIAL_SERIAL,
											 parse->groupClause,
											 NIL,
											 agg_partial_costs,
											 dNumPartialPartialGroups));
		}
	}

	/*
	 * If there is an FDW that's responsible for all baserels of the query,
	 * let it consider adding partially grouped ForeignPaths.
	 */
	if (partially_grouped_rel->fdwroutine &&
		partially_grouped_rel->fdwroutine->GetForeignUpperPaths)
	{
		FdwRoutine *fdwroutine = partially_grouped_rel->fdwroutine;

		fdwroutine->GetForeignUpperPaths(root,
										 UPPERREL_PARTIAL_GROUP_AGG,
										 input_rel, partially_grouped_rel,
										 extra);
	}

	return partially_grouped_rel;
}
/*
 * Generate Gather and Gather Merge paths for a grouping relation or partial
 * grouping relation.
 *
 * generate_gather_paths does most of the work, but we also consider a special
 * case: we could try sorting the data by the group_pathkeys and then applying
 * Gather Merge.
 *
 * NB: This function shouldn't be used for anything other than a grouped or
 * partially grouped relation, not only because it explicitly references
 * group_pathkeys but also because we pass "true" as the third argument to
 * generate_gather_paths().
 */
static void
gather_grouping_paths(PlannerInfo *root, RelOptInfo *rel)
{
	Path	   *cheapest_partial_path;

	/* Try Gather for unordered paths and Gather Merge for ordered ones. */
	generate_gather_paths(root, rel, true);

	/* Try cheapest partial path + explicit Sort + Gather Merge. */
	cheapest_partial_path = linitial(rel->partial_pathlist);
	if (!pathkeys_contained_in(root->group_pathkeys,
							   cheapest_partial_path->pathkeys))
	{
		Path	   *path;
		double		total_groups;

		total_groups =
			cheapest_partial_path->rows * cheapest_partial_path->parallel_workers;
		path = (Path *) create_sort_path(root, rel, cheapest_partial_path,
										 root->group_pathkeys,
										 -1.0);
		path = (Path *)
			create_gather_merge_path(root,
									 rel,
									 path,
									 rel->reltarget,
									 root->group_pathkeys,
									 NULL,
									 &total_groups);

		add_path(rel, path);
	}
}
/*
 * can_partial_agg
 *
 * Determines whether partial grouping and/or aggregation is possible.
 * Returns true when possible, false otherwise.
 */
static bool
can_partial_agg(PlannerInfo *root, const AggClauseCosts *agg_costs)
{
	Query	   *parse = root->parse;

	if (!parse->hasAggs && parse->groupClause == NIL)
	{
		/*
		 * We don't know how to do parallel aggregation unless we have either
		 * some aggregates or a grouping clause.
		 */
		return false;
	}
	else if (parse->groupingSets)
	{
		/* We don't know how to do grouping sets in parallel. */
		return false;
	}
	else if (agg_costs->hasNonPartial || agg_costs->hasNonSerial)
	{
		/* Insufficient support for partial mode. */
		return false;
	}

	/* Everything looks good. */
	return true;
}
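
/*
 * Examples of the rejection cases above (illustrative): an aggregate call
 * using DISTINCT or an internal ORDER BY, or an aggregate lacking a combine
 * function, causes hasNonPartial to be set; an aggregate whose transition
 * state is of type INTERNAL but which lacks serialization/deserialization
 * support sets hasNonSerial.  Either one disables partial aggregation.
 */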
/*
 * apply_scanjoin_target_to_paths
 *
 * Adjust the final scan/join relation, and recursively all of its children,
 * to generate the final scan/join target.  It would be more correct to model
 * this as a separate planning step with a new RelOptInfo at the toplevel and
 * for each child relation, but doing it this way is noticeably cheaper.
 * Maybe that problem can be solved at some point, but for now we do this.
 *
 * If tlist_same_exprs is true, then the scan/join target to be applied has
 * the same expressions as the existing reltarget, so we need only insert the
 * appropriate sortgroupref information.  By avoiding the creation of
 * projection paths we save effort both immediately and at plan creation time.
 */
static void
apply_scanjoin_target_to_paths(PlannerInfo *root,
							   RelOptInfo *rel,
							   List *scanjoin_targets,
							   List *scanjoin_targets_contain_srfs,
							   bool scanjoin_target_parallel_safe,
							   bool tlist_same_exprs)
{
	bool		rel_is_partitioned = IS_PARTITIONED_REL(rel);
	PathTarget *scanjoin_target;
	ListCell   *lc;

	/* This recurses, so be paranoid. */
	check_stack_depth();

	/*
	 * If the rel is partitioned, we want to drop its existing paths and
	 * generate new ones.  This function would still be correct if we kept the
	 * existing paths: we'd modify them to generate the correct target above
	 * the partitioning Append, and then they'd compete on cost with paths
	 * generating the target below the Append.  However, in our current cost
	 * model the latter way is always the same or cheaper cost, so modifying
	 * the existing paths would just be useless work.  Moreover, when the cost
	 * is the same, varying roundoff errors might sometimes allow an existing
	 * path to be picked, resulting in undesirable cross-platform plan
	 * variations.  So we drop old paths and thereby force the work to be done
	 * below the Append, except in the case of a non-parallel-safe target.
	 *
	 * Some care is needed, because we have to allow generate_gather_paths to
	 * see the old partial paths in the next stanza.  Hence, zap the main
	 * pathlist here, then allow generate_gather_paths to add path(s) to the
	 * main list, and finally zap the partial pathlist.
	 */
	if (rel_is_partitioned)
		rel->pathlist = NIL;

	/*
	 * If the scan/join target is not parallel-safe, partial paths cannot
	 * generate it.
	 */
	if (!scanjoin_target_parallel_safe)
	{
		/*
		 * Since we can't generate the final scan/join target in parallel
		 * workers, this is our last opportunity to use any partial paths that
		 * exist; so build Gather path(s) that use them and emit whatever the
		 * current reltarget is.  We don't do this in the case where the
		 * target is parallel-safe, since we will be able to generate superior
		 * paths by doing it after the final scan/join target has been
		 * applied.
		 */
		generate_gather_paths(root, rel, false);

		/* Can't use parallel query above this level. */
		rel->partial_pathlist = NIL;
		rel->consider_parallel = false;
	}

	/* Finish dropping old paths for a partitioned rel, per comment above */
	if (rel_is_partitioned)
		rel->partial_pathlist = NIL;

	/* Extract SRF-free scan/join target. */
	scanjoin_target = linitial_node(PathTarget, scanjoin_targets);

	/*
	 * Apply the SRF-free scan/join target to each existing path.
	 *
	 * If the tlist exprs are the same, we can just inject the sortgroupref
	 * information into the existing pathtargets.  Otherwise, replace each
	 * path with a projection path that generates the SRF-free scan/join
	 * target.  This can't change the ordering of paths within rel->pathlist,
	 * so we just modify the list in place.
	 */
	foreach(lc, rel->pathlist)
	{
		Path	   *subpath = (Path *) lfirst(lc);

		/* Shouldn't have any parameterized paths anymore */
		Assert(subpath->param_info == NULL);

		if (tlist_same_exprs)
			subpath->pathtarget->sortgrouprefs =
				scanjoin_target->sortgrouprefs;
		else
		{
			Path	   *newpath;

			newpath = (Path *) create_projection_path(root, rel, subpath,
													  scanjoin_target);
			lfirst(lc) = newpath;
		}
	}

	/* Likewise adjust the targets for any partial paths. */
	foreach(lc, rel->partial_pathlist)
	{
		Path	   *subpath = (Path *) lfirst(lc);

		/* Shouldn't have any parameterized paths anymore */
		Assert(subpath->param_info == NULL);

		if (tlist_same_exprs)
			subpath->pathtarget->sortgrouprefs =
				scanjoin_target->sortgrouprefs;
		else
		{
			Path	   *newpath;

			newpath = (Path *) create_projection_path(root, rel, subpath,
													  scanjoin_target);
			lfirst(lc) = newpath;
		}
	}

	/*
	 * Now, if final scan/join target contains SRFs, insert ProjectSetPath(s)
	 * atop each existing path.  (Note that this function doesn't look at the
	 * cheapest-path fields, which is a good thing because they're bogus right
	 * now.)
	 */
	if (root->parse->hasTargetSRFs)
		adjust_paths_for_srfs(root, rel,
							  scanjoin_targets,
							  scanjoin_targets_contain_srfs);

	/*
	 * Update the rel's target to be the final (with SRFs) scan/join target.
	 * This now matches the actual output of all the paths, and we might get
	 * confused in createplan.c if they don't agree.  We must do this now so
	 * that any append paths made in the next part will use the correct
	 * pathtarget (cf. create_append_path).
	 *
	 * Note that this is also necessary if GetForeignUpperPaths() gets called
	 * on the final scan/join relation or on any of its children, since the
	 * FDW might look at the rel's target to create ForeignPaths.
	 */
	rel->reltarget = llast_node(PathTarget, scanjoin_targets);

	/*
	 * If the relation is partitioned, recursively apply the scan/join target
	 * to all partitions, and generate brand-new Append paths in which the
	 * scan/join target is computed below the Append rather than above it.
	 * Since Append is not projection-capable, that might save a separate
	 * Result node, and it also is important for partitionwise aggregate.
	 */
	if (rel_is_partitioned)
	{
		List	   *live_children = NIL;
		int			partition_idx;

		/* Adjust each partition. */
		for (partition_idx = 0; partition_idx < rel->nparts; partition_idx++)
		{
			RelOptInfo *child_rel = rel->part_rels[partition_idx];
			AppendRelInfo **appinfos;
			int			nappinfos;
			List	   *child_scanjoin_targets = NIL;
			ListCell   *lc;

			/* Pruned or dummy children can be ignored. */
			if (child_rel == NULL || IS_DUMMY_REL(child_rel))
				continue;

			/* Translate scan/join targets for this child. */
			appinfos = find_appinfos_by_relids(root, child_rel->relids,
											   &nappinfos);
			foreach(lc, scanjoin_targets)
			{
				PathTarget *target = lfirst_node(PathTarget, lc);

				target = copy_pathtarget(target);
				target->exprs = (List *)
					adjust_appendrel_attrs(root,
										   (Node *) target->exprs,
										   nappinfos, appinfos);
				child_scanjoin_targets = lappend(child_scanjoin_targets,
												 target);
			}
			pfree(appinfos);

			/* Recursion does the real work. */
			apply_scanjoin_target_to_paths(root, child_rel,
										   child_scanjoin_targets,
										   scanjoin_targets_contain_srfs,
										   scanjoin_target_parallel_safe,
										   tlist_same_exprs);

			/* Save non-dummy children for Append paths. */
			if (!IS_DUMMY_REL(child_rel))
				live_children = lappend(live_children, child_rel);
		}

		/* Build new paths for this relation by appending child paths. */
		add_paths_to_append_rel(root, rel, live_children);
	}

	/*
	 * Consider generating Gather or Gather Merge paths.  We must only do this
	 * if the relation is parallel safe, and we don't do it for child rels to
	 * avoid creating multiple Gather nodes within the same plan.  We must do
	 * this after all paths have been generated and before set_cheapest, since
	 * one of the generated paths may turn out to be the cheapest one.
	 */
	if (rel->consider_parallel && !IS_OTHER_REL(rel))
		generate_gather_paths(root, rel, false);

	/*
	 * Reassess which paths are the cheapest, now that we've potentially added
	 * new Gather (or Gather Merge) and/or Append (or MergeAppend) paths to
	 * the relation.
	 */
	set_cheapest(rel);
}
/*
 * create_partitionwise_grouping_paths
 *
 * If the partition keys of input relation are part of the GROUP BY clause, all
 * the rows belonging to a given group come from a single partition.  This
 * allows aggregation/grouping over a partitioned relation to be broken down
 * into aggregation/grouping on each partition.  This should be no worse, and
 * often better, than the normal approach.
 *
 * However, if the GROUP BY clause does not contain all the partition keys,
 * rows from a given group may be spread across multiple partitions.  In that
 * case, we perform partial aggregation for each group, append the results,
 * and then finalize aggregation.  This is less certain to win than the
 * previous case.  It may win if the PartialAggregate stage greatly reduces
 * the number of groups, because fewer rows will pass through the Append node.
 * It may lose if we have lots of small groups.
 */
static void
create_partitionwise_grouping_paths(PlannerInfo *root,
									RelOptInfo *input_rel,
									RelOptInfo *grouped_rel,
									RelOptInfo *partially_grouped_rel,
									const AggClauseCosts *agg_costs,
									grouping_sets_data *gd,
									PartitionwiseAggregateType patype,
									GroupPathExtraData *extra)
{
	int			nparts = input_rel->nparts;
	int			cnt_parts;
	List	   *grouped_live_children = NIL;
	List	   *partially_grouped_live_children = NIL;
	PathTarget *target = grouped_rel->reltarget;
	bool		partial_grouping_valid = true;

	Assert(patype != PARTITIONWISE_AGGREGATE_NONE);
	Assert(patype != PARTITIONWISE_AGGREGATE_PARTIAL ||
		   partially_grouped_rel != NULL);

	/* Add paths for partitionwise aggregation/grouping. */
	for (cnt_parts = 0; cnt_parts < nparts; cnt_parts++)
	{
		RelOptInfo *child_input_rel = input_rel->part_rels[cnt_parts];
		PathTarget *child_target = copy_pathtarget(target);
		AppendRelInfo **appinfos;
		int			nappinfos;
		GroupPathExtraData child_extra;
		RelOptInfo *child_grouped_rel;
		RelOptInfo *child_partially_grouped_rel;

		/* Pruned or dummy children can be ignored. */
		if (child_input_rel == NULL || IS_DUMMY_REL(child_input_rel))
			continue;

		/*
		 * Copy the given "extra" structure as is and then override the
		 * members specific to this child.
		 */
		memcpy(&child_extra, extra, sizeof(child_extra));

		appinfos = find_appinfos_by_relids(root, child_input_rel->relids,
										   &nappinfos);

		child_target->exprs = (List *)
			adjust_appendrel_attrs(root,
								   (Node *) target->exprs,
								   nappinfos, appinfos);

		/* Translate havingQual and targetList. */
		child_extra.havingQual = (Node *)
			adjust_appendrel_attrs(root,
								   extra->havingQual,
								   nappinfos, appinfos);
		child_extra.targetList = (List *)
			adjust_appendrel_attrs(root,
								   (Node *) extra->targetList,
								   nappinfos, appinfos);

		/*
		 * extra->patype was the value computed for our parent rel; patype is
		 * the value for this relation.  For the child, our value is its
		 * parent rel's value.
		 */
		child_extra.patype = patype;

		/*
		 * Create grouping relation to hold fully aggregated grouping and/or
		 * aggregation paths for the child.
		 */
		child_grouped_rel = make_grouping_rel(root, child_input_rel,
											  child_target,
											  extra->target_parallel_safe,
											  child_extra.havingQual);

		/* Create grouping paths for this child relation. */
		create_ordinary_grouping_paths(root, child_input_rel,
									   child_grouped_rel,
									   agg_costs, gd, &child_extra,
									   &child_partially_grouped_rel);

		if (child_partially_grouped_rel)
		{
			partially_grouped_live_children =
				lappend(partially_grouped_live_children,
						child_partially_grouped_rel);
		}
		else
			partial_grouping_valid = false;

		if (patype == PARTITIONWISE_AGGREGATE_FULL)
		{
			set_cheapest(child_grouped_rel);
			grouped_live_children = lappend(grouped_live_children,
											child_grouped_rel);
		}

		pfree(appinfos);
	}

	/*
	 * Try to create append paths for partially grouped children.  For full
	 * partitionwise aggregation, we might have paths in the partial_pathlist
	 * if parallel aggregation is possible.  For partial partitionwise
	 * aggregation, we may have paths in both pathlist and partial_pathlist.
	 *
	 * NB: We must have a partially grouped path for every child in order to
	 * generate a partially grouped path for this relation.
	 */
	if (partially_grouped_rel && partial_grouping_valid)
	{
		Assert(partially_grouped_live_children != NIL);

		add_paths_to_append_rel(root, partially_grouped_rel,
								partially_grouped_live_children);

		/*
		 * We need to call set_cheapest, since the finalization step will use
		 * the cheapest path from the rel.
		 */
		if (partially_grouped_rel->pathlist)
			set_cheapest(partially_grouped_rel);
	}

	/* If possible, create append paths for fully grouped children. */
	if (patype == PARTITIONWISE_AGGREGATE_FULL)
	{
		Assert(grouped_live_children != NIL);

		add_paths_to_append_rel(root, grouped_rel, grouped_live_children);
	}
}
/*
 * group_by_has_partkey
 *
 * Returns true if all the partition keys of the given relation are part of
 * the GROUP BY clauses, false otherwise.
 */
static bool
group_by_has_partkey(RelOptInfo *input_rel,
					 List *targetList,
					 List *groupClause)
{
	List	   *groupexprs = get_sortgrouplist_exprs(groupClause, targetList);
	int			cnt = 0;
	int			partnatts;

	/* Input relation should be partitioned. */
	Assert(input_rel->part_scheme);

	/* Rule out early, if there are no partition keys present. */
	if (!input_rel->partexprs)
		return false;

	partnatts = input_rel->part_scheme->partnatts;

	for (cnt = 0; cnt < partnatts; cnt++)
	{
		List	   *partexprs = input_rel->partexprs[cnt];
		ListCell   *lc;
		bool		found = false;

		foreach(lc, partexprs)
		{
			Expr	   *partexpr = lfirst(lc);

			if (list_member(groupexprs, partexpr))
			{
				found = true;
				break;
			}
		}

		/*
		 * If none of the partition key expressions match any of the GROUP BY
		 * expressions, return false.
		 */
		if (!found)
			return false;
	}

	return true;
}
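
/*
 * Illustrative example (not from the original source): for a table
 * partitioned by (a), a query with GROUP BY a, b satisfies this test, so
 * full partitionwise aggregation is possible; with GROUP BY b alone it
 * fails, leaving only the partial-aggregation strategy described in
 * create_partitionwise_grouping_paths.
 */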