}
/* Collect all the attributes needed for joins or final output. */
- pull_varattnos((Node *) baserel->reltarget.exprs, baserel->relid,
+ pull_varattnos((Node *) baserel->reltarget->exprs, baserel->relid,
&attrs_used);
/* Add all the attributes used by restriction clauses. */
*/
int tuple_width;
- tuple_width = MAXALIGN(baserel->reltarget.width) +
+ tuple_width = MAXALIGN(baserel->reltarget->width) +
MAXALIGN(SizeofHeapTupleHeader);
ntuples = clamp_row_est((double) stat_buf.st_size /
(double) tuple_width);
PgFdwRelationInfo *fpinfo = (PgFdwRelationInfo *) foreignrel->fdw_private;
/*
- * We require columns specified in foreignrel->reltarget.exprs and those
+ * We require columns specified in foreignrel->reltarget->exprs and those
* required for evaluating the local conditions.
*/
- tlist = add_to_flat_tlist(tlist, foreignrel->reltarget.exprs);
+ tlist = add_to_flat_tlist(tlist, foreignrel->reltarget->exprs);
tlist = add_to_flat_tlist(tlist,
pull_var_clause((Node *) fpinfo->local_conds,
PVC_RECURSE_PLACEHOLDERS));
* columns used in them. Doesn't seem worth detecting that case though.)
*/
fpinfo->attrs_used = NULL;
- pull_varattnos((Node *) baserel->reltarget.exprs, baserel->relid,
+ pull_varattnos((Node *) baserel->reltarget->exprs, baserel->relid,
&fpinfo->attrs_used);
foreach(lc, fpinfo->local_conds)
{
/* Report estimated baserel size to planner. */
baserel->rows = fpinfo->rows;
- baserel->reltarget.width = fpinfo->width;
+ baserel->reltarget->width = fpinfo->width;
}
else
{
{
baserel->pages = 10;
baserel->tuples =
- (10 * BLCKSZ) / (baserel->reltarget.width +
+ (10 * BLCKSZ) / (baserel->reltarget->width +
MAXALIGN(SizeofHeapTupleHeader));
}
* between foreign relations.
*/
rows = foreignrel->rows;
- width = foreignrel->reltarget.width;
+ width = foreignrel->reltarget->width;
/* Back into an estimate of the number of retrieved rows. */
retrieved_rows = clamp_row_est(rows / fpinfo->local_conds_sel);
&width, &startup_cost, &total_cost);
/* Now update this information in the joinrel */
joinrel->rows = rows;
- joinrel->reltarget.width = width;
+ joinrel->reltarget->width = width;
fpinfo->rows = rows;
fpinfo->width = width;
fpinfo->startup_cost = startup_cost;
it contains restriction quals (<literal>WHERE</> clauses) that should be
used to filter the rows to be fetched. (The FDW itself is not required
to enforce these quals, as the core executor can check them instead.)
- <literal>baserel->reltarget.exprs</> can be used to determine which
+ <literal>baserel->reltarget->exprs</> can be used to determine which
columns need to be fetched; but note that it only lists columns that
have to be emitted by the <structname>ForeignScan</> plan node, not
columns that are used in qual evaluation but not output by the query.
WRITE_ENUM_FIELD(pathtype, NodeTag);
appendStringInfoString(str, " :parent_relids ");
_outBitmapset(str, node->parent->relids);
- if (node->pathtarget != &(node->parent->reltarget))
- {
- WRITE_NODE_FIELD(pathtarget->exprs);
- if (node->pathtarget->sortgrouprefs)
- {
- int i;
-
- appendStringInfoString(str, " :pathtarget->sortgrouprefs");
- for (i = 0; i < list_length(node->pathtarget->exprs); i++)
- appendStringInfo(str, " %u",
- node->pathtarget->sortgrouprefs[i]);
- }
- WRITE_FLOAT_FIELD(pathtarget->cost.startup, "%.2f");
- WRITE_FLOAT_FIELD(pathtarget->cost.per_tuple, "%.2f");
- WRITE_INT_FIELD(pathtarget->width);
- }
+ if (node->pathtarget != node->parent->reltarget)
+ WRITE_NODE_FIELD(pathtarget);
appendStringInfoString(str, " :required_outer ");
if (node->param_info)
_outBitmapset(str, node->param_info->ppi_req_outer);
WRITE_BOOL_FIELD(consider_startup);
WRITE_BOOL_FIELD(consider_param_startup);
WRITE_BOOL_FIELD(consider_parallel);
- WRITE_NODE_FIELD(reltarget.exprs);
- /* reltarget.sortgrouprefs is never interesting, at present anyway */
- WRITE_FLOAT_FIELD(reltarget.cost.startup, "%.2f");
- WRITE_FLOAT_FIELD(reltarget.cost.per_tuple, "%.2f");
- WRITE_INT_FIELD(reltarget.width);
+ WRITE_NODE_FIELD(reltarget);
WRITE_NODE_FIELD(pathlist);
WRITE_NODE_FIELD(ppilist);
WRITE_NODE_FIELD(partial_pathlist);
WRITE_BOOL_FIELD(pk_nulls_first);
}
+/*
+ * _outPathTarget --
+ *	  serialize a PathTarget node for debugging output.
+ *
+ * sortgrouprefs is a bare Index array, not a Node, so it can't go through
+ * WRITE_NODE_FIELD; when present it is emitted manually, one entry per
+ * element of exprs (the two are defined to be the same length).
+ */
+static void
+_outPathTarget(StringInfo str, const PathTarget *node)
+{
+	WRITE_NODE_TYPE("PATHTARGET");
+
+	WRITE_NODE_FIELD(exprs);
+	if (node->sortgrouprefs)
+	{
+		int			i;
+
+		appendStringInfoString(str, " :sortgrouprefs");
+		/* array length is implied by list_length(exprs) */
+		for (i = 0; i < list_length(node->exprs); i++)
+			appendStringInfo(str, " %u", node->sortgrouprefs[i]);
+	}
+	WRITE_FLOAT_FIELD(cost.startup, "%.2f");
+	WRITE_FLOAT_FIELD(cost.per_tuple, "%.2f");
+	WRITE_INT_FIELD(width);
+}
+
static void
_outParamPathInfo(StringInfo str, const ParamPathInfo *node)
{
case T_PathKey:
_outPathKey(str, obj);
break;
+ case T_PathTarget:
+ _outPathTarget(str, obj);
+ break;
case T_ParamPathInfo:
_outParamPathInfo(str, obj);
break;
/*
* CE failed, so finish copying/modifying targetlist and join quals.
*
- * Note: the resulting childrel->reltarget.exprs may contain arbitrary
+	 * Note: the resulting childrel->reltarget->exprs may contain arbitrary
* expressions, which otherwise would not occur in a rel's targetlist.
* Code that might be looking at an appendrel child must cope with
* such. (Normally, a rel's targetlist would only include Vars and
adjust_appendrel_attrs(root,
(Node *) rel->joininfo,
appinfo);
- childrel->reltarget.exprs = (List *)
+ childrel->reltarget->exprs = (List *)
adjust_appendrel_attrs(root,
- (Node *) rel->reltarget.exprs,
+ (Node *) rel->reltarget->exprs,
appinfo);
/*
Assert(childrel->rows > 0);
parent_rows += childrel->rows;
- parent_size += childrel->reltarget.width * childrel->rows;
+ parent_size += childrel->reltarget->width * childrel->rows;
/*
* Accumulate per-column estimates too. We need not do anything for
*
* By construction, child's targetlist is 1-to-1 with parent's.
*/
- forboth(parentvars, rel->reltarget.exprs,
- childvars, childrel->reltarget.exprs)
+ forboth(parentvars, rel->reltarget->exprs,
+ childvars, childrel->reltarget->exprs)
{
Var *parentvar = (Var *) lfirst(parentvars);
Node *childvar = (Node *) lfirst(childvars);
Assert(parent_rows > 0);
rel->rows = parent_rows;
- rel->reltarget.width = rint(parent_size / parent_rows);
+ rel->reltarget->width = rint(parent_size / parent_rows);
for (i = 0; i < nattrs; i++)
rel->attr_widths[i] = rint(parent_attrsizes[i] / parent_rows);
{
/* Set dummy size estimates --- we leave attr_widths[] as zeroes */
rel->rows = 0;
- rel->reltarget.width = 0;
+ rel->reltarget->width = 0;
/* Discard any pre-existing paths; no further need for them */
rel->pathlist = NIL;
* not reference the ordinality column, or at least not in any way
* that would be interesting for sorting.
*/
- foreach(lc, rel->reltarget.exprs)
+ foreach(lc, rel->reltarget->exprs)
{
Var *node = (Var *) lfirst(lc);
* isn't computed for inheritance child rels, cf set_append_rel_size().
* (XXX might be worth changing that sometime.)
*/
- pull_varattnos((Node *) rel->reltarget.exprs, rel->relid, &attrs_used);
+ pull_varattnos((Node *) rel->reltarget->exprs, rel->relid, &attrs_used);
/* Add all the attributes used by un-pushed-down restriction clauses. */
foreach(lc, rel->baserestrictinfo)
printf("RELOPTINFO (");
print_relids(rel->relids);
- printf("): rows=%.0f width=%d\n", rel->rows, rel->reltarget.width);
+ printf("): rows=%.0f width=%d\n", rel->rows, rel->reltarget->width);
if (rel->baserestrictinfo)
{
* that have to be calculated at this relation. This is the amount of data
* we'd need to pass upwards in case of a sort, hash, etc.
*
- * This function also sets reltarget.cost, so it's a bit misnamed now.
+ * This function also sets reltarget->cost, so it's a bit misnamed now.
*
* NB: this works best on plain relations because it prefers to look at
* real Vars. For subqueries, set_subquery_size_estimates will already have
ListCell *lc;
/* Vars are assumed to have cost zero, but other exprs do not */
- rel->reltarget.cost.startup = 0;
- rel->reltarget.cost.per_tuple = 0;
+ rel->reltarget->cost.startup = 0;
+ rel->reltarget->cost.per_tuple = 0;
- foreach(lc, rel->reltarget.exprs)
+ foreach(lc, rel->reltarget->exprs)
{
Node *node = (Node *) lfirst(lc);
{
/*
* We will need to evaluate the PHV's contained expression while
- * scanning this rel, so be sure to include it in reltarget.cost.
+ * scanning this rel, so be sure to include it in reltarget->cost.
*/
PlaceHolderVar *phv = (PlaceHolderVar *) node;
PlaceHolderInfo *phinfo = find_placeholder_info(root, phv, false);
tuple_width += phinfo->ph_width;
cost_qual_eval_node(&cost, (Node *) phv->phexpr, root);
- rel->reltarget.cost.startup += cost.startup;
- rel->reltarget.cost.per_tuple += cost.per_tuple;
+ rel->reltarget->cost.startup += cost.startup;
+ rel->reltarget->cost.per_tuple += cost.per_tuple;
}
else
{
tuple_width += item_width;
/* Not entirely clear if we need to account for cost, but do so */
cost_qual_eval_node(&cost, node, root);
- rel->reltarget.cost.startup += cost.startup;
- rel->reltarget.cost.per_tuple += cost.per_tuple;
+ rel->reltarget->cost.startup += cost.startup;
+ rel->reltarget->cost.per_tuple += cost.per_tuple;
}
}
}
Assert(tuple_width >= 0);
- rel->reltarget.width = tuple_width;
+ rel->reltarget->width = tuple_width;
}
/*
bpath.path.type = T_BitmapHeapPath;
bpath.path.pathtype = T_BitmapHeapScan;
bpath.path.parent = rel;
- bpath.path.pathtarget = &(rel->reltarget);
+ bpath.path.pathtarget = rel->reltarget;
bpath.path.param_info = get_baserel_parampathinfo(root, rel,
required_outer);
bpath.path.pathkeys = NIL;
apath.path.type = T_BitmapAndPath;
apath.path.pathtype = T_BitmapAnd;
apath.path.parent = rel;
- apath.path.pathtarget = &(rel->reltarget);
+ apath.path.pathtarget = rel->reltarget;
apath.path.param_info = NULL; /* not used in bitmap trees */
apath.path.pathkeys = NIL;
apath.bitmapquals = paths;
bpath.path.type = T_BitmapHeapPath;
bpath.path.pathtype = T_BitmapHeapScan;
bpath.path.parent = rel;
- bpath.path.pathtarget = &(rel->reltarget);
+ bpath.path.pathtarget = rel->reltarget;
bpath.path.param_info = get_baserel_parampathinfo(root, rel,
required_outer);
bpath.path.pathkeys = NIL;
* look at rel's targetlist, not the attr_needed data, because attr_needed
* isn't computed for inheritance child rels.
*/
- pull_varattnos((Node *) rel->reltarget.exprs, rel->relid, &attrs_used);
+ pull_varattnos((Node *) rel->reltarget->exprs, rel->relid, &attrs_used);
/* Add all the attributes used by restriction clauses. */
foreach(lc, rel->baserestrictinfo)
* Note: we must look at rel's targetlist, not the attr_needed data,
* because attr_needed isn't computed for inheritance child rels.
*/
- pull_varattnos((Node *) rel->reltarget.exprs, scan_relid, &attrs_used);
+ pull_varattnos((Node *) rel->reltarget->exprs, scan_relid, &attrs_used);
/* Add all the attributes used by restriction clauses. */
foreach(lc, rel->baserestrictinfo)
{
/* Variable not yet requested, so add to rel's targetlist */
/* XXX is copyObject necessary here? */
- rel->reltarget.exprs = lappend(rel->reltarget.exprs,
- copyObject(var));
+ rel->reltarget->exprs = lappend(rel->reltarget->exprs,
+ copyObject(var));
/* reltarget cost and width will be computed later */
}
rel->attr_needed[attno] = bms_add_members(rel->attr_needed[attno],
/* The only path for it is a trivial Result path */
add_path(final_rel, (Path *)
create_result_path(root, final_rel,
- &(final_rel->reltarget),
+ final_rel->reltarget,
(List *) parse->jointree->quals));
/* Select cheapest path (pretty easy in this case...) */
* set_baserel_size_estimates, just do a quick hack for rows and width.
*/
rel->rows = rel->tuples;
- rel->reltarget.width = get_relation_data_width(tableOid, NULL);
+ rel->reltarget->width = get_relation_data_width(tableOid, NULL);
root->total_table_pages = rel->pages;
/* Estimate the cost of seq scan + sort */
seqScanPath = create_seqscan_path(root, rel, NULL, 0);
cost_sort(&seqScanAndSortPath, root, NIL,
- seqScanPath->total_cost, rel->tuples, rel->reltarget.width,
+ seqScanPath->total_cost, rel->tuples, rel->reltarget->width,
comparisonCost, maintenance_work_mem, -1.0);
/* Estimate the cost of index scan */
pathnode->pathtype = T_SeqScan;
pathnode->parent = rel;
- pathnode->pathtarget = &(rel->reltarget);
+ pathnode->pathtarget = rel->reltarget;
pathnode->param_info = get_baserel_parampathinfo(root, rel,
required_outer);
pathnode->parallel_aware = parallel_degree > 0 ? true : false;
pathnode->pathtype = T_SampleScan;
pathnode->parent = rel;
- pathnode->pathtarget = &(rel->reltarget);
+ pathnode->pathtarget = rel->reltarget;
pathnode->param_info = get_baserel_parampathinfo(root, rel,
required_outer);
pathnode->parallel_aware = false;
pathnode->path.pathtype = indexonly ? T_IndexOnlyScan : T_IndexScan;
pathnode->path.parent = rel;
- pathnode->path.pathtarget = &(rel->reltarget);
+ pathnode->path.pathtarget = rel->reltarget;
pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
required_outer);
pathnode->path.parallel_aware = false;
pathnode->path.pathtype = T_BitmapHeapScan;
pathnode->path.parent = rel;
- pathnode->path.pathtarget = &(rel->reltarget);
+ pathnode->path.pathtarget = rel->reltarget;
pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
required_outer);
pathnode->path.parallel_aware = false;
pathnode->path.pathtype = T_BitmapAnd;
pathnode->path.parent = rel;
- pathnode->path.pathtarget = &(rel->reltarget);
+ pathnode->path.pathtarget = rel->reltarget;
pathnode->path.param_info = NULL; /* not used in bitmap trees */
/*
pathnode->path.pathtype = T_BitmapOr;
pathnode->path.parent = rel;
- pathnode->path.pathtarget = &(rel->reltarget);
+ pathnode->path.pathtarget = rel->reltarget;
pathnode->path.param_info = NULL; /* not used in bitmap trees */
/*
pathnode->path.pathtype = T_TidScan;
pathnode->path.parent = rel;
- pathnode->path.pathtarget = &(rel->reltarget);
+ pathnode->path.pathtarget = rel->reltarget;
pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
required_outer);
pathnode->path.parallel_aware = false;
pathnode->path.pathtype = T_Append;
pathnode->path.parent = rel;
- pathnode->path.pathtarget = &(rel->reltarget);
+ pathnode->path.pathtarget = rel->reltarget;
pathnode->path.param_info = get_appendrel_parampathinfo(rel,
required_outer);
pathnode->path.parallel_aware = false;
pathnode->path.pathtype = T_MergeAppend;
pathnode->path.parent = rel;
- pathnode->path.pathtarget = &(rel->reltarget);
+ pathnode->path.pathtarget = rel->reltarget;
pathnode->path.param_info = get_appendrel_parampathinfo(rel,
required_outer);
pathnode->path.parallel_aware = false;
pathnode->path.pathtype = T_Material;
pathnode->path.parent = rel;
- pathnode->path.pathtarget = &(rel->reltarget);
+ pathnode->path.pathtarget = rel->reltarget;
pathnode->path.param_info = subpath->param_info;
pathnode->path.parallel_aware = false;
pathnode->path.parallel_safe = rel->consider_parallel &&
pathnode->path.pathtype = T_Unique;
pathnode->path.parent = rel;
- pathnode->path.pathtarget = &(rel->reltarget);
+ pathnode->path.pathtarget = rel->reltarget;
pathnode->path.param_info = subpath->param_info;
pathnode->path.parallel_aware = false;
pathnode->path.parallel_safe = rel->consider_parallel &&
pathnode->path.pathtype = T_Gather;
pathnode->path.parent = rel;
- pathnode->path.pathtarget = &(rel->reltarget);
+ pathnode->path.pathtarget = rel->reltarget;
pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
required_outer);
pathnode->path.parallel_aware = false;
pathnode->path.pathtype = T_SubqueryScan;
pathnode->path.parent = rel;
- pathnode->path.pathtarget = &(rel->reltarget);
+ pathnode->path.pathtarget = rel->reltarget;
pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
required_outer);
pathnode->path.parallel_aware = false;
pathnode->pathtype = T_FunctionScan;
pathnode->parent = rel;
- pathnode->pathtarget = &(rel->reltarget);
+ pathnode->pathtarget = rel->reltarget;
pathnode->param_info = get_baserel_parampathinfo(root, rel,
required_outer);
pathnode->parallel_aware = false;
pathnode->pathtype = T_ValuesScan;
pathnode->parent = rel;
- pathnode->pathtarget = &(rel->reltarget);
+ pathnode->pathtarget = rel->reltarget;
pathnode->param_info = get_baserel_parampathinfo(root, rel,
required_outer);
pathnode->parallel_aware = false;
pathnode->pathtype = T_CteScan;
pathnode->parent = rel;
- pathnode->pathtarget = &(rel->reltarget);
+ pathnode->pathtarget = rel->reltarget;
pathnode->param_info = get_baserel_parampathinfo(root, rel,
required_outer);
pathnode->parallel_aware = false;
pathnode->pathtype = T_WorkTableScan;
pathnode->parent = rel;
- pathnode->pathtarget = &(rel->reltarget);
+ pathnode->pathtarget = rel->reltarget;
pathnode->param_info = get_baserel_parampathinfo(root, rel,
required_outer);
pathnode->parallel_aware = false;
pathnode->path.pathtype = T_ForeignScan;
pathnode->path.parent = rel;
- pathnode->path.pathtarget = &(rel->reltarget);
+ pathnode->path.pathtarget = rel->reltarget;
pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
required_outer);
pathnode->path.parallel_aware = false;
pathnode->path.pathtype = T_NestLoop;
pathnode->path.parent = joinrel;
- pathnode->path.pathtarget = &(joinrel->reltarget);
+ pathnode->path.pathtarget = joinrel->reltarget;
pathnode->path.param_info =
get_joinrel_parampathinfo(root,
joinrel,
pathnode->jpath.path.pathtype = T_MergeJoin;
pathnode->jpath.path.parent = joinrel;
- pathnode->jpath.path.pathtarget = &(joinrel->reltarget);
+ pathnode->jpath.path.pathtarget = joinrel->reltarget;
pathnode->jpath.path.param_info =
get_joinrel_parampathinfo(root,
joinrel,
pathnode->jpath.path.pathtype = T_HashJoin;
pathnode->jpath.path.parent = joinrel;
- pathnode->jpath.path.pathtarget = &(joinrel->reltarget);
+ pathnode->jpath.path.pathtarget = joinrel->reltarget;
pathnode->jpath.path.param_info =
get_joinrel_parampathinfo(root,
joinrel,
pathnode->path.pathtype = T_ModifyTable;
pathnode->path.parent = rel;
/* pathtarget is not interesting, just make it minimally valid */
- pathnode->path.pathtarget = &(rel->reltarget);
+ pathnode->path.pathtarget = rel->reltarget;
/* For now, assume we are above any joins, so no parameterization */
pathnode->path.param_info = NULL;
pathnode->path.parallel_aware = false;
{
RelOptInfo *rel = find_base_rel(root, varno);
- rel->reltarget.exprs = lappend(rel->reltarget.exprs,
- copyObject(phinfo->ph_var));
+ rel->reltarget->exprs = lappend(rel->reltarget->exprs,
+ copyObject(phinfo->ph_var));
/* reltarget's cost and width fields will be updated later */
}
}
if (bms_is_subset(phinfo->ph_eval_at, relids))
{
/* Yup, add it to the output */
- joinrel->reltarget.exprs = lappend(joinrel->reltarget.exprs,
- phinfo->ph_var);
- joinrel->reltarget.width += phinfo->ph_width;
+ joinrel->reltarget->exprs = lappend(joinrel->reltarget->exprs,
+ phinfo->ph_var);
+ joinrel->reltarget->width += phinfo->ph_width;
/*
* Charge the cost of evaluating the contained expression if
cost_qual_eval_node(&cost, (Node *) phinfo->ph_var->phexpr,
root);
- joinrel->reltarget.cost.startup += cost.startup;
- joinrel->reltarget.cost.per_tuple += cost.per_tuple;
+ joinrel->reltarget->cost.startup += cost.startup;
+ joinrel->reltarget->cost.per_tuple += cost.per_tuple;
}
/* Adjust joinrel's direct_lateral_relids as needed */
#include "optimizer/placeholder.h"
#include "optimizer/plancat.h"
#include "optimizer/restrictinfo.h"
+#include "optimizer/tlist.h"
#include "utils/hsearch.h"
rel->consider_startup = (root->tuple_fraction > 0);
rel->consider_param_startup = false; /* might get changed later */
rel->consider_parallel = false; /* might get changed later */
- rel->reltarget.exprs = NIL;
- rel->reltarget.sortgrouprefs = NULL;
- rel->reltarget.cost.startup = 0;
- rel->reltarget.cost.per_tuple = 0;
- rel->reltarget.width = 0;
+ rel->reltarget = create_empty_pathtarget();
rel->pathlist = NIL;
rel->ppilist = NIL;
rel->partial_pathlist = NIL;
joinrel->consider_startup = (root->tuple_fraction > 0);
joinrel->consider_param_startup = false;
joinrel->consider_parallel = false;
- joinrel->reltarget.exprs = NIL;
- joinrel->reltarget.sortgrouprefs = NULL;
- joinrel->reltarget.cost.startup = 0;
- joinrel->reltarget.cost.per_tuple = 0;
- joinrel->reltarget.width = 0;
+ joinrel->reltarget = create_empty_pathtarget();
joinrel->pathlist = NIL;
joinrel->ppilist = NIL;
joinrel->partial_pathlist = NIL;
Relids relids = joinrel->relids;
ListCell *vars;
- foreach(vars, input_rel->reltarget.exprs)
+ foreach(vars, input_rel->reltarget->exprs)
{
Var *var = (Var *) lfirst(vars);
RelOptInfo *baserel;
if (bms_nonempty_difference(baserel->attr_needed[ndx], relids))
{
/* Yup, add it to the output */
- joinrel->reltarget.exprs = lappend(joinrel->reltarget.exprs, var);
- /* Vars have cost zero, so no need to adjust reltarget.cost */
- joinrel->reltarget.width += baserel->attr_widths[ndx];
+ joinrel->reltarget->exprs = lappend(joinrel->reltarget->exprs, var);
+ /* Vars have cost zero, so no need to adjust reltarget->cost */
+ joinrel->reltarget->width += baserel->attr_widths[ndx];
}
}
}
joinrel->relids = NULL; /* empty set */
joinrel->rows = 1; /* we produce one row for such cases */
joinrel->rtekind = RTE_JOIN;
+ joinrel->reltarget = create_empty_pathtarget();
root->join_rel_list = lappend(root->join_rel_list, joinrel);
upperrel->consider_startup = (root->tuple_fraction > 0);
upperrel->consider_param_startup = false;
upperrel->consider_parallel = false; /* might get changed later */
+ upperrel->reltarget = create_empty_pathtarget();
upperrel->pathlist = NIL;
upperrel->cheapest_startup_path = NULL;
upperrel->cheapest_total_path = NULL;
PathTarget *
make_pathtarget_from_tlist(List *tlist)
{
- PathTarget *target = (PathTarget *) palloc0(sizeof(PathTarget));
+ PathTarget *target = makeNode(PathTarget);
int i;
ListCell *lc;
PathTarget *
copy_pathtarget(PathTarget *src)
{
- PathTarget *dst = (PathTarget *) palloc(sizeof(PathTarget));
+ PathTarget *dst = makeNode(PathTarget);
/* Copy scalar fields */
memcpy(dst, src, sizeof(PathTarget));
create_empty_pathtarget(void)
{
/* This is easy, but we don't want callers to hard-wire this ... */
- return (PathTarget *) palloc0(sizeof(PathTarget));
+ return makeNode(PathTarget);
}
/*
T_EquivalenceClass,
T_EquivalenceMember,
T_PathKey,
+ T_PathTarget,
T_RestrictInfo,
T_PlaceHolderVar,
T_SpecialJoinInfo,
Size transitionSpace; /* space for pass-by-ref transition data */
} AggClauseCosts;
-/*
- * This struct contains what we need to know during planning about the
- * targetlist (output columns) that a Path will compute. Each RelOptInfo
- * includes a default PathTarget, which its individual Paths may merely point
- * to. However, in some cases a Path may compute outputs different from other
- * Paths, and in that case we make a custom PathTarget struct for it. For
- * example, an indexscan might return index expressions that would otherwise
- * need to be explicitly calculated.
- *
- * exprs contains bare expressions; they do not have TargetEntry nodes on top,
- * though those will appear in finished Plans.
- *
- * sortgrouprefs[] is an array of the same length as exprs, containing the
- * corresponding sort/group refnos, or zeroes for expressions not referenced
- * by sort/group clauses. If sortgrouprefs is NULL (which it always is in
- * RelOptInfo.reltarget structs; only upper-level Paths contain this info), we
- * have not identified sort/group columns in this tlist. This allows us to
- * deal with sort/group refnos when needed with less expense than including
- * TargetEntry nodes in the exprs list.
- */
-typedef struct PathTarget
-{
- List *exprs; /* list of expressions to be computed */
- Index *sortgrouprefs; /* corresponding sort/group refnos, or 0 */
- QualCost cost; /* cost of evaluating the expressions */
- int width; /* estimated avg width of result tuples */
-} PathTarget;
-
/*
* This enum identifies the different types of "upper" (post-scan/join)
* relations that we might deal with during planning.
bool consider_parallel; /* consider parallel paths? */
/* default result targetlist for Paths scanning this relation */
- PathTarget reltarget; /* list of Vars/Exprs, cost, width */
+ struct PathTarget *reltarget; /* list of Vars/Exprs, cost, width */
/* materialization information */
List *pathlist; /* Path structures */
} PathKey;
+/*
+ * PathTarget
+ *
+ * This struct contains what we need to know during planning about the
+ * targetlist (output columns) that a Path will compute. Each RelOptInfo
+ * includes a default PathTarget, which its individual Paths may simply
+ * reference. However, in some cases a Path may compute outputs different
+ * from other Paths, and in that case we make a custom PathTarget for it.
+ * For example, an indexscan might return index expressions that would
+ * otherwise need to be explicitly calculated. (Note also that "upper"
+ * relations generally don't have useful default PathTargets.)
+ *
+ * exprs contains bare expressions; they do not have TargetEntry nodes on top,
+ * though those will appear in finished Plans.
+ *
+ * sortgrouprefs[] is an array of the same length as exprs, containing the
+ * corresponding sort/group refnos, or zeroes for expressions not referenced
+ * by sort/group clauses. If sortgrouprefs is NULL (which it generally is in
+ * RelOptInfo.reltarget targets; only upper-level Paths contain this info),
+ * we have not identified sort/group columns in this tlist. This allows us to
+ * deal with sort/group refnos when needed with less expense than including
+ * TargetEntry nodes in the exprs list.
+ */
+typedef struct PathTarget
+{
+	NodeTag		type;			/* tag is T_PathTarget */
+	List	   *exprs;			/* list of expressions to be computed */
+	Index	   *sortgrouprefs;	/* corresponding sort/group refnos, or 0 */
+	QualCost	cost;			/* cost of evaluating the expressions */
+	int			width;			/* estimated avg width of result tuples */
+} PathTarget;
+
+
+
/*
* ParamPathInfo
*
* "parent" identifies the relation this Path scans, and "pathtarget"
* describes the precise set of output columns the Path would compute.
* In simple cases all Paths for a given rel share the same targetlist,
- * which we represent by having path->pathtarget point to parent->reltarget.
+ * which we represent by having path->pathtarget equal to parent->reltarget.
*
* "param_info", if not NULL, links to a ParamPathInfo that identifies outer
* relation(s) that provide parameter values to each scan of this path.