Diffstat (limited to 'src/backend/optimizer/plan')
-rw-r--r--  src/backend/optimizer/plan/createplan.c | 15
-rw-r--r--  src/backend/optimizer/plan/initsplan.c  |  7
-rw-r--r--  src/backend/optimizer/plan/planner.c    | 67
3 files changed, 51 insertions(+), 38 deletions(-)
diff --git a/src/backend/optimizer/plan/createplan.c b/src/backend/optimizer/plan/createplan.c
index 6e0db080383..198b06b849d 100644
--- a/src/backend/optimizer/plan/createplan.c
+++ b/src/backend/optimizer/plan/createplan.c
@@ -476,7 +476,7 @@ build_path_tlist(PlannerInfo *root, Path *path)
int resno = 1;
ListCell *v;
- foreach(v, rel->reltargetlist)
+ foreach(v, rel->reltarget.exprs)
{
/* Do we really need to copy here? Not sure */
Node *node = (Node *) copyObject(lfirst(v));
@@ -875,9 +875,8 @@ create_result_plan(PlannerInfo *root, ResultPath *best_path)
List *tlist;
List *quals;
- /* The tlist will be installed later, since we have no RelOptInfo */
- Assert(best_path->path.parent == NULL);
- tlist = NIL;
+ /* This is a bit useless currently, because rel will have empty tlist */
+ tlist = build_path_tlist(root, &best_path->path);
/* best_path->quals is just bare clauses */
@@ -2183,7 +2182,7 @@ create_foreignscan_plan(PlannerInfo *root, ForeignPath *best_path,
/*
* If rel is a base relation, detect whether any system columns are
* requested from the rel. (If rel is a join relation, rel->relid will be
- * 0, but there can be no Var with relid 0 in the reltargetlist or the
+ * 0, but there can be no Var with relid 0 in the rel's targetlist or the
* restriction clauses, so we skip this in that case. Note that any such
* columns in base relations that were joined are assumed to be contained
* in fdw_scan_tlist.) This is a bit of a kluge and might go away someday,
@@ -2198,10 +2197,10 @@ create_foreignscan_plan(PlannerInfo *root, ForeignPath *best_path,
/*
* First, examine all the attributes needed for joins or final output.
- * Note: we must look at reltargetlist, not the attr_needed data,
+ * Note: we must look at rel's targetlist, not the attr_needed data,
* because attr_needed isn't computed for inheritance child rels.
*/
- pull_varattnos((Node *) rel->reltargetlist, scan_relid, &attrs_used);
+ pull_varattnos((Node *) rel->reltarget.exprs, scan_relid, &attrs_used);
/* Add all the attributes used by restriction clauses. */
foreach(lc, rel->baserestrictinfo)
@@ -3455,7 +3454,7 @@ copy_generic_path_info(Plan *dest, Path *src)
dest->startup_cost = src->startup_cost;
dest->total_cost = src->total_cost;
dest->plan_rows = src->rows;
- dest->plan_width = src->parent->width;
+ dest->plan_width = src->pathtarget->width;
dest->parallel_aware = src->parallel_aware;
}
else
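The accesses in the hunks above (rel->reltarget.exprs, rel->reltarget.width, src->pathtarget->width) imply roughly the following shape for the new target representation. This is a sketch for orientation only, not copied from the patched headers; the exact member list and comments are assumptions inferred from this diff.

/*
 * Sketch only -- assumed layout, not taken verbatim from the tree.
 */
typedef struct PathTarget
{
	List	   *exprs;		/* expressions to compute (replaces reltargetlist) */
	QualCost	cost;		/* cost of evaluating them; filled in later */
	int			width;		/* estimated average result row width */
} PathTarget;

/*
 * RelOptInfo appears to embed one of these (hence rel->reltarget.exprs and
 * rel->reltarget.width above), while Path carries a pointer to whichever
 * target it produces (hence src->pathtarget->width), presumably just
 * &rel->reltarget for ordinary scan and join paths.
 */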
diff --git a/src/backend/optimizer/plan/initsplan.c b/src/backend/optimizer/plan/initsplan.c
index 4a906a88a85..37fb5862097 100644
--- a/src/backend/optimizer/plan/initsplan.c
+++ b/src/backend/optimizer/plan/initsplan.c
@@ -211,10 +211,11 @@ add_vars_to_targetlist(PlannerInfo *root, List *vars,
attno -= rel->min_attr;
if (rel->attr_needed[attno] == NULL)
{
- /* Variable not yet requested, so add to reltargetlist */
+ /* Variable not yet requested, so add to rel's targetlist */
/* XXX is copyObject necessary here? */
- rel->reltargetlist = lappend(rel->reltargetlist,
- copyObject(var));
+ rel->reltarget.exprs = lappend(rel->reltarget.exprs,
+ copyObject(var));
+ /* reltarget cost and width will be computed later */
}
rel->attr_needed[attno] = bms_add_members(rel->attr_needed[attno],
where_needed);
diff --git a/src/backend/optimizer/plan/planner.c b/src/backend/optimizer/plan/planner.c
index f77c804b702..65b99e2af38 100644
--- a/src/backend/optimizer/plan/planner.c
+++ b/src/backend/optimizer/plan/planner.c
@@ -98,14 +98,16 @@ static List *reorder_grouping_sets(List *groupingSets, List *sortclause);
static void standard_qp_callback(PlannerInfo *root, void *extra);
static bool choose_hashed_grouping(PlannerInfo *root,
double tuple_fraction, double limit_tuples,
- double path_rows, int path_width,
+ double path_rows,
Path *cheapest_path, Path *sorted_path,
double dNumGroups, AggClauseCosts *agg_costs);
static bool choose_hashed_distinct(PlannerInfo *root,
double tuple_fraction, double limit_tuples,
- double path_rows, int path_width,
+ double path_rows,
Cost cheapest_startup_cost, Cost cheapest_total_cost,
+ int cheapest_path_width,
Cost sorted_startup_cost, Cost sorted_total_cost,
+ int sorted_path_width,
List *sorted_pathkeys,
double dNumDistinctRows);
static List *make_subplanTargetList(PlannerInfo *root, List *tlist,
@@ -1467,7 +1469,6 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
AggClauseCosts agg_costs;
int numGroupCols;
double path_rows;
- int path_width;
bool use_hashed_grouping = false;
WindowFuncLists *wflists = NULL;
List *activeWindows = NIL;
@@ -1672,12 +1673,11 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
standard_qp_callback, &qp_extra);
/*
- * Extract rowcount and width estimates for use below. If final_rel
- * has been proven dummy, its rows estimate will be zero; clamp it to
- * one to avoid zero-divide in subsequent calculations.
+ * Extract rowcount estimate for use below. If final_rel has been
+ * proven dummy, its rows estimate will be zero; clamp it to one to
+ * avoid zero-divide in subsequent calculations.
*/
path_rows = clamp_row_est(final_rel->rows);
- path_width = final_rel->width;
/*
* If there's grouping going on, estimate the number of result groups.
@@ -1849,7 +1849,7 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
/* Figure cost for sorting */
cost_sort(&sort_path, root, root->query_pathkeys,
cheapest_path->total_cost,
- path_rows, path_width,
+ path_rows, cheapest_path->pathtarget->width,
0.0, work_mem, root->limit_tuples);
}
@@ -1881,7 +1881,7 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
use_hashed_grouping =
choose_hashed_grouping(root,
tuple_fraction, limit_tuples,
- path_rows, path_width,
+ path_rows,
cheapest_path, sorted_path,
dNumGroups, &agg_costs);
}
@@ -1900,11 +1900,13 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
use_hashed_distinct =
choose_hashed_distinct(root,
tuple_fraction, limit_tuples,
- path_rows, path_width,
+ path_rows,
cheapest_path->startup_cost,
cheapest_path->total_cost,
+ cheapest_path->pathtarget->width,
sorted_path->startup_cost,
sorted_path->total_cost,
+ sorted_path->pathtarget->width,
sorted_path->pathkeys,
dNumGroups);
tested_hashed_distinct = true;
@@ -2343,11 +2345,12 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
choose_hashed_distinct(root,
tuple_fraction, limit_tuples,
result_plan->plan_rows,
- result_plan->plan_width,
result_plan->startup_cost,
result_plan->total_cost,
+ result_plan->plan_width,
result_plan->startup_cost,
result_plan->total_cost,
+ result_plan->plan_width,
current_pathkeys,
dNumDistinctRows);
}
@@ -2678,10 +2681,13 @@ build_grouping_chain(PlannerInfo *root,
* any logic that uses plan_rows to, eg, estimate qual evaluation costs.)
*
* Note: during initial stages of planning, we mostly consider plan nodes with
- * "flat" tlists, containing just Vars. So their evaluation cost is zero
- * according to the model used by cost_qual_eval() (or if you prefer, the cost
- * is factored into cpu_tuple_cost). Thus we can avoid accounting for tlist
- * cost throughout query_planner() and subroutines. But once we apply a
+ * "flat" tlists, containing just Vars and PlaceHolderVars. The evaluation
+ * cost of Vars is zero according to the model used by cost_qual_eval() (or if
+ * you prefer, the cost is factored into cpu_tuple_cost). The evaluation cost
+ * of a PHV's expression is charged as part of the scan cost of whichever plan
+ * node first computes it, and then subsequent references to the PHV can be
+ * taken as having cost zero. Thus we can avoid worrying about tlist cost
+ * as such throughout query_planner() and subroutines. But once we apply a
* tlist that might contain actual operators, sub-selects, etc, we'd better
* account for its cost. Any set-returning functions in the tlist must also
* affect the estimated rowcount.
@@ -3840,7 +3846,7 @@ standard_qp_callback(PlannerInfo *root, void *extra)
static bool
choose_hashed_grouping(PlannerInfo *root,
double tuple_fraction, double limit_tuples,
- double path_rows, int path_width,
+ double path_rows,
Path *cheapest_path, Path *sorted_path,
double dNumGroups, AggClauseCosts *agg_costs)
{
@@ -3853,6 +3859,7 @@ choose_hashed_grouping(PlannerInfo *root,
List *current_pathkeys;
Path hashed_p;
Path sorted_p;
+ int sorted_p_width;
/*
* Executor doesn't support hashed aggregation with DISTINCT or ORDER BY
@@ -3890,7 +3897,8 @@ choose_hashed_grouping(PlannerInfo *root,
*/
/* Estimate per-hash-entry space at tuple width... */
- hashentrysize = MAXALIGN(path_width) + MAXALIGN(SizeofMinimalTupleHeader);
+ hashentrysize = MAXALIGN(cheapest_path->pathtarget->width) +
+ MAXALIGN(SizeofMinimalTupleHeader);
/* plus space for pass-by-ref transition values... */
hashentrysize += agg_costs->transitionSpace;
/* plus the per-hash-entry overhead */
@@ -3935,25 +3943,27 @@ choose_hashed_grouping(PlannerInfo *root,
/* Result of hashed agg is always unsorted */
if (target_pathkeys)
cost_sort(&hashed_p, root, target_pathkeys, hashed_p.total_cost,
- dNumGroups, path_width,
+ dNumGroups, cheapest_path->pathtarget->width,
0.0, work_mem, limit_tuples);
if (sorted_path)
{
sorted_p.startup_cost = sorted_path->startup_cost;
sorted_p.total_cost = sorted_path->total_cost;
+ sorted_p_width = sorted_path->pathtarget->width;
current_pathkeys = sorted_path->pathkeys;
}
else
{
sorted_p.startup_cost = cheapest_path->startup_cost;
sorted_p.total_cost = cheapest_path->total_cost;
+ sorted_p_width = cheapest_path->pathtarget->width;
current_pathkeys = cheapest_path->pathkeys;
}
if (!pathkeys_contained_in(root->group_pathkeys, current_pathkeys))
{
cost_sort(&sorted_p, root, root->group_pathkeys, sorted_p.total_cost,
- path_rows, path_width,
+ path_rows, sorted_p_width,
0.0, work_mem, -1.0);
current_pathkeys = root->group_pathkeys;
}
@@ -3971,7 +3981,7 @@ choose_hashed_grouping(PlannerInfo *root,
if (target_pathkeys &&
!pathkeys_contained_in(target_pathkeys, current_pathkeys))
cost_sort(&sorted_p, root, target_pathkeys, sorted_p.total_cost,
- dNumGroups, path_width,
+ dNumGroups, sorted_p_width,
0.0, work_mem, limit_tuples);
/*
@@ -4008,9 +4018,11 @@ choose_hashed_grouping(PlannerInfo *root,
static bool
choose_hashed_distinct(PlannerInfo *root,
double tuple_fraction, double limit_tuples,
- double path_rows, int path_width,
+ double path_rows,
Cost cheapest_startup_cost, Cost cheapest_total_cost,
+ int cheapest_path_width,
Cost sorted_startup_cost, Cost sorted_total_cost,
+ int sorted_path_width,
List *sorted_pathkeys,
double dNumDistinctRows)
{
@@ -4058,7 +4070,8 @@ choose_hashed_distinct(PlannerInfo *root,
*/
/* Estimate per-hash-entry space at tuple width... */
- hashentrysize = MAXALIGN(path_width) + MAXALIGN(SizeofMinimalTupleHeader);
+ hashentrysize = MAXALIGN(cheapest_path_width) +
+ MAXALIGN(SizeofMinimalTupleHeader);
/* plus the per-hash-entry overhead */
hashentrysize += hash_agg_entry_size(0);
@@ -4089,7 +4102,7 @@ choose_hashed_distinct(PlannerInfo *root,
*/
if (parse->sortClause)
cost_sort(&hashed_p, root, root->sort_pathkeys, hashed_p.total_cost,
- dNumDistinctRows, path_width,
+ dNumDistinctRows, cheapest_path_width,
0.0, work_mem, limit_tuples);
/*
@@ -4113,7 +4126,7 @@ choose_hashed_distinct(PlannerInfo *root,
else
current_pathkeys = root->sort_pathkeys;
cost_sort(&sorted_p, root, current_pathkeys, sorted_p.total_cost,
- path_rows, path_width,
+ path_rows, sorted_path_width,
0.0, work_mem, -1.0);
}
cost_group(&sorted_p, root, numDistinctCols, dNumDistinctRows,
@@ -4122,7 +4135,7 @@ choose_hashed_distinct(PlannerInfo *root,
if (parse->sortClause &&
!pathkeys_contained_in(root->sort_pathkeys, current_pathkeys))
cost_sort(&sorted_p, root, root->sort_pathkeys, sorted_p.total_cost,
- dNumDistinctRows, path_width,
+ dNumDistinctRows, sorted_path_width,
0.0, work_mem, limit_tuples);
/*
@@ -4896,7 +4909,7 @@ plan_cluster_use_sort(Oid tableOid, Oid indexOid)
* set_baserel_size_estimates, just do a quick hack for rows and width.
*/
rel->rows = rel->tuples;
- rel->width = get_relation_data_width(tableOid, NULL);
+ rel->reltarget.width = get_relation_data_width(tableOid, NULL);
root->total_table_pages = rel->pages;
@@ -4912,7 +4925,7 @@ plan_cluster_use_sort(Oid tableOid, Oid indexOid)
/* Estimate the cost of seq scan + sort */
seqScanPath = create_seqscan_path(root, rel, NULL, 0);
cost_sort(&seqScanAndSortPath, root, NIL,
- seqScanPath->total_cost, rel->tuples, rel->width,
+ seqScanPath->total_cost, rel->tuples, rel->reltarget.width,
comparisonCost, maintenance_work_mem, -1.0);
/* Estimate the cost of index scan */
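As a side note on the hash-entry arithmetic that now reads the width off the input path rather than a single per-query path_width: a minimal worked example of how the estimate comes together, using made-up numbers (8-byte MAXALIGN, a 45-byte pathtarget->width, and assuming MAXALIGN(SizeofMinimalTupleHeader) comes to 16 on the platform).

/* Illustration only; the width and header size are assumed values. */
Size		hashentrysize;

/* pathtarget->width = 45  ->  MAXALIGN(45) = 48 with 8-byte alignment */
hashentrysize = MAXALIGN(cheapest_path->pathtarget->width) +
	MAXALIGN(SizeofMinimalTupleHeader);		/* assume 16 */

/* plus pass-by-ref transition values and per-entry hashtable overhead */
hashentrysize += agg_costs->transitionSpace;
hashentrysize += hash_agg_entry_size(0);

/* i.e. 48 + 16 = 64 bytes per group before the last two additions */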