author     Robert Haas <rhaas@postgresql.org>  2024-08-21 10:12:30 -0400
committer  Robert Haas <rhaas@postgresql.org>  2024-08-21 10:12:30 -0400
commit     e22253467942fdb100087787c3e1e3a8620c54b2 (patch)
tree       879d77c5fd4157f6867ded7a3ed682d67177c49b /src/backend/optimizer/util/pathnode.c
parent     2b03cfeea47834913ff769124f4deba88140f662 (diff)
Treat number of disabled nodes in a path as a separate cost metric.
Previously, when a path type was disabled by e.g. enable_seqscan=false, we either avoided generating that path type in the first place, or more commonly, we added a large constant, called disable_cost, to the estimated startup cost of that path. This latter approach can distort planning. For instance, an extremely expensive non-disabled path could seem to be worse than a disabled path, especially if the full cost of that path node need not be paid (e.g. due to a Limit). Or, as in the regression test whose expected output changes with this commit, the addition of disable_cost can make two paths that would normally be distinguishable in cost seem to have fuzzily the same cost.

To fix that, we now count the number of disabled path nodes and consider that a high-order component of both the startup cost and the total cost. Hence, the path list is now sorted by disabled_nodes and then by total_cost, instead of just by the latter, and likewise for the partial path list. It is important that this number is a count and not simply a Boolean; else, as soon as we're unable to respect disabled path types in all portions of the path, we stop trying to avoid them where we can.

Because the path list is now sorted by the number of disabled nodes, the join prechecks must compute the count of disabled nodes during the initial cost phase instead of postponing it to final cost time.

Counts of disabled nodes do not cross subquery levels; at present, there is no reason for them to do so, since we do not postpone path selection across subquery boundaries (see make_subplan).

Reviewed by Andres Freund, Heikki Linnakangas, and David Rowley.

Discussion: http://postgr.es/m/CA+TgmoZ_+MS+o6NeGK2xyBv-xM+w1AfFVuHE4f_aq6ekHv7YSQ@mail.gmail.com
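To illustrate the ordering rule described above, here is a minimal, self-contained sketch (not PostgreSQL code; DemoPath and demo_compare are simplified stand-ins for Path and compare_path_costs) showing how the count of disabled nodes acts as a higher-order component of the cost, so that a cheap path using a disabled node type always loses to an enabled path, no matter how expensive the latter is:

#include <stdio.h>

/* Simplified stand-in for the Path fields relevant to the comparison. */
typedef struct DemoPath
{
	const char *label;
	int			disabled_nodes; /* count of disabled nodes in the path */
	double		total_cost;
} DemoPath;

/* Lexicographic comparison: disabled_nodes first, then total_cost. */
static int
demo_compare(const DemoPath *a, const DemoPath *b)
{
	if (a->disabled_nodes != b->disabled_nodes)
		return (a->disabled_nodes < b->disabled_nodes) ? -1 : +1;
	if (a->total_cost != b->total_cost)
		return (a->total_cost < b->total_cost) ? -1 : +1;
	return 0;
}

int
main(void)
{
	/* A cheap path that uses one disabled node type ... */
	DemoPath	disabled_scan = {"seqscan (disabled)", 1, 100.0};
	/* ... versus a far more expensive path with nothing disabled. */
	DemoPath	expensive_scan = {"indexscan", 0, 1e9};

	if (demo_compare(&expensive_scan, &disabled_scan) < 0)
		printf("%s wins despite its huge cost estimate\n",
			   expensive_scan.label);
	return 0;
}

Under this scheme the startup and total costs only break ties between paths with equal disabled_nodes counts, which is what the patch's changes to compare_path_costs, compare_fractional_path_costs and compare_path_costs_fuzzily below implement; a separate counter cannot be swamped or diluted by cost arithmetic the way an additive disable_cost penalty could.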
Diffstat (limited to 'src/backend/optimizer/util/pathnode.c')
-rw-r--r--  src/backend/optimizer/util/pathnode.c  212
1 file changed, 158 insertions, 54 deletions
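The hunks below thread the new counter through path construction: pass-through nodes (projection, limit, lock-rows, and so on) simply copy subpath->disabled_nodes, nodes with multiple inputs (MergeAppend, MinMaxAgg) sum their inputs into a local input_disabled_nodes, and costing calls such as cost_sort, cost_material and cost_merge_append gain an extra disabled-nodes argument. As a rough sketch of the accumulation pattern only — the helper below is hypothetical, and the real per-node accounting presumably lives in the cost_* functions in costsize.c, which are not part of this diff:

#include <stdbool.h>

/*
 * Hypothetical illustration of how a node's disabled count is assumed to
 * be derived: the children's counts are summed, and one more is added
 * when the node's own path type is disabled.
 */
static int
count_disabled_nodes(bool node_type_enabled,
					 const int *child_disabled_nodes, int nchildren)
{
	int			disabled_nodes = node_type_enabled ? 0 : 1;

	for (int i = 0; i < nchildren; i++)
		disabled_nodes += child_disabled_nodes[i];

	return disabled_nodes;
}

For example, a Sort placed atop a disabled sequential scan would presumably carry a count of 1 (or 2 if enable_sort is also off), and create_merge_append_path below sums such per-subpath counts into input_disabled_nodes before handing them to cost_merge_append.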
diff --git a/src/backend/optimizer/util/pathnode.c b/src/backend/optimizer/util/pathnode.c
index 54e042a8a59..fc97bf6ee26 100644
--- a/src/backend/optimizer/util/pathnode.c
+++ b/src/backend/optimizer/util/pathnode.c
@@ -68,6 +68,15 @@ static bool pathlist_is_reparameterizable_by_child(List *pathlist,
int
compare_path_costs(Path *path1, Path *path2, CostSelector criterion)
{
+ /* Number of disabled nodes, if different, trumps all else. */
+ if (unlikely(path1->disabled_nodes != path2->disabled_nodes))
+ {
+ if (path1->disabled_nodes < path2->disabled_nodes)
+ return -1;
+ else
+ return +1;
+ }
+
if (criterion == STARTUP_COST)
{
if (path1->startup_cost < path2->startup_cost)
@@ -118,6 +127,15 @@ compare_fractional_path_costs(Path *path1, Path *path2,
Cost cost1,
cost2;
+ /* Number of disabled nodes, if different, trumps all else. */
+ if (unlikely(path1->disabled_nodes != path2->disabled_nodes))
+ {
+ if (path1->disabled_nodes < path2->disabled_nodes)
+ return -1;
+ else
+ return +1;
+ }
+
if (fraction <= 0.0 || fraction >= 1.0)
return compare_path_costs(path1, path2, TOTAL_COST);
cost1 = path1->startup_cost +
@@ -166,6 +184,15 @@ compare_path_costs_fuzzily(Path *path1, Path *path2, double fuzz_factor)
#define CONSIDER_PATH_STARTUP_COST(p) \
((p)->param_info == NULL ? (p)->parent->consider_startup : (p)->parent->consider_param_startup)
+ /* Number of disabled nodes, if different, trumps all else. */
+ if (unlikely(path1->disabled_nodes != path2->disabled_nodes))
+ {
+ if (path1->disabled_nodes < path2->disabled_nodes)
+ return COSTS_BETTER1;
+ else
+ return COSTS_BETTER2;
+ }
+
/*
* Check total cost first since it's more likely to be different; many
* paths have zero startup cost.
@@ -362,15 +389,29 @@ set_cheapest(RelOptInfo *parent_rel)
* add_path
* Consider a potential implementation path for the specified parent rel,
* and add it to the rel's pathlist if it is worthy of consideration.
+ *
* A path is worthy if it has a better sort order (better pathkeys) or
- * cheaper cost (on either dimension), or generates fewer rows, than any
- * existing path that has the same or superset parameterization rels.
- * We also consider parallel-safe paths more worthy than others.
+ * cheaper cost (as defined below), or generates fewer rows, than any
+ * existing path that has the same or superset parameterization rels. We
+ * also consider parallel-safe paths more worthy than others.
+ *
+ * Cheaper cost can mean either a cheaper total cost or a cheaper startup
+ * cost; if one path is cheaper in one of these aspects and another is
+ * cheaper in the other, we keep both. However, when some path type is
+ * disabled (e.g. due to enable_seqscan=false), the number of times that
+ * a disabled path type is used is considered to be a higher-order
+ * component of the cost. Hence, if path A uses no disabled path type,
+ * and path B uses 1 or more disabled path types, A is cheaper, no matter
+ * what we estimate for the startup and total costs. The startup and total
+ * cost essentially act as a tiebreak when comparing paths that use equal
+ * numbers of disabled path nodes; but in practice this tiebreak is almost
+ * always used, since normally no path types are disabled.
*
- * We also remove from the rel's pathlist any old paths that are dominated
- * by new_path --- that is, new_path is cheaper, at least as well ordered,
- * generates no more rows, requires no outer rels not required by the old
- * path, and is no less parallel-safe.
+ * In addition to possibly adding new_path, we also remove from the rel's
+ * pathlist any old paths that are dominated by new_path --- that is,
+ * new_path is cheaper, at least as well ordered, generates no more rows,
+ * requires no outer rels not required by the old path, and is no less
+ * parallel-safe.
*
* In most cases, a path with a superset parameterization will generate
* fewer rows (since it has more join clauses to apply), so that those two
@@ -389,10 +430,10 @@ set_cheapest(RelOptInfo *parent_rel)
* parent_rel->consider_param_startup is true for a parameterized one.
* Again, this allows discarding useless paths sooner.
*
- * The pathlist is kept sorted by total_cost, with cheaper paths
- * at the front. Within this routine, that's simply a speed hack:
- * doing it that way makes it more likely that we will reject an inferior
- * path after a few comparisons, rather than many comparisons.
+ * The pathlist is kept sorted by disabled_nodes and then by total_cost,
+ * with cheaper paths at the front. Within this routine, that's simply a
+ * speed hack: doing it that way makes it more likely that we will reject
+ * an inferior path after a few comparisons, rather than many comparisons.
* However, add_path_precheck relies on this ordering to exit early
* when possible.
*
@@ -593,8 +634,13 @@ add_path(RelOptInfo *parent_rel, Path *new_path)
}
else
{
- /* new belongs after this old path if it has cost >= old's */
- if (new_path->total_cost >= old_path->total_cost)
+ /*
+ * new belongs after this old path if it has more disabled nodes
+ * or if it has the same number of nodes but a greater total cost
+ */
+ if (new_path->disabled_nodes > old_path->disabled_nodes ||
+ (new_path->disabled_nodes == old_path->disabled_nodes &&
+ new_path->total_cost >= old_path->total_cost))
insert_at = foreach_current_index(p1) + 1;
}
@@ -639,7 +685,7 @@ add_path(RelOptInfo *parent_rel, Path *new_path)
* so the required information has to be passed piecemeal.
*/
bool
-add_path_precheck(RelOptInfo *parent_rel,
+add_path_precheck(RelOptInfo *parent_rel, int disabled_nodes,
Cost startup_cost, Cost total_cost,
List *pathkeys, Relids required_outer)
{
@@ -659,6 +705,20 @@ add_path_precheck(RelOptInfo *parent_rel,
PathKeysComparison keyscmp;
/*
+ * Since the pathlist is sorted by disabled_nodes and then by
+ * total_cost, we can stop looking once we reach a path with more
+ * disabled nodes, or the same number of disabled nodes plus a
+ * total_cost larger than the new path's.
+ */
+ if (unlikely(old_path->disabled_nodes != disabled_nodes))
+ {
+ if (disabled_nodes < old_path->disabled_nodes)
+ break;
+ }
+ else if (total_cost <= old_path->total_cost * STD_FUZZ_FACTOR)
+ break;
+
+ /*
* We are looking for an old_path with the same parameterization (and
* by assumption the same rowcount) that dominates the new path on
* pathkeys as well as both cost metrics. If we find one, we can
@@ -666,39 +726,27 @@ add_path_precheck(RelOptInfo *parent_rel,
*
* Cost comparisons here should match compare_path_costs_fuzzily.
*/
- if (total_cost > old_path->total_cost * STD_FUZZ_FACTOR)
+ /* new path can win on startup cost only if consider_startup */
+ if (startup_cost > old_path->startup_cost * STD_FUZZ_FACTOR ||
+ !consider_startup)
{
- /* new path can win on startup cost only if consider_startup */
- if (startup_cost > old_path->startup_cost * STD_FUZZ_FACTOR ||
- !consider_startup)
+ /* new path loses on cost, so check pathkeys... */
+ List *old_path_pathkeys;
+
+ old_path_pathkeys = old_path->param_info ? NIL : old_path->pathkeys;
+ keyscmp = compare_pathkeys(new_path_pathkeys,
+ old_path_pathkeys);
+ if (keyscmp == PATHKEYS_EQUAL ||
+ keyscmp == PATHKEYS_BETTER2)
{
- /* new path loses on cost, so check pathkeys... */
- List *old_path_pathkeys;
-
- old_path_pathkeys = old_path->param_info ? NIL : old_path->pathkeys;
- keyscmp = compare_pathkeys(new_path_pathkeys,
- old_path_pathkeys);
- if (keyscmp == PATHKEYS_EQUAL ||
- keyscmp == PATHKEYS_BETTER2)
+ /* new path does not win on pathkeys... */
+ if (bms_equal(required_outer, PATH_REQ_OUTER(old_path)))
{
- /* new path does not win on pathkeys... */
- if (bms_equal(required_outer, PATH_REQ_OUTER(old_path)))
- {
- /* Found an old path that dominates the new one */
- return false;
- }
+ /* Found an old path that dominates the new one */
+ return false;
}
}
}
- else
- {
- /*
- * Since the pathlist is sorted by total_cost, we can stop looking
- * once we reach a path with a total_cost larger than the new
- * path's.
- */
- break;
- }
}
return true;
@@ -734,7 +782,7 @@ add_path_precheck(RelOptInfo *parent_rel,
* produce the same number of rows. Neither do we need to consider startup
* costs: parallelism is only used for plans that will be run to completion.
* Therefore, this routine is much simpler than add_path: it needs to
- * consider only pathkeys and total cost.
+ * consider only disabled nodes, pathkeys and total cost.
*
* As with add_path, we pfree paths that are found to be dominated by
* another partial path; this requires that there be no other references to
@@ -775,7 +823,15 @@ add_partial_path(RelOptInfo *parent_rel, Path *new_path)
/* Unless pathkeys are incompatible, keep just one of the two paths. */
if (keyscmp != PATHKEYS_DIFFERENT)
{
- if (new_path->total_cost > old_path->total_cost * STD_FUZZ_FACTOR)
+ if (unlikely(new_path->disabled_nodes != old_path->disabled_nodes))
+ {
+ if (new_path->disabled_nodes > old_path->disabled_nodes)
+ accept_new = false;
+ else
+ remove_old = true;
+ }
+ else if (new_path->total_cost > old_path->total_cost
+ * STD_FUZZ_FACTOR)
{
/* New path costs more; keep it only if pathkeys are better. */
if (keyscmp != PATHKEYS_BETTER1)
@@ -862,8 +918,8 @@ add_partial_path(RelOptInfo *parent_rel, Path *new_path)
* is surely a loser.
*/
bool
-add_partial_path_precheck(RelOptInfo *parent_rel, Cost total_cost,
- List *pathkeys)
+add_partial_path_precheck(RelOptInfo *parent_rel, int disabled_nodes,
+ Cost total_cost, List *pathkeys)
{
ListCell *p1;
@@ -906,8 +962,8 @@ add_partial_path_precheck(RelOptInfo *parent_rel, Cost total_cost,
* partial path; the resulting plans, if run in parallel, will be run to
* completion.
*/
- if (!add_path_precheck(parent_rel, total_cost, total_cost, pathkeys,
- NULL))
+ if (!add_path_precheck(parent_rel, disabled_nodes, total_cost, total_cost,
+ pathkeys, NULL))
return false;
return true;
@@ -1419,6 +1475,7 @@ create_merge_append_path(PlannerInfo *root,
Relids required_outer)
{
MergeAppendPath *pathnode = makeNode(MergeAppendPath);
+ int input_disabled_nodes;
Cost input_startup_cost;
Cost input_total_cost;
ListCell *l;
@@ -1452,6 +1509,7 @@ create_merge_append_path(PlannerInfo *root,
* Add up the sizes and costs of the input paths.
*/
pathnode->path.rows = 0;
+ input_disabled_nodes = 0;
input_startup_cost = 0;
input_total_cost = 0;
foreach(l, subpaths)
@@ -1468,6 +1526,7 @@ create_merge_append_path(PlannerInfo *root,
if (pathkeys_contained_in(pathkeys, subpath->pathkeys))
{
/* Subpath is adequately ordered, we won't need to sort it */
+ input_disabled_nodes += subpath->disabled_nodes;
input_startup_cost += subpath->startup_cost;
input_total_cost += subpath->total_cost;
}
@@ -1479,12 +1538,14 @@ create_merge_append_path(PlannerInfo *root,
cost_sort(&sort_path,
root,
pathkeys,
+ subpath->disabled_nodes,
subpath->total_cost,
subpath->rows,
subpath->pathtarget->width,
0.0,
work_mem,
pathnode->limit_tuples);
+ input_disabled_nodes += sort_path.disabled_nodes;
input_startup_cost += sort_path.startup_cost;
input_total_cost += sort_path.total_cost;
}
@@ -1500,12 +1561,14 @@ create_merge_append_path(PlannerInfo *root,
((Path *) linitial(subpaths))->parallel_aware ==
pathnode->path.parallel_aware)
{
+ pathnode->path.disabled_nodes = input_disabled_nodes;
pathnode->path.startup_cost = input_startup_cost;
pathnode->path.total_cost = input_total_cost;
}
else
cost_merge_append(&pathnode->path, root,
pathkeys, list_length(subpaths),
+ input_disabled_nodes,
input_startup_cost, input_total_cost,
pathnode->path.rows);
@@ -1587,6 +1650,7 @@ create_material_path(RelOptInfo *rel, Path *subpath)
pathnode->subpath = subpath;
cost_material(&pathnode->path,
+ subpath->disabled_nodes,
subpath->startup_cost,
subpath->total_cost,
subpath->rows,
@@ -1633,6 +1697,10 @@ create_memoize_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath,
*/
pathnode->est_entries = 0;
+ /* we should not generate this path type when enable_memoize=false */
+ Assert(enable_memoize);
+ pathnode->path.disabled_nodes = subpath->disabled_nodes;
+
/*
* Add a small additional charge for caching the first entry. All the
* harder calculations for rescans are performed in cost_memoize_rescan().
@@ -1732,6 +1800,7 @@ create_unique_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath,
{
pathnode->umethod = UNIQUE_PATH_NOOP;
pathnode->path.rows = rel->rows;
+ pathnode->path.disabled_nodes = subpath->disabled_nodes;
pathnode->path.startup_cost = subpath->startup_cost;
pathnode->path.total_cost = subpath->total_cost;
pathnode->path.pathkeys = subpath->pathkeys;
@@ -1770,6 +1839,7 @@ create_unique_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath,
{
pathnode->umethod = UNIQUE_PATH_NOOP;
pathnode->path.rows = rel->rows;
+ pathnode->path.disabled_nodes = subpath->disabled_nodes;
pathnode->path.startup_cost = subpath->startup_cost;
pathnode->path.total_cost = subpath->total_cost;
pathnode->path.pathkeys = subpath->pathkeys;
@@ -1797,6 +1867,7 @@ create_unique_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath,
* Estimate cost for sort+unique implementation
*/
cost_sort(&sort_path, root, NIL,
+ subpath->disabled_nodes,
subpath->total_cost,
rel->rows,
subpath->pathtarget->width,
@@ -1834,6 +1905,7 @@ create_unique_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath,
AGG_HASHED, NULL,
numCols, pathnode->path.rows,
NIL,
+ subpath->disabled_nodes,
subpath->startup_cost,
subpath->total_cost,
rel->rows,
@@ -1842,7 +1914,9 @@ create_unique_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath,
if (sjinfo->semi_can_btree && sjinfo->semi_can_hash)
{
- if (agg_path.total_cost < sort_path.total_cost)
+ if (agg_path.disabled_nodes < sort_path.disabled_nodes ||
+ (agg_path.disabled_nodes == sort_path.disabled_nodes &&
+ agg_path.total_cost < sort_path.total_cost))
pathnode->umethod = UNIQUE_PATH_HASH;
else
pathnode->umethod = UNIQUE_PATH_SORT;
@@ -1860,11 +1934,13 @@ create_unique_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath,
if (pathnode->umethod == UNIQUE_PATH_HASH)
{
+ pathnode->path.disabled_nodes = agg_path.disabled_nodes;
pathnode->path.startup_cost = agg_path.startup_cost;
pathnode->path.total_cost = agg_path.total_cost;
}
else
{
+ pathnode->path.disabled_nodes = sort_path.disabled_nodes;
pathnode->path.startup_cost = sort_path.startup_cost;
pathnode->path.total_cost = sort_path.total_cost;
}
@@ -1888,6 +1964,7 @@ create_gather_merge_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath,
Relids required_outer, double *rows)
{
GatherMergePath *pathnode = makeNode(GatherMergePath);
+ int input_disabled_nodes = 0;
Cost input_startup_cost = 0;
Cost input_total_cost = 0;
@@ -1915,11 +1992,13 @@ create_gather_merge_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath,
pathnode->path.pathkeys = pathkeys;
pathnode->path.pathtarget = target ? target : rel->reltarget;
+ input_disabled_nodes += subpath->disabled_nodes;
input_startup_cost += subpath->startup_cost;
input_total_cost += subpath->total_cost;
cost_gather_merge(pathnode, root, rel, pathnode->path.param_info,
- input_startup_cost, input_total_cost, rows);
+ input_disabled_nodes, input_startup_cost,
+ input_total_cost, rows);
return pathnode;
}
@@ -2227,7 +2306,8 @@ create_worktablescan_path(PlannerInfo *root, RelOptInfo *rel,
ForeignPath *
create_foreignscan_path(PlannerInfo *root, RelOptInfo *rel,
PathTarget *target,
- double rows, Cost startup_cost, Cost total_cost,
+ double rows, int disabled_nodes,
+ Cost startup_cost, Cost total_cost,
List *pathkeys,
Relids required_outer,
Path *fdw_outerpath,
@@ -2248,6 +2328,7 @@ create_foreignscan_path(PlannerInfo *root, RelOptInfo *rel,
pathnode->path.parallel_safe = rel->consider_parallel;
pathnode->path.parallel_workers = 0;
pathnode->path.rows = rows;
+ pathnode->path.disabled_nodes = disabled_nodes;
pathnode->path.startup_cost = startup_cost;
pathnode->path.total_cost = total_cost;
pathnode->path.pathkeys = pathkeys;
@@ -2273,7 +2354,8 @@ create_foreignscan_path(PlannerInfo *root, RelOptInfo *rel,
ForeignPath *
create_foreign_join_path(PlannerInfo *root, RelOptInfo *rel,
PathTarget *target,
- double rows, Cost startup_cost, Cost total_cost,
+ double rows, int disabled_nodes,
+ Cost startup_cost, Cost total_cost,
List *pathkeys,
Relids required_outer,
Path *fdw_outerpath,
@@ -2300,6 +2382,7 @@ create_foreign_join_path(PlannerInfo *root, RelOptInfo *rel,
pathnode->path.parallel_safe = rel->consider_parallel;
pathnode->path.parallel_workers = 0;
pathnode->path.rows = rows;
+ pathnode->path.disabled_nodes = disabled_nodes;
pathnode->path.startup_cost = startup_cost;
pathnode->path.total_cost = total_cost;
pathnode->path.pathkeys = pathkeys;
@@ -2325,7 +2408,8 @@ create_foreign_join_path(PlannerInfo *root, RelOptInfo *rel,
ForeignPath *
create_foreign_upper_path(PlannerInfo *root, RelOptInfo *rel,
PathTarget *target,
- double rows, Cost startup_cost, Cost total_cost,
+ double rows, int disabled_nodes,
+ Cost startup_cost, Cost total_cost,
List *pathkeys,
Path *fdw_outerpath,
List *fdw_restrictinfo,
@@ -2347,6 +2431,7 @@ create_foreign_upper_path(PlannerInfo *root, RelOptInfo *rel,
pathnode->path.parallel_safe = rel->consider_parallel;
pathnode->path.parallel_workers = 0;
pathnode->path.rows = rows;
+ pathnode->path.disabled_nodes = disabled_nodes;
pathnode->path.startup_cost = startup_cost;
pathnode->path.total_cost = total_cost;
pathnode->path.pathkeys = pathkeys;
@@ -2734,6 +2819,7 @@ create_projection_path(PlannerInfo *root,
* Set cost of plan as subpath's cost, adjusted for tlist replacement.
*/
pathnode->path.rows = subpath->rows;
+ pathnode->path.disabled_nodes = subpath->disabled_nodes;
pathnode->path.startup_cost = subpath->startup_cost +
(target->cost.startup - oldtarget->cost.startup);
pathnode->path.total_cost = subpath->total_cost +
@@ -2750,6 +2836,7 @@ create_projection_path(PlannerInfo *root,
* evaluating the tlist. There is no qual to worry about.
*/
pathnode->path.rows = subpath->rows;
+ pathnode->path.disabled_nodes = subpath->disabled_nodes;
pathnode->path.startup_cost = subpath->startup_cost +
target->cost.startup;
pathnode->path.total_cost = subpath->total_cost +
@@ -2917,6 +3004,7 @@ create_set_projection_path(PlannerInfo *root,
* This is slightly bizarre maybe, but it's what 9.6 did; we may revisit
* this estimate later.
*/
+ pathnode->path.disabled_nodes = subpath->disabled_nodes;
pathnode->path.rows = subpath->rows * tlist_rows;
pathnode->path.startup_cost = subpath->startup_cost +
target->cost.startup;
@@ -2967,6 +3055,7 @@ create_incremental_sort_path(PlannerInfo *root,
cost_incremental_sort(&pathnode->path,
root, pathkeys, presorted_keys,
+ subpath->disabled_nodes,
subpath->startup_cost,
subpath->total_cost,
subpath->rows,
@@ -3013,6 +3102,7 @@ create_sort_path(PlannerInfo *root,
pathnode->subpath = subpath;
cost_sort(&pathnode->path, root, pathkeys,
+ subpath->disabled_nodes,
subpath->total_cost,
subpath->rows,
subpath->pathtarget->width,
@@ -3065,6 +3155,7 @@ create_group_path(PlannerInfo *root,
list_length(groupClause),
numGroups,
qual,
+ subpath->disabled_nodes,
subpath->startup_cost, subpath->total_cost,
subpath->rows);
@@ -3122,6 +3213,7 @@ create_upper_unique_path(PlannerInfo *root,
* all columns get compared at most of the tuples. (XXX probably this is
* an overestimate.)
*/
+ pathnode->path.disabled_nodes = subpath->disabled_nodes;
pathnode->path.startup_cost = subpath->startup_cost;
pathnode->path.total_cost = subpath->total_cost +
cpu_operator_cost * subpath->rows * numCols;
@@ -3200,6 +3292,7 @@ create_agg_path(PlannerInfo *root,
aggstrategy, aggcosts,
list_length(groupClause), numGroups,
qual,
+ subpath->disabled_nodes,
subpath->startup_cost, subpath->total_cost,
subpath->rows, subpath->pathtarget->width);
@@ -3308,6 +3401,7 @@ create_groupingsets_path(PlannerInfo *root,
numGroupCols,
rollup->numGroups,
having_qual,
+ subpath->disabled_nodes,
subpath->startup_cost,
subpath->total_cost,
subpath->rows,
@@ -3333,7 +3427,7 @@ create_groupingsets_path(PlannerInfo *root,
numGroupCols,
rollup->numGroups,
having_qual,
- 0.0, 0.0,
+ 0, 0.0, 0.0,
subpath->rows,
subpath->pathtarget->width);
if (!rollup->is_hashed)
@@ -3342,7 +3436,7 @@ create_groupingsets_path(PlannerInfo *root,
else
{
/* Account for cost of sort, but don't charge input cost again */
- cost_sort(&sort_path, root, NIL,
+ cost_sort(&sort_path, root, NIL, 0,
0.0,
subpath->rows,
subpath->pathtarget->width,
@@ -3358,12 +3452,14 @@ create_groupingsets_path(PlannerInfo *root,
numGroupCols,
rollup->numGroups,
having_qual,
+ sort_path.disabled_nodes,
sort_path.startup_cost,
sort_path.total_cost,
sort_path.rows,
subpath->pathtarget->width);
}
+ pathnode->path.disabled_nodes += agg_path.disabled_nodes;
pathnode->path.total_cost += agg_path.total_cost;
pathnode->path.rows += agg_path.rows;
}
@@ -3395,6 +3491,7 @@ create_minmaxagg_path(PlannerInfo *root,
{
MinMaxAggPath *pathnode = makeNode(MinMaxAggPath);
Cost initplan_cost;
+ int initplan_disabled_nodes = 0;
ListCell *lc;
/* The topmost generated Plan node will be a Result */
@@ -3419,12 +3516,14 @@ create_minmaxagg_path(PlannerInfo *root,
{
MinMaxAggInfo *mminfo = (MinMaxAggInfo *) lfirst(lc);
+ initplan_disabled_nodes += mminfo->path->disabled_nodes;
initplan_cost += mminfo->pathcost;
if (!mminfo->path->parallel_safe)
pathnode->path.parallel_safe = false;
}
/* add tlist eval cost for each output row, plus cpu_tuple_cost */
+ pathnode->path.disabled_nodes = initplan_disabled_nodes;
pathnode->path.startup_cost = initplan_cost + target->cost.startup;
pathnode->path.total_cost = initplan_cost + target->cost.startup +
target->cost.per_tuple + cpu_tuple_cost;
@@ -3517,6 +3616,7 @@ create_windowagg_path(PlannerInfo *root,
cost_windowagg(&pathnode->path, root,
windowFuncs,
winclause,
+ subpath->disabled_nodes,
subpath->startup_cost,
subpath->total_cost,
subpath->rows);
@@ -3584,6 +3684,7 @@ create_setop_path(PlannerInfo *root,
* Charge one cpu_operator_cost per comparison per input tuple. We assume
* all columns get compared at most of the tuples.
*/
+ pathnode->path.disabled_nodes = subpath->disabled_nodes;
pathnode->path.startup_cost = subpath->startup_cost;
pathnode->path.total_cost = subpath->total_cost +
cpu_operator_cost * subpath->rows * list_length(distinctList);
@@ -3683,6 +3784,7 @@ create_lockrows_path(PlannerInfo *root, RelOptInfo *rel,
* possible refetches, but it's hard to say how much. For now, use
* cpu_tuple_cost per row.
*/
+ pathnode->path.disabled_nodes = subpath->disabled_nodes;
pathnode->path.startup_cost = subpath->startup_cost;
pathnode->path.total_cost = subpath->total_cost +
cpu_tuple_cost * subpath->rows;
@@ -3759,6 +3861,7 @@ create_modifytable_path(PlannerInfo *root, RelOptInfo *rel,
* costs to change any higher-level planning choices. But we might want
* to make it look better sometime.
*/
+ pathnode->path.disabled_nodes = subpath->disabled_nodes;
pathnode->path.startup_cost = subpath->startup_cost;
pathnode->path.total_cost = subpath->total_cost;
if (returningLists != NIL)
@@ -3835,6 +3938,7 @@ create_limit_path(PlannerInfo *root, RelOptInfo *rel,
subpath->parallel_safe;
pathnode->path.parallel_workers = subpath->parallel_workers;
pathnode->path.rows = subpath->rows;
+ pathnode->path.disabled_nodes = subpath->disabled_nodes;
pathnode->path.startup_cost = subpath->startup_cost;
pathnode->path.total_cost = subpath->total_cost;
pathnode->path.pathkeys = subpath->pathkeys;