| | | |
|---|---|---|
| author | Tom Lane <tgl@sss.pgh.pa.us> | 2015-07-30 12:11:23 -0400 |
| committer | Tom Lane <tgl@sss.pgh.pa.us> | 2015-07-30 12:11:23 -0400 |
| commit | c7d1712519085b46b084412631ab3a0c27a7a1a6 | |
| tree | 629dadff686c401e99f37a0c8a204cefb0683486 | |
| parent | 0a89f3bc6e9b1e911e9efd8132377fe5c6838c66 | |
Avoid some zero-divide hazards in the planner.

Although I think that on all modern machines floating-point division by
zero results in Infinity rather than SIGFPE, we still don't want
infinities running around in the planner's costing estimates; there is
too much risk of that leading to insane behavior.
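For illustration, a standalone C snippet (not PostgreSQL code) showing the behavior in question: under IEEE 754 rules, floating-point division by zero quietly yields an infinity, which then propagates through all later arithmetic instead of trapping.

```c
#include <stdio.h>

int main(void)
{
    double rows = 0.0;          /* a zero row estimate */

    /* No SIGFPE here: IEEE 754 float division by zero yields inf */
    printf("100.0 / rows   = %g\n", 100.0 / rows);

    /* The infinity silently poisons every downstream estimate */
    printf("result * 0.001 = %g\n", (100.0 / rows) * 0.001);
    return 0;
}
```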
grouping_planner() failed to consider the possibility that final_rel
might be known dummy (provably empty) and hence have a zero rowcount.
(I wonder whether it would be better to set a rows estimate of 1 for
dummy relations? But at least in the back branches, changing this
convention seems like a bad idea, so I'll leave that for another day.)
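This and the following fixes all funnel through clamp_row_est(), the planner's standard row-estimate sanitizer. For reference, its definition in src/backend/optimizer/path/costsize.c is essentially the following (comments paraphrased):

```c
#include <math.h>

double
clamp_row_est(double nrows)
{
    /*
     * Force the estimate to be at least one row, avoiding zero divides
     * downstream, and round it to an integral value.
     */
    if (nrows <= 1.0)
        nrows = 1.0;
    else
        nrows = rint(nrows);

    return nrows;
}
```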
Make certain that get_variable_numdistinct() produces a nonzero result.
The demonstrably broken case is stadistinct < 0.0 combined with small
ntuples, where nothing prevented the result from rounding to zero. For
good measure, I applied clamp_row_est() to all the nonconstant return
values.
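Concretely, with hypothetical stats of stadistinct = -0.1 (pg_statistic stores relative estimates as negative fractions of the row count) and ntuples = 4, the old return expression computes floor(0.4 + 0.5) = 0. A standalone sketch (not PostgreSQL code) with clamp_row_est()'s behavior inlined:

```c
#include <math.h>
#include <stdio.h>

int main(void)
{
    double stadistinct = -0.1;  /* hypothetical: 10% of rows are distinct */
    double ntuples = 4.0;       /* hypothetical: very small table */
    double est = -stadistinct * ntuples;    /* 0.4 expected distinct values */

    /* old code: rounds 0.4 down to zero, a zero-divide hazard for callers */
    printf("old: %g\n", floor(est + 0.5));

    /* new code: clamp_row_est() forces a positive integral result */
    printf("new: %g\n", est <= 1.0 ? 1.0 : rint(est));
    return 0;
}
```

This prints old: 0 and new: 1, which is exactly the rounding-to-zero case the commit closes off.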
In ExecChooseHashTableSize(), Assert that we compute positive nbuckets
and nbatch. I know of no reason to think this isn't the case, but it
seems like a good safety check.
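The commit asserts rather than clamps here because no concrete failure is known; the nearest theoretical hazard is integer overflow in the nbuckets = (1 << i) computation. A standalone sketch (not PostgreSQL code) of what such an overflow would look like with a 32-bit int:

```c
#include <stdio.h>

int main(void)
{
    /*
     * Shifting 1 past bit 30 no longer fits in a 32-bit int; the converted
     * value is negative on typical two's-complement machines (shifting a
     * plain 32-bit int by 31 is undefined behavior, hence the 1LL here).
     */
    for (int i = 29; i <= 31; i++)
        printf("i = %d: (int) (1LL << i) = %d\n", i, (int) (1LL << i));
    return 0;
}
```

A non-positive nbuckets or nbatch would escape silently in a production build; the new Asserts make it fail loudly in assert-enabled builds instead.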
Per reports from Piotr Stefaniak. Back-patch to all active branches.
```
 src/backend/executor/nodeHash.c       |  3 +++
 src/backend/optimizer/plan/planmain.c |  2 +-
 src/backend/optimizer/plan/planner.c  |  7 +++++--
 src/backend/utils/adt/selfuncs.c      | 10 +++++-----
 4 files changed, 14 insertions(+), 8 deletions(-)
```
```diff
diff --git a/src/backend/executor/nodeHash.c b/src/backend/executor/nodeHash.c
index 8cb5611671b..26eff5eb8a5 100644
--- a/src/backend/executor/nodeHash.c
+++ b/src/backend/executor/nodeHash.c
@@ -508,6 +508,9 @@ ExecChooseHashTableSize(double ntuples, int tupwidth, bool useskew,
         i++;
     nbuckets = (1 << i);
 
+    Assert(nbuckets > 0);
+    Assert(nbatch > 0);
+
     *numbuckets = nbuckets;
     *numbatches = nbatch;
 }
diff --git a/src/backend/optimizer/plan/planmain.c b/src/backend/optimizer/plan/planmain.c
index bb163d0cab2..6df8ff6d5f3 100644
--- a/src/backend/optimizer/plan/planmain.c
+++ b/src/backend/optimizer/plan/planmain.c
@@ -345,7 +345,7 @@ query_planner(PlannerInfo *root, List *tlist,
          * can be divided by the number of tuples.
          */
         if (tuple_fraction >= 1.0)
-            tuple_fraction /= final_rel->rows;
+            tuple_fraction /= clamp_row_est(final_rel->rows);
     }
 
     /*
diff --git a/src/backend/optimizer/plan/planner.c b/src/backend/optimizer/plan/planner.c
index e64de3aa67a..8e90e193bff 100644
--- a/src/backend/optimizer/plan/planner.c
+++ b/src/backend/optimizer/plan/planner.c
@@ -1247,11 +1247,14 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
         /*
          * Extract rowcount and width estimates for possible use in grouping
          * decisions.  Beware here of the possibility that
-         * cheapest_path->parent is NULL (ie, there is no FROM clause).
+         * cheapest_path->parent is NULL (ie, there is no FROM clause).  Also,
+         * if the final rel has been proven dummy, its rows estimate will be
+         * zero; clamp it to one to avoid zero-divide in subsequent
+         * calculations.
          */
         if (cheapest_path->parent)
         {
-            path_rows = cheapest_path->parent->rows;
+            path_rows = clamp_row_est(cheapest_path->parent->rows);
             path_width = cheapest_path->parent->width;
         }
         else
diff --git a/src/backend/utils/adt/selfuncs.c b/src/backend/utils/adt/selfuncs.c
index 94a2372ecaa..9b5e5ab9c25 100644
--- a/src/backend/utils/adt/selfuncs.c
+++ b/src/backend/utils/adt/selfuncs.c
@@ -4616,8 +4616,8 @@ examine_simple_variable(PlannerInfo *root, Var *var,
  * *isdefault: set to TRUE if the result is a default rather than based on
  * anything meaningful.
  *
- * NB: be careful to produce an integral result, since callers may compare
- * the result to exact integer counts.
+ * NB: be careful to produce a positive integral result, since callers may
+ * compare the result to exact integer counts, or might divide by it.
  */
 double
 get_variable_numdistinct(VariableStatData *vardata, bool *isdefault)
@@ -4693,7 +4693,7 @@ get_variable_numdistinct(VariableStatData *vardata, bool *isdefault)
      * If we had an absolute estimate, use that.
      */
     if (stadistinct > 0.0)
-        return stadistinct;
+        return clamp_row_est(stadistinct);
 
     /*
      * Otherwise we need to get the relation size; punt if not available.
@@ -4714,7 +4714,7 @@ get_variable_numdistinct(VariableStatData *vardata, bool *isdefault)
      * If we had a relative estimate, use that.
      */
     if (stadistinct < 0.0)
-        return floor((-stadistinct * ntuples) + 0.5);
+        return clamp_row_est(-stadistinct * ntuples);
 
     /*
      * With no data, estimate ndistinct = ntuples if the table is small, else
@@ -4722,7 +4722,7 @@ get_variable_numdistinct(VariableStatData *vardata, bool *isdefault)
      * that the behavior isn't discontinuous.
      */
     if (ntuples < DEFAULT_NUM_DISTINCT)
-        return ntuples;
+        return clamp_row_est(ntuples);
 
     *isdefault = true;
     return DEFAULT_NUM_DISTINCT;
```