path: root/src/backend/executor/nodeHash.c
author     Tom Lane <tgl@sss.pgh.pa.us>  2009-10-30 20:59:10 +0000
committer  Tom Lane <tgl@sss.pgh.pa.us>  2009-10-30 20:59:10 +0000
commit     3a2e45720429cc4328ba1956478ac3d5140bf282 (patch)
tree       7117538933211c989d683ac649ac41ba9a38b00c /src/backend/executor/nodeHash.c
parent     08dd656b71202911be7bcf4f554558a18138c867 (diff)
download   postgresql-3a2e45720429cc4328ba1956478ac3d5140bf282.tar.gz
           postgresql-3a2e45720429cc4328ba1956478ac3d5140bf282.zip
Make the overflow guards in ExecChooseHashTableSize be more protective.
The original coding ensured nbuckets and nbatch didn't exceed INT_MAX, which while not insane on its own terms did nothing to protect subsequent code like "palloc(nbatch * sizeof(BufFile *))". Since enormous join size estimates might well be planner error rather than reality, it seems best to constrain the initial sizes to be not more than work_mem/sizeof(pointer), thus ensuring the allocated arrays don't exceed work_mem. We will allow nbatch to get bigger than that during subsequent ExecHashIncreaseNumBatches calls, but we should still guard against integer overflow in those palloc requests.

Per bug #5145 from Bernt Marius Johnsen. Although the given test case only seems to fail back to 8.2, previous releases have variants of this issue, so patch all supported branches.
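For illustration only (not part of the patch): a minimal standalone C sketch of the clamping arithmetic described above. The work_mem value below is an assumed setting in kilobytes, and Min() is redefined locally to mimic the backend macro.

/* Standalone sketch: how the patch caps nbuckets/nbatch. */
#include <limits.h>
#include <stdio.h>

#define Min(a, b) ((a) < (b) ? (a) : (b))	/* mimics the backend macro */

int
main(void)
{
	long		work_mem = 1024;	/* assumed setting, in kB */
	long		max_pointers;

	/* limit bucket/batch counts so the pointer arrays stay within work_mem */
	max_pointers = (work_mem * 1024L) / sizeof(void *);
	/* also ensure we avoid integer overflow in nbatch and nbuckets */
	max_pointers = Min(max_pointers, INT_MAX / 2);

	printf("max_pointers = %ld\n", max_pointers);
	return 0;
}

With work_mem = 1024 kB and 8-byte pointers, max_pointers comes out to 131,072, so the initial nbuckets and nbatch start far below the old INT_MAX ceiling.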
Diffstat (limited to 'src/backend/executor/nodeHash.c')
-rw-r--r--  src/backend/executor/nodeHash.c  18
1 file changed, 12 insertions, 6 deletions
diff --git a/src/backend/executor/nodeHash.c b/src/backend/executor/nodeHash.c
index 826cafe8737..37487baec1a 100644
--- a/src/backend/executor/nodeHash.c
+++ b/src/backend/executor/nodeHash.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeHash.c,v 1.96.2.3 2007/06/01 15:58:09 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeHash.c,v 1.96.2.4 2009/10/30 20:59:10 tgl Exp $
*
*-------------------------------------------------------------------------
*/
@@ -345,6 +345,7 @@ ExecChooseHashTableSize(double ntuples, int tupwidth,
int tupsize;
double inner_rel_bytes;
long hash_table_bytes;
+ long max_pointers;
int nbatch;
int nbuckets;
int i;
@@ -371,8 +372,13 @@ ExecChooseHashTableSize(double ntuples, int tupwidth,
/*
* Set nbuckets to achieve an average bucket load of NTUP_PER_BUCKET when
* memory is filled. Set nbatch to the smallest power of 2 that appears
- * sufficient.
+ * sufficient. The Min() steps limit the results so that the pointer
+ * arrays we'll try to allocate do not exceed work_mem.
*/
+ max_pointers = (work_mem * 1024L) / sizeof(void *);
+ /* also ensure we avoid integer overflow in nbatch and nbuckets */
+ max_pointers = Min(max_pointers, INT_MAX / 2);
+
if (inner_rel_bytes > hash_table_bytes)
{
/* We'll need multiple batches */
@@ -381,11 +387,11 @@ ExecChooseHashTableSize(double ntuples, int tupwidth,
int minbatch;

lbuckets = (hash_table_bytes / tupsize) / NTUP_PER_BUCKET;
- lbuckets = Min(lbuckets, INT_MAX);
+ lbuckets = Min(lbuckets, max_pointers);
nbuckets = (int) lbuckets;

dbatch = ceil(inner_rel_bytes / hash_table_bytes);
- dbatch = Min(dbatch, INT_MAX / 2);
+ dbatch = Min(dbatch, max_pointers);
minbatch = (int) dbatch;
nbatch = 2;
while (nbatch < minbatch)
@@ -397,7 +403,7 @@ ExecChooseHashTableSize(double ntuples, int tupwidth,
double dbuckets;

dbuckets = ceil(ntuples / NTUP_PER_BUCKET);
- dbuckets = Min(dbuckets, INT_MAX);
+ dbuckets = Min(dbuckets, max_pointers);
nbuckets = (int) dbuckets;

nbatch = 1;
@@ -476,7 +482,7 @@ ExecHashIncreaseNumBatches(HashJoinTable hashtable)
return;

/* safety check to avoid overflow */
- if (oldnbatch > INT_MAX / 2)
+ if (oldnbatch > Min(INT_MAX / 2, MaxAllocSize / (sizeof(void *) * 2)))
return;

nbatch = oldnbatch * 2;
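A note on the last hunk, again for illustration only: the tightened bound can be checked in isolation. The sketch below is not backend code; MaxAllocSize is reproduced with the value the backend defines (0x3fffffff), Min() is redefined locally, and can_double_nbatch() is a made-up helper used only to show when doubling the batch count would push the batch-file pointer arrays past a safe palloc request.

/* Standalone sketch of the tightened guard in ExecHashIncreaseNumBatches(). */
#include <limits.h>
#include <stdio.h>

#define Min(a, b) ((a) < (b) ? (a) : (b))	/* mimics the backend macro */
#define MaxAllocSize ((size_t) 0x3fffffff)	/* value used by the backend (1 GB - 1) */

/* hypothetical helper: may the batch count be doubled safely? */
static int
can_double_nbatch(int oldnbatch)
{
	/*
	 * After doubling, the inner and outer batch-file arrays are
	 * reallocated to nbatch * sizeof(pointer) bytes; refuse once that
	 * request could exceed MaxAllocSize or the count could overflow an int.
	 */
	if ((size_t) oldnbatch > Min((size_t) (INT_MAX / 2),
								 MaxAllocSize / (sizeof(void *) * 2)))
		return 0;
	return 1;
}

int
main(void)
{
	printf("1024 batches -> %d\n", can_double_nbatch(1024));
	printf("100000000 batches -> %d\n", can_double_nbatch(100000000));
	return 0;
}

On a 64-bit build this refuses to double once there are more than 67,108,863 batches (0x3fffffff / 16), whereas the old check only refused beyond INT_MAX / 2.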