author	Tom Lane <tgl@sss.pgh.pa.us>	2009-10-30 20:59:03 +0000
committer	Tom Lane <tgl@sss.pgh.pa.us>	2009-10-30 20:59:03 +0000
commit	818d2014c40da6ada093c5fd591ce363266e349d (patch)
tree	6c13dd7c4d417a9db7563f03a86efcf01d1784e5
parent	c4fb0b23644787a6a8f12410b27cc0988d39e465 (diff)
Make the overflow guards in ExecChooseHashTableSize be more protective.
The original coding ensured nbuckets and nbatch didn't exceed INT_MAX, which
while not insane on its own terms did nothing to protect subsequent code like
"palloc(nbatch * sizeof(BufFile *))". Since enormous join size estimates might
well be planner error rather than reality, it seems best to constrain the
initial sizes to be not more than work_mem/sizeof(pointer), thus ensuring the
allocated arrays don't exceed work_mem. We will allow nbatch to get bigger than
that during subsequent ExecHashIncreaseNumBatches calls, but we should still
guard against integer overflow in those palloc requests.

Per bug #5145 from Bernt Marius Johnsen. Although the given test case only
seems to fail back to 8.2, previous releases have variants of this issue, so
patch all supported branches.
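As a concrete illustration of the clamping policy described above, here is a
minimal standalone C sketch; it is not PostgreSQL source. work_mem_kb stands
in for the work_mem GUC (in kilobytes), Min() is redefined locally in the
style of the backend macro, and clamp_pointer_array_size() is a hypothetical
helper invented for this example.

#include <limits.h>
#include <stdio.h>

#define Min(x, y) ((x) < (y) ? (x) : (y))

/*
 * Clamp a requested pointer-array size the way the patched
 * ExecChooseHashTableSize does: never let the array exceed work_mem,
 * and never let the count get near INT_MAX.
 */
static long
clamp_pointer_array_size(long work_mem_kb, double requested)
{
	/* largest pointer array that still fits in work_mem */
	long		max_pointers = (work_mem_kb * 1024L) / sizeof(void *);

	/* also ensure we avoid integer overflow in nbatch and nbuckets */
	max_pointers = Min(max_pointers, INT_MAX / 2);

	return (long) Min(requested, (double) max_pointers);
}

int
main(void)
{
	/*
	 * With work_mem = 1MB, a wildly overestimated join (here 1e12
	 * pointers' worth) is cut down to 131072 slots instead of driving
	 * a multi-gigabyte palloc request.
	 */
	printf("%ld\n", clamp_pointer_array_size(1024, 1e12));
	return 0;
}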
-rw-r--r--	src/backend/executor/nodeHash.c	18
1 file changed, 12 insertions(+), 6 deletions(-)
diff --git a/src/backend/executor/nodeHash.c b/src/backend/executor/nodeHash.c
index b36832b8384..953e1242c82 100644
--- a/src/backend/executor/nodeHash.c
+++ b/src/backend/executor/nodeHash.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeHash.c,v 1.107.2.1 2007/06/01 15:58:01 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeHash.c,v 1.107.2.2 2009/10/30 20:59:03 tgl Exp $
*
*-------------------------------------------------------------------------
*/
@@ -350,6 +350,7 @@ ExecChooseHashTableSize(double ntuples, int tupwidth,
int tupsize;
double inner_rel_bytes;
long hash_table_bytes;
+ long max_pointers;
int nbatch;
int nbuckets;
int i;
@@ -376,8 +377,13 @@ ExecChooseHashTableSize(double ntuples, int tupwidth,
/*
* Set nbuckets to achieve an average bucket load of NTUP_PER_BUCKET when
* memory is filled. Set nbatch to the smallest power of 2 that appears
- * sufficient.
+ * sufficient. The Min() steps limit the results so that the pointer
+ * arrays we'll try to allocate do not exceed work_mem.
*/
+ max_pointers = (work_mem * 1024L) / sizeof(void *);
+ /* also ensure we avoid integer overflow in nbatch and nbuckets */
+ max_pointers = Min(max_pointers, INT_MAX / 2);
+
if (inner_rel_bytes > hash_table_bytes)
{
/* We'll need multiple batches */
@@ -386,11 +392,11 @@ ExecChooseHashTableSize(double ntuples, int tupwidth,
int minbatch;

lbuckets = (hash_table_bytes / tupsize) / NTUP_PER_BUCKET;
- lbuckets = Min(lbuckets, INT_MAX);
+ lbuckets = Min(lbuckets, max_pointers);
nbuckets = (int) lbuckets;

dbatch = ceil(inner_rel_bytes / hash_table_bytes);
- dbatch = Min(dbatch, INT_MAX / 2);
+ dbatch = Min(dbatch, max_pointers);
minbatch = (int) dbatch;
nbatch = 2;
while (nbatch < minbatch)
@@ -402,7 +408,7 @@ ExecChooseHashTableSize(double ntuples, int tupwidth,
double dbuckets;

dbuckets = ceil(ntuples / NTUP_PER_BUCKET);
- dbuckets = Min(dbuckets, INT_MAX);
+ dbuckets = Min(dbuckets, max_pointers);
nbuckets = (int) dbuckets;

nbatch = 1;
@@ -481,7 +487,7 @@ ExecHashIncreaseNumBatches(HashJoinTable hashtable)
return;

/* safety check to avoid overflow */
- if (oldnbatch > INT_MAX / 2)
+ if (oldnbatch > Min(INT_MAX / 2, MaxAllocSize / (sizeof(void *) * 2)))
return;

nbatch = oldnbatch * 2;
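The strengthened guard above can be checked with a little arithmetic. With the
stock MaxAllocSize of 0x3fffffff (from PostgreSQL's memutils.h) and 8-byte
pointers, the doubling is refused once oldnbatch exceeds 67,108,863, so nbatch
never exceeds 134,217,726 and the batch-file pointer arrays stay within one
allocation: nbatch * sizeof(void *) <= MaxAllocSize. A hedged standalone check,
with MaxAllocSize and Min() redefined locally rather than taken from the
backend headers:

#include <assert.h>
#include <limits.h>
#include <stddef.h>

#define MaxAllocSize ((size_t) 0x3fffffff)	/* 1 GB - 1, as in memutils.h */
#define Min(x, y) ((x) < (y) ? (x) : (y))

int
main(void)
{
	/* the largest oldnbatch the new guard lets through */
	size_t		oldnbatch = Min((size_t) INT_MAX / 2,
								MaxAllocSize / (sizeof(void *) * 2));
	size_t		nbatch = oldnbatch * 2;

	/* the doubled pointer array still fits in one palloc chunk */
	assert(nbatch * sizeof(void *) <= MaxAllocSize);
	/* and nbatch itself still fits in an int */
	assert(nbatch <= (size_t) INT_MAX);
	return 0;
}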