diff options
author | Tom Lane <tgl@sss.pgh.pa.us> | 2003-12-30 20:05:15 +0000 |
---|---|---|
committer | Tom Lane <tgl@sss.pgh.pa.us> | 2003-12-30 20:05:15 +0000 |
commit | 73021301793a824b8ffbab9037498f726752b1f5 (patch) | |
tree | f7ad067d711223f82b36d78e3cf89cbd1c98a3ad /src/backend/executor/nodeIndexscan.c | |
parent | 386258d3e59ef9c2945577639dcff81b159f86e7 (diff) | |
download | postgresql-73021301793a824b8ffbab9037498f726752b1f5.tar.gz postgresql-73021301793a824b8ffbab9037498f726752b1f5.zip |
Avoid running out of memory during hash_create, by not passing a
number-of-buckets that exceeds the size we actually plan to allow
the hash table to grow to. Per trouble report from Sean Shanny.
Diffstat (limited to 'src/backend/executor/nodeIndexscan.c')
-rw-r--r-- | src/backend/executor/nodeIndexscan.c | 14 |
1 file changed, 10 insertions(+), 4 deletions(-)
diff --git a/src/backend/executor/nodeIndexscan.c b/src/backend/executor/nodeIndexscan.c index 6ab2f0a47bd..070067816dd 100644 --- a/src/backend/executor/nodeIndexscan.c +++ b/src/backend/executor/nodeIndexscan.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $Header: /cvsroot/pgsql/src/backend/executor/nodeIndexscan.c,v 1.84 2003/09/24 18:54:01 tgl Exp $ + * $Header: /cvsroot/pgsql/src/backend/executor/nodeIndexscan.c,v 1.84.2.1 2003/12/30 20:05:15 tgl Exp $ * *------------------------------------------------------------------------- */ @@ -1019,22 +1019,28 @@ static void create_duphash(IndexScanState *node) { HASHCTL hash_ctl; + long nbuckets; + node->iss_MaxHash = (SortMem * 1024L) / + (MAXALIGN(sizeof(HASHELEMENT)) + MAXALIGN(sizeof(DupHashTabEntry))); MemSet(&hash_ctl, 0, sizeof(hash_ctl)); hash_ctl.keysize = SizeOfIptrData; hash_ctl.entrysize = sizeof(DupHashTabEntry); hash_ctl.hash = tag_hash; hash_ctl.hcxt = CurrentMemoryContext; + nbuckets = (long) ceil(node->ss.ps.plan->plan_rows); + if (nbuckets < 1) + nbuckets = 1; + if (nbuckets > node->iss_MaxHash) + nbuckets = node->iss_MaxHash; node->iss_DupHash = hash_create("DupHashTable", - (long) ceil(node->ss.ps.plan->plan_rows), + nbuckets, &hash_ctl, HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT); if (node->iss_DupHash == NULL) ereport(ERROR, (errcode(ERRCODE_OUT_OF_MEMORY), errmsg("out of memory"))); - node->iss_MaxHash = (SortMem * 1024L) / - (MAXALIGN(sizeof(HASHELEMENT)) + MAXALIGN(sizeof(DupHashTabEntry))); } int |