| author | Tom Lane <tgl@sss.pgh.pa.us> | 2005-05-29 04:23:07 +0000 |
|---|---|---|
| committer | Tom Lane <tgl@sss.pgh.pa.us> | 2005-05-29 04:23:07 +0000 |
| commit | e92a88272eb2b164e7e8c625ad3f7ad267e0224f (patch) | |
| tree | 3ca62f2a6cdd52f78911a50b628f16e77e70be78 /src/backend/utils | |
| parent | ecd70d75267ccf845e8f3976048d944e4ee35cf7 (diff) | |
Modify hash_search() API to prevent future occurrences of the error
spotted by Qingqing Zhou. The HASH_ENTER action now automatically
fails with elog(ERROR) on out-of-memory --- which incidentally lets
us eliminate duplicate error checks in quite a bunch of places. If
you really need the old return-NULL-on-out-of-memory behavior, you
can ask for HASH_ENTER_NULL. But there is now an Assert in that path
checking that you aren't hoping to get that behavior in a palloc-based
hash table.
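
At call sites the change is mechanical: the NULL check after `hash_search(..., HASH_ENTER, ...)` simply disappears, as the diff below shows for ri_triggers.c, relcache.c, typcache.c, fmgr.c and portalmem.c. A minimal sketch of the new calling convention, using a hypothetical `MyCacheHash` table and `MyCacheEntry` struct (not part of this commit):

```c
#include "postgres.h"
#include "utils/hsearch.h"

/* hypothetical entry type; the hash key must be the first field */
typedef struct MyCacheEntry
{
    Oid         key;
    void       *plan;
} MyCacheEntry;

static HTAB *MyCacheHash = NULL;    /* assumed built elsewhere with hash_create() */

static void
my_cache_insert(Oid key, void *plan)
{
    MyCacheEntry *entry;
    bool          found;

    /*
     * As of this commit HASH_ENTER ereports "out of memory" internally, so
     * the result can no longer be NULL and the old per-call-site check
     * (if (entry == NULL) ereport(ERROR, ...)) is gone.
     */
    entry = (MyCacheEntry *) hash_search(MyCacheHash,
                                         (void *) &key,
                                         HASH_ENTER, &found);
    entry->plan = plan;
}
```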
Along the way, remove the old HASH_FIND_SAVE/HASH_REMOVE_SAVED actions,
which were not being used anywhere anymore, and were surely too ugly
and unsafe to want to see revived again.
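
For reference, the deleted actions implemented a find/process/remove shortcut that stashed the located bucket in static state inside dynahash.c (see the comment removed from hash_search() in the diff below). A rough sketch of the old pattern and its straightforward replacement; `tab`, `key` and `process()` are placeholder names, not code from this commit:

```c
#include "postgres.h"
#include "utils/hsearch.h"

/* placeholder table and processing routine, for illustration only */
extern HTAB *tab;
extern void process(void *entry);

static void
find_process_remove(Oid key)
{
    void   *entry;

    /*
     * Old (removed) pattern: HASH_FIND_SAVE stashed the located bucket in
     * static state inside dynahash.c so that HASH_REMOVE_SAVED could skip
     * the second physical lookup:
     *
     *     entry = hash_search(tab, (void *) &key, HASH_FIND_SAVE, NULL);
     *     if (entry != NULL)
     *     {
     *         process(entry);
     *         hash_search(tab, (void *) &key, HASH_REMOVE_SAVED, NULL);
     *     }
     */

    /* Equivalent code after this commit: simply look the key up twice */
    entry = hash_search(tab, (void *) &key, HASH_FIND, NULL);
    if (entry != NULL)
    {
        process(entry);
        hash_search(tab, (void *) &key, HASH_REMOVE, NULL);
    }
}
```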
Diffstat (limited to 'src/backend/utils')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | src/backend/utils/adt/ri_triggers.c | 6 |
| -rw-r--r-- | src/backend/utils/cache/relcache.c | 10 |
| -rw-r--r-- | src/backend/utils/cache/typcache.c | 10 |
| -rw-r--r-- | src/backend/utils/fmgr/fmgr.c | 6 |
| -rw-r--r-- | src/backend/utils/hash/dynahash.c | 118 |
| -rw-r--r-- | src/backend/utils/mmgr/portalmem.c | 6 |
6 files changed, 58 insertions, 98 deletions
diff --git a/src/backend/utils/adt/ri_triggers.c b/src/backend/utils/adt/ri_triggers.c
index 78a85b7edcc..0dc12410124 100644
--- a/src/backend/utils/adt/ri_triggers.c
+++ b/src/backend/utils/adt/ri_triggers.c
@@ -17,7 +17,7 @@
  *
  * Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
  *
- * $PostgreSQL: pgsql/src/backend/utils/adt/ri_triggers.c,v 1.77 2005/04/28 21:47:15 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/ri_triggers.c,v 1.78 2005/05/29 04:23:05 tgl Exp $
  *
  * ----------
  */
@@ -3466,10 +3466,6 @@ ri_HashPreparedPlan(RI_QueryKey *key, void *plan)
     entry = (RI_QueryHashEntry *) hash_search(ri_query_cache,
                                               (void *) key,
                                               HASH_ENTER, &found);
-    if (entry == NULL)
-        ereport(ERROR,
-                (errcode(ERRCODE_OUT_OF_MEMORY),
-                 errmsg("out of memory")));
 
     entry->plan = plan;
 }

diff --git a/src/backend/utils/cache/relcache.c b/src/backend/utils/cache/relcache.c
index 3123eca518e..7b140228c8c 100644
--- a/src/backend/utils/cache/relcache.c
+++ b/src/backend/utils/cache/relcache.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *    $PostgreSQL: pgsql/src/backend/utils/cache/relcache.c,v 1.224 2005/05/27 23:31:20 tgl Exp $
+ *    $PostgreSQL: pgsql/src/backend/utils/cache/relcache.c,v 1.225 2005/05/29 04:23:05 tgl Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -137,10 +137,6 @@ do { \
                            (void *) &(RELATION->rd_id), \
                            HASH_ENTER, \
                            &found); \
-    if (idhentry == NULL) \
-        ereport(ERROR, \
-                (errcode(ERRCODE_OUT_OF_MEMORY), \
-                 errmsg("out of memory"))); \
     /* used to give notice if found -- now just keep quiet */ \
     idhentry->reldesc = RELATION; \
 } while(0)
@@ -1044,10 +1040,6 @@ LookupOpclassInfo(Oid operatorClassOid,
     opcentry = (OpClassCacheEnt *) hash_search(OpClassCache,
                                                (void *) &operatorClassOid,
                                                HASH_ENTER, &found);
-    if (opcentry == NULL)
-        ereport(ERROR,
-                (errcode(ERRCODE_OUT_OF_MEMORY),
-                 errmsg("out of memory")));
 
     if (found && opcentry->valid)
     {

diff --git a/src/backend/utils/cache/typcache.c b/src/backend/utils/cache/typcache.c
index a46267643b5..b0b890516df 100644
--- a/src/backend/utils/cache/typcache.c
+++ b/src/backend/utils/cache/typcache.c
@@ -36,7 +36,7 @@
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- *    $PostgreSQL: pgsql/src/backend/utils/cache/typcache.c,v 1.13 2005/04/14 20:32:43 tgl Exp $
+ *    $PostgreSQL: pgsql/src/backend/utils/cache/typcache.c,v 1.14 2005/05/29 04:23:06 tgl Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -158,10 +158,6 @@ lookup_type_cache(Oid type_id, int flags)
     typentry = (TypeCacheEntry *) hash_search(TypeCacheHash,
                                               (void *) &type_id,
                                               HASH_ENTER, &found);
-    if (typentry == NULL)
-        ereport(ERROR,
-                (errcode(ERRCODE_OUT_OF_MEMORY),
-                 errmsg("out of memory")));
     Assert(!found);             /* it wasn't there a moment ago */
 
     MemSet(typentry, 0, sizeof(TypeCacheEntry));
@@ -480,10 +476,6 @@ assign_record_type_typmod(TupleDesc tupDesc)
     recentry = (RecordCacheEntry *) hash_search(RecordCacheHash,
                                                 (void *) hashkey,
                                                 HASH_ENTER, &found);
-    if (recentry == NULL)
-        ereport(ERROR,
-                (errcode(ERRCODE_OUT_OF_MEMORY),
-                 errmsg("out of memory")));
     if (!found)
     {
         /* New entry ... hash_search initialized only the hash key */

diff --git a/src/backend/utils/fmgr/fmgr.c b/src/backend/utils/fmgr/fmgr.c
index 181da00fd7e..0733f190c2b 100644
--- a/src/backend/utils/fmgr/fmgr.c
+++ b/src/backend/utils/fmgr/fmgr.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *    $PostgreSQL: pgsql/src/backend/utils/fmgr/fmgr.c,v 1.94 2005/04/14 20:32:43 tgl Exp $
+ *    $PostgreSQL: pgsql/src/backend/utils/fmgr/fmgr.c,v 1.95 2005/05/29 04:23:06 tgl Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -521,10 +521,6 @@ record_C_func(HeapTuple procedureTuple,
                                            &fn_oid,
                                            HASH_ENTER,
                                            &found);
-    if (entry == NULL)
-        ereport(ERROR,
-                (errcode(ERRCODE_OUT_OF_MEMORY),
-                 errmsg("out of memory")));
     /* OID is already filled in */
     entry->fn_xmin = HeapTupleHeaderGetXmin(procedureTuple->t_data);
     entry->fn_cmin = HeapTupleHeaderGetCmin(procedureTuple->t_data);

diff --git a/src/backend/utils/hash/dynahash.c b/src/backend/utils/hash/dynahash.c
index 85bd82244c3..8f1af2b8fa3 100644
--- a/src/backend/utils/hash/dynahash.c
+++ b/src/backend/utils/hash/dynahash.c
@@ -9,7 +9,7 @@
  *
  *
  * IDENTIFICATION
- *    $PostgreSQL: pgsql/src/backend/utils/hash/dynahash.c,v 1.60 2005/05/16 00:19:04 tgl Exp $
+ *    $PostgreSQL: pgsql/src/backend/utils/hash/dynahash.c,v 1.61 2005/05/29 04:23:06 tgl Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -498,22 +498,22 @@ calc_bucket(HASHHDR *hctl, uint32 hash_val)
  * action is one of:
  *        HASH_FIND: look up key in table
  *        HASH_ENTER: look up key in table, creating entry if not present
+ *        HASH_ENTER_NULL: same, but return NULL if out of memory
  *        HASH_REMOVE: look up key in table, remove entry if present
- *        HASH_FIND_SAVE: look up key in table, also save in static var
- *        HASH_REMOVE_SAVED: remove entry saved by HASH_FIND_SAVE
  *
  * Return value is a pointer to the element found/entered/removed if any,
- * or NULL if no match was found.  (NB: in the case of the REMOVE actions,
+ * or NULL if no match was found.  (NB: in the case of the REMOVE action,
  * the result is a dangling pointer that shouldn't be dereferenced!)
- * A NULL result for HASH_ENTER implies we ran out of memory.
+ *
+ * HASH_ENTER will normally ereport a generic "out of memory" error if
+ * it is unable to create a new entry.  The HASH_ENTER_NULL operation is
+ * the same except it will return NULL if out of memory.  Note that
+ * HASH_ENTER_NULL cannot be used with the default palloc-based allocator,
+ * since palloc internally ereports on out-of-memory.
  *
  * If foundPtr isn't NULL, then *foundPtr is set TRUE if we found an
  * existing entry in the table, FALSE otherwise.  This is needed in the
  * HASH_ENTER case, but is redundant with the return value otherwise.
- *
- * The HASH_FIND_SAVE/HASH_REMOVE_SAVED interface is a hack to save one
- * table lookup in a find/process/remove scenario.  Note that no other
- * addition or removal in the table can safely happen in between.
  *----------
  */
 void *
@@ -523,19 +523,15 @@ hash_search(HTAB *hashp,
             bool *foundPtr)
 {
     HASHHDR    *hctl = hashp->hctl;
-    uint32      hashvalue = 0;
+    Size        keysize = hctl->keysize;
+    uint32      hashvalue;
     uint32      bucket;
     long        segment_num;
     long        segment_ndx;
     HASHSEGMENT segp;
     HASHBUCKET  currBucket;
     HASHBUCKET *prevBucketPtr;
-
-    static struct State
-    {
-        HASHBUCKET  currBucket;
-        HASHBUCKET *prevBucketPtr;
-    }           saveState;
+    HashCompareFunc match;
 
 #if HASH_STATISTICS
     hash_accesses++;
@@ -543,54 +539,38 @@
 #endif
 
     /*
-     * Do the initial lookup (or recall result of prior lookup)
+     * Do the initial lookup
      */
-    if (action == HASH_REMOVE_SAVED)
-    {
-        currBucket = saveState.currBucket;
-        prevBucketPtr = saveState.prevBucketPtr;
+    hashvalue = hashp->hash(keyPtr, keysize);
+    bucket = calc_bucket(hctl, hashvalue);
 
-        /*
-         * Try to catch subsequent errors
-         */
-        Assert(currBucket);
-        saveState.currBucket = NULL;
-    }
-    else
-    {
-        HashCompareFunc match;
-        Size        keysize = hctl->keysize;
+    segment_num = bucket >> hctl->sshift;
+    segment_ndx = MOD(bucket, hctl->ssize);
 
-        hashvalue = hashp->hash(keyPtr, keysize);
-        bucket = calc_bucket(hctl, hashvalue);
+    segp = hashp->dir[segment_num];
 
-        segment_num = bucket >> hctl->sshift;
-        segment_ndx = MOD(bucket, hctl->ssize);
+    if (segp == NULL)
+        hash_corrupted(hashp);
 
-        segp = hashp->dir[segment_num];
+    prevBucketPtr = &segp[segment_ndx];
+    currBucket = *prevBucketPtr;
 
-        if (segp == NULL)
-            hash_corrupted(hashp);
+    /*
+     * Follow collision chain looking for matching key
+     */
+    match = hashp->match;       /* save one fetch in inner loop */
 
-        prevBucketPtr = &segp[segment_ndx];
+    while (currBucket != NULL)
+    {
+        if (currBucket->hashvalue == hashvalue &&
+            match(ELEMENTKEY(currBucket), keyPtr, keysize) == 0)
+            break;
+        prevBucketPtr = &(currBucket->link);
         currBucket = *prevBucketPtr;
-
-        /*
-         * Follow collision chain looking for matching key
-         */
-        match = hashp->match;   /* save one fetch in inner loop */
-        while (currBucket != NULL)
-        {
-            if (currBucket->hashvalue == hashvalue &&
-                match(ELEMENTKEY(currBucket), keyPtr, keysize) == 0)
-                break;
-            prevBucketPtr = &(currBucket->link);
-            currBucket = *prevBucketPtr;
 #if HASH_STATISTICS
-            hash_collisions++;
-            hctl->collisions++;
+        hash_collisions++;
+        hctl->collisions++;
 #endif
-        }
     }
 
     if (foundPtr)
@@ -606,17 +586,7 @@ hash_search(HTAB *hashp,
                 return (void *) ELEMENTKEY(currBucket);
             return NULL;
 
-        case HASH_FIND_SAVE:
-            if (currBucket != NULL)
-            {
-                saveState.currBucket = currBucket;
-                saveState.prevBucketPtr = prevBucketPtr;
-                return (void *) ELEMENTKEY(currBucket);
-            }
-            return NULL;
-
-        case HASH_REMOVE:
-        case HASH_REMOVE_SAVED:
+        case HASH_REMOVE:
             if (currBucket != NULL)
             {
                 Assert(hctl->nentries > 0);
@@ -638,6 +608,11 @@ hash_search(HTAB *hashp,
             }
             return NULL;
 
+        case HASH_ENTER_NULL:
+            /* ENTER_NULL does not work with palloc-based allocator */
+            Assert(hashp->alloc != DynaHashAlloc);
+            /* FALL THRU */
+
         case HASH_ENTER:
             /* Return existing element if found, else create one */
             if (currBucket != NULL)
@@ -649,7 +624,20 @@ hash_search(HTAB *hashp,
             {
                 /* no free elements.  allocate another chunk of buckets */
                 if (!element_alloc(hashp, HASHELEMENT_ALLOC_INCR))
-                    return NULL;    /* out of memory */
+                {
+                    /* out of memory */
+                    if (action == HASH_ENTER_NULL)
+                        return NULL;
+                    /* report a generic message */
+                    if (hashp->isshared)
+                        ereport(ERROR,
+                                (errcode(ERRCODE_OUT_OF_MEMORY),
+                                 errmsg("out of shared memory")));
+                    else
+                        ereport(ERROR,
+                                (errcode(ERRCODE_OUT_OF_MEMORY),
+                                 errmsg("out of memory")));
+                }
                 currBucket = hctl->freeList;
                 Assert(currBucket != NULL);
             }

diff --git a/src/backend/utils/mmgr/portalmem.c b/src/backend/utils/mmgr/portalmem.c
index 362d3df967f..26c5dd02a31 100644
--- a/src/backend/utils/mmgr/portalmem.c
+++ b/src/backend/utils/mmgr/portalmem.c
@@ -12,7 +12,7 @@
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- *    $PostgreSQL: pgsql/src/backend/utils/mmgr/portalmem.c,v 1.79 2005/05/11 18:05:37 momjian Exp $
+ *    $PostgreSQL: pgsql/src/backend/utils/mmgr/portalmem.c,v 1.80 2005/05/29 04:23:06 tgl Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -72,10 +72,6 @@ do { \
     StrNCpy(key, NAME, MAX_PORTALNAME_LEN); \
     hentry = (PortalHashEnt*)hash_search(PortalHashTable, \
                                          key, HASH_ENTER, &found); \
-    if (hentry == NULL) \
-        ereport(ERROR, \
-                (errcode(ERRCODE_OUT_OF_MEMORY), \
-                 errmsg("out of memory"))); \
     if (found) \
         elog(ERROR, "duplicate portal name"); \
     hentry->portal = PORTAL; \
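
The one situation where the old return-NULL behavior stays useful is a table whose allocator can fail without throwing, in practice a shared-memory table created with ShmemInitHash() rather than with the default palloc-based allocator (hence the new Assert against DynaHashAlloc above). A hedged sketch of a caller opting into HASH_ENTER_NULL; `MyShmemHash`, `MyEntry` and `my_shmem_enter` are illustrative names only, not part of this commit:

```c
#include "postgres.h"
#include "utils/hsearch.h"

/* illustrative entry type; the hash key must be the first field */
typedef struct MyEntry
{
    Oid         key;
    int         refcount;
} MyEntry;

/* assumed to have been created with ShmemInitHash(), so its allocator
 * is ShmemAlloc-based rather than the default palloc-based one */
extern HTAB *MyShmemHash;

static MyEntry *
my_shmem_enter(Oid key)
{
    MyEntry    *entry;
    bool        found;

    /*
     * HASH_ENTER_NULL acts like HASH_ENTER, but returns NULL instead of
     * ereport()ing when the table cannot allocate another element, letting
     * the caller recover or report a more specific error.
     */
    entry = (MyEntry *) hash_search(MyShmemHash,
                                    (void *) &key,
                                    HASH_ENTER_NULL, &found);
    if (entry == NULL)
        return NULL;            /* caller decides how to cope with OOM */

    if (!found)
        entry->refcount = 0;    /* initialize fields beyond the hash key */
    entry->refcount++;
    return entry;
}
```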