From: Tom Lane
Date: Sun, 29 May 2005 04:23:07 +0000 (+0000)
Subject: Modify hash_search() API to prevent future occurrences of the error
X-Git-Tag: REL8_1_0BETA1~711
X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=e92a88272eb2b164e7e8c625ad3f7ad267e0224f;p=postgresql

Modify hash_search() API to prevent future occurrences of the error
spotted by Qingqing Zhou. The HASH_ENTER action now automatically fails
with elog(ERROR) on out-of-memory --- which incidentally lets us eliminate
duplicate error checks in quite a bunch of places. If you really need the
old return-NULL-on-out-of-memory behavior, you can ask for HASH_ENTER_NULL.
But there is now an Assert in that path checking that you aren't hoping to
get that behavior in a palloc-based hash table. Along the way, remove the
old HASH_FIND_SAVE/HASH_REMOVE_SAVED actions, which were not being used
anywhere anymore, and were surely too ugly and unsafe to want to see
revived again.
---
diff --git a/contrib/dblink/dblink.c b/contrib/dblink/dblink.c index f23151ac14..b216dbeea9 100644 --- a/contrib/dblink/dblink.c +++ b/contrib/dblink/dblink.c @@ -2061,11 +2061,6 @@ createNewConnection(const char *name, remoteConn * con) hentry = (remoteConnHashEnt *) hash_search(remoteConnHash, key, HASH_ENTER, &found); - if (!hentry) - ereport(ERROR, - (errcode(ERRCODE_OUT_OF_MEMORY), - errmsg("out of memory"))); - if (found) ereport(ERROR, (errcode(ERRCODE_DUPLICATE_OBJECT),
diff --git a/contrib/tablefunc/tablefunc.c b/contrib/tablefunc/tablefunc.c index c364fc6595..0aa52b5281 100644 --- a/contrib/tablefunc/tablefunc.c +++ b/contrib/tablefunc/tablefunc.c @@ -144,10 +144,6 @@ do { \ snprintf(key, MAX_CATNAME_LEN - 1, "%s", CATDESC->catname); \ hentry = (crosstab_HashEnt*) hash_search(crosstab_HashTable, \ key, HASH_ENTER, &found); \ - if (hentry == NULL) \ - ereport(ERROR, \ - (errcode(ERRCODE_OUT_OF_MEMORY), \ - errmsg("out of memory"))); \ if (found) \ ereport(ERROR, \ (errcode(ERRCODE_DUPLICATE_OBJECT), \
diff --git a/src/backend/access/transam/xlogutils.c b/src/backend/access/transam/xlogutils.c index 251098d408..72833a755f 100644 --- a/src/backend/access/transam/xlogutils.c +++ b/src/backend/access/transam/xlogutils.c @@ -11,7 +11,7 @@ * Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * - * $PostgreSQL: pgsql/src/backend/access/transam/xlogutils.c,v 1.36 2005/01/10 20:02:19 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/access/transam/xlogutils.c,v 1.37 2005/05/29 04:23:03 tgl Exp $ * *------------------------------------------------------------------------- */ @@ -223,9 +223,6 @@ XLogOpenRelation(bool redo, RmgrId rmid, RelFileNode rnode) hentry = (XLogRelCacheEntry *) hash_search(_xlrelcache, (void *) &rnode, HASH_ENTER, &found); - if (hentry == NULL) - elog(PANIC, "XLogOpenRelation: out of memory for cache"); - if (found) elog(PANIC, "XLogOpenRelation: file found on insert into cache");
diff --git a/src/backend/commands/prepare.c b/src/backend/commands/prepare.c index ada9d534de..16aae68693 100644 --- a/src/backend/commands/prepare.c +++ b/src/backend/commands/prepare.c @@ -10,7 +10,7 @@ * Copyright (c) 2002-2005, PostgreSQL Global Development Group * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/commands/prepare.c,v 1.37 2005/05/24 04:18:04 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/commands/prepare.c,v 1.38 2005/05/29 04:23:03 tgl Exp $ * *------------------------------------------------------------------------- */ @@ -346,9 +346,9
@@ StorePreparedStatement(const char *stmt_name, HASH_ENTER, &found); - /* Shouldn't get a failure, nor a duplicate entry */ - if (!entry || found) - elog(ERROR, "could not store prepared statement \"%s\"", + /* Shouldn't get a duplicate entry */ + if (found) + elog(ERROR, "duplicate prepared statement \"%s\"", stmt_name); /* Fill in the hash table entry with copied data */ diff --git a/src/backend/executor/execGrouping.c b/src/backend/executor/execGrouping.c index 37db2bcd2f..1bf46d815c 100644 --- a/src/backend/executor/execGrouping.c +++ b/src/backend/executor/execGrouping.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/execGrouping.c,v 1.14 2005/03/16 21:38:06 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/executor/execGrouping.c,v 1.15 2005/05/29 04:23:03 tgl Exp $ * *------------------------------------------------------------------------- */ @@ -379,13 +379,9 @@ LookupTupleHashEntry(TupleHashTable hashtable, TupleTableSlot *slot, } else { - /* created new entry ... we hope */ - if (entry == NULL) - ereport(ERROR, - (errcode(ERRCODE_OUT_OF_MEMORY), - errmsg("out of memory"))); - /* + * created new entry + * * Zero any caller-requested space in the entry. (This zaps * the "key data" dynahash.c copied into the new entry, but we * don't care since we're about to overwrite it anyway.) diff --git a/src/backend/nodes/tidbitmap.c b/src/backend/nodes/tidbitmap.c index d0ca978cb6..1da1aa785e 100644 --- a/src/backend/nodes/tidbitmap.c +++ b/src/backend/nodes/tidbitmap.c @@ -23,7 +23,7 @@ * Copyright (c) 2003-2005, PostgreSQL Global Development Group * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/nodes/tidbitmap.c,v 1.3 2005/05/17 00:43:47 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/nodes/tidbitmap.c,v 1.4 2005/05/29 04:23:03 tgl Exp $ * *------------------------------------------------------------------------- */ @@ -220,10 +220,6 @@ tbm_create_pagetable(TIDBitmap *tbm) page = (PagetableEntry *) hash_search(tbm->pagetable, (void *) &tbm->entry1.blockno, HASH_ENTER, &found); - if (page == NULL) - ereport(ERROR, - (errcode(ERRCODE_OUT_OF_MEMORY), - errmsg("out of memory"))); Assert(!found); memcpy(page, &tbm->entry1, sizeof(PagetableEntry)); } @@ -726,10 +722,6 @@ tbm_get_pageentry(TIDBitmap *tbm, BlockNumber pageno) page = (PagetableEntry *) hash_search(tbm->pagetable, (void *) &pageno, HASH_ENTER, &found); - if (page == NULL) - ereport(ERROR, - (errcode(ERRCODE_OUT_OF_MEMORY), - errmsg("out of memory"))); } /* Initialize it if not present before */ @@ -820,10 +812,6 @@ tbm_mark_page_lossy(TIDBitmap *tbm, BlockNumber pageno) page = (PagetableEntry *) hash_search(tbm->pagetable, (void *) &chunk_pageno, HASH_ENTER, &found); - if (page == NULL) - ereport(ERROR, - (errcode(ERRCODE_OUT_OF_MEMORY), - errmsg("out of memory"))); /* Initialize it if not present before */ if (!found) diff --git a/src/backend/postmaster/pgstat.c b/src/backend/postmaster/pgstat.c index 3d2244b0d8..b7f15ea38e 100644 --- a/src/backend/postmaster/pgstat.c +++ b/src/backend/postmaster/pgstat.c @@ -13,7 +13,7 @@ * * Copyright (c) 2001-2005, PostgreSQL Global Development Group * - * $PostgreSQL: pgsql/src/backend/postmaster/pgstat.c,v 1.94 2005/05/11 01:41:40 neilc Exp $ + * $PostgreSQL: pgsql/src/backend/postmaster/pgstat.c,v 1.95 2005/05/29 04:23:03 tgl Exp $ * ---------- */ #include "postgres.h" @@ -2061,10 +2061,6 @@ pgstat_get_db_entry(int databaseid) result = (PgStat_StatDBEntry *) hash_search(pgStatDBHash, &databaseid, HASH_ENTER, &found); - if (result == NULL) - ereport(ERROR, - 
(errcode(ERRCODE_OUT_OF_MEMORY), - errmsg("out of memory in statistics collector --- abort"))); /* If not found, initialize the new one. */ if (!found) @@ -2126,10 +2122,6 @@ pgstat_sub_backend(int procpid) (void *) &procpid, HASH_ENTER, &found); - if (deadbe == NULL) - ereport(ERROR, - (errcode(ERRCODE_OUT_OF_MEMORY), - errmsg("out of memory in statistics collector --- abort"))); if (!found) { @@ -2435,12 +2427,6 @@ pgstat_read_statsfile(HTAB **dbhash, Oid onlydb, (void *) &dbbuf.databaseid, HASH_ENTER, &found); - if (dbentry == NULL) - { - ereport(ERROR, - (errcode(ERRCODE_OUT_OF_MEMORY), - errmsg("out of memory"))); - } if (found) { ereport(pgStatRunningInCollector ? LOG : WARNING, @@ -2503,10 +2489,6 @@ pgstat_read_statsfile(HTAB **dbhash, Oid onlydb, tabentry = (PgStat_StatTabEntry *) hash_search(tabhash, (void *) &tabbuf.tableid, HASH_ENTER, &found); - if (tabentry == NULL) - ereport(ERROR, - (errcode(ERRCODE_OUT_OF_MEMORY), - errmsg("out of memory"))); if (found) { @@ -2730,10 +2712,6 @@ pgstat_recv_tabstat(PgStat_MsgTabstat *msg, int len) tabentry = (PgStat_StatTabEntry *) hash_search(dbentry->tables, (void *) &(tabmsg[i].t_id), HASH_ENTER, &found); - if (tabentry == NULL) - ereport(ERROR, - (errcode(ERRCODE_OUT_OF_MEMORY), - errmsg("out of memory in statistics collector --- abort"))); if (!found) { diff --git a/src/backend/storage/buffer/buf_table.c b/src/backend/storage/buffer/buf_table.c index 7fe9e83477..5ef277edc9 100644 --- a/src/backend/storage/buffer/buf_table.c +++ b/src/backend/storage/buffer/buf_table.c @@ -13,7 +13,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/storage/buffer/buf_table.c,v 1.40 2005/03/04 20:21:06 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/storage/buffer/buf_table.c,v 1.41 2005/05/29 04:23:04 tgl Exp $ * *------------------------------------------------------------------------- */ @@ -110,11 +110,6 @@ BufTableInsert(BufferTag *tagPtr, int buf_id) result = (BufferLookupEnt *) hash_search(SharedBufHash, (void *) tagPtr, HASH_ENTER, &found); - if (!result) - ereport(ERROR, - (errcode(ERRCODE_OUT_OF_MEMORY), - errmsg("out of shared memory"))); - if (found) /* found something already in the table */ return result->id; diff --git a/src/backend/storage/buffer/localbuf.c b/src/backend/storage/buffer/localbuf.c index 0e9e7b68cc..a2604dcf55 100644 --- a/src/backend/storage/buffer/localbuf.c +++ b/src/backend/storage/buffer/localbuf.c @@ -9,7 +9,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/storage/buffer/localbuf.c,v 1.66 2005/03/19 23:27:05 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/storage/buffer/localbuf.c,v 1.67 2005/05/29 04:23:04 tgl Exp $ * *------------------------------------------------------------------------- */ @@ -192,10 +192,6 @@ LocalBufferAlloc(Relation reln, BlockNumber blockNum, bool *foundPtr) hresult = (LocalBufferLookupEnt *) hash_search(LocalBufHash, (void *) &newTag, HASH_ENTER, &found); - if (!hresult) - ereport(ERROR, - (errcode(ERRCODE_OUT_OF_MEMORY), - errmsg("out of memory"))); if (found) /* shouldn't happen */ elog(ERROR, "local buffer hash table corrupted"); hresult->id = b; diff --git a/src/backend/storage/freespace/freespace.c b/src/backend/storage/freespace/freespace.c index b5cb8ef713..27e9952b3f 100644 --- a/src/backend/storage/freespace/freespace.c +++ b/src/backend/storage/freespace/freespace.c @@ -8,7 +8,7 @@ * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/storage/freespace/freespace.c,v 1.44 2005/04/24 03:51:49 
momjian Exp $ + * $PostgreSQL: pgsql/src/backend/storage/freespace/freespace.c,v 1.45 2005/05/29 04:23:04 tgl Exp $ * * * NOTES: @@ -1036,10 +1036,6 @@ create_fsm_rel(RelFileNode *rel) (void *) rel, HASH_ENTER, &found); - if (!fsmrel) - ereport(ERROR, - (errcode(ERRCODE_OUT_OF_MEMORY), - errmsg("out of shared memory"))); if (!found) { diff --git a/src/backend/storage/ipc/shmem.c b/src/backend/storage/ipc/shmem.c index ca4ca19340..9af059139e 100644 --- a/src/backend/storage/ipc/shmem.c +++ b/src/backend/storage/ipc/shmem.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/storage/ipc/shmem.c,v 1.83 2005/04/04 04:34:41 neilc Exp $ + * $PostgreSQL: pgsql/src/backend/storage/ipc/shmem.c,v 1.84 2005/05/29 04:23:04 tgl Exp $ * *------------------------------------------------------------------------- */ @@ -235,10 +235,6 @@ InitShmemIndex(void) result = (ShmemIndexEnt *) hash_search(ShmemIndex, (void *) &item, HASH_ENTER, &found); - if (!result) - ereport(FATAL, - (errcode(ERRCODE_OUT_OF_MEMORY), - errmsg("out of shared memory"))); Assert(!found); @@ -367,7 +363,7 @@ ShmemInitStruct(const char *name, Size size, bool *foundPtr) /* look it up in the shmem index */ result = (ShmemIndexEnt *) - hash_search(ShmemIndex, (void *) &item, HASH_ENTER, foundPtr); + hash_search(ShmemIndex, (void *) &item, HASH_ENTER_NULL, foundPtr); if (!result) { @@ -375,7 +371,6 @@ ShmemInitStruct(const char *name, Size size, bool *foundPtr) ereport(ERROR, (errcode(ERRCODE_OUT_OF_MEMORY), errmsg("out of shared memory"))); - return NULL; } if (*foundPtr) diff --git a/src/backend/storage/lmgr/lock.c b/src/backend/storage/lmgr/lock.c index 49843b2de6..d7180d7c1f 100644 --- a/src/backend/storage/lmgr/lock.c +++ b/src/backend/storage/lmgr/lock.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/storage/lmgr/lock.c,v 1.152 2005/05/19 23:30:18 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/storage/lmgr/lock.c,v 1.153 2005/05/29 04:23:04 tgl Exp $ * * NOTES * Outside modules can create a lock table and acquire/release @@ -470,10 +470,6 @@ LockAcquire(LOCKMETHODID lockmethodid, LOCKTAG *locktag, locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash[lockmethodid], (void *) &localtag, HASH_ENTER, &found); - if (!locallock) - ereport(ERROR, - (errcode(ERRCODE_OUT_OF_MEMORY), - errmsg("out of memory"))); /* * if it's a new locallock object, initialize it @@ -531,7 +527,7 @@ LockAcquire(LOCKMETHODID lockmethodid, LOCKTAG *locktag, */ lock = (LOCK *) hash_search(LockMethodLockHash[lockmethodid], (void *) locktag, - HASH_ENTER, &found); + HASH_ENTER_NULL, &found); if (!lock) { LWLockRelease(masterLock); @@ -578,7 +574,7 @@ LockAcquire(LOCKMETHODID lockmethodid, LOCKTAG *locktag, */ proclock = (PROCLOCK *) hash_search(LockMethodProcLockHash[lockmethodid], (void *) &proclocktag, - HASH_ENTER, &found); + HASH_ENTER_NULL, &found); if (!proclock) { /* Ooops, not enough shmem for the proclock */ diff --git a/src/backend/storage/smgr/md.c b/src/backend/storage/smgr/md.c index 73194ec4ef..1c0cb7e240 100644 --- a/src/backend/storage/smgr/md.c +++ b/src/backend/storage/smgr/md.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/storage/smgr/md.c,v 1.114 2004/12/31 22:01:13 pgsql Exp $ + * $PostgreSQL: pgsql/src/backend/storage/smgr/md.c,v 1.115 2005/05/29 04:23:05 tgl Exp $ * *------------------------------------------------------------------------- */ @@ -805,9 +805,8 @@ register_dirty_segment(SMgrRelation reln, MdfdVec *seg) entry.rnode = reln->smgr_rnode; entry.segno = 
seg->mdfd_segno; - if (hash_search(pendingOpsTable, &entry, HASH_ENTER, NULL) != NULL) - return true; - /* out of memory: fall through to do it locally */ + (void) hash_search(pendingOpsTable, &entry, HASH_ENTER, NULL); + return true; } else { @@ -838,10 +837,7 @@ RememberFsyncRequest(RelFileNode rnode, BlockNumber segno) entry.rnode = rnode; entry.segno = segno; - if (hash_search(pendingOpsTable, &entry, HASH_ENTER, NULL) == NULL) - ereport(FATAL, - (errcode(ERRCODE_OUT_OF_MEMORY), - errmsg("out of memory"))); + (void) hash_search(pendingOpsTable, &entry, HASH_ENTER, NULL); } /* diff --git a/src/backend/storage/smgr/smgr.c b/src/backend/storage/smgr/smgr.c index 5abfc6d477..0b5a7154d3 100644 --- a/src/backend/storage/smgr/smgr.c +++ b/src/backend/storage/smgr/smgr.c @@ -11,7 +11,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/storage/smgr/smgr.c,v 1.86 2005/03/20 22:00:53 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/storage/smgr/smgr.c,v 1.87 2005/05/29 04:23:05 tgl Exp $ * *------------------------------------------------------------------------- */ @@ -207,10 +207,6 @@ smgropen(RelFileNode rnode) reln = (SMgrRelation) hash_search(SMgrRelationHash, (void *) &rnode, HASH_ENTER, &found); - if (reln == NULL) - ereport(ERROR, - (errcode(ERRCODE_OUT_OF_MEMORY), - errmsg("out of memory"))); /* Initialize it if not present before */ if (!found) diff --git a/src/backend/utils/adt/ri_triggers.c b/src/backend/utils/adt/ri_triggers.c index 78a85b7edc..0dc1241012 100644 --- a/src/backend/utils/adt/ri_triggers.c +++ b/src/backend/utils/adt/ri_triggers.c @@ -17,7 +17,7 @@ * * Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group * - * $PostgreSQL: pgsql/src/backend/utils/adt/ri_triggers.c,v 1.77 2005/04/28 21:47:15 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/utils/adt/ri_triggers.c,v 1.78 2005/05/29 04:23:05 tgl Exp $ * * ---------- */ @@ -3466,10 +3466,6 @@ ri_HashPreparedPlan(RI_QueryKey *key, void *plan) entry = (RI_QueryHashEntry *) hash_search(ri_query_cache, (void *) key, HASH_ENTER, &found); - if (entry == NULL) - ereport(ERROR, - (errcode(ERRCODE_OUT_OF_MEMORY), - errmsg("out of memory"))); entry->plan = plan; } diff --git a/src/backend/utils/cache/relcache.c b/src/backend/utils/cache/relcache.c index 3123eca518..7b140228c8 100644 --- a/src/backend/utils/cache/relcache.c +++ b/src/backend/utils/cache/relcache.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/utils/cache/relcache.c,v 1.224 2005/05/27 23:31:20 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/utils/cache/relcache.c,v 1.225 2005/05/29 04:23:05 tgl Exp $ * *------------------------------------------------------------------------- */ @@ -137,10 +137,6 @@ do { \ (void *) &(RELATION->rd_id), \ HASH_ENTER, \ &found); \ - if (idhentry == NULL) \ - ereport(ERROR, \ - (errcode(ERRCODE_OUT_OF_MEMORY), \ - errmsg("out of memory"))); \ /* used to give notice if found -- now just keep quiet */ \ idhentry->reldesc = RELATION; \ } while(0) @@ -1044,10 +1040,6 @@ LookupOpclassInfo(Oid operatorClassOid, opcentry = (OpClassCacheEnt *) hash_search(OpClassCache, (void *) &operatorClassOid, HASH_ENTER, &found); - if (opcentry == NULL) - ereport(ERROR, - (errcode(ERRCODE_OUT_OF_MEMORY), - errmsg("out of memory"))); if (found && opcentry->valid) { diff --git a/src/backend/utils/cache/typcache.c b/src/backend/utils/cache/typcache.c index a46267643b..b0b890516d 100644 --- a/src/backend/utils/cache/typcache.c +++ b/src/backend/utils/cache/typcache.c @@ -36,7 +36,7 @@ * Portions Copyright (c) 1994, Regents 
of the University of California * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/utils/cache/typcache.c,v 1.13 2005/04/14 20:32:43 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/utils/cache/typcache.c,v 1.14 2005/05/29 04:23:06 tgl Exp $ * *------------------------------------------------------------------------- */ @@ -158,10 +158,6 @@ lookup_type_cache(Oid type_id, int flags) typentry = (TypeCacheEntry *) hash_search(TypeCacheHash, (void *) &type_id, HASH_ENTER, &found); - if (typentry == NULL) - ereport(ERROR, - (errcode(ERRCODE_OUT_OF_MEMORY), - errmsg("out of memory"))); Assert(!found); /* it wasn't there a moment ago */ MemSet(typentry, 0, sizeof(TypeCacheEntry)); @@ -480,10 +476,6 @@ assign_record_type_typmod(TupleDesc tupDesc) recentry = (RecordCacheEntry *) hash_search(RecordCacheHash, (void *) hashkey, HASH_ENTER, &found); - if (recentry == NULL) - ereport(ERROR, - (errcode(ERRCODE_OUT_OF_MEMORY), - errmsg("out of memory"))); if (!found) { /* New entry ... hash_search initialized only the hash key */ diff --git a/src/backend/utils/fmgr/fmgr.c b/src/backend/utils/fmgr/fmgr.c index 181da00fd7..0733f190c2 100644 --- a/src/backend/utils/fmgr/fmgr.c +++ b/src/backend/utils/fmgr/fmgr.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/utils/fmgr/fmgr.c,v 1.94 2005/04/14 20:32:43 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/utils/fmgr/fmgr.c,v 1.95 2005/05/29 04:23:06 tgl Exp $ * *------------------------------------------------------------------------- */ @@ -521,10 +521,6 @@ record_C_func(HeapTuple procedureTuple, &fn_oid, HASH_ENTER, &found); - if (entry == NULL) - ereport(ERROR, - (errcode(ERRCODE_OUT_OF_MEMORY), - errmsg("out of memory"))); /* OID is already filled in */ entry->fn_xmin = HeapTupleHeaderGetXmin(procedureTuple->t_data); entry->fn_cmin = HeapTupleHeaderGetCmin(procedureTuple->t_data); diff --git a/src/backend/utils/hash/dynahash.c b/src/backend/utils/hash/dynahash.c index 85bd82244c..8f1af2b8fa 100644 --- a/src/backend/utils/hash/dynahash.c +++ b/src/backend/utils/hash/dynahash.c @@ -9,7 +9,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/utils/hash/dynahash.c,v 1.60 2005/05/16 00:19:04 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/utils/hash/dynahash.c,v 1.61 2005/05/29 04:23:06 tgl Exp $ * *------------------------------------------------------------------------- */ @@ -498,22 +498,22 @@ calc_bucket(HASHHDR *hctl, uint32 hash_val) * action is one of: * HASH_FIND: look up key in table * HASH_ENTER: look up key in table, creating entry if not present + * HASH_ENTER_NULL: same, but return NULL if out of memory * HASH_REMOVE: look up key in table, remove entry if present - * HASH_FIND_SAVE: look up key in table, also save in static var - * HASH_REMOVE_SAVED: remove entry saved by HASH_FIND_SAVE * * Return value is a pointer to the element found/entered/removed if any, - * or NULL if no match was found. (NB: in the case of the REMOVE actions, + * or NULL if no match was found. (NB: in the case of the REMOVE action, * the result is a dangling pointer that shouldn't be dereferenced!) - * A NULL result for HASH_ENTER implies we ran out of memory. + * + * HASH_ENTER will normally ereport a generic "out of memory" error if + * it is unable to create a new entry. The HASH_ENTER_NULL operation is + * the same except it will return NULL if out of memory. Note that + * HASH_ENTER_NULL cannot be used with the default palloc-based allocator, + * since palloc internally ereports on out-of-memory. 
* * If foundPtr isn't NULL, then *foundPtr is set TRUE if we found an * existing entry in the table, FALSE otherwise. This is needed in the * HASH_ENTER case, but is redundant with the return value otherwise. - * - * The HASH_FIND_SAVE/HASH_REMOVE_SAVED interface is a hack to save one - * table lookup in a find/process/remove scenario. Note that no other - * addition or removal in the table can safely happen in between. *---------- */ void * @@ -523,19 +523,15 @@ hash_search(HTAB *hashp, bool *foundPtr) { HASHHDR *hctl = hashp->hctl; - uint32 hashvalue = 0; + Size keysize = hctl->keysize; + uint32 hashvalue; uint32 bucket; long segment_num; long segment_ndx; HASHSEGMENT segp; HASHBUCKET currBucket; HASHBUCKET *prevBucketPtr; - - static struct State - { - HASHBUCKET currBucket; - HASHBUCKET *prevBucketPtr; - } saveState; + HashCompareFunc match; #if HASH_STATISTICS hash_accesses++; @@ -543,54 +539,38 @@ hash_search(HTAB *hashp, #endif /* - * Do the initial lookup (or recall result of prior lookup) + * Do the initial lookup */ - if (action == HASH_REMOVE_SAVED) - { - currBucket = saveState.currBucket; - prevBucketPtr = saveState.prevBucketPtr; + hashvalue = hashp->hash(keyPtr, keysize); + bucket = calc_bucket(hctl, hashvalue); - /* - * Try to catch subsequent errors - */ - Assert(currBucket); - saveState.currBucket = NULL; - } - else - { - HashCompareFunc match; - Size keysize = hctl->keysize; + segment_num = bucket >> hctl->sshift; + segment_ndx = MOD(bucket, hctl->ssize); - hashvalue = hashp->hash(keyPtr, keysize); - bucket = calc_bucket(hctl, hashvalue); + segp = hashp->dir[segment_num]; - segment_num = bucket >> hctl->sshift; - segment_ndx = MOD(bucket, hctl->ssize); + if (segp == NULL) + hash_corrupted(hashp); - segp = hashp->dir[segment_num]; + prevBucketPtr = &segp[segment_ndx]; + currBucket = *prevBucketPtr; - if (segp == NULL) - hash_corrupted(hashp); + /* + * Follow collision chain looking for matching key + */ + match = hashp->match; /* save one fetch in inner loop */ - prevBucketPtr = &segp[segment_ndx]; + while (currBucket != NULL) + { + if (currBucket->hashvalue == hashvalue && + match(ELEMENTKEY(currBucket), keyPtr, keysize) == 0) + break; + prevBucketPtr = &(currBucket->link); currBucket = *prevBucketPtr; - - /* - * Follow collision chain looking for matching key - */ - match = hashp->match; /* save one fetch in inner loop */ - while (currBucket != NULL) - { - if (currBucket->hashvalue == hashvalue && - match(ELEMENTKEY(currBucket), keyPtr, keysize) == 0) - break; - prevBucketPtr = &(currBucket->link); - currBucket = *prevBucketPtr; #if HASH_STATISTICS - hash_collisions++; - hctl->collisions++; + hash_collisions++; + hctl->collisions++; #endif - } } if (foundPtr) @@ -606,17 +586,7 @@ hash_search(HTAB *hashp, return (void *) ELEMENTKEY(currBucket); return NULL; - case HASH_FIND_SAVE: - if (currBucket != NULL) - { - saveState.currBucket = currBucket; - saveState.prevBucketPtr = prevBucketPtr; - return (void *) ELEMENTKEY(currBucket); - } - return NULL; - case HASH_REMOVE: - case HASH_REMOVE_SAVED: if (currBucket != NULL) { Assert(hctl->nentries > 0); @@ -638,6 +608,11 @@ hash_search(HTAB *hashp, } return NULL; + case HASH_ENTER_NULL: + /* ENTER_NULL does not work with palloc-based allocator */ + Assert(hashp->alloc != DynaHashAlloc); + /* FALL THRU */ + case HASH_ENTER: /* Return existing element if found, else create one */ if (currBucket != NULL) @@ -649,7 +624,20 @@ hash_search(HTAB *hashp, { /* no free elements. 
allocate another chunk of buckets */ if (!element_alloc(hashp, HASHELEMENT_ALLOC_INCR)) - return NULL; /* out of memory */ + { + /* out of memory */ + if (action == HASH_ENTER_NULL) + return NULL; + /* report a generic message */ + if (hashp->isshared) + ereport(ERROR, + (errcode(ERRCODE_OUT_OF_MEMORY), + errmsg("out of shared memory"))); + else + ereport(ERROR, + (errcode(ERRCODE_OUT_OF_MEMORY), + errmsg("out of memory"))); + } currBucket = hctl->freeList; Assert(currBucket != NULL); } diff --git a/src/backend/utils/mmgr/portalmem.c b/src/backend/utils/mmgr/portalmem.c index 362d3df967..26c5dd02a3 100644 --- a/src/backend/utils/mmgr/portalmem.c +++ b/src/backend/utils/mmgr/portalmem.c @@ -12,7 +12,7 @@ * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/utils/mmgr/portalmem.c,v 1.79 2005/05/11 18:05:37 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/utils/mmgr/portalmem.c,v 1.80 2005/05/29 04:23:06 tgl Exp $ * *------------------------------------------------------------------------- */ @@ -72,10 +72,6 @@ do { \ StrNCpy(key, NAME, MAX_PORTALNAME_LEN); \ hentry = (PortalHashEnt*)hash_search(PortalHashTable, \ key, HASH_ENTER, &found); \ - if (hentry == NULL) \ - ereport(ERROR, \ - (errcode(ERRCODE_OUT_OF_MEMORY), \ - errmsg("out of memory"))); \ if (found) \ elog(ERROR, "duplicate portal name"); \ hentry->portal = PORTAL; \ diff --git a/src/include/utils/hsearch.h b/src/include/utils/hsearch.h index e6293bc000..bb93dea077 100644 --- a/src/include/utils/hsearch.h +++ b/src/include/utils/hsearch.h @@ -7,7 +7,7 @@ * Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * - * $PostgreSQL: pgsql/src/include/utils/hsearch.h,v 1.35 2005/04/14 20:32:43 tgl Exp $ + * $PostgreSQL: pgsql/src/include/utils/hsearch.h,v 1.36 2005/05/29 04:23:06 tgl Exp $ * *------------------------------------------------------------------------- */ @@ -153,8 +153,7 @@ typedef enum HASH_FIND, HASH_ENTER, HASH_REMOVE, - HASH_FIND_SAVE, - HASH_REMOVE_SAVED + HASH_ENTER_NULL } HASHACTION; /* hash_seq status (should be considered an opaque type by callers) */ diff --git a/src/pl/plpgsql/src/pl_comp.c b/src/pl/plpgsql/src/pl_comp.c index b4cd6d1fdd..b746a8374b 100644 --- a/src/pl/plpgsql/src/pl_comp.c +++ b/src/pl/plpgsql/src/pl_comp.c @@ -3,7 +3,7 @@ * procedural language * * IDENTIFICATION - * $PostgreSQL: pgsql/src/pl/plpgsql/src/pl_comp.c,v 1.89 2005/05/06 17:24:55 tgl Exp $ + * $PostgreSQL: pgsql/src/pl/plpgsql/src/pl_comp.c,v 1.90 2005/05/29 04:23:06 tgl Exp $ * * This software is copyrighted by Jan Wieck - Hamburg. 
* @@ -2157,10 +2157,6 @@ plpgsql_HashTableInsert(PLpgSQL_function *function, (void *) func_key, HASH_ENTER, &found); - if (hentry == NULL) - ereport(ERROR, - (errcode(ERRCODE_OUT_OF_MEMORY), - errmsg("out of memory"))); if (found) elog(WARNING, "trying to insert a function that already exists"); diff --git a/src/timezone/pgtz.c b/src/timezone/pgtz.c index bdf3d40407..38c7196406 100644 --- a/src/timezone/pgtz.c +++ b/src/timezone/pgtz.c @@ -6,7 +6,7 @@ * Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group * * IDENTIFICATION - * $PostgreSQL: pgsql/src/timezone/pgtz.c,v 1.31 2005/05/23 21:54:02 momjian Exp $ + * $PostgreSQL: pgsql/src/timezone/pgtz.c,v 1.32 2005/05/29 04:23:07 tgl Exp $ * *------------------------------------------------------------------------- */ @@ -971,6 +971,7 @@ identify_system_timezone(void) * load and parse the TZ definition file every time it is selected. */ static HTAB *timezone_cache = NULL; + static bool init_timezone_hashtable(void) { @@ -1013,14 +1014,18 @@ pg_tzset(const char *name) HASH_FIND, NULL); if (tzp) + { /* Timezone found in cache, nothing more to do */ return tzp; + } if (tzload(name, &tz.state) != 0) { if (name[0] == ':' || tzparse(name, &tz.state, FALSE) != 0) + { /* Unknown timezone. Fail our call instead of loading GMT! */ return NULL; + } } strcpy(tz.TZname, name); @@ -1031,9 +1036,6 @@ pg_tzset(const char *name) HASH_ENTER, NULL); - if (!tzp) - return NULL; - strcpy(tzp->TZname, tz.TZname); memcpy(&tzp->state, &tz.state, sizeof(tz.state));