Clean up non-reentrant interface for hash_seq/HashTableWalk, so that
starting a new hashtable search no longer clobbers any other search
active anywhere in the system.  Fix RelationCacheInvalidate() so that
it will not crash or go into an infinite loop if invoked recursively,
as for example by a second SI Reset message arriving while we are still
processing a prior one.

author      Tom Lane <tgl@sss.pgh.pa.us>
            Tue, 2 Jan 2001 04:33:24 +0000 (04:33 +0000)
committer   Tom Lane <tgl@sss.pgh.pa.us>
            Tue, 2 Jan 2001 04:33:24 +0000 (04:33 +0000)
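
For reference, the reentrant scan idiom introduced by this commit looks like the
minimal sketch below.  It is not part of the commit; scan_all_entries and mytable
are placeholder names, but the loop shape mirrors the new code in hasht.c and
lock.c in the diff that follows.

    /* requires utils/hsearch.h; mytable is a placeholder HTAB pointer */
    static void
    scan_all_entries(HTAB *mytable)
    {
        HASH_SEQ_STATUS status;
        long       *hashent;

        /* scan state now lives in the caller, not in static variables */
        hash_seq_init(&status, mytable);

        while ((hashent = hash_seq_search(&status)) != (long *) TRUE)
        {
            if (hashent == NULL)
                elog(FATAL, "hash table corrupted");    /* NULL means error */

            /* hashent points at the entry's key; process the entry here */
        }
    }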

src/backend/access/transam/xlogutils.c
src/backend/lib/hasht.c
src/backend/storage/lmgr/lock.c
src/backend/utils/cache/relcache.c
src/backend/utils/hash/dynahash.c
src/backend/utils/mmgr/portalmem.c
src/include/lib/hasht.h
src/include/utils/hsearch.h

src/backend/access/transam/xlogutils.c
index acd19da263c90df03bc0d766ad7c72962dc23a5e..1447fa29a5460b149bc5bb09d0983291e0dd1e2d 100644 (file)
@@ -262,7 +262,7 @@ _xl_init_rel_cache(void)
 }
 
 static void
-_xl_remove_hash_entry(XLogRelDesc **edata, int dummy)
+_xl_remove_hash_entry(XLogRelDesc **edata, Datum dummy)
 {
        XLogRelCacheEntry          *hentry;
        bool                                    found;
@@ -328,7 +328,7 @@ XLogCloseRelationCache(void)
        if (!_xlrelarr)
                return;
 
-       HashTableWalk(_xlrelcache, (HashtFunc)_xl_remove_hash_entry, 0);
+       HashTableWalk(_xlrelcache, (HashtFunc) _xl_remove_hash_entry, 0);
        hash_destroy(_xlrelcache);
 
        free(_xlrelarr);

src/backend/lib/hasht.c
index 5caeabd281ab3530dea037a2827f9622677be8d2..1ac04cc92dde7c974bffa53acfaeca12b45a754d 100644 (file)
@@ -9,7 +9,7 @@
  *
  *
  * IDENTIFICATION
- *       $Header: /cvsroot/pgsql/src/backend/lib/Attic/hasht.c,v 1.13 2000/01/31 04:35:51 tgl Exp $
+ *       $Header: /cvsroot/pgsql/src/backend/lib/Attic/hasht.c,v 1.14 2001/01/02 04:33:15 tgl Exp $
  *
  *-------------------------------------------------------------------------
  */
 /* -----------------------------------
  *             HashTableWalk
  *
- *             call function on every element in hashtable
- *             one extra argument (arg) may be supplied
+ * call given function on every element in hashtable
+ *
+ * one extra argument (arg) may be supplied
+ *
+ * NOTE: it is allowed for the given function to delete the hashtable entry
+ * it is passed.  However, deleting any other element while the scan is
+ * in progress is UNDEFINED (see hash_seq functions).  Also, if elements are
+ * added to the table while the scan is in progress, it is unspecified
+ * whether they will be visited by the scan or not.
  * -----------------------------------
  */
 void
-HashTableWalk(HTAB *hashtable, HashtFunc function, int arg)
+HashTableWalk(HTAB *hashtable, HashtFunc function, Datum arg)
 {
+       HASH_SEQ_STATUS status;
        long       *hashent;
        void       *data;
        int                     keysize;
 
+       hash_seq_init(&status, hashtable);
        keysize = hashtable->hctl->keysize;
-       hash_seq((HTAB *) NULL);
-       while ((hashent = hash_seq(hashtable)) != (long *) TRUE)
+
+       while ((hashent = hash_seq_search(&status)) != (long *) TRUE)
        {
                if (hashent == NULL)
-                       elog(FATAL, "error in HashTableWalk.");
+                       elog(FATAL, "error in HashTableWalk");
 
                /*
                 * XXX the corresponding hash table insertion does NOT LONGALIGN
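
A usage note (not from the commit): since HashtFunc now takes a Datum rather than
an int argument, walker callbacks that need a pointer argument pass it through
PointerGetDatum/DatumGetPointer, as RelationCacheInvalidateWalker does below.  A
hypothetical walker and call site might look like this (MyEntry, count_entry,
count_all_entries and my_table are made-up names):

    /* hypothetical walker; MyEntry is a made-up entry type */
    static void
    count_entry(MyEntry *entry, Datum countp)
    {
        int        *count = (int *) DatumGetPointer(countp);

        /* "entry" is the hash item being visited; here we only count it */
        (*count)++;
    }

    static int
    count_all_entries(HTAB *my_table)
    {
        int         nentries = 0;

        /* pass a pointer argument as a Datum */
        HashTableWalk(my_table, (HashtFunc) count_entry,
                      PointerGetDatum(&nentries));
        return nentries;
    }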

src/backend/storage/lmgr/lock.c
index b7ad1dcb5c7295742f3b99debf682e3985f2931c..bee814b0326753aff37e0dcc6c9a5144a59c082b 100644 (file)
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *       $Header: /cvsroot/pgsql/src/backend/storage/lmgr/lock.c,v 1.74 2000/12/22 00:51:54 tgl Exp $
+ *       $Header: /cvsroot/pgsql/src/backend/storage/lmgr/lock.c,v 1.75 2001/01/02 04:33:16 tgl Exp $
  *
  * NOTES
  *       Outside modules can create a lock table and acquire/release
@@ -1772,6 +1772,7 @@ DumpAllLocks(void)
        int                     lockmethod = DEFAULT_LOCKMETHOD;
        LOCKMETHODTABLE *lockMethodTable;
        HTAB       *holderTable;
+       HASH_SEQ_STATUS status;
 
        pid = getpid();
        ShmemPIDLookup(pid, &location);
@@ -1791,8 +1792,8 @@ DumpAllLocks(void)
        if (proc->waitLock)
                LOCK_PRINT("DumpAllLocks: waiting on", proc->waitLock, 0);
 
-       hash_seq(NULL);
-       while ((holder = (HOLDER *) hash_seq(holderTable)) &&
+       hash_seq_init(&status, holderTable);
+       while ((holder = (HOLDER *) hash_seq_search(&status)) &&
                   (holder != (HOLDER *) TRUE))
        {
                HOLDER_PRINT("DumpAllLocks", holder);

src/backend/utils/cache/relcache.c
index 1b7ce6fe18c280f77c8b86824570dab34958cd60..6fc68cca70560f80040d96bfe0ccfdc46a1285fd 100644 (file)
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *       $Header: /cvsroot/pgsql/src/backend/utils/cache/relcache.c,v 1.121 2000/12/22 23:12:06 tgl Exp $
+ *       $Header: /cvsroot/pgsql/src/backend/utils/cache/relcache.c,v 1.122 2001/01/02 04:33:19 tgl Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -253,10 +253,10 @@ static void RelationClearRelation(Relation relation, bool rebuildIt);
 #ifdef ENABLE_REINDEX_NAILED_RELATIONS
 static void RelationReloadClassinfo(Relation relation);
 #endif /* ENABLE_REINDEX_NAILED_RELATIONS */
-static void RelationFlushRelation(Relation *relationPtr,
-                                         int skipLocalRelations);
+static void RelationFlushRelation(Relation relation);
 static Relation RelationNameCacheGetRelation(const char *relationName);
-static void RelationCacheAbortWalker(Relation *relationPtr, int dummy);
+static void RelationCacheInvalidateWalker(Relation *relationPtr, Datum listp);
+static void RelationCacheAbortWalker(Relation *relationPtr, Datum dummy);
 static void init_irels(void);
 static void write_irels(void);
 
@@ -1639,14 +1639,12 @@ RelationClearRelation(Relation relation, bool rebuildIt)
         * we'd be unable to recover.
         */
        if (relation->rd_isnailed)
-#ifdef ENABLE_REINDEX_NAILED_RELATIONS
        {
+#ifdef ENABLE_REINDEX_NAILED_RELATIONS
                RelationReloadClassinfo(relation);
 #endif /* ENABLE_REINDEX_NAILED_RELATIONS */
                return;
-#ifdef ENABLE_REINDEX_NAILED_RELATIONS
        }
-#endif /* ENABLE_REINDEX_NAILED_RELATIONS */
 
        /*
         * Remove relation from hash tables
@@ -1774,26 +1772,15 @@ RelationClearRelation(Relation relation, bool rebuildIt)
  * RelationFlushRelation
  *
  *      Rebuild the relation if it is open (refcount > 0), else blow it away.
- *      If skipLocalRelations is TRUE, xact-local relations are ignored
- *      (which is useful when processing SI cache reset, since xact-local
- *      relations could not be targets of notifications from other backends).
- *
- *      The peculiar calling convention (pointer to pointer to relation)
- *      is needed so that we can use this routine as a hash table walker.
  * --------------------------------
  */
 static void
-RelationFlushRelation(Relation *relationPtr,
-                                         int skipLocalRelations)
+RelationFlushRelation(Relation relation)
 {
-       Relation        relation = *relationPtr;
        bool            rebuildIt;
 
        if (relation->rd_myxactonly)
        {
-               if (skipLocalRelations)
-                       return;                         /* don't touch local rels if so commanded */
-
                /*
                 * Local rels should always be rebuilt, not flushed; the relcache
                 * entry must live until RelationPurgeLocalRelation().
@@ -1878,7 +1865,7 @@ RelationIdInvalidateRelationCacheByRelationId(Oid relationId)
        RelationIdCacheLookup(relationId, relation);
 
        if (PointerIsValid(relation))
-               RelationFlushRelation(&relation, false);
+               RelationFlushRelation(relation);
 }
 
 #if NOT_USED
@@ -1900,35 +1887,12 @@ RelationFlushIndexes(Relation *r,
        if (relation->rd_rel->relkind == RELKIND_INDEX &&       /* XXX style */
                (!OidIsValid(accessMethodId) ||
                 relation->rd_rel->relam == accessMethodId))
-               RelationFlushRelation(&relation, false);
+               RelationFlushRelation(relation);
 }
 
 #endif
 
 
-#if NOT_USED
-void
-RelationIdInvalidateRelationCacheByAccessMethodId(Oid accessMethodId)
-{
-
-       /*
-        * 25 aug 1992:  mao commented out the ht walk below.  it should be
-        * doing the right thing, in theory, but flushing reldescs for index
-        * relations apparently doesn't work.  we want to cut 4.0.1, and i
-        * don't want to introduce new bugs.  this code never executed before,
-        * so i'm turning it off for now.  after the release is cut, i'll fix
-        * this up.
-        *
-        * 20 nov 1999:  this code has still never done anything, so I'm cutting
-        * the routine out of the system entirely.      tgl
-        */
-
-       HashTableWalk(RelationNameCache, (HashtFunc) RelationFlushIndexes,
-                                 accessMethodId);
-}
-
-#endif
-
 /*
  * RelationCacheInvalidate
  *      Blow away cached relation descriptors that have zero reference counts,
@@ -1938,12 +1902,59 @@ RelationIdInvalidateRelationCacheByAccessMethodId(Oid accessMethodId)
  *      so we do not touch transaction-local relations; they cannot be targets
  *      of cross-backend SI updates (and our own updates now go through a
  *      separate linked list that isn't limited by the SI message buffer size).
+ *
+ *      We do this in two phases: the first pass deletes deletable items, and
+ *      the second one rebuilds the rebuildable items.  This is essential for
+ *      safety, because HashTableWalk only copes with concurrent deletion of
+ *      the element it is currently visiting.  If a second SI overflow were to
+ *      occur while we are walking the table, resulting in recursive entry to
+ *      this routine, we could crash because the inner invocation blows away
+ *      the entry next to be visited by the outer scan.  But this way is OK,
+ *      because (a) during the first pass we won't process any more SI messages,
+ *      so HashTableWalk will complete safely; (b) during the second pass we
+ *      only hold onto pointers to nondeletable entries.
  */
 void
 RelationCacheInvalidate(void)
 {
-       HashTableWalk(RelationNameCache, (HashtFunc) RelationFlushRelation,
-                                 (int) true);
+       List   *rebuildList = NIL;
+       List   *l;
+
+       /* Phase 1 */
+       HashTableWalk(RelationNameCache,
+                                 (HashtFunc) RelationCacheInvalidateWalker,
+                                 PointerGetDatum(&rebuildList));
+
+       /* Phase 2: rebuild the items found to need rebuild in phase 1 */
+       foreach (l, rebuildList)
+       {
+               Relation        relation = (Relation) lfirst(l);
+
+               RelationClearRelation(relation, true);
+       }
+       freeList(rebuildList);
+}
+
+static void
+RelationCacheInvalidateWalker(Relation *relationPtr, Datum listp)
+{
+       Relation        relation = *relationPtr;
+       List   **rebuildList = (List **) DatumGetPointer(listp);
+
+       /* We can ignore xact-local relations, since they are never SI targets */
+       if (relation->rd_myxactonly)
+               return;
+
+       if (RelationHasReferenceCountZero(relation))
+       {
+               /* Delete this entry immediately */
+               RelationClearRelation(relation, false);
+       }
+       else
+       {
+               /* Add entry to list of stuff to rebuild in second pass */
+               *rebuildList = lcons(relation, *rebuildList);
+       }
 }
 
 /*
@@ -1962,12 +1973,13 @@ RelationCacheInvalidate(void)
 void
 RelationCacheAbort(void)
 {
-       HashTableWalk(RelationNameCache, (HashtFunc) RelationCacheAbortWalker,
+       HashTableWalk(RelationNameCache,
+                                 (HashtFunc) RelationCacheAbortWalker,
                                  0);
 }
 
 static void
-RelationCacheAbortWalker(Relation *relationPtr, int dummy)
+RelationCacheAbortWalker(Relation *relationPtr, Datum dummy)
 {
        Relation        relation = *relationPtr;
 

src/backend/utils/hash/dynahash.c
index 9646a7c5504424670f606218953fa10cb6133489..2174b82b467bd29488160eb1aece532e7215233d 100644 (file)
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *       $Header: /cvsroot/pgsql/src/backend/utils/hash/dynahash.c,v 1.32 2000/06/28 03:32:34 tgl Exp $
+ *       $Header: /cvsroot/pgsql/src/backend/utils/hash/dynahash.c,v 1.33 2001/01/02 04:33:20 tgl Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -681,50 +681,54 @@ hash_search(HTAB *hashp,
 }
 
 /*
- * hash_seq -- sequentially search through hash table and return
- *                        all the elements one by one, return NULL on error and
- *                        return TRUE in the end.
+ * hash_seq_init/_search
+ *                     Sequentially search through hash table and return
+ *                     all the elements one by one, return NULL on error and
+ *                     return (long *) TRUE in the end.
  *
+ * NOTE: caller may delete the returned element before continuing the scan.
+ * However, deleting any other element while the scan is in progress is
+ * UNDEFINED (it might be the one that curIndex is pointing at!).  Also,
+ * if elements are added to the table while the scan is in progress, it is
+ * unspecified whether they will be visited by the scan or not.
  */
+void
+hash_seq_init(HASH_SEQ_STATUS *status, HTAB *hashp)
+{
+       status->hashp = hashp;
+       status->curBucket = 0;
+       status->curIndex = INVALID_INDEX;
+}
+
 long *
-hash_seq(HTAB *hashp)
+hash_seq_search(HASH_SEQ_STATUS *status)
 {
-       static long curBucket = 0;
-       static BUCKET_INDEX curIndex;
-       ELEMENT    *curElem;
-       long            segment_num;
-       long            segment_ndx;
-       SEGMENT         segp;
-       HHDR       *hctl;
+       HTAB       *hashp = status->hashp;
+       HHDR       *hctl = hashp->hctl;
 
-       if (hashp == NULL)
+       while (status->curBucket <= hctl->max_bucket)
        {
+               long            segment_num;
+               long            segment_ndx;
+               SEGMENT         segp;
 
-               /*
-                * reset static state
-                */
-               curBucket = 0;
-               curIndex = INVALID_INDEX;
-               return (long *) NULL;
-       }
-
-       hctl = hashp->hctl;
-       while (curBucket <= hctl->max_bucket)
-       {
-               if (curIndex != INVALID_INDEX)
+               if (status->curIndex != INVALID_INDEX)
                {
-                       curElem = GET_BUCKET(hashp, curIndex);
-                       curIndex = curElem->next;
-                       if (curIndex == INVALID_INDEX)          /* end of this bucket */
-                               ++curBucket;
+                       /* Continuing scan of curBucket... */
+                       ELEMENT    *curElem;
+
+                       curElem = GET_BUCKET(hashp, status->curIndex);
+                       status->curIndex = curElem->next;
+                       if (status->curIndex == INVALID_INDEX)  /* end of this bucket */
+                               ++status->curBucket;
                        return &(curElem->key);
                }
 
                /*
                 * initialize the search within this bucket.
                 */
-               segment_num = curBucket >> hctl->sshift;
-               segment_ndx = MOD(curBucket, hctl->ssize);
+               segment_num = status->curBucket >> hctl->sshift;
+               segment_ndx = MOD(status->curBucket, hctl->ssize);
 
                /*
                 * first find the right segment in the table directory.
@@ -742,10 +746,10 @@ hash_seq(HTAB *hashp)
                 * directory of valid stuff.  if there are elements in the bucket
                 * chains that point to the freelist we're in big trouble.
                 */
-               curIndex = segp[segment_ndx];
+               status->curIndex = segp[segment_ndx];
 
-               if (curIndex == INVALID_INDEX)  /* empty bucket */
-                       ++curBucket;
+               if (status->curIndex == INVALID_INDEX)  /* empty bucket */
+                       ++status->curBucket;
        }
 
        return (long *) TRUE;           /* out of buckets */
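
Because the scan state now lives in a caller-supplied HASH_SEQ_STATUS instead of
static variables inside hash_seq(), two scans can be in progress at the same time
without clobbering one another, which is what the commit message means by a search
"active anywhere in the system".  A minimal sketch (function and table names are
hypothetical; checks for a NULL error return are omitted for brevity):

    static void
    scan_two_tables(HTAB *table_a, HTAB *table_b)
    {
        HASH_SEQ_STATUS outer_scan;
        HASH_SEQ_STATUS inner_scan;
        long       *outer_ent;
        long       *inner_ent;

        hash_seq_init(&outer_scan, table_a);
        while ((outer_ent = hash_seq_search(&outer_scan)) != (long *) TRUE)
        {
            /* starting a nested scan no longer resets the outer one */
            hash_seq_init(&inner_scan, table_b);
            while ((inner_ent = hash_seq_search(&inner_scan)) != (long *) TRUE)
            {
                /* compare or combine the two entries here */
            }
        }
    }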

src/backend/utils/mmgr/portalmem.c
index 21aeb0a8a5f9080a841177efbbfaf1f68fbfc616..fe68d5adc648448b30cf2cb2232fac4f4cb119e1 100644 (file)
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *       $Header: /cvsroot/pgsql/src/backend/utils/mmgr/portalmem.c,v 1.37 2000/06/28 03:32:50 tgl Exp $
+ *       $Header: /cvsroot/pgsql/src/backend/utils/mmgr/portalmem.c,v 1.38 2001/01/02 04:33:24 tgl Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -55,8 +55,6 @@
 #include "utils/memutils.h"
 #include "utils/portal.h"
 
-static void CollectNamedPortals(Portal *portalP, int destroy);
-
 /* ----------------
  *             Global state
  * ----------------
@@ -120,65 +118,11 @@ do { \
 static MemoryContext PortalMemory = NULL;
 
 
-/* ----------------------------------------------------------------
- *                               private internal support routines
- * ----------------------------------------------------------------
- */
-
-/*
- * This routine is used to collect all portals created in this xaction
- * and then destroy them.  There is a little trickiness required as a
- * result of the dynamic hashing interface to getting every hash entry
- * sequentially.  Its use of static variables requires that we get every
- * entry *before* we destroy anything (destroying updates the hashtable
- * and screws up the sequential walk of the table). -mer 17 Aug 1992
- */
-static void
-CollectNamedPortals(Portal *portalP, int destroy)
-{
-       static Portal *portalList = (Portal *) NULL;
-       static int      listIndex = 0;
-       static int      maxIndex = 9;
-
-       if (portalList == (Portal *) NULL)
-               portalList = (Portal *) malloc(10 * sizeof(Portal));
-
-       if (destroy != 0)
-       {
-               int                     i;
-
-               for (i = 0; i < listIndex; i++)
-                       PortalDrop(&portalList[i]);
-               listIndex = 0;
-       }
-       else
-       {
-               Assert(portalP);
-               Assert(*portalP);
-
-               portalList[listIndex] = *portalP;
-               listIndex++;
-               if (listIndex == maxIndex)
-               {
-                       portalList = (Portal *)
-                               realloc(portalList, (maxIndex + 11) * sizeof(Portal));
-                       maxIndex += 10;
-               }
-       }
-       return;
-}
-
-void
-AtEOXact_portals()
-{
-       HashTableWalk(PortalHashTable, (HashtFunc) CollectNamedPortals, 0);
-       CollectNamedPortals(NULL, 1);
-}
-
 /* ----------------------------------------------------------------
  *                                public portal interface functions
  * ----------------------------------------------------------------
  */
+
 /*
  * EnablePortalManager
  *             Enables the portal management module at backend startup.
@@ -337,6 +281,9 @@ CreatePortal(char *name)
  * Exceptions:
  *             BadState if called when disabled.
  *             BadArg if portal is invalid.
+ *
+ * Note peculiar calling convention: pass a pointer to a portal pointer.
+ * This is mainly so that this routine can be used as a hashtable walker.
  */
 void
 PortalDrop(Portal *portalP)
@@ -360,6 +307,15 @@ PortalDrop(Portal *portalP)
        pfree(portal);
 }
 
+/*
+ * Destroy all portals created in the current transaction (ie, all of them).
+ */
+void
+AtEOXact_portals(void)
+{
+       HashTableWalk(PortalHashTable, (HashtFunc) PortalDrop, 0);
+}
+
 /*
  * PortalGetHeapMemory
  *             Returns heap memory context for a given portal.

src/include/lib/hasht.h
index 78318f954d46592506ff6e5df3ba2636e7fef722..04700e648cd31b6a8ac37cd2cfb59ad07615e916 100644 (file)
@@ -8,7 +8,7 @@
  * Portions Copyright (c) 1996-2000, PostgreSQL, Inc
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $Id: hasht.h,v 1.10 2000/01/31 04:35:55 tgl Exp $
+ * $Id: hasht.h,v 1.11 2001/01/02 04:33:24 tgl Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -17,8 +17,8 @@
 
 #include "utils/hsearch.h"
 
-typedef void (*HashtFunc) (void *hashitem, int arg);
+typedef void (*HashtFunc) (void *hashitem, Datum arg);
 
-extern void HashTableWalk(HTAB *hashtable, HashtFunc function, int arg);
+extern void HashTableWalk(HTAB *hashtable, HashtFunc function, Datum arg);
 
 #endif  /* HASHT_H */

src/include/utils/hsearch.h
index 6d857175c94dd9bb5878f26324fdedce5ea9c262..c58ccb1a16b9ff8f4af90210214d4c0bf76940cb 100644 (file)
@@ -7,7 +7,7 @@
  * Portions Copyright (c) 1996-2000, PostgreSQL, Inc
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $Id: hsearch.h,v 1.16 2000/06/28 03:33:33 tgl Exp $
+ * $Id: hsearch.h,v 1.17 2001/01/02 04:33:24 tgl Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -131,6 +131,14 @@ typedef enum
        HASH_REMOVE_SAVED
 } HASHACTION;
 
+/* hash_seq status (should be considered an opaque type by callers) */
+typedef struct
+{
+       HTAB   *hashp;
+       long    curBucket;
+       BUCKET_INDEX curIndex;
+} HASH_SEQ_STATUS;
+
 /*
  * prototypes from functions in dynahash.c
  */
@@ -139,7 +147,8 @@ extern void hash_destroy(HTAB *hashp);
 extern void hash_stats(char *where, HTAB *hashp);
 extern long *hash_search(HTAB *hashp, char *keyPtr, HASHACTION action,
                        bool *foundPtr);
-extern long *hash_seq(HTAB *hashp);
+extern void hash_seq_init(HASH_SEQ_STATUS *status, HTAB *hashp);
+extern long *hash_seq_search(HASH_SEQ_STATUS *status);
 extern long hash_estimate_size(long num_entries, long keysize, long datasize);
 extern long hash_select_dirsize(long num_entries);