}
static void
-_xl_remove_hash_entry(XLogRelDesc **edata, int dummy)
+_xl_remove_hash_entry(XLogRelDesc **edata, Datum dummy)
{
XLogRelCacheEntry *hentry;
bool found;
if (!_xlrelarr)
return;
- HashTableWalk(_xlrelcache, (HashtFunc)_xl_remove_hash_entry, 0);
+ HashTableWalk(_xlrelcache, (HashtFunc) _xl_remove_hash_entry, 0);
hash_destroy(_xlrelcache);
free(_xlrelarr);
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/lib/Attic/hasht.c,v 1.13 2000/01/31 04:35:51 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/lib/Attic/hasht.c,v 1.14 2001/01/02 04:33:15 tgl Exp $
*
*-------------------------------------------------------------------------
*/
/* -----------------------------------
* HashTableWalk
*
- * call function on every element in hashtable
- * one extra argument (arg) may be supplied
+ * call the given function on every element in the hashtable
+ *
+ * one extra argument (arg) may be supplied
+ *
+ * NOTE: it is allowed for the given function to delete the hashtable entry
+ * it is passed. However, deleting any other element while the scan is
+ * in progress is UNDEFINED (see hash_seq functions). Also, if elements are
+ * added to the table while the scan is in progress, it is unspecified
+ * whether they will be visited by the scan or not.
* -----------------------------------
*/
void
-HashTableWalk(HTAB *hashtable, HashtFunc function, int arg)
+HashTableWalk(HTAB *hashtable, HashtFunc function, Datum arg)
{
+ HASH_SEQ_STATUS status;
long *hashent;
void *data;
int keysize;
+ hash_seq_init(&status, hashtable);
keysize = hashtable->hctl->keysize;
- hash_seq((HTAB *) NULL);
- while ((hashent = hash_seq(hashtable)) != (long *) TRUE)
+
+ while ((hashent = hash_seq_search(&status)) != (long *) TRUE)
{
if (hashent == NULL)
- elog(FATAL, "error in HashTableWalk.");
+ elog(FATAL, "error in HashTableWalk");
/*
* XXX the corresponding hash table insertion does NOT LONGALIGN
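To illustrate the revised walker convention, here is a minimal sketch of a caller passing extra state through the new Datum argument. HashTableWalk, the HashtFunc cast, and the PointerGetDatum/DatumGetPointer macros are the real interfaces touched by this patch; the MyItem type, sum_walker, and sum_all_counts are hypothetical. Note that HashTableWalk hands the function a pointer to the data portion of each entry (the part after the key), which is why the relcache-style walkers take a pointer-to-pointer.

#include "postgres.h"
#include "lib/hasht.h"

/* hypothetical payload that the hashtable's data portion points at */
typedef struct MyItem
{
	int			count;
} MyItem;

/* walker: accumulate counts into the long that 'arg' points at */
static void
sum_walker(MyItem **itemPtr, Datum arg)
{
	long	   *total = (long *) DatumGetPointer(arg);

	*total += (*itemPtr)->count;
}

/* hypothetical driver: total the counts of every cached item */
static long
sum_all_counts(HTAB *my_hashtable)
{
	long		total = 0;

	HashTableWalk(my_hashtable, (HashtFunc) sum_walker,
				  PointerGetDatum(&total));
	return total;
}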
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/storage/lmgr/lock.c,v 1.74 2000/12/22 00:51:54 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/storage/lmgr/lock.c,v 1.75 2001/01/02 04:33:16 tgl Exp $
*
* NOTES
* Outside modules can create a lock table and acquire/release
int lockmethod = DEFAULT_LOCKMETHOD;
LOCKMETHODTABLE *lockMethodTable;
HTAB *holderTable;
+ HASH_SEQ_STATUS status;
pid = getpid();
ShmemPIDLookup(pid, &location);
if (proc->waitLock)
LOCK_PRINT("DumpAllLocks: waiting on", proc->waitLock, 0);
- hash_seq(NULL);
- while ((holder = (HOLDER *) hash_seq(holderTable)) &&
+ hash_seq_init(&status, holderTable);
+ while ((holder = (HOLDER *) hash_seq_search(&status)) &&
(holder != (HOLDER *) TRUE))
{
HOLDER_PRINT("DumpAllLocks", holder);
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/cache/relcache.c,v 1.121 2000/12/22 23:12:06 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/cache/relcache.c,v 1.122 2001/01/02 04:33:19 tgl Exp $
*
*-------------------------------------------------------------------------
*/
#ifdef ENABLE_REINDEX_NAILED_RELATIONS
static void RelationReloadClassinfo(Relation relation);
#endif /* ENABLE_REINDEX_NAILED_RELATIONS */
-static void RelationFlushRelation(Relation *relationPtr,
- int skipLocalRelations);
+static void RelationFlushRelation(Relation relation);
static Relation RelationNameCacheGetRelation(const char *relationName);
-static void RelationCacheAbortWalker(Relation *relationPtr, int dummy);
+static void RelationCacheInvalidateWalker(Relation *relationPtr, Datum listp);
+static void RelationCacheAbortWalker(Relation *relationPtr, Datum dummy);
static void init_irels(void);
static void write_irels(void);
* we'd be unable to recover.
*/
if (relation->rd_isnailed)
-#ifdef ENABLE_REINDEX_NAILED_RELATIONS
{
+#ifdef ENABLE_REINDEX_NAILED_RELATIONS
RelationReloadClassinfo(relation);
#endif /* ENABLE_REINDEX_NAILED_RELATIONS */
return;
-#ifdef ENABLE_REINDEX_NAILED_RELATIONS
}
-#endif /* ENABLE_REINDEX_NAILED_RELATIONS */
/*
* Remove relation from hash tables
* RelationFlushRelation
*
* Rebuild the relation if it is open (refcount > 0), else blow it away.
- * If skipLocalRelations is TRUE, xact-local relations are ignored
- * (which is useful when processing SI cache reset, since xact-local
- * relations could not be targets of notifications from other backends).
- *
- * The peculiar calling convention (pointer to pointer to relation)
- * is needed so that we can use this routine as a hash table walker.
* --------------------------------
*/
static void
-RelationFlushRelation(Relation *relationPtr,
- int skipLocalRelations)
+RelationFlushRelation(Relation relation)
{
- Relation relation = *relationPtr;
bool rebuildIt;
if (relation->rd_myxactonly)
{
- if (skipLocalRelations)
- return; /* don't touch local rels if so commanded */
-
/*
* Local rels should always be rebuilt, not flushed; the relcache
* entry must live until RelationPurgeLocalRelation().
RelationIdCacheLookup(relationId, relation);
if (PointerIsValid(relation))
- RelationFlushRelation(&relation, false);
+ RelationFlushRelation(relation);
}
#if NOT_USED
if (relation->rd_rel->relkind == RELKIND_INDEX && /* XXX style */
(!OidIsValid(accessMethodId) ||
relation->rd_rel->relam == accessMethodId))
- RelationFlushRelation(&relation, false);
+ RelationFlushRelation(relation);
}
#endif
-#if NOT_USED
-void
-RelationIdInvalidateRelationCacheByAccessMethodId(Oid accessMethodId)
-{
-
- /*
- * 25 aug 1992: mao commented out the ht walk below. it should be
- * doing the right thing, in theory, but flushing reldescs for index
- * relations apparently doesn't work. we want to cut 4.0.1, and i
- * don't want to introduce new bugs. this code never executed before,
- * so i'm turning it off for now. after the release is cut, i'll fix
- * this up.
- *
- * 20 nov 1999: this code has still never done anything, so I'm cutting
- * the routine out of the system entirely. tgl
- */
-
- HashTableWalk(RelationNameCache, (HashtFunc) RelationFlushIndexes,
- accessMethodId);
-}
-
-#endif
-
/*
* RelationCacheInvalidate
* Blow away cached relation descriptors that have zero reference counts,
* so we do not touch transaction-local relations; they cannot be targets
* of cross-backend SI updates (and our own updates now go through a
* separate linked list that isn't limited by the SI message buffer size).
+ *
+ * We do this in two phases: the first pass deletes deletable items, and
+ * the second one rebuilds the rebuildable items. This is essential for
+ * safety, because HashTableWalk only copes with concurrent deletion of
+ * the element it is currently visiting. If a second SI overflow were to
+ * occur while we are walking the table, resulting in recursive entry to
+ * this routine, we could crash because the inner invocation blows away
+ * the entry next to be visited by the outer scan. But this way is OK,
+ * because (a) during the first pass we won't process any more SI messages,
+ * so HashTableWalk will complete safely; (b) during the second pass we
+ * only hold onto pointers to nondeletable entries.
*/
void
RelationCacheInvalidate(void)
{
- HashTableWalk(RelationNameCache, (HashtFunc) RelationFlushRelation,
- (int) true);
+ List *rebuildList = NIL;
+ List *l;
+
+ /* Phase 1 */
+ HashTableWalk(RelationNameCache,
+ (HashtFunc) RelationCacheInvalidateWalker,
+ PointerGetDatum(&rebuildList));
+
+ /* Phase 2: rebuild the items found to need rebuild in phase 1 */
+ foreach (l, rebuildList)
+ {
+ Relation relation = (Relation) lfirst(l);
+
+ RelationClearRelation(relation, true);
+ }
+ freeList(rebuildList);
+}
+
+static void
+RelationCacheInvalidateWalker(Relation *relationPtr, Datum listp)
+{
+ Relation relation = *relationPtr;
+ List **rebuildList = (List **) DatumGetPointer(listp);
+
+ /* We can ignore xact-local relations, since they are never SI targets */
+ if (relation->rd_myxactonly)
+ return;
+
+ if (RelationHasReferenceCountZero(relation))
+ {
+ /* Delete this entry immediately */
+ RelationClearRelation(relation, false);
+ }
+ else
+ {
+ /* Add entry to list of stuff to rebuild in second pass */
+ *rebuildList = lcons(relation, *rebuildList);
+ }
}
/*
void
RelationCacheAbort(void)
{
- HashTableWalk(RelationNameCache, (HashtFunc) RelationCacheAbortWalker,
+ HashTableWalk(RelationNameCache,
+ (HashtFunc) RelationCacheAbortWalker,
0);
}
static void
-RelationCacheAbortWalker(Relation *relationPtr, int dummy)
+RelationCacheAbortWalker(Relation *relationPtr, Datum dummy)
{
Relation relation = *relationPtr;
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/hash/dynahash.c,v 1.32 2000/06/28 03:32:34 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/hash/dynahash.c,v 1.33 2001/01/02 04:33:20 tgl Exp $
*
*-------------------------------------------------------------------------
*/
}
/*
- * hash_seq -- sequentially search through hash table and return
- * all the elements one by one, return NULL on error and
- * return TRUE in the end.
+ * hash_seq_init/_search
+ *			Sequentially search through the hash table, returning the
+ *			elements one by one; return NULL on error, and return
+ *			(long *) TRUE when the end of the table is reached.
*
+ * NOTE: caller may delete the returned element before continuing the scan.
+ * However, deleting any other element while the scan is in progress is
+ * UNDEFINED (it might be the one that curIndex is pointing at!). Also,
+ * if elements are added to the table while the scan is in progress, it is
+ * unspecified whether they will be visited by the scan or not.
*/
+void
+hash_seq_init(HASH_SEQ_STATUS *status, HTAB *hashp)
+{
+ status->hashp = hashp;
+ status->curBucket = 0;
+ status->curIndex = INVALID_INDEX;
+}
+
long *
-hash_seq(HTAB *hashp)
+hash_seq_search(HASH_SEQ_STATUS *status)
{
- static long curBucket = 0;
- static BUCKET_INDEX curIndex;
- ELEMENT *curElem;
- long segment_num;
- long segment_ndx;
- SEGMENT segp;
- HHDR *hctl;
+ HTAB *hashp = status->hashp;
+ HHDR *hctl = hashp->hctl;
- if (hashp == NULL)
+ while (status->curBucket <= hctl->max_bucket)
{
+ long segment_num;
+ long segment_ndx;
+ SEGMENT segp;
- /*
- * reset static state
- */
- curBucket = 0;
- curIndex = INVALID_INDEX;
- return (long *) NULL;
- }
-
- hctl = hashp->hctl;
- while (curBucket <= hctl->max_bucket)
- {
- if (curIndex != INVALID_INDEX)
+ if (status->curIndex != INVALID_INDEX)
{
- curElem = GET_BUCKET(hashp, curIndex);
- curIndex = curElem->next;
- if (curIndex == INVALID_INDEX) /* end of this bucket */
- ++curBucket;
+ /* Continuing scan of curBucket... */
+ ELEMENT *curElem;
+
+ curElem = GET_BUCKET(hashp, status->curIndex);
+ status->curIndex = curElem->next;
+ if (status->curIndex == INVALID_INDEX) /* end of this bucket */
+ ++status->curBucket;
return &(curElem->key);
}
/*
* initialize the search within this bucket.
*/
- segment_num = curBucket >> hctl->sshift;
- segment_ndx = MOD(curBucket, hctl->ssize);
+ segment_num = status->curBucket >> hctl->sshift;
+ segment_ndx = MOD(status->curBucket, hctl->ssize);
/*
* first find the right segment in the table directory.
* directory of valid stuff. if there are elements in the bucket
* chains that point to the freelist we're in big trouble.
*/
- curIndex = segp[segment_ndx];
+ status->curIndex = segp[segment_ndx];
- if (curIndex == INVALID_INDEX) /* empty bucket */
- ++curBucket;
+ if (status->curIndex == INVALID_INDEX) /* empty bucket */
+ ++status->curBucket;
}
return (long *) TRUE; /* out of buckets */
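As a usage sketch of the new scan API (and of the deletion rule stated in the NOTE above), the loop below walks a table and removes the entries that satisfy a caller-supplied predicate. hash_seq_init, hash_seq_search, hash_search, and HASH_REMOVE are the real interfaces; prune_table and the doomed() predicate are hypothetical, and the failure handling is an assumption modeled on existing callers.

#include "postgres.h"
#include "utils/hsearch.h"

static void
prune_table(HTAB *table, bool (*doomed) (long *entry))
{
	HASH_SEQ_STATUS status;
	long	   *entry;

	hash_seq_init(&status, table);
	while ((entry = hash_seq_search(&status)) != (long *) TRUE)
	{
		bool		found;

		if (entry == NULL)
			elog(ERROR, "prune_table: hash_seq_search failed");

		if (doomed(entry))
		{
			/*
			 * The scan state has already advanced past this element, so
			 * deleting it here is explicitly allowed.  hash_seq_search
			 * returns a pointer to the entry's key, which is what
			 * hash_search wants for HASH_REMOVE.
			 */
			if (hash_search(table, (char *) entry, HASH_REMOVE, &found) == NULL
				|| !found)
				elog(ERROR, "prune_table: hash table corrupted");
		}
	}
}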
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/mmgr/portalmem.c,v 1.37 2000/06/28 03:32:50 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/mmgr/portalmem.c,v 1.38 2001/01/02 04:33:24 tgl Exp $
*
*-------------------------------------------------------------------------
*/
#include "utils/memutils.h"
#include "utils/portal.h"
-static void CollectNamedPortals(Portal *portalP, int destroy);
-
/* ----------------
* Global state
* ----------------
static MemoryContext PortalMemory = NULL;
-/* ----------------------------------------------------------------
- * private internal support routines
- * ----------------------------------------------------------------
- */
-
-/*
- * This routine is used to collect all portals created in this xaction
- * and then destroy them. There is a little trickiness required as a
- * result of the dynamic hashing interface to getting every hash entry
- * sequentially. Its use of static variables requires that we get every
- * entry *before* we destroy anything (destroying updates the hashtable
- * and screws up the sequential walk of the table). -mer 17 Aug 1992
- */
-static void
-CollectNamedPortals(Portal *portalP, int destroy)
-{
- static Portal *portalList = (Portal *) NULL;
- static int listIndex = 0;
- static int maxIndex = 9;
-
- if (portalList == (Portal *) NULL)
- portalList = (Portal *) malloc(10 * sizeof(Portal));
-
- if (destroy != 0)
- {
- int i;
-
- for (i = 0; i < listIndex; i++)
- PortalDrop(&portalList[i]);
- listIndex = 0;
- }
- else
- {
- Assert(portalP);
- Assert(*portalP);
-
- portalList[listIndex] = *portalP;
- listIndex++;
- if (listIndex == maxIndex)
- {
- portalList = (Portal *)
- realloc(portalList, (maxIndex + 11) * sizeof(Portal));
- maxIndex += 10;
- }
- }
- return;
-}
-
-void
-AtEOXact_portals()
-{
- HashTableWalk(PortalHashTable, (HashtFunc) CollectNamedPortals, 0);
- CollectNamedPortals(NULL, 1);
-}
-
/* ----------------------------------------------------------------
* public portal interface functions
* ----------------------------------------------------------------
*/
+
/*
* EnablePortalManager
* Enables the portal management module at backend startup.
* Exceptions:
* BadState if called when disabled.
* BadArg if portal is invalid.
+ *
+ * Note peculiar calling convention: pass a pointer to a portal pointer.
+ * This is mainly so that this routine can be used as a hashtable walker.
*/
void
PortalDrop(Portal *portalP)
pfree(portal);
}
+/*
+ * Destroy all portals created in the current transaction (ie, all of them).
+ */
+void
+AtEOXact_portals(void)
+{
+ HashTableWalk(PortalHashTable, (HashtFunc) PortalDrop, 0);
+}
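To make the pointer-to-pointer convention concrete, an ordinary (non-walker) call site would look roughly like this. drop_portal_by_name is a hypothetical helper; GetPortalByName, PortalIsValid, and PortalDrop are taken from the existing portal API as I understand it.

#include "postgres.h"
#include "utils/portal.h"

/* hypothetical helper: drop one named portal explicitly */
static void
drop_portal_by_name(char *name)
{
	Portal		portal = GetPortalByName(name);

	if (PortalIsValid(portal))
		PortalDrop(&portal);	/* note: pointer to the local pointer */
}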
+
/*
* PortalGetHeapMemory
* Returns heap memory context for a given portal.
* Portions Copyright (c) 1996-2000, PostgreSQL, Inc
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $Id: hasht.h,v 1.10 2000/01/31 04:35:55 tgl Exp $
+ * $Id: hasht.h,v 1.11 2001/01/02 04:33:24 tgl Exp $
*
*-------------------------------------------------------------------------
*/
#include "utils/hsearch.h"
-typedef void (*HashtFunc) (void *hashitem, int arg);
+typedef void (*HashtFunc) (void *hashitem, Datum arg);
-extern void HashTableWalk(HTAB *hashtable, HashtFunc function, int arg);
+extern void HashTableWalk(HTAB *hashtable, HashtFunc function, Datum arg);
#endif /* HASHT_H */
* Portions Copyright (c) 1996-2000, PostgreSQL, Inc
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $Id: hsearch.h,v 1.16 2000/06/28 03:33:33 tgl Exp $
+ * $Id: hsearch.h,v 1.17 2001/01/02 04:33:24 tgl Exp $
*
*-------------------------------------------------------------------------
*/
HASH_REMOVE_SAVED
} HASHACTION;
+/* hash_seq status (should be considered an opaque type by callers) */
+typedef struct
+{
+ HTAB *hashp;
+ long curBucket;
+ BUCKET_INDEX curIndex;
+} HASH_SEQ_STATUS;
+
/*
* prototypes from functions in dynahash.c
*/
extern void hash_stats(char *where, HTAB *hashp);
extern long *hash_search(HTAB *hashp, char *keyPtr, HASHACTION action,
bool *foundPtr);
-extern long *hash_seq(HTAB *hashp);
+extern void hash_seq_init(HASH_SEQ_STATUS *status, HTAB *hashp);
+extern long *hash_seq_search(HASH_SEQ_STATUS *status);
extern long hash_estimate_size(long num_entries, long keysize, long datasize);
extern long hash_select_dirsize(long num_entries);
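Because the scan state now lives in a caller-supplied HASH_SEQ_STATUS rather than the static variables the old hash_seq relied on, scans can nest (or several can be in flight at once). A hypothetical sketch, assuming two read-only tables and no concurrent modification; count_pairs and the table arguments are placeholders.

#include "postgres.h"
#include "utils/hsearch.h"

/* count ordered pairs of entries drawn from two tables */
static long
count_pairs(HTAB *outer_table, HTAB *inner_table)
{
	HASH_SEQ_STATUS outer_status;
	long	   *outer_ent;
	long		npairs = 0;

	hash_seq_init(&outer_status, outer_table);
	while ((outer_ent = hash_seq_search(&outer_status)) != (long *) TRUE)
	{
		HASH_SEQ_STATUS inner_status;
		long	   *inner_ent;

		if (outer_ent == NULL)
			elog(ERROR, "count_pairs: outer scan failed");

		/* inner scan uses its own state, so it cannot disturb the outer one */
		hash_seq_init(&inner_status, inner_table);
		while ((inner_ent = hash_seq_search(&inner_status)) != (long *) TRUE)
		{
			if (inner_ent == NULL)
				elog(ERROR, "count_pairs: inner scan failed");
			npairs++;
		}
	}
	return npairs;
}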