*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/cache/catcache.c,v 1.122 2005/08/08 19:17:22 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/cache/catcache.c,v 1.123 2005/08/13 22:18:07 tgl Exp $
*
*-------------------------------------------------------------------------
*/
static CatCTup *CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp,
uint32 hashValue, Index hashIndex,
bool negative);
+static void CatalogCacheCleanup(CatCTup *savect);
static HeapTuple build_dummy_tuple(CatCache *cache, int nkeys, ScanKey skeys);
* Unlink and delete the given cache entry
*
* NB: if it is a member of a CatCList, the CatCList is deleted too.
+ * Both the cache entry and the list had better have zero refcount.
*/
static void
CatCacheRemoveCTup(CatCache *cache, CatCTup *ct)
if (ct->negative ||
ItemPointerEquals(pointer, &ct->tuple.t_self))
{
- if (ct->refcount > 0)
+ if (ct->refcount > 0 ||
+ (ct->c_list && ct->c_list->refcount > 0))
+ {
ct->dead = true;
+ /* list, if any, was marked dead above */
+ Assert(ct->c_list == NULL || ct->c_list->dead);
+ }
else
CatCacheRemoveCTup(ccp, ct);
CACHE1_elog(DEBUG2, "CatalogCacheIdInvalidate: invalidated");
nextelt = DLGetSucc(elt);
- if (ct->refcount > 0)
+ if (ct->refcount > 0 ||
+ (ct->c_list && ct->c_list->refcount > 0))
+ {
ct->dead = true;
+ /* list, if any, was marked dead above */
+ Assert(ct->c_list == NULL || ct->c_list->dead);
+ }
else
CatCacheRemoveCTup(cache, ct);
#ifdef CATCACHE_STATS
if (tupRelid == relId)
{
- if (ct->refcount > 0)
+ if (ct->refcount > 0 ||
+ (ct->c_list && ct->c_list->refcount > 0))
+ {
ct->dead = true;
+ /* parent list must be considered dead too */
+ if (ct->c_list)
+ ct->c_list->dead = true;
+ }
else
CatCacheRemoveCTup(cache, ct);
#ifdef CATCACHE_STATS
ct->refcount--;
ResourceOwnerForgetCatCacheRef(CurrentResourceOwner, &ct->tuple);
- if (ct->refcount == 0
+ if (
#ifndef CATCACHE_FORCE_RELEASE
- && ct->dead
+ ct->dead &&
#endif
- )
+ ct->refcount == 0 &&
+ (ct->c_list == NULL || ct->c_list->refcount == 0))
CatCacheRemoveCTup(ct->my_cache, ct);
}
continue;
/*
- * we found a matching list: move each of its members to the front
- * of the global LRU list. Also move the list itself to the front
- * of the cache's list-of-lists, to speed subsequent searches. (We
- * do not move the members to the fronts of their hashbucket
+ * We found a matching list: mark it as touched since the last
+ * CatalogCacheCleanup() sweep. Also move the list to the front
+ * of the cache's list-of-lists, to speed subsequent searches.
+ * (We do not move the members to the fronts of their hashbucket
* lists, however, since there's no point in that unless they are
- * searched for individually.) Also bump the members' refcounts.
- * (member refcounts are NOT registered separately with the
- * resource owner.)
+ * searched for individually.)
*/
- ResourceOwnerEnlargeCatCacheListRefs(CurrentResourceOwner);
- for (i = 0; i < cl->n_members; i++)
- {
- cl->members[i]->refcount++;
- DLMoveToFront(&cl->members[i]->lrulist_elem);
- }
+ cl->touched = true;
DLMoveToFront(&cl->cache_elem);
/* Bump the list's refcount and return it */
+ ResourceOwnerEnlargeCatCacheListRefs(CurrentResourceOwner);
cl->refcount++;
ResourceOwnerRememberCatCacheListRef(CurrentResourceOwner, cl);
* relation. For each matching tuple found in the relation, use an
* existing cache entry if possible, else build a new one.
*
- * We have to bump the member refcounts immediately to ensure they
+ * We have to bump the member refcounts temporarily to ensure they
* won't get dropped from the cache while loading other members.
* We use a PG_TRY block to ensure we can undo those refcounts if
* we get an error before we finish constructing the CatCList.
}
/* Careful here: add entry to ctlist, then bump its refcount */
+ /* This way leaves state correct if lappend runs out of memory */
ctlist = lappend(ctlist, ct);
ct->refcount++;
}
Assert(ct->c_list == NULL);
Assert(ct->refcount > 0);
ct->refcount--;
- if (ct->refcount == 0
+ if (
#ifndef CATCACHE_FORCE_RELEASE
- && ct->dead
+ ct->dead &&
#endif
- )
+ ct->refcount == 0 &&
+ (ct->c_list == NULL || ct->c_list->refcount == 0))
CatCacheRemoveCTup(cache, ct);
}
cl->refcount = 0; /* for the moment */
cl->dead = false;
cl->ordered = ordered;
+ cl->touched = false; /* we already moved members to front */
cl->nkeys = nkeys;
cl->hash_value = lHashValue;
cl->n_members = nmembers;
cl->members[i++] = ct = (CatCTup *) lfirst(ctlist_item);
Assert(ct->c_list == NULL);
ct->c_list = cl;
+ /* release the temporary refcount on the member */
+ Assert(ct->refcount > 0);
+ ct->refcount--;
/* mark list dead if any members already dead */
if (ct->dead)
cl->dead = true;
/*
* ReleaseCatCacheList
*
- * Decrement the reference counts of a catcache list.
+ * Decrement the reference count of a catcache list.
*/
void
ReleaseCatCacheList(CatCList *list)
{
- int i;
-
/* Safety checks to ensure we were handed a cache entry */
Assert(list->cl_magic == CL_MAGIC);
Assert(list->refcount > 0);
-
- for (i = list->n_members; --i >= 0;)
- {
- CatCTup *ct = list->members[i];
-
- Assert(ct->refcount > 0);
-
- ct->refcount--;
-
- if (ct->dead)
- list->dead = true;
- /* can't remove tuple before list is removed */
- }
-
list->refcount--;
ResourceOwnerForgetCatCacheListRef(CurrentResourceOwner, list);
- if (list->refcount == 0
+ if (
#ifndef CATCACHE_FORCE_RELEASE
- && list->dead
+ list->dead &&
#endif
- )
+ list->refcount == 0)
CatCacheRemoveCList(list->my_cache, list);
}
/*
* If we've exceeded the desired size of the caches, try to throw away
- * the least recently used entry. NB: be careful not to throw away
+ * the least recently used entries. NB: be careful not to throw away
* the newly-built entry...
*/
if (CacheHdr->ch_ntup > CacheHdr->ch_maxtup)
- {
- Dlelem *elt,
- *prevelt;
+ CatalogCacheCleanup(ct);
- for (elt = DLGetTail(&CacheHdr->ch_lrulist); elt; elt = prevelt)
- {
- CatCTup *oldct = (CatCTup *) DLE_VAL(elt);
+ return ct;
+}
+
+/*
+ * CatalogCacheCleanup
+ * Try to reduce the size of the catcaches when they get too big
+ *
+ * savect can be NULL, or a specific CatCTup not to remove even if it
+ * has zero refcount.
+ */
+static void
+CatalogCacheCleanup(CatCTup *savect)
+{
+ int tup_target;
+ CatCache *ccp;
+ Dlelem *elt,
+ *prevelt;
- prevelt = DLGetPred(elt);
+ /*
+ * Each time we have to do this, try to cut the cache size down to
+ * about 90% of the maximum.
+ */
+ tup_target = (CacheHdr->ch_maxtup * 9) / 10;
+
+ /*
+ * Our strategy for managing CatCLists is that, each time we have to
+ * throw away some cache entries, we first move-to-front all the members
+ * of CatCLists that have been touched since the last cleanup sweep.
+ * Then we do strict LRU elimination by individual tuples, zapping a list
+ * if any of its members gets zapped. Before PostgreSQL 8.1, we moved
+ * members to front each time their owning list was touched, which was
+ * arguably more fair in balancing list members against standalone tuples
+ * --- but the overhead for large lists was horrendous. This scheme is
+ * more heavily biased towards preserving lists, but that is not
+ * necessarily bad either.
+ */
+ for (ccp = CacheHdr->ch_caches; ccp; ccp = ccp->cc_next)
+ {
+ for (elt = DLGetHead(&ccp->cc_lists); elt; elt = DLGetSucc(elt))
+ {
+ CatCList *cl = (CatCList *) DLE_VAL(elt);
- if (oldct->refcount == 0 && oldct != ct)
+ Assert(cl->cl_magic == CL_MAGIC);
+ if (cl->touched && !cl->dead)
{
- CACHE2_elog(DEBUG2, "CatCacheCreateEntry(%s): Overflow, LRU removal",
- cache->cc_relname);
-#ifdef CATCACHE_STATS
- oldct->my_cache->cc_discards++;
-#endif
- CatCacheRemoveCTup(oldct->my_cache, oldct);
- if (CacheHdr->ch_ntup <= CacheHdr->ch_maxtup)
- break;
+ int i;
+
+ for (i = 0; i < cl->n_members; i++)
+ DLMoveToFront(&cl->members[i]->lrulist_elem);
}
+ cl->touched = false;
}
}
- return ct;
+ /* Now get rid of unreferenced tuples in reverse global LRU order */
+ for (elt = DLGetTail(&CacheHdr->ch_lrulist); elt; elt = prevelt)
+ {
+ CatCTup *ct = (CatCTup *) DLE_VAL(elt);
+
+ prevelt = DLGetPred(elt);
+
+ if (ct->refcount == 0 &&
+ (ct->c_list == NULL || ct->c_list->refcount == 0) &&
+ ct != savect)
+ {
+#ifdef CATCACHE_STATS
+ ct->my_cache->cc_discards++;
+#endif
+ CatCacheRemoveCTup(ct->my_cache, ct);
+
+ /* Quit when we've removed enough tuples */
+ if (CacheHdr->ch_ntup <= tup_target)
+ break;
+ }
+ }
}
/*
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/utils/catcache.h,v 1.54 2005/04/14 20:03:27 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/utils/catcache.h,v 1.55 2005/08/13 22:18:07 tgl Exp $
*
*-------------------------------------------------------------------------
*/
* Each tuple in a cache is a member of two Dllists: one lists all the
* elements in all the caches in LRU order, and the other lists just
* the elements in one hashbucket of one cache, also in LRU order.
- *
+ */
+ Dlelem lrulist_elem; /* list member of global LRU list */
+ Dlelem cache_elem; /* list member of per-bucket list */
+
+ /*
* The tuple may also be a member of at most one CatCList. (If a single
* catcache is list-searched with varying numbers of keys, we may have
* to make multiple entries for the same tuple because of this
* restriction. Currently, that's not expected to be common, so we
* accept the potential inefficiency.)
*/
- Dlelem lrulist_elem; /* list member of global LRU list */
- Dlelem cache_elem; /* list member of per-bucket list */
- struct catclist *c_list; /* containing catclist, or NULL if none */
+ struct catclist *c_list; /* containing CatCList, or NULL if none */
/*
* A tuple marked "dead" must not be returned by subsequent searches.
* However, it won't be physically deleted from the cache until its
- * refcount goes to zero.
+ * refcount goes to zero. (If it's a member of a CatCList, the list's
+ * refcount must go to zero, too; also, remember to mark the list dead
+ * at the same time the tuple is marked.)
*
* A negative cache entry is an assertion that there is no tuple matching
* a particular key. This is just as useful as a normal entry so far
* none of these will be negative cache entries.)
*
* A CatCList is only a member of a per-cache list; we do not do separate
- * LRU management for CatCLists. Instead, a CatCList is dropped from
- * the cache as soon as any one of its member tuples ages out due to
- * tuple-level LRU management.
+ * LRU management for CatCLists. See CatalogCacheCleanup() for the
+ * details of the management algorithm.
*
* A list marked "dead" must not be returned by subsequent searches.
* However, it won't be physically deleted from the cache until its
- * refcount goes to zero. (Its member tuples must have refcounts at
- * least as large, so they won't go away either.)
+ * refcount goes to zero. (A list should be marked dead if any of its
+ * member entries are dead.)
*
* If "ordered" is true then the member tuples appear in the order of the
* cache's underlying index. This will be true in normal operation,
int refcount; /* number of active references */
bool dead; /* dead but not yet removed? */
bool ordered; /* members listed in index order? */
+ bool touched; /* used since last CatalogCacheCleanup? */
short nkeys; /* number of lookup keys specified */
uint32 hash_value; /* hash value for lookup keys */
HeapTupleData tuple; /* header for tuple holding keys */