1 /*-------------------------------------------------------------------------
4 * System catalog cache for tuples matching a key.
6 * Portions Copyright (c) 1996-2000, PostgreSQL, Inc
7 * Portions Copyright (c) 1994, Regents of the University of California
11 * $Header: /cvsroot/pgsql/src/backend/utils/cache/catcache.c,v 1.74 2001/01/05 22:54:37 tgl Exp $
13 *-------------------------------------------------------------------------
17 #include "access/genam.h"
18 #include "access/hash.h"
19 #include "access/heapam.h"
20 #include "access/valid.h"
21 #include "catalog/pg_operator.h"
22 #include "catalog/pg_type.h"
23 #include "catalog/catname.h"
24 #include "catalog/indexing.h"
25 #include "miscadmin.h"
26 #include "utils/builtins.h"
27 #include "utils/fmgroids.h"
28 #include "utils/catcache.h"
29 #include "utils/syscache.h"
32 static void CatCacheRemoveCTup(CatCache *cache, CatCTup *ct);
33 static Index CatalogCacheComputeHashIndex(CatCache *cache,
35 static Index CatalogCacheComputeTupleHashIndex(CatCache *cache,
37 static void CatalogCacheInitializeCache(CatCache *cache);
38 static Datum cc_hashname(PG_FUNCTION_ARGS);
41 * variables, macros and other stuff
/*
 * Debug logging macros with 1..6 arguments.  The first group expands to
 * elog() calls, the second group to nothing.
 * NOTE(review): the listing is elided here -- presumably the two groups are
 * separated by #ifdef CACHEDEBUG / #else / #endif (guard lines not shown);
 * confirm against the full source.
 */
46 #define CACHE1_elog(a,b) elog(a,b)
47 #define CACHE2_elog(a,b,c) elog(a,b,c)
48 #define CACHE3_elog(a,b,c,d) elog(a,b,c,d)
49 #define CACHE4_elog(a,b,c,d,e) elog(a,b,c,d,e)
50 #define CACHE5_elog(a,b,c,d,e,f) elog(a,b,c,d,e,f)
51 #define CACHE6_elog(a,b,c,d,e,f,g) elog(a,b,c,d,e,f,g)
/* No-op variants (debug logging disabled) */
53 #define CACHE1_elog(a,b)
54 #define CACHE2_elog(a,b,c)
55 #define CACHE3_elog(a,b,c,d)
56 #define CACHE4_elog(a,b,c,d,e)
57 #define CACHE5_elog(a,b,c,d,e,f)
58 #define CACHE6_elog(a,b,c,d,e,f,g)
/* Global singly-linked list of all catcaches, newest first (see InitCatCache) */
61 static CatCache *Caches = NULL; /* head of list of caches */
65 * EQPROC is used in CatalogCacheInitializeCache to find the equality
66 * functions for system types that are used as cache key fields.
67 * See also GetCCHashFunc, which should support the same set of types.
69 * XXX this should be replaced by catalog lookups,
70 * but that seems to pose considerable risk of circularity...
/*
 * Equality-function OIDs, indexed by key-type OID offset from BOOLOID
 * (see EQPROC below).  InvalidOid marks type OIDs in that range that are
 * never used as catcache key fields.
 */
73 static const Oid eqproc[] = {
74 F_BOOLEQ, InvalidOid, F_CHAREQ, F_NAMEEQ, InvalidOid,
75 F_INT2EQ, F_INT2VECTOREQ, F_INT4EQ, F_OIDEQ, F_TEXTEQ,
76 F_OIDEQ, InvalidOid, InvalidOid, InvalidOid, F_OIDVECTOREQ
/* Look up the equality proc for a supported key type; only safe after
 * GetCCHashFunc has accepted the type (bounds are not checked here). */
79 #define EQPROC(SYSTEMTYPEOID) eqproc[(SYSTEMTYPEOID)-BOOLOID]
81 /* ----------------------------------------------------------------
82 * internal support functions
83 * ----------------------------------------------------------------
/*
 * GetCCHashFunc
 *	Map a catcache key-type OID to the hash function used for that type.
 *	Must support the same set of types as the eqproc[] table above.
 *	elog(FATAL) on an unsupported type; the trailing return is unreachable
 *	and exists only to keep the compiler quiet.
 * NOTE(review): most switch cases are elided in this listing.
 */
87 GetCCHashFunc(Oid keytype)
99 		return hashint2vector;
108 		return hashoidvector;
110 	elog(FATAL, "GetCCHashFunc: type %u unsupported as catcache key",
112 	return (PGFunction) NULL;
/*
 * cc_hashname
 *	Variant of hashname for catcache use: copies the search value into a
 *	fixed-size, padded Name (namestrcpy) so that a null-terminated C string
 *	hashes identically to a stored name field, then delegates to hashname.
 */
117 cc_hashname(PG_FUNCTION_ARGS)
121 * We need our own variant of hashname because we want to accept
122 null-terminated C strings as search values for name fields. So, we
123 have to make sure the data is correctly padded before we compute
128 	namestrcpy(&my_n, NameStr(* PG_GETARG_NAME(0)));
130 	return DirectFunctionCall1(hashname, NameGetDatum(&my_n));
135 * Standard routine for creating cache context if it doesn't exist yet
137 * There are a lot of places (probably far more than necessary) that check
138 * whether CacheMemoryContext exists yet and want to create it if not.
139 * We centralize knowledge of exactly how to create it here.
/*
 * CreateCacheMemoryContext
 *	Create CacheMemoryContext (as a child of TopMemoryContext) if it does
 *	not already exist.  Idempotent, so callers need not check first --
 *	allocations made in this context survive transaction end.
 */
142 CreateCacheMemoryContext(void)
144 /* Purely for paranoia, check that context doesn't exist;
145 * caller probably did so already.
147 if (!CacheMemoryContext)
148 CacheMemoryContext = AllocSetContextCreate(TopMemoryContext,
149 "CacheMemoryContext",
150 ALLOCSET_DEFAULT_MINSIZE,
151 ALLOCSET_DEFAULT_INITSIZE,
152 ALLOCSET_DEFAULT_MAXSIZE);
156 /* --------------------------------
157 * CatalogCacheInitializeCache
159 * This function does final initialization of a catcache: obtain the tuple
160 * descriptor and set up the hash and equality function links. We assume
161 * that the relcache entry can be opened at this point!
162 * --------------------------------
/* Debug variants log cache startup and per-key loading (no-op versions below;
 * the #ifdef guard lines are elided in this listing). */
165 #define CatalogCacheInitializeCache_DEBUG1 \
166 elog(DEBUG, "CatalogCacheInitializeCache: cache @%p %s", cache, \
169 #define CatalogCacheInitializeCache_DEBUG2 \
171 if (cache->cc_key[i] > 0) { \
172 elog(DEBUG, "CatalogCacheInitializeCache: load %d/%d w/%d, %u", \
173 i+1, cache->cc_nkeys, cache->cc_key[i], \
174 tupdesc->attrs[cache->cc_key[i] - 1]->atttypid); \
176 elog(DEBUG, "CatalogCacheInitializeCache: load %d/%d w/%d", \
177 i+1, cache->cc_nkeys, cache->cc_key[i]); \
182 #define CatalogCacheInitializeCache_DEBUG1
183 #define CatalogCacheInitializeCache_DEBUG2
/*
 * CatalogCacheInitializeCache
 *	Finish initializing a catcache on first use: copy the relation's tuple
 *	descriptor into permanent cache storage and fill in the per-key hash
 *	function, equality proc, and scankey attribute numbers.  Setting
 *	cc_tupdesc non-NULL is what marks the cache fully initialized.
 */
187 CatalogCacheInitializeCache(CatCache *cache)
190 MemoryContext oldcxt;
194 CatalogCacheInitializeCache_DEBUG1;
197 * Open the relation without locking --- we only need the tupdesc,
198 * which we assume will never change ...
200 relation = heap_openr(cache->cc_relname, NoLock);
201 Assert(RelationIsValid(relation));
204 * switch to the cache context so our allocations
205 * do not vanish at the end of a transaction
208 if (!CacheMemoryContext)
209 CreateCacheMemoryContext();
211 oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
/* Private copy: avoids depending on the relcache keeping a tupdesc for us
 * across cache flushes (see SystemCacheRelationFlushed comments) */
214 * copy the relcache's tuple descriptor to permanent cache storage
217 tupdesc = CreateTupleDescCopyConstr(RelationGetDescr(relation));
220 * return to the caller's memory context and close the rel
223 MemoryContextSwitchTo(oldcxt);
225 heap_close(relation, NoLock);
227 CACHE3_elog(DEBUG, "CatalogCacheInitializeCache: %s, %d keys",
228 cache->cc_relname, cache->cc_nkeys);
231 * initialize cache's key information
234 for (i = 0; i < cache->cc_nkeys; ++i)
238 CatalogCacheInitializeCache_DEBUG2;
/* Positive attnum: ordinary attribute, get its type from the tupdesc;
 * otherwise only the OID system attribute is supported */
240 if (cache->cc_key[i] > 0)
242 keytype = tupdesc->attrs[cache->cc_key[i] - 1]->atttypid;
246 if (cache->cc_key[i] != ObjectIdAttributeNumber)
247 elog(FATAL, "CatalogCacheInit: only sys attr supported is OID");
251 cache->cc_hashfunc[i] = GetCCHashFunc(keytype);
253 * If GetCCHashFunc liked the type, safe to index into eqproc[]
255 cache->cc_skey[i].sk_procedure = EQPROC(keytype);
257 fmgr_info(cache->cc_skey[i].sk_procedure,
258 &cache->cc_skey[i].sk_func);
259 cache->cc_skey[i].sk_nargs = cache->cc_skey[i].sk_func.fn_nargs;
261 /* Initialize sk_attno suitably for HeapKeyTest() and heap scans */
262 cache->cc_skey[i].sk_attno = cache->cc_key[i];
264 CACHE4_elog(DEBUG, "CatalogCacheInit %s %d %p",
271 * mark this cache fully initialized
274 cache->cc_tupdesc = tupdesc;
277 /* --------------------------------
278 * CatalogCacheComputeHashIndex
279 * --------------------------------
/*
 * CatalogCacheComputeHashIndex
 *	Compute the hash bucket index for a search key.  Each key datum is
 *	hashed via the cache's per-key hash function; the per-key hashes are
 *	combined with left shifts of 9/6/3/0 bits and reduced modulo the
 *	bucket count (cc_size).  The switch cases fall through from nkeys
 *	down to 1 (case labels/breaks elided in this listing).
 */
282 CatalogCacheComputeHashIndex(CatCache *cache, ScanKey cur_skey)
284 uint32 hashIndex = 0;
286 CACHE4_elog(DEBUG, "CatalogCacheComputeHashIndex %s %d %p",
291 switch (cache->cc_nkeys)
295 DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[3],
296 cur_skey[3].sk_argument)) << 9;
300 DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[2],
301 cur_skey[2].sk_argument)) << 6;
305 DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[1],
306 cur_skey[1].sk_argument)) << 3;
310 DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[0],
311 cur_skey[0].sk_argument));
/* cc_nkeys outside 1..4 is a can't-happen condition */
314 elog(FATAL, "CCComputeHashIndex: %d cc_nkeys", cache->cc_nkeys);
317 hashIndex %= (uint32) cache->cc_size;
318 return (Index) hashIndex;
321 /* --------------------------------
322 * CatalogCacheComputeTupleHashIndex
323 * --------------------------------
/*
 * CatalogCacheComputeTupleHashIndex
 *	Compute the hash bucket index for an existing catalog tuple, by
 *	extracting the cache's key fields out of the tuple into a scankey
 *	and then hashing as for a search.  The OID system attribute is
 *	special-cased; ordinary attributes are presumably fetched with
 *	heap_getattr in the elided ":"-branches of each conditional.
 *	Switch cases fall through from nkeys down to 1.
 */
326 CatalogCacheComputeTupleHashIndex(CatCache *cache,
329 ScanKeyData cur_skey[4];
332 /* Copy pre-initialized overhead data for scankey */
333 memcpy(cur_skey, cache->cc_skey, sizeof(cur_skey));
335 /* Now extract key fields from tuple, insert into scankey */
336 switch (cache->cc_nkeys)
339 cur_skey[3].sk_argument =
340 (cache->cc_key[3] == ObjectIdAttributeNumber)
341 ? ObjectIdGetDatum(tuple->t_data->t_oid)
349 cur_skey[2].sk_argument =
350 (cache->cc_key[2] == ObjectIdAttributeNumber)
351 ? ObjectIdGetDatum(tuple->t_data->t_oid)
359 cur_skey[1].sk_argument =
360 (cache->cc_key[1] == ObjectIdAttributeNumber)
361 ? ObjectIdGetDatum(tuple->t_data->t_oid)
369 cur_skey[0].sk_argument =
370 (cache->cc_key[0] == ObjectIdAttributeNumber)
371 ? ObjectIdGetDatum(tuple->t_data->t_oid)
379 elog(FATAL, "CCComputeTupleHashIndex: %d cc_nkeys",
384 return CatalogCacheComputeHashIndex(cache, cur_skey);
387 /* --------------------------------
389 * --------------------------------
/*
 * CatCacheRemoveCTup
 *	Unlink a cache entry from both the LRU list and its hash bucket list,
 *	then free its tuple data.  Caller must ensure refcount is zero.
 *	(The pfree of the CatCTup header itself and the cc_ntup decrement are
 *	presumably in the elided tail of this function -- confirm in full source.)
 */
392 CatCacheRemoveCTup(CatCache *cache, CatCTup *ct)
394 Assert(ct->refcount == 0);
396 /* delink from linked lists */
397 DLRemove(&ct->lrulist_elem);
398 DLRemove(&ct->cache_elem);
400 /* free associated tuple data */
401 if (ct->tuple.t_data != NULL)
402 pfree(ct->tuple.t_data);
408 /* --------------------------------
409 * CatalogCacheIdInvalidate()
411 * Invalidate a tuple given a cache id. In this case the id should always
412 * be found (whether the cache has opened its relation or not). Of course,
413 * if the cache has yet to open its relation, there will be no tuples so
415 * --------------------------------
/*
 * CatalogCacheIdInvalidate
 *	Remove from the identified cache any entries whose tuple TID matches
 *	'pointer'.  Walks the cache list to find the matching cache id, then
 *	scans only the given hash bucket.  Entries still referenced cannot be
 *	removed outright (the elided branch after the refcount test presumably
 *	marks them dead instead -- confirm in full source).
 */
418 CatalogCacheIdInvalidate(int cacheId,
428 Assert(hashIndex < NCCBUCK);
429 Assert(ItemPointerIsValid(pointer));
430 CACHE1_elog(DEBUG, "CatalogCacheIdInvalidate: called");
433 * inspect caches to find the proper cache
436 for (ccp = Caches; ccp; ccp = ccp->cc_next)
441 if (cacheId != ccp->id)
444 * inspect the hash bucket until we find a match or exhaust
/* nextelt is fetched before any removal, so deletion is safe mid-scan */
447 for (elt = DLGetHead(&ccp->cc_cache[hashIndex]); elt; elt = nextelt)
449 CatCTup *ct = (CatCTup *) DLE_VAL(elt);
451 nextelt = DLGetSucc(elt);
453 if (ItemPointerEquals(pointer, &ct->tuple.t_self))
455 if (ct->refcount > 0)
458 CatCacheRemoveCTup(ccp, ct);
459 CACHE1_elog(DEBUG, "CatalogCacheIdInvalidate: invalidated");
460 /* could be multiple matches, so keep looking! */
463 break; /* need only search this one cache */
467 /* ----------------------------------------------------------------
475 * RelationInvalidateCatalogCacheTuple
476 * ----------------------------------------------------------------
480 /* --------------------------------
483 * Clean up catcaches at end of transaction (either commit or abort)
485 * We scan the caches to reset refcounts to zero. This is of course
486 * necessary in the abort case, since elog() may have interrupted routines.
487 * In the commit case, any nonzero counts indicate failure to call
488 * ReleaseSysCache, so we put out a notice for debugging purposes.
489 * --------------------------------
/*
 * AtEOXact_CatCache
 *	End-of-transaction cleanup (commit or abort): walk every cache's LRU
 *	list resetting refcounts to zero.  On commit a nonzero count means some
 *	caller forgot ReleaseSysCache, so a NOTICE is logged for debugging;
 *	on abort nonzero counts are expected (elog may have interrupted code).
 *	Dead entries that become removable are freed.
 */
492 AtEOXact_CatCache(bool isCommit)
496 for (cache = Caches; cache; cache = cache->cc_next)
/* nextelt saved up front so CatCacheRemoveCTup can delete mid-scan */
501 for (elt = DLGetHead(&cache->cc_lrulist); elt; elt = nextelt)
503 CatCTup *ct = (CatCTup *) DLE_VAL(elt);
505 nextelt = DLGetSucc(elt);
507 if (ct->refcount != 0)
510 elog(NOTICE, "Cache reference leak: cache %s (%d), tuple %u has count %d",
511 cache->cc_relname, cache->id,
512 ct->tuple.t_data->t_oid,
517 /* Clean up any now-deletable dead entries */
519 CatCacheRemoveCTup(cache, ct);
524 /* --------------------------------
527 * Reset caches when a shared cache inval event forces it
528 * --------------------------------
/*
 * ResetSystemCache
 *	Flush all entries from every catcache, in response to a shared cache
 *	invalidation event.  Entries with refcount > 0 cannot be removed
 *	immediately (the elided branch presumably marks them dead instead);
 *	all others are removed outright.
 */
531 ResetSystemCache(void)
535 CACHE1_elog(DEBUG, "ResetSystemCache called");
538 * here we purge the contents of all the caches
540 * for each system cache
542 * remove the tuple, or at least mark it dead
545 for (cache = Caches; cache; cache = cache->cc_next)
/* nextelt saved up front so removal is safe while scanning */
550 for (elt = DLGetHead(&cache->cc_lrulist); elt; elt = nextelt)
552 CatCTup *ct = (CatCTup *) DLE_VAL(elt);
554 nextelt = DLGetSucc(elt);
556 if (ct->refcount > 0)
559 CatCacheRemoveCTup(cache, ct);
563 CACHE1_elog(DEBUG, "end of ResetSystemCache call");
566 /* --------------------------------
567 * SystemCacheRelationFlushed
569 * This is called by RelationFlushRelation() to clear out cached information
570 * about a relation being dropped. (This could be a DROP TABLE command,
571 * or a temp table being dropped at end of transaction, or a table created
572 * during the current transaction that is being dropped because of abort.)
573 * Remove all cache entries relevant to the specified relation OID.
575 * A special case occurs when relId is itself one of the cacheable system
576 * tables --- although those'll never be dropped, they can get flushed from
577 * the relcache (VACUUM causes this, for example). In that case we need
578 * to flush all cache entries from that table. The brute-force method
579 * currently used takes care of that quite handily. (At one point we
580 * also tried to force re-execution of CatalogCacheInitializeCache for
581 * the cache(s) on that table. This is a bad idea since it leads to all
582 * kinds of trouble if a cache flush occurs while loading cache entries.
583 * We now avoid the need to do it by copying cc_tupdesc out of the relcache,
584 * rather than relying on the relcache to keep a tupdesc for us. Of course
585 * this assumes the tupdesc of a cachable system table will not change...)
586 * --------------------------------
/*
 * SystemCacheRelationFlushed
 *	Called when a relation is flushed from the relcache; discards catcache
 *	entries for it.  The actual body is elided in this listing -- per the
 *	comment below it presumably just calls ResetSystemCache() (brute force);
 *	relId is currently unused by that approach.
 */
589 SystemCacheRelationFlushed(Oid relId)
593 * XXX Ideally we'd search the caches and just zap entries that
594 * actually refer to or come from the indicated relation. For now, we
595 * take the brute-force approach: just flush the caches entirely.
600 /* --------------------------------
603 * This allocates and initializes a cache for a system catalog relation.
604 * Actually, the cache is only partially initialized to avoid opening the
605 * relation. The relation will be opened and the rest of the cache
606 * structure initialized on the first access.
607 * --------------------------------
/* Debug variant logs the new cache's parameters (no-op version below;
 * #ifdef guard lines elided in this listing) */
610 #define InitCatCache_DEBUG1 \
612 elog(DEBUG, "InitCatCache: rel=%s id=%d nkeys=%d size=%d\n", \
613 cp->cc_relname, cp->id, cp->cc_nkeys, cp->cc_size); \
617 #define InitCatCache_DEBUG1
/*
 * InitCatCache (signature line elided in this listing)
 *	Allocate and partially initialize a catcache in CacheMemoryContext.
 *	The relation is NOT opened here; cc_tupdesc stays NULL so the first
 *	SearchCatCache triggers CatalogCacheInitializeCache.  The new cache is
 *	pushed onto the head of the global Caches list.
 */
628 MemoryContext oldcxt;
632 * first switch to the cache context so our allocations
633 * do not vanish at the end of a transaction
636 if (!CacheMemoryContext)
637 CreateCacheMemoryContext();
639 oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
642 * allocate a new cache structure
645 cp = (CatCache *) palloc(sizeof(CatCache));
646 MemSet((char *) cp, 0, sizeof(CatCache));
649 * initialize the cache buckets (each bucket is a list header)
650 * and the LRU tuple list
653 DLInitList(&cp->cc_lrulist);
654 for (i = 0; i < NCCBUCK; ++i)
655 DLInitList(&cp->cc_cache[i]);
658 * Caches is the pointer to the head of the list of all the
659 * system caches. here we add the new cache to the top of the list.
662 cp->cc_next = Caches; /* list of caches (single link) */
666 * initialize the cache's relation information for the relation
667 * corresponding to this cache, and initialize some of the new
668 * cache's other internal fields. But don't open the relation yet.
671 cp->cc_relname = relname;
672 cp->cc_indname = indname;
/* NULL tupdesc == "not yet fully initialized"; see SearchCatCache */
673 cp->cc_tupdesc = (TupleDesc) NULL;
675 cp->cc_maxtup = MAXTUP;
676 cp->cc_size = NCCBUCK;
677 cp->cc_nkeys = nkeys;
678 for (i = 0; i < nkeys; ++i)
679 cp->cc_key[i] = key[i];
682 * all done. new cache is initialized. print some debugging
683 * information, if appropriate.
689 * back to the old context before we return...
692 MemoryContextSwitchTo(oldcxt);
698 /* --------------------------------
701 * This function checks for tuples that will be fetched by
702 * IndexSupportInitialize() during relcache initialization for
703 * certain system indexes that support critical syscaches.
704 * We can't use an indexscan to fetch these, else we'll get into
705 * infinite recursion. A plain heap scan will work, however.
706 * --------------------------------
/*
 * IndexScanOK
 *	Decide whether an index scan is safe for this lookup.  Certain lookups
 *	made during relcache index-support initialization would recurse
 *	infinitely if done via indexscan, so they must use a heap scan:
 *	the pg_index row of pg_index_indexrelid_index itself, and the OID
 *	comparison operators (MIN_OIDCMP..MAX_OIDCMP).  Returns true for the
 *	normal case (return statements elided in this listing).
 */
709 IndexScanOK(CatCache *cache, ScanKey cur_skey)
711 if (cache->id == INDEXRELID)
/* Cached across calls; looked up at most once per backend */
713 static Oid indexSelfOid = InvalidOid;
715 /* One-time lookup of the OID of pg_index_indexrelid_index */
716 if (!OidIsValid(indexSelfOid))
/* Plain heap scan of pg_class by relname -- deliberately NOT an indexscan */
723 rel = heap_openr(RelationRelationName, AccessShareLock);
724 ScanKeyEntryInitialize(&key, 0, Anum_pg_class_relname,
726 PointerGetDatum(IndexRelidIndex));
727 sd = heap_beginscan(rel, false, SnapshotNow, 1, &key);
728 ntp = heap_getnext(sd, 0);
729 if (!HeapTupleIsValid(ntp))
730 elog(ERROR, "SearchSelfReferences: %s not found in %s",
731 IndexRelidIndex, RelationRelationName);
732 indexSelfOid = ntp->t_data->t_oid;
734 heap_close(rel, AccessShareLock);
737 /* Looking for pg_index_indexrelid_index? */
738 if (DatumGetObjectId(cur_skey[0].sk_argument) == indexSelfOid)
741 else if (cache->id == OPEROID)
743 /* Looking for an OID comparison function? */
744 Oid lookup_oid = DatumGetObjectId(cur_skey[0].sk_argument);
746 if (lookup_oid >= MIN_OIDCMP && lookup_oid <= MAX_OIDCMP)
750 /* Normal case, allow index scan */
754 /* --------------------------------
757 * This call searches a system cache for a tuple, opening the relation
758 * if necessary (the first access to a particular cache).
759 * --------------------------------
/*
 * SearchCatCache
 *	Look up a tuple by key values v1..v4 (only the first cc_nkeys are
 *	meaningful).  First probes the in-memory hash bucket; on a miss, scans
 *	the catalog relation (by index when IndexScanOK allows, else by heap
 *	scan), copies any found tuple into CacheMemoryContext, and inserts it
 *	into the cache with refcount 1.  Caller must pair a successful return
 *	with ReleaseCatCache.
 */
762 SearchCatCache(CatCache *cache,
768 ScanKeyData cur_skey[4];
774 MemoryContext oldcxt;
777 * one-time startup overhead
780 if (cache->cc_tupdesc == NULL)
781 CatalogCacheInitializeCache(cache);
784 * initialize the search key information
787 memcpy(cur_skey, cache->cc_skey, sizeof(cur_skey));
788 cur_skey[0].sk_argument = v1;
789 cur_skey[1].sk_argument = v2;
790 cur_skey[2].sk_argument = v3;
791 cur_skey[3].sk_argument = v4;
794 * find the hash bucket in which to look for the tuple
797 hash = CatalogCacheComputeHashIndex(cache, cur_skey);
800 * scan the hash bucket until we find a match or exhaust our tuples
803 for (elt = DLGetHead(&cache->cc_cache[hash]);
805 elt = DLGetSucc(elt))
809 ct = (CatCTup *) DLE_VAL(elt);
812 continue; /* ignore dead entries */
815 * see if the cached tuple matches our key.
816 * (should we be worried about time ranges? -cim 10/2/90)
819 HeapKeyTest(&ct->tuple,
828 * we found a tuple in the cache: bump its refcount, move it to
829 * the front of the LRU list, and return it. We also move it
830 * to the front of the list for its hashbucket, in order to speed
831 * subsequent searches. (The most frequently accessed elements
832 * in any hashbucket will tend to be near the front of the
833 * hashbucket's list.)
838 DLMoveToFront(&ct->lrulist_elem);
839 DLMoveToFront(&ct->cache_elem);
842 CACHE3_elog(DEBUG, "SearchCatCache(%s): found in bucket %d",
843 cache->cc_relname, hash);
844 #endif /* CACHEDEBUG */
850 * Tuple was not found in cache, so we have to try and
851 * retrieve it directly from the relation. If it's found,
852 * we add it to the cache.
854 * NOTE: it is possible for recursive cache lookups to occur while
855 * reading the relation --- for example, due to shared-cache-inval
856 * messages being processed during heap_open(). This is OK. It's
857 * even possible for one of those lookups to find and enter the
858 * very same tuple we are trying to fetch here. If that happens,
859 * we will enter a second copy of the tuple into the cache. The
860 * first copy will never be referenced again, and will eventually
861 * age out of the cache, so there's no functional problem. This case
862 * is rare enough that it's not worth expending extra cycles to detect.
867 * open the relation associated with the cache
870 relation = heap_openr(cache->cc_relname, AccessShareLock);
873 * Scan the relation to find the tuple. If there's an index, and
874 * if it's safe to do so, use the index. Else do a heap scan.
879 if ((RelationGetForm(relation))->relhasindex &&
880 !IsIgnoringSystemIndexes() &&
881 IndexScanOK(cache, cur_skey))
885 RetrieveIndexResult indexRes;
890 CACHE2_elog(DEBUG, "SearchCatCache(%s): performing index scan",
894 * For an index scan, sk_attno has to be set to the index attribute
895 * number(s), not the heap attribute numbers. We assume that the
896 * index corresponds exactly to the cache keys (or its first N
899 for (i = 0; i < cache->cc_nkeys; ++i)
900 cur_skey[i].sk_attno = i+1;
902 idesc = index_openr(cache->cc_indname);
903 isd = index_beginscan(idesc, false, cache->cc_nkeys, cur_skey);
904 tuple.t_datamcxt = CurrentMemoryContext;
/* Loop until a visible tuple is fetched (heap_fetch applies SnapshotNow) */
906 while ((indexRes = index_getnext(isd, ForwardScanDirection)))
908 tuple.t_self = indexRes->heap_iptr;
909 heap_fetch(relation, SnapshotNow, &tuple, &buffer);
911 if (tuple.t_data != NULL)
913 /* Copy tuple into our context */
914 oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
915 ct = (CatCTup *) palloc(sizeof(CatCTup));
916 heap_copytuple_with_tuple(&tuple, &ct->tuple);
917 MemoryContextSwitchTo(oldcxt);
918 ReleaseBuffer(buffer);
929 CACHE2_elog(DEBUG, "SearchCatCache(%s): performing heap scan",
/* Heap-scan path: cur_skey still carries the heap attribute numbers */
932 sd = heap_beginscan(relation, 0, SnapshotNow,
933 cache->cc_nkeys, cur_skey);
935 ntp = heap_getnext(sd, 0);
937 if (HeapTupleIsValid(ntp))
939 /* Copy tuple into our context */
940 oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
941 ct = (CatCTup *) palloc(sizeof(CatCTup));
942 heap_copytuple_with_tuple(ntp, &ct->tuple);
943 MemoryContextSwitchTo(oldcxt);
944 /* We should not free the result of heap_getnext... */
954 heap_close(relation, AccessShareLock);
957 * scan is complete. if tup was found, we can add it to the cache.
964 * Finish initializing the CatCTup header, and add it to the
968 CACHE1_elog(DEBUG, "SearchCatCache: found tuple");
970 ct->ct_magic = CT_MAGIC;
971 DLInitElem(&ct->lrulist_elem, (void *) ct);
972 DLInitElem(&ct->cache_elem, (void *) ct);
973 ct->refcount = 1; /* count this first reference */
976 DLAddHead(&cache->cc_lrulist, &ct->lrulist_elem);
977 DLAddHead(&cache->cc_cache[hash], &ct->cache_elem);
980 * If we've exceeded the desired size of this cache,
981 * try to throw away the least recently used entry.
/* Scan LRU from the tail; only an unreferenced entry may be evicted */
984 if (++cache->cc_ntup > cache->cc_maxtup)
986 for (elt = DLGetTail(&cache->cc_lrulist);
988 elt = DLGetPred(elt))
990 CatCTup *oldct = (CatCTup *) DLE_VAL(elt);
992 if (oldct->refcount == 0)
994 CACHE2_elog(DEBUG, "SearchCatCache(%s): Overflow, LRU removal",
996 CatCacheRemoveCTup(cache, oldct);
1002 CACHE4_elog(DEBUG, "SearchCatCache(%s): Contains %d/%d tuples",
1003 cache->cc_relname, cache->cc_ntup, cache->cc_maxtup);
1004 CACHE3_elog(DEBUG, "SearchCatCache(%s): put in bucket %d",
1005 cache->cc_relname, hash);
1010 /* --------------------------------
1013 * Decrement the reference count of a catcache entry (releasing the
1014 * hold grabbed by a successful SearchCatCache).
1016 * NOTE: if compiled with -DCATCACHE_FORCE_RELEASE then catcache entries
1017 * will be freed as soon as their refcount goes to zero. In combination
1018 * with aset.c's CLOBBER_FREED_MEMORY option, this provides a good test
1019 * to catch references to already-released catcache entries.
1020 * --------------------------------
/*
 * ReleaseCatCache
 *	Drop the reference acquired by a successful SearchCatCache.  The
 *	CatCTup header is recovered from the tuple pointer by offsetof
 *	arithmetic, and validated via its magic number.  When the refcount
 *	reaches zero (and, unless CATCACHE_FORCE_RELEASE is defined, some
 *	additional elided condition -- presumably a "dead" flag -- holds),
 *	the entry is removed; the owning cache is likewise recovered by
 *	offsetof arithmetic from the LRU list header.
 */
1023 ReleaseCatCache(HeapTuple tuple)
1025 CatCTup *ct = (CatCTup *) (((char *) tuple) -
1026 offsetof(CatCTup, tuple));
1028 /* Safety checks to ensure we were handed a cache entry */
1029 Assert(ct->ct_magic == CT_MAGIC);
1030 Assert(ct->refcount > 0);
1034 if (ct->refcount == 0
1035 #ifndef CATCACHE_FORCE_RELEASE
1040 /* We can find the associated cache using the dllist pointers */
1041 Dllist *lru = DLGetListHdr(&ct->lrulist_elem);
1042 CatCache *cache = (CatCache *) (((char *) lru) -
1043 offsetof(CatCache, cc_lrulist));
1045 CatCacheRemoveCTup(cache, ct);
1049 /* --------------------------------
1050 * PrepareToInvalidateCacheTuple()
1052 * This is part of a rather subtle chain of events, so pay attention:
1054 * When a tuple is updated or deleted, it cannot be flushed from the
1055 * catcaches immediately, for reasons explained at the top of inval.c.
1056 * Instead we have to add entry(s) for the tuple to a list of pending tuple
1057 * invalidations that will be done at the end of the command or transaction.
1059 * The lists of tuples that need to be flushed are kept by inval.c. This
1060 * routine is a helper routine for inval.c. Given a tuple belonging to
1061 * the specified relation, find all catcaches it could be in, compute the
1062 * correct hashindex for each such catcache, and call the specified function
1063 * to record the cache id, hashindex, and tuple ItemPointer in inval.c's
1064 * lists. CatalogCacheIdInvalidate will be called later, if appropriate,
1065 * using the recorded information.
1067 * Note that it is irrelevant whether the given tuple is actually loaded
1068 * into the catcache at the moment. Even if it's not there now, it might
1069 * be by the end of the command, so we have to be prepared to flush it.
1071 * Also note that it's not an error if there are no catcaches for the
1072 * specified relation. inval.c doesn't know exactly which rels have
1073 * catcaches --- it will call this routine for any tuple that's in a
1075 * --------------------------------
/*
 * PrepareToInvalidateCacheTuple
 *	Helper for inval.c: for each catcache built on the given relation
 *	(matched by relation name), compute the tuple's hash index and hand
 *	(cache id, hash index, tuple TID) to the supplied callback so the
 *	invalidation can be executed later by CatalogCacheIdInvalidate.
 *	It doesn't matter whether the tuple is currently cached.
 *	(Function tail, including the TID argument of the callback, is
 *	elided in this listing.)
 */
1078 PrepareToInvalidateCacheTuple(Relation relation,
1080 void (*function) (int, Index, ItemPointer))
1088 Assert(RelationIsValid(relation));
1089 Assert(HeapTupleIsValid(tuple));
1090 Assert(PointerIsValid(function));
1091 CACHE1_elog(DEBUG, "PrepareToInvalidateCacheTuple: called");
1095 * if the cache contains tuples from the specified relation
1096 * compute the tuple's hash index in this cache,
1097 * and call the passed function to register the information.
1101 for (ccp = Caches; ccp; ccp = ccp->cc_next)
1103 if (strcmp(ccp->cc_relname, RelationGetRelationName(relation)) != 0)
1106 /* Just in case cache hasn't finished initialization yet... */
1107 if (ccp->cc_tupdesc == NULL)
1108 CatalogCacheInitializeCache(ccp);
1110 (*function) (ccp->id,
1111 CatalogCacheComputeTupleHashIndex(ccp, tuple),