1 /*-------------------------------------------------------------------------
4 * System catalog cache for tuples matching a key.
6 * Portions Copyright (c) 1996-2000, PostgreSQL, Inc
7 * Portions Copyright (c) 1994, Regents of the University of California
11 * $Header: /cvsroot/pgsql/src/backend/utils/cache/catcache.c,v 1.62 2000/02/21 03:36:49 tgl Exp $
13 *-------------------------------------------------------------------------
17 #include "access/genam.h"
18 #include "access/hash.h"
19 #include "access/heapam.h"
20 #include "access/valid.h"
21 #include "catalog/pg_operator.h"
22 #include "catalog/pg_type.h"
23 #include "catalog/catname.h"
24 #include "catalog/indexing.h"
25 #include "miscadmin.h"
26 #include "utils/builtins.h"
27 #include "utils/catcache.h"
28 #include "utils/syscache.h"
30 static void CatCacheRemoveCTup(CatCache *cache, Dlelem *e);
31 static Index CatalogCacheComputeHashIndex(struct catcache * cacheInP);
32 static Index CatalogCacheComputeTupleHashIndex(struct catcache * cacheInOutP,
35 static void CatalogCacheInitializeCache(struct catcache * cache,
37 static uint32 cc_hashname(NameData *n);
40 * variables, macros and other stuff
/*
 * Debug-trace wrappers: CACHEn_elog takes an elog level plus (n-1)
 * format arguments and forwards them to elog().  The second set of
 * definitions expands to nothing, compiling the tracing away.
 * NOTE(review): the #ifdef CACHEDEBUG / #else / #endif lines that
 * select between the two sets appear to be elided from this excerpt.
 */
45 #define CACHE1_elog(a,b) elog(a,b)
46 #define CACHE2_elog(a,b,c) elog(a,b,c)
47 #define CACHE3_elog(a,b,c,d) elog(a,b,c,d)
48 #define CACHE4_elog(a,b,c,d,e) elog(a,b,c,d,e)
49 #define CACHE5_elog(a,b,c,d,e,f) elog(a,b,c,d,e,f)
50 #define CACHE6_elog(a,b,c,d,e,f,g) elog(a,b,c,d,e,f,g)
/* no-op variants (used when cache debugging is disabled) */
52 #define CACHE1_elog(a,b)
53 #define CACHE2_elog(a,b,c)
54 #define CACHE3_elog(a,b,c,d)
55 #define CACHE4_elog(a,b,c,d,e)
56 #define CACHE5_elog(a,b,c,d,e,f)
57 #define CACHE6_elog(a,b,c,d,e,f,g)
/* Module-level state for all catalog caches. */
60 static CatCache *Caches = NULL; /* head of list of caches */
62 GlobalMemory CacheCxt; /* context in which caches are allocated */
63 /* CacheCxt is global because relcache uses it too. */
67 * EQPROC is used in CatalogCacheInitializeCache to find the equality
68 * functions for system types that are used as cache key fields.
69 * See also GetCCHashFunc, which should support the same set of types.
71 * XXX this should be replaced by catalog lookups,
72 * but that seems to pose considerable risk of circularity...
/*
 * eqproc[] is indexed by (type OID - BOOLOID); InvalidOid entries mark
 * types that are not supported as catcache key columns.
 * NOTE(review): the array's closing brace appears to be elided here.
 */
75 static const Oid eqproc[] = {
76 F_BOOLEQ, InvalidOid, F_CHAREQ, F_NAMEEQ, InvalidOid,
77 F_INT2EQ, F_INT2VECTOREQ, F_INT4EQ, F_OIDEQ, F_TEXTEQ,
78 F_OIDEQ, InvalidOid, InvalidOid, InvalidOid, F_OIDVECTOREQ
/* Look up the equality proc for a supported key type OID. */
81 #define EQPROC(SYSTEMTYPEOID) eqproc[(SYSTEMTYPEOID)-BOOLOID]
83 /* ----------------------------------------------------------------
84 * internal support functions
85 * ----------------------------------------------------------------
/*
 * GetCCHashFunc
 *	Map a catcache key column's type OID to the hash function used for
 *	that key.  Must support the same set of types as eqproc[] above.
 *	NOTE(review): the switch statement and its case labels appear to be
 *	elided from this excerpt; each return below presumably corresponds
 *	to one supported key type.
 */
89 GetCCHashFunc(Oid keytype)
95 return (CCHashFunc) hashchar;
97 return (CCHashFunc) cc_hashname;
99 return (CCHashFunc) hashint2;
101 return (CCHashFunc) hashint2vector;
103 return (CCHashFunc) hashint4;
105 return (CCHashFunc) hashtext;
108 return (CCHashFunc) hashoid;
110 return (CCHashFunc) hashoidvector;
/* unsupported key type: fatal error (trailing format arguments elided) */
112 elog(FATAL, "GetCCHashFunc: type %u unsupported as catcache key",
/*
 * cc_hashname
 *	Hash a Name key.  Copies the search value into a correctly padded
 *	NameData buffer (via namestrcpy) before hashing, so a plain
 *	null-terminated C string hashes the same as a stored name.
 */
119 cc_hashname(NameData *n)
122 * We need our own variant of hashname because we want to accept
123 * null-terminated C strings as search values for name fields.
124 * So, we have to make sure the data is correctly padded before
125 * we compute the hash value.
129 namestrcpy(&my_n, NameStr(*n));
131 return hashname(&my_n);
135 /* --------------------------------
136 * CatalogCacheInitializeCache
137 * --------------------------------
/*
 * Trace macros for CatalogCacheInitializeCache; the second (empty) pair
 * disables the tracing.  NOTE(review): the #ifdef/#else/#endif lines
 * selecting between the two sets, and some continuation lines of the
 * macro bodies, appear to be elided from this excerpt.
 */
140 #define CatalogCacheInitializeCache_DEBUG1 \
142 elog(DEBUG, "CatalogCacheInitializeCache: cache @%08lx", cache); \
144 elog(DEBUG, "CatalogCacheInitializeCache: called w/relation(inval)"); \
146 elog(DEBUG, "CatalogCacheInitializeCache: called w/relname %s", \
150 #define CatalogCacheInitializeCache_DEBUG2 \
152 if (cache->cc_key[i] > 0) { \
153 elog(DEBUG, "CatalogCacheInitializeCache: load %d/%d w/%d, %d", \
154 i+1, cache->cc_nkeys, cache->cc_key[i], \
155 relation->rd_att->attrs[cache->cc_key[i] - 1]->attlen); \
157 elog(DEBUG, "CatalogCacheInitializeCache: load %d/%d w/%d", \
158 i+1, cache->cc_nkeys, cache->cc_key[i]); \
163 #define CatalogCacheInitializeCache_DEBUG1
164 #define CatalogCacheInitializeCache_DEBUG2
/*
 * CatalogCacheInitializeCache
 *	Finish initializing a catcache on first use: open the catalog
 *	relation (by OID if some other cache already recorded it, else by
 *	name), copy its tuple descriptor, look up the hash and equality
 *	functions for each key column, and record the catalog index's OID
 *	when system indexes are usable.  Runs in CacheCxt so the results
 *	survive transaction end.
 *	NOTE(review): many lines (braces, local declarations, else-arms)
 *	appear to be elided from this excerpt.
 */
168 CatalogCacheInitializeCache(struct catcache * cache,
171 MemoryContext oldcxt;
176 CatalogCacheInitializeCache_DEBUG1;
179 * first switch to the cache context so our allocations
180 * do not vanish at the end of a transaction
/* presumably guarded by "if (!CacheCxt)" — the guard line is elided */
184 CacheCxt = CreateGlobalMemory("Cache");
185 oldcxt = MemoryContextSwitchTo((MemoryContext) CacheCxt);
188 * If no relation was passed we must open it to get access to
189 * its fields. If one of the other caches has already opened
190 * it we use heap_open() instead of heap_openr().
191 * XXX is that really worth the trouble of checking?
194 if (!RelationIsValid(relation))
199 * scan the caches to see if any other cache has opened the relation
202 for (cp = Caches; cp; cp = cp->cc_next)
204 if (strncmp(cp->cc_relname, cache->cc_relname, NAMEDATALEN) == 0)
206 if (cp->relationId != InvalidOid)
212 * open the relation by name or by id
216 relation = heap_open(cp->relationId, NoLock);
218 relation = heap_openr(cache->cc_relname, NoLock);
224 * initialize the cache's relation id and tuple descriptor
/* tupdesc is a private copy, so a later relcache flush cannot hurt us */
227 Assert(RelationIsValid(relation));
228 cache->relationId = RelationGetRelid(relation);
229 tupdesc = CreateTupleDescCopyConstr(RelationGetDescr(relation));
230 cache->cc_tupdesc = tupdesc;
232 CACHE3_elog(DEBUG, "CatalogCacheInitializeCache: relid %u, %d keys",
233 cache->relationId, cache->cc_nkeys);
236 * initialize cache's key information
239 for (i = 0; i < cache->cc_nkeys; ++i)
241 CatalogCacheInitializeCache_DEBUG2;
243 if (cache->cc_key[i] > 0)
245 Oid keytype = tupdesc->attrs[cache->cc_key[i] - 1]->atttypid;
247 cache->cc_hashfunc[i] = GetCCHashFunc(keytype);
249 /* If GetCCHashFunc liked the type, safe to index into eqproc[] */
250 cache->cc_skey[i].sk_procedure = EQPROC(keytype);
252 fmgr_info(cache->cc_skey[i].sk_procedure,
253 &cache->cc_skey[i].sk_func);
254 cache->cc_skey[i].sk_nargs = cache->cc_skey[i].sk_func.fn_nargs;
256 CACHE4_elog(DEBUG, "CatalogCacheInit %s %d %x",
257 RelationGetRelationName(relation),
264 * close the relation if we opened it
268 heap_close(relation, NoLock);
271 * initialize index information for the cache. this
272 * should only be done once per cache.
275 if (cache->cc_indname != NULL && cache->indexId == InvalidOid)
277 if (!IsIgnoringSystemIndexes() && RelationGetForm(relation)->relhasindex)
281 * If the index doesn't exist we are in trouble.
283 relation = index_openr(cache->cc_indname);
285 cache->indexId = RelationGetRelid(relation);
286 index_close(relation);
/* no usable index: forget the index name so we never try again */
289 cache->cc_indname = NULL;
293 * return to the proper memory context
296 MemoryContextSwitchTo(oldcxt);
299 /* --------------------------------
300 * CatalogCacheComputeHashIndex
301 * --------------------------------
/*
 * CatalogCacheComputeHashIndex
 *	Compute the hash bucket index for the cache's current search keys
 *	(cc_skey[].sk_argument).  Each key's hash value is shifted by a
 *	different amount (9/6/3/0 bits); the switch's cases presumably fall
 *	through so up to four keys are combined.  NOTE(review): the case
 *	labels and the accumulation operators on hashIndex appear to be
 *	elided from this excerpt.
 */
304 CatalogCacheComputeHashIndex(struct catcache * cacheInP)
306 uint32 hashIndex = 0;
308 CACHE4_elog(DEBUG, "CatalogCacheComputeHashIndex %s %d %x",
309 cacheInP->cc_relname,
313 switch (cacheInP->cc_nkeys)
317 (*cacheInP->cc_hashfunc[3])(cacheInP->cc_skey[3].sk_argument) << 9;
321 (*cacheInP->cc_hashfunc[2])(cacheInP->cc_skey[2].sk_argument) << 6;
325 (*cacheInP->cc_hashfunc[1])(cacheInP->cc_skey[1].sk_argument) << 3;
329 (*cacheInP->cc_hashfunc[0])(cacheInP->cc_skey[0].sk_argument);
332 elog(FATAL, "CCComputeHashIndex: %d cc_nkeys", cacheInP->cc_nkeys);
/* reduce the combined hash to a bucket number */
335 hashIndex %= (uint32) cacheInP->cc_size;
336 return (Index) hashIndex;
339 /* --------------------------------
340 * CatalogCacheComputeTupleHashIndex
341 * --------------------------------
/*
 * CatalogCacheComputeTupleHashIndex
 *	Load the cache's search keys from an actual catalog tuple (the OID
 *	system attribute is read directly from t_data->t_oid; other key
 *	columns presumably come from an elided heap_getattr-style fetch),
 *	then hand off to CatalogCacheComputeHashIndex.  The switch cases
 *	appear to fall through so all cc_nkeys keys get filled in.
 *	NOTE(review): case labels and parts of each ternary's second arm
 *	appear to be elided from this excerpt.
 */
344 CatalogCacheComputeTupleHashIndex(struct catcache * cacheInOutP,
350 /* XXX is this really needed? */
351 if (cacheInOutP->relationId == InvalidOid)
352 CatalogCacheInitializeCache(cacheInOutP, relation);
354 switch (cacheInOutP->cc_nkeys)
357 cacheInOutP->cc_skey[3].sk_argument =
358 (cacheInOutP->cc_key[3] == ObjectIdAttributeNumber)
359 ? (Datum) tuple->t_data->t_oid
361 cacheInOutP->cc_key[3],
362 RelationGetDescr(relation),
367 cacheInOutP->cc_skey[2].sk_argument =
368 (cacheInOutP->cc_key[2] == ObjectIdAttributeNumber)
369 ? (Datum) tuple->t_data->t_oid
371 cacheInOutP->cc_key[2],
372 RelationGetDescr(relation),
377 cacheInOutP->cc_skey[1].sk_argument =
378 (cacheInOutP->cc_key[1] == ObjectIdAttributeNumber)
379 ? (Datum) tuple->t_data->t_oid
381 cacheInOutP->cc_key[1],
382 RelationGetDescr(relation),
387 cacheInOutP->cc_skey[0].sk_argument =
388 (cacheInOutP->cc_key[0] == ObjectIdAttributeNumber)
389 ? (Datum) tuple->t_data->t_oid
391 cacheInOutP->cc_key[0],
392 RelationGetDescr(relation),
397 elog(FATAL, "CCComputeTupleHashIndex: %d cc_nkeys",
398 cacheInOutP->cc_nkeys);
402 return CatalogCacheComputeHashIndex(cacheInOutP);
405 /* --------------------------------
408 * NB: assumes caller has switched to CacheCxt
409 * --------------------------------
/*
 * CatCacheRemoveCTup
 *	Remove one cache entry: each entry has two Dlelem nodes (hash-bucket
 *	list and LRU list) whose CatCTup payloads point at each other via
 *	ct_node; free both nodes and the cached heap tuple.
 *	NB: assumes caller has switched to CacheCxt.
 *	NOTE(review): the DLRemove/DLFreeElem call for the first element and
 *	any cc_ntup bookkeeping appear to be elided from this excerpt.
 */
412 CatCacheRemoveCTup(CatCache *cache, Dlelem *elt)
418 if (!elt) /* probably-useless safety check */
421 /* We need to zap both linked-list elements as well as the tuple */
423 ct = (CatCTup *) DLE_VAL(elt);
424 other_elt = ct->ct_node;
425 other_ct = (CatCTup *) DLE_VAL(other_elt);
427 heap_freetuple(ct->ct_tup);
430 DLFreeElem(other_elt);
439 /* --------------------------------
440 * CatalogCacheIdInvalidate()
442 * Invalidate a tuple given a cache id. In this case the id should always
443 * be found (whether the cache has opened its relation or not). Of course,
444 * if the cache has yet to open its relation, there will be no tuples so
446 * --------------------------------
/*
 * CatalogCacheIdInvalidate
 *	Invalidate the cached tuple identified by (cacheId, hashIndex,
 *	pointer): find the cache with the matching id, scan the indicated
 *	hash bucket for an entry whose tuple t_self equals the item pointer,
 *	and remove it via CatCacheRemoveCTup.  Works in CacheCxt.
 *	NOTE(review): several lines (parameter list continuation, loop-exit
 *	tests, braces) appear to be elided from this excerpt.
 */
449 CatalogCacheIdInvalidate(int cacheId, /* XXX */
456 MemoryContext oldcxt;
462 Assert(hashIndex < NCCBUCK);
463 Assert(ItemPointerIsValid(pointer));
464 CACHE1_elog(DEBUG, "CatalogCacheIdInvalidate: called");
467 * switch to the cache context for our memory allocations
/* presumably guarded by "if (!CacheCxt)" — the guard line is elided */
471 CacheCxt = CreateGlobalMemory("Cache");
472 oldcxt = MemoryContextSwitchTo((MemoryContext) CacheCxt);
475 * inspect every cache that could contain the tuple
478 for (ccp = Caches; ccp; ccp = ccp->cc_next)
480 if (cacheId != ccp->id)
483 * inspect the hash bucket until we find a match or exhaust
486 for (elt = DLGetHead(ccp->cc_cache[hashIndex]);
488 elt = DLGetSucc(elt))
490 ct = (CatCTup *) DLE_VAL(elt);
491 if (ItemPointerEquals(pointer, &ct->ct_tup->t_self))
496 * if we found a matching tuple, invalidate it.
502 CatCacheRemoveCTup(ccp, elt);
504 CACHE1_elog(DEBUG, "CatalogCacheIdInvalidate: invalidated");
507 if (cacheId != InvalidCatalogCacheId)
512 * return to the proper memory context
515 MemoryContextSwitchTo(oldcxt);
518 /* ----------------------------------------------------------------
522 * InitIndexedSysCache
525 * RelationInvalidateCatalogCacheTuple
526 * ----------------------------------------------------------------
528 /* --------------------------------
530 * --------------------------------
/*
 * ResetSystemCache (body; the function header line appears to be elided
 * from this excerpt — the elog strings below identify the function).
 *	Discard every entry in every catalog cache: walk all caches, all
 *	hash buckets, and remove each tuple with CatCacheRemoveCTup, then
 *	reset cc_ntup and the recursion-guard busy flag.
 */
535 MemoryContext oldcxt;
536 struct catcache *cache;
538 CACHE1_elog(DEBUG, "ResetSystemCache called");
541 * first switch to the cache context so our allocations
542 * do not vanish at the end of a transaction
/* presumably guarded by "if (!CacheCxt)" — the guard line is elided */
546 CacheCxt = CreateGlobalMemory("Cache");
548 oldcxt = MemoryContextSwitchTo((MemoryContext) CacheCxt);
551 * here we purge the contents of all the caches
553 * for each system cache
554 * for each hash bucket
555 * for each tuple in hash bucket
559 for (cache = Caches; PointerIsValid(cache); cache = cache->cc_next)
563 for (hash = 0; hash < NCCBUCK; hash += 1)
/* grab nextelt first: CatCacheRemoveCTup frees the current element */
568 for (elt = DLGetHead(cache->cc_cache[hash]); elt; elt = nextelt)
570 nextelt = DLGetSucc(elt);
571 CatCacheRemoveCTup(cache, elt);
572 if (cache->cc_ntup < 0)
574 "ResetSystemCache: cc_ntup<0 (software error)");
577 cache->cc_ntup = 0; /* in case of WARN error above */
578 cache->busy = false; /* to recover from recursive-use error */
581 CACHE1_elog(DEBUG, "end of ResetSystemCache call");
584 * back to the old context before we return...
587 MemoryContextSwitchTo(oldcxt);
590 /* --------------------------------
591 * SystemCacheRelationFlushed
593 * This is called by RelationFlushRelation() to clear out cached information
594 * about a relation being dropped. (This could be a DROP TABLE command,
595 * or a temp table being dropped at end of transaction, or a table created
596 * during the current transaction that is being dropped because of abort.)
597 * Remove all cache entries relevant to the specified relation OID.
599 * A special case occurs when relId is itself one of the cacheable system
600 * tables --- although those'll never be dropped, they can get flushed from
601 * the relcache (VACUUM causes this, for example). In that case we need
602 * to flush all cache entries from that table. The brute-force method
603 * currently used takes care of that quite handily. (At one point we
604 * also tried to force re-execution of CatalogCacheInitializeCache for
605 * the cache(s) on that table. This is a bad idea since it leads to all
606 * kinds of trouble if a cache flush occurs while loading cache entries.
607 * We now avoid the need to do it by copying cc_tupdesc out of the relcache,
608 * rather than relying on the relcache to keep a tupdesc for us. Of course
609 * this assumes the tupdesc of a cachable system table will not change...)
610 * --------------------------------
/*
 * SystemCacheRelationFlushed
 *	Called when a relation is flushed from the relcache; per the comment
 *	below, the current implementation simply flushes all caches rather
 *	than searching for entries tied to relId.  NOTE(review): the body's
 *	statement (presumably a ResetSystemCache() call) appears to be
 *	elided from this excerpt.
 */
613 SystemCacheRelationFlushed(Oid relId)
616 * XXX Ideally we'd search the caches and just zap entries that actually
617 * refer to or come from the indicated relation. For now, we take the
618 * brute-force approach: just flush the caches entirely.
623 /* --------------------------------
624 * InitIndexedSysCache
626 * This allocates and initializes a cache for a system catalog relation.
627 * Actually, the cache is only partially initialized to avoid opening the
628 * relation. The relation will be opened and the rest of the cache
629 * structure initialized on the first access.
630 * --------------------------------
/*
 * Trace macro for InitSysCache; the second (empty) definition disables
 * it.  NOTE(review): the #ifdef/#else/#endif lines selecting between
 * the two, and some continuation lines, appear to be elided from this
 * excerpt.
 */
633 #define InitSysCache_DEBUG1 \
635 elog(DEBUG, "InitSysCache: rid=%u id=%d nkeys=%d size=%d\n", \
636 cp->relationId, cp->id, cp->cc_nkeys, cp->cc_size); \
637 for (i = 0; i < nkeys; i += 1) \
639 elog(DEBUG, "InitSysCache: key=%d skey=[%d %d %d %d]\n", \
641 cp->cc_skey[i].sk_flags, \
642 cp->cc_skey[i].sk_attno, \
643 cp->cc_skey[i].sk_procedure, \
644 cp->cc_skey[i].sk_argument); \
649 #define InitSysCache_DEBUG1
/*
 * InitSysCache
 *	Allocate and partially initialize a CatCache for one system catalog.
 *	The relation itself is not opened here; CatalogCacheInitializeCache
 *	finishes the job on first access.  Buckets are carved out of a
 *	single palloc'd Dllist array; the new cache is pushed on the global
 *	Caches list.  Keys given as ObjectIdAttributeNumber get OID hash and
 *	equality support immediately; positive attribute numbers are left
 *	for later.  NOTE(review): braces, declarations, and several
 *	statements appear to be elided from this excerpt.
 */
653 InitSysCache(char *relname,
658 HeapTuple (*iScanfuncP) ())
662 MemoryContext oldcxt;
666 indname = (iname) ? iname : NULL;
669 * first switch to the cache context so our allocations
670 * do not vanish at the end of a transaction
/* presumably guarded by "if (!CacheCxt)" — the guard line is elided */
674 CacheCxt = CreateGlobalMemory("Cache");
676 oldcxt = MemoryContextSwitchTo((MemoryContext) CacheCxt);
679 * allocate a new cache structure
682 cp = (CatCache *) palloc(sizeof(CatCache));
683 MemSet((char *) cp, 0, sizeof(CatCache));
686 * initialize the cache buckets (each bucket is a list header)
687 * and the LRU tuple list
692 * We can only do this optimization because the number of hash
693 * buckets never changes. Without it, we call palloc() too much.
694 * We could move this to dllist.c, but the way we do this is not
695 * dynamic/portable, so why allow other routines to use it.
697 Dllist *cache_begin = palloc((NCCBUCK + 1) * sizeof(Dllist));
699 for (i = 0; i <= NCCBUCK; ++i)
701 cp->cc_cache[i] = &cache_begin[i];
702 cp->cc_cache[i]->dll_head = 0;
703 cp->cc_cache[i]->dll_tail = 0;
707 cp->cc_lrulist = DLNewList();
710 * Caches is the pointer to the head of the list of all the
711 * system caches. here we add the new cache to the top of the list.
714 cp->cc_next = Caches; /* list of caches (single link) */
718 * initialize the cache's relation information for the relation
719 * corresponding to this cache and initialize some of the new
720 * cache's other internal fields.
723 cp->relationId = InvalidOid;
724 cp->indexId = InvalidOid;
725 cp->cc_relname = relname;
726 cp->cc_indname = indname;
727 cp->cc_tupdesc = (TupleDesc) NULL;
730 cp->cc_maxtup = MAXTUP;
731 cp->cc_size = NCCBUCK;
732 cp->cc_nkeys = nkeys;
733 cp->cc_iscanfunc = iScanfuncP;
736 * partially initialize the cache's key information
737 * CatalogCacheInitializeCache() will do the rest
740 for (i = 0; i < nkeys; ++i)
742 cp->cc_key[i] = key[i];
744 elog(FATAL, "InitSysCache: called with 0 key[%d]", i);
/* a negative attno must be the OID system attribute */
747 if (key[i] != ObjectIdAttributeNumber)
748 elog(FATAL, "InitSysCache: called with %d key[%d]", key[i], i);
751 cp->cc_hashfunc[i] = GetCCHashFunc(OIDOID);
752 ScanKeyEntryInitialize(&cp->cc_skey[i],
755 (RegProcedure) F_OIDEQ,
761 cp->cc_skey[i].sk_attno = key[i];
765 * all done. new cache is initialized. print some debugging
766 * information, if appropriate.
772 * back to the old context before we return...
775 MemoryContextSwitchTo(oldcxt);
780 /* --------------------------------
781 * SearchSelfReferences
783 * This call searches for self-referencing information,
784 * which causes infinite recursion in the system catalog cache.
785 * This code short-circuits the normal index lookup for cache loads
786 * in those cases and replaces it with a heap scan.
788 * cache should already be initialized
789 * --------------------------------
/*
 * SearchSelfReferences
 *	Short-circuit cache lookups that would recurse into themselves:
 *	for the INDEXRELID cache looking up pg_index_indexrelid_index
 *	itself, and for the OPEROID cache looking up the oid-comparison
 *	operators (MIN_OIDCMP..MAX_OIDCMP), return a statically cached
 *	private tuple copy loaded by direct heap scan instead of the
 *	normal (index-assisted) lookup path.  Returns NULL when the key
 *	is not one of these self-referencing cases.
 *	NOTE(review): braces, declarations, and early "return NULL" lines
 *	appear to be elided from this excerpt.
 */
792 SearchSelfReferences(struct catcache * cache)
797 if (cache->id == INDEXRELID)
799 static Oid indexSelfOid = InvalidOid;
800 static HeapTuple indexSelfTuple = NULL;
/* one-time: discover the OID of pg_index_indexrelid_index */
802 if (!OidIsValid(indexSelfOid))
806 /* Find oid of pg_index_indexrelid_index */
807 rel = heap_openr(RelationRelationName, AccessShareLock);
808 ScanKeyEntryInitialize(&key, 0, Anum_pg_class_relname,
809 F_NAMEEQ, PointerGetDatum(IndexRelidIndex));
810 sd = heap_beginscan(rel, false, SnapshotNow, 1, &key);
811 ntp = heap_getnext(sd, 0);
812 if (!HeapTupleIsValid(ntp))
813 elog(ERROR, "SearchSelfReferences: %s not found in %s",
814 IndexRelidIndex, RelationRelationName);
815 indexSelfOid = ntp->t_data->t_oid;
817 heap_close(rel, AccessShareLock);
819 /* Looking for something other than pg_index_indexrelid_index? */
820 if ((Oid)cache->cc_skey[0].sk_argument != indexSelfOid)
823 /* Do we need to load our private copy of the tuple? */
824 if (!HeapTupleIsValid(indexSelfTuple))
827 MemoryContext oldcxt;
/* presumably guarded by "if (!CacheCxt)" — the guard line is elided */
830 CacheCxt = CreateGlobalMemory("Cache");
831 rel = heap_open(cache->relationId, AccessShareLock);
832 sd = heap_beginscan(rel, false, SnapshotNow, 1, cache->cc_skey);
833 ntp = heap_getnext(sd, 0);
834 if (!HeapTupleIsValid(ntp))
835 elog(ERROR, "SearchSelfReferences: tuple not found");
/* copy into CacheCxt so the static tuple outlives the transaction */
836 oldcxt = MemoryContextSwitchTo((MemoryContext) CacheCxt);
837 indexSelfTuple = heap_copytuple(ntp);
838 MemoryContextSwitchTo(oldcxt);
840 heap_close(rel, AccessShareLock);
842 return indexSelfTuple;
844 else if (cache->id == OPEROID)
846 /* bootstrapping this requires preloading a range of rows. bjm */
847 static HeapTuple operatorSelfTuple[MAX_OIDCMP-MIN_OIDCMP+1];
848 Oid lookup_oid = (Oid)cache->cc_skey[0].sk_argument;
850 if (lookup_oid < MIN_OIDCMP || lookup_oid > MAX_OIDCMP)
853 if (!HeapTupleIsValid(operatorSelfTuple[lookup_oid-MIN_OIDCMP]))
856 MemoryContext oldcxt;
/* presumably guarded by "if (!CacheCxt)" — the guard line is elided */
859 CacheCxt = CreateGlobalMemory("Cache");
860 rel = heap_open(cache->relationId, AccessShareLock);
861 sd = heap_beginscan(rel, false, SnapshotNow, 1, cache->cc_skey);
862 ntp = heap_getnext(sd, 0);
863 if (!HeapTupleIsValid(ntp))
864 elog(ERROR, "SearchSelfReferences: tuple not found");
865 oldcxt = MemoryContextSwitchTo((MemoryContext) CacheCxt);
866 operatorSelfTuple[lookup_oid-MIN_OIDCMP] = heap_copytuple(ntp);
867 MemoryContextSwitchTo(oldcxt);
869 heap_close(rel, AccessShareLock);
871 return operatorSelfTuple[lookup_oid-MIN_OIDCMP];
878 /* --------------------------------
881 * This call searches a system cache for a tuple, opening the relation
882 * if necessary (the first access to a particular cache).
883 * --------------------------------
/*
 * SearchSysCache
 *	Look up a tuple by key (v1..v4) in the given catcache.  Sequence:
 *	lazy cache init; load search keys; check the self-reference
 *	short-circuit; probe the hash bucket (a hit is moved to the front
 *	of both the bucket list and the LRU list); on a miss, fetch from
 *	the catalog — via the cache's index-scan function when system
 *	indexes are usable, else a sequential heap scan — copy the tuple
 *	into CacheCxt, insert it into the cache, and evict the LRU entry
 *	if cc_maxtup is exceeded.  The busy flag guards against recursive
 *	lookups in the same cache.
 *	NOTE(review): braces, declarations, case labels, and several
 *	statements appear to be elided from this excerpt.
 */
886 SearchSysCache(struct catcache * cache,
897 HeapTuple ntp = NULL;
900 MemoryContext oldcxt;
903 * one-time startup overhead
906 if (cache->relationId == InvalidOid)
907 CatalogCacheInitializeCache(cache, NULL);
910 * initialize the search key information
913 cache->cc_skey[0].sk_argument = v1;
914 cache->cc_skey[1].sk_argument = v2;
915 cache->cc_skey[2].sk_argument = v3;
916 cache->cc_skey[3].sk_argument = v4;
919 * resolve self referencing information
921 if ((ntp = SearchSelfReferences(cache)))
925 * find the hash bucket in which to look for the tuple
928 hash = CatalogCacheComputeHashIndex(cache);
931 * scan the hash bucket until we find a match or exhaust our tuples
934 for (elt = DLGetHead(cache->cc_cache[hash]);
936 elt = DLGetSucc(elt))
940 ct = (CatCTup *) DLE_VAL(elt);
942 * see if the cached tuple matches our key.
943 * (should we be worried about time ranges? -cim 10/2/90)
946 HeapKeyTest(ct->ct_tup,
956 * if we found a tuple in the cache, move it to the top of the
957 * lru list, and return it. We also move it to the front of the
958 * list for its hashbucket, in order to speed subsequent searches.
959 * (The most frequently accessed elements in any hashbucket will
960 * tend to be near the front of the hashbucket's list.)
965 Dlelem *old_lru_elt = ((CatCTup *) DLE_VAL(elt))->ct_node;
967 DLMoveToFront(old_lru_elt);
971 CACHE3_elog(DEBUG, "SearchSysCache(%s): found in bucket %d",
972 cache->cc_relname, hash);
973 #endif /* CACHEDEBUG */
979 * Tuple was not found in cache, so we have to try and
980 * retrieve it directly from the relation. If it's found,
981 * we add it to the cache.
983 * To guard against possible infinite recursion, we mark this cache
984 * "busy" while trying to load a new entry for it. It is OK to
985 * recursively invoke SearchSysCache for a different cache, but
986 * a recursive call for the same cache will error out. (We could
987 * store the specific key(s) being looked for, and consider only
988 * a recursive request for the same key to be an error, but this
989 * simple scheme is sufficient for now.)
994 elog(ERROR, "SearchSysCache: recursive use of cache %d", cache->id);
998 * open the relation associated with the cache
1001 relation = heap_open(cache->relationId, AccessShareLock);
1002 CACHE2_elog(DEBUG, "SearchSysCache(%s)",
1003 RelationGetRelationName(relation));
1006 * Switch to the cache memory context.
/* presumably guarded by "if (!CacheCxt)" — the guard line is elided */
1011 CacheCxt = CreateGlobalMemory("Cache");
1013 oldcxt = MemoryContextSwitchTo((MemoryContext) CacheCxt);
1016 * Scan the relation to find the tuple. If there's an index, and
1017 * if this isn't bootstrap (initdb) time, use the index.
1020 CACHE1_elog(DEBUG, "SearchSysCache: performing scan");
1022 if ((RelationGetForm(relation))->relhasindex
1023 && !IsIgnoringSystemIndexes())
1026 * Switch back to old memory context so memory not freed
1027 * in the scan function will go away at transaction end.
1028 * wieck - 10/18/1996
1033 MemoryContextSwitchTo(oldcxt);
1034 Assert(cache->cc_iscanfunc);
/* dispatch on key count; cases presumably elided around each call */
1035 switch (cache->cc_nkeys)
1038 indextp = cache->cc_iscanfunc(relation, v1, v2, v3, v4);
1041 indextp = cache->cc_iscanfunc(relation, v1, v2, v3);
1044 indextp = cache->cc_iscanfunc(relation, v1, v2);
1047 indextp = cache->cc_iscanfunc(relation, v1);
1054 * Back to Cache context. If we got a tuple copy it
1055 * into our context. wieck - 10/18/1996
1056 * And free the tuple that was allocated in the
1057 * transaction's context. tgl - 02/03/2000
1060 if (HeapTupleIsValid(indextp)) {
1061 MemoryContextSwitchTo((MemoryContext) CacheCxt);
1062 ntp = heap_copytuple(indextp);
1063 MemoryContextSwitchTo(oldcxt);
1064 heap_freetuple(indextp);
1066 MemoryContextSwitchTo((MemoryContext) CacheCxt);
1073 * As above do the lookup in the callers memory
1075 * wieck - 10/18/1996
1078 MemoryContextSwitchTo(oldcxt);
1080 sd = heap_beginscan(relation, 0, SnapshotNow,
1081 cache->cc_nkeys, cache->cc_skey);
1083 ntp = heap_getnext(sd, 0);
1085 MemoryContextSwitchTo((MemoryContext) CacheCxt);
1087 if (HeapTupleIsValid(ntp))
1089 CACHE1_elog(DEBUG, "SearchSysCache: found tuple");
1090 ntp = heap_copytuple(ntp);
1091 /* We should not free the result of heap_getnext... */
1094 MemoryContextSwitchTo(oldcxt);
1098 MemoryContextSwitchTo((MemoryContext) CacheCxt);
/* fetch finished (or failed): clear the recursion guard */
1101 cache->busy = false;
1104 * scan is complete. if tup is valid, we can add it to the cache.
1105 * note we have already copied it into the cache memory context.
1108 if (HeapTupleIsValid(ntp))
1111 * allocate a new cache tuple holder, store the pointer
1112 * to the heap tuple there and initialize the list pointers.
1118 * this is a little cumbersome here because we want the Dlelem's
1119 * in both doubly linked lists to point to one another. That makes
1120 * it easier to remove something from both the cache bucket and
1121 * the lru list at the same time
1123 nct = (CatCTup *) palloc(sizeof(CatCTup));
1125 elt = DLNewElem(nct);
1126 nct2 = (CatCTup *) palloc(sizeof(CatCTup));
1128 lru_elt = DLNewElem(nct2);
1129 nct2->ct_node = elt;
1130 nct->ct_node = lru_elt;
1132 DLAddHead(cache->cc_lrulist, lru_elt);
1133 DLAddHead(cache->cc_cache[hash], elt);
1136 * If we've exceeded the desired size of this cache,
1137 * throw away the least recently used entry.
1140 if (++cache->cc_ntup > cache->cc_maxtup)
1144 elt = DLGetTail(cache->cc_lrulist);
1145 ct = (CatCTup *) DLE_VAL(elt);
1147 if (ct != nct) /* shouldn't be possible, but be safe... */
1149 CACHE2_elog(DEBUG, "SearchSysCache(%s): Overflow, LRU removal",
1150 RelationGetRelationName(relation));
1152 CatCacheRemoveCTup(cache, elt);
1156 CACHE4_elog(DEBUG, "SearchSysCache(%s): Contains %d/%d tuples",
1157 RelationGetRelationName(relation),
1158 cache->cc_ntup, cache->cc_maxtup);
1159 CACHE3_elog(DEBUG, "SearchSysCache(%s): put in bucket %d",
1160 RelationGetRelationName(relation), hash);
1164 * close the relation, switch back to the original memory context
1165 * and return the tuple we found (or NULL)
1168 heap_close(relation, AccessShareLock);
1170 MemoryContextSwitchTo(oldcxt);
1175 /* --------------------------------
1176 * RelationInvalidateCatalogCacheTuple()
1178 * Invalidate a tuple from a specific relation. This call determines the
1179 * cache in question and calls CatalogCacheIdInvalidate(). It is -ok-
1180 * if the relation cannot be found, it simply means this backend has yet
1182 * --------------------------------
/*
 * RelationInvalidateCatalogCacheTuple
 *	For every cache built on the given relation, compute the tuple's
 *	hash bucket and invoke the supplied invalidation callback with
 *	(cache id, bucket index, item pointer).  Works in CacheCxt.
 *	NOTE(review): braces and some statements (including the third
 *	callback argument, presumably &tuple->t_self) appear to be elided
 *	from this excerpt.  The "if (!PointerIsValid(function))" fallback
 *	below is dead code given the Assert on function above — looks
 *	like leftover from an older calling convention; confirm upstream.
 */
1185 RelationInvalidateCatalogCacheTuple(Relation relation,
1187 void (*function) (int, Index, ItemPointer))
1189 struct catcache *ccp;
1190 MemoryContext oldcxt;
1197 Assert(RelationIsValid(relation));
1198 Assert(HeapTupleIsValid(tuple));
1199 Assert(PointerIsValid(function));
1200 CACHE1_elog(DEBUG, "RelationInvalidateCatalogCacheTuple: called");
1203 * switch to the cache memory context
/* presumably guarded by "if (!CacheCxt)" — the guard line is elided */
1207 CacheCxt = CreateGlobalMemory("Cache");
1208 oldcxt = MemoryContextSwitchTo((MemoryContext) CacheCxt);
1212 * if the cache contains tuples from the specified relation
1213 * call the invalidation function on the tuples
1214 * in the proper hash bucket
1217 relationId = RelationGetRelid(relation);
1219 for (ccp = Caches; ccp; ccp = ccp->cc_next)
1221 if (relationId != ccp->relationId)
1225 /* OPT inline simplification of CatalogCacheIdInvalidate */
1226 if (!PointerIsValid(function))
1227 function = CatalogCacheIdInvalidate;
1230 (*function) (ccp->id,
1231 CatalogCacheComputeTupleHashIndex(ccp, relation, tuple),
1236 * return to the proper memory context
1239 MemoryContextSwitchTo(oldcxt);
1241 /* sendpm('I', "Invalidated tuple"); */