1 /*-------------------------------------------------------------------------
4 * System catalog cache for tuples matching a key.
6 * Copyright (c) 1994, Regents of the University of California
10 * $Header: /cvsroot/pgsql/src/backend/utils/cache/catcache.c,v 1.43 1999/06/04 02:19:45 tgl Exp $
12 *-------------------------------------------------------------------------
16 #include "access/heapam.h"
17 #include "access/genam.h"
18 #include "utils/tqual.h"
19 #include "utils/builtins.h"
20 #include "utils/portal.h"
21 #include "utils/catcache.h"
22 #include "utils/elog.h"
23 #include "utils/palloc.h"
24 #include "utils/mcxt.h"
25 #include "utils/rel.h"
26 #include "storage/bufpage.h"
27 #include "access/valid.h"
28 #include "miscadmin.h"
29 #include "fmgr.h" /* for F_BOOLEQ, etc. DANGER */
30 #include "catalog/pg_type.h" /* for OID of int28 type */
31 #include "lib/dllist.h"
33 static void CatCacheRemoveCTup(CatCache *cache, Dlelem *e);
34 static Index CatalogCacheComputeHashIndex(struct catcache * cacheInP);
35 static Index CatalogCacheComputeTupleHashIndex(struct catcache * cacheInOutP,
36 Relation relation, HeapTuple tuple);
37 static void CatalogCacheInitializeCache(struct catcache * cache,
39 static long comphash(long l, char *v);
42 * variables, macros and other stuff
44 * note CCSIZE allocates 51 buckets .. one was already allocated in
45 * the catcache structure.
/*
 * Debug-logging macros: the first group forwards to elog() with one to
 * six arguments, the second group expands to nothing so all cache debug
 * output compiles away.
 *
 * NOTE(review): the #ifdef/#else/#endif selecting between the two groups
 * (presumably CACHEDEBUG -- cf. the "#endif CACHEDEBUG" visible inside
 * SearchSysCache below) is elided from this excerpt.
 */
50 #define CACHE1_elog(a,b) elog(a,b)
51 #define CACHE2_elog(a,b,c) elog(a,b,c)
52 #define CACHE3_elog(a,b,c,d) elog(a,b,c,d)
53 #define CACHE4_elog(a,b,c,d,e) elog(a,b,c,d,e)
54 #define CACHE5_elog(a,b,c,d,e,f) elog(a,b,c,d,e,f)
55 #define CACHE6_elog(a,b,c,d,e,f,g) elog(a,b,c,d,e,f,g)
/* no-op variants used when cache debugging is disabled */
57 #define CACHE1_elog(a,b)
58 #define CACHE2_elog(a,b,c)
59 #define CACHE3_elog(a,b,c,d)
60 #define CACHE4_elog(a,b,c,d,e)
61 #define CACHE5_elog(a,b,c,d,e,f)
62 #define CACHE6_elog(a,b,c,d,e,f,g)
65 static CatCache *Caches = NULL; /* head of list of caches */
67 GlobalMemory CacheCxt; /* context in which caches are allocated */
68 /* CacheCxt is global because relcache uses it too. */
/*
 * eqproc[] maps a system type to the OID of its equality procedure.
 * EQPROC indexes the table with (type OID - 16); the 0l slots are types
 * for which no equality procedure is provided here.
 * NOTE(review): this assumes the relevant system type OIDs form a dense
 * range starting at 16 (bool) -- confirm against pg_type.
 */
72 * EQPROC is used in CatalogCacheInitializeCache
73 * XXX this should be replaced by catalog lookups soon
76 static long eqproc[] = {
77 F_BOOLEQ, 0l, F_CHAREQ, F_NAMEEQ, 0l,
78 F_INT2EQ, F_KEYFIRSTEQ, F_INT4EQ, 0l, F_TEXTEQ,
79 F_OIDEQ, 0l, 0l, 0l, F_OID8EQ
82 #define EQPROC(SYSTEMTYPEOID) eqproc[(SYSTEMTYPEOID)-16]
84 /* ----------------------------------------------------------------
85 * internal support functions
86 * ----------------------------------------------------------------
88 /* --------------------------------
89 * CatalogCacheInitializeCache
90 * --------------------------------
/*
 * Debug tracing for CatalogCacheInitializeCache: DEBUG1 logs entry and
 * which relation (or relname) the cache is being initialized for;
 * DEBUG2 logs each key as it is loaded.  The empty redefinitions below
 * disable the tracing; the surrounding conditional-compilation guard is
 * elided from this excerpt.
 */
93 #define CatalogCacheInitializeCache_DEBUG1 \
95 elog(DEBUG, "CatalogCacheInitializeCache: cache @%08lx", cache); \
97 elog(DEBUG, "CatalogCacheInitializeCache: called w/relation(inval)"); \
99 elog(DEBUG, "CatalogCacheInitializeCache: called w/relname %s", \
103 #define CatalogCacheInitializeCache_DEBUG2 \
105 if (cache->cc_key[i] > 0) { \
106 elog(DEBUG, "CatalogCacheInitializeCache: load %d/%d w/%d, %d", \
107 i+1, cache->cc_nkeys, cache->cc_key[i], \
108 relation->rd_att->attrs[cache->cc_key[i] - 1]->attlen); \
110 elog(DEBUG, "CatalogCacheInitializeCache: load %d/%d w/%d", \
111 i+1, cache->cc_nkeys, cache->cc_key[i]); \
/* no-op variants used when tracing is disabled */
116 #define CatalogCacheInitializeCache_DEBUG1
117 #define CatalogCacheInitializeCache_DEBUG2
/*
 * CatalogCacheInitializeCache
 *
 * Finish initializing a catcache on first use: open the underlying
 * relation (by OID if another cache has already resolved it, else by
 * name), record its OID and tuple descriptor, set up per-key lengths
 * and equality scan keys, and resolve the cache's index OID if an index
 * name was supplied.  Runs inside CacheCxt so the allocations survive
 * transaction end.
 *
 * NOTE(review): this listing is a sampled excerpt -- declarations,
 * braces and some statements between the numbered lines are elided.
 */
121 CatalogCacheInitializeCache(struct catcache * cache,
124 MemoryContext oldcxt;
129 CatalogCacheInitializeCache_DEBUG1;
132 * first switch to the cache context so our allocations
133 * do not vanish at the end of a transaction
/* create CacheCxt lazily on first use (guard condition elided) */
137 CacheCxt = CreateGlobalMemory("Cache");
138 oldcxt = MemoryContextSwitchTo((MemoryContext) CacheCxt);
141 * If no relation was passed we must open it to get access to
142 * its fields. If one of the other caches has already opened
143 * it we use heap_open() instead of heap_openr()
146 if (!RelationIsValid(relation))
151 * scan the caches to see if any other cache has opened the relation
154 for (cp = Caches; cp; cp = cp->cc_next)
156 if (strncmp(cp->cc_relname, cache->cc_relname, NAMEDATALEN) == 0)
158 if (cp->relationId != InvalidOid)
164 * open the relation by name or by id
168 relation = heap_open(cp->relationId);
170 relation = heap_openr(cache->cc_relname);
176 * initialize the cache's relation id
179 Assert(RelationIsValid(relation));
180 cache->relationId = RelationGetRelid(relation);
181 tupdesc = cache->cc_tupdesc = RelationGetDescr(relation);
183 CACHE3_elog(DEBUG, "CatalogCacheInitializeCache: relid %u, %d keys",
184 cache->relationId, cache->cc_nkeys);
187 * initialize cache's key information
190 for (i = 0; i < cache->cc_nkeys; ++i)
192 CatalogCacheInitializeCache_DEBUG2;
/* positive key numbers are ordinary attributes; look up their length
 * and equality procedure from the tuple descriptor */
194 if (cache->cc_key[i] > 0)
198 * Yoiks. The implementation of the hashing code and the
199 * implementation of int28's are at loggerheads. The right
200 * thing to do is to throw out the implementation of int28's
201 * altogether; until that happens, we do the right thing here
202 * to guarantee that the hash key generator doesn't try to
203 * dereference an int2 by mistake.
206 if (tupdesc->attrs[cache->cc_key[i] - 1]->atttypid == INT28OID)
207 cache->cc_klen[i] = sizeof(short);
209 cache->cc_klen[i] = tupdesc->attrs[cache->cc_key[i] - 1]->attlen;
211 cache->cc_skey[i].sk_procedure = EQPROC(tupdesc->attrs[cache->cc_key[i] - 1]->atttypid);
213 fmgr_info(cache->cc_skey[i].sk_procedure,
214 &cache->cc_skey[i].sk_func);
215 cache->cc_skey[i].sk_nargs = cache->cc_skey[i].sk_func.fn_nargs;
217 CACHE5_elog(DEBUG, "CatalogCacheInit %s %d %d %x",
218 &relation->rd_rel->relname,
220 tupdesc->attrs[cache->cc_key[i] - 1]->attlen,
226 * close the relation if we opened it
230 heap_close(relation);
233 * initialize index information for the cache. this
234 * should only be done once per cache.
237 if (cache->cc_indname != NULL && cache->indexId == InvalidOid)
239 if (RelationGetForm(relation)->relhasindex)
243 * If the index doesn't exist we are in trouble.
245 relation = index_openr(cache->cc_indname);
247 cache->indexId = RelationGetRelid(relation);
248 index_close(relation);
/* no usable index: forget the index name so we don't retry
 * (else-branch structure elided from this excerpt) */
251 cache->cc_indname = NULL;
255 * return to the proper memory context
258 MemoryContextSwitchTo(oldcxt);
261 /* --------------------------------
 * CatalogCacheSetId
 *
 * Assign the cache's numeric identifier.  The id must be either
 * InvalidCatalogCacheId or non-negative.
264 * XXX temporary function
265 * --------------------------------
269 CatalogCacheSetId(CatCache *cacheInOutP, int id)
271 Assert(id == InvalidCatalogCacheId || id >= 0);
272 cacheInOutP->id = id;
/*
 * comphash
 *
 * Hash an attribute value of length l (the bulk of the hashing loop is
 * elided from this excerpt).  Name values get special handling so that
 * trailing padding does not perturb the hash.
 */
279 * Compute a hash value, somehow.
281 * XXX explain algorithm here.
283 * l is length of the attribute value, v
284 * v is the attribute value ("Datum")
288 comphash(long l, char *v)
293 CACHE3_elog(DEBUG, "comphash (%d,%x)", l, v);
303 if (l == NAMEDATALEN)
307 * if it's a name, make sure that the values are null-padded.
309 * Note that other fixed-length types can also have the same
310 * typelen so this may break them - XXX
324 /* --------------------------------
325 * CatalogCacheComputeHashIndex
 *
 * Compute the hash-bucket index for the search keys currently stored in
 * the cache's scan-key array: XOR together comphash() of each key,
 * shifted by 9/6/3/0 bits, then reduce modulo cc_size.
326 * --------------------------------
329 CatalogCacheComputeHashIndex(struct catcache * cacheInP)
334 CACHE6_elog(DEBUG, "CatalogCacheComputeHashIndex %s %d %d %d %x",
335 cacheInP->cc_relname,
337 cacheInP->cc_klen[0],
338 cacheInP->cc_klen[1],
/*
 * NOTE(review): the case labels are elided from this excerpt; the
 * switch presumably falls through from case 4 down to case 1 so that a
 * cache with N keys mixes in keys N-1..0 -- confirm against the full
 * source.
 */
341 switch (cacheInP->cc_nkeys)
344 hashIndex ^= comphash(cacheInP->cc_klen[3],
345 (char *) cacheInP->cc_skey[3].sk_argument) << 9;
348 hashIndex ^= comphash(cacheInP->cc_klen[2],
349 (char *) cacheInP->cc_skey[2].sk_argument) << 6;
352 hashIndex ^= comphash(cacheInP->cc_klen[1],
353 (char *) cacheInP->cc_skey[1].sk_argument) << 3;
356 hashIndex ^= comphash(cacheInP->cc_klen[0],
357 (char *) cacheInP->cc_skey[0].sk_argument);
360 elog(FATAL, "CCComputeHashIndex: %d cc_nkeys", cacheInP->cc_nkeys);
363 hashIndex %= cacheInP->cc_size;
367 /* --------------------------------
368 * CatalogCacheComputeTupleHashIndex
 *
 * Load the cache's scan-key arguments from an actual heap tuple (rather
 * than from caller-supplied lookup keys) and then hash them with
 * CatalogCacheComputeHashIndex.  The OID system attribute is read
 * directly from the tuple header; ordinary attributes are fetched via
 * the tuple descriptor (the fetch call itself is elided from this
 * excerpt).
369 * --------------------------------
372 CatalogCacheComputeTupleHashIndex(struct catcache * cacheInOutP,
/* make sure the cache knows its relation before touching scan keys */
378 if (cacheInOutP->relationId == InvalidOid)
379 CatalogCacheInitializeCache(cacheInOutP, relation);
/* NOTE(review): case labels elided; presumably falls through 4 -> 1
 * like CatalogCacheComputeHashIndex -- confirm against full source. */
380 switch (cacheInOutP->cc_nkeys)
383 cacheInOutP->cc_skey[3].sk_argument =
384 (cacheInOutP->cc_key[3] == ObjectIdAttributeNumber)
385 ? (Datum) tuple->t_data->t_oid
387 cacheInOutP->cc_key[3],
388 RelationGetDescr(relation),
393 cacheInOutP->cc_skey[2].sk_argument =
394 (cacheInOutP->cc_key[2] == ObjectIdAttributeNumber)
395 ? (Datum) tuple->t_data->t_oid
397 cacheInOutP->cc_key[2],
398 RelationGetDescr(relation),
403 cacheInOutP->cc_skey[1].sk_argument =
404 (cacheInOutP->cc_key[1] == ObjectIdAttributeNumber)
405 ? (Datum) tuple->t_data->t_oid
407 cacheInOutP->cc_key[1],
408 RelationGetDescr(relation),
413 cacheInOutP->cc_skey[0].sk_argument =
414 (cacheInOutP->cc_key[0] == ObjectIdAttributeNumber)
415 ? (Datum) tuple->t_data->t_oid
417 cacheInOutP->cc_key[0],
418 RelationGetDescr(relation),
423 elog(FATAL, "CCComputeTupleHashIndex: %d cc_nkeys",
424 cacheInOutP->cc_nkeys
429 return CatalogCacheComputeHashIndex(cacheInOutP);
432 /* --------------------------------
 * CatCacheRemoveCTup
 *
 * Remove a cached tuple from the cache.  Each entry lives on two lists
 * (hash bucket and LRU); the two Dlelem's point at each other through
 * ct_node, so given either element we can find and free its partner.
 * (The remaining teardown statements are elided from this excerpt.)
434 * --------------------------------
437 CatCacheRemoveCTup(CatCache *cache, Dlelem *elt)
444 ct = (CatCTup *) DLE_VAL(elt);
448 other_elt = ct->ct_node;
449 other_ct = (CatCTup *) DLE_VAL(other_elt);
451 DLFreeElem(other_elt);
459 /* --------------------------------
460 * CatalogCacheIdInvalidate()
462 * Invalidate a tuple given a cache id. In this case the id should always
463 * be found (whether the cache has opened its relation or not). Of course,
464 * if the cache has yet to open its relation, there will be no tuples so
 * there is nothing to remove in that case.
466 * --------------------------------
469 CatalogCacheIdInvalidate(int cacheId, /* XXX */
476 MemoryContext oldcxt;
482 Assert(hashIndex < NCCBUCK);
483 Assert(ItemPointerIsValid(pointer));
484 CACHE1_elog(DEBUG, "CatalogCacheIdInvalidate: called");
487 * switch to the cache context for our memory allocations
/* create CacheCxt lazily on first use (guard condition elided) */
491 CacheCxt = CreateGlobalMemory("Cache");
492 oldcxt = MemoryContextSwitchTo((MemoryContext) CacheCxt);
495 * inspect every cache that could contain the tuple
498 for (ccp = Caches; ccp; ccp = ccp->cc_next)
/* skip caches with a different id (continue elided) */
500 if (cacheId != ccp->id)
503 * inspect the hash bucket until we find a match or exhaust
506 for (elt = DLGetHead(ccp->cc_cache[hashIndex]);
508 elt = DLGetSucc(elt))
510 ct = (CatCTup *) DLE_VAL(elt);
/* match entries by the tuple's physical location (t_self) */
511 if (ItemPointerEquals(pointer, &ct->ct_tup->t_self))
516 * if we found a matching tuple, invalidate it.
522 CatCacheRemoveCTup(ccp, elt);
524 CACHE1_elog(DEBUG, "CatalogCacheIdInvalidate: invalidated");
527 if (cacheId != InvalidCatalogCacheId)
532 * return to the proper memory context
535 MemoryContextSwitchTo(oldcxt);
536 /* sendpm('I', "Invalidated tuple"); */
539 /* ----------------------------------------------------------------
 * public functions
 *
 * ResetSystemCache
543 * InitIndexedSysCache
 * InitSysCache
 * SearchSysCache
546 * RelationInvalidateCatalogCacheTuple
547 * ----------------------------------------------------------------
549 /* --------------------------------
 * ResetSystemCache
 *
 * Discard every tuple from every catcache, and reset each cache's
 * tuple count and busy flag so the caches are usable again after an
 * error.  (The function's signature line is elided from this excerpt.)
551 * --------------------------------
556 MemoryContext oldcxt;
557 struct catcache *cache;
559 CACHE1_elog(DEBUG, "ResetSystemCache called");
562 * first switch to the cache context so our allocations
563 * do not vanish at the end of a transaction
/* create CacheCxt lazily on first use (guard condition elided) */
567 CacheCxt = CreateGlobalMemory("Cache");
569 oldcxt = MemoryContextSwitchTo((MemoryContext) CacheCxt);
572 * here we purge the contents of all the caches
574 * for each system cache
575 * for each hash bucket
576 * for each tuple in hash bucket
580 for (cache = Caches; PointerIsValid(cache); cache = cache->cc_next)
584 for (hash = 0; hash < NCCBUCK; hash += 1)
/* save the successor before removal, since CatCacheRemoveCTup
 * frees the current list element */
589 for (elt = DLGetHead(cache->cc_cache[hash]); elt; elt = nextelt)
591 nextelt = DLGetSucc(elt);
592 CatCacheRemoveCTup(cache, elt);
/* cc_ntup going negative means removal was double-counted somewhere */
593 if (cache->cc_ntup < 0)
595 "ResetSystemCache: cc_ntup<0 (software error)");
598 cache->cc_ntup = 0; /* in case of WARN error above */
599 cache->busy = false; /* to recover from recursive-use error */
602 CACHE1_elog(DEBUG, "end of ResetSystemCache call");
605 * back to the old context before we return...
608 MemoryContextSwitchTo(oldcxt);
611 /* --------------------------------
612 * SystemCacheRelationFlushed
614 * This is called by RelationFlushRelation() to clear out cached information
615 * about a relation being dropped. (This could be a DROP TABLE command,
616 * or a temp table being dropped at end of transaction, or a table created
617 * during the current transaction that is being dropped because of abort.)
618 * Remove all cache entries relevant to the specified relation OID.
620 * A special case occurs when relId is itself one of the cacheable system
621 * tables --- although those'll never be dropped, they can get flushed from
622 * the relcache (VACUUM causes this, for example). In that case we need to
623 * force the next SearchSysCache() call to reinitialize the cache itself,
624 * because we have info (such as cc_tupdesc) that is pointing at the about-
625 * to-be-deleted relcache entry.
626 * --------------------------------
629 SystemCacheRelationFlushed(Oid relId)
631 struct catcache *cache;
634 * XXX Ideally we'd search the caches and just zap entries that actually
635 * refer to the indicated relation. For now, we take the brute-force
636 * approach: just flush the caches entirely.
 *
 * (The call that performs the flush -- presumably ResetSystemCache() --
 * is elided from this excerpt.)
641 * If relcache is dropping a system relation's cache entry, mark the
642 * associated cache structures invalid, so we can rebuild them from
643 * scratch (not just repopulate them) next time they are used.
645 for (cache = Caches; PointerIsValid(cache); cache = cache->cc_next)
/* InvalidOid makes the next SearchSysCache re-run
 * CatalogCacheInitializeCache for this cache */
647 if (cache->relationId == relId)
648 cache->relationId = InvalidOid;
652 /* --------------------------------
653 * InitIndexedSysCache
655 * This allocates and initializes a cache for a system catalog relation.
656 * Actually, the cache is only partially initialized to avoid opening the
657 * relation. The relation will be opened and the rest of the cache
658 * structure initialized on the first access.
659 * --------------------------------
/* debug tracing of the newly built cache; the empty redefinition below
 * disables it (the conditional guard is elided from this excerpt) */
662 #define InitSysCache_DEBUG1 \
664 elog(DEBUG, "InitSysCache: rid=%u id=%d nkeys=%d size=%d\n", \
665 cp->relationId, cp->id, cp->cc_nkeys, cp->cc_size); \
666 for (i = 0; i < nkeys; i += 1) \
668 elog(DEBUG, "InitSysCache: key=%d len=%d skey=[%d %d %d %d]\n", \
669 cp->cc_key[i], cp->cc_klen[i], \
670 cp->cc_skey[i].sk_flags, \
671 cp->cc_skey[i].sk_attno, \
672 cp->cc_skey[i].sk_procedure, \
673 cp->cc_skey[i].sk_argument); \
678 #define InitSysCache_DEBUG1
/*
 * InitSysCache
 *
 * Build a new CatCache for relation `relname` (optionally indexed by
 * `iname`), link it onto the global Caches list, and set up its key
 * numbers and scan keys.  Relation-dependent fields (relationId,
 * indexId, cc_tupdesc) are left invalid until first access.  Returns
 * the new cache (return statement elided from this excerpt).
 */
682 InitSysCache(char *relname,
687 HeapTuple (*iScanfuncP) ())
691 MemoryContext oldcxt;
695 indname = (iname) ? iname : NULL;
698 * first switch to the cache context so our allocations
699 * do not vanish at the end of a transaction
/* create CacheCxt lazily on first use (guard condition elided) */
703 CacheCxt = CreateGlobalMemory("Cache");
705 oldcxt = MemoryContextSwitchTo((MemoryContext) CacheCxt);
708 * allocate a new cache structure
711 cp = (CatCache *) palloc(sizeof(CatCache));
712 MemSet((char *) cp, 0, sizeof(CatCache));
715 * initialize the cache buckets (each bucket is a list header)
716 * and the LRU tuple list
722 * We can only do this optimization because the number of hash
723 * buckets never changes. Without it, we call malloc() too much.
724 * We could move this to dllist.c, but the way we do this is not
725 * dynamic/portable, so why allow other routines to use it.
/* one malloc for all NCCBUCK+1 bucket headers; note malloc (not
 * palloc), so the headers are not tied to any memory context */
727 Dllist *cache_begin = malloc((NCCBUCK + 1) * sizeof(Dllist));
729 for (i = 0; i <= NCCBUCK; ++i)
731 cp->cc_cache[i] = &cache_begin[i];
732 cp->cc_cache[i]->dll_head = 0;
733 cp->cc_cache[i]->dll_tail = 0;
737 cp->cc_lrulist = DLNewList();
740 * Caches is the pointer to the head of the list of all the
741 * system caches. here we add the new cache to the top of the list.
744 cp->cc_next = Caches; /* list of caches (single link) */
748 * initialize the cache's relation information for the relation
749 * corresponding to this cache and initialize some of the new
750 * cache's other internal fields.
753 cp->relationId = InvalidOid;
754 cp->indexId = InvalidOid;
755 cp->cc_relname = relname;
756 cp->cc_indname = indname;
757 cp->cc_tupdesc = (TupleDesc) NULL;
760 cp->cc_maxtup = MAXTUP;
761 cp->cc_size = NCCBUCK;
762 cp->cc_nkeys = nkeys;
763 cp->cc_iscanfunc = iScanfuncP;
766 * initialize the cache's key information
769 for (i = 0; i < nkeys; ++i)
771 cp->cc_key[i] = key[i];
/* key number 0 is not a legal attribute number (guard elided) */
773 elog(FATAL, "InitSysCache: called with 0 key[%d]", i);
/* negative key numbers must be the OID system attribute; only that
 * one can be fully set up here without the tuple descriptor */
776 if (key[i] != ObjectIdAttributeNumber)
777 elog(FATAL, "InitSysCache: called with %d key[%d]", key[i], i);
780 cp->cc_klen[i] = sizeof(Oid);
783 * ScanKeyEntryData and struct skey are equivalent. It
784 * looks like a move was made to obsolete struct skey, but
785 * it didn't reach this file. Someday we should clean up
786 * this code and consolidate to ScanKeyEntry - mer 10 Nov
789 ScanKeyEntryInitialize(&cp->cc_skey[i],
792 (RegProcedure) F_OIDEQ,
/* positive (ordinary) keys: record the attribute number now; length
 * and equality proc are filled in by CatalogCacheInitializeCache */
798 cp->cc_skey[i].sk_attno = key[i];
802 * all done. new cache is initialized. print some debugging
803 * information, if appropriate.
809 * back to the old context before we return...
812 MemoryContextSwitchTo(oldcxt);
817 /* --------------------------------
 * SearchSysCache
 *
 * Look up a tuple by keys v1..v4 in the given cache.  On a hit, the
 * entry moves to the front of both the LRU list and its hash-bucket
 * list and the cached tuple is returned.  On a miss, the relation is
 * scanned -- via the cache's index scan function when it has an index
 * and we are not in bootstrap mode, else a sequential heap scan -- and
 * a copy of any tuple found is inserted into the cache, evicting the
 * least recently used entry if cc_maxtup is exceeded.  Returns the
 * tuple found, or NULL.
 *
 * NOTE(review): this listing is a sampled excerpt -- declarations,
 * braces, return statements and some guard conditions are elided.
820 * This call searches a system cache for a tuple, opening the relation
821 * if necessary (the first access to a particular cache).
822 * --------------------------------
825 SearchSysCache(struct catcache * cache,
839 MemoryContext oldcxt;
/* first access: open the relation and finish cache setup */
845 if (cache->relationId == InvalidOid)
846 CatalogCacheInitializeCache(cache, NULL);
849 * initialize the search key information
852 cache->cc_skey[0].sk_argument = v1;
853 cache->cc_skey[1].sk_argument = v2;
854 cache->cc_skey[2].sk_argument = v3;
855 cache->cc_skey[3].sk_argument = v4;
858 * find the hash bucket in which to look for the tuple
861 hash = CatalogCacheComputeHashIndex(cache);
864 * scan the hash bucket until we find a match or exhaust our tuples
867 for (elt = DLGetHead(cache->cc_cache[hash]);
869 elt = DLGetSucc(elt))
873 ct = (CatCTup *) DLE_VAL(elt);
875 * see if the cached tuple matches our key.
876 * (should we be worried about time ranges? -cim 10/2/90)
879 HeapKeyTest(ct->ct_tup,
889 * if we found a tuple in the cache, move it to the top of the
890 * lru list, and return it. We also move it to the front of the
891 * list for its hashbucket, in order to speed subsequent searches.
892 * (The most frequently accessed elements in any hashbucket will
893 * tend to be near the front of the hashbucket's list.)
898 Dlelem *old_lru_elt = ((CatCTup *) DLE_VAL(elt))->ct_node;
900 DLMoveToFront(old_lru_elt);
/* debug-only: report which bucket served the hit */
904 relation = heap_open(cache->relationId);
905 CACHE3_elog(DEBUG, "SearchSysCache(%s): found in bucket %d",
906 RelationGetRelationName(relation), hash);
907 heap_close(relation);
908 #endif /* CACHEDEBUG */
914 * Tuple was not found in cache, so we have to try and
915 * retrieve it directly from the relation. If it's found,
916 * we add it to the cache.
918 * To guard against possible infinite recursion, we mark this cache
919 * "busy" while trying to load a new entry for it. It is OK to
920 * recursively invoke SearchSysCache for a different cache, but
921 * a recursive call for the same cache will error out. (We could
922 * store the specific key(s) being looked for, and consider only
923 * a recursive request for the same key to be an error, but this
924 * simple scheme is sufficient for now.)
930 elog(ERROR, "SearchSysCache: recursive use of cache %d", cache->id);
935 * open the relation associated with the cache
938 relation = heap_open(cache->relationId);
939 CACHE2_elog(DEBUG, "SearchSysCache(%s)",
940 RelationGetRelationName(relation));
943 * Switch to the cache memory context.
/* create CacheCxt lazily on first use (guard condition elided) */
948 CacheCxt = CreateGlobalMemory("Cache");
950 oldcxt = MemoryContextSwitchTo((MemoryContext) CacheCxt);
953 * Scan the relation to find the tuple. If there's an index, and
954 * if this isn't bootstrap (initdb) time, use the index.
957 CACHE2_elog(DEBUG, "SearchSysCache: performing scan (override==%d)",
960 if ((RelationGetForm(relation))->relhasindex
961 && !IsBootstrapProcessingMode())
964 * Switch back to old memory context so memory not freed
965 * in the scan function will go away at transaction end.
969 MemoryContextSwitchTo(oldcxt);
970 Assert(cache->cc_iscanfunc);
/* dispatch to the index-scan function with the right arity
 * (case labels elided from this excerpt) */
971 switch (cache->cc_nkeys)
974 ntp = cache->cc_iscanfunc(relation, v1, v2, v3, v4);
977 ntp = cache->cc_iscanfunc(relation, v1, v2, v3);
980 ntp = cache->cc_iscanfunc(relation, v1, v2);
983 ntp = cache->cc_iscanfunc(relation, v1);
987 * Back to Cache context. If we got a tuple copy it
 * into cache memory so it outlives the transaction.
992 MemoryContextSwitchTo((MemoryContext) CacheCxt);
993 if (HeapTupleIsValid(ntp))
994 ntp = heap_copytuple(ntp);
/* no usable index: sequential heap scan in the caller's context */
1001 * As above do the lookup in the callers memory
1003 * wieck - 10/18/1996
1006 MemoryContextSwitchTo(oldcxt);
1008 sd = heap_beginscan(relation, 0, SnapshotNow,
1009 cache->cc_nkeys, cache->cc_skey);
1011 ntp = heap_getnext(sd, 0);
1013 MemoryContextSwitchTo((MemoryContext) CacheCxt);
1015 if (HeapTupleIsValid(ntp))
1017 CACHE1_elog(DEBUG, "SearchSysCache: found tuple");
1018 ntp = heap_copytuple(ntp);
1021 MemoryContextSwitchTo(oldcxt);
/* heap_endscan presumably happens around here (elided) */
1025 MemoryContextSwitchTo((MemoryContext) CacheCxt);
/* scan finished: clear the recursion guard */
1028 cache->busy = false;
1031 * scan is complete. if tup is valid, we copy it and add the copy to
1035 if (HeapTupleIsValid(ntp))
1038 * allocate a new cache tuple holder, store the pointer
1039 * to the heap tuple there and initialize the list pointers.
1045 * this is a little cumbersome here because we want the Dlelem's
1046 * in both doubly linked lists to point to one another. That makes
1047 * it easier to remove something from both the cache bucket and
1048 * the lru list at the same time
/* NOTE(review): CatCTup holders use malloc rather than palloc --
 * presumably to keep them out of memory-context resets; confirm */
1050 nct = (CatCTup *) malloc(sizeof(CatCTup));
1052 elt = DLNewElem(nct);
1053 nct2 = (CatCTup *) malloc(sizeof(CatCTup));
1055 lru_elt = DLNewElem(nct2);
1056 nct2->ct_node = elt;
1057 nct->ct_node = lru_elt;
1059 DLAddHead(cache->cc_lrulist, lru_elt);
1060 DLAddHead(cache->cc_cache[hash], elt);
1063 * If we've exceeded the desired size of this cache,
1064 * throw away the least recently used entry.
1067 if (++cache->cc_ntup > cache->cc_maxtup)
1071 elt = DLGetTail(cache->cc_lrulist);
1072 ct = (CatCTup *) DLE_VAL(elt);
/* never evict the entry we just inserted */
1074 if (ct != nct) /* shouldn't be possible, but be safe... */
1076 CACHE2_elog(DEBUG, "SearchSysCache(%s): Overflow, LRU removal",
1077 RelationGetRelationName(relation));
1079 CatCacheRemoveCTup(cache, elt);
1083 CACHE4_elog(DEBUG, "SearchSysCache(%s): Contains %d/%d tuples",
1084 RelationGetRelationName(relation),
1085 cache->cc_ntup, cache->cc_maxtup);
1086 CACHE3_elog(DEBUG, "SearchSysCache(%s): put in bucket %d",
1087 RelationGetRelationName(relation), hash);
1091 * close the relation, switch back to the original memory context
1092 * and return the tuple we found (or NULL)
1095 heap_close(relation);
1097 MemoryContextSwitchTo(oldcxt);
1101 /* --------------------------------
1102 * RelationInvalidateCatalogCacheTuple()
1104 * Invalidate a tuple from a specific relation. This call determines the
1105 * cache in question and calls CatalogCacheIdInvalidate(). It is -ok-
1106 * if the relation cannot be found, it simply means this backend has yet
 * to open it.
1108 * --------------------------------
1111 RelationInvalidateCatalogCacheTuple(Relation relation,
1113 void (*function) (int, Index, ItemPointer))
1115 struct catcache *ccp;
1116 MemoryContext oldcxt;
1123 Assert(RelationIsValid(relation));
1124 Assert(HeapTupleIsValid(tuple));
1125 Assert(PointerIsValid(function));
1126 CACHE1_elog(DEBUG, "RelationInvalidateCatalogCacheTuple: called");
1129 * switch to the cache memory context
/* create CacheCxt lazily on first use (guard condition elided) */
1133 CacheCxt = CreateGlobalMemory("Cache");
1134 oldcxt = MemoryContextSwitchTo((MemoryContext) CacheCxt);
1138 * if the cache contains tuples from the specified relation
1139 * call the invalidation function on the tuples
1140 * in the proper hash bucket
1143 relationId = RelationGetRelid(relation);
1145 for (ccp = Caches; ccp; ccp = ccp->cc_next)
1147 if (relationId != ccp->relationId)
1151 /* OPT inline simplification of CatalogCacheIdInvalidate */
/* NOTE(review): this NULL fallback can never fire given the
 * Assert(PointerIsValid(function)) above -- dead defensive code */
1152 if (!PointerIsValid(function))
1153 function = CatalogCacheIdInvalidate;
/* invoke callback with cache id, bucket index computed from the
 * tuple's own attribute values, and its item pointer (elided) */
1156 (*function) (ccp->id,
1157 CatalogCacheComputeTupleHashIndex(ccp, relation, tuple),
1160 heap_close(relation);
1164 * return to the proper memory context
1167 MemoryContextSwitchTo(oldcxt);
1169 /* sendpm('I', "Invalidated tuple"); */