1 /*-------------------------------------------------------------------------
4 * System catalog cache for tuples matching a key.
6 * Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
7 * Portions Copyright (c) 1994, Regents of the University of California
11 * $Header: /cvsroot/pgsql/src/backend/utils/cache/catcache.c,v 1.93 2002/03/26 19:16:08 tgl Exp $
13 *-------------------------------------------------------------------------
17 #include "access/genam.h"
18 #include "access/hash.h"
19 #include "access/heapam.h"
20 #include "access/valid.h"
21 #include "catalog/pg_opclass.h"
22 #include "catalog/pg_operator.h"
23 #include "catalog/pg_type.h"
24 #include "catalog/catname.h"
25 #include "catalog/indexing.h"
26 #include "miscadmin.h"
28 #include "storage/ipc.h" /* for on_proc_exit */
30 #include "utils/builtins.h"
31 #include "utils/fmgroids.h"
32 #include "utils/catcache.h"
33 #include "utils/relcache.h"
34 #include "utils/syscache.h"
37 /* #define CACHEDEBUG */ /* turns DEBUG elogs on */
40 * Constants related to size of the catcache.
42 * NCCBUCKETS must be a power of two and must be less than 64K (because
43 * SharedInvalCatcacheMsg crams hash indexes into a uint16 field). In
44 * practice it should be a lot less, anyway, to avoid chewing up too much
45 * space on hash bucket headers.
47 * MAXCCTUPLES could be as small as a few hundred, if per-backend memory
48 * consumption is at a premium.
50 #define NCCBUCKETS 256 /* Hash buckets per CatCache */
51 #define MAXCCTUPLES 5000 /* Maximum # of tuples in all caches */
54 * Given a hash value and the size of the hash table, find the bucket
55 * in which the hash value belongs. Since the hash table must contain
56 * a power-of-2 number of elements, this is a simple bitmask.
/* Relies on sz being a power of two: (h & (sz - 1)) == (h % sz). */
58 #define HASH_INDEX(h, sz) ((Index) ((h) & ((sz) - 1)))
62 * variables, macros and other stuff
/*
 * Debug-logging wrappers: the first set forwards to elog(); the second,
 * empty set compiles the calls away entirely.  In the full file these two
 * sets are selected by #ifdef CACHEDEBUG / #else / #endif (the guard lines
 * are elided in this excerpt -- see the CACHEDEBUG comment near the top).
 */
66 #define CACHE1_elog(a,b) elog(a,b)
67 #define CACHE2_elog(a,b,c) elog(a,b,c)
68 #define CACHE3_elog(a,b,c,d) elog(a,b,c,d)
69 #define CACHE4_elog(a,b,c,d,e) elog(a,b,c,d,e)
70 #define CACHE5_elog(a,b,c,d,e,f) elog(a,b,c,d,e,f)
71 #define CACHE6_elog(a,b,c,d,e,f,g) elog(a,b,c,d,e,f,g)
/* No-op variants compiled when CACHEDEBUG is off. */
73 #define CACHE1_elog(a,b)
74 #define CACHE2_elog(a,b,c)
75 #define CACHE3_elog(a,b,c,d)
76 #define CACHE4_elog(a,b,c,d,e)
77 #define CACHE5_elog(a,b,c,d,e,f)
78 #define CACHE6_elog(a,b,c,d,e,f,g)
/* Per-backend catcache bookkeeping; allocated lazily on first InitCatCache. */
81 /* Cache management header --- pointer is NULL until created */
82 static CatCacheHeader *CacheHdr = NULL;
85 * EQPROC is used in CatalogCacheInitializeCache to find the equality
86 * functions for system types that are used as cache key fields.
87 * See also GetCCHashFunc, which should support the same set of types.
89 * XXX this should be replaced by catalog lookups,
90 * but that seems to pose considerable risk of circularity...
/*
 * Table is indexed by (type OID - BOOLOID), per the EQPROC macro below;
 * InvalidOid entries mark type OIDs in that range that are not supported
 * as catcache key columns.
 */
92 static const Oid eqproc[] = {
93 F_BOOLEQ, InvalidOid, F_CHAREQ, F_NAMEEQ, InvalidOid,
94 F_INT2EQ, F_INT2VECTOREQ, F_INT4EQ, F_OIDEQ, F_TEXTEQ,
95 F_OIDEQ, InvalidOid, InvalidOid, InvalidOid, F_OIDVECTOREQ
98 #define EQPROC(SYSTEMTYPEOID) eqproc[(SYSTEMTYPEOID)-BOOLOID]
/* Forward declarations for the internal support routines defined below. */
101 static uint32 CatalogCacheComputeHashValue(CatCache *cache,
103 static uint32 CatalogCacheComputeTupleHashValue(CatCache *cache,
105 #ifdef CATCACHE_STATS
106 static void CatCachePrintStats(void);
108 static void CatCacheRemoveCTup(CatCache *cache, CatCTup *ct);
109 static void CatalogCacheInitializeCache(CatCache *cache);
113 * internal support functions
/*
 * GetCCHashFunc
 *		Map a key column's type OID to the hash function used for catcache
 *		hashing.  Must support the same set of types as the eqproc[] table
 *		(see comment there).
 *		NOTE(review): the switch over keytype is elided in this excerpt;
 *		only two of its return arms are visible below.
 */
117 GetCCHashFunc(Oid keytype)
129 return hashint2vector;
138 return hashoidvector;
140 elog(FATAL, "GetCCHashFunc: type %u unsupported as catcache key",
/* not reached -- elog(FATAL) does not return; this quiets the compiler */
142 return (PGFunction) NULL;
147 * CatalogCacheComputeHashValue
149 * Compute the hash value associated with a given set of lookup keys
/*
 * Hashes up to four key datums through the cache's per-key hash functions,
 * combining the results with distinct left shifts (9/6/3/0) so that key
 * position affects the final value.  NOTE(review): the case labels and the
 * operators combining the shifted values into hashValue are elided in this
 * excerpt; presumably the cases fall through from nkeys==4 down to 1 --
 * confirm against the full source.
 */
152 CatalogCacheComputeHashValue(CatCache *cache, ScanKey cur_skey)
154 uint32 hashValue = 0;
156 CACHE4_elog(DEBUG1, "CatalogCacheComputeHashValue %s %d %p",
161 switch (cache->cc_nkeys)
165 DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[3],
166 cur_skey[3].sk_argument)) << 9;
170 DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[2],
171 cur_skey[2].sk_argument)) << 6;
175 DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[1],
176 cur_skey[1].sk_argument)) << 3;
180 DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[0],
181 cur_skey[0].sk_argument));
/* Any nkeys outside 1..4 is a caller bug */
184 elog(FATAL, "CCComputeHashValue: %d cc_nkeys", cache->cc_nkeys);
192 * CatalogCacheComputeTupleHashValue
194 * Compute the hash value associated with a given tuple to be cached
/*
 * Builds a scankey from the tuple's own key columns (the OID system column
 * is extracted directly from the tuple header; the alternative arm of each
 * conditional for ordinary columns is elided in this excerpt), then defers
 * to CatalogCacheComputeHashValue.
 */
197 CatalogCacheComputeTupleHashValue(CatCache *cache, HeapTuple tuple)
199 ScanKeyData cur_skey[4];
202 /* Copy pre-initialized overhead data for scankey */
203 memcpy(cur_skey, cache->cc_skey, sizeof(cur_skey));
205 /* Now extract key fields from tuple, insert into scankey */
206 switch (cache->cc_nkeys)
209 cur_skey[3].sk_argument =
210 (cache->cc_key[3] == ObjectIdAttributeNumber)
211 ? ObjectIdGetDatum(tuple->t_data->t_oid)
219 cur_skey[2].sk_argument =
220 (cache->cc_key[2] == ObjectIdAttributeNumber)
221 ? ObjectIdGetDatum(tuple->t_data->t_oid)
229 cur_skey[1].sk_argument =
230 (cache->cc_key[1] == ObjectIdAttributeNumber)
231 ? ObjectIdGetDatum(tuple->t_data->t_oid)
239 cur_skey[0].sk_argument =
240 (cache->cc_key[0] == ObjectIdAttributeNumber)
241 ? ObjectIdGetDatum(tuple->t_data->t_oid)
249 elog(FATAL, "CCComputeTupleHashValue: %d cc_nkeys",
254 return CatalogCacheComputeHashValue(cache, cur_skey);
258 #ifdef CATCACHE_STATS
/*
 * CatCachePrintStats
 *		Emit per-cache and grand-total search statistics via elog(DEBUG1).
 *		Registered with on_proc_exit in InitCatCache when CATCACHE_STATS
 *		is defined, so it runs once at backend exit.
 */
261 CatCachePrintStats(void)
264 long cc_searches = 0;
266 long cc_neg_hits = 0;
267 long cc_newloads = 0;
269 long cc_discards = 0;
271 elog(DEBUG1, "Catcache stats dump: %d/%d tuples in catcaches",
272 CacheHdr->ch_ntup, CacheHdr->ch_maxtup);
274 for (cache = CacheHdr->ch_caches; cache; cache = cache->cc_next)
276 if (cache->cc_ntup == 0 && cache->cc_searches == 0)
277 continue; /* don't print unused caches */
278 elog(DEBUG1, "Catcache %s/%s: %d tup, %ld srch, %ld+%ld=%ld hits, %ld+%ld=%ld loads, %ld invals, %ld discards",
285 cache->cc_hits + cache->cc_neg_hits,
287 cache->cc_searches - cache->cc_hits - cache->cc_neg_hits - cache->cc_newloads,
288 cache->cc_searches - cache->cc_hits - cache->cc_neg_hits,
/* Accumulate grand totals across all caches for the summary line below. */
291 cc_searches += cache->cc_searches;
292 cc_hits += cache->cc_hits;
293 cc_neg_hits += cache->cc_neg_hits;
294 cc_newloads += cache->cc_newloads;
295 cc_invals += cache->cc_invals;
296 cc_discards += cache->cc_discards;
298 elog(DEBUG1, "Catcache totals: %d tup, %ld srch, %ld+%ld=%ld hits, %ld+%ld=%ld loads, %ld invals, %ld discards",
303 cc_hits + cc_neg_hits,
305 cc_searches - cc_hits - cc_neg_hits - cc_newloads,
306 cc_searches - cc_hits - cc_neg_hits,
311 #endif /* CATCACHE_STATS */
317 * Unlink and delete the given cache entry
/*
 * CatCacheRemoveCTup
 *		Caller must guarantee refcount is zero.  The entry is unlinked from
 *		both the global LRU list and its hash-bucket list, and its tuple
 *		storage is freed.  (Freeing of the CatCTup struct itself and the
 *		tuple-count bookkeeping are elided in this excerpt.)
 */
320 CatCacheRemoveCTup(CatCache *cache, CatCTup *ct)
322 Assert(ct->refcount == 0);
323 Assert(ct->my_cache == cache);
325 /* delink from linked lists */
326 DLRemove(&ct->lrulist_elem);
327 DLRemove(&ct->cache_elem);
329 /* free associated tuple data */
330 if (ct->tuple.t_data != NULL)
331 pfree(ct->tuple.t_data);
339 * CatalogCacheIdInvalidate
341 * Invalidate entries in the specified cache, given a hash value and
342 * item pointer. Positive entries are deleted if they match the item
343 * pointer. Negative entries must be deleted if they match the hash
344 * value (since we do not have the exact key of the tuple that's being
345 * inserted). But this should only rarely result in loss of a cache
346 * entry that could have been kept.
348 * Note that it's not very relevant whether the tuple identified by
349 * the item pointer is being inserted or deleted. We don't expect to
350 * find matching positive entries in the one case, and we don't expect
351 * to find matching negative entries in the other; but we will do the
352 * right things in any case.
354 * This routine is only quasi-public: it should only be used by inval.c.
357 CatalogCacheIdInvalidate(int cacheId,
366 Assert(ItemPointerIsValid(pointer));
367 CACHE1_elog(DEBUG1, "CatalogCacheIdInvalidate: called");
370 * inspect caches to find the proper cache
372 for (ccp = CacheHdr->ch_caches; ccp; ccp = ccp->cc_next)
378 if (cacheId != ccp->id)
382 * We don't bother to check whether the cache has finished
383 * initialization yet; if not, there will be no entries in it
388 * inspect the proper hash bucket for matches
390 hashIndex = HASH_INDEX(hashValue, ccp->cc_nbuckets);
/* Grab the successor first: the current element may be removed below. */
392 for (elt = DLGetHead(&ccp->cc_bucket[hashIndex]); elt; elt = nextelt)
394 CatCTup *ct = (CatCTup *) DLE_VAL(elt);
396 nextelt = DLGetSucc(elt);
398 if (hashValue != ct->hash_value)
399 continue; /* ignore non-matching hash values */
402 ItemPointerEquals(pointer, &ct->tuple.t_self))
/* NOTE(review): handling of in-use entries (refcount > 0) is elided here. */
404 if (ct->refcount > 0)
407 CatCacheRemoveCTup(ccp, ct);
408 CACHE1_elog(DEBUG1, "CatalogCacheIdInvalidate: invalidated");
409 #ifdef CATCACHE_STATS
412 /* could be multiple matches, so keep looking! */
415 break; /* need only search this one cache */
419 /* ----------------------------------------------------------------
421 * ----------------------------------------------------------------
426 * Standard routine for creating cache context if it doesn't exist yet
428 * There are a lot of places (probably far more than necessary) that check
429 * whether CacheMemoryContext exists yet and want to create it if not.
430 * We centralize knowledge of exactly how to create it here.
/*
 * CreateCacheMemoryContext
 *		Create CacheMemoryContext as a child of TopMemoryContext, so that
 *		cached data outlives any single transaction.  Idempotent: a no-op
 *		if the context already exists.
 */
433 CreateCacheMemoryContext(void)
436 * Purely for paranoia, check that context doesn't exist; caller
437 * probably did so already.
439 if (!CacheMemoryContext)
440 CacheMemoryContext = AllocSetContextCreate(TopMemoryContext,
441 "CacheMemoryContext",
442 ALLOCSET_DEFAULT_MINSIZE,
443 ALLOCSET_DEFAULT_INITSIZE,
444 ALLOCSET_DEFAULT_MAXSIZE);
451 * Clean up catcaches at end of transaction (either commit or abort)
453 * We scan the caches to reset refcounts to zero. This is of course
454 * necessary in the abort case, since elog() may have interrupted routines.
455 * In the commit case, any nonzero counts indicate failure to call
456 * ReleaseSysCache, so we put out a notice for debugging purposes.
459 AtEOXact_CatCache(bool isCommit)
/* Walk the global LRU list; save the successor since entries may be removed. */
464 for (elt = DLGetHead(&CacheHdr->ch_lrulist); elt; elt = nextelt)
466 CatCTup *ct = (CatCTup *) DLE_VAL(elt);
468 nextelt = DLGetSucc(elt);
470 if (ct->refcount != 0)
/* Leaked reference: report it (per the header comment, a warning is only
 * expected in the commit case) and reset the count. */
473 elog(WARNING, "Cache reference leak: cache %s (%d), tuple %u has count %d",
474 ct->my_cache->cc_relname, ct->my_cache->id,
475 ct->tuple.t_data->t_oid,
480 /* Clean up any now-deletable dead entries */
482 CatCacheRemoveCTup(ct->my_cache, ct);
489 * Reset one catalog cache to empty.
491 * This is not very efficient if the target cache is nearly empty.
492 * However, it shouldn't need to be efficient; we don't invoke it often.
495 ResetCatalogCache(CatCache *cache)
499 /* Remove each tuple in this cache, or at least mark it dead */
500 for (i = 0; i < cache->cc_nbuckets; i++)
505 for (elt = DLGetHead(&cache->cc_bucket[i]); elt; elt = nextelt)
507 CatCTup *ct = (CatCTup *) DLE_VAL(elt);
/* Capture successor before removal invalidates the current element. */
509 nextelt = DLGetSucc(elt);
/* NOTE(review): the branch for in-use entries (refcount > 0) is elided here. */
511 if (ct->refcount > 0)
514 CatCacheRemoveCTup(cache, ct);
515 #ifdef CATCACHE_STATS
525 * Reset all caches when a shared cache inval event forces it
/*
 * ResetCatalogCaches
 *		Apply ResetCatalogCache to every cache on the global list.
 */
528 ResetCatalogCaches(void)
532 CACHE1_elog(DEBUG1, "ResetCatalogCaches called");
534 for (cache = CacheHdr->ch_caches; cache; cache = cache->cc_next)
535 ResetCatalogCache(cache);
537 CACHE1_elog(DEBUG1, "end of ResetCatalogCaches call");
541 * CatalogCacheFlushRelation
543 * This is called by RelationFlushRelation() to clear out cached information
544 * about a relation being dropped. (This could be a DROP TABLE command,
545 * or a temp table being dropped at end of transaction, or a table created
546 * during the current transaction that is being dropped because of abort.)
547 * Remove all cache entries relevant to the specified relation OID.
549 * A special case occurs when relId is itself one of the cacheable system
550 * tables --- although those'll never be dropped, they can get flushed from
551 * the relcache (VACUUM causes this, for example). In that case we need
552 * to flush all cache entries that came from that table. (At one point we
553 * also tried to force re-execution of CatalogCacheInitializeCache for
554 * the cache(s) on that table. This is a bad idea since it leads to all
555 * kinds of trouble if a cache flush occurs while loading cache entries.
556 * We now avoid the need to do it by copying cc_tupdesc out of the relcache,
557 * rather than relying on the relcache to keep a tupdesc for us. Of course
558 * this assumes the tupdesc of a cachable system table will not change...)
561 CatalogCacheFlushRelation(Oid relId)
565 CACHE2_elog(DEBUG1, "CatalogCacheFlushRelation called for %u", relId);
567 for (cache = CacheHdr->ch_caches; cache; cache = cache->cc_next)
571 /* We can ignore uninitialized caches, since they must be empty */
572 if (cache->cc_tupdesc == NULL)
575 /* Does this cache store tuples of the target relation itself? */
576 if (cache->cc_tupdesc->attrs[0]->attrelid == relId)
578 /* Yes, so flush all its contents */
579 ResetCatalogCache(cache);
583 /* Does this cache store tuples associated with relations at all? */
584 if (cache->cc_reloidattr == 0)
585 continue; /* nope, leave it alone */
587 /* Yes, scan the tuples and remove those related to relId */
588 for (i = 0; i < cache->cc_nbuckets; i++)
593 for (elt = DLGetHead(&cache->cc_bucket[i]); elt; elt = nextelt)
595 CatCTup *ct = (CatCTup *) DLE_VAL(elt);
/* Save successor before the current element can be removed. */
598 nextelt = DLGetSucc(elt);
601 * Negative entries are never considered related to a rel,
602 * even if the rel is part of their lookup key.
/* Extract the relation OID this entry refers to: either the tuple's own
 * OID system column, or a regular attribute fetched via fastgetattr. */
607 if (cache->cc_reloidattr == ObjectIdAttributeNumber)
608 tupRelid = ct->tuple.t_data->t_oid;
614 DatumGetObjectId(fastgetattr(&ct->tuple,
615 cache->cc_reloidattr,
621 if (tupRelid == relId)
623 if (ct->refcount > 0)
626 CatCacheRemoveCTup(cache, ct);
627 #ifdef CATCACHE_STATS
635 CACHE1_elog(DEBUG1, "end of CatalogCacheFlushRelation call");
641 * This allocates and initializes a cache for a system catalog relation.
642 * Actually, the cache is only partially initialized to avoid opening the
643 * relation. The relation will be opened and the rest of the cache
644 * structure initialized on the first access.
647 #define InitCatCache_DEBUG1 \
649 elog(DEBUG1, "InitCatCache: rel=%s id=%d nkeys=%d size=%d\n", \
650 cp->cc_relname, cp->id, cp->cc_nkeys, cp->cc_nbuckets); \
654 #define InitCatCache_DEBUG1
/*
 * NOTE(review): the InitCatCache function header (return type, name, and
 * parameter list) is elided in this excerpt; the body below references
 * parameters relname, indname, reloidattr, nkeys, and key[] -- confirm the
 * signature against the full source.
 */
666 MemoryContext oldcxt;
670 * first switch to the cache context so our allocations do not vanish
671 * at the end of a transaction
673 if (!CacheMemoryContext)
674 CreateCacheMemoryContext();
676 oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
679 * if first time through, initialize the cache group header, including
680 * global LRU list header
682 if (CacheHdr == NULL)
684 CacheHdr = (CatCacheHeader *) palloc(sizeof(CatCacheHeader));
685 CacheHdr->ch_caches = NULL;
686 CacheHdr->ch_ntup = 0;
687 CacheHdr->ch_maxtup = MAXCCTUPLES;
688 DLInitList(&CacheHdr->ch_lrulist);
689 #ifdef CATCACHE_STATS
/* Dump accumulated statistics once, at backend exit. */
690 on_proc_exit(CatCachePrintStats, 0);
695 * allocate a new cache structure
697 * Note: we assume zeroing initializes the bucket headers correctly
/* The bucket array (NCCBUCKETS Dllists) is allocated inline after the struct. */
699 cp = (CatCache *) palloc(sizeof(CatCache) + NCCBUCKETS * sizeof(Dllist));
700 MemSet((char *) cp, 0, sizeof(CatCache) + NCCBUCKETS * sizeof(Dllist));
703 * initialize the cache's relation information for the relation
704 * corresponding to this cache, and initialize some of the new cache's
705 * other internal fields. But don't open the relation yet.
708 cp->cc_relname = relname;
709 cp->cc_indname = indname;
710 cp->cc_reloid = InvalidOid; /* temporary */
711 cp->cc_relisshared = false; /* temporary */
712 cp->cc_tupdesc = (TupleDesc) NULL;
713 cp->cc_reloidattr = reloidattr;
715 cp->cc_nbuckets = NCCBUCKETS;
716 cp->cc_nkeys = nkeys;
717 for (i = 0; i < nkeys; ++i)
718 cp->cc_key[i] = key[i];
721 * new cache is initialized as far as we can go for now. print some
722 * debugging information, if appropriate.
727 * add completed cache to top of group header's list
729 cp->cc_next = CacheHdr->ch_caches;
730 CacheHdr->ch_caches = cp;
733 * back to the old context before we return...
735 MemoryContextSwitchTo(oldcxt);
741 * CatalogCacheInitializeCache
743 * This function does final initialization of a catcache: obtain the tuple
744 * descriptor and set up the hash and equality function links. We assume
745 * that the relcache entry can be opened at this point!
748 #define CatalogCacheInitializeCache_DEBUG1 \
749 elog(DEBUG1, "CatalogCacheInitializeCache: cache @%p %s", cache, \
752 #define CatalogCacheInitializeCache_DEBUG2 \
754 if (cache->cc_key[i] > 0) { \
755 elog(DEBUG1, "CatalogCacheInitializeCache: load %d/%d w/%d, %u", \
756 i+1, cache->cc_nkeys, cache->cc_key[i], \
757 tupdesc->attrs[cache->cc_key[i] - 1]->atttypid); \
759 elog(DEBUG1, "CatalogCacheInitializeCache: load %d/%d w/%d", \
760 i+1, cache->cc_nkeys, cache->cc_key[i]); \
765 #define CatalogCacheInitializeCache_DEBUG1
766 #define CatalogCacheInitializeCache_DEBUG2
/* A non-NULL cc_tupdesc is the "fully initialized" marker tested by callers. */
770 CatalogCacheInitializeCache(CatCache *cache)
773 MemoryContext oldcxt;
777 CatalogCacheInitializeCache_DEBUG1;
780 * Open the relation without locking --- we only need the tupdesc,
781 * which we assume will never change ...
783 relation = heap_openr(cache->cc_relname, NoLock);
784 Assert(RelationIsValid(relation));
787 * switch to the cache context so our allocations do not vanish at the
788 * end of a transaction
790 Assert(CacheMemoryContext != NULL);
792 oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
795 * copy the relcache's tuple descriptor to permanent cache storage
797 tupdesc = CreateTupleDescCopyConstr(RelationGetDescr(relation));
800 * get the relation's OID and relisshared flag, too
802 cache->cc_reloid = RelationGetRelid(relation);
803 cache->cc_relisshared = RelationGetForm(relation)->relisshared;
806 * return to the caller's memory context and close the rel
808 MemoryContextSwitchTo(oldcxt);
810 heap_close(relation, NoLock);
812 CACHE3_elog(DEBUG1, "CatalogCacheInitializeCache: %s, %d keys",
813 cache->cc_relname, cache->cc_nkeys);
816 * initialize cache's key information
818 for (i = 0; i < cache->cc_nkeys; ++i)
822 CatalogCacheInitializeCache_DEBUG2;
/* Positive attnums are regular columns; the only system column allowed is OID. */
824 if (cache->cc_key[i] > 0)
825 keytype = tupdesc->attrs[cache->cc_key[i] - 1]->atttypid;
828 if (cache->cc_key[i] != ObjectIdAttributeNumber)
829 elog(FATAL, "CatalogCacheInit: only sys attr supported is OID");
833 cache->cc_hashfunc[i] = GetCCHashFunc(keytype);
/* Remember NAME columns: SearchCatCache pads C-string keys for them. */
835 cache->cc_isname[i] = (keytype == NAMEOID);
838 * If GetCCHashFunc liked the type, safe to index into eqproc[]
840 cache->cc_skey[i].sk_procedure = EQPROC(keytype);
842 /* Do function lookup */
843 fmgr_info_cxt(cache->cc_skey[i].sk_procedure,
844 &cache->cc_skey[i].sk_func,
847 /* Initialize sk_attno suitably for HeapKeyTest() and heap scans */
848 cache->cc_skey[i].sk_attno = cache->cc_key[i];
850 CACHE4_elog(DEBUG1, "CatalogCacheInit %s %d %p",
857 * mark this cache fully initialized
859 cache->cc_tupdesc = tupdesc;
863 * InitCatCachePhase2 -- external interface for CatalogCacheInitializeCache
865 * The only reason to call this routine is to ensure that the relcache
866 * has created entries for all the catalogs and indexes referenced by
867 * catcaches. Therefore, open the index too. An exception is the indexes
868 * on pg_am, which we don't use (cf. IndexScanOK).
871 InitCatCachePhase2(CatCache *cache)
873 if (cache->cc_tupdesc == NULL)
874 CatalogCacheInitializeCache(cache);
/* Open the index purely to force creation of its relcache entry.
 * NOTE(review): the second excluded cache id in this condition and the
 * matching index_close call are elided in this excerpt. */
876 if (cache->id != AMOID &&
881 idesc = index_openr(cache->cc_indname);
890 * This function checks for tuples that will be fetched by
891 * IndexSupportInitialize() during relcache initialization for
892 * certain system indexes that support critical syscaches.
893 * We can't use an indexscan to fetch these, else we'll get into
894 * infinite recursion. A plain heap scan will work, however.
896 * Once we have completed relcache initialization (signaled by
897 * criticalRelcachesBuilt), we don't have to worry anymore.
/*
 * IndexScanOK
 *		Decide whether SearchCatCache may safely use an index scan for this
 *		lookup, or must fall back to a heap scan to avoid recursion during
 *		relcache bootstrap.
 */
900 IndexScanOK(CatCache *cache, ScanKey cur_skey)
902 if (cache->id == INDEXRELID)
905 * Since the OIDs of indexes aren't hardwired, it's painful to
906 * figure out which is which. Just force all pg_index searches
907 * to be heap scans while building the relcaches.
909 if (!criticalRelcachesBuilt)
912 else if (cache->id == AMOID ||
916 * Always do heap scans in pg_am, because it's so small there's
917 * not much point in an indexscan anyway. We *must* do this when
918 * initially building critical relcache entries, but we might as
919 * well just always do it.
923 else if (cache->id == OPEROID)
925 if (!criticalRelcachesBuilt)
927 /* Looking for an OID comparison function? */
928 Oid lookup_oid = DatumGetObjectId(cur_skey[0].sk_argument);
930 if (lookup_oid >= MIN_OIDCMP && lookup_oid <= MAX_OIDCMP)
935 /* Normal case, allow index scan */
942 * This call searches a system cache for a tuple, opening the relation
943 * if necessary (on the first access to a particular cache).
945 * The result is NULL if not found, or a pointer to a HeapTuple in
946 * the cache. The caller must not modify the tuple, and must call
947 * ReleaseCatCache() when done with it.
949 * The search key values should be expressed as Datums of the key columns'
950 * datatype(s). (Pass zeroes for any unused parameters.) As a special
951 * exception, the passed-in key for a NAME column can be just a C string;
952 * the caller need not go to the trouble of converting it to a fully
/*
 * NOTE(review): many declarations, braces, and some statements of this
 * function are elided in this excerpt; the annotations below describe only
 * what the visible lines establish.
 */
956 SearchCatCache(CatCache *cache,
962 ScanKeyData cur_skey[4];
970 MemoryContext oldcxt;
973 * one-time startup overhead for each cache
975 if (cache->cc_tupdesc == NULL)
976 CatalogCacheInitializeCache(cache);
978 #ifdef CATCACHE_STATS
979 cache->cc_searches++;
983 * initialize the search key information
985 memcpy(cur_skey, cache->cc_skey, sizeof(cur_skey));
986 cur_skey[0].sk_argument = v1;
987 cur_skey[1].sk_argument = v2;
988 cur_skey[2].sk_argument = v3;
989 cur_skey[3].sk_argument = v4;
992 * find the hash bucket in which to look for the tuple
994 hashValue = CatalogCacheComputeHashValue(cache, cur_skey);
995 hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);
998 * scan the hash bucket until we find a match or exhaust our tuples
1000 for (elt = DLGetHead(&cache->cc_bucket[hashIndex]);
1002 elt = DLGetSucc(elt))
1006 ct = (CatCTup *) DLE_VAL(elt);
1009 continue; /* ignore dead entries */
1011 if (ct->hash_value != hashValue)
1012 continue; /* quickly skip entry if wrong hash val */
1015 * see if the cached tuple matches our key.
1017 HeapKeyTest(&ct->tuple,
1026 * we found a match in the cache: move it to the front of the global
1027 * LRU list. We also move it to the front of the list for its
1028 * hashbucket, in order to speed subsequent searches. (The most
1029 * frequently accessed elements in any hashbucket will tend to be
1030 * near the front of the hashbucket's list.)
1032 DLMoveToFront(&ct->lrulist_elem);
1033 DLMoveToFront(&ct->cache_elem);
1036 * If it's a positive entry, bump its refcount and return it.
1037 * If it's negative, we can report failure to the caller.
1044 CACHE3_elog(DEBUG1, "SearchCatCache(%s): found in bucket %d",
1045 cache->cc_relname, hashIndex);
1046 #endif /* CACHEDEBUG */
1048 #ifdef CATCACHE_STATS
1057 CACHE3_elog(DEBUG1, "SearchCatCache(%s): found neg entry in bucket %d",
1058 cache->cc_relname, hashIndex);
1059 #endif /* CACHEDEBUG */
1061 #ifdef CATCACHE_STATS
1062 cache->cc_neg_hits++;
1070 * Tuple was not found in cache, so we have to try to retrieve it
1071 * directly from the relation. If found, we will add it to the
1072 * cache; if not found, we will add a negative cache entry instead.
1074 * NOTE: it is possible for recursive cache lookups to occur while
1075 * reading the relation --- for example, due to shared-cache-inval
1076 * messages being processed during heap_open(). This is OK. It's
1077 * even possible for one of those lookups to find and enter the very
1078 * same tuple we are trying to fetch here. If that happens, we will
1079 * enter a second copy of the tuple into the cache. The first copy
1080 * will never be referenced again, and will eventually age out of the
1081 * cache, so there's no functional problem. This case is rare enough
1082 * that it's not worth expending extra cycles to detect.
1086 * open the relation associated with the cache
1088 relation = heap_open(cache->cc_reloid, AccessShareLock);
1091 * Pre-create cache entry header, and mark no tuple found.
/* Allocated in CacheMemoryContext so the entry survives the transaction. */
1093 ct = (CatCTup *) MemoryContextAlloc(CacheMemoryContext, sizeof(CatCTup));
1094 ct->negative = true;
1097 * Scan the relation to find the tuple. If there's an index, and if
1098 * it's safe to do so, use the index. Else do a heap scan.
1100 if ((RelationGetForm(relation))->relhasindex &&
1101 !IsIgnoringSystemIndexes() &&
1102 IndexScanOK(cache, cur_skey))
1106 RetrieveIndexResult indexRes;
1107 HeapTupleData tuple;
1110 CACHE2_elog(DEBUG1, "SearchCatCache(%s): performing index scan",
1114 * For an index scan, sk_attno has to be set to the index
1115 * attribute number(s), not the heap attribute numbers. We assume
1116 * that the index corresponds exactly to the cache keys (or its
1117 * first N keys do, anyway).
1119 for (i = 0; i < cache->cc_nkeys; ++i)
1120 cur_skey[i].sk_attno = i + 1;
1122 idesc = index_openr(cache->cc_indname);
1123 isd = index_beginscan(idesc, false, cache->cc_nkeys, cur_skey);
1124 tuple.t_datamcxt = CurrentMemoryContext;
1125 tuple.t_data = NULL;
1126 while ((indexRes = index_getnext(isd, ForwardScanDirection)))
1128 tuple.t_self = indexRes->heap_iptr;
/* heap_fetch validates visibility under SnapshotNow; t_data stays NULL if dead */
1129 heap_fetch(relation, SnapshotNow, &tuple, &buffer, isd);
1131 if (tuple.t_data != NULL)
1133 /* Copy tuple into our context */
1134 oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
1135 heap_copytuple_with_tuple(&tuple, &ct->tuple);
1136 ct->negative = false;
1137 MemoryContextSwitchTo(oldcxt);
1138 ReleaseBuffer(buffer);
1149 CACHE2_elog(DEBUG1, "SearchCatCache(%s): performing heap scan",
1152 sd = heap_beginscan(relation, 0, SnapshotNow,
1153 cache->cc_nkeys, cur_skey);
1155 ntp = heap_getnext(sd, 0);
1157 if (HeapTupleIsValid(ntp))
1159 /* Copy tuple into our context */
1160 oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
1161 heap_copytuple_with_tuple(ntp, &ct->tuple);
1162 ct->negative = false;
1163 MemoryContextSwitchTo(oldcxt);
1164 /* We should not free the result of heap_getnext... */
1171 * close the relation
1173 heap_close(relation, AccessShareLock);
1176 * scan is complete. If tuple was not found, we need to build
1177 * a fake tuple for the negative cache entry. The fake tuple has
1178 * the correct key columns, but nulls everywhere else.
1182 TupleDesc tupDesc = cache->cc_tupdesc;
1185 Oid negOid = InvalidOid;
1187 values = (Datum *) palloc(tupDesc->natts * sizeof(Datum));
1188 nulls = (char *) palloc(tupDesc->natts * sizeof(char));
/* 'n' marks a column NULL for heap_formtuple; key columns get ' ' below. */
1190 memset(values, 0, tupDesc->natts * sizeof(Datum));
1191 memset(nulls, 'n', tupDesc->natts * sizeof(char));
1193 for (i = 0; i < cache->cc_nkeys; i++)
1195 int attindex = cache->cc_key[i];
1196 Datum keyval = cur_skey[i].sk_argument;
1201 * Here we must be careful in case the caller passed a
1202 * C string where a NAME is wanted: convert the given
1203 * argument to a correctly padded NAME. Otherwise the
1204 * memcpy() done in heap_formtuple could fall off the
1207 if (cache->cc_isname[i])
1209 Name newval = (Name) palloc(NAMEDATALEN);
1211 namestrcpy(newval, DatumGetCString(keyval));
1212 keyval = NameGetDatum(newval);
1214 values[attindex-1] = keyval;
1215 nulls[attindex-1] = ' ';
/* Non-positive key attnum: must be the OID system column; remember its value */
1219 Assert(attindex == ObjectIdAttributeNumber);
1220 negOid = DatumGetObjectId(keyval);
1224 ntp = heap_formtuple(tupDesc, values, nulls);
1226 oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
1227 heap_copytuple_with_tuple(ntp, &ct->tuple);
1228 ct->tuple.t_data->t_oid = negOid;
1229 MemoryContextSwitchTo(oldcxt);
1231 heap_freetuple(ntp);
/* Free any padded NAME copies made above. */
1232 for (i = 0; i < cache->cc_nkeys; i++)
1234 if (cache->cc_isname[i])
1235 pfree(DatumGetName(values[cache->cc_key[i]-1]));
1242 * Finish initializing the CatCTup header, and add it to the linked
1245 ct->ct_magic = CT_MAGIC;
1246 ct->my_cache = cache;
1247 DLInitElem(&ct->lrulist_elem, (void *) ct);
1248 DLInitElem(&ct->cache_elem, (void *) ct);
1249 ct->refcount = 1; /* count this first reference */
1251 ct->hash_value = hashValue;
1253 DLAddHead(&CacheHdr->ch_lrulist, &ct->lrulist_elem);
1254 DLAddHead(&cache->cc_bucket[hashIndex], &ct->cache_elem);
1257 * If we've exceeded the desired size of the caches, try to throw away
1258 * the least recently used entry. NB: the newly-built entry cannot
1259 * get thrown away here, because it has positive refcount.
1262 if (++CacheHdr->ch_ntup > CacheHdr->ch_maxtup)
/* Walk the LRU list tail-first; save predecessor before removal. */
1266 for (elt = DLGetTail(&CacheHdr->ch_lrulist); elt; elt = prevelt)
1268 CatCTup *oldct = (CatCTup *) DLE_VAL(elt);
1270 prevelt = DLGetPred(elt);
1272 if (oldct->refcount == 0)
1274 CACHE2_elog(DEBUG1, "SearchCatCache(%s): Overflow, LRU removal",
1276 #ifdef CATCACHE_STATS
1277 oldct->my_cache->cc_discards++;
1279 CatCacheRemoveCTup(oldct->my_cache, oldct);
1280 if (CacheHdr->ch_ntup <= CacheHdr->ch_maxtup)
1286 CACHE4_elog(DEBUG1, "SearchCatCache(%s): Contains %d/%d tuples",
1287 cache->cc_relname, cache->cc_ntup, CacheHdr->ch_ntup);
1291 CACHE3_elog(DEBUG1, "SearchCatCache(%s): put neg entry in bucket %d",
1292 cache->cc_relname, hashIndex);
1295 * We are not returning the new entry to the caller, so reset its
1296 * refcount. Note it would be uncool to set the refcount to 0
1297 * before doing the extra-entry removal step above.
1299 ct->refcount = 0; /* negative entries never have refs */
1304 CACHE3_elog(DEBUG1, "SearchCatCache(%s): put in bucket %d",
1305 cache->cc_relname, hashIndex);
1307 #ifdef CATCACHE_STATS
1308 cache->cc_newloads++;
1317 * Decrement the reference count of a catcache entry (releasing the
1318 * hold grabbed by a successful SearchCatCache).
1320 * NOTE: if compiled with -DCATCACHE_FORCE_RELEASE then catcache entries
1321 * will be freed as soon as their refcount goes to zero. In combination
1322 * with aset.c's CLOBBER_FREED_MEMORY option, this provides a good test
1323 * to catch references to already-released catcache entries.
1326 ReleaseCatCache(HeapTuple tuple)
/* Recover the enclosing CatCTup header from the embedded tuple pointer. */
1328 CatCTup *ct = (CatCTup *) (((char *) tuple) -
1329 offsetof(CatCTup, tuple));
1331 /* Safety checks to ensure we were handed a cache entry */
1332 Assert(ct->ct_magic == CT_MAGIC);
1333 Assert(ct->refcount > 0);
/* NOTE(review): additional conditions of this if (between the #ifndef and
 * the removal call) are elided in this excerpt. */
1337 if (ct->refcount == 0
1338 #ifndef CATCACHE_FORCE_RELEASE
1342 CatCacheRemoveCTup(ct->my_cache, ct);
1346 * PrepareToInvalidateCacheTuple()
1348 * This is part of a rather subtle chain of events, so pay attention:
1350 * When a tuple is inserted or deleted, it cannot be flushed from the
1351 * catcaches immediately, for reasons explained at the top of cache/inval.c.
1352 * Instead we have to add entry(s) for the tuple to a list of pending tuple
1353 * invalidations that will be done at the end of the command or transaction.
1355 * The lists of tuples that need to be flushed are kept by inval.c. This
1356 * routine is a helper routine for inval.c. Given a tuple belonging to
1357 * the specified relation, find all catcaches it could be in, compute the
1358 * correct hash value for each such catcache, and call the specified function
1359 * to record the cache id, hash value, and tuple ItemPointer in inval.c's
1360 * lists. CatalogCacheIdInvalidate will be called later, if appropriate,
1361 * using the recorded information.
1363 * Note that it is irrelevant whether the given tuple is actually loaded
1364 * into the catcache at the moment. Even if it's not there now, it might
1365 * be by the end of the command, or there might be a matching negative entry
1366 * to flush --- or other backends' caches might have such entries --- so
1367 * we have to make list entries to flush it later.
1369 * Also note that it's not an error if there are no catcaches for the
1370 * specified relation. inval.c doesn't know exactly which rels have
1371 * catcaches --- it will call this routine for any tuple that's in a
1375 PrepareToInvalidateCacheTuple(Relation relation,
1377 void (*function) (int, uint32, ItemPointer, Oid))
1382 CACHE1_elog(DEBUG1, "PrepareToInvalidateCacheTuple: called");
/* sanity checks */
1387 Assert(RelationIsValid(relation));
1388 Assert(HeapTupleIsValid(tuple));
1389 Assert(PointerIsValid(function));
1390 Assert(CacheHdr != NULL);
1392 reloid = RelationGetRelid(relation);
1396 * if the cache contains tuples from the specified relation
1397 * compute the tuple's hash value in this cache,
1398 * and call the passed function to register the information.
1402 for (ccp = CacheHdr->ch_caches; ccp; ccp = ccp->cc_next)
1404 /* Just in case cache hasn't finished initialization yet... */
1405 if (ccp->cc_tupdesc == NULL)
1406 CatalogCacheInitializeCache(ccp);
1408 if (ccp->cc_reloid != reloid)
/* NOTE(review): the ItemPointer argument line between these two is elided
 * in this excerpt. */
1411 (*function) (ccp->id,
1412 CatalogCacheComputeTupleHashValue(ccp, tuple),
/* shared catalogs are recorded with database id 0 */
1414 ccp->cc_relisshared ? (Oid) 0 : MyDatabaseId);