1 /*-------------------------------------------------------------------------
4 * System catalog cache for tuples matching a key.
6 * Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
7 * Portions Copyright (c) 1994, Regents of the University of California
11 * $PostgreSQL: pgsql/src/backend/utils/cache/catcache.c,v 1.136 2007/01/05 22:19:42 momjian Exp $
13 *-------------------------------------------------------------------------
17 #include "access/genam.h"
18 #include "access/hash.h"
19 #include "access/heapam.h"
20 #include "access/valid.h"
21 #include "catalog/pg_operator.h"
22 #include "catalog/pg_type.h"
23 #include "miscadmin.h"
25 #include "storage/ipc.h" /* for on_proc_exit */
27 #include "utils/builtins.h"
28 #include "utils/fmgroids.h"
29 #include "utils/memutils.h"
30 #include "utils/relcache.h"
31 #include "utils/resowner.h"
32 #include "utils/syscache.h"
35 /* #define CACHEDEBUG */ /* turns DEBUG elogs on */
38 * Given a hash value and the size of the hash table, find the bucket
39 * in which the hash value belongs. Since the hash table must contain
40 * a power-of-2 number of elements, this is a simple bitmask.
/* (h & (sz - 1)) == (h % sz) exactly when sz is a power of two */
42 #define HASH_INDEX(h, sz) ((Index) ((h) & ((sz) - 1)))
46 * variables, macros and other stuff
/*
 * Debug-logging macros.  When CACHEDEBUG is defined (see the commented-out
 * "#define CACHEDEBUG" near the top of the file) each CACHEn_elog forwards
 * its arguments to elog(); otherwise each expands to nothing, so the debug
 * calls sprinkled through this file compile away entirely.
 *
 * NOTE(review): the source as received defined both variants back to back
 * with no conditional, which redefines every macro; the #ifdef/#else/#endif
 * structure implied by the CACHEDEBUG comment is restored here.
 */
#ifdef CACHEDEBUG
#define CACHE1_elog(a,b) elog(a,b)
#define CACHE2_elog(a,b,c) elog(a,b,c)
#define CACHE3_elog(a,b,c,d) elog(a,b,c,d)
#define CACHE4_elog(a,b,c,d,e) elog(a,b,c,d,e)
#define CACHE5_elog(a,b,c,d,e,f) elog(a,b,c,d,e,f)
#define CACHE6_elog(a,b,c,d,e,f,g) elog(a,b,c,d,e,f,g)
#else
#define CACHE1_elog(a,b)
#define CACHE2_elog(a,b,c)
#define CACHE3_elog(a,b,c,d)
#define CACHE4_elog(a,b,c,d,e)
#define CACHE5_elog(a,b,c,d,e,f)
#define CACHE6_elog(a,b,c,d,e,f,g)
#endif
65 /* Cache management header --- pointer is NULL until created */
66 static CatCacheHeader *CacheHdr = NULL;
/*
 * Forward declarations for the module-private helpers below.
 * NOTE(review): several prototypes are visibly truncated in this extract
 * (continuation lines with trailing parameters are missing).
 */
69 static uint32 CatalogCacheComputeHashValue(CatCache *cache, int nkeys,
71 static uint32 CatalogCacheComputeTupleHashValue(CatCache *cache,
75 static void CatCachePrintStats(int code, Datum arg);
77 static void CatCacheRemoveCTup(CatCache *cache, CatCTup *ct);
78 static void CatCacheRemoveCList(CatCache *cache, CatCList *cl);
79 static void CatalogCacheInitializeCache(CatCache *cache);
80 static CatCTup *CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp,
81 uint32 hashValue, Index hashIndex,
83 static HeapTuple build_dummy_tuple(CatCache *cache, int nkeys, ScanKey skeys);
87 * internal support functions
91 * Look up the hash and equality functions for system types that are used
92 * as cache key fields.
94 * XXX this should be replaced by catalog lookups,
95 * but that seems to pose considerable risk of circularity...
/*
 * GetCCHashEqFuncs
 *
 * Map a cache-key datatype OID to its hash function pointer and the OID
 * of its equality procedure (a hard-wired table; per the comment above,
 * catalog lookups would risk circularity).  Unsupported key types hit
 * elog(FATAL).
 *
 * NOTE(review): this extract is missing lines — the switch statement,
 * most case labels, and most *eqfunc assignments are not visible; do not
 * assume the visible assignments are contiguous in the real source.
 */
98 GetCCHashEqFuncs(Oid keytype, PGFunction *hashfunc, RegProcedure *eqfunc)
103 *hashfunc = hashchar;
107 *hashfunc = hashchar;
111 *hashfunc = hashname;
115 *hashfunc = hashint2;
119 *hashfunc = hashint2vector;
120 *eqfunc = F_INT2VECTOREQ;
123 *hashfunc = hashint4;
127 *hashfunc = hashtext;
132 case REGPROCEDUREOID:
141 *hashfunc = hashoidvector;
142 *eqfunc = F_OIDVECTOREQ;
145 elog(FATAL, "type %u not supported as catcache key", keytype);
146 *hashfunc = NULL; /* keep compiler quiet */
147 *eqfunc = InvalidOid;
153 * CatalogCacheComputeHashValue
155 * Compute the hash value associated with a given set of lookup keys
/*
 * Each key's per-type hash is computed via DirectFunctionCall1 on the
 * cache's cc_hashfunc[i] and combined into one uint32, with keys 4..1
 * shifted left by 9, 6, 3 and 0 bits respectively.  The visible shape
 * implies a switch on nkeys with fall-through from higher to lower keys
 * — TODO confirm; the switch/case lines are missing from this extract.
 */
158 CatalogCacheComputeHashValue(CatCache *cache, int nkeys, ScanKey cur_skey)
160 uint32 hashValue = 0;
162 CACHE4_elog(DEBUG2, "CatalogCacheComputeHashValue %s %d %p",
171 DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[3],
172 cur_skey[3].sk_argument)) << 9;
176 DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[2],
177 cur_skey[2].sk_argument)) << 6;
181 DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[1],
182 cur_skey[1].sk_argument)) << 3;
186 DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[0],
187 cur_skey[0].sk_argument));
190 elog(FATAL, "wrong number of hash keys: %d", nkeys);
198 * CatalogCacheComputeTupleHashValue
200 * Compute the hash value associated with a given tuple to be cached
/*
 * Builds a scankey from the tuple's own key columns and delegates to
 * CatalogCacheComputeHashValue.  A key stored in the OID system column
 * (cc_key[i] == ObjectIdAttributeNumber) is fetched with
 * HeapTupleGetOid; the else-branches (presumably fastgetattr on a
 * regular attribute — confirm against the full source) are missing
 * from this extract, as are the case labels of the switch on cc_nkeys.
 */
203 CatalogCacheComputeTupleHashValue(CatCache *cache, HeapTuple tuple)
205 ScanKeyData cur_skey[4];
208 /* Copy pre-initialized overhead data for scankey */
209 memcpy(cur_skey, cache->cc_skey, sizeof(cur_skey));
211 /* Now extract key fields from tuple, insert into scankey */
212 switch (cache->cc_nkeys)
215 cur_skey[3].sk_argument =
216 (cache->cc_key[3] == ObjectIdAttributeNumber)
217 ? ObjectIdGetDatum(HeapTupleGetOid(tuple))
225 cur_skey[2].sk_argument =
226 (cache->cc_key[2] == ObjectIdAttributeNumber)
227 ? ObjectIdGetDatum(HeapTupleGetOid(tuple))
235 cur_skey[1].sk_argument =
236 (cache->cc_key[1] == ObjectIdAttributeNumber)
237 ? ObjectIdGetDatum(HeapTupleGetOid(tuple))
245 cur_skey[0].sk_argument =
246 (cache->cc_key[0] == ObjectIdAttributeNumber)
247 ? ObjectIdGetDatum(HeapTupleGetOid(tuple))
255 elog(FATAL, "wrong number of hash keys: %d", cache->cc_nkeys);
259 return CatalogCacheComputeHashValue(cache, cache->cc_nkeys, cur_skey);
263 #ifdef CATCACHE_STATS
/*
 * CatCachePrintStats
 *
 * Backend-exit hook (registered via on_proc_exit in InitCatCache) that
 * dumps per-cache and aggregate hit/load/invalidation counters at DEBUG2.
 * Compiled only under CATCACHE_STATS.
 *
 * NOTE(review): several local accumulator declarations (cc_hits,
 * cc_invals, cc_lhits, ...) and parts of the elog argument lists are
 * missing from this extract.
 */
266 CatCachePrintStats(int code, Datum arg)
269 long cc_searches = 0;
271 long cc_neg_hits = 0;
272 long cc_newloads = 0;
274 long cc_lsearches = 0;
277 for (cache = CacheHdr->ch_caches; cache; cache = cache->cc_next)
279 if (cache->cc_ntup == 0 && cache->cc_searches == 0)
280 continue; /* don't print unused caches */
281 elog(DEBUG2, "catcache %s/%u: %d tup, %ld srch, %ld+%ld=%ld hits, %ld+%ld=%ld loads, %ld invals, %ld lsrch, %ld lhits",
288 cache->cc_hits + cache->cc_neg_hits,
290 cache->cc_searches - cache->cc_hits - cache->cc_neg_hits - cache->cc_newloads,
291 cache->cc_searches - cache->cc_hits - cache->cc_neg_hits,
295 cc_searches += cache->cc_searches;
296 cc_hits += cache->cc_hits;
297 cc_neg_hits += cache->cc_neg_hits;
298 cc_newloads += cache->cc_newloads;
299 cc_invals += cache->cc_invals;
300 cc_lsearches += cache->cc_lsearches;
301 cc_lhits += cache->cc_lhits;
303 elog(DEBUG2, "catcache totals: %d tup, %ld srch, %ld+%ld=%ld hits, %ld+%ld=%ld loads, %ld invals, %ld lsrch, %ld lhits",
308 cc_hits + cc_neg_hits,
310 cc_searches - cc_hits - cc_neg_hits - cc_newloads,
311 cc_searches - cc_hits - cc_neg_hits,
316 #endif   /* CATCACHE_STATS */
/*
 * CatCacheRemoveCTup
 *
 * Unlink and pfree a single (refcount-zero) cache entry; if it belongs
 * to a CatCList, delegate to CatCacheRemoveCList, which recurses back
 * here for the members.
 */
322 * Unlink and delete the given cache entry
324 * NB: if it is a member of a CatCList, the CatCList is deleted too.
325 * Both the cache entry and the list had better have zero refcount.
328 CatCacheRemoveCTup(CatCache *cache, CatCTup *ct)
330 Assert(ct->refcount == 0);
331 Assert(ct->my_cache == cache);
336 * The cleanest way to handle this is to call CatCacheRemoveCList,
337 * which will recurse back to me, and the recursive call will do the
338 * work. Set the "dead" flag to make sure it does recurse.
/* NOTE(review): the `if (ct->c_list)` guard and dead-flag assignment
 * implied by the comment above are missing from this extract. */
341 CatCacheRemoveCList(cache, ct->c_list);
342 return; /* nothing left to do */
345 /* delink from linked list */
346 DLRemove(&ct->cache_elem);
348 /* free associated tuple data */
349 if (ct->tuple.t_data != NULL)
350 pfree(ct->tuple.t_data);
358 * CatCacheRemoveCList
360 * Unlink and delete the given cache list entry
362 * NB: any dead member entries that become unreferenced are deleted too.
/*
 * Detaches the list from each member tuple (removing dead, unreferenced
 * members as it goes), then unlinks and frees the list itself.
 * NOTE(review): the loop-body braces, the ct->c_list = NULL reset, and
 * the dead/refcount test guarding the member removal are elided here.
 */
365 CatCacheRemoveCList(CatCache *cache, CatCList *cl)
369 Assert(cl->refcount == 0);
370 Assert(cl->my_cache == cache);
372 /* delink from member tuples */
373 for (i = cl->n_members; --i >= 0;)
375 CatCTup *ct = cl->members[i];
377 Assert(ct->c_list == cl);
379 /* if the member is dead and now has no references, remove it */
381 #ifndef CATCACHE_FORCE_RELEASE
385 CatCacheRemoveCTup(cache, ct);
388 /* delink from linked list */
389 DLRemove(&cl->cache_elem);
391 /* free associated tuple data */
392 if (cl->tuple.t_data != NULL)
393 pfree(cl->tuple.t_data);
/*
 * CatalogCacheIdInvalidate
 *
 * Locate the cache with the given id, zap every CatCList in it, then
 * scan the one hash bucket selected by hashValue and remove (or mark
 * dead, if still referenced) entries matching the hash value / item
 * pointer.  Called only from inval.c.
 *
 * NOTE(review): parameter lines after cacheId, local declarations, and
 * several condition/brace lines are missing from this extract.
 */
399 * CatalogCacheIdInvalidate
401 * Invalidate entries in the specified cache, given a hash value and
402 * item pointer. Positive entries are deleted if they match the item
403 * pointer. Negative entries must be deleted if they match the hash
404 * value (since we do not have the exact key of the tuple that's being
405 * inserted). But this should only rarely result in loss of a cache
406 * entry that could have been kept.
408 * Note that it's not very relevant whether the tuple identified by
409 * the item pointer is being inserted or deleted. We don't expect to
410 * find matching positive entries in the one case, and we don't expect
411 * to find matching negative entries in the other; but we will do the
412 * right things in any case.
414 * This routine is only quasi-public: it should only be used by inval.c.
417 CatalogCacheIdInvalidate(int cacheId,
426 Assert(ItemPointerIsValid(pointer));
427 CACHE1_elog(DEBUG2, "CatalogCacheIdInvalidate: called");
430 * inspect caches to find the proper cache
432 for (ccp = CacheHdr->ch_caches; ccp; ccp = ccp->cc_next)
438 if (cacheId != ccp->id)
442 * We don't bother to check whether the cache has finished
443 * initialization yet; if not, there will be no entries in it so no
448 * Invalidate *all* CatCLists in this cache; it's too hard to tell
449 * which searches might still be correct, so just zap 'em all.
451 for (elt = DLGetHead(&ccp->cc_lists); elt; elt = nextelt)
453 CatCList *cl = (CatCList *) DLE_VAL(elt);
455 nextelt = DLGetSucc(elt);
/* referenced lists can't be freed now; presumably they are marked dead
 * instead (the else-branch is elided) — confirm against full source */
457 if (cl->refcount > 0)
460 CatCacheRemoveCList(ccp, cl);
464 * inspect the proper hash bucket for tuple matches
466 hashIndex = HASH_INDEX(hashValue, ccp->cc_nbuckets);
468 for (elt = DLGetHead(&ccp->cc_bucket[hashIndex]); elt; elt = nextelt)
470 CatCTup *ct = (CatCTup *) DLE_VAL(elt);
472 nextelt = DLGetSucc(elt);
474 if (hashValue != ct->hash_value)
475 continue; /* ignore non-matching hash values */
478 ItemPointerEquals(pointer, &ct->tuple.t_self))
480 if (ct->refcount > 0 ||
481 (ct->c_list && ct->c_list->refcount > 0))
484 /* list, if any, was marked dead above */
485 Assert(ct->c_list == NULL || ct->c_list->dead);
488 CatCacheRemoveCTup(ccp, ct);
489 CACHE1_elog(DEBUG2, "CatalogCacheIdInvalidate: invalidated");
490 #ifdef CATCACHE_STATS
493 /* could be multiple matches, so keep looking! */
496 break; /* need only search this one cache */
210 /* ----------------------------------------------------------------
502 * ----------------------------------------------------------------
/*
 * CreateCacheMemoryContext
 *
 * Create CacheMemoryContext under TopMemoryContext if it does not exist
 * yet; idempotent, so many call sites can call it defensively.
 */
507 * Standard routine for creating cache context if it doesn't exist yet
509 * There are a lot of places (probably far more than necessary) that check
510 * whether CacheMemoryContext exists yet and want to create it if not.
511 * We centralize knowledge of exactly how to create it here.
514 CreateCacheMemoryContext(void)
517 * Purely for paranoia, check that context doesn't exist; caller probably
520 if (!CacheMemoryContext)
521 CacheMemoryContext = AllocSetContextCreate(TopMemoryContext,
522 "CacheMemoryContext",
523 ALLOCSET_DEFAULT_MINSIZE,
524 ALLOCSET_DEFAULT_INITSIZE,
525 ALLOCSET_DEFAULT_MAXSIZE);
/*
 * AtEOXact_CatCache
 *
 * End-of-transaction debugging cross-check: under USE_ASSERT_CHECKING,
 * walk every cache's lists and buckets asserting all refcounts are zero
 * (pins are released by the ResourceOwner mechanism, not here).
 * NOTE(review): local declarations and loop braces are elided in this
 * extract; the body visibly does nothing outside assert builds.
 */
532 * Clean up catcaches at end of main transaction (either commit or abort)
534 * As of PostgreSQL 8.1, catcache pins should get released by the
535 * ResourceOwner mechanism. This routine is just a debugging
536 * cross-check that no pins remain.
539 AtEOXact_CatCache(bool isCommit)
541 #ifdef USE_ASSERT_CHECKING
546 for (ccp = CacheHdr->ch_caches; ccp; ccp = ccp->cc_next)
551 /* Check CatCLists */
552 for (elt = DLGetHead(&ccp->cc_lists); elt; elt = DLGetSucc(elt))
554 CatCList *cl = (CatCList *) DLE_VAL(elt);
556 Assert(cl->cl_magic == CL_MAGIC);
557 Assert(cl->refcount == 0);
561 /* Check individual tuples */
562 for (i = 0; i < ccp->cc_nbuckets; i++)
564 for (elt = DLGetHead(&ccp->cc_bucket[i]);
566 elt = DLGetSucc(elt))
568 CatCTup *ct = (CatCTup *) DLE_VAL(elt);
570 Assert(ct->ct_magic == CT_MAGIC);
571 Assert(ct->refcount == 0);
/*
 * ResetCatalogCache
 *
 * Empty one catalog cache: remove every CatCList and every tuple entry,
 * or mark them dead when they are still referenced.
 */
583 * Reset one catalog cache to empty.
585 * This is not very efficient if the target cache is nearly empty.
586 * However, it shouldn't need to be efficient; we don't invoke it often.
589 ResetCatalogCache(CatCache *cache)
595 /* Remove each list in this cache, or at least mark it dead */
596 for (elt = DLGetHead(&cache->cc_lists); elt; elt = nextelt)
598 CatCList *cl = (CatCList *) DLE_VAL(elt);
600 nextelt = DLGetSucc(elt);
/* referenced entries presumably get dead = true in the elided
 * else-branches — confirm against full source */
602 if (cl->refcount > 0)
605 CatCacheRemoveCList(cache, cl);
608 /* Remove each tuple in this cache, or at least mark it dead */
609 for (i = 0; i < cache->cc_nbuckets; i++)
611 for (elt = DLGetHead(&cache->cc_bucket[i]); elt; elt = nextelt)
613 CatCTup *ct = (CatCTup *) DLE_VAL(elt);
615 nextelt = DLGetSucc(elt);
617 if (ct->refcount > 0 ||
618 (ct->c_list && ct->c_list->refcount > 0))
621 /* list, if any, was marked dead above */
622 Assert(ct->c_list == NULL || ct->c_list->dead);
625 CatCacheRemoveCTup(cache, ct);
626 #ifdef CATCACHE_STATS
/*
 * ResetCatalogCaches
 *
 * Flush every catalog cache in response to a shared cache-inval event,
 * by calling ResetCatalogCache on each cache in CacheHdr's list.
 */
636 * Reset all caches when a shared cache inval event forces it
639 ResetCatalogCaches(void)
643 CACHE1_elog(DEBUG2, "ResetCatalogCaches called");
645 for (cache = CacheHdr->ch_caches; cache; cache = cache->cc_next)
646 ResetCatalogCache(cache);
648 CACHE1_elog(DEBUG2, "end of ResetCatalogCaches call");
/*
 * CatalogCacheFlushRelation
 *
 * Drop all cache entries pertaining to relation relId: caches whose
 * catalog *is* relId are reset wholesale; other caches keyed on a
 * relation OID column are scanned and matching tuples removed (or
 * marked dead while still referenced).
 *
 * NOTE(review): list-invalidation code, some braces, and the negative-
 * entry skip implied by the comment at old line 712 are elided in this
 * extract.
 */
652 * CatalogCacheFlushRelation
654 * This is called by RelationFlushRelation() to clear out cached information
655 * about a relation being dropped. (This could be a DROP TABLE command,
656 * or a temp table being dropped at end of transaction, or a table created
657 * during the current transaction that is being dropped because of abort.)
658 * Remove all cache entries relevant to the specified relation OID.
660 * A special case occurs when relId is itself one of the cacheable system
661 * tables --- although those'll never be dropped, they can get flushed from
662 * the relcache (VACUUM causes this, for example). In that case we need
663 * to flush all cache entries that came from that table. (At one point we
664 * also tried to force re-execution of CatalogCacheInitializeCache for
665 * the cache(s) on that table. This is a bad idea since it leads to all
666 * kinds of trouble if a cache flush occurs while loading cache entries.
667 * We now avoid the need to do it by copying cc_tupdesc out of the relcache,
668 * rather than relying on the relcache to keep a tupdesc for us. Of course
669 * this assumes the tupdesc of a cachable system table will not change...)
672 CatalogCacheFlushRelation(Oid relId)
676 CACHE2_elog(DEBUG2, "CatalogCacheFlushRelation called for %u", relId);
678 for (cache = CacheHdr->ch_caches; cache; cache = cache->cc_next)
682 /* We can ignore uninitialized caches, since they must be empty */
683 if (cache->cc_tupdesc == NULL)
686 /* Does this cache store tuples of the target relation itself? */
687 if (cache->cc_tupdesc->attrs[0]->attrelid == relId)
689 /* Yes, so flush all its contents */
690 ResetCatalogCache(cache);
694 /* Does this cache store tuples associated with relations at all? */
695 if (cache->cc_reloidattr == 0)
696 continue; /* nope, leave it alone */
698 /* Yes, scan the tuples and remove those related to relId */
699 for (i = 0; i < cache->cc_nbuckets; i++)
704 for (elt = DLGetHead(&cache->cc_bucket[i]); elt; elt = nextelt)
706 CatCTup *ct = (CatCTup *) DLE_VAL(elt);
709 nextelt = DLGetSucc(elt);
712 * Negative entries are never considered related to a rel,
713 * even if the rel is part of their lookup key.
718 if (cache->cc_reloidattr == ObjectIdAttributeNumber)
719 tupRelid = HeapTupleGetOid(&ct->tuple);
725 DatumGetObjectId(fastgetattr(&ct->tuple,
726 cache->cc_reloidattr,
732 if (tupRelid == relId)
734 if (ct->refcount > 0 ||
735 (ct->c_list && ct->c_list->refcount > 0))
738 /* parent list must be considered dead too */
740 ct->c_list->dead = true;
743 CatCacheRemoveCTup(cache, ct);
744 #ifdef CATCACHE_STATS
752 CACHE1_elog(DEBUG2, "end of CatalogCacheFlushRelation call");
/*
 * InitCatCache
 *
 * Allocate and partially initialize one catcache structure in
 * CacheMemoryContext, link it onto CacheHdr's list, and register the
 * stats dump hook on first call.  The relation itself is opened later
 * by CatalogCacheInitializeCache.
 *
 * NOTE(review): the function signature line and several parameter/local
 * declarations (reloid, indexoid, reloidattr, nkeys, key[], nbuckets)
 * are elided from this extract.
 */
758 * This allocates and initializes a cache for a system catalog relation.
759 * Actually, the cache is only partially initialized to avoid opening the
760 * relation. The relation will be opened and the rest of the cache
761 * structure initialized on the first access.
764 #define InitCatCache_DEBUG2 \
766 elog(DEBUG2, "InitCatCache: rel=%u ind=%u id=%d nkeys=%d size=%d", \
767 cp->cc_reloid, cp->cc_indexoid, cp->id, \
768 cp->cc_nkeys, cp->cc_nbuckets); \
771 #define InitCatCache_DEBUG2
784 MemoryContext oldcxt;
788 * nbuckets is the number of hash buckets to use in this catcache.
789 * Currently we just use a hard-wired estimate of an appropriate size for
790 * each cache; maybe later make them dynamically resizable?
792 * nbuckets must be a power of two. We check this via Assert rather than
793 * a full runtime check because the values will be coming from constant
796 * If you're confused by the power-of-two check, see comments in
797 * bitmapset.c for an explanation.
799 Assert(nbuckets > 0 && (nbuckets & -nbuckets) == nbuckets);
802 * first switch to the cache context so our allocations do not vanish at
803 * the end of a transaction
805 if (!CacheMemoryContext)
806 CreateCacheMemoryContext();
808 oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
811 * if first time through, initialize the cache group header
813 if (CacheHdr == NULL)
815 CacheHdr = (CatCacheHeader *) palloc(sizeof(CatCacheHeader));
816 CacheHdr->ch_caches = NULL;
817 CacheHdr->ch_ntup = 0;
818 #ifdef CATCACHE_STATS
819 /* set up to dump stats at backend exit */
820 on_proc_exit(CatCachePrintStats, 0);
825 * allocate a new cache structure
827 * Note: we assume zeroing initializes the Dllist headers correctly
829 cp = (CatCache *) palloc0(sizeof(CatCache) + nbuckets * sizeof(Dllist));
832 * initialize the cache's relation information for the relation
833 * corresponding to this cache, and initialize some of the new cache's
834 * other internal fields. But don't open the relation yet.
837 cp->cc_relname = "(not known yet)";
838 cp->cc_reloid = reloid;
839 cp->cc_indexoid = indexoid;
840 cp->cc_relisshared = false; /* temporary */
841 cp->cc_tupdesc = (TupleDesc) NULL;
842 cp->cc_reloidattr = reloidattr;
844 cp->cc_nbuckets = nbuckets;
845 cp->cc_nkeys = nkeys;
846 for (i = 0; i < nkeys; ++i)
847 cp->cc_key[i] = key[i];
850 * new cache is initialized as far as we can go for now. print some
851 * debugging information, if appropriate.
856 * add completed cache to top of group header's list
858 cp->cc_next = CacheHdr->ch_caches;
859 CacheHdr->ch_caches = cp;
862 * back to the old context before we return...
864 MemoryContextSwitchTo(oldcxt);
/*
 * CatalogCacheInitializeCache
 *
 * Finish initializing a catcache on first use: open the catalog
 * relation, copy its tuple descriptor into CacheMemoryContext, record
 * name/relisshared, and set up per-key hash functions, equality fmgr
 * info, and scankey templates.  Setting cc_tupdesc last marks the
 * cache fully initialized.
 *
 * NOTE(review): some local declarations (relation, tupdesc, keytype,
 * eqfunc) and an else-branch around the sysattr check are elided in
 * this extract.
 */
870 * CatalogCacheInitializeCache
872 * This function does final initialization of a catcache: obtain the tuple
873 * descriptor and set up the hash and equality function links. We assume
874 * that the relcache entry can be opened at this point!
877 #define CatalogCacheInitializeCache_DEBUG1 \
878 elog(DEBUG2, "CatalogCacheInitializeCache: cache @%p rel=%u", cache, \
881 #define CatalogCacheInitializeCache_DEBUG2 \
883 if (cache->cc_key[i] > 0) { \
884 elog(DEBUG2, "CatalogCacheInitializeCache: load %d/%d w/%d, %u", \
885 i+1, cache->cc_nkeys, cache->cc_key[i], \
886 tupdesc->attrs[cache->cc_key[i] - 1]->atttypid); \
888 elog(DEBUG2, "CatalogCacheInitializeCache: load %d/%d w/%d", \
889 i+1, cache->cc_nkeys, cache->cc_key[i]); \
893 #define CatalogCacheInitializeCache_DEBUG1
894 #define CatalogCacheInitializeCache_DEBUG2
898 CatalogCacheInitializeCache(CatCache *cache)
901 MemoryContext oldcxt;
905 CatalogCacheInitializeCache_DEBUG1;
907 relation = heap_open(cache->cc_reloid, AccessShareLock);
910 * switch to the cache context so our allocations do not vanish at the end
913 Assert(CacheMemoryContext != NULL);
915 oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
918 * copy the relcache's tuple descriptor to permanent cache storage
920 tupdesc = CreateTupleDescCopyConstr(RelationGetDescr(relation));
923 * save the relation's name and relisshared flag, too (cc_relname is used
924 * only for debugging purposes)
926 cache->cc_relname = pstrdup(RelationGetRelationName(relation));
927 cache->cc_relisshared = RelationGetForm(relation)->relisshared;
930 * return to the caller's memory context and close the rel
932 MemoryContextSwitchTo(oldcxt);
934 heap_close(relation, AccessShareLock);
936 CACHE3_elog(DEBUG2, "CatalogCacheInitializeCache: %s, %d keys",
937 cache->cc_relname, cache->cc_nkeys);
940 * initialize cache's key information
942 for (i = 0; i < cache->cc_nkeys; ++i)
947 CatalogCacheInitializeCache_DEBUG2;
949 if (cache->cc_key[i] > 0)
950 keytype = tupdesc->attrs[cache->cc_key[i] - 1]->atttypid;
953 if (cache->cc_key[i] != ObjectIdAttributeNumber)
954 elog(FATAL, "only sys attr supported in caches is OID");
958 GetCCHashEqFuncs(keytype,
959 &cache->cc_hashfunc[i],
962 cache->cc_isname[i] = (keytype == NAMEOID);
965 * Do equality-function lookup (we assume this won't need a catalog
966 * lookup for any supported type)
968 fmgr_info_cxt(eqfunc,
969 &cache->cc_skey[i].sk_func,
972 /* Initialize sk_attno suitably for HeapKeyTest() and heap scans */
973 cache->cc_skey[i].sk_attno = cache->cc_key[i];
975 /* Fill in sk_strategy as well --- always standard equality */
976 cache->cc_skey[i].sk_strategy = BTEqualStrategyNumber;
977 cache->cc_skey[i].sk_subtype = InvalidOid;
979 CACHE4_elog(DEBUG2, "CatalogCacheInitializeCache %s %d %p",
986 * mark this cache fully initialized
988 cache->cc_tupdesc = tupdesc;
/*
 * InitCatCachePhase2
 *
 * Force completion of cache initialization, optionally also opening and
 * closing the cache's index to make the relcache build an entry for it
 * (skipped for the pg_am caches, which never use their indexes here).
 */
992 * InitCatCachePhase2 -- external interface for CatalogCacheInitializeCache
994 * One reason to call this routine is to ensure that the relcache has
995 * created entries for all the catalogs and indexes referenced by catcaches.
996 * Therefore, provide an option to open the index as well as fixing the
997 * cache itself. An exception is the indexes on pg_am, which we don't use
1001 InitCatCachePhase2(CatCache *cache, bool touch_index)
1003 if (cache->cc_tupdesc == NULL)
1004 CatalogCacheInitializeCache(cache);
1007 cache->id != AMOID &&
1008 cache->id != AMNAME)
1012 idesc = index_open(cache->cc_indexoid, AccessShareLock);
1013 index_close(idesc, AccessShareLock);
/*
 * IndexScanOK
 *
 * Decide whether a catcache miss may be satisfied by an index scan:
 * false for pg_index searches until critical relcaches are built (to
 * avoid infinite recursion), always false for the tiny pg_am caches,
 * true otherwise.
 */
1021 * This function checks for tuples that will be fetched by
1022 * IndexSupportInitialize() during relcache initialization for
1023 * certain system indexes that support critical syscaches.
1024 * We can't use an indexscan to fetch these, else we'll get into
1025 * infinite recursion. A plain heap scan will work, however.
1027 * Once we have completed relcache initialization (signaled by
1028 * criticalRelcachesBuilt), we don't have to worry anymore.
1031 IndexScanOK(CatCache *cache, ScanKey cur_skey)
1033 if (cache->id == INDEXRELID)
1036 * Rather than tracking exactly which indexes have to be loaded
1037 * before we can use indexscans (which changes from time to time),
1038 * just force all pg_index searches to be heap scans until we've
1039 * built the critical relcaches.
1041 if (!criticalRelcachesBuilt)
1044 else if (cache->id == AMOID ||
1045 cache->id == AMNAME)
1048 * Always do heap scans in pg_am, because it's so small there's not
1049 * much point in an indexscan anyway. We *must* do this when
1050 * initially building critical relcache entries, but we might as well
1051 * just always do it.
1056 /* Normal case, allow index scan */
/*
 * SearchCatCache
 *
 * Main lookup entry point: hash the keys, probe the bucket for a live
 * matching entry (positive -> pin and return it; negative -> return
 * NULL), else scan the catalog, insert the fetched tuple — or, outside
 * bootstrap mode, a negative entry built from a dummy tuple — and
 * return accordingly.
 *
 * NOTE(review): the v1..v4 Datum parameters, several declarations,
 * return statements, and the HeapKeyTest result handling are elided
 * from this extract; treat the visible control flow as incomplete.
 */
1063 * This call searches a system cache for a tuple, opening the relation
1064 * if necessary (on the first access to a particular cache).
1066 * The result is NULL if not found, or a pointer to a HeapTuple in
1067 * the cache. The caller must not modify the tuple, and must call
1068 * ReleaseCatCache() when done with it.
1070 * The search key values should be expressed as Datums of the key columns'
1071 * datatype(s). (Pass zeroes for any unused parameters.) As a special
1072 * exception, the passed-in key for a NAME column can be just a C string;
1073 * the caller need not go to the trouble of converting it to a fully
1077 SearchCatCache(CatCache *cache,
1083 ScanKeyData cur_skey[4];
1089 SysScanDesc scandesc;
1093 * one-time startup overhead for each cache
1095 if (cache->cc_tupdesc == NULL)
1096 CatalogCacheInitializeCache(cache);
1098 #ifdef CATCACHE_STATS
1099 cache->cc_searches++;
1103 * initialize the search key information
1105 memcpy(cur_skey, cache->cc_skey, sizeof(cur_skey));
1106 cur_skey[0].sk_argument = v1;
1107 cur_skey[1].sk_argument = v2;
1108 cur_skey[2].sk_argument = v3;
1109 cur_skey[3].sk_argument = v4;
1112 * find the hash bucket in which to look for the tuple
1114 hashValue = CatalogCacheComputeHashValue(cache, cache->cc_nkeys, cur_skey);
1115 hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);
1118 * scan the hash bucket until we find a match or exhaust our tuples
1120 for (elt = DLGetHead(&cache->cc_bucket[hashIndex]);
1122 elt = DLGetSucc(elt))
1126 ct = (CatCTup *) DLE_VAL(elt);
1129 continue; /* ignore dead entries */
1131 if (ct->hash_value != hashValue)
1132 continue; /* quickly skip entry if wrong hash val */
1135 * see if the cached tuple matches our key.
1137 HeapKeyTest(&ct->tuple,
1146 * We found a match in the cache. Move it to the front of the list
1147 * for its hashbucket, in order to speed subsequent searches. (The
1148 * most frequently accessed elements in any hashbucket will tend to be
1149 * near the front of the hashbucket's list.)
1151 DLMoveToFront(&ct->cache_elem);
1154 * If it's a positive entry, bump its refcount and return it. If it's
1155 * negative, we can report failure to the caller.
1159 ResourceOwnerEnlargeCatCacheRefs(CurrentResourceOwner);
1161 ResourceOwnerRememberCatCacheRef(CurrentResourceOwner, &ct->tuple);
1163 CACHE3_elog(DEBUG2, "SearchCatCache(%s): found in bucket %d",
1164 cache->cc_relname, hashIndex);
1166 #ifdef CATCACHE_STATS
1174 CACHE3_elog(DEBUG2, "SearchCatCache(%s): found neg entry in bucket %d",
1175 cache->cc_relname, hashIndex);
1177 #ifdef CATCACHE_STATS
1178 cache->cc_neg_hits++;
1186 * Tuple was not found in cache, so we have to try to retrieve it directly
1187 * from the relation. If found, we will add it to the cache; if not
1188 * found, we will add a negative cache entry instead.
1190 * NOTE: it is possible for recursive cache lookups to occur while reading
1191 * the relation --- for example, due to shared-cache-inval messages being
1192 * processed during heap_open(). This is OK. It's even possible for one
1193 * of those lookups to find and enter the very same tuple we are trying to
1194 * fetch here. If that happens, we will enter a second copy of the tuple
1195 * into the cache. The first copy will never be referenced again, and
1196 * will eventually age out of the cache, so there's no functional problem.
1197 * This case is rare enough that it's not worth expending extra cycles to
1200 relation = heap_open(cache->cc_reloid, AccessShareLock);
1202 scandesc = systable_beginscan(relation,
1204 IndexScanOK(cache, cur_skey),
1211 while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
1213 ct = CatalogCacheCreateEntry(cache, ntp,
1214 hashValue, hashIndex,
1216 /* immediately set the refcount to 1 */
1217 ResourceOwnerEnlargeCatCacheRefs(CurrentResourceOwner);
1219 ResourceOwnerRememberCatCacheRef(CurrentResourceOwner, &ct->tuple);
1220 break; /* assume only one match */
1223 systable_endscan(scandesc);
1225 heap_close(relation, AccessShareLock);
1228 * If tuple was not found, we need to build a negative cache entry
1229 * containing a fake tuple. The fake tuple has the correct key columns,
1230 * but nulls everywhere else.
1232 * In bootstrap mode, we don't build negative entries, because the cache
1233 * invalidation mechanism isn't alive and can't clear them if the tuple
1234 * gets created later. (Bootstrap doesn't do UPDATEs, so it doesn't need
1235 * cache inval for that.)
1239 if (IsBootstrapProcessingMode())
1242 ntp = build_dummy_tuple(cache, cache->cc_nkeys, cur_skey);
1243 ct = CatalogCacheCreateEntry(cache, ntp,
1244 hashValue, hashIndex,
1246 heap_freetuple(ntp);
1248 CACHE4_elog(DEBUG2, "SearchCatCache(%s): Contains %d/%d tuples",
1249 cache->cc_relname, cache->cc_ntup, CacheHdr->ch_ntup);
1250 CACHE3_elog(DEBUG2, "SearchCatCache(%s): put neg entry in bucket %d",
1251 cache->cc_relname, hashIndex);
1254 * We are not returning the negative entry to the caller, so leave its
1261 CACHE4_elog(DEBUG2, "SearchCatCache(%s): Contains %d/%d tuples",
1262 cache->cc_relname, cache->cc_ntup, CacheHdr->ch_ntup);
1263 CACHE3_elog(DEBUG2, "SearchCatCache(%s): put in bucket %d",
1264 cache->cc_relname, hashIndex);
1266 #ifdef CATCACHE_STATS
1267 cache->cc_newloads++;
/*
 * ReleaseCatCache
 *
 * Unpin a tuple returned by SearchCatCache: recover the CatCTup from
 * the embedded HeapTuple via offsetof, forget the ResourceOwner pin,
 * and physically remove the entry once it is dead and unreferenced
 * (or, per the comment below, immediately under CATCACHE_FORCE_RELEASE).
 */
1276 * Decrement the reference count of a catcache entry (releasing the
1277 * hold grabbed by a successful SearchCatCache).
1279 * NOTE: if compiled with -DCATCACHE_FORCE_RELEASE then catcache entries
1280 * will be freed as soon as their refcount goes to zero. In combination
1281 * with aset.c's CLOBBER_FREED_MEMORY option, this provides a good test
1282 * to catch references to already-released catcache entries.
1285 ReleaseCatCache(HeapTuple tuple)
1287 CatCTup *ct = (CatCTup *) (((char *) tuple) -
1288 offsetof(CatCTup, tuple));
1290 /* Safety checks to ensure we were handed a cache entry */
1291 Assert(ct->ct_magic == CT_MAGIC);
1292 Assert(ct->refcount > 0);
1295 ResourceOwnerForgetCatCacheRef(CurrentResourceOwner, &ct->tuple);
/* NOTE(review): the refcount decrement and the `if (ct->dead &&` line
 * that opens the condition below are elided from this extract. */
1298 #ifndef CATCACHE_FORCE_RELEASE
1301 ct->refcount == 0 &&
1302 (ct->c_list == NULL || ct->c_list->refcount == 0))
1303 CatCacheRemoveCTup(ct->my_cache, ct);
1308 * SearchCatCacheList
1310 * Generate a list of all tuples matching a partial key (that is,
1311 * a key specifying just the first K of the cache's N key columns).
1313 * The caller must not modify the list object or the pointed-to tuples,
1314 * and must call ReleaseCatCacheList() when done with the list.
1317 SearchCatCacheList(CatCache *cache,
1324 ScanKeyData cur_skey[4];
1329 List *volatile ctlist;
1330 ListCell *ctlist_item;
1334 MemoryContext oldcxt;
1338 * one-time startup overhead for each cache
1340 if (cache->cc_tupdesc == NULL)
1341 CatalogCacheInitializeCache(cache);
1343 Assert(nkeys > 0 && nkeys < cache->cc_nkeys);
1345 #ifdef CATCACHE_STATS
1346 cache->cc_lsearches++;
1350 * initialize the search key information
1352 memcpy(cur_skey, cache->cc_skey, sizeof(cur_skey));
1353 cur_skey[0].sk_argument = v1;
1354 cur_skey[1].sk_argument = v2;
1355 cur_skey[2].sk_argument = v3;
1356 cur_skey[3].sk_argument = v4;
1359 * compute a hash value of the given keys for faster search. We don't
1360 * presently divide the CatCList items into buckets, but this still lets
1361 * us skip non-matching items quickly most of the time.
1363 lHashValue = CatalogCacheComputeHashValue(cache, nkeys, cur_skey);
1366 * scan the items until we find a match or exhaust our list
1368 for (elt = DLGetHead(&cache->cc_lists);
1370 elt = DLGetSucc(elt))
1374 cl = (CatCList *) DLE_VAL(elt);
1377 continue; /* ignore dead entries */
1379 if (cl->hash_value != lHashValue)
1380 continue; /* quickly skip entry if wrong hash val */
1383 * see if the cached list matches our key.
1385 if (cl->nkeys != nkeys)
1387 HeapKeyTest(&cl->tuple,
1396 * We found a matching list. Move the list to the front of the
1397 * cache's list-of-lists, to speed subsequent searches. (We do not
1398 * move the members to the fronts of their hashbucket lists, however,
1399 * since there's no point in that unless they are searched for
1402 DLMoveToFront(&cl->cache_elem);
1404 /* Bump the list's refcount and return it */
1405 ResourceOwnerEnlargeCatCacheListRefs(CurrentResourceOwner);
1407 ResourceOwnerRememberCatCacheListRef(CurrentResourceOwner, cl);
1409 CACHE2_elog(DEBUG2, "SearchCatCacheList(%s): found list",
1412 #ifdef CATCACHE_STATS
1420 * List was not found in cache, so we have to build it by reading the
1421 * relation. For each matching tuple found in the relation, use an
1422 * existing cache entry if possible, else build a new one.
1424 * We have to bump the member refcounts temporarily to ensure they won't
1425 * get dropped from the cache while loading other members. We use a PG_TRY
1426 * block to ensure we can undo those refcounts if we get an error before
1427 * we finish constructing the CatCList.
1429 ResourceOwnerEnlargeCatCacheListRefs(CurrentResourceOwner);
1436 SysScanDesc scandesc;
1438 relation = heap_open(cache->cc_reloid, AccessShareLock);
1440 scandesc = systable_beginscan(relation,
1447 /* The list will be ordered iff we are doing an index scan */
1448 ordered = (scandesc->irel != NULL);
1450 while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
1456 * See if there's an entry for this tuple already.
1459 hashValue = CatalogCacheComputeTupleHashValue(cache, ntp);
1460 hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);
1462 for (elt = DLGetHead(&cache->cc_bucket[hashIndex]);
1464 elt = DLGetSucc(elt))
1466 ct = (CatCTup *) DLE_VAL(elt);
1468 if (ct->dead || ct->negative)
1469 continue; /* ignore dead and negative entries */
1471 if (ct->hash_value != hashValue)
1472 continue; /* quickly skip entry if wrong hash val */
1474 if (!ItemPointerEquals(&(ct->tuple.t_self), &(ntp->t_self)))
1475 continue; /* not same tuple */
1478 * Found a match, but can't use it if it belongs to another
1489 /* We didn't find a usable entry, so make a new one */
1490 ct = CatalogCacheCreateEntry(cache, ntp,
1491 hashValue, hashIndex,
1495 /* Careful here: add entry to ctlist, then bump its refcount */
1496 /* This way leaves state correct if lappend runs out of memory */
1497 ctlist = lappend(ctlist, ct);
1501 systable_endscan(scandesc);
1503 heap_close(relation, AccessShareLock);
1506 * Now we can build the CatCList entry. First we need a dummy tuple
1507 * containing the key values...
1509 ntp = build_dummy_tuple(cache, nkeys, cur_skey);
1510 oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
1511 nmembers = list_length(ctlist);
1513 palloc(sizeof(CatCList) + nmembers * sizeof(CatCTup *));
1514 heap_copytuple_with_tuple(ntp, &cl->tuple);
1515 MemoryContextSwitchTo(oldcxt);
1516 heap_freetuple(ntp);
1519 * We are now past the last thing that could trigger an elog before we
1520 * have finished building the CatCList and remembering it in the
1521 * resource owner. So it's OK to fall out of the PG_TRY, and indeed
1522 * we'd better do so before we start marking the members as belonging
1529 foreach(ctlist_item, ctlist)
1531 ct = (CatCTup *) lfirst(ctlist_item);
1532 Assert(ct->c_list == NULL);
1533 Assert(ct->refcount > 0);
1536 #ifndef CATCACHE_FORCE_RELEASE
1539 ct->refcount == 0 &&
1540 (ct->c_list == NULL || ct->c_list->refcount == 0))
1541 CatCacheRemoveCTup(cache, ct);
1548 cl->cl_magic = CL_MAGIC;
1549 cl->my_cache = cache;
1550 DLInitElem(&cl->cache_elem, cl);
1551 cl->refcount = 0; /* for the moment */
1553 cl->ordered = ordered;
1555 cl->hash_value = lHashValue;
1556 cl->n_members = nmembers;
1559 foreach(ctlist_item, ctlist)
1561 cl->members[i++] = ct = (CatCTup *) lfirst(ctlist_item);
1562 Assert(ct->c_list == NULL);
1564 /* release the temporary refcount on the member */
1565 Assert(ct->refcount > 0);
1567 /* mark list dead if any members already dead */
1571 Assert(i == nmembers);
1573 DLAddHead(&cache->cc_lists, &cl->cache_elem);
1575 /* Finally, bump the list's refcount and return it */
1577 ResourceOwnerRememberCatCacheListRef(CurrentResourceOwner, cl);
1579 CACHE3_elog(DEBUG2, "SearchCatCacheList(%s): made list of %d members",
1580 cache->cc_relname, nmembers);
1586 * ReleaseCatCacheList
1588 * Decrement the reference count of a catcache list.
1591 ReleaseCatCacheList(CatCList *list)
1593 /* Safety checks to ensure we were handed a cache entry */
1594 Assert(list->cl_magic == CL_MAGIC);
1595 Assert(list->refcount > 0);
 /* Drop the current resource owner's tracked pin on this list. */
1597 ResourceOwnerForgetCatCacheListRef(CurrentResourceOwner, list);
 /*
  * Physically remove the list once nothing references it anymore.
  * NOTE(review): the elided portion of this condition (orig line 1602)
  * presumably also requires the list to be marked dead -- confirm against
  * the full source.  CATCACHE_FORCE_RELEASE appears to be a debug switch
  * that compiles this path for eager release.
  */
1600 #ifndef CATCACHE_FORCE_RELEASE
1603 list->refcount == 0)
1604 CatCacheRemoveCList(list->my_cache, list);
1609 * CatalogCacheCreateEntry
1610 * Create a new CatCTup entry, copying the given HeapTuple and other
1611 * supplied data into it. The new entry initially has refcount 0.
1614 CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp,
1615 uint32 hashValue, Index hashIndex, bool negative)
1618 MemoryContext oldcxt;
1621 * Allocate CatCTup header in cache memory, and copy the tuple there too.
 /*
  * The header and tuple copy must live in CacheMemoryContext so they
  * survive resets of the caller's (shorter-lived) memory context.
  */
1623 oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
1624 ct = (CatCTup *) palloc(sizeof(CatCTup));
1625 heap_copytuple_with_tuple(ntp, &ct->tuple);
1626 MemoryContextSwitchTo(oldcxt);
1629 * Finish initializing the CatCTup header, and add it to the cache's
1630 * linked list and counts.
1632 ct->ct_magic = CT_MAGIC;
1633 ct->my_cache = cache;
1634 DLInitElem(&ct->cache_elem, (void *) ct);
 /* refcount is bumped by the caller once the entry is safely built */
1636 ct->refcount = 0; /* for the moment */
1638 ct->negative = negative;
1639 ct->hash_value = hashValue;
 /* Insert at the head of its hash bucket so fresh entries are found first. */
1641 DLAddHead(&cache->cc_bucket[hashIndex], &ct->cache_elem);
 /* Bump the global count of cached tuples kept in the cache header. */
1644 CacheHdr->ch_ntup++;
1651 * Generate a palloc'd HeapTuple that contains the specified key
1652 * columns, and NULLs for other columns.
1654 * This is used to store the keys for negative cache entries and CatCList
1655 * entries, which don't have real tuples associated with them.
1658 build_dummy_tuple(CatCache *cache, int nkeys, ScanKey skeys)
1661 TupleDesc tupDesc = cache->cc_tupdesc;
1664 Oid tupOid = InvalidOid;
 /* scratch space for up to 4 padded NAME keys (max key count) */
1665 NameData tempNames[4];
1668 values = (Datum *) palloc(tupDesc->natts * sizeof(Datum));
1669 nulls = (char *) palloc(tupDesc->natts * sizeof(char));
 /* heap_formtuple convention: 'n' marks a NULL column, ' ' a non-NULL one */
1671 memset(values, 0, tupDesc->natts * sizeof(Datum));
1672 memset(nulls, 'n', tupDesc->natts * sizeof(char));
1674 for (i = 0; i < nkeys; i++)
1676 int attindex = cache->cc_key[i];
1677 Datum keyval = skeys[i].sk_argument;
1682 * Here we must be careful in case the caller passed a C string
1683 * where a NAME is wanted: convert the given argument to a
1684 * correctly padded NAME. Otherwise the memcpy() done in
1685 * heap_formtuple could fall off the end of memory.
1687 if (cache->cc_isname[i])
1689 Name newval = &tempNames[i];
1691 namestrcpy(newval, DatumGetCString(keyval));
1692 keyval = NameGetDatum(newval);
 /*
  * NOTE(review): the elided branch structure (orig lines 1693-1698)
  * presumably distinguishes ordinary columns (attindex > 0), stored in
  * values[]/nulls[], from the OID system column -- confirm.  The OID key
  * cannot be stored as a regular column; it is applied to the formed
  * tuple below via HeapTupleSetOid.
  */
1694 values[attindex - 1] = keyval;
1695 nulls[attindex - 1] = ' ';
1699 Assert(attindex == ObjectIdAttributeNumber);
1700 tupOid = DatumGetObjectId(keyval);
1704 ntp = heap_formtuple(tupDesc, values, nulls);
1705 if (tupOid != InvalidOid)
1706 HeapTupleSetOid(ntp, tupOid);
1716 * PrepareToInvalidateCacheTuple()
1718 * This is part of a rather subtle chain of events, so pay attention:
1720 * When a tuple is inserted or deleted, it cannot be flushed from the
1721 * catcaches immediately, for reasons explained at the top of cache/inval.c.
1722 * Instead we have to add entry(s) for the tuple to a list of pending tuple
1723 * invalidations that will be done at the end of the command or transaction.
1725 * The lists of tuples that need to be flushed are kept by inval.c. This
1726 * routine is a helper routine for inval.c. Given a tuple belonging to
1727 * the specified relation, find all catcaches it could be in, compute the
1728 * correct hash value for each such catcache, and call the specified function
1729 * to record the cache id, hash value, and tuple ItemPointer in inval.c's
1730 * lists. CatalogCacheIdInvalidate will be called later, if appropriate,
1731 * using the recorded information.
1733 * Note that it is irrelevant whether the given tuple is actually loaded
1734 * into the catcache at the moment. Even if it's not there now, it might
1735 * be by the end of the command, or there might be a matching negative entry
1736 * to flush --- or other backends' caches might have such entries --- so
1737 * we have to make list entries to flush it later.
1739 * Also note that it's not an error if there are no catcaches for the
1740 * specified relation. inval.c doesn't know exactly which rels have
1741 * catcaches --- it will call this routine for any tuple that's in a
1745 PrepareToInvalidateCacheTuple(Relation relation,
1747 void (*function) (int, uint32, ItemPointer, Oid))
1752 CACHE1_elog(DEBUG2, "PrepareToInvalidateCacheTuple: called");
 /* sanity checks: caller must supply a valid relation, tuple, and callback */
1757 Assert(RelationIsValid(relation));
1758 Assert(HeapTupleIsValid(tuple));
1759 Assert(PointerIsValid(function));
1760 Assert(CacheHdr != NULL);
1762 reloid = RelationGetRelid(relation);
1766 * if the cache contains tuples from the specified relation
1767 * compute the tuple's hash value in this cache,
1768 * and call the passed function to register the information.
 /* walk the global chain of all catcaches */
1772 for (ccp = CacheHdr->ch_caches; ccp; ccp = ccp->cc_next)
1774 /* Just in case cache hasn't finished initialization yet... */
1775 if (ccp->cc_tupdesc == NULL)
1776 CatalogCacheInitializeCache(ccp);
 /* skip caches built over other relations */
1778 if (ccp->cc_reloid != reloid)
1781 (*function) (ccp->id,
1782 CatalogCacheComputeTupleHashValue(ccp, tuple),
 /*
  * NOTE(review): database OID 0 appears to flag entries for shared
  * catalogs, which are not tied to any one database -- confirm how
  * inval.c consumes this field.
  */
1784 ccp->cc_relisshared ? (Oid) 0 : MyDatabaseId);
1790 * Subroutines for warning about reference leaks. These are exported so
1791 * that resowner.c can call them.
1794 PrintCatCacheLeakWarning(HeapTuple tuple)
 /*
  * Recover the CatCTup header from the tuple pointer handed out to
  * callers: the tuple is embedded in the CatCTup, so back up by
  * offsetof(CatCTup, tuple) bytes.
  */
1796 CatCTup *ct = (CatCTup *) (((char *) tuple) -
1797 offsetof(CatCTup, tuple));
1799 /* Safety check to ensure we were handed a cache entry */
1800 Assert(ct->ct_magic == CT_MAGIC);
 /* Identify the leaked entry by cache name/id and tuple block/offset. */
1802 elog(WARNING, "cache reference leak: cache %s (%d), tuple %u/%u has count %d",
1803 ct->my_cache->cc_relname, ct->my_cache->id,
1804 ItemPointerGetBlockNumber(&(tuple->t_self)),
1805 ItemPointerGetOffsetNumber(&(tuple->t_self)),
 /* Warn (via elog) about a CatCList whose refcount was never released. */
1810 PrintCatCacheListLeakWarning(CatCList *list)
1812 elog(WARNING, "cache reference leak: cache %s (%d), list %p has count %d",
1813 list->my_cache->cc_relname, list->my_cache->id,
1814 list, list->refcount);