1 /*-------------------------------------------------------------------------
4 * System catalog cache for tuples matching a key.
6 * Portions Copyright (c) 1996-2011, PostgreSQL Global Development Group
7 * Portions Copyright (c) 1994, Regents of the University of California
11 * src/backend/utils/cache/catcache.c
13 *-------------------------------------------------------------------------
17 #include "access/genam.h"
18 #include "access/hash.h"
19 #include "access/heapam.h"
20 #include "access/relscan.h"
21 #include "access/sysattr.h"
22 #include "access/valid.h"
23 #include "catalog/pg_operator.h"
24 #include "catalog/pg_type.h"
25 #include "miscadmin.h"
27 #include "storage/ipc.h" /* for on_proc_exit */
29 #include "storage/lmgr.h"
30 #include "utils/builtins.h"
31 #include "utils/fmgroids.h"
32 #include "utils/inval.h"
33 #include "utils/memutils.h"
34 #include "utils/rel.h"
35 #include "utils/resowner.h"
36 #include "utils/syscache.h"
37 #include "utils/tqual.h"
40 /* #define CACHEDEBUG */ /* turns DEBUG elogs on */
43 * Given a hash value and the size of the hash table, find the bucket
44 * in which the hash value belongs. Since the hash table must contain
45 * a power-of-2 number of elements, this is a simple bitmask.
47 #define HASH_INDEX(h, sz) ((Index) ((h) & ((sz) - 1)))
51 * variables, macros and other stuff
55 #define CACHE1_elog(a,b) elog(a,b)
56 #define CACHE2_elog(a,b,c) elog(a,b,c)
57 #define CACHE3_elog(a,b,c,d) elog(a,b,c,d)
58 #define CACHE4_elog(a,b,c,d,e) elog(a,b,c,d,e)
59 #define CACHE5_elog(a,b,c,d,e,f) elog(a,b,c,d,e,f)
60 #define CACHE6_elog(a,b,c,d,e,f,g) elog(a,b,c,d,e,f,g)
62 #define CACHE1_elog(a,b)
63 #define CACHE2_elog(a,b,c)
64 #define CACHE3_elog(a,b,c,d)
65 #define CACHE4_elog(a,b,c,d,e)
66 #define CACHE5_elog(a,b,c,d,e,f)
67 #define CACHE6_elog(a,b,c,d,e,f,g)
70 /* Cache management header --- pointer is NULL until created */
71 static CatCacheHeader *CacheHdr = NULL;
74 static uint32 CatalogCacheComputeHashValue(CatCache *cache, int nkeys,
76 static uint32 CatalogCacheComputeTupleHashValue(CatCache *cache,
80 static void CatCachePrintStats(int code, Datum arg);
82 static void CatCacheRemoveCTup(CatCache *cache, CatCTup *ct);
83 static void CatCacheRemoveCList(CatCache *cache, CatCList *cl);
84 static void CatalogCacheInitializeCache(CatCache *cache);
85 static CatCTup *CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp,
86 uint32 hashValue, Index hashIndex,
88 static HeapTuple build_dummy_tuple(CatCache *cache, int nkeys, ScanKey skeys);
92 * internal support functions
96 * Look up the hash and equality functions for system types that are used
97 * as cache key fields.
99 * XXX this should be replaced by catalog lookups,
100 * but that seems to pose considerable risk of circularity...
/*
 * GetCCHashEqFuncs
 *
 * Map a catcache key column's type OID to a hash function pointer and an
 * equality-procedure OID.  An unsupported key type is a FATAL error.
 * NOTE(review): this listing is decimated -- the switch statement, most
 * case labels, the break statements, and the closing braces are missing.
 * Restore from the full source before making any code change here.
 */
103 GetCCHashEqFuncs(Oid keytype, PGFunction *hashfunc, RegProcedure *eqfunc)
108 *hashfunc = hashchar;
113 *hashfunc = hashchar;
118 *hashfunc = hashname;
123 *hashfunc = hashint2;
128 *hashfunc = hashint2vector;
130 *eqfunc = F_INT2VECTOREQ;
133 *hashfunc = hashint4;
138 *hashfunc = hashtext;
144 case REGPROCEDUREOID:
150 case REGDICTIONARYOID:
156 *hashfunc = hashoidvector;
158 *eqfunc = F_OIDVECTOREQ;
/* Unsupported type: report and fall out with NULL/InvalidOid to quiet compilers. */
161 elog(FATAL, "type %u not supported as catcache key", keytype);
162 *hashfunc = NULL; /* keep compiler quiet */
164 *eqfunc = InvalidOid;
170 * CatalogCacheComputeHashValue
172 * Compute the hash value associated with a given set of lookup keys
/*
 * CatalogCacheComputeHashValue
 *
 * Combine the per-key hash results into a single uint32.  Each key's hash
 * is XORed in at a different rotation (24/16/8/0 bits) so that multi-key
 * lookups don't produce trivial collisions when keys are swapped.
 * NOTE(review): decimated listing -- the switch(nkeys) framing and the
 * fall-through case labels (4 down to 1) are missing from this view.
 */
175 CatalogCacheComputeHashValue(CatCache *cache, int nkeys, ScanKey cur_skey)
177 uint32 hashValue = 0;
180 CACHE4_elog(DEBUG2, "CatalogCacheComputeHashValue %s %d %p",
189 DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[3],
190 cur_skey[3].sk_argument));
191 hashValue ^= oneHash << 24;
192 hashValue ^= oneHash >> 8;
196 DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[2],
197 cur_skey[2].sk_argument));
198 hashValue ^= oneHash << 16;
199 hashValue ^= oneHash >> 16;
203 DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[1],
204 cur_skey[1].sk_argument));
205 hashValue ^= oneHash << 8;
206 hashValue ^= oneHash >> 24;
210 DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[0],
211 cur_skey[0].sk_argument));
212 hashValue ^= oneHash;
215 elog(FATAL, "wrong number of hash keys: %d", nkeys);
223 * CatalogCacheComputeTupleHashValue
225 * Compute the hash value associated with a given tuple to be cached
/*
 * CatalogCacheComputeTupleHashValue
 *
 * Build a scankey from an actual catalog tuple's key columns, then hash it
 * with CatalogCacheComputeHashValue.  OID system-attribute keys are read
 * via HeapTupleGetOid; the alternate (regular-attribute) arm of each
 * ternary is missing from this decimated listing -- presumably a
 * fastgetattr/heap_getattr fetch, TODO confirm against full source.
 */
228 CatalogCacheComputeTupleHashValue(CatCache *cache, HeapTuple tuple)
230 ScanKeyData cur_skey[CATCACHE_MAXKEYS];
233 /* Copy pre-initialized overhead data for scankey */
234 memcpy(cur_skey, cache->cc_skey, sizeof(cur_skey));
236 /* Now extract key fields from tuple, insert into scankey */
237 switch (cache->cc_nkeys)
240 cur_skey[3].sk_argument =
241 (cache->cc_key[3] == ObjectIdAttributeNumber)
242 ? ObjectIdGetDatum(HeapTupleGetOid(tuple))
250 cur_skey[2].sk_argument =
251 (cache->cc_key[2] == ObjectIdAttributeNumber)
252 ? ObjectIdGetDatum(HeapTupleGetOid(tuple))
260 cur_skey[1].sk_argument =
261 (cache->cc_key[1] == ObjectIdAttributeNumber)
262 ? ObjectIdGetDatum(HeapTupleGetOid(tuple))
270 cur_skey[0].sk_argument =
271 (cache->cc_key[0] == ObjectIdAttributeNumber)
272 ? ObjectIdGetDatum(HeapTupleGetOid(tuple))
280 elog(FATAL, "wrong number of hash keys: %d", cache->cc_nkeys);
284 return CatalogCacheComputeHashValue(cache, cache->cc_nkeys, cur_skey);
288 #ifdef CATCACHE_STATS
/*
 * CatCachePrintStats
 *
 * on_proc_exit callback (only built under CATCACHE_STATS): walk every
 * catcache, emit a DEBUG2 line of per-cache hit/load/invalidation counters,
 * accumulate grand totals, and emit a final totals line.  Caches that were
 * never searched and hold no tuples are skipped.
 * NOTE(review): variable declarations and several accumulator lines are
 * missing from this decimated listing.
 */
291 CatCachePrintStats(int code, Datum arg)
294 long cc_searches = 0;
296 long cc_neg_hits = 0;
297 long cc_newloads = 0;
299 long cc_lsearches = 0;
302 for (cache = CacheHdr->ch_caches; cache; cache = cache->cc_next)
304 if (cache->cc_ntup == 0 && cache->cc_searches == 0)
305 continue; /* don't print unused caches */
306 elog(DEBUG2, "catcache %s/%u: %d tup, %ld srch, %ld+%ld=%ld hits, %ld+%ld=%ld loads, %ld invals, %ld lsrch, %ld lhits",
313 cache->cc_hits + cache->cc_neg_hits,
315 cache->cc_searches - cache->cc_hits - cache->cc_neg_hits - cache->cc_newloads,
316 cache->cc_searches - cache->cc_hits - cache->cc_neg_hits,
320 cc_searches += cache->cc_searches;
321 cc_hits += cache->cc_hits;
322 cc_neg_hits += cache->cc_neg_hits;
323 cc_newloads += cache->cc_newloads;
324 cc_invals += cache->cc_invals;
325 cc_lsearches += cache->cc_lsearches;
326 cc_lhits += cache->cc_lhits;
328 elog(DEBUG2, "catcache totals: %d tup, %ld srch, %ld+%ld=%ld hits, %ld+%ld=%ld loads, %ld invals, %ld lsrch, %ld lhits",
333 cc_hits + cc_neg_hits,
335 cc_searches - cc_hits - cc_neg_hits - cc_newloads,
336 cc_searches - cc_hits - cc_neg_hits,
341 #endif /* CATCACHE_STATS */
347 * Unlink and delete the given cache entry
349 * NB: if it is a member of a CatCList, the CatCList is deleted too.
350 * Both the cache entry and the list had better have zero refcount.
/*
 * CatCacheRemoveCTup
 *
 * Unlink and delete a cache entry whose refcount is already zero.  If the
 * entry belongs to a CatCList, delegate to CatCacheRemoveCList, which
 * recurses back here for each member.
 * NOTE(review): decimated listing -- the dead-flag set before recursing,
 * the pfree of the CatCTup itself, and the cc_ntup/ch_ntup decrements are
 * missing from this view.
 */
353 CatCacheRemoveCTup(CatCache *cache, CatCTup *ct)
355 Assert(ct->refcount == 0);
356 Assert(ct->my_cache == cache);
361 * The cleanest way to handle this is to call CatCacheRemoveCList,
362 * which will recurse back to me, and the recursive call will do the
363 * work. Set the "dead" flag to make sure it does recurse.
366 CatCacheRemoveCList(cache, ct->c_list);
367 return; /* nothing left to do */
370 /* delink from linked list */
371 DLRemove(&ct->cache_elem);
373 /* free associated tuple data */
374 if (ct->tuple.t_data != NULL)
375 pfree(ct->tuple.t_data);
383 * CatCacheRemoveCList
385 * Unlink and delete the given cache list entry
387 * NB: any dead member entries that become unreferenced are deleted too.
/*
 * CatCacheRemoveCList
 *
 * Unlink and delete a cache list entry (refcount must be zero).  Members
 * are delinked first; any member that is dead and now unreferenced is
 * removed via CatCacheRemoveCTup.
 * NOTE(review): decimated listing -- the ct->c_list = NULL delink, the
 * refcount/dead condition guarding the member removal, and the final
 * pfree(cl) are missing from this view.
 */
390 CatCacheRemoveCList(CatCache *cache, CatCList *cl)
394 Assert(cl->refcount == 0);
395 Assert(cl->my_cache == cache);
397 /* delink from member tuples */
398 for (i = cl->n_members; --i >= 0;)
400 CatCTup *ct = cl->members[i];
402 Assert(ct->c_list == cl);
404 /* if the member is dead and now has no references, remove it */
406 #ifndef CATCACHE_FORCE_RELEASE
410 CatCacheRemoveCTup(cache, ct);
413 /* delink from linked list */
414 DLRemove(&cl->cache_elem);
416 /* free associated tuple data */
417 if (cl->tuple.t_data != NULL)
418 pfree(cl->tuple.t_data);
424 * CatalogCacheIdInvalidate
426 * Invalidate entries in the specified cache, given a hash value.
428 * We delete cache entries that match the hash value, whether positive
429 * or negative. We don't care whether the invalidation is the result
430 * of a tuple insertion or a deletion.
432 * We used to try to match positive cache entries by TID, but that is
433 * unsafe after a VACUUM FULL on a system catalog: an inval event could
434 * be queued before VACUUM FULL, and then processed afterwards, when the
435 * target tuple that has to be invalidated has a different TID than it
436 * did when the event was created. So now we just compare hash values and
437 * accept the small risk of unnecessary invalidations due to false matches.
439 * This routine is only quasi-public: it should only be used by inval.c.
/*
 * CatalogCacheIdInvalidate
 *
 * Invalidate entries matching a hash value in the cache identified by
 * cacheId.  All CatCLists in that cache are zapped unconditionally; bucket
 * entries are removed (or, if still referenced, presumably marked dead --
 * the refcount>0 branch bodies are missing from this decimated listing,
 * TODO confirm).  Only quasi-public: intended for use by inval.c.
 */
442 CatalogCacheIdInvalidate(int cacheId, uint32 hashValue)
446 CACHE1_elog(DEBUG2, "CatalogCacheIdInvalidate: called");
449 * inspect caches to find the proper cache
451 for (ccp = CacheHdr->ch_caches; ccp; ccp = ccp->cc_next)
457 if (cacheId != ccp->id)
461 * We don't bother to check whether the cache has finished
462 * initialization yet; if not, there will be no entries in it so no
467 * Invalidate *all* CatCLists in this cache; it's too hard to tell
468 * which searches might still be correct, so just zap 'em all.
470 for (elt = DLGetHead(&ccp->cc_lists); elt; elt = nextelt)
472 CatCList *cl = (CatCList *) DLE_VAL(elt);
474 nextelt = DLGetSucc(elt);
476 if (cl->refcount > 0)
479 CatCacheRemoveCList(ccp, cl);
483 * inspect the proper hash bucket for tuple matches
485 hashIndex = HASH_INDEX(hashValue, ccp->cc_nbuckets);
487 for (elt = DLGetHead(&ccp->cc_bucket[hashIndex]); elt; elt = nextelt)
489 CatCTup *ct = (CatCTup *) DLE_VAL(elt);
491 nextelt = DLGetSucc(elt);
493 if (hashValue == ct->hash_value)
495 if (ct->refcount > 0 ||
496 (ct->c_list && ct->c_list->refcount > 0))
499 /* list, if any, was marked dead above */
500 Assert(ct->c_list == NULL || ct->c_list->dead);
503 CatCacheRemoveCTup(ccp, ct);
504 CACHE1_elog(DEBUG2, "CatalogCacheIdInvalidate: invalidated");
505 #ifdef CATCACHE_STATS
508 /* could be multiple matches, so keep looking! */
511 break; /* need only search this one cache */
515 /* ----------------------------------------------------------------
517 * ----------------------------------------------------------------
522 * Standard routine for creating cache context if it doesn't exist yet
524 * There are a lot of places (probably far more than necessary) that check
525 * whether CacheMemoryContext exists yet and want to create it if not.
526 * We centralize knowledge of exactly how to create it here.
/*
 * CreateCacheMemoryContext
 *
 * Centralized, idempotent creation of CacheMemoryContext as a child of
 * TopMemoryContext with the standard AllocSet sizing parameters.  Safe to
 * call when the context may already exist.
 */
529 CreateCacheMemoryContext(void)
532 * Purely for paranoia, check that context doesn't exist; caller probably
535 if (!CacheMemoryContext)
536 CacheMemoryContext = AllocSetContextCreate(TopMemoryContext,
537 "CacheMemoryContext",
538 ALLOCSET_DEFAULT_MINSIZE,
539 ALLOCSET_DEFAULT_INITSIZE,
540 ALLOCSET_DEFAULT_MAXSIZE);
547 * Clean up catcaches at end of main transaction (either commit or abort)
549 * As of PostgreSQL 8.1, catcache pins should get released by the
550 * ResourceOwner mechanism. This routine is just a debugging
551 * cross-check that no pins remain.
/*
 * AtEOXact_CatCache
 *
 * End-of-transaction cross-check: pins should already have been released
 * by the ResourceOwner mechanism, so under USE_ASSERT_CHECKING we walk
 * every cache asserting that all CatCList and CatCTup refcounts are zero
 * and that the magic numbers are intact.  In non-assert builds this is
 * effectively a no-op (the entire body is #ifdef'd).
 */
554 AtEOXact_CatCache(bool isCommit)
556 #ifdef USE_ASSERT_CHECKING
561 for (ccp = CacheHdr->ch_caches; ccp; ccp = ccp->cc_next)
566 /* Check CatCLists */
567 for (elt = DLGetHead(&ccp->cc_lists); elt; elt = DLGetSucc(elt))
569 CatCList *cl = (CatCList *) DLE_VAL(elt);
571 Assert(cl->cl_magic == CL_MAGIC);
572 Assert(cl->refcount == 0);
576 /* Check individual tuples */
577 for (i = 0; i < ccp->cc_nbuckets; i++)
579 for (elt = DLGetHead(&ccp->cc_bucket[i]);
581 elt = DLGetSucc(elt))
583 CatCTup *ct = (CatCTup *) DLE_VAL(elt);
585 Assert(ct->ct_magic == CT_MAGIC);
586 Assert(ct->refcount == 0);
598 * Reset one catalog cache to empty.
600 * This is not very efficient if the target cache is nearly empty.
601 * However, it shouldn't need to be efficient; we don't invoke it often.
/*
 * ResetCatalogCache
 *
 * Empty one catcache: remove every CatCList and every bucket entry, or
 * mark them dead if they are still referenced (the refcount>0 branch
 * bodies are missing from this decimated listing -- presumably they set
 * the dead flag, TODO confirm against full source).
 */
604 ResetCatalogCache(CatCache *cache)
610 /* Remove each list in this cache, or at least mark it dead */
611 for (elt = DLGetHead(&cache->cc_lists); elt; elt = nextelt)
613 CatCList *cl = (CatCList *) DLE_VAL(elt);
615 nextelt = DLGetSucc(elt);
617 if (cl->refcount > 0)
620 CatCacheRemoveCList(cache, cl);
623 /* Remove each tuple in this cache, or at least mark it dead */
624 for (i = 0; i < cache->cc_nbuckets; i++)
626 for (elt = DLGetHead(&cache->cc_bucket[i]); elt; elt = nextelt)
628 CatCTup *ct = (CatCTup *) DLE_VAL(elt);
630 nextelt = DLGetSucc(elt);
632 if (ct->refcount > 0 ||
633 (ct->c_list && ct->c_list->refcount > 0))
636 /* list, if any, was marked dead above */
637 Assert(ct->c_list == NULL || ct->c_list->dead);
640 CatCacheRemoveCTup(cache, ct);
641 #ifdef CATCACHE_STATS
651 * Reset all caches when a shared cache inval event forces it
/*
 * ResetCatalogCaches
 *
 * Flush every catcache; called when a shared cache-invalidation event
 * forces a full reset.  Simply iterates the cache list applying
 * ResetCatalogCache.
 */
654 ResetCatalogCaches(void)
658 CACHE1_elog(DEBUG2, "ResetCatalogCaches called");
660 for (cache = CacheHdr->ch_caches; cache; cache = cache->cc_next)
661 ResetCatalogCache(cache);
663 CACHE1_elog(DEBUG2, "end of ResetCatalogCaches call");
667 * CatalogCacheFlushCatalog
669 * Flush all catcache entries that came from the specified system catalog.
670 * This is needed after VACUUM FULL/CLUSTER on the catalog, since the
671 * tuples very likely now have different TIDs than before. (At one point
672 * we also tried to force re-execution of CatalogCacheInitializeCache for
673 * the cache(s) on that catalog. This is a bad idea since it leads to all
674 * kinds of trouble if a cache flush occurs while loading cache entries.
675 * We now avoid the need to do it by copying cc_tupdesc out of the relcache,
676 * rather than relying on the relcache to keep a tupdesc for us. Of course
677 * this assumes the tupdesc of a cachable system table will not change...)
/*
 * CatalogCacheFlushCatalog
 *
 * Flush all entries of every catcache whose underlying catalog is catId,
 * then invoke the registered syscache callbacks for each such cache.
 * Needed after VACUUM FULL/CLUSTER on a system catalog, since cached
 * tuples' TIDs are then stale (see block comment above in the file).
 */
680 CatalogCacheFlushCatalog(Oid catId)
684 CACHE2_elog(DEBUG2, "CatalogCacheFlushCatalog called for %u", catId);
686 for (cache = CacheHdr->ch_caches; cache; cache = cache->cc_next)
688 /* Does this cache store tuples of the target catalog? */
689 if (cache->cc_reloid == catId)
691 /* Yes, so flush all its contents */
692 ResetCatalogCache(cache);
694 /* Tell inval.c to call syscache callbacks for this cache */
695 CallSyscacheCallbacks(cache->id, 0);
699 CACHE1_elog(DEBUG2, "end of CatalogCacheFlushCatalog call");
705 * This allocates and initializes a cache for a system catalog relation.
706 * Actually, the cache is only partially initialized to avoid opening the
707 * relation. The relation will be opened and the rest of the cache
708 * structure initialized on the first access.
711 #define InitCatCache_DEBUG2 \
713 elog(DEBUG2, "InitCatCache: rel=%u ind=%u id=%d nkeys=%d size=%d", \
714 cp->cc_reloid, cp->cc_indexoid, cp->id, \
715 cp->cc_nkeys, cp->cc_nbuckets); \
718 #define InitCatCache_DEBUG2
/*
 * InitCatCache (body fragment)
 *
 * Allocates and partially initializes a catcache in CacheMemoryContext:
 * creates the global CacheHdr on first call, palloc0's the CatCache plus
 * its bucket array, records relation/index OIDs and key attnos, and links
 * the new cache onto CacheHdr->ch_caches.  The relation itself is NOT
 * opened here; that happens lazily in CatalogCacheInitializeCache.
 * NOTE(review): the function signature line (reloid, indexoid, nkeys,
 * key[], nbuckets parameters -- inferred from the body, TODO confirm) and
 * several brace/return lines are missing from this decimated listing.
 */
730 MemoryContext oldcxt;
734 * nbuckets is the number of hash buckets to use in this catcache.
735 * Currently we just use a hard-wired estimate of an appropriate size for
736 * each cache; maybe later make them dynamically resizable?
738 * nbuckets must be a power of two. We check this via Assert rather than
739 * a full runtime check because the values will be coming from constant
742 * If you're confused by the power-of-two check, see comments in
743 * bitmapset.c for an explanation.
745 Assert(nbuckets > 0 && (nbuckets & -nbuckets) == nbuckets);
748 * first switch to the cache context so our allocations do not vanish at
749 * the end of a transaction
751 if (!CacheMemoryContext)
752 CreateCacheMemoryContext();
754 oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
757 * if first time through, initialize the cache group header
759 if (CacheHdr == NULL)
761 CacheHdr = (CatCacheHeader *) palloc(sizeof(CatCacheHeader));
762 CacheHdr->ch_caches = NULL;
763 CacheHdr->ch_ntup = 0;
764 #ifdef CATCACHE_STATS
765 /* set up to dump stats at backend exit */
766 on_proc_exit(CatCachePrintStats, 0);
771 * allocate a new cache structure
773 * Note: we assume zeroing initializes the Dllist headers correctly
775 cp = (CatCache *) palloc0(sizeof(CatCache) + nbuckets * sizeof(Dllist));
778 * initialize the cache's relation information for the relation
779 * corresponding to this cache, and initialize some of the new cache's
780 * other internal fields. But don't open the relation yet.
783 cp->cc_relname = "(not known yet)";
784 cp->cc_reloid = reloid;
785 cp->cc_indexoid = indexoid;
786 cp->cc_relisshared = false; /* temporary */
787 cp->cc_tupdesc = (TupleDesc) NULL;
789 cp->cc_nbuckets = nbuckets;
790 cp->cc_nkeys = nkeys;
791 for (i = 0; i < nkeys; ++i)
792 cp->cc_key[i] = key[i];
795 * new cache is initialized as far as we can go for now. print some
796 * debugging information, if appropriate.
801 * add completed cache to top of group header's list
803 cp->cc_next = CacheHdr->ch_caches;
804 CacheHdr->ch_caches = cp;
807 * back to the old context before we return...
809 MemoryContextSwitchTo(oldcxt);
815 * CatalogCacheInitializeCache
817 * This function does final initialization of a catcache: obtain the tuple
818 * descriptor and set up the hash and equality function links. We assume
819 * that the relcache entry can be opened at this point!
822 #define CatalogCacheInitializeCache_DEBUG1 \
823 elog(DEBUG2, "CatalogCacheInitializeCache: cache @%p rel=%u", cache, \
826 #define CatalogCacheInitializeCache_DEBUG2 \
828 if (cache->cc_key[i] > 0) { \
829 elog(DEBUG2, "CatalogCacheInitializeCache: load %d/%d w/%d, %u", \
830 i+1, cache->cc_nkeys, cache->cc_key[i], \
831 tupdesc->attrs[cache->cc_key[i] - 1]->atttypid); \
833 elog(DEBUG2, "CatalogCacheInitializeCache: load %d/%d w/%d", \
834 i+1, cache->cc_nkeys, cache->cc_key[i]); \
838 #define CatalogCacheInitializeCache_DEBUG1
839 #define CatalogCacheInitializeCache_DEBUG2
/*
 * CatalogCacheInitializeCache
 *
 * Lazy second-phase initialization: open the catalog relation, copy its
 * tuple descriptor into permanent cache storage (so later relcache flushes
 * can't invalidate it), record relname/relisshared, then set up per-key
 * hash functions and equality scankeys.  Setting cc_tupdesc last marks the
 * cache fully initialized.
 * NOTE(review): decimated listing -- declarations of relation/tupdesc/
 * keytype/eqfunc and the else-branch assigning keytype = OIDOID for the
 * OID system attribute are missing from this view; confirm before editing.
 */
843 CatalogCacheInitializeCache(CatCache *cache)
846 MemoryContext oldcxt;
850 CatalogCacheInitializeCache_DEBUG1;
852 relation = heap_open(cache->cc_reloid, AccessShareLock);
855 * switch to the cache context so our allocations do not vanish at the end
858 Assert(CacheMemoryContext != NULL);
860 oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
863 * copy the relcache's tuple descriptor to permanent cache storage
865 tupdesc = CreateTupleDescCopyConstr(RelationGetDescr(relation));
868 * save the relation's name and relisshared flag, too (cc_relname is used
869 * only for debugging purposes)
871 cache->cc_relname = pstrdup(RelationGetRelationName(relation));
872 cache->cc_relisshared = RelationGetForm(relation)->relisshared;
875 * return to the caller's memory context and close the rel
877 MemoryContextSwitchTo(oldcxt);
879 heap_close(relation, AccessShareLock);
881 CACHE3_elog(DEBUG2, "CatalogCacheInitializeCache: %s, %d keys",
882 cache->cc_relname, cache->cc_nkeys);
885 * initialize cache's key information
887 for (i = 0; i < cache->cc_nkeys; ++i)
892 CatalogCacheInitializeCache_DEBUG2;
894 if (cache->cc_key[i] > 0)
895 keytype = tupdesc->attrs[cache->cc_key[i] - 1]->atttypid;
898 if (cache->cc_key[i] != ObjectIdAttributeNumber)
899 elog(FATAL, "only sys attr supported in caches is OID");
903 GetCCHashEqFuncs(keytype,
904 &cache->cc_hashfunc[i],
907 cache->cc_isname[i] = (keytype == NAMEOID);
910 * Do equality-function lookup (we assume this won't need a catalog
911 * lookup for any supported type)
913 fmgr_info_cxt(eqfunc,
914 &cache->cc_skey[i].sk_func,
917 /* Initialize sk_attno suitably for HeapKeyTest() and heap scans */
918 cache->cc_skey[i].sk_attno = cache->cc_key[i];
920 /* Fill in sk_strategy as well --- always standard equality */
921 cache->cc_skey[i].sk_strategy = BTEqualStrategyNumber;
922 cache->cc_skey[i].sk_subtype = InvalidOid;
923 /* Currently, there are no catcaches on collation-aware data types */
924 cache->cc_skey[i].sk_collation = InvalidOid;
926 CACHE4_elog(DEBUG2, "CatalogCacheInitializeCache %s %d %p",
933 * mark this cache fully initialized
935 cache->cc_tupdesc = tupdesc;
939 * InitCatCachePhase2 -- external interface for CatalogCacheInitializeCache
941 * One reason to call this routine is to ensure that the relcache has
942 * created entries for all the catalogs and indexes referenced by catcaches.
943 * Therefore, provide an option to open the index as well as fixing the
944 * cache itself. An exception is the indexes on pg_am, which we don't use
/*
 * InitCatCachePhase2
 *
 * Finish initializing the cache (if not yet done) and, when touch_index is
 * set, open/close the supporting index so the relcache builds an entry for
 * it.  The catalog is locked before the index is opened to preserve the
 * catalog-then-index lock ordering and avoid deadlock.
 * NOTE(review): decimated listing -- the touch_index condition around the
 * cache->id != AMOID test and the idesc declaration are missing here.
 */
948 InitCatCachePhase2(CatCache *cache, bool touch_index)
950 if (cache->cc_tupdesc == NULL)
951 CatalogCacheInitializeCache(cache);
954 cache->id != AMOID &&
960 * We must lock the underlying catalog before opening the index to
961 * avoid deadlock, since index_open could possibly result in reading
962 * this same catalog, and if anyone else is exclusive-locking this
963 * catalog and index they'll be doing it in that order.
965 LockRelationOid(cache->cc_reloid, AccessShareLock);
966 idesc = index_open(cache->cc_indexoid, AccessShareLock);
967 index_close(idesc, AccessShareLock);
968 UnlockRelationOid(cache->cc_reloid, AccessShareLock);
976 * This function checks for tuples that will be fetched by
977 * IndexSupportInitialize() during relcache initialization for
978 * certain system indexes that support critical syscaches.
979 * We can't use an indexscan to fetch these, else we'll get into
980 * infinite recursion. A plain heap scan will work, however.
981 * Once we have completed relcache initialization (signaled by
982 * criticalRelcachesBuilt), we don't have to worry anymore.
984 * Similarly, during backend startup we have to be able to use the
985 * pg_authid and pg_auth_members syscaches for authentication even if
986 * we don't yet have relcache entries for those catalogs' indexes.
/*
 * IndexScanOK
 *
 * Decide whether a syscache load may use an index scan.  Heap scans are
 * forced while critical relcache entries are being built (to avoid
 * infinite recursion through IndexSupportInitialize), always for pg_am,
 * and for authentication caches before shared relcache entries exist.
 * NOTE(review): decimated listing -- the switch on cache->id, the case
 * labels for the pg_index/pg_am branches, and the return statements are
 * missing from this view.
 */
989 IndexScanOK(CatCache *cache, ScanKey cur_skey)
996 * Rather than tracking exactly which indexes have to be loaded
997 * before we can use indexscans (which changes from time to time),
998 * just force all pg_index searches to be heap scans until we've
999 * built the critical relcaches.
1001 if (!criticalRelcachesBuilt)
1009 * Always do heap scans in pg_am, because it's so small there's
1010 * not much point in an indexscan anyway. We *must* do this when
1011 * initially building critical relcache entries, but we might as
1012 * well just always do it.
1018 case AUTHMEMMEMROLE:
1021 * Protect authentication lookups occurring before relcache has
1022 * collected entries for shared indexes.
1024 if (!criticalSharedRelcachesBuilt)
1032 /* Normal case, allow index scan */
1039 * This call searches a system cache for a tuple, opening the relation
1040 * if necessary (on the first access to a particular cache).
1042 * The result is NULL if not found, or a pointer to a HeapTuple in
1043 * the cache. The caller must not modify the tuple, and must call
1044 * ReleaseCatCache() when done with it.
1046 * The search key values should be expressed as Datums of the key columns'
1047 * datatype(s). (Pass zeroes for any unused parameters.) As a special
1048 * exception, the passed-in key for a NAME column can be just a C string;
1049 * the caller need not go to the trouble of converting it to a fully
/*
 * SearchCatCache
 *
 * Main cache lookup: hash the caller's keys, probe the bucket, and on a
 * hit move the entry to the bucket front and return it pinned via the
 * ResourceOwner machinery (or report failure for a negative entry).  On a
 * miss, scan the catalog (index scan only when IndexScanOK allows it),
 * enter the tuple found -- or, outside bootstrap mode, enter a negative
 * entry built from a dummy tuple so repeated failing lookups stay cheap.
 * NOTE(review): decimated listing -- variable declarations, the
 * HeapKeyTest result check, refcount bumps, and return statements are
 * missing from this view; treat line order here as indicative only.
 */
1053 SearchCatCache(CatCache *cache,
1059 ScanKeyData cur_skey[CATCACHE_MAXKEYS];
1065 SysScanDesc scandesc;
1069 * one-time startup overhead for each cache
1071 if (cache->cc_tupdesc == NULL)
1072 CatalogCacheInitializeCache(cache);
1074 #ifdef CATCACHE_STATS
1075 cache->cc_searches++;
1079 * initialize the search key information
1081 memcpy(cur_skey, cache->cc_skey, sizeof(cur_skey));
1082 cur_skey[0].sk_argument = v1;
1083 cur_skey[1].sk_argument = v2;
1084 cur_skey[2].sk_argument = v3;
1085 cur_skey[3].sk_argument = v4;
1088 * find the hash bucket in which to look for the tuple
1090 hashValue = CatalogCacheComputeHashValue(cache, cache->cc_nkeys, cur_skey);
1091 hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);
1094 * scan the hash bucket until we find a match or exhaust our tuples
1096 for (elt = DLGetHead(&cache->cc_bucket[hashIndex]);
1098 elt = DLGetSucc(elt))
1102 ct = (CatCTup *) DLE_VAL(elt);
1105 continue; /* ignore dead entries */
1107 if (ct->hash_value != hashValue)
1108 continue; /* quickly skip entry if wrong hash val */
1111 * see if the cached tuple matches our key.
1113 HeapKeyTest(&ct->tuple,
1122 * We found a match in the cache. Move it to the front of the list
1123 * for its hashbucket, in order to speed subsequent searches. (The
1124 * most frequently accessed elements in any hashbucket will tend to be
1125 * near the front of the hashbucket's list.)
1127 DLMoveToFront(&ct->cache_elem);
1130 * If it's a positive entry, bump its refcount and return it. If it's
1131 * negative, we can report failure to the caller.
1135 ResourceOwnerEnlargeCatCacheRefs(CurrentResourceOwner);
1137 ResourceOwnerRememberCatCacheRef(CurrentResourceOwner, &ct->tuple);
1139 CACHE3_elog(DEBUG2, "SearchCatCache(%s): found in bucket %d",
1140 cache->cc_relname, hashIndex);
1142 #ifdef CATCACHE_STATS
1150 CACHE3_elog(DEBUG2, "SearchCatCache(%s): found neg entry in bucket %d",
1151 cache->cc_relname, hashIndex);
1153 #ifdef CATCACHE_STATS
1154 cache->cc_neg_hits++;
1162 * Tuple was not found in cache, so we have to try to retrieve it directly
1163 * from the relation. If found, we will add it to the cache; if not
1164 * found, we will add a negative cache entry instead.
1166 * NOTE: it is possible for recursive cache lookups to occur while reading
1167 * the relation --- for example, due to shared-cache-inval messages being
1168 * processed during heap_open(). This is OK. It's even possible for one
1169 * of those lookups to find and enter the very same tuple we are trying to
1170 * fetch here. If that happens, we will enter a second copy of the tuple
1171 * into the cache. The first copy will never be referenced again, and
1172 * will eventually age out of the cache, so there's no functional problem.
1173 * This case is rare enough that it's not worth expending extra cycles to
1176 relation = heap_open(cache->cc_reloid, AccessShareLock);
1178 scandesc = systable_beginscan(relation,
1180 IndexScanOK(cache, cur_skey),
1187 while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
1189 ct = CatalogCacheCreateEntry(cache, ntp,
1190 hashValue, hashIndex,
1192 /* immediately set the refcount to 1 */
1193 ResourceOwnerEnlargeCatCacheRefs(CurrentResourceOwner);
1195 ResourceOwnerRememberCatCacheRef(CurrentResourceOwner, &ct->tuple);
1196 break; /* assume only one match */
1199 systable_endscan(scandesc);
1201 heap_close(relation, AccessShareLock);
1204 * If tuple was not found, we need to build a negative cache entry
1205 * containing a fake tuple. The fake tuple has the correct key columns,
1206 * but nulls everywhere else.
1208 * In bootstrap mode, we don't build negative entries, because the cache
1209 * invalidation mechanism isn't alive and can't clear them if the tuple
1210 * gets created later. (Bootstrap doesn't do UPDATEs, so it doesn't need
1211 * cache inval for that.)
1215 if (IsBootstrapProcessingMode())
1218 ntp = build_dummy_tuple(cache, cache->cc_nkeys, cur_skey);
1219 ct = CatalogCacheCreateEntry(cache, ntp,
1220 hashValue, hashIndex,
1222 heap_freetuple(ntp);
1224 CACHE4_elog(DEBUG2, "SearchCatCache(%s): Contains %d/%d tuples",
1225 cache->cc_relname, cache->cc_ntup, CacheHdr->ch_ntup);
1226 CACHE3_elog(DEBUG2, "SearchCatCache(%s): put neg entry in bucket %d",
1227 cache->cc_relname, hashIndex);
1230 * We are not returning the negative entry to the caller, so leave its
1237 CACHE4_elog(DEBUG2, "SearchCatCache(%s): Contains %d/%d tuples",
1238 cache->cc_relname, cache->cc_ntup, CacheHdr->ch_ntup);
1239 CACHE3_elog(DEBUG2, "SearchCatCache(%s): put in bucket %d",
1240 cache->cc_relname, hashIndex);
1242 #ifdef CATCACHE_STATS
1243 cache->cc_newloads++;
1252 * Decrement the reference count of a catcache entry (releasing the
1253 * hold grabbed by a successful SearchCatCache).
1255 * NOTE: if compiled with -DCATCACHE_FORCE_RELEASE then catcache entries
1256 * will be freed as soon as their refcount goes to zero. In combination
1257 * with aset.c's CLOBBER_FREED_MEMORY option, this provides a good test
1258 * to catch references to already-released catcache entries.
/*
 * ReleaseCatCache
 *
 * Drop the pin acquired by SearchCatCache.  Recovers the CatCTup from the
 * embedded tuple via offsetof, validates the magic number and refcount,
 * forgets the ResourceOwner reference, and -- once the refcount reaches
 * zero -- removes the entry if it is dead and not kept alive by a list
 * (removal is unconditional under CATCACHE_FORCE_RELEASE).
 * NOTE(review): the refcount decrement and the ct->dead test are among the
 * lines missing from this decimated listing.
 */
1261 ReleaseCatCache(HeapTuple tuple)
1263 CatCTup *ct = (CatCTup *) (((char *) tuple) -
1264 offsetof(CatCTup, tuple));
1266 /* Safety checks to ensure we were handed a cache entry */
1267 Assert(ct->ct_magic == CT_MAGIC);
1268 Assert(ct->refcount > 0);
1271 ResourceOwnerForgetCatCacheRef(CurrentResourceOwner, &ct->tuple);
1274 #ifndef CATCACHE_FORCE_RELEASE
1277 ct->refcount == 0 &&
1278 (ct->c_list == NULL || ct->c_list->refcount == 0))
1279 CatCacheRemoveCTup(ct->my_cache, ct);
1284 * SearchCatCacheList
1286 * Generate a list of all tuples matching a partial key (that is,
1287 * a key specifying just the first K of the cache's N key columns).
1289 * The caller must not modify the list object or the pointed-to tuples,
1290 * and must call ReleaseCatCacheList() when done with the list.
1293 SearchCatCacheList(CatCache *cache,
1300 ScanKeyData cur_skey[CATCACHE_MAXKEYS];
1305 List *volatile ctlist;
1306 ListCell *ctlist_item;
1310 MemoryContext oldcxt;
1314 * one-time startup overhead for each cache
1316 if (cache->cc_tupdesc == NULL)
1317 CatalogCacheInitializeCache(cache);
1319 Assert(nkeys > 0 && nkeys < cache->cc_nkeys);
1321 #ifdef CATCACHE_STATS
1322 cache->cc_lsearches++;
1326 * initialize the search key information
1328 memcpy(cur_skey, cache->cc_skey, sizeof(cur_skey));
1329 cur_skey[0].sk_argument = v1;
1330 cur_skey[1].sk_argument = v2;
1331 cur_skey[2].sk_argument = v3;
1332 cur_skey[3].sk_argument = v4;
1335 * compute a hash value of the given keys for faster search. We don't
1336 * presently divide the CatCList items into buckets, but this still lets
1337 * us skip non-matching items quickly most of the time.
1339 lHashValue = CatalogCacheComputeHashValue(cache, nkeys, cur_skey);
1342 * scan the items until we find a match or exhaust our list
1344 for (elt = DLGetHead(&cache->cc_lists);
1346 elt = DLGetSucc(elt))
1350 cl = (CatCList *) DLE_VAL(elt);
1353 continue; /* ignore dead entries */
1355 if (cl->hash_value != lHashValue)
1356 continue; /* quickly skip entry if wrong hash val */
1359 * see if the cached list matches our key.
1361 if (cl->nkeys != nkeys)
1363 HeapKeyTest(&cl->tuple,
1372 * We found a matching list. Move the list to the front of the
1373 * cache's list-of-lists, to speed subsequent searches. (We do not
1374 * move the members to the fronts of their hashbucket lists, however,
1375 * since there's no point in that unless they are searched for
1378 DLMoveToFront(&cl->cache_elem);
1380 /* Bump the list's refcount and return it */
1381 ResourceOwnerEnlargeCatCacheListRefs(CurrentResourceOwner);
1383 ResourceOwnerRememberCatCacheListRef(CurrentResourceOwner, cl);
1385 CACHE2_elog(DEBUG2, "SearchCatCacheList(%s): found list",
1388 #ifdef CATCACHE_STATS
1396 * List was not found in cache, so we have to build it by reading the
1397 * relation. For each matching tuple found in the relation, use an
1398 * existing cache entry if possible, else build a new one.
1400 * We have to bump the member refcounts temporarily to ensure they won't
1401 * get dropped from the cache while loading other members. We use a PG_TRY
1402 * block to ensure we can undo those refcounts if we get an error before
1403 * we finish constructing the CatCList.
1405 ResourceOwnerEnlargeCatCacheListRefs(CurrentResourceOwner);
1412 SysScanDesc scandesc;
1414 relation = heap_open(cache->cc_reloid, AccessShareLock);
1416 scandesc = systable_beginscan(relation,
1418 IndexScanOK(cache, cur_skey),
1423 /* The list will be ordered iff we are doing an index scan */
1424 ordered = (scandesc->irel != NULL);
1426 while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
1432 * See if there's an entry for this tuple already.
1435 hashValue = CatalogCacheComputeTupleHashValue(cache, ntp);
1436 hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);
1438 for (elt = DLGetHead(&cache->cc_bucket[hashIndex]);
1440 elt = DLGetSucc(elt))
1442 ct = (CatCTup *) DLE_VAL(elt);
1444 if (ct->dead || ct->negative)
1445 continue; /* ignore dead and negative entries */
1447 if (ct->hash_value != hashValue)
1448 continue; /* quickly skip entry if wrong hash val */
1450 if (!ItemPointerEquals(&(ct->tuple.t_self), &(ntp->t_self)))
1451 continue; /* not same tuple */
1454 * Found a match, but can't use it if it belongs to another
1465 /* We didn't find a usable entry, so make a new one */
1466 ct = CatalogCacheCreateEntry(cache, ntp,
1467 hashValue, hashIndex,
1471 /* Careful here: add entry to ctlist, then bump its refcount */
1472 /* This way leaves state correct if lappend runs out of memory */
1473 ctlist = lappend(ctlist, ct);
1477 systable_endscan(scandesc);
1479 heap_close(relation, AccessShareLock);
1482 * Now we can build the CatCList entry. First we need a dummy tuple
1483 * containing the key values...
1485 ntp = build_dummy_tuple(cache, nkeys, cur_skey);
1486 oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
1487 nmembers = list_length(ctlist);
1489 palloc(sizeof(CatCList) + nmembers * sizeof(CatCTup *));
1490 heap_copytuple_with_tuple(ntp, &cl->tuple);
1491 MemoryContextSwitchTo(oldcxt);
1492 heap_freetuple(ntp);
1495 * We are now past the last thing that could trigger an elog before we
1496 * have finished building the CatCList and remembering it in the
1497 * resource owner. So it's OK to fall out of the PG_TRY, and indeed
1498 * we'd better do so before we start marking the members as belonging
1505 foreach(ctlist_item, ctlist)
1507 ct = (CatCTup *) lfirst(ctlist_item);
1508 Assert(ct->c_list == NULL);
1509 Assert(ct->refcount > 0);
1512 #ifndef CATCACHE_FORCE_RELEASE
1515 ct->refcount == 0 &&
1516 (ct->c_list == NULL || ct->c_list->refcount == 0))
1517 CatCacheRemoveCTup(cache, ct);
1524 cl->cl_magic = CL_MAGIC;
1525 cl->my_cache = cache;
1526 DLInitElem(&cl->cache_elem, cl);
1527 cl->refcount = 0; /* for the moment */
1529 cl->ordered = ordered;
1531 cl->hash_value = lHashValue;
1532 cl->n_members = nmembers;
1535 foreach(ctlist_item, ctlist)
1537 cl->members[i++] = ct = (CatCTup *) lfirst(ctlist_item);
1538 Assert(ct->c_list == NULL);
1540 /* release the temporary refcount on the member */
1541 Assert(ct->refcount > 0);
1543 /* mark list dead if any members already dead */
1547 Assert(i == nmembers);
1549 DLAddHead(&cache->cc_lists, &cl->cache_elem);
1551 /* Finally, bump the list's refcount and return it */
1553 ResourceOwnerRememberCatCacheListRef(CurrentResourceOwner, cl);
1555 CACHE3_elog(DEBUG2, "SearchCatCacheList(%s): made list of %d members",
1556 cache->cc_relname, nmembers);
1562 * ReleaseCatCacheList
1564 * Decrement the reference count of a catcache list.
/*
 * ReleaseCatCacheList
 *	Decrement the reference count of a previously-searched catcache list.
 *
 * NOTE(review): this extract is elided — the function's braces and the
 * statement that actually decrements list->refcount are not visible here;
 * the comments below describe only the visible lines.
 */
1567 ReleaseCatCacheList(CatCList *list)
1569 /* Safety checks to ensure we were handed a cache entry */
1570 Assert(list->cl_magic == CL_MAGIC);
1571 Assert(list->refcount > 0);
/* Release the current resource owner's record of this list reference */
1573 ResourceOwnerForgetCatCacheListRef(CurrentResourceOwner, list);
/*
 * If no references remain, physically remove the list from the cache.
 * The full condition is partly elided here; presumably it also tests
 * list->dead — confirm against the original source.  CATCACHE_FORCE_RELEASE
 * builds skip this so leaks are easier to spot.
 */
1576 #ifndef CATCACHE_FORCE_RELEASE
1579 list->refcount == 0)
1580 CatCacheRemoveCList(list->my_cache, list);
1585 * CatalogCacheCreateEntry
1586 * Create a new CatCTup entry, copying the given HeapTuple and other
1587 * supplied data into it. The new entry initially has refcount 0.
/*
 * CatalogCacheCreateEntry
 *	Build a new CatCTup holding a copy of the given tuple and insert it
 *	into the cache's hash bucket.  The entry starts with refcount zero.
 *
 * NOTE(review): several lines of this function are elided in this extract
 * (e.g. the local declaration of ct and the final return statement); the
 * comments below cover only what is visible.
 */
1590 CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp,
1591 uint32 hashValue, Index hashIndex, bool negative)
1594 MemoryContext oldcxt;
1597 * Allocate CatCTup header in cache memory, and copy the tuple there too.
/* Switch to the long-lived cache context so the entry outlives this call */
1599 oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
1600 ct = (CatCTup *) palloc(sizeof(CatCTup));
1601 heap_copytuple_with_tuple(ntp, &ct->tuple);
1602 MemoryContextSwitchTo(oldcxt);
1605 * Finish initializing the CatCTup header, and add it to the cache's
1606 * linked list and counts.
1608 ct->ct_magic = CT_MAGIC;
1609 ct->my_cache = cache;
1610 DLInitElem(&ct->cache_elem, (void *) ct);
1612 ct->refcount = 0; /* for the moment */
1614 ct->negative = negative;
1615 ct->hash_value = hashValue;
/* New entries go to the front of their hash bucket's chain */
1617 DLAddHead(&cache->cc_bucket[hashIndex], &ct->cache_elem);
/* Maintain the global count of cached tuples */
1620 CacheHdr->ch_ntup++;
1627 * Generate a palloc'd HeapTuple that contains the specified key
1628 * columns, and NULLs for other columns.
1630 * This is used to store the keys for negative cache entries and CatCList
1631 * entries, which don't have real tuples associated with them.
/*
 * build_dummy_tuple
 *	Construct a palloc'd HeapTuple containing the given search-key
 *	values, with every non-key column set to NULL.  Used for negative
 *	cache entries and CatCList headers, which have no real tuple.
 *
 * NOTE(review): this extract is elided (declarations of values, nulls, i
 * and ntp, plus some control-flow lines, are missing); the comments below
 * describe only the visible code.
 */
1634 build_dummy_tuple(CatCache *cache, int nkeys, ScanKey skeys)
1637 TupleDesc tupDesc = cache->cc_tupdesc;
1640 Oid tupOid = InvalidOid;
1641 NameData tempNames[4];
/* Start with every column NULL; key columns are filled in below */
1644 values = (Datum *) palloc(tupDesc->natts * sizeof(Datum));
1645 nulls = (bool *) palloc(tupDesc->natts * sizeof(bool));
1647 memset(values, 0, tupDesc->natts * sizeof(Datum));
1648 memset(nulls, true, tupDesc->natts * sizeof(bool));
1650 for (i = 0; i < nkeys; i++)
1652 int attindex = cache->cc_key[i];
1653 Datum keyval = skeys[i].sk_argument;
1658 * Here we must be careful in case the caller passed a C string
1659 * where a NAME is wanted: convert the given argument to a
1660 * correctly padded NAME. Otherwise the memcpy() done in
1661 * heap_form_tuple could fall off the end of memory.
1663 if (cache->cc_isname[i])
1665 Name newval = &tempNames[i];
1667 namestrcpy(newval, DatumGetCString(keyval));
1668 keyval = NameGetDatum(newval);
1670 values[attindex - 1] = keyval;
1671 nulls[attindex - 1] = false;
/* System attribute path: only the row OID is supported as a key here */
1675 Assert(attindex == ObjectIdAttributeNumber);
1676 tupOid = DatumGetObjectId(keyval);
1680 ntp = heap_form_tuple(tupDesc, values, nulls);
/* Store the OID in the tuple header if it was one of the key values */
1681 if (tupOid != InvalidOid)
1682 HeapTupleSetOid(ntp, tupOid);
1692 * PrepareToInvalidateCacheTuple()
1694 * This is part of a rather subtle chain of events, so pay attention:
1696 * When a tuple is inserted or deleted, it cannot be flushed from the
1697 * catcaches immediately, for reasons explained at the top of cache/inval.c.
1698 * Instead we have to add entry(s) for the tuple to a list of pending tuple
1699 * invalidations that will be done at the end of the command or transaction.
1701 * The lists of tuples that need to be flushed are kept by inval.c. This
1702 * routine is a helper routine for inval.c. Given a tuple belonging to
1703 * the specified relation, find all catcaches it could be in, compute the
1704 * correct hash value for each such catcache, and call the specified
1705 * function to record the cache id and hash value in inval.c's lists.
1706 * CatalogCacheIdInvalidate will be called later, if appropriate,
1707 * using the recorded information.
1709 * For an insert or delete, tuple is the target tuple and newtuple is NULL.
1710 * For an update, we are called just once, with tuple being the old tuple
1711 * version and newtuple the new version. We should make two list entries
1712 * if the tuple's hash value changed, but only one if it didn't.
1714 * Note that it is irrelevant whether the given tuple is actually loaded
1715 * into the catcache at the moment. Even if it's not there now, it might
1716 * be by the end of the command, or there might be a matching negative entry
1717 * to flush --- or other backends' caches might have such entries --- so
1718 * we have to make list entries to flush it later.
1720 * Also note that it's not an error if there are no catcaches for the
1721 * specified relation. inval.c doesn't know exactly which rels have
1722 * catcaches --- it will call this routine for any tuple that's in a
/*
 * PrepareToInvalidateCacheTuple
 *	For each catcache built over the given relation, compute the tuple's
 *	hash value and report (cache id, hash value, database oid) through
 *	the supplied callback so inval.c can register pending invalidations.
 *
 * NOTE(review): this extract is elided (local declarations, braces, and
 * some condition lines are missing); the comments below cover only the
 * visible code.
 */
1726 PrepareToInvalidateCacheTuple(Relation relation,
1729 void (*function) (int, uint32, Oid))
1734 CACHE1_elog(DEBUG2, "PrepareToInvalidateCacheTuple: called");
/* sanity checks on all inputs */
1739 Assert(RelationIsValid(relation));
1740 Assert(HeapTupleIsValid(tuple));
1741 Assert(PointerIsValid(function));
1742 Assert(CacheHdr != NULL);
1744 reloid = RelationGetRelid(relation);
1748 * if the cache contains tuples from the specified relation
1749 * compute the tuple's hash value(s) in this cache,
1750 * and call the passed function to register the information.
1754 for (ccp = CacheHdr->ch_caches; ccp; ccp = ccp->cc_next)
/* Skip caches over other catalogs */
1759 if (ccp->cc_reloid != reloid)
1762 /* Just in case cache hasn't finished initialization yet... */
1763 if (ccp->cc_tupdesc == NULL)
1764 CatalogCacheInitializeCache(ccp);
1766 hashvalue = CatalogCacheComputeTupleHashValue(ccp, tuple);
/* Shared catalogs are registered with database id 0 */
1767 dbid = ccp->cc_relisshared ? (Oid) 0 : MyDatabaseId;
1769 (*function) (ccp->id, hashvalue, dbid);
/*
 * For an update, also register the new tuple's hash value, but only
 * when it differs from the old one.  (The guard testing newtuple is
 * elided in this extract — confirm against the original source.)
 */
1773 uint32 newhashvalue;
1775 newhashvalue = CatalogCacheComputeTupleHashValue(ccp, newtuple);
1777 if (newhashvalue != hashvalue)
1778 (*function) (ccp->id, newhashvalue, dbid);
1785 * Subroutines for warning about reference leaks. These are exported so
1786 * that resowner.c can call them.
/*
 * PrintCatCacheLeakWarning
 *	Emit a WARNING describing a catcache tuple reference that was still
 *	held at resource-owner cleanup time (called from resowner.c).
 *
 * NOTE(review): the final elog argument (presumably ct->refcount) and the
 * closing brace are elided in this extract.
 */
1789 PrintCatCacheLeakWarning(HeapTuple tuple)
/* Recover the CatCTup header from the embedded tuple pointer */
1791 CatCTup *ct = (CatCTup *) (((char *) tuple) -
1792 offsetof(CatCTup, tuple));
1794 /* Safety check to ensure we were handed a cache entry */
1795 Assert(ct->ct_magic == CT_MAGIC);
1797 elog(WARNING, "cache reference leak: cache %s (%d), tuple %u/%u has count %d",
1798 ct->my_cache->cc_relname, ct->my_cache->id,
1799 ItemPointerGetBlockNumber(&(tuple->t_self)),
1800 ItemPointerGetOffsetNumber(&(tuple->t_self)),
1805 PrintCatCacheListLeakWarning(CatCList *list)
1807 elog(WARNING, "cache reference leak: cache %s (%d), list %p has count %d",
1808 list->my_cache->cc_relname, list->my_cache->id,
1809 list, list->refcount);