1 /*-------------------------------------------------------------------------
4 * System catalog cache for tuples matching a key.
6 * Portions Copyright (c) 1996-2000, PostgreSQL, Inc
7 * Portions Copyright (c) 1994, Regents of the University of California
11 * $Header: /cvsroot/pgsql/src/backend/utils/cache/catcache.c,v 1.71 2000/11/10 00:33:10 tgl Exp $
13 *-------------------------------------------------------------------------
17 #include "access/genam.h"
18 #include "access/hash.h"
19 #include "access/heapam.h"
20 #include "access/valid.h"
21 #include "catalog/pg_operator.h"
22 #include "catalog/pg_type.h"
23 #include "catalog/catname.h"
24 #include "catalog/indexing.h"
25 #include "miscadmin.h"
26 #include "utils/builtins.h"
27 #include "utils/fmgroids.h"
28 #include "utils/catcache.h"
29 #include "utils/syscache.h"
/*
 * Forward declarations for file-local (static) routines.
 *
 * NOTE(review): the argument lists of the two ...ComputeHashIndex
 * prototypes are cut off mid-declaration -- this copy of the file is
 * missing intervening lines throughout (note the gaps in the embedded
 * line numbers).  Verify against a pristine copy before compiling.
 */
31 static void CatCacheRemoveCTup(CatCache *cache, Dlelem *e);
32 static Index CatalogCacheComputeHashIndex(CatCache *cache,
34 static Index CatalogCacheComputeTupleHashIndex(CatCache *cache,
36 static void CatalogCacheInitializeCache(CatCache *cache);
37 static Datum cc_hashname(PG_FUNCTION_ARGS);
/*
 * Debug-logging helper macros.  When cache debugging is compiled in,
 * CACHEn_elog(level, fmt, ...) expands to an elog() call taking n-1
 * format arguments; in normal builds each expands to nothing, so the
 * many trace calls sprinkled through this file cost nothing.
 *
 * NOTE(review): both the enabled and the empty definitions appear here
 * back-to-back; the #ifdef CACHEDEBUG / #else / #endif lines that must
 * separate them have been lost from this copy of the file.
 */
40 * variables, macros and other stuff
45 #define CACHE1_elog(a,b) elog(a,b)
46 #define CACHE2_elog(a,b,c) elog(a,b,c)
47 #define CACHE3_elog(a,b,c,d) elog(a,b,c,d)
48 #define CACHE4_elog(a,b,c,d,e) elog(a,b,c,d,e)
49 #define CACHE5_elog(a,b,c,d,e,f) elog(a,b,c,d,e,f)
50 #define CACHE6_elog(a,b,c,d,e,f,g) elog(a,b,c,d,e,f,g)
52 #define CACHE1_elog(a,b)
53 #define CACHE2_elog(a,b,c)
54 #define CACHE3_elog(a,b,c,d)
55 #define CACHE4_elog(a,b,c,d,e)
56 #define CACHE5_elog(a,b,c,d,e,f)
57 #define CACHE6_elog(a,b,c,d,e,f,g)
/*
 * Caches: head of the singly-linked list (chained via cc_next) of all
 * catcaches that exist in this backend.  Traversed by the invalidation
 * and reset routines below.
 */
60 static CatCache *Caches = NULL; /* head of list of caches */
64 * EQPROC is used in CatalogCacheInitializeCache to find the equality
65 * functions for system types that are used as cache key fields.
66 * See also GetCCHashFunc, which should support the same set of types.
68 * XXX this should be replaced by catalog lookups,
69 * but that seems to pose considerable risk of circularity...
/*
 * eqproc[] maps a key-column type OID to the OID of its equality
 * procedure.  It is indexed by (type OID - BOOLOID), per the EQPROC
 * macro below; InvalidOid marks type OIDs in that range that are not
 * supported as cache keys.  The table only works because the supported
 * system type OIDs happen to be contiguous starting at BOOLOID.
 */
72 static const Oid eqproc[] = {
73 F_BOOLEQ, InvalidOid, F_CHAREQ, F_NAMEEQ, InvalidOid,
74 F_INT2EQ, F_INT2VECTOREQ, F_INT4EQ, F_OIDEQ, F_TEXTEQ,
75 F_OIDEQ, InvalidOid, InvalidOid, InvalidOid, F_OIDVECTOREQ
78 #define EQPROC(SYSTEMTYPEOID) eqproc[(SYSTEMTYPEOID)-BOOLOID]
80 /* ----------------------------------------------------------------
81 * internal support functions
82 * ----------------------------------------------------------------
/*
 * GetCCHashFunc
 *		Map a cache-key type OID to the hash function used to assign
 *		entries with that key type to hash buckets.  An unsupported
 *		type elog(FATAL)s; the trailing return NULL only silences the
 *		compiler and is never reached.
 *
 * NOTE(review): the switch statement and most case labels selecting
 * among the hash functions (only hashint2vector / hashoidvector
 * returns survive here) are missing from this copy of the file.
 */
86 GetCCHashFunc(Oid keytype)
98 return hashint2vector;
107 return hashoidvector;
109 elog(FATAL, "GetCCHashFunc: type %u unsupported as catcache key",
111 return (PGFunction) NULL;
/*
 * cc_hashname
 *		Variant of hashname() used for name-type cache keys.  A search
 *		key may arrive as a plain null-terminated C string rather than
 *		a full Name, so we copy it into a correctly padded local Name
 *		buffer (my_n, via namestrcpy) before hashing; otherwise garbage
 *		past the terminator could perturb the hash value.
 */
116 cc_hashname(PG_FUNCTION_ARGS)
120 * We need our own variant of hashname because we want to accept
121 * null-terminated C strings as search values for name fields. So, we
122 * have to make sure the data is correctly padded before we compute
127 namestrcpy(&my_n, NameStr(* PG_GETARG_NAME(0)));
129 return DirectFunctionCall1(hashname, NameGetDatum(&my_n));
134 * Standard routine for creating cache context if it doesn't exist yet
136 * There are a lot of places (probably far more than necessary) that check
137 * whether CacheMemoryContext exists yet and want to create it if not.
138 * We centralize knowledge of exactly how to create it here.
/*
 * CreateCacheMemoryContext
 *		Create CacheMemoryContext as a child of TopMemoryContext using
 *		the default allocation-set parameters, so cache entries survive
 *		transaction end.  Idempotent: a no-op if the context already
 *		exists, hence safe to call from every would-be creator.
 */
141 CreateCacheMemoryContext(void)
143 /* Purely for paranoia, check that context doesn't exist;
144 * caller probably did so already.
146 if (!CacheMemoryContext)
147 CacheMemoryContext = AllocSetContextCreate(TopMemoryContext,
148 "CacheMemoryContext",
149 ALLOCSET_DEFAULT_MINSIZE,
150 ALLOCSET_DEFAULT_INITSIZE,
151 ALLOCSET_DEFAULT_MAXSIZE);
155 /* --------------------------------
156 * CatalogCacheInitializeCache
158 * This function does final initialization of a catcache: obtain the tuple
159 * descriptor and set up the hash and equality function links. We assume
160 * that the relcache entry can be opened at this point!
161 * --------------------------------
/*
 * Debug macros expanded inside CatalogCacheInitializeCache: DEBUG1
 * logs the cache being initialized, DEBUG2 logs each key column as it
 * is loaded (with its type OID for ordinary attributes).  As with the
 * CACHEn_elog macros above, the empty fallback definitions follow the
 * real ones -- the #ifdef/#else/#endif guards are missing from this
 * copy of the file.
 */
164 #define CatalogCacheInitializeCache_DEBUG1 \
165 elog(DEBUG, "CatalogCacheInitializeCache: cache @%p %s", cache, \
168 #define CatalogCacheInitializeCache_DEBUG2 \
170 if (cache->cc_key[i] > 0) { \
171 elog(DEBUG, "CatalogCacheInitializeCache: load %d/%d w/%d, %u", \
172 i+1, cache->cc_nkeys, cache->cc_key[i], \
173 tupdesc->attrs[cache->cc_key[i] - 1]->atttypid); \
175 elog(DEBUG, "CatalogCacheInitializeCache: load %d/%d w/%d", \
176 i+1, cache->cc_nkeys, cache->cc_key[i]); \
181 #define CatalogCacheInitializeCache_DEBUG1
182 #define CatalogCacheInitializeCache_DEBUG2
/*
 * CatalogCacheInitializeCache
 *		Finish initializing a catcache on first use: open the cached
 *		relation (without lock -- we only need its tuple descriptor,
 *		assumed immutable), copy that descriptor into CacheMemoryContext
 *		so it outlives the transaction, and fill in per-key hash
 *		functions and equality scan keys.  cc_tupdesc is assigned last,
 *		since a non-NULL cc_tupdesc is what marks the cache as fully
 *		initialized to callers.
 */
186 CatalogCacheInitializeCache(CatCache *cache)
189 MemoryContext oldcxt;
193 CatalogCacheInitializeCache_DEBUG1;
196 * Open the relation without locking --- we only need the tupdesc,
197 * which we assume will never change ...
199 relation = heap_openr(cache->cc_relname, NoLock);
200 Assert(RelationIsValid(relation));
203 * switch to the cache context so our allocations
204 * do not vanish at the end of a transaction
207 if (!CacheMemoryContext)
208 CreateCacheMemoryContext();
210 oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
213 * copy the relcache's tuple descriptor to permanent cache storage
216 tupdesc = CreateTupleDescCopyConstr(RelationGetDescr(relation));
219 * return to the caller's memory context and close the rel
222 MemoryContextSwitchTo(oldcxt);
224 heap_close(relation, NoLock);
226 CACHE3_elog(DEBUG, "CatalogCacheInitializeCache: %s, %d keys",
227 cache->cc_relname, cache->cc_nkeys);
/*
 * Per-key setup: a positive cc_key[] entry is an ordinary attribute
 * number (key type read from the tupdesc); the only supported system
 * attribute is OID.  NOTE(review): the branch that presumably sets
 * keytype to OIDOID for the system-attribute case is among the lines
 * missing from this copy.
 */
230 * initialize cache's key information
233 for (i = 0; i < cache->cc_nkeys; ++i)
237 CatalogCacheInitializeCache_DEBUG2;
239 if (cache->cc_key[i] > 0)
241 keytype = tupdesc->attrs[cache->cc_key[i] - 1]->atttypid;
245 if (cache->cc_key[i] != ObjectIdAttributeNumber)
246 elog(FATAL, "CatalogCacheInit: only sys attr supported is OID");
250 cache->cc_hashfunc[i] = GetCCHashFunc(keytype);
252 * If GetCCHashFunc liked the type, safe to index into eqproc[]
254 cache->cc_skey[i].sk_procedure = EQPROC(keytype);
256 fmgr_info(cache->cc_skey[i].sk_procedure,
257 &cache->cc_skey[i].sk_func);
258 cache->cc_skey[i].sk_nargs = cache->cc_skey[i].sk_func.fn_nargs;
260 /* Initialize sk_attno suitably for index scans */
261 cache->cc_skey[i].sk_attno = i+1;
263 CACHE4_elog(DEBUG, "CatalogCacheInit %s %d %p",
270 * mark this cache fully initialized
273 cache->cc_tupdesc = tupdesc;
276 /* --------------------------------
277 * CatalogCacheComputeHashIndex
278 * --------------------------------
/*
 * CatalogCacheComputeHashIndex
 *		Compute the hash-bucket index for a (fully prepared) scan key.
 *		Each key column's value is hashed with the cc_hashfunc chosen
 *		at cache init; the per-key hashes are combined with distinct
 *		left shifts (9/6/3/0 bits for keys 4..1) and the result is
 *		reduced modulo cc_size to a bucket number.
 *
 * NOTE(review): the switch's case labels (and the fallthrough
 * structure that makes an N-key cache accumulate keys N..1) are
 * missing from this copy -- the DirectFunctionCall1 fragments below
 * are the surviving case bodies.
 */
281 CatalogCacheComputeHashIndex(CatCache *cache, ScanKey cur_skey)
283 uint32 hashIndex = 0;
285 CACHE4_elog(DEBUG, "CatalogCacheComputeHashIndex %s %d %p",
290 switch (cache->cc_nkeys)
294 DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[3],
295 cur_skey[3].sk_argument)) << 9;
299 DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[2],
300 cur_skey[2].sk_argument)) << 6;
304 DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[1],
305 cur_skey[1].sk_argument)) << 3;
309 DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[0],
310 cur_skey[0].sk_argument));
313 elog(FATAL, "CCComputeHashIndex: %d cc_nkeys", cache->cc_nkeys);
316 hashIndex %= (uint32) cache->cc_size;
317 return (Index) hashIndex;
320 /* --------------------------------
321 * CatalogCacheComputeTupleHashIndex
322 * --------------------------------
/*
 * CatalogCacheComputeTupleHashIndex
 *		Compute the hash-bucket index an existing heap tuple belongs
 *		in: build a scan key by copying the cache's pre-initialized
 *		scan-key skeleton and filling in each key argument from the
 *		tuple itself, then delegate to CatalogCacheComputeHashIndex.
 *		A key on the OID system attribute reads t_data->t_oid directly.
 *
 * NOTE(review): the "?"-expressions' else arms (presumably
 * heap_getattr extraction for ordinary attributes), the case labels,
 * and the fallthroughs are missing from this copy.
 */
325 CatalogCacheComputeTupleHashIndex(CatCache *cache,
328 ScanKeyData cur_skey[4];
331 /* Copy pre-initialized overhead data for scankey */
332 memcpy(cur_skey, cache->cc_skey, sizeof(cur_skey));
334 /* Now extract key fields from tuple, insert into scankey */
335 switch (cache->cc_nkeys)
338 cur_skey[3].sk_argument =
339 (cache->cc_key[3] == ObjectIdAttributeNumber)
340 ? ObjectIdGetDatum(tuple->t_data->t_oid)
348 cur_skey[2].sk_argument =
349 (cache->cc_key[2] == ObjectIdAttributeNumber)
350 ? ObjectIdGetDatum(tuple->t_data->t_oid)
358 cur_skey[1].sk_argument =
359 (cache->cc_key[1] == ObjectIdAttributeNumber)
360 ? ObjectIdGetDatum(tuple->t_data->t_oid)
368 cur_skey[0].sk_argument =
369 (cache->cc_key[0] == ObjectIdAttributeNumber)
370 ? ObjectIdGetDatum(tuple->t_data->t_oid)
378 elog(FATAL, "CCComputeTupleHashIndex: %d cc_nkeys",
383 return CatalogCacheComputeHashIndex(cache, cur_skey);
386 /* --------------------------------
388 * --------------------------------
/*
 * CatCacheRemoveCTup
 *		Remove one cache entry.  Every cached tuple is represented by a
 *		pair of Dlelem nodes -- one on the hash-bucket list and one on
 *		the LRU list -- cross-linked through ct_node, so given either
 *		element we can find and free its partner (other_elt), the
 *		CatCTup holders, and the heap tuple itself.
 *
 * NOTE(review): the DLRemove/DLFreeElem call for the first element and
 * the cc_ntup decrement are among the lines missing from this copy.
 */
391 CatCacheRemoveCTup(CatCache *cache, Dlelem *elt)
397 if (!elt) /* probably-useless safety check */
400 /* We need to zap both linked-list elements as well as the tuple */
402 ct = (CatCTup *) DLE_VAL(elt);
403 other_elt = ct->ct_node;
404 other_ct = (CatCTup *) DLE_VAL(other_elt);
406 heap_freetuple(ct->ct_tup);
409 DLFreeElem(other_elt);
418 /* --------------------------------
419 * CatalogCacheIdInvalidate()
421 * Invalidate a tuple given a cache id. In this case the id should always
422 * be found (whether the cache has opened its relation or not). Of course,
423 * if the cache has yet to open its relation, there will be no tuples so
425 * --------------------------------
/*
 * CatalogCacheIdInvalidate
 *		Invalidate the cache entry (if any) for the tuple identified by
 *		(cacheId, hashIndex, pointer): walk the cache list for the
 *		cache with the matching id, scan that cache's hashIndex bucket,
 *		and remove the entry whose tuple's t_self equals the given
 *		ItemPointer.  A cache that has not yet opened its relation
 *		simply has no tuples, so finding nothing is fine.
 */
428 CatalogCacheIdInvalidate(int cacheId, /* XXX */
440 Assert(hashIndex < NCCBUCK);
441 Assert(ItemPointerIsValid(pointer));
442 CACHE1_elog(DEBUG, "CatalogCacheIdInvalidate: called");
445 * inspect every cache that could contain the tuple
448 for (ccp = Caches; ccp; ccp = ccp->cc_next)
450 if (cacheId != ccp->id)
453 * inspect the hash bucket until we find a match or exhaust
456 for (elt = DLGetHead(ccp->cc_cache[hashIndex]);
458 elt = DLGetSucc(elt))
460 ct = (CatCTup *) DLE_VAL(elt);
461 if (ItemPointerEquals(pointer, &ct->ct_tup->t_self))
466 * if we found a matching tuple, invalidate it.
472 CatCacheRemoveCTup(ccp, elt);
474 CACHE1_elog(DEBUG, "CatalogCacheIdInvalidate: invalidated");
477 if (cacheId != InvalidCatalogCacheId)
482 /* ----------------------------------------------------------------
488 * RelationInvalidateCatalogCacheTuple
489 * ----------------------------------------------------------------
491 /* --------------------------------
493 * --------------------------------
/*
 * ResetSystemCache
 *		Discard every entry in every catcache: for each cache on the
 *		Caches list, walk each hash bucket and remove each tuple via
 *		CatCacheRemoveCTup (saving the successor first, since removal
 *		frees the current list element).  Afterwards cc_ntup must be
 *		zero for every cache; a nonzero count indicates list corruption
 *		and is reported.
 */
496 ResetSystemCache(void)
500 CACHE1_elog(DEBUG, "ResetSystemCache called");
503 * here we purge the contents of all the caches
505 * for each system cache
506 * for each hash bucket
507 * for each tuple in hash bucket
511 for (cache = Caches; PointerIsValid(cache); cache = cache->cc_next)
515 for (hash = 0; hash < NCCBUCK; hash += 1)
520 for (elt = DLGetHead(cache->cc_cache[hash]); elt; elt = nextelt)
522 nextelt = DLGetSucc(elt);
523 CatCacheRemoveCTup(cache, elt);
527 /* double-check that ntup is now zero */
528 if (cache->cc_ntup != 0)
531 "ResetSystemCache: cache %d has cc_ntup = %d, should be 0",
532 cache->id, cache->cc_ntup);
537 CACHE1_elog(DEBUG, "end of ResetSystemCache call");
540 /* --------------------------------
541 * SystemCacheRelationFlushed
543 * This is called by RelationFlushRelation() to clear out cached information
544 * about a relation being dropped. (This could be a DROP TABLE command,
545 * or a temp table being dropped at end of transaction, or a table created
546 * during the current transaction that is being dropped because of abort.)
547 * Remove all cache entries relevant to the specified relation OID.
549 * A special case occurs when relId is itself one of the cacheable system
550 * tables --- although those'll never be dropped, they can get flushed from
551 * the relcache (VACUUM causes this, for example). In that case we need
552 * to flush all cache entries from that table. The brute-force method
553 * currently used takes care of that quite handily. (At one point we
554 * also tried to force re-execution of CatalogCacheInitializeCache for
555 * the cache(s) on that table. This is a bad idea since it leads to all
556 * kinds of trouble if a cache flush occurs while loading cache entries.
557 * We now avoid the need to do it by copying cc_tupdesc out of the relcache,
558 * rather than relying on the relcache to keep a tupdesc for us. Of course
559 * this assumes the tupdesc of a cachable system table will not change...)
560 * --------------------------------
/*
 * SystemCacheRelationFlushed
 *		React to a relation being flushed from the relcache (see the
 *		banner comment above).  Per the XXX note, no per-relation
 *		surgery is attempted; the whole cache set is simply flushed.
 *		NOTE(review): the actual ResetSystemCache() call is among the
 *		lines missing from this copy.
 */
563 SystemCacheRelationFlushed(Oid relId)
567 * XXX Ideally we'd search the caches and just zap entries that
568 * actually refer to or come from the indicated relation. For now, we
569 * take the brute-force approach: just flush the caches entirely.
574 /* --------------------------------
577 * This allocates and initializes a cache for a system catalog relation.
578 * Actually, the cache is only partially initialized to avoid opening the
579 * relation. The relation will be opened and the rest of the cache
580 * structure initialized on the first access.
581 * --------------------------------
/*
 * InitSysCache_DEBUG1 logs the newly built cache's identity and
 * geometry; the empty definition that follows is the normal-build
 * fallback (the #ifdef/#else/#endif guards are missing from this copy).
 */
584 #define InitSysCache_DEBUG1 \
586 elog(DEBUG, "InitSysCache: rel=%s id=%d nkeys=%d size=%d\n", \
587 cp->cc_relname, cp->id, cp->cc_nkeys, cp->cc_size); \
591 #define InitSysCache_DEBUG1
/*
 * InitSysCache
 *		Allocate and *partially* initialize a catcache for one system
 *		catalog: everything except what requires opening the relation
 *		(tupdesc, hash/equality links), which is deferred to
 *		CatalogCacheInitializeCache on first access.  All allocations
 *		are made in CacheMemoryContext so they persist across
 *		transactions.  The NCCBUCK+1 bucket headers are carved from a
 *		single palloc as an allocation-count optimization (see comment
 *		below), and the new cache is pushed onto the global Caches list.
 *
 * NOTE(review): the function's return-type/signature lines are missing
 * from this copy -- parameters evidently include relname, indname,
 * nkeys and key[]; cc_tupdesc is left NULL as the "not yet fully
 * initialized" marker tested elsewhere.
 */
602 MemoryContext oldcxt;
606 * first switch to the cache context so our allocations
607 * do not vanish at the end of a transaction
610 if (!CacheMemoryContext)
611 CreateCacheMemoryContext();
613 oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
616 * allocate a new cache structure
619 cp = (CatCache *) palloc(sizeof(CatCache));
620 MemSet((char *) cp, 0, sizeof(CatCache));
623 * initialize the cache buckets (each bucket is a list header)
624 * and the LRU tuple list
630 * We can only do this optimization because the number of hash
631 * buckets never changes. Without it, we call palloc() too much.
632 * We could move this to dllist.c, but the way we do this is not
633 * dynamic/portable, so why allow other routines to use it.
635 Dllist *cache_begin = palloc((NCCBUCK + 1) * sizeof(Dllist));
637 for (i = 0; i <= NCCBUCK; ++i)
639 cp->cc_cache[i] = &cache_begin[i];
640 cp->cc_cache[i]->dll_head = 0;
641 cp->cc_cache[i]->dll_tail = 0;
645 cp->cc_lrulist = DLNewList();
648 * Caches is the pointer to the head of the list of all the
649 * system caches. here we add the new cache to the top of the list.
652 cp->cc_next = Caches; /* list of caches (single link) */
656 * initialize the cache's relation information for the relation
657 * corresponding to this cache and initialize some of the the new
658 * cache's other internal fields.
661 cp->cc_relname = relname;
662 cp->cc_indname = indname;
663 cp->cc_tupdesc = (TupleDesc) NULL;
665 cp->cc_maxtup = MAXTUP;
666 cp->cc_size = NCCBUCK;
667 cp->cc_nkeys = nkeys;
668 for (i = 0; i < nkeys; ++i)
669 cp->cc_key[i] = key[i];
672 * all done. new cache is initialized. print some debugging
673 * information, if appropriate.
679 * back to the old context before we return...
682 MemoryContextSwitchTo(oldcxt);
688 /* --------------------------------
691 * This function checks for tuples that will be fetched by
692 * IndexSupportInitialize() during relcache initialization for
693 * certain system indexes that support critical syscaches.
694 * We can't use an indexscan to fetch these, else we'll get into
695 * infinite recursion. A plain heap scan will work, however.
696 * --------------------------------
/*
 * IndexScanOK
 *		Decide whether an uncached lookup may use an index scan.  Some
 *		lookups are themselves needed to initialize index access
 *		(IndexSupportInitialize during relcache init), so using an
 *		index there would recurse forever; those must fall back to a
 *		heap scan.  Two cases are screened: (1) INDEXRELID lookups for
 *		pg_index_indexrelid_index itself -- its OID is fetched once via
 *		a plain heap scan of pg_class and remembered in a function-
 *		static; (2) OPEROID lookups for the OID comparison operators
 *		(MIN_OIDCMP..MAX_OIDCMP range check).  Everything else is
 *		allowed to use the index.
 */
699 IndexScanOK(CatCache *cache, ScanKey cur_skey)
701 if (cache->id == INDEXRELID)
703 static Oid indexSelfOid = InvalidOid;
705 /* One-time lookup of the OID of pg_index_indexrelid_index */
706 if (!OidIsValid(indexSelfOid))
713 rel = heap_openr(RelationRelationName, AccessShareLock);
714 ScanKeyEntryInitialize(&key, 0, Anum_pg_class_relname,
716 PointerGetDatum(IndexRelidIndex));
717 sd = heap_beginscan(rel, false, SnapshotNow, 1, &key);
718 ntp = heap_getnext(sd, 0);
719 if (!HeapTupleIsValid(ntp))
720 elog(ERROR, "SearchSelfReferences: %s not found in %s",
721 IndexRelidIndex, RelationRelationName);
722 indexSelfOid = ntp->t_data->t_oid;
724 heap_close(rel, AccessShareLock);
727 /* Looking for pg_index_indexrelid_index? */
728 if (DatumGetObjectId(cur_skey[0].sk_argument) == indexSelfOid)
731 else if (cache->id == OPEROID)
733 /* Looking for an OID comparison function? */
734 Oid lookup_oid = DatumGetObjectId(cur_skey[0].sk_argument);
736 if (lookup_oid >= MIN_OIDCMP && lookup_oid <= MAX_OIDCMP)
740 /* Normal case, allow index scan */
744 /* --------------------------------
747 * This call searches a system cache for a tuple, opening the relation
748 * if necessary (the first access to a particular cache).
749 * --------------------------------
752 SearchSysCache(CatCache *cache,
758 ScanKeyData cur_skey[4];
766 MemoryContext oldcxt;
769 * one-time startup overhead
772 if (cache->cc_tupdesc == NULL)
773 CatalogCacheInitializeCache(cache);
776 * initialize the search key information
779 memcpy(cur_skey, cache->cc_skey, sizeof(cur_skey));
780 cur_skey[0].sk_argument = v1;
781 cur_skey[1].sk_argument = v2;
782 cur_skey[2].sk_argument = v3;
783 cur_skey[3].sk_argument = v4;
786 * find the hash bucket in which to look for the tuple
789 hash = CatalogCacheComputeHashIndex(cache, cur_skey);
792 * scan the hash bucket until we find a match or exhaust our tuples
795 for (elt = DLGetHead(cache->cc_cache[hash]);
797 elt = DLGetSucc(elt))
801 ct = (CatCTup *) DLE_VAL(elt);
803 * see if the cached tuple matches our key.
804 * (should we be worried about time ranges? -cim 10/2/90)
807 HeapKeyTest(ct->ct_tup,
817 * if we found a tuple in the cache, move it to the top of the
818 * lru list, and return it. We also move it to the front of the
819 * list for its hashbucket, in order to speed subsequent searches.
820 * (The most frequently accessed elements in any hashbucket will
821 * tend to be near the front of the hashbucket's list.)
826 Dlelem *old_lru_elt = ((CatCTup *) DLE_VAL(elt))->ct_node;
828 DLMoveToFront(old_lru_elt);
832 CACHE3_elog(DEBUG, "SearchSysCache(%s): found in bucket %d",
833 cache->cc_relname, hash);
834 #endif /* CACHEDEBUG */
840 * Tuple was not found in cache, so we have to try and
841 * retrieve it directly from the relation. If it's found,
842 * we add it to the cache.
844 * NOTE: it is possible for recursive cache lookups to occur while
845 * reading the relation --- for example, due to shared-cache-inval
846 * messages being processed during heap_open(). This is OK. It's
847 * even possible for one of those lookups to find and enter the
848 * very same tuple we are trying to fetch here. If that happens,
849 * we will enter a second copy of the tuple into the cache. The
850 * first copy will never be referenced again, and will eventually
851 * age out of the cache, so there's no functional problem. This case
852 * is rare enough that it's not worth expending extra cycles to detect.
857 * open the relation associated with the cache
860 relation = heap_openr(cache->cc_relname, AccessShareLock);
863 * Scan the relation to find the tuple. If there's an index, and
864 * if it's safe to do so, use the index. Else do a heap scan.
869 if ((RelationGetForm(relation))->relhasindex &&
870 !IsIgnoringSystemIndexes() &&
871 IndexScanOK(cache, cur_skey))
875 RetrieveIndexResult indexRes;
879 CACHE2_elog(DEBUG, "SearchSysCache(%s): performing index scan",
882 idesc = index_openr(cache->cc_indname);
883 isd = index_beginscan(idesc, false, cache->cc_nkeys, cur_skey);
884 tuple.t_datamcxt = CurrentMemoryContext;
886 while ((indexRes = index_getnext(isd, ForwardScanDirection)))
888 tuple.t_self = indexRes->heap_iptr;
889 heap_fetch(relation, SnapshotNow, &tuple, &buffer);
891 if (tuple.t_data != NULL)
893 /* Copy tuple into our context */
894 oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
895 ntp = heap_copytuple(&tuple);
896 MemoryContextSwitchTo(oldcxt);
897 ReleaseBuffer(buffer);
909 CACHE2_elog(DEBUG, "SearchSysCache(%s): performing heap scan",
913 * For a heap scan, sk_attno has to be set to the heap attribute
914 * number(s), not the index attribute numbers.
916 for (i = 0; i < cache->cc_nkeys; ++i)
917 cur_skey[i].sk_attno = cache->cc_key[i];
919 sd = heap_beginscan(relation, 0, SnapshotNow,
920 cache->cc_nkeys, cur_skey);
922 ntp = heap_getnext(sd, 0);
924 if (HeapTupleIsValid(ntp))
926 /* Copy tuple into our context */
927 oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
928 ntp = heap_copytuple(ntp);
929 MemoryContextSwitchTo(oldcxt);
930 /* We should not free the result of heap_getnext... */
937 * scan is complete. if tup is valid, we can add it to the cache.
938 * note we have already copied it into the cache memory context.
941 if (HeapTupleIsValid(ntp))
944 * allocate a new cache tuple holder, store the pointer
945 * to the heap tuple there and initialize the list pointers.
950 CACHE1_elog(DEBUG, "SearchSysCache: found tuple");
952 oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
955 * this is a little cumbersome here because we want the Dlelem's
956 * in both doubly linked lists to point to one another. That makes
957 * it easier to remove something from both the cache bucket and
958 * the lru list at the same time
960 nct = (CatCTup *) palloc(sizeof(CatCTup));
962 elt = DLNewElem(nct);
963 nct2 = (CatCTup *) palloc(sizeof(CatCTup));
965 lru_elt = DLNewElem(nct2);
967 nct->ct_node = lru_elt;
969 DLAddHead(cache->cc_lrulist, lru_elt);
970 DLAddHead(cache->cc_cache[hash], elt);
972 MemoryContextSwitchTo(oldcxt);
975 * If we've exceeded the desired size of this cache,
976 * throw away the least recently used entry.
979 if (++cache->cc_ntup > cache->cc_maxtup)
983 elt = DLGetTail(cache->cc_lrulist);
984 ct = (CatCTup *) DLE_VAL(elt);
986 if (ct != nct) /* shouldn't be possible, but be safe... */
988 CACHE2_elog(DEBUG, "SearchSysCache(%s): Overflow, LRU removal",
991 CatCacheRemoveCTup(cache, elt);
995 CACHE4_elog(DEBUG, "SearchSysCache(%s): Contains %d/%d tuples",
996 cache->cc_relname, cache->cc_ntup, cache->cc_maxtup);
997 CACHE3_elog(DEBUG, "SearchSysCache(%s): put in bucket %d",
998 cache->cc_relname, hash);
1002 * close the relation and return the tuple we found (or NULL)
1005 heap_close(relation, AccessShareLock);
1010 /* --------------------------------
1011 * RelationInvalidateCatalogCacheTuple()
1013 * Invalidate a tuple from a specific relation. This call determines the
1014 * cache in question and calls CatalogCacheIdInvalidate(). It is -ok-
1015 * if the relation cannot be found, it simply means this backend has yet
1017 * --------------------------------
/*
 * RelationInvalidateCatalogCacheTuple
 *		For each catcache built on the given relation (matched by
 *		relation name), ensure the cache is initialized and invoke the
 *		caller-supplied invalidation callback with the cache id, the
 *		tuple's computed hash-bucket index, and (presumably, per the
 *		callback's ItemPointer parameter) the tuple's TID.
 *		NOTE(review): the function's tail -- the third callback
 *		argument and closing code -- lies beyond the end of this copy.
 */
1020 RelationInvalidateCatalogCacheTuple(Relation relation,
1022 void (*function) (int, Index, ItemPointer))
1030 Assert(RelationIsValid(relation));
1031 Assert(HeapTupleIsValid(tuple));
1032 Assert(PointerIsValid(function));
1033 CACHE1_elog(DEBUG, "RelationInvalidateCatalogCacheTuple: called");
1037 * if the cache contains tuples from the specified relation
1038 * call the invalidation function on the tuples
1039 * in the proper hash bucket
1043 for (ccp = Caches; ccp; ccp = ccp->cc_next)
1045 if (strcmp(ccp->cc_relname, RelationGetRelationName(relation)) != 0)
1048 /* Just in case cache hasn't finished initialization yet... */
1049 if (ccp->cc_tupdesc == NULL)
1050 CatalogCacheInitializeCache(ccp);
1052 (*function) (ccp->id,
1053 CatalogCacheComputeTupleHashIndex(ccp, tuple),