/*
 * Provenance note: this file was extracted from a git web viewer
 * (postgresql repository, commit 202f447076e2a0bceeeeb243bb0f7a44a0cb8494,
 * path src/backend/utils/cache/catcache.c).
 */
1 /*-------------------------------------------------------------------------
2  *
3  * catcache.c
4  *        System catalog cache for tuples matching a key.
5  *
6  * Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
7  * Portions Copyright (c) 1994, Regents of the University of California
8  *
9  *
10  * IDENTIFICATION
11  *        $Header: /cvsroot/pgsql/src/backend/utils/cache/catcache.c,v 1.93 2002/03/26 19:16:08 tgl Exp $
12  *
13  *-------------------------------------------------------------------------
14  */
15 #include "postgres.h"
16
17 #include "access/genam.h"
18 #include "access/hash.h"
19 #include "access/heapam.h"
20 #include "access/valid.h"
21 #include "catalog/pg_opclass.h"
22 #include "catalog/pg_operator.h"
23 #include "catalog/pg_type.h"
24 #include "catalog/catname.h"
25 #include "catalog/indexing.h"
26 #include "miscadmin.h"
27 #ifdef CATCACHE_STATS
28 #include "storage/ipc.h"                /* for on_proc_exit */
29 #endif
30 #include "utils/builtins.h"
31 #include "utils/fmgroids.h"
32 #include "utils/catcache.h"
33 #include "utils/relcache.h"
34 #include "utils/syscache.h"
35
36
 /* #define CACHEDEBUG */       /* turns DEBUG elogs on */

/*
 * Constants related to size of the catcache.
 *
 * NCCBUCKETS must be a power of two and must be less than 64K (because
 * SharedInvalCatcacheMsg crams hash indexes into a uint16 field).      In
 * practice it should be a lot less, anyway, to avoid chewing up too much
 * space on hash bucket headers.
 *
 * MAXCCTUPLES could be as small as a few hundred, if per-backend memory
 * consumption is at a premium.
 */
#define NCCBUCKETS 256                  /* Hash buckets per CatCache */
#define MAXCCTUPLES 5000                /* Maximum # of tuples in all caches */

/*
 * Given a hash value and the size of the hash table, find the bucket
 * in which the hash value belongs. Since the hash table must contain
 * a power-of-2 number of elements, this is a simple bitmask.
 */
#define HASH_INDEX(h, sz) ((Index) ((h) & ((sz) - 1)))


/*
 *              variables, macros and other stuff
 */

#ifdef CACHEDEBUG
/* With CACHEDEBUG defined, CACHEn_elog expands to a real elog call */
#define CACHE1_elog(a,b)                                elog(a,b)
#define CACHE2_elog(a,b,c)                              elog(a,b,c)
#define CACHE3_elog(a,b,c,d)                    elog(a,b,c,d)
#define CACHE4_elog(a,b,c,d,e)                  elog(a,b,c,d,e)
#define CACHE5_elog(a,b,c,d,e,f)                elog(a,b,c,d,e,f)
#define CACHE6_elog(a,b,c,d,e,f,g)              elog(a,b,c,d,e,f,g)
#else
/* Otherwise the debug-logging macros compile away to nothing */
#define CACHE1_elog(a,b)
#define CACHE2_elog(a,b,c)
#define CACHE3_elog(a,b,c,d)
#define CACHE4_elog(a,b,c,d,e)
#define CACHE5_elog(a,b,c,d,e,f)
#define CACHE6_elog(a,b,c,d,e,f,g)
#endif

/* Cache management header --- pointer is NULL until created */
static CatCacheHeader *CacheHdr = NULL;

/*
 *              EQPROC is used in CatalogCacheInitializeCache to find the equality
 *              functions for system types that are used as cache key fields.
 *              See also GetCCHashFunc, which should support the same set of types.
 *
 *              The table is indexed by (type OID - BOOLOID); InvalidOid marks
 *              type OIDs in that range that are not supported as key types.
 *
 *              XXX this should be replaced by catalog lookups,
 *              but that seems to pose considerable risk of circularity...
 */
static const Oid eqproc[] = {
        F_BOOLEQ, InvalidOid, F_CHAREQ, F_NAMEEQ, InvalidOid,
        F_INT2EQ, F_INT2VECTOREQ, F_INT4EQ, F_OIDEQ, F_TEXTEQ,
        F_OIDEQ, InvalidOid, InvalidOid, InvalidOid, F_OIDVECTOREQ
};

#define EQPROC(SYSTEMTYPEOID)   eqproc[(SYSTEMTYPEOID)-BOOLOID]


/* forward declarations of local routines */
static uint32 CatalogCacheComputeHashValue(CatCache *cache,
                                                         ScanKey cur_skey);
static uint32 CatalogCacheComputeTupleHashValue(CatCache *cache,
                                                                  HeapTuple tuple);
#ifdef CATCACHE_STATS
static void CatCachePrintStats(void);
#endif
static void CatCacheRemoveCTup(CatCache *cache, CatCTup *ct);
static void CatalogCacheInitializeCache(CatCache *cache);
111
112 /*
113  *                                      internal support functions
114  */
115
116 static PGFunction
117 GetCCHashFunc(Oid keytype)
118 {
119         switch (keytype)
120         {
121                 case BOOLOID:
122                 case CHAROID:
123                         return hashchar;
124                 case NAMEOID:
125                         return hashname;
126                 case INT2OID:
127                         return hashint2;
128                 case INT2VECTOROID:
129                         return hashint2vector;
130                 case INT4OID:
131                         return hashint4;
132                 case TEXTOID:
133                         return hashvarlena;
134                 case REGPROCOID:
135                 case OIDOID:
136                         return hashoid;
137                 case OIDVECTOROID:
138                         return hashoidvector;
139                 default:
140                         elog(FATAL, "GetCCHashFunc: type %u unsupported as catcache key",
141                                  keytype);
142                         return (PGFunction) NULL;
143         }
144 }
145
146 /*
147  *              CatalogCacheComputeHashValue
148  *
149  * Compute the hash value associated with a given set of lookup keys
150  */
151 static uint32
152 CatalogCacheComputeHashValue(CatCache *cache, ScanKey cur_skey)
153 {
154         uint32          hashValue = 0;
155
156         CACHE4_elog(DEBUG1, "CatalogCacheComputeHashValue %s %d %p",
157                                 cache->cc_relname,
158                                 cache->cc_nkeys,
159                                 cache);
160
161         switch (cache->cc_nkeys)
162         {
163                 case 4:
164                         hashValue ^=
165                                 DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[3],
166                                                                                   cur_skey[3].sk_argument)) << 9;
167                         /* FALLTHROUGH */
168                 case 3:
169                         hashValue ^=
170                                 DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[2],
171                                                                                   cur_skey[2].sk_argument)) << 6;
172                         /* FALLTHROUGH */
173                 case 2:
174                         hashValue ^=
175                                 DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[1],
176                                                                                   cur_skey[1].sk_argument)) << 3;
177                         /* FALLTHROUGH */
178                 case 1:
179                         hashValue ^=
180                                 DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[0],
181                                                                                            cur_skey[0].sk_argument));
182                         break;
183                 default:
184                         elog(FATAL, "CCComputeHashValue: %d cc_nkeys", cache->cc_nkeys);
185                         break;
186         }
187
188         return hashValue;
189 }
190
191 /*
192  *              CatalogCacheComputeTupleHashValue
193  *
194  * Compute the hash value associated with a given tuple to be cached
195  */
196 static uint32
197 CatalogCacheComputeTupleHashValue(CatCache *cache, HeapTuple tuple)
198 {
199         ScanKeyData cur_skey[4];
200         bool            isNull = false;
201
202         /* Copy pre-initialized overhead data for scankey */
203         memcpy(cur_skey, cache->cc_skey, sizeof(cur_skey));
204
205         /* Now extract key fields from tuple, insert into scankey */
206         switch (cache->cc_nkeys)
207         {
208                 case 4:
209                         cur_skey[3].sk_argument =
210                                 (cache->cc_key[3] == ObjectIdAttributeNumber)
211                                 ? ObjectIdGetDatum(tuple->t_data->t_oid)
212                                 : fastgetattr(tuple,
213                                                           cache->cc_key[3],
214                                                           cache->cc_tupdesc,
215                                                           &isNull);
216                         Assert(!isNull);
217                         /* FALLTHROUGH */
218                 case 3:
219                         cur_skey[2].sk_argument =
220                                 (cache->cc_key[2] == ObjectIdAttributeNumber)
221                                 ? ObjectIdGetDatum(tuple->t_data->t_oid)
222                                 : fastgetattr(tuple,
223                                                           cache->cc_key[2],
224                                                           cache->cc_tupdesc,
225                                                           &isNull);
226                         Assert(!isNull);
227                         /* FALLTHROUGH */
228                 case 2:
229                         cur_skey[1].sk_argument =
230                                 (cache->cc_key[1] == ObjectIdAttributeNumber)
231                                 ? ObjectIdGetDatum(tuple->t_data->t_oid)
232                                 : fastgetattr(tuple,
233                                                           cache->cc_key[1],
234                                                           cache->cc_tupdesc,
235                                                           &isNull);
236                         Assert(!isNull);
237                         /* FALLTHROUGH */
238                 case 1:
239                         cur_skey[0].sk_argument =
240                                 (cache->cc_key[0] == ObjectIdAttributeNumber)
241                                 ? ObjectIdGetDatum(tuple->t_data->t_oid)
242                                 : fastgetattr(tuple,
243                                                           cache->cc_key[0],
244                                                           cache->cc_tupdesc,
245                                                           &isNull);
246                         Assert(!isNull);
247                         break;
248                 default:
249                         elog(FATAL, "CCComputeTupleHashValue: %d cc_nkeys",
250                                  cache->cc_nkeys);
251                         break;
252         }
253
254         return CatalogCacheComputeHashValue(cache, cur_skey);
255 }
256
257
258 #ifdef CATCACHE_STATS
259
260 static void
261 CatCachePrintStats(void)
262 {
263         CatCache   *cache;
264         long            cc_searches = 0;
265         long            cc_hits = 0;
266         long            cc_neg_hits = 0;
267         long            cc_newloads = 0;
268         long            cc_invals = 0;
269         long            cc_discards = 0;
270
271         elog(DEBUG1, "Catcache stats dump: %d/%d tuples in catcaches",
272                  CacheHdr->ch_ntup, CacheHdr->ch_maxtup);
273
274         for (cache = CacheHdr->ch_caches; cache; cache = cache->cc_next)
275         {
276                 if (cache->cc_ntup == 0 && cache->cc_searches == 0)
277                         continue;                       /* don't print unused caches */
278                 elog(DEBUG1, "Catcache %s/%s: %d tup, %ld srch, %ld+%ld=%ld hits, %ld+%ld=%ld loads, %ld invals, %ld discards",
279                          cache->cc_relname,
280                          cache->cc_indname,
281                          cache->cc_ntup,
282                          cache->cc_searches,
283                          cache->cc_hits,
284                          cache->cc_neg_hits,
285                          cache->cc_hits + cache->cc_neg_hits,
286                          cache->cc_newloads,
287                          cache->cc_searches - cache->cc_hits - cache->cc_neg_hits - cache->cc_newloads,
288                          cache->cc_searches - cache->cc_hits - cache->cc_neg_hits,
289                          cache->cc_invals,
290                          cache->cc_discards);
291                 cc_searches += cache->cc_searches;
292                 cc_hits += cache->cc_hits;
293                 cc_neg_hits += cache->cc_neg_hits;
294                 cc_newloads += cache->cc_newloads;
295                 cc_invals += cache->cc_invals;
296                 cc_discards += cache->cc_discards;
297         }
298         elog(DEBUG1, "Catcache totals: %d tup, %ld srch, %ld+%ld=%ld hits, %ld+%ld=%ld loads, %ld invals, %ld discards",
299                  CacheHdr->ch_ntup,
300                  cc_searches,
301                  cc_hits,
302                  cc_neg_hits,
303                  cc_hits + cc_neg_hits,
304                  cc_newloads,
305                  cc_searches - cc_hits - cc_neg_hits - cc_newloads,
306                  cc_searches - cc_hits - cc_neg_hits,
307                  cc_invals,
308                  cc_discards);
309 }
310
311 #endif /* CATCACHE_STATS */
312
313
314 /*
315  *              CatCacheRemoveCTup
316  *
317  * Unlink and delete the given cache entry
318  */
319 static void
320 CatCacheRemoveCTup(CatCache *cache, CatCTup *ct)
321 {
322         Assert(ct->refcount == 0);
323         Assert(ct->my_cache == cache);
324
325         /* delink from linked lists */
326         DLRemove(&ct->lrulist_elem);
327         DLRemove(&ct->cache_elem);
328
329         /* free associated tuple data */
330         if (ct->tuple.t_data != NULL)
331                 pfree(ct->tuple.t_data);
332         pfree(ct);
333
334         --cache->cc_ntup;
335         --CacheHdr->ch_ntup;
336 }
337
338 /*
339  *      CatalogCacheIdInvalidate
340  *
341  *      Invalidate entries in the specified cache, given a hash value and
342  *      item pointer.  Positive entries are deleted if they match the item
343  *      pointer.  Negative entries must be deleted if they match the hash
344  *      value (since we do not have the exact key of the tuple that's being
345  *      inserted).  But this should only rarely result in loss of a cache
346  *      entry that could have been kept.
347  *
348  *      Note that it's not very relevant whether the tuple identified by
349  *      the item pointer is being inserted or deleted.  We don't expect to
350  *      find matching positive entries in the one case, and we don't expect
351  *      to find matching negative entries in the other; but we will do the
352  *      right things in any case.
353  *
354  *      This routine is only quasi-public: it should only be used by inval.c.
355  */
356 void
357 CatalogCacheIdInvalidate(int cacheId,
358                                                  uint32 hashValue,
359                                                  ItemPointer pointer)
360 {
361         CatCache   *ccp;
362
363         /*
364          * sanity checks
365          */
366         Assert(ItemPointerIsValid(pointer));
367         CACHE1_elog(DEBUG1, "CatalogCacheIdInvalidate: called");
368
369         /*
370          * inspect caches to find the proper cache
371          */
372         for (ccp = CacheHdr->ch_caches; ccp; ccp = ccp->cc_next)
373         {
374                 Index           hashIndex;
375                 Dlelem     *elt,
376                                    *nextelt;
377
378                 if (cacheId != ccp->id)
379                         continue;
380
381                 /*
382                  * We don't bother to check whether the cache has finished
383                  * initialization yet; if not, there will be no entries in it
384                  * so no problem.
385                  */
386
387                 /*
388                  * inspect the proper hash bucket for matches
389                  */
390                 hashIndex = HASH_INDEX(hashValue, ccp->cc_nbuckets);
391
392                 for (elt = DLGetHead(&ccp->cc_bucket[hashIndex]); elt; elt = nextelt)
393                 {
394                         CatCTup    *ct = (CatCTup *) DLE_VAL(elt);
395
396                         nextelt = DLGetSucc(elt);
397
398                         if (hashValue != ct->hash_value)
399                                 continue;               /* ignore non-matching hash values */
400
401                         if (ct->negative ||
402                                 ItemPointerEquals(pointer, &ct->tuple.t_self))
403                         {
404                                 if (ct->refcount > 0)
405                                         ct->dead = true;
406                                 else
407                                         CatCacheRemoveCTup(ccp, ct);
408                                 CACHE1_elog(DEBUG1, "CatalogCacheIdInvalidate: invalidated");
409 #ifdef CATCACHE_STATS
410                                 ccp->cc_invals++;
411 #endif
412                                 /* could be multiple matches, so keep looking! */
413                         }
414                 }
415                 break;                                  /* need only search this one cache */
416         }
417 }
418
419 /* ----------------------------------------------------------------
420  *                                         public functions
421  * ----------------------------------------------------------------
422  */
423
424
425 /*
426  * Standard routine for creating cache context if it doesn't exist yet
427  *
428  * There are a lot of places (probably far more than necessary) that check
429  * whether CacheMemoryContext exists yet and want to create it if not.
430  * We centralize knowledge of exactly how to create it here.
431  */
432 void
433 CreateCacheMemoryContext(void)
434 {
435         /*
436          * Purely for paranoia, check that context doesn't exist; caller
437          * probably did so already.
438          */
439         if (!CacheMemoryContext)
440                 CacheMemoryContext = AllocSetContextCreate(TopMemoryContext,
441                                                                                                    "CacheMemoryContext",
442                                                                                                 ALLOCSET_DEFAULT_MINSIZE,
443                                                                                            ALLOCSET_DEFAULT_INITSIZE,
444                                                                                            ALLOCSET_DEFAULT_MAXSIZE);
445 }
446
447
448 /*
449  *              AtEOXact_CatCache
450  *
451  * Clean up catcaches at end of transaction (either commit or abort)
452  *
453  * We scan the caches to reset refcounts to zero.  This is of course
454  * necessary in the abort case, since elog() may have interrupted routines.
455  * In the commit case, any nonzero counts indicate failure to call
456  * ReleaseSysCache, so we put out a notice for debugging purposes.
457  */
458 void
459 AtEOXact_CatCache(bool isCommit)
460 {
461         Dlelem     *elt,
462                            *nextelt;
463
464         for (elt = DLGetHead(&CacheHdr->ch_lrulist); elt; elt = nextelt)
465         {
466                 CatCTup    *ct = (CatCTup *) DLE_VAL(elt);
467
468                 nextelt = DLGetSucc(elt);
469
470                 if (ct->refcount != 0)
471                 {
472                         if (isCommit)
473                                 elog(WARNING, "Cache reference leak: cache %s (%d), tuple %u has count %d",
474                                          ct->my_cache->cc_relname, ct->my_cache->id,
475                                          ct->tuple.t_data->t_oid,
476                                          ct->refcount);
477                         ct->refcount = 0;
478                 }
479
480                 /* Clean up any now-deletable dead entries */
481                 if (ct->dead)
482                         CatCacheRemoveCTup(ct->my_cache, ct);
483         }
484 }
485
486 /*
487  *              ResetCatalogCache
488  *
489  * Reset one catalog cache to empty.
490  *
491  * This is not very efficient if the target cache is nearly empty.
492  * However, it shouldn't need to be efficient; we don't invoke it often.
493  */
494 static void
495 ResetCatalogCache(CatCache *cache)
496 {
497         int                     i;
498
499         /* Remove each tuple in this cache, or at least mark it dead */
500         for (i = 0; i < cache->cc_nbuckets; i++)
501         {
502                 Dlelem     *elt,
503                                    *nextelt;
504
505                 for (elt = DLGetHead(&cache->cc_bucket[i]); elt; elt = nextelt)
506                 {
507                         CatCTup    *ct = (CatCTup *) DLE_VAL(elt);
508
509                         nextelt = DLGetSucc(elt);
510
511                         if (ct->refcount > 0)
512                                 ct->dead = true;
513                         else
514                                 CatCacheRemoveCTup(cache, ct);
515 #ifdef CATCACHE_STATS
516                         cache->cc_invals++;
517 #endif
518                 }
519         }
520 }
521
522 /*
523  *              ResetCatalogCaches
524  *
525  * Reset all caches when a shared cache inval event forces it
526  */
527 void
528 ResetCatalogCaches(void)
529 {
530         CatCache   *cache;
531
532         CACHE1_elog(DEBUG1, "ResetCatalogCaches called");
533
534         for (cache = CacheHdr->ch_caches; cache; cache = cache->cc_next)
535                 ResetCatalogCache(cache);
536
537         CACHE1_elog(DEBUG1, "end of ResetCatalogCaches call");
538 }
539
540 /*
541  *              CatalogCacheFlushRelation
542  *
543  *      This is called by RelationFlushRelation() to clear out cached information
544  *      about a relation being dropped.  (This could be a DROP TABLE command,
545  *      or a temp table being dropped at end of transaction, or a table created
546  *      during the current transaction that is being dropped because of abort.)
547  *      Remove all cache entries relevant to the specified relation OID.
548  *
549  *      A special case occurs when relId is itself one of the cacheable system
550  *      tables --- although those'll never be dropped, they can get flushed from
551  *      the relcache (VACUUM causes this, for example).  In that case we need
552  *      to flush all cache entries that came from that table.  (At one point we
553  *      also tried to force re-execution of CatalogCacheInitializeCache for
554  *      the cache(s) on that table.  This is a bad idea since it leads to all
555  *      kinds of trouble if a cache flush occurs while loading cache entries.
556  *      We now avoid the need to do it by copying cc_tupdesc out of the relcache,
557  *      rather than relying on the relcache to keep a tupdesc for us.  Of course
558  *      this assumes the tupdesc of a cachable system table will not change...)
559  */
560 void
561 CatalogCacheFlushRelation(Oid relId)
562 {
563         CatCache   *cache;
564
565         CACHE2_elog(DEBUG1, "CatalogCacheFlushRelation called for %u", relId);
566
567         for (cache = CacheHdr->ch_caches; cache; cache = cache->cc_next)
568         {
569                 int                     i;
570
571                 /* We can ignore uninitialized caches, since they must be empty */
572                 if (cache->cc_tupdesc == NULL)
573                         continue;
574
575                 /* Does this cache store tuples of the target relation itself? */
576                 if (cache->cc_tupdesc->attrs[0]->attrelid == relId)
577                 {
578                         /* Yes, so flush all its contents */
579                         ResetCatalogCache(cache);
580                         continue;
581                 }
582
583                 /* Does this cache store tuples associated with relations at all? */
584                 if (cache->cc_reloidattr == 0)
585                         continue;                       /* nope, leave it alone */
586
587                 /* Yes, scan the tuples and remove those related to relId */
588                 for (i = 0; i < cache->cc_nbuckets; i++)
589                 {
590                         Dlelem     *elt,
591                                            *nextelt;
592
593                         for (elt = DLGetHead(&cache->cc_bucket[i]); elt; elt = nextelt)
594                         {
595                                 CatCTup    *ct = (CatCTup *) DLE_VAL(elt);
596                                 Oid                     tupRelid;
597
598                                 nextelt = DLGetSucc(elt);
599
600                                 /*
601                                  * Negative entries are never considered related to a rel,
602                                  * even if the rel is part of their lookup key.
603                                  */
604                                 if (ct->negative)
605                                         continue;
606
607                                 if (cache->cc_reloidattr == ObjectIdAttributeNumber)
608                                         tupRelid = ct->tuple.t_data->t_oid;
609                                 else
610                                 {
611                                         bool            isNull;
612
613                                         tupRelid =
614                                                 DatumGetObjectId(fastgetattr(&ct->tuple,
615                                                                                                          cache->cc_reloidattr,
616                                                                                                          cache->cc_tupdesc,
617                                                                                                          &isNull));
618                                         Assert(!isNull);
619                                 }
620
621                                 if (tupRelid == relId)
622                                 {
623                                         if (ct->refcount > 0)
624                                                 ct->dead = true;
625                                         else
626                                                 CatCacheRemoveCTup(cache, ct);
627 #ifdef CATCACHE_STATS
628                                         cache->cc_invals++;
629 #endif
630                                 }
631                         }
632                 }
633         }
634
635         CACHE1_elog(DEBUG1, "end of CatalogCacheFlushRelation call");
636 }
637
638 /*
639  *              InitCatCache
640  *
641  *      This allocates and initializes a cache for a system catalog relation.
642  *      Actually, the cache is only partially initialized to avoid opening the
643  *      relation.  The relation will be opened and the rest of the cache
644  *      structure initialized on the first access.
645  */
646 #ifdef CACHEDEBUG
647 #define InitCatCache_DEBUG1 \
648 do { \
649         elog(DEBUG1, "InitCatCache: rel=%s id=%d nkeys=%d size=%d\n", \
650                 cp->cc_relname, cp->id, cp->cc_nkeys, cp->cc_nbuckets); \
651 } while(0)
652
653 #else
654 #define InitCatCache_DEBUG1
655 #endif
656
657 CatCache *
658 InitCatCache(int id,
659                          const char *relname,
660                          const char *indname,
661                          int reloidattr,
662                          int nkeys,
663                          const int *key)
664 {
665         CatCache   *cp;
666         MemoryContext oldcxt;
667         int                     i;
668
669         /*
670          * first switch to the cache context so our allocations do not vanish
671          * at the end of a transaction
672          */
673         if (!CacheMemoryContext)
674                 CreateCacheMemoryContext();
675
676         oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
677
678         /*
679          * if first time through, initialize the cache group header, including
680          * global LRU list header
681          */
682         if (CacheHdr == NULL)
683         {
684                 CacheHdr = (CatCacheHeader *) palloc(sizeof(CatCacheHeader));
685                 CacheHdr->ch_caches = NULL;
686                 CacheHdr->ch_ntup = 0;
687                 CacheHdr->ch_maxtup = MAXCCTUPLES;
688                 DLInitList(&CacheHdr->ch_lrulist);
689 #ifdef CATCACHE_STATS
690                 on_proc_exit(CatCachePrintStats, 0);
691 #endif
692         }
693
694         /*
695          * allocate a new cache structure
696          *
697          * Note: we assume zeroing initializes the bucket headers correctly
698          */
699         cp = (CatCache *) palloc(sizeof(CatCache) + NCCBUCKETS * sizeof(Dllist));
700         MemSet((char *) cp, 0, sizeof(CatCache) + NCCBUCKETS * sizeof(Dllist));
701
702         /*
703          * initialize the cache's relation information for the relation
704          * corresponding to this cache, and initialize some of the new cache's
705          * other internal fields.  But don't open the relation yet.
706          */
707         cp->id = id;
708         cp->cc_relname = relname;
709         cp->cc_indname = indname;
710         cp->cc_reloid = InvalidOid;     /* temporary */
711         cp->cc_relisshared = false; /* temporary */
712         cp->cc_tupdesc = (TupleDesc) NULL;
713         cp->cc_reloidattr = reloidattr;
714         cp->cc_ntup = 0;
715         cp->cc_nbuckets = NCCBUCKETS;
716         cp->cc_nkeys = nkeys;
717         for (i = 0; i < nkeys; ++i)
718                 cp->cc_key[i] = key[i];
719
720         /*
721          * new cache is initialized as far as we can go for now. print some
722          * debugging information, if appropriate.
723          */
724         InitCatCache_DEBUG1;
725
726         /*
727          * add completed cache to top of group header's list
728          */
729         cp->cc_next = CacheHdr->ch_caches;
730         CacheHdr->ch_caches = cp;
731
732         /*
733          * back to the old context before we return...
734          */
735         MemoryContextSwitchTo(oldcxt);
736
737         return cp;
738 }
739
740 /*
741  *              CatalogCacheInitializeCache
742  *
743  * This function does final initialization of a catcache: obtain the tuple
744  * descriptor and set up the hash and equality function links.  We assume
745  * that the relcache entry can be opened at this point!
746  */
747 #ifdef CACHEDEBUG
748 #define CatalogCacheInitializeCache_DEBUG1 \
749         elog(DEBUG1, "CatalogCacheInitializeCache: cache @%p %s", cache, \
750                  cache->cc_relname)
751
752 #define CatalogCacheInitializeCache_DEBUG2 \
753 do { \
754                 if (cache->cc_key[i] > 0) { \
755                         elog(DEBUG1, "CatalogCacheInitializeCache: load %d/%d w/%d, %u", \
756                                 i+1, cache->cc_nkeys, cache->cc_key[i], \
757                                  tupdesc->attrs[cache->cc_key[i] - 1]->atttypid); \
758                 } else { \
759                         elog(DEBUG1, "CatalogCacheInitializeCache: load %d/%d w/%d", \
760                                 i+1, cache->cc_nkeys, cache->cc_key[i]); \
761                 } \
762 } while(0)
763
764 #else
765 #define CatalogCacheInitializeCache_DEBUG1
766 #define CatalogCacheInitializeCache_DEBUG2
767 #endif
768
static void
CatalogCacheInitializeCache(CatCache *cache)
{
	Relation	relation;
	MemoryContext oldcxt;
	TupleDesc	tupdesc;
	int			i;

	CatalogCacheInitializeCache_DEBUG1;

	/*
	 * Open the relation without locking --- we only need the tupdesc,
	 * which we assume will never change ...
	 */
	relation = heap_openr(cache->cc_relname, NoLock);
	Assert(RelationIsValid(relation));

	/*
	 * switch to the cache context so our allocations do not vanish at the
	 * end of a transaction
	 */
	Assert(CacheMemoryContext != NULL);

	oldcxt = MemoryContextSwitchTo(CacheMemoryContext);

	/*
	 * copy the relcache's tuple descriptor to permanent cache storage,
	 * so it survives after the relation is closed
	 */
	tupdesc = CreateTupleDescCopyConstr(RelationGetDescr(relation));

	/*
	 * get the relation's OID and relisshared flag, too
	 */
	cache->cc_reloid = RelationGetRelid(relation);
	cache->cc_relisshared = RelationGetForm(relation)->relisshared;

	/*
	 * return to the caller's memory context and close the rel
	 */
	MemoryContextSwitchTo(oldcxt);

	heap_close(relation, NoLock);

	CACHE3_elog(DEBUG1, "CatalogCacheInitializeCache: %s, %d keys",
				cache->cc_relname, cache->cc_nkeys);

	/*
	 * initialize cache's key information: for each key column, resolve its
	 * type, then look up the matching hash and equality functions.  A
	 * positive cc_key[] entry is a regular attribute number; the only
	 * system attribute supported as a key is OID.
	 */
	for (i = 0; i < cache->cc_nkeys; ++i)
	{
		Oid			keytype;

		CatalogCacheInitializeCache_DEBUG2;

		if (cache->cc_key[i] > 0)
			keytype = tupdesc->attrs[cache->cc_key[i] - 1]->atttypid;
		else
		{
			if (cache->cc_key[i] != ObjectIdAttributeNumber)
				elog(FATAL, "CatalogCacheInit: only sys attr supported is OID");
			keytype = OIDOID;
		}

		cache->cc_hashfunc[i] = GetCCHashFunc(keytype);

		/* remember NAME columns: SearchCatCache accepts C strings for them */
		cache->cc_isname[i] = (keytype == NAMEOID);

		/*
		 * If GetCCHashFunc liked the type, safe to index into eqproc[]
		 */
		cache->cc_skey[i].sk_procedure = EQPROC(keytype);

		/* Do function lookup, cached permanently in CacheMemoryContext */
		fmgr_info_cxt(cache->cc_skey[i].sk_procedure,
					  &cache->cc_skey[i].sk_func,
					  CacheMemoryContext);

		/* Initialize sk_attno suitably for HeapKeyTest() and heap scans */
		cache->cc_skey[i].sk_attno = cache->cc_key[i];

		CACHE4_elog(DEBUG1, "CatalogCacheInit %s %d %p",
					cache->cc_relname,
					i,
					cache);
	}

	/*
	 * mark this cache fully initialized (cc_tupdesc != NULL is the flag
	 * callers test before using the cache)
	 */
	cache->cc_tupdesc = tupdesc;
}
861
862 /*
863  * InitCatCachePhase2 -- external interface for CatalogCacheInitializeCache
864  *
865  * The only reason to call this routine is to ensure that the relcache
866  * has created entries for all the catalogs and indexes referenced by
867  * catcaches.  Therefore, open the index too.  An exception is the indexes
868  * on pg_am, which we don't use (cf. IndexScanOK).
869  */
870 void
871 InitCatCachePhase2(CatCache *cache)
872 {
873         if (cache->cc_tupdesc == NULL)
874                 CatalogCacheInitializeCache(cache);
875
876         if (cache->id != AMOID &&
877                 cache->id != AMNAME)
878         {
879                 Relation        idesc;
880
881                 idesc = index_openr(cache->cc_indname);
882                 index_close(idesc);
883         }
884 }
885
886
887 /*
888  *              IndexScanOK
889  *
890  *              This function checks for tuples that will be fetched by
891  *              IndexSupportInitialize() during relcache initialization for
892  *              certain system indexes that support critical syscaches.
893  *              We can't use an indexscan to fetch these, else we'll get into
894  *              infinite recursion.  A plain heap scan will work, however.
895  *
896  *              Once we have completed relcache initialization (signaled by
897  *              criticalRelcachesBuilt), we don't have to worry anymore.
898  */
899 static bool
900 IndexScanOK(CatCache *cache, ScanKey cur_skey)
901 {
902         if (cache->id == INDEXRELID)
903         {
904                 /*
905                  * Since the OIDs of indexes aren't hardwired, it's painful to
906                  * figure out which is which.  Just force all pg_index searches
907                  * to be heap scans while building the relcaches.
908                  */
909                 if (!criticalRelcachesBuilt)
910                         return false;
911         }
912         else if (cache->id == AMOID ||
913                          cache->id == AMNAME)
914         {
915                 /*
916                  * Always do heap scans in pg_am, because it's so small there's
917                  * not much point in an indexscan anyway.  We *must* do this when
918                  * initially building critical relcache entries, but we might as
919                  * well just always do it.
920                  */
921                 return false;
922         }
923         else if (cache->id == OPEROID)
924         {
925                 if (!criticalRelcachesBuilt)
926                 {
927                         /* Looking for an OID comparison function? */
928                         Oid             lookup_oid = DatumGetObjectId(cur_skey[0].sk_argument);
929
930                         if (lookup_oid >= MIN_OIDCMP && lookup_oid <= MAX_OIDCMP)
931                                 return false;
932                 }
933         }
934
935         /* Normal case, allow index scan */
936         return true;
937 }
938
/*
 *	SearchCatCache
 *
 *		This call searches a system cache for a tuple, opening the relation
 *		if necessary (on the first access to a particular cache).
 *
 *		The result is NULL if not found, or a pointer to a HeapTuple in
 *		the cache.  The caller must not modify the tuple, and must call
 *		ReleaseCatCache() when done with it.
 *
 * The search key values should be expressed as Datums of the key columns'
 * datatype(s).  (Pass zeroes for any unused parameters.)  As a special
 * exception, the passed-in key for a NAME column can be just a C string;
 * the caller need not go to the trouble of converting it to a fully
 * null-padded NAME.
 */
HeapTuple
SearchCatCache(CatCache *cache,
			   Datum v1,
			   Datum v2,
			   Datum v3,
			   Datum v4)
{
	ScanKeyData cur_skey[4];
	uint32		hashValue;
	Index		hashIndex;
	Dlelem	   *elt;
	CatCTup    *ct;
	Relation	relation;
	HeapTuple	ntp;
	int			i;
	MemoryContext oldcxt;

	/*
	 * one-time startup overhead for each cache
	 */
	if (cache->cc_tupdesc == NULL)
		CatalogCacheInitializeCache(cache);

#ifdef CATCACHE_STATS
	cache->cc_searches++;
#endif

	/*
	 * initialize the search key information: copy the cache's prototype
	 * ScanKeys and plug in the caller's key values
	 */
	memcpy(cur_skey, cache->cc_skey, sizeof(cur_skey));
	cur_skey[0].sk_argument = v1;
	cur_skey[1].sk_argument = v2;
	cur_skey[2].sk_argument = v3;
	cur_skey[3].sk_argument = v4;

	/*
	 * find the hash bucket in which to look for the tuple
	 */
	hashValue = CatalogCacheComputeHashValue(cache, cur_skey);
	hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);

	/*
	 * scan the hash bucket until we find a match or exhaust our tuples
	 */
	for (elt = DLGetHead(&cache->cc_bucket[hashIndex]);
		 elt;
		 elt = DLGetSucc(elt))
	{
		bool		res;

		ct = (CatCTup *) DLE_VAL(elt);

		if (ct->dead)
			continue;			/* ignore dead entries */

		if (ct->hash_value != hashValue)
			continue;			/* quickly skip entry if wrong hash val */

		/*
		 * see if the cached tuple matches our key.
		 * (HeapKeyTest is a macro that assigns its result into "res".)
		 */
		HeapKeyTest(&ct->tuple,
					cache->cc_tupdesc,
					cache->cc_nkeys,
					cur_skey,
					res);
		if (!res)
			continue;

		/*
		 * we found a match in the cache: move it to the front of the global
		 * LRU list.  We also move it to the front of the list for its
		 * hashbucket, in order to speed subsequent searches.  (The most
		 * frequently accessed elements in any hashbucket will tend to be
		 * near the front of the hashbucket's list.)
		 */
		DLMoveToFront(&ct->lrulist_elem);
		DLMoveToFront(&ct->cache_elem);

		/*
		 * If it's a positive entry, bump its refcount and return it.
		 * If it's negative, we can report failure to the caller.
		 */
		if (!ct->negative)
		{
			ct->refcount++;

#ifdef CACHEDEBUG
			CACHE3_elog(DEBUG1, "SearchCatCache(%s): found in bucket %d",
						cache->cc_relname, hashIndex);
#endif   /* CACHEDEBUG */

#ifdef CATCACHE_STATS
			cache->cc_hits++;
#endif

			return &ct->tuple;
		}
		else
		{
#ifdef CACHEDEBUG
			CACHE3_elog(DEBUG1, "SearchCatCache(%s): found neg entry in bucket %d",
						cache->cc_relname, hashIndex);
#endif   /* CACHEDEBUG */

#ifdef CATCACHE_STATS
			cache->cc_neg_hits++;
#endif

			return NULL;
		}
	}

	/*
	 * Tuple was not found in cache, so we have to try to retrieve it
	 * directly from the relation.  If found, we will add it to the
	 * cache; if not found, we will add a negative cache entry instead.
	 *
	 * NOTE: it is possible for recursive cache lookups to occur while
	 * reading the relation --- for example, due to shared-cache-inval
	 * messages being processed during heap_open().  This is OK.  It's
	 * even possible for one of those lookups to find and enter the very
	 * same tuple we are trying to fetch here.  If that happens, we will
	 * enter a second copy of the tuple into the cache.  The first copy
	 * will never be referenced again, and will eventually age out of the
	 * cache, so there's no functional problem.  This case is rare enough
	 * that it's not worth expending extra cycles to detect.
	 */

	/*
	 * open the relation associated with the cache
	 */
	relation = heap_open(cache->cc_reloid, AccessShareLock);

	/*
	 * Pre-create cache entry header, and mark no tuple found.
	 * NOTE(review): if an elog error escapes before this entry is linked
	 * into the lists below, this allocation appears to be leaked --- confirm.
	 */
	ct = (CatCTup *) MemoryContextAlloc(CacheMemoryContext, sizeof(CatCTup));
	ct->negative = true;

	/*
	 * Scan the relation to find the tuple.  If there's an index, and if
	 * it's safe to do so, use the index.  Else do a heap scan.
	 */
	if ((RelationGetForm(relation))->relhasindex &&
		!IsIgnoringSystemIndexes() &&
		IndexScanOK(cache, cur_skey))
	{
		Relation	idesc;
		IndexScanDesc isd;
		RetrieveIndexResult indexRes;
		HeapTupleData tuple;
		Buffer		buffer;

		CACHE2_elog(DEBUG1, "SearchCatCache(%s): performing index scan",
					cache->cc_relname);

		/*
		 * For an index scan, sk_attno has to be set to the index
		 * attribute number(s), not the heap attribute numbers.  We assume
		 * that the index corresponds exactly to the cache keys (or its
		 * first N keys do, anyway).
		 */
		for (i = 0; i < cache->cc_nkeys; ++i)
			cur_skey[i].sk_attno = i + 1;

		idesc = index_openr(cache->cc_indname);
		isd = index_beginscan(idesc, false, cache->cc_nkeys, cur_skey);
		tuple.t_datamcxt = CurrentMemoryContext;
		tuple.t_data = NULL;
		/* loop until we find a visible tuple, or run out of index entries */
		while ((indexRes = index_getnext(isd, ForwardScanDirection)))
		{
			tuple.t_self = indexRes->heap_iptr;
			heap_fetch(relation, SnapshotNow, &tuple, &buffer, isd);
			pfree(indexRes);
			if (tuple.t_data != NULL)
			{
				/* Copy tuple into our context */
				oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
				heap_copytuple_with_tuple(&tuple, &ct->tuple);
				ct->negative = false;
				MemoryContextSwitchTo(oldcxt);
				ReleaseBuffer(buffer);
				break;
			}
		}
		index_endscan(isd);
		index_close(idesc);
	}
	else
	{
		HeapScanDesc sd;

		CACHE2_elog(DEBUG1, "SearchCatCache(%s): performing heap scan",
					cache->cc_relname);

		sd = heap_beginscan(relation, 0, SnapshotNow,
							cache->cc_nkeys, cur_skey);

		ntp = heap_getnext(sd, 0);

		if (HeapTupleIsValid(ntp))
		{
			/* Copy tuple into our context */
			oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
			heap_copytuple_with_tuple(ntp, &ct->tuple);
			ct->negative = false;
			MemoryContextSwitchTo(oldcxt);
			/* We should not free the result of heap_getnext... */
		}

		heap_endscan(sd);
	}

	/*
	 * close the relation
	 */
	heap_close(relation, AccessShareLock);

	/*
	 * scan is complete.  If tuple was not found, we need to build
	 * a fake tuple for the negative cache entry.  The fake tuple has
	 * the correct key columns, but nulls everywhere else.
	 */
	if (ct->negative)
	{
		TupleDesc	tupDesc = cache->cc_tupdesc;
		Datum	   *values;
		char	   *nulls;
		Oid			negOid = InvalidOid;

		values = (Datum *) palloc(tupDesc->natts * sizeof(Datum));
		nulls = (char *) palloc(tupDesc->natts * sizeof(char));

		/* start with all columns null ('n'), then fill in the key columns */
		memset(values, 0, tupDesc->natts * sizeof(Datum));
		memset(nulls, 'n', tupDesc->natts * sizeof(char));

		for (i = 0; i < cache->cc_nkeys; i++)
		{
			int		attindex = cache->cc_key[i];
			Datum	keyval = cur_skey[i].sk_argument;

			if (attindex > 0)
			{
				/*
				 * Here we must be careful in case the caller passed a
				 * C string where a NAME is wanted: convert the given
				 * argument to a correctly padded NAME.  Otherwise the
				 * memcpy() done in heap_formtuple could fall off the
				 * end of memory.
				 */
				if (cache->cc_isname[i])
				{
					Name	newval = (Name) palloc(NAMEDATALEN);

					namestrcpy(newval, DatumGetCString(keyval));
					keyval = NameGetDatum(newval);
				}
				values[attindex-1] = keyval;
				nulls[attindex-1] = ' ';
			}
			else
			{
				/* a nonpositive key must be the OID system attribute */
				Assert(attindex == ObjectIdAttributeNumber);
				negOid = DatumGetObjectId(keyval);
			}
		}

		ntp = heap_formtuple(tupDesc, values, nulls);

		oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
		heap_copytuple_with_tuple(ntp, &ct->tuple);
		ct->tuple.t_data->t_oid = negOid;
		MemoryContextSwitchTo(oldcxt);

		/* free the transient tuple and the padded NAME copies made above */
		heap_freetuple(ntp);
		for (i = 0; i < cache->cc_nkeys; i++)
		{
			if (cache->cc_isname[i])
				pfree(DatumGetName(values[cache->cc_key[i]-1]));
		}
		pfree(values);
		pfree(nulls);
	}

	/*
	 * Finish initializing the CatCTup header, and add it to the linked
	 * lists.
	 */
	ct->ct_magic = CT_MAGIC;
	ct->my_cache = cache;
	DLInitElem(&ct->lrulist_elem, (void *) ct);
	DLInitElem(&ct->cache_elem, (void *) ct);
	ct->refcount = 1;			/* count this first reference */
	ct->dead = false;
	ct->hash_value = hashValue;

	DLAddHead(&CacheHdr->ch_lrulist, &ct->lrulist_elem);
	DLAddHead(&cache->cc_bucket[hashIndex], &ct->cache_elem);

	/*
	 * If we've exceeded the desired size of the caches, try to throw away
	 * the least recently used entry.  NB: the newly-built entry cannot
	 * get thrown away here, because it has positive refcount.
	 */
	++cache->cc_ntup;
	if (++CacheHdr->ch_ntup > CacheHdr->ch_maxtup)
	{
		Dlelem	   *prevelt;

		/* walk LRU list tail-to-head, saving predecessor before removal */
		for (elt = DLGetTail(&CacheHdr->ch_lrulist); elt; elt = prevelt)
		{
			CatCTup    *oldct = (CatCTup *) DLE_VAL(elt);

			prevelt = DLGetPred(elt);

			if (oldct->refcount == 0)
			{
				CACHE2_elog(DEBUG1, "SearchCatCache(%s): Overflow, LRU removal",
							cache->cc_relname);
#ifdef CATCACHE_STATS
				oldct->my_cache->cc_discards++;
#endif
				CatCacheRemoveCTup(oldct->my_cache, oldct);
				if (CacheHdr->ch_ntup <= CacheHdr->ch_maxtup)
					break;
			}
		}
	}

	CACHE4_elog(DEBUG1, "SearchCatCache(%s): Contains %d/%d tuples",
				cache->cc_relname, cache->cc_ntup, CacheHdr->ch_ntup);

	if (ct->negative)
	{
		CACHE3_elog(DEBUG1, "SearchCatCache(%s): put neg entry in bucket %d",
					cache->cc_relname, hashIndex);

		/*
		 * We are not returning the new entry to the caller, so reset its
		 * refcount.  Note it would be uncool to set the refcount to 0
		 * before doing the extra-entry removal step above.
		 */
		ct->refcount = 0;		/* negative entries never have refs */

		return NULL;
	}

	CACHE3_elog(DEBUG1, "SearchCatCache(%s): put in bucket %d",
				cache->cc_relname, hashIndex);

#ifdef CATCACHE_STATS
	cache->cc_newloads++;
#endif

	return &ct->tuple;
}
1313
1314 /*
1315  *      ReleaseCatCache()
1316  *
1317  *      Decrement the reference count of a catcache entry (releasing the
1318  *      hold grabbed by a successful SearchCatCache).
1319  *
1320  *      NOTE: if compiled with -DCATCACHE_FORCE_RELEASE then catcache entries
1321  *      will be freed as soon as their refcount goes to zero.  In combination
1322  *      with aset.c's CLOBBER_FREED_MEMORY option, this provides a good test
1323  *      to catch references to already-released catcache entries.
1324  */
1325 void
1326 ReleaseCatCache(HeapTuple tuple)
1327 {
1328         CatCTup    *ct = (CatCTup *) (((char *) tuple) -
1329                                                                   offsetof(CatCTup, tuple));
1330
1331         /* Safety checks to ensure we were handed a cache entry */
1332         Assert(ct->ct_magic == CT_MAGIC);
1333         Assert(ct->refcount > 0);
1334
1335         ct->refcount--;
1336
1337         if (ct->refcount == 0
1338 #ifndef CATCACHE_FORCE_RELEASE
1339                 && ct->dead
1340 #endif
1341                 )
1342                 CatCacheRemoveCTup(ct->my_cache, ct);
1343 }
1344
1345 /*
1346  *      PrepareToInvalidateCacheTuple()
1347  *
1348  *      This is part of a rather subtle chain of events, so pay attention:
1349  *
1350  *      When a tuple is inserted or deleted, it cannot be flushed from the
1351  *      catcaches immediately, for reasons explained at the top of cache/inval.c.
1352  *      Instead we have to add entry(s) for the tuple to a list of pending tuple
1353  *      invalidations that will be done at the end of the command or transaction.
1354  *
1355  *      The lists of tuples that need to be flushed are kept by inval.c.  This
1356  *      routine is a helper routine for inval.c.  Given a tuple belonging to
1357  *      the specified relation, find all catcaches it could be in, compute the
1358  *      correct hash value for each such catcache, and call the specified function
1359  *      to record the cache id, hash value, and tuple ItemPointer in inval.c's
1360  *      lists.  CatalogCacheIdInvalidate will be called later, if appropriate,
1361  *      using the recorded information.
1362  *
1363  *      Note that it is irrelevant whether the given tuple is actually loaded
1364  *      into the catcache at the moment.  Even if it's not there now, it might
1365  *      be by the end of the command, or there might be a matching negative entry
1366  *      to flush --- or other backends' caches might have such entries --- so
1367  *      we have to make list entries to flush it later.
1368  *
1369  *      Also note that it's not an error if there are no catcaches for the
1370  *      specified relation.  inval.c doesn't know exactly which rels have
1371  *      catcaches --- it will call this routine for any tuple that's in a
1372  *      system relation.
1373  */
1374 void
1375 PrepareToInvalidateCacheTuple(Relation relation,
1376                                                           HeapTuple tuple,
1377                                                  void (*function) (int, uint32, ItemPointer, Oid))
1378 {
1379         CatCache   *ccp;
1380         Oid                     reloid;
1381
1382         CACHE1_elog(DEBUG1, "PrepareToInvalidateCacheTuple: called");
1383
1384         /*
1385          * sanity checks
1386          */
1387         Assert(RelationIsValid(relation));
1388         Assert(HeapTupleIsValid(tuple));
1389         Assert(PointerIsValid(function));
1390         Assert(CacheHdr != NULL);
1391
1392         reloid = RelationGetRelid(relation);
1393
1394         /* ----------------
1395          *      for each cache
1396          *         if the cache contains tuples from the specified relation
1397          *                 compute the tuple's hash value in this cache,
1398          *                 and call the passed function to register the information.
1399          * ----------------
1400          */
1401
1402         for (ccp = CacheHdr->ch_caches; ccp; ccp = ccp->cc_next)
1403         {
1404                 /* Just in case cache hasn't finished initialization yet... */
1405                 if (ccp->cc_tupdesc == NULL)
1406                         CatalogCacheInitializeCache(ccp);
1407
1408                 if (ccp->cc_reloid != reloid)
1409                         continue;
1410
1411                 (*function) (ccp->id,
1412                                          CatalogCacheComputeTupleHashValue(ccp, tuple),
1413                                          &tuple->t_self,
1414                                          ccp->cc_relisshared ? (Oid) 0 : MyDatabaseId);
1415         }
1416 }