1 /*-------------------------------------------------------------------------
2  *
3  * catcache.c
4  *        System catalog cache for tuples matching a key.
5  *
6  * Portions Copyright (c) 1996-2000, PostgreSQL, Inc
7  * Portions Copyright (c) 1994, Regents of the University of California
8  *
9  *
10  * IDENTIFICATION
11  *        $Header: /cvsroot/pgsql/src/backend/utils/cache/catcache.c,v 1.74 2001/01/05 22:54:37 tgl Exp $
12  *
13  *-------------------------------------------------------------------------
14  */
15 #include "postgres.h"
16
17 #include "access/genam.h"
18 #include "access/hash.h"
19 #include "access/heapam.h"
20 #include "access/valid.h"
21 #include "catalog/pg_operator.h"
22 #include "catalog/pg_type.h"
23 #include "catalog/catname.h"
24 #include "catalog/indexing.h"
25 #include "miscadmin.h"
26 #include "utils/builtins.h"
27 #include "utils/fmgroids.h"
28 #include "utils/catcache.h"
29 #include "utils/syscache.h"
30
31
32 static void CatCacheRemoveCTup(CatCache *cache, CatCTup *ct);
33 static Index CatalogCacheComputeHashIndex(CatCache *cache,
34                                                                                   ScanKey cur_skey);
35 static Index CatalogCacheComputeTupleHashIndex(CatCache *cache,
36                                                                                            HeapTuple tuple);
37 static void CatalogCacheInitializeCache(CatCache *cache);
38 static Datum cc_hashname(PG_FUNCTION_ARGS);
39
40 /* ----------------
41  *              variables, macros and other stuff
42  * ----------------
43  */
44
45 #ifdef CACHEDEBUG
46 #define CACHE1_elog(a,b)                                elog(a,b)
47 #define CACHE2_elog(a,b,c)                              elog(a,b,c)
48 #define CACHE3_elog(a,b,c,d)                    elog(a,b,c,d)
49 #define CACHE4_elog(a,b,c,d,e)                  elog(a,b,c,d,e)
50 #define CACHE5_elog(a,b,c,d,e,f)                elog(a,b,c,d,e,f)
51 #define CACHE6_elog(a,b,c,d,e,f,g)              elog(a,b,c,d,e,f,g)
52 #else
53 #define CACHE1_elog(a,b)
54 #define CACHE2_elog(a,b,c)
55 #define CACHE3_elog(a,b,c,d)
56 #define CACHE4_elog(a,b,c,d,e)
57 #define CACHE5_elog(a,b,c,d,e,f)
58 #define CACHE6_elog(a,b,c,d,e,f,g)
59 #endif
60
61 static CatCache *Caches = NULL; /* head of list of caches */
62
63
64 /* ----------------
65  *              EQPROC is used in CatalogCacheInitializeCache to find the equality
66  *              functions for system types that are used as cache key fields.
67  *              See also GetCCHashFunc, which should support the same set of types.
68  *
69  *              XXX this should be replaced by catalog lookups,
70  *              but that seems to pose considerable risk of circularity...
71  * ----------------
72  */
73 static const Oid eqproc[] = {
74         F_BOOLEQ, InvalidOid, F_CHAREQ, F_NAMEEQ, InvalidOid,
75         F_INT2EQ, F_INT2VECTOREQ, F_INT4EQ, F_OIDEQ, F_TEXTEQ,
76         F_OIDEQ, InvalidOid, InvalidOid, InvalidOid, F_OIDVECTOREQ
77 };
78
79 #define EQPROC(SYSTEMTYPEOID)   eqproc[(SYSTEMTYPEOID)-BOOLOID]
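
/*
 * For example, EQPROC(NAMEOID) is eqproc[NAMEOID - BOOLOID], i.e.
 * eqproc[19 - 16] = F_NAMEEQ.  The InvalidOid slots correspond to type
 * OIDs in that range (bytea, int8, tid, xid, cid) that are never used
 * as cache key types.
 */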
80
81 /* ----------------------------------------------------------------
82  *                                      internal support functions
83  * ----------------------------------------------------------------
84  */
85
86 static PGFunction
87 GetCCHashFunc(Oid keytype)
88 {
89         switch (keytype)
90         {
91                 case BOOLOID:
92                 case CHAROID:
93                         return hashchar;
94                 case NAMEOID:
95                         return cc_hashname;
96                 case INT2OID:
97                         return hashint2;
98                 case INT2VECTOROID:
99                         return hashint2vector;
100                 case INT4OID:
101                         return hashint4;
102                 case TEXTOID:
103                         return hashvarlena;
104                 case REGPROCOID:
105                 case OIDOID:
106                         return hashoid;
107                 case OIDVECTOROID:
108                         return hashoidvector;
109                 default:
110                         elog(FATAL, "GetCCHashFunc: type %u unsupported as catcache key",
111                                  keytype);
112                         return (PGFunction) NULL;
113         }
114 }
115
116 static Datum
117 cc_hashname(PG_FUNCTION_ARGS)
118 {
119
120         /*
121          * We need our own variant of hashname because we want to accept
122          * null-terminated C strings as search values for name fields. So, we
123          * have to make sure the data is correctly padded before we compute
124          * the hash value.
125          */
126         NameData        my_n;
127
128         namestrcpy(&my_n, NameStr(* PG_GETARG_NAME(0)));
129
130         return DirectFunctionCall1(hashname, NameGetDatum(&my_n));
131 }
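
/*
 * For example, hashname() hashes all NAMEDATALEN bytes of a Name, so
 * hashing a bare C-string search value such as "pg_class" directly would
 * include whatever bytes happen to follow its null terminator.  Copying
 * it into a zero-padded NameData first, as above, guarantees the search
 * key hashes the same as the stored name attribute does.
 */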
132
133
134 /*
135  * Standard routine for creating cache context if it doesn't exist yet
136  *
137  * There are a lot of places (probably far more than necessary) that check
138  * whether CacheMemoryContext exists yet and want to create it if not.
139  * We centralize knowledge of exactly how to create it here.
140  */
141 void
142 CreateCacheMemoryContext(void)
143 {
144         /* Purely for paranoia, check that context doesn't exist;
145          * caller probably did so already.
146          */
147         if (!CacheMemoryContext)
148                 CacheMemoryContext = AllocSetContextCreate(TopMemoryContext,
149                                                                                                    "CacheMemoryContext",
150                                                                                                    ALLOCSET_DEFAULT_MINSIZE,
151                                                                                                    ALLOCSET_DEFAULT_INITSIZE,
152                                                                                                    ALLOCSET_DEFAULT_MAXSIZE);
153 }
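
/*
 * For illustration, the usual caller idiom (used again further down in
 * this file) is:
 *
 *		if (!CacheMemoryContext)
 *			CreateCacheMemoryContext();
 *		oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
 *		... allocate long-lived cache data with palloc() ...
 *		MemoryContextSwitchTo(oldcxt);
 */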
154
155
156 /* --------------------------------
157  *              CatalogCacheInitializeCache
158  *
159  * This function does final initialization of a catcache: obtain the tuple
160  * descriptor and set up the hash and equality function links.  We assume
161  * that the relcache entry can be opened at this point!
162  * --------------------------------
163  */
164 #ifdef CACHEDEBUG
165 #define CatalogCacheInitializeCache_DEBUG1 \
166         elog(DEBUG, "CatalogCacheInitializeCache: cache @%p %s", cache, \
167                  cache->cc_relname)
168
169 #define CatalogCacheInitializeCache_DEBUG2 \
170 do { \
171                 if (cache->cc_key[i] > 0) { \
172                         elog(DEBUG, "CatalogCacheInitializeCache: load %d/%d w/%d, %u", \
173                                 i+1, cache->cc_nkeys, cache->cc_key[i], \
174                                  tupdesc->attrs[cache->cc_key[i] - 1]->atttypid); \
175                 } else { \
176                         elog(DEBUG, "CatalogCacheInitializeCache: load %d/%d w/%d", \
177                                 i+1, cache->cc_nkeys, cache->cc_key[i]); \
178                 } \
179 } while(0)
180
181 #else
182 #define CatalogCacheInitializeCache_DEBUG1
183 #define CatalogCacheInitializeCache_DEBUG2
184 #endif
185
186 static void
187 CatalogCacheInitializeCache(CatCache *cache)
188 {
189         Relation        relation;
190         MemoryContext oldcxt;
191         TupleDesc       tupdesc;
192         short           i;
193
194         CatalogCacheInitializeCache_DEBUG1;
195
196         /*
197          * Open the relation without locking --- we only need the tupdesc,
198          * which we assume will never change ...
199          */
200         relation = heap_openr(cache->cc_relname, NoLock);
201         Assert(RelationIsValid(relation));
202
203         /* ----------------
204          *      switch to the cache context so our allocations
205          *      do not vanish at the end of a transaction
206          * ----------------
207          */
208         if (!CacheMemoryContext)
209                 CreateCacheMemoryContext();
210
211         oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
212
213         /* ----------------
214          *      copy the relcache's tuple descriptor to permanent cache storage
215          * ----------------
216          */
217         tupdesc = CreateTupleDescCopyConstr(RelationGetDescr(relation));
218
219         /* ----------------
220          *      return to the caller's memory context and close the rel
221          * ----------------
222          */
223         MemoryContextSwitchTo(oldcxt);
224
225         heap_close(relation, NoLock);
226
227         CACHE3_elog(DEBUG, "CatalogCacheInitializeCache: %s, %d keys",
228                                 cache->cc_relname, cache->cc_nkeys);
229
230         /* ----------------
231          *      initialize cache's key information
232          * ----------------
233          */
234         for (i = 0; i < cache->cc_nkeys; ++i)
235         {
236                 Oid                     keytype;
237
238                 CatalogCacheInitializeCache_DEBUG2;
239
240                 if (cache->cc_key[i] > 0)
241                 {
242                         keytype = tupdesc->attrs[cache->cc_key[i] - 1]->atttypid;
243                 }
244                 else
245                 {
246                         if (cache->cc_key[i] != ObjectIdAttributeNumber)
247                                 elog(FATAL, "CatalogCacheInit: only sys attr supported is OID");
248                         keytype = OIDOID;
249                 }
250
251                 cache->cc_hashfunc[i] = GetCCHashFunc(keytype);
252                 /*
253                  * GetCCHashFunc accepted the key type, so it is safe to index eqproc[]
254                  */
255                 cache->cc_skey[i].sk_procedure = EQPROC(keytype);
256
257                 fmgr_info(cache->cc_skey[i].sk_procedure,
258                                   &cache->cc_skey[i].sk_func);
259                 cache->cc_skey[i].sk_nargs = cache->cc_skey[i].sk_func.fn_nargs;
260
261                 /* Initialize sk_attno suitably for HeapKeyTest() and heap scans */
262                 cache->cc_skey[i].sk_attno = cache->cc_key[i];
263
264                 CACHE4_elog(DEBUG, "CatalogCacheInit %s %d %p",
265                                         cache->cc_relname,
266                                         i,
267                                         cache);
268         }
269
270         /* ----------------
271          *      mark this cache fully initialized
272          * ----------------
273          */
274         cache->cc_tupdesc = tupdesc;
275 }
276
277 /* --------------------------------
278  *              CatalogCacheComputeHashIndex
279  * --------------------------------
280  */
281 static Index
282 CatalogCacheComputeHashIndex(CatCache *cache, ScanKey cur_skey)
283 {
284         uint32          hashIndex = 0;
285
286         CACHE4_elog(DEBUG, "CatalogCacheComputeHashIndex %s %d %p",
287                                 cache->cc_relname,
288                                 cache->cc_nkeys,
289                                 cache);
290
291         switch (cache->cc_nkeys)
292         {
293                 case 4:
294                         hashIndex ^=
295                                 DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[3],
296                                                            cur_skey[3].sk_argument)) << 9;
297                         /* FALLTHROUGH */
298                 case 3:
299                         hashIndex ^=
300                                 DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[2],
301                                                            cur_skey[2].sk_argument)) << 6;
302                         /* FALLTHROUGH */
303                 case 2:
304                         hashIndex ^=
305                                 DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[1],
306                                                            cur_skey[1].sk_argument)) << 3;
307                         /* FALLTHROUGH */
308                 case 1:
309                         hashIndex ^=
310                                 DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[0],
311                                                            cur_skey[0].sk_argument));
312                         break;
313                 default:
314                         elog(FATAL, "CCComputeHashIndex: %d cc_nkeys", cache->cc_nkeys);
315                         break;
316         }
317         hashIndex %= (uint32) cache->cc_size;
318         return (Index) hashIndex;
319 }
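
/*
 * In effect, for a cache with N keys the bucket number is
 *
 *		hash   = (h4 << 9) ^ (h3 << 6) ^ (h2 << 3) ^ h1	(unused keys omitted)
 *		bucket = hash % cc_size
 *
 * where hK is the datatype-specific hash of search key K.  The staggered
 * shifts keep identical per-key hash values from simply cancelling out
 * in the XOR.
 */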
320
321 /* --------------------------------
322  *              CatalogCacheComputeTupleHashIndex
323  * --------------------------------
324  */
325 static Index
326 CatalogCacheComputeTupleHashIndex(CatCache *cache,
327                                                                   HeapTuple tuple)
328 {
329         ScanKeyData cur_skey[4];
330         bool            isNull = false;
331
332         /* Copy pre-initialized overhead data for scankey */
333         memcpy(cur_skey, cache->cc_skey, sizeof(cur_skey));
334
335         /* Now extract key fields from tuple, insert into scankey */
336         switch (cache->cc_nkeys)
337         {
338                 case 4:
339                         cur_skey[3].sk_argument =
340                                 (cache->cc_key[3] == ObjectIdAttributeNumber)
341                                 ? ObjectIdGetDatum(tuple->t_data->t_oid)
342                                 : fastgetattr(tuple,
343                                                           cache->cc_key[3],
344                                                           cache->cc_tupdesc,
345                                                           &isNull);
346                         Assert(!isNull);
347                         /* FALLTHROUGH */
348                 case 3:
349                         cur_skey[2].sk_argument =
350                                 (cache->cc_key[2] == ObjectIdAttributeNumber)
351                                 ? ObjectIdGetDatum(tuple->t_data->t_oid)
352                                 : fastgetattr(tuple,
353                                                           cache->cc_key[2],
354                                                           cache->cc_tupdesc,
355                                                           &isNull);
356                         Assert(!isNull);
357                         /* FALLTHROUGH */
358                 case 2:
359                         cur_skey[1].sk_argument =
360                                 (cache->cc_key[1] == ObjectIdAttributeNumber)
361                                 ? ObjectIdGetDatum(tuple->t_data->t_oid)
362                                 : fastgetattr(tuple,
363                                                           cache->cc_key[1],
364                                                           cache->cc_tupdesc,
365                                                           &isNull);
366                         Assert(!isNull);
367                         /* FALLTHROUGH */
368                 case 1:
369                         cur_skey[0].sk_argument =
370                                 (cache->cc_key[0] == ObjectIdAttributeNumber)
371                                 ? ObjectIdGetDatum(tuple->t_data->t_oid)
372                                 : fastgetattr(tuple,
373                                                           cache->cc_key[0],
374                                                           cache->cc_tupdesc,
375                                                           &isNull);
376                         Assert(!isNull);
377                         break;
378                 default:
379                         elog(FATAL, "CCComputeTupleHashIndex: %d cc_nkeys",
380                                  cache->cc_nkeys);
381                         break;
382         }
383
384         return CatalogCacheComputeHashIndex(cache, cur_skey);
385 }
386
387 /* --------------------------------
388  *              CatCacheRemoveCTup
389  * --------------------------------
390  */
391 static void
392 CatCacheRemoveCTup(CatCache *cache, CatCTup *ct)
393 {
394         Assert(ct->refcount == 0);
395
396         /* delink from linked lists */
397         DLRemove(&ct->lrulist_elem);
398         DLRemove(&ct->cache_elem);
399
400         /* free associated tuple data */
401         if (ct->tuple.t_data != NULL)
402                 pfree(ct->tuple.t_data);
403         pfree(ct);
404
405         --cache->cc_ntup;
406 }
407
408 /* --------------------------------
409  *      CatalogCacheIdInvalidate()
410  *
411  *      Invalidate a tuple given a cache id.  The cache id should always be
412  *      found here, whether or not that cache has opened its relation yet.
413  *      Of course, if the cache has not yet opened its relation, it holds
414  *      no tuples, so there is nothing to invalidate.
415  * --------------------------------
416  */
417 void
418 CatalogCacheIdInvalidate(int cacheId,
419                                                  Index hashIndex,
420                                                  ItemPointer pointer)
421 {
422         CatCache   *ccp;
423
424         /* ----------------
425          *      sanity checks
426          * ----------------
427          */
428         Assert(hashIndex < NCCBUCK);
429         Assert(ItemPointerIsValid(pointer));
430         CACHE1_elog(DEBUG, "CatalogCacheIdInvalidate: called");
431
432         /* ----------------
433          *      inspect caches to find the proper cache
434          * ----------------
435          */
436         for (ccp = Caches; ccp; ccp = ccp->cc_next)
437         {
438                 Dlelem     *elt,
439                                    *nextelt;
440
441                 if (cacheId != ccp->id)
442                         continue;
443                 /* ----------------
444                  *      inspect the hash bucket until we find a match or exhaust
445                  * ----------------
446                  */
447                 for (elt = DLGetHead(&ccp->cc_cache[hashIndex]); elt; elt = nextelt)
448                 {
449                         CatCTup    *ct = (CatCTup *) DLE_VAL(elt);
450
451                         nextelt = DLGetSucc(elt);
452
453                         if (ItemPointerEquals(pointer, &ct->tuple.t_self))
454                         {
455                                 if (ct->refcount > 0)
456                                         ct->dead = true;
457                                 else
458                                         CatCacheRemoveCTup(ccp, ct);
459                                 CACHE1_elog(DEBUG, "CatalogCacheIdInvalidate: invalidated");
460                                 /* could be multiple matches, so keep looking! */
461                         }
462                 }
463                 break;                                  /* need only search this one cache */
464         }
465 }
466
467 /* ----------------------------------------------------------------
468  *                                         public functions
469  *
470  *              AtEOXact_CatCache
471  *              ResetSystemCache
472  *              InitCatCache
473  *              SearchCatCache
474  *              ReleaseCatCache
475  *              RelationInvalidateCatalogCacheTuple
476  * ----------------------------------------------------------------
477  */
478
479
480 /* --------------------------------
481  *              AtEOXact_CatCache
482  *
483  * Clean up catcaches at end of transaction (either commit or abort)
484  *
485  * We scan the caches to reset refcounts to zero.  This is of course
486  * necessary in the abort case, since elog() may have interrupted routines.
487  * In the commit case, any nonzero counts indicate failure to call
488  * ReleaseSysCache, so we put out a notice for debugging purposes.
489  * --------------------------------
490  */
491 void
492 AtEOXact_CatCache(bool isCommit)
493 {
494         CatCache *cache;
495
496         for (cache = Caches; cache; cache = cache->cc_next)
497         {
498                 Dlelem     *elt,
499                                    *nextelt;
500
501                 for (elt = DLGetHead(&cache->cc_lrulist); elt; elt = nextelt)
502                 {
503                         CatCTup    *ct = (CatCTup *) DLE_VAL(elt);
504
505                         nextelt = DLGetSucc(elt);
506
507                         if (ct->refcount != 0)
508                         {
509                                 if (isCommit)
510                                         elog(NOTICE, "Cache reference leak: cache %s (%d), tuple %u has count %d",
511                                                  cache->cc_relname, cache->id,
512                                                  ct->tuple.t_data->t_oid,
513                                                  ct->refcount);
514                                 ct->refcount = 0;
515                         }
516
517                         /* Clean up any now-deletable dead entries */
518                         if (ct->dead)
519                                 CatCacheRemoveCTup(cache, ct);
520                 }
521         }
522 }
523
524 /* --------------------------------
525  *              ResetSystemCache
526  *
527  * Reset caches when a shared cache inval event forces it
528  * --------------------------------
529  */
530 void
531 ResetSystemCache(void)
532 {
533         CatCache *cache;
534
535         CACHE1_elog(DEBUG, "ResetSystemCache called");
536
537         /* ----------------
538          *      here we purge the contents of all the caches
539          *
540          *      for each system cache
541          *              for each tuple
542          *                      remove the tuple, or at least mark it dead
543          * ----------------
544          */
545         for (cache = Caches; cache; cache = cache->cc_next)
546         {
547                 Dlelem     *elt,
548                                    *nextelt;
549
550                 for (elt = DLGetHead(&cache->cc_lrulist); elt; elt = nextelt)
551                 {
552                         CatCTup    *ct = (CatCTup *) DLE_VAL(elt);
553
554                         nextelt = DLGetSucc(elt);
555
556                         if (ct->refcount > 0)
557                                 ct->dead = true;
558                         else
559                                 CatCacheRemoveCTup(cache, ct);
560                 }
561         }
562
563         CACHE1_elog(DEBUG, "end of ResetSystemCache call");
564 }
565
566 /* --------------------------------
567  *              SystemCacheRelationFlushed
568  *
569  *      This is called by RelationFlushRelation() to clear out cached information
570  *      about a relation being dropped.  (This could be a DROP TABLE command,
571  *      or a temp table being dropped at end of transaction, or a table created
572  *      during the current transaction that is being dropped because of abort.)
573  *      Remove all cache entries relevant to the specified relation OID.
574  *
575  *      A special case occurs when relId is itself one of the cacheable system
576  *      tables --- although those'll never be dropped, they can get flushed from
577  *      the relcache (VACUUM causes this, for example).  In that case we need
578  *      to flush all cache entries from that table.  The brute-force method
579  *      currently used takes care of that quite handily.  (At one point we
580  *      also tried to force re-execution of CatalogCacheInitializeCache for
581  *      the cache(s) on that table.  This is a bad idea since it leads to all
582  *      kinds of trouble if a cache flush occurs while loading cache entries.
583  *      We now avoid the need to do it by copying cc_tupdesc out of the relcache,
584  *      rather than relying on the relcache to keep a tupdesc for us.  Of course
585  *      this assumes the tupdesc of a cacheable system table will not change...)
586  * --------------------------------
587  */
588 void
589 SystemCacheRelationFlushed(Oid relId)
590 {
591
592         /*
593          * XXX Ideally we'd search the caches and just zap entries that
594          * actually refer to or come from the indicated relation.  For now, we
595          * take the brute-force approach: just flush the caches entirely.
596          */
597         ResetSystemCache();
598 }
599
600 /* --------------------------------
601  *              InitCatCache
602  *
603  *      This allocates and initializes a cache for a system catalog relation.
604  *      Actually, the cache is only partially initialized to avoid opening the
605  *      relation.  The relation will be opened and the rest of the cache
606  *      structure initialized on the first access.
607  * --------------------------------
608  */
609 #ifdef CACHEDEBUG
610 #define InitCatCache_DEBUG1 \
611 do { \
612         elog(DEBUG, "InitCatCache: rel=%s id=%d nkeys=%d size=%d\n", \
613                 cp->cc_relname, cp->id, cp->cc_nkeys, cp->cc_size); \
614 } while(0)
615
616 #else
617 #define InitCatCache_DEBUG1
618 #endif
619
620 CatCache *
621 InitCatCache(int id,
622                          char *relname,
623                          char *indname,
624                          int nkeys,
625                          int *key)
626 {
627         CatCache   *cp;
628         MemoryContext oldcxt;
629         int                     i;
630
631         /* ----------------
632          *      first switch to the cache context so our allocations
633          *      do not vanish at the end of a transaction
634          * ----------------
635          */
636         if (!CacheMemoryContext)
637                 CreateCacheMemoryContext();
638
639         oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
640
641         /* ----------------
642          *      allocate a new cache structure
643          * ----------------
644          */
645         cp = (CatCache *) palloc(sizeof(CatCache));
646         MemSet((char *) cp, 0, sizeof(CatCache));
647
648         /* ----------------
649          *      initialize the cache buckets (each bucket is a list header)
650          *      and the LRU tuple list
651          * ----------------
652          */
653         DLInitList(&cp->cc_lrulist);
654         for (i = 0; i < NCCBUCK; ++i)
655                 DLInitList(&cp->cc_cache[i]);
656
657         /* ----------------
658          *      Caches is the pointer to the head of the list of all the
659          *      system caches.  here we add the new cache to the top of the list.
660          * ----------------
661          */
662         cp->cc_next = Caches;           /* list of caches (single link) */
663         Caches = cp;
664
665         /* ----------------
666          *      initialize the cache's relation information for the relation
667          *      corresponding to this cache, and initialize some of the new
668          *      cache's other internal fields.  But don't open the relation yet.
669          * ----------------
670          */
671         cp->cc_relname = relname;
672         cp->cc_indname = indname;
673         cp->cc_tupdesc = (TupleDesc) NULL;
674         cp->id = id;
675         cp->cc_maxtup = MAXTUP;
676         cp->cc_size = NCCBUCK;
677         cp->cc_nkeys = nkeys;
678         for (i = 0; i < nkeys; ++i)
679                 cp->cc_key[i] = key[i];
680
681         /* ----------------
682          *      all done.  new cache is initialized.  print some debugging
683          *      information, if appropriate.
684          * ----------------
685          */
686         InitCatCache_DEBUG1;
687
688         /* ----------------
689          *      back to the old context before we return...
690          * ----------------
691          */
692         MemoryContextSwitchTo(oldcxt);
693
694         return cp;
695 }
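
/*
 * For illustration, a caller (in practice the syscache code) creates a
 * cache roughly like this, where MY_CACHE_ID and the index name are
 * placeholders rather than real catalog names:
 *
 *		static int	relname_key[4] = {Anum_pg_class_relname, 0, 0, 0};
 *		CatCache   *cp;
 *
 *		cp = InitCatCache(MY_CACHE_ID, RelationRelationName,
 *						  "my_relname_index", 1, relname_key);
 *
 * The underlying relation is not opened until the first SearchCatCache
 * call reaches CatalogCacheInitializeCache.
 */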
696
697
698 /* --------------------------------
699  *              IndexScanOK
700  *
701  *              This function checks for tuples that will be fetched by
702  *              IndexSupportInitialize() during relcache initialization for
703  *              certain system indexes that support critical syscaches.
704  *              We can't use an indexscan to fetch these, else we'll get into
705  *              infinite recursion.  A plain heap scan will work, however.
706  * --------------------------------
707  */
708 static bool
709 IndexScanOK(CatCache *cache, ScanKey cur_skey)
710 {
711         if (cache->id == INDEXRELID)
712         {
713                 static Oid      indexSelfOid = InvalidOid;
714
715                 /* One-time lookup of the OID of pg_index_indexrelid_index */
716                 if (!OidIsValid(indexSelfOid))
717                 {
718                         Relation        rel;
719                         ScanKeyData key;
720                         HeapScanDesc sd;
721                         HeapTuple       ntp;
722
723                         rel = heap_openr(RelationRelationName, AccessShareLock);
724                         ScanKeyEntryInitialize(&key, 0, Anum_pg_class_relname,
725                                                                    F_NAMEEQ,
726                                                                    PointerGetDatum(IndexRelidIndex));
727                         sd = heap_beginscan(rel, false, SnapshotNow, 1, &key);
728                         ntp = heap_getnext(sd, 0);
729                         if (!HeapTupleIsValid(ntp))
730                                 elog(ERROR, "SearchSelfReferences: %s not found in %s",
731                                          IndexRelidIndex, RelationRelationName);
732                         indexSelfOid = ntp->t_data->t_oid;
733                         heap_endscan(sd);
734                         heap_close(rel, AccessShareLock);
735                 }
736
737                 /* Looking for pg_index_indexrelid_index? */
738                 if (DatumGetObjectId(cur_skey[0].sk_argument) == indexSelfOid)
739                         return false;
740         }
741         else if (cache->id == OPEROID)
742         {
743                 /* Looking for an OID comparison function? */
744                 Oid                     lookup_oid = DatumGetObjectId(cur_skey[0].sk_argument);
745
746                 if (lookup_oid >= MIN_OIDCMP && lookup_oid <= MAX_OIDCMP)
747                         return false;
748         }
749
750         /* Normal case, allow index scan */
751         return true;
752 }
753
754 /* --------------------------------
755  *              SearchCatCache
756  *
757  *              This call searches a system cache for a tuple, opening the relation
758  *              if necessary (the first access to a particular cache).
759  * --------------------------------
760  */
761 HeapTuple
762 SearchCatCache(CatCache *cache,
763                            Datum v1,
764                            Datum v2,
765                            Datum v3,
766                            Datum v4)
767 {
768         ScanKeyData cur_skey[4];
769         Index           hash;
770         Dlelem     *elt;
771         CatCTup    *ct;
772         HeapTuple       ntp;
773         Relation        relation;
774         MemoryContext oldcxt;
775
776         /* ----------------
777          *      one-time startup overhead
778          * ----------------
779          */
780         if (cache->cc_tupdesc == NULL)
781                 CatalogCacheInitializeCache(cache);
782
783         /* ----------------
784          *      initialize the search key information
785          * ----------------
786          */
787         memcpy(cur_skey, cache->cc_skey, sizeof(cur_skey));
788         cur_skey[0].sk_argument = v1;
789         cur_skey[1].sk_argument = v2;
790         cur_skey[2].sk_argument = v3;
791         cur_skey[3].sk_argument = v4;
792
793         /* ----------------
794          *      find the hash bucket in which to look for the tuple
795          * ----------------
796          */
797         hash = CatalogCacheComputeHashIndex(cache, cur_skey);
798
799         /* ----------------
800          *      scan the hash bucket until we find a match or exhaust our tuples
801          * ----------------
802          */
803         for (elt = DLGetHead(&cache->cc_cache[hash]);
804                  elt;
805                  elt = DLGetSucc(elt))
806         {
807                 bool            res;
808
809                 ct = (CatCTup *) DLE_VAL(elt);
810
811                 if (ct->dead)
812                         continue;                       /* ignore dead entries */
813
814                 /* ----------------
815                  *      see if the cached tuple matches our key.
816                  *      (should we be worried about time ranges? -cim 10/2/90)
817                  * ----------------
818                  */
819                 HeapKeyTest(&ct->tuple,
820                                         cache->cc_tupdesc,
821                                         cache->cc_nkeys,
822                                         cur_skey,
823                                         res);
824                 if (! res)
825                         continue;
826
827                 /* ----------------
828                  *      we found a tuple in the cache: bump its refcount, move it to
829                  *      the front of the LRU list, and return it.  We also move it
830                  *      to the front of the list for its hashbucket, in order to speed
831                  *      subsequent searches.  (The most frequently accessed elements
832                  *      in any hashbucket will tend to be near the front of the
833                  *      hashbucket's list.)
834                  * ----------------
835                  */
836                 ct->refcount++;
837
838                 DLMoveToFront(&ct->lrulist_elem);
839                 DLMoveToFront(&ct->cache_elem);
840
841 #ifdef CACHEDEBUG
842                 CACHE3_elog(DEBUG, "SearchCatCache(%s): found in bucket %d",
843                                         cache->cc_relname, hash);
844 #endif   /* CACHEDEBUG */
845
846                 return &ct->tuple;
847         }
848
849         /* ----------------
850          *      Tuple was not found in cache, so we have to try and
851          *      retrieve it directly from the relation.  If it's found,
852          *      we add it to the cache.
853          *
854          *      NOTE: it is possible for recursive cache lookups to occur while
855          *      reading the relation --- for example, due to shared-cache-inval
856          *      messages being processed during heap_open().  This is OK.  It's
857          *      even possible for one of those lookups to find and enter the
858          *      very same tuple we are trying to fetch here.  If that happens,
859          *      we will enter a second copy of the tuple into the cache.  The
860          *      first copy will never be referenced again, and will eventually
861          *      age out of the cache, so there's no functional problem.  This case
862          *      is rare enough that it's not worth expending extra cycles to detect.
863          * ----------------
864          */
865
866         /* ----------------
867          *      open the relation associated with the cache
868          * ----------------
869          */
870         relation = heap_openr(cache->cc_relname, AccessShareLock);
871
872         /* ----------------
873          *      Scan the relation to find the tuple.  If there's an index, and
874          *      if it's safe to do so, use the index.  Else do a heap scan.
875          * ----------------
876          */
877         ct = NULL;
878
879         if ((RelationGetForm(relation))->relhasindex &&
880                 !IsIgnoringSystemIndexes() &&
881                 IndexScanOK(cache, cur_skey))
882         {
883                 Relation        idesc;
884                 IndexScanDesc isd;
885                 RetrieveIndexResult indexRes;
886                 HeapTupleData tuple;
887                 Buffer          buffer;
888                 int                     i;
889
890                 CACHE2_elog(DEBUG, "SearchCatCache(%s): performing index scan",
891                                         cache->cc_relname);
892
893                 /*
894                  * For an index scan, sk_attno has to be set to the index attribute
895                  * number(s), not the heap attribute numbers.  We assume that the
896                  * index corresponds exactly to the cache keys (or its first N
897                  * keys do, anyway).
898                  */
899                 for (i = 0; i < cache->cc_nkeys; ++i)
900                         cur_skey[i].sk_attno = i+1;
901
902                 idesc = index_openr(cache->cc_indname);
903                 isd = index_beginscan(idesc, false, cache->cc_nkeys, cur_skey);
904                 tuple.t_datamcxt = CurrentMemoryContext;
905                 tuple.t_data = NULL;
906                 while ((indexRes = index_getnext(isd, ForwardScanDirection)))
907                 {
908                         tuple.t_self = indexRes->heap_iptr;
909                         heap_fetch(relation, SnapshotNow, &tuple, &buffer);
910                         pfree(indexRes);
911                         if (tuple.t_data != NULL)
912                         {
913                                 /* Copy tuple into our context */
914                                 oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
915                                 ct = (CatCTup *) palloc(sizeof(CatCTup));
916                                 heap_copytuple_with_tuple(&tuple, &ct->tuple);
917                                 MemoryContextSwitchTo(oldcxt);
918                                 ReleaseBuffer(buffer);
919                                 break;
920                         }
921                 }
922                 index_endscan(isd);
923                 index_close(idesc);
924         }
925         else
926         {
927                 HeapScanDesc sd;
928
929                 CACHE2_elog(DEBUG, "SearchCatCache(%s): performing heap scan",
930                                         cache->cc_relname);
931
932                 sd = heap_beginscan(relation, 0, SnapshotNow,
933                                                         cache->cc_nkeys, cur_skey);
934
935                 ntp = heap_getnext(sd, 0);
936
937                 if (HeapTupleIsValid(ntp))
938                 {
939                         /* Copy tuple into our context */
940                         oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
941                         ct = (CatCTup *) palloc(sizeof(CatCTup));
942                         heap_copytuple_with_tuple(ntp, &ct->tuple);
943                         MemoryContextSwitchTo(oldcxt);
944                         /* We should not free the result of heap_getnext... */
945                 }
946
947                 heap_endscan(sd);
948         }
949
950         /* ----------------
951          *      close the relation
952          * ----------------
953          */
954         heap_close(relation, AccessShareLock);
955
956         /* ----------------
957          *      scan is complete.  if tup was found, we can add it to the cache.
958          * ----------------
959          */
960         if (ct == NULL)
961                 return NULL;
962
963         /* ----------------
964          *      Finish initializing the CatCTup header, and add it to the
965          *      linked lists.
966          * ----------------
967          */
968         CACHE1_elog(DEBUG, "SearchCatCache: found tuple");
969
970         ct->ct_magic = CT_MAGIC;
971         DLInitElem(&ct->lrulist_elem, (void *) ct);
972         DLInitElem(&ct->cache_elem, (void *) ct);
973         ct->refcount = 1;                       /* count this first reference */
974         ct->dead = false;
975
976         DLAddHead(&cache->cc_lrulist, &ct->lrulist_elem);
977         DLAddHead(&cache->cc_cache[hash], &ct->cache_elem);
978
979         /* ----------------
980          *      If we've exceeded the desired size of this cache,
981          *      try to throw away the least recently used entry.
982          * ----------------
983          */
984         if (++cache->cc_ntup > cache->cc_maxtup)
985         {
986                 for (elt = DLGetTail(&cache->cc_lrulist);
987                          elt;
988                          elt = DLGetPred(elt))
989                 {
990                         CatCTup    *oldct = (CatCTup *) DLE_VAL(elt);
991
992                         if (oldct->refcount == 0)
993                         {
994                                 CACHE2_elog(DEBUG, "SearchCatCache(%s): Overflow, LRU removal",
995                                                         cache->cc_relname);
996                                 CatCacheRemoveCTup(cache, oldct);
997                                 break;
998                         }
999                 }
1000         }
1001
1002         CACHE4_elog(DEBUG, "SearchCatCache(%s): Contains %d/%d tuples",
1003                                 cache->cc_relname, cache->cc_ntup, cache->cc_maxtup);
1004         CACHE3_elog(DEBUG, "SearchCatCache(%s): put in bucket %d",
1005                                 cache->cc_relname, hash);
1006
1007         return &ct->tuple;
1008 }
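
/*
 * For illustration, every successful lookup must eventually be paired
 * with a release.  A direct caller (most code goes through the syscache.c
 * wrappers instead) would look roughly like this, "cache" being a
 * CatCache built by InitCatCache over a single name key:
 *
 *		HeapTuple	tup;
 *
 *		tup = SearchCatCache(cache, PointerGetDatum(relname), 0, 0, 0);
 *		if (HeapTupleIsValid(tup))
 *		{
 *			... read fields out of tup, but do not modify or pfree it ...
 *			ReleaseCatCache(tup);
 *		}
 */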
1009
1010 /* --------------------------------
1011  *      ReleaseCatCache()
1012  *
1013  *      Decrement the reference count of a catcache entry (releasing the
1014  *      hold grabbed by a successful SearchCatCache).
1015  *
1016  *      NOTE: if compiled with -DCATCACHE_FORCE_RELEASE then catcache entries
1017  *      will be freed as soon as their refcount goes to zero.  In combination
1018  *      with aset.c's CLOBBER_FREED_MEMORY option, this provides a good test
1019  *      to catch references to already-released catcache entries.
1020  * --------------------------------
1021  */
1022 void
1023 ReleaseCatCache(HeapTuple tuple)
1024 {
1025         CatCTup    *ct = (CatCTup *) (((char *) tuple) -
1026                                                                   offsetof(CatCTup, tuple));
1027
1028         /* Safety checks to ensure we were handed a cache entry */
1029         Assert(ct->ct_magic == CT_MAGIC);
1030         Assert(ct->refcount > 0);
1031
1032         ct->refcount--;
1033
1034         if (ct->refcount == 0
1035 #ifndef CATCACHE_FORCE_RELEASE
1036                 && ct->dead
1037 #endif
1038                 )
1039         {
1040                 /* We can find the associated cache using the dllist pointers */
1041                 Dllist *lru = DLGetListHdr(&ct->lrulist_elem);
1042                 CatCache *cache = (CatCache *) (((char *) lru) -
1043                                                                                 offsetof(CatCache, cc_lrulist));
1044
1045                 CatCacheRemoveCTup(cache, ct);
1046         }
1047 }
1048
1049 /* --------------------------------
1050  *      PrepareToInvalidateCacheTuple()
1051  *
1052  *      This is part of a rather subtle chain of events, so pay attention:
1053  *
1054  *      When a tuple is updated or deleted, it cannot be flushed from the
1055  *      catcaches immediately, for reasons explained at the top of inval.c.
1056  *      Instead we have to add entries for the tuple to a list of pending tuple
1057  *      invalidations that will be done at the end of the command or transaction.
1058  *
1059  *      The lists of tuples that need to be flushed are kept by inval.c.  This
1060  *      routine is a helper routine for inval.c.  Given a tuple belonging to
1061  *      the specified relation, find all catcaches it could be in, compute the
1062  *      correct hashindex for each such catcache, and call the specified function
1063  *      to record the cache id, hashindex, and tuple ItemPointer in inval.c's
1064  *      lists.  CatalogCacheIdInvalidate will be called later, if appropriate,
1065  *      using the recorded information.
1066  *
1067  *      Note that it is irrelevant whether the given tuple is actually loaded
1068  *      into the catcache at the moment.  Even if it's not there now, it might
1069  *      be by the end of the command, so we have to be prepared to flush it.
1070  *
1071  *      Also note that it's not an error if there are no catcaches for the
1072  *      specified relation.  inval.c doesn't know exactly which rels have
1073  *      catcaches --- it will call this routine for any tuple that's in a
1074  *      system relation.
1075  * --------------------------------
1076  */
1077 void
1078 PrepareToInvalidateCacheTuple(Relation relation,
1079                                                           HeapTuple tuple,
1080                                                           void (*function) (int, Index, ItemPointer))
1081 {
1082         CatCache   *ccp;
1083
1084         /* ----------------
1085          *      sanity checks
1086          * ----------------
1087          */
1088         Assert(RelationIsValid(relation));
1089         Assert(HeapTupleIsValid(tuple));
1090         Assert(PointerIsValid(function));
1091         CACHE1_elog(DEBUG, "PrepareToInvalidateCacheTuple: called");
1092
1093         /* ----------------
1094          *      for each cache
1095          *         if the cache contains tuples from the specified relation
1096          *                 compute the tuple's hash index in this cache,
1097          *                 and call the passed function to register the information.
1098          * ----------------
1099          */
1100
1101         for (ccp = Caches; ccp; ccp = ccp->cc_next)
1102         {
1103                 if (strcmp(ccp->cc_relname, RelationGetRelationName(relation)) != 0)
1104                         continue;
1105
1106                 /* Just in case cache hasn't finished initialization yet... */
1107                 if (ccp->cc_tupdesc == NULL)
1108                         CatalogCacheInitializeCache(ccp);
1109
1110                 (*function) (ccp->id,
1111                                          CatalogCacheComputeTupleHashIndex(ccp, tuple),
1112                                          &tuple->t_self);
1113         }
1114 }
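
/*
 * For illustration, inval.c supplies the registration callback; the name
 * used here is a placeholder for whatever routine appends to its pending
 * list:
 *
 *		static void
 *		RecordCatalogInval(int cacheId, Index hashIndex, ItemPointer pointer)
 *		{
 *			... remember (cacheId, hashIndex, *pointer) for later ...
 *		}
 *
 *		PrepareToInvalidateCacheTuple(relation, tuple, RecordCatalogInval);
 *
 * At end of command or transaction, CatalogCacheIdInvalidate() is called
 * with each recorded (cacheId, hashIndex, pointer) triple to flush any
 * matching cache entries.
 */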