1 /*-------------------------------------------------------------------------
2  *
3  * catcache.c
4  *        System catalog cache for tuples matching a key.
5  *
6  * Portions Copyright (c) 1996-2000, PostgreSQL, Inc
7  * Portions Copyright (c) 1994, Regents of the University of California
8  *
9  *
10  * IDENTIFICATION
11  *        $Header: /cvsroot/pgsql/src/backend/utils/cache/catcache.c,v 1.71 2000/11/10 00:33:10 tgl Exp $
12  *
13  *-------------------------------------------------------------------------
14  */
15 #include "postgres.h"
16
17 #include "access/genam.h"
18 #include "access/hash.h"
19 #include "access/heapam.h"
20 #include "access/valid.h"
21 #include "catalog/pg_operator.h"
22 #include "catalog/pg_type.h"
23 #include "catalog/catname.h"
24 #include "catalog/indexing.h"
25 #include "miscadmin.h"
26 #include "utils/builtins.h"
27 #include "utils/fmgroids.h"
28 #include "utils/catcache.h"
29 #include "utils/syscache.h"
30
31 static void CatCacheRemoveCTup(CatCache *cache, Dlelem *e);
32 static Index CatalogCacheComputeHashIndex(CatCache *cache,
33                                                                                   ScanKey cur_skey);
34 static Index CatalogCacheComputeTupleHashIndex(CatCache *cache,
35                                                                                            HeapTuple tuple);
36 static void CatalogCacheInitializeCache(CatCache *cache);
37 static Datum cc_hashname(PG_FUNCTION_ARGS);
38
39 /* ----------------
40  *              variables, macros and other stuff
41  * ----------------
42  */
43
44 #ifdef CACHEDEBUG
45 #define CACHE1_elog(a,b)                                elog(a,b)
46 #define CACHE2_elog(a,b,c)                              elog(a,b,c)
47 #define CACHE3_elog(a,b,c,d)                    elog(a,b,c,d)
48 #define CACHE4_elog(a,b,c,d,e)                  elog(a,b,c,d,e)
49 #define CACHE5_elog(a,b,c,d,e,f)                elog(a,b,c,d,e,f)
50 #define CACHE6_elog(a,b,c,d,e,f,g)              elog(a,b,c,d,e,f,g)
51 #else
52 #define CACHE1_elog(a,b)
53 #define CACHE2_elog(a,b,c)
54 #define CACHE3_elog(a,b,c,d)
55 #define CACHE4_elog(a,b,c,d,e)
56 #define CACHE5_elog(a,b,c,d,e,f)
57 #define CACHE6_elog(a,b,c,d,e,f,g)
58 #endif
59
60 static CatCache *Caches = NULL; /* head of list of caches */
61
62
63 /* ----------------
64  *              EQPROC is used in CatalogCacheInitializeCache to find the equality
65  *              functions for system types that are used as cache key fields.
66  *              See also GetCCHashFunc, which should support the same set of types.
67  *
68  *              XXX this should be replaced by catalog lookups,
69  *              but that seems to pose considerable risk of circularity...
70  * ----------------
71  */
72 static const Oid eqproc[] = {
73         F_BOOLEQ, InvalidOid, F_CHAREQ, F_NAMEEQ, InvalidOid,
74         F_INT2EQ, F_INT2VECTOREQ, F_INT4EQ, F_OIDEQ, F_TEXTEQ,
75         F_OIDEQ, InvalidOid, InvalidOid, InvalidOid, F_OIDVECTOREQ
76 };
77
78 #define EQPROC(SYSTEMTYPEOID)   eqproc[(SYSTEMTYPEOID)-BOOLOID]
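
/*
 * For example (illustrative only): NAMEOID lies three OIDs above BOOLOID,
 * so EQPROC(NAMEOID) expands to eqproc[NAMEOID - BOOLOID] == eqproc[3],
 * which is F_NAMEEQ, the equality procedure for name-typed cache keys.
 */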
79
80 /* ----------------------------------------------------------------
81  *                                      internal support functions
82  * ----------------------------------------------------------------
83  */
84
85 static PGFunction
86 GetCCHashFunc(Oid keytype)
87 {
88         switch (keytype)
89         {
90                 case BOOLOID:
91                 case CHAROID:
92                         return hashchar;
93                 case NAMEOID:
94                         return cc_hashname;
95                 case INT2OID:
96                         return hashint2;
97                 case INT2VECTOROID:
98                         return hashint2vector;
99                 case INT4OID:
100                         return hashint4;
101                 case TEXTOID:
102                         return hashvarlena;
103                 case REGPROCOID:
104                 case OIDOID:
105                         return hashoid;
106                 case OIDVECTOROID:
107                         return hashoidvector;
108                 default:
109                         elog(FATAL, "GetCCHashFunc: type %u unsupported as catcache key",
110                                  keytype);
111                         return (PGFunction) NULL;
112         }
113 }
114
115 static Datum
116 cc_hashname(PG_FUNCTION_ARGS)
117 {
118
119         /*
120          * We need our own variant of hashname because we want to accept
121          * null-terminated C strings as search values for name fields. So, we
122          * have to make sure the data is correctly padded before we compute
123          * the hash value.
124          */
125         NameData        my_n;
126
127         namestrcpy(&my_n, NameStr(* PG_GETARG_NAME(0)));
128
129         return DirectFunctionCall1(hashname, NameGetDatum(&my_n));
130 }
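
/*
 * Illustrative only: a caller searching a name-keyed cache may pass a plain
 * C string, e.g. PointerGetDatum("pg_type"), rather than a full NameData.
 * Because the hash covers the whole NAMEDATALEN field, cc_hashname above
 * first copies the value into a zero-padded local NameData; otherwise
 * trailing garbage bytes would make equal names hash to different values.
 */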
131
132
133 /*
134  * Standard routine for creating cache context if it doesn't exist yet
135  *
136  * There are a lot of places (probably far more than necessary) that check
137  * whether CacheMemoryContext exists yet and want to create it if not.
138  * We centralize knowledge of exactly how to create it here.
139  */
140 void
141 CreateCacheMemoryContext(void)
142 {
143         /* Purely for paranoia, check that context doesn't exist;
144          * caller probably did so already.
145          */
146         if (!CacheMemoryContext)
147                 CacheMemoryContext = AllocSetContextCreate(TopMemoryContext,
148                                                                                                    "CacheMemoryContext",
149                                                                                                    ALLOCSET_DEFAULT_MINSIZE,
150                                                                                                    ALLOCSET_DEFAULT_INITSIZE,
151                                                                                                    ALLOCSET_DEFAULT_MAXSIZE);
152 }
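
/*
 * Illustrative only: code that allocates cache-lifetime data follows this
 * pattern (as CatalogCacheInitializeCache and InitSysCache below do):
 *
 *		if (!CacheMemoryContext)
 *			CreateCacheMemoryContext();
 *		oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
 *		... palloc() long-lived cache structures ...
 *		MemoryContextSwitchTo(oldcxt);
 */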
153
154
155 /* --------------------------------
156  *              CatalogCacheInitializeCache
157  *
158  * This function does final initialization of a catcache: obtain the tuple
159  * descriptor and set up the hash and equality function links.  We assume
160  * that the relcache entry can be opened at this point!
161  * --------------------------------
162  */
163 #ifdef CACHEDEBUG
164 #define CatalogCacheInitializeCache_DEBUG1 \
165         elog(DEBUG, "CatalogCacheInitializeCache: cache @%p %s", cache, \
166                  cache->cc_relname)
167
168 #define CatalogCacheInitializeCache_DEBUG2 \
169 do { \
170                 if (cache->cc_key[i] > 0) { \
171                         elog(DEBUG, "CatalogCacheInitializeCache: load %d/%d w/%d, %u", \
172                                 i+1, cache->cc_nkeys, cache->cc_key[i], \
173                                  tupdesc->attrs[cache->cc_key[i] - 1]->atttypid); \
174                 } else { \
175                         elog(DEBUG, "CatalogCacheInitializeCache: load %d/%d w/%d", \
176                                 i+1, cache->cc_nkeys, cache->cc_key[i]); \
177                 } \
178 } while(0)
179
180 #else
181 #define CatalogCacheInitializeCache_DEBUG1
182 #define CatalogCacheInitializeCache_DEBUG2
183 #endif
184
185 static void
186 CatalogCacheInitializeCache(CatCache *cache)
187 {
188         Relation        relation;
189         MemoryContext oldcxt;
190         TupleDesc       tupdesc;
191         short           i;
192
193         CatalogCacheInitializeCache_DEBUG1;
194
195         /*
196          * Open the relation without locking --- we only need the tupdesc,
197          * which we assume will never change ...
198          */
199         relation = heap_openr(cache->cc_relname, NoLock);
200         Assert(RelationIsValid(relation));
201
202         /* ----------------
203          *      switch to the cache context so our allocations
204          *      do not vanish at the end of a transaction
205          * ----------------
206          */
207         if (!CacheMemoryContext)
208                 CreateCacheMemoryContext();
209
210         oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
211
212         /* ----------------
213          *      copy the relcache's tuple descriptor to permanent cache storage
214          * ----------------
215          */
216         tupdesc = CreateTupleDescCopyConstr(RelationGetDescr(relation));
217
218         /* ----------------
219          *      return to the caller's memory context and close the rel
220          * ----------------
221          */
222         MemoryContextSwitchTo(oldcxt);
223
224         heap_close(relation, NoLock);
225
226         CACHE3_elog(DEBUG, "CatalogCacheInitializeCache: %s, %d keys",
227                                 cache->cc_relname, cache->cc_nkeys);
228
229         /* ----------------
230          *      initialize cache's key information
231          * ----------------
232          */
233         for (i = 0; i < cache->cc_nkeys; ++i)
234         {
235                 Oid                     keytype;
236
237                 CatalogCacheInitializeCache_DEBUG2;
238
239                 if (cache->cc_key[i] > 0)
240                 {
241                         keytype = tupdesc->attrs[cache->cc_key[i] - 1]->atttypid;
242                 }
243                 else
244                 {
245                         if (cache->cc_key[i] != ObjectIdAttributeNumber)
246                                 elog(FATAL, "CatalogCacheInit: only sys attr supported is OID");
247                         keytype = OIDOID;
248                 }
249
250                 cache->cc_hashfunc[i] = GetCCHashFunc(keytype);
251                 /*
252                  * GetCCHashFunc accepted the type, so it is safe to index into eqproc[]
253                  */
254                 cache->cc_skey[i].sk_procedure = EQPROC(keytype);
255
256                 fmgr_info(cache->cc_skey[i].sk_procedure,
257                                   &cache->cc_skey[i].sk_func);
258                 cache->cc_skey[i].sk_nargs = cache->cc_skey[i].sk_func.fn_nargs;
259
260                 /* Initialize sk_attno suitably for index scans */
261                 cache->cc_skey[i].sk_attno = i+1;
262
263                 CACHE4_elog(DEBUG, "CatalogCacheInit %s %d %p",
264                                         cache->cc_relname,
265                                         i,
266                                         cache);
267         }
268
269         /* ----------------
270          *      mark this cache fully initialized
271          * ----------------
272          */
273         cache->cc_tupdesc = tupdesc;
274 }
275
276 /* --------------------------------
277  *              CatalogCacheComputeHashIndex
278  * --------------------------------
279  */
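/*
 * Key hashes are combined by XORing each key's hash value, shifted left by
 * three bits per key position, then reduced modulo cc_size.  For example,
 * a two-key lookup computes (illustrative only):
 *
 *		hashIndex = ((h(key2) << 3) ^ h(key1)) % cc_size;
 */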
280 static Index
281 CatalogCacheComputeHashIndex(CatCache *cache, ScanKey cur_skey)
282 {
283         uint32          hashIndex = 0;
284
285         CACHE4_elog(DEBUG, "CatalogCacheComputeHashIndex %s %d %p",
286                                 cache->cc_relname,
287                                 cache->cc_nkeys,
288                                 cache);
289
290         switch (cache->cc_nkeys)
291         {
292                 case 4:
293                         hashIndex ^=
294                                 DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[3],
295                                                            cur_skey[3].sk_argument)) << 9;
296                         /* FALLTHROUGH */
297                 case 3:
298                         hashIndex ^=
299                                 DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[2],
300                                                            cur_skey[2].sk_argument)) << 6;
301                         /* FALLTHROUGH */
302                 case 2:
303                         hashIndex ^=
304                                 DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[1],
305                                                            cur_skey[1].sk_argument)) << 3;
306                         /* FALLTHROUGH */
307                 case 1:
308                         hashIndex ^=
309                                 DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[0],
310                                                            cur_skey[0].sk_argument));
311                         break;
312                 default:
313                         elog(FATAL, "CCComputeHashIndex: %d cc_nkeys", cache->cc_nkeys);
314                         break;
315         }
316         hashIndex %= (uint32) cache->cc_size;
317         return (Index) hashIndex;
318 }
319
320 /* --------------------------------
321  *              CatalogCacheComputeTupleHashIndex
322  * --------------------------------
323  */
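/*
 * Build a scankey from an actual tuple's key fields and hash it exactly as
 * CatalogCacheComputeHashIndex hashes a search key, so that a tuple being
 * entered or invalidated maps to the same bucket a lookup would probe.
 */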
324 static Index
325 CatalogCacheComputeTupleHashIndex(CatCache *cache,
326                                                                   HeapTuple tuple)
327 {
328         ScanKeyData cur_skey[4];
329         bool            isNull = false;
330
331         /* Copy pre-initialized overhead data for scankey */
332         memcpy(cur_skey, cache->cc_skey, sizeof(cur_skey));
333
334         /* Now extract key fields from tuple, insert into scankey */
335         switch (cache->cc_nkeys)
336         {
337                 case 4:
338                         cur_skey[3].sk_argument =
339                                 (cache->cc_key[3] == ObjectIdAttributeNumber)
340                                 ? ObjectIdGetDatum(tuple->t_data->t_oid)
341                                 : fastgetattr(tuple,
342                                                           cache->cc_key[3],
343                                                           cache->cc_tupdesc,
344                                                           &isNull);
345                         Assert(!isNull);
346                         /* FALLTHROUGH */
347                 case 3:
348                         cur_skey[2].sk_argument =
349                                 (cache->cc_key[2] == ObjectIdAttributeNumber)
350                                 ? ObjectIdGetDatum(tuple->t_data->t_oid)
351                                 : fastgetattr(tuple,
352                                                           cache->cc_key[2],
353                                                           cache->cc_tupdesc,
354                                                           &isNull);
355                         Assert(!isNull);
356                         /* FALLTHROUGH */
357                 case 2:
358                         cur_skey[1].sk_argument =
359                                 (cache->cc_key[1] == ObjectIdAttributeNumber)
360                                 ? ObjectIdGetDatum(tuple->t_data->t_oid)
361                                 : fastgetattr(tuple,
362                                                           cache->cc_key[1],
363                                                           cache->cc_tupdesc,
364                                                           &isNull);
365                         Assert(!isNull);
366                         /* FALLTHROUGH */
367                 case 1:
368                         cur_skey[0].sk_argument =
369                                 (cache->cc_key[0] == ObjectIdAttributeNumber)
370                                 ? ObjectIdGetDatum(tuple->t_data->t_oid)
371                                 : fastgetattr(tuple,
372                                                           cache->cc_key[0],
373                                                           cache->cc_tupdesc,
374                                                           &isNull);
375                         Assert(!isNull);
376                         break;
377                 default:
378                         elog(FATAL, "CCComputeTupleHashIndex: %d cc_nkeys",
379                                  cache->cc_nkeys);
380                         break;
381         }
382
383         return CatalogCacheComputeHashIndex(cache, cur_skey);
384 }
385
386 /* --------------------------------
387  *              CatCacheRemoveCTup
388  * --------------------------------
389  */
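/*
 * Each cached tuple is represented by two CatCTup/Dlelem pairs referencing
 * the same HeapTuple: one on its hash-bucket list and one on the LRU list,
 * cross-linked through ct_node (see SearchSysCache).  Removing an entry
 * therefore means unlinking and freeing both halves as well as the tuple.
 */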
390 static void
391 CatCacheRemoveCTup(CatCache *cache, Dlelem *elt)
392 {
393         CatCTup    *ct;
394         CatCTup    *other_ct;
395         Dlelem     *other_elt;
396
397         if (!elt)                                       /* probably-useless safety check */
398                 return;
399
400         /* We need to zap both linked-list elements as well as the tuple */
401
402         ct = (CatCTup *) DLE_VAL(elt);
403         other_elt = ct->ct_node;
404         other_ct = (CatCTup *) DLE_VAL(other_elt);
405
406         heap_freetuple(ct->ct_tup);
407
408         DLRemove(other_elt);
409         DLFreeElem(other_elt);
410         pfree(other_ct);
411         DLRemove(elt);
412         DLFreeElem(elt);
413         pfree(ct);
414
415         --cache->cc_ntup;
416 }
417
418 /* --------------------------------
419  *      CatalogCacheIdInvalidate()
420  *
421  *      Invalidate a tuple given a cache id.  In this case the id should always
422  *      be found (whether the cache has opened its relation or not).  Of course,
423  *      if the cache has yet to open its relation, there will be no tuples so
424  *      no problem.
425  * --------------------------------
426  */
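/*
 * The (cacheId, hashIndex, pointer) triple is normally computed by
 * RelationInvalidateCatalogCacheTuple() below and handed to this routine
 * via the cache-invalidation machinery's callback.
 */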
427 void
428 CatalogCacheIdInvalidate(int cacheId,   /* XXX */
429                                                  Index hashIndex,
430                                                  ItemPointer pointer)
431 {
432         CatCache   *ccp;
433         CatCTup    *ct;
434         Dlelem     *elt;
435
436         /* ----------------
437          *      sanity checks
438          * ----------------
439          */
440         Assert(hashIndex < NCCBUCK);
441         Assert(ItemPointerIsValid(pointer));
442         CACHE1_elog(DEBUG, "CatalogCacheIdInvalidate: called");
443
444         /* ----------------
445          *      inspect every cache that could contain the tuple
446          * ----------------
447          */
448         for (ccp = Caches; ccp; ccp = ccp->cc_next)
449         {
450                 if (cacheId != ccp->id)
451                         continue;
452                 /* ----------------
453                  *      inspect the hash bucket until we find a match or exhaust
454                  * ----------------
455                  */
456                 for (elt = DLGetHead(ccp->cc_cache[hashIndex]);
457                          elt;
458                          elt = DLGetSucc(elt))
459                 {
460                         ct = (CatCTup *) DLE_VAL(elt);
461                         if (ItemPointerEquals(pointer, &ct->ct_tup->t_self))
462                                 break;
463                 }
464
465                 /* ----------------
466                  *      if we found a matching tuple, invalidate it.
467                  * ----------------
468                  */
469
470                 if (elt)
471                 {
472                         CatCacheRemoveCTup(ccp, elt);
473
474                         CACHE1_elog(DEBUG, "CatalogCacheIdInvalidate: invalidated");
475                 }
476
477                 if (cacheId != InvalidCatalogCacheId)
478                         break;
479         }
480 }
481
482 /* ----------------------------------------------------------------
483  *                                         public functions
484  *
485  *              ResetSystemCache
486  *              InitSysCache
487  *              SearchSysCache
488  *              RelationInvalidateCatalogCacheTuple
489  * ----------------------------------------------------------------
490  */
491 /* --------------------------------
492  *              ResetSystemCache
493  * --------------------------------
494  */
495 void
496 ResetSystemCache(void)
497 {
498         CatCache *cache;
499
500         CACHE1_elog(DEBUG, "ResetSystemCache called");
501
502         /* ----------------
503          *      here we purge the contents of all the caches
504          *
505          *      for each system cache
506          *         for each hash bucket
507          *                 for each tuple in hash bucket
508          *                         remove the tuple
509          * ----------------
510          */
511         for (cache = Caches; PointerIsValid(cache); cache = cache->cc_next)
512         {
513                 int                     hash;
514
515                 for (hash = 0; hash < NCCBUCK; hash += 1)
516                 {
517                         Dlelem     *elt,
518                                            *nextelt;
519
520                         for (elt = DLGetHead(cache->cc_cache[hash]); elt; elt = nextelt)
521                         {
522                                 nextelt = DLGetSucc(elt);
523                                 CatCacheRemoveCTup(cache, elt);
524                         }
525                 }
526
527                 /* double-check that ntup is now zero */
528                 if (cache->cc_ntup != 0)
529                 {
530                         elog(NOTICE,
531                                  "ResetSystemCache: cache %d has cc_ntup = %d, should be 0",
532                                  cache->id, cache->cc_ntup);
533                         cache->cc_ntup = 0;
534                 }
535         }
536
537         CACHE1_elog(DEBUG, "end of ResetSystemCache call");
538 }
539
540 /* --------------------------------
541  *              SystemCacheRelationFlushed
542  *
543  *      This is called by RelationFlushRelation() to clear out cached information
544  *      about a relation being dropped.  (This could be a DROP TABLE command,
545  *      or a temp table being dropped at end of transaction, or a table created
546  *      during the current transaction that is being dropped because of abort.)
547  *      Remove all cache entries relevant to the specified relation OID.
548  *
549  *      A special case occurs when relId is itself one of the cacheable system
550  *      tables --- although those'll never be dropped, they can get flushed from
551  *      the relcache (VACUUM causes this, for example).  In that case we need
552  *      to flush all cache entries from that table.  The brute-force method
553  *      currently used takes care of that quite handily.  (At one point we
554  *      also tried to force re-execution of CatalogCacheInitializeCache for
555  *      the cache(s) on that table.  This is a bad idea since it leads to all
556  *      kinds of trouble if a cache flush occurs while loading cache entries.
557  *      We now avoid the need to do it by copying cc_tupdesc out of the relcache,
558  *      rather than relying on the relcache to keep a tupdesc for us.  Of course
559  *      this assumes the tupdesc of a cacheable system table will not change...)
560  * --------------------------------
561  */
562 void
563 SystemCacheRelationFlushed(Oid relId)
564 {
565
566         /*
567          * XXX Ideally we'd search the caches and just zap entries that
568          * actually refer to or come from the indicated relation.  For now, we
569          * take the brute-force approach: just flush the caches entirely.
570          */
571         ResetSystemCache();
572 }
573
574 /* --------------------------------
575  *              InitSysCache
576  *
577  *      This allocates and initializes a cache for a system catalog relation.
578  *      Actually, the cache is only partially initialized to avoid opening the
579  *      relation.  The relation will be opened and the rest of the cache
580  *      structure initialized on the first access.
581  * --------------------------------
582  */
583 #ifdef CACHEDEBUG
584 #define InitSysCache_DEBUG1 \
585 do { \
586         elog(DEBUG, "InitSysCache: rel=%s id=%d nkeys=%d size=%d\n", \
587                 cp->cc_relname, cp->id, cp->cc_nkeys, cp->cc_size); \
588 } while(0)
589
590 #else
591 #define InitSysCache_DEBUG1
592 #endif
593
594 CatCache *
595 InitSysCache(int id,
596                          char *relname,
597                          char *indname,
598                          int nkeys,
599                          int *key)
600 {
601         CatCache   *cp;
602         MemoryContext oldcxt;
603         int                     i;
604
605         /* ----------------
606          *      first switch to the cache context so our allocations
607          *      do not vanish at the end of a transaction
608          * ----------------
609          */
610         if (!CacheMemoryContext)
611                 CreateCacheMemoryContext();
612
613         oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
614
615         /* ----------------
616          *      allocate a new cache structure
617          * ----------------
618          */
619         cp = (CatCache *) palloc(sizeof(CatCache));
620         MemSet((char *) cp, 0, sizeof(CatCache));
621
622         /* ----------------
623          *      initialize the cache buckets (each bucket is a list header)
624          *      and the LRU tuple list
625          * ----------------
626          */
627         {
628
629                 /*
630                  * We can only do this optimization because the number of hash
631                  * buckets never changes.  Without it, we call palloc() too much.
632                  * We could move this into dllist.c, but the technique is neither
633                  * dynamic nor portable, so there is no reason to expose it there.
634                  */
635                 Dllist     *cache_begin = palloc((NCCBUCK + 1) * sizeof(Dllist));
636
637                 for (i = 0; i <= NCCBUCK; ++i)
638                 {
639                         cp->cc_cache[i] = &cache_begin[i];
640                         cp->cc_cache[i]->dll_head = 0;
641                         cp->cc_cache[i]->dll_tail = 0;
642                 }
643         }
644
645         cp->cc_lrulist = DLNewList();
646
647         /* ----------------
648          *      Caches is the pointer to the head of the list of all the
649          *      system caches.  here we add the new cache to the top of the list.
650          * ----------------
651          */
652         cp->cc_next = Caches;           /* list of caches (single link) */
653         Caches = cp;
654
655         /* ----------------
656          *      initialize the cache's relation information for the relation
657  *      corresponding to this cache and initialize some of the new
658          *      cache's other internal fields.
659          * ----------------
660          */
661         cp->cc_relname = relname;
662         cp->cc_indname = indname;
663         cp->cc_tupdesc = (TupleDesc) NULL;
664         cp->id = id;
665         cp->cc_maxtup = MAXTUP;
666         cp->cc_size = NCCBUCK;
667         cp->cc_nkeys = nkeys;
668         for (i = 0; i < nkeys; ++i)
669                 cp->cc_key[i] = key[i];
670
671         /* ----------------
672          *      all done.  new cache is initialized.  print some debugging
673          *      information, if appropriate.
674          * ----------------
675          */
676         InitSysCache_DEBUG1;
677
678         /* ----------------
679          *      back to the old context before we return...
680          * ----------------
681          */
682         MemoryContextSwitchTo(oldcxt);
683
684         return cp;
685 }
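
/*
 * Illustrative only (the real cache definitions live in syscache.c): a
 * single-key cache on a catalog's OID attribute would be set up roughly as
 *
 *		static int	oidKey[1] = { ObjectIdAttributeNumber };
 *
 *		cache = InitSysCache(id, relname, indname, 1, oidKey);
 *
 * where relname and indname name the catalog and its index.
 */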
686
687
688 /* --------------------------------
689  *              IndexScanOK
690  *
691  *              This function checks for tuples that will be fetched by
692  *              IndexSupportInitialize() during relcache initialization for
693  *              certain system indexes that support critical syscaches.
694  *              We can't use an indexscan to fetch these, else we'll get into
695  *              infinite recursion.  A plain heap scan will work, however.
696  * --------------------------------
697  */
698 static bool
699 IndexScanOK(CatCache *cache, ScanKey cur_skey)
700 {
701         if (cache->id == INDEXRELID)
702         {
703                 static Oid      indexSelfOid = InvalidOid;
704
705                 /* One-time lookup of the OID of pg_index_indexrelid_index */
706                 if (!OidIsValid(indexSelfOid))
707                 {
708                         Relation        rel;
709                         ScanKeyData key;
710                         HeapScanDesc sd;
711                         HeapTuple       ntp;
712
713                         rel = heap_openr(RelationRelationName, AccessShareLock);
714                         ScanKeyEntryInitialize(&key, 0, Anum_pg_class_relname,
715                                                                    F_NAMEEQ,
716                                                                    PointerGetDatum(IndexRelidIndex));
717                         sd = heap_beginscan(rel, false, SnapshotNow, 1, &key);
718                         ntp = heap_getnext(sd, 0);
719                         if (!HeapTupleIsValid(ntp))
720                                 elog(ERROR, "IndexScanOK: %s not found in %s",
721                                          IndexRelidIndex, RelationRelationName);
722                         indexSelfOid = ntp->t_data->t_oid;
723                         heap_endscan(sd);
724                         heap_close(rel, AccessShareLock);
725                 }
726
727                 /* Looking for pg_index_indexrelid_index? */
728                 if (DatumGetObjectId(cur_skey[0].sk_argument) == indexSelfOid)
729                         return false;
730         }
731         else if (cache->id == OPEROID)
732         {
733                 /* Looking for an OID comparison function? */
734                 Oid                     lookup_oid = DatumGetObjectId(cur_skey[0].sk_argument);
735
736                 if (lookup_oid >= MIN_OIDCMP && lookup_oid <= MAX_OIDCMP)
737                         return false;
738         }
739
740         /* Normal case, allow index scan */
741         return true;
742 }
743
744 /* --------------------------------
745  *              SearchSysCache
746  *
747  *              This call searches a system cache for a tuple, opening the relation
748  *              if necessary (the first access to a particular cache).
749  * --------------------------------
750  */
751 HeapTuple
752 SearchSysCache(CatCache *cache,
753                            Datum v1,
754                            Datum v2,
755                            Datum v3,
756                            Datum v4)
757 {
758         ScanKeyData cur_skey[4];
759         Index           hash;
760         CatCTup    *ct = NULL;
761         CatCTup    *nct;
762         CatCTup    *nct2;
763         Dlelem     *elt;
764         HeapTuple       ntp;
765         Relation        relation;
766         MemoryContext oldcxt;
767
768         /* ----------------
769          *      one-time startup overhead
770          * ----------------
771          */
772         if (cache->cc_tupdesc == NULL)
773                 CatalogCacheInitializeCache(cache);
774
775         /* ----------------
776          *      initialize the search key information
777          * ----------------
778          */
779         memcpy(cur_skey, cache->cc_skey, sizeof(cur_skey));
780         cur_skey[0].sk_argument = v1;
781         cur_skey[1].sk_argument = v2;
782         cur_skey[2].sk_argument = v3;
783         cur_skey[3].sk_argument = v4;
784
785         /* ----------------
786          *      find the hash bucket in which to look for the tuple
787          * ----------------
788          */
789         hash = CatalogCacheComputeHashIndex(cache, cur_skey);
790
791         /* ----------------
792          *      scan the hash bucket until we find a match or exhaust our tuples
793          * ----------------
794          */
795         for (elt = DLGetHead(cache->cc_cache[hash]);
796                  elt;
797                  elt = DLGetSucc(elt))
798         {
799                 bool            res;
800
801                 ct = (CatCTup *) DLE_VAL(elt);
802                 /* ----------------
803                  *      see if the cached tuple matches our key.
804                  *      (should we be worried about time ranges? -cim 10/2/90)
805                  * ----------------
806                  */
807                 HeapKeyTest(ct->ct_tup,
808                                         cache->cc_tupdesc,
809                                         cache->cc_nkeys,
810                                         cur_skey,
811                                         res);
812                 if (res)
813                         break;
814         }
815
816         /* ----------------
817          *      if we found a tuple in the cache, move it to the top of the
818          *      lru list, and return it.  We also move it to the front of the
819          *      list for its hashbucket, in order to speed subsequent searches.
820          *      (The most frequently accessed elements in any hashbucket will
821          *      tend to be near the front of the hashbucket's list.)
822          * ----------------
823          */
824         if (elt)
825         {
826                 Dlelem     *old_lru_elt = ((CatCTup *) DLE_VAL(elt))->ct_node;
827
828                 DLMoveToFront(old_lru_elt);
829                 DLMoveToFront(elt);
830
831 #ifdef CACHEDEBUG
832                 CACHE3_elog(DEBUG, "SearchSysCache(%s): found in bucket %d",
833                                         cache->cc_relname, hash);
834 #endif   /* CACHEDEBUG */
835
836                 return ct->ct_tup;
837         }
838
839         /* ----------------
840          *      Tuple was not found in cache, so we have to try and
841          *      retrieve it directly from the relation.  If it's found,
842          *      we add it to the cache.
843          *
844          *      NOTE: it is possible for recursive cache lookups to occur while
845          *      reading the relation --- for example, due to shared-cache-inval
846          *      messages being processed during heap_open().  This is OK.  It's
847          *      even possible for one of those lookups to find and enter the
848          *      very same tuple we are trying to fetch here.  If that happens,
849          *      we will enter a second copy of the tuple into the cache.  The
850          *      first copy will never be referenced again, and will eventually
851          *      age out of the cache, so there's no functional problem.  This case
852          *      is rare enough that it's not worth expending extra cycles to detect.
853          * ----------------
854          */
855
856         /* ----------------
857          *      open the relation associated with the cache
858          * ----------------
859          */
860         relation = heap_openr(cache->cc_relname, AccessShareLock);
861
862         /* ----------------
863          *      Scan the relation to find the tuple.  If there's an index, and
864          *      if it's safe to do so, use the index.  Else do a heap scan.
865          * ----------------
866          */
867         ntp = NULL;
868
869         if ((RelationGetForm(relation))->relhasindex &&
870                 !IsIgnoringSystemIndexes() &&
871                 IndexScanOK(cache, cur_skey))
872         {
873                 Relation        idesc;
874                 IndexScanDesc isd;
875                 RetrieveIndexResult indexRes;
876                 HeapTupleData tuple;
877                 Buffer          buffer;
878
879                 CACHE2_elog(DEBUG, "SearchSysCache(%s): performing index scan",
880                                         cache->cc_relname);
881
882                 idesc = index_openr(cache->cc_indname);
883                 isd = index_beginscan(idesc, false, cache->cc_nkeys, cur_skey);
884                 tuple.t_datamcxt = CurrentMemoryContext;
885                 tuple.t_data = NULL;
886                 while ((indexRes = index_getnext(isd, ForwardScanDirection)))
887                 {
888                         tuple.t_self = indexRes->heap_iptr;
889                         heap_fetch(relation, SnapshotNow, &tuple, &buffer);
890                         pfree(indexRes);
891                         if (tuple.t_data != NULL)
892                         {
893                                 /* Copy tuple into our context */
894                                 oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
895                                 ntp = heap_copytuple(&tuple);
896                                 MemoryContextSwitchTo(oldcxt);
897                                 ReleaseBuffer(buffer);
898                                 break;
899                         }
900                 }
901                 index_endscan(isd);
902                 index_close(idesc);
903         }
904         else
905         {
906                 HeapScanDesc sd;
907                 int                     i;
908
909                 CACHE2_elog(DEBUG, "SearchSysCache(%s): performing heap scan",
910                                         cache->cc_relname);
911
912                 /*
913                  * For a heap scan, sk_attno has to be set to the heap attribute
914                  * number(s), not the index attribute numbers.
915                  */
916                 for (i = 0; i < cache->cc_nkeys; ++i)
917                         cur_skey[i].sk_attno = cache->cc_key[i];
918
919                 sd = heap_beginscan(relation, 0, SnapshotNow,
920                                                         cache->cc_nkeys, cur_skey);
921
922                 ntp = heap_getnext(sd, 0);
923
924                 if (HeapTupleIsValid(ntp))
925                 {
926                         /* Copy tuple into our context */
927                         oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
928                         ntp = heap_copytuple(ntp);
929                         MemoryContextSwitchTo(oldcxt);
930                         /* We should not free the result of heap_getnext... */
931                 }
932
933                 heap_endscan(sd);
934         }
935
936         /* ----------------
937          *      scan is complete.  if tup is valid, we can add it to the cache.
938          *      note we have already copied it into the cache memory context.
939          * ----------------
940          */
941         if (HeapTupleIsValid(ntp))
942         {
943                 /* ----------------
944                  *      allocate a new cache tuple holder, store the pointer
945                  *      to the heap tuple there and initialize the list pointers.
946                  * ----------------
947                  */
948                 Dlelem     *lru_elt;
949
950                 CACHE1_elog(DEBUG, "SearchSysCache: found tuple");
951
952                 oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
953
954                 /*
955                  * this is a little cumbersome here because we want the Dlelem's
956                  * in both doubly linked lists to point to one another. That makes
957                  * it easier to remove something from both the cache bucket and
958                  * the lru list at the same time
959                  */
960                 nct = (CatCTup *) palloc(sizeof(CatCTup));
961                 nct->ct_tup = ntp;
962                 elt = DLNewElem(nct);
963                 nct2 = (CatCTup *) palloc(sizeof(CatCTup));
964                 nct2->ct_tup = ntp;
965                 lru_elt = DLNewElem(nct2);
966                 nct2->ct_node = elt;
967                 nct->ct_node = lru_elt;
968
969                 DLAddHead(cache->cc_lrulist, lru_elt);
970                 DLAddHead(cache->cc_cache[hash], elt);
971
972                 MemoryContextSwitchTo(oldcxt);
973
974                 /* ----------------
975                  *      If we've exceeded the desired size of this cache,
976                  *      throw away the least recently used entry.
977                  * ----------------
978                  */
979                 if (++cache->cc_ntup > cache->cc_maxtup)
980                 {
981                         CatCTup    *ct;
982
983                         elt = DLGetTail(cache->cc_lrulist);
984                         ct = (CatCTup *) DLE_VAL(elt);
985
986                         if (ct != nct)          /* shouldn't be possible, but be safe... */
987                         {
988                                 CACHE2_elog(DEBUG, "SearchSysCache(%s): Overflow, LRU removal",
989                                                         cache->cc_relname);
990
991                                 CatCacheRemoveCTup(cache, elt);
992                         }
993                 }
994
995                 CACHE4_elog(DEBUG, "SearchSysCache(%s): Contains %d/%d tuples",
996                                         cache->cc_relname, cache->cc_ntup, cache->cc_maxtup);
997                 CACHE3_elog(DEBUG, "SearchSysCache(%s): put in bucket %d",
998                                         cache->cc_relname, hash);
999         }
1000
1001         /* ----------------
1002          *      close the relation and return the tuple we found (or NULL)
1003          * ----------------
1004          */
1005         heap_close(relation, AccessShareLock);
1006
1007         return ntp;
1008 }
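
/*
 * Illustrative only: callers (normally the syscache.c wrappers) supply as
 * many key Datums as the cache has keys, zeroes for the rest, and test the
 * result for validity:
 *
 *		ntp = SearchSysCache(cache, ObjectIdGetDatum(objectId), 0, 0, 0);
 *		if (!HeapTupleIsValid(ntp))
 *			... not found ...
 */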
1009
1010 /* --------------------------------
1011  *      RelationInvalidateCatalogCacheTuple()
1012  *
1013  *      Invalidate a tuple from a specific relation.  This call determines the
1014  *      cache in question and calls CatalogCacheIdInvalidate().  It is OK
1015  *      if the relation cannot be found; that simply means this backend has not
1016  *      opened it yet.
1017  * --------------------------------
1018  */
1019 void
1020 RelationInvalidateCatalogCacheTuple(Relation relation,
1021                                                                         HeapTuple tuple,
1022                                                           void (*function) (int, Index, ItemPointer))
1023 {
1024         CatCache   *ccp;
1025
1026         /* ----------------
1027          *      sanity checks
1028          * ----------------
1029          */
1030         Assert(RelationIsValid(relation));
1031         Assert(HeapTupleIsValid(tuple));
1032         Assert(PointerIsValid(function));
1033         CACHE1_elog(DEBUG, "RelationInvalidateCatalogCacheTuple: called");
1034
1035         /* ----------------
1036          *      for each cache
1037          *         if the cache contains tuples from the specified relation
1038          *                 call the invalidation function on the tuples
1039          *                 in the proper hash bucket
1040          * ----------------
1041          */
1042
1043         for (ccp = Caches; ccp; ccp = ccp->cc_next)
1044         {
1045                 if (strcmp(ccp->cc_relname, RelationGetRelationName(relation)) != 0)
1046                         continue;
1047
1048                 /* Just in case cache hasn't finished initialization yet... */
1049                 if (ccp->cc_tupdesc == NULL)
1050                         CatalogCacheInitializeCache(ccp);
1051
1052                 (*function) (ccp->id,
1053                                          CatalogCacheComputeTupleHashIndex(ccp, tuple),
1054                                          &tuple->t_self);
1055         }
1056 }