1 /*-------------------------------------------------------------------------
2  *
3  * catcache.c
4  *        System catalog cache for tuples matching a key.
5  *
6  * Portions Copyright (c) 1996-2000, PostgreSQL, Inc
7  * Portions Copyright (c) 1994, Regents of the University of California
8  *
9  *
10  * IDENTIFICATION
11  *        $Header: /cvsroot/pgsql/src/backend/utils/cache/catcache.c,v 1.64 2000/05/28 17:56:06 tgl Exp $
12  *
13  *-------------------------------------------------------------------------
14  */
15 #include "postgres.h"
16
17 #include "access/genam.h"
18 #include "access/hash.h"
19 #include "access/heapam.h"
20 #include "access/valid.h"
21 #include "catalog/pg_operator.h"
22 #include "catalog/pg_type.h"
23 #include "catalog/catname.h"
24 #include "catalog/indexing.h"
25 #include "miscadmin.h"
26 #include "utils/builtins.h"
27 #include "utils/fmgroids.h"
28 #include "utils/catcache.h"
29 #include "utils/syscache.h"
30
31 static void CatCacheRemoveCTup(CatCache *cache, Dlelem *e);
32 static Index CatalogCacheComputeHashIndex(struct catcache * cacheInP);
33 static Index CatalogCacheComputeTupleHashIndex(struct catcache * cacheInOutP,
34                                                                   Relation relation,
35                                                                   HeapTuple tuple);
36 static void CatalogCacheInitializeCache(struct catcache * cache,
37                                                         Relation relation);
38 static uint32 cc_hashname(NameData *n);
39
40 /* ----------------
41  *              variables, macros and other stuff
42  * ----------------
43  */
44
45 #ifdef CACHEDEBUG
46 #define CACHE1_elog(a,b)                                elog(a,b)
47 #define CACHE2_elog(a,b,c)                              elog(a,b,c)
48 #define CACHE3_elog(a,b,c,d)                    elog(a,b,c,d)
49 #define CACHE4_elog(a,b,c,d,e)                  elog(a,b,c,d,e)
50 #define CACHE5_elog(a,b,c,d,e,f)                elog(a,b,c,d,e,f)
51 #define CACHE6_elog(a,b,c,d,e,f,g)              elog(a,b,c,d,e,f,g)
52 #else
53 #define CACHE1_elog(a,b)
54 #define CACHE2_elog(a,b,c)
55 #define CACHE3_elog(a,b,c,d)
56 #define CACHE4_elog(a,b,c,d,e)
57 #define CACHE5_elog(a,b,c,d,e,f)
58 #define CACHE6_elog(a,b,c,d,e,f,g)
59 #endif
60
61 static CatCache *Caches = NULL; /* head of list of caches */
62
63 GlobalMemory CacheCxt;                  /* context in which caches are allocated */
64
65 /* CacheCxt is global because relcache uses it too. */
66
67
68 /* ----------------
69  *              EQPROC is used in CatalogCacheInitializeCache to find the equality
70  *              functions for system types that are used as cache key fields.
71  *              See also GetCCHashFunc, which should support the same set of types.
72  *
73  *              XXX this should be replaced by catalog lookups,
74  *              but that seems to pose considerable risk of circularity...
75  * ----------------
76  */
77 static const Oid eqproc[] = {
78         F_BOOLEQ, InvalidOid, F_CHAREQ, F_NAMEEQ, InvalidOid,
79         F_INT2EQ, F_INT2VECTOREQ, F_INT4EQ, F_OIDEQ, F_TEXTEQ,
80         F_OIDEQ, InvalidOid, InvalidOid, InvalidOid, F_OIDVECTOREQ
81 };
82
83 #define EQPROC(SYSTEMTYPEOID)   eqproc[(SYSTEMTYPEOID)-BOOLOID]
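/*
 * Worked example (added for illustration): per the type OIDs in pg_type.h,
 * BOOLOID is 16 and NAMEOID is 19, so EQPROC(NAMEOID) expands to
 * eqproc[19 - 16] = eqproc[3], which is F_NAMEEQ, the equality procedure
 * for the name type.  The InvalidOid slots mark type OIDs in the
 * BOOLOID..OIDVECTOROID range that are never used as cache key types.
 */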
84
85 /* ----------------------------------------------------------------
86  *                                      internal support functions
87  * ----------------------------------------------------------------
88  */
89
90 static CCHashFunc
91 GetCCHashFunc(Oid keytype)
92 {
93         switch (keytype)
94         {
95                 case BOOLOID:
96                 case CHAROID:
97                         return (CCHashFunc) hashchar;
98                 case NAMEOID:
99                         return (CCHashFunc) cc_hashname;
100                 case INT2OID:
101                         return (CCHashFunc) hashint2;
102                 case INT2VECTOROID:
103                         return (CCHashFunc) hashint2vector;
104                 case INT4OID:
105                         return (CCHashFunc) hashint4;
106                 case TEXTOID:
107                         return (CCHashFunc) hashtext;
108                 case REGPROCOID:
109                 case OIDOID:
110                         return (CCHashFunc) hashoid;
111                 case OIDVECTOROID:
112                         return (CCHashFunc) hashoidvector;
113                 default:
114                         elog(FATAL, "GetCCHashFunc: type %u unsupported as catcache key",
115                                  keytype);
116                         return NULL;
117         }
118 }
119
120 static uint32
121 cc_hashname(NameData *n)
122 {
123
124         /*
125          * We need our own variant of hashname because we want to accept
126          * null-terminated C strings as search values for name fields. So, we
127          * have to make sure the data is correctly padded before we compute
128          * the hash value.
129          */
130         NameData        my_n;
131
132         namestrcpy(&my_n, NameStr(*n));
133
134         return hashname(&my_n);
135 }
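/*
 * Illustrative sketch (not part of the original file): a caller may supply a
 * plain C string as the search value for a name-type key, e.g.
 *
 *		SearchSysCache(cache, PointerGetDatum("pg_proc"), 0, 0, 0);
 *
 * The literal "pg_proc" occupies only 8 bytes including its trailing NUL,
 * while hashname() hashes the full NAMEDATALEN-byte field, so hashing the
 * caller's buffer directly could pick up whatever bytes happen to follow the
 * NUL.  namestrcpy() above copies the string into a local NameData and
 * zero-pads it, so equal names always hash to equal values no matter how the
 * caller allocated the search key.
 */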
136
137
138 /* --------------------------------
139  *              CatalogCacheInitializeCache
140  * --------------------------------
141  */
142 #ifdef CACHEDEBUG
143 #define CatalogCacheInitializeCache_DEBUG1 \
144 do { \
145         elog(DEBUG, "CatalogCacheInitializeCache: cache @%08lx", cache); \
146         if (relation) \
147                 elog(DEBUG, "CatalogCacheInitializeCache: called w/relation(inval)"); \
148         else \
149                 elog(DEBUG, "CatalogCacheInitializeCache: called w/relname %s", \
150                         cache->cc_relname); \
151 } while(0)
152
153 #define CatalogCacheInitializeCache_DEBUG2 \
154 do { \
155                 if (cache->cc_key[i] > 0) { \
156                         elog(DEBUG, "CatalogCacheInitializeCache: load %d/%d w/%d, %d", \
157                                 i+1, cache->cc_nkeys, cache->cc_key[i], \
158                                 relation->rd_att->attrs[cache->cc_key[i] - 1]->attlen); \
159                 } else { \
160                         elog(DEBUG, "CatalogCacheInitializeCache: load %d/%d w/%d", \
161                                 i+1, cache->cc_nkeys, cache->cc_key[i]); \
162                 } \
163 } while(0)
164
165 #else
166 #define CatalogCacheInitializeCache_DEBUG1
167 #define CatalogCacheInitializeCache_DEBUG2
168 #endif
169
170 static void
171 CatalogCacheInitializeCache(struct catcache * cache,
172                                                         Relation relation)
173 {
174         MemoryContext oldcxt;
175         short           didopen = 0;
176         short           i;
177         TupleDesc       tupdesc;
178
179         CatalogCacheInitializeCache_DEBUG1;
180
181         /* ----------------
182          *      first switch to the cache context so our allocations
183          *      do not vanish at the end of a transaction
184          * ----------------
185          */
186         if (!CacheCxt)
187                 CacheCxt = CreateGlobalMemory("Cache");
188         oldcxt = MemoryContextSwitchTo((MemoryContext) CacheCxt);
189
190         /* ----------------
191          *      If no relation was passed we must open it to get access to
192          *      its fields.  If one of the other caches has already opened
193          *      it we use heap_open() instead of heap_openr().
194          *      XXX is that really worth the trouble of checking?
195          * ----------------
196          */
197         if (!RelationIsValid(relation))
198         {
199                 struct catcache *cp;
200
201                 /* ----------------
202                  *      scan the caches to see if any other cache has opened the relation
203                  * ----------------
204                  */
205                 for (cp = Caches; cp; cp = cp->cc_next)
206                 {
207                         if (strncmp(cp->cc_relname, cache->cc_relname, NAMEDATALEN) == 0)
208                         {
209                                 if (cp->relationId != InvalidOid)
210                                         break;
211                         }
212                 }
213
214                 /* ----------------
215                  *      open the relation by name or by id
216                  * ----------------
217                  */
218                 if (cp)
219                         relation = heap_open(cp->relationId, NoLock);
220                 else
221                         relation = heap_openr(cache->cc_relname, NoLock);
222
223                 didopen = 1;
224         }
225
226         /* ----------------
227          *      initialize the cache's relation id and tuple descriptor
228          * ----------------
229          */
230         Assert(RelationIsValid(relation));
231         cache->relationId = RelationGetRelid(relation);
232         tupdesc = CreateTupleDescCopyConstr(RelationGetDescr(relation));
233         cache->cc_tupdesc = tupdesc;
234
235         CACHE3_elog(DEBUG, "CatalogCacheInitializeCache: relid %u, %d keys",
236                                 cache->relationId, cache->cc_nkeys);
237
238         /* ----------------
239          *      initialize cache's key information
240          * ----------------
241          */
242         for (i = 0; i < cache->cc_nkeys; ++i)
243         {
244                 CatalogCacheInitializeCache_DEBUG2;
245
246                 if (cache->cc_key[i] > 0)
247                 {
248                         Oid                     keytype = tupdesc->attrs[cache->cc_key[i] - 1]->atttypid;
249
250                         cache->cc_hashfunc[i] = GetCCHashFunc(keytype);
251
252                         /*
253                          * If GetCCHashFunc liked the type, safe to index into
254                          * eqproc[]
255                          */
256                         cache->cc_skey[i].sk_procedure = EQPROC(keytype);
257
258                         fmgr_info(cache->cc_skey[i].sk_procedure,
259                                           &cache->cc_skey[i].sk_func);
260                         cache->cc_skey[i].sk_nargs = cache->cc_skey[i].sk_func.fn_nargs;
261
262                         CACHE4_elog(DEBUG, "CatalogCacheInit %s %d %x",
263                                                 RelationGetRelationName(relation),
264                                                 i,
265                                                 cache);
266                 }
267         }
268
269         /* ----------------
270          *      close the relation if we opened it
271          * ----------------
272          */
273         if (didopen)
274                 heap_close(relation, NoLock);
275
276         /* ----------------
277          *      initialize index information for the cache.  this
278          *      should only be done once per cache.
279          * ----------------
280          */
281         if (cache->cc_indname != NULL && cache->indexId == InvalidOid)
282         {
283                 if (!IsIgnoringSystemIndexes() && RelationGetForm(relation)->relhasindex)
284                 {
285
286                         /*
287                          * If the index doesn't exist we are in trouble.
288                          */
289                         relation = index_openr(cache->cc_indname);
290                         Assert(relation);
291                         cache->indexId = RelationGetRelid(relation);
292                         index_close(relation);
293                 }
294                 else
295                         cache->cc_indname = NULL;
296         }
297
298         /* ----------------
299          *      return to the proper memory context
300          * ----------------
301          */
302         MemoryContextSwitchTo(oldcxt);
303 }
304
305 /* --------------------------------
306  *              CatalogCacheComputeHashIndex
307  * --------------------------------
308  */
309 static Index
310 CatalogCacheComputeHashIndex(struct catcache * cacheInP)
311 {
312         uint32          hashIndex = 0;
313
314         CACHE4_elog(DEBUG, "CatalogCacheComputeHashIndex %s %d %x",
315                                 cacheInP->cc_relname,
316                                 cacheInP->cc_nkeys,
317                                 cacheInP);
318
319         switch (cacheInP->cc_nkeys)
320         {
321                 case 4:
322                         hashIndex ^=
323                                 (*cacheInP->cc_hashfunc[3]) (cacheInP->cc_skey[3].sk_argument) << 9;
324                         /* FALLTHROUGH */
325                 case 3:
326                         hashIndex ^=
327                                 (*cacheInP->cc_hashfunc[2]) (cacheInP->cc_skey[2].sk_argument) << 6;
328                         /* FALLTHROUGH */
329                 case 2:
330                         hashIndex ^=
331                                 (*cacheInP->cc_hashfunc[1]) (cacheInP->cc_skey[1].sk_argument) << 3;
332                         /* FALLTHROUGH */
333                 case 1:
334                         hashIndex ^=
335                                 (*cacheInP->cc_hashfunc[0]) (cacheInP->cc_skey[0].sk_argument);
336                         break;
337                 default:
338                         elog(FATAL, "CCComputeHashIndex: %d cc_nkeys", cacheInP->cc_nkeys);
339                         break;
340         }
341         hashIndex %= (uint32) cacheInP->cc_size;
342         return (Index) hashIndex;
343 }
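/*
 * Worked example (illustrative only): for a two-key cache the switch above
 * combines the per-key hashes as
 *
 *		hashIndex = (h0(key0) ^ (h1(key1) << 3)) % cc_size;
 *
 * Each additional key is shifted three more bits before being XORed in,
 * which helps keep multi-key combinations whose individual hashes are
 * similar from collapsing onto the same bucket.
 */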
344
345 /* --------------------------------
346  *              CatalogCacheComputeTupleHashIndex
347  * --------------------------------
348  */
349 static Index
350 CatalogCacheComputeTupleHashIndex(struct catcache * cacheInOutP,
351                                                                   Relation relation,
352                                                                   HeapTuple tuple)
353 {
354         bool            isNull = false;
355
356         /* XXX is this really needed? */
357         if (cacheInOutP->relationId == InvalidOid)
358                 CatalogCacheInitializeCache(cacheInOutP, relation);
359
360         switch (cacheInOutP->cc_nkeys)
361         {
362                 case 4:
363                         cacheInOutP->cc_skey[3].sk_argument =
364                                 (cacheInOutP->cc_key[3] == ObjectIdAttributeNumber)
365                                 ? (Datum) tuple->t_data->t_oid
366                                 : fastgetattr(tuple,
367                                                           cacheInOutP->cc_key[3],
368                                                           RelationGetDescr(relation),
369                                                           &isNull);
370                         Assert(!isNull);
371                         /* FALLTHROUGH */
372                 case 3:
373                         cacheInOutP->cc_skey[2].sk_argument =
374                                 (cacheInOutP->cc_key[2] == ObjectIdAttributeNumber)
375                                 ? (Datum) tuple->t_data->t_oid
376                                 : fastgetattr(tuple,
377                                                           cacheInOutP->cc_key[2],
378                                                           RelationGetDescr(relation),
379                                                           &isNull);
380                         Assert(!isNull);
381                         /* FALLTHROUGH */
382                 case 2:
383                         cacheInOutP->cc_skey[1].sk_argument =
384                                 (cacheInOutP->cc_key[1] == ObjectIdAttributeNumber)
385                                 ? (Datum) tuple->t_data->t_oid
386                                 : fastgetattr(tuple,
387                                                           cacheInOutP->cc_key[1],
388                                                           RelationGetDescr(relation),
389                                                           &isNull);
390                         Assert(!isNull);
391                         /* FALLTHROUGH */
392                 case 1:
393                         cacheInOutP->cc_skey[0].sk_argument =
394                                 (cacheInOutP->cc_key[0] == ObjectIdAttributeNumber)
395                                 ? (Datum) tuple->t_data->t_oid
396                                 : fastgetattr(tuple,
397                                                           cacheInOutP->cc_key[0],
398                                                           RelationGetDescr(relation),
399                                                           &isNull);
400                         Assert(!isNull);
401                         break;
402                 default:
403                         elog(FATAL, "CCComputeTupleHashIndex: %d cc_nkeys",
404                                  cacheInOutP->cc_nkeys);
405                         break;
406         }
407
408         return CatalogCacheComputeHashIndex(cacheInOutP);
409 }
410
411 /* --------------------------------
412  *              CatCacheRemoveCTup
413  *
414  *              NB: assumes caller has switched to CacheCxt
415  * --------------------------------
416  */
417 static void
418 CatCacheRemoveCTup(CatCache *cache, Dlelem *elt)
419 {
420         CatCTup    *ct;
421         CatCTup    *other_ct;
422         Dlelem     *other_elt;
423
424         if (!elt)                                       /* probably-useless safety check */
425                 return;
426
427         /* We need to zap both linked-list elements as well as the tuple */
428
429         ct = (CatCTup *) DLE_VAL(elt);
430         other_elt = ct->ct_node;
431         other_ct = (CatCTup *) DLE_VAL(other_elt);
432
433         heap_freetuple(ct->ct_tup);
434
435         DLRemove(other_elt);
436         DLFreeElem(other_elt);
437         pfree(other_ct);
438         DLRemove(elt);
439         DLFreeElem(elt);
440         pfree(ct);
441
442         --cache->cc_ntup;
443 }
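/*
 * Layout sketch (added for illustration): each cached tuple is entered in two
 * doubly linked lists, once in its hash bucket and once in the cache-wide LRU
 * list.  SearchSysCache (below) builds the pair so that
 *
 *		bucket element -> CatCTup { ct_tup = tuple, ct_node = LRU element }
 *		LRU element    -> CatCTup { ct_tup = tuple, ct_node = bucket element }
 *
 * Because each CatCTup records its partner's Dlelem in ct_node, the routine
 * above can be handed either element and still unlink and free both list
 * entries, both CatCTup holders, and the shared heap tuple.
 */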
444
445 /* --------------------------------
446  *      CatalogCacheIdInvalidate()
447  *
448  *      Invalidate a tuple given a cache id.  In this case the id should always
449  *      be found (whether the cache has opened its relation or not).  Of course,
450  *      if the cache has yet to open its relation, there will be no tuples so
451  *      no problem.
452  * --------------------------------
453  */
454 void
455 CatalogCacheIdInvalidate(int cacheId,   /* XXX */
456                                                  Index hashIndex,
457                                                  ItemPointer pointer)
458 {
459         CatCache   *ccp;
460         CatCTup    *ct;
461         Dlelem     *elt;
462         MemoryContext oldcxt;
463
464         /* ----------------
465          *      sanity checks
466          * ----------------
467          */
468         Assert(hashIndex < NCCBUCK);
469         Assert(ItemPointerIsValid(pointer));
470         CACHE1_elog(DEBUG, "CatalogCacheIdInvalidate: called");
471
472         /* ----------------
473          *      switch to the cache context for our memory allocations
474          * ----------------
475          */
476         if (!CacheCxt)
477                 CacheCxt = CreateGlobalMemory("Cache");
478         oldcxt = MemoryContextSwitchTo((MemoryContext) CacheCxt);
479
480         /* ----------------
481          *      inspect every cache that could contain the tuple
482          * ----------------
483          */
484         for (ccp = Caches; ccp; ccp = ccp->cc_next)
485         {
486                 if (cacheId != ccp->id)
487                         continue;
488                 /* ----------------
489                  *      inspect the hash bucket until we find a match or exhaust
490                  * ----------------
491                  */
492                 for (elt = DLGetHead(ccp->cc_cache[hashIndex]);
493                          elt;
494                          elt = DLGetSucc(elt))
495                 {
496                         ct = (CatCTup *) DLE_VAL(elt);
497                         if (ItemPointerEquals(pointer, &ct->ct_tup->t_self))
498                                 break;
499                 }
500
501                 /* ----------------
502                  *      if we found a matching tuple, invalidate it.
503                  * ----------------
504                  */
505
506                 if (elt)
507                 {
508                         CatCacheRemoveCTup(ccp, elt);
509
510                         CACHE1_elog(DEBUG, "CatalogCacheIdInvalidate: invalidated");
511                 }
512
513                 if (cacheId != InvalidCatalogCacheId)
514                         break;
515         }
516
517         /* ----------------
518          *      return to the proper memory context
519          * ----------------
520          */
521         MemoryContextSwitchTo(oldcxt);
522 }
523
524 /* ----------------------------------------------------------------
525  *                                         public functions
526  *
527  *              ResetSystemCache
528  *              SystemCacheRelationFlushed
529  *              InitSysCache
530  *              SearchSysCache
531  *              RelationInvalidateCatalogCacheTuple
532  * ----------------------------------------------------------------
533  */
534 /* --------------------------------
535  *              ResetSystemCache
536  * --------------------------------
537  */
538 void
539 ResetSystemCache()
540 {
541         MemoryContext oldcxt;
542         struct catcache *cache;
543
544         CACHE1_elog(DEBUG, "ResetSystemCache called");
545
546         /* ----------------
547          *      first switch to the cache context so our allocations
548          *      do not vanish at the end of a transaction
549          * ----------------
550          */
551         if (!CacheCxt)
552                 CacheCxt = CreateGlobalMemory("Cache");
553
554         oldcxt = MemoryContextSwitchTo((MemoryContext) CacheCxt);
555
556         /* ----------------
557          *      here we purge the contents of all the caches
558          *
559          *      for each system cache
560          *         for each hash bucket
561          *                 for each tuple in hash bucket
562          *                         remove the tuple
563          * ----------------
564          */
565         for (cache = Caches; PointerIsValid(cache); cache = cache->cc_next)
566         {
567                 int                     hash;
568
569                 for (hash = 0; hash < NCCBUCK; hash += 1)
570                 {
571                         Dlelem     *elt,
572                                            *nextelt;
573
574                         for (elt = DLGetHead(cache->cc_cache[hash]); elt; elt = nextelt)
575                         {
576                                 nextelt = DLGetSucc(elt);
577                                 CatCacheRemoveCTup(cache, elt);
578                                 if (cache->cc_ntup < 0)
579                                         elog(NOTICE,
580                                                  "ResetSystemCache: cc_ntup<0 (software error)");
581                         }
582                 }
583                 cache->cc_ntup = 0;             /* in case of WARN error above */
584                 cache->busy = false;    /* to recover from recursive-use error */
585         }
586
587         CACHE1_elog(DEBUG, "end of ResetSystemCache call");
588
589         /* ----------------
590          *      back to the old context before we return...
591          * ----------------
592          */
593         MemoryContextSwitchTo(oldcxt);
594 }
595
596 /* --------------------------------
597  *              SystemCacheRelationFlushed
598  *
599  *      This is called by RelationFlushRelation() to clear out cached information
600  *      about a relation being dropped.  (This could be a DROP TABLE command,
601  *      or a temp table being dropped at end of transaction, or a table created
602  *      during the current transaction that is being dropped because of abort.)
603  *      Remove all cache entries relevant to the specified relation OID.
604  *
605  *      A special case occurs when relId is itself one of the cacheable system
606  *      tables --- although those'll never be dropped, they can get flushed from
607  *      the relcache (VACUUM causes this, for example).  In that case we need
608  *      to flush all cache entries from that table.  The brute-force method
609  *      currently used takes care of that quite handily.  (At one point we
610  *      also tried to force re-execution of CatalogCacheInitializeCache for
611  *      the cache(s) on that table.  This is a bad idea since it leads to all
612  *      kinds of trouble if a cache flush occurs while loading cache entries.
613  *      We now avoid the need to do it by copying cc_tupdesc out of the relcache,
614  *      rather than relying on the relcache to keep a tupdesc for us.  Of course
615  *      this assumes the tupdesc of a cacheable system table will not change...)
616  * --------------------------------
617  */
618 void
619 SystemCacheRelationFlushed(Oid relId)
620 {
621
622         /*
623          * XXX Ideally we'd search the caches and just zap entries that
624          * actually refer to or come from the indicated relation.  For now, we
625          * take the brute-force approach: just flush the caches entirely.
626          */
627         ResetSystemCache();
628 }
629
630 /* --------------------------------
631  *              InitSysCache
632  *
633  *      This allocates and initializes a cache for a system catalog relation.
634  *      Actually, the cache is only partially initialized to avoid opening the
635  *      relation.  The relation will be opened and the rest of the cache
636  *      structure initialized on the first access.
637  * --------------------------------
638  */
639 #ifdef CACHEDEBUG
640 #define InitSysCache_DEBUG1 \
641 do { \
642         elog(DEBUG, "InitSysCache: rid=%u id=%d nkeys=%d size=%d\n", \
643                 cp->relationId, cp->id, cp->cc_nkeys, cp->cc_size); \
644         for (i = 0; i < nkeys; i += 1) \
645         { \
646                 elog(DEBUG, "InitSysCache: key=%d skey=[%d %d %d %d]\n", \
647                          cp->cc_key[i], \
648                          cp->cc_skey[i].sk_flags, \
649                          cp->cc_skey[i].sk_attno, \
650                          cp->cc_skey[i].sk_procedure, \
651                          cp->cc_skey[i].sk_argument); \
652         } \
653 } while(0)
654
655 #else
656 #define InitSysCache_DEBUG1
657 #endif
658
659 CatCache   *
660 InitSysCache(char *relname,
661                          char *iname,
662                          int id,
663                          int nkeys,
664                          int *key,
665                          HeapTuple (*iScanfuncP) ())
666 {
667         CatCache   *cp;
668         int                     i;
669         MemoryContext oldcxt;
670
671         char       *indname;
672
673         indname = (iname) ? iname : NULL;
674
675         /* ----------------
676          *      first switch to the cache context so our allocations
677          *      do not vanish at the end of a transaction
678          * ----------------
679          */
680         if (!CacheCxt)
681                 CacheCxt = CreateGlobalMemory("Cache");
682
683         oldcxt = MemoryContextSwitchTo((MemoryContext) CacheCxt);
684
685         /* ----------------
686          *      allocate a new cache structure
687          * ----------------
688          */
689         cp = (CatCache *) palloc(sizeof(CatCache));
690         MemSet((char *) cp, 0, sizeof(CatCache));
691
692         /* ----------------
693          *      initialize the cache buckets (each bucket is a list header)
694          *      and the LRU tuple list
695          * ----------------
696          */
697         {
698
699                 /*
700                  * We can only do this optimization because the number of hash
701                  * buckets never changes.  Without it, we call palloc() too much.
702                  * We could move this to dllist.c, but the way we do this is not
703                  * dynamic/portable, so why allow other routines to use it.
704                  */
705                 Dllist     *cache_begin = palloc((NCCBUCK + 1) * sizeof(Dllist));
706
707                 for (i = 0; i <= NCCBUCK; ++i)
708                 {
709                         cp->cc_cache[i] = &cache_begin[i];
710                         cp->cc_cache[i]->dll_head = 0;
711                         cp->cc_cache[i]->dll_tail = 0;
712                 }
713         }
714
715         cp->cc_lrulist = DLNewList();
716
717         /* ----------------
718          *      Caches is the pointer to the head of the list of all the
719          *      system caches.  here we add the new cache to the top of the list.
720          * ----------------
721          */
722         cp->cc_next = Caches;           /* list of caches (single link) */
723         Caches = cp;
724
725         /* ----------------
726          *      initialize the cache's relation information for the relation
727          *      corresponding to this cache and initialize some of the new
728          *      cache's other internal fields.
729          * ----------------
730          */
731         cp->relationId = InvalidOid;
732         cp->indexId = InvalidOid;
733         cp->cc_relname = relname;
734         cp->cc_indname = indname;
735         cp->cc_tupdesc = (TupleDesc) NULL;
736         cp->id = id;
737         cp->busy = false;
738         cp->cc_maxtup = MAXTUP;
739         cp->cc_size = NCCBUCK;
740         cp->cc_nkeys = nkeys;
741         cp->cc_iscanfunc = iScanfuncP;
742
743         /* ----------------
744          *      partially initialize the cache's key information
745          *      CatalogCacheInitializeCache() will do the rest
746          * ----------------
747          */
748         for (i = 0; i < nkeys; ++i)
749         {
750                 cp->cc_key[i] = key[i];
751                 if (!key[i])
752                         elog(FATAL, "InitSysCache: called with 0 key[%d]", i);
753                 if (key[i] < 0)
754                 {
755                         if (key[i] != ObjectIdAttributeNumber)
756                                 elog(FATAL, "InitSysCache: called with %d key[%d]", key[i], i);
757                         else
758                         {
759                                 cp->cc_hashfunc[i] = GetCCHashFunc(OIDOID);
760                                 ScanKeyEntryInitialize(&cp->cc_skey[i],
761                                                                            (bits16) 0,
762                                                                            (AttrNumber) key[i],
763                                                                            (RegProcedure) F_OIDEQ,
764                                                                            (Datum) 0);
765                                 continue;
766                         }
767                 }
768
769                 cp->cc_skey[i].sk_attno = key[i];
770         }
771
772         /* ----------------
773          *      all done.  new cache is initialized.  print some debugging
774          *      information, if appropriate.
775          * ----------------
776          */
777         InitSysCache_DEBUG1;
778
779         /* ----------------
780          *      back to the old context before we return...
781          * ----------------
782          */
783         MemoryContextSwitchTo(oldcxt);
784         return cp;
785 }
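/*
 * Hypothetical usage sketch (the real cache definitions live in syscache.c,
 * not in this file): a cache over pg_type keyed on tuple OID might be set up
 * roughly like this, where TypeRelationName, TypeOidIndex, TYPEOID and
 * TypeOidIndexScan are assumed to be declared elsewhere:
 *
 *		static int	oid_key[] = { ObjectIdAttributeNumber };
 *
 *		cache = InitSysCache(TypeRelationName, TypeOidIndex, TYPEOID,
 *							 1, oid_key, TypeOidIndexScan);
 *
 * The authoritative list of caches, key attributes and index scan functions
 * is the table in syscache.c; the names above are only for illustration.
 */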
786
787
788 /* --------------------------------
789  *              SearchSelfReferences
790  *
791  *              This call handles lookups of self-referencing information, which
792  *              would otherwise cause infinite recursion in the system catalog cache.
793  *              This code short-circuits the normal index lookup for cache loads
794  *              in those cases and replaces it with a heap scan.
795  *
796  *              cache should already be initialized
797  * --------------------------------
798  */
799 static HeapTuple
800 SearchSelfReferences(struct catcache * cache)
801 {
802         HeapTuple       ntp;
803         Relation        rel;
804
805         if (cache->id == INDEXRELID)
806         {
807                 static Oid      indexSelfOid = InvalidOid;
808                 static HeapTuple indexSelfTuple = NULL;
809
810                 if (!OidIsValid(indexSelfOid))
811                 {
812                         ScanKeyData key;
813                         HeapScanDesc sd;
814
815                         /* Find oid of pg_index_indexrelid_index */
816                         rel = heap_openr(RelationRelationName, AccessShareLock);
817                         ScanKeyEntryInitialize(&key, 0, Anum_pg_class_relname,
818                                                          F_NAMEEQ, PointerGetDatum(IndexRelidIndex));
819                         sd = heap_beginscan(rel, false, SnapshotNow, 1, &key);
820                         ntp = heap_getnext(sd, 0);
821                         if (!HeapTupleIsValid(ntp))
822                                 elog(ERROR, "SearchSelfReferences: %s not found in %s",
823                                          IndexRelidIndex, RelationRelationName);
824                         indexSelfOid = ntp->t_data->t_oid;
825                         heap_endscan(sd);
826                         heap_close(rel, AccessShareLock);
827                 }
828                 /* Looking for something other than pg_index_indexrelid_index? */
829                 if ((Oid) cache->cc_skey[0].sk_argument != indexSelfOid)
830                         return (HeapTuple) 0;
831
832                 /* Do we need to load our private copy of the tuple? */
833                 if (!HeapTupleIsValid(indexSelfTuple))
834                 {
835                         HeapScanDesc sd;
836                         MemoryContext oldcxt;
837
838                         if (!CacheCxt)
839                                 CacheCxt = CreateGlobalMemory("Cache");
840                         rel = heap_open(cache->relationId, AccessShareLock);
841                         sd = heap_beginscan(rel, false, SnapshotNow, 1, cache->cc_skey);
842                         ntp = heap_getnext(sd, 0);
843                         if (!HeapTupleIsValid(ntp))
844                                 elog(ERROR, "SearchSelfReferences: tuple not found");
845                         oldcxt = MemoryContextSwitchTo((MemoryContext) CacheCxt);
846                         indexSelfTuple = heap_copytuple(ntp);
847                         MemoryContextSwitchTo(oldcxt);
848                         heap_endscan(sd);
849                         heap_close(rel, AccessShareLock);
850                 }
851                 return indexSelfTuple;
852         }
853         else if (cache->id == OPEROID)
854         {
855                 /* bootstrapping this requires preloading a range of rows. bjm */
856                 static HeapTuple operatorSelfTuple[MAX_OIDCMP - MIN_OIDCMP + 1];
857                 Oid                     lookup_oid = (Oid) cache->cc_skey[0].sk_argument;
858
859                 if (lookup_oid < MIN_OIDCMP || lookup_oid > MAX_OIDCMP)
860                         return (HeapTuple) 0;
861
862                 if (!HeapTupleIsValid(operatorSelfTuple[lookup_oid - MIN_OIDCMP]))
863                 {
864                         HeapScanDesc sd;
865                         MemoryContext oldcxt;
866
867                         if (!CacheCxt)
868                                 CacheCxt = CreateGlobalMemory("Cache");
869                         rel = heap_open(cache->relationId, AccessShareLock);
870                         sd = heap_beginscan(rel, false, SnapshotNow, 1, cache->cc_skey);
871                         ntp = heap_getnext(sd, 0);
872                         if (!HeapTupleIsValid(ntp))
873                                 elog(ERROR, "SearchSelfReferences: tuple not found");
874                         oldcxt = MemoryContextSwitchTo((MemoryContext) CacheCxt);
875                         operatorSelfTuple[lookup_oid - MIN_OIDCMP] = heap_copytuple(ntp);
876                         MemoryContextSwitchTo(oldcxt);
877                         heap_endscan(sd);
878                         heap_close(rel, AccessShareLock);
879                 }
880                 return operatorSelfTuple[lookup_oid - MIN_OIDCMP];
881         }
882         else
883                 return (HeapTuple) 0;
884
885 }
886
887 /* --------------------------------
888  *              SearchSysCache
889  *
890  *              This call searches a system cache for a tuple, opening the relation
891  *              if necessary (the first access to a particular cache).
892  * --------------------------------
893  */
894 HeapTuple
895 SearchSysCache(struct catcache * cache,
896                            Datum v1,
897                            Datum v2,
898                            Datum v3,
899                            Datum v4)
900 {
901         unsigned        hash;
902         CatCTup    *ct = NULL;
903         CatCTup    *nct;
904         CatCTup    *nct2;
905         Dlelem     *elt;
906         HeapTuple       ntp = NULL;
907
908         Relation        relation;
909         MemoryContext oldcxt;
910
911         /* ----------------
912          *      one-time startup overhead
913          * ----------------
914          */
915         if (cache->relationId == InvalidOid)
916                 CatalogCacheInitializeCache(cache, NULL);
917
918         /* ----------------
919          *      initialize the search key information
920          * ----------------
921          */
922         cache->cc_skey[0].sk_argument = v1;
923         cache->cc_skey[1].sk_argument = v2;
924         cache->cc_skey[2].sk_argument = v3;
925         cache->cc_skey[3].sk_argument = v4;
926
927         /*
928          * resolve self-referencing information
929          */
930         if ((ntp = SearchSelfReferences(cache)))
931                 return ntp;
932
933         /* ----------------
934          *      find the hash bucket in which to look for the tuple
935          * ----------------
936          */
937         hash = CatalogCacheComputeHashIndex(cache);
938
939         /* ----------------
940          *      scan the hash bucket until we find a match or exhaust our tuples
941          * ----------------
942          */
943         for (elt = DLGetHead(cache->cc_cache[hash]);
944                  elt;
945                  elt = DLGetSucc(elt))
946         {
947                 bool            res;
948
949                 ct = (CatCTup *) DLE_VAL(elt);
950                 /* ----------------
951                  *      see if the cached tuple matches our key.
952                  *      (should we be worried about time ranges? -cim 10/2/90)
953                  * ----------------
954                  */
955                 HeapKeyTest(ct->ct_tup,
956                                         cache->cc_tupdesc,
957                                         cache->cc_nkeys,
958                                         cache->cc_skey,
959                                         res);
960                 if (res)
961                         break;
962         }
963
964         /* ----------------
965          *      if we found a tuple in the cache, move it to the top of the
966          *      lru list, and return it.  We also move it to the front of the
967          *      list for its hashbucket, in order to speed subsequent searches.
968          *      (The most frequently accessed elements in any hashbucket will
969          *      tend to be near the front of the hashbucket's list.)
970          * ----------------
971          */
972         if (elt)
973         {
974                 Dlelem     *old_lru_elt = ((CatCTup *) DLE_VAL(elt))->ct_node;
975
976                 DLMoveToFront(old_lru_elt);
977                 DLMoveToFront(elt);
978
979 #ifdef CACHEDEBUG
980                 CACHE3_elog(DEBUG, "SearchSysCache(%s): found in bucket %d",
981                                         cache->cc_relname, hash);
982 #endif   /* CACHEDEBUG */
983
984                 return ct->ct_tup;
985         }
986
987         /* ----------------
988          *      Tuple was not found in cache, so we have to try and
989          *      retrieve it directly from the relation.  If it's found,
990          *      we add it to the cache.
991          *
992          *      To guard against possible infinite recursion, we mark this cache
993          *      "busy" while trying to load a new entry for it.  It is OK to
994          *      recursively invoke SearchSysCache for a different cache, but
995          *      a recursive call for the same cache will error out.  (We could
996          *      store the specific key(s) being looked for, and consider only
997          *      a recursive request for the same key to be an error, but this
998          *      simple scheme is sufficient for now.)
999          * ----------------
1000          */
1001
1002         if (cache->busy)
1003                 elog(ERROR, "SearchSysCache: recursive use of cache %d", cache->id);
1004         cache->busy = true;
1005
1006         /* ----------------
1007          *      open the relation associated with the cache
1008          * ----------------
1009          */
1010         relation = heap_open(cache->relationId, AccessShareLock);
1011         CACHE2_elog(DEBUG, "SearchSysCache(%s)",
1012                                 RelationGetRelationName(relation));
1013
1014         /* ----------------
1015          *      Switch to the cache memory context.
1016          * ----------------
1017          */
1018
1019         if (!CacheCxt)
1020                 CacheCxt = CreateGlobalMemory("Cache");
1021
1022         oldcxt = MemoryContextSwitchTo((MemoryContext) CacheCxt);
1023
1024         /* ----------------
1025          *      Scan the relation to find the tuple.  If there's an index, and
1026          *      if this isn't bootstrap (initdb) time, use the index.
1027          * ----------------
1028          */
1029         CACHE1_elog(DEBUG, "SearchSysCache: performing scan");
1030
1031         if ((RelationGetForm(relation))->relhasindex
1032                 && !IsIgnoringSystemIndexes())
1033         {
1034                 /* ----------
1035                  *      Switch back to old memory context so memory not freed
1036                  *      in the scan function will go away at transaction end.
1037                  *      wieck - 10/18/1996
1038                  * ----------
1039                  */
1040                 HeapTuple       indextp;
1041
1042                 MemoryContextSwitchTo(oldcxt);
1043                 Assert(cache->cc_iscanfunc);
1044                 switch (cache->cc_nkeys)
1045                 {
1046                         case 4:
1047                                 indextp = cache->cc_iscanfunc(relation, v1, v2, v3, v4);
1048                                 break;
1049                         case 3:
1050                                 indextp = cache->cc_iscanfunc(relation, v1, v2, v3);
1051                                 break;
1052                         case 2:
1053                                 indextp = cache->cc_iscanfunc(relation, v1, v2);
1054                                 break;
1055                         case 1:
1056                                 indextp = cache->cc_iscanfunc(relation, v1);
1057                                 break;
1058                         default:
1059                                 indextp = NULL;
1060                                 break;
1061                 }
1062                 /* ----------
1063                  *      Back to Cache context. If we got a tuple copy it
1064                  *      into our context.       wieck - 10/18/1996
1065                  *      And free the tuple that was allocated in the
1066                  *      transaction's context.   tgl - 02/03/2000
1067                  * ----------
1068                  */
1069                 if (HeapTupleIsValid(indextp))
1070                 {
1071                         MemoryContextSwitchTo((MemoryContext) CacheCxt);
1072                         ntp = heap_copytuple(indextp);
1073                         MemoryContextSwitchTo(oldcxt);
1074                         heap_freetuple(indextp);
1075                 }
1076                 MemoryContextSwitchTo((MemoryContext) CacheCxt);
1077         }
1078         else
1079         {
1080                 HeapScanDesc sd;
1081
1082                 /* ----------
1083                  *      As above do the lookup in the callers memory
1084                  *      context.
1085                  *      wieck - 10/18/1996
1086                  * ----------
1087                  */
1088                 MemoryContextSwitchTo(oldcxt);
1089
1090                 sd = heap_beginscan(relation, 0, SnapshotNow,
1091                                                         cache->cc_nkeys, cache->cc_skey);
1092
1093                 ntp = heap_getnext(sd, 0);
1094
1095                 MemoryContextSwitchTo((MemoryContext) CacheCxt);
1096
1097                 if (HeapTupleIsValid(ntp))
1098                 {
1099                         CACHE1_elog(DEBUG, "SearchSysCache: found tuple");
1100                         ntp = heap_copytuple(ntp);
1101                         /* We should not free the result of heap_getnext... */
1102                 }
1103
1104                 MemoryContextSwitchTo(oldcxt);
1105
1106                 heap_endscan(sd);
1107
1108                 MemoryContextSwitchTo((MemoryContext) CacheCxt);
1109         }
1110
1111         cache->busy = false;
1112
1113         /* ----------------
1114          *      scan is complete.  if tup is valid, we can add it to the cache.
1115          *      note we have already copied it into the cache memory context.
1116          * ----------------
1117          */
1118         if (HeapTupleIsValid(ntp))
1119         {
1120                 /* ----------------
1121                  *      allocate a new cache tuple holder, store the pointer
1122                  *      to the heap tuple there and initialize the list pointers.
1123                  * ----------------
1124                  */
1125                 Dlelem     *lru_elt;
1126
1127                 /*
1128                  * this is a little cumbersome here because we want the Dlelem's
1129                  * in both doubly linked lists to point to one another. That makes
1130                  * it easier to remove something from both the cache bucket and
1131                  * the lru list at the same time
1132                  */
1133                 nct = (CatCTup *) palloc(sizeof(CatCTup));
1134                 nct->ct_tup = ntp;
1135                 elt = DLNewElem(nct);
1136                 nct2 = (CatCTup *) palloc(sizeof(CatCTup));
1137                 nct2->ct_tup = ntp;
1138                 lru_elt = DLNewElem(nct2);
1139                 nct2->ct_node = elt;
1140                 nct->ct_node = lru_elt;
1141
1142                 DLAddHead(cache->cc_lrulist, lru_elt);
1143                 DLAddHead(cache->cc_cache[hash], elt);
1144
1145                 /* ----------------
1146                  *      If we've exceeded the desired size of this cache,
1147                  *      throw away the least recently used entry.
1148                  * ----------------
1149                  */
1150                 if (++cache->cc_ntup > cache->cc_maxtup)
1151                 {
1152                         CatCTup    *ct;
1153
1154                         elt = DLGetTail(cache->cc_lrulist);
1155                         ct = (CatCTup *) DLE_VAL(elt);
1156
1157                         if (ct != nct)          /* shouldn't be possible, but be safe... */
1158                         {
1159                                 CACHE2_elog(DEBUG, "SearchSysCache(%s): Overflow, LRU removal",
1160                                                         RelationGetRelationName(relation));
1161
1162                                 CatCacheRemoveCTup(cache, elt);
1163                         }
1164                 }
1165
1166                 CACHE4_elog(DEBUG, "SearchSysCache(%s): Contains %d/%d tuples",
1167                                         RelationGetRelationName(relation),
1168                                         cache->cc_ntup, cache->cc_maxtup);
1169                 CACHE3_elog(DEBUG, "SearchSysCache(%s): put in bucket %d",
1170                                         RelationGetRelationName(relation), hash);
1171         }
1172
1173         /* ----------------
1174          *      close the relation, switch back to the original memory context
1175          *      and return the tuple we found (or NULL)
1176          * ----------------
1177          */
1178         heap_close(relation, AccessShareLock);
1179
1180         MemoryContextSwitchTo(oldcxt);
1181
1182         return ntp;
1183 }
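/*
 * Usage sketch (illustrative, not from this file): callers normally reach
 * this routine through the wrappers in syscache.c, passing zero for unused
 * key positions.  A lookup by OID would look roughly like
 *
 *		HeapTuple	tup;
 *
 *		tup = SearchSysCache(cache, ObjectIdGetDatum(objid), 0, 0, 0);
 *		if (HeapTupleIsValid(tup))
 *			... use tup; it is owned by the cache, so do not pfree it ...
 *
 * The returned tuple lives in CacheCxt and remains valid only until it is
 * invalidated or pushed off the LRU list, so a caller that needs to keep it
 * past that point should make its own copy.
 */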
1184
1185 /* --------------------------------
1186  *      RelationInvalidateCatalogCacheTuple()
1187  *
1188  *      Invalidate a tuple from a specific relation.  This call determines the
1189  *      cache in question and calls CatalogCacheIdInvalidate().  It is -ok-
1190  *      if the relation cannot be found, it simply means this backend has yet
1191  *      to open it.
1192  * --------------------------------
1193  */
1194 void
1195 RelationInvalidateCatalogCacheTuple(Relation relation,
1196                                                                         HeapTuple tuple,
1197                                                           void (*function) (int, Index, ItemPointer))
1198 {
1199         struct catcache *ccp;
1200         MemoryContext oldcxt;
1201         Oid                     relationId;
1202
1203         /* ----------------
1204          *      sanity checks
1205          * ----------------
1206          */
1207         Assert(RelationIsValid(relation));
1208         Assert(HeapTupleIsValid(tuple));
1209         Assert(PointerIsValid(function));
1210         CACHE1_elog(DEBUG, "RelationInvalidateCatalogCacheTuple: called");
1211
1212         /* ----------------
1213          *      switch to the cache memory context
1214          * ----------------
1215          */
1216         if (!CacheCxt)
1217                 CacheCxt = CreateGlobalMemory("Cache");
1218         oldcxt = MemoryContextSwitchTo((MemoryContext) CacheCxt);
1219
1220         /* ----------------
1221          *      for each cache
1222          *         if the cache contains tuples from the specified relation
1223          *                 call the invalidation function on the tuples
1224          *                 in the proper hash bucket
1225          * ----------------
1226          */
1227         relationId = RelationGetRelid(relation);
1228
1229         for (ccp = Caches; ccp; ccp = ccp->cc_next)
1230         {
1231                 if (relationId != ccp->relationId)
1232                         continue;
1233
1234 #ifdef NOT_USED
1235                 /* OPT inline simplification of CatalogCacheIdInvalidate */
1236                 if (!PointerIsValid(function))
1237                         function = CatalogCacheIdInvalidate;
1238 #endif
1239
1240                 (*function) (ccp->id,
1241                                  CatalogCacheComputeTupleHashIndex(ccp, relation, tuple),
1242                                          &tuple->t_self);
1243         }
1244
1245         /* ----------------
1246          *      return to the proper memory context
1247          * ----------------
1248          */
1249         MemoryContextSwitchTo(oldcxt);
1250
1251         /* sendpm('I', "Invalidated tuple"); */
1252 }