]> granicus.if.org Git - postgresql/blob - src/backend/utils/cache/catcache.c
Update CVS HEAD for 2007 copyright. Back branches are typically not
[postgresql] / src / backend / utils / cache / catcache.c
1 /*-------------------------------------------------------------------------
2  *
3  * catcache.c
4  *        System catalog cache for tuples matching a key.
5  *
6  * Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
7  * Portions Copyright (c) 1994, Regents of the University of California
8  *
9  *
10  * IDENTIFICATION
11  *        $PostgreSQL: pgsql/src/backend/utils/cache/catcache.c,v 1.136 2007/01/05 22:19:42 momjian Exp $
12  *
13  *-------------------------------------------------------------------------
14  */
15 #include "postgres.h"
16
17 #include "access/genam.h"
18 #include "access/hash.h"
19 #include "access/heapam.h"
20 #include "access/valid.h"
21 #include "catalog/pg_operator.h"
22 #include "catalog/pg_type.h"
23 #include "miscadmin.h"
24 #ifdef CATCACHE_STATS
25 #include "storage/ipc.h"                /* for on_proc_exit */
26 #endif
27 #include "utils/builtins.h"
28 #include "utils/fmgroids.h"
29 #include "utils/memutils.h"
30 #include "utils/relcache.h"
31 #include "utils/resowner.h"
32 #include "utils/syscache.h"
33
34
35  /* #define CACHEDEBUG */       /* turns DEBUG elogs on */
36
37 /*
38  * Given a hash value and the size of the hash table, find the bucket
39  * in which the hash value belongs. Since the hash table must contain
40  * a power-of-2 number of elements, this is a simple bitmask.
41  */
42 #define HASH_INDEX(h, sz) ((Index) ((h) & ((sz) - 1)))
43
44
45 /*
46  *              variables, macros and other stuff
47  */
48
49 #ifdef CACHEDEBUG
50 #define CACHE1_elog(a,b)                                elog(a,b)
51 #define CACHE2_elog(a,b,c)                              elog(a,b,c)
52 #define CACHE3_elog(a,b,c,d)                    elog(a,b,c,d)
53 #define CACHE4_elog(a,b,c,d,e)                  elog(a,b,c,d,e)
54 #define CACHE5_elog(a,b,c,d,e,f)                elog(a,b,c,d,e,f)
55 #define CACHE6_elog(a,b,c,d,e,f,g)              elog(a,b,c,d,e,f,g)
56 #else
57 #define CACHE1_elog(a,b)
58 #define CACHE2_elog(a,b,c)
59 #define CACHE3_elog(a,b,c,d)
60 #define CACHE4_elog(a,b,c,d,e)
61 #define CACHE5_elog(a,b,c,d,e,f)
62 #define CACHE6_elog(a,b,c,d,e,f,g)
63 #endif
64
65 /* Cache management header --- pointer is NULL until created */
66 static CatCacheHeader *CacheHdr = NULL;
67
68
69 static uint32 CatalogCacheComputeHashValue(CatCache *cache, int nkeys,
70                                                          ScanKey cur_skey);
71 static uint32 CatalogCacheComputeTupleHashValue(CatCache *cache,
72                                                                   HeapTuple tuple);
73
74 #ifdef CATCACHE_STATS
75 static void CatCachePrintStats(int code, Datum arg);
76 #endif
77 static void CatCacheRemoveCTup(CatCache *cache, CatCTup *ct);
78 static void CatCacheRemoveCList(CatCache *cache, CatCList *cl);
79 static void CatalogCacheInitializeCache(CatCache *cache);
80 static CatCTup *CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp,
81                                                 uint32 hashValue, Index hashIndex,
82                                                 bool negative);
83 static HeapTuple build_dummy_tuple(CatCache *cache, int nkeys, ScanKey skeys);
84
85
86 /*
87  *                                      internal support functions
88  */
89
90 /*
91  * Look up the hash and equality functions for system types that are used
92  * as cache key fields.
93  *
94  * XXX this should be replaced by catalog lookups,
95  * but that seems to pose considerable risk of circularity...
96  */
97 static void
98 GetCCHashEqFuncs(Oid keytype, PGFunction *hashfunc, RegProcedure *eqfunc)
99 {
100         switch (keytype)
101         {
102                 case BOOLOID:
103                         *hashfunc = hashchar;
104                         *eqfunc = F_BOOLEQ;
105                         break;
106                 case CHAROID:
107                         *hashfunc = hashchar;
108                         *eqfunc = F_CHAREQ;
109                         break;
110                 case NAMEOID:
111                         *hashfunc = hashname;
112                         *eqfunc = F_NAMEEQ;
113                         break;
114                 case INT2OID:
115                         *hashfunc = hashint2;
116                         *eqfunc = F_INT2EQ;
117                         break;
118                 case INT2VECTOROID:
119                         *hashfunc = hashint2vector;
120                         *eqfunc = F_INT2VECTOREQ;
121                         break;
122                 case INT4OID:
123                         *hashfunc = hashint4;
124                         *eqfunc = F_INT4EQ;
125                         break;
126                 case TEXTOID:
127                         *hashfunc = hashtext;
128                         *eqfunc = F_TEXTEQ;
129                         break;
130                 case OIDOID:
131                 case REGPROCOID:
132                 case REGPROCEDUREOID:
133                 case REGOPEROID:
134                 case REGOPERATOROID:
135                 case REGCLASSOID:
136                 case REGTYPEOID:
137                         *hashfunc = hashoid;
138                         *eqfunc = F_OIDEQ;
139                         break;
140                 case OIDVECTOROID:
141                         *hashfunc = hashoidvector;
142                         *eqfunc = F_OIDVECTOREQ;
143                         break;
144                 default:
145                         elog(FATAL, "type %u not supported as catcache key", keytype);
146                         *hashfunc = NULL;       /* keep compiler quiet */
147                         *eqfunc = InvalidOid;
148                         break;
149         }
150 }
151
152 /*
153  *              CatalogCacheComputeHashValue
154  *
155  * Compute the hash value associated with a given set of lookup keys
156  */
157 static uint32
158 CatalogCacheComputeHashValue(CatCache *cache, int nkeys, ScanKey cur_skey)
159 {
160         uint32          hashValue = 0;
161
162         CACHE4_elog(DEBUG2, "CatalogCacheComputeHashValue %s %d %p",
163                                 cache->cc_relname,
164                                 nkeys,
165                                 cache);
166
167         switch (nkeys)
168         {
169                 case 4:
170                         hashValue ^=
171                                 DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[3],
172                                                                                           cur_skey[3].sk_argument)) << 9;
173                         /* FALLTHROUGH */
174                 case 3:
175                         hashValue ^=
176                                 DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[2],
177                                                                                           cur_skey[2].sk_argument)) << 6;
178                         /* FALLTHROUGH */
179                 case 2:
180                         hashValue ^=
181                                 DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[1],
182                                                                                           cur_skey[1].sk_argument)) << 3;
183                         /* FALLTHROUGH */
184                 case 1:
185                         hashValue ^=
186                                 DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[0],
187                                                                                                    cur_skey[0].sk_argument));
188                         break;
189                 default:
190                         elog(FATAL, "wrong number of hash keys: %d", nkeys);
191                         break;
192         }
193
194         return hashValue;
195 }
196
197 /*
198  *              CatalogCacheComputeTupleHashValue
199  *
200  * Compute the hash value associated with a given tuple to be cached
201  */
202 static uint32
203 CatalogCacheComputeTupleHashValue(CatCache *cache, HeapTuple tuple)
204 {
205         ScanKeyData cur_skey[4];
206         bool            isNull = false;
207
208         /* Copy pre-initialized overhead data for scankey */
209         memcpy(cur_skey, cache->cc_skey, sizeof(cur_skey));
210
211         /* Now extract key fields from tuple, insert into scankey */
212         switch (cache->cc_nkeys)
213         {
214                 case 4:
215                         cur_skey[3].sk_argument =
216                                 (cache->cc_key[3] == ObjectIdAttributeNumber)
217                                 ? ObjectIdGetDatum(HeapTupleGetOid(tuple))
218                                 : fastgetattr(tuple,
219                                                           cache->cc_key[3],
220                                                           cache->cc_tupdesc,
221                                                           &isNull);
222                         Assert(!isNull);
223                         /* FALLTHROUGH */
224                 case 3:
225                         cur_skey[2].sk_argument =
226                                 (cache->cc_key[2] == ObjectIdAttributeNumber)
227                                 ? ObjectIdGetDatum(HeapTupleGetOid(tuple))
228                                 : fastgetattr(tuple,
229                                                           cache->cc_key[2],
230                                                           cache->cc_tupdesc,
231                                                           &isNull);
232                         Assert(!isNull);
233                         /* FALLTHROUGH */
234                 case 2:
235                         cur_skey[1].sk_argument =
236                                 (cache->cc_key[1] == ObjectIdAttributeNumber)
237                                 ? ObjectIdGetDatum(HeapTupleGetOid(tuple))
238                                 : fastgetattr(tuple,
239                                                           cache->cc_key[1],
240                                                           cache->cc_tupdesc,
241                                                           &isNull);
242                         Assert(!isNull);
243                         /* FALLTHROUGH */
244                 case 1:
245                         cur_skey[0].sk_argument =
246                                 (cache->cc_key[0] == ObjectIdAttributeNumber)
247                                 ? ObjectIdGetDatum(HeapTupleGetOid(tuple))
248                                 : fastgetattr(tuple,
249                                                           cache->cc_key[0],
250                                                           cache->cc_tupdesc,
251                                                           &isNull);
252                         Assert(!isNull);
253                         break;
254                 default:
255                         elog(FATAL, "wrong number of hash keys: %d", cache->cc_nkeys);
256                         break;
257         }
258
259         return CatalogCacheComputeHashValue(cache, cache->cc_nkeys, cur_skey);
260 }
261
262
#ifdef CATCACHE_STATS

/*
 *		CatCachePrintStats
 *
 * Emit per-cache and overall catcache statistics at DEBUG2.
 *
 * The (code, arg) signature matches the on_proc_exit callback convention
 * (storage/ipc.h is included for on_proc_exit when CATCACHE_STATS is
 * defined); both arguments are ignored.  Derived columns in the output:
 * total hits = hits + neg_hits; "loads" shown as newloads plus the
 * remainder of searches not accounted for by hits or newloads.
 */
static void
CatCachePrintStats(int code, Datum arg)
{
	CatCache   *cache;
	/* running totals accumulated across all caches */
	long		cc_searches = 0;
	long		cc_hits = 0;
	long		cc_neg_hits = 0;
	long		cc_newloads = 0;
	long		cc_invals = 0;
	long		cc_lsearches = 0;
	long		cc_lhits = 0;

	for (cache = CacheHdr->ch_caches; cache; cache = cache->cc_next)
	{
		if (cache->cc_ntup == 0 && cache->cc_searches == 0)
			continue;			/* don't print unused caches */
		elog(DEBUG2, "catcache %s/%u: %d tup, %ld srch, %ld+%ld=%ld hits, %ld+%ld=%ld loads, %ld invals, %ld lsrch, %ld lhits",
			 cache->cc_relname,
			 cache->cc_indexoid,
			 cache->cc_ntup,
			 cache->cc_searches,
			 cache->cc_hits,
			 cache->cc_neg_hits,
			 cache->cc_hits + cache->cc_neg_hits,
			 cache->cc_newloads,
			 cache->cc_searches - cache->cc_hits - cache->cc_neg_hits - cache->cc_newloads,
			 cache->cc_searches - cache->cc_hits - cache->cc_neg_hits,
			 cache->cc_invals,
			 cache->cc_lsearches,
			 cache->cc_lhits);
		cc_searches += cache->cc_searches;
		cc_hits += cache->cc_hits;
		cc_neg_hits += cache->cc_neg_hits;
		cc_newloads += cache->cc_newloads;
		cc_invals += cache->cc_invals;
		cc_lsearches += cache->cc_lsearches;
		cc_lhits += cache->cc_lhits;
	}
	/* same layout as the per-cache line, but summed over all caches */
	elog(DEBUG2, "catcache totals: %d tup, %ld srch, %ld+%ld=%ld hits, %ld+%ld=%ld loads, %ld invals, %ld lsrch, %ld lhits",
		 CacheHdr->ch_ntup,
		 cc_searches,
		 cc_hits,
		 cc_neg_hits,
		 cc_hits + cc_neg_hits,
		 cc_newloads,
		 cc_searches - cc_hits - cc_neg_hits - cc_newloads,
		 cc_searches - cc_hits - cc_neg_hits,
		 cc_invals,
		 cc_lsearches,
		 cc_lhits);
}
#endif   /* CATCACHE_STATS */
317
318
319 /*
320  *              CatCacheRemoveCTup
321  *
322  * Unlink and delete the given cache entry
323  *
324  * NB: if it is a member of a CatCList, the CatCList is deleted too.
325  * Both the cache entry and the list had better have zero refcount.
326  */
327 static void
328 CatCacheRemoveCTup(CatCache *cache, CatCTup *ct)
329 {
330         Assert(ct->refcount == 0);
331         Assert(ct->my_cache == cache);
332
333         if (ct->c_list)
334         {
335                 /*
336                  * The cleanest way to handle this is to call CatCacheRemoveCList,
337                  * which will recurse back to me, and the recursive call will do the
338                  * work.  Set the "dead" flag to make sure it does recurse.
339                  */
340                 ct->dead = true;
341                 CatCacheRemoveCList(cache, ct->c_list);
342                 return;                                 /* nothing left to do */
343         }
344
345         /* delink from linked list */
346         DLRemove(&ct->cache_elem);
347
348         /* free associated tuple data */
349         if (ct->tuple.t_data != NULL)
350                 pfree(ct->tuple.t_data);
351         pfree(ct);
352
353         --cache->cc_ntup;
354         --CacheHdr->ch_ntup;
355 }
356
357 /*
358  *              CatCacheRemoveCList
359  *
360  * Unlink and delete the given cache list entry
361  *
362  * NB: any dead member entries that become unreferenced are deleted too.
363  */
364 static void
365 CatCacheRemoveCList(CatCache *cache, CatCList *cl)
366 {
367         int                     i;
368
369         Assert(cl->refcount == 0);
370         Assert(cl->my_cache == cache);
371
372         /* delink from member tuples */
373         for (i = cl->n_members; --i >= 0;)
374         {
375                 CatCTup    *ct = cl->members[i];
376
377                 Assert(ct->c_list == cl);
378                 ct->c_list = NULL;
379                 /* if the member is dead and now has no references, remove it */
380                 if (
381 #ifndef CATCACHE_FORCE_RELEASE
382                         ct->dead &&
383 #endif
384                         ct->refcount == 0)
385                         CatCacheRemoveCTup(cache, ct);
386         }
387
388         /* delink from linked list */
389         DLRemove(&cl->cache_elem);
390
391         /* free associated tuple data */
392         if (cl->tuple.t_data != NULL)
393                 pfree(cl->tuple.t_data);
394         pfree(cl);
395 }
396
397
/*
 *	CatalogCacheIdInvalidate
 *
 *	Invalidate entries in the specified cache, given a hash value and
 *	item pointer.  Positive entries are deleted if they match the item
 *	pointer.  Negative entries must be deleted if they match the hash
 *	value (since we do not have the exact key of the tuple that's being
 *	inserted).  But this should only rarely result in loss of a cache
 *	entry that could have been kept.
 *
 *	Note that it's not very relevant whether the tuple identified by
 *	the item pointer is being inserted or deleted.  We don't expect to
 *	find matching positive entries in the one case, and we don't expect
 *	to find matching negative entries in the other; but we will do the
 *	right things in any case.
 *
 *	This routine is only quasi-public: it should only be used by inval.c.
 */
void
CatalogCacheIdInvalidate(int cacheId,
						 uint32 hashValue,
						 ItemPointer pointer)
{
	CatCache   *ccp;

	/*
	 * sanity checks
	 */
	Assert(ItemPointerIsValid(pointer));
	CACHE1_elog(DEBUG2, "CatalogCacheIdInvalidate: called");

	/*
	 * inspect caches to find the proper cache
	 */
	for (ccp = CacheHdr->ch_caches; ccp; ccp = ccp->cc_next)
	{
		Index		hashIndex;
		Dlelem	   *elt,
				   *nextelt;

		if (cacheId != ccp->id)
			continue;

		/*
		 * We don't bother to check whether the cache has finished
		 * initialization yet; if not, there will be no entries in it so no
		 * problem.
		 */

		/*
		 * Invalidate *all* CatCLists in this cache; it's too hard to tell
		 * which searches might still be correct, so just zap 'em all.
		 */
		for (elt = DLGetHead(&ccp->cc_lists); elt; elt = nextelt)
		{
			CatCList   *cl = (CatCList *) DLE_VAL(elt);

			/* grab the successor first: removal would invalidate elt */
			nextelt = DLGetSucc(elt);

			if (cl->refcount > 0)
				cl->dead = true;	/* still pinned; just flag it dead */
			else
				CatCacheRemoveCList(ccp, cl);
		}

		/*
		 * inspect the proper hash bucket for tuple matches
		 */
		hashIndex = HASH_INDEX(hashValue, ccp->cc_nbuckets);

		for (elt = DLGetHead(&ccp->cc_bucket[hashIndex]); elt; elt = nextelt)
		{
			CatCTup    *ct = (CatCTup *) DLE_VAL(elt);

			/* grab the successor first: removal would invalidate elt */
			nextelt = DLGetSucc(elt);

			if (hashValue != ct->hash_value)
				continue;		/* ignore non-matching hash values */

			/* negative entries match on hash alone; positive ones on TID */
			if (ct->negative ||
				ItemPointerEquals(pointer, &ct->tuple.t_self))
			{
				if (ct->refcount > 0 ||
					(ct->c_list && ct->c_list->refcount > 0))
				{
					/* entry (or its list) is pinned; just flag it dead */
					ct->dead = true;
					/* list, if any, was marked dead above */
					Assert(ct->c_list == NULL || ct->c_list->dead);
				}
				else
					CatCacheRemoveCTup(ccp, ct);
				CACHE1_elog(DEBUG2, "CatalogCacheIdInvalidate: invalidated");
#ifdef CATCACHE_STATS
				ccp->cc_invals++;
#endif
				/* could be multiple matches, so keep looking! */
			}
		}
		break;					/* need only search this one cache */
	}
}
499
500 /* ----------------------------------------------------------------
501  *                                         public functions
502  * ----------------------------------------------------------------
503  */
504
505
506 /*
507  * Standard routine for creating cache context if it doesn't exist yet
508  *
509  * There are a lot of places (probably far more than necessary) that check
510  * whether CacheMemoryContext exists yet and want to create it if not.
511  * We centralize knowledge of exactly how to create it here.
512  */
513 void
514 CreateCacheMemoryContext(void)
515 {
516         /*
517          * Purely for paranoia, check that context doesn't exist; caller probably
518          * did so already.
519          */
520         if (!CacheMemoryContext)
521                 CacheMemoryContext = AllocSetContextCreate(TopMemoryContext,
522                                                                                                    "CacheMemoryContext",
523                                                                                                    ALLOCSET_DEFAULT_MINSIZE,
524                                                                                                    ALLOCSET_DEFAULT_INITSIZE,
525                                                                                                    ALLOCSET_DEFAULT_MAXSIZE);
526 }
527
528
529 /*
530  *              AtEOXact_CatCache
531  *
532  * Clean up catcaches at end of main transaction (either commit or abort)
533  *
534  * As of PostgreSQL 8.1, catcache pins should get released by the
535  * ResourceOwner mechanism.  This routine is just a debugging
536  * cross-check that no pins remain.
537  */
538 void
539 AtEOXact_CatCache(bool isCommit)
540 {
541 #ifdef USE_ASSERT_CHECKING
542         if (assert_enabled)
543         {
544                 CatCache   *ccp;
545
546                 for (ccp = CacheHdr->ch_caches; ccp; ccp = ccp->cc_next)
547                 {
548                         Dlelem     *elt;
549                         int                     i;
550
551                         /* Check CatCLists */
552                         for (elt = DLGetHead(&ccp->cc_lists); elt; elt = DLGetSucc(elt))
553                         {
554                                 CatCList   *cl = (CatCList *) DLE_VAL(elt);
555
556                                 Assert(cl->cl_magic == CL_MAGIC);
557                                 Assert(cl->refcount == 0);
558                                 Assert(!cl->dead);
559                         }
560
561                         /* Check individual tuples */
562                         for (i = 0; i < ccp->cc_nbuckets; i++)
563                         {
564                                 for (elt = DLGetHead(&ccp->cc_bucket[i]);
565                                          elt;
566                                          elt = DLGetSucc(elt))
567                                 {
568                                         CatCTup    *ct = (CatCTup *) DLE_VAL(elt);
569
570                                         Assert(ct->ct_magic == CT_MAGIC);
571                                         Assert(ct->refcount == 0);
572                                         Assert(!ct->dead);
573                                 }
574                         }
575                 }
576         }
577 #endif
578 }
579
580 /*
581  *              ResetCatalogCache
582  *
583  * Reset one catalog cache to empty.
584  *
585  * This is not very efficient if the target cache is nearly empty.
586  * However, it shouldn't need to be efficient; we don't invoke it often.
587  */
588 static void
589 ResetCatalogCache(CatCache *cache)
590 {
591         Dlelem     *elt,
592                            *nextelt;
593         int                     i;
594
595         /* Remove each list in this cache, or at least mark it dead */
596         for (elt = DLGetHead(&cache->cc_lists); elt; elt = nextelt)
597         {
598                 CatCList   *cl = (CatCList *) DLE_VAL(elt);
599
600                 nextelt = DLGetSucc(elt);
601
602                 if (cl->refcount > 0)
603                         cl->dead = true;
604                 else
605                         CatCacheRemoveCList(cache, cl);
606         }
607
608         /* Remove each tuple in this cache, or at least mark it dead */
609         for (i = 0; i < cache->cc_nbuckets; i++)
610         {
611                 for (elt = DLGetHead(&cache->cc_bucket[i]); elt; elt = nextelt)
612                 {
613                         CatCTup    *ct = (CatCTup *) DLE_VAL(elt);
614
615                         nextelt = DLGetSucc(elt);
616
617                         if (ct->refcount > 0 ||
618                                 (ct->c_list && ct->c_list->refcount > 0))
619                         {
620                                 ct->dead = true;
621                                 /* list, if any, was marked dead above */
622                                 Assert(ct->c_list == NULL || ct->c_list->dead);
623                         }
624                         else
625                                 CatCacheRemoveCTup(cache, ct);
626 #ifdef CATCACHE_STATS
627                         cache->cc_invals++;
628 #endif
629                 }
630         }
631 }
632
633 /*
634  *              ResetCatalogCaches
635  *
636  * Reset all caches when a shared cache inval event forces it
637  */
638 void
639 ResetCatalogCaches(void)
640 {
641         CatCache   *cache;
642
643         CACHE1_elog(DEBUG2, "ResetCatalogCaches called");
644
645         for (cache = CacheHdr->ch_caches; cache; cache = cache->cc_next)
646                 ResetCatalogCache(cache);
647
648         CACHE1_elog(DEBUG2, "end of ResetCatalogCaches call");
649 }
650
/*
 *		CatalogCacheFlushRelation
 *
 *	This is called by RelationFlushRelation() to clear out cached information
 *	about a relation being dropped.  (This could be a DROP TABLE command,
 *	or a temp table being dropped at end of transaction, or a table created
 *	during the current transaction that is being dropped because of abort.)
 *	Remove all cache entries relevant to the specified relation OID.
 *
 *	A special case occurs when relId is itself one of the cacheable system
 *	tables --- although those'll never be dropped, they can get flushed from
 *	the relcache (VACUUM causes this, for example).  In that case we need
 *	to flush all cache entries that came from that table.  (At one point we
 *	also tried to force re-execution of CatalogCacheInitializeCache for
 *	the cache(s) on that table.  This is a bad idea since it leads to all
 *	kinds of trouble if a cache flush occurs while loading cache entries.
 *	We now avoid the need to do it by copying cc_tupdesc out of the relcache,
 *	rather than relying on the relcache to keep a tupdesc for us.  Of course
 *	this assumes the tupdesc of a cachable system table will not change...)
 */
void
CatalogCacheFlushRelation(Oid relId)
{
	CatCache   *cache;

	CACHE2_elog(DEBUG2, "CatalogCacheFlushRelation called for %u", relId);

	for (cache = CacheHdr->ch_caches; cache; cache = cache->cc_next)
	{
		int			i;

		/* We can ignore uninitialized caches, since they must be empty */
		if (cache->cc_tupdesc == NULL)
			continue;

		/* Does this cache store tuples of the target relation itself? */
		if (cache->cc_tupdesc->attrs[0]->attrelid == relId)
		{
			/* Yes, so flush all its contents */
			ResetCatalogCache(cache);
			continue;
		}

		/* Does this cache store tuples associated with relations at all? */
		if (cache->cc_reloidattr == 0)
			continue;			/* nope, leave it alone */

		/* Yes, scan the tuples and remove those related to relId */
		for (i = 0; i < cache->cc_nbuckets; i++)
		{
			Dlelem	   *elt,
					   *nextelt;

			for (elt = DLGetHead(&cache->cc_bucket[i]); elt; elt = nextelt)
			{
				CatCTup    *ct = (CatCTup *) DLE_VAL(elt);
				Oid			tupRelid;

				/* grab the successor first: removal would invalidate elt */
				nextelt = DLGetSucc(elt);

				/*
				 * Negative entries are never considered related to a rel,
				 * even if the rel is part of their lookup key.
				 */
				if (ct->negative)
					continue;

				/* fetch the relation OID this entry is associated with */
				if (cache->cc_reloidattr == ObjectIdAttributeNumber)
					tupRelid = HeapTupleGetOid(&ct->tuple);
				else
				{
					bool		isNull;

					tupRelid =
						DatumGetObjectId(fastgetattr(&ct->tuple,
													 cache->cc_reloidattr,
													 cache->cc_tupdesc,
													 &isNull));
					Assert(!isNull);
				}

				if (tupRelid == relId)
				{
					if (ct->refcount > 0 ||
						(ct->c_list && ct->c_list->refcount > 0))
					{
						/* entry is still pinned; just flag it dead */
						ct->dead = true;
						/* parent list must be considered dead too */
						if (ct->c_list)
							ct->c_list->dead = true;
					}
					else
						CatCacheRemoveCTup(cache, ct);
#ifdef CATCACHE_STATS
					cache->cc_invals++;
#endif
				}
			}
		}
	}

	CACHE1_elog(DEBUG2, "end of CatalogCacheFlushRelation call");
}
754
/*
 *		InitCatCache
 *
 *	This allocates and initializes a cache for a system catalog relation.
 *	Actually, the cache is only partially initialized to avoid opening the
 *	relation.  The relation will be opened and the rest of the cache
 *	structure initialized on the first access.
 */
#ifdef CACHEDEBUG
/* Trace the identity and geometry of each cache as it is created.
 * Expands against the local variable "cp" in InitCatCache. */
#define InitCatCache_DEBUG2 \
do { \
	elog(DEBUG2, "InitCatCache: rel=%u ind=%u id=%d nkeys=%d size=%d", \
		 cp->cc_reloid, cp->cc_indexoid, cp->id, \
		 cp->cc_nkeys, cp->cc_nbuckets); \
} while(0)
#else
#define InitCatCache_DEBUG2
#endif
773
774 CatCache *
775 InitCatCache(int id,
776                          Oid reloid,
777                          Oid indexoid,
778                          int reloidattr,
779                          int nkeys,
780                          const int *key,
781                          int nbuckets)
782 {
783         CatCache   *cp;
784         MemoryContext oldcxt;
785         int                     i;
786
787         /*
788          * nbuckets is the number of hash buckets to use in this catcache.
789          * Currently we just use a hard-wired estimate of an appropriate size for
790          * each cache; maybe later make them dynamically resizable?
791          *
792          * nbuckets must be a power of two.  We check this via Assert rather than
793          * a full runtime check because the values will be coming from constant
794          * tables.
795          *
796          * If you're confused by the power-of-two check, see comments in
797          * bitmapset.c for an explanation.
798          */
799         Assert(nbuckets > 0 && (nbuckets & -nbuckets) == nbuckets);
800
801         /*
802          * first switch to the cache context so our allocations do not vanish at
803          * the end of a transaction
804          */
805         if (!CacheMemoryContext)
806                 CreateCacheMemoryContext();
807
808         oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
809
810         /*
811          * if first time through, initialize the cache group header
812          */
813         if (CacheHdr == NULL)
814         {
815                 CacheHdr = (CatCacheHeader *) palloc(sizeof(CatCacheHeader));
816                 CacheHdr->ch_caches = NULL;
817                 CacheHdr->ch_ntup = 0;
818 #ifdef CATCACHE_STATS
819                 /* set up to dump stats at backend exit */
820                 on_proc_exit(CatCachePrintStats, 0);
821 #endif
822         }
823
824         /*
825          * allocate a new cache structure
826          *
827          * Note: we assume zeroing initializes the Dllist headers correctly
828          */
829         cp = (CatCache *) palloc0(sizeof(CatCache) + nbuckets * sizeof(Dllist));
830
831         /*
832          * initialize the cache's relation information for the relation
833          * corresponding to this cache, and initialize some of the new cache's
834          * other internal fields.  But don't open the relation yet.
835          */
836         cp->id = id;
837         cp->cc_relname = "(not known yet)";
838         cp->cc_reloid = reloid;
839         cp->cc_indexoid = indexoid;
840         cp->cc_relisshared = false; /* temporary */
841         cp->cc_tupdesc = (TupleDesc) NULL;
842         cp->cc_reloidattr = reloidattr;
843         cp->cc_ntup = 0;
844         cp->cc_nbuckets = nbuckets;
845         cp->cc_nkeys = nkeys;
846         for (i = 0; i < nkeys; ++i)
847                 cp->cc_key[i] = key[i];
848
849         /*
850          * new cache is initialized as far as we can go for now. print some
851          * debugging information, if appropriate.
852          */
853         InitCatCache_DEBUG2;
854
855         /*
856          * add completed cache to top of group header's list
857          */
858         cp->cc_next = CacheHdr->ch_caches;
859         CacheHdr->ch_caches = cp;
860
861         /*
862          * back to the old context before we return...
863          */
864         MemoryContextSwitchTo(oldcxt);
865
866         return cp;
867 }
868
/*
 *		CatalogCacheInitializeCache
 *
 * This function does final initialization of a catcache: obtain the tuple
 * descriptor and set up the hash and equality function links.  We assume
 * that the relcache entry can be opened at this point!
 */
#ifdef CACHEDEBUG
/* Report which cache is being initialized */
#define CatalogCacheInitializeCache_DEBUG1 \
	elog(DEBUG2, "CatalogCacheInitializeCache: cache @%p rel=%u", cache, \
		 cache->cc_reloid)

/* Report each key column as it is set up.  NB: expands against the local
 * variables "i" and "tupdesc" of CatalogCacheInitializeCache. */
#define CatalogCacheInitializeCache_DEBUG2 \
do { \
		if (cache->cc_key[i] > 0) { \
			elog(DEBUG2, "CatalogCacheInitializeCache: load %d/%d w/%d, %u", \
				i+1, cache->cc_nkeys, cache->cc_key[i], \
				 tupdesc->attrs[cache->cc_key[i] - 1]->atttypid); \
		} else { \
			elog(DEBUG2, "CatalogCacheInitializeCache: load %d/%d w/%d", \
				i+1, cache->cc_nkeys, cache->cc_key[i]); \
		} \
} while(0)
#else
#define CatalogCacheInitializeCache_DEBUG1
#define CatalogCacheInitializeCache_DEBUG2
#endif
896
897 static void
898 CatalogCacheInitializeCache(CatCache *cache)
899 {
900         Relation        relation;
901         MemoryContext oldcxt;
902         TupleDesc       tupdesc;
903         int                     i;
904
905         CatalogCacheInitializeCache_DEBUG1;
906
907         relation = heap_open(cache->cc_reloid, AccessShareLock);
908
909         /*
910          * switch to the cache context so our allocations do not vanish at the end
911          * of a transaction
912          */
913         Assert(CacheMemoryContext != NULL);
914
915         oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
916
917         /*
918          * copy the relcache's tuple descriptor to permanent cache storage
919          */
920         tupdesc = CreateTupleDescCopyConstr(RelationGetDescr(relation));
921
922         /*
923          * save the relation's name and relisshared flag, too (cc_relname is used
924          * only for debugging purposes)
925          */
926         cache->cc_relname = pstrdup(RelationGetRelationName(relation));
927         cache->cc_relisshared = RelationGetForm(relation)->relisshared;
928
929         /*
930          * return to the caller's memory context and close the rel
931          */
932         MemoryContextSwitchTo(oldcxt);
933
934         heap_close(relation, AccessShareLock);
935
936         CACHE3_elog(DEBUG2, "CatalogCacheInitializeCache: %s, %d keys",
937                                 cache->cc_relname, cache->cc_nkeys);
938
939         /*
940          * initialize cache's key information
941          */
942         for (i = 0; i < cache->cc_nkeys; ++i)
943         {
944                 Oid                     keytype;
945                 RegProcedure eqfunc;
946
947                 CatalogCacheInitializeCache_DEBUG2;
948
949                 if (cache->cc_key[i] > 0)
950                         keytype = tupdesc->attrs[cache->cc_key[i] - 1]->atttypid;
951                 else
952                 {
953                         if (cache->cc_key[i] != ObjectIdAttributeNumber)
954                                 elog(FATAL, "only sys attr supported in caches is OID");
955                         keytype = OIDOID;
956                 }
957
958                 GetCCHashEqFuncs(keytype,
959                                                  &cache->cc_hashfunc[i],
960                                                  &eqfunc);
961
962                 cache->cc_isname[i] = (keytype == NAMEOID);
963
964                 /*
965                  * Do equality-function lookup (we assume this won't need a catalog
966                  * lookup for any supported type)
967                  */
968                 fmgr_info_cxt(eqfunc,
969                                           &cache->cc_skey[i].sk_func,
970                                           CacheMemoryContext);
971
972                 /* Initialize sk_attno suitably for HeapKeyTest() and heap scans */
973                 cache->cc_skey[i].sk_attno = cache->cc_key[i];
974
975                 /* Fill in sk_strategy as well --- always standard equality */
976                 cache->cc_skey[i].sk_strategy = BTEqualStrategyNumber;
977                 cache->cc_skey[i].sk_subtype = InvalidOid;
978
979                 CACHE4_elog(DEBUG2, "CatalogCacheInitializeCache %s %d %p",
980                                         cache->cc_relname,
981                                         i,
982                                         cache);
983         }
984
985         /*
986          * mark this cache fully initialized
987          */
988         cache->cc_tupdesc = tupdesc;
989 }
990
991 /*
992  * InitCatCachePhase2 -- external interface for CatalogCacheInitializeCache
993  *
994  * One reason to call this routine is to ensure that the relcache has
995  * created entries for all the catalogs and indexes referenced by catcaches.
996  * Therefore, provide an option to open the index as well as fixing the
997  * cache itself.  An exception is the indexes on pg_am, which we don't use
998  * (cf. IndexScanOK).
999  */
1000 void
1001 InitCatCachePhase2(CatCache *cache, bool touch_index)
1002 {
1003         if (cache->cc_tupdesc == NULL)
1004                 CatalogCacheInitializeCache(cache);
1005
1006         if (touch_index &&
1007                 cache->id != AMOID &&
1008                 cache->id != AMNAME)
1009         {
1010                 Relation        idesc;
1011
1012                 idesc = index_open(cache->cc_indexoid, AccessShareLock);
1013                 index_close(idesc, AccessShareLock);
1014         }
1015 }
1016
1017
1018 /*
1019  *              IndexScanOK
1020  *
1021  *              This function checks for tuples that will be fetched by
1022  *              IndexSupportInitialize() during relcache initialization for
1023  *              certain system indexes that support critical syscaches.
1024  *              We can't use an indexscan to fetch these, else we'll get into
1025  *              infinite recursion.  A plain heap scan will work, however.
1026  *
1027  *              Once we have completed relcache initialization (signaled by
1028  *              criticalRelcachesBuilt), we don't have to worry anymore.
1029  */
1030 static bool
1031 IndexScanOK(CatCache *cache, ScanKey cur_skey)
1032 {
1033         if (cache->id == INDEXRELID)
1034         {
1035                 /*
1036                  * Rather than tracking exactly which indexes have to be loaded
1037                  * before we can use indexscans (which changes from time to time),
1038                  * just force all pg_index searches to be heap scans until we've
1039                  * built the critical relcaches.
1040                  */
1041                 if (!criticalRelcachesBuilt)
1042                         return false;
1043         }
1044         else if (cache->id == AMOID ||
1045                          cache->id == AMNAME)
1046         {
1047                 /*
1048                  * Always do heap scans in pg_am, because it's so small there's not
1049                  * much point in an indexscan anyway.  We *must* do this when
1050                  * initially building critical relcache entries, but we might as well
1051                  * just always do it.
1052                  */
1053                 return false;
1054         }
1055
1056         /* Normal case, allow index scan */
1057         return true;
1058 }
1059
/*
 *	SearchCatCache
 *
 *		This call searches a system cache for a tuple, opening the relation
 *		if necessary (on the first access to a particular cache).
 *
 *		The result is NULL if not found, or a pointer to a HeapTuple in
 *		the cache.	The caller must not modify the tuple, and must call
 *		ReleaseCatCache() when done with it.
 *
 * The search key values should be expressed as Datums of the key columns'
 * datatype(s).  (Pass zeroes for any unused parameters.)  As a special
 * exception, the passed-in key for a NAME column can be just a C string;
 * the caller need not go to the trouble of converting it to a fully
 * null-padded NAME.
 */
HeapTuple
SearchCatCache(CatCache *cache,
			   Datum v1,
			   Datum v2,
			   Datum v3,
			   Datum v4)
{
	ScanKeyData cur_skey[4];
	uint32		hashValue;
	Index		hashIndex;
	Dlelem	   *elt;
	CatCTup    *ct;
	Relation	relation;
	SysScanDesc scandesc;
	HeapTuple	ntp;

	/*
	 * one-time startup overhead for each cache
	 */
	if (cache->cc_tupdesc == NULL)
		CatalogCacheInitializeCache(cache);

#ifdef CATCACHE_STATS
	cache->cc_searches++;
#endif

	/*
	 * initialize the search key information: copy the cache's prototype
	 * ScanKey array, then plug in the caller's key Datums
	 */
	memcpy(cur_skey, cache->cc_skey, sizeof(cur_skey));
	cur_skey[0].sk_argument = v1;
	cur_skey[1].sk_argument = v2;
	cur_skey[2].sk_argument = v3;
	cur_skey[3].sk_argument = v4;

	/*
	 * find the hash bucket in which to look for the tuple
	 */
	hashValue = CatalogCacheComputeHashValue(cache, cache->cc_nkeys, cur_skey);
	hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);

	/*
	 * scan the hash bucket until we find a match or exhaust our tuples
	 */
	for (elt = DLGetHead(&cache->cc_bucket[hashIndex]);
		 elt;
		 elt = DLGetSucc(elt))
	{
		bool		res;

		ct = (CatCTup *) DLE_VAL(elt);

		if (ct->dead)
			continue;			/* ignore dead entries */

		if (ct->hash_value != hashValue)
			continue;			/* quickly skip entry if wrong hash val */

		/*
		 * see if the cached tuple matches our key.  (HeapKeyTest is a
		 * macro; it sets "res" rather than returning a value.)
		 */
		HeapKeyTest(&ct->tuple,
					cache->cc_tupdesc,
					cache->cc_nkeys,
					cur_skey,
					res);
		if (!res)
			continue;

		/*
		 * We found a match in the cache.  Move it to the front of the list
		 * for its hashbucket, in order to speed subsequent searches.  (The
		 * most frequently accessed elements in any hashbucket will tend to be
		 * near the front of the hashbucket's list.)
		 */
		DLMoveToFront(&ct->cache_elem);

		/*
		 * If it's a positive entry, bump its refcount and return it. If it's
		 * negative, we can report failure to the caller.
		 */
		if (!ct->negative)
		{
			/*
			 * Enlarge the resource-owner array before touching the
			 * refcount, so that the Remember step below cannot fail and
			 * leave the count inconsistent.
			 */
			ResourceOwnerEnlargeCatCacheRefs(CurrentResourceOwner);
			ct->refcount++;
			ResourceOwnerRememberCatCacheRef(CurrentResourceOwner, &ct->tuple);

			CACHE3_elog(DEBUG2, "SearchCatCache(%s): found in bucket %d",
						cache->cc_relname, hashIndex);

#ifdef CATCACHE_STATS
			cache->cc_hits++;
#endif

			return &ct->tuple;
		}
		else
		{
			CACHE3_elog(DEBUG2, "SearchCatCache(%s): found neg entry in bucket %d",
						cache->cc_relname, hashIndex);

#ifdef CATCACHE_STATS
			cache->cc_neg_hits++;
#endif

			/* negative entry means "known not to exist" */
			return NULL;
		}
	}

	/*
	 * Tuple was not found in cache, so we have to try to retrieve it directly
	 * from the relation.  If found, we will add it to the cache; if not
	 * found, we will add a negative cache entry instead.
	 *
	 * NOTE: it is possible for recursive cache lookups to occur while reading
	 * the relation --- for example, due to shared-cache-inval messages being
	 * processed during heap_open().  This is OK.  It's even possible for one
	 * of those lookups to find and enter the very same tuple we are trying to
	 * fetch here.  If that happens, we will enter a second copy of the tuple
	 * into the cache.      The first copy will never be referenced again, and
	 * will eventually age out of the cache, so there's no functional problem.
	 * This case is rare enough that it's not worth expending extra cycles to
	 * detect.
	 */
	relation = heap_open(cache->cc_reloid, AccessShareLock);

	scandesc = systable_beginscan(relation,
								  cache->cc_indexoid,
								  IndexScanOK(cache, cur_skey),
								  SnapshotNow,
								  cache->cc_nkeys,
								  cur_skey);

	ct = NULL;

	while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
	{
		ct = CatalogCacheCreateEntry(cache, ntp,
									 hashValue, hashIndex,
									 false);
		/* immediately set the refcount to 1 */
		ResourceOwnerEnlargeCatCacheRefs(CurrentResourceOwner);
		ct->refcount++;
		ResourceOwnerRememberCatCacheRef(CurrentResourceOwner, &ct->tuple);
		break;					/* assume only one match */
	}

	systable_endscan(scandesc);

	heap_close(relation, AccessShareLock);

	/*
	 * If tuple was not found, we need to build a negative cache entry
	 * containing a fake tuple.  The fake tuple has the correct key columns,
	 * but nulls everywhere else.
	 *
	 * In bootstrap mode, we don't build negative entries, because the cache
	 * invalidation mechanism isn't alive and can't clear them if the tuple
	 * gets created later.  (Bootstrap doesn't do UPDATEs, so it doesn't need
	 * cache inval for that.)
	 */
	if (ct == NULL)
	{
		if (IsBootstrapProcessingMode())
			return NULL;

		ntp = build_dummy_tuple(cache, cache->cc_nkeys, cur_skey);
		ct = CatalogCacheCreateEntry(cache, ntp,
									 hashValue, hashIndex,
									 true);
		/* CatalogCacheCreateEntry copied the dummy tuple; free our copy */
		heap_freetuple(ntp);

		CACHE4_elog(DEBUG2, "SearchCatCache(%s): Contains %d/%d tuples",
					cache->cc_relname, cache->cc_ntup, CacheHdr->ch_ntup);
		CACHE3_elog(DEBUG2, "SearchCatCache(%s): put neg entry in bucket %d",
					cache->cc_relname, hashIndex);

		/*
		 * We are not returning the negative entry to the caller, so leave its
		 * refcount zero.
		 */

		return NULL;
	}

	CACHE4_elog(DEBUG2, "SearchCatCache(%s): Contains %d/%d tuples",
				cache->cc_relname, cache->cc_ntup, CacheHdr->ch_ntup);
	CACHE3_elog(DEBUG2, "SearchCatCache(%s): put in bucket %d",
				cache->cc_relname, hashIndex);

#ifdef CATCACHE_STATS
	cache->cc_newloads++;
#endif

	return &ct->tuple;
}
1272
/*
 *	ReleaseCatCache
 *
 *	Decrement the reference count of a catcache entry (releasing the
 *	hold grabbed by a successful SearchCatCache).
 *
 *	NOTE: if compiled with -DCATCACHE_FORCE_RELEASE then catcache entries
 *	will be freed as soon as their refcount goes to zero.  In combination
 *	with aset.c's CLOBBER_FREED_MEMORY option, this provides a good test
 *	to catch references to already-released catcache entries.
 */
void
ReleaseCatCache(HeapTuple tuple)
{
	/* Recover the CatCTup header from the embedded-tuple pointer */
	CatCTup    *ct = (CatCTup *) (((char *) tuple) -
								  offsetof(CatCTup, tuple));

	/* Safety checks to ensure we were handed a cache entry */
	Assert(ct->ct_magic == CT_MAGIC);
	Assert(ct->refcount > 0);

	ct->refcount--;
	ResourceOwnerForgetCatCacheRef(CurrentResourceOwner, &ct->tuple);

	/*
	 * Physically remove the entry only once it is unreferenced both by
	 * callers and by any owning CatCList; normally we also require it to
	 * be dead.  With CATCACHE_FORCE_RELEASE the "dead" test is compiled
	 * out, so the entry is freed as soon as it becomes unreferenced.
	 */
	if (
#ifndef CATCACHE_FORCE_RELEASE
		ct->dead &&
#endif
		ct->refcount == 0 &&
		(ct->c_list == NULL || ct->c_list->refcount == 0))
		CatCacheRemoveCTup(ct->my_cache, ct);
}
1305
1306
1307 /*
1308  *      SearchCatCacheList
1309  *
1310  *              Generate a list of all tuples matching a partial key (that is,
1311  *              a key specifying just the first K of the cache's N key columns).
1312  *
1313  *              The caller must not modify the list object or the pointed-to tuples,
1314  *              and must call ReleaseCatCacheList() when done with the list.
1315  */
1316 CatCList *
1317 SearchCatCacheList(CatCache *cache,
1318                                    int nkeys,
1319                                    Datum v1,
1320                                    Datum v2,
1321                                    Datum v3,
1322                                    Datum v4)
1323 {
1324         ScanKeyData cur_skey[4];
1325         uint32          lHashValue;
1326         Dlelem     *elt;
1327         CatCList   *cl;
1328         CatCTup    *ct;
1329         List       *volatile ctlist;
1330         ListCell   *ctlist_item;
1331         int                     nmembers;
1332         bool            ordered;
1333         HeapTuple       ntp;
1334         MemoryContext oldcxt;
1335         int                     i;
1336
1337         /*
1338          * one-time startup overhead for each cache
1339          */
1340         if (cache->cc_tupdesc == NULL)
1341                 CatalogCacheInitializeCache(cache);
1342
1343         Assert(nkeys > 0 && nkeys < cache->cc_nkeys);
1344
1345 #ifdef CATCACHE_STATS
1346         cache->cc_lsearches++;
1347 #endif
1348
1349         /*
1350          * initialize the search key information
1351          */
1352         memcpy(cur_skey, cache->cc_skey, sizeof(cur_skey));
1353         cur_skey[0].sk_argument = v1;
1354         cur_skey[1].sk_argument = v2;
1355         cur_skey[2].sk_argument = v3;
1356         cur_skey[3].sk_argument = v4;
1357
1358         /*
1359          * compute a hash value of the given keys for faster search.  We don't
1360          * presently divide the CatCList items into buckets, but this still lets
1361          * us skip non-matching items quickly most of the time.
1362          */
1363         lHashValue = CatalogCacheComputeHashValue(cache, nkeys, cur_skey);
1364
1365         /*
1366          * scan the items until we find a match or exhaust our list
1367          */
1368         for (elt = DLGetHead(&cache->cc_lists);
1369                  elt;
1370                  elt = DLGetSucc(elt))
1371         {
1372                 bool            res;
1373
1374                 cl = (CatCList *) DLE_VAL(elt);
1375
1376                 if (cl->dead)
1377                         continue;                       /* ignore dead entries */
1378
1379                 if (cl->hash_value != lHashValue)
1380                         continue;                       /* quickly skip entry if wrong hash val */
1381
1382                 /*
1383                  * see if the cached list matches our key.
1384                  */
1385                 if (cl->nkeys != nkeys)
1386                         continue;
1387                 HeapKeyTest(&cl->tuple,
1388                                         cache->cc_tupdesc,
1389                                         nkeys,
1390                                         cur_skey,
1391                                         res);
1392                 if (!res)
1393                         continue;
1394
1395                 /*
1396                  * We found a matching list.  Move the list to the front of the
1397                  * cache's list-of-lists, to speed subsequent searches.  (We do not
1398                  * move the members to the fronts of their hashbucket lists, however,
1399                  * since there's no point in that unless they are searched for
1400                  * individually.)
1401                  */
1402                 DLMoveToFront(&cl->cache_elem);
1403
1404                 /* Bump the list's refcount and return it */
1405                 ResourceOwnerEnlargeCatCacheListRefs(CurrentResourceOwner);
1406                 cl->refcount++;
1407                 ResourceOwnerRememberCatCacheListRef(CurrentResourceOwner, cl);
1408
1409                 CACHE2_elog(DEBUG2, "SearchCatCacheList(%s): found list",
1410                                         cache->cc_relname);
1411
1412 #ifdef CATCACHE_STATS
1413                 cache->cc_lhits++;
1414 #endif
1415
1416                 return cl;
1417         }
1418
1419         /*
1420          * List was not found in cache, so we have to build it by reading the
1421          * relation.  For each matching tuple found in the relation, use an
1422          * existing cache entry if possible, else build a new one.
1423          *
1424          * We have to bump the member refcounts temporarily to ensure they won't
1425          * get dropped from the cache while loading other members. We use a PG_TRY
1426          * block to ensure we can undo those refcounts if we get an error before
1427          * we finish constructing the CatCList.
1428          */
1429         ResourceOwnerEnlargeCatCacheListRefs(CurrentResourceOwner);
1430
1431         ctlist = NIL;
1432
1433         PG_TRY();
1434         {
1435                 Relation        relation;
1436                 SysScanDesc scandesc;
1437
1438                 relation = heap_open(cache->cc_reloid, AccessShareLock);
1439
1440                 scandesc = systable_beginscan(relation,
1441                                                                           cache->cc_indexoid,
1442                                                                           true,
1443                                                                           SnapshotNow,
1444                                                                           nkeys,
1445                                                                           cur_skey);
1446
1447                 /* The list will be ordered iff we are doing an index scan */
1448                 ordered = (scandesc->irel != NULL);
1449
1450                 while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
1451                 {
1452                         uint32          hashValue;
1453                         Index           hashIndex;
1454
1455                         /*
1456                          * See if there's an entry for this tuple already.
1457                          */
1458                         ct = NULL;
1459                         hashValue = CatalogCacheComputeTupleHashValue(cache, ntp);
1460                         hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);
1461
1462                         for (elt = DLGetHead(&cache->cc_bucket[hashIndex]);
1463                                  elt;
1464                                  elt = DLGetSucc(elt))
1465                         {
1466                                 ct = (CatCTup *) DLE_VAL(elt);
1467
1468                                 if (ct->dead || ct->negative)
1469                                         continue;       /* ignore dead and negative entries */
1470
1471                                 if (ct->hash_value != hashValue)
1472                                         continue;       /* quickly skip entry if wrong hash val */
1473
1474                                 if (!ItemPointerEquals(&(ct->tuple.t_self), &(ntp->t_self)))
1475                                         continue;       /* not same tuple */
1476
1477                                 /*
1478                                  * Found a match, but can't use it if it belongs to another
1479                                  * list already
1480                                  */
1481                                 if (ct->c_list)
1482                                         continue;
1483
1484                                 break;                  /* A-OK */
1485                         }
1486
1487                         if (elt == NULL)
1488                         {
1489                                 /* We didn't find a usable entry, so make a new one */
1490                                 ct = CatalogCacheCreateEntry(cache, ntp,
1491                                                                                          hashValue, hashIndex,
1492                                                                                          false);
1493                         }
1494
1495                         /* Careful here: add entry to ctlist, then bump its refcount */
1496                         /* This way leaves state correct if lappend runs out of memory */
1497                         ctlist = lappend(ctlist, ct);
1498                         ct->refcount++;
1499                 }
1500
1501                 systable_endscan(scandesc);
1502
1503                 heap_close(relation, AccessShareLock);
1504
1505                 /*
1506                  * Now we can build the CatCList entry.  First we need a dummy tuple
1507                  * containing the key values...
1508                  */
1509                 ntp = build_dummy_tuple(cache, nkeys, cur_skey);
1510                 oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
1511                 nmembers = list_length(ctlist);
1512                 cl = (CatCList *)
1513                         palloc(sizeof(CatCList) + nmembers * sizeof(CatCTup *));
1514                 heap_copytuple_with_tuple(ntp, &cl->tuple);
1515                 MemoryContextSwitchTo(oldcxt);
1516                 heap_freetuple(ntp);
1517
1518                 /*
1519                  * We are now past the last thing that could trigger an elog before we
1520                  * have finished building the CatCList and remembering it in the
1521                  * resource owner.      So it's OK to fall out of the PG_TRY, and indeed
1522                  * we'd better do so before we start marking the members as belonging
1523                  * to the list.
1524                  */
1525
1526         }
1527         PG_CATCH();
1528         {
1529                 foreach(ctlist_item, ctlist)
1530                 {
1531                         ct = (CatCTup *) lfirst(ctlist_item);
1532                         Assert(ct->c_list == NULL);
1533                         Assert(ct->refcount > 0);
1534                         ct->refcount--;
1535                         if (
1536 #ifndef CATCACHE_FORCE_RELEASE
1537                                 ct->dead &&
1538 #endif
1539                                 ct->refcount == 0 &&
1540                                 (ct->c_list == NULL || ct->c_list->refcount == 0))
1541                                 CatCacheRemoveCTup(cache, ct);
1542                 }
1543
1544                 PG_RE_THROW();
1545         }
1546         PG_END_TRY();
1547
1548         cl->cl_magic = CL_MAGIC;
1549         cl->my_cache = cache;
1550         DLInitElem(&cl->cache_elem, cl);
1551         cl->refcount = 0;                       /* for the moment */
1552         cl->dead = false;
1553         cl->ordered = ordered;
1554         cl->nkeys = nkeys;
1555         cl->hash_value = lHashValue;
1556         cl->n_members = nmembers;
1557
1558         i = 0;
1559         foreach(ctlist_item, ctlist)
1560         {
1561                 cl->members[i++] = ct = (CatCTup *) lfirst(ctlist_item);
1562                 Assert(ct->c_list == NULL);
1563                 ct->c_list = cl;
1564                 /* release the temporary refcount on the member */
1565                 Assert(ct->refcount > 0);
1566                 ct->refcount--;
1567                 /* mark list dead if any members already dead */
1568                 if (ct->dead)
1569                         cl->dead = true;
1570         }
1571         Assert(i == nmembers);
1572
1573         DLAddHead(&cache->cc_lists, &cl->cache_elem);
1574
1575         /* Finally, bump the list's refcount and return it */
1576         cl->refcount++;
1577         ResourceOwnerRememberCatCacheListRef(CurrentResourceOwner, cl);
1578
1579         CACHE3_elog(DEBUG2, "SearchCatCacheList(%s): made list of %d members",
1580                                 cache->cc_relname, nmembers);
1581
1582         return cl;
1583 }
1584
1585 /*
1586  *      ReleaseCatCacheList
1587  *
1588  *      Decrement the reference count of a catcache list.
1589  */
1590 void
1591 ReleaseCatCacheList(CatCList *list)
1592 {
1593         /* Safety checks to ensure we were handed a cache entry */
1594         Assert(list->cl_magic == CL_MAGIC);
1595         Assert(list->refcount > 0);
1596         list->refcount--;
1597         ResourceOwnerForgetCatCacheListRef(CurrentResourceOwner, list);
1598
1599         if (
1600 #ifndef CATCACHE_FORCE_RELEASE
1601                 list->dead &&
1602 #endif
1603                 list->refcount == 0)
1604                 CatCacheRemoveCList(list->my_cache, list);
1605 }
1606
1607
1608 /*
1609  * CatalogCacheCreateEntry
1610  *              Create a new CatCTup entry, copying the given HeapTuple and other
1611  *              supplied data into it.  The new entry initially has refcount 0.
1612  */
1613 static CatCTup *
1614 CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp,
1615                                                 uint32 hashValue, Index hashIndex, bool negative)
1616 {
1617         CatCTup    *ct;
1618         MemoryContext oldcxt;
1619
1620         /*
1621          * Allocate CatCTup header in cache memory, and copy the tuple there too.
1622          */
1623         oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
1624         ct = (CatCTup *) palloc(sizeof(CatCTup));
1625         heap_copytuple_with_tuple(ntp, &ct->tuple);
1626         MemoryContextSwitchTo(oldcxt);
1627
1628         /*
1629          * Finish initializing the CatCTup header, and add it to the cache's
1630          * linked list and counts.
1631          */
1632         ct->ct_magic = CT_MAGIC;
1633         ct->my_cache = cache;
1634         DLInitElem(&ct->cache_elem, (void *) ct);
1635         ct->c_list = NULL;
1636         ct->refcount = 0;                       /* for the moment */
1637         ct->dead = false;
1638         ct->negative = negative;
1639         ct->hash_value = hashValue;
1640
1641         DLAddHead(&cache->cc_bucket[hashIndex], &ct->cache_elem);
1642
1643         cache->cc_ntup++;
1644         CacheHdr->ch_ntup++;
1645
1646         return ct;
1647 }
1648
1649 /*
1650  * build_dummy_tuple
1651  *              Generate a palloc'd HeapTuple that contains the specified key
1652  *              columns, and NULLs for other columns.
1653  *
1654  * This is used to store the keys for negative cache entries and CatCList
1655  * entries, which don't have real tuples associated with them.
1656  */
1657 static HeapTuple
1658 build_dummy_tuple(CatCache *cache, int nkeys, ScanKey skeys)
1659 {
1660         HeapTuple       ntp;
1661         TupleDesc       tupDesc = cache->cc_tupdesc;
1662         Datum      *values;
1663         char       *nulls;
1664         Oid                     tupOid = InvalidOid;
1665         NameData        tempNames[4];
1666         int                     i;
1667
1668         values = (Datum *) palloc(tupDesc->natts * sizeof(Datum));
1669         nulls = (char *) palloc(tupDesc->natts * sizeof(char));
1670
1671         memset(values, 0, tupDesc->natts * sizeof(Datum));
1672         memset(nulls, 'n', tupDesc->natts * sizeof(char));
1673
1674         for (i = 0; i < nkeys; i++)
1675         {
1676                 int                     attindex = cache->cc_key[i];
1677                 Datum           keyval = skeys[i].sk_argument;
1678
1679                 if (attindex > 0)
1680                 {
1681                         /*
1682                          * Here we must be careful in case the caller passed a C string
1683                          * where a NAME is wanted: convert the given argument to a
1684                          * correctly padded NAME.  Otherwise the memcpy() done in
1685                          * heap_formtuple could fall off the end of memory.
1686                          */
1687                         if (cache->cc_isname[i])
1688                         {
1689                                 Name            newval = &tempNames[i];
1690
1691                                 namestrcpy(newval, DatumGetCString(keyval));
1692                                 keyval = NameGetDatum(newval);
1693                         }
1694                         values[attindex - 1] = keyval;
1695                         nulls[attindex - 1] = ' ';
1696                 }
1697                 else
1698                 {
1699                         Assert(attindex == ObjectIdAttributeNumber);
1700                         tupOid = DatumGetObjectId(keyval);
1701                 }
1702         }
1703
1704         ntp = heap_formtuple(tupDesc, values, nulls);
1705         if (tupOid != InvalidOid)
1706                 HeapTupleSetOid(ntp, tupOid);
1707
1708         pfree(values);
1709         pfree(nulls);
1710
1711         return ntp;
1712 }
1713
1714
1715 /*
1716  *      PrepareToInvalidateCacheTuple()
1717  *
1718  *      This is part of a rather subtle chain of events, so pay attention:
1719  *
1720  *      When a tuple is inserted or deleted, it cannot be flushed from the
1721  *      catcaches immediately, for reasons explained at the top of cache/inval.c.
1722  *      Instead we have to add entry(s) for the tuple to a list of pending tuple
1723  *      invalidations that will be done at the end of the command or transaction.
1724  *
1725  *      The lists of tuples that need to be flushed are kept by inval.c.  This
1726  *      routine is a helper routine for inval.c.  Given a tuple belonging to
1727  *      the specified relation, find all catcaches it could be in, compute the
1728  *      correct hash value for each such catcache, and call the specified function
1729  *      to record the cache id, hash value, and tuple ItemPointer in inval.c's
1730  *      lists.  CatalogCacheIdInvalidate will be called later, if appropriate,
1731  *      using the recorded information.
1732  *
1733  *      Note that it is irrelevant whether the given tuple is actually loaded
1734  *      into the catcache at the moment.  Even if it's not there now, it might
1735  *      be by the end of the command, or there might be a matching negative entry
1736  *      to flush --- or other backends' caches might have such entries --- so
1737  *      we have to make list entries to flush it later.
1738  *
1739  *      Also note that it's not an error if there are no catcaches for the
1740  *      specified relation.  inval.c doesn't know exactly which rels have
1741  *      catcaches --- it will call this routine for any tuple that's in a
1742  *      system relation.
1743  */
1744 void
1745 PrepareToInvalidateCacheTuple(Relation relation,
1746                                                           HeapTuple tuple,
1747                                                         void (*function) (int, uint32, ItemPointer, Oid))
1748 {
1749         CatCache   *ccp;
1750         Oid                     reloid;
1751
1752         CACHE1_elog(DEBUG2, "PrepareToInvalidateCacheTuple: called");
1753
1754         /*
1755          * sanity checks
1756          */
1757         Assert(RelationIsValid(relation));
1758         Assert(HeapTupleIsValid(tuple));
1759         Assert(PointerIsValid(function));
1760         Assert(CacheHdr != NULL);
1761
1762         reloid = RelationGetRelid(relation);
1763
1764         /* ----------------
1765          *      for each cache
1766          *         if the cache contains tuples from the specified relation
1767          *                 compute the tuple's hash value in this cache,
1768          *                 and call the passed function to register the information.
1769          * ----------------
1770          */
1771
1772         for (ccp = CacheHdr->ch_caches; ccp; ccp = ccp->cc_next)
1773         {
1774                 /* Just in case cache hasn't finished initialization yet... */
1775                 if (ccp->cc_tupdesc == NULL)
1776                         CatalogCacheInitializeCache(ccp);
1777
1778                 if (ccp->cc_reloid != reloid)
1779                         continue;
1780
1781                 (*function) (ccp->id,
1782                                          CatalogCacheComputeTupleHashValue(ccp, tuple),
1783                                          &tuple->t_self,
1784                                          ccp->cc_relisshared ? (Oid) 0 : MyDatabaseId);
1785         }
1786 }
1787
1788
1789 /*
1790  * Subroutines for warning about reference leaks.  These are exported so
1791  * that resowner.c can call them.
1792  */
1793 void
1794 PrintCatCacheLeakWarning(HeapTuple tuple)
1795 {
1796         CatCTup    *ct = (CatCTup *) (((char *) tuple) -
1797                                                                   offsetof(CatCTup, tuple));
1798
1799         /* Safety check to ensure we were handed a cache entry */
1800         Assert(ct->ct_magic == CT_MAGIC);
1801
1802         elog(WARNING, "cache reference leak: cache %s (%d), tuple %u/%u has count %d",
1803                  ct->my_cache->cc_relname, ct->my_cache->id,
1804                  ItemPointerGetBlockNumber(&(tuple->t_self)),
1805                  ItemPointerGetOffsetNumber(&(tuple->t_self)),
1806                  ct->refcount);
1807 }
1808
1809 void
1810 PrintCatCacheListLeakWarning(CatCList *list)
1811 {
1812         elog(WARNING, "cache reference leak: cache %s (%d), list %p has count %d",
1813                  list->my_cache->cc_relname, list->my_cache->id,
1814                  list, list->refcount);
1815 }