]> granicus.if.org Git - postgresql/blob - src/backend/utils/cache/catcache.c
f43e4181e781190222a6e82fcec9482c9366fc64
[postgresql] / src / backend / utils / cache / catcache.c
1 /*-------------------------------------------------------------------------
2  *
3  * catcache.c
4  *        System catalog cache for tuples matching a key.
5  *
6  * Portions Copyright (c) 1996-2011, PostgreSQL Global Development Group
7  * Portions Copyright (c) 1994, Regents of the University of California
8  *
9  *
10  * IDENTIFICATION
11  *        src/backend/utils/cache/catcache.c
12  *
13  *-------------------------------------------------------------------------
14  */
15 #include "postgres.h"
16
17 #include "access/genam.h"
18 #include "access/hash.h"
19 #include "access/heapam.h"
20 #include "access/relscan.h"
21 #include "access/sysattr.h"
22 #include "access/valid.h"
23 #include "catalog/pg_operator.h"
24 #include "catalog/pg_type.h"
25 #include "miscadmin.h"
26 #ifdef CATCACHE_STATS
27 #include "storage/ipc.h"                /* for on_proc_exit */
28 #endif
29 #include "storage/lmgr.h"
30 #include "utils/builtins.h"
31 #include "utils/fmgroids.h"
32 #include "utils/inval.h"
33 #include "utils/memutils.h"
34 #include "utils/rel.h"
35 #include "utils/resowner.h"
36 #include "utils/syscache.h"
37 #include "utils/tqual.h"
38
39
40  /* #define CACHEDEBUG */       /* turns DEBUG elogs on */
41
42 /*
43  * Given a hash value and the size of the hash table, find the bucket
44  * in which the hash value belongs. Since the hash table must contain
45  * a power-of-2 number of elements, this is a simple bitmask.
46  */
47 #define HASH_INDEX(h, sz) ((Index) ((h) & ((sz) - 1)))
48
49
50 /*
51  *              variables, macros and other stuff
52  */
53
54 #ifdef CACHEDEBUG
55 #define CACHE1_elog(a,b)                                elog(a,b)
56 #define CACHE2_elog(a,b,c)                              elog(a,b,c)
57 #define CACHE3_elog(a,b,c,d)                    elog(a,b,c,d)
58 #define CACHE4_elog(a,b,c,d,e)                  elog(a,b,c,d,e)
59 #define CACHE5_elog(a,b,c,d,e,f)                elog(a,b,c,d,e,f)
60 #define CACHE6_elog(a,b,c,d,e,f,g)              elog(a,b,c,d,e,f,g)
61 #else
62 #define CACHE1_elog(a,b)
63 #define CACHE2_elog(a,b,c)
64 #define CACHE3_elog(a,b,c,d)
65 #define CACHE4_elog(a,b,c,d,e)
66 #define CACHE5_elog(a,b,c,d,e,f)
67 #define CACHE6_elog(a,b,c,d,e,f,g)
68 #endif
69
70 /* Cache management header --- pointer is NULL until created */
71 static CatCacheHeader *CacheHdr = NULL;
72
73
74 static uint32 CatalogCacheComputeHashValue(CatCache *cache, int nkeys,
75                                                          ScanKey cur_skey);
76 static uint32 CatalogCacheComputeTupleHashValue(CatCache *cache,
77                                                                   HeapTuple tuple);
78
79 #ifdef CATCACHE_STATS
80 static void CatCachePrintStats(int code, Datum arg);
81 #endif
82 static void CatCacheRemoveCTup(CatCache *cache, CatCTup *ct);
83 static void CatCacheRemoveCList(CatCache *cache, CatCList *cl);
84 static void CatalogCacheInitializeCache(CatCache *cache);
85 static CatCTup *CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp,
86                                                 uint32 hashValue, Index hashIndex,
87                                                 bool negative);
88 static HeapTuple build_dummy_tuple(CatCache *cache, int nkeys, ScanKey skeys);
89
90
91 /*
92  *                                      internal support functions
93  */
94
95 /*
96  * Look up the hash and equality functions for system types that are used
97  * as cache key fields.
98  *
99  * XXX this should be replaced by catalog lookups,
100  * but that seems to pose considerable risk of circularity...
101  */
102 static void
103 GetCCHashEqFuncs(Oid keytype, PGFunction *hashfunc, RegProcedure *eqfunc)
104 {
105         switch (keytype)
106         {
107                 case BOOLOID:
108                         *hashfunc = hashchar;
109
110                         *eqfunc = F_BOOLEQ;
111                         break;
112                 case CHAROID:
113                         *hashfunc = hashchar;
114
115                         *eqfunc = F_CHAREQ;
116                         break;
117                 case NAMEOID:
118                         *hashfunc = hashname;
119
120                         *eqfunc = F_NAMEEQ;
121                         break;
122                 case INT2OID:
123                         *hashfunc = hashint2;
124
125                         *eqfunc = F_INT2EQ;
126                         break;
127                 case INT2VECTOROID:
128                         *hashfunc = hashint2vector;
129
130                         *eqfunc = F_INT2VECTOREQ;
131                         break;
132                 case INT4OID:
133                         *hashfunc = hashint4;
134
135                         *eqfunc = F_INT4EQ;
136                         break;
137                 case TEXTOID:
138                         *hashfunc = hashtext;
139
140                         *eqfunc = F_TEXTEQ;
141                         break;
142                 case OIDOID:
143                 case REGPROCOID:
144                 case REGPROCEDUREOID:
145                 case REGOPEROID:
146                 case REGOPERATOROID:
147                 case REGCLASSOID:
148                 case REGTYPEOID:
149                 case REGCONFIGOID:
150                 case REGDICTIONARYOID:
151                         *hashfunc = hashoid;
152
153                         *eqfunc = F_OIDEQ;
154                         break;
155                 case OIDVECTOROID:
156                         *hashfunc = hashoidvector;
157
158                         *eqfunc = F_OIDVECTOREQ;
159                         break;
160                 default:
161                         elog(FATAL, "type %u not supported as catcache key", keytype);
162                         *hashfunc = NULL;       /* keep compiler quiet */
163
164                         *eqfunc = InvalidOid;
165                         break;
166         }
167 }
168
169 /*
170  *              CatalogCacheComputeHashValue
171  *
172  * Compute the hash value associated with a given set of lookup keys
173  */
static uint32
CatalogCacheComputeHashValue(CatCache *cache, int nkeys, ScanKey cur_skey)
{
	uint32		hashValue = 0;
	uint32		oneHash;

	CACHE4_elog(DEBUG2, "CatalogCacheComputeHashValue %s %d %p",
				cache->cc_relname,
				nkeys,
				cache);

	/*
	 * Each key's 32-bit hash is rotated by a different amount before being
	 * XORed into the result, so the same values appearing in different key
	 * positions produce different combined hashes.  The cases deliberately
	 * fall through from nkeys down to 1.
	 */
	switch (nkeys)
	{
		case 4:
			oneHash =
				DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[3],
												   cur_skey[3].sk_argument));
			/* rotate left 24 bits, then fold in */
			hashValue ^= oneHash << 24;
			hashValue ^= oneHash >> 8;
			/* FALLTHROUGH */
		case 3:
			oneHash =
				DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[2],
												   cur_skey[2].sk_argument));
			/* rotate left 16 bits */
			hashValue ^= oneHash << 16;
			hashValue ^= oneHash >> 16;
			/* FALLTHROUGH */
		case 2:
			oneHash =
				DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[1],
												   cur_skey[1].sk_argument));
			/* rotate left 8 bits */
			hashValue ^= oneHash << 8;
			hashValue ^= oneHash >> 24;
			/* FALLTHROUGH */
		case 1:
			oneHash =
				DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[0],
												   cur_skey[0].sk_argument));
			/* first key is folded in unrotated */
			hashValue ^= oneHash;
			break;
		default:
			elog(FATAL, "wrong number of hash keys: %d", nkeys);
			break;
	}

	return hashValue;
}
221
222 /*
223  *              CatalogCacheComputeTupleHashValue
224  *
225  * Compute the hash value associated with a given tuple to be cached
226  */
227 static uint32
228 CatalogCacheComputeTupleHashValue(CatCache *cache, HeapTuple tuple)
229 {
230         ScanKeyData cur_skey[CATCACHE_MAXKEYS];
231         bool            isNull = false;
232
233         /* Copy pre-initialized overhead data for scankey */
234         memcpy(cur_skey, cache->cc_skey, sizeof(cur_skey));
235
236         /* Now extract key fields from tuple, insert into scankey */
237         switch (cache->cc_nkeys)
238         {
239                 case 4:
240                         cur_skey[3].sk_argument =
241                                 (cache->cc_key[3] == ObjectIdAttributeNumber)
242                                 ? ObjectIdGetDatum(HeapTupleGetOid(tuple))
243                                 : fastgetattr(tuple,
244                                                           cache->cc_key[3],
245                                                           cache->cc_tupdesc,
246                                                           &isNull);
247                         Assert(!isNull);
248                         /* FALLTHROUGH */
249                 case 3:
250                         cur_skey[2].sk_argument =
251                                 (cache->cc_key[2] == ObjectIdAttributeNumber)
252                                 ? ObjectIdGetDatum(HeapTupleGetOid(tuple))
253                                 : fastgetattr(tuple,
254                                                           cache->cc_key[2],
255                                                           cache->cc_tupdesc,
256                                                           &isNull);
257                         Assert(!isNull);
258                         /* FALLTHROUGH */
259                 case 2:
260                         cur_skey[1].sk_argument =
261                                 (cache->cc_key[1] == ObjectIdAttributeNumber)
262                                 ? ObjectIdGetDatum(HeapTupleGetOid(tuple))
263                                 : fastgetattr(tuple,
264                                                           cache->cc_key[1],
265                                                           cache->cc_tupdesc,
266                                                           &isNull);
267                         Assert(!isNull);
268                         /* FALLTHROUGH */
269                 case 1:
270                         cur_skey[0].sk_argument =
271                                 (cache->cc_key[0] == ObjectIdAttributeNumber)
272                                 ? ObjectIdGetDatum(HeapTupleGetOid(tuple))
273                                 : fastgetattr(tuple,
274                                                           cache->cc_key[0],
275                                                           cache->cc_tupdesc,
276                                                           &isNull);
277                         Assert(!isNull);
278                         break;
279                 default:
280                         elog(FATAL, "wrong number of hash keys: %d", cache->cc_nkeys);
281                         break;
282         }
283
284         return CatalogCacheComputeHashValue(cache, cache->cc_nkeys, cur_skey);
285 }
286
287
288 #ifdef CATCACHE_STATS
289
/*
 * CatCachePrintStats
 *
 * on_proc_exit callback (registered in InitCatCache when CATCACHE_STATS is
 * defined) that emits per-cache and aggregate usage counters at DEBUG2.
 * The "code" and "arg" parameters are required by the on_proc_exit callback
 * signature but are not used here.
 */
static void
CatCachePrintStats(int code, Datum arg)
{
	CatCache   *cache;
	long		cc_searches = 0;
	long		cc_hits = 0;
	long		cc_neg_hits = 0;
	long		cc_newloads = 0;
	long		cc_invals = 0;
	long		cc_lsearches = 0;
	long		cc_lhits = 0;

	/* walk every cache, printing and accumulating its counters */
	for (cache = CacheHdr->ch_caches; cache; cache = cache->cc_next)
	{
		if (cache->cc_ntup == 0 && cache->cc_searches == 0)
			continue;			/* don't print unused caches */
		elog(DEBUG2, "catcache %s/%u: %d tup, %ld srch, %ld+%ld=%ld hits, %ld+%ld=%ld loads, %ld invals, %ld lsrch, %ld lhits",
			 cache->cc_relname,
			 cache->cc_indexoid,
			 cache->cc_ntup,
			 cache->cc_searches,
			 cache->cc_hits,
			 cache->cc_neg_hits,
			 cache->cc_hits + cache->cc_neg_hits,
			 cache->cc_newloads,
			 cache->cc_searches - cache->cc_hits - cache->cc_neg_hits - cache->cc_newloads,
			 cache->cc_searches - cache->cc_hits - cache->cc_neg_hits,
			 cache->cc_invals,
			 cache->cc_lsearches,
			 cache->cc_lhits);
		cc_searches += cache->cc_searches;
		cc_hits += cache->cc_hits;
		cc_neg_hits += cache->cc_neg_hits;
		cc_newloads += cache->cc_newloads;
		cc_invals += cache->cc_invals;
		cc_lsearches += cache->cc_lsearches;
		cc_lhits += cache->cc_lhits;
	}
	/* grand totals across all caches */
	elog(DEBUG2, "catcache totals: %d tup, %ld srch, %ld+%ld=%ld hits, %ld+%ld=%ld loads, %ld invals, %ld lsrch, %ld lhits",
		 CacheHdr->ch_ntup,
		 cc_searches,
		 cc_hits,
		 cc_neg_hits,
		 cc_hits + cc_neg_hits,
		 cc_newloads,
		 cc_searches - cc_hits - cc_neg_hits - cc_newloads,
		 cc_searches - cc_hits - cc_neg_hits,
		 cc_invals,
		 cc_lsearches,
		 cc_lhits);
}
341 #endif   /* CATCACHE_STATS */
342
343
344 /*
345  *              CatCacheRemoveCTup
346  *
347  * Unlink and delete the given cache entry
348  *
349  * NB: if it is a member of a CatCList, the CatCList is deleted too.
350  * Both the cache entry and the list had better have zero refcount.
351  */
static void
CatCacheRemoveCTup(CatCache *cache, CatCTup *ct)
{
	Assert(ct->refcount == 0);
	Assert(ct->my_cache == cache);

	if (ct->c_list)
	{
		/*
		 * The cleanest way to handle this is to call CatCacheRemoveCList,
		 * which will recurse back to me, and the recursive call will do the
		 * work.  Set the "dead" flag to make sure it does recurse.
		 */
		ct->dead = true;
		CatCacheRemoveCList(cache, ct->c_list);
		return;					/* nothing left to do */
	}

	/* delink from linked list */
	DLRemove(&ct->cache_elem);

	/* free associated tuple data */
	if (ct->tuple.t_data != NULL)
		pfree(ct->tuple.t_data);
	pfree(ct);

	/* keep the per-cache and global entry counts in sync */
	--cache->cc_ntup;
	--CacheHdr->ch_ntup;
}
381
382 /*
383  *              CatCacheRemoveCList
384  *
385  * Unlink and delete the given cache list entry
386  *
387  * NB: any dead member entries that become unreferenced are deleted too.
388  */
static void
CatCacheRemoveCList(CatCache *cache, CatCList *cl)
{
	int			i;

	Assert(cl->refcount == 0);
	Assert(cl->my_cache == cache);

	/* delink from member tuples */
	for (i = cl->n_members; --i >= 0;)
	{
		CatCTup    *ct = cl->members[i];

		Assert(ct->c_list == cl);
		ct->c_list = NULL;

		/*
		 * If the member is dead and now has no references, remove it.
		 * (When CATCACHE_FORCE_RELEASE is defined, the "dead" test is
		 * compiled out, so any unreferenced member is removed.)
		 */
		if (
#ifndef CATCACHE_FORCE_RELEASE
			ct->dead &&
#endif
			ct->refcount == 0)
			CatCacheRemoveCTup(cache, ct);
	}

	/* delink from linked list */
	DLRemove(&cl->cache_elem);

	/* free associated tuple data */
	if (cl->tuple.t_data != NULL)
		pfree(cl->tuple.t_data);
	pfree(cl);
}
421
422
423 /*
424  *      CatalogCacheIdInvalidate
425  *
426  *      Invalidate entries in the specified cache, given a hash value.
427  *
428  *      We delete cache entries that match the hash value, whether positive
429  *      or negative.  We don't care whether the invalidation is the result
430  *      of a tuple insertion or a deletion.
431  *
432  *      We used to try to match positive cache entries by TID, but that is
433  *      unsafe after a VACUUM FULL on a system catalog: an inval event could
434  *      be queued before VACUUM FULL, and then processed afterwards, when the
435  *      target tuple that has to be invalidated has a different TID than it
436  *      did when the event was created.  So now we just compare hash values and
437  *      accept the small risk of unnecessary invalidations due to false matches.
438  *
439  *      This routine is only quasi-public: it should only be used by inval.c.
440  */
void
CatalogCacheIdInvalidate(int cacheId, uint32 hashValue)
{
	CatCache   *ccp;

	CACHE1_elog(DEBUG2, "CatalogCacheIdInvalidate: called");

	/*
	 * inspect caches to find the proper cache
	 */
	for (ccp = CacheHdr->ch_caches; ccp; ccp = ccp->cc_next)
	{
		Index		hashIndex;
		Dlelem	   *elt,
				   *nextelt;

		if (cacheId != ccp->id)
			continue;

		/*
		 * We don't bother to check whether the cache has finished
		 * initialization yet; if not, there will be no entries in it so no
		 * problem.
		 */

		/*
		 * Invalidate *all* CatCLists in this cache; it's too hard to tell
		 * which searches might still be correct, so just zap 'em all.
		 * nextelt is fetched before any removal, since removal frees elt.
		 */
		for (elt = DLGetHead(&ccp->cc_lists); elt; elt = nextelt)
		{
			CatCList   *cl = (CatCList *) DLE_VAL(elt);

			nextelt = DLGetSucc(elt);

			/* a pinned list can only be marked dead, not freed yet */
			if (cl->refcount > 0)
				cl->dead = true;
			else
				CatCacheRemoveCList(ccp, cl);
		}

		/*
		 * inspect the proper hash bucket for tuple matches
		 */
		hashIndex = HASH_INDEX(hashValue, ccp->cc_nbuckets);

		for (elt = DLGetHead(&ccp->cc_bucket[hashIndex]); elt; elt = nextelt)
		{
			CatCTup    *ct = (CatCTup *) DLE_VAL(elt);

			nextelt = DLGetSucc(elt);

			if (hashValue == ct->hash_value)
			{
				/* pinned directly, or pinned via a referenced list member */
				if (ct->refcount > 0 ||
					(ct->c_list && ct->c_list->refcount > 0))
				{
					ct->dead = true;
					/* list, if any, was marked dead above */
					Assert(ct->c_list == NULL || ct->c_list->dead);
				}
				else
					CatCacheRemoveCTup(ccp, ct);
				CACHE1_elog(DEBUG2, "CatalogCacheIdInvalidate: invalidated");
#ifdef CATCACHE_STATS
				ccp->cc_invals++;
#endif
				/* could be multiple matches, so keep looking! */
			}
		}
		break;					/* need only search this one cache */
	}
}
514
515 /* ----------------------------------------------------------------
516  *                                         public functions
517  * ----------------------------------------------------------------
518  */
519
520
521 /*
522  * Standard routine for creating cache context if it doesn't exist yet
523  *
524  * There are a lot of places (probably far more than necessary) that check
525  * whether CacheMemoryContext exists yet and want to create it if not.
526  * We centralize knowledge of exactly how to create it here.
527  */
528 void
529 CreateCacheMemoryContext(void)
530 {
531         /*
532          * Purely for paranoia, check that context doesn't exist; caller probably
533          * did so already.
534          */
535         if (!CacheMemoryContext)
536                 CacheMemoryContext = AllocSetContextCreate(TopMemoryContext,
537                                                                                                    "CacheMemoryContext",
538                                                                                                    ALLOCSET_DEFAULT_MINSIZE,
539                                                                                                    ALLOCSET_DEFAULT_INITSIZE,
540                                                                                                    ALLOCSET_DEFAULT_MAXSIZE);
541 }
542
543
544 /*
545  *              AtEOXact_CatCache
546  *
547  * Clean up catcaches at end of main transaction (either commit or abort)
548  *
549  * As of PostgreSQL 8.1, catcache pins should get released by the
550  * ResourceOwner mechanism.  This routine is just a debugging
551  * cross-check that no pins remain.
552  */
void
AtEOXact_CatCache(bool isCommit)
{
	/* isCommit is currently unused: the cross-checks are the same either way */
#ifdef USE_ASSERT_CHECKING
	if (assert_enabled)
	{
		CatCache   *ccp;

		/* walk every cache, verifying nothing is still pinned or dead */
		for (ccp = CacheHdr->ch_caches; ccp; ccp = ccp->cc_next)
		{
			Dlelem	   *elt;
			int			i;

			/* Check CatCLists */
			for (elt = DLGetHead(&ccp->cc_lists); elt; elt = DLGetSucc(elt))
			{
				CatCList   *cl = (CatCList *) DLE_VAL(elt);

				Assert(cl->cl_magic == CL_MAGIC);
				Assert(cl->refcount == 0);
				Assert(!cl->dead);
			}

			/* Check individual tuples in every hash bucket */
			for (i = 0; i < ccp->cc_nbuckets; i++)
			{
				for (elt = DLGetHead(&ccp->cc_bucket[i]);
					 elt;
					 elt = DLGetSucc(elt))
				{
					CatCTup    *ct = (CatCTup *) DLE_VAL(elt);

					Assert(ct->ct_magic == CT_MAGIC);
					Assert(ct->refcount == 0);
					Assert(!ct->dead);
				}
			}
		}
	}
#endif
}
594
595 /*
596  *              ResetCatalogCache
597  *
598  * Reset one catalog cache to empty.
599  *
600  * This is not very efficient if the target cache is nearly empty.
601  * However, it shouldn't need to be efficient; we don't invoke it often.
602  */
static void
ResetCatalogCache(CatCache *cache)
{
	Dlelem	   *elt,
			   *nextelt;
	int			i;

	/*
	 * Remove each list in this cache, or at least mark it dead.
	 * nextelt is fetched before any removal, since removal frees elt.
	 */
	for (elt = DLGetHead(&cache->cc_lists); elt; elt = nextelt)
	{
		CatCList   *cl = (CatCList *) DLE_VAL(elt);

		nextelt = DLGetSucc(elt);

		if (cl->refcount > 0)
			cl->dead = true;
		else
			CatCacheRemoveCList(cache, cl);
	}

	/* Remove each tuple in this cache, or at least mark it dead */
	for (i = 0; i < cache->cc_nbuckets; i++)
	{
		for (elt = DLGetHead(&cache->cc_bucket[i]); elt; elt = nextelt)
		{
			CatCTup    *ct = (CatCTup *) DLE_VAL(elt);

			nextelt = DLGetSucc(elt);

			/* entries pinned directly or via a pinned list are marked dead */
			if (ct->refcount > 0 ||
				(ct->c_list && ct->c_list->refcount > 0))
			{
				ct->dead = true;
				/* list, if any, was marked dead above */
				Assert(ct->c_list == NULL || ct->c_list->dead);
			}
			else
				CatCacheRemoveCTup(cache, ct);
#ifdef CATCACHE_STATS
			/* each tuple invalidated (marked dead or removed) counts once */
			cache->cc_invals++;
#endif
		}
	}
}
647
648 /*
649  *              ResetCatalogCaches
650  *
651  * Reset all caches when a shared cache inval event forces it
652  */
653 void
654 ResetCatalogCaches(void)
655 {
656         CatCache   *cache;
657
658         CACHE1_elog(DEBUG2, "ResetCatalogCaches called");
659
660         for (cache = CacheHdr->ch_caches; cache; cache = cache->cc_next)
661                 ResetCatalogCache(cache);
662
663         CACHE1_elog(DEBUG2, "end of ResetCatalogCaches call");
664 }
665
666 /*
667  *              CatalogCacheFlushCatalog
668  *
669  *      Flush all catcache entries that came from the specified system catalog.
670  *      This is needed after VACUUM FULL/CLUSTER on the catalog, since the
671  *      tuples very likely now have different TIDs than before.  (At one point
672  *      we also tried to force re-execution of CatalogCacheInitializeCache for
673  *      the cache(s) on that catalog.  This is a bad idea since it leads to all
674  *      kinds of trouble if a cache flush occurs while loading cache entries.
675  *      We now avoid the need to do it by copying cc_tupdesc out of the relcache,
676  *      rather than relying on the relcache to keep a tupdesc for us.  Of course
677  *      this assumes the tupdesc of a cachable system table will not change...)
678  */
679 void
680 CatalogCacheFlushCatalog(Oid catId)
681 {
682         CatCache   *cache;
683
684         CACHE2_elog(DEBUG2, "CatalogCacheFlushCatalog called for %u", catId);
685
686         for (cache = CacheHdr->ch_caches; cache; cache = cache->cc_next)
687         {
688                 /* Does this cache store tuples of the target catalog? */
689                 if (cache->cc_reloid == catId)
690                 {
691                         /* Yes, so flush all its contents */
692                         ResetCatalogCache(cache);
693
694                         /* Tell inval.c to call syscache callbacks for this cache */
695                         CallSyscacheCallbacks(cache->id, 0);
696                 }
697         }
698
699         CACHE1_elog(DEBUG2, "end of CatalogCacheFlushCatalog call");
700 }
701
702 /*
703  *              InitCatCache
704  *
705  *      This allocates and initializes a cache for a system catalog relation.
706  *      Actually, the cache is only partially initialized to avoid opening the
707  *      relation.  The relation will be opened and the rest of the cache
708  *      structure initialized on the first access.
709  */
710 #ifdef CACHEDEBUG
711 #define InitCatCache_DEBUG2 \
712 do { \
713         elog(DEBUG2, "InitCatCache: rel=%u ind=%u id=%d nkeys=%d size=%d", \
714                  cp->cc_reloid, cp->cc_indexoid, cp->id, \
715                  cp->cc_nkeys, cp->cc_nbuckets); \
716 } while(0)
717 #else
718 #define InitCatCache_DEBUG2
719 #endif
720
CatCache *
InitCatCache(int id,
			 Oid reloid,
			 Oid indexoid,
			 int nkeys,
			 const int *key,
			 int nbuckets)
{
	CatCache   *cp;
	MemoryContext oldcxt;
	int			i;

	/*
	 * nbuckets is the number of hash buckets to use in this catcache.
	 * Currently we just use a hard-wired estimate of an appropriate size for
	 * each cache; maybe later make them dynamically resizable?
	 *
	 * nbuckets must be a power of two.  We check this via Assert rather than
	 * a full runtime check because the values will be coming from constant
	 * tables.
	 *
	 * If you're confused by the power-of-two check, see comments in
	 * bitmapset.c for an explanation.
	 */
	Assert(nbuckets > 0 && (nbuckets & -nbuckets) == nbuckets);

	/*
	 * first switch to the cache context so our allocations do not vanish at
	 * the end of a transaction
	 */
	if (!CacheMemoryContext)
		CreateCacheMemoryContext();

	oldcxt = MemoryContextSwitchTo(CacheMemoryContext);

	/*
	 * if first time through, initialize the cache group header
	 */
	if (CacheHdr == NULL)
	{
		CacheHdr = (CatCacheHeader *) palloc(sizeof(CatCacheHeader));
		CacheHdr->ch_caches = NULL;
		CacheHdr->ch_ntup = 0;
#ifdef CATCACHE_STATS
		/* set up to dump stats at backend exit */
		on_proc_exit(CatCachePrintStats, 0);
#endif
	}

	/*
	 * allocate a new cache structure, with the bucket array appended
	 *
	 * Note: we assume zeroing initializes the Dllist headers correctly
	 */
	cp = (CatCache *) palloc0(sizeof(CatCache) + nbuckets * sizeof(Dllist));

	/*
	 * initialize the cache's relation information for the relation
	 * corresponding to this cache, and initialize some of the new cache's
	 * other internal fields.  But don't open the relation yet: that is
	 * deferred to CatalogCacheInitializeCache, on first access.
	 */
	cp->id = id;
	cp->cc_relname = "(not known yet)";
	cp->cc_reloid = reloid;
	cp->cc_indexoid = indexoid;
	cp->cc_relisshared = false; /* temporary */
	cp->cc_tupdesc = (TupleDesc) NULL;
	cp->cc_ntup = 0;
	cp->cc_nbuckets = nbuckets;
	cp->cc_nkeys = nkeys;
	for (i = 0; i < nkeys; ++i)
		cp->cc_key[i] = key[i];

	/*
	 * new cache is initialized as far as we can go for now. print some
	 * debugging information, if appropriate.
	 */
	InitCatCache_DEBUG2;

	/*
	 * add completed cache to top of group header's list
	 */
	cp->cc_next = CacheHdr->ch_caches;
	CacheHdr->ch_caches = cp;

	/*
	 * back to the old context before we return...
	 */
	MemoryContextSwitchTo(oldcxt);

	return cp;
}
813
814 /*
815  *              CatalogCacheInitializeCache
816  *
817  * This function does final initialization of a catcache: obtain the tuple
818  * descriptor and set up the hash and equality function links.  We assume
819  * that the relcache entry can be opened at this point!
820  */
#ifdef CACHEDEBUG
/* Trace entry into CatalogCacheInitializeCache (CACHEDEBUG builds only) */
#define CatalogCacheInitializeCache_DEBUG1 \
	elog(DEBUG2, "CatalogCacheInitializeCache: cache @%p rel=%u", cache, \
		 cache->cc_reloid)

/*
 * Trace each key column as it is set up.  NB: expands in-place inside
 * CatalogCacheInitializeCache's loop and relies on its locals "i" and
 * "tupdesc" being in scope.
 */
#define CatalogCacheInitializeCache_DEBUG2 \
do { \
		if (cache->cc_key[i] > 0) { \
			elog(DEBUG2, "CatalogCacheInitializeCache: load %d/%d w/%d, %u", \
				i+1, cache->cc_nkeys, cache->cc_key[i], \
				 tupdesc->attrs[cache->cc_key[i] - 1]->atttypid); \
		} else { \
			elog(DEBUG2, "CatalogCacheInitializeCache: load %d/%d w/%d", \
				i+1, cache->cc_nkeys, cache->cc_key[i]); \
		} \
} while(0)
#else
/* In non-debug builds the trace hooks compile away to nothing */
#define CatalogCacheInitializeCache_DEBUG1
#define CatalogCacheInitializeCache_DEBUG2
#endif
841
static void
CatalogCacheInitializeCache(CatCache *cache)
{
	Relation	relation;
	MemoryContext oldcxt;
	TupleDesc	tupdesc;
	int			i;

	CatalogCacheInitializeCache_DEBUG1;

	/* Open (and lock) the catalog so we can read its descriptor */
	relation = heap_open(cache->cc_reloid, AccessShareLock);

	/*
	 * switch to the cache context so our allocations do not vanish at the end
	 * of a transaction
	 */
	Assert(CacheMemoryContext != NULL);

	oldcxt = MemoryContextSwitchTo(CacheMemoryContext);

	/*
	 * copy the relcache's tuple descriptor to permanent cache storage
	 */
	tupdesc = CreateTupleDescCopyConstr(RelationGetDescr(relation));

	/*
	 * save the relation's name and relisshared flag, too (cc_relname is used
	 * only for debugging purposes)
	 */
	cache->cc_relname = pstrdup(RelationGetRelationName(relation));
	cache->cc_relisshared = RelationGetForm(relation)->relisshared;

	/*
	 * return to the caller's memory context and close the rel.  Closing is
	 * safe before the loop below because tupdesc is our own permanent copy,
	 * not the relcache's.
	 */
	MemoryContextSwitchTo(oldcxt);

	heap_close(relation, AccessShareLock);

	CACHE3_elog(DEBUG2, "CatalogCacheInitializeCache: %s, %d keys",
				cache->cc_relname, cache->cc_nkeys);

	/*
	 * initialize cache's key information: for each key column, look up the
	 * datatype-appropriate hash and equality functions and fill in the
	 * prototype ScanKey that searches will copy.
	 */
	for (i = 0; i < cache->cc_nkeys; ++i)
	{
		Oid			keytype;
		RegProcedure eqfunc;

		CatalogCacheInitializeCache_DEBUG2;

		if (cache->cc_key[i] > 0)
			keytype = tupdesc->attrs[cache->cc_key[i] - 1]->atttypid;
		else
		{
			/* The only system column usable as a cache key is OID */
			if (cache->cc_key[i] != ObjectIdAttributeNumber)
				elog(FATAL, "only sys attr supported in caches is OID");
			keytype = OIDOID;
		}

		GetCCHashEqFuncs(keytype,
						 &cache->cc_hashfunc[i],
						 &eqfunc);

		cache->cc_isname[i] = (keytype == NAMEOID);

		/*
		 * Do equality-function lookup (we assume this won't need a catalog
		 * lookup for any supported type)
		 */
		fmgr_info_cxt(eqfunc,
					  &cache->cc_skey[i].sk_func,
					  CacheMemoryContext);

		/* Initialize sk_attno suitably for HeapKeyTest() and heap scans */
		cache->cc_skey[i].sk_attno = cache->cc_key[i];

		/* Fill in sk_strategy as well --- always standard equality */
		cache->cc_skey[i].sk_strategy = BTEqualStrategyNumber;
		cache->cc_skey[i].sk_subtype = InvalidOid;
		/* Currently, there are no catcaches on collation-aware data types */
		cache->cc_skey[i].sk_collation = InvalidOid;

		CACHE4_elog(DEBUG2, "CatalogCacheInitializeCache %s %d %p",
					cache->cc_relname,
					i,
					cache);
	}

	/*
	 * mark this cache fully initialized.  This assignment must come last:
	 * callers (e.g. SearchCatCache) test cc_tupdesc == NULL to decide
	 * whether initialization is still needed, so an error partway through
	 * this function leaves the cache in the "not yet initialized" state.
	 */
	cache->cc_tupdesc = tupdesc;
}
937
938 /*
939  * InitCatCachePhase2 -- external interface for CatalogCacheInitializeCache
940  *
941  * One reason to call this routine is to ensure that the relcache has
942  * created entries for all the catalogs and indexes referenced by catcaches.
943  * Therefore, provide an option to open the index as well as fixing the
944  * cache itself.  An exception is the indexes on pg_am, which we don't use
945  * (cf. IndexScanOK).
946  */
947 void
948 InitCatCachePhase2(CatCache *cache, bool touch_index)
949 {
950         if (cache->cc_tupdesc == NULL)
951                 CatalogCacheInitializeCache(cache);
952
953         if (touch_index &&
954                 cache->id != AMOID &&
955                 cache->id != AMNAME)
956         {
957                 Relation        idesc;
958
959                 /*
960                  * We must lock the underlying catalog before opening the index to
961                  * avoid deadlock, since index_open could possibly result in reading
962                  * this same catalog, and if anyone else is exclusive-locking this
963                  * catalog and index they'll be doing it in that order.
964                  */
965                 LockRelationOid(cache->cc_reloid, AccessShareLock);
966                 idesc = index_open(cache->cc_indexoid, AccessShareLock);
967                 index_close(idesc, AccessShareLock);
968                 UnlockRelationOid(cache->cc_reloid, AccessShareLock);
969         }
970 }
971
972
973 /*
974  *              IndexScanOK
975  *
976  *              This function checks for tuples that will be fetched by
977  *              IndexSupportInitialize() during relcache initialization for
978  *              certain system indexes that support critical syscaches.
979  *              We can't use an indexscan to fetch these, else we'll get into
980  *              infinite recursion.  A plain heap scan will work, however.
981  *              Once we have completed relcache initialization (signaled by
982  *              criticalRelcachesBuilt), we don't have to worry anymore.
983  *
984  *              Similarly, during backend startup we have to be able to use the
985  *              pg_authid and pg_auth_members syscaches for authentication even if
986  *              we don't yet have relcache entries for those catalogs' indexes.
987  */
988 static bool
989 IndexScanOK(CatCache *cache, ScanKey cur_skey)
990 {
991         switch (cache->id)
992         {
993                 case INDEXRELID:
994
995                         /*
996                          * Rather than tracking exactly which indexes have to be loaded
997                          * before we can use indexscans (which changes from time to time),
998                          * just force all pg_index searches to be heap scans until we've
999                          * built the critical relcaches.
1000                          */
1001                         if (!criticalRelcachesBuilt)
1002                                 return false;
1003                         break;
1004
1005                 case AMOID:
1006                 case AMNAME:
1007
1008                         /*
1009                          * Always do heap scans in pg_am, because it's so small there's
1010                          * not much point in an indexscan anyway.  We *must* do this when
1011                          * initially building critical relcache entries, but we might as
1012                          * well just always do it.
1013                          */
1014                         return false;
1015
1016                 case AUTHNAME:
1017                 case AUTHOID:
1018                 case AUTHMEMMEMROLE:
1019
1020                         /*
1021                          * Protect authentication lookups occurring before relcache has
1022                          * collected entries for shared indexes.
1023                          */
1024                         if (!criticalSharedRelcachesBuilt)
1025                                 return false;
1026                         break;
1027
1028                 default:
1029                         break;
1030         }
1031
1032         /* Normal case, allow index scan */
1033         return true;
1034 }
1035
1036 /*
1037  *      SearchCatCache
1038  *
1039  *              This call searches a system cache for a tuple, opening the relation
1040  *              if necessary (on the first access to a particular cache).
1041  *
1042  *              The result is NULL if not found, or a pointer to a HeapTuple in
1043  *              the cache.      The caller must not modify the tuple, and must call
1044  *              ReleaseCatCache() when done with it.
1045  *
1046  * The search key values should be expressed as Datums of the key columns'
1047  * datatype(s).  (Pass zeroes for any unused parameters.)  As a special
1048  * exception, the passed-in key for a NAME column can be just a C string;
1049  * the caller need not go to the trouble of converting it to a fully
1050  * null-padded NAME.
1051  */
HeapTuple
SearchCatCache(CatCache *cache,
			   Datum v1,
			   Datum v2,
			   Datum v3,
			   Datum v4)
{
	ScanKeyData cur_skey[CATCACHE_MAXKEYS];
	uint32		hashValue;
	Index		hashIndex;
	Dlelem	   *elt;
	CatCTup    *ct;
	Relation	relation;
	SysScanDesc scandesc;
	HeapTuple	ntp;

	/*
	 * one-time startup overhead for each cache
	 */
	if (cache->cc_tupdesc == NULL)
		CatalogCacheInitializeCache(cache);

#ifdef CATCACHE_STATS
	cache->cc_searches++;
#endif

	/*
	 * initialize the search key information: copy the cache's prototype
	 * scankeys (which carry the comparison function info) and plug in the
	 * caller's key values.  Unused key slots simply hold zero Datums.
	 */
	memcpy(cur_skey, cache->cc_skey, sizeof(cur_skey));
	cur_skey[0].sk_argument = v1;
	cur_skey[1].sk_argument = v2;
	cur_skey[2].sk_argument = v3;
	cur_skey[3].sk_argument = v4;

	/*
	 * find the hash bucket in which to look for the tuple
	 */
	hashValue = CatalogCacheComputeHashValue(cache, cache->cc_nkeys, cur_skey);
	hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);

	/*
	 * scan the hash bucket until we find a match or exhaust our tuples
	 */
	for (elt = DLGetHead(&cache->cc_bucket[hashIndex]);
		 elt;
		 elt = DLGetSucc(elt))
	{
		bool		res;

		ct = (CatCTup *) DLE_VAL(elt);

		if (ct->dead)
			continue;			/* ignore dead entries */

		if (ct->hash_value != hashValue)
			continue;			/* quickly skip entry if wrong hash val */

		/*
		 * see if the cached tuple matches our key.
		 */
		HeapKeyTest(&ct->tuple,
					cache->cc_tupdesc,
					cache->cc_nkeys,
					cur_skey,
					res);
		if (!res)
			continue;

		/*
		 * We found a match in the cache.  Move it to the front of the list
		 * for its hashbucket, in order to speed subsequent searches.  (The
		 * most frequently accessed elements in any hashbucket will tend to be
		 * near the front of the hashbucket's list.)
		 */
		DLMoveToFront(&ct->cache_elem);

		/*
		 * If it's a positive entry, bump its refcount and return it. If it's
		 * negative, we can report failure to the caller.
		 *
		 * Note the ordering: we enlarge the resource owner's array BEFORE
		 * touching the refcount, so that remembering the reference cannot
		 * fail after the count has been bumped.
		 */
		if (!ct->negative)
		{
			ResourceOwnerEnlargeCatCacheRefs(CurrentResourceOwner);
			ct->refcount++;
			ResourceOwnerRememberCatCacheRef(CurrentResourceOwner, &ct->tuple);

			CACHE3_elog(DEBUG2, "SearchCatCache(%s): found in bucket %d",
						cache->cc_relname, hashIndex);

#ifdef CATCACHE_STATS
			cache->cc_hits++;
#endif

			return &ct->tuple;
		}
		else
		{
			CACHE3_elog(DEBUG2, "SearchCatCache(%s): found neg entry in bucket %d",
						cache->cc_relname, hashIndex);

#ifdef CATCACHE_STATS
			cache->cc_neg_hits++;
#endif

			return NULL;
		}
	}

	/*
	 * Tuple was not found in cache, so we have to try to retrieve it directly
	 * from the relation.  If found, we will add it to the cache; if not
	 * found, we will add a negative cache entry instead.
	 *
	 * NOTE: it is possible for recursive cache lookups to occur while reading
	 * the relation --- for example, due to shared-cache-inval messages being
	 * processed during heap_open().  This is OK.  It's even possible for one
	 * of those lookups to find and enter the very same tuple we are trying to
	 * fetch here.  If that happens, we will enter a second copy of the tuple
	 * into the cache.      The first copy will never be referenced again, and
	 * will eventually age out of the cache, so there's no functional problem.
	 * This case is rare enough that it's not worth expending extra cycles to
	 * detect.
	 */
	relation = heap_open(cache->cc_reloid, AccessShareLock);

	scandesc = systable_beginscan(relation,
								  cache->cc_indexoid,
								  IndexScanOK(cache, cur_skey),
								  SnapshotNow,
								  cache->cc_nkeys,
								  cur_skey);

	ct = NULL;

	while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
	{
		ct = CatalogCacheCreateEntry(cache, ntp,
									 hashValue, hashIndex,
									 false);
		/* immediately set the refcount to 1 */
		ResourceOwnerEnlargeCatCacheRefs(CurrentResourceOwner);
		ct->refcount++;
		ResourceOwnerRememberCatCacheRef(CurrentResourceOwner, &ct->tuple);
		break;					/* assume only one match */
	}

	systable_endscan(scandesc);

	heap_close(relation, AccessShareLock);

	/*
	 * If tuple was not found, we need to build a negative cache entry
	 * containing a fake tuple.  The fake tuple has the correct key columns,
	 * but nulls everywhere else.
	 *
	 * In bootstrap mode, we don't build negative entries, because the cache
	 * invalidation mechanism isn't alive and can't clear them if the tuple
	 * gets created later.  (Bootstrap doesn't do UPDATEs, so it doesn't need
	 * cache inval for that.)
	 */
	if (ct == NULL)
	{
		if (IsBootstrapProcessingMode())
			return NULL;

		ntp = build_dummy_tuple(cache, cache->cc_nkeys, cur_skey);
		ct = CatalogCacheCreateEntry(cache, ntp,
									 hashValue, hashIndex,
									 true);
		/* CatalogCacheCreateEntry copied the dummy tuple; free our copy */
		heap_freetuple(ntp);

		CACHE4_elog(DEBUG2, "SearchCatCache(%s): Contains %d/%d tuples",
					cache->cc_relname, cache->cc_ntup, CacheHdr->ch_ntup);
		CACHE3_elog(DEBUG2, "SearchCatCache(%s): put neg entry in bucket %d",
					cache->cc_relname, hashIndex);

		/*
		 * We are not returning the negative entry to the caller, so leave its
		 * refcount zero.
		 */

		return NULL;
	}

	CACHE4_elog(DEBUG2, "SearchCatCache(%s): Contains %d/%d tuples",
				cache->cc_relname, cache->cc_ntup, CacheHdr->ch_ntup);
	CACHE3_elog(DEBUG2, "SearchCatCache(%s): put in bucket %d",
				cache->cc_relname, hashIndex);

#ifdef CATCACHE_STATS
	cache->cc_newloads++;
#endif

	return &ct->tuple;
}
1248
1249 /*
1250  *      ReleaseCatCache
1251  *
1252  *      Decrement the reference count of a catcache entry (releasing the
1253  *      hold grabbed by a successful SearchCatCache).
1254  *
1255  *      NOTE: if compiled with -DCATCACHE_FORCE_RELEASE then catcache entries
1256  *      will be freed as soon as their refcount goes to zero.  In combination
1257  *      with aset.c's CLOBBER_FREED_MEMORY option, this provides a good test
1258  *      to catch references to already-released catcache entries.
1259  */
1260 void
1261 ReleaseCatCache(HeapTuple tuple)
1262 {
1263         CatCTup    *ct = (CatCTup *) (((char *) tuple) -
1264                                                                   offsetof(CatCTup, tuple));
1265
1266         /* Safety checks to ensure we were handed a cache entry */
1267         Assert(ct->ct_magic == CT_MAGIC);
1268         Assert(ct->refcount > 0);
1269
1270         ct->refcount--;
1271         ResourceOwnerForgetCatCacheRef(CurrentResourceOwner, &ct->tuple);
1272
1273         if (
1274 #ifndef CATCACHE_FORCE_RELEASE
1275                 ct->dead &&
1276 #endif
1277                 ct->refcount == 0 &&
1278                 (ct->c_list == NULL || ct->c_list->refcount == 0))
1279                 CatCacheRemoveCTup(ct->my_cache, ct);
1280 }
1281
1282
1283 /*
1284  *      SearchCatCacheList
1285  *
1286  *              Generate a list of all tuples matching a partial key (that is,
1287  *              a key specifying just the first K of the cache's N key columns).
1288  *
1289  *              The caller must not modify the list object or the pointed-to tuples,
1290  *              and must call ReleaseCatCacheList() when done with the list.
1291  */
CatCList *
SearchCatCacheList(CatCache *cache,
				   int nkeys,
				   Datum v1,
				   Datum v2,
				   Datum v3,
				   Datum v4)
{
	ScanKeyData cur_skey[CATCACHE_MAXKEYS];
	uint32		lHashValue;
	Dlelem	   *elt;
	CatCList   *cl;
	CatCTup    *ct;
	List	   *volatile ctlist;	/* volatile: read in PG_CATCH after longjmp */
	ListCell   *ctlist_item;
	int			nmembers;
	bool		ordered;
	HeapTuple	ntp;
	MemoryContext oldcxt;
	int			i;

	/*
	 * one-time startup overhead for each cache
	 */
	if (cache->cc_tupdesc == NULL)
		CatalogCacheInitializeCache(cache);

	/* A list search must use strictly fewer keys than the cache defines */
	Assert(nkeys > 0 && nkeys < cache->cc_nkeys);

#ifdef CATCACHE_STATS
	cache->cc_lsearches++;
#endif

	/*
	 * initialize the search key information
	 */
	memcpy(cur_skey, cache->cc_skey, sizeof(cur_skey));
	cur_skey[0].sk_argument = v1;
	cur_skey[1].sk_argument = v2;
	cur_skey[2].sk_argument = v3;
	cur_skey[3].sk_argument = v4;

	/*
	 * compute a hash value of the given keys for faster search.  We don't
	 * presently divide the CatCList items into buckets, but this still lets
	 * us skip non-matching items quickly most of the time.
	 */
	lHashValue = CatalogCacheComputeHashValue(cache, nkeys, cur_skey);

	/*
	 * scan the items until we find a match or exhaust our list
	 */
	for (elt = DLGetHead(&cache->cc_lists);
		 elt;
		 elt = DLGetSucc(elt))
	{
		bool		res;

		cl = (CatCList *) DLE_VAL(elt);

		if (cl->dead)
			continue;			/* ignore dead entries */

		if (cl->hash_value != lHashValue)
			continue;			/* quickly skip entry if wrong hash val */

		/*
		 * see if the cached list matches our key.
		 */
		if (cl->nkeys != nkeys)
			continue;
		HeapKeyTest(&cl->tuple,
					cache->cc_tupdesc,
					nkeys,
					cur_skey,
					res);
		if (!res)
			continue;

		/*
		 * We found a matching list.  Move the list to the front of the
		 * cache's list-of-lists, to speed subsequent searches.  (We do not
		 * move the members to the fronts of their hashbucket lists, however,
		 * since there's no point in that unless they are searched for
		 * individually.)
		 */
		DLMoveToFront(&cl->cache_elem);

		/* Bump the list's refcount and return it */
		ResourceOwnerEnlargeCatCacheListRefs(CurrentResourceOwner);
		cl->refcount++;
		ResourceOwnerRememberCatCacheListRef(CurrentResourceOwner, cl);

		CACHE2_elog(DEBUG2, "SearchCatCacheList(%s): found list",
					cache->cc_relname);

#ifdef CATCACHE_STATS
		cache->cc_lhits++;
#endif

		return cl;
	}

	/*
	 * List was not found in cache, so we have to build it by reading the
	 * relation.  For each matching tuple found in the relation, use an
	 * existing cache entry if possible, else build a new one.
	 *
	 * We have to bump the member refcounts temporarily to ensure they won't
	 * get dropped from the cache while loading other members. We use a PG_TRY
	 * block to ensure we can undo those refcounts if we get an error before
	 * we finish constructing the CatCList.
	 */
	ResourceOwnerEnlargeCatCacheListRefs(CurrentResourceOwner);

	ctlist = NIL;

	PG_TRY();
	{
		Relation	relation;
		SysScanDesc scandesc;

		relation = heap_open(cache->cc_reloid, AccessShareLock);

		scandesc = systable_beginscan(relation,
									  cache->cc_indexoid,
									  IndexScanOK(cache, cur_skey),
									  SnapshotNow,
									  nkeys,
									  cur_skey);

		/* The list will be ordered iff we are doing an index scan */
		ordered = (scandesc->irel != NULL);

		while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
		{
			uint32		hashValue;
			Index		hashIndex;

			/*
			 * See if there's an entry for this tuple already.
			 */
			ct = NULL;
			hashValue = CatalogCacheComputeTupleHashValue(cache, ntp);
			hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);

			for (elt = DLGetHead(&cache->cc_bucket[hashIndex]);
				 elt;
				 elt = DLGetSucc(elt))
			{
				ct = (CatCTup *) DLE_VAL(elt);

				if (ct->dead || ct->negative)
					continue;	/* ignore dead and negative entries */

				if (ct->hash_value != hashValue)
					continue;	/* quickly skip entry if wrong hash val */

				/* match on physical tuple identity, not just key equality */
				if (!ItemPointerEquals(&(ct->tuple.t_self), &(ntp->t_self)))
					continue;	/* not same tuple */

				/*
				 * Found a match, but can't use it if it belongs to another
				 * list already
				 */
				if (ct->c_list)
					continue;

				break;			/* A-OK */
			}

			if (elt == NULL)
			{
				/* We didn't find a usable entry, so make a new one */
				ct = CatalogCacheCreateEntry(cache, ntp,
											 hashValue, hashIndex,
											 false);
			}

			/* Careful here: add entry to ctlist, then bump its refcount */
			/* This way leaves state correct if lappend runs out of memory */
			ctlist = lappend(ctlist, ct);
			ct->refcount++;
		}

		systable_endscan(scandesc);

		heap_close(relation, AccessShareLock);

		/*
		 * Now we can build the CatCList entry.  First we need a dummy tuple
		 * containing the key values...
		 */
		ntp = build_dummy_tuple(cache, nkeys, cur_skey);
		oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
		nmembers = list_length(ctlist);
		cl = (CatCList *)
			palloc(sizeof(CatCList) + nmembers * sizeof(CatCTup *));
		heap_copytuple_with_tuple(ntp, &cl->tuple);
		MemoryContextSwitchTo(oldcxt);
		heap_freetuple(ntp);

		/*
		 * We are now past the last thing that could trigger an elog before we
		 * have finished building the CatCList and remembering it in the
		 * resource owner.      So it's OK to fall out of the PG_TRY, and indeed
		 * we'd better do so before we start marking the members as belonging
		 * to the list.
		 */

	}
	PG_CATCH();
	{
		/*
		 * Error recovery: release the temporary refcounts we took on the
		 * members, removing any entry that thereby becomes dead and
		 * unreferenced, then propagate the error.
		 */
		foreach(ctlist_item, ctlist)
		{
			ct = (CatCTup *) lfirst(ctlist_item);
			Assert(ct->c_list == NULL);
			Assert(ct->refcount > 0);
			ct->refcount--;
			if (
#ifndef CATCACHE_FORCE_RELEASE
				ct->dead &&
#endif
				ct->refcount == 0 &&
				(ct->c_list == NULL || ct->c_list->refcount == 0))
				CatCacheRemoveCTup(cache, ct);
		}

		PG_RE_THROW();
	}
	PG_END_TRY();

	cl->cl_magic = CL_MAGIC;
	cl->my_cache = cache;
	DLInitElem(&cl->cache_elem, cl);
	cl->refcount = 0;			/* for the moment */
	cl->dead = false;
	cl->ordered = ordered;
	cl->nkeys = nkeys;
	cl->hash_value = lHashValue;
	cl->n_members = nmembers;

	/*
	 * Link the members into the new list, trading each temporary refcount
	 * for a c_list back-pointer.  The list's own refcount (bumped below)
	 * now keeps the members alive.
	 */
	i = 0;
	foreach(ctlist_item, ctlist)
	{
		cl->members[i++] = ct = (CatCTup *) lfirst(ctlist_item);
		Assert(ct->c_list == NULL);
		ct->c_list = cl;
		/* release the temporary refcount on the member */
		Assert(ct->refcount > 0);
		ct->refcount--;
		/* mark list dead if any members already dead */
		if (ct->dead)
			cl->dead = true;
	}
	Assert(i == nmembers);

	DLAddHead(&cache->cc_lists, &cl->cache_elem);

	/* Finally, bump the list's refcount and return it */
	cl->refcount++;
	ResourceOwnerRememberCatCacheListRef(CurrentResourceOwner, cl);

	CACHE3_elog(DEBUG2, "SearchCatCacheList(%s): made list of %d members",
				cache->cc_relname, nmembers);

	return cl;
}
1560
1561 /*
1562  *      ReleaseCatCacheList
1563  *
1564  *      Decrement the reference count of a catcache list.
1565  */
1566 void
1567 ReleaseCatCacheList(CatCList *list)
1568 {
1569         /* Safety checks to ensure we were handed a cache entry */
1570         Assert(list->cl_magic == CL_MAGIC);
1571         Assert(list->refcount > 0);
1572         list->refcount--;
1573         ResourceOwnerForgetCatCacheListRef(CurrentResourceOwner, list);
1574
1575         if (
1576 #ifndef CATCACHE_FORCE_RELEASE
1577                 list->dead &&
1578 #endif
1579                 list->refcount == 0)
1580                 CatCacheRemoveCList(list->my_cache, list);
1581 }
1582
1583
1584 /*
1585  * CatalogCacheCreateEntry
1586  *              Create a new CatCTup entry, copying the given HeapTuple and other
1587  *              supplied data into it.  The new entry initially has refcount 0.
1588  */
1589 static CatCTup *
1590 CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp,
1591                                                 uint32 hashValue, Index hashIndex, bool negative)
1592 {
1593         CatCTup    *ct;
1594         MemoryContext oldcxt;
1595
1596         /*
1597          * Allocate CatCTup header in cache memory, and copy the tuple there too.
1598          */
1599         oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
1600         ct = (CatCTup *) palloc(sizeof(CatCTup));
1601         heap_copytuple_with_tuple(ntp, &ct->tuple);
1602         MemoryContextSwitchTo(oldcxt);
1603
1604         /*
1605          * Finish initializing the CatCTup header, and add it to the cache's
1606          * linked list and counts.
1607          */
1608         ct->ct_magic = CT_MAGIC;
1609         ct->my_cache = cache;
1610         DLInitElem(&ct->cache_elem, (void *) ct);
1611         ct->c_list = NULL;
1612         ct->refcount = 0;                       /* for the moment */
1613         ct->dead = false;
1614         ct->negative = negative;
1615         ct->hash_value = hashValue;
1616
1617         DLAddHead(&cache->cc_bucket[hashIndex], &ct->cache_elem);
1618
1619         cache->cc_ntup++;
1620         CacheHdr->ch_ntup++;
1621
1622         return ct;
1623 }
1624
1625 /*
1626  * build_dummy_tuple
1627  *              Generate a palloc'd HeapTuple that contains the specified key
1628  *              columns, and NULLs for other columns.
1629  *
1630  * This is used to store the keys for negative cache entries and CatCList
1631  * entries, which don't have real tuples associated with them.
1632  */
1633 static HeapTuple
1634 build_dummy_tuple(CatCache *cache, int nkeys, ScanKey skeys)
1635 {
1636         HeapTuple       ntp;
1637         TupleDesc       tupDesc = cache->cc_tupdesc;
1638         Datum      *values;
1639         bool       *nulls;
1640         Oid                     tupOid = InvalidOid;
1641         NameData        tempNames[4];
1642         int                     i;
1643
1644         values = (Datum *) palloc(tupDesc->natts * sizeof(Datum));
1645         nulls = (bool *) palloc(tupDesc->natts * sizeof(bool));
1646
1647         memset(values, 0, tupDesc->natts * sizeof(Datum));
1648         memset(nulls, true, tupDesc->natts * sizeof(bool));
1649
1650         for (i = 0; i < nkeys; i++)
1651         {
1652                 int                     attindex = cache->cc_key[i];
1653                 Datum           keyval = skeys[i].sk_argument;
1654
1655                 if (attindex > 0)
1656                 {
1657                         /*
1658                          * Here we must be careful in case the caller passed a C string
1659                          * where a NAME is wanted: convert the given argument to a
1660                          * correctly padded NAME.  Otherwise the memcpy() done in
1661                          * heap_form_tuple could fall off the end of memory.
1662                          */
1663                         if (cache->cc_isname[i])
1664                         {
1665                                 Name            newval = &tempNames[i];
1666
1667                                 namestrcpy(newval, DatumGetCString(keyval));
1668                                 keyval = NameGetDatum(newval);
1669                         }
1670                         values[attindex - 1] = keyval;
1671                         nulls[attindex - 1] = false;
1672                 }
1673                 else
1674                 {
1675                         Assert(attindex == ObjectIdAttributeNumber);
1676                         tupOid = DatumGetObjectId(keyval);
1677                 }
1678         }
1679
1680         ntp = heap_form_tuple(tupDesc, values, nulls);
1681         if (tupOid != InvalidOid)
1682                 HeapTupleSetOid(ntp, tupOid);
1683
1684         pfree(values);
1685         pfree(nulls);
1686
1687         return ntp;
1688 }
1689
1690
1691 /*
1692  *      PrepareToInvalidateCacheTuple()
1693  *
1694  *      This is part of a rather subtle chain of events, so pay attention:
1695  *
1696  *      When a tuple is inserted or deleted, it cannot be flushed from the
1697  *      catcaches immediately, for reasons explained at the top of cache/inval.c.
1698  *      Instead we have to add entry(s) for the tuple to a list of pending tuple
1699  *      invalidations that will be done at the end of the command or transaction.
1700  *
1701  *      The lists of tuples that need to be flushed are kept by inval.c.  This
1702  *      routine is a helper routine for inval.c.  Given a tuple belonging to
1703  *      the specified relation, find all catcaches it could be in, compute the
1704  *      correct hash value for each such catcache, and call the specified
1705  *      function to record the cache id and hash value in inval.c's lists.
1706  *      CatalogCacheIdInvalidate will be called later, if appropriate,
1707  *      using the recorded information.
1708  *
1709  *      For an insert or delete, tuple is the target tuple and newtuple is NULL.
1710  *      For an update, we are called just once, with tuple being the old tuple
1711  *      version and newtuple the new version.  We should make two list entries
1712  *      if the tuple's hash value changed, but only one if it didn't.
1713  *
1714  *      Note that it is irrelevant whether the given tuple is actually loaded
1715  *      into the catcache at the moment.  Even if it's not there now, it might
1716  *      be by the end of the command, or there might be a matching negative entry
1717  *      to flush --- or other backends' caches might have such entries --- so
1718  *      we have to make list entries to flush it later.
1719  *
1720  *      Also note that it's not an error if there are no catcaches for the
1721  *      specified relation.  inval.c doesn't know exactly which rels have
1722  *      catcaches --- it will call this routine for any tuple that's in a
1723  *      system relation.
1724  */
1725 void
1726 PrepareToInvalidateCacheTuple(Relation relation,
1727                                                           HeapTuple tuple,
1728                                                           HeapTuple newtuple,
1729                                                           void (*function) (int, uint32, Oid))
1730 {
1731         CatCache   *ccp;
1732         Oid                     reloid;
1733
1734         CACHE1_elog(DEBUG2, "PrepareToInvalidateCacheTuple: called");
1735
1736         /*
1737          * sanity checks
1738          */
1739         Assert(RelationIsValid(relation));
1740         Assert(HeapTupleIsValid(tuple));
1741         Assert(PointerIsValid(function));
1742         Assert(CacheHdr != NULL);
1743
1744         reloid = RelationGetRelid(relation);
1745
1746         /* ----------------
1747          *      for each cache
1748          *         if the cache contains tuples from the specified relation
1749          *                 compute the tuple's hash value(s) in this cache,
1750          *                 and call the passed function to register the information.
1751          * ----------------
1752          */
1753
1754         for (ccp = CacheHdr->ch_caches; ccp; ccp = ccp->cc_next)
1755         {
1756                 uint32          hashvalue;
1757                 Oid                     dbid;
1758
1759                 if (ccp->cc_reloid != reloid)
1760                         continue;
1761
1762                 /* Just in case cache hasn't finished initialization yet... */
1763                 if (ccp->cc_tupdesc == NULL)
1764                         CatalogCacheInitializeCache(ccp);
1765
1766                 hashvalue = CatalogCacheComputeTupleHashValue(ccp, tuple);
1767                 dbid = ccp->cc_relisshared ? (Oid) 0 : MyDatabaseId;
1768
1769                 (*function) (ccp->id, hashvalue, dbid);
1770
1771                 if (newtuple)
1772                 {
1773                         uint32          newhashvalue;
1774
1775                         newhashvalue = CatalogCacheComputeTupleHashValue(ccp, newtuple);
1776
1777                         if (newhashvalue != hashvalue)
1778                                 (*function) (ccp->id, newhashvalue, dbid);
1779                 }
1780         }
1781 }
1782
1783
1784 /*
1785  * Subroutines for warning about reference leaks.  These are exported so
1786  * that resowner.c can call them.
1787  */
1788 void
1789 PrintCatCacheLeakWarning(HeapTuple tuple)
1790 {
1791         CatCTup    *ct = (CatCTup *) (((char *) tuple) -
1792                                                                   offsetof(CatCTup, tuple));
1793
1794         /* Safety check to ensure we were handed a cache entry */
1795         Assert(ct->ct_magic == CT_MAGIC);
1796
1797         elog(WARNING, "cache reference leak: cache %s (%d), tuple %u/%u has count %d",
1798                  ct->my_cache->cc_relname, ct->my_cache->id,
1799                  ItemPointerGetBlockNumber(&(tuple->t_self)),
1800                  ItemPointerGetOffsetNumber(&(tuple->t_self)),
1801                  ct->refcount);
1802 }
1803
1804 void
1805 PrintCatCacheListLeakWarning(CatCList *list)
1806 {
1807         elog(WARNING, "cache reference leak: cache %s (%d), list %p has count %d",
1808                  list->my_cache->cc_relname, list->my_cache->id,
1809                  list, list->refcount);
1810 }