*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/cache/catcache.c,v 1.128 2006/03/05 15:58:45 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/cache/catcache.c,v 1.129 2006/06/15 02:08:09 tgl Exp $
*
*-------------------------------------------------------------------------
*/
/* #define CACHEDEBUG */ /* turns DEBUG elogs on */
-/*
- * Constants related to size of the catcache.
- *
- * NCCBUCKETS must be a power of two and must be less than 64K (because
- * SharedInvalCatcacheMsg crams hash indexes into a uint16 field). In
- * practice it should be a lot less, anyway, to avoid chewing up too much
- * space on hash bucket headers.
- *
- * MAXCCTUPLES could be as small as a few hundred, if per-backend memory
- * consumption is at a premium.
- */
-#define NCCBUCKETS 256 /* Hash buckets per CatCache */
-#define MAXCCTUPLES 5000 /* Maximum # of tuples in all caches */
-
/*
* Given a hash value and the size of the hash table, find the bucket
* in which the hash value belongs. Since the hash table must contain
HeapTuple tuple);
#ifdef CATCACHE_STATS
-static void CatCachePrintStats(void);
+static void CatCachePrintStats(int code, Datum arg);
#endif
static void CatCacheRemoveCTup(CatCache *cache, CatCTup *ct);
static void CatCacheRemoveCList(CatCache *cache, CatCList *cl);
static CatCTup *CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp,
uint32 hashValue, Index hashIndex,
bool negative);
-static void CatalogCacheCleanup(CatCTup *savect);
static HeapTuple build_dummy_tuple(CatCache *cache, int nkeys, ScanKey skeys);
#ifdef CATCACHE_STATS
static void
-CatCachePrintStats(void)
+CatCachePrintStats(int code, Datum arg)
{
CatCache *cache;
long cc_searches = 0;
long cc_neg_hits = 0;
long cc_newloads = 0;
long cc_invals = 0;
- long cc_discards = 0;
long cc_lsearches = 0;
long cc_lhits = 0;
- elog(DEBUG2, "catcache stats dump: %d/%d tuples in catcaches",
- CacheHdr->ch_ntup, CacheHdr->ch_maxtup);
-
for (cache = CacheHdr->ch_caches; cache; cache = cache->cc_next)
{
if (cache->cc_ntup == 0 && cache->cc_searches == 0)
continue; /* don't print unused caches */
- elog(DEBUG2, "catcache %s/%u: %d tup, %ld srch, %ld+%ld=%ld hits, %ld+%ld=%ld loads, %ld invals, %ld discards, %ld lsrch, %ld lhits",
+ elog(DEBUG2, "catcache %s/%u: %d tup, %ld srch, %ld+%ld=%ld hits, %ld+%ld=%ld loads, %ld invals, %ld lsrch, %ld lhits",
cache->cc_relname,
cache->cc_indexoid,
cache->cc_ntup,
cache->cc_searches - cache->cc_hits - cache->cc_neg_hits - cache->cc_newloads,
cache->cc_searches - cache->cc_hits - cache->cc_neg_hits,
cache->cc_invals,
- cache->cc_discards,
cache->cc_lsearches,
cache->cc_lhits);
cc_searches += cache->cc_searches;
cc_neg_hits += cache->cc_neg_hits;
cc_newloads += cache->cc_newloads;
cc_invals += cache->cc_invals;
- cc_discards += cache->cc_discards;
cc_lsearches += cache->cc_lsearches;
cc_lhits += cache->cc_lhits;
}
- elog(DEBUG2, "catcache totals: %d tup, %ld srch, %ld+%ld=%ld hits, %ld+%ld=%ld loads, %ld invals, %ld discards, %ld lsrch, %ld lhits",
+ elog(DEBUG2, "catcache totals: %d tup, %ld srch, %ld+%ld=%ld hits, %ld+%ld=%ld loads, %ld invals, %ld lsrch, %ld lhits",
CacheHdr->ch_ntup,
cc_searches,
cc_hits,
cc_searches - cc_hits - cc_neg_hits - cc_newloads,
cc_searches - cc_hits - cc_neg_hits,
cc_invals,
- cc_discards,
cc_lsearches,
cc_lhits);
}
return; /* nothing left to do */
}
- /* delink from linked lists */
- DLRemove(&ct->lrulist_elem);
+ /* delink from linked list */
DLRemove(&ct->cache_elem);
/* free associated tuple data */
if (assert_enabled)
{
CatCache *ccp;
- Dlelem *elt;
- /* Check CatCLists */
for (ccp = CacheHdr->ch_caches; ccp; ccp = ccp->cc_next)
{
+ Dlelem *elt;
+ int i;
+
+ /* Check CatCLists */
for (elt = DLGetHead(&ccp->cc_lists); elt; elt = DLGetSucc(elt))
{
CatCList *cl = (CatCList *) DLE_VAL(elt);
Assert(cl->refcount == 0);
Assert(!cl->dead);
}
- }
- /* Check individual tuples */
- for (elt = DLGetHead(&CacheHdr->ch_lrulist); elt; elt = DLGetSucc(elt))
- {
- CatCTup *ct = (CatCTup *) DLE_VAL(elt);
+ /* Check individual tuples */
+ for (i = 0; i < ccp->cc_nbuckets; i++)
+ {
+ for (elt = DLGetHead(&ccp->cc_bucket[i]);
+ elt;
+ elt = DLGetSucc(elt))
+ {
+ CatCTup *ct = (CatCTup *) DLE_VAL(elt);
- Assert(ct->ct_magic == CT_MAGIC);
- Assert(ct->refcount == 0);
- Assert(!ct->dead);
+ Assert(ct->ct_magic == CT_MAGIC);
+ Assert(ct->refcount == 0);
+ Assert(!ct->dead);
+ }
+ }
}
}
#endif
Oid indexoid,
int reloidattr,
int nkeys,
- const int *key)
+ const int *key,
+ int nbuckets)
{
CatCache *cp;
MemoryContext oldcxt;
int i;
+ /*
+ * nbuckets is the number of hash buckets to use in this catcache.
+ * Currently we just use a hard-wired estimate of an appropriate size
+ * for each cache; maybe later make them dynamically resizable?
+ *
+ * nbuckets must be a power of two. We check this via Assert rather than
+ * a full runtime check because the values will be coming from constant
+ * tables.
+ *
+ * If you're confused by the power-of-two check, see comments in
+ * bitmapset.c for an explanation.
+ */
+ Assert(nbuckets > 0 && (nbuckets & -nbuckets) == nbuckets);
+
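
As an aside on the Assert above, here is a minimal standalone sketch (illustrative only, not part of the patch; nbuckets and hashValue are just example values) of why the bit test works and what the power-of-two requirement buys: in two's complement, n & -n isolates the lowest set bit of n, so it equals n exactly when n has a single bit set, and a power-of-two bucket count in turn lets the bucket index be derived from the hash value with a simple mask.

#include <assert.h>
#include <stdint.h>

int
main(void)
{
	uint32_t	nbuckets = 256;			/* a power of two: exactly one bit set */
	uint32_t	hashValue = 0xDEADBEEF;	/* arbitrary example hash value */

	/* n & -n keeps only the lowest set bit, so it equals n iff n is a
	 * power of two (given n > 0); 6 = 0110 fails because 6 & -6 = 0010. */
	assert((nbuckets & -nbuckets) == nbuckets);
	assert((6u & -6u) != 6u);

	/* With a power-of-two table size, "hash mod size" reduces to a mask. */
	assert((hashValue % nbuckets) == (hashValue & (nbuckets - 1)));
	return 0;
}
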
/*
* first switch to the cache context so our allocations do not vanish at
* the end of a transaction
oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
/*
- * if first time through, initialize the cache group header, including
- * global LRU list header
+ * if first time through, initialize the cache group header
*/
if (CacheHdr == NULL)
{
CacheHdr = (CatCacheHeader *) palloc(sizeof(CatCacheHeader));
CacheHdr->ch_caches = NULL;
CacheHdr->ch_ntup = 0;
- CacheHdr->ch_maxtup = MAXCCTUPLES;
- DLInitList(&CacheHdr->ch_lrulist);
#ifdef CATCACHE_STATS
+ /* set up to dump stats at backend exit */
on_proc_exit(CatCachePrintStats, 0);
#endif
}
*
* Note: we assume zeroing initializes the Dllist headers correctly
*/
- cp = (CatCache *) palloc0(sizeof(CatCache) + NCCBUCKETS * sizeof(Dllist));
+ cp = (CatCache *) palloc0(sizeof(CatCache) + nbuckets * sizeof(Dllist));
/*
* initialize the cache's relation information for the relation
cp->cc_tupdesc = (TupleDesc) NULL;
cp->cc_reloidattr = reloidattr;
cp->cc_ntup = 0;
- cp->cc_nbuckets = NCCBUCKETS;
+ cp->cc_nbuckets = nbuckets;
cp->cc_nkeys = nkeys;
for (i = 0; i < nkeys; ++i)
cp->cc_key[i] = key[i];
continue;
/*
- * we found a match in the cache: move it to the front of the global
- * LRU list. We also move it to the front of the list for its
- * hashbucket, in order to speed subsequent searches. (The most
- * frequently accessed elements in any hashbucket will tend to be near
- * the front of the hashbucket's list.)
+ * We found a match in the cache. Move it to the front of the list
+ * for its hashbucket, in order to speed subsequent searches. (The
+ * most frequently accessed elements in any hashbucket will tend to be
+ * near the front of the hashbucket's list.)
*/
- DLMoveToFront(&ct->lrulist_elem);
DLMoveToFront(&ct->cache_elem);
/*
continue;
/*
- * We found a matching list: mark it as touched since the last
- * CatalogCacheCleanup() sweep. Also move the list to the front of
- * the cache's list-of-lists, to speed subsequent searches. (We do not
+ * We found a matching list. Move the list to the front of the
+ * cache's list-of-lists, to speed subsequent searches. (We do not
* move the members to the fronts of their hashbucket lists, however,
* since there's no point in that unless they are searched for
* individually.)
*/
- cl->touched = true;
DLMoveToFront(&cl->cache_elem);
/* Bump the list's refcount and return it */
if (ct->c_list)
continue;
- /* Found a match, so move it to front */
- DLMoveToFront(&ct->lrulist_elem);
-
- break;
+ break; /* A-OK */
}
if (elt == NULL)
cl->refcount = 0; /* for the moment */
cl->dead = false;
cl->ordered = ordered;
- cl->touched = false; /* we already moved members to front */
cl->nkeys = nkeys;
cl->hash_value = lHashValue;
cl->n_members = nmembers;
/*
* Finish initializing the CatCTup header, and add it to the cache's
- * linked lists and counts.
+ * linked list and counts.
*/
ct->ct_magic = CT_MAGIC;
ct->my_cache = cache;
- DLInitElem(&ct->lrulist_elem, (void *) ct);
DLInitElem(&ct->cache_elem, (void *) ct);
ct->c_list = NULL;
ct->refcount = 0; /* for the moment */
ct->negative = negative;
ct->hash_value = hashValue;
- DLAddHead(&CacheHdr->ch_lrulist, &ct->lrulist_elem);
DLAddHead(&cache->cc_bucket[hashIndex], &ct->cache_elem);
cache->cc_ntup++;
CacheHdr->ch_ntup++;
- /*
- * If we've exceeded the desired size of the caches, try to throw away the
- * least recently used entry(s). NB: be careful not to throw away the
- * newly-built entry...
- */
- if (CacheHdr->ch_ntup > CacheHdr->ch_maxtup)
- CatalogCacheCleanup(ct);
-
return ct;
}
-/*
- * CatalogCacheCleanup
- * Try to reduce the size of the catcaches when they get too big
- *
- * savect can be NULL, or a specific CatCTup not to remove even if it
- * has zero refcount.
- */
-static void
-CatalogCacheCleanup(CatCTup *savect)
-{
- int tup_target;
- CatCache *ccp;
- Dlelem *elt,
- *prevelt;
-
- /*
- * Each time we have to do this, try to cut the cache size down to about
- * 90% of the maximum.
- */
- tup_target = (CacheHdr->ch_maxtup * 9) / 10;
-
- /*
- * Our strategy for managing CatCLists is that, each time we have to throw
- * away some cache entries, we first move-to-front all the members of
- * CatCLists that have been touched since the last cleanup sweep. Then we
- * do strict LRU elimination by individual tuples, zapping a list if any
- * of its members gets zapped. Before PostgreSQL 8.1, we moved members to
- * front each time their owning list was touched, which was arguably more
- * fair in balancing list members against standalone tuples --- but the
- * overhead for large lists was horrendous. This scheme is more heavily
- * biased towards preserving lists, but that is not necessarily bad
- * either.
- */
- for (ccp = CacheHdr->ch_caches; ccp; ccp = ccp->cc_next)
- {
- for (elt = DLGetHead(&ccp->cc_lists); elt; elt = DLGetSucc(elt))
- {
- CatCList *cl = (CatCList *) DLE_VAL(elt);
-
- Assert(cl->cl_magic == CL_MAGIC);
- if (cl->touched && !cl->dead)
- {
- int i;
-
- for (i = 0; i < cl->n_members; i++)
- DLMoveToFront(&cl->members[i]->lrulist_elem);
- }
- cl->touched = false;
- }
- }
-
- /* Now get rid of unreferenced tuples in reverse global LRU order */
- for (elt = DLGetTail(&CacheHdr->ch_lrulist); elt; elt = prevelt)
- {
- CatCTup *ct = (CatCTup *) DLE_VAL(elt);
-
- prevelt = DLGetPred(elt);
-
- if (ct->refcount == 0 &&
- (ct->c_list == NULL || ct->c_list->refcount == 0) &&
- ct != savect)
- {
-#ifdef CATCACHE_STATS
- ct->my_cache->cc_discards++;
-#endif
- CatCacheRemoveCTup(ct->my_cache, ct);
-
- /* Quit when we've removed enough tuples */
- if (CacheHdr->ch_ntup <= tup_target)
- break;
- }
- }
-}
-
/*
* build_dummy_tuple
* Generate a palloc'd HeapTuple that contains the specified key
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/cache/syscache.c,v 1.103 2006/05/03 22:45:26 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/cache/syscache.c,v 1.104 2006/06/15 02:08:09 tgl Exp $
*
* NOTES
* These routines allow the parser/planner/executor to perform
the list sorted alphabetically and adjust the cache numbers
accordingly.
- Add your entry to the cacheinfo[] array below. All cache lists are
- alphabetical, so add it in the proper place. Specify the relation
- OID, index OID, number of keys, and key attribute numbers. If the
- relation contains tuples that are associated with a particular relation
- (for example, its attributes, rules, triggers, etc) then specify the
- attribute number that contains the OID of the associated relation.
- This is used by CatalogCacheFlushRelation() to remove the correct
- tuples during a table drop or relcache invalidation event.
+ Add your entry to the cacheinfo[] array below. All cache lists are
+ alphabetical, so add it in the proper place. Specify the relation OID,
+ index OID, number of keys, key attribute numbers, and number of hash
+ buckets. If the relation contains tuples that are associated with a
+ particular relation (for example, its attributes, rules, triggers, etc)
+ then specify the attribute number that contains the OID of the associated
+ relation. This is used by CatalogCacheFlushRelation() to remove the
+ correct tuples during a table drop or relcache invalidation event.
+
+ The number of hash buckets must be a power of 2. It's reasonable to
+ set this to the number of entries that might be in the particular cache
+ in a medium-size database.
There must be a unique index underlying each syscache (ie, an index
whose key is the same as that of the cache). If there is not one
int reloidattr; /* attr number of rel OID reference, or 0 */
int nkeys; /* # of keys needed for cache lookup */
int key[4]; /* attribute numbers of key attrs */
+ int nbuckets; /* number of hash buckets for this cache */
};
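
To make the new nbuckets field concrete, a purely hypothetical cacheinfo[] entry laid out per struct cachedesc above might look like the sketch below (FooRelationId, FooOidIndexId, and the FOOOID tag are invented names, not real catalog objects; a real entry would name an existing catalog and its unique index, and keep the array alphabetical):

	{FooRelationId,				/* FOOOID (hypothetical) */
		FooOidIndexId,
		0,						/* no associated-relation OID attribute */
		1,						/* one lookup key */
		{
			ObjectIdAttributeNumber,
			0,
			0,
			0
		},
		64						/* hash buckets; must be a power of 2 */
	},
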
static const struct cachedesc cacheinfo[] = {
0,
0,
0
- }},
+ },
+ 32
+ },
{AccessMethodRelationId, /* AMNAME */
AmNameIndexId,
0,
0,
0,
0
- }},
+ },
+ 4
+ },
{AccessMethodRelationId, /* AMOID */
AmOidIndexId,
0,
0,
0,
0
- }},
+ },
+ 4
+ },
{AccessMethodOperatorRelationId, /* AMOPOPID */
AccessMethodOperatorIndexId,
0,
Anum_pg_amop_amopclaid,
0,
0
- }},
+ },
+ 64
+ },
{AccessMethodOperatorRelationId, /* AMOPSTRATEGY */
AccessMethodStrategyIndexId,
0,
Anum_pg_amop_amopsubtype,
Anum_pg_amop_amopstrategy,
0
- }},
+ },
+ 64
+ },
{AccessMethodProcedureRelationId, /* AMPROCNUM */
AccessMethodProcedureIndexId,
0,
Anum_pg_amproc_amprocsubtype,
Anum_pg_amproc_amprocnum,
0
- }},
+ },
+ 64
+ },
{AttributeRelationId, /* ATTNAME */
AttributeRelidNameIndexId,
Anum_pg_attribute_attrelid,
Anum_pg_attribute_attname,
0,
0
- }},
+ },
+ 2048
+ },
{AttributeRelationId, /* ATTNUM */
AttributeRelidNumIndexId,
Anum_pg_attribute_attrelid,
Anum_pg_attribute_attnum,
0,
0
- }},
+ },
+ 2048
+ },
{AuthMemRelationId, /* AUTHMEMMEMROLE */
AuthMemMemRoleIndexId,
0,
Anum_pg_auth_members_roleid,
0,
0
- }},
+ },
+ 128
+ },
{AuthMemRelationId, /* AUTHMEMROLEMEM */
AuthMemRoleMemIndexId,
0,
Anum_pg_auth_members_member,
0,
0
- }},
+ },
+ 128
+ },
{AuthIdRelationId, /* AUTHNAME */
AuthIdRolnameIndexId,
0,
0,
0,
0
- }},
+ },
+ 128
+ },
{AuthIdRelationId, /* AUTHOID */
AuthIdOidIndexId,
0,
0,
0,
0
- }},
+ },
+ 128
+ },
{
CastRelationId, /* CASTSOURCETARGET */
CastSourceTargetIndexId,
Anum_pg_cast_casttarget,
0,
0
- }},
+ },
+ 256
+ },
{OperatorClassRelationId, /* CLAAMNAMENSP */
OpclassAmNameNspIndexId,
0,
Anum_pg_opclass_opcname,
Anum_pg_opclass_opcnamespace,
0
- }},
+ },
+ 64
+ },
{OperatorClassRelationId, /* CLAOID */
OpclassOidIndexId,
0,
0,
0,
0
- }},
+ },
+ 64
+ },
{ConversionRelationId, /* CONDEFAULT */
ConversionDefaultIndexId,
0,
Anum_pg_conversion_conforencoding,
Anum_pg_conversion_contoencoding,
ObjectIdAttributeNumber,
- }},
+ },
+ 128
+ },
{ConversionRelationId, /* CONNAMENSP */
ConversionNameNspIndexId,
0,
Anum_pg_conversion_connamespace,
0,
0
- }},
+ },
+ 128
+ },
{ConversionRelationId, /* CONOID */
ConversionOidIndexId,
0,
0,
0,
0
- }},
+ },
+ 128
+ },
{DatabaseRelationId, /* DATABASEOID */
DatabaseOidIndexId,
0,
0,
0,
0
- }},
+ },
+ 4
+ },
{IndexRelationId, /* INDEXRELID */
IndexRelidIndexId,
Anum_pg_index_indrelid,
0,
0,
0
- }},
+ },
+ 1024
+ },
{InheritsRelationId, /* INHRELID */
InheritsRelidSeqnoIndexId,
Anum_pg_inherits_inhrelid,
Anum_pg_inherits_inhseqno,
0,
0
- }},
+ },
+ 256
+ },
{LanguageRelationId, /* LANGNAME */
LanguageNameIndexId,
0,
0,
0,
0
- }},
+ },
+ 4
+ },
{LanguageRelationId, /* LANGOID */
LanguageOidIndexId,
0,
0,
0,
0
- }},
+ },
+ 4
+ },
{NamespaceRelationId, /* NAMESPACENAME */
NamespaceNameIndexId,
0,
0,
0,
0
- }},
+ },
+ 256
+ },
{NamespaceRelationId, /* NAMESPACEOID */
NamespaceOidIndexId,
0,
0,
0,
0
- }},
+ },
+ 256
+ },
{OperatorRelationId, /* OPERNAMENSP */
OperatorNameNspIndexId,
0,
Anum_pg_operator_oprleft,
Anum_pg_operator_oprright,
Anum_pg_operator_oprnamespace
- }},
+ },
+ 1024
+ },
{OperatorRelationId, /* OPEROID */
OperatorOidIndexId,
0,
0,
0,
0
- }},
+ },
+ 1024
+ },
{ProcedureRelationId, /* PROCNAMEARGSNSP */
ProcedureNameArgsNspIndexId,
0,
Anum_pg_proc_proargtypes,
Anum_pg_proc_pronamespace,
0
- }},
+ },
+ 2048
+ },
{ProcedureRelationId, /* PROCOID */
ProcedureOidIndexId,
0,
0,
0,
0
- }},
+ },
+ 2048
+ },
{RelationRelationId, /* RELNAMENSP */
ClassNameNspIndexId,
ObjectIdAttributeNumber,
Anum_pg_class_relnamespace,
0,
0
- }},
+ },
+ 1024
+ },
{RelationRelationId, /* RELOID */
ClassOidIndexId,
ObjectIdAttributeNumber,
0,
0,
0
- }},
+ },
+ 1024
+ },
{RewriteRelationId, /* RULERELNAME */
RewriteRelRulenameIndexId,
Anum_pg_rewrite_ev_class,
Anum_pg_rewrite_rulename,
0,
0
- }},
+ },
+ 1024
+ },
{StatisticRelationId, /* STATRELATT */
StatisticRelidAttnumIndexId,
Anum_pg_statistic_starelid,
Anum_pg_statistic_staattnum,
0,
0
- }},
+ },
+ 1024
+ },
{TypeRelationId, /* TYPENAMENSP */
TypeNameNspIndexId,
Anum_pg_type_typrelid,
Anum_pg_type_typnamespace,
0,
0
- }},
+ },
+ 1024
+ },
{TypeRelationId, /* TYPEOID */
TypeOidIndexId,
Anum_pg_type_typrelid,
0,
0,
0
- }}
+ },
+ 1024
+ }
};
-static CatCache *SysCache[
- lengthof(cacheinfo)];
+static CatCache *SysCache[lengthof(cacheinfo)];
static int SysCacheSize = lengthof(cacheinfo);
static bool CacheInitialized = false;
cacheinfo[cacheId].indoid,
cacheinfo[cacheId].reloidattr,
cacheinfo[cacheId].nkeys,
- cacheinfo[cacheId].key);
+ cacheinfo[cacheId].key,
+ cacheinfo[cacheId].nbuckets);
if (!PointerIsValid(SysCache[cacheId]))
elog(ERROR, "could not initialize cache %u (%d)",
cacheinfo[cacheId].reloid, cacheId);
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/utils/catcache.h,v 1.58 2006/03/05 15:59:07 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/utils/catcache.h,v 1.59 2006/06/15 02:08:09 tgl Exp $
*
*-------------------------------------------------------------------------
*/
* searches, each of which will result in loading a negative entry
*/
long cc_invals; /* # of entries invalidated from cache */
- long cc_discards; /* # of entries discarded due to overflow */
long cc_lsearches; /* total # list-searches */
long cc_lhits; /* # of matches against existing lists */
#endif
CatCache *my_cache; /* link to owning catcache */
/*
- * Each tuple in a cache is a member of two Dllists: one lists all the
- * elements in all the caches in LRU order, and the other lists just the
- * elements in one hashbucket of one cache, also in LRU order.
+ * Each tuple in a cache is a member of a Dllist that stores the elements
+ * of its hash bucket. We keep each Dllist in LRU order to speed repeated
+ * lookups.
*/
- Dlelem lrulist_elem; /* list member of global LRU list */
Dlelem cache_elem; /* list member of per-bucket list */
/*
* table rows satisfying the partial key. (Note: none of these will be
* negative cache entries.)
*
- * A CatCList is only a member of a per-cache list; we do not do separate
- * LRU management for CatCLists. See CatalogCacheCleanup() for the
- * details of the management algorithm.
+ * A CatCList is only a member of a per-cache list; we do not currently
+ * divide them into hash buckets.
*
* A list marked "dead" must not be returned by subsequent searches.
* However, it won't be physically deleted from the cache until its
int refcount; /* number of active references */
bool dead; /* dead but not yet removed? */
bool ordered; /* members listed in index order? */
- bool touched; /* used since last CatalogCacheCleanup? */
short nkeys; /* number of lookup keys specified */
uint32 hash_value; /* hash value for lookup keys */
HeapTupleData tuple; /* header for tuple holding keys */
{
CatCache *ch_caches; /* head of list of CatCache structs */
int ch_ntup; /* # of tuples in all caches */
- int ch_maxtup; /* max # of tuples allowed (LRU) */
- Dllist ch_lrulist; /* overall LRU list, most recent first */
} CatCacheHeader;
extern CatCache *InitCatCache(int id, Oid reloid, Oid indexoid,
int reloidattr,
- int nkeys, const int *key);
+ int nkeys, const int *key,
+ int nbuckets);
extern void InitCatCachePhase2(CatCache *cache);
extern HeapTuple SearchCatCache(CatCache *cache,