#endif
#include "storage/lmgr.h"
#include "utils/builtins.h"
+#include "utils/datum.h"
#include "utils/fmgroids.h"
+#include "utils/hashutils.h"
#include "utils/inval.h"
#include "utils/memutils.h"
#include "utils/rel.h"
/* Cache management header --- pointer is NULL until created */
static CatCacheHeader *CacheHdr = NULL;
+static inline HeapTuple SearchCatCacheInternal(CatCache *cache,
+ int nkeys,
+ Datum v1, Datum v2,
+ Datum v3, Datum v4);
+
+static pg_noinline HeapTuple SearchCatCacheMiss(CatCache *cache,
+ int nkeys,
+ uint32 hashValue,
+ Index hashIndex,
+ Datum v1, Datum v2,
+ Datum v3, Datum v4);
static uint32 CatalogCacheComputeHashValue(CatCache *cache, int nkeys,
- ScanKey cur_skey);
-static uint32 CatalogCacheComputeTupleHashValue(CatCache *cache,
+ Datum v1, Datum v2, Datum v3, Datum v4);
+static uint32 CatalogCacheComputeTupleHashValue(CatCache *cache, int nkeys,
HeapTuple tuple);
+static inline bool CatalogCacheCompareTuple(const CatCache *cache, int nkeys,
+ const Datum *cachekeys,
+ const Datum *searchkeys);
#ifdef CATCACHE_STATS
static void CatCachePrintStats(int code, Datum arg);
static void CatCacheRemoveCList(CatCache *cache, CatCList *cl);
static void CatalogCacheInitializeCache(CatCache *cache);
static CatCTup *CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp,
+ Datum *arguments,
uint32 hashValue, Index hashIndex,
bool negative);
-static HeapTuple build_dummy_tuple(CatCache *cache, int nkeys, ScanKey skeys);
+
+static void CatCacheFreeKeys(TupleDesc tupdesc, int nkeys, int *attnos,
+ Datum *keys);
+static void CatCacheCopyKeys(TupleDesc tupdesc, int nkeys, int *attnos,
+ Datum *srckeys, Datum *dstkeys);
/*
*/
/*
- * Look up the hash and equality functions for system types that are used
- * as cache key fields.
- *
- * XXX this should be replaced by catalog lookups,
- * but that seems to pose considerable risk of circularity...
+ * Hash and equality functions for system types that are used as cache key
+ * fields. In some cases, we just call the regular SQL-callable functions for
+ * the appropriate data type, but that tends to be a little slow, and the
+ * speed of these functions is performance-critical. Therefore, for data
+ * types that frequently occur as catcache keys, we hard-code the logic here.
+ * Avoiding the overhead of DirectFunctionCallN(...) is a substantial win, and
+ * in certain cases (like int4) we can adopt a faster hash algorithm as well.
*/
+
+static bool
+chareqfast(Datum a, Datum b)
+{
+ return DatumGetChar(a) == DatumGetChar(b);
+}
+
+static uint32
+charhashfast(Datum datum)
+{
+ return murmurhash32((int32) DatumGetChar(datum));
+}
+
+static bool
+nameeqfast(Datum a, Datum b)
+{
+ char *ca = NameStr(*DatumGetName(a));
+ char *cb = NameStr(*DatumGetName(b));
+
+ return strncmp(ca, cb, NAMEDATALEN) == 0;
+}
+
+static uint32
+namehashfast(Datum datum)
+{
+ char *key = NameStr(*DatumGetName(datum));
+
+ return hash_any((unsigned char *) key, strlen(key));
+}
+
+static bool
+int2eqfast(Datum a, Datum b)
+{
+ return DatumGetInt16(a) == DatumGetInt16(b);
+}
+
+static uint32
+int2hashfast(Datum datum)
+{
+ return murmurhash32((int32) DatumGetInt16(datum));
+}
+
+static bool
+int4eqfast(Datum a, Datum b)
+{
+ return DatumGetInt32(a) == DatumGetInt32(b);
+}
+
+static uint32
+int4hashfast(Datum datum)
+{
+ return murmurhash32((int32) DatumGetInt32(datum));
+}
+
+static bool
+texteqfast(Datum a, Datum b)
+{
+ return DatumGetBool(DirectFunctionCall2(texteq, a, b));
+}
+
+static uint32
+texthashfast(Datum datum)
+{
+ return DatumGetInt32(DirectFunctionCall1(hashtext, datum));
+}
+
+static bool
+oidvectoreqfast(Datum a, Datum b)
+{
+ return DatumGetBool(DirectFunctionCall2(oidvectoreq, a, b));
+}
+
+static uint32
+oidvectorhashfast(Datum datum)
+{
+ return DatumGetInt32(DirectFunctionCall1(hashoidvector, datum));
+}
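+
+/*
+ * Note that the char, name, int2, int4 and oid cases above avoid fmgr
+ * entirely, while text and oidvector keys still go through
+ * DirectFunctionCallN on the regular SQL-callable functions
+ * (texteq/hashtext, oidvectoreq/hashoidvector).
+ */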
+
+/* Lookup support functions for a type. */
static void
-GetCCHashEqFuncs(Oid keytype, PGFunction *hashfunc, RegProcedure *eqfunc)
+GetCCHashEqFuncs(Oid keytype, CCHashFN *hashfunc, RegProcedure *eqfunc, CCFastEqualFN *fasteqfunc)
{
switch (keytype)
{
case BOOLOID:
- *hashfunc = hashchar;
-
+ *hashfunc = charhashfast;
+ *fasteqfunc = chareqfast;
*eqfunc = F_BOOLEQ;
break;
case CHAROID:
- *hashfunc = hashchar;
-
+ *hashfunc = charhashfast;
+ *fasteqfunc = chareqfast;
*eqfunc = F_CHAREQ;
break;
case NAMEOID:
- *hashfunc = hashname;
-
+ *hashfunc = namehashfast;
+ *fasteqfunc = nameeqfast;
*eqfunc = F_NAMEEQ;
break;
case INT2OID:
- *hashfunc = hashint2;
-
+ *hashfunc = int2hashfast;
+ *fasteqfunc = int2eqfast;
*eqfunc = F_INT2EQ;
break;
case INT4OID:
- *hashfunc = hashint4;
-
+ *hashfunc = int4hashfast;
+ *fasteqfunc = int4eqfast;
*eqfunc = F_INT4EQ;
break;
case TEXTOID:
- *hashfunc = hashtext;
-
+ *hashfunc = texthashfast;
+ *fasteqfunc = texteqfast;
*eqfunc = F_TEXTEQ;
break;
case OIDOID:
case REGDICTIONARYOID:
case REGROLEOID:
case REGNAMESPACEOID:
- *hashfunc = hashoid;
-
+ *hashfunc = int4hashfast;
+ *fasteqfunc = int4eqfast;
*eqfunc = F_OIDEQ;
break;
case OIDVECTOROID:
- *hashfunc = hashoidvector;
-
+ *hashfunc = oidvectorhashfast;
+ *fasteqfunc = oidvectoreqfast;
*eqfunc = F_OIDVECTOREQ;
break;
default:
* Compute the hash value associated with a given set of lookup keys
*/
static uint32
-CatalogCacheComputeHashValue(CatCache *cache, int nkeys, ScanKey cur_skey)
+CatalogCacheComputeHashValue(CatCache *cache, int nkeys,
+ Datum v1, Datum v2, Datum v3, Datum v4)
{
uint32 hashValue = 0;
uint32 oneHash;
+ CCHashFN *cc_hashfunc = cache->cc_hashfunc;
CACHE4_elog(DEBUG2, "CatalogCacheComputeHashValue %s %d %p",
cache->cc_relname,
switch (nkeys)
{
case 4:
- oneHash =
- DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[3],
- cur_skey[3].sk_argument));
+ oneHash = (cc_hashfunc[3]) (v4);
+
hashValue ^= oneHash << 24;
hashValue ^= oneHash >> 8;
/* FALLTHROUGH */
case 3:
- oneHash =
- DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[2],
- cur_skey[2].sk_argument));
+ oneHash = (cc_hashfunc[2]) (v3);
+
hashValue ^= oneHash << 16;
hashValue ^= oneHash >> 16;
/* FALLTHROUGH */
case 2:
- oneHash =
- DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[1],
- cur_skey[1].sk_argument));
+ oneHash = (cc_hashfunc[1]) (v2);
+
hashValue ^= oneHash << 8;
hashValue ^= oneHash >> 24;
/* FALLTHROUGH */
case 1:
- oneHash =
- DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[0],
- cur_skey[0].sk_argument));
+ oneHash = (cc_hashfunc[0]) (v1);
+
hashValue ^= oneHash;
break;
default:
* Compute the hash value associated with a given tuple to be cached
*/
static uint32
-CatalogCacheComputeTupleHashValue(CatCache *cache, HeapTuple tuple)
+CatalogCacheComputeTupleHashValue(CatCache *cache, int nkeys, HeapTuple tuple)
{
- ScanKeyData cur_skey[CATCACHE_MAXKEYS];
+ Datum v1 = 0,
+ v2 = 0,
+ v3 = 0,
+ v4 = 0;
bool isNull = false;
-
- /* Copy pre-initialized overhead data for scankey */
- memcpy(cur_skey, cache->cc_skey, sizeof(cur_skey));
+ int *cc_keyno = cache->cc_keyno;
+ TupleDesc cc_tupdesc = cache->cc_tupdesc;
/* Now extract key fields from tuple, insert into scankey */
- switch (cache->cc_nkeys)
+ switch (nkeys)
{
case 4:
- cur_skey[3].sk_argument =
- (cache->cc_key[3] == ObjectIdAttributeNumber)
+ v4 = (cc_keyno[3] == ObjectIdAttributeNumber)
? ObjectIdGetDatum(HeapTupleGetOid(tuple))
: fastgetattr(tuple,
- cache->cc_key[3],
- cache->cc_tupdesc,
+ cc_keyno[3],
+ cc_tupdesc,
&isNull);
Assert(!isNull);
/* FALLTHROUGH */
case 3:
- cur_skey[2].sk_argument =
- (cache->cc_key[2] == ObjectIdAttributeNumber)
+ v3 = (cc_keyno[2] == ObjectIdAttributeNumber)
? ObjectIdGetDatum(HeapTupleGetOid(tuple))
: fastgetattr(tuple,
- cache->cc_key[2],
- cache->cc_tupdesc,
+ cc_keyno[2],
+ cc_tupdesc,
&isNull);
Assert(!isNull);
/* FALLTHROUGH */
case 2:
- cur_skey[1].sk_argument =
- (cache->cc_key[1] == ObjectIdAttributeNumber)
+ v2 = (cc_keyno[1] == ObjectIdAttributeNumber)
? ObjectIdGetDatum(HeapTupleGetOid(tuple))
: fastgetattr(tuple,
- cache->cc_key[1],
- cache->cc_tupdesc,
+ cc_keyno[1],
+ cc_tupdesc,
&isNull);
Assert(!isNull);
/* FALLTHROUGH */
case 1:
- cur_skey[0].sk_argument =
- (cache->cc_key[0] == ObjectIdAttributeNumber)
+ v1 = (cc_keyno[0] == ObjectIdAttributeNumber)
? ObjectIdGetDatum(HeapTupleGetOid(tuple))
: fastgetattr(tuple,
- cache->cc_key[0],
- cache->cc_tupdesc,
+ cc_keyno[0],
+ cc_tupdesc,
&isNull);
Assert(!isNull);
break;
default:
- elog(FATAL, "wrong number of hash keys: %d", cache->cc_nkeys);
+ elog(FATAL, "wrong number of hash keys: %d", nkeys);
break;
}
- return CatalogCacheComputeHashValue(cache, cache->cc_nkeys, cur_skey);
+ return CatalogCacheComputeHashValue(cache, nkeys, v1, v2, v3, v4);
+}
+
+/*
+ * CatalogCacheCompareTuple
+ *
+ * Compare a tuple to the passed arguments.
+ */
+static inline bool
+CatalogCacheCompareTuple(const CatCache *cache, int nkeys,
+ const Datum *cachekeys,
+ const Datum *searchkeys)
+{
+ const CCFastEqualFN *cc_fastequal = cache->cc_fastequal;
+ int i;
+
+ for (i = 0; i < nkeys; i++)
+ {
+ if (!(cc_fastequal[i]) (cachekeys[i], searchkeys[i]))
+ return false;
+ }
+ return true;
}
/* delink from linked list */
dlist_delete(&ct->cache_elem);
- /* free associated tuple data */
- if (ct->tuple.t_data != NULL)
- pfree(ct->tuple.t_data);
+ /*
+ * Free keys when we're dealing with a negative entry; normal entries just
+ * point into the tuple, which is allocated together with the CatCTup.
+ */
+ if (ct->negative)
+ CatCacheFreeKeys(cache->cc_tupdesc, cache->cc_nkeys,
+ cache->cc_keyno, ct->keys);
+
pfree(ct);
--cache->cc_ntup;
/* delink from linked list */
dlist_delete(&cl->cache_elem);
- /* free associated tuple data */
- if (cl->tuple.t_data != NULL)
- pfree(cl->tuple.t_data);
+ /* free associated column data */
+ CatCacheFreeKeys(cache->cc_tupdesc, cl->nkeys,
+ cache->cc_keyno, cl->keys);
+
pfree(cl);
}
{
CatCache *cp;
MemoryContext oldcxt;
+ size_t sz;
int i;
/*
}
/*
- * allocate a new cache structure
+ * Allocate a new cache structure, aligning to a cacheline boundary
*
* Note: we rely on zeroing to initialize all the dlist headers correctly
*/
- cp = (CatCache *) palloc0(sizeof(CatCache));
+ sz = sizeof(CatCache) + PG_CACHE_LINE_SIZE;
+ cp = (CatCache *) CACHELINEALIGN(palloc0(sz));
cp->cc_bucket = palloc0(nbuckets * sizeof(dlist_head));
/*
cp->cc_nbuckets = nbuckets;
cp->cc_nkeys = nkeys;
for (i = 0; i < nkeys; ++i)
- cp->cc_key[i] = key[i];
+ cp->cc_keyno[i] = key[i];
/*
* new cache is initialized as far as we can go for now. print some
#define CatalogCacheInitializeCache_DEBUG2 \
do { \
- if (cache->cc_key[i] > 0) { \
+ if (cache->cc_keyno[i] > 0) { \
elog(DEBUG2, "CatalogCacheInitializeCache: load %d/%d w/%d, %u", \
- i+1, cache->cc_nkeys, cache->cc_key[i], \
- TupleDescAttr(tupdesc, cache->cc_key[i] - 1)->atttypid); \
+ i+1, cache->cc_nkeys, cache->cc_keyno[i], \
+ TupleDescAttr(tupdesc, cache->cc_keyno[i] - 1)->atttypid); \
} else { \
elog(DEBUG2, "CatalogCacheInitializeCache: load %d/%d w/%d", \
- i+1, cache->cc_nkeys, cache->cc_key[i]); \
+ i+1, cache->cc_nkeys, cache->cc_keyno[i]); \
} \
} while(0)
#else
CatalogCacheInitializeCache_DEBUG2;
- if (cache->cc_key[i] > 0)
+ if (cache->cc_keyno[i] > 0)
{
Form_pg_attribute attr = TupleDescAttr(tupdesc,
- cache->cc_key[i] - 1);
+ cache->cc_keyno[i] - 1);
keytype = attr->atttypid;
/* cache key columns should always be NOT NULL */
}
else
{
- if (cache->cc_key[i] != ObjectIdAttributeNumber)
+ if (cache->cc_keyno[i] != ObjectIdAttributeNumber)
elog(FATAL, "only sys attr supported in caches is OID");
keytype = OIDOID;
}
GetCCHashEqFuncs(keytype,
&cache->cc_hashfunc[i],
- &eqfunc);
-
- cache->cc_isname[i] = (keytype == NAMEOID);
+ &eqfunc,
+ &cache->cc_fastequal[i]);
/*
* Do equality-function lookup (we assume this won't need a catalog
CacheMemoryContext);
/* Initialize sk_attno suitably for HeapKeyTest() and heap scans */
- cache->cc_skey[i].sk_attno = cache->cc_key[i];
+ cache->cc_skey[i].sk_attno = cache->cc_keyno[i];
/* Fill in sk_strategy as well --- always standard equality */
cache->cc_skey[i].sk_strategy = BTEqualStrategyNumber;
}
/*
- * SearchCatCache
+ * SearchCatCacheInternal
*
* This call searches a system cache for a tuple, opening the relation
* if necessary (on the first access to a particular cache).
Datum v3,
Datum v4)
{
- ScanKeyData cur_skey[CATCACHE_MAXKEYS];
+ return SearchCatCacheInternal(cache, cache->cc_nkeys, v1, v2, v3, v4);
+}
+
+
+/*
+ * The SearchCatCacheN() functions are SearchCatCache() variants for a
+ * specific number of arguments. Because nkeys is passed as a compile-time
+ * constant, the compiler can inline SearchCatCacheInternal() and unroll its
+ * loops, making them a bit faster than SearchCatCache().
+ */
+
+HeapTuple
+SearchCatCache1(CatCache *cache,
+ Datum v1)
+{
+ return SearchCatCacheInternal(cache, 1, v1, 0, 0, 0);
+}
+
+
+HeapTuple
+SearchCatCache2(CatCache *cache,
+ Datum v1, Datum v2)
+{
+ return SearchCatCacheInternal(cache, 2, v1, v2, 0, 0);
+}
+
+
+HeapTuple
+SearchCatCache3(CatCache *cache,
+ Datum v1, Datum v2, Datum v3)
+{
+ return SearchCatCacheInternal(cache, 3, v1, v2, v3, 0);
+}
+
+
+HeapTuple
+SearchCatCache4(CatCache *cache,
+ Datum v1, Datum v2, Datum v3, Datum v4)
+{
+ return SearchCatCacheInternal(cache, 4, v1, v2, v3, v4);
+}
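+
+/*
+ * For illustration only: a hypothetical one-key lookup by OID would be
+ * written as
+ *
+ *     tuple = SearchCatCache1(cache, ObjectIdGetDatum(someoid));
+ *
+ * with the remaining key Datums supplied as zeroes internally rather than
+ * by the caller, as SearchCatCache() requires.
+ */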
+
+/*
+ * Work-horse for SearchCatCache/SearchCatCacheN.
+ */
+static inline HeapTuple
+SearchCatCacheInternal(CatCache *cache,
+ int nkeys,
+ Datum v1,
+ Datum v2,
+ Datum v3,
+ Datum v4)
+{
+ Datum arguments[CATCACHE_MAXKEYS];
uint32 hashValue;
Index hashIndex;
dlist_iter iter;
dlist_head *bucket;
CatCTup *ct;
- Relation relation;
- SysScanDesc scandesc;
- HeapTuple ntp;
/* Make sure we're in an xact, even if this ends up being a cache hit */
Assert(IsTransactionState());
+ Assert(cache->cc_nkeys == nkeys);
+
/*
* one-time startup overhead for each cache
*/
- if (cache->cc_tupdesc == NULL)
+ if (unlikely(cache->cc_tupdesc == NULL))
CatalogCacheInitializeCache(cache);
#ifdef CATCACHE_STATS
cache->cc_searches++;
#endif
- /*
- * initialize the search key information
- */
- memcpy(cur_skey, cache->cc_skey, sizeof(cur_skey));
- cur_skey[0].sk_argument = v1;
- cur_skey[1].sk_argument = v2;
- cur_skey[2].sk_argument = v3;
- cur_skey[3].sk_argument = v4;
+ /* Initialize local parameter array */
+ arguments[0] = v1;
+ arguments[1] = v2;
+ arguments[2] = v3;
+ arguments[3] = v4;
/*
* find the hash bucket in which to look for the tuple
*/
- hashValue = CatalogCacheComputeHashValue(cache, cache->cc_nkeys, cur_skey);
+ hashValue = CatalogCacheComputeHashValue(cache, nkeys, v1, v2, v3, v4);
hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);
/*
bucket = &cache->cc_bucket[hashIndex];
dlist_foreach(iter, bucket)
{
- bool res;
-
ct = dlist_container(CatCTup, cache_elem, iter.cur);
if (ct->dead)
if (ct->hash_value != hashValue)
continue; /* quickly skip entry if wrong hash val */
- /*
- * see if the cached tuple matches our key.
- */
- HeapKeyTest(&ct->tuple,
- cache->cc_tupdesc,
- cache->cc_nkeys,
- cur_skey,
- res);
- if (!res)
+ if (!CatalogCacheCompareTuple(cache, nkeys, ct->keys, arguments))
continue;
/*
}
}
+ return SearchCatCacheMiss(cache, nkeys, hashValue, hashIndex, v1, v2, v3, v4);
+}
+
+/*
+ * Search the actual catalogs, rather than the cache.
+ *
+ * This is kept separate from SearchCatCacheInternal() to keep the fast-path
+ * as small as possible. To avoid that effort being undone by a helpful
+ * compiler, try to explicitly forbid inlining.
+ */
+static pg_noinline HeapTuple
+SearchCatCacheMiss(CatCache *cache,
+ int nkeys,
+ uint32 hashValue,
+ Index hashIndex,
+ Datum v1,
+ Datum v2,
+ Datum v3,
+ Datum v4)
+{
+ ScanKeyData cur_skey[CATCACHE_MAXKEYS];
+ Relation relation;
+ SysScanDesc scandesc;
+ HeapTuple ntp;
+ CatCTup *ct;
+ Datum arguments[CATCACHE_MAXKEYS];
+
+ /* Initialize local parameter array */
+ arguments[0] = v1;
+ arguments[1] = v2;
+ arguments[2] = v3;
+ arguments[3] = v4;
+
+ /*
+ * Ok, we need to perform a lookup in the relation itself: copy the
+ * precomputed scankey and fill in the per-call key values.
+ */
+ memcpy(cur_skey, cache->cc_skey, sizeof(ScanKeyData) * nkeys);
+ cur_skey[0].sk_argument = v1;
+ cur_skey[1].sk_argument = v2;
+ cur_skey[2].sk_argument = v3;
+ cur_skey[3].sk_argument = v4;
+
/*
* Tuple was not found in cache, so we have to try to retrieve it directly
* from the relation. If found, we will add it to the cache; if not
cache->cc_indexoid,
IndexScanOK(cache, cur_skey),
NULL,
- cache->cc_nkeys,
+ nkeys,
cur_skey);
ct = NULL;
while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
{
- ct = CatalogCacheCreateEntry(cache, ntp,
+ ct = CatalogCacheCreateEntry(cache, ntp, arguments,
hashValue, hashIndex,
false);
/* immediately set the refcount to 1 */
if (IsBootstrapProcessingMode())
return NULL;
- ntp = build_dummy_tuple(cache, cache->cc_nkeys, cur_skey);
- ct = CatalogCacheCreateEntry(cache, ntp,
+ ct = CatalogCacheCreateEntry(cache, NULL, arguments,
hashValue, hashIndex,
true);
- heap_freetuple(ntp);
CACHE4_elog(DEBUG2, "SearchCatCache(%s): Contains %d/%d tuples",
cache->cc_relname, cache->cc_ntup, CacheHdr->ch_ntup);
Datum v3,
Datum v4)
{
- ScanKeyData cur_skey[CATCACHE_MAXKEYS];
-
/*
* one-time startup overhead for each cache
*/
if (cache->cc_tupdesc == NULL)
CatalogCacheInitializeCache(cache);
- /*
- * initialize the search key information
- */
- memcpy(cur_skey, cache->cc_skey, sizeof(cur_skey));
- cur_skey[0].sk_argument = v1;
- cur_skey[1].sk_argument = v2;
- cur_skey[2].sk_argument = v3;
- cur_skey[3].sk_argument = v4;
-
/*
* calculate the hash value
*/
- return CatalogCacheComputeHashValue(cache, cache->cc_nkeys, cur_skey);
+ return CatalogCacheComputeHashValue(cache, cache->cc_nkeys, v1, v2, v3, v4);
}
Datum v3,
Datum v4)
{
- ScanKeyData cur_skey[CATCACHE_MAXKEYS];
+ Datum arguments[CATCACHE_MAXKEYS];
uint32 lHashValue;
dlist_iter iter;
CatCList *cl;
cache->cc_lsearches++;
#endif
- /*
- * initialize the search key information
- */
- memcpy(cur_skey, cache->cc_skey, sizeof(cur_skey));
- cur_skey[0].sk_argument = v1;
- cur_skey[1].sk_argument = v2;
- cur_skey[2].sk_argument = v3;
- cur_skey[3].sk_argument = v4;
+ /* Initialize local parameter array */
+ arguments[0] = v1;
+ arguments[1] = v2;
+ arguments[2] = v3;
+ arguments[3] = v4;
/*
* compute a hash value of the given keys for faster search. We don't
* presently divide the CatCList items into buckets, but this still lets
* us skip non-matching items quickly most of the time.
*/
- lHashValue = CatalogCacheComputeHashValue(cache, nkeys, cur_skey);
+ lHashValue = CatalogCacheComputeHashValue(cache, nkeys, v1, v2, v3, v4);
/*
* scan the items until we find a match or exhaust our list
*/
dlist_foreach(iter, &cache->cc_lists)
{
- bool res;
-
cl = dlist_container(CatCList, cache_elem, iter.cur);
if (cl->dead)
*/
if (cl->nkeys != nkeys)
continue;
- HeapKeyTest(&cl->tuple,
- cache->cc_tupdesc,
- nkeys,
- cur_skey,
- res);
- if (!res)
+
+ if (!CatalogCacheCompareTuple(cache, nkeys, cl->keys, arguments))
continue;
/*
PG_TRY();
{
+ ScanKeyData cur_skey[CATCACHE_MAXKEYS];
Relation relation;
SysScanDesc scandesc;
+ /*
+ * Ok, we need to perform a lookup in the relation itself: copy the
+ * precomputed scankey and fill in the per-call key values.
+ */
+ memcpy(cur_skey, cache->cc_skey, sizeof(ScanKeyData) * cache->cc_nkeys);
+ cur_skey[0].sk_argument = v1;
+ cur_skey[1].sk_argument = v2;
+ cur_skey[2].sk_argument = v3;
+ cur_skey[3].sk_argument = v4;
+
relation = heap_open(cache->cc_reloid, AccessShareLock);
scandesc = systable_beginscan(relation,
* See if there's an entry for this tuple already.
*/
ct = NULL;
- hashValue = CatalogCacheComputeTupleHashValue(cache, ntp);
+ hashValue = CatalogCacheComputeTupleHashValue(cache, cache->cc_nkeys, ntp);
hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);
bucket = &cache->cc_bucket[hashIndex];
if (!found)
{
/* We didn't find a usable entry, so make a new one */
- ct = CatalogCacheCreateEntry(cache, ntp,
+ ct = CatalogCacheCreateEntry(cache, ntp, arguments,
hashValue, hashIndex,
false);
}
heap_close(relation, AccessShareLock);
- /*
- * Now we can build the CatCList entry. First we need a dummy tuple
- * containing the key values...
- */
- ntp = build_dummy_tuple(cache, nkeys, cur_skey);
+ /* Now we can build the CatCList entry. */
oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
nmembers = list_length(ctlist);
cl = (CatCList *)
palloc(offsetof(CatCList, members) + nmembers * sizeof(CatCTup *));
- heap_copytuple_with_tuple(ntp, &cl->tuple);
+
+ /* Extract key values */
+ CatCacheCopyKeys(cache->cc_tupdesc, nkeys, cache->cc_keyno,
+ arguments, cl->keys);
MemoryContextSwitchTo(oldcxt);
- heap_freetuple(ntp);
/*
* We are now past the last thing that could trigger an elog before we
* supplied data into it. The new entry initially has refcount 0.
*/
static CatCTup *
-CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp,
- uint32 hashValue, Index hashIndex, bool negative)
+CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp, Datum *arguments,
+ uint32 hashValue, Index hashIndex,
+ bool negative)
{
CatCTup *ct;
HeapTuple dtp;
MemoryContext oldcxt;
- /*
- * If there are any out-of-line toasted fields in the tuple, expand them
- * in-line. This saves cycles during later use of the catcache entry, and
- * also protects us against the possibility of the toast tuples being
- * freed before we attempt to fetch them, in case of something using a
- * slightly stale catcache entry.
- */
- if (HeapTupleHasExternal(ntp))
- dtp = toast_flatten_tuple(ntp, cache->cc_tupdesc);
- else
- dtp = ntp;
+ /* negative entries have no tuple associated */
+ if (ntp)
+ {
+ int i;
- /*
- * Allocate CatCTup header in cache memory, and copy the tuple there too.
- */
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
- ct = (CatCTup *) palloc(sizeof(CatCTup));
- heap_copytuple_with_tuple(dtp, &ct->tuple);
- MemoryContextSwitchTo(oldcxt);
+ Assert(!negative);
+
+ /*
+ * If there are any out-of-line toasted fields in the tuple, expand
+ * them in-line. This saves cycles during later use of the catcache
+ * entry, and also protects us against the possibility of the toast
+ * tuples being freed before we attempt to fetch them, in case of
+ * something using a slightly stale catcache entry.
+ */
+ if (HeapTupleHasExternal(ntp))
+ dtp = toast_flatten_tuple(ntp, cache->cc_tupdesc);
+ else
+ dtp = ntp;
+
+ /* Allocate memory for CatCTup and the cached tuple in one go */
+ oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
- if (dtp != ntp)
- heap_freetuple(dtp);
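+
+ /*
+ * Layout of a positive entry: the CatCTup header, MAXALIGN padding, then a
+ * verbatim copy of the tuple data, so the whole entry can later be released
+ * with the single pfree() in CatCacheRemoveCTup().
+ */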
+ ct = (CatCTup *) palloc(sizeof(CatCTup) +
+ MAXIMUM_ALIGNOF + dtp->t_len);
+ ct->tuple.t_len = dtp->t_len;
+ ct->tuple.t_self = dtp->t_self;
+ ct->tuple.t_tableOid = dtp->t_tableOid;
+ ct->tuple.t_data = (HeapTupleHeader)
+ MAXALIGN(((char *) ct) + sizeof(CatCTup));
+ /* copy tuple contents */
+ memcpy((char *) ct->tuple.t_data,
+ (const char *) dtp->t_data,
+ dtp->t_len);
+ MemoryContextSwitchTo(oldcxt);
+
+ if (dtp != ntp)
+ heap_freetuple(dtp);
+
+ /* extract keys - they'll point into the tuple if not by-value */
+ for (i = 0; i < cache->cc_nkeys; i++)
+ {
+ Datum atp;
+ bool isnull;
+
+ atp = heap_getattr(&ct->tuple,
+ cache->cc_keyno[i],
+ cache->cc_tupdesc,
+ &isnull);
+ Assert(!isnull);
+ ct->keys[i] = atp;
+ }
+ }
+ else
+ {
+ Assert(negative);
+ oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ ct = (CatCTup *) palloc(sizeof(CatCTup));
+
+ /*
+ * Store keys - they'll point into separately allocated memory if not
+ * by-value.
+ */
+ CatCacheCopyKeys(cache->cc_tupdesc, cache->cc_nkeys, cache->cc_keyno,
+ arguments, ct->keys);
+ MemoryContextSwitchTo(oldcxt);
+ }
/*
* Finish initializing the CatCTup header, and add it to the cache's
}
/*
- * build_dummy_tuple
- * Generate a palloc'd HeapTuple that contains the specified key
- * columns, and NULLs for other columns.
- *
- * This is used to store the keys for negative cache entries and CatCList
- * entries, which don't have real tuples associated with them.
+ * Helper routine that frees keys stored in the keys array.
*/
-static HeapTuple
-build_dummy_tuple(CatCache *cache, int nkeys, ScanKey skeys)
+static void
+CatCacheFreeKeys(TupleDesc tupdesc, int nkeys, int *attnos, Datum *keys)
{
- HeapTuple ntp;
- TupleDesc tupDesc = cache->cc_tupdesc;
- Datum *values;
- bool *nulls;
- Oid tupOid = InvalidOid;
- NameData tempNames[4];
int i;
- values = (Datum *) palloc(tupDesc->natts * sizeof(Datum));
- nulls = (bool *) palloc(tupDesc->natts * sizeof(bool));
+ for (i = 0; i < nkeys; i++)
+ {
+ int attnum = attnos[i];
+ Form_pg_attribute att;
+
+ /* only valid system attribute is the oid, which is by value */
+ if (attnum == ObjectIdAttributeNumber)
+ continue;
+ Assert(attnum > 0);
+
+ att = TupleDescAttr(tupdesc, attnum - 1);
+
+ if (!att->attbyval)
+ pfree(DatumGetPointer(keys[i]));
+ }
+}
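+
+/*
+ * Note that this is needed only for negative entries and CatCList entries,
+ * whose keys are stored via CatCacheCopyKeys(); a positive entry's keys point
+ * into its cached tuple and are freed along with it.
+ */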
+
+/*
+ * Helper routine that copies the keys in the srckeys array into the dstkeys
+ * one, guaranteeing that the datums are fully allocated in the current memory
+ * context.
+ */
+static void
+CatCacheCopyKeys(TupleDesc tupdesc, int nkeys, int *attnos,
+ Datum *srckeys, Datum *dstkeys)
+{
+ int i;
- memset(values, 0, tupDesc->natts * sizeof(Datum));
- memset(nulls, true, tupDesc->natts * sizeof(bool));
+ /*
+ * XXX: memory and lookup performance could possibly be improved by
+ * storing all keys in one allocation.
+ */
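+
+ /*
+ * datumCopy() returns the Datum unchanged for by-value attributes and
+ * palloc()s a copy in the current memory context otherwise, which is why
+ * callers switch to the target context (e.g. CacheMemoryContext) before
+ * calling this routine.
+ */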
for (i = 0; i < nkeys; i++)
{
- int attindex = cache->cc_key[i];
- Datum keyval = skeys[i].sk_argument;
+ int attnum = attnos[i];
- if (attindex > 0)
+ if (attnum == ObjectIdAttributeNumber)
+ {
+ dstkeys[i] = srckeys[i];
+ }
+ else
{
+ Form_pg_attribute att = TupleDescAttr(tupdesc, attnum - 1);
+ Datum src = srckeys[i];
+ NameData srcname;
+
/*
- * Here we must be careful in case the caller passed a C string
- * where a NAME is wanted: convert the given argument to a
- * correctly padded NAME. Otherwise the memcpy() done in
- * heap_form_tuple could fall off the end of memory.
+ * Must be careful in case the caller passed a C string where a
+ * NAME is wanted: convert the given argument to a correctly
+ * padded NAME. Otherwise the memcpy() done by datumCopy() could
+ * fall off the end of memory.
*/
- if (cache->cc_isname[i])
+ if (att->atttypid == NAMEOID)
{
- Name newval = &tempNames[i];
-
- namestrcpy(newval, DatumGetCString(keyval));
- keyval = NameGetDatum(newval);
+ namestrcpy(&srcname, DatumGetCString(src));
+ src = NameGetDatum(&srcname);
}
- values[attindex - 1] = keyval;
- nulls[attindex - 1] = false;
- }
- else
- {
- Assert(attindex == ObjectIdAttributeNumber);
- tupOid = DatumGetObjectId(keyval);
+
+ dstkeys[i] = datumCopy(src,
+ att->attbyval,
+ att->attlen);
}
}
- ntp = heap_form_tuple(tupDesc, values, nulls);
- if (tupOid != InvalidOid)
- HeapTupleSetOid(ntp, tupOid);
-
- pfree(values);
- pfree(nulls);
-
- return ntp;
}
-
/*
* PrepareToInvalidateCacheTuple()
*
if (ccp->cc_tupdesc == NULL)
CatalogCacheInitializeCache(ccp);
- hashvalue = CatalogCacheComputeTupleHashValue(ccp, tuple);
+ hashvalue = CatalogCacheComputeTupleHashValue(ccp, ccp->cc_nkeys, tuple);
dbid = ccp->cc_relisshared ? (Oid) 0 : MyDatabaseId;
(*function) (ccp->id, hashvalue, dbid);
{
uint32 newhashvalue;
- newhashvalue = CatalogCacheComputeTupleHashValue(ccp, newtuple);
+ newhashvalue = CatalogCacheComputeTupleHashValue(ccp, ccp->cc_nkeys, newtuple);
if (newhashvalue != hashvalue)
(*function) (ccp->id, newhashvalue, dbid);
#define CATCACHE_MAXKEYS 4
+
+/* function computing a datum's hash */
+typedef uint32 (*CCHashFN) (Datum datum);
+
+/* function computing equality of two datums */
+typedef bool (*CCFastEqualFN) (Datum a, Datum b);
+
typedef struct catcache
{
int id; /* cache identifier --- see syscache.h */
- slist_node cc_next; /* list link */
+ int cc_nbuckets; /* # of hash buckets in this cache */
+ TupleDesc cc_tupdesc; /* tuple descriptor (copied from reldesc) */
+ dlist_head *cc_bucket; /* hash buckets */
+ CCHashFN cc_hashfunc[CATCACHE_MAXKEYS]; /* hash function for each key */
+ CCFastEqualFN cc_fastequal[CATCACHE_MAXKEYS]; /* fast equal function for
+ * each key */
+ int cc_keyno[CATCACHE_MAXKEYS]; /* AttrNumber of each key */
+ dlist_head cc_lists; /* list of CatCList structs */
+ int cc_ntup; /* # of tuples currently in this cache */
+ int cc_nkeys; /* # of keys (1..CATCACHE_MAXKEYS) */
const char *cc_relname; /* name of relation the tuples come from */
Oid cc_reloid; /* OID of relation the tuples come from */
Oid cc_indexoid; /* OID of index matching cache keys */
bool cc_relisshared; /* is relation shared across databases? */
- TupleDesc cc_tupdesc; /* tuple descriptor (copied from reldesc) */
- int cc_ntup; /* # of tuples currently in this cache */
- int cc_nbuckets; /* # of hash buckets in this cache */
- int cc_nkeys; /* # of keys (1..CATCACHE_MAXKEYS) */
- int cc_key[CATCACHE_MAXKEYS]; /* AttrNumber of each key */
- PGFunction cc_hashfunc[CATCACHE_MAXKEYS]; /* hash function for each key */
+ slist_node cc_next; /* list link */
ScanKeyData cc_skey[CATCACHE_MAXKEYS]; /* precomputed key info for heap
* scans */
- bool cc_isname[CATCACHE_MAXKEYS]; /* flag "name" key columns */
- dlist_head cc_lists; /* list of CatCList structs */
- dlist_head *cc_bucket; /* hash buckets */
/*
* Keep these at the end, so that compiling catcache.c with CATCACHE_STATS
{
int ct_magic; /* for identifying CatCTup entries */
#define CT_MAGIC 0x57261502
- CatCache *my_cache; /* link to owning catcache */
+
+ uint32 hash_value; /* hash value for this tuple's keys */
+
+ /*
+ * Lookup keys for the entry. By-reference datums point into the tuple for
+ * positive cache entries, and are separately allocated for negative ones.
+ */
+ Datum keys[CATCACHE_MAXKEYS];
/*
* Each tuple in a cache is a member of a dlist that stores the elements
*/
dlist_node cache_elem; /* list member of per-bucket list */
- /*
- * The tuple may also be a member of at most one CatCList. (If a single
- * catcache is list-searched with varying numbers of keys, we may have to
- * make multiple entries for the same tuple because of this restriction.
- * Currently, that's not expected to be common, so we accept the potential
- * inefficiency.)
- */
- struct catclist *c_list; /* containing CatCList, or NULL if none */
-
/*
* A tuple marked "dead" must not be returned by subsequent searches.
* However, it won't be physically deleted from the cache until its
int refcount; /* number of active references */
bool dead; /* dead but not yet removed? */
bool negative; /* negative cache entry? */
- uint32 hash_value; /* hash value for this tuple's keys */
HeapTupleData tuple; /* tuple management header */
+
+ /*
+ * The tuple may also be a member of at most one CatCList. (If a single
+ * catcache is list-searched with varying numbers of keys, we may have to
+ * make multiple entries for the same tuple because of this restriction.
+ * Currently, that's not expected to be common, so we accept the potential
+ * inefficiency.)
+ */
+ struct catclist *c_list; /* containing CatCList, or NULL if none */
+
+ CatCache *my_cache; /* link to owning catcache */
+ /* properly aligned tuple data follows, unless a negative entry */
} CatCTup;
+/*
+ * A CatCList describes the result of a partial search, ie, a search using
+ * only the first K key columns of an N-key cache. The keys used are stored
+ * in the keys[] array to represent the stored key set. The CatCList
+ * object contains links to cache entries for all the table rows satisfying
+ * the partial key. (Note: none of these will be negative cache entries.)
+ *
+ * A CatCList is only a member of a per-cache list; we do not currently
+ * divide them into hash buckets.
+ *
+ * A list marked "dead" must not be returned by subsequent searches.
+ * However, it won't be physically deleted from the cache until its
+ * refcount goes to zero. (A list should be marked dead if any of its
+ * member entries are dead.)
+ *
+ * If "ordered" is true then the member tuples appear in the order of the
+ * cache's underlying index. This will be true in normal operation, but
+ * might not be true during bootstrap or recovery operations. (namespace.c
+ * is able to save some cycles when it is true.)
+ */
typedef struct catclist
{
int cl_magic; /* for identifying CatCList entries */
#define CL_MAGIC 0x52765103
- CatCache *my_cache; /* link to owning catcache */
+
+ uint32 hash_value; /* hash value for lookup keys */
+
+ dlist_node cache_elem; /* list member of per-catcache list */
/*
- * A CatCList describes the result of a partial search, ie, a search using
- * only the first K key columns of an N-key cache. We form the keys used
- * into a tuple (with other attributes NULL) to represent the stored key
- * set. The CatCList object contains links to cache entries for all the
- * table rows satisfying the partial key. (Note: none of these will be
- * negative cache entries.)
- *
- * A CatCList is only a member of a per-cache list; we do not currently
- * divide them into hash buckets.
- *
- * A list marked "dead" must not be returned by subsequent searches.
- * However, it won't be physically deleted from the cache until its
- * refcount goes to zero. (A list should be marked dead if any of its
- * member entries are dead.)
- *
- * If "ordered" is true then the member tuples appear in the order of the
- * cache's underlying index. This will be true in normal operation, but
- * might not be true during bootstrap or recovery operations. (namespace.c
- * is able to save some cycles when it is true.)
+ * Lookup keys for the entry, with the first nkeys elements being valid.
+ * All by-reference datums are separately allocated.
*/
- dlist_node cache_elem; /* list member of per-catcache list */
+ Datum keys[CATCACHE_MAXKEYS];
+
int refcount; /* number of active references */
bool dead; /* dead but not yet removed? */
bool ordered; /* members listed in index order? */
short nkeys; /* number of lookup keys specified */
- uint32 hash_value; /* hash value for lookup keys */
- HeapTupleData tuple; /* header for tuple holding keys */
int n_members; /* number of member tuples */
+ CatCache *my_cache; /* link to owning catcache */
CatCTup *members[FLEXIBLE_ARRAY_MEMBER]; /* members */
} CatCList;
extern void InitCatCachePhase2(CatCache *cache, bool touch_index);
extern HeapTuple SearchCatCache(CatCache *cache,
- Datum v1, Datum v2,
- Datum v3, Datum v4);
+ Datum v1, Datum v2, Datum v3, Datum v4);
+extern HeapTuple SearchCatCache1(CatCache *cache,
+ Datum v1);
+extern HeapTuple SearchCatCache2(CatCache *cache,
+ Datum v1, Datum v2);
+extern HeapTuple SearchCatCache3(CatCache *cache,
+ Datum v1, Datum v2, Datum v3);
+extern HeapTuple SearchCatCache4(CatCache *cache,
+ Datum v1, Datum v2, Datum v3, Datum v4);
extern void ReleaseCatCache(HeapTuple tuple);
extern uint32 GetCatCacheHashValue(CatCache *cache,